Compare commits
1995 commits
310_plus...go_httpx_b
Author | SHA1 | Date
---|---|---
(1,995 commit SHAs listed in the 310_plus...go_httpx_b range; the author, date, and commit-message columns were not captured in this export)
@@ -3,9 +3,8 @@ name: CI
on:
  # Triggers the workflow on push or pull request events but only for the master branch
  push:
    branches: [ master ]
  pull_request:
  push:
    branches: [ master ]

  # Allows you to run this workflow manually from the Actions tab

@@ -14,19 +13,49 @@ on:
jobs:

  # test that we can generate a software distribution and install it
  # thus avoid missing file issues after packaging.
  sdist-linux:
    name: 'sdist'
    runs-on: ubuntu-latest

    steps:
      - name: Checkout
        uses: actions/checkout@v3

      - name: Setup python
        uses: actions/setup-python@v2
        with:
          python-version: '3.10'

      - name: Build sdist
        run: python setup.py sdist --formats=zip

      - name: Install sdist from .zips
        run: python -m pip install dist/*.zip

  testing:
    name: 'install + test-suite'
    timeout-minutes: 10
    runs-on: ubuntu-latest
    steps:

      - name: Checkout
        uses: actions/checkout@v3

      # elastic only
      # - name: Build DB container
      #   run: docker build -t piker:elastic dockering/elastic

      - name: Setup python
        uses: actions/setup-python@v3
        uses: actions/setup-python@v4
        with:
          python-version: '3.10'

      # elastic only
      # - name: Install dependencies
      #   run: pip install -U .[es] -r requirements-test.txt -r requirements.txt --upgrade-strategy eager

      - name: Install dependencies
        run: pip install -U . -r requirements-test.txt -r requirements.txt --upgrade-strategy eager
README.rst (128 changed lines)

@@ -13,13 +13,14 @@ computational trading targeted at `hardcore Linux users <comp_trader>`_ .
we use as much bleeding edge tech as possible including (but not limited to):

- latest python for glue_
- trio_ for `structured concurrency`_
- tractor_ for distributed, multi-core, real-time streaming
- marketstore_ for historical and real-time tick data persistence and sharing
- techtonicdb_ for L2 book storage
- trio_ & tractor_ for our distributed, multi-core, real-time streaming
  `structured concurrency`_ runtime B)
- Qt_ for pristine high performance UIs
- pyqtgraph_ for real-time charting
- ``numpy`` and ``numba`` for `fast numerics`_
- ``polars`` ``numpy`` and ``numba`` for `fast numerics`_
- `apache arrow and parquet`_ for time series history management
  persistence and sharing
- (prototyped) techtonicdb_ for L2 book storage

.. |travis| image:: https://img.shields.io/travis/pikers/piker/master.svg
    :target: https://travis-ci.org/pikers/piker

@@ -31,6 +32,7 @@ we use as much bleeding edge tech as possible including (but not limited to):
.. _Qt: https://www.qt.io/
.. _pyqtgraph: https://github.com/pyqtgraph/pyqtgraph
.. _glue: https://numpy.org/doc/stable/user/c-info.python-as-glue.html#using-python-as-glue
.. _apache arrow and parquet: https://arrow.apache.org/faq/
.. _fast numerics: https://zerowithdot.com/python-numpy-and-pandas-performance/
.. _comp_trader: https://jfaleiro.wordpress.com/2019/10/09/computational-trader/


@@ -58,8 +60,33 @@ building the best looking, most reliable, keyboard friendly trading
platform is the dream; join the cause.


install
*******
sane install with `poetry`
**************************
TODO!


rigorous install on ``nixos`` using ``poetry2nix``
**************************************************
TODO!


hacky install on nixos
**********************
`NixOS` is our core devs' distro of choice for which we offer
a stringently defined development shell environment that can be loaded with::

    nix-shell develop.nix

this will setup the required python environment to run piker, make sure to
run::

    pip install -r requirements.txt -e .

once after loading the shell


install wild-west style via `pip`
*********************************
``piker`` is currently under heavy pre-alpha development and as such
should be cloned from this repo and hacked on directly.


@@ -72,93 +99,6 @@ for a development install::
    pip install -r requirements.txt -e .


install for tinas
*****************
for windows peeps you can start by installing all the prerequisite software:

- install git with all default settings - https://git-scm.com/download/win
- install anaconda all default settings - https://www.anaconda.com/products/individual
- install microsoft build tools (check the box for Desktop development for C++, you might be able to uncheck some optional downloads) - https://visualstudio.microsoft.com/visual-cpp-build-tools/
- install visual studio code default settings - https://code.visualstudio.com/download


then, `crack a conda shell`_ and run the following commands::

    mkdir code  # create code directory
    cd code  # change directory to code
    git clone https://github.com/pikers/piker.git  # downloads piker installation package from github
    cd piker  # change directory to piker

    conda create -n pikonda  # creates conda environment named pikonda
    conda activate pikonda  # activates pikonda

    conda install -c conda-forge python-levenshtein  # in case it is not already installed
    conda install pip  # may already be installed
    pip  # will show if pip is installed

    pip install -e . -r requirements.txt  # install piker in editable mode

test Piker to see if it is working::

    piker -b binance chart btcusdt.binance  # formatting for loading a chart
    piker -b kraken -b binance chart xbtusdt.kraken
    piker -b kraken -b binance -b ib chart qqq.nasdaq.ib
    piker -b ib chart tsla.nasdaq.ib

potential error::

    FileNotFoundError: [Errno 2] No such file or directory: 'C:\\Users\\user\\AppData\\Roaming\\piker\\brokers.toml'

solution:

- navigate to file directory above (may be different on your machine, location should be listed in the error code)
- copy and paste file from 'C:\\Users\\user\\code\\data/brokers.toml' or create a blank file using notepad at the location above

Visual Studio Code setup:

- now that piker is installed we can set up vscode as the default terminal for running piker and editing the code
- open Visual Studio Code
- file --> Add Folder to Workspace --> C:\Users\user\code\piker (adds piker directory where all piker files are located)
- file --> Save Workspace As --> save it wherever you want and call it whatever you want, this is going to be your default workspace for running and editing piker code
- ctrl + shift + p --> start typing Python: Select Interpreter --> when the option comes up select it --> Select at the workspace level --> select the one that shows ('pikonda')
- change the default terminal to cmd.exe instead of powershell (default)
- now when you create a new terminal VScode should automatically activate your conda env so that piker can be run as the first command after a new terminal is created

also, try out fancyzones as part of powertoyz for a decent tiling windows manager to manage all the cool new software you are going to be running.

.. _conda installed: https://
.. _C++ build toolz: https://
.. _crack a conda shell: https://
.. _vscode: https://

.. link to the tina guide
.. _setup a coolio tiled wm console: https://

provider support
****************
for live data feeds the in-progress set of supported brokers is:

- IB_ via ``ib_insync``, also see our `container docs`_
- binance_ and kraken_ for crypto over their public websocket API
- questrade_ (ish) which comes with effectively free L1

coming soon...

- webull_ via the reverse engineered public API
- yahoo via yliveticker_

if you want your broker supported and they have an API let us know.

.. _IB: https://interactivebrokers.github.io/tws-api/index.html
.. _container docs: https://github.com/pikers/piker/tree/master/dockering/ib
.. _questrade: https://www.questrade.com/api/documentation
.. _kraken: https://www.kraken.com/features/api#public-market-data
.. _binance: https://github.com/pikers/piker/pull/182
.. _webull: https://github.com/tedchou12/webull
.. _yliveticker: https://github.com/yahoofinancelive/yliveticker
.. _coinbase: https://docs.pro.coinbase.com/#websocket-feed


check out our charts
********************
bet you weren't expecting this from the foss::
@@ -1,19 +1,52 @@
[questrade]
refresh_token = ""
access_token = ""
api_server = "https://api06.iq.questrade.com/"
expires_in = 1800
token_type = "Bearer"
expires_at = 1616095326.355846
################
# ---- CEXY ----
################
[binance]
accounts.paper = 'paper'

accounts.usdtm = 'futes'
futes.use_testnet = false
futes.api_key = ''
futes.api_secret = ''

accounts.spot = 'spot'
spot.use_testnet = false
spot.api_key = ''
spot.api_secret = ''


[deribit]
key_id = ''
key_secret = ''


[kraken]
key_descr = "api_0"
api_key = ""
secret = ""
key_descr = ''
api_key = ''
secret = ''


[kucoin]
key_id = ''
key_secret = ''
key_passphrase = ''


################
# -- BROKERZ ---
################
[questrade]
refresh_token = ''
access_token = ''
api_server = 'https://api06.iq.questrade.com/'
expires_in = 1800
token_type = 'Bearer'
expires_at = 1616095326.355846


[ib]
hosts = [
    "127.0.0.1",
    '127.0.0.1',
]
# XXX: the order in which ports will be scanned
# (by the `brokerd` daemon-actor)

@@ -30,8 +63,8 @@ ports = [
# is not supported so you have to manually download
# and XML report and put it in a location that can be
# accessed by the ``brokerd.ib`` backend code for parsing.
flex_token = '666666666666666666666666'
flex_trades_query_id = '666666' # live account
flex_token = ''
flex_trades_query_id = '' # live account

# when clients are being scanned this determines
# which clients are preferred to be used for data

@@ -47,6 +80,6 @@ prefer_data_account = [
# the order in which accounts will be selectable
# in the order mode UI (if found via clients during
# API-app scanning)when a new symbol is loaded.
paper = "XX0000000"
margin = "X0000000"
ira = "X0000000"
paper = 'XX0000000'
margin = 'X0000000'
ira = 'X0000000'
@@ -0,0 +1,12 @@
[network]
tsdb.backend = 'marketstore'
tsdb.host = 'localhost'
tsdb.grpc_port = 5995

[ui]
# set custom font + size which will scale entire UI
# font_size = 16
# font_name = 'Monospaced'

# colorscheme = 'default' # UNUSED
# graphics.update_throttle = 60 # Hz # TODO
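The new ``[network]`` section above is plain TOML, so it can be inspected directly from Python for a quick sanity check. The following is only a minimal sketch, assuming Python 3.11's stdlib ``tomllib`` and a local copy of the file; the path and printed keys simply mirror the snippet above and this is not piker's actual config loader::

    # read_conf.py - minimal sketch, not piker's config machinery
    import tomllib  # stdlib TOML parser, Python 3.11+
    from pathlib import Path

    def load_tsdb_conf(path: str = "conf.toml") -> dict:
        # parse the TOML shown above
        with Path(path).open("rb") as f:
            conf = tomllib.load(f)

        # dotted keys like `tsdb.backend` parse into nested tables
        return conf["network"]["tsdb"]

    if __name__ == "__main__":
        tsdb = load_tsdb_conf()
        print(tsdb["backend"], tsdb["host"], tsdb["grpc_port"])
        # -> marketstore localhost 5995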
@@ -0,0 +1,47 @@
with (import <nixpkgs> {});

stdenv.mkDerivation {
  name = "poetry-env";
  buildInputs = [
    # System requirements.
    readline

    # TODO: hacky non-poetry install stuff we need to get rid of!!
    poetry
    # virtualenv
    # setuptools
    # pip

    # Python requirements (enough to get a virtualenv going).
    python311Full

    # obviously, and see below for hacked linking
    python311Packages.pyqt5
    python311Packages.pyqt5_sip
    # python311Packages.qtpy

    # numerics deps
    python311Packages.levenshtein
    python311Packages.fastparquet
    python311Packages.polars

  ];
  # environment.sessionVariables = {
  #   LD_LIBRARY_PATH = "${pkgs.stdenv.cc.cc.lib}/lib";
  # };
  src = null;
  shellHook = ''
    # Allow the use of wheels.
    SOURCE_DATE_EPOCH=$(date +%s)

    # Augment the dynamic linker path
    export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:${R}/lib/R/lib:${readline}/lib
    export QT_QPA_PLATFORM_PLUGIN_PATH="${qt5.qtbase.bin}/lib/qt-${qt5.qtbase.version}/plugins";

    if [ ! -d ".venv" ]; then
      poetry install --with uis
    fi

    poetry shell
  '';
}
@@ -0,0 +1,11 @@
FROM elasticsearch:7.17.4

ENV ES_JAVA_OPTS "-Xms2g -Xmx2g"
ENV ELASTIC_USERNAME "elastic"
ENV ELASTIC_PASSWORD "password"

COPY elasticsearch.yml /usr/share/elasticsearch/config/

RUN printf "password" | ./bin/elasticsearch-keystore add -f -x "bootstrap.password"

EXPOSE 19200
@@ -0,0 +1,5 @@
network.host: 0.0.0.0

http.port: 19200

discovery.type: single-node
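Since the image above serves Elasticsearch on the non-default port 19200, a quick reachability check from Python can be useful. This is only an illustrative sketch: it assumes the official ``elasticsearch`` client is installed (the commented ``.[es]`` extra in the CI file above hints at an optional extra for this) and that the container is running locally; if security is enabled, the ``ELASTIC_USERNAME``/``ELASTIC_PASSWORD`` values baked into the Dockerfile would also need to be passed to the client::

    # es_ping.py - illustrative connectivity check, not part of the diff above
    from elasticsearch import Elasticsearch  # assumes the `elasticsearch` client package

    # the Dockerfile/elasticsearch.yml above expose ES on the non-default port 19200
    es = Elasticsearch("http://localhost:19200")

    # info() returns basic cluster metadata if the node is reachable
    print(es.info())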
@@ -2,12 +2,27 @@
# https://github.com/waytrade/ib-gateway-docker/blob/master/docker-compose.yml
version: "3.5"


services:
  ib-gateway:

  ib_gw_paper:

    # apparently java is a mega cuck:
    # https://stackoverflow.com/a/56895801
    # https://bugs.openjdk.org/browse/JDK-8150460
    ulimits:
      # nproc: 65535
      nproc: 6000
      nofile:
        soft: 2000
        hard: 3000

    # other image tags available:
    # https://github.com/waytrade/ib-gateway-docker#supported-tags
    image: waytrade/ib-gateway:981.3j
    restart: always
    # image: waytrade/ib-gateway:1012.2i
    image: ghcr.io/gnzsnz/ib-gateway:latest

    restart: 'no'  # restart on boot whenever there's a crash or user clicks
    network_mode: 'host'

    volumes:

@@ -39,14 +54,12 @@ services:
    # this compose file which looks something like:
    # TWS_USERID='myuser'
    # TWS_PASSWORD='guest'
    # TRADING_MODE=paper (or live)
    # VNC_SERVER_PASSWORD='diggity'

    environment:
      TWS_USERID: ${TWS_USERID}
      TWS_PASSWORD: ${TWS_PASSWORD}
      TRADING_MODE: ${TRADING_MODE:-paper}
      VNC_SERVER_PASSWORD: ${VNC_SERVER_PASSWORD:-}
      TRADING_MODE: 'paper'
      VNC_SERVER_PASSWORD: 'doggy'
      VNC_SERVER_PORT: '3003'

    # ports:
    #   - target: 4002

@@ -62,3 +75,40 @@ services:
    #   - "127.0.0.1:4001:4001"
    #   - "127.0.0.1:4002:4002"
    #   - "127.0.0.1:5900:5900"

  # ib_gw_live:
  #   image: waytrade/ib-gateway:1012.2i
  #   restart: no
  #   network_mode: 'host'

  #   volumes:
  #     - type: bind
  #       source: ./jts_live.ini
  #       target: /root/jts/jts.ini
  #       # don't let ibc clobber this file for
  #       # the main reason of not having a stupid
  #       # timezone set..
  #       read_only: true

  #     # force our own ibc config
  #     - type: bind
  #       source: ./ibc.ini
  #       target: /root/ibc/config.ini

  #     # force our noop script - socat isn't needed in host mode.
  #     - type: bind
  #       source: ./fork_ports_delayed.sh
  #       target: /root/scripts/fork_ports_delayed.sh

  #     # force our noop script - socat isn't needed in host mode.
  #     - type: bind
  #       source: ./run_x11_vnc.sh
  #       target: /root/scripts/run_x11_vnc.sh
  #       read_only: true

  #   # NOTE: to fill these out, define an `.env` file in the same dir as
  #   # this compose file which looks something like:
  #   environment:
  #     TRADING_MODE: 'live'
  #     VNC_SERVER_PASSWORD: 'doggy'
  #     VNC_SERVER_PORT: '3004'
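With the paper gateway service above running in host networking mode, client code reaches it over the TWS API socket. Below is a minimal connection sketch using ``ib_insync``, the library the README names for the IB backend; the host, port and client id are illustrative and must line up with whatever ``OverrideTwsApiPort`` is set to in the IBC config that follows (piker's own ``brokerd.ib`` does this wiring internally)::

    # ib_connect.py - illustrative only, not piker's brokerd.ib
    from ib_insync import IB

    ib = IB()
    # host/port must match the gateway's API socket
    # (see OverrideTwsApiPort below); clientId is arbitrary
    ib.connect('127.0.0.1', 4002, clientId=1)

    print(ib.isConnected())
    ib.disconnect()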
@ -117,9 +117,57 @@ SecondFactorDevice=
|
|||
|
||||
# If you use the IBKR Mobile app for second factor authentication,
|
||||
# and you fail to complete the process before the time limit imposed
|
||||
# by IBKR, you can use this setting to tell IBC to exit: arrangements
|
||||
# can then be made to automatically restart IBC in order to initiate
|
||||
# the login sequence afresh. Otherwise, manual intervention at TWS's
|
||||
# by IBKR, this setting tells IBC whether to automatically restart
|
||||
# the login sequence, giving you another opportunity to complete
|
||||
# second factor authentication.
|
||||
#
|
||||
# Permitted values are 'yes' and 'no'.
|
||||
#
|
||||
# If this setting is not present or has no value, then the value
|
||||
# of the deprecated ExitAfterSecondFactorAuthenticationTimeout is
|
||||
# used instead. If this also has no value, then this setting defaults
|
||||
# to 'no'.
|
||||
#
|
||||
# NB: you must be using IBC v3.14.0 or later to use this setting:
|
||||
# earlier versions ignore it.
|
||||
|
||||
ReloginAfterSecondFactorAuthenticationTimeout=
|
||||
|
||||
|
||||
# This setting is only relevant if
|
||||
# ReloginAfterSecondFactorAuthenticationTimeout is set to 'yes',
|
||||
# or if ExitAfterSecondFactorAuthenticationTimeout is set to 'yes'.
|
||||
#
|
||||
# It controls how long (in seconds) IBC waits for login to complete
|
||||
# after the user acknowledges the second factor authentication
|
||||
# alert at the IBKR Mobile app. If login has not completed after
|
||||
# this time, IBC terminates.
|
||||
# The default value is 60.
|
||||
|
||||
SecondFactorAuthenticationExitInterval=
|
||||
|
||||
|
||||
# This setting specifies the timeout for second factor authentication
|
||||
# imposed by IB. The value is in seconds. You should not change this
|
||||
# setting unless you have reason to believe that IB has changed the
|
||||
# timeout. The default value is 180.
|
||||
|
||||
SecondFactorAuthenticationTimeout=180
|
||||
|
||||
|
||||
# DEPRECATED SETTING
|
||||
# ------------------
|
||||
#
|
||||
# ExitAfterSecondFactorAuthenticationTimeout - THIS SETTING WILL BE
|
||||
# REMOVED IN A FUTURE RELEASE. For IBC version 3.14.0 and later, see
|
||||
# the notes for ReloginAfterSecondFactorAuthenticationTimeout above.
|
||||
#
|
||||
# For IBC versions earlier than 3.14.0: If you use the IBKR Mobile
|
||||
# app for second factor authentication, and you fail to complete the
|
||||
# process before the time limit imposed by IBKR, you can use this
|
||||
# setting to tell IBC to exit: arrangements can then be made to
|
||||
# automatically restart IBC in order to initiate the login sequence
|
||||
# afresh. Otherwise, manual intervention at TWS's
|
||||
# Second Factor Authentication dialog is needed to complete the
|
||||
# login.
|
||||
#
|
||||
|
@ -132,29 +180,18 @@ SecondFactorDevice=
|
|||
ExitAfterSecondFactorAuthenticationTimeout=no
|
||||
|
||||
|
||||
# This setting is only relevant if
|
||||
# ExitAfterSecondFactorAuthenticationTimeout is set to 'yes'.
|
||||
#
|
||||
# It controls how long (in seconds) IBC waits for login to complete
|
||||
# after the user acknowledges the second factor authentication
|
||||
# alert at the IBKR Mobile app. If login has not completed after
|
||||
# this time, IBC terminates.
|
||||
# The default value is 40.
|
||||
|
||||
SecondFactorAuthenticationExitInterval=
|
||||
|
||||
|
||||
# Trading Mode
|
||||
# ------------
|
||||
#
|
||||
# TWS 955 introduced a new Trading Mode combo box on its login
|
||||
# dialog. This indicates whether the live account or the paper
|
||||
# trading account corresponding to the supplied credentials is
|
||||
# to be used. The allowed values are 'live' (the default) and
|
||||
# 'paper'. For earlier versions of TWS this setting has no
|
||||
# effect.
|
||||
# This indicates whether the live account or the paper trading
|
||||
# account corresponding to the supplied credentials is to be used.
|
||||
# The allowed values are 'live' (the default) and 'paper'.
|
||||
#
|
||||
# If this is set to 'live', then the credentials for the live
|
||||
# account must be supplied. If it is set to 'paper', then either
|
||||
# the live or the paper-trading credentials may be supplied.
|
||||
|
||||
TradingMode=
|
||||
TradingMode=paper
|
||||
|
||||
|
||||
# Paper-trading Account Warning
|
||||
|
@ -188,7 +225,7 @@ AcceptNonBrokerageAccountWarning=yes
|
|||
#
|
||||
# The default value is 60.
|
||||
|
||||
LoginDialogDisplayTimeout = 60
|
||||
LoginDialogDisplayTimeout=60
|
||||
|
||||
|
||||
|
||||
|
@ -217,7 +254,15 @@ LoginDialogDisplayTimeout = 60
|
|||
# but they are acceptable.
|
||||
#
|
||||
# The default is the current working directory when IBC is
|
||||
# started.
|
||||
# started, unless the TWS_SETTINGS_PATH setting in the relevant
|
||||
# start script is set.
|
||||
#
|
||||
# If both this setting and TWS_SETTINGS_PATH are set, then this
|
||||
# setting takes priority. Note that if they have different values,
|
||||
# auto-restart will not work.
|
||||
#
|
||||
# NB: this setting is now DEPRECATED. You should use the
|
||||
# TWS_SETTINGS_PATH setting in the relevant start script.
|
||||
|
||||
IbDir=/root/Jts
|
||||
|
||||
|
@ -286,13 +331,30 @@ ExistingSessionDetectedAction=primary
|
|||
#
|
||||
# If OverrideTwsApiPort is set to an integer, IBC changes the
|
||||
# 'Socket port' in TWS's API configuration to that number shortly
|
||||
# after startup. Leaving the setting blank will make no change to
|
||||
# after startup (but note that for the FIX Gateway, this setting is
|
||||
# actually stored in jts.ini rather than the Gateway's settings
|
||||
# file). Leaving the setting blank will make no change to
|
||||
# the current setting. This setting is only intended for use in
|
||||
# certain specialized situations where the port number needs to
|
||||
# be set dynamically at run-time, and for the FIX Gateway: most
|
||||
# non-FIX users will never need it, so don't use it unless you know
|
||||
# you need it.
|
||||
|
||||
OverrideTwsApiPort=4000
|
||||
|
||||
|
||||
# Override TWS Master Client ID
|
||||
# -----------------------------
|
||||
#
|
||||
# If OverrideTwsMasterClientID is set to an integer, IBC changes the
|
||||
# 'Master Client ID' value in TWS's API configuration to that
|
||||
# value shortly after startup. Leaving the setting blank will make
|
||||
# no change to the current setting. This setting is only intended
|
||||
# for use in certain specialized situations where the value needs to
|
||||
# be set dynamically at run-time: most users will never need it,
|
||||
# so don't use it unless you know you need it.
|
||||
|
||||
OverrideTwsApiPort=4002
|
||||
OverrideTwsMasterClientID=
|
||||
|
||||
|
||||
# Read-only Login
|
||||
|
@ -302,11 +364,13 @@ OverrideTwsApiPort=4002
|
|||
# account security programme, the user will not be asked to perform
|
||||
# the second factor authentication action, and login to TWS will
|
||||
# occur automatically in read-only mode: in this mode, placing or
|
||||
# managing orders is not allowed. If set to 'no', and the user is
|
||||
# enrolled in IB's account security programme, the user must perform
|
||||
# the relevant second factor authentication action to complete the
|
||||
# login.
|
||||
|
||||
# managing orders is not allowed.
|
||||
#
|
||||
# If set to 'no', and the user is enrolled in IB's account security
|
||||
# programme, the second factor authentication process is handled
|
||||
# according to the Second Factor Authentication Settings described
|
||||
# elsewhere in this file.
|
||||
#
|
||||
# If the user is not enrolled in IB's account security programme,
|
||||
# this setting is ignored. The default is 'no'.
|
||||
|
||||
|
@ -326,7 +390,44 @@ ReadOnlyLogin=no
|
|||
# set the relevant checkbox (this only needs to be done once) and
|
||||
# not provide a value for this setting.
|
||||
|
||||
ReadOnlyApi=no
|
||||
ReadOnlyApi=
|
||||
|
||||
|
||||
# API Precautions
|
||||
# ---------------
|
||||
#
|
||||
# These settings relate to the corresponding 'Precautions' checkboxes in the
|
||||
# API section of the Global Configuration dialog.
|
||||
#
|
||||
# For all of these, the accepted values are:
|
||||
# - 'yes' sets the checkbox
|
||||
# - 'no' clears the checkbox
|
||||
# - if not set, the existing TWS/Gateway configuration is unchanged
|
||||
#
|
||||
# NB: these settings are really only supplied for the benefit of new TWS
|
||||
# or Gateway instances that are being automatically installed and
|
||||
# started without user intervention, or where user settings are not preserved
|
||||
# between sessions (eg some Docker containers). Where a user is involved, they
|
||||
# should use the Global Configuration to set the relevant checkboxes and not
|
||||
# provide values for these settings.
|
||||
|
||||
BypassOrderPrecautions=
|
||||
|
||||
BypassBondWarning=
|
||||
|
||||
BypassNegativeYieldToWorstConfirmation=
|
||||
|
||||
BypassCalledBondWarning=
|
||||
|
||||
BypassSameActionPairTradeWarning=
|
||||
|
||||
BypassPriceBasedVolatilityRiskWarning=
|
||||
|
||||
BypassUSStocksMarketDataInSharesWarning=
|
||||
|
||||
BypassRedirectOrderWarning=
|
||||
|
||||
BypassNoOverfillProtectionPrecaution=
|
||||
|
||||
|
||||
# Market data size for US stocks - lots or shares
|
||||
|
@ -381,54 +482,145 @@ AcceptBidAskLastSizeDisplayUpdateNotification=accept
|
|||
SendMarketDataInLotsForUSstocks=
|
||||
|
||||
|
||||
# Trusted API Client IPs
|
||||
# ----------------------
|
||||
#
|
||||
# NB: THIS SETTING IS ONLY RELEVANT FOR THE GATEWAY, AND ONLY WHEN FIX=yes.
|
||||
# In all other cases it is ignored.
|
||||
#
|
||||
# This is a list of IP addresses separated by commas. API clients with IP
|
||||
# addresses in this list are able to connect to the API without Gateway
|
||||
# generating the 'Incoming connection' popup.
|
||||
#
|
||||
# Note that 127.0.0.1 is always permitted to connect, so do not include it
|
||||
# in this setting.
|
||||
|
||||
TrustedTwsApiClientIPs=
|
||||
|
||||
|
||||
# Reset Order ID Sequence
|
||||
# -----------------------
|
||||
#
|
||||
# The setting resets the order id sequence for orders submitted via the API, so
|
||||
# that the next invocation of the `NextValidId` API callback will return the
|
||||
# value 1. The reset occurs when TWS starts.
|
||||
#
|
||||
# Note that order ids are reset for all API clients, except those that have
|
||||
# outstanding (ie incomplete) orders: their order id sequence carries on as
|
||||
# before.
|
||||
#
|
||||
# Valid values are 'yes', 'true', 'false' and 'no'. The default is 'no'.
|
||||
|
||||
ResetOrderIdsAtStart=
|
||||
|
||||
|
||||
# This setting specifies IBC's action when TWS displays the dialog asking for
|
||||
# confirmation of a request to reset the API order id sequence.
|
||||
#
|
||||
# Note that the Gateway never displays this dialog, so this setting is ignored
|
||||
# for a Gateway session.
|
||||
#
|
||||
# Valid values consist of two strings separated by a solidus '/'. The first
|
||||
# value specifies the action to take when the order id reset request resulted
|
||||
# from setting ResetOrderIdsAtStart=yes. The second specifies the action to
|
||||
# take when the order id reset request is a result of the user clicking the
|
||||
# 'Reset API order ID sequence' button in the API configuration. Each value
|
||||
# must be one of the following:
|
||||
#
|
||||
# 'confirm'
|
||||
# order ids will be reset
|
||||
#
|
||||
# 'reject'
|
||||
# order ids will not be reset
|
||||
#
|
||||
# 'ignore'
|
||||
# IBC will ignore the dialog. The user must take action.
|
||||
#
|
||||
# The default setting is ignore/ignore
|
||||
|
||||
# Examples:
|
||||
#
|
||||
# 'confirm/reject' - confirm order id reset only if ResetOrderIdsAtStart=yes
|
||||
# and reject any user-initiated requests
|
||||
#
|
||||
# 'ignore/confirm' - user must decide what to do if ResetOrderIdsAtStart=yes
|
||||
# and confirm user-initiated requests
|
||||
#
|
||||
# 'reject/ignore' - reject order id reset if ResetOrderIdsAtStart=yes but
|
||||
# allow user to handle user-initiated requests
|
||||
|
||||
ConfirmOrderIdReset=
|
||||
|
||||
|
||||
|
||||
# =============================================================================
|
||||
# 4. TWS Auto-Closedown
|
||||
# 4. TWS Auto-Logoff and Auto-Restart
|
||||
# =============================================================================
|
||||
#
|
||||
# IMPORTANT NOTE: Starting with TWS 974, this setting no longer
|
||||
# works properly, because IB have changed the way TWS handles its
|
||||
# autologoff mechanism.
|
||||
# TWS and Gateway insist on being restarted every day. Two alternative
|
||||
# automatic options are offered:
|
||||
#
|
||||
# You should now configure the TWS autologoff time to something
|
||||
# convenient for you, and restart IBC each day.
|
||||
# - Auto-Logoff: at a specified time, TWS shuts down tidily, without
|
||||
# restarting.
|
||||
#
|
||||
# Alternatively, discontinue use of IBC and use the auto-relogin
|
||||
# mechanism within TWS 974 and later versions (note that the
|
||||
# auto-relogin mechanism provided by IB is not available if you
|
||||
# use IBC).
|
||||
# - Auto-Restart: at a specified time, TWS shuts down and then restarts
|
||||
# without the user having to re-authenticate.
|
||||
#
|
||||
# The normal way to configure the time at which this happens is via the Lock
|
||||
# and Exit section of the Configuration dialog. Once this time has been
|
||||
# configured in this way, the setting persists until the user changes it again.
|
||||
#
|
||||
# However, there are situations where there is no user available to do this
|
||||
# configuration, or where there is no persistent storage (for example some
|
||||
# Docker images). In such cases, the auto-restart or auto-logoff time can be
|
||||
# set whenever IBC starts with the settings below.
|
||||
#
|
||||
# The value, if specified, must be a time in HH:MM AM/PM format, for example
|
||||
# 08:00 AM or 10:00 PM. Note that there must be a single space between the
|
||||
# two parts of this value; also that midnight is "12:00 AM" and midday is
|
||||
# "12:00 PM".
|
||||
#
|
||||
# If no value is specified for either setting, the currently configured
|
||||
# settings will apply. If a value is supplied for one setting, the other
|
||||
# setting is cleared. If values are supplied for both settings, only the
|
||||
# auto-restart time is set, and the auto-logoff time is cleared.
|
||||
#
|
||||
# Note that for a normal TWS/Gateway installation with persistent storage
|
||||
# (for example on a desktop computer) the value will be persisted as if the
|
||||
# user had set it via the configuration dialog.
|
||||
#
|
||||
# If you choose to auto-restart, you should take note of the considerations
|
||||
# described at the link below. Note that where this information mentions
|
||||
# 'manual authentication', restarting IBC will do the job (IBKR does not
|
||||
# recognise the existence of IBC in its documentation).
|
||||
#
|
||||
# https://www.interactivebrokers.com/en/software/tws/twsguide.htm#usersguidebook/configuretws/auto_restart_info.htm
|
||||
#
|
||||
# If you use the "RESTART" command via the IBC command server, and IBC is
|
||||
# running any version of the Gateway (or a version of TWS earlier than 1018),
|
||||
# note that this will set the Auto-Restart time in Gateway/TWS's configuration
|
||||
# dialog to the time at which the restart actually happens (which may be up to
|
||||
# a minute after the RESTART command is issued). To prevent future auto-
|
||||
# restarts at this time, you must make sure you have set AutoLogoffTime or
|
||||
# AutoRestartTime to your desired value before running IBC. NB: this does not
|
||||
# apply to TWS from version 1018 onwards.
|
||||
|
||||
# Set to yes or no (lower case).
|
||||
#
|
||||
# yes means allow TWS to shut down automatically at its
|
||||
# specified shutdown time, which is set via the TWS
|
||||
# configuration menu.
|
||||
#
|
||||
# no means TWS never shuts down automatically.
|
||||
#
|
||||
# NB: IB recommends that you do not keep TWS running
|
||||
# continuously. If you set this setting to 'no', you may
|
||||
# experience incorrect TWS operation.
|
||||
#
|
||||
# NB: the default for this setting is 'no'. Since this will
|
||||
# only work properly with TWS versions earlier than 974, you
|
||||
# should explicitly set this to 'yes' for version 974 and later.
|
||||
|
||||
IbAutoClosedown=yes
|
||||
AutoLogoffTime=
|
||||
|
||||
AutoRestartTime=
|
||||
|
||||
|
||||
# =============================================================================
|
||||
# 5. TWS Tidy Closedown Time
|
||||
# =============================================================================
|
||||
#
|
||||
# NB: starting with TWS 974 this is no longer a useful option
|
||||
# because both TWS and Gateway now have the same auto-logoff
|
||||
# mechanism, and IBC can no longer avoid this.
|
||||
# Specifies a time at which TWS will close down tidily, with no restart.
|
||||
#
|
||||
# Note that giving this setting a value does not change TWS's
|
||||
# auto-logoff in any way: any setting will be additional to the
|
||||
# TWS auto-logoff.
|
||||
# There is little reason to use this setting. It is similar to AutoLogoffTime,
|
||||
# but can include a day-of-the-week, whereas AutoLogoffTime and AutoRestartTime
|
||||
# apply every day. So for example you could use ClosedownAt in conjunction with
|
||||
# AutoRestartTime to shut down TWS on Friday evenings after the markets
|
||||
# close, without it running on Saturday as well.
|
||||
#
|
||||
# To tell IBC to tidily close TWS at a specified time every
|
||||
# day, set this value to <hh:mm>, for example:
|
||||
|
@ -487,7 +679,7 @@ AcceptIncomingConnectionAction=reject
|
|||
# no means the dialog remains on display and must be
|
||||
# handled by the user.
|
||||
|
||||
AllowBlindTrading=yes
|
||||
AllowBlindTrading=no
|
||||
|
||||
|
||||
# Save Settings on a Schedule
|
||||
|
@ -530,6 +722,26 @@ AllowBlindTrading=yes
|
|||
SaveTwsSettingsAt=
|
||||
|
||||
|
||||
# Confirm Crypto Currency Orders Automatically
|
||||
# --------------------------------------------
|
||||
#
|
||||
# When you place an order for a cryptocurrency contract, a dialog is displayed
|
||||
# asking you to confirm that you want to place the order, and notifying you
|
||||
# that you are placing an order to trade cryptocurrency with Paxos, a New York
|
||||
# limited trust company, and not at Interactive Brokers.
|
||||
#
|
||||
# transmit means that the order will be placed automatically, and the
|
||||
# dialog will then be closed
|
||||
#
|
||||
# cancel means that the order will not be placed, and the dialog will
|
||||
# then be closed
|
||||
#
|
||||
# manual means that IBC will take no action and the user must deal
|
||||
# with the dialog
|
||||
|
||||
ConfirmCryptoCurrencyOrders=transmit
|
||||
|
||||
|
||||
|
||||
# =============================================================================
|
||||
# 7. Settings Specific to Indian Versions of TWS
|
||||
|
@ -566,13 +778,17 @@ DismissNSEComplianceNotice=yes
|
|||
#
|
||||
# The port number that IBC listens on for commands
|
||||
# such as "STOP". DO NOT set this to the port number
|
||||
# used for TWS API connections. There is no good reason
|
||||
# to change this setting unless the port is used by
|
||||
# some other application (typically another instance of
|
||||
# IBC). The default value is 0, which tells IBC not to
|
||||
# start the command server
|
||||
# used for TWS API connections.
|
||||
#
|
||||
# The convention is to use 7462 for this port,
|
||||
# but it must be set to a different value from any other
|
||||
# IBC instance that might run at the same time.
|
||||
#
|
||||
# The default value is 0, which tells IBC not to start
|
||||
# the command server
|
||||
|
||||
#CommandServerPort=7462
|
||||
CommandServerPort=0
|
||||
|
||||
|
||||
# Permitted Command Sources
|
||||
|
@ -583,19 +799,19 @@ DismissNSEComplianceNotice=yes
|
|||
# IBC. Commands can always be sent from the
|
||||
# same host as IBC is running on.
|
||||
|
||||
ControlFrom=127.0.0.1
|
||||
ControlFrom=
|
||||
|
||||
|
||||
# Address for Receiving Commands
|
||||
# ------------------------------
|
||||
#
|
||||
# Specifies the IP address on which the Command Server
|
||||
# is so listen. For a multi-homed host, this can be used
|
||||
# is to listen. For a multi-homed host, this can be used
|
||||
# to specify that connection requests are only to be
|
||||
# accepted on the specified address. The default is to
|
||||
# accept connection requests on all local addresses.
|
||||
|
||||
BindAddress=127.0.0.1
|
||||
BindAddress=
|
||||
|
||||
|
||||
# Command Prompt
|
||||
|
@ -621,7 +837,7 @@ CommandPrompt=
|
|||
# information is sent. The default is that such information
|
||||
# is not sent.
|
||||
|
||||
SuppressInfoMessages=no
|
||||
SuppressInfoMessages=yes
|
||||
|
||||
|
||||
|
||||
|
@ -651,10 +867,10 @@ SuppressInfoMessages=no
|
|||
# The LogStructureScope setting indicates which windows are
|
||||
# eligible for structure logging:
|
||||
#
|
||||
# - if set to 'known', only windows that IBC recognizes
|
||||
# are eligible - these are windows that IBC has some
|
||||
# interest in monitoring, usually to take some action
|
||||
# on the user's behalf;
|
||||
# - (default value) if set to 'known', only windows that
|
||||
# IBC recognizes are eligible - these are windows that
|
||||
# IBC has some interest in monitoring, usually to take
|
||||
# some action on the user's behalf;
|
||||
#
|
||||
# - if set to 'unknown', only windows that IBC does not
|
||||
# recognize are eligible. Most windows displayed by
|
||||
|
@ -667,9 +883,8 @@ SuppressInfoMessages=no
|
|||
# - if set to 'all', then every window displayed by TWS
|
||||
# is eligible.
|
||||
#
|
||||
# The default value is 'known'.
|
||||
|
||||
LogStructureScope=all
|
||||
LogStructureScope=known
|
||||
|
||||
|
||||
# When to Log Window Structure
|
||||
|
@ -682,13 +897,15 @@ LogStructureScope=all
|
|||
# structure of an eligible window the first time it
|
||||
# is encountered;
|
||||
#
|
||||
# - if set to 'openclose', the structure is logged every
|
||||
# time an eligible window is opened or closed;
|
||||
#
|
||||
# - if set to 'activate', the structure is logged every
|
||||
# time an eligible window is made active;
|
||||
#
|
||||
# - if set to 'never' or 'no' or 'false', structure
|
||||
# information is never logged.
|
||||
# - (default value) if set to 'never' or 'no' or 'false',
|
||||
# structure information is never logged.
|
||||
#
|
||||
# The default value is 'never'.
|
||||
|
||||
LogStructureWhen=never
|
||||
|
||||
|
@ -708,4 +925,3 @@ LogStructureWhen=never
|
|||
#LogComponents=
|
||||
|
||||
|
||||
|
||||
|
|
|
@ -0,0 +1,33 @@
|
|||
[IBGateway]
|
||||
ApiOnly=true
|
||||
LocalServerPort=4001
|
||||
# NOTE: must be set if using IBC's "reject" mode
|
||||
TrustedIPs=127.0.0.1
|
||||
; RemoteHostOrderRouting=ndc1.ibllc.com
|
||||
; WriteDebug=true
|
||||
; RemotePortOrderRouting=4001
|
||||
; useRemoteSettings=false
|
||||
; tradingMode=p
|
||||
; Steps=8
|
||||
; colorPalletName=dark
|
||||
|
||||
# window geo, this may be useful for sending `xdotool` commands?
|
||||
; MainWindow.Width=1986
|
||||
; screenHeight=3960
|
||||
|
||||
|
||||
[Logon]
|
||||
Locale=en
|
||||
# most markets are oriented around this zone
|
||||
# so might as well hard code it.
|
||||
TimeZone=America/New_York
|
||||
UseSSL=true
|
||||
displayedproxymsg=1
|
||||
os_titlebar=true
|
||||
s3store=true
|
||||
useRemoteSettings=false
|
||||
|
||||
[Communication]
|
||||
ctciAutoEncrypt=true
|
||||
Region=usr
|
||||
; Peer=cdc1.ibllc.com:4001
|
|
@ -1,16 +1,35 @@
|
|||
#!/bin/sh
|
||||
# start vnc server and listen for connections
|
||||
# on port specced in `$VNC_SERVER_PORT`
|
||||
|
||||
# start VNC server
|
||||
x11vnc \
|
||||
-ncache_cr \
|
||||
-listen localhost \
|
||||
-listen 127.0.0.1 \
|
||||
-allow 127.0.0.1 \
|
||||
-rfbport "${VNC_SERVER_PORT}" \
|
||||
-display :1 \
|
||||
-forever \
|
||||
-shared \
|
||||
-logappend /var/log/x11vnc.log \
|
||||
-bg \
|
||||
-nowf \
|
||||
-noxdamage \
|
||||
-noxfixes \
|
||||
-no6 \
|
||||
-noipv6 \
|
||||
-autoport 3003 \
|
||||
# can't use this because of ``asyncvnc`` issue:
|
||||
|
||||
|
||||
# -nowcr \
|
||||
# TODO: can't use this because of ``asyncvnc`` issue:
|
||||
# https://github.com/barneygale/asyncvnc/issues/1
|
||||
# -passwd 'ibcansmbz'
|
||||
|
||||
# XXX: optional graphics caching flags that seem to rekt the overlay
|
||||
# of the 2 gw windows? When running a single gateway
|
||||
# this seems to maybe optimize some memory usage?
|
||||
# -ncache_cr \
|
||||
# -ncache \
|
||||
|
||||
# NOTE: this will prevent logs from going to the console.
|
||||
# -logappend /var/log/x11vnc.log \
|
||||
|
||||
# where to start allocating ports
|
||||
# -autoport "${VNC_SERVER_PORT}" \
|
||||
|
|
|
@ -0,0 +1,91 @@
|
|||
### NOTE: this is likely out of date given it was written some
|
||||
(years) ago by a user who has not really partaken in
|
||||
contributing since.
|
||||
|
||||
install for tinas
|
||||
*****************
|
||||
for windows peeps you can start by installing all the prerequisite software:
|
||||
|
||||
- install git with all default settings - https://git-scm.com/download/win
|
||||
- install anaconda all default settings - https://www.anaconda.com/products/individual
|
||||
- install microsoft build tools (check the box for Desktop development for C++, you might be able to uncheck some optional downloads) - https://visualstudio.microsoft.com/visual-cpp-build-tools/
|
||||
- install visual studio code default settings - https://code.visualstudio.com/download
|
||||
|
||||
|
||||
then, `crack a conda shell`_ and run the following commands::
|
||||
|
||||
mkdir code # create code directory
|
||||
cd code # change directory to code
|
||||
git clone https://github.com/pikers/piker.git # downloads piker installation package from github
|
||||
cd piker # change directory to piker
|
||||
|
||||
conda create -n pikonda # creates conda environment named pikonda
|
||||
conda activate pikonda # activates pikonda
|
||||
|
||||
conda install -c conda-forge python-levenshtein # in case it is not already installed
|
||||
conda install pip # may already be installed
|
||||
pip # will show if pip is installed
|
||||
|
||||
pip install -e . -r requirements.txt # install piker in editable mode
|
||||
|
||||
test Piker to see if it is working::
|
||||
|
||||
piker -b binance chart btcusdt.binance # formatting for loading a chart
|
||||
piker -b kraken -b binance chart xbtusdt.kraken
|
||||
piker -b kraken -b binance -b ib chart qqq.nasdaq.ib
|
||||
piker -b ib chart tsla.nasdaq.ib
|
||||
|
||||
potential error::
|
||||
|
||||
FileNotFoundError: [Errno 2] No such file or directory: 'C:\\Users\\user\\AppData\\Roaming\\piker\\brokers.toml'
|
||||
|
||||
solution:
|
||||
|
||||
- navigate to the file directory above (it may be different on your machine; the location should be listed in the error message)
|
||||
- copy and paste the file from 'C:\\Users\\user\\code\\data/brokers.toml', or create a blank file using notepad at the location above (or use the sketch below)
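if you'd rather script the fix, here's a minimal sketch (assuming the
default Windows config location from the error message above; adjust the
path for your machine) that you can paste into a python prompt inside your
conda shell::

    # create an empty brokers.toml so piker can start
    from pathlib import Path

    conf = Path.home() / 'AppData' / 'Roaming' / 'piker' / 'brokers.toml'
    conf.parent.mkdir(parents=True, exist_ok=True)  # make parent dirs if missing
    conf.touch(exist_ok=True)                       # create the blank file
    print(f'created {conf}')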
|
||||
|
||||
Visual Studio Code setup:
|
||||
|
||||
- now that piker is installed we can set up vscode as the default terminal for running piker and editing the code
|
||||
- open Visual Studio Code
|
||||
- file --> Add Folder to Workspace --> C:\Users\user\code\piker (adds piker directory where all piker files are located)
|
||||
- file --> Save Workspace As --> save it wherever you want and call it whatever you want; this is going to be your default workspace for running and editing piker code
|
||||
- ctrl + shift + p --> start typing Python: Select Interpreter --> when the option comes up select it --> Select at the workspace level --> select the one that shows ('pikonda')
|
||||
- change the default terminal to cmd.exe instead of powershell (default)
|
||||
- now when you create a new terminal VScode should automatically activate your conda env so that piker can be run as the first command after a new terminal is created
|
||||
|
||||
also, try out fancyzones as part of powertoyz for a decent tiling windows manager to manage all the cool new software you are going to be running.
|
||||
|
||||
.. _conda installed: https://
|
||||
.. _C++ build toolz: https://
|
||||
.. _crack a conda shell: https://
|
||||
.. _vscode: https://
|
||||
|
||||
.. link to the tina guide
|
||||
.. _setup a coolio tiled wm console: https://
|
||||
|
||||
provider support
|
||||
****************
|
||||
for live data feeds the in-progress set of supported brokers is:
|
||||
|
||||
- IB_ via ``ib_insync``, also see our `container docs`_
|
||||
- binance_ and kraken_ for crypto over their public websocket API
|
||||
- questrade_ (ish) which comes with effectively free L1
|
||||
|
||||
coming soon...
|
||||
|
||||
- webull_ via the reverse engineered public API
|
||||
- yahoo via yliveticker_
|
||||
|
||||
if you want your broker supported and they have an API let us know.
|
||||
|
||||
.. _IB: https://interactivebrokers.github.io/tws-api/index.html
|
||||
.. _container docs: https://github.com/pikers/piker/tree/master/dockering/ib
|
||||
.. _questrade: https://www.questrade.com/api/documentation
|
||||
.. _kraken: https://www.kraken.com/features/api#public-market-data
|
||||
.. _binance: https://github.com/pikers/piker/pull/182
|
||||
.. _webull: https://github.com/tedchou12/webull
|
||||
.. _yliveticker: https://github.com/yahoofinancelive/yliveticker
|
||||
.. _coinbase: https://docs.pro.coinbase.com/#websocket-feed
|
||||
|
||||
|
|
@ -0,0 +1,263 @@
|
|||
# from pprint import pformat
|
||||
from functools import partial
|
||||
from decimal import Decimal
|
||||
from typing import Callable
|
||||
|
||||
import tractor
|
||||
import trio
|
||||
from uuid import uuid4
|
||||
|
||||
from piker.service import maybe_open_pikerd
|
||||
from piker.accounting import dec_digits
|
||||
from piker.clearing import (
|
||||
open_ems,
|
||||
OrderClient,
|
||||
)
|
||||
# TODO: we should probably expose these top level in this subsys?
|
||||
from piker.clearing._messages import (
|
||||
Order,
|
||||
Status,
|
||||
BrokerdPosition,
|
||||
)
|
||||
from piker.data import (
|
||||
iterticks,
|
||||
Flume,
|
||||
open_feed,
|
||||
Feed,
|
||||
# ShmArray,
|
||||
)
|
||||
|
||||
|
||||
# TODO: handle other statuses:
|
||||
# - fills, errors, and position tracking
|
||||
async def wait_for_order_status(
|
||||
trades_stream: tractor.MsgStream,
|
||||
oid: str,
|
||||
expect_status: str,
|
||||
|
||||
) -> tuple[
|
||||
list[Status],
|
||||
list[BrokerdPosition],
|
||||
]:
|
||||
'''
|
||||
Wait for a specific order status for a given dialog, return msg flow
|
||||
up to that msg and any position update msgs in a tuple.
|
||||
|
||||
'''
|
||||
# Wait for position message before moving on to verify flow(s)
|
||||
# for the multi-order position entry/exit.
|
||||
status_msgs: list[Status] = []
|
||||
pp_msgs: list[BrokerdPosition] = []
|
||||
|
||||
async for msg in trades_stream:
|
||||
match msg:
|
||||
case {'name': 'position'}:
|
||||
ppmsg = BrokerdPosition(**msg)
|
||||
pp_msgs.append(ppmsg)
|
||||
|
||||
case {
|
||||
'name': 'status',
|
||||
}:
|
||||
msg = Status(**msg)
|
||||
status_msgs.append(msg)
|
||||
|
||||
# if we get the status we expect then return all
|
||||
# collected msgs from the brokerd dialog up to the
|
||||
# expected msg B)
|
||||
if (
|
||||
msg.resp == expect_status
|
||||
and msg.oid == oid
|
||||
):
|
||||
return status_msgs, pp_msgs
|
||||
|
||||
|
||||
async def bot_main():
|
||||
'''
|
||||
Boot the piker runtime, open an ems connection, submit
|
||||
and process orders statuses in real-time.
|
||||
|
||||
'''
|
||||
ll: str = 'info'
|
||||
|
||||
# open an order ctl client, live data feed, trio nursery for
|
||||
# spawning an order trailer task
|
||||
client: OrderClient
|
||||
trades_stream: tractor.MsgStream
|
||||
feed: Feed
|
||||
accounts: list[str]
|
||||
|
||||
fqme: str = 'btcusdt.usdtm.perp.binance'
|
||||
|
||||
async with (
|
||||
|
||||
# TODO: do this implicitly inside `open_ems()` ep below?
|
||||
# init and sync actor-service runtime
|
||||
maybe_open_pikerd(
|
||||
loglevel=ll,
|
||||
debug_mode=True,
|
||||
|
||||
),
|
||||
open_ems(
|
||||
fqme,
|
||||
mode='paper', # {'live', 'paper'}
|
||||
# mode='live', # for real-brokerd submissions
|
||||
loglevel=ll,
|
||||
|
||||
) as (
|
||||
client, # OrderClient
|
||||
trades_stream, # tractor.MsgStream startup_pps,
|
||||
_, # positions
|
||||
accounts,
|
||||
_, # dialogs
|
||||
),
|
||||
|
||||
open_feed(
|
||||
fqmes=[fqme],
|
||||
loglevel=ll,
|
||||
|
||||
# TODO: if you want to throttle via downsampling
|
||||
# how many tick updates your feed received on
|
||||
# quote streams B)
|
||||
# tick_throttle=10,
|
||||
) as feed,
|
||||
|
||||
trio.open_nursery() as tn,
|
||||
):
|
||||
assert accounts
|
||||
print(f'Loaded binance accounts: {accounts}')
|
||||
|
||||
flume: Flume = feed.flumes[fqme]
|
||||
min_tick = Decimal(flume.mkt.price_tick)
|
||||
min_tick_digits: int = dec_digits(min_tick)
|
||||
price_round: Callable = partial(
|
||||
round,
|
||||
ndigits=min_tick_digits,
|
||||
)
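# NOTE: assuming `dec_digits()` returns the number of decimal places in
# the tick size, a price_tick of 0.01 gives min_tick_digits == 2, so
# price_round(123.4567) -> 123.46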
|
||||
|
||||
quote_stream: trio.abc.ReceiveChannel = feed.streams['binance']
|
||||
|
||||
|
||||
# always keep live limit 0.03% below last
|
||||
# clearing price
|
||||
clear_margin: float = 0.9997
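# e.g. a last clearing price of 30_000 puts the trailing limit at 29_991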
|
||||
|
||||
async def trailer(
|
||||
order: Order,
|
||||
):
|
||||
# ref shm OHLCV array history, if you want
|
||||
# s_shm: ShmArray = flume.rt_shm
|
||||
# m_shm: ShmArray = flume.hist_shm
|
||||
|
||||
# NOTE: if you wanted to frame ticks by type like the
|
||||
# quote throttler does.. and this is probably
|
||||
# faster in terms of getting the latest tick type
|
||||
# embedded value of interest?
|
||||
# from piker.data._sampling import frame_ticks
|
||||
|
||||
async for quotes in quote_stream:
|
||||
for fqme, quote in quotes.items():
|
||||
# print(
|
||||
# f'{quote["symbol"]} -> {quote["ticks"]}\n'
|
||||
# f'last 1s OHLC:\n{s_shm.array[-1]}\n'
|
||||
# f'last 1m OHLC:\n{m_shm.array[-1]}\n'
|
||||
# )
|
||||
|
||||
for tick in iterticks(
|
||||
quote,
|
||||
reverse=True,
|
||||
# types=('trade', 'dark_trade'), # defaults
|
||||
):
|
||||
|
||||
await client.update(
|
||||
uuid=order.oid,
|
||||
price=price_round(
|
||||
clear_margin
|
||||
*
|
||||
tick['price']
|
||||
),
|
||||
)
|
||||
msgs, pps = await wait_for_order_status(
|
||||
trades_stream,
|
||||
order.oid,
|
||||
'open'
|
||||
)
|
||||
# if multiple clears per quote just
|
||||
# skip to the next quote?
|
||||
break
|
||||
|
||||
|
||||
# get first live quote to be sure we submit the initial
|
||||
# live buy limit low enough that it doesn't clear due to
|
||||
# a stale initial price from the data feed layer!
|
||||
first_ask_price: float | None = None
|
||||
async for quotes in quote_stream:
|
||||
for fqme, quote in quotes.items():
|
||||
# print(quote['symbol'])
|
||||
for tick in iterticks(quote, types=('ask',)):
|
||||
first_ask_price: float = tick['price']
|
||||
break
|
||||
|
||||
if first_ask_price:
|
||||
break
|
||||
|
||||
# setup order dialog via first msg
|
||||
price: float = price_round(
|
||||
clear_margin
|
||||
*
|
||||
first_ask_price,
|
||||
)
|
||||
|
||||
# compute a 1k USD sized pos
|
||||
size: float = round(1e3/price, ndigits=3)
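# e.g. at a price of 25_000 this works out to a 0.04 unit order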
|
||||
|
||||
order = Order(
|
||||
|
||||
# docs on how this all works, bc even i'm not entirely
|
||||
# clear XD. also we probably want to figure out how to
|
||||
# offer both the paper engine running and the brokerd
|
||||
# order ctl tasks with the ems choosing which stream to
|
||||
# route msgs on given the account value!
|
||||
account='paper', # use built-in paper clearing engine and .accounting
|
||||
# account='binance.usdtm', # for live binance futes
|
||||
|
||||
oid=str(uuid4()),
|
||||
exec_mode='live', # {'dark', 'live', 'alert'}
|
||||
|
||||
action='buy', # TODO: remove this from our schema?
|
||||
|
||||
size=size,
|
||||
symbol=fqme,
|
||||
price=price,
|
||||
brokers=['binance'],
|
||||
)
|
||||
await client.send(order)
|
||||
|
||||
msgs, pps = await wait_for_order_status(
|
||||
trades_stream,
|
||||
order.oid,
|
||||
'open',
|
||||
)
|
||||
|
||||
assert not pps
|
||||
assert msgs[-1].oid == order.oid
|
||||
|
||||
# start "trailer task" which tracks rt quote stream
|
||||
tn.start_soon(trailer, order)
|
||||
|
||||
try:
|
||||
# wait for ctl-c from user..
|
||||
await trio.sleep_forever()
|
||||
except KeyboardInterrupt:
|
||||
# cancel the open order
|
||||
await client.cancel(order.oid)
|
||||
|
||||
msgs, pps = await wait_for_order_status(
|
||||
trades_stream,
|
||||
order.oid,
|
||||
'canceled'
|
||||
)
|
||||
raise
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
trio.run(bot_main)
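Usage note: run this file directly with python from an environment where piker is installed; it boots (or attaches to) `pikerd`, submits the initial limit buy against the paper engine configured above, then keeps trailing that order off the live quote stream until you hit ctrl-c, at which point the open order is cancelled before exit.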
|
|
@ -0,0 +1,138 @@
|
|||
{
|
||||
"nodes": {
|
||||
"flake-utils": {
|
||||
"inputs": {
|
||||
"systems": "systems"
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1689068808,
|
||||
"narHash": "sha256-6ixXo3wt24N/melDWjq70UuHQLxGV8jZvooRanIHXw0=",
|
||||
"owner": "numtide",
|
||||
"repo": "flake-utils",
|
||||
"rev": "919d646de7be200f3bf08cb76ae1f09402b6f9b4",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "numtide",
|
||||
"repo": "flake-utils",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"flake-utils_2": {
|
||||
"inputs": {
|
||||
"systems": "systems_2"
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1689068808,
|
||||
"narHash": "sha256-6ixXo3wt24N/melDWjq70UuHQLxGV8jZvooRanIHXw0=",
|
||||
"owner": "numtide",
|
||||
"repo": "flake-utils",
|
||||
"rev": "919d646de7be200f3bf08cb76ae1f09402b6f9b4",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "numtide",
|
||||
"repo": "flake-utils",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"nix-github-actions": {
|
||||
"inputs": {
|
||||
"nixpkgs": [
|
||||
"poetry2nix",
|
||||
"nixpkgs"
|
||||
]
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1688870561,
|
||||
"narHash": "sha256-4UYkifnPEw1nAzqqPOTL2MvWtm3sNGw1UTYTalkTcGY=",
|
||||
"owner": "nix-community",
|
||||
"repo": "nix-github-actions",
|
||||
"rev": "165b1650b753316aa7f1787f3005a8d2da0f5301",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "nix-community",
|
||||
"repo": "nix-github-actions",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"nixpkgs": {
|
||||
"locked": {
|
||||
"lastModified": 1692174805,
|
||||
"narHash": "sha256-xmNPFDi/AUMIxwgOH/IVom55Dks34u1g7sFKKebxUm0=",
|
||||
"owner": "NixOS",
|
||||
"repo": "nixpkgs",
|
||||
"rev": "caac0eb6bdcad0b32cb2522e03e4002c8975c62e",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "NixOS",
|
||||
"ref": "nixos-unstable",
|
||||
"repo": "nixpkgs",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"poetry2nix": {
|
||||
"inputs": {
|
||||
"flake-utils": "flake-utils_2",
|
||||
"nix-github-actions": "nix-github-actions",
|
||||
"nixpkgs": [
|
||||
"nixpkgs"
|
||||
]
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1692048894,
|
||||
"narHash": "sha256-cDw03rso2V4CDc3Mll0cHN+ztzysAvdI8pJ7ybbz714=",
|
||||
"ref": "refs/heads/pyqt6",
|
||||
"rev": "b059ad4c3051f45d6c912e17747aae37a9ec1544",
|
||||
"revCount": 2276,
|
||||
"type": "git",
|
||||
"url": "file:///home/lord_fomo/repos/poetry2nix"
|
||||
},
|
||||
"original": {
|
||||
"type": "git",
|
||||
"url": "file:///home/lord_fomo/repos/poetry2nix"
|
||||
}
|
||||
},
|
||||
"root": {
|
||||
"inputs": {
|
||||
"flake-utils": "flake-utils",
|
||||
"nixpkgs": "nixpkgs",
|
||||
"poetry2nix": "poetry2nix"
|
||||
}
|
||||
},
|
||||
"systems": {
|
||||
"locked": {
|
||||
"lastModified": 1681028828,
|
||||
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
|
||||
"owner": "nix-systems",
|
||||
"repo": "default",
|
||||
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "nix-systems",
|
||||
"repo": "default",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"systems_2": {
|
||||
"locked": {
|
||||
"lastModified": 1681028828,
|
||||
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
|
||||
"owner": "nix-systems",
|
||||
"repo": "default",
|
||||
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "nix-systems",
|
||||
"repo": "default",
|
||||
"type": "github"
|
||||
}
|
||||
}
|
||||
},
|
||||
"root": "root",
|
||||
"version": 7
|
||||
}
|
|
@ -0,0 +1,180 @@
|
|||
# NOTE: to convert to a poetry2nix env like this here are the
|
||||
# steps:
|
||||
# - install poetry in your system nix config
|
||||
# - convert the repo to use poetry using `poetry init`:
|
||||
# https://python-poetry.org/docs/basic-usage/#initialising-a-pre-existing-project
|
||||
# - then manually ensure all deps are converted over:
|
||||
# - add this file to the repo and commit it
|
||||
# -
|
||||
|
||||
# GROKin tips:
|
||||
# - CLI eps are (ostensibly) added via an `entry_points.txt`:
|
||||
# - https://packaging.python.org/en/latest/specifications/entry-points/#file-format
|
||||
# - https://github.com/nix-community/poetry2nix/blob/master/editable.nix#L49
|
||||
{
|
||||
description = "piker: trading gear for hackers (pkged with poetry2nix)";
|
||||
|
||||
inputs.flake-utils.url = "github:numtide/flake-utils";
|
||||
inputs.nixpkgs.url = "github:NixOS/nixpkgs/nixos-unstable";
|
||||
|
||||
# see https://github.com/nix-community/poetry2nix/tree/master#api
|
||||
inputs.poetry2nix = {
|
||||
# url = "github:nix-community/poetry2nix";
|
||||
# url = "github:K900/poetry2nix/qt5-explicit-deps";
|
||||
url = "/home/lord_fomo/repos/poetry2nix";
|
||||
|
||||
inputs.nixpkgs.follows = "nixpkgs";
|
||||
};
|
||||
|
||||
outputs = {
|
||||
self,
|
||||
nixpkgs,
|
||||
flake-utils,
|
||||
poetry2nix,
|
||||
}:
|
||||
# TODO: build cross-OS and use the `${system}` var thingy..
|
||||
flake-utils.lib.eachDefaultSystem (system:
|
||||
let
|
||||
# use PWD as sources
|
||||
projectDir = ./.;
|
||||
pyproject = ./pyproject.toml;
|
||||
poetrylock = ./poetry.lock;
|
||||
|
||||
# TODO: port to 3.11 and support both versions?
|
||||
python = "python3.10";
|
||||
|
||||
# for more functions and examples.
|
||||
# inherit
|
||||
# (poetry2nix.legacyPackages.${system})
|
||||
# mkPoetryApplication;
|
||||
# pkgs = nixpkgs.legacyPackages.${system};
|
||||
|
||||
pkgs = nixpkgs.legacyPackages.x86_64-linux;
|
||||
lib = pkgs.lib;
|
||||
p2npkgs = poetry2nix.legacyPackages.x86_64-linux;
|
||||
|
||||
# define all pkg overrides per dep, see edgecases.md:
|
||||
# https://github.com/nix-community/poetry2nix/blob/master/docs/edgecases.md
|
||||
# TODO: add these into the json file:
|
||||
# https://github.com/nix-community/poetry2nix/blob/master/overrides/build-systems.json
|
||||
pypkgs-build-requirements = {
|
||||
asyncvnc = [ "setuptools" ];
|
||||
eventkit = [ "setuptools" ];
|
||||
ib-insync = [ "setuptools" "flake8" ];
|
||||
msgspec = [ "setuptools"];
|
||||
pdbp = [ "setuptools" ];
|
||||
pyqt6-sip = [ "setuptools" ];
|
||||
tabcompleter = [ "setuptools" ];
|
||||
tractor = [ "setuptools" ];
|
||||
tricycle = [ "setuptools" ];
|
||||
trio-typing = [ "setuptools" ];
|
||||
trio-util = [ "setuptools" ];
|
||||
xonsh = [ "setuptools" ];
|
||||
};
|
||||
|
||||
# auto-generate override entries
|
||||
p2n-overrides = p2npkgs.defaultPoetryOverrides.extend (self: super:
|
||||
builtins.mapAttrs (package: build-requirements:
|
||||
(builtins.getAttr package super).overridePythonAttrs (old: {
|
||||
buildInputs = (
|
||||
old.buildInputs or [ ]
|
||||
) ++ (
|
||||
builtins.map (
|
||||
pkg: if builtins.isString pkg then builtins.getAttr pkg super else pkg
|
||||
) build-requirements
|
||||
);
|
||||
})
|
||||
) pypkgs-build-requirements
|
||||
);
|
||||
|
||||
# override some ahead-of-time compiled extensions
|
||||
# to be built with their wheels.
|
||||
ahot_overrides = p2n-overrides.extend(
|
||||
final: prev: {
|
||||
|
||||
# llvmlite = prev.llvmlite.override {
|
||||
# preferWheel = false;
|
||||
# };
|
||||
|
||||
# TODO: get this workin with p2n and nixpkgs..
|
||||
# pyqt6 = prev.pyqt6.override {
|
||||
# preferWheel = true;
|
||||
# };
|
||||
|
||||
# NOTE: this DOESN'T work atm but after a fix
|
||||
# to poetry2nix, it will and actually this line
|
||||
# won't be needed - thanks @k900:
|
||||
# https://github.com/nix-community/poetry2nix/pull/1257
|
||||
pyqt5 = prev.pyqt5.override {
|
||||
# withWebkit = false;
|
||||
preferWheel = true;
|
||||
};
|
||||
|
||||
# see PR from @k900:
|
||||
# https://github.com/nix-community/poetry2nix/pull/1257
|
||||
# pyqt5-qt5 = prev.pyqt5-qt5.override {
|
||||
# withWebkit = false;
|
||||
# preferWheel = true;
|
||||
# };
|
||||
|
||||
# TODO: patch in an override for polars to build
|
||||
# from src! See the details likely needed from
|
||||
# the cryptography entry:
|
||||
# https://github.com/nix-community/poetry2nix/blob/master/overrides/default.nix#L426-L435
|
||||
polars = prev.polars.override {
|
||||
preferWheel = true;
|
||||
};
|
||||
}
|
||||
);
|
||||
|
||||
# WHY!? -> output-attrs that `nix develop` scans for:
|
||||
# https://nixos.org/manual/nix/stable/command-ref/new-cli/nix3-develop.html#flake-output-attributes
|
||||
in
|
||||
rec {
|
||||
packages = {
|
||||
# piker = poetry2nix.legacyPackages.x86_64-linux.mkPoetryEditablePackage {
|
||||
# editablePackageSources = { piker = ./piker; };
|
||||
|
||||
piker = p2npkgs.mkPoetryApplication {
|
||||
projectDir = projectDir;
|
||||
|
||||
# SEE ABOVE for auto-genned input set, override
|
||||
# buncha deps with extras.. like `setuptools` mostly.
|
||||
# TODO: maybe propose a patch to p2n to show that you
|
||||
# can even do this in the edgecases docs?
|
||||
overrides = ahot_overrides;
|
||||
|
||||
# XXX: won't work on llvmlite..
|
||||
# preferWheels = true;
|
||||
};
|
||||
};
|
||||
|
||||
# devShells.default = pkgs.mkShell {
|
||||
# projectDir = projectDir;
|
||||
# python = "python3.10";
|
||||
# overrides = ahot_overrides;
|
||||
# inputsFrom = [ self.packages.x86_64-linux.piker ];
|
||||
# packages = packages;
|
||||
# # packages = [ poetry2nix.packages.${system}.poetry ];
|
||||
# };
|
||||
|
||||
# TODO: grok the difference here..
|
||||
# - avoid re-cloning git repos on every develop entry..
|
||||
# - ideally allow hacking on the src code of some deps
|
||||
# (tractor, pyqtgraph, tomlkit, etc.) WITHOUT having to
|
||||
# re-install them every time a change is made.
|
||||
# - boot a usable xonsh inside the poetry virtualenv when
|
||||
# defined via a custom entry point?
|
||||
devShells.default = p2npkgs.mkPoetryEnv {
|
||||
# env = p2npkgs.mkPoetryEnv {
|
||||
projectDir = projectDir;
|
||||
python = pkgs.python310;
|
||||
overrides = ahot_overrides;
|
||||
editablePackageSources = packages;
|
||||
# piker = "./";
|
||||
# tractor = "../tractor/";
|
||||
# }; # wut?
|
||||
};
|
||||
}
|
||||
); # end of .outputs scope
|
||||
}
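For day-to-day use with this flake at the repo root, the standard flakes CLI applies: `nix develop` enters the poetry-backed dev shell defined by devShells.default, and `nix build .#piker` builds the packaged app; exact invocations may vary with your nix version and whether the flakes experimental features are enabled.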
|
|
@ -1,5 +1,5 @@
|
|||
# piker: trading gear for hackers.
|
||||
# Copyright 2020-eternity Tyler Goodlet (in stewardship for piker0)
|
||||
# Copyright 2020-eternity Tyler Goodlet (in stewardship for pikers)
|
||||
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU Affero General Public License as published by
|
||||
|
@ -14,7 +14,14 @@
|
|||
# You should have received a copy of the GNU Affero General Public License
|
||||
# along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
"""
|
||||
'''
|
||||
piker: trading gear for hackers.
|
||||
|
||||
"""
|
||||
'''
|
||||
from .service import open_piker_runtime
|
||||
from .data.feed import open_feed
|
||||
|
||||
__all__ = [
|
||||
'open_piker_runtime',
|
||||
'open_feed',
|
||||
]
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
# piker: trading gear for hackers
|
||||
# Copyright (C) Tyler Goodlet (in stewardship for piker0)
|
||||
# Copyright (C) Tyler Goodlet (in stewardship for pikers)
|
||||
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU Affero General Public License as published by
|
||||
|
@ -14,37 +14,71 @@
|
|||
# You should have received a copy of the GNU Affero General Public License
|
||||
# along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
"""
|
||||
'''
|
||||
Cacheing apis and toolz.
|
||||
|
||||
"""
|
||||
'''
|
||||
|
||||
from collections import OrderedDict
|
||||
from contextlib import (
|
||||
asynccontextmanager,
|
||||
from typing import (
|
||||
Awaitable,
|
||||
Callable,
|
||||
ParamSpec,
|
||||
TypeVar,
|
||||
)
|
||||
|
||||
from tractor.trionics import maybe_open_context
|
||||
|
||||
from .brokers import get_brokermod
|
||||
from .log import get_logger
|
||||
|
||||
|
||||
log = get_logger(__name__)
|
||||
|
||||
T = TypeVar("T")
|
||||
P = ParamSpec("P")
|
||||
|
||||
def async_lifo_cache(maxsize=128):
|
||||
"""Async ``cache`` with a LIFO policy.
|
||||
|
||||
# TODO: move this to `tractor.trionics`..
|
||||
# - egs. to replicate for tests: https://github.com/aio-libs/async-lru#usage
|
||||
# - their suite as well:
|
||||
# https://github.com/aio-libs/async-lru/tree/master/tests
|
||||
# - asked trio_util about it too:
|
||||
# https://github.com/groove-x/trio-util/issues/21
|
||||
def async_lifo_cache(
|
||||
maxsize=128,
|
||||
|
||||
# NOTE: typing style was learned from:
|
||||
# https://stackoverflow.com/a/71132186
|
||||
) -> Callable[
|
||||
Callable[P, Awaitable[T]],
|
||||
Callable[
|
||||
Callable[P, Awaitable[T]],
|
||||
Callable[P, Awaitable[T]],
|
||||
],
|
||||
]:
|
||||
'''
|
||||
Async ``cache`` with a LIFO policy.
|
||||
|
||||
Implemented my own since no one else seems to have
|
||||
a standard. I'll wait for the smarter people to come
|
||||
up with one, but until then...
|
||||
"""
|
||||
|
||||
NOTE: when decorating, due to this simple/naive implementation, you
|
||||
MUST call the decorator like,
|
||||
|
||||
.. code:: python
|
||||
|
||||
@async_lifo_cache()
|
||||
async def cache_target():
|
||||
|
||||
'''
|
||||
cache = OrderedDict()
|
||||
|
||||
def decorator(fn):
|
||||
def decorator(
|
||||
fn: Callable[P, Awaitable[T]],
|
||||
) -> Callable[P, Awaitable[T]]:
|
||||
|
||||
async def wrapper(*args):
|
||||
async def decorated(
|
||||
*args: P.args,
|
||||
**kwargs: P.kwargs,
|
||||
) -> T:
|
||||
key = args
|
||||
try:
|
||||
return cache[key]
|
||||
|
@ -53,27 +87,13 @@ def async_lifo_cache(maxsize=128):
|
|||
# discard last added new entry
|
||||
cache.popitem()
|
||||
|
||||
# do it
|
||||
cache[key] = await fn(*args)
|
||||
# call underlying
|
||||
cache[key] = await fn(
|
||||
*args,
|
||||
**kwargs,
|
||||
)
|
||||
return cache[key]
|
||||
|
||||
return wrapper
|
||||
return decorated
|
||||
|
||||
return decorator
|
||||
|
||||
|
||||
@asynccontextmanager
|
||||
async def open_cached_client(
|
||||
brokername: str,
|
||||
) -> 'Client': # noqa
|
||||
'''
|
||||
Get a cached broker client from the current actor's local vars.
|
||||
|
||||
If one has not been setup do it and cache it.
|
||||
|
||||
'''
|
||||
brokermod = get_brokermod(brokername)
|
||||
async with maybe_open_context(
|
||||
acm_func=brokermod.get_client,
|
||||
) as (cache_hit, client):
|
||||
yield client
|
||||
|
|
561
piker/_daemon.py
|
@ -1,561 +0,0 @@
|
|||
# piker: trading gear for hackers
|
||||
# Copyright (C) Tyler Goodlet (in stewardship for piker0)
|
||||
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU Affero General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU Affero General Public License for more details.
|
||||
|
||||
# You should have received a copy of the GNU Affero General Public License
|
||||
# along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
"""
|
||||
Structured, daemon tree service management.
|
||||
|
||||
"""
|
||||
from typing import Optional, Union, Callable, Any
|
||||
from contextlib import asynccontextmanager as acm
|
||||
from collections import defaultdict
|
||||
|
||||
from pydantic import BaseModel
|
||||
import trio
|
||||
from trio_typing import TaskStatus
|
||||
import tractor
|
||||
|
||||
from .log import get_logger, get_console_log
|
||||
from .brokers import get_brokermod
|
||||
|
||||
|
||||
log = get_logger(__name__)
|
||||
|
||||
_root_dname = 'pikerd'
|
||||
|
||||
_registry_addr = ('127.0.0.1', 6116)
|
||||
_tractor_kwargs: dict[str, Any] = {
|
||||
# use a different registry addr then tractor's default
|
||||
'arbiter_addr': _registry_addr
|
||||
}
|
||||
_root_modules = [
|
||||
__name__,
|
||||
'piker.clearing._ems',
|
||||
'piker.clearing._client',
|
||||
]
|
||||
|
||||
|
||||
class Services(BaseModel):
|
||||
|
||||
actor_n: tractor._supervise.ActorNursery
|
||||
service_n: trio.Nursery
|
||||
debug_mode: bool # tractor sub-actor debug mode flag
|
||||
service_tasks: dict[str, tuple[trio.CancelScope, tractor.Portal]] = {}
|
||||
|
||||
class Config:
|
||||
arbitrary_types_allowed = True
|
||||
|
||||
async def start_service_task(
|
||||
self,
|
||||
name: str,
|
||||
portal: tractor.Portal,
|
||||
target: Callable,
|
||||
**kwargs,
|
||||
|
||||
) -> (trio.CancelScope, tractor.Context):
|
||||
'''
|
||||
Open a context in a service sub-actor, add to a stack
|
||||
that gets unwound at ``pikerd`` teardown.
|
||||
|
||||
This allows for allocating long-running sub-services in our main
|
||||
daemon and explicitly controlling their lifetimes.
|
||||
|
||||
'''
|
||||
async def open_context_in_task(
|
||||
task_status: TaskStatus[
|
||||
trio.CancelScope] = trio.TASK_STATUS_IGNORED,
|
||||
|
||||
) -> Any:
|
||||
|
||||
with trio.CancelScope() as cs:
|
||||
async with portal.open_context(
|
||||
target,
|
||||
**kwargs,
|
||||
|
||||
) as (ctx, first):
|
||||
|
||||
# unblock once the remote context has started
|
||||
task_status.started((cs, first))
|
||||
log.info(
|
||||
f'`pikerd` service {name} started with value {first}'
|
||||
)
|
||||
try:
|
||||
# wait on any context's return value
|
||||
ctx_res = await ctx.result()
|
||||
except tractor.ContextCancelled:
|
||||
return await self.cancel_service(name)
|
||||
else:
|
||||
# wait on any error from the sub-actor
|
||||
# NOTE: this will block indefinitely until
|
||||
# cancelled either by error from the target
|
||||
# context function or by being cancelled here by
|
||||
# the surrounding cancel scope
|
||||
return (await portal.result(), ctx_res)
|
||||
|
||||
cs, first = await self.service_n.start(open_context_in_task)
|
||||
|
||||
# store the cancel scope and portal for later cancellation or
|
||||
# retstart if needed.
|
||||
self.service_tasks[name] = (cs, portal)
|
||||
|
||||
return cs, first
|
||||
|
||||
# TODO: per service cancellation by scope, we aren't using this
|
||||
# anywhere right?
|
||||
async def cancel_service(
|
||||
self,
|
||||
name: str,
|
||||
) -> Any:
|
||||
log.info(f'Cancelling `pikerd` service {name}')
|
||||
cs, portal = self.service_tasks[name]
|
||||
# XXX: not entirely sure why this is required,
|
||||
# and should probably be better fine tuned in
|
||||
# ``tractor``?
|
||||
cs.cancel()
|
||||
return await portal.cancel_actor()
|
||||
|
||||
|
||||
_services: Optional[Services] = None
|
||||
|
||||
|
||||
@acm
|
||||
async def open_pikerd(
|
||||
start_method: str = 'trio',
|
||||
loglevel: Optional[str] = None,
|
||||
|
||||
# XXX: you should pretty much never want debug mode
|
||||
# for data daemons when running in production.
|
||||
debug_mode: bool = False,
|
||||
|
||||
) -> Optional[tractor._portal.Portal]:
|
||||
'''
|
||||
Start a root piker daemon who's lifetime extends indefinitely
|
||||
until cancelled.
|
||||
|
||||
A root actor nursery is created which can be used to create and keep
|
||||
alive underling services (see below).
|
||||
|
||||
'''
|
||||
global _services
|
||||
assert _services is None
|
||||
|
||||
# XXX: this may open a root actor as well
|
||||
async with (
|
||||
tractor.open_root_actor(
|
||||
|
||||
# passed through to ``open_root_actor``
|
||||
arbiter_addr=_registry_addr,
|
||||
name=_root_dname,
|
||||
loglevel=loglevel,
|
||||
debug_mode=debug_mode,
|
||||
start_method=start_method,
|
||||
|
||||
# TODO: eventually we should be able to avoid
|
||||
# having the root have more then permissions to
|
||||
# spawn other specialized daemons I think?
|
||||
enable_modules=_root_modules,
|
||||
) as _,
|
||||
|
||||
tractor.open_nursery() as actor_nursery,
|
||||
):
|
||||
async with trio.open_nursery() as service_nursery:
|
||||
|
||||
# # setup service mngr singleton instance
|
||||
# async with AsyncExitStack() as stack:
|
||||
|
||||
# assign globally for future daemon/task creation
|
||||
_services = Services(
|
||||
actor_n=actor_nursery,
|
||||
service_n=service_nursery,
|
||||
debug_mode=debug_mode,
|
||||
)
|
||||
|
||||
yield _services
|
||||
|
||||
|
||||
@acm
|
||||
async def open_piker_runtime(
|
||||
name: str,
|
||||
enable_modules: list[str] = [],
|
||||
start_method: str = 'trio',
|
||||
loglevel: Optional[str] = None,
|
||||
|
||||
# XXX: you should pretty much never want debug mode
|
||||
# for data daemons when running in production.
|
||||
debug_mode: bool = False,
|
||||
|
||||
) -> Optional[tractor._portal.Portal]:
|
||||
'''
|
||||
Start a piker actor who's runtime will automatically
|
||||
sync with existing piker actors in local network
|
||||
based on configuration.
|
||||
|
||||
'''
|
||||
global _services
|
||||
assert _services is None
|
||||
|
||||
# XXX: this may open a root actor as well
|
||||
async with (
|
||||
tractor.open_root_actor(
|
||||
|
||||
# passed through to ``open_root_actor``
|
||||
arbiter_addr=_registry_addr,
|
||||
name=name,
|
||||
loglevel=loglevel,
|
||||
debug_mode=debug_mode,
|
||||
start_method=start_method,
|
||||
|
||||
# TODO: eventually we should be able to avoid
|
||||
# having the root have more then permissions to
|
||||
# spawn other specialized daemons I think?
|
||||
enable_modules=_root_modules,
|
||||
) as _,
|
||||
):
|
||||
yield tractor.current_actor()
|
||||
|
||||
|
||||
@acm
|
||||
async def maybe_open_runtime(
|
||||
loglevel: Optional[str] = None,
|
||||
**kwargs,
|
||||
|
||||
) -> None:
|
||||
"""
|
||||
Start the ``tractor`` runtime (a root actor) if none exists.
|
||||
|
||||
"""
|
||||
settings = _tractor_kwargs
|
||||
settings.update(kwargs)
|
||||
|
||||
if not tractor.current_actor(err_on_no_runtime=False):
|
||||
async with tractor.open_root_actor(
|
||||
loglevel=loglevel,
|
||||
**settings,
|
||||
):
|
||||
yield
|
||||
else:
|
||||
yield
|
||||
|
||||
|
||||
@acm
|
||||
async def maybe_open_pikerd(
|
||||
loglevel: Optional[str] = None,
|
||||
**kwargs,
|
||||
|
||||
) -> Union[tractor._portal.Portal, Services]:
|
||||
"""If no ``pikerd`` daemon-root-actor can be found start it and
|
||||
yield up (we should probably figure out returning a portal to self
|
||||
though).
|
||||
|
||||
"""
|
||||
if loglevel:
|
||||
get_console_log(loglevel)
|
||||
|
||||
# subtle, we must have the runtime up here or portal lookup will fail
|
||||
async with maybe_open_runtime(loglevel, **kwargs):
|
||||
|
||||
async with tractor.find_actor(_root_dname) as portal:
|
||||
# assert portal is not None
|
||||
if portal is not None:
|
||||
yield portal
|
||||
return
|
||||
|
||||
# presume pikerd role since no daemon could be found at
|
||||
# configured address
|
||||
async with open_pikerd(
|
||||
|
||||
loglevel=loglevel,
|
||||
debug_mode=kwargs.get('debug_mode', False),
|
||||
|
||||
) as _:
|
||||
# in the case where we're starting up the
|
||||
# tractor-piker runtime stack in **this** process
|
||||
# we return no portal to self.
|
||||
yield None
|
||||
|
||||
|
||||
# brokerd enabled modules
|
||||
_data_mods = [
|
||||
'piker.brokers.core',
|
||||
'piker.brokers.data',
|
||||
'piker.data',
|
||||
'piker.data.feed',
|
||||
'piker.data._sampling'
|
||||
]
|
||||
|
||||
|
||||
class Brokerd:
|
||||
locks = defaultdict(trio.Lock)
|
||||
|
||||
|
||||
@acm
|
||||
async def find_service(
|
||||
service_name: str,
|
||||
) -> Optional[tractor.Portal]:
|
||||
|
||||
log.info(f'Scanning for service `{service_name}`')
|
||||
# attach to existing daemon by name if possible
|
||||
async with tractor.find_actor(
|
||||
service_name,
|
||||
arbiter_sockaddr=_registry_addr,
|
||||
) as maybe_portal:
|
||||
yield maybe_portal
|
||||
|
||||
|
||||
async def check_for_service(
|
||||
service_name: str,
|
||||
|
||||
) -> bool:
|
||||
'''
|
||||
Service daemon "liveness" predicate.
|
||||
|
||||
'''
|
||||
async with tractor.query_actor(
|
||||
service_name,
|
||||
arbiter_sockaddr=_registry_addr,
|
||||
) as sockaddr:
|
||||
return sockaddr
|
||||
|
||||
|
||||
@acm
|
||||
async def maybe_spawn_daemon(
|
||||
|
||||
service_name: str,
|
||||
service_task_target: Callable,
|
||||
spawn_args: dict[str, Any],
|
||||
loglevel: Optional[str] = None,
|
||||
**kwargs,
|
||||
|
||||
) -> tractor.Portal:
|
||||
'''
|
||||
If no ``service_name`` daemon-actor can be found,
|
||||
spawn one in a local subactor and return a portal to it.
|
||||
|
||||
If this function is called from a non-pikerd actor, the
|
||||
spawned service will persist as long as pikerd does or
|
||||
it is requested to be cancelled.
|
||||
|
||||
This can be seen as a service starting api for remote-actor
|
||||
clients.
|
||||
|
||||
'''
|
||||
if loglevel:
|
||||
get_console_log(loglevel)
|
||||
|
||||
# serialize access to this section to avoid
|
||||
# 2 or more tasks racing to create a daemon
|
||||
lock = Brokerd.locks[service_name]
|
||||
await lock.acquire()
|
||||
|
||||
async with find_service(service_name) as portal:
|
||||
if portal is not None:
|
||||
lock.release()
|
||||
yield portal
|
||||
return
|
||||
|
||||
log.warning(f"Couldn't find any existing {service_name}")
|
||||
|
||||
# ask root ``pikerd`` daemon to spawn the daemon we need if
|
||||
# pikerd is not live we now become the root of the
|
||||
# process tree
|
||||
async with maybe_open_pikerd(
|
||||
|
||||
loglevel=loglevel,
|
||||
**kwargs,
|
||||
|
||||
) as pikerd_portal:
|
||||
|
||||
if pikerd_portal is None:
|
||||
# we are the root and thus are `pikerd`
|
||||
# so spawn the target service directly by calling
|
||||
# the provided target routine.
|
||||
# XXX: this assumes that the target is well formed and will
|
||||
# do the right things to setup both a sub-actor **and** call
|
||||
# the ``_Services`` api from above to start the top level
|
||||
# service task for that actor.
|
||||
await service_task_target(**spawn_args)
|
||||
|
||||
else:
|
||||
# tell the remote `pikerd` to start the target,
|
||||
# the target can't return a non-serializable value
|
||||
# since it is expected that service starting is
|
||||
# non-blocking and the target task will persist running
|
||||
# on `pikerd` after the client requesting its start
|
||||
# disconnects.
|
||||
await pikerd_portal.run(
|
||||
service_task_target,
|
||||
**spawn_args,
|
||||
)
|
||||
|
||||
async with tractor.wait_for_actor(service_name) as portal:
|
||||
lock.release()
|
||||
yield portal
|
||||
await portal.cancel_actor()
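# XXX: a minimal usage sketch of the above; the concrete
# `maybe_spawn_brokerd()`/`maybe_open_emsd()` wrappers below follow
# exactly this pattern, where `spawn_somethingd` stands in for a
# hypothetical backend-provided task-starter coroutine:
#
# @acm
# async def maybe_spawn_somethingd(loglevel: Optional[str] = None):
#     async with maybe_spawn_daemon(
#         'somethingd',
#         service_task_target=spawn_somethingd,
#         spawn_args={'loglevel': loglevel},
#         loglevel=loglevel,
#     ) as portal:
#         yield portal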
|
||||
|
||||
|
||||
async def spawn_brokerd(
|
||||
|
||||
brokername: str,
|
||||
loglevel: Optional[str] = None,
|
||||
**tractor_kwargs,
|
||||
|
||||
) -> bool:
|
||||
|
||||
log.info(f'Spawning {brokername} broker daemon')
|
||||
|
||||
brokermod = get_brokermod(brokername)
|
||||
dname = f'brokerd.{brokername}'
|
||||
|
||||
extra_tractor_kwargs = getattr(brokermod, '_spawn_kwargs', {})
|
||||
tractor_kwargs.update(extra_tractor_kwargs)
|
||||
|
||||
global _services
|
||||
assert _services
|
||||
|
||||
# ask `pikerd` to spawn a new sub-actor and manage it under its
|
||||
# actor nursery
|
||||
modpath = brokermod.__name__
|
||||
broker_enable = [modpath]
|
||||
for submodname in getattr(
|
||||
brokermod,
|
||||
'__enable_modules__',
|
||||
[],
|
||||
):
|
||||
subpath = f'{modpath}.{submodname}'
|
||||
broker_enable.append(subpath)
|
||||
|
||||
portal = await _services.actor_n.start_actor(
|
||||
dname,
|
||||
enable_modules=_data_mods + broker_enable,
|
||||
loglevel=loglevel,
|
||||
debug_mode=_services.debug_mode,
|
||||
**tractor_kwargs
|
||||
)
|
||||
|
||||
# non-blocking setup of brokerd service nursery
|
||||
from .data import _setup_persistent_brokerd
|
||||
|
||||
await _services.start_service_task(
|
||||
dname,
|
||||
portal,
|
||||
_setup_persistent_brokerd,
|
||||
brokername=brokername,
|
||||
)
|
||||
return True
|
||||
|
||||
|
||||
@acm
|
||||
async def maybe_spawn_brokerd(
|
||||
|
||||
brokername: str,
|
||||
loglevel: Optional[str] = None,
|
||||
**kwargs,
|
||||
|
||||
) -> tractor.Portal:
|
||||
'''
|
||||
Helper to spawn a brokerd service *from* a client
|
||||
who wishes to use the sub-actor-daemon.
|
||||
|
||||
'''
|
||||
async with maybe_spawn_daemon(
|
||||
|
||||
f'brokerd.{brokername}',
|
||||
service_task_target=spawn_brokerd,
|
||||
spawn_args={'brokername': brokername, 'loglevel': loglevel},
|
||||
loglevel=loglevel,
|
||||
**kwargs,
|
||||
|
||||
) as portal:
|
||||
yield portal
|
||||
|
||||
|
||||
async def spawn_emsd(
|
||||
|
||||
loglevel: Optional[str] = None,
|
||||
**extra_tractor_kwargs
|
||||
|
||||
) -> bool:
|
||||
"""
|
||||
Start the clearing engine under ``pikerd``.
|
||||
|
||||
"""
|
||||
log.info('Spawning emsd')
|
||||
|
||||
global _services
|
||||
assert _services
|
||||
|
||||
portal = await _services.actor_n.start_actor(
|
||||
'emsd',
|
||||
enable_modules=[
|
||||
'piker.clearing._ems',
|
||||
'piker.clearing._client',
|
||||
],
|
||||
loglevel=loglevel,
|
||||
debug_mode=_services.debug_mode, # set by pikerd flag
|
||||
**extra_tractor_kwargs
|
||||
)
|
||||
|
||||
# non-blocking setup of clearing service
|
||||
from .clearing._ems import _setup_persistent_emsd
|
||||
|
||||
await _services.start_service_task(
|
||||
'emsd',
|
||||
portal,
|
||||
_setup_persistent_emsd,
|
||||
)
|
||||
return True
|
||||
|
||||
|
||||
@acm
|
||||
async def maybe_open_emsd(
|
||||
|
||||
brokername: str,
|
||||
loglevel: Optional[str] = None,
|
||||
**kwargs,
|
||||
|
||||
) -> tractor._portal.Portal: # noqa
|
||||
|
||||
async with maybe_spawn_daemon(
|
||||
|
||||
'emsd',
|
||||
service_task_target=spawn_emsd,
|
||||
spawn_args={'loglevel': loglevel},
|
||||
loglevel=loglevel,
|
||||
**kwargs,
|
||||
|
||||
) as portal:
|
||||
yield portal
|
||||
|
||||
|
||||
# TODO: ideally we can start the tsdb "on demand" but it's
|
||||
# probably going to require "rootless" docker, at least if we don't
|
||||
# want to expect the user to start ``pikerd`` with root perms all the
|
||||
# time.
|
||||
# async def maybe_open_marketstored(
|
||||
# loglevel: Optional[str] = None,
|
||||
# **kwargs,
|
||||
|
||||
# ) -> tractor._portal.Portal: # noqa
|
||||
|
||||
# async with maybe_spawn_daemon(
|
||||
|
||||
# 'marketstored',
|
||||
# service_task_target=spawn_emsd,
|
||||
# spawn_args={'loglevel': loglevel},
|
||||
# loglevel=loglevel,
|
||||
# **kwargs,
|
||||
|
||||
# ) as portal:
|
||||
# yield portal
|
@ -0,0 +1,16 @@
|
|||
.accounting
|
||||
-----------
|
||||
A subsystem for transaction processing, storage and historical
|
||||
measurement.
|
||||
|
||||
|
||||
.pnl
|
||||
----
|
||||
BEP, the break even price: the price at which liquidating
|
||||
a remaining position results in a zero PnL since the position was
|
||||
"opened" in the destination asset.
|
||||
|
||||
PPU: price-per-unit: the "average cost" (in cumulative mean terms)
|
||||
of the "entry" transactions which "make a position larger"; taking
|
||||
a profit relative to this price means that you will "make more
|
||||
profit than made prior" since the position was opened.
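A tiny worked example (illustrative numbers only): buying 2 units at
100 and then 1 unit at 130 gives a PPU of (2*100 + 1*130) / 3 = 110,
so any exit above 110 takes profit relative to the average entry
cost. If 1 unit is then sold at 140 (+30 vs. the PPU), the BEP on the
remaining 2 units falls to (2*110 - 30) / 2 = 95: liquidating the
rest at 95 would already result in a zero total PnL.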
@ -0,0 +1,107 @@
|
|||
# piker: trading gear for hackers
|
||||
# Copyright (C) Tyler Goodlet (in stewardship for pikers)
|
||||
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU Affero General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU Affero General Public License for more details.
|
||||
|
||||
# You should have received a copy of the GNU Affero General Public License
|
||||
# along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
'''
|
||||
"Accounting for degens": count dem numberz that tracks how much you got
|
||||
for tendiez.
|
||||
|
||||
'''
|
||||
from ..log import get_logger
|
||||
|
||||
from .calc import (
|
||||
iter_by_dt,
|
||||
)
|
||||
from ._ledger import (
|
||||
Transaction,
|
||||
TransactionLedger,
|
||||
open_trade_ledger,
|
||||
)
|
||||
from ._pos import (
|
||||
Account,
|
||||
load_account,
|
||||
load_account_from_ledger,
|
||||
open_pps,
|
||||
open_account,
|
||||
Position,
|
||||
)
|
||||
from ._mktinfo import (
|
||||
Asset,
|
||||
dec_digits,
|
||||
digits_to_dec,
|
||||
MktPair,
|
||||
Symbol,
|
||||
unpack_fqme,
|
||||
_derivs as DerivTypes,
|
||||
)
|
||||
from ._allocate import (
|
||||
mk_allocator,
|
||||
Allocator,
|
||||
)
|
||||
|
||||
|
||||
log = get_logger(__name__)
|
||||
|
||||
__all__ = [
|
||||
'Account',
|
||||
'Allocator',
|
||||
'Asset',
|
||||
'MktPair',
|
||||
'Position',
|
||||
'Symbol',
|
||||
'Transaction',
|
||||
'TransactionLedger',
|
||||
'dec_digits',
|
||||
'digits_to_dec',
|
||||
'iter_by_dt',
|
||||
'load_account',
|
||||
'load_account_from_ledger',
|
||||
'mk_allocator',
|
||||
'open_account',
|
||||
'open_pps',
|
||||
'open_trade_ledger',
|
||||
'unpack_fqme',
|
||||
'DerivTypes',
|
||||
]
|
||||
|
||||
|
||||
def get_likely_pair(
|
||||
src: str,
|
||||
dst: str,
|
||||
bs_mktid: str,
|
||||
|
||||
) -> str | None:
|
||||
'''
|
||||
Attempt to get the likely trading pair matching a given destination
|
||||
asset `dst: str`.
|
||||
|
||||
'''
|
||||
try:
|
||||
src_name_start: str = bs_mktid.rindex(src)
|
||||
except (
|
||||
ValueError, # substr not found
|
||||
):
|
||||
# TODO: handle nested positions..(i.e.
|
||||
# positions where the src fiat was used to
|
||||
# buy some other dst which was further used
|
||||
# to buy another dst..)
|
||||
# log.warning(
|
||||
# f'No src fiat {src} found in {bs_mktid}?'
|
||||
# )
|
||||
return None
|
||||
|
||||
likely_dst: str = bs_mktid[:src_name_start]
|
||||
if likely_dst == dst:
|
||||
return bs_mktid
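# XXX: an illustrative sketch of the intended matching behaviour
# using made-up market-key values:
#
# >>> get_likely_pair(src='usd', dst='xmr', bs_mktid='xmrusd')
# 'xmrusd'
# >>> get_likely_pair(src='usd', dst='xmr', bs_mktid='btcusd') is None
# True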
@ -22,54 +22,10 @@ from enum import Enum
|
|||
from typing import Optional
|
||||
|
||||
from bidict import bidict
|
||||
from pydantic import BaseModel, validator
|
||||
|
||||
from ..data._source import Symbol
|
||||
from ._messages import BrokerdPosition, Status
|
||||
|
||||
|
||||
class Position(BaseModel):
|
||||
'''
|
||||
Basic pp (personal position) model with attached fills history.
|
||||
|
||||
This type should be IPC wire ready?
|
||||
|
||||
'''
|
||||
symbol: Symbol
|
||||
|
||||
# last size and avg entry price
|
||||
size: float
|
||||
avg_price: float # TODO: contextual pricing
|
||||
|
||||
# ordered record of known constituent trade messages
|
||||
fills: list[Status] = []
|
||||
|
||||
def update_from_msg(
|
||||
self,
|
||||
msg: BrokerdPosition,
|
||||
|
||||
) -> None:
|
||||
|
||||
# XXX: better place to do this?
|
||||
symbol = self.symbol
|
||||
|
||||
lot_size_digits = symbol.lot_size_digits
|
||||
avg_price, size = (
|
||||
round(msg['avg_price'], ndigits=symbol.tick_size_digits),
|
||||
round(msg['size'], ndigits=lot_size_digits),
|
||||
)
|
||||
|
||||
self.avg_price = avg_price
|
||||
self.size = size
|
||||
|
||||
@property
|
||||
def dsize(self) -> float:
|
||||
'''
|
||||
The "dollar" size of the pp, normally in trading (fiat) unit
|
||||
terms.
|
||||
|
||||
'''
|
||||
return self.avg_price * self.size
|
||||
from ._pos import Position
|
||||
from . import MktPair
|
||||
from piker.types import Struct
|
||||
|
||||
|
||||
_size_units = bidict({
|
||||
|
@ -84,34 +40,9 @@ SizeUnit = Enum(
|
|||
)
|
||||
|
||||
|
||||
class Allocator(BaseModel):
|
||||
class Allocator(Struct):
|
||||
|
||||
class Config:
|
||||
validate_assignment = True
|
||||
copy_on_model_validation = False
|
||||
arbitrary_types_allowed = True
|
||||
|
||||
# required to get the account validator lookup working?
|
||||
extra = 'allow'
|
||||
underscore_attrs_are_private = False
|
||||
|
||||
symbol: Symbol
|
||||
account: Optional[str] = 'paper'
|
||||
# TODO: for enums this clearly doesn't fucking work, you can't set
|
||||
# a default at startup by passing in a `dict` but yet you can set
|
||||
# that value through assignment..for wtv cucked reason.. honestly, pure
|
||||
# unintuitive garbage.
|
||||
size_unit: str = 'currency'
|
||||
_size_units: dict[str, Optional[str]] = _size_units
|
||||
|
||||
@validator('size_unit', pre=True)
|
||||
def maybe_lookup_key(cls, v):
|
||||
# apply the corresponding enum key for the text "description" value
|
||||
if v not in _size_units:
|
||||
return _size_units.inverse[v]
|
||||
|
||||
assert v in _size_units
|
||||
return v
|
||||
mkt: MktPair
|
||||
|
||||
# TODO: if we ever want to support non-uniform entry-slot-proportion
|
||||
# "sizes"
|
||||
|
@ -120,6 +51,28 @@ class Allocator(BaseModel):
|
|||
units_limit: float
|
||||
currency_limit: float
|
||||
slots: int
|
||||
account: Optional[str] = 'paper'
|
||||
|
||||
_size_units: bidict[str, Optional[str]] = _size_units
|
||||
|
||||
# TODO: for enums this clearly doesn't fucking work, you can't set
|
||||
# a default at startup by passing in a `dict` but yet you can set
|
||||
# that value through assignment..for wtv cucked reason.. honestly, pure
|
||||
# unintuitive garbage.
|
||||
_size_unit: str = 'currency'
|
||||
|
||||
@property
|
||||
def size_unit(self) -> str:
|
||||
return self._size_unit
|
||||
|
||||
@size_unit.setter
|
||||
def size_unit(self, v: str) -> Optional[str]:
|
||||
if v not in _size_units:
|
||||
v = _size_units.inverse[v]
|
||||
|
||||
assert v in _size_units
|
||||
self._size_unit = v
|
||||
return v
|
||||
|
||||
def step_sizes(
|
||||
self,
|
||||
|
@ -140,10 +93,13 @@ class Allocator(BaseModel):
|
|||
else:
|
||||
return self.units_limit
|
||||
|
||||
def limit_info(self) -> tuple[str, float]:
|
||||
return self.size_unit, self.limit()
|
||||
|
||||
def next_order_info(
|
||||
self,
|
||||
|
||||
# we only need a startup size for exit calcs, we can the
|
||||
# we only need a startup size for exit calcs, we can then
|
||||
# determine how large slots should be if the initial pp size was
|
||||
# larger than the current live one, and the live one is smaller
|
||||
# than the initial config settings.
|
||||
|
@ -158,24 +114,24 @@ class Allocator(BaseModel):
|
|||
depending on position / order entry config.
|
||||
|
||||
'''
|
||||
sym = self.symbol
|
||||
ld = sym.lot_size_digits
|
||||
mkt: MktPair = self.mkt
|
||||
ld: int = mkt.size_tick_digits
|
||||
|
||||
size_unit = self.size_unit
|
||||
live_size = live_pp.size
|
||||
live_size = live_pp.cumsize
|
||||
abs_live_size = abs(live_size)
|
||||
abs_startup_size = abs(startup_pp.size)
|
||||
abs_startup_size = abs(startup_pp.cumsize)
|
||||
|
||||
u_per_slot, currency_per_slot = self.step_sizes()
|
||||
|
||||
if size_unit == 'units':
|
||||
slot_size = u_per_slot
|
||||
l_sub_pp = self.units_limit - abs_live_size
|
||||
slot_size: float = u_per_slot
|
||||
l_sub_pp: float = self.units_limit - abs_live_size
|
||||
|
||||
elif size_unit == 'currency':
|
||||
live_cost_basis = abs_live_size * live_pp.avg_price
|
||||
slot_size = currency_per_slot / price
|
||||
l_sub_pp = (self.currency_limit - live_cost_basis) / price
|
||||
live_cost_basis: float = abs_live_size * live_pp.ppu
|
||||
slot_size: float = currency_per_slot / price
|
||||
l_sub_pp: float = (self.currency_limit - live_cost_basis) / price
|
||||
|
||||
else:
|
||||
raise ValueError(
|
||||
|
@ -184,12 +140,20 @@ class Allocator(BaseModel):
|
|||
|
||||
# an entry (adding-to or starting a pp)
|
||||
if (
|
||||
action == 'buy' and live_size > 0 or
|
||||
action == 'sell' and live_size < 0 or
|
||||
live_size == 0
|
||||
or (
|
||||
action == 'buy'
|
||||
and live_size > 0
|
||||
)
|
||||
or (
|
||||
action == 'sell'
|
||||
and live_size < 0
|
||||
)
|
||||
):
|
||||
|
||||
order_size = min(slot_size, l_sub_pp)
|
||||
order_size = min(
|
||||
slot_size,
|
||||
max(l_sub_pp, 0),
|
||||
)
|
||||
|
||||
# an exit (removing-from or going to net-zero pp)
|
||||
else:
|
||||
|
@ -205,7 +169,7 @@ class Allocator(BaseModel):
|
|||
if size_unit == 'currency':
|
||||
# compute the "projected" limit's worth of units at the
|
||||
# current pp (weighted) price:
|
||||
slot_size = currency_per_slot / live_pp.avg_price
|
||||
slot_size = currency_per_slot / live_pp.ppu
|
||||
|
||||
else:
|
||||
slot_size = u_per_slot
|
||||
|
@ -220,7 +184,7 @@ class Allocator(BaseModel):
|
|||
order_size = max(slotted_pp, slot_size)
|
||||
|
||||
if (
|
||||
abs_live_size < slot_size or
|
||||
abs_live_size < slot_size
|
||||
|
||||
# NOTE: front/back "loading" heurstic:
|
||||
# if the remaining pp is in between 0-1.5x a slot's
|
||||
|
@ -229,14 +193,17 @@ class Allocator(BaseModel):
|
|||
# **without** going past a net-zero pp. if the pp is
|
||||
# > 1.5x a slot size, then front load: exit a slot's and
|
||||
# expect net-zero to be acquired on the final exit.
|
||||
slot_size < pp_size < round((1.5*slot_size), ndigits=ld) or
|
||||
or slot_size < pp_size < round((1.5*slot_size), ndigits=ld)
|
||||
or (
|
||||
|
||||
# underlying requires discrete (int) units (eg. stocks)
|
||||
# and thus our slot size (based on our limit) would
|
||||
# exit a fractional unit's worth so, presuming we aren't
|
||||
# supporting a fractional-units-style broker, we need
|
||||
# exit the final unit.
|
||||
ld == 0 and abs_live_size == 1
|
||||
ld == 0
|
||||
and abs_live_size == 1
|
||||
)
|
||||
):
|
||||
order_size = abs_live_size
|
||||
|
||||
|
@ -244,9 +211,13 @@ class Allocator(BaseModel):
|
|||
if order_size < slot_size:
|
||||
# compute a fractional slots size to display
|
||||
slots_used = self.slots_used(
|
||||
Position(symbol=sym, size=order_size, avg_price=price)
|
||||
Position(
|
||||
mkt=mkt,
|
||||
bs_mktid=mkt.bs_mktid,
|
||||
)
|
||||
)
|
||||
|
||||
# TODO: render an actual ``Executable`` type here?
|
||||
return {
|
||||
'size': abs(round(order_size, ndigits=ld)),
|
||||
'size_digits': ld,
|
||||
|
@ -268,11 +239,11 @@ class Allocator(BaseModel):
|
|||
Calc and return the number of slots used by this ``Position``.
|
||||
|
||||
'''
|
||||
abs_pp_size = abs(pp.size)
|
||||
abs_pp_size = abs(pp.cumsize)
|
||||
|
||||
if self.size_unit == 'currency':
|
||||
# live_currency_size = size or (abs_pp_size * pp.avg_price)
|
||||
live_currency_size = abs_pp_size * pp.avg_price
|
||||
# live_currency_size = size or (abs_pp_size * pp.ppu)
|
||||
live_currency_size = abs_pp_size * pp.ppu
|
||||
prop = live_currency_size / self.currency_limit
|
||||
|
||||
else:
|
||||
|
@ -284,23 +255,15 @@ class Allocator(BaseModel):
|
|||
return round(prop * self.slots)
|
||||
|
||||
|
||||
_derivs = (
|
||||
'future',
|
||||
'continuous_future',
|
||||
'option',
|
||||
'futures_option',
|
||||
)
|
||||
|
||||
|
||||
def mk_allocator(
|
||||
|
||||
symbol: Symbol,
|
||||
mkt: MktPair,
|
||||
startup_pp: Position,
|
||||
|
||||
# default allocation settings
|
||||
defaults: dict[str, float] = {
|
||||
'account': None, # select paper by default
|
||||
'size_unit': 'currency',
|
||||
# 'size_unit': 'currency',
|
||||
'units_limit': 400,
|
||||
'currency_limit': 5e3,
|
||||
'slots': 4,
|
||||
|
@ -318,42 +281,9 @@ def mk_allocator(
|
|||
'currency_limit': 6e3,
|
||||
'slots': 6,
|
||||
}
|
||||
|
||||
defaults.update(user_def)
|
||||
|
||||
alloc = Allocator(
|
||||
symbol=symbol,
|
||||
return Allocator(
|
||||
mkt=mkt,
|
||||
**defaults,
|
||||
)
|
||||
|
||||
asset_type = symbol.type_key
|
||||
|
||||
# specific configs by asset class / type
|
||||
|
||||
if asset_type in _derivs:
|
||||
# since it's harder to know how currency "applies" in this case
|
||||
# given leverage properties
|
||||
alloc.size_unit = '# units'
|
||||
|
||||
# set units limit to slots size thus making the next
|
||||
# entry step 1.0
|
||||
alloc.units_limit = alloc.slots
|
||||
|
||||
# if the current position is already greater then the limit
|
||||
# settings, increase the limit to the current position
|
||||
if alloc.size_unit == 'currency':
|
||||
startup_size = startup_pp.size * startup_pp.avg_price
|
||||
|
||||
if startup_size > alloc.currency_limit:
|
||||
alloc.currency_limit = round(startup_size, ndigits=2)
|
||||
|
||||
else:
|
||||
startup_size = abs(startup_pp.size)
|
||||
|
||||
if startup_size > alloc.units_limit:
|
||||
alloc.units_limit = startup_size
|
||||
|
||||
if asset_type in _derivs:
|
||||
alloc.slots = alloc.units_limit
|
||||
|
||||
return alloc
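# XXX: a quick worked sizing example (assuming `step_sizes()` splits
# each limit evenly across `slots`): with the defaults above,
# currency_limit=5e3 and slots=4 give a currency-per-slot of
# 5000 / 4 = 1250, so at a price of 25 per unit `next_order_info()`
# proposes an entry of 1250 / 25 = 50 units (capped by whatever
# portion of the limit is not already allocated to the live pp).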
@ -0,0 +1,421 @@
|
|||
# piker: trading gear for hackers
|
||||
# Copyright (C) Tyler Goodlet (in stewardship for pikers)
|
||||
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU Affero General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU Affero General Public License for more details.
|
||||
|
||||
# You should have received a copy of the GNU Affero General Public License
|
||||
# along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
'''
|
||||
Trade and transaction ledger processing.
|
||||
|
||||
'''
|
||||
from __future__ import annotations
|
||||
from collections import UserDict
|
||||
from contextlib import contextmanager as cm
|
||||
from functools import partial
|
||||
from pathlib import Path
|
||||
from pprint import pformat
|
||||
from types import ModuleType
|
||||
from typing import (
|
||||
Any,
|
||||
Callable,
|
||||
Generator,
|
||||
Literal,
|
||||
TYPE_CHECKING,
|
||||
)
|
||||
|
||||
from pendulum import (
|
||||
DateTime,
|
||||
)
|
||||
import tomli_w # for fast ledger writing
|
||||
|
||||
from piker.types import Struct
|
||||
from piker import config
|
||||
from ..log import get_logger
|
||||
from .calc import (
|
||||
iter_by_dt,
|
||||
)
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from ..data._symcache import (
|
||||
SymbologyCache,
|
||||
)
|
||||
|
||||
log = get_logger(__name__)
|
||||
|
||||
|
||||
TxnType = Literal[
|
||||
'clear',
|
||||
'transfer',
|
||||
|
||||
# TODO: see https://github.com/pikers/piker/issues/510
|
||||
# 'split',
|
||||
# 'rename',
|
||||
# 'resize',
|
||||
# 'removal',
|
||||
]
|
||||
|
||||
|
||||
class Transaction(Struct, frozen=True):
|
||||
|
||||
# NOTE: this is a unified acronym also used in our `MktPair`
|
||||
# and can stand for any of a
|
||||
# "fully qualified <blank> endpoint":
|
||||
# - "market" in the case of financial trades
|
||||
# (btcusdt.spot.binance).
|
||||
# - "merkel (tree)" aka a blockchain system "wallet tranfers"
|
||||
# (btc.blockchain)
|
||||
# - "money" for tradtitional (digital databases)
|
||||
# *bank accounts* (usd.swift, eur.sepa)
|
||||
fqme: str
|
||||
|
||||
tid: str | int # unique transaction id
|
||||
size: float
|
||||
price: float
|
||||
cost: float # commissions or other additional costs
|
||||
dt: DateTime
|
||||
|
||||
# the "event type" in terms of "market events" see above and
|
||||
# https://github.com/pikers/piker/issues/510
|
||||
etype: TxnType = 'clear'
|
||||
|
||||
# TODO: we can drop this right since we
|
||||
# can instead expect the backend to provide this
|
||||
# via the `MktPair`?
|
||||
expiry: DateTime | None = None
|
||||
|
||||
# (optional) key-id defined by the broker-service backend which
|
||||
# ensures the instrument-symbol market key for this record is unique
|
||||
# in the "their backend/system" sense; i.e. this uid for the market
|
||||
# as defined (internally) in some namespace defined by the broker
|
||||
# service.
|
||||
bs_mktid: str | int | None = None
|
||||
|
||||
def to_dict(
|
||||
self,
|
||||
**kwargs,
|
||||
) -> dict:
|
||||
dct: dict[str, Any] = super().to_dict(**kwargs)
|
||||
|
||||
# ensure we use a pendulum formatted
|
||||
# ISO style str here!@
|
||||
dct['dt'] = str(self.dt)
|
||||
|
||||
return dct
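# XXX: a minimal usage sketch of a clear-type record (all values are
# hypothetical):
#
# >>> from pendulum import now
# >>> t = Transaction(
# ...     fqme='btcusdt.spot.binance',
# ...     tid='1234',
# ...     size=0.01,
# ...     price=26_000.0,
# ...     cost=0.1,
# ...     dt=now(),
# ... )
# >>> t.etype
# 'clear'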
|
||||
|
||||
|
||||
class TransactionLedger(UserDict):
|
||||
'''
|
||||
Very simple ``dict`` wrapper + ``pathlib.Path`` handle to
|
||||
a TOML formatted transaction file for enabling file writes
|
||||
dynamically whilst still looking exactly like a ``dict`` from the
|
||||
outside.
|
||||
|
||||
'''
|
||||
# NOTE: see `open_trade_ledger()` for defaults, this should
|
||||
# never be constructed manually!
|
||||
def __init__(
|
||||
self,
|
||||
ledger_dict: dict,
|
||||
file_path: Path,
|
||||
account: str,
|
||||
mod: ModuleType, # broker mod
|
||||
tx_sort: Callable,
|
||||
symcache: SymbologyCache,
|
||||
|
||||
) -> None:
|
||||
self.account: str = account
|
||||
self.file_path: Path = file_path
|
||||
self.mod: ModuleType = mod
|
||||
self.tx_sort: Callable = tx_sort
|
||||
|
||||
self._symcache: SymbologyCache = symcache
|
||||
|
||||
# any added txns we keep in that form for meta-data
|
||||
# gathering purposes
|
||||
self._txns: dict[str, Transaction] = {}
|
||||
|
||||
super().__init__(ledger_dict)
|
||||
|
||||
def __repr__(self) -> str:
|
||||
return (
|
||||
f'TransactionLedger: {len(self)}\n'
|
||||
f'{pformat(list(self.data))}'
|
||||
)
|
||||
|
||||
@property
|
||||
def symcache(self) -> SymbologyCache:
|
||||
'''
|
||||
Read-only ref to backend's ``SymbologyCache``.
|
||||
|
||||
'''
|
||||
return self._symcache
|
||||
|
||||
def update_from_t(
|
||||
self,
|
||||
t: Transaction,
|
||||
) -> None:
|
||||
'''
|
||||
Given an input `Transaction`, cast to `dict` and update
|
||||
from its transaction id.
|
||||
|
||||
'''
|
||||
self.data[t.tid] = t.to_dict()
|
||||
self._txns[t.tid] = t
|
||||
|
||||
def iter_txns(
|
||||
self,
|
||||
symcache: SymbologyCache | None = None,
|
||||
|
||||
) -> Generator[
|
||||
Transaction,
|
||||
None,
|
||||
None,
|
||||
]:
|
||||
'''
|
||||
Deliver trade records as datetime-sorted ``Transaction``s
via generator.
|
||||
|
||||
'''
|
||||
symcache = symcache or self._symcache
|
||||
|
||||
if self.account == 'paper':
|
||||
from piker.clearing import _paper_engine
|
||||
norm_trade: Callable = partial(
|
||||
_paper_engine.norm_trade,
|
||||
brokermod=self.mod,
|
||||
)
|
||||
|
||||
else:
|
||||
norm_trade: Callable = self.mod.norm_trade
|
||||
|
||||
# datetime-sort and pack into txs
|
||||
for tid, txdict in self.tx_sort(self.data.items()):
|
||||
txn: Transaction = norm_trade(
|
||||
tid,
|
||||
txdict,
|
||||
pairs=symcache.pairs,
|
||||
symcache=symcache,
|
||||
)
|
||||
yield txn
|
||||
|
||||
def to_txns(
|
||||
self,
|
||||
symcache: SymbologyCache | None = None,
|
||||
|
||||
) -> dict[str, Transaction]:
|
||||
'''
|
||||
Return entire output from ``.iter_txns()`` in a ``dict``.
|
||||
|
||||
'''
|
||||
txns: dict[str, Transaction] = {}
|
||||
for t in self.iter_txns(symcache=symcache):
|
||||
|
||||
if not t:
|
||||
log.warning(f'{self.mod.name}:{self.account} TXN is -> {t}')
|
||||
continue
|
||||
|
||||
txns[t.tid] = t
|
||||
|
||||
return txns
|
||||
|
||||
def write_config(self) -> None:
|
||||
'''
|
||||
Render the self.data ledger dict to its TOML file form.
|
||||
|
||||
ALWAYS order datetime sorted!
|
||||
|
||||
'''
|
||||
is_paper: bool = self.account == 'paper'
|
||||
|
||||
symcache: SymbologyCache = self._symcache
|
||||
towrite: dict[str, Any] = {}
|
||||
for tid, txdict in self.tx_sort(self.data.copy()):
|
||||
# write blank-str expiry for non-expiring assets
|
||||
if (
|
||||
'expiry' in txdict
|
||||
and txdict['expiry'] is None
|
||||
):
|
||||
txdict['expiry'] = ''
|
||||
|
||||
# (maybe) re-write old acro-key
|
||||
if (
|
||||
is_paper
|
||||
# if symcache is empty/not supported (yet), don't
|
||||
# bother xD
|
||||
and symcache.mktmaps
|
||||
):
|
||||
fqme: str = txdict.pop('fqsn', None) or txdict['fqme']
|
||||
bs_mktid: str | None = txdict.get('bs_mktid')
|
||||
|
||||
if (
|
||||
|
||||
fqme not in symcache.mktmaps
|
||||
or (
|
||||
# also try to see if this is maybe a paper
|
||||
# engine ledger in which case the bs_mktid
|
||||
# should be the fqme as well!
|
||||
bs_mktid
|
||||
and fqme != bs_mktid
|
||||
)
|
||||
):
|
||||
# always take any (paper) bs_mktid if defined and
|
||||
# in the backend's cache key set.
|
||||
if bs_mktid in symcache.mktmaps:
|
||||
fqme: str = bs_mktid
|
||||
else:
|
||||
best_fqme: str = list(symcache.search(fqme))[0]
|
||||
log.warning(
|
||||
f'Could not find FQME: {fqme} in qualified set?\n'
|
||||
f'Qualifying and expanding {fqme} -> {best_fqme}'
|
||||
)
|
||||
fqme = best_fqme
|
||||
|
||||
if (
|
||||
bs_mktid
|
||||
and bs_mktid != fqme
|
||||
):
|
||||
# in paper account case always make sure both the
|
||||
# fqme and bs_mktid are fully qualified..
|
||||
txdict['bs_mktid'] = fqme
|
||||
|
||||
# in paper ledgers always write the latest
|
||||
# symbology key field: an FQME.
|
||||
txdict['fqme'] = fqme
|
||||
|
||||
towrite[tid] = txdict
|
||||
|
||||
with self.file_path.open(mode='wb') as fp:
|
||||
tomli_w.dump(towrite, fp)
|
||||
|
||||
|
||||
def load_ledger(
|
||||
brokername: str,
|
||||
acctid: str,
|
||||
|
||||
# for testing or manual load from file
|
||||
dirpath: Path | None = None,
|
||||
|
||||
) -> tuple[dict, Path]:
|
||||
'''
|
||||
Load a ledger (TOML) file from user's config directory:
|
||||
$CONFIG_DIR/accounting/ledgers/trades_<brokername>_<acctid>.toml
|
||||
|
||||
Return its `dict`-content and file path.
|
||||
|
||||
'''
|
||||
import time
|
||||
try:
|
||||
import tomllib
|
||||
except ModuleNotFoundError:
|
||||
import tomli as tomllib
|
||||
|
||||
ldir: Path = (
|
||||
dirpath
|
||||
or
|
||||
config._config_dir / 'accounting' / 'ledgers'
|
||||
)
|
||||
if not ldir.is_dir():
|
||||
ldir.mkdir()
|
||||
|
||||
fname = f'trades_{brokername}_{acctid}.toml'
|
||||
fpath: Path = ldir / fname
|
||||
|
||||
if not fpath.is_file():
|
||||
log.info(
|
||||
f'Creating new local trades ledger: {fpath}'
|
||||
)
|
||||
fpath.touch()
|
||||
|
||||
with fpath.open(mode='rb') as cf:
|
||||
start = time.time()
|
||||
ledger_dict = tomllib.load(cf)
|
||||
log.debug(f'Ledger load took {time.time() - start}s')
|
||||
|
||||
return ledger_dict, fpath
|
||||
|
||||
|
||||
@cm
|
||||
def open_trade_ledger(
|
||||
broker: str,
|
||||
account: str,
|
||||
|
||||
allow_from_sync_code: bool = False,
|
||||
symcache: SymbologyCache | None = None,
|
||||
|
||||
# default is to sort by detected datetime-ish field
|
||||
tx_sort: Callable = iter_by_dt,
|
||||
rewrite: bool = False,
|
||||
|
||||
# for testing or manual load from file
|
||||
_fp: Path | None = None,
|
||||
|
||||
) -> Generator[TransactionLedger, None, None]:
|
||||
'''
|
||||
Idempotently create and read in a trade log file from the
|
||||
``<configuration_dir>/ledgers/`` directory.
|
||||
|
||||
Files are named per broker account of the form
|
||||
``trades_<brokername>_<accountname>.toml``. The ``accountname`` here is the
|
||||
name as defined in the user's ``brokers.toml`` config.
|
||||
|
||||
'''
|
||||
from ..brokers import get_brokermod
|
||||
mod: ModuleType = get_brokermod(broker)
|
||||
|
||||
ledger_dict, fpath = load_ledger(
|
||||
broker,
|
||||
account,
|
||||
dirpath=_fp,
|
||||
)
|
||||
cpy = ledger_dict.copy()
|
||||
|
||||
# XXX NOTE: if not provided presume we are being called from
|
||||
# sync code and need to maybe run `trio` to generate..
|
||||
if symcache is None:
|
||||
|
||||
# XXX: be mega pedantic and ensure the caller knows what
|
||||
# they're doing!
|
||||
if not allow_from_sync_code:
|
||||
raise RuntimeError(
|
||||
'You MUST set `allow_from_sync_code=True` when '
|
||||
'calling `open_trade_ledger()` from sync code! '
|
||||
'If you are calling from async code you MUST '
|
||||
'instead pass a `symcache: SymbologyCache`!'
|
||||
)
|
||||
|
||||
from ..data._symcache import (
|
||||
get_symcache,
|
||||
)
|
||||
symcache: SymbologyCache = get_symcache(broker)
|
||||
|
||||
assert symcache
|
||||
|
||||
ledger = TransactionLedger(
|
||||
ledger_dict=cpy,
|
||||
file_path=fpath,
|
||||
account=account,
|
||||
mod=mod,
|
||||
symcache=symcache,
|
||||
tx_sort=getattr(mod, 'tx_sort', tx_sort),
|
||||
)
|
||||
try:
|
||||
yield ledger
|
||||
finally:
|
||||
if (
|
||||
ledger.data != ledger_dict
|
||||
or rewrite
|
||||
):
|
||||
# TODO: show diff output?
|
||||
# https://stackoverflow.com/questions/12956957/print-diff-of-python-dictionaries
|
||||
log.info(f'Updating ledger for {fpath}:\n')
|
||||
ledger.write_config()
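# XXX: a sync-code usage sketch (broker/account names are
# illustrative) per the `allow_from_sync_code` guard above:
#
# with open_trade_ledger(
#     'binance',
#     'paper',
#     allow_from_sync_code=True,
# ) as ledger:
#     txns: dict[str, Transaction] = ledger.to_txns()
#     # any mutation of `ledger` (a `dict` subclass) is flushed back
#     # to the TOML file on context exit.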
@ -0,0 +1,766 @@
|
|||
# piker: trading gear for hackers
|
||||
# Copyright (C) Tyler Goodlet (in stewardship for pikers)
|
||||
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU Affero General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU Affero General Public License for more details.
|
||||
|
||||
# You should have received a copy of the GNU Affero General Public License
|
||||
# along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
'''
|
||||
Market (pair) meta-info layer: sane addressing semantics and meta-data
|
||||
for cross-provider marketplaces.
|
||||
|
||||
We introduce the concept of,
|
||||
|
||||
- a FQMA: fully qualified market address,
|
||||
- a sane schema for FQMAs including derivatives,
|
||||
- a msg-serializeable description of markets for
|
||||
easy sharing with other pikers B)
|
||||
|
||||
'''
|
||||
from __future__ import annotations
|
||||
from decimal import (
|
||||
Decimal,
|
||||
ROUND_HALF_EVEN,
|
||||
)
|
||||
from typing import (
|
||||
Any,
|
||||
Literal,
|
||||
)
|
||||
|
||||
from piker.types import Struct
|
||||
|
||||
|
||||
# TODO: make these literals..
|
||||
_underlyings: list[str] = [
|
||||
'stock',
|
||||
'bond',
|
||||
'crypto',
|
||||
'fiat',
|
||||
'commodity',
|
||||
]
|
||||
|
||||
_crypto_derivs: list[str] = [
|
||||
'perpetual_future',
|
||||
'crypto_future',
|
||||
]
|
||||
|
||||
_derivs: list[str] = [
|
||||
'swap',
|
||||
'future',
|
||||
'continuous_future',
|
||||
'option',
|
||||
'futures_option',
|
||||
|
||||
# if we can't figure it out, presume the worst XD
|
||||
'unknown',
|
||||
]
|
||||
|
||||
# NOTE: a tag for other subsystems to try
|
||||
# and do default settings for certain things:
|
||||
# - allocator does unit vs. dolla size limiting.
|
||||
AssetTypeName: Literal[
|
||||
_underlyings
|
||||
+
|
||||
_derivs
|
||||
+
|
||||
_crypto_derivs
|
||||
]
|
||||
|
||||
# egs. stock, future, option, bond etc.
|
||||
|
||||
|
||||
def dec_digits(
|
||||
value: float | str | Decimal,
|
||||
|
||||
) -> int:
|
||||
'''
|
||||
Return the number of precision digits read from a decimal or float
|
||||
value.
|
||||
|
||||
'''
|
||||
if value == 0:
|
||||
return 0
|
||||
|
||||
return int(
|
||||
-Decimal(str(value)).as_tuple().exponent
|
||||
)
|
||||
|
||||
|
||||
float_digits = dec_digits
|
||||
|
||||
|
||||
def digits_to_dec(
|
||||
ndigits: int,
|
||||
) -> Decimal:
|
||||
'''
|
||||
Return the minimum ``Decimal`` step value for a given number of precision digits.
|
||||
|
||||
eg. 3 -> 0.001
|
||||
|
||||
'''
|
||||
if ndigits == 0:
|
||||
return Decimal('0')
|
||||
|
||||
return Decimal('0.' + '0'*(ndigits-1) + '1')
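# XXX: the two helpers are (roughly) inverses of one another, eg.
#
# >>> dec_digits('0.001')
# 3
# >>> digits_to_dec(3)
# Decimal('0.001')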
|
||||
|
||||
|
||||
class Asset(Struct, frozen=True):
|
||||
'''
|
||||
Container type describing any transactable asset and its
|
||||
contract-like and/or underlying technology meta-info.
|
||||
|
||||
'''
|
||||
name: str
|
||||
atype: str # AssetTypeName
|
||||
|
||||
# minimum transaction size / precision.
|
||||
# eg. for buttcoin this is a "satoshi".
|
||||
tx_tick: Decimal
|
||||
|
||||
# NOTE: additional info optionally packed in by the backend, but
|
||||
# should not be explicitly required in our generic API.
|
||||
info: dict | None = None
|
||||
|
||||
# `None` is not toml-compat so drop info
|
||||
# if no extra data added..
|
||||
def to_dict(
|
||||
self,
|
||||
**kwargs,
|
||||
) -> dict:
|
||||
dct = super().to_dict(**kwargs)
|
||||
if (info := dct.pop('info', None)):
|
||||
dct['info'] = info
|
||||
|
||||
assert dct['tx_tick']
|
||||
return dct
|
||||
|
||||
@classmethod
|
||||
def from_msg(
|
||||
cls,
|
||||
msg: dict[str, Any],
|
||||
) -> Asset:
|
||||
return cls(
|
||||
tx_tick=Decimal(str(msg.pop('tx_tick'))),
|
||||
info=msg.pop('info', None),
|
||||
**msg,
|
||||
)
|
||||
|
||||
def __str__(self) -> str:
|
||||
return self.name
|
||||
|
||||
def quantize(
|
||||
self,
|
||||
size: float,
|
||||
|
||||
) -> Decimal:
|
||||
'''
|
||||
Truncate input ``size: float`` using ``Decimal``
|
||||
quantized form of the digit precision defined
|
||||
by ``self.tx_tick``.
|
||||
|
||||
'''
|
||||
digits = float_digits(self.tx_tick)
|
||||
return Decimal(size).quantize(
|
||||
Decimal(f'1.{"0".ljust(digits, "0")}'),
|
||||
rounding=ROUND_HALF_EVEN
|
||||
)
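# XXX: a quantization usage sketch with a hypothetical fiat asset:
#
# >>> usd = Asset(name='usd', atype='fiat', tx_tick=Decimal('0.01'))
# >>> usd.quantize(0.129)
# Decimal('0.13')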
|
||||
|
||||
@classmethod
|
||||
def guess_from_mkt_ep_key(
|
||||
cls,
|
||||
mkt_ep_key: str,
|
||||
atype: str | None = None,
|
||||
|
||||
) -> Asset:
|
||||
'''
|
||||
A hacky guess method for presuming a (target) asset's properties
|
||||
based on either the actual market endpoint key, or config settings
|
||||
from the user.
|
||||
|
||||
'''
|
||||
atype = atype or 'unknown'
|
||||
|
||||
# attempt to strip off any source asset
|
||||
# via presumed syntax of:
|
||||
# - <dst>/<src>
|
||||
# - <dst>.<src>
|
||||
# - etc.
|
||||
for char in ['/', '.']:
|
||||
dst, _, src = mkt_ep_key.partition(char)
|
||||
if src:
|
||||
if not atype:
|
||||
atype = 'fiat'
|
||||
break
|
||||
|
||||
return Asset(
|
||||
name=dst,
|
||||
atype=atype,
|
||||
tx_tick=Decimal('0.01'),
|
||||
)
|
||||
|
||||
|
||||
def maybe_cons_tokens(
|
||||
tokens: list[Any],
|
||||
delim_char: str = '.',
|
||||
) -> str:
|
||||
'''
|
||||
Construct `str` output from a maybe-concatenation of input
|
||||
sequence of elements in ``tokens``.
|
||||
|
||||
'''
|
||||
return delim_char.join(filter(bool, tokens)).lower()
|
||||
|
||||
|
||||
class MktPair(Struct, frozen=True):
|
||||
'''
|
||||
Market description for a pair of assets which are tradeable:
|
||||
a market which enables transactions of the form,
|
||||
buy: source asset -> destination asset
|
||||
sell: destination asset -> source asset
|
||||
|
||||
The main intention of this type is for a **simple** cross-asset
|
||||
venue/broker normalized description type from which all
|
||||
market-auctions can be mapped from FQME identifiers.
|
||||
|
||||
TODO: our eventual target fqme format/schema is:
|
||||
<dst>/<src>.<expiry>.<con_info_1>.<con_info_2>. -> .<venue>.<broker>
|
||||
^ -- optional tokens ------------------------------- ^
|
||||
|
||||
|
||||
Notes:
|
||||
------
|
||||
|
||||
Some venues provide a different semantic (which we frankly find
|
||||
confusing and non-general) such as "base" and "quote" asset.
|
||||
For example this is how `binance` defines the terms:
|
||||
|
||||
https://binance-docs.github.io/apidocs/websocket_api/en/#public-api-definitions
|
||||
https://binance-docs.github.io/apidocs/futures/en/#public-endpoints-info
|
||||
|
||||
- *base* asset refers to the asset that is the *quantity* of a symbol.
|
||||
- *quote* asset refers to the asset that is the *price* of a symbol.
|
||||
|
||||
In other words the "quote" asset is the asset that the market
|
||||
is pricing "buys" *in*, and the *base* asset it the one that the market
|
||||
allows you to "buy" an *amount of*. Put more simply the *quote*
|
||||
asset is our "source" asset and the *base* asset is our "destination"
|
||||
asset.
|
||||
|
||||
This definition can be further understood by reading our
|
||||
`.brokers.binance.api.Pair` type wherein the
|
||||
`Pair.[quote/base]AssetPrecision` field determines the (transfer)
|
||||
transaction precision available per asset; i.e. the satoshis
|
||||
unit in bitcoin for representing the minimum size of a
|
||||
transaction that can take place on the blockchain.
|
||||
|
||||
'''
|
||||
dst: str | Asset
|
||||
# "destination asset" (name) used to buy *to*
|
||||
# (or used to sell *from*)
|
||||
|
||||
price_tick: Decimal # minimum price increment
|
||||
size_tick: Decimal # minimum size (aka vlm) increment
|
||||
# the tick size is the number describing the smallest step in value
|
||||
# available in this market between the source and destination
|
||||
# assets.
|
||||
# https://en.wikipedia.org/wiki/Tick_size
|
||||
# https://en.wikipedia.org/wiki/Commodity_tick
|
||||
# https://en.wikipedia.org/wiki/Percentage_in_point
|
||||
|
||||
# unique "broker id" since every market endpoint provider
|
||||
# has their own nomenclature and schema for market maps.
|
||||
bs_mktid: str
|
||||
broker: str # the middle man giving access
|
||||
|
||||
# NOTE: to start this field is optional but should eventually be
|
||||
# required; the reason is for backward compat since more positioning
|
||||
# calculations were not originally stored with a src asset..
|
||||
|
||||
src: str | Asset = ''
|
||||
# "source asset" (name) used to buy *from*
|
||||
# (or used to sell *to*).
|
||||
|
||||
venue: str = '' # market venue provider name
|
||||
expiry: str = '' # for derivs, expiry datetime parseable str
|
||||
|
||||
# destination asset's financial type/classification name
|
||||
# NOTE: this is required for the order size allocator system,
|
||||
# since we use different default settings based on the type
|
||||
# of the destination asset, eg. futes use a units limits vs.
|
||||
# equities a $limit.
|
||||
# dst_type: AssetTypeName | None = None
|
||||
|
||||
# source asset's financial type/classification name
|
||||
# TODO: is a src type required for trading?
|
||||
# there's no reason to need any more then the one-way alloc-limiter
|
||||
# config right?
|
||||
# src_type: AssetTypeName
|
||||
|
||||
# for derivs, info describing contract, egs.
|
||||
# strike price, call or put, swap type, exercise model, etc.
|
||||
contract_info: list[str] | None = None
|
||||
|
||||
# TODO: rename to sectype since all of these can
|
||||
# be considered "securities"?
|
||||
_atype: str = ''
|
||||
|
||||
# allow explicit disable of the src part of the market
|
||||
# pair name -> useful for legacy markets like qqq.nasdaq.ib
|
||||
_fqme_without_src: bool = False
|
||||
|
||||
# NOTE: when cast to `str` return fqme
|
||||
def __str__(self) -> str:
|
||||
return self.fqme
|
||||
|
||||
def to_dict(
|
||||
self,
|
||||
**kwargs,
|
||||
) -> dict:
|
||||
d = super().to_dict(**kwargs)
|
||||
d['src'] = self.src.to_dict(**kwargs)
|
||||
|
||||
if not isinstance(self.dst, str):
|
||||
d['dst'] = self.dst.to_dict(**kwargs)
|
||||
else:
|
||||
d['dst'] = str(self.dst)
|
||||
|
||||
d['price_tick'] = str(self.price_tick)
|
||||
d['size_tick'] = str(self.size_tick)
|
||||
|
||||
if self.contract_info is None:
|
||||
d.pop('contract_info')
|
||||
|
||||
# d.pop('_fqme_without_src')
|
||||
|
||||
return d
|
||||
|
||||
@classmethod
|
||||
def from_msg(
|
||||
cls,
|
||||
msg: dict[str, Any],
|
||||
|
||||
) -> MktPair:
|
||||
'''
|
||||
Constructor for a received msg-dict normally received over IPC.
|
||||
|
||||
'''
|
||||
if not isinstance(
|
||||
dst_asset_msg := msg.pop('dst'),
|
||||
str,
|
||||
):
|
||||
dst: Asset = Asset.from_msg(dst_asset_msg) # .copy()
|
||||
else:
|
||||
dst: str = dst_asset_msg
|
||||
|
||||
src_asset_msg: dict = msg.pop('src')
|
||||
src: Asset = Asset.from_msg(src_asset_msg) # .copy()
|
||||
|
||||
# XXX NOTE: ``msgspec`` can encode `Decimal` but it doesn't
|
||||
# decode to it by default since we aren't spec-cing these
|
||||
# msgs as structs proper to get them to decode implicitly
|
||||
# (yet) as per,
|
||||
# - https://github.com/pikers/piker/pull/354
|
||||
# - https://github.com/goodboy/tractor/pull/311
|
||||
# SO we have to ensure we do a struct type
|
||||
# cast (which `.copy()` does) to ensure we get the right
|
||||
# type!
|
||||
return cls(
|
||||
dst=dst,
|
||||
src=src,
|
||||
price_tick=Decimal(msg.pop('price_tick')),
|
||||
size_tick=Decimal(msg.pop('size_tick')),
|
||||
**msg,
|
||||
).copy()
|
||||
|
||||
@property
|
||||
def resolved(self) -> bool:
|
||||
return isinstance(self.dst, Asset)
|
||||
|
||||
@classmethod
|
||||
def from_fqme(
|
||||
cls,
|
||||
fqme: str,
|
||||
|
||||
price_tick: float | str,
|
||||
size_tick: float | str,
|
||||
bs_mktid: str,
|
||||
|
||||
broker: str | None = None,
|
||||
**kwargs,
|
||||
|
||||
) -> MktPair:
|
||||
|
||||
_fqme: str = fqme
|
||||
if (
|
||||
broker
|
||||
and broker not in fqme
|
||||
):
|
||||
_fqme = f'{fqme}.{broker}'
|
||||
|
||||
broker, mkt_ep_key, venue, expiry = unpack_fqme(_fqme)
|
||||
|
||||
kven: str = kwargs.pop('venue', venue)
|
||||
if venue:
|
||||
assert venue == kven
|
||||
else:
|
||||
venue = kven
|
||||
|
||||
exp: str = kwargs.pop('expiry', expiry)
|
||||
if expiry:
|
||||
assert exp == expiry
|
||||
else:
|
||||
expiry = exp
|
||||
|
||||
dst: Asset = Asset.guess_from_mkt_ep_key(
|
||||
mkt_ep_key,
|
||||
atype=kwargs.get('_atype'),
|
||||
)
|
||||
|
||||
# XXX: loading from a fqme string will
|
||||
# leave this pair as "un resolved" meaning
|
||||
# we don't yet have `.dst` set as an `Asset`
|
||||
# which we expect to be filled in by some
|
||||
# backend client with access to that data-info.
|
||||
return cls(
|
||||
dst=dst,
|
||||
# XXX: not resolved to ``Asset`` :(
|
||||
#src=src,
|
||||
|
||||
broker=broker,
|
||||
venue=venue,
|
||||
# XXX NOTE: we presume this token
|
||||
# is the expiry for now!
|
||||
expiry=expiry,
|
||||
|
||||
price_tick=price_tick,
|
||||
size_tick=size_tick,
|
||||
bs_mktid=bs_mktid,
|
||||
|
||||
**kwargs,
|
||||
|
||||
).copy()
|
||||
|
||||
@property
|
||||
def key(self) -> str:
|
||||
'''
|
||||
The "endpoint key" for this market.
|
||||
|
||||
'''
|
||||
return self.pair
|
||||
|
||||
def pair(
|
||||
self,
|
||||
delim_char: str | None = None,
|
||||
) -> str:
|
||||
'''
|
||||
The "endpoint asset pair key" for this market.
|
||||
Eg. mnq/usd or btc/usdt or xmr/btc
|
||||
|
||||
In most other tina platforms this is referred to as the
|
||||
"symbol".
|
||||
|
||||
'''
|
||||
return maybe_cons_tokens(
|
||||
[str(self.dst),
|
||||
str(self.src)],
|
||||
# TODO: make the default '/'
|
||||
delim_char=delim_char or '',
|
||||
)
|
||||
|
||||
@property
|
||||
def suffix(self) -> str:
|
||||
'''
|
||||
The "contract suffix" for this market.
|
||||
|
||||
Eg. mnq/usd.20230616.cme.ib
|
||||
^ ----- ^
|
||||
or tsla/usd.20230324.200c.cboe.ib
|
||||
^ ---------- ^
|
||||
|
||||
In most other tina platforms they only show you these details in
|
||||
some kinda "meta data" format, we have FQMEs so we do this up
|
||||
front and explicit.
|
||||
|
||||
'''
|
||||
field_strs = [self.expiry]
|
||||
con_info = self.contract_info
|
||||
if con_info is not None:
|
||||
field_strs.extend(con_info)
|
||||
|
||||
return maybe_cons_tokens(field_strs)
|
||||
|
||||
def get_fqme(
|
||||
self,
|
||||
|
||||
# NOTE: allow dropping the source asset from the
|
||||
# market endpoint's pair key. Eg. to change
|
||||
# mnq/usd.<> -> mnq.<> which is useful when
|
||||
# searching (legacy) stock exchanges.
|
||||
without_src: bool = False,
|
||||
delim_char: str | None = None,
|
||||
|
||||
) -> str:
|
||||
'''
|
||||
Return the fully qualified market endpoint-address for the
|
||||
pair of transacting assets.
|
||||
|
||||
fqme = "fully qualified market endpoint"
|
||||
|
||||
And yes, you pronounce it colloquially as read..
|
||||
|
||||
Basically the idea here is for all client code (consumers of piker's
|
||||
APIs which query the data/broker-provider agnostic layer(s)) should be
|
||||
able to tell which backend / venue / derivative each data feed/flow is
|
||||
from by an explicit string-key of the current form:
|
||||
|
||||
<market-instrument-name>
|
||||
.<venue>
|
||||
.<expiry>
|
||||
.<derivative-suffix-info>
|
||||
.<brokerbackendname>
|
||||
|
||||
eg. for an explicit nasdaq mini futes contract: mnq.cme.20230317.ib
|
||||
|
||||
TODO: I have thoughts that we should actually change this to be
|
||||
more like an "attr lookup" (like how the web should have done
|
||||
urls, but marketing peeps ruined it etc. etc.)
|
||||
|
||||
<broker>.<venue>.<instrumentname>.<suffixwithmetadata>
|
||||
|
||||
TODO:
|
||||
See community discussion on naming and nomenclature, order
|
||||
of addressing hierarchy, general schema, internal representation:
|
||||
|
||||
https://github.com/pikers/piker/issues/467
|
||||
|
||||
'''
|
||||
key: str = (
|
||||
self.pair(delim_char=delim_char)
|
||||
if not (without_src or self._fqme_without_src)
|
||||
else str(self.dst)
|
||||
)
|
||||
|
||||
return maybe_cons_tokens([
|
||||
key, # final "pair name" (eg. qqq[/usd], btcusdt)
|
||||
self.venue,
|
||||
self.suffix, # includes expiry and other con info
|
||||
self.broker,
|
||||
])
|
||||
|
||||
# NOTE: the main idea behind an fqme is to map a "market address"
|
||||
# to some endpoint from a transaction provider (eg. a broker) such
|
||||
# that we build a table of `fqme: str -> bs_mktid: Any` where any "piker
|
||||
# market address" maps 1-to-1 to some broker trading endpoint.
|
||||
# @cached_property
|
||||
fqme = property(get_fqme)
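# XXX: a round-trip sketch for the futes example from the docstring
# above (the `bs_mktid` value here is hypothetical):
#
# mkt = MktPair.from_fqme(
#     'mnq.cme.20230317.ib',
#     price_tick='0.25',
#     size_tick='1',
#     bs_mktid='495512572',
# )
# assert mkt.fqme == 'mnq.cme.20230317.ib'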
|
||||
|
||||
def get_bs_fqme(
|
||||
self,
|
||||
**kwargs,
|
||||
) -> str:
|
||||
'''
|
||||
FQME sin broker part XD
|
||||
|
||||
'''
|
||||
sin_broker, *_ = self.get_fqme(**kwargs).rpartition('.')
|
||||
return sin_broker
|
||||
|
||||
bs_fqme = property(get_bs_fqme)
|
||||
|
||||
@property
|
||||
def fqsn(self) -> str:
|
||||
return self.fqme
|
||||
|
||||
def quantize(
|
||||
self,
|
||||
size: float,
|
||||
|
||||
quantity_type: Literal['price', 'size'] = 'size',
|
||||
|
||||
) -> Decimal:
|
||||
'''
|
||||
Truncate input ``size: float`` using ``Decimal``
|
||||
and ``.size_tick``'s # of digits.
|
||||
|
||||
'''
|
||||
match quantity_type:
|
||||
case 'price':
|
||||
digits = float_digits(self.price_tick)
|
||||
case 'size':
|
||||
digits = float_digits(self.size_tick)
|
||||
|
||||
return Decimal(size).quantize(
|
||||
Decimal(f'1.{"0".ljust(digits, "0")}'),
|
||||
rounding=ROUND_HALF_EVEN
|
||||
)
|
||||
|
||||
# TODO: BACKWARD COMPAT, TO REMOVE?
|
||||
@property
|
||||
def type_key(self) -> str:
|
||||
|
||||
# if set explicitly then use it!
|
||||
if self._atype:
|
||||
return self._atype
|
||||
|
||||
if isinstance(self.dst, Asset):
|
||||
return str(self.dst.atype)
|
||||
|
||||
return 'UNKNOWN'
|
||||
|
||||
@property
|
||||
def price_tick_digits(self) -> int:
|
||||
return float_digits(self.price_tick)
|
||||
|
||||
@property
|
||||
def size_tick_digits(self) -> int:
|
||||
return float_digits(self.size_tick)
|
||||
|
||||
|
||||
def unpack_fqme(
|
||||
fqme: str,
|
||||
|
||||
broker: str | None = None
|
||||
|
||||
) -> tuple[str, ...]:
|
||||
'''
|
||||
Unpack a fully-qualified-symbol-name to ``tuple``.
|
||||
|
||||
'''
|
||||
venue = ''
|
||||
suffix = ''
|
||||
|
||||
# TODO: probably reverse the order of all this XD
|
||||
tokens = fqme.split('.')
|
||||
|
||||
match tokens:
|
||||
case [mkt_ep, broker]:
|
||||
# probably crypto
|
||||
return (
|
||||
broker,
|
||||
mkt_ep,
|
||||
'',
|
||||
'',
|
||||
)
|
||||
|
||||
# TODO: swap venue and suffix/deriv-info here?
|
||||
case [mkt_ep, venue, suffix, broker]:
|
||||
pass
|
||||
|
||||
# handle `bs_mktid` + `broker` input case
|
||||
case [
|
||||
mkt_ep, venue, suffix
|
||||
] if (
|
||||
broker
|
||||
and suffix != broker
|
||||
):
|
||||
pass
|
||||
|
||||
case [mkt_ep, venue, broker]:
|
||||
suffix = ''
|
||||
|
||||
case _:
|
||||
raise ValueError(f'Invalid fqme: {fqme}')
|
||||
|
||||
return (
|
||||
broker,
|
||||
mkt_ep,
|
||||
venue,
|
||||
# '.'.join([mkt_ep, venue]),
|
||||
suffix,
|
||||
)
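# XXX: unpacking examples for the 2 and 4 token cases:
#
# >>> unpack_fqme('btcusdt.binance')
# ('binance', 'btcusdt', '', '')
# >>> unpack_fqme('mnq.cme.20230317.ib')
# ('ib', 'mnq', 'cme', '20230317')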
|
||||
|
||||
|
||||
class Symbol(Struct):
|
||||
'''
|
||||
I guess this is some kinda container thing for dealing with
|
||||
all the different meta-data formats from brokers?
|
||||
|
||||
'''
|
||||
key: str
|
||||
|
||||
broker: str = ''
|
||||
venue: str = ''
|
||||
|
||||
# precision descriptors for price and vlm
|
||||
tick_size: Decimal = Decimal('0.01')
|
||||
lot_tick_size: Decimal = Decimal('0.0')
|
||||
|
||||
suffix: str = ''
|
||||
broker_info: dict[str, dict[str, Any]] = {}
|
||||
|
||||
@classmethod
|
||||
def from_fqme(
|
||||
cls,
|
||||
fqsn: str,
|
||||
info: dict[str, Any],
|
||||
|
||||
) -> Symbol:
|
||||
broker, mktep, venue, suffix = unpack_fqme(fqsn)
|
||||
tick_size = info.get('price_tick_size', 0.01)
|
||||
lot_size = info.get('lot_tick_size', 0.0)
|
||||
|
||||
return Symbol(
|
||||
broker=broker,
|
||||
key=mktep,
|
||||
tick_size=tick_size,
|
||||
lot_tick_size=lot_size,
|
||||
venue=venue,
|
||||
suffix=suffix,
|
||||
broker_info={broker: info},
|
||||
)
|
||||
|
||||
@property
|
||||
def type_key(self) -> str:
|
||||
return list(self.broker_info.values())[0]['asset_type']
|
||||
|
||||
@property
|
||||
def tick_size_digits(self) -> int:
|
||||
return float_digits(self.tick_size)
|
||||
|
||||
@property
|
||||
def lot_size_digits(self) -> int:
|
||||
return float_digits(self.lot_tick_size)
|
||||
|
||||
@property
|
||||
def price_tick(self) -> Decimal:
|
||||
return Decimal(str(self.tick_size))
|
||||
|
||||
@property
|
||||
def size_tick(self) -> Decimal:
|
||||
return Decimal(str(self.lot_tick_size))
|
||||
|
||||
@property
|
||||
def broker(self) -> str:
|
||||
return list(self.broker_info.keys())[0]
|
||||
|
||||
@property
|
||||
def fqme(self) -> str:
|
||||
return maybe_cons_tokens([
|
||||
self.key, # final "pair name" (eg. qqq[/usd], btcusdt)
|
||||
self.venue,
|
||||
self.suffix, # includes expiry and other con info
|
||||
self.broker,
|
||||
])
|
||||
|
||||
def quantize(
|
||||
self,
|
||||
size: float,
|
||||
) -> Decimal:
|
||||
digits = float_digits(self.lot_tick_size)
|
||||
return Decimal(size).quantize(
|
||||
Decimal(f'1.{"0".ljust(digits, "0")}'),
|
||||
rounding=ROUND_HALF_EVEN
|
||||
)
|
||||
|
||||
# NOTE: when cast to `str` return fqme
|
||||
def __str__(self) -> str:
|
||||
return self.fqme
@ -0,0 +1,983 @@
|
|||
# piker: trading gear for hackers
|
||||
# Copyright (C) Tyler Goodlet (in stewardship for pikers)
|
||||
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU Affero General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU Affero General Public License for more details.
|
||||
|
||||
# You should have received a copy of the GNU Affero General Public License
|
||||
# along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
'''
|
||||
Personal/Private position parsing, calculating, summarizing in a way
|
||||
that doesn't try to cuk most humans who prefer to not lose their moneys..
|
||||
|
||||
(looking at you `ib` and dirt-bird friends)
|
||||
|
||||
'''
|
||||
from __future__ import annotations
|
||||
from contextlib import contextmanager as cm
|
||||
from decimal import Decimal
|
||||
from pprint import pformat
|
||||
from pathlib import Path
|
||||
from types import ModuleType
|
||||
from typing import (
|
||||
Any,
|
||||
Iterator,
|
||||
Generator
|
||||
)
|
||||
|
||||
import pendulum
|
||||
from pendulum import (
|
||||
datetime,
|
||||
now,
|
||||
)
|
||||
import polars as pl
|
||||
import tomlkit
|
||||
|
||||
from ._ledger import (
|
||||
Transaction,
|
||||
TransactionLedger,
|
||||
)
|
||||
from ._mktinfo import (
|
||||
MktPair,
|
||||
Asset,
|
||||
unpack_fqme,
|
||||
)
|
||||
from .calc import (
|
||||
ppu,
|
||||
# iter_by_dt,
|
||||
)
|
||||
from .. import config
|
||||
from ..clearing._messages import (
|
||||
BrokerdPosition,
|
||||
)
|
||||
from piker.types import Struct
|
||||
from piker.data._symcache import SymbologyCache
|
||||
from ..log import get_logger
|
||||
|
||||
log = get_logger(__name__)
|
||||
|
||||
|
||||
class Position(Struct):
|
||||
'''
|
||||
An asset "position" model with attached clearing transaction history.
|
||||
|
||||
A financial "position" in `piker` terms is a summary of accounting
|
||||
metrics computed from a transaction ledger; generally it describes
|
||||
some accumulative "size" and "average price" from the summarized
|
||||
underlying transaction set.
|
||||
|
||||
In piker we focus on the `.ppu` (price per unit) and the `.bep`
|
||||
(break even price) including all transaction entries and exits since
|
||||
the last "net-zero" size of the destination asset's holding.
|
||||
|
||||
This interface serves as an object API for computing and
|
||||
tracking positions as well as supports serialization for
|
||||
storage in the local file system (in TOML) and to interchange
|
||||
as a msg over IPC.
|
||||
|
||||
'''
|
||||
mkt: MktPair
|
||||
|
||||
# can be +ve or -ve for long/short
|
||||
# size: float
|
||||
|
||||
# "price-per-unit price" above or below which pnl moves above and
|
||||
# below zero for the entirety of the current "trade state". The ppu
|
||||
# is only modified on "increases of" the absolute size of a position
|
||||
# in one of a long/short "direction" (i.e. abs(.size_i) > 0 after
|
||||
# the next transaction given .size was > 0 before that tx, and vice
|
||||
# versa for -ve sized positions).
|
||||
# ppu: float
|
||||
|
||||
# TODO: break-even-price support!
|
||||
# bep: float
|
||||
|
||||
# unique "backend system market id"
|
||||
bs_mktid: str
|
||||
|
||||
split_ratio: int | None = None
|
||||
|
||||
# TODO: use a `pl.DataFrame` instead?
|
||||
_events: dict[str, Transaction | dict] = {}
|
||||
|
||||
@property
|
||||
def expiry(self) -> datetime | None:
|
||||
'''
|
||||
Security expiry if it has a limited lifetime.
|
||||
|
||||
For non-derivative markets this is normally `None`.
|
||||
|
||||
'''
|
||||
exp: str | None = self.mkt.expiry
|
||||
if exp is None:
|
||||
return None
|
||||
|
||||
match exp.lower():
|
||||
# empty str, 'perp' (contract) or simply a null
|
||||
# signifies instrument with NO expiry.
|
||||
case 'perp' | '' | None:
|
||||
return None
|
||||
|
||||
case str():
|
||||
return pendulum.parse(exp)
|
||||
|
||||
case _:
|
||||
raise ValueError(
|
||||
f'Unhandled `MktPair.expiry`: `{exp}`'
|
||||
)
|
||||
|
||||
# TODO: idea: "real LIFO" dynamic positioning.
|
||||
# - when a trade takes place where the pnl for
|
||||
# the (set of) trade(s) is below the breakeven price
|
||||
# it may be that the trader took a +ve pnl on a short(er)
|
||||
# term trade in the same account.
|
||||
# - in this case we could recalc the be price to
|
||||
# be reverted back to its prior value before the nearest term
|
||||
# trade was opened?
|
||||
# def bep() -> float:
|
||||
# ...
|
||||
def clears_df(self) -> pl.DataFrame:
|
||||
...
|
||||
|
||||
def clearsitems(self) -> list[tuple[str, dict]]:
|
||||
return ppu(
|
||||
self.iter_by_type('clear'),
|
||||
as_ledger=True
|
||||
)
|
||||
|
||||
def iter_by_type(
|
||||
self,
|
||||
etype: str,
|
||||
|
||||
) -> Iterator[dict | Transaction]:
|
||||
'''
|
||||
Iterate the internally managed ``._events: dict`` table in
|
||||
datetime-stamped order.
|
||||
|
||||
'''
|
||||
# sort on the expected datetime field
|
||||
# for event in iter_by_dt(
|
||||
for event in sorted(
|
||||
self._events.values(),
|
||||
key=lambda entry: entry.dt
|
||||
):
|
||||
# if event.etype == etype:
|
||||
match event:
|
||||
case (
|
||||
{'etype': _etype} |
|
||||
Transaction(etype=str(_etype))
|
||||
):
|
||||
assert _etype == etype
|
||||
yield event
|
||||
|
||||
|
||||
def minimized_clears(self) -> dict[str, dict]:
|
||||
'''
|
||||
Minimize the position's clears entries by removing
|
||||
all transactions before the last net zero size except for when
|
||||
a clear event causes a position "side" change (i.e. long to short
|
||||
after a single fill) wherein we store the transaction prior to the
|
||||
net-zero pass.
|
||||
|
||||
This avoids unnecessary history irrelevant to the current
|
||||
non-net-zero size state when serializing for offline storage.
|
||||
|
||||
'''
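# (editorial sketch, illustrative sizes only) given a cumsize series
# like 1 -> 2 -> 0 -> 3 -> 1, only the clears after the last net-zero
# crossing (those building up the current 3 -> 1 state) need to be
# serialized; everything before the zero crossing is dropped.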
|
||||
# scan for the last "net zero" position by iterating
|
||||
# transactions until the next net-zero cumsize, rinse,
|
||||
# repeat.
|
||||
cumsize: float = 0
|
||||
clears_since_zero: list[dict] = []
|
||||
|
||||
for tid, cleardict in self.clearsitems():
|
||||
cumsize = float(
|
||||
# self.mkt.quantize(cumsize + cleardict['tx'].size
|
||||
self.mkt.quantize(cleardict['cumsize'])
|
||||
)
|
||||
clears_since_zero.append(cleardict)
|
||||
|
||||
# NOTE: always pop sign change since we just use it to
|
||||
# determine which entry to clear "up to".
|
||||
sign_change: bool = cleardict.pop('sign_change')
|
||||
if cumsize == 0:
|
||||
clears_since_zero = clears_since_zero[:-2]
|
||||
# clears_since_zero.clear()
|
||||
|
||||
elif sign_change:
|
||||
clears_since_zero = clears_since_zero[:-1]
|
||||
|
||||
return clears_since_zero
|
||||
|
||||
def to_pretoml(self) -> tuple[str, dict]:
|
||||
'''
|
||||
Prep this position's data contents for export as an entry
|
||||
in a TOML "account file" (such as
|
||||
`account.binance.paper.toml`) including re-structuring of
|
||||
the ``._events`` entries as an array of inline-subtables
|
||||
for better ``pps.toml`` compactness.
|
||||
|
||||
'''
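# (editorial sketch) an approximate example of the per-position entry
# this generates for the account file; ALL values below are made up
# for illustration and the exact field order may differ:
#
#   ["xmrusdt.usdtm.perp"]
#   bs_mktid = "12345"
#   asset_type = "perp"
#   price_tick = 0.01
#   size_tick = 0.001
#   clears = [
#       {price = 120.0, size = 1.0, cost = 0.05, dt = 2023-01-01T00:00:00Z, tid = "deadbeef"},
#   ]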
|
||||
mkt: MktPair = self.mkt
|
||||
assert isinstance(mkt, MktPair)
|
||||
|
||||
# TODO: we need to figure out how to have one top level
|
||||
# listing venue here even when the backend isn't providing
|
||||
# it via the trades ledger..
|
||||
# drop symbol obj in serialized form
|
||||
fqme: str = mkt.fqme
|
||||
broker, mktep, venue, suffix = unpack_fqme(fqme)
|
||||
|
||||
# an asset resolved mkt where we have ``Asset`` info about
|
||||
# each tradeable asset in the market.
|
||||
asset_type: str = 'n/a'
|
||||
if mkt.resolved:
|
||||
dst: Asset = mkt.dst
|
||||
asset_type = dst.atype
|
||||
|
||||
asdict: dict[str, Any] = {
|
||||
'bs_mktid': self.bs_mktid,
|
||||
# 'expiry': self.expiry or '',
|
||||
'asset_type': asset_type,
|
||||
'price_tick': mkt.price_tick,
|
||||
'size_tick': mkt.size_tick,
|
||||
}
|
||||
if exp := self.expiry:
|
||||
asdict['expiry'] = exp
|
||||
|
||||
clears_since_zero: list[dict] = self.minimized_clears()
|
||||
|
||||
# setup a "multi-line array of inline tables" which we call
|
||||
# the "clears table", contained by each position entry in
|
||||
# an "account file".
|
||||
clears_table: tomlkit.Array = tomlkit.array()
|
||||
clears_table.multiline(
|
||||
multiline=True,
|
||||
indent='',
|
||||
)
|
||||
|
||||
for entry in clears_since_zero:
|
||||
inline_table = tomlkit.inline_table()
|
||||
|
||||
# insert optional clear fields in column order
|
||||
for k in ['ppu', 'cumsize']:
|
||||
if val := entry.get(k):
|
||||
inline_table[k] = val
|
||||
|
||||
# insert required fields
|
||||
for k in ['price', 'size', 'cost']:
|
||||
inline_table[k] = entry[k]
|
||||
|
||||
# NOTE: we don't actually need to serialize datetime to parsable `str`
|
||||
# since `tomlkit` supports a native `DateTime` but
|
||||
# seems like we're not doing it entirely in clearing
|
||||
# tables yet?
|
||||
inline_table['dt'] = entry['dt'] # .isoformat('T')
|
||||
|
||||
tid: str = entry['tid']
|
||||
inline_table['tid'] = tid
|
||||
clears_table.append(inline_table)
|
||||
|
||||
# assert not events
|
||||
asdict['clears'] = clears_table
|
||||
|
||||
return fqme, asdict
|
||||
|
||||
def update_from_msg(
|
||||
self,
|
||||
msg: BrokerdPosition,
|
||||
|
||||
) -> None:
|
||||
'''
|
||||
Hard-set the current position from a remotely-received
|
||||
(normally via IPC) msg by applying the msg as the one (and
|
||||
only) txn in the `._events` table thus forcing the current
|
||||
asset allocation blindly.
|
||||
|
||||
'''
|
||||
mkt: MktPair = self.mkt
|
||||
now_dt: pendulum.DateTime = now()
|
||||
now_str: str = str(now_dt)
|
||||
|
||||
# XXX: wipe all prior txn history since if we wanted it we wouldn't
|
||||
# be using this method to compute our state!
|
||||
self._events.clear()
|
||||
|
||||
# NOTE WARNING XXX: we summarize the pos with a single
|
||||
# summary transaction (for now) until we either pass THIS
|
||||
# type as msg directly from emsd or come up with a better
|
||||
# way?
|
||||
t = Transaction(
|
||||
fqme=mkt.fqme,
|
||||
bs_mktid=mkt.bs_mktid,
|
||||
size=msg['size'],
|
||||
price=msg['avg_price'],
|
||||
cost=0,
|
||||
|
||||
# NOTE: special provisions required!
|
||||
# - tid needs to be unique or this txn will be ignored!!
|
||||
tid=now_str,
|
||||
|
||||
# TODO: also figure out how to avoid this!
|
||||
dt=now_dt,
|
||||
)
|
||||
self.add_clear(t)
|
||||
|
||||
@property
|
||||
def dsize(self) -> float:
|
||||
'''
|
||||
The "dollar" size of the pp, normally in source asset
|
||||
(fiat) units.
|
||||
|
||||
'''
|
||||
return self.ppu * self.cumsize
|
||||
|
||||
def expired(self) -> bool:
|
||||
'''
|
||||
Predicate which checks if the contract/instrument is past
|
||||
its expiry.
|
||||
|
||||
'''
|
||||
return bool(self.expiry) and self.expiry < now()
|
||||
|
||||
def add_clear(
|
||||
self,
|
||||
t: Transaction,
|
||||
) -> bool:
|
||||
'''
|
||||
Update clearing table by calculating the rolling ppu and
|
||||
(accumulative) size in both the clears entry and local
|
||||
attrs state.
|
||||
|
||||
Inserts are always done in datetime sorted order.
|
||||
|
||||
'''
|
||||
# added: bool = False
|
||||
tid: str = t.tid
|
||||
if tid in self._events:
|
||||
log.warning(f'{t} is already added?!')
|
||||
# return added
|
||||
|
||||
# TODO: apparently this IS possible with a dict but not
|
||||
# common and probably not that beneficial unless we're also
|
||||
# going to do cum-calcs on each insert?
|
||||
# https://stackoverflow.com/questions/38079171/python-insert-new-element-into-sorted-list-of-dictionaries
|
||||
# from bisect import insort
|
||||
# insort(
|
||||
# self._clears,
|
||||
# clear,
|
||||
# key=lambda entry: entry['dt']
|
||||
# )
|
||||
self._events[tid] = t
|
||||
return True
|
||||
|
||||
# TODO: compute these incrementally instead
|
||||
# of re-looping through each time resulting in O(n**2)
|
||||
# behaviour..? Can we have some kinda clears len to cached
|
||||
# output subsys?
|
||||
def calc_ppu(self) -> float:
|
||||
return ppu(self.iter_by_type('clear'))
|
||||
|
||||
# # return self.clearsdict()
|
||||
# # )
|
||||
# return list(self.clearsdict())[-1][1]['ppu']
|
||||
|
||||
@property
|
||||
def ppu(self) -> float:
|
||||
return round(
|
||||
self.calc_ppu(),
|
||||
ndigits=self.mkt.price_tick_digits,
|
||||
)
|
||||
|
||||
def calc_size(self) -> float:
|
||||
'''
|
||||
Calculate the unit size of this position in the destination
|
||||
asset using the clears/trade event table; zero if expired.
|
||||
|
||||
'''
|
||||
# time-expired pps (normally derivatives) are "closed"
|
||||
# and have a zero size.
|
||||
if self.expired():
|
||||
return 0.
|
||||
|
||||
clears: list[tuple[str, dict]] = self.clearsitems()
|
||||
if clears:
|
||||
return clears[-1][1]['cumsize']
|
||||
else:
|
||||
return 0.
|
||||
|
||||
# if self.split_ratio is not None:
|
||||
# size = round(size * self.split_ratio)
|
||||
|
||||
# return float(
|
||||
# self.mkt.quantize(size),
|
||||
# )
|
||||
|
||||
# TODO: ideally we don't implicitly recompute the
|
||||
# full sequence from `.clearsdict()` every read..
|
||||
# the writer-updates-local-attr-state was actually kinda nice
|
||||
# before, but sometimes led to hard to detect bugs when
|
||||
# state was de-synced.
|
||||
@property
|
||||
def cumsize(self) -> float:
|
||||
|
||||
if (
|
||||
self.expiry
|
||||
and self.expiry < now()
|
||||
):
|
||||
return 0
|
||||
|
||||
return round(
|
||||
self.calc_size(),
|
||||
ndigits=self.mkt.size_tick_digits,
|
||||
)
|
||||
|
||||
# TODO: once we have an `.events` table with diff
|
||||
# mkt event types..?
|
||||
# def suggest_split(self) -> float:
|
||||
# ...
|
||||
|
||||
|
||||
class Account(Struct):
|
||||
'''
|
||||
The real-time (double-entry accounting) state of
|
||||
a given **asset ownership tracking system**, normally offered
|
||||
or measured from some brokerage, CEX or (implied virtual)
|
||||
summary crypto$ "wallets" aggregated and tracked over some set
|
||||
of DEX-es.
|
||||
|
||||
Both market-mapped and ledger-system-native (aka inter-account
|
||||
"transfers") transactions are accounted and they pertain to
|
||||
(implied) PnL relative to any other accountable asset.
|
||||
|
||||
More specifically in piker terms, an account tracks all of:
|
||||
|
||||
- the *balances* of all assets currently available for use either
|
||||
in (future) market or (inter-account/wallet) transfer
|
||||
transactions.
|
||||
- a transaction *ledger* from a given brokerd backend which
|
||||
is a recording of all (known) such transactions from the past.
|
||||
- a set of financial *positions* as measured from the current
|
||||
ledger state.
|
||||
|
||||
See the semantic origins of double-entry bookkeeping:
|
||||
https://en.wikipedia.org/wiki/Double-entry_bookkeeping
|
||||
|
||||
'''
|
||||
mod: ModuleType
|
||||
acctid: str
|
||||
pps: dict[str, Position]
|
||||
|
||||
conf_path: Path
|
||||
conf: dict | None = {}
|
||||
|
||||
# TODO: track a table of asset balances as `.balances:
|
||||
# dict[Asset, float]`?
|
||||
|
||||
@property
|
||||
def brokername(self) -> str:
|
||||
return self.mod.name
|
||||
|
||||
def update_from_ledger(
|
||||
self,
|
||||
ledger: TransactionLedger | dict[str, Transaction],
|
||||
cost_scalar: float = 2,
|
||||
symcache: SymbologyCache | None = None,
|
||||
|
||||
_mktmap_table: dict[str, MktPair] | None = None,
|
||||
|
||||
) -> dict[str, Position]:
|
||||
'''
|
||||
Update the internal `.pps: dict[str, Position]` table from input
|
||||
transactions recomputing the price-per-unit (ppu) and
|
||||
accumulative size for each entry.
|
||||
|
||||
'''
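# (editorial sketch) typical call pattern, assuming `ledger` is a
# `TransactionLedger` opened for this same broker/account:
#
#   updated: dict[str, Position] = acnt.update_from_ledger(ledger)
#
# only positions actually touched by the input txns are returned.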
|
||||
if (
|
||||
not isinstance(ledger, TransactionLedger)
|
||||
):
|
||||
if symcache is None:
|
||||
raise RuntimeError(
|
||||
'No ledger provided!\n'
|
||||
'We cannot determine the `MktPair`s without a symcache..\n'
|
||||
'Please provide `symcache: SymbologyCache` when '
|
||||
'processing NEW positions!'
|
||||
)
|
||||
itertxns = sorted(
|
||||
ledger.values(),
|
||||
key=lambda t: t.dt,
|
||||
)
|
||||
else:
|
||||
itertxns = ledger.iter_txns()
|
||||
symcache = ledger.symcache
|
||||
|
||||
pps = self.pps
|
||||
updated: dict[str, Position] = {}
|
||||
|
||||
# lifo update all pps from records, ensuring
|
||||
# we compute the PPU and size sorted in time!
|
||||
for txn in itertxns:
|
||||
fqme: str = txn.fqme
|
||||
bs_mktid: str = txn.bs_mktid
|
||||
|
||||
# template the mkt-info presuming a legacy market ticks
|
||||
# if no info exists in the transactions..
|
||||
try:
|
||||
mkt: MktPair = symcache.mktmaps[fqme]
|
||||
except KeyError:
|
||||
if _mktmap_table is None:
|
||||
raise
|
||||
|
||||
# XXX: caller is allowed to provide a fallback
|
||||
# mktmap table for the case where a new position is
|
||||
# being added and the preloaded symcache didn't
|
||||
# have this entry prior (eg. with frickin IB..)
|
||||
mkt = _mktmap_table[fqme]
|
||||
|
||||
if not (pos := pps.get(bs_mktid)):
|
||||
|
||||
assert isinstance(
|
||||
mkt,
|
||||
MktPair,
|
||||
)
|
||||
|
||||
# if no existing pos, allocate fresh one.
|
||||
pos = pps[bs_mktid] = Position(
|
||||
mkt=mkt,
|
||||
bs_mktid=bs_mktid,
|
||||
)
|
||||
else:
|
||||
# NOTE: if for some reason a "less resolved" mkt pair
|
||||
# info has been set (based on the `.fqme` being
|
||||
# a shorter string), instead use the one from the
|
||||
# transaction since it likely has (more) full
|
||||
# information from the provider.
|
||||
if len(pos.mkt.fqme) < len(fqme):
|
||||
pos.mkt = mkt
|
||||
|
||||
# update clearing acnt!
|
||||
# NOTE: likely you'll see repeats of the same
|
||||
# ``Transaction`` passed in here if/when you are
|
||||
# restarting a ``brokerd.ib`` where the API will
|
||||
# re-report trades from the current session, so we need
|
||||
# to make sure we don't "double count" these in pp
|
||||
# calculations; `Position.add_clear()` stores txs in
|
||||
# a `._events: dict[tid, tx]` which should always
|
||||
# ensure this is true!
|
||||
pos.add_clear(txn)
|
||||
updated[txn.bs_mktid] = pos
|
||||
|
||||
# NOTE: deliver only the position entries that were
|
||||
# actually updated (modified the state) from the input
|
||||
# transaction set.
|
||||
return updated
|
||||
|
||||
def dump_active(
|
||||
self,
|
||||
) -> tuple[
|
||||
dict[str, Position],
|
||||
dict[str, Position]
|
||||
]:
|
||||
'''
|
||||
Iterate all tabulated positions, render active positions to
|
||||
a ``dict`` format amenable to serialization (via TOML) and drop
|
||||
from state (``.pps``) as well as return in a ``dict`` all
|
||||
``Position``s which have recently closed.
|
||||
|
||||
'''
|
||||
# NOTE: newly closed positions are also important to report/return
|
||||
# since a consumer, like an order mode UI ;), might want to react
|
||||
# based on the closure (for example removing the breakeven line
|
||||
# and clearing the entry from any lists/monitors).
|
||||
closed_pp_objs: dict[str, Position] = {}
|
||||
open_pp_objs: dict[str, Position] = {}
|
||||
|
||||
pp_objs = self.pps
|
||||
for bs_mktid in list(pp_objs):
|
||||
pos = pp_objs[bs_mktid]
|
||||
# pos.ensure_state()
|
||||
|
||||
# "net-zero" is a "closed" position
|
||||
if pos.cumsize == 0:
|
||||
# NOTE: we DO NOT pop the pos here since it can still be
|
||||
# used to check for duplicate clears that may come in as
|
||||
# new transaction from some backend API and need to be
|
||||
# ignored; the closed positions won't be written to the
|
||||
# ``pps.toml`` since ``pp_active_entries`` above is what's
|
||||
# written.
|
||||
closed_pp_objs[bs_mktid] = pos
|
||||
|
||||
else:
|
||||
open_pp_objs[bs_mktid] = pos
|
||||
|
||||
return open_pp_objs, closed_pp_objs
|
||||
|
||||
def prep_toml(
|
||||
self,
|
||||
active: dict[str, Position] | None = None,
|
||||
|
||||
) -> dict[str, Any]:
|
||||
|
||||
if active is None:
|
||||
active, _ = self.dump_active()
|
||||
|
||||
# ONLY dict-serialize all active positions; those that are
|
||||
# closed we don't store in the ``pps.toml``.
|
||||
to_toml_dict: dict[str, Any] = {}
|
||||
|
||||
pos: Position
|
||||
for bs_mktid, pos in active.items():
|
||||
# pos.ensure_state()
|
||||
|
||||
# serialize to pre-toml form
|
||||
# NOTE: we only store the minimal amount of clears that
|
||||
# make up this position since the last net-zero state,
|
||||
# see `Position.to_pretoml()` for details
|
||||
fqme, asdict = pos.to_pretoml()
|
||||
|
||||
# clears: list[dict] = asdict['clears']
|
||||
# assert 'Datetime' not in [0]['dt']
|
||||
log.info(f'Updating active pp: {fqme}')
|
||||
|
||||
# XXX: ugh, it's cuz we push the section under
|
||||
# the broker name.. maybe we need to rethink this?
|
||||
brokerless_key = fqme.removeprefix(f'{self.brokername}.')
|
||||
to_toml_dict[brokerless_key] = asdict
|
||||
|
||||
return to_toml_dict
|
||||
|
||||
def write_config(self) -> None:
|
||||
'''
|
||||
Write the current account state to the user's account TOML file, normally
|
||||
something like ``pps.toml``.
|
||||
|
||||
'''
|
||||
# TODO: show diff output?
|
||||
# https://stackoverflow.com/questions/12956957/print-diff-of-python-dictionaries
|
||||
# active, closed_pp_objs = acnt.dump_active()
|
||||
|
||||
active, closed = self.dump_active()
|
||||
pp_entries = self.prep_toml(active=active)
|
||||
if pp_entries:
|
||||
log.info(
|
||||
f'Updating positions in ``{self.conf_path}``:\n'
|
||||
f'{pformat(pp_entries)}'
|
||||
)
|
||||
|
||||
if self.brokername in self.conf:
|
||||
log.warning(
|
||||
f'Rewriting {self.conf_path} keys to drop <broker.acct>!'
|
||||
)
|
||||
# legacy key schema including <brokername.account>, so
|
||||
# rewrite all entries to drop those tables since we now
|
||||
# put that in the filename!
|
||||
accounts = self.conf.pop(self.brokername)
|
||||
assert len(accounts) == 1
|
||||
entries = accounts.pop(self.acctid)
|
||||
self.conf.update(entries)
|
||||
|
||||
self.conf.update(pp_entries)
|
||||
|
||||
# drop any entries that are computed as net-zero
|
||||
# we don't care about storing in the pps file.
|
||||
if closed:
|
||||
bs_mktid: str
|
||||
for bs_mktid, pos in closed.items():
|
||||
fqme: str = pos.mkt.fqme
|
||||
if fqme in self.conf:
|
||||
self.conf.pop(fqme)
|
||||
else:
|
||||
# TODO: we really need a diff set of
|
||||
# loglevels/colors per subsys.
|
||||
log.warning(
|
||||
f'Recent position for {fqme} was closed!'
|
||||
)
|
||||
|
||||
# if there are no active position entries according
|
||||
# to the toml dump output above, then clear the config
|
||||
# file of all entries.
|
||||
elif self.conf:
|
||||
for entry in list(self.conf):
|
||||
del self.conf[entry]
|
||||
|
||||
# XXX WTF: if we use a tomlkit.Integer here we get this
|
||||
# super weird --1 thing going on for cumsize!?1!
|
||||
# NOTE: the fix was to always float() the size value loaded
|
||||
# in open_pps() below!
|
||||
config.write(
|
||||
config=self.conf,
|
||||
path=self.conf_path,
|
||||
fail_empty=False,
|
||||
)
|
||||
|
||||
|
||||
def load_account(
|
||||
brokername: str,
|
||||
acctid: str,
|
||||
|
||||
dirpath: Path | None = None,
|
||||
|
||||
) -> tuple[dict, Path]:
|
||||
'''
|
||||
Load an accounting (with positions) file from
|
||||
$CONFIG_DIR/accounting/account.<brokername>.<acctid>.toml
|
||||
|
||||
Where normally $CONFIG_DIR = ~/.config/piker/
|
||||
and we implicitly create an accounting subdir which should
|
||||
normally be linked to a git repo managed by the user B)
|
||||
|
||||
'''
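# (editorial note) eg. `load_account('binance', 'paper')` reads (and
# creates if missing) the file
# `~/.config/piker/accounting/account.binance.paper.toml`; the broker
# and account names here are only illustrative.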
|
||||
legacy_fn: str = f'pps.{brokername}.{acctid}.toml'
|
||||
fn: str = f'account.{brokername}.{acctid}.toml'
|
||||
|
||||
dirpath: Path = dirpath or (config._config_dir / 'accounting')
|
||||
if not dirpath.is_dir():
|
||||
dirpath.mkdir()
|
||||
|
||||
conf, path = config.load(
|
||||
path=dirpath / fn,
|
||||
decode=tomlkit.parse,
|
||||
touch_if_dne=True,
|
||||
)
|
||||
|
||||
if not conf:
|
||||
legacypath = dirpath / legacy_fn
|
||||
log.warning(
|
||||
f'Your account file is using the legacy `pps.` prefix..\n'
|
||||
f'Rewriting contents to new name -> {path}\n'
|
||||
'Please delete the old file!\n'
|
||||
f'|-> {legacypath}\n'
|
||||
)
|
||||
if legacypath.is_file():
|
||||
legacy_config, _ = config.load(
|
||||
path=legacypath,
|
||||
|
||||
# TODO: move to tomlkit:
|
||||
# - needs to be fixed to support bidict?
|
||||
# https://github.com/sdispater/tomlkit/issues/289
|
||||
# - we need to use our fork's fix to do multiline array
|
||||
# indenting.
|
||||
decode=tomlkit.parse,
|
||||
)
|
||||
conf.update(legacy_config)
|
||||
|
||||
# XXX: override the presumably previously non-existent
|
||||
# file with legacy's contents.
|
||||
config.write(
|
||||
conf,
|
||||
path=path,
|
||||
fail_empty=False,
|
||||
)
|
||||
|
||||
return conf, path
|
||||
|
||||
|
||||
# TODO: make this async and offer a `get_account()` that
|
||||
# can be used from sync code which does the same thing as
|
||||
# open_trade_ledger()!
|
||||
@cm
|
||||
def open_account(
|
||||
brokername: str,
|
||||
acctid: str,
|
||||
write_on_exit: bool = False,
|
||||
|
||||
# for testing or manual load from file
|
||||
_fp: Path | None = None,
|
||||
|
||||
) -> Generator[Account, None, None]:
|
||||
'''
|
||||
Read out broker-specific position entries from
|
||||
incremental update file: ``pps.toml``.
|
||||
|
||||
'''
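# (editorial sketch) typical usage, broker/account names illustrative:
#
#   with open_account('binance', 'paper') as acnt:
#       acnt.update_from_ledger(ledger)
#       acnt.write_config()
#
# or pass `write_on_exit=True` to persist automatically on exit.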
|
||||
conf: dict
|
||||
conf_path: Path
|
||||
conf, conf_path = load_account(
|
||||
brokername,
|
||||
acctid,
|
||||
dirpath=_fp,
|
||||
)
|
||||
|
||||
if brokername in conf:
|
||||
log.warning(
|
||||
f'Rewriting {conf_path} keys to drop <broker.acct>!'
|
||||
)
|
||||
# legacy key schema including <brokername.account>, so
|
||||
# rewrite all entries to drop those tables since we now
|
||||
# put that in the filename!
|
||||
accounts = conf.pop(brokername)
|
||||
for acctid in accounts.copy():
|
||||
entries = accounts.pop(acctid)
|
||||
conf.update(entries)
|
||||
|
||||
# TODO: ideally we can pass in an existing
|
||||
# pps state to this right? such that we
|
||||
# don't have to do a ledger reload all the
|
||||
# time.. a couple ideas I can think of,
|
||||
# - mirror this in some client side actor which
|
||||
# does the actual ledger updates (say the paper
|
||||
# engine proc if we decide to always spawn it?),
|
||||
# - do diffs against updates from the ledger writer
|
||||
# actor and the in-mem state here?
|
||||
from ..brokers import get_brokermod
|
||||
mod: ModuleType = get_brokermod(brokername)
|
||||
|
||||
pp_objs: dict[str, Position] = {}
|
||||
acnt = Account(
|
||||
mod,
|
||||
acctid,
|
||||
pp_objs,
|
||||
conf_path,
|
||||
conf=conf,
|
||||
)
|
||||
|
||||
# unmarshal/load ``pps.toml`` config entries into object form
|
||||
# and update `Account` obj entries.
|
||||
for fqme, entry in conf.items():
|
||||
|
||||
# unique broker-backend-system market id
|
||||
bs_mktid = str(
|
||||
entry.get('bsuid')
|
||||
or entry.get('bs_mktid')
|
||||
)
|
||||
price_tick = Decimal(str(
|
||||
entry.get('price_tick_size')
|
||||
or entry.get('price_tick')
|
||||
or '0.01'
|
||||
))
|
||||
size_tick = Decimal(str(
|
||||
entry.get('lot_tick_size')
|
||||
or entry.get('size_tick')
|
||||
or '0.0'
|
||||
))
|
||||
|
||||
# load the pair using the fqme which
|
||||
# will make the pair "unresolved" until
|
||||
# the backend broker actually loads
|
||||
# the market and position info.
|
||||
mkt = MktPair.from_fqme(
|
||||
fqme,
|
||||
price_tick=price_tick,
|
||||
size_tick=size_tick,
|
||||
bs_mktid=bs_mktid,
|
||||
)
|
||||
|
||||
# TODO: RE: general "events" instead of just "clears":
|
||||
# - make this an `events` field and support more event types
|
||||
# such as 'split', 'name_change', 'mkt_info', etc..
|
||||
# - should we make a ``Struct`` for clear/event entries? convert
|
||||
# "clear events table" from the toml config (list of a dicts)
|
||||
# and load it into object form for use in position processing of
|
||||
# new clear events.
|
||||
|
||||
# convert clears sub-tables (only in this form
|
||||
# for toml re-presentation) back into a master table.
|
||||
toml_clears_list: list[dict[str, Any]] = entry['clears']
|
||||
trans: list[Transaction] = []
|
||||
|
||||
for clears_table in toml_clears_list:
|
||||
tid = clears_table['tid']
|
||||
dt: tomlkit.items.DateTime | str = clears_table['dt']
|
||||
|
||||
# woa cool, `tomlkit` will actually load datetimes into
|
||||
# native form B)
|
||||
if isinstance(dt, str):
|
||||
dt = pendulum.parse(dt)
|
||||
|
||||
clears_table['dt'] = dt
|
||||
trans.append(Transaction(
|
||||
fqme=bs_mktid,
|
||||
# sym=mkt,
|
||||
bs_mktid=bs_mktid,
|
||||
tid=tid,
|
||||
# XXX: not sure why sometimes these are loaded as
|
||||
# `tomlkit.Integer` and are eventually written with
|
||||
# an extra `-` in front like `--1`?
|
||||
size=float(clears_table['size']),
|
||||
price=float(clears_table['price']),
|
||||
cost=clears_table['cost'],
|
||||
dt=dt,
|
||||
))
|
||||
|
||||
split_ratio = entry.get('split_ratio')
|
||||
|
||||
# if a string-ified expiry field is loaded we try to parse
|
||||
# it, THO, they should normally be serialized as native
|
||||
# TOML datetimes, since that's supported.
|
||||
if (
|
||||
(expiry := entry.get('expiry'))
|
||||
and isinstance(expiry, str)
|
||||
):
|
||||
expiry: pendulum.DateTime = pendulum.parse(expiry)
|
||||
|
||||
pp = pp_objs[bs_mktid] = Position(
|
||||
mkt,
|
||||
split_ratio=split_ratio,
|
||||
bs_mktid=bs_mktid,
|
||||
)
|
||||
|
||||
# XXX: super critical, we need to be sure to include
|
||||
# all pps.toml clears to avoid reusing clears that were
|
||||
# already included in the current incremental update
|
||||
# state, since today's records may have already been
|
||||
# processed!
|
||||
for t in trans:
|
||||
pp.add_clear(t)
|
||||
|
||||
try:
|
||||
yield acnt
|
||||
finally:
|
||||
if write_on_exit:
|
||||
acnt.write_config()
|
||||
|
||||
|
||||
# TODO: drop the old name and THIS!
|
||||
@cm
|
||||
def open_pps(
|
||||
*args,
|
||||
**kwargs,
|
||||
) -> Generator[Account, None, None]:
|
||||
log.warning(
|
||||
'`open_pps()` is now deprecated!\n'
|
||||
'Please use `with open_account() as acnt:`'
|
||||
)
|
||||
with open_account(*args, **kwargs) as acnt:
|
||||
yield acnt
|
||||
|
||||
|
||||
def load_account_from_ledger(
|
||||
|
||||
brokername: str,
|
||||
acctname: str,
|
||||
|
||||
# post normalization filter on ledger entries to be processed
|
||||
filter_by_ids: dict[str, list[str]] | None = None,
|
||||
|
||||
ledger: TransactionLedger | None = None,
|
||||
**kwargs,
|
||||
|
||||
) -> Account:
|
||||
'''
|
||||
Open a ledger file by broker name and account and read in and
|
||||
process any trade records into our normalized ``Transaction`` form
|
||||
and then update the equivalent ``Account``, delivering the
|
||||
account with its bs_mktid-mapped table of positions.
|
||||
|
||||
'''
|
||||
acnt: Account
|
||||
with open_account(
|
||||
brokername,
|
||||
acctname,
|
||||
**kwargs,
|
||||
) as acnt:
|
||||
if ledger is not None:
|
||||
acnt.update_from_ledger(ledger)
|
||||
|
||||
return acnt
|
|
@@ -0,0 +1,698 @@
|
|||
# piker: trading gear for hackers
|
||||
# Copyright (C) Tyler Goodlet (in stewardship for pikers)
|
||||
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU Affero General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU Affero General Public License for more details.
|
||||
|
||||
# You should have received a copy of the GNU Affero General Public License
|
||||
# along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
'''
|
||||
Calculation routines for balance and position tracking such that
|
||||
you know when you're losing money (if possible) XD
|
||||
|
||||
'''
|
||||
from __future__ import annotations
|
||||
from collections.abc import ValuesView
|
||||
from contextlib import contextmanager as cm
|
||||
from math import copysign
|
||||
from typing import (
|
||||
Any,
|
||||
Callable,
|
||||
Iterator,
|
||||
TYPE_CHECKING,
|
||||
)
|
||||
|
||||
import polars as pl
|
||||
from pendulum import (
|
||||
DateTime,
|
||||
from_timestamp,
|
||||
parse,
|
||||
)
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from ._ledger import (
|
||||
Transaction,
|
||||
TransactionLedger,
|
||||
)
|
||||
|
||||
|
||||
def ppu(
|
||||
clears: Iterator[Transaction],
|
||||
|
||||
# include transaction cost in breakeven price
|
||||
# and presume the worst case of the same cost
|
||||
# to exit this transaction (even though in reality
|
||||
# it will be dynamic based on exit strategy).
|
||||
cost_scalar: float = 2,
|
||||
|
||||
# return the ledger of clears as a (now dt sorted) dict with
|
||||
# new position fields inserted alongside each entry.
|
||||
as_ledger: bool = False,
|
||||
|
||||
) -> float | list[tuple[str, dict]]:
|
||||
'''
|
||||
Compute the "price-per-unit" price for the given non-zero sized
|
||||
rolling position.
|
||||
|
||||
The recurrence relation which computes this (exponential) mean
|
||||
per new clear which **increases** the accumulative position size
|
||||
is:
|
||||
|
||||
ppu[-1] = (
|
||||
ppu[-2] * accum_size[-2]
|
||||
+
|
||||
price[-1] * size[-1]
|
||||
) / accum_size[-1]
|
||||
|
||||
where `cost_basis` for the current step is simply the price
|
||||
* size of the most recent clearing transaction.
|
||||
|
||||
-----
|
||||
TODO: get the BEP computed and working similarly!
|
||||
-----
|
||||
the equivalent "break even price" or bep at each new clear
|
||||
event step conversely only changes on a "position exiting"
|
||||
clear" which **decreases** the cumulative dst asset size:
|
||||
|
||||
bep[-1] = ppu[-1] - (cum_pnl[-1] / cumsize[-1])
|
||||
|
||||
'''
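# (editorial sketch) a tiny worked example of the recurrence above,
# assuming two hypothetical entry clears and zero txn cost:
#
#   clear 1: buy 2 units @ 10.0 -> accum_size = 2, ppu = 10.0
#   clear 2: buy 2 units @ 20.0 -> accum_size = 4,
#            ppu = (10.0*2 + 20.0*2) / 4 = 15.0
#
# a subsequent exit clear (size decreasing) leaves the ppu at 15.0
# and only changes the accumulated size.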
|
||||
asize_h: list[float] = [] # historical accumulative size
|
||||
ppu_h: list[float] = [] # historical price-per-unit
|
||||
# ledger: dict[str, dict] = {}
|
||||
ledger: list[dict] = []
|
||||
|
||||
t: Transaction
|
||||
for t in clears:
|
||||
clear_size: float = t.size
|
||||
clear_price: str | float = t.price
|
||||
is_clear: bool = not isinstance(clear_price, str)
|
||||
|
||||
last_accum_size = asize_h[-1] if asize_h else 0
|
||||
accum_size: float = last_accum_size + clear_size
|
||||
accum_sign = copysign(1, accum_size)
|
||||
sign_change: bool = False
|
||||
|
||||
# on transfers we normally write some non-valid
|
||||
# price since withdrawal to another account/wallet
|
||||
# has nothing to do with inter-asset-market prices.
|
||||
# TODO: this should be better handled via a `type: 'tx'`
|
||||
# field as per existing issue surrounding all this:
|
||||
# https://github.com/pikers/piker/issues/510
|
||||
if isinstance(clear_price, str):
|
||||
# TODO: we can't necessarily have this commit to
|
||||
# the overall pos size since we also need to
|
||||
# include other positions contributions to this
|
||||
# balance or we might end up with a -ve balance for
|
||||
# the position..
|
||||
continue
|
||||
|
||||
# test if the pp somehow went "past" a net zero size state
|
||||
# resulting in a change of the "sign" of the size (+ve for
|
||||
# long, -ve for short).
|
||||
sign_change = (
|
||||
copysign(1, last_accum_size) + accum_sign == 0
|
||||
and last_accum_size != 0
|
||||
)
|
||||
|
||||
# since we passed the net-zero-size state the new size
|
||||
# after sum should be the remaining size in the new
|
||||
# "direction" (aka, long vs. short) for this clear.
|
||||
if sign_change:
|
||||
clear_size: float = accum_size
|
||||
abs_diff: float = abs(accum_size)
|
||||
asize_h.append(0)
|
||||
ppu_h.append(0)
|
||||
|
||||
else:
|
||||
# old size minus the new size gives us size diff with
|
||||
# +ve -> increase in pp size
|
||||
# -ve -> decrease in pp size
|
||||
abs_diff = abs(accum_size) - abs(last_accum_size)
|
||||
|
||||
# XXX: LIFO breakeven price update. only an increase in size
|
||||
# of the position contributes to the breakeven price,
|
||||
# a decrease does not (i.e. the position is being made
|
||||
# smaller).
|
||||
# abs_clear_size = abs(clear_size)
|
||||
abs_new_size: float | int = abs(accum_size)
|
||||
|
||||
if (
|
||||
abs_diff > 0
|
||||
and is_clear
|
||||
):
|
||||
cost_basis = (
|
||||
# cost basis for this clear
|
||||
clear_price * abs(clear_size)
|
||||
+
|
||||
# transaction cost
|
||||
accum_sign * cost_scalar * t.cost
|
||||
)
|
||||
|
||||
if asize_h:
|
||||
size_last: float = abs(asize_h[-1])
|
||||
cb_last: float = ppu_h[-1] * size_last
|
||||
ppu: float = (cost_basis + cb_last) / abs_new_size
|
||||
|
||||
else:
|
||||
ppu: float = cost_basis / abs_new_size
|
||||
|
||||
else:
|
||||
# TODO: for PPU we should probably handle txs out
|
||||
# (aka withdrawals) similarly by simply not having
|
||||
# them contrib to the running PPU calc and only
|
||||
# when the next entry clear comes in (which will
|
||||
# then have a higher weighting on the PPU).
|
||||
|
||||
# on "exit" clears from a given direction,
|
||||
# only the size changes not the price-per-unit
|
||||
# need to be updated since the ppu remains constant
|
||||
# and gets weighted by the new size.
|
||||
ppu: float = ppu_h[-1] if ppu_h else 0 # set to previous value
|
||||
|
||||
# extend with new rolling metric for this step
|
||||
ppu_h.append(ppu)
|
||||
asize_h.append(accum_size)
|
||||
|
||||
# ledger[t.tid] = {
|
||||
# 'txn': t,
|
||||
# ledger[t.tid] = t.to_dict() | {
|
||||
ledger.append((
|
||||
t.tid,
|
||||
t.to_dict() | {
|
||||
'ppu': ppu,
|
||||
'cumsize': accum_size,
|
||||
'sign_change': sign_change,
|
||||
|
||||
# TODO: cum_pnl, bep
|
||||
}
|
||||
))
|
||||
|
||||
final_ppu = ppu_h[-1] if ppu_h else 0
|
||||
# TODO: once we have etypes in all ledger entries..
|
||||
# handle any split info entered (for now) manually by user
|
||||
# if self.split_ratio is not None:
|
||||
# final_ppu /= self.split_ratio
|
||||
|
||||
if as_ledger:
|
||||
return ledger
|
||||
|
||||
else:
|
||||
return final_ppu
|
||||
|
||||
|
||||
def iter_by_dt(
|
||||
records: (
|
||||
dict[str, dict[str, Any]]
|
||||
| ValuesView[dict] # eg. `Position._events.values()`
|
||||
| list[dict]
|
||||
| list[Transaction] # XXX preferred!
|
||||
),
|
||||
|
||||
# NOTE: parsers are looked up in the insert order
|
||||
# so if you know that the record stats show some field
|
||||
# is more common than others, stick it at the top B)
|
||||
parsers: dict[str, Callable | None] = {
|
||||
'dt': parse, # parity case
|
||||
'datetime': parse, # datetime-str
|
||||
'time': from_timestamp, # float epoch
|
||||
},
|
||||
key: Callable | None = None,
|
||||
|
||||
) -> Iterator[tuple[str, dict]]:
|
||||
'''
|
||||
Iterate entries of a transaction table sorted by entry recorded
|
||||
datetime presumably set at the ``'dt'`` field in each entry.
|
||||
|
||||
'''
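# (editorial sketch) example usage, assuming `txns` is a list of
# `Transaction`s (or a `dict.values()` view of them) from a ledger:
#
#   for txn in iter_by_dt(txns):
#       ...  # entries are yielded oldest-first by their `dt` field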
|
||||
if isinstance(records, dict):
|
||||
records: list[tuple[str, dict]] = list(records.items())
|
||||
|
||||
def dyn_parse_to_dt(
|
||||
tx: tuple[str, dict[str, Any]] | Transaction,
|
||||
) -> DateTime:
|
||||
|
||||
# handle `.items()` inputs
|
||||
if isinstance(tx, tuple):
|
||||
tx = tx[1]
|
||||
|
||||
# dict or tx object?
|
||||
isdict: bool = isinstance(tx, dict)
|
||||
|
||||
# get best parser for this record..
|
||||
for k in parsers:
|
||||
if (
|
||||
isdict and k in tx
|
||||
or getattr(tx, k, None)
|
||||
):
|
||||
v = tx[k] if isdict else tx.dt
|
||||
assert v is not None, f'No valid value for `{k}`!?'
|
||||
|
||||
# only call parser on the value if not None from
|
||||
# the `parsers` table above (when NOT using
|
||||
# `.get()`), otherwise pass through the value and
|
||||
# sort on it directly
|
||||
if (
|
||||
not isinstance(v, DateTime)
|
||||
and (parser := parsers.get(k))
|
||||
):
|
||||
return parser(v)
|
||||
else:
|
||||
return v
|
||||
|
||||
else:
|
||||
# XXX: should never get here..
|
||||
breakpoint()
|
||||
|
||||
entry: tuple[str, dict] | Transaction
|
||||
for entry in sorted(
|
||||
records,
|
||||
key=key or dyn_parse_to_dt,
|
||||
):
|
||||
# NOTE the type sig above; either pairs or txns B)
|
||||
yield entry
|
||||
|
||||
|
||||
# TODO: probably just move this into the test suite or
|
||||
# keep it here for use as such?
|
||||
# def ensure_state(self) -> None:
|
||||
# '''
|
||||
# Audit either the `.cumsize` and `.ppu` local instance vars against
|
||||
# the clears table calculations and return the calc-ed values if
|
||||
# they differ and log warnings to console.
|
||||
|
||||
# '''
|
||||
# # clears: list[dict] = self._clears
|
||||
|
||||
# # self.first_clear_dt = min(clears, key=lambda e: e['dt'])['dt']
|
||||
# last_clear: dict = clears[-1]
|
||||
# csize: float = self.calc_size()
|
||||
# accum: float = last_clear['accum_size']
|
||||
|
||||
# if not self.expired():
|
||||
# if (
|
||||
# csize != accum
|
||||
# and csize != round(accum * (self.split_ratio or 1))
|
||||
# ):
|
||||
# raise ValueError(f'Size mismatch: {csize}')
|
||||
# else:
|
||||
# assert csize == 0, 'Contract is expired but non-zero size?'
|
||||
|
||||
# if self.cumsize != csize:
|
||||
# log.warning(
|
||||
# 'Position state mismatch:\n'
|
||||
# f'{self.cumsize} => {csize}'
|
||||
# )
|
||||
# self.cumsize = csize
|
||||
|
||||
# cppu: float = self.calc_ppu()
|
||||
# ppu: float = last_clear['ppu']
|
||||
# if (
|
||||
# cppu != ppu
|
||||
# and self.split_ratio is not None
|
||||
|
||||
# # handle any split info entered (for now) manually by user
|
||||
# and cppu != (ppu / self.split_ratio)
|
||||
# ):
|
||||
# raise ValueError(f'PPU mismatch: {cppu}')
|
||||
|
||||
# if self.ppu != cppu:
|
||||
# log.warning(
|
||||
# 'Position state mismatch:\n'
|
||||
# f'{self.ppu} => {cppu}'
|
||||
# )
|
||||
# self.ppu = cppu
|
||||
|
||||
|
||||
@cm
|
||||
def open_ledger_dfs(
|
||||
|
||||
brokername: str,
|
||||
acctname: str,
|
||||
|
||||
ledger: TransactionLedger | None = None,
|
||||
|
||||
**kwargs,
|
||||
|
||||
) -> tuple[
|
||||
dict[str, pl.DataFrame],
|
||||
TransactionLedger,
|
||||
]:
|
||||
'''
|
||||
Open a ledger of trade records (presumably from some broker
|
||||
backend), normalize the records into `Transactions` via the
|
||||
backend's declared endpoint, cast to a `polars.DataFrame` which
|
||||
can update the ledger on exit.
|
||||
|
||||
'''
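# (editorial sketch) typical usage, broker/account names illustrative:
#
#   with open_ledger_dfs('kraken', 'paper') as (dfs, ledger):
#       df = dfs[some_bs_mktid]  # one frame per market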
|
||||
from piker.toolz import open_crash_handler
|
||||
with open_crash_handler():
|
||||
if not ledger:
|
||||
import time
|
||||
from ._ledger import open_trade_ledger
|
||||
|
||||
now = time.time()
|
||||
|
||||
with open_trade_ledger(
|
||||
brokername,
|
||||
acctname,
|
||||
rewrite=True,
|
||||
allow_from_sync_code=True,
|
||||
|
||||
# proxied through from caller
|
||||
**kwargs,
|
||||
|
||||
) as ledger:
|
||||
if not ledger:
|
||||
raise ValueError(f'No ledger for {acctname}@{brokername} exists?')
|
||||
|
||||
print(f'LEDGER LOAD TIME: {time.time() - now}')
|
||||
|
||||
yield ledger_to_dfs(ledger), ledger
|
||||
|
||||
|
||||
def ledger_to_dfs(
|
||||
ledger: TransactionLedger,
|
||||
|
||||
) -> dict[str, pl.DataFrame]:
|
||||
|
||||
txns: dict[str, Transaction] = ledger.to_txns()
|
||||
|
||||
# ldf = pl.DataFrame(
|
||||
# list(txn.to_dict() for txn in txns.values()),
|
||||
ldf = pl.from_dicts(
|
||||
list(txn.to_dict() for txn in txns.values()),
|
||||
|
||||
# only for ordering the cols
|
||||
schema=[
|
||||
('fqme', str),
|
||||
('tid', str),
|
||||
('bs_mktid', str),
|
||||
('expiry', str),
|
||||
('etype', str),
|
||||
('dt', str),
|
||||
('size', pl.Float64),
|
||||
('price', pl.Float64),
|
||||
('cost', pl.Float64),
|
||||
],
|
||||
).sort( # chronological order
|
||||
'dt'
|
||||
).with_columns([
|
||||
pl.col('dt').str.to_datetime(),
|
||||
# pl.col('expiry').str.to_datetime(),
|
||||
# pl.col('expiry').dt.date(),
|
||||
])
|
||||
|
||||
# filter out to the columns matching values filter passed
|
||||
# as input.
|
||||
# if filter_by_ids:
|
||||
# for col, vals in filter_by_ids.items():
|
||||
# str_vals = set(map(str, vals))
|
||||
# pred: pl.Expr = pl.col(col).eq(str_vals.pop())
|
||||
# for val in str_vals:
|
||||
# pred |= pl.col(col).eq(val)
|
||||
|
||||
# fdf = df.filter(pred)
|
||||
|
||||
# TODO: originally i had tried just using a plain ol' groupby
|
||||
# + agg here but the issue was re-inserting to the src frame.
|
||||
# however, learning more about `polars` seems like maybe we can
|
||||
# use `.over()`?
|
||||
# https://pola-rs.github.io/polars/py-polars/html/reference/expressions/api/polars.Expr.over.html#polars.Expr.over
|
||||
# => CURRENTLY we break up into a frame per mkt / fqme
|
||||
dfs: dict[str, pl.DataFrame] = ldf.partition_by(
|
||||
'bs_mktid',
|
||||
as_dict=True,
|
||||
)
|
||||
|
||||
# TODO: not sure if this is even possible but..
|
||||
# - it'd be more ideal to use `ppt = df.groupby('fqme').agg([`
|
||||
# - ppu and bep calcs!
|
||||
for key in dfs:
|
||||
|
||||
# convert to lazy form (since apparently we might need it
|
||||
# eventually ...)
|
||||
df: pl.DataFrame = dfs[key]
|
||||
|
||||
ldf: pl.LazyFrame = df.lazy()
|
||||
|
||||
df = dfs[key] = ldf.with_columns([
|
||||
|
||||
pl.cumsum('size').alias('cumsize'),
|
||||
|
||||
# amount of source asset "sent" (via buy txns in
|
||||
# the market) to acquire the dst asset, PER txn.
|
||||
# when this value is -ve (i.e. a sell operation) then
|
||||
# the amount sent is actually "returned".
|
||||
(
|
||||
(pl.col('price') * pl.col('size'))
|
||||
+
|
||||
(pl.col('cost')) # * pl.col('size').sign())
|
||||
).alias('dst_bot'),
|
||||
|
||||
]).with_columns([
|
||||
|
||||
# rolling balance in src asset units
|
||||
(pl.col('dst_bot').cumsum() * -1).alias('src_balance'),
|
||||
|
||||
# "position operation type" in terms of increasing the
|
||||
# amount in the dst asset (entering) or decreasing the
|
||||
# amount in the dst asset (exiting).
|
||||
pl.when(
|
||||
pl.col('size').sign() == pl.col('cumsize').sign()
|
||||
|
||||
).then(
|
||||
pl.lit('enter') # see above, but is just price * size per txn
|
||||
|
||||
).otherwise(
|
||||
pl.when(pl.col('cumsize') == 0)
|
||||
.then(pl.lit('exit_to_zero'))
|
||||
.otherwise(pl.lit('exit'))
|
||||
).alias('descr'),
|
||||
|
||||
(pl.col('cumsize').sign() == pl.col('size').sign())
|
||||
.alias('is_enter'),
|
||||
|
||||
]).with_columns([
|
||||
|
||||
# pl.lit(0, dtype=pl.Utf8).alias('virt_cost'),
|
||||
pl.lit(0, dtype=pl.Float64).alias('applied_cost'),
|
||||
pl.lit(0, dtype=pl.Float64).alias('pos_ppu'),
|
||||
pl.lit(0, dtype=pl.Float64).alias('per_txn_pnl'),
|
||||
pl.lit(0, dtype=pl.Float64).alias('cum_pos_pnl'),
|
||||
pl.lit(0, dtype=pl.Float64).alias('pos_bep'),
|
||||
pl.lit(0, dtype=pl.Float64).alias('cum_ledger_pnl'),
|
||||
pl.lit(None, dtype=pl.Float64).alias('ledger_bep'),
|
||||
|
||||
# TODO: instead of the iterative loop below i guess we
|
||||
# could try using embedded lists to track which txns
|
||||
# are part of which ppu / bep calcs? Not sure this will
|
||||
# look any better nor be any more performant though xD
|
||||
# pl.lit([[0]], dtype=pl.List(pl.Float64)).alias('list'),
|
||||
|
||||
# choose fields to emit for accounting purposes
|
||||
]).select([
|
||||
pl.exclude([
|
||||
'tid',
|
||||
# 'dt',
|
||||
'expiry',
|
||||
'bs_mktid',
|
||||
'etype',
|
||||
# 'is_enter',
|
||||
]),
|
||||
]).collect()
|
||||
|
||||
# compute recurrence relations for ppu and bep
|
||||
last_ppu: float = 0
|
||||
last_cumsize: float = 0
|
||||
last_ledger_pnl: float = 0
|
||||
last_pos_pnl: float = 0
|
||||
virt_costs: list[float, float] = [0., 0.]
|
||||
|
||||
# imperatively compute the PPU (price per unit) and BEP
|
||||
# (break even price) iteratively over the ledger, oriented
|
||||
# around each position state: a state of split balances in
|
||||
# > 1 asset.
|
||||
for i, row in enumerate(df.iter_rows(named=True)):
|
||||
|
||||
cumsize: float = row['cumsize']
|
||||
is_enter: bool = row['is_enter']
|
||||
price: float = row['price']
|
||||
size: float = row['size']
|
||||
|
||||
# the profit is ALWAYS decreased, aka made a "loss"
|
||||
# by the constant fee charged by the txn provider!
|
||||
# see below in final PnL calculation and row element
|
||||
# set.
|
||||
txn_cost: float = row['cost']
|
||||
pnl: float = 0
|
||||
|
||||
# ALWAYS reset per-position cum PnL
|
||||
if last_cumsize == 0:
|
||||
last_pos_pnl: float = 0
|
||||
|
||||
# a "position size INCREASING" or ENTER transaction
|
||||
# which "makes larger", in src asset unit terms, the
|
||||
# trade's side-size of the destination asset:
|
||||
# - "buying" (more) units of the dst asset
|
||||
# - "selling" (more short) units of the dst asset
|
||||
if is_enter:
|
||||
|
||||
# Naively include transaction cost in breakeven
|
||||
# price and presume the worst case of the
|
||||
# exact-same-cost-to-exit this transaction's worth
|
||||
# of size even though in reality it will be dynamic
|
||||
# based on exit strategy, price, liquidity, etc..
|
||||
virt_cost: float = txn_cost
|
||||
|
||||
# cpu: float = cost / size
|
||||
# cummean of the cost-per-unit used for modelling
|
||||
# a projected future exit cost which we immediately
|
||||
# include in the costs incorporated to BEP on enters
|
||||
last_cum_costs_size, last_cpu = virt_costs
|
||||
cum_costs_size: float = last_cum_costs_size + abs(size)
|
||||
cumcpu = (
|
||||
(last_cpu * last_cum_costs_size)
|
||||
+
|
||||
txn_cost
|
||||
) / cum_costs_size
|
||||
virt_costs = [cum_costs_size, cumcpu]
|
||||
|
||||
txn_cost = txn_cost + virt_cost
|
||||
# df[i, 'virt_cost'] = f'{-virt_cost} FROM {cumcpu}@{cum_costs_size}'
|
||||
|
||||
# a cumulative mean of the price-per-unit acquired
|
||||
# in the destination asset:
|
||||
# https://en.wikipedia.org/wiki/Moving_average#Cumulative_average
|
||||
# You could also think of this measure more
|
||||
# generally as an exponential mean with `alpha
|
||||
# = 1/N` where `N` is the current number of txns
|
||||
# included in the "position" defining set:
|
||||
# https://en.wikipedia.org/wiki/Exponential_smoothing
|
||||
ppu: float = (
|
||||
(
|
||||
(last_ppu * last_cumsize)
|
||||
+
|
||||
(price * size)
|
||||
) /
|
||||
cumsize
|
||||
)
|
||||
|
||||
# a "position size DECREASING" or EXIT transaction
|
||||
# which "makes smaller" the trade's side-size of the
|
||||
# destination asset:
|
||||
# - selling previously bought units of the dst asset
|
||||
# (aka 'closing' a long position).
|
||||
# - buying previously borrowed and sold (short) units
|
||||
# of the dst asset (aka 'covering'/'closing' a short
|
||||
# position).
|
||||
else:
|
||||
# only changes on position size increasing txns
|
||||
ppu: float = last_ppu
|
||||
|
||||
# UNWIND IMPLIED COSTS FROM ENTRIES
|
||||
# => Reverse the virtual/modelled (2x predicted) txn
|
||||
# cost that was included in the least-recently
|
||||
# entered txn that is still part of the current CSi
|
||||
# set.
|
||||
# => we look up the cost-per-unit cumsum and apply
|
||||
# if over the current txn size (by multiplication)
|
||||
# and then reverse that previously applied cost on
|
||||
# the txn_cost for this record.
|
||||
#
|
||||
# NOTE: the current "model" is just to assume 2x
|
||||
# the txn cost for a matching enter-txn's
|
||||
# cost-per-unit; we then immediately reverse this
|
||||
# prediction and apply the real cost received here.
|
||||
last_cum_costs_size, last_cpu = virt_costs
|
||||
prev_virt_cost: float = last_cpu * abs(size)
|
||||
txn_cost: float = txn_cost - prev_virt_cost # +ve thus a "reversal"
|
||||
cum_costs_size: float = last_cum_costs_size - abs(size)
|
||||
virt_costs = [cum_costs_size, last_cpu]
|
||||
|
||||
# df[i, 'virt_cost'] = (
|
||||
# f'{-prev_virt_cost} FROM {last_cpu}@{cum_costs_size}'
|
||||
# )
|
||||
|
||||
# the per-txn profit or loss (PnL) given we are
|
||||
# (partially) "closing"/"exiting" the position via
|
||||
# this txn.
|
||||
pnl: float = (last_ppu - price) * size
|
||||
|
||||
# always subtract txn cost from total txn pnl
|
||||
txn_pnl: float = pnl - txn_cost
|
||||
|
||||
# cumulative PnLs per txn
|
||||
last_ledger_pnl = (
|
||||
last_ledger_pnl + txn_pnl
|
||||
)
|
||||
last_pos_pnl = df[i, 'cum_pos_pnl'] = (
|
||||
last_pos_pnl + txn_pnl
|
||||
)
|
||||
|
||||
if cumsize == 0:
|
||||
last_ppu = ppu = 0
|
||||
|
||||
# compute the BEP: "break even price", a value that
|
||||
# determines at what price the remaining cumsize can be
|
||||
# liquidated such that the net-PnL on the current
|
||||
# position will result in ZERO gain or loss from open
|
||||
# to close including all txn costs B)
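# (editorial sketch, illustrative numbers) with ppu=15, a long
# cumsize=4 and a cumulative pnl of +8 booked so far:
#   bep = ((15 * 4) - 8) / 4 = 13
# exiting all 4 units at 13 yields (13 - 15) * 4 = -8 which exactly
# cancels the +8 already realized, ie. net-zero as intended.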
|
||||
if (
|
||||
abs(cumsize) > 0 # non-exit-to-zero position txn
|
||||
):
|
||||
cumsize_sign: float = copysign(1, cumsize)
|
||||
ledger_bep: float = (
|
||||
(
|
||||
(ppu * cumsize)
|
||||
-
|
||||
(last_ledger_pnl * cumsize_sign)
|
||||
) / cumsize
|
||||
)
|
||||
|
||||
# NOTE: when we "enter more" dst asset units (aka
|
||||
# increase position state) AFTER having exited some
|
||||
# units (aka decreasing the pos size some) the bep
|
||||
# needs to be RECOMPUTED based on new ppu such that
|
||||
# liquidation of the cumsize at the bep price
|
||||
# results in a zero-pnl for the existing position
|
||||
# (since the last one).
|
||||
# for position lifetime BEP we never can have
|
||||
# a valid value once the position is "closed"
|
||||
# / fully exited Bo
|
||||
pos_bep: float = (
|
||||
(
|
||||
(ppu * cumsize)
|
||||
-
|
||||
(last_pos_pnl * cumsize_sign)
|
||||
) / cumsize
|
||||
)
|
||||
|
||||
# inject DF row with all values
|
||||
df[i, 'pos_ppu'] = ppu
|
||||
df[i, 'per_txn_pnl'] = txn_pnl
|
||||
df[i, 'applied_cost'] = -txn_cost
|
||||
df[i, 'cum_pos_pnl'] = last_pos_pnl
|
||||
df[i, 'pos_bep'] = pos_bep
|
||||
df[i, 'cum_ledger_pnl'] = last_ledger_pnl
|
||||
df[i, 'ledger_bep'] = ledger_bep
|
||||
|
||||
# keep backrefs to satisfy the recurrence relation
|
||||
last_ppu: float = ppu
|
||||
last_cumsize: float = cumsize
|
||||
|
||||
# TODO?: pass back the current `Position` object loaded from
|
||||
# the account as well? Would provide incentive to do all
|
||||
# this ledger loading inside a new async open_account().
|
||||
# bs_mktid: str = df[0]['bs_mktid']
|
||||
# pos: Position = acnt.pps[bs_mktid]
|
||||
|
||||
return dfs
|
|
@@ -0,0 +1,311 @@
|
|||
# piker: trading gear for hackers
|
||||
# Copyright (C) Tyler Goodlet (in stewardship for pikers)
|
||||
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU Affero General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU Affero General Public License for more details.
|
||||
|
||||
# You should have received a copy of the GNU Affero General Public License
|
||||
# along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
'''
|
||||
CLI front end for trades ledger and position tracking management.
|
||||
|
||||
'''
|
||||
from __future__ import annotations
|
||||
from pprint import pformat
|
||||
|
||||
|
||||
from rich.console import Console
|
||||
from rich.markdown import Markdown
|
||||
import polars as pl
|
||||
import tractor
|
||||
import trio
|
||||
import typer
|
||||
|
||||
from ..log import get_logger
|
||||
from ..service import (
|
||||
open_piker_runtime,
|
||||
)
|
||||
from ..clearing._messages import BrokerdPosition
|
||||
from ..calc import humanize
|
||||
from ..brokers._daemon import broker_init
|
||||
from ._ledger import (
|
||||
load_ledger,
|
||||
TransactionLedger,
|
||||
# open_trade_ledger,
|
||||
)
|
||||
from .calc import (
|
||||
open_ledger_dfs,
|
||||
)
|
||||
|
||||
|
||||
ledger = typer.Typer()
|
||||
|
||||
|
||||
def unpack_fqan(
|
||||
fully_qualified_account_name: str,
|
||||
console: Console | None = None,
|
||||
) -> tuple | bool:
|
||||
try:
|
||||
brokername, account = fully_qualified_account_name.split('.')
|
||||
return brokername, account
|
||||
except ValueError:
|
||||
if console is not None:
|
||||
md = Markdown(
|
||||
f'=> `{fully_qualified_account_name}` <=\n\n'
|
||||
'is not a valid '
|
||||
'__fully qualified account name?__\n\n'
|
||||
'Your account name needs to be of the form '
|
||||
'`<brokername>.<account_name>`\n'
|
||||
)
|
||||
console.print(md)
|
||||
return False
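# (editorial sketch) example usage, account name illustrative:
#
#   pair = unpack_fqan('binance.paper', console)
#   # -> ('binance', 'paper'), or `False` (after printing help) when
#   # the input is not of the `<brokername>.<account_name>` form.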
|
||||
|
||||
|
||||
@ledger.command()
|
||||
def sync(
|
||||
fully_qualified_account_name: str,
|
||||
pdb: bool = False,
|
||||
|
||||
loglevel: str = typer.Option(
|
||||
'error',
|
||||
"-l",
|
||||
),
|
||||
):
|
||||
log = get_logger(loglevel)
|
||||
console = Console()
|
||||
|
||||
pair: tuple[str, str]
|
||||
if not (pair := unpack_fqan(
|
||||
fully_qualified_account_name,
|
||||
console,
|
||||
)):
|
||||
return
|
||||
|
||||
brokername, account = pair
|
||||
|
||||
brokermod, start_kwargs, deamon_ep = broker_init(
|
||||
brokername,
|
||||
loglevel=loglevel,
|
||||
)
|
||||
brokername: str = brokermod.name
|
||||
|
||||
async def main():
|
||||
|
||||
async with (
|
||||
open_piker_runtime(
|
||||
name='ledger_cli',
|
||||
loglevel=loglevel,
|
||||
debug_mode=pdb,
|
||||
|
||||
) as (actor, sockaddr),
|
||||
|
||||
tractor.open_nursery() as an,
|
||||
):
|
||||
try:
|
||||
log.info(
|
||||
f'Piker runtime up as {actor.uid}@{sockaddr}'
|
||||
)
|
||||
|
||||
portal = await an.start_actor(
|
||||
loglevel=loglevel,
|
||||
debug_mode=pdb,
|
||||
**start_kwargs,
|
||||
)
|
||||
|
||||
from ..clearing import (
|
||||
open_brokerd_dialog,
|
||||
)
|
||||
brokerd_stream: tractor.MsgStream
|
||||
|
||||
async with (
|
||||
# engage the brokerd daemon context
|
||||
portal.open_context(
|
||||
deamon_ep,
|
||||
brokername=brokername,
|
||||
loglevel=loglevel,
|
||||
),
|
||||
|
||||
# manually open the brokerd trade dialog EP
|
||||
# (what the EMS normally does internally) B)
|
||||
open_brokerd_dialog(
|
||||
brokermod,
|
||||
portal,
|
||||
exec_mode=(
|
||||
'paper'
|
||||
if account == 'paper'
|
||||
else 'live'
|
||||
),
|
||||
loglevel=loglevel,
|
||||
) as (
|
||||
brokerd_stream,
|
||||
pp_msg_table,
|
||||
accounts,
|
||||
),
|
||||
):
|
||||
try:
|
||||
assert len(accounts) == 1
|
||||
if not pp_msg_table:
|
||||
ld, fpath = load_ledger(brokername, account)
|
||||
assert not ld, f'WTF did we fail to parse ledger:\n{ld}'
|
||||
|
||||
console.print(
|
||||
'[yellow]'
|
||||
'No pps found for '
|
||||
f'`{brokername}.{account}` '
|
||||
'account!\n\n'
|
||||
'[/][underline]'
|
||||
'None of the following ledger files exist:\n\n[/]'
|
||||
f'{fpath.as_uri()}\n'
|
||||
)
|
||||
return
|
||||
|
||||
pps_by_symbol: dict[str, BrokerdPosition] = pp_msg_table[
|
||||
brokername,
|
||||
account,
|
||||
]
|
||||
|
||||
summary: str = (
|
||||
'[dim underline]Piker Position Summary[/] '
|
||||
f'[dim blue underline]{brokername}[/]'
|
||||
'[dim].[/]'
|
||||
f'[blue underline]{account}[/]'
|
||||
f'[dim underline] -> total pps: [/]'
|
||||
f'[green]{len(pps_by_symbol)}[/]\n'
|
||||
)
|
||||
# for ppdict in positions:
|
||||
for fqme, ppmsg in pps_by_symbol.items():
|
||||
# ppmsg = BrokerdPosition(**ppdict)
|
||||
size = ppmsg.size
|
||||
if size:
|
||||
ppu: float = round(
|
||||
ppmsg.avg_price,
|
||||
ndigits=2,
|
||||
)
|
||||
cost_basis: str = humanize(size * ppu)
|
||||
h_size: str = humanize(size)
|
||||
|
||||
if size < 0:
|
||||
pcolor = 'red'
|
||||
else:
|
||||
pcolor = 'green'
|
||||
|
||||
# semantic-highlight of fqme
|
||||
fqme = ppmsg.symbol
|
||||
tokens = fqme.split('.')
|
||||
styled_fqme = f'[blue underline]{tokens[0]}[/]'
|
||||
for tok in tokens[1:]:
|
||||
styled_fqme += '[dim].[/]'
|
||||
styled_fqme += f'[dim blue underline]{tok}[/]'
|
||||
|
||||
# TODO: instead display in a ``rich.Table``?
|
||||
summary += (
|
||||
styled_fqme +
|
||||
'[dim]: [/]'
|
||||
f'[{pcolor}]{h_size}[/]'
|
||||
'[dim blue]u @[/]'
|
||||
f'[{pcolor}]{ppu}[/]'
|
||||
'[dim blue] = [/]'
|
||||
f'[{pcolor}]$ {cost_basis}\n[/]'
|
||||
)
|
||||
|
||||
console.print(summary)
|
||||
|
||||
finally:
|
||||
# exit via ctx cancellation.
|
||||
brokerd_ctx: tractor.Context = brokerd_stream._ctx
|
||||
await brokerd_ctx.cancel(timeout=1)
|
||||
|
||||
# TODO: once ported to newer tractor branch we should
|
||||
# be able to do a loop like this:
|
||||
# while brokerd_ctx.cancel_called_remote is None:
|
||||
# await trio.sleep(0.01)
|
||||
# await brokerd_ctx.cancel()
|
||||
|
||||
finally:
|
||||
await portal.cancel_actor()
|
||||
|
||||
trio.run(main)
|
||||
|
||||
|
||||
@ledger.command()
|
||||
def disect(
|
||||
# "fully_qualified_account_name"
|
||||
fqan: str,
|
||||
fqme: str, # for ib
|
||||
|
||||
# TODO: in tractor we should really have
|
||||
# a debug_mode ctx for wrapping any kind of code no?
|
||||
pdb: bool = False,
|
||||
bs_mktid: str = typer.Option(
|
||||
None,
|
||||
"-bid",
|
||||
),
|
||||
loglevel: str = typer.Option(
|
||||
'error',
|
||||
"-l",
|
||||
),
|
||||
):
|
||||
from piker.log import get_console_log
|
||||
from piker.toolz import open_crash_handler
|
||||
get_console_log(loglevel)
|
||||
|
||||
pair: tuple[str, str]
|
||||
if not (pair := unpack_fqan(fqan)):
|
||||
raise ValueError(f'{fqan} malformed!?')
|
||||
|
||||
brokername, account = pair
|
||||
|
||||
# ledger dfs groupby-partitioned by fqme
|
||||
dfs: dict[str, pl.DataFrame]
|
||||
# actual ledger instance
|
||||
ldgr: TransactionLedger
|
||||
|
||||
pl.Config.set_tbl_cols(-1)
|
||||
pl.Config.set_tbl_rows(-1)
|
||||
with (
|
||||
open_crash_handler(),
|
||||
open_ledger_dfs(
|
||||
brokername,
|
||||
account,
|
||||
) as (dfs, ldgr),
|
||||
):
|
||||
|
||||
# look up specific frame for fqme-selected asset
|
||||
if (df := dfs.get(fqme)) is None:
|
||||
mktids2fqmes: dict[str, list[str]] = {}
|
||||
for bs_mktid in dfs:
|
||||
df: pl.DataFrame = dfs[bs_mktid]
|
||||
fqmes: pl.Series[str] = df['fqme']
|
||||
uniques: list[str] = fqmes.unique()
|
||||
mktids2fqmes[bs_mktid] = set(uniques)
|
||||
if fqme in uniques:
|
||||
break
|
||||
print(
|
||||
f'No specific ledger for fqme={fqme} could be found in\n'
|
||||
f'{pformat(mktids2fqmes)}?\n'
|
||||
f'Maybe the `{brokername}` backend uses something '
|
||||
'else for its `bs_mktid` than the `fqme`?\n'
|
||||
'Scanning for matches in unique fqmes per frame..\n'
|
||||
)
|
||||
|
||||
# :pray:
|
||||
assert not df.is_empty()
|
||||
|
||||
# muck around in pdbp REPL
|
||||
breakpoint()
|
||||
|
||||
# TODO: we REALLY need a better console REPL for this
|
||||
# kinda thing..
|
||||
# - `xonsh` is an obvious option (and it looks amazin) but
|
||||
# we need to figure out how to embed it better than just:
|
||||
# from xonsh.main import main
|
||||
# main(argv=[])
|
||||
# which will not actually inject the `df` to globals?
|
|
@@ -17,33 +17,95 @@
|
|||
"""
|
||||
Broker clients, daemons and general back end machinery.
|
||||
"""
|
||||
from contextlib import (
|
||||
asynccontextmanager as acm,
|
||||
)
|
||||
from importlib import import_module
|
||||
from types import ModuleType
|
||||
|
||||
# TODO: move to urllib3/requests once supported
|
||||
import asks
|
||||
asks.init('trio')
|
||||
from tractor.trionics import maybe_open_context
|
||||
|
||||
__brokers__ = [
|
||||
from ._util import (
|
||||
log,
|
||||
BrokerError,
|
||||
SymbolNotFound,
|
||||
NoData,
|
||||
DataUnavailable,
|
||||
DataThrottle,
|
||||
resproc,
|
||||
get_logger,
|
||||
)
|
||||
|
||||
__all__: list[str] = [
|
||||
'BrokerError',
|
||||
'SymbolNotFound',
|
||||
'NoData',
|
||||
'DataUnavailable',
|
||||
'DataThrottle',
|
||||
'resproc',
|
||||
'get_logger',
|
||||
]
|
||||
|
||||
__brokers__: list[str] = [
|
||||
'binance',
|
||||
'questrade',
|
||||
'robinhood',
|
||||
'ib',
|
||||
'kraken',
|
||||
'kucoin',
|
||||
|
||||
# broken but used to work
|
||||
# 'questrade',
|
||||
# 'robinhood',
|
||||
|
||||
# TODO: we should get on these stat!
|
||||
# alpaca
|
||||
# wstrade
|
||||
# iex
|
||||
|
||||
# deribit
|
||||
# bitso
|
||||
]
|
||||
|
||||
|
||||
def get_brokermod(brokername: str) -> ModuleType:
|
||||
"""Return the imported broker module by name.
|
||||
"""
|
||||
module = import_module('.' + brokername, 'piker.brokers')
|
||||
'''
|
||||
Return the imported broker module by name.
|
||||
|
||||
'''
|
||||
module: ModuleType = import_module('.' + brokername, 'piker.brokers')
|
||||
# we only allow monkeying because it's for internal keying
|
||||
module.name = module.__name__.split('.')[-1]
|
||||
return module
|
||||
|
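A minimal usage sketch (the `kraken` backend name is just an example pulled from `__brokers__` above):

    mod = get_brokermod('kraken')
    # the monkey-patched short name mirrors the subpkg name
    assert mod.name == 'kraken'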
||||
|
||||
def iter_brokermods():
|
||||
"""Iterate all built-in broker modules.
|
||||
"""
|
||||
'''
|
||||
Iterate all built-in broker modules.
|
||||
|
||||
'''
|
||||
for name in __brokers__:
|
||||
yield get_brokermod(name)
|
||||
|
||||
|
||||
@acm
|
||||
async def open_cached_client(
|
||||
brokername: str,
|
||||
**kwargs,
|
||||
|
||||
) -> 'Client': # noqa
|
||||
'''
|
||||
Get a cached broker client from the current actor's local vars.
|
||||
|
||||
If one has not been setup do it and cache it.
|
||||
|
||||
'''
|
||||
brokermod = get_brokermod(brokername)
|
||||
async with maybe_open_context(
|
||||
acm_func=brokermod.get_client,
|
||||
kwargs=kwargs,
|
||||
|
||||
) as (cache_hit, client):
|
||||
|
||||
if cache_hit:
|
||||
log.runtime(f'Reusing existing {client}')
|
||||
|
||||
yield client
|
||||
|
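For instance, a data task can lazily grab the per-actor binance client; a hedged sketch only, mirroring call sites that appear later in this diff:

    async with open_cached_client('binance') as client:
        # re-entrant tasks in the same actor reuse the cached instance
        pairs = await client.cache_symbols()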
|
|
@@ -0,0 +1,276 @@
|
|||
# piker: trading gear for hackers
|
||||
# Copyright (C) Tyler Goodlet (in stewardship for pikers)
|
||||
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU Affero General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU Affero General Public License for more details.
|
||||
|
||||
# You should have received a copy of the GNU Affero General Public License
|
||||
# along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
'''
|
||||
Broker-daemon-actor "endpoint-hooks": the service task entry points for
|
||||
``brokerd``.
|
||||
|
||||
'''
|
||||
from __future__ import annotations
|
||||
from contextlib import (
|
||||
asynccontextmanager as acm,
|
||||
)
|
||||
from types import ModuleType
|
||||
from typing import (
|
||||
TYPE_CHECKING,
|
||||
AsyncContextManager,
|
||||
)
|
||||
import exceptiongroup as eg
|
||||
|
||||
import tractor
|
||||
import trio
|
||||
|
||||
from . import _util
|
||||
from . import get_brokermod
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from ..data import _FeedsBus
|
||||
|
||||
# `brokerd` enabled modules
|
||||
# TODO: move this def to the `.data` subpkg..
|
||||
# NOTE: keeping this list as small as possible is part of our caps-sec
|
||||
# model and should be treated with utmost care!
|
||||
_data_mods: list[str] = [
|
||||
'piker.brokers.core',
|
||||
'piker.brokers.data',
|
||||
'piker.brokers._daemon',
|
||||
'piker.data',
|
||||
'piker.data.feed',
|
||||
'piker.data._sampling'
|
||||
]
|
||||
|
||||
|
||||
# TODO: we should rename the daemon to datad prolly once we split up
|
||||
# broker vs. data tasks into separate actors?
|
||||
@tractor.context
|
||||
async def _setup_persistent_brokerd(
|
||||
ctx: tractor.Context,
|
||||
brokername: str,
|
||||
loglevel: str | None = None,
|
||||
|
||||
) -> None:
|
||||
'''
|
||||
Allocate an actor-wide service nursery in ``brokerd``
|
||||
such that feeds can be run in the background persistently by
|
||||
the broker backend as needed.
|
||||
|
||||
'''
|
||||
# NOTE: we only need to setup logging once (and only) here
|
||||
# since all hosted daemon tasks will reference this same
|
||||
# log instance's (actor local) state and thus don't require
|
||||
# any further (level) configuration on their own B)
|
||||
log = _util.get_console_log(
|
||||
loglevel or tractor.current_actor().loglevel,
|
||||
name=f'{_util.subsys}.{brokername}',
|
||||
)
|
||||
|
||||
# set global for this actor to this new process-wide instance B)
|
||||
_util.log = log
|
||||
|
||||
# further, set the log level on any broker-specific
|
||||
# logger instance.
|
||||
|
||||
from piker.data import feed
|
||||
assert not feed._bus
|
||||
|
||||
# allocate a nursery to the bus for spawning background
|
||||
# tasks to service client IPC requests, normally
|
||||
# `tractor.Context` connections to explicitly required
|
||||
# `brokerd` endpoints such as:
|
||||
# - `stream_quotes()`,
|
||||
# - `manage_history()`,
|
||||
# - `allocate_persistent_feed()`,
|
||||
# - `open_symbol_search()`
|
||||
# NOTE: see ep invocation details inside `.data.feed`.
|
||||
try:
|
||||
async with trio.open_nursery() as service_nursery:
|
||||
bus: _FeedsBus = feed.get_feed_bus(
|
||||
brokername,
|
||||
service_nursery,
|
||||
)
|
||||
assert bus is feed._bus
|
||||
|
||||
# unblock caller
|
||||
await ctx.started()
|
||||
|
||||
# we pin this task to keep the feeds manager active until the
|
||||
# parent actor decides to tear it down
|
||||
await trio.sleep_forever()
|
||||
|
||||
except eg.ExceptionGroup:
|
||||
# TODO: likely some underlying `brokerd` IPC connection
|
||||
# broke so here we handle a respawn and re-connect attempt!
|
||||
# This likely should pair with development of the OCO task
|
||||
# nursery in dev over @ `tractor` B)
|
||||
# https://github.com/goodboy/tractor/pull/363
|
||||
raise
|
||||
|
||||
|
||||
def broker_init(
|
||||
brokername: str,
|
||||
loglevel: str | None = None,
|
||||
|
||||
**start_actor_kwargs,
|
||||
|
||||
) -> tuple[
|
||||
ModuleType,
|
||||
dict,
|
||||
AsyncContextManager,
|
||||
]:
|
||||
'''
|
||||
Given an input broker name, load all named arguments
|
||||
which can be passed for daemon endpoint + context spawn
|
||||
as required in every `brokerd` (actor) service.
|
||||
|
||||
This includes:
|
||||
- load the appropriate <brokername>.py pkg module,
|
||||
- read any declared `__enable_modules__: list[str]` which will be
|
||||
passed to `tractor.ActorNursery.start_actor(enable_modules=<this>)`
|
||||
at actor start time,
|
||||
- deliver a reference to the daemon lifetime fixture, which
|
||||
for now is always the `_setup_persistent_brokerd()` context defined
|
||||
above.
|
||||
|
||||
'''
|
||||
from ..brokers import get_brokermod
|
||||
brokermod = get_brokermod(brokername)
|
||||
modpath: str = brokermod.__name__
|
||||
|
||||
start_actor_kwargs['name'] = f'brokerd.{brokername}'
|
||||
start_actor_kwargs.update(
|
||||
getattr(
|
||||
brokermod,
|
||||
'_spawn_kwargs',
|
||||
{},
|
||||
)
|
||||
)
|
||||
|
||||
# XXX TODO: make this not so hacky/monkeypatched..
|
||||
# -> we need a sane way to configure the logging level for all
|
||||
# code running in brokerd.
|
||||
# if utilmod := getattr(brokermod, '_util', False):
|
||||
# utilmod.log.setLevel(loglevel.upper())
|
||||
|
||||
# lookup actor-enabled modules declared by the backend offering the
|
||||
# `brokerd` endpoint(s).
|
||||
enabled: list[str]
|
||||
enabled = start_actor_kwargs['enable_modules'] = [
|
||||
__name__, # so that eps from THIS mod can be invoked
|
||||
modpath,
|
||||
]
|
||||
for submodname in getattr(
|
||||
brokermod,
|
||||
'__enable_modules__',
|
||||
[],
|
||||
):
|
||||
subpath: str = f'{modpath}.{submodname}'
|
||||
enabled.append(subpath)
|
||||
|
||||
return (
|
||||
brokermod,
|
||||
start_actor_kwargs, # to `ActorNursery.start_actor()`
|
||||
|
||||
# XXX see impl above; contains all (actor global)
|
||||
# setup/teardown expected in all `brokerd` actor instances.
|
||||
_setup_persistent_brokerd,
|
||||
)
|
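To make the return contract concrete, here is a rough sketch of what the binance backend would hand back (values on the right are illustrative, derived from the code above, not captured output):

    brokermod, start_kwargs, fixture_ep = broker_init('binance', loglevel='info')
    # start_kwargs['name']           -> 'brokerd.binance'
    # start_kwargs['enable_modules'] -> [<this module>, 'piker.brokers.binance',
    #                                    *any backend-declared `__enable_modules__` subpaths]
    # fixture_ep                     -> _setup_persistent_brokerd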
||||
|
||||
|
||||
async def spawn_brokerd(
|
||||
|
||||
brokername: str,
|
||||
loglevel: str | None = None,
|
||||
|
||||
**tractor_kwargs,
|
||||
|
||||
) -> bool:
|
||||
|
||||
from piker.service._util import log # use service mngr log
|
||||
log.info(f'Spawning {brokername} broker daemon')
|
||||
|
||||
(
|
||||
brokermode,
|
||||
tractor_kwargs,
|
||||
daemon_fixture_ep,
|
||||
) = broker_init(
|
||||
brokername,
|
||||
loglevel,
|
||||
**tractor_kwargs,
|
||||
)
|
||||
|
||||
brokermod = get_brokermod(brokername)
|
||||
extra_tractor_kwargs = getattr(brokermod, '_spawn_kwargs', {})
|
||||
tractor_kwargs.update(extra_tractor_kwargs)
|
||||
|
||||
# ask `pikerd` to spawn a new sub-actor and manage it under its
|
||||
# actor nursery
|
||||
from piker.service import Services
|
||||
|
||||
dname: str = tractor_kwargs.pop('name') # f'brokerd.{brokername}'
|
||||
portal = await Services.actor_n.start_actor(
|
||||
dname,
|
||||
enable_modules=_data_mods + tractor_kwargs.pop('enable_modules'),
|
||||
debug_mode=Services.debug_mode,
|
||||
**tractor_kwargs
|
||||
)
|
||||
|
||||
# NOTE: the service mngr expects an already spawned actor + its
|
||||
# portal ref in order to do non-blocking setup of brokerd
|
||||
# service nursery.
|
||||
await Services.start_service_task(
|
||||
dname,
|
||||
portal,
|
||||
|
||||
# signature of target root-task endpoint
|
||||
daemon_fixture_ep,
|
||||
brokername=brokername,
|
||||
loglevel=loglevel,
|
||||
)
|
||||
return True
|
||||
|
||||
|
||||
@acm
|
||||
async def maybe_spawn_brokerd(
|
||||
|
||||
brokername: str,
|
||||
loglevel: str | None = None,
|
||||
|
||||
**pikerd_kwargs,
|
||||
|
||||
) -> tractor.Portal:
|
||||
'''
|
||||
Helper to spawn a brokerd service *from* a client who wishes to
|
||||
use the sub-actor-daemon but is fine with re-using any existing
|
||||
and contactable `brokerd`.
|
||||
|
||||
More or less, it acts as a cached-actor-getter factory.
|
||||
|
||||
'''
|
||||
from piker.service import maybe_spawn_daemon
|
||||
|
||||
async with maybe_spawn_daemon(
|
||||
|
||||
f'brokerd.{brokername}',
|
||||
service_task_target=spawn_brokerd,
|
||||
spawn_args={
|
||||
'brokername': brokername,
|
||||
},
|
||||
loglevel=loglevel,
|
||||
|
||||
**pikerd_kwargs,
|
||||
|
||||
) as portal:
|
||||
yield portal
|
|
@@ -1,5 +1,5 @@
|
|||
# piker: trading gear for hackers
|
||||
# Copyright (C) 2018-present Tyler Goodlet (in stewardship of piker0)
|
||||
# Copyright (C) 2018-present Tyler Goodlet (in stewardship of pikers)
|
||||
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU Affero General Public License as published by
|
||||
|
@@ -15,13 +15,32 @@
|
|||
# along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
"""
|
||||
Handy utils.
|
||||
Handy cross-broker utils.
|
||||
|
||||
"""
|
||||
from __future__ import annotations
|
||||
from functools import partial
|
||||
|
||||
import json
|
||||
import asks
|
||||
import httpx
|
||||
import logging
|
||||
|
||||
from ..log import colorize_json
|
||||
from ..log import (
|
||||
get_logger,
|
||||
get_console_log,
|
||||
colorize_json,
|
||||
)
|
||||
subsys: str = 'piker.brokers'
|
||||
|
||||
# NOTE: level should be reset by any actor that is spawned
|
||||
# as well as given a (more) explicit name/key such
|
||||
# as `piker.brokers.binance` matching the subpkg.
|
||||
log = get_logger(subsys)
|
||||
|
||||
get_console_log = partial(
|
||||
get_console_log,
|
||||
name=subsys,
|
||||
)
|
||||
|
||||
|
||||
class BrokerError(Exception):
|
||||
|
@@ -32,6 +51,7 @@ class SymbolNotFound(BrokerError):
|
|||
"Symbol not found by broker search"
|
||||
|
||||
|
||||
# TODO: these should probably be moved to `.tsp/.data`?
|
||||
class NoData(BrokerError):
|
||||
'''
|
||||
Symbol data not permitted or no data
|
||||
|
@@ -41,14 +61,15 @@ class NoData(BrokerError):
|
|||
def __init__(
|
||||
self,
|
||||
*args,
|
||||
frame_size: int = 1000,
|
||||
info: dict|None = None,
|
||||
|
||||
) -> None:
|
||||
super().__init__(*args)
|
||||
self.info: dict|None = info
|
||||
|
||||
# when raised, machinery can check if the backend
|
||||
# set a "frame size" for doing datetime calcs.
|
||||
self.frame_size: int = 1000
|
||||
# self.frame_size: int = 1000
|
||||
|
||||
|
||||
class DataUnavailable(BrokerError):
|
||||
|
@@ -69,18 +90,19 @@ class DataThrottle(BrokerError):
|
|||
# TODO: add in throttle metrics/feedback
|
||||
|
||||
|
||||
|
||||
def resproc(
|
||||
resp: asks.response_objects.Response,
|
||||
resp: httpx.Response,
|
||||
log: logging.Logger,
|
||||
return_json: bool = True,
|
||||
log_resp: bool = False,
|
||||
|
||||
) -> asks.response_objects.Response:
|
||||
"""Process response and return its json content.
|
||||
) -> httpx.Response:
|
||||
'''
|
||||
Process response and return its json content.
|
||||
|
||||
Raise the appropriate error on non-200 OK responses.
|
||||
"""
|
||||
|
||||
'''
|
||||
if resp.status_code != 200:
|
||||
raise BrokerError(resp.text)
|
||||
try:
|
||||
|
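A hypothetical call-site sketch for the httpx migration (the `sesh` client and endpoint path are assumptions, not from this diff):

    async def fetch_exchange_info(sesh: httpx.AsyncClient) -> dict:
        resp = await sesh.get('/api/v3/exchangeInfo')
        # raises `BrokerError` on any non-200 status, otherwise returns the json body
        return resproc(resp, log)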
|
|
@@ -1,566 +0,0 @@
|
|||
# piker: trading gear for hackers
|
||||
# Copyright (C) Guillermo Rodriguez (in stewardship for piker0)
|
||||
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU Affero General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU Affero General Public License for more details.
|
||||
|
||||
# You should have received a copy of the GNU Affero General Public License
|
||||
# along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
"""
|
||||
Binance backend
|
||||
|
||||
"""
|
||||
from contextlib import asynccontextmanager as acm
|
||||
from datetime import datetime
|
||||
from typing import (
|
||||
Any, Union, Optional,
|
||||
AsyncGenerator, Callable,
|
||||
)
|
||||
import time
|
||||
|
||||
import trio
|
||||
from trio_typing import TaskStatus
|
||||
import pendulum
|
||||
import asks
|
||||
from fuzzywuzzy import process as fuzzy
|
||||
import numpy as np
|
||||
import tractor
|
||||
from pydantic.dataclasses import dataclass
|
||||
from pydantic import BaseModel
|
||||
import wsproto
|
||||
|
||||
from .._cacheables import open_cached_client
|
||||
from ._util import resproc, SymbolNotFound
|
||||
from ..log import get_logger, get_console_log
|
||||
from ..data import ShmArray
|
||||
from ..data._web_bs import open_autorecon_ws, NoBsWs
|
||||
|
||||
log = get_logger(__name__)
|
||||
|
||||
|
||||
_url = 'https://api.binance.com'
|
||||
|
||||
|
||||
# Broker specific ohlc schema (rest)
|
||||
_ohlc_dtype = [
|
||||
('index', int),
|
||||
('time', int),
|
||||
('open', float),
|
||||
('high', float),
|
||||
('low', float),
|
||||
('close', float),
|
||||
('volume', float),
|
||||
('bar_wap', float), # will be zeroed by sampler if not filled
|
||||
|
||||
# XXX: some additional fields are defined in the docs:
|
||||
# https://binance-docs.github.io/apidocs/spot/en/#kline-candlestick-data
|
||||
|
||||
# ('close_time', int),
|
||||
# ('quote_vol', float),
|
||||
# ('num_trades', int),
|
||||
# ('buy_base_vol', float),
|
||||
# ('buy_quote_vol', float),
|
||||
# ('ignore', float),
|
||||
]
|
||||
|
||||
# UI components allow this to be declared such that additional
|
||||
# (historical) fields can be exposed.
|
||||
ohlc_dtype = np.dtype(_ohlc_dtype)
|
||||
|
||||
_show_wap_in_history = False
|
||||
|
||||
|
||||
# https://binance-docs.github.io/apidocs/spot/en/#exchange-information
|
||||
class Pair(BaseModel):
|
||||
symbol: str
|
||||
status: str
|
||||
|
||||
baseAsset: str
|
||||
baseAssetPrecision: int
|
||||
quoteAsset: str
|
||||
quotePrecision: int
|
||||
quoteAssetPrecision: int
|
||||
|
||||
baseCommissionPrecision: int
|
||||
quoteCommissionPrecision: int
|
||||
|
||||
orderTypes: list[str]
|
||||
|
||||
icebergAllowed: bool
|
||||
ocoAllowed: bool
|
||||
quoteOrderQtyMarketAllowed: bool
|
||||
isSpotTradingAllowed: bool
|
||||
isMarginTradingAllowed: bool
|
||||
|
||||
filters: list[dict[str, Union[str, int, float]]]
|
||||
permissions: list[str]
|
||||
|
||||
|
||||
@dataclass
|
||||
class OHLC:
|
||||
"""Description of the flattened OHLC quote format.
|
||||
|
||||
For schema details see:
|
||||
https://binance-docs.github.io/apidocs/spot/en/#kline-candlestick-streams
|
||||
|
||||
"""
|
||||
time: int
|
||||
|
||||
open: float
|
||||
high: float
|
||||
low: float
|
||||
close: float
|
||||
volume: float
|
||||
|
||||
close_time: int
|
||||
|
||||
quote_vol: float
|
||||
num_trades: int
|
||||
buy_base_vol: float
|
||||
buy_quote_vol: float
|
||||
ignore: int
|
||||
|
||||
# null the place holder for `bar_wap` until we
|
||||
# figure out what to extract for this.
|
||||
bar_wap: float = 0.0
|
||||
|
||||
|
||||
# convert datetime obj timestamp to unixtime in milliseconds
|
||||
def binance_timestamp(when):
|
||||
return int((when.timestamp() * 1000) + (when.microsecond / 1000))
|
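A quick sanity check of the unit conversion (using `pendulum` as imported above):

    when = pendulum.datetime(2021, 1, 1)  # UTC midnight
    assert binance_timestamp(when) == 1_609_459_200_000  # epoch milliseconds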
||||
|
||||
|
||||
class Client:
|
||||
|
||||
def __init__(self) -> None:
|
||||
self._sesh = asks.Session(connections=4)
|
||||
self._sesh.base_location = _url
|
||||
self._pairs: dict[str, Any] = {}
|
||||
|
||||
async def _api(
|
||||
self,
|
||||
method: str,
|
||||
params: dict,
|
||||
) -> dict[str, Any]:
|
||||
resp = await self._sesh.get(
|
||||
path=f'/api/v3/{method}',
|
||||
params=params,
|
||||
timeout=float('inf')
|
||||
)
|
||||
return resproc(resp, log)
|
||||
|
||||
async def symbol_info(
|
||||
|
||||
self,
|
||||
sym: Optional[str] = None,
|
||||
|
||||
) -> dict[str, Any]:
|
||||
'''Get symbol info for the exchange.
|
||||
|
||||
'''
|
||||
# TODO: we can load from our self._pairs cache
|
||||
# on repeat calls...
|
||||
|
||||
# will retrieve all symbols by default
|
||||
params = {}
|
||||
|
||||
if sym is not None:
|
||||
sym = sym.upper()
|
||||
params = {'symbol': sym}
|
||||
|
||||
resp = await self._api(
|
||||
'exchangeInfo',
|
||||
params=params,
|
||||
)
|
||||
|
||||
entries = resp['symbols']
|
||||
if not entries:
|
||||
raise SymbolNotFound(f'{sym} not found')
|
||||
|
||||
syms = {item['symbol']: item for item in entries}
|
||||
|
||||
if sym is not None:
|
||||
return syms[sym]
|
||||
else:
|
||||
return syms
|
||||
|
||||
async def cache_symbols(
|
||||
self,
|
||||
) -> dict:
|
||||
if not self._pairs:
|
||||
self._pairs = await self.symbol_info()
|
||||
|
||||
return self._pairs
|
||||
|
||||
async def search_symbols(
|
||||
self,
|
||||
pattern: str,
|
||||
limit: int = None,
|
||||
) -> dict[str, Any]:
|
||||
if self._pairs is not None:
|
||||
data = self._pairs
|
||||
else:
|
||||
data = await self.symbol_info()
|
||||
|
||||
matches = fuzzy.extractBests(
|
||||
pattern,
|
||||
data,
|
||||
score_cutoff=50,
|
||||
)
|
||||
# repack in dict form
|
||||
return {item[0]['symbol']: item[0]
|
||||
for item in matches}
|
||||
|
||||
async def bars(
|
||||
self,
|
||||
symbol: str,
|
||||
start_dt: Optional[datetime] = None,
|
||||
end_dt: Optional[datetime] = None,
|
||||
limit: int = 1000, # <- max allowed per query
|
||||
as_np: bool = True,
|
||||
|
||||
) -> dict:
|
||||
|
||||
if end_dt is None:
|
||||
end_dt = pendulum.now('UTC')
|
||||
|
||||
if start_dt is None:
|
||||
start_dt = end_dt.start_of(
|
||||
'minute').subtract(minutes=limit)
|
||||
|
||||
start_time = binance_timestamp(start_dt)
|
||||
end_time = binance_timestamp(end_dt)
|
||||
|
||||
# https://binance-docs.github.io/apidocs/spot/en/#kline-candlestick-data
|
||||
bars = await self._api(
|
||||
'klines',
|
||||
params={
|
||||
'symbol': symbol.upper(),
|
||||
'interval': '1m',
|
||||
'startTime': start_time,
|
||||
'endTime': end_time,
|
||||
'limit': limit
|
||||
}
|
||||
)
|
||||
|
||||
# TODO: pack this bars scheme into a ``pydantic`` validator type:
|
||||
# https://binance-docs.github.io/apidocs/spot/en/#kline-candlestick-data
|
||||
|
||||
# TODO: we should port this to ``pydantic`` to avoid doing
|
||||
# manual validation ourselves..
|
||||
new_bars = []
|
||||
for i, bar in enumerate(bars):
|
||||
|
||||
bar = OHLC(*bar)
|
||||
|
||||
row = []
|
||||
for j, (name, ftype) in enumerate(_ohlc_dtype[1:]):
|
||||
|
||||
# TODO: maybe we should go nanoseconds on all
|
||||
# history time stamps?
|
||||
if name == 'time':
|
||||
# convert to epoch seconds: float
|
||||
row.append(bar.time / 1000.0)
|
||||
|
||||
else:
|
||||
row.append(getattr(bar, name))
|
||||
|
||||
new_bars.append((i,) + tuple(row))
|
||||
|
||||
array = np.array(new_bars, dtype=_ohlc_dtype) if as_np else bars
|
||||
return array
|
||||
|
||||
|
||||
@acm
|
||||
async def get_client() -> Client:
|
||||
client = Client()
|
||||
await client.cache_symbols()
|
||||
yield client
|
||||
|
||||
|
||||
# validation type
|
||||
class AggTrade(BaseModel):
|
||||
e: str # Event type
|
||||
E: int # Event time
|
||||
s: str # Symbol
|
||||
a: int # Aggregate trade ID
|
||||
p: float # Price
|
||||
q: float # Quantity
|
||||
f: int # First trade ID
|
||||
l: int # Last trade ID
|
||||
T: int # Trade time
|
||||
m: bool # Is the buyer the market maker?
|
||||
M: bool # Ignore
|
||||
|
||||
|
||||
async def stream_messages(ws: NoBsWs) -> AsyncGenerator[NoBsWs, dict]:
|
||||
|
||||
timeouts = 0
|
||||
while True:
|
||||
|
||||
with trio.move_on_after(3) as cs:
|
||||
msg = await ws.recv_msg()
|
||||
|
||||
if cs.cancelled_caught:
|
||||
|
||||
timeouts += 1
|
||||
if timeouts > 2:
|
||||
log.error("binance feed seems down and slow af? rebooting...")
|
||||
await ws._connect()
|
||||
|
||||
continue
|
||||
|
||||
# for l1 streams binance doesn't add an event type field so
|
||||
# identify those messages by matching keys
|
||||
# https://binance-docs.github.io/apidocs/spot/en/#individual-symbol-book-ticker-streams
|
||||
|
||||
if msg.get('u'):
|
||||
sym = msg['s']
|
||||
bid = float(msg['b'])
|
||||
bsize = float(msg['B'])
|
||||
ask = float(msg['a'])
|
||||
asize = float(msg['A'])
|
||||
|
||||
yield 'l1', {
|
||||
'symbol': sym,
|
||||
'ticks': [
|
||||
{'type': 'bid', 'price': bid, 'size': bsize},
|
||||
{'type': 'bsize', 'price': bid, 'size': bsize},
|
||||
{'type': 'ask', 'price': ask, 'size': asize},
|
||||
{'type': 'asize', 'price': ask, 'size': asize}
|
||||
]
|
||||
}
|
||||
|
||||
elif msg.get('e') == 'aggTrade':
|
||||
|
||||
# validate
|
||||
msg = AggTrade(**msg)
|
||||
|
||||
# TODO: type out and require this quote format
|
||||
# from all backends!
|
||||
yield 'trade', {
|
||||
'symbol': msg.s,
|
||||
'last': msg.p,
|
||||
'brokerd_ts': time.time(),
|
||||
'ticks': [{
|
||||
'type': 'trade',
|
||||
'price': msg.p,
|
||||
'size': msg.q,
|
||||
'broker_ts': msg.T,
|
||||
}],
|
||||
}
|
||||
|
||||
|
||||
def make_sub(pairs: list[str], sub_name: str, uid: int) -> dict[str, str]:
|
||||
"""Create a request subscription packet dict.
|
||||
|
||||
https://binance-docs.github.io/apidocs/spot/en/#live-subscribing-unsubscribing-to-streams
|
||||
"""
|
||||
return {
|
||||
'method': 'SUBSCRIBE',
|
||||
'params': [
|
||||
f'{pair.lower()}@{sub_name}'
|
||||
for pair in pairs
|
||||
],
|
||||
'id': uid
|
||||
}
|
||||
|
||||
|
||||
@acm
|
||||
async def open_history_client(
|
||||
symbol: str,
|
||||
|
||||
) -> tuple[Callable, int]:
|
||||
|
||||
# TODO implement history getter for the new storage layer.
|
||||
async with open_cached_client('binance') as client:
|
||||
|
||||
async def get_ohlc(
|
||||
end_dt: Optional[datetime] = None,
|
||||
start_dt: Optional[datetime] = None,
|
||||
|
||||
) -> tuple[
|
||||
np.ndarray,
|
||||
datetime, # start
|
||||
datetime, # end
|
||||
]:
|
||||
|
||||
array = await client.bars(
|
||||
symbol,
|
||||
start_dt=start_dt,
|
||||
end_dt=end_dt,
|
||||
)
|
||||
start_dt = pendulum.from_timestamp(array[0]['time'])
|
||||
end_dt = pendulum.from_timestamp(array[-1]['time'])
|
||||
return array, start_dt, end_dt
|
||||
|
||||
yield get_ohlc, {'erlangs': 3, 'rate': 3}
|
||||
|
||||
|
||||
async def backfill_bars(
|
||||
sym: str,
|
||||
shm: ShmArray, # type: ignore # noqa
|
||||
task_status: TaskStatus[trio.CancelScope] = trio.TASK_STATUS_IGNORED,
|
||||
) -> None:
|
||||
"""Fill historical bars into shared mem / storage afap.
|
||||
"""
|
||||
with trio.CancelScope() as cs:
|
||||
async with open_cached_client('binance') as client:
|
||||
bars = await client.bars(symbol=sym)
|
||||
shm.push(bars)
|
||||
task_status.started(cs)
|
||||
|
||||
|
||||
async def stream_quotes(
|
||||
|
||||
send_chan: trio.abc.SendChannel,
|
||||
symbols: list[str],
|
||||
feed_is_live: trio.Event,
|
||||
loglevel: str = None,
|
||||
|
||||
# startup sync
|
||||
task_status: TaskStatus[tuple[dict, dict]] = trio.TASK_STATUS_IGNORED,
|
||||
|
||||
) -> None:
|
||||
# XXX: required to propagate ``tractor`` loglevel to piker logging
|
||||
get_console_log(loglevel or tractor.current_actor().loglevel)
|
||||
|
||||
sym_infos = {}
|
||||
uid = 0
|
||||
|
||||
async with (
|
||||
open_cached_client('binance') as client,
|
||||
send_chan as send_chan,
|
||||
):
|
||||
|
||||
# keep client cached for real-time section
|
||||
cache = await client.cache_symbols()
|
||||
|
||||
for sym in symbols:
|
||||
d = cache[sym.upper()]
|
||||
syminfo = Pair(**d) # validation
|
||||
|
||||
si = sym_infos[sym] = syminfo.dict()
|
||||
|
||||
# XXX: after manually inspecting the response format we
|
||||
# just directly pick out the info we need
|
||||
si['price_tick_size'] = float(syminfo.filters[0]['tickSize'])
|
||||
si['lot_tick_size'] = float(syminfo.filters[2]['stepSize'])
|
||||
si['asset_type'] = 'crypto'
|
||||
|
||||
symbol = symbols[0]
|
||||
|
||||
init_msgs = {
|
||||
# pass back token, and bool, signalling if we're the writer
|
||||
# and that history has been written
|
||||
symbol: {
|
||||
'symbol_info': sym_infos[sym],
|
||||
'shm_write_opts': {'sum_tick_vml': False},
|
||||
'fqsn': sym,
|
||||
},
|
||||
}
|
||||
|
||||
@acm
|
||||
async def subscribe(ws: wsproto.WSConnection):
|
||||
# setup subs
|
||||
|
||||
# trade data (aka L1)
|
||||
# https://binance-docs.github.io/apidocs/spot/en/#symbol-order-book-ticker
|
||||
l1_sub = make_sub(symbols, 'bookTicker', uid)
|
||||
await ws.send_msg(l1_sub)
|
||||
|
||||
# aggregate (each order clear by taker **not** by maker)
|
||||
# trades data:
|
||||
# https://binance-docs.github.io/apidocs/spot/en/#aggregate-trade-streams
|
||||
agg_trades_sub = make_sub(symbols, 'aggTrade', uid)
|
||||
await ws.send_msg(agg_trades_sub)
|
||||
|
||||
# ack from ws server
|
||||
res = await ws.recv_msg()
|
||||
assert res['id'] == uid
|
||||
|
||||
yield
|
||||
|
||||
subs = []
|
||||
for sym in symbols:
|
||||
subs.append("{sym}@aggTrade")
|
||||
subs.append("{sym}@bookTicker")
|
||||
|
||||
# unsub from all pairs on teardown
|
||||
await ws.send_msg({
|
||||
"method": "UNSUBSCRIBE",
|
||||
"params": subs,
|
||||
"id": uid,
|
||||
})
|
||||
|
||||
# XXX: do we need to ack the unsub?
|
||||
# await ws.recv_msg()
|
||||
|
||||
async with open_autorecon_ws(
|
||||
'wss://stream.binance.com/ws',
|
||||
fixture=subscribe,
|
||||
) as ws:
|
||||
|
||||
# pull a first quote and deliver
|
||||
msg_gen = stream_messages(ws)
|
||||
|
||||
typ, quote = await msg_gen.__anext__()
|
||||
|
||||
while typ != 'trade':
|
||||
# TODO: use ``anext()`` when it lands in 3.10!
|
||||
typ, quote = await msg_gen.__anext__()
|
||||
|
||||
task_status.started((init_msgs, quote))
|
||||
|
||||
# signal to caller feed is ready for consumption
|
||||
feed_is_live.set()
|
||||
|
||||
# import time
|
||||
# last = time.time()
|
||||
|
||||
# start streaming
|
||||
async for typ, msg in msg_gen:
|
||||
|
||||
# period = time.time() - last
|
||||
# hz = 1/period if period else float('inf')
|
||||
# if hz > 60:
|
||||
# log.info(f'Binance quotez : {hz}')
|
||||
|
||||
topic = msg['symbol'].lower()
|
||||
await send_chan.send({topic: msg})
|
||||
# last = time.time()
|
||||
|
||||
|
||||
@tractor.context
|
||||
async def open_symbol_search(
|
||||
ctx: tractor.Context,
|
||||
) -> Client:
|
||||
async with open_cached_client('binance') as client:
|
||||
|
||||
# load all symbols locally for fast search
|
||||
cache = await client.cache_symbols()
|
||||
await ctx.started()
|
||||
|
||||
async with ctx.open_stream() as stream:
|
||||
|
||||
async for pattern in stream:
|
||||
# results = await client.symbol_info(sym=pattern.upper())
|
||||
|
||||
matches = fuzzy.extractBests(
|
||||
pattern,
|
||||
cache,
|
||||
score_cutoff=50,
|
||||
)
|
||||
# repack in dict form
|
||||
await stream.send(
|
||||
{item[0]['symbol']: item[0]
|
||||
for item in matches}
|
||||
)
|
|
@@ -0,0 +1,60 @@
|
|||
# piker: trading gear for hackers
|
||||
# Copyright (C)
|
||||
# Guillermo Rodriguez (aka ze jefe)
|
||||
# Tyler Goodlet
|
||||
# (in stewardship for pikers)
|
||||
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU Affero General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU Affero General Public License for more details.
|
||||
|
||||
# You should have received a copy of the GNU Affero General Public License
|
||||
# along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
"""
|
||||
binancial secs on the floor, in the office, behind the dumpster.
|
||||
|
||||
"""
|
||||
from .api import (
|
||||
get_client,
|
||||
)
|
||||
from .feed import (
|
||||
get_mkt_info,
|
||||
open_history_client,
|
||||
open_symbol_search,
|
||||
stream_quotes,
|
||||
)
|
||||
from .broker import (
|
||||
open_trade_dialog,
|
||||
get_cost,
|
||||
)
|
||||
from .venues import (
|
||||
SpotPair,
|
||||
FutesPair,
|
||||
)
|
||||
|
||||
__all__ = [
|
||||
'get_client',
|
||||
'get_mkt_info',
|
||||
'get_cost',
|
||||
'SpotPair',
|
||||
'FutesPair',
|
||||
'open_trade_dialog',
|
||||
'open_history_client',
|
||||
'open_symbol_search',
|
||||
'stream_quotes',
|
||||
]
|
||||
|
||||
|
||||
# `brokerd` modules
|
||||
__enable_modules__: list[str] = [
|
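A hypothetical client-side sketch of that cached-getter pattern:

    async with maybe_spawn_brokerd(
        'kraken',  # any backend name from `__brokers__`
        loglevel='info',
    ) as portal:
        # `portal` points at either a freshly spawned or an already
        # running `brokerd.kraken` actor.
        ...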
||||
'api',
|
||||
'feed',
|
||||
'broker',
|
||||
]
|
File diff suppressed because it is too large
|
@@ -0,0 +1,705 @@
|
|||
# piker: trading gear for hackers
|
||||
# Copyright (C)
|
||||
# Guillermo Rodriguez (aka ze jefe)
|
||||
# Tyler Goodlet
|
||||
# (in stewardship for pikers)
|
||||
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU Affero General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU Affero General Public License for more details.
|
||||
|
||||
# You should have received a copy of the GNU Affero General Public License
|
||||
# along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
'''
|
||||
Live order control B)
|
||||
|
||||
'''
|
||||
from __future__ import annotations
|
||||
from pprint import pformat
|
||||
from typing import (
|
||||
Any,
|
||||
AsyncIterator,
|
||||
)
|
||||
import time
|
||||
from time import time_ns
|
||||
|
||||
from bidict import bidict
|
||||
import tractor
|
||||
import trio
|
||||
|
||||
from piker.accounting import (
|
||||
Asset,
|
||||
)
|
||||
from piker.brokers._util import (
|
||||
get_logger,
|
||||
)
|
||||
from piker.data._web_bs import (
|
||||
open_autorecon_ws,
|
||||
NoBsWs,
|
||||
)
|
||||
from piker.brokers import (
|
||||
open_cached_client,
|
||||
BrokerError,
|
||||
)
|
||||
from piker.clearing import (
|
||||
OrderDialogs,
|
||||
)
|
||||
from piker.clearing._messages import (
|
||||
BrokerdOrder,
|
||||
BrokerdOrderAck,
|
||||
BrokerdStatus,
|
||||
BrokerdPosition,
|
||||
BrokerdFill,
|
||||
BrokerdCancel,
|
||||
BrokerdError,
|
||||
Status,
|
||||
Order,
|
||||
)
|
||||
from .venues import (
|
||||
Pair,
|
||||
_futes_ws,
|
||||
_testnet_futes_ws,
|
||||
)
|
||||
from .api import Client
|
||||
|
||||
log = get_logger('piker.brokers.binance')
|
||||
|
||||
|
||||
# Fee schedule template, mostly for paper engine fees modelling.
|
||||
# https://www.binance.com/en/support/faq/what-are-market-makers-and-takers-360007720071
|
||||
def get_cost(
|
||||
price: float,
|
||||
size: float,
|
||||
is_taker: bool = False,
|
||||
|
||||
) -> float:
|
||||
|
||||
# https://www.binance.com/en/fee/trading
|
||||
cb: float = price * size
|
||||
match is_taker:
|
||||
case True:
|
||||
return cb * 0.001000
|
||||
|
||||
case False if cb < 1e6:
|
||||
return cb * 0.001000
|
||||
|
||||
case False if 1e6 <= cb < 5e6:
|
||||
return cb * 0.000900
|
||||
|
||||
# NOTE: there's more but are you really going
|
||||
# to have a cb bigger than this per trade?
|
||||
case False if cb >= 5e6:
|
||||
return cb * 0.000800
|
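A couple of illustrative spot checks of the tier logic above (numbers assume the 0.10% taker and 0.10%/0.09%/0.08% maker rates encoded in `get_cost()`):

    # taker fee on a $10k notional clear:
    get_cost(price=100.0, size=100.0, is_taker=True)   # ~10.0 (0.10% of 10_000)
    # maker fee in the middle tier (1e6 <= cb < 5e6):
    get_cost(price=20_000.0, size=100.0)               # ~1_800.0 (0.09% of 2_000_000)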
||||
|
||||
|
||||
async def handle_order_requests(
|
||||
ems_order_stream: tractor.MsgStream,
|
||||
client: Client,
|
||||
dids: bidict[str, str],
|
||||
dialogs: OrderDialogs,
|
||||
|
||||
) -> None:
|
||||
'''
|
||||
Receive order requests from `emsd`, translate them into API calls and transmit them.
|
||||
|
||||
'''
|
||||
msg: dict | BrokerdOrder | BrokerdCancel
|
||||
async for msg in ems_order_stream:
|
||||
log.info(f'Rx order request:\n{pformat(msg)}')
|
||||
match msg:
|
||||
case {
|
||||
'action': 'cancel',
|
||||
}:
|
||||
cancel = BrokerdCancel(**msg)
|
||||
existing: BrokerdOrder | None = dialogs.get(cancel.oid)
|
||||
if not existing:
|
||||
log.error(
|
||||
f'NO Existing order-dialog for {cancel.oid}!?'
|
||||
)
|
||||
await ems_order_stream.send(BrokerdError(
|
||||
oid=cancel.oid,
|
||||
|
||||
# TODO: do we need the symbol?
|
||||
# https://github.com/pikers/piker/issues/514
|
||||
symbol='unknown',
|
||||
|
||||
reason=(
|
||||
'Invalid `binance` order request dialog oid'
|
||||
)
|
||||
))
|
||||
continue
|
||||
|
||||
else:
|
||||
symbol: str = existing['symbol']
|
||||
try:
|
||||
await client.submit_cancel(
|
||||
symbol,
|
||||
cancel.oid,
|
||||
)
|
||||
except BrokerError as be:
|
||||
await ems_order_stream.send(
|
||||
BrokerdError(
|
||||
oid=msg['oid'],
|
||||
symbol=symbol,
|
||||
reason=(
|
||||
'`binance` CANCEL failed:\n'
|
||||
f'{be}'
|
||||
))
|
||||
)
|
||||
continue
|
||||
|
||||
case {
|
||||
'account': ('binance.usdtm' | 'binance.spot') as account,
|
||||
'action': action,
|
||||
} if action in {'buy', 'sell'}:
|
||||
|
||||
# validate
|
||||
order = BrokerdOrder(**msg)
|
||||
oid: str = order.oid # emsd order id
|
||||
modify: bool = False
|
||||
|
||||
# NOTE: check and report edits
|
||||
if existing := dialogs.get(order.oid):
|
||||
log.info(
|
||||
f'Existing order for {oid} updated:\n'
|
||||
f'{pformat(existing.maps[-1])} -> {pformat(msg)}'
|
||||
)
|
||||
modify = True
|
||||
|
||||
# only add new msg AFTER the existing check
|
||||
dialogs.add_msg(oid, msg)
|
||||
|
||||
else:
|
||||
# XXX NOTE: update before the ack!
|
||||
# track latest request state such that map
|
||||
# lookups start at the most recent msg and then
|
||||
# scan reverse-chronologically.
|
||||
dialogs.add_msg(oid, msg)
|
||||
|
||||
# XXX: ACK the request **immediately** before sending
|
||||
# the api side request to ensure the ems maps the oid ->
|
||||
# reqid correctly!
|
||||
resp = BrokerdOrderAck(
|
||||
oid=oid, # ems order request id
|
||||
reqid=oid, # our custom int mapping
|
||||
account='binance', # piker account
|
||||
)
|
||||
await ems_order_stream.send(resp)
|
||||
|
||||
# call our client api to submit the order
|
||||
# NOTE: modifies only require diff key for user oid:
|
||||
# https://binance-docs.github.io/apidocs/futures/en/#modify-order-trade
|
||||
try:
|
||||
reqid = await client.submit_limit(
|
||||
symbol=order.symbol,
|
||||
side=order.action,
|
||||
quantity=order.size,
|
||||
price=order.price,
|
||||
oid=oid,
|
||||
modify=modify,
|
||||
)
|
||||
|
||||
# SMH they do gen their own order id: ints..
|
||||
# assert reqid == order.oid
|
||||
dids[order.oid] = reqid
|
||||
|
||||
except BrokerError as be:
|
||||
await ems_order_stream.send(
|
||||
BrokerdError(
|
||||
oid=msg['oid'],
|
||||
symbol=msg['symbol'],
|
||||
reason=(
|
||||
'`binance` request failed:\n'
|
||||
f'{be}'
|
||||
))
|
||||
)
|
||||
continue
|
||||
|
||||
case _:
|
||||
account = msg.get('account')
|
||||
if account not in {'binance.spot', 'binance.usdtm'}:
|
||||
log.error(
|
||||
'Order request does not have a valid binance account name?\n'
|
||||
'Only one of\n'
|
||||
'- `binance.spot` or,\n'
|
||||
'- `binance.usdtm`\n'
|
||||
'is currently valid!'
|
||||
)
|
||||
await ems_order_stream.send(
|
||||
BrokerdError(
|
||||
oid=msg['oid'],
|
||||
symbol=msg['symbol'],
|
||||
reason=(
|
||||
f'Invalid `binance` broker request msg:\n{msg}'
|
||||
))
|
||||
)
|
||||
|
||||
|
||||
@tractor.context
|
||||
async def open_trade_dialog(
|
||||
ctx: tractor.Context,
|
||||
|
||||
) -> AsyncIterator[dict[str, Any]]:
|
||||
|
||||
# TODO: how do we set this from the EMS such that
|
||||
# positions are loaded from the correct venue on the user
|
||||
# stream at startup? (that is in an attempt to support both
|
||||
# spot and futes markets?)
|
||||
# - I guess we just want to instead start 2 separate user
|
||||
# stream tasks right? unless we want another actor pool?
|
||||
# XXX: see issue: <urlhere>
|
||||
venue_name: str = 'futes'
|
||||
venue_mode: str = 'usdtm_futes'
|
||||
account_name: str = 'usdtm'
|
||||
use_testnet: bool = False
|
||||
|
||||
# TODO: if/when we add .accounting support we need to
|
||||
# do a open_symcache() call.. though maybe we can hide
|
||||
# this in a new async version of open_account()?
|
||||
async with open_cached_client('binance') as client:
|
||||
subconf: dict = client.conf[venue_name]
|
||||
use_testnet = subconf.get('use_testnet', False)
|
||||
|
||||
# XXX: if no futes.api_key or spot.api_key has been set we
|
||||
# always fall back to the paper engine!
|
||||
if not subconf.get('api_key'):
|
||||
await ctx.started('paper')
|
||||
return
|
||||
|
||||
async with (
|
||||
open_cached_client('binance') as client,
|
||||
):
|
||||
client.mkt_mode: str = venue_mode
|
||||
|
||||
# TODO: map these wss urls depending on spot or futes
|
||||
# setting passed when this task is spawned?
|
||||
wss_url: str = _futes_ws if not use_testnet else _testnet_futes_ws
|
||||
|
||||
wss: NoBsWs
|
||||
async with (
|
||||
client.manage_listen_key() as listen_key,
|
||||
open_autorecon_ws(f'{wss_url}/?listenKey={listen_key}') as wss,
|
||||
):
|
||||
nsid: int = time_ns()
|
||||
await wss.send_msg({
|
||||
# "method": "SUBSCRIBE",
|
||||
"method": "REQUEST",
|
||||
"params":
|
||||
[
|
||||
f"{listen_key}@account",
|
||||
f"{listen_key}@balance",
|
||||
f"{listen_key}@position",
|
||||
|
||||
# TODO: does this even work!? seems to cause
|
||||
# a hang on the first msg..? lelelel.
|
||||
# f"{listen_key}@order",
|
||||
],
|
||||
"id": nsid
|
||||
})
|
||||
|
||||
with trio.fail_after(6):
|
||||
msg = await wss.recv_msg()
|
||||
assert msg['id'] == nsid
|
||||
|
||||
# TODO: load other market wide data / statistics:
|
||||
# - OI: https://binance-docs.github.io/apidocs/futures/en/#open-interest
|
||||
# - OI stats: https://binance-docs.github.io/apidocs/futures/en/#open-interest-statistics
|
||||
accounts: bidict[str, str] = bidict({'binance.usdtm': None})
|
||||
balances: dict[Asset, float] = {}
|
||||
positions: list[BrokerdPosition] = []
|
||||
|
||||
for resp_dict in msg['result']:
|
||||
resp: dict = resp_dict['res']
|
||||
req: str = resp_dict['req']
|
||||
|
||||
# @account response should be something like:
|
||||
# {'accountAlias': 'sRFzFzAuuXsR',
|
||||
# 'canDeposit': True,
|
||||
# 'canTrade': True,
|
||||
# 'canWithdraw': True,
|
||||
# 'feeTier': 0}
|
||||
if 'account' in req:
|
||||
# NOTE: fill in the hash-like key/alias binance
|
||||
# provides for the account.
|
||||
alias: str = resp['accountAlias']
|
||||
accounts['binance.usdtm'] = alias
|
||||
|
||||
# @balance response:
|
||||
# {'accountAlias': 'sRFzFzAuuXsR',
|
||||
# 'balances': [{'asset': 'BTC',
|
||||
# 'availableBalance': '0.00000000',
|
||||
# 'balance': '0.00000000',
|
||||
# 'crossUnPnl': '0.00000000',
|
||||
# 'crossWalletBalance': '0.00000000',
|
||||
# 'maxWithdrawAmount': '0.00000000',
|
||||
# 'updateTime': 0}]
|
||||
# ...
|
||||
# }
|
||||
elif 'balance' in req:
|
||||
for entry in resp['balances']:
|
||||
name: str = entry['asset']
|
||||
balance: float = float(entry['balance'])
|
||||
last_update_t: int = entry['updateTime']
|
||||
|
||||
spot_asset: Asset = client._venue2assets['spot'][name]
|
||||
|
||||
if balance > 0:
|
||||
balances[spot_asset] = (balance, last_update_t)
|
||||
# await tractor.pause()
|
||||
|
||||
# @position response:
|
||||
# {'positions': [{'entryPrice': '0.0',
|
||||
# 'isAutoAddMargin': False,
|
||||
# 'isolatedMargin': '0',
|
||||
# 'leverage': 20,
|
||||
# 'liquidationPrice': '0',
|
||||
# 'marginType': 'CROSSED',
|
||||
# 'markPrice': '0.60289650',
|
||||
# 'markPrice': '0.00000000',
|
||||
# 'maxNotionalValue': '25000',
|
||||
# 'notional': '0',
|
||||
# 'positionAmt': '0',
|
||||
# 'positionSide': 'BOTH',
|
||||
# 'symbol': 'ETHUSDT_230630',
|
||||
# 'unRealizedProfit': '0.00000000',
|
||||
# 'updateTime': 1672741444894}
|
||||
# ...
|
||||
# }
|
||||
elif 'position' in req:
|
||||
for entry in resp['positions']:
|
||||
bs_mktid: str = entry['symbol']
|
||||
entry_size: float = float(entry['positionAmt'])
|
||||
|
||||
pair: Pair | None = client._venue2pairs[
|
||||
venue_mode
|
||||
].get(bs_mktid)
|
||||
if (
|
||||
pair
|
||||
and entry_size > 0
|
||||
):
|
||||
entry_price: float = float(entry['entryPrice'])
|
||||
|
||||
ppmsg = BrokerdPosition(
|
||||
broker='binance',
|
||||
account=f'binance.{account_name}',
|
||||
|
||||
# TODO: maybe we should be passing back
|
||||
# a `MktPair` here?
|
||||
symbol=pair.bs_fqme.lower() + '.binance',
|
||||
|
||||
size=entry_size,
|
||||
avg_price=entry_price,
|
||||
)
|
||||
positions.append(ppmsg)
|
||||
|
||||
if pair is None:
|
||||
log.warning(
|
||||
f'`{bs_mktid}` Position entry but no market pair?\n'
|
||||
f'{pformat(entry)}\n'
|
||||
)
|
||||
|
||||
await ctx.started((
|
||||
positions,
|
||||
list(accounts)
|
||||
))
|
||||
|
||||
# TODO: package more state tracking into the dialogs API?
|
||||
# - hmm maybe we could include `OrderDialogs.dids:
|
||||
# bidict` as part of the interface and then ask for
|
||||
# a reqid field to be passed at init?
|
||||
# |-> `OrderDialog(reqid_field='orderId')` kinda thing?
|
||||
# - also maybe bundle in some kind of dialog to account
|
||||
# table?
|
||||
dialogs = OrderDialogs()
|
||||
dids: dict[str, int] = bidict()
|
||||
|
||||
# TODO: further init setup things to get full EMS and
|
||||
# .accounting support B)
|
||||
# - live order loading via user stream subscription and
|
||||
# update to the order dialog table.
|
||||
# - MAKE SURE we add live orders loaded during init
|
||||
# into the dialogs table to ensure they can be
|
||||
# cancelled, meaning we can do a symbol lookup.
|
||||
# - position loading using `piker.accounting` subsys
|
||||
# and comparison with binance's own position calcs.
|
||||
# - load pps and accounts using accounting apis, write
|
||||
# the ledger and account files
|
||||
# - table: Account
|
||||
# - ledger: TransactionLedger
|
||||
|
||||
async with (
|
||||
trio.open_nursery() as tn,
|
||||
ctx.open_stream() as ems_stream,
|
||||
):
|
||||
# deliver all pre-exist open orders to EMS thus syncing
|
||||
# state with existing live limits reported by them.
|
||||
order: Order
|
||||
for order in await client.get_open_orders():
|
||||
status_msg = Status(
|
||||
time_ns=time.time_ns(),
|
||||
resp='open',
|
||||
oid=order.oid,
|
||||
reqid=order.oid,
|
||||
|
||||
# embedded order info
|
||||
req=order,
|
||||
src='binance',
|
||||
)
|
||||
dialogs.add_msg(order.oid, order.to_dict())
|
||||
await ems_stream.send(status_msg)
|
||||
|
||||
tn.start_soon(
|
||||
handle_order_requests,
|
||||
ems_stream,
|
||||
client,
|
||||
dids,
|
||||
dialogs,
|
||||
)
|
||||
tn.start_soon(
|
||||
handle_order_updates,
|
||||
venue_mode,
|
||||
account_name,
|
||||
client,
|
||||
ems_stream,
|
||||
wss,
|
||||
dialogs,
|
||||
|
||||
)
|
||||
|
||||
await trio.sleep_forever()
|
||||
|
||||
|
||||
async def handle_order_updates(
|
||||
venue: str,
|
||||
account_name: str,
|
||||
client: Client,
|
||||
ems_stream: tractor.MsgStream,
|
||||
wss: NoBsWs,
|
||||
dialogs: OrderDialogs,
|
||||
|
||||
) -> None:
|
||||
'''
|
||||
Main msg handling loop for all things order management.
|
||||
|
||||
This code is broken out to make the context explicit and state
|
||||
variables defined in the signature clear to the reader.
|
||||
|
||||
'''
|
||||
async for msg in wss:
|
||||
log.info(f'Rx USERSTREAM msg:\n{pformat(msg)}')
|
||||
match msg:
|
||||
|
||||
# ORDER update
|
||||
# spot: https://binance-docs.github.io/apidocs/spot/en/#payload-balance-update
|
||||
# futes: https://binance-docs.github.io/apidocs/futures/en/#event-order-update
|
||||
# futes: https://binance-docs.github.io/apidocs/futures/en/#event-balance-and-position-update
|
||||
# {'o': {
|
||||
# 'L': '0',
|
||||
# 'N': 'USDT',
|
||||
# 'R': False,
|
||||
# 'S': 'BUY',
|
||||
# 'T': 1687028772484,
|
||||
# 'X': 'NEW',
|
||||
# 'a': '0',
|
||||
# 'ap': '0',
|
||||
# 'b': '7012.06520',
|
||||
# 'c': '518d4122-8d3e-49b0-9a1e-1fabe6f62e4c',
|
||||
# 'cp': False,
|
||||
# 'f': 'GTC',
|
||||
# 'i': 3376956924,
|
||||
# 'l': '0',
|
||||
# 'm': False,
|
||||
# 'n': '0',
|
||||
# 'o': 'LIMIT',
|
||||
# 'ot': 'LIMIT',
|
||||
# 'p': '21136.80',
|
||||
# 'pP': False,
|
||||
# 'ps': 'BOTH',
|
||||
# 'q': '0.047',
|
||||
# 'rp': '0',
|
||||
# 's': 'BTCUSDT',
|
||||
# 'si': 0,
|
||||
# 'sp': '0',
|
||||
# 'ss': 0,
|
||||
# 't': 0,
|
||||
# 'wt': 'CONTRACT_PRICE',
|
||||
# 'x': 'NEW',
|
||||
# 'z': '0'}
|
||||
# }
|
||||
case {
|
||||
# 'e': 'executionReport',
|
||||
'e': 'ORDER_TRADE_UPDATE',
|
||||
'T': int(epoch_ms),
|
||||
'o': {
|
||||
's': bs_mktid,
|
||||
|
||||
# XXX NOTE XXX see special ids for market
|
||||
# events or margin calls:
|
||||
# // special client order id:
|
||||
# // starts with "autoclose-": liquidation order
|
||||
# // "adl_autoclose": ADL auto close order
|
||||
# // "settlement_autoclose-": settlement order
|
||||
# for delisting or delivery
|
||||
'c': oid,
|
||||
# 'i': reqid, # binance internal int id
|
||||
|
||||
# prices
|
||||
'a': submit_price,
|
||||
'ap': avg_price,
|
||||
'L': fill_price,
|
||||
|
||||
# sizing
|
||||
'q': req_size,
|
||||
'l': clear_size_filled, # this event
|
||||
'z': accum_size_filled, # accum
|
||||
|
||||
# commissions
|
||||
'n': cost,
|
||||
'N': cost_asset,
|
||||
|
||||
# state
|
||||
'S': side,
|
||||
'X': status,
|
||||
},
|
||||
} as order_msg:
|
||||
log.info(
|
||||
f'{status} for {side} ORDER oid: {oid}\n'
|
||||
f'bs_mktid: {bs_mktid}\n\n'
|
||||
|
||||
f'order size: {req_size}\n'
|
||||
f'cleared size: {clear_size_filled}\n'
|
||||
f'accum filled size: {accum_size_filled}\n\n'
|
||||
|
||||
f'submit price: {submit_price}\n'
|
||||
f'fill_price: {fill_price}\n'
|
||||
f'avg clearing price: {avg_price}\n\n'
|
||||
|
||||
f'cost: {cost}@{cost_asset}\n'
|
||||
)
|
||||
|
||||
# status remap from binance to piker's
|
||||
# status set:
|
||||
# - NEW
|
||||
# - PARTIALLY_FILLED
|
||||
# - FILLED
|
||||
# - CANCELED
|
||||
# - EXPIRED
|
||||
# https://binance-docs.github.io/apidocs/futures/en/#event-order-update
|
||||
|
||||
req_size: float = float(req_size)
|
||||
accum_size_filled: float = float(accum_size_filled)
|
||||
fill_price: float = float(fill_price)
|
||||
|
||||
match status:
|
||||
case 'PARTIALLY_FILLED' | 'FILLED':
|
||||
status = 'fill'
|
||||
|
||||
fill_msg = BrokerdFill(
|
||||
time_ns=time_ns(),
|
||||
# reqid=reqid,
|
||||
reqid=oid,
|
||||
|
||||
# just use size value for now?
|
||||
# action=action,
|
||||
size=clear_size_filled,
|
||||
price=fill_price,
|
||||
|
||||
# TODO: maybe capture more msg data
|
||||
# i.e fees?
|
||||
broker_details={'name': 'broker'} | order_msg,
|
||||
broker_time=time.time(),
|
||||
)
|
||||
await ems_stream.send(fill_msg)
|
||||
|
||||
if accum_size_filled == req_size:
|
||||
status = 'closed'
|
||||
dialogs.pop(oid)
|
||||
|
||||
case 'NEW':
|
||||
status = 'open'
|
||||
|
||||
case 'EXPIRED':
|
||||
status = 'canceled'
|
||||
dialogs.pop(oid)
|
||||
|
||||
case _:
|
||||
status = status.lower()
|
||||
|
||||
resp = BrokerdStatus(
|
||||
time_ns=time_ns(),
|
||||
# reqid=reqid,
|
||||
reqid=oid,
|
||||
|
||||
# TODO: i feel like we don't need to make the
|
||||
# ems and upstream clients aware of this?
|
||||
# account='binance.usdtm',
|
||||
|
||||
status=status,
|
||||
|
||||
filled=accum_size_filled,
|
||||
remaining=req_size - accum_size_filled,
|
||||
broker_details={
|
||||
'name': 'binance',
|
||||
'broker_time': epoch_ms / 1000.
|
||||
}
|
||||
)
|
||||
await ems_stream.send(resp)
|
||||
|
||||
# ACCOUNT and POSITION update B)
|
||||
# {
|
||||
# 'E': 1687036749218,
|
||||
# 'e': 'ACCOUNT_UPDATE'
|
||||
# 'T': 1687036749215,
|
||||
# 'a': {'B': [{'a': 'USDT',
|
||||
# 'bc': '0',
|
||||
# 'cw': '1267.48920735',
|
||||
# 'wb': '1410.90245576'}],
|
||||
# 'P': [{'cr': '-3292.10973007',
|
||||
# 'ep': '26349.90000',
|
||||
# 'iw': '143.41324841',
|
||||
# 'ma': 'USDT',
|
||||
# 'mt': 'isolated',
|
||||
# 'pa': '0.038',
|
||||
# 'ps': 'BOTH',
|
||||
# 's': 'BTCUSDT',
|
||||
# 'up': '5.17555453'}],
|
||||
# 'm': 'ORDER'},
|
||||
# }
|
||||
case {
|
||||
'T': int(epoch_ms),
|
||||
'e': 'ACCOUNT_UPDATE',
|
||||
'a': {
|
||||
'P': [{
|
||||
's': bs_mktid,
|
||||
'pa': pos_amount,
|
||||
'ep': entry_price,
|
||||
}],
|
||||
},
|
||||
}:
|
||||
# real-time relay position updates back to EMS
|
||||
pair: Pair | None = client._venue2pairs[venue].get(bs_mktid)
|
||||
ppmsg = BrokerdPosition(
|
||||
broker='binance',
|
||||
account=f'binance.{account_name}',
|
||||
|
||||
# TODO: maybe we should be passing back
|
||||
# a `MktPair` here?
|
||||
symbol=pair.bs_fqme.lower() + '.binance',
|
||||
|
||||
size=float(pos_amount),
|
||||
avg_price=float(entry_price),
|
||||
)
|
||||
await ems_stream.send(ppmsg)
|
||||
|
||||
case _:
|
||||
log.warning(
|
||||
'Unhandled event:\n'
|
||||
f'{pformat(msg)}'
|
||||
)
|
|
@@ -0,0 +1,547 @@
|
|||
# piker: trading gear for hackers
|
||||
# Copyright (C) Tyler Goodlet (in stewardship for pikers)
|
||||
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU Affero General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU Affero General Public License for more details.
|
||||
|
||||
# You should have received a copy of the GNU Affero General Public License
|
||||
# along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
'''
|
||||
Real-time and historical data feed endpoints.
|
||||
|
||||
'''
|
||||
from __future__ import annotations
|
||||
from contextlib import (
|
||||
asynccontextmanager as acm,
|
||||
aclosing,
|
||||
)
|
||||
from datetime import datetime
|
||||
from functools import (
|
||||
partial,
|
||||
)
|
||||
import itertools
|
||||
from pprint import pformat
|
||||
from typing import (
|
||||
Any,
|
||||
AsyncGenerator,
|
||||
Callable,
|
||||
Generator,
|
||||
)
|
||||
import time
|
||||
|
||||
import trio
|
||||
from trio_typing import TaskStatus
|
||||
from pendulum import (
|
||||
from_timestamp,
|
||||
)
|
||||
from rapidfuzz import process as fuzzy
|
||||
import numpy as np
|
||||
import tractor
|
||||
|
||||
from piker.brokers import (
|
||||
open_cached_client,
|
||||
)
|
||||
from piker._cacheables import (
|
||||
async_lifo_cache,
|
||||
)
|
||||
from piker.accounting import (
|
||||
Asset,
|
||||
DerivTypes,
|
||||
MktPair,
|
||||
unpack_fqme,
|
||||
)
|
||||
from piker.types import Struct
|
||||
from piker.data.validate import FeedInit
|
||||
from piker.data._web_bs import (
|
||||
open_autorecon_ws,
|
||||
NoBsWs,
|
||||
)
|
||||
from piker.brokers._util import (
|
||||
DataUnavailable,
|
||||
get_logger,
|
||||
)
|
||||
|
||||
from .api import (
|
||||
Client,
|
||||
)
|
||||
from .venues import (
|
||||
Pair,
|
||||
FutesPair,
|
||||
get_api_eps,
|
||||
)
|
||||
|
||||
log = get_logger('piker.brokers.binance')
|
||||
|
||||
|
||||
class L1(Struct):
|
||||
# https://binance-docs.github.io/apidocs/spot/en/#individual-symbol-book-ticker-streams
|
||||
|
||||
update_id: int
|
||||
sym: str
|
||||
|
||||
bid: float
|
||||
bsize: float
|
||||
ask: float
|
||||
asize: float
|
||||
|
||||
|
||||
# validation type
|
||||
class AggTrade(Struct, frozen=True):
|
||||
e: str # Event type
|
||||
E: int # Event time
|
||||
s: str # Symbol
|
||||
a: int # Aggregate trade ID
|
||||
p: float # Price
|
||||
q: float # Quantity
|
||||
f: int # First trade ID
|
||||
l: int # noqa Last trade ID
|
||||
T: int # Trade time
|
||||
m: bool # Is the buyer the market maker?
|
||||
M: bool | None = None # Ignore
|
||||
|
||||
|
||||
async def stream_messages(
|
||||
ws: NoBsWs,
|
||||
) -> AsyncGenerator[NoBsWs, dict]:
|
||||
|
||||
# TODO: match syntax here!
|
||||
msg: dict[str, Any]
|
||||
async for msg in ws:
|
||||
match msg:
|
||||
# for l1 streams binance doesn't add an event type field so
|
||||
# identify those messages by matching keys
|
||||
# https://binance-docs.github.io/apidocs/spot/en/#individual-symbol-book-ticker-streams
|
||||
case {
|
||||
# NOTE: this is never an old value it seems, so
|
||||
# they are always sending real L1 spread updates.
|
||||
'u': upid, # update id
|
||||
's': sym,
|
||||
'b': bid,
|
||||
'B': bsize,
|
||||
'a': ask,
|
||||
'A': asize,
|
||||
}:
|
||||
# TODO: it would be super nice to have a `L1` piker type
|
||||
# which "renders" incremental tick updates from a packed
|
||||
# msg-struct:
|
||||
# - backend msgs, once packed into the type, such that we
|
||||
# can reduce IPC usage but without each backend having
|
||||
# to do that incremental update logic manually B)
|
||||
# - would it maybe be more efficient to use this instead?
|
||||
# https://binance-docs.github.io/apidocs/spot/en/#diff-depth-stream
|
||||
l1 = L1(
|
||||
update_id=upid,
|
||||
sym=sym,
|
||||
bid=bid,
|
||||
bsize=bsize,
|
||||
ask=ask,
|
||||
asize=asize,
|
||||
)
|
||||
# for speed probably better to only specifically
|
||||
# cast fields we need in numerical form?
|
||||
# l1.typecast()
|
||||
|
||||
# repack into piker's tick-quote format
|
||||
yield 'l1', {
|
||||
'symbol': l1.sym,
|
||||
'ticks': [
|
||||
{
|
||||
'type': 'bid',
|
||||
'price': float(l1.bid),
|
||||
'size': float(l1.bsize),
|
||||
},
|
||||
{
|
||||
'type': 'bsize',
|
||||
'price': float(l1.bid),
|
||||
'size': float(l1.bsize),
|
||||
},
|
||||
{
|
||||
'type': 'ask',
|
||||
'price': float(l1.ask),
|
||||
'size': float(l1.asize),
|
||||
},
|
||||
{
|
||||
'type': 'asize',
|
||||
'price': float(l1.ask),
|
||||
'size': float(l1.asize),
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
# https://binance-docs.github.io/apidocs/spot/en/#aggregate-trade-streams
|
||||
case {
|
||||
'e': 'aggTrade',
|
||||
}:
|
||||
# NOTE: this is purely for a definition,
|
||||
# ``msgspec.Struct`` does not runtime-validate until you
|
||||
# decode/encode, see:
|
||||
# https://jcristharif.com/msgspec/structs.html#type-validation
|
||||
msg = AggTrade(**msg) # TODO: should we .copy() ?
|
||||
piker_quote: dict = {
|
||||
'symbol': msg.s,
|
||||
'last': float(msg.p),
|
||||
'brokerd_ts': time.time(),
|
||||
'ticks': [{
|
||||
'type': 'trade',
|
||||
'price': float(msg.p),
|
||||
'size': float(msg.q),
|
||||
'broker_ts': msg.T,
|
||||
}],
|
||||
}
|
||||
yield 'trade', piker_quote
|
||||
|
||||
|
||||
def make_sub(pairs: list[str], sub_name: str, uid: int) -> dict[str, str]:
|
||||
'''
|
||||
Create a request subscription packet dict.
|
||||
|
||||
- spot:
|
||||
https://binance-docs.github.io/apidocs/spot/en/#live-subscribing-unsubscribing-to-streams
|
||||
|
||||
- futes:
|
||||
https://binance-docs.github.io/apidocs/futures/en/#websocket-market-streams
|
||||
|
||||
'''
|
||||
return {
|
||||
'method': 'SUBSCRIBE',
|
||||
'params': [
|
||||
f'{pair.lower()}@{sub_name}'
|
||||
for pair in pairs
|
||||
],
|
||||
'id': uid
|
||||
}
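# For illustration only (not part of the original diff; the pair list and
# request id below are hypothetical), a call like
# `make_sub(['BTCUSDT', 'ETHUSDT'], 'bookTicker', 1)` produces:
#
#   {
#       'method': 'SUBSCRIBE',
#       'params': ['btcusdt@bookTicker', 'ethusdt@bookTicker'],
#       'id': 1,
#   }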
|
||||
|
||||
|
||||
@acm
|
||||
async def open_history_client(
|
||||
mkt: MktPair,
|
||||
|
||||
) -> tuple[Callable, int]:
|
||||
|
||||
# TODO implement history getter for the new storage layer.
|
||||
async with open_cached_client('binance') as client:
|
||||
|
||||
async def get_ohlc(
|
||||
timeframe: float,
|
||||
end_dt: datetime | None = None,
|
||||
start_dt: datetime | None = None,
|
||||
|
||||
) -> tuple[
|
||||
np.ndarray,
|
||||
datetime, # start
|
||||
datetime, # end
|
||||
]:
|
||||
if timeframe != 60:
|
||||
raise DataUnavailable('Only 1m bars are supported')
|
||||
|
||||
# TODO: better wrapping for venue / mode?
|
||||
# - eventually logic for usd vs. coin settled futes
|
||||
# based on `MktPair.src` type/value?
|
||||
# - maybe something like `async with
|
||||
# Client.use_venue('usdtm_futes')`
|
||||
if mkt.type_key in DerivTypes:
|
||||
client.mkt_mode = 'usdtm_futes'
|
||||
else:
|
||||
client.mkt_mode = 'spot'
|
||||
|
||||
# NOTE: always query using their native symbology!
|
||||
mktid: str = mkt.bs_mktid
|
||||
array = await client.bars(
|
||||
mktid,
|
||||
start_dt=start_dt,
|
||||
end_dt=end_dt,
|
||||
)
|
||||
times = array['time']
|
||||
if (
|
||||
end_dt is None
|
||||
):
|
||||
inow = round(time.time())
|
||||
if (inow - times[-1]) > 60:
|
||||
await tractor.pause()
|
||||
|
||||
start_dt = from_timestamp(times[0])
|
||||
end_dt = from_timestamp(times[-1])
|
||||
|
||||
return array, start_dt, end_dt
|
||||
|
||||
yield get_ohlc, {'erlangs': 3, 'rate': 3}
|
||||
|
||||
|
||||
@async_lifo_cache()
|
||||
async def get_mkt_info(
|
||||
fqme: str,
|
||||
|
||||
) -> tuple[MktPair, Pair] | None:
|
||||
|
||||
# uppercase since kraken bs_mktid is always upper
|
||||
if 'binance' not in fqme.lower():
|
||||
fqme += '.binance'
|
||||
|
||||
mkt_mode: str = ''
|
||||
broker, mkt_ep, venue, expiry = unpack_fqme(fqme)
|
||||
|
||||
# NOTE: we always upper case all tokens to be consistent with
|
||||
# binance's symbology style for pairs, like `BTCUSDT`, but in
|
||||
# theory we could also just keep things lower case; as long as
|
||||
# we're consistent and the symcache matches whatever this func
|
||||
# returns, always!
|
||||
expiry: str = expiry.upper()
|
||||
venue: str = venue.upper()
|
||||
venue_lower: str = venue.lower()
|
||||
|
||||
# XXX TODO: we should change the usdtm_futes name to just
|
||||
# usdm_futes (dropping the tether part) since it turns out that
|
||||
# there are indeed USD-tokens OTHER THAN tether being used as
|
||||
# the margin assets.. it's going to require a wholesale
|
||||
# (variable/key) rename as well as file name adjustments to any
|
||||
# existing tsdb set..
|
||||
if 'usd' in venue_lower:
|
||||
mkt_mode: str = 'usdtm_futes'
|
||||
|
||||
# NO IDEA what these contracts (some kinda DEX-ish futes?) are
|
||||
# but we're masking them for now..
|
||||
elif (
|
||||
'defi' in venue_lower
|
||||
|
||||
# TODO: handle coinm futes which have a margin asset that
|
||||
# is some crypto token!
|
||||
# https://binance-docs.github.io/apidocs/delivery/en/#exchange-information
|
||||
or 'btc' in venue_lower
|
||||
):
|
||||
return None
|
||||
|
||||
else:
|
||||
# NOTE: see the `FutesPair.bs_fqme: str` implementation
|
||||
# to understand the reverse market info lookup below.
|
||||
mkt_mode = venue_lower or 'spot'
|
||||
|
||||
if (
|
||||
venue
|
||||
and 'spot' not in venue_lower
|
||||
|
||||
# XXX: catch all in case user doesn't know which
|
||||
# venue they want (usdtm vs. coinm) and we can choose
|
||||
# a default (via config?) once we support coin-m APIs.
|
||||
or 'perp' in venue_lower
|
||||
):
|
||||
if not mkt_mode:
|
||||
mkt_mode: str = f'{venue_lower}_futes'
|
||||
|
||||
async with open_cached_client(
|
||||
'binance',
|
||||
) as client:
|
||||
|
||||
assets: dict[str, Asset] = await client.get_assets()
|
||||
pair_str: str = mkt_ep.upper()
|
||||
|
||||
# switch venue-mode depending on input pattern parsing
|
||||
# since we want to use a particular endpoint (set) for
|
||||
# pair info lookup!
|
||||
client.mkt_mode = mkt_mode
|
||||
|
||||
pair: Pair = await client.exch_info(
|
||||
pair_str,
|
||||
venue=mkt_mode, # explicit
|
||||
expiry=expiry,
|
||||
)
|
||||
|
||||
if 'futes' in mkt_mode:
|
||||
assert isinstance(pair, FutesPair)
|
||||
|
||||
dst: Asset | None = assets.get(pair.bs_dst_asset)
|
||||
if (
|
||||
not dst
|
||||
# TODO: a known asset DNE list?
|
||||
# and pair.baseAsset == 'DEFI'
|
||||
):
|
||||
log.warning(
|
||||
f'UNKNOWN {venue} asset {pair.baseAsset} from,\n'
|
||||
f'{pformat(pair.to_dict())}'
|
||||
)
|
||||
|
||||
# XXX UNKNOWN missing "asset", though no idea why?
|
||||
# maybe it's only avail in the margin venue(s): /dapi/ ?
|
||||
return None
|
||||
|
||||
mkt = MktPair(
|
||||
dst=dst,
|
||||
src=assets[pair.bs_src_asset],
|
||||
price_tick=pair.price_tick,
|
||||
size_tick=pair.size_tick,
|
||||
bs_mktid=pair.symbol,
|
||||
expiry=expiry,
|
||||
venue=venue,
|
||||
broker='binance',
|
||||
|
||||
# NOTE: sectype is always taken from dst, see
|
||||
# `MktPair.type_key` and `Client._cache_pairs()`
|
||||
# _atype=sectype,
|
||||
)
|
||||
return mkt, pair
|
||||
|
||||
|
||||
@acm
|
||||
async def subscribe(
|
||||
ws: NoBsWs,
|
||||
symbols: list[str],
|
||||
|
||||
# defined once at import time to keep a global state B)
|
||||
iter_subids: Generator[int, None, None] = itertools.count(),
|
||||
|
||||
):
|
||||
# setup subs
|
||||
|
||||
subid: int = next(iter_subids)
|
||||
|
||||
# trade data (aka L1)
|
||||
# https://binance-docs.github.io/apidocs/spot/en/#symbol-order-book-ticker
|
||||
l1_sub = make_sub(symbols, 'bookTicker', subid)
|
||||
await ws.send_msg(l1_sub)
|
||||
|
||||
# aggregate (each order clear by taker **not** by maker)
|
||||
# trades data:
|
||||
# https://binance-docs.github.io/apidocs/spot/en/#aggregate-trade-streams
|
||||
agg_trades_sub = make_sub(symbols, 'aggTrade', subid)
|
||||
await ws.send_msg(agg_trades_sub)
|
||||
|
||||
# might get ack from ws server, or maybe some
|
||||
# other msg still in transit..
|
||||
res = await ws.recv_msg()
|
||||
rsubid: str | None = res.get('id')
|
||||
if rsubid:
|
||||
assert rsubid == subid
|
||||
|
||||
yield
|
||||
|
||||
subs = []
|
||||
for sym in symbols:
|
||||
subs.append("{sym}@aggTrade")
|
||||
subs.append("{sym}@bookTicker")
|
||||
|
||||
# unsub from all pairs on teardown
|
||||
if ws.connected():
|
||||
await ws.send_msg({
|
||||
"method": "UNSUBSCRIBE",
|
||||
"params": subs,
|
||||
"id": subid,
|
||||
})
|
||||
|
||||
# XXX: do we need to ack the unsub?
|
||||
# await ws.recv_msg()
|
||||
|
||||
|
||||
async def stream_quotes(
|
||||
|
||||
send_chan: trio.abc.SendChannel,
|
||||
symbols: list[str],
|
||||
feed_is_live: trio.Event,
|
||||
loglevel: str = None,
|
||||
|
||||
# startup sync
|
||||
task_status: TaskStatus[tuple[dict, dict]] = trio.TASK_STATUS_IGNORED,
|
||||
|
||||
) -> None:
|
||||
|
||||
async with (
|
||||
send_chan as send_chan,
|
||||
open_cached_client('binance') as client,
|
||||
):
|
||||
init_msgs: list[FeedInit] = []
|
||||
for sym in symbols:
|
||||
mkt, pair = await get_mkt_info(sym)
|
||||
|
||||
# build out init msgs according to latest spec
|
||||
init_msgs.append(
|
||||
FeedInit(mkt_info=mkt)
|
||||
)
|
||||
|
||||
wss_url: str = get_api_eps(client.mkt_mode)[1] # 2nd elem is wss url
|
||||
|
||||
# TODO: for sanity, but remove eventually Xp
|
||||
if 'future' in mkt.type_key:
|
||||
assert 'fstream' in wss_url
|
||||
|
||||
async with (
|
||||
open_autorecon_ws(
|
||||
url=wss_url,
|
||||
fixture=partial(
|
||||
subscribe,
|
||||
symbols=[mkt.bs_mktid],
|
||||
),
|
||||
) as ws,
|
||||
|
||||
# avoid stream-gen closure from breaking trio..
|
||||
aclosing(stream_messages(ws)) as msg_gen,
|
||||
):
|
||||
# log.info('WAITING ON FIRST LIVE QUOTE..')
|
||||
typ, quote = await anext(msg_gen)
|
||||
|
||||
# pull a first quote and deliver
|
||||
while typ != 'trade':
|
||||
typ, quote = await anext(msg_gen)
|
||||
|
||||
task_status.started((init_msgs, quote))
|
||||
|
||||
# signal to caller feed is ready for consumption
|
||||
feed_is_live.set()
|
||||
|
||||
# import time
|
||||
# last = time.time()
|
||||
|
||||
# XXX NOTE: can't include the `.binance` suffix
|
||||
# or the sampling loop will not broadcast correctly
|
||||
# since `bus._subscribers.setdefault(bs_fqme, set())`
|
||||
# is used inside `.data.open_feed_bus()` !!!
|
||||
topic: str = mkt.bs_fqme
|
||||
|
||||
# start streaming
|
||||
async for typ, quote in msg_gen:
|
||||
|
||||
# period = time.time() - last
|
||||
# hz = 1/period if period else float('inf')
|
||||
# if hz > 60:
|
||||
# log.info(f'Binance quotez : {hz}')
|
||||
await send_chan.send({topic: quote})
|
||||
# last = time.time()
|
||||
|
||||
|
||||
@tractor.context
|
||||
async def open_symbol_search(
|
||||
ctx: tractor.Context,
|
||||
) -> Client:
|
||||
|
||||
# NOTE: symbology tables are loaded as part of client
|
||||
# startup in ``.api.get_client()`` and in this case
|
||||
# are stored as `Client._pairs`.
|
||||
async with open_cached_client('binance') as client:
|
||||
|
||||
# TODO: maybe we should deliver the cache
|
||||
# so that client's can always do a local-lookup-first
|
||||
# style try and then update async as (new) match results
|
||||
# are delivered from here?
|
||||
await ctx.started()
|
||||
|
||||
async with ctx.open_stream() as stream:
|
||||
|
||||
pattern: str
|
||||
async for pattern in stream:
|
||||
# NOTE: pattern fuzzy-matching is done within
|
||||
# the method impl.
|
||||
pairs: dict[str, Pair] = await client.search_symbols(
|
||||
pattern,
|
||||
)
|
||||
|
||||
# repack in fqme-keyed table
|
||||
byfqme: dict[str, Pair] = {}
|
||||
for pair in pairs.values():
|
||||
byfqme[pair.bs_fqme] = pair
|
||||
|
||||
await stream.send(byfqme)
|
|
@ -0,0 +1,302 @@
|
|||
# piker: trading gear for hackers
|
||||
# Copyright (C) Tyler Goodlet (in stewardship for pikers)
|
||||
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU Affero General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU Affero General Public License for more details.
|
||||
|
||||
# You should have received a copy of the GNU Affero General Public License
|
||||
# along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
"""
|
||||
Per-market data-type definitions and schema types.
|
||||
|
||||
"""
|
||||
from __future__ import annotations
|
||||
from typing import (
|
||||
Literal,
|
||||
)
|
||||
from decimal import Decimal
|
||||
|
||||
from msgspec import field
|
||||
|
||||
from piker.types import Struct
|
||||
|
||||
|
||||
# API endpoint paths by venue / sub-API
|
||||
_domain: str = 'binance.com'
|
||||
_spot_url = f'https://api.{_domain}'
|
||||
_futes_url = f'https://fapi.{_domain}'
|
||||
|
||||
# WEBsocketz
|
||||
# NOTE XXX: see api docs which show diff addr?
|
||||
# https://developers.binance.com/docs/binance-trading-api/websocket_api#general-api-information
|
||||
_spot_ws: str = 'wss://stream.binance.com/ws'
|
||||
# or this one? ..
|
||||
# 'wss://ws-api.binance.com:443/ws-api/v3',
|
||||
|
||||
# https://binance-docs.github.io/apidocs/futures/en/#websocket-market-streams
|
||||
_futes_ws: str = f'wss://fstream.{_domain}/ws'
|
||||
_auth_futes_ws: str = f'wss://fstream-auth.{_domain}/ws'
|
||||
|
||||
# test nets
|
||||
# NOTE: spot test network only allows certain ep sets:
|
||||
# https://testnet.binance.vision/
|
||||
# https://www.binance.com/en/support/faq/how-to-test-my-functions-on-binance-testnet-ab78f9a1b8824cf0a106b4229c76496d
|
||||
_testnet_spot_url: str = 'https://testnet.binance.vision/api'
|
||||
_testnet_spot_ws: str = 'wss://testnet.binance.vision/ws'
|
||||
# or this one? ..
|
||||
# 'wss://testnet.binance.vision/ws-api/v3'
|
||||
|
||||
_testnet_futes_url: str = 'https://testnet.binancefuture.com'
|
||||
_testnet_futes_ws: str = 'wss://stream.binancefuture.com/ws'
|
||||
|
||||
|
||||
MarketType = Literal[
|
||||
'spot',
|
||||
# 'margin',
|
||||
'usdtm_futes',
|
||||
# 'coinm_futes',
|
||||
]
|
||||
|
||||
|
||||
def get_api_eps(venue: MarketType) -> tuple[str, str]:
|
||||
'''
|
||||
Return API ep root paths per venue.
|
||||
|
||||
'''
|
||||
return {
|
||||
'spot': (
|
||||
_spot_url,
|
||||
_spot_ws,
|
||||
),
|
||||
'usdtm_futes': (
|
||||
_futes_url,
|
||||
_futes_ws,
|
||||
),
|
||||
}[venue]
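# Sketch of expected output (derived from the URL constants above, shown
# here only for illustration):
#
#   http_ep, ws_ep = get_api_eps('usdtm_futes')
#   # -> ('https://fapi.binance.com', 'wss://fstream.binance.com/ws')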
|
||||
|
||||
|
||||
class Pair(Struct, frozen=True, kw_only=True):
|
||||
|
||||
symbol: str
|
||||
status: str
|
||||
orderTypes: list[str]
|
||||
|
||||
# src
|
||||
quoteAsset: str
|
||||
quotePrecision: int
|
||||
|
||||
# dst
|
||||
baseAsset: str
|
||||
baseAssetPrecision: int
|
||||
|
||||
filters: dict[
|
||||
str,
|
||||
str | int | float,
|
||||
] = field(default_factory=dict)
|
||||
|
||||
@property
|
||||
def price_tick(self) -> Decimal:
|
||||
# XXX: lul, after manually inspecting the response format we
|
||||
# just directly pick out the info we need
|
||||
step_size: str = self.filters['PRICE_FILTER']['tickSize'].rstrip('0')
|
||||
return Decimal(step_size)
|
||||
|
||||
@property
|
||||
def size_tick(self) -> Decimal:
|
||||
step_size: str = self.filters['LOT_SIZE']['stepSize'].rstrip('0')
|
||||
return Decimal(step_size)
|
||||
|
||||
@property
|
||||
def bs_fqme(self) -> str:
|
||||
return self.symbol
|
||||
|
||||
@property
|
||||
def bs_mktid(self) -> str:
|
||||
return f'{self.symbol}.{self.venue}'
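# Illustrative sketch (the filter payload below is an assumed example of
# binance's exchange-info format, not taken from the diff) of how the
# `price_tick`/`size_tick` properties above resolve filters into ticks:
#
#   filters = {
#       'PRICE_FILTER': {'tickSize': '0.01000000'},
#       'LOT_SIZE': {'stepSize': '0.00001000'},
#   }
#   Decimal(filters['PRICE_FILTER']['tickSize'].rstrip('0'))  # Decimal('0.01')
#   Decimal(filters['LOT_SIZE']['stepSize'].rstrip('0'))      # Decimal('0.00001')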
|
||||
|
||||
|
||||
class SpotPair(Pair, frozen=True):
|
||||
|
||||
cancelReplaceAllowed: bool
|
||||
allowTrailingStop: bool
|
||||
quoteAssetPrecision: int
|
||||
|
||||
baseCommissionPrecision: int
|
||||
quoteCommissionPrecision: int
|
||||
|
||||
icebergAllowed: bool
|
||||
ocoAllowed: bool
|
||||
quoteOrderQtyMarketAllowed: bool
|
||||
isSpotTradingAllowed: bool
|
||||
isMarginTradingAllowed: bool
|
||||
|
||||
defaultSelfTradePreventionMode: str
|
||||
allowedSelfTradePreventionModes: list[str]
|
||||
permissions: list[str]
|
||||
|
||||
# NOTE: see `.data._symcache.SymbologyCache.load()` for why
|
||||
ns_path: str = 'piker.brokers.binance:SpotPair'
|
||||
|
||||
@property
|
||||
def venue(self) -> str:
|
||||
return 'SPOT'
|
||||
|
||||
@property
|
||||
def bs_fqme(self) -> str:
|
||||
return f'{self.symbol}.SPOT'
|
||||
|
||||
@property
|
||||
def bs_src_asset(self) -> str:
|
||||
return f'{self.quoteAsset}'
|
||||
|
||||
@property
|
||||
def bs_dst_asset(self) -> str:
|
||||
return f'{self.baseAsset}'
|
||||
|
||||
|
||||
class FutesPair(Pair):
|
||||
symbol: str # 'BTCUSDT',
|
||||
pair: str # 'BTCUSDT',
|
||||
baseAssetPrecision: int # 8,
|
||||
contractType: str # 'PERPETUAL',
|
||||
deliveryDate: int # 4133404800000,
|
||||
liquidationFee: float # '0.012500',
|
||||
maintMarginPercent: float # '2.5000',
|
||||
marginAsset: str # 'USDT',
|
||||
marketTakeBound: float # '0.05',
|
||||
maxMoveOrderLimit: int # 10000,
|
||||
onboardDate: int # 1569398400000,
|
||||
pricePrecision: int # 2,
|
||||
quantityPrecision: int # 3,
|
||||
quoteAsset: str # 'USDT',
|
||||
quotePrecision: int # 8,
|
||||
requiredMarginPercent: float # '5.0000',
|
||||
settlePlan: int # 0,
|
||||
timeInForce: list[str] # ['GTC', 'IOC', 'FOK', 'GTX'],
|
||||
triggerProtect: float # '0.0500',
|
||||
underlyingSubType: list[str] # ['PoW'],
|
||||
underlyingType: str # 'COIN'
|
||||
|
||||
# NOTE: see `.data._symcache.SymbologyCache.load()` for why
|
||||
ns_path: str = 'piker.brokers.binance:FutesPair'
|
||||
|
||||
# NOTE: for compat with spot pairs and `MktPair.src: Asset`
|
||||
# processing..
|
||||
@property
|
||||
def quoteAssetPrecision(self) -> int:
|
||||
return self.quotePrecision
|
||||
|
||||
@property
|
||||
def expiry(self) -> str:
|
||||
symbol: str = self.symbol
|
||||
contype: str = self.contractType
|
||||
match contype:
|
||||
case (
|
||||
'CURRENT_QUARTER'
|
||||
| 'CURRENT_QUARTER DELIVERING'
|
||||
| 'NEXT_QUARTER' # su madre binance..
|
||||
):
|
||||
pair, _, expiry = symbol.partition('_')
|
||||
assert pair == self.pair # sanity
|
||||
return f'{expiry}'
|
||||
|
||||
case 'PERPETUAL':
|
||||
return 'PERP'
|
||||
|
||||
case '':
|
||||
subtype: list[str] = self.underlyingSubType
|
||||
if not subtype:
|
||||
if self.status == 'PENDING_TRADING':
|
||||
return 'PENDING'
|
||||
|
||||
match subtype:
|
||||
case ['DEFI']:
|
||||
return 'PERP'
|
||||
|
||||
# wow, just wow you binance guys suck..
|
||||
if self.status == 'PENDING_TRADING':
|
||||
return 'PENDING'
|
||||
|
||||
# XXX: yeah no clue then..
|
||||
raise ValueError(
|
||||
f'Bad .expiry token match: {contype} for {symbol}'
|
||||
)
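# Worked example (hypothetical contract, based purely on the matching
# above): a quarterly pair with `symbol='BTCUSDT_240329'` and
# `contractType='CURRENT_QUARTER'` yields expiry '240329', while a
# 'PERPETUAL' contract yields 'PERP'.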
|
||||
|
||||
@property
|
||||
def venue(self) -> str:
|
||||
symbol: str = self.symbol
|
||||
ctype: str = self.contractType
|
||||
margin: str = self.marginAsset
|
||||
|
||||
match ctype:
|
||||
case 'PERPETUAL':
|
||||
return f'{margin}M'
|
||||
|
||||
case (
|
||||
'CURRENT_QUARTER'
|
||||
| 'CURRENT_QUARTER DELIVERING'
|
||||
| 'NEXT_QUARTER' # su madre binance..
|
||||
):
|
||||
_, _, expiry = symbol.partition('_')
|
||||
return f'{margin}M'
|
||||
|
||||
case '':
|
||||
subtype: list[str] = self.underlyingSubType
|
||||
if not subtype:
|
||||
if self.status == 'PENDING_TRADING':
|
||||
return f'{margin}M'
|
||||
|
||||
match subtype:
|
||||
case (
|
||||
['DEFI']
|
||||
| ['USDC']
|
||||
):
|
||||
return f'{subtype[0]}'
|
||||
|
||||
# XXX: yeah no clue then..
|
||||
raise ValueError(
|
||||
f'Bad .venue token match: {ctype}'
|
||||
)
|
||||
|
||||
@property
|
||||
def bs_fqme(self) -> str:
|
||||
symbol: str = self.symbol
|
||||
ctype: str = self.contractType
|
||||
venue: str = self.venue
|
||||
pair: str = self.pair
|
||||
|
||||
match ctype:
|
||||
case (
|
||||
'CURRENT_QUARTER'
|
||||
| 'NEXT_QUARTER' # su madre binance..
|
||||
):
|
||||
pair, _, expiry = symbol.partition('_')
|
||||
assert pair == self.pair
|
||||
|
||||
return f'{pair}.{venue}.{self.expiry}'
|
||||
|
||||
@property
|
||||
def bs_src_asset(self) -> str:
|
||||
return f'{self.quoteAsset}'
|
||||
|
||||
@property
|
||||
def bs_dst_asset(self) -> str:
|
||||
return f'{self.baseAsset}.{self.venue}'
|
||||
|
||||
|
||||
PAIRTYPES: dict[MarketType, Pair] = {
|
||||
'spot': SpotPair,
|
||||
'usdtm_futes': FutesPair,
|
||||
|
||||
# TODO: support coin-margined venue:
|
||||
# https://binance-docs.github.io/apidocs/delivery/en/#change-log
|
||||
# 'coinm_futes': CoinFutesPair,
|
||||
}
|
|
@ -21,6 +21,7 @@ import os
|
|||
from functools import partial
|
||||
from operator import attrgetter
|
||||
from operator import itemgetter
|
||||
from types import ModuleType
|
||||
|
||||
import click
|
||||
import trio
|
||||
|
@ -28,17 +29,170 @@ import tractor
|
|||
|
||||
from ..cli import cli
|
||||
from .. import watchlists as wl
|
||||
from ..log import get_console_log, colorize_json, get_logger
|
||||
from .._daemon import maybe_spawn_brokerd, maybe_open_pikerd
|
||||
from ..brokers import core, get_brokermod, data
|
||||
|
||||
log = get_logger('cli')
|
||||
DEFAULT_BROKER = 'questrade'
|
||||
from ..log import (
|
||||
colorize_json,
|
||||
)
|
||||
from ._util import (
|
||||
log,
|
||||
get_console_log,
|
||||
)
|
||||
from ..service import (
|
||||
maybe_spawn_brokerd,
|
||||
maybe_open_pikerd,
|
||||
)
|
||||
from ..brokers import (
|
||||
core,
|
||||
get_brokermod,
|
||||
data,
|
||||
)
|
||||
DEFAULT_BROKER = 'binance'
|
||||
|
||||
_config_dir = click.get_app_dir('piker')
|
||||
_watchlists_data_path = os.path.join(_config_dir, 'watchlists.json')
|
||||
|
||||
|
||||
OK = '\033[92m'
|
||||
WARNING = '\033[93m'
|
||||
FAIL = '\033[91m'
|
||||
ENDC = '\033[0m'
|
||||
|
||||
|
||||
def print_ok(s: str, **kwargs):
|
||||
print(OK + s + ENDC, **kwargs)
|
||||
|
||||
|
||||
def print_error(s: str, **kwargs):
|
||||
print(FAIL + s + ENDC, **kwargs)
|
||||
|
||||
|
||||
def get_method(client, meth_name: str):
|
||||
print(f'checking client for method \'{meth_name}\'...', end='', flush=True)
|
||||
method = getattr(client, meth_name, None)
|
||||
assert method
|
||||
print_ok('found!.')
|
||||
return method
|
||||
|
||||
|
||||
async def run_method(client, meth_name: str, **kwargs):
|
||||
method = get_method(client, meth_name)
|
||||
print('running...', end='', flush=True)
|
||||
result = await method(**kwargs)
|
||||
print_ok(f'done! result: {type(result)}')
|
||||
return result
|
||||
|
||||
|
||||
async def run_test(broker_name: str):
|
||||
brokermod = get_brokermod(broker_name)
|
||||
total = 0
|
||||
passed = 0
|
||||
failed = 0
|
||||
|
||||
print('getting client...', end='', flush=True)
|
||||
if not hasattr(brokermod, 'get_client'):
|
||||
print_error('fail! no \'get_client\' context manager found.')
|
||||
return
|
||||
|
||||
async with brokermod.get_client(is_brokercheck=True) as client:
|
||||
print_ok('done! inside client context.')
|
||||
|
||||
# check for methods present on brokermod
|
||||
method_list = [
|
||||
'backfill_bars',
|
||||
'get_client',
|
||||
'trades_dialogue',
|
||||
'open_history_client',
|
||||
'open_symbol_search',
|
||||
'stream_quotes',
|
||||
|
||||
]
|
||||
|
||||
for method in method_list:
|
||||
print(
|
||||
f'checking brokermod for method \'{method}\'...',
|
||||
end='', flush=True)
|
||||
if not hasattr(brokermod, method):
|
||||
print_error(f'fail! method \'{method}\' not found.')
|
||||
failed += 1
|
||||
else:
|
||||
print_ok('done!')
|
||||
passed += 1
|
||||
|
||||
total += 1
|
||||
|
||||
# check for methods present on brokermod.Client and their
|
||||
# results
|
||||
|
||||
# for private methods only check is present
|
||||
method_list = [
|
||||
'get_balances',
|
||||
'get_assets',
|
||||
'get_trades',
|
||||
'get_xfers',
|
||||
'submit_limit',
|
||||
'submit_cancel',
|
||||
'search_symbols',
|
||||
]
|
||||
|
||||
for method_name in method_list:
|
||||
try:
|
||||
get_method(client, method_name)
|
||||
passed += 1
|
||||
|
||||
except AssertionError:
|
||||
print_error(f'fail! method \'{method_name}\' not found.')
|
||||
failed += 1
|
||||
|
||||
total += 1
|
||||
|
||||
# check for methods present on brokermod.Client and their
|
||||
# results
|
||||
|
||||
syms = await run_method(client, 'symbol_info')
|
||||
total += 1
|
||||
|
||||
if len(syms) == 0:
|
||||
raise RuntimeError('Empty Symbol list?')
|
||||
|
||||
passed += 1
|
||||
|
||||
first_sym = tuple(syms.keys())[0]
|
||||
|
||||
method_list = [
|
||||
('cache_symbols', {}),
|
||||
('search_symbols', {'pattern': first_sym[:-1]}),
|
||||
('bars', {'symbol': first_sym})
|
||||
]
|
||||
|
||||
for method_name, method_kwargs in method_list:
|
||||
try:
|
||||
await run_method(client, method_name, **method_kwargs)
|
||||
passed += 1
|
||||
|
||||
except AssertionError:
|
||||
print_error(f'fail! method \'{method_name}\' not found.')
|
||||
failed += 1
|
||||
|
||||
total += 1
|
||||
|
||||
print(f'total: {total}, passed: {passed}, failed: {failed}')
|
||||
|
||||
|
||||
@cli.command()
|
||||
@click.argument('broker', nargs=1, required=True)
|
||||
@click.pass_obj
|
||||
def brokercheck(config, broker):
|
||||
'''
|
||||
Test broker apis for completeness.
|
||||
|
||||
'''
|
||||
async def bcheck_main():
|
||||
async with maybe_spawn_brokerd(broker) as portal:
|
||||
await portal.run(run_test, broker)
|
||||
await portal.cancel_actor()
|
||||
|
||||
trio.run(bcheck_main)
|
||||
|
||||
|
||||
@cli.command()
|
||||
@click.option('--keys', '-k', multiple=True,
|
||||
help='Return results only for these keys')
|
||||
|
@ -88,7 +242,7 @@ def quote(config, tickers):
|
|||
|
||||
'''
|
||||
# global opts
|
||||
brokermod = config['brokermods'][0]
|
||||
brokermod = list(config['brokermods'].values())[0]
|
||||
|
||||
quotes = trio.run(partial(core.stocks_quote, brokermod, tickers))
|
||||
if not quotes:
|
||||
|
@ -115,7 +269,7 @@ def bars(config, symbol, count):
|
|||
|
||||
'''
|
||||
# global opts
|
||||
brokermod = config['brokermods'][0]
|
||||
brokermod = list(config['brokermods'].values())[0]
|
||||
|
||||
# broker backend should return at the least a
|
||||
# list of candle dictionaries
|
||||
|
@ -150,7 +304,7 @@ def record(config, rate, name, dhost, filename):
|
|||
|
||||
'''
|
||||
# global opts
|
||||
brokermod = config['brokermods'][0]
|
||||
brokermod = list(config['brokermods'].values())[0]
|
||||
loglevel = config['loglevel']
|
||||
log = config['log']
|
||||
|
||||
|
@ -215,7 +369,7 @@ def optsquote(config, symbol, date):
|
|||
|
||||
'''
|
||||
# global opts
|
||||
brokermod = config['brokermods'][0]
|
||||
brokermod = list(config['brokermods'].values())[0]
|
||||
|
||||
quotes = trio.run(
|
||||
partial(
|
||||
|
@ -232,47 +386,104 @@ def optsquote(config, symbol, date):
|
|||
@cli.command()
|
||||
@click.argument('tickers', nargs=-1, required=True)
|
||||
@click.pass_obj
|
||||
def symbol_info(config, tickers):
|
||||
def mkt_info(
|
||||
config: dict,
|
||||
tickers: list[str],
|
||||
):
|
||||
'''
|
||||
Print symbol quotes to the console
|
||||
|
||||
'''
|
||||
# global opts
|
||||
brokermod = config['brokermods'][0]
|
||||
from msgspec.json import encode, decode
|
||||
from ..accounting import MktPair
|
||||
from ..service import (
|
||||
open_piker_runtime,
|
||||
)
|
||||
|
||||
quotes = trio.run(partial(core.symbol_info, brokermod, tickers))
|
||||
if not quotes:
|
||||
log.error(f"No quotes could be found for {tickers}?")
|
||||
# global opts
|
||||
brokermods: dict[str, ModuleType] = config['brokermods']
|
||||
|
||||
mkts: list[MktPair] = []
|
||||
async def main():
|
||||
|
||||
async with open_piker_runtime(
|
||||
name='mkt_info_query',
|
||||
# loglevel=loglevel,
|
||||
debug_mode=True,
|
||||
|
||||
) as (_, _):
|
||||
for fqme in tickers:
|
||||
bs_fqme, _, broker = fqme.rpartition('.')
|
||||
brokermod: ModuleType = brokermods[broker]
|
||||
mkt, bs_pair = await core.mkt_info(
|
||||
brokermod,
|
||||
bs_fqme,
|
||||
)
|
||||
mkts.append((mkt, bs_pair))
|
||||
|
||||
trio.run(main)
|
||||
|
||||
if not mkts:
|
||||
log.error(
|
||||
f'No market info could be found for {tickers}'
|
||||
)
|
||||
return
|
||||
|
||||
if len(quotes) < len(tickers):
|
||||
syms = tuple(map(itemgetter('symbol'), quotes))
|
||||
if len(mkts) < len(tickers):
|
||||
syms = tuple(mkt.fqme for mkt, _ in mkts)
|
||||
for ticker in tickers:
|
||||
if ticker not in syms:
|
||||
brokermod.log.warn(f"Could not find symbol {ticker}?")
|
||||
log.warn(f"Could not find symbol {ticker}?")
|
||||
|
||||
click.echo(colorize_json(quotes))
|
||||
|
||||
# TODO: use ``rich.Table`` instead here!
|
||||
for mkt, bs_pair in mkts:
|
||||
click.echo(
|
||||
'\n'
|
||||
'----------------------------------------------------\n'
|
||||
f'{type(bs_pair)}\n'
|
||||
'----------------------------------------------------\n'
|
||||
f'{colorize_json(bs_pair.to_dict())}\n'
|
||||
'----------------------------------------------------\n'
|
||||
f'as piker `MktPair` with fqme: {mkt.fqme}\n'
|
||||
'----------------------------------------------------\n'
|
||||
# NOTE: roundtrip to json codec for console print
|
||||
f'{colorize_json(decode(encode(mkt)))}'
|
||||
)
|
||||
|
||||
|
||||
@cli.command()
|
||||
@click.argument('pattern', required=True)
|
||||
# TODO: move this to top level click/typer context for all subs
|
||||
@click.option(
|
||||
'--pdb',
|
||||
is_flag=True,
|
||||
help='Enable tractor debug mode',
|
||||
)
|
||||
@click.pass_obj
|
||||
def search(config, pattern):
|
||||
def search(
|
||||
config: dict,
|
||||
pattern: str,
|
||||
pdb: bool,
|
||||
):
|
||||
'''
|
||||
Search for symbols from broker backend(s).
|
||||
|
||||
'''
|
||||
# global opts
|
||||
brokermods = config['brokermods']
|
||||
brokermods = list(config['brokermods'].values())
|
||||
|
||||
# define tractor entrypoint
|
||||
async def main(func):
|
||||
|
||||
async with maybe_open_pikerd(
|
||||
loglevel=config['loglevel'],
|
||||
debug_mode=pdb,
|
||||
):
|
||||
return await func()
|
||||
|
||||
from piker.toolz import open_crash_handler
|
||||
with open_crash_handler():
|
||||
quotes = trio.run(
|
||||
main,
|
||||
partial(
|
||||
|
@ -287,3 +498,39 @@ def search(config, pattern):
|
|||
return
|
||||
|
||||
click.echo(colorize_json(quotes))
|
||||
|
||||
|
||||
@cli.command()
|
||||
@click.argument('section', required=False)
|
||||
@click.argument('value', required=False)
|
||||
@click.option('--delete', '-d', flag_value=True, help='Delete section')
|
||||
@click.pass_obj
|
||||
def brokercfg(config, section, value, delete):
|
||||
'''
|
||||
If invoked with no arguments, open an editor to edit broker
|
||||
configs file or get / update an individual section.
|
||||
|
||||
'''
|
||||
from .. import config
|
||||
|
||||
if section:
|
||||
conf, path = config.load()
|
||||
|
||||
if not delete:
|
||||
if value:
|
||||
config.set_value(conf, section, value)
|
||||
|
||||
click.echo(
|
||||
colorize_json(
|
||||
config.get_value(conf, section))
|
||||
)
|
||||
else:
|
||||
config.del_value(conf, section)
|
||||
|
||||
config.write(config=conf)
|
||||
|
||||
else:
|
||||
conf, path = config.load(raw=True)
|
||||
config.write(
|
||||
raw=click.edit(text=conf)
|
||||
)
|
||||
|
|
|
@ -26,13 +26,11 @@ from typing import List, Dict, Any, Optional
|
|||
|
||||
import trio
|
||||
|
||||
from ..log import get_logger
|
||||
from ._util import log
|
||||
from . import get_brokermod
|
||||
from .._daemon import maybe_spawn_brokerd
|
||||
from .._cacheables import open_cached_client
|
||||
|
||||
|
||||
log = get_logger(__name__)
|
||||
from ..service import maybe_spawn_brokerd
|
||||
from . import open_cached_client
|
||||
from ..accounting import MktPair
|
||||
|
||||
|
||||
async def api(brokername: str, methname: str, **kwargs) -> dict:
|
||||
|
@ -97,15 +95,15 @@ async def option_chain(
|
|||
return await client.option_chains(contracts)
|
||||
|
||||
|
||||
async def contracts(
|
||||
brokermod: ModuleType,
|
||||
symbol: str,
|
||||
) -> Dict[str, Dict[str, Dict[str, Any]]]:
|
||||
"""Return option contracts (all expiries) for ``symbol``.
|
||||
"""
|
||||
async with brokermod.get_client() as client:
|
||||
# return await client.get_all_contracts([symbol])
|
||||
return await client.get_all_contracts([symbol])
|
||||
# async def contracts(
|
||||
# brokermod: ModuleType,
|
||||
# symbol: str,
|
||||
# ) -> Dict[str, Dict[str, Dict[str, Any]]]:
|
||||
# """Return option contracts (all expiries) for ``symbol``.
|
||||
# """
|
||||
# async with brokermod.get_client() as client:
|
||||
# # return await client.get_all_contracts([symbol])
|
||||
# return await client.get_all_contracts([symbol])
|
||||
|
||||
|
||||
async def bars(
|
||||
|
@ -119,17 +117,6 @@ async def bars(
|
|||
return await client.bars(symbol, **kwargs)
|
||||
|
||||
|
||||
async def symbol_info(
|
||||
brokermod: ModuleType,
|
||||
symbol: str,
|
||||
**kwargs,
|
||||
) -> Dict[str, Dict[str, Dict[str, Any]]]:
|
||||
"""Return symbol info from broker.
|
||||
"""
|
||||
async with brokermod.get_client() as client:
|
||||
return await client.symbol_info(symbol, **kwargs)
|
||||
|
||||
|
||||
async def search_w_brokerd(name: str, pattern: str) -> dict:
|
||||
|
||||
async with open_cached_client(name) as client:
|
||||
|
@ -158,7 +145,11 @@ async def symbol_search(
|
|||
|
||||
async with maybe_spawn_brokerd(
|
||||
mod.name,
|
||||
infect_asyncio=getattr(mod, '_infect_asyncio', False),
|
||||
infect_asyncio=getattr(
|
||||
mod,
|
||||
'_infect_asyncio',
|
||||
False,
|
||||
),
|
||||
) as portal:
|
||||
|
||||
results.append((
|
||||
|
@ -176,3 +167,20 @@ async def symbol_search(
|
|||
n.start_soon(search_backend, mod.name)
|
||||
|
||||
return results
|
||||
|
||||
|
||||
async def mkt_info(
|
||||
brokermod: ModuleType,
|
||||
fqme: str,
|
||||
**kwargs,
|
||||
|
||||
) -> MktPair:
|
||||
'''
|
||||
Return MktPair info from broker including src and dst assets.
|
||||
|
||||
'''
|
||||
async with open_cached_client(brokermod.name) as client:
|
||||
assert client
|
||||
return await brokermod.get_mkt_info(
|
||||
fqme.replace(brokermod.name, '')
|
||||
)
|
||||
|
|
|
@ -41,13 +41,13 @@ import tractor
|
|||
from tractor.experimental import msgpub
|
||||
from async_generator import asynccontextmanager
|
||||
|
||||
from ..log import get_logger, get_console_log
|
||||
from ._util import (
|
||||
log,
|
||||
get_console_log,
|
||||
)
|
||||
from . import get_brokermod
|
||||
|
||||
|
||||
log = get_logger(__name__)
|
||||
|
||||
|
||||
async def wait_for_network(
|
||||
net_func: Callable,
|
||||
sleep: int = 1
|
||||
|
@ -227,26 +227,28 @@ async def get_cached_feed(
|
|||
|
||||
@tractor.stream
|
||||
async def start_quote_stream(
|
||||
ctx: tractor.Context, # marks this as a streaming func
|
||||
stream: tractor.Context, # marks this as a streaming func
|
||||
broker: str,
|
||||
symbols: List[Any],
|
||||
feed_type: str = 'stock',
|
||||
rate: int = 3,
|
||||
) -> None:
|
||||
"""Handle per-broker quote stream subscriptions using a "lazy" pub-sub
|
||||
'''
|
||||
Handle per-broker quote stream subscriptions using a "lazy" pub-sub
|
||||
pattern.
|
||||
|
||||
Spawns new quoter tasks for each broker backend on-demand.
|
||||
Since most brokers seems to support batch quote requests we
|
||||
limit to one task per process (for now).
|
||||
"""
|
||||
|
||||
'''
|
||||
# XXX: why do we need this again?
|
||||
get_console_log(tractor.current_actor().loglevel)
|
||||
|
||||
# pull global vars from local actor
|
||||
symbols = list(symbols)
|
||||
log.info(
|
||||
f"{ctx.chan.uid} subscribed to {broker} for symbols {symbols}")
|
||||
f"{stream.chan.uid} subscribed to {broker} for symbols {symbols}")
|
||||
# another actor task may have already created it
|
||||
async with get_cached_feed(broker) as feed:
|
||||
|
||||
|
@ -290,13 +292,13 @@ async def start_quote_stream(
|
|||
assert fquote['displayable']
|
||||
payload[sym] = fquote
|
||||
|
||||
await ctx.send_yield(payload)
|
||||
await stream.send_yield(payload)
|
||||
|
||||
await stream_poll_requests(
|
||||
|
||||
# ``trionics.msgpub`` required kwargs
|
||||
task_name=feed_type,
|
||||
ctx=ctx,
|
||||
ctx=stream,
|
||||
topics=symbols,
|
||||
packetizer=feed.mod.packetizer,
|
||||
|
||||
|
@ -319,9 +321,11 @@ async def call_client(
|
|||
|
||||
|
||||
class DataFeed:
|
||||
"""Data feed client for streaming symbol data from and making API client calls
|
||||
to a (remote) ``brokerd`` daemon.
|
||||
"""
|
||||
'''
|
||||
Data feed client for streaming symbol data from and making API
|
||||
client calls to a (remote) ``brokerd`` daemon.
|
||||
|
||||
'''
|
||||
_allowed = ('stock', 'option')
|
||||
|
||||
def __init__(self, portal, brokermod):
|
||||
|
|
|
@ -0,0 +1,70 @@
|
|||
``deribit`` backend
|
||||
-------------------
|
||||
Pretty good liquidity crypto derivatives venue; uses custom JSON-RPC over ws for
|
||||
client methods, then `cryptofeed` for data streams.
|
||||
|
||||
status
|
||||
******
|
||||
- supports option charts
|
||||
- no order support yet
|
||||
|
||||
|
||||
config
|
||||
******
|
||||
In order to get order mode support your ``brokers.toml``
|
||||
needs to have something like the following:
|
||||
|
||||
.. code:: toml
|
||||
|
||||
[deribit]
|
||||
key_id = 'XXXXXXXX'
|
||||
key_secret = 'Xx_XxXxXxXxXxXxXxXxXxXxXxXxXxXxXxXxXxXxXxXx'
|
||||
|
||||
To obtain an api id and secret you need to create an account, which can be a
|
||||
real market account over at:
|
||||
|
||||
- deribit.com (requires KYC for deposit address)
|
||||
|
||||
Or a testnet account over at:
|
||||
|
||||
- test.deribit.com
|
||||
|
||||
For testnet, once the account is created, here is how you deposit fake crypto to
|
||||
try it out:
|
||||
|
||||
1) Go to Wallet:
|
||||
|
||||
.. figure:: assets/0_wallet.png
|
||||
:align: center
|
||||
:target: assets/0_wallet.png
|
||||
:alt: wallet page
|
||||
|
||||
2) Then click on the ellipsis menu and select deposit
|
||||
|
||||
.. figure:: assets/1_wallet_select_deposit.png
|
||||
:align: center
|
||||
:target: assets/1_wallet_select_deposit.png
|
||||
:alt: wallet deposit page
|
||||
|
||||
3) This will take you to the deposit address page
|
||||
|
||||
.. figure:: assets/2_gen_deposit_addr.png
|
||||
:align: center
|
||||
:target: assets/2_gen_deposit_addr.png
|
||||
:alt: generate deposit address page
|
||||
|
||||
4) After clicking generate you should see the address, copy it and go to the
|
||||
`coin faucet <https://test.deribit.com/dericoin/BTC/deposit>`_ and send fake
|
||||
coins to that address.
|
||||
|
||||
.. figure:: assets/3_deposit_address.png
|
||||
:align: center
|
||||
:target: assets/3_deposit_address.png
|
||||
:alt: generated address
|
||||
|
||||
5) Back in the deposit address page you should see the deposit in your history
|
||||
|
||||
.. figure:: assets/4_wallet_deposit_history.png
|
||||
:align: center
|
||||
:target: assets/4_wallet_deposit_history.png
|
||||
:alt: wallet deposit history
|
|
@ -0,0 +1,65 @@
|
|||
# piker: trading gear for hackers
|
||||
# Copyright (C) Guillermo Rodriguez (in stewardship for piker0)
|
||||
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU Affero General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU Affero General Public License for more details.
|
||||
|
||||
# You should have received a copy of the GNU Affero General Public License
|
||||
# along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
'''
|
||||
Deribit backend.
|
||||
|
||||
'''
|
||||
|
||||
from piker.log import get_logger
|
||||
|
||||
from .api import (
|
||||
get_client,
|
||||
)
|
||||
from .feed import (
|
||||
open_history_client,
|
||||
open_symbol_search,
|
||||
stream_quotes,
|
||||
# backfill_bars,
|
||||
)
|
||||
# from .broker import (
|
||||
# open_trade_dialog,
|
||||
# norm_trade_records,
|
||||
# )
|
||||
|
||||
log = get_logger(__name__)
|
||||
|
||||
__all__ = [
|
||||
'get_client',
|
||||
# 'trades_dialogue',
|
||||
'open_history_client',
|
||||
'open_symbol_search',
|
||||
'stream_quotes',
|
||||
# 'norm_trade_records',
|
||||
]
|
||||
|
||||
|
||||
# tractor RPC enable arg
|
||||
__enable_modules__: list[str] = [
|
||||
'api',
|
||||
'feed',
|
||||
# 'broker',
|
||||
]
|
||||
|
||||
# passed to ``tractor.ActorNursery.start_actor()``
|
||||
_spawn_kwargs = {
|
||||
'infect_asyncio': True,
|
||||
}
|
||||
|
||||
# annotation to let backend agnostic code
|
||||
# know if ``brokerd`` should be spawned with
|
||||
# ``tractor``'s aio mode.
|
||||
_infect_asyncio: bool = True
|
|
@ -0,0 +1,675 @@
|
|||
# piker: trading gear for hackers
|
||||
# Copyright (C) Guillermo Rodriguez (in stewardship for piker0)
|
||||
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU Affero General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU Affero General Public License for more details.
|
||||
|
||||
# You should have received a copy of the GNU Affero General Public License
|
||||
# along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
'''
|
||||
Deribit backend.
|
||||
|
||||
'''
|
||||
import asyncio
|
||||
from contextlib import (
|
||||
asynccontextmanager as acm,
|
||||
)
|
||||
from datetime import datetime
|
||||
from functools import partial
|
||||
import time
|
||||
from typing import (
|
||||
Any,
|
||||
Optional,
|
||||
Callable,
|
||||
)
|
||||
|
||||
import pendulum
|
||||
import trio
|
||||
from trio_typing import TaskStatus
|
||||
from rapidfuzz import process as fuzzy
|
||||
import numpy as np
|
||||
from tractor.trionics import (
|
||||
broadcast_receiver,
|
||||
maybe_open_context
|
||||
)
|
||||
from tractor import to_asyncio
|
||||
# XXX WOOPS XD
|
||||
# yeah you'll need to install it since it was removed in #489 by
|
||||
# accident; well i thought we had removed all usage..
|
||||
from cryptofeed import FeedHandler
|
||||
from cryptofeed.defines import (
|
||||
DERIBIT,
|
||||
L1_BOOK, TRADES,
|
||||
OPTION, CALL, PUT,
FILLS, ORDER_INFO,
|
||||
)
|
||||
from cryptofeed.symbols import Symbol
|
||||
|
||||
from piker.data import (
|
||||
def_iohlcv_fields,
|
||||
match_from_pairs,
|
||||
Struct,
|
||||
)
|
||||
from piker.data._web_bs import (
|
||||
open_jsonrpc_session
|
||||
)
|
||||
|
||||
|
||||
from piker import config
|
||||
from piker.log import get_logger
|
||||
|
||||
|
||||
log = get_logger(__name__)
|
||||
|
||||
|
||||
_spawn_kwargs = {
|
||||
'infect_asyncio': True,
|
||||
}
|
||||
|
||||
|
||||
_url = 'https://www.deribit.com'
|
||||
_ws_url = 'wss://www.deribit.com/ws/api/v2'
|
||||
_testnet_ws_url = 'wss://test.deribit.com/ws/api/v2'
|
||||
|
||||
|
||||
class JSONRPCResult(Struct):
|
||||
id: int
usIn: int
usOut: int
usDiff: int
testnet: bool
jsonrpc: str = '2.0'
result: Optional[list[dict]] = None
error: Optional[dict] = None
|
||||
|
||||
class JSONRPCChannel(Struct):
|
||||
method: str
params: dict
jsonrpc: str = '2.0'
|
||||
|
||||
|
||||
class KLinesResult(Struct):
|
||||
close: list[float]
|
||||
cost: list[float]
|
||||
high: list[float]
|
||||
low: list[float]
|
||||
open: list[float]
|
||||
status: str
|
||||
ticks: list[int]
|
||||
volume: list[float]
|
||||
|
||||
class Trade(Struct):
|
||||
trade_seq: int
|
||||
trade_id: str
|
||||
timestamp: int
|
||||
tick_direction: int
|
||||
price: float
|
||||
mark_price: float
|
||||
iv: float
|
||||
instrument_name: str
|
||||
index_price: float
|
||||
direction: str
|
||||
amount: float
combo_trade_id: Optional[int] = 0
combo_id: Optional[str] = ''
|
||||
|
||||
class LastTradesResult(Struct):
|
||||
trades: list[Trade]
|
||||
has_more: bool
|
||||
|
||||
|
||||
# convert datetime obj timestamp to unixtime in milliseconds
|
||||
def deribit_timestamp(when):
|
||||
return int((when.timestamp() * 1000) + (when.microsecond / 1000))
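# Quick sketch (illustrative input only): for a UTC datetime at
# 2024-01-13T00:00:00 (microsecond == 0) this returns 1705104000000,
# i.e. the unix epoch time in milliseconds.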
|
||||
|
||||
|
||||
def str_to_cb_sym(name: str) -> Symbol:
|
||||
base, strike_price, expiry_date, option_type = name.split('-')
|
||||
|
||||
quote = base
|
||||
|
||||
if option_type == 'put':
|
||||
option_type = PUT
|
||||
elif option_type == 'call':
|
||||
option_type = CALL
|
||||
else:
|
||||
raise Exception("Couldn\'t parse option type")
|
||||
|
||||
return Symbol(
|
||||
base, quote,
|
||||
type=OPTION,
|
||||
strike_price=strike_price,
|
||||
option_type=option_type,
|
||||
expiry_date=expiry_date,
|
||||
expiry_normalize=False)
|
||||
|
||||
|
||||
def piker_sym_to_cb_sym(name: str) -> Symbol:
|
||||
base, expiry_date, strike_price, option_type = tuple(
|
||||
name.upper().split('-'))
|
||||
|
||||
quote = base
|
||||
|
||||
if option_type == 'P':
|
||||
option_type = PUT
|
||||
elif option_type == 'C':
|
||||
option_type = CALL
|
||||
else:
|
||||
raise Exception("Couldn\'t parse option type")
|
||||
|
||||
return Symbol(
|
||||
base, quote,
|
||||
type=OPTION,
|
||||
strike_price=strike_price,
|
||||
option_type=option_type,
|
||||
expiry_date=expiry_date.upper())
|
||||
|
||||
|
||||
def cb_sym_to_deribit_inst(sym: Symbol):
|
||||
# cryptofeed normalized
|
||||
cb_norm = ['F', 'G', 'H', 'J', 'K', 'M', 'N', 'Q', 'U', 'V', 'X', 'Z']
|
||||
|
||||
# deribit specific
|
||||
months = ['JAN', 'FEB', 'MAR', 'APR', 'MAY', 'JUN', 'JUL', 'AUG', 'SEP', 'OCT', 'NOV', 'DEC']
|
||||
|
||||
exp = sym.expiry_date
|
||||
|
||||
# YYMDD
|
||||
# 01234
|
||||
year, month, day = (
|
||||
exp[:2], months[cb_norm.index(exp[2:3])], exp[3:])
|
||||
|
||||
otype = 'C' if sym.option_type == CALL else 'P'
|
||||
|
||||
return f'{sym.base}-{day}{month}{year}-{sym.strike_price}-{otype}'
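# Illustrative mapping (the expiry string below is assumed to follow the
# 'YYMDD' layout noted above): exp '24F13' -> year '24', month code 'F'
# -> 'JAN', day '13', so a BTC CALL w/ strike 50000 becomes
# 'BTC-13JAN24-50000-C'.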
|
||||
|
||||
|
||||
def get_config() -> dict[str, Any]:
|
||||
|
||||
conf, path = config.load()
|
||||
|
||||
section = conf.get('deribit')
|
||||
|
||||
# TODO: document why we send this; it sets the logging params for cryptofeed
|
||||
conf['log'] = {}
|
||||
conf['log']['disabled'] = True
|
||||
|
||||
if section is None:
|
||||
log.warning(f'No config section found for deribit in {path}')
|
||||
|
||||
return conf
|
||||
|
||||
|
||||
class Client:
|
||||
|
||||
def __init__(self, json_rpc: Callable) -> None:
|
||||
self._pairs: dict[str, Any] = None
|
||||
|
||||
config = get_config().get('deribit', {})
|
||||
|
||||
if ('key_id' in config) and ('key_secret' in config):
|
||||
self._key_id = config['key_id']
|
||||
self._key_secret = config['key_secret']
|
||||
|
||||
else:
|
||||
self._key_id = None
|
||||
self._key_secret = None
|
||||
|
||||
self.json_rpc = json_rpc
|
||||
|
||||
@property
|
||||
def currencies(self):
|
||||
return ['btc', 'eth', 'sol', 'usd']
|
||||
|
||||
async def get_balances(self, kind: str = 'option') -> dict[str, float]:
|
||||
"""Return the set of positions for this account
|
||||
by symbol.
|
||||
"""
|
||||
balances = {}
|
||||
|
||||
for currency in self.currencies:
|
||||
resp = await self.json_rpc(
|
||||
'private/get_positions', params={
|
||||
'currency': currency.upper(),
|
||||
'kind': kind})
|
||||
|
||||
balances[currency] = resp.result
|
||||
|
||||
return balances
|
||||
|
||||
async def get_assets(self) -> dict[str, float]:
|
||||
"""Return the set of asset balances for this account
|
||||
by symbol.
|
||||
"""
|
||||
balances = {}
|
||||
|
||||
for currency in self.currencies:
|
||||
resp = await self.json_rpc(
|
||||
'private/get_account_summary', params={
|
||||
'currency': currency.upper()})
|
||||
|
||||
balances[currency] = resp.result['balance']
|
||||
|
||||
return balances
|
||||
|
||||
async def submit_limit(
|
||||
self,
|
||||
symbol: str,
|
||||
price: float,
|
||||
action: str,
|
||||
size: float
|
||||
) -> dict:
|
||||
"""Place an order
|
||||
"""
|
||||
params = {
|
||||
'instrument_name': symbol.upper(),
|
||||
'amount': size,
|
||||
'type': 'limit',
|
||||
'price': price,
|
||||
}
|
||||
resp = await self.json_rpc(
|
||||
f'private/{action}', params)
|
||||
|
||||
return resp.result
|
||||
|
||||
async def submit_cancel(self, oid: str):
|
||||
"""Send cancel request for order id
|
||||
"""
|
||||
resp = await self.json_rpc(
|
||||
'private/cancel', {'order_id': oid})
|
||||
return resp.result
|
||||
|
||||
async def symbol_info(
|
||||
self,
|
||||
instrument: Optional[str] = None,
|
||||
currency: str = 'btc', # BTC, ETH, SOL, USDC
|
||||
kind: str = 'option',
|
||||
expired: bool = False
|
||||
|
||||
) -> dict[str, dict]:
|
||||
'''
|
||||
Get symbol infos.
|
||||
|
||||
'''
|
||||
if self._pairs:
|
||||
return self._pairs
|
||||
|
||||
# will retrieve all symbols by default
|
||||
params: dict[str, str] = {
|
||||
'currency': currency.upper(),
|
||||
'kind': kind,
|
||||
'expired': str(expired).lower()
|
||||
}
|
||||
|
||||
resp: JSONRPCResult = await self.json_rpc(
|
||||
'public/get_instruments',
|
||||
params,
|
||||
)
|
||||
# convert to symbol-keyed table
|
||||
results: list[dict] | None = resp.result
|
||||
instruments: dict[str, dict] = {
|
||||
item['instrument_name'].lower(): item
|
||||
for item in results
|
||||
}
|
||||
|
||||
if instrument is not None:
|
||||
return instruments[instrument]
|
||||
else:
|
||||
return instruments
|
||||
|
||||
async def cache_symbols(
|
||||
self,
|
||||
) -> dict:
|
||||
|
||||
if not self._pairs:
|
||||
self._pairs = await self.symbol_info()
|
||||
|
||||
return self._pairs
|
||||
|
||||
async def search_symbols(
|
||||
self,
|
||||
pattern: str,
|
||||
limit: int = 30,
|
||||
) -> dict[str, Any]:
|
||||
'''
|
||||
Fuzzy search symbology set for pairs matching `pattern`.
|
||||
|
||||
'''
|
||||
pairs: dict[str, Any] = await self.symbol_info()
|
||||
matches: dict[str, Pair] = match_from_pairs(
|
||||
pairs=pairs,
|
||||
query=pattern.upper(),
|
||||
score_cutoff=35,
|
||||
limit=limit
|
||||
)
|
||||
|
||||
# repack in name-keyed table
|
||||
return {
|
||||
pair['instrument_name'].lower(): pair
|
||||
for pair in matches.values()
|
||||
}
|
||||
|
||||
async def bars(
|
||||
self,
|
||||
symbol: str,
|
||||
start_dt: Optional[datetime] = None,
|
||||
end_dt: Optional[datetime] = None,
|
||||
limit: int = 1000,
|
||||
as_np: bool = True,
|
||||
) -> dict:
|
||||
instrument = symbol
|
||||
|
||||
if end_dt is None:
|
||||
end_dt = pendulum.now('UTC')
|
||||
|
||||
if start_dt is None:
|
||||
start_dt = end_dt.start_of(
|
||||
'minute').subtract(minutes=limit)
|
||||
|
||||
start_time = deribit_timestamp(start_dt)
|
||||
end_time = deribit_timestamp(end_dt)
|
||||
|
||||
# https://docs.deribit.com/#public-get_tradingview_chart_data
|
||||
resp = await self.json_rpc(
|
||||
'public/get_tradingview_chart_data',
|
||||
params={
|
||||
'instrument_name': instrument.upper(),
|
||||
'start_timestamp': start_time,
|
||||
'end_timestamp': end_time,
|
||||
'resolution': '1'
|
||||
})
|
||||
|
||||
result = KLinesResult(**resp.result)
|
||||
new_bars = []
|
||||
for i in range(len(result.close)):
|
||||
|
||||
_open = result.open[i]
|
||||
high = result.high[i]
|
||||
low = result.low[i]
|
||||
close = result.close[i]
|
||||
volume = result.volume[i]
|
||||
|
||||
row = [
|
||||
(start_time + (i * (60 * 1000))) / 1000.0, # time
|
||||
result.open[i],
|
||||
result.high[i],
|
||||
result.low[i],
|
||||
result.close[i],
|
||||
result.volume[i],
|
||||
0
|
||||
]
|
||||
|
||||
new_bars.append((i,) + tuple(row))
|
||||
|
||||
array = np.array(new_bars, dtype=def_iohlcv_fields) if as_np else new_bars
|
||||
return array
|
||||
|
||||
async def last_trades(
|
||||
self,
|
||||
instrument: str,
|
||||
count: int = 10
|
||||
):
|
||||
resp = await self.json_rpc(
|
||||
'public/get_last_trades_by_instrument',
|
||||
params={
|
||||
'instrument_name': instrument,
|
||||
'count': count
|
||||
})
|
||||
|
||||
return LastTradesResult(**resp.result)
|
||||
|
||||
|
||||
@acm
|
||||
async def get_client(
|
||||
is_brokercheck: bool = False
|
||||
) -> Client:
|
||||
|
||||
async with (
|
||||
trio.open_nursery() as n,
|
||||
open_jsonrpc_session(
|
||||
_testnet_ws_url, dtype=JSONRPCResult) as json_rpc
|
||||
):
|
||||
client = Client(json_rpc)
|
||||
|
||||
_refresh_token: Optional[str] = None
|
||||
_access_token: Optional[str] = None
|
||||
|
||||
async def _auth_loop(
|
||||
task_status: TaskStatus = trio.TASK_STATUS_IGNORED
|
||||
):
|
||||
"""Background task that adquires a first access token and then will
|
||||
refresh the access token while the nursery isn't cancelled.
|
||||
|
||||
https://docs.deribit.com/?python#authentication-2
|
||||
"""
|
||||
renew_time = 10
|
||||
access_scope = 'trade:read_write'
|
||||
_expiry_time = time.time()
|
||||
got_access = False
|
||||
nonlocal _refresh_token
|
||||
nonlocal _access_token
|
||||
|
||||
while True:
|
||||
if _expiry_time - time.time() < renew_time:
|
||||
# if we are close to token expiry time
|
||||
|
||||
if _refresh_token is not None:
|
||||
# if we have a refresh token already dont need to send
|
||||
# secret
|
||||
params = {
|
||||
'grant_type': 'refresh_token',
|
||||
'refresh_token': _refresh_token,
|
||||
'scope': access_scope
|
||||
}
|
||||
|
||||
else:
|
||||
# we don't have refresh token, send secret to initialize
|
||||
params = {
|
||||
'grant_type': 'client_credentials',
|
||||
'client_id': client._key_id,
|
||||
'client_secret': client._key_secret,
|
||||
'scope': access_scope
|
||||
}
|
||||
|
||||
resp = await json_rpc('public/auth', params)
|
||||
result = resp.result
|
||||
|
||||
_expiry_time = time.time() + result['expires_in']
|
||||
_refresh_token = result['refresh_token']
|
||||
|
||||
if 'access_token' in result:
|
||||
_access_token = result['access_token']
|
||||
|
||||
if not got_access:
|
||||
# first time this loop runs we must indicate task is
|
||||
# started, we have auth
|
||||
got_access = True
|
||||
task_status.started()
|
||||
|
||||
else:
|
||||
await trio.sleep(renew_time / 2)
|
||||
|
||||
# if we have client creds launch auth loop
|
||||
if client._key_id is not None:
|
||||
await n.start(_auth_loop)
|
||||
|
||||
await client.cache_symbols()
|
||||
yield client
|
||||
n.cancel_scope.cancel()
|
||||
|
||||
|
||||
@acm
|
||||
async def open_feed_handler():
|
||||
fh = FeedHandler(config=get_config())
|
||||
yield fh
|
||||
await to_asyncio.run_task(fh.stop_async)
|
||||
|
||||
|
||||
@acm
|
||||
async def maybe_open_feed_handler() -> trio.abc.ReceiveStream:
|
||||
async with maybe_open_context(
|
||||
acm_func=open_feed_handler,
|
||||
key='feedhandler',
|
||||
) as (cache_hit, fh):
|
||||
yield fh
|
||||
|
||||
|
||||
async def aio_price_feed_relay(
|
||||
fh: FeedHandler,
|
||||
instrument: Symbol,
|
||||
from_trio: asyncio.Queue,
|
||||
to_trio: trio.abc.SendChannel,
|
||||
) -> None:
|
||||
async def _trade(data: dict, receipt_timestamp):
|
||||
to_trio.send_nowait(('trade', {
|
||||
'symbol': cb_sym_to_deribit_inst(
|
||||
str_to_cb_sym(data.symbol)).lower(),
|
||||
'last': data,
|
||||
'broker_ts': time.time(),
|
||||
'data': data.to_dict(),
|
||||
'receipt': receipt_timestamp
|
||||
}))
|
||||
|
||||
async def _l1(data: dict, receipt_timestamp):
|
||||
to_trio.send_nowait(('l1', {
|
||||
'symbol': cb_sym_to_deribit_inst(
|
||||
str_to_cb_sym(data.symbol)).lower(),
|
||||
'ticks': [
|
||||
{'type': 'bid',
|
||||
'price': float(data.bid_price), 'size': float(data.bid_size)},
|
||||
{'type': 'bsize',
|
||||
'price': float(data.bid_price), 'size': float(data.bid_size)},
|
||||
{'type': 'ask',
|
||||
'price': float(data.ask_price), 'size': float(data.ask_size)},
|
||||
{'type': 'asize',
|
||||
'price': float(data.ask_price), 'size': float(data.ask_size)}
|
||||
]
|
||||
}))
|
||||
|
||||
fh.add_feed(
|
||||
DERIBIT,
|
||||
channels=[TRADES, L1_BOOK],
|
||||
symbols=[piker_sym_to_cb_sym(instrument)],
|
||||
callbacks={
|
||||
TRADES: _trade,
|
||||
L1_BOOK: _l1
|
||||
})
|
||||
|
||||
if not fh.running:
|
||||
fh.run(
|
||||
start_loop=False,
|
||||
install_signal_handlers=False)
|
||||
|
||||
# sync with trio
|
||||
to_trio.send_nowait(None)
|
||||
|
||||
await asyncio.sleep(float('inf'))
|
||||
|
||||
|
||||
@acm
|
||||
async def open_price_feed(
|
||||
instrument: str
|
||||
) -> trio.abc.ReceiveStream:
|
||||
async with maybe_open_feed_handler() as fh:
|
||||
async with to_asyncio.open_channel_from(
|
||||
partial(
|
||||
aio_price_feed_relay,
|
||||
fh,
|
||||
instrument
|
||||
)
|
||||
) as (first, chan):
|
||||
yield chan
|
||||
|
||||
|
||||
@acm
|
||||
async def maybe_open_price_feed(
|
||||
instrument: str
|
||||
) -> trio.abc.ReceiveStream:
|
||||
|
||||
# TODO: add a predicate to maybe_open_context
|
||||
async with maybe_open_context(
|
||||
acm_func=open_price_feed,
|
||||
kwargs={
|
||||
'instrument': instrument
|
||||
},
|
||||
key=f'{instrument}-price',
|
||||
) as (cache_hit, feed):
|
||||
if cache_hit:
|
||||
yield broadcast_receiver(feed, 10)
|
||||
else:
|
||||
yield feed
|
||||
|
||||
|
||||
|
||||
async def aio_order_feed_relay(
|
||||
fh: FeedHandler,
|
||||
instrument: Symbol,
|
||||
from_trio: asyncio.Queue,
|
||||
to_trio: trio.abc.SendChannel,
|
||||
) -> None:
|
||||
async def _fill(data: dict, receipt_timestamp):
|
||||
breakpoint()
|
||||
|
||||
async def _order_info(data: dict, receipt_timestamp):
|
||||
breakpoint()
|
||||
|
||||
fh.add_feed(
|
||||
DERIBIT,
|
||||
channels=[FILLS, ORDER_INFO],
|
||||
symbols=[instrument.upper()],
|
||||
callbacks={
|
||||
FILLS: _fill,
|
||||
ORDER_INFO: _order_info,
|
||||
})
|
||||
|
||||
if not fh.running:
|
||||
fh.run(
|
||||
start_loop=False,
|
||||
install_signal_handlers=False)
|
||||
|
||||
# sync with trio
|
||||
to_trio.send_nowait(None)
|
||||
|
||||
await asyncio.sleep(float('inf'))
|
||||
|
||||
|
||||
@acm
|
||||
async def open_order_feed(
|
||||
instrument: list[str]
|
||||
) -> trio.abc.ReceiveStream:
|
||||
async with maybe_open_feed_handler() as fh:
|
||||
async with to_asyncio.open_channel_from(
|
||||
partial(
|
||||
aio_order_feed_relay,
|
||||
fh,
|
||||
instrument
|
||||
)
|
||||
) as (first, chan):
|
||||
yield chan
|
||||
|
||||
|
||||
@acm
|
||||
async def maybe_open_order_feed(
|
||||
instrument: str
|
||||
) -> trio.abc.ReceiveStream:
|
||||
|
||||
# TODO: add a predicate to maybe_open_context
|
||||
async with maybe_open_context(
|
||||
acm_func=open_order_feed,
|
||||
kwargs={
|
||||
'instrument': instrument,
|
||||
|
||||
},
|
||||
key=f'{instrument}-order',
|
||||
) as (cache_hit, feed):
|
||||
if cache_hit:
|
||||
yield broadcast_receiver(feed, 10)
|
||||
else:
|
||||
yield feed
|
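For orientation, a minimal usage sketch of the client API defined above; this is not part of the backend itself, the module path and the instrument name are assumptions (the client connects to the testnet websocket URL by default):

# usage sketch (assumptions: module path, instrument name)
import trio
from piker.brokers.deribit.api import get_client

async def main():
    async with get_client() as client:
        # `last_trades()` wraps 'public/get_last_trades_by_instrument'
        result = await client.last_trades('BTC-PERPETUAL', count=5)
        for trade in result.trades:
            print(trade)

trio.run(main)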
Five binary image files added (not shown): 169 KiB, 106 KiB, 59 KiB, 70 KiB and 132 KiB.
|
@ -0,0 +1,185 @@
|
|||
# piker: trading gear for hackers
|
||||
# Copyright (C) Guillermo Rodriguez (in stewardship for piker0)
|
||||
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU Affero General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU Affero General Public License for more details.
|
||||
|
||||
# You should have received a copy of the GNU Affero General Public License
|
||||
# along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
'''
|
||||
Deribit backend.
|
||||
|
||||
'''
|
||||
from contextlib import asynccontextmanager as acm
|
||||
from datetime import datetime
|
||||
from typing import Any, Optional, Callable
|
||||
import time
|
||||
|
||||
import trio
|
||||
from trio_typing import TaskStatus
|
||||
import pendulum
|
||||
from rapidfuzz import process as fuzzy
|
||||
import numpy as np
|
||||
import tractor
|
||||
|
||||
from piker.brokers import open_cached_client
|
||||
from piker.log import get_logger, get_console_log
|
||||
from piker.data import ShmArray
|
||||
from piker.brokers._util import (
|
||||
BrokerError,
|
||||
DataUnavailable,
|
||||
)
|
||||
|
||||
from cryptofeed import FeedHandler
|
||||
from cryptofeed.defines import (
|
||||
DERIBIT, L1_BOOK, TRADES, OPTION, CALL, PUT
|
||||
)
|
||||
from cryptofeed.symbols import Symbol
|
||||
|
||||
from .api import (
|
||||
Client, Trade,
|
||||
get_config,
|
||||
str_to_cb_sym, piker_sym_to_cb_sym, cb_sym_to_deribit_inst,
|
||||
maybe_open_price_feed
|
||||
)
|
||||
|
||||
_spawn_kwargs = {
|
||||
'infect_asyncio': True,
|
||||
}
|
||||
|
||||
|
||||
log = get_logger(__name__)
|
||||
|
||||
|
||||
@acm
|
||||
async def open_history_client(
|
||||
mkt: MktPair,
|
||||
) -> tuple[Callable, int]:
|
||||
|
||||
instrument: str = mkt.bs_fqme
|
||||
# TODO implement history getter for the new storage layer.
|
||||
async with open_cached_client('deribit') as client:
|
||||
|
||||
async def get_ohlc(
|
||||
end_dt: Optional[datetime] = None,
|
||||
start_dt: Optional[datetime] = None,
|
||||
|
||||
) -> tuple[
|
||||
np.ndarray,
|
||||
datetime, # start
|
||||
datetime, # end
|
||||
]:
|
||||
|
||||
array = await client.bars(
|
||||
instrument,
|
||||
start_dt=start_dt,
|
||||
end_dt=end_dt,
|
||||
)
|
||||
if len(array) == 0:
|
||||
raise DataUnavailable
|
||||
|
||||
start_dt = pendulum.from_timestamp(array[0]['time'])
|
||||
end_dt = pendulum.from_timestamp(array[-1]['time'])
|
||||
|
||||
return array, start_dt, end_dt
|
||||
|
||||
yield get_ohlc, {'erlangs': 3, 'rate': 3}
|
||||
|
||||
|
||||
async def stream_quotes(
|
||||
|
||||
send_chan: trio.abc.SendChannel,
|
||||
symbols: list[str],
|
||||
feed_is_live: trio.Event,
|
||||
loglevel: str = None,
|
||||
|
||||
# startup sync
|
||||
task_status: TaskStatus[tuple[dict, dict]] = trio.TASK_STATUS_IGNORED,
|
||||
|
||||
) -> None:
|
||||
# XXX: required to propagate ``tractor`` loglevel to piker logging
|
||||
get_console_log(loglevel or tractor.current_actor().loglevel)
|
||||
|
||||
sym = symbols[0]
|
||||
|
||||
async with (
|
||||
open_cached_client('deribit') as client,
|
||||
send_chan as send_chan
|
||||
):
|
||||
|
||||
init_msgs = {
|
||||
# pass back token, and bool, signalling if we're the writer
|
||||
# and that history has been written
|
||||
sym: {
|
||||
'symbol_info': {
|
||||
'asset_type': 'option',
|
||||
'price_tick_size': 0.0005
|
||||
},
|
||||
'shm_write_opts': {'sum_tick_vml': False},
|
||||
'fqsn': sym,
|
||||
},
|
||||
}
|
||||
|
||||
nsym = piker_sym_to_cb_sym(sym)
|
||||
|
||||
async with maybe_open_price_feed(sym) as stream:
|
||||
|
||||
cache = await client.cache_symbols()
|
||||
|
||||
last_trades = (await client.last_trades(
|
||||
cb_sym_to_deribit_inst(nsym), count=1)).trades
|
||||
|
||||
if len(last_trades) == 0:
|
||||
last_trade = None
|
||||
async for typ, quote in stream:
|
||||
if typ == 'trade':
|
||||
last_trade = Trade(**(quote['data']))
|
||||
break
|
||||
|
||||
else:
|
||||
last_trade = Trade(**(last_trades[0]))
|
||||
|
||||
first_quote = {
|
||||
'symbol': sym,
|
||||
'last': last_trade.price,
|
||||
'brokerd_ts': last_trade.timestamp,
|
||||
'ticks': [{
|
||||
'type': 'trade',
|
||||
'price': last_trade.price,
|
||||
'size': last_trade.amount,
|
||||
'broker_ts': last_trade.timestamp
|
||||
}]
|
||||
}
|
||||
task_status.started((init_msgs, first_quote))
|
||||
|
||||
feed_is_live.set()
|
||||
|
||||
async for typ, quote in stream:
|
||||
topic = quote['symbol']
|
||||
await send_chan.send({topic: quote})
|
||||
|
||||
|
||||
@tractor.context
|
||||
async def open_symbol_search(
|
||||
ctx: tractor.Context,
|
||||
) -> Client:
|
||||
async with open_cached_client('deribit') as client:
|
||||
|
||||
# load all symbols locally for fast search
|
||||
cache = await client.cache_symbols()
|
||||
await ctx.started()
|
||||
|
||||
async with ctx.open_stream() as stream:
|
||||
|
||||
async for pattern in stream:
|
||||
# repack in dict form
|
||||
await stream.send(
|
||||
await client.search_symbols(pattern))
|
|
@@ -0,0 +1,134 @@
``ib`` backend
--------------
more or less the "everything broker" for traditional and international
markets. they are the "go to" provider for automatic retail trading
and we interface to their APIs using the `ib_insync` project.

status
******
current support is *production grade* and both real-time data and order
management should be correct and fast. this backend is used by core devs
for live trading.

currently there is not yet full support for:
- options charting and trading
- paxos based crypto rt feeds and trading


config
******
In order to get order mode support your ``brokers.toml``
needs to have something like the following:

.. code:: toml

    [ib]
    hosts = [
        "127.0.0.1",
    ]
    # TODO: when we eventually spawn gateways in our
    # container, we can just dynamically allocate these
    # using IBC.
    ports = [
        4002,
        4003,
        4006,
        4001,
        7497,
    ]

    # XXX: for a paper account the flex web query service
    # is not supported so you have to manually download
    # an XML report and put it in a location that can be
    # accessed by the ``brokerd.ib`` backend code for parsing.
    flex_token = '1111111111111111'
    flex_trades_query_id = '6969696' # live accounts only?

    # 3rd party web-api token
    # (XXX: not sure if this works yet)
    trade_log_token = '111111111111111'

    # when clients are being scanned this determines
    # which clients are preferred to be used for data feeds
    # based on account names which are detected as active
    # on each client.
    prefer_data_account = [
        # this has to be first in order to make data work with dual paper + live
        'main',
        'algopaper',
    ]

    [ib.accounts]
    main = 'U69696969'
    algopaper = 'DU9696969'


If everything works correctly you should see any current positions
loaded in the pps pane on chart load and you should also be able to
check your trade records in the file::

    <pikerk_conf_dir>/ledgers/trades_ib_algopaper.toml


An example ledger file will have entries written verbatim from the
trade events schema:

.. code:: toml

    ["0000e1a7.630f5e5a.01.01"]
    secType = "FUT"
    conId = 515416577
    symbol = "MNQ"
    lastTradeDateOrContractMonth = "20221216"
    strike = 0.0
    right = ""
    multiplier = "2"
    exchange = "GLOBEX"
    primaryExchange = ""
    currency = "USD"
    localSymbol = "MNQZ2"
    tradingClass = "MNQ"
    includeExpired = false
    secIdType = ""
    secId = ""
    comboLegsDescrip = ""
    comboLegs = []
    execId = "0000e1a7.630f5e5a.01.01"
    time = 1661972086.0
    acctNumber = "DU69696969"
    side = "BOT"
    shares = 1.0
    price = 12372.75
    permId = 441472655
    clientId = 6116
    orderId = 985
    liquidation = 0
    cumQty = 1.0
    avgPrice = 12372.75
    orderRef = ""
    evRule = ""
    evMultiplier = 0.0
    modelCode = ""
    lastLiquidity = 1
    broker_time = 1661972086.0
    name = "ib"
    commission = 0.57
    realizedPNL = 243.41
    yield_ = 0.0
    yieldRedemptionDate = 0
    listingExchange = "GLOBEX"
    date = "2022-08-31T18:54:46+00:00"


your ``pps.toml`` file will have position entries like,

.. code:: toml

    [ib.algopaper."mnq.globex.20221216"]
    size = -1.0
    ppu = 12423.630576923071
    bs_mktid = 515416577
    expiry = "2022-12-16T00:00:00+00:00"
    clears = [
        { dt = "2022-08-31T18:54:46+00:00", ppu = 12423.630576923071, accum_size = -19.0, price = 12372.75, size = 1.0, cost = 0.57, tid = "0000e1a7.630f5e5a.01.01" },
    ]
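for a quick sanity check you can read a ledger file back with plain
Python. a minimal sketch, assuming Python >= 3.11 for the stdlib
``tomllib`` module; the path below is only a placeholder for your
actual ``<pikerk_conf_dir>``:

.. code:: python

    # minimal sketch: inspect an ib ledger file written by piker.
    # NOTE: the path is a placeholder, substitute your real config dir.
    import tomllib
    from pathlib import Path

    ledger_path = Path(
        '~/.config/piker/ledgers/trades_ib_algopaper.toml'
    ).expanduser()

    with ledger_path.open('rb') as fp:
        ledger = tomllib.load(fp)

    for tid, entry in ledger.items():
        # each entry mirrors the trade-event schema shown above
        print(tid, entry['symbol'], entry['side'], entry['shares'], entry['price'])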
@@ -20,41 +20,62 @@ Interactive Brokers API backend.
Sub-modules within break into the core functionalities:

- ``broker.py`` part for orders / trading endpoints
- ``data.py`` for real-time data feed endpoints

- ``client.py`` for the core API machinery which is ``trio``-ized
- ``feed.py`` for real-time data feed endpoints
- ``api.py`` for the core API machinery which is ``trio``-ized
  wrapping around ``ib_insync``.

- ``report.py`` for the hackery to build manual pp calcs
  to avoid ib's absolute bullshit FIFO style position
  tracking..

"""
from .api import (
    get_client,
)
from .feed import (
    open_history_client,
    open_symbol_search,
    stream_quotes,
)
from .broker import trades_dialogue
from .broker import (
    open_trade_dialog,
)
from .ledger import (
    norm_trade,
    norm_trade_records,
    tx_sort,
)
from .symbols import (
    get_mkt_info,
    open_symbol_search,
    _search_conf,
)

__all__ = [
    'get_client',
    'trades_dialogue',
    'get_mkt_info',
    'norm_trade',
    'norm_trade_records',
    'open_trade_dialog',
    'open_history_client',
    'open_symbol_search',
    'stream_quotes',
    '_search_conf',
    'tx_sort',
]

_brokerd_mods: list[str] = [
    'api',
    'broker',
]

_datad_mods: list[str] = [
    'feed',
    'symbols',
]


# tractor RPC enable arg
__enable_modules__: list[str] = [
    'api',
    'feed',
    'broker',
]
__enable_modules__: list[str] = (
    _brokerd_mods
    +
    _datad_mods
)

# passed to ``tractor.ActorNursery.start_actor()``
_spawn_kwargs = {

@@ -65,3 +86,8 @@ _spawn_kwargs = {
# know if ``brokerd`` should be spawned with
# ``tractor``'s aio mode.
_infect_asyncio: bool = True

# XXX NOTE: for now we disable symcache with this backend since
# there is no clearly simple nor practical way to download "all
# symbology info" for all supported venues..
_no_symcache: bool = True
@@ -0,0 +1,195 @@
# piker: trading gear for hackers
# Copyright (C) Tyler Goodlet (in stewardship for pikers)

# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.

# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.

# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.

"""
"FLEX" report processing utils.

"""
from bidict import bidict
import pendulum
from pprint import pformat
from typing import Any

from .api import (
    get_config,
    log,
)
from piker.accounting import (
    open_trade_ledger,
)


def parse_flex_dt(
    record: str,
) -> pendulum.datetime:
    '''
    Parse stupid flex record datetime stamps for the `dateTime` field..

    '''
    date, ts = record.split(';')
    dt = pendulum.parse(date)
    ts = f'{ts[:2]}:{ts[2:4]}:{ts[4:]}'
    tsdt = pendulum.parse(ts)
    return dt.set(hour=tsdt.hour, minute=tsdt.minute, second=tsdt.second)


def flex_records_to_ledger_entries(
    accounts: bidict,
    trade_entries: list[object],

) -> dict:
    '''
    Convert flex report entry objects into ``dict`` form, pretty much
    straight up without modification except add a `pydatetime` field
    from the parsed timestamp.

    '''
    trades_by_account = {}
    for t in trade_entries:
        entry = t.__dict__

        # XXX: LOL apparently ``toml`` has a bug
        # where a section key error will show up in the write
        # if you leave a table key as an `int`? So i guess
        # cast to strs for all keys..

        # oddly for some so-called "BookTrade" entries
        # this field seems to be blank, no cuckin clue.
        # trade['ibExecID']
        tid = str(entry.get('ibExecID') or entry['tradeID'])
        # date = str(entry['tradeDate'])

        # XXX: is it going to cause problems if an account name
        # gets lost? The user should be able to find it based
        # on the actual exec history right?
        acctid = accounts[str(entry['accountId'])]

        # probably a flex record with a wonky non-std timestamp..
        dt = entry['pydatetime'] = parse_flex_dt(entry['dateTime'])
        entry['datetime'] = str(dt)

        if not tid:
            # this is likely some kind of internal adjustment
            # transaction, likely one of the following:
            # - an expiry event that will show a "book trade" indicating
            #   some adjustment to cash balances: zeroing or itm settle.
            # - a manual cash balance position adjustment likely done by
            #   the user from the accounts window in TWS where they can
            #   manually set the avg price and size:
            #   https://api.ibkr.com/lib/cstools/faq/web1/index.html#/tag/DTWS_ADJ_AVG_COST
            log.warning(f'Skipping ID-less ledger entry:\n{pformat(entry)}')
            continue

        trades_by_account.setdefault(
            acctid, {}
        )[tid] = entry

    for acctid in trades_by_account:
        trades_by_account[acctid] = dict(sorted(
            trades_by_account[acctid].items(),
            key=lambda entry: entry[1]['pydatetime'],
        ))

    return trades_by_account


def load_flex_trades(
    path: str | None = None,

) -> dict[str, Any]:

    from ib_insync import flexreport, util

    conf = get_config()

    if not path:
        # load ``brokers.toml`` and try to get the flex
        # token and query id that must be previously defined
        # by the user.
        token = conf.get('flex_token')
        if not token:
            raise ValueError(
                'You must specify a ``flex_token`` field in your '
                '`brokers.toml` in order to load your trade log, see our '
                'instructions for how to set this up here:\n'
                'PUT LINK HERE!'
            )

        qid = conf['flex_trades_query_id']

        # TODO: hack this into our logging
        # system like we do with the API client..
        util.logToConsole()

        # TODO: rewrite the query part of this with async..httpx?
        report = flexreport.FlexReport(
            token=token,
            queryId=qid,
        )

    else:
        # XXX: another project we could potentially look at,
        # https://pypi.org/project/ibflex/
        report = flexreport.FlexReport(path=path)

    trade_entries = report.extract('Trade')
    ln = len(trade_entries)
    log.info(f'Loaded {ln} trades from flex query')

    trades_by_account = flex_records_to_ledger_entries(
        conf['accounts'].inverse,  # reverse map to user account names
        trade_entries,
    )

    ledger_dict: dict | None = None

    for acctid in trades_by_account:
        trades_by_id = trades_by_account[acctid]

        with open_trade_ledger(
            'ib',
            acctid,
            allow_from_sync_code=True,
        ) as ledger_dict:
            tid_delta = set(trades_by_id) - set(ledger_dict)
            log.info(
                'New trades detected\n'
                f'{pformat(tid_delta)}'
            )
            if tid_delta:
                sorted_delta = dict(sorted(
                    {tid: trades_by_id[tid] for tid in tid_delta}.items(),
                    key=lambda entry: entry[1].pop('pydatetime'),
                ))
                ledger_dict.update(sorted_delta)

    return ledger_dict


if __name__ == '__main__':
    import sys
    import os

    args = sys.argv
    if len(args) > 1:
        args = args[1:]
        for arg in args:
            path = os.path.abspath(arg)
            load_flex_trades(path=path)
    else:
        # expect brokers.toml to have an entry and
        # pull from the web service.
        load_flex_trades()
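A minimal usage sketch for the loader above; the module path is implied by the relative imports elsewhere in this PR but is still an assumption, and the report location is a placeholder:

# usage sketch (assumptions: module path, location of a saved flex XML report)
from piker.brokers.ib._flex_reports import load_flex_trades

# parse a locally saved flex XML report instead of querying the web service
ledger = load_flex_trades(path='/tmp/ib_flex_report.xml')

# the returned ledger dict is keyed by trade id
for tid, entry in (ledger or {}).items():
    print(tid, entry.get('symbol'), entry.get('datetime'))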
|
@ -0,0 +1,269 @@
|
|||
# piker: trading gear for hackers
|
||||
# Copyright (C) Tyler Goodlet (in stewardship for pikers)
|
||||
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU Affero General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU Affero General Public License for more details.
|
||||
|
||||
# You should have received a copy of the GNU Affero General Public License
|
||||
# along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
'''
|
||||
``ib`` utilities and hacks suitable for use in the backend and/or as
|
||||
runnable script-programs.
|
||||
|
||||
'''
|
||||
from __future__ import annotations
|
||||
from functools import partial
|
||||
from typing import (
|
||||
Literal,
|
||||
TYPE_CHECKING,
|
||||
)
|
||||
import subprocess
|
||||
|
||||
import tractor
|
||||
|
||||
from piker.brokers._util import get_logger
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from .api import Client
|
||||
from ib_insync import IB
|
||||
|
||||
log = get_logger('piker.brokers.ib')
|
||||
|
||||
_reset_tech: Literal[
|
||||
'vnc',
|
||||
'i3ipc_xdotool',
|
||||
|
||||
# TODO: in theory we can use a different linux DE API or
|
||||
# some other type of similar window scanning/mgmt client
|
||||
# (on other OSs) to do the same.
|
||||
|
||||
] = 'vnc'
|
||||
|
||||
|
||||
async def data_reset_hack(
|
||||
# vnc_host: str,
|
||||
client: Client,
|
||||
reset_type: Literal['data', 'connection'],
|
||||
|
||||
) -> None:
|
||||
'''
|
||||
Run key combos for resetting data feeds and yield back to caller
|
||||
when complete.
|
||||
|
||||
NOTE: this is a linux-only hack around!
|
||||
|
||||
There are multiple "techs" you can use depending on your infra setup:
|
||||
|
||||
- if running ib-gw in a container with a VNC server running the most
|
||||
performant method is the `'vnc'` option.
|
||||
|
||||
- if running ib-gw/tws locally, and you are using `i3` you can use
|
||||
the ``i3ipc`` lib and ``xdotool`` to send the appropriate click
|
||||
and key-combos automatically to your local desktop's java X-apps.
|
||||
|
||||
https://interactivebrokers.github.io/tws-api/historical_limitations.html#pacing_violations
|
||||
|
||||
TODOs:
|
||||
- a return type that hopefully determines if the hack was
|
||||
successful.
|
||||
- other OS support?
|
||||
- integration with ``ib-gw`` run in docker + Xorg?
|
||||
- is it possible to offer a local server that can be accessed by
|
||||
a client? Would sure be handy for running native java blobs
|
||||
that need to be wrangled.
|
||||
|
||||
'''
|
||||
ib_client: IB = client.ib
|
||||
|
||||
# look up any user defined vnc socket address mapped from
|
||||
# a particular API socket port.
|
||||
api_port: str = str(ib_client.client.port)
|
||||
vnc_host: str
|
||||
vnc_port: int
|
||||
vnc_sockaddr: dict[str, tuple[str, int]] | None = client.conf.get('vnc_addrs')
|
||||
|
||||
no_setup_msg: str = (
|
||||
f'No data reset hack test setup for {vnc_sockaddr}!\n'
|
||||
'See config setup tips @\n'
|
||||
'https://github.com/pikers/piker/tree/master/piker/brokers/ib'
|
||||
)
|
||||
|
||||
if not vnc_sockaddr:
|
||||
log.warning(
|
||||
no_setup_msg
|
||||
+
|
||||
'REQUIRES A `vnc_addrs: array` ENTRY'
|
||||
)
|
||||
|
||||
vnc_host, vnc_port = vnc_sockaddr.get(
|
||||
api_port,
|
||||
('localhost', 3003)
|
||||
)
|
||||
global _reset_tech
|
||||
|
||||
match _reset_tech:
|
||||
case 'vnc':
|
||||
try:
|
||||
await tractor.to_asyncio.run_task(
|
||||
partial(
|
||||
vnc_click_hack,
|
||||
host=vnc_host,
|
||||
port=vnc_port,
|
||||
)
|
||||
)
|
||||
except OSError:
|
||||
if vnc_host != 'localhost':
|
||||
log.warning(no_setup_msg)
|
||||
return False
|
||||
|
||||
try:
|
||||
import i3ipc # noqa (since a deps dynamic check)
|
||||
except ModuleNotFoundError:
|
||||
log.warning(no_setup_msg)
|
||||
return False
|
||||
|
||||
try:
|
||||
i3ipc_xdotool_manual_click_hack()
|
||||
_reset_tech = 'i3ipc_xdotool'
|
||||
return True
|
||||
except OSError:
|
||||
log.exception(no_setup_msg)
|
||||
return False
|
||||
|
||||
case 'i3ipc_xdotool':
|
||||
i3ipc_xdotool_manual_click_hack()
|
||||
|
||||
case _ as tech:
|
||||
raise RuntimeError(f'{tech} is not supported for reset tech!?')
|
||||
|
||||
# we don't really need the ``xdotool`` approach any more B)
|
||||
return True
|
||||
|
||||
|
||||
async def vnc_click_hack(
|
||||
host: str,
|
||||
port: int,
|
||||
reset_type: str = 'data'
|
||||
) -> None:
|
||||
'''
|
||||
Reset the data or network connection for the VNC attached
|
||||
ib gateway using magic combos.
|
||||
|
||||
'''
|
||||
try:
|
||||
import asyncvnc
|
||||
except ModuleNotFoundError:
|
||||
log.warning(
|
||||
"In order to leverage `piker`'s built-in data reset hacks, install "
|
||||
"the `asyncvnc` project: https://github.com/barneygale/asyncvnc"
|
||||
)
|
||||
return
|
||||
|
||||
# two different hot keys which trigger diff types of reset
|
||||
# requests B)
|
||||
key = {
|
||||
'data': 'f',
|
||||
'connection': 'r'
|
||||
}[reset_type]
|
||||
|
||||
async with asyncvnc.connect(
|
||||
host,
|
||||
port=port,
|
||||
|
||||
# TODO: doesn't work see:
|
||||
# https://github.com/barneygale/asyncvnc/issues/7
|
||||
# password='ibcansmbz',
|
||||
|
||||
) as client:
|
||||
|
||||
# move to middle of screen
|
||||
# 640x1800
|
||||
client.mouse.move(
|
||||
x=500,
|
||||
y=500,
|
||||
)
|
||||
client.mouse.click()
|
||||
client.keyboard.press('Ctrl', 'Alt', key) # keys are stacked
|
||||
|
||||
|
||||
def i3ipc_xdotool_manual_click_hack() -> None:
|
||||
'''
|
||||
Do the data reset hack but expecting a local X-window using `xdotool`.
|
||||
|
||||
'''
|
||||
import i3ipc
|
||||
i3 = i3ipc.Connection()
|
||||
|
||||
# TODO: might be worth offering some kinda api for grabbing
|
||||
# the window id from the pid?
|
||||
# https://stackoverflow.com/a/2250879
|
||||
t = i3.get_tree()
|
||||
|
||||
orig_win_id = t.find_focused().window
|
||||
|
||||
# for tws
|
||||
win_names: list[str] = [
|
||||
'Interactive Brokers', # tws running in i3
|
||||
'IB Gateway', # gw running in i3
|
||||
# 'IB', # gw running in i3 (newer version?)
|
||||
]
|
||||
|
||||
try:
|
||||
for name in win_names:
|
||||
results = t.find_titled(name)
|
||||
print(f'results for {name}: {results}')
|
||||
if results:
|
||||
con = results[0]
|
||||
print(f'Resetting data feed for {name}')
|
||||
win_id = str(con.window)
|
||||
w, h = con.rect.width, con.rect.height
|
||||
|
||||
# TODO: seems to be a few libs for python but not sure
|
||||
# if they support all the sub commands we need, order of
|
||||
# most recent commit history:
|
||||
# https://github.com/rr-/pyxdotool
|
||||
# https://github.com/ShaneHutter/pyxdotool
|
||||
# https://github.com/cphyc/pyxdotool
|
||||
|
||||
# TODO: only run the reconnect (2nd) kc on a detected
|
||||
# disconnect?
|
||||
for key_combo, timeout in [
|
||||
# only required if we need a connection reset.
|
||||
# ('ctrl+alt+r', 12),
|
||||
# data feed reset.
|
||||
('ctrl+alt+f', 6)
|
||||
]:
|
||||
subprocess.call([
|
||||
'xdotool',
|
||||
'windowactivate', '--sync', win_id,
|
||||
|
||||
# move mouse to bottom left of window (where
|
||||
# there should be nothing to click).
|
||||
'mousemove_relative', '--sync', str(w-4), str(h-4),
|
||||
|
||||
# NOTE: we may need to stick a `--retry 3` in here..
|
||||
'click', '--window', win_id,
|
||||
'--repeat', '3', '1',
|
||||
|
||||
# hackzorzes
|
||||
'key', key_combo,
|
||||
],
|
||||
timeout=timeout,
|
||||
)
|
||||
|
||||
# re-activate and focus original window
|
||||
subprocess.call([
|
||||
'xdotool',
|
||||
'windowactivate', '--sync', str(orig_win_id),
|
||||
'click', '--window', str(orig_win_id), '1',
|
||||
])
|
||||
except subprocess.TimeoutExpired:
|
||||
log.exception('xdotool timed out?')
|
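For reference, a minimal sketch of driving the VNC reset path directly; the module path, host and port are assumptions for a local ib-gw container setup (port 3003 is the fallback used above) and the optional `asyncvnc` dependency must be installed:

# usage sketch (assumptions: module path, host/port of the ib-gw VNC server)
import asyncio
from piker.brokers.ib._util import vnc_click_hack

# sends the ctrl-alt-f "data feed reset" key combo to the gateway over VNC
asyncio.run(
    vnc_click_hack(
        host='localhost',
        port=3003,
        reset_type='data',  # or 'connection' for ctrl-alt-r
    )
)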
File diff suppressed because it is too large (three files not shown).
|
@ -0,0 +1,495 @@
|
|||
# piker: trading gear for hackers
|
||||
# Copyright (C) Tyler Goodlet (in stewardship for pikers)
|
||||
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU Affero General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU Affero General Public License for more details.
|
||||
|
||||
# You should have received a copy of the GNU Affero General Public License
|
||||
# along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
'''
|
||||
Trade transaction accounting and normalization.
|
||||
|
||||
'''
|
||||
from __future__ import annotations
|
||||
from bisect import insort
|
||||
from dataclasses import asdict
|
||||
from decimal import Decimal
|
||||
from functools import partial
|
||||
from pprint import pformat
|
||||
from typing import (
|
||||
Any,
|
||||
Callable,
|
||||
TYPE_CHECKING,
|
||||
)
|
||||
|
||||
from bidict import bidict
|
||||
import pendulum
|
||||
from ib_insync import (
|
||||
Contract,
|
||||
Commodity,
|
||||
Fill,
|
||||
Execution,
|
||||
CommissionReport,
|
||||
)
|
||||
|
||||
from piker.types import Struct
|
||||
from piker.data import (
|
||||
SymbologyCache,
|
||||
)
|
||||
from piker.accounting import (
|
||||
Asset,
|
||||
dec_digits,
|
||||
digits_to_dec,
|
||||
Transaction,
|
||||
MktPair,
|
||||
iter_by_dt,
|
||||
)
|
||||
from ._flex_reports import parse_flex_dt
|
||||
from ._util import log
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from .api import (
|
||||
Client,
|
||||
MethodProxy,
|
||||
)
|
||||
|
||||
|
||||
tx_sort: Callable = partial(
|
||||
iter_by_dt,
|
||||
parsers={
|
||||
'dateTime': parse_flex_dt,
|
||||
'datetime': pendulum.parse,
|
||||
# for some fucking 2022 and
|
||||
# back options records...fuck me.
|
||||
'date': pendulum.parse,
|
||||
}
|
||||
)
|
||||
|
||||
|
||||
def norm_trade(
|
||||
tid: str,
|
||||
record: dict[str, Any],
|
||||
|
||||
# this is the dict that was returned from
|
||||
# `Client.get_mkt_pairs()` and when running offline ledger
|
||||
# processing from `.accounting`, this will be the table loaded
|
||||
# into `SymbologyCache.pairs`.
|
||||
pairs: dict[str, Struct],
|
||||
symcache: SymbologyCache | None = None,
|
||||
|
||||
) -> Transaction | None:
|
||||
|
||||
conid: str = str(record.get('conId') or record['conid'])
|
||||
bs_mktid: str = str(conid)
|
||||
comms = record.get('commission')
|
||||
if comms is None:
|
||||
comms = -1*record['ibCommission']
|
||||
|
||||
price = record.get('price') or record['tradePrice']
|
||||
|
||||
# the api doesn't do the -/+ on the quantity for you but flex
|
||||
# records do.. are you fucking serious ib...!?
|
||||
size = record.get('quantity') or record['shares'] * {
|
||||
'BOT': 1,
|
||||
'SLD': -1,
|
||||
}[record['side']]
|
||||
|
||||
symbol: str = record['symbol']
|
||||
exch: str = (
|
||||
record.get('listingExchange')
|
||||
or record.get('primaryExchange')
|
||||
or record['exchange']
|
||||
)
|
||||
|
||||
# NOTE: remove null values since `tomlkit` can't serialize
|
||||
# them to file.
|
||||
if dnc := record.pop('deltaNeutralContract', None):
|
||||
record['deltaNeutralContract'] = dnc
|
||||
|
||||
# likely an opts contract record from a flex report..
|
||||
# TODO: no idea how to parse ^ the strike part from flex..
|
||||
# (00010000 any, or 00007500 tsla, ..)
|
||||
# we probably must do the contract lookup for this?
|
||||
if (
|
||||
' ' in symbol
|
||||
or '--' in exch
|
||||
):
|
||||
underlying, _, tail = symbol.partition(' ')
|
||||
exch: str = 'opt'
|
||||
expiry: str = tail[:6]
|
||||
# otype = tail[6]
|
||||
# strike = tail[7:]
|
||||
|
||||
print(f'skipping opts contract {symbol}')
|
||||
return None
|
||||
|
||||
# timestamping is way different in API records
|
||||
dtstr = record.get('datetime')
|
||||
date = record.get('date')
|
||||
flex_dtstr = record.get('dateTime')
|
||||
|
||||
if dtstr or date:
|
||||
dt = pendulum.parse(dtstr or date)
|
||||
|
||||
elif flex_dtstr:
|
||||
# probably a flex record with a wonky non-std timestamp..
|
||||
dt = parse_flex_dt(record['dateTime'])
|
||||
|
||||
# special handling of symbol extraction from
|
||||
# flex records using some ad-hoc schema parsing.
|
||||
asset_type: str = record.get(
|
||||
'assetCategory'
|
||||
) or record.get('secType', 'STK')
|
||||
|
||||
if (expiry := (
|
||||
record.get('lastTradeDateOrContractMonth')
|
||||
or record.get('expiry')
|
||||
)
|
||||
):
|
||||
expiry: str = str(expiry).strip(' ')
|
||||
# NOTE: we directly use the (simple and usually short)
|
||||
# date-string expiry token when packing the `MktPair`
|
||||
# since we want the fqme to contain *that* token.
|
||||
# It might make sense later to instead parse and then
|
||||
# render different output str format(s) for this same
|
||||
# purpose depending on asset-type-market down the road.
|
||||
# Eg. for derivs we use the short token only for fqme
|
||||
# but use the isoformat('T') for transactions and
|
||||
# account file position entries?
|
||||
# dt_str: str = pendulum.parse(expiry).isoformat('T')
|
||||
|
||||
# XXX: pretty much all legacy market assets have a fiat
|
||||
# currency (denomination) determined by their venue.
|
||||
currency: str = record['currency']
|
||||
src = Asset(
|
||||
name=currency.lower(),
|
||||
atype='fiat',
|
||||
tx_tick=Decimal('0.01'),
|
||||
)
|
||||
|
||||
match asset_type:
|
||||
case 'FUT':
|
||||
# XXX (flex) ledger entries don't necessarily have any
|
||||
# simple 3-char key.. sometimes the .symbol is some
|
||||
# weird internal key that we probably don't want in the
|
||||
# .fqme => we should probably just wrap `Contract` to
|
||||
# this like we do other crypto$ backends XD
|
||||
|
||||
# NOTE: at least older FLEX records should have
|
||||
# this field.. no idea about API entries..
|
||||
local_symbol: str | None = record.get('localSymbol')
|
||||
underlying_key: str = record.get('underlyingSymbol')
|
||||
descr: str | None = record.get('description')
|
||||
|
||||
if (
|
||||
not (
|
||||
local_symbol
|
||||
and symbol in local_symbol
|
||||
)
|
||||
and (
|
||||
descr
|
||||
and symbol not in descr
|
||||
)
|
||||
):
|
||||
con_key, exp_str = descr.split(' ')
|
||||
symbol: str = underlying_key or con_key
|
||||
|
||||
dst = Asset(
|
||||
name=symbol.lower(),
|
||||
atype='future',
|
||||
tx_tick=Decimal('1'),
|
||||
)
|
||||
|
||||
case 'STK':
|
||||
dst = Asset(
|
||||
name=symbol.lower(),
|
||||
atype='stock',
|
||||
tx_tick=Decimal('1'),
|
||||
)
|
||||
|
||||
case 'CASH':
|
||||
if currency not in symbol:
|
||||
# likely a dict-casted `Forex` contract which
|
||||
# has .symbol as the dst and .currency as the
|
||||
# src.
|
||||
name: str = symbol.lower()
|
||||
else:
|
||||
# likely a flex-report record which puts
|
||||
# EUR.USD as the symbol field and just USD in
|
||||
# the currency field.
|
||||
name: str = symbol.lower().replace(f'.{src.name}', '')
|
||||
|
||||
dst = Asset(
|
||||
name=name,
|
||||
atype='fiat',
|
||||
tx_tick=Decimal('0.01'),
|
||||
)
|
||||
|
||||
case 'OPT':
|
||||
dst = Asset(
|
||||
name=symbol.lower(),
|
||||
atype='option',
|
||||
tx_tick=Decimal('1'),
|
||||
|
||||
# TODO: we should probably always cast to the
|
||||
# `Contract` instance then dict-serialize that for
|
||||
# the `.info` field!
|
||||
# info=asdict(Option()),
|
||||
)
|
||||
|
||||
case 'CMDTY':
|
||||
from .symbols import _adhoc_symbol_map
|
||||
con_kwargs, _ = _adhoc_symbol_map[symbol.upper()]
|
||||
dst = Asset(
|
||||
name=symbol.lower(),
|
||||
atype='commodity',
|
||||
tx_tick=Decimal('1'),
|
||||
info=asdict(Commodity(**con_kwargs)),
|
||||
)
|
||||
|
||||
# try to build out piker fqme from record.
|
||||
# src: str = record['currency']
|
||||
price_tick: Decimal = digits_to_dec(dec_digits(price))
|
||||
|
||||
# NOTE: can't serialize `tomlkit.String` so cast to native
|
||||
atype: str = str(dst.atype)
|
||||
|
||||
# if not (mkt := symcache.mktmaps.get(bs_mktid)):
|
||||
mkt = MktPair(
|
||||
bs_mktid=bs_mktid,
|
||||
dst=dst,
|
||||
|
||||
price_tick=price_tick,
|
||||
# NOTE: for "legacy" assets, volume is normally discreet, not
|
||||
# a float, but we keep a digit in case the suitz decide
|
||||
# to get crazy and change it; we'll be kinda ready
|
||||
# schema-wise..
|
||||
size_tick=Decimal('1'),
|
||||
|
||||
src=src, # XXX: normally always a fiat
|
||||
|
||||
_atype=atype,
|
||||
|
||||
venue=exch,
|
||||
expiry=expiry,
|
||||
broker='ib',
|
||||
|
||||
_fqme_without_src=(atype != 'fiat'),
|
||||
)
|
||||
|
||||
fqme: str = mkt.fqme
|
||||
|
||||
# XXX: if passed in, we fill out the symcache ad-hoc in order
|
||||
# to make downstream accounting work..
|
||||
if symcache is not None:
|
||||
orig_mkt: MktPair | None = symcache.mktmaps.get(bs_mktid)
|
||||
if (
|
||||
orig_mkt
|
||||
and orig_mkt.fqme != mkt.fqme
|
||||
):
|
||||
log.warning(
|
||||
# print(
|
||||
f'Contracts with common `conId`: {bs_mktid} mismatch..\n'
|
||||
f'{orig_mkt.fqme} -> {mkt.fqme}\n'
|
||||
# 'with DIFF:\n'
|
||||
# f'{mkt - orig_mkt}'
|
||||
)
|
||||
|
||||
symcache.mktmaps[bs_mktid] = mkt
|
||||
symcache.mktmaps[fqme] = mkt
|
||||
symcache.assets[src.name] = src
|
||||
symcache.assets[dst.name] = dst
|
||||
|
||||
# NOTE: for flex records the normal fields for defining an fqme
|
||||
# sometimes won't be available so we rely on two approaches for
|
||||
# the "reverse lookup" of piker style fqme keys:
|
||||
# - when dealing with API trade records received from
|
||||
# `IB.trades()` we do a contract lookup at the time of processing
|
||||
# - when dealing with flex records, it is assumed the record
|
||||
# is at least a day old and thus the TWS position reporting system
|
||||
# should already have entries if the pps are still open, in
|
||||
# which case, we can pull the fqme from that table (see
|
||||
# `trades_dialogue()` above).
|
||||
return Transaction(
|
||||
fqme=fqme,
|
||||
tid=tid,
|
||||
size=size,
|
||||
price=price,
|
||||
cost=comms,
|
||||
dt=dt,
|
||||
expiry=expiry,
|
||||
bs_mktid=str(conid),
|
||||
)
|
||||
|
||||
|
||||
|
||||
def norm_trade_records(
|
||||
ledger: dict[str, Any],
|
||||
symcache: SymbologyCache | None = None,
|
||||
|
||||
) -> dict[str, Transaction]:
|
||||
'''
|
||||
Normalize (xml) flex-report or (recent) API trade records into
|
||||
our ledger format with parsing for `MktPair` and `Asset`
|
||||
extraction to fill in the `Transaction.sys: MktPair` field.
|
||||
|
||||
'''
|
||||
records: list[Transaction] = []
|
||||
for tid, record in ledger.items():
|
||||
|
||||
txn = norm_trade(
|
||||
tid,
|
||||
record,
|
||||
|
||||
# NOTE: currently no symcache support
|
||||
pairs={},
|
||||
symcache=symcache,
|
||||
)
|
||||
|
||||
if txn is None:
|
||||
continue
|
||||
|
||||
insort(
|
||||
records,
|
||||
txn,
|
||||
key=lambda t: t.dt
|
||||
)
|
||||
|
||||
return {r.tid: r for r in records}
|
||||
|
||||
|
||||
def api_trades_to_ledger_entries(
|
||||
accounts: bidict[str, str],
|
||||
fills: list[Fill],
|
||||
|
||||
) -> dict[str, dict]:
|
||||
'''
|
||||
Convert API execution objects entry objects into
|
||||
flattened-``dict`` form, pretty much straight up without
|
||||
modification except add a `pydatetime` field from the parsed
|
||||
timestamp so that entries can be datetime-sorted on write.
|
||||
|
||||
'''
|
||||
trades_by_account: dict[str, dict] = {}
|
||||
for fill in fills:
|
||||
|
||||
# NOTE: for the schema, see the defn for `Fill` which is
|
||||
# a `NamedTuple` subtype
|
||||
fdict: dict = fill._asdict()
|
||||
|
||||
# flatten all (sub-)objects and convert to dicts.
|
||||
# with values packed into one top level entry.
|
||||
val: CommissionReport | Execution | Contract
|
||||
txn_dict: dict[str, Any] = {}
|
||||
for attr_name, val in fdict.items():
|
||||
match attr_name:
|
||||
# value is a `@dataclass` subtype
|
||||
case 'contract' | 'execution' | 'commissionReport':
|
||||
txn_dict.update(asdict(val))
|
||||
|
||||
case 'time':
|
||||
# ib has wack ns timestamps, or is that us?
|
||||
continue
|
||||
|
||||
# TODO: we can remove this case right since there's
|
||||
# only 4 fields on a `Fill`?
|
||||
case _:
|
||||
txn_dict[attr_name] = val
|
||||
|
||||
tid = str(txn_dict['execId'])
|
||||
dt = pendulum.from_timestamp(txn_dict['time'])
|
||||
txn_dict['datetime'] = str(dt)
|
||||
acctid = accounts[txn_dict['acctNumber']]
|
||||
|
||||
# NOTE: only inserted (then later popped) for sorting below!
|
||||
txn_dict['pydatetime'] = dt
|
||||
|
||||
if not tid:
|
||||
# this is likely some kind of internal adjustment
|
||||
# transaction, likely one of the following:
|
||||
# - an expiry event that will show a "book trade" indicating
|
||||
# some adjustment to cash balances: zeroing or itm settle.
|
||||
# - a manual cash balance position adjustment likely done by
|
||||
# the user from the accounts window in TWS where they can
|
||||
# manually set the avg price and size:
|
||||
# https://api.ibkr.com/lib/cstools/faq/web1/index.html#/tag/DTWS_ADJ_AVG_COST
|
||||
log.warning(
|
||||
'Skipping ID-less ledger txn_dict:\n'
|
||||
f'{pformat(txn_dict)}'
|
||||
)
|
||||
continue
|
||||
|
||||
trades_by_account.setdefault(
|
||||
acctid, {}
|
||||
)[tid] = txn_dict
|
||||
|
||||
# TODO: maybe we should just bisect.insort() into a list of
|
||||
# tuples and then return a dict of that?
|
||||
# sort entries in output by python based datetime
|
||||
for acctid in trades_by_account:
|
||||
trades_by_account[acctid] = dict(sorted(
|
||||
trades_by_account[acctid].items(),
|
||||
key=lambda entry: entry[1].pop('pydatetime'),
|
||||
))
|
||||
|
||||
return trades_by_account
|
||||
|
||||
|
||||
async def update_ledger_from_api_trades(
|
||||
fills: list[Fill],
|
||||
client: Client | MethodProxy,
|
||||
accounts_def_inv: bidict[str, str],
|
||||
|
||||
# NOTE: provided for ad-hoc insertions "as transactions are
|
||||
# processed" -> see `norm_trade()` signature requirements.
|
||||
symcache: SymbologyCache | None = None,
|
||||
|
||||
) -> tuple[
|
||||
dict[str, Transaction],
|
||||
dict[str, dict],
|
||||
]:
|
||||
# XXX; ERRGGG..
|
||||
# pack in the "primary/listing exchange" value from a
|
||||
# contract lookup since it seems this isn't available by
|
||||
# default from the `.fills()` method endpoint...
|
||||
fill: Fill
|
||||
for fill in fills:
|
||||
con: Contract = fill.contract
|
||||
conid: str = con.conId
|
||||
pexch: str | None = con.primaryExchange
|
||||
|
||||
if not pexch:
|
||||
cons = await client.get_con(conid=conid)
|
||||
if cons:
|
||||
con = cons[0]
|
||||
pexch = con.primaryExchange or con.exchange
|
||||
else:
|
||||
# for futes it seems like the primary is always empty?
|
||||
pexch: str = con.exchange
|
||||
|
||||
# pack in the ``Contract.secType``
|
||||
# entry['asset_type'] = condict['secType']
|
||||
|
||||
entries: dict[str, dict] = api_trades_to_ledger_entries(
|
||||
accounts_def_inv,
|
||||
fills,
|
||||
)
|
||||
# normalize recent session's trades to the `Transaction` type
|
||||
trans_by_acct: dict[str, dict[str, Transaction]] = {}
|
||||
|
||||
for acctid, trades_by_id in entries.items():
|
||||
# normalize to transaction form
|
||||
trans_by_acct[acctid] = norm_trade_records(
|
||||
trades_by_id,
|
||||
symcache=symcache,
|
||||
)
|
||||
|
||||
return trans_by_acct, entries
|
|
@ -0,0 +1,598 @@
|
|||
# piker: trading gear for hackers
|
||||
# Copyright (C) Tyler Goodlet (in stewardship for pikers)
|
||||
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU Affero General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU Affero General Public License for more details.
|
||||
|
||||
# You should have received a copy of the GNU Affero General Public License
|
||||
# along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
'''
|
||||
Symbology search and normalization.
|
||||
|
||||
'''
|
||||
from __future__ import annotations
|
||||
from contextlib import (
|
||||
nullcontext,
|
||||
)
|
||||
from decimal import Decimal
|
||||
import time
|
||||
from typing import (
|
||||
Awaitable,
|
||||
TYPE_CHECKING,
|
||||
)
|
||||
|
||||
from rapidfuzz import process as fuzzy
|
||||
import ib_insync as ibis
|
||||
import tractor
|
||||
import trio
|
||||
|
||||
from piker.accounting import (
|
||||
Asset,
|
||||
MktPair,
|
||||
unpack_fqme,
|
||||
)
|
||||
from piker._cacheables import (
|
||||
async_lifo_cache,
|
||||
)
|
||||
|
||||
from ._util import (
|
||||
log,
|
||||
)
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from .api import (
|
||||
MethodProxy,
|
||||
Client,
|
||||
)
|
||||
|
||||
_futes_venues = (
|
||||
'GLOBEX',
|
||||
'NYMEX',
|
||||
'CME',
|
||||
'CMECRYPTO',
|
||||
'COMEX',
|
||||
# 'CMDTY', # special name case..
|
||||
'CBOT', # (treasury) yield futures
|
||||
)
|
||||
|
||||
_adhoc_cmdty_set = {
|
||||
# metals
|
||||
# https://misc.interactivebrokers.com/cstools/contract_info/v3.10/index.php?action=Conid%20Info&wlId=IB&conid=69067924
|
||||
'xauusd.cmdty', # london gold spot ^
|
||||
'xagusd.cmdty', # silver spot
|
||||
}
|
||||
|
||||
# NOTE: if you aren't seeing one of these symbols' futures contracts
|
||||
# show up, it's likely the `.<venue>` part is wrong!
|
||||
_adhoc_futes_set = {
|
||||
|
||||
# equities
|
||||
'nq.cme',
|
||||
'mnq.cme', # micro
|
||||
|
||||
'es.cme',
|
||||
'mes.cme', # micro
|
||||
|
||||
# cypto$
|
||||
'brr.cme',
|
||||
'mbt.cme', # micro
|
||||
'ethusdrr.cme',
|
||||
|
||||
# agriculture
|
||||
'he.comex', # lean hogs
|
||||
'le.comex', # live cattle (geezers)
|
||||
'gf.comex', # feeder cattle (younguns)
|
||||
|
||||
# raw
|
||||
'lb.comex', # random len lumber
|
||||
|
||||
'gc.comex',
|
||||
'mgc.comex', # micro
|
||||
|
||||
# oil & gas
|
||||
'cl.nymex',
|
||||
|
||||
'ni.comex', # silver futes
|
||||
'qi.comex', # mini-silver futes
|
||||
|
||||
# treasury yields
|
||||
# etfs by duration:
|
||||
# SHY -> IEI -> IEF -> TLT
|
||||
'zt.cbot', # 2y
|
||||
'z3n.cbot', # 3y
|
||||
'zf.cbot', # 5y
|
||||
'zn.cbot', # 10y
|
||||
'zb.cbot', # 30y
|
||||
|
||||
# (micros of above)
|
||||
'2yy.cbot',
|
||||
'5yy.cbot',
|
||||
'10y.cbot',
|
||||
'30y.cbot',
|
||||
}
|
||||
|
||||
|
||||
# taken from list here:
|
||||
# https://www.interactivebrokers.com/en/trading/products-spot-currencies.php
|
||||
_adhoc_fiat_set = set((
    'USD, AED, AUD, CAD, '
    'CHF, CNH, CZK, DKK, '
    'EUR, GBP, HKD, HUF, '
    'ILS, JPY, MXN, NOK, '
    'NZD, PLN, RUB, SAR, '
    'SEK, SGD, TRY, ZAR'
).split(', ')
)
|
||||
|
||||
# manually discovered tick discrepancies,
|
||||
# only god knows how or why they'd cuck these up..
|
||||
_adhoc_mkt_infos: dict[int | str, dict] = {
|
||||
'vtgn.nasdaq': {'price_tick': Decimal('0.01')},
|
||||
}
|
||||
|
||||
|
||||
# map of symbols to contract ids
|
||||
_adhoc_symbol_map = {
|
||||
# https://misc.interactivebrokers.com/cstools/contract_info/v3.10/index.php?action=Conid%20Info&wlId=IB&conid=69067924
|
||||
|
||||
# NOTE: some cmdtys/metals don't have trade data like gold/usd:
|
||||
# https://groups.io/g/twsapi/message/44174
|
||||
'XAUUSD': ({'conId': 69067924}, {'whatToShow': 'MIDPOINT'}),
|
||||
}
|
||||
for qsn in _adhoc_futes_set:
|
||||
sym, venue = qsn.split('.')
|
||||
assert venue.upper() in _futes_venues, f'{venue}'
|
||||
_adhoc_symbol_map[sym.upper()] = (
|
||||
{'exchange': venue},
|
||||
{},
|
||||
)
|
||||
|
||||
|
||||
# exchanges we don't support at the moment due to not knowing
|
||||
# how to do symbol-contract lookup correctly likely due
|
||||
# to not having the data feeds subscribed.
|
||||
_exch_skip_list = {
|
||||
|
||||
'ASX', # aussie stocks
|
||||
'MEXI', # mexican stocks
|
||||
|
||||
# no idea
|
||||
'NSE',
|
||||
'VALUE',
|
||||
'FUNDSERV',
|
||||
'SWB2',
|
||||
'PSE',
|
||||
'PHLX',
|
||||
}
|
||||
|
||||
# optional search config the backend can register for
|
||||
# its symbol search handling (in this case we avoid
|
||||
# accepting patterns before the kb has settled more than
|
||||
# a quarter second).
|
||||
_search_conf = {
|
||||
'pause_period': 6 / 16,
|
||||
}
|
||||
|
||||
|
||||
@tractor.context
|
||||
async def open_symbol_search(ctx: tractor.Context) -> None:
|
||||
'''
|
||||
Symbology search brokerd-endpoint.
|
||||
|
||||
'''
|
||||
from .api import open_client_proxies
|
||||
from .feed import open_data_client
|
||||
|
||||
# TODO: load user defined symbol set locally for fast search?
|
||||
await ctx.started({})
|
||||
|
||||
async with (
|
||||
open_client_proxies() as (proxies, _),
|
||||
open_data_client() as data_proxy,
|
||||
):
|
||||
async with ctx.open_stream() as stream:
|
||||
|
||||
# select a non-history client for symbol search to lighten
|
||||
# the load in the main data node.
|
||||
proxy = data_proxy
|
||||
for name, proxy in proxies.items():
|
||||
if proxy is data_proxy:
|
||||
continue
|
||||
break
|
||||
|
||||
ib_client = proxy._aio_ns.ib
|
||||
log.info(f'Using {ib_client} for symbol search')
|
||||
|
||||
last = time.time()
|
||||
async for pattern in stream:
|
||||
log.info(f'received {pattern}')
|
||||
now: float = time.time()
|
||||
|
||||
# this causes tractor hang...
|
||||
# assert 0
|
||||
|
||||
assert pattern, 'IB can not accept blank search pattern'
|
||||
|
||||
# throttle search requests to no faster then 1Hz
|
||||
diff = now - last
|
||||
if diff < 1.0:
|
||||
log.debug('throttle sleeping')
|
||||
await trio.sleep(diff)
|
||||
try:
|
||||
pattern = stream.receive_nowait()
|
||||
except trio.WouldBlock:
|
||||
pass
|
||||
|
||||
if (
|
||||
not pattern
|
||||
or pattern.isspace()
|
||||
|
||||
# XXX: not sure if this is a bad assumption but it
|
||||
# seems to make search snappier?
|
||||
or len(pattern) < 1
|
||||
):
|
||||
log.warning('empty pattern received, skipping..')
|
||||
|
||||
# TODO: *BUG* if nothing is returned here the client
|
||||
# side will cache a null set result and not showing
|
||||
# anything to the use on re-searches when this query
|
||||
# timed out. We probably need a special "timeout" msg
|
||||
# or something...
|
||||
|
||||
# XXX: this unblocks the far end search task which may
|
||||
# hold up a multi-search nursery block
|
||||
await stream.send({})
|
||||
|
||||
continue
|
||||
|
||||
log.info(f'searching for {pattern}')
|
||||
|
||||
last = time.time()
|
||||
|
||||
# async batch search using api stocks endpoint and module
|
||||
# defined adhoc symbol set.
|
||||
stock_results = []
|
||||
|
||||
async def extend_results(
|
||||
target: Awaitable[list]
|
||||
) -> None:
|
||||
try:
|
||||
results = await target
|
||||
except tractor.trionics.Lagged:
|
||||
print("IB SYM-SEARCH OVERRUN?!?")
|
||||
return
|
||||
|
||||
stock_results.extend(results)
|
||||
|
||||
for _ in range(10):
|
||||
with trio.move_on_after(3) as cs:
|
||||
async with trio.open_nursery() as sn:
|
||||
sn.start_soon(
|
||||
extend_results,
|
||||
proxy.search_symbols(
|
||||
pattern=pattern,
|
||||
upto=5,
|
||||
),
|
||||
)
|
||||
|
||||
# trigger async request
|
||||
await trio.sleep(0)
|
||||
|
||||
if cs.cancelled_caught:
|
||||
log.warning(
|
||||
f'Search timeout? {proxy._aio_ns.ib.client}'
|
||||
)
|
||||
continue
|
||||
elif stock_results:
|
||||
break
|
||||
# else:
|
||||
await tractor.pause()
|
||||
|
||||
# # match against our ad-hoc set immediately
|
||||
# adhoc_matches = fuzzy.extract(
|
||||
# pattern,
|
||||
# list(_adhoc_futes_set),
|
||||
# score_cutoff=90,
|
||||
# )
|
||||
# log.info(f'fuzzy matched adhocs: {adhoc_matches}')
|
||||
# adhoc_match_results = {}
|
||||
# if adhoc_matches:
|
||||
# # TODO: do we need to pull contract details?
|
||||
# adhoc_match_results = {i[0]: {} for i in
|
||||
# adhoc_matches}
|
||||
|
||||
log.debug(f'fuzzy matching stocks {stock_results}')
|
||||
stock_matches = fuzzy.extract(
|
||||
pattern,
|
||||
stock_results,
|
||||
score_cutoff=50,
|
||||
)
|
||||
|
||||
# matches = adhoc_match_results | {
|
||||
matches = {
|
||||
item[0]: {} for item in stock_matches
|
||||
}
|
||||
# TODO: we used to deliver contract details
|
||||
# {item[2]: item[0] for item in stock_matches}
|
||||
|
||||
log.debug(f"sending matches: {matches.keys()}")
|
||||
await stream.send(matches)
|
||||
|
||||
|
||||
# re-mapping to piker asset type names
|
||||
# https://github.com/erdewit/ib_insync/blob/master/ib_insync/contract.py#L113
|
||||
_asset_type_map = {
|
||||
'STK': 'stock',
|
||||
'OPT': 'option',
|
||||
'FUT': 'future',
|
||||
'CONTFUT': 'continuous_future',
|
||||
'CASH': 'fiat',
|
||||
'IND': 'index',
|
||||
'CFD': 'cfd',
|
||||
'BOND': 'bond',
|
||||
'CMDTY': 'commodity',
|
||||
'FOP': 'futures_option',
|
||||
'FUND': 'mutual_fund',
|
||||
'WAR': 'warrant',
|
||||
'IOPT': 'warran',
|
||||
'BAG': 'bag',
|
||||
'CRYPTO': 'crypto', # bc it's diff then fiat?
|
||||
# 'NEWS': 'news',
|
||||
}
|
||||
|
||||
|
||||
def parse_patt2fqme(
|
||||
# client: Client,
|
||||
pattern: str,
|
||||
|
||||
) -> tuple[str, str, str, str]:
|
||||
|
||||
# TODO: we can't use this currently because
|
||||
# ``wrapper.starTicker()`` currently cashes ticker instances
|
||||
# which means getting a singel quote will potentially look up
|
||||
# a quote for a ticker that it already streaming and thus run
|
||||
# into state clobbering (eg. list: Ticker.ticks). It probably
|
||||
# makes sense to try this once we get the pub-sub working on
|
||||
# individual symbols...
|
||||
|
||||
# XXX UPDATE: we can probably do the tick/trades scraping
|
||||
# inside our eventkit handler instead to bypass this entirely?
|
||||
|
||||
currency = ''
|
||||
|
||||
# fqme parsing stage
|
||||
# ------------------
|
||||
if '.ib' in pattern:
|
||||
_, symbol, venue, expiry = unpack_fqme(pattern)
|
||||
|
||||
else:
|
||||
symbol = pattern
|
||||
expiry = ''
|
||||
|
||||
# # another hack for forex pairs lul.
|
||||
# if (
|
||||
# '.idealpro' in symbol
|
||||
# # or '/' in symbol
|
||||
# ):
|
||||
# exch: str = 'IDEALPRO'
|
||||
# symbol = symbol.removesuffix('.idealpro')
|
||||
# if '/' in symbol:
|
||||
# symbol, currency = symbol.split('/')
|
||||
|
||||
# else:
|
||||
# TODO: yes, a cache..
|
||||
# try:
|
||||
# # give the cache a go
|
||||
# return client._contracts[symbol]
|
||||
# except KeyError:
|
||||
# log.debug(f'Looking up contract for {symbol}')
|
||||
expiry: str = ''
|
||||
if symbol.count('.') > 1:
|
||||
symbol, _, expiry = symbol.rpartition('.')
|
||||
|
||||
# use heuristics to figure out contract "type"
|
||||
symbol, venue = symbol.upper().rsplit('.', maxsplit=1)
|
||||
|
||||
return symbol, currency, venue, expiry
|
||||
|
||||
|
||||
def con2fqme(
|
||||
con: ibis.Contract,
|
||||
_cache: dict[int, (str, bool)] = {}
|
||||
|
||||
) -> tuple[str, bool]:
|
||||
'''
|
||||
Convert contracts to fqme-style strings to be used both in
|
||||
symbol-search matching and as feed tokens passed to the front
|
||||
end data feed layer.
|
||||
|
||||
Previously seen contracts are cached by id.
|
||||
|
||||
'''
|
||||
# should be real volume for this contract by default
|
||||
calc_price: bool = False
|
||||
if con.conId:
|
||||
try:
|
||||
# TODO: LOL so apparently IB just changes the contract
|
||||
# ID (int) on a whim.. so we probably need to use an
|
||||
# FQME style key after all...
|
||||
return _cache[con.conId]
|
||||
except KeyError:
|
||||
pass
|
||||
|
||||
suffix: str = con.primaryExchange or con.exchange
|
||||
symbol: str = con.symbol
|
||||
expiry: str = con.lastTradeDateOrContractMonth or ''
|
||||
|
||||
match con:
|
||||
case ibis.Option():
|
||||
# TODO: option symbol parsing and sane display:
|
||||
symbol = con.localSymbol.replace(' ', '')
|
||||
|
||||
case (
|
||||
ibis.Commodity()
|
||||
# search API endpoint returns std con box..
|
||||
| ibis.Contract(secType='CMDTY')
|
||||
):
|
||||
# commodities and forex don't have an exchange name and
|
||||
# no real volume so we have to calculate the price
|
||||
suffix = con.secType
|
||||
|
||||
# no real volume on this contract
|
||||
calc_price = True
|
||||
|
||||
case ibis.Forex() | ibis.Contract(secType='CASH'):
|
||||
dst, src = con.localSymbol.split('.')
|
||||
symbol = ''.join([dst, src])
|
||||
suffix = con.exchange or 'idealpro'
|
||||
|
||||
# no real volume on forex feeds..
|
||||
calc_price = True
|
||||
|
||||
if not suffix:
|
||||
entry = _adhoc_symbol_map.get(
|
||||
con.symbol or con.localSymbol
|
||||
)
|
||||
if entry:
|
||||
meta, kwargs = entry
|
||||
cid = meta.get('conId')
|
||||
if cid:
|
||||
assert con.conId == meta['conId']
|
||||
suffix = meta['exchange']
|
||||
|
||||
# append a `.<suffix>` to the returned symbol
|
||||
# key for derivatives that normally is the expiry
|
||||
# date key.
|
||||
if expiry:
|
||||
suffix += f'.{expiry}'
|
||||
|
||||
fqme_key = symbol.lower()
|
||||
if suffix:
|
||||
fqme_key = '.'.join((fqme_key, suffix)).lower()
|
||||
|
||||
_cache[con.conId] = fqme_key, calc_price
|
||||
return fqme_key, calc_price
|
||||
|
||||
|
||||
@async_lifo_cache()
|
||||
async def get_mkt_info(
|
||||
fqme: str,
|
||||
|
||||
proxy: MethodProxy | None = None,
|
||||
|
||||
) -> tuple[MktPair, ibis.ContractDetails]:
|
||||
|
||||
if '.ib' not in fqme:
|
||||
fqme += '.ib'
|
||||
broker, pair, venue, expiry = unpack_fqme(fqme)
|
||||
|
||||
proxy: MethodProxy
|
||||
if proxy is not None:
|
||||
client_ctx = nullcontext(proxy)
|
||||
else:
|
||||
from .feed import (
|
||||
open_data_client,
|
||||
)
|
||||
client_ctx = open_data_client
|
||||
|
||||
async with client_ctx as proxy:
|
||||
try:
|
||||
(
|
||||
con, # Contract
|
||||
details, # ContractDetails
|
||||
) = await proxy.get_sym_details(fqme=fqme)
|
||||
except ConnectionError:
|
||||
log.exception(f'Proxy is ded {proxy._aio_ns}')
|
||||
raise
|
||||
|
||||
# TODO: more consistent field translation
|
||||
atype = _asset_type_map[con.secType]
|
||||
|
||||
if atype == 'commodity':
|
||||
venue: str = 'cmdty'
|
||||
else:
|
||||
venue = con.primaryExchange or con.exchange
|
||||
|
||||
price_tick: Decimal = Decimal(str(details.minTick))
|
||||
# price_tick: Decimal = Decimal('0.01')
|
||||
|
||||
if atype == 'stock':
|
||||
# XXX: GRRRR they don't support fractional share sizes for
|
||||
# stocks from the API?!
|
||||
# if con.secType == 'STK':
|
||||
size_tick = Decimal('1')
|
||||
else:
|
||||
size_tick: Decimal = Decimal(
|
||||
str(details.minSize).rstrip('0')
|
||||
)
|
||||
# |-> TODO: there is also the Contract.sizeIncrement, but wtf is it?
|
||||
|
||||
# NOTE: this is duplicate from the .broker.norm_trade_records()
|
||||
# routine, we should factor all this parsing somewhere..
|
||||
expiry_str = str(con.lastTradeDateOrContractMonth)
|
||||
# if expiry:
|
||||
# expiry_str: str = str(pendulum.parse(
|
||||
# str(expiry).strip(' ')
|
||||
# ))
|
||||
|
||||
# TODO: currently we can't pass the fiat src asset because
|
||||
# then we'll get a `MNQUSD` request for history data..
|
||||
# we need to figure out how we're going to handle this (later?)
|
||||
# but likely we want all backends to eventually handle
|
||||
# ``dst/src.venue.`` style !?
|
||||
src = Asset(
|
||||
name=str(con.currency).lower(),
|
||||
atype='fiat',
|
||||
tx_tick=Decimal('0.01'), # right?
|
||||
)
|
||||
dst = Asset(
|
||||
name=con.symbol.lower(),
|
||||
atype=atype,
|
||||
tx_tick=size_tick,
|
||||
)
|
||||
|
||||
mkt = MktPair(
|
||||
src=src,
|
||||
dst=dst,
|
||||
|
||||
price_tick=price_tick,
|
||||
size_tick=size_tick,
|
||||
|
||||
bs_mktid=str(con.conId),
|
||||
venue=str(venue),
|
||||
expiry=expiry_str,
|
||||
broker='ib',
|
||||
|
||||
# TODO: options contract info as str?
|
||||
# contract_info=<optionsdetails>
|
||||
_fqme_without_src=(atype != 'fiat'),
|
||||
)
|
||||
|
||||
# just.. wow.
|
||||
if entry := _adhoc_mkt_infos.get(mkt.bs_fqme):
|
||||
log.warning(f'Frickin {mkt.fqme} has an adhoc {entry}..')
|
||||
new = mkt.to_dict()
|
||||
new['price_tick'] = entry['price_tick']
|
||||
new['src'] = src
|
||||
new['dst'] = dst
|
||||
mkt = MktPair(**new)
|
||||
|
||||
# if possible register the bs_mktid to the just-built
|
||||
# mkt so that it can be retrieved by order mode tasks later.
|
||||
# TODO NOTE: this is going to be problematic if/when we split
|
||||
# out the datad vs. brokerd actors since the mktmap lookup
|
||||
# table will now be inaccessible..
|
||||
if proxy is not None:
|
||||
client: Client = proxy._aio_ns
|
||||
client._contracts[mkt.bs_fqme] = con
|
||||
client._cons2mkts[con] = mkt
|
||||
|
||||
return mkt, details
|
File diff suppressed because it is too large
|
@@ -0,0 +1,64 @@
|
|||
``kraken`` backend
|
||||
------------------
|
||||
though they don't have the most liquidity of all the cexes, they sure are
|
||||
accommodating to those of us who appreciate a little ``xmr``.
|
||||
|
||||
status
|
||||
******
|
||||
current support is *production grade* and both real-time data and order
|
||||
management should be correct and fast. this backend is used by core devs
|
||||
for live trading.
|
||||
|
||||
|
||||
config
|
||||
******
|
||||
In order to get order mode support your ``brokers.toml``
|
||||
needs to have something like the following:
|
||||
|
||||
.. code:: toml
|
||||
|
||||
[kraken]
|
||||
accounts.spot = 'spot'
|
||||
key_descr = "spot"
|
||||
api_key = "69696969696969696696969696969696969696969696969696969696"
|
||||
secret = "BOOBSBOOBSBOOBSBOOBSBOOBSSMBZ69696969696969669969696969696"
|
||||
|
||||
|
||||
If everything works correctly you should see any current positions
|
||||
loaded in the pps pane on chart load and you should also be able to
|
||||
check your trade records in the file::
|
||||
|
||||
<pikerk_conf_dir>/ledgers/trades_kraken_spot.toml
|
||||
|
||||
|
||||
An example ledger file will have entries written verbatim from the
|
||||
trade events schema:
|
||||
|
||||
.. code:: toml
|
||||
|
||||
[TFJBKK-SMBZS-VJ4UWS]
|
||||
ordertxid = "SMBZSA-7CNQU-3HWLNJ"
|
||||
postxid = "SMBZSE-M7IF5-CFI7LT"
|
||||
pair = "XXMRZEUR"
|
||||
time = 1655691993.4133966
|
||||
type = "buy"
|
||||
ordertype = "limit"
|
||||
price = "103.97000000"
|
||||
cost = "499.99999977"
|
||||
fee = "0.80000000"
|
||||
vol = "4.80907954"
|
||||
margin = "0.00000000"
|
||||
misc = ""
|
||||
|
||||
|
||||
your ``pps.toml`` file will have position entries like,
|
||||
|
||||
.. code:: toml
|
||||
|
||||
[kraken.spot."xmreur.kraken"]
|
||||
size = 4.80907954
|
||||
ppu = 103.97000000
|
||||
bs_mktid = "XXMRZEUR"
|
||||
clears = [
|
||||
{ tid = "TFJBKK-SMBZS-VJ4UWS", cost = 0.8, price = 103.97, size = 4.80907954, dt = "2022-05-20T02:26:33.413397+00:00" },
|
||||
]
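

since both the ledger and ``pps.toml`` files are plain TOML you can
also poke at them offline; here's a minimal sketch (assuming a
python 3.11+ interpreter for the stdlib ``tomllib`` and that you
swap in your actual ``<pikerk_conf_dir>`` path):

.. code:: python

   import tomllib
   from pathlib import Path

   # hypothetical conf dir; use your real <pikerk_conf_dir>
   conf_dir = Path('~/.config/piker').expanduser()
   ledger_path = conf_dir / 'ledgers' / 'trades_kraken_spot.toml'

   with open(ledger_path, 'rb') as f:
       ledger = tomllib.load(f)

   # print one line per cleared trade entry
   for txid, entry in ledger.items():
       print(txid, entry['pair'], entry['type'], entry['vol'], '@', entry['price'])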
|
|
@@ -0,0 +1,75 @@
|
|||
# piker: trading gear for hackers
|
||||
# Copyright (C) Tyler Goodlet (in stewardship for pikers)
|
||||
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU Affero General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU Affero General Public License for more details.
|
||||
|
||||
# You should have received a copy of the GNU Affero General Public License
|
||||
# along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
'''
|
||||
Kraken backend.
|
||||
|
||||
Sub-modules within break into the core functionalities:
|
||||
|
||||
- .api: for the core API machinery which is generally
|
||||
a ``asks``/``trio-websocket`` implemented ``Client``.
|
||||
- .broker: for orders / trading endpoints.
|
||||
- .feed: for real-time and historical data query endpoints.
|
||||
- .ledger: for transaction processing as it pertains to accounting.
|
||||
- .symbols: for market (name) search and symbology meta-defs.
|
||||
|
||||
'''
|
||||
from .symbols import (
|
||||
Pair, # for symcache
|
||||
open_symbol_search,
|
||||
# required by `.accounting`, `.data`
|
||||
get_mkt_info,
|
||||
)
|
||||
# required by `.brokers`
|
||||
from .api import (
|
||||
get_client,
|
||||
)
|
||||
from .feed import (
|
||||
# required by `.data`
|
||||
stream_quotes,
|
||||
open_history_client,
|
||||
)
|
||||
from .broker import (
|
||||
# required by `.clearing`
|
||||
open_trade_dialog,
|
||||
)
|
||||
from .ledger import (
|
||||
# required by `.accounting`
|
||||
norm_trade,
|
||||
norm_trade_records,
|
||||
)
|
||||
|
||||
|
||||
__all__ = [
|
||||
'get_client',
|
||||
'get_mkt_info',
|
||||
'Pair',
|
||||
'open_trade_dialog',
|
||||
'open_history_client',
|
||||
'open_symbol_search',
|
||||
'stream_quotes',
|
||||
'norm_trade_records',
|
||||
'norm_trade',
|
||||
]
|
||||
|
||||
|
||||
# tractor RPC enable arg
|
||||
__enable_modules__: list[str] = [
|
||||
'api',
|
||||
'broker',
|
||||
'feed',
|
||||
'symbols',
|
||||
]
|
|
@@ -0,0 +1,703 @@
|
|||
# piker: trading gear for hackers
|
||||
# Copyright (C) Tyler Goodlet (in stewardship for pikers)
|
||||
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU Affero General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU Affero General Public License for more details.
|
||||
|
||||
# You should have received a copy of the GNU Affero General Public License
|
||||
# along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
'''
|
||||
Core (web) API client
|
||||
|
||||
'''
|
||||
from contextlib import asynccontextmanager as acm
|
||||
from datetime import datetime
|
||||
import itertools
|
||||
from typing import (
|
||||
Any,
|
||||
Union,
|
||||
)
|
||||
import time
|
||||
|
||||
import httpx
|
||||
import pendulum
|
||||
import numpy as np
|
||||
import urllib.parse
|
||||
import hashlib
|
||||
import hmac
|
||||
import base64
|
||||
import trio
|
||||
|
||||
from piker import config
|
||||
from piker.data import (
|
||||
def_iohlcv_fields,
|
||||
match_from_pairs,
|
||||
)
|
||||
from piker.accounting._mktinfo import (
|
||||
Asset,
|
||||
digits_to_dec,
|
||||
dec_digits,
|
||||
)
|
||||
from piker.brokers._util import (
|
||||
resproc,
|
||||
SymbolNotFound,
|
||||
BrokerError,
|
||||
DataThrottle,
|
||||
)
|
||||
from piker.accounting import Transaction
|
||||
from piker.log import get_logger
|
||||
from .symbols import Pair
|
||||
|
||||
log = get_logger('piker.brokers.kraken')
|
||||
|
||||
# <uri>/<version>/
|
||||
_url = 'https://api.kraken.com/0'
|
||||
|
||||
_headers: dict[str, str] = {
|
||||
'User-Agent': 'krakenex/2.1.0 (+https://github.com/veox/python3-krakenex)'
|
||||
}
|
||||
|
||||
# TODO: this is the only backend providing this right?
|
||||
# in which case we should drop it from the defaults and
|
||||
# instead make a custom fields descr in this module!
|
||||
_show_wap_in_history = True
|
||||
_symbol_info_translation: dict[str, str] = {
|
||||
'tick_decimals': 'pair_decimals',
|
||||
}
|
||||
|
||||
|
||||
def get_config() -> dict[str, Any]:
|
||||
'''
|
||||
Load our section from `piker/brokers.toml`.
|
||||
|
||||
'''
|
||||
conf, path = config.load(
|
||||
conf_name='brokers',
|
||||
touch_if_dne=True,
|
||||
)
|
||||
if (section := conf.get('kraken')) is None:
|
||||
log.warning(
|
||||
f'No config section found for kraken in {path}'
|
||||
)
|
||||
return {}
|
||||
|
||||
return section
|
||||
|
||||
|
||||
def get_kraken_signature(
|
||||
urlpath: str,
|
||||
data: dict[str, Any],
|
||||
secret: str
|
||||
) -> str:
|
||||
postdata = urllib.parse.urlencode(data)
|
||||
encoded = (str(data['nonce']) + postdata).encode()
|
||||
message = urlpath.encode() + hashlib.sha256(encoded).digest()
|
||||
|
||||
mac = hmac.new(base64.b64decode(secret), message, hashlib.sha512)
|
||||
sigdigest = base64.b64encode(mac.digest())
|
||||
return sigdigest.decode()
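# (the above implements kraken's documented request signing: an
#  HMAC-SHA512 over `uri_path + SHA256(nonce + urlencoded POST data)`
#  using the base64-decoded API secret, returned base64-encoded)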
|
||||
|
||||
|
||||
class InvalidKey(ValueError):
|
||||
'''
|
||||
EAPI:Invalid key
|
||||
This error is returned when the API key used for the call is
|
||||
either expired or disabled, please review the API key in your
|
||||
Settings -> API tab of account management or generate a new one
|
||||
and update your application.
|
||||
|
||||
'''
|
||||
|
||||
|
||||
class Client:
|
||||
|
||||
# assets and mkt pairs are key-ed by kraken's ReST response
|
||||
# symbol-bs_mktids (we call them "X-keys" like fricking
|
||||
# "XXMRZEUR"). these keys used directly since ledger endpoints
|
||||
# return transaction sets keyed with the same set!
|
||||
_Assets: dict[str, Asset] = {}
|
||||
_AssetPairs: dict[str, Pair] = {}
|
||||
|
||||
# offer lookup tables for all .altname and .wsname
|
||||
# to the equivalent .xname so that various symbol-schemas
|
||||
# can be mapped to `Pair`s in the tables above.
|
||||
_altnames: dict[str, str] = {}
|
||||
_wsnames: dict[str, str] = {}
|
||||
|
||||
# key-ed by `Pair.bs_fqme: str`, and thus used for search
|
||||
# allowing for lookup using piker's own FQME symbology sys.
|
||||
_pairs: dict[str, Pair] = {}
|
||||
_assets: dict[str, Asset] = {}
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
config: dict[str, str],
|
||||
httpx_client: httpx.AsyncClient,
|
||||
|
||||
name: str = '',
|
||||
api_key: str = '',
|
||||
secret: str = ''
|
||||
) -> None:
|
||||
|
||||
self._sesh: httpx.AsyncClient = httpx_client
|
||||
|
||||
self._name = name
|
||||
self._api_key = api_key
|
||||
self._secret = secret
|
||||
|
||||
self.conf: dict[str, str] = config
|
||||
|
||||
@property
|
||||
def pairs(self) -> dict[str, Pair]:
|
||||
|
||||
if self._pairs is None:
|
||||
raise RuntimeError(
|
||||
"Client didn't run `.get_mkt_pairs()` on startup?!"
|
||||
)
|
||||
|
||||
return self._pairs
|
||||
|
||||
async def _public(
|
||||
self,
|
||||
method: str,
|
||||
data: dict,
|
||||
) -> dict[str, Any]:
|
||||
resp: httpx.Response = await self._sesh.post(
|
||||
url=f'/public/{method}',
|
||||
json=data,
|
||||
)
|
||||
return resproc(resp, log)
|
||||
|
||||
async def _private(
|
||||
self,
|
||||
method: str,
|
||||
data: dict,
|
||||
uri_path: str
|
||||
) -> dict[str, Any]:
|
||||
headers = {
|
||||
'Content-Type': 'application/x-www-form-urlencoded',
|
||||
'API-Key': self._api_key,
|
||||
'API-Sign': get_kraken_signature(
|
||||
uri_path,
|
||||
data,
|
||||
self._secret,
|
||||
),
|
||||
}
|
||||
resp: httpx.Response = await self._sesh.post(
|
||||
url=f'/private/{method}',
|
||||
data=data,
|
||||
headers=headers,
|
||||
)
|
||||
return resproc(resp, log)
|
||||
|
||||
async def endpoint(
|
||||
self,
|
||||
method: str,
|
||||
data: dict[str, Any]
|
||||
|
||||
) -> dict[str, Any]:
|
||||
uri_path = f'/0/private/{method}'
|
||||
data['nonce'] = str(int(1000*time.time()))
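# NOTE: kraken requires the nonce to be strictly increasing per
# API key; the ms-resolution timestamp above satisfies that as
# long as requests aren't issued more than once per millisecond.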
|
||||
return await self._private(method, data, uri_path)
|
||||
|
||||
async def get_balances(
|
||||
self,
|
||||
) -> dict[str, float]:
|
||||
'''
|
||||
Return the set of asset balances for this account
|
||||
by symbol.
|
||||
|
||||
'''
|
||||
resp = await self.endpoint(
|
||||
'Balance',
|
||||
{},
|
||||
)
|
||||
by_bsmktid: dict[str, dict] = resp['result']
|
||||
|
||||
balances: dict = {}
|
||||
for xname, bal in by_bsmktid.items():
|
||||
asset: Asset = self._Assets[xname]
|
||||
|
||||
# TODO: which KEY should we use? it's used to index
|
||||
# the `Account.pps: dict` ..
|
||||
key: str = asset.name.lower()
|
||||
# TODO: should we just return a `Decimal` here
|
||||
# or is the rounded version ok?
|
||||
balances[key] = round(
|
||||
float(bal),
|
||||
ndigits=dec_digits(asset.tx_tick)
|
||||
)
|
||||
|
||||
return balances
|
||||
|
||||
async def get_assets(
|
||||
self,
|
||||
reload: bool = False,
|
||||
|
||||
) -> dict[str, Asset]:
|
||||
'''
|
||||
Load and cache all asset infos and pack into
|
||||
our native ``Asset`` struct.
|
||||
|
||||
https://docs.kraken.com/rest/#tag/Market-Data/operation/getAssetInfo
|
||||
|
||||
return msg:
|
||||
"asset1": {
|
||||
"aclass": "string",
|
||||
"altname": "string",
|
||||
"decimals": 0,
|
||||
"display_decimals": 0,
|
||||
"collateral_value": 0,
|
||||
"status": "string"
|
||||
}
|
||||
|
||||
'''
|
||||
if (
|
||||
not self._assets
|
||||
or reload
|
||||
):
|
||||
resp = await self._public('Assets', {})
|
||||
assets: dict[str, dict] = resp['result']
|
||||
|
||||
for bs_mktid, info in assets.items():
|
||||
|
||||
altname: str = info['altname']
|
||||
aclass: str = info['aclass']
|
||||
asset = Asset(
|
||||
name=altname,
|
||||
atype=f'crypto_{aclass}',
|
||||
tx_tick=digits_to_dec(info['decimals']),
|
||||
info=info,
|
||||
)
|
||||
# NOTE: yes we keep 2 sets since kraken insists on
|
||||
# keeping 3 frickin sets bc apparently they have
|
||||
# no sane data engineers who all like different
|
||||
# keys for their fricking symbology sets..
|
||||
self._Assets[bs_mktid] = asset
|
||||
self._assets[altname.lower()] = asset
|
||||
self._assets[altname] = asset
|
||||
|
||||
# we return the "most native" set merged with our preferred
|
||||
# naming (which i guess is the "altname" one) since that's
|
||||
# what the symcache loader will be storing, and we need the
|
||||
# keys that are easiest to match against in any trade
|
||||
# records.
|
||||
return self._Assets | self._assets
|
||||
|
||||
async def get_trades(
|
||||
self,
|
||||
fetch_limit: int | None = None,
|
||||
|
||||
) -> dict[str, Any]:
|
||||
'''
|
||||
Get the trades (aka cleared orders) history from the rest endpoint:
|
||||
https://docs.kraken.com/rest/#operation/getTradeHistory
|
||||
|
||||
'''
|
||||
ofs = 0
|
||||
trades_by_id: dict[str, Any] = {}
|
||||
|
||||
for i in itertools.count():
|
||||
if (
|
||||
fetch_limit
|
||||
and i >= fetch_limit
|
||||
):
|
||||
break
|
||||
|
||||
# increment 'ofs' pagination offset
|
||||
ofs = i*50
|
||||
|
||||
resp = await self.endpoint(
|
||||
'TradesHistory',
|
||||
{'ofs': ofs},
|
||||
)
|
||||
by_id = resp['result']['trades']
|
||||
trades_by_id.update(by_id)
|
||||
|
||||
# can get up to 50 results per query, see:
|
||||
# https://docs.kraken.com/rest/#tag/User-Data/operation/getTradeHistory
|
||||
if (
|
||||
len(by_id) < 50
|
||||
):
|
||||
err = resp.get('error')
|
||||
if err:
|
||||
raise BrokerError(err)
|
||||
|
||||
# we know we received the max amount of
|
||||
# trade results so there may be more history.
|
||||
# catch the end of the trades
|
||||
count = resp['result']['count']
|
||||
break
|
||||
|
||||
# sanity check on update
|
||||
assert count == len(trades_by_id.values())
|
||||
return trades_by_id
|
||||
|
||||
async def get_xfers(
|
||||
self,
|
||||
asset: str,
|
||||
src_asset: str = '',
|
||||
|
||||
) -> dict[str, Transaction]:
|
||||
'''
|
||||
Get asset balance transfer transactions.
|
||||
|
||||
Currently only withdrawals are supported.
|
||||
|
||||
'''
|
||||
resp = await self.endpoint(
|
||||
'WithdrawStatus',
|
||||
{'asset': asset},
|
||||
)
|
||||
try:
|
||||
xfers: list[dict] = resp['result']
|
||||
except KeyError:
|
||||
log.exception(f'Kraken suxxx: {resp}')
|
||||
return []
|
||||
|
||||
# eg. resp schema:
|
||||
# 'result': [{'method': 'Bitcoin', 'aclass': 'currency', 'asset':
|
||||
# 'XXBT', 'refid': 'AGBJRMB-JHD2M4-NDI3NR', 'txid':
|
||||
# 'b95d66d3bb6fd76cbccb93f7639f99a505cb20752c62ea0acc093a0e46547c44',
|
||||
# 'info': 'bc1qc8enqjekwppmw3g80p56z5ns7ze3wraqk5rl9z',
|
||||
# 'amount': '0.00300726', 'fee': '0.00001000', 'time':
|
||||
# 1658347714, 'status': 'Success'}]}
|
||||
|
||||
if xfers:
|
||||
import tractor
|
||||
await tractor.pp()
|
||||
|
||||
trans: dict[str, Transaction] = {}
|
||||
for entry in xfers:
|
||||
# look up the normalized name and asset info
|
||||
asset_key: str = entry['asset']
|
||||
asset: Asset = self._Assets[asset_key]
|
||||
asset_key: str = asset.name.lower()
|
||||
|
||||
# XXX: this is in the asset units (likely) so it isn't
|
||||
# quite the same as a commissions cost necessarily..)
|
||||
# TODO: also round this based on `Pair` cost precision info?
|
||||
cost = float(entry['fee'])
|
||||
# fqme: str = asset_key + '.kraken'
|
||||
|
||||
tx = Transaction(
|
||||
fqme=asset_key, # this must map to an entry in .assets!
|
||||
tid=entry['txid'],
|
||||
dt=pendulum.from_timestamp(entry['time']),
|
||||
bs_mktid=f'{asset_key}{src_asset}',
|
||||
size=-1*(
|
||||
float(entry['amount'])
|
||||
+
|
||||
cost
|
||||
),
|
||||
# since this will be treated as a "sell" it
|
||||
# shouldn't be needed to compute the be price.
|
||||
price='NaN',
|
||||
|
||||
# XXX: see note above
|
||||
cost=cost,
|
||||
|
||||
# not a trade but a withdrawal or deposit on the
|
||||
# asset (chain) system.
|
||||
etype='transfer',
|
||||
|
||||
)
|
||||
trans[tx.tid] = tx
|
||||
|
||||
return trans
|
||||
|
||||
async def submit_limit(
|
||||
self,
|
||||
symbol: str,
|
||||
price: float,
|
||||
action: str,
|
||||
size: float,
|
||||
reqid: str = None,
|
||||
validate: bool = False # set True test call without a real submission
|
||||
|
||||
) -> dict:
|
||||
'''
|
||||
Place an order and return integer request id provided by client.
|
||||
|
||||
'''
|
||||
# Build common data dict for common keys from both endpoints
|
||||
data = {
|
||||
"pair": symbol,
|
||||
"price": str(price),
|
||||
"validate": validate
|
||||
}
|
||||
if reqid is None:
|
||||
# Build order data for kraken api
|
||||
data |= {
|
||||
"ordertype": "limit",
|
||||
"type": action,
|
||||
"volume": str(size),
|
||||
}
|
||||
return await self.endpoint('AddOrder', data)
|
||||
|
||||
else:
|
||||
# Edit order data for kraken api
|
||||
data["txid"] = reqid
|
||||
return await self.endpoint('EditOrder', data)
|
||||
|
||||
async def submit_cancel(
|
||||
self,
|
||||
reqid: str,
|
||||
) -> dict:
|
||||
'''
|
||||
Send cancel request for order id ``reqid``.
|
||||
|
||||
'''
|
||||
# txid is a transaction id given by kraken
|
||||
return await self.endpoint('CancelOrder', {"txid": reqid})
|
||||
|
||||
async def asset_pairs(
|
||||
self,
|
||||
pair_patt: str | None = None,
|
||||
|
||||
) -> dict[str, Pair] | Pair:
|
||||
'''
|
||||
Query for a tradeable asset pair (info), or all if no input
|
||||
pattern is provided.
|
||||
|
||||
https://docs.kraken.com/rest/#tag/Market-Data/operation/getTradableAssetPairs
|
||||
|
||||
'''
|
||||
if not self._AssetPairs:
|
||||
# get all pairs by default, or filter
|
||||
# to whatever pattern is provided as input.
|
||||
req_pairs: dict[str, str] | None = None
|
||||
if pair_patt is not None:
|
||||
req_pairs = {'pair': pair_patt}
|
||||
|
||||
resp = await self._public(
|
||||
'AssetPairs',
|
||||
req_pairs,
|
||||
)
|
||||
err = resp['error']
|
||||
if err:
|
||||
raise SymbolNotFound(pair_patt)
|
||||
|
||||
# NOTE: we try to key pairs by our custom defined
|
||||
# `.bs_fqme` field since we want to offer search over
|
||||
# this pattern set, callers should fill out lookup
|
||||
# tables for kraken's bs_mktid keys to map to these
|
||||
# keys!
|
||||
# XXX: FURTHER kraken's data eng team decided to offer
|
||||
# 3 frickin market-pair-symbol key sets depending on
|
||||
# which frickin API is being used.
|
||||
# Example for the trading pair 'LTC/EUR':
|
||||
# - the "X-key" from rest eps 'XLTCZEUR'
|
||||
# - the "websocket key" from ws msgs is 'LTC/EUR'
|
||||
# - the "altname key" also delivered in pair info is 'LTCEUR'
|
||||
for xkey, data in resp['result'].items():
|
||||
|
||||
# NOTE: always cache in pairs tables for faster lookup
|
||||
pair = Pair(xname=xkey, **data)
|
||||
|
||||
# register the above `Pair` structs for all
|
||||
# key-sets/monikers: a set of 4 (frickin) tables
|
||||
# acting as a combined surjection of all possible
|
||||
# (and stupid) kraken names to their `Pair` obj.
|
||||
self._AssetPairs[xkey] = pair
|
||||
self._pairs[pair.bs_fqme] = pair
|
||||
self._altnames[pair.altname] = pair
|
||||
self._wsnames[pair.wsname] = pair
|
||||
|
||||
if pair_patt is not None:
|
||||
return next(iter(self._pairs.items()))[1]
|
||||
|
||||
return self._AssetPairs
|
||||
|
||||
async def get_mkt_pairs(
|
||||
self,
|
||||
reload: bool = False,
|
||||
) -> dict:
|
||||
'''
|
||||
Load all market pair info, build and cache it for downstream
|
||||
use.
|
||||
|
||||
Multiple pair info lookup tables (like ``._altnames:
|
||||
dict[str, str]``) are created for looking up the
|
||||
piker-native `Pair`-struct from any input of the three
|
||||
(yes, it's that idiotic..) available symbol/pair-key-sets
|
||||
that kraken frickin offers depending on the API including
|
||||
the .altname, .wsname and the weird ass default set they
|
||||
return in ReST responses .xname..
|
||||
|
||||
'''
|
||||
if (
|
||||
not self._pairs
|
||||
or reload
|
||||
):
|
||||
await self.asset_pairs()
|
||||
|
||||
return self._AssetPairs
|
||||
|
||||
async def search_symbols(
|
||||
self,
|
||||
pattern: str,
|
||||
|
||||
) -> dict[str, Any]:
|
||||
'''
|
||||
Search for a symbol by "alt name"..
|
||||
|
||||
It is expected that the ``Client._pairs`` table
|
||||
gets populated before conducting the underlying fuzzy-search
|
||||
over the pair-key set.
|
||||
|
||||
'''
|
||||
if not len(self._pairs):
|
||||
await self.get_mkt_pairs()
|
||||
assert self._pairs, '`Client.get_mkt_pairs()` was never called!?'
|
||||
|
||||
matches: dict[str, Pair] = match_from_pairs(
|
||||
pairs=self._pairs,
|
||||
query=pattern.upper(),
|
||||
score_cutoff=50,
|
||||
)
|
||||
|
||||
# repack in .altname-keyed output table
|
||||
return {
|
||||
pair.altname: pair
|
||||
for pair in matches.values()
|
||||
}
|
||||
|
||||
async def bars(
|
||||
self,
|
||||
symbol: str = 'XBTUSD',
|
||||
|
||||
# UTC 2017-07-02 12:53:20
|
||||
since: Union[int, datetime] | None = None,
|
||||
count: int = 720, # <- max allowed per query
|
||||
as_np: bool = True,
|
||||
|
||||
) -> dict:
|
||||
|
||||
if since is None:
|
||||
since = pendulum.now('UTC').start_of('minute').subtract(
|
||||
minutes=count).timestamp()
|
||||
|
||||
elif isinstance(since, int):
|
||||
since = pendulum.from_timestamp(since).timestamp()
|
||||
|
||||
else: # presumably a pendulum datetime
|
||||
since = since.timestamp()
|
||||
|
||||
# UTC 2017-07-02 12:53:20 is oldest seconds value
|
||||
since = str(max(1499000000, int(since)))
|
||||
json = await self._public(
|
||||
'OHLC',
|
||||
data={
|
||||
'pair': symbol,
|
||||
'since': since,
|
||||
},
|
||||
)
|
||||
try:
|
||||
res = json['result']
|
||||
res.pop('last')
|
||||
bars = next(iter(res.values()))
|
||||
|
||||
new_bars = []
|
||||
|
||||
first = bars[0]
|
||||
last_nz_vwap = first[-3]
|
||||
if last_nz_vwap == 0:
|
||||
# use close if vwap is zero
|
||||
last_nz_vwap = first[-4]
|
||||
|
||||
# convert all fields to native types
|
||||
for i, bar in enumerate(bars):
|
||||
# normalize weird zero-ed vwap values..cmon kraken..
|
||||
# indicates vwap didn't change since last bar
|
||||
vwap = float(bar.pop(-3))
|
||||
if vwap != 0:
|
||||
last_nz_vwap = vwap
|
||||
if vwap == 0:
|
||||
vwap = last_nz_vwap
|
||||
|
||||
# re-insert vwap as the last of the fields
|
||||
bar.append(vwap)
|
||||
|
||||
new_bars.append(
|
||||
(i,) + tuple(
|
||||
ftype(bar[j]) for j, (name, ftype) in enumerate(
|
||||
def_iohlcv_fields[1:]
|
||||
)
|
||||
)
|
||||
)
|
||||
array = np.array(new_bars, dtype=def_iohlcv_fields) if as_np else bars
|
||||
return array
|
||||
except KeyError:
|
||||
errmsg = json['error'][0]
|
||||
|
||||
if 'not found' in errmsg:
|
||||
raise SymbolNotFound(errmsg + f': {symbol}')
|
||||
|
||||
elif 'Too many requests' in errmsg:
|
||||
raise DataThrottle(f'{symbol}')
|
||||
|
||||
else:
|
||||
raise BrokerError(errmsg)
|
||||
|
||||
@classmethod
|
||||
def to_bs_fqme(
|
||||
cls,
|
||||
pair_str: str
|
||||
) -> str:
|
||||
'''
|
||||
Normalize symbol names to a 3x3 pair from the global
|
||||
definition map which we build out from the data retrieved from
|
||||
the 'AssetPairs' endpoint, see methods above.
|
||||
|
||||
'''
|
||||
try:
|
||||
return cls._altnames[pair_str.upper()].bs_fqme
|
||||
except KeyError as ke:
|
||||
raise SymbolNotFound(f'kraken has no {ke.args[0]}')
|
||||
|
||||
|
||||
@acm
|
||||
async def get_client() -> Client:
|
||||
|
||||
conf: dict[str, Any] = get_config()
|
||||
async with httpx.AsyncClient(
|
||||
base_url=_url,
|
||||
headers=_headers,
|
||||
|
||||
# TODO: is there a way to numerate this?
|
||||
# https://www.python-httpx.org/advanced/clients/#why-use-a-client
|
||||
# connections=4
|
||||
) as trio_client:
|
||||
if conf:
|
||||
client = Client(
|
||||
conf,
|
||||
httpx_client=trio_client,
|
||||
|
||||
# TODO: don't break these up and just do internal
|
||||
# conf lookups instead..
|
||||
name=conf['key_descr'],
|
||||
api_key=conf['api_key'],
|
||||
secret=conf['secret']
|
||||
)
|
||||
else:
|
||||
client = Client(
|
||||
conf={},
|
||||
httpx_client=trio_client,
|
||||
)
|
||||
|
||||
# at startup, load all symbols, and asset info in
|
||||
# batch requests.
|
||||
async with trio.open_nursery() as nurse:
|
||||
nurse.start_soon(client.get_assets)
|
||||
await client.get_mkt_pairs()
|
||||
|
||||
yield client
|
File diff suppressed because it is too large
|
@@ -0,0 +1,415 @@
|
|||
# piker: trading gear for hackers
|
||||
# Copyright (C) Tyler Goodlet (in stewardship for pikers)
|
||||
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU Affero General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU Affero General Public License for more details.
|
||||
|
||||
# You should have received a copy of the GNU Affero General Public License
|
||||
# along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
'''
|
||||
Real-time and historical data feed endpoints.
|
||||
|
||||
'''
|
||||
from contextlib import (
|
||||
asynccontextmanager as acm,
|
||||
aclosing,
|
||||
)
|
||||
from datetime import datetime
|
||||
from typing import (
|
||||
AsyncGenerator,
|
||||
Callable,
|
||||
Optional,
|
||||
)
|
||||
import time
|
||||
|
||||
import numpy as np
|
||||
import pendulum
|
||||
from trio_typing import TaskStatus
|
||||
import trio
|
||||
|
||||
from piker.accounting._mktinfo import (
|
||||
MktPair,
|
||||
)
|
||||
from piker.brokers import (
|
||||
open_cached_client,
|
||||
)
|
||||
from piker.brokers._util import (
|
||||
BrokerError,
|
||||
DataThrottle,
|
||||
DataUnavailable,
|
||||
)
|
||||
from piker.types import Struct
|
||||
from piker.data.validate import FeedInit
|
||||
from piker.data._web_bs import open_autorecon_ws, NoBsWs
|
||||
from .api import (
|
||||
log,
|
||||
)
|
||||
from .symbols import get_mkt_info
|
||||
|
||||
|
||||
class OHLC(Struct, frozen=True):
|
||||
'''
|
||||
Description of the flattened OHLC quote format.
|
||||
|
||||
For schema details see:
|
||||
https://docs.kraken.com/websockets/#message-ohlc
|
||||
|
||||
'''
|
||||
chan_id: int # internal kraken id
|
||||
chan_name: str # eg. ohlc-1 (name-interval)
|
||||
pair: str # fx pair
|
||||
|
||||
# unpacked from array
|
||||
time: float # Begin time of interval, in seconds since epoch
|
||||
etime: float # End time of interval, in seconds since epoch
|
||||
open: float # Open price of interval
|
||||
high: float # High price within interval
|
||||
low: float # Low price within interval
|
||||
close: float # Close price of interval
|
||||
vwap: float # Volume weighted average price within interval
|
||||
volume: float # Accumulated volume **within interval**
|
||||
count: int # Number of trades within interval
|
||||
|
||||
|
||||
async def stream_messages(
|
||||
ws: NoBsWs,
|
||||
):
|
||||
'''
|
||||
Message stream parser and heartbeat handler.
|
||||
|
||||
Deliver ws subscription messages as well as handle heartbeat logic
|
||||
through a single async generator.
|
||||
|
||||
'''
|
||||
last_hb: float = 0
|
||||
|
||||
async for msg in ws:
|
||||
match msg:
|
||||
case {'event': 'heartbeat'}:
|
||||
now = time.time()
|
||||
delay = now - last_hb
|
||||
last_hb = now
|
||||
|
||||
# XXX: why tf is this not printing without --tl flag?
|
||||
log.debug(f"Heartbeat after {delay}")
|
||||
# print(f"Heartbeat after {delay}")
|
||||
|
||||
continue
|
||||
|
||||
case _:
|
||||
# passthrough sub msgs
|
||||
yield msg
|
||||
|
||||
|
||||
async def process_data_feed_msgs(
|
||||
ws: NoBsWs,
|
||||
):
|
||||
'''
|
||||
Parse and pack data feed messages.
|
||||
|
||||
'''
|
||||
async with aclosing(stream_messages(ws)) as ws_stream:
|
||||
async for msg in ws_stream:
|
||||
match msg:
|
||||
case {
|
||||
'errorMessage': errmsg
|
||||
}:
|
||||
raise BrokerError(errmsg)
|
||||
|
||||
case {
|
||||
'event': 'subscriptionStatus',
|
||||
} as sub:
|
||||
log.info(
|
||||
'WS subscription is active:\n'
|
||||
f'{sub}'
|
||||
)
|
||||
continue
|
||||
|
||||
case [
|
||||
chan_id,
|
||||
*payload_array,
|
||||
chan_name,
|
||||
pair
|
||||
]:
|
||||
if 'ohlc' in chan_name:
|
||||
array: list = payload_array[0]
|
||||
ohlc = OHLC(
|
||||
chan_id,
|
||||
chan_name,
|
||||
pair,
|
||||
*map(float, array[:-1]),
|
||||
count=array[-1],
|
||||
)
|
||||
yield 'ohlc', ohlc.copy()
|
||||
|
||||
elif 'spread' in chan_name:
|
||||
|
||||
bid, ask, ts, bsize, asize = map(
|
||||
float, payload_array[0])
|
||||
|
||||
# TODO: really makes you think IB has a horrible API...
|
||||
quote = {
|
||||
'symbol': pair.replace('/', ''),
|
||||
'ticks': [
|
||||
{'type': 'bid', 'price': bid, 'size': bsize},
|
||||
{'type': 'bsize', 'price': bid, 'size': bsize},
|
||||
|
||||
{'type': 'ask', 'price': ask, 'size': asize},
|
||||
{'type': 'asize', 'price': ask, 'size': asize},
|
||||
],
|
||||
}
|
||||
yield 'l1', quote
|
||||
|
||||
# elif 'book' in msg[-2]:
|
||||
# chan_id, *payload_array, chan_name, pair = msg
|
||||
# print(msg)
|
||||
|
||||
case {
|
||||
'connectionID': conid,
|
||||
'event': 'systemStatus',
|
||||
'status': 'online',
|
||||
'version': ver,
|
||||
}:
|
||||
log.info(
|
||||
f'Established {ver} ws connection with id: {conid}'
|
||||
)
|
||||
continue
|
||||
|
||||
case _:
|
||||
print(f'UNHANDLED MSG: {msg}')
|
||||
# yield msg
|
||||
|
||||
|
||||
def normalize(ohlc: OHLC) -> dict:
|
||||
'''
|
||||
Norm an `OHLC` msg to piker's minimal (live-)quote schema.
|
||||
|
||||
'''
|
||||
quote = ohlc.to_dict()
|
||||
quote['broker_ts'] = quote['time']
|
||||
quote['brokerd_ts'] = time.time()
|
||||
quote['symbol'] = quote['pair'] = quote['pair'].replace('/', '')
|
||||
quote['last'] = quote['close']
|
||||
quote['bar_wap'] = ohlc.vwap
|
||||
return quote
|
||||
|
||||
|
||||
@acm
|
||||
async def open_history_client(
|
||||
mkt: MktPair,
|
||||
|
||||
) -> AsyncGenerator[Callable, None]:
|
||||
|
||||
symbol: str = mkt.bs_mktid
|
||||
|
||||
# TODO implement history getter for the new storage layer.
|
||||
async with open_cached_client('kraken') as client:
|
||||
|
||||
# lol, kraken won't send any more than the "last"
|
||||
# 720 1m bars.. so we have to just ignore further
|
||||
# requests of this type..
|
||||
queries: int = 0
|
||||
|
||||
async def get_ohlc(
|
||||
timeframe: float,
|
||||
end_dt: Optional[datetime] = None,
|
||||
start_dt: Optional[datetime] = None,
|
||||
|
||||
) -> tuple[
|
||||
np.ndarray,
|
||||
datetime, # start
|
||||
datetime, # end
|
||||
]:
|
||||
|
||||
nonlocal queries
|
||||
if (
|
||||
queries > 0
|
||||
or timeframe != 60
|
||||
):
|
||||
raise DataUnavailable(
|
||||
'Only a single query for 1m bars supported')
|
||||
|
||||
count = 0
|
||||
while count <= 3:
|
||||
try:
|
||||
array = await client.bars(
|
||||
symbol,
|
||||
since=end_dt,
|
||||
)
|
||||
count += 1
|
||||
queries += 1
|
||||
break
|
||||
except DataThrottle:
|
||||
log.warning(f'kraken OHLC throttle for {symbol}')
|
||||
await trio.sleep(1)
|
||||
|
||||
start_dt = pendulum.from_timestamp(array[0]['time'])
|
||||
end_dt = pendulum.from_timestamp(array[-1]['time'])
|
||||
return array, start_dt, end_dt
|
||||
|
||||
yield get_ohlc, {'erlangs': 1, 'rate': 1}
|
||||
|
||||
|
||||
async def stream_quotes(
|
||||
|
||||
send_chan: trio.abc.SendChannel,
|
||||
symbols: list[str],
|
||||
feed_is_live: trio.Event,
|
||||
loglevel: str = None,
|
||||
|
||||
# backend specific
|
||||
sub_type: str = 'ohlc',
|
||||
|
||||
# startup sync
|
||||
task_status: TaskStatus[tuple[dict, dict]] = trio.TASK_STATUS_IGNORED,
|
||||
|
||||
) -> None:
|
||||
'''
|
||||
Subscribe for ohlc stream of quotes for ``pairs``.
|
||||
|
||||
``pairs`` must be formatted <crypto_symbol>/<fiat_symbol>.
|
||||
|
||||
'''
|
||||
|
||||
ws_pairs: list[str] = []
|
||||
init_msgs: list[FeedInit] = []
|
||||
|
||||
async with (
|
||||
send_chan as send_chan,
|
||||
):
|
||||
for sym_str in symbols:
|
||||
mkt, pair = await get_mkt_info(sym_str)
|
||||
init_msgs.append(
|
||||
FeedInit(mkt_info=mkt)
|
||||
)
|
||||
|
||||
ws_pairs.append(pair.wsname)
|
||||
|
||||
@acm
|
||||
async def subscribe(ws: NoBsWs):
|
||||
|
||||
# XXX: setup subs
|
||||
# https://docs.kraken.com/websockets/#message-subscribe
|
||||
# specific logic for this in kraken's sync client:
|
||||
# https://github.com/krakenfx/kraken-wsclient-py/blob/master/kraken_wsclient_py/kraken_wsclient_py.py#L188
|
||||
ohlc_sub = {
|
||||
'event': 'subscribe',
|
||||
'pair': ws_pairs,
|
||||
'subscription': {
|
||||
'name': 'ohlc',
|
||||
'interval': 1,
|
||||
},
|
||||
}
|
||||
|
||||
# TODO: we want to eventually allow unsubs which should
|
||||
# be completely fine to request from a separate task
|
||||
# since internally the ws methods appear to be FIFO
|
||||
# locked.
|
||||
await ws.send_msg(ohlc_sub)
|
||||
|
||||
# trade data (aka L1)
|
||||
l1_sub = {
|
||||
'event': 'subscribe',
|
||||
'pair': ws_pairs,
|
||||
'subscription': {
|
||||
'name': 'spread',
|
||||
# 'depth': 10}
|
||||
},
|
||||
}
|
||||
|
||||
# pull a first quote and deliver
|
||||
await ws.send_msg(l1_sub)
|
||||
|
||||
yield
|
||||
|
||||
# unsub from all pairs on teardown
|
||||
if ws.connected():
|
||||
await ws.send_msg({
|
||||
'pair': ws_pairs,
|
||||
'event': 'unsubscribe',
|
||||
'subscription': ['ohlc', 'spread'],
|
||||
})
|
||||
|
||||
# XXX: do we need to ack the unsub?
|
||||
# await ws.recv_msg()
|
||||
|
||||
# see the tips on reconnection logic:
|
||||
# https://support.kraken.com/hc/en-us/articles/360044504011-WebSocket-API-unexpected-disconnections-from-market-data-feeds
|
||||
ws: NoBsWs
|
||||
async with (
|
||||
open_autorecon_ws(
|
||||
'wss://ws.kraken.com/',
|
||||
fixture=subscribe,
|
||||
reset_after=20,
|
||||
) as ws,
|
||||
|
||||
# avoid stream-gen closure from breaking trio..
|
||||
# NOTE: not sure this actually works XD particularly
|
||||
# if we call `ws._connect()` manually in the streaming
|
||||
# async gen..
|
||||
aclosing(process_data_feed_msgs(ws)) as msg_gen,
|
||||
):
|
||||
# pull a first quote and deliver
|
||||
typ, ohlc_last = await anext(msg_gen)
|
||||
quote = normalize(ohlc_last)
|
||||
|
||||
task_status.started((init_msgs, quote))
|
||||
feed_is_live.set()
|
||||
|
||||
# keep start of last interval for volume tracking
|
||||
last_interval_start: float = ohlc_last.etime
|
||||
|
||||
# start streaming
|
||||
topic: str = mkt.bs_fqme
|
||||
async for typ, quote in msg_gen:
|
||||
match typ:
|
||||
|
||||
# TODO: can get rid of all this by using
|
||||
# ``trades`` subscription..? Not sure why this
|
||||
# wasn't used originally? (music queues) zoltannn..
|
||||
# https://docs.kraken.com/websockets/#message-trade
|
||||
case 'ohlc':
|
||||
# generate tick values to match time & sales pane:
|
||||
# https://trade.kraken.com/charts/KRAKEN:BTC-USD?period=1m
|
||||
volume = quote.volume
|
||||
|
||||
# new OHLC sample interval
|
||||
if quote.etime > last_interval_start:
|
||||
last_interval_start: float = quote.etime
|
||||
tick_volume: float = volume
|
||||
|
||||
else:
|
||||
# this is the tick volume *within the interval*
|
||||
tick_volume: float = volume - ohlc_last.volume
|
||||
|
||||
ohlc_last = quote
|
||||
last = quote.close
|
||||
|
||||
quote = normalize(quote)
|
||||
ticks = quote.setdefault(
|
||||
'ticks',
|
||||
[],
|
||||
)
|
||||
if tick_volume:
|
||||
ticks.append({
|
||||
'type': 'trade',
|
||||
'price': last,
|
||||
'size': tick_volume,
|
||||
})
|
||||
|
||||
case 'l1':
|
||||
# passthrough quote msg
|
||||
pass
|
||||
|
||||
case _:
|
||||
log.warning(f'Unknown WSS message: {typ}, {quote}')
|
||||
|
||||
await send_chan.send({topic: quote})
|
|
@@ -0,0 +1,269 @@
|
|||
# piker: trading gear for hackers
|
||||
# Copyright (C) Tyler Goodlet (in stewardship for pikers)
|
||||
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU Affero General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU Affero General Public License for more details.
|
||||
|
||||
# You should have received a copy of the GNU Affero General Public License
|
||||
# along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
'''
|
||||
Trade transaction accounting and normalization.
|
||||
|
||||
'''
|
||||
import math
|
||||
from pprint import pformat
|
||||
from typing import (
|
||||
Any,
|
||||
)
|
||||
|
||||
import pendulum
|
||||
|
||||
from piker.accounting import (
|
||||
Transaction,
|
||||
Position,
|
||||
Account,
|
||||
get_likely_pair,
|
||||
TransactionLedger,
|
||||
# MktPair,
|
||||
)
|
||||
from piker.types import Struct
|
||||
from piker.data import (
|
||||
SymbologyCache,
|
||||
)
|
||||
from .api import (
|
||||
log,
|
||||
Client,
|
||||
Pair,
|
||||
)
|
||||
# from .feed import get_mkt_info
|
||||
|
||||
|
||||
def norm_trade(
|
||||
tid: str,
|
||||
record: dict[str, Any],
|
||||
|
||||
# this is the dict that was returned from
|
||||
# `Client.get_mkt_pairs()` and when running offline ledger
|
||||
# processing from `.accounting`, this will be the table loaded
|
||||
# into `SymbologyCache.pairs`.
|
||||
pairs: dict[str, Struct],
|
||||
symcache: SymbologyCache | None = None,
|
||||
|
||||
) -> Transaction:
|
||||
|
||||
size: float = float(record.get('vol')) * {
|
||||
'buy': 1,
|
||||
'sell': -1,
|
||||
}[record['type']]
|
||||
|
||||
# NOTE: this value may be either the websocket OR the rest schema
|
||||
# so we need to detect the key format and then choose the
|
||||
# correct symbol lookup table to eventually get a ``Pair``..
|
||||
# See internals of `Client.asset_pairs()` for deats!
|
||||
src_pair_key: str = record['pair']
|
||||
|
||||
# XXX: kraken's data engineering is soo bad they require THREE
|
||||
# different pair schemas (more or less seemingly tied to
|
||||
# transport-APIs)..LITERALLY they return different market id
|
||||
# pairs in the ledger endpoints vs. the websocket event subs..
|
||||
# lookup pair using the appropriately provided table depending
|
||||
# on API-key-schema..
|
||||
pair: Pair = pairs[src_pair_key]
|
||||
fqme: str = pair.bs_fqme.lower() + '.kraken'
|
||||
|
||||
return Transaction(
|
||||
fqme=fqme,
|
||||
tid=tid,
|
||||
size=size,
|
||||
price=float(record['price']),
|
||||
cost=float(record['fee']),
|
||||
dt=pendulum.from_timestamp(float(record['time'])),
|
||||
bs_mktid=pair.bs_mktid,
|
||||
)
|
||||
|
||||
|
||||
async def norm_trade_records(
|
||||
ledger: dict[str, Any],
|
||||
client: Client,
|
||||
api_name_set: str = 'xname',
|
||||
|
||||
) -> dict[str, Transaction]:
|
||||
'''
|
||||
Loop through an input ``dict`` of trade records
|
||||
and convert them to ``Transactions``.
|
||||
|
||||
'''
|
||||
records: dict[str, Transaction] = {}
|
||||
for tid, record in ledger.items():
|
||||
|
||||
# manual_fqme: str = f'{bs_mktid.lower()}.kraken'
|
||||
# mkt: MktPair = (await get_mkt_info(manual_fqme))[0]
|
||||
# fqme: str = mkt.fqme
|
||||
# assert fqme == manual_fqme
|
||||
pairs: dict[str, Pair] = {
|
||||
'xname': client._AssetPairs,
|
||||
'wsname': client._wsnames,
|
||||
'altname': client._altnames,
|
||||
}[api_name_set]
|
||||
|
||||
records[tid] = norm_trade(
|
||||
tid,
|
||||
record,
|
||||
pairs=pairs,
|
||||
)
|
||||
|
||||
return records
|
||||
|
||||
|
||||
def has_pp(
|
||||
acnt: Account,
|
||||
src_fiat: str,
|
||||
dst: str,
|
||||
size: float,
|
||||
|
||||
) -> Position | None:
|
||||
|
||||
src2dst: dict[str, str] = {}
|
||||
for bs_mktid in acnt.pps:
|
||||
likely_pair = get_likely_pair(
|
||||
src_fiat,
|
||||
dst,
|
||||
bs_mktid,
|
||||
)
|
||||
if likely_pair:
|
||||
src2dst[src_fiat] = dst
|
||||
|
||||
for src, dst in src2dst.items():
|
||||
pair: str = f'{dst}{src_fiat}'
|
||||
pos: Position = acnt.pps.get(pair)
|
||||
if (
|
||||
pos
|
||||
and math.isclose(pos.size, size)
|
||||
):
|
||||
return pos
|
||||
|
||||
elif (
|
||||
size == 0
|
||||
and pos.size
|
||||
):
|
||||
log.warning(
|
||||
f'`kraken` account says you have a ZERO '
|
||||
f'balance for {bs_mktid}:{pair}\n'
|
||||
f'but piker seems to think `{pos.size}`\n'
|
||||
'This is likely a discrepancy in piker '
|
||||
'accounting if the above number is '
|
||||
"large,' though it's likely to due lack"
|
||||
"f tracking xfers fees.."
|
||||
)
|
||||
return pos
|
||||
|
||||
return None # indicate no entry found
|
||||
|
||||
|
||||
# TODO: factor most of this "account updating from txns" into the
|
||||
# the `Account` impl so has to provide for hiding the mostly
|
||||
# cross-provider updates from txn sets
|
||||
async def verify_balances(
|
||||
acnt: Account,
|
||||
src_fiat: str,
|
||||
balances: dict[str, float],
|
||||
client: Client,
|
||||
ledger: TransactionLedger,
|
||||
ledger_trans: dict[str, Transaction], # from toml
|
||||
api_trans: dict[str, Transaction], # from API
|
||||
|
||||
simulate_pp_update: bool = False,
|
||||
|
||||
) -> None:
|
||||
for dst, size in balances.items():
|
||||
|
||||
# we don't care about tracking positions
|
||||
# in the user's source fiat currency.
|
||||
if (
|
||||
dst == src_fiat
|
||||
or not any(
|
||||
dst in bs_mktid for bs_mktid in acnt.pps
|
||||
)
|
||||
):
|
||||
log.warning(
|
||||
f'Skipping balance `{dst}`:{size} for position calcs!'
|
||||
)
|
||||
continue
|
||||
|
||||
# we have a balance for which there is no pos entry
|
||||
# - we have to likely update from the ledger?
|
||||
if not has_pp(acnt, src_fiat, dst, size):
|
||||
updated = acnt.update_from_ledger(
|
||||
ledger_trans,
|
||||
symcache=ledger.symcache,
|
||||
)
|
||||
log.info(f'Updated pps from ledger:\n{pformat(updated)}')
|
||||
|
||||
# FIRST try reloading from API records
|
||||
if (
|
||||
not has_pp(acnt, src_fiat, dst, size)
|
||||
and not simulate_pp_update
|
||||
):
|
||||
acnt.update_from_ledger(
|
||||
api_trans,
|
||||
symcache=ledger.symcache,
|
||||
)
|
||||
|
||||
# get transfers to make sense of abs
|
||||
# balances.
|
||||
# NOTE: we do this after ledger and API
|
||||
# loading since we might not have an
|
||||
# entry in the
|
||||
# ``account.kraken.spot.toml`` for the
|
||||
# necessary pair yet and thus this
|
||||
# likely pair grabber will likely fail.
|
||||
if not has_pp(acnt, src_fiat, dst, size):
|
||||
for bs_mktid in acnt.pps:
|
||||
likely_pair: str | None = get_likely_pair(
|
||||
src_fiat,
|
||||
dst,
|
||||
bs_mktid,
|
||||
)
|
||||
if likely_pair:
|
||||
break
|
||||
else:
|
||||
raise ValueError(
|
||||
'Could not find a position pair in '
|
||||
'ledger for likely withdrawal '
|
||||
f'candidate: {dst}'
|
||||
)
|
||||
|
||||
# this was likely pos that had a withdrawal
|
||||
# from the dst asset out of the account.
|
||||
if likely_pair:
|
||||
xfer_trans = await client.get_xfers(
|
||||
dst,
|
||||
|
||||
# TODO: not all src assets are
|
||||
# 3 chars long...
|
||||
src_asset=likely_pair[3:],
|
||||
)
|
||||
if xfer_trans:
|
||||
updated = acnt.update_from_ledger(
|
||||
xfer_trans,
|
||||
cost_scalar=1,
|
||||
symcache=ledger.symcache,
|
||||
)
|
||||
log.info(
|
||||
f'Updated {dst} from transfers:\n'
|
||||
f'{pformat(updated)}'
|
||||
)
|
||||
|
||||
if not has_pp(acnt, src_fiat, dst, size):
|
||||
raise ValueError(
|
||||
'Could not reproduce balance:\n'
|
||||
f'dst: {dst}, {size}\n'
|
||||
)
|
|
@@ -0,0 +1,206 @@
|
|||
# piker: trading gear for hackers
|
||||
# Copyright (C) Tyler Goodlet (in stewardship for pikers)
|
||||
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU Affero General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU Affero General Public License for more details.
|
||||
|
||||
# You should have received a copy of the GNU Affero General Public License
|
||||
# along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
'''
|
||||
Symbology defs and search.
|
||||
|
||||
'''
|
||||
from decimal import Decimal
|
||||
|
||||
import tractor
|
||||
from rapidfuzz import process as fuzzy
|
||||
|
||||
from piker._cacheables import (
|
||||
async_lifo_cache,
|
||||
)
|
||||
from piker.accounting._mktinfo import (
|
||||
digits_to_dec,
|
||||
)
|
||||
from piker.brokers import (
|
||||
open_cached_client,
|
||||
SymbolNotFound,
|
||||
)
|
||||
from piker.types import Struct
|
||||
from piker.accounting._mktinfo import (
|
||||
Asset,
|
||||
MktPair,
|
||||
unpack_fqme,
|
||||
)
|
||||
|
||||
|
||||
# https://www.kraken.com/features/api#get-tradable-pairs
|
||||
class Pair(Struct):
|
||||
xname: str # idiotic bs_mktid equiv i guess?
|
||||
altname: str # alternate pair name
|
||||
wsname: str # WebSocket pair name (if available)
|
||||
aclass_base: str # asset class of base component
|
||||
base: str # asset id of base component
|
||||
aclass_quote: str # asset class of quote component
|
||||
quote: str # asset id of quote component
|
||||
lot: str # volume lot size
|
||||
|
||||
cost_decimals: int
|
||||
costmin: float
|
||||
pair_decimals: int # scaling decimal places for pair
|
||||
lot_decimals: int # scaling decimal places for volume
|
||||
|
||||
# amount to multiply lot volume by to get currency volume
|
||||
lot_multiplier: float
|
||||
|
||||
# array of leverage amounts available when buying
|
||||
leverage_buy: list[int]
|
||||
# array of leverage amounts available when selling
|
||||
leverage_sell: list[int]
|
||||
|
||||
# fee schedule array in [volume, percent fee] tuples
|
||||
fees: list[tuple[int, float]]
|
||||
|
||||
# maker fee schedule array in [volume, percent fee] tuples (if on
|
||||
# maker/taker)
|
||||
fees_maker: list[tuple[int, float]]
|
||||
|
||||
fee_volume_currency: str # volume discount currency
|
||||
margin_call: str # margin call level
|
||||
margin_stop: str # stop-out/liquidation margin level
|
||||
ordermin: float # minimum order volume for pair
|
||||
tick_size: float # min price step size
|
||||
status: str
|
||||
|
||||
short_position_limit: float = 0
|
||||
long_position_limit: float = float('inf')
|
||||
|
||||
# TODO: should we make this a literal NamespacePath ref?
|
||||
ns_path: str = 'piker.brokers.kraken:Pair'
|
||||
|
||||
@property
|
||||
def bs_mktid(self) -> str:
|
||||
'''
|
||||
Kraken seems to index its market symbol sets in
|
||||
transaction ledgers using the key returned from rest
|
||||
queries.. so use that since apparently they can't
|
||||
make up their minds on a better key set XD
|
||||
|
||||
'''
|
||||
return self.xname
|
||||
|
||||
@property
|
||||
def price_tick(self) -> Decimal:
|
||||
return digits_to_dec(self.pair_decimals)
|
||||
|
||||
@property
|
||||
def size_tick(self) -> Decimal:
|
||||
return digits_to_dec(self.lot_decimals)
|
||||
|
||||
@property
|
||||
def bs_dst_asset(self) -> str:
|
||||
dst, _ = self.wsname.split('/')
|
||||
return dst
|
||||
|
||||
@property
|
||||
def bs_src_asset(self) -> str:
|
||||
_, src = self.wsname.split('/')
|
||||
return src
|
||||
|
||||
@property
|
||||
def bs_fqme(self) -> str:
|
||||
'''
|
||||
Basically the `.altname` but with special '.' handling and
|
||||
`.SPOT` suffix appending (for future multi-venue support).
|
||||
|
||||
'''
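# eg. wsname 'XBT/USD'    -> 'XBTUSD.SPOT',
#     wsname 'ETH2.S/ETH' -> 'ETH2-SETH.SPOT'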
|
||||
dst, src = self.wsname.split('/')
|
||||
# XXX: omg for stupid shite like ETH2.S/ETH..
|
||||
dst = dst.replace('.', '-')
|
||||
return f'{dst}{src}.SPOT'
|
||||
|
||||
|
||||
@tractor.context
|
||||
async def open_symbol_search(ctx: tractor.Context) -> None:
|
||||
async with open_cached_client('kraken') as client:
|
||||
|
||||
# load all symbols locally for fast search
|
||||
cache = await client.get_mkt_pairs()
|
||||
await ctx.started(cache)
|
||||
|
||||
async with ctx.open_stream() as stream:
|
||||
async for pattern in stream:
|
||||
await stream.send(
|
||||
await client.search_symbols(pattern)
|
||||
)
|
||||
|
||||
|
||||
@async_lifo_cache()
|
||||
async def get_mkt_info(
|
||||
fqme: str,
|
||||
|
||||
) -> tuple[MktPair, Pair]:
|
||||
'''
|
||||
Query for and return a `MktPair` and backend-native `Pair` (or
|
||||
wtv else) info.
|
||||
|
||||
If more than one fqme is provided return a ``dict`` of native
|
||||
key-strs to `MktPair`s.
|
||||
|
||||
'''
|
||||
venue: str = 'spot'
|
||||
expiry: str = ''
|
||||
if '.kraken' not in fqme:
|
||||
fqme += '.kraken'
|
||||
|
||||
broker, pair, venue, expiry = unpack_fqme(fqme)
|
||||
venue: str = venue or 'spot'
|
||||
|
||||
if venue.lower() != 'spot':
|
||||
raise SymbolNotFound(
|
||||
'kraken only supports spot markets right now!\n'
|
||||
f'{fqme}\n'
|
||||
)
|
||||
|
||||
async with open_cached_client('kraken') as client:
|
||||
|
||||
# uppercase since kraken bs_mktid is always upper
|
||||
# bs_fqme, _, broker = fqme.partition('.')
|
||||
# pair_str: str = bs_fqme.upper()
|
||||
pair_str: str = f'{pair}.{venue}'
|
||||
|
||||
pair: Pair | None = client._pairs.get(pair_str.upper())
|
||||
if not pair:
|
||||
bs_fqme: str = client.to_bs_fqme(pair_str)
|
||||
pair: Pair = client._pairs[bs_fqme]
|
||||
|
||||
if not (assets := client._assets):
|
||||
assets: dict[str, Asset] = await client.get_assets()
|
||||
|
||||
dst_asset: Asset = assets[pair.bs_dst_asset]
|
||||
src_asset: Asset = assets[pair.bs_src_asset]
|
||||
|
||||
mkt = MktPair(
|
||||
dst=dst_asset,
|
||||
src=src_asset,
|
||||
|
||||
price_tick=pair.price_tick,
|
||||
size_tick=pair.size_tick,
|
||||
bs_mktid=pair.bs_mktid,
|
||||
|
||||
expiry=expiry,
|
||||
venue=venue or 'spot',
|
||||
|
||||
# TODO: futes
|
||||
# _atype=_atype,
|
||||
|
||||
broker='kraken',
|
||||
)
|
||||
return mkt, pair
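# A minimal usage sketch of the endpoint above (assumes a cached kraken
# client is open; the fqme below is illustrative only):
#
#   mkt, pair = await get_mkt_info('xbtusdt.spot.kraken')
#   assert mkt.bs_mktid == pair.bs_mktid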
|
|
@ -0,0 +1,952 @@
|
|||
# Copyright (C) (in stewardship for pikers)
|
||||
# - Jared Goldman
|
||||
# - Tyler Goodlet
|
||||
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU Affero General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU Affero General Public License for more details.
|
||||
|
||||
# You should have received a copy of the GNU Affero General Public License
|
||||
# along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
'''
|
||||
Kucoin cex API backend.
|
||||
|
||||
'''
|
||||
from contextlib import (
|
||||
asynccontextmanager as acm,
|
||||
aclosing,
|
||||
)
|
||||
from datetime import datetime
|
||||
from decimal import Decimal
|
||||
import base64
|
||||
import hmac
|
||||
import hashlib
|
||||
import time
|
||||
from functools import partial
|
||||
from pprint import pformat
|
||||
from typing import (
|
||||
Any,
|
||||
Callable,
|
||||
Literal,
|
||||
AsyncGenerator,
|
||||
)
|
||||
import wsproto
|
||||
from uuid import uuid4
|
||||
|
||||
from trio_typing import TaskStatus
|
||||
import httpx
|
||||
from bidict import bidict
|
||||
import numpy as np
|
||||
import pendulum
|
||||
import tractor
|
||||
import trio
|
||||
|
||||
from piker.accounting._mktinfo import (
|
||||
Asset,
|
||||
digits_to_dec,
|
||||
MktPair,
|
||||
)
|
||||
from piker import config
|
||||
from piker.brokers import (
|
||||
open_cached_client,
|
||||
)
|
||||
from piker._cacheables import (
|
||||
async_lifo_cache,
|
||||
)
|
||||
from piker.log import get_logger
|
||||
from piker.data.validate import FeedInit
|
||||
from piker.types import Struct
|
||||
from piker.data import (
|
||||
def_iohlcv_fields,
|
||||
match_from_pairs,
|
||||
)
|
||||
from piker.data._web_bs import (
|
||||
open_autorecon_ws,
|
||||
NoBsWs,
|
||||
)
|
||||
from ._util import DataUnavailable
|
||||
|
||||
log = get_logger(__name__)
|
||||
|
||||
_no_symcache: bool = True
|
||||
|
||||
|
||||
class KucoinMktPair(Struct, frozen=True):
|
||||
'''
|
||||
Kucoin's pair format:
|
||||
https://docs.kucoin.com/#get-symbols-list
|
||||
|
||||
'''
|
||||
baseCurrency: str
|
||||
baseIncrement: float
|
||||
|
||||
@property
|
||||
def price_tick(self) -> Decimal:
|
||||
return Decimal(str(self.quoteIncrement))
|
||||
|
||||
baseMaxSize: float
|
||||
baseMinSize: float
|
||||
|
||||
@property
|
||||
def size_tick(self) -> Decimal:
|
||||
return Decimal(str(self.quoteMinSize))
|
||||
|
||||
enableTrading: bool
|
||||
feeCurrency: str
|
||||
isMarginEnabled: bool
|
||||
market: str
|
||||
minFunds: float
|
||||
name: str
|
||||
priceIncrement: float
|
||||
priceLimitRate: float
|
||||
quoteCurrency: str
|
||||
quoteIncrement: float
|
||||
quoteMaxSize: float
|
||||
quoteMinSize: float
|
||||
symbol: str # our bs_mktid, kucoin's internal id
|
||||
|
||||
|
||||
class AccountTrade(Struct, frozen=True):
|
||||
'''
|
||||
Historical trade format:
|
||||
https://docs.kucoin.com/#get-account-ledgers
|
||||
|
||||
'''
|
||||
id: str
|
||||
currency: str
|
||||
amount: float
|
||||
fee: float
|
||||
balance: float
|
||||
accountType: str
|
||||
bizType: str
|
||||
direction: Literal['in', 'out']
|
||||
createdAt: float
|
||||
context: list[str]
|
||||
|
||||
|
||||
class AccountResponse(Struct, frozen=True):
|
||||
'''
|
||||
https://docs.kucoin.com/#get-account-ledgers
|
||||
|
||||
'''
|
||||
currentPage: int
|
||||
pageSize: int
|
||||
totalNum: int
|
||||
totalPage: int
|
||||
items: list[AccountTrade]
|
||||
|
||||
|
||||
class KucoinTrade(Struct, frozen=True):
|
||||
'''
|
||||
Real-time trade format:
|
||||
https://docs.kucoin.com/#symbol-ticker
|
||||
|
||||
'''
|
||||
bestAsk: float
|
||||
bestAskSize: float
|
||||
bestBid: float
|
||||
bestBidSize: float
|
||||
price: float
|
||||
sequence: float
|
||||
size: float
|
||||
time: float
|
||||
|
||||
|
||||
class KucoinL2(Struct, frozen=True):
|
||||
'''
|
||||
Real-time L2 order book format:
|
||||
https://docs.kucoin.com/#level2-5-best-ask-bid-orders
|
||||
|
||||
'''
|
||||
|
||||
asks: list[list[float]]
|
||||
bids: list[list[float]]
|
||||
timestamp: float
|
||||
|
||||
|
||||
class Currency(Struct, frozen=True):
|
||||
'''
|
||||
Currency (asset) info:
|
||||
https://docs.kucoin.com/#get-currencies
|
||||
|
||||
'''
|
||||
currency: str
|
||||
name: str
|
||||
fullName: str
|
||||
precision: int
|
||||
confirms: int
|
||||
contractAddress: str
|
||||
withdrawalMinSize: str
|
||||
withdrawalMinFee: str
|
||||
isWithdrawEnabled: bool
|
||||
isDepositEnabled: bool
|
||||
isMarginEnabled: bool
|
||||
isDebitEnabled: bool
|
||||
|
||||
|
||||
class BrokerConfig(Struct, frozen=True):
|
||||
key_id: str
|
||||
key_secret: str
|
||||
key_passphrase: str
|
||||
|
||||
|
||||
def get_config() -> BrokerConfig | None:
|
||||
conf, _ = config.load()
|
||||
|
||||
section = conf.get('kucoin')
|
||||
|
||||
if section is None:
|
||||
log.warning('No config section found for kucoin in config')
|
||||
return None
|
||||
|
||||
return BrokerConfig(**section).copy()
|
||||
|
||||
|
||||
class Client:
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
httpx_client: httpx.AsyncClient,
|
||||
) -> None:
|
||||
self._http: httpx.AsyncClient = httpx_client
|
||||
self._config: BrokerConfig|None = get_config()
|
||||
self._pairs: dict[str, KucoinMktPair] = {}
|
||||
self._fqmes2mktids: bidict[str, str] = bidict()
|
||||
self._bars: list[list[float]] = []
|
||||
self._currencies: dict[str, Currency] = {}
|
||||
|
||||
def _gen_auth_req_headers(
|
||||
self,
|
||||
action: Literal['POST', 'GET'],
|
||||
endpoint: str,
|
||||
api: str = 'v2',
|
||||
|
||||
) -> dict[str, str | bytes]:
|
||||
'''
|
||||
Generate authenticated request headers:
|
||||
|
||||
https://docs.kucoin.com/#authentication
|
||||
https://www.kucoin.com/docs/basic-info/connection-method/authentication/creating-a-request
|
||||
https://www.kucoin.com/docs/basic-info/connection-method/authentication/signing-a-message
|
||||
|
||||
'''
|
||||
if not self._config:
|
||||
raise ValueError(
|
||||
'No config found when trying to send authenticated request'
|
||||
)
|
||||
|
||||
str_to_sign = (
|
||||
str(int(time.time() * 1000))
|
||||
+
|
||||
action
|
||||
+
|
||||
f'/api/{api}/{endpoint.lstrip("/")}'
|
||||
)
|
||||
|
||||
signature = base64.b64encode(
|
||||
hmac.new(
|
||||
self._config.key_secret.encode('utf-8'),
|
||||
str_to_sign.encode('utf-8'),
|
||||
hashlib.sha256,
|
||||
).digest()
|
||||
)
|
||||
|
||||
# TODO: can we cache this between calls?
|
||||
passphrase = base64.b64encode(
|
||||
hmac.new(
|
||||
self._config.key_secret.encode('utf-8'),
|
||||
self._config.key_passphrase.encode('utf-8'),
|
||||
hashlib.sha256,
|
||||
).digest()
|
||||
)
|
||||
|
||||
return {
|
||||
'KC-API-SIGN': signature,
|
||||
'KC-API-TIMESTAMP': str(pendulum.now().int_timestamp * 1000),
|
||||
'KC-API-KEY': self._config.key_id,
|
||||
'KC-API-PASSPHRASE': passphrase,
|
||||
# XXX: Even if using the v1 api - this stays the same
|
||||
'KC-API-KEY-VERSION': '2',
|
||||
}
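# A worked example of the pre-image signed above (illustrative values):
# a GET of the v2 'symbols' endpoint at t=1700000000000 ms yields
# str_to_sign = '1700000000000GET/api/v2/symbols', which is then
# HMAC-SHA256'd with the api secret and base64 encoded into 'KC-API-SIGN'.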
|
||||
|
||||
async def _request(
|
||||
self,
|
||||
action: Literal['POST', 'GET'],
|
||||
endpoint: str,
|
||||
|
||||
api: str = 'v2',
|
||||
headers: dict = {},
|
||||
|
||||
) -> Any:
|
||||
'''
|
||||
Generic request wrapper for Kucoin API
|
||||
|
||||
'''
|
||||
if self._config:
|
||||
headers = self._gen_auth_req_headers(
|
||||
action,
|
||||
endpoint,
|
||||
api,
|
||||
)
|
||||
|
||||
req_meth: Callable = getattr(
|
||||
self._http,
|
||||
action.lower(),
|
||||
)
|
||||
res = await req_meth(
|
||||
url=f'/{api}/{endpoint}',
|
||||
headers=headers,
|
||||
)
|
||||
json: dict = res.json()
|
||||
if data := json.get('data'):
|
||||
return data
|
||||
else:
|
||||
log.error(
|
||||
f'Error making request to /{api}/{endpoint} ->\n'
|
||||
f'{pformat(res)}'
|
||||
)
|
||||
return json['msg']
|
||||
|
||||
async def _get_ws_token(
|
||||
self,
|
||||
private: bool = False,
|
||||
) -> tuple[str, int] | None:
|
||||
'''
|
||||
Fetch ws token needed for sub access:
|
||||
https://docs.kucoin.com/#apply-connect-token
|
||||
returns a token and the interval we must ping
|
||||
the server at to keep the connection alive
|
||||
|
||||
'''
|
||||
token_type = 'private' if private else 'public'
|
||||
try:
|
||||
data: dict[str, Any]|None = await self._request(
|
||||
'POST',
|
||||
endpoint=f'bullet-{token_type}',
|
||||
api='v1'
|
||||
)
|
||||
except Exception as e:
|
||||
log.error(
|
||||
f'Error making request for Kucoin ws token -> {str(e)}')
|
||||
return None
|
||||
|
||||
if data and 'token' in data:
|
||||
# ping_interval is in ms
|
||||
ping_interval: int = data['instanceServers'][0]['pingInterval']
|
||||
return data['token'], ping_interval
|
||||
elif data:
|
||||
log.error(
|
||||
'Error making request for Kucoin ws token: '
|
||||
f'{data["msg"]}'
|
||||
)
|
||||
|
||||
async def get_currencies(
|
||||
self,
|
||||
update: bool = False,
|
||||
) -> dict[str, Currency]:
|
||||
'''
|
||||
Retrieve all "currency" info:
|
||||
https://docs.kucoin.com/#get-currencies
|
||||
|
||||
We use this for creating piker-internal ``Asset``s.
|
||||
|
||||
'''
|
||||
if (
|
||||
not self._currencies
|
||||
or update
|
||||
):
|
||||
currencies: dict[str, Currency] = {}
|
||||
entries: list[dict] = await self._request(
|
||||
'GET',
|
||||
endpoint='currencies',
|
||||
api='v1',
|
||||
)
|
||||
for entry in entries:
|
||||
curr = Currency(**entry).copy()
|
||||
currencies[curr.name] = curr
|
||||
|
||||
self._currencies.update(currencies)
|
||||
|
||||
return self._currencies
|
||||
|
||||
async def _get_pairs(
|
||||
self,
|
||||
) -> tuple[
|
||||
dict[str, KucoinMktPair],
|
||||
bidict[str, KucoinMktPair],
|
||||
]:
|
||||
entries = await self._request(
|
||||
'GET',
|
||||
endpoint='symbols',
|
||||
)
|
||||
log.info(f' {len(entries)} Kucoin market pairs fetched')
|
||||
|
||||
pairs: dict[str, KucoinMktPair] = {}
|
||||
fqmes2mktids: bidict[str, str] = bidict()
|
||||
for item in entries:
|
||||
pair = pairs[item['name']] = KucoinMktPair(**item)
|
||||
fqmes2mktids[
|
||||
item['name'].lower().replace('-', '')
|
||||
] = pair.name
|
||||
|
||||
return pairs, fqmes2mktids
|
||||
|
||||
async def get_mkt_pairs(
|
||||
self,
|
||||
update: bool = False,
|
||||
|
||||
) -> dict[str, KucoinMktPair]:
|
||||
'''
|
||||
Request all market pairs and store them in a local cache.
|
||||
|
||||
Also create a table of piker style fqme -> kucoin symbols.
|
||||
|
||||
'''
|
||||
if (
|
||||
not self._pairs
|
||||
or update
|
||||
):
|
||||
pairs, fqmes = await self._get_pairs()
|
||||
self._pairs.update(pairs)
|
||||
self._fqmes2mktids.update(fqmes)
|
||||
|
||||
return self._pairs
|
||||
|
||||
async def search_symbols(
|
||||
self,
|
||||
pattern: str,
|
||||
limit: int = 30,
|
||||
|
||||
) -> dict[str, KucoinMktPair]:
|
||||
'''
|
||||
Use fuzzy search engine to match against pairs, deliver
|
||||
matching ones.
|
||||
|
||||
'''
|
||||
if not len(self._pairs):
|
||||
await self.get_mkt_pairs()
|
||||
assert self._pairs, '`Client.get_mkt_pairs()` was never called!?'
|
||||
|
||||
matches: dict[str, KucoinMktPair] = match_from_pairs(
|
||||
pairs=self._pairs,
|
||||
# query=pattern.upper(),
|
||||
query=pattern.upper(),
|
||||
score_cutoff=35,
|
||||
limit=limit,
|
||||
)
|
||||
|
||||
# repack in dict form
|
||||
return {
|
||||
pair.name: pair
|
||||
for pair in matches.values()
|
||||
}
|
||||
|
||||
async def last_trades(self, sym: str) -> list[AccountTrade]:
|
||||
trades = await self._request(
|
||||
'GET',
|
||||
endpoint=f'accounts/ledgers?currency={sym}',
|
||||
api='v1'
|
||||
)
|
||||
trades = AccountResponse(**trades)
|
||||
return trades.items
|
||||
|
||||
async def _get_bars(
|
||||
self,
|
||||
fqme: str,
|
||||
|
||||
start_dt: datetime | None = None,
|
||||
end_dt: datetime | None = None,
|
||||
limit: int = 1000,
|
||||
as_np: bool = True,
|
||||
type: str = '1min',
|
||||
|
||||
) -> np.ndarray:
|
||||
'''
|
||||
Get OHLC data and convert to numpy array for perffff:
|
||||
https://docs.kucoin.com/#get-klines
|
||||
|
||||
Kucoin bar data format:
|
||||
[
|
||||
'1545904980', //Start time of the candle cycle 0
|
||||
'0.058', //opening price 1
|
||||
'0.049', //closing price 2
|
||||
'0.058', //highest price 3
|
||||
'0.049', //lowest price 4
|
||||
'0.018', //Transaction volume 5
|
||||
'0.000945' //Transaction amount 6
|
||||
],
|
||||
|
||||
piker ohlc numpy array format:
|
||||
[
|
||||
('index', int),
|
||||
('time', int),
|
||||
('open', float),
|
||||
('high', float),
|
||||
('low', float),
|
||||
('close', float),
|
||||
('volume', float),
|
||||
]
|
||||
|
||||
'''
|
||||
# Generate generic end and start time if values not passed
|
||||
# Currently gives us 12hrs of data
|
||||
if (
|
||||
end_dt is None
|
||||
and start_dt is None
|
||||
):
|
||||
end_dt = pendulum.now('UTC').add(minutes=1)
|
||||
start_dt = end_dt.start_of('minute').subtract(minutes=limit)
|
||||
|
||||
if (
|
||||
start_dt
|
||||
and end_dt is None
|
||||
):
|
||||
# just set end to limit's worth in future
|
||||
end_dt = start_dt.start_of('minute').add(minutes=limit)
|
||||
|
||||
else:
|
||||
start_dt = end_dt.start_of('minute').subtract(minutes=limit)
|
||||
|
||||
start_dt = int(start_dt.timestamp())
|
||||
end_dt = int(end_dt.timestamp())
|
||||
|
||||
kucoin_sym = self._fqmes2mktids[fqme]
|
||||
|
||||
url = (
|
||||
f'market/candles?type={type}'
|
||||
f'&symbol={kucoin_sym}'
|
||||
f'&startAt={start_dt}'
|
||||
f'&endAt={end_dt}'
|
||||
)
|
||||
|
||||
for i in range(10):
|
||||
data: list[list[str]] | dict = await self._request(
|
||||
'GET',
|
||||
url,
|
||||
api='v1',
|
||||
)
|
||||
|
||||
if not isinstance(data, list):
|
||||
# Do a gradual backoff if Kucoin is rate limiting us
|
||||
backoff_interval = i
|
||||
log.warning(
|
||||
f'History call failed, backing off for {backoff_interval}s'
|
||||
)
|
||||
await trio.sleep(backoff_interval)
|
||||
else:
|
||||
bars: list[list[str]] = data
|
||||
break
|
||||
|
||||
new_bars = []
|
||||
reversed_bars = bars[::-1]
|
||||
|
||||
# Convert from kucoin format to piker format
|
||||
for i, bar in enumerate(reversed_bars):
|
||||
new_bars.append(
|
||||
(
|
||||
# index
|
||||
i,
|
||||
# time
|
||||
int(bar[0]),
|
||||
# open
|
||||
float(bar[1]),
|
||||
# high
|
||||
float(bar[3]),
|
||||
# low
|
||||
float(bar[4]),
|
||||
# close
|
||||
float(bar[2]),
|
||||
# volume
|
||||
float(bar[5]),
|
||||
# bar_wap
|
||||
# 0.0,
|
||||
)
|
||||
)
|
||||
|
||||
array = np.array(
|
||||
new_bars, dtype=def_iohlcv_fields) if as_np else bars
|
||||
return array
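# A worked example of the row conversion above using the kucoin sample
# from the docstring (index 0 is illustrative):
#   kucoin row: ['1545904980', '0.058', '0.049', '0.058', '0.049', '0.018', '0.000945']
#   piker row:  (0, 1545904980, 0.058, 0.058, 0.049, 0.049, 0.018)
#               (index, time, open, high, low, close, volume)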
|
||||
|
||||
|
||||
def fqme_to_kucoin_sym(
|
||||
fqme: str,
|
||||
pairs: dict[str, KucoinMktPair],
|
||||
|
||||
) -> str:
|
||||
pair_data = pairs[fqme]
|
||||
return pair_data.baseCurrency + '-' + pair_data.quoteCurrency
|
||||
|
||||
|
||||
@acm
|
||||
async def get_client() -> AsyncGenerator[Client, None]:
|
||||
'''
|
||||
Load an API `Client` preconfigured from user settings
|
||||
|
||||
'''
|
||||
async with (
|
||||
httpx.AsyncClient(
|
||||
base_url='https://api.kucoin.com/api',
|
||||
) as trio_client,
|
||||
):
|
||||
client = Client(httpx_client=trio_client)
|
||||
async with trio.open_nursery() as tn:
|
||||
tn.start_soon(client.get_mkt_pairs)
|
||||
await client.get_currencies()
|
||||
|
||||
yield client
|
||||
|
||||
|
||||
@tractor.context
|
||||
async def open_symbol_search(
|
||||
ctx: tractor.Context,
|
||||
) -> None:
|
||||
async with open_cached_client('kucoin') as client:
|
||||
# load all symbols locally for fast search
|
||||
await client.get_mkt_pairs()
|
||||
await ctx.started()
|
||||
|
||||
async with ctx.open_stream() as stream:
|
||||
async for pattern in stream:
|
||||
await stream.send(await client.search_symbols(pattern))
|
||||
log.info('Kucoin symbol search opened')
|
||||
|
||||
|
||||
@acm
|
||||
async def open_ping_task(
|
||||
ws: wsproto.WSConnection,
|
||||
ping_interval, connect_id
|
||||
) -> AsyncGenerator[None, None]:
|
||||
'''
|
||||
Spawn a non-blocking task that pings the ws
|
||||
server every ping_interval so Kucoin doesn't drop
|
||||
our connection
|
||||
|
||||
'''
|
||||
async with trio.open_nursery() as n:
|
||||
# TODO: cache this task so it's only called once
|
||||
async def ping_server():
|
||||
while True:
|
||||
await trio.sleep((ping_interval - 1000) / 1000)
|
||||
await ws.send_msg({'id': connect_id, 'type': 'ping'})
|
||||
|
||||
log.info('Starting ping task for kucoin ws connection')
|
||||
n.start_soon(ping_server)
|
||||
|
||||
yield
|
||||
|
||||
n.cancel_scope.cancel()
|
||||
|
||||
|
||||
@async_lifo_cache()
|
||||
async def get_mkt_info(
|
||||
fqme: str,
|
||||
|
||||
) -> tuple[MktPair, KucoinMktPair]:
|
||||
'''
|
||||
Query for and return a `MktPair` and `KucoinMktPair`.
|
||||
|
||||
'''
|
||||
async with open_cached_client('kucoin') as client:
|
||||
# split off any fqme broker part
|
||||
bs_fqme, _, broker = fqme.partition('.')
|
||||
|
||||
pairs: dict[str, KucoinMktPair] = await client.get_mkt_pairs()
|
||||
|
||||
try:
|
||||
# likely search result key which is already in native mkt symbol form
|
||||
pair: KucoinMktPair = pairs[bs_fqme]
|
||||
bs_mktid: str = bs_fqme
|
||||
|
||||
except KeyError:
|
||||
|
||||
# likely a piker-style fqme from API request or CLI
|
||||
bs_mktid: str = client._fqmes2mktids[bs_fqme]
|
||||
pair: KucoinMktPair = pairs[bs_mktid]
|
||||
|
||||
# symbology sanity
|
||||
assert bs_mktid == pair.symbol
|
||||
|
||||
assets: dict[str, Currency] = client._currencies
|
||||
|
||||
# TODO: maybe just do this processing in
|
||||
# a .get_assets() method (see kraken)?
|
||||
src: Currency = assets[pair.quoteCurrency]
|
||||
src_asset = Asset(
|
||||
name=src.name,
|
||||
atype='crypto_currency',
|
||||
tx_tick=digits_to_dec(src.precision),
|
||||
info=src.to_dict(),
|
||||
)
|
||||
dst: Currency = assets[pair.baseCurrency]
|
||||
dst_asset = Asset(
|
||||
name=dst.name,
|
||||
atype='crypto_currency',
|
||||
tx_tick=digits_to_dec(dst.precision),
|
||||
info=dst.to_dict(),
|
||||
)
|
||||
mkt = MktPair(
|
||||
dst=dst_asset,
|
||||
src=src_asset,
|
||||
|
||||
price_tick=pair.price_tick,
|
||||
size_tick=pair.size_tick,
|
||||
bs_mktid=bs_mktid,
|
||||
|
||||
broker='kucoin',
|
||||
)
|
||||
return mkt, pair
|
||||
|
||||
|
||||
async def stream_quotes(
|
||||
send_chan: trio.abc.SendChannel,
|
||||
symbols: list[str],
|
||||
feed_is_live: trio.Event,
|
||||
|
||||
task_status: TaskStatus[
|
||||
tuple[dict, dict]
|
||||
] = trio.TASK_STATUS_IGNORED,
|
||||
|
||||
) -> None:
|
||||
'''
|
||||
Required piker api to stream real-time data.
|
||||
Where the rubber hits the road baby
|
||||
|
||||
'''
|
||||
init_msgs: list[FeedInit] = []
|
||||
|
||||
async with open_cached_client('kucoin') as client:
|
||||
|
||||
log.info(f'Starting up quote stream(s) for {symbols}')
|
||||
for sym_str in symbols:
|
||||
mkt, pair = await get_mkt_info(sym_str)
|
||||
init_msgs.append(
|
||||
FeedInit(mkt_info=mkt)
|
||||
)
|
||||
|
||||
ws: NoBsWs
|
||||
token, ping_interval = await client._get_ws_token()
|
||||
connect_id = str(uuid4())
|
||||
async with (
|
||||
open_autorecon_ws(
|
||||
(
|
||||
f'wss://ws-api-spot.kucoin.com/?'
|
||||
f'token={token}&[connectId={connect_id}]'
|
||||
),
|
||||
fixture=partial(
|
||||
subscribe,
|
||||
connect_id=connect_id,
|
||||
bs_mktid=pair.symbol,
|
||||
),
|
||||
) as ws,
|
||||
open_ping_task(ws, ping_interval, connect_id),
|
||||
aclosing(stream_messages(ws, sym_str)) as msg_gen,
|
||||
):
|
||||
typ, quote = await anext(msg_gen)
|
||||
|
||||
while typ != 'trade':
|
||||
# take care to not unblock here until we get a real
|
||||
# trade quote
|
||||
typ, quote = await anext(msg_gen)
|
||||
|
||||
task_status.started((init_msgs, quote))
|
||||
feed_is_live.set()
|
||||
|
||||
async for typ, msg in msg_gen:
|
||||
await send_chan.send({sym_str: msg})
|
||||
|
||||
|
||||
@acm
|
||||
async def subscribe(
|
||||
ws: NoBsWs,
|
||||
connect_id,
|
||||
bs_mktid,
|
||||
|
||||
# subs are filled in with `bs_mktid` from above
|
||||
topics: list[str] = [
|
||||
'/market/ticker:{bs_mktid}', # clearing events
|
||||
'/spotMarket/level2Depth5:{bs_mktid}', # level 2
|
||||
],
|
||||
|
||||
) -> AsyncGenerator[None, None]:
|
||||
|
||||
eps: list[str] = []
|
||||
for topic in topics:
|
||||
ep: str = topic.format(bs_mktid=bs_mktid)
|
||||
eps.append(ep)
|
||||
await ws.send_msg(
|
||||
{
|
||||
'id': connect_id,
|
||||
'type': 'subscribe',
|
||||
'topic': ep,
|
||||
'privateChannel': False,
|
||||
'response': True,
|
||||
}
|
||||
)
|
||||
|
||||
welcome_msg = await ws.recv_msg()
|
||||
log.info(f'WS welcome: {welcome_msg}')
|
||||
|
||||
for _ in topics:
|
||||
ack_msg = await ws.recv_msg()
|
||||
log.info(f'Sub ACK: {ack_msg}')
|
||||
|
||||
yield
|
||||
|
||||
# unsub
|
||||
if ws.connected():
|
||||
log.info(f'Unsubscribing from {bs_mktid} feed')
|
||||
for ep in eps:
|
||||
await ws.send_msg(
|
||||
{
|
||||
'id': connect_id,
|
||||
'type': 'unsubscribe',
|
||||
'topic': ep,
|
||||
'privateChannel': False,
|
||||
'response': True,
|
||||
}
|
||||
)
|
||||
|
||||
|
||||
async def stream_messages(
|
||||
ws: NoBsWs,
|
||||
sym: str,
|
||||
|
||||
) -> AsyncGenerator[tuple[str, dict], None]:
|
||||
'''
|
||||
Core (live) feed msg handler: relay market events
|
||||
to the piker-ized tick-stream format.
|
||||
|
||||
'''
|
||||
last_trade_ts: float = 0
|
||||
|
||||
dict_msg: dict[str, Any]
|
||||
async for dict_msg in ws:
|
||||
match dict_msg:
|
||||
case {
|
||||
'subject': 'trade.ticker',
|
||||
'data': trade_data_dict,
|
||||
}:
|
||||
trade_data = KucoinTrade(**trade_data_dict)
|
||||
|
||||
# XXX: Filter out duplicate messages as ws feed will
|
||||
# send duplicate market state
|
||||
# https://docs.kucoin.com/#level2-5-best-ask-bid-orders
|
||||
if trade_data.time == last_trade_ts:
|
||||
continue
|
||||
|
||||
last_trade_ts = trade_data.time
|
||||
|
||||
yield 'trade', {
|
||||
'symbol': sym,
|
||||
'last': trade_data.price,
|
||||
'brokerd_ts': last_trade_ts,
|
||||
'ticks': [
|
||||
{
|
||||
'type': 'trade',
|
||||
'price': float(trade_data.price),
|
||||
'size': float(trade_data.size),
|
||||
'broker_ts': last_trade_ts,
|
||||
}
|
||||
],
|
||||
}
|
||||
|
||||
case {
|
||||
'subject': 'level2',
|
||||
'data': trade_data_dict,
|
||||
}:
|
||||
l2_data = KucoinL2(**trade_data_dict)
|
||||
first_ask = l2_data.asks[0]
|
||||
first_bid = l2_data.bids[0]
|
||||
yield 'l1', {
|
||||
'symbol': sym,
|
||||
'ticks': [
|
||||
{
|
||||
'type': 'bid',
|
||||
'price': float(first_bid[0]),
|
||||
'size': float(first_bid[1]),
|
||||
},
|
||||
{
|
||||
'type': 'bsize',
|
||||
'price': float(first_bid[0]),
|
||||
'size': float(first_bid[1]),
|
||||
},
|
||||
{
|
||||
'type': 'ask',
|
||||
'price': float(first_ask[0]),
|
||||
'size': float(first_ask[1]),
|
||||
},
|
||||
{
|
||||
'type': 'asize',
|
||||
'price': float(first_ask[0]),
|
||||
'size': float(first_ask[1]),
|
||||
},
|
||||
],
|
||||
}
|
||||
|
||||
case {'type': 'pong'}:
|
||||
# resp to ping task req
|
||||
continue
|
||||
|
||||
case _:
|
||||
log.warning(f'Unhandled message: {dict_msg}')
|
||||
|
||||
|
||||
@acm
|
||||
async def open_history_client(
|
||||
mkt: MktPair,
|
||||
|
||||
) -> AsyncGenerator[Callable, None]:
|
||||
|
||||
symbol: str = mkt.bs_fqme
|
||||
|
||||
async with open_cached_client('kucoin') as client:
|
||||
log.info('Attempting to open kucoin history client')
|
||||
|
||||
async def get_ohlc_history(
|
||||
timeframe: float,
|
||||
end_dt: datetime | None = None,
|
||||
start_dt: datetime | None = None,
|
||||
) -> tuple[
|
||||
np.ndarray, datetime
|
||||
| None, datetime
|
||||
| None
|
||||
]: # start # end
|
||||
if timeframe != 60:
|
||||
raise DataUnavailable('Only 1m bars are supported')
|
||||
|
||||
array = await client._get_bars(
|
||||
symbol,
|
||||
start_dt=start_dt,
|
||||
end_dt=end_dt,
|
||||
)
|
||||
|
||||
times = array['time']
|
||||
|
||||
if not len(times):
|
||||
raise DataUnavailable(
|
||||
f'No more history before {start_dt}?'
|
||||
)
|
||||
|
||||
if end_dt is None:
|
||||
inow = round(time.time())
|
||||
|
||||
print(
|
||||
f'difference in time between load and processing: '
|
||||
f'{inow - times[-1]}'
|
||||
)
|
||||
|
||||
start_dt = pendulum.from_timestamp(times[0])
|
||||
end_dt = pendulum.from_timestamp(times[-1])
|
||||
|
||||
log.info('History successfully fetched baby')
|
||||
|
||||
return array, start_dt, end_dt
|
||||
|
||||
yield get_ohlc_history, {}
|
|
@ -1,5 +1,5 @@
|
|||
# piker: trading gear for hackers
|
||||
# Copyright (C) 2018-present Tyler Goodlet (in stewardship of piker0)
|
||||
# Copyright (C) 2018-present Tyler Goodlet (in stewardship of pikers)
|
||||
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU Affero General Public License as published by
|
||||
|
@ -40,13 +40,17 @@ import wrapt
|
|||
import asks
|
||||
|
||||
from ..calc import humanize, percent_change
|
||||
from .._cacheables import open_cached_client, async_lifo_cache
|
||||
from . import open_cached_client
|
||||
from piker._cacheables import async_lifo_cache
|
||||
from .. import config
|
||||
from ._util import resproc, BrokerError, SymbolNotFound
|
||||
from ..log import get_logger, colorize_json, get_console_log
|
||||
|
||||
|
||||
log = get_logger(__name__)
|
||||
from ..log import (
|
||||
colorize_json,
|
||||
)
|
||||
from ._util import (
|
||||
log,
|
||||
get_console_log,
|
||||
)
|
||||
|
||||
_use_practice_account = False
|
||||
_refresh_token_ep = 'https://{}login.questrade.com/oauth2/'
|
||||
|
|
|
@ -27,12 +27,13 @@ from typing import List
|
|||
from async_generator import asynccontextmanager
|
||||
import asks
|
||||
|
||||
from ..log import get_logger
|
||||
from ._util import resproc, BrokerError
|
||||
from ._util import (
|
||||
resproc,
|
||||
BrokerError,
|
||||
log,
|
||||
)
|
||||
from ..calc import percent_change
|
||||
|
||||
log = get_logger(__name__)
|
||||
|
||||
_service_ep = 'https://api.robinhood.com'
|
||||
|
||||
|
||||
|
@ -65,8 +66,10 @@ class Client:
|
|||
self.api = _API(self._sess)
|
||||
|
||||
def _zip_in_order(self, symbols: [str], quotes: List[dict]):
|
||||
return {quote.get('symbol', sym) if quote else sym: quote
|
||||
for sym, quote in zip(symbols, results_dict)}
|
||||
return {
|
||||
quote.get('symbol', sym) if quote else sym: quote
|
||||
for sym, quote in zip(symbols, quotes)
|
||||
}
|
||||
|
||||
async def quote(self, symbols: [str]):
|
||||
"""Retrieve quotes for a list of ``symbols``.
|
||||
|
|
|
@ -18,3 +18,38 @@
|
|||
Market machinery for order executions, book, management.
|
||||
|
||||
"""
|
||||
from ..log import get_logger
|
||||
from ._client import (
|
||||
open_ems,
|
||||
OrderClient,
|
||||
)
|
||||
from ._ems import (
|
||||
open_brokerd_dialog,
|
||||
)
|
||||
from ._util import OrderDialogs
|
||||
from ._messages import (
|
||||
Order,
|
||||
Status,
|
||||
Cancel,
|
||||
|
||||
# TODO: deprecate these and replace end-2-end with
|
||||
# client-side-dialog set above B)
|
||||
# https://github.com/pikers/piker/issues/514
|
||||
BrokerdPosition
|
||||
)
|
||||
|
||||
|
||||
__all__ = [
|
||||
'FeeModel',
|
||||
'open_ems',
|
||||
'OrderClient',
|
||||
'open_brokerd_dialog',
|
||||
'OrderDialogs',
|
||||
'Order',
|
||||
'Status',
|
||||
'Cancel',
|
||||
'BrokerdPosition'
|
||||
|
||||
]
|
||||
|
||||
log = get_logger(__name__)
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
# piker: trading gear for hackers
|
||||
# Copyright (C) Tyler Goodlet (in stewardship for piker0)
|
||||
# Copyright (C) Tyler Goodlet (in stewardship for pikers)
|
||||
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU Affero General Public License as published by
|
||||
|
@ -18,211 +18,284 @@
|
|||
Orders and execution client API.
|
||||
|
||||
"""
|
||||
from __future__ import annotations
|
||||
from contextlib import asynccontextmanager as acm
|
||||
from typing import Dict
|
||||
from pprint import pformat
|
||||
from dataclasses import dataclass, field
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
import trio
|
||||
import tractor
|
||||
from tractor.trionics import broadcast_receiver
|
||||
|
||||
from ..log import get_logger
|
||||
from ._ems import _emsd_main
|
||||
from .._daemon import maybe_open_emsd
|
||||
from ._messages import Order, Cancel
|
||||
from ._util import (
|
||||
log, # sub-sys logger
|
||||
)
|
||||
from piker.types import Struct
|
||||
from ..service import maybe_open_emsd
|
||||
from ._messages import (
|
||||
Order,
|
||||
Cancel,
|
||||
BrokerdPosition,
|
||||
)
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from ._messages import (
|
||||
Status,
|
||||
)
|
||||
|
||||
|
||||
log = get_logger(__name__)
|
||||
class OrderClient(Struct):
|
||||
'''
|
||||
EMS-client-side order book ctl and tracking.
|
||||
|
||||
|
||||
@dataclass
|
||||
class OrderBook:
|
||||
'''EMS-client-side order book ctl and tracking.
|
||||
|
||||
A style similar to "model-view" is used here where this api is
|
||||
provided as a supervised control for an EMS actor which does all the
|
||||
hard/fast work of talking to brokers/exchanges to conduct
|
||||
executions.
|
||||
|
||||
Currently, this is mostly for keeping local state to match the EMS
|
||||
and use received events to trigger graphics updates.
|
||||
(A)sync API for submitting orders and alerts to the `emsd` service;
|
||||
this is the main control for execution management from client code.
|
||||
|
||||
'''
|
||||
# IPC stream to `emsd` actor
|
||||
_ems_stream: tractor.MsgStream
|
||||
|
||||
# mem channels used to relay order requests to the EMS daemon
|
||||
_to_ems: trio.abc.SendChannel
|
||||
_from_order_book: trio.abc.ReceiveChannel
|
||||
_to_relay_task: trio.abc.SendChannel
|
||||
_from_sync_order_client: trio.abc.ReceiveChannel
|
||||
|
||||
_sent_orders: Dict[str, Order] = field(default_factory=dict)
|
||||
_ready_to_receive: trio.Event = trio.Event()
|
||||
# history table
|
||||
_sent_orders: dict[str, Order] = {}
|
||||
|
||||
def send(
|
||||
def send_nowait(
|
||||
self,
|
||||
msg: Order,
|
||||
msg: Order | dict,
|
||||
|
||||
) -> dict:
|
||||
) -> dict | Order:
|
||||
'''
|
||||
Sync version of ``.send()``.
|
||||
|
||||
'''
|
||||
self._sent_orders[msg.oid] = msg
|
||||
self._to_ems.send_nowait(msg.dict())
|
||||
self._to_relay_task.send_nowait(msg)
|
||||
return msg
|
||||
|
||||
def update(
|
||||
async def send(
|
||||
self,
|
||||
msg: Order | dict,
|
||||
|
||||
) -> dict | Order:
|
||||
'''
|
||||
Send a new order msg async to the `emsd` service.
|
||||
|
||||
'''
|
||||
self._sent_orders[msg.oid] = msg
|
||||
await self._ems_stream.send(msg)
|
||||
return msg
|
||||
|
||||
def update_nowait(
|
||||
self,
|
||||
uuid: str,
|
||||
**data: dict,
|
||||
|
||||
) -> dict:
|
||||
cmd = self._sent_orders[uuid]
|
||||
msg = cmd.dict()
|
||||
msg.update(data)
|
||||
self._sent_orders[uuid] = Order(**msg)
|
||||
self._to_ems.send_nowait(msg)
|
||||
return cmd
|
||||
'''
|
||||
Sync version of ``.update()``.
|
||||
|
||||
def cancel(self, uuid: str) -> bool:
|
||||
"""Cancel an order (or alert) in the EMS.
|
||||
|
||||
"""
|
||||
'''
|
||||
cmd = self._sent_orders[uuid]
|
||||
msg = Cancel(
|
||||
msg = cmd.copy(update=data)
|
||||
self._sent_orders[uuid] = msg
|
||||
self._to_relay_task.send_nowait(msg)
|
||||
return msg
|
||||
|
||||
async def update(
|
||||
self,
|
||||
uuid: str,
|
||||
**data: dict,
|
||||
) -> dict:
|
||||
'''
|
||||
Update an existing order dialog with a msg updated from
|
||||
``update`` kwargs.
|
||||
|
||||
'''
|
||||
cmd = self._sent_orders[uuid]
|
||||
msg = cmd.copy(update=data)
|
||||
self._sent_orders[uuid] = msg
|
||||
await self._ems_stream.send(msg)
|
||||
return msg
|
||||
|
||||
def _mk_cancel_msg(
|
||||
self,
|
||||
uuid: str,
|
||||
) -> Cancel:
|
||||
cmd = self._sent_orders.get(uuid)
|
||||
if not cmd:
|
||||
log.error(
|
||||
f'Unknown order {uuid}!?\n'
|
||||
f'Maybe there is a stale entry or line?\n'
|
||||
f'You should report this as a bug!'
|
||||
)
|
||||
return
|
||||
|
||||
fqme = str(cmd.symbol)
|
||||
return Cancel(
|
||||
oid=uuid,
|
||||
symbol=cmd.symbol,
|
||||
)
|
||||
self._to_ems.send_nowait(msg.dict())
|
||||
|
||||
|
||||
_orders: OrderBook = None
|
||||
|
||||
|
||||
def get_orders(
|
||||
emsd_uid: tuple[str, str] = None
|
||||
) -> OrderBook:
|
||||
""""
|
||||
OrderBook singleton factory per actor.
|
||||
|
||||
"""
|
||||
if emsd_uid is not None:
|
||||
# TODO: read in target emsd's active book on startup
|
||||
pass
|
||||
|
||||
global _orders
|
||||
|
||||
if _orders is None:
|
||||
size = 100
|
||||
tx, rx = trio.open_memory_channel(size)
|
||||
brx = broadcast_receiver(rx, size)
|
||||
|
||||
# setup local ui event streaming channels for request/resp
|
||||
# streaming with EMS daemon
|
||||
_orders = OrderBook(
|
||||
_to_ems=tx,
|
||||
_from_order_book=brx,
|
||||
symbol=fqme,
|
||||
)
|
||||
|
||||
return _orders
|
||||
def cancel_nowait(
|
||||
self,
|
||||
uuid: str,
|
||||
|
||||
) -> None:
|
||||
'''
|
||||
Sync version of ``.cancel()``.
|
||||
|
||||
'''
|
||||
self._to_relay_task.send_nowait(
|
||||
self._mk_cancel_msg(uuid)
|
||||
)
|
||||
|
||||
async def cancel(
|
||||
self,
|
||||
uuid: str,
|
||||
|
||||
) -> bool:
|
||||
'''
|
||||
Cancel an already existing order (or alert) dialog.
|
||||
|
||||
'''
|
||||
await self._ems_stream.send(
|
||||
self._mk_cancel_msg(uuid)
|
||||
)
|
||||
|
||||
|
||||
# TODO: we can get rid of this relay loop once we move
|
||||
# order_mode inputs to async code!
|
||||
async def relay_order_cmds_from_sync_code(
|
||||
|
||||
async def relay_orders_from_sync_code(
|
||||
|
||||
client: OrderClient,
|
||||
symbol_key: str,
|
||||
to_ems_stream: tractor.MsgStream,
|
||||
|
||||
) -> None:
|
||||
"""
|
||||
Order streaming task: deliver orders transmitted from UI
|
||||
to downstream consumers.
|
||||
'''
|
||||
Order submission relay task: deliver orders sent from synchronous (UI)
|
||||
code to the EMS via ``OrderClient._from_sync_order_client``.
|
||||
|
||||
This is run in the UI actor (usually the one running Qt but could be
|
||||
any other client service code). This process simply delivers order
|
||||
messages to the above ``_to_ems`` send channel (from sync code using
|
||||
messages to the above ``_to_relay_task`` send channel (from sync code using
|
||||
``.send_nowait()``), these values are pulled from the channel here
|
||||
and relayed to any consumer(s) that called this function using
|
||||
a ``tractor`` portal.
|
||||
|
||||
This effectively makes order messages look like they're being
|
||||
"pushed" from the parent to the EMS where local sync code is likely
|
||||
doing the pushing from some UI.
|
||||
doing the pushing from some non-async UI handler.
|
||||
|
||||
"""
|
||||
book = get_orders()
|
||||
async with book._from_order_book.subscribe() as orders_stream:
|
||||
async for cmd in orders_stream:
|
||||
if cmd['symbol'] == symbol_key:
|
||||
log.info(f'Send order cmd:\n{pformat(cmd)}')
|
||||
'''
|
||||
async with (
|
||||
client._from_sync_order_client.subscribe() as sync_order_cmds
|
||||
):
|
||||
async for cmd in sync_order_cmds:
|
||||
sym = cmd.symbol
|
||||
msg = pformat(cmd.to_dict())
|
||||
|
||||
if sym == symbol_key:
|
||||
log.info(f'Send order cmd:\n{msg}')
|
||||
# send msg over IPC / wire
|
||||
await to_ems_stream.send(cmd)
|
||||
|
||||
else:
|
||||
log.warning(
|
||||
f'Ignoring unmatched order cmd for {sym} != {symbol_key}:'
|
||||
f'\n{msg}'
|
||||
)
|
||||
|
||||
|
||||
@acm
|
||||
async def open_ems(
|
||||
fqsn: str,
|
||||
fqme: str,
|
||||
mode: str = 'live',
|
||||
loglevel: str = 'error',
|
||||
|
||||
) -> (
|
||||
OrderBook,
|
||||
tractor.MsgStream,
|
||||
dict,
|
||||
):
|
||||
) -> tuple[
|
||||
OrderClient, # client
|
||||
tractor.MsgStream, # order ctl stream
|
||||
dict[
|
||||
# brokername, acctid
|
||||
tuple[str, str],
|
||||
dict[str, BrokerdPosition],
|
||||
],
|
||||
list[str],
|
||||
dict[str, Status],
|
||||
]:
|
||||
'''
|
||||
Spawn an EMS daemon and begin sending orders and receiving
|
||||
alerts.
|
||||
(Maybe) spawn an EMS-daemon (emsd), deliver an `OrderClient` for
|
||||
requesting orders/alerts and a `trades_stream` which delivers all
|
||||
response-msgs.
|
||||
|
||||
This EMS tries to reduce most broker's terrible order entry apis to
|
||||
a very simple protocol built on a few easy to grok and/or
|
||||
"rantsy" premises:
|
||||
|
||||
- most users will prefer "dark mode" where orders are not submitted
|
||||
to a broker until an execution condition is triggered
|
||||
(aka client-side "hidden orders")
|
||||
|
||||
- Brokers over-complicate their apis and generally speaking hire
|
||||
poor designers to create them. We're better off creating a super
|
||||
minimal, schema-simple, request-event-stream protocol to unify all the
|
||||
existing piles of shit (and shocker, it'll probably just end up
|
||||
looking like a decent crypto exchange's api)
|
||||
|
||||
- all order types can be implemented with client-side limit orders
|
||||
|
||||
- we aren't reinventing a wheel in this case since none of these
|
||||
brokers are exposing FIX protocol; it is they who are doing the re-inventing.
|
||||
|
||||
|
||||
TODO: make some fancy diagrams using mermaid.io
|
||||
|
||||
the possible set of responses from the stream is currently:
|
||||
- 'dark_submitted', 'broker_submitted'
|
||||
- 'dark_cancelled', 'broker_cancelled'
|
||||
- 'dark_executed', 'broker_executed'
|
||||
- 'broker_filled'
|
||||
This is a "client side" entrypoint which may spawn the `emsd` service
|
||||
if it can't be discovered and generally speaking is the lowest level
|
||||
broker control client-API.
|
||||
|
||||
'''
|
||||
# wait for service to connect back to us signalling
|
||||
# ready for order commands
|
||||
book = get_orders()
|
||||
# TODO: prolly hand in the `MktPair` instance directly here as well!
|
||||
from piker.accounting import unpack_fqme
|
||||
broker, mktep, venue, suffix = unpack_fqme(fqme)
|
||||
|
||||
from ..data._source import unpack_fqsn
|
||||
broker, symbol, suffix = unpack_fqsn(fqsn)
|
||||
|
||||
async with maybe_open_emsd(broker) as portal:
|
||||
async with maybe_open_emsd(
|
||||
broker,
|
||||
loglevel=loglevel,
|
||||
) as portal:
|
||||
|
||||
from ._ems import _emsd_main
|
||||
async with (
|
||||
# connect to emsd
|
||||
portal.open_context(
|
||||
|
||||
_emsd_main,
|
||||
fqsn=fqsn,
|
||||
fqme=fqme,
|
||||
exec_mode=mode,
|
||||
loglevel=loglevel,
|
||||
|
||||
) as (ctx, (positions, accounts)),
|
||||
) as (
|
||||
ctx,
|
||||
(
|
||||
positions,
|
||||
accounts,
|
||||
dialogs,
|
||||
)
|
||||
),
|
||||
|
||||
# open 2-way trade command stream
|
||||
ctx.open_stream() as trades_stream,
|
||||
):
|
||||
size: int = 100 # what should this be?
|
||||
tx, rx = trio.open_memory_channel(size)
|
||||
brx = broadcast_receiver(rx, size)
|
||||
|
||||
# setup local ui event streaming channels for request/resp
|
||||
# streaming with EMS daemon
|
||||
client = OrderClient(
|
||||
_ems_stream=trades_stream,
|
||||
_to_relay_task=tx,
|
||||
_from_sync_order_client=brx,
|
||||
)
|
||||
|
||||
client._ems_stream = trades_stream
|
||||
|
||||
# start sync code order msg delivery task
|
||||
async with trio.open_nursery() as n:
|
||||
n.start_soon(
|
||||
relay_order_cmds_from_sync_code,
|
||||
fqsn,
|
||||
relay_orders_from_sync_code,
|
||||
client,
|
||||
fqme,
|
||||
trades_stream
|
||||
)
|
||||
|
||||
yield book, trades_stream, positions, accounts
|
||||
yield (
|
||||
client,
|
||||
trades_stream,
|
||||
positions,
|
||||
accounts,
|
||||
dialogs,
|
||||
)
|
||||
|
||||
# stop the sync-msg-relay task on exit.
|
||||
n.cancel_scope.cancel()
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
# piker: trading gear for hackers
|
||||
# Copyright (C) Tyler Goodlet (in stewardship for piker0)
|
||||
# Copyright (C) Tyler Goodlet (in stewardship for pikers)
|
||||
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU Affero General Public License as published by
|
||||
|
@ -15,108 +15,148 @@
|
|||
# along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
"""
|
||||
Clearing system messagingn types and protocols.
|
||||
Clearing sub-system message and protocols.
|
||||
|
||||
"""
|
||||
from typing import Optional, Union
|
||||
from __future__ import annotations
|
||||
from typing import (
|
||||
Literal,
|
||||
)
|
||||
|
||||
# TODO: try out just encoding/send direction for now?
|
||||
# import msgspec
|
||||
from pydantic import BaseModel
|
||||
from msgspec import field
|
||||
|
||||
from ..data._source import Symbol
|
||||
from piker.types import Struct
|
||||
|
||||
|
||||
# TODO: ``msgspec`` stuff worth paying attention to:
|
||||
# - schema evolution:
|
||||
# https://jcristharif.com/msgspec/usage.html#schema-evolution
|
||||
# - for eg. ``BrokerdStatus``, instead just have separate messages?
|
||||
# - use literals for a common msg determined by diff keys?
|
||||
# - https://jcristharif.com/msgspec/usage.html#literal
|
||||
|
||||
# --------------
|
||||
# Client -> emsd
|
||||
# --------------
|
||||
|
||||
class Order(Struct):
|
||||
|
||||
class Cancel(BaseModel):
|
||||
'''Cancel msg for removing a dark (ems triggered) or
|
||||
broker-submitted (live) trigger/order.
|
||||
|
||||
'''
|
||||
action: str = 'cancel'
|
||||
oid: str # uuid4
|
||||
symbol: str
|
||||
|
||||
|
||||
class Order(BaseModel):
|
||||
|
||||
action: str # {'buy', 'sell', 'alert'}
|
||||
# internal ``emdsd`` unique "order id"
|
||||
oid: str # uuid4
|
||||
symbol: Union[str, Symbol]
|
||||
account: str # should we set a default as '' ?
|
||||
|
||||
price: float
|
||||
size: float
|
||||
brokers: list[str]
|
||||
|
||||
# Assigned once initial ack is received
|
||||
# ack_time_ns: Optional[int] = None
|
||||
# TODO: ideally we can combine these 2 fields into
|
||||
# 1 and just use the size polarity to determine a buy/sell.
|
||||
# i would like to see this become more like
|
||||
# https://jcristharif.com/msgspec/usage.html#literal
|
||||
# action: Literal[
|
||||
# 'live',
|
||||
# 'dark',
|
||||
# 'alert',
|
||||
# ]
|
||||
|
||||
action: Literal[
|
||||
'buy',
|
||||
'sell',
|
||||
'alert',
|
||||
]
|
||||
# determines whether the create execution
|
||||
# will be submitted to the ems or directly to
|
||||
# the backend broker
|
||||
exec_mode: str # {'dark', 'live', 'paper'}
|
||||
exec_mode: Literal[
|
||||
'dark',
|
||||
'live',
|
||||
# 'paper', no right?
|
||||
]
|
||||
|
||||
class Config:
|
||||
# just for pre-loading a ``Symbol`` when used
|
||||
# in the order mode staging process
|
||||
arbitrary_types_allowed = True
|
||||
# don't copy this model instance when used in
|
||||
# a recursive model
|
||||
copy_on_model_validation = False
|
||||
# internal ``emsd`` unique "order id"
|
||||
oid: str # uuid4
|
||||
# TODO: figure out how to optionally typecast this to `MktPair`?
|
||||
symbol: str # | MktPair
|
||||
account: str # should we set a default as '' ?
|
||||
|
||||
price: float
|
||||
size: float # -ve is "sell", +ve is "buy"
|
||||
|
||||
brokers: list[str] = []
|
||||
|
||||
|
||||
class Cancel(Struct):
|
||||
'''
|
||||
Cancel msg for removing a dark (ems triggered) or
|
||||
broker-submitted (live) trigger/order.
|
||||
|
||||
'''
|
||||
oid: str # uuid4
|
||||
symbol: str
|
||||
action: str = 'cancel'
|
||||
|
||||
|
||||
# --------------
|
||||
# Client <- emsd
|
||||
# --------------
|
||||
# update msgs from ems which relay state change info
|
||||
# from the active clearing engine.
|
||||
|
||||
class Status(Struct):
|
||||
|
||||
class Status(BaseModel):
|
||||
time_ns: int
|
||||
oid: str # uuid4 ems-order dialog id
|
||||
|
||||
resp: Literal[
|
||||
'pending', # acked by broker but not yet open
|
||||
'open',
|
||||
'dark_open', # dark/algo triggered order is open in ems clearing loop
|
||||
'triggered', # above triggered order sent to brokerd, or an alert closed
|
||||
'closed', # fully cleared all size/units
|
||||
'fill', # partial execution
|
||||
'canceled',
|
||||
'error',
|
||||
]
|
||||
|
||||
name: str = 'status'
|
||||
oid: str # uuid4
|
||||
time_ns: int
|
||||
|
||||
# {
|
||||
# 'dark_submitted',
|
||||
# 'dark_cancelled',
|
||||
# 'dark_triggered',
|
||||
|
||||
# 'broker_submitted',
|
||||
# 'broker_cancelled',
|
||||
# 'broker_executed',
|
||||
# 'broker_filled',
|
||||
# 'broker_errored',
|
||||
|
||||
# 'alert_submitted',
|
||||
# 'alert_triggered',
|
||||
|
||||
# }
|
||||
resp: str # "response", see above
|
||||
|
||||
# symbol: str
|
||||
|
||||
# trigger info
|
||||
trigger_price: Optional[float] = None
|
||||
# price: float
|
||||
|
||||
# broker: Optional[str] = None
|
||||
|
||||
# this maps normally to the ``BrokerdOrder.reqid`` below, an id
|
||||
# normally allocated internally by the backend broker routing system
|
||||
broker_reqid: Optional[Union[int, str]] = None
|
||||
reqid: int | str | None = None
|
||||
|
||||
# for relaying backend msg data "through" the ems layer
|
||||
# the (last) source order/request msg if provided
|
||||
# (eg. the Order/Cancel which causes this msg) and
|
||||
# acts as a back-reference to the corresponding
|
||||
# request message which was the source of this msg.
|
||||
req: Order | None = None
|
||||
|
||||
# XXX: better design/name here?
|
||||
# flag that can be set to indicate a message for an order
|
||||
# event that wasn't originated by piker's emsd (eg. some external
|
||||
# trading system which does its own order control but that you
|
||||
# might want to "track" using piker UIs/systems).
|
||||
src: str | None = None
|
||||
|
||||
# set when a cancel request msg was set for this order flow dialog
|
||||
# but the brokerd dialog isn't yet in a cancelled state.
|
||||
cancel_called: bool = False
|
||||
|
||||
# for relaying a boxed brokerd-dialog-side msg data "through" the
|
||||
# ems layer to clients.
|
||||
brokerd_msg: dict = {}
|
||||
|
||||
|
||||
class Error(Status):
|
||||
resp: str = 'error'
|
||||
|
||||
# TODO: allow re-wrapping from existing (last) status?
|
||||
@classmethod
|
||||
def from_status(
|
||||
cls,
|
||||
msg: Status,
|
||||
) -> Error:
|
||||
...
|
||||
|
||||
|
||||
# ---------------
|
||||
# emsd -> brokerd
|
||||
# ---------------
|
||||
# requests *sent* from ems to respective backend broker daemon
|
||||
|
||||
class BrokerdCancel(BaseModel):
|
||||
class BrokerdCancel(Struct):
|
||||
|
||||
action: str = 'cancel'
|
||||
oid: str # piker emsd order id
|
||||
time_ns: int
|
||||
|
||||
|
@ -127,34 +167,39 @@ class BrokerdCancel(BaseModel):
|
|||
# for setting a unique order id then this value will be relayed back
|
||||
# on the emsd order request stream as the ``BrokerdOrderAck.reqid``
|
||||
# field
|
||||
reqid: Optional[Union[int, str]] = None
|
||||
reqid: int | str | None = None
|
||||
action: str = 'cancel'
|
||||
|
||||
|
||||
class BrokerdOrder(BaseModel):
|
||||
class BrokerdOrder(Struct):
|
||||
|
||||
action: str # {buy, sell}
|
||||
oid: str
|
||||
account: str
|
||||
time_ns: int
|
||||
|
||||
symbol: str # fqme
|
||||
price: float
|
||||
size: float
|
||||
|
||||
# TODO: if we instead rely on a +ve/-ve size to determine
|
||||
# the action we more or less don't need this field right?
|
||||
action: str = '' # {buy, sell}
|
||||
|
||||
# "broker request id": broker specific/internal order id if this is
|
||||
# None, creates a new order otherwise if the id is valid the backend
|
||||
# api must modify the existing matching order. If the broker allows
|
||||
# for setting a unique order id then this value will be relayed back
|
||||
# on the emsd order request stream as the ``BrokerdOrderAck.reqid``
|
||||
# field
|
||||
reqid: Optional[Union[int, str]] = None
|
||||
|
||||
symbol: str # symbol.<providername> ?
|
||||
price: float
|
||||
size: float
|
||||
reqid: int | str | None = None
|
||||
|
||||
|
||||
# ---------------
|
||||
# emsd <- brokerd
|
||||
# ---------------
|
||||
# requests *received* to ems from broker backend
|
||||
|
||||
|
||||
class BrokerdOrderAck(BaseModel):
|
||||
class BrokerdOrderAck(Struct):
|
||||
'''
|
||||
Immediate response to a brokerd order request providing the broker
|
||||
specific unique order id so that the EMS can associate this
|
||||
|
@ -162,102 +207,100 @@ class BrokerdOrderAck(BaseModel):
|
|||
``.oid`` (which is a uuid4).
|
||||
|
||||
'''
|
||||
name: str = 'ack'
|
||||
|
||||
# defined and provided by backend
|
||||
reqid: Union[int, str]
|
||||
reqid: int | str
|
||||
|
||||
# emsd id originally sent in matching request msg
|
||||
oid: str
|
||||
# TODO: do we need this?
|
||||
account: str = ''
|
||||
name: str = 'ack'
|
||||
|
||||
|
||||
class BrokerdStatus(BaseModel):
|
||||
class BrokerdStatus(Struct):
|
||||
|
||||
name: str = 'status'
|
||||
reqid: Union[int, str]
|
||||
time_ns: int
|
||||
reqid: int | str
|
||||
status: Literal[
|
||||
'open',
|
||||
'canceled',
|
||||
'pending',
|
||||
# 'error', # NOTE: use `BrokerdError`
|
||||
'closed',
|
||||
]
|
||||
name: str = 'status'
|
||||
|
||||
# XXX: should be best effort set for every update
|
||||
account: str = ''
|
||||
|
||||
# {
|
||||
# 'submitted',
|
||||
# 'cancelled',
|
||||
# 'filled',
|
||||
# }
|
||||
status: str
|
||||
|
||||
oid: str = ''
|
||||
# TODO: do we need this?
|
||||
account: str | None = None
|
||||
filled: float = 0.0
|
||||
reason: str = ''
|
||||
remaining: float = 0.0
|
||||
|
||||
# XXX: better design/name here?
|
||||
# flag that can be set to indicate a message for an order
|
||||
# event that wasn't originated by piker's emsd (eg. some external
|
||||
# trading system which does its own order control but that you
|
||||
# might want to "track" using piker UIs/systems).
|
||||
external: bool = False
|
||||
# external: bool = False
|
||||
|
||||
# XXX: not required schema as of yet
|
||||
broker_details: dict = {
|
||||
broker_details: dict = field(default_factory=lambda: {
|
||||
'name': '',
|
||||
}
|
||||
})
|
||||
|
||||
|
||||
class BrokerdFill(BaseModel):
|
||||
class BrokerdFill(Struct):
|
||||
'''
|
||||
A single message indicating a "fill-details" event from the broker
|
||||
if avaiable.
|
||||
A single message indicating a "fill-details" event from the
|
||||
broker if available.
|
||||
|
||||
'''
|
||||
name: str = 'fill'
|
||||
reqid: Union[int, str]
|
||||
time_ns: int
|
||||
|
||||
# order execution related
|
||||
action: str
|
||||
size: float
|
||||
price: float
|
||||
|
||||
broker_details: dict = {} # meta-data (eg. commisions etc.)
|
||||
|
||||
# brokerd timestamp required for order mode arrow placement on x-axis
|
||||
|
||||
# TODO: maybe int if we force ns?
|
||||
# we need to normalize this somehow since backends will use their
|
||||
# own format and likely across many disparate epoch clocks...
|
||||
time_ns: int
|
||||
broker_time: float
|
||||
reqid: int | str
|
||||
|
||||
# order execution related
|
||||
size: float
|
||||
price: float
|
||||
|
||||
name: str = 'fill'
|
||||
action: str | None = None
|
||||
broker_details: dict = {}  # meta-data (eg. commissions etc.)
|
||||
|
||||
|
||||
class BrokerdError(BaseModel):
|
||||
class BrokerdError(Struct):
|
||||
'''
|
||||
Optional error type that can be relayed to emsd for error handling.
|
||||
|
||||
This is still a TODO thing since we're not sure how to employ it yet.
|
||||
|
||||
'''
|
||||
name: str = 'error'
|
||||
oid: str
|
||||
reason: str
|
||||
|
||||
# TODO: drop this right?
|
||||
symbol: str | None = None
|
||||
|
||||
oid: str | None = None
|
||||
# if no brokerd order request was actually submitted (eg. we errored
|
||||
# at the ``pikerd`` layer) then there will be no ``reqid`` allocated.
|
||||
reqid: Optional[Union[int, str]] = None
|
||||
reqid: str | None = None
|
||||
|
||||
symbol: str
|
||||
reason: str
|
||||
name: str = 'error'
|
||||
broker_details: dict = {}
|
||||
|
||||
|
||||
class BrokerdPosition(BaseModel):
|
||||
'''Position update event from brokerd.
|
||||
# TODO: yeah, so we REALLY need to completely deprecate
|
||||
# this and use the `.accounting.Position` msg-type instead..
|
||||
class BrokerdPosition(Struct):
|
||||
'''
|
||||
Position update event from brokerd.
|
||||
|
||||
'''
|
||||
name: str = 'position'
|
||||
|
||||
broker: str
|
||||
account: str
|
||||
symbol: str
|
||||
currency: str
|
||||
size: float
|
||||
avg_price: float
|
||||
currency: str = ''
|
||||
name: str = 'position'
|
||||
|
|
|
@ -0,0 +1,93 @@
|
|||
# piker: trading gear for hackers
|
||||
# Copyright (C) Tyler Goodlet (in stewardship for pikers)
|
||||
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU Affero General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU Affero General Public License for more details.
|
||||
|
||||
# You should have received a copy of the GNU Affero General Public License
|
||||
# along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
"""
|
||||
Sub-sys module commons.
|
||||
|
||||
"""
|
||||
from collections import ChainMap
|
||||
from functools import partial
|
||||
from typing import Any
|
||||
|
||||
from ..log import (
|
||||
get_logger,
|
||||
get_console_log,
|
||||
)
|
||||
from piker.types import Struct
|
||||
subsys: str = 'piker.clearing'
|
||||
|
||||
log = get_logger(subsys)
|
||||
|
||||
get_console_log = partial(
|
||||
get_console_log,
|
||||
name=subsys,
|
||||
)
|
||||
|
||||
|
||||
class OrderDialogs(Struct):
|
||||
'''
|
||||
Order control dialog (and thus transaction) tracking via
|
||||
message recording.
|
||||
|
||||
Allows easily recording messages associated with a given set of
|
||||
order control transactions and looking up the latest field
|
||||
state using the entire (reverse chronological) msg flow.
|
||||
|
||||
'''
|
||||
_flows: dict[str, ChainMap] = {}
|
||||
|
||||
def add_msg(
|
||||
self,
|
||||
oid: str,
|
||||
msg: dict,
|
||||
) -> None:
|
||||
|
||||
# NOTE: manually enter a new map on the first msg add to
|
||||
# avoid creating one with an empty dict first entry in
|
||||
# `ChainMap.maps` which is the default if none passed at
|
||||
# init.
|
||||
cm: ChainMap = self._flows.get(oid)
|
||||
if cm:
|
||||
cm.maps.insert(0, msg)
|
||||
else:
|
||||
cm = ChainMap(msg)
|
||||
self._flows[oid] = cm
|
||||
|
||||
# TODO: wrap all this in the `collections.abc.Mapping` interface?
|
||||
def get(
|
||||
self,
|
||||
oid: str,
|
||||
|
||||
) -> ChainMap[str, Any]:
|
||||
'''
|
||||
Return the dialog `ChainMap` for provided id.
|
||||
|
||||
'''
|
||||
return self._flows.get(oid, None)
|
||||
|
||||
def pop(
|
||||
self,
|
||||
oid: str,
|
||||
|
||||
) -> ChainMap[str, Any]:
|
||||
'''
|
||||
Pop and thus remove the `ChainMap` containing the msg flow
|
||||
for the given order id.
|
||||
|
||||
'''
|
||||
if (flow := self._flows.pop(oid, None)) is None:
|
||||
log.warning(f'No flow found for oid: {oid}')
|
||||
|
||||
return flow
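# A minimal usage sketch of the msg-flow recording above (the oid and
# field values are illustrative only):
#
#   dialogs = OrderDialogs()
#   dialogs.add_msg('<oid-uuid4>', {'status': 'open', 'size': 1.0})
#   dialogs.add_msg('<oid-uuid4>', {'status': 'filled'})
#   flow = dialogs.get('<oid-uuid4>')
#   assert flow['status'] == 'filled'  # latest msg takes precedence
#   assert flow['size'] == 1.0         # older fields remain visible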
|
|
@ -1,93 +1,215 @@
|
|||
# piker: trading gear for hackers
|
||||
# Copyright (C) 2018-present Tyler Goodlet (in stewardship of pikers)
|
||||
# Copyright (C) 2018-present Tyler Goodlet
|
||||
# (in stewardship for pikers, everywhere.)
|
||||
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU Affero General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
# This program is free software: you can redistribute it and/or
|
||||
# modify it under the terms of the GNU Affero General Public
|
||||
# License as published by the Free Software Foundation, either
|
||||
# version 3 of the License, or (at your option) any later version.
|
||||
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU Affero General Public License for more details.
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
# Affero General Public License for more details.
|
||||
|
||||
# You should have received a copy of the GNU Affero General Public License
|
||||
# along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
# You should have received a copy of the GNU Affero General Public
|
||||
# License along with this program. If not, see
|
||||
# <https://www.gnu.org/licenses/>.
|
||||
|
||||
'''
|
||||
CLI commons.
|
||||
|
||||
'''
|
||||
import os
|
||||
from pprint import pformat
|
||||
# from contextlib import AsyncExitStack
|
||||
from types import ModuleType
|
||||
|
||||
import click
|
||||
import trio
|
||||
import tractor
|
||||
from tractor._multiaddr import parse_maddr
|
||||
|
||||
from ..log import get_console_log, get_logger, colorize_json
|
||||
from ..log import (
|
||||
get_console_log,
|
||||
get_logger,
|
||||
colorize_json,
|
||||
)
|
||||
from ..brokers import get_brokermod
|
||||
from .._daemon import _tractor_kwargs
|
||||
from ..service import (
|
||||
_default_registry_host,
|
||||
_default_registry_port,
|
||||
)
|
||||
from .. import config
|
||||
|
||||
|
||||
log = get_logger('cli')
|
||||
DEFAULT_BROKER = 'questrade'
|
||||
log = get_logger('piker.cli')
|
||||
|
||||
|
||||
def load_trans_eps(
|
||||
network: dict | None = None,
|
||||
maddrs: list[tuple] | None = None,
|
||||
|
||||
) -> dict[str, dict[str, dict]]:
|
||||
|
||||
# transport-oriented endpoint multi-addresses
|
||||
eps: dict[
|
||||
str, # service name, eg. `pikerd`, `emsd`..
|
||||
|
||||
# libp2p style multi-addresses parsed into prot layers
|
||||
list[dict[str, str | int]]
|
||||
] = {}
|
||||
|
||||
if (
|
||||
network
|
||||
and not maddrs
|
||||
):
|
||||
# load network section and (attempt to) connect all endpoints
|
||||
# which are reachable B)
|
||||
for key, maddrs in network.items():
|
||||
match key:
|
||||
|
||||
# TODO: resolve table across multiple discov
|
||||
# prots Bo
|
||||
case 'resolv':
|
||||
pass
|
||||
|
||||
case 'pikerd':
|
||||
dname: str = key
|
||||
for maddr in maddrs:
|
||||
layers: dict = parse_maddr(maddr)
|
||||
eps.setdefault(
|
||||
dname,
|
||||
[],
|
||||
).append(layers)
|
||||
|
||||
elif maddrs:
|
||||
# presume user is manually specifying the root actor ep.
|
||||
eps['pikerd'] = [parse_maddr(maddr)]
|
||||
|
||||
return eps
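To make the returned endpoint-table shape concrete (hypothetical address values; the layer keys are assumed from the `layers['ipv4']['addr']` / `layers['tcp']['port']` reads in `pikerd()` below), a single `pikerd` multiaddr such as `/ipv4/127.0.0.1/tcp/6116` would parse into roughly:

# hypothetical `load_trans_eps()` output for one bound endpoint
eps: dict[str, list[dict]] = {
    'pikerd': [
        {
            'ipv4': {'addr': '127.0.0.1'},
            'tcp': {'port': 6116},
        },
    ],
}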
|
||||
|
||||
|
||||
@click.command()
|
||||
@click.option('--loglevel', '-l', default='warning', help='Logging level')
|
||||
@click.option('--tl', is_flag=True, help='Enable tractor logging')
|
||||
@click.option('--pdb', is_flag=True, help='Enable tractor debug mode')
|
||||
@click.option('--host', '-h', default='127.0.0.1', help='Host address to bind')
|
||||
@click.option(
|
||||
'--tsdb',
|
||||
is_flag=True,
|
||||
help='Enable local ``marketstore`` instance'
|
||||
'--loglevel',
|
||||
'-l',
|
||||
default='warning',
|
||||
help='Logging level',
|
||||
)
|
||||
def pikerd(loglevel, host, tl, pdb, tsdb):
|
||||
@click.option(
|
||||
'--tl',
|
||||
is_flag=True,
|
||||
help='Enable tractor-runtime logs',
|
||||
)
|
||||
@click.option(
|
||||
'--pdb',
|
||||
is_flag=True,
|
||||
help='Enable tractor debug mode',
|
||||
)
|
||||
@click.option(
|
||||
'--maddr',
|
||||
'-m',
|
||||
default=None,
|
||||
help='Multiaddrs to bind or contact',
|
||||
)
|
||||
# @click.option(
|
||||
# '--tsdb',
|
||||
# is_flag=True,
|
||||
# help='Enable local ``marketstore`` instance'
|
||||
# )
|
||||
# @click.option(
|
||||
# '--es',
|
||||
# is_flag=True,
|
||||
# help='Enable local ``elasticsearch`` instance'
|
||||
# )
|
||||
def pikerd(
|
||||
maddr: list[str] | None,
|
||||
loglevel: str,
|
||||
tl: bool,
|
||||
pdb: bool,
|
||||
# tsdb: bool,
|
||||
# es: bool,
|
||||
):
|
||||
'''
|
||||
Spawn the piker broker-daemon.
|
||||
|
||||
'''
|
||||
from .._daemon import open_pikerd
|
||||
log = get_console_log(loglevel)
|
||||
from tractor.devx import maybe_open_crash_handler
|
||||
with maybe_open_crash_handler(pdb=pdb):
|
||||
log = get_console_log(loglevel, name='cli')
|
||||
|
||||
if pdb:
|
||||
log.warning((
|
||||
"\n"
|
||||
"!!! You have enabled daemon DEBUG mode !!!\n"
|
||||
"If a daemon crashes it will likely block"
|
||||
" the service until resumed from console!\n"
|
||||
"!!! YOU HAVE ENABLED DAEMON DEBUG MODE !!!\n"
|
||||
"When a `piker` daemon crashes it will block the "
|
||||
"task-thread until resumed from console!\n"
|
||||
"\n"
|
||||
))
|
||||
|
||||
# service-actor registry endpoint socket-address set
|
||||
regaddrs: list[tuple[str, int]] = []
|
||||
|
||||
conf, _ = config.load(
|
||||
conf_name='conf',
|
||||
)
|
||||
network: dict = conf.get('network')
|
||||
if (
|
||||
network is None
|
||||
and not maddr
|
||||
):
|
||||
regaddrs = [(
|
||||
_default_registry_host,
|
||||
_default_registry_port,
|
||||
)]
|
||||
|
||||
else:
|
||||
eps: dict = load_trans_eps(
|
||||
network,
|
||||
maddr,
|
||||
)
|
||||
for layers in eps['pikerd']:
|
||||
regaddrs.append((
|
||||
layers['ipv4']['addr'],
|
||||
layers['tcp']['port'],
|
||||
))
|
||||
|
||||
from .. import service
|
||||
|
||||
async def main():
|
||||
service_mngr: service.Services
|
||||
|
||||
async with (
|
||||
open_pikerd(
|
||||
service.open_pikerd(
|
||||
registry_addrs=regaddrs,
|
||||
loglevel=loglevel,
|
||||
debug_mode=pdb,
|
||||
), # normally delivers a ``Services`` handle
|
||||
trio.open_nursery() as n,
|
||||
|
||||
) as service_mngr, # normally delivers a ``Services`` handle
|
||||
|
||||
# AsyncExitStack() as stack,
|
||||
):
|
||||
if tsdb:
|
||||
from piker.data._ahab import start_ahab
|
||||
from piker.data.marketstore import start_marketstore
|
||||
# TODO: spawn all other sub-actor daemons according to
|
||||
# multiaddress endpoint spec defined by user config
|
||||
assert service_mngr
|
||||
|
||||
log.info('Spawning `marketstore` supervisor')
|
||||
ctn_ready, config, (cid, pid) = await n.start(
|
||||
start_ahab,
|
||||
'marketstored',
|
||||
start_marketstore,
|
||||
# if tsdb:
|
||||
# dname, conf = await stack.enter_async_context(
|
||||
# service.marketstore.start_ahab_daemon(
|
||||
# service_mngr,
|
||||
# loglevel=loglevel,
|
||||
# )
|
||||
# )
|
||||
# log.info(f'TSDB `{dname}` up with conf:\n{conf}')
|
||||
|
||||
)
|
||||
log.info(
|
||||
f'`marketstore` up!\n'
|
||||
f'`marketstored` pid: {pid}\n'
|
||||
f'docker container id: {cid}\n'
|
||||
f'config: {pformat(config)}'
|
||||
)
|
||||
# if es:
|
||||
# dname, conf = await stack.enter_async_context(
|
||||
# service.elastic.start_ahab_daemon(
|
||||
# service_mngr,
|
||||
# loglevel=loglevel,
|
||||
# )
|
||||
# )
|
||||
# log.info(f'DB `{dname}` up with conf:\n{conf}')
|
||||
|
||||
await trio.sleep_forever()
|
||||
|
||||
|
@ -97,25 +219,77 @@ def pikerd(loglevel, host, tl, pdb, tsdb):
|
|||
@click.group(context_settings=config._context_defaults)
|
||||
@click.option(
|
||||
'--brokers', '-b',
|
||||
default=[DEFAULT_BROKER],
|
||||
default=None,
|
||||
multiple=True,
|
||||
help='Broker backend to use'
|
||||
)
|
||||
@click.option('--loglevel', '-l', default='warning', help='Logging level')
|
||||
@click.option('--tl', is_flag=True, help='Enable tractor logging')
|
||||
@click.option('--configdir', '-c', help='Configuration directory')
|
||||
@click.option(
|
||||
'--pdb',
|
||||
is_flag=True,
|
||||
help='Enable runtime debug mode ',
|
||||
)
|
||||
@click.option(
|
||||
'--maddr',
|
||||
'-m',
|
||||
default=None,
|
||||
multiple=True,
|
||||
help='Multiaddr to bind',
|
||||
)
|
||||
@click.option(
|
||||
'--regaddr',
|
||||
'-r',
|
||||
default=None,
|
||||
help='Registrar addr to contact',
|
||||
)
|
||||
@click.pass_context
|
||||
def cli(ctx, brokers, loglevel, tl, configdir):
|
||||
def cli(
|
||||
ctx: click.Context,
|
||||
brokers: list[str],
|
||||
loglevel: str,
|
||||
tl: bool,
|
||||
configdir: str,
|
||||
pdb: bool,
|
||||
|
||||
# TODO: make these list[str] with multiple -m maddr0 -m maddr1
|
||||
maddr: list[str],
|
||||
regaddr: str,
|
||||
|
||||
) -> None:
|
||||
if configdir is not None:
|
||||
assert os.path.isdir(configdir), f"`{configdir}` is not a valid path"
|
||||
config._override_config_dir(configdir)
|
||||
|
||||
# TODO: for typer see
|
||||
# https://typer.tiangolo.com/tutorial/commands/context/
|
||||
ctx.ensure_object(dict)
|
||||
|
||||
if len(brokers) == 1:
|
||||
brokermods = [get_brokermod(brokers[0])]
|
||||
else:
|
||||
brokermods = [get_brokermod(broker) for broker in brokers]
|
||||
if not brokers:
|
||||
# (try to) load all (supposedly) supported data/broker backends
|
||||
from piker.brokers import __brokers__
|
||||
brokers = __brokers__
|
||||
|
||||
brokermods: dict[str, ModuleType] = {
|
||||
broker: get_brokermod(broker) for broker in brokers
|
||||
}
|
||||
assert brokermods
|
||||
|
||||
# TODO: load endpoints from `conf::[network].pikerd`
|
||||
# - pikerd vs. regd, separate registry daemon?
|
||||
# - expose datad vs. brokerd?
|
||||
# - bind emsd with certain perms on public iface?
|
||||
regaddrs: list[tuple[str, int]] = regaddr or [(
|
||||
_default_registry_host,
|
||||
_default_registry_port,
|
||||
)]
|
||||
|
||||
# TODO: factor [network] section parsing out from pikerd
|
||||
# above and call it here as well.
|
||||
# if maddr:
|
||||
# for addr in maddr:
|
||||
# layers: dict = parse_maddr(addr)
|
||||
|
||||
ctx.obj.update({
|
||||
'brokers': brokers,
|
||||
|
@ -125,6 +299,12 @@ def cli(ctx, brokers, loglevel, tl, configdir):
|
|||
'log': get_console_log(loglevel),
|
||||
'confdir': config._config_dir,
|
||||
'wl_path': config._watchlists_data_path,
|
||||
'registry_addrs': regaddrs,
|
||||
'pdb': pdb, # debug mode flag
|
||||
|
||||
# TODO: endpoint parsing, pinging and binding
|
||||
# on no existing server.
|
||||
# 'maddrs': maddr,
|
||||
})
|
||||
|
||||
# allow enabling same loglevel in ``tractor`` machinery
|
||||
|
@ -134,38 +314,52 @@ def cli(ctx, brokers, loglevel, tl, configdir):
|
|||
|
||||
@cli.command()
|
||||
@click.option('--tl', is_flag=True, help='Enable tractor logging')
|
||||
@click.argument('names', nargs=-1, required=False)
|
||||
@click.argument('ports', nargs=-1, required=False)
|
||||
@click.pass_obj
|
||||
def services(config, tl, names):
|
||||
def services(config, tl, ports):
|
||||
|
||||
from ..service import (
|
||||
open_piker_runtime,
|
||||
_default_registry_port,
|
||||
_default_registry_host,
|
||||
)
|
||||
|
||||
host = _default_registry_host
|
||||
if not ports:
|
||||
ports = [_default_registry_port]
|
||||
|
||||
async def list_services():
|
||||
|
||||
async with tractor.get_arbiter(
|
||||
*_tractor_kwargs['arbiter_addr']
|
||||
) as portal:
|
||||
nonlocal host
|
||||
async with (
|
||||
open_piker_runtime(
|
||||
name='service_query',
|
||||
loglevel=config['loglevel'] if tl else None,
|
||||
),
|
||||
tractor.get_arbiter(
|
||||
host=host,
|
||||
port=ports[0]
|
||||
) as portal
|
||||
):
|
||||
registry = await portal.run_from_ns('self', 'get_registry')
|
||||
json_d = {}
|
||||
for key, socket in registry.items():
|
||||
# name, uuid = uid
|
||||
host, port = socket
|
||||
json_d[key] = f'{host}:{port}'
|
||||
click.echo(f"{colorize_json(json_d)}")
|
||||
|
||||
tractor.run(
|
||||
list_services,
|
||||
name='service_query',
|
||||
loglevel=config['loglevel'] if tl else None,
|
||||
arbiter_addr=_tractor_kwargs['arbiter_addr'],
|
||||
)
|
||||
trio.run(list_services)
|
||||
|
||||
|
||||
def _load_clis() -> None:
|
||||
from ..data import marketstore # noqa
|
||||
from ..data import cli # noqa
|
||||
# from ..service import elastic # noqa
|
||||
from ..brokers import cli # noqa
|
||||
from ..ui import cli # noqa
|
||||
from ..watchlists import cli # noqa
|
||||
|
||||
# typer implemented
|
||||
from ..storage import cli # noqa
|
||||
from ..accounting import cli # noqa
|
||||
|
||||
|
||||
# load downstream cli modules
|
||||
_load_clis()
|
||||
|
|
255
piker/config.py
|
@ -15,27 +15,42 @@
|
|||
# along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
"""
|
||||
Broker configuration mgmt.
|
||||
Platform configuration (files) mgmt.
|
||||
|
||||
"""
|
||||
import platform
|
||||
import sys
|
||||
import os
|
||||
from os.path import dirname
|
||||
import shutil
|
||||
from typing import Optional
|
||||
from typing import (
|
||||
Callable,
|
||||
MutableMapping,
|
||||
)
|
||||
from pathlib import Path
|
||||
|
||||
from bidict import bidict
|
||||
import toml
|
||||
import tomlkit
|
||||
try:
|
||||
import tomllib
|
||||
except ModuleNotFoundError:
|
||||
import tomli as tomllib
|
||||
|
||||
|
||||
from .log import get_logger
|
||||
|
||||
log = get_logger('broker-config')
|
||||
|
||||
|
||||
# taken from ``click`` since apparently they have some
|
||||
# XXX NOTE: taken from ``click`` since apparently they have some
|
||||
# super weirdness with sigint and sudo..no clue
|
||||
def get_app_dir(app_name, roaming=True, force_posix=False):
|
||||
# we're probably going to slowly just modify it to our own version over
|
||||
# time..
|
||||
def get_app_dir(
|
||||
app_name: str,
|
||||
roaming: bool = True,
|
||||
force_posix: bool = False,
|
||||
|
||||
) -> str:
|
||||
r"""Returns the config folder for the application. The default behavior
|
||||
is to return whatever is most appropriate for the operating system.
|
||||
|
||||
|
@ -74,7 +89,30 @@ def get_app_dir(app_name, roaming=True, force_posix=False):
|
|||
def _posixify(name):
|
||||
return "-".join(name.split()).lower()
|
||||
|
||||
# if WIN:
|
||||
# NOTE: for testing with `pytest` we leverage the `tmp_dir`
|
||||
# fixture to generate (and clean up) a test-request-specific
|
||||
# directory for isolated configuration files such that,
|
||||
# - multiple tests can run (possibly in parallel) without data races
|
||||
# on the config state,
|
||||
# - we don't need to ever worry about leaking configs into the
|
||||
# system thus avoiding needing to manage config cleanup fixtures or
|
||||
# other bothers (since obviously `tmp_dir` cleans up after itself).
|
||||
#
|
||||
# In order to "pass down" the test dir path to all (sub-)actors in
|
||||
# the actor tree we preload the root actor's runtime vars state (an
|
||||
# internal mechanism for inheriting state down an actor tree in
|
||||
# `tractor`) with the testing dir and check for it whenever we
|
||||
# detect `pytest` is being used (which it isn't under normal
|
||||
# operation).
|
||||
if "pytest" in sys.modules:
|
||||
import tractor
|
||||
actor = tractor.current_actor(err_on_no_runtime=False)
|
||||
if actor: # runtime is up
|
||||
rvs = tractor._state._runtime_vars
|
||||
testdirpath = Path(rvs['piker_vars']['piker_test_dir'])
|
||||
assert testdirpath.exists(), 'piker test harness might be borked!?'
|
||||
app_name = str(testdirpath)
|
||||
|
||||
if platform.system() == 'Windows':
|
||||
key = "APPDATA" if roaming else "LOCALAPPDATA"
|
||||
folder = os.environ.get(key)
|
||||
|
@ -94,28 +132,38 @@ def get_app_dir(app_name, roaming=True, force_posix=False):
|
|||
)
|
||||
|
||||
|
||||
_config_dir = _click_config_dir = get_app_dir('piker')
|
||||
_parent_user = os.environ.get('SUDO_USER')
|
||||
_click_config_dir: Path = Path(get_app_dir('piker'))
|
||||
_config_dir: Path = _click_config_dir
|
||||
|
||||
if _parent_user:
|
||||
non_root_user_dir = os.path.expanduser(
|
||||
f'~{_parent_user}'
|
||||
# NOTE: when using `sudo` we attempt to determine the non-root user
|
||||
# and still use their normal config dir.
|
||||
if (
|
||||
(_parent_user := os.environ.get('SUDO_USER'))
|
||||
and
|
||||
_parent_user != 'root'
|
||||
):
|
||||
non_root_user_dir = Path(
|
||||
os.path.expanduser(f'~{_parent_user}')
|
||||
)
|
||||
root = 'root'
|
||||
root: str = 'root'
|
||||
_ccds: str = str(_click_config_dir) # click config dir as string
|
||||
i_tail: int = int(_ccds.rfind(root) + len(root))
|
||||
_config_dir = (
|
||||
non_root_user_dir +
|
||||
_click_config_dir[
|
||||
_click_config_dir.rfind(root) + len(root):
|
||||
]
|
||||
non_root_user_dir
|
||||
/
|
||||
Path(_ccds[i_tail+1:]) # +1 to capture trailing '/'
|
||||
)
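A worked example of the remap above with assumed paths: when the CLI is invoked via `sudo` by user `alice`, click resolves `/root/.config/piker` and the tail after `'root'` is re-rooted under the invoking user's home.

from pathlib import Path

# sketch of the same path surgery with concrete (assumed) values
_ccds = '/root/.config/piker'            # click's app dir when euid == 0
non_root_user_dir = Path('/home/alice')  # expanded from $SUDO_USER
root = 'root'
i_tail = _ccds.rfind(root) + len(root)   # index just past 'root'

config_dir = non_root_user_dir / Path(_ccds[i_tail + 1:])
assert str(config_dir) == '/home/alice/.config/piker'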
|
||||
|
||||
|
||||
_conf_names: set[str] = {
|
||||
'brokers',
|
||||
'trades',
|
||||
'watchlists',
|
||||
'conf', # god config
|
||||
'brokers', # sec backend deatz
|
||||
'watchlists', # (user defined) market lists
|
||||
}
|
||||
|
||||
_watchlists_data_path = os.path.join(_config_dir, 'watchlists.json')
|
||||
# TODO: probably drop all this super legacy, questrade specific,
|
||||
# config stuff XD ?
|
||||
_watchlists_data_path: Path = _config_dir / Path('watchlists.json')
|
||||
_context_defaults = dict(
|
||||
default_map={
|
||||
# Questrade specific quote poll rates
|
||||
|
@ -129,6 +177,14 @@ _context_defaults = dict(
|
|||
)
|
||||
|
||||
|
||||
class ConfigurationError(Exception):
|
||||
'Misconfigured settings, likely in a TOML file.'
|
||||
|
||||
|
||||
class NoSignature(ConfigurationError):
|
||||
'No credentials setup for broker backend!'
|
||||
|
||||
|
||||
def _override_config_dir(
|
||||
path: str
|
||||
) -> None:
|
||||
|
@ -143,75 +199,130 @@ def _conf_fn_w_ext(
|
|||
return f'{name}.toml'
|
||||
|
||||
|
||||
def get_conf_dir() -> Path:
|
||||
'''
|
||||
Return the user configuration directory ``Path``
|
||||
on the local filesystem.
|
||||
|
||||
'''
|
||||
return _config_dir
|
||||
|
||||
|
||||
def get_conf_path(
|
||||
conf_name: str = 'brokers',
|
||||
|
||||
) -> str:
|
||||
"""Return the default config path normally under
|
||||
``~/.config/piker`` on linux.
|
||||
) -> Path:
|
||||
'''
|
||||
Return the top-level default config path normally under
|
||||
``~/.config/piker`` on linux for a given ``conf_name``, the config
|
||||
name.
|
||||
|
||||
Contains files such as:
|
||||
- brokers.toml
|
||||
- watchlists.toml
|
||||
- trades.toml
|
||||
|
||||
# maybe coming soon ;)
|
||||
- signals.toml
|
||||
- strats.toml
|
||||
|
||||
"""
|
||||
assert conf_name in _conf_names
|
||||
'''
|
||||
if 'account.' not in conf_name:
|
||||
assert str(conf_name) in _conf_names
|
||||
|
||||
fn = _conf_fn_w_ext(conf_name)
|
||||
return os.path.join(
|
||||
_config_dir,
|
||||
fn,
|
||||
)
|
||||
return _config_dir / Path(fn)
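Usage sketch (assuming the default linux config root): the returned path is simply the config dir joined with the `.toml`-suffixed name.

from piker import config

path = config.get_conf_path('brokers')
# -> e.g. Path('/home/<user>/.config/piker/brokers.toml')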
|
||||
|
||||
|
||||
def repodir():
|
||||
def repodir() -> Path:
|
||||
'''
|
||||
Return the abspath to the repo directory.
|
||||
Return the abspath as ``Path`` to the git repo's root dir.
|
||||
|
||||
'''
|
||||
dirpath = os.path.abspath(
|
||||
# we're 3 levels down in **this** module file
|
||||
dirname(dirname(os.path.realpath(__file__)))
|
||||
)
|
||||
return dirpath
|
||||
repodir: Path = Path(__file__).absolute().parent.parent
|
||||
confdir: Path = repodir / 'config'
|
||||
|
||||
if not confdir.is_dir():
|
||||
# prolly inside stupid GH actions CI..
|
||||
repodir: Path = Path(os.environ.get('GITHUB_WORKSPACE'))
|
||||
confdir: Path = repodir / 'config'
|
||||
|
||||
assert confdir.is_dir(), f'{confdir} DNE, {repodir} is likely incorrect!'
|
||||
return repodir
|
||||
|
||||
|
||||
def load(
|
||||
conf_name: str = 'brokers',
|
||||
path: str = None
|
||||
# NOTE: always appended with .toml suffix
|
||||
conf_name: str = 'conf',
|
||||
path: Path | None = None,
|
||||
|
||||
) -> (dict, str):
|
||||
decode: Callable[
|
||||
[str | bytes,],
|
||||
MutableMapping,
|
||||
] = tomllib.loads,
|
||||
|
||||
touch_if_dne: bool = False,
|
||||
|
||||
**tomlkws,
|
||||
|
||||
) -> tuple[dict, Path]:
|
||||
'''
|
||||
Load config file by name.
|
||||
|
||||
'''
|
||||
path = path or get_conf_path(conf_name)
|
||||
if not os.path.isfile(path):
|
||||
fn = _conf_fn_w_ext(conf_name)
|
||||
If desired config is not in the top level piker-user config path then
|
||||
pass the ``path: Path`` explicitly.
|
||||
|
||||
template = os.path.join(
|
||||
repodir(),
|
||||
'config',
|
||||
fn
|
||||
'''
|
||||
# create the $HOME/.config/piker dir if dne
|
||||
if not _config_dir.is_dir():
|
||||
_config_dir.mkdir(
|
||||
parents=True,
|
||||
exist_ok=True,
|
||||
)
|
||||
# try to copy in a template config to the user's directory
|
||||
# if one exists.
|
||||
if os.path.isfile(template):
|
||||
|
||||
path_provided: bool = path is not None
|
||||
path: Path = path or get_conf_path(conf_name)
|
||||
|
||||
if (
|
||||
not path.is_file()
|
||||
and touch_if_dne
|
||||
):
|
||||
# only do a template if no path provided,
|
||||
# just touch an empty file with same name.
|
||||
if path_provided:
|
||||
with path.open(mode='x'):
|
||||
pass
|
||||
|
||||
# try to copy in a template config to the user's dir if one
|
||||
# exists.
|
||||
else:
|
||||
fn: str = _conf_fn_w_ext(conf_name)
|
||||
template: Path = repodir() / 'config' / fn
|
||||
if template.is_file():
|
||||
shutil.copyfile(template, path)
|
||||
|
||||
config = toml.load(path)
|
||||
elif fn and template:
|
||||
assert template.is_file(), f'{template} is not a file!?'
|
||||
|
||||
assert path.is_file(), f'Config file {path} not created!?'
|
||||
|
||||
with path.open(mode='r') as fp:
|
||||
config: dict = decode(
|
||||
fp.read(),
|
||||
**tomlkws,
|
||||
)
|
||||
|
||||
log.debug(f"Read config file {path}")
|
||||
return config, path
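A round-trip sketch tying `load()` and `write()` together (function names per the definitions in this diff; the section and key values are made up for illustration):

from piker import config

conf, path = config.load(
    conf_name='brokers',
    touch_if_dne=True,  # create/copy-in the file if it doesn't exist yet
)

# made-up example edit
conf.setdefault('kraken', {})['use_testnet'] = True

config.write(
    conf,
    name='brokers',  # resolves the same path via get_conf_path()
)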
|
||||
|
||||
|
||||
def write(
|
||||
config: dict, # toml config as dict
|
||||
name: str = 'brokers',
|
||||
path: str = None,
|
||||
|
||||
name: str | None = None,
|
||||
path: Path | None = None,
|
||||
fail_empty: bool = True,
|
||||
|
||||
**toml_kwargs,
|
||||
|
||||
) -> None:
|
||||
'''
|
||||
|
@ -220,31 +331,41 @@ def write(
|
|||
Create a ``brokers.ini`` file if one does not exist.
|
||||
|
||||
'''
|
||||
path = path or get_conf_path(name)
|
||||
dirname = os.path.dirname(path)
|
||||
if not os.path.isdir(dirname):
|
||||
if name:
|
||||
path: Path = path or get_conf_path(name)
|
||||
dirname: Path = path.parent
|
||||
if not dirname.is_dir():
|
||||
log.debug(f"Creating config dir {_config_dir}")
|
||||
os.makedirs(dirname)
|
||||
dirname.mkdir()
|
||||
|
||||
if not config:
|
||||
if (
|
||||
not config
|
||||
and fail_empty
|
||||
):
|
||||
raise ValueError(
|
||||
"Watch out you're trying to write a blank config!")
|
||||
"Watch out you're trying to write a blank config!"
|
||||
)
|
||||
|
||||
log.debug(
|
||||
f"Writing config `{name}` file to:\n"
|
||||
f"{path}"
|
||||
)
|
||||
with open(path, 'w') as cf:
|
||||
return toml.dump(config, cf)
|
||||
with path.open(mode='w') as fp:
|
||||
return tomlkit.dump( # preserve style on write B)
|
||||
config,
|
||||
fp,
|
||||
**toml_kwargs,
|
||||
)
|
||||
|
||||
|
||||
def load_accounts(
|
||||
providers: list[str] | None = None
|
||||
|
||||
providers: Optional[list[str]] = None
|
||||
) -> bidict[str, str | None]:
|
||||
|
||||
) -> bidict[str, Optional[str]]:
|
||||
|
||||
conf, path = load()
|
||||
conf, path = load(
|
||||
conf_name='brokers',
|
||||
)
|
||||
accounts = bidict()
|
||||
for provider_name, section in conf.items():
|
||||
accounts_section = section.get('accounts')
|
||||
|
|
|
@ -22,7 +22,7 @@ and storing data from your brokers as well as
|
|||
sharing live streams over a network.
|
||||
|
||||
"""
|
||||
from ._normalize import iterticks
|
||||
from .ticktools import iterticks
|
||||
from ._sharedmem import (
|
||||
maybe_open_shm_array,
|
||||
attach_shm_array,
|
||||
|
@ -30,19 +30,42 @@ from ._sharedmem import (
|
|||
get_shm_token,
|
||||
ShmArray,
|
||||
)
|
||||
from .feed import (
|
||||
open_feed,
|
||||
_setup_persistent_brokerd,
|
||||
from ._source import (
|
||||
def_iohlcv_fields,
|
||||
def_ohlcv_fields,
|
||||
)
|
||||
from .feed import (
|
||||
Feed,
|
||||
open_feed,
|
||||
)
|
||||
from .flows import Flume
|
||||
from ._symcache import (
|
||||
SymbologyCache,
|
||||
open_symcache,
|
||||
get_symcache,
|
||||
match_from_pairs,
|
||||
)
|
||||
from ._sampling import open_sample_stream
|
||||
from ..types import Struct
|
||||
|
||||
|
||||
__all__ = [
|
||||
__all__: list[str] = [
|
||||
'Flume',
|
||||
'Feed',
|
||||
'open_feed',
|
||||
'ShmArray',
|
||||
'iterticks',
|
||||
'maybe_open_shm_array',
|
||||
'match_from_pairs',
|
||||
'attach_shm_array',
|
||||
'open_shm_array',
|
||||
'get_shm_token',
|
||||
'_setup_persistent_brokerd',
|
||||
'def_iohlcv_fields',
|
||||
'def_ohlcv_fields',
|
||||
'open_symcache',
|
||||
'open_sample_stream',
|
||||
'get_symcache',
|
||||
'Struct',
|
||||
'SymbologyCache',
|
||||
'types',
|
||||
]
|
||||
|
|
|
@ -1,385 +0,0 @@
|
|||
# piker: trading gear for hackers
|
||||
# Copyright (C) 2018-present Tyler Goodlet (in stewardship of pikers)
|
||||
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU Affero General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU Affero General Public License for more details.
|
||||
|
||||
# You should have received a copy of the GNU Affero General Public License
|
||||
# along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
'''
|
||||
Supervisor for docker with included specific-image service helpers.
|
||||
|
||||
'''
|
||||
import os
|
||||
import time
|
||||
from typing import (
|
||||
Optional,
|
||||
Callable,
|
||||
Any,
|
||||
)
|
||||
from contextlib import asynccontextmanager as acm
|
||||
|
||||
import trio
|
||||
from trio_typing import TaskStatus
|
||||
import tractor
|
||||
from tractor.msg import NamespacePath
|
||||
import docker
|
||||
import json
|
||||
from docker.models.containers import Container as DockerContainer
|
||||
from docker.errors import (
|
||||
DockerException,
|
||||
APIError,
|
||||
)
|
||||
from requests.exceptions import ConnectionError, ReadTimeout
|
||||
|
||||
from ..log import get_logger, get_console_log
|
||||
from .. import config
|
||||
|
||||
log = get_logger(__name__)
|
||||
|
||||
|
||||
class DockerNotStarted(Exception):
|
||||
'Prolly you dint start da daemon bruh'
|
||||
|
||||
|
||||
class ContainerError(RuntimeError):
|
||||
'Error reported via app-container logging level'
|
||||
|
||||
|
||||
@acm
|
||||
async def open_docker(
|
||||
url: Optional[str] = None,
|
||||
**kwargs,
|
||||
|
||||
) -> docker.DockerClient:
|
||||
|
||||
client: Optional[docker.DockerClient] = None
|
||||
try:
|
||||
client = docker.DockerClient(
|
||||
base_url=url,
|
||||
**kwargs
|
||||
) if url else docker.from_env(**kwargs)
|
||||
|
||||
yield client
|
||||
|
||||
except (
|
||||
DockerException,
|
||||
APIError,
|
||||
) as err:
|
||||
|
||||
def unpack_msg(err: Exception) -> str:
|
||||
args = getattr(err, 'args', None)
|
||||
if args:
|
||||
return args
|
||||
else:
|
||||
return str(err)
|
||||
|
||||
# could be more specific so let's check if it's just perms.
|
||||
if err.args:
|
||||
errs = err.args
|
||||
for err in errs:
|
||||
msg = unpack_msg(err)
|
||||
if 'PermissionError' in msg:
|
||||
raise DockerException('You dint run as root yo!')
|
||||
|
||||
elif 'FileNotFoundError' in msg:
|
||||
raise DockerNotStarted('Did you start da service sister?')
|
||||
|
||||
# not perms?
|
||||
raise
|
||||
|
||||
finally:
|
||||
if client:
|
||||
client.close()
|
||||
|
||||
|
||||
class Container:
|
||||
'''
|
||||
Wrapper around a ``docker.models.containers.Container`` to include
|
||||
log capture and relay through our native logging system and helper
|
||||
method(s) for cancellation/teardown.
|
||||
|
||||
'''
|
||||
def __init__(
|
||||
self,
|
||||
cntr: DockerContainer,
|
||||
) -> None:
|
||||
|
||||
self.cntr = cntr
|
||||
# log msg de-duplication
|
||||
self.seen_so_far = set()
|
||||
|
||||
async def process_logs_until(
|
||||
self,
|
||||
patt: str,
|
||||
bp_on_msg: bool = False,
|
||||
) -> bool:
|
||||
'''
|
||||
Attempt to capture container log messages and relay through our
|
||||
native logging system.
|
||||
|
||||
'''
|
||||
seen_so_far = self.seen_so_far
|
||||
|
||||
while True:
|
||||
logs = self.cntr.logs()
|
||||
entries = logs.decode().split('\n')
|
||||
for entry in entries:
|
||||
|
||||
# ignore null lines
|
||||
if not entry:
|
||||
continue
|
||||
|
||||
try:
|
||||
record = json.loads(entry.strip())
|
||||
except json.JSONDecodeError:
|
||||
if 'Error' in entry:
|
||||
raise RuntimeError(entry)
|
||||
raise
|
||||
|
||||
msg = record['msg']
|
||||
level = record['level']
|
||||
if msg and entry not in seen_so_far:
|
||||
seen_so_far.add(entry)
|
||||
if bp_on_msg:
|
||||
await tractor.breakpoint()
|
||||
|
||||
getattr(log, level, log.error)(f'{msg}')
|
||||
|
||||
# print(f'level: {level}')
|
||||
if level in ('error', 'fatal'):
|
||||
raise ContainerError(msg)
|
||||
|
||||
if patt in msg:
|
||||
return True
|
||||
|
||||
# do a checkpoint so we don't block if cancelled B)
|
||||
await trio.sleep(0.01)
|
||||
|
||||
return False
|
||||
|
||||
def try_signal(
|
||||
self,
|
||||
signal: str = 'SIGINT',
|
||||
|
||||
) -> bool:
|
||||
try:
|
||||
# XXX: market store doesn't seem to shutdown nicely all the
|
||||
# time with this (maybe because there are still open grpc
|
||||
# connections?) noticeably after client connections have been
|
||||
# made or are in use/teardown. It works just fine if you
|
||||
# just start and stop the container tho?..
|
||||
log.cancel(f'SENDING {signal} to {self.cntr.id}')
|
||||
self.cntr.kill(signal)
|
||||
return True
|
||||
|
||||
except docker.errors.APIError as err:
|
||||
if 'is not running' in err.explanation:
|
||||
return False
|
||||
|
||||
async def cancel(
|
||||
self,
|
||||
stop_msg: str,
|
||||
) -> None:
|
||||
|
||||
cid = self.cntr.id
|
||||
# first try a graceful cancel
|
||||
log.cancel(
|
||||
f'SIGINT cancelling container: {cid}\n'
|
||||
f'waiting on stop msg: "{stop_msg}"'
|
||||
)
|
||||
self.try_signal('SIGINT')
|
||||
|
||||
start = time.time()
|
||||
for _ in range(30):
|
||||
|
||||
with trio.move_on_after(0.5) as cs:
|
||||
cs.shield = True
|
||||
await self.process_logs_until(stop_msg)
|
||||
|
||||
# if we aren't cancelled on above checkpoint then we
|
||||
# assume we read the expected stop msg and terminated.
|
||||
break
|
||||
|
||||
try:
|
||||
log.info(f'Polling for container shutdown:\n{cid}')
|
||||
|
||||
if self.cntr.status not in {'exited', 'not-running'}:
|
||||
self.cntr.wait(
|
||||
timeout=0.1,
|
||||
condition='not-running',
|
||||
)
|
||||
|
||||
break
|
||||
|
||||
except (
|
||||
ReadTimeout,
|
||||
):
|
||||
log.info(f'Still waiting on container:\n{cid}')
|
||||
continue
|
||||
|
||||
except (
|
||||
docker.errors.APIError,
|
||||
ConnectionError,
|
||||
):
|
||||
log.exception('Docker connection failure')
|
||||
break
|
||||
else:
|
||||
delay = time.time() - start
|
||||
log.error(
|
||||
f'Failed to kill container {cid} after {delay}s\n'
|
||||
'sending SIGKILL..'
|
||||
)
|
||||
# get out the big guns, bc apparently marketstore
|
||||
# doesn't actually know how to terminate gracefully
|
||||
# :eyeroll:...
|
||||
self.try_signal('SIGKILL')
|
||||
self.cntr.wait(
|
||||
timeout=3,
|
||||
condition='not-running',
|
||||
)
|
||||
|
||||
log.cancel(f'Container stopped: {cid}')
|
||||
|
||||
|
||||
@tractor.context
|
||||
async def open_ahabd(
|
||||
ctx: tractor.Context,
|
||||
endpoint: str, # ns-pointer str-msg-type
|
||||
|
||||
**kwargs,
|
||||
|
||||
) -> None:
|
||||
get_console_log('info', name=__name__)
|
||||
|
||||
async with open_docker() as client:
|
||||
|
||||
# TODO: eventually offer a config-oriented API to do the mounts,
|
||||
# params, etc. passing to ``Containter.run()``?
|
||||
# call into endpoint for container config/init
|
||||
ep_func = NamespacePath(endpoint).load_ref()
|
||||
(
|
||||
dcntr,
|
||||
cntr_config,
|
||||
start_msg,
|
||||
stop_msg,
|
||||
) = ep_func(client)
|
||||
cntr = Container(dcntr)
|
||||
|
||||
with trio.move_on_after(1):
|
||||
found = await cntr.process_logs_until(start_msg)
|
||||
|
||||
if not found and cntr not in client.containers.list():
|
||||
raise RuntimeError(
|
||||
'Failed to start `marketstore` check logs deats'
|
||||
)
|
||||
|
||||
await ctx.started((
|
||||
cntr.cntr.id,
|
||||
os.getpid(),
|
||||
cntr_config,
|
||||
))
|
||||
|
||||
try:
|
||||
|
||||
# TODO: we might eventually want a proxy-style msg-prot here
|
||||
# to allow remote control of containers without needing
|
||||
# callers to have root perms?
|
||||
await trio.sleep_forever()
|
||||
|
||||
finally:
|
||||
with trio.CancelScope(shield=True):
|
||||
await cntr.cancel(stop_msg)
|
||||
|
||||
|
||||
async def start_ahab(
|
||||
service_name: str,
|
||||
endpoint: Callable[docker.DockerClient, DockerContainer],
|
||||
task_status: TaskStatus[
|
||||
tuple[
|
||||
trio.Event,
|
||||
dict[str, Any],
|
||||
],
|
||||
] = trio.TASK_STATUS_IGNORED,
|
||||
|
||||
) -> None:
|
||||
'''
|
||||
Start a ``docker`` container supervisor with given service name.
|
||||
|
||||
Currently the actor calling this task should normally be started
|
||||
with root permissions (until we decide to use something that doesn't
|
||||
require this, like docker's rootless mode or some wrapper project) but
|
||||
# the root perms are de-escalated after the docker supervisor sub-actor
|
||||
is started.
|
||||
|
||||
'''
|
||||
cn_ready = trio.Event()
|
||||
try:
|
||||
async with tractor.open_nursery(
|
||||
loglevel='runtime',
|
||||
) as tn:
|
||||
|
||||
portal = await tn.start_actor(
|
||||
service_name,
|
||||
enable_modules=[__name__]
|
||||
)
|
||||
|
||||
# TODO: we have issues with this on teardown
|
||||
# where ``tractor`` tries to issue ``os.kill()``
|
||||
# and hits perms errors since the root process
|
||||
# doesn't any longer have root perms..
|
||||
|
||||
# de-escalate root perms to the original user
|
||||
# after the docker supervisor actor is spawned.
|
||||
if config._parent_user:
|
||||
import pwd
|
||||
os.setuid(
|
||||
pwd.getpwnam(
|
||||
config._parent_user
|
||||
)[2] # named user's uid
|
||||
)
|
||||
|
||||
async with portal.open_context(
|
||||
open_ahabd,
|
||||
endpoint=str(NamespacePath.from_ref(endpoint)),
|
||||
) as (ctx, first):
|
||||
|
||||
cid, pid, cntr_config = first
|
||||
|
||||
task_status.started((
|
||||
cn_ready,
|
||||
cntr_config,
|
||||
(cid, pid),
|
||||
))
|
||||
|
||||
await trio.sleep_forever()
|
||||
|
||||
# since we demoted root perms in this parent
|
||||
# we'll get a perms error on proc cleanup in
|
||||
# ``tractor`` nursery exit. just make sure
|
||||
# the child is terminated and don't raise the
|
||||
# error if so.
|
||||
|
||||
# TODO: we could also consider adding
|
||||
# a ``tractor.ZombieDetected`` or something that we could raise
|
||||
# if we find the child didn't terminate.
|
||||
except PermissionError:
|
||||
log.warning('Failed to cancel root permsed container')
|
||||
|
||||
except (
|
||||
trio.MultiError,
|
||||
) as err:
|
||||
for subexc in err.exceptions:
|
||||
if isinstance(subexc, PermissionError):
|
||||
log.warning('Failed to cancel root perms-ed container')
|
||||
return
|
||||
else:
|
||||
raise
|
|
@ -0,0 +1,838 @@
|
|||
# piker: trading gear for hackers
|
||||
# Copyright (C) Tyler Goodlet (in stewardship for pikers)
|
||||
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU Affero General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU Affero General Public License for more details.
|
||||
|
||||
# You should have received a copy of the GNU Affero General Public License
|
||||
# along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
'''
|
||||
Pre-(path)-graphics formatted x/y nd/1d rendering subsystem.
|
||||
|
||||
'''
|
||||
from __future__ import annotations
|
||||
from typing import (
|
||||
Optional,
|
||||
TYPE_CHECKING,
|
||||
)
|
||||
|
||||
import msgspec
|
||||
from msgspec import field
|
||||
import numpy as np
|
||||
from numpy.lib import recfunctions as rfn
|
||||
|
||||
from ._sharedmem import (
|
||||
ShmArray,
|
||||
)
|
||||
from ._pathops import (
|
||||
path_arrays_from_ohlc,
|
||||
)
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from ._dataviz import (
|
||||
Viz,
|
||||
)
|
||||
from piker.toolz import Profiler
|
||||
|
||||
# default gap between bars: "bar gap multiplier"
|
||||
# - 0.5 is no overlap between OC arms,
|
||||
# - 1.0 is full overlap on each neighbor sample
|
||||
BGM: float = 0.16
|
||||
|
||||
|
||||
class IncrementalFormatter(msgspec.Struct):
|
||||
'''
|
||||
Incrementally updating, pre-path-graphics tracking, formatter.
|
||||
|
||||
Allows tracking source data state in an updateable pre-graphics
|
||||
``np.ndarray`` format (in local process memory) as well as
|
||||
incrementally rendering from that format **to** 1d x/y for path
|
||||
generation using ``pg.functions.arrayToQPath()``.
|
||||
|
||||
'''
|
||||
shm: ShmArray
|
||||
viz: Viz
|
||||
|
||||
# the value to be multiplied by any index into the x/y_1d arrays
|
||||
# given the input index is based on the original source data array.
|
||||
flat_index_ratio: float = 1
|
||||
|
||||
@property
|
||||
def index_field(self) -> 'str':
|
||||
'''
|
||||
Value (``str``) used to look up the "index series" from the
|
||||
underlying source ``numpy`` struct-array; delegate directly to
|
||||
the managing ``Viz``.
|
||||
|
||||
'''
|
||||
return self.viz.index_field
|
||||
|
||||
# Incrementally updated xy ndarray formatted data, a pre-1d
|
||||
# format which is updated and cached independently of the final
|
||||
# pre-graphics-path 1d format.
|
||||
x_nd: Optional[np.ndarray] = None
|
||||
y_nd: Optional[np.ndarray] = None
|
||||
|
||||
@property
|
||||
def xy_nd(self) -> tuple[np.ndarray, np.ndarray]:
|
||||
return (
|
||||
self.x_nd[self.xy_slice],
|
||||
self.y_nd[self.xy_slice],
|
||||
)
|
||||
|
||||
@property
|
||||
def xy_slice(self) -> slice:
|
||||
return slice(
|
||||
self.xy_nd_start,
|
||||
self.xy_nd_stop,
|
||||
)
|
||||
|
||||
# indexes which slice into the above arrays (which are allocated
|
||||
# based on source data shm input size) and allow retrieving
|
||||
# incrementally updated data.
|
||||
xy_nd_start: int | None = None
|
||||
xy_nd_stop: int | None = None
|
||||
|
||||
# TODO: eventually incrementally update 1d-pre-graphics path data?
|
||||
x_1d: np.ndarray | None = None
|
||||
y_1d: np.ndarray | None = None
|
||||
|
||||
# incremental view-change state(s) tracking
|
||||
_last_vr: tuple[float, float] | None = None
|
||||
_last_ivdr: tuple[float, float] | None = None
|
||||
|
||||
@property
|
||||
def index_step_size(self) -> float:
|
||||
'''
|
||||
Readonly value computed on first ``.diff()`` call.
|
||||
|
||||
'''
|
||||
return self.viz.index_step()
|
||||
|
||||
def diff(
|
||||
self,
|
||||
new_read: tuple[np.ndarray],
|
||||
|
||||
) -> tuple[
|
||||
np.ndarray,
|
||||
np.ndarray,
|
||||
]:
|
||||
# TODO:
|
||||
# - can the renderer just call ``Viz.read()`` directly? unpack
|
||||
# latest source data read
|
||||
# - eventually maybe we can implement some kind of
|
||||
# transform on the ``QPainterPath`` that will more or less
|
||||
# detect the diff in "elements" terms? update diff state since
|
||||
# we've now rendered paths.
|
||||
(
|
||||
xfirst,
|
||||
xlast,
|
||||
array,
|
||||
ivl,
|
||||
ivr,
|
||||
in_view,
|
||||
) = new_read
|
||||
|
||||
index = array['index']
|
||||
|
||||
# if the first index in the read array is 0 then
|
||||
# it means the source buffer has been completely backfilled to
|
||||
# available space.
|
||||
src_start = index[0]
|
||||
src_stop = index[-1] + 1
|
||||
|
||||
# these are the "formatted output data" indices
|
||||
# for the pre-graphics arrays.
|
||||
nd_start = self.xy_nd_start
|
||||
nd_stop = self.xy_nd_stop
|
||||
|
||||
if (
|
||||
nd_start is None
|
||||
):
|
||||
assert nd_stop is None
|
||||
|
||||
# setup to do a prepend of all existing src history
|
||||
nd_start = self.xy_nd_start = src_stop
|
||||
# set us in a zero-to-append state
|
||||
nd_stop = self.xy_nd_stop = src_stop
|
||||
|
||||
# compute the length diffs between the first/last index entry in
|
||||
# the input data and the last indexes we have on record from the
|
||||
# last time we updated the curve index.
|
||||
prepend_length = int(nd_start - src_start)
|
||||
append_length = int(src_stop - nd_stop)
|
||||
|
||||
# blah blah blah
|
||||
# do diffing for prepend, append and last entry
|
||||
return (
|
||||
slice(src_start, nd_start),
|
||||
prepend_length,
|
||||
append_length,
|
||||
slice(nd_stop, src_stop),
|
||||
)
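A small worked example of the length math above (made-up index values): if the source read now spans indices 100..249 (`src_start=100`, `src_stop=250`) while the formatter last tracked `nd_start=150`, `nd_stop=200`, then:

# assumed values illustrating the prepend/append computation
src_start, src_stop = 100, 250
nd_start, nd_stop = 150, 200

prepend_length = int(nd_start - src_start)  # 50 older datums to fill in
append_length = int(src_stop - nd_stop)     # 50 newer datums to append

pre_slice = slice(src_start, nd_start)   # slice(100, 150)
post_slice = slice(nd_stop, src_stop)    # slice(200, 250)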
|
||||
|
||||
def _track_inview_range(
|
||||
self,
|
||||
view_range: tuple[int, int],
|
||||
|
||||
) -> bool:
|
||||
# if a view range is passed, plan to draw the
|
||||
# source output that's "in view" of the chart.
|
||||
vl, vr = view_range
|
||||
zoom_or_append = False
|
||||
last_vr = self._last_vr
|
||||
|
||||
# incremental in-view data update.
|
||||
if last_vr:
|
||||
lvl, lvr = last_vr # relative slice indices
|
||||
|
||||
# TODO: detecting more specifically the interaction changes
|
||||
# last_ivr = self._last_ivdr or (vl, vr)
|
||||
# al, ar = last_ivr # abs slice indices
|
||||
# left_change = abs(x_iv[0] - al) >= 1
|
||||
# right_change = abs(x_iv[-1] - ar) >= 1
|
||||
|
||||
# likely a zoom/pan view change or data append update
|
||||
if (
|
||||
(vr - lvr) > 2
|
||||
or vl < lvl
|
||||
|
||||
# append / prepend update
|
||||
# we had an append update where the view range
|
||||
# didn't change but the data-viewed (shifted)
|
||||
# underneath, so we need to redraw.
|
||||
# or left_change and right_change and last_vr == view_range
|
||||
|
||||
# not (left_change and right_change) and ivr
|
||||
# (
|
||||
# or abs(x_iv[ivr] - livr) > 1
|
||||
):
|
||||
zoom_or_append = True
|
||||
|
||||
self._last_vr = view_range
|
||||
|
||||
return zoom_or_append
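Concretely (assumed view ranges): with a last tracked range of `(0, 100)`, a new range of `(0, 103)` trips the `(vr - lvr) > 2` branch while `(0, 101)` does not; panning left so that `vl < lvl` also forces a redraw.

# assumed ranges illustrating the redraw trigger above
lvl, lvr = 0, 100   # last tracked view range
vl, vr = 0, 103     # new range after an append/zoom
assert (vr - lvr) > 2 or vl < lvl  # -> zoom_or_append = True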
|
||||
|
||||
def format_to_1d(
|
||||
self,
|
||||
new_read: tuple,
|
||||
array_key: str,
|
||||
profiler: Profiler,
|
||||
|
||||
slice_to_inview: bool = True,
|
||||
force_full_realloc: bool = False,
|
||||
|
||||
) -> tuple[
|
||||
np.ndarray,
|
||||
np.ndarray,
|
||||
]:
|
||||
shm = self.shm
|
||||
|
||||
(
|
||||
_,
|
||||
_,
|
||||
array,
|
||||
ivl,
|
||||
ivr,
|
||||
in_view,
|
||||
|
||||
) = new_read
|
||||
|
||||
(
|
||||
pre_slice,
|
||||
prepend_len,
|
||||
append_len,
|
||||
post_slice,
|
||||
) = self.diff(new_read)
|
||||
|
||||
# we first need to allocate xy data arrays
|
||||
# from the source data.
|
||||
if (
|
||||
self.y_nd is None
|
||||
or force_full_realloc
|
||||
):
|
||||
self.xy_nd_start = shm._first.value
|
||||
self.xy_nd_stop = shm._last.value
|
||||
self.x_nd, self.y_nd = self.allocate_xy_nd(
|
||||
shm,
|
||||
array_key,
|
||||
)
|
||||
profiler('allocated xy history')
|
||||
|
||||
# once allocated we do incremental pre/append
|
||||
# updates from the diff with the source buffer.
|
||||
else:
|
||||
if prepend_len:
|
||||
|
||||
self.incr_update_xy_nd(
|
||||
shm,
|
||||
array_key,
|
||||
|
||||
# this is the pre-sliced, "normally expected"
|
||||
# new data that an updater would normally be
|
||||
# expected to process, however in some cases (like
|
||||
# step curves) the updater routine may want to do
|
||||
# the source history-data reading itself, so we pass
|
||||
# both here.
|
||||
shm._array[pre_slice],
|
||||
pre_slice,
|
||||
prepend_len,
|
||||
|
||||
self.xy_nd_start,
|
||||
self.xy_nd_stop,
|
||||
is_append=False,
|
||||
)
|
||||
|
||||
self.xy_nd_start -= prepend_len
|
||||
profiler(f'prepended xy history: {prepend_len}')
|
||||
|
||||
if append_len:
|
||||
self.incr_update_xy_nd(
|
||||
shm,
|
||||
array_key,
|
||||
|
||||
shm._array[post_slice],
|
||||
post_slice,
|
||||
append_len,
|
||||
|
||||
self.xy_nd_start,
|
||||
self.xy_nd_stop,
|
||||
is_append=True,
|
||||
)
|
||||
self.xy_nd_stop += append_len
|
||||
profiler(f'appended xy history: {append_len}')
|
||||
# sanity
|
||||
# slice_ln = post_slice.stop - post_slice.start
|
||||
# assert append_len == slice_ln
|
||||
|
||||
view_changed: bool = False
|
||||
view_range: tuple[int, int] = (ivl, ivr)
|
||||
if slice_to_inview:
|
||||
view_changed = self._track_inview_range(view_range)
|
||||
array = in_view
|
||||
profiler(f'{self.viz.name} view range slice {view_range}')
|
||||
|
||||
# TODO: we need to check if the last-datum-in-view is true and
|
||||
# if so only slice to the 2nd last datum.
|
||||
|
||||
# hist = array[:slice_to_head]
|
||||
|
||||
# XXX: WOA WTF TRACTOR DEBUGGING BUGGG
|
||||
# assert 0
|
||||
|
||||
# xy-path data transform: convert source data to a format
|
||||
# able to be passed to a `QPainterPath` rendering routine.
|
||||
if not len(array):
|
||||
# XXX: this might be why the profiler only has exits?
|
||||
return
|
||||
|
||||
# TODO: hist here should be the pre-sliced
|
||||
# x/y_data in the case where allocate_xy is
|
||||
# defined?
|
||||
x_1d, y_1d, connect = self.format_xy_nd_to_1d(
|
||||
array,
|
||||
array_key,
|
||||
view_range,
|
||||
)
|
||||
# cache/save last 1d outputs for use by other
|
||||
# readers (eg. `Viz.draw_last_datum()` in the
|
||||
# only-draw-last-uppx case).
|
||||
self.x_1d = x_1d
|
||||
self.y_1d = y_1d
|
||||
|
||||
# app_tres = None
|
||||
# if append_len:
|
||||
# appended = array[-append_len-1:slice_to_head]
|
||||
# app_tres = self.format_xy_nd_to_1d(
|
||||
# appended,
|
||||
# array_key,
|
||||
# (
|
||||
# view_range[1] - append_len + slice_to_head,
|
||||
# view_range[1]
|
||||
# ),
|
||||
# )
|
||||
# # assert (len(appended) - 1) == append_len
|
||||
# # assert len(appended) == append_len
|
||||
# print(
|
||||
# f'{self.viz.name} APPEND LEN: {append_len}\n'
|
||||
# f'{self.viz.name} APPENDED: {appended}\n'
|
||||
# f'{self.viz.name} app_tres: {app_tres}\n'
|
||||
# )
|
||||
|
||||
# update the last "in view data range"
|
||||
if len(x_1d):
|
||||
self._last_ivdr = x_1d[0], x_1d[-1]
|
||||
|
||||
profiler('.format_to_1d()')
|
||||
|
||||
return (
|
||||
x_1d,
|
||||
y_1d,
|
||||
connect,
|
||||
prepend_len,
|
||||
append_len,
|
||||
view_changed,
|
||||
# app_tres,
|
||||
)
|
||||
|
||||
###############################
|
||||
# Sub-type override interface #
|
||||
###############################
|
||||
|
||||
x_offset: np.ndarray = np.array([0])
|
||||
|
||||
# optional pre-graphics xy formatted data which
|
||||
# is incrementally updated in sync with the source data.
|
||||
# XXX: was ``.allocate_xy()``
|
||||
def allocate_xy_nd(
|
||||
self,
|
||||
src_shm: ShmArray,
|
||||
data_field: str,
|
||||
|
||||
) -> tuple[
|
||||
np.ndarray, # x
|
||||
np.ndarray # y
|
||||
]:
|
||||
'''
|
||||
Convert the structured-array ``src_shm`` format to
|
||||
a equivalently shaped (and field-less) ``np.ndarray``.
|
||||
|
||||
Eg. a 4 field x N struct-array => (N, 4)
|
||||
|
||||
'''
|
||||
y_nd = src_shm._array[data_field].copy()
|
||||
x_nd = (
|
||||
src_shm._array[self.index_field].copy()
|
||||
+
|
||||
self.x_offset
|
||||
)
|
||||
return x_nd, y_nd
|
||||
|
||||
# XXX: was ``.update_xy()``
|
||||
def incr_update_xy_nd(
|
||||
self,
|
||||
|
||||
src_shm: ShmArray,
|
||||
data_field: str,
|
||||
|
||||
new_from_src: np.ndarray, # portion of source that was updated
|
||||
|
||||
read_slc: slice,
|
||||
ln: int, # len of updated
|
||||
|
||||
nd_start: int,
|
||||
nd_stop: int,
|
||||
|
||||
is_append: bool,
|
||||
|
||||
) -> None:
|
||||
# write pushed data to flattened copy
|
||||
y_nd_new = new_from_src[data_field]
|
||||
self.y_nd[read_slc] = y_nd_new
|
||||
|
||||
x_nd_new = self.x_nd[read_slc]
|
||||
x_nd_new[:] = (
|
||||
new_from_src[self.index_field]
|
||||
+
|
||||
self.x_offset
|
||||
)
|
||||
|
||||
# x_nd = self.x_nd[self.xy_slice]
|
||||
# y_nd = self.y_nd[self.xy_slice]
|
||||
# name = self.viz.name
|
||||
# if 'trade_rate' == name:
|
||||
# s = 4
|
||||
# print(
|
||||
# f'{name.upper()}:\n'
|
||||
# 'NEW_FROM_SRC:\n'
|
||||
# f'new_from_src: {new_from_src}\n\n'
|
||||
|
||||
# f'PRE self.x_nd:'
|
||||
# f'\n{list(x_nd[-s:])}\n'
|
||||
|
||||
# f'PRE self.y_nd:\n'
|
||||
# f'{list(y_nd[-s:])}\n\n'
|
||||
|
||||
# f'TO WRITE:\n'
|
||||
|
||||
# f'x_nd_new:\n'
|
||||
# f'{x_nd_new[0]}\n'
|
||||
|
||||
# f'y_nd_new:\n'
|
||||
# f'{y_nd_new}\n'
|
||||
# )
|
||||
|
||||
# XXX: was ``.format_xy()``
|
||||
def format_xy_nd_to_1d(
|
||||
self,
|
||||
|
||||
array: np.ndarray,
|
||||
array_key: str,
|
||||
vr: tuple[int, int],
|
||||
|
||||
) -> tuple[
|
||||
np.ndarray, # 1d x
|
||||
np.ndarray, # 1d y
|
||||
np.ndarray | str, # connection array/style
|
||||
]:
|
||||
'''
|
||||
Default xy-nd array to 1d pre-graphics-path render routine.
|
||||
|
||||
Return single field column data verbatim
|
||||
|
||||
'''
|
||||
# NOTE: we don't include the very last datum which is filled in
|
||||
# normally by another graphics object.
|
||||
x_1d = array[self.index_field][:-1]
|
||||
y_1d = array[array_key][:-1]
|
||||
|
||||
# name = self.viz.name
|
||||
# if 'trade_rate' == name:
|
||||
# s = 4
|
||||
# x_nd = list(self.x_nd[self.xy_slice][-s:-1])
|
||||
# y_nd = list(self.y_nd[self.xy_slice][-s:-1])
|
||||
# print(
|
||||
# f'{name}:\n'
|
||||
# f'XY data:\n'
|
||||
# f'x: {x_nd}\n'
|
||||
# f'y: {y_nd}\n\n'
|
||||
# f'x_1d: {list(x_1d[-s:])}\n'
|
||||
# f'y_1d: {list(y_1d[-s:])}\n\n'
|
||||
|
||||
# )
|
||||
return (
|
||||
x_1d,
|
||||
y_1d,
|
||||
|
||||
# 1d connection array or style-key to
|
||||
# ``pg.functions.arrayToQPath()``
|
||||
'all',
|
||||
)
|
||||
|
||||
|
||||
class OHLCBarsFmtr(IncrementalFormatter):
|
||||
|
||||
x_offset: np.ndarray = np.array([
|
||||
-0.5,
|
||||
0,
|
||||
0,
|
||||
0.5,
|
||||
])
|
||||
|
||||
fields: list[str] = field(
|
||||
default_factory=lambda: ['open', 'high', 'low', 'close']
|
||||
)
|
||||
flat_index_ratio: float = 4
|
||||
|
||||
def allocate_xy_nd(
|
||||
self,
|
||||
|
||||
ohlc_shm: ShmArray,
|
||||
data_field: str,
|
||||
|
||||
) -> tuple[
|
||||
np.ndarray, # x
|
||||
np.ndarray # y
|
||||
]:
|
||||
'''
|
||||
Convert an input struct-array holding OHLC samples into a pair of
|
||||
flattened x, y arrays with the same size (datums wise) as the source
|
||||
data.
|
||||
|
||||
'''
|
||||
y_nd = ohlc_shm.ustruct(self.fields)
|
||||
|
||||
# generate a flat-interpolated x-domain
|
||||
x_nd = (
|
||||
np.broadcast_to(
|
||||
ohlc_shm._array[self.index_field][:, None],
|
||||
(
|
||||
ohlc_shm._array.size,
|
||||
# 4, # only ohlc
|
||||
y_nd.shape[1],
|
||||
),
|
||||
)
|
||||
+
|
||||
self.x_offset
|
||||
)
|
||||
assert y_nd.any()
|
||||
|
||||
# write pushed data to flattened copy
|
||||
return (
|
||||
x_nd,
|
||||
y_nd,
|
||||
)
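A shape-level sketch of the flattening above using a tiny assumed OHLC sample: two source rows become a `(2, 4)` x array (the same index broadcast across the four bar arms, shifted by `x_offset`) alongside a `(2, 4)` y array holding the open/high/low/close columns.

import numpy as np

# minimal numpy sketch of the same broadcast, outside of shm
index = np.array([10, 11])              # source 'index' column
x_offset = np.array([-0.5, 0, 0, 0.5])  # OHLC arm offsets

x_nd = np.broadcast_to(index[:, None], (2, 4)) + x_offset
# -> [[ 9.5, 10. , 10. , 10.5],
#     [10.5, 11. , 11. , 11.5]]

y_nd = np.array([
    [1.0, 1.2, 0.9, 1.1],  # open, high, low, close of row 0
    [1.1, 1.3, 1.0, 1.2],  # row 1
])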
|
||||
|
||||
def incr_update_xy_nd(
|
||||
self,
|
||||
|
||||
src_shm: ShmArray,
|
||||
data_field: str,
|
||||
|
||||
new_from_src: np.ndarray, # portion of source that was updated
|
||||
|
||||
read_slc: slice,
|
||||
ln: int, # len of updated
|
||||
|
||||
nd_start: int,
|
||||
nd_stop: int,
|
||||
|
||||
is_append: bool,
|
||||
|
||||
) -> None:
|
||||
# write newly pushed data to flattened copy
|
||||
# a struct-arr is always passed in.
|
||||
new_y_nd = rfn.structured_to_unstructured(
|
||||
new_from_src[self.fields]
|
||||
)
|
||||
self.y_nd[read_slc] = new_y_nd
|
||||
|
||||
# generate same-valued-per-row x support based on y shape
|
||||
x_nd_new = self.x_nd[read_slc]
|
||||
x_nd_new[:] = np.broadcast_to(
|
||||
new_from_src[self.index_field][:, None],
|
||||
new_y_nd.shape,
|
||||
) + self.x_offset
|
||||
|
||||
# TODO: can we drop this frame and just use the above?
|
||||
def format_xy_nd_to_1d(
|
||||
self,
|
||||
|
||||
array: np.ndarray,
|
||||
array_key: str,
|
||||
vr: tuple[int, int],
|
||||
|
||||
start: int = 0, # XXX: do we need this?
|
||||
|
||||
# 0.5 is no overlap between arms, 1.0 is full overlap
|
||||
gap: float = BGM,
|
||||
|
||||
) -> tuple[
|
||||
np.ndarray,
|
||||
np.ndarray,
|
||||
np.ndarray,
|
||||
]:
|
||||
'''
|
||||
More or less direct proxy to the ``numba``-fied
|
||||
``path_arrays_from_ohlc()`` (above) but with closed in kwargs
|
||||
for line spacing.
|
||||
|
||||
'''
|
||||
x, y, c = path_arrays_from_ohlc(
|
||||
array[:-1],
|
||||
start,
|
||||
bar_w=self.index_step_size,
|
||||
bar_gap=gap * self.index_step_size,
|
||||
|
||||
# XXX: don't ask, due to a ``numba`` bug..
|
||||
use_time_index=(self.index_field == 'time'),
|
||||
)
|
||||
return x, y, c
|
||||
|
||||
|
||||
class OHLCBarsAsCurveFmtr(OHLCBarsFmtr):
|
||||
|
||||
def format_xy_nd_to_1d(
|
||||
self,
|
||||
|
||||
array: np.ndarray,
|
||||
array_key: str,
|
||||
vr: tuple[int, int],
|
||||
|
||||
) -> tuple[
|
||||
np.ndarray,
|
||||
np.ndarray,
|
||||
str,
|
||||
]:
|
||||
# TODO: in the case of an existing ``.update_xy()``
|
||||
# should we be passing in array as an xy arrays tuple?
|
||||
|
||||
# 2 more datum-indexes to capture zero at end
|
||||
x_flat = self.x_nd[self.xy_nd_start:self.xy_nd_stop-1]
|
||||
y_flat = self.y_nd[self.xy_nd_start:self.xy_nd_stop-1]
|
||||
|
||||
# slice to view
|
||||
ivl, ivr = vr
|
||||
x_iv_flat = x_flat[ivl:ivr]
|
||||
y_iv_flat = y_flat[ivl:ivr]
|
||||
|
||||
# reshape to 1d for graphics rendering
|
||||
y_iv = y_iv_flat.reshape(-1)
|
||||
x_iv = x_iv_flat.reshape(-1)
|
||||
|
||||
return x_iv, y_iv, 'all'
|
||||
|
||||
|
||||
class StepCurveFmtr(IncrementalFormatter):
|
||||
|
||||
x_offset: np.ndarray = np.array([
|
||||
0,
|
||||
1,
|
||||
])
|
||||
|
||||
def allocate_xy_nd(
|
||||
self,
|
||||
|
||||
shm: ShmArray,
|
||||
data_field: str,
|
||||
|
||||
) -> tuple[
|
||||
np.ndarray, # x
|
||||
np.ndarray # y
|
||||
]:
|
||||
'''
|
||||
Convert an input 1d shm array to a "step array" format
|
||||
for use by path graphics generation.
|
||||
|
||||
'''
|
||||
i = shm._array[self.index_field].copy()
|
||||
out = shm._array[data_field].copy()
|
||||
|
||||
x_out = (
|
||||
np.broadcast_to(
|
||||
i[:, None],
|
||||
(i.size, 2),
|
||||
)
|
||||
+
|
||||
self.x_offset
|
||||
)
|
||||
|
||||
# fill out Nx2 array to hold each step's left + right vertices.
|
||||
y_out = np.empty(
|
||||
x_out.shape,
|
||||
dtype=out.dtype,
|
||||
)
|
||||
# fill in (current) values from source shm buffer
|
||||
y_out[:] = out[:, np.newaxis]
|
||||
|
||||
# TODO: pretty sure we can drop this?
|
||||
# start y at origin level
|
||||
# y_out[0, 0] = 0
|
||||
# y_out[self.xy_nd_start] = 0
|
||||
return x_out, y_out
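A tiny worked example of the step expansion above (assumed sample values): indices `[0, 1]` with levels `[5.0, 7.0]` produce one left and one right vertex per step.

import numpy as np

# numpy sketch of the Nx2 "step" layout produced above
i = np.array([0, 1])
out = np.array([5.0, 7.0])
x_offset = np.array([0, 1])

x_out = np.broadcast_to(i[:, None], (i.size, 2)) + x_offset
# -> [[0, 1], [1, 2]]  each step spans [index, index + 1)

y_out = np.empty(x_out.shape, dtype=out.dtype)
y_out[:] = out[:, np.newaxis]
# -> [[5., 5.], [7., 7.]]  a flat level across each step's width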
|
||||
|
||||
def incr_update_xy_nd(
|
||||
self,
|
||||
|
||||
src_shm: ShmArray,
|
||||
array_key: str,
|
||||
|
||||
new_from_src: np.ndarray, # portion of source that was updated
|
||||
read_slc: slice,
|
||||
ln: int, # len of updated
|
||||
|
||||
nd_start: int,
|
||||
nd_stop: int,
|
||||
|
||||
is_append: bool,
|
||||
|
||||
) -> tuple[
|
||||
np.ndarray,
|
||||
slice,
|
||||
]:
|
||||
# NOTE: for a step curve we slice from one datum prior
|
||||
# to the current "update slice" to get the previous
|
||||
# "level".
|
||||
#
|
||||
# why this is needed,
|
||||
# - the current new append slice will often have a zero
|
||||
# value in the latest datum-step (at least for zero-on-new
|
||||
# cases like vlm in the) as per configuration of the FSP
|
||||
# engine.
|
||||
# - we need to look back a datum to get the last level which
|
||||
# will be used to terminate/complete the last step x-width
|
||||
# which will be set to pair with the last x-index THIS MEANS
|
||||
#
|
||||
# XXX: this means WE CAN'T USE the append slice since we need to
|
||||
# "look backward" one step to get the needed back-to-zero level
|
||||
# and the update data in ``new_from_src`` will only contain the
|
||||
# latest new data.
|
||||
back_1 = slice(
|
||||
read_slc.start - 1,
|
||||
read_slc.stop,
|
||||
)
|
||||
|
||||
to_write = src_shm._array[back_1]
|
||||
y_nd_new = self.y_nd[back_1]
|
||||
y_nd_new[:] = to_write[array_key][:, None]
|
||||
|
||||
x_nd_new = self.x_nd[read_slc]
|
||||
x_nd_new[:] = (
|
||||
new_from_src[self.index_field][:, None]
|
||||
+
|
||||
self.x_offset
|
||||
)
|
||||
|
||||
# XXX: uncomment for debugging
|
||||
# x_nd = self.x_nd[self.xy_slice]
|
||||
# y_nd = self.y_nd[self.xy_slice]
|
||||
# name = self.viz.name
|
||||
# if 'dolla_vlm' in name:
|
||||
# s = 4
|
||||
# print(
|
||||
# f'{name}:\n'
|
||||
# 'NEW_FROM_SRC:\n'
|
||||
# f'new_from_src: {new_from_src}\n\n'
|
||||
|
||||
# f'PRE self.x_nd:'
|
||||
# f'\n{x_nd[-s:]}\n'
|
||||
# f'PRE self.y_nd:\n'
|
||||
# f'{y_nd[-s:]}\n\n'
|
||||
|
||||
# f'TO WRITE:\n'
|
||||
# f'x_nd_new:\n'
|
||||
# f'{x_nd_new}\n'
|
||||
# f'y_nd_new:\n'
|
||||
# f'{y_nd_new}\n'
|
||||
# )
|
||||
|
||||
def format_xy_nd_to_1d(
|
||||
self,
|
||||
|
||||
array: np.ndarray,
|
||||
array_key: str,
|
||||
vr: tuple[int, int],
|
||||
|
||||
) -> tuple[
|
||||
np.ndarray,
|
||||
np.ndarray,
|
||||
str,
|
||||
]:
|
||||
last_t, last = array[-1][[self.index_field, array_key]]
|
||||
|
||||
start = self.xy_nd_start
|
||||
stop = self.xy_nd_stop
|
||||
|
||||
x_step = self.x_nd[start:stop]
|
||||
y_step = self.y_nd[start:stop]
|
||||
|
||||
# slice out in-view data
|
||||
ivl, ivr = vr
|
||||
|
||||
# NOTE: add an extra step to get the vertical-line-down-to-zero
|
||||
# adjacent to the last-datum graphic (filled rect).
|
||||
x_step_iv = x_step[ivl:ivr+1]
|
||||
y_step_iv = y_step[ivl:ivr+1]
|
||||
|
||||
# flatten to 1d
|
||||
x_1d = x_step_iv.reshape(x_step_iv.size)
|
||||
y_1d = y_step_iv.reshape(y_step_iv.size)
|
||||
|
||||
# debugging
|
||||
# if y_1d.any():
|
||||
# s = 6
|
||||
# print(
|
||||
# f'x_step_iv:\n{x_step_iv[-s:]}\n'
|
||||
# f'y_step_iv:\n{y_step_iv[-s:]}\n\n'
|
||||
# f'x_1d:\n{x_1d[-s:]}\n'
|
||||
# f'y_1d:\n{y_1d[-s:]}\n'
|
||||
# )
|
||||
|
||||
return x_1d, y_1d, 'all'
|
|
@ -15,127 +15,34 @@
|
|||
# along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
'''
|
||||
Graphics related downsampling routines for compressing to pixel
|
||||
limits on the display device.
|
||||
Graphics downsampling using the infamous M4 algorithm.
|
||||
|
||||
This is one of ``piker``'s secret weapons allowing us to boss all other
|
||||
charting platforms B)
|
||||
|
||||
(AND DON'T YOU DARE TAKE THIS CODE WITHOUT CREDIT OR WE'LL SUE UR F#&@* ASS).
|
||||
|
||||
NOTES: this method is a so called "visualization driven data
|
||||
aggregation" approach. It gives error-free line chart
|
||||
downsampling, see
|
||||
further scientific paper resources:
|
||||
- http://www.vldb.org/pvldb/vol7/p797-jugel.pdf
|
||||
- http://www.vldb.org/2014/program/papers/demo/p997-jugel.pdf
|
||||
|
||||
Details on implementation of this algo are based in,
|
||||
https://github.com/pikers/piker/issues/109
|
||||
|
||||
'''
|
||||
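For intuition, here is a minimal pure-NumPy sketch of the M4 idea (an illustrative assumption, not the numba-accelerated implementation defined below): keep only the first, min, max and last samples per pixel-column bin.

import numpy as np

def m4_sketch(
    x: np.ndarray,
    y: np.ndarray,
    n_bins: int,
) -> tuple[np.ndarray, np.ndarray]:
    # map each sample onto a pixel-column bin
    edges = np.linspace(x[0], x[-1], n_bins + 1)
    bins = np.clip(np.searchsorted(edges, x, side='right') - 1, 0, n_bins - 1)

    xs, ys = [], []
    for b in range(n_bins):
        sel = bins == b
        if not sel.any():
            continue
        xb, yb = x[sel], y[sel]
        # M4: first, min, max, last -> at most 4 points per bin
        xs += [xb[0], xb[yb.argmin()], xb[yb.argmax()], xb[-1]]
        ys += [yb[0], yb.min(), yb.max(), yb[-1]]

    return np.array(xs), np.array(ys)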
import math
|
||||
from typing import Optional
|
||||
|
||||
import numpy as np
|
||||
from numpy.lib import recfunctions as rfn
|
||||
from numba import (
|
||||
jit,
|
||||
njit,
|
||||
# float64, optional, int64,
|
||||
)
|
||||
|
||||
from ..log import get_logger
|
||||
|
||||
|
||||
log = get_logger(__name__)
|
||||
|
||||
|
||||
def hl2mxmn(ohlc: np.ndarray) -> np.ndarray:
|
||||
'''
|
||||
Convert a OHLC struct-array containing 'high'/'low' columns
|
||||
to a "joined" max/min 1-d array.
|
||||
|
||||
'''
|
||||
index = ohlc['index']
|
||||
hls = ohlc[[
|
||||
'low',
|
||||
'high',
|
||||
]]
|
||||
|
||||
mxmn = np.empty(2*hls.size, dtype=np.float64)
|
||||
x = np.empty(2*hls.size, dtype=np.float64)
|
||||
trace_hl(hls, mxmn, x, index[0])
|
||||
x = x + index[0]
|
||||
|
||||
return mxmn, x
|
||||
|
||||
|
||||
@jit(
|
||||
# TODO: the type annots..
|
||||
# float64[:](float64[:],),
|
||||
nopython=True,
|
||||
)
|
||||
def trace_hl(
|
||||
hl: 'np.ndarray',
|
||||
out: np.ndarray,
|
||||
x: np.ndarray,
|
||||
start: int,
|
||||
|
||||
# the "offset" values in the x-domain which
|
||||
# place the 2 output points around each ``int``
|
||||
# master index.
|
||||
margin: float = 0.43,
|
||||
|
||||
) -> None:
|
||||
'''
|
||||
"Trace" the outline of the high-low values of an ohlc sequence
|
||||
as a line such that the maximum deviation (aka dispersion) between
|
||||
bars is preserved.
|
||||
|
||||
This routine is expected to modify input arrays in-place.
|
||||
|
||||
'''
|
||||
last_l = hl['low'][0]
|
||||
last_h = hl['high'][0]
|
||||
|
||||
for i in range(hl.size):
|
||||
row = hl[i]
|
||||
l, h = row['low'], row['high']
|
||||
|
||||
up_diff = h - last_l
|
||||
down_diff = last_h - l
|
||||
|
||||
if up_diff > down_diff:
|
||||
out[2*i + 1] = h
|
||||
out[2*i] = last_l
|
||||
else:
|
||||
out[2*i + 1] = l
|
||||
out[2*i] = last_h
|
||||
|
||||
last_l = l
|
||||
last_h = h
|
||||
|
||||
x[2*i] = int(i) - margin
|
||||
x[2*i + 1] = int(i) + margin
|
||||
|
||||
return out
|
||||
|
||||
|
||||
def ohlc_flatten(
|
||||
ohlc: np.ndarray,
|
||||
use_mxmn: bool = True,
|
||||
|
||||
) -> tuple[np.ndarray, np.ndarray]:
|
||||
'''
|
||||
Convert an OHLCV struct-array into a flat ready-for-line-plotting
|
||||
1-d array that is 4 times the size with x-domain values distributed
|
||||
evenly (by 0.5 steps) over each index.
|
||||
|
||||
'''
|
||||
index = ohlc['index']
|
||||
|
||||
if use_mxmn:
|
||||
# traces a line optimally over highs to lows
|
||||
# using numba. NOTE: pretty sure this is faster
|
||||
# and looks about the same as the below output.
|
||||
flat, x = hl2mxmn(ohlc)
|
||||
|
||||
else:
|
||||
flat = rfn.structured_to_unstructured(
|
||||
ohlc[['open', 'high', 'low', 'close']]
|
||||
).flatten()
|
||||
|
||||
x = np.linspace(
|
||||
start=index[0] - 0.5,
|
||||
stop=index[-1] + 0.5,
|
||||
num=len(flat),
|
||||
)
|
||||
return x, flat
|
||||
from ._util import log
|
||||
|
||||
|
||||
def ds_m4(
|
||||
|
@ -160,16 +67,6 @@ def ds_m4(
|
|||
This is more or less an OHLC style sampling of a line-style series.
|
||||
|
||||
'''
|
||||
# NOTE: this method is a so called "visualization driven data
|
||||
# aggregation" approach. It gives error-free line chart
|
||||
# downsampling, see
|
||||
# further scientific paper resources:
|
||||
# - http://www.vldb.org/pvldb/vol7/p797-jugel.pdf
|
||||
# - http://www.vldb.org/2014/program/papers/demo/p997-jugel.pdf
|
||||
|
||||
# Details on implementation of this algo are based in,
|
||||
# https://github.com/pikers/piker/issues/109
|
||||
|
||||
# XXX: from infinite on downsampling viewable graphics:
|
||||
# "one thing i remembered about the binning - if you are
|
||||
# picking a range within your timeseries the start and end bin
|
||||
|
@ -191,6 +88,14 @@ def ds_m4(
|
|||
x_end = x[-1] # x end value/highest in domain
|
||||
xrange = (x_end - x_start)
|
||||
|
||||
if xrange < 0:
|
||||
log.error(f'-VE M4 X-RANGE: {x_start} -> {x_end}')
|
||||
# XXX: broken x-range calc-case, likely the x-end points
|
||||
# are wrong and have some default value set (such as
|
||||
# x_end -> <some epoch float> while x_start -> 0.5).
|
||||
# breakpoint()
|
||||
return None
|
||||
|
||||
# XXX: always round up on the input pixels
|
||||
# lnx = len(x)
|
||||
# uppx *= max(4 / (1 + math.log(uppx, 2)), 1)
|
||||
|
@ -223,14 +128,20 @@ def ds_m4(
|
|||
assert frames >= (xrange / uppx)
|
||||
|
||||
# call into ``numba``
|
||||
nb, i_win, y_out = _m4(
|
||||
(
|
||||
nb,
|
||||
x_out,
|
||||
y_out,
|
||||
ymn,
|
||||
ymx,
|
||||
) = _m4(
|
||||
x,
|
||||
y,
|
||||
|
||||
frames,
|
||||
|
||||
# TODO: see func below..
|
||||
# i_win,
|
||||
# x_out,
|
||||
# y_out,
|
||||
|
||||
# first index in x data to start at
|
||||
|
@ -243,14 +154,14 @@ def ds_m4(
|
|||
# filter out any overshoot in the input allocation arrays by
|
||||
# removing zero-ed tail entries which should start at a certain
|
||||
# index.
|
||||
i_win = i_win[i_win != 0]
|
||||
y_out = y_out[:i_win.size]
|
||||
x_out = x_out[x_out != 0]
|
||||
y_out = y_out[:x_out.size]
|
||||
|
||||
return nb, i_win, y_out
|
||||
# print(f'M4 output ymn, ymx: {ymn},{ymx}')
|
||||
return nb, x_out, y_out, ymn, ymx
|
||||
|
||||
|
||||
@jit(
|
||||
nopython=True,
|
||||
@njit(
|
||||
nogil=True,
|
||||
)
|
||||
def _m4(
|
||||
|
@ -260,8 +171,8 @@ def _m4(
|
|||
|
||||
frames: int,
|
||||
|
||||
# TODO: using this approach by having the ``.zeros()`` alloc lines
|
||||
# below, in put python was causing segs faults and alloc crashes..
|
||||
# TODO: using this approach, having the ``.zeros()`` alloc lines
|
||||
# below in pure python, there were segs faults and alloc crashes..
|
||||
# we might need to see how it behaves with shm arrays and consider
|
||||
# allocating them once at startup?
|
||||
|
||||
|
@ -274,14 +185,22 @@ def _m4(
|
|||
x_start: int,
|
||||
step: float,
|
||||
|
||||
) -> int:
|
||||
# nbins = len(i_win)
|
||||
# count = len(xs)
|
||||
) -> tuple[
|
||||
int,
|
||||
np.ndarray,
|
||||
np.ndarray,
|
||||
float,
|
||||
float,
|
||||
]:
|
||||
'''
|
||||
Implementation of the m4 algorithm in ``numba``:
|
||||
http://www.vldb.org/pvldb/vol7/p797-jugel.pdf
|
||||
|
||||
'''
|
||||
# these are pre-allocated and mutated by ``numba``
|
||||
# code in-place.
|
||||
y_out = np.zeros((frames, 4), ys.dtype)
|
||||
i_win = np.zeros(frames, xs.dtype)
|
||||
x_out = np.zeros(frames, xs.dtype)
|
||||
|
||||
bincount = 0
|
||||
x_left = x_start
|
||||
|
@ -295,24 +214,34 @@ def _m4(
|
|||
|
||||
# set all bins in the left-most entry to the starting left-most x value
|
||||
# (aka a row broadcast).
|
||||
i_win[bincount] = x_left
|
||||
x_out[bincount] = x_left
|
||||
# set all y-values to the first value passed in.
|
||||
y_out[bincount] = ys[0]
|
||||
|
||||
# full input y-data mx and mn
|
||||
mx: float = -np.inf
|
||||
mn: float = np.inf
|
||||
|
||||
# compute OHLC style max / min values per window sized x-frame.
|
||||
for i in range(len(xs)):
|
||||
|
||||
x = xs[i]
|
||||
y = ys[i]
|
||||
|
||||
if x < x_left + step: # the current window "step" is [bin, bin+1)
|
||||
y_out[bincount, 1] = min(y, y_out[bincount, 1])
|
||||
y_out[bincount, 2] = max(y, y_out[bincount, 2])
|
||||
ymn = y_out[bincount, 1] = min(y, y_out[bincount, 1])
|
||||
ymx = y_out[bincount, 2] = max(y, y_out[bincount, 2])
|
||||
y_out[bincount, 3] = y
|
||||
mx = max(mx, ymx)
|
||||
mn = min(mn, ymn)
|
||||
|
||||
else:
|
||||
# Find the next bin
|
||||
while x >= x_left + step:
|
||||
x_left += step
|
||||
|
||||
bincount += 1
|
||||
i_win[bincount] = x_left
|
||||
x_out[bincount] = x_left
|
||||
y_out[bincount] = y
|
||||
|
||||
return bincount, i_win, y_out
|
||||
return bincount, x_out, y_out, mn, mx
|
|
@ -1,82 +0,0 @@
|
|||
# piker: trading gear for hackers
|
||||
# Copyright (C) Tyler Goodlet (in stewardship for piker0)
|
||||
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU Affero General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU Affero General Public License for more details.
|
||||
|
||||
# You should have received a copy of the GNU Affero General Public License
|
||||
# along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
'''
|
||||
Stream format enforcement.
|
||||
|
||||
'''
|
||||
from itertools import chain
|
||||
from typing import AsyncIterator
|
||||
|
||||
|
||||
def iterticks(
|
||||
quote: dict,
|
||||
types: tuple[str] = (
|
||||
'trade',
|
||||
'dark_trade',
|
||||
),
|
||||
deduplicate_darks: bool = False,
|
||||
|
||||
) -> AsyncIterator:
|
||||
'''
|
||||
Iterate through ticks delivered per quote cycle.
|
||||
|
||||
'''
|
||||
if deduplicate_darks:
|
||||
assert 'dark_trade' in types
|
||||
|
||||
# print(f"{quote}\n\n")
|
||||
ticks = quote.get('ticks', ())
|
||||
trades = {}
|
||||
darks = {}
|
||||
|
||||
if ticks:
|
||||
|
||||
# do a first pass and attempt to remove duplicate dark
|
||||
# trades with the same tick signature.
|
||||
if deduplicate_darks:
|
||||
for tick in ticks:
|
||||
ttype = tick.get('type')
|
||||
|
||||
time = tick.get('time', None)
|
||||
if time:
|
||||
sig = (
|
||||
time,
|
||||
tick['price'],
|
||||
tick['size']
|
||||
)
|
||||
|
||||
if ttype == 'dark_trade':
|
||||
darks[sig] = tick
|
||||
|
||||
elif ttype == 'trade':
|
||||
trades[sig] = tick
|
||||
|
||||
# filter duplicates
|
||||
for sig, tick in trades.items():
|
||||
tick = darks.pop(sig, None)
|
||||
if tick:
|
||||
ticks.remove(tick)
|
||||
# print(f'DUPLICATE {tick}')
|
||||
|
||||
# re-insert ticks
|
||||
ticks.extend(list(chain(trades.values(), darks.values())))
|
||||
|
||||
for tick in ticks:
|
||||
# print(f"{quote['symbol']}: {tick}")
|
||||
ttype = tick.get('type')
|
||||
if ttype in types:
|
||||
yield tick
|
|
@ -0,0 +1,281 @@
|
|||
# piker: trading gear for hackers
|
||||
# Copyright (C) 2018-present Tyler Goodlet (in stewardship of pikers)
|
||||
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU Affero General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU Affero General Public License for more details.
|
||||
|
||||
# You should have received a copy of the GNU Affero General Public License
|
||||
# along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
"""
|
||||
Super fast ``QPainterPath`` generation related operator routines.
|
||||
|
||||
"""
|
||||
import numpy as np
|
||||
from numpy.lib import recfunctions as rfn
|
||||
from numba import (
|
||||
# types,
|
||||
njit,
|
||||
float64,
|
||||
int64,
|
||||
# optional,
|
||||
)
|
||||
|
||||
# TODO: for ``numba`` typing..
|
||||
# from ._source import numba_ohlc_dtype
|
||||
from ._m4 import ds_m4
|
||||
|
||||
|
||||
def xy_downsample(
|
||||
x,
|
||||
y,
|
||||
uppx,
|
||||
|
||||
x_spacer: float = 0.5,
|
||||
|
||||
) -> tuple[
|
||||
np.ndarray,
|
||||
np.ndarray,
|
||||
float,
|
||||
float,
|
||||
]:
|
||||
'''
|
||||
Downsample 1D (flat ``numpy.ndarray``) arrays using M4 given an input
|
||||
``uppx`` (units-per-pixel) and add space between discrete datums.
|
||||
|
||||
'''
|
||||
# downsample whenever more than 1 pixel per datum can be shown.
|
||||
# always refresh data bounds until we get diffing
|
||||
# working properly, see above..
|
||||
m4_out = ds_m4(
|
||||
x,
|
||||
y,
|
||||
uppx,
|
||||
)
|
||||
|
||||
if m4_out is not None:
|
||||
bins, x, y, ymn, ymx = m4_out
|
||||
# flatten output to 1d arrays suitable for path-graphics generation.
|
||||
x = np.broadcast_to(x[:, None], y.shape)
|
||||
x = (x + np.array(
|
||||
[-x_spacer, 0, 0, x_spacer]
|
||||
)).flatten()
|
||||
y = y.flatten()
|
||||
|
||||
return x, y, ymn, ymx
|
||||
|
||||
# XXX: we accept a None output for the case where the input range
|
||||
# to ``ds_m4()`` is bad (-ve) and we want to catch and debug
|
||||
# that (seemingly super rare) circumstance..
|
||||
return None
|
||||
|
||||
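A hedged usage sketch of ``xy_downsample()`` above (assuming the surrounding module's definitions are in scope and roughly 10 source samples per horizontal pixel, i.e. ``uppx=10``):

import numpy as np

x = np.arange(10_000, dtype=float)
y = np.sin(x / 50.) + np.random.default_rng(0).normal(scale=0.1, size=x.size)

out = xy_downsample(x, y, uppx=10)
if out is not None:
    x_ds, y_ds, ymn, ymx = out
    # the flattened output carries 4 vertices per downsampled frame and
    # should be far smaller than the input series.
    assert x_ds.size == y_ds.size < x.size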
|
||||
@njit(
|
||||
# NOTE: need to construct this manually for readonly
|
||||
# arrays, see https://github.com/numba/numba/issues/4511
|
||||
# (
|
||||
# types.Array(
|
||||
# numba_ohlc_dtype,
|
||||
# 1,
|
||||
# 'C',
|
||||
# readonly=True,
|
||||
# ),
|
||||
# int64,
|
||||
# types.unicode_type,
|
||||
# optional(float64),
|
||||
# ),
|
||||
nogil=True
|
||||
)
|
||||
def path_arrays_from_ohlc(
|
||||
data: np.ndarray,
|
||||
start: int64,
|
||||
bar_w: float64,
|
||||
bar_gap: float64 = 0.16,
|
||||
use_time_index: bool = True,
|
||||
|
||||
# XXX: ``numba`` issue: https://github.com/numba/numba/issues/8622
|
||||
# index_field: str,
|
||||
|
||||
) -> tuple[
|
||||
np.ndarray,
|
||||
np.ndarray,
|
||||
np.ndarray,
|
||||
]:
|
||||
'''
|
||||
Generate an array of line objects from input ohlc data.
|
||||
|
||||
'''
|
||||
size = int(data.shape[0] * 6)
|
||||
|
||||
# XXX: see this for why the dtype might have to be defined outside
|
||||
# the routine.
|
||||
# https://github.com/numba/numba/issues/4098#issuecomment-493914533
|
||||
x = np.zeros(
|
||||
shape=size,
|
||||
dtype=float64,
|
||||
)
|
||||
y, c = x.copy(), x.copy()
|
||||
|
||||
half_w: float = bar_w/2
|
||||
|
||||
# TODO: report bug for assert @
|
||||
# ../piker/env/lib/python3.8/site-packages/numba/core/typing/builtins.py:991
|
||||
for i, q in enumerate(data[start:], start):
|
||||
|
||||
open = q['open']
|
||||
high = q['high']
|
||||
low = q['low']
|
||||
close = q['close']
|
||||
|
||||
if use_time_index:
|
||||
index = float64(q['time'])
|
||||
else:
|
||||
index = float64(q['index'])
|
||||
|
||||
# XXX: ``numba`` issue: https://github.com/numba/numba/issues/8622
|
||||
# index = float64(q[index_field])
|
||||
# AND this (probably)
|
||||
# open, high, low, close, index = q[
|
||||
# ['open', 'high', 'low', 'close', 'index']]
|
||||
|
||||
istart = i * 6
|
||||
istop = istart + 6
|
||||
|
||||
# x,y detail the 6 points which connect all vertexes of a ohlc bar
|
||||
mid: float = index + half_w
|
||||
x[istart:istop] = (
|
||||
index + bar_gap,
|
||||
mid,
|
||||
mid,
|
||||
mid,
|
||||
mid,
|
||||
index + bar_w - bar_gap,
|
||||
)
|
||||
y[istart:istop] = (
|
||||
open,
|
||||
open,
|
||||
low,
|
||||
high,
|
||||
close,
|
||||
close,
|
||||
)
|
||||
|
||||
# specifies that the first edge is never connected to the
|
||||
# prior bar's last edge thus providing a small "gap"/"space"
|
||||
# between bars determined by ``bar_gap``.
|
||||
c[istart:istop] = (1, 1, 1, 1, 1, 0)
|
||||
|
||||
return x, y, c
|
||||
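A hypothetical usage sketch for ``path_arrays_from_ohlc()`` above, assuming a minimal structured dtype that carries the fields the routine reads (both ``time`` and ``index`` are included since numba types every branch):

import numpy as np

ohlc_dtype = np.dtype([
    ('index', 'i8'), ('time', 'f8'),
    ('open', 'f8'), ('high', 'f8'), ('low', 'f8'), ('close', 'f8'),
])
bars = np.array(
    [
        (0, 0., 10., 12., 9., 11.),
        (1, 1., 11., 13., 10., 12.),
    ],
    dtype=ohlc_dtype,
)
x, y, c = path_arrays_from_ohlc(
    bars,
    start=0,
    bar_w=1.0,
    bar_gap=0.16,
)
# 6 vertices per bar; ``c`` flags which adjacent vertices are connected
assert x.size == y.size == c.size == 6 * bars.size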
|
||||
|
||||
def hl2mxmn(
|
||||
ohlc: np.ndarray,
|
||||
index_field: str = 'index',
|
||||
|
||||
) -> np.ndarray:
|
||||
'''
|
||||
Convert a OHLC struct-array containing 'high'/'low' columns
|
||||
to a "joined" max/min 1-d array.
|
||||
|
||||
'''
|
||||
index = ohlc[index_field]
|
||||
hls = ohlc[[
|
||||
'low',
|
||||
'high',
|
||||
]]
|
||||
|
||||
mxmn = np.empty(2*hls.size, dtype=np.float64)
|
||||
x = np.empty(2*hls.size, dtype=np.float64)
|
||||
trace_hl(hls, mxmn, x, index[0])
|
||||
x = x + index[0]
|
||||
|
||||
return mxmn, x
|
||||
|
||||
|
||||
@njit(
|
||||
# TODO: the type annots..
|
||||
# float64[:](float64[:],),
|
||||
)
|
||||
def trace_hl(
|
||||
hl: 'np.ndarray',
|
||||
out: np.ndarray,
|
||||
x: np.ndarray,
|
||||
start: int,
|
||||
|
||||
# the "offset" values in the x-domain which
|
||||
# place the 2 output points around each ``int``
|
||||
# master index.
|
||||
margin: float = 0.43,
|
||||
|
||||
) -> None:
|
||||
'''
|
||||
"Trace" the outline of the high-low values of an ohlc sequence
|
||||
as a line such that the maximum deviation (aka dispersion) between
|
||||
bars is preserved.
|
||||
|
||||
This routine is expected to modify input arrays in-place.
|
||||
|
||||
'''
|
||||
last_l = hl['low'][0]
|
||||
last_h = hl['high'][0]
|
||||
|
||||
for i in range(hl.size):
|
||||
row = hl[i]
|
||||
lo, hi = row['low'], row['high']
|
||||
|
||||
up_diff = hi - last_l
|
||||
down_diff = last_h - lo
|
||||
|
||||
if up_diff > down_diff:
|
||||
out[2*i + 1] = hi
|
||||
out[2*i] = last_l
|
||||
else:
|
||||
out[2*i + 1] = lo
|
||||
out[2*i] = last_h
|
||||
|
||||
last_l = lo
|
||||
last_h = hi
|
||||
|
||||
x[2*i] = int(i) - margin
|
||||
x[2*i + 1] = int(i) + margin
|
||||
|
||||
return out
|
||||
|
||||
|
||||
def ohlc_flatten(
|
||||
ohlc: np.ndarray,
|
||||
use_mxmn: bool = True,
|
||||
index_field: str = 'index',
|
||||
|
||||
) -> tuple[np.ndarray, np.ndarray]:
|
||||
'''
|
||||
Convert an OHLCV struct-array into a flat ready-for-line-plotting
|
||||
1-d array that is 4 times the size with x-domain values distributed
|
||||
evenly (by 0.5 steps) over each index.
|
||||
|
||||
'''
|
||||
index = ohlc[index_field]
|
||||
|
||||
if use_mxmn:
|
||||
# traces a line optimally over highs to lows
|
||||
# using numba. NOTE: pretty sure this is faster
|
||||
# and looks about the same as the below output.
|
||||
flat, x = hl2mxmn(ohlc)
|
||||
|
||||
else:
|
||||
flat = rfn.structured_to_unstructured(
|
||||
ohlc[['open', 'high', 'low', 'close']]
|
||||
).flatten()
|
||||
|
||||
x = np.linspace(
|
||||
start=index[0] - 0.5,
|
||||
stop=index[-1] + 0.5,
|
||||
num=len(flat),
|
||||
)
|
||||
return x, flat
|
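A short assumed usage sketch of ``ohlc_flatten()`` above, showing the 4-points-per-bar output when the max/min tracer is bypassed:

import numpy as np

bars = np.array(
    [
        (0, 10., 12., 9., 11.),
        (1, 11., 13., 10., 12.),
    ],
    dtype=[
        ('index', 'i8'),
        ('open', 'f8'), ('high', 'f8'), ('low', 'f8'), ('close', 'f8'),
    ],
)
x, flat = ohlc_flatten(bars, use_mxmn=False)
# 4 evenly spaced x-values are generated per bar (0.5 step increments)
assert flat.size == 4 * bars.size == x.size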
|
@ -20,53 +20,106 @@ financial data flows.
|
|||
|
||||
"""
|
||||
from __future__ import annotations
|
||||
from collections import Counter
|
||||
from collections import (
|
||||
Counter,
|
||||
defaultdict,
|
||||
)
|
||||
from contextlib import asynccontextmanager as acm
|
||||
import time
|
||||
from typing import TYPE_CHECKING, Optional, Union
|
||||
from typing import (
|
||||
Any,
|
||||
AsyncIterator,
|
||||
TYPE_CHECKING,
|
||||
)
|
||||
|
||||
import tractor
|
||||
from tractor import (
|
||||
Context,
|
||||
MsgStream,
|
||||
Channel,
|
||||
)
|
||||
from tractor.trionics import (
|
||||
maybe_open_nursery,
|
||||
)
|
||||
import trio
|
||||
from trio_typing import TaskStatus
|
||||
|
||||
from ..log import get_logger
|
||||
from .ticktools import (
|
||||
frame_ticks,
|
||||
_tick_groups,
|
||||
)
|
||||
from ._util import (
|
||||
log,
|
||||
get_console_log,
|
||||
)
|
||||
from ..service import maybe_spawn_daemon
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from ._sharedmem import ShmArray
|
||||
from .feed import _FeedsBus
|
||||
|
||||
log = get_logger(__name__)
|
||||
from ._sharedmem import (
|
||||
ShmArray,
|
||||
)
|
||||
from .feed import (
|
||||
_FeedsBus,
|
||||
Sub,
|
||||
)
|
||||
|
||||
|
||||
class sampler:
|
||||
# highest frequency sample step is 1 second by default, though in
|
||||
# the future we may want to support shorter periods or a dynamic style
|
||||
# tick-event stream.
|
||||
_default_delay_s: float = 1.0
|
||||
|
||||
|
||||
class Sampler:
|
||||
'''
|
||||
Global sampling engine registry.
|
||||
|
||||
Manages state for sampling events, shm incrementing and
|
||||
sample period logic.
|
||||
|
||||
This non-instantiated type is meant to be a singleton within
|
||||
a `samplerd` actor-service spawned once by the user wishing to
|
||||
time-step-sample (real-time) quote feeds, see
|
||||
``.service.maybe_open_samplerd()`` and the below
|
||||
``register_with_sampler()``.
|
||||
|
||||
'''
|
||||
service_nursery: None | trio.Nursery = None
|
||||
|
||||
# TODO: we could stick these in a composed type to avoid
|
||||
# angering the "i hate module scoped variables crowd" (yawn).
|
||||
ohlcv_shms: dict[int, list[ShmArray]] = {}
|
||||
ohlcv_shms: dict[float, list[ShmArray]] = {}
|
||||
|
||||
# holds one-task-per-sample-period tasks which are spawned as-needed by
|
||||
# data feed requests with a given detected time step usually from
|
||||
# history loading.
|
||||
incrementers: dict[int, trio.CancelScope] = {}
|
||||
incr_task_cs: trio.CancelScope | None = None
|
||||
|
||||
# holds all the ``tractor.Context`` remote subscriptions for
|
||||
# a particular sample period increment event: all subscribers are
|
||||
# notified on a step.
|
||||
subscribers: dict[int, tractor.Context] = {}
|
||||
subscribers: defaultdict[
|
||||
float,
|
||||
list[
|
||||
float,
|
||||
set[MsgStream]
|
||||
],
|
||||
] = defaultdict(
|
||||
lambda: [
|
||||
round(time.time()),
|
||||
set(),
|
||||
]
|
||||
)
|
||||
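An illustrative, standalone sketch (an assumption for clarity, not part of the class) of the registry shape above: each sample period maps to a mutable ``[last_epoch_ts, set_of_streams]`` pair that is auto-created on first access.

import time
from collections import defaultdict

subscribers = defaultdict(lambda: [round(time.time()), set()])

pair = subscribers[1.0]          # first access creates [<now>, set()]
last_ts, subs = pair
subs.add('fake-msg-stream')      # stand-in for a ``tractor.MsgStream``
pair[0] = round(time.time())     # stamped on every broadcast step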
|
||||
|
||||
async def increment_ohlc_buffer(
|
||||
delay_s: int,
|
||||
@classmethod
|
||||
async def increment_ohlc_buffer(
|
||||
self,
|
||||
period_s: float,
|
||||
task_status: TaskStatus[trio.CancelScope] = trio.TASK_STATUS_IGNORED,
|
||||
):
|
||||
):
|
||||
'''
|
||||
Task which inserts new bars into the provided shared memory array
|
||||
every ``delay_s`` seconds.
|
||||
every ``period_s`` seconds.
|
||||
|
||||
This task fulfills 2 purposes:
|
||||
- it takes the subscribed set of shm arrays and increments them
|
||||
|
@ -78,101 +131,157 @@ async def increment_ohlc_buffer(
|
|||
the underlying buffers will actually be incremented.
|
||||
|
||||
'''
|
||||
# # wait for brokerd to signal we should start sampling
|
||||
# await shm_incrementing(shm_token['shm_name']).wait()
|
||||
|
||||
# TODO: right now we'll spin printing bars if the last time stamp is
|
||||
# before a large period of no market activity. Likely the best way
|
||||
# to solve this is to make this task aware of the instrument's
|
||||
# tradable hours?
|
||||
|
||||
# adjust delay to compensate for trio processing time
|
||||
ad = min(sampler.ohlcv_shms.keys()) - 0.001
|
||||
|
||||
total_s = 0 # total seconds counted
|
||||
lowest = min(sampler.ohlcv_shms.keys())
|
||||
lowest_shm = sampler.ohlcv_shms[lowest][0]
|
||||
ad = lowest - 0.001
|
||||
total_s: float = 0 # total seconds counted
|
||||
ad = period_s - 0.001 # compensate for trio processing time
|
||||
|
||||
with trio.CancelScope() as cs:
|
||||
|
||||
# register this time period step as active
|
||||
sampler.incrementers[delay_s] = cs
|
||||
task_status.started(cs)
|
||||
|
||||
# sample step loop:
|
||||
# includes broadcasting to all connected consumers on every
|
||||
# new sample step as well incrementing any registered
|
||||
# buffers by registered sample period.
|
||||
while True:
|
||||
# TODO: do we want to support dynamically
|
||||
# adding a "lower" lowest increment period?
|
||||
await trio.sleep(ad)
|
||||
total_s += lowest
|
||||
total_s += period_s
|
||||
|
||||
# increment all subscribed shm arrays
|
||||
# TODO:
|
||||
# - this in ``numba``
|
||||
# - just lookup shms for this step instead of iterating?
|
||||
for delay_s, shms in sampler.ohlcv_shms.items():
|
||||
if total_s % delay_s != 0:
|
||||
|
||||
i_epoch = round(time.time())
|
||||
broadcasted: set[float] = set()
|
||||
|
||||
# print(f'epoch: {i_epoch} -> REGISTRY {self.ohlcv_shms}')
|
||||
for shm_period_s, shms in self.ohlcv_shms.items():
|
||||
|
||||
# short-circuit on any not-ready because slower sample
|
||||
# rate consuming shm buffers.
|
||||
if total_s % shm_period_s != 0:
|
||||
# print(f'skipping `{shm_period_s}s` sample update')
|
||||
continue
|
||||
|
||||
# update last epoch stamp for this period group
|
||||
if shm_period_s not in broadcasted:
|
||||
sub_pair = self.subscribers[shm_period_s]
|
||||
sub_pair[0] = i_epoch
|
||||
broadcasted.add(shm_period_s)
|
||||
|
||||
# TODO: ``numba`` this!
|
||||
for shm in shms:
|
||||
# TODO: in theory we could make this faster by copying the
|
||||
# "last" readable value into the underlying larger buffer's
|
||||
# next value and then incrementing the counter instead of
|
||||
# using ``.push()``?
|
||||
# print(f'UPDATE {shm_period_s}s STEP for {shm.token}')
|
||||
|
||||
# append new entry to buffer thus "incrementing" the bar
|
||||
# append new entry to buffer thus "incrementing"
|
||||
# the bar
|
||||
array = shm.array
|
||||
last = array[-1:][shm._write_fields].copy()
|
||||
# (index, t, close) = last[0][['index', 'time', 'close']]
|
||||
(t, close) = last[0][['time', 'close']]
|
||||
|
||||
# this copies non-std fields (eg. vwap) from the last datum
|
||||
last[
|
||||
['time', 'volume', 'open', 'high', 'low', 'close']
|
||||
][0] = (t + delay_s, 0, close, close, close, close)
|
||||
# guard against startup backfilling races where
|
||||
# the buffer has not yet been filled.
|
||||
if not last.size:
|
||||
continue
|
||||
|
||||
(t, close) = last[0][[
|
||||
'time',
|
||||
'close',
|
||||
]]
|
||||
|
||||
next_t = t + shm_period_s
|
||||
|
||||
if shm_period_s <= 1:
|
||||
next_t = i_epoch
|
||||
|
||||
# this copies non-std fields (eg. vwap) from the
|
||||
# last datum
|
||||
last[[
|
||||
'time',
|
||||
|
||||
'open',
|
||||
'high',
|
||||
'low',
|
||||
'close',
|
||||
|
||||
'volume',
|
||||
]][0] = (
|
||||
# epoch timestamp
|
||||
next_t,
|
||||
|
||||
# OHLC
|
||||
close,
|
||||
close,
|
||||
close,
|
||||
close,
|
||||
|
||||
0, # vlm
|
||||
)
|
||||
|
||||
# TODO: in theory we could make this faster by
|
||||
# copying the "last" readable value into the
|
||||
# underlying larger buffer's next value and then
|
||||
# incrementing the counter instead of using
|
||||
# ``.push()``?
|
||||
|
||||
# write to the buffer
|
||||
shm.push(last)
|
||||
|
||||
await broadcast(delay_s, shm=lowest_shm)
|
||||
# broadcast increment msg to all updated subs per period
|
||||
for shm_period_s in broadcasted:
|
||||
await self.broadcast(
|
||||
period_s=shm_period_s,
|
||||
time_stamp=i_epoch,
|
||||
)
|
||||
|
||||
@classmethod
|
||||
async def broadcast(
|
||||
self,
|
||||
period_s: float,
|
||||
time_stamp: float | None = None,
|
||||
info: dict | None = None,
|
||||
|
||||
async def broadcast(
|
||||
delay_s: int,
|
||||
shm: Optional[ShmArray] = None,
|
||||
|
||||
) -> None:
|
||||
) -> None:
|
||||
'''
|
||||
Broadcast the given ``shm: ShmArray``'s buffer index step to any
|
||||
Broadcast the period size and last index step value to all
|
||||
subscribers for a given sample period.
|
||||
|
||||
The sent msg will include the first and last index which slice into
|
||||
the buffer's non-empty data.
|
||||
|
||||
'''
|
||||
subs = sampler.subscribers.get(delay_s, ())
|
||||
pair: list[float, set]
|
||||
pair = self.subscribers[period_s]
|
||||
|
||||
first = last = -1
|
||||
last_ts: float
|
||||
subs: set
|
||||
last_ts, subs = pair
|
||||
|
||||
if shm is None:
|
||||
periods = sampler.ohlcv_shms.keys()
|
||||
# if this is an update triggered by a history update there
|
||||
# might not actually be any sampling bus setup since there's
|
||||
# no "live feed" active yet.
|
||||
if periods:
|
||||
lowest = min(periods)
|
||||
shm = sampler.ohlcv_shms[lowest][0]
|
||||
first = shm._first.value
|
||||
last = shm._last.value
|
||||
|
||||
for stream in subs:
|
||||
task = trio.lowlevel.current_task()
|
||||
log.debug(
|
||||
f'SUBS {self.subscribers}\n'
|
||||
f'PAIR {pair}\n'
|
||||
f'TASK: {task}: {id(task)}\n'
|
||||
f'broadcasting {period_s} -> {last_ts}\n'
|
||||
# f'consumers: {subs}'
|
||||
)
|
||||
borked: set[MsgStream] = set()
|
||||
sent: set[MsgStream] = set()
|
||||
while True:
|
||||
try:
|
||||
await stream.send({
|
||||
'first': first,
|
||||
'last': last,
|
||||
'index': last,
|
||||
})
|
||||
for stream in (subs - sent):
|
||||
try:
|
||||
msg = {
|
||||
'index': time_stamp or last_ts,
|
||||
'period': period_s,
|
||||
}
|
||||
if info:
|
||||
msg.update(info)
|
||||
|
||||
await stream.send(msg)
|
||||
sent.add(stream)
|
||||
|
||||
except (
|
||||
trio.BrokenResourceError,
|
||||
trio.ClosedResourceError
|
||||
|
@ -180,60 +289,303 @@ async def broadcast(
|
|||
log.error(
|
||||
f'{stream._ctx.chan.uid} dropped connection'
|
||||
)
|
||||
borked.add(stream)
|
||||
else:
|
||||
break
|
||||
except RuntimeError:
|
||||
log.warning(f'Client subs {subs} changed while broadcasting')
|
||||
continue
|
||||
|
||||
for stream in borked:
|
||||
try:
|
||||
subs.remove(stream)
|
||||
except ValueError:
|
||||
except KeyError:
|
||||
log.warning(
|
||||
f'{stream._ctx.chan.uid} sub already removed!?'
|
||||
)
|
||||
|
||||
@classmethod
|
||||
async def broadcast_all(
|
||||
self,
|
||||
info: dict | None = None,
|
||||
) -> None:
|
||||
|
||||
# NOTE: take a copy of subs since removals can happen
|
||||
# during the broadcast checkpoint which can cause
|
||||
# a `RuntimeError` on iteration of the underlying `dict`.
|
||||
for period_s in list(self.subscribers):
|
||||
await self.broadcast(
|
||||
period_s,
|
||||
info=info,
|
||||
)
|
||||
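A hypothetical caller-side sketch: force a step broadcast to every registered period's subscribers, for example after a history backfill re-allocates buffers (the payload keys are assumptions, not a documented schema).

async def notify_backfill(fqme: str) -> None:
    await Sampler.broadcast_all(
        info={'backfilling': (fqme, 60)},  # hypothetical info payload
    )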
|
||||
|
||||
@tractor.context
|
||||
async def iter_ohlc_periods(
|
||||
ctx: tractor.Context,
|
||||
delay_s: int,
|
||||
async def register_with_sampler(
|
||||
ctx: Context,
|
||||
period_s: float,
|
||||
shms_by_period: dict[float, dict] | None = None,
|
||||
|
||||
open_index_stream: bool = True, # open a 2way stream for sample step msgs?
|
||||
sub_for_broadcasts: bool = True, # sampler side to send step updates?
|
||||
|
||||
) -> None:
|
||||
'''
|
||||
Subscribe to OHLC sampling "step" events: when the time
|
||||
aggregation period increments, this event stream emits an index
|
||||
event.
|
||||
|
||||
'''
|
||||
# add our subscription
|
||||
subs = sampler.subscribers.setdefault(delay_s, [])
|
||||
await ctx.started()
|
||||
async with ctx.open_stream() as stream:
|
||||
subs.append(stream)
|
||||
get_console_log(tractor.current_actor().loglevel)
|
||||
incr_was_started: bool = False
|
||||
|
||||
try:
|
||||
# stream and block until cancelled
|
||||
await trio.sleep_forever()
|
||||
async with maybe_open_nursery(
|
||||
Sampler.service_nursery
|
||||
) as service_nursery:
|
||||
|
||||
# init startup, create (actor-)local service nursery and start
|
||||
# increment task
|
||||
Sampler.service_nursery = service_nursery
|
||||
|
||||
# always ensure a period subs entry exists
|
||||
last_ts, subs = Sampler.subscribers[float(period_s)]
|
||||
|
||||
async with trio.Lock():
|
||||
if Sampler.incr_task_cs is None:
|
||||
Sampler.incr_task_cs = await service_nursery.start(
|
||||
Sampler.increment_ohlc_buffer,
|
||||
1.,
|
||||
)
|
||||
incr_was_started = True
|
||||
|
||||
# insert the base 1s period (for OHLC style sampling) into
|
||||
# the increment buffer set to update and shift every second.
|
||||
if shms_by_period is not None:
|
||||
from ._sharedmem import (
|
||||
attach_shm_array,
|
||||
_Token,
|
||||
)
|
||||
for period in shms_by_period:
|
||||
|
||||
# load and register shm handles
|
||||
shm_token_msg = shms_by_period[period]
|
||||
shm = attach_shm_array(
|
||||
_Token.from_msg(shm_token_msg),
|
||||
readonly=False,
|
||||
)
|
||||
shms_by_period[period] = shm
|
||||
Sampler.ohlcv_shms.setdefault(period, []).append(shm)
|
||||
|
||||
assert Sampler.ohlcv_shms
|
||||
|
||||
# unblock caller
|
||||
await ctx.started(set(Sampler.ohlcv_shms.keys()))
|
||||
|
||||
if open_index_stream:
|
||||
try:
|
||||
async with ctx.open_stream(
|
||||
allow_overruns=True,
|
||||
) as stream:
|
||||
if sub_for_broadcasts:
|
||||
subs.add(stream)
|
||||
|
||||
# except broadcast requests from the subscriber
|
||||
async for msg in stream:
|
||||
if 'broadcast_all' in msg:
|
||||
await Sampler.broadcast_all(
|
||||
info=msg['broadcast_all'],
|
||||
)
|
||||
finally:
|
||||
if (
|
||||
sub_for_broadcasts
|
||||
and subs
|
||||
):
|
||||
try:
|
||||
subs.remove(stream)
|
||||
except ValueError:
|
||||
log.error(
|
||||
f'iOHLC step stream was already dropped {ctx.chan.uid}?'
|
||||
except KeyError:
|
||||
log.warning(
|
||||
f'{stream._ctx.chan.uid} sub already removed!?'
|
||||
)
|
||||
else:
|
||||
# if no shms are passed in we just wait until cancelled
|
||||
# by caller.
|
||||
await trio.sleep_forever()
|
||||
|
||||
finally:
|
||||
# TODO: why tf isn't this working?
|
||||
if shms_by_period is not None:
|
||||
for period, shm in shms_by_period.items():
|
||||
Sampler.ohlcv_shms[period].remove(shm)
|
||||
|
||||
if incr_was_started:
|
||||
Sampler.incr_task_cs.cancel()
|
||||
Sampler.incr_task_cs = None
|
||||
|
||||
|
||||
async def spawn_samplerd(
|
||||
|
||||
loglevel: str | None = None,
|
||||
**extra_tractor_kwargs
|
||||
|
||||
) -> bool:
|
||||
'''
|
||||
Daemon-side service task: start a sampling daemon for common step
|
||||
updates, increment counter writes and stream broadcasting.
|
||||
|
||||
'''
|
||||
from piker.service import Services
|
||||
|
||||
dname = 'samplerd'
|
||||
log.info(f'Spawning `{dname}`')
|
||||
|
||||
# singleton lock creation of ``samplerd`` since we only ever want
|
||||
# one daemon per ``pikerd`` proc tree.
|
||||
# TODO: make this built-into the service api?
|
||||
async with Services.locks[dname + '_singleton']:
|
||||
|
||||
if dname not in Services.service_tasks:
|
||||
|
||||
portal = await Services.actor_n.start_actor(
|
||||
dname,
|
||||
enable_modules=[
|
||||
'piker.data._sampling',
|
||||
],
|
||||
loglevel=loglevel,
|
||||
debug_mode=Services.debug_mode, # set by pikerd flag
|
||||
**extra_tractor_kwargs
|
||||
)
|
||||
|
||||
await Services.start_service_task(
|
||||
dname,
|
||||
portal,
|
||||
register_with_sampler,
|
||||
period_s=1,
|
||||
sub_for_broadcasts=False,
|
||||
)
|
||||
return True
|
||||
|
||||
return False
|
||||
|
||||
|
||||
@acm
|
||||
async def maybe_open_samplerd(
|
||||
|
||||
loglevel: str | None = None,
|
||||
**pikerd_kwargs,
|
||||
|
||||
) -> tractor.Portal: # noqa
|
||||
'''
|
||||
Client-side helper to maybe startup the ``samplerd`` service
|
||||
under the ``pikerd`` tree.
|
||||
|
||||
'''
|
||||
dname = 'samplerd'
|
||||
|
||||
async with maybe_spawn_daemon(
|
||||
dname,
|
||||
service_task_target=spawn_samplerd,
|
||||
spawn_args={},
|
||||
loglevel=loglevel,
|
||||
**pikerd_kwargs,
|
||||
|
||||
) as portal:
|
||||
yield portal
|
||||
|
||||
|
||||
@acm
|
||||
async def open_sample_stream(
|
||||
period_s: float,
|
||||
shms_by_period: dict[float, dict] | None = None,
|
||||
open_index_stream: bool = True,
|
||||
sub_for_broadcasts: bool = True,
|
||||
|
||||
cache_key: str | None = None,
|
||||
allow_new_sampler: bool = True,
|
||||
|
||||
ensure_is_active: bool = False,
|
||||
|
||||
) -> AsyncIterator[dict[str, float]]:
|
||||
'''
|
||||
Subscribe to OHLC sampling "step" events: when the time aggregation
|
||||
period increments, this event stream emits an index event.
|
||||
|
||||
This is a client-side endpoint that does all the work of ensuring
|
||||
the `samplerd` actor is up and that multi-consumer tasks are given
|
||||
a broadcast stream when possible.
|
||||
|
||||
'''
|
||||
# TODO: wrap this manager with the following to make it cached
|
||||
# per client-multitasks entry.
|
||||
# maybe_open_context(
|
||||
# acm_func=partial(
|
||||
# portal.open_context,
|
||||
# register_with_sampler,
|
||||
# ),
|
||||
# key=cache_key or period_s,
|
||||
# )
|
||||
# if cache_hit:
|
||||
# # add a new broadcast subscription for the quote stream
|
||||
# # if this feed is likely already in use
|
||||
# async with istream.subscribe() as bistream:
|
||||
# yield bistream
|
||||
# else:
|
||||
|
||||
async with (
|
||||
# XXX: this should be singleton on a host,
|
||||
# a lone broker-daemon per provider should be
|
||||
# created for all practical purposes
|
||||
maybe_open_samplerd() as portal,
|
||||
|
||||
portal.open_context(
|
||||
register_with_sampler,
|
||||
**{
|
||||
'period_s': period_s,
|
||||
'shms_by_period': shms_by_period,
|
||||
'open_index_stream': open_index_stream,
|
||||
'sub_for_broadcasts': sub_for_broadcasts,
|
||||
},
|
||||
) as (ctx, first)
|
||||
):
|
||||
if ensure_is_active:
|
||||
assert len(first) > 1
|
||||
|
||||
async with (
|
||||
ctx.open_stream(
|
||||
allow_overruns=True,
|
||||
) as istream,
|
||||
|
||||
# TODO: we DO need this task-bcasting so that
|
||||
# for eg. the history chart update loop eventually
|
||||
# receives all backfilling event msgs such that
|
||||
# the underlying graphics format arrays are
|
||||
# re-allocated until all history is loaded!
|
||||
istream.subscribe() as istream,
|
||||
):
|
||||
yield istream
|
||||
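A hedged consumer-side sketch of ``open_sample_stream()`` above, assuming it runs inside an actor with a reachable ``pikerd`` service tree:

async def consume_steps() -> None:
    async with open_sample_stream(period_s=1.) as istream:
        async for msg in istream:
            # each msg carries the epoch 'index' stamp and the 'period'
            # that fired, per ``Sampler.broadcast()``.
            print(f"step {msg['index']} @ {msg['period']}s")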
|
||||
|
||||
async def sample_and_broadcast(
|
||||
|
||||
bus: _FeedsBus, # noqa
|
||||
shm: ShmArray,
|
||||
rt_shm: ShmArray,
|
||||
hist_shm: ShmArray,
|
||||
quote_stream: trio.abc.ReceiveChannel,
|
||||
brokername: str,
|
||||
sum_tick_vlm: bool = True,
|
||||
|
||||
) -> None:
|
||||
'''
|
||||
`brokerd`-side task which writes the latest sampled datum to the shm buffers.
|
||||
|
||||
This task is meant to run in the same actor (mem space) as the
|
||||
`brokerd` real-time quote feed which is being sampled to
|
||||
a ``ShmArray`` buffer.
|
||||
|
||||
'''
|
||||
log.info("Started shared mem bar writer")
|
||||
|
||||
overruns = Counter()
|
||||
|
||||
# iterate stream delivered by broker
|
||||
async for quotes in quote_stream:
|
||||
# print(quotes)
|
||||
|
||||
# TODO: ``numba`` this!
|
||||
for broker_symbol, quote in quotes.items():
|
||||
# TODO: in theory you can send the IPC msg *before* writing
|
||||
|
@ -248,22 +600,26 @@ async def sample_and_broadcast(
|
|||
# TODO: we should probably not write every single
|
||||
# value to an OHLC sample stream XD
|
||||
# for a tick stream sure.. but this is excessive..
|
||||
ticks = quote['ticks']
|
||||
ticks: list[dict] = quote['ticks']
|
||||
for tick in ticks:
|
||||
ticktype = tick['type']
|
||||
ticktype: str = tick['type']
|
||||
|
||||
# write trade events to shm last OHLC sample
|
||||
if ticktype in ('trade', 'utrade'):
|
||||
|
||||
last = tick['price']
|
||||
|
||||
# more compact inline-way to do this assignment
|
||||
# to both buffers?
|
||||
for shm in [rt_shm, hist_shm]:
|
||||
|
||||
# update last entry
|
||||
# benchmarked in the 4-5 us range
|
||||
o, high, low, v = shm.array[-1][
|
||||
['open', 'high', 'low', 'volume']
|
||||
]
|
||||
|
||||
new_v = tick.get('size', 0)
|
||||
new_v: float = tick.get('size', 0)
|
||||
|
||||
if v == 0 and new_v:
|
||||
# no trades for this bar yet so the open
|
||||
|
@ -282,139 +638,147 @@ async def sample_and_broadcast(
|
|||
'high',
|
||||
'low',
|
||||
'close',
|
||||
'bar_wap', # can be optionally provided
|
||||
# 'bar_wap', # can be optionally provided
|
||||
'volume',
|
||||
]][-1] = (
|
||||
o,
|
||||
max(high, last),
|
||||
min(low, last),
|
||||
last,
|
||||
quote.get('bar_wap', 0),
|
||||
# quote.get('bar_wap', 0),
|
||||
volume,
|
||||
)
|
||||
|
||||
# TODO: PUT THIS IN A ``_FeedsBus.broadcast()`` method!
|
||||
# XXX: we need to be very cautious here that no
|
||||
# context-channel is left lingering which doesn't have
|
||||
# a far end receiver actor-task. In such a case you can
|
||||
# end up triggering backpressure which which will
|
||||
# eventually block this producer end of the feed and
|
||||
# thus other consumers still attached.
|
||||
subs: list[
|
||||
tuple[
|
||||
Union[tractor.MsgStream, trio.MemorySendChannel],
|
||||
tractor.Context,
|
||||
Optional[float], # tick throttle in Hz
|
||||
]
|
||||
] = bus._subscribers[broker_symbol.lower()]
|
||||
sub_key: str = broker_symbol.lower()
|
||||
subs: set[Sub] = bus.get_subs(sub_key)
|
||||
|
||||
# NOTE: by default the broker backend doesn't append
|
||||
# it's own "name" into the fqsn schema (but maybe it
|
||||
# its own "name" into the fqme schema (but maybe it
|
||||
# should?) so we have to manually generate the correct
|
||||
# key here.
|
||||
bsym = f'{broker_symbol}.{brokername}'
|
||||
fqme: str = f'{broker_symbol}.{brokername}'
|
||||
lags: int = 0
|
||||
|
||||
for (stream, ctx, tick_throttle) in subs:
|
||||
# XXX TODO XXX: speed up this loop in an AOT compiled
|
||||
# lang (like rust or nim or zig)!
|
||||
# AND/OR instead of doing a fan out to TCP sockets
|
||||
# here, we add a shm-style tick queue which readers can
|
||||
# pull from instead of placing the burden of broadcast
|
||||
# on solely on this `brokerd` actor. see issues:
|
||||
# - https://github.com/pikers/piker/issues/98
|
||||
# - https://github.com/pikers/piker/issues/107
|
||||
|
||||
# for (stream, tick_throttle) in subs.copy():
|
||||
for sub in subs.copy():
|
||||
ipc: MsgStream = sub.ipc
|
||||
throttle: float = sub.throttle_rate
|
||||
try:
|
||||
with trio.move_on_after(0.2) as cs:
|
||||
if tick_throttle:
|
||||
if throttle:
|
||||
send_chan: trio.abc.SendChannel = sub.send_chan
|
||||
|
||||
# this is a send mem chan that likely
|
||||
# pushes to the ``uniform_rate_send()`` below.
|
||||
try:
|
||||
stream.send_nowait(
|
||||
(bsym, quote)
|
||||
send_chan.send_nowait(
|
||||
(fqme, quote)
|
||||
)
|
||||
except trio.WouldBlock:
|
||||
chan = ctx.chan
|
||||
if ctx:
|
||||
overruns[sub_key] += 1
|
||||
ctx: Context = ipc._ctx
|
||||
chan: Channel = ctx.chan
|
||||
|
||||
log.warning(
|
||||
f'Feed overrun {bus.brokername} ->'
|
||||
f'{chan.uid} !!!'
|
||||
f'Feed OVERRUN {sub_key}'
|
||||
'@{bus.brokername} -> \n'
|
||||
f'feed @ {chan.uid}\n'
|
||||
f'throttle = {throttle} Hz'
|
||||
)
|
||||
else:
|
||||
key = id(stream)
|
||||
overruns[key] += 1
|
||||
log.warning(
|
||||
f'Feed overrun {broker_symbol}'
|
||||
'@{bus.brokername} -> '
|
||||
f'feed @ {tick_throttle} Hz'
|
||||
)
|
||||
if overruns[key] > 6:
|
||||
|
||||
if overruns[sub_key] > 6:
|
||||
# TODO: should we check for the
|
||||
# context being cancelled? this
|
||||
# could happen but the
|
||||
# channel-ipc-pipe is still up.
|
||||
if not chan.connected():
|
||||
if (
|
||||
not chan.connected()
|
||||
or ctx._cancel_called
|
||||
):
|
||||
log.warning(
|
||||
'Dropping broken consumer:\n'
|
||||
f'{broker_symbol}:'
|
||||
f'{sub_key}:'
|
||||
f'{ctx.cid}@{chan.uid}'
|
||||
)
|
||||
await stream.aclose()
|
||||
await ipc.aclose()
|
||||
raise trio.BrokenResourceError
|
||||
else:
|
||||
log.warning(
|
||||
'Feed getting overrun bro!\n'
|
||||
f'{broker_symbol}:'
|
||||
f'{ctx.cid}@{chan.uid}'
|
||||
)
|
||||
continue
|
||||
|
||||
else:
|
||||
await stream.send(
|
||||
{bsym: quote}
|
||||
await ipc.send(
|
||||
{fqme: quote}
|
||||
)
|
||||
|
||||
if cs.cancelled_caught:
|
||||
lags += 1
|
||||
if lags > 10:
|
||||
await tractor.breakpoint()
|
||||
await tractor.pause()
|
||||
|
||||
except (
|
||||
trio.BrokenResourceError,
|
||||
trio.ClosedResourceError,
|
||||
trio.EndOfChannel,
|
||||
):
|
||||
chan = ctx.chan
|
||||
ctx: Context = ipc._ctx
|
||||
chan: Channel = ctx.chan
|
||||
if ctx:
|
||||
log.warning(
|
||||
'Dropped `brokerd`-quotes-feed connection:\n'
|
||||
f'{broker_symbol}:'
|
||||
f'{ctx.cid}@{chan.uid}'
|
||||
)
|
||||
if tick_throttle:
|
||||
assert stream._closed
|
||||
if sub.throttle_rate:
|
||||
assert ipc._closed
|
||||
|
||||
# XXX: do we need to deregister here
|
||||
# if it's done in the feed bus code?
|
||||
# so far seems like no since this should all
|
||||
# be single-threaded. Doing it anyway though
|
||||
# since there seems to be some kinda race..
|
||||
try:
|
||||
subs.remove((stream, tick_throttle))
|
||||
except ValueError:
|
||||
log.error(
|
||||
f'Stream was already removed from subs!?\n'
|
||||
f'{broker_symbol}:'
|
||||
f'{ctx.cid}@{chan.uid}'
|
||||
bus.remove_subs(
|
||||
sub_key,
|
||||
{sub},
|
||||
)
|
||||
|
||||
|
||||
# TODO: a less naive throttler, here's some snippets:
|
||||
# token bucket by njs:
|
||||
# https://gist.github.com/njsmith/7ea44ec07e901cb78ebe1dd8dd846cb9
|
||||
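For comparison, a minimal token-bucket style throttler sketch (purely an assumed illustration per the TODO above, not what ``uniform_rate_send()`` below implements):

import time

class TokenBucket:
    def __init__(self, rate_hz: float, burst: int = 1) -> None:
        self.rate = rate_hz
        self.burst = burst
        self.tokens = float(burst)
        self.last = time.monotonic()

    def try_send(self) -> bool:
        now = time.monotonic()
        # refill proportionally to elapsed time, capped at the burst size
        self.tokens = min(
            float(self.burst),
            self.tokens + (now - self.last) * self.rate,
        )
        self.last = now
        if self.tokens >= 1:
            self.tokens -= 1
            return True
        return False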
|
||||
async def uniform_rate_send(
|
||||
|
||||
rate: float,
|
||||
quote_stream: trio.abc.ReceiveChannel,
|
||||
stream: tractor.MsgStream,
|
||||
stream: MsgStream,
|
||||
|
||||
task_status: TaskStatus = trio.TASK_STATUS_IGNORED,
|
||||
|
||||
) -> None:
|
||||
'''
|
||||
Throttle a real-time (presumably tick event) stream to a uniform
|
||||
transmission rate, normally for the purposes of throttling a data
|
||||
flow being consumed by a graphics rendering actor which itself is limited
|
||||
by a fixed maximum display rate.
|
||||
|
||||
Though this function isn't documented (nor was intentionally written
|
||||
to be) a token-bucket style algo, it effectively operates as one (we
|
||||
think?).
|
||||
|
||||
TODO: a less naive throttler, here's some snippets:
|
||||
token bucket by njs:
|
||||
https://gist.github.com/njsmith/7ea44ec07e901cb78ebe1dd8dd846cb9
|
||||
|
||||
'''
|
||||
# TODO: compute the approx overhead latency per cycle
|
||||
left_to_sleep = throttle_period = 1/rate - 0.000616
|
||||
|
||||
|
@ -424,6 +788,12 @@ async def uniform_rate_send(
|
|||
diff = 0
|
||||
|
||||
task_status.started()
|
||||
ticks_by_type: dict[
|
||||
str,
|
||||
list[dict[str, Any]],
|
||||
] = {}
|
||||
|
||||
clear_types = _tick_groups['clears']
|
||||
|
||||
while True:
|
||||
|
||||
|
@ -442,34 +812,17 @@ async def uniform_rate_send(
|
|||
|
||||
if not first_quote:
|
||||
first_quote = last_quote
|
||||
# first_quote['tbt'] = ticks_by_type
|
||||
|
||||
if (throttle_period - diff) > 0:
|
||||
# received a quote but the send cycle period hasn't yet
|
||||
# expired, so we aren't supposed to send yet; append
|
||||
# to the tick frame.
|
||||
|
||||
# append quotes since last iteration into the last quote's
|
||||
# tick array/buffer.
|
||||
ticks = last_quote.get('ticks')
|
||||
|
||||
# XXX: idea for frame type data structure we could
|
||||
# use on the wire instead of a simple list?
|
||||
# frames = {
|
||||
# 'index': ['type_a', 'type_c', 'type_n', 'type_n'],
|
||||
|
||||
# 'type_a': [tick0, tick1, tick2, .., tickn],
|
||||
# 'type_b': [tick0, tick1, tick2, .., tickn],
|
||||
# 'type_c': [tick0, tick1, tick2, .., tickn],
|
||||
# ...
|
||||
# 'type_n': [tick0, tick1, tick2, .., tickn],
|
||||
# }
|
||||
|
||||
# TODO: once we decide to get fancy really we should
|
||||
# have a shared mem tick buffer that is just
|
||||
# continually filled and the UI just reads from it
|
||||
# at its display rate.
|
||||
if ticks:
|
||||
first_quote['ticks'].extend(ticks)
|
||||
frame_ticks(
|
||||
last_quote,
|
||||
ticks_in_order=first_quote['ticks'],
|
||||
ticks_by_type=ticks_by_type,
|
||||
)
|
||||
|
||||
# send cycle isn't due yet so continue waiting
|
||||
continue
|
||||
|
@ -486,17 +839,50 @@ async def uniform_rate_send(
|
|||
# received quote ASAP.
|
||||
sym, first_quote = await quote_stream.receive()
|
||||
|
||||
frame_ticks(
|
||||
first_quote,
|
||||
ticks_in_order=first_quote['ticks'],
|
||||
ticks_by_type=ticks_by_type,
|
||||
)
|
||||
|
||||
# we have a quote already so send it now.
|
||||
|
||||
with trio.move_on_after(throttle_period) as cs:
|
||||
while (
|
||||
not set(ticks_by_type).intersection(clear_types)
|
||||
):
|
||||
try:
|
||||
sym, last_quote = await quote_stream.receive()
|
||||
except trio.EndOfChannel:
|
||||
log.exception(f"feed for {stream} ended?")
|
||||
break
|
||||
|
||||
frame_ticks(
|
||||
last_quote,
|
||||
ticks_in_order=first_quote['ticks'],
|
||||
ticks_by_type=ticks_by_type,
|
||||
)
|
||||
|
||||
# measured_rate = 1 / (time.time() - last_send)
|
||||
# log.info(
|
||||
# f'`{sym}` throttled send hz: {round(measured_rate, ndigits=1)}'
|
||||
# )
|
||||
first_quote['tbt'] = ticks_by_type
|
||||
|
||||
# TODO: now if only we could sync this to the display
|
||||
# rate timing exactly lul
|
||||
try:
|
||||
await stream.send({sym: first_quote})
|
||||
except tractor.RemoteActorError as rme:
|
||||
if rme.type is not tractor._exceptions.StreamOverrun:
|
||||
raise
|
||||
ctx = stream._ctx
|
||||
chan = ctx.chan
|
||||
log.warning(
|
||||
'Throttled quote-stream overrun!\n'
|
||||
f'{sym}:{ctx.cid}@{chan.uid}'
|
||||
)
|
||||
|
||||
except (
|
||||
# NOTE: any of these can be raised by ``tractor``'s IPC
|
||||
# transport-layer and we want to be highly resilient
|
||||
|
@ -517,3 +903,4 @@ async def uniform_rate_send(
|
|||
first_quote = last_quote = None
|
||||
diff = 0
|
||||
last_send = time.time()
|
||||
ticks_by_type.clear()
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
# piker: trading gear for hackers
|
||||
# Copyright (C) Tyler Goodlet (in stewardship for piker0)
|
||||
# Copyright (C) Tyler Goodlet (in stewardship for pikers)
|
||||
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU Affero General Public License as published by
|
||||
|
@ -27,29 +27,22 @@ from multiprocessing.shared_memory import SharedMemory, _USE_POSIX
|
|||
if _USE_POSIX:
|
||||
from _posixshmem import shm_unlink
|
||||
|
||||
import tractor
|
||||
# import msgspec
|
||||
import numpy as np
|
||||
from pydantic import BaseModel
|
||||
from numpy.lib import recfunctions as rfn
|
||||
import tractor
|
||||
|
||||
from ..log import get_logger
|
||||
from ._source import base_iohlc_dtype
|
||||
|
||||
|
||||
log = get_logger(__name__)
|
||||
|
||||
|
||||
# how much is probably dependent on lifestyle
|
||||
_secs_in_day = int(60 * 60 * 24)
|
||||
# we try for a buncha times, but only on a run-every-other-day kinda week.
|
||||
_days_worth = 16
|
||||
_default_size = _days_worth * _secs_in_day
|
||||
# where to start the new data append index
|
||||
_rt_buffer_start = int((_days_worth - 1) * _secs_in_day)
|
||||
from ._util import log
|
||||
from ._source import def_iohlcv_fields
|
||||
from piker.types import Struct
|
||||
|
||||
|
||||
def cuckoff_mantracker():
|
||||
'''
|
||||
Disable all ``multiprocessing``` "resource tracking" machinery since
|
||||
it's an absolute multi-threaded mess of non-SC madness.
|
||||
|
||||
'''
|
||||
from multiprocessing import resource_tracker as mantracker
|
||||
|
||||
# Tell the "resource tracker" thing to fuck off.
|
||||
|
@ -68,7 +61,6 @@ def cuckoff_mantracker():
|
|||
mantracker._resource_tracker = ManTracker()
|
||||
mantracker.register = mantracker._resource_tracker.register
|
||||
mantracker.ensure_running = mantracker._resource_tracker.ensure_running
|
||||
# ensure_running = mantracker._resource_tracker.ensure_running
|
||||
mantracker.unregister = mantracker._resource_tracker.unregister
|
||||
mantracker.getfd = mantracker._resource_tracker.getfd
|
||||
|
||||
|
@ -107,36 +99,39 @@ class SharedInt:
|
|||
log.warning(f'Shm for {name} already unlinked?')
|
||||
|
||||
|
||||
class _Token(BaseModel):
|
||||
class _Token(Struct, frozen=True):
|
||||
'''
|
||||
Internal representation of a shared memory "token"
|
||||
which can be used to key a system wide post shm entry.
|
||||
|
||||
'''
|
||||
class Config:
|
||||
frozen = True
|
||||
|
||||
shm_name: str  # this serves as a "key" value
|
||||
shm_first_index_name: str
|
||||
shm_last_index_name: str
|
||||
dtype_descr: tuple
|
||||
size: int # in struct-array index / row terms
|
||||
|
||||
@property
|
||||
def dtype(self) -> np.dtype:
|
||||
return np.dtype(list(map(tuple, self.dtype_descr))).descr
|
||||
|
||||
def as_msg(self):
|
||||
return self.dict()
|
||||
return self.to_dict()
|
||||
|
||||
@classmethod
|
||||
def from_msg(cls, msg: dict) -> _Token:
|
||||
if isinstance(msg, _Token):
|
||||
return msg
|
||||
|
||||
# TODO: native struct decoding
|
||||
# return _token_dec.decode(msg)
|
||||
|
||||
msg['dtype_descr'] = tuple(map(tuple, msg['dtype_descr']))
|
||||
return _Token(**msg)
|
||||
|
||||
|
||||
# _token_dec = msgspec.msgpack.Decoder(_Token)
|
||||
|
||||
# TODO: this api?
|
||||
# _known_tokens = tractor.ActorVar('_shm_tokens', {})
|
||||
# _known_tokens = tractor.ContextStack('_known_tokens', )
|
||||
|
@ -155,6 +150,7 @@ def get_shm_token(key: str) -> _Token:
|
|||
|
||||
def _make_token(
|
||||
key: str,
|
||||
size: int,
|
||||
dtype: Optional[np.dtype] = None,
|
||||
) -> _Token:
|
||||
'''
|
||||
|
@ -162,12 +158,13 @@ def _make_token(
|
|||
to access a shared array.
|
||||
|
||||
'''
|
||||
dtype = base_iohlc_dtype if dtype is None else dtype
|
||||
dtype = def_iohlcv_fields if dtype is None else dtype
|
||||
return _Token(
|
||||
shm_name=key,
|
||||
shm_first_index_name=key + "_first",
|
||||
shm_last_index_name=key + "_last",
|
||||
dtype_descr=np.dtype(dtype).descr
|
||||
dtype_descr=tuple(np.dtype(dtype).descr),
|
||||
size=size,
|
||||
)
|
||||
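A small assumed sketch of what ``_make_token()`` above produces (the key string is hypothetical); the token round-trips through its msg form for IPC:

token = _make_token(key='btcusdt.binance.hist', size=1000)
# token.shm_name             -> 'btcusdt.binance.hist'
# token.shm_first_index_name -> 'btcusdt.binance.hist_first'
# token.shm_last_index_name  -> 'btcusdt.binance.hist_last'
msg = token.as_msg()              # plain dict, safe to ship over IPC
restored = _Token.from_msg(msg)   # reconstructs the frozen struct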
|
||||
|
||||
|
@ -219,6 +216,7 @@ class ShmArray:
|
|||
shm_first_index_name=self._first._shm.name,
|
||||
shm_last_index_name=self._last._shm.name,
|
||||
dtype_descr=tuple(self._array.dtype.descr),
|
||||
size=self._len,
|
||||
)
|
||||
|
||||
@property
|
||||
|
@ -250,7 +248,6 @@ class ShmArray:
|
|||
# to load an empty array..
|
||||
if len(a) == 0 and self._post_init:
|
||||
raise RuntimeError('Empty array race condition hit!?')
|
||||
# breakpoint()
|
||||
|
||||
return a
|
||||
|
||||
|
@ -260,7 +257,7 @@ class ShmArray:
|
|||
|
||||
# type that all field values will be cast to
|
||||
# in the returned view.
|
||||
common_dtype: np.dtype = np.float,
|
||||
common_dtype: np.dtype = float,
|
||||
|
||||
) -> np.ndarray:
|
||||
|
||||
|
@ -315,7 +312,7 @@ class ShmArray:
|
|||
field_map: Optional[dict[str, str]] = None,
|
||||
prepend: bool = False,
|
||||
update_first: bool = True,
|
||||
start: Optional[int] = None,
|
||||
start: int | None = None,
|
||||
|
||||
) -> int:
|
||||
'''
|
||||
|
@ -357,7 +354,11 @@ class ShmArray:
|
|||
# tries to access ``.array`` (which due to the index
|
||||
# overlap will be empty). Pretty sure we've fixed it now
|
||||
# but leaving this here as a reminder.
|
||||
if prepend and update_first and length:
|
||||
if (
|
||||
prepend
|
||||
and update_first
|
||||
and length
|
||||
):
|
||||
assert index < self._first.value
|
||||
|
||||
if (
|
||||
|
@ -431,10 +432,10 @@ class ShmArray:
|
|||
|
||||
|
||||
def open_shm_array(
|
||||
|
||||
key: Optional[str] = None,
|
||||
size: int = _default_size,
|
||||
dtype: Optional[np.dtype] = None,
|
||||
size: int,
|
||||
key: str | None = None,
|
||||
dtype: np.dtype | None = None,
|
||||
append_start_index: int | None = None,
|
||||
readonly: bool = False,
|
||||
|
||||
) -> ShmArray:
|
||||
|
@ -464,7 +465,8 @@ def open_shm_array(
|
|||
|
||||
token = _make_token(
|
||||
key=key,
|
||||
dtype=dtype
|
||||
size=size,
|
||||
dtype=dtype,
|
||||
)
|
||||
|
||||
# create single entry arrays for storing an first and last indices
|
||||
|
@ -498,10 +500,13 @@ def open_shm_array(
|
|||
# ``ShmArray._start.value: int = 0`` and the yet-to-be written
|
||||
# real-time section will start at ``ShmArray.index: int``.
|
||||
|
||||
# this sets the index to 3/4 of the length of the buffer
|
||||
# leaving a "days worth of second samples" for the real-time
|
||||
# section.
|
||||
last.value = first.value = _rt_buffer_start
|
||||
# this sets the index to nearly 2/3rds into the the length of
|
||||
# the buffer leaving at least a "days worth of second samples"
|
||||
# for the real-time section.
|
||||
if append_start_index is None:
|
||||
append_start_index = round(size * 0.616)
|
||||
|
||||
last.value = first.value = append_start_index
|
||||
|
||||
shmarr = ShmArray(
|
||||
array,
|
||||
|
@ -515,16 +520,15 @@ def open_shm_array(
|
|||
|
||||
# "unlink" created shm on process teardown by
|
||||
# pushing teardown calls onto actor context stack
|
||||
|
||||
tractor._actor._lifetime_stack.callback(shmarr.close)
|
||||
tractor._actor._lifetime_stack.callback(shmarr.destroy)
|
||||
stack = tractor.current_actor().lifetime_stack
|
||||
stack.callback(shmarr.close)
|
||||
stack.callback(shmarr.destroy)
|
||||
|
||||
return shmarr
|
||||
|
||||
|
||||
def attach_shm_array(
|
||||
token: tuple[str, str, tuple[str, str]],
|
||||
size: int = _default_size,
|
||||
readonly: bool = True,
|
||||
|
||||
) -> ShmArray:
|
||||
|
@ -563,7 +567,7 @@ def attach_shm_array(
|
|||
raise _err
|
||||
|
||||
shmarr = np.ndarray(
|
||||
(size,),
|
||||
(token.size,),
|
||||
dtype=token.dtype,
|
||||
buffer=shm.buf
|
||||
)
|
||||
|
@ -602,15 +606,18 @@ def attach_shm_array(
|
|||
if key not in _known_tokens:
|
||||
_known_tokens[key] = token
|
||||
|
||||
# "close" attached shm on process teardown
|
||||
tractor._actor._lifetime_stack.callback(sha.close)
|
||||
# "close" attached shm on actor teardown
|
||||
tractor.current_actor().lifetime_stack.callback(sha.close)
|
||||
|
||||
return sha
|
||||
|
||||
|
||||
def maybe_open_shm_array(
|
||||
key: str,
|
||||
dtype: Optional[np.dtype] = None,
|
||||
size: int,
|
||||
dtype: np.dtype | None = None,
|
||||
append_start_index: int | None = None,
|
||||
readonly: bool = False,
|
||||
**kwargs,
|
||||
|
||||
) -> tuple[ShmArray, bool]:
|
||||
|
@ -634,23 +641,41 @@ def maybe_open_shm_array(
|
|||
try:
|
||||
# see if we already know this key
|
||||
token = _known_tokens[key]
|
||||
return attach_shm_array(token=token, **kwargs), False
|
||||
return (
|
||||
attach_shm_array(
|
||||
token=token,
|
||||
readonly=readonly,
|
||||
),
|
||||
False,
|
||||
)
|
||||
except KeyError:
|
||||
log.warning(f"Could not find {key} in shms cache")
|
||||
log.debug(f"Could not find {key} in shms cache")
|
||||
if dtype:
|
||||
token = _make_token(key, dtype)
|
||||
token = _make_token(
|
||||
key,
|
||||
size=size,
|
||||
dtype=dtype,
|
||||
)
|
||||
try:
|
||||
return attach_shm_array(token=token, **kwargs), False
|
||||
except FileNotFoundError:
|
||||
log.warning(f"Could not attach to shm with token {token}")
|
||||
log.debug(f"Could not attach to shm with token {token}")
|
||||
|
||||
# This actor does not know about memory
|
||||
# associated with the provided "key".
|
||||
# Attempt to open a block and expect
|
||||
# to fail if a block has been allocated
|
||||
# on the OS by someone else.
|
||||
return open_shm_array(key=key, dtype=dtype, **kwargs), True
|
||||
|
||||
return (
|
||||
open_shm_array(
|
||||
key=key,
|
||||
size=size,
|
||||
dtype=dtype,
|
||||
append_start_index=append_start_index,
|
||||
readonly=readonly,
|
||||
),
|
||||
True,
|
||||
)
|
||||
|
||||
def try_read(
|
||||
array: np.ndarray
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
# piker: trading gear for hackers
|
||||
# Copyright (C) 2018-present Tyler Goodlet (in stewardship for piker0)
|
||||
# Copyright (C) 2018-present Tyler Goodlet (in stewardship for pikers)
|
||||
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU Affero General Public License as published by
|
||||
|
@ -18,34 +18,47 @@
|
|||
numpy data source coversion helpers.
|
||||
"""
|
||||
from __future__ import annotations
|
||||
from typing import Any
|
||||
import decimal
|
||||
|
||||
from bidict import bidict
|
||||
import numpy as np
|
||||
from pydantic import BaseModel
|
||||
# from numba import from_dtype
|
||||
|
||||
|
||||
ohlc_fields = [
    ('time', float),
def_iohlcv_fields: list[tuple[str, type]] = [

    # YES WE KNOW, this isn't needed in polars but we use it for doing
    # ring-buffer like pre/append ops our our `ShmArray` real-time
    # numpy-array buffering system such that there is a master index
    # that can be used for index-arithmetic when write data to the
    # "middle" of the array. See the ``tractor.ipc.shm`` pkg for more
    # details.
    ('index', int),

    # presume int for epoch stamps since it's most common
    # and makes the most sense to avoid float rounding issues.
    # TODO: if we want higher reso we should use the new
    # ``time.time_ns()`` in python 3.10+
    ('time', int),
    ('open', float),
    ('high', float),
    ('low', float),
    ('close', float),
    ('volume', float),
    ('bar_wap', float),

    # TODO: can we elim this from default field set to save on mem?
    # i think only kraken really uses this in terms of what we get from
    # their ohlc history API?
    # ('bar_wap', float),  # shouldn't be default right?
]

ohlc_with_index = ohlc_fields.copy()
ohlc_with_index.insert(0, ('index', int))

# our minimum structured array layout for ohlc data
base_iohlc_dtype = np.dtype(ohlc_with_index)
base_ohlc_dtype = np.dtype(ohlc_fields)
# remove index field
def_ohlcv_fields: list[tuple[str, type]] = def_iohlcv_fields.copy()
def_ohlcv_fields.pop(0)
assert (len(def_iohlcv_fields) - len(def_ohlcv_fields)) == 1

# TODO: for now need to construct this manually for readonly arrays, see
# https://github.com/numba/numba/issues/4511
# from numba import from_dtype
# base_ohlc_dtype = np.dtype(def_ohlc_fields)
# numba_ohlc_dtype = from_dtype(base_ohlc_dtype)

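The `(name, type)` tuples above only become useful once handed to `np.dtype()`; that structured dtype is what the shm layer allocates and what `ohlc_zeros()` below fills. A small sketch using the `def_iohlcv_fields` name from this diff (the 5-row buffer is arbitrary):

import numpy as np

# structured dtype built from the default index-prefixed ohlcv fields
iohlcv_dtype = np.dtype(def_iohlcv_fields)

# a tiny zeroed buffer, in "struct-array row" terms
bars = np.zeros(5, dtype=iohlcv_dtype)

# columns are addressed by field name as plain 1d views
bars['index'] = np.arange(5)
bars['time'] = 1_700_000_000 + 60 * np.arange(5)

# `.descr` here is exactly what gets stored as `_Token.dtype_descr`
print(bars.dtype.descr)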
# map time frame "keys" to seconds values
|
||||
|
@ -60,28 +73,6 @@ tf_in_1s = bidict({
})


def mk_fqsn(
    provider: str,
    symbol: str,

) -> str:
    '''
    Generate a "fully qualified symbol name" which is
    a reverse-hierarchical cross broker/provider symbol

    '''
    return '.'.join([symbol, provider]).lower()


def float_digits(
    value: float,
) -> int:
    if value == 0:
        return 0

    return int(-decimal.Decimal(str(value)).as_tuple().exponent)


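`float_digits()` derives the number of decimal places of a price/size increment from the `decimal` exponent, which is how `Symbol` computes its `tick_size_digits` / `lot_size_digits` fields further down. A quick standalone check (re-stating the helper so the snippet runs on its own; the sample increments are arbitrary):

from decimal import Decimal

def float_digits(value: float) -> int:
    if value == 0:
        return 0
    return int(-Decimal(str(value)).as_tuple().exponent)

assert float_digits(0.01) == 2      # typical equity tick size
assert float_digits(0.00001) == 5   # fx-style pip precision
assert float_digits(0) == 0         # zero means "no precision info"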
def ohlc_zeros(length: int) -> np.ndarray:
|
||||
"""Construct an OHLC field formatted structarray.
|
||||
|
||||
|
@ -92,168 +83,6 @@ def ohlc_zeros(length: int) -> np.ndarray:
|
|||
return np.zeros(length, dtype=base_ohlc_dtype)
|
||||
|
||||
|
||||
def unpack_fqsn(fqsn: str) -> tuple[str, str, str]:
    '''
    Unpack a fully-qualified-symbol-name to ``tuple``.

    '''
    venue = ''
    suffix = ''

    # TODO: probably reverse the order of all this XD
    tokens = fqsn.split('.')
    if len(tokens) < 3:
        # probably crypto
        symbol, broker = tokens
        return (
            broker,
            symbol,
            '',
        )

    elif len(tokens) > 3:
        symbol, venue, suffix, broker = tokens
    else:
        symbol, venue, broker = tokens
        suffix = ''

    # head, _, broker = fqsn.rpartition('.')
    # symbol, _, suffix = head.rpartition('.')
    return (
        broker,
        '.'.join([symbol, venue]),
        suffix,
    )


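The token-count branching above means a 2-part name (typical for crypto) comes back with an empty venue and suffix, while a 4-part name carries a contract suffix in the middle. A few worked inputs for the `unpack_fqsn()` helper being (re)moved in this hunk; the symbols are illustrative only:

# 2 tokens: <symbol>.<broker>
assert unpack_fqsn('xbtusd.kraken') == ('kraken', 'xbtusd', '')

# 3 tokens: <symbol>.<venue>.<broker>
assert unpack_fqsn('mnq.globex.ib') == ('ib', 'mnq.globex', '')

# 4 tokens: <symbol>.<venue>.<suffix>.<broker>
assert unpack_fqsn('mnq.globex.20230915.ib') == ('ib', 'mnq.globex', '20230915')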
class Symbol(BaseModel):
|
||||
'''
|
||||
I guess this is some kinda container thing for dealing with
|
||||
all the different meta-data formats from brokers?
|
||||
|
||||
'''
|
||||
key: str
|
||||
tick_size: float = 0.01
|
||||
lot_tick_size: float = 0.0 # "volume" precision as min step value
|
||||
tick_size_digits: int = 2
|
||||
lot_size_digits: int = 0
|
||||
suffix: str = ''
|
||||
broker_info: dict[str, dict[str, Any]] = {}
|
||||
|
||||
# specifies a "class" of financial instrument
|
||||
# ex. stock, futer, option, bond etc.
|
||||
|
||||
# @validate_arguments
|
||||
@classmethod
|
||||
def from_broker_info(
|
||||
cls,
|
||||
broker: str,
|
||||
symbol: str,
|
||||
info: dict[str, Any],
|
||||
suffix: str = '',
|
||||
|
||||
# XXX: like wtf..
|
||||
# ) -> 'Symbol':
|
||||
) -> None:
|
||||
|
||||
tick_size = info.get('price_tick_size', 0.01)
|
||||
lot_tick_size = info.get('lot_tick_size', 0.0)
|
||||
|
||||
return Symbol(
|
||||
key=symbol,
|
||||
tick_size=tick_size,
|
||||
lot_tick_size=lot_tick_size,
|
||||
tick_size_digits=float_digits(tick_size),
|
||||
lot_size_digits=float_digits(lot_tick_size),
|
||||
suffix=suffix,
|
||||
broker_info={broker: info},
|
||||
)
|
||||
|
||||
@classmethod
|
||||
def from_fqsn(
|
||||
cls,
|
||||
fqsn: str,
|
||||
info: dict[str, Any],
|
||||
|
||||
# XXX: like wtf..
|
||||
# ) -> 'Symbol':
|
||||
) -> None:
|
||||
broker, key, suffix = unpack_fqsn(fqsn)
|
||||
return cls.from_broker_info(
|
||||
broker,
|
||||
key,
|
||||
info=info,
|
||||
suffix=suffix,
|
||||
)
|
||||
|
||||
@property
|
||||
def type_key(self) -> str:
|
||||
return list(self.broker_info.values())[0]['asset_type']
|
||||
|
||||
@property
|
||||
def brokers(self) -> list[str]:
|
||||
return list(self.broker_info.keys())
|
||||
|
||||
def nearest_tick(self, value: float) -> float:
|
||||
'''
|
||||
Return the nearest tick value based on mininum increment.
|
||||
|
||||
'''
|
||||
mult = 1 / self.tick_size
|
||||
return round(value * mult) / mult
|
||||
|
||||
def front_feed(self) -> tuple[str, str]:
|
||||
'''
|
||||
Return the "current" feed key for this symbol.
|
||||
|
||||
(i.e. the broker + symbol key in a tuple).
|
||||
|
||||
'''
|
||||
return (
|
||||
list(self.broker_info.keys())[0],
|
||||
self.key,
|
||||
)
|
||||
|
||||
def tokens(self) -> tuple[str]:
|
||||
broker, key = self.front_feed()
|
||||
if self.suffix:
|
||||
return (key, self.suffix, broker)
|
||||
else:
|
||||
return (key, broker)
|
||||
|
||||
def front_fqsn(self) -> str:
|
||||
'''
|
||||
fqsn = "fully qualified symbol name"
|
||||
|
||||
Basically the idea here is for all client-ish code (aka programs/actors
|
||||
that ask the provider agnostic layers in the stack for data) should be
|
||||
able to tell which backend / venue / derivative each data feed/flow is
|
||||
from by an explicit string key of the current form:
|
||||
|
||||
<instrumentname>.<venue>.<suffixwithmetadata>.<brokerbackendname>
|
||||
|
||||
TODO: I have thoughts that we should actually change this to be
|
||||
more like an "attr lookup" (like how the web should have done
|
||||
urls, but marketting peeps ruined it etc. etc.):
|
||||
|
||||
<broker>.<venue>.<instrumentname>.<suffixwithmetadata>
|
||||
|
||||
'''
|
||||
tokens = self.tokens()
|
||||
fqsn = '.'.join(tokens)
|
||||
return fqsn
|
||||
|
||||
def iterfqsns(self) -> list[str]:
|
||||
keys = []
|
||||
for broker in self.broker_info.keys():
|
||||
fqsn = mk_fqsn(self.key, broker)
|
||||
if self.suffix:
|
||||
fqsn += f'.{self.suffix}'
|
||||
keys.append(fqsn)
|
||||
|
||||
return keys
|
||||
|
||||
|
||||
def _nan_to_closest_num(array: np.ndarray):
|
||||
"""Return interpolated values instead of NaN.
|
||||
|
||||
|
|
|
@ -0,0 +1,510 @@
|
|||
# piker: trading gear for hackers
|
||||
# Copyright (C) Tyler Goodlet (in stewardship for pikers)
|
||||
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU Affero General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU Affero General Public License for more details.
|
||||
|
||||
# You should have received a copy of the GNU Affero General Public License
|
||||
# along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
'''
|
||||
Mega-simple symbology cache via TOML files.
|
||||
|
||||
Allow backend data providers and/or brokers to stash their
|
||||
symbology sets (aka the meta data we normalize into our
|
||||
`.accounting.MktPair` type) to the filesystem for faster lookup and
|
||||
offline usage.
|
||||
|
||||
'''
|
||||
from __future__ import annotations
|
||||
from contextlib import (
|
||||
asynccontextmanager as acm,
|
||||
)
|
||||
from pathlib import Path
|
||||
from pprint import pformat
|
||||
from typing import (
|
||||
Any,
|
||||
Sequence,
|
||||
Hashable,
|
||||
TYPE_CHECKING,
|
||||
)
|
||||
from types import ModuleType
|
||||
|
||||
from rapidfuzz import process as fuzzy
|
||||
import tomli_w # for fast symbol cache writing
|
||||
import tractor
|
||||
import trio
|
||||
try:
|
||||
import tomllib
|
||||
except ModuleNotFoundError:
|
||||
import tomli as tomllib
|
||||
from msgspec import field
|
||||
|
||||
from piker.log import get_logger
|
||||
from piker import config
|
||||
from piker.types import Struct
|
||||
from piker.brokers import (
|
||||
open_cached_client,
|
||||
get_brokermod,
|
||||
)
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from ..accounting import (
|
||||
Asset,
|
||||
MktPair,
|
||||
)
|
||||
|
||||
log = get_logger('data.cache')
|
||||
|
||||
|
||||
class SymbologyCache(Struct):
|
||||
'''
|
||||
Asset meta-data cache which holds lookup tables for 3 sets of
|
||||
market-symbology related struct-types required by the
|
||||
`.accounting` and `.data` subsystems.
|
||||
|
||||
'''
|
||||
mod: ModuleType
|
||||
fp: Path
|
||||
|
||||
# all asset-money-systems descriptions as minimally defined by
|
||||
# in `.accounting.Asset`
|
||||
assets: dict[str, Asset] = field(default_factory=dict)
|
||||
|
||||
# backend-system pairs loaded in provider (schema) specific
|
||||
# structs.
|
||||
pairs: dict[str, Struct] = field(default_factory=dict)
|
||||
# serialized namespace path to the backend's pair-info-`Struct`
|
||||
# defn B)
|
||||
pair_ns_path: tractor.msg.NamespacePath | None = None
|
||||
|
||||
# TODO: piker-normalized `.accounting.MktPair` table?
|
||||
# loaded from the `.pairs` and a normalizer
|
||||
# provided by the backend pkg.
|
||||
mktmaps: dict[str, MktPair] = field(default_factory=dict)
|
||||
|
||||
def write_config(self) -> None:
|
||||
|
||||
# put the backend's pair-struct type ref at the top
|
||||
# of file if possible.
|
||||
cachedict: dict[str, Any] = {
|
||||
'pair_ns_path': str(self.pair_ns_path) or '',
|
||||
}
|
||||
|
||||
# serialize all tables as dicts for TOML.
|
||||
for key, table in {
|
||||
'assets': self.assets,
|
||||
'pairs': self.pairs,
|
||||
'mktmaps': self.mktmaps,
|
||||
}.items():
|
||||
if not table:
|
||||
log.warning(
|
||||
f'Asset cache table for `{key}` is empty?'
|
||||
)
|
||||
continue
|
||||
|
||||
dct = cachedict[key] = {}
|
||||
for key, struct in table.items():
|
||||
dct[key] = struct.to_dict(include_non_members=False)
|
||||
|
||||
try:
|
||||
with self.fp.open(mode='wb') as fp:
|
||||
tomli_w.dump(cachedict, fp)
|
||||
except TypeError:
|
||||
self.fp.unlink()
|
||||
raise
|
||||
|
||||
async def load(self) -> None:
|
||||
'''
|
||||
Explicitly load the "symbology set" for this provider by using
|
||||
2 required `Client` methods:
|
||||
|
||||
- `.get_assets()`: returning a table of `Asset`s
|
||||
- `.get_mkt_pairs()`: returning a table of pair-`Struct`
|
||||
types, custom defined by the particular backend.
|
||||
|
||||
AND, the required `.get_mkt_info()` module-level endpoint
|
||||
which maps `fqme: str` -> `MktPair`s.
|
||||
|
||||
These tables are then used to fill out the `.assets`, `.pairs` and
|
||||
`.mktmaps` tables on this cache instance, respectively.
|
||||
|
||||
'''
|
||||
async with open_cached_client(self.mod.name) as client:
|
||||
|
||||
if get_assets := getattr(client, 'get_assets', None):
|
||||
assets: dict[str, Asset] = await get_assets()
|
||||
for bs_mktid, asset in assets.items():
|
||||
self.assets[bs_mktid] = asset
|
||||
else:
|
||||
log.warning(
|
||||
'No symbology cache `Asset` support for `{provider}`..\n'
|
||||
'Implement `Client.get_assets()`!'
|
||||
)
|
||||
|
||||
if get_mkt_pairs := getattr(client, 'get_mkt_pairs', None):
|
||||
|
||||
pairs: dict[str, Struct] = await get_mkt_pairs()
|
||||
for bs_fqme, pair in pairs.items():
|
||||
|
||||
# NOTE: every backend defined pair should
|
||||
# declare it's ns path for roundtrip
|
||||
# serialization lookup.
|
||||
if not getattr(pair, 'ns_path', None):
|
||||
raise TypeError(
|
||||
f'Pair-struct for {self.mod.name} MUST define a '
|
||||
'`.ns_path: str`!\n'
|
||||
f'{pair}'
|
||||
)
|
||||
|
||||
entry = await self.mod.get_mkt_info(pair.bs_fqme)
|
||||
if not entry:
|
||||
continue
|
||||
|
||||
mkt: MktPair
|
||||
pair: Struct
|
||||
mkt, _pair = entry
|
||||
assert _pair is pair, (
|
||||
f'`{self.mod.name}` backend probably has a '
|
||||
'keying-symmetry problem between the pair-`Struct` '
|
||||
'returned from `Client.get_mkt_pairs()`and the '
|
||||
'module level endpoint: `.get_mkt_info()`\n\n'
|
||||
"Here's the struct diff:\n"
|
||||
f'{_pair - pair}'
|
||||
)
|
||||
# NOTE XXX: this means backends MUST implement
|
||||
# a `Struct.bs_mktid: str` field to provide
|
||||
# a native-keyed map to their own symbol
|
||||
# set(s).
|
||||
self.pairs[pair.bs_mktid] = pair
|
||||
|
||||
# NOTE: `MktPair`s are keyed here using piker's
|
||||
# internal FQME schema so that search,
|
||||
# accounting and feed init can be accomplished
|
||||
# a sane, uniform, normalized basis.
|
||||
self.mktmaps[mkt.fqme] = mkt
|
||||
|
||||
self.pair_ns_path: str = tractor.msg.NamespacePath.from_ref(
|
||||
pair,
|
||||
)
|
||||
|
||||
else:
|
||||
log.warning(
|
||||
'No symbology cache `Pair` support for `{provider}`..\n'
|
||||
'Implement `Client.get_mkt_pairs()`!'
|
||||
)
|
||||
|
||||
return self
|
||||
|
||||
@classmethod
|
||||
def from_dict(
|
||||
cls: type,
|
||||
data: dict,
|
||||
**kwargs,
|
||||
) -> SymbologyCache:
|
||||
|
||||
# normal init inputs
|
||||
cache = cls(**kwargs)
|
||||
|
||||
# XXX WARNING: this may break if backend namespacing
|
||||
# changes (eg. `Pair` class def is moved to another
|
||||
# module) in which case you can manually update the
|
||||
# `pair_ns_path` in the symcache file and try again.
|
||||
# TODO: probably a verbose error about this?
|
||||
Pair: type = tractor.msg.NamespacePath(
|
||||
str(data['pair_ns_path'])
|
||||
).load_ref()
|
||||
|
||||
pairtable = data.pop('pairs')
|
||||
for key, pairtable in pairtable.items():
|
||||
|
||||
# allow each serialized pair-dict-table to declare its
|
||||
# specific struct type's path in cases where a backend
|
||||
# supports multiples (normally with different
|
||||
# schemas..) and we are storing them in a flat `.pairs`
|
||||
# table.
|
||||
ThisPair = Pair
|
||||
if this_pair_type := pairtable.get('ns_path'):
|
||||
ThisPair: type = tractor.msg.NamespacePath(
|
||||
str(this_pair_type)
|
||||
).load_ref()
|
||||
|
||||
pair: Struct = ThisPair(**pairtable)
|
||||
cache.pairs[key] = pair
|
||||
|
||||
from ..accounting import (
|
||||
Asset,
|
||||
MktPair,
|
||||
)
|
||||
|
||||
# load `dict` -> `Asset`
|
||||
assettable = data.pop('assets')
|
||||
for name, asdict in assettable.items():
|
||||
cache.assets[name] = Asset.from_msg(asdict)
|
||||
|
||||
# load `dict` -> `MktPair`
|
||||
dne: list[str] = []
|
||||
mkttable = data.pop('mktmaps')
|
||||
for fqme, mktdict in mkttable.items():
|
||||
|
||||
mkt = MktPair.from_msg(mktdict)
|
||||
assert mkt.fqme == fqme
|
||||
|
||||
# sanity check asset refs from those (presumably)
|
||||
# loaded asset set above.
|
||||
src: Asset = cache.assets[mkt.src.name]
|
||||
assert src == mkt.src
|
||||
dst: Asset
|
||||
if not (dst := cache.assets.get(mkt.dst.name)):
|
||||
dne.append(mkt.dst.name)
|
||||
continue
|
||||
else:
|
||||
assert dst.name == mkt.dst.name
|
||||
|
||||
cache.mktmaps[fqme] = mkt
|
||||
|
||||
log.warning(
|
||||
f'These `MktPair.dst: Asset`s DNE says `{cache.mod.name}`?\n'
|
||||
f'{pformat(dne)}'
|
||||
)
|
||||
return cache
|
||||
|
||||
@staticmethod
|
||||
async def from_scratch(
|
||||
mod: ModuleType,
|
||||
fp: Path,
|
||||
**kwargs,
|
||||
|
||||
) -> SymbologyCache:
|
||||
'''
|
||||
Generate (a) new symcache (contents) entirely from scratch
|
||||
including all (TOML) serialized data and file.
|
||||
|
||||
'''
|
||||
log.info(f'GENERATING symbology cache for `{mod.name}`')
|
||||
cache = SymbologyCache(
|
||||
mod=mod,
|
||||
fp=fp,
|
||||
**kwargs,
|
||||
)
|
||||
await cache.load()
|
||||
cache.write_config()
|
||||
return cache
|
||||
|
||||
def search(
|
||||
self,
|
||||
pattern: str,
|
||||
table: str = 'mktmaps'
|
||||
|
||||
) -> dict[str, Struct]:
|
||||
'''
|
||||
(Fuzzy) search this cache's `.mktmaps` table, which is
|
||||
keyed by FQMEs, for `pattern: str` and return the best
|
||||
matches in a `dict` including the `MktPair` values.
|
||||
|
||||
'''
|
||||
matches = fuzzy.extract(
|
||||
pattern,
|
||||
getattr(self, table),
|
||||
score_cutoff=50,
|
||||
)
|
||||
|
||||
# repack in dict[fqme, MktPair] form
|
||||
return {
|
||||
item[0].fqme: item[0]
|
||||
for item in matches
|
||||
}
|
||||
|
||||
|
||||
# actor-process-local in-mem-cache of symcaches (by backend).
|
||||
_caches: dict[str, SymbologyCache] = {}
|
||||
|
||||
|
||||
def mk_cachefile(
|
||||
provider: str,
|
||||
) -> Path:
|
||||
cachedir: Path = config.get_conf_dir() / '_cache'
|
||||
if not cachedir.is_dir():
|
||||
log.info(f'Creating `nativedb` director: {cachedir}')
|
||||
cachedir.mkdir()
|
||||
|
||||
cachefile: Path = cachedir / f'{str(provider)}.symcache.toml'
|
||||
cachefile.touch()
|
||||
return cachefile
|
||||
|
||||
|
||||
@acm
|
||||
async def open_symcache(
|
||||
mod_or_name: ModuleType | str,
|
||||
|
||||
reload: bool = False,
|
||||
only_from_memcache: bool = False, # no API req
|
||||
_no_symcache: bool = False, # no backend support
|
||||
|
||||
) -> SymbologyCache:
|
||||
|
||||
if isinstance(mod_or_name, str):
|
||||
mod = get_brokermod(mod_or_name)
|
||||
else:
|
||||
mod: ModuleType = mod_or_name
|
||||
|
||||
provider: str = mod.name
|
||||
cachefile: Path = mk_cachefile(provider)
|
||||
|
||||
# NOTE: certain backends might not support a symbology cache
|
||||
# (easily) and thus we allow for an empty instance to be loaded
|
||||
# and manually filled in at the whim of the caller presuming
|
||||
# the backend pkg-module is annotated appropriately.
|
||||
if (
|
||||
getattr(mod, '_no_symcache', False)
|
||||
or _no_symcache
|
||||
):
|
||||
yield SymbologyCache(
|
||||
mod=mod,
|
||||
fp=cachefile,
|
||||
)
|
||||
# don't do nuttin
|
||||
return
|
||||
|
||||
# actor-level cache-cache XD
|
||||
global _caches
|
||||
if not reload:
|
||||
try:
|
||||
yield _caches[provider]
|
||||
except KeyError:
|
||||
msg: str = (
|
||||
f'No asset info cache exists yet for `{provider}`'
|
||||
)
|
||||
if only_from_memcache:
|
||||
raise RuntimeError(msg)
|
||||
else:
|
||||
log.warning(msg)
|
||||
|
||||
# if no cache exists or an explicit reload is requested, load
|
||||
# the provider API and call appropriate endpoints to populate
|
||||
# the mkt and asset tables.
|
||||
if (
|
||||
reload
|
||||
or not cachefile.is_file()
|
||||
):
|
||||
cache = await SymbologyCache.from_scratch(
|
||||
mod=mod,
|
||||
fp=cachefile,
|
||||
)
|
||||
|
||||
else:
|
||||
log.info(
|
||||
f'Loading EXISTING `{mod.name}` symbology cache:\n'
|
||||
f'> {cachefile}'
|
||||
)
|
||||
import time
|
||||
now = time.time()
|
||||
with cachefile.open('rb') as existing_fp:
|
||||
data: dict[str, dict] = tomllib.load(existing_fp)
|
||||
log.runtime(f'SYMCACHE TOML LOAD TIME: {time.time() - now}')
|
||||
|
||||
# if there's an empty file for some reason we need
|
||||
# to do a full reload as well!
|
||||
if not data:
|
||||
cache = await SymbologyCache.from_scratch(
|
||||
mod=mod,
|
||||
fp=cachefile,
|
||||
)
|
||||
else:
|
||||
cache = SymbologyCache.from_dict(
|
||||
data,
|
||||
mod=mod,
|
||||
fp=cachefile,
|
||||
)
|
||||
|
||||
|
||||
# TODO: use a real profiling sys..
|
||||
# https://github.com/pikers/piker/issues/337
|
||||
log.info(f'SYMCACHE LOAD TIME: {time.time() - now}')
|
||||
|
||||
yield cache
|
||||
|
||||
# TODO: write only when changes detected? but that should
|
||||
# never happen right except on reload?
|
||||
# cache.write_config()
|
||||
|
||||
|
||||
def get_symcache(
|
||||
provider: str,
|
||||
force_reload: bool = False,
|
||||
|
||||
) -> SymbologyCache:
|
||||
'''
|
||||
Get any available symbology/assets cache from sync code by
|
||||
(maybe) manually running `trio` to do the work.
|
||||
|
||||
'''
|
||||
# spawn tractor runtime and generate cache
|
||||
# if not existing.
|
||||
async def sched_gen_symcache():
|
||||
async with (
|
||||
# only for runtime's debug mode
|
||||
tractor.open_nursery(debug_mode=True),
|
||||
|
||||
open_symcache(
|
||||
get_brokermod(provider),
|
||||
reload=force_reload,
|
||||
) as symcache,
|
||||
):
|
||||
return symcache
|
||||
|
||||
try:
|
||||
symcache: SymbologyCache = trio.run(sched_gen_symcache)
|
||||
assert symcache
|
||||
except BaseException:
|
||||
import pdbp
|
||||
pdbp.xpm()
|
||||
|
||||
return symcache
|
||||
|
||||
|
||||
def match_from_pairs(
|
||||
pairs: dict[str, Struct],
|
||||
query: str,
|
||||
score_cutoff: int = 50,
|
||||
**extract_kwargs,
|
||||
|
||||
) -> dict[str, Struct]:
|
||||
'''
|
||||
Fuzzy search over a "pairs table" maintained by most backends
|
||||
as part of their symbology-info caching internals.
|
||||
|
||||
Scan the native symbol key set and return best ranked
|
||||
matches back in a new `dict`.
|
||||
|
||||
'''
|
||||
|
||||
# TODO: somehow cache this list (per call) like we were in
|
||||
# `open_symbol_search()`?
|
||||
keys: list[str] = list(pairs)
|
||||
matches: list[tuple[
|
||||
Sequence[Hashable], # matching input key
|
||||
Any, # scores
|
||||
Any,
|
||||
]] = fuzzy.extract(
|
||||
# NOTE: most backends provide keys uppercased
|
||||
query=query,
|
||||
choices=keys,
|
||||
score_cutoff=score_cutoff,
|
||||
**extract_kwargs,
|
||||
)
|
||||
|
||||
# pop and repack pairs in output dict
|
||||
matched_pairs: dict[str, Struct] = {}
|
||||
for item in matches:
|
||||
pair_key: str = item[0]
|
||||
matched_pairs[pair_key] = pairs[pair_key]
|
||||
|
||||
return matched_pairs
|
|
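`match_from_pairs()` above is a thin wrapper over `rapidfuzz.process.extract()`: rank the pair-table keys against the query, then repack the winning keys with their structs. A hedged sketch of the same pattern using plain dicts as stand-in pair values (the real table holds backend `Pair` structs); the symbols and cutoff are arbitrary:

from rapidfuzz import process as fuzzy

pairs = {
    'BTCUSDT': {'bs_mktid': 'BTCUSDT'},
    'ETHUSDT': {'bs_mktid': 'ETHUSDT'},
    'BTCUSDC': {'bs_mktid': 'BTCUSDC'},
}

# NOTE: query is uppercased to match how most backends key their pairs
matches = fuzzy.extract(
    query='BTCUSD',
    choices=list(pairs),
    score_cutoff=50,
)

# each match is a (key, score, index) triple; repack the best hits
ranked = {key: pairs[key] for key, _score, _idx in matches}
print(ranked)  # expect the two BTC quotes to rank highest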
@ -0,0 +1,34 @@
# piker: trading gear for hackers
# Copyright (C) Tyler Goodlet (in stewardship for pikers)

# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.

# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Affero General Public License for more details.

# You should have received a copy of the GNU Affero General Public License
# along with this program.  If not, see <https://www.gnu.org/licenses/>.

'''
Data layer module commons.

'''
from functools import partial

from ..log import (
    get_logger,
    get_console_log,
)
subsys: str = 'piker.data'

log = get_logger(subsys)

get_console_log = partial(
    get_console_log,
    name=subsys,
)
@ -1,5 +1,5 @@
|
|||
# piker: trading gear for hackers
|
||||
# Copyright (C) Tyler Goodlet (in stewardship for piker0)
|
||||
# Copyright (C) Tyler Goodlet (in stewardship for pikers)
|
||||
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU Affero General Public License as published by
|
||||
|
@ -18,13 +18,30 @@
|
|||
ToOlS fOr CoPInG wITh "tHE wEB" protocols.
|
||||
|
||||
"""
|
||||
from contextlib import asynccontextmanager, AsyncExitStack
|
||||
from __future__ import annotations
|
||||
from contextlib import (
|
||||
asynccontextmanager as acm,
|
||||
)
|
||||
from itertools import count
|
||||
from functools import partial
|
||||
from types import ModuleType
|
||||
from typing import Any, Callable, AsyncGenerator
|
||||
from typing import (
|
||||
Any,
|
||||
Optional,
|
||||
Callable,
|
||||
AsyncContextManager,
|
||||
AsyncGenerator,
|
||||
Iterable,
|
||||
)
|
||||
import json
|
||||
|
||||
import trio
|
||||
import trio_websocket
|
||||
from trio_typing import TaskStatus
|
||||
from trio_websocket import (
|
||||
WebSocketConnection,
|
||||
open_websocket_url,
|
||||
)
|
||||
from wsproto.utilities import LocalProtocolError
|
||||
from trio_websocket._impl import (
|
||||
ConnectionClosed,
|
||||
DisconnectionTimeout,
|
||||
|
@ -33,81 +50,71 @@ from trio_websocket._impl import (
|
|||
ConnectionTimeout,
|
||||
)
|
||||
|
||||
from ..log import get_logger
|
||||
|
||||
log = get_logger(__name__)
|
||||
from piker.types import Struct
|
||||
from ._util import log
|
||||
|
||||
|
||||
class NoBsWs:
|
||||
"""Make ``trio_websocket`` sockets stay up no matter the bs.
|
||||
'''
|
||||
Make ``trio_websocket`` sockets stay up no matter the bs.
|
||||
|
||||
"""
|
||||
A shim interface that allows client code to stream from some
|
||||
``WebSocketConnection`` but where any connectivy bs is handled
|
||||
automatcially and entirely in the background.
|
||||
|
||||
NOTE: this type should never be created directly but instead is
|
||||
provided via the ``open_autorecon_ws()`` factor below.
|
||||
|
||||
'''
|
||||
# apparently we can QoS for all sorts of reasons..so catch em.
|
||||
recon_errors = (
|
||||
ConnectionClosed,
|
||||
DisconnectionTimeout,
|
||||
ConnectionRejected,
|
||||
HandshakeError,
|
||||
ConnectionTimeout,
|
||||
LocalProtocolError,
|
||||
)
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
url: str,
|
||||
token: str,
|
||||
stack: AsyncExitStack,
|
||||
fixture: Callable,
|
||||
serializer: ModuleType = json,
|
||||
rxchan: trio.MemoryReceiveChannel,
|
||||
msg_recv_timeout: float,
|
||||
|
||||
serializer: ModuleType = json
|
||||
):
|
||||
self.url = url
|
||||
self.token = token
|
||||
self.fixture = fixture
|
||||
self._stack = stack
|
||||
self._ws: 'WebSocketConnection' = None # noqa
|
||||
self._rx = rxchan
|
||||
self._timeout = msg_recv_timeout
|
||||
|
||||
async def _connect(
|
||||
self,
|
||||
tries: int = 1000,
|
||||
) -> None:
|
||||
while True:
|
||||
try:
|
||||
await self._stack.aclose()
|
||||
except (DisconnectionTimeout, RuntimeError):
|
||||
await trio.sleep(0.5)
|
||||
else:
|
||||
break
|
||||
# signaling between caller and relay task which determines when
|
||||
# socket is connected (and subscribed).
|
||||
self._connected: trio.Event = trio.Event()
|
||||
|
||||
last_err = None
|
||||
for i in range(tries):
|
||||
try:
|
||||
self._ws = await self._stack.enter_async_context(
|
||||
trio_websocket.open_websocket_url(self.url)
|
||||
)
|
||||
# rerun user code fixture
|
||||
if self.token == '':
|
||||
ret = await self._stack.enter_async_context(
|
||||
self.fixture(self)
|
||||
)
|
||||
else:
|
||||
ret = await self._stack.enter_async_context(
|
||||
self.fixture(self, self.token)
|
||||
)
|
||||
# dynamically reset by the bg relay task
|
||||
self._ws: WebSocketConnection | None = None
|
||||
self._cs: trio.CancelScope | None = None
|
||||
|
||||
assert ret is None
|
||||
# interchange codec methods
|
||||
# TODO: obviously the method API here may be different
|
||||
# for another interchange format..
|
||||
self._dumps: Callable = serializer.dumps
|
||||
self._loads: Callable = serializer.loads
|
||||
|
||||
log.info(f'Connection success: {self.url}')
|
||||
return self._ws
|
||||
def connected(self) -> bool:
|
||||
return self._connected.is_set()
|
||||
|
||||
except self.recon_errors as err:
|
||||
last_err = err
|
||||
log.error(
|
||||
f'{self} connection bail with '
|
||||
f'{type(err)}...retry attempt {i}'
|
||||
)
|
||||
await trio.sleep(0.5)
|
||||
continue
|
||||
else:
|
||||
log.exception('ws connection fail...')
|
||||
raise last_err
|
||||
async def reset(self) -> None:
|
||||
'''
|
||||
Reset the underlying ws connection by cancelling
|
||||
the bg relay task and waiting for it to signal
|
||||
a new connection.
|
||||
|
||||
'''
|
||||
self._connected = trio.Event()
|
||||
self._cs.cancel()
|
||||
await self._connected.wait()
|
||||
|
||||
async def send_msg(
|
||||
self,
|
||||
|
@ -115,38 +122,348 @@ class NoBsWs:
|
|||
) -> None:
|
||||
while True:
|
||||
try:
|
||||
return await self._ws.send_message(json.dumps(data))
|
||||
msg: Any = self._dumps(data)
|
||||
return await self._ws.send_message(msg)
|
||||
except self.recon_errors:
|
||||
await self._connect()
|
||||
await self.reset()
|
||||
|
||||
async def recv_msg(
|
||||
async def recv_msg(self) -> Any:
|
||||
msg: Any = await self._rx.receive()
|
||||
data = self._loads(msg)
|
||||
return data
|
||||
|
||||
def __aiter__(self):
|
||||
return self
|
||||
|
||||
async def __anext__(self):
|
||||
return await self.recv_msg()
|
||||
|
||||
def set_recv_timeout(
|
||||
self,
|
||||
) -> Any:
|
||||
timeout: float,
|
||||
) -> None:
|
||||
self._timeout = timeout
|
||||
|
||||
|
||||
async def _reconnect_forever(
|
||||
url: str,
|
||||
snd: trio.MemorySendChannel,
|
||||
nobsws: NoBsWs,
|
||||
reset_after: int, # msg recv timeout before reset attempt
|
||||
|
||||
fixture: AsyncContextManager | None = None,
|
||||
task_status: TaskStatus = trio.TASK_STATUS_IGNORED,
|
||||
|
||||
) -> None:
|
||||
|
||||
# TODO: can we just report "where" in the call stack
|
||||
# the client code is using the ws stream?
|
||||
# Maybe we can just drop this since it's already in the log msg
|
||||
# orefix?
|
||||
if fixture is not None:
|
||||
src_mod: str = fixture.__module__
|
||||
else:
|
||||
src_mod: str = 'unknown'
|
||||
|
||||
async def proxy_msgs(
|
||||
ws: WebSocketConnection,
|
||||
pcs: trio.CancelScope, # parent cancel scope
|
||||
):
|
||||
'''
|
||||
Receive (under `timeout` deadline) all msgs from from underlying
|
||||
websocket and relay them to (calling) parent task via ``trio``
|
||||
mem chan.
|
||||
|
||||
'''
|
||||
# after so many msg recv timeouts, reset the connection
|
||||
timeouts: int = 0
|
||||
|
||||
while True:
|
||||
with trio.move_on_after(
|
||||
# can be dynamically changed by user code
|
||||
nobsws._timeout,
|
||||
) as cs:
|
||||
try:
|
||||
return json.loads(await self._ws.get_message())
|
||||
except self.recon_errors:
|
||||
await self._connect()
|
||||
msg: Any = await ws.get_message()
|
||||
await snd.send(msg)
|
||||
except nobsws.recon_errors:
|
||||
log.exception(
|
||||
f'{src_mod}\n'
|
||||
f'{url} connection bail with:'
|
||||
)
|
||||
await trio.sleep(0.5)
|
||||
pcs.cancel()
|
||||
|
||||
# go back to reonnect loop in parent task
|
||||
return
|
||||
|
||||
if cs.cancelled_caught:
|
||||
timeouts += 1
|
||||
if timeouts > reset_after:
|
||||
log.error(
|
||||
f'{src_mod}\n'
|
||||
'WS feed seems down and slow af.. reconnecting\n'
|
||||
)
|
||||
pcs.cancel()
|
||||
|
||||
# go back to reonnect loop in parent task
|
||||
return
|
||||
|
||||
async def open_fixture(
|
||||
fixture: AsyncContextManager,
|
||||
nobsws: NoBsWs,
|
||||
task_status: TaskStatus = trio.TASK_STATUS_IGNORED,
|
||||
):
|
||||
'''
|
||||
Open user provided `@acm` and sleep until any connection
|
||||
reset occurs.
|
||||
|
||||
'''
|
||||
async with fixture(nobsws) as ret:
|
||||
assert ret is None
|
||||
task_status.started()
|
||||
await trio.sleep_forever()
|
||||
|
||||
# last_err = None
|
||||
nobsws._connected = trio.Event()
|
||||
task_status.started()
|
||||
|
||||
while not snd._closed:
|
||||
log.info(
|
||||
f'{src_mod}\n'
|
||||
f'{url} trying (RE)CONNECT'
|
||||
)
|
||||
|
||||
ws: WebSocketConnection
|
||||
try:
|
||||
async with (
|
||||
trio.open_nursery() as n,
|
||||
open_websocket_url(url) as ws,
|
||||
):
|
||||
cs = nobsws._cs = n.cancel_scope
|
||||
nobsws._ws = ws
|
||||
log.info(
|
||||
f'{src_mod}\n'
|
||||
f'Connection success: {url}'
|
||||
)
|
||||
|
||||
# begin relay loop to forward msgs
|
||||
n.start_soon(
|
||||
proxy_msgs,
|
||||
ws,
|
||||
cs,
|
||||
)
|
||||
|
||||
if fixture is not None:
|
||||
log.info(
|
||||
f'{src_mod}\n'
|
||||
f'Entering fixture: {fixture}'
|
||||
)
|
||||
|
||||
# TODO: should we return an explicit sub-cs
|
||||
# from this fixture task?
|
||||
await n.start(
|
||||
open_fixture,
|
||||
fixture,
|
||||
nobsws,
|
||||
)
|
||||
|
||||
# indicate to wrapper / opener that we are up and block
|
||||
# to let tasks run **inside** the ws open block above.
|
||||
nobsws._connected.set()
|
||||
await trio.sleep_forever()
|
||||
except HandshakeError:
|
||||
log.exception(f'Retrying connection')
|
||||
|
||||
# ws & nursery block ends
|
||||
|
||||
nobsws._connected = trio.Event()
|
||||
if cs.cancelled_caught:
|
||||
log.cancel(
|
||||
f'{url} connection cancelled!'
|
||||
)
|
||||
# if wrapper cancelled us, we expect it to also
|
||||
# have re-assigned a new event
|
||||
assert (
|
||||
nobsws._connected
|
||||
and not nobsws._connected.is_set()
|
||||
)
|
||||
|
||||
# -> from here, move to next reconnect attempt iteration
|
||||
# in the while loop above Bp
|
||||
|
||||
else:
|
||||
log.exception(
|
||||
f'{src_mod}\n'
|
||||
'ws connection closed by client...'
|
||||
)
|
||||
|
||||
|
||||
@asynccontextmanager
|
||||
@acm
|
||||
async def open_autorecon_ws(
|
||||
url: str,
|
||||
|
||||
# TODO: proper type annot smh
|
||||
fixture: Callable,
|
||||
# used for authenticated websockets
|
||||
token: str = '',
|
||||
) -> AsyncGenerator[tuple[...], NoBsWs]:
|
||||
"""Apparently we can QoS for all sorts of reasons..so catch em.
|
||||
fixture: AsyncContextManager | None = None,
|
||||
|
||||
"""
|
||||
async with AsyncExitStack() as stack:
|
||||
ws = NoBsWs(url, token, stack, fixture=fixture)
|
||||
await ws._connect()
|
||||
# time in sec between msgs received before
|
||||
# we presume connection might need a reset.
|
||||
msg_recv_timeout: float = 16,
|
||||
|
||||
# count of the number of above timeouts before connection reset
|
||||
reset_after: int = 3,
|
||||
|
||||
) -> AsyncGenerator[tuple[...], NoBsWs]:
|
||||
'''
|
||||
An auto-reconnect websocket (wrapper API) around
|
||||
``trio_websocket.open_websocket_url()`` providing automatic
|
||||
re-connection on network errors, msg latency and thus roaming.
|
||||
|
||||
Here we implement a re-connect websocket interface where a bg
|
||||
nursery runs ``WebSocketConnection.receive_message()``s in a loop
|
||||
and restarts the full http(s) handshake on catches of certain
|
||||
connetivity errors, or some user defined recv timeout.
|
||||
|
||||
You can provide a ``fixture`` async-context-manager which will be
|
||||
entered/exitted around each connection reset; eg. for (re)requesting
|
||||
subscriptions without requiring streaming setup code to rerun.
|
||||
|
||||
'''
|
||||
snd: trio.MemorySendChannel
|
||||
rcv: trio.MemoryReceiveChannel
|
||||
snd, rcv = trio.open_memory_channel(616)
|
||||
|
||||
async with trio.open_nursery() as n:
|
||||
nobsws = NoBsWs(
|
||||
url,
|
||||
rcv,
|
||||
msg_recv_timeout=msg_recv_timeout,
|
||||
)
|
||||
await n.start(
|
||||
partial(
|
||||
_reconnect_forever,
|
||||
url,
|
||||
snd,
|
||||
nobsws,
|
||||
fixture=fixture,
|
||||
reset_after=reset_after,
|
||||
)
|
||||
)
|
||||
await nobsws._connected.wait()
|
||||
assert nobsws._cs
|
||||
assert nobsws.connected()
|
||||
|
||||
try:
|
||||
yield ws
|
||||
|
||||
yield nobsws
|
||||
finally:
|
||||
await stack.aclose()
|
||||
n.cancel_scope.cancel()
|
||||
|
||||
|
||||
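The docstring above describes the intended call pattern: hand `open_autorecon_ws()` a `fixture` async-context-manager that (re)subscribes on every fresh connection, then just iterate messages. A rough usage sketch under that assumption; the endpoint URL and subscription payload are invented:

from contextlib import asynccontextmanager as acm

@acm
async def resub(ws: NoBsWs):
    # (re)request our subscription each time the socket (re)connects
    await ws.send_msg({'event': 'subscribe', 'channels': ['trades']})
    yield  # must yield `None`, matching the assert in `_reconnect_forever()`

async def stream_trades():
    async with open_autorecon_ws(
        'wss://example.invalid/ws',  # hypothetical endpoint
        fixture=resub,
        msg_recv_timeout=16,
    ) as ws:
        async for msg in ws:
            print(msg)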
'''
|
||||
JSONRPC response-request style machinery for transparent multiplexing of msgs
|
||||
over a NoBsWs.
|
||||
|
||||
'''
|
||||
|
||||
|
||||
class JSONRPCResult(Struct):
|
||||
id: int
|
||||
jsonrpc: str = '2.0'
|
||||
result: Optional[dict] = None
|
||||
error: Optional[dict] = None
|
||||
|
||||
|
||||
@acm
|
||||
async def open_jsonrpc_session(
|
||||
url: str,
|
||||
start_id: int = 0,
|
||||
response_type: type = JSONRPCResult,
|
||||
request_type: Optional[type] = None,
|
||||
request_hook: Optional[Callable] = None,
|
||||
error_hook: Optional[Callable] = None,
|
||||
) -> Callable[[str, dict], dict]:
|
||||
|
||||
async with (
|
||||
trio.open_nursery() as n,
|
||||
open_autorecon_ws(url) as ws
|
||||
):
|
||||
rpc_id: Iterable = count(start_id)
|
||||
rpc_results: dict[int, dict] = {}
|
||||
|
||||
async def json_rpc(method: str, params: dict) -> dict:
|
||||
'''
|
||||
perform a json rpc call and wait for the result, raise exception in
|
||||
case of error field present on response
|
||||
'''
|
||||
msg = {
|
||||
'jsonrpc': '2.0',
|
||||
'id': next(rpc_id),
|
||||
'method': method,
|
||||
'params': params
|
||||
}
|
||||
_id = msg['id']
|
||||
|
||||
rpc_results[_id] = {
|
||||
'result': None,
|
||||
'event': trio.Event()
|
||||
}
|
||||
|
||||
await ws.send_msg(msg)
|
||||
|
||||
await rpc_results[_id]['event'].wait()
|
||||
|
||||
ret = rpc_results[_id]['result']
|
||||
|
||||
del rpc_results[_id]
|
||||
|
||||
if ret.error is not None:
|
||||
raise Exception(json.dumps(ret.error, indent=4))
|
||||
|
||||
return ret
|
||||
|
||||
async def recv_task():
|
||||
'''
|
||||
receives every ws message and stores it in its corresponding
|
||||
result field, then sets the event to wakeup original sender
|
||||
tasks. also recieves responses to requests originated from
|
||||
the server side.
|
||||
|
||||
'''
|
||||
async for msg in ws:
|
||||
match msg:
|
||||
case {
|
||||
'result': _,
|
||||
'id': mid,
|
||||
} if res_entry := rpc_results.get(mid):
|
||||
|
||||
res_entry['result'] = response_type(**msg)
|
||||
res_entry['event'].set()
|
||||
|
||||
case {
|
||||
'result': _,
|
||||
'id': mid,
|
||||
} if not rpc_results.get(mid):
|
||||
log.warning(
|
||||
f'Unexpected ws msg: {json.dumps(msg, indent=4)}'
|
||||
)
|
||||
|
||||
case {
|
||||
'method': _,
|
||||
'params': _,
|
||||
}:
|
||||
log.debug(f'Recieved\n{msg}')
|
||||
if request_hook:
|
||||
await request_hook(request_type(**msg))
|
||||
|
||||
case {
|
||||
'error': error
|
||||
}:
|
||||
log.warning(f'Recieved\n{error}')
|
||||
if error_hook:
|
||||
await error_hook(response_type(**msg))
|
||||
|
||||
case _:
|
||||
log.warning(f'Unhandled JSON-RPC msg!?\n{msg}')
|
||||
|
||||
n.start_soon(recv_task)
|
||||
yield json_rpc
|
||||
n.cancel_scope.cancel()
|
||||
|
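The yielded `json_rpc()` closure is the whole client surface: it stamps an id, parks on a per-id `trio.Event` until `recv_task()` fills in the matching response, and raises if the reply carries an `error` field. A hedged usage sketch; the URL and method name are invented:

async def get_server_time():
    async with open_jsonrpc_session(
        'wss://api.example.invalid/ws',  # hypothetical endpoint
    ) as json_rpc:
        # blocks until the response with the matching id arrives,
        # or raises if that response has a non-null `error` field.
        res = await json_rpc('public/get_time', params={})
        return res.result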
|
|
@ -1,196 +0,0 @@
|
|||
# piker: trading gear for hackers
|
||||
# Copyright (C) 2018-present Tyler Goodlet (in stewardship of piker0)
|
||||
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU Affero General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU Affero General Public License for more details.
|
||||
|
||||
# You should have received a copy of the GNU Affero General Public License
|
||||
# along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
"""
|
||||
marketstore cli.
|
||||
|
||||
"""
|
||||
from functools import partial
|
||||
from pprint import pformat
|
||||
|
||||
from anyio_marketstore import open_marketstore_client
|
||||
import trio
|
||||
import tractor
|
||||
import click
|
||||
import numpy as np
|
||||
|
||||
from .marketstore import (
|
||||
get_client,
|
||||
# stream_quotes,
|
||||
ingest_quote_stream,
|
||||
# _url,
|
||||
_tick_tbk_ids,
|
||||
mk_tbk,
|
||||
)
|
||||
from ..cli import cli
|
||||
from .. import watchlists as wl
|
||||
from ..log import get_logger
|
||||
from ._sharedmem import (
|
||||
maybe_open_shm_array,
|
||||
)
|
||||
from ._source import (
|
||||
base_iohlc_dtype,
|
||||
)
|
||||
|
||||
|
||||
log = get_logger(__name__)
|
||||
|
||||
|
||||
@cli.command()
|
||||
@click.option(
|
||||
'--url',
|
||||
default='ws://localhost:5993/ws',
|
||||
help='HTTP URL of marketstore instance'
|
||||
)
|
||||
@click.argument('names', nargs=-1)
|
||||
@click.pass_obj
|
||||
def ms_stream(
|
||||
config: dict,
|
||||
names: list[str],
|
||||
url: str,
|
||||
) -> None:
|
||||
'''
|
||||
Connect to a marketstore time bucket stream for (a set of) symbols(s)
|
||||
and print to console.
|
||||
|
||||
'''
|
||||
async def main():
|
||||
# async for quote in stream_quotes(symbols=names):
|
||||
# log.info(f"Received quote:\n{quote}")
|
||||
...
|
||||
|
||||
trio.run(main)
|
||||
|
||||
|
||||
# @cli.command()
|
||||
# @click.option(
|
||||
# '--url',
|
||||
# default=_url,
|
||||
# help='HTTP URL of marketstore instance'
|
||||
# )
|
||||
# @click.argument('names', nargs=-1)
|
||||
# @click.pass_obj
|
||||
# def ms_destroy(config: dict, names: list[str], url: str) -> None:
|
||||
# """Destroy symbol entries in the local marketstore instance.
|
||||
# """
|
||||
# async def main():
|
||||
# nonlocal names
|
||||
# async with get_client(url) as client:
|
||||
#
|
||||
# if not names:
|
||||
# names = await client.list_symbols()
|
||||
#
|
||||
# # default is to wipe db entirely.
|
||||
# answer = input(
|
||||
# "This will entirely wipe you local marketstore db @ "
|
||||
# f"{url} of the following symbols:\n {pformat(names)}"
|
||||
# "\n\nDelete [N/y]?\n")
|
||||
#
|
||||
# if answer == 'y':
|
||||
# for sym in names:
|
||||
# # tbk = _tick_tbk.format(sym)
|
||||
# tbk = tuple(sym, *_tick_tbk_ids)
|
||||
# print(f"Destroying {tbk}..")
|
||||
# await client.destroy(mk_tbk(tbk))
|
||||
# else:
|
||||
# print("Nothing deleted.")
|
||||
#
|
||||
# tractor.run(main)
|
||||
|
||||
|
||||
@cli.command()
|
||||
@click.option(
|
||||
'--tl',
|
||||
is_flag=True,
|
||||
help='Enable tractor logging')
|
||||
@click.option(
|
||||
'--host',
|
||||
default='localhost'
|
||||
)
|
||||
@click.option(
|
||||
'--port',
|
||||
default=5993
|
||||
)
|
||||
@click.argument('symbols', nargs=-1)
|
||||
@click.pass_obj
|
||||
def storesh(
|
||||
config,
|
||||
tl,
|
||||
host,
|
||||
port,
|
||||
symbols: list[str],
|
||||
):
|
||||
'''
|
||||
Start an IPython shell ready to query the local marketstore db.
|
||||
|
||||
'''
|
||||
from piker.data.marketstore import tsdb_history_update
|
||||
from piker._daemon import open_piker_runtime
|
||||
|
||||
async def main():
|
||||
nonlocal symbols
|
||||
|
||||
async with open_piker_runtime(
|
||||
'storesh',
|
||||
enable_modules=['piker.data._ahab'],
|
||||
):
|
||||
symbol = symbols[0]
|
||||
await tsdb_history_update(symbol)
|
||||
|
||||
trio.run(main)
|
||||
|
||||
|
||||
@cli.command()
|
||||
@click.option('--test-file', '-t', help='Test quote stream file')
|
||||
@click.option('--tl', is_flag=True, help='Enable tractor logging')
|
||||
@click.argument('name', nargs=1, required=True)
|
||||
@click.pass_obj
|
||||
def ingest(config, name, test_file, tl):
|
||||
'''
|
||||
Ingest real-time broker quotes and ticks to a marketstore instance.
|
||||
|
||||
'''
|
||||
# global opts
|
||||
loglevel = config['loglevel']
|
||||
tractorloglevel = config['tractorloglevel']
|
||||
# log = config['log']
|
||||
|
||||
watchlist_from_file = wl.ensure_watchlists(config['wl_path'])
|
||||
watchlists = wl.merge_watchlist(watchlist_from_file, wl._builtins)
|
||||
symbols = watchlists[name]
|
||||
|
||||
grouped_syms = {}
|
||||
for sym in symbols:
|
||||
symbol, _, provider = sym.rpartition('.')
|
||||
if provider not in grouped_syms:
|
||||
grouped_syms[provider] = []
|
||||
|
||||
grouped_syms[provider].append(symbol)
|
||||
|
||||
async def entry_point():
|
||||
async with tractor.open_nursery() as n:
|
||||
for provider, symbols in grouped_syms.items():
|
||||
await n.run_in_actor(
|
||||
ingest_quote_stream,
|
||||
name='ingest_marketstore',
|
||||
symbols=symbols,
|
||||
brokername=provider,
|
||||
tries=1,
|
||||
actorloglevel=loglevel,
|
||||
loglevel=tractorloglevel
|
||||
)
|
||||
|
||||
tractor.run(entry_point)
|
piker/data/feed.py (1713 lines changed)
File diff suppressed because it is too large
@ -0,0 +1,221 @@
|
|||
# piker: trading gear for hackers
|
||||
# Copyright (C) Tyler Goodlet (in stewardship for pikers)
|
||||
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU Affero General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU Affero General Public License for more details.
|
||||
|
||||
# You should have received a copy of the GNU Affero General Public License
|
||||
# along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
"""
|
||||
Public abstractions for organizing, managing and generally operating-on
|
||||
real-time data processing data-structures.
|
||||
|
||||
"Streams, flumes, cascades and flows.."
|
||||
|
||||
"""
|
||||
from __future__ import annotations
|
||||
from typing import (
|
||||
TYPE_CHECKING,
|
||||
)
|
||||
|
||||
import tractor
|
||||
import pendulum
|
||||
import numpy as np
|
||||
|
||||
from piker.types import Struct
|
||||
from ._sharedmem import (
|
||||
attach_shm_array,
|
||||
ShmArray,
|
||||
_Token,
|
||||
)
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from ..accounting import MktPair
|
||||
from .feed import Feed
|
||||
|
||||
|
||||
class Flume(Struct):
|
||||
'''
|
||||
Composite reference type which points to all the addressing
|
||||
handles and other meta-data necessary for the read, measure and
|
||||
management of a set of real-time updated data flows.
|
||||
|
||||
Can be thought of as a "flow descriptor" or "flow frame" which
|
||||
describes the high level properties of a set of data flows that
|
||||
can be used seamlessly across process-memory boundaries.
|
||||
|
||||
Each instance's sub-components normally includes:
|
||||
- a msg oriented quote stream provided via an IPC transport
|
||||
- history and real-time shm buffers which are both real-time
|
||||
updated and backfilled.
|
||||
- associated startup indexing information related to both buffer
|
||||
real-time-append and historical prepend addresses.
|
||||
- low level APIs to read and measure the updated data and manage
|
||||
queuing properties.
|
||||
|
||||
'''
|
||||
mkt: MktPair
|
||||
first_quote: dict
|
||||
_rt_shm_token: _Token
|
||||
|
||||
# optional since some data flows won't have a "downsampled" history
|
||||
# buffer/stream (eg. FSPs).
|
||||
_hist_shm_token: _Token | None = None
|
||||
|
||||
# private shm refs loaded dynamically from tokens
|
||||
_hist_shm: ShmArray | None = None
|
||||
_rt_shm: ShmArray | None = None
|
||||
_readonly: bool = True
|
||||
|
||||
stream: tractor.MsgStream | None = None
|
||||
izero_hist: int = 0
|
||||
izero_rt: int = 0
|
||||
throttle_rate: int | None = None
|
||||
|
||||
# TODO: do we need this really if we can pull the `Portal` from
|
||||
# ``tractor``'s internals?
|
||||
feed: Feed | None = None
|
||||
|
||||
@property
|
||||
def rt_shm(self) -> ShmArray:
|
||||
|
||||
if self._rt_shm is None:
|
||||
self._rt_shm = attach_shm_array(
|
||||
token=self._rt_shm_token,
|
||||
readonly=self._readonly,
|
||||
)
|
||||
|
||||
return self._rt_shm
|
||||
|
||||
@property
|
||||
def hist_shm(self) -> ShmArray:
|
||||
|
||||
if self._hist_shm_token is None:
|
||||
raise RuntimeError(
|
||||
'No shm token has been set for the history buffer?'
|
||||
)
|
||||
|
||||
if self._hist_shm is None:
|
||||
self._hist_shm = attach_shm_array(
|
||||
token=self._hist_shm_token,
|
||||
readonly=self._readonly,
|
||||
)
|
||||
|
||||
return self._hist_shm
|
||||
|
||||
async def receive(self) -> dict:
|
||||
return await self.stream.receive()
|
||||
|
||||
def get_ds_info(
|
||||
self,
|
||||
) -> tuple[float, float, float]:
|
||||
'''
|
||||
Compute the "downsampling" ratio info between the historical shm
|
||||
buffer and the real-time (HFT) one.
|
||||
|
||||
Return a tuple of the fast sample period, historical sample
|
||||
period and ratio between them.
|
||||
|
||||
'''
|
||||
times: np.ndarray = self.hist_shm.array['time']
|
||||
end: float | int = pendulum.from_timestamp(times[-1])
|
||||
start: float | int = pendulum.from_timestamp(times[times != times[-1]][-1])
|
||||
hist_step_size_s: float = (end - start).seconds
|
||||
|
||||
times = self.rt_shm.array['time']
|
||||
end = pendulum.from_timestamp(times[-1])
|
||||
start = pendulum.from_timestamp(times[times != times[-1]][-1])
|
||||
rt_step_size_s = (end - start).seconds
|
||||
|
||||
ratio = hist_step_size_s / rt_step_size_s
|
||||
return (
|
||||
rt_step_size_s,
|
||||
hist_step_size_s,
|
||||
ratio,
|
||||
)
|
||||
|
||||
# TODO: get native msgspec decoding for these workinn
|
||||
def to_msg(self) -> dict:
|
||||
|
||||
msg = self.to_dict()
|
||||
msg['mkt'] = self.mkt.to_dict()
|
||||
|
||||
# NOTE: pop all un-msg-serializable fields:
|
||||
# - `tractor.MsgStream`
|
||||
# - `Feed`
|
||||
# - `Shmarray`
|
||||
# it's expected the `.from_msg()` on the other side
|
||||
# will get instead some kind of msg-compat version
|
||||
# that it can load.
|
||||
msg.pop('stream')
|
||||
msg.pop('feed')
|
||||
msg.pop('_rt_shm')
|
||||
msg.pop('_hist_shm')
|
||||
|
||||
return msg
|
||||
|
||||
@classmethod
|
||||
def from_msg(
|
||||
cls,
|
||||
msg: dict,
|
||||
readonly: bool = True,
|
||||
|
||||
) -> dict:
|
||||
'''
|
||||
Load from an IPC msg presumably in either `dict` or
|
||||
`msgspec.Struct` form.
|
||||
|
||||
'''
|
||||
mkt_msg = msg.pop('mkt')
|
||||
from ..accounting import MktPair # cycle otherwise..
|
||||
mkt = MktPair.from_msg(mkt_msg)
|
||||
msg |= {'_readonly': readonly}
|
||||
return cls(
|
||||
mkt=mkt,
|
||||
**msg,
|
||||
)
|
||||
|
||||
def get_index(
|
||||
self,
|
||||
time_s: float,
|
||||
array: np.ndarray,
|
||||
|
||||
) -> int | float:
|
||||
'''
|
||||
Return array shm-buffer index for for epoch time.
|
||||
|
||||
'''
|
||||
times = array['time']
|
||||
first = np.searchsorted(
|
||||
times,
|
||||
time_s,
|
||||
side='left',
|
||||
)
|
||||
imx = times.shape[0] - 1
|
||||
return min(first, imx)
|
||||
|
||||
# only set by external msg or creator, never
|
||||
# manually!
|
||||
_has_vlm: bool = True
|
||||
|
||||
def has_vlm(self) -> bool:
|
||||
|
||||
if not self._has_vlm:
|
||||
return False
|
||||
|
||||
# make sure that the instrument supports volume history
|
||||
# (sometimes this is not the case for some commodities and
|
||||
# derivatives)
|
||||
vlm: np.ndarray = self.rt_shm.array['volume']
|
||||
return not bool(
|
||||
np.all(np.isin(vlm, -1))
|
||||
or np.all(np.isnan(vlm))
|
||||
)
|
|
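`Flume.get_index()` above is a clamped `np.searchsorted()` over the buffer's `time` column; the same two lines work on any monotonically increasing epoch array. A standalone restatement with made-up timestamps to show the clamping behaviour:

import numpy as np

times = np.array([100.0, 160.0, 220.0, 280.0])  # fake epoch stamps

def get_index(time_s: float, times: np.ndarray) -> int:
    first = np.searchsorted(times, time_s, side='left')
    imx = times.shape[0] - 1
    return min(first, imx)  # clamp to the last valid row

assert get_index(160.0, times) == 1  # exact hit
assert get_index(150.0, times) == 1  # lands on the next bar boundary
assert get_index(999.0, times) == 3  # clamped to the buffer end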
@ -23,7 +23,7 @@ Api layer likely in here...
from types import ModuleType
from importlib import import_module

from ..log import get_logger
from ._util import get_logger

log = get_logger(__name__)

@ -0,0 +1,173 @@
|
|||
# piker: trading gear for hackers
|
||||
# Copyright (C) Tyler Goodlet (in stewardship for pikers)
|
||||
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU Affero General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU Affero General Public License for more details.
|
||||
|
||||
# You should have received a copy of the GNU Affero General Public License
|
||||
# along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
'''
|
||||
Tick event stream processing, filter-by-types, format-normalization.
|
||||
|
||||
'''
|
||||
from itertools import chain
|
||||
from typing import (
|
||||
Any,
|
||||
AsyncIterator,
|
||||
)
|
||||
|
||||
# tick-type-classes template for all possible "lowest level" events
|
||||
# that can be emitted by the "top of book" L1 queues and
|
||||
# price-matching (with eventual clearing) in a double auction
|
||||
# market (queuing) system.
|
||||
_tick_groups: dict[str, set[str]] = {
|
||||
'clears': {'trade', 'dark_trade', 'last'},
|
||||
'bids': {'bid', 'bsize'},
|
||||
'asks': {'ask', 'asize'},
|
||||
}
|
||||
|
||||
# XXX also define the flattened set of all such "fundamental ticks"
|
||||
# so that it can be used as filter, eg. in the graphics display
|
||||
# loop to compute running windowed y-ranges B)
|
||||
_auction_ticks: set[str] = set.union(*_tick_groups.values())
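# e.g. the union above flattens to (ordering irrelevant):
#   {'trade', 'dark_trade', 'last', 'bid', 'bsize', 'ask', 'asize'}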
|
||||
|
||||
|
||||
def frame_ticks(
|
||||
quote: dict[str, Any],
|
||||
|
||||
ticks_by_type: dict | None = None,
|
||||
ticks_in_order: list[dict[str, Any]] | None = None
|
||||
|
||||
) -> dict[
|
||||
str,
|
||||
list[dict[str, Any]]
|
||||
]:
|
||||
'''
|
||||
XXX: build a tick-by-type table of lists
|
||||
of tick messages. This allows for less
|
||||
iteration on the receiver side by allowing for
|
||||
a single "latest tick event" look up by
|
||||
indexing the last entry in each sub-list.
|
||||
|
||||
tbt = {
|
||||
'types': ['bid', 'asize', 'last', .. '<type_n>'],
|
||||
|
||||
'bid': [tick0, tick1, tick2, .., tickn],
|
||||
'asize': [tick0, tick1, tick2, .., tickn],
|
||||
'last': [tick0, tick1, tick2, .., tickn],
|
||||
...
|
||||
'<type_n>': [tick0, tick1, tick2, .., tickn],
|
||||
}
|
||||
|
||||
If `ticks_in_order` is provided, append any retrieved ticks
|
||||
since last iteration into this array/buffer/list.
|
||||
|
||||
'''
|
||||
# TODO: once we decide to get fancy really we should
|
||||
# have a shared mem tick buffer that is just
|
||||
# continually filled and the UI just reads from it
|
||||
# at its display rate.
|
||||
|
||||
tbt = ticks_by_type if ticks_by_type is not None else {}
|
||||
if not (ticks := quote.get('ticks')):
|
||||
return tbt
|
||||
|
||||
# append in reverse FIFO order for in-order iteration on
|
||||
# receiver side.
|
||||
tick: dict[str, Any]
|
||||
for tick in ticks:
|
||||
tbt.setdefault(
|
||||
tick['type'],
|
||||
[],
|
||||
).append(tick)
|
||||
|
||||
# TODO: do we need this any more or can we just
|
||||
# expect the receiver to unwind the below
|
||||
# `ticks_by_type: dict`?
|
||||
# => unwinding would potentially require a
|
||||
# `dict[str, set | list]` instead with an
|
||||
# included `'types'` field which is an (ordered)
|
||||
# set of tick type fields in the order which
|
||||
# types arrived?
|
||||
if ticks_in_order:
|
||||
ticks_in_order.extend(ticks)
|
||||
|
||||
return tbt
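# Hypothetical usage sketch (not part of this diff) for `frame_ticks()`
# above: the per-type table lets a consumer grab the latest event of a
# given type via a single `[-1]` index instead of re-scanning `ticks`;
# all quote/tick values below are made up purely for illustration.
example_quote = {
    'symbol': 'xbtusdt.kraken',  # hypothetical fqme
    'ticks': [
        {'type': 'bid', 'price': 100.0, 'size': 1.0},
        {'type': 'trade', 'price': 100.5, 'size': 2.0},
        {'type': 'bid', 'price': 100.1, 'size': 3.0},
    ],
}
tbt = frame_ticks(example_quote)
assert [t['price'] for t in tbt['bid']] == [100.0, 100.1]
latest_bid = tbt['bid'][-1]  # single "latest tick event" lookup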
|
||||
|
||||
|
||||
def iterticks(
|
||||
quote: dict,
|
||||
types: tuple[str] = (
|
||||
'trade',
|
||||
'dark_trade',
|
||||
),
|
||||
deduplicate_darks: bool = False,
|
||||
reverse: bool = False,
|
||||
|
||||
# TODO: should we offer delegating to `frame_ticks()` above
|
||||
# with this?
|
||||
frame_by_type: bool = False,
|
||||
|
||||
) -> AsyncIterator:
|
||||
'''
|
||||
Iterate through ticks delivered per quote cycle, filter and
|
||||
yield any declared in `types`.
|
||||
|
||||
'''
|
||||
if deduplicate_darks:
|
||||
assert 'dark_trade' in types
|
||||
|
||||
# print(f"{quote}\n\n")
|
||||
ticks = quote.get('ticks', ())
|
||||
trades = {}
|
||||
darks = {}
|
||||
|
||||
if ticks:
|
||||
|
||||
# do a first pass and attempt to remove duplicate dark
|
||||
# trades with the same tick signature.
|
||||
if deduplicate_darks:
|
||||
for tick in ticks:
|
||||
ttype = tick.get('type')
|
||||
|
||||
time = tick.get('time', None)
|
||||
if time:
|
||||
sig = (
|
||||
time,
|
||||
tick['price'],
|
||||
tick.get('size')
|
||||
)
|
||||
|
||||
if ttype == 'dark_trade':
|
||||
darks[sig] = tick
|
||||
|
||||
elif ttype == 'trade':
|
||||
trades[sig] = tick
|
||||
|
||||
# filter duplicates
|
||||
for sig, tick in trades.items():
|
||||
tick = darks.pop(sig, None)
|
||||
if tick:
|
||||
ticks.remove(tick)
|
||||
# print(f'DUPLICATE {tick}')
|
||||
|
||||
# re-insert ticks
|
||||
ticks.extend(list(chain(trades.values(), darks.values())))
|
||||
|
||||
# most-recent-first
|
||||
if reverse:
|
||||
ticks = reversed(ticks)
|
||||
|
||||
for tick in ticks:
|
||||
# print(f"{quote['symbol']}: {tick}")
|
||||
ttype = tick.get('type')
|
||||
if ttype in types:
|
||||
yield tick
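# Hypothetical usage sketch (not part of this diff): filter a quote's
# ticks down to clearing events only. Note `iterticks()` is a plain
# (sync) generator despite the `AsyncIterator` annotation above; the
# quote payload below is made up purely for illustration.
example_quote = {
    'symbol': 'mnq.cme.ib',  # hypothetical fqme
    'ticks': [
        {'type': 'bid', 'price': 99.0, 'size': 5.0},
        {'type': 'ask', 'price': 101.0, 'size': 4.0},
        {'type': 'trade', 'price': 100.0, 'size': 1.0},
    ],
}
clears = list(iterticks(example_quote))  # default types: ('trade', 'dark_trade')
assert [t['price'] for t in clears] == [100.0]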
|
|
@ -0,0 +1,265 @@
|
|||
# piker: trading gear for hackers
|
||||
# Copyright (C) Tyler Goodlet (in stewardship for pikers)
|
||||
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU Affero General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU Affero General Public License for more details.
|
||||
|
||||
# You should have received a copy of the GNU Affero General Public License
|
||||
# along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
'''
|
||||
Data feed synchronization protocols, init msgs, and general
|
||||
data-provider-backend-agnostic schema definitions.
|
||||
|
||||
'''
|
||||
from __future__ import annotations
|
||||
from decimal import Decimal
|
||||
from pprint import pformat
|
||||
from types import ModuleType
|
||||
from typing import (
|
||||
Any,
|
||||
Callable,
|
||||
)
|
||||
|
||||
from msgspec import field
|
||||
|
||||
from piker.types import Struct
|
||||
from piker.accounting import (
|
||||
Asset,
|
||||
MktPair,
|
||||
)
|
||||
from ._util import log
|
||||
|
||||
|
||||
class FeedInitializationError(ValueError):
|
||||
'''
|
||||
Live data feed setup failed due to API / msg incompatibility!
|
||||
|
||||
'''
|
||||
|
||||
|
||||
class FeedInit(Struct, frozen=True):
|
||||
'''
|
||||
A stringent data provider startup msg schema validator.
|
||||
|
||||
The fields defined here are matched with those absolutely required
|
||||
from each backend broker/data provider.
|
||||
|
||||
'''
|
||||
mkt_info: MktPair
|
||||
|
||||
# NOTE: only field we use rn in ``.data.feed``
|
||||
# TODO: maybe make a SamplerConfig(Struct)?
|
||||
shm_write_opts: dict[str, Any] = field(
|
||||
default_factory=lambda: {
|
||||
'has_vlm': True,
|
||||
'sum_tick_vlm': True,
|
||||
})
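# Hypothetical construction sketch (not part of this diff): under the new
# API a backend's `stream_quotes()` is expected to `.started()` a
# `FeedInit` wrapping a fully formed `MktPair`; every literal value below
# is made up purely for illustration.
example_init = FeedInit(
    mkt_info=MktPair.from_fqme(
        fqme='xbtusdt.kraken',  # hypothetical fqme
        price_tick=Decimal('0.1'),
        size_tick=Decimal('0.00000001'),
        bs_mktid='XBTUSDT',
    ),
    shm_write_opts={'has_vlm': True, 'sum_tick_vlm': False},
)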
|
||||
|
||||
# XXX: we group backend endpoints into 3
|
||||
# groups to determine "degrees" of functionality.
|
||||
_eps: dict[str, list[str]] = {
|
||||
|
||||
# basic API `Client` layer
|
||||
'middleware': [
|
||||
'get_client',
|
||||
],
|
||||
|
||||
# (live) data streaming / loading / search
|
||||
'datad': [
|
||||
'get_mkt_info',
|
||||
'open_history_client',
|
||||
'open_symbol_search',
|
||||
'stream_quotes',
|
||||
],
|
||||
|
||||
# live order control and trading
|
||||
'brokerd': [
|
||||
'trades_dialogue',
|
||||
'open_trade_dialog', # live order ctl
|
||||
'norm_trade', # ledger normalizer for txns
|
||||
],
|
||||
}
|
||||
|
||||
|
||||
def validate_backend(
|
||||
mod: ModuleType,
|
||||
syms: list[str],
|
||||
init_msgs: list[FeedInit] | dict[str, dict[str, Any]],
|
||||
|
||||
# TODO: do a module method scan and report mismatches.
|
||||
check_eps: bool = False,
|
||||
|
||||
api_log_msg_level: str = 'critical'
|
||||
|
||||
) -> FeedInit:
|
||||
'''
|
||||
Fail on malformed live quotes feed config/init or warn on changes
|
||||
that haven't been implemented by this backend yet.
|
||||
|
||||
'''
|
||||
for daemon_name, eps in _eps.items():
|
||||
for name in eps:
|
||||
ep: Callable = getattr(
|
||||
mod,
|
||||
name,
|
||||
None,
|
||||
)
|
||||
if ep is None:
|
||||
log.warning(
|
||||
f'Provider backend {mod.name} is missing '
|
||||
f'{daemon_name} support :(\n'
|
||||
f'The following endpoint is missing: {name}'
|
||||
)
|
||||
|
||||
inits: list[
|
||||
FeedInit | dict[str, Any]
|
||||
] = init_msgs
|
||||
|
||||
# convert to list if from old dict-style
|
||||
if isinstance(init_msgs, dict):
|
||||
inits = list(init_msgs.values())
|
||||
|
||||
init: FeedInit | dict[str, Any]
|
||||
for i, init in enumerate(inits):
|
||||
|
||||
# XXX: eventually this WILL NOT necessarily be true.
|
||||
if i > 0:
|
||||
assert not len(init_msgs) == 1
|
||||
if isinstance(init_msgs, dict):
|
||||
keys: set = set(init_msgs.keys()) - set(syms)
|
||||
raise FeedInitializationError(
|
||||
'TOO MANY INIT MSGS!\n'
|
||||
f'Unexpected keys: {keys}\n'
|
||||
'ALL MSGS:\n'
|
||||
f'{pformat(init_msgs)}\n'
|
||||
)
|
||||
else:
|
||||
raise FeedInitializationError(
|
||||
'TOO MANY INIT MSGS!\n'
|
||||
f'{pformat(init_msgs)}\n'
|
||||
)
|
||||
|
||||
# TODO: once all backends are updated we can remove this branching.
|
||||
rx_msg: bool = False
|
||||
warn_msg: str = ''
|
||||
if not isinstance(init, FeedInit):
|
||||
warn_msg += (
|
||||
'\n'
|
||||
'--------------------------\n'
|
||||
':::DEPRECATED API STYLE:::\n'
|
||||
'--------------------------\n'
|
||||
f'`{mod.name}.stream_quotes()` should deliver '
|
||||
'`.started(FeedInit)`\n'
|
||||
f'|-> CURRENTLY it is using DEPRECATED `.started(dict)` style!\n'
|
||||
f'|-> SEE `FeedInit` in `piker.data.validate`\n'
|
||||
'--------------------------------------------\n'
|
||||
)
|
||||
else:
|
||||
rx_msg = True
|
||||
|
||||
# verify feed init state / schema
|
||||
bs_fqme: str # backend specific fqme
|
||||
mkt: MktPair
|
||||
|
||||
match init:
|
||||
|
||||
# backend is using old dict msg delivery
|
||||
case {
|
||||
'symbol_info': dict(symbol_info),
|
||||
'fqsn': bs_fqme,
|
||||
} | {
|
||||
'mkt_info': dict(symbol_info),
|
||||
'fqsn': bs_fqme,
|
||||
}:
|
||||
symbol_info: dict
|
||||
warn_msg += (
|
||||
'It may also be still using the legacy `Symbol` style API\n'
|
||||
'IT SHOULD BE PORTED TO THE NEW '
|
||||
'`.accounting._mktinfo.MktPair`\n'
|
||||
'STATTTTT!!!\n'
|
||||
)
|
||||
|
||||
# XXX use default legacy (aka discrete precision) mkt
|
||||
# price/size_ticks if none delivered.
|
||||
price_tick = symbol_info.get(
|
||||
'price_tick_size',
|
||||
Decimal('0.01'),
|
||||
)
|
||||
size_tick = symbol_info.get(
|
||||
'lot_tick_size',
|
||||
Decimal('1'),
|
||||
)
|
||||
bs_mktid = init.get('bs_mktid') or bs_fqme
|
||||
mkt = MktPair.from_fqme(
|
||||
fqme=f'{bs_fqme}.{mod.name}',
|
||||
|
||||
price_tick=price_tick,
|
||||
size_tick=size_tick,
|
||||
|
||||
bs_mktid=str(bs_mktid),
|
||||
_atype=symbol_info['asset_type']
|
||||
)
|
||||
|
||||
# backend is using new `MktPair` but not entirely
|
||||
case {
|
||||
'mkt_info': MktPair(
|
||||
dst=Asset(),
|
||||
) as mkt,
|
||||
'fqsn': bs_fqme,
|
||||
}:
|
||||
warn_msg += (
|
||||
f'{mod.name} in API compat transition?\n'
|
||||
"It's half dict, half man..\n"
|
||||
'-------------------------------------\n'
|
||||
)
|
||||
|
||||
case FeedInit(
|
||||
mkt_info=MktPair(dst=Asset()) as mkt,
|
||||
shm_write_opts=dict(shm_opts),
|
||||
) as init:
|
||||
name: str = mod.name
|
||||
log.info(
|
||||
f"{name}'s `MktPair` info:\n"
|
||||
f'{pformat(mkt.to_dict())}\n'
|
||||
f'shm conf: {pformat(shm_opts)}\n'
|
||||
)
|
||||
|
||||
case _:
|
||||
raise FeedInitializationError(init)
|
||||
|
||||
# build a msg if we received a dict for input.
|
||||
if not rx_msg:
|
||||
assert bs_fqme in mkt.fqme
|
||||
init = FeedInit(
|
||||
mkt_info=mkt,
|
||||
shm_write_opts=init.get('shm_write_opts'),
|
||||
)
|
||||
|
||||
# `MktPair` value audits
|
||||
mkt = init.mkt_info
|
||||
assert mkt.type_key
|
||||
|
||||
# backend is using new `MktPair` but not embedded `Asset` types
|
||||
# for the .src/.dst..
|
||||
if not isinstance(mkt.src, Asset):
|
||||
warn_msg += (
|
||||
f'ALSO, {mod.name.upper()} should try to deliver\n'
|
||||
'the new `MktPair.src: Asset` field!\n'
|
||||
'-----------------------------------------------\n'
|
||||
)
|
||||
|
||||
# complain about any non-idealities
|
||||
if warn_msg:
|
||||
# TODO: would be nice to register an API_COMPAT or something in
|
||||
# maybe cyan for this in general throughput piker no?
|
||||
logmeth = getattr(log, api_log_msg_level)
|
||||
logmeth(warn_msg)
|
||||
|
||||
return init.copy()
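# Hypothetical call-site sketch (not part of this diff): audit whatever a
# backend handed back from `stream_quotes()` before wiring up shm buffers;
# `backend_mod` and `first_quote_init` are stand-in names, not real APIs.
#
#   init: FeedInit = validate_backend(
#       mod=backend_mod,               # an imported `piker.brokers.<name>` module
#       syms=['xbtusdt.kraken'],
#       init_msgs=[first_quote_init],  # new-style: a list of `FeedInit`
#   )
#   mkt: MktPair = init.mkt_info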
|
|
@ -22,17 +22,40 @@ from typing import AsyncIterator
|
|||
|
||||
import numpy as np
|
||||
|
||||
from ._engine import cascade
|
||||
from ._api import (
|
||||
maybe_mk_fsp_shm,
|
||||
Fsp,
|
||||
)
|
||||
from ._engine import (
|
||||
cascade,
|
||||
Cascade,
|
||||
)
|
||||
from ._volume import (
|
||||
dolla_vlm,
|
||||
flow_rates,
|
||||
tina_vwap,
|
||||
)
|
||||
|
||||
__all__ = ['cascade']
|
||||
__all__: list[str] = [
|
||||
'cascade',
|
||||
'Cascade',
|
||||
'maybe_mk_fsp_shm',
|
||||
'Fsp',
|
||||
'dolla_vlm',
|
||||
'flow_rates',
|
||||
'tina_vwap',
|
||||
]
|
||||
|
||||
|
||||
async def latency(
|
||||
source: 'TickStream[Dict[str, float]]', # noqa
|
||||
ohlcv: np.ndarray
|
||||
|
||||
) -> AsyncIterator[np.ndarray]:
|
||||
"""Latency measurements, broker to piker.
|
||||
"""
|
||||
'''
|
||||
Latency measurements, broker to piker.
|
||||
|
||||
'''
|
||||
# TODO: do we want to offer yielding this async
|
||||
# before the rt data connection comes up?
|
||||
|
||||
|
|
|
@ -78,7 +78,8 @@ class Fsp:
|
|||
# + the consuming fsp *to* the consumers output
|
||||
# shm flow.
|
||||
_flow_registry: dict[
|
||||
tuple[_Token, str], _Token,
|
||||
tuple[_Token, str],
|
||||
tuple[_Token, Optional[ShmArray]],
|
||||
] = {}
|
||||
|
||||
def __init__(
|
||||
|
@ -120,7 +121,6 @@ class Fsp:
|
|||
):
|
||||
return self.func(*args, **kwargs)
|
||||
|
||||
# TODO: lru_cache this? pretty sure it'll work?
|
||||
def get_shm(
|
||||
self,
|
||||
src_shm: ShmArray,
|
||||
|
@ -131,12 +131,27 @@ class Fsp:
|
|||
for this "instance" of a signal processor for
|
||||
the given ``key``.
|
||||
|
||||
The destination shm "token" and array are cached if possible to
|
||||
minimize multiple stdlib/system calls.
|
||||
|
||||
'''
|
||||
dst_token = self._flow_registry[
|
||||
dst_token, maybe_array = self._flow_registry[
|
||||
(src_shm._token, self.name)
|
||||
]
|
||||
shm = attach_shm_array(dst_token)
|
||||
return shm
|
||||
if maybe_array is None:
|
||||
self._flow_registry[
|
||||
(src_shm._token, self.name)
|
||||
] = (
|
||||
dst_token,
|
||||
# "cache" the ``ShmArray`` such that
|
||||
# we call the underlying "attach" code as few
|
||||
# times as possible as per:
|
||||
# - https://github.com/pikers/piker/issues/359
|
||||
# - https://github.com/pikers/piker/issues/332
|
||||
maybe_array := attach_shm_array(dst_token)
|
||||
)
|
||||
|
||||
return maybe_array
|
||||
|
||||
|
||||
def fsp(
|
||||
|
@ -159,18 +174,10 @@ def fsp(
|
|||
return Fsp(wrapped, outputs=(wrapped.__name__,))
|
||||
|
||||
|
||||
def mk_fsp_shm_key(
|
||||
sym: str,
|
||||
target: Fsp
|
||||
|
||||
) -> str:
|
||||
uid = tractor.current_actor().uid
|
||||
return f'{sym}.fsp.{target.name}.{".".join(uid)}'
|
||||
|
||||
|
||||
def maybe_mk_fsp_shm(
|
||||
sym: str,
|
||||
target: Fsp,
|
||||
size: int,
|
||||
readonly: bool = True,
|
||||
|
||||
) -> (str, ShmArray, bool):
|
||||
|
@ -179,20 +186,27 @@ def maybe_mk_fsp_shm(
|
|||
exists, otherwise load the shm already existing for that token.
|
||||
|
||||
'''
|
||||
assert isinstance(sym, str), '`sym` should be file-name-friendly `str`'
|
||||
if not isinstance(sym, str):
|
||||
raise ValueError('`sym: str` should be file-name-friendly')
|
||||
|
||||
# TODO: load output types from `Fsp`
|
||||
# - should `index` be a required internal field?
|
||||
fsp_dtype = np.dtype(
|
||||
[('index', int)] +
|
||||
[('index', int)]
|
||||
+
|
||||
[('time', float)]
|
||||
+
|
||||
[(field_name, float) for field_name in target.outputs]
|
||||
)
|
||||
|
||||
key = mk_fsp_shm_key(sym, target)
|
||||
# (attempt to) uniquely key the fsp shm buffers
|
||||
actor_name, uuid = tractor.current_actor().uid
|
||||
uuid_snip: str = uuid[:16]
|
||||
key: str = f'piker.{actor_name}[{uuid_snip}].{sym}.{target.name}'
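# e.g. (hypothetical actor name / uuid / fqme / fsp) the resulting key
# looks something like:
#   'piker.fsp_engine[3fa85f64-5717-45].btcusdt.kraken.dolla_vlm'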
|
||||
|
||||
shm, opened = maybe_open_shm_array(
|
||||
key,
|
||||
# TODO: create entry for each time frame
|
||||
size=size,
|
||||
dtype=fsp_dtype,
|
||||
readonly=True,
|
||||
)
|
||||
|
|
|
@ -18,41 +18,43 @@
|
|||
core task logic for processing chains
|
||||
|
||||
'''
|
||||
from dataclasses import dataclass
|
||||
from __future__ import annotations
|
||||
from contextlib import asynccontextmanager as acm
|
||||
from functools import partial
|
||||
from typing import (
|
||||
AsyncIterator, Callable, Optional,
|
||||
Union,
|
||||
AsyncIterator,
|
||||
Callable,
|
||||
)
|
||||
|
||||
import numpy as np
|
||||
import pyqtgraph as pg
|
||||
import trio
|
||||
from trio_typing import TaskStatus
|
||||
import tractor
|
||||
from tractor.msg import NamespacePath
|
||||
|
||||
from piker.types import Struct
|
||||
from ..log import get_logger, get_console_log
|
||||
from .. import data
|
||||
from ..data import attach_shm_array
|
||||
from ..data.feed import Feed
|
||||
from ..data.feed import (
|
||||
Flume,
|
||||
Feed,
|
||||
)
|
||||
from ..data._sharedmem import ShmArray
|
||||
from ..data._source import Symbol
|
||||
from ..data._sampling import (
|
||||
_default_delay_s,
|
||||
open_sample_stream,
|
||||
)
|
||||
from ..accounting import MktPair
|
||||
from ._api import (
|
||||
Fsp,
|
||||
_load_builtins,
|
||||
_Token,
|
||||
)
|
||||
from ..toolz import Profiler
|
||||
|
||||
log = get_logger(__name__)
|
||||
|
||||
|
||||
@dataclass
|
||||
class TaskTracker:
|
||||
complete: trio.Event
|
||||
cs: trio.CancelScope
|
||||
|
||||
|
||||
async def filter_quotes_by_sym(
|
||||
|
||||
sym: str,
|
||||
|
@ -73,50 +75,190 @@ async def filter_quotes_by_sym(
|
|||
if quote:
|
||||
yield quote
|
||||
|
||||
# TODO: unifying the abstractions in this FSP subsys/layer:
|
||||
# -[ ] move the `.data.flows.Flume` type into this
|
||||
# module/subsys/pkg?
|
||||
# -[ ] ideas for further abstractions as per
|
||||
# - https://github.com/pikers/piker/issues/216,
|
||||
# - https://github.com/pikers/piker/issues/270:
|
||||
# - a (financial signal) ``Flow`` would be a "collection" of such
|
||||
# minimal cascades. Some engineering-based jargon concepts:
|
||||
# - https://en.wikipedia.org/wiki/Signal_chain
|
||||
# - https://en.wikipedia.org/wiki/Daisy_chain_(electrical_engineering)
|
||||
# - https://en.wikipedia.org/wiki/Audio_signal_flow
|
||||
# - https://en.wikipedia.org/wiki/Digital_signal_processing#Implementation
|
||||
# - https://en.wikipedia.org/wiki/Dataflow_programming
|
||||
# - https://en.wikipedia.org/wiki/Signal_programming
|
||||
# - https://en.wikipedia.org/wiki/Incremental_computing
|
||||
# - https://en.wikipedia.org/wiki/Signal-flow_graph
|
||||
# - https://en.wikipedia.org/wiki/Signal-flow_graph#Basic_components
|
||||
|
||||
async def fsp_compute(
|
||||
# -[ ] we probably want to eval THE BELOW design and unify with the
|
||||
# proto `TaskManager` in the `tractor` dev branch as well as with
|
||||
# our below idea for `Cascade`:
|
||||
# - https://github.com/goodboy/tractor/pull/363
|
||||
class Cascade(Struct):
|
||||
'''
|
||||
As per sig-proc engineering parlance, this is a chaining of
|
||||
`Flume`s, which are themselves collections of "Streams"
|
||||
implemented currently via `ShmArray`s.
|
||||
|
||||
symbol: Symbol,
|
||||
feed: Feed,
|
||||
A `Cascade` is the minimal "connection" of 2 `Flumes`
|
||||
as per circuit parlance:
|
||||
https://en.wikipedia.org/wiki/Two-port_network#Cascade_connection
|
||||
|
||||
TODO:
|
||||
-[ ] could cover the combination of our `FspAdmin` and the
|
||||
backend `.fsp._engine` related machinery to "connect" one flume
|
||||
to another?
|
||||
|
||||
'''
|
||||
# TODO: make these `Flume`s
|
||||
src: Flume
|
||||
dst: Flume
|
||||
tn: trio.Nursery
|
||||
fsp: Fsp # UI-side middleware ctl API
|
||||
|
||||
# filled during cascade/.bind_func() (fsp_compute) init phases
|
||||
bind_func: Callable | None = None
|
||||
complete: trio.Event | None = None
|
||||
cs: trio.CancelScope | None = None
|
||||
client_stream: tractor.MsgStream | None = None
|
||||
|
||||
async def resync(self) -> int:
|
||||
# TODO: adopt an incremental update engine/approach
|
||||
# where possible here eventually!
|
||||
log.info(f're-syncing fsp {self.fsp.name} to source')
|
||||
self.cs.cancel()
|
||||
await self.complete.wait()
|
||||
index: int = await self.tn.start(self.bind_func)
|
||||
|
||||
# always trigger UI refresh after history update,
|
||||
# see ``piker.ui._fsp.FspAdmin.open_chain()`` and
|
||||
# ``piker.ui._display.trigger_update()``.
|
||||
dst_shm: ShmArray = self.dst.rt_shm
|
||||
await self.client_stream.send({
|
||||
'fsp_update': {
|
||||
'key': dst_shm.token,
|
||||
'first': dst_shm._first.value,
|
||||
'last': dst_shm._last.value,
|
||||
}
|
||||
})
|
||||
return index
|
||||
|
||||
def is_synced(self) -> tuple[bool, int, int]:
|
||||
'''
|
||||
Predicate to determine if a destination FSP
|
||||
output array is aligned to its source array.
|
||||
|
||||
'''
|
||||
src_shm: ShmArray = self.src.rt_shm
|
||||
dst_shm: ShmArray = self.dst.rt_shm
|
||||
step_diff = src_shm.index - dst_shm.index
|
||||
len_diff = abs(len(src_shm.array) - len(dst_shm.array))
|
||||
synced: bool = not (
|
||||
# the source is likely backfilling and we must
|
||||
# sync history calculations
|
||||
len_diff > 2
|
||||
|
||||
# we aren't step synced to the source and may be
|
||||
# leading/lagging by a step
|
||||
or step_diff > 1
|
||||
or step_diff < 0
|
||||
)
|
||||
if not synced:
|
||||
fsp: Fsp = self.fsp
|
||||
log.warning(
|
||||
'***DESYNCED FSP***\n'
|
||||
f'{fsp.ns_path}@{src_shm.token}\n'
|
||||
f'step_diff: {step_diff}\n'
|
||||
f'len_diff: {len_diff}\n'
|
||||
)
|
||||
return (
|
||||
synced,
|
||||
step_diff,
|
||||
len_diff,
|
||||
)
|
||||
|
||||
async def poll_and_sync_to_step(self) -> int:
|
||||
synced, step_diff, _ = self.is_synced()
|
||||
while not synced:
|
||||
await self.resync()
|
||||
synced, step_diff, _ = self.is_synced()
|
||||
|
||||
return step_diff
|
||||
|
||||
@acm
|
||||
async def open_edge(
|
||||
self,
|
||||
bind_func: Callable,
|
||||
) -> int:
|
||||
self.bind_func = bind_func
|
||||
index = await self.tn.start(bind_func)
|
||||
yield index
|
||||
# TODO: what do we want on teardown/error?
|
||||
# -[ ] dynamic reconnection after update?
|
||||
|
||||
|
||||
async def connect_streams(
|
||||
casc: Cascade,
|
||||
mkt: MktPair,
|
||||
quote_stream: trio.abc.ReceiveChannel,
|
||||
src: Flume,
|
||||
dst: Flume,
|
||||
|
||||
src: ShmArray,
|
||||
dst: ShmArray,
|
||||
|
||||
func: Callable,
|
||||
edge_func: Callable,
|
||||
|
||||
# attach_stream: bool = False,
|
||||
task_status: TaskStatus[None] = trio.TASK_STATUS_IGNORED,
|
||||
|
||||
) -> None:
|
||||
'''
|
||||
Stream and per-sample compute and write the cascade of
|
||||
2 `Flumes`/streams given some operating `func`.
|
||||
|
||||
profiler = pg.debug.Profiler(
|
||||
https://en.wikipedia.org/wiki/Signal-flow_graph#Basic_components
|
||||
|
||||
Not literally, but something like:
|
||||
|
||||
edge_func(Flume_in) -> Flume_out
|
||||
|
||||
'''
|
||||
profiler = Profiler(
|
||||
delayed=False,
|
||||
disabled=True
|
||||
)
|
||||
|
||||
fqsn = symbol.front_fqsn()
|
||||
out_stream = func(
|
||||
# TODO: just pull it from src.mkt.fqme no?
|
||||
# fqme: str = mkt.fqme
|
||||
fqme: str = src.mkt.fqme
|
||||
|
||||
# TODO: dynamic introspection of what the underlying (vertex)
|
||||
# function actually requires from input node (flumes) then
|
||||
# deliver those inputs as part of a graph "compilation" step?
|
||||
out_stream = edge_func(
|
||||
|
||||
# TODO: do we even need this if we do the feed api right?
|
||||
# shouldn't a local stream do this before we get a handle
|
||||
# to the async iterable? it's that or we do some kinda
|
||||
# async itertools style?
|
||||
filter_quotes_by_sym(fqsn, quote_stream),
|
||||
filter_quotes_by_sym(fqme, quote_stream),
|
||||
|
||||
# XXX: currently the ``ohlcv`` arg
|
||||
feed.shm,
|
||||
# XXX: currently the ``ohlcv`` arg, but we should allow
|
||||
# (dynamic) requests for src flume (node) streams?
|
||||
src.rt_shm,
|
||||
)
|
||||
|
||||
# Conduct a single iteration of fsp with historical bars input
|
||||
# and get historical output
|
||||
history_output: Union[
|
||||
dict[str, np.ndarray], # multi-output case
|
||||
np.ndarray, # single output case
|
||||
]
|
||||
history_output = await out_stream.__anext__()
|
||||
# HISTORY COMPUTE PHASE
|
||||
# conduct a single iteration of fsp with historical bars input
|
||||
# and get historical output.
|
||||
history_output: (
|
||||
dict[str, np.ndarray] # multi-output case
|
||||
| np.ndarray, # single output case
|
||||
)
|
||||
history_output = await anext(out_stream)
|
||||
|
||||
func_name = func.__name__
|
||||
func_name = edge_func.__name__
|
||||
profiler(f'{func_name} generated history')
|
||||
|
||||
# build struct array with an 'index' field to push as history
|
||||
|
@ -124,11 +266,17 @@ async def fsp_compute(
|
|||
# TODO: push using a[['f0', 'f1', .., 'fn']] = .. syntax no?
|
||||
# if the output array is multi-field then push
|
||||
# each respective field.
|
||||
fields = getattr(dst.array.dtype, 'fields', None).copy()
|
||||
dst_shm: ShmArray = dst.rt_shm
|
||||
fields = getattr(dst_shm.array.dtype, 'fields', None).copy()
|
||||
fields.pop('index')
|
||||
history: Optional[np.ndarray] = None # TODO: nptyping here!
|
||||
history_by_field: np.ndarray | None = None
|
||||
src_shm: ShmArray = src.rt_shm
|
||||
src_time = src_shm.array['time']
|
||||
|
||||
if fields and len(fields) > 1 and fields:
|
||||
if (
|
||||
fields and
|
||||
len(fields) > 1
|
||||
):
|
||||
if not isinstance(history_output, dict):
|
||||
raise ValueError(
|
||||
f'`{func_name}` is a multi-output FSP and should yield a '
|
||||
|
@ -139,25 +287,25 @@ async def fsp_compute(
|
|||
if key in history_output:
|
||||
output = history_output[key]
|
||||
|
||||
if history is None:
|
||||
if history_by_field is None:
|
||||
|
||||
if output is None:
|
||||
length = len(src.array)
|
||||
length = len(src_shm.array)
|
||||
else:
|
||||
length = len(output)
|
||||
|
||||
# using the first output, determine
|
||||
# the length of the struct-array that
|
||||
# will be pushed to shm.
|
||||
history = np.zeros(
|
||||
history_by_field = np.zeros(
|
||||
length,
|
||||
dtype=dst.array.dtype
|
||||
dtype=dst_shm.array.dtype
|
||||
)
|
||||
|
||||
if output is None:
|
||||
continue
|
||||
|
||||
history[key] = output
|
||||
history_by_field[key] = output
|
||||
|
||||
# single-key output stream
|
||||
else:
|
||||
|
@ -166,11 +314,15 @@ async def fsp_compute(
|
|||
f'`{func_name}` is a single output FSP and should yield an '
|
||||
'`np.ndarray` for history'
|
||||
)
|
||||
history = np.zeros(
|
||||
history_by_field = np.zeros(
|
||||
len(history_output),
|
||||
dtype=dst.array.dtype
|
||||
dtype=dst_shm.array.dtype
|
||||
)
|
||||
history[func_name] = history_output
|
||||
history_by_field[func_name] = history_output
|
||||
|
||||
history_by_field['time'] = src_time[-len(history_by_field):]
|
||||
|
||||
history_output['time'] = src_shm.array['time']
|
||||
|
||||
# TODO: XXX:
|
||||
# THERE'S A BIG BUG HERE WITH THE `index` field since we're
|
||||
|
@ -183,11 +335,14 @@ async def fsp_compute(
|
|||
# is `index` aware such that historical data can be indexed
|
||||
# relative to the true first datum? Not sure if this is sane
|
||||
# for incremental computations.
|
||||
first = dst._first.value = src._first.value
|
||||
first = dst_shm._first.value = src_shm._first.value
|
||||
|
||||
# TODO: can we use this `start` flag instead of the manual
|
||||
# setting above?
|
||||
index = dst.push(history, start=first)
|
||||
index = dst_shm.push(
|
||||
history_by_field,
|
||||
start=first,
|
||||
)
|
||||
|
||||
profiler(f'{func_name} pushed history')
|
||||
profiler.finish()
|
||||
|
@ -195,12 +350,9 @@ async def fsp_compute(
|
|||
# setup a respawn handle
|
||||
with trio.CancelScope() as cs:
|
||||
|
||||
# TODO: might be better to just make a "restart" method where
|
||||
# the target task is spawned implicitly and then the event is
|
||||
# set via some higher level api? At that point we might as well
|
||||
# be writing a one-cancels-one nursery though right?
|
||||
tracker = TaskTracker(trio.Event(), cs)
|
||||
task_status.started((tracker, index))
|
||||
casc.cs = cs
|
||||
casc.complete = trio.Event()
|
||||
task_status.started(index)
|
||||
|
||||
profiler(f'{func_name} yield last index')
|
||||
|
||||
|
@ -213,8 +365,14 @@ async def fsp_compute(
|
|||
|
||||
log.debug(f"{func_name}: {processed}")
|
||||
key, output = processed
|
||||
index = src.index
|
||||
dst.array[-1][key] = output
|
||||
# dst.array[-1][key] = output
|
||||
dst_shm.array[[key, 'time']][-1] = (
|
||||
output,
|
||||
# TODO: what about pushing ``time.time_ns()``
|
||||
# in which case we'll need to round at the graphics
|
||||
# processing / sampling layer?
|
||||
src_shm.array[-1]['time']
|
||||
)
|
||||
|
||||
# NOTE: for now we aren't streaming this to the consumer
|
||||
# stream latest array index entry which basically just acts
|
||||
|
@ -225,6 +383,7 @@ async def fsp_compute(
|
|||
# N-consumers who subscribe for the real-time output,
|
||||
# which we'll likely want to implement using local-mem
|
||||
# chans for the fan out?
|
||||
# index = src_shm.index
|
||||
# if attach_stream:
|
||||
# await client_stream.send(index)
|
||||
|
||||
|
@ -234,7 +393,7 @@ async def fsp_compute(
|
|||
# log.info(f'FSP quote too fast: {hz}')
|
||||
# last = time.time()
|
||||
finally:
|
||||
tracker.complete.set()
|
||||
casc.complete.set()
|
||||
|
||||
|
||||
@tractor.context
|
||||
|
@ -243,17 +402,17 @@ async def cascade(
|
|||
ctx: tractor.Context,
|
||||
|
||||
# data feed key
|
||||
fqsn: str,
|
||||
|
||||
src_shm_token: dict,
|
||||
dst_shm_token: tuple[str, np.dtype],
|
||||
fqme: str,
|
||||
|
||||
# flume pair cascaded using an "edge function"
|
||||
src_flume_addr: dict,
|
||||
dst_flume_addr: dict,
|
||||
ns_path: NamespacePath,
|
||||
|
||||
shm_registry: dict[str, _Token],
|
||||
|
||||
zero_on_step: bool = False,
|
||||
loglevel: Optional[str] = None,
|
||||
loglevel: str | None = None,
|
||||
|
||||
) -> None:
|
||||
'''
|
||||
|
@ -261,7 +420,7 @@ async def cascade(
|
|||
destination shm array buffer.
|
||||
|
||||
'''
|
||||
profiler = pg.debug.Profiler(
|
||||
profiler = Profiler(
|
||||
delayed=False,
|
||||
disabled=False
|
||||
)
|
||||
|
@ -269,8 +428,14 @@ async def cascade(
|
|||
if loglevel:
|
||||
get_console_log(loglevel)
|
||||
|
||||
src = attach_shm_array(token=src_shm_token)
|
||||
dst = attach_shm_array(readonly=False, token=dst_shm_token)
|
||||
src: Flume = Flume.from_msg(src_flume_addr)
|
||||
dst: Flume = Flume.from_msg(
|
||||
dst_flume_addr,
|
||||
readonly=False,
|
||||
)
|
||||
|
||||
# src: ShmArray = attach_shm_array(token=src_shm_token)
|
||||
# dst: ShmArray = attach_shm_array(readonly=False, token=dst_shm_token)
|
||||
|
||||
reg = _load_builtins()
|
||||
lines = '\n'.join([f'{key.rpartition(":")[2]} => {key}' for key in reg])
|
||||
|
@ -278,28 +443,33 @@ async def cascade(
|
|||
f'Registered FSP set:\n{lines}'
|
||||
)
|
||||
|
||||
# update actorlocal flows table which registers
|
||||
# readonly "instances" of this fsp for symbol/source
|
||||
# so that consumer fsps can look it up by source + fsp.
|
||||
# TODO: ugh i hate this wind/unwind to list over the wire
|
||||
# but not sure how else to do it.
|
||||
# NOTE XXX: update actorlocal flows table which registers
|
||||
# readonly "instances" of this fsp for symbol/source so that
|
||||
# consumer fsps can look it up by source + fsp.
|
||||
# TODO: ugh i hate this wind/unwind to list over the wire but
|
||||
# not sure how else to do it.
|
||||
for (token, fsp_name, dst_token) in shm_registry:
|
||||
Fsp._flow_registry[
|
||||
(_Token.from_msg(token), fsp_name)
|
||||
] = _Token.from_msg(dst_token)
|
||||
Fsp._flow_registry[(
|
||||
_Token.from_msg(token),
|
||||
fsp_name,
|
||||
)] = _Token.from_msg(dst_token), None
|
||||
|
||||
fsp: Fsp = reg.get(
|
||||
NamespacePath(ns_path)
|
||||
)
|
||||
func = fsp.func
|
||||
func: Callable = fsp.func
|
||||
|
||||
if not func:
|
||||
# TODO: assume it's a func target path
|
||||
raise ValueError(f'Unknown fsp target: {ns_path}')
|
||||
|
||||
_fqme: str = src.mkt.fqme
|
||||
assert _fqme == fqme
|
||||
|
||||
# open a data feed stream with requested broker
|
||||
feed: Feed
|
||||
async with data.feed.maybe_open_feed(
|
||||
[fqsn],
|
||||
[fqme],
|
||||
|
||||
# TODO throttle tick outputs from *this* daemon since
|
||||
# it'll emit tons of ticks due to the throttle only
|
||||
|
@ -307,43 +477,70 @@ async def cascade(
|
|||
# needs to get throttled the ticks we generate.
|
||||
# tick_throttle=60,
|
||||
|
||||
) as (feed, quote_stream):
|
||||
symbol = feed.symbols[fqsn]
|
||||
) as feed:
|
||||
|
||||
flume: Flume = feed.flumes[fqme]
|
||||
# XXX: can't do this since flume.feed will be set XD
|
||||
# assert flume == src
|
||||
assert flume.mkt == src.mkt
|
||||
mkt: MktPair = flume.mkt
|
||||
|
||||
# NOTE: FOR NOW, sanity checks around the feed as being
|
||||
# always the src flume (until we get to fancier/lengthier
|
||||
# chains/graphs).
|
||||
assert src.rt_shm.token == flume.rt_shm.token
|
||||
|
||||
# XXX: won't work bc the _hist_shm_token value will be
|
||||
# list[list] after IPC..
|
||||
# assert flume.to_msg() == src_flume_addr
|
||||
|
||||
profiler(f'{func}: feed up')
|
||||
|
||||
assert src.token == feed.shm.token
|
||||
# last_len = new_len = len(src.array)
|
||||
|
||||
func_name = func.__name__
|
||||
func_name: str = func.__name__
|
||||
async with (
|
||||
trio.open_nursery() as n,
|
||||
trio.open_nursery() as tn,
|
||||
):
|
||||
# TODO: might be better to just make a "restart" method where
|
||||
# the target task is spawned implicitly and then the event is
|
||||
# set via some higher level api? At that point we might as well
|
||||
# be writing a one-cancels-one nursery though right?
|
||||
casc = Cascade(
|
||||
src,
|
||||
dst,
|
||||
tn,
|
||||
fsp,
|
||||
)
|
||||
|
||||
# TODO: this seems like it should be wrapped somewhere?
|
||||
fsp_target = partial(
|
||||
connect_streams,
|
||||
casc=casc,
|
||||
mkt=mkt,
|
||||
quote_stream=flume.stream,
|
||||
|
||||
fsp_compute,
|
||||
symbol=symbol,
|
||||
feed=feed,
|
||||
quote_stream=quote_stream,
|
||||
|
||||
# shm
|
||||
# flumes and shm passthrough
|
||||
src=src,
|
||||
dst=dst,
|
||||
|
||||
# target
|
||||
func=func
|
||||
# chain function which takes src flume input(s)
|
||||
# and renders dst flume output(s)
|
||||
edge_func=func
|
||||
)
|
||||
|
||||
tracker, index = await n.start(fsp_target)
|
||||
async with casc.open_edge(
|
||||
bind_func=fsp_target,
|
||||
) as index:
|
||||
# casc.bind_func = fsp_target
|
||||
# index = await tn.start(fsp_target)
|
||||
dst_shm: ShmArray = dst.rt_shm
|
||||
src_shm: ShmArray = src.rt_shm
|
||||
|
||||
if zero_on_step:
|
||||
last = dst.array[-1:]
|
||||
last = dst.rt_shm.array[-1:]
|
||||
zeroed = np.zeros(last.shape, dtype=last.dtype)
|
||||
|
||||
profiler(f'{func_name}: fsp up')
|
||||
|
||||
# sync client
|
||||
# sync to client-side actor
|
||||
await ctx.started(index)
|
||||
|
||||
# XXX: rt stream with client which we MUST
|
||||
|
@ -351,96 +548,39 @@ async def cascade(
|
|||
# incremental "updates" as history prepends take
|
||||
# place.
|
||||
async with ctx.open_stream() as client_stream:
|
||||
casc.client_stream: tractor.MsgStream = client_stream
|
||||
|
||||
# TODO: these likely should all become
|
||||
# methods of this ``TaskLifetime`` or wtv
|
||||
# abstraction..
|
||||
async def resync(
|
||||
tracker: TaskTracker,
|
||||
|
||||
) -> tuple[TaskTracker, int]:
|
||||
# TODO: adopt an incremental update engine/approach
|
||||
# where possible here eventually!
|
||||
log.debug(f're-syncing fsp {func_name} to source')
|
||||
tracker.cs.cancel()
|
||||
await tracker.complete.wait()
|
||||
tracker, index = await n.start(fsp_target)
|
||||
|
||||
# always trigger UI refresh after history update,
|
||||
# see ``piker.ui._fsp.FspAdmin.open_chain()`` and
|
||||
# ``piker.ui._display.trigger_update()``.
|
||||
await client_stream.send({
|
||||
'fsp_update': {
|
||||
'key': dst_shm_token,
|
||||
'first': dst._first.value,
|
||||
'last': dst._last.value,
|
||||
}})
|
||||
return tracker, index
|
||||
|
||||
def is_synced(
|
||||
src: ShmArray,
|
||||
dst: ShmArray
|
||||
) -> tuple[bool, int, int]:
|
||||
'''Predicate to determine if a destination FSP
|
||||
output array is aligned to its source array.
|
||||
|
||||
'''
|
||||
step_diff = src.index - dst.index
|
||||
len_diff = abs(len(src.array) - len(dst.array))
|
||||
return not (
|
||||
# the source is likely backfilling and we must
|
||||
# sync history calculations
|
||||
len_diff > 2 or
|
||||
|
||||
# we aren't step synced to the source and may be
|
||||
# leading/lagging by a step
|
||||
step_diff > 1 or
|
||||
step_diff < 0
|
||||
), step_diff, len_diff
|
||||
|
||||
async def poll_and_sync_to_step(
|
||||
|
||||
tracker: TaskTracker,
|
||||
src: ShmArray,
|
||||
dst: ShmArray,
|
||||
|
||||
) -> tuple[TaskTracker, int]:
|
||||
|
||||
synced, step_diff, _ = is_synced(src, dst)
|
||||
while not synced:
|
||||
tracker, index = await resync(tracker)
|
||||
synced, step_diff, _ = is_synced(src, dst)
|
||||
|
||||
return tracker, step_diff
|
||||
|
||||
s, step, ld = is_synced(src, dst)
|
||||
s, step, ld = casc.is_synced()
|
||||
|
||||
# detect sample period step for subscription to increment
|
||||
# signal
|
||||
times = src.array['time']
|
||||
delay_s = times[-1] - times[times != times[-1]][-1]
|
||||
times = src.rt_shm.array['time']
|
||||
if len(times) > 1:
|
||||
last_ts = times[-1]
|
||||
delay_s: float = float(last_ts - times[times != last_ts][-1])
|
||||
else:
|
||||
# our default "HFT" sample rate.
|
||||
delay_s: float = _default_delay_s
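# e.g. (illustrative numbers) with times[-3:] == [1060., 1120., 1120.]
# the `times != last_ts` filter drops the repeated last stamp so
# delay_s == 1120. - 1060. == 60.0, i.e. a 1m sample step.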
|
||||
|
||||
# Increment the underlying shared memory buffer on every
|
||||
# "increment" msg received from the underlying data feed.
|
||||
async with feed.index_stream(
|
||||
int(delay_s)
|
||||
# sub and increment the underlying shared memory buffer
|
||||
# on every step msg received from the global `samplerd`
|
||||
# service.
|
||||
async with open_sample_stream(
|
||||
float(delay_s)
|
||||
) as istream:
|
||||
|
||||
profiler(f'{func_name}: sample stream up')
|
||||
profiler.finish()
|
||||
|
||||
async for _ in istream:
|
||||
async for i in istream:
|
||||
# print(f'FSP incrementing {i}')
|
||||
|
||||
# respawn the compute task if the source
|
||||
# array has been updated such that we compute
|
||||
# new history from the (prepended) source.
|
||||
synced, step_diff, _ = is_synced(src, dst)
|
||||
synced, step_diff, _ = casc.is_synced()
|
||||
if not synced:
|
||||
tracker, step_diff = await poll_and_sync_to_step(
|
||||
tracker,
|
||||
src,
|
||||
dst,
|
||||
)
|
||||
step_diff: int = await casc.poll_and_sync_to_step()
|
||||
|
||||
# skip adding a last bar since we should already
|
||||
# be step aligned
|
||||
|
@ -448,7 +588,7 @@ async def cascade(
|
|||
continue
|
||||
|
||||
# read out last shm row, copy and write new row
|
||||
array = dst.array
|
||||
array = dst_shm.array
|
||||
|
||||
# some metrics like vlm should be reset
|
||||
# to zero every step.
|
||||
|
@ -457,4 +597,24 @@ async def cascade(
|
|||
else:
|
||||
last = array[-1:].copy()
|
||||
|
||||
dst.push(last)
|
||||
dst.rt_shm.push(last)
|
||||
|
||||
# sync with source buffer's time step
|
||||
src_l2 = src_shm.array[-2:]
|
||||
src_li, src_lt = src_l2[-1][['index', 'time']]
|
||||
src_2li, src_2lt = src_l2[-2][['index', 'time']]
|
||||
dst_shm._array['time'][src_li] = src_lt
|
||||
dst_shm._array['time'][src_2li] = src_2lt
|
||||
|
||||
# last2 = dst.array[-2:]
|
||||
# if (
|
||||
# last2[-1]['index'] != src_li
|
||||
# or last2[-2]['index'] != src_2li
|
||||
# ):
|
||||
# dstl2 = list(last2)
|
||||
# srcl2 = list(src_l2)
|
||||
# print(
|
||||
# # f'{dst.token}\n'
|
||||
# f'src: {srcl2}\n'
|
||||
# f'dst: {dstl2}\n'
|
||||
# )
|
||||
|
|
|
@ -24,7 +24,7 @@ import numpy as np
|
|||
from numba import jit, float64, optional, int64
|
||||
|
||||
from ._api import fsp
|
||||
from ..data._normalize import iterticks
|
||||
from ..data import iterticks
|
||||
from ..data._sharedmem import ShmArray
|
||||
|
||||
|
||||
|
|
|
@ -20,7 +20,7 @@ import numpy as np
|
|||
from tractor.trionics._broadcast import AsyncReceiver
|
||||
|
||||
from ._api import fsp
|
||||
from ..data._normalize import iterticks
|
||||
from ..data import iterticks
|
||||
from ..data._sharedmem import ShmArray
|
||||
from ._momo import _wma
|
||||
from ..log import get_logger
|
||||
|
@ -234,7 +234,7 @@ async def flow_rates(
|
|||
# FSPs, user input, and possibly any general event stream in
|
||||
# real-time. Hint: ideally implemented with caching until mutated
|
||||
# ;)
|
||||
period: 'Param[int]' = 6, # noqa
|
||||
period: 'Param[int]' = 1, # noqa
|
||||
|
||||
# TODO: support other means by providing a map
|
||||
# to weights `partial()`-ed with `wma()`?
|
||||
|
@ -268,8 +268,7 @@ async def flow_rates(
|
|||
'dark_dvlm_rate': None,
|
||||
}
|
||||
|
||||
# TODO: 3.10 do ``anext()``
|
||||
quote = await source.__anext__()
|
||||
quote = await anext(source)
|
||||
|
||||
# ltr = 0
|
||||
# lvr = 0
|
||||
|
|
51
piker/log.py
|
@ -21,7 +21,11 @@ import logging
|
|||
import json
|
||||
|
||||
import tractor
|
||||
from pygments import highlight, lexers, formatters
|
||||
from pygments import (
|
||||
highlight,
|
||||
lexers,
|
||||
formatters,
|
||||
)
|
||||
|
||||
# Makes it so we only see the full module name when using ``__name__``
|
||||
# without the extra "piker." prefix.
|
||||
|
@ -32,26 +36,51 @@ def get_logger(
|
|||
name: str = None,
|
||||
|
||||
) -> logging.Logger:
|
||||
'''Return the package log or a sub-log for `name` if provided.
|
||||
'''
|
||||
return tractor.log.get_logger(name=name, _root_name=_proj_name)
|
||||
Return the package log or a sub-log for `name` if provided.
|
||||
|
||||
'''
|
||||
return tractor.log.get_logger(
|
||||
name=name,
|
||||
_root_name=_proj_name,
|
||||
)
|
||||
|
||||
|
||||
def get_console_log(level: str = None, name: str = None) -> logging.Logger:
|
||||
'''Get the package logger and enable a handler which writes to stderr.
|
||||
def get_console_log(
|
||||
level: str | None = None,
|
||||
name: str | None = None,
|
||||
|
||||
) -> logging.Logger:
|
||||
'''
|
||||
Get the package logger and enable a handler which writes to stderr.
|
||||
|
||||
Yeah yeah, i know we can use ``DictConfig``. You do it...
|
||||
|
||||
'''
|
||||
return tractor.log.get_console_log(
|
||||
level, name=name, _root_name=_proj_name) # our root logger
|
||||
level,
|
||||
name=name,
|
||||
_root_name=_proj_name,
|
||||
) # our root logger
|
||||
|
||||
|
||||
def colorize_json(data, style='algol_nu'):
|
||||
"""Colorize json output using ``pygments``.
|
||||
"""
|
||||
formatted_json = json.dumps(data, sort_keys=True, indent=4)
|
||||
def colorize_json(
|
||||
data: dict,
|
||||
style='algol_nu',
|
||||
):
|
||||
'''
|
||||
Colorize json output using ``pygments``.
|
||||
|
||||
'''
|
||||
formatted_json = json.dumps(
|
||||
data,
|
||||
sort_keys=True,
|
||||
indent=4,
|
||||
)
|
||||
return highlight(
|
||||
formatted_json, lexers.JsonLexer(),
|
||||
formatted_json,
|
||||
lexers.JsonLexer(),
|
||||
|
||||
# likeable styles: algol_nu, tango, monokai
|
||||
formatters.TerminalTrueColorFormatter(style=style)
|
||||
)
|
||||
|
|
|
@ -0,0 +1,58 @@
|
|||
# piker: trading gear for hackers
|
||||
# Copyright (C) Tyler Goodlet (in stewardship for pikers)
|
||||
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU Affero General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU Affero General Public License for more details.
|
||||
|
||||
# You should have received a copy of the GNU Affero General Public License
|
||||
# along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
'''
|
||||
Actor runtime primitives and (distributed) service APIs for,
|
||||
|
||||
- daemon-service mgmt: `_daemon` (i.e. low-level spawn and supervise machinery
|
||||
for sub-actors like `brokerd`, `emsd`, `datad`, etc.)
|
||||
|
||||
- service-actor supervision (via `trio` tasks) API: `._mngr`
|
||||
|
||||
- discovery interface (via light wrapping around `tractor`'s built-in
|
||||
protocol): `._registry`
|
||||
|
||||
- `docker` cntr SC supervision for use with `trio`: `_ahab`
|
||||
- wrappers for marketstore and elasticsearch dbs
|
||||
=> TODO: maybe to (re)move elsewhere?
|
||||
|
||||
'''
|
||||
from ._mngr import Services as Services
|
||||
from ._registry import (
|
||||
_tractor_kwargs as _tractor_kwargs,
|
||||
_default_reg_addr as _default_reg_addr,
|
||||
_default_registry_host as _default_registry_host,
|
||||
_default_registry_port as _default_registry_port,
|
||||
|
||||
open_registry as open_registry,
|
||||
find_service as find_service,
|
||||
check_for_service as check_for_service,
|
||||
)
|
||||
from ._daemon import (
|
||||
maybe_spawn_daemon as maybe_spawn_daemon,
|
||||
spawn_emsd as spawn_emsd,
|
||||
maybe_open_emsd as maybe_open_emsd,
|
||||
)
|
||||
from ._actor_runtime import (
|
||||
open_piker_runtime as open_piker_runtime,
|
||||
maybe_open_pikerd as maybe_open_pikerd,
|
||||
open_pikerd as open_pikerd,
|
||||
get_runtime_vars as get_runtime_vars,
|
||||
)
|
||||
from ..brokers._daemon import (
|
||||
spawn_brokerd as spawn_brokerd,
|
||||
maybe_spawn_brokerd as maybe_spawn_brokerd,
|
||||
)
|