mirror of https://github.com/skygpu/skynet.git
Use leap as network + auth layer, we decentralized now
parent 79901c85ca
commit 0b312ff961
@ -1,7 +1,7 @@
docker build \
    -t skynet:runtime-cuda \
    -f Dockerfile.runtime+cuda .
    -f docker/Dockerfile.runtime+cuda .

docker build \
    -t skynet:runtime \
    -f Dockerfile.runtime .
    -f docker/Dockerfile.runtime .
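For reference, a built image would presumably be launched along these lines (the run flags are an assumption, not part of this diff):

    docker run -it --gpus=all skynet:runtime-cuda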
@ -1,33 +0,0 @@
-----BEGIN CERTIFICATE-----
MIIFxDCCA6wCAQAwDQYJKoZIhvcNAQENBQAwgacxCzAJBgNVBAYTAlVZMRMwEQYD
VQQIDApNb250ZXZpZGVvMRMwEQYDVQQHDApNb250ZXZpZGVvMRowGAYDVQQKDBFz
a3luZXQtZm91bmRhdGlvbjENMAsGA1UECwwEbm9uZTEcMBoGA1UEAwwTR3VpbGxl
cm1vIFJvZHJpZ3VlejElMCMGCSqGSIb3DQEJARYWZ3VpbGxlcm1vckBmaW5nLmVk
dS51eTAeFw0yMjEyMTExNDM3NDVaFw0zMjEyMDgxNDM3NDVaMIGnMQswCQYDVQQG
EwJVWTETMBEGA1UECAwKTW9udGV2aWRlbzETMBEGA1UEBwwKTW9udGV2aWRlbzEa
MBgGA1UECgwRc2t5bmV0LWZvdW5kYXRpb24xDTALBgNVBAsMBG5vbmUxHDAaBgNV
BAMME0d1aWxsZXJtbyBSb2RyaWd1ZXoxJTAjBgkqhkiG9w0BCQEWFmd1aWxsZXJt
b3JAZmluZy5lZHUudXkwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCu
HdqGPtsqtYqfIilVdq0MmqfEn9g4T+uglfWjRF2gWV3uQCuXDv1O61XfIIyaDQXl
VRqT36txtM8rvn213746SwK0jx9+ln5jD3EDbL4WZv1qvp4/jqA+UPKXFXnD3he+
pRpcDMu4IpYKuoPl667IW/auFSSy3TIWhIZb8ghqxzb2e2i6/OhzIWKHeFIKvbEA
EB6Z63wy3O0ACY7RVhHu0wzyzqUW1t1VNsbZvO9Xmmqm2EWZBJp0TFph3Z9kOR/g
0Ik7kxMLrGIfhV5/1gPQlNr3ADebGJnaMdGCBUi+pqeZcVnGY45fjOJREaD3aTRG
ohZM0Td40K7paDVjUvQ9rPgKoDMsCWpu8IPdc4LB0hONIO2KycFb49cd8zNWsetj
kHXxL9IVgORxfGmVyOtNGotS5RX6R+qwsll3qUmX4XjwvQMAMvATcSkY26CWdCDM
vGFp+0REbVyDfJ9pwU7ZkAxiWeAoiesGfEWyRLsl0fFkaHgHG+oPCH9IO63TVnCq
E6NGRQpHfJ5oV4ZihUfWjSFxOJqdFM3xfzk/2YGzQUgKVBsbuQTWPKxE0aSwt1Cf
Ug4+C0RSDMmrquRmhRn/BWsSRl+2m17rt1axTA4pEVGcHHyKSowEFQ68spD1Lm2K
iU/LCPBh4REzexwjP+onwHALXoxIEOLiy2lEdYgWnwIDAQABMA0GCSqGSIb3DQEB
DQUAA4ICAQBtTZb6PJJQXtF90MD4Hcgj+phKkbtHVZyM198Giw3I9f2PgjDECKb9
I7JLzCUgpexKk1TNso2FPNoVlcE4yMO0I0EauoKcwZ1w9GXsXOGwPHvB9hrItaLs
s7Qxf+IVgKO4y5Tv+8WO4lhgShWa4fW3L7Dpk0XK4INoAAxZLbEdekf2GGqTUGzD
SrfvtE8h6JT+gR4lsAvdsRjJIKYacsqhKjtV0reA6v99NthDcpwaStrAaFmtJkD3
6G3JVU0JyMBlR1GetN0w42BjVHJ2l7cPm405lE2ymFwcl7C8VozXXi4wmfVN+xlh
NOVSbl/QUiMUyt44XPhPCbgopxLqhqtvGzBl+ldF1AR4aaukXjvS/8VtFZ3cfx7n
n5NYxvPnq3kwlFNHgppt+u1leGrzxuesGNQENQd3shO/S9T4I92hAdk2MRTivIfv
m74u6RCtHqDviiOFzF7zcqO37wCrb1dnfS1N4I6/rCf6XtxlRGa8Cp9z4DTKjwAC
5z5irJb+LSJkFXA/zIFpBjjKBdyhjYGuXrbJWdL81kTcYRqjE99XfZaTU8L43qVd
TUaIvQGTtx8k7WGmeTRHk6SauCaXSfeXwYTpEZpictUI/uWo/KJRDL/aE8HmBeH3
pr+cfDu7erTLH+GG5ZROrILf4929Jd7OF4a0nHUnZcycBS0CjGHVHA==
-----END CERTIFICATE-----
@ -1,52 +0,0 @@
-----BEGIN PRIVATE KEY-----
MIIJQgIBADANBgkqhkiG9w0BAQEFAASCCSwwggkoAgEAAoICAQCyAuCwwnoENeYe
B0159sH47zedmRaxcUmC/qmVdUptzOxIHpUCSAIy+hoR5UOhnRsmjj7Y0kUWtlwj
bHAKHcuUn4sqLBb0nl6kH79DzP/4YCQM3GEIXzE6wy/zmnYrHz53Ci7DzmMcRM3n
MwXDVPPpKXzpmI/yassKxSltBKgbh65U3oOheiuFygOlAkT4fUaXX5Bf9DECZBsj
ewf9WvHzLGN2eQt/YWYxJMstgAecHLlRmLbKoYD/P+O0K1ybmhMDItcXE49kNC4s
Rvq7MUt8B0bi8SlRxv5plAbZBiyMilrxf3yCCgYaTsqtt3x+CSrAWjzYIzEzD5aZ
1+s5O2jsqPYkbTvA4NT/hDnWHkkr7YcBRwQn1iMe2tMUTTsWotIYWH87++BzDAWG
3ZBkqNZ4mUdA3usk2ZPO0BwWNxlb0AqOlAJUYSoCsm3nBPT08rVvumQ44hup6XPW
L5KIDyL5+Fl8RDgDF8cpCfrijdL+U+GoHmmJYM6zMkrGqD7BD+WJgw9plgbaWUBI
q4aimXF4PrBJAAX5IRyZK+EDDH0AREL3qoZIQVvJR+yGIKTixpyVKtj6jm1OY4Go
iXxRLaFrc4ucT9+PxRHo9zYtNIijub4eXuU5nveswptmCsNa4spTO2XCkHh6IE0Z
B4oALC4lrC279WY+3TaOpv/roGzG9QIDAQABAoICABfpXGFMs7MzwkYvrkU/KO3V
bwppHAFDOcqyMU7K7e/d4ly1rvJwKyDJ3mKfrKay7Ii7UXndP5E+IcD9ufcXQCzQ
rug/+pLAC0UkoT6W9PNaMWgrhOU+VDs+fjHM19QRuFmpMSr1jZ6ofLgdGchpSvJR
CQnKh9uFDjfTethoEw96Tv1GKTcHAChSleFpHUv7wqsRbTABJJbbokGb2duQhzD7
uh3vQzodzT+2CjeBxoPpNS40GKm+FA6KzdLP2FAWhuNESibmu7uMFCpicR+1ZBxe
+zNU4xCsbamk9rPZqSD1HM4/1RZqs53TuP9TcbzvDPfAUgKpMjICWrUuVIHgQcb/
H3lJbsusZccFkl+B4arncUu7oyYWsw+OLHq/khja1RrJu6/PDDfcqY0cSAAsCKJf
ChiHVyVbhZ6b9g1MdYLNPlcJrpgCVX+PisqLqY/RqQGIln6D0sBK1+MC6TjFW3zA
ca3Dhun18JBZ73mmlGj7LoOUojtnnxy5YVUdB75tdo5BqilGR1nLurJupg9Nkgeq
C7nbA+rZ93MKHptayko91nc7yLzsMRV8PDFhE2UhZWRZfJ5yAW/IaJBZpvTvSYM3
5lTgAn1o34mnykuNC3sK5tbCAMb0YbCJtmotRwBIqlFHqbH+TK07CW2lnEkqZ8ID
YFTpAJlgKgsdhsd5ZCkpAoIBAQDQMvn4iBKvnhCeRUV/6AOHcOsgwJkV/G61Gz/G
F0mx0kPsaPugNX1VzF15R+vN1kbk3sQ9bDP6FfsX7jp2EjRqGEb9mJ8BoIbSHLJ4
dDT7M90TMMYepCVoFMC03Hh30vxH3QokgV3E1lakXCwl1dheRz5czT0BL9VuBkpG
x8vGpVfX4VqLliOWK72wEYdfohUTynb2OkRP/e6woBRxb3hYLqpN7nVHVRiMFBgG
+AvpLNv/oSYBOXj9oRBOwVLZaPV8N1p4Pv7WXL+B7E47Z9rUYNzGFf+2iM1uDdrO
xHkAocgMM/sL81sJaj1khoYRLC8IpAxBG8NqRP6xzeGcLVLHAoIBAQDa4ZdEDvqA
gJmJ4vgivIX7/zv7/q9c/nkNsnPiXjMys6HRdwroQjT7wrxO5/jJX9EDjM98dSFg
1HFJWJulpmDMpIzzwC6DLxZWd+EEqG4Pyv50VGmGuwmqDwWAP7v/pMPwUEvlsGYZ
Tvlebr4jze9vz8MiRw3qBp0ASWpDWgySt3zm0gDWRaxqvZbdqlLvK/YTta+4ySay
dfkqMG4SGM2m7Rc6H+DKqhwADoyd3oVrFD7QWCZTUUm414TgFFk+uils8Pms6ulG
u+mZT29Jaq8UzoXLOmf+tX2K07oA98y0HfrGMAto3+c0x9ArIPrtwHuUGJiTdt3V
ShBPP9AzaBxjAoIBAQCF+3gwP2k/CQqKv+t035t9yuYVgrxBkNyxweJtmUj8nWLG
vdzIggOxdj3lMaqHIVEoMk+5c2uTkhevk8ideSOv7wWoZ1JUWrjIeF1F9QqvafXo
RqgIyfukmk5VVdhUzDs8B/xh97qfVIwXY5Wpl4+RRGnWkOGkZOMF1hhwqlzx7i+0
prp9P9aQ6n880lr66TSFMvMRi/ewPqsfkTT2txSMMyO32TAyAoo0gy3fNjt8CDlf
rZXmjdTV65OyCulFLi1kjb6zyV54FuHLO4Yw5qnFqLwK4ddY4XrKSzI3g+qWxIYX
jFAPpcE9MthlW8jlPjjaZ6/XKoW8WsBJLkP1HJm7AoIBAAm9J+HbWMIG9s3vz2Kc
SMnhnWWk+2CD4hb97bIQxu5ml7ieN1oGOB1LmN1Z7PPo03/47/J1s7p/OVsuGh7Q
vFXerHbcAjXMDo5iXxy58cu6GIBMkTVxdQigCnqeW1sQlbdHm1jo9GID5YySGNu2
+gRbli8cQj47dRjiK1w70XtltqT+ixL9nqJRNTk/rtj9d8GAwATUzmf6X8/Ev+EG
QYA/5Fyttm7OCtjlzNPpZr5Q9EqI4YurfkA/NqZRwXbNCbLTNgi/mwmOquIraqQ1
nvyqA8H7I01t/dwDd687V1xcSSAwWxGbhMoQae7BVOjnO5hnT8Kf81beKMOd70Ga
TEkCggEAI8ICJvOBouBO92330s8smVhxPi9tRCnOZ0mg5MoR8EJydbOrcRIap1w7
Ai0CTR6ziOgMaDbT52ouZ1u0l6izYAdBdeSaPOiiTLx8vEE+U7SpNR3zCesPtZB3
uvGOY2mVwyfZH2SUc4cs+uzDnAGhPqC7/RSFPMoctXf46YpGc9auyjdesE395KLX
L043DaE9/ng9B1jCnhu5TUyiUtAluHvRGQC32og6id2KUEhmhGCl5vj2KIVoDmI2
NpeBLCKuaBNi/rOG3zyHLjg1wCYidjE7vwjY6UyemjbW48LI8KN6Sl5rQdaDu+bG
lWI2XLI4C2zqDBVmEL2MuzL0FrWivQ==
-----END PRIVATE KEY-----
@ -1,33 +0,0 @@
-----BEGIN CERTIFICATE-----
MIIFxDCCA6wCAQIwDQYJKoZIhvcNAQENBQAwgacxCzAJBgNVBAYTAlVZMRMwEQYD
VQQIDApNb250ZXZpZGVvMRMwEQYDVQQHDApNb250ZXZpZGVvMRowGAYDVQQKDBFz
a3luZXQtZm91bmRhdGlvbjENMAsGA1UECwwEbm9uZTEcMBoGA1UEAwwTR3VpbGxl
cm1vIFJvZHJpZ3VlejElMCMGCSqGSIb3DQEJARYWZ3VpbGxlcm1vckBmaW5nLmVk
dS51eTAeFw0yMjEyMTExNTE1MDNaFw0zMjEyMDgxNTE1MDNaMIGnMQswCQYDVQQG
EwJVWTETMBEGA1UECAwKTW9udGV2aWRlbzETMBEGA1UEBwwKTW9udGV2aWRlbzEa
MBgGA1UECgwRc2t5bmV0LWZvdW5kYXRpb24xDTALBgNVBAsMBG5vbmUxHDAaBgNV
BAMME0d1aWxsZXJtbyBSb2RyaWd1ZXoxJTAjBgkqhkiG9w0BCQEWFmd1aWxsZXJt
b3JAZmluZy5lZHUudXkwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCy
AuCwwnoENeYeB0159sH47zedmRaxcUmC/qmVdUptzOxIHpUCSAIy+hoR5UOhnRsm
jj7Y0kUWtlwjbHAKHcuUn4sqLBb0nl6kH79DzP/4YCQM3GEIXzE6wy/zmnYrHz53
Ci7DzmMcRM3nMwXDVPPpKXzpmI/yassKxSltBKgbh65U3oOheiuFygOlAkT4fUaX
X5Bf9DECZBsjewf9WvHzLGN2eQt/YWYxJMstgAecHLlRmLbKoYD/P+O0K1ybmhMD
ItcXE49kNC4sRvq7MUt8B0bi8SlRxv5plAbZBiyMilrxf3yCCgYaTsqtt3x+CSrA
WjzYIzEzD5aZ1+s5O2jsqPYkbTvA4NT/hDnWHkkr7YcBRwQn1iMe2tMUTTsWotIY
WH87++BzDAWG3ZBkqNZ4mUdA3usk2ZPO0BwWNxlb0AqOlAJUYSoCsm3nBPT08rVv
umQ44hup6XPWL5KIDyL5+Fl8RDgDF8cpCfrijdL+U+GoHmmJYM6zMkrGqD7BD+WJ
gw9plgbaWUBIq4aimXF4PrBJAAX5IRyZK+EDDH0AREL3qoZIQVvJR+yGIKTixpyV
Ktj6jm1OY4GoiXxRLaFrc4ucT9+PxRHo9zYtNIijub4eXuU5nveswptmCsNa4spT
O2XCkHh6IE0ZB4oALC4lrC279WY+3TaOpv/roGzG9QIDAQABMA0GCSqGSIb3DQEB
DQUAA4ICAQBic+3ipdfvmCThWkDjVs97tkbUUNjGXH95okwI0Jbft0iRivVM16Xb
hqGquQK4OvYoSTHTmsMH19/dMj0W/Bd4IUYKl64rG8YJUbjDbO1y7a+wF2TaONyn
z0k3zRCky+IwxqYf9Ppw7s2/cXlt3fOEg0kBr4EooXd+bFCx/+JQIxU3vfL8cDQK
dp55vkh+ROt8eR7ai1FiAC8J1prswyT092ktco2fP0MI4uQ3iQfl07NyI68UV1E5
aIsOPU3SKMtxz5FLm8JEUVhZRJZJWQ/o/iB/2cdn4PDBGkrBhgU6ysMPNX51RlCM
aHRsMyoO2mFfIlm0jW0C5lZ6nKHuA1sXPFz1YxzpvnRgRlHUlfoKf1wpCeF+5Qz+
qylArHPSu69CA38wLCzJ3wWTaGVL1nuH1UPR2Pg71HGBYqLCD2XGa8iLShO1DKl7
1bAeHOvzryngYq35rky1L3cIquinAwCP4QKocJK3DJAD5lPqhpzO1f2/1BmWV9Ri
ZRrRkM/9AxePxGZEmnoQbwKsQs/bY+jGU2fRzqijxRPoX9ogX5Te/Ko0mQh1slbX
4bL9NIipHPgpNeZRmRUnu4z00UJNGrI/qGaont3eMH1V65WGz9VMYnmCxkmsg45e
skrauB/Ly9DRRZBddDwAQF8RIbpqPsfQTuEjF0sGdYH3LaClGbA/cA==
-----END CERTIFICATE-----
@ -0,0 +1,22 @@
FROM ubuntu:22.04

ENV DEBIAN_FRONTEND=noninteractive

RUN apt-get update && apt-get install -y wget

# install eosio tools
RUN wget https://github.com/AntelopeIO/leap/releases/download/v4.0.0/leap_4.0.0-ubuntu22.04_amd64.deb

RUN apt-get install -y ./leap_4.0.0-ubuntu22.04_amd64.deb

RUN mkdir -p /root/nodeos
WORKDIR /root/nodeos
COPY config.ini config.ini
COPY contracts contracts
COPY genesis genesis

EXPOSE 42000
EXPOSE 29876
EXPOSE 39999

CMD sleep 9999999999
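A minimal sketch of building and running this nodeos image (the Dockerfile path and the skynet:nodeos tag are assumptions not stated in the diff; host networking mirrors how the cli spawns its container below):

    docker build -t skynet:nodeos -f Dockerfile .
    docker run -d --network host skynet:nodeos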
@ -0,0 +1,52 @@
agent-name = Telos Skynet Testnet

wasm-runtime = eos-vm-jit
eos-vm-oc-compile-threads = 4
eos-vm-oc-enable = true

chain-state-db-size-mb = 65536
enable-account-queries = true

http-server-address = 0.0.0.0:42000
access-control-allow-origin = *
contracts-console = true
http-validate-host = false
p2p-listen-endpoint = 0.0.0.0:29876
p2p-server-address = 0.0.0.0:29876
verbose-http-errors = true

state-history-endpoint = 0.0.0.0:39999
trace-history = true
chain-state-history = true
trace-history-debug-mode = true
state-history-dir = state-history

sync-fetch-span = 1600
max-clients = 250

signature-provider = EOS5fLreY5Zq5owBhmNJTgQaLqQ4ufzXSTpStQakEyfxNFuUEgNs1=KEY:5JnvSc6pewpHHuUHwvbJopsew6AKwiGnexwDRc2Pj2tbdw6iML9

disable-subjective-billing = true
max-transaction-time = 500
read-only-read-window-time-us = 600000

abi-serializer-max-time-ms = 2000000

p2p-max-nodes-per-host = 1

connection-cleanup-period = 30
allowed-connection = any
http-max-response-time-ms = 100000
max-body-size = 10000000

enable-stale-production = true


plugin = eosio::http_plugin
plugin = eosio::chain_plugin
plugin = eosio::chain_api_plugin
plugin = eosio::net_api_plugin
plugin = eosio::net_plugin
plugin = eosio::producer_plugin
plugin = eosio::producer_api_plugin
plugin = eosio::state_history_plugin
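Given the Dockerfile above copies this file to /root/nodeos/config.ini, the producer would presumably be started with something like the following (these are stock nodeos flags; the data-dir path is an assumption):

    nodeos \
        --config-dir /root/nodeos \
        --data-dir /root/nodeos/data \
        --genesis-json /root/nodeos/genesis/genesis.json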
@ -0,0 +1,360 @@
{
    "____comment": "This file was generated with eosio-abigen. DO NOT EDIT Thu Apr 14 07:49:43 2022",
    "version": "eosio::abi/1.1",
    "structs": [
        {
            "name": "action",
            "base": "",
            "fields": [
                {
                    "name": "account",
                    "type": "name"
                },
                {
                    "name": "name",
                    "type": "name"
                },
                {
                    "name": "authorization",
                    "type": "permission_level[]"
                },
                {
                    "name": "data",
                    "type": "bytes"
                }
            ]
        },
        {
            "name": "approval",
            "base": "",
            "fields": [
                {
                    "name": "level",
                    "type": "permission_level"
                },
                {
                    "name": "time",
                    "type": "time_point"
                }
            ]
        },
        {
            "name": "approvals_info",
            "base": "",
            "fields": [
                {
                    "name": "version",
                    "type": "uint8"
                },
                {
                    "name": "proposal_name",
                    "type": "name"
                },
                {
                    "name": "requested_approvals",
                    "type": "approval[]"
                },
                {
                    "name": "provided_approvals",
                    "type": "approval[]"
                }
            ]
        },
        {
            "name": "approve",
            "base": "",
            "fields": [
                {
                    "name": "proposer",
                    "type": "name"
                },
                {
                    "name": "proposal_name",
                    "type": "name"
                },
                {
                    "name": "level",
                    "type": "permission_level"
                },
                {
                    "name": "proposal_hash",
                    "type": "checksum256$"
                }
            ]
        },
        {
            "name": "cancel",
            "base": "",
            "fields": [
                {
                    "name": "proposer",
                    "type": "name"
                },
                {
                    "name": "proposal_name",
                    "type": "name"
                },
                {
                    "name": "canceler",
                    "type": "name"
                }
            ]
        },
        {
            "name": "exec",
            "base": "",
            "fields": [
                {
                    "name": "proposer",
                    "type": "name"
                },
                {
                    "name": "proposal_name",
                    "type": "name"
                },
                {
                    "name": "executer",
                    "type": "name"
                }
            ]
        },
        {
            "name": "extension",
            "base": "",
            "fields": [
                {
                    "name": "type",
                    "type": "uint16"
                },
                {
                    "name": "data",
                    "type": "bytes"
                }
            ]
        },
        {
            "name": "invalidate",
            "base": "",
            "fields": [
                {
                    "name": "account",
                    "type": "name"
                }
            ]
        },
        {
            "name": "invalidation",
            "base": "",
            "fields": [
                {
                    "name": "account",
                    "type": "name"
                },
                {
                    "name": "last_invalidation_time",
                    "type": "time_point"
                }
            ]
        },
        {
            "name": "old_approvals_info",
            "base": "",
            "fields": [
                {
                    "name": "proposal_name",
                    "type": "name"
                },
                {
                    "name": "requested_approvals",
                    "type": "permission_level[]"
                },
                {
                    "name": "provided_approvals",
                    "type": "permission_level[]"
                }
            ]
        },
        {
            "name": "permission_level",
            "base": "",
            "fields": [
                {
                    "name": "actor",
                    "type": "name"
                },
                {
                    "name": "permission",
                    "type": "name"
                }
            ]
        },
        {
            "name": "proposal",
            "base": "",
            "fields": [
                {
                    "name": "proposal_name",
                    "type": "name"
                },
                {
                    "name": "packed_transaction",
                    "type": "bytes"
                }
            ]
        },
        {
            "name": "propose",
            "base": "",
            "fields": [
                {
                    "name": "proposer",
                    "type": "name"
                },
                {
                    "name": "proposal_name",
                    "type": "name"
                },
                {
                    "name": "requested",
                    "type": "permission_level[]"
                },
                {
                    "name": "trx",
                    "type": "transaction"
                }
            ]
        },
        {
            "name": "transaction",
            "base": "transaction_header",
            "fields": [
                {
                    "name": "context_free_actions",
                    "type": "action[]"
                },
                {
                    "name": "actions",
                    "type": "action[]"
                },
                {
                    "name": "transaction_extensions",
                    "type": "extension[]"
                }
            ]
        },
        {
            "name": "transaction_header",
            "base": "",
            "fields": [
                {
                    "name": "expiration",
                    "type": "time_point_sec"
                },
                {
                    "name": "ref_block_num",
                    "type": "uint16"
                },
                {
                    "name": "ref_block_prefix",
                    "type": "uint32"
                },
                {
                    "name": "max_net_usage_words",
                    "type": "varuint32"
                },
                {
                    "name": "max_cpu_usage_ms",
                    "type": "uint8"
                },
                {
                    "name": "delay_sec",
                    "type": "varuint32"
                }
            ]
        },
        {
            "name": "unapprove",
            "base": "",
            "fields": [
                {
                    "name": "proposer",
                    "type": "name"
                },
                {
                    "name": "proposal_name",
                    "type": "name"
                },
                {
                    "name": "level",
                    "type": "permission_level"
                }
            ]
        }
    ],
    "types": [],
    "actions": [
        {
            "name": "approve",
            "type": "approve",
            "ricardian_contract": ""
        },
        {
            "name": "cancel",
            "type": "cancel",
            "ricardian_contract": ""
        },
        {
            "name": "exec",
            "type": "exec",
            "ricardian_contract": ""
        },
        {
            "name": "invalidate",
            "type": "invalidate",
            "ricardian_contract": ""
        },
        {
            "name": "propose",
            "type": "propose",
            "ricardian_contract": ""
        },
        {
            "name": "unapprove",
            "type": "unapprove",
            "ricardian_contract": ""
        }
    ],
    "tables": [
        {
            "name": "approvals",
            "type": "old_approvals_info",
            "index_type": "i64",
            "key_names": [],
            "key_types": []
        },
        {
            "name": "approvals2",
            "type": "approvals_info",
            "index_type": "i64",
            "key_names": [],
            "key_types": []
        },
        {
            "name": "invals",
            "type": "invalidation",
            "index_type": "i64",
            "key_names": [],
            "key_types": []
        },
        {
            "name": "proposal",
            "type": "proposal",
            "index_type": "i64",
            "key_names": [],
            "key_types": []
        }
    ],
    "ricardian_clauses": [],
    "variants": [],
    "abi_extensions": []
}
Binary file not shown.
File diff suppressed because one or more lines are too long
Binary file not shown.
@ -0,0 +1,185 @@
{
    "____comment": "This file was generated with eosio-abigen. DO NOT EDIT ",
    "version": "eosio::abi/1.1",
    "types": [],
    "structs": [
        {
            "name": "account",
            "base": "",
            "fields": [
                {
                    "name": "balance",
                    "type": "asset"
                }
            ]
        },
        {
            "name": "close",
            "base": "",
            "fields": [
                {
                    "name": "owner",
                    "type": "name"
                },
                {
                    "name": "symbol",
                    "type": "symbol"
                }
            ]
        },
        {
            "name": "create",
            "base": "",
            "fields": [
                {
                    "name": "issuer",
                    "type": "name"
                },
                {
                    "name": "maximum_supply",
                    "type": "asset"
                }
            ]
        },
        {
            "name": "currency_stats",
            "base": "",
            "fields": [
                {
                    "name": "supply",
                    "type": "asset"
                },
                {
                    "name": "max_supply",
                    "type": "asset"
                },
                {
                    "name": "issuer",
                    "type": "name"
                }
            ]
        },
        {
            "name": "issue",
            "base": "",
            "fields": [
                {
                    "name": "to",
                    "type": "name"
                },
                {
                    "name": "quantity",
                    "type": "asset"
                },
                {
                    "name": "memo",
                    "type": "string"
                }
            ]
        },
        {
            "name": "open",
            "base": "",
            "fields": [
                {
                    "name": "owner",
                    "type": "name"
                },
                {
                    "name": "symbol",
                    "type": "symbol"
                },
                {
                    "name": "ram_payer",
                    "type": "name"
                }
            ]
        },
        {
            "name": "retire",
            "base": "",
            "fields": [
                {
                    "name": "quantity",
                    "type": "asset"
                },
                {
                    "name": "memo",
                    "type": "string"
                }
            ]
        },
        {
            "name": "transfer",
            "base": "",
            "fields": [
                {
                    "name": "from",
                    "type": "name"
                },
                {
                    "name": "to",
                    "type": "name"
                },
                {
                    "name": "quantity",
                    "type": "asset"
                },
                {
                    "name": "memo",
                    "type": "string"
                }
            ]
        }
    ],
    "actions": [
        {
            "name": "close",
            "type": "close",
            "ricardian_contract": "---\nspec_version: \"0.2.0\"\ntitle: Close Token Balance\nsummary: 'Close {{nowrap owner}}’s zero quantity balance'\nicon: http://127.0.0.1/ricardian_assets/eosio.contracts/icons/token.png#207ff68b0406eaa56618b08bda81d6a0954543f36adc328ab3065f31a5c5d654\n---\n\n{{owner}} agrees to close their zero quantity balance for the {{symbol_to_symbol_code symbol}} token.\n\nRAM will be refunded to the RAM payer of the {{symbol_to_symbol_code symbol}} token balance for {{owner}}."
        },
        {
            "name": "create",
            "type": "create",
            "ricardian_contract": "---\nspec_version: \"0.2.0\"\ntitle: Create New Token\nsummary: 'Create a new token'\nicon: http://127.0.0.1/ricardian_assets/eosio.contracts/icons/token.png#207ff68b0406eaa56618b08bda81d6a0954543f36adc328ab3065f31a5c5d654\n---\n\n{{$action.account}} agrees to create a new token with symbol {{asset_to_symbol_code maximum_supply}} to be managed by {{issuer}}.\n\nThis action will not result any any tokens being issued into circulation.\n\n{{issuer}} will be allowed to issue tokens into circulation, up to a maximum supply of {{maximum_supply}}.\n\nRAM will deducted from {{$action.account}}’s resources to create the necessary records."
        },
        {
            "name": "issue",
            "type": "issue",
            "ricardian_contract": "---\nspec_version: \"0.2.0\"\ntitle: Issue Tokens into Circulation\nsummary: 'Issue {{nowrap quantity}} into circulation and transfer into {{nowrap to}}’s account'\nicon: http://127.0.0.1/ricardian_assets/eosio.contracts/icons/token.png#207ff68b0406eaa56618b08bda81d6a0954543f36adc328ab3065f31a5c5d654\n---\n\nThe token manager agrees to issue {{quantity}} into circulation, and transfer it into {{to}}’s account.\n\n{{#if memo}}There is a memo attached to the transfer stating:\n{{memo}}\n{{/if}}\n\nIf {{to}} does not have a balance for {{asset_to_symbol_code quantity}}, or the token manager does not have a balance for {{asset_to_symbol_code quantity}}, the token manager will be designated as the RAM payer of the {{asset_to_symbol_code quantity}} token balance for {{to}}. As a result, RAM will be deducted from the token manager’s resources to create the necessary records.\n\nThis action does not allow the total quantity to exceed the max allowed supply of the token."
        },
        {
            "name": "open",
            "type": "open",
            "ricardian_contract": "---\nspec_version: \"0.2.0\"\ntitle: Open Token Balance\nsummary: 'Open a zero quantity balance for {{nowrap owner}}'\nicon: http://127.0.0.1/ricardian_assets/eosio.contracts/icons/token.png#207ff68b0406eaa56618b08bda81d6a0954543f36adc328ab3065f31a5c5d654\n---\n\n{{ram_payer}} agrees to establish a zero quantity balance for {{owner}} for the {{symbol_to_symbol_code symbol}} token.\n\nIf {{owner}} does not have a balance for {{symbol_to_symbol_code symbol}}, {{ram_payer}} will be designated as the RAM payer of the {{symbol_to_symbol_code symbol}} token balance for {{owner}}. As a result, RAM will be deducted from {{ram_payer}}’s resources to create the necessary records."
        },
        {
            "name": "retire",
            "type": "retire",
            "ricardian_contract": "---\nspec_version: \"0.2.0\"\ntitle: Remove Tokens from Circulation\nsummary: 'Remove {{nowrap quantity}} from circulation'\nicon: http://127.0.0.1/ricardian_assets/eosio.contracts/icons/token.png#207ff68b0406eaa56618b08bda81d6a0954543f36adc328ab3065f31a5c5d654\n---\n\nThe token manager agrees to remove {{quantity}} from circulation, taken from their own account.\n\n{{#if memo}} There is a memo attached to the action stating:\n{{memo}}\n{{/if}}"
        },
        {
            "name": "transfer",
            "type": "transfer",
            "ricardian_contract": "---\nspec_version: \"0.2.0\"\ntitle: Transfer Tokens\nsummary: 'Send {{nowrap quantity}} from {{nowrap from}} to {{nowrap to}}'\nicon: http://127.0.0.1/ricardian_assets/eosio.contracts/icons/transfer.png#5dfad0df72772ee1ccc155e670c1d124f5c5122f1d5027565df38b418042d1dd\n---\n\n{{from}} agrees to send {{quantity}} to {{to}}.\n\n{{#if memo}}There is a memo attached to the transfer stating:\n{{memo}}\n{{/if}}\n\nIf {{from}} is not already the RAM payer of their {{asset_to_symbol_code quantity}} token balance, {{from}} will be designated as such. As a result, RAM will be deducted from {{from}}’s resources to refund the original RAM payer.\n\nIf {{to}} does not have a balance for {{asset_to_symbol_code quantity}}, {{from}} will be designated as the RAM payer of the {{asset_to_symbol_code quantity}} token balance for {{to}}. As a result, RAM will be deducted from {{from}}’s resources to create the necessary records."
        }
    ],
    "tables": [
        {
            "name": "accounts",
            "type": "account",
            "index_type": "i64",
            "key_names": [],
            "key_types": []
        },
        {
            "name": "stat",
            "type": "currency_stats",
            "index_type": "i64",
            "key_names": [],
            "key_types": []
        }
    ],
    "ricardian_clauses": [],
    "variants": []
}
Binary file not shown.
@ -0,0 +1,130 @@
{
    "____comment": "This file was generated with eosio-abigen. DO NOT EDIT Thu Apr 14 07:49:40 2022",
    "version": "eosio::abi/1.1",
    "structs": [
        {
            "name": "action",
            "base": "",
            "fields": [
                {
                    "name": "account",
                    "type": "name"
                },
                {
                    "name": "name",
                    "type": "name"
                },
                {
                    "name": "authorization",
                    "type": "permission_level[]"
                },
                {
                    "name": "data",
                    "type": "bytes"
                }
            ]
        },
        {
            "name": "exec",
            "base": "",
            "fields": [
                {
                    "name": "executer",
                    "type": "name"
                },
                {
                    "name": "trx",
                    "type": "transaction"
                }
            ]
        },
        {
            "name": "extension",
            "base": "",
            "fields": [
                {
                    "name": "type",
                    "type": "uint16"
                },
                {
                    "name": "data",
                    "type": "bytes"
                }
            ]
        },
        {
            "name": "permission_level",
            "base": "",
            "fields": [
                {
                    "name": "actor",
                    "type": "name"
                },
                {
                    "name": "permission",
                    "type": "name"
                }
            ]
        },
        {
            "name": "transaction",
            "base": "transaction_header",
            "fields": [
                {
                    "name": "context_free_actions",
                    "type": "action[]"
                },
                {
                    "name": "actions",
                    "type": "action[]"
                },
                {
                    "name": "transaction_extensions",
                    "type": "extension[]"
                }
            ]
        },
        {
            "name": "transaction_header",
            "base": "",
            "fields": [
                {
                    "name": "expiration",
                    "type": "time_point_sec"
                },
                {
                    "name": "ref_block_num",
                    "type": "uint16"
                },
                {
                    "name": "ref_block_prefix",
                    "type": "uint32"
                },
                {
                    "name": "max_net_usage_words",
                    "type": "varuint32"
                },
                {
                    "name": "max_cpu_usage_ms",
                    "type": "uint8"
                },
                {
                    "name": "delay_sec",
                    "type": "varuint32"
                }
            ]
        }
    ],
    "types": [],
    "actions": [
        {
            "name": "exec",
            "type": "exec",
            "ricardian_contract": ""
        }
    ],
    "tables": [],
    "ricardian_clauses": [],
    "variants": [],
    "abi_extensions": []
}
Binary file not shown.
File diff suppressed because it is too large
Binary file not shown.
@ -0,0 +1,25 @@
{
    "initial_timestamp": "2023-05-22T00:00:00.000",
    "initial_key": "EOS5fLreY5Zq5owBhmNJTgQaLqQ4ufzXSTpStQakEyfxNFuUEgNs1",
    "initial_configuration": {
        "max_block_net_usage": 1048576,
        "target_block_net_usage_pct": 1000,
        "max_transaction_net_usage": 1048575,
        "base_per_transaction_net_usage": 12,
        "net_usage_leeway": 500,
        "context_free_discount_net_usage_num": 20,
        "context_free_discount_net_usage_den": 100,
        "max_block_cpu_usage": 200000,
        "target_block_cpu_usage_pct": 1000,
        "max_transaction_cpu_usage": 150000,
        "min_transaction_cpu_usage": 100,
        "max_transaction_lifetime": 3600,
        "deferred_trx_expiration_window": 600,
        "max_transaction_delay": 3888000,
        "max_inline_action_size": 4096,
        "max_inline_action_depth": 4,
        "max_authority_depth": 6
    }
}
@ -1,4 +1,3 @@
[pytest]
log_cli = True
log_level = info
trio_mode = true
@ -1,6 +1,6 @@
pdbpp
pytest
pytest-trio
docker
psycopg2-binary

git+https://github.com/guilledk/pytest-dockerctl.git@multi_names#egg=pytest-dockerctl
git+https://github.com/guilledk/py-leap.git@async_net_requests#egg=py-eosio
@ -1,13 +1,9 @@
trio
pynng
asks
numpy
Pillow
triopg
aiohttp
msgspec
protobuf
pyOpenSSL
trio_asyncio
pyTelegramBotAPI

git+https://github.com/goodboy/tractor.git@master#egg=tractor
@ -1,44 +0,0 @@
#!/usr/bin/python

'''Self signed x509 certificate generator

can look at generated file using openssl:
openssl x509 -inform pem -in selfsigned.crt -noout -text'''
import sys

from OpenSSL import crypto, SSL

from skynet.constants import DEFAULT_CERTS_DIR


def input_or_skip(txt, default):
    i = input(f'[default: {default}]: {txt}')
    if len(i) == 0:
        return default
    else:
        return i


if __name__ == '__main__':
    # create a key pair
    k = crypto.PKey()
    k.generate_key(crypto.TYPE_RSA, 4096)
    # create a self-signed cert
    cert = crypto.X509()
    cert.get_subject().C = input('country name two char ISO code (example: US): ')
    cert.get_subject().ST = input('state or province name (example: Texas): ')
    cert.get_subject().L = input('locality name (example: Dallas): ')
    cert.get_subject().O = input('organization name: ')
    cert.get_subject().OU = input_or_skip('organizational unit name: ', 'none')
    cert.get_subject().CN = input('common name: ')
    cert.get_subject().emailAddress = input('email address: ')
    cert.set_serial_number(int(input_or_skip('numberic serial number: ', 0)))
    cert.gmtime_adj_notBefore(int(input_or_skip('amount of seconds until cert is valid: ', 0)))
    cert.gmtime_adj_notAfter(int(input_or_skip('amount of seconds until cert expires: ', 10*365*24*60*60)))
    cert.set_issuer(cert.get_subject())
    cert.set_pubkey(k)
    cert.sign(k, 'sha512')
    with open(f'{DEFAULT_CERTS_DIR}/{sys.argv[1]}.cert', "wt") as f:
        f.write(crypto.dump_certificate(crypto.FILETYPE_PEM, cert).decode("utf-8"))
    with open(f'{DEFAULT_CERTS_DIR}/{sys.argv[1]}.key', "wt") as f:
        f.write(crypto.dump_privatekey(crypto.FILETYPE_PEM, k).decode("utf-8"))
@ -1,12 +1,6 @@
[skynet]
certs_dir = certs

[skynet.dgpu]
hf_home = hf_home
hf_token = hf_XxXxXxXxXxXxXxXxXxXxXxXxXxXxXxXxXx

[skynet.telegram]
token = XXXXXXXXXX:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx

[skynet.telegram-test]
token = XXXXXXXXXX:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
skynet/brain.py
@ -1,210 +0,0 @@
#!/usr/bin/python

import logging

from contextlib import asynccontextmanager as acm
from collections import OrderedDict

import trio

from pynng import Context

from .utils import time_ms
from .network import *
from .protobuf import *
from .constants import *


class SkynetRPCBadRequest(BaseException):
    ...

class SkynetDGPUOffline(BaseException):
    ...

class SkynetDGPUOverloaded(BaseException):
    ...

class SkynetDGPUComputeError(BaseException):
    ...

class SkynetShutdownRequested(BaseException):
    ...


@acm
async def run_skynet(
    rpc_address: str = DEFAULT_RPC_ADDR
):
    logging.basicConfig(level=logging.INFO)
    logging.info('skynet is starting')

    nodes = OrderedDict()
    heartbeats = {}
    next_worker: Optional[int] = None

    def connect_node(req: SkynetRPCRequest):
        nonlocal next_worker

        node_params = MessageToDict(req.params)
        logging.info(f'got node params {node_params}')

        if 'dgpu_addr' not in node_params:
            raise SkynetRPCBadRequest(
                f'DGPU connection params don\'t include dgpu addr')

        session = SessionClient(
            node_params['dgpu_addr'],
            'skynet',
            cert_name='brain.cert',
            key_name='brain.key',
            ca_name=node_params['cert']
        )
        try:
            session.connect()

            node = {
                'task': None,
                'session': session
            }
            node.update(node_params)

            nodes[req.uid] = node
            logging.info(f'DGPU node online: {req.uid}')

            if not next_worker:
                next_worker = 0

        except pynng.exceptions.ConnectionRefused:
            logging.warning(f'error while dialing dgpu node... dropping...')
            raise SkynetDGPUOffline('Connection to dgpu node addr failed.')

    def disconnect_node(uid):
        nonlocal next_worker
        if uid not in nodes:
            logging.warning(f'Attempt to disconnect unknown node {uid}')
            return

        i = list(nodes.keys()).index(uid)
        nodes[uid]['session'].disconnect()
        del nodes[uid]

        if i < next_worker:
            next_worker -= 1

        logging.warning(f'DGPU node offline: {uid}')

        if len(nodes) == 0:
            logging.info('All nodes disconnected.')
            next_worker = None


    def is_worker_busy(nid: str):
        return nodes[nid]['task'] != None

    def are_all_workers_busy():
        for nid in nodes.keys():
            if not is_worker_busy(nid):
                return False

        return True

    def get_next_worker():
        nonlocal next_worker

        if next_worker == None:
            raise SkynetDGPUOffline('No workers connected, try again later')

        if are_all_workers_busy():
            raise SkynetDGPUOverloaded('All workers are busy at the moment')


        nid = list(nodes.keys())[next_worker]
        while is_worker_busy(nid):
            next_worker += 1

            if next_worker >= len(nodes):
                next_worker = 0

            nid = list(nodes.keys())[next_worker]

        next_worker += 1
        if next_worker >= len(nodes):
            next_worker = 0

        return nid

    async def rpc_handler(req: SkynetRPCRequest, ctx: Context):
        result = {'ok': {}}
        resp = SkynetRPCResponse()

        try:
            match req.method:
                case 'dgpu_online':
                    connect_node(req)

                case 'dgpu_call':
                    nid = get_next_worker()
                    idx = list(nodes.keys()).index(nid)
                    node = nodes[nid]
                    logging.info(f'dgpu_call {idx}/{len(nodes)} {nid} @ {node["dgpu_addr"]}')
                    dgpu_time = await node['session'].rpc('dgpu_time')
                    if 'ok' not in dgpu_time.result:
                        status = MessageToDict(dgpu_time.result)
                        logging.warning(json.dumps(status, indent=4))
                        disconnect_node(nid)
                        raise SkynetDGPUComputeError(status['error'])

                    dgpu_time = dgpu_time.result['ok']
                    logging.info(f'ping to {nid}: {time_ms() - dgpu_time} ms')

                    try:
                        dgpu_result = await node['session'].rpc(
                            timeout=45,  # give this 45 sec to run cause its compute
                            binext=req.bin,
                            **req.params
                        )
                        result = MessageToDict(dgpu_result.result)

                        if dgpu_result.bin:
                            resp.bin = dgpu_result.bin

                    except trio.TooSlowError:
                        result = {'error': 'timeout while processing request'}

                case 'dgpu_offline':
                    disconnect_node(req.uid)

                case 'dgpu_workers':
                    result = {'ok': len(nodes)}

                case 'dgpu_next':
                    result = {'ok': next_worker}

                case 'skynet_shutdown':
                    raise SkynetShutdownRequested

                case _:
                    logging.warning(f'Unknown method {req.method}')
                    result = {'error': 'unknown method'}

        except BaseException as e:
            result = {'error': str(e)}

        resp.result.update(result)

        return resp

    rpc_server = SessionServer(
        rpc_address,
        rpc_handler,
        cert_name='brain.cert',
        key_name='brain.key'
    )

    async with rpc_server.open():
        logging.info('rpc server is up')
        yield
        logging.info('skynet is shuting down...')

    logging.info('skynet down.')
skynet/cli.py
@ -4,21 +4,27 @@ torch_enabled = importlib.util.find_spec('torch') != None

import os
import json
import logging

from typing import Optional
from functools import partial

import trio
import click
import trio_asyncio
import docker
import asyncio

from leap.cleos import CLEOS, default_nodeos_image
from leap.sugar import get_container

if torch_enabled:
    from . import utils
    from .dgpu import open_dgpu_node

from .brain import run_skynet
from .db import open_new_database
from .config import *
from .constants import ALGOS, DEFAULT_RPC_ADDR, DEFAULT_DGPU_ADDR
from .nodeos import open_nodeos
from .constants import ALGOS
from .frontend.telegram import run_skynet_telegram
@ -86,79 +92,93 @@ def download():

def run(*args, **kwargs):
    pass

@run.command()
def db():
    logging.basicConfig(level=logging.INFO)
    with open_new_database(cleanup=False) as db_params:
        container, passwd, host = db_params
        logging.info(('skynet', passwd, host))

@run.command()
def nodeos():
    logging.basicConfig(level=logging.INFO)
    with open_nodeos(cleanup=False):
        ...

@run.command()
@click.option('--loglevel', '-l', default='warning', help='Logging level')
@click.option(
    '--host', '-H', default=DEFAULT_RPC_ADDR)
def brain(
    loglevel: str,
    host: str
):
    async def _run_skynet():
        async with run_skynet(
            rpc_address=host
        ):
            await trio.sleep_forever()

    trio.run(_run_skynet)


@run.command()
@click.option('--loglevel', '-l', default='warning', help='Logging level')
    '--account', '-a', default='testworker1')
@click.option(
    '--uid', '-u', required=True)
    '--permission', '-p', default='active')
@click.option(
    '--key', '-k', default='dgpu.key')
    '--key', '-k', default=None)
@click.option(
    '--cert', '-c', default='whitelist/dgpu.cert')
    '--node-url', '-n', default='http://test1.us.telos.net:42000')
@click.option(
    '--algos', '-a', default=json.dumps(['midj']))
@click.option(
    '--rpc', '-r', default=DEFAULT_RPC_ADDR)
@click.option(
    '--dgpu', '-d', default=DEFAULT_DGPU_ADDR)
    '--algos', '-A', default=json.dumps(['midj']))
def dgpu(
    loglevel: str,
    uid: str,
    key: str,
    cert: str,
    algos: str,
    rpc: str,
    dgpu: str
    account: str,
    permission: str,
    key: str | None,
    node_url: str,
    algos: list[str]
):
    dclient = docker.from_env()
    vtestnet = get_container(
        dclient,
        default_nodeos_image(),
        force_unique=True,
        detach=True,
        network='host',
        remove=True)

    cleos = CLEOS(dclient, vtestnet, url=node_url, remote=node_url)

    trio.run(
        partial(
            open_dgpu_node,
            cert,
            uid,
            key_name=key,
            rpc_address=rpc,
            dgpu_address=dgpu,
            initial_algos=json.loads(algos)
            account, permission,
            cleos, key=key, initial_algos=json.loads(algos)
        ))

    vtestnet.stop()


@run.command()
@click.option('--loglevel', '-l', default='warning', help='Logging level')
@click.option('--loglevel', '-l', default='warning', help='logging level')
@click.option(
    '--key', '-k', default='telegram-frontend')
    '--account', '-a', default='telegram1')
@click.option(
    '--cert', '-c', default='whitelist/telegram-frontend')
    '--permission', '-p', default='active')
@click.option(
    '--rpc', '-r', default=DEFAULT_RPC_ADDR)
    '--key', '-k', default=None)
@click.option(
    '--node-url', '-n', default='http://test1.us.telos.net:42000')
@click.option(
    '--db-host', '-h', default='localhost:5432')
@click.option(
    '--db-user', '-u', default='skynet')
@click.option(
    '--db-pass', '-u', default='password')
def telegram(
    loglevel: str,
    key: str,
    cert: str,
    rpc: str
    account: str,
    permission: str,
    key: str | None,
    node_url: str,
    db_host: str,
    db_user: str,
    db_pass: str
):
    _, _, tg_token, cfg = init_env_from_config()
    trio_asyncio.run(
        partial(
            run_skynet_telegram,
    asyncio.run(
        run_skynet_telegram(
            tg_token,
            key_name=key,
            cert_name=cert,
            rpc_address=rpc
            account,
            permission,
            node_url,
            db_host, db_user, db_pass,
            key=key
        ))
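Going by the new click options above, the reworked commands would presumably be invoked along these lines (the skynet entry-point name and the placeholder key are assumptions, not values from this diff):

    # GPU worker pointed at a remote Telos endpoint
    skynet run dgpu --account testworker1 --permission active \
        --key <private-key> --node-url http://test1.us.telos.net:42000

    # telegram frontend with its database connection
    skynet run telegram --account telegram1 --permission active \
        --key <private-key> --node-url http://test1.us.telos.net:42000 \
        --db-host localhost:5432 --db-user skynet --db-pass password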
@ -102,7 +102,7 @@ MAX_WIDTH = 512
MAX_HEIGHT = 656
MAX_GUIDANCE = 20

DEFAULT_SEED = None
DEFAULT_SEED = 0
DEFAULT_WIDTH = 512
DEFAULT_HEIGHT = 512
DEFAULT_GUIDANCE = 7.5

@ -114,15 +114,7 @@ DEFAULT_ROLE = 'pleb'
DEFAULT_UPSCALER = None

DEFAULT_CONFIG_PATH = 'skynet.ini'
DEFAULT_CERTS_DIR = 'certs'
DEFAULT_CERT_WHITELIST_DIR = 'whitelist'
DEFAULT_CERT_SKYNET_PUB = 'brain.cert'
DEFAULT_CERT_SKYNET_PRIV = 'brain.key'
DEFAULT_CERT_DGPU = 'dgpu.key'

DEFAULT_RPC_ADDR = 'tcp://127.0.0.1:41000'

DEFAULT_DGPU_ADDR = 'tcp://127.0.0.1:41069'
DEFAULT_DGPU_MAX_TASKS = 2
DEFAULT_INITAL_ALGOS = ['midj', 'stable', 'ink']
@ -1,5 +1,3 @@
#!/usr/bin/python

from .proxy import open_database_connection

from .functions import open_new_database
from .functions import open_new_database, open_database_connection
@ -4,12 +4,15 @@ import time
import random
import string
import logging
import importlib

from typing import Optional
from datetime import datetime
from contextlib import contextmanager as cm
from contextlib import asynccontextmanager as acm

import docker
import asyncpg
import psycopg2

from asyncpg.exceptions import UndefinedColumnError

@ -51,7 +54,7 @@ CREATE TABLE IF NOT EXISTS skynet.user_config(
    step INT NOT NULL,
    width INT NOT NULL,
    height INT NOT NULL,
    seed BIGINT,
    seed BIGINT NOT NULL,
    guidance REAL NOT NULL,
    strength REAL NOT NULL,
    upscaler VARCHAR(128)

@ -79,7 +82,7 @@ def try_decode_uid(uid: str):


@cm
def open_new_database():
def open_new_database(cleanup=True):
    rpassword = ''.join(
        random.choice(string.ascii_lowercase)
        for i in range(12))

@ -99,46 +102,79 @@ def open_new_database():
        detach=True,
        remove=True
    )
    try:

    for log in container.logs(stream=True):
        log = log.decode().rstrip()
        logging.info(log)
        if ('database system is ready to accept connections' in log or
            'database system is shut down' in log):
            break
        for log in container.logs(stream=True):
            log = log.decode().rstrip()
            logging.info(log)
            if ('database system is ready to accept connections' in log or
                'database system is shut down' in log):
                break

    # ip = container.attrs['NetworkSettings']['IPAddress']
    container.reload()
    port = container.ports['5432/tcp'][0]['HostPort']
    host = f'localhost:{port}'
        # ip = container.attrs['NetworkSettings']['IPAddress']
        container.reload()
        port = container.ports['5432/tcp'][0]['HostPort']
        host = f'localhost:{port}'

    # why print the system is ready to accept connections when its not
    # postgres? wtf
    time.sleep(1)
    logging.info('creating skynet db...')
        # why print the system is ready to accept connections when its not
        # postgres? wtf
        time.sleep(1)
        logging.info('creating skynet db...')

    conn = psycopg2.connect(
        user='postgres',
        password=rpassword,
        host='localhost',
        port=port
    )
    logging.info('connected...')
    conn.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)
    with conn.cursor() as cursor:
        cursor.execute(
            f'CREATE USER skynet WITH PASSWORD \'{password}\'')
        cursor.execute(
            f'CREATE DATABASE skynet')
        cursor.execute(
            f'GRANT ALL PRIVILEGES ON DATABASE skynet TO skynet')
        conn = psycopg2.connect(
            user='postgres',
            password=rpassword,
            host='localhost',
            port=port
        )
        logging.info('connected...')
        conn.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)
        with conn.cursor() as cursor:
            cursor.execute(
                f'CREATE USER skynet WITH PASSWORD \'{password}\'')
            cursor.execute(
                f'CREATE DATABASE skynet')
            cursor.execute(
                f'GRANT ALL PRIVILEGES ON DATABASE skynet TO skynet')

    conn.close()
        conn.close()

    logging.info('done.')
    yield container, password, host
        logging.info('done.')
        yield container, password, host

    container.stop()
    finally:
        if container and cleanup:
            container.stop()

@acm
async def open_database_connection(
    db_user: str = 'skynet',
    db_pass: str = 'password',
    db_host: str = 'localhost:5432',
    db_name: str = 'skynet'
):
    db = importlib.import_module('skynet.db.functions')
    pool = await asyncpg.create_pool(
        dsn=f'postgres://{db_user}:{db_pass}@{db_host}/{db_name}')

    async with pool.acquire() as conn:
        res = await conn.execute(f'''
            select distinct table_schema
            from information_schema.tables
            where table_schema = \'{db_name}\'
        ''')
        if '1' in res:
            logging.info('schema already in db, skipping init')
        else:
            await conn.execute(DB_INIT_SQL)

    async def _db_call(method: str, *args, **kwargs):
        method = getattr(db, method)

        async with pool.acquire() as conn:
            return await method(conn, *args, **kwargs)

    yield _db_call


async def get_user(conn, uid: str):
@ -1,123 +0,0 @@
#!/usr/bin/python

import importlib

from contextlib import asynccontextmanager as acm

import trio
import tractor
import asyncpg
import asyncio
import trio_asyncio


_spawn_kwargs = {
    'infect_asyncio': True,
}


async def aio_db_proxy(
    to_trio: trio.MemorySendChannel,
    from_trio: asyncio.Queue,
    db_user: str = 'skynet',
    db_pass: str = 'password',
    db_host: str = 'localhost:5432',
    db_name: str = 'skynet'
) -> None:
    db = importlib.import_module('skynet.db.functions')

    pool = await asyncpg.create_pool(
        dsn=f'postgres://{db_user}:{db_pass}@{db_host}/{db_name}')

    async with pool_conn.acquire() as conn:
        res = await conn.execute(f'''
            select distinct table_schema
            from information_schema.tables
            where table_schema = \'{db_name}\'
        ''')
        if '1' in res:
            logging.info('schema already in db, skipping init')
        else:
            await conn.execute(DB_INIT_SQL)

    # a first message must be sent **from** this ``asyncio``
    # task or the ``trio`` side will never unblock from
    # ``tractor.to_asyncio.open_channel_from():``
    to_trio.send_nowait('start')

    # XXX: this uses an ``from_trio: asyncio.Queue`` currently but we
    # should probably offer something better.
    while True:
        msg = await from_trio.get()

        method = getattr(db, msg.get('method'))
        args = getattr(db, msg.get('args', []))
        kwargs = getattr(db, msg.get('kwargs', {}))

        async with pool_conn.acquire() as conn:
            result = await method(conn, *args, **kwargs)
            to_trio.send_nowait(result)


@tractor.context
async def trio_to_aio_db_proxy(
    ctx: tractor.Context,
    db_user: str = 'skynet',
    db_pass: str = 'password',
    db_host: str = 'localhost:5432',
    db_name: str = 'skynet'
):
    # this will block until the ``asyncio`` task sends a "first"
    # message.
    async with tractor.to_asyncio.open_channel_from(
        aio_db_proxy,
        db_user=db_user,
        db_pass=db_pass,
        db_host=db_host,
        db_name=db_name
    ) as (first, chan):

        assert first == 'start'
        await ctx.started(first)

        async with ctx.open_stream() as stream:

            async for msg in stream:
                await chan.send(msg)

                out = await chan.receive()
                # echo back to parent actor-task
                await stream.send(out)


@acm
async def open_database_connection(
    db_user: str = 'skynet',
    db_pass: str = 'password',
    db_host: str = 'localhost:5432',
    db_name: str = 'skynet'
):
    async with tractor.open_nursery() as n:
        p = await n.start_actor(
            'aio_db_proxy',
            enable_modules=[__name__],
            infect_asyncio=True,
        )
        async with p.open_context(
            trio_to_aio_db_proxy,
            db_user=db_user,
            db_pass=db_pass,
            db_host=db_host,
            db_name=db_name
        ) as (ctx, first):
            async with ctx.open_stream() as stream:

                async def _db_pc(method: str, *args, **kwargs):
                    await stream.send({
                        'method': method,
                        'args': args,
                        'kwargs': kwargs
                    })
                    return await stream.receive()

                yield _db_pc
skynet/dgpu.py
@ -3,16 +3,21 @@
|
|||
import gc
|
||||
import io
|
||||
import json
|
||||
import time
|
||||
import random
|
||||
import logging
|
||||
|
||||
from PIL import Image
|
||||
from typing import List, Optional
|
||||
from hashlib import sha256
|
||||
|
||||
import trio
|
||||
import asks
|
||||
import torch
|
||||
|
||||
from pynng import Context
|
||||
from leap.cleos import CLEOS, default_nodeos_image
|
||||
from leap.sugar import get_container
|
||||
|
||||
from diffusers import (
|
||||
StableDiffusionPipeline,
|
||||
StableDiffusionImg2ImgPipeline,
|
||||
|
@ -22,9 +27,8 @@ from realesrgan import RealESRGANer
|
|||
from basicsr.archs.rrdbnet_arch import RRDBNet
|
||||
from diffusers.models import UNet2DConditionModel
|
||||
|
||||
from .ipfs import IPFSDocker, open_ipfs_node
|
||||
from .utils import *
|
||||
from .network import *
|
||||
from .protobuf import *
|
||||
from .constants import *
|
||||
|
||||
|
||||
|
@ -50,15 +54,20 @@ class DGPUComputeError(BaseException):
|
|||
|
||||
|
||||
async def open_dgpu_node(
|
||||
cert_name: str,
|
||||
unique_id: str,
|
||||
key_name: Optional[str],
|
||||
rpc_address: str = DEFAULT_RPC_ADDR,
|
||||
dgpu_address: str = DEFAULT_DGPU_ADDR,
|
||||
account: str,
|
||||
permission: str,
|
||||
cleos: CLEOS,
|
||||
key: str = None,
|
||||
initial_algos: Optional[List[str]] = None
|
||||
):
|
||||
logging.basicConfig(level=logging.DEBUG)
|
||||
|
||||
logging.basicConfig(level=logging.INFO)
|
||||
logging.info(f'starting dgpu node!')
|
||||
logging.info(f'launching toolchain container!')
|
||||
|
||||
if key:
|
||||
cleos.setup_wallet(key)
|
||||
|
||||
logging.info(f'loading models...')
|
||||
|
||||
upscaler = init_upscaler()
|
||||
|
@ -77,7 +86,7 @@ async def open_dgpu_node(
|
|||
logging.info('memory summary:')
|
||||
logging.info('\n' + torch.cuda.memory_summary())
|
||||
|
||||
async def gpu_compute_one(method: str, params: dict, binext: Optional[bytes] = None):
def gpu_compute_one(method: str, params: dict, binext: Optional[bytes] = None):
match method:
case 'diffuse':
image = None

@ -126,9 +135,7 @@ async def open_dgpu_node(
**_params,
guidance_scale=params['guidance'],
num_inference_steps=int(params['step']),
generator=torch.Generator("cuda").manual_seed(
int(params['seed']) if params['seed'] else random.randint(0, 2 ** 64)
)
generator=torch.manual_seed(int(params['seed']))
).images[0]

if params['upscaler'] == 'x4':

@ -144,9 +151,12 @@ async def open_dgpu_node(
img_byte_arr = io.BytesIO()
image.save(img_byte_arr, format='PNG')
raw_img = img_byte_arr.getvalue()
img_sha = sha256(raw_img).hexdigest()
logging.info(f'final img size {len(raw_img)} bytes.')

return raw_img
logging.info(params)

return img_sha, raw_img

except BaseException as e:
logging.error(e)

@ -158,59 +168,99 @@ async def open_dgpu_node(
case _:
raise DGPUComputeError('Unsupported compute method')

async def rpc_handler(req: SkynetRPCRequest, ctx: Context):
result = {}
resp = SkynetRPCResponse()
async def get_work_requests_last_hour():
return await cleos.aget_table(
'telos.gpu', 'telos.gpu', 'queue',
index_position=2,
key_type='i64',
lower_bound=int(time.time()) - 3600
)

match req.method:
case 'dgpu_time':
result = {'ok': time_ms()}
async def get_status_by_request_id(request_id: int):
return await cleos.aget_table(
'telos.gpu', request_id, 'status')

case _:
logging.debug(f'dgpu got one request: {req.method}')
try:
resp.bin = await gpu_compute_one(
req.method, MessageToDict(req.params),
binext=req.bin if req.bin else None
)
logging.debug(f'dgpu processed one request')
def begin_work(request_id: int):
ec, out = cleos.push_action(
'telos.gpu',
'workbegin',
[account, request_id],
f'{account}@{permission}'
)
assert ec == 0

except DGPUComputeError as e:
result = {'error': str(e)}
async def find_my_results():
return await cleos.aget_table(
'telos.gpu', 'telos.gpu', 'results',
index_position=4,
key_type='name',
lower_bound=account,
upper_bound=account
)

resp.result.update(result)
return resp
ipfs_node = None
def publish_on_ipfs(img_sha: str, raw_img: bytes):
img = Image.open(io.BytesIO(raw_img))
img.save(f'tmp/ipfs-docker-staging/image.png')

rpc_server = SessionServer(
dgpu_address,
rpc_handler,
cert_name=cert_name,
key_name=key_name
)
skynet_rpc = SessionClient(
rpc_address,
unique_id,
cert_name=cert_name,
key_name=key_name
)
skynet_rpc.connect()
ipfs_hash = ipfs_node.add('image.png')

ipfs_node.pin(ipfs_hash)

async with rpc_server.open() as rpc_server:
res = await skynet_rpc.rpc(
'dgpu_online', {
'dgpu_addr': rpc_server.addr,
'cert': cert_name
})
return ipfs_hash

assert 'ok' in res.result
def submit_work(request_id: int, result_hash: str, ipfs_hash: str):
ec, out = cleos.push_action(
'telos.gpu',
'submit',
[account, request_id, result_hash, ipfs_hash],
f'{account}@{permission}'
)
assert ec == 0

with open_ipfs_node() as ipfs_node:
try:
await trio.sleep_forever()
while True:
queue = await get_work_requests_last_hour()

for req in queue:
rid = req['id']

my_results = [res['id'] for res in (await find_my_results())]
if rid in my_results:
continue

statuses = await get_status_by_request_id(rid)

if len(statuses) < 3:

# parse request
body = json.loads(req['body'])
binary = bytes.fromhex(req['binary_data'])

# TODO: validate request

# perform work
logging.info(f'working on {body}')

begin_work(rid)
img_sha, raw_img = gpu_compute_one(
body['method'], body['params'], binext=binary)

ipfs_hash = publish_on_ipfs(img_sha, raw_img)

submit_work(rid, img_sha, ipfs_hash)

break

else:
logging.info(f'request {rid} already being worked on, skip...')
continue

await trio.sleep(1)

except KeyboardInterrupt:
logging.info('interrupt caught, stopping...')
...

finally:
res = await skynet_rpc.rpc('dgpu_offline')
assert 'ok' in res.result
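Taken together, this diff replaces the old RPC push model with chain polling: the worker reads the `queue` table on the `telos.gpu` contract, claims a job with `workbegin`, computes, publishes the PNG to IPFS, and settles with `submit`. Below is a minimal sketch of one claim/compute/settle cycle, assuming a synchronous `cleos`-style client with the `push_action`/`get_table` interfaces used above; the `compute` and `publish` callbacks are hypothetical stand-ins, not skynet APIs.

import json
import time
from hashlib import sha256

def work_once(cleos, account: str, permission: str, compute, publish):
    # read pending work requests from the contract's 'queue' table
    queue = cleos.get_table('telos.gpu', 'telos.gpu', 'queue')
    for req in queue:
        rid = req['id']
        # claim the request on-chain so other workers can skip it
        ec, _ = cleos.push_action(
            'telos.gpu', 'workbegin', [account, rid],
            f'{account}@{permission}')
        if ec != 0:
            continue  # another worker claimed it first

        body = json.loads(req['body'])
        raw_img = compute(body['method'], body['params'])  # PNG bytes
        img_sha = sha256(raw_img).hexdigest()
        ipfs_hash = publish(img_sha, raw_img)  # add + pin on the ipfs node

        # settle: post the result hash and ipfs pointer back on-chain
        ec, _ = cleos.push_action(
            'telos.gpu', 'submit', [account, rid, img_sha, ipfs_hash],
            f'{account}@{permission}')
        assert ec == 0
        return rid

    time.sleep(1)  # nothing claimable this round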
@ -6,23 +6,8 @@ from typing import Union, Optional
from pathlib import Path
from contextlib import contextmanager as cm

import pynng

from pynng import TLSConfig
from OpenSSL.crypto import (
load_privatekey,
load_certificate,
FILETYPE_PEM
)

from google.protobuf.struct_pb2 import Struct

from ..network import SessionClient
from ..constants import *

from ..protobuf.auth import *
from ..protobuf.skynet_pb2 import SkynetRPCRequest, SkynetRPCResponse


class ConfigRequestFormatError(BaseException):
...

@ -40,24 +25,6 @@ class ConfigSizeDivisionByEight(BaseException):
...


@cm
def open_skynet_rpc(
unique_id: str,
rpc_address: str = DEFAULT_RPC_ADDR,
cert_name: Optional[str] = None,
key_name: Optional[str] = None
):
sesh = SessionClient(
rpc_address,
unique_id,
cert_name=cert_name,
key_name=key_name
)
logging.debug(f'opening skynet rpc...')
sesh.connect()
yield sesh
sesh.disconnect()

def validate_user_config_request(req: str):
params = req.split(' ')
@ -3,18 +3,22 @@
import io
import zlib
import logging
import asyncio

from datetime import datetime

from PIL import Image
from trio_asyncio import aio_as_trio
import docker

from PIL import Image
from leap.cleos import CLEOS, default_nodeos_image
from leap.sugar import get_container, collect_stdout
from trio_asyncio import aio_as_trio
from telebot.types import (
InputFile, InputMediaPhoto, InlineKeyboardButton, InlineKeyboardMarkup
)
from telebot.async_telebot import AsyncTeleBot

from ..db import open_database_connection
from ..db import open_new_database, open_database_connection
from ..constants import *

from . import *

@ -55,283 +59,340 @@ def prepare_metainfo_caption(tguser, meta: dict) -> str:


async def run_skynet_telegram(
name: str,
tg_token: str,
key_name: str = 'telegram-frontend.key',
cert_name: str = 'whitelist/telegram-frontend.cert',
rpc_address: str = DEFAULT_RPC_ADDR,
db_host: str = 'localhost:5432',
db_user: str = 'skynet',
db_pass: str = 'password'
account: str,
permission: str,
node_url: str,
db_host: str,
db_user: str,
db_pass: str,
key: str = None
):
dclient = docker.from_env()
vtestnet = get_container(
dclient,
default_nodeos_image(),
force_unique=True,
detach=True,
network='host',
remove=True)

cleos = CLEOS(dclient, vtestnet, url=node_url, remote=node_url)

logging.basicConfig(level=logging.INFO)

if key:
cleos.setup_wallet(key)

bot = AsyncTeleBot(tg_token)
logging.info(f'tg_token: {tg_token}')

async with open_database_connection(
db_user, db_pass, db_host
) as db_call:
with open_skynet_rpc(
f'skynet-telegram-{name}',
rpc_address=rpc_address,
cert_name=cert_name,
key_name=key_name
) as session:

@bot.message_handler(commands=['help'])
async def send_help(message):
splt_msg = message.text.split(' ')
@bot.message_handler(commands=['help'])
async def send_help(message):
splt_msg = message.text.split(' ')

if len(splt_msg) == 1:
await bot.reply_to(message, HELP_TEXT)
if len(splt_msg) == 1:
await bot.reply_to(message, HELP_TEXT)

else:
param = splt_msg[1]
if param in HELP_TOPICS:
await bot.reply_to(message, HELP_TOPICS[param])

else:
param = splt_msg[1]
if param in HELP_TOPICS:
await bot.reply_to(message, HELP_TOPICS[param])
await bot.reply_to(message, HELP_UNKWNOWN_PARAM)

else:
await bot.reply_to(message, HELP_UNKWNOWN_PARAM)
@bot.message_handler(commands=['cool'])
async def send_cool_words(message):
await bot.reply_to(message, '\n'.join(COOL_WORDS))

@bot.message_handler(commands=['cool'])
async def send_cool_words(message):
await bot.reply_to(message, '\n'.join(COOL_WORDS))
@bot.message_handler(commands=['txt2img'])
async def send_txt2img(message):
chat = message.chat
reply_id = None
if chat.type == 'group' and chat.id == GROUP_ID:
reply_id = message.message_id

@bot.message_handler(commands=['txt2img'])
async def send_txt2img(message):
chat = message.chat
reply_id = None
if chat.type == 'group' and chat.id == GROUP_ID:
reply_id = message.message_id
user_id = f'tg+{message.from_user.id}'

user_id = f'tg+{message.from_user.id}'
prompt = ' '.join(message.text.split(' ')[1:])

prompt = ' '.join(message.text.split(' ')[1:])
if len(prompt) == 0:
await bot.reply_to(message, 'Empty text prompt ignored.')
return

if len(prompt) == 0:
await bot.reply_to(message, 'Empty text prompt ignored.')
return
logging.info(f'mid: {message.id}')
user = await db_call('get_or_create_user', user_id)
user_config = {**(await db_call('get_user_config', user))}
del user_config['id']

logging.info(f'mid: {message.id}')
user = await db_call('get_or_create_user', user_id)
user_config = {**(await db_call('get_user_config', user))}
del user_config['id']
req = json.dumps({
'method': 'diffuse',
'params': {
'prompt': prompt,
**user_config
}
})

resp = await session.rpc(
'dgpu_call', {
'method': 'diffuse',
'params': {
'prompt': prompt,
**user_config
}
},
timeout=60
ec, out = cleos.push_action(
'telos.gpu', 'enqueue', [account, req, ''], f'{account}@{permission}'
)
out = collect_stdout(out)
if ec != 0:
await bot.reply_to(message, out)
return

request_id = int(out)
logging.info(f'{request_id} enqueued.')

ipfs_hash = None
sha_hash = None
for i in range(60):
results = cleos.get_table(
'telos.gpu', 'telos.gpu', 'results',
index_position=2,
key_type='i64',
lower_bound=request_id,
upper_bound=request_id
)
logging.info(f'resp to {message.id} arrived')

resp_txt = ''
result = MessageToDict(resp.result)
if 'error' in resp.result:
resp_txt = resp.result['message']
await bot.reply_to(message, resp_txt)

if len(results) > 0:
ipfs_hash = results[0]['ipfs_hash']
sha_hash = results[0]['result_hash']
break
else:
logging.info(result['id'])
img_raw = resp.bin
logging.info(f'got image of size: {len(img_raw)}')
img = Image.open(io.BytesIO(img_raw))
await asyncio.sleep(1)

await bot.send_photo(
GROUP_ID,
caption=prepare_metainfo_caption(message.from_user, result['meta']['meta']),
photo=img,
reply_to_message_id=reply_id,
reply_markup=build_redo_menu()
)
return
if not ipfs_hash:
await bot.reply_to(message, 'timeout processing request')
return

ipfs_link = f'https://ipfs.io/ipfs/{ipfs_hash}/image.png'

@bot.message_handler(func=lambda message: True, content_types=['photo'])
async def send_img2img(message):
chat = message.chat
reply_id = None
if chat.type == 'group' and chat.id == GROUP_ID:
reply_id = message.message_id
await bot.reply_to(
message,
ipfs_link,
reply_markup=build_redo_menu()
)

user_id = f'tg+{message.from_user.id}'
@bot.message_handler(func=lambda message: True, content_types=['photo'])
async def send_img2img(message):
chat = message.chat
reply_id = None
if chat.type == 'group' and chat.id == GROUP_ID:
reply_id = message.message_id

if not message.caption.startswith('/img2img'):
await bot.reply_to(
message,
'For image to image you need to add /img2img to the beginning of your caption'
)
return
user_id = f'tg+{message.from_user.id}'

prompt = ' '.join(message.caption.split(' ')[1:])

if len(prompt) == 0:
await bot.reply_to(message, 'Empty text prompt ignored.')
return

file_id = message.photo[-1].file_id
file_path = (await bot.get_file(file_id)).file_path
file_raw = await bot.download_file(file_path)

logging.info(f'mid: {message.id}')

user = await db_call('get_or_create_user', user_id)
user_config = {**(await db_call('get_user_config', user))}
del user_config['id']

resp = await session.rpc(
'dgpu_call', {
'method': 'diffuse',
'params': {
'prompt': prompt,
**user_config
}
},
binext=file_raw,
timeout=60
)
logging.info(f'resp to {message.id} arrived')

resp_txt = ''
result = MessageToDict(resp.result)
if 'error' in resp.result:
resp_txt = resp.result['message']
await bot.reply_to(message, resp_txt)

else:
logging.info(result['id'])
img_raw = resp.bin
logging.info(f'got image of size: {len(img_raw)}')
img = Image.open(io.BytesIO(img_raw))

await bot.send_media_group(
GROUP_ID,
media=[
InputMediaPhoto(file_id),
InputMediaPhoto(
img,
caption=prepare_metainfo_caption(message.from_user, result['meta']['meta'])
)
],
reply_to_message_id=reply_id
)
return


@bot.message_handler(commands=['img2img'])
async def img2img_missing_image(message):
if not message.caption.startswith('/img2img'):
await bot.reply_to(
message,
'seems you tried to do an img2img command without sending image'
'For image to image you need to add /img2img to the beginning of your caption'
)
return

@bot.message_handler(commands=['redo'])
async def redo(message):
chat = message.chat
reply_id = None
if chat.type == 'group' and chat.id == GROUP_ID:
reply_id = message.message_id
prompt = ' '.join(message.caption.split(' ')[1:])

user_config = {**(await db_call('get_user_config', user))}
del user_config['id']
prompt = await db_call('get_last_prompt_of', user)
if len(prompt) == 0:
await bot.reply_to(message, 'Empty text prompt ignored.')
return

resp = await session.rpc(
'dgpu_call', {
'method': 'diffuse',
'params': {
'prompt': prompt,
**user_config
}
},
timeout=60
file_id = message.photo[-1].file_id
file_path = (await bot.get_file(file_id)).file_path
file_raw = await bot.download_file(file_path)

logging.info(f'mid: {message.id}')

user = await db_call('get_or_create_user', user_id)
user_config = {**(await db_call('get_user_config', user))}
del user_config['id']

req = json.dumps({
'method': 'diffuse',
'params': {
'prompt': prompt,
**user_config
}
})

ec, out = cleos.push_action(
'telos.gpu', 'enqueue', [account, req, file_raw.hex()], f'{account}@{permission}'
)
if ec != 0:
await bot.reply_to(message, out)
return

request_id = int(out)
logging.info(f'{request_id} enqueued.')

ipfs_hash = None
sha_hash = None
for i in range(60):
result = cleos.get_table(
'telos.gpu', 'telos.gpu', 'results',
index_position=2,
key_type='i64',
lower_bound=request_id,
upper_bound=request_id
)
logging.info(f'resp to {message.id} arrived')

resp_txt = ''
result = MessageToDict(resp.result)
if 'error' in resp.result:
resp_txt = resp.result['message']
await bot.reply_to(message, resp_txt)

if len(results) > 0:
ipfs_hash = result[0]['ipfs_hash']
sha_hash = result[0]['result_hash']
break
else:
logging.info(result['id'])
img_raw = resp.bin
logging.info(f'got image of size: {len(img_raw)}')
img = Image.open(io.BytesIO(img_raw))
await asyncio.sleep(1)

await bot.send_photo(
GROUP_ID,
caption=prepare_metainfo_caption(message.from_user, result['meta']['meta']),
photo=img,
reply_to_message_id=reply_id
)
return
if not ipfs_hash:
await bot.reply_to(message, 'timeout processing request')

@bot.message_handler(commands=['config'])
async def set_config(message):
rpc_params = {}
try:
attr, val, reply_txt = validate_user_config_request(
message.text)
ipfs_link = f'https://ipfs.io/ipfs/{ipfs_hash}/image.png'

logging.info(f'user config update: {attr} to {val}')
await db_call('update_user_config',
user, req.params['attr'], req.params['val'])
logging.info('done')

except BaseException as e:
reply_txt = str(e)

finally:
await bot.reply_to(message, reply_txt)

@bot.message_handler(commands=['stats'])
async def user_stats(message):

generated, joined, role = await db_call('get_user_stats', user)

stats_str = f'generated: {generated}\n'
stats_str += f'joined: {joined}\n'
stats_str += f'role: {role}\n'

await bot.reply_to(
message, stats_str)

@bot.message_handler(commands=['donate'])
async def donation_info(message):
await bot.reply_to(
message, DONATION_INFO)

@bot.message_handler(commands=['say'])
async def say(message):
chat = message.chat
user = message.from_user

if (chat.type == 'group') or (user.id != 383385940):
return

await bot.send_message(GROUP_ID, message.text[4:])
await bot.reply_to(
message,
ipfs_link + '\n' +
prepare_metainfo_caption(message.from_user, result['meta']['meta']),
reply_to_message_id=reply_id,
reply_markup=build_redo_menu()
)
return


@bot.message_handler(func=lambda message: True)
async def echo_message(message):
if message.text[0] == '/':
await bot.reply_to(message, UNKNOWN_CMD_TEXT)
@bot.message_handler(commands=['img2img'])
async def img2img_missing_image(message):
await bot.reply_to(
message,
'seems you tried to do an img2img command without sending image'
)

@bot.callback_query_handler(func=lambda call: True)
async def callback_query(call):
msg = json.loads(call.data)
logging.info(call.data)
method = msg.get('method')
match method:
case 'redo':
await _redo(call)
@bot.message_handler(commands=['redo'])
async def redo(message):
chat = message.chat
reply_id = None
if chat.type == 'group' and chat.id == GROUP_ID:
reply_id = message.message_id

user_config = {**(await db_call('get_user_config', user))}
del user_config['id']
prompt = await db_call('get_last_prompt_of', user)

req = json.dumps({
'method': 'diffuse',
'params': {
'prompt': prompt,
**user_config
}
})

ec, out = cleos.push_action(
'telos.gpu', 'enqueue', [account, req, ''], f'{account}@{permission}'
)
if ec != 0:
await bot.reply_to(message, out)
return

request_id = int(out)
logging.info(f'{request_id} enqueued.')

ipfs_hash = None
sha_hash = None
for i in range(60):
result = cleos.get_table(
'telos.gpu', 'telos.gpu', 'results',
index_position=2,
key_type='i64',
lower_bound=request_id,
upper_bound=request_id
)
if len(results) > 0:
ipfs_hash = result[0]['ipfs_hash']
sha_hash = result[0]['result_hash']
break
else:
await asyncio.sleep(1)

if not ipfs_hash:
await bot.reply_to(message, 'timeout processing request')

ipfs_link = f'https://ipfs.io/ipfs/{ipfs_hash}/image.png'

await bot.reply_to(
message,
ipfs_link + '\n' +
prepare_metainfo_caption(message.from_user, result['meta']['meta']),
reply_to_message_id=reply_id,
reply_markup=build_redo_menu()
)
return

@bot.message_handler(commands=['config'])
async def set_config(message):
rpc_params = {}
try:
attr, val, reply_txt = validate_user_config_request(
message.text)

logging.info(f'user config update: {attr} to {val}')
await db_call('update_user_config',
user, req.params['attr'], req.params['val'])
logging.info('done')

except BaseException as e:
reply_txt = str(e)

finally:
await bot.reply_to(message, reply_txt)

@bot.message_handler(commands=['stats'])
async def user_stats(message):
user = message.from_user.id

generated, joined, role = await db_call('get_user_stats', user)

stats_str = f'generated: {generated}\n'
stats_str += f'joined: {joined}\n'
stats_str += f'role: {role}\n'

await bot.reply_to(
message, stats_str)

@bot.message_handler(commands=['donate'])
async def donation_info(message):
await bot.reply_to(
message, DONATION_INFO)

@bot.message_handler(commands=['say'])
async def say(message):
chat = message.chat
user = message.from_user

if (chat.type == 'group') or (user.id != 383385940):
return

await bot.send_message(GROUP_ID, message.text[4:])


await aio_as_trio(bot.infinity_polling)()
@bot.message_handler(func=lambda message: True)
async def echo_message(message):
if message.text[0] == '/':
await bot.reply_to(message, UNKNOWN_CMD_TEXT)

@bot.callback_query_handler(func=lambda call: True)
async def callback_query(call):
msg = json.loads(call.data)
logging.info(call.data)
method = msg.get('method')
match method:
case 'redo':
await _redo(call)

try:
await bot.infinity_polling()

except KeyboardInterrupt:
...

finally:
vtestnet.stop()
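The bot side is the mirror image of the worker loop: push an `enqueue` action carrying the JSON request (plus an optional hex-encoded binary for img2img), read the new request id from the action's console output, then poll the `results` table by request id until a worker submits. A condensed sketch of that round trip, assuming the `push_action`/`get_table`/`collect_stdout` interfaces used above; the `enqueue_and_wait` name and the one-second poll interval are illustrative.

import json
import asyncio

from leap.sugar import collect_stdout  # same helper the frontend imports

async def enqueue_and_wait(cleos, account, permission, params: dict,
                           binary: str = '', tries: int = 60):
    req = json.dumps({'method': 'diffuse', 'params': params})
    ec, out = cleos.push_action(
        'telos.gpu', 'enqueue', [account, req, binary],
        f'{account}@{permission}')
    assert ec == 0
    request_id = int(collect_stdout(out))

    for _ in range(tries):
        # the results table is queried through its request-id secondary index
        results = cleos.get_table(
            'telos.gpu', 'telos.gpu', 'results',
            index_position=2, key_type='i64',
            lower_bound=request_id, upper_bound=request_id)
        if results:
            row = results[0]
            return row['ipfs_hash'], row['result_hash']
        await asyncio.sleep(1)

    return None, None  # caller treats this as a timeout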
@ -0,0 +1,62 @@
#!/usr/bin/python

import logging

from pathlib import Path
from contextlib import contextmanager as cm

import docker

from docker.models.containers import Container


class IPFSDocker:

    def __init__(self, container: Container):
        self._container = container

    def add(self, file: str) -> str:
        ec, out = self._container.exec_run(
            ['ipfs', 'add', '-w', f'/export/{file}', '-Q'])
        assert ec == 0

        return out.decode().rstrip()

    def pin(self, ipfs_hash: str):
        ec, out = self._container.exec_run(
            ['ipfs', 'pin', 'add', ipfs_hash])
        assert ec == 0

@cm
def open_ipfs_node():
    dclient = docker.from_env()

    container = dclient.containers.run(
        'ipfs/go-ipfs:latest',
        name='skynet-ipfs',
        ports={
            '8080/tcp': 8080,
            '4001/tcp': 4001,
            '5001/tcp': ('127.0.0.1', 5001)
        },
        volumes=[
            str(Path().resolve() / 'tmp/ipfs-docker-staging') + ':/export',
            str(Path().resolve() / 'tmp/ipfs-docker-data') + ':/data/ipfs'
        ],
        detach=True,
        remove=True
    )
    try:

        for log in container.logs(stream=True):
            log = log.decode().rstrip()
            logging.info(log)
            if 'Daemon is ready' in log:
                break

        yield IPFSDocker(container)

    finally:
        if container:
            container.stop()
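For reference, the intended usage (as seen in the dgpu loop above) stages the file into the bind-mounted `tmp/ipfs-docker-staging` directory and then wraps add plus pin; a minimal sketch:

from PIL import Image

with open_ipfs_node() as ipfs_node:
    # the staging dir is bind-mounted at /export inside the container
    img = Image.new('RGB', (512, 512))
    img.save('tmp/ipfs-docker-staging/image.png')

    ipfs_hash = ipfs_node.add('image.png')  # wrapped-dir hash from `ipfs add -w -Q`
    ipfs_node.pin(ipfs_hash)

    print(f'https://ipfs.io/ipfs/{ipfs_hash}/image.png')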
@ -1,33 +0,0 @@


class ModelStore:

    def __init__(
        self,
        max_models: int = 2
    ):
        self.max_models = max_models

        self._models = {}

    def get(self, model_name: str):
        if model_name in self._models:
            return self._models[model_name]['pipe']

        if len(self._models) == max_models:
            least_used = list(self._models.keys())[0]
            for model in self._models:
                if self._models[least_used]['generated'] > self._models[model]['generated']:
                    least_used = model

            del self._models[least_used]
            gc.collect()

        pipe = pipeline_for(model_name)

        self._models[model_name] = {
            'pipe': pipe,
            'generated': 0
        }

        return pipe
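This removed cache evicts the least-used pipeline once the model limit is reached, but as written it compares against a bare `max_models` (presumably meaning `self.max_models`) and relies on `gc` and `pipeline_for` without importing them. A self-contained corrected sketch of the same eviction policy; `load_pipeline` here is a hypothetical stand-in for skynet's `pipeline_for`:

import gc

def load_pipeline(model_name: str):
    ...  # hypothetical stand-in for pipeline_for(model_name)

class ModelStore:

    def __init__(self, max_models: int = 2):
        self.max_models = max_models
        self._models = {}

    def get(self, model_name: str):
        if model_name in self._models:
            entry = self._models[model_name]
            entry['generated'] += 1  # track usage so eviction is meaningful
            return entry['pipe']

        if len(self._models) == self.max_models:
            # evict the pipeline with the fewest generations so far
            least_used = min(
                self._models, key=lambda m: self._models[m]['generated'])
            del self._models[least_used]
            gc.collect()  # encourage host/VRAM memory release

        pipe = load_pipeline(model_name)
        self._models[model_name] = {'pipe': pipe, 'generated': 1}
        return pipe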
@ -1,341 +0,0 @@
#!/usr/bin/python

import zlib
import socket
import logging

from typing import Callable, Awaitable, Optional
from pathlib import Path
from contextlib import asynccontextmanager as acm

from cryptography import x509
from cryptography.hazmat.primitives import serialization

import trio
import pynng
import google.protobuf.message

from pynng import TLSConfig, Context

from .protobuf import *
from .constants import *


def get_random_port():
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    s.bind(('', 0))
    return s.getsockname()[1]


def load_certs(
    certs_dir: str,
    cert_name: str,
    key_name: str
):
    certs_dir = Path(certs_dir).resolve()
    tls_key_data = (certs_dir / key_name).read_bytes()
    tls_key = serialization.load_pem_private_key(
        tls_key_data,
        password=None
    )

    tls_cert_data = (certs_dir / cert_name).read_bytes()
    tls_cert = x509.load_pem_x509_certificate(
        tls_cert_data
    )

    tls_whitelist = {}
    for cert_path in (*(certs_dir / 'whitelist').glob('*.cert'), certs_dir / 'brain.cert'):
        tls_whitelist[cert_path.stem] = x509.load_pem_x509_certificate(
            cert_path.read_bytes()
        )

    return (
        SessionTLSConfig(
            TLSConfig.MODE_SERVER,
            own_key_string=tls_key_data,
            own_cert_string=tls_cert_data
        ),

        tls_whitelist
    )


def load_certs_client(
    certs_dir: str,
    cert_name: str,
    key_name: str,
    ca_name: Optional[str] = None
):
    certs_dir = Path(certs_dir).resolve()
    if not ca_name:
        ca_name = 'brain.cert'

    ca_cert_data = (certs_dir / ca_name).read_bytes()

    tls_key_data = (certs_dir / key_name).read_bytes()

    tls_cert_data = (certs_dir / cert_name).read_bytes()

    tls_whitelist = {}
    for cert_path in (*(certs_dir / 'whitelist').glob('*.cert'), certs_dir / 'brain.cert'):
        tls_whitelist[cert_path.stem] = x509.load_pem_x509_certificate(
            cert_path.read_bytes()
        )

    return (
        SessionTLSConfig(
            TLSConfig.MODE_CLIENT,
            own_key_string=tls_key_data,
            own_cert_string=tls_cert_data,
            ca_string=ca_cert_data
        ),

        tls_whitelist
    )


class SessionError(BaseException):
    ...


class SessionTLSConfig(TLSConfig):

    def __init__(
        self,
        mode,
        server_name=None,
        ca_string=None,
        own_key_string=None,
        own_cert_string=None,
        auth_mode=None,
        ca_files=None,
        cert_key_file=None,
        passwd=None
    ):
        super().__init__(
            mode,
            server_name=server_name,
            ca_string=ca_string,
            own_key_string=own_key_string,
            own_cert_string=own_cert_string,
            auth_mode=auth_mode,
            ca_files=ca_files,
            cert_key_file=cert_key_file,
            passwd=passwd
        )

        if ca_string:
            self.ca_cert = x509.load_pem_x509_certificate(ca_string)

        self.cert = x509.load_pem_x509_certificate(own_cert_string)
        self.key = serialization.load_pem_private_key(
            own_key_string,
            password=passwd
        )


class SessionServer:

    def __init__(
        self,
        addr: str,
        msg_handler: Callable[
            [SkynetRPCRequest, Context], Awaitable[SkynetRPCResponse]
        ],
        cert_name: Optional[str] = None,
        key_name: Optional[str] = None,
        cert_dir: str = DEFAULT_CERTS_DIR,
        recv_max_size = 0
    ):
        self.addr = addr
        self.msg_handler = msg_handler

        self.cert_name = cert_name
        self.tls_config = None
        self.tls_whitelist = None
        if cert_name and key_name:
            self.cert_name = cert_name
            self.tls_config, self.tls_whitelist = load_certs(
                cert_dir, cert_name, key_name)

            self.addr = 'tls+' + self.addr

        self.recv_max_size = recv_max_size

    async def _handle_msg(self, req: SkynetRPCRequest, ctx: Context):
        resp = await self.msg_handler(req, ctx)

        if self.tls_config:
            resp.auth.cert = 'skynet'
            resp.auth.sig = sign_protobuf_msg(
                resp, self.tls_config.key)

        raw_msg = zlib.compress(resp.SerializeToString())

        await ctx.asend(raw_msg)

        ctx.close()

    async def _listener(self, sock):
        async with trio.open_nursery() as n:
            while True:
                ctx = sock.new_context()

                raw_msg = await ctx.arecv()
                raw_size = len(raw_msg)
                logging.debug(f'rpc server new msg {raw_size} bytes')

                try:
                    msg = zlib.decompress(raw_msg)
                    msg_size = len(msg)

                except zlib.error:
                    logging.warning(f'Zlib decompress error, dropping msg of size {len(raw_msg)}')
                    continue

                logging.debug(f'msg after decompress {msg_size} bytes, +{msg_size - raw_size} bytes')

                req = SkynetRPCRequest()
                try:
                    req.ParseFromString(msg)

                except google.protobuf.message.DecodeError:
                    logging.warning(f'Dropping malformed msg of size {len(msg)}')
                    continue

                logging.debug(f'msg method: {req.method}')

                if self.tls_config:
                    if req.auth.cert not in self.tls_whitelist:
                        logging.warning(
                            f'{req.auth.cert} not in tls whitelist')
                        continue

                    try:
                        verify_protobuf_msg(req, self.tls_whitelist[req.auth.cert])

                    except ValueError:
                        logging.warning(
                            f'{req.auth.cert} sent an unauthenticated msg')
                        continue

                n.start_soon(self._handle_msg, req, ctx)

    @acm
    async def open(self):
        with pynng.Rep0(
            recv_max_size=self.recv_max_size
        ) as sock:

            if self.tls_config:
                sock.tls_config = self.tls_config

            sock.listen(self.addr)

            logging.debug(f'server socket listening at {self.addr}')

            async with trio.open_nursery() as n:
                n.start_soon(self._listener, sock)

                try:
                    yield self

                finally:
                    n.cancel_scope.cancel()

        logging.debug('server socket is off.')


class SessionClient:

    def __init__(
        self,
        connect_addr: str,
        uid: str,
        cert_name: Optional[str] = None,
        key_name: Optional[str] = None,
        ca_name: Optional[str] = None,
        cert_dir: str = DEFAULT_CERTS_DIR,
        recv_max_size = 0
    ):
        self.uid = uid
        self.connect_addr = connect_addr

        self.cert_name = None
        self.tls_config = None
        self.tls_whitelist = None
        self.tls_cert = None
        self.tls_key = None
        if cert_name and key_name:
            self.cert_name = Path(cert_name).stem
            self.tls_config, self.tls_whitelist = load_certs_client(
                cert_dir, cert_name, key_name, ca_name=ca_name)

            if not self.connect_addr.startswith('tls'):
                self.connect_addr = 'tls+' + self.connect_addr

        self.recv_max_size = recv_max_size

        self._connected = False
        self._sock = None

    def connect(self):
        self._sock = pynng.Req0(
            recv_max_size=0,
            name=self.uid
        )

        if self.tls_config:
            self._sock.tls_config = self.tls_config

        logging.debug(f'client is dialing {self.connect_addr}...')
        self._sock.dial(self.connect_addr, block=True)
        self._connected = True
        logging.debug(f'client is connected to {self.connect_addr}')

    def disconnect(self):
        self._sock.close()
        self._connected = False
        logging.debug(f'client disconnected.')

    async def rpc(
        self,
        method: str,
        params: dict = {},
        binext: Optional[bytes] = None,
        timeout: float = 2.
    ):
        if not self._connected:
            raise SessionError('tried to use rpc without connecting')

        req = SkynetRPCRequest()
        req.uid = self.uid
        req.method = method
        req.params.update(params)
        if binext:
            logging.debug('added binary extension')
            req.bin = binext

        if self.tls_config:
            req.auth.cert = self.cert_name
            req.auth.sig = sign_protobuf_msg(req, self.tls_config.key)

        with trio.fail_after(timeout):
            ctx = self._sock.new_context()
            raw_req = zlib.compress(req.SerializeToString())
            logging.debug(f'rpc client sending new msg {method} of size {len(raw_req)}')
            await ctx.asend(raw_req)
            logging.debug('sent, awaiting response...')
            raw_resp = await ctx.arecv()
            logging.debug(f'rpc client got response of size {len(raw_resp)}')
            raw_resp = zlib.decompress(raw_resp)

        resp = SkynetRPCResponse()
        resp.ParseFromString(raw_resp)
        ctx.close()

        if self.tls_config:
            verify_protobuf_msg(resp, self.tls_config.ca_cert)

        return resp
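A minimal round trip with these two classes, assuming plaintext mode (no cert/key, so the auth branch is skipped entirely) and the protobuf types above; the address and the echo handler are illustrative:

import trio

from skynet.network import SessionServer, SessionClient
from skynet.protobuf import SkynetRPCRequest, SkynetRPCResponse

async def echo_handler(req: SkynetRPCRequest, ctx) -> SkynetRPCResponse:
    resp = SkynetRPCResponse()
    resp.result.update({'ok': req.method})  # echo the method name back
    return resp

async def main():
    addr = 'tcp://127.0.0.1:41000'  # illustrative address
    server = SessionServer(addr, echo_handler)
    client = SessionClient(addr, 'example-client')

    async with server.open():
        client.connect()
        resp = await client.rpc('ping')
        assert resp.result['ok'] == 'ping'
        client.disconnect()

trio.run(main)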
@ -0,0 +1,70 @@
#!/usr/bin/env python3

import time
import logging

from contextlib import contextmanager as cm

import docker

from leap.cleos import CLEOS
from leap.sugar import get_container


@cm
def open_nodeos(cleanup: bool = True):
    dclient = docker.from_env()
    vtestnet = get_container(
        dclient,
        'guilledk/py-eosio:leap-skynet-4.0.0',
        force_unique=True,
        detach=True,
        network='host')

    try:
        cleos = CLEOS(
            dclient, vtestnet,
            url='http://127.0.0.1:42000',
            remote='http://127.0.0.1:42000'
        )

        cleos.start_keosd()

        cleos.start_nodeos_from_config(
            '/root/nodeos/config.ini',
            data_dir='/root/nodeos/data',
            genesis='/root/nodeos/genesis/skynet.json',
            state_plugin=True)

        time.sleep(0.5)

        cleos.setup_wallet('5JnvSc6pewpHHuUHwvbJopsew6AKwiGnexwDRc2Pj2tbdw6iML9')
        cleos.wait_blocks(1)
        cleos.boot_sequence()

        cleos.new_account('telos.gpu', ram=300000)

        for i in range(1, 4):
            cleos.create_account_staked(
                'eosio', f'testworker{i}',
                key='EOS5fLreY5Zq5owBhmNJTgQaLqQ4ufzXSTpStQakEyfxNFuUEgNs1')

        cleos.create_account_staked(
            'eosio', 'telegram1', ram=500000,
            key='EOS5fLreY5Zq5owBhmNJTgQaLqQ4ufzXSTpStQakEyfxNFuUEgNs1')

        cleos.deploy_contract_from_host(
            'telos.gpu',
            'tests/contracts/telos.gpu',
            verify_hash=False,
            create_account=False
        )

        yield cleos

    finally:
        # ec, out = cleos.list_all_keys()
        # logging.info(out)
        if cleanup:
            vtestnet.stop()
            vtestnet.remove()
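Typical usage is as a test fixture or one-off local environment: enter the context manager, interact with the chain through the yielded CLEOS handle, and let the finally block tear the container down. A short sketch:

from skynet.nodeos import open_nodeos

with open_nodeos(cleanup=True) as cleos:
    # the context manager has already booted nodeos and deployed telos.gpu
    queue = cleos.get_table('telos.gpu', 'telos.gpu', 'queue')
    print(f'pending work requests: {len(queue)}')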
@ -1,4 +0,0 @@
#!/usr/bin/python

from .auth import *
from .skynet_pb2 import *
@ -1,69 +0,0 @@
#!/usr/bin/python

import json
import logging

from hashlib import sha256
from collections import OrderedDict

from google.protobuf.json_format import MessageToDict
from cryptography.hazmat.primitives import serialization, hashes
from cryptography.hazmat.primitives.asymmetric import padding

from .skynet_pb2 import *


def serialize_msg_deterministic(msg):
    descriptors = sorted(
        type(msg).DESCRIPTOR.fields_by_name.items(),
        key=lambda x: x[0]
    )
    shasum = sha256()

    def hash_dict(d):
        data = [
            (key, val)
            for (key, val) in d.items()
        ]
        for key, val in sorted(data, key=lambda x: x[0]):
            if not isinstance(val, dict):
                shasum.update(key.encode())
                shasum.update(json.dumps(val).encode())
            else:
                hash_dict(val)

    for (field_name, field_descriptor) in descriptors:
        if not field_descriptor.message_type:
            shasum.update(field_name.encode())

            value = getattr(msg, field_name)

            if isinstance(value, bytes):
                value = value.hex()

            shasum.update(json.dumps(value).encode())
            continue

        if field_descriptor.message_type.name == 'Struct':
            hash_dict(MessageToDict(getattr(msg, field_name)))

    deterministic_msg = shasum.digest()

    return deterministic_msg


def sign_protobuf_msg(msg, key):
    return key.sign(
        serialize_msg_deterministic(msg),
        padding.PKCS1v15(),
        hashes.SHA256()
    ).hex()


def verify_protobuf_msg(msg, cert):
    return cert.public_key().verify(
        bytes.fromhex(msg.auth.sig),
        serialize_msg_deterministic(msg),
        padding.PKCS1v15(),
        hashes.SHA256()
    )
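The signing scheme here is RSA PKCS#1 v1.5 over a field-sorted digest that deliberately skips non-Struct submessages such as `auth` (only scalar fields and the `params` Struct feed the hash), which is what lets the signature be embedded in the message it covers. A self-contained round trip with a throwaway key; generating the key inline is purely illustrative, real deployments load PEM cert/key files:

from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import rsa, padding

from skynet.protobuf import SkynetRPCRequest
from skynet.protobuf.auth import (
    sign_protobuf_msg, serialize_msg_deterministic
)

# throwaway keypair for illustration only
key = rsa.generate_private_key(public_exponent=65537, key_size=2048)

req = SkynetRPCRequest()
req.uid = 'example'
req.method = 'diffuse'
req.params.update({'prompt': 'red tractor'})

# auth fields are excluded from the digest, so signing after the fact is fine
req.auth.sig = sign_protobuf_msg(req, key)

# verify_protobuf_msg expects a certificate; with a bare key, check directly
key.public_key().verify(
    bytes.fromhex(req.auth.sig),
    serialize_msg_deterministic(req),
    padding.PKCS1v15(),
    hashes.SHA256(),
)  # raises InvalidSignature if the message was tampered with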
@ -1,24 +0,0 @@
syntax = "proto3";

package skynet;

import "google/protobuf/struct.proto";

message Auth {
    string cert = 1;
    string sig = 2;
}

message SkynetRPCRequest {
    string uid = 1;
    string method = 2;
    google.protobuf.Struct params = 3;
    optional bytes bin = 4;
    optional Auth auth = 5;
}

message SkynetRPCResponse {
    google.protobuf.Struct result = 1;
    optional bytes bin = 2;
    optional Auth auth = 3;
}
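Given this schema, constructing and serializing a request from Python is plain protobuf plumbing; the `params` field accepts a dict through the Struct API. The field values below are illustrative:

from skynet.protobuf.skynet_pb2 import SkynetRPCRequest

req = SkynetRPCRequest()
req.uid = 'tg+383385940'
req.method = 'diffuse'
req.params.update({'prompt': 'skynet terminator dystopic', 'step': 28})
req.bin = b''  # optional binary extension, e.g. an input image for img2img

wire = req.SerializeToString()  # what SessionClient compresses and sends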
@ -1,30 +0,0 @@
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler.  DO NOT EDIT!
# source: skynet.proto
"""Generated protocol buffer code."""
from google.protobuf.internal import builder as _builder
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)

_sym_db = _symbol_database.Default()


from google.protobuf import struct_pb2 as google_dot_protobuf_dot_struct__pb2


DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x0cskynet.proto\x12\x06skynet\x1a\x1cgoogle/protobuf/struct.proto\"!\n\x04\x41uth\x12\x0c\n\x04\x63\x65rt\x18\x01 \x01(\t\x12\x0b\n\x03sig\x18\x02 \x01(\t\"\x9c\x01\n\x10SkynetRPCRequest\x12\x0b\n\x03uid\x18\x01 \x01(\t\x12\x0e\n\x06method\x18\x02 \x01(\t\x12\'\n\x06params\x18\x03 \x01(\x0b\x32\x17.google.protobuf.Struct\x12\x10\n\x03\x62in\x18\x04 \x01(\x0cH\x00\x88\x01\x01\x12\x1f\n\x04\x61uth\x18\x05 \x01(\x0b\x32\x0c.skynet.AuthH\x01\x88\x01\x01\x42\x06\n\x04_binB\x07\n\x05_auth\"\x80\x01\n\x11SkynetRPCResponse\x12\'\n\x06result\x18\x01 \x01(\x0b\x32\x17.google.protobuf.Struct\x12\x10\n\x03\x62in\x18\x02 \x01(\x0cH\x00\x88\x01\x01\x12\x1f\n\x04\x61uth\x18\x03 \x01(\x0b\x32\x0c.skynet.AuthH\x01\x88\x01\x01\x42\x06\n\x04_binB\x07\n\x05_authb\x06proto3')

_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals())
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'skynet_pb2', globals())
if _descriptor._USE_C_DESCRIPTORS == False:

  DESCRIPTOR._options = None
  _AUTH._serialized_start=54
  _AUTH._serialized_end=87
  _SKYNETRPCREQUEST._serialized_start=90
  _SKYNETRPCREQUEST._serialized_end=246
  _SKYNETRPCRESPONSE._serialized_start=249
  _SKYNETRPCRESPONSE._serialized_end=377
# @@protoc_insertion_point(module_scope)
@ -1,148 +0,0 @@
# piker: trading gear for hackers
# Copyright (C) Guillermo Rodriguez (in stewardship for piker0)

# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.

# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Affero General Public License for more details.

# You should have received a copy of the GNU Affero General Public License
# along with this program.  If not, see <https://www.gnu.org/licenses/>.

"""
Built-in (extension) types.
"""
import sys
import json

from typing import Optional, Union
from pprint import pformat

import msgspec


class Struct(msgspec.Struct):
    '''
    A "human friendlier" (aka repl buddy) struct subtype.
    '''
    def to_dict(self) -> dict:
        return {
            f: getattr(self, f)
            for f in self.__struct_fields__
        }

    def __repr__(self):
        # only turn on pprint when we detect a python REPL
        # at runtime B)
        if (
            hasattr(sys, 'ps1')
            # TODO: check if we're in pdb
        ):
            return self.pformat()

        return super().__repr__()

    def pformat(self) -> str:
        return f'Struct({pformat(self.to_dict())})'

    def copy(
        self,
        update: Optional[dict] = None,

    ) -> msgspec.Struct:
        '''
        Validate-typecast all self defined fields, return a copy of us
        with all such fields.
        This is kinda like the default behaviour in `pydantic.BaseModel`.
        '''
        if update:
            for k, v in update.items():
                setattr(self, k, v)

        # roundtrip serialize to validate
        return msgspec.msgpack.Decoder(
            type=type(self)
        ).decode(
            msgspec.msgpack.Encoder().encode(self)
        )

    def typecast(
        self,
        # fields: Optional[list[str]] = None,
    ) -> None:
        for fname, ftype in self.__annotations__.items():
            setattr(self, fname, ftype(getattr(self, fname)))


# proto
from OpenSSL.crypto import PKey, X509, verify, sign


class AuthenticatedStruct(Struct, kw_only=True):
    cert: Optional[str] = None
    sig: Optional[str] = None

    def to_unsigned_dict(self) -> dict:
        self_dict = self.to_dict()

        if 'sig' in self_dict:
            del self_dict['sig']

        if 'cert' in self_dict:
            del self_dict['cert']

        return self_dict

    def unsigned_to_bytes(self) -> bytes:
        return json.dumps(
            self.to_unsigned_dict()).encode()

    def sign(self, key: PKey, cert: str):
        self.cert = cert
        self.sig = sign(
            key, self.unsigned_to_bytes(), 'sha256').hex()

    def verify(self, cert: X509):
        if not self.sig:
            raise ValueError('Tried to verify unsigned request')

        return verify(
            cert, bytes.fromhex(self.sig), self.unsigned_to_bytes(), 'sha256')


class SkynetRPCRequest(AuthenticatedStruct):
    uid: Union[str, int]  # user unique id
    method: str  # rpc method name
    params: dict  # variable params


class SkynetRPCResponse(AuthenticatedStruct):
    result: dict


class ImageGenRequest(Struct):
    prompt: str
    step: int
    width: int
    height: int
    guidance: int
    seed: Optional[int]
    algo: str
    upscaler: Optional[str]


class DGPUBusRequest(AuthenticatedStruct):
    rid: str  # req id
    nid: str  # node id
    task: str
    params: dict


class DGPUBusResponse(AuthenticatedStruct):
    rid: str  # req id
    nid: str  # node id
    params: dict
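A quick usage sketch of the removed msgspec-based `Struct` helpers, showing the copy-with-update validation round trip; the `GenRequest` subclass and its fields are illustrative, not part of the removed module:

from typing import Optional

# assumes the Struct base class from the removed module above is in scope
class GenRequest(Struct):
    prompt: str
    step: int
    seed: Optional[int] = None

req = GenRequest(prompt='red tractor', step=28)
req2 = req.copy(update={'step': 30})  # roundtrip-serializes to validate types
print(req2.pformat())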
@ -1,5 +1,6 @@
#!/usr/bin/python

import os
import time
import random

@ -43,6 +44,13 @@ def pipeline_for(algo: str, mem_fraction: float = 1.0, image=False):
torch.backends.cuda.matmul.allow_tf32 = True
torch.backends.cudnn.allow_tf32 = True

# full determinism
# https://huggingface.co/docs/diffusers/using-diffusers/reproducibility#deterministic-algorithms
os.environ["CUBLAS_WORKSPACE_CONFIG"] = ":16:8"

torch.backends.cudnn.benchmark = False
torch.use_deterministic_algorithms(True)

params = {
'torch_dtype': torch.float16,
'safety_checker': None
@ -1,71 +1,33 @@
#!/usr/bin/python

import os
import json
import time
import logging

from pathlib import Path
from functools import partial

import pytest

from docker.types import Mount, DeviceRequest

from skynet.db import open_new_database
from skynet.brain import run_skynet
from skynet.network import get_random_port
from skynet.constants import *
from skynet.nodeos import open_nodeos


@pytest.fixture(scope='session')
def postgres_db(dockerctl):
def postgres_db():
with open_new_database() as db_params:
yield db_params

@pytest.fixture(scope='session')
def cleos():
with open_nodeos() as cli:
contract_acc = cli.new_account('telos.gpu', ram=300000)

@pytest.fixture
async def skynet_running():
async with run_skynet():
yield
cli.new_account(name='testworker1')
cli.new_account(name='testworker2')
cli.new_account(name='testworker3')


@pytest.fixture
def dgpu_workers(request, dockerctl, skynet_running):
devices = [DeviceRequest(capabilities=[['gpu']])]
mounts = [Mount(
'/skynet', str(Path().resolve()), type='bind')]

num_containers, initial_algos = request.param

cmds = []
for i in range(num_containers):
dgpu_addr = f'tcp://127.0.0.1:{get_random_port()}'
cmd = f'''
pip install -e . && \
skynet run dgpu \
--algos=\'{json.dumps(initial_algos)}\' \
--uid=dgpu-{i} \
--dgpu={dgpu_addr}
'''
cmds.append(['bash', '-c', cmd])

logging.info(f'launching: \n{cmd}')

with dockerctl.run(
DOCKER_RUNTIME_CUDA,
name='skynet-test-runtime-cuda',
commands=cmds,
environment={
'HF_HOME': '/skynet/hf_home'
},
network='host',
mounts=mounts,
device_requests=devices,
num=num_containers,
) as containers:
yield containers

for i, container in enumerate(containers):
logging.info(f'container {i} logs:')
logging.info(container.logs().decode())
cli.deploy_contract_from_host(
'telos.gpu',
'tests/contracts/telos.gpu',
verify_hash=False,
create_account=False
)
yield cli
@ -0,0 +1,220 @@
{
    "____comment": "This file was generated with eosio-abigen. DO NOT EDIT ",
    "version": "eosio::abi/1.2",
    "types": [],
    "structs": [
        {
            "name": "dequeue",
            "base": "",
            "fields": [
                {
                    "name": "user",
                    "type": "name"
                },
                {
                    "name": "request_id",
                    "type": "uint64"
                }
            ]
        },
        {
            "name": "enqueue",
            "base": "",
            "fields": [
                {
                    "name": "user",
                    "type": "name"
                },
                {
                    "name": "request_body",
                    "type": "string"
                },
                {
                    "name": "binary_data",
                    "type": "bytes"
                }
            ]
        },
        {
            "name": "submit",
            "base": "",
            "fields": [
                {
                    "name": "worker",
                    "type": "name"
                },
                {
                    "name": "request_id",
                    "type": "uint64"
                },
                {
                    "name": "result_hash",
                    "type": "checksum256"
                },
                {
                    "name": "ipfs_hash",
                    "type": "string"
                }
            ]
        },
        {
            "name": "work_request_struct",
            "base": "",
            "fields": [
                {
                    "name": "id",
                    "type": "uint64"
                },
                {
                    "name": "user",
                    "type": "name"
                },
                {
                    "name": "body",
                    "type": "string"
                },
                {
                    "name": "binary_data",
                    "type": "bytes"
                },
                {
                    "name": "timestamp",
                    "type": "time_point_sec"
                }
            ]
        },
        {
            "name": "work_result_struct",
            "base": "",
            "fields": [
                {
                    "name": "id",
                    "type": "uint64"
                },
                {
                    "name": "request_id",
                    "type": "uint64"
                },
                {
                    "name": "user",
                    "type": "name"
                },
                {
                    "name": "worker",
                    "type": "name"
                },
                {
                    "name": "result_hash",
                    "type": "checksum256"
                },
                {
                    "name": "ipfs_hash",
                    "type": "string"
                },
                {
                    "name": "submited",
                    "type": "time_point_sec"
                }
            ]
        },
        {
            "name": "workbegin",
            "base": "",
            "fields": [
                {
                    "name": "worker",
                    "type": "name"
                },
                {
                    "name": "request_id",
                    "type": "uint64"
                }
            ]
        },
        {
            "name": "workcancel",
            "base": "",
            "fields": [
                {
                    "name": "worker",
                    "type": "name"
                },
                {
                    "name": "request_id",
                    "type": "uint64"
                }
            ]
        },
        {
            "name": "worker_status_struct",
            "base": "",
            "fields": [
                {
                    "name": "worker",
                    "type": "name"
                },
                {
                    "name": "status",
                    "type": "string"
                },
                {
                    "name": "started",
                    "type": "time_point_sec"
                }
            ]
        }
    ],
    "actions": [
        {
            "name": "dequeue",
            "type": "dequeue",
            "ricardian_contract": ""
        },
        {
            "name": "enqueue",
            "type": "enqueue",
            "ricardian_contract": ""
        },
        {
            "name": "submit",
            "type": "submit",
            "ricardian_contract": ""
        },
        {
            "name": "workbegin",
            "type": "workbegin",
            "ricardian_contract": ""
        },
        {
            "name": "workcancel",
            "type": "workcancel",
            "ricardian_contract": ""
        }
    ],
    "tables": [
        {
            "name": "queue",
            "type": "work_request_struct",
            "index_type": "i64",
            "key_names": [],
            "key_types": []
        },
        {
            "name": "results",
            "type": "work_result_struct",
            "index_type": "i64",
            "key_names": [],
            "key_types": []
        },
        {
            "name": "status",
            "type": "worker_status_struct",
            "index_type": "i64",
            "key_names": [],
            "key_types": []
        }
    ],
    "ricardian_clauses": [],
    "variants": [],
    "action_results": []
}
Binary file not shown.
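The ABI above defines the contract's full action surface, so an `enqueue` maps straight onto its struct fields (`user: name`, `request_body: string`, `binary_data: bytes` as a hex string). A short sketch, assuming a `cleos` handle from `open_nodeos` above; the account name is illustrative:

import json

req = json.dumps({'method': 'diffuse', 'params': {'prompt': 'a red tractor'}})
ec, out = cleos.push_action(
    'telos.gpu', 'enqueue', ['telegram1', req, ''], 'telegram1@active')
assert ec == 0  # console output carries the new request id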
@ -0,0 +1,82 @@
#!/usr/bin/env python3

import time
import json

from hashlib import sha256
from functools import partial

import trio
import requests

from skynet.dgpu import open_dgpu_node


def test_enqueue_work(ipfs_node, cleos):

    user = cleos.new_account()
    req = json.dumps({
        'method': 'diffuse',
        'params': {
            'algo': 'midj',
            'prompt': 'skynet terminator dystopic',
            'width': 512,
            'height': 512,
            'guidance': 10,
            'step': 28,
            'seed': 420,
            'upscaler': 'x4'
        }
    })
    binary = ''

    ec, out = cleos.push_action(
        'telos.gpu', 'enqueue', [user, req, binary], f'{user}@active'
    )

    assert ec == 0

    queue = cleos.get_table('telos.gpu', 'telos.gpu', 'queue')

    assert len(queue) == 1

    req_on_chain = queue[0]

    assert req_on_chain['user'] == user
    assert req_on_chain['body'] == req
    assert req_on_chain['binary_data'] == binary

    ipfs_hash = None
    sha_hash = None
    for i in range(1, 4):
        trio.run(
            partial(
                open_dgpu_node,
                f'testworker{i}',
                'active',
                cleos,
                ipfs_node,
                initial_algos=['midj']
            )
        )

        if ipfs_hash == None:
            result = cleos.get_table(
                'telos.gpu', 'telos.gpu', 'results',
                index_position=4,
                key_type='name',
                lower_bound=f'testworker{i}',
                upper_bound=f'testworker{i}'
            )
            assert len(result) == 1
            ipfs_hash = result[0]['ipfs_hash']
            sha_hash = result[0]['result_hash']

    queue = cleos.get_table('telos.gpu', 'telos.gpu', 'queue')

    assert len(queue) == 0

    resp = requests.get(f'https://ipfs.io/ipfs/{ipfs_hash}/image.png')
    assert resp.status_code == 200

    assert sha_hash == sha256(resp.content).hexdigest()
@ -1,389 +0,0 @@
|
|||
#!/usr/bin/python
|
||||
|
||||
import io
|
||||
import time
|
||||
import json
|
||||
import zlib
|
||||
import logging
|
||||
|
||||
from typing import Optional
|
||||
from hashlib import sha256
|
||||
from functools import partial
|
||||
|
||||
import trio
|
||||
import pytest
|
||||
|
||||
from PIL import Image
|
||||
from google.protobuf.json_format import MessageToDict
|
||||
|
||||
from skynet.brain import SkynetDGPUComputeError
|
||||
from skynet.network import get_random_port, SessionServer
|
||||
from skynet.protobuf import SkynetRPCResponse
|
||||
from skynet.frontend import open_skynet_rpc
|
||||
from skynet.constants import *
|
||||
|
||||
|
||||
async def wait_for_dgpus(session, amount: int, timeout: float = 30.0):
|
||||
gpu_ready = False
|
||||
with trio.fail_after(timeout):
|
||||
while not gpu_ready:
|
||||
res = await session.rpc('dgpu_workers')
|
||||
if res.result['ok'] >= amount:
|
||||
break
|
||||
|
||||
await trio.sleep(1)
|
||||
|
||||
|
||||
_images = set()
|
||||
async def check_request_img(
|
||||
i: int,
|
||||
uid: str = '1',
|
||||
width: int = 512,
|
||||
height: int = 512,
|
||||
expect_unique = True,
|
||||
upscaler: Optional[str] = None
|
||||
):
|
||||
global _images
|
||||
|
||||
with open_skynet_rpc(
|
||||
uid,
|
||||
cert_name='whitelist/testing.cert',
|
||||
key_name='testing.key'
|
||||
) as session:
|
||||
res = await session.rpc(
|
||||
'dgpu_call', {
|
||||
'method': 'diffuse',
|
||||
'params': {
|
||||
'prompt': 'red old tractor in a sunny wheat field',
|
||||
'step': 28,
|
||||
'width': width, 'height': height,
|
||||
'guidance': 7.5,
|
||||
'seed': None,
|
||||
'algo': list(ALGOS.keys())[i],
|
||||
'upscaler': upscaler
|
||||
}
|
||||
},
|
||||
timeout=60
|
||||
)
|
||||
|
||||
if 'error' in res.result:
|
||||
raise SkynetDGPUComputeError(MessageToDict(res.result))
|
||||
|
||||
img_raw = res.bin
|
||||
img_sha = sha256(img_raw).hexdigest()
|
||||
img = Image.open(io.BytesIO(img_raw))
|
||||
|
||||
if expect_unique and img_sha in _images:
|
||||
raise ValueError('Duplicated image sha: {img_sha}')
|
||||
|
||||
_images.add(img_sha)
|
||||
|
||||
logging.info(f'img sha256: {img_sha} size: {len(img_raw)}')
|
||||
|
||||
assert len(img_raw) > 100000
|
||||
|
||||
return img
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
'dgpu_workers', [(1, ['midj'])], indirect=True)
|
||||
async def test_dgpu_worker_compute_error(dgpu_workers):
|
||||
'''Attempt to generate a huge image and check we get the right error,
|
||||
then generate a smaller image to show gpu worker recovery
|
||||
'''
|
||||
|
||||
with open_skynet_rpc(
|
||||
'test-ctx',
|
||||
cert_name='whitelist/testing.cert',
|
||||
key_name='testing.key'
|
||||
) as session:
|
||||
await wait_for_dgpus(session, 1)
|
||||
|
||||
with pytest.raises(SkynetDGPUComputeError) as e:
|
||||
await check_request_img(0, width=4096, height=4096)
|
||||
|
||||
logging.info(e)
|
||||
|
||||
await check_request_img(0)
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
'dgpu_workers', [(1, ['midj'])], indirect=True)
|
||||
async def test_dgpu_worker(dgpu_workers):
|
||||
'''Generate one image in a single dgpu worker
|
||||
'''
|
||||
|
||||
with open_skynet_rpc(
|
||||
'test-ctx',
|
||||
cert_name='whitelist/testing.cert',
|
||||
key_name='testing.key'
|
||||
) as session:
|
||||
await wait_for_dgpus(session, 1)
|
||||
|
||||
await check_request_img(0)
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
'dgpu_workers', [(1, ['midj', 'stable'])], indirect=True)
|
||||
async def test_dgpu_worker_two_models(dgpu_workers):
|
||||
'''Generate two images in a single dgpu worker using
|
||||
two different models.
|
||||
'''
|
||||
|
||||
with open_skynet_rpc(
|
||||
'test-ctx',
|
||||
cert_name='whitelist/testing.cert',
|
||||
key_name='testing.key'
|
||||
) as session:
|
||||
await wait_for_dgpus(session, 1)
|
||||
|
||||
await check_request_img(0)
|
||||
await check_request_img(1)
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
    'dgpu_workers', [(1, ['midj'])], indirect=True)
async def test_dgpu_worker_upscale(dgpu_workers):
    '''Generate one image with the x4 upscaler enabled and check the
    result comes back upscaled to 2048x2048.
    '''
    with open_skynet_rpc(
        'test-ctx',
        cert_name='whitelist/testing.cert',
        key_name='testing.key'
    ) as session:
        await wait_for_dgpus(session, 1)

        img = await check_request_img(0, upscaler='x4')

        assert img.size == (2048, 2048)


@pytest.mark.parametrize(
    'dgpu_workers', [(2, ['midj'])], indirect=True)
async def test_dgpu_workers_two(dgpu_workers):
    '''Generate two images in two separate dgpu workers.
    '''
    with open_skynet_rpc(
        'test-ctx',
        cert_name='whitelist/testing.cert',
        key_name='testing.key'
    ) as session:
        await wait_for_dgpus(session, 2, timeout=60)

        async with trio.open_nursery() as n:
            n.start_soon(check_request_img, 0)
            n.start_soon(check_request_img, 0)


@pytest.mark.parametrize(
    'dgpu_workers', [(1, ['midj'])], indirect=True)
async def test_dgpu_worker_algo_swap(dgpu_workers):
    '''Generate an image using a non default model.
    '''
    with open_skynet_rpc(
        'test-ctx',
        cert_name='whitelist/testing.cert',
        key_name='testing.key'
    ) as session:
        await wait_for_dgpus(session, 1)
        await check_request_img(5)


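# The two rotation tests below exercise the brain's round-robin scheduling:
# 'dgpu_next' is assumed to report the index of the worker that will serve
# the next request, advancing by one (modulo the number of connected
# workers) after each completed request.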
@pytest.mark.parametrize(
    'dgpu_workers', [(3, ['midj'])], indirect=True)
async def test_dgpu_rotation_next_worker(dgpu_workers):
    '''Connect three dgpu workers and check the next_worker pointer
    rotates through them correctly after each request.
    '''
    with open_skynet_rpc(
        'test-ctx',
        cert_name='whitelist/testing.cert',
        key_name='testing.key'
    ) as session:
        await wait_for_dgpus(session, 3)

        res = await session.rpc('dgpu_next')
        assert 'ok' in res.result
        assert res.result['ok'] == 0

        await check_request_img(0)

        res = await session.rpc('dgpu_next')
        assert 'ok' in res.result
        assert res.result['ok'] == 1

        await check_request_img(0)

        res = await session.rpc('dgpu_next')
        assert 'ok' in res.result
        assert res.result['ok'] == 2

        await check_request_img(0)

        res = await session.rpc('dgpu_next')
        assert 'ok' in res.result
        assert res.result['ok'] == 0


@pytest.mark.parametrize(
    'dgpu_workers', [(3, ['midj'])], indirect=True)
async def test_dgpu_rotation_next_worker_disconnect(dgpu_workers):
    '''Connect three dgpu workers, disconnect the first one and check
    next_worker rotation happens correctly.
    '''
    with open_skynet_rpc(
        'test-ctx',
        cert_name='whitelist/testing.cert',
        key_name='testing.key'
    ) as session:
        await wait_for_dgpus(session, 3)

        await trio.sleep(3)

        # stop the worker whose turn is next
        for _ in range(2):
            ec, out = dgpu_workers[0].exec_run(['pkill', '-INT', '-f', 'skynet'])
            assert ec == 0

        dgpu_workers[0].wait()

        res = await session.rpc('dgpu_workers')
        assert 'ok' in res.result
        assert res.result['ok'] == 2

        async with trio.open_nursery() as n:
            n.start_soon(check_request_img, 0)
            n.start_soon(check_request_img, 0)


async def test_dgpu_no_ack_node_disconnect(skynet_running):
    '''Mock a node that connects, gets a request but fails to
    acknowledge it, then check skynet correctly drops the node.
    '''

    async def mock_rpc(req, ctx):
        # reply with an error instead of the expected acknowledgement
        resp = SkynetRPCResponse()
        resp.result.update({'error': 'can\'t do it mate'})
        return resp

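    # Stand up a SessionServer on a random local port to impersonate a dgpu
    # worker; the brain is then pointed at it via the 'dgpu_online' RPC below.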
    dgpu_addr = f'tcp://127.0.0.1:{get_random_port()}'
    mock_server = SessionServer(
        dgpu_addr,
        mock_rpc,
        cert_name='whitelist/testing.cert',
        key_name='testing.key'
    )

    async with mock_server.open():
        with open_skynet_rpc(
            'test-ctx',
            cert_name='whitelist/testing.cert',
            key_name='testing.key'
        ) as session:

            res = await session.rpc('dgpu_online', {
                'dgpu_addr': dgpu_addr,
                'cert': 'whitelist/testing.cert'
            })
            assert 'ok' in res.result

            await wait_for_dgpus(session, 1)

            with pytest.raises(SkynetDGPUComputeError) as e:
                await check_request_img(0)

            assert 'can\'t do it mate' in str(e.value)

            res = await session.rpc('dgpu_workers')
            assert 'ok' in res.result
            assert res.result['ok'] == 0


@pytest.mark.parametrize(
    'dgpu_workers', [(1, ['midj'])], indirect=True)
async def test_dgpu_timeout_while_processing(dgpu_workers):
    '''Stop node while processing request to cause timeout and
    then check skynet correctly drops the node.
    '''
    with open_skynet_rpc(
        'test-ctx',
        cert_name='whitelist/testing.cert',
        key_name='testing.key'
    ) as session:
        await wait_for_dgpus(session, 1)

        async def check_request_img_raises():
            with pytest.raises(SkynetDGPUComputeError) as e:
                await check_request_img(0)

            assert 'timeout while processing request' in str(e.value)

        async with trio.open_nursery() as n:
            n.start_soon(check_request_img_raises)
            await trio.sleep(1)
            # kill the worker mid-request to trigger the timeout
            ec, out = dgpu_workers[0].exec_run(
                ['pkill', '-TERM', '-f', 'skynet'])
            assert ec == 0


@pytest.mark.parametrize(
    'dgpu_workers', [(1, ['midj'])], indirect=True)
async def test_dgpu_img2img(dgpu_workers):
    '''Generate an image from text, then feed it back as the input
    for an img2img request.
    '''
    with open_skynet_rpc(
        'test-ctx',
        cert_name='whitelist/testing.cert',
        key_name='testing.key'
    ) as session:
        await wait_for_dgpus(session, 1)

        await trio.sleep(2)

        res = await session.rpc(
            'dgpu_call', {
                'method': 'diffuse',
                'params': {
                    'prompt': 'red old tractor in a sunny wheat field',
                    'step': 28,
                    'width': 512, 'height': 512,
                    'guidance': 7.5,
                    'seed': None,
                    'algo': list(ALGOS.keys())[0],
                    'upscaler': None
                }
            },
            timeout=60
        )

        if 'error' in res.result:
            raise SkynetDGPUComputeError(MessageToDict(res.result))

        img_raw = res.bin
        img = Image.open(io.BytesIO(img_raw))
        img.save('txt2img.png')

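        # The img2img pass reuses the bytes of the image generated above:
        # `binext` appears to be how a binary attachment (here the source
        # image) rides along with the RPC, while 'strength' controls how far
        # the result may drift from that source.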
        res = await session.rpc(
            'dgpu_call', {
                'method': 'diffuse',
                'params': {
                    'prompt': 'red ferrari in a sunny wheat field',
                    'step': 28,
                    'guidance': 8,
                    'strength': 0.7,
                    'seed': None,
                    'algo': list(ALGOS.keys())[0],
                    'upscaler': 'x4'
                }
            },
            binext=img_raw,
            timeout=60
        )

        if 'error' in res.result:
            raise SkynetDGPUComputeError(MessageToDict(res.result))

        img_raw = res.bin
        img = Image.open(io.BytesIO(img_raw))
        img.save('img2img.png')
@ -1,86 +0,0 @@
#!/usr/bin/python

import logging

import trio
import pynng
import pytest
import trio_asyncio

from skynet.brain import run_skynet
from skynet.structs import *
from skynet.network import SessionServer
from skynet.frontend import open_skynet_rpc


async def test_skynet(skynet_running):
    ...


async def test_skynet_attempt_insecure(skynet_running):
    with pytest.raises(pynng.exceptions.NNGException) as e:
        with open_skynet_rpc('bad-actor') as session:
            with trio.fail_after(5):
                await session.rpc('skynet_shutdown')


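# 'bad-actor' opens its session without the whitelisted cert/key pair used
# everywhere else, so the connection is expected to be rejected at the auth
# layer, surfacing as a pynng NNGException before the shutdown RPC completes.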
async def test_skynet_dgpu_connection_simple(skynet_running):

    async def rpc_handler(req, ctx):
        ...

    fake_dgpu_addr = 'tcp://127.0.0.1:41001'
    rpc_server = SessionServer(
        fake_dgpu_addr,
        rpc_handler,
        cert_name='whitelist/testing.cert',
        key_name='testing.key'
    )

    with open_skynet_rpc(
        'dgpu-0',
        cert_name='whitelist/testing.cert',
        key_name='testing.key'
    ) as session:
        # check 0 nodes are connected
        res = await session.rpc('dgpu_workers')
        assert 'ok' in res.result.keys()
        assert res.result['ok'] == 0

        # check next worker is None
        res = await session.rpc('dgpu_next')
        assert 'ok' in res.result.keys()
        assert res.result['ok'] is None

        async with rpc_server.open() as rpc_server:
            # connect 1 dgpu
            res = await session.rpc(
                'dgpu_online', {
                    'dgpu_addr': fake_dgpu_addr,
                    'cert': 'whitelist/testing.cert'
                })
            assert 'ok' in res.result.keys()

            # check 1 node is connected
            res = await session.rpc('dgpu_workers')
            assert 'ok' in res.result.keys()
            assert res.result['ok'] == 1

            # check next worker is 0
            res = await session.rpc('dgpu_next')
            assert 'ok' in res.result.keys()
            assert res.result['ok'] == 0

            # disconnect 1 dgpu
            res = await session.rpc('dgpu_offline')
            assert 'ok' in res.result.keys()

            # check 0 nodes are connected
            res = await session.rpc('dgpu_workers')
            assert 'ok' in res.result.keys()
            assert res.result['ok'] == 0

            # check next worker is None
            res = await session.rpc('dgpu_next')
            assert 'ok' in res.result.keys()
            assert res.result['ok'] is None
@ -1,28 +0,0 @@
#!/usr/bin/python

import trio

from functools import partial

from skynet.db import open_new_database
from skynet.brain import run_skynet
from skynet.config import load_skynet_ini
from skynet.frontend.telegram import run_skynet_telegram


if __name__ == '__main__':
    # You will need a telegram bot token configured in skynet.ini for this.
    with open_new_database() as db_params:
        db_container, db_pass, db_host = db_params
        config = load_skynet_ini()

        async def main():
            await run_skynet_telegram(
                'telegram-test',
                config['skynet.telegram-test']['token'],
                db_host=db_host,
                db_pass=db_pass
            )

        trio.run(main)