Merge pull request #5 from guilledk/decentralize

First fully decentralized `skynet` prototype
Guillermo Rodriguez 2023-07-28 11:16:51 -03:00 committed by GitHub
commit ffcf9dc905
69 changed files with 9187 additions and 2594 deletions

.gitignore

@ -7,3 +7,7 @@ secrets
*.egg-info
**/*.key
**/*.cert
docs
ipfs-docker-data
ipfs-docker-staging
weights


@ -1,2 +1,30 @@
# skynet
### decentralized compute platform
To launch a worker:
```
# create and edit config from template
cp skynet.ini.example skynet.ini
# create a Python 3.10+ virtual environment
python3 -m venv venv
# activate the environment
source venv/bin/activate
# install requirements
pip install -r requirements.txt
pip install -r requirements.cuda.0.txt
pip install -r requirements.cuda.1.txt
pip install -r requirements.cuda.2.txt
# install skynet
pip install -e .
# check that you can run this command
skynet --help
# to launch worker
skynet run dgpu
```
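
After setup, a quick sanity check that torch can see a CUDA device (a suggested verification, not part of the repo):
```
# suggested check (not from the repo): confirm the CUDA wheels installed correctly
import torch
print(torch.version.cuda)         # CUDA version the wheel was built against
print(torch.cuda.is_available())  # expect True on a working dgpu host
```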


@ -1,7 +1,7 @@
docker build \
-t skynet:runtime-cuda \
-f Dockerfile.runtime+cuda .
-f docker/Dockerfile.runtime+cuda .
docker build \
-t skynet:runtime \
-f Dockerfile.runtime .
-f docker/Dockerfile.runtime .


@ -1,33 +0,0 @@
-----BEGIN CERTIFICATE-----
MIIFxDCCA6wCAQAwDQYJKoZIhvcNAQENBQAwgacxCzAJBgNVBAYTAlVZMRMwEQYD
VQQIDApNb250ZXZpZGVvMRMwEQYDVQQHDApNb250ZXZpZGVvMRowGAYDVQQKDBFz
a3luZXQtZm91bmRhdGlvbjENMAsGA1UECwwEbm9uZTEcMBoGA1UEAwwTR3VpbGxl
cm1vIFJvZHJpZ3VlejElMCMGCSqGSIb3DQEJARYWZ3VpbGxlcm1vckBmaW5nLmVk
dS51eTAeFw0yMjEyMTExNDM3NDVaFw0zMjEyMDgxNDM3NDVaMIGnMQswCQYDVQQG
EwJVWTETMBEGA1UECAwKTW9udGV2aWRlbzETMBEGA1UEBwwKTW9udGV2aWRlbzEa
MBgGA1UECgwRc2t5bmV0LWZvdW5kYXRpb24xDTALBgNVBAsMBG5vbmUxHDAaBgNV
BAMME0d1aWxsZXJtbyBSb2RyaWd1ZXoxJTAjBgkqhkiG9w0BCQEWFmd1aWxsZXJt
b3JAZmluZy5lZHUudXkwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCu
HdqGPtsqtYqfIilVdq0MmqfEn9g4T+uglfWjRF2gWV3uQCuXDv1O61XfIIyaDQXl
VRqT36txtM8rvn213746SwK0jx9+ln5jD3EDbL4WZv1qvp4/jqA+UPKXFXnD3he+
pRpcDMu4IpYKuoPl667IW/auFSSy3TIWhIZb8ghqxzb2e2i6/OhzIWKHeFIKvbEA
EB6Z63wy3O0ACY7RVhHu0wzyzqUW1t1VNsbZvO9Xmmqm2EWZBJp0TFph3Z9kOR/g
0Ik7kxMLrGIfhV5/1gPQlNr3ADebGJnaMdGCBUi+pqeZcVnGY45fjOJREaD3aTRG
ohZM0Td40K7paDVjUvQ9rPgKoDMsCWpu8IPdc4LB0hONIO2KycFb49cd8zNWsetj
kHXxL9IVgORxfGmVyOtNGotS5RX6R+qwsll3qUmX4XjwvQMAMvATcSkY26CWdCDM
vGFp+0REbVyDfJ9pwU7ZkAxiWeAoiesGfEWyRLsl0fFkaHgHG+oPCH9IO63TVnCq
E6NGRQpHfJ5oV4ZihUfWjSFxOJqdFM3xfzk/2YGzQUgKVBsbuQTWPKxE0aSwt1Cf
Ug4+C0RSDMmrquRmhRn/BWsSRl+2m17rt1axTA4pEVGcHHyKSowEFQ68spD1Lm2K
iU/LCPBh4REzexwjP+onwHALXoxIEOLiy2lEdYgWnwIDAQABMA0GCSqGSIb3DQEB
DQUAA4ICAQBtTZb6PJJQXtF90MD4Hcgj+phKkbtHVZyM198Giw3I9f2PgjDECKb9
I7JLzCUgpexKk1TNso2FPNoVlcE4yMO0I0EauoKcwZ1w9GXsXOGwPHvB9hrItaLs
s7Qxf+IVgKO4y5Tv+8WO4lhgShWa4fW3L7Dpk0XK4INoAAxZLbEdekf2GGqTUGzD
SrfvtE8h6JT+gR4lsAvdsRjJIKYacsqhKjtV0reA6v99NthDcpwaStrAaFmtJkD3
6G3JVU0JyMBlR1GetN0w42BjVHJ2l7cPm405lE2ymFwcl7C8VozXXi4wmfVN+xlh
NOVSbl/QUiMUyt44XPhPCbgopxLqhqtvGzBl+ldF1AR4aaukXjvS/8VtFZ3cfx7n
n5NYxvPnq3kwlFNHgppt+u1leGrzxuesGNQENQd3shO/S9T4I92hAdk2MRTivIfv
m74u6RCtHqDviiOFzF7zcqO37wCrb1dnfS1N4I6/rCf6XtxlRGa8Cp9z4DTKjwAC
5z5irJb+LSJkFXA/zIFpBjjKBdyhjYGuXrbJWdL81kTcYRqjE99XfZaTU8L43qVd
TUaIvQGTtx8k7WGmeTRHk6SauCaXSfeXwYTpEZpictUI/uWo/KJRDL/aE8HmBeH3
pr+cfDu7erTLH+GG5ZROrILf4929Jd7OF4a0nHUnZcycBS0CjGHVHA==
-----END CERTIFICATE-----


@ -1,52 +0,0 @@
-----BEGIN PRIVATE KEY-----
MIIJQgIBADANBgkqhkiG9w0BAQEFAASCCSwwggkoAgEAAoICAQCyAuCwwnoENeYe
B0159sH47zedmRaxcUmC/qmVdUptzOxIHpUCSAIy+hoR5UOhnRsmjj7Y0kUWtlwj
bHAKHcuUn4sqLBb0nl6kH79DzP/4YCQM3GEIXzE6wy/zmnYrHz53Ci7DzmMcRM3n
MwXDVPPpKXzpmI/yassKxSltBKgbh65U3oOheiuFygOlAkT4fUaXX5Bf9DECZBsj
ewf9WvHzLGN2eQt/YWYxJMstgAecHLlRmLbKoYD/P+O0K1ybmhMDItcXE49kNC4s
Rvq7MUt8B0bi8SlRxv5plAbZBiyMilrxf3yCCgYaTsqtt3x+CSrAWjzYIzEzD5aZ
1+s5O2jsqPYkbTvA4NT/hDnWHkkr7YcBRwQn1iMe2tMUTTsWotIYWH87++BzDAWG
3ZBkqNZ4mUdA3usk2ZPO0BwWNxlb0AqOlAJUYSoCsm3nBPT08rVvumQ44hup6XPW
L5KIDyL5+Fl8RDgDF8cpCfrijdL+U+GoHmmJYM6zMkrGqD7BD+WJgw9plgbaWUBI
q4aimXF4PrBJAAX5IRyZK+EDDH0AREL3qoZIQVvJR+yGIKTixpyVKtj6jm1OY4Go
iXxRLaFrc4ucT9+PxRHo9zYtNIijub4eXuU5nveswptmCsNa4spTO2XCkHh6IE0Z
B4oALC4lrC279WY+3TaOpv/roGzG9QIDAQABAoICABfpXGFMs7MzwkYvrkU/KO3V
bwppHAFDOcqyMU7K7e/d4ly1rvJwKyDJ3mKfrKay7Ii7UXndP5E+IcD9ufcXQCzQ
rug/+pLAC0UkoT6W9PNaMWgrhOU+VDs+fjHM19QRuFmpMSr1jZ6ofLgdGchpSvJR
CQnKh9uFDjfTethoEw96Tv1GKTcHAChSleFpHUv7wqsRbTABJJbbokGb2duQhzD7
uh3vQzodzT+2CjeBxoPpNS40GKm+FA6KzdLP2FAWhuNESibmu7uMFCpicR+1ZBxe
+zNU4xCsbamk9rPZqSD1HM4/1RZqs53TuP9TcbzvDPfAUgKpMjICWrUuVIHgQcb/
H3lJbsusZccFkl+B4arncUu7oyYWsw+OLHq/khja1RrJu6/PDDfcqY0cSAAsCKJf
ChiHVyVbhZ6b9g1MdYLNPlcJrpgCVX+PisqLqY/RqQGIln6D0sBK1+MC6TjFW3zA
ca3Dhun18JBZ73mmlGj7LoOUojtnnxy5YVUdB75tdo5BqilGR1nLurJupg9Nkgeq
C7nbA+rZ93MKHptayko91nc7yLzsMRV8PDFhE2UhZWRZfJ5yAW/IaJBZpvTvSYM3
5lTgAn1o34mnykuNC3sK5tbCAMb0YbCJtmotRwBIqlFHqbH+TK07CW2lnEkqZ8ID
YFTpAJlgKgsdhsd5ZCkpAoIBAQDQMvn4iBKvnhCeRUV/6AOHcOsgwJkV/G61Gz/G
F0mx0kPsaPugNX1VzF15R+vN1kbk3sQ9bDP6FfsX7jp2EjRqGEb9mJ8BoIbSHLJ4
dDT7M90TMMYepCVoFMC03Hh30vxH3QokgV3E1lakXCwl1dheRz5czT0BL9VuBkpG
x8vGpVfX4VqLliOWK72wEYdfohUTynb2OkRP/e6woBRxb3hYLqpN7nVHVRiMFBgG
+AvpLNv/oSYBOXj9oRBOwVLZaPV8N1p4Pv7WXL+B7E47Z9rUYNzGFf+2iM1uDdrO
xHkAocgMM/sL81sJaj1khoYRLC8IpAxBG8NqRP6xzeGcLVLHAoIBAQDa4ZdEDvqA
gJmJ4vgivIX7/zv7/q9c/nkNsnPiXjMys6HRdwroQjT7wrxO5/jJX9EDjM98dSFg
1HFJWJulpmDMpIzzwC6DLxZWd+EEqG4Pyv50VGmGuwmqDwWAP7v/pMPwUEvlsGYZ
Tvlebr4jze9vz8MiRw3qBp0ASWpDWgySt3zm0gDWRaxqvZbdqlLvK/YTta+4ySay
dfkqMG4SGM2m7Rc6H+DKqhwADoyd3oVrFD7QWCZTUUm414TgFFk+uils8Pms6ulG
u+mZT29Jaq8UzoXLOmf+tX2K07oA98y0HfrGMAto3+c0x9ArIPrtwHuUGJiTdt3V
ShBPP9AzaBxjAoIBAQCF+3gwP2k/CQqKv+t035t9yuYVgrxBkNyxweJtmUj8nWLG
vdzIggOxdj3lMaqHIVEoMk+5c2uTkhevk8ideSOv7wWoZ1JUWrjIeF1F9QqvafXo
RqgIyfukmk5VVdhUzDs8B/xh97qfVIwXY5Wpl4+RRGnWkOGkZOMF1hhwqlzx7i+0
prp9P9aQ6n880lr66TSFMvMRi/ewPqsfkTT2txSMMyO32TAyAoo0gy3fNjt8CDlf
rZXmjdTV65OyCulFLi1kjb6zyV54FuHLO4Yw5qnFqLwK4ddY4XrKSzI3g+qWxIYX
jFAPpcE9MthlW8jlPjjaZ6/XKoW8WsBJLkP1HJm7AoIBAAm9J+HbWMIG9s3vz2Kc
SMnhnWWk+2CD4hb97bIQxu5ml7ieN1oGOB1LmN1Z7PPo03/47/J1s7p/OVsuGh7Q
vFXerHbcAjXMDo5iXxy58cu6GIBMkTVxdQigCnqeW1sQlbdHm1jo9GID5YySGNu2
+gRbli8cQj47dRjiK1w70XtltqT+ixL9nqJRNTk/rtj9d8GAwATUzmf6X8/Ev+EG
QYA/5Fyttm7OCtjlzNPpZr5Q9EqI4YurfkA/NqZRwXbNCbLTNgi/mwmOquIraqQ1
nvyqA8H7I01t/dwDd687V1xcSSAwWxGbhMoQae7BVOjnO5hnT8Kf81beKMOd70Ga
TEkCggEAI8ICJvOBouBO92330s8smVhxPi9tRCnOZ0mg5MoR8EJydbOrcRIap1w7
Ai0CTR6ziOgMaDbT52ouZ1u0l6izYAdBdeSaPOiiTLx8vEE+U7SpNR3zCesPtZB3
uvGOY2mVwyfZH2SUc4cs+uzDnAGhPqC7/RSFPMoctXf46YpGc9auyjdesE395KLX
L043DaE9/ng9B1jCnhu5TUyiUtAluHvRGQC32og6id2KUEhmhGCl5vj2KIVoDmI2
NpeBLCKuaBNi/rOG3zyHLjg1wCYidjE7vwjY6UyemjbW48LI8KN6Sl5rQdaDu+bG
lWI2XLI4C2zqDBVmEL2MuzL0FrWivQ==
-----END PRIVATE KEY-----


@ -1,33 +0,0 @@
-----BEGIN CERTIFICATE-----
MIIFxDCCA6wCAQIwDQYJKoZIhvcNAQENBQAwgacxCzAJBgNVBAYTAlVZMRMwEQYD
VQQIDApNb250ZXZpZGVvMRMwEQYDVQQHDApNb250ZXZpZGVvMRowGAYDVQQKDBFz
a3luZXQtZm91bmRhdGlvbjENMAsGA1UECwwEbm9uZTEcMBoGA1UEAwwTR3VpbGxl
cm1vIFJvZHJpZ3VlejElMCMGCSqGSIb3DQEJARYWZ3VpbGxlcm1vckBmaW5nLmVk
dS51eTAeFw0yMjEyMTExNTE1MDNaFw0zMjEyMDgxNTE1MDNaMIGnMQswCQYDVQQG
EwJVWTETMBEGA1UECAwKTW9udGV2aWRlbzETMBEGA1UEBwwKTW9udGV2aWRlbzEa
MBgGA1UECgwRc2t5bmV0LWZvdW5kYXRpb24xDTALBgNVBAsMBG5vbmUxHDAaBgNV
BAMME0d1aWxsZXJtbyBSb2RyaWd1ZXoxJTAjBgkqhkiG9w0BCQEWFmd1aWxsZXJt
b3JAZmluZy5lZHUudXkwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCy
AuCwwnoENeYeB0159sH47zedmRaxcUmC/qmVdUptzOxIHpUCSAIy+hoR5UOhnRsm
jj7Y0kUWtlwjbHAKHcuUn4sqLBb0nl6kH79DzP/4YCQM3GEIXzE6wy/zmnYrHz53
Ci7DzmMcRM3nMwXDVPPpKXzpmI/yassKxSltBKgbh65U3oOheiuFygOlAkT4fUaX
X5Bf9DECZBsjewf9WvHzLGN2eQt/YWYxJMstgAecHLlRmLbKoYD/P+O0K1ybmhMD
ItcXE49kNC4sRvq7MUt8B0bi8SlRxv5plAbZBiyMilrxf3yCCgYaTsqtt3x+CSrA
WjzYIzEzD5aZ1+s5O2jsqPYkbTvA4NT/hDnWHkkr7YcBRwQn1iMe2tMUTTsWotIY
WH87++BzDAWG3ZBkqNZ4mUdA3usk2ZPO0BwWNxlb0AqOlAJUYSoCsm3nBPT08rVv
umQ44hup6XPWL5KIDyL5+Fl8RDgDF8cpCfrijdL+U+GoHmmJYM6zMkrGqD7BD+WJ
gw9plgbaWUBIq4aimXF4PrBJAAX5IRyZK+EDDH0AREL3qoZIQVvJR+yGIKTixpyV
Ktj6jm1OY4GoiXxRLaFrc4ucT9+PxRHo9zYtNIijub4eXuU5nveswptmCsNa4spT
O2XCkHh6IE0ZB4oALC4lrC279WY+3TaOpv/roGzG9QIDAQABMA0GCSqGSIb3DQEB
DQUAA4ICAQBic+3ipdfvmCThWkDjVs97tkbUUNjGXH95okwI0Jbft0iRivVM16Xb
hqGquQK4OvYoSTHTmsMH19/dMj0W/Bd4IUYKl64rG8YJUbjDbO1y7a+wF2TaONyn
z0k3zRCky+IwxqYf9Ppw7s2/cXlt3fOEg0kBr4EooXd+bFCx/+JQIxU3vfL8cDQK
dp55vkh+ROt8eR7ai1FiAC8J1prswyT092ktco2fP0MI4uQ3iQfl07NyI68UV1E5
aIsOPU3SKMtxz5FLm8JEUVhZRJZJWQ/o/iB/2cdn4PDBGkrBhgU6ysMPNX51RlCM
aHRsMyoO2mFfIlm0jW0C5lZ6nKHuA1sXPFz1YxzpvnRgRlHUlfoKf1wpCeF+5Qz+
qylArHPSu69CA38wLCzJ3wWTaGVL1nuH1UPR2Pg71HGBYqLCD2XGa8iLShO1DKl7
1bAeHOvzryngYq35rky1L3cIquinAwCP4QKocJK3DJAD5lPqhpzO1f2/1BmWV9Ri
ZRrRkM/9AxePxGZEmnoQbwKsQs/bY+jGU2fRzqijxRPoX9ogX5Te/Ko0mQh1slbX
4bL9NIipHPgpNeZRmRUnu4z00UJNGrI/qGaont3eMH1V65WGz9VMYnmCxkmsg45e
skrauB/Ly9DRRZBddDwAQF8RIbpqPsfQTuEjF0sGdYH3LaClGbA/cA==
-----END CERTIFICATE-----


@ -4,7 +4,6 @@ env DEBIAN_FRONTEND=noninteractive
workdir /skynet
copy requirements.test.txt requirements.test.txt
copy requirements.txt requirements.txt
copy pytest.ini ./
copy setup.py ./
@ -12,8 +11,6 @@ copy skynet ./skynet
run pip install \
-e . \
-r requirements.txt \
-r requirements.test.txt
-r requirements.txt
copy scripts ./
copy tests ./


@ -1,5 +1,5 @@
from nvidia/cuda:11.7.0-devel-ubuntu20.04
from python:3.10.0
from python:3.11
env DEBIAN_FRONTEND=noninteractive
@ -15,21 +15,15 @@ run pip install -v -r requirements.cuda.0.txt
run pip install -v -r requirements.cuda.1.txt
run pip install -v -r requirements.cuda.2.txt
copy requirements.test.txt requirements.test.txt
copy requirements.txt requirements.txt
copy pytest.ini pytest.ini
copy setup.py setup.py
copy skynet skynet
run pip install -e . \
-r requirements.txt \
-r requirements.test.txt
run pip install -e . -r requirements.txt
env PYTORCH_CUDA_ALLOC_CONF max_split_size_mb:128
env NVIDIA_VISIBLE_DEVICES=all
env HF_HOME /hf_home
copy scripts scripts
copy tests tests
expose 40000-45000


@ -0,0 +1,22 @@
FROM ubuntu:22.04
ENV DEBIAN_FRONTEND=noninteractive
RUN apt-get update && apt-get install -y wget
# install eosio tools
RUN wget https://github.com/AntelopeIO/leap/releases/download/v4.0.1/leap_4.0.1-ubuntu22.04_amd64.deb
RUN apt-get install -y ./leap_4.0.1-ubuntu22.04_amd64.deb
RUN mkdir -p /root/nodeos
WORKDIR /root/nodeos
COPY config.ini config.ini
COPY contracts contracts
COPY genesis genesis
EXPOSE 42000
EXPOSE 29876
EXPOSE 39999
CMD sleep 9999999999


@ -0,0 +1,52 @@
agent-name = Telos Skynet Testnet
wasm-runtime = eos-vm-jit
eos-vm-oc-compile-threads = 4
eos-vm-oc-enable = true
chain-state-db-size-mb = 65536
enable-account-queries = true
http-server-address = 0.0.0.0:42000
access-control-allow-origin = *
contracts-console = true
http-validate-host = false
p2p-listen-endpoint = 0.0.0.0:29876
p2p-server-address = 0.0.0.0:29876
verbose-http-errors = true
state-history-endpoint = 0.0.0.0:39999
trace-history = true
chain-state-history = true
trace-history-debug-mode = true
state-history-dir = state-history
sync-fetch-span = 1600
max-clients = 250
signature-provider = EOS5fLreY5Zq5owBhmNJTgQaLqQ4ufzXSTpStQakEyfxNFuUEgNs1=KEY:5JnvSc6pewpHHuUHwvbJopsew6AKwiGnexwDRc2Pj2tbdw6iML9
disable-subjective-billing = true
max-transaction-time = 500
read-only-read-window-time-us = 600000
abi-serializer-max-time-ms = 2000000
p2p-max-nodes-per-host = 1
connection-cleanup-period = 30
allowed-connection = any
http-max-response-time-ms = 100000
max-body-size = 10000000
enable-stale-production = true
plugin = eosio::http_plugin
plugin = eosio::chain_plugin
plugin = eosio::chain_api_plugin
plugin = eosio::net_api_plugin
plugin = eosio::net_plugin
plugin = eosio::producer_plugin
plugin = eosio::producer_api_plugin
plugin = eosio::state_history_plugin
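
With this config, nodeos serves the standard chain API on port 42000; a minimal liveness probe (the host/port mapping is an assumption based on the http-server-address and EXPOSE lines above):
```
# probe the chain API exposed by http-server-address above
# (adjust host/port to your docker port mapping)
import requests

info = requests.get('http://127.0.0.1:42000/v1/chain/get_info').json()
print(info['chain_id'], info['head_block_num'])
```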


@ -0,0 +1,360 @@
{
"____comment": "This file was generated with eosio-abigen. DO NOT EDIT Thu Apr 14 07:49:43 2022",
"version": "eosio::abi/1.1",
"structs": [
{
"name": "action",
"base": "",
"fields": [
{
"name": "account",
"type": "name"
},
{
"name": "name",
"type": "name"
},
{
"name": "authorization",
"type": "permission_level[]"
},
{
"name": "data",
"type": "bytes"
}
]
},
{
"name": "approval",
"base": "",
"fields": [
{
"name": "level",
"type": "permission_level"
},
{
"name": "time",
"type": "time_point"
}
]
},
{
"name": "approvals_info",
"base": "",
"fields": [
{
"name": "version",
"type": "uint8"
},
{
"name": "proposal_name",
"type": "name"
},
{
"name": "requested_approvals",
"type": "approval[]"
},
{
"name": "provided_approvals",
"type": "approval[]"
}
]
},
{
"name": "approve",
"base": "",
"fields": [
{
"name": "proposer",
"type": "name"
},
{
"name": "proposal_name",
"type": "name"
},
{
"name": "level",
"type": "permission_level"
},
{
"name": "proposal_hash",
"type": "checksum256$"
}
]
},
{
"name": "cancel",
"base": "",
"fields": [
{
"name": "proposer",
"type": "name"
},
{
"name": "proposal_name",
"type": "name"
},
{
"name": "canceler",
"type": "name"
}
]
},
{
"name": "exec",
"base": "",
"fields": [
{
"name": "proposer",
"type": "name"
},
{
"name": "proposal_name",
"type": "name"
},
{
"name": "executer",
"type": "name"
}
]
},
{
"name": "extension",
"base": "",
"fields": [
{
"name": "type",
"type": "uint16"
},
{
"name": "data",
"type": "bytes"
}
]
},
{
"name": "invalidate",
"base": "",
"fields": [
{
"name": "account",
"type": "name"
}
]
},
{
"name": "invalidation",
"base": "",
"fields": [
{
"name": "account",
"type": "name"
},
{
"name": "last_invalidation_time",
"type": "time_point"
}
]
},
{
"name": "old_approvals_info",
"base": "",
"fields": [
{
"name": "proposal_name",
"type": "name"
},
{
"name": "requested_approvals",
"type": "permission_level[]"
},
{
"name": "provided_approvals",
"type": "permission_level[]"
}
]
},
{
"name": "permission_level",
"base": "",
"fields": [
{
"name": "actor",
"type": "name"
},
{
"name": "permission",
"type": "name"
}
]
},
{
"name": "proposal",
"base": "",
"fields": [
{
"name": "proposal_name",
"type": "name"
},
{
"name": "packed_transaction",
"type": "bytes"
}
]
},
{
"name": "propose",
"base": "",
"fields": [
{
"name": "proposer",
"type": "name"
},
{
"name": "proposal_name",
"type": "name"
},
{
"name": "requested",
"type": "permission_level[]"
},
{
"name": "trx",
"type": "transaction"
}
]
},
{
"name": "transaction",
"base": "transaction_header",
"fields": [
{
"name": "context_free_actions",
"type": "action[]"
},
{
"name": "actions",
"type": "action[]"
},
{
"name": "transaction_extensions",
"type": "extension[]"
}
]
},
{
"name": "transaction_header",
"base": "",
"fields": [
{
"name": "expiration",
"type": "time_point_sec"
},
{
"name": "ref_block_num",
"type": "uint16"
},
{
"name": "ref_block_prefix",
"type": "uint32"
},
{
"name": "max_net_usage_words",
"type": "varuint32"
},
{
"name": "max_cpu_usage_ms",
"type": "uint8"
},
{
"name": "delay_sec",
"type": "varuint32"
}
]
},
{
"name": "unapprove",
"base": "",
"fields": [
{
"name": "proposer",
"type": "name"
},
{
"name": "proposal_name",
"type": "name"
},
{
"name": "level",
"type": "permission_level"
}
]
}
],
"types": [],
"actions": [
{
"name": "approve",
"type": "approve",
"ricardian_contract": ""
},
{
"name": "cancel",
"type": "cancel",
"ricardian_contract": ""
},
{
"name": "exec",
"type": "exec",
"ricardian_contract": ""
},
{
"name": "invalidate",
"type": "invalidate",
"ricardian_contract": ""
},
{
"name": "propose",
"type": "propose",
"ricardian_contract": ""
},
{
"name": "unapprove",
"type": "unapprove",
"ricardian_contract": ""
}
],
"tables": [
{
"name": "approvals",
"type": "old_approvals_info",
"index_type": "i64",
"key_names": [],
"key_types": []
},
{
"name": "approvals2",
"type": "approvals_info",
"index_type": "i64",
"key_names": [],
"key_types": []
},
{
"name": "invals",
"type": "invalidation",
"index_type": "i64",
"key_names": [],
"key_types": []
},
{
"name": "proposal",
"type": "proposal",
"index_type": "i64",
"key_names": [],
"key_types": []
}
],
"ricardian_clauses": [],
"variants": [],
"abi_extensions": []
}

File diff suppressed because one or more lines are too long


@ -0,0 +1,185 @@
{
"____comment": "This file was generated with eosio-abigen. DO NOT EDIT ",
"version": "eosio::abi/1.1",
"types": [],
"structs": [
{
"name": "account",
"base": "",
"fields": [
{
"name": "balance",
"type": "asset"
}
]
},
{
"name": "close",
"base": "",
"fields": [
{
"name": "owner",
"type": "name"
},
{
"name": "symbol",
"type": "symbol"
}
]
},
{
"name": "create",
"base": "",
"fields": [
{
"name": "issuer",
"type": "name"
},
{
"name": "maximum_supply",
"type": "asset"
}
]
},
{
"name": "currency_stats",
"base": "",
"fields": [
{
"name": "supply",
"type": "asset"
},
{
"name": "max_supply",
"type": "asset"
},
{
"name": "issuer",
"type": "name"
}
]
},
{
"name": "issue",
"base": "",
"fields": [
{
"name": "to",
"type": "name"
},
{
"name": "quantity",
"type": "asset"
},
{
"name": "memo",
"type": "string"
}
]
},
{
"name": "open",
"base": "",
"fields": [
{
"name": "owner",
"type": "name"
},
{
"name": "symbol",
"type": "symbol"
},
{
"name": "ram_payer",
"type": "name"
}
]
},
{
"name": "retire",
"base": "",
"fields": [
{
"name": "quantity",
"type": "asset"
},
{
"name": "memo",
"type": "string"
}
]
},
{
"name": "transfer",
"base": "",
"fields": [
{
"name": "from",
"type": "name"
},
{
"name": "to",
"type": "name"
},
{
"name": "quantity",
"type": "asset"
},
{
"name": "memo",
"type": "string"
}
]
}
],
"actions": [
{
"name": "close",
"type": "close",
"ricardian_contract": "---\nspec_version: \"0.2.0\"\ntitle: Close Token Balance\nsummary: 'Close {{nowrap owner}}s zero quantity balance'\nicon: http://127.0.0.1/ricardian_assets/eosio.contracts/icons/token.png#207ff68b0406eaa56618b08bda81d6a0954543f36adc328ab3065f31a5c5d654\n---\n\n{{owner}} agrees to close their zero quantity balance for the {{symbol_to_symbol_code symbol}} token.\n\nRAM will be refunded to the RAM payer of the {{symbol_to_symbol_code symbol}} token balance for {{owner}}."
},
{
"name": "create",
"type": "create",
"ricardian_contract": "---\nspec_version: \"0.2.0\"\ntitle: Create New Token\nsummary: 'Create a new token'\nicon: http://127.0.0.1/ricardian_assets/eosio.contracts/icons/token.png#207ff68b0406eaa56618b08bda81d6a0954543f36adc328ab3065f31a5c5d654\n---\n\n{{$action.account}} agrees to create a new token with symbol {{asset_to_symbol_code maximum_supply}} to be managed by {{issuer}}.\n\nThis action will not result any any tokens being issued into circulation.\n\n{{issuer}} will be allowed to issue tokens into circulation, up to a maximum supply of {{maximum_supply}}.\n\nRAM will deducted from {{$action.account}}s resources to create the necessary records."
},
{
"name": "issue",
"type": "issue",
"ricardian_contract": "---\nspec_version: \"0.2.0\"\ntitle: Issue Tokens into Circulation\nsummary: 'Issue {{nowrap quantity}} into circulation and transfer into {{nowrap to}}s account'\nicon: http://127.0.0.1/ricardian_assets/eosio.contracts/icons/token.png#207ff68b0406eaa56618b08bda81d6a0954543f36adc328ab3065f31a5c5d654\n---\n\nThe token manager agrees to issue {{quantity}} into circulation, and transfer it into {{to}}s account.\n\n{{#if memo}}There is a memo attached to the transfer stating:\n{{memo}}\n{{/if}}\n\nIf {{to}} does not have a balance for {{asset_to_symbol_code quantity}}, or the token manager does not have a balance for {{asset_to_symbol_code quantity}}, the token manager will be designated as the RAM payer of the {{asset_to_symbol_code quantity}} token balance for {{to}}. As a result, RAM will be deducted from the token managers resources to create the necessary records.\n\nThis action does not allow the total quantity to exceed the max allowed supply of the token."
},
{
"name": "open",
"type": "open",
"ricardian_contract": "---\nspec_version: \"0.2.0\"\ntitle: Open Token Balance\nsummary: 'Open a zero quantity balance for {{nowrap owner}}'\nicon: http://127.0.0.1/ricardian_assets/eosio.contracts/icons/token.png#207ff68b0406eaa56618b08bda81d6a0954543f36adc328ab3065f31a5c5d654\n---\n\n{{ram_payer}} agrees to establish a zero quantity balance for {{owner}} for the {{symbol_to_symbol_code symbol}} token.\n\nIf {{owner}} does not have a balance for {{symbol_to_symbol_code symbol}}, {{ram_payer}} will be designated as the RAM payer of the {{symbol_to_symbol_code symbol}} token balance for {{owner}}. As a result, RAM will be deducted from {{ram_payer}}s resources to create the necessary records."
},
{
"name": "retire",
"type": "retire",
"ricardian_contract": "---\nspec_version: \"0.2.0\"\ntitle: Remove Tokens from Circulation\nsummary: 'Remove {{nowrap quantity}} from circulation'\nicon: http://127.0.0.1/ricardian_assets/eosio.contracts/icons/token.png#207ff68b0406eaa56618b08bda81d6a0954543f36adc328ab3065f31a5c5d654\n---\n\nThe token manager agrees to remove {{quantity}} from circulation, taken from their own account.\n\n{{#if memo}} There is a memo attached to the action stating:\n{{memo}}\n{{/if}}"
},
{
"name": "transfer",
"type": "transfer",
"ricardian_contract": "---\nspec_version: \"0.2.0\"\ntitle: Transfer Tokens\nsummary: 'Send {{nowrap quantity}} from {{nowrap from}} to {{nowrap to}}'\nicon: http://127.0.0.1/ricardian_assets/eosio.contracts/icons/transfer.png#5dfad0df72772ee1ccc155e670c1d124f5c5122f1d5027565df38b418042d1dd\n---\n\n{{from}} agrees to send {{quantity}} to {{to}}.\n\n{{#if memo}}There is a memo attached to the transfer stating:\n{{memo}}\n{{/if}}\n\nIf {{from}} is not already the RAM payer of their {{asset_to_symbol_code quantity}} token balance, {{from}} will be designated as such. As a result, RAM will be deducted from {{from}}s resources to refund the original RAM payer.\n\nIf {{to}} does not have a balance for {{asset_to_symbol_code quantity}}, {{from}} will be designated as the RAM payer of the {{asset_to_symbol_code quantity}} token balance for {{to}}. As a result, RAM will be deducted from {{from}}s resources to create the necessary records."
}
],
"tables": [
{
"name": "accounts",
"type": "account",
"index_type": "i64",
"key_names": [],
"key_types": []
},
{
"name": "stat",
"type": "currency_stats",
"index_type": "i64",
"key_names": [],
"key_types": []
}
],
"ricardian_clauses": [],
"variants": []
}
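
The `accounts` and `stat` tables above can be read through the chain API with the same `get_table_rows` call the CLI uses for the work queue further down; a sketch (node URL, contract, and account names are illustrative placeholders):
```
# read a balance row from the token contract's accounts table
# (node URL, contract and account names are placeholders)
import json
import requests

resp = requests.post(
    'https://skynet.ancap.tech/v1/chain/get_table_rows',
    json={
        'code': 'eosio.token',   # contract the ABI above belongs to
        'table': 'accounts',     # balances, one row per symbol
        'scope': 'someaccount',  # accounts table is scoped per holder
        'json': True,
    },
)
print(json.dumps(resp.json(), indent=4))
```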


@ -0,0 +1,130 @@
{
"____comment": "This file was generated with eosio-abigen. DO NOT EDIT Thu Apr 14 07:49:40 2022",
"version": "eosio::abi/1.1",
"structs": [
{
"name": "action",
"base": "",
"fields": [
{
"name": "account",
"type": "name"
},
{
"name": "name",
"type": "name"
},
{
"name": "authorization",
"type": "permission_level[]"
},
{
"name": "data",
"type": "bytes"
}
]
},
{
"name": "exec",
"base": "",
"fields": [
{
"name": "executer",
"type": "name"
},
{
"name": "trx",
"type": "transaction"
}
]
},
{
"name": "extension",
"base": "",
"fields": [
{
"name": "type",
"type": "uint16"
},
{
"name": "data",
"type": "bytes"
}
]
},
{
"name": "permission_level",
"base": "",
"fields": [
{
"name": "actor",
"type": "name"
},
{
"name": "permission",
"type": "name"
}
]
},
{
"name": "transaction",
"base": "transaction_header",
"fields": [
{
"name": "context_free_actions",
"type": "action[]"
},
{
"name": "actions",
"type": "action[]"
},
{
"name": "transaction_extensions",
"type": "extension[]"
}
]
},
{
"name": "transaction_header",
"base": "",
"fields": [
{
"name": "expiration",
"type": "time_point_sec"
},
{
"name": "ref_block_num",
"type": "uint16"
},
{
"name": "ref_block_prefix",
"type": "uint32"
},
{
"name": "max_net_usage_words",
"type": "varuint32"
},
{
"name": "max_cpu_usage_ms",
"type": "uint8"
},
{
"name": "delay_sec",
"type": "varuint32"
}
]
}
],
"types": [],
"actions": [
{
"name": "exec",
"type": "exec",
"ricardian_contract": ""
}
],
"tables": [],
"ricardian_clauses": [],
"variants": [],
"abi_extensions": []
}

File diff suppressed because it is too large


@ -0,0 +1,25 @@
{
"initial_timestamp": "2023-05-22T00:00:00.000",
"initial_key": "EOS5fLreY5Zq5owBhmNJTgQaLqQ4ufzXSTpStQakEyfxNFuUEgNs1",
"initial_configuration": {
"max_block_net_usage": 1048576,
"target_block_net_usage_pct": 1000,
"max_transaction_net_usage": 1048575,
"base_per_transaction_net_usage": 12,
"net_usage_leeway": 500,
"context_free_discount_net_usage_num": 20,
"context_free_discount_net_usage_den": 100,
"max_block_cpu_usage": 200000,
"target_block_cpu_usage_pct": 1000,
"max_transaction_cpu_usage": 150000,
"min_transaction_cpu_usage": 100,
"max_transaction_lifetime": 3600,
"deferred_trx_expiration_window": 600,
"max_transaction_delay": 3888000,
"max_inline_action_size": 4096,
"max_inline_action_depth": 4,
"max_authority_depth": 6
}
}


@ -1,4 +1,3 @@
[pytest]
log_cli = True
log_level = info
trio_mode = true


@ -3,6 +3,7 @@ triton
accelerate
transformers
huggingface_hub
diffusers[torch]
diffusers[torch]>=0.18.0
invisible-watermark
torch==1.13.0+cu117
--extra-index-url https://download.pytorch.org/whl/cu117


@ -1,6 +0,0 @@
pdbpp
pytest
pytest-trio
psycopg2-binary
git+https://github.com/guilledk/pytest-dockerctl.git@multi_names#egg=pytest-dockerctl


@ -1,13 +1,15 @@
pytz
trio
pynng
asks
numpy
pdbpp
Pillow
triopg
pytest
docker
aiohttp
msgspec
protobuf
pyOpenSSL
trio_asyncio
psycopg2-binary
pyTelegramBotAPI
discord.py
git+https://github.com/goodboy/tractor.git@master#egg=tractor
py-leap@git+https://github.com/guilledk/py-leap.git@v0.1a14


@ -1,44 +0,0 @@
#!/usr/bin/python
'''Self signed x509 certificate generator
can look at generated file using openssl:
openssl x509 -inform pem -in selfsigned.crt -noout -text'''
import sys
from OpenSSL import crypto, SSL
from skynet.constants import DEFAULT_CERTS_DIR
def input_or_skip(txt, default):
i = input(f'[default: {default}]: {txt}')
if len(i) == 0:
return default
else:
return i
if __name__ == '__main__':
# create a key pair
k = crypto.PKey()
k.generate_key(crypto.TYPE_RSA, 4096)
# create a self-signed cert
cert = crypto.X509()
cert.get_subject().C = input('country name two char ISO code (example: US): ')
cert.get_subject().ST = input('state or province name (example: Texas): ')
cert.get_subject().L = input('locality name (example: Dallas): ')
cert.get_subject().O = input('organization name: ')
cert.get_subject().OU = input_or_skip('organizational unit name: ', 'none')
cert.get_subject().CN = input('common name: ')
cert.get_subject().emailAddress = input('email address: ')
cert.set_serial_number(int(input_or_skip('numeric serial number: ', 0)))
cert.gmtime_adj_notBefore(int(input_or_skip('amount of seconds until cert is valid: ', 0)))
cert.gmtime_adj_notAfter(int(input_or_skip('amount of seconds until cert expires: ', 10*365*24*60*60)))
cert.set_issuer(cert.get_subject())
cert.set_pubkey(k)
cert.sign(k, 'sha512')
with open(f'{DEFAULT_CERTS_DIR}/{sys.argv[1]}.cert', "wt") as f:
f.write(crypto.dump_certificate(crypto.FILETYPE_PEM, cert).decode("utf-8"))
with open(f'{DEFAULT_CERTS_DIR}/{sys.argv[1]}.key', "wt") as f:
f.write(crypto.dump_privatekey(crypto.FILETYPE_PEM, k).decode("utf-8"))


@ -1,12 +1,35 @@
[skynet]
certs_dir = certs
[skynet.dgpu]
account = testworkerX
permission = active
key = 5Xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
node_url = https://skynet.ancap.tech
hyperion_url = https://skynet.ancap.tech
ipfs_url = /ip4/169.197.140.154/tcp/4001/p2p/12D3KooWKWogLFNEcNNMKnzU7Snrnuj84RZdMBg3sLiQSQc51oEv
hf_home = hf_home
hf_token = hf_XxXxXxXxXxXxXxXxXxXxXxXxXxXxXxXxXx
auto_withdraw = True
[skynet.telegram]
account = telegram
permission = active
key = 5Xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
node_url = https://skynet.ancap.tech
hyperion_url = https://skynet.ancap.tech
ipfs_url = /ip4/169.197.140.154/tcp/4001/p2p/12D3KooWKWogLFNEcNNMKnzU7Snrnuj84RZdMBg3sLiQSQc51oEv
token = XXXXXXXXXX:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
[skynet.telegram-test]
[skynet.discord]
account = discord
permission = active
key = 5Xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
node_url = https://skynet.ancap.tech
hyperion_url = https://skynet.ancap.tech
ipfs_url = /ip4/169.197.140.154/tcp/4001/p2p/12D3KooWKWogLFNEcNNMKnzU7Snrnuj84RZdMBg3sLiQSQc51oEv
token = XXXXXXXXXX:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
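
This template is parsed with Python's stdlib ConfigParser (see `load_skynet_ini` in skynet/config.py below); a minimal reading sketch:
```
# minimal sketch mirroring load_skynet_ini below
from configparser import ConfigParser

config = ConfigParser()
config.read('skynet.ini')
dgpu = config['skynet.dgpu']
print(dgpu['account'], dgpu['node_url'])
```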


@ -1,210 +0,0 @@
#!/usr/bin/python
import logging
from contextlib import asynccontextmanager as acm
from collections import OrderedDict
import trio
from pynng import Context
from .utils import time_ms
from .network import *
from .protobuf import *
from .constants import *
class SkynetRPCBadRequest(BaseException):
...
class SkynetDGPUOffline(BaseException):
...
class SkynetDGPUOverloaded(BaseException):
...
class SkynetDGPUComputeError(BaseException):
...
class SkynetShutdownRequested(BaseException):
...
@acm
async def run_skynet(
rpc_address: str = DEFAULT_RPC_ADDR
):
logging.basicConfig(level=logging.INFO)
logging.info('skynet is starting')
nodes = OrderedDict()
heartbeats = {}
next_worker: Optional[int] = None
def connect_node(req: SkynetRPCRequest):
nonlocal next_worker
node_params = MessageToDict(req.params)
logging.info(f'got node params {node_params}')
if 'dgpu_addr' not in node_params:
raise SkynetRPCBadRequest(
f'DGPU connection params don\'t include dgpu addr')
session = SessionClient(
node_params['dgpu_addr'],
'skynet',
cert_name='brain.cert',
key_name='brain.key',
ca_name=node_params['cert']
)
try:
session.connect()
node = {
'task': None,
'session': session
}
node.update(node_params)
nodes[req.uid] = node
logging.info(f'DGPU node online: {req.uid}')
if not next_worker:
next_worker = 0
except pynng.exceptions.ConnectionRefused:
logging.warning(f'error while dialing dgpu node... dropping...')
raise SkynetDGPUOffline('Connection to dgpu node addr failed.')
def disconnect_node(uid):
nonlocal next_worker
if uid not in nodes:
logging.warning(f'Attempt to disconnect unknown node {uid}')
return
i = list(nodes.keys()).index(uid)
nodes[uid]['session'].disconnect()
del nodes[uid]
if i < next_worker:
next_worker -= 1
logging.warning(f'DGPU node offline: {uid}')
if len(nodes) == 0:
logging.info('All nodes disconnected.')
next_worker = None
def is_worker_busy(nid: str):
return nodes[nid]['task'] != None
def are_all_workers_busy():
for nid in nodes.keys():
if not is_worker_busy(nid):
return False
return True
def get_next_worker():
nonlocal next_worker
if next_worker == None:
raise SkynetDGPUOffline('No workers connected, try again later')
if are_all_workers_busy():
raise SkynetDGPUOverloaded('All workers are busy at the moment')
nid = list(nodes.keys())[next_worker]
while is_worker_busy(nid):
next_worker += 1
if next_worker >= len(nodes):
next_worker = 0
nid = list(nodes.keys())[next_worker]
next_worker += 1
if next_worker >= len(nodes):
next_worker = 0
return nid
async def rpc_handler(req: SkynetRPCRequest, ctx: Context):
result = {'ok': {}}
resp = SkynetRPCResponse()
try:
match req.method:
case 'dgpu_online':
connect_node(req)
case 'dgpu_call':
nid = get_next_worker()
idx = list(nodes.keys()).index(nid)
node = nodes[nid]
logging.info(f'dgpu_call {idx}/{len(nodes)} {nid} @ {node["dgpu_addr"]}')
dgpu_time = await node['session'].rpc('dgpu_time')
if 'ok' not in dgpu_time.result:
status = MessageToDict(dgpu_time.result)
logging.warning(json.dumps(status, indent=4))
disconnect_node(nid)
raise SkynetDGPUComputeError(status['error'])
dgpu_time = dgpu_time.result['ok']
logging.info(f'ping to {nid}: {time_ms() - dgpu_time} ms')
try:
dgpu_result = await node['session'].rpc(
timeout=45, # give this 45 sec to run cause its compute
binext=req.bin,
**req.params
)
result = MessageToDict(dgpu_result.result)
if dgpu_result.bin:
resp.bin = dgpu_result.bin
except trio.TooSlowError:
result = {'error': 'timeout while processing request'}
case 'dgpu_offline':
disconnect_node(req.uid)
case 'dgpu_workers':
result = {'ok': len(nodes)}
case 'dgpu_next':
result = {'ok': next_worker}
case 'skynet_shutdown':
raise SkynetShutdownRequested
case _:
logging.warning(f'Unknown method {req.method}')
result = {'error': 'unknown method'}
except BaseException as e:
result = {'error': str(e)}
resp.result.update(result)
return resp
rpc_server = SessionServer(
rpc_address,
rpc_handler,
cert_name='brain.cert',
key_name='brain.key'
)
async with rpc_server.open():
logging.info('rpc server is up')
yield
logging.info('skynet is shutting down...')
logging.info('skynet down.')

skynet/cli.py 100644 → 100755

@ -1,25 +1,30 @@
#!/usr/bin/python
import importlib.util
torch_enabled = importlib.util.find_spec('torch') != None
import os
import json
import logging
import random
from typing import Optional
from functools import partial
import trio
import asks
import click
import trio_asyncio
import asyncio
import requests
if torch_enabled:
from . import utils
from .dgpu import open_dgpu_node
from leap.cleos import CLEOS
from leap.sugar import collect_stdout, Name, asset_from_str
from leap.hyperion import HyperionAPI
from .brain import run_skynet
from skynet.ipfs import IPFSHTTP
from .db import open_new_database
from .config import *
from .constants import ALGOS, DEFAULT_RPC_ADDR, DEFAULT_DGPU_ADDR
from .frontend.telegram import run_skynet_telegram
from .nodeos import open_cleos, open_nodeos
from .constants import *
from .frontend.telegram import SkynetTelegramFrontend
from .frontend.discord import SkynetDiscordFrontend
@click.group()
@ -38,11 +43,12 @@ def skynet(*args, **kwargs):
@click.option('--steps', '-s', default=26)
@click.option('--seed', '-S', default=None)
def txt2img(*args, **kwargs):
_, hf_token, _, cfg = init_env_from_config()
from . import utils
_, hf_token, _, _ = init_env_from_config()
utils.txt2img(hf_token, **kwargs)
@click.command()
@click.option('--model', '-m', default='midj')
@click.option('--model', '-m', default=list(MODELS.keys())[0])
@click.option(
'--prompt', '-p', default='a red old tractor in a sunny wheat field')
@click.option('--input', '-i', default='input.png')
@ -52,7 +58,8 @@ def txt2img(*args, **kwargs):
@click.option('--steps', '-s', default=26)
@click.option('--seed', '-S', default=None)
def img2img(model, prompt, input, output, strength, guidance, steps, seed):
_, hf_token, _, cfg = init_env_from_config()
from . import utils
_, hf_token, _, _ = init_env_from_config()
utils.img2img(
hf_token,
model=model,
@ -70,6 +77,7 @@ def img2img(model, prompt, input, output, strength, guidance, steps, seed):
@click.option('--output', '-o', default='output.png')
@click.option('--model', '-m', default='weights/RealESRGAN_x4plus.pth')
def upscale(input, output, model):
from . import utils
utils.upscale(
img_path=input,
output=output,
@ -78,87 +86,439 @@ def upscale(input, output, model):
@skynet.command()
def download():
_, hf_token, _, cfg = init_env_from_config()
from . import utils
_, hf_token, _, _ = init_env_from_config()
utils.download_all_models(hf_token)
@skynet.command()
@click.option(
'--account', '-A', default=None)
@click.option(
'--permission', '-P', default=None)
@click.option(
'--key', '-k', default=None)
@click.option(
'--node-url', '-n', default='https://skynet.ancap.tech')
@click.option(
'--reward', '-r', default='20.0000 GPU')
@click.option('--jobs', '-j', default=1)
@click.option('--model', '-m', default='prompthero/openjourney')
@click.option(
'--prompt', '-p', default='a red old tractor in a sunny wheat field')
@click.option('--output', '-o', default='output.png')
@click.option('--width', '-w', default=512)
@click.option('--height', '-h', default=512)
@click.option('--guidance', '-g', default=10)
@click.option('--step', '-s', default=26)
@click.option('--seed', '-S', default=None)
@click.option('--upscaler', '-U', default='x4')
@click.option('--binary_data', '-b', default='')
def enqueue(
account: str,
permission: str,
key: str | None,
node_url: str,
reward: str,
jobs: int,
**kwargs
):
key, account, permission = load_account_info(
'user', key, account, permission)
node_url, _, _ = load_endpoint_info(
'user', node_url, None, None)
with open_cleos(node_url, key=key) as cleos:
async def enqueue_n_jobs():
for i in range(jobs):
if not kwargs['seed']:
kwargs['seed'] = random.randint(0, 10e9)
req = json.dumps({
'method': 'diffuse',
'params': kwargs
})
binary = kwargs['binary_data']
res = await cleos.a_push_action(
'telos.gpu',
'enqueue',
{
'user': Name(account),
'request_body': req,
'binary_data': binary,
'reward': asset_from_str(reward),
'min_verification': 1
},
account, key, permission,
)
print(res)
trio.run(enqueue_n_jobs)
@skynet.command()
@click.option('--loglevel', '-l', default='INFO', help='Logging level')
@click.option(
'--account', '-A', default='telos.gpu')
@click.option(
'--permission', '-P', default='active')
@click.option(
'--key', '-k', default=None)
@click.option(
'--node-url', '-n', default='https://skynet.ancap.tech')
def clean(
loglevel: str,
account: str,
permission: str,
key: str | None,
node_url: str,
):
key, account, permission = load_account_info(
'user', key, account, permission)
node_url, _, _ = load_endpoint_info(
'user', node_url, None, None)
logging.basicConfig(level=loglevel)
cleos = CLEOS(None, None, url=node_url, remote=node_url)
trio.run(
partial(
cleos.a_push_action,
'telos.gpu',
'clean',
{},
account, key, permission=permission
)
)
@skynet.command()
@click.option(
'--node-url', '-n', default='https://skynet.ancap.tech')
def queue(node_url: str):
node_url, _, _ = load_endpoint_info(
'user', node_url, None, None)
resp = requests.post(
f'{node_url}/v1/chain/get_table_rows',
json={
'code': 'telos.gpu',
'table': 'queue',
'scope': 'telos.gpu',
'json': True
}
)
print(json.dumps(resp.json(), indent=4))
@skynet.command()
@click.option(
'--node-url', '-n', default='https://skynet.ancap.tech')
@click.argument('request-id')
def status(node_url: str, request_id: int):
node_url, _, _ = load_endpoint_info(
'user', node_url, None, None)
resp = requests.post(
f'{node_url}/v1/chain/get_table_rows',
json={
'code': 'telos.gpu',
'table': 'status',
'scope': request_id,
'json': True
}
)
print(json.dumps(resp.json(), indent=4))
@skynet.command()
@click.option(
'--account', '-a', default='telegram')
@click.option(
'--permission', '-p', default='active')
@click.option(
'--key', '-k', default=None)
@click.option(
'--node-url', '-n', default='https://skynet.ancap.tech')
@click.argument('request-id')
def dequeue(
account: str,
permission: str,
key: str | None,
node_url: str,
request_id: int
):
key, account, permission = load_account_info(
'user', key, account, permission)
node_url, _, _ = load_endpoint_info(
'user', node_url, None, None)
with open_cleos(node_url, key=key) as cleos:
res = trio.run(cleos.a_push_action,
'telos.gpu',
'dequeue',
{
'user': Name(account),
'request_id': int(request_id),
},
account, key, permission,
)
print(res)
@skynet.command()
@click.option(
'--account', '-a', default='telos.gpu')
@click.option(
'--permission', '-p', default='active')
@click.option(
'--key', '-k', default=None)
@click.option(
'--node-url', '-n', default='https://skynet.ancap.tech')
@click.option(
'--token-contract', '-c', default='eosio.token')
@click.option(
'--token-symbol', '-S', default='4,GPU')
def config(
account: str,
permission: str,
key: str | None,
node_url: str,
token_contract: str,
token_symbol: str
):
key, account, permission = load_account_info(
'user', key, account, permission)
node_url, _, _ = load_endpoint_info(
'user', node_url, None, None)
with open_cleos(node_url, key=key) as cleos:
res = trio.run(cleos.a_push_action,
'telos.gpu',
'config',
{
'token_contract': token_contract,
'token_symbol': token_symbol,
},
account, key, permission,
)
print(res)
@skynet.command()
@click.option(
'--account', '-a', default='telegram')
@click.option(
'--permission', '-p', default='active')
@click.option(
'--key', '-k', default=None)
@click.option(
'--node-url', '-n', default='https://skynet.ancap.tech')
@click.argument('quantity')
def deposit(
account: str,
permission: str,
key: str | None,
node_url: str,
quantity: str
):
key, account, permission = load_account_info(
'user', key, account, permission)
node_url, _, _ = load_endpoint_info(
'user', node_url, None, None)
with open_cleos(node_url, key=key) as cleos:
res = trio.run(cleos.a_push_action,
'eosio.token',
'transfer',
{
'sender': Name(account),
'recipient': Name('telos.gpu'),
'amount': asset_from_str(quantity),
'memo': f'{account} transferred {quantity} to telos.gpu'
},
account, key, permission,
)
print(res)
@skynet.group()
def run(*args, **kwargs):
pass
@run.command()
def db():
logging.basicConfig(level=logging.INFO)
with open_new_database(cleanup=False) as db_params:
container, passwd, host = db_params
logging.info(('skynet', passwd, host))
@run.command()
@click.option('--loglevel', '-l', default='warning', help='Logging level')
@click.option(
'--host', '-H', default=DEFAULT_RPC_ADDR)
def brain(
loglevel: str,
host: str
):
async def _run_skynet():
async with run_skynet(
rpc_address=host
):
await trio.sleep_forever()
trio.run(_run_skynet)
def nodeos():
logging.basicConfig(filename='skynet-nodeos.log', level=logging.INFO)
with open_nodeos(cleanup=False):
...
@run.command()
@click.option('--loglevel', '-l', default='warning', help='Logging level')
@click.option('--loglevel', '-l', default='INFO', help='Logging level')
@click.option(
'--uid', '-u', required=True)
@click.option(
'--key', '-k', default='dgpu.key')
@click.option(
'--cert', '-c', default='whitelist/dgpu.cert')
@click.option(
'--algos', '-a', default=json.dumps(['midj']))
@click.option(
'--rpc', '-r', default=DEFAULT_RPC_ADDR)
@click.option(
'--dgpu', '-d', default=DEFAULT_DGPU_ADDR)
'--config-path', '-c', default='skynet.ini')
def dgpu(
loglevel: str,
uid: str,
key: str,
cert: str,
algos: str,
rpc: str,
dgpu: str
config_path: str
):
trio.run(
partial(
open_dgpu_node,
cert,
uid,
key_name=key,
rpc_address=rpc,
dgpu_address=dgpu,
initial_algos=json.loads(algos)
))
from .dgpu import open_dgpu_node
logging.basicConfig(level=loglevel)
config = load_skynet_ini(file_path=config_path)
assert 'skynet.dgpu' in config
trio.run(open_dgpu_node, config['skynet.dgpu'])
@run.command()
@click.option('--loglevel', '-l', default='warning', help='Logging level')
@click.option('--loglevel', '-l', default='INFO', help='logging level')
@click.option(
'--key', '-k', default='telegram-frontend')
'--account', '-a', default='telegram')
@click.option(
'--cert', '-c', default='whitelist/telegram-frontend')
'--permission', '-p', default='active')
@click.option(
'--rpc', '-r', default=DEFAULT_RPC_ADDR)
'--key', '-k', default=None)
@click.option(
'--hyperion-url', '-y', default=f'https://{DEFAULT_DOMAIN}')
@click.option(
'--node-url', '-n', default=f'https://{DEFAULT_DOMAIN}')
@click.option(
'--ipfs-url', '-i', default=DEFAULT_IPFS_REMOTE)
@click.option(
'--db-host', '-h', default='localhost:5432')
@click.option(
'--db-user', '-u', default='skynet')
@click.option(
'--db-pass', '-u', default='password')
def telegram(
loglevel: str,
key: str,
cert: str,
rpc: str
account: str,
permission: str,
key: str | None,
hyperion_url: str,
ipfs_url: str,
node_url: str,
db_host: str,
db_user: str,
db_pass: str
):
_, _, tg_token, cfg = init_env_from_config()
trio_asyncio.run(
partial(
run_skynet_telegram,
logging.basicConfig(level=loglevel)
_, _, tg_token, _ = init_env_from_config()
key, account, permission = load_account_info(
'telegram', key, account, permission)
node_url, _, ipfs_url = load_endpoint_info(
'telegram', node_url, None, None)
async def _async_main():
frontend = SkynetTelegramFrontend(
tg_token,
key_name=key,
cert_name=cert,
rpc_address=rpc
))
account,
permission,
node_url,
hyperion_url,
db_host, db_user, db_pass,
remote_ipfs_node=ipfs_url,
key=key
)
async with frontend.open():
await frontend.bot.infinity_polling()
asyncio.run(_async_main())
@run.command()
@click.option('--loglevel', '-l', default='INFO', help='logging level')
@click.option(
'--account', '-a', default='discord')
@click.option(
'--permission', '-p', default='active')
@click.option(
'--key', '-k', default=None)
@click.option(
'--hyperion-url', '-y', default=f'https://{DEFAULT_DOMAIN}')
@click.option(
'--node-url', '-n', default=f'https://{DEFAULT_DOMAIN}')
@click.option(
'--ipfs-url', '-i', default=DEFAULT_IPFS_REMOTE)
@click.option(
'--db-host', '-h', default='localhost:5432')
@click.option(
'--db-user', '-u', default='skynet')
@click.option(
'--db-pass', '-u', default='password')
def discord(
loglevel: str,
account: str,
permission: str,
key: str | None,
hyperion_url: str,
ipfs_url: str,
node_url: str,
db_host: str,
db_user: str,
db_pass: str
):
logging.basicConfig(level=loglevel)
_, _, _, dc_token = init_env_from_config()
key, account, permission = load_account_info(
'discord', key, account, permission)
node_url, _, ipfs_url = load_endpoint_info(
'discord', node_url, None, None)
async def _async_main():
frontend = SkynetDiscordFrontend(
# dc_token,
account,
permission,
node_url,
hyperion_url,
db_host, db_user, db_pass,
remote_ipfs_node=ipfs_url,
key=key
)
async with frontend.open():
await frontend.bot.start(dc_token)
asyncio.run(_async_main())
@run.command()
@click.option('--loglevel', '-l', default='INFO', help='logging level')
@click.option('--name', '-n', default='skynet-ipfs', help='container name')
def ipfs(loglevel, name):
from skynet.ipfs.docker import open_ipfs_node
logging.basicConfig(level=loglevel)
with open_ipfs_node(name=name):
...
@run.command()
@click.option('--loglevel', '-l', default='INFO', help='logging level')
@click.option(
'--ipfs-rpc', '-i', default='http://127.0.0.1:5001')
@click.option(
'--hyperion-url', '-y', default='http://127.0.0.1:42001')
def pinner(loglevel, ipfs_rpc, hyperion_url):
from .ipfs.pinner import SkynetPinner
logging.basicConfig(level=loglevel)
ipfs_node = IPFSHTTP(ipfs_rpc)
hyperion = HyperionAPI(hyperion_url)
pinner = SkynetPinner(hyperion, ipfs_node)
trio.run(pinner.pin_forever)

skynet/config.py 100644 → 100755

@ -1,9 +1,11 @@
#!/usr/bin/python
import os
import json
from pathlib import Path
from configparser import ConfigParser
from re import sub
from .constants import DEFAULT_CONFIG_PATH
@ -13,27 +15,99 @@ def load_skynet_ini(
):
config = ConfigParser()
config.read(file_path)
return config
def init_env_from_config(
hf_token: str | None = None,
hf_home: str | None = None,
tg_token: str | None = None,
dc_token: str | None = None,
file_path=DEFAULT_CONFIG_PATH
):
config = load_skynet_ini()
config = load_skynet_ini(file_path=file_path)
if 'HF_TOKEN' in os.environ:
hf_token = os.environ['HF_TOKEN']
else:
hf_token = config['skynet.dgpu']['hf_token']
elif 'skynet.dgpu' in config:
sub_config = config['skynet.dgpu']
if 'hf_token' in sub_config:
hf_token = sub_config['hf_token']
os.environ['HF_TOKEN'] = hf_token
if 'HF_HOME' in os.environ:
hf_home = os.environ['HF_HOME']
else:
hf_home = config['skynet.dgpu']['hf_home']
elif 'skynet.dgpu' in config:
sub_config = config['skynet.dgpu']
if 'hf_home' in sub_config:
hf_home = sub_config['hf_home']
os.environ['HF_HOME'] = hf_home
if 'TG_TOKEN' in os.environ:
tg_token = os.environ['TG_TOKEN']
else:
tg_token = config['skynet.telegram']['token']
elif 'skynet.telegram' in config:
sub_config = config['skynet.telegram']
if 'token' in sub_config:
tg_token = sub_config['token']
return hf_home, hf_token, tg_token, config
if 'DC_TOKEN' in os.environ:
dc_token = os.environ['DC_TOKEN']
elif 'skynet.discord' in config:
sub_config = config['skynet.discord']
if 'token' in sub_config:
dc_token = sub_config['token']
return hf_home, hf_token, tg_token, dc_token
def load_account_info(
_type: str,
key: str | None = None,
account: str | None = None,
permission: str | None = None,
file_path=DEFAULT_CONFIG_PATH
):
config = load_skynet_ini(file_path=file_path)
type_key = f'skynet.{_type}'
if type_key in config:
sub_config = config[type_key]
if not key and 'key' in sub_config:
key = sub_config['key']
if not account and 'account' in sub_config:
account = sub_config['account']
if not permission and 'permission' in sub_config:
permission = sub_config['permission']
return key, account, permission
def load_endpoint_info(
_type: str,
node_url: str | None = None,
hyperion_url: str | None = None,
ipfs_url: str | None = None,
file_path=DEFAULT_CONFIG_PATH
):
config = load_skynet_ini(file_path=file_path)
type_key = f'skynet.{_type}'
if type_key in config:
sub_config = config[type_key]
if not node_url and 'node_url' in sub_config:
node_url = sub_config['node_url']
if not hyperion_url and 'hyperion_url' in sub_config:
hyperion_url = sub_config['hyperion_url']
if not ipfs_url and 'ipfs_url' in sub_config:
ipfs_url = sub_config['ipfs_url']
return node_url, hyperion_url, ipfs_url
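
Taken together, a frontend command resolves its credentials and endpoints with the two helpers above; a usage sketch (explicitly passed values still win over the ini fallbacks):
```
# usage sketch of the helpers added above; None values fall back to
# the matching [skynet.<type>] section of skynet.ini
from skynet.config import load_account_info, load_endpoint_info

key, account, permission = load_account_info('telegram')
node_url, hyperion_url, ipfs_url = load_endpoint_info('telegram')
```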

skynet/constants.py 100644 → 100755

@ -1,21 +1,34 @@
#!/usr/bin/python
VERSION = '0.1a9'
VERSION = '0.1a11'
DOCKER_RUNTIME_CUDA = 'skynet:runtime-cuda'
ALGOS = {
'midj': 'prompthero/openjourney',
'stable': 'runwayml/stable-diffusion-v1-5',
'hdanime': 'Linaqruf/anything-v3.0',
'waifu': 'hakurei/waifu-diffusion',
'ghibli': 'nitrosocke/Ghibli-Diffusion',
'van-gogh': 'dallinmackay/Van-Gogh-diffusion',
'pokemon': 'lambdalabs/sd-pokemon-diffusers',
'ink': 'Envvi/Inkpunk-Diffusion',
'robot': 'nousr/robo-diffusion'
MODELS = {
'prompthero/openjourney': { 'short': 'midj'},
'runwayml/stable-diffusion-v1-5': { 'short': 'stable'},
'stabilityai/stable-diffusion-2-1-base': { 'short': 'stable2'},
'snowkidy/stable-diffusion-xl-base-0.9': { 'short': 'stablexl0.9'},
'stabilityai/stable-diffusion-xl-base-1.0': { 'short': 'stablexl'},
'Linaqruf/anything-v3.0': { 'short': 'hdanime'},
'hakurei/waifu-diffusion': { 'short': 'waifu'},
'nitrosocke/Ghibli-Diffusion': { 'short': 'ghibli'},
'dallinmackay/Van-Gogh-diffusion': { 'short': 'van-gogh'},
'lambdalabs/sd-pokemon-diffusers': { 'short': 'pokemon'},
'Envvi/Inkpunk-Diffusion': { 'short': 'ink'},
'nousr/robo-diffusion': { 'short': 'robot'}
}
SHORT_NAMES = [
model_info['short']
for model_info in MODELS.values()
]
def get_model_by_shortname(short: str):
for model, info in MODELS.items():
if short == info['short']:
return model
N = '\n'
HELP_TEXT = f'''
test art bot v{VERSION}
@ -24,6 +37,7 @@ commands work on a user per user basis!
config is individual to each user!
/txt2img TEXT - request an image based on a prompt
/img2img <attach_image> TEXT - request an image based on an image and a prompt
/redo - redo last command (only works for txt2img for now!)
@ -35,8 +49,9 @@ config is individual to each user!
/donate - see donation info
/config algo NAME - select AI to use one of:
/config model NAME - select AI to use one of:
{N.join(ALGOS.keys())}
{N.join(SHORT_NAMES)}
/config step NUMBER - set amount of iterations
/config seed NUMBER - set the seed, deterministic results!
@ -71,6 +86,28 @@ COOL_WORDS = [
'michelangelo'
]
CLEAN_COOL_WORDS = [
'cyberpunk',
'soviet propaganda poster',
'rastafari',
'cannabis',
'art deco',
'H R Giger Necronom IV',
'dimethyltryptamine',
'lysergic',
'psilocybin',
'trippy',
'lucy in the sky with diamonds',
'fractal',
'da vinci',
'pencil illustration',
'blueprint',
'internal diagram',
'baroque',
'the last judgment',
'michelangelo'
]
HELP_TOPICS = {
'step': '''
Diffusion models are iterative processes: a repeated cycle that starts with a\
@ -98,8 +135,8 @@ MP_ENABLED_ROLES = ['god']
MIN_STEP = 1
MAX_STEP = 100
MAX_WIDTH = 512
MAX_HEIGHT = 656
MAX_WIDTH = 1024
MAX_HEIGHT = 1024
MAX_GUIDANCE = 20
DEFAULT_SEED = None
@ -109,22 +146,16 @@ DEFAULT_GUIDANCE = 7.5
DEFAULT_STRENGTH = 0.5
DEFAULT_STEP = 35
DEFAULT_CREDITS = 10
DEFAULT_ALGO = 'midj'
DEFAULT_MODEL = list(MODELS.keys())[0]
DEFAULT_ROLE = 'pleb'
DEFAULT_UPSCALER = None
DEFAULT_CONFIG_PATH = 'skynet.ini'
DEFAULT_CERTS_DIR = 'certs'
DEFAULT_CERT_WHITELIST_DIR = 'whitelist'
DEFAULT_CERT_SKYNET_PUB = 'brain.cert'
DEFAULT_CERT_SKYNET_PRIV = 'brain.key'
DEFAULT_CERT_DGPU = 'dgpu.key'
DEFAULT_RPC_ADDR = 'tcp://127.0.0.1:41000'
DEFAULT_DGPU_ADDR = 'tcp://127.0.0.1:41069'
DEFAULT_DGPU_MAX_TASKS = 2
DEFAULT_INITAL_ALGOS = ['midj', 'stable', 'ink']
DEFAULT_INITAL_MODELS = [
'prompthero/openjourney',
'runwayml/stable-diffusion-v1-5'
]
DATE_FORMAT = '%B the %dth %Y, %H:%M:%S'
@ -138,3 +169,10 @@ CONFIG_ATTRS = [
'strength',
'upscaler'
]
DEFAULT_DOMAIN = 'skygpu.net'
DEFAULT_IPFS_REMOTE = '/ip4/169.197.140.154/tcp/4001/p2p/12D3KooWKWogLFNEcNNMKnzU7Snrnuj84RZdMBg3sLiQSQc51oEv'
TG_MAX_WIDTH = 1280
TG_MAX_HEIGHT = 1280
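
A small usage sketch of the new MODELS mapping and its lookup helper (values taken from the dict above):
```
# usage sketch of the MODELS table introduced in this diff
from skynet.constants import SHORT_NAMES, get_model_by_shortname

assert 'stablexl' in SHORT_NAMES
assert get_model_by_shortname('stablexl') == 'stabilityai/stable-diffusion-xl-base-1.0'
```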


@ -1,5 +1,3 @@
#!/usr/bin/python
from .proxy import open_database_connection
from .functions import open_new_database
from .functions import open_new_database, open_database_connection


@ -4,15 +4,16 @@ import time
import random
import string
import logging
import importlib
from typing import Optional
from datetime import datetime
from contextlib import contextmanager as cm
from contextlib import asynccontextmanager as acm
import docker
import asyncpg
import psycopg2
from asyncpg.exceptions import UndefinedColumnError
from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT
from ..constants import *
@ -22,43 +23,41 @@ DB_INIT_SQL = '''
CREATE SCHEMA IF NOT EXISTS skynet;
CREATE TABLE IF NOT EXISTS skynet.user(
id SERIAL PRIMARY KEY NOT NULL,
tg_id BIGINT,
wp_id VARCHAR(128),
mx_id VARCHAR(128),
ig_id VARCHAR(128),
generated INT NOT NULL,
joined DATE NOT NULL,
last_prompt TEXT,
role VARCHAR(128) NOT NULL
id BIGSERIAL PRIMARY KEY NOT NULL,
generated INT NOT NULL,
joined TIMESTAMP NOT NULL,
last_method TEXT,
last_prompt TEXT,
last_file TEXT,
last_binary TEXT,
role VARCHAR(128) NOT NULL
);
ALTER TABLE skynet.user
ADD CONSTRAINT tg_unique
UNIQUE (tg_id);
ALTER TABLE skynet.user
ADD CONSTRAINT wp_unique
UNIQUE (wp_id);
ALTER TABLE skynet.user
ADD CONSTRAINT mx_unique
UNIQUE (mx_id);
ALTER TABLE skynet.user
ADD CONSTRAINT ig_unique
UNIQUE (ig_id);
CREATE TABLE IF NOT EXISTS skynet.user_config(
id SERIAL NOT NULL,
algo VARCHAR(128) NOT NULL,
id BIGSERIAL NOT NULL,
model VARCHAR(512) NOT NULL,
step INT NOT NULL,
width INT NOT NULL,
height INT NOT NULL,
seed BIGINT,
guidance REAL NOT NULL,
strength REAL NOT NULL,
upscaler VARCHAR(128)
seed NUMERIC,
guidance DECIMAL NOT NULL,
strength DECIMAL NOT NULL,
upscaler VARCHAR(128),
CONSTRAINT fk_config
FOREIGN KEY(id)
REFERENCES skynet.user(id)
);
CREATE TABLE IF NOT EXISTS skynet.user_requests(
id BIGSERIAL NOT NULL,
user_id BIGSERIAL NOT NULL,
sent TIMESTAMP NOT NULL,
status TEXT NOT NULL,
status_msg BIGSERIAL PRIMARY KEY NOT NULL,
CONSTRAINT fk_user_req
FOREIGN KEY(user_id)
REFERENCES skynet.user(id)
);
ALTER TABLE skynet.user_config
ADD FOREIGN KEY(id)
REFERENCES skynet.user(id);
'''
@ -79,7 +78,7 @@ def try_decode_uid(uid: str):
@cm
def open_new_database():
def open_new_database(cleanup=True):
rpassword = ''.join(
random.choice(string.ascii_lowercase)
for i in range(12))
@ -97,149 +96,211 @@ def open_new_database():
'POSTGRES_PASSWORD': rpassword
},
detach=True,
remove=True
# could remove this if we want the dockers to be persistent.
# remove=True
)
try:
for log in container.logs(stream=True):
log = log.decode().rstrip()
logging.info(log)
if ('database system is ready to accept connections' in log or
'database system is shut down' in log):
break
for log in container.logs(stream=True):
log = log.decode().rstrip()
logging.info(log)
if ('database system is ready to accept connections' in log or
'database system is shut down' in log):
break
# ip = container.attrs['NetworkSettings']['IPAddress']
container.reload()
port = container.ports['5432/tcp'][0]['HostPort']
host = f'localhost:{port}'
# ip = container.attrs['NetworkSettings']['IPAddress']
container.reload()
port = container.ports['5432/tcp'][0]['HostPort']
host = f'localhost:{port}'
# why print the system is ready to accept connections when its not
# postgres? wtf
time.sleep(1)
logging.info('creating skynet db...')
# why print the system is ready to accept connections when its not
# postgres? wtf
time.sleep(1)
logging.info('creating skynet db...')
conn = psycopg2.connect(
user='postgres',
password=rpassword,
host='localhost',
port=port
)
logging.info('connected...')
conn.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)
with conn.cursor() as cursor:
cursor.execute(
f'CREATE USER skynet WITH PASSWORD \'{password}\'')
cursor.execute(
f'CREATE DATABASE skynet')
cursor.execute(
f'GRANT ALL PRIVILEGES ON DATABASE skynet TO skynet')
conn = psycopg2.connect(
user='postgres',
password=rpassword,
host='localhost',
port=port
)
logging.info('connected...')
conn.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)
with conn.cursor() as cursor:
cursor.execute(
f'CREATE USER skynet WITH PASSWORD \'{password}\'')
cursor.execute(
f'CREATE DATABASE skynet')
cursor.execute(
f'GRANT ALL PRIVILEGES ON DATABASE skynet TO skynet')
conn.close()
conn.close()
logging.info('done.')
yield container, password, host
logging.info('done.')
yield container, password, host
container.stop()
finally:
if container and cleanup:
container.stop()
@acm
async def open_database_connection(
    db_user: str = 'skynet',
    db_pass: str = 'password',
    db_host: str = 'localhost:5432',
    db_name: str = 'skynet'
):
    db = importlib.import_module('skynet.db.functions')
    pool = await asyncpg.create_pool(
        dsn=f'postgres://{db_user}:{db_pass}@{db_host}/{db_name}')
    async with pool.acquire() as conn:
        res = await conn.execute(f'''
            select distinct table_schema
            from information_schema.tables
            where table_schema = \'{db_name}\'
        ''')
        if '1' in res:
            logging.info('schema already in db, skipping init')
        else:
            await conn.execute(DB_INIT_SQL)
    async def _db_call(method: str, *args, **kwargs):
        method = getattr(db, method)
        async with pool.acquire() as conn:
            return await method(conn, *args, **kwargs)
    yield _db_call
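# minimal usage sketch (hypothetical caller, assumes the db container is up):
#
#     async with open_database_connection() as db_call:
#         user = await db_call('get_or_create_user', 12345)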
async def get_user_config(conn, user: int):
stmt = await conn.prepare(
'SELECT * FROM skynet.user_config WHERE id = $1')
conf = await stmt.fetch(user)
if len(conf) == 1:
return conf[0]
else:
return None
async def get_user(conn, uid: int):
return await get_user_config(conn, uid)
async def get_last_method_of(conn, user: int):
stmt = await conn.prepare(
'SELECT last_method FROM skynet.user WHERE id = $1')
return await stmt.fetchval(user)
async def get_last_prompt_of(conn, user: int):
stmt = await conn.prepare(
'SELECT last_prompt FROM skynet.user WHERE id = $1')
return await stmt.fetchval(user)
async def get_last_file_of(conn, user: int):
stmt = await conn.prepare(
'SELECT last_file FROM skynet.user WHERE id = $1')
return await stmt.fetchval(user)
async def get_last_binary_of(conn, user: int):
stmt = await conn.prepare(
'SELECT last_binary FROM skynet.user WHERE id = $1')
return await stmt.fetchval(user)
async def get_user_request(conn, mid: int):
stmt = await conn.prepare(
'SELECT * FROM skynet.user_requests WHERE id = $1')
return await stmt.fetch(mid)
async def get_user_request_by_sid(conn, sid: int):
stmt = await conn.prepare(
'SELECT * FROM skynet.user_requests WHERE status_msg = $1')
return (await stmt.fetch(sid))[0]
async def new_user_request(
conn, user: int, mid: int,
status_msg: int,
status: str = 'started processing request...'
):
date = datetime.utcnow()
async with conn.transaction():
stmt = await conn.prepare('''
INSERT INTO skynet.user_requests(
id, user_id, sent, status, status_msg
)
VALUES($1, $2, $3, $4, $5)
''')
await stmt.fetch(mid, user, date, status, status_msg)
async def update_user_request(
conn, mid: int, status: str
):
stmt = await conn.prepare('''
UPDATE skynet.user_requests
SET status = $2
WHERE id = $1
''')
await stmt.fetch(mid, status)
async def update_user_request_by_sid(
conn, sid: int, status: str
):
stmt = await conn.prepare('''
UPDATE skynet.user_requests
SET status = $2
WHERE status_msg = $1
''')
await stmt.fetch(sid, status)
async def new_user(conn, uid: int):
if await get_user(conn, uid):
raise ValueError('User already present on db')
logging.info(f'new user! {uid}')
date = datetime.utcnow()
async with conn.transaction():
    stmt = await conn.prepare('''
        INSERT INTO skynet.user(
            id, generated, joined,
            last_method, last_prompt, last_file, last_binary,
            role
        )
        VALUES($1, $2, $3, $4, $5, $6, $7, $8)
    ''')
    await stmt.fetch(
        uid, 0, date, 'txt2img', None, None, None, DEFAULT_ROLE
    )
    stmt = await conn.prepare('''
        INSERT INTO skynet.user_config(
            id, model, step, width, height, guidance, strength, upscaler)
        VALUES($1, $2, $3, $4, $5, $6, $7, $8)
    ''')
    resp = await stmt.fetch(
        uid,
        DEFAULT_MODEL,
        DEFAULT_STEP,
        DEFAULT_WIDTH,
        DEFAULT_HEIGHT,
        DEFAULT_GUIDANCE,
        DEFAULT_STRENGTH,
        DEFAULT_UPSCALER
    )
async def get_or_create_user(conn, uid: str):
user = await get_user(conn, uid)
if not user:
await new_user(conn, uid)
user = await get_user(conn, uid)
return user
@ -270,11 +331,7 @@ async def get_user_stats(conn, user: int):
record = records[0]
return record
async def increment_generated(conn, user: int):
stmt = await conn.prepare('''
UPDATE skynet.user
SET generated = generated + 1
@ -282,5 +339,20 @@ async def update_user_stats(
''')
await stmt.fetch(user)
async def update_user_stats(
conn,
user: int,
method: str,
last_prompt: str | None = None,
last_file: str | None = None,
last_binary: str | None = None
):
await update_user(conn, user, 'last_method', method)
if last_prompt:
await update_user(conn, user, 'last_prompt', last_prompt)
if last_file:
await update_user(conn, user, 'last_file', last_file)
if last_binary:
await update_user(conn, user, 'last_binary', last_binary)
logging.info((method, last_prompt, last_binary))

View File

@ -1,123 +0,0 @@
#!/usr/bin/python
import importlib
import logging
from contextlib import asynccontextmanager as acm
import trio
import tractor
import asyncpg
import asyncio
import trio_asyncio
_spawn_kwargs = {
'infect_asyncio': True,
}
async def aio_db_proxy(
to_trio: trio.MemorySendChannel,
from_trio: asyncio.Queue,
db_user: str = 'skynet',
db_pass: str = 'password',
db_host: str = 'localhost:5432',
db_name: str = 'skynet'
) -> None:
db = importlib.import_module('skynet.db.functions')
pool = await asyncpg.create_pool(
dsn=f'postgres://{db_user}:{db_pass}@{db_host}/{db_name}')
async with pool.acquire() as conn:
res = await conn.execute(f'''
select distinct table_schema
from information_schema.tables
where table_schema = \'{db_name}\'
''')
if '1' in res:
logging.info('schema already in db, skipping init')
else:
await conn.execute(DB_INIT_SQL)
# a first message must be sent **from** this ``asyncio``
# task or the ``trio`` side will never unblock from
# ``tractor.to_asyncio.open_channel_from():``
to_trio.send_nowait('start')
# XXX: this uses an ``from_trio: asyncio.Queue`` currently but we
# should probably offer something better.
while True:
msg = await from_trio.get()
method = getattr(db, msg.get('method'))
args = msg.get('args', [])
kwargs = msg.get('kwargs', {})
async with pool.acquire() as conn:
result = await method(conn, *args, **kwargs)
to_trio.send_nowait(result)
@tractor.context
async def trio_to_aio_db_proxy(
ctx: tractor.Context,
db_user: str = 'skynet',
db_pass: str = 'password',
db_host: str = 'localhost:5432',
db_name: str = 'skynet'
):
# this will block until the ``asyncio`` task sends a "first"
# message.
async with tractor.to_asyncio.open_channel_from(
aio_db_proxy,
db_user=db_user,
db_pass=db_pass,
db_host=db_host,
db_name=db_name
) as (first, chan):
assert first == 'start'
await ctx.started(first)
async with ctx.open_stream() as stream:
async for msg in stream:
await chan.send(msg)
out = await chan.receive()
# echo back to parent actor-task
await stream.send(out)
@acm
async def open_database_connection(
db_user: str = 'skynet',
db_pass: str = 'password',
db_host: str = 'localhost:5432',
db_name: str = 'skynet'
):
async with tractor.open_nursery() as n:
p = await n.start_actor(
'aio_db_proxy',
enable_modules=[__name__],
infect_asyncio=True,
)
async with p.open_context(
trio_to_aio_db_proxy,
db_user=db_user,
db_pass=db_pass,
db_host=db_host,
db_name=db_name
) as (ctx, first):
async with ctx.open_stream() as stream:
async def _db_pc(method: str, *args, **kwargs):
await stream.send({
'method': method,
'args': args,
'kwargs': kwargs
})
return await stream.receive()
yield _db_pc
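# rough usage sketch (hypothetical; same call convention as the newer
# non-proxy open_database_connection):
#
#     async with open_database_connection() as db_call:
#         stats = await db_call('get_user_stats', user_id)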

View File

@ -1,216 +0,0 @@
#!/usr/bin/python
import gc
import io
import json
import random
import logging
from PIL import Image
from typing import List, Optional
import trio
import torch
from pynng import Context
from diffusers import (
StableDiffusionPipeline,
StableDiffusionImg2ImgPipeline,
EulerAncestralDiscreteScheduler
)
from realesrgan import RealESRGANer
from basicsr.archs.rrdbnet_arch import RRDBNet
from diffusers.models import UNet2DConditionModel
from .utils import *
from .network import *
from .protobuf import *
from .constants import *
def init_upscaler(model_path: str = 'weights/RealESRGAN_x4plus.pth'):
return RealESRGANer(
scale=4,
model_path=model_path,
dni_weight=None,
model=RRDBNet(
num_in_ch=3,
num_out_ch=3,
num_feat=64,
num_block=23,
num_grow_ch=32,
scale=4
),
half=True
)
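# note: the RRDBNet hyperparameters above (3 in/out channels, 64 features,
# 23 blocks, x4 scale) are the ones the stock RealESRGAN_x4plus checkpoint
# was trained with; they must match the weights file or loading will fail.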
class DGPUComputeError(BaseException):
...
async def open_dgpu_node(
cert_name: str,
unique_id: str,
key_name: Optional[str],
rpc_address: str = DEFAULT_RPC_ADDR,
dgpu_address: str = DEFAULT_DGPU_ADDR,
initial_algos: Optional[List[str]] = None
):
logging.basicConfig(level=logging.DEBUG)
logging.info(f'starting dgpu node!')
logging.info(f'loading models...')
upscaler = init_upscaler()
initial_algos = (
initial_algos
if initial_algos else DEFAULT_INITAL_ALGOS
)
models = {}
for algo in initial_algos:
models[algo] = {
'pipe': pipeline_for(algo),
'generated': 0
}
logging.info(f'loaded {algo}.')
logging.info('memory summary:')
logging.info('\n' + torch.cuda.memory_summary())
async def gpu_compute_one(method: str, params: dict, binext: Optional[bytes] = None):
match method:
case 'diffuse':
image = None
algo = params['algo']
if binext:
algo += 'img'
image = Image.open(io.BytesIO(binext))
w, h = image.size
logging.info(f'user sent img of size {image.size}')
if w > 512 or h > 512:
image.thumbnail((512, 512))
logging.info(f'resized it to {image.size}')
if algo not in models:
logging.info(f'{algo} not in loaded models, swapping...')
least_used = list(models.keys())[0]
for model in models:
if models[least_used]['generated'] > models[model]['generated']:
least_used = model
del models[least_used]
gc.collect()
models[algo] = {
'pipe': pipeline_for(params['algo'], image=True if binext else False),
'generated': 0
}
logging.info(f'swapping done.')
_params = {}
logging.info(method)
logging.info(json.dumps(params, indent=4))
logging.info(f'binext: {len(binext) if binext else 0} bytes')
if binext:
_params['image'] = image
_params['strength'] = params['strength']
else:
_params['width'] = int(params['width'])
_params['height'] = int(params['height'])
try:
image = models[algo]['pipe'](
params['prompt'],
**_params,
guidance_scale=params['guidance'],
num_inference_steps=int(params['step']),
generator=torch.Generator("cuda").manual_seed(
int(params['seed']) if params['seed'] else random.randint(0, 2 ** 64)
)
).images[0]
if params['upscaler'] == 'x4':
logging.info(f'size: {len(image.tobytes())}')
logging.info('performing upscale...')
input_img = image.convert('RGB')
up_img, _ = upscaler.enhance(
convert_from_image_to_cv2(input_img), outscale=4)
image = convert_from_cv2_to_image(up_img)
logging.info('done')
img_byte_arr = io.BytesIO()
image.save(img_byte_arr, format='PNG')
raw_img = img_byte_arr.getvalue()
logging.info(f'final img size {len(raw_img)} bytes.')
return raw_img
except BaseException as e:
logging.error(e)
raise DGPUComputeError(str(e))
finally:
torch.cuda.empty_cache()
case _:
raise DGPUComputeError('Unsupported compute method')
async def rpc_handler(req: SkynetRPCRequest, ctx: Context):
result = {}
resp = SkynetRPCResponse()
match req.method:
case 'dgpu_time':
result = {'ok': time_ms()}
case _:
logging.debug(f'dgpu got one request: {req.method}')
try:
resp.bin = await gpu_compute_one(
req.method, MessageToDict(req.params),
binext=req.bin if req.bin else None
)
logging.debug(f'dgpu processed one request')
except DGPUComputeError as e:
result = {'error': str(e)}
resp.result.update(result)
return resp
rpc_server = SessionServer(
dgpu_address,
rpc_handler,
cert_name=cert_name,
key_name=key_name
)
skynet_rpc = SessionClient(
rpc_address,
unique_id,
cert_name=cert_name,
key_name=key_name
)
skynet_rpc.connect()
async with rpc_server.open() as rpc_server:
res = await skynet_rpc.rpc(
'dgpu_online', {
'dgpu_addr': rpc_server.addr,
'cert': cert_name
})
assert 'ok' in res.result
try:
await trio.sleep_forever()
except KeyboardInterrupt:
logging.info('interrupt caught, stopping...')
finally:
res = await skynet_rpc.rpc('dgpu_offline')
assert 'ok' in res.result

View File

@ -0,0 +1,16 @@
#!/usr/bin/python
import trio
from skynet.dgpu.compute import SkynetMM
from skynet.dgpu.daemon import SkynetDGPUDaemon
from skynet.dgpu.network import SkynetGPUConnector
async def open_dgpu_node(config: dict):
conn = SkynetGPUConnector(config)
mm = SkynetMM(config)
async with conn.open() as conn:
await (SkynetDGPUDaemon(mm, conn, config)
.serve_forever())
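# example entrypoint sketch (config keys are illustrative, see
# skynet.ini.example for the real template):
#
#     trio.run(open_dgpu_node, {
#         'account': 'testworker1',
#         'permission': 'active',
#         'key': '5K...',
#         'node_url': 'https://testnet.skygpu.net',
#     })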

View File

@ -0,0 +1,166 @@
#!/usr/bin/python
# Skynet Memory Manager
import gc
from hashlib import sha256
import json
import logging
from diffusers import DiffusionPipeline
import torch
from skynet.constants import DEFAULT_INITAL_MODELS, MODELS
from skynet.dgpu.errors import DGPUComputeError
from skynet.utils import (
    convert_from_bytes_and_crop, convert_from_cv2_to_image,
    convert_from_image_to_cv2, convert_from_img_to_bytes,
    init_upscaler, pipeline_for
)
def prepare_params_for_diffuse(
params: dict,
binary: bytes | None = None
):
image = None
if binary:
image = convert_from_bytes_and_crop(binary, 512, 512)
_params = {}
if image:
_params['image'] = image
_params['strength'] = float(params['strength'])
else:
_params['width'] = int(params['width'])
_params['height'] = int(params['height'])
return (
params['prompt'],
float(params['guidance']),
int(params['step']),
torch.manual_seed(int(params['seed'])),
params['upscaler'] if 'upscaler' in params else None,
_params
)
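# e.g. for a txt2img request (no binary), a params dict like
#   {'prompt': 'a red tractor', 'guidance': 7.5, 'step': 35, 'seed': 42,
#    'width': 512, 'height': 512, 'upscaler': None}
# yields ('a red tractor', 7.5, 35, <torch.Generator>, None,
#         {'width': 512, 'height': 512})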
class SkynetMM:
def __init__(self, config: dict):
self.upscaler = init_upscaler()
self.initial_models = (
config['initial_models']
if 'initial_models' in config else DEFAULT_INITAL_MODELS
)
self._models = {}
for model in self.initial_models:
self.load_model(model, False, force=True)
def log_debug_info(self):
logging.info('memory summary:')
logging.info('\n' + torch.cuda.memory_summary())
def is_model_loaded(self, model_name: str, image: bool):
for model_key, model_data in self._models.items():
if (model_key == model_name and
model_data['image'] == image):
return True
return False
def load_model(
self,
model_name: str,
image: bool,
force=False
):
logging.info(f'loading model {model_name}...')
if force or len(self._models.keys()) == 0:
pipe = pipeline_for(model_name, image=image)
self._models[model_name] = {
'pipe': pipe,
'generated': 0,
'image': image
}
else:
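# cache full: evict the model with the fewest generations to free VRAM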
least_used = list(self._models.keys())[0]
for model in self._models:
if self._models[
least_used]['generated'] > self._models[model]['generated']:
least_used = model
del self._models[least_used]
logging.info(f'swapping model {least_used} for {model_name}...')
gc.collect()
torch.cuda.empty_cache()
pipe = pipeline_for(model_name, image=image)
self._models[model_name] = {
'pipe': pipe,
'generated': 0,
'image': image
}
logging.info(f'loaded model {model_name}')
return pipe
def get_model(self, model_name: str, image: bool) -> DiffusionPipeline:
if model_name not in MODELS:
raise DGPUComputeError(f'Unknown model {model_name}')
if not self.is_model_loaded(model_name, image):
pipe = self.load_model(model_name, image=image)
else:
pipe = self._models[model_name]['pipe']
return pipe
def compute_one(
self,
method: str,
params: dict,
binary: bytes | None = None
):
try:
match method:
case 'diffuse':
image = None
arguments = prepare_params_for_diffuse(params, binary)
prompt, guidance, step, seed, upscaler, extra_params = arguments
model = self.get_model(params['model'], 'image' in extra_params)
image = model(
prompt,
guidance_scale=guidance,
num_inference_steps=step,
generator=seed,
**extra_params
).images[0]
if upscaler == 'x4':
input_img = image.convert('RGB')
up_img, _ = self.upscaler.enhance(
convert_from_image_to_cv2(input_img), outscale=4)
image = convert_from_cv2_to_image(up_img)
img_raw = convert_from_img_to_bytes(image)
img_sha = sha256(img_raw).hexdigest()
return img_sha, img_raw
case _:
raise DGPUComputeError('Unsupported compute method')
except BaseException as e:
logging.error(e)
raise DGPUComputeError(str(e))
finally:
torch.cuda.empty_cache()
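# minimal usage sketch (model name illustrative, must be a key in MODELS;
# requires CUDA and downloaded weights):
#
#     mm = SkynetMM({'initial_models': ['prompthero/openjourney']})
#     img_sha, img_raw = mm.compute_one('diffuse', {
#         'model': 'prompthero/openjourney', 'prompt': 'a red tractor',
#         'guidance': 7.5, 'step': 35, 'seed': 42,
#         'width': 512, 'height': 512, 'upscaler': None})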

View File

@ -0,0 +1,92 @@
#!/usr/bin/python
import json
import logging
import traceback
from hashlib import sha256
import trio
from skynet.dgpu.compute import SkynetMM
from skynet.dgpu.network import SkynetGPUConnector
class SkynetDGPUDaemon:
def __init__(
self,
mm: SkynetMM,
conn: SkynetGPUConnector,
config: dict
):
self.mm = mm
self.conn = conn
self.auto_withdraw = (
config['auto_withdraw']
if 'auto_withdraw' in config else False
)
async def serve_forever(self):
try:
while True:
if self.auto_withdraw:
await self.conn.maybe_withdraw_all()
queue = await self.conn.get_work_requests_last_hour()
for req in queue:
rid = req['id']
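# skip any request we have already submitted a result for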
my_results = [res['id'] for res in (await self.conn.find_my_results())]
if rid not in my_results:
statuses = await self.conn.get_status_by_request_id(rid)
if len(statuses) == 0:
# parse request
body = json.loads(req['body'])
binary = await self.conn.get_input_data(req['binary_data'])
hash_str = (
str(req['nonce'])
+
req['body']
+
req['binary_data']
)
logging.info(f'hashing: {hash_str}')
request_hash = sha256(hash_str.encode('utf-8')).hexdigest()
# TODO: validate request
# perform work
logging.info(f'working on {body}')
resp = await self.conn.begin_work(rid)
if 'code' in resp:
logging.info(f'probably being worked on already... skip.')
else:
try:
img_sha, img_raw = self.mm.compute_one(
body['method'], body['params'], binary=binary)
ipfs_hash = self.conn.publish_on_ipfs(img_raw)
await self.conn.submit_work(rid, request_hash, img_sha, ipfs_hash)
break
except BaseException as e:
traceback.print_exc()
await self.conn.cancel_work(rid, str(e))
break
else:
logging.info(f'request {rid} already being worked on, skip...')
await trio.sleep(1)
except KeyboardInterrupt:
...

View File

@ -0,0 +1,5 @@
#!/usr/bin/python
class DGPUComputeError(BaseException):
...

View File

@ -0,0 +1,233 @@
#!/usr/bin/python
from functools import partial
import io
import json
import time
import logging
import asks
from PIL import Image
from contextlib import ExitStack
from contextlib import asynccontextmanager as acm
from leap.cleos import CLEOS
from leap.sugar import Checksum256, Name, asset_from_str
from skynet.constants import DEFAULT_DOMAIN
from skynet.dgpu.errors import DGPUComputeError
from skynet.ipfs import get_ipfs_file
from skynet.ipfs.docker import open_ipfs_node
async def failable(fn: partial, ret_fail=None):
try:
return await fn()
except (
asks.errors.RequestTimeout,
json.JSONDecodeError
):
return ret_fail
class SkynetGPUConnector:
def __init__(self, config: dict):
self.account = Name(config['account'])
self.permission = config['permission']
self.key = config['key']
self.node_url = config['node_url']
self.hyperion_url = config['hyperion_url']
self.ipfs_url = config['ipfs_url']
self.cleos = CLEOS(
None, None, self.node_url, remote=self.node_url)
self._exit_stack = ExitStack()
def connect(self):
self.ipfs_node = self._exit_stack.enter_context(
open_ipfs_node())
def disconnect(self):
self._exit_stack.close()
@acm
async def open(self):
self.connect()
yield self
self.disconnect()
# blockchain helpers
async def get_work_requests_last_hour(self):
logging.info('get_work_requests_last_hour')
return await failable(
partial(
self.cleos.aget_table,
'telos.gpu', 'telos.gpu', 'queue',
index_position=2,
key_type='i64',
lower_bound=int(time.time()) - 3600
), ret_fail=[])
async def get_status_by_request_id(self, request_id: int):
logging.info('get_status_by_request_id')
return await failable(
partial(
self.cleos.aget_table,
'telos.gpu', request_id, 'status'), ret_fail=[])
async def get_global_config(self):
logging.info('get_global_config')
rows = await failable(
partial(
self.cleos.aget_table,
'telos.gpu', 'telos.gpu', 'config'))
if rows:
return rows[0]
else:
return None
async def get_worker_balance(self):
logging.info('get_worker_balance')
rows = await failable(
partial(
self.cleos.aget_table,
'telos.gpu', 'telos.gpu', 'users',
index_position=1,
key_type='name',
lower_bound=self.account,
upper_bound=self.account
))
if rows:
return rows[0]['balance']
else:
return None
async def begin_work(self, request_id: int):
logging.info('begin_work')
return await failable(
partial(
self.cleos.a_push_action,
'telos.gpu',
'workbegin',
{
'worker': self.account,
'request_id': request_id,
'max_workers': 2
},
self.account, self.key,
permission=self.permission
)
)
async def cancel_work(self, request_id: int, reason: str):
logging.info('cancel_work')
return await failable(
partial(
self.cleos.a_push_action,
'telos.gpu',
'workcancel',
{
'worker': self.account,
'request_id': request_id,
'reason': reason
},
self.account, self.key,
permission=self.permission
)
)
async def maybe_withdraw_all(self):
logging.info('maybe_withdraw_all')
balance = await self.get_worker_balance()
if not balance:
return
balance_amount = float(balance.split(' ')[0])
if balance_amount > 0:
await failable(
partial(
self.cleos.a_push_action,
'telos.gpu',
'withdraw',
{
'user': self.account,
'quantity': asset_from_str(balance)
},
self.account, self.key,
permission=self.permission
)
)
async def find_my_results(self):
logging.info('find_my_results')
return await failable(
partial(
self.cleos.aget_table,
'telos.gpu', 'telos.gpu', 'results',
index_position=4,
key_type='name',
lower_bound=self.account,
upper_bound=self.account
)
)
async def submit_work(
self,
request_id: int,
request_hash: str,
result_hash: str,
ipfs_hash: str
):
logging.info('submit_work')
return await failable(
partial(
self.cleos.a_push_action,
'telos.gpu',
'submit',
{
'worker': self.account,
'request_id': request_id,
'request_hash': Checksum256(request_hash),
'result_hash': Checksum256(result_hash),
'ipfs_hash': ipfs_hash
},
self.account, self.key,
permission=self.permission
)
)
# IPFS helpers
def publish_on_ipfs(self, raw_img: bytes):
logging.info('publish_on_ipfs')
img = Image.open(io.BytesIO(raw_img))
img.save('ipfs-docker-staging/image.png')
# check peer connections, reconnect to skynet gateway if not
peers = self.ipfs_node.check_connect()
if self.ipfs_url not in peers:
self.ipfs_node.connect(self.ipfs_url)
ipfs_hash = self.ipfs_node.add('image.png')
self.ipfs_node.pin(ipfs_hash)
return ipfs_hash
async def get_input_data(self, ipfs_hash: str) -> bytes:
if ipfs_hash == '':
return b''
resp = await get_ipfs_file(f'https://ipfs.{DEFAULT_DOMAIN}/ipfs/{ipfs_hash}/image.png')
if not resp:
raise DGPUComputeError('Couldn\'t gather input data from ipfs')
return resp.raw
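# typical worker flow against this connector (method names from this class):
#   reqs = await conn.get_work_requests_last_hour()
#   await conn.begin_work(request_id)
#   ipfs_hash = conn.publish_on_ipfs(raw_img)
#   await conn.submit_work(request_id, request_hash, img_sha, ipfs_hash)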

View File

@ -1,28 +1,7 @@
#!/usr/bin/python
import json
from typing import Union, Optional
from pathlib import Path
from contextlib import contextmanager as cm
import pynng
from pynng import TLSConfig
from OpenSSL.crypto import (
load_privatekey,
load_certificate,
FILETYPE_PEM
)
from google.protobuf.struct_pb2 import Struct
from ..network import SessionClient
from ..constants import *
from ..protobuf.auth import *
from ..protobuf.skynet_pb2 import SkynetRPCRequest, SkynetRPCResponse
class ConfigRequestFormatError(BaseException):
...
@ -40,24 +19,6 @@ class ConfigSizeDivisionByEight(BaseException):
...
@cm
def open_skynet_rpc(
unique_id: str,
rpc_address: str = DEFAULT_RPC_ADDR,
cert_name: Optional[str] = None,
key_name: Optional[str] = None
):
sesh = SessionClient(
rpc_address,
unique_id,
cert_name=cert_name,
key_name=key_name
)
logging.debug(f'opening skynet rpc...')
sesh.connect()
yield sesh
sesh.disconnect()
def validate_user_config_request(req: str):
params = req.split(' ')
@ -69,10 +30,14 @@ def validate_user_config_request(req: str):
attr = params[1]
match attr:
case 'model' | 'algo':
attr = 'model'
val = params[2]
shorts = [model_info['short'] for model_info in MODELS.values()]
if val not in shorts:
raise ConfigUnknownAlgorithm(f'no model named {val}')
val = get_model_by_shortname(val)
case 'step':
val = int(params[2])
@ -117,7 +82,12 @@ def validate_user_config_request(req: str):
raise ConfigUnknownAttribute(
f'\"{attr}\" not a configurable parameter')
display_val = val
if attr == 'seed':
if not val:
display_val = 'Random'
return attr, val, f'config updated! {attr} to {display_val}'
except ValueError:
raise ValueError(f'\"{val}\" is not a number silly')
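# e.g. validate_user_config_request('/config step 35') returns
# ('step', 35, 'config updated! step to 35'); unknown attributes raise
# ConfigUnknownAttribute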

View File

@ -0,0 +1,267 @@
#!/usr/bin/python
from json import JSONDecodeError
import random
import logging
import asyncio
from decimal import Decimal
from hashlib import sha256
from datetime import datetime
from contextlib import ExitStack, AsyncExitStack
from contextlib import asynccontextmanager as acm
from leap.cleos import CLEOS
from leap.sugar import Name, asset_from_str, collect_stdout
from leap.hyperion import HyperionAPI
# from telebot.types import InputMediaPhoto
import discord
import io
from skynet.db import open_new_database, open_database_connection
from skynet.ipfs import get_ipfs_file
from skynet.ipfs.docker import open_ipfs_node
from skynet.constants import *
from . import *
from .bot import DiscordBot
from .utils import *
from .handlers import create_handler_context
from .ui import SkynetView
class SkynetDiscordFrontend:
def __init__(
self,
# token: str,
account: str,
permission: str,
node_url: str,
hyperion_url: str,
db_host: str,
db_user: str,
db_pass: str,
remote_ipfs_node: str,
key: str
):
# self.token = token
self.account = account
self.permission = permission
self.node_url = node_url
self.hyperion_url = hyperion_url
self.db_host = db_host
self.db_user = db_user
self.db_pass = db_pass
self.remote_ipfs_node = remote_ipfs_node
self.key = key
self.bot = DiscordBot(self)
self.cleos = CLEOS(None, None, url=node_url, remote=node_url)
self.hyperion = HyperionAPI(hyperion_url)
self._exit_stack = ExitStack()
self._async_exit_stack = AsyncExitStack()
async def start(self):
self.ipfs_node = self._exit_stack.enter_context(
open_ipfs_node())
self.ipfs_node.connect(self.remote_ipfs_node)
logging.info(
f'connected to remote ipfs node: {self.remote_ipfs_node}')
self.db_call = await self._async_exit_stack.enter_async_context(
open_database_connection(
self.db_user, self.db_pass, self.db_host))
create_handler_context(self)
async def stop(self):
await self._async_exit_stack.aclose()
self._exit_stack.close()
@acm
async def open(self):
await self.start()
yield self
await self.stop()
# maybe do this?
# async def update_status_message(
# self, status_msg, new_text: str, **kwargs
# ):
# await self.db_call(
# 'update_user_request_by_sid', status_msg.id, new_text)
# return await self.bot.edit_message_text(
# new_text,
# chat_id=status_msg.chat.id,
# message_id=status_msg.id,
# **kwargs
# )
# async def append_status_message(
# self, status_msg, add_text: str, **kwargs
# ):
# request = await self.db_call('get_user_request_by_sid', status_msg.id)
# await self.update_status_message(
# status_msg,
# request['status'] + add_text,
# **kwargs
# )
async def work_request(
self,
user,
status_msg,
method: str,
params: dict,
ctx: discord.ext.commands.context.Context | discord.Message,
file_id: str | None = None,
binary_data: str = ''
):
send = ctx.channel.send
if params['seed'] is None:
params['seed'] = random.randint(0, 0xFFFFFFFF)
sanitized_params = {}
for key, val in params.items():
if isinstance(val, Decimal):
val = str(val)
sanitized_params[key] = val
body = json.dumps({
'method': 'diffuse',
'params': sanitized_params
})
request_time = datetime.now().isoformat()
await status_msg.delete()
msg_text = f'processing a \'{method}\' request by {user.name}\n[{timestamp_pretty()}] *broadcasting transaction to chain...* '
embed = discord.Embed(
title='live updates',
description=msg_text,
color=discord.Color.blue())
message = await send(embed=embed)
reward = '20.0000 GPU'
res = await self.cleos.a_push_action(
'telos.gpu',
'enqueue',
{
'user': Name(self.account),
'request_body': body,
'binary_data': binary_data,
'reward': asset_from_str(reward),
'min_verification': 1
},
self.account, self.key, permission=self.permission
)
if 'code' in res or 'statusCode' in res:
logging.error(json.dumps(res, indent=4))
await self.bot.channel.send(
    'skynet has suffered an internal error trying to fill this request')
return
enqueue_tx_id = res['transaction_id']
enqueue_tx_link = f'[**Your request on Skynet Explorer**](https://explorer.{DEFAULT_DOMAIN}/v2/explore/transaction/{enqueue_tx_id})'
msg_text += f'**broadcasted!** \n{enqueue_tx_link}\n[{timestamp_pretty()}] *workers are processing request...* '
embed = discord.Embed(
title='live updates',
description=msg_text,
color=discord.Color.blue())
await message.edit(embed=embed)
out = collect_stdout(res)
request_id, nonce = out.split(':')
request_hash = sha256(
(nonce + body + binary_data).encode('utf-8')).hexdigest().upper()
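# this must match the request hash the worker submits on-chain (see the
# dgpu daemon), otherwise the polling loop below never finds the result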
request_id = int(request_id)
logging.info(f'{request_id} enqueued.')
tx_hash = None
ipfs_hash = None
for i in range(60):
try:
submits = await self.hyperion.aget_actions(
account=self.account,
filter='telos.gpu:submit',
sort='desc',
after=request_time
)
actions = [
action
for action in submits['actions']
if action[
'act']['data']['request_hash'] == request_hash
]
if len(actions) > 0:
tx_hash = actions[0]['trx_id']
data = actions[0]['act']['data']
ipfs_hash = data['ipfs_hash']
worker = data['worker']
logging.info('Found matching submit!')
break
except JSONDecodeError:
logging.error('network error while getting actions, retrying...')
await asyncio.sleep(1)
if not ipfs_hash:
timeout_text = f'\n[{timestamp_pretty()}] **timeout processing request**'
embed = discord.Embed(
title='live updates',
description=timeout_text,
color=discord.Color.blue())
await message.edit(embed=embed)
return
tx_link = f'[**Your result on Skynet Explorer**](https://explorer.{DEFAULT_DOMAIN}/v2/explore/transaction/{tx_hash})'
msg_text += f'**request processed!**\n{tx_link}\n[{timestamp_pretty()}] *trying to download image...*\n '
embed = discord.Embed(
title='live updates',
description=msg_text,
color=discord.Color.blue())
await message.edit(embed=embed)
# attempt to get the image and send it
ipfs_link = f'https://ipfs.{DEFAULT_DOMAIN}/ipfs/{ipfs_hash}/image.png'
resp = await get_ipfs_file(ipfs_link)
# reword this function, may not need caption
caption, embed = generate_reply_caption(
user, params, tx_hash, worker, reward)
if not resp or resp.status_code != 200:
logging.error(f'couldn\'t get ipfs hosted image at {ipfs_link}!')
embed.add_field(name='Error', value=f'couldn\'t get ipfs hosted image [**here**]({ipfs_link})!')
await message.edit(embed=embed, view=SkynetView(self))
else:
logging.info(f'success! sending generated image')
await message.delete()
if file_id: # img2img
embed.set_thumbnail(
url='https://ipfs.skygpu.net/ipfs/' + binary_data + '/image.png')
embed.set_image(url=ipfs_link)
await send(embed=embed, view=SkynetView(self))
else: # txt2img
embed.set_image(url=ipfs_link)
await send(embed=embed, view=SkynetView(self))

View File

@ -0,0 +1,89 @@
# import os
import discord
import asyncio
# from dotenv import load_dotenv
# from pathlib import Path
from discord.ext import commands
from .ui import SkynetView
# # Auth
# current_dir = Path(__file__).resolve().parent
# # parent_dir = current_dir.parent
# env_file_path = current_dir / ".env"
# load_dotenv(dotenv_path=env_file_path)
#
# discordToken = os.getenv("DISCORD_TOKEN")
# Actual Discord bot.
class DiscordBot(commands.Bot):
def __init__(self, bot, *args, **kwargs):
self.bot = bot
intents = discord.Intents(
messages=True,
guilds=True,
typing=True,
members=True,
presences=True,
reactions=True,
message_content=True,
voice_states=True
)
super().__init__(command_prefix='/', intents=intents, *args, **kwargs)
# async def setup_hook(self):
# db.poll_db.start()
async def on_ready(self):
print(f'{self.user.name} has connected to Discord!')
for guild in self.guilds:
for channel in guild.channels:
if channel.name == "skynet":
await channel.send('Skynet bot online', view=SkynetView(self.bot))
# intro_msg = await channel.send('Welcome to the Skynet discord bot.\nSkynet is a decentralized compute layer, focused on supporting AI paradigms. Skynet leverages blockchain technology to manage work requests and fills. We are currently featuring image generation and support 11 different models. Get started with the /help command, or just click on some buttons. Here is an example command to generate an image:\n/txt2img a big red tractor in a giant field of corn')
intro_msg = await channel.send("Welcome to Skynet's Discord Bot,\n\nSkynet operates as a decentralized compute layer, offering a wide array of support for diverse AI paradigms through the use of blockchain technology. Our present focus is image generation, powered by 11 distinct models.\n\nTo begin exploring, use the '/help' command or directly interact with the provided buttons. Here is an example command to generate an image:\n\n'/txt2img a big red tractor in a giant field of corn'")
await intro_msg.pin()
print("\n==============")
print("Logged in as")
print(self.user.name)
print(self.user.id)
print("==============")
async def on_message(self, message):
if isinstance(message.channel, discord.DMChannel):
return
elif message.channel.name != 'skynet':
return
elif message.author == self.user:
return
await self.process_commands(message)
# await asyncio.sleep(3)
# await message.channel.send('', view=SkynetView(self.bot))
async def on_command_error(self, ctx, error):
if isinstance(error, commands.MissingRequiredArgument):
await ctx.send('You missed a required argument, please try again.')
# async def on_message(self, message):
# print(f"message from {message.author} what he said {message.content}")
# await message.channel.send(message.content)
# bot=DiscordBot()
# @bot.command(name='config', help='Responds with the configuration')
# async def config(ctx):
# response = "This is the bot configuration" # Put your bot configuration here
# await ctx.send(response)
#
# @bot.command(name='helper', help='Responds with a help')
# async def helper(ctx):
# response = "This is help information" # Put your help response here
# await ctx.send(response)
#
# @bot.command(name='txt2img', help='Responds with an image')
# async def txt2img(ctx, *, arg):
# response = f"This is your prompt: {arg}"
# await ctx.send(response)
# bot.run(discordToken)

View File

@ -0,0 +1,601 @@
#!/usr/bin/python
import io
import json
import logging
from datetime import datetime, timedelta
from PIL import Image
# from telebot.types import CallbackQuery, Message
from skynet.frontend import validate_user_config_request
from skynet.constants import *
from .ui import SkynetView
def create_handler_context(frontend: 'SkynetDiscordFrontend'):
bot = frontend.bot
cleos = frontend.cleos
db_call = frontend.db_call
work_request = frontend.work_request
ipfs_node = frontend.ipfs_node
@bot.command(name='config', help='Responds with the configuration')
async def set_config(ctx):
user = ctx.author
try:
attr, val, reply_txt = validate_user_config_request(
ctx.message.content)
logging.info(f'user config update: {attr} to {val}')
await db_call('update_user_config', user.id, attr, val)
logging.info('done')
except BaseException as e:
reply_txt = str(e)
finally:
await ctx.reply(content=reply_txt, view=SkynetView(frontend))
bot.remove_command('help')
@bot.command(name='help', help='Responds with a help')
async def help(ctx):
splt_msg = ctx.message.content.split(' ')
if len(splt_msg) == 1:
await ctx.send(content=f'```{HELP_TEXT}```', view=SkynetView(frontend))
else:
param = splt_msg[1]
if param in HELP_TOPICS:
await ctx.send(content=f'```{HELP_TOPICS[param]}```', view=SkynetView(frontend))
else:
await ctx.send(content=f'```{HELP_UNKWNOWN_PARAM}```', view=SkynetView(frontend))
@bot.command(name='cool', help='Display a list of cool prompt words')
async def send_cool_words(ctx):
clean_cool_word = '\n'.join(CLEAN_COOL_WORDS)
await ctx.send(content=f'```{clean_cool_word}```', view=SkynetView(frontend))
@bot.command(name='stats', help='See user statistics' )
async def user_stats(ctx):
user = ctx.author
await db_call('get_or_create_user', user.id)
generated, joined, role = await db_call('get_user_stats', user.id)
stats_str = f'```generated: {generated}\n'
stats_str += f'joined: {joined}\n'
stats_str += f'role: {role}\n```'
await ctx.reply(stats_str, view=SkynetView(frontend))
@bot.command(name='donate', help='See donate info')
async def donation_info(ctx):
await ctx.reply(
f'```\n{DONATION_INFO}```', view=SkynetView(frontend))
@bot.command(name='txt2img', help='Responds with an image')
async def send_txt2img(ctx):
# grab user from ctx
user = ctx.author
user_row = await db_call('get_or_create_user', user.id)
# init new msg
init_msg = 'started processing txt2img request...'
status_msg = await ctx.send(init_msg)
await db_call(
'new_user_request', user.id, ctx.message.id, status_msg.id, status=init_msg)
prompt = ' '.join(ctx.message.content.split(' ')[1:])
if len(prompt) == 0:
await status_msg.edit(content='Empty text prompt ignored.')
await db_call('update_user_request', status_msg.id, 'Empty text prompt ignored.')
return
logging.info(f'mid: {ctx.message.id}')
user_config = {**user_row}
del user_config['id']
params = {
'prompt': prompt,
**user_config
}
await db_call(
'update_user_stats', user.id, 'txt2img', last_prompt=prompt)
ec = await work_request(user, status_msg, 'txt2img', params, ctx)
if ec is None:
await db_call('increment_generated', user.id)
@bot.command(name='redo', help='Redo last request')
async def redo(ctx):
init_msg = 'started processing redo request...'
status_msg = await ctx.send(init_msg)
user = ctx.author
method = await db_call('get_last_method_of', user.id)
prompt = await db_call('get_last_prompt_of', user.id)
file_id = None
binary = ''
if method == 'img2img':
file_id = await db_call('get_last_file_of', user.id)
binary = await db_call('get_last_binary_of', user.id)
if not prompt:
await status_msg.edit(
content='no last prompt found, do a txt2img cmd first!',
view=SkynetView(frontend)
)
return
user_row = await db_call('get_or_create_user', user.id)
await db_call(
'new_user_request', user.id, ctx.message.id, status_msg.id, status=init_msg)
user_config = {**user_row}
del user_config['id']
params = {
'prompt': prompt,
**user_config
}
ec = await work_request(
user, status_msg, 'redo', params, ctx,
file_id=file_id,
binary_data=binary
)
if ec is None:
await db_call('increment_generated', user.id)
@bot.command(name='img2img', help='Responds with an image')
async def send_img2img(ctx):
# if isinstance(message_or_query, CallbackQuery):
# query = message_or_query
# message = query.message
# user = query.from_user
# chat = query.message.chat
#
# else:
# message = message_or_query
# user = message.from_user
# chat = message.chat
# reply_id = None
# if chat.type == 'group' and chat.id == GROUP_ID:
# reply_id = message.message_id
#
user = ctx.author
user_row = await db_call('get_or_create_user', user.id)
# init new msg
init_msg = 'started processing img2img request...'
status_msg = await ctx.send(init_msg)
await db_call(
'new_user_request', user.id, ctx.message.id, status_msg.id, status=init_msg)
if not ctx.message.content.startswith('/img2img'):
await ctx.reply(
'For image to image you need to add /img2img to the beginning of your caption'
)
return
prompt = ' '.join(ctx.message.content.split(' ')[1:])
if len(prompt) == 0:
await ctx.reply('Empty text prompt ignored.')
return
# file_id = message.photo[-1].file_id
# file_path = (await bot.get_file(file_id)).file_path
# image_raw = await bot.download_file(file_path)
#
file = ctx.message.attachments[-1]
file_id = str(file.id)
# file bytes
image_raw = await file.read()
with Image.open(io.BytesIO(image_raw)) as image:
w, h = image.size
if w > 512 or h > 512:
logging.warning(f'user sent img of size {image.size}')
image.thumbnail((512, 512))
logging.warning(f'resized it to {image.size}')
image.save('ipfs-docker-staging/image.png', format='PNG')
ipfs_hash = ipfs_node.add('image.png')
ipfs_node.pin(ipfs_hash)
logging.info(f'published input image {ipfs_hash} on ipfs')
logging.info(f'mid: {ctx.message.id}')
user_config = {**user_row}
del user_config['id']
params = {
'prompt': prompt,
**user_config
}
await db_call(
'update_user_stats',
user.id,
'img2img',
last_file=file_id,
last_prompt=prompt,
last_binary=ipfs_hash
)
ec = await work_request(
user, status_msg, 'img2img', params, ctx,
file_id=file_id,
binary_data=ipfs_hash
)
if ec is None:
await db_call('increment_generated', user.id)
# TODO: DELETE BELOW
# user = 'testworker3'
# status_msg = 'status'
# params = {
# 'prompt': arg,
# 'seed': None,
# 'step': 35,
# 'guidance': 7.5,
# 'strength': 0.5,
# 'width': 512,
# 'height': 512,
# 'upscaler': None,
# 'model': 'prompthero/openjourney',
# }
#
# ec = await work_request(user, status_msg, 'txt2img', params, ctx)
# print(ec)
# if ec == 0:
# await db_call('increment_generated', user.id)
# response = f"This is your prompt: {arg}"
# await ctx.send(response)
# generic / simple handlers
# @bot.message_handler(commands=['help'])
# async def send_help(message):
# splt_msg = message.text.split(' ')
#
# if len(splt_msg) == 1:
# await bot.reply_to(message, HELP_TEXT)
#
# else:
# param = splt_msg[1]
# if param in HELP_TOPICS:
# await bot.reply_to(message, HELP_TOPICS[param])
#
# else:
# await bot.reply_to(message, HELP_UNKWNOWN_PARAM)
#
# @bot.message_handler(commands=['cool'])
# async def send_cool_words(message):
# await bot.reply_to(message, '\n'.join(COOL_WORDS))
#
# @bot.message_handler(commands=['queue'])
# async def queue(message):
# an_hour_ago = datetime.now() - timedelta(hours=1)
# queue = await cleos.aget_table(
# 'telos.gpu', 'telos.gpu', 'queue',
# index_position=2,
# key_type='i64',
# sort='desc',
# lower_bound=int(an_hour_ago.timestamp())
# )
# await bot.reply_to(
# message, f'Total requests on skynet queue: {len(queue)}')
# @bot.message_handler(commands=['config'])
# async def set_config(message):
# user = message.from_user.id
# try:
# attr, val, reply_txt = validate_user_config_request(
# message.text)
#
# logging.info(f'user config update: {attr} to {val}')
# await db_call('update_user_config', user, attr, val)
# logging.info('done')
#
# except BaseException as e:
# reply_txt = str(e)
#
# finally:
# await bot.reply_to(message, reply_txt)
#
# @bot.message_handler(commands=['stats'])
# async def user_stats(message):
# user = message.from_user.id
#
# await db_call('get_or_create_user', user)
# generated, joined, role = await db_call('get_user_stats', user)
#
# stats_str = f'generated: {generated}\n'
# stats_str += f'joined: {joined}\n'
# stats_str += f'role: {role}\n'
#
# await bot.reply_to(
# message, stats_str)
#
# @bot.message_handler(commands=['donate'])
# async def donation_info(message):
# await bot.reply_to(
# message, DONATION_INFO)
#
# @bot.message_handler(commands=['say'])
# async def say(message):
# chat = message.chat
# user = message.from_user
#
# if (chat.type == 'group') or (user.id != 383385940):
# return
#
# await bot.send_message(GROUP_ID, message.text[4:])
# generic txt2img handler
# async def _generic_txt2img(message_or_query):
# if isinstance(message_or_query, CallbackQuery):
# query = message_or_query
# message = query.message
# user = query.from_user
# chat = query.message.chat
#
# else:
# message = message_or_query
# user = message.from_user
# chat = message.chat
#
# reply_id = None
# if chat.type == 'group' and chat.id == GROUP_ID:
# reply_id = message.message_id
#
# user_row = await db_call('get_or_create_user', user.id)
#
# # init new msg
# init_msg = 'started processing txt2img request...'
# status_msg = await bot.reply_to(message, init_msg)
# await db_call(
# 'new_user_request', user.id, message.id, status_msg.id, status=init_msg)
#
# prompt = ' '.join(message.text.split(' ')[1:])
#
# if len(prompt) == 0:
# await bot.edit_message_text(
# 'Empty text prompt ignored.',
# chat_id=status_msg.chat.id,
# message_id=status_msg.id
# )
# await db_call('update_user_request', status_msg.id, 'Empty text prompt ignored.')
# return
#
# logging.info(f'mid: {message.id}')
#
# user_config = {**user_row}
# del user_config['id']
#
# params = {
# 'prompt': prompt,
# **user_config
# }
#
# await db_call(
# 'update_user_stats', user.id, 'txt2img', last_prompt=prompt)
#
# ec = await work_request(user, status_msg, 'txt2img', params)
# if ec == 0:
# await db_call('increment_generated', user.id)
#
#
# # generic img2img handler
#
# async def _generic_img2img(message_or_query):
# if isinstance(message_or_query, CallbackQuery):
# query = message_or_query
# message = query.message
# user = query.from_user
# chat = query.message.chat
#
# else:
# message = message_or_query
# user = message.from_user
# chat = message.chat
#
# reply_id = None
# if chat.type == 'group' and chat.id == GROUP_ID:
# reply_id = message.message_id
#
# user_row = await db_call('get_or_create_user', user.id)
#
# # init new msg
# init_msg = 'started processing txt2img request...'
# status_msg = await bot.reply_to(message, init_msg)
# await db_call(
# 'new_user_request', user.id, message.id, status_msg.id, status=init_msg)
#
# if not message.caption.startswith('/img2img'):
# await bot.reply_to(
# message,
# 'For image to image you need to add /img2img to the beginning of your caption'
# )
# return
#
# prompt = ' '.join(message.caption.split(' ')[1:])
#
# if len(prompt) == 0:
# await bot.reply_to(message, 'Empty text prompt ignored.')
# return
#
# file_id = message.photo[-1].file_id
# file_path = (await bot.get_file(file_id)).file_path
# image_raw = await bot.download_file(file_path)
# with Image.open(io.BytesIO(image_raw)) as image:
# w, h = image.size
#
# if w > 512 or h > 512:
# logging.warning(f'user sent img of size {image.size}')
# image.thumbnail((512, 512))
# logging.warning(f'resized it to {image.size}')
#
# image.save(f'ipfs-docker-staging/image.png', format='PNG')
#
# ipfs_hash = ipfs_node.add('image.png')
# ipfs_node.pin(ipfs_hash)
#
# logging.info(f'published input image {ipfs_hash} on ipfs')
#
# logging.info(f'mid: {message.id}')
#
# user_config = {**user_row}
# del user_config['id']
#
# params = {
# 'prompt': prompt,
# **user_config
# }
#
# await db_call(
# 'update_user_stats',
# user.id,
# 'img2img',
# last_file=file_id,
# last_prompt=prompt,
# last_binary=ipfs_hash
# )
#
# ec = await work_request(
# user, status_msg, 'img2img', params,
# file_id=file_id,
# binary_data=ipfs_hash
# )
#
# if ec == 0:
# await db_call('increment_generated', user.id)
#
# generic redo handler
# async def _redo(message_or_query):
# is_query = False
# if isinstance(message_or_query, CallbackQuery):
# is_query = True
# query = message_or_query
# message = query.message
# user = query.from_user
# chat = query.message.chat
#
# elif isinstance(message_or_query, Message):
# message = message_or_query
# user = message.from_user
# chat = message.chat
#
# init_msg = 'started processing redo request...'
# if is_query:
# status_msg = await bot.send_message(chat.id, init_msg)
#
# else:
# status_msg = await bot.reply_to(message, init_msg)
#
# method = await db_call('get_last_method_of', user.id)
# prompt = await db_call('get_last_prompt_of', user.id)
#
# file_id = None
# binary = ''
# if method == 'img2img':
# file_id = await db_call('get_last_file_of', user.id)
# binary = await db_call('get_last_binary_of', user.id)
#
# if not prompt:
# await bot.reply_to(
# message,
# 'no last prompt found, do a txt2img cmd first!'
# )
# return
#
#
# user_row = await db_call('get_or_create_user', user.id)
# await db_call(
# 'new_user_request', user.id, message.id, status_msg.id, status=init_msg)
# user_config = {**user_row}
# del user_config['id']
#
# params = {
# 'prompt': prompt,
# **user_config
# }
#
# await work_request(
# user, status_msg, 'redo', params,
# file_id=file_id,
# binary_data=binary
# )
# "proxy" handlers just request routers
# @bot.message_handler(commands=['txt2img'])
# async def send_txt2img(message):
# await _generic_txt2img(message)
#
# @bot.message_handler(func=lambda message: True, content_types=[
# 'photo', 'document'])
# async def send_img2img(message):
# await _generic_img2img(message)
#
# @bot.message_handler(commands=['img2img'])
# async def img2img_missing_image(message):
# await bot.reply_to(
# message,
# 'seems you tried to do an img2img command without sending image'
# )
#
# @bot.message_handler(commands=['redo'])
# async def redo(message):
# await _redo(message)
#
# @bot.callback_query_handler(func=lambda call: True)
# async def callback_query(call):
# msg = json.loads(call.data)
# logging.info(call.data)
# method = msg.get('method')
# match method:
# case 'redo':
# await _redo(call)
# catch all handler for things we dont support
# @bot.message_handler(func=lambda message: True)
# async def echo_message(message):
# if message.text[0] == '/':
# await bot.reply_to(message, UNKNOWN_CMD_TEXT)

View File

@ -0,0 +1,311 @@
import io
import discord
from PIL import Image
import logging
from skynet.constants import *
from skynet.frontend import validate_user_config_request
class SkynetView(discord.ui.View):
def __init__(self, bot):
self.bot = bot
super().__init__(timeout=None)
self.add_item(RedoButton('redo', discord.ButtonStyle.primary, self.bot))
self.add_item(Txt2ImgButton('txt2img', discord.ButtonStyle.primary, self.bot))
self.add_item(Img2ImgButton('img2img', discord.ButtonStyle.primary, self.bot))
self.add_item(StatsButton('stats', discord.ButtonStyle.secondary, self.bot))
self.add_item(DonateButton('donate', discord.ButtonStyle.secondary, self.bot))
self.add_item(ConfigButton('config', discord.ButtonStyle.secondary, self.bot))
self.add_item(HelpButton('help', discord.ButtonStyle.secondary, self.bot))
self.add_item(CoolButton('cool', discord.ButtonStyle.secondary, self.bot))
class Txt2ImgButton(discord.ui.Button):
def __init__(self, label: str, style: discord.ButtonStyle, bot):
self.bot = bot
super().__init__(label=label, style=style)
async def callback(self, interaction):
db_call = self.bot.db_call
work_request = self.bot.work_request
msg = await grab('Enter your prompt:', interaction)
# grab user from msg
user = msg.author
user_row = await db_call('get_or_create_user', user.id)
# init new msg
init_msg = 'started processing txt2img request...'
status_msg = await msg.channel.send(init_msg)
await db_call(
'new_user_request', user.id, msg.id, status_msg.id, status=init_msg)
prompt = msg.content
if len(prompt) == 0:
await status_msg.edit(content='Empty text prompt ignored.')
await db_call('update_user_request', status_msg.id, 'Empty text prompt ignored.')
return
logging.info(f'mid: {msg.id}')
user_config = {**user_row}
del user_config['id']
params = {
'prompt': prompt,
**user_config
}
await db_call(
'update_user_stats', user.id, 'txt2img', last_prompt=prompt)
ec = await work_request(user, status_msg, 'txt2img', params, msg)
if ec is None:
await db_call('increment_generated', user.id)
class Img2ImgButton(discord.ui.Button):
def __init__(self, label: str, style: discord.ButtonStyle, bot):
self.bot = bot
super().__init__(label=label, style=style)
async def callback(self, interaction):
db_call = self.bot.db_call
work_request = self.bot.work_request
ipfs_node = self.bot.ipfs_node
msg = await grab('Attach an Image. Enter your prompt:', interaction)
user = msg.author
user_row = await db_call('get_or_create_user', user.id)
# init new msg
init_msg = 'started processing img2img request...'
status_msg = await msg.channel.send(init_msg)
await db_call(
'new_user_request', user.id, msg.id, status_msg.id, status=init_msg)
# if not msg.content.startswith('/img2img'):
# await msg.reply(
# 'For image to image you need to add /img2img to the beginning of your caption'
# )
# return
prompt = msg.content
if len(prompt) == 0:
await msg.reply('Empty text prompt ignored.')
return
# file_id = message.photo[-1].file_id
# file_path = (await bot.get_file(file_id)).file_path
# image_raw = await bot.download_file(file_path)
#
file = msg.attachments[-1]
file_id = str(file.id)
# file bytes
image_raw = await file.read()
with Image.open(io.BytesIO(image_raw)) as image:
w, h = image.size
if w > 512 or h > 512:
logging.warning(f'user sent img of size {image.size}')
image.thumbnail((512, 512))
logging.warning(f'resized it to {image.size}')
image.save('ipfs-docker-staging/image.png', format='PNG')
ipfs_hash = ipfs_node.add('image.png')
ipfs_node.pin(ipfs_hash)
logging.info(f'published input image {ipfs_hash} on ipfs')
logging.info(f'mid: {msg.id}')
user_config = {**user_row}
del user_config['id']
params = {
'prompt': prompt,
**user_config
}
await db_call(
'update_user_stats',
user.id,
'img2img',
last_file=file_id,
last_prompt=prompt,
last_binary=ipfs_hash
)
ec = await work_request(
user, status_msg, 'img2img', params, msg,
file_id=file_id,
binary_data=ipfs_hash
)
if ec is None:
await db_call('increment_generated', user.id)
class RedoButton(discord.ui.Button):
def __init__(self, label: str, style: discord.ButtonStyle, bot):
self.bot = bot
super().__init__(label=label, style=style)
async def callback(self, interaction):
db_call = self.bot.db_call
work_request = self.bot.work_request
init_msg = 'started processing redo request...'
await interaction.response.send_message(init_msg)
status_msg = await interaction.original_response()
user = interaction.user
method = await db_call('get_last_method_of', user.id)
prompt = await db_call('get_last_prompt_of', user.id)
file_id = None
binary = ''
if method == 'img2img':
file_id = await db_call('get_last_file_of', user.id)
binary = await db_call('get_last_binary_of', user.id)
if not prompt:
await status_msg.edit(
content='no last prompt found, do a txt2img cmd first!',
view=SkynetView(self.bot)
)
return
user_row = await db_call('get_or_create_user', user.id)
await db_call(
'new_user_request', user.id, interaction.id, status_msg.id, status=init_msg)
user_config = {**user_row}
del user_config['id']
params = {
'prompt': prompt,
**user_config
}
ec = await work_request(
user, status_msg, 'redo', params, interaction,
file_id=file_id,
binary_data=binary
)
if ec is None:
await db_call('increment_generated', user.id)
class ConfigButton(discord.ui.Button):
def __init__(self, label: str, style: discord.ButtonStyle, bot):
self.bot = bot
super().__init__(label=label, style=style)
async def callback(self, interaction):
db_call = self.bot.db_call
msg = await grab('What params do you want to change? (format: <param> <value>)', interaction)
user = interaction.user
try:
attr, val, reply_txt = validate_user_config_request(
'/config ' + msg.content)
logging.info(f'user config update: {attr} to {val}')
await db_call('update_user_config', user.id, attr, val)
logging.info('done')
except BaseException as e:
reply_txt = str(e)
finally:
await msg.reply(content=reply_txt, view=SkynetView(self.bot))
class StatsButton(discord.ui.Button):
def __init__(self, label: str, style: discord.ButtonStyle, bot):
self.bot = bot
super().__init__(label=label, style=style)
async def callback(self, interaction):
db_call = self.bot.db_call
user = interaction.user
await db_call('get_or_create_user', user.id)
generated, joined, role = await db_call('get_user_stats', user.id)
stats_str = f'```generated: {generated}\n'
stats_str += f'joined: {joined}\n'
stats_str += f'role: {role}\n```'
await interaction.response.send_message(
content=stats_str, view=SkynetView(self.bot))
class DonateButton(discord.ui.Button):
def __init__(self, label: str, style: discord.ButtonStyle, bot):
self.bot = bot
super().__init__(label=label, style=style)
async def callback(self, interaction):
await interaction.response.send_message(
content=f'```\n{DONATION_INFO}```',
view=SkynetView(self.bot))
class CoolButton(discord.ui.Button):
def __init__(self, label: str, style: discord.ButtonStyle, bot):
self.bot = bot
super().__init__(label=label, style=style)
async def callback(self, interaction):
clean_cool_word = '\n'.join(CLEAN_COOL_WORDS)
await interaction.response.send_message(
content=f'```{clean_cool_word}```',
view=SkynetView(self.bot))
class HelpButton(discord.ui.Button):
def __init__(self, label: str, style: discord.ButtonStyle, bot):
self.bot = bot
super().__init__(label=label, style=style)
async def callback(self, interaction):
msg = await grab('What would you like help with? (a for all)', interaction)
param = msg.content
if param == 'a':
await msg.reply(content=f'```{HELP_TEXT}```', view=SkynetView(self.bot))
else:
if param in HELP_TOPICS:
await msg.reply(content=f'```{HELP_TOPICS[param]}```', view=SkynetView(self.bot))
else:
await msg.reply(content=f'```{HELP_UNKWNOWN_PARAM}```', view=SkynetView(self.bot))
async def grab(prompt, interaction):
def vet(m):
return m.author == interaction.user and m.channel == interaction.channel
await interaction.response.send_message(prompt, ephemeral=True)
message = await interaction.client.wait_for('message', check=vet)
return message
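# e.g. msg = await grab('Enter your prompt:', interaction) sends an
# ephemeral prompt and waits for the next message from that same user in
# that same channel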

View File

@ -0,0 +1,121 @@
#!/usr/bin/python
import json
import logging
import traceback
from datetime import datetime, timezone
from telebot.types import InlineKeyboardButton, InlineKeyboardMarkup
from telebot.async_telebot import ExceptionHandler
from telebot.formatting import hlink
import discord
from skynet.constants import *
def timestamp_pretty():
return datetime.now(timezone.utc).strftime('%H:%M:%S')
def tg_user_pretty(tguser):
if tguser.username:
return f'@{tguser.username}'
else:
return f'{tguser.first_name} id: {tguser.id}'
class SKYExceptionHandler(ExceptionHandler):
def handle(exception):
traceback.print_exc()
def build_redo_menu():
btn_redo = InlineKeyboardButton("Redo", callback_data=json.dumps({'method': 'redo'}))
inline_keyboard = InlineKeyboardMarkup()
inline_keyboard.add(btn_redo)
return inline_keyboard
def prepare_metainfo_caption(user, worker: str, reward: str, meta: dict, embed) -> str:
prompt = meta["prompt"]
if len(prompt) > 256:
prompt = prompt[:256]
gen_str = f'generated by {user.name}\n'
gen_str += f'performed by {worker}\n'
gen_str += f'reward: {reward}\n'
embed.add_field(
name='General Info', value=f'```{gen_str}```', inline=False)
# meta_str = f'__by {user.name}__\n'
# meta_str += f'*performed by {worker}*\n'
# meta_str += f'__**reward: {reward}**__\n'
embed.add_field(name='Prompt', value=f'```{prompt}\n```', inline=False)
# meta_str = f'`prompt:` {prompt}\n'
meta_str = f'seed: {meta["seed"]}\n'
meta_str += f'step: {meta["step"]}\n'
meta_str += f'guidance: {meta["guidance"]}\n'
if meta['strength']:
meta_str += f'strength: {meta["strength"]}\n'
meta_str += f'algo: {meta["model"]}\n'
if meta['upscaler']:
meta_str += f'upscaler: {meta["upscaler"]}\n'
embed.add_field(name='Parameters', value=f'```{meta_str}```', inline=False)
foot_str = f'Made with Skynet v{VERSION}\n'
foot_str += 'JOIN THE SWARM: https://discord.gg/JYM4YPMgK'
embed.set_footer(text=foot_str)
return meta_str
def generate_reply_caption(
user, # discord user
params: dict,
tx_hash: str,
worker: str,
reward: str
):
explorer_link = discord.Embed(
title='[SKYNET Transaction Explorer]',
url=f'https://explorer.{DEFAULT_DOMAIN}/v2/explore/transaction/{tx_hash}',
color=discord.Color.blue())
meta_info = prepare_metainfo_caption(user, worker, reward, params, explorer_link)
# the embed already carries the explorer link and general info,
# so the plain-text body only repeats the parameter summary
final_msg = '\n'.join([
f'{meta_info}'
])
logging.info(final_msg)
return final_msg, explorer_link
async def get_global_config(cleos):
return (await cleos.aget_table(
'telos.gpu', 'telos.gpu', 'config'))[0]
async def get_user_nonce(cleos, user: str):
return (await cleos.aget_table(
'telos.gpu', 'telos.gpu', 'users',
index_position=1,
key_type='name',
lower_bound=user,
upper_bound=user
))[0]['nonce']
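For reference, a hedged sketch of these two helpers in use; cleos is assumed to be a connected leap.cleos.CLEOS instance and the account name is a placeholder:

async def show_chain_state(cleos):
    config = await get_global_config(cleos)          # token contract + symbol
    nonce = await get_user_nonce(cleos, 'telegram')  # placeholder account
    # work_request derives the on-chain request hash from this nonce:
    # sha256((nonce + body + binary_data).encode('utf-8')).hexdigest().upper()
    return config, nonce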

View File

@ -1,337 +0,0 @@
#!/usr/bin/python
import io
import zlib
import logging
from datetime import datetime
from PIL import Image
from trio_asyncio import aio_as_trio
from telebot.types import (
InputFile, InputMediaPhoto, InlineKeyboardButton, InlineKeyboardMarkup
)
from telebot.async_telebot import AsyncTeleBot
from ..db import open_database_connection
from ..constants import *
from . import *
PREFIX = 'tg'
def build_redo_menu():
btn_redo = InlineKeyboardButton("Redo", callback_data=json.dumps({'method': 'redo'}))
inline_keyboard = InlineKeyboardMarkup()
inline_keyboard.add(btn_redo)
return inline_keyboard
def prepare_metainfo_caption(tguser, meta: dict) -> str:
prompt = meta["prompt"]
if len(prompt) > 256:
prompt = prompt[:256]
if tguser.username:
user = f'@{tguser.username}'
else:
user = f'{tguser.first_name} id: {tguser.id}'
meta_str = f'by {user}\n'
meta_str += f'prompt: \"{prompt}\"\n'
meta_str += f'seed: {meta["seed"]}\n'
meta_str += f'step: {meta["step"]}\n'
meta_str += f'guidance: {meta["guidance"]}\n'
if meta['strength']:
meta_str += f'strength: {meta["strength"]}\n'
meta_str += f'algo: \"{meta["algo"]}\"\n'
if meta['upscaler']:
meta_str += f'upscaler: \"{meta["upscaler"]}\"\n'
meta_str += f'sampler: k_euler_ancestral\n'
meta_str += f'skynet v{VERSION}'
return meta_str
async def run_skynet_telegram(
name: str,
tg_token: str,
key_name: str = 'telegram-frontend.key',
cert_name: str = 'whitelist/telegram-frontend.cert',
rpc_address: str = DEFAULT_RPC_ADDR,
db_host: str = 'localhost:5432',
db_user: str = 'skynet',
db_pass: str = 'password'
):
logging.basicConfig(level=logging.INFO)
bot = AsyncTeleBot(tg_token)
logging.info(f'tg_token: {tg_token}')
async with open_database_connection(
db_user, db_pass, db_host
) as db_call:
with open_skynet_rpc(
f'skynet-telegram-{name}',
rpc_address=rpc_address,
cert_name=cert_name,
key_name=key_name
) as session:
@bot.message_handler(commands=['help'])
async def send_help(message):
splt_msg = message.text.split(' ')
if len(splt_msg) == 1:
await bot.reply_to(message, HELP_TEXT)
else:
param = splt_msg[1]
if param in HELP_TOPICS:
await bot.reply_to(message, HELP_TOPICS[param])
else:
await bot.reply_to(message, HELP_UNKWNOWN_PARAM)
@bot.message_handler(commands=['cool'])
async def send_cool_words(message):
await bot.reply_to(message, '\n'.join(COOL_WORDS))
@bot.message_handler(commands=['txt2img'])
async def send_txt2img(message):
chat = message.chat
reply_id = None
if chat.type == 'group' and chat.id == GROUP_ID:
reply_id = message.message_id
user_id = f'tg+{message.from_user.id}'
prompt = ' '.join(message.text.split(' ')[1:])
if len(prompt) == 0:
await bot.reply_to(message, 'Empty text prompt ignored.')
return
logging.info(f'mid: {message.id}')
user = await db_call('get_or_create_user', user_id)
user_config = {**(await db_call('get_user_config', user))}
del user_config['id']
resp = await session.rpc(
'dgpu_call', {
'method': 'diffuse',
'params': {
'prompt': prompt,
**user_config
}
},
timeout=60
)
logging.info(f'resp to {message.id} arrived')
resp_txt = ''
result = MessageToDict(resp.result)
if 'error' in resp.result:
resp_txt = resp.result['message']
await bot.reply_to(message, resp_txt)
else:
logging.info(result['id'])
img_raw = resp.bin
logging.info(f'got image of size: {len(img_raw)}')
img = Image.open(io.BytesIO(img_raw))
await bot.send_photo(
GROUP_ID,
caption=prepare_metainfo_caption(message.from_user, result['meta']['meta']),
photo=img,
reply_to_message_id=reply_id,
reply_markup=build_redo_menu()
)
return
@bot.message_handler(func=lambda message: True, content_types=['photo'])
async def send_img2img(message):
chat = message.chat
reply_id = None
if chat.type == 'group' and chat.id == GROUP_ID:
reply_id = message.message_id
user_id = f'tg+{message.from_user.id}'
if not message.caption.startswith('/img2img'):
await bot.reply_to(
message,
'For image to image you need to add /img2img to the beginning of your caption'
)
return
prompt = ' '.join(message.caption.split(' ')[1:])
if len(prompt) == 0:
await bot.reply_to(message, 'Empty text prompt ignored.')
return
file_id = message.photo[-1].file_id
file_path = (await bot.get_file(file_id)).file_path
file_raw = await bot.download_file(file_path)
logging.info(f'mid: {message.id}')
user = await db_call('get_or_create_user', user_id)
user_config = {**(await db_call('get_user_config', user))}
del user_config['id']
resp = await session.rpc(
'dgpu_call', {
'method': 'diffuse',
'params': {
'prompt': prompt,
**user_config
}
},
binext=file_raw,
timeout=60
)
logging.info(f'resp to {message.id} arrived')
resp_txt = ''
result = MessageToDict(resp.result)
if 'error' in resp.result:
resp_txt = resp.result['message']
await bot.reply_to(message, resp_txt)
else:
logging.info(result['id'])
img_raw = resp.bin
logging.info(f'got image of size: {len(img_raw)}')
img = Image.open(io.BytesIO(img_raw))
await bot.send_media_group(
GROUP_ID,
media=[
InputMediaPhoto(file_id),
InputMediaPhoto(
img,
caption=prepare_metainfo_caption(message.from_user, result['meta']['meta'])
)
],
reply_to_message_id=reply_id
)
return
@bot.message_handler(commands=['img2img'])
async def img2img_missing_image(message):
await bot.reply_to(
message,
'seems you tried to do an img2img command without sending an image'
)
@bot.message_handler(commands=['redo'])
async def redo(message):
chat = message.chat
reply_id = None
if chat.type == 'group' and chat.id == GROUP_ID:
reply_id = message.message_id
user = await db_call('get_or_create_user', f'tg+{message.from_user.id}')
user_config = {**(await db_call('get_user_config', user))}
del user_config['id']
prompt = await db_call('get_last_prompt_of', user)
resp = await session.rpc(
'dgpu_call', {
'method': 'diffuse',
'params': {
'prompt': prompt,
**user_config
}
},
timeout=60
)
logging.info(f'resp to {message.id} arrived')
resp_txt = ''
result = MessageToDict(resp.result)
if 'error' in resp.result:
resp_txt = resp.result['message']
await bot.reply_to(message, resp_txt)
else:
logging.info(result['id'])
img_raw = resp.bin
logging.info(f'got image of size: {len(img_raw)}')
img = Image.open(io.BytesIO(img_raw))
await bot.send_photo(
GROUP_ID,
caption=prepare_metainfo_caption(message.from_user, result['meta']['meta']),
photo=img,
reply_to_message_id=reply_id
)
return
@bot.message_handler(commands=['config'])
async def set_config(message):
user = await db_call('get_or_create_user', f'tg+{message.from_user.id}')
try:
attr, val, reply_txt = validate_user_config_request(
message.text)
logging.info(f'user config update: {attr} to {val}')
await db_call('update_user_config', user, attr, val)
logging.info('done')
except BaseException as e:
reply_txt = str(e)
finally:
await bot.reply_to(message, reply_txt)
@bot.message_handler(commands=['stats'])
async def user_stats(message):
user = await db_call('get_or_create_user', f'tg+{message.from_user.id}')
generated, joined, role = await db_call('get_user_stats', user)
stats_str = f'generated: {generated}\n'
stats_str += f'joined: {joined}\n'
stats_str += f'role: {role}\n'
await bot.reply_to(
message, stats_str)
@bot.message_handler(commands=['donate'])
async def donation_info(message):
await bot.reply_to(
message, DONATION_INFO)
@bot.message_handler(commands=['say'])
async def say(message):
chat = message.chat
user = message.from_user
if (chat.type == 'group') or (user.id != 383385940):
return
await bot.send_message(GROUP_ID, message.text[4:])
@bot.message_handler(func=lambda message: True)
async def echo_message(message):
if message.text[0] == '/':
await bot.reply_to(message, UNKNOWN_CMD_TEXT)
@bot.callback_query_handler(func=lambda call: True)
async def callback_query(call):
msg = json.loads(call.data)
logging.info(call.data)
method = msg.get('method')
match method:
case 'redo':
await _redo(call)
await aio_as_trio(bot.infinity_polling)()

View File

@ -0,0 +1,292 @@
#!/usr/bin/python
import io
import random
import logging
import asyncio
from PIL import Image
from json import JSONDecodeError
from decimal import Decimal
from hashlib import sha256
from datetime import datetime
from contextlib import ExitStack, AsyncExitStack
from contextlib import asynccontextmanager as acm
from leap.cleos import CLEOS
from leap.sugar import Name, asset_from_str, collect_stdout
from leap.hyperion import HyperionAPI
from telebot.types import InputMediaPhoto
from telebot.async_telebot import AsyncTeleBot
from skynet.db import open_new_database, open_database_connection
from skynet.ipfs import get_ipfs_file
from skynet.ipfs.docker import open_ipfs_node
from skynet.constants import *
from . import *
from .utils import *
from .handlers import create_handler_context
class SkynetTelegramFrontend:
def __init__(
self,
token: str,
account: str,
permission: str,
node_url: str,
hyperion_url: str,
db_host: str,
db_user: str,
db_pass: str,
remote_ipfs_node: str,
key: str
):
self.token = token
self.account = account
self.permission = permission
self.node_url = node_url
self.hyperion_url = hyperion_url
self.db_host = db_host
self.db_user = db_user
self.db_pass = db_pass
self.remote_ipfs_node = remote_ipfs_node
self.key = key
self.bot = AsyncTeleBot(token, exception_handler=SKYExceptionHandler)
self.cleos = CLEOS(None, None, url=node_url, remote=node_url)
self.hyperion = HyperionAPI(hyperion_url)
self._exit_stack = ExitStack()
self._async_exit_stack = AsyncExitStack()
async def start(self):
self.ipfs_node = self._exit_stack.enter_context(
open_ipfs_node())
# self.ipfs_node.connect(self.remote_ipfs_node)
logging.info(
f'connected to remote ipfs node: {self.remote_ipfs_node}')
self.db_call = await self._async_exit_stack.enter_async_context(
open_database_connection(
self.db_user, self.db_pass, self.db_host))
create_handler_context(self)
async def stop(self):
await self._async_exit_stack.aclose()
self._exit_stack.close()
@acm
async def open(self):
await self.start()
yield self
await self.stop()
async def update_status_message(
self, status_msg, new_text: str, **kwargs
):
await self.db_call(
'update_user_request_by_sid', status_msg.id, new_text)
return await self.bot.edit_message_text(
new_text,
chat_id=status_msg.chat.id,
message_id=status_msg.id,
**kwargs
)
async def append_status_message(
self, status_msg, add_text: str, **kwargs
):
request = await self.db_call('get_user_request_by_sid', status_msg.id)
await self.update_status_message(
status_msg,
request['status'] + add_text,
**kwargs
)
async def work_request(
self,
user,
status_msg,
method: str,
params: dict,
file_id: str | None = None,
binary_data: str = ''
):
if params['seed'] is None:
params['seed'] = random.randint(0, 0xFFFFFFFF)
sanitized_params = {}
for key, val in params.items():
if isinstance(val, Decimal):
val = str(val)
sanitized_params[key] = val
body = json.dumps({
'method': 'diffuse',
'params': sanitized_params
})
request_time = datetime.now().isoformat()
await self.update_status_message(
status_msg,
f'processing a \'{method}\' request by {tg_user_pretty(user)}\n'
f'[{timestamp_pretty()}] <i>broadcasting transaction to chain...</i>',
parse_mode='HTML'
)
reward = '20.0000 GPU'
res = await self.cleos.a_push_action(
'telos.gpu',
'enqueue',
{
'user': Name(self.account),
'request_body': body,
'binary_data': binary_data,
'reward': asset_from_str(reward),
'min_verification': 1
},
self.account, self.key, permission=self.permission
)
if 'code' in res or 'statusCode' in res:
logging.error(json.dumps(res, indent=4))
await self.update_status_message(
status_msg,
'skynet has suffered an internal error trying to fill this request')
return
enqueue_tx_id = res['transaction_id']
enqueue_tx_link = hlink(
'Your request on Skynet Explorer',
f'https://explorer.{DEFAULT_DOMAIN}/v2/explore/transaction/{enqueue_tx_id}'
)
await self.append_status_message(
status_msg,
f' <b>broadcasted!</b>\n'
f'<b>{enqueue_tx_link}</b>\n'
f'[{timestamp_pretty()}] <i>workers are processing request...</i>',
parse_mode='HTML'
)
out = collect_stdout(res)
request_id, nonce = out.split(':')
request_hash = sha256(
(nonce + body + binary_data).encode('utf-8')).hexdigest().upper()
request_id = int(request_id)
logging.info(f'{request_id} enqueued.')
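# what follows is a poll loop: for up to ~60 seconds hyperion is queried for
# 'telos.gpu:submit' actions newer than request_time, and each one is matched
# against request_hash, i.e. sha256(nonce + body + binary_data) as committed
# by the worker on-chain; the first match yields tx_hash, ipfs_hash and worker.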
tx_hash = None
ipfs_hash = None
for i in range(60):
try:
submits = await self.hyperion.aget_actions(
account=self.account,
filter='telos.gpu:submit',
sort='desc',
after=request_time
)
actions = [
action
for action in submits['actions']
if action['act']['data']['request_hash'] == request_hash
]
if len(actions) > 0:
tx_hash = actions[0]['trx_id']
data = actions[0]['act']['data']
ipfs_hash = data['ipfs_hash']
worker = data['worker']
logging.info('Found matching submit!')
break
except JSONDecodeError:
logging.error('network error while getting actions, retrying...')
await asyncio.sleep(1)
if not ipfs_hash:
await self.update_status_message(
status_msg,
f'\n[{timestamp_pretty()}] <b>timeout processing request</b>',
parse_mode='HTML'
)
return
tx_link = hlink(
'Your result on Skynet Explorer',
f'https://explorer.{DEFAULT_DOMAIN}/v2/explore/transaction/{tx_hash}'
)
await self.append_status_message(
status_msg,
f' <b>request processed!</b>\n'
f'<b>{tx_link}</b>\n'
f'[{timestamp_pretty()}] <i>trying to download image...</i>\n',
parse_mode='HTML'
)
caption = generate_reply_caption(
user, params, tx_hash, worker, reward)
# attempt to get the image and send it
ipfs_link = f'https://ipfs.{DEFAULT_DOMAIN}/ipfs/{ipfs_hash}/image.png'
resp = await get_ipfs_file(ipfs_link)
if not resp or resp.status_code != 200:
logging.error(f'couldn\'t get ipfs hosted image at {ipfs_link}!')
await self.update_status_message(
status_msg,
caption,
reply_markup=build_redo_menu(),
parse_mode='HTML'
)
return
png_img = resp.raw
with Image.open(io.BytesIO(resp.raw)) as image:
w, h = image.size
if w > TG_MAX_WIDTH or h > TG_MAX_HEIGHT:
logging.warning(f'result is of size {image.size}')
image.thumbnail((TG_MAX_WIDTH, TG_MAX_HEIGHT))
tmp_buf = io.BytesIO()
image.save(tmp_buf, format='PNG')
png_img = tmp_buf.getvalue()
logging.info('success! sending generated image')
await self.bot.delete_message(
chat_id=status_msg.chat.id, message_id=status_msg.id)
if file_id: # img2img
await self.bot.send_media_group(
status_msg.chat.id,
media=[
InputMediaPhoto(file_id),
InputMediaPhoto(
png_img,
caption=caption,
parse_mode='HTML'
)
],
)
else: # txt2img
await self.bot.send_photo(
status_msg.chat.id,
caption=caption,
photo=png_img,
reply_markup=build_redo_menu(),
parse_mode='HTML'
)
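A hedged usage sketch of the frontend above; every credential and URL below is a placeholder, and start() is what wires up the handlers via create_handler_context:

import trio_asyncio

async def main():
    frontend = SkynetTelegramFrontend(
        token='<tg-bot-token>',             # placeholder
        account='telegram',                 # account bootstrapped in nodeos.py
        permission='active',
        node_url='http://127.0.0.1:42000',  # assumed local testnet endpoint
        hyperion_url='http://127.0.0.1:42001',  # assumed hyperion endpoint
        db_host='localhost:5432',
        db_user='skynet',
        db_pass='password',
        remote_ipfs_node='/ip4/1.2.3.4/tcp/4001/p2p/<peer-id>',  # placeholder
        key='<eosio-private-key>'           # placeholder
    )
    async with frontend.open():
        # AsyncTeleBot's asyncio loop runs under trio via trio_asyncio
        await trio_asyncio.aio_as_trio(frontend.bot.infinity_polling)()

trio_asyncio.run(main)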

View File

@ -0,0 +1,354 @@
#!/usr/bin/python
import io
import json
import logging
from datetime import datetime, timedelta
from PIL import Image
from telebot.types import CallbackQuery, Message
from skynet.frontend import validate_user_config_request
from skynet.constants import *
def create_handler_context(frontend: 'SkynetTelegramFrontend'):
bot = frontend.bot
cleos = frontend.cleos
db_call = frontend.db_call
work_request = frontend.work_request
ipfs_node = frontend.ipfs_node
# generic / simple handlers
@bot.message_handler(commands=['help'])
async def send_help(message):
splt_msg = message.text.split(' ')
if len(splt_msg) == 1:
await bot.reply_to(message, HELP_TEXT)
else:
param = splt_msg[1]
if param in HELP_TOPICS:
await bot.reply_to(message, HELP_TOPICS[param])
else:
await bot.reply_to(message, HELP_UNKWNOWN_PARAM)
@bot.message_handler(commands=['cool'])
async def send_cool_words(message):
await bot.reply_to(message, '\n'.join(COOL_WORDS))
@bot.message_handler(commands=['queue'])
async def queue(message):
an_hour_ago = datetime.now() - timedelta(hours=1)
queue = await cleos.aget_table(
'telos.gpu', 'telos.gpu', 'queue',
index_position=2,
key_type='i64',
sort='desc',
lower_bound=int(an_hour_ago.timestamp())
)
await bot.reply_to(
message, f'Total requests on skynet queue: {len(queue)}')
@bot.message_handler(commands=['config'])
async def set_config(message):
user = message.from_user.id
try:
attr, val, reply_txt = validate_user_config_request(
message.text)
logging.info(f'user config update: {attr} to {val}')
await db_call('update_user_config', user, attr, val)
logging.info('done')
except BaseException as e:
reply_txt = str(e)
finally:
await bot.reply_to(message, reply_txt)
@bot.message_handler(commands=['stats'])
async def user_stats(message):
user = message.from_user.id
await db_call('get_or_create_user', user)
generated, joined, role = await db_call('get_user_stats', user)
stats_str = f'generated: {generated}\n'
stats_str += f'joined: {joined}\n'
stats_str += f'role: {role}\n'
await bot.reply_to(
message, stats_str)
@bot.message_handler(commands=['donate'])
async def donation_info(message):
await bot.reply_to(
message, DONATION_INFO)
@bot.message_handler(commands=['say'])
async def say(message):
chat = message.chat
user = message.from_user
if (chat.type == 'group') or (user.id != 383385940):
return
await bot.send_message(GROUP_ID, message.text[4:])
# generic txt2img handler
async def _generic_txt2img(message_or_query):
if isinstance(message_or_query, CallbackQuery):
query = message_or_query
message = query.message
user = query.from_user
chat = query.message.chat
else:
message = message_or_query
user = message.from_user
chat = message.chat
if chat.type == 'private':
return
reply_id = None
if chat.type == 'group' and chat.id == GROUP_ID:
reply_id = message.message_id
user_row = await db_call('get_or_create_user', user.id)
# init new msg
init_msg = 'started processing txt2img request...'
status_msg = await bot.reply_to(message, init_msg)
await db_call(
'new_user_request', user.id, message.id, status_msg.id, status=init_msg)
prompt = ' '.join(message.text.split(' ')[1:])
if len(prompt) == 0:
await bot.edit_message_text(
'Empty text prompt ignored.',
chat_id=status_msg.chat.id,
message_id=status_msg.id
)
await db_call('update_user_request', status_msg.id, 'Empty text prompt ignored.')
return
logging.info(f'mid: {message.id}')
user_config = {**user_row}
del user_config['id']
params = {
'prompt': prompt,
**user_config
}
await db_call(
'update_user_stats', user.id, 'txt2img', last_prompt=prompt)
ec = await work_request(user, status_msg, 'txt2img', params)
if ec == 0:
await db_call('increment_generated', user.id)
# generic img2img handler
async def _generic_img2img(message_or_query):
if isinstance(message_or_query, CallbackQuery):
query = message_or_query
message = query.message
user = query.from_user
chat = query.message.chat
else:
message = message_or_query
user = message.from_user
chat = message.chat
if chat.type == 'private':
return
reply_id = None
if chat.type == 'group' and chat.id == GROUP_ID:
reply_id = message.message_id
user_row = await db_call('get_or_create_user', user.id)
# init new msg
init_msg = 'started processing img2img request...'
status_msg = await bot.reply_to(message, init_msg)
await db_call(
'new_user_request', user.id, message.id, status_msg.id, status=init_msg)
if not message.caption.startswith('/img2img'):
await bot.reply_to(
message,
'For image to image you need to add /img2img to the beginning of your caption'
)
return
prompt = ' '.join(message.caption.split(' ')[1:])
if len(prompt) == 0:
await bot.reply_to(message, 'Empty text prompt ignored.')
return
file_id = message.photo[-1].file_id
file_path = (await bot.get_file(file_id)).file_path
image_raw = await bot.download_file(file_path)
with Image.open(io.BytesIO(image_raw)) as image:
w, h = image.size
if w > 512 or h > 512:
logging.warning(f'user sent img of size {image.size}')
image.thumbnail((512, 512))
logging.warning(f'resized it to {image.size}')
image.save('ipfs-docker-staging/image.png', format='PNG')
ipfs_hash = ipfs_node.add('image.png')
ipfs_node.pin(ipfs_hash)
logging.info(f'published input image {ipfs_hash} on ipfs')
logging.info(f'mid: {message.id}')
user_config = {**user_row}
del user_config['id']
params = {
'prompt': prompt,
**user_config
}
await db_call(
'update_user_stats',
user.id,
'img2img',
last_file=file_id,
last_prompt=prompt,
last_binary=ipfs_hash
)
ec = await work_request(
user, status_msg, 'img2img', params,
file_id=file_id,
binary_data=ipfs_hash
)
if ec == 0:
await db_call('increment_generated', user.id)
# generic redo handler
async def _redo(message_or_query):
is_query = False
if isinstance(message_or_query, CallbackQuery):
is_query = True
query = message_or_query
message = query.message
user = query.from_user
chat = query.message.chat
elif isinstance(message_or_query, Message):
message = message_or_query
user = message.from_user
chat = message.chat
if chat.type == 'private':
return
init_msg = 'started processing redo request...'
if is_query:
status_msg = await bot.send_message(chat.id, init_msg)
else:
status_msg = await bot.reply_to(message, init_msg)
method = await db_call('get_last_method_of', user.id)
prompt = await db_call('get_last_prompt_of', user.id)
file_id = None
binary = ''
if method == 'img2img':
file_id = await db_call('get_last_file_of', user.id)
binary = await db_call('get_last_binary_of', user.id)
if not prompt:
await bot.reply_to(
message,
'no last prompt found, do a txt2img cmd first!'
)
return
user_row = await db_call('get_or_create_user', user.id)
await db_call(
'new_user_request', user.id, message.id, status_msg.id, status=init_msg)
user_config = {**user_row}
del user_config['id']
params = {
'prompt': prompt,
**user_config
}
await work_request(
user, status_msg, 'redo', params,
file_id=file_id,
binary_data=binary
)
# "proxy" handlers just request routers
@bot.message_handler(commands=['txt2img'])
async def send_txt2img(message):
await _generic_txt2img(message)
@bot.message_handler(func=lambda message: True, content_types=[
'photo', 'document'])
async def send_img2img(message):
await _generic_img2img(message)
@bot.message_handler(commands=['img2img'])
async def img2img_missing_image(message):
await bot.reply_to(
message,
'seems you tried to do an img2img command without sending an image'
)
@bot.message_handler(commands=['redo'])
async def redo(message):
await _redo(message)
@bot.callback_query_handler(func=lambda call: True)
async def callback_query(call):
msg = json.loads(call.data)
logging.info(call.data)
method = msg.get('method')
match method:
case 'redo':
await _redo(call)
# catch all handler for things we dont support
@bot.message_handler(func=lambda message: True)
async def echo_message(message):
if message.text[0] == '/':
await bot.reply_to(message, UNKNOWN_CMD_TEXT)

View File

@ -0,0 +1,106 @@
#!/usr/bin/python
import json
import logging
import traceback
from datetime import datetime, timezone
from telebot.types import InlineKeyboardButton, InlineKeyboardMarkup
from telebot.async_telebot import ExceptionHandler
from telebot.formatting import hlink
from skynet.constants import *
def timestamp_pretty():
return datetime.now(timezone.utc).strftime('%H:%M:%S')
def tg_user_pretty(tguser):
if tguser.username:
return f'@{tguser.username}'
else:
return f'{tguser.first_name} id: {tguser.id}'
class SKYExceptionHandler(ExceptionHandler):
def handle(exception):
traceback.print_exc()
def build_redo_menu():
btn_redo = InlineKeyboardButton("Redo", callback_data=json.dumps({'method': 'redo'}))
inline_keyboard = InlineKeyboardMarkup()
inline_keyboard.add(btn_redo)
return inline_keyboard
def prepare_metainfo_caption(tguser, worker: str, reward: str, meta: dict) -> str:
prompt = meta["prompt"]
if len(prompt) > 256:
prompt = prompt[:256]
meta_str = f'<u>by {tg_user_pretty(tguser)}</u>\n'
meta_str += f'<i>performed by {worker}</i>\n'
meta_str += f'<b><u>reward: {reward}</u></b>\n'
meta_str += f'<code>prompt:</code> {prompt}\n'
meta_str += f'<code>seed: {meta["seed"]}</code>\n'
meta_str += f'<code>step: {meta["step"]}</code>\n'
meta_str += f'<code>guidance: {meta["guidance"]}</code>\n'
if meta['strength']:
meta_str += f'<code>strength: {meta["strength"]}</code>\n'
meta_str += f'<code>algo: {meta["model"]}</code>\n'
if meta['upscaler']:
meta_str += f'<code>upscaler: {meta["upscaler"]}</code>\n'
meta_str += f'<b><u>Made with Skynet v{VERSION}</u></b>\n'
meta_str += f'<b>JOIN THE SWARM: @skynetgpu</b>'
return meta_str
def generate_reply_caption(
tguser, # telegram user
params: dict,
tx_hash: str,
worker: str,
reward: str
):
explorer_link = hlink(
'SKYNET Transaction Explorer',
f'https://explorer.{DEFAULT_DOMAIN}/v2/explore/transaction/{tx_hash}'
)
meta_info = prepare_metainfo_caption(tguser, worker, reward, params)
final_msg = '\n'.join([
f'<b><i>{explorer_link}</i></b>',
f'{meta_info}'
])
logging.info(final_msg)
return final_msg
async def get_global_config(cleos):
return (await cleos.aget_table(
'telos.gpu', 'telos.gpu', 'config'))[0]
async def get_user_nonce(cleos, user: str):
return (await cleos.aget_table(
'telos.gpu', 'telos.gpu', 'users',
index_position=1,
key_type='name',
lower_bound=user,
upper_bound=user
))[0]['nonce']

View File

@ -0,0 +1,45 @@
#!/usr/bin/python
import logging
import asks
import requests
class IPFSHTTP:
def __init__(self, endpoint: str):
self.endpoint = endpoint
def pin(self, cid: str):
return requests.post(
f'{self.endpoint}/api/v0/pin/add',
params={'arg': cid}
)
async def a_pin(self, cid: str):
return await asks.post(
f'{self.endpoint}/api/v0/pin/add',
params={'arg': cid}
)
async def get_ipfs_file(ipfs_link: str):
logging.info(f'attempting to get image at {ipfs_link}')
resp = None
for i in range(20):
try:
resp = await asks.get(ipfs_link, timeout=3)
except asks.errors.RequestTimeout:
logging.warning('timeout...')
except asks.errors.BadHttpResponse as e:
logging.error(f'ipfs gateway exception:\n{e}')
if resp:
logging.info(f'status_code: {resp.status_code}')
else:
logging.error('timeout')
return resp
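A minimal sketch of both helpers together; the endpoint, gateway URL and CID below are placeholders:

import trio

async def demo():
    ipfs = IPFSHTTP('http://127.0.0.1:5001')   # assumed local node API port
    resp = await ipfs.a_pin('QmExampleCid')    # placeholder CID
    print('pin status:', resp.status_code)

    img = await get_ipfs_file('https://ipfs.example.com/ipfs/QmExampleCid/image.png')
    if img and img.status_code == 200:
        print(f'fetched {len(img.body)} bytes')  # asks responses expose .body

trio.run(demo)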

View File

@ -0,0 +1,102 @@
#!/usr/bin/python
import os
import sys
import logging
from pathlib import Path
from contextlib import contextmanager as cm
import docker
from docker.types import Mount
from docker.models.containers import Container
class IPFSDocker:
def __init__(self, container: Container):
self._container = container
def add(self, file: str) -> str:
ec, out = self._container.exec_run(
['ipfs', 'add', '-w', f'/export/{file}', '-Q'])
if ec != 0:
logging.error(out)
assert ec == 0
return out.decode().rstrip()
def pin(self, ipfs_hash: str):
ec, _ = self._container.exec_run(
['ipfs', 'pin', 'add', ipfs_hash])
assert ec == 0
def connect(self, remote_node: str):
ec, out = self._container.exec_run(
['ipfs', 'swarm', 'connect', remote_node])
if ec != 0:
logging.error(out)
assert ec == 0
def check_connect(self):
ec, out = self._container.exec_run(
['ipfs', 'swarm', 'peers'])
if ec != 0:
logging.error(out)
assert ec == 0
return out.splitlines()
@cm
def open_ipfs_node(name='skynet-ipfs'):
dclient = docker.from_env()
try:
container = dclient.containers.get(name)
except docker.errors.NotFound:
staging_dir = Path().resolve() / 'ipfs-docker-staging'
staging_dir.mkdir(parents=True, exist_ok=True)
data_dir = Path().resolve() / 'ipfs-docker-data'
data_dir.mkdir(parents=True, exist_ok=True)
export_target = '/export'
data_target = '/data/ipfs'
container = dclient.containers.run(
'ipfs/go-ipfs:latest',
name='skynet-ipfs',
ports={
'8080/tcp': 8080,
'4001/tcp': 4001,
'5001/tcp': ('127.0.0.1', 5001)
},
mounts=[
Mount(export_target, str(staging_dir), 'bind'),
Mount(data_target, str(data_dir), 'bind')
],
detach=True,
remove=True
)
if sys.platform != 'win32':
uid = os.getuid()
gid = os.getgid()
ec, out = container.exec_run(['chown', f'{uid}:{gid}', '-R', export_target])
logging.info(out)
assert ec == 0
ec, out = container.exec_run(['chown', f'{uid}:{gid}', '-R', data_target])
logging.info(out)
assert ec == 0
for log in container.logs(stream=True):
log = log.decode().rstrip()
logging.info(log)
if 'Daemon is ready' in log:
break
yield IPFSDocker(container)
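A short usage sketch; the file must already sit in ./ipfs-docker-staging, which is bind-mounted at /export inside the container:

with open_ipfs_node() as node:
    cid = node.add('image.png')   # runs `ipfs add -w /export/image.png -Q`
    node.pin(cid)
    print('published as', cid)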

View File

@ -0,0 +1,128 @@
#!/usr/bin/python
import logging
import traceback
from datetime import datetime, timedelta
import trio
from leap.hyperion import HyperionAPI
from . import IPFSHTTP
MAX_TIME = timedelta(seconds=20)
class SkynetPinner:
def __init__(
self,
hyperion: HyperionAPI,
ipfs_http: IPFSHTTP
):
self.hyperion = hyperion
self.ipfs_http = ipfs_http
self._pinned = {}
self._now = datetime.now()
def is_pinned(self, cid: str):
pin_time = self._pinned.get(cid)
return pin_time
def pin_cids(self, cids: list[str]):
for cid in cids:
self._pinned[cid] = self._now
def cleanup_old_cids(self):
cids = list(self._pinned.keys())
for cid in cids:
if (self._now - self._pinned[cid]) > MAX_TIME * 2:
del self._pinned[cid]
async def capture_enqueues(self, after: datetime):
enqueues = await self.hyperion.aget_actions(
account='telos.gpu',
filter='telos.gpu:enqueue',
sort='desc',
after=after.isoformat(),
limit=1000
)
logging.info(f'got {len(enqueues["actions"])} enqueue actions.')
cids = []
for action in enqueues['actions']:
cid = action['act']['data']['binary_data']
if cid and not self.is_pinned(cid):
cids.append(cid)
return cids
async def capture_submits(self, after: datetime):
submits = await self.hyperion.aget_actions(
account='telos.gpu',
filter='telos.gpu:submit',
sort='desc',
after=after.isoformat(),
limit=1000
)
logging.info(f'got {len(submits["actions"])} submit actions.')
cids = []
for action in submits['actions']:
cid = action['act']['data']['ipfs_hash']
if cid and not self.is_pinned(cid):
cids.append(cid)
return cids
async def task_pin(self, cid: str):
logging.info(f'pinning {cid}...')
for _ in range(6):
try:
with trio.move_on_after(5):
resp = await self.ipfs_http.a_pin(cid)
if resp.status_code != 200:
logging.error(f'error pinning {cid}:\n{resp.text}')
del self._pinned[cid]
else:
logging.info(f'pinned {cid}')
return
except trio.TooSlowError:
logging.error(f'timed out pinning {cid}')
logging.error(f'gave up pinning {cid}')
async def pin_forever(self):
async with trio.open_nursery() as n:
while True:
try:
self._now = datetime.now()
self.cleanup_old_cids()
prev_second = self._now - MAX_TIME
cids = [
*(await self.capture_enqueues(prev_second)),
*(await self.capture_submits(prev_second))
]
self.pin_cids(cids)
for cid in cids:
n.start_soon(self.task_pin, cid)
except OSError as e:
traceback.print_exc()
except KeyboardInterrupt:
break
await trio.sleep(1)
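Wiring the pinner up is a matter of pointing it at a hyperion endpoint and a local IPFS API; both URLs below are assumptions:

import trio
from leap.hyperion import HyperionAPI
from skynet.ipfs import IPFSHTTP

def main():
    hyperion = HyperionAPI('https://testnet.example.net/v2')  # placeholder endpoint
    ipfs = IPFSHTTP('http://127.0.0.1:5001')                  # assumed local API port
    pinner = SkynetPinner(hyperion, ipfs)
    trio.run(pinner.pin_forever)

if __name__ == '__main__':
    main()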

View File

@ -1,33 +0,0 @@
class ModelStore:
def __init__(
self,
max_models: int = 2
):
self.max_models = max_models
self._models = {}
def get(self, model_name: str):
if model_name in self._models:
return self._models[model_name]['pipe']
if len(self._models) >= self.max_models:
least_used = list(self._models.keys())[0]
for model in self._models:
if self._models[least_used]['generated'] > self._models[model]['generated']:
least_used = model
del self._models[least_used]
gc.collect()
pipe = pipeline_for(model_name)
self._models[model_name] = {
'pipe': pipe,
'generated': 0
}
return pipe
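The eviction policy above drops the cached model with the fewest generated images (least-frequently-used). A hedged usage sketch of this (now removed) store; the model names are illustrative and gc plus pipeline_for are assumed to be imported by the real module:

store = ModelStore(max_models=2)
pipe_a = store.get('modelA')   # cache miss: loads and caches
pipe_b = store.get('modelB')   # cache miss: loads and caches
store._models['modelA']['generated'] += 1
pipe_c = store.get('modelC')   # store full: evicts 'modelB', the least used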

View File

@ -1,341 +0,0 @@
#!/usr/bin/python
import zlib
import socket
from typing import Callable, Awaitable, Optional
from pathlib import Path
from contextlib import asynccontextmanager as acm
from cryptography import x509
from cryptography.hazmat.primitives import serialization
import trio
import pynng
from pynng import TLSConfig, Context
from .protobuf import *
from .constants import *
def get_random_port():
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind(('', 0))
return s.getsockname()[1]
def load_certs(
certs_dir: str,
cert_name: str,
key_name: str
):
certs_dir = Path(certs_dir).resolve()
tls_key_data = (certs_dir / key_name).read_bytes()
tls_key = serialization.load_pem_private_key(
tls_key_data,
password=None
)
tls_cert_data = (certs_dir / cert_name).read_bytes()
tls_cert = x509.load_pem_x509_certificate(
tls_cert_data
)
tls_whitelist = {}
for cert_path in (*(certs_dir / 'whitelist').glob('*.cert'), certs_dir / 'brain.cert'):
tls_whitelist[cert_path.stem] = x509.load_pem_x509_certificate(
cert_path.read_bytes()
)
return (
SessionTLSConfig(
TLSConfig.MODE_SERVER,
own_key_string=tls_key_data,
own_cert_string=tls_cert_data
),
tls_whitelist
)
def load_certs_client(
certs_dir: str,
cert_name: str,
key_name: str,
ca_name: Optional[str] = None
):
certs_dir = Path(certs_dir).resolve()
if not ca_name:
ca_name = 'brain.cert'
ca_cert_data = (certs_dir / ca_name).read_bytes()
tls_key_data = (certs_dir / key_name).read_bytes()
tls_cert_data = (certs_dir / cert_name).read_bytes()
tls_whitelist = {}
for cert_path in (*(certs_dir / 'whitelist').glob('*.cert'), certs_dir / 'brain.cert'):
tls_whitelist[cert_path.stem] = x509.load_pem_x509_certificate(
cert_path.read_bytes()
)
return (
SessionTLSConfig(
TLSConfig.MODE_CLIENT,
own_key_string=tls_key_data,
own_cert_string=tls_cert_data,
ca_string=ca_cert_data
),
tls_whitelist
)
class SessionError(BaseException):
...
class SessionTLSConfig(TLSConfig):
def __init__(
self,
mode,
server_name=None,
ca_string=None,
own_key_string=None,
own_cert_string=None,
auth_mode=None,
ca_files=None,
cert_key_file=None,
passwd=None
):
super().__init__(
mode,
server_name=server_name,
ca_string=ca_string,
own_key_string=own_key_string,
own_cert_string=own_cert_string,
auth_mode=auth_mode,
ca_files=ca_files,
cert_key_file=cert_key_file,
passwd=passwd
)
if ca_string:
self.ca_cert = x509.load_pem_x509_certificate(ca_string)
self.cert = x509.load_pem_x509_certificate(own_cert_string)
self.key = serialization.load_pem_private_key(
own_key_string,
password=passwd
)
class SessionServer:
def __init__(
self,
addr: str,
msg_handler: Callable[
[SkynetRPCRequest, Context], Awaitable[SkynetRPCResponse]
],
cert_name: Optional[str] = None,
key_name: Optional[str] = None,
cert_dir: str = DEFAULT_CERTS_DIR,
recv_max_size = 0
):
self.addr = addr
self.msg_handler = msg_handler
self.cert_name = cert_name
self.tls_config = None
self.tls_whitelist = None
if cert_name and key_name:
self.cert_name = cert_name
self.tls_config, self.tls_whitelist = load_certs(
cert_dir, cert_name, key_name)
self.addr = 'tls+' + self.addr
self.recv_max_size = recv_max_size
async def _handle_msg(self, req: SkynetRPCRequest, ctx: Context):
resp = await self.msg_handler(req, ctx)
if self.tls_config:
resp.auth.cert = 'skynet'
resp.auth.sig = sign_protobuf_msg(
resp, self.tls_config.key)
raw_msg = zlib.compress(resp.SerializeToString())
await ctx.asend(raw_msg)
ctx.close()
async def _listener(self, sock):
async with trio.open_nursery() as n:
while True:
ctx = sock.new_context()
raw_msg = await ctx.arecv()
raw_size = len(raw_msg)
logging.debug(f'rpc server new msg {raw_size} bytes')
try:
msg = zlib.decompress(raw_msg)
msg_size = len(msg)
except zlib.error:
logging.warning(f'Zlib decompress error, dropping msg of size {len(raw_msg)}')
continue
logging.debug(f'msg after decompress {msg_size} bytes, +{msg_size - raw_size} bytes')
req = SkynetRPCRequest()
try:
req.ParseFromString(msg)
except google.protobuf.message.DecodeError:
logging.warning(f'Dropping malformed msg of size {len(msg)}')
continue
logging.debug(f'msg method: {req.method}')
if self.tls_config:
if req.auth.cert not in self.tls_whitelist:
logging.warning(
f'{req.auth.cert} not in tls whitelist')
continue
try:
verify_protobuf_msg(req, self.tls_whitelist[req.auth.cert])
except ValueError:
logging.warning(
f'{req.auth.cert} sent an unauthenticated msg')
continue
n.start_soon(self._handle_msg, req, ctx)
@acm
async def open(self):
with pynng.Rep0(
recv_max_size=self.recv_max_size
) as sock:
if self.tls_config:
sock.tls_config = self.tls_config
sock.listen(self.addr)
logging.debug(f'server socket listening at {self.addr}')
async with trio.open_nursery() as n:
n.start_soon(self._listener, sock)
try:
yield self
finally:
n.cancel_scope.cancel()
logging.debug('server socket is off.')
class SessionClient:
def __init__(
self,
connect_addr: str,
uid: str,
cert_name: Optional[str] = None,
key_name: Optional[str] = None,
ca_name: Optional[str] = None,
cert_dir: str = DEFAULT_CERTS_DIR,
recv_max_size = 0
):
self.uid = uid
self.connect_addr = connect_addr
self.cert_name = None
self.tls_config = None
self.tls_whitelist = None
self.tls_cert = None
self.tls_key = None
if cert_name and key_name:
self.cert_name = Path(cert_name).stem
self.tls_config, self.tls_whitelist = load_certs_client(
cert_dir, cert_name, key_name, ca_name=ca_name)
if not self.connect_addr.startswith('tls'):
self.connect_addr = 'tls+' + self.connect_addr
self.recv_max_size = recv_max_size
self._connected = False
self._sock = None
def connect(self):
self._sock = pynng.Req0(
recv_max_size=0,
name=self.uid
)
if self.tls_config:
self._sock.tls_config = self.tls_config
logging.debug(f'client is dialing {self.connect_addr}...')
self._sock.dial(self.connect_addr, block=True)
self._connected = True
logging.debug(f'client is connected to {self.connect_addr}')
def disconnect(self):
self._sock.close()
self._connected = False
logging.debug('client disconnected.')
async def rpc(
self,
method: str,
params: dict = {},
binext: Optional[bytes] = None,
timeout: float = 2.
):
if not self._connected:
raise SessionError('tried to use rpc without connecting')
req = SkynetRPCRequest()
req.uid = self.uid
req.method = method
req.params.update(params)
if binext:
logging.debug('added binary extension')
req.bin = binext
if self.tls_config:
req.auth.cert = self.cert_name
req.auth.sig = sign_protobuf_msg(req, self.tls_config.key)
with trio.fail_after(timeout):
ctx = self._sock.new_context()
raw_req = zlib.compress(req.SerializeToString())
logging.debug(f'rpc client sending new msg {method} of size {len(raw_req)}')
await ctx.asend(raw_req)
logging.debug('sent, awaiting response...')
raw_resp = await ctx.arecv()
logging.debug(f'rpc client got response of size {len(raw_resp)}')
raw_resp = zlib.decompress(raw_resp)
resp = SkynetRPCResponse()
resp.ParseFromString(raw_resp)
ctx.close()
if self.tls_config:
verify_protobuf_msg(resp, self.tls_config.ca_cert)
return resp
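For context on this (now removed) RPC layer, a hedged sketch of a plain-TCP round trip; the echo handler is hypothetical and TLS is left off for brevity:

import trio

async def echo_handler(req, ctx):
    resp = SkynetRPCResponse()
    resp.result.update({'ok': True, 'method': req.method})
    return resp

async def main():
    addr = f'tcp://127.0.0.1:{get_random_port()}'
    server = SessionServer(addr, echo_handler)
    async with server.open():
        client = SessionClient(addr, uid='demo-client')
        client.connect()
        resp = await client.rpc('ping', {'hello': 'world'})
        print(resp.result)
        client.disconnect()

trio.run(main)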

177
skynet/nodeos.py 100644
View File

@ -0,0 +1,177 @@
#!/usr/bin/env python3
import json
import time
import logging
from datetime import datetime
from contextlib import contextmanager as cm
import docker
from pytz import timezone
from leap.cleos import CLEOS, default_nodeos_image
from leap.sugar import get_container, Symbol, random_string
@cm
def open_cleos(
node_url: str,
key: str | None
):
vtestnet = None
try:
dclient = docker.from_env()
vtestnet = get_container(
dclient,
default_nodeos_image(),
name=f'skynet-wallet-{random_string(size=8)}',
force_unique=True,
detach=True,
network='host',
remove=True)
cleos = CLEOS(dclient, vtestnet, url=node_url, remote=node_url)
if key:
cleos.setup_wallet(key)
yield cleos
finally:
if vtestnet:
vtestnet.stop()
@cm
def open_nodeos(cleanup: bool = True):
dclient = docker.from_env()
vtestnet = get_container(
dclient,
'guilledk/skynet:leap-4.0.1',
name='skynet-nodeos',
force_unique=True,
detach=True,
network='host')
try:
cleos = CLEOS(
dclient, vtestnet,
url='http://127.0.0.1:42000',
remote='http://127.0.0.1:42000'
)
cleos.start_keosd()
priv, pub = cleos.create_key_pair()
logging.info(f'SUDO KEYS: {(priv, pub)}')
cleos.setup_wallet(priv)
genesis = json.dumps({
"initial_timestamp": '2017-08-29T02:14:00.000',
"initial_key": pub,
"initial_configuration": {
"max_block_net_usage": 1048576,
"target_block_net_usage_pct": 1000,
"max_transaction_net_usage": 1048575,
"base_per_transaction_net_usage": 12,
"net_usage_leeway": 500,
"context_free_discount_net_usage_num": 20,
"context_free_discount_net_usage_den": 100,
"max_block_cpu_usage": 200000,
"target_block_cpu_usage_pct": 1000,
"max_transaction_cpu_usage": 150000,
"min_transaction_cpu_usage": 100,
"max_transaction_lifetime": 3600,
"deferred_trx_expiration_window": 600,
"max_transaction_delay": 3888000,
"max_inline_action_size": 4096,
"max_inline_action_depth": 4,
"max_authority_depth": 6
}
}, indent=4)
ec, out = cleos.run(
['bash', '-c', f'echo \'{genesis}\' > /root/skynet.json'])
assert ec == 0
place_holder = 'EOS5fLreY5Zq5owBhmNJTgQaLqQ4ufzXSTpStQakEyfxNFuUEgNs1=KEY:5JnvSc6pewpHHuUHwvbJopsew6AKwiGnexwDRc2Pj2tbdw6iML9'
sig_provider = f'{pub}=KEY:{priv}'
nodeos_config_ini = '/root/nodeos/config.ini'
ec, out = cleos.run(
['bash', '-c', f'sed -i -e \'s/{place_holder}/{sig_provider}/g\' {nodeos_config_ini}'])
assert ec == 0
cleos.start_nodeos_from_config(
nodeos_config_ini,
data_dir='/root/nodeos/data',
genesis='/root/skynet.json',
state_plugin=True)
time.sleep(0.5)
cleos.wait_blocks(1)
cleos.boot_sequence(token_sym=Symbol('GPU', 4))
priv, pub = cleos.create_key_pair()
cleos.import_key(priv)
cleos.private_keys['telos.gpu'] = priv
logging.info(f'GPU KEYS: {(priv, pub)}')
cleos.new_account('telos.gpu', ram=4200000, key=pub)
for i in range(1, 4):
priv, pub = cleos.create_key_pair()
cleos.import_key(priv)
cleos.private_keys[f'testworker{i}'] = priv
logging.info(f'testworker{i} KEYS: {(priv, pub)}')
cleos.create_account_staked(
'eosio', f'testworker{i}', key=pub)
priv, pub = cleos.create_key_pair()
cleos.import_key(priv)
logging.info(f'TELEGRAM KEYS: {(priv, pub)}')
cleos.create_account_staked(
'eosio', 'telegram', ram=500000, key=pub)
cleos.transfer_token(
'eosio', 'telegram', '1000000.0000 GPU', 'Initial testing funds')
cleos.deploy_contract_from_host(
'telos.gpu',
'tests/contracts/telos.gpu',
verify_hash=False,
create_account=False
)
ec, out = cleos.push_action(
'telos.gpu',
'config',
['eosio.token', '4,GPU'],
'telos.gpu@active'
)
assert ec == 0
ec, out = cleos.transfer_token(
'telegram', 'telos.gpu', '1000000.0000 GPU', 'Initial testing funds')
assert ec == 0
user_row = cleos.get_table(
'telos.gpu',
'telos.gpu',
'users',
index_position=1,
key_type='name',
lower_bound='telegram',
upper_bound='telegram'
)
assert len(user_row) == 1
yield cleos
finally:
# ec, out = cleos.list_all_keys()
# logging.info(out)
if cleanup:
vtestnet.stop()
vtestnet.remove()
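A hedged sketch of driving the fixture; push_action returns an (exit code, output) pair, as used throughout the bootstrap above:

with open_nodeos(cleanup=True) as cleos:
    # the chain comes up with telos.gpu deployed and the 'telegram'
    # account funded, so actions can be pushed right away
    ec, out = cleos.push_action(
        'telos.gpu', 'clean', [], 'telos.gpu@active')
    assert ec == 0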

View File

@ -1,4 +0,0 @@
#!/usr/bin/python
from .auth import *
from .skynet_pb2 import *

View File

@ -1,69 +0,0 @@
#!/usr/bin/python
import json
import logging
from hashlib import sha256
from collections import OrderedDict
from google.protobuf.json_format import MessageToDict
from cryptography.hazmat.primitives import serialization, hashes
from cryptography.hazmat.primitives.asymmetric import padding
from .skynet_pb2 import *
def serialize_msg_deterministic(msg):
descriptors = sorted(
type(msg).DESCRIPTOR.fields_by_name.items(),
key=lambda x: x[0]
)
shasum = sha256()
def hash_dict(d):
data = [
(key, val)
for (key, val) in d.items()
]
for key, val in sorted(data, key=lambda x: x[0]):
if not isinstance(val, dict):
shasum.update(key.encode())
shasum.update(json.dumps(val).encode())
else:
hash_dict(val)
for (field_name, field_descriptor) in descriptors:
if not field_descriptor.message_type:
shasum.update(field_name.encode())
value = getattr(msg, field_name)
if isinstance(value, bytes):
value = value.hex()
shasum.update(json.dumps(value).encode())
continue
if field_descriptor.message_type.name == 'Struct':
hash_dict(MessageToDict(getattr(msg, field_name)))
deterministic_msg = shasum.digest()
return deterministic_msg
def sign_protobuf_msg(msg, key):
return key.sign(
serialize_msg_deterministic(msg),
padding.PKCS1v15(),
hashes.SHA256()
).hex()
def verify_protobuf_msg(msg, cert):
return cert.public_key().verify(
bytes.fromhex(msg.auth.sig),
serialize_msg_deterministic(msg),
padding.PKCS1v15(),
hashes.SHA256()
)
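A hedged round-trip sketch of the signing helpers above; the RSA key here is generated ad hoc, whereas the real code loads keys and x509 certs through the network layer:

from cryptography.hazmat.primitives.asymmetric import rsa

key = rsa.generate_private_key(public_exponent=65537, key_size=2048)

req = SkynetRPCRequest()
req.uid = 'demo'
req.method = 'ping'
req.auth.cert = 'demo'  # auth is not a Struct field, so it is excluded from the digest
req.auth.sig = sign_protobuf_msg(req, key)
# verify_protobuf_msg(req, cert) recomputes the same digest and checks it
# against cert.public_key(), raising on mismatch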

View File

@ -1,24 +0,0 @@
syntax = "proto3";
package skynet;
import "google/protobuf/struct.proto";
message Auth {
string cert = 1;
string sig = 2;
}
message SkynetRPCRequest {
string uid = 1;
string method = 2;
google.protobuf.Struct params = 3;
optional bytes bin = 4;
optional Auth auth = 5;
}
message SkynetRPCResponse {
google.protobuf.Struct result = 1;
optional bytes bin = 2;
optional Auth auth = 3;
}

View File

@ -1,30 +0,0 @@
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: skynet.proto
"""Generated protocol buffer code."""
from google.protobuf.internal import builder as _builder
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.protobuf import struct_pb2 as google_dot_protobuf_dot_struct__pb2
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x0cskynet.proto\x12\x06skynet\x1a\x1cgoogle/protobuf/struct.proto\"!\n\x04\x41uth\x12\x0c\n\x04\x63\x65rt\x18\x01 \x01(\t\x12\x0b\n\x03sig\x18\x02 \x01(\t\"\x9c\x01\n\x10SkynetRPCRequest\x12\x0b\n\x03uid\x18\x01 \x01(\t\x12\x0e\n\x06method\x18\x02 \x01(\t\x12\'\n\x06params\x18\x03 \x01(\x0b\x32\x17.google.protobuf.Struct\x12\x10\n\x03\x62in\x18\x04 \x01(\x0cH\x00\x88\x01\x01\x12\x1f\n\x04\x61uth\x18\x05 \x01(\x0b\x32\x0c.skynet.AuthH\x01\x88\x01\x01\x42\x06\n\x04_binB\x07\n\x05_auth\"\x80\x01\n\x11SkynetRPCResponse\x12\'\n\x06result\x18\x01 \x01(\x0b\x32\x17.google.protobuf.Struct\x12\x10\n\x03\x62in\x18\x02 \x01(\x0cH\x00\x88\x01\x01\x12\x1f\n\x04\x61uth\x18\x03 \x01(\x0b\x32\x0c.skynet.AuthH\x01\x88\x01\x01\x42\x06\n\x04_binB\x07\n\x05_authb\x06proto3')
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals())
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'skynet_pb2', globals())
if _descriptor._USE_C_DESCRIPTORS == False:
DESCRIPTOR._options = None
_AUTH._serialized_start=54
_AUTH._serialized_end=87
_SKYNETRPCREQUEST._serialized_start=90
_SKYNETRPCREQUEST._serialized_end=246
_SKYNETRPCRESPONSE._serialized_start=249
_SKYNETRPCRESPONSE._serialized_end=377
# @@protoc_insertion_point(module_scope)

View File

@ -1,148 +0,0 @@
# piker: trading gear for hackers
# Copyright (C) Guillermo Rodriguez (in stewardship for piker0)
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
"""
Built-in (extension) types.
"""
import sys
import json
from typing import Optional, Union
from pprint import pformat
import msgspec
class Struct(msgspec.Struct):
'''
A "human friendlier" (aka repl buddy) struct subtype.
'''
def to_dict(self) -> dict:
return {
f: getattr(self, f)
for f in self.__struct_fields__
}
def __repr__(self):
# only turn on pprint when we detect a python REPL
# at runtime B)
if (
hasattr(sys, 'ps1')
# TODO: check if we're in pdb
):
return self.pformat()
return super().__repr__()
def pformat(self) -> str:
return f'Struct({pformat(self.to_dict())})'
def copy(
self,
update: Optional[dict] = None,
) -> msgspec.Struct:
'''
Validate-typecast all self defined fields, return a copy of us
with all such fields.
This is kinda like the default behaviour in `pydantic.BaseModel`.
'''
if update:
for k, v in update.items():
setattr(self, k, v)
# roundtrip serialize to validate
return msgspec.msgpack.Decoder(
type=type(self)
).decode(
msgspec.msgpack.Encoder().encode(self)
)
def typecast(
self,
# fields: Optional[list[str]] = None,
) -> None:
for fname, ftype in self.__annotations__.items():
setattr(self, fname, ftype(getattr(self, fname)))
# proto
from OpenSSL.crypto import PKey, X509, verify, sign
class AuthenticatedStruct(Struct, kw_only=True):
cert: Optional[str] = None
sig: Optional[str] = None
def to_unsigned_dict(self) -> dict:
self_dict = self.to_dict()
if 'sig' in self_dict:
del self_dict['sig']
if 'cert' in self_dict:
del self_dict['cert']
return self_dict
def unsigned_to_bytes(self) -> bytes:
return json.dumps(
self.to_unsigned_dict()).encode()
def sign(self, key: PKey, cert: str):
self.cert = cert
self.sig = sign(
key, self.unsigned_to_bytes(), 'sha256').hex()
def verify(self, cert: X509):
if not self.sig:
raise ValueError('Tried to verify unsigned request')
return verify(
cert, bytes.fromhex(self.sig), self.unsigned_to_bytes(), 'sha256')
class SkynetRPCRequest(AuthenticatedStruct):
uid: Union[str, int] # user unique id
method: str # rpc method name
params: dict # variable params
class SkynetRPCResponse(AuthenticatedStruct):
result: dict
class ImageGenRequest(Struct):
prompt: str
step: int
width: int
height: int
guidance: int
seed: Optional[int]
algo: str
upscaler: Optional[str]
class DGPUBusRequest(AuthenticatedStruct):
rid: str # req id
nid: str # node id
task: str
params: dict
class DGPUBusResponse(AuthenticatedStruct):
rid: str # req id
nid: str # node id
params: dict

94
skynet/utils.py 100644 → 100755
View File

@ -1,5 +1,7 @@
#!/usr/bin/python
import io
import os
import time
import random
@ -12,6 +14,9 @@ import numpy as np
from PIL import Image
from basicsr.archs.rrdbnet_arch import RRDBNet
from diffusers import (
DiffusionPipeline,
StableDiffusionXLPipeline,
StableDiffusionXLImg2ImgPipeline,
StableDiffusionPipeline,
StableDiffusionImg2ImgPipeline,
EulerAncestralDiscreteScheduler
@ -19,7 +24,7 @@ from diffusers import (
from realesrgan import RealESRGANer
from huggingface_hub import login
from .constants import ALGOS
from .constants import MODELS
def time_ms():
@ -36,28 +41,61 @@ def convert_from_image_to_cv2(img: Image) -> np.ndarray:
return np.asarray(img)
def pipeline_for(algo: str, mem_fraction: float = 1.0, image=False):
def convert_from_bytes_to_img(raw: bytes) -> Image:
return Image.open(io.BytesIO(raw))
def convert_from_img_to_bytes(image: Image, fmt='PNG') -> bytes:
byte_arr = io.BytesIO()
image.save(byte_arr, format=fmt)
return byte_arr.getvalue()
def convert_from_bytes_and_crop(raw: bytes, max_w: int, max_h: int) -> Image:
image = convert_from_bytes_to_img(raw)
w, h = image.size
if w > max_w or h > max_h:
image.thumbnail((max_w, max_h))
return image.convert('RGB')
def pipeline_for(model: str, mem_fraction: float = 1.0, image=False) -> DiffusionPipeline:
assert torch.cuda.is_available()
torch.cuda.empty_cache()
torch.cuda.set_per_process_memory_fraction(mem_fraction)
torch.backends.cuda.matmul.allow_tf32 = True
torch.backends.cudnn.allow_tf32 = True
# full determinism
# https://huggingface.co/docs/diffusers/using-diffusers/reproducibility#deterministic-algorithms
os.environ["CUBLAS_WORKSPACE_CONFIG"] = ":16:8"
torch.backends.cudnn.benchmark = False
torch.use_deterministic_algorithms(True)
params = {
'torch_dtype': torch.float16,
'safety_checker': None
}
if algo == 'stable':
if model == 'runwayml/stable-diffusion-v1-5':
params['revision'] = 'fp16'
if image:
pipe_class = StableDiffusionImg2ImgPipeline
if (model == 'stabilityai/stable-diffusion-xl-base-1.0' or
model == 'snowkidy/stable-diffusion-xl-base-0.9'):
if image:
pipe_class = StableDiffusionXLImg2ImgPipeline
else:
pipe_class = StableDiffusionXLPipeline
else:
pipe_class = StableDiffusionPipeline
if image:
pipe_class = StableDiffusionImg2ImgPipeline
else:
pipe_class = StableDiffusionPipeline
pipe = pipe_class.from_pretrained(
ALGOS[algo], **params)
model, **params)
pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(
pipe.scheduler.config)
@ -70,7 +108,7 @@ def pipeline_for(algo: str, mem_fraction: float = 1.0, image=False):
def txt2img(
hf_token: str,
model: str = 'midj',
model: str = 'prompthero/openjourney',
prompt: str = 'a red old tractor in a sunny wheat field',
output: str = 'output.png',
width: int = 512, height: int = 512,
@ -102,7 +140,7 @@ def txt2img(
def img2img(
hf_token: str,
model: str = 'midj',
model: str = 'prompthero/openjourney',
prompt: str = 'a red old tractor in a sunny wheat field',
img_path: str = 'input.png',
output: str = 'output.png',
@ -120,7 +158,8 @@ def img2img(
login(token=hf_token)
pipe = pipeline_for(model, image=True)
input_img = Image.open(img_path).convert('RGB')
with open(img_path, 'rb') as img_file:
input_img = convert_from_bytes_and_crop(img_file.read(), 512, 512)
seed = seed if seed else random.randint(0, 2 ** 64)
prompt = prompt
@ -135,6 +174,22 @@ def img2img(
image.save(output)
def init_upscaler(model_path: str = 'weights/RealESRGAN_x4plus.pth'):
return RealESRGANer(
scale=4,
model_path=model_path,
dni_weight=None,
model=RRDBNet(
num_in_ch=3,
num_out_ch=3,
num_feat=64,
num_block=23,
num_grow_ch=32,
scale=4
),
half=True
)
def upscale(
img_path: str = 'input.png',
output: str = 'output.png',
@ -148,19 +203,7 @@ def upscale(
input_img = Image.open(img_path).convert('RGB')
upscaler = RealESRGANer(
scale=4,
model_path=model_path,
dni_weight=None,
model=RRDBNet(
num_in_ch=3,
num_out_ch=3,
num_feat=64,
num_block=23,
num_grow_ch=32,
scale=4
),
half=True)
upscaler = init_upscaler(model_path=model_path)
up_img, _ = upscaler.enhance(
convert_from_image_to_cv2(input_img), outscale=4)
@ -175,7 +218,8 @@ def download_all_models(hf_token: str):
assert torch.cuda.is_available()
login(token=hf_token)
for model in ALGOS:
for model in MODELS:
print(f'DOWNLOADING {model.upper()}')
pipeline_for(model)
print(f'DOWNLOADING IMAGE {model.upper()}')
pipeline_for(model, image=True)
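To close, a hedged sketch of the reworked pipeline_for in use; the model id and sampler settings are examples, and a CUDA device with the model already cached is assumed:

from skynet.utils import pipeline_for, convert_from_img_to_bytes

pipe = pipeline_for('prompthero/openjourney', mem_fraction=0.9)
result = pipe(
    'a red old tractor in a sunny wheat field',
    width=512, height=512,
    num_inference_steps=28, guidance_scale=7.5
)
png_bytes = convert_from_img_to_bytes(result.images[0])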

View File

@ -1,71 +1,21 @@
#!/usr/bin/python
import os
import json
import time
import logging
from pathlib import Path
from functools import partial
import pytest
from docker.types import Mount, DeviceRequest
from skynet.db import open_new_database
from skynet.brain import run_skynet
from skynet.network import get_random_port
from skynet.constants import *
from skynet.nodeos import open_nodeos
@pytest.fixture(scope='session')
def postgres_db(dockerctl):
def postgres_db():
with open_new_database() as db_params:
yield db_params
@pytest.fixture
async def skynet_running():
async with run_skynet():
yield
@pytest.fixture
def dgpu_workers(request, dockerctl, skynet_running):
devices = [DeviceRequest(capabilities=[['gpu']])]
mounts = [Mount(
'/skynet', str(Path().resolve()), type='bind')]
num_containers, initial_algos = request.param
cmds = []
for i in range(num_containers):
dgpu_addr = f'tcp://127.0.0.1:{get_random_port()}'
cmd = f'''
pip install -e . && \
skynet run dgpu \
--algos=\'{json.dumps(initial_algos)}\' \
--uid=dgpu-{i} \
--dgpu={dgpu_addr}
'''
cmds.append(['bash', '-c', cmd])
logging.info(f'launching: \n{cmd}')
with dockerctl.run(
DOCKER_RUNTIME_CUDA,
name='skynet-test-runtime-cuda',
commands=cmds,
environment={
'HF_HOME': '/skynet/hf_home'
},
network='host',
mounts=mounts,
device_requests=devices,
num=num_containers,
) as containers:
yield containers
for i, container in enumerate(containers):
logging.info(f'container {i} logs:')
logging.info(container.logs().decode())
@pytest.fixture(scope='session')
def cleos():
with open_nodeos() as cli:
yield cli
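The new session-scoped `cleos` fixture boots a local nodeos once per run and yields a py-leap client. A hedged sketch of consuming it, mirroring the `get_table` calls in the chain tests below:

```python
# assumes the telos.gpu contract is deployed by the nodeos bootstrap
def test_queue_starts_empty(cleos):
    rows = cleos.get_table('telos.gpu', 'telos.gpu', 'queue')
    assert rows == []
```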


@ -0,0 +1,416 @@
{
"____comment": "This file was generated with eosio-abigen. DO NOT EDIT ",
"version": "eosio::abi/1.2",
"types": [],
"structs": [
{
"name": "account",
"base": "",
"fields": [
{
"name": "user",
"type": "name"
},
{
"name": "balance",
"type": "asset"
},
{
"name": "nonce",
"type": "uint64"
}
]
},
{
"name": "card",
"base": "",
"fields": [
{
"name": "id",
"type": "uint64"
},
{
"name": "owner",
"type": "name"
},
{
"name": "card_name",
"type": "string"
},
{
"name": "version",
"type": "string"
},
{
"name": "total_memory",
"type": "uint64"
},
{
"name": "mp_count",
"type": "uint32"
},
{
"name": "extra",
"type": "string"
}
]
},
{
"name": "clean",
"base": "",
"fields": []
},
{
"name": "config",
"base": "",
"fields": [
{
"name": "token_contract",
"type": "name"
},
{
"name": "token_symbol",
"type": "symbol"
}
]
},
{
"name": "dequeue",
"base": "",
"fields": [
{
"name": "user",
"type": "name"
},
{
"name": "request_id",
"type": "uint64"
}
]
},
{
"name": "enqueue",
"base": "",
"fields": [
{
"name": "user",
"type": "name"
},
{
"name": "request_body",
"type": "string"
},
{
"name": "binary_data",
"type": "string"
},
{
"name": "reward",
"type": "asset"
},
{
"name": "min_verification",
"type": "uint32"
}
]
},
{
"name": "global_configuration_struct",
"base": "",
"fields": [
{
"name": "token_contract",
"type": "name"
},
{
"name": "token_symbol",
"type": "symbol"
}
]
},
{
"name": "submit",
"base": "",
"fields": [
{
"name": "worker",
"type": "name"
},
{
"name": "request_id",
"type": "uint64"
},
{
"name": "request_hash",
"type": "checksum256"
},
{
"name": "result_hash",
"type": "checksum256"
},
{
"name": "ipfs_hash",
"type": "string"
}
]
},
{
"name": "withdraw",
"base": "",
"fields": [
{
"name": "user",
"type": "name"
},
{
"name": "quantity",
"type": "asset"
}
]
},
{
"name": "work_request_struct",
"base": "",
"fields": [
{
"name": "id",
"type": "uint64"
},
{
"name": "user",
"type": "name"
},
{
"name": "reward",
"type": "asset"
},
{
"name": "min_verification",
"type": "uint32"
},
{
"name": "nonce",
"type": "uint64"
},
{
"name": "body",
"type": "string"
},
{
"name": "binary_data",
"type": "string"
},
{
"name": "timestamp",
"type": "time_point_sec"
}
]
},
{
"name": "work_result_struct",
"base": "",
"fields": [
{
"name": "id",
"type": "uint64"
},
{
"name": "request_id",
"type": "uint64"
},
{
"name": "user",
"type": "name"
},
{
"name": "worker",
"type": "name"
},
{
"name": "result_hash",
"type": "checksum256"
},
{
"name": "ipfs_hash",
"type": "string"
},
{
"name": "submited",
"type": "time_point_sec"
}
]
},
{
"name": "workbegin",
"base": "",
"fields": [
{
"name": "worker",
"type": "name"
},
{
"name": "request_id",
"type": "uint64"
},
{
"name": "max_workers",
"type": "uint32"
}
]
},
{
"name": "workcancel",
"base": "",
"fields": [
{
"name": "worker",
"type": "name"
},
{
"name": "request_id",
"type": "uint64"
},
{
"name": "reason",
"type": "string"
}
]
},
{
"name": "worker",
"base": "",
"fields": [
{
"name": "account",
"type": "name"
},
{
"name": "joined",
"type": "time_point_sec"
},
{
"name": "left",
"type": "time_point_sec"
},
{
"name": "url",
"type": "string"
}
]
},
{
"name": "worker_status_struct",
"base": "",
"fields": [
{
"name": "worker",
"type": "name"
},
{
"name": "status",
"type": "string"
},
{
"name": "started",
"type": "time_point_sec"
}
]
}
],
"actions": [
{
"name": "clean",
"type": "clean",
"ricardian_contract": ""
},
{
"name": "config",
"type": "config",
"ricardian_contract": ""
},
{
"name": "dequeue",
"type": "dequeue",
"ricardian_contract": ""
},
{
"name": "enqueue",
"type": "enqueue",
"ricardian_contract": ""
},
{
"name": "submit",
"type": "submit",
"ricardian_contract": ""
},
{
"name": "withdraw",
"type": "withdraw",
"ricardian_contract": ""
},
{
"name": "workbegin",
"type": "workbegin",
"ricardian_contract": ""
},
{
"name": "workcancel",
"type": "workcancel",
"ricardian_contract": ""
}
],
"tables": [
{
"name": "cards",
"type": "card",
"index_type": "i64",
"key_names": [],
"key_types": []
},
{
"name": "config",
"type": "global_configuration_struct",
"index_type": "i64",
"key_names": [],
"key_types": []
},
{
"name": "queue",
"type": "work_request_struct",
"index_type": "i64",
"key_names": [],
"key_types": []
},
{
"name": "results",
"type": "work_result_struct",
"index_type": "i64",
"key_names": [],
"key_types": []
},
{
"name": "status",
"type": "worker_status_struct",
"index_type": "i64",
"key_names": [],
"key_types": []
},
{
"name": "users",
"type": "account",
"index_type": "i64",
"key_names": [],
"key_types": []
},
{
"name": "workers",
"type": "worker",
"index_type": "i64",
"key_names": [],
"key_types": []
}
],
"ricardian_clauses": [],
"variants": [],
"action_results": []
}
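The `enqueue` struct above pins the action's positional arguments to (user, request_body, binary_data, reward, min_verification). A hedged sketch of invoking it with the same py-leap client the tests use, assuming the contract lives on the `telos.gpu` account:

```python
# field order mirrors the ABI definition above
ec, out = cleos.push_action(
    'telos.gpu', 'enqueue',
    ['telegram', '{"method": "diffuse"}', '', '20.0000 GPU', 1],
    'telegram@active'
)
assert ec == 0
```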

Binary file not shown.


@ -0,0 +1,106 @@
#!/usr/bin/env python3
import time
import json
from hashlib import sha256
from functools import partial
import trio
import requests
from skynet.constants import DEFAULT_IPFS_REMOTE
from skynet.dgpu import open_dgpu_node
from leap.sugar import collect_stdout
def test_enqueue_work(cleos):
user = 'telegram'
req = json.dumps({
'method': 'diffuse',
'params': {
'algo': 'midj',
'prompt': 'skynet terminator dystopic',
'width': 512,
'height': 512,
'guidance': 10,
'step': 28,
'seed': 420,
'upscaler': 'x4'
}
})
binary = ''
ec, out = cleos.push_action(
'telos.gpu', 'enqueue', [user, req, binary, '20.0000 GPU', 1], f'{user}@active'
)
assert ec == 0
queue = cleos.get_table('telos.gpu', 'telos.gpu', 'queue')
assert len(queue) == 1
req_on_chain = queue[0]
assert req_on_chain['user'] == user
assert req_on_chain['body'] == req
assert req_on_chain['binary_data'] == binary
trio.run(
partial(
open_dgpu_node,
'testworker1',
'active',
cleos,
DEFAULT_IPFS_REMOTE,
cleos.private_keys['testworker1'],
initial_algos=['midj']
)
)
queue = cleos.get_table('telos.gpu', 'telos.gpu', 'queue')
assert len(queue) == 0
def test_enqueue_dequeue(cleos):
user = 'telegram'
req = json.dumps({
'method': 'diffuse',
'params': {
'algo': 'midj',
'prompt': 'skynet terminator dystopic',
'width': 512,
'height': 512,
'guidance': 10,
'step': 28,
'seed': 420,
'upscaler': 'x4'
}
})
binary = ''
ec, out = cleos.push_action(
'telos.gpu', 'enqueue', [user, req, binary, '20.0000 GPU', 1], f'{user}@active'
)
assert ec == 0
request_id, _ = collect_stdout(out).split(':')
request_id = int(request_id)
queue = cleos.get_table('telos.gpu', 'telos.gpu', 'queue')
assert len(queue) == 1
ec, out = cleos.push_action(
'telos.gpu', 'dequeue', [user, request_id], f'{user}@active'
)
assert ec == 0
queue = cleos.get_table('telos.gpu', 'telos.gpu', 'queue')
assert len(queue) == 0


@ -1,389 +0,0 @@
#!/usr/bin/python
import io
import time
import json
import zlib
import logging
from typing import Optional
from hashlib import sha256
from functools import partial
import trio
import pytest
from PIL import Image
from google.protobuf.json_format import MessageToDict
from skynet.brain import SkynetDGPUComputeError
from skynet.network import get_random_port, SessionServer
from skynet.protobuf import SkynetRPCResponse
from skynet.frontend import open_skynet_rpc
from skynet.constants import *
async def wait_for_dgpus(session, amount: int, timeout: float = 30.0):
    with trio.fail_after(timeout):
        while True:
            res = await session.rpc('dgpu_workers')
            if res.result['ok'] >= amount:
                break
            await trio.sleep(1)
_images = set()
async def check_request_img(
i: int,
uid: str = '1',
width: int = 512,
height: int = 512,
expect_unique = True,
upscaler: Optional[str] = None
):
global _images
with open_skynet_rpc(
uid,
cert_name='whitelist/testing.cert',
key_name='testing.key'
) as session:
res = await session.rpc(
'dgpu_call', {
'method': 'diffuse',
'params': {
'prompt': 'red old tractor in a sunny wheat field',
'step': 28,
'width': width, 'height': height,
'guidance': 7.5,
'seed': None,
'algo': list(ALGOS.keys())[i],
'upscaler': upscaler
}
},
timeout=60
)
if 'error' in res.result:
raise SkynetDGPUComputeError(MessageToDict(res.result))
img_raw = res.bin
img_sha = sha256(img_raw).hexdigest()
img = Image.open(io.BytesIO(img_raw))
if expect_unique and img_sha in _images:
raise ValueError(f'Duplicated image sha: {img_sha}')
_images.add(img_sha)
logging.info(f'img sha256: {img_sha} size: {len(img_raw)}')
assert len(img_raw) > 100000
return img
@pytest.mark.parametrize(
'dgpu_workers', [(1, ['midj'])], indirect=True)
async def test_dgpu_worker_compute_error(dgpu_workers):
'''Attempt to generate a huge image and check we get the right error,
then generate a smaller image to show gpu worker recovery
'''
with open_skynet_rpc(
'test-ctx',
cert_name='whitelist/testing.cert',
key_name='testing.key'
) as session:
await wait_for_dgpus(session, 1)
with pytest.raises(SkynetDGPUComputeError) as e:
await check_request_img(0, width=4096, height=4096)
logging.info(e)
await check_request_img(0)
@pytest.mark.parametrize(
'dgpu_workers', [(1, ['midj'])], indirect=True)
async def test_dgpu_worker(dgpu_workers):
'''Generate one image in a single dgpu worker
'''
with open_skynet_rpc(
'test-ctx',
cert_name='whitelist/testing.cert',
key_name='testing.key'
) as session:
await wait_for_dgpus(session, 1)
await check_request_img(0)
@pytest.mark.parametrize(
'dgpu_workers', [(1, ['midj', 'stable'])], indirect=True)
async def test_dgpu_worker_two_models(dgpu_workers):
'''Generate two images in a single dgpu worker using
two different models.
'''
with open_skynet_rpc(
'test-ctx',
cert_name='whitelist/testing.cert',
key_name='testing.key'
) as session:
await wait_for_dgpus(session, 1)
await check_request_img(0)
await check_request_img(1)
@pytest.mark.parametrize(
'dgpu_workers', [(1, ['midj'])], indirect=True)
async def test_dgpu_worker_upscale(dgpu_workers):
'''Generate one image in a single dgpu worker and upscale it 4x,
checking the upscaled resolution.
'''
with open_skynet_rpc(
'test-ctx',
cert_name='whitelist/testing.cert',
key_name='testing.key'
) as session:
await wait_for_dgpus(session, 1)
img = await check_request_img(0, upscaler='x4')
assert img.size == (2048, 2048)
@pytest.mark.parametrize(
'dgpu_workers', [(2, ['midj'])], indirect=True)
async def test_dgpu_workers_two(dgpu_workers):
'''Generate two images in two separate dgpu workers
'''
with open_skynet_rpc(
'test-ctx',
cert_name='whitelist/testing.cert',
key_name='testing.key'
) as session:
await wait_for_dgpus(session, 2, timeout=60)
async with trio.open_nursery() as n:
n.start_soon(check_request_img, 0)
n.start_soon(check_request_img, 0)
@pytest.mark.parametrize(
'dgpu_workers', [(1, ['midj'])], indirect=True)
async def test_dgpu_worker_algo_swap(dgpu_workers):
'''Generate an image using a non-default model
'''
with open_skynet_rpc(
'test-ctx',
cert_name='whitelist/testing.cert',
key_name='testing.key'
) as session:
await wait_for_dgpus(session, 1)
await check_request_img(5)
@pytest.mark.parametrize(
'dgpu_workers', [(3, ['midj'])], indirect=True)
async def test_dgpu_rotation_next_worker(dgpu_workers):
'''Connect three dgpu workers and check next_worker
rotation happens correctly
'''
with open_skynet_rpc(
'test-ctx',
cert_name='whitelist/testing.cert',
key_name='testing.key'
) as session:
await wait_for_dgpus(session, 3)
res = await session.rpc('dgpu_next')
assert 'ok' in res.result
assert res.result['ok'] == 0
await check_request_img(0)
res = await session.rpc('dgpu_next')
assert 'ok' in res.result
assert res.result['ok'] == 1
await check_request_img(0)
res = await session.rpc('dgpu_next')
assert 'ok' in res.result
assert res.result['ok'] == 2
await check_request_img(0)
res = await session.rpc('dgpu_next')
assert 'ok' in res.result
assert res.result['ok'] == 0
@pytest.mark.parametrize(
'dgpu_workers', [(3, ['midj'])], indirect=True)
async def test_dgpu_rotation_next_worker_disconnect(dgpu_workers):
'''Connect three dgpu workers, disconnect the first one and check
next_worker rotation happens correctly
'''
with open_skynet_rpc(
'test-ctx',
cert_name='whitelist/testing.cert',
key_name='testing.key'
) as session:
await wait_for_dgpus(session, 3)
await trio.sleep(3)
# stop the worker whose turn is next
for _ in range(2):
ec, out = dgpu_workers[0].exec_run(['pkill', '-INT', '-f', 'skynet'])
assert ec == 0
dgpu_workers[0].wait()
res = await session.rpc('dgpu_workers')
assert 'ok' in res.result
assert res.result['ok'] == 2
async with trio.open_nursery() as n:
n.start_soon(check_request_img, 0)
n.start_soon(check_request_img, 0)
async def test_dgpu_no_ack_node_disconnect(skynet_running):
'''Mock a node that connects, gets a request but fails to
acknowledge it, then check skynet correctly drops the node
'''
async def mock_rpc(req, ctx):
resp = SkynetRPCResponse()
resp.result.update({'error': 'can\'t do it mate'})
return resp
dgpu_addr = f'tcp://127.0.0.1:{get_random_port()}'
mock_server = SessionServer(
dgpu_addr,
mock_rpc,
cert_name='whitelist/testing.cert',
key_name='testing.key'
)
async with mock_server.open():
with open_skynet_rpc(
'test-ctx',
cert_name='whitelist/testing.cert',
key_name='testing.key'
) as session:
res = await session.rpc('dgpu_online', {
'dgpu_addr': dgpu_addr,
'cert': 'whitelist/testing.cert'
})
assert 'ok' in res.result
await wait_for_dgpus(session, 1)
with pytest.raises(SkynetDGPUComputeError) as e:
await check_request_img(0)
assert 'can\'t do it mate' in str(e.value)
res = await session.rpc('dgpu_workers')
assert 'ok' in res.result
assert res.result['ok'] == 0
@pytest.mark.parametrize(
'dgpu_workers', [(1, ['midj'])], indirect=True)
async def test_dgpu_timeout_while_processing(dgpu_workers):
'''Stop node while processing request to cause timeout and
then check skynet correctly drops the node.
'''
with open_skynet_rpc(
'test-ctx',
cert_name='whitelist/testing.cert',
key_name='testing.key'
) as session:
await wait_for_dgpus(session, 1)
async def check_request_img_raises():
with pytest.raises(SkynetDGPUComputeError) as e:
await check_request_img(0)
assert 'timeout while processing request' in str(e.value)
async with trio.open_nursery() as n:
n.start_soon(check_request_img_raises)
await trio.sleep(1)
ec, out = dgpu_workers[0].exec_run(
['pkill', '-TERM', '-f', 'skynet'])
assert ec == 0
@pytest.mark.parametrize(
'dgpu_workers', [(1, ['midj'])], indirect=True)
async def test_dgpu_img2img(dgpu_workers):
with open_skynet_rpc(
'test-ctx',
cert_name='whitelist/testing.cert',
key_name='testing.key'
) as session:
await wait_for_dgpus(session, 1)
await trio.sleep(2)
res = await session.rpc(
'dgpu_call', {
'method': 'diffuse',
'params': {
'prompt': 'red old tractor in a sunny wheat field',
'step': 28,
'width': 512, 'height': 512,
'guidance': 7.5,
'seed': None,
'algo': list(ALGOS.keys())[0],
'upscaler': None
}
},
timeout=60
)
if 'error' in res.result:
raise SkynetDGPUComputeError(MessageToDict(res.result))
img_raw = res.bin
img = Image.open(io.BytesIO(img_raw))
img.save('txt2img.png')
res = await session.rpc(
'dgpu_call', {
'method': 'diffuse',
'params': {
'prompt': 'red ferrari in a sunny wheat field',
'step': 28,
'guidance': 8,
'strength': 0.7,
'seed': None,
'algo': list(ALGOS.keys())[0],
'upscaler': 'x4'
}
},
binext=img_raw,
timeout=60
)
if 'error' in res.result:
raise SkynetDGPUComputeError(MessageToDict(res.result))
img_raw = res.bin
img = Image.open(io.BytesIO(img_raw))
img.save('img2img.png')


@ -1,86 +0,0 @@
#!/usr/bin/python
import logging
import trio
import pynng
import pytest
import trio_asyncio
from skynet.brain import run_skynet
from skynet.structs import *
from skynet.network import SessionServer
from skynet.frontend import open_skynet_rpc
async def test_skynet(skynet_running):
...
async def test_skynet_attempt_insecure(skynet_running):
with pytest.raises(pynng.exceptions.NNGException) as e:
with open_skynet_rpc('bad-actor') as session:
with trio.fail_after(5):
await session.rpc('skynet_shutdown')
async def test_skynet_dgpu_connection_simple(skynet_running):
async def rpc_handler(req, ctx):
...
fake_dgpu_addr = 'tcp://127.0.0.1:41001'
rpc_server = SessionServer(
fake_dgpu_addr,
rpc_handler,
cert_name='whitelist/testing.cert',
key_name='testing.key'
)
with open_skynet_rpc(
'dgpu-0',
cert_name='whitelist/testing.cert',
key_name='testing.key'
) as session:
# check 0 nodes are connected
res = await session.rpc('dgpu_workers')
assert 'ok' in res.result.keys()
assert res.result['ok'] == 0
# check next worker is None
res = await session.rpc('dgpu_next')
assert 'ok' in res.result.keys()
assert res.result['ok'] is None
async with rpc_server.open() as rpc_server:
# connect 1 dgpu
res = await session.rpc(
'dgpu_online', {
'dgpu_addr': fake_dgpu_addr,
'cert': 'whitelist/testing.cert'
})
assert 'ok' in res.result.keys()
# check 1 node is connected
res = await session.rpc('dgpu_workers')
assert 'ok' in res.result.keys()
assert res.result['ok'] == 1
# check next worker is 0
res = await session.rpc('dgpu_next')
assert 'ok' in res.result.keys()
assert res.result['ok'] == 0
# disconnect 1 dgpu
res = await session.rpc('dgpu_offline')
assert 'ok' in res.result.keys()
# check 0 nodes are connected
res = await session.rpc('dgpu_workers')
assert 'ok' in res.result.keys()
assert res.result['ok'] == 0
# check next worker is None
res = await session.rpc('dgpu_next')
assert 'ok' in res.result.keys()
assert res.result['ok'] is None


@ -1,28 +0,0 @@
#!/usr/bin/python
import trio
from functools import partial
from skynet.db import open_new_database
from skynet.brain import run_skynet
from skynet.config import load_skynet_ini
from skynet.frontend.telegram import run_skynet_telegram
if __name__ == '__main__':
'''You will need a telegram bot token configured in skynet.ini for this
'''
with open_new_database() as db_params:
db_container, db_pass, db_host = db_params
config = load_skynet_ini()
async def main():
await run_skynet_telegram(
'telegram-test',
config['skynet.telegram-test']['token'],
db_host=db_host,
db_pass=db_pass
)
trio.run(main)
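The script reads the bot token from the `skynet.telegram-test` section of `skynet.ini`. A sketch of producing a matching config programmatically (the section and key names come from the lookup above; the token value is a placeholder):

```python
# writes the minimal skynet.ini this script expects
import configparser

ini = configparser.ConfigParser()
ini['skynet.telegram-test'] = {'token': '<your-telegram-bot-token>'}
with open('skynet.ini', 'w') as f:
    ini.write(f)
```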