mirror of https://github.com/skygpu/skynet.git
commit b5f52b3b5b
@@ -29,9 +29,6 @@ poetry shell
 # test you can run this command
 skynet --help
 
-# launch ipfs node
-skynet run ipfs
-
 # to launch worker
 skynet run dgpu
 
@@ -77,9 +74,6 @@ docker pull guilledk/skynet:runtime-cuda
 # or build it (takes a bit of time)
 ./build_docker.sh
 
-# launch simple ipfs node
-./launch_ipfs.sh
-
 # run worker with all gpus
 docker run \
     -it \
@@ -0,0 +1,45 @@
+from nvidia/cuda:12.4.1-devel-ubuntu22.04
+from python:3.12
+
+env DEBIAN_FRONTEND=noninteractive
+
+run apt-get update && apt-get install -y \
+    git \
+    llvm \
+    ffmpeg \
+    libsm6 \
+    libxext6 \
+    ninja-build
+
+# env CC /usr/bin/clang
+# env CXX /usr/bin/clang++
+#
+# # install llvm10 as required by llvm-lite
+# run git clone https://github.com/llvm/llvm-project.git -b llvmorg-10.0.1
+# workdir /llvm-project
+# # this adds a commit from 12.0.0 that fixes build on newer compilers
+# run git cherry-pick -n b498303066a63a203d24f739b2d2e0e56dca70d1
+# run cmake -S llvm -B build -G Ninja -DCMAKE_BUILD_TYPE=Release
+# run ninja -C build install # -j8
+
+run curl -sSL https://install.python-poetry.org | python3 -
+
+env PATH "/root/.local/bin:$PATH"
+
+copy . /skynet
+workdir /skynet
+
+env POETRY_VIRTUALENVS_PATH /skynet/.venv
+
+run poetry install --with=cuda -v
+
+workdir /root/target
+
+env PYTORCH_CUDA_ALLOC_CONF max_split_size_mb:128
+env NVIDIA_VISIBLE_DEVICES=all
+
+copy docker/entrypoint.sh /entrypoint.sh
+entrypoint ["/entrypoint.sh"]
+
+cmd ["skynet", "--help"]
@@ -1,20 +1,7 @@
 docker build \
-    -t guilledk/skynet:runtime \
-    -f docker/Dockerfile.runtime .
+    -t guilledk/skynet:runtime-cuda-py312 \
+    -f docker/Dockerfile.runtime+cuda-py312 .
 
-docker build \
-    -t guilledk/skynet:runtime-frontend \
-    -f docker/Dockerfile.runtime+frontend .
+# docker build \
+#     -t guilledk/skynet:runtime-cuda \
+#     -f docker/Dockerfile.runtime+cuda-py311 .
 
-docker build \
-    -t guilledk/skynet:runtime-cuda-py311 \
-    -f docker/Dockerfile.runtime+cuda-py311 .
-
-docker build \
-    -t guilledk/skynet:runtime-cuda \
-    -f docker/Dockerfile.runtime+cuda-py311 .
-
-docker build \
-    -t guilledk/skynet:runtime-cuda-py310 \
-    -f docker/Dockerfile.runtime+cuda-py310 .
[file diff suppressed because it is too large]
@@ -1,20 +1,25 @@
 [tool.poetry]
 name = 'skynet'
-version = '0.1a12'
+version = '0.1a13'
 description = 'Decentralized compute platform'
 authors = ['Guillermo Rodriguez <guillermo@telos.net>']
 license = 'AGPL'
 readme = 'README.md'
 
 [tool.poetry.dependencies]
-python = '>=3.10,<3.12'
+python = '>=3.10,<3.13'
 pytz = '^2023.3.post1'
 trio = '^0.22.2'
-asks = '^3.0.0'
 Pillow = '^10.0.1'
 docker = '^6.1.3'
-py-leap = {git = 'https://github.com/guilledk/py-leap.git', rev = 'v0.1a14'}
+py-leap = {git = 'https://github.com/guilledk/py-leap.git', rev = 'v0.1a32'}
 toml = '^0.10.2'
+msgspec = "^0.19.0"
+numpy = "<2.1"
+protobuf = "^5.29.3"
+zstandard = "^0.23.0"
+click = "^8.1.8"
+httpx = "^0.28.1"
 
 [tool.poetry.group.frontend]
 optional = true
@@ -32,31 +37,33 @@ optional = true
 [tool.poetry.group.dev.dependencies]
 pdbpp = {version = '^0.10.3'}
 pytest = {version = '^7.4.2'}
+pytest-trio = "^0.8.0"
 
 [tool.poetry.group.cuda]
 optional = true
 
 [tool.poetry.group.cuda.dependencies]
-torch = {version = '2.0.1+cu118', source = 'torch'}
-scipy = {version = '^1.11.2'}
-numba = {version = '0.57.0'}
+torch = {version = '2.5.1+cu121', source = 'torch'}
+scipy = {version = '1.15.1'}
+numba = {version = '0.60.0'}
 quart = {version = '^0.19.3'}
-triton = {version = '2.0.0', source = 'torch'}
-basicsr = {version = '^1.4.2'}
-xformers = {version = '^0.0.22'}
+triton = {version = '3.1.0', source = 'torch'}
+xformers = {version = '^0.0.29'}
 hypercorn = {version = '^0.14.4'}
-diffusers = {version = '^0.21.2'}
-realesrgan = {version = '^0.3.0'}
+diffusers = {version = '0.32.1'}
 quart-trio = {version = '^0.11.0'}
-torchvision = {version = '0.15.2+cu118', source = 'torch'}
-accelerate = {version = '^0.23.0'}
-transformers = {version = '^4.33.2'}
-huggingface-hub = {version = '^0.17.3'}
+torchvision = {version = '0.20.1+cu121', source = 'torch'}
+accelerate = {version = '0.34.0'}
+transformers = {version = '4.48.0'}
+huggingface-hub = {version = '^0.27.1'}
 invisible-watermark = {version = '^0.2.0'}
+bitsandbytes = "^0.45.0"
+basicsr = "^1.4.2"
+realesrgan = "^0.3.0"
 
 [[tool.poetry.source]]
 name = 'torch'
-url = 'https://download.pytorch.org/whl/cu118'
+url = 'https://download.pytorch.org/whl/cu121'
 priority = 'explicit'
 
 [build-system]
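Note: msgspec joins the dependency list because skynet/constants.py (further down in this
diff) rebuilds the model registry on msgspec structs. A minimal sketch of what that buys,
using the same Size struct the diff defines later:

    import msgspec

    class Size(msgspec.Struct):
        w: int
        h: int

    # structs are typed, cheap to build, and serialize without extra glue
    s = Size(w=512, h=512)
    print(s.w, s.h)                # 512 512
    print(msgspec.json.encode(s))  # b'{"w":512,"h":512}'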
@@ -65,3 +72,7 @@ build-backend = 'poetry.core.masonry.api'
 
 [tool.poetry.scripts]
 skynet = 'skynet.cli:skynet'
+txt2img = 'skynet.cli:txt2img'
+img2img = 'skynet.cli:img2img'
+upscale = 'skynet.cli:upscale'
+inpaint = 'skynet.cli:inpaint'
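Note: the four new console scripts map one-to-one onto click commands in skynet/cli.py,
so they can also be driven from Python; a sketch (the prompt reuses the CLI default):

    from skynet.cli import txt2img

    # same as running `txt2img -p '...'` from a shell; standalone_mode=False
    # keeps click from calling sys.exit() when the command finishes
    txt2img.main(
        ['--prompt', 'a red old tractor in a sunny wheat field'],
        standalone_mode=False,
    )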
@@ -8,7 +8,7 @@ from functools import partial
 
 import click
 
-from leap.sugar import Name, asset_from_str
+from leap.protocol import Name, Asset
 
 from .config import *
 from .constants import *
@@ -20,7 +20,7 @@ def skynet(*args, **kwargs):
 
 
 @click.command()
-@click.option('--model', '-m', default='midj')
+@click.option('--model', '-m', default=list(MODELS.keys())[-1])
 @click.option(
     '--prompt', '-p', default='a red old tractor in a sunny wheat field')
 @click.option('--output', '-o', default='output.png')
@@ -39,7 +39,7 @@ def txt2img(*args, **kwargs):
     utils.txt2img(hf_token, **kwargs)
 
 @click.command()
-@click.option('--model', '-m', default=list(MODELS.keys())[0])
+@click.option('--model', '-m', default=list(MODELS.keys())[-2])
 @click.option(
     '--prompt', '-p', default='a red old tractor in a sunny wheat field')
 @click.option('--input', '-i', default='input.png')
@@ -66,6 +66,37 @@ def img2img(model, prompt, input, output, strength, guidance, steps, seed):
         seed=seed
     )
 
+
+@click.command()
+@click.option('--model', '-m', default=list(MODELS.keys())[-3])
+@click.option(
+    '--prompt', '-p', default='a red old tractor in a sunny wheat field')
+@click.option('--input', '-i', default='input.png')
+@click.option('--mask', '-M', default='mask.png')
+@click.option('--output', '-o', default='output.png')
+@click.option('--strength', '-Z', default=1.0)
+@click.option('--guidance', '-g', default=10.0)
+@click.option('--steps', '-s', default=26)
+@click.option('--seed', '-S', default=None)
+def inpaint(model, prompt, input, mask, output, strength, guidance, steps, seed):
+    from . import utils
+    config = load_skynet_toml()
+    hf_token = load_key(config, 'skynet.dgpu.hf_token')
+    hf_home = load_key(config, 'skynet.dgpu.hf_home')
+    set_hf_vars(hf_token, hf_home)
+    utils.inpaint(
+        hf_token,
+        model=model,
+        prompt=prompt,
+        img_path=input,
+        mask_path=mask,
+        output=output,
+        strength=strength,
+        guidance=guidance,
+        steps=steps,
+        seed=seed
+    )
+
 @click.command()
 @click.option('--input', '-i', default='input.png')
 @click.option('--output', '-o', default='output.png')
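Note: the new inpaint command takes a separate mask image via -M/--mask. Under the usual
diffusers inpainting convention, white mask pixels mark the region to repaint; a small
Pillow sketch for producing one (file names and coordinates are placeholders):

    from PIL import Image, ImageDraw

    # mask must match the input size: black = keep, white = repaint
    img = Image.open('input.png')
    mask = Image.new('L', img.size, 0)
    draw = ImageDraw.Draw(mask)
    draw.rectangle((100, 100, 300, 300), fill=255)  # area to inpaint
    mask.save('mask.png')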
@@ -147,7 +178,7 @@ def enqueue(
             'user': Name(account),
             'request_body': req,
             'binary_data': binary,
-            'reward': asset_from_str(reward),
+            'reward': Asset.from_str(reward),
             'min_verification': 1
         },
         account, key, permission,
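Note: this tracks the py-leap bump above (v0.1a14 -> v0.1a32), which drops the leap.sugar
helpers in favor of leap.protocol classes; asset strings are now parsed through a
classmethod. A sketch, with a placeholder token symbol:

    from leap.protocol import Asset

    # parses amount, precision (4 decimal places here) and symbol from the string
    reward = Asset.from_str('1.0000 GPU')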
@@ -4,31 +4,120 @@ VERSION = '0.1a12'
 
 DOCKER_RUNTIME_CUDA = 'skynet:runtime-cuda'
 
-MODELS = {
-    'prompthero/openjourney': {'short': 'midj', 'mem': 6},
-    'runwayml/stable-diffusion-v1-5': {'short': 'stable', 'mem': 6},
-    'stabilityai/stable-diffusion-2-1-base': {'short': 'stable2', 'mem': 6},
-    'snowkidy/stable-diffusion-xl-base-0.9': {'short': 'stablexl0.9', 'mem': 8.3},
-    'Linaqruf/anything-v3.0': {'short': 'hdanime', 'mem': 6},
-    'hakurei/waifu-diffusion': {'short': 'waifu', 'mem': 6},
-    'nitrosocke/Ghibli-Diffusion': {'short': 'ghibli', 'mem': 6},
-    'dallinmackay/Van-Gogh-diffusion': {'short': 'van-gogh', 'mem': 6},
-    'lambdalabs/sd-pokemon-diffusers': {'short': 'pokemon', 'mem': 6},
-    'Envvi/Inkpunk-Diffusion': {'short': 'ink', 'mem': 6},
-    'nousr/robo-diffusion': {'short': 'robot', 'mem': 6},
-
-    # default is always last
-    'stabilityai/stable-diffusion-xl-base-1.0': {'short': 'stablexl', 'mem': 8.3},
-}
+import msgspec
+from typing import Literal
+
+
+class Size(msgspec.Struct):
+    w: int
+    h: int
+
+
+class ModelDesc(msgspec.Struct):
+    short: str
+    mem: float
+    size: Size
+    tags: list[Literal['txt2img', 'img2img', 'inpaint']]
+
+
+MODELS: dict[str, ModelDesc] = {
+    'runwayml/stable-diffusion-v1-5': ModelDesc(
+        short='stable', mem=6, size=Size(w=512, h=512), tags=['txt2img']),
+    'stabilityai/stable-diffusion-2-1-base': ModelDesc(
+        short='stable2', mem=6, size=Size(w=512, h=512), tags=['txt2img']),
+    'snowkidy/stable-diffusion-xl-base-0.9': ModelDesc(
+        short='stablexl0.9', mem=8.3, size=Size(w=1024, h=1024), tags=['txt2img']),
+    'Linaqruf/anything-v3.0': ModelDesc(
+        short='hdanime', mem=6, size=Size(w=512, h=512), tags=['txt2img']),
+    'hakurei/waifu-diffusion': ModelDesc(
+        short='waifu', mem=6, size=Size(w=512, h=512), tags=['txt2img']),
+    'nitrosocke/Ghibli-Diffusion': ModelDesc(
+        short='ghibli', mem=6, size=Size(w=512, h=512), tags=['txt2img']),
+    'dallinmackay/Van-Gogh-diffusion': ModelDesc(
+        short='van-gogh', mem=6, size=Size(w=512, h=512), tags=['txt2img']),
+    'lambdalabs/sd-pokemon-diffusers': ModelDesc(
+        short='pokemon', mem=6, size=Size(w=512, h=512), tags=['txt2img']),
+    'Envvi/Inkpunk-Diffusion': ModelDesc(
+        short='ink', mem=6, size=Size(w=512, h=512), tags=['txt2img']),
+    'nousr/robo-diffusion': ModelDesc(
+        short='robot', mem=6, size=Size(w=512, h=512), tags=['txt2img']),
+    'black-forest-labs/FLUX.1-schnell': ModelDesc(
+        short='flux', mem=24, size=Size(w=1024, h=1024), tags=['txt2img']),
+    'black-forest-labs/FLUX.1-Fill-dev': ModelDesc(
+        short='flux-inpaint', mem=24, size=Size(w=1024, h=1024), tags=['inpaint']),
+    'diffusers/stable-diffusion-xl-1.0-inpainting-0.1': ModelDesc(
+        short='stablexl-inpaint', mem=8.3, size=Size(w=1024, h=1024), tags=['inpaint']),
+    'prompthero/openjourney': ModelDesc(
+        short='midj', mem=6, size=Size(w=512, h=512), tags=['txt2img', 'img2img']),
+    'stabilityai/stable-diffusion-xl-base-1.0': ModelDesc(
+        short='stablexl', mem=8.3, size=Size(w=1024, h=1024), tags=['txt2img']),
+}
 
 SHORT_NAMES = [
-    model_info['short']
+    model_info.short
     for model_info in MODELS.values()
 ]
 
 
 def get_model_by_shortname(short: str):
     for model, info in MODELS.items():
-        if short == info['short']:
+        if short == info.short:
             return model
 
 N = '\n'
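Note: insertion order in this dict is load-bearing: the CLI defaults earlier in this diff
index into list(MODELS.keys()), so [-1] is stablexl (txt2img), [-2] is midj (img2img) and
[-3] is stablexl-inpaint. The tags field also enables capability queries, e.g.:

    from skynet.constants import MODELS, get_model_by_shortname

    # every model able to serve an inpaint request
    inpaint_capable = [
        name for name, desc in MODELS.items()
        if 'inpaint' in desc.tags
    ]

    # short-name lookup now goes through struct attributes
    assert get_model_by_shortname('flux') == 'black-forest-labs/FLUX.1-schnell'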
@@ -166,9 +255,7 @@ DEFAULT_UPSCALER = None
 
 DEFAULT_CONFIG_PATH = 'skynet.toml'
 
-DEFAULT_INITAL_MODELS = [
-    'stabilityai/stable-diffusion-xl-base-1.0'
-]
+DEFAULT_INITAL_MODEL = list(MODELS.keys())[-1]
 
 DATE_FORMAT = '%B the %dth %Y, %H:%M:%S'
@@ -193,3 +280,221 @@ TG_MAX_WIDTH = 1280
 TG_MAX_HEIGHT = 1280
 
 DEFAULT_SINGLE_CARD_MAP = 'cuda:0'
+
+GPU_CONTRACT_ABI = {
+    "version": "eosio::abi/1.2",
+    "types": [],
+    "structs": [
+        {"name": "account", "base": "", "fields": [
+            {"name": "user", "type": "name"},
+            {"name": "balance", "type": "asset"},
+            {"name": "nonce", "type": "uint64"}
+        ]},
+        {"name": "card", "base": "", "fields": [
+            {"name": "id", "type": "uint64"},
+            {"name": "owner", "type": "name"},
+            {"name": "card_name", "type": "string"},
+            {"name": "version", "type": "string"},
+            {"name": "total_memory", "type": "uint64"},
+            {"name": "mp_count", "type": "uint32"},
+            {"name": "extra", "type": "string"}
+        ]},
+        {"name": "clean", "base": "", "fields": []},
+        {"name": "config", "base": "", "fields": [
+            {"name": "token_contract", "type": "name"},
+            {"name": "token_symbol", "type": "symbol"}
+        ]},
+        {"name": "dequeue", "base": "", "fields": [
+            {"name": "user", "type": "name"},
+            {"name": "request_id", "type": "uint64"}
+        ]},
+        {"name": "enqueue", "base": "", "fields": [
+            {"name": "user", "type": "name"},
+            {"name": "request_body", "type": "string"},
+            {"name": "binary_data", "type": "string"},
+            {"name": "reward", "type": "asset"},
+            {"name": "min_verification", "type": "uint32"}
+        ]},
+        {"name": "gcfgstruct", "base": "", "fields": [
+            {"name": "token_contract", "type": "name"},
+            {"name": "token_symbol", "type": "symbol"}
+        ]},
+        {"name": "submit", "base": "", "fields": [
+            {"name": "worker", "type": "name"},
+            {"name": "request_id", "type": "uint64"},
+            {"name": "request_hash", "type": "checksum256"},
+            {"name": "result_hash", "type": "checksum256"},
+            {"name": "ipfs_hash", "type": "string"}
+        ]},
+        {"name": "withdraw", "base": "", "fields": [
+            {"name": "user", "type": "name"},
+            {"name": "quantity", "type": "asset"}
+        ]},
+        {"name": "work_request_struct", "base": "", "fields": [
+            {"name": "id", "type": "uint64"},
+            {"name": "user", "type": "name"},
+            {"name": "reward", "type": "asset"},
+            {"name": "min_verification", "type": "uint32"},
+            {"name": "nonce", "type": "uint64"},
+            {"name": "body", "type": "string"},
+            {"name": "binary_data", "type": "string"},
+            {"name": "timestamp", "type": "time_point_sec"}
+        ]},
+        {"name": "work_result_struct", "base": "", "fields": [
+            {"name": "id", "type": "uint64"},
+            {"name": "request_id", "type": "uint64"},
+            {"name": "user", "type": "name"},
+            {"name": "worker", "type": "name"},
+            {"name": "result_hash", "type": "checksum256"},
+            {"name": "ipfs_hash", "type": "string"},
+            {"name": "submited", "type": "time_point_sec"}
+        ]},
+        {"name": "workbegin", "base": "", "fields": [
+            {"name": "worker", "type": "name"},
+            {"name": "request_id", "type": "uint64"},
+            {"name": "max_workers", "type": "uint32"}
+        ]},
+        {"name": "workcancel", "base": "", "fields": [
+            {"name": "worker", "type": "name"},
+            {"name": "request_id", "type": "uint64"},
+            {"name": "reason", "type": "string"}
+        ]},
+        {"name": "worker", "base": "", "fields": [
+            {"name": "account", "type": "name"},
+            {"name": "joined", "type": "time_point_sec"},
+            {"name": "left", "type": "time_point_sec"},
+            {"name": "url", "type": "string"}
+        ]},
+        {"name": "worker_status_struct", "base": "", "fields": [
+            {"name": "worker", "type": "name"},
+            {"name": "status", "type": "string"},
+            {"name": "started", "type": "time_point_sec"}
+        ]}
+    ],
+    "actions": [
+        {"name": "clean", "type": "clean", "ricardian_contract": ""},
+        {"name": "config", "type": "config", "ricardian_contract": ""},
+        {"name": "dequeue", "type": "dequeue", "ricardian_contract": ""},
+        {"name": "enqueue", "type": "enqueue", "ricardian_contract": ""},
+        {"name": "submit", "type": "submit", "ricardian_contract": ""},
+        {"name": "withdraw", "type": "withdraw", "ricardian_contract": ""},
+        {"name": "workbegin", "type": "workbegin", "ricardian_contract": ""},
+        {"name": "workcancel", "type": "workcancel", "ricardian_contract": ""}
+    ],
+    "tables": [
+        {"name": "cards", "index_type": "i64", "key_names": [], "key_types": [], "type": "card"},
+        {"name": "gcfgstruct", "index_type": "i64", "key_names": [], "key_types": [], "type": "gcfgstruct"},
+        {"name": "queue", "index_type": "i64", "key_names": [], "key_types": [], "type": "work_request_struct"},
+        {"name": "results", "index_type": "i64", "key_names": [], "key_types": [], "type": "work_result_struct"},
+        {"name": "status", "index_type": "i64", "key_names": [], "key_types": [], "type": "worker_status_struct"},
+        {"name": "users", "index_type": "i64", "key_names": [], "key_types": [], "type": "account"},
+        {"name": "workers", "index_type": "i64", "key_names": [], "key_types": [], "type": "worker"}
+    ],
+    "ricardian_clauses": [],
+    "error_messages": [],
+    "abi_extensions": [],
+    "variants": [],
+    "action_results": []
+}
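Note: embedding the contract ABI as a constant lets clients serialize gpu.scd actions
locally instead of fetching the ABI from a node; later hunks in this diff wire it up
exactly like this (the endpoint URL is a placeholder):

    from leap.cleos import CLEOS
    from skynet.constants import GPU_CONTRACT_ABI

    cleos = CLEOS(endpoint='http://127.0.0.1:8888')
    cleos.load_abi('gpu.scd', GPU_CONTRACT_ABI)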
@@ -13,36 +13,48 @@ from diffusers import DiffusionPipeline
 import trio
 import torch
 
-from skynet.constants import DEFAULT_INITAL_MODELS, MODELS
+from skynet.constants import DEFAULT_INITAL_MODEL, MODELS
 from skynet.dgpu.errors import DGPUComputeError, DGPUInferenceCancelled
 
 from skynet.utils import crop_image, convert_from_cv2_to_image, convert_from_image_to_cv2, convert_from_img_to_bytes, init_upscaler, pipeline_for
 
 
 def prepare_params_for_diffuse(
     params: dict,
-    input_type: str,
-    binary = None
+    mode: str,
+    inputs: list[bytes]
 ):
     _params = {}
-    if binary != None:
-        match input_type:
-            case 'png':
-                image = crop_image(
-                    binary, params['width'], params['height'])
-
-                _params['image'] = image
-                _params['strength'] = float(params['strength'])
-
-            case 'none':
-                ...
-
-            case _:
-                raise DGPUComputeError(f'Unknown input_type {input_type}')
-
-    else:
-        _params['width'] = int(params['width'])
-        _params['height'] = int(params['height'])
+    match mode:
+        case 'inpaint':
+            image = crop_image(
+                inputs[0], params['width'], params['height'])
+
+            mask = crop_image(
+                inputs[1], params['width'], params['height'])
+
+            _params['image'] = image
+            _params['mask_image'] = mask
+
+            if 'flux' in params['model'].lower():
+                _params['max_sequence_length'] = 512
+            else:
+                _params['strength'] = float(params['strength'])
+
+        case 'img2img':
+            image = crop_image(
+                inputs[0], params['width'], params['height'])
+
+            _params['image'] = image
+            _params['strength'] = float(params['strength'])
+
+        case 'txt2img' | 'diffuse':
+            ...
+
+        case _:
+            raise DGPUComputeError(f'Unknown mode {mode}')
+
+    # _params['width'] = int(params['width'])
+    # _params['height'] = int(params['height'])
 
     return (
         params['prompt'],
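Note: a sketch of the reworked call shape: mode picks which extra pipeline kwargs get
prepared and inputs carries the pre-fetched images (image and mask are placeholder PIL
images; params must hold the keys the function reads, e.g. prompt, model, strength,
width and height):

    # txt2img needs no inputs; img2img needs the source; inpaint needs source + mask
    args_txt2img = prepare_params_for_diffuse(params, 'txt2img', [])
    args_img2img = prepare_params_for_diffuse(params, 'img2img', [image])
    args_inpaint = prepare_params_for_diffuse(params, 'inpaint', [image, mask])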
@@ -57,95 +69,55 @@ def prepare_params_for_diffuse(
 class SkynetMM:
 
     def __init__(self, config: dict):
-        self.upscaler = init_upscaler()
-        self.initial_models = (
-            config['initial_models']
-            if 'initial_models' in config else DEFAULT_INITAL_MODELS
-        )
-
         self.cache_dir = None
         if 'hf_home' in config:
             self.cache_dir = config['hf_home']
 
-        self._models = {}
-        for model in self.initial_models:
-            self.load_model(model, False, force=True)
+        self._model_name = ''
+        self._model_mode = ''
+
+        # self.load_model(DEFAULT_INITAL_MODEL, 'txt2img')
 
     def log_debug_info(self):
         logging.info('memory summary:')
         logging.info('\n' + torch.cuda.memory_summary())
 
-    def is_model_loaded(self, model_name: str, image: bool):
-        for model_key, model_data in self._models.items():
-            if (model_key == model_name and
-                model_data['image'] == image):
-                return True
+    def is_model_loaded(self, name: str, mode: str):
+        if (name == self._model_name and
+            mode == self._model_mode):
+            return True
 
         return False
 
-    def load_model(
-        self,
-        model_name: str,
-        image: bool,
-        force=False
-    ):
-        logging.info(f'loading model {model_name}...')
-        if force or len(self._models.keys()) == 0:
-            pipe = pipeline_for(
-                model_name, image=image, cache_dir=self.cache_dir)
-
-            self._models[model_name] = {
-                'pipe': pipe,
-                'generated': 0,
-                'image': image
-            }
-
-        else:
-            least_used = list(self._models.keys())[0]
-
-            for model in self._models:
-                if self._models[
-                    least_used]['generated'] > self._models[model]['generated']:
-                    least_used = model
-
-            del self._models[least_used]
-
-            logging.info(f'swapping model {least_used} for {model_name}...')
-
-            gc.collect()
-            torch.cuda.empty_cache()
-
-            pipe = pipeline_for(
-                model_name, image=image, cache_dir=self.cache_dir)
-
-            self._models[model_name] = {
-                'pipe': pipe,
-                'generated': 0,
-                'image': image
-            }
-
-        logging.info(f'loaded model {model_name}')
-        return pipe
-
-    def get_model(self, model_name: str, image: bool) -> DiffusionPipeline:
-        if model_name not in MODELS:
-            raise DGPUComputeError(f'Unknown model {model_name}')
-
-        if not self.is_model_loaded(model_name, image):
-            pipe = self.load_model(model_name, image=image)
-
-        else:
-            pipe = self._models[model_name]['pipe']
-
-        return pipe
+    def unload_model(self):
+        if getattr(self, '_model', None):
+            del self._model
+
+        gc.collect()
+        torch.cuda.empty_cache()
+
+        self._model_name = ''
+        self._model_mode = ''
+
+    def load_model(
+        self,
+        name: str,
+        mode: str
+    ):
+        logging.info(f'loading model {name}...')
+        self.unload_model()
+        self._model = pipeline_for(
+            name, mode, cache_dir=self.cache_dir)
+        self._model_mode = mode
+        self._model_name = name
 
     def compute_one(
         self,
         request_id: int,
         method: str,
         params: dict,
-        input_type: str = 'png',
-        binary: bytes | None = None
+        inputs: list[bytes] = []
     ):
         def maybe_cancel_work(step, *args, **kwargs):
             if self._should_cancel:
@@ -154,6 +126,8 @@ class SkynetMM:
                 logging.warn(f'cancelling work at step {step}')
                 raise DGPUInferenceCancelled()
 
+            return {}
+
         maybe_cancel_work(0)
 
         output_type = 'png'
@@ -163,20 +137,29 @@ class SkynetMM:
         output = None
         output_hash = None
         try:
+            name = params['model']
+
             match method:
-                case 'diffuse':
-                    arguments = prepare_params_for_diffuse(
-                        params, input_type, binary=binary)
-                    prompt, guidance, step, seed, upscaler, extra_params = arguments
-                    model = self.get_model(params['model'], 'image' in extra_params)
-
-                    output = model(
+                case 'diffuse' | 'txt2img' | 'img2img' | 'inpaint':
+                    if not self.is_model_loaded(name, method):
+                        self.load_model(name, method)
+
+                    arguments = prepare_params_for_diffuse(
+                        params, method, inputs)
+                    prompt, guidance, step, seed, upscaler, extra_params = arguments
+
+                    if 'flux' in name.lower():
+                        extra_params['callback_on_step_end'] = maybe_cancel_work
+                    else:
+                        extra_params['callback'] = maybe_cancel_work
+                        extra_params['callback_steps'] = 1
+
+                    output = self._model(
                         prompt,
                         guidance_scale=guidance,
                         num_inference_steps=step,
                         generator=seed,
-                        callback=maybe_cancel_work,
-                        callback_steps=1,
                         **extra_params
                     ).images[0]
 
@@ -185,7 +168,7 @@ class SkynetMM:
                 case 'png':
                     if upscaler == 'x4':
                         input_img = output.convert('RGB')
-                        up_img, _ = self.upscaler.enhance(
+                        up_img, _ = init_upscaler().enhance(
                             convert_from_image_to_cv2(input_img), outscale=4)
 
                         output = convert_from_cv2_to_image(up_img)
@@ -197,6 +180,22 @@ class SkynetMM:
 
                     output_hash = sha256(output_binary).hexdigest()
 
+                case 'upscale':
+                    if self._model_mode != 'upscale':
+                        self.unload_model()
+                        self._model = init_upscaler()
+                        self._model_mode = 'upscale'
+                        self._model_name = 'realesrgan'
+
+                    input_img = inputs[0].convert('RGB')
+                    up_img, _ = self._model.enhance(
+                        convert_from_image_to_cv2(input_img), outscale=4)
+
+                    output = convert_from_cv2_to_image(up_img)
+
+                    output_binary = convert_from_img_to_bytes(output)
+                    output_hash = sha256(output_binary).hexdigest()
+
                 case _:
                     raise DGPUComputeError('Unsupported compute method')
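Note: the manager now holds exactly one resident pipeline keyed by (name, mode) instead
of a dict of pipelines with least-used eviction; switching models means a full unload
plus a CUDA cache flush. A usage sketch (the config dict is a placeholder):

    mm = SkynetMM({'hf_home': '/hf'})

    # a mismatch on either name or mode forces a swap
    if not mm.is_model_loaded('prompthero/openjourney', 'txt2img'):
        mm.load_model('prompthero/openjourney', 'txt2img')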
@@ -117,22 +117,7 @@ class SkynetDGPUDaemon:
 
         return app
 
-    async def serve_forever(self):
-        try:
-            while True:
-                if self.auto_withdraw:
-                    await self.conn.maybe_withdraw_all()
-
-                queue = self._snap['queue']
-
-                random.shuffle(queue)
-                queue = sorted(
-                    queue,
-                    key=lambda req: convert_reward_to_int(req['reward']),
-                    reverse=True
-                )
-
-                for req in queue:
-                    rid = req['id']
+    async def maybe_serve_one(self, req):
+        rid = req['id']
 
         # parse request
@@ -140,25 +125,35 @@ class SkynetDGPUDaemon:
         model = body['params']['model']
 
         # if model not known
-        if model not in MODELS:
+        if model != 'RealESRGAN_x4plus' and model not in MODELS:
             logging.warning(f'Unknown model {model}')
-            continue
+            return False
 
         # if whitelist enabled and model not in it continue
         if (len(self.model_whitelist) > 0 and
            not model in self.model_whitelist):
-            continue
+            return False
 
         # if blacklist contains model skip
         if model in self.model_blacklist:
-            continue
+            return False
 
         my_results = [res['id'] for res in self._snap['my_results']]
        if rid not in my_results and rid in self._snap['requests']:
            statuses = self._snap['requests'][rid]
 
            if len(statuses) == 0:
-                binary, input_type = await self.conn.get_input_data(req['binary_data'])
+                inputs = []
+                for _input in req['binary_data'].split(','):
+                    if _input:
+                        for _ in range(3):
+                            try:
+                                img = await self.conn.get_input_data(_input)
+                                inputs.append(img)
+                                break
+
+                            except:
+                                ...
 
                hash_str = (
                    str(req['nonce'])
@@ -176,7 +171,7 @@ class SkynetDGPUDaemon:
                logging.info(f'working on {body}')
 
                resp = await self.conn.begin_work(rid)
-               if 'code' in resp:
+               if not resp or 'code' in resp:
                    logging.info(f'probably being worked on already... skip.')
 
                else:
@@ -195,8 +190,7 @@ class SkynetDGPUDaemon:
                            self.mm.compute_one,
                            rid,
                            body['method'], body['params'],
-                            input_type=input_type,
-                            binary=binary
+                            inputs=inputs
                        )
                    )
@@ -215,11 +209,30 @@ class SkynetDGPUDaemon:
                        await self.conn.cancel_work(rid, str(e))
 
                    finally:
-                        break
+                        return True
 
        else:
            logging.info(f'request {rid} already beign worked on, skip...')
 
+    async def serve_forever(self):
+        try:
+            while True:
+                if self.auto_withdraw:
+                    await self.conn.maybe_withdraw_all()
+
+                queue = self._snap['queue']
+
+                random.shuffle(queue)
+                queue = sorted(
+                    queue,
+                    key=lambda req: convert_reward_to_int(req['reward']),
+                    reverse=True
+                )
+
+                for req in queue:
+                    if (await self.maybe_serve_one(req)):
+                        break
+
                 await trio.sleep(1)
 
         except KeyboardInterrupt:
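Note: in the relocated serve_forever above, random.shuffle followed by a stable sort on
reward keeps the best-paying requests first while randomizing order among equal rewards,
so concurrent workers spread out instead of all grabbing the same request:

    import random

    queue = [
        {'id': 1, 'reward': 10},
        {'id': 2, 'reward': 10},
        {'id': 3, 'reward': 5},
    ]
    random.shuffle(queue)
    queue = sorted(queue, key=lambda r: r['reward'], reverse=True)
    # ids 1 and 2 lead in random order; id 3 is always last (Python's sort is stable)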
@@ -8,15 +8,16 @@ import logging
 from pathlib import Path
 from functools import partial
 
-import asks
 import trio
+import leap
 import anyio
+import httpx
 
 from PIL import Image, UnidentifiedImageError
 
 from leap.cleos import CLEOS
-from leap.sugar import Checksum256, Name, asset_from_str
-from skynet.constants import DEFAULT_IPFS_DOMAIN
+from leap.protocol import Asset
+from skynet.constants import DEFAULT_IPFS_DOMAIN, GPU_CONTRACT_ABI
 
 from skynet.ipfs import AsyncIPFSHTTP, get_ipfs_file
 from skynet.dgpu.errors import DGPUComputeError
@@ -32,25 +33,25 @@ async def failable(fn: partial, ret_fail=None):
     except (
         OSError,
         json.JSONDecodeError,
-        asks.errors.RequestTimeout,
-        asks.errors.BadHttpResponse,
-        anyio.BrokenResourceError
-    ):
+        anyio.BrokenResourceError,
+        httpx.ReadError,
+        leap.errors.TransactionPushError
+    ) as e:
         return ret_fail
 
 
 class SkynetGPUConnector:
 
     def __init__(self, config: dict):
-        self.account = Name(config['account'])
+        self.account = config['account']
         self.permission = config['permission']
         self.key = config['key']
 
         self.node_url = config['node_url']
         self.hyperion_url = config['hyperion_url']
 
-        self.cleos = CLEOS(
-            None, None, self.node_url, remote=self.node_url)
+        self.cleos = CLEOS(endpoint=self.node_url)
+        self.cleos.load_abi('gpu.scd', GPU_CONTRACT_ABI)
 
         self.ipfs_gateway_url = None
         if 'ipfs_gateway_url' in config:
@@ -151,11 +152,11 @@ class SkynetGPUConnector:
             self.cleos.a_push_action,
             'gpu.scd',
             'workbegin',
-            {
+            list({
                 'worker': self.account,
                 'request_id': request_id,
                 'max_workers': 2
-            },
+            }.values()),
             self.account, self.key,
             permission=self.permission
         )
@@ -168,11 +169,11 @@ class SkynetGPUConnector:
             self.cleos.a_push_action,
             'gpu.scd',
             'workcancel',
-            {
+            list({
                 'worker': self.account,
                 'request_id': request_id,
                 'reason': reason
-            },
+            }.values()),
             self.account, self.key,
             permission=self.permission
         )
@@ -191,10 +192,10 @@ class SkynetGPUConnector:
             self.cleos.a_push_action,
             'gpu.scd',
             'withdraw',
-            {
+            list({
                 'user': self.account,
-                'quantity': asset_from_str(balance)
-            },
+                'quantity': Asset.from_str(balance)
+            }.values()),
             self.account, self.key,
             permission=self.permission
         )
@@ -226,13 +227,13 @@ class SkynetGPUConnector:
             self.cleos.a_push_action,
             'gpu.scd',
             'submit',
-            {
+            list({
                 'worker': self.account,
                 'request_id': request_id,
-                'request_hash': Checksum256(request_hash),
-                'result_hash': Checksum256(result_hash),
+                'request_hash': request_hash,
+                'result_hash': result_hash,
                 'ipfs_hash': ipfs_hash
-            },
+            }.values()),
             self.account, self.key,
             permission=self.permission
         )
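Note: the dict -> list({...}.values()) change suggests the newer py-leap packs action
data positionally against the ABI struct. Python dicts preserve insertion order, so the
literal's field order must match the ABI (worker, request_id, max_workers, ...); the
equivalence, with placeholder values:

    data = {
        'worker': 'someworker',
        'request_id': 42,
        'max_workers': 2,
    }
    # exactly the positional order the workbegin struct declares
    assert list(data.values()) == ['someworker', 42, 2]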
@@ -267,46 +268,15 @@ class SkynetGPUConnector:
 
         return file_cid
 
-    async def get_input_data(self, ipfs_hash: str) -> tuple[bytes, str]:
-        input_type = 'none'
-
-        if ipfs_hash == '':
-            return b'', input_type
-
-        results = {}
-        ipfs_link = f'https://{self.ipfs_domain}/ipfs/{ipfs_hash}'
-        ipfs_link_legacy = ipfs_link + '/image.png'
-
-        async with trio.open_nursery() as n:
-            async def get_and_set_results(link: str):
-                res = await get_ipfs_file(link, timeout=1)
-                logging.info(f'got response from {link}')
-                if not res or res.status_code != 200:
-                    logging.warning(f'couldn\'t get ipfs binary data at {link}!')
-
-                else:
-                    try:
-                        # attempt to decode as image
-                        results[link] = Image.open(io.BytesIO(res.raw))
-                        input_type = 'png'
-                        n.cancel_scope.cancel()
-
-                    except UnidentifiedImageError:
-                        logging.warning(f'couldn\'t get ipfs binary data at {link}!')
-
-            n.start_soon(
-                get_and_set_results, ipfs_link)
-            n.start_soon(
-                get_and_set_results, ipfs_link_legacy)
-
-        input_data = None
-        if ipfs_link_legacy in results:
-            input_data = results[ipfs_link_legacy]
-
-        if ipfs_link in results:
-            input_data = results[ipfs_link]
-
-        if input_data == None:
-            raise DGPUComputeError('Couldn\'t gather input data from ipfs')
-
-        return input_data, input_type
+    async def get_input_data(self, ipfs_hash: str) -> Image:
+        link = f'https://{self.ipfs_domain}/ipfs/{ipfs_hash}'
+
+        res = await get_ipfs_file(link, timeout=1)
+        logging.info(f'got response from {link}')
+        if not res or res.status_code != 200:
+            logging.warning(f'couldn\'t get ipfs binary data at {link}!')
+
+        # attempt to decode as image
+        input_data = Image.open(io.BytesIO(res.raw))
+
+        return input_data
@@ -0,0 +1,50 @@
+#!/usr/bin/python
+
+import torch
+
+from diffusers import (
+    DiffusionPipeline,
+    FluxPipeline,
+    FluxTransformer2DModel
+)
+from transformers import T5EncoderModel, BitsAndBytesConfig
+
+from huggingface_hub import hf_hub_download
+
+__model = {
+    'name': 'black-forest-labs/FLUX.1-schnell'
+}
+
+def pipeline_for(
+    model: str,
+    mode: str,
+    mem_fraction: float = 1.0,
+    cache_dir: str | None = None
+) -> DiffusionPipeline:
+    qonfig = BitsAndBytesConfig(
+        load_in_4bit=True,
+        bnb_4bit_quant_type="nf4",
+    )
+    params = {
+        'torch_dtype': torch.bfloat16,
+        'cache_dir': cache_dir,
+        'device_map': 'balanced',
+        'max_memory': {'cpu': '10GiB', 0: '11GiB'}
+        # 'max_memory': {0: '11GiB'}
+    }
+
+    text_encoder = T5EncoderModel.from_pretrained(
+        'black-forest-labs/FLUX.1-schnell',
+        subfolder="text_encoder_2",
+        torch_dtype=torch.bfloat16,
+        quantization_config=qonfig
+    )
+    params['text_encoder_2'] = text_encoder
+
+    pipe = FluxPipeline.from_pretrained(
+        model, **params)
+
+    pipe.vae.enable_tiling()
+    pipe.vae.enable_slicing()
+
+    return pipe
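Note: a hedged usage sketch of the returned pipeline; FLUX.1-schnell is a few-step
distilled model, so near-zero guidance and ~4 steps are the customary diffusers settings
(the settings here are assumptions, not taken from the repo):

    pipe = pipeline_for('black-forest-labs/FLUX.1-schnell', 'txt2img')

    image = pipe(
        'a red old tractor in a sunny wheat field',
        num_inference_steps=4,   # schnell is distilled for very few steps
        guidance_scale=0.0,
        max_sequence_length=256,
    ).images[0]
    image.save('output.png')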
@@ -0,0 +1,56 @@
+#!/usr/bin/python
+
+import torch
+
+from diffusers import (
+    DiffusionPipeline,
+    FluxFillPipeline,
+    FluxTransformer2DModel
+)
+from transformers import T5EncoderModel, BitsAndBytesConfig
+
+__model = {
+    'name': 'black-forest-labs/FLUX.1-Fill-dev'
+}
+
+def pipeline_for(
+    model: str,
+    mode: str,
+    mem_fraction: float = 1.0,
+    cache_dir: str | None = None
+) -> DiffusionPipeline:
+    qonfig = BitsAndBytesConfig(
+        load_in_4bit=True,
+        bnb_4bit_quant_type="nf4",
+    )
+    params = {
+        'torch_dtype': torch.bfloat16,
+        'cache_dir': cache_dir,
+        'device_map': 'balanced',
+        'max_memory': {'cpu': '10GiB', 0: '11GiB'}
+        # 'max_memory': {0: '11GiB'}
+    }
+
+    text_encoder = T5EncoderModel.from_pretrained(
+        'sayakpaul/FLUX.1-Fill-dev-nf4',
+        subfolder="text_encoder_2",
+        torch_dtype=torch.bfloat16,
+        quantization_config=qonfig
+    )
+    params['text_encoder_2'] = text_encoder
+
+    transformer = FluxTransformer2DModel.from_pretrained(
+        'sayakpaul/FLUX.1-Fill-dev-nf4',
+        subfolder="transformer",
+        torch_dtype=torch.bfloat16,
+        quantization_config=qonfig
+    )
+    params['transformer'] = transformer
+
+    pipe = FluxFillPipeline.from_pretrained(
+        model, **params)
+
+    pipe.vae.enable_tiling()
+    pipe.vae.enable_slicing()
+
+    return pipe
@@ -39,7 +39,7 @@ def validate_user_config_request(req: str):
                 case 'model' | 'algo':
                     attr = 'model'
                     val = params[2]
-                    shorts = [model_info['short'] for model_info in MODELS.values()]
+                    shorts = [model_info.short for model_info in MODELS.values()]
                     if val not in shorts:
                         raise ConfigUnknownAlgorithm(f'no model named {val}')
@@ -112,20 +112,10 @@ def validate_user_config_request(req: str):
 
 
 def perform_auto_conf(config: dict) -> dict:
-    model = config['model']
-    prefered_size_w = 512
-    prefered_size_h = 512
-
-    if 'xl' in model:
-        prefered_size_w = 1024
-        prefered_size_h = 1024
-
-    else:
-        prefered_size_w = 512
-        prefered_size_h = 512
+    model = MODELS[config['model']]
 
     config['step'] = random.randint(20, 35)
-    config['width'] = prefered_size_w
-    config['height'] = prefered_size_h
+    config['width'] = model.size.w
+    config['height'] = model.size.h
 
     return config
@@ -14,7 +14,7 @@ from contextlib import AsyncExitStack
 from contextlib import asynccontextmanager as acm
 
 from leap.cleos import CLEOS
-from leap.sugar import Name, asset_from_str, collect_stdout
+from leap.protocol import Name, Asset
 from leap.hyperion import HyperionAPI
 
 from telebot.types import InputMediaPhoto
@@ -43,7 +43,6 @@ class SkynetTelegramFrontend:
         db_user: str,
         db_pass: str,
         ipfs_node: str,
-        remote_ipfs_node: str | None,
         key: str,
         explorer_domain: str,
         ipfs_domain: str
@@ -56,22 +55,19 @@ class SkynetTelegramFrontend:
         self.db_host = db_host
         self.db_user = db_user
         self.db_pass = db_pass
-        self.remote_ipfs_node = remote_ipfs_node
         self.key = key
         self.explorer_domain = explorer_domain
         self.ipfs_domain = ipfs_domain
 
         self.bot = AsyncTeleBot(token, exception_handler=SKYExceptionHandler)
-        self.cleos = CLEOS(None, None, url=node_url, remote=node_url)
+        self.cleos = CLEOS(endpoint=node_url)
+        self.cleos.load_abi('gpu.scd', GPU_CONTRACT_ABI)
         self.hyperion = HyperionAPI(hyperion_url)
         self.ipfs_node = AsyncIPFSHTTP(ipfs_node)
 
         self._async_exit_stack = AsyncExitStack()
 
     async def start(self):
-        if self.remote_ipfs_node:
-            await self.ipfs_node.connect(self.remote_ipfs_node)
-
         self.db_call = await self._async_exit_stack.enter_async_context(
             open_database_connection(
                 self.db_user, self.db_pass, self.db_host))
@@ -116,7 +112,7 @@ class SkynetTelegramFrontend:
         method: str,
         params: dict,
         file_id: str | None = None,
-        binary_data: str = ''
+        inputs: list[str] = []
     ) -> bool:
         if params['seed'] == None:
             params['seed'] = random.randint(0, 0xFFFFFFFF)
@@ -145,13 +141,13 @@ class SkynetTelegramFrontend:
         res = await self.cleos.a_push_action(
             'gpu.scd',
             'enqueue',
-            {
+            list({
                 'user': Name(self.account),
                 'request_body': body,
-                'binary_data': binary_data,
-                'reward': asset_from_str(reward),
+                'binary_data': ','.join(inputs),
+                'reward': Asset.from_str(reward),
                 'min_verification': 1
-            },
+            }.values()),
             self.account, self.key, permission=self.permission
         )
@@ -176,12 +172,12 @@ class SkynetTelegramFrontend:
             parse_mode='HTML'
         )
 
-        out = collect_stdout(res)
+        out = res['processed']['action_traces'][0]['console']
 
         request_id, nonce = out.split(':')
 
         request_hash = sha256(
-            (nonce + body + binary_data).encode('utf-8')).hexdigest().upper()
+            (nonce + body + ','.join(inputs)).encode('utf-8')).hexdigest().upper()
 
         request_id = int(request_id)
 
@@ -189,7 +185,7 @@ class SkynetTelegramFrontend:
 
         tx_hash = None
         ipfs_hash = None
-        for i in range(60):
+        for i in range(60 * 3):
             try:
                 submits = await self.hyperion.aget_actions(
                     account=self.account,
@@ -241,15 +237,12 @@ class SkynetTelegramFrontend:
             user, params, tx_hash, worker, reward, self.explorer_domain)
 
         # attempt to get the image and send it
-        results = {}
         ipfs_link = f'https://{self.ipfs_domain}/ipfs/{ipfs_hash}'
-        ipfs_link_legacy = ipfs_link + '/image.png'
 
-        async def get_and_set_results(link: str):
-            res = await get_ipfs_file(link)
-            logging.info(f'got response from {link}')
-            if not res or res.status_code != 200:
-                logging.warning(f'couldn\'t get ipfs binary data at {link}!')
+        res = await get_ipfs_file(ipfs_link)
+        logging.info(f'got response from {ipfs_link}')
+        if not res or res.status_code != 200:
+            logging.warning(f'couldn\'t get ipfs binary data at {ipfs_link}!')
 
-            else:
-                try:
+        else:
+            try:
@@ -264,23 +257,8 @@ class SkynetTelegramFrontend:
                 image.save(tmp_buf, format='PNG')
                 png_img = tmp_buf.getvalue()
 
-                results[link] = png_img
-
             except UnidentifiedImageError:
-                logging.warning(f'couldn\'t get ipfs binary data at {link}!')
+                logging.warning(f'couldn\'t get ipfs binary data at {ipfs_link}!')
 
-        tasks = [
-            get_and_set_results(ipfs_link),
-            get_and_set_results(ipfs_link_legacy)
-        ]
-        await asyncio.gather(*tasks)
-
-        png_img = None
-        if ipfs_link_legacy in results:
-            png_img = results[ipfs_link_legacy]
-
-        if ipfs_link in results:
-            png_img = results[ipfs_link]
-
         if not png_img:
             await self.update_status_message(
@@ -254,7 +254,7 @@ def create_handler_context(frontend: 'SkynetTelegramFrontend'):
         success = await work_request(
             user, status_msg, 'img2img', params,
             file_id=file_id,
-            binary_data=ipfs_hash
+            inputs=ipfs_hash
         )
 
         if success:
@@ -320,7 +320,7 @@ def create_handler_context(frontend: 'SkynetTelegramFrontend'):
         success = await work_request(
             user, status_msg, 'redo', params,
             file_id=file_id,
-            binary_data=binary
+            inputs=binary
         )
 
         if success:
@@ -72,7 +72,7 @@ def generate_reply_caption(
 ):
     explorer_link = hlink(
         'SKYNET Transaction Explorer',
-        f'https://explorer.{explorer_domain}/v2/explore/transaction/{tx_hash}'
+        f'https://{explorer_domain}/v2/explore/transaction/{tx_hash}'
     )
 
     meta_info = prepare_metainfo_caption(tguser, worker, reward, params)
@@ -3,10 +3,10 @@
 import logging
 from pathlib import Path

-import asks
+import httpx


-class IPFSClientException(BaseException):
+class IPFSClientException(Exception):
     ...
@@ -16,7 +16,8 @@ class AsyncIPFSHTTP:
         self.endpoint = endpoint

     async def _post(self, sub_url: str, *args, **kwargs):
-        resp = await asks.post(
-            self.endpoint + sub_url,
-            *args, **kwargs
-        )
+        async with httpx.AsyncClient() as client:
+            resp = await client.post(
+                self.endpoint + sub_url,
+                *args, **kwargs
+            )
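Note: the rewritten `_post` opens a fresh `httpx.AsyncClient` per call, which is the simplest correct translation from `asks` but pays connection setup on every request. A minimal sketch of an alternative that pools connections by reusing one client (the class name and `aclose` lifecycle here are illustrative assumptions, not part of this commit):

import httpx

class PooledIPFSHTTP:
    def __init__(self, endpoint: str):
        # one long-lived client; httpx pools and reuses connections
        self._client = httpx.AsyncClient(base_url=endpoint)

    async def _post(self, sub_url: str, *args, **kwargs):
        resp = await self._client.post(sub_url, *args, **kwargs)
        return resp.json()

    async def aclose(self):
        # the owner must close the client explicitly, since no
        # `async with` block scopes its lifetime anymore
        await self._client.aclose()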
@@ -25,10 +26,10 @@ class AsyncIPFSHTTP:
             raise IPFSClientException(resp.text)

         return resp.json()

     async def add(self, file_path: Path, **kwargs):
         files = {
-            'file': file_path
+            'file': (file_path.name, file_path.open('rb'))
         }
         return await self._post(
             '/api/v0/add',
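The `add` change follows from the httpx migration: the old code passed the path value straight through, while httpx's multipart API expects a `(filename, fileobj)` tuple. A hypothetical trio-driven usage against a local kubo daemon on the default API port (endpoint and file name are placeholders):

import trio
from pathlib import Path

from skynet.ipfs import AsyncIPFSHTTP

async def main():
    ipfs = AsyncIPFSHTTP('http://127.0.0.1:5001')
    info = await ipfs.add(Path('output.png'))
    print(info['Hash'])  # CID of the added file, per the kubo /api/v0/add response

trio.run(main)

One quirk worth noting: `file_path.open('rb')` is never explicitly closed, so the handle lives until garbage collection.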
@@ -55,18 +56,19 @@ class AsyncIPFSHTTP:
         ))['Peers']


-async def get_ipfs_file(ipfs_link: str, timeout: int = 60):
+async def get_ipfs_file(ipfs_link: str, timeout: int = 60 * 5):
     logging.info(f'attempting to get image at {ipfs_link}')
     resp = None
-    for i in range(timeout):
+    for _ in range(timeout):
         try:
-            resp = await asks.get(ipfs_link, timeout=3)
+            async with httpx.AsyncClient() as client:
+                resp = await client.get(ipfs_link, timeout=3)

-        except asks.errors.RequestTimeout:
-            logging.warning('timeout...')
+        except httpx.RequestError as e:
+            logging.warning(f'Request error: {e}')

-        except asks.errors.BadHttpResponse as e:
-            logging.error(f'ifps gateway exception: \n{e}')
+        if resp is not None:
+            break

     if resp:
         logging.info(f'status_code: {resp.status_code}')
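The retry semantics changed subtly here: the default budget is now `60 * 5` attempts at a 3-second timeout each (up to roughly 15 minutes against a slow gateway), and any response at all breaks the loop; status-code checking stays with the caller. A hedged usage sketch (the gateway URL is illustrative):

import trio

from skynet.ipfs import get_ipfs_file

async def fetch_bytes(cid: str) -> bytes | None:
    resp = await get_ipfs_file(f'https://ipfs.io/ipfs/{cid}')
    if resp and resp.status_code == 200:
        return resp.content  # raw bytes of the IPFS object
    return None

trio.run(fetch_bytes, 'QmZcGdXXVQfpco1G3tr2CGFBtv8xVsCwcwuq9gnJBWDymi')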
@@ -55,7 +55,7 @@ class SkynetPinner:

         cids = []
         for action in enqueues['actions']:
-            cid = action['act']['data']['binary_data']
+            for cid in action['act']['data']['binary_data'].split(','):
                 if cid and not self.is_pinned(cid):
                     cids.append(cid)
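The pinner change pairs with the new multi-input requests (see the inpaint test below, whose `binary_data` joins two CIDs with a comma): every CID in the field is now considered for pinning, not just the whole string. A quick illustration of the parsing:

binary_data = (
    'QmZcGdXXVQfpco1G3tr2CGFBtv8xVsCwcwuq9gnJBWDymi,'
    'Qmccx1aXNmq5mZDS3YviUhgGHXWhQeHvca3AgA7MDjj2hR'
)
cids = [cid for cid in binary_data.split(',') if cid]
assert len(cids) == 2

# the falsy `if cid` guard also covers empty requests, since
# ''.split(',') yields [''] rather than []
assert ''.split(',') == ['']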
skynet/utils.py (162 lines changed)
@@ -6,26 +6,42 @@ import sys
 import time
 import random
 import logging
+import importlib

 from typing import Optional
 from pathlib import Path
-import asks

+import trio
 import torch
 import numpy as np

 from PIL import Image
-from basicsr.archs.rrdbnet_arch import RRDBNet
 from diffusers import (
     DiffusionPipeline,
-    EulerAncestralDiscreteScheduler
+    AutoPipelineForText2Image,
+    AutoPipelineForImage2Image,
+    AutoPipelineForInpainting,
+    EulerAncestralDiscreteScheduler,
 )
-from realesrgan import RealESRGANer
 from huggingface_hub import login
-import trio

 from .constants import MODELS

+# Hack to fix a changed import in torchvision 0.17+, which otherwise breaks
+# basicsr; see https://github.com/AUTOMATIC1111/stable-diffusion-webui/issues/13985
+try:
+    import torchvision.transforms.functional_tensor  # noqa: F401
+except ImportError:
+    try:
+        import torchvision.transforms.functional as functional
+        sys.modules["torchvision.transforms.functional_tensor"] = functional
+    except ImportError:
+        pass  # shrug...
+
+from basicsr.archs.rrdbnet_arch import RRDBNet
+from realesrgan import RealESRGANer


 def time_ms():
     return int(time.time() * 1000)
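Context for the import shuffle: torchvision 0.17 removed the private `torchvision.transforms.functional_tensor` module, but released versions of basicsr still import from it, so importing `RRDBNet` started crashing. The shim registers the surviving `functional` module under the old name before basicsr is loaded, which is why the RRDBNet and RealESRGANer imports had to move below it. What breaks without the shim (illustrative, on torchvision >= 0.17):

# ModuleNotFoundError: No module named 'torchvision.transforms.functional_tensor'
from torchvision.transforms.functional_tensor import rgb_to_grayscale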
@@ -58,14 +74,18 @@ def crop_image(image: Image, max_w: int, max_h: int) -> Image:

     return image.convert('RGB')


+def convert_from_bytes_and_crop(raw: bytes, max_w: int, max_h: int) -> Image:
+    return crop_image(convert_from_bytes_to_img(raw), max_w, max_h)
+
+
 def pipeline_for(
     model: str,
+    mode: str,
     mem_fraction: float = 1.0,
-    image: bool = False,
     cache_dir: str | None = None
 ) -> DiffusionPipeline:
+    logging.info(f'pipeline_for {model} {mode}')
     assert torch.cuda.is_available()
     torch.cuda.empty_cache()
     torch.backends.cuda.matmul.allow_tf32 = True
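`pipeline_for` now takes an explicit `mode` string in place of the old `image: bool` flag, so call sites name the task directly. A hedged usage sketch (model selection mirrors the defaults used further down):

from skynet.constants import MODELS
from skynet.utils import pipeline_for

model = list(MODELS.keys())[-1]

# what `pipeline_for(model)` used to build
pipe = pipeline_for(model, 'txt2img')

# what `pipeline_for(model, image=True)` used to build
pipe_i2i = pipeline_for(model, 'img2img')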
@@ -79,21 +99,35 @@ def pipeline_for(
     torch.use_deterministic_algorithms(True)

     model_info = MODELS[model]
+    shortname = model_info.short
+
+    # disable for compat with "diffuse" method
+    # assert mode in model_info.tags
+
+    # default to checking if custom pipeline exist and return that if not, attempt generic
+    try:
+        normalized_shortname = shortname.replace('-', '_')
+        custom_pipeline = importlib.import_module(f'skynet.dgpu.pipes.{normalized_shortname}')
+        assert custom_pipeline.__model['name'] == model
+        return custom_pipeline.pipeline_for(model, mode, mem_fraction=mem_fraction, cache_dir=cache_dir)
+
+    except ImportError:
+        ...

-    req_mem = model_info['mem']
+    req_mem = model_info.mem
     mem_gb = torch.cuda.mem_get_info()[1] / (10**9)
     mem_gb *= mem_fraction
     over_mem = mem_gb < req_mem
     if over_mem:
         logging.warn(f'model requires {req_mem} but card has {mem_gb}, model will run slower..')

-    shortname = model_info['short']
-
     params = {
         'safety_checker': None,
         'torch_dtype': torch.float16,
         'cache_dir': cache_dir,
-        'variant': 'fp16'
+        'variant': 'fp16',
     }

     match shortname:
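The importlib probe adds a per-model escape hatch: a module named `skynet.dgpu.pipes.<shortname>` (dashes mapped to underscores) takes over pipeline construction entirely. Inferring the contract from the call site, such a module would look roughly like this (the module path, model name, and body are hypothetical):

# hypothetical skynet/dgpu/pipes/my_model.py
from diffusers import DiffusionPipeline

# the caller verifies this matches the requested model
__model = {'name': 'someorg/my-model'}

def pipeline_for(
    model: str,
    mode: str,
    mem_fraction: float = 1.0,
    cache_dir: str | None = None
) -> DiffusionPipeline:
    # any fully custom construction logic goes here
    return DiffusionPipeline.from_pretrained(model, cache_dir=cache_dir)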
@@ -102,26 +136,37 @@ def pipeline_for(

             torch.cuda.set_per_process_memory_fraction(mem_fraction)

-    pipe = DiffusionPipeline.from_pretrained(
+    pipe_class = DiffusionPipeline
+    match mode:
+        case 'inpaint':
+            pipe_class = AutoPipelineForInpainting
+
+        case 'img2img':
+            pipe_class = AutoPipelineForImage2Image
+
+        case 'txt2img':
+            pipe_class = AutoPipelineForText2Image
+
+    pipe = pipe_class.from_pretrained(
         model, **params)

     pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(
         pipe.scheduler.config)

-    pipe.enable_xformers_memory_efficient_attention()
+    # pipe.enable_xformers_memory_efficient_attention()

     if over_mem:
-        if not image:
-            pipe.enable_vae_slicing()
-            pipe.enable_vae_tiling()
+        if mode == 'txt2img':
+            pipe.vae.enable_tiling()
+            pipe.vae.enable_slicing()

         pipe.enable_model_cpu_offload()

     else:
-        if sys.version_info[1] < 11:
-            # torch.compile only supported on python < 3.11
-            pipe.unet = torch.compile(
-                pipe.unet, mode='reduce-overhead', fullgraph=True)
+        # if sys.version_info[1] < 11:
+        #     # torch.compile only supported on python < 3.11
+        #     pipe.unet = torch.compile(
+        #         pipe.unet, mode='reduce-overhead', fullgraph=True)

         pipe = pipe.to('cuda')
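Two details in the low-memory branch are easy to miss: tiling/slicing are now invoked on the VAE object itself rather than through the pipeline wrapper, and `enable_model_cpu_offload()` replaces a manual `.to('cuda')` (calling both would defeat the offload). A sketch of that decision in isolation (the function name is illustrative):

def apply_memory_strategy(pipe, mode: str, over_mem: bool):
    if over_mem:
        if mode == 'txt2img':
            # both toggles live on the AutoencoderKL in recent diffusers
            pipe.vae.enable_tiling()
            pipe.vae.enable_slicing()
        # submodules are shuttled to the GPU on demand; do not also
        # call pipe.to('cuda') or the memory savings are lost
        pipe.enable_model_cpu_offload()
    else:
        pipe = pipe.to('cuda')
    return pipe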
@@ -130,7 +175,7 @@ def pipeline_for(

 def txt2img(
     hf_token: str,
-    model: str = 'prompthero/openjourney',
+    model: str = list(MODELS.keys())[-1],
     prompt: str = 'a red old tractor in a sunny wheat field',
     output: str = 'output.png',
     width: int = 512, height: int = 512,
@@ -139,7 +184,7 @@ def txt2img(
     seed: Optional[int] = None
 ):
     login(token=hf_token)
-    pipe = pipeline_for(model)
+    pipe = pipeline_for(model, 'txt2img')

     seed = seed if seed else random.randint(0, 2 ** 64)
     prompt = prompt
@@ -156,7 +201,7 @@

 def img2img(
     hf_token: str,
-    model: str = 'prompthero/openjourney',
+    model: str = list(MODELS.keys())[-2],
     prompt: str = 'a red old tractor in a sunny wheat field',
     img_path: str = 'input.png',
     output: str = 'output.png',
@@ -166,10 +211,12 @@ def img2img(
     seed: Optional[int] = None
 ):
     login(token=hf_token)
-    pipe = pipeline_for(model, image=True)
+    pipe = pipeline_for(model, 'img2img')
+
+    model_info = MODELS[model]

     with open(img_path, 'rb') as img_file:
-        input_img = convert_from_bytes_and_crop(img_file.read(), 512, 512)
+        input_img = convert_from_bytes_and_crop(img_file.read(), model_info.size.w, model_info.size.h)

     seed = seed if seed else random.randint(0, 2 ** 64)
     prompt = prompt
@@ -184,7 +231,48 @@

     image.save(output)


-def init_upscaler(model_path: str = 'weights/RealESRGAN_x4plus.pth'):
+def inpaint(
+    hf_token: str,
+    model: str = list(MODELS.keys())[-3],
+    prompt: str = 'a red old tractor in a sunny wheat field',
+    img_path: str = 'input.png',
+    mask_path: str = 'mask.png',
+    output: str = 'output.png',
+    strength: float = 1.0,
+    guidance: float = 10,
+    steps: int = 28,
+    seed: Optional[int] = None
+):
+    login(token=hf_token)
+    pipe = pipeline_for(model, 'inpaint')
+
+    model_info = MODELS[model]
+
+    with open(img_path, 'rb') as img_file:
+        input_img = convert_from_bytes_and_crop(img_file.read(), model_info.size.w, model_info.size.h)
+
+    with open(mask_path, 'rb') as mask_file:
+        mask_img = convert_from_bytes_and_crop(mask_file.read(), model_info.size.w, model_info.size.h)
+
+    var_params = {}
+    if 'flux' not in model.lower():
+        var_params['strength'] = strength
+
+    seed = seed if seed else random.randint(0, 2 ** 64)
+    prompt = prompt
+    image = pipe(
+        prompt,
+        image=input_img,
+        mask_image=mask_img,
+        guidance_scale=guidance, num_inference_steps=steps,
+        generator=torch.Generator("cuda").manual_seed(seed),
+        **var_params
+    ).images[0]
+
+    image.save(output)
+
+
+def init_upscaler(model_path: str = 'hf_home/RealESRGAN_x4plus.pth'):
     return RealESRGANer(
         scale=4,
         model_path=model_path,
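Hedged example invocation of the new `inpaint` entrypoint (the token and file names are placeholders). Note that `strength` is only forwarded for non-FLUX models, presumably because the FLUX inpainting pipeline used here does not accept it:

from skynet.utils import inpaint

inpaint(
    'hf_...',  # hypothetical HF token
    prompt='a black panther on a sunny roof',
    img_path='input.png',
    mask_path='mask.png',
    output='inpainted.png',
    guidance=7.5,
    steps=21,
    seed=168402949,
)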
@@ -203,7 +291,7 @@ def init_upscaler(model_path: str = 'weights/RealESRGAN_x4plus.pth'):
 def upscale(
     img_path: str = 'input.png',
     output: str = 'output.png',
-    model_path: str = 'weights/RealESRGAN_x4plus.pth'
+    model_path: str = 'hf_home/RealESRGAN_x4plus.pth'
 ):
     input_img = Image.open(img_path).convert('RGB')
@@ -214,25 +302,3 @@ def upscale(

     image = convert_from_cv2_to_image(up_img)
     image.save(output)
-
-
-async def download_upscaler():
-    print('downloading upscaler...')
-    weights_path = Path('weights')
-    weights_path.mkdir(exist_ok=True)
-    upscaler_url = 'https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.0/RealESRGAN_x4plus.pth'
-    save_path = weights_path / 'RealESRGAN_x4plus.pth'
-    response = await asks.get(upscaler_url)
-    with open(save_path, 'wb') as f:
-        f.write(response.content)
-    print('done')
-
-
-def download_all_models(hf_token: str, hf_home: str):
-    assert torch.cuda.is_available()
-
-    trio.run(download_upscaler)
-
-    login(token=hf_token)
-    for model in MODELS:
-        print(f'DOWNLOADING {model.upper()}')
-        pipeline_for(model, cache_dir=hf_home)
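With `download_upscaler` and `download_all_models` deleted (and `asks` gone with them), the Real-ESRGAN weights are simply expected at the new `hf_home/` location. If you still need to fetch them by hand, a one-off httpx sketch (destination per the new default path, URL as in the removed code):

from pathlib import Path

import httpx

url = (
    'https://github.com/xinntao/Real-ESRGAN/releases'
    '/download/v0.1.0/RealESRGAN_x4plus.pth'
)
dest = Path('hf_home')
dest.mkdir(exist_ok=True)

# follow_redirects matters: GitHub release assets redirect to a CDN
resp = httpx.get(url, follow_redirects=True)
resp.raise_for_status()
(dest / 'RealESRGAN_x4plus.pth').write_bytes(resp.content)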
@@ -2,7 +2,7 @@

 import pytest

-from skynet.db import open_new_database
+from skynet.config import *
 from skynet.ipfs import AsyncIPFSHTTP
 from skynet.ipfs.docker import open_ipfs_node
 from skynet.nodeos import open_nodeos
@@ -15,6 +15,7 @@ def ipfs_client():

 @pytest.fixture(scope='session')
 def postgres_db():
+    from skynet.db import open_new_database
     with open_new_database() as db_params:
         yield db_params
@@ -22,3 +23,20 @@ def postgres_db():
 def cleos():
     with open_nodeos() as cli:
         yield cli
+
+@pytest.fixture(scope='session')
+def dgpu():
+    from skynet.dgpu.network import SkynetGPUConnector
+    from skynet.dgpu.compute import SkynetMM
+    from skynet.dgpu.daemon import SkynetDGPUDaemon
+
+    config = load_skynet_toml(file_path='skynet.toml')
+    hf_token = load_key(config, 'skynet.dgpu.hf_token')
+    hf_home = load_key(config, 'skynet.dgpu.hf_home')
+    set_hf_vars(hf_token, hf_home)
+    config = config['skynet']['dgpu']
+    conn = SkynetGPUConnector(config)
+    mm = SkynetMM(config)
+    daemon = SkynetDGPUDaemon(mm, conn, config)
+
+    yield conn, mm, daemon
@@ -0,0 +1,112 @@
+import json
+
+from skynet.dgpu.compute import SkynetMM
+from skynet.constants import *
+from skynet.config import *
+
+
+async def test_diffuse(dgpu):
+    conn, mm, daemon = dgpu
+    await conn.cancel_work(0, 'testing')
+
+    daemon._snap['requests'][0] = {}
+    req = {
+        'id': 0,
+        'nonce': 0,
+        'body': json.dumps({
+            "method": "diffuse",
+            "params": {
+                "prompt": "Kronos God Realistic 4k",
+                "model": list(MODELS.keys())[-1],
+                "step": 21,
+                "width": 1024,
+                "height": 1024,
+                "seed": 168402949,
+                "guidance": "7.5"
+            }
+        }),
+        'binary_data': '',
+    }
+
+    await daemon.maybe_serve_one(req)
+
+
+async def test_txt2img(dgpu):
+    conn, mm, daemon = dgpu
+    await conn.cancel_work(0, 'testing')
+
+    daemon._snap['requests'][0] = {}
+    req = {
+        'id': 0,
+        'nonce': 0,
+        'body': json.dumps({
+            "method": "txt2img",
+            "params": {
+                "prompt": "Kronos God Realistic 4k",
+                "model": list(MODELS.keys())[-1],
+                "step": 21,
+                "width": 1024,
+                "height": 1024,
+                "seed": 168402949,
+                "guidance": "7.5"
+            }
+        }),
+        'binary_data': '',
+    }
+
+    await daemon.maybe_serve_one(req)
+
+
+async def test_img2img(dgpu):
+    conn, mm, daemon = dgpu
+    await conn.cancel_work(0, 'testing')
+
+    daemon._snap['requests'][0] = {}
+    req = {
+        'id': 0,
+        'nonce': 0,
+        'body': json.dumps({
+            "method": "img2img",
+            "params": {
+                "prompt": "a hindu cat god feline god on a house roof",
+                "model": list(MODELS.keys())[-2],
+                "step": 21,
+                "width": 1024,
+                "height": 1024,
+                "seed": 168402949,
+                "guidance": "7.5",
+                "strength": "0.5"
+            }
+        }),
+        'binary_data': 'QmZcGdXXVQfpco1G3tr2CGFBtv8xVsCwcwuq9gnJBWDymi',
+    }
+
+    await daemon.maybe_serve_one(req)
+
+
+async def test_inpaint(dgpu):
+    conn, mm, daemon = dgpu
+    await conn.cancel_work(0, 'testing')
+
+    daemon._snap['requests'][0] = {}
+    req = {
+        'id': 0,
+        'nonce': 0,
+        'body': json.dumps({
+            "method": "inpaint",
+            "params": {
+                "prompt": "a black panther on a sunny roof",
+                "model": list(MODELS.keys())[-3],
+                "step": 21,
+                "width": 1024,
+                "height": 1024,
+                "seed": 168402949,
+                "guidance": "7.5",
+                "strength": "0.5"
+            }
+        }),
+        'binary_data':
+            'QmZcGdXXVQfpco1G3tr2CGFBtv8xVsCwcwuq9gnJBWDymi,' +
+            'Qmccx1aXNmq5mZDS3YviUhgGHXWhQeHvca3AgA7MDjj2hR'
+    }
+
+    await daemon.maybe_serve_one(req)