skynet

<img src="https://explorer.skygpu.net/v2/explore/assets/logo.png" width=512 height=512>

decentralized compute platform

native install

system dependencies:
- cuda 11.8
- llvm 10
- python 3.10+
- docker (for ipfs node)
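
To sanity-check these before installing, you can query the versions the system already provides; llvm-config is only present if the llvm dev tooling is installed:

# check system dependencies (assumes standard tooling on PATH)
nvcc --version          # cuda toolkit
llvm-config --version   # llvm
python3 --version       # needs 3.10+
docker --version        # needed for the ipfs node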

# create and edit config from template
cp skynet.toml.example skynet.toml

# install the poetry package manager
curl -sSL https://install.python-poetry.org | python3 -

# install project dependencies
poetry install

# activate the virtual environment
poetry shell

# verify the cli is available
skynet --help

# launch a worker
skynet run dgpu
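
Note: the repository also ships a uv.lock, so dependencies can alternatively be managed with the uv package manager. A rough equivalent of the steps above, assuming the skynet entrypoint is declared in pyproject.toml:

# install uv
curl -LsSf https://astral.sh/uv/install.sh | sh

# create the virtualenv and install locked dependencies
uv sync

# run the cli through uv instead of poetry shell
uv run skynet --help
uv run skynet run dgpu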

dockerized install

frontend

system dependencies:
- docker

# create and edit config from template
cp skynet.toml.example skynet.toml

# pull runtime container
docker pull guilledk/skynet:runtime-frontend

# run telegram bot
docker run \
    -it \
    --rm \
    --network host \
    --name skynet-telegram \
    --mount type=bind,source="$(pwd)",target=/root/target \
    guilledk/skynet:runtime-frontend \
    skynet run telegram --db-pass PASSWORD --db-user USER --db-host HOST
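
Replace PASSWORD, USER and HOST with your actual database credentials. Once the container is up, standard docker commands can be used to check on it:

# confirm the bot container is running and follow its logs
docker ps --filter name=skynet-telegram
docker logs -f skynet-telegram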

worker

system dependencies:
- docker with gpu support enabled (nvidia container toolkit)

# create and edit config from template
cp skynet.toml.example skynet.toml

# pull runtime container
docker pull guilledk/skynet:runtime-cuda

# or build it (takes a bit of time)
./build_docker.sh

# run worker with all gpus
docker run \
    -it \
    --rm \
    --gpus all \
    --network host \
    --name skynet-worker \
    --mount type=bind,source="$(pwd)",target=/root/target \
    guilledk/skynet:runtime-cuda \
    skynet run dgpu

# run worker with a specific gpu
docker run \
    -it \
    --rm \
    --gpus '"device=1"' \
    --network host \
    --name skynet-worker-1 \
    --mount type=bind,source="$(pwd)",target=/root/target \
    guilledk/skynet:runtime-cuda \
    skynet run dgpu
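
The device index passed to --gpus follows the ordering reported by nvidia-smi on the host. To double check which gpu a container will see (assuming nvidia-smi is available inside the runtime image):

# list gpus and their indices on the host
nvidia-smi -L

# verify the container only sees the selected gpu
docker run --rm --gpus '"device=1"' \
    guilledk/skynet:runtime-cuda \
    nvidia-smi -L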