Compare commits

main...structural (35 commits)

SHA1:
efd11f7d74, 76cee99fc2, 5f50206d84, a47a7a39b1, bab265b2d8,
010874bed5, ea010ab46a, be7fc89ae9, 2a9a78651b, be818a720a,
ba353bf46f, 9b2161506f, 6b155849b7, 59c8c7bfe3, 6ac6fd56c0,
f799e9ac51, 9980bb2bd0, 8de9ab291e, 1a83626f26, 6b4d08d030,
7b8b9d6805, 5afe0a0264, eeb9a7d61b, 5cee222353, 8ebb1f09de,
2683a7f33a, 255209f881, 9a0d529b18, 1c441b0986, afbdb50a30,
e46033cbe7, c932bb5911, 33482d8f41, 7ae194baed, ef7ca49e9b
@@ -0,0 +1,19 @@ (new file: a Nix dev-shell definition; all lines added)

{ pkgs ? import <nixpkgs> {} }:
let
  nativeBuildInputs = with pkgs; [
    stdenv.cc.cc.lib
    uv
  ];

in
pkgs.mkShell {
  inherit nativeBuildInputs;

  LD_LIBRARY_PATH = pkgs.lib.makeLibraryPath nativeBuildInputs;
  TMPDIR = "/tmp";

  shellHook = ''
    set -e
    uv venv .venv --python=3.11
  '';
}
@@ -9,7 +9,7 @@ async def main(service_name):
     async with tractor.open_nursery() as an:
         await an.start_actor(service_name)

-        async with tractor.get_registry('127.0.0.1', 1616) as portal:
+        async with tractor.get_registry() as portal:
             print(f"Arbiter is listening on {portal.channel}")

         async with tractor.wait_for_actor(service_name) as sockaddr:
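Note on the API change above: `get_registry()` now takes one optional address instead of a positional `(host, port)` pair; per `wrap_address()` in the new `_addr.py` further down, a bare call presumably falls back to the preferred transport's root address. A hedged sketch of the new call forms (addresses illustrative):

    # explicit TCP sockaddr (wrapped to a TCPAddress internally):
    async with tractor.get_registry(('127.0.0.1', 1616)) as portal:
        ...

    # explicit UDS filepath (wrapped to a UDSAddress):
    async with tractor.get_registry('/tmp/tractor.sock') as portal:
        ...

    # no arg: default root address for the preferred transport
    async with tractor.get_registry() as portal:
        ...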
@@ -45,6 +45,7 @@ dependencies = [
     "pdbp>=1.6,<2", # windows only (from `pdbp`)
     # typed IPC msging
     "msgspec>=0.19.0",
+    "cffi>=1.17.1",
 ]

 # ------ project ------
@@ -26,7 +26,7 @@ async def test_reg_then_unreg(reg_addr):
         portal = await n.start_actor('actor', enable_modules=[__name__])
         uid = portal.channel.uid

-        async with tractor.get_registry(*reg_addr) as aportal:
+        async with tractor.get_registry(reg_addr) as aportal:
             # this local actor should be the arbiter
             assert actor is aportal.actor

@@ -160,7 +160,7 @@ async def spawn_and_check_registry(
     async with tractor.open_root_actor(
         registry_addrs=[reg_addr],
     ):
-        async with tractor.get_registry(*reg_addr) as portal:
+        async with tractor.get_registry(reg_addr) as portal:
             # runtime needs to be up to call this
             actor = tractor.current_actor()

@@ -300,7 +300,7 @@ async def close_chans_before_nursery(
     async with tractor.open_root_actor(
         registry_addrs=[reg_addr],
     ):
-        async with tractor.get_registry(*reg_addr) as aportal:
+        async with tractor.get_registry(reg_addr) as aportal:
             try:
                 get_reg = partial(unpack_reg, aportal)

@@ -66,6 +66,9 @@ def run_example_in_subproc(
     # due to backpressure!!!
     proc = testdir.popen(
         cmdargs,
+        stdin=subprocess.PIPE,
+        stdout=subprocess.PIPE,
+        stderr=subprocess.PIPE,
         **kwargs,
     )
     assert not proc.returncode
@@ -119,10 +122,14 @@ def test_example(
     code = ex.read()

     with run_example_in_subproc(code) as proc:
-        proc.wait()
-        err, _ = proc.stderr.read(), proc.stdout.read()
-        # print(f'STDERR: {err}')
-        # print(f'STDOUT: {out}')
+        err = None
+        try:
+            if not proc.poll():
+                _, err = proc.communicate(timeout=15)
+
+        except subprocess.TimeoutExpired as e:
+            proc.kill()
+            err = e.stderr

         # if we get some gnarly output let's aggregate and raise
         if err:
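The switch from `proc.wait()` plus separate `stderr.read()`/`stdout.read()` calls to `proc.communicate(timeout=...)` is the standard stdlib fix for pipe-buffer deadlocks and hung example runs. The same pattern in isolation (plain `subprocess`, not tractor-specific):

    import subprocess

    proc = subprocess.Popen(
        ['python', '-c', 'print("hi")'],
        stdin=subprocess.PIPE,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
    )
    try:
        # drains stdout and stderr concurrently; reading one pipe at a time
        # can deadlock once the child blocks on the other, full, pipe buffer
        out, err = proc.communicate(timeout=15)
    except subprocess.TimeoutExpired:
        proc.kill()
        out, err = proc.communicate()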
@@ -0,0 +1,32 @@ (new file: eventfd cancellation test module; all lines added)

import trio
import pytest
from tractor.ipc import (
    open_eventfd,
    EFDReadCancelled,
    EventFD
)


def test_eventfd_read_cancellation():
    '''
    Ensure EventFD.read raises EFDReadCancelled if EventFD.close()
    is called.

    '''
    fd = open_eventfd()

    async def _read(event: EventFD):
        with pytest.raises(EFDReadCancelled):
            await event.read()

    async def main():
        async with trio.open_nursery() as n:
            with (
                EventFD(fd, 'w') as event,
                trio.fail_after(3)
            ):
                n.start_soon(_read, event)
                await trio.sleep(0.2)
                event.close()

    trio.run(main)
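Context for reviewers: eventfd(2) is a Linux-only kernel counter primitive; `open_eventfd`/`EventFD` above presumably wrap the stdlib bindings. A minimal sketch of the raw primitive, independent of tractor (CPython 3.10+, Linux only):

    import os

    fd = os.eventfd(0)                # kernel counter, starts at 0
    os.eventfd_write(fd, 1)           # increment the counter
    assert os.eventfd_read(fd) == 1   # read-and-reset (non-semaphore mode)
    os.close(fd)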
@@ -871,7 +871,7 @@ async def serve_subactors(
             )
             await ipc.send((
                 peer.chan.uid,
-                peer.chan.raddr,
+                peer.chan.raddr.unwrap(),
             ))

         print('Spawner exiting spawn serve loop!')
@@ -38,7 +38,7 @@ async def test_self_is_registered_localportal(reg_addr):
     "Verify waiting on the arbiter to register itself using a local portal."
     actor = tractor.current_actor()
     assert actor.is_arbiter
-    async with tractor.get_registry(*reg_addr) as portal:
+    async with tractor.get_registry(reg_addr) as portal:
         assert isinstance(portal, tractor._portal.LocalPortal)

         with trio.fail_after(0.2):
@@ -32,7 +32,7 @@ def test_abort_on_sigint(daemon):
 @tractor_test
 async def test_cancel_remote_arbiter(daemon, reg_addr):
     assert not tractor.current_actor().is_arbiter
-    async with tractor.get_registry(*reg_addr) as portal:
+    async with tractor.get_registry(reg_addr) as portal:
         await portal.cancel_actor()

     time.sleep(0.1)
@@ -41,7 +41,7 @@ async def test_cancel_remote_arbiter(daemon, reg_addr):

     # no arbiter socket should exist
     with pytest.raises(OSError):
-        async with tractor.get_registry(*reg_addr) as portal:
+        async with tractor.get_registry(reg_addr) as portal:
             pass

@@ -0,0 +1,423 @@ (new file: ringbuf IPC test suite; all lines added)

import time
import hashlib

import trio
import pytest
import tractor
from tractor.ipc import (
    open_ringbuf,
    attach_to_ringbuf_receiver,
    attach_to_ringbuf_sender,
    attach_to_ringbuf_stream,
    attach_to_ringbuf_channel,
    RBToken,
)
from tractor._testing.samples import (
    generate_single_byte_msgs,
    generate_sample_messages
)


@tractor.context
async def child_read_shm(
    ctx: tractor.Context,
    msg_amount: int,
    token: RBToken,
) -> str:
    '''
    Sub-actor used in `test_ringbuf`.

    Attach to a ringbuf and receive all messages until end of stream.
    Keep track of how many bytes received and also calculate
    sha256 of the whole byte stream.

    Calculate and print performance stats, finally return calculated
    hash.

    '''
    await ctx.started()
    print('reader started')
    recvd_bytes = 0
    recvd_hash = hashlib.sha256()
    start_ts = time.time()
    async with attach_to_ringbuf_receiver(token) as receiver:
        async for msg in receiver:
            recvd_hash.update(msg)
            recvd_bytes += len(msg)

    end_ts = time.time()
    elapsed = end_ts - start_ts
    elapsed_ms = int(elapsed * 1000)

    print(f'\n\telapsed ms: {elapsed_ms}')
    print(f'\tmsg/sec: {int(msg_amount / elapsed):,}')
    print(f'\tbytes/sec: {int(recvd_bytes / elapsed):,}')
    print(f'\treceived bytes: {recvd_bytes:,}')

    return recvd_hash.hexdigest()


@tractor.context
async def child_write_shm(
    ctx: tractor.Context,
    msg_amount: int,
    rand_min: int,
    rand_max: int,
    token: RBToken,
) -> None:
    '''
    Sub-actor used in `test_ringbuf`

    Generate `msg_amount` payloads with
    `random.randint(rand_min, rand_max)` random bytes at the end,
    Calculate sha256 hash and send it to parent on `ctx.started`.

    Attach to ringbuf and send all generated messages.

    '''
    msgs, _total_bytes = generate_sample_messages(
        msg_amount,
        rand_min=rand_min,
        rand_max=rand_max,
    )
    print('writer hashing payload...')
    sent_hash = hashlib.sha256(b''.join(msgs)).hexdigest()
    print('writer done hashing.')
    await ctx.started(sent_hash)
    print('writer started')
    async with attach_to_ringbuf_sender(token, cleanup=False) as sender:
        for msg in msgs:
            await sender.send_all(msg)

    print('writer exit')


@pytest.mark.parametrize(
    'msg_amount,rand_min,rand_max,buf_size',
    [
        # simple case, fixed payloads, large buffer
        (100_000, 0, 0, 10 * 1024),

        # guaranteed wrap around on every write
        (100, 10 * 1024, 20 * 1024, 10 * 1024),

        # large payload size, but large buffer
        (10_000, 256 * 1024, 512 * 1024, 10 * 1024 * 1024)
    ],
    ids=[
        'fixed_payloads_large_buffer',
        'wrap_around_every_write',
        'large_payloads_large_buffer',
    ]
)
def test_ringbuf(
    msg_amount: int,
    rand_min: int,
    rand_max: int,
    buf_size: int
):
    '''
    - Open a new ring buf on root actor
    - Open `child_write_shm` ctx in sub-actor which will generate a
    random payload and send its hash on `ctx.started`, finally sending
    the payload through the stream.
    - Open `child_read_shm` ctx in sub-actor which will receive the
    payload, calculate perf stats and return the hash.
    - Compare both hashes

    '''
    async def main():
        with open_ringbuf(
            'test_ringbuf',
            buf_size=buf_size
        ) as token:
            proc_kwargs = {'pass_fds': token.fds}

            async with tractor.open_nursery() as an:
                send_p = await an.start_actor(
                    'ring_sender',
                    enable_modules=[__name__],
                    proc_kwargs=proc_kwargs
                )
                recv_p = await an.start_actor(
                    'ring_receiver',
                    enable_modules=[__name__],
                    proc_kwargs=proc_kwargs
                )
                async with (
                    send_p.open_context(
                        child_write_shm,
                        token=token,
                        msg_amount=msg_amount,
                        rand_min=rand_min,
                        rand_max=rand_max,
                    ) as (_sctx, sent_hash),
                    recv_p.open_context(
                        child_read_shm,
                        token=token,
                        msg_amount=msg_amount
                    ) as (rctx, _sent),
                ):
                    recvd_hash = await rctx.result()

                    assert sent_hash == recvd_hash

                await send_p.cancel_actor()
                await recv_p.cancel_actor()

    trio.run(main)


@tractor.context
async def child_blocked_receiver(
    ctx: tractor.Context,
    token: RBToken
):
    async with attach_to_ringbuf_receiver(token) as receiver:
        await ctx.started()
        await receiver.receive_some()


def test_reader_cancel():
    '''
    Test that a receiver blocked on eventfd(2) read responds to
    cancellation.

    '''
    async def main():
        with open_ringbuf('test_ring_cancel_reader') as token:
            async with (
                tractor.open_nursery() as an,
                attach_to_ringbuf_sender(token) as _sender,
            ):
                recv_p = await an.start_actor(
                    'ring_blocked_receiver',
                    enable_modules=[__name__],
                    proc_kwargs={
                        'pass_fds': token.fds
                    }
                )
                async with (
                    recv_p.open_context(
                        child_blocked_receiver,
                        token=token
                    ) as (sctx, _sent),
                ):
                    await trio.sleep(1)
                    await an.cancel()

    with pytest.raises(tractor._exceptions.ContextCancelled):
        trio.run(main)


@tractor.context
async def child_blocked_sender(
    ctx: tractor.Context,
    token: RBToken
):
    async with attach_to_ringbuf_sender(token) as sender:
        await ctx.started()
        await sender.send_all(b'this will wrap')


def test_sender_cancel():
    '''
    Test that a sender blocked on eventfd(2) read responds to
    cancellation.

    '''
    async def main():
        with open_ringbuf(
            'test_ring_cancel_sender',
            buf_size=1
        ) as token:
            async with tractor.open_nursery() as an:
                recv_p = await an.start_actor(
                    'ring_blocked_sender',
                    enable_modules=[__name__],
                    proc_kwargs={
                        'pass_fds': token.fds
                    }
                )
                async with (
                    recv_p.open_context(
                        child_blocked_sender,
                        token=token
                    ) as (sctx, _sent),
                ):
                    await trio.sleep(1)
                    await an.cancel()

    with pytest.raises(tractor._exceptions.ContextCancelled):
        trio.run(main)


def test_receiver_max_bytes():
    '''
    Test that RingBuffReceiver.receive_some's max_bytes optional
    argument works correctly, send a msg of size 100, then
    force receive of messages with max_bytes == 1, wait until
    100 of these messages are received, then compare join of
    msgs with original message

    '''
    msg = generate_single_byte_msgs(100)
    msgs = []

    async def main():
        with open_ringbuf(
            'test_ringbuf_max_bytes',
            buf_size=10
        ) as token:
            async with (
                trio.open_nursery() as n,
                attach_to_ringbuf_sender(token, cleanup=False) as sender,
                attach_to_ringbuf_receiver(token, cleanup=False) as receiver
            ):
                async def _send_and_close():
                    await sender.send_all(msg)
                    await sender.aclose()

                n.start_soon(_send_and_close)
                while len(msgs) < len(msg):
                    msg_part = await receiver.receive_some(max_bytes=1)
                    assert len(msg_part) == 1
                    msgs.append(msg_part)

    trio.run(main)
    assert msg == b''.join(msgs)


def test_stapled_ringbuf():
    '''
    Open two ringbufs and give tokens to tasks (swap them such that in/out tokens
    are inversed on each task) which will open the streams and use trio.StapledStream
    to have a single bidirectional stream.

    Then take turns to send and receive messages.

    '''
    msg = generate_single_byte_msgs(100)
    pair_0_msgs = []
    pair_1_msgs = []

    pair_0_done = trio.Event()
    pair_1_done = trio.Event()

    async def pair_0(token_in: RBToken, token_out: RBToken):
        async with attach_to_ringbuf_stream(
            token_in,
            token_out,
            cleanup_in=False,
            cleanup_out=False
        ) as stream:
            # first turn to send
            await stream.send_all(msg)

            # second turn to receive
            while len(pair_0_msgs) != len(msg):
                _msg = await stream.receive_some(max_bytes=1)
                pair_0_msgs.append(_msg)

            pair_0_done.set()
            await pair_1_done.wait()


    async def pair_1(token_in: RBToken, token_out: RBToken):
        async with attach_to_ringbuf_stream(
            token_in,
            token_out,
            cleanup_in=False,
            cleanup_out=False
        ) as stream:
            # first turn to receive
            while len(pair_1_msgs) != len(msg):
                _msg = await stream.receive_some(max_bytes=1)
                pair_1_msgs.append(_msg)

            # second turn to send
            await stream.send_all(msg)

            pair_1_done.set()
            await pair_0_done.wait()


    async def main():
        with tractor.ipc.open_ringbuf_pair(
            'test_stapled_ringbuf'
        ) as (token_0, token_1):
            async with trio.open_nursery() as n:
                n.start_soon(pair_0, token_0, token_1)
                n.start_soon(pair_1, token_1, token_0)

    trio.run(main)

    assert msg == b''.join(pair_0_msgs)
    assert msg == b''.join(pair_1_msgs)


@tractor.context
async def child_channel_sender(
    ctx: tractor.Context,
    msg_amount_min: int,
    msg_amount_max: int,
    token_in: RBToken,
    token_out: RBToken
):
    import random
    msgs, _total_bytes = generate_sample_messages(
        random.randint(msg_amount_min, msg_amount_max),
        rand_min=256,
        rand_max=1024,
    )
    async with attach_to_ringbuf_channel(
        token_in,
        token_out
    ) as chan:
        await ctx.started(msgs)

        for msg in msgs:
            await chan.send(msg)


def test_channel():

    msg_amount_min = 100
    msg_amount_max = 1000

    async def main():
        with tractor.ipc.open_ringbuf_pair(
            'test_ringbuf_transport'
        ) as (token_0, token_1):
            async with (
                attach_to_ringbuf_channel(token_0, token_1) as chan,
                tractor.open_nursery() as an
            ):
                recv_p = await an.start_actor(
                    'test_ringbuf_transport_sender',
                    enable_modules=[__name__],
                    proc_kwargs={
                        'pass_fds': token_0.fds + token_1.fds
                    }
                )
                async with (
                    recv_p.open_context(
                        child_channel_sender,
                        msg_amount_min=msg_amount_min,
                        msg_amount_max=msg_amount_max,
                        token_in=token_1,
                        token_out=token_0
                    ) as (ctx, msgs),
                ):
                    recv_msgs = []
                    async for msg in chan:
                        recv_msgs.append(msg)

                    await recv_p.cancel_actor()
                    assert recv_msgs == msgs

    trio.run(main)
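Two details worth flagging in this suite: an `RBToken` carries raw file descriptors (shared memory plus eventfds), which is why each sub-actor is spawned with `proc_kwargs={'pass_fds': token.fds}` so the fds survive into the child process; and, as `test_receiver_max_bytes` shows, both ends can also attach within one process. A hedged single-process sketch using only the API exercised above:

    import trio
    from tractor.ipc import (
        open_ringbuf,
        attach_to_ringbuf_sender,
        attach_to_ringbuf_receiver,
    )

    async def main():
        with open_ringbuf('demo_ring', buf_size=1024) as token:
            async with (
                attach_to_ringbuf_sender(token, cleanup=False) as sender,
                attach_to_ringbuf_receiver(token, cleanup=False) as receiver,
            ):
                await sender.send_all(b'hello')
                await sender.aclose()  # signals end-of-stream to the receiver
                assert await receiver.receive_some() == b'hello'

    trio.run(main)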
@@ -0,0 +1,167 @@ (new file: shared-memory list test module; all lines added)

"""
Shared mem primitives and APIs.

"""
import uuid

# import numpy
import pytest
import trio
import tractor
from tractor.ipc._shm import (
    open_shm_list,
    attach_shm_list,
)


@tractor.context
async def child_attach_shml_alot(
    ctx: tractor.Context,
    shm_key: str,
) -> None:

    await ctx.started(shm_key)

    # now try to attach a boatload of times in a loop..
    for _ in range(1000):
        shml = attach_shm_list(
            key=shm_key,
            readonly=False,
        )
        assert shml.shm.name == shm_key
        await trio.sleep(0.001)


def test_child_attaches_alot():
    async def main():
        async with tractor.open_nursery() as an:

            # allocate writeable list in parent
            key = f'shml_{uuid.uuid4()}'
            shml = open_shm_list(
                key=key,
            )

            portal = await an.start_actor(
                'shm_attacher',
                enable_modules=[__name__],
            )

            async with (
                portal.open_context(
                    child_attach_shml_alot,
                    shm_key=shml.key,
                ) as (ctx, start_val),
            ):
                assert start_val == key
                await ctx.result()

            await portal.cancel_actor()

    trio.run(main)


@tractor.context
async def child_read_shm_list(
    ctx: tractor.Context,
    shm_key: str,
    use_str: bool,
    frame_size: int,
) -> None:

    # attach in child
    shml = attach_shm_list(
        key=shm_key,
        # dtype=str if use_str else float,
    )
    await ctx.started(shml.key)

    async with ctx.open_stream() as stream:
        async for i in stream:
            print(f'(child): reading shm list index: {i}')

            if use_str:
                expect = str(float(i))
            else:
                expect = float(i)

            if frame_size == 1:
                val = shml[i]
                assert expect == val
                print(f'(child): reading value: {val}')
            else:
                frame = shml[i - frame_size:i]
                print(f'(child): reading frame: {frame}')


@pytest.mark.parametrize(
    'use_str',
    [False, True],
    ids=lambda i: f'use_str_values={i}',
)
@pytest.mark.parametrize(
    'frame_size',
    [1, 2**6, 2**10],
    ids=lambda i: f'frame_size={i}',
)
def test_parent_writer_child_reader(
    use_str: bool,
    frame_size: int,
):

    async def main():
        async with tractor.open_nursery(
            # debug_mode=True,
        ) as an:

            portal = await an.start_actor(
                'shm_reader',
                enable_modules=[__name__],
                debug_mode=True,
            )

            # allocate writeable list in parent
            key = 'shm_list'
            seq_size = int(2 * 2 ** 10)
            shml = open_shm_list(
                key=key,
                size=seq_size,
                dtype=str if use_str else float,
                readonly=False,
            )

            async with (
                portal.open_context(
                    child_read_shm_list,
                    shm_key=key,
                    use_str=use_str,
                    frame_size=frame_size,
                ) as (ctx, sent),

                ctx.open_stream() as stream,
            ):

                assert sent == key

                for i in range(seq_size):

                    val = float(i)
                    if use_str:
                        val = str(val)

                    # print(f'(parent): writing {val}')
                    shml[i] = val

                    # only on frame fills do we
                    # signal to the child that a frame's
                    # worth is ready.
                    if (i % frame_size) == 0:
                        print(f'(parent): signalling frame full on {val}')
                        await stream.send(i)
                else:
                    print(f'(parent): signalling final frame on {val}')
                    await stream.send(i)

            await portal.cancel_actor()

    trio.run(main)
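A hedged sketch of the shared-list API these tests exercise, using only the calls shown above (key name illustrative): `open_shm_list()` allocates a new segment and `attach_shm_list()` maps an existing one by key:

    from tractor.ipc._shm import open_shm_list, attach_shm_list

    # writer: allocate a writeable float list in shared memory
    shml = open_shm_list(key='demo_list', size=16, dtype=float, readonly=False)
    shml[0] = 3.14

    # reader (typically another process): attach to the same segment by key
    reader = attach_shm_list(key='demo_list', readonly=False)
    assert reader.shm.name == 'demo_list'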
@@ -77,7 +77,7 @@ async def movie_theatre_question():
 async def test_movie_theatre_convo(start_method):
     """The main ``tractor`` routine.
     """
-    async with tractor.open_nursery() as n:
+    async with tractor.open_nursery(debug_mode=True) as n:

         portal = await n.start_actor(
             'frank',
@@ -64,7 +64,7 @@ from ._root import (
     run_daemon as run_daemon,
     open_root_actor as open_root_actor,
 )
-from ._ipc import Channel as Channel
+from .ipc import Channel as Channel
 from ._portal import Portal as Portal
 from ._runtime import Actor as Actor
 # from . import hilevel as hilevel
@@ -0,0 +1,310 @@ (new file: the transport-address abstraction, evidently `_addr.py` per the imports in later hunks; all lines added)

# tractor: structured concurrent "actors".
# Copyright 2018-eternity Tyler Goodlet.

# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.

# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.

# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
from __future__ import annotations
import os
import tempfile
from uuid import uuid4
from typing import (
    Protocol,
    ClassVar,
    TypeVar,
    Union,
    Type
)

import trio
from trio import socket


NamespaceType = TypeVar('NamespaceType')
AddressType = TypeVar('AddressType')
StreamType = TypeVar('StreamType')
ListenerType = TypeVar('ListenerType')


class Address(Protocol[
    NamespaceType,
    AddressType,
    StreamType,
    ListenerType
]):

    name_key: ClassVar[str]
    address_type: ClassVar[Type[AddressType]]

    @property
    def is_valid(self) -> bool:
        ...

    @property
    def namespace(self) -> NamespaceType|None:
        ...

    @classmethod
    def from_addr(cls, addr: AddressType) -> Address:
        ...

    def unwrap(self) -> AddressType:
        ...

    @classmethod
    def get_random(cls, namespace: NamespaceType | None = None) -> Address:
        ...

    @classmethod
    def get_root(cls) -> Address:
        ...

    def __repr__(self) -> str:
        ...

    def __eq__(self, other) -> bool:
        ...

    async def open_stream(self, **kwargs) -> StreamType:
        ...

    async def open_listener(self, **kwargs) -> ListenerType:
        ...

    async def close_listener(self):
        ...


class TCPAddress(Address[
    str,
    tuple[str, int],
    trio.SocketStream,
    trio.SocketListener
]):

    name_key: str = 'tcp'
    address_type: type = tuple[str, int]

    def __init__(
        self,
        host: str,
        port: int
    ):
        if (
            not isinstance(host, str)
            or
            not isinstance(port, int)
        ):
            raise TypeError(f'Expected host {host} to be str and port {port} to be int')
        self._host = host
        self._port = port

    @property
    def is_valid(self) -> bool:
        return self._port != 0

    @property
    def namespace(self) -> str:
        return self._host

    @classmethod
    def from_addr(cls, addr: tuple[str, int]) -> TCPAddress:
        return TCPAddress(addr[0], addr[1])

    def unwrap(self) -> tuple[str, int]:
        return self._host, self._port

    @classmethod
    def get_random(cls, namespace: str = '127.0.0.1') -> TCPAddress:
        return TCPAddress(namespace, 0)

    @classmethod
    def get_root(cls) -> Address:
        return TCPAddress('127.0.0.1', 1616)

    def __repr__(self) -> str:
        return f'{type(self)} @ {self.unwrap()}'

    def __eq__(self, other) -> bool:
        if not isinstance(other, TCPAddress):
            raise TypeError(
                f'Can not compare {type(other)} with {type(self)}'
            )

        return (
            self._host == other._host
            and
            self._port == other._port
        )

    async def open_stream(self, **kwargs) -> trio.SocketStream:
        stream = await trio.open_tcp_stream(
            self._host,
            self._port,
            **kwargs
        )
        self._host, self._port = stream.socket.getsockname()[:2]
        return stream

    async def open_listener(self, **kwargs) -> trio.SocketListener:
        listeners = await trio.open_tcp_listeners(
            host=self._host,
            port=self._port,
            **kwargs
        )
        assert len(listeners) == 1
        listener = listeners[0]
        self._host, self._port = listener.socket.getsockname()[:2]
        return listener

    async def close_listener(self):
        ...


class UDSAddress(Address[
    None,
    str,
    trio.SocketStream,
    trio.SocketListener
]):

    name_key: str = 'uds'
    address_type: type = str

    def __init__(
        self,
        filepath: str
    ):
        self._filepath = filepath

    @property
    def is_valid(self) -> bool:
        return True

    @property
    def namespace(self) -> None:
        return

    @classmethod
    def from_addr(cls, filepath: str) -> UDSAddress:
        return UDSAddress(filepath)

    def unwrap(self) -> str:
        return self._filepath

    @classmethod
    def get_random(cls, namespace: None = None) -> UDSAddress:
        return UDSAddress(f'{tempfile.gettempdir()}/{uuid4()}.sock')

    @classmethod
    def get_root(cls) -> Address:
        return UDSAddress('tractor.sock')

    def __repr__(self) -> str:
        return f'{type(self)} @ {self._filepath}'

    def __eq__(self, other) -> bool:
        if not isinstance(other, UDSAddress):
            raise TypeError(
                f'Can not compare {type(other)} with {type(self)}'
            )

        return self._filepath == other._filepath

    async def open_stream(self, **kwargs) -> trio.SocketStream:
        stream = await trio.open_unix_socket(
            self._filepath,
            **kwargs
        )
        return stream

    async def open_listener(self, **kwargs) -> trio.SocketListener:
        self._sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        await self._sock.bind(self._filepath)
        self._sock.listen(1)
        return trio.SocketListener(self._sock)

    async def close_listener(self):
        self._sock.close()
        os.unlink(self._filepath)


preferred_transport = 'uds'


_address_types = (
    TCPAddress,
    UDSAddress
)


_default_addrs: dict[str, Type[Address]] = {
    cls.name_key: cls
    for cls in _address_types
}


AddressTypes = Union[
    tuple([
        cls.address_type
        for cls in _address_types
    ])
]


_default_lo_addrs: dict[
    str,
    AddressTypes
] = {
    cls.name_key: cls.get_root().unwrap()
    for cls in _address_types
}


def get_address_cls(name: str) -> Type[Address]:
    return _default_addrs[name]


def is_wrapped_addr(addr: any) -> bool:
    return type(addr) in _address_types


def wrap_address(addr: AddressTypes) -> Address:

    if is_wrapped_addr(addr):
        return addr

    cls = None
    match addr:
        case str():
            cls = UDSAddress

        case tuple() | list():
            cls = TCPAddress

        case None:
            cls = get_address_cls(preferred_transport)
            addr = cls.get_root().unwrap()

        case _:
            raise TypeError(
                f'Can not wrap addr {addr} of type {type(addr)}'
            )

    return cls.from_addr(addr)


def default_lo_addrs(transports: list[str]) -> list[AddressTypes]:
    return [
        _default_lo_addrs[transport]
        for transport in transports
    ]
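The dispatch in `wrap_address()` above is what drives every `get_registry(*addr)` to `get_registry(addr)` change in this branch. Its behavior, as written:

    from tractor._addr import wrap_address, TCPAddress, UDSAddress

    assert isinstance(wrap_address(('127.0.0.1', 1616)), TCPAddress)  # tuple -> TCP
    assert isinstance(wrap_address('/tmp/demo.sock'), UDSAddress)     # str -> UDS
    # None falls back to the preferred transport's ('uds') root address:
    assert wrap_address(None) == UDSAddress('tractor.sock')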
@@ -31,8 +31,12 @@ def parse_uid(arg):
     return str(name), str(uuid)  # ensures str encoding

 def parse_ipaddr(arg):
-    host, port = literal_eval(arg)
-    return (str(host), int(port))
+    try:
+        return literal_eval(arg)
+
+    except (ValueError, SyntaxError):
+        # UDS: try to interpret as a straight up str
+        return arg


 if __name__ == "__main__":
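This keeps the child-process CLI working for both transports: a TCP sockaddr arrives as a python-literal tuple, while a UDS path is a bare string that `literal_eval` rejects. Concretely:

    from ast import literal_eval

    literal_eval("('127.0.0.1', 1616)")    # -> ('127.0.0.1', 1616), a TCP sockaddr
    try:
        literal_eval('/tmp/tractor.sock')  # not a valid python literal...
    except (ValueError, SyntaxError):
        pass  # ...so parse_ipaddr() returns the raw str (a UDS filepath)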
@@ -89,7 +89,7 @@ from .msg import (
     pretty_struct,
     _ops as msgops,
 )
-from ._ipc import (
+from .ipc import (
     Channel,
 )
 from ._streaming import (

@@ -105,7 +105,7 @@ from ._state import (
 if TYPE_CHECKING:
     from ._portal import Portal
     from ._runtime import Actor
-    from ._ipc import MsgTransport
+    from .ipc import MsgTransport
     from .devx._frame_stack import (
         CallerInfo,
     )

@@ -859,19 +859,10 @@ class Context:
     @property
     def dst_maddr(self) -> str:
         chan: Channel = self.chan
-        dst_addr, dst_port = chan.raddr
         trans: MsgTransport = chan.transport
         # cid: str = self.cid
         # cid_head, cid_tail = cid[:6], cid[-6:]
-        return (
-            f'/ipv4/{dst_addr}'
-            f'/{trans.name_key}/{dst_port}'
-            # f'/{self.chan.uid[0]}'
-            # f'/{self.cid}'
-
-            # f'/cid={cid_head}..{cid_tail}'
-            # TODO: ? not use this ^ right ?
-        )
+        return trans.maddr

     dmaddr = dst_maddr
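For reference, `dst_maddr` renders a libp2p-style multiaddr (the `_discovery.py` TODOs below mention multi-addrs explicitly); reconstructing the removed inline f-strings for a TCP peer shows the old output, which the property now delegates to the transport's own `maddr`:

    # reconstruction of the removed inline rendering (illustrative sockaddr):
    dst_addr, dst_port = ('127.0.0.1', 1616)
    maddr = f'/ipv4/{dst_addr}/tcp/{dst_port}'
    assert maddr == '/ipv4/127.0.0.1/tcp/1616'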
@@ -29,7 +29,13 @@ from contextlib import asynccontextmanager as acm

 from tractor.log import get_logger
 from .trionics import gather_contexts
-from ._ipc import _connect_chan, Channel
+from .ipc import _connect_chan, Channel
+from ._addr import (
+    AddressTypes,
+    Address,
+    preferred_transport,
+    wrap_address
+)
 from ._portal import (
     Portal,
     open_portal,

@@ -48,11 +54,7 @@ log = get_logger(__name__)


 @acm
-async def get_registry(
-    host: str,
-    port: int,
-
-) -> AsyncGenerator[
+async def get_registry(addr: AddressTypes | None = None) -> AsyncGenerator[
     Portal | LocalPortal | None,
     None,
 ]:

@@ -69,13 +71,13 @@ async def get_registry(
         # (likely a re-entrant call from the arbiter actor)
         yield LocalPortal(
             actor,
-            Channel((host, port))
+            await Channel.from_addr(addr)
         )
     else:
         # TODO: try to look pre-existing connection from
         # `Actor._peers` and use it instead?
         async with (
-            _connect_chan(host, port) as chan,
+            _connect_chan(addr) as chan,
             open_portal(chan) as regstr_ptl,
         ):
             yield regstr_ptl

@@ -89,11 +91,10 @@ async def get_root(

     # TODO: rename mailbox to `_root_maddr` when we finally
     # add and impl libp2p multi-addrs?
-    host, port = _runtime_vars['_root_mailbox']
-    assert host is not None
+    addr = _runtime_vars['_root_mailbox']

     async with (
-        _connect_chan(host, port) as chan,
+        _connect_chan(addr) as chan,
         open_portal(chan, **kwargs) as portal,
     ):
         yield portal

@@ -134,10 +135,10 @@ def get_peer_by_name(
 @acm
 async def query_actor(
     name: str,
-    regaddr: tuple[str, int]|None = None,
+    regaddr: AddressTypes|None = None,

 ) -> AsyncGenerator[
-    tuple[str, int]|None,
+    AddressTypes|None,
     None,
 ]:
     '''

@@ -163,31 +164,31 @@ async def query_actor(
         return

     reg_portal: Portal
-    regaddr: tuple[str, int] = regaddr or actor.reg_addrs[0]
-    async with get_registry(*regaddr) as reg_portal:
+    regaddr: Address = wrap_address(regaddr) or actor.reg_addrs[0]
+    async with get_registry(regaddr) as reg_portal:
         # TODO: return portals to all available actors - for now
         # just the last one that registered
-        sockaddr: tuple[str, int] = await reg_portal.run_from_ns(
+        addr: AddressTypes = await reg_portal.run_from_ns(
             'self',
             'find_actor',
             name=name,
         )
-        yield sockaddr
+        yield addr


 @acm
 async def maybe_open_portal(
-    addr: tuple[str, int],
+    addr: AddressTypes,
     name: str,
 ):
     async with query_actor(
         name=name,
         regaddr=addr,
-    ) as sockaddr:
+    ) as addr:
         pass

-    if sockaddr:
-        async with _connect_chan(*sockaddr) as chan:
+    if addr:
+        async with _connect_chan(addr) as chan:
             async with open_portal(chan) as portal:
                 yield portal
     else:

@@ -197,7 +198,8 @@ async def maybe_open_portal(
 @acm
 async def find_actor(
     name: str,
-    registry_addrs: list[tuple[str, int]]|None = None,
+    registry_addrs: list[AddressTypes]|None = None,
+    enable_transports: list[str] = [preferred_transport],

     only_first: bool = True,
     raise_on_none: bool = False,

@@ -224,15 +226,15 @@ async def find_actor(
         # XXX NOTE: make sure to dynamically read the value on
         # every call since something may change it globally (eg.
         # like in our discovery test suite)!
-        from . import _root
+        from ._addr import default_lo_addrs
         registry_addrs = (
             _runtime_vars['_registry_addrs']
             or
-            _root._default_lo_addrs
+            default_lo_addrs(enable_transports)
         )

     maybe_portals: list[
-        AsyncContextManager[tuple[str, int]]
+        AsyncContextManager[AddressTypes]
     ] = list(
         maybe_open_portal(
             addr=addr,

@@ -274,7 +276,7 @@ async def find_actor(
 @acm
 async def wait_for_actor(
     name: str,
-    registry_addr: tuple[str, int] | None = None,
+    registry_addr: AddressTypes | None = None,

 ) -> AsyncGenerator[Portal, None]:
     '''

@@ -291,7 +293,7 @@ async def wait_for_actor(
         yield peer_portal
         return

-    regaddr: tuple[str, int] = (
+    regaddr: AddressTypes = (
         registry_addr
         or
         actor.reg_addrs[0]

@@ -299,8 +301,8 @@ async def wait_for_actor(
     # TODO: use `.trionics.gather_contexts()` like
     # above in `find_actor()` as well?
     reg_portal: Portal
-    async with get_registry(*regaddr) as reg_portal:
-        sockaddrs = await reg_portal.run_from_ns(
+    async with get_registry(regaddr) as reg_portal:
+        addrs = await reg_portal.run_from_ns(
             'self',
             'wait_for_actor',
             name=name,

@@ -308,8 +310,8 @@ async def wait_for_actor(

     # get latest registered addr by default?
     # TODO: offer multi-portal yields in multi-homed case?
-    sockaddr: tuple[str, int] = sockaddrs[-1]
+    addr: AddressTypes = addrs[-1]

-    async with _connect_chan(*sockaddr) as chan:
+    async with _connect_chan(addr) as chan:
         async with open_portal(chan) as portal:
             yield portal
@@ -37,6 +37,7 @@ from .log import (
 from . import _state
 from .devx import _debug
 from .to_asyncio import run_as_asyncio_guest
+from ._addr import AddressTypes
 from ._runtime import (
     async_main,
     Actor,

@@ -52,10 +53,10 @@ log = get_logger(__name__)
 def _mp_main(

     actor: Actor,
-    accept_addrs: list[tuple[str, int]],
+    accept_addrs: list[AddressTypes],
     forkserver_info: tuple[Any, Any, Any, Any, Any],
     start_method: SpawnMethodKey,
-    parent_addr: tuple[str, int] | None = None,
+    parent_addr: AddressTypes | None = None,
     infect_asyncio: bool = False,

 ) -> None:

@@ -206,7 +207,7 @@ def nest_from_op(
 def _trio_main(
     actor: Actor,
     *,
-    parent_addr: tuple[str, int] | None = None,
+    parent_addr: AddressTypes | None = None,
     infect_asyncio: bool = False,

 ) -> None:
@@ -65,7 +65,7 @@ if TYPE_CHECKING:
     from ._context import Context
     from .log import StackLevelAdapter
     from ._stream import MsgStream
-    from ._ipc import Channel
+    from .ipc import Channel

 log = get_logger('tractor')

@@ -43,7 +43,7 @@ from .trionics import maybe_open_nursery
 from ._state import (
     current_actor,
 )
-from ._ipc import Channel
+from .ipc import Channel
 from .log import get_logger
 from .msg import (
     # Error,
@@ -43,21 +43,18 @@ from .devx import _debug
 from . import _spawn
 from . import _state
 from . import log
-from ._ipc import _connect_chan
+from .ipc import (
+    _connect_chan,
+)
+from ._addr import (
+    AddressTypes,
+    wrap_address,
+    preferred_transport,
+    default_lo_addrs
+)
 from ._exceptions import is_multi_cancelled


-# set at startup and after forks
-_default_host: str = '127.0.0.1'
-_default_port: int = 1616
-
-# default registry always on localhost
-_default_lo_addrs: list[tuple[str, int]] = [(
-    _default_host,
-    _default_port,
-)]
-
-
 logger = log.get_logger('tractor')


@@ -66,10 +63,12 @@ async def open_root_actor(

     *,
     # defaults are above
-    registry_addrs: list[tuple[str, int]]|None = None,
+    registry_addrs: list[AddressTypes]|None = None,

     # defaults are above
-    arbiter_addr: tuple[str, int]|None = None,
+    arbiter_addr: tuple[AddressTypes]|None = None,

+    enable_transports: list[str] = [preferred_transport],
+
     name: str|None = 'root',

@@ -195,11 +194,9 @@ async def open_root_actor(
         )
         registry_addrs = [arbiter_addr]

-    registry_addrs: list[tuple[str, int]] = (
-        registry_addrs
-        or
-        _default_lo_addrs
-    )
+    if not registry_addrs:
+        registry_addrs: list[AddressTypes] = default_lo_addrs(enable_transports)
+
     assert registry_addrs

     loglevel = (

@@ -248,10 +245,10 @@ async def open_root_actor(
             enable_stack_on_sig()

     # closed into below ping task-func
-    ponged_addrs: list[tuple[str, int]] = []
+    ponged_addrs: list[AddressTypes] = []

     async def ping_tpt_socket(
-        addr: tuple[str, int],
+        addr: AddressTypes,
         timeout: float = 1,
     ) -> None:
         '''

@@ -271,7 +268,7 @@ async def open_root_actor(
             # be better to eventually have a "discovery" protocol
             # with basic handshake instead?
             with trio.move_on_after(timeout):
-                async with _connect_chan(*addr):
+                async with _connect_chan(addr):
                     ponged_addrs.append(addr)

         except OSError:

@@ -284,10 +281,10 @@ async def open_root_actor(
         for addr in registry_addrs:
             tn.start_soon(
                 ping_tpt_socket,
-                tuple(addr),  # TODO: just drop this requirement?
+                addr,
             )

-    trans_bind_addrs: list[tuple[str, int]] = []
+    trans_bind_addrs: list[AddressTypes] = []

     # Create a new local root-actor instance which IS NOT THE
     # REGISTRAR

@@ -311,9 +308,12 @@ async def open_root_actor(
         )
         # DO NOT use the registry_addrs as the transport server
         # addrs for this new non-registar, root-actor.
-        for host, port in ponged_addrs:
-            # NOTE: zero triggers dynamic OS port allocation
-            trans_bind_addrs.append((host, 0))
+        for addr in ponged_addrs:
+            waddr = wrap_address(addr)
+            print(waddr)
+            trans_bind_addrs.append(
+                waddr.get_random(namespace=waddr.namespace)
+            )

         # Start this local actor as the "registrar", aka a regular
         # actor who manages the local registry of "mailboxes" of

@@ -322,7 +322,7 @@ async def open_root_actor(

         # NOTE that if the current actor IS THE REGISTAR, the
         # following init steps are taken:
-        # - the tranport layer server is bound to each (host, port)
+        # - the tranport layer server is bound to each addr
         #   pair defined in provided registry_addrs, or the default.
         trans_bind_addrs = registry_addrs

@@ -462,7 +462,7 @@ def run_daemon(

     # runtime kwargs
     name: str | None = 'root',
-    registry_addrs: list[tuple[str, int]] = _default_lo_addrs,
+    registry_addrs: list[AddressTypes]|None = None,

     start_method: str | None = None,
     debug_mode: bool = False,
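One note on the bind-address hunk above: judging by `_addr.py`, `get_random()` is what preserves the removed "zero triggers dynamic OS port allocation" behavior for TCP, while UDS mints a fresh tempdir socket path:

    from tractor._addr import TCPAddress, UDSAddress

    TCPAddress.get_random('127.0.0.1').unwrap()  # ('127.0.0.1', 0): port 0, OS picks at bind
    UDSAddress.get_random().unwrap()             # e.g. '/tmp/<uuid4>.sock'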
@@ -42,7 +42,7 @@ from trio import (
     TaskStatus,
 )

-from ._ipc import Channel
+from .ipc import Channel
 from ._context import (
     Context,
 )
@ -73,7 +73,14 @@ from tractor.msg import (
|
||||||
pretty_struct,
|
pretty_struct,
|
||||||
types as msgtypes,
|
types as msgtypes,
|
||||||
)
|
)
|
||||||
from ._ipc import Channel
|
from .ipc import Channel
|
||||||
|
from ._addr import (
|
||||||
|
AddressTypes,
|
||||||
|
Address,
|
||||||
|
wrap_address,
|
||||||
|
preferred_transport,
|
||||||
|
default_lo_addrs
|
||||||
|
)
|
||||||
from ._context import (
|
from ._context import (
|
||||||
mk_context,
|
mk_context,
|
||||||
Context,
|
Context,
|
||||||
|
@@ -179,11 +186,11 @@ class Actor:
         enable_modules: list[str] = [],
         uid: str|None = None,
         loglevel: str|None = None,
-        registry_addrs: list[tuple[str, int]]|None = None,
+        registry_addrs: list[AddressTypes]|None = None,
         spawn_method: str|None = None,

         # TODO: remove!
-        arbiter_addr: tuple[str, int]|None = None,
+        arbiter_addr: AddressTypes|None = None,

     ) -> None:
         '''

@@ -223,7 +230,7 @@ class Actor:
                 DeprecationWarning,
                 stacklevel=2,
             )
-            registry_addrs: list[tuple[str, int]] = [arbiter_addr]
+            registry_addrs: list[AddressTypes] = [arbiter_addr]

         # marked by the process spawning backend at startup
         # will be None for the parent most process started manually

@@ -257,6 +264,7 @@ class Actor:
         ] = {}

         self._listeners: list[trio.abc.Listener] = []
+        self._listen_addrs: list[Address] = []
         self._parent_chan: Channel|None = None
         self._forkserver_info: tuple|None = None

@@ -269,13 +277,13 @@ class Actor:

         # when provided, init the registry addresses property from
         # input via the validator.
-        self._reg_addrs: list[tuple[str, int]] = []
+        self._reg_addrs: list[AddressTypes] = []
         if registry_addrs:
-            self.reg_addrs: list[tuple[str, int]] = registry_addrs
+            self.reg_addrs: list[AddressTypes] = registry_addrs
             _state._runtime_vars['_registry_addrs'] = registry_addrs

     @property
-    def reg_addrs(self) -> list[tuple[str, int]]:
+    def reg_addrs(self) -> list[AddressTypes]:
         '''
         List of (socket) addresses for all known (and contactable)
         registry actors.

@@ -286,7 +294,7 @@ class Actor:
     @reg_addrs.setter
     def reg_addrs(
         self,
-        addrs: list[tuple[str, int]],
+        addrs: list[AddressTypes],
     ) -> None:
         if not addrs:
             log.warning(

@@ -295,16 +303,7 @@ class Actor:
             )
             return

-        # always sanity check the input list since it's critical
-        # that addrs are correct for discovery sys operation.
-        for addr in addrs:
-            if not isinstance(addr, tuple):
-                raise ValueError(
-                    'Expected `Actor.reg_addrs: list[tuple[str, int]]`\n'
-                    f'Got {addrs}'
-                )
-
-        self._reg_addrs = addrs
+        self._reg_addrs = addrs

     async def wait_for_peer(
         self,

@@ -1024,11 +1023,11 @@ class Actor:

     async def _from_parent(
         self,
-        parent_addr: tuple[str, int]|None,
+        parent_addr: AddressTypes|None,

     ) -> tuple[
         Channel,
-        list[tuple[str, int]]|None,
+        list[AddressTypes]|None,
     ]:
         '''
         Bootstrap this local actor's runtime config from its parent by

@@ -1040,16 +1039,13 @@
             # Connect back to the parent actor and conduct initial
             # handshake. From this point on if we error, we
             # attempt to ship the exception back to the parent.
-            chan = Channel(
-                destaddr=parent_addr,
-            )
-            await chan.connect()
+            chan = await Channel.from_addr(wrap_address(parent_addr))

             # TODO: move this into a `Channel.handshake()`?
             # Initial handshake: swap names.
             await self._do_handshake(chan)

-            accept_addrs: list[tuple[str, int]]|None = None
+            accept_addrs: list[AddressTypes]|None = None

             if self._spawn_method == "trio":

@@ -1066,7 +1062,7 @@
                 # if "trace"/"util" mode is enabled?
                 f'{pretty_struct.pformat(spawnspec)}\n'
             )
-            accept_addrs: list[tuple[str, int]] = spawnspec.bind_addrs
+            accept_addrs: list[AddressTypes] = spawnspec.bind_addrs

             # TODO: another `Struct` for rtvs..
             rvs: dict[str, Any] = spawnspec._runtime_vars

@@ -1173,8 +1169,7 @@
         self,
         handler_nursery: Nursery,
         *,
-        # (host, port) to bind for channel server
-        listen_sockaddrs: list[tuple[str, int]]|None = None,
+        listen_addrs: list[AddressTypes]|None = None,

         task_status: TaskStatus[Nursery] = trio.TASK_STATUS_IGNORED,
     ) -> None:
@@ -1186,41 +1181,45 @@
         `.cancel_server()` is called.

         '''
-        if listen_sockaddrs is None:
-            listen_sockaddrs = [(None, 0)]
+        if listen_addrs is None:
+            listen_addrs = default_lo_addrs([preferred_transport])
+
+        else:
+            listen_addrs: list[Address] = [
+                wrap_address(a) for a in listen_addrs
+            ]

         self._server_down = trio.Event()
         try:
             async with trio.open_nursery() as server_n:
-                for host, port in listen_sockaddrs:
-                    listeners: list[trio.abc.Listener] = await server_n.start(
-                        partial(
-                            trio.serve_tcp,
-
-                            handler=self._stream_handler,
-                            port=port,
-                            host=host,
-
-                            # NOTE: configured such that new
-                            # connections will stay alive even if
-                            # this server is cancelled!
-                            handler_nursery=handler_nursery,
-                        )
-                    )
-                    sockets: list[trio.socket] = [
-                        getattr(listener, 'socket', 'unknown socket')
-                        for listener in listeners
-                    ]
-                    log.runtime(
-                        'Started TCP server(s)\n'
-                        f'|_{sockets}\n'
-                    )
-                    self._listeners.extend(listeners)
+                listeners: list[trio.abc.Listener] = [
+                    await addr.open_listener()
+                    for addr in listen_addrs
+                ]
+                await server_n.start(
+                    partial(
+                        trio.serve_listeners,
+                        handler=self._stream_handler,
+                        listeners=listeners,
+
+                        # NOTE: configured such that new
+                        # connections will stay alive even if
+                        # this server is cancelled!
+                        handler_nursery=handler_nursery
+                    )
+                )
+                log.runtime(
+                    'Started server(s)\n'
+                    '\n'.join([f'|_{addr}' for addr in listen_addrs])
+                )
+                self._listen_addrs.extend(listen_addrs)
+                self._listeners.extend(listeners)

                 task_status.started(server_n)

         finally:
+            for addr in listen_addrs:
+                await addr.close_listener()
             # signal the server is down since nursery above terminated
             self._server_down.set()
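The rewrite above swaps a per-address `trio.serve_tcp` loop for pre-opened, transport-agnostic listeners handed to `trio.serve_listeners`, which is how one server task can multiplex TCP, UDS, etc. A minimal standalone demo of that pattern in plain trio (no tractor machinery):

```python
# Demo of serving pre-opened listeners via `trio.serve_listeners`,
# the same shape as the rewritten `_serve_forever` above.
from functools import partial

import trio


async def echo_handler(stream: trio.SocketStream) -> None:
    # echo every received chunk back to the peer
    async for chunk in stream:
        await stream.send_all(chunk)


async def main() -> None:
    async with trio.open_nursery() as server_n:
        # open listeners up-front (analogous to `addr.open_listener()`)
        listeners = await trio.open_tcp_listeners(0, host='127.0.0.1')
        await server_n.start(
            partial(
                trio.serve_listeners,
                handler=echo_handler,
                listeners=listeners,
            )
        )
        print('serving on', [lstnr.socket.getsockname() for lstnr in listeners])
        server_n.cancel_scope.cancel()  # shut down immediately for demo


trio.run(main)
```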
@@ -1579,26 +1578,21 @@ class Actor:
         return False

     @property
-    def accept_addrs(self) -> list[tuple[str, int]]:
+    def accept_addrs(self) -> list[AddressTypes]:
         '''
         All addresses to which the transport-channel server binds
         and listens for new connections.

         '''
-        # throws OSError on failure
-        return [
-            listener.socket.getsockname()
-            for listener in self._listeners
-        ]  # type: ignore
+        return [a.unwrap() for a in self._listen_addrs]

     @property
-    def accept_addr(self) -> tuple[str, int]:
+    def accept_addr(self) -> AddressTypes:
         '''
         Primary address to which the IPC transport server is
         bound and listening for new connections.

         '''
-        # throws OSError on failure
         return self.accept_addrs[0]

     def get_parent(self) -> Portal:

@@ -1670,7 +1664,7 @@

 async def async_main(
     actor: Actor,
-    accept_addrs: tuple[str, int]|None = None,
+    accept_addrs: AddressTypes|None = None,

     # XXX: currently ``parent_addr`` is only needed for the
     # ``multiprocessing`` backend (which pickles state sent to

@@ -1679,7 +1673,7 @@ async def async_main(
     # change this to a simple ``is_subactor: bool`` which will
     # be False when running as root actor and True when as
     # a subactor.
-    parent_addr: tuple[str, int]|None = None,
+    parent_addr: AddressTypes|None = None,
     task_status: TaskStatus[None] = trio.TASK_STATUS_IGNORED,

 ) -> None:

@@ -1769,7 +1763,7 @@ async def async_main(
                     partial(
                         actor._serve_forever,
                         service_nursery,
-                        listen_sockaddrs=accept_addrs,
+                        listen_addrs=accept_addrs,
                     )
                 )
             except OSError as oserr:

@@ -1785,7 +1779,7 @@ async def async_main(

                 raise

-            accept_addrs: list[tuple[str, int]] = actor.accept_addrs
+            accept_addrs: list[AddressTypes] = actor.accept_addrs

             # NOTE: only set the loopback addr for the
             # process-tree-global "root" mailbox since

@@ -1793,9 +1787,8 @@
             # their root actor over that channel.
             if _state._runtime_vars['_is_root']:
                 for addr in accept_addrs:
-                    host, _ = addr
-                    # TODO: generic 'lo' detector predicate
-                    if '127.0.0.1' in host:
+                    waddr = wrap_address(addr)
+                    if waddr == waddr.get_root():
                         _state._runtime_vars['_root_mailbox'] = addr

             # Register with the arbiter if we're told its addr

@@ -1810,24 +1803,21 @@
             # only on unique actor uids?
             for addr in actor.reg_addrs:
                 try:
-                    assert isinstance(addr, tuple)
-                    assert addr[1]  # non-zero after bind
+                    waddr = wrap_address(addr)
+                    assert waddr.is_valid
                 except AssertionError:
                     await _debug.pause()

-                async with get_registry(*addr) as reg_portal:
+                async with get_registry(addr) as reg_portal:
                     for accept_addr in accept_addrs:
-                        if not accept_addr[1]:
-                            await _debug.pause()
-
-                        assert accept_addr[1]
+                        accept_addr = wrap_address(accept_addr)
+                        assert accept_addr.is_valid

                         await reg_portal.run_from_ns(
                             'self',
                             'register_actor',
                             uid=actor.uid,
-                            sockaddr=accept_addr,
+                            addr=accept_addr.unwrap(),
                         )

             is_registered: bool = True

@@ -1954,12 +1944,13 @@
                 ):
                     failed: bool = False
                     for addr in actor.reg_addrs:
-                        assert isinstance(addr, tuple)
+                        waddr = wrap_address(addr)
+                        assert waddr.is_valid
                         with trio.move_on_after(0.5) as cs:
                             cs.shield = True
                             try:
                                 async with get_registry(
-                                    *addr,
+                                    addr,
                                 ) as reg_portal:
                                     await reg_portal.run_from_ns(
                                         'self',

@@ -2037,7 +2028,7 @@ class Arbiter(Actor):

         self._registry: dict[
             tuple[str, str],
-            tuple[str, int],
+            AddressTypes,
         ] = {}
         self._waiters: dict[
             str,

@@ -2053,18 +2044,18 @@
         self,
         name: str,

-    ) -> tuple[str, int]|None:
+    ) -> AddressTypes|None:

-        for uid, sockaddr in self._registry.items():
+        for uid, addr in self._registry.items():
             if name in uid:
-                return sockaddr
+                return addr

         return None

     async def get_registry(
         self

-    ) -> dict[str, tuple[str, int]]:
+    ) -> dict[str, AddressTypes]:
         '''
         Return current name registry.


@@ -2084,7 +2075,7 @@
         self,
         name: str,

-    ) -> list[tuple[str, int]]:
+    ) -> list[AddressTypes]:
         '''
         Wait for a particular actor to register.

@@ -2092,44 +2083,41 @@
         registered.

         '''
-        sockaddrs: list[tuple[str, int]] = []
-        sockaddr: tuple[str, int]
+        addrs: list[AddressTypes] = []
+        addr: AddressTypes

         mailbox_info: str = 'Actor registry contact infos:\n'
-        for uid, sockaddr in self._registry.items():
+        for uid, addr in self._registry.items():
             mailbox_info += (
                 f'|_uid: {uid}\n'
-                f'|_sockaddr: {sockaddr}\n\n'
+                f'|_addr: {addr}\n\n'
             )
             if name == uid[0]:
-                sockaddrs.append(sockaddr)
+                addrs.append(addr)

-        if not sockaddrs:
+        if not addrs:
             waiter = trio.Event()
             self._waiters.setdefault(name, []).append(waiter)
             await waiter.wait()

             for uid in self._waiters[name]:
                 if not isinstance(uid, trio.Event):
-                    sockaddrs.append(self._registry[uid])
+                    addrs.append(self._registry[uid])

         log.runtime(mailbox_info)
-        return sockaddrs
+        return addrs

     async def register_actor(
         self,
         uid: tuple[str, str],
-        sockaddr: tuple[str, int]
+        addr: AddressTypes

     ) -> None:
         uid = name, hash = (str(uid[0]), str(uid[1]))
-        addr = (host, port) = (
-            str(sockaddr[0]),
-            int(sockaddr[1]),
-        )
-        if port == 0:
+        waddr: Address = wrap_address(addr)
+        if not waddr.is_valid:
+            # should never be 0-dynamic-os-alloc
             await _debug.pause()
-        assert port  # should never be 0-dynamic-os-alloc

         self._registry[uid] = addr

         # pop and signal all waiter events
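The `wait_for_actor` rewrite above keeps the existing waiter-event scheme; stripped of the actor machinery it is just the classic "block on a `trio.Event` until someone registers under that name" pattern:

```python
# The name-waiter pattern used by `Arbiter.wait_for_actor` /
# `register_actor` above, reduced to plain trio primitives.
import trio

registry: dict[str, str] = {}              # name -> addr (stand-in for uid -> addr)
waiters: dict[str, list[trio.Event]] = {}  # name -> pending waiter events


async def wait_for(name: str) -> str:
    if name not in registry:
        ev = trio.Event()
        waiters.setdefault(name, []).append(ev)
        await ev.wait()
    return registry[name]


def register(name: str, addr: str) -> None:
    registry[name] = addr
    # pop and signal all waiter events (same comment as in the diff)
    for ev in waiters.pop(name, []):
        ev.set()


async def main() -> None:
    async with trio.open_nursery() as n:
        n.start_soon(wait_for, 'svc')
        await trio.sleep(0.1)
        register('svc', 'tcp://127.0.0.1:1616')


trio.run(main)
```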
@@ -46,6 +46,7 @@ from tractor._state import (
     _runtime_vars,
 )
 from tractor.log import get_logger
+from tractor._addr import AddressTypes
 from tractor._portal import Portal
 from tractor._runtime import Actor
 from tractor._entry import _mp_main

@@ -392,14 +393,15 @@ async def new_proc(
     errors: dict[tuple[str, str], Exception],

     # passed through to actor main
-    bind_addrs: list[tuple[str, int]],
-    parent_addr: tuple[str, int],
+    bind_addrs: list[AddressTypes],
+    parent_addr: AddressTypes,
     _runtime_vars: dict[str, Any],  # serialized and sent to _child

     *,

     infect_asyncio: bool = False,
-    task_status: TaskStatus[Portal] = trio.TASK_STATUS_IGNORED
+    task_status: TaskStatus[Portal] = trio.TASK_STATUS_IGNORED,
+    proc_kwargs: dict[str, any] = {}

 ) -> None:

@@ -419,6 +421,7 @@ async def new_proc(
             _runtime_vars,  # run time vars
             infect_asyncio=infect_asyncio,
             task_status=task_status,
+            proc_kwargs=proc_kwargs
         )


@@ -429,12 +432,13 @@ async def trio_proc(
     errors: dict[tuple[str, str], Exception],

     # passed through to actor main
-    bind_addrs: list[tuple[str, int]],
-    parent_addr: tuple[str, int],
+    bind_addrs: list[AddressTypes],
+    parent_addr: AddressTypes,
     _runtime_vars: dict[str, Any],  # serialized and sent to _child
     *,
     infect_asyncio: bool = False,
-    task_status: TaskStatus[Portal] = trio.TASK_STATUS_IGNORED
+    task_status: TaskStatus[Portal] = trio.TASK_STATUS_IGNORED,
+    proc_kwargs: dict[str, any] = {}

 ) -> None:
     '''
@@ -475,7 +479,7 @@ async def trio_proc(
     proc: trio.Process|None = None
     try:
         try:
-            proc: trio.Process = await trio.lowlevel.open_process(spawn_cmd)
+            proc: trio.Process = await trio.lowlevel.open_process(spawn_cmd, **proc_kwargs)
             log.runtime(
                 'Started new child\n'
                 f'|_{proc}\n'
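Threading `proc_kwargs` through to `trio.lowlevel.open_process` matters because trio forwards extra keyword options straight to `subprocess.Popen`; notably `pass_fds`, which is presumably how the eventfd/ringbuf file descriptors introduced later in this branch get inherited by a child. A hedged sketch:

```python
# Sketch: anything in `proc_kwargs` is forwarded by
# `trio.lowlevel.open_process` to `subprocess.Popen`, so fd
# inheritance can be requested with the stock `pass_fds` option.
import sys

import trio


async def main() -> None:
    # a dummy child that just exits; in tractor this would be the
    # subactor's `spawn_cmd`
    cmd = [sys.executable, '-c', 'import sys; sys.exit(0)']
    proc_kwargs: dict = {
        # fds for an eventfd/ringbuf token would go here, e.g.:
        # 'pass_fds': token.fds,   # hypothetical, see `RBToken.fds` below
    }
    proc = await trio.lowlevel.open_process(cmd, **proc_kwargs)
    await proc.wait()
    print('child exited with', proc.returncode)


trio.run(main)
```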
@@ -517,15 +521,15 @@ async def trio_proc(

             # send a "spawning specification" which configures the
             # initial runtime state of the child.
-            await chan.send(
-                SpawnSpec(
-                    _parent_main_data=subactor._parent_main_data,
-                    enable_modules=subactor.enable_modules,
-                    reg_addrs=subactor.reg_addrs,
-                    bind_addrs=bind_addrs,
-                    _runtime_vars=_runtime_vars,
-                )
+            sspec = SpawnSpec(
+                _parent_main_data=subactor._parent_main_data,
+                enable_modules=subactor.enable_modules,
+                reg_addrs=subactor.reg_addrs,
+                bind_addrs=bind_addrs,
+                _runtime_vars=_runtime_vars,
             )
+            log.runtime(f'Sending spawn spec: {str(sspec)}')
+            await chan.send(sspec)

             # track subactor in current nursery
             curr_actor: Actor = current_actor()

@@ -635,12 +639,13 @@ async def mp_proc(
     subactor: Actor,
     errors: dict[tuple[str, str], Exception],
     # passed through to actor main
-    bind_addrs: list[tuple[str, int]],
-    parent_addr: tuple[str, int],
+    bind_addrs: list[AddressTypes],
+    parent_addr: AddressTypes,
     _runtime_vars: dict[str, Any],  # serialized and sent to _child
     *,
     infect_asyncio: bool = False,
-    task_status: TaskStatus[Portal] = trio.TASK_STATUS_IGNORED
+    task_status: TaskStatus[Portal] = trio.TASK_STATUS_IGNORED,
+    proc_kwargs: dict[str, any] = {}

 ) -> None:


@@ -56,7 +56,7 @@ from tractor.msg import (
 if TYPE_CHECKING:
     from ._runtime import Actor
     from ._context import Context
-    from ._ipc import Channel
+    from .ipc import Channel


 log = get_logger(__name__)

@@ -28,7 +28,13 @@ import warnings

 import trio

 from .devx._debug import maybe_wait_for_debugger
+from ._addr import (
+    AddressTypes,
+    preferred_transport,
+    get_address_cls
+)
 from ._state import current_actor, is_main_process
 from .log import get_logger, get_loglevel
 from ._runtime import Actor

@@ -47,8 +53,6 @@ if TYPE_CHECKING:

 log = get_logger(__name__)

-_default_bind_addr: tuple[str, int] = ('127.0.0.1', 0)
-

 class ActorNursery:
     '''

@@ -130,8 +134,9 @@ class ActorNursery:

         *,

-        bind_addrs: list[tuple[str, int]] = [_default_bind_addr],
+        bind_addrs: list[AddressTypes]|None = None,
         rpc_module_paths: list[str]|None = None,
+        enable_transports: list[str] = [preferred_transport],
         enable_modules: list[str]|None = None,
         loglevel: str|None = None,  # set log level per subactor
         debug_mode: bool|None = None,

@@ -141,6 +146,7 @@ class ActorNursery:
         # a `._ria_nursery` since the dependent APIs have been
         # removed!
         nursery: trio.Nursery|None = None,
+        proc_kwargs: dict[str, any] = {}

     ) -> Portal:
         '''

@@ -155,6 +161,12 @@ class ActorNursery:
             or get_loglevel()
         )

+        if not bind_addrs:
+            bind_addrs: list[AddressTypes] = [
+                get_address_cls(transport).get_random().unwrap()
+                for transport in enable_transports
+            ]
+
         # configure and pass runtime state
         _rtv = _state._runtime_vars.copy()
         _rtv['_is_root'] = False
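`get_address_cls(...).get_random()` lives in the unshown `_addr` module; for TCP it presumably amounts to letting the OS allocate an ephemeral port. For reference, the stdlib way to get such a random bind address:

```python
# What a random TCP bind addr presumably boils down to: bind port 0
# and let the OS pick an ephemeral port. (Note the port can in
# principle be re-used by another process after the socket closes.)
import socket


def random_tcp_addr(host: str = '127.0.0.1') -> tuple[str, int]:
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        s.bind((host, 0))
        return s.getsockname()  # (host, os-chosen-port)


print(random_tcp_addr())
```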
@@ -204,6 +216,7 @@ class ActorNursery:
                 parent_addr,
                 _rtv,  # run time vars
                 infect_asyncio=infect_asyncio,
+                proc_kwargs=proc_kwargs
             )
         )


@@ -222,11 +235,12 @@ class ActorNursery:
         *,

         name: str | None = None,
-        bind_addrs: tuple[str, int] = [_default_bind_addr],
+        bind_addrs: AddressTypes|None = None,
         rpc_module_paths: list[str] | None = None,
         enable_modules: list[str] | None = None,
         loglevel: str | None = None,  # set log level per subactor
         infect_asyncio: bool = False,
+        proc_kwargs: dict[str, any] = {},

         **kwargs,  # explicit args to ``fn``

@@ -257,6 +271,7 @@ class ActorNursery:
             # use the run_in_actor nursery
             nursery=self._ria_nursery,
             infect_asyncio=infect_asyncio,
+            proc_kwargs=proc_kwargs
         )

         # XXX: don't allow stream funcs
@@ -0,0 +1,81 @@
import os
import random


def generate_single_byte_msgs(amount: int) -> bytes:
    '''
    Generate a byte instance of len `amount` with:

    ```
    byte_at_index(i) = (i % 10).encode()
    ```

    this results in constantly repeating sequences of:

        b'0123456789'

    '''
    return b''.join(str(i % 10).encode() for i in range(amount))


def generate_sample_messages(
    amount: int,
    rand_min: int = 0,
    rand_max: int = 0,
    silent: bool = False,
) -> tuple[list[bytes], int]:
    '''
    Generate bytes msgs for tests.

    Messages will have the following format:

    ```
    b'[{i:08}]' + os.urandom(random.randint(rand_min, rand_max))
    ```

    so for message index 25:

        b'[00000025]' + random_bytes

    '''
    msgs = []
    size = 0

    log_interval = None
    if not silent:
        print(f'\ngenerating {amount} messages...')

        # calculate an apropiate log interval based on
        # max message size
        max_msg_size = 10 + rand_max

        if max_msg_size <= 32 * 1024:
            log_interval = 10_000

        else:
            log_interval = 1000

    for i in range(amount):
        msg = f'[{i:08}]'.encode('utf-8')

        if rand_max > 0:
            msg += os.urandom(
                random.randint(rand_min, rand_max))

        size += len(msg)

        msgs.append(msg)

        if (
            not silent
            and
            i > 0
            and
            i % log_interval == 0
        ):
            print(f'{i} generated')

    if not silent:
        print(f'done, {size:,} bytes in total')

    return msgs, size
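Quick usage of the two generators above (the module path is assumed from this branch's test-helper layout):

```python
# Exercising the sample-message helpers defined above; the import
# path `tractor._testing.samples` is an assumption.
from tractor._testing.samples import (
    generate_single_byte_msgs,
    generate_sample_messages,
)

buf = generate_single_byte_msgs(25)
assert buf == b'0123456789012345678901234'

msgs, total = generate_sample_messages(3, rand_min=4, rand_max=8, silent=True)
assert msgs[0].startswith(b'[00000000]')
assert total == sum(len(m) for m in msgs)
```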
@@ -91,7 +91,7 @@ from tractor._state import (
 if TYPE_CHECKING:
     from trio.lowlevel import Task
     from threading import Thread
-    from tractor._ipc import Channel
+    from tractor.ipc import Channel
     from tractor._runtime import (
         Actor,
     )
@@ -0,0 +1,66 @@
# tractor: structured concurrent "actors".
# Copyright 2018-eternity Tyler Goodlet.

# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.

# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Affero General Public License for more details.

# You should have received a copy of the GNU Affero General Public License
# along with this program.  If not, see <https://www.gnu.org/licenses/>.
import platform

from ._transport import (
    MsgTransportKey as MsgTransportKey,
    MsgType as MsgType,
    MsgTransport as MsgTransport,
    MsgpackTransport as MsgpackTransport
)

from ._tcp import MsgpackTCPStream as MsgpackTCPStream
from ._uds import MsgpackUDSStream as MsgpackUDSStream

from ._types import (
    transport_from_addr as transport_from_addr,
    transport_from_stream as transport_from_stream,
)

from ._chan import (
    _connect_chan as _connect_chan,
    Channel as Channel
)

if platform.system() == 'Linux':
    from ._linux import (
        EFD_SEMAPHORE as EFD_SEMAPHORE,
        EFD_CLOEXEC as EFD_CLOEXEC,
        EFD_NONBLOCK as EFD_NONBLOCK,
        open_eventfd as open_eventfd,
        write_eventfd as write_eventfd,
        read_eventfd as read_eventfd,
        close_eventfd as close_eventfd,
        EFDReadCancelled as EFDReadCancelled,
        EventFD as EventFD,
    )

    from ._ringbuf import (
        RBToken as RBToken,
        open_ringbuf as open_ringbuf,
        RingBuffSender as RingBuffSender,
        RingBuffReceiver as RingBuffReceiver,
        open_ringbuf_pair as open_ringbuf_pair,
        attach_to_ringbuf_receiver as attach_to_ringbuf_receiver,
        attach_to_ringbuf_sender as attach_to_ringbuf_sender,
        attach_to_ringbuf_stream as attach_to_ringbuf_stream,
        RingBuffBytesSender as RingBuffBytesSender,
        RingBuffBytesReceiver as RingBuffBytesReceiver,
        RingBuffChannel as RingBuffChannel,
        attach_to_ringbuf_schannel as attach_to_ringbuf_schannel,
        attach_to_ringbuf_rchannel as attach_to_ringbuf_rchannel,
        attach_to_ringbuf_channel as attach_to_ringbuf_channel,
    )
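Everything above is re-exported with `name as name` aliases so type checkers treat them as explicit public names; consumer code pulls from the package root, with the Linux-only eventfd/ringbuf names gated exactly as in the `__init__` itself:

```python
# Importing from the new `tractor.ipc` package root per the exports above.
import platform

from tractor.ipc import Channel, _connect_chan

if platform.system() == 'Linux':
    from tractor.ipc import (
        EventFD,
        open_ringbuf,
        attach_to_ringbuf_channel,
    )
```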
@@ -0,0 +1,351 @@
# tractor: structured concurrent "actors".
# Copyright 2018-eternity Tyler Goodlet.

# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.

# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Affero General Public License for more details.

# You should have received a copy of the GNU Affero General Public License
# along with this program.  If not, see <https://www.gnu.org/licenses/>.

"""
Inter-process comms abstractions

"""
from __future__ import annotations
from collections.abc import AsyncGenerator
from contextlib import (
    asynccontextmanager as acm,
    contextmanager as cm,
)
import platform
from pprint import pformat
import typing
from typing import (
    Any,
)

import trio

from tractor.ipc._transport import MsgTransport
from tractor.ipc._types import (
    transport_from_addr,
    transport_from_stream,
)
from tractor._addr import (
    wrap_address,
    Address,
    AddressTypes
)
from tractor.log import get_logger
from tractor._exceptions import (
    MsgTypeError,
    pack_from_raise,
)
from tractor.msg import MsgCodec


log = get_logger(__name__)

_is_windows = platform.system() == 'Windows'


class Channel:
    '''
    An inter-process channel for communication between (remote) actors.

    Wraps a ``MsgStream``: transport + encoding IPC connection.

    Currently we only support ``trio.SocketStream`` for transport
    (aka TCP) and the ``msgpack`` interchange format via the ``msgspec``
    codec libary.

    '''
    def __init__(

        self,
        transport: MsgTransport|None = None,
        # TODO: optional reconnection support?
        # auto_reconnect: bool = False,
        # on_reconnect: typing.Callable[..., typing.Awaitable] = None,

    ) -> None:

        # self._recon_seq = on_reconnect
        # self._autorecon = auto_reconnect

        # Either created in ``.connect()`` or passed in by
        # user in ``.from_stream()``.
        self._transport: MsgTransport|None = transport

        # set after handshake - always uid of far end
        self.uid: tuple[str, str]|None = None

        self._aiter_msgs = self._iter_msgs()
        self._exc: Exception|None = None  # set if far end actor errors
        self._closed: bool = False

        # flag set by ``Portal.cancel_actor()`` indicating remote
        # (possibly peer) cancellation of the far end actor
        # runtime.
        self._cancel_called: bool = False

    @property
    def stream(self) -> trio.abc.Stream | None:
        return self._transport.stream if self._transport else None

    @property
    def msgstream(self) -> MsgTransport:
        log.info(
            '`Channel.msgstream` is an old name, use `._transport`'
        )
        return self._transport

    @property
    def transport(self) -> MsgTransport:
        return self._transport

    @classmethod
    def from_stream(
        cls,
        stream: trio.abc.Stream,
    ) -> Channel:
        transport_cls = transport_from_stream(stream)
        return Channel(
            transport=transport_cls(stream)
        )

    @classmethod
    async def from_addr(
        cls,
        addr: AddressTypes,
        **kwargs
    ) -> Channel:
        addr: Address = wrap_address(addr)
        transport_cls = transport_from_addr(addr)
        transport = await transport_cls.connect_to(addr, **kwargs)

        log.transport(
            f'Opened channel[{type(transport)}]: {transport.laddr} -> {transport.raddr}'
        )
        return Channel(transport=transport)

    @cm
    def apply_codec(
        self,
        codec: MsgCodec,
    ) -> None:
        '''
        Temporarily override the underlying IPC msg codec for
        dynamic enforcement of messaging schema.

        '''
        orig: MsgCodec = self._transport.codec
        try:
            self._transport.codec = codec
            yield
        finally:
            self._transport.codec = orig

    # TODO: do a .src/.dst: str for maddrs?
    def __repr__(self) -> str:
        if not self._transport:
            return '<Channel with inactive transport?>'

        return repr(
            self._transport
        ).replace(  # type: ignore
            "socket.socket",
            "Channel",
        )

    @property
    def laddr(self) -> Address|None:
        return self._transport.laddr if self._transport else None

    @property
    def raddr(self) -> Address|None:
        return self._transport.raddr if self._transport else None

    # TODO: something like,
    # `pdbp.hideframe_on(errors=[MsgTypeError])`
    # instead of the `try/except` hack we have rn..
    # seems like a pretty useful thing to have in general
    # along with being able to filter certain stack frame(s / sets)
    # possibly based on the current log-level?
    async def send(
        self,
        payload: Any,

        hide_tb: bool = False,

    ) -> None:
        '''
        Send a coded msg-blob over the transport.

        '''
        __tracebackhide__: bool = hide_tb
        try:
            log.transport(
                '=> send IPC msg:\n\n'
                f'{pformat(payload)}\n'
            )
            # assert self._transport  # but why typing?
            await self._transport.send(
                payload,
                hide_tb=hide_tb,
            )
        except BaseException as _err:
            err = _err  # bind for introspection
            if not isinstance(_err, MsgTypeError):
                # assert err
                __tracebackhide__: bool = False
            else:
                try:
                    assert err.cid

                except KeyError:
                    raise err

            raise

    async def recv(self) -> Any:
        assert self._transport
        return await self._transport.recv()

        # TODO: auto-reconnect features like 0mq/nanomsg?
        # -[ ] implement it manually with nods to SC prot
        #      possibly on multiple transport backends?
        #  -> seems like that might be re-inventing scalability
        #     prots tho no?
        # try:
        #     return await self._transport.recv()
        # except trio.BrokenResourceError:
        #     if self._autorecon:
        #         await self._reconnect()
        #         return await self.recv()
        #     raise

    async def aclose(self) -> None:

        log.transport(
            f'Closing channel to {self.uid} '
            f'{self.laddr} -> {self.raddr}'
        )
        assert self._transport
        await self._transport.stream.aclose()
        self._closed = True

    async def __aenter__(self):
        await self.connect()
        return self

    async def __aexit__(self, *args):
        await self.aclose(*args)

    def __aiter__(self):
        return self._aiter_msgs

    # ?TODO? run any reconnection sequence?
    # -[ ] prolly should be impl-ed as deco-API?
    #
    # async def _reconnect(self) -> None:
    #     """Handle connection failures by polling until a reconnect can be
    #     established.
    #     """
    #     down = False
    #     while True:
    #         try:
    #             with trio.move_on_after(3) as cancel_scope:
    #                 await self.connect()
    #             cancelled = cancel_scope.cancelled_caught
    #             if cancelled:
    #                 log.transport(
    #                     "Reconnect timed out after 3 seconds, retrying...")
    #                 continue
    #             else:
    #                 log.transport("Stream connection re-established!")

    #                 # on_recon = self._recon_seq
    #                 # if on_recon:
    #                 #     await on_recon(self)

    #                 break
    #         except (OSError, ConnectionRefusedError):
    #             if not down:
    #                 down = True
    #                 log.transport(
    #                     f"Connection to {self.raddr} went down, waiting"
    #                     " for re-establishment")
    #             await trio.sleep(1)

    async def _iter_msgs(
        self
    ) -> AsyncGenerator[Any, None]:
        '''
        Yield `MsgType` IPC msgs decoded and deliverd from
        an underlying `MsgTransport` protocol.

        This is a streaming routine alo implemented as an async-gen
        func (same a `MsgTransport._iter_pkts()`) gets allocated by
        a `.__call__()` inside `.__init__()` where it is assigned to
        the `._aiter_msgs` attr.

        '''
        assert self._transport
        while True:
            try:
                async for msg in self._transport:
                    match msg:
                        # NOTE: if transport/interchange delivers
                        # a type error, we pack it with the far
                        # end peer `Actor.uid` and relay the
                        # `Error`-msg upward to the `._rpc` stack
                        # for normal RAE handling.
                        case MsgTypeError():
                            yield pack_from_raise(
                                local_err=msg,
                                cid=msg.cid,

                                # XXX we pack it here bc lower
                                # layers have no notion of an
                                # actor-id ;)
                                src_uid=self.uid,
                            )
                        case _:
                            yield msg

            except trio.BrokenResourceError:

                # if not self._autorecon:
                raise

            await self.aclose()

            # if self._autorecon:  # attempt reconnect
            #     await self._reconnect()
            #     continue

    def connected(self) -> bool:
        return self._transport.connected() if self._transport else False


@acm
async def _connect_chan(
    addr: AddressTypes
) -> typing.AsyncGenerator[Channel, None]:
    '''
    Create and connect a channel with disconnect on context manager
    teardown.

    '''
    chan = await Channel.from_addr(addr)
    yield chan
    with trio.CancelScope(shield=True):
        await chan.aclose()
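Putting the new `Channel` to work: `from_addr` replaces the old construct-then-`connect()` two-step, and `_connect_chan` wraps it for scoped use. A sketch, assuming a TCP 2-tuple addr value (one of the `AddressTypes` forms) and a peer already listening there:

```python
# Sketch: dialing a peer with the `Channel.from_addr` / `_connect_chan`
# APIs defined above. The concrete addr format shown, (host, port),
# is an assumption based on `AddressTypes`.
import trio

from tractor.ipc import _connect_chan


async def main() -> None:
    async with _connect_chan(('127.0.0.1', 1616)) as chan:
        await chan.send({'hello': 'world'})
        reply = await chan.recv()
        print('peer replied:', reply)


trio.run(main)
```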
@@ -0,0 +1,187 @@
# tractor: structured concurrent "actors".
# Copyright 2018-eternity Tyler Goodlet.

# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.

# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Affero General Public License for more details.

# You should have received a copy of the GNU Affero General Public License
# along with this program.  If not, see <https://www.gnu.org/licenses/>.
'''
Linux specifics, for now we are only exposing EventFD

'''
import os
import errno

import cffi
import trio

ffi = cffi.FFI()

# Declare the C functions and types we plan to use.
# - eventfd: for creating the event file descriptor
# - write: for writing to the file descriptor
# - read: for reading from the file descriptor
# - close: for closing the file descriptor
ffi.cdef(
    '''
    int eventfd(unsigned int initval, int flags);

    ssize_t write(int fd, const void *buf, size_t count);
    ssize_t read(int fd, void *buf, size_t count);

    int close(int fd);
    '''
)


# Open the default dynamic library (essentially 'libc' in most cases)
C = ffi.dlopen(None)


# Constants from <sys/eventfd.h>, if needed.
EFD_SEMAPHORE = 1
EFD_CLOEXEC = 0o2000000
EFD_NONBLOCK = 0o4000


def open_eventfd(initval: int = 0, flags: int = 0) -> int:
    '''
    Open an eventfd with the given initial value and flags.
    Returns the file descriptor on success, otherwise raises OSError.

    '''
    fd = C.eventfd(initval, flags)
    if fd < 0:
        raise OSError(errno.errorcode[ffi.errno], 'eventfd failed')
    return fd


def write_eventfd(fd: int, value: int) -> int:
    '''
    Write a 64-bit integer (uint64_t) to the eventfd's counter.

    '''
    # Create a uint64_t* in C, store `value`
    data_ptr = ffi.new('uint64_t *', value)

    # Call write(fd, data_ptr, 8)
    # We expect to write exactly 8 bytes (sizeof(uint64_t))
    ret = C.write(fd, data_ptr, 8)
    if ret < 0:
        raise OSError(errno.errorcode[ffi.errno], 'write to eventfd failed')
    return ret


def read_eventfd(fd: int) -> int:
    '''
    Read a 64-bit integer (uint64_t) from the eventfd, returning the value.
    Reading resets the counter to 0 (unless using EFD_SEMAPHORE).

    '''
    # Allocate an 8-byte buffer in C for reading
    buf = ffi.new('char[]', 8)

    ret = C.read(fd, buf, 8)
    if ret < 0:
        raise OSError(errno.errorcode[ffi.errno], 'read from eventfd failed')
    # Convert the 8 bytes we read into a Python integer
    data_bytes = ffi.unpack(buf, 8)  # returns a Python bytes object of length 8
    value = int.from_bytes(data_bytes, byteorder='little', signed=False)
    return value


def close_eventfd(fd: int) -> int:
    '''
    Close the eventfd.

    '''
    ret = C.close(fd)
    if ret < 0:
        raise OSError(errno.errorcode[ffi.errno], 'close failed')


class EFDReadCancelled(Exception):
    ...


class EventFD:
    '''
    Use a previously opened eventfd(2), meant to be used in
    sub-actors after root actor opens the eventfds then passes
    them through pass_fds

    '''

    def __init__(
        self,
        fd: int,
        omode: str
    ):
        self._fd: int = fd
        self._omode: str = omode
        self._fobj = None
        self._cscope: trio.CancelScope | None = None

    @property
    def fd(self) -> int | None:
        return self._fd

    def write(self, value: int) -> int:
        return write_eventfd(self._fd, value)

    async def read(self) -> int:
        '''
        Async wrapper for `read_eventfd(self.fd)`

        `trio.to_thread.run_sync` is used, need to use a `trio.CancelScope`
        in order to make it cancellable when `self.close()` is called.

        '''
        self._cscope = trio.CancelScope()
        with self._cscope:
            return await trio.to_thread.run_sync(
                read_eventfd, self._fd,
                abandon_on_cancel=True
            )

        if self._cscope.cancelled_caught:
            raise EFDReadCancelled

        self._cscope = None

    def read_direct(self) -> int:
        '''
        Direct call to `read_eventfd(self.fd)`, unless `eventfd` was
        opened with `EFD_NONBLOCK` its gonna block the thread.

        '''
        return read_eventfd(self._fd)

    def open(self):
        self._fobj = os.fdopen(self._fd, self._omode)

    def close(self):
        if self._fobj:
            try:
                self._fobj.close()

            except OSError:
                ...

        if self._cscope:
            self._cscope.cancel()

    def __enter__(self):
        self.open()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.close()
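A small end-to-end exercise of the wrapper above: one task blocks in `EventFD.read()` (which parks in a worker thread) while another bumps the counter. The `tractor.ipc._linux` import path is assumed from this branch's layout.

```python
# Exercising `EventFD` from above: a reader blocks until a writer
# signals the fd, at which point it observes the written value.
import trio

from tractor.ipc._linux import open_eventfd, EventFD  # path assumed


async def main() -> None:
    efd = EventFD(open_eventfd(), 'r')
    with efd:
        async with trio.open_nursery() as n:

            async def reader() -> None:
                val = await efd.read()
                print('eventfd fired with', val)  # -> 42

            n.start_soon(reader)
            await trio.sleep(0.1)
            efd.write(42)


trio.run(main)
```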
@@ -0,0 +1,45 @@
# tractor: structured concurrent "actors".
# Copyright 2018-eternity Tyler Goodlet.

# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.

# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Affero General Public License for more details.

# You should have received a copy of the GNU Affero General Public License
# along with this program.  If not, see <https://www.gnu.org/licenses/>.
'''
Utils to tame mp non-SC madeness

'''
def disable_mantracker():
    '''
    Disable all ``multiprocessing``` "resource tracking" machinery since
    it's an absolute multi-threaded mess of non-SC madness.

    '''
    from multiprocessing import resource_tracker as mantracker

    # Tell the "resource tracker" thing to fuck off.
    class ManTracker(mantracker.ResourceTracker):
        def register(self, name, rtype):
            pass

        def unregister(self, name, rtype):
            pass

        def ensure_running(self):
            pass

    # "know your land and know your prey"
    # https://www.dailymotion.com/video/x6ozzco
    mantracker._resource_tracker = ManTracker()
    mantracker.register = mantracker._resource_tracker.register
    mantracker.ensure_running = mantracker._resource_tracker.ensure_running
    mantracker.unregister = mantracker._resource_tracker.unregister
    mantracker.getfd = mantracker._resource_tracker.getfd
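Call order matters here: the monkey-patch has to land before any `SharedMemory` segment is created, which is why the ringbuf module (next file) invokes it at import time. For illustration:

```python
# The patch must run before shared memory is allocated so the stock
# resource tracker never registers (and later force-unlinks) segments
# shared across actor processes. Import path assumed from this branch.
from multiprocessing.shared_memory import SharedMemory

from tractor.ipc._mp_bs import disable_mantracker

disable_mantracker()

shm = SharedMemory(name='demo', size=1024, create=True)
try:
    shm.buf[:5] = b'hello'
finally:
    shm.close()
    shm.unlink()
```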
@ -0,0 +1,654 @@
|
||||||
|
# tractor: structured concurrent "actors".
|
||||||
|
# Copyright 2018-eternity Tyler Goodlet.
|
||||||
|
|
||||||
|
# This program is free software: you can redistribute it and/or modify
|
||||||
|
# it under the terms of the GNU Affero General Public License as published by
|
||||||
|
# the Free Software Foundation, either version 3 of the License, or
|
||||||
|
# (at your option) any later version.
|
||||||
|
|
||||||
|
# This program is distributed in the hope that it will be useful,
|
||||||
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
# GNU Affero General Public License for more details.
|
||||||
|
|
||||||
|
# You should have received a copy of the GNU Affero General Public License
|
||||||
|
# along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||||
|
'''
|
||||||
|
IPC Reliable RingBuffer implementation
|
||||||
|
|
||||||
|
'''
|
||||||
|
from __future__ import annotations
|
||||||
|
import struct
|
||||||
|
from typing import (
|
||||||
|
ContextManager,
|
||||||
|
AsyncContextManager
|
||||||
|
)
|
||||||
|
from contextlib import (
|
||||||
|
contextmanager as cm,
|
||||||
|
asynccontextmanager as acm
|
||||||
|
)
|
||||||
|
from multiprocessing.shared_memory import SharedMemory
|
||||||
|
|
||||||
|
import trio
|
||||||
|
from msgspec import (
|
||||||
|
Struct,
|
||||||
|
to_builtins
|
||||||
|
)
|
||||||
|
|
||||||
|
from ._linux import (
|
||||||
|
open_eventfd,
|
||||||
|
EFDReadCancelled,
|
||||||
|
EventFD
|
||||||
|
)
|
||||||
|
from ._mp_bs import disable_mantracker
|
||||||
|
from tractor.log import get_logger
|
||||||
|
from tractor._exceptions import (
|
||||||
|
InternalError
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
log = get_logger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
disable_mantracker()
|
||||||
|
|
||||||
|
_DEFAULT_RB_SIZE = 10 * 1024
|
||||||
|
|
||||||
|
|
||||||
|
class RBToken(Struct, frozen=True):
|
||||||
|
'''
|
||||||
|
RingBuffer token contains necesary info to open the three
|
||||||
|
eventfds and the shared memory
|
||||||
|
|
||||||
|
'''
|
||||||
|
shm_name: str
|
||||||
|
|
||||||
|
write_eventfd: int # used to signal writer ptr advance
|
||||||
|
wrap_eventfd: int # used to signal reader ready after wrap around
|
||||||
|
eof_eventfd: int # used to signal writer closed
|
||||||
|
|
||||||
|
buf_size: int
|
||||||
|
|
||||||
|
def as_msg(self):
|
||||||
|
return to_builtins(self)
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def from_msg(cls, msg: dict) -> RBToken:
|
||||||
|
if isinstance(msg, RBToken):
|
||||||
|
return msg
|
||||||
|
|
||||||
|
return RBToken(**msg)
|
||||||
|
|
||||||
|
@property
|
||||||
|
def fds(self) -> tuple[int, int, int]:
|
||||||
|
'''
|
||||||
|
Useful for `pass_fds` params
|
||||||
|
|
||||||
|
'''
|
||||||
|
return (
|
||||||
|
self.write_eventfd,
|
||||||
|
self.wrap_eventfd,
|
||||||
|
self.eof_eventfd
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
@cm
|
||||||
|
def open_ringbuf(
|
||||||
|
shm_name: str,
|
||||||
|
buf_size: int = _DEFAULT_RB_SIZE,
|
||||||
|
) -> ContextManager[RBToken]:
|
||||||
|
'''
|
||||||
|
Handle resources for a ringbuf (shm, eventfd), yield `RBToken` to
|
||||||
|
be used with `attach_to_ringbuf_sender` and `attach_to_ringbuf_receiver`
|
||||||
|
|
||||||
|
'''
|
||||||
|
shm = SharedMemory(
|
||||||
|
name=shm_name,
|
||||||
|
size=buf_size,
|
||||||
|
create=True
|
||||||
|
)
|
||||||
|
try:
|
||||||
|
with (
|
||||||
|
EventFD(open_eventfd(), 'r') as write_event,
|
||||||
|
EventFD(open_eventfd(), 'r') as wrap_event,
|
||||||
|
EventFD(open_eventfd(), 'r') as eof_event,
|
||||||
|
):
|
||||||
|
token = RBToken(
|
||||||
|
shm_name=shm_name,
|
||||||
|
write_eventfd=write_event.fd,
|
||||||
|
wrap_eventfd=wrap_event.fd,
|
||||||
|
eof_eventfd=eof_event.fd,
|
||||||
|
buf_size=buf_size
|
||||||
|
)
|
||||||
|
yield token
|
||||||
|
|
||||||
|
finally:
|
||||||
|
shm.unlink()
|
||||||
|
|
||||||
|
|
||||||
|
Buffer = bytes | bytearray | memoryview
|
||||||
|
|
||||||
|
|
||||||
|
'''
|
||||||
|
IPC Reliable Ring Buffer
|
||||||
|
|
||||||
|
`eventfd(2)` is used for wrap around sync, to signal writes to
|
||||||
|
the reader and end of stream.
|
||||||
|
|
||||||
|
'''
|
||||||
|
|
||||||
|
|
||||||
class RingBuffSender(trio.abc.SendStream):
    '''
    Ring Buffer sender side implementation

    Do not use directly! manage with `attach_to_ringbuf_sender`
    after having opened a ringbuf context with `open_ringbuf`.

    '''
    def __init__(
        self,
        token: RBToken,
        cleanup: bool = False
    ):
        self._token = RBToken.from_msg(token)
        self._shm: SharedMemory | None = None
        self._write_event = EventFD(self._token.write_eventfd, 'w')
        self._wrap_event = EventFD(self._token.wrap_eventfd, 'r')
        self._eof_event = EventFD(self._token.eof_eventfd, 'w')
        self._ptr = 0

        self._cleanup = cleanup
        self._send_lock = trio.StrictFIFOLock()

    @property
    def name(self) -> str:
        if not self._shm:
            raise ValueError('shared memory not initialized yet!')
        return self._shm.name

    @property
    def size(self) -> int:
        return self._token.buf_size

    @property
    def ptr(self) -> int:
        return self._ptr

    @property
    def write_fd(self) -> int:
        return self._write_event.fd

    @property
    def wrap_fd(self) -> int:
        return self._wrap_event.fd

    async def _wait_wrap(self):
        await self._wrap_event.read()

    async def send_all(self, data: Buffer):
        async with self._send_lock:
            # while data is larger than the remaining buf
            target_ptr = self.ptr + len(data)
            while target_ptr > self.size:
                # write all bytes that fit
                remaining = self.size - self.ptr
                self._shm.buf[self.ptr:] = data[:remaining]
                # signal write and wait for reader wrap around
                self._write_event.write(remaining)
                await self._wait_wrap()

                # wrap around and trim already written bytes
                self._ptr = 0
                data = data[remaining:]
                target_ptr = self._ptr + len(data)

            # remaining data fits on buffer
            self._shm.buf[self.ptr:target_ptr] = data
            self._write_event.write(len(data))
            self._ptr = target_ptr

    async def wait_send_all_might_not_block(self):
        raise NotImplementedError

    def open(self):
        self._shm = SharedMemory(
            name=self._token.shm_name,
            size=self._token.buf_size,
            create=False
        )
        self._write_event.open()
        self._wrap_event.open()
        self._eof_event.open()

    def close(self):
        self._eof_event.write(
            self._ptr if self._ptr > 0 else self.size
        )

        if self._cleanup:
            self._write_event.close()
            self._wrap_event.close()
            self._eof_event.close()
            self._shm.close()

    async def aclose(self):
        async with self._send_lock:
            self.close()

    async def __aenter__(self):
        self.open()
        return self
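A concrete trace of the wrap-around arithmetic in `send_all` above (numbers chosen purely for illustration):

# buf_size = 8, current ptr = 6, sending 5 bytes:
#   target_ptr = 6 + 5 = 11 > 8     -> must wrap
#   remaining  = 8 - 6 = 2          -> buf[6:8] = data[:2]
#   write_event.write(2); block on the reader's wrap_event
#   ptr = 0; data = data[2:]        -> 3 bytes left
#   target_ptr = 0 + 3 = 3 <= 8     -> buf[0:3] = data
#   write_event.write(3); ptr = 3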
class RingBuffReceiver(trio.abc.ReceiveStream):
    '''
    Ring Buffer receiver side implementation

    Do not use directly! manage with `attach_to_ringbuf_receiver`
    after having opened a ringbuf context with `open_ringbuf`.

    '''
    def __init__(
        self,
        token: RBToken,
        cleanup: bool = True,
    ):
        self._token = RBToken.from_msg(token)
        self._shm: SharedMemory | None = None
        self._write_event = EventFD(self._token.write_eventfd, 'w')
        self._wrap_event = EventFD(self._token.wrap_eventfd, 'r')
        self._eof_event = EventFD(self._token.eof_eventfd, 'r')
        self._ptr: int = 0
        self._write_ptr: int = 0
        self._end_ptr: int = -1

        self._cleanup: bool = cleanup

    @property
    def name(self) -> str:
        if not self._shm:
            raise ValueError('shared memory not initialized yet!')
        return self._shm.name

    @property
    def size(self) -> int:
        return self._token.buf_size

    @property
    def ptr(self) -> int:
        return self._ptr

    @property
    def write_fd(self) -> int:
        return self._write_event.fd

    @property
    def wrap_fd(self) -> int:
        return self._wrap_event.fd

    async def _eof_monitor_task(self):
        '''
        Long running EOF event monitor, automatically run in bg by the
        `attach_to_ringbuf_receiver` context manager; if the EOF event
        is set its value will be the end pointer (highest valid
        index to be read from buf). After setting `self._end_ptr`
        we close the write event which should cancel any blocked
        `self._write_event.read()`s on it.

        '''
        try:
            self._end_ptr = await self._eof_event.read()
            self._write_event.close()

        except EFDReadCancelled:
            ...

        except trio.Cancelled:
            ...

    async def receive_some(self, max_bytes: int | None = None) -> bytes:
        '''
        Receive up to `max_bytes`; if no `max_bytes` is provided
        a reasonable default is used.

        '''
        if max_bytes is None:
            max_bytes: int = _DEFAULT_RB_SIZE

        if max_bytes < 1:
            raise ValueError("max_bytes must be >= 1")

        # delta is remaining bytes we haven't read
        delta = self._write_ptr - self._ptr
        if delta == 0:
            # we have read all we can, see if new data is available
            if self._end_ptr < 0:
                # if we haven't been signaled about EOF yet
                try:
                    delta = await self._write_event.read()
                    self._write_ptr += delta

                except EFDReadCancelled:
                    # while waiting for new data `self._write_event` was
                    # closed, this means the writer signaled EOF
                    if self._end_ptr > 0:
                        # final self._write_ptr modification and
                        # recalculate delta
                        self._write_ptr = self._end_ptr
                        delta = self._end_ptr - self._ptr

                    else:
                        # shouldn't happen cause self._eof_monitor_task
                        # always sets self._end_ptr before closing
                        # self._write_event
                        raise InternalError(
                            'self._write_event.read cancelled but self._end_ptr is not set'
                        )

            else:
                # no more bytes to read and self._end_ptr set, EOF reached
                return b''

        # don't overflow caller
        delta = min(delta, max_bytes)

        target_ptr = self._ptr + delta

        # fetch next segment and advance ptr
        segment = bytes(self._shm.buf[self._ptr:target_ptr])
        self._ptr = target_ptr

        if self._ptr == self.size:
            # reached the end, signal wrap around
            self._ptr = 0
            self._write_ptr = 0
            self._wrap_event.write(1)

        return segment

    def open(self):
        self._shm = SharedMemory(
            name=self._token.shm_name,
            size=self._token.buf_size,
            create=False
        )
        self._write_event.open()
        self._wrap_event.open()
        self._eof_event.open()

    def close(self):
        if self._cleanup:
            self._write_event.close()
            self._wrap_event.close()
            self._eof_event.close()
            self._shm.close()

    async def aclose(self):
        self.close()

    async def __aenter__(self):
        self.open()
        return self
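A reader-side loop sketch: the empty-bytes return from `receive_some()` is the EOF condition, set once the sender's `close()` has written the end pointer (mirrors trio's `ReceiveStream` semantics):

async def drain_ring(receiver) -> bytes:
    out = b''
    # b'' signals EOF (sender closed the ring)
    while (chunk := await receiver.receive_some()):
        out += chunk
    return out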
@acm
async def attach_to_ringbuf_receiver(
    token: RBToken,
    cleanup: bool = True
) -> AsyncContextManager[RingBuffReceiver]:
    '''
    Attach a RingBuffReceiver from a previously opened
    RBToken.

    Launches `receiver._eof_monitor_task` in a `trio.Nursery`.
    '''
    async with (
        trio.open_nursery() as n,
        RingBuffReceiver(
            token,
            cleanup=cleanup
        ) as receiver
    ):
        n.start_soon(receiver._eof_monitor_task)
        yield receiver


@acm
async def attach_to_ringbuf_sender(
    token: RBToken,
    cleanup: bool = True
) -> AsyncContextManager[RingBuffSender]:
    '''
    Attach a RingBuffSender from a previously opened
    RBToken.

    '''
    async with RingBuffSender(
        token,
        cleanup=cleanup
    ) as sender:
        yield sender
@cm
def open_ringbuf_pair(
    name: str,
    buf_size: int = _DEFAULT_RB_SIZE
) -> ContextManager[tuple[RBToken, RBToken]]:
    '''
    Handle resources for a ringbuf pair to be used for
    bidirectional messaging.

    '''
    with (
        open_ringbuf(
            name + '.pair0',
            buf_size=buf_size
        ) as token_0,

        open_ringbuf(
            name + '.pair1',
            buf_size=buf_size
        ) as token_1
    ):
        yield token_0, token_1


@acm
async def attach_to_ringbuf_stream(
    token_in: RBToken,
    token_out: RBToken,
    cleanup_in: bool = True,
    cleanup_out: bool = True
) -> AsyncContextManager[trio.StapledStream]:
    '''
    Attach a trio.StapledStream from a previously opened
    ringbuf pair.

    '''
    async with (
        attach_to_ringbuf_receiver(
            token_in,
            cleanup=cleanup_in
        ) as receiver,
        attach_to_ringbuf_sender(
            token_out,
            cleanup=cleanup_out
        ) as sender,
    ):
        yield trio.StapledStream(sender, receiver)
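For duplex use, each side attaches to the pair from `open_ringbuf_pair()` above with the tokens swapped, so one side's output ring is the other side's input; a wiring sketch (the tokens are msg-safe structs and would normally be shipped to each side over regular tractor IPC):

with open_ringbuf_pair('demo-pair') as (token_0, token_1):
    # side A attaches with:
    #   attach_to_ringbuf_stream(token_in=token_0, token_out=token_1)
    # side B attaches with:
    #   attach_to_ringbuf_stream(token_in=token_1, token_out=token_0)
    ...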
class RingBuffBytesSender(trio.abc.SendChannel[bytes]):
    '''
    In order to guarantee full messages are received, all bytes
    sent by `RingBuffBytesSender` are preceded with a 4 byte header
    which decodes into a uint32 indicating the actual size of the
    next payload.

    Optional batch mode:

    If `batch_size` > 1 messages won't get sent immediately but will be
    stored until `batch_size` messages are pending, then it will send
    them all at once.

    `batch_size` can be changed dynamically but always call `flush()`
    right before.

    '''
    def __init__(
        self,
        sender: RingBuffSender,
        batch_size: int = 1
    ):
        self._sender = sender
        self.batch_size = batch_size
        self._batch_msg_len = 0
        self._batch: bytes = b''

    async def flush(self) -> None:
        await self._sender.send_all(self._batch)
        self._batch = b''
        self._batch_msg_len = 0

    async def send(self, value: bytes) -> None:
        msg: bytes = struct.pack("<I", len(value)) + value
        if self.batch_size == 1:
            await self._sender.send_all(msg)
            return

        self._batch += msg
        self._batch_msg_len += 1
        if self._batch_msg_len == self.batch_size:
            await self.flush()

    async def aclose(self) -> None:
        await self._sender.aclose()
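The length-prefix framing described in the docstring above is just a 4 byte little-endian uint32 header; a standalone sketch of the encode/decode round trip using the same `struct` format string:

import struct

payload = b'hi there'
frame = struct.pack("<I", len(payload)) + payload

# receiver side: read 4 header bytes, then exactly `size` payload bytes
size, = struct.unpack("<I", frame[:4])
assert frame[4:4 + size] == payload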
class RingBuffBytesReceiver(trio.abc.ReceiveChannel[bytes]):
    '''
    See `RingBuffBytesSender` docstring.

    A `tricycle.BufferedReceiveStream` is used for the
    `receive_exactly` API.
    '''
    def __init__(
        self,
        receiver: RingBuffReceiver
    ):
        self._receiver = receiver

    async def _receive_exactly(self, num_bytes: int) -> bytes:
        '''
        Fetch bytes from receiver until we read exactly `num_bytes`
        or end of stream is signaled.

        '''
        payload = b''
        while len(payload) < num_bytes:
            remaining = num_bytes - len(payload)

            new_bytes = await self._receiver.receive_some(
                max_bytes=remaining
            )

            if new_bytes == b'':
                raise trio.EndOfChannel

            payload += new_bytes

        return payload

    async def receive(self) -> bytes:
        header: bytes = await self._receive_exactly(4)
        size: int
        size, = struct.unpack("<I", header)
        if size == 0:
            raise trio.EndOfChannel
        return await self._receive_exactly(size)

    async def aclose(self) -> None:
        await self._receiver.aclose()
@acm
async def attach_to_ringbuf_rchannel(
    token: RBToken,
    cleanup: bool = True
) -> AsyncContextManager[RingBuffBytesReceiver]:
    '''
    Attach a RingBuffBytesReceiver from a previously opened
    RBToken.
    '''
    async with attach_to_ringbuf_receiver(
        token, cleanup=cleanup
    ) as receiver:
        yield RingBuffBytesReceiver(receiver)


@acm
async def attach_to_ringbuf_schannel(
    token: RBToken,
    cleanup: bool = True,
    batch_size: int = 1,
) -> AsyncContextManager[RingBuffBytesSender]:
    '''
    Attach a RingBuffBytesSender from a previously opened
    RBToken.
    '''
    async with attach_to_ringbuf_sender(
        token, cleanup=cleanup
    ) as sender:
        yield RingBuffBytesSender(sender, batch_size=batch_size)
class RingBuffChannel(trio.abc.Channel[bytes]):
    '''
    Combine `RingBuffBytesSender` and `RingBuffBytesReceiver`
    in order to expose the bidirectional `trio.abc.Channel` API.

    '''
    def __init__(
        self,
        sender: RingBuffBytesSender,
        receiver: RingBuffBytesReceiver
    ):
        self._sender = sender
        self._receiver = receiver

    async def send(self, value: bytes):
        await self._sender.send(value)

    async def receive(self) -> bytes:
        return await self._receiver.receive()

    async def aclose(self):
        await self._receiver.aclose()
        await self._sender.aclose()


@acm
async def attach_to_ringbuf_channel(
    token_in: RBToken,
    token_out: RBToken,
    cleanup_in: bool = True,
    cleanup_out: bool = True
) -> AsyncContextManager[RingBuffChannel]:
    '''
    Attach to an already opened ringbuf pair and return
    a `RingBuffChannel`.

    '''
    async with (
        attach_to_ringbuf_rchannel(
            token_in,
            cleanup=cleanup_in
        ) as receiver,
        attach_to_ringbuf_schannel(
            token_out,
            cleanup=cleanup_out
        ) as sender,
    ):
        yield RingBuffChannel(sender, receiver)
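An end-to-end sketch of the full bytes-channel stack; single-process for brevity (in practice each side lives in a different actor and receives its `RBToken`s over normal tractor IPC, and real use should keep the two sides in separate tasks):

import trio

async def main():
    with open_ringbuf_pair('chan-demo') as (token_0, token_1):
        async with (
            attach_to_ringbuf_channel(token_0, token_1) as side_a,
            attach_to_ringbuf_channel(token_1, token_0) as side_b,
        ):
            # small payload fits the ring so this won't block
            await side_a.send(b'ping')
            assert await side_b.receive() == b'ping'

trio.run(main)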
@@ -0,0 +1,809 @@
# tractor: structured concurrent "actors".
# Copyright 2018-eternity Tyler Goodlet.

# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.

# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Affero General Public License for more details.

# You should have received a copy of the GNU Affero General Public License
# along with this program.  If not, see <https://www.gnu.org/licenses/>.

"""
SC friendly shared memory management geared at real-time
processing.

Support for ``numpy`` compatible array-buffers is provided but is
considered optional within the context of this runtime-library.

"""
from __future__ import annotations
from sys import byteorder
import time
from typing import Optional
from multiprocessing import shared_memory as shm
from multiprocessing.shared_memory import (
    SharedMemory,
    ShareableList,
)

from msgspec import (
    Struct,
    to_builtins
)
import tractor

from tractor.ipc._mp_bs import disable_mantracker
from tractor.log import get_logger


_USE_POSIX = getattr(shm, '_USE_POSIX', False)
if _USE_POSIX:
    from _posixshmem import shm_unlink


try:
    import numpy as np
    from numpy.lib import recfunctions as rfn
    import nptyping
except ImportError:
    pass


log = get_logger(__name__)


disable_mantracker()
class SharedInt:
    '''
    Wrapper around a single entry shared memory array which
    holds an ``int`` value used as an index counter.

    '''
    def __init__(
        self,
        shm: SharedMemory,
    ) -> None:
        self._shm = shm

    @property
    def value(self) -> int:
        return int.from_bytes(self._shm.buf, byteorder)

    @value.setter
    def value(self, value) -> None:
        self._shm.buf[:] = value.to_bytes(self._shm.size, byteorder)

    def destroy(self) -> None:
        if _USE_POSIX:
            # We manually unlink to bypass all the "resource tracker"
            # nonsense meant for non-SC systems.
            name = self._shm.name
            try:
                shm_unlink(name)
            except FileNotFoundError:
                # might be a teardown race here?
                log.warning(f'Shm for {name} already unlinked?')
class NDToken(Struct, frozen=True):
    '''
    Internal representation of a shared memory ``numpy`` array "token"
    which can be used to key and load a system (OS) wide shm entry
    and correctly read the array by type signature.

    This type is msg safe.

    '''
    shm_name: str  # this serves as a "key" value
    shm_first_index_name: str
    shm_last_index_name: str
    dtype_descr: tuple
    size: int  # in struct-array index / row terms

    # TODO: use nptyping here on dtypes
    @property
    def dtype(self) -> list[tuple[str, str, tuple[int, ...]]]:
        return np.dtype(
            list(
                map(tuple, self.dtype_descr)
            )
        ).descr

    def as_msg(self):
        return to_builtins(self)

    @classmethod
    def from_msg(cls, msg: dict) -> NDToken:
        if isinstance(msg, NDToken):
            return msg

        # TODO: native struct decoding
        # return _token_dec.decode(msg)

        msg['dtype_descr'] = tuple(map(tuple, msg['dtype_descr']))
        return NDToken(**msg)


# _token_dec = msgspec.msgpack.Decoder(NDToken)

# TODO: this api?
# _known_tokens = tractor.ActorVar('_shm_tokens', {})
# _known_tokens = tractor.ContextStack('_known_tokens', )
# _known_tokens = trio.RunVar('shms', {})

# TODO: this should maybe be provided via
# a `.trionics.maybe_open_context()` wrapper factory?
# process-local store of keys to tokens
_known_tokens: dict[str, NDToken] = {}
def get_shm_token(key: str) -> NDToken | None:
    '''
    Convenience func to check if a token
    for the provided key is known by this process.

    Returns either the ``numpy`` token or a string for a shared list.

    '''
    return _known_tokens.get(key)


def _make_token(
    key: str,
    size: int,
    dtype: np.dtype,

) -> NDToken:
    '''
    Create a serializable token that can be used
    to access a shared array.

    '''
    return NDToken(
        shm_name=key,
        shm_first_index_name=key + "_first",
        shm_last_index_name=key + "_last",
        dtype_descr=tuple(np.dtype(dtype).descr),
        size=size,
    )
class ShmArray:
    '''
    A shared memory ``numpy.ndarray`` API.

    An underlying shared memory buffer is allocated based on
    a user specified ``numpy.ndarray``. This fixed size array
    can be read and written to by pushing data both onto the "front"
    or "back" of a set index range. The indexes for the "first" and
    "last" index are themselves stored in shared memory (accessed via
    ``SharedInt`` interfaces) values such that multiple processes can
    interact with the same array using a synchronized-index.

    '''
    def __init__(
        self,
        shmarr: np.ndarray,
        first: SharedInt,
        last: SharedInt,
        shm: SharedMemory,
        # readonly: bool = True,
    ) -> None:
        self._array = shmarr

        # indexes for first and last indices corresponding
        # to filled data
        self._first = first
        self._last = last

        self._len = len(shmarr)
        self._shm = shm
        self._post_init: bool = False

        # pushing data does not write the index (aka primary key)
        self._write_fields: list[str] | None = None
        dtype = shmarr.dtype
        if dtype.fields:
            self._write_fields = list(shmarr.dtype.fields.keys())[1:]

    # TODO: ringbuf api?

    @property
    def _token(self) -> NDToken:
        return NDToken(
            shm_name=self._shm.name,
            shm_first_index_name=self._first._shm.name,
            shm_last_index_name=self._last._shm.name,
            dtype_descr=tuple(self._array.dtype.descr),
            size=self._len,
        )

    @property
    def token(self) -> dict:
        """Shared memory token that can be serialized and used by
        another process to attach to this array.
        """
        return self._token.as_msg()

    @property
    def index(self) -> int:
        return self._last.value % self._len

    @property
    def array(self) -> np.ndarray:
        '''
        Return an up-to-date ``np.ndarray`` view of the
        so-far-written data to the underlying shm buffer.

        '''
        a = self._array[self._first.value:self._last.value]

        # first, last = self._first.value, self._last.value
        # a = self._array[first:last]

        # TODO: eventually comment this once we've not seen it in the
        # wild in a long time..
        # XXX: race where first/last indexes cause a reader
        # to load an empty array..
        if len(a) == 0 and self._post_init:
            raise RuntimeError('Empty array race condition hit!?')
            # breakpoint()

        return a

    def ustruct(
        self,
        fields: Optional[list[str]] = None,

        # type that all field values will be cast to
        # in the returned view.
        common_dtype: np.dtype = float,

    ) -> np.ndarray:

        array = self._array

        if fields:
            selection = array[fields]
            # fcount = len(fields)
        else:
            selection = array
            # fcount = len(array.dtype.fields)

        # XXX: manual ``.view()`` attempt that also doesn't work.
        # uview = selection.view(
        #     dtype='<f16',
        # ).reshape(-1, 4, order='A')

        # assert len(selection) == len(uview)

        u = rfn.structured_to_unstructured(
            selection,
            # dtype=float,
            copy=True,
        )

        # unstruct = np.ndarray(u.shape, dtype=a.dtype, buffer=shm.buf)
        # array[:] = a[:]
        return u
        # return ShmArray(
        #     shmarr=u,
        #     first=self._first,
        #     last=self._last,
        #     shm=self._shm
        # )

    def last(
        self,
        length: int = 1,

    ) -> np.ndarray:
        '''
        Return the last ``length``'s worth of ("row") entries from the
        array.

        '''
        return self.array[-length:]

    def push(
        self,
        data: np.ndarray,

        field_map: Optional[dict[str, str]] = None,
        prepend: bool = False,
        update_first: bool = True,
        start: int | None = None,

    ) -> int:
        '''
        Ring buffer like "push" to append data
        into the buffer and return updated "last" index.

        NB: no actual ring logic yet to give a "loop around" on overflow
        condition, lel.

        '''
        length = len(data)

        if prepend:
            index = (start or self._first.value) - length

            if index < 0:
                raise ValueError(
                    f'Array size of {self._len} was overrun during prepend.\n'
                    f'You have passed {abs(index)} too many datums.'
                )

        else:
            index = start if start is not None else self._last.value

        end = index + length

        if field_map:
            src_names, dst_names = zip(*field_map.items())
        else:
            dst_names = src_names = self._write_fields

        try:
            self._array[
                list(dst_names)
            ][index:end] = data[list(src_names)][:]

            # NOTE: there was a race here between updating
            # the first and last indices and when the next reader
            # tries to access ``.array`` (which due to the index
            # overlap will be empty). Pretty sure we've fixed it now
            # but leaving this here as a reminder.
            if (
                prepend
                and update_first
                and length
            ):
                assert index < self._first.value

            if (
                index < self._first.value
                and update_first
            ):
                assert prepend, 'prepend=True not passed but index decreased?'
                self._first.value = index

            elif not prepend:
                self._last.value = end

            self._post_init = True
            return end

        except ValueError as err:
            if field_map:
                raise

            # should raise if diff detected
            self.diff_err_fields(data)
            raise err

    def diff_err_fields(
        self,
        data: np.ndarray,
    ) -> None:
        # reraise with any field discrepancy
        our_fields, their_fields = (
            set(self._array.dtype.fields),
            set(data.dtype.fields),
        )

        only_in_ours = our_fields - their_fields
        only_in_theirs = their_fields - our_fields

        if only_in_ours:
            raise TypeError(
                f"Input array is missing field(s): {only_in_ours}"
            )
        elif only_in_theirs:
            raise TypeError(
                f"Input array has unknown field(s): {only_in_theirs}"
            )

    # TODO: support "silent" prepends that don't update ._first.value?
    def prepend(
        self,
        data: np.ndarray,
    ) -> int:
        end = self.push(data, prepend=True)
        assert end

    def close(self) -> None:
        self._first._shm.close()
        self._last._shm.close()
        self._shm.close()

    def destroy(self) -> None:
        if _USE_POSIX:
            # We manually unlink to bypass all the "resource tracker"
            # nonsense meant for non-SC systems.
            shm_unlink(self._shm.name)

        self._first.destroy()
        self._last.destroy()

    def flush(self) -> None:
        # TODO: flush to storage backend like markestore?
        ...
def open_shm_ndarray(
    size: int,
    key: str | None = None,
    dtype: np.dtype | None = None,
    append_start_index: int | None = None,
    readonly: bool = False,

) -> ShmArray:
    '''
    Open a memory shared ``numpy`` array using the standard library.

    This call unlinks (aka permanently destroys) the buffer on teardown
    and thus should be used from the parent-most accessor (process).

    '''
    # create new shared mem segment for which we
    # have write permission
    a = np.zeros(size, dtype=dtype)
    a['index'] = np.arange(len(a))

    shm = SharedMemory(
        name=key,
        create=True,
        size=a.nbytes
    )
    array = np.ndarray(
        a.shape,
        dtype=a.dtype,
        buffer=shm.buf
    )
    array[:] = a[:]
    array.setflags(write=int(not readonly))

    token = _make_token(
        key=key,
        size=size,
        dtype=dtype,
    )

    # create single entry arrays for storing the first and last indices
    first = SharedInt(
        shm=SharedMemory(
            name=token.shm_first_index_name,
            create=True,
            size=4,  # std int
        )
    )

    last = SharedInt(
        shm=SharedMemory(
            name=token.shm_last_index_name,
            create=True,
            size=4,  # std int
        )
    )

    # Start the "real-time" append-updated (or "pushed-to") section
    # after some start index: ``append_start_index``. This allows appending
    # from a start point in the array which isn't the 0 index and looks
    # something like,
    # -------------------------
    # |              |  i
    # _________________________
    # <-------------> <------->
    #  history         real-time
    #
    # Once fully "prepended", the history section will leave the
    # ``ShmArray._start.value: int = 0`` and the yet-to-be written
    # real-time section will start at ``ShmArray.index: int``.

    # this sets the index to nearly 2/3rds into the length of
    # the buffer leaving at least a "days worth of second samples"
    # for the real-time section.
    if append_start_index is None:
        append_start_index = round(size * 0.616)

    last.value = first.value = append_start_index

    shmarr = ShmArray(
        array,
        first,
        last,
        shm,
    )

    assert shmarr._token == token
    _known_tokens[key] = shmarr.token

    # "unlink" created shm on process teardown by
    # pushing teardown calls onto actor context stack
    stack = tractor.current_actor().lifetime_stack
    stack.callback(shmarr.close)
    stack.callback(shmarr.destroy)

    return shmarr
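An allocation sketch (assumes `numpy` is installed and a tractor runtime is active, since the teardown hooks go on the current actor's `lifetime_stack`); the dtype's first field is treated as the index/primary-key column which `open_shm_ndarray` pre-fills:

import numpy as np

ohlc_dtype = np.dtype([
    ('index', int),
    ('open', float),
    ('close', float),
])

shm = open_shm_ndarray(
    size=1024,
    key='demo.ohlc',
    dtype=ohlc_dtype,
)
token_msg = shm.token  # msg-safe dict other actors can attach with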
def attach_shm_ndarray(
    token: tuple[str, str, tuple[str, str]],
    readonly: bool = True,

) -> ShmArray:
    '''
    Attach to an existing shared memory array previously
    created by another process using ``open_shared_array``.

    No new shared mem is allocated but wrapper types for read/write
    access are constructed.

    '''
    token = NDToken.from_msg(token)
    key = token.shm_name

    if key in _known_tokens:
        assert NDToken.from_msg(_known_tokens[key]) == token, "WTF"

    # XXX: ugh, looks like due to the ``shm_open()`` C api we can't
    # actually place files in a subdir, see discussion here:
    # https://stackoverflow.com/a/11103289

    # attach to array buffer and view as per dtype
    _err: Optional[Exception] = None
    for _ in range(3):
        try:
            shm = SharedMemory(
                name=key,
                create=False,
            )
            break
        except OSError as oserr:
            _err = oserr
            time.sleep(0.1)
    else:
        if _err:
            raise _err

    shmarr = np.ndarray(
        (token.size,),
        dtype=token.dtype,
        buffer=shm.buf
    )
    shmarr.setflags(write=int(not readonly))

    first = SharedInt(
        shm=SharedMemory(
            name=token.shm_first_index_name,
            create=False,
            size=4,  # std int
        ),
    )
    last = SharedInt(
        shm=SharedMemory(
            name=token.shm_last_index_name,
            create=False,
            size=4,  # std int
        ),
    )

    # make sure we can read
    first.value

    sha = ShmArray(
        shmarr,
        first,
        last,
        shm,
    )
    # read test
    sha.array

    # Stash key -> token knowledge for future queries
    # via `maybe_open_shm_ndarray()` but only after we know
    # we can attach.
    if key not in _known_tokens:
        _known_tokens[key] = token

    # "close" attached shm on actor teardown
    tractor.current_actor().lifetime_stack.callback(sha.close)

    return sha
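The matching attach-side sketch: only the token dict travels over IPC, never the buffer itself, and readers get a zero-copy view by default (`token_msg` is the `ShmArray.token` value produced by the allocator above):

shm = attach_shm_ndarray(
    token=token_msg,
    readonly=True,
)
recent = shm.last(10)  # latest 10 rows written by the allocator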
def maybe_open_shm_ndarray(
    key: str,  # unique identifier for segment
    size: int,
    dtype: np.dtype | None = None,
    append_start_index: int = 0,
    readonly: bool = True,

) -> tuple[ShmArray, bool]:
    '''
    Attempt to attach to a shared memory block using a "key" lookup
    to registered blocks in the user's overall "system" registry
    (presumes you don't have the block's explicit token).

    This function is meant to solve the problem of discovering whether
    a shared array token has been allocated or discovered by the actor
    running in **this** process. Systems where multiple actors may seek
    to access a common block can use this function to attempt to acquire
    a token as discovered by the actors who have previously stored
    a "key" -> ``NDToken`` map in an actor local (aka python global)
    variable.

    If you know the explicit ``NDToken`` for your memory segment instead
    use ``attach_shm_array``.

    '''
    try:
        # see if we already know this key
        token = _known_tokens[key]
        return (
            attach_shm_ndarray(
                token=token,
                readonly=readonly,
            ),
            False,  # not newly opened
        )
    except KeyError:
        log.warning(f"Could not find {key} in shms cache")
        if dtype:
            token = _make_token(
                key,
                size=size,
                dtype=dtype,
            )
        else:

            try:
                return (
                    attach_shm_ndarray(
                        token=token,
                        readonly=readonly,
                    ),
                    False,
                )
            except FileNotFoundError:
                log.warning(f"Could not attach to shm with token {token}")

        # This actor does not know about memory
        # associated with the provided "key".
        # Attempt to open a block and expect
        # to fail if a block has been allocated
        # on the OS by someone else.
        return (
            open_shm_ndarray(
                key=key,
                size=size,
                dtype=dtype,
                append_start_index=append_start_index,
                readonly=readonly,
            ),
            True,
        )
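Discovery-style callers use this attach-or-allocate wrapper and branch on the returned `opened` flag, roughly (`ohlc_dtype` as in the allocation sketch above):

shm, opened = maybe_open_shm_ndarray(
    key='demo.ohlc',
    size=1024,
    dtype=ohlc_dtype,
    readonly=False,
)
if opened:
    # this actor allocated the segment: seed it before readers attach
    ...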
class ShmList(ShareableList):
    '''
    Carbon copy of ``.shared_memory.ShareableList`` with a few
    enhancements:

    - readonly mode via instance var flag `._readonly: bool`
    - ``.__getitem__()`` accepts ``slice`` inputs
    - exposes the underlying buffer "name" as a ``.key: str``

    '''
    def __init__(
        self,
        sequence: list | None = None,
        *,
        name: str | None = None,
        readonly: bool = True

    ) -> None:
        self._readonly = readonly
        self._key = name
        return super().__init__(
            sequence=sequence,
            name=name,
        )

    @property
    def key(self) -> str:
        return self._key

    @property
    def readonly(self) -> bool:
        return self._readonly

    def __setitem__(
        self,
        position,
        value,

    ) -> None:

        # mimic ``numpy`` error
        if self._readonly:
            raise ValueError('assignment destination is read-only')

        return super().__setitem__(position, value)

    def __getitem__(
        self,
        indexish,
    ) -> list:

        # NOTE: this is a non-writeable view (copy?) of the buffer
        # in a new list instance.
        if isinstance(indexish, slice):
            return list(self)[indexish]

        return super().__getitem__(indexish)

    # TODO: should we offer a `.array` and `.push()` equivalent
    # to the `ShmArray`?
    # currently we have the following limitations:
    # - can't write slices of input using traditional slice-assign
    #   syntax due to the ``ShareableList.__setitem__()`` implementation.
    # - ``list(shmlist)`` returns a non-mutable copy instead of
    #   a writeable view which would be handier for numpy-style ops.
def open_shm_list(
    key: str,
    sequence: list | None = None,
    size: int = int(2 ** 10),
    dtype: float | int | bool | str | bytes | None = float,
    readonly: bool = True,

) -> ShmList:

    if sequence is None:
        default = {
            float: 0.,
            int: 0,
            bool: True,
            str: 'doggy',
            None: None,
        }[dtype]
        sequence = [default] * size

    shml = ShmList(
        sequence=sequence,
        name=key,
        readonly=readonly,
    )

    # "close" attached shm on actor teardown
    try:
        actor = tractor.current_actor()
        actor.lifetime_stack.callback(shml.shm.close)
        actor.lifetime_stack.callback(shml.shm.unlink)
    except RuntimeError:
        log.warning('tractor runtime not active, skipping teardown steps')

    return shml


def attach_shm_list(
    key: str,
    readonly: bool = False,

) -> ShmList:

    return ShmList(
        name=key,
        readonly=readonly,
    )
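A `ShmList` round-trip sketch (the writer must open with `readonly=False`; the reader attaches by the same key, and slicing on the read side comes from the `__getitem__` extension above):

shml = open_shm_list(
    key='demo.list',
    size=8,
    dtype=int,
    readonly=False,
)
shml[0] = 42

view = attach_shm_list(key='demo.list', readonly=True)
assert view[0:2] == [42, 0]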
@@ -0,0 +1,105 @@
# tractor: structured concurrent "actors".
# Copyright 2018-eternity Tyler Goodlet.

# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.

# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Affero General Public License for more details.

# You should have received a copy of the GNU Affero General Public License
# along with this program.  If not, see <https://www.gnu.org/licenses/>.
'''
TCP implementation of tractor.ipc._transport.MsgTransport protocol

'''
from __future__ import annotations

import trio

from tractor.msg import MsgCodec
from tractor.log import get_logger
from tractor._addr import TCPAddress
from tractor.ipc._transport import MsgpackTransport


log = get_logger(__name__)


# TODO: typing oddity.. not sure why we have to inherit here, but it
# seems to be an issue with `get_msg_transport()` returning
# a `Type[Protocol]`; probably should make a `mypy` issue?
class MsgpackTCPStream(MsgpackTransport):
    '''
    A ``trio.SocketStream`` delivering ``msgpack`` formatted data
    using the ``msgspec`` codec lib.

    '''
    address_type = TCPAddress
    layer_key: int = 4

    # def __init__(
    #     self,
    #     stream: trio.SocketStream,
    #     prefix_size: int = 4,
    #     codec: CodecType = None,

    # ) -> None:
    #     super().__init__(
    #         stream,
    #         prefix_size=prefix_size,
    #         codec=codec
    #     )

    @property
    def maddr(self) -> str:
        host, port = self.raddr.unwrap()
        return (
            f'/ipv4/{host}'
            f'/{self.address_type.name_key}/{port}'
            # f'/{self.chan.uid[0]}'
            # f'/{self.cid}'

            # f'/cid={cid_head}..{cid_tail}'
            # TODO: ? not use this ^ right ?
        )

    def connected(self) -> bool:
        return self.stream.socket.fileno() != -1

    @classmethod
    async def connect_to(
        cls,
        destaddr: TCPAddress,
        prefix_size: int = 4,
        codec: MsgCodec|None = None,
        **kwargs
    ) -> MsgpackTCPStream:
        stream = await trio.open_tcp_stream(
            *destaddr.unwrap(),
            **kwargs
        )
        return MsgpackTCPStream(
            stream,
            prefix_size=prefix_size,
            codec=codec
        )

    @classmethod
    def get_stream_addrs(
        cls,
        stream: trio.SocketStream
    ) -> tuple[
        tuple[str, int],
        tuple[str, int]
    ]:
        lsockname = stream.socket.getsockname()
        rsockname = stream.socket.getpeername()
        return (
            TCPAddress.from_addr(tuple(lsockname[:2])),
            TCPAddress.from_addr(tuple(rsockname[:2])),
        )
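A transport-level dial sketch (internal API, normally driven by the higher level `Channel`; the `TCPAddress.from_addr()` usage mirrors `get_stream_addrs()` above):

from tractor._addr import TCPAddress

async def dial(host: str, port: int) -> MsgpackTCPStream:
    addr = TCPAddress.from_addr((host, port))
    # opens the socket and wraps it in the framed msgpack transport
    return await MsgpackTCPStream.connect_to(addr)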
@@ -13,41 +13,32 @@
 # You should have received a copy of the GNU Affero General Public License
 # along with this program.  If not, see <https://www.gnu.org/licenses/>.
-"""
-Inter-process comms abstractions
-
-"""
+'''
+typing.Protocol based generic msg API, implement this class to add backends for
+tractor.ipc.Channel
+
+'''
 from __future__ import annotations
+from typing import (
+    runtime_checkable,
+    Type,
+    Protocol,
+    TypeVar,
+    ClassVar
+)
 from collections.abc import (
     AsyncGenerator,
     AsyncIterator,
 )
-from contextlib import (
-    asynccontextmanager as acm,
-    contextmanager as cm,
-)
-import platform
-from pprint import pformat
 import struct
-import typing
-from typing import (
-    Any,
-    Callable,
-    runtime_checkable,
-    Protocol,
-    Type,
-    TypeVar,
-)
 
+import trio
 import msgspec
 from tricycle import BufferedReceiveStream
-import trio
 
 from tractor.log import get_logger
 from tractor._exceptions import (
     MsgTypeError,
-    pack_from_raise,
     TransportClosed,
     _mk_send_mte,
     _mk_recv_mte,
@@ -59,30 +50,13 @@ from tractor.msg import (
     types as msgtypes,
     pretty_struct,
 )
+from tractor._addr import Address
 
 log = get_logger(__name__)
 
-_is_windows = platform.system() == 'Windows'
-
-
-def get_stream_addrs(
-    stream: trio.SocketStream
-) -> tuple[
-    tuple[str, int],  # local
-    tuple[str, int],  # remote
-]:
-    '''
-    Return the `trio` streaming transport prot's socket-addrs for
-    both the local and remote sides as a pair.
-
-    '''
-    # rn, should both be IP sockets
-    lsockname = stream.socket.getsockname()
-    rsockname = stream.socket.getpeername()
-    return (
-        tuple(lsockname[:2]),
-        tuple(rsockname[:2]),
-    )
+# (codec, transport)
+MsgTransportKey = tuple[str, str]
 
 
 # from tractor.msg.types import MsgType
@@ -92,9 +66,6 @@ def get_stream_addrs(
 MsgType = TypeVar('MsgType')
 
 
-# TODO: break up this mod into a subpkg so we can start adding new
-# backends and move this type stuff into a dedicated file.. Bo
-#
 @runtime_checkable
 class MsgTransport(Protocol[MsgType]):
     #
@@ -102,11 +73,11 @@ class MsgTransport(Protocol[MsgType]):
     # eventual msg definition/types?
     # - https://docs.python.org/3/library/typing.html#typing.Protocol
 
-    stream: trio.SocketStream
+    stream: trio.abc.Stream
     drained: list[MsgType]
 
-    def __init__(self, stream: trio.SocketStream) -> None:
-        ...
+    address_type: ClassVar[Type[Address]]
+    codec_key: ClassVar[str]
 
     # XXX: should this instead be called `.sendall()`?
     async def send(self, msg: MsgType) -> None:
@@ -126,26 +97,47 @@ class MsgTransport(Protocol[MsgType]):
     def drain(self) -> AsyncIterator[dict]:
         ...
 
+    @classmethod
+    def key(cls) -> MsgTransportKey:
+        return cls.codec_key, cls.address_type.name_key
+
     @property
-    def laddr(self) -> tuple[str, int]:
+    def laddr(self) -> Address:
         ...
 
     @property
-    def raddr(self) -> tuple[str, int]:
+    def raddr(self) -> Address:
+        ...
+
+    @property
+    def maddr(self) -> str:
+        ...
+
+    @classmethod
+    async def connect_to(
+        cls,
+        addr: Address,
+        **kwargs
+    ) -> MsgTransport:
+        ...
+
+    @classmethod
+    def get_stream_addrs(
+        cls,
+        stream: trio.abc.Stream
+    ) -> tuple[
+        Address,  # local
+        Address   # remote
+    ]:
+        '''
+        Return the `trio` streaming transport prot's addrs for both
+        the local and remote sides as a pair.
+
+        '''
         ...
 
 
-# TODO: typing oddity.. not sure why we have to inherit here, but it
-# seems to be an issue with `get_msg_transport()` returning
-# a `Type[Protocol]`; probably should make a `mypy` issue?
-class MsgpackTCPStream(MsgTransport):
-    '''
-    A ``trio.SocketStream`` delivering ``msgpack`` formatted data
-    using the ``msgspec`` codec lib.
-
-    '''
-    layer_key: int = 4
-    name_key: str = 'tcp'
-
+class MsgpackTransport(MsgTransport):
 
     # TODO: better naming for this?
     # -[ ] check how libp2p does naming for such things?
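With the protocol additions in the hunk above, a new backend only needs the two `ClassVar`s plus the method set; its `(codec, transport)` lookup key then falls out of `key()`. A hypothetical sketch (`UDSAddress` is not part of this diff, and `codec_key = 'msgpack'` is assumed to be set on `MsgpackTransport` in hunks not shown here):

class MsgpackUDSStream(MsgpackTransport):
    # inherits the msgpack framing machinery; only the address
    # type (and hence `.key()`) differs from the TCP backend
    address_type = UDSAddress  # hypothetical unix-socket address type

# MsgpackUDSStream.key() -> ('msgpack', UDSAddress.name_key)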
@@ -153,7 +145,7 @@ class MsgpackTCPStream(MsgTransport):
 
     def __init__(
         self,
-        stream: trio.SocketStream,
+        stream: trio.abc.Stream,
         prefix_size: int = 4,
 
         # XXX optionally provided codec pair for `msgspec`:
@@ -161,18 +153,11 @@ class MsgpackTCPStream(MsgTransport):
         #
         # TODO: define this as a `Codec` struct which can be
         # overriden dynamically by the application/runtime?
-        codec: tuple[
-            Callable[[Any], Any]|None,  # coder
-            Callable[[type, Any], Any]|None,  # decoder
-        ]|None = None,
+        codec: MsgCodec = None,
 
     ) -> None:
 
         self.stream = stream
-        assert self.stream.socket
-
-        # should both be IP sockets
-        self._laddr, self._raddr = get_stream_addrs(stream)
+        self._laddr, self._raddr = self.get_stream_addrs(stream)
 
         # create read loop instance
         self._aiter_pkts = self._iter_packets()
@@ -255,8 +240,8 @@ class MsgpackTCPStream(MsgTransport):
                 raise TransportClosed(
                     message=(
                         f'IPC transport already closed by peer\n'
-                        f'x]> {type(trans_err)}\n'
+                        f'x)> {type(trans_err)}\n'
                         f' |_{self}\n'
                     ),
                     loglevel=loglevel,
                 ) from trans_err
@@ -273,8 +258,8 @@ class MsgpackTCPStream(MsgTransport):
             raise TransportClosed(
                 message=(
                     f'IPC transport already manually closed locally?\n'
-                    f'x]> {type(closure_err)} \n'
+                    f'x)> {type(closure_err)} \n'
                     f' |_{self}\n'
                 ),
                 loglevel='error',
                 raise_on_report=(
@@ -289,8 +274,8 @@ class MsgpackTCPStream(MsgTransport):
             raise TransportClosed(
                 message=(
                     f'IPC transport already gracefully closed\n'
-                    f']>\n'
-                    f' |_{self}\n'
+                    f')>\n'
+                    f'|_{self}\n'
                 ),
                 loglevel='transport',
                 # cause=???  # handy or no?
@@ -436,15 +421,7 @@ class MsgpackTCPStream(MsgTransport):
     #         __tracebackhide__: bool = False
     #         raise
 
-    @property
-    def laddr(self) -> tuple[str, int]:
-        return self._laddr
-
-    @property
-    def raddr(self) -> tuple[str, int]:
-        return self._raddr
-
-    async def recv(self) -> Any:
+    async def recv(self) -> msgtypes.MsgType:
         return await self._aiter_pkts.asend(None)
 
     async def drain(self) -> AsyncIterator[dict]:
@@ -464,357 +441,10 @@ class MsgpackTCPStream(MsgTransport):
     def __aiter__(self):
         return self._aiter_pkts
 
-    def connected(self) -> bool:
-        return self.stream.socket.fileno() != -1
+    @property
+    def laddr(self) -> Address:
+        return self._laddr
 
-
-def get_msg_transport(
-
-    key: tuple[str, str],
-
-) -> Type[MsgTransport]:
-
-    return {
-        ('msgpack', 'tcp'): MsgpackTCPStream,
-    }[key]
-
-
-class Channel:
-    '''
-    An inter-process channel for communication between (remote) actors.
-
-    Wraps a ``MsgStream``: transport + encoding IPC connection.
-
-    Currently we only support ``trio.SocketStream`` for transport
-    (aka TCP) and the ``msgpack`` interchange format via the ``msgspec``
-    codec libary.
-
-    '''
-    def __init__(
-
-        self,
-        destaddr: tuple[str, int]|None,
-
-        msg_transport_type_key: tuple[str, str] = ('msgpack', 'tcp'),
-
-        # TODO: optional reconnection support?
-        # auto_reconnect: bool = False,
-        # on_reconnect: typing.Callable[..., typing.Awaitable] = None,
-
-    ) -> None:
-
-        # self._recon_seq = on_reconnect
-        # self._autorecon = auto_reconnect
-
-        self._destaddr = destaddr
-        self._transport_key = msg_transport_type_key
-
-        # Either created in ``.connect()`` or passed in by
-        # user in ``.from_stream()``.
-        self._stream: trio.SocketStream|None = None
-        self._transport: MsgTransport|None = None
-
-        # set after handshake - always uid of far end
-        self.uid: tuple[str, str]|None = None
-
-        self._aiter_msgs = self._iter_msgs()
-        self._exc: Exception|None = None  # set if far end actor errors
-        self._closed: bool = False
-
-        # flag set by ``Portal.cancel_actor()`` indicating remote
-        # (possibly peer) cancellation of the far end actor
-        # runtime.
-        self._cancel_called: bool = False
-
     @property
-    def msgstream(self) -> MsgTransport:
-        log.info(
-            '`Channel.msgstream` is an old name, use `._transport`'
-        )
-        return self._transport
-
-    @property
-    def transport(self) -> MsgTransport:
-        return self._transport
-
-    @classmethod
-    def from_stream(
-        cls,
-        stream: trio.SocketStream,
-        **kwargs,
-
-    ) -> Channel:
-
-        src, dst = get_stream_addrs(stream)
-        chan = Channel(
-            destaddr=dst,
-            **kwargs,
-        )
-
-        # set immediately here from provided instance
-        chan._stream: trio.SocketStream = stream
-        chan.set_msg_transport(stream)
-        return chan
-
-    def set_msg_transport(
-        self,
-        stream: trio.SocketStream,
-        type_key: tuple[str, str]|None = None,
-
-        # XXX optionally provided codec pair for `msgspec`:
-        # https://jcristharif.com/msgspec/extending.html#mapping-to-from-native-types
-        codec: MsgCodec|None = None,
-
-    ) -> MsgTransport:
-        type_key = (
-            type_key
-            or
-            self._transport_key
-        )
-        # get transport type, then
-        self._transport = get_msg_transport(
-            type_key
-        # instantiate an instance of the msg-transport
-        )(
-            stream,
-            codec=codec,
-        )
-        return self._transport
-
-    @cm
-    def apply_codec(
-        self,
-        codec: MsgCodec,
-
-    ) -> None:
-        '''
-        Temporarily override the underlying IPC msg codec for
-        dynamic enforcement of messaging schema.
-
-        '''
-        orig: MsgCodec = self._transport.codec
-        try:
-            self._transport.codec = codec
-            yield
-        finally:
-            self._transport.codec = orig
-
-    # TODO: do a .src/.dst: str for maddrs?
-    def __repr__(self) -> str:
-        if not self._transport:
-            return '<Channel with inactive transport?>'
-
-        return repr(
-            self._transport.stream.socket._sock
-        ).replace(  # type: ignore
-            "socket.socket",
-            "Channel",
-        )
-
-    @property
-    def laddr(self) -> tuple[str, int]|None:
-        return self._transport.laddr if self._transport else None
-
-    @property
-    def raddr(self) -> tuple[str, int]|None:
-        return self._transport.raddr if self._transport else None
+    def raddr(self) -> Address:
+        return self._raddr
 
-    async def connect(
-        self,
-        destaddr: tuple[Any, ...] | None = None,
-        **kwargs
-
-    ) -> MsgTransport:
-
-        if self.connected():
-            raise RuntimeError("channel is already connected?")
-
-        destaddr = destaddr or self._destaddr
-        assert isinstance(destaddr, tuple)
-
-        stream = await trio.open_tcp_stream(
-            *destaddr,
-            **kwargs
-        )
-        transport = self.set_msg_transport(stream)
-
-        log.transport(
-            f'Opened channel[{type(transport)}]: {self.laddr} -> {self.raddr}'
-        )
-        return transport
-
-    # TODO: something like,
-    # `pdbp.hideframe_on(errors=[MsgTypeError])`
-    # instead of the `try/except` hack we have rn..
-    # seems like a pretty useful thing to have in general
-    # along with being able to filter certain stack frame(s / sets)
-    # possibly based on the current log-level?
-    async def send(
-        self,
-        payload: Any,
-
-        hide_tb: bool = False,
-
-    ) -> None:
-        '''
-        Send a coded msg-blob over the transport.
-
-        '''
-        __tracebackhide__: bool = hide_tb
-        try:
-            log.transport(
-                '=> send IPC msg:\n\n'
-                f'{pformat(payload)}\n'
-            )
-            # assert self._transport  # but why typing?
-            await self._transport.send(
-                payload,
-                hide_tb=hide_tb,
-            )
-        except BaseException as _err:
-            err = _err  # bind for introspection
-            if not isinstance(_err, MsgTypeError):
-                # assert err
-                __tracebackhide__: bool = False
-            else:
-                assert err.cid
-
-            raise
-
-    async def recv(self) -> Any:
|
|
||||||
assert self._transport
|
|
||||||
return await self._transport.recv()
|
|
||||||
|
|
||||||
# TODO: auto-reconnect features like 0mq/nanomsg?
|
|
||||||
# -[ ] implement it manually with nods to SC prot
|
|
||||||
# possibly on multiple transport backends?
|
|
||||||
# -> seems like that might be re-inventing scalability
|
|
||||||
# prots tho no?
|
|
||||||
# try:
|
|
||||||
# return await self._transport.recv()
|
|
||||||
# except trio.BrokenResourceError:
|
|
||||||
# if self._autorecon:
|
|
||||||
# await self._reconnect()
|
|
||||||
# return await self.recv()
|
|
||||||
# raise
|
|
||||||
|
|
||||||
async def aclose(self) -> None:
|
|
||||||
|
|
||||||
log.transport(
|
|
||||||
f'Closing channel to {self.uid} '
|
|
||||||
f'{self.laddr} -> {self.raddr}'
|
|
||||||
)
|
|
||||||
assert self._transport
|
|
||||||
await self._transport.stream.aclose()
|
|
||||||
self._closed = True
|
|
||||||
|
|
||||||
async def __aenter__(self):
|
|
||||||
await self.connect()
|
|
||||||
return self
|
|
||||||
|
|
||||||
async def __aexit__(self, *args):
|
|
||||||
await self.aclose(*args)
|
|
||||||
|
|
||||||
def __aiter__(self):
|
|
||||||
return self._aiter_msgs
|
|
||||||
|
|
||||||
# ?TODO? run any reconnection sequence?
|
|
||||||
# -[ ] prolly should be impl-ed as deco-API?
|
|
||||||
#
|
|
||||||
# async def _reconnect(self) -> None:
|
|
||||||
# """Handle connection failures by polling until a reconnect can be
|
|
||||||
# established.
|
|
||||||
# """
|
|
||||||
# down = False
|
|
||||||
# while True:
|
|
||||||
# try:
|
|
||||||
# with trio.move_on_after(3) as cancel_scope:
|
|
||||||
# await self.connect()
|
|
||||||
# cancelled = cancel_scope.cancelled_caught
|
|
||||||
# if cancelled:
|
|
||||||
# log.transport(
|
|
||||||
# "Reconnect timed out after 3 seconds, retrying...")
|
|
||||||
# continue
|
|
||||||
# else:
|
|
||||||
# log.transport("Stream connection re-established!")
|
|
||||||
|
|
||||||
# # on_recon = self._recon_seq
|
|
||||||
# # if on_recon:
|
|
||||||
# # await on_recon(self)
|
|
||||||
|
|
||||||
# break
|
|
||||||
# except (OSError, ConnectionRefusedError):
|
|
||||||
# if not down:
|
|
||||||
# down = True
|
|
||||||
# log.transport(
|
|
||||||
# f"Connection to {self.raddr} went down, waiting"
|
|
||||||
# " for re-establishment")
|
|
||||||
# await trio.sleep(1)
|
|
||||||
|
|
||||||
async def _iter_msgs(
|
|
||||||
self
|
|
||||||
) -> AsyncGenerator[Any, None]:
|
|
||||||
'''
|
|
||||||
Yield `MsgType` IPC msgs decoded and deliverd from
|
|
||||||
an underlying `MsgTransport` protocol.
|
|
||||||
|
|
||||||
This is a streaming routine alo implemented as an async-gen
|
|
||||||
func (same a `MsgTransport._iter_pkts()`) gets allocated by
|
|
||||||
a `.__call__()` inside `.__init__()` where it is assigned to
|
|
||||||
the `._aiter_msgs` attr.
|
|
||||||
|
|
||||||
'''
|
|
||||||
assert self._transport
|
|
||||||
while True:
|
|
||||||
try:
|
|
||||||
async for msg in self._transport:
|
|
||||||
match msg:
|
|
||||||
# NOTE: if transport/interchange delivers
|
|
||||||
# a type error, we pack it with the far
|
|
||||||
# end peer `Actor.uid` and relay the
|
|
||||||
# `Error`-msg upward to the `._rpc` stack
|
|
||||||
# for normal RAE handling.
|
|
||||||
case MsgTypeError():
|
|
||||||
yield pack_from_raise(
|
|
||||||
local_err=msg,
|
|
||||||
cid=msg.cid,
|
|
||||||
|
|
||||||
# XXX we pack it here bc lower
|
|
||||||
# layers have no notion of an
|
|
||||||
# actor-id ;)
|
|
||||||
src_uid=self.uid,
|
|
||||||
)
|
|
||||||
case _:
|
|
||||||
yield msg
|
|
||||||
|
|
||||||
except trio.BrokenResourceError:
|
|
||||||
|
|
||||||
# if not self._autorecon:
|
|
||||||
raise
|
|
||||||
|
|
||||||
await self.aclose()
|
|
||||||
|
|
||||||
# if self._autorecon: # attempt reconnect
|
|
||||||
# await self._reconnect()
|
|
||||||
# continue
|
|
||||||
|
|
||||||
def connected(self) -> bool:
|
|
||||||
return self._transport.connected() if self._transport else False
|
|
||||||
|
|
||||||
|
|
||||||
@acm
|
|
||||||
async def _connect_chan(
|
|
||||||
host: str,
|
|
||||||
port: int
|
|
||||||
|
|
||||||
) -> typing.AsyncGenerator[Channel, None]:
|
|
||||||
'''
|
|
||||||
Create and connect a channel with disconnect on context manager
|
|
||||||
teardown.
|
|
||||||
|
|
||||||
'''
|
|
||||||
chan = Channel((host, port))
|
|
||||||
await chan.connect()
|
|
||||||
yield chan
|
|
||||||
with trio.CancelScope(shield=True):
|
|
||||||
await chan.aclose()
|
|
|
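The removed `Channel.apply_codec()` above captures a small, reusable pattern: temporarily swap the transport's codec attribute and restore the original on exit, even when the body raises. A minimal, self-contained sketch of just that pattern (the `FakeTransport` stand-in is illustrative, not tractor's real `MsgTransport`):

from contextlib import contextmanager


class FakeTransport:
    # stand-in for a transport object; only the `.codec` attr matters here
    def __init__(self, codec: str) -> None:
        self.codec = codec


@contextmanager
def apply_codec(transport: FakeTransport, codec: str):
    # override the codec for the duration of the `with` block,
    # restoring the original even if the body raises
    orig = transport.codec
    try:
        transport.codec = codec
        yield codec
    finally:
        transport.codec = orig


t = FakeTransport('msgpack-default')
with apply_codec(t, 'msgpack-strict'):
    assert t.codec == 'msgpack-strict'
assert t.codec == 'msgpack-default'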
@ -0,0 +1,99 @@
# tractor: structured concurrent "actors".
# Copyright 2018-eternity Tyler Goodlet.

# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.

# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.

# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
from typing import Type

import trio
import socket

from tractor._addr import Address
from tractor.ipc._transport import (
    MsgTransportKey,
    MsgTransport
)
from tractor.ipc._tcp import MsgpackTCPStream
from tractor.ipc._uds import MsgpackUDSStream


# manually updated list of all supported msg transport types
_msg_transports = [
    MsgpackTCPStream,
    MsgpackUDSStream
]


# convert a MsgTransportKey to the corresponding transport type
_key_to_transport: dict[MsgTransportKey, Type[MsgTransport]] = {
    cls.key(): cls
    for cls in _msg_transports
}

# convert an Address wrapper to its corresponding transport type
_addr_to_transport: dict[Type[Address], Type[MsgTransport]] = {
    cls.address_type: cls
    for cls in _msg_transports
}


def transport_from_addr(
    addr: Address,
    codec_key: str = 'msgpack',
) -> Type[MsgTransport]:
    '''
    Given a destination address and a desired codec, find the
    corresponding `MsgTransport` type.

    '''
    try:
        return _addr_to_transport[type(addr)]

    except KeyError:
        raise NotImplementedError(
            f'No known transport for address {repr(addr)}'
        )


def transport_from_stream(
    stream: trio.abc.Stream,
    codec_key: str = 'msgpack'
) -> Type[MsgTransport]:
    '''
    Given an arbitrary `trio.abc.Stream` and a desired codec,
    find the corresponding `MsgTransport` type.

    '''
    transport = None
    if isinstance(stream, trio.SocketStream):
        sock = stream.socket
        match sock.family:
            case socket.AF_INET | socket.AF_INET6:
                transport = 'tcp'

            case socket.AF_UNIX:
                transport = 'uds'

            case _:
                raise NotImplementedError(
                    f'Unsupported socket family: {sock.family}'
                )

    if not transport:
        raise NotImplementedError(
            f'Could not figure out transport type for stream type {type(stream)}'
        )

    key = (codec_key, transport)

    return _key_to_transport[key]
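The two helpers above replace the old hard-coded `('msgpack', 'tcp')` mapping with table-driven dispatch over all registered transports. A hedged usage sketch; the `UDSAddress.from_addr()` call mirrors its use elsewhere in this branch, and the socket path is assumed to already have a listener behind it:

import trio
from tractor._addr import UDSAddress
from tractor.ipc._types import (
    transport_from_addr,
    transport_from_stream,
)


async def main() -> None:
    # assumed: wrap a UDS filesystem path in its Address type
    addr = UDSAddress.from_addr('/tmp/demo.sock')

    # address type -> transport type, via `_addr_to_transport`
    transport_cls = transport_from_addr(addr)  # -> MsgpackUDSStream

    # raw stream -> transport type, via socket family + codec key
    stream = await trio.open_unix_socket('/tmp/demo.sock')
    assert transport_from_stream(stream) is transport_cls


trio.run(main)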
@ -0,0 +1,97 @@
# tractor: structured concurrent "actors".
# Copyright 2018-eternity Tyler Goodlet.

# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.

# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.

# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
'''
Unix Domain Socket implementation of tractor.ipc._transport.MsgTransport protocol

'''
from __future__ import annotations

import trio

from tractor.msg import MsgCodec
from tractor.log import get_logger
from tractor._addr import UDSAddress
from tractor.ipc._transport import MsgpackTransport


log = get_logger(__name__)


class MsgpackUDSStream(MsgpackTransport):
    '''
    A ``trio.SocketStream`` delivering ``msgpack`` formatted data
    using the ``msgspec`` codec lib.

    '''
    address_type = UDSAddress
    layer_key: int = 7

    # def __init__(
    #     self,
    #     stream: trio.SocketStream,
    #     prefix_size: int = 4,
    #     codec: CodecType = None,

    # ) -> None:
    #     super().__init__(
    #         stream,
    #         prefix_size=prefix_size,
    #         codec=codec
    #     )

    @property
    def maddr(self) -> str:
        filepath = self.raddr.unwrap()
        return (
            f'/ipv4/localhost'
            f'/{self.address_type.name_key}/{filepath}'
            # f'/{self.chan.uid[0]}'
            # f'/{self.cid}'

            # f'/cid={cid_head}..{cid_tail}'
            # TODO: ? not use this ^ right ?
        )

    def connected(self) -> bool:
        return self.stream.socket.fileno() != -1

    @classmethod
    async def connect_to(
        cls,
        addr: UDSAddress,
        prefix_size: int = 4,
        codec: MsgCodec|None = None,
        **kwargs
    ) -> MsgpackUDSStream:
        stream = await trio.open_unix_socket(
            addr.unwrap(),
            **kwargs
        )
        return MsgpackUDSStream(
            stream,
            prefix_size=prefix_size,
            codec=codec
        )

    @classmethod
    def get_stream_addrs(
        cls,
        stream: trio.SocketStream
    ) -> tuple[UDSAddress, UDSAddress]:
        return (
            # local then remote endpoint (the original duplicated
            # `getsockname()` for both ends, which looks like a
            # copy-paste slip)
            UDSAddress.from_addr(stream.socket.getsockname()),
            UDSAddress.from_addr(stream.socket.getpeername()),
        )
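A hedged end-to-end sketch of dialing the new UDS transport (same `UDSAddress.from_addr()` assumption as above, with a peer already listening on the socket path):

import trio
from tractor._addr import UDSAddress
from tractor.ipc._uds import MsgpackUDSStream


async def main() -> None:
    addr = UDSAddress.from_addr('/tmp/demo.sock')  # assumed ctor usage

    # dial the path and wrap it in the length-prefixed msgpack framing
    stream = await MsgpackUDSStream.connect_to(addr)
    assert stream.connected()

    # recover the (local, remote) endpoint addresses
    laddr, raddr = MsgpackUDSStream.get_stream_addrs(stream.stream)


trio.run(main)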
@ -92,7 +92,7 @@ class StackLevelAdapter(LoggerAdapter):
     ) -> None:
         '''
         IPC transport level msg IO; generally anything below
-        `._ipc.Channel` and friends.
+        `.ipc.Channel` and friends.

         '''
         return self.log(5, msg)
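The `return self.log(5, msg)` above is the custom-log-level pattern: `.transport()` emits at a numeric level below `DEBUG` (10) so wire-level noise is filtered out unless explicitly enabled. A self-contained sketch with stdlib `logging` (names are illustrative, not tractor's):

import logging

TRANSPORT = 5
logging.addLevelName(TRANSPORT, 'TRANSPORT')


class LevelAdapter(logging.LoggerAdapter):
    def transport(self, msg: str) -> None:
        # only emitted when the logger is configured at level <= 5
        self.log(TRANSPORT, msg)


logging.basicConfig(level=TRANSPORT)
log = LevelAdapter(logging.getLogger('demo'), {})
log.transport('IPC transport level msg IO')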
@ -285,7 +285,7 @@ def get_logger(
     # NOTE: for handling for modules that use ``get_logger(__name__)``
     # we make the following stylistic choice:
     # - always avoid duplicate project-package token
-    # in msg output: i.e. tractor.tractor _ipc.py in header
+    # in msg output: i.e. tractor.tractor.ipc._chan.py in header
     # looks ridiculous XD
     # - never show the leaf module name in the {name} part
     # since in python the {filename} is always this same
@ -47,6 +47,7 @@ from tractor.msg import (
     pretty_struct,
 )
 from tractor.log import get_logger
+from tractor._addr import AddressTypes


 log = get_logger('tractor.msgspec')
@ -167,8 +168,8 @@ class SpawnSpec(

     # TODO: not just sockaddr pairs?
     # -[ ] abstract into a `TransportAddr` type?
-    reg_addrs: list[tuple[str, int]]
-    bind_addrs: list[tuple[str, int]]
+    reg_addrs: list[AddressTypes]
+    bind_addrs: list[AddressTypes]


     # TODO: caps based RPC support in the payload?
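`AddressTypes` is imported from `tractor._addr` but never defined in this comparison; from its use as a drop-in for `tuple[str, int]` it is presumably a union over the concrete per-transport address types, roughly as below (member names are assumptions, only `UDSAddress` actually appears in this diff):

# hypothetical shape of `tractor._addr.AddressTypes`; the real alias
# lives in `tractor._addr`, which this comparison does not show
AddressTypes = TCPAddress | UDSAddress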
uv.lock
@ -20,10 +20,38 @@ dependencies = [
 ]
 sdist = { url = "https://files.pythonhosted.org/packages/fc/97/c783634659c2920c3fc70419e3af40972dbaf758daa229a7d6ea6135c90d/cffi-1.17.1.tar.gz", hash = "sha256:1c39c6016c32bc48dd54561950ebd6836e1670f2ae46128f67cf49e789c52824", size = 516621 }
 wheels = [
+    { url = "https://files.pythonhosted.org/packages/6b/f4/927e3a8899e52a27fa57a48607ff7dc91a9ebe97399b357b85a0c7892e00/cffi-1.17.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:a45e3c6913c5b87b3ff120dcdc03f6131fa0065027d0ed7ee6190736a74cd401", size = 182264 },
+    { url = "https://files.pythonhosted.org/packages/6c/f5/6c3a8efe5f503175aaddcbea6ad0d2c96dad6f5abb205750d1b3df44ef29/cffi-1.17.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:30c5e0cb5ae493c04c8b42916e52ca38079f1b235c2f8ae5f4527b963c401caf", size = 178651 },
+    { url = "https://files.pythonhosted.org/packages/94/dd/a3f0118e688d1b1a57553da23b16bdade96d2f9bcda4d32e7d2838047ff7/cffi-1.17.1-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f75c7ab1f9e4aca5414ed4d8e5c0e303a34f4421f8a0d47a4d019ceff0ab6af4", size = 445259 },
+    { url = "https://files.pythonhosted.org/packages/2e/ea/70ce63780f096e16ce8588efe039d3c4f91deb1dc01e9c73a287939c79a6/cffi-1.17.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a1ed2dd2972641495a3ec98445e09766f077aee98a1c896dcb4ad0d303628e41", size = 469200 },
+    { url = "https://files.pythonhosted.org/packages/1c/a0/a4fa9f4f781bda074c3ddd57a572b060fa0df7655d2a4247bbe277200146/cffi-1.17.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:46bf43160c1a35f7ec506d254e5c890f3c03648a4dbac12d624e4490a7046cd1", size = 477235 },
+    { url = "https://files.pythonhosted.org/packages/62/12/ce8710b5b8affbcdd5c6e367217c242524ad17a02fe5beec3ee339f69f85/cffi-1.17.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a24ed04c8ffd54b0729c07cee15a81d964e6fee0e3d4d342a27b020d22959dc6", size = 459721 },
+    { url = "https://files.pythonhosted.org/packages/ff/6b/d45873c5e0242196f042d555526f92aa9e0c32355a1be1ff8c27f077fd37/cffi-1.17.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:610faea79c43e44c71e1ec53a554553fa22321b65fae24889706c0a84d4ad86d", size = 467242 },
+    { url = "https://files.pythonhosted.org/packages/1a/52/d9a0e523a572fbccf2955f5abe883cfa8bcc570d7faeee06336fbd50c9fc/cffi-1.17.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:a9b15d491f3ad5d692e11f6b71f7857e7835eb677955c00cc0aefcd0669adaf6", size = 477999 },
+    { url = "https://files.pythonhosted.org/packages/44/74/f2a2460684a1a2d00ca799ad880d54652841a780c4c97b87754f660c7603/cffi-1.17.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:de2ea4b5833625383e464549fec1bc395c1bdeeb5f25c4a3a82b5a8c756ec22f", size = 454242 },
+    { url = "https://files.pythonhosted.org/packages/f8/4a/34599cac7dfcd888ff54e801afe06a19c17787dfd94495ab0c8d35fe99fb/cffi-1.17.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:fc48c783f9c87e60831201f2cce7f3b2e4846bf4d8728eabe54d60700b318a0b", size = 478604 },
     { url = "https://files.pythonhosted.org/packages/34/33/e1b8a1ba29025adbdcda5fb3a36f94c03d771c1b7b12f726ff7fef2ebe36/cffi-1.17.1-cp311-cp311-win32.whl", hash = "sha256:85a950a4ac9c359340d5963966e3e0a94a676bd6245a4b55bc43949eee26a655", size = 171727 },
     { url = "https://files.pythonhosted.org/packages/3d/97/50228be003bb2802627d28ec0627837ac0bf35c90cf769812056f235b2d1/cffi-1.17.1-cp311-cp311-win_amd64.whl", hash = "sha256:caaf0640ef5f5517f49bc275eca1406b0ffa6aa184892812030f04c2abf589a0", size = 181400 },
+    { url = "https://files.pythonhosted.org/packages/5a/84/e94227139ee5fb4d600a7a4927f322e1d4aea6fdc50bd3fca8493caba23f/cffi-1.17.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:805b4371bf7197c329fcb3ead37e710d1bca9da5d583f5073b799d5c5bd1eee4", size = 183178 },
+    { url = "https://files.pythonhosted.org/packages/da/ee/fb72c2b48656111c4ef27f0f91da355e130a923473bf5ee75c5643d00cca/cffi-1.17.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:733e99bc2df47476e3848417c5a4540522f234dfd4ef3ab7fafdf555b082ec0c", size = 178840 },
+    { url = "https://files.pythonhosted.org/packages/cc/b6/db007700f67d151abadf508cbfd6a1884f57eab90b1bb985c4c8c02b0f28/cffi-1.17.1-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1257bdabf294dceb59f5e70c64a3e2f462c30c7ad68092d01bbbfb1c16b1ba36", size = 454803 },
+    { url = "https://files.pythonhosted.org/packages/1a/df/f8d151540d8c200eb1c6fba8cd0dfd40904f1b0682ea705c36e6c2e97ab3/cffi-1.17.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:da95af8214998d77a98cc14e3a3bd00aa191526343078b530ceb0bd710fb48a5", size = 478850 },
+    { url = "https://files.pythonhosted.org/packages/28/c0/b31116332a547fd2677ae5b78a2ef662dfc8023d67f41b2a83f7c2aa78b1/cffi-1.17.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d63afe322132c194cf832bfec0dc69a99fb9bb6bbd550f161a49e9e855cc78ff", size = 485729 },
+    { url = "https://files.pythonhosted.org/packages/91/2b/9a1ddfa5c7f13cab007a2c9cc295b70fbbda7cb10a286aa6810338e60ea1/cffi-1.17.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f79fc4fc25f1c8698ff97788206bb3c2598949bfe0fef03d299eb1b5356ada99", size = 471256 },
+    { url = "https://files.pythonhosted.org/packages/b2/d5/da47df7004cb17e4955df6a43d14b3b4ae77737dff8bf7f8f333196717bf/cffi-1.17.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b62ce867176a75d03a665bad002af8e6d54644fad99a3c70905c543130e39d93", size = 479424 },
+    { url = "https://files.pythonhosted.org/packages/0b/ac/2a28bcf513e93a219c8a4e8e125534f4f6db03e3179ba1c45e949b76212c/cffi-1.17.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:386c8bf53c502fff58903061338ce4f4950cbdcb23e2902d86c0f722b786bbe3", size = 484568 },
+    { url = "https://files.pythonhosted.org/packages/d4/38/ca8a4f639065f14ae0f1d9751e70447a261f1a30fa7547a828ae08142465/cffi-1.17.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:4ceb10419a9adf4460ea14cfd6bc43d08701f0835e979bf821052f1805850fe8", size = 488736 },
     { url = "https://files.pythonhosted.org/packages/86/c5/28b2d6f799ec0bdecf44dced2ec5ed43e0eb63097b0f58c293583b406582/cffi-1.17.1-cp312-cp312-win32.whl", hash = "sha256:a08d7e755f8ed21095a310a693525137cfe756ce62d066e53f502a83dc550f65", size = 172448 },
     { url = "https://files.pythonhosted.org/packages/50/b9/db34c4755a7bd1cb2d1603ac3863f22bcecbd1ba29e5ee841a4bc510b294/cffi-1.17.1-cp312-cp312-win_amd64.whl", hash = "sha256:51392eae71afec0d0c8fb1a53b204dbb3bcabcb3c9b807eedf3e1e6ccf2de903", size = 181976 },
+    { url = "https://files.pythonhosted.org/packages/8d/f8/dd6c246b148639254dad4d6803eb6a54e8c85c6e11ec9df2cffa87571dbe/cffi-1.17.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f3a2b4222ce6b60e2e8b337bb9596923045681d71e5a082783484d845390938e", size = 182989 },
+    { url = "https://files.pythonhosted.org/packages/8b/f1/672d303ddf17c24fc83afd712316fda78dc6fce1cd53011b839483e1ecc8/cffi-1.17.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:0984a4925a435b1da406122d4d7968dd861c1385afe3b45ba82b750f229811e2", size = 178802 },
+    { url = "https://files.pythonhosted.org/packages/0e/2d/eab2e858a91fdff70533cab61dcff4a1f55ec60425832ddfdc9cd36bc8af/cffi-1.17.1-cp313-cp313-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d01b12eeeb4427d3110de311e1774046ad344f5b1a7403101878976ecd7a10f3", size = 454792 },
+    { url = "https://files.pythonhosted.org/packages/75/b2/fbaec7c4455c604e29388d55599b99ebcc250a60050610fadde58932b7ee/cffi-1.17.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:706510fe141c86a69c8ddc029c7910003a17353970cff3b904ff0686a5927683", size = 478893 },
+    { url = "https://files.pythonhosted.org/packages/4f/b7/6e4a2162178bf1935c336d4da8a9352cccab4d3a5d7914065490f08c0690/cffi-1.17.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:de55b766c7aa2e2a3092c51e0483d700341182f08e67c63630d5b6f200bb28e5", size = 485810 },
+    { url = "https://files.pythonhosted.org/packages/c7/8a/1d0e4a9c26e54746dc08c2c6c037889124d4f59dffd853a659fa545f1b40/cffi-1.17.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c59d6e989d07460165cc5ad3c61f9fd8f1b4796eacbd81cee78957842b834af4", size = 471200 },
+    { url = "https://files.pythonhosted.org/packages/26/9f/1aab65a6c0db35f43c4d1b4f580e8df53914310afc10ae0397d29d697af4/cffi-1.17.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd398dbc6773384a17fe0d3e7eeb8d1a21c2200473ee6806bb5e6a8e62bb73dd", size = 479447 },
+    { url = "https://files.pythonhosted.org/packages/5f/e4/fb8b3dd8dc0e98edf1135ff067ae070bb32ef9d509d6cb0f538cd6f7483f/cffi-1.17.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:3edc8d958eb099c634dace3c7e16560ae474aa3803a5df240542b305d14e14ed", size = 484358 },
+    { url = "https://files.pythonhosted.org/packages/f1/47/d7145bf2dc04684935d57d67dff9d6d795b2ba2796806bb109864be3a151/cffi-1.17.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:72e72408cad3d5419375fc87d289076ee319835bdfa2caad331e377589aebba9", size = 488469 },
     { url = "https://files.pythonhosted.org/packages/bf/ee/f94057fa6426481d663b88637a9a10e859e492c73d0384514a17d78ee205/cffi-1.17.1-cp313-cp313-win32.whl", hash = "sha256:e03eab0a8677fa80d646b5ddece1cbeaf556c313dcfac435ba11f107ba117b5d", size = 172475 },
     { url = "https://files.pythonhosted.org/packages/7c/fc/6a8cb64e5f0324877d503c854da15d76c1e50eb722e320b15345c4d0c6de/cffi-1.17.1-cp313-cp313-win_amd64.whl", hash = "sha256:f6a16c31041f09ead72d69f583767292f750d24913dadacf5756b966aacb3f1a", size = 182009 },
 ]
@ -321,6 +349,7 @@ name = "tractor"
 version = "0.1.0a6.dev0"
 source = { editable = "." }
 dependencies = [
+    { name = "cffi" },
     { name = "colorlog" },
     { name = "msgspec" },
     { name = "pdbp" },
@ -342,6 +371,7 @@ dev = [

 [package.metadata]
 requires-dist = [
+    { name = "cffi", specifier = ">=1.17.1" },
     { name = "colorlog", specifier = ">=6.8.2,<7" },
     { name = "msgspec", specifier = ">=0.19.0" },
     { name = "pdbp", specifier = ">=1.6,<2" },