2024-03-13 19:57:15 +00:00
|
|
|
# tractor: structured concurrent "actors".
|
|
|
|
|
# Copyright 2018-eternity Tyler Goodlet.
|
|
|
|
|
|
|
|
|
|
# This program is free software: you can redistribute it and/or modify
|
|
|
|
|
# it under the terms of the GNU Affero General Public License as published by
|
|
|
|
|
# the Free Software Foundation, either version 3 of the License, or
|
|
|
|
|
# (at your option) any later version.
|
|
|
|
|
|
|
|
|
|
# This program is distributed in the hope that it will be useful,
|
|
|
|
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
|
|
|
# GNU Affero General Public License for more details.
|
|
|
|
|
|
|
|
|
|
# You should have received a copy of the GNU Affero General Public License
|
|
|
|
|
# along with this program. If not, see <https://www.gnu.org/licenses/>.
|
|
|
|
|
|
|
|
|
|
'''
|
|
|
|
|
The fundamental core machinery implementing every "actor"
|
|
|
|
|
including the process-local, or "python-interpreter (aka global)
|
|
|
|
|
singleton) `Actor` primitive(s) and its internal `trio` machinery
|
|
|
|
|
implementing the low level runtime system supporting the
|
|
|
|
|
discovery, communication, spawning, supervision and cancellation
|
|
|
|
|
of other actors in a hierarchical process tree.
|
|
|
|
|
|
|
|
|
|
The runtime's main entry point: `async_main()` opens the top level
|
|
|
|
|
supervision and service `trio.Nursery`s which manage the tasks responsible
|
|
|
|
|
for running all lower level spawning, supervision and msging layers:
|
|
|
|
|
|
|
|
|
|
- lowlevel transport-protocol init and persistent connectivity on
|
|
|
|
|
top of `._ipc` primitives; the transport layer.
|
|
|
|
|
- bootstrapping of connection/runtime config from the spawning
|
|
|
|
|
parent (actor).
|
|
|
|
|
- starting and supervising IPC-channel msg processing loops around
|
|
|
|
|
transport connections from parent/peer actors in order to deliver
|
|
|
|
|
SC-transitive RPC via scheduling of `trio` tasks.
|
|
|
|
|
- registration of newly spawned actors with the discovery sys.
|
|
|
|
|
|
2025-08-20 16:35:01 +00:00
|
|
|
Glossary:
|
|
|
|
|
--------
|
|
|
|
|
- tn: a `trio.Nursery` or "task nursery".
|
|
|
|
|
- an: an `ActorNursery` or "actor nursery".
|
|
|
|
|
- root: top/parent-most scope/task/process/actor (or other runtime
|
|
|
|
|
primitive) in a hierarchical tree.
|
|
|
|
|
- parent-ish: "higher-up" in the runtime-primitive hierarchy.
|
|
|
|
|
- child-ish: "lower-down" in the runtime-primitive hierarchy.
|
|
|
|
|
|
2024-03-13 19:57:15 +00:00
|
|
|
'''
|
|
|
|
|
from __future__ import annotations
|
|
|
|
|
from contextlib import (
|
|
|
|
|
ExitStack,
|
|
|
|
|
)
|
|
|
|
|
from functools import partial
|
|
|
|
|
import importlib
|
|
|
|
|
import importlib.util
|
2023-09-27 19:19:30 +00:00
|
|
|
import os
|
2025-05-15 00:26:15 +00:00
|
|
|
from pathlib import Path
|
2024-03-13 19:57:15 +00:00
|
|
|
from pprint import pformat
|
|
|
|
|
import signal
|
|
|
|
|
import sys
|
|
|
|
|
from typing import (
|
2024-04-30 16:15:46 +00:00
|
|
|
Any,
|
2024-03-13 19:57:15 +00:00
|
|
|
Callable,
|
2025-03-31 01:36:45 +00:00
|
|
|
Type,
|
2024-03-13 19:57:15 +00:00
|
|
|
TYPE_CHECKING,
|
|
|
|
|
)
|
|
|
|
|
import uuid
|
2025-06-13 03:26:38 +00:00
|
|
|
import textwrap
|
2024-03-13 19:57:15 +00:00
|
|
|
from types import ModuleType
|
2023-09-27 19:19:30 +00:00
|
|
|
import warnings
|
2024-03-13 19:57:15 +00:00
|
|
|
|
|
|
|
|
import trio
|
2024-07-11 16:11:31 +00:00
|
|
|
from trio._core import _run as trio_runtime
|
2024-03-13 19:57:15 +00:00
|
|
|
from trio import (
|
|
|
|
|
CancelScope,
|
|
|
|
|
Nursery,
|
|
|
|
|
TaskStatus,
|
|
|
|
|
)
|
|
|
|
|
|
2024-04-05 23:07:12 +00:00
|
|
|
from tractor.msg import (
|
2024-07-02 20:31:58 +00:00
|
|
|
MsgType,
|
2024-04-05 23:07:12 +00:00
|
|
|
NamespacePath,
|
2024-07-02 20:31:58 +00:00
|
|
|
Stop,
|
|
|
|
|
pretty_struct,
|
2024-04-05 23:07:12 +00:00
|
|
|
types as msgtypes,
|
|
|
|
|
)
|
2025-06-16 15:58:59 +00:00
|
|
|
from .trionics import (
|
|
|
|
|
collapse_eg,
|
Heh, add back `Actor._root_tn`, it has purpose..
Turns out I didn't read my own internals docs/comments and despite it
not being used previously, this adds the real use case: a root,
per-actor, scope which ensures parent comms are the last conc-thing to
be cancelled.
Also, the impl changes here make the test from 6410e45 (or wtv
it's rebased to) pass, i.e. we can support crash handling in the root
actor despite the root-tn having been (self) cancelled.
Superficial adjustments,
- rename `Actor._service_n` -> `._service_tn` everywhere.
- add asserts to `._runtime.async_main()` which ensure that the any
`.trionics.maybe_open_nursery()` calls against optionally passed
`._[root/service]_tn` are allocated-if-not-provided (the
`._service_tn`-case being an i-guess-prep-for-the-future-anti-pattern
Bp).
- obvi adjust all internal usage to match new naming.
Serious/real-use-case changes,
- add (back) a `Actor._root_tn` which sits a scope "above" the
service-tn and is either,
+ assigned in `._runtime.async_main()` for sub-actors OR,
+ assigned in `._root.open_root_actor()` for the root actor.
**THE primary reason** to keep this "upper" tn is that during
a full-`Actor`-cancellation condition (more details below) we want to
ensure that the IPC connection with a sub-actor's parent is **the last
thing to be cancelled**; this is most simply implemented by ensuring
that the `Actor._parent_chan: .ipc.Channel` is handled in an upper
scope in `_rpc.process_messages()`-subtask-terms.
- for the root actor this `root_tn` is allocated in `.open_root_actor()`
body and assigned as such.
- extend `Actor.cancel_soon()` to be cohesive with this entire teardown
"policy" by scheduling a task in the `._root_tn` which,
* waits for the `._service_tn` to complete and then,
* cancels the `._root_tn.cancel_scope`,
* includes "sclangy" console logging throughout.
2025-08-19 23:24:20 +00:00
|
|
|
maybe_open_nursery,
|
2025-06-16 15:58:59 +00:00
|
|
|
)
|
Factor actor-embedded IPC-tpt-server to `ipc` subsys
Primarily moving the `Actor._serve_forever()`-task-as-method and
supporting actor-instance attributes to a new `.ipo._server` sub-mod
which now encapsulates,
- the coupling various `trio.Nursery`s (and their independent lifetime mgmt)
to different `trio.serve_listener()`s tasks and `SocketStream`
handler scopes.
- `Address` and `SocketListener` mgmt and tracking through the idea of
an "IPC endpoint": each "bound-and-active instance" of a served-listener
for some (varied transport protocol's socket) address.
- start and shutdown of the entire server's lifetime via an `@acm`.
- delegation of starting/stopping tpt-protocol-specific `trio.abc.Listener`s
to the corresponding `.ipc._<proto_key>` sub-module (newly defined
mod-top-level instead of `Address` method) `start/close_listener()`
funcs.
Impl details of the `.ipc._server` sub-sys,
- add new `IPCServer`, allocated with `open_ipc_server()`, and which
encapsulates starting multiple-transport-proto-`trio.abc.Listener`s
from an input set of `._addr.Address`s using,
|_`IPCServer.listen_on()` which internally spawns tasks that delegate to a new
`_serve_ipc_eps()`, a rework of what was (effectively)
`Actor._serve_forever()` and which now,
* allocates a new `IPCEndpoint`-struct (see below) for each
address-listener pair alongside the specified
listener-serving/stream-handling `trio.Nursery`s provided by the
caller.
* starts and stops each transport (socket's) listener by calling
`IPCEndpoint.start/close_listener()` which in turn delegates to
the underlying `inspect.getmodule(IPCEndpoint.addr)` backend tpt
module's equivalent impl.
* tracks all created endpoints in a `._endpoints: list[IPCEndpoint]`
which is further exposed through public properties for
introspection of served transport-protocols and their addresses.
|_`IPCServer._[parent/stream_handler]_tn: Nursery`s which are either
allocated (in which case, as the same instance) or provided by the
caller of `open_ipc_server()` such that the same nursery-cancel-scope
controls offered by `trio.serve_listeners(handler_nursery=)` are
offered where the `._parent_tn` is used to spawn `_serve_ipc_eps()`
tasks, and `._stream_handler_tn` is passed verbatim as `handler_nursery`.
- a new `IPCEndpoint`-struct (as mentioned) which wraps each
transport-proto's address + listener + allocated-supervising-nursery
to encapsulate the "lifetime of a server IPC endpoint" such that
eventually we can track and managed per-protocol/address/`.listen_on()`-call
scoped starts/stops/restarts for the purposes of filtering/banning
peer traffic.
|_ also included is an unused `.peer_tpts` table which we can
hopefully use to replace `Actor._peers` in a `Channel`-tracking
transport-proto-aware way!
Surrounding changes to `.ipc.*` primitives to match,
- make `[TCP|UDS]Address` types `msgspec.Struct(frozen=True)` and thus
drop any-and-all `addr._host =` style mutation throughout.
|_ as such also drop their `.__init__()` and `.__eq__()` meths.
|_ UDS tweaks to field names and thus `.__repr__()`.
- move `[TCP|UDS]Address.[start/close]_listener()` meths to be mod-level
equiv `start|close_listener()` funcs.
- just hard code the `.ipc._types._key_to_transport/._addr_to_transport`
table entries instead of all the prior fancy dynamic class property
reading stuff (remember, "explicit is better then implicit").
Modified in `._runtime.Actor` internals,
- drop the `._serve_forever()` and `.cancel_server()`, methods and
`._server_down` waiting logic from `.cancel_soon()`
- add `.[_]ipc_server` which is opened just after the `._service_n` and
delegate to it for any equivalent publicly exposed instance
attributes/properties.
2025-04-10 22:06:12 +00:00
|
|
|
from .ipc import (
|
|
|
|
|
Channel,
|
2025-04-11 20:55:03 +00:00
|
|
|
# IPCServer, # causes cycles atm..
|
Factor actor-embedded IPC-tpt-server to `ipc` subsys
Primarily moving the `Actor._serve_forever()`-task-as-method and
supporting actor-instance attributes to a new `.ipo._server` sub-mod
which now encapsulates,
- the coupling various `trio.Nursery`s (and their independent lifetime mgmt)
to different `trio.serve_listener()`s tasks and `SocketStream`
handler scopes.
- `Address` and `SocketListener` mgmt and tracking through the idea of
an "IPC endpoint": each "bound-and-active instance" of a served-listener
for some (varied transport protocol's socket) address.
- start and shutdown of the entire server's lifetime via an `@acm`.
- delegation of starting/stopping tpt-protocol-specific `trio.abc.Listener`s
to the corresponding `.ipc._<proto_key>` sub-module (newly defined
mod-top-level instead of `Address` method) `start/close_listener()`
funcs.
Impl details of the `.ipc._server` sub-sys,
- add new `IPCServer`, allocated with `open_ipc_server()`, and which
encapsulates starting multiple-transport-proto-`trio.abc.Listener`s
from an input set of `._addr.Address`s using,
|_`IPCServer.listen_on()` which internally spawns tasks that delegate to a new
`_serve_ipc_eps()`, a rework of what was (effectively)
`Actor._serve_forever()` and which now,
* allocates a new `IPCEndpoint`-struct (see below) for each
address-listener pair alongside the specified
listener-serving/stream-handling `trio.Nursery`s provided by the
caller.
* starts and stops each transport (socket's) listener by calling
`IPCEndpoint.start/close_listener()` which in turn delegates to
the underlying `inspect.getmodule(IPCEndpoint.addr)` backend tpt
module's equivalent impl.
* tracks all created endpoints in a `._endpoints: list[IPCEndpoint]`
which is further exposed through public properties for
introspection of served transport-protocols and their addresses.
|_`IPCServer._[parent/stream_handler]_tn: Nursery`s which are either
allocated (in which case, as the same instance) or provided by the
caller of `open_ipc_server()` such that the same nursery-cancel-scope
controls offered by `trio.serve_listeners(handler_nursery=)` are
offered where the `._parent_tn` is used to spawn `_serve_ipc_eps()`
tasks, and `._stream_handler_tn` is passed verbatim as `handler_nursery`.
- a new `IPCEndpoint`-struct (as mentioned) which wraps each
transport-proto's address + listener + allocated-supervising-nursery
to encapsulate the "lifetime of a server IPC endpoint" such that
eventually we can track and managed per-protocol/address/`.listen_on()`-call
scoped starts/stops/restarts for the purposes of filtering/banning
peer traffic.
|_ also included is an unused `.peer_tpts` table which we can
hopefully use to replace `Actor._peers` in a `Channel`-tracking
transport-proto-aware way!
Surrounding changes to `.ipc.*` primitives to match,
- make `[TCP|UDS]Address` types `msgspec.Struct(frozen=True)` and thus
drop any-and-all `addr._host =` style mutation throughout.
|_ as such also drop their `.__init__()` and `.__eq__()` meths.
|_ UDS tweaks to field names and thus `.__repr__()`.
- move `[TCP|UDS]Address.[start/close]_listener()` meths to be mod-level
equiv `start|close_listener()` funcs.
- just hard code the `.ipc._types._key_to_transport/._addr_to_transport`
table entries instead of all the prior fancy dynamic class property
reading stuff (remember, "explicit is better then implicit").
Modified in `._runtime.Actor` internals,
- drop the `._serve_forever()` and `.cancel_server()`, methods and
`._server_down` waiting logic from `.cancel_soon()`
- add `.[_]ipc_server` which is opened just after the `._service_n` and
delegate to it for any equivalent publicly exposed instance
attributes/properties.
2025-04-10 22:06:12 +00:00
|
|
|
_server,
|
|
|
|
|
)
|
2025-03-23 03:14:04 +00:00
|
|
|
from ._addr import (
|
2025-03-31 01:36:45 +00:00
|
|
|
UnwrappedAddress,
|
2025-03-23 03:14:04 +00:00
|
|
|
Address,
|
Factor actor-embedded IPC-tpt-server to `ipc` subsys
Primarily moving the `Actor._serve_forever()`-task-as-method and
supporting actor-instance attributes to a new `.ipo._server` sub-mod
which now encapsulates,
- the coupling various `trio.Nursery`s (and their independent lifetime mgmt)
to different `trio.serve_listener()`s tasks and `SocketStream`
handler scopes.
- `Address` and `SocketListener` mgmt and tracking through the idea of
an "IPC endpoint": each "bound-and-active instance" of a served-listener
for some (varied transport protocol's socket) address.
- start and shutdown of the entire server's lifetime via an `@acm`.
- delegation of starting/stopping tpt-protocol-specific `trio.abc.Listener`s
to the corresponding `.ipc._<proto_key>` sub-module (newly defined
mod-top-level instead of `Address` method) `start/close_listener()`
funcs.
Impl details of the `.ipc._server` sub-sys,
- add new `IPCServer`, allocated with `open_ipc_server()`, and which
encapsulates starting multiple-transport-proto-`trio.abc.Listener`s
from an input set of `._addr.Address`s using,
|_`IPCServer.listen_on()` which internally spawns tasks that delegate to a new
`_serve_ipc_eps()`, a rework of what was (effectively)
`Actor._serve_forever()` and which now,
* allocates a new `IPCEndpoint`-struct (see below) for each
address-listener pair alongside the specified
listener-serving/stream-handling `trio.Nursery`s provided by the
caller.
* starts and stops each transport (socket's) listener by calling
`IPCEndpoint.start/close_listener()` which in turn delegates to
the underlying `inspect.getmodule(IPCEndpoint.addr)` backend tpt
module's equivalent impl.
* tracks all created endpoints in a `._endpoints: list[IPCEndpoint]`
which is further exposed through public properties for
introspection of served transport-protocols and their addresses.
|_`IPCServer._[parent/stream_handler]_tn: Nursery`s which are either
allocated (in which case, as the same instance) or provided by the
caller of `open_ipc_server()` such that the same nursery-cancel-scope
controls offered by `trio.serve_listeners(handler_nursery=)` are
offered where the `._parent_tn` is used to spawn `_serve_ipc_eps()`
tasks, and `._stream_handler_tn` is passed verbatim as `handler_nursery`.
- a new `IPCEndpoint`-struct (as mentioned) which wraps each
transport-proto's address + listener + allocated-supervising-nursery
to encapsulate the "lifetime of a server IPC endpoint" such that
eventually we can track and managed per-protocol/address/`.listen_on()`-call
scoped starts/stops/restarts for the purposes of filtering/banning
peer traffic.
|_ also included is an unused `.peer_tpts` table which we can
hopefully use to replace `Actor._peers` in a `Channel`-tracking
transport-proto-aware way!
Surrounding changes to `.ipc.*` primitives to match,
- make `[TCP|UDS]Address` types `msgspec.Struct(frozen=True)` and thus
drop any-and-all `addr._host =` style mutation throughout.
|_ as such also drop their `.__init__()` and `.__eq__()` meths.
|_ UDS tweaks to field names and thus `.__repr__()`.
- move `[TCP|UDS]Address.[start/close]_listener()` meths to be mod-level
equiv `start|close_listener()` funcs.
- just hard code the `.ipc._types._key_to_transport/._addr_to_transport`
table entries instead of all the prior fancy dynamic class property
reading stuff (remember, "explicit is better then implicit").
Modified in `._runtime.Actor` internals,
- drop the `._serve_forever()` and `.cancel_server()`, methods and
`._server_down` waiting logic from `.cancel_soon()`
- add `.[_]ipc_server` which is opened just after the `._service_n` and
delegate to it for any equivalent publicly exposed instance
attributes/properties.
2025-04-10 22:06:12 +00:00
|
|
|
# default_lo_addrs,
|
2025-03-31 01:36:45 +00:00
|
|
|
get_address_cls,
|
|
|
|
|
wrap_address,
|
2025-03-23 03:14:04 +00:00
|
|
|
)
|
2024-03-13 19:57:15 +00:00
|
|
|
from ._context import (
|
|
|
|
|
mk_context,
|
|
|
|
|
Context,
|
|
|
|
|
)
|
|
|
|
|
from .log import get_logger
|
|
|
|
|
from ._exceptions import (
|
|
|
|
|
ContextCancelled,
|
2024-07-11 16:11:31 +00:00
|
|
|
InternalError,
|
2024-04-08 14:25:57 +00:00
|
|
|
ModuleNotExposed,
|
|
|
|
|
MsgTypeError,
|
|
|
|
|
unpack_error,
|
2024-03-13 19:57:15 +00:00
|
|
|
)
|
2025-06-13 03:26:38 +00:00
|
|
|
from .devx import (
|
|
|
|
|
debug,
|
|
|
|
|
pformat as _pformat
|
|
|
|
|
)
|
2023-09-27 19:19:30 +00:00
|
|
|
from ._discovery import get_registry
|
2024-03-13 19:57:15 +00:00
|
|
|
from ._portal import Portal
|
|
|
|
|
from . import _state
|
|
|
|
|
from . import _mp_fixup_main
|
2025-04-11 18:30:21 +00:00
|
|
|
from . import _rpc
|
2024-03-13 19:57:15 +00:00
|
|
|
|
|
|
|
|
if TYPE_CHECKING:
|
|
|
|
|
from ._supervise import ActorNursery
|
2024-07-11 16:11:31 +00:00
|
|
|
from trio._channel import MemoryChannelState
|
2024-03-13 19:57:15 +00:00
|
|
|
|
|
|
|
|
|
|
|
|
|
# module-wide logger instance for the `tractor` package.
log = get_logger('tractor')
|
2018-07-14 20:09:05 +00:00
|
|
|
|
|
|
|
|
|
2025-05-15 00:26:15 +00:00
|
|
|
def _get_mod_abspath(module: ModuleType) -> Path:
|
|
|
|
|
return Path(module.__file__).absolute()
|
2020-01-29 05:51:25 +00:00
|
|
|
|
|
|
|
|
|
2025-05-13 21:39:53 +00:00
|
|
|
def get_mod_nsps2fps(mod_ns_paths: list[str]) -> dict[str, str]:
    '''
    Map each py-module "namespace path" `str` to the absolute
    file-system path of its "physical" `.py` file.

    '''
    # import each namespace path and resolve its on-disk location.
    return {
        nsp: str(_get_mod_abspath(importlib.import_module(nsp)))
        for nsp in mod_ns_paths
    }
|
|
|
|
|
|
|
|
|
|
|
2026-02-11 01:33:19 +00:00
|
|
|
# NOTE(review): module-level flag; purpose not evident from this
# view — presumably a breakpoint/debug toggle, TODO confirm usage.
_bp = False
|
|
|
|
|
|
2018-07-14 20:09:05 +00:00
|
|
|
class Actor:
|
2021-12-02 03:17:09 +00:00
|
|
|
'''
|
|
|
|
|
The fundamental "runtime" concurrency primitive.
|
|
|
|
|
|
2024-07-04 23:40:11 +00:00
|
|
|
An "actor" is the combination of a regular Python process
|
|
|
|
|
executing a `trio.run()` task tree, communicating with other
|
|
|
|
|
"actors" through "memory boundary portals": `Portal`, which
|
|
|
|
|
provide a high-level async API around IPC "channels" (`Channel`)
|
|
|
|
|
which themselves encapsulate various (swappable) network
|
|
|
|
|
transport protocols for sending msgs between said memory domains
|
|
|
|
|
(processes, hosts, non-GIL threads).
|
|
|
|
|
|
|
|
|
|
Each "actor" is `trio.run()` scheduled "runtime" composed of many
|
|
|
|
|
concurrent tasks in a single thread. The "runtime" tasks conduct
|
|
|
|
|
a slew of low(er) level functions to make it possible for message
|
|
|
|
|
passing between actors as well as the ability to create new
|
|
|
|
|
actors (aka new "runtimes" in new processes which are supervised
|
|
|
|
|
via an "actor-nursery" construct). Each task which sends messages
|
|
|
|
|
to a task in a "peer" actor (not necessarily a parent-child,
|
2023-08-18 14:10:36 +00:00
|
|
|
depth hierarchy) is able to do so via an "address", which maps
|
|
|
|
|
IPC connections across memory boundaries, and a task request id
|
2024-07-04 23:40:11 +00:00
|
|
|
which allows for per-actor tasks to send and receive messages to
|
|
|
|
|
specific peer-actor tasks with which there is an ongoing RPC/IPC
|
|
|
|
|
dialog.
|
2018-07-14 20:09:05 +00:00
|
|
|
|
2021-12-02 03:17:09 +00:00
|
|
|
'''
|
|
|
|
|
    # ugh, we need to get rid of this and replace with a "registry" sys
    # https://github.com/goodboy/tractor/issues/216
    # whether this actor serves as the tree's registrar ("arbiter").
    is_arbiter: bool = False
|
2023-09-27 19:19:30 +00:00
|
|
|
|
|
|
|
|
@property
|
|
|
|
|
def is_registrar(self) -> bool:
|
|
|
|
|
return self.is_arbiter
|
|
|
|
|
|
2026-02-19 18:38:47 +00:00
|
|
|
    @property
    def is_root(self) -> bool:
        '''
        This actor is the parent most in the tree?

        Delegates to the process-global runtime-state check.

        '''
        return _state.is_root_process()
|
|
|
|
|
|
2021-12-06 00:31:41 +00:00
|
|
|
msg_buffer_size: int = 2**6
|
2019-12-10 05:55:03 +00:00
|
|
|
|
Heh, add back `Actor._root_tn`, it has purpose..
Turns out I didn't read my own internals docs/comments and despite it
not being used previously, this adds the real use case: a root,
per-actor, scope which ensures parent comms are the last conc-thing to
be cancelled.
Also, the impl changes here make the test from 6410e45 (or wtv
it's rebased to) pass, i.e. we can support crash handling in the root
actor despite the root-tn having been (self) cancelled.
Superficial adjustments,
- rename `Actor._service_n` -> `._service_tn` everywhere.
- add asserts to `._runtime.async_main()` which ensure that the any
`.trionics.maybe_open_nursery()` calls against optionally passed
`._[root/service]_tn` are allocated-if-not-provided (the
`._service_tn`-case being an i-guess-prep-for-the-future-anti-pattern
Bp).
- obvi adjust all internal usage to match new naming.
Serious/real-use-case changes,
- add (back) a `Actor._root_tn` which sits a scope "above" the
service-tn and is either,
+ assigned in `._runtime.async_main()` for sub-actors OR,
+ assigned in `._root.open_root_actor()` for the root actor.
**THE primary reason** to keep this "upper" tn is that during
a full-`Actor`-cancellation condition (more details below) we want to
ensure that the IPC connection with a sub-actor's parent is **the last
thing to be cancelled**; this is most simply implemented by ensuring
that the `Actor._parent_chan: .ipc.Channel` is handled in an upper
scope in `_rpc.process_messages()`-subtask-terms.
- for the root actor this `root_tn` is allocated in `.open_root_actor()`
body and assigned as such.
- extend `Actor.cancel_soon()` to be cohesive with this entire teardown
"policy" by scheduling a task in the `._root_tn` which,
* waits for the `._service_tn` to complete and then,
* cancels the `._root_tn.cancel_scope`,
* includes "sclangy" console logging throughout.
2025-08-19 23:24:20 +00:00
|
|
|
    # nursery placeholders filled in by `async_main()`,
    # - after fork for subactors.
    # - during boot for the root actor.
    #
    # upper-most per-actor scope; presumably ensures parent IPC
    # comms are the last concurrent thing cancelled on teardown
    # (see runtime notes) — TODO confirm against `async_main()`.
    _root_tn: Nursery|None = None
    # service-task scope nested "below" the root tn.
    _service_tn: Nursery|None = None
|
Factor actor-embedded IPC-tpt-server to `ipc` subsys
Primarily moving the `Actor._serve_forever()`-task-as-method and
supporting actor-instance attributes to a new `.ipo._server` sub-mod
which now encapsulates,
- the coupling various `trio.Nursery`s (and their independent lifetime mgmt)
to different `trio.serve_listener()`s tasks and `SocketStream`
handler scopes.
- `Address` and `SocketListener` mgmt and tracking through the idea of
an "IPC endpoint": each "bound-and-active instance" of a served-listener
for some (varied transport protocol's socket) address.
- start and shutdown of the entire server's lifetime via an `@acm`.
- delegation of starting/stopping tpt-protocol-specific `trio.abc.Listener`s
to the corresponding `.ipc._<proto_key>` sub-module (newly defined
mod-top-level instead of `Address` method) `start/close_listener()`
funcs.
Impl details of the `.ipc._server` sub-sys,
- add new `IPCServer`, allocated with `open_ipc_server()`, and which
encapsulates starting multiple-transport-proto-`trio.abc.Listener`s
from an input set of `._addr.Address`s using,
|_`IPCServer.listen_on()` which internally spawns tasks that delegate to a new
`_serve_ipc_eps()`, a rework of what was (effectively)
`Actor._serve_forever()` and which now,
* allocates a new `IPCEndpoint`-struct (see below) for each
address-listener pair alongside the specified
listener-serving/stream-handling `trio.Nursery`s provided by the
caller.
* starts and stops each transport (socket's) listener by calling
`IPCEndpoint.start/close_listener()` which in turn delegates to
the underlying `inspect.getmodule(IPCEndpoint.addr)` backend tpt
module's equivalent impl.
* tracks all created endpoints in a `._endpoints: list[IPCEndpoint]`
which is further exposed through public properties for
introspection of served transport-protocols and their addresses.
|_`IPCServer._[parent/stream_handler]_tn: Nursery`s which are either
allocated (in which case, as the same instance) or provided by the
caller of `open_ipc_server()` such that the same nursery-cancel-scope
controls offered by `trio.serve_listeners(handler_nursery=)` are
offered where the `._parent_tn` is used to spawn `_serve_ipc_eps()`
tasks, and `._stream_handler_tn` is passed verbatim as `handler_nursery`.
- a new `IPCEndpoint`-struct (as mentioned) which wraps each
transport-proto's address + listener + allocated-supervising-nursery
to encapsulate the "lifetime of a server IPC endpoint" such that
eventually we can track and managed per-protocol/address/`.listen_on()`-call
scoped starts/stops/restarts for the purposes of filtering/banning
peer traffic.
|_ also included is an unused `.peer_tpts` table which we can
hopefully use to replace `Actor._peers` in a `Channel`-tracking
transport-proto-aware way!
Surrounding changes to `.ipc.*` primitives to match,
- make `[TCP|UDS]Address` types `msgspec.Struct(frozen=True)` and thus
drop any-and-all `addr._host =` style mutation throughout.
|_ as such also drop their `.__init__()` and `.__eq__()` meths.
|_ UDS tweaks to field names and thus `.__repr__()`.
- move `[TCP|UDS]Address.[start/close]_listener()` meths to be mod-level
equiv `start|close_listener()` funcs.
- just hard code the `.ipc._types._key_to_transport/._addr_to_transport`
table entries instead of all the prior fancy dynamic class property
reading stuff (remember, "explicit is better then implicit").
Modified in `._runtime.Actor` internals,
- drop the `._serve_forever()` and `.cancel_server()`, methods and
`._server_down` waiting logic from `.cancel_soon()`
- add `.[_]ipc_server` which is opened just after the `._service_n` and
delegate to it for any equivalent publicly exposed instance
attributes/properties.
2025-04-10 22:06:12 +00:00
|
|
|
_ipc_server: _server.IPCServer|None = None
|
|
|
|
|
|
|
|
|
|
@property
|
|
|
|
|
def ipc_server(self) -> _server.IPCServer:
|
|
|
|
|
'''
|
|
|
|
|
The IPC transport-server for this actor; normally
|
|
|
|
|
a process-singleton.
|
|
|
|
|
|
|
|
|
|
'''
|
|
|
|
|
return self._ipc_server
|
2018-07-14 20:09:05 +00:00
|
|
|
|
2020-01-30 02:06:40 +00:00
|
|
|
    # Information about `__main__` from parent
    _parent_main_data: dict[str, str]
    # cancel-scope around the parent IPC channel's handling;
    # NOTE(review): exact usage not visible here — TODO confirm.
    _parent_chan_cs: CancelScope|None = None
    # spawn-time config msg, presumably received from the parent
    # during startup handshake — TODO confirm against `async_main()`.
    _spawn_spec: msgtypes.SpawnSpec|None = None

    # if started on ``asyncio`` running ``trio`` in guest mode
    _infected_aio: bool = False
|
|
|
|
|
|
2024-04-05 23:07:12 +00:00
|
|
|
    # TODO: nursery tracking like `trio` does?
    # _ans: dict[
    #     tuple[str, str],
    #     list[ActorNursery],
    # ] = {}

    # Process-global stack closed at end on actor runtime teardown.
    # NOTE: this is currently an undocumented public api.
    lifetime_stack: ExitStack = ExitStack()
|
|
|
|
|
|
2018-07-14 20:09:05 +00:00
|
|
|
def __init__(
|
|
|
|
|
self,
|
|
|
|
|
name: str,
|
2025-03-31 01:36:45 +00:00
|
|
|
uuid: str,
|
2020-07-23 17:23:55 +00:00
|
|
|
*,
|
2022-02-15 13:48:07 +00:00
|
|
|
enable_modules: list[str] = [],
|
2024-03-08 20:34:20 +00:00
|
|
|
loglevel: str|None = None,
|
2025-07-07 14:37:02 +00:00
|
|
|
registry_addrs: list[Address]|None = None,
|
2024-03-08 20:34:20 +00:00
|
|
|
spawn_method: str|None = None,
|
2023-09-27 19:19:30 +00:00
|
|
|
|
|
|
|
|
# TODO: remove!
|
2025-03-31 01:36:45 +00:00
|
|
|
arbiter_addr: UnwrappedAddress|None = None,
|
2023-09-27 19:19:30 +00:00
|
|
|
|
2018-08-31 21:16:24 +00:00
|
|
|
) -> None:
|
2022-08-03 20:09:16 +00:00
|
|
|
'''
|
|
|
|
|
This constructor is called in the parent actor **before** the spawning
|
2020-01-30 02:06:40 +00:00
|
|
|
phase (aka before a new process is executed).
|
2022-08-03 20:09:16 +00:00
|
|
|
|
|
|
|
|
'''
|
2025-04-02 02:08:56 +00:00
|
|
|
self._aid = msgtypes.Aid(
|
|
|
|
|
name=name,
|
|
|
|
|
uuid=uuid,
|
|
|
|
|
pid=os.getpid(),
|
|
|
|
|
)
|
|
|
|
|
self._task: trio.Task|None = None
|
2020-10-13 15:48:52 +00:00
|
|
|
|
2025-04-02 02:08:56 +00:00
|
|
|
# state
|
2020-10-13 15:48:52 +00:00
|
|
|
self._cancel_complete = trio.Event()
|
2025-06-23 21:33:54 +00:00
|
|
|
self._cancel_called_by: tuple[str, tuple]|None = None
|
2020-10-13 15:48:52 +00:00
|
|
|
self._cancel_called: bool = False
|
2020-01-20 16:04:36 +00:00
|
|
|
|
2020-01-30 02:06:40 +00:00
|
|
|
# retreive and store parent `__main__` data which
|
|
|
|
|
# will be passed to children
|
|
|
|
|
self._parent_main_data = _mp_fixup_main._mp_figure_out_main()
|
|
|
|
|
|
2025-05-13 21:39:53 +00:00
|
|
|
# TODO? only add this when `is_debug_mode() == True` no?
|
2020-07-30 14:41:58 +00:00
|
|
|
# always include debugging tools module
|
2025-05-13 21:39:53 +00:00
|
|
|
if _state.is_root_process():
|
2025-05-13 21:39:53 +00:00
|
|
|
enable_modules.append('tractor.devx.debug._tty_lock')
|
2020-07-30 14:41:58 +00:00
|
|
|
|
2025-05-13 21:39:53 +00:00
|
|
|
self.enable_modules: dict[str, str] = get_mod_nsps2fps(
|
|
|
|
|
mod_ns_paths=enable_modules,
|
|
|
|
|
)
|
2020-01-20 16:04:36 +00:00
|
|
|
|
2021-12-02 17:34:27 +00:00
|
|
|
self._mods: dict[str, ModuleType] = {}
|
2023-09-27 19:19:30 +00:00
|
|
|
self.loglevel: str = loglevel
|
|
|
|
|
|
|
|
|
|
if arbiter_addr is not None:
|
|
|
|
|
warnings.warn(
|
|
|
|
|
'`Actor(arbiter_addr=<blah>)` is now deprecated.\n'
|
2025-07-07 14:37:02 +00:00
|
|
|
'Use `registry_addrs: list[Address]` instead.',
|
2023-09-27 19:19:30 +00:00
|
|
|
DeprecationWarning,
|
|
|
|
|
stacklevel=2,
|
|
|
|
|
)
|
2025-07-07 14:37:02 +00:00
|
|
|
|
2026-02-11 01:33:19 +00:00
|
|
|
registry_addrs: list[Address] = [
|
|
|
|
|
wrap_address(arbiter_addr)
|
|
|
|
|
]
|
2021-07-01 18:52:52 +00:00
|
|
|
|
2020-07-28 00:05:00 +00:00
|
|
|
# marked by the process spawning backend at startup
|
|
|
|
|
# will be None for the parent most process started manually
|
|
|
|
|
# by the user (currently called the "arbiter")
|
2023-09-27 19:19:30 +00:00
|
|
|
self._spawn_method: str = spawn_method
|
2020-07-28 00:05:00 +00:00
|
|
|
|
2024-07-04 23:40:11 +00:00
|
|
|
# RPC state
|
2019-11-23 06:29:02 +00:00
|
|
|
self._ongoing_rpc_tasks = trio.Event()
|
|
|
|
|
self._ongoing_rpc_tasks.set()
|
2021-12-02 17:34:27 +00:00
|
|
|
self._rpc_tasks: dict[
|
2024-07-04 23:40:11 +00:00
|
|
|
tuple[Channel, str], # (chan, cid)
|
|
|
|
|
tuple[Context, Callable, trio.Event] # (ctx=>, fn(), done?)
|
2018-08-31 21:16:24 +00:00
|
|
|
] = {}
|
2021-12-03 19:27:04 +00:00
|
|
|
|
|
|
|
|
# map {actor uids -> Context}
|
|
|
|
|
self._contexts: dict[
|
2024-03-11 14:24:44 +00:00
|
|
|
tuple[
|
|
|
|
|
tuple[str, str], # .uid
|
|
|
|
|
str, # .cid
|
|
|
|
|
str, # .side
|
|
|
|
|
],
|
2021-12-03 19:27:04 +00:00
|
|
|
Context
|
2019-12-10 05:55:03 +00:00
|
|
|
] = {}
|
2021-12-03 19:27:04 +00:00
|
|
|
|
2024-03-25 18:15:36 +00:00
|
|
|
self._parent_chan: Channel|None = None
|
|
|
|
|
self._forkserver_info: tuple|None = None
|
2024-04-18 16:47:28 +00:00
|
|
|
|
|
|
|
|
# track each child/sub-actor in it's locally
|
|
|
|
|
# supervising nursery
|
2022-10-17 19:54:05 +00:00
|
|
|
self._actoruid2nursery: dict[
|
2024-04-18 16:47:28 +00:00
|
|
|
tuple[str, str], # sub-`Actor.uid`
|
2024-03-25 18:15:36 +00:00
|
|
|
ActorNursery|None,
|
2024-04-18 16:47:28 +00:00
|
|
|
] = {}
|
2018-07-14 20:09:05 +00:00
|
|
|
|
2023-10-19 16:05:44 +00:00
|
|
|
# when provided, init the registry addresses property from
|
|
|
|
|
# input via the validator.
|
2025-03-31 01:36:45 +00:00
|
|
|
self._reg_addrs: list[UnwrappedAddress] = []
|
2023-10-19 16:05:44 +00:00
|
|
|
if registry_addrs:
|
2025-07-07 14:37:02 +00:00
|
|
|
_state._runtime_vars['_registry_addrs'] = self.reg_addrs = [
|
|
|
|
|
addr.unwrap()
|
|
|
|
|
for addr in registry_addrs
|
|
|
|
|
]
|
2023-10-19 16:05:44 +00:00
|
|
|
|
2025-04-02 02:08:56 +00:00
|
|
|
@property
def aid(self) -> msgtypes.Aid:
    '''
    This process-singleton-actor's "unique actor ID" in struct form.

    See the `tractor.msg.Aid` struct for details.

    '''
    # set elsewhere (ctor not visible in this chunk); presumably
    # assigned once at actor init — TODO confirm immutability.
    return self._aid
|
|
|
|
|
|
|
|
|
|
@property
def name(self) -> str:
    '''
    Convenience accessor for the `.name` field of this actor's
    `Aid` (unique actor ID) struct.

    '''
    return self._aid.name
|
|
|
|
|
|
|
|
|
|
@property
def uid(self) -> tuple[str, str]:
    '''
    This process-singleton's "unique (cross-host) ID".

    Delivered from the `.Aid.name/.uuid` fields as a `tuple` pair
    and should be multi-host unique despite a large distributed
    process plane.

    Deprecated: prefer the richer `.aid: tractor.msg.Aid` struct.

    '''
    warnings.warn(
        f'`{type(self).__name__}.uid` is now deprecated.\n'
        'Use the new `.aid: tractor.msg.Aid` (struct) instead '
        'which also provides additional named (optional) fields '
        'beyond just the `.name` and `.uuid`.',
        DeprecationWarning,
        stacklevel=2,
    )
    aid = self._aid
    return (aid.name, aid.uuid)
|
|
|
|
|
|
|
|
|
|
@property
def pid(self) -> int:
    '''
    The hosting process' ID as carried by this actor's `Aid`
    struct.

    '''
    return self._aid.pid
|
|
|
|
|
|
2025-06-23 21:33:54 +00:00
|
|
|
@property
def repr_state(self) -> str:
    '''
    One-word-ish, human-readable summary of this actor's current
    cancellation state for use in `repr()`/log output.

    Returns one of:
    - `'cancelled'`: cancellation fully completed,
    - `'cancel-called by <canceller>'`: a cancel request was
      received (from `<canceller>`) but has not yet completed,
    - `'running'`: no cancellation in play.

    '''
    if self.cancel_complete:
        return 'cancelled'

    # BUGFIX: the prior version returned a string with a dangling
    # leading ' and ' conjunction (apparently copied from
    # a concatenation context) which rendered as an invalid
    # standalone state label.
    elif canceller := self.cancel_caller:
        return f'cancel-called by {canceller}'

    else:
        return 'running'
|
|
|
|
|
|
2025-06-13 03:26:38 +00:00
|
|
|
def pformat(
    self,
    ds: str = ': ',          # key/value delimiter-string
    indent: int = 0,         # overall leading-indent of the repr
    privates: bool = False,  # include private/internal fields
) -> str:
    '''
    Render a multi-line, tree-style pretty repr of this actor's
    identity, IPC-server, RPC and cancellation state.

    Also bound as `__repr__` (see assignment below the def).

    '''
    # default header: just the compact actor-id repr
    fmtstr: str = f'|_id: {self.aid.reprol()!r}\n'
    if privates:
        # in private mode replace the compact id-line with a full,
        # nested struct-pformat of the `Aid`.
        aid_nest_prefix: str = '|_aid='
        aid_field_repr: str = _pformat.nest_from_op(
            input_op='',
            text=pretty_struct.pformat(
                struct=self.aid,
                field_indent=2,
            ),
            op_suffix='',
            nest_prefix=aid_nest_prefix,
            nest_indent=0,
        )
        fmtstr: str = f'{aid_field_repr}'

    # only subactors have a parent channel
    if rent_chan := self._parent_chan:
        fmtstr += (
            f"|_parent{ds}{rent_chan.aid.reprol()}\n"
        )

    server: _server.IPCServer = self.ipc_server
    if server:
        if privates:
            server_repr: str = self._ipc_server.pformat(
                privates=privates,
            )
            # create field ln as a key-header indented under
            # and up to the section's key prefix.
            # ^XXX if we were to indent `repr(Server)` to
            # '<key>: '
            # _here_^
            server_repr: str = _pformat.nest_from_op(
                input_op='',  # nest as sub-obj
                op_suffix='',
                text=server_repr,
            )
            fmtstr += (
                f"{server_repr}"
            )
        else:
            # terse, single-line ipc-server state summary
            fmtstr += (
                f'|_ipc: {server.repr_state!r}\n'
            )

    fmtstr += (
        f'|_rpc: {len(self._rpc_tasks)} active tasks\n'
    )

    # TODO, actually fix the .repr_state impl/output?
    # append ipc-ctx state summary
    # ctxs: dict = self._contexts
    # if ctxs:
    #     ctx_states: dict[str, int] = {}
    #     for ctx in self._contexts.values():
    #         ctx_state: str = ctx.repr_state
    #         cnt = ctx_states.setdefault(ctx_state, 0)
    #         ctx_states[ctx_state] = cnt + 1

    #     fmtstr += (
    #         f" ctxs{ds}{ctx_states}\n"
    #     )

    # runtime-state
    task_name: str = '<dne>'
    if task := self._task:
        task_name: str = task.name
    fmtstr += (
        # TODO, this just like ctx?
        f'|_state: {self.repr_state!r}\n'
        f' task: {task_name}\n'
        f' loglevel: {self.loglevel!r}\n'
        f' subactors_spawned: {len(self._actoruid2nursery)}\n'
    )
    # spawn-method only applies to subactors (root is started by
    # the user directly).
    if not _state.is_root_process():
        fmtstr += f' spawn_method: {self._spawn_method!r}\n'

    if privates:
        fmtstr += (
            # f' actoruid2nursery{ds}{self._actoruid2nursery}\n'
            f' cancel_complete{ds}{self._cancel_complete}\n'
            f' cancel_called_by_remote{ds}{self._cancel_called_by}\n'
            f' cancel_called{ds}{self._cancel_called}\n'
        )

    # indent the body one extra col under the opening bracket line
    if fmtstr:
        fmtstr: str = textwrap.indent(
            text=fmtstr,
            prefix=' '*(1 + indent),
        )

    _repr: str = (
        f'<{type(self).__name__}(\n'
        f'{fmtstr}'
        f')>\n'
    )
    if indent:
        _repr: str = textwrap.indent(
            text=_repr,
            prefix=' '*indent,
        )
    return _repr

__repr__ = pformat
|
|
|
|
|
|
2023-10-19 16:05:44 +00:00
|
|
|
@property
def reg_addrs(self) -> list[UnwrappedAddress]:
    '''
    List of (socket) addresses for all known (and contactable)
    registry-service actors in "unwrapped" (i.e. IPC interchange
    wire-compat) form.

    If you are looking for the "wrapped" address form, use
    `.registry_addrs` instead.

    '''
    return self._reg_addrs
|
|
|
|
|
|
|
|
|
|
@reg_addrs.setter
def reg_addrs(
    self,
    addrs: list[UnwrappedAddress],
) -> None:
    '''
    Assign a new registry-address list, rejecting (and merely
    logging about) an empty input instead of overwriting the
    current value.

    '''
    if addrs:
        self._reg_addrs = addrs
        return

    log.warning(
        'Empty registry address list is invalid:\n'
        f'{addrs}'
    )
|
2023-10-19 16:05:44 +00:00
|
|
|
|
2025-07-07 14:37:02 +00:00
|
|
|
@property
def registry_addrs(self) -> list[Address]:
    '''
    Same as `.reg_addrs` but with each entry "wrapped" into its
    high-level `Address` form.

    '''
    return [
        wrap_address(unwrapped)
        for unwrapped in self.reg_addrs
    ]
|
|
|
|
|
|
2024-03-13 13:55:47 +00:00
|
|
|
def load_modules(
    self,
) -> None:
    '''
    Load explicitly enabled python modules from local fs after
    process spawn.

    Since this actor may be spawned on a different machine from
    the original nursery we need to try and load the local module
    code manually (presuming it exists).

    Raises `ModuleNotFoundError` (after logging) when an enabled
    module can not be imported.

    '''
    # BUGFIX: pre-bind so the `except`-handler's log msg below
    # can't itself raise `NameError` when the import error fires
    # *before* the module loop ever assigns `modpath` (eg. from
    # inside the `_mp_fixup_main` calls).
    modpath: str = '<unknown>'
    try:
        if self._spawn_method == 'trio':
            # replicate `multiprocessing`-style "main module"
            # fixups so `__main__`-relative user code resolves.
            parent_data = self._parent_main_data
            if 'init_main_from_name' in parent_data:
                _mp_fixup_main._fixup_main_from_name(
                    parent_data['init_main_from_name'])
            elif 'init_main_from_path' in parent_data:
                _mp_fixup_main._fixup_main_from_path(
                    parent_data['init_main_from_path'])

        status: str = 'Attempting to import enabled modules:\n'

        filepath: str
        for modpath, filepath in self.enable_modules.items():
            # XXX append the allowed module to the python path which
            # should allow for relative (at least downward) imports.
            sys.path.append(os.path.dirname(filepath))
            status += (
                f'|_{modpath!r} -> {filepath!r}\n'
            )
            mod: ModuleType = importlib.import_module(modpath)
            self._mods[modpath] = mod
            if modpath == '__main__':
                # mirror the `multiprocessing` alias convention so
                # lookups against `__mp_main__` also resolve.
                self._mods['__mp_main__'] = mod

        log.runtime(status)

    except ModuleNotFoundError:
        # it is expected the corresponding `ModuleNotExposed` error
        # will be raised later
        log.error(
            f"Failed to import {modpath} in {self.name}"
        )
        raise
|
2018-07-14 20:09:05 +00:00
|
|
|
|
2025-07-08 16:44:46 +00:00
|
|
|
# ?TODO, factor this meth-iface into a new `.rpc` subsys primitive?
|
|
|
|
|
# - _get_rpc_func(),
|
|
|
|
|
# - _deliver_ctx_payload(),
|
|
|
|
|
# - get_context(),
|
|
|
|
|
# - start_remote_task(),
|
|
|
|
|
# - cancel_rpc_tasks(),
|
|
|
|
|
# - _cancel_task(),
|
|
|
|
|
#
|
2019-01-01 20:58:38 +00:00
|
|
|
def _get_rpc_func(self, ns, funcname):
|
2024-03-13 19:57:15 +00:00
|
|
|
'''
|
|
|
|
|
Try to lookup and return a target RPC func from the
|
|
|
|
|
post-fork enabled module set.
|
|
|
|
|
|
|
|
|
|
'''
|
2019-01-01 20:58:38 +00:00
|
|
|
try:
|
|
|
|
|
return getattr(self._mods[ns], funcname)
|
|
|
|
|
except KeyError as err:
|
2020-10-16 02:49:12 +00:00
|
|
|
mne = ModuleNotExposed(*err.args)
|
|
|
|
|
|
|
|
|
|
if ns == '__main__':
|
2022-07-01 18:36:49 +00:00
|
|
|
modpath = '__name__'
|
|
|
|
|
else:
|
|
|
|
|
modpath = f"'{ns}'"
|
|
|
|
|
|
|
|
|
|
msg = (
|
|
|
|
|
"\n\nMake sure you exposed the target module, `{ns}`, "
|
|
|
|
|
"using:\n"
|
|
|
|
|
"ActorNursery.start_actor(<name>, enable_modules=[{mod}])"
|
|
|
|
|
).format(
|
|
|
|
|
ns=ns,
|
|
|
|
|
mod=modpath,
|
|
|
|
|
)
|
2020-10-16 02:49:12 +00:00
|
|
|
|
2022-07-01 18:36:49 +00:00
|
|
|
mne.msg += msg
|
2020-10-16 02:49:12 +00:00
|
|
|
|
|
|
|
|
raise mne
|
2019-01-01 20:58:38 +00:00
|
|
|
|
2024-03-13 19:57:15 +00:00
|
|
|
# TODO: rename to `._deliver_payload()` since this handles
# more then just `result` msgs now obvi XD
async def _deliver_ctx_payload(
    self,
    chan: Channel,
    cid: str,
    msg: MsgType|MsgTypeError,

) -> None|bool:
    '''
    Push an RPC msg-payload to the local consumer peer-task's
    queue.

    Silently drops (with a log entry) any msg whose
    `(chan.aid.uid, cid)` pair doesn't match a currently-tracked
    `Context` in `._contexts`.

    '''
    aid: msgtypes.Aid = chan.aid
    assert aid, f"`chan.aid` can't be {aid}"
    try:
        # ctx lookup is keyed on the *remote* peer's uid + cid
        ctx: Context = self._contexts[(
            aid.uid,
            cid,

            # TODO: how to determine this tho?
            # side,
        )]
    except KeyError:
        report: str = (
            'Ignoring invalid IPC msg!?\n'
            f'Ctx seems to not/no-longer exist??\n'
            f'\n'
            f'<=? {aid.reprol()!r}\n'
            f' |_{pretty_struct.pformat(msg)}\n'
        )
        # a late `Stop` after ctx teardown is expected-ish, so
        # only log it at runtime level; anything else is sus.
        match msg:
            case Stop():
                log.runtime(report)
            case _:
                log.warning(report)

        return

    # if isinstance(msg, MsgTypeError):
    #     return await ctx._deliver_bad_msg()

    # delegate the actual (possibly overrun-aware) enqueue to the
    # ctx's own delivery machinery.
    return await ctx._deliver_msg(msg)
|
2019-02-15 21:23:58 +00:00
|
|
|
|
2021-12-03 19:27:04 +00:00
|
|
|
def get_context(
    self,
    chan: Channel,
    cid: str,
    nsf: NamespacePath,

    # TODO: support lookup by `Context.side: str` ?
    # -> would allow making a self-context which might have
    # certain special use cases where RPC isolation is wanted
    # between 2 tasks running in the same process?
    # => prolly needs some deeper though on the real use cases
    # and whether or not such things should be better
    # implemented using a `TaskManager` style nursery..
    #
    # side: str|None = None,

    msg_buffer_size: int|None = None,
    allow_overruns: bool = False,

) -> Context:
    '''
    Look-up (existing) or create a new
    inter-actor-SC-linked task "context" (a `Context`) which
    encapsulates the local RPC task's execution enviroment
    around `Channel` relayed msg handling including,

    - a dedicated `trio` cancel scope (`Context._scope`),
    - a pair of IPC-msg-relay "feeder" mem-channels
      (`Context._recv/send_chan`),
    - and a "context id" (cid) unique to the task-pair
      msging session's lifetime.

    '''
    # ?TODO, use Aid here as well?
    actor_uid = chan.uid
    assert actor_uid
    try:
        # cache-hit: an existing ctx for this (peer-uid, cid) pair
        ctx = self._contexts[(
            actor_uid,
            cid,
            # side,
        )]
        log.debug(
            f'Retreived cached IPC ctx for\n'
            f'peer: {chan.uid}\n'
            f'cid:{cid}\n'
        )
        # refresh overrun policy on every lookup
        ctx._allow_overruns: bool = allow_overruns

        # adjust buffer size if specified
        # NOTE: reaches into trio's mem-chan internals to resize
        # the existing channel in-place.
        state: MemoryChannelState = ctx._send_chan._state  # type: ignore
        if (
            msg_buffer_size
            and
            state.max_buffer_size != msg_buffer_size
        ):
            state.max_buffer_size = msg_buffer_size

    except KeyError:
        # cache-miss: allocate and register a fresh ctx
        log.debug(
            f'Allocate new IPC ctx for\n'
            f'peer: {chan.uid}\n'
            f'cid: {cid}\n'
        )
        ctx = mk_context(
            chan,
            cid,
            nsf=nsf,
            msg_buffer_size=msg_buffer_size or self.msg_buffer_size,
            _allow_overruns=allow_overruns,
        )
        self._contexts[(
            actor_uid,
            cid,
            # side,
        )] = ctx

    return ctx
|
2018-07-14 20:09:05 +00:00
|
|
|
|
2021-12-03 21:51:15 +00:00
|
|
|
async def start_remote_task(
    self,
    chan: Channel,
    nsf: NamespacePath,
    kwargs: dict,

    # determines `Context.side: str`
    portal: Portal|None = None,

    # IPC channel config
    msg_buffer_size: int|None = None,
    allow_overruns: bool = False,
    load_nsf: bool = False,
    ack_timeout: float = float('inf'),

) -> Context:
    '''
    Send a `'cmd'` msg to a remote actor, which requests the
    start and schedule of a remote task-as-function's
    entrypoint.

    Synchronously validates the endpoint type and returns
    a (caller side) `Context` that can be used to accept
    delivery of msg payloads from the local runtime's
    processing loop: `._rpc.process_messages()`.

    Raises `trio.TooSlowError` when no `StartAck` arrives within
    `ack_timeout`, or `ValueError` on an invalid ack functype.

    '''
    # a fresh, globally unique context-id for this task pairing
    cid: str = str(uuid.uuid4())
    assert chan.uid
    ctx = self.get_context(
        chan=chan,
        cid=cid,
        nsf=nsf,

        # side='caller',
        msg_buffer_size=msg_buffer_size,
        allow_overruns=allow_overruns,
    )
    ctx._portal = portal

    # resolve the `(ns, func)` pair to ship over the wire; `self`
    # namespaced eps (runtime internals) are never fs-loaded.
    if (
        'self' in nsf
        or
        not load_nsf
    ):
        ns, _, func = nsf.partition(':')
    else:
        # TODO: pass nsf directly over wire!
        # -[ ] but, how to do `self:<Actor.meth>`??
        ns, func = nsf.to_tuple()

    msg = msgtypes.Start(
        ns=ns,
        func=func,
        kwargs=kwargs,
        uid=self.uid,
        cid=cid,
    )
    log.runtime(
        'Sending RPC `Start`\n\n'
        f'=> peer: {chan.uid}\n'
        f' |_ {ns}.{func}({kwargs})\n\n'

        f'{pretty_struct.pformat(msg)}'
    )
    await chan.send(msg)

    # NOTE wait on first `StartAck` response msg and validate;
    # this should be immediate and does not (yet) wait for the
    # remote child task to sync via `Context.started()`.
    with trio.fail_after(ack_timeout):
        first_msg: msgtypes.StartAck = await ctx._rx_chan.receive()
    try:
        functype: str = first_msg.functype
    except AttributeError:
        # a non-ack msg (presumably an `Error`) came back; re-box
        # and raise it locally.
        raise unpack_error(first_msg, chan)

    if functype not in (
        'asyncfunc',
        'asyncgen',
        'context',
    ):
        raise ValueError(
            f'Invalid `StartAck.functype: str = {first_msg!r}` ??'
        )

    ctx._remote_func_type = functype
    return ctx
|
2018-07-14 20:09:05 +00:00
|
|
|
|
2020-09-12 15:48:20 +00:00
|
|
|
async def _from_parent(
|
2020-08-08 18:55:41 +00:00
|
|
|
self,
|
2025-03-31 01:36:45 +00:00
|
|
|
parent_addr: UnwrappedAddress|None,
|
2024-03-13 19:57:15 +00:00
|
|
|
|
|
|
|
|
) -> tuple[
|
|
|
|
|
Channel,
|
2025-03-31 01:36:45 +00:00
|
|
|
list[UnwrappedAddress]|None,
|
|
|
|
|
list[str]|None, # preferred tpts
|
2024-03-13 19:57:15 +00:00
|
|
|
]:
|
|
|
|
|
'''
|
|
|
|
|
Bootstrap this local actor's runtime config from its parent by
|
|
|
|
|
connecting back via the IPC transport, handshaking and then
|
|
|
|
|
`Channel.recv()`-ing seeded data.
|
|
|
|
|
|
|
|
|
|
'''
|
2020-08-08 18:55:41 +00:00
|
|
|
try:
|
|
|
|
|
# Connect back to the parent actor and conduct initial
|
|
|
|
|
# handshake. From this point on if we error, we
|
|
|
|
|
# attempt to ship the exception back to the parent.
|
2025-03-31 01:36:45 +00:00
|
|
|
chan = await Channel.from_addr(
|
|
|
|
|
addr=wrap_address(parent_addr)
|
|
|
|
|
)
|
|
|
|
|
assert isinstance(chan, Channel)
|
2020-08-08 18:55:41 +00:00
|
|
|
|
2025-04-11 20:55:03 +00:00
|
|
|
# init handshake: swap actor-IDs.
|
2025-04-03 16:22:21 +00:00
|
|
|
await chan._do_handshake(aid=self.aid)
|
2020-08-08 18:55:41 +00:00
|
|
|
|
2025-03-31 01:36:45 +00:00
|
|
|
accept_addrs: list[UnwrappedAddress]|None = None
|
2024-04-02 17:41:52 +00:00
|
|
|
|
2020-08-08 18:55:41 +00:00
|
|
|
if self._spawn_method == "trio":
|
2024-04-02 17:41:52 +00:00
|
|
|
|
2024-04-30 16:15:46 +00:00
|
|
|
# Receive post-spawn runtime state from our parent.
|
2024-04-02 17:41:52 +00:00
|
|
|
spawnspec: msgtypes.SpawnSpec = await chan.recv()
|
2025-03-31 01:36:45 +00:00
|
|
|
match spawnspec:
|
|
|
|
|
case MsgTypeError():
|
|
|
|
|
raise spawnspec
|
|
|
|
|
case msgtypes.SpawnSpec():
|
|
|
|
|
self._spawn_spec = spawnspec
|
|
|
|
|
log.runtime(
|
|
|
|
|
'Received runtime spec from parent:\n\n'
|
|
|
|
|
|
|
|
|
|
# TODO: eventually all these msgs as
|
|
|
|
|
# `msgspec.Struct` with a special mode that
|
|
|
|
|
# pformats them in multi-line mode, BUT only
|
|
|
|
|
# if "trace"/"util" mode is enabled?
|
|
|
|
|
f'{pretty_struct.pformat(spawnspec)}\n'
|
|
|
|
|
)
|
2024-04-02 17:41:52 +00:00
|
|
|
|
2025-03-31 01:36:45 +00:00
|
|
|
case _:
|
|
|
|
|
raise InternalError(
|
|
|
|
|
f'Received invalid non-`SpawnSpec` payload !?\n'
|
|
|
|
|
f'{spawnspec}\n'
|
|
|
|
|
)
|
2025-05-13 21:39:53 +00:00
|
|
|
# ^^XXX TODO XXX^^^
|
|
|
|
|
# when the `SpawnSpec` fails to decode the above will
|
|
|
|
|
# raise a `MsgTypeError` which if we do NOT ALSO
|
|
|
|
|
# RAISE it will tried to be pprinted in the
|
|
|
|
|
# log.runtime() below..
|
2025-03-31 01:36:45 +00:00
|
|
|
#
|
|
|
|
|
# SO we gotta look at how other `chan.recv()` calls
|
|
|
|
|
# are wrapped and do the same for this spec receive!
|
|
|
|
|
# -[ ] see `._rpc` likely has the answer?
|
2025-05-13 21:39:53 +00:00
|
|
|
|
|
|
|
|
# ^^^XXX NOTE XXX^^^, can't be called here!
|
2025-03-31 01:36:45 +00:00
|
|
|
#
|
|
|
|
|
# breakpoint()
|
|
|
|
|
# import pdbp; pdbp.set_trace()
|
2025-05-13 21:39:53 +00:00
|
|
|
#
|
|
|
|
|
# => bc we haven't yet received the
|
|
|
|
|
# `spawnspec._runtime_vars` which contains
|
|
|
|
|
# `debug_mode: bool`..
|
|
|
|
|
|
|
|
|
|
# `SpawnSpec.bind_addrs`
|
|
|
|
|
# ---------------------
|
2025-03-31 01:36:45 +00:00
|
|
|
accept_addrs: list[UnwrappedAddress] = spawnspec.bind_addrs
|
2024-02-19 17:25:08 +00:00
|
|
|
|
2025-05-13 21:39:53 +00:00
|
|
|
# `SpawnSpec._runtime_vars`
|
|
|
|
|
# -------------------------
|
|
|
|
|
# => update process-wide globals
|
|
|
|
|
# TODO! -[ ] another `Struct` for rtvs..
|
2024-04-30 16:15:46 +00:00
|
|
|
rvs: dict[str, Any] = spawnspec._runtime_vars
|
2024-02-19 17:25:08 +00:00
|
|
|
if rvs['_debug_mode']:
|
2024-06-28 18:25:53 +00:00
|
|
|
from .devx import (
|
|
|
|
|
enable_stack_on_sig,
|
|
|
|
|
maybe_init_greenback,
|
|
|
|
|
)
|
2024-02-19 17:25:08 +00:00
|
|
|
try:
|
2024-04-30 16:15:46 +00:00
|
|
|
# TODO: maybe return some status msgs upward
|
|
|
|
|
# to that we can emit them in `con_status`
|
|
|
|
|
# instead?
|
|
|
|
|
log.devx(
|
2024-04-02 17:41:52 +00:00
|
|
|
'Enabling `stackscope` traces on SIGUSR1'
|
|
|
|
|
)
|
2024-02-19 17:25:08 +00:00
|
|
|
enable_stack_on_sig()
|
2024-06-28 18:25:53 +00:00
|
|
|
|
2024-02-19 17:25:08 +00:00
|
|
|
except ImportError:
|
|
|
|
|
log.warning(
|
|
|
|
|
'`stackscope` not installed for use in debug mode!'
|
|
|
|
|
)
|
|
|
|
|
|
2024-06-28 18:25:53 +00:00
|
|
|
if rvs.get('use_greenback', False):
|
|
|
|
|
maybe_mod: ModuleType|None = await maybe_init_greenback()
|
|
|
|
|
if maybe_mod:
|
|
|
|
|
log.devx(
|
|
|
|
|
'Activated `greenback` '
|
|
|
|
|
'for `tractor.pause_from_sync()` support!'
|
|
|
|
|
)
|
|
|
|
|
else:
|
|
|
|
|
rvs['use_greenback'] = False
|
|
|
|
|
log.warning(
|
|
|
|
|
'`greenback` not installed for use in debug mode!\n'
|
|
|
|
|
'`tractor.pause_from_sync()` not available!'
|
|
|
|
|
)
|
|
|
|
|
|
2024-07-11 16:11:31 +00:00
|
|
|
# XXX ensure the "infected `asyncio` mode" setting
|
|
|
|
|
# passed down from our spawning parent is consistent
|
|
|
|
|
# with `trio`-runtime initialization:
|
|
|
|
|
# - during sub-proc boot, the entrypoint func
|
|
|
|
|
# (`._entry.<spawn_backend>_main()`) should set
|
|
|
|
|
# `._infected_aio = True` before calling
|
|
|
|
|
# `run_as_asyncio_guest()`,
|
|
|
|
|
# - the value of `infect_asyncio: bool = True` as
|
|
|
|
|
# passed to `ActorNursery.start_actor()` must be
|
|
|
|
|
# the same as `_runtime_vars['_is_infected_aio']`
|
|
|
|
|
if (
|
|
|
|
|
(aio_rtv := rvs['_is_infected_aio'])
|
|
|
|
|
!=
|
|
|
|
|
(aio_attr := self._infected_aio)
|
|
|
|
|
):
|
|
|
|
|
raise InternalError(
|
|
|
|
|
'Parent sent runtime-vars that mismatch for the '
|
|
|
|
|
'"infected `asyncio` mode" settings ?!?\n\n'
|
|
|
|
|
|
|
|
|
|
f'rvs["_is_infected_aio"] = {aio_rtv}\n'
|
|
|
|
|
f'self._infected_aio = {aio_attr}\n'
|
|
|
|
|
)
|
|
|
|
|
if aio_rtv:
|
2025-05-13 21:39:53 +00:00
|
|
|
assert (
|
|
|
|
|
trio_runtime.GLOBAL_RUN_CONTEXT.runner.is_guest
|
|
|
|
|
# and
|
|
|
|
|
# ^TODO^ possibly add a `sniffio` or
|
|
|
|
|
# `trio` pub-API for `is_guest_mode()`?
|
|
|
|
|
)
|
2024-07-11 16:11:31 +00:00
|
|
|
|
|
|
|
|
rvs['_is_root'] = False # obvi XD
|
|
|
|
|
|
2026-02-11 01:33:19 +00:00
|
|
|
# TODO, remove! left in just while protoing init fix!
|
|
|
|
|
# global _bp
|
|
|
|
|
# if (
|
|
|
|
|
# 'chart' in self.aid.name
|
|
|
|
|
# and
|
|
|
|
|
# isinstance(
|
|
|
|
|
# rvs['_root_addrs'][0],
|
|
|
|
|
# dict,
|
|
|
|
|
# )
|
|
|
|
|
# and
|
|
|
|
|
# not _bp
|
|
|
|
|
# ):
|
|
|
|
|
# _bp = True
|
|
|
|
|
# breakpoint()
|
|
|
|
|
|
2020-09-12 15:48:20 +00:00
|
|
|
_state._runtime_vars.update(rvs)
|
|
|
|
|
|
2025-05-13 21:39:53 +00:00
|
|
|
# `SpawnSpec.reg_addrs`
|
|
|
|
|
# ---------------------
|
|
|
|
|
# => update parent provided registrar contact info
|
2024-04-02 17:41:52 +00:00
|
|
|
#
|
|
|
|
|
self.reg_addrs = [
|
|
|
|
|
# TODO: we don't really NEED these as tuples?
|
|
|
|
|
# so we can probably drop this casting since
|
|
|
|
|
# apparently in python lists are "more
|
|
|
|
|
# efficient"?
|
|
|
|
|
tuple(val)
|
|
|
|
|
for val in spawnspec.reg_addrs
|
|
|
|
|
]
|
|
|
|
|
|
2025-05-13 21:39:53 +00:00
|
|
|
# `SpawnSpec.enable_modules`
|
|
|
|
|
# ---------------------
|
|
|
|
|
# => extend RPC-python-module (capabilities) with
|
|
|
|
|
# those permitted by parent.
|
|
|
|
|
#
|
|
|
|
|
# NOTE, only the root actor should have
|
|
|
|
|
# a pre-permitted entry for `.devx.debug._tty_lock`.
|
|
|
|
|
assert not self.enable_modules
|
|
|
|
|
self.enable_modules.update(
|
|
|
|
|
spawnspec.enable_modules
|
|
|
|
|
)
|
|
|
|
|
|
|
|
|
|
self._parent_main_data = spawnspec._parent_main_data
|
|
|
|
|
# XXX QUESTION(s)^^^
|
|
|
|
|
# -[ ] already set in `.__init__()` right, but how is
|
|
|
|
|
# it diff from this blatant parent copy?
|
|
|
|
|
# -[ ] do we need/want the .__init__() value in
|
|
|
|
|
# just the root case orr?
|
2024-04-02 17:41:52 +00:00
|
|
|
|
|
|
|
|
return (
|
|
|
|
|
chan,
|
|
|
|
|
accept_addrs,
|
2025-06-17 15:33:36 +00:00
|
|
|
_state._runtime_vars['_enable_tpts']
|
2024-04-02 17:41:52 +00:00
|
|
|
)
|
2020-08-08 18:55:41 +00:00
|
|
|
|
2025-04-03 16:22:21 +00:00
|
|
|
# failed to connect back?
|
|
|
|
|
except (
|
|
|
|
|
OSError,
|
|
|
|
|
ConnectionError,
|
|
|
|
|
):
|
2020-08-08 18:55:41 +00:00
|
|
|
log.warning(
|
2024-04-30 16:15:46 +00:00
|
|
|
f'Failed to connect to spawning parent actor!?\n'
|
2025-04-03 16:22:21 +00:00
|
|
|
f'\n'
|
2024-04-30 16:15:46 +00:00
|
|
|
f'x=> {parent_addr}\n'
|
2025-04-03 16:22:21 +00:00
|
|
|
f' |_{self}\n\n'
|
2024-02-22 18:42:48 +00:00
|
|
|
)
|
2024-04-30 16:15:46 +00:00
|
|
|
await self.cancel(req_chan=None) # self cancel
|
2020-08-08 18:55:41 +00:00
|
|
|
raise
|
|
|
|
|
|
2020-11-16 04:54:42 +00:00
|
|
|
def cancel_soon(self) -> None:
    '''
    Cancel this actor asap; can be called from a sync context.

    Schedules runtime cancellation via `Actor.cancel()` inside
    the RPC service nursery, then arranges for the root-most
    task nursery (`._root_tn`) to be cancelled *last*, after the
    full cancel sequence has completed.

    '''
    actor_repr: str = _pformat.nest_from_op(
        input_op='>c(',
        text=self.pformat(),
        nest_indent=1,
    )
    log.cancel(
        # NOTE: fixed previously-unbalanced backtick in this msg.
        '`Actor.cancel_soon()` was called!\n'
        f'>> scheduling `Actor.cancel()`\n'
        f'{actor_repr}'
    )
    # the service nursery must already be up since it is what
    # will actually run the scheduled `.cancel()` task.
    assert self._service_tn
    self._service_tn.start_soon(
        self.cancel,
        None,  # self cancel all rpc tasks
    )

    # schedule a "canceller task" in the `._root_tn` once the
    # `._service_tn` is fully shutdown; task waits for child-ish
    # scopes to fully exit then finally cancels its parent,
    # root-most, scope.
    async def cancel_root_tn_after_services():
        log.runtime(
            'Waiting on service-tn to cancel..\n'
            f'c>)\n'
            f'|_{self._service_tn.cancel_scope!r}\n'
        )
        # block until the full `Actor.cancel()` sequence
        # (scheduled above) has signalled completion.
        await self._cancel_complete.wait()
        log.cancel(
            f'`._service_tn` cancelled\n'
            f'>c)\n'
            f'|_{self._service_tn.cancel_scope!r}\n'
            f'\n'
            f'>> cancelling `._root_tn`\n'
            f'c>(\n'
            f' |_{self._root_tn.cancel_scope!r}\n'
        )
        # the root scope is cancelled last, ensuring parent IPC
        # comms are the final concurrent thing to be torn down.
        self._root_tn.cancel_scope.cancel()

    self._root_tn.start_soon(
        cancel_root_tn_after_services
    )
|
|
|
|
|
|
2025-06-23 21:33:54 +00:00
|
|
|
@property
def cancel_complete(self) -> bool:
    '''
    `True` once this actor's runtime-cancellation sequence has
    fully finished, i.e. the `._cancel_complete` event was set
    at the end of `Actor.cancel()`.

    '''
    done_ev = self._cancel_complete
    return done_ev.is_set()
|
|
|
|
|
|
|
|
|
|
@property
def cancel_called(self) -> bool:
    '''
    Predicate for whether a (remote peer or self) cancel request
    has been received by this actor, i.e. whether a canceller id
    was recorded in `._cancel_called_by`.

    '''
    requester = self._cancel_called_by
    return requester is not None
|
|
|
|
|
|
|
|
|
|
@property
def cancel_caller(self) -> msgtypes.Aid|None:
    '''
    The actor-id (`Aid`) of whichever actor requested this
    actor's cancellation, or `None` when no such request has
    (yet) been received.

    '''
    caller = self._cancel_called_by
    return caller
|
|
|
|
|
|
2023-04-13 20:03:35 +00:00
|
|
|
async def cancel(
    self,

    # chan whose lifetime limits the lifetime of its remotely
    # requested and locally spawned RPC tasks - similar to the
    # supervision semantics of a nursery wherein the actual
    # implementation does start all such tasks in a sub-nursery.
    req_chan: Channel|None,

) -> bool:
    '''
    Cancel this actor's runtime, eventually resulting in
    termination of its containing OS process.

    The ideal "deterministic" teardown sequence in order is:
    - cancel all ongoing rpc tasks by cancel scope.
    - cancel the channel server to prevent new inbound
      connections.
    - cancel the "service" nursery responsible for
      spawning new rpc tasks.
    - return control the parent channel message loop.

    Parameters:
    - req_chan: the requesting peer's IPC channel, or `None`
      to indicate a "self cancel" of ALL rpc tasks.

    Returns `True` once the teardown sequence has run.

    '''
    # Resolve who requested the cancel: a remote peer (when
    # `req_chan` is provided) vs. ourselves (self-cancel).
    # NOTE: peer requests are logged at `.cancel()` level while
    # self-cancels use the quieter `.runtime()` level.
    (
        requesting_aid,  # Aid
        requester_type,  # str
        req_chan,
        log_meth,
    ) = (
        req_chan.aid,
        'peer',
        req_chan,
        log.cancel,
    ) if req_chan else (
        # a self cancel of ALL rpc tasks
        self.aid,
        'self',
        self,
        log.runtime,
    )
    # TODO: just use the new `Context.repr_rpc: str` (and
    # other) repr fields instead of doing this all manual..
    msg: str = (
        f'Actor-runtime cancel request from {requester_type!r}\n'
        f'\n'
        f'<=c)\n'
        f'{self}'
    )

    # record the requester + the fact a cancel was requested
    # *before* doing any actual teardown work.
    # TODO: what happens here when we self-cancel tho?
    self._cancel_called_by: tuple = requesting_aid
    self._cancel_called = True

    # cancel all ongoing rpc tasks
    # NOTE(review): shielded so that an external cancellation of
    # *this* task can't interrupt the teardown sequence mid-way.
    with CancelScope(shield=True):

        # kill any debugger request task to avoid deadlock
        # with the root actor in this tree
        debug_req = debug.DebugStatus
        lock_req_ctx: Context = debug_req.req_ctx
        if (
            lock_req_ctx
            and
            lock_req_ctx.has_outcome
        ):
            msg += (
                f'\n'
                f'-> Cancelling active debugger request..\n'
                f'|_{debug.Lock.repr()}\n\n'
                f'|_{lock_req_ctx}\n\n'
            )
            # lock_req_ctx._scope.cancel()
            # TODO: wrap this in a method-API..
            debug_req.req_cs.cancel()
            # if lock_req_ctx:

        # self-cancel **all** ongoing RPC tasks
        await self.cancel_rpc_tasks(
            req_aid=requesting_aid,
            parent_chan=None,
        )

        # stop channel server: prevents any *new* inbound
        # connections while teardown proceeds.
        if ipc_server := self.ipc_server:
            ipc_server.cancel()
            await ipc_server.wait_for_shutdown()

        # cancel all rpc tasks permanently
        if self._service_tn:
            self._service_tn.cancel_scope.cancel()

    # emit the accumulated status msg at the level chosen above,
    # then signal any waiters (e.g. `cancel_soon()`'s root-tn
    # canceller task) that the full sequence is complete.
    log_meth(msg)
    self._cancel_complete.set()
    return True
|
|
|
|
|
|
2023-04-13 20:03:35 +00:00
|
|
|
async def _cancel_task(
    self,
    cid: str,
    parent_chan: Channel,
    requesting_aid: msgtypes.Aid|None,

    ipc_msg: dict|None|bool = False,

) -> bool:
    '''
    Cancel a local (RPC) task by context-id/channel by calling
    `trio.CancelScope.cancel()` on its surrounding cancel
    scope.

    Parameters:
    - cid: the context-id of the target RPC task.
    - parent_chan: the IPC channel which originally spawned the
      task; part of the `._rpc_tasks` lookup key.
    - requesting_aid: id of the (peer) actor requesting the
      cancel, or `None`.
    - ipc_msg: optional raw cancel msg to record on the ctx.

    Always returns `True` (even when the task was already gone).

    '''

    # this ctx based lookup ensures the requested task to be
    # cancelled was indeed spawned by a request from its
    # parent (or some grandparent's) channel
    ctx: Context
    func: Callable
    is_complete: trio.Event
    try:
        (
            ctx,
            func,
            is_complete,
        ) = self._rpc_tasks[(
            parent_chan,
            cid,
        )]
        scope: CancelScope = ctx._scope

    except KeyError:
        # NOTE: during msging race conditions this will often
        # emit, some examples:
        # - child returns a result before cancel-msg/ctxc-raised
        # - child self raises ctxc before parent send request,
        # - child errors prior to cancel req.
        log.runtime(
            'Cancel request for invalid RPC task.\n'
            'The task likely already completed or was never started!\n\n'
            f'<= canceller: {requesting_aid}\n'
            f'=> {cid}@{parent_chan.uid}\n'
            f' |_{parent_chan}\n'
        )
        return True

    log.cancel(
        'Rxed cancel request for RPC task\n'
        f'{ctx._task!r} <=c) {requesting_aid}\n'
        f'|_>> {ctx.repr_rpc}\n'

        # f'|_{ctx._task}\n'
        # f' >> {ctx.repr_rpc}\n'
        # f'=> {ctx._task}\n'
        # f' >> Actor._cancel_task() => {ctx._task}\n'
        # f' |_ {ctx._task}\n\n'

        # TODO: better ascii repr for "supervisor" like
        # a nursery or context scope?
        # f'=> {parent_chan}\n'
        # f' |_{ctx._task}\n'
        # TODO: simplified `Context.__repr__()` fields output
        # shows only application state-related stuff like,
        # - ._stream
        # - .closed
        # - .started_called
        # - .. etc.
        # f' >> {ctx.repr_rpc}\n'
        # f' |_ctx: {cid}\n'
        # f' >> {ctx._nsf}()\n'
    )
    # record the canceller on the ctx, but only if not already
    # set (first canceller wins).
    if (
        ctx._canceller is None
        and requesting_aid
    ):
        ctx._canceller: tuple = requesting_aid.uid

    # TODO: pack the RPC `{'cmd': <blah>}` msg into a ctxc and
    # then raise and pack it here?
    if (
        ipc_msg
        and ctx._cancel_msg is None
    ):
        # assign RPC msg directly from the loop which usually
        # the case with `ctx.cancel()` on the other side.
        ctx._cancel_msg = ipc_msg

    # don't allow cancelling this function mid-execution
    # (is this necessary?)
    if func is self._cancel_task:
        log.error('Do not cancel a cancel!?')
        return True

    # TODO: shouldn't we eventually be calling ``Context.cancel()``
    # directly here instead (since that method can handle both
    # side's calls into it?
    # await ctx.cancel()
    scope.cancel()

    # wait for _invoke to mark the task complete
    flow_info: str = (
        f'<= canceller: {requesting_aid}\n'
        f'=> ipc-parent: {parent_chan}\n'
        f'|_{ctx}\n'
    )
    log.runtime(
        'Waiting on RPC task to cancel\n\n'
        f'{flow_info}'
    )
    await is_complete.wait()
    log.runtime(
        # NOTE: fixed misspelling ("Sucessfully") in this msg.
        f'Successfully cancelled RPC task\n\n'
        f'{flow_info}'
    )
    return True
|
|
|
|
|
|
2020-10-04 21:58:41 +00:00
|
|
|
async def cancel_rpc_tasks(
    self,
    req_aid: msgtypes.Aid,

    # NOTE: when None is passed we cancel **all** rpc
    # tasks running in this actor!
    parent_chan: Channel|None,

) -> None:
    '''
    Cancel all ongoing RPC tasks owned/spawned for a given
    `parent_chan: Channel` or simply all tasks (inside
    `._service_tn`) when `parent_chan=None`.

    Parameters:
    - req_aid: the id of the actor requesting the cancellation.
    - parent_chan: optional channel-filter; only tasks spawned
      via this channel are cancelled when provided.

    '''
    tasks: dict = self._rpc_tasks
    # fast-path: nothing to cancel, just log + bail.
    if not tasks:
        log.runtime(
            'Actor has no cancellable RPC tasks?\n'
            f'<= canceller: {req_aid.reprol()}\n'
        )
        return

    # Build a (currently mostly-unused, see commented log args
    # below) human-readable summary of all live RPC tasks.
    # TODO: seriously factor this into some helper funcs XD
    tasks_str: str = ''
    for (ctx, func, _) in tasks.values():

        # TODO: std repr of all primitives in
        # a hierarchical tree format, since we can!!
        # like => repr for funcs/addrs/msg-typing:
        #
        # -[ ] use a proper utf8 "arm" like
        # `stackscope` has!
        # -[ ] for typed msging, show the
        # py-type-annot style?
        # - maybe auto-gen via `inspect` / `typing` type-sig:
        # https://stackoverflow.com/a/57110117
        # => see ex. code pasted into `.msg.types`
        #
        # -[ ] proper .maddr() for IPC primitives?
        # - `Channel.maddr() -> str:` obvi!
        # - `Context.maddr() -> str:`
        tasks_str += (
            f' |_@ /ipv4/tcp/cid="{ctx.cid[-16:]} .."\n'
            f' |>> {ctx._nsf}() -> dict:\n'
        )

    descr: str = (
        'all' if not parent_chan
        else
        "IPC channel's "
    )
    rent_chan_repr: str = (
        f' |_{parent_chan}\n\n'
        if parent_chan
        else ''
    )
    log.cancel(
        f'Cancelling {descr} RPC tasks\n\n'
        f'<=c) {req_aid} [canceller]\n'
        f'{rent_chan_repr}'
        f'c)=> {self.uid} [cancellee]\n'
        f' |_{self} [with {len(tasks)} tasks]\n'
        # f' |_tasks: {len(tasks)}\n'
        # f'{tasks_str}'
    )
    # iterate a snapshot (`.copy()`) since `._cancel_task()`
    # mutates `._rpc_tasks` as tasks complete.
    for (
        (task_caller_chan, cid),
        (ctx, func, is_complete),
    ) in tasks.copy().items():

        if (
            # maybe filter to specific IPC channel?
            (parent_chan
             and
             task_caller_chan != parent_chan)

            # never "cancel-a-cancel" XD
            or (func == self._cancel_task)
        ):
            continue

        # TODO: this maybe block on the task cancellation
        # and so should really done in a nursery batch?
        await self._cancel_task(
            cid,
            task_caller_chan,
            requesting_aid=req_aid,
        )

    # any tasks skipped by the filter above may still be live;
    # wait for the all-tasks-done event before returning.
    if tasks:
        log.cancel(
            'Waiting for remaining rpc tasks to complete\n'
            f'|_{tasks_str}'
        )
        await self._ongoing_rpc_tasks.wait()
|
2018-08-01 19:15:18 +00:00
|
|
|
|
2023-09-27 19:19:30 +00:00
|
|
|
@property
def accept_addrs(self) -> list[UnwrappedAddress]:
    '''
    Every (unwrapped) transport address the IPC server is
    currently bound to and accepting new connections on.

    '''
    # delegate to the embedded IPC-transport server which owns
    # all listener/endpoint state for this actor.
    server = self._ipc_server
    return server.accept_addrs
|
2023-09-27 19:19:30 +00:00
|
|
|
|
2024-03-13 19:57:15 +00:00
|
|
|
@property
def accept_addr(self) -> UnwrappedAddress:
    '''
    The primary (first) of the addresses which the IPC
    transport server is bound to and listening on for new
    connections.

    '''
    addrs = self.accept_addrs
    return addrs[0]
|
2018-07-14 20:09:05 +00:00
|
|
|
|
2025-07-08 16:44:46 +00:00
|
|
|
# TODO, this should delegate ONLY to the
# `._spawn_spec._runtime_vars: dict` / `._state` APIs?
#
# XXX, AH RIGHT that's why..
# it's bc we pass this as a CLI flag to the child.py precisely
# bc we need the bootstrapping pre `async_main()`.. but maybe
# keep this as an impl detail and not part of the pub iface impl?
def is_infected_aio(self) -> bool:
    '''
    Whether this actor was started with `trio` running in
    "guest mode" on top of the `asyncio` event loop, in which
    case the `.to_asyncio` APIs may be used to coordinate
    tasks running in either framework within this same actor
    runtime.

    '''
    infected: bool = self._infected_aio
    return infected
|
|
|
|
|
|
2025-07-08 16:44:46 +00:00
|
|
|
# ?TODO, is this the right type for this method?
def get_parent(self) -> Portal:
    '''
    Build and return a `Portal` wrapping the IPC channel
    connected to this actor's immediate parent.

    '''
    parent_chan = self._parent_chan
    assert parent_chan, "No parent channel for this actor?"
    return Portal(parent_chan)
|
|
|
|
|
|
|
|
|
|
# XXX: hard kill logic if needed?
|
|
|
|
|
# def _hard_mofo_kill(self):
|
|
|
|
|
# # If we're the root actor or zombied kill everything
|
|
|
|
|
# if self._parent_chan is None: # TODO: more robust check
|
|
|
|
|
# root = trio.lowlevel.current_root_task()
|
|
|
|
|
# for n in root.child_nurseries:
|
|
|
|
|
# n.cancel_scope.cancel()
|
|
|
|
|
|
2020-07-20 20:06:50 +00:00
|
|
|
|
2022-08-03 19:29:34 +00:00
|
|
|
async def async_main(
|
|
|
|
|
actor: Actor,
|
2025-03-31 01:36:45 +00:00
|
|
|
accept_addrs: UnwrappedAddress|None = None,
|
2022-08-03 19:29:34 +00:00
|
|
|
|
|
|
|
|
# XXX: currently ``parent_addr`` is only needed for the
|
|
|
|
|
# ``multiprocessing`` backend (which pickles state sent to
|
|
|
|
|
# the child instead of relaying it over the connect-back
|
|
|
|
|
# channel). Once that backend is removed we can likely just
|
|
|
|
|
# change this to a simple ``is_subactor: bool`` which will
|
|
|
|
|
# be False when running as root actor and True when as
|
|
|
|
|
# a subactor.
|
2025-03-31 01:36:45 +00:00
|
|
|
parent_addr: UnwrappedAddress|None = None,
|
2026-02-11 01:33:19 +00:00
|
|
|
task_status: TaskStatus[
|
|
|
|
|
tuple[
|
|
|
|
|
list[UnwrappedAddress], # accept_addrs
|
|
|
|
|
list[UnwrappedAddress], # reg_addrs
|
|
|
|
|
]
|
|
|
|
|
] = trio.TASK_STATUS_IGNORED,
|
2022-08-03 19:29:34 +00:00
|
|
|
|
|
|
|
|
) -> None:
|
|
|
|
|
'''
|
2024-03-13 19:57:15 +00:00
|
|
|
Main `Actor` runtime entrypoint; start the transport-specific
|
|
|
|
|
IPC channel server, (maybe) connect back to parent (to receive
|
|
|
|
|
additional config), startup all core `trio` machinery for
|
|
|
|
|
delivering RPCs, register with the discovery system.
|
2022-08-03 19:29:34 +00:00
|
|
|
|
2024-03-13 19:57:15 +00:00
|
|
|
The "root" (or "top-level") and "service" `trio.Nursery`s are
|
|
|
|
|
opened here and when cancelled/terminated effectively shutdown
|
|
|
|
|
the actor's "runtime" and all thus all ongoing RPC tasks.
|
2022-08-03 19:29:34 +00:00
|
|
|
|
|
|
|
|
'''
|
2025-06-11 20:44:47 +00:00
|
|
|
# XXX NOTE, `_state._current_actor` **must** be set prior to
|
|
|
|
|
# calling this core runtime entrypoint!
|
|
|
|
|
assert actor is _state.current_actor()
|
|
|
|
|
|
2025-04-02 02:08:56 +00:00
|
|
|
actor._task: trio.Task = trio.lowlevel.current_task()
|
|
|
|
|
|
2022-10-13 17:12:17 +00:00
|
|
|
# attempt to retreive ``trio``'s sigint handler and stash it
|
2024-04-18 16:47:28 +00:00
|
|
|
# on our debugger state.
|
2025-05-13 16:13:12 +00:00
|
|
|
debug.DebugStatus._trio_handler = signal.getsignal(signal.SIGINT)
|
2022-10-13 17:12:17 +00:00
|
|
|
|
2023-09-27 19:19:30 +00:00
|
|
|
is_registered: bool = False
|
2022-08-03 19:29:34 +00:00
|
|
|
try:
|
|
|
|
|
|
|
|
|
|
# establish primary connection with immediate parent
|
2024-03-25 18:15:36 +00:00
|
|
|
actor._parent_chan: Channel|None = None
|
2022-08-03 19:29:34 +00:00
|
|
|
|
2025-07-07 14:37:02 +00:00
|
|
|
# is this a sub-actor?
|
|
|
|
|
# get runtime info from parent.
|
2025-04-02 02:08:56 +00:00
|
|
|
if parent_addr is not None:
|
2023-09-27 19:19:30 +00:00
|
|
|
(
|
|
|
|
|
actor._parent_chan,
|
|
|
|
|
set_accept_addr_says_rent,
|
2025-03-31 01:36:45 +00:00
|
|
|
maybe_preferred_transports_says_rent,
|
2023-09-27 19:19:30 +00:00
|
|
|
) = await actor._from_parent(parent_addr)
|
2022-08-03 19:29:34 +00:00
|
|
|
|
2025-04-02 02:08:56 +00:00
|
|
|
accept_addrs: list[UnwrappedAddress] = []
|
2023-09-27 19:19:30 +00:00
|
|
|
# either it's passed in because we're not a child or
|
|
|
|
|
# because we're running in mp mode
|
|
|
|
|
if (
|
|
|
|
|
set_accept_addr_says_rent
|
2024-04-02 17:41:52 +00:00
|
|
|
and
|
|
|
|
|
set_accept_addr_says_rent is not None
|
2023-09-27 19:19:30 +00:00
|
|
|
):
|
|
|
|
|
accept_addrs = set_accept_addr_says_rent
|
2025-03-31 01:36:45 +00:00
|
|
|
else:
|
|
|
|
|
enable_transports: list[str] = (
|
|
|
|
|
maybe_preferred_transports_says_rent
|
|
|
|
|
or
|
2025-04-04 00:12:30 +00:00
|
|
|
[_state._def_tpt_proto]
|
2025-03-31 01:36:45 +00:00
|
|
|
)
|
|
|
|
|
for transport_key in enable_transports:
|
|
|
|
|
transport_cls: Type[Address] = get_address_cls(
|
|
|
|
|
transport_key
|
|
|
|
|
)
|
|
|
|
|
addr: Address = transport_cls.get_random()
|
|
|
|
|
accept_addrs.append(addr.unwrap())
|
2022-08-03 19:29:34 +00:00
|
|
|
|
Factor actor-embedded IPC-tpt-server to `ipc` subsys
Primarily moving the `Actor._serve_forever()`-task-as-method and
supporting actor-instance attributes to a new `.ipc._server` sub-mod
which now encapsulates,
- the coupling various `trio.Nursery`s (and their independent lifetime mgmt)
to different `trio.serve_listener()`s tasks and `SocketStream`
handler scopes.
- `Address` and `SocketListener` mgmt and tracking through the idea of
an "IPC endpoint": each "bound-and-active instance" of a served-listener
for some (varied transport protocol's socket) address.
- start and shutdown of the entire server's lifetime via an `@acm`.
- delegation of starting/stopping tpt-protocol-specific `trio.abc.Listener`s
to the corresponding `.ipc._<proto_key>` sub-module (newly defined
mod-top-level instead of `Address` method) `start/close_listener()`
funcs.
Impl details of the `.ipc._server` sub-sys,
- add new `IPCServer`, allocated with `open_ipc_server()`, and which
encapsulates starting multiple-transport-proto-`trio.abc.Listener`s
from an input set of `._addr.Address`s using,
|_`IPCServer.listen_on()` which internally spawns tasks that delegate to a new
`_serve_ipc_eps()`, a rework of what was (effectively)
`Actor._serve_forever()` and which now,
* allocates a new `IPCEndpoint`-struct (see below) for each
address-listener pair alongside the specified
listener-serving/stream-handling `trio.Nursery`s provided by the
caller.
* starts and stops each transport (socket's) listener by calling
`IPCEndpoint.start/close_listener()` which in turn delegates to
the underlying `inspect.getmodule(IPCEndpoint.addr)` backend tpt
module's equivalent impl.
* tracks all created endpoints in a `._endpoints: list[IPCEndpoint]`
which is further exposed through public properties for
introspection of served transport-protocols and their addresses.
|_`IPCServer._[parent/stream_handler]_tn: Nursery`s which are either
allocated (in which case, as the same instance) or provided by the
caller of `open_ipc_server()` such that the same nursery-cancel-scope
controls offered by `trio.serve_listeners(handler_nursery=)` are
offered where the `._parent_tn` is used to spawn `_serve_ipc_eps()`
tasks, and `._stream_handler_tn` is passed verbatim as `handler_nursery`.
- a new `IPCEndpoint`-struct (as mentioned) which wraps each
transport-proto's address + listener + allocated-supervising-nursery
to encapsulate the "lifetime of a server IPC endpoint" such that
eventually we can track and manage per-protocol/address/`.listen_on()`-call
scoped starts/stops/restarts for the purposes of filtering/banning
peer traffic.
|_ also included is an unused `.peer_tpts` table which we can
hopefully use to replace `Actor._peers` in a `Channel`-tracking
transport-proto-aware way!
Surrounding changes to `.ipc.*` primitives to match,
- make `[TCP|UDS]Address` types `msgspec.Struct(frozen=True)` and thus
drop any-and-all `addr._host =` style mutation throughout.
|_ as such also drop their `.__init__()` and `.__eq__()` meths.
|_ UDS tweaks to field names and thus `.__repr__()`.
- move `[TCP|UDS]Address.[start/close]_listener()` meths to be mod-level
equiv `start|close_listener()` funcs.
- just hard code the `.ipc._types._key_to_transport/._addr_to_transport`
table entries instead of all the prior fancy dynamic class property
reading stuff (remember, "explicit is better than implicit").
Modified in `._runtime.Actor` internals,
- drop the `._serve_forever()` and `.cancel_server()`, methods and
`._server_down` waiting logic from `.cancel_soon()`
- add `.[_]ipc_server` which is opened just after the `._service_n` and
delegate to it for any equivalent publicly exposed instance
attributes/properties.
2025-04-10 22:06:12 +00:00
|
|
|
assert accept_addrs
|
Heh, add back `Actor._root_tn`, it has purpose..
Turns out I didn't read my own internals docs/comments and despite it
not being used previously, this adds the real use case: a root,
per-actor, scope which ensures parent comms are the last conc-thing to
be cancelled.
Also, the impl changes here make the test from 6410e45 (or wtv
it's rebased to) pass, i.e. we can support crash handling in the root
actor despite the root-tn having been (self) cancelled.
Superficial adjustments,
- rename `Actor._service_n` -> `._service_tn` everywhere.
- add asserts to `._runtime.async_main()` which ensure that the any
`.trionics.maybe_open_nursery()` calls against optionally passed
`._[root/service]_tn` are allocated-if-not-provided (the
`._service_tn`-case being an i-guess-prep-for-the-future-anti-pattern
Bp).
- obvi adjust all internal usage to match new naming.
Serious/real-use-case changes,
- add (back) a `Actor._root_tn` which sits a scope "above" the
service-tn and is either,
+ assigned in `._runtime.async_main()` for sub-actors OR,
+ assigned in `._root.open_root_actor()` for the root actor.
**THE primary reason** to keep this "upper" tn is that during
a full-`Actor`-cancellation condition (more details below) we want to
ensure that the IPC connection with a sub-actor's parent is **the last
thing to be cancelled**; this is most simply implemented by ensuring
that the `Actor._parent_chan: .ipc.Channel` is handled in an upper
scope in `_rpc.process_messages()`-subtask-terms.
- for the root actor this `root_tn` is allocated in `.open_root_actor()`
body and assigned as such.
- extend `Actor.cancel_soon()` to be cohesive with this entire teardown
"policy" by scheduling a task in the `._root_tn` which,
* waits for the `._service_tn` to complete and then,
* cancels the `._root_tn.cancel_scope`,
* includes "sclangy" console logging throughout.
2025-08-19 23:24:20 +00:00
|
|
|
|
|
|
|
|
ya_root_tn: bool = bool(actor._root_tn)
|
|
|
|
|
ya_service_tn: bool = bool(actor._service_tn)
|
|
|
|
|
|
2025-08-20 16:35:01 +00:00
|
|
|
# NOTE, a top-most "root" nursery in each actor-process
|
|
|
|
|
# enables a lifetime priority for the IPC-channel connection
|
|
|
|
|
# with a sub-actor's immediate parent. I.e. this connection
|
|
|
|
|
# is kept alive as a resilient service connection until all
|
|
|
|
|
# other machinery has exited, cancellation of all
|
|
|
|
|
# embedded/child scopes have completed. This helps ensure
|
|
|
|
|
# a deterministic (and thus "graceful")
|
|
|
|
|
# first-class-supervision style teardown where a parent actor
|
|
|
|
|
# (vs. say peers) is always the last to be contacted before
|
|
|
|
|
# disconnect.
|
2025-06-16 15:58:59 +00:00
|
|
|
root_tn: trio.Nursery
|
|
|
|
|
async with (
|
|
|
|
|
collapse_eg(),
|
Heh, add back `Actor._root_tn`, it has purpose..
Turns out I didn't read my own internals docs/comments and despite it
not being used previously, this adds the real use case: a root,
per-actor, scope which ensures parent comms are the last conc-thing to
be cancelled.
Also, the impl changes here make the test from 6410e45 (or wtv
it's rebased to) pass, i.e. we can support crash handling in the root
actor despite the root-tn having been (self) cancelled.
Superficial adjustments,
- rename `Actor._service_n` -> `._service_tn` everywhere.
- add asserts to `._runtime.async_main()` which ensure that the any
`.trionics.maybe_open_nursery()` calls against optionally passed
`._[root/service]_tn` are allocated-if-not-provided (the
`._service_tn`-case being an i-guess-prep-for-the-future-anti-pattern
Bp).
- obvi adjust all internal usage to match new naming.
Serious/real-use-case changes,
- add (back) a `Actor._root_tn` which sits a scope "above" the
service-tn and is either,
+ assigned in `._runtime.async_main()` for sub-actors OR,
+ assigned in `._root.open_root_actor()` for the root actor.
**THE primary reason** to keep this "upper" tn is that during
a full-`Actor`-cancellation condition (more details below) we want to
ensure that the IPC connection with a sub-actor's parent is **the last
thing to be cancelled**; this is most simply implemented by ensuring
that the `Actor._parent_chan: .ipc.Channel` is handled in an upper
scope in `_rpc.process_messages()`-subtask-terms.
- for the root actor this `root_tn` is allocated in `.open_root_actor()`
body and assigned as such.
- extend `Actor.cancel_soon()` to be cohesive with this entire teardown
"policy" by scheduling a task in the `._root_tn` which,
* waits for the `._service_tn` to complete and then,
* cancels the `._root_tn.cancel_scope`,
* includes "sclangy" console logging throughout.
2025-08-19 23:24:20 +00:00
|
|
|
maybe_open_nursery(
|
|
|
|
|
nursery=actor._root_tn,
|
|
|
|
|
) as root_tn,
|
2025-06-16 15:58:59 +00:00
|
|
|
):
|
Heh, add back `Actor._root_tn`, it has purpose..
Turns out I didn't read my own internals docs/comments and despite it
not being used previously, this adds the real use case: a root,
per-actor, scope which ensures parent comms are the last conc-thing to
be cancelled.
Also, the impl changes here make the test from 6410e45 (or wtv
it's rebased to) pass, i.e. we can support crash handling in the root
actor despite the root-tn having been (self) cancelled.
Superficial adjustments,
- rename `Actor._service_n` -> `._service_tn` everywhere.
- add asserts to `._runtime.async_main()` which ensure that the any
`.trionics.maybe_open_nursery()` calls against optionally passed
`._[root/service]_tn` are allocated-if-not-provided (the
`._service_tn`-case being an i-guess-prep-for-the-future-anti-pattern
Bp).
- obvi adjust all internal usage to match new naming.
Serious/real-use-case changes,
- add (back) a `Actor._root_tn` which sits a scope "above" the
service-tn and is either,
+ assigned in `._runtime.async_main()` for sub-actors OR,
+ assigned in `._root.open_root_actor()` for the root actor.
**THE primary reason** to keep this "upper" tn is that during
a full-`Actor`-cancellation condition (more details below) we want to
ensure that the IPC connection with a sub-actor's parent is **the last
thing to be cancelled**; this is most simply implemented by ensuring
that the `Actor._parent_chan: .ipc.Channel` is handled in an upper
scope in `_rpc.process_messages()`-subtask-terms.
- for the root actor this `root_tn` is allocated in `.open_root_actor()`
body and assigned as such.
- extend `Actor.cancel_soon()` to be cohesive with this entire teardown
"policy" by scheduling a task in the `._root_tn` which,
* waits for the `._service_tn` to complete and then,
* cancels the `._root_tn.cancel_scope`,
* includes "sclangy" console logging throughout.
2025-08-19 23:24:20 +00:00
|
|
|
if ya_root_tn:
|
|
|
|
|
assert root_tn is actor._root_tn
|
|
|
|
|
else:
|
|
|
|
|
actor._root_tn = root_tn
|
2022-08-03 19:29:34 +00:00
|
|
|
|
Factor actor-embedded IPC-tpt-server to `ipc` subsys
Primarily moving the `Actor._serve_forever()`-task-as-method and
supporting actor-instance attributes to a new `.ipo._server` sub-mod
which now encapsulates,
- the coupling various `trio.Nursery`s (and their independent lifetime mgmt)
to different `trio.serve_listener()`s tasks and `SocketStream`
handler scopes.
- `Address` and `SocketListener` mgmt and tracking through the idea of
an "IPC endpoint": each "bound-and-active instance" of a served-listener
for some (varied transport protocol's socket) address.
- start and shutdown of the entire server's lifetime via an `@acm`.
- delegation of starting/stopping tpt-protocol-specific `trio.abc.Listener`s
to the corresponding `.ipc._<proto_key>` sub-module (newly defined
mod-top-level instead of `Address` method) `start/close_listener()`
funcs.
Impl details of the `.ipc._server` sub-sys,
- add new `IPCServer`, allocated with `open_ipc_server()`, and which
encapsulates starting multiple-transport-proto-`trio.abc.Listener`s
from an input set of `._addr.Address`s using,
|_`IPCServer.listen_on()` which internally spawns tasks that delegate to a new
`_serve_ipc_eps()`, a rework of what was (effectively)
`Actor._serve_forever()` and which now,
* allocates a new `IPCEndpoint`-struct (see below) for each
address-listener pair alongside the specified
listener-serving/stream-handling `trio.Nursery`s provided by the
caller.
* starts and stops each transport (socket's) listener by calling
`IPCEndpoint.start/close_listener()` which in turn delegates to
the underlying `inspect.getmodule(IPCEndpoint.addr)` backend tpt
module's equivalent impl.
* tracks all created endpoints in a `._endpoints: list[IPCEndpoint]`
which is further exposed through public properties for
introspection of served transport-protocols and their addresses.
|_`IPCServer._[parent/stream_handler]_tn: Nursery`s which are either
allocated (in which case, as the same instance) or provided by the
caller of `open_ipc_server()` such that the same nursery-cancel-scope
controls offered by `trio.serve_listeners(handler_nursery=)` are
offered where the `._parent_tn` is used to spawn `_serve_ipc_eps()`
tasks, and `._stream_handler_tn` is passed verbatim as `handler_nursery`.
- a new `IPCEndpoint`-struct (as mentioned) which wraps each
transport-proto's address + listener + allocated-supervising-nursery
to encapsulate the "lifetime of a server IPC endpoint" such that
eventually we can track and managed per-protocol/address/`.listen_on()`-call
scoped starts/stops/restarts for the purposes of filtering/banning
peer traffic.
|_ also included is an unused `.peer_tpts` table which we can
hopefully use to replace `Actor._peers` in a `Channel`-tracking
transport-proto-aware way!
Surrounding changes to `.ipc.*` primitives to match,
- make `[TCP|UDS]Address` types `msgspec.Struct(frozen=True)` and thus
drop any-and-all `addr._host =` style mutation throughout.
|_ as such also drop their `.__init__()` and `.__eq__()` meths.
|_ UDS tweaks to field names and thus `.__repr__()`.
- move `[TCP|UDS]Address.[start/close]_listener()` meths to be mod-level
equiv `start|close_listener()` funcs.
- just hard code the `.ipc._types._key_to_transport/._addr_to_transport`
table entries instead of all the prior fancy dynamic class property
reading stuff (remember, "explicit is better then implicit").
Modified in `._runtime.Actor` internals,
- drop the `._serve_forever()` and `.cancel_server()`, methods and
`._server_down` waiting logic from `.cancel_soon()`
- add `.[_]ipc_server` which is opened just after the `._service_n` and
delegate to it for any equivalent publicly exposed instance
attributes/properties.
2025-04-10 22:06:12 +00:00
|
|
|
ipc_server: _server.IPCServer
|
|
|
|
|
async with (
|
2025-07-07 14:37:02 +00:00
|
|
|
collapse_eg(),
|
Heh, add back `Actor._root_tn`, it has purpose..
Turns out I didn't read my own internals docs/comments and despite it
not being used previously, this adds the real use case: a root,
per-actor, scope which ensures parent comms are the last conc-thing to
be cancelled.
Also, the impl changes here make the test from 6410e45 (or wtv
it's rebased to) pass, i.e. we can support crash handling in the root
actor despite the root-tn having been (self) cancelled.
Superficial adjustments,
- rename `Actor._service_n` -> `._service_tn` everywhere.
- add asserts to `._runtime.async_main()` which ensure that the any
`.trionics.maybe_open_nursery()` calls against optionally passed
`._[root/service]_tn` are allocated-if-not-provided (the
`._service_tn`-case being an i-guess-prep-for-the-future-anti-pattern
Bp).
- obvi adjust all internal usage to match new naming.
Serious/real-use-case changes,
- add (back) a `Actor._root_tn` which sits a scope "above" the
service-tn and is either,
+ assigned in `._runtime.async_main()` for sub-actors OR,
+ assigned in `._root.open_root_actor()` for the root actor.
**THE primary reason** to keep this "upper" tn is that during
a full-`Actor`-cancellation condition (more details below) we want to
ensure that the IPC connection with a sub-actor's parent is **the last
thing to be cancelled**; this is most simply implemented by ensuring
that the `Actor._parent_chan: .ipc.Channel` is handled in an upper
scope in `_rpc.process_messages()`-subtask-terms.
- for the root actor this `root_tn` is allocated in `.open_root_actor()`
body and assigned as such.
- extend `Actor.cancel_soon()` to be cohesive with this entire teardown
"policy" by scheduling a task in the `._root_tn` which,
* waits for the `._service_tn` to complete and then,
* cancels the `._root_tn.cancel_scope`,
* includes "sclangy" console logging throughout.
2025-08-19 23:24:20 +00:00
|
|
|
maybe_open_nursery(
|
|
|
|
|
nursery=actor._service_tn,
|
|
|
|
|
) as service_tn,
|
Factor actor-embedded IPC-tpt-server to `ipc` subsys
Primarily moving the `Actor._serve_forever()`-task-as-method and
supporting actor-instance attributes to a new `.ipo._server` sub-mod
which now encapsulates,
- the coupling various `trio.Nursery`s (and their independent lifetime mgmt)
to different `trio.serve_listener()`s tasks and `SocketStream`
handler scopes.
- `Address` and `SocketListener` mgmt and tracking through the idea of
an "IPC endpoint": each "bound-and-active instance" of a served-listener
for some (varied transport protocol's socket) address.
- start and shutdown of the entire server's lifetime via an `@acm`.
- delegation of starting/stopping tpt-protocol-specific `trio.abc.Listener`s
to the corresponding `.ipc._<proto_key>` sub-module (newly defined
mod-top-level instead of `Address` method) `start/close_listener()`
funcs.
Impl details of the `.ipc._server` sub-sys,
- add new `IPCServer`, allocated with `open_ipc_server()`, and which
encapsulates starting multiple-transport-proto-`trio.abc.Listener`s
from an input set of `._addr.Address`s using,
|_`IPCServer.listen_on()` which internally spawns tasks that delegate to a new
`_serve_ipc_eps()`, a rework of what was (effectively)
`Actor._serve_forever()` and which now,
* allocates a new `IPCEndpoint`-struct (see below) for each
address-listener pair alongside the specified
listener-serving/stream-handling `trio.Nursery`s provided by the
caller.
* starts and stops each transport (socket's) listener by calling
`IPCEndpoint.start/close_listener()` which in turn delegates to
the underlying `inspect.getmodule(IPCEndpoint.addr)` backend tpt
module's equivalent impl.
* tracks all created endpoints in a `._endpoints: list[IPCEndpoint]`
which is further exposed through public properties for
introspection of served transport-protocols and their addresses.
|_`IPCServer._[parent/stream_handler]_tn: Nursery`s which are either
allocated (in which case, as the same instance) or provided by the
caller of `open_ipc_server()` such that the same nursery-cancel-scope
controls offered by `trio.serve_listeners(handler_nursery=)` are
offered where the `._parent_tn` is used to spawn `_serve_ipc_eps()`
tasks, and `._stream_handler_tn` is passed verbatim as `handler_nursery`.
- a new `IPCEndpoint`-struct (as mentioned) which wraps each
transport-proto's address + listener + allocated-supervising-nursery
to encapsulate the "lifetime of a server IPC endpoint" such that
eventually we can track and managed per-protocol/address/`.listen_on()`-call
scoped starts/stops/restarts for the purposes of filtering/banning
peer traffic.
|_ also included is an unused `.peer_tpts` table which we can
hopefully use to replace `Actor._peers` in a `Channel`-tracking
transport-proto-aware way!
Surrounding changes to `.ipc.*` primitives to match,
- make `[TCP|UDS]Address` types `msgspec.Struct(frozen=True)` and thus
drop any-and-all `addr._host =` style mutation throughout.
|_ as such also drop their `.__init__()` and `.__eq__()` meths.
|_ UDS tweaks to field names and thus `.__repr__()`.
- move `[TCP|UDS]Address.[start/close]_listener()` meths to be mod-level
equiv `start|close_listener()` funcs.
- just hard code the `.ipc._types._key_to_transport/._addr_to_transport`
table entries instead of all the prior fancy dynamic class property
reading stuff (remember, "explicit is better then implicit").
Modified in `._runtime.Actor` internals,
- drop the `._serve_forever()` and `.cancel_server()`, methods and
`._server_down` waiting logic from `.cancel_soon()`
- add `.[_]ipc_server` which is opened just after the `._service_n` and
delegate to it for any equivalent publicly exposed instance
attributes/properties.
2025-04-10 22:06:12 +00:00
|
|
|
_server.open_ipc_server(
|
2025-08-20 15:35:31 +00:00
|
|
|
parent_tn=service_tn, # ?TODO, why can't this be the root-tn
|
Heh, add back `Actor._root_tn`, it has purpose..
Turns out I didn't read my own internals docs/comments and despite it
not being used previously, this adds the real use case: a root,
per-actor, scope which ensures parent comms are the last conc-thing to
be cancelled.
Also, the impl changes here make the test from 6410e45 (or wtv
it's rebased to) pass, i.e. we can support crash handling in the root
actor despite the root-tn having been (self) cancelled.
Superficial adjustments,
- rename `Actor._service_n` -> `._service_tn` everywhere.
- add asserts to `._runtime.async_main()` which ensure that the any
`.trionics.maybe_open_nursery()` calls against optionally passed
`._[root/service]_tn` are allocated-if-not-provided (the
`._service_tn`-case being an i-guess-prep-for-the-future-anti-pattern
Bp).
- obvi adjust all internal usage to match new naming.
Serious/real-use-case changes,
- add (back) a `Actor._root_tn` which sits a scope "above" the
service-tn and is either,
+ assigned in `._runtime.async_main()` for sub-actors OR,
+ assigned in `._root.open_root_actor()` for the root actor.
**THE primary reason** to keep this "upper" tn is that during
a full-`Actor`-cancellation condition (more details below) we want to
ensure that the IPC connection with a sub-actor's parent is **the last
thing to be cancelled**; this is most simply implemented by ensuring
that the `Actor._parent_chan: .ipc.Channel` is handled in an upper
scope in `_rpc.process_messages()`-subtask-terms.
- for the root actor this `root_tn` is allocated in `.open_root_actor()`
body and assigned as such.
- extend `Actor.cancel_soon()` to be cohesive with this entire teardown
"policy" by scheduling a task in the `._root_tn` which,
* waits for the `._service_tn` to complete and then,
* cancels the `._root_tn.cancel_scope`,
* includes "sclangy" console logging throughout.
2025-08-19 23:24:20 +00:00
|
|
|
stream_handler_tn=service_tn,
|
Factor actor-embedded IPC-tpt-server to `ipc` subsys
Primarily moving the `Actor._serve_forever()`-task-as-method and
supporting actor-instance attributes to a new `.ipo._server` sub-mod
which now encapsulates,
- the coupling various `trio.Nursery`s (and their independent lifetime mgmt)
to different `trio.serve_listener()`s tasks and `SocketStream`
handler scopes.
- `Address` and `SocketListener` mgmt and tracking through the idea of
an "IPC endpoint": each "bound-and-active instance" of a served-listener
for some (varied transport protocol's socket) address.
- start and shutdown of the entire server's lifetime via an `@acm`.
- delegation of starting/stopping tpt-protocol-specific `trio.abc.Listener`s
to the corresponding `.ipc._<proto_key>` sub-module (newly defined
mod-top-level instead of `Address` method) `start/close_listener()`
funcs.
Impl details of the `.ipc._server` sub-sys,
- add new `IPCServer`, allocated with `open_ipc_server()`, and which
encapsulates starting multiple-transport-proto-`trio.abc.Listener`s
from an input set of `._addr.Address`s using,
|_`IPCServer.listen_on()` which internally spawns tasks that delegate to a new
`_serve_ipc_eps()`, a rework of what was (effectively)
`Actor._serve_forever()` and which now,
* allocates a new `IPCEndpoint`-struct (see below) for each
address-listener pair alongside the specified
listener-serving/stream-handling `trio.Nursery`s provided by the
caller.
* starts and stops each transport (socket's) listener by calling
`IPCEndpoint.start/close_listener()` which in turn delegates to
the underlying `inspect.getmodule(IPCEndpoint.addr)` backend tpt
module's equivalent impl.
* tracks all created endpoints in a `._endpoints: list[IPCEndpoint]`
which is further exposed through public properties for
introspection of served transport-protocols and their addresses.
|_`IPCServer._[parent/stream_handler]_tn: Nursery`s which are either
allocated (in which case, as the same instance) or provided by the
caller of `open_ipc_server()` such that the same nursery-cancel-scope
controls offered by `trio.serve_listeners(handler_nursery=)` are
offered where the `._parent_tn` is used to spawn `_serve_ipc_eps()`
tasks, and `._stream_handler_tn` is passed verbatim as `handler_nursery`.
- a new `IPCEndpoint`-struct (as mentioned) which wraps each
transport-proto's address + listener + allocated-supervising-nursery
to encapsulate the "lifetime of a server IPC endpoint" such that
eventually we can track and manage per-protocol/address/`.listen_on()`-call
scoped starts/stops/restarts for the purposes of filtering/banning
peer traffic.
|_ also included is an unused `.peer_tpts` table which we can
hopefully use to replace `Actor._peers` in a `Channel`-tracking
transport-proto-aware way!
Surrounding changes to `.ipc.*` primitives to match,
- make `[TCP|UDS]Address` types `msgspec.Struct(frozen=True)` and thus
drop any-and-all `addr._host =` style mutation throughout.
|_ as such also drop their `.__init__()` and `.__eq__()` meths.
|_ UDS tweaks to field names and thus `.__repr__()`.
- move `[TCP|UDS]Address.[start/close]_listener()` meths to be mod-level
equiv `start|close_listener()` funcs.
- just hard code the `.ipc._types._key_to_transport/._addr_to_transport`
table entries instead of all the prior fancy dynamic class property
reading stuff (remember, "explicit is better than implicit").
Modified in `._runtime.Actor` internals,
- drop the `._serve_forever()` and `.cancel_server()`, methods and
`._server_down` waiting logic from `.cancel_soon()`
- add `.[_]ipc_server` which is opened just after the `._service_n` and
delegate to it for any equivalent publicly exposed instance
attributes/properties.
2025-04-10 22:06:12 +00:00
|
|
|
) as ipc_server,
|
|
|
|
|
|
|
|
|
|
):
|
Heh, add back `Actor._root_tn`, it has purpose..
Turns out I didn't read my own internals docs/comments and despite it
not being used previously, this adds the real use case: a root,
per-actor, scope which ensures parent comms are the last conc-thing to
be cancelled.
Also, the impl changes here make the test from 6410e45 (or wtv
it's rebased to) pass, i.e. we can support crash handling in the root
actor despite the root-tn having been (self) cancelled.
Superficial adjustments,
- rename `Actor._service_n` -> `._service_tn` everywhere.
- add asserts to `._runtime.async_main()` which ensure that any
`.trionics.maybe_open_nursery()` calls against optionally passed
`._[root/service]_tn` are allocated-if-not-provided (the
`._service_tn`-case being an i-guess-prep-for-the-future-anti-pattern
Bp).
- obvi adjust all internal usage to match new naming.
Serious/real-use-case changes,
- add (back) a `Actor._root_tn` which sits a scope "above" the
service-tn and is either,
+ assigned in `._runtime.async_main()` for sub-actors OR,
+ assigned in `._root.open_root_actor()` for the root actor.
**THE primary reason** to keep this "upper" tn is that during
a full-`Actor`-cancellation condition (more details below) we want to
ensure that the IPC connection with a sub-actor's parent is **the last
thing to be cancelled**; this is most simply implemented by ensuring
that the `Actor._parent_chan: .ipc.Channel` is handled in an upper
scope in `_rpc.process_messages()`-subtask-terms.
- for the root actor this `root_tn` is allocated in `.open_root_actor()`
body and assigned as such.
- extend `Actor.cancel_soon()` to be cohesive with this entire teardown
"policy" by scheduling a task in the `._root_tn` which,
* waits for the `._service_tn` to complete and then,
* cancels the `._root_tn.cancel_scope`,
* includes "sclangy" console logging throughout.
2025-08-19 23:24:20 +00:00
|
|
|
if ya_service_tn:
|
|
|
|
|
assert service_tn is actor._service_tn
|
|
|
|
|
else:
|
|
|
|
|
# This nursery is used to handle all inbound
|
|
|
|
|
# connections to us such that if the TCP server
|
|
|
|
|
# is killed, connections can continue to process
|
|
|
|
|
# in the background until this nursery is cancelled.
|
|
|
|
|
actor._service_tn = service_tn
|
|
|
|
|
|
2025-08-20 15:35:31 +00:00
|
|
|
# set after allocate
|
Factor actor-embedded IPC-tpt-server to `ipc` subsys
Primarily moving the `Actor._serve_forever()`-task-as-method and
supporting actor-instance attributes to a new `.ipc._server` sub-mod
which now encapsulates,
- the coupling various `trio.Nursery`s (and their independent lifetime mgmt)
to different `trio.serve_listener()`s tasks and `SocketStream`
handler scopes.
- `Address` and `SocketListener` mgmt and tracking through the idea of
an "IPC endpoint": each "bound-and-active instance" of a served-listener
for some (varied transport protocol's socket) address.
- start and shutdown of the entire server's lifetime via an `@acm`.
- delegation of starting/stopping tpt-protocol-specific `trio.abc.Listener`s
to the corresponding `.ipc._<proto_key>` sub-module (newly defined
mod-top-level instead of `Address` method) `start/close_listener()`
funcs.
Impl details of the `.ipc._server` sub-sys,
- add new `IPCServer`, allocated with `open_ipc_server()`, and which
encapsulates starting multiple-transport-proto-`trio.abc.Listener`s
from an input set of `._addr.Address`s using,
|_`IPCServer.listen_on()` which internally spawns tasks that delegate to a new
`_serve_ipc_eps()`, a rework of what was (effectively)
`Actor._serve_forever()` and which now,
* allocates a new `IPCEndpoint`-struct (see below) for each
address-listener pair alongside the specified
listener-serving/stream-handling `trio.Nursery`s provided by the
caller.
* starts and stops each transport (socket's) listener by calling
`IPCEndpoint.start/close_listener()` which in turn delegates to
the underlying `inspect.getmodule(IPCEndpoint.addr)` backend tpt
module's equivalent impl.
* tracks all created endpoints in a `._endpoints: list[IPCEndpoint]`
which is further exposed through public properties for
introspection of served transport-protocols and their addresses.
|_`IPCServer._[parent/stream_handler]_tn: Nursery`s which are either
allocated (in which case, as the same instance) or provided by the
caller of `open_ipc_server()` such that the same nursery-cancel-scope
controls offered by `trio.serve_listeners(handler_nursery=)` are
offered where the `._parent_tn` is used to spawn `_serve_ipc_eps()`
tasks, and `._stream_handler_tn` is passed verbatim as `handler_nursery`.
- a new `IPCEndpoint`-struct (as mentioned) which wraps each
transport-proto's address + listener + allocated-supervising-nursery
to encapsulate the "lifetime of a server IPC endpoint" such that
eventually we can track and manage per-protocol/address/`.listen_on()`-call
scoped starts/stops/restarts for the purposes of filtering/banning
peer traffic.
|_ also included is an unused `.peer_tpts` table which we can
hopefully use to replace `Actor._peers` in a `Channel`-tracking
transport-proto-aware way!
Surrounding changes to `.ipc.*` primitives to match,
- make `[TCP|UDS]Address` types `msgspec.Struct(frozen=True)` and thus
drop any-and-all `addr._host =` style mutation throughout.
|_ as such also drop their `.__init__()` and `.__eq__()` meths.
|_ UDS tweaks to field names and thus `.__repr__()`.
- move `[TCP|UDS]Address.[start/close]_listener()` meths to be mod-level
equiv `start|close_listener()` funcs.
- just hard code the `.ipc._types._key_to_transport/._addr_to_transport`
table entries instead of all the prior fancy dynamic class property
reading stuff (remember, "explicit is better than implicit").
Modified in `._runtime.Actor` internals,
- drop the `._serve_forever()` and `.cancel_server()`, methods and
`._server_down` waiting logic from `.cancel_soon()`
- add `.[_]ipc_server` which is opened just after the `._service_n` and
delegate to it for any equivalent publicly exposed instance
attributes/properties.
2025-04-10 22:06:12 +00:00
|
|
|
actor._ipc_server = ipc_server
|
2022-08-03 19:29:34 +00:00
|
|
|
|
2024-03-13 13:55:47 +00:00
|
|
|
# load exposed/allowed RPC modules
|
|
|
|
|
# XXX: do this **after** establishing a channel to the parent
|
|
|
|
|
# but **before** starting the message loop for that channel
|
|
|
|
|
# such that import errors are properly propagated upwards
|
|
|
|
|
actor.load_modules()
|
|
|
|
|
|
|
|
|
|
# XXX TODO XXX: figuring out debugging of this
|
|
|
|
|
# would somewhat guarantee "self-hosted" runtime
|
|
|
|
|
# debugging (since it hits all the edge cases?)
|
|
|
|
|
#
|
|
|
|
|
# `tractor.pause()` right?
|
|
|
|
|
# try:
|
|
|
|
|
# actor.load_modules()
|
|
|
|
|
# except ModuleNotFoundError as err:
|
2025-05-13 16:13:12 +00:00
|
|
|
# debug.pause_from_sync()
|
2024-03-13 13:55:47 +00:00
|
|
|
# import pdbp; pdbp.set_trace()
|
|
|
|
|
# raise
|
|
|
|
|
|
|
|
|
|
# Startup up the transport(-channel) server with,
|
2022-08-03 19:29:34 +00:00
|
|
|
# - subactor: the bind address is sent by our parent
|
|
|
|
|
# over our established channel
|
|
|
|
|
# - root actor: the ``accept_addr`` passed to this method
|
|
|
|
|
|
Factor actor-embedded IPC-tpt-server to `ipc` subsys
Primarily moving the `Actor._serve_forever()`-task-as-method and
supporting actor-instance attributes to a new `.ipc._server` sub-mod
which now encapsulates,
- the coupling various `trio.Nursery`s (and their independent lifetime mgmt)
to different `trio.serve_listener()`s tasks and `SocketStream`
handler scopes.
- `Address` and `SocketListener` mgmt and tracking through the idea of
an "IPC endpoint": each "bound-and-active instance" of a served-listener
for some (varied transport protocol's socket) address.
- start and shutdown of the entire server's lifetime via an `@acm`.
- delegation of starting/stopping tpt-protocol-specific `trio.abc.Listener`s
to the corresponding `.ipc._<proto_key>` sub-module (newly defined
mod-top-level instead of `Address` method) `start/close_listener()`
funcs.
Impl details of the `.ipc._server` sub-sys,
- add new `IPCServer`, allocated with `open_ipc_server()`, and which
encapsulates starting multiple-transport-proto-`trio.abc.Listener`s
from an input set of `._addr.Address`s using,
|_`IPCServer.listen_on()` which internally spawns tasks that delegate to a new
`_serve_ipc_eps()`, a rework of what was (effectively)
`Actor._serve_forever()` and which now,
* allocates a new `IPCEndpoint`-struct (see below) for each
address-listener pair alongside the specified
listener-serving/stream-handling `trio.Nursery`s provided by the
caller.
* starts and stops each transport (socket's) listener by calling
`IPCEndpoint.start/close_listener()` which in turn delegates to
the underlying `inspect.getmodule(IPCEndpoint.addr)` backend tpt
module's equivalent impl.
* tracks all created endpoints in a `._endpoints: list[IPCEndpoint]`
which is further exposed through public properties for
introspection of served transport-protocols and their addresses.
|_`IPCServer._[parent/stream_handler]_tn: Nursery`s which are either
allocated (in which case, as the same instance) or provided by the
caller of `open_ipc_server()` such that the same nursery-cancel-scope
controls offered by `trio.serve_listeners(handler_nursery=)` are
offered where the `._parent_tn` is used to spawn `_serve_ipc_eps()`
tasks, and `._stream_handler_tn` is passed verbatim as `handler_nursery`.
- a new `IPCEndpoint`-struct (as mentioned) which wraps each
transport-proto's address + listener + allocated-supervising-nursery
to encapsulate the "lifetime of a server IPC endpoint" such that
eventually we can track and manage per-protocol/address/`.listen_on()`-call
scoped starts/stops/restarts for the purposes of filtering/banning
peer traffic.
|_ also included is an unused `.peer_tpts` table which we can
hopefully use to replace `Actor._peers` in a `Channel`-tracking
transport-proto-aware way!
Surrounding changes to `.ipc.*` primitives to match,
- make `[TCP|UDS]Address` types `msgspec.Struct(frozen=True)` and thus
drop any-and-all `addr._host =` style mutation throughout.
|_ as such also drop their `.__init__()` and `.__eq__()` meths.
|_ UDS tweaks to field names and thus `.__repr__()`.
- move `[TCP|UDS]Address.[start/close]_listener()` meths to be mod-level
equiv `start|close_listener()` funcs.
- just hard code the `.ipc._types._key_to_transport/._addr_to_transport`
table entries instead of all the prior fancy dynamic class property
reading stuff (remember, "explicit is better than implicit").
Modified in `._runtime.Actor` internals,
- drop the `._serve_forever()` and `.cancel_server()`, methods and
`._server_down` waiting logic from `.cancel_soon()`
- add `.[_]ipc_server` which is opened just after the `._service_n` and
delegate to it for any equivalent publicly exposed instance
attributes/properties.
2025-04-10 22:06:12 +00:00
|
|
|
# TODO: why is this not with the root nursery?
|
2025-08-20 15:35:31 +00:00
|
|
|
# - see above that the `._service_tn` is what's used?
|
2023-10-03 14:54:46 +00:00
|
|
|
try:
|
Factor actor-embedded IPC-tpt-server to `ipc` subsys
Primarily moving the `Actor._serve_forever()`-task-as-method and
supporting actor-instance attributes to a new `.ipc._server` sub-mod
which now encapsulates,
- the coupling various `trio.Nursery`s (and their independent lifetime mgmt)
to different `trio.serve_listener()`s tasks and `SocketStream`
handler scopes.
- `Address` and `SocketListener` mgmt and tracking through the idea of
an "IPC endpoint": each "bound-and-active instance" of a served-listener
for some (varied transport protocol's socket) address.
- start and shutdown of the entire server's lifetime via an `@acm`.
- delegation of starting/stopping tpt-protocol-specific `trio.abc.Listener`s
to the corresponding `.ipc._<proto_key>` sub-module (newly defined
mod-top-level instead of `Address` method) `start/close_listener()`
funcs.
Impl details of the `.ipc._server` sub-sys,
- add new `IPCServer`, allocated with `open_ipc_server()`, and which
encapsulates starting multiple-transport-proto-`trio.abc.Listener`s
from an input set of `._addr.Address`s using,
|_`IPCServer.listen_on()` which internally spawns tasks that delegate to a new
`_serve_ipc_eps()`, a rework of what was (effectively)
`Actor._serve_forever()` and which now,
* allocates a new `IPCEndpoint`-struct (see below) for each
address-listener pair alongside the specified
listener-serving/stream-handling `trio.Nursery`s provided by the
caller.
* starts and stops each transport (socket's) listener by calling
`IPCEndpoint.start/close_listener()` which in turn delegates to
the underlying `inspect.getmodule(IPCEndpoint.addr)` backend tpt
module's equivalent impl.
* tracks all created endpoints in a `._endpoints: list[IPCEndpoint]`
which is further exposed through public properties for
introspection of served transport-protocols and their addresses.
|_`IPCServer._[parent/stream_handler]_tn: Nursery`s which are either
allocated (in which case, as the same instance) or provided by the
caller of `open_ipc_server()` such that the same nursery-cancel-scope
controls offered by `trio.serve_listeners(handler_nursery=)` are
offered where the `._parent_tn` is used to spawn `_serve_ipc_eps()`
tasks, and `._stream_handler_tn` is passed verbatim as `handler_nursery`.
- a new `IPCEndpoint`-struct (as mentioned) which wraps each
transport-proto's address + listener + allocated-supervising-nursery
to encapsulate the "lifetime of a server IPC endpoint" such that
eventually we can track and manage per-protocol/address/`.listen_on()`-call
scoped starts/stops/restarts for the purposes of filtering/banning
peer traffic.
|_ also included is an unused `.peer_tpts` table which we can
hopefully use to replace `Actor._peers` in a `Channel`-tracking
transport-proto-aware way!
Surrounding changes to `.ipc.*` primitives to match,
- make `[TCP|UDS]Address` types `msgspec.Struct(frozen=True)` and thus
drop any-and-all `addr._host =` style mutation throughout.
|_ as such also drop their `.__init__()` and `.__eq__()` meths.
|_ UDS tweaks to field names and thus `.__repr__()`.
- move `[TCP|UDS]Address.[start/close]_listener()` meths to be mod-level
equiv `start|close_listener()` funcs.
- just hard code the `.ipc._types._key_to_transport/._addr_to_transport`
table entries instead of all the prior fancy dynamic class property
reading stuff (remember, "explicit is better than implicit").
Modified in `._runtime.Actor` internals,
- drop the `._serve_forever()` and `.cancel_server()`, methods and
`._server_down` waiting logic from `.cancel_soon()`
- add `.[_]ipc_server` which is opened just after the `._service_n` and
delegate to it for any equivalent publicly exposed instance
attributes/properties.
2025-04-10 22:06:12 +00:00
|
|
|
eps: list = await ipc_server.listen_on(
|
|
|
|
|
accept_addrs=accept_addrs,
|
Heh, add back `Actor._root_tn`, it has purpose..
Turns out I didn't read my own internals docs/comments and despite it
not being used previously, this adds the real use case: a root,
per-actor, scope which ensures parent comms are the last conc-thing to
be cancelled.
Also, the impl changes here make the test from 6410e45 (or wtv
it's rebased to) pass, i.e. we can support crash handling in the root
actor despite the root-tn having been (self) cancelled.
Superficial adjustments,
- rename `Actor._service_n` -> `._service_tn` everywhere.
- add asserts to `._runtime.async_main()` which ensure that any
`.trionics.maybe_open_nursery()` calls against optionally passed
`._[root/service]_tn` are allocated-if-not-provided (the
`._service_tn`-case being an i-guess-prep-for-the-future-anti-pattern
Bp).
- obvi adjust all internal usage to match new naming.
Serious/real-use-case changes,
- add (back) a `Actor._root_tn` which sits a scope "above" the
service-tn and is either,
+ assigned in `._runtime.async_main()` for sub-actors OR,
+ assigned in `._root.open_root_actor()` for the root actor.
**THE primary reason** to keep this "upper" tn is that during
a full-`Actor`-cancellation condition (more details below) we want to
ensure that the IPC connection with a sub-actor's parent is **the last
thing to be cancelled**; this is most simply implemented by ensuring
that the `Actor._parent_chan: .ipc.Channel` is handled in an upper
scope in `_rpc.process_messages()`-subtask-terms.
- for the root actor this `root_tn` is allocated in `.open_root_actor()`
body and assigned as such.
- extend `Actor.cancel_soon()` to be cohesive with this entire teardown
"policy" by scheduling a task in the `._root_tn` which,
* waits for the `._service_tn` to complete and then,
* cancels the `._root_tn.cancel_scope`,
* includes "sclangy" console logging throughout.
2025-08-19 23:24:20 +00:00
|
|
|
stream_handler_nursery=service_tn,
|
Factor actor-embedded IPC-tpt-server to `ipc` subsys
Primarily moving the `Actor._serve_forever()`-task-as-method and
supporting actor-instance attributes to a new `.ipc._server` sub-mod
which now encapsulates,
- the coupling various `trio.Nursery`s (and their independent lifetime mgmt)
to different `trio.serve_listener()`s tasks and `SocketStream`
handler scopes.
- `Address` and `SocketListener` mgmt and tracking through the idea of
an "IPC endpoint": each "bound-and-active instance" of a served-listener
for some (varied transport protocol's socket) address.
- start and shutdown of the entire server's lifetime via an `@acm`.
- delegation of starting/stopping tpt-protocol-specific `trio.abc.Listener`s
to the corresponding `.ipc._<proto_key>` sub-module (newly defined
mod-top-level instead of `Address` method) `start/close_listener()`
funcs.
Impl details of the `.ipc._server` sub-sys,
- add new `IPCServer`, allocated with `open_ipc_server()`, and which
encapsulates starting multiple-transport-proto-`trio.abc.Listener`s
from an input set of `._addr.Address`s using,
|_`IPCServer.listen_on()` which internally spawns tasks that delegate to a new
`_serve_ipc_eps()`, a rework of what was (effectively)
`Actor._serve_forever()` and which now,
* allocates a new `IPCEndpoint`-struct (see below) for each
address-listener pair alongside the specified
listener-serving/stream-handling `trio.Nursery`s provided by the
caller.
* starts and stops each transport (socket's) listener by calling
`IPCEndpoint.start/close_listener()` which in turn delegates to
the underlying `inspect.getmodule(IPCEndpoint.addr)` backend tpt
module's equivalent impl.
* tracks all created endpoints in a `._endpoints: list[IPCEndpoint]`
which is further exposed through public properties for
introspection of served transport-protocols and their addresses.
|_`IPCServer._[parent/stream_handler]_tn: Nursery`s which are either
allocated (in which case, as the same instance) or provided by the
caller of `open_ipc_server()` such that the same nursery-cancel-scope
controls offered by `trio.serve_listeners(handler_nursery=)` are
offered where the `._parent_tn` is used to spawn `_serve_ipc_eps()`
tasks, and `._stream_handler_tn` is passed verbatim as `handler_nursery`.
- a new `IPCEndpoint`-struct (as mentioned) which wraps each
transport-proto's address + listener + allocated-supervising-nursery
to encapsulate the "lifetime of a server IPC endpoint" such that
eventually we can track and manage per-protocol/address/`.listen_on()`-call
scoped starts/stops/restarts for the purposes of filtering/banning
peer traffic.
|_ also included is an unused `.peer_tpts` table which we can
hopefully use to replace `Actor._peers` in a `Channel`-tracking
transport-proto-aware way!
Surrounding changes to `.ipc.*` primitives to match,
- make `[TCP|UDS]Address` types `msgspec.Struct(frozen=True)` and thus
drop any-and-all `addr._host =` style mutation throughout.
|_ as such also drop their `.__init__()` and `.__eq__()` meths.
|_ UDS tweaks to field names and thus `.__repr__()`.
- move `[TCP|UDS]Address.[start/close]_listener()` meths to be mod-level
equiv `start|close_listener()` funcs.
- just hard code the `.ipc._types._key_to_transport/._addr_to_transport`
table entries instead of all the prior fancy dynamic class property
reading stuff (remember, "explicit is better than implicit").
Modified in `._runtime.Actor` internals,
- drop the `._serve_forever()` and `.cancel_server()`, methods and
`._server_down` waiting logic from `.cancel_soon()`
- add `.[_]ipc_server` which is opened just after the `._service_n` and
delegate to it for any equivalent publicly exposed instance
attributes/properties.
2025-04-10 22:06:12 +00:00
|
|
|
)
|
|
|
|
|
log.runtime(
|
|
|
|
|
f'Booted IPC server\n'
|
|
|
|
|
f'{ipc_server}\n'
|
|
|
|
|
)
|
|
|
|
|
assert (
|
|
|
|
|
(eps[0].listen_tn)
|
Heh, add back `Actor._root_tn`, it has purpose..
Turns out I didn't read my own internals docs/comments and despite it
not being used previously, this adds the real use case: a root,
per-actor, scope which ensures parent comms are the last conc-thing to
be cancelled.
Also, the impl changes here make the test from 6410e45 (or wtv
it's rebased to) pass, i.e. we can support crash handling in the root
actor despite the root-tn having been (self) cancelled.
Superficial adjustments,
- rename `Actor._service_n` -> `._service_tn` everywhere.
- add asserts to `._runtime.async_main()` which ensure that any
`.trionics.maybe_open_nursery()` calls against optionally passed
`._[root/service]_tn` are allocated-if-not-provided (the
`._service_tn`-case being an i-guess-prep-for-the-future-anti-pattern
Bp).
- obvi adjust all internal usage to match new naming.
Serious/real-use-case changes,
- add (back) a `Actor._root_tn` which sits a scope "above" the
service-tn and is either,
+ assigned in `._runtime.async_main()` for sub-actors OR,
+ assigned in `._root.open_root_actor()` for the root actor.
**THE primary reason** to keep this "upper" tn is that during
a full-`Actor`-cancellation condition (more details below) we want to
ensure that the IPC connection with a sub-actor's parent is **the last
thing to be cancelled**; this is most simply implemented by ensuring
that the `Actor._parent_chan: .ipc.Channel` is handled in an upper
scope in `_rpc.process_messages()`-subtask-terms.
- for the root actor this `root_tn` is allocated in `.open_root_actor()`
body and assigned as such.
- extend `Actor.cancel_soon()` to be cohesive with this entire teardown
"policy" by scheduling a task in the `._root_tn` which,
* waits for the `._service_tn` to complete and then,
* cancels the `._root_tn.cancel_scope`,
* includes "sclangy" console logging throughout.
2025-08-19 23:24:20 +00:00
|
|
|
is not service_tn
|
Factor actor-embedded IPC-tpt-server to `ipc` subsys
Primarily moving the `Actor._serve_forever()`-task-as-method and
supporting actor-instance attributes to a new `.ipc._server` sub-mod
which now encapsulates,
- the coupling various `trio.Nursery`s (and their independent lifetime mgmt)
to different `trio.serve_listener()`s tasks and `SocketStream`
handler scopes.
- `Address` and `SocketListener` mgmt and tracking through the idea of
an "IPC endpoint": each "bound-and-active instance" of a served-listener
for some (varied transport protocol's socket) address.
- start and shutdown of the entire server's lifetime via an `@acm`.
- delegation of starting/stopping tpt-protocol-specific `trio.abc.Listener`s
to the corresponding `.ipc._<proto_key>` sub-module (newly defined
mod-top-level instead of `Address` method) `start/close_listener()`
funcs.
Impl details of the `.ipc._server` sub-sys,
- add new `IPCServer`, allocated with `open_ipc_server()`, and which
encapsulates starting multiple-transport-proto-`trio.abc.Listener`s
from an input set of `._addr.Address`s using,
|_`IPCServer.listen_on()` which internally spawns tasks that delegate to a new
`_serve_ipc_eps()`, a rework of what was (effectively)
`Actor._serve_forever()` and which now,
* allocates a new `IPCEndpoint`-struct (see below) for each
address-listener pair alongside the specified
listener-serving/stream-handling `trio.Nursery`s provided by the
caller.
* starts and stops each transport (socket's) listener by calling
`IPCEndpoint.start/close_listener()` which in turn delegates to
the underlying `inspect.getmodule(IPCEndpoint.addr)` backend tpt
module's equivalent impl.
* tracks all created endpoints in a `._endpoints: list[IPCEndpoint]`
which is further exposed through public properties for
introspection of served transport-protocols and their addresses.
|_`IPCServer._[parent/stream_handler]_tn: Nursery`s which are either
allocated (in which case, as the same instance) or provided by the
caller of `open_ipc_server()` such that the same nursery-cancel-scope
controls offered by `trio.serve_listeners(handler_nursery=)` are
offered where the `._parent_tn` is used to spawn `_serve_ipc_eps()`
tasks, and `._stream_handler_tn` is passed verbatim as `handler_nursery`.
- a new `IPCEndpoint`-struct (as mentioned) which wraps each
transport-proto's address + listener + allocated-supervising-nursery
to encapsulate the "lifetime of a server IPC endpoint" such that
eventually we can track and manage per-protocol/address/`.listen_on()`-call
scoped starts/stops/restarts for the purposes of filtering/banning
peer traffic.
|_ also included is an unused `.peer_tpts` table which we can
hopefully use to replace `Actor._peers` in a `Channel`-tracking
transport-proto-aware way!
Surrounding changes to `.ipc.*` primitives to match,
- make `[TCP|UDS]Address` types `msgspec.Struct(frozen=True)` and thus
drop any-and-all `addr._host =` style mutation throughout.
|_ as such also drop their `.__init__()` and `.__eq__()` meths.
|_ UDS tweaks to field names and thus `.__repr__()`.
- move `[TCP|UDS]Address.[start/close]_listener()` meths to be mod-level
equiv `start|close_listener()` funcs.
- just hard code the `.ipc._types._key_to_transport/._addr_to_transport`
table entries instead of all the prior fancy dynamic class property
reading stuff (remember, "explicit is better than implicit").
Modified in `._runtime.Actor` internals,
- drop the `._serve_forever()` and `.cancel_server()`, methods and
`._server_down` waiting logic from `.cancel_soon()`
- add `.[_]ipc_server` which is opened just after the `._service_n` and
delegate to it for any equivalent publicly exposed instance
attributes/properties.
2025-04-10 22:06:12 +00:00
|
|
|
)
|
|
|
|
|
|
2023-10-03 14:54:46 +00:00
|
|
|
except OSError as oserr:
|
|
|
|
|
# NOTE: always allow runtime hackers to debug
|
|
|
|
|
# transport address bind errors - normally it's
|
|
|
|
|
# something silly like the wrong socket-address
|
|
|
|
|
# passed via a config or CLI Bo
|
2025-05-13 16:13:12 +00:00
|
|
|
entered_debug: bool = await debug._maybe_enter_pm(
|
Factor actor-embedded IPC-tpt-server to `ipc` subsys
Primarily moving the `Actor._serve_forever()`-task-as-method and
supporting actor-instance attributes to a new `.ipc._server` sub-mod
which now encapsulates,
- the coupling various `trio.Nursery`s (and their independent lifetime mgmt)
to different `trio.serve_listener()`s tasks and `SocketStream`
handler scopes.
- `Address` and `SocketListener` mgmt and tracking through the idea of
an "IPC endpoint": each "bound-and-active instance" of a served-listener
for some (varied transport protocol's socket) address.
- start and shutdown of the entire server's lifetime via an `@acm`.
- delegation of starting/stopping tpt-protocol-specific `trio.abc.Listener`s
to the corresponding `.ipc._<proto_key>` sub-module (newly defined
mod-top-level instead of `Address` method) `start/close_listener()`
funcs.
Impl details of the `.ipc._server` sub-sys,
- add new `IPCServer`, allocated with `open_ipc_server()`, and which
encapsulates starting multiple-transport-proto-`trio.abc.Listener`s
from an input set of `._addr.Address`s using,
|_`IPCServer.listen_on()` which internally spawns tasks that delegate to a new
`_serve_ipc_eps()`, a rework of what was (effectively)
`Actor._serve_forever()` and which now,
* allocates a new `IPCEndpoint`-struct (see below) for each
address-listener pair alongside the specified
listener-serving/stream-handling `trio.Nursery`s provided by the
caller.
* starts and stops each transport (socket's) listener by calling
`IPCEndpoint.start/close_listener()` which in turn delegates to
the underlying `inspect.getmodule(IPCEndpoint.addr)` backend tpt
module's equivalent impl.
* tracks all created endpoints in a `._endpoints: list[IPCEndpoint]`
which is further exposed through public properties for
introspection of served transport-protocols and their addresses.
|_`IPCServer._[parent/stream_handler]_tn: Nursery`s which are either
allocated (in which case, as the same instance) or provided by the
caller of `open_ipc_server()` such that the same nursery-cancel-scope
controls offered by `trio.serve_listeners(handler_nursery=)` are
offered where the `._parent_tn` is used to spawn `_serve_ipc_eps()`
tasks, and `._stream_handler_tn` is passed verbatim as `handler_nursery`.
- a new `IPCEndpoint`-struct (as mentioned) which wraps each
transport-proto's address + listener + allocated-supervising-nursery
to encapsulate the "lifetime of a server IPC endpoint" such that
eventually we can track and managed per-protocol/address/`.listen_on()`-call
scoped starts/stops/restarts for the purposes of filtering/banning
peer traffic.
|_ also included is an unused `.peer_tpts` table which we can
hopefully use to replace `Actor._peers` in a `Channel`-tracking
transport-proto-aware way!
Surrounding changes to `.ipc.*` primitives to match,
- make `[TCP|UDS]Address` types `msgspec.Struct(frozen=True)` and thus
drop any-and-all `addr._host =` style mutation throughout.
|_ as such also drop their `.__init__()` and `.__eq__()` meths.
|_ UDS tweaks to field names and thus `.__repr__()`.
- move `[TCP|UDS]Address.[start/close]_listener()` meths to be mod-level
equiv `start|close_listener()` funcs.
- just hard code the `.ipc._types._key_to_transport/._addr_to_transport`
table entries instead of all the prior fancy dynamic class property
reading stuff (remember, "explicit is better then implicit").
Modified in `._runtime.Actor` internals,
- drop the `._serve_forever()` and `.cancel_server()`, methods and
`._server_down` waiting logic from `.cancel_soon()`
- add `.[_]ipc_server` which is opened just after the `._service_n` and
delegate to it for any equivalent publicly exposed instance
attributes/properties.
2025-04-10 22:06:12 +00:00
|
|
|
oserr,
|
|
|
|
|
)
|
2024-01-02 14:08:39 +00:00
|
|
|
if not entered_debug:
|
Factor actor-embedded IPC-tpt-server to `ipc` subsys
Primarily moving the `Actor._serve_forever()`-task-as-method and
supporting actor-instance attributes to a new `.ipo._server` sub-mod
which now encapsulates,
- the coupling various `trio.Nursery`s (and their independent lifetime mgmt)
to different `trio.serve_listener()`s tasks and `SocketStream`
handler scopes.
- `Address` and `SocketListener` mgmt and tracking through the idea of
an "IPC endpoint": each "bound-and-active instance" of a served-listener
for some (varied transport protocol's socket) address.
- start and shutdown of the entire server's lifetime via an `@acm`.
- delegation of starting/stopping tpt-protocol-specific `trio.abc.Listener`s
to the corresponding `.ipc._<proto_key>` sub-module (newly defined
mod-top-level instead of `Address` method) `start/close_listener()`
funcs.
Impl details of the `.ipc._server` sub-sys,
- add new `IPCServer`, allocated with `open_ipc_server()`, and which
encapsulates starting multiple-transport-proto-`trio.abc.Listener`s
from an input set of `._addr.Address`s using,
|_`IPCServer.listen_on()` which internally spawns tasks that delegate to a new
`_serve_ipc_eps()`, a rework of what was (effectively)
`Actor._serve_forever()` and which now,
* allocates a new `IPCEndpoint`-struct (see below) for each
address-listener pair alongside the specified
listener-serving/stream-handling `trio.Nursery`s provided by the
caller.
* starts and stops each transport (socket's) listener by calling
`IPCEndpoint.start/close_listener()` which in turn delegates to
the underlying `inspect.getmodule(IPCEndpoint.addr)` backend tpt
module's equivalent impl.
* tracks all created endpoints in a `._endpoints: list[IPCEndpoint]`
which is further exposed through public properties for
introspection of served transport-protocols and their addresses.
|_`IPCServer._[parent/stream_handler]_tn: Nursery`s which are either
allocated (in which case, as the same instance) or provided by the
caller of `open_ipc_server()` such that the same nursery-cancel-scope
controls offered by `trio.serve_listeners(handler_nursery=)` are
offered where the `._parent_tn` is used to spawn `_serve_ipc_eps()`
tasks, and `._stream_handler_tn` is passed verbatim as `handler_nursery`.
- a new `IPCEndpoint`-struct (as mentioned) which wraps each
transport-proto's address + listener + allocated-supervising-nursery
to encapsulate the "lifetime of a server IPC endpoint" such that
eventually we can track and managed per-protocol/address/`.listen_on()`-call
scoped starts/stops/restarts for the purposes of filtering/banning
peer traffic.
|_ also included is an unused `.peer_tpts` table which we can
hopefully use to replace `Actor._peers` in a `Channel`-tracking
transport-proto-aware way!
Surrounding changes to `.ipc.*` primitives to match,
- make `[TCP|UDS]Address` types `msgspec.Struct(frozen=True)` and thus
drop any-and-all `addr._host =` style mutation throughout.
|_ as such also drop their `.__init__()` and `.__eq__()` meths.
|_ UDS tweaks to field names and thus `.__repr__()`.
- move `[TCP|UDS]Address.[start/close]_listener()` meths to be mod-level
equiv `start|close_listener()` funcs.
- just hard code the `.ipc._types._key_to_transport/._addr_to_transport`
table entries instead of all the prior fancy dynamic class property
reading stuff (remember, "explicit is better then implicit").
Modified in `._runtime.Actor` internals,
- drop the `._serve_forever()` and `.cancel_server()`, methods and
`._server_down` waiting logic from `.cancel_soon()`
- add `.[_]ipc_server` which is opened just after the `._service_n` and
delegate to it for any equivalent publicly exposed instance
attributes/properties.
2025-04-10 22:06:12 +00:00
|
|
|
log.exception('Failed to init IPC server !?\n')
|
2024-05-08 18:53:45 +00:00
|
|
|
else:
|
|
|
|
|
log.runtime('Exited debug REPL..')
|
|
|
|
|
|
2023-10-03 14:54:46 +00:00
|
|
|
raise
|
|
|
|
|
|
Factor actor-embedded IPC-tpt-server to `ipc` subsys
Primarily moving the `Actor._serve_forever()`-task-as-method and
supporting actor-instance attributes to a new `.ipo._server` sub-mod
which now encapsulates,
- the coupling various `trio.Nursery`s (and their independent lifetime mgmt)
to different `trio.serve_listener()`s tasks and `SocketStream`
handler scopes.
- `Address` and `SocketListener` mgmt and tracking through the idea of
an "IPC endpoint": each "bound-and-active instance" of a served-listener
for some (varied transport protocol's socket) address.
- start and shutdown of the entire server's lifetime via an `@acm`.
- delegation of starting/stopping tpt-protocol-specific `trio.abc.Listener`s
to the corresponding `.ipc._<proto_key>` sub-module (newly defined
mod-top-level instead of `Address` method) `start/close_listener()`
funcs.
Impl details of the `.ipc._server` sub-sys,
- add new `IPCServer`, allocated with `open_ipc_server()`, and which
encapsulates starting multiple-transport-proto-`trio.abc.Listener`s
from an input set of `._addr.Address`s using,
|_`IPCServer.listen_on()` which internally spawns tasks that delegate to a new
`_serve_ipc_eps()`, a rework of what was (effectively)
`Actor._serve_forever()` and which now,
* allocates a new `IPCEndpoint`-struct (see below) for each
address-listener pair alongside the specified
listener-serving/stream-handling `trio.Nursery`s provided by the
caller.
* starts and stops each transport (socket's) listener by calling
`IPCEndpoint.start/close_listener()` which in turn delegates to
the underlying `inspect.getmodule(IPCEndpoint.addr)` backend tpt
module's equivalent impl.
* tracks all created endpoints in a `._endpoints: list[IPCEndpoint]`
which is further exposed through public properties for
introspection of served transport-protocols and their addresses.
|_`IPCServer._[parent/stream_handler]_tn: Nursery`s which are either
allocated (in which case, as the same instance) or provided by the
caller of `open_ipc_server()` such that the same nursery-cancel-scope
controls offered by `trio.serve_listeners(handler_nursery=)` are
offered where the `._parent_tn` is used to spawn `_serve_ipc_eps()`
tasks, and `._stream_handler_tn` is passed verbatim as `handler_nursery`.
- a new `IPCEndpoint`-struct (as mentioned) which wraps each
transport-proto's address + listener + allocated-supervising-nursery
to encapsulate the "lifetime of a server IPC endpoint" such that
eventually we can track and managed per-protocol/address/`.listen_on()`-call
scoped starts/stops/restarts for the purposes of filtering/banning
peer traffic.
|_ also included is an unused `.peer_tpts` table which we can
hopefully use to replace `Actor._peers` in a `Channel`-tracking
transport-proto-aware way!
Surrounding changes to `.ipc.*` primitives to match,
- make `[TCP|UDS]Address` types `msgspec.Struct(frozen=True)` and thus
drop any-and-all `addr._host =` style mutation throughout.
|_ as such also drop their `.__init__()` and `.__eq__()` meths.
|_ UDS tweaks to field names and thus `.__repr__()`.
- move `[TCP|UDS]Address.[start/close]_listener()` meths to be mod-level
equiv `start|close_listener()` funcs.
- just hard code the `.ipc._types._key_to_transport/._addr_to_transport`
table entries instead of all the prior fancy dynamic class property
reading stuff (remember, "explicit is better then implicit").
Modified in `._runtime.Actor` internals,
- drop the `._serve_forever()` and `.cancel_server()`, methods and
`._server_down` waiting logic from `.cancel_soon()`
- add `.[_]ipc_server` which is opened just after the `._service_n` and
delegate to it for any equivalent publicly exposed instance
attributes/properties.
2025-04-10 22:06:12 +00:00
|
|
|
# TODO, just read direct from ipc_server?
|
2025-03-31 01:36:45 +00:00
|
|
|
accept_addrs: list[UnwrappedAddress] = actor.accept_addrs
|
2023-09-27 19:19:30 +00:00
|
|
|
|
2022-08-03 19:29:34 +00:00
|
|
|
# Register with the arbiter if we're told its addr
|
2023-09-27 19:19:30 +00:00
|
|
|
log.runtime(
|
2024-06-28 18:25:53 +00:00
|
|
|
f'Registering `{actor.name}` => {pformat(accept_addrs)}\n'
|
|
|
|
|
# ^-TODO-^ we should instead show the maddr here^^
|
2023-09-27 19:19:30 +00:00
|
|
|
)
|
|
|
|
|
|
|
|
|
|
# TODO: ideally we don't fan out to all registrars
|
|
|
|
|
# if addresses point to the same actor..
|
|
|
|
|
# So we need a way to detect that? maybe iterate
|
|
|
|
|
# only on unique actor uids?
|
2026-02-11 01:33:19 +00:00
|
|
|
addr: UnwrappedAddress
|
2023-10-19 16:05:44 +00:00
|
|
|
for addr in actor.reg_addrs:
|
|
|
|
|
try:
|
2025-03-23 03:14:04 +00:00
|
|
|
waddr = wrap_address(addr)
|
|
|
|
|
assert waddr.is_valid
|
2023-10-19 16:05:44 +00:00
|
|
|
except AssertionError:
|
2025-05-13 16:13:12 +00:00
|
|
|
await debug.pause()
|
2023-09-27 19:19:30 +00:00
|
|
|
|
2025-07-07 14:37:02 +00:00
|
|
|
# !TODO, get rid of the local-portal crap XD
|
2026-02-11 01:33:19 +00:00
|
|
|
reg_portal: Portal
|
2025-03-23 03:14:04 +00:00
|
|
|
async with get_registry(addr) as reg_portal:
|
2026-02-11 01:33:19 +00:00
|
|
|
accept_addr: UnwrappedAddress
|
2023-09-27 19:19:30 +00:00
|
|
|
for accept_addr in accept_addrs:
|
2025-03-23 03:14:04 +00:00
|
|
|
accept_addr = wrap_address(accept_addr)
|
Factor actor-embedded IPC-tpt-server to `ipc` subsys
Primarily moving the `Actor._serve_forever()`-task-as-method and
supporting actor-instance attributes to a new `.ipo._server` sub-mod
which now encapsulates,
- the coupling various `trio.Nursery`s (and their independent lifetime mgmt)
to different `trio.serve_listener()`s tasks and `SocketStream`
handler scopes.
- `Address` and `SocketListener` mgmt and tracking through the idea of
an "IPC endpoint": each "bound-and-active instance" of a served-listener
for some (varied transport protocol's socket) address.
- start and shutdown of the entire server's lifetime via an `@acm`.
- delegation of starting/stopping tpt-protocol-specific `trio.abc.Listener`s
to the corresponding `.ipc._<proto_key>` sub-module (newly defined
mod-top-level instead of `Address` method) `start/close_listener()`
funcs.
Impl details of the `.ipc._server` sub-sys,
- add new `IPCServer`, allocated with `open_ipc_server()`, and which
encapsulates starting multiple-transport-proto-`trio.abc.Listener`s
from an input set of `._addr.Address`s using,
|_`IPCServer.listen_on()` which internally spawns tasks that delegate to a new
`_serve_ipc_eps()`, a rework of what was (effectively)
`Actor._serve_forever()` and which now,
* allocates a new `IPCEndpoint`-struct (see below) for each
address-listener pair alongside the specified
listener-serving/stream-handling `trio.Nursery`s provided by the
caller.
* starts and stops each transport (socket's) listener by calling
`IPCEndpoint.start/close_listener()` which in turn delegates to
the underlying `inspect.getmodule(IPCEndpoint.addr)` backend tpt
module's equivalent impl.
* tracks all created endpoints in a `._endpoints: list[IPCEndpoint]`
which is further exposed through public properties for
introspection of served transport-protocols and their addresses.
|_`IPCServer._[parent/stream_handler]_tn: Nursery`s which are either
allocated (in which case, as the same instance) or provided by the
caller of `open_ipc_server()` such that the same nursery-cancel-scope
controls offered by `trio.serve_listeners(handler_nursery=)` are
offered where the `._parent_tn` is used to spawn `_serve_ipc_eps()`
tasks, and `._stream_handler_tn` is passed verbatim as `handler_nursery`.
- a new `IPCEndpoint`-struct (as mentioned) which wraps each
transport-proto's address + listener + allocated-supervising-nursery
to encapsulate the "lifetime of a server IPC endpoint" such that
eventually we can track and managed per-protocol/address/`.listen_on()`-call
scoped starts/stops/restarts for the purposes of filtering/banning
peer traffic.
|_ also included is an unused `.peer_tpts` table which we can
hopefully use to replace `Actor._peers` in a `Channel`-tracking
transport-proto-aware way!
Surrounding changes to `.ipc.*` primitives to match,
- make `[TCP|UDS]Address` types `msgspec.Struct(frozen=True)` and thus
drop any-and-all `addr._host =` style mutation throughout.
|_ as such also drop their `.__init__()` and `.__eq__()` meths.
|_ UDS tweaks to field names and thus `.__repr__()`.
- move `[TCP|UDS]Address.[start/close]_listener()` meths to be mod-level
equiv `start|close_listener()` funcs.
- just hard code the `.ipc._types._key_to_transport/._addr_to_transport`
table entries instead of all the prior fancy dynamic class property
reading stuff (remember, "explicit is better then implicit").
Modified in `._runtime.Actor` internals,
- drop the `._serve_forever()` and `.cancel_server()`, methods and
`._server_down` waiting logic from `.cancel_soon()`
- add `.[_]ipc_server` which is opened just after the `._service_n` and
delegate to it for any equivalent publicly exposed instance
attributes/properties.
2025-04-10 22:06:12 +00:00
|
|
|
|
|
|
|
|
if not accept_addr.is_valid:
|
|
|
|
|
breakpoint()
|
2022-08-03 19:29:34 +00:00
|
|
|
|
2023-09-27 19:19:30 +00:00
|
|
|
await reg_portal.run_from_ns(
|
|
|
|
|
'self',
|
|
|
|
|
'register_actor',
|
|
|
|
|
uid=actor.uid,
|
2025-03-23 03:14:04 +00:00
|
|
|
addr=accept_addr.unwrap(),
|
2023-09-27 19:19:30 +00:00
|
|
|
)
|
|
|
|
|
|
|
|
|
|
is_registered: bool = True
|
2022-08-03 19:29:34 +00:00
|
|
|
|
2026-02-11 01:33:19 +00:00
|
|
|
# init steps complete, deliver IPC-server and
|
|
|
|
|
# registrar addrs back to caller.
|
|
|
|
|
task_status.started((
|
|
|
|
|
accept_addrs,
|
|
|
|
|
actor.reg_addrs,
|
|
|
|
|
))
|
2022-08-03 19:29:34 +00:00
|
|
|
|
|
|
|
|
# Begin handling our new connection back to our
|
|
|
|
|
# parent. This is done last since we don't want to
|
|
|
|
|
# start processing parent requests until our channel
|
|
|
|
|
# server is 100% up and running.
|
|
|
|
|
if actor._parent_chan:
|
2025-06-16 15:58:59 +00:00
|
|
|
await root_tn.start(
|
2022-08-03 19:29:34 +00:00
|
|
|
partial(
|
2025-04-11 18:30:21 +00:00
|
|
|
_rpc.process_messages,
|
2025-06-11 20:44:47 +00:00
|
|
|
chan=actor._parent_chan,
|
2022-08-03 19:29:34 +00:00
|
|
|
shield=True,
|
|
|
|
|
)
|
|
|
|
|
)
|
2024-03-03 00:26:40 +00:00
|
|
|
log.runtime(
|
|
|
|
|
'Actor runtime is up!'
|
|
|
|
|
# 'Blocking on service nursery to exit..\n'
|
|
|
|
|
)
|
2023-04-07 20:07:26 +00:00
|
|
|
log.runtime(
|
2025-06-23 21:33:54 +00:00
|
|
|
'Service nursery complete\n'
|
|
|
|
|
'\n'
|
2025-07-07 14:37:02 +00:00
|
|
|
'->} waiting on root nursery to complete..\n'
|
2023-04-07 20:07:26 +00:00
|
|
|
)
|
2022-08-03 19:29:34 +00:00
|
|
|
|
|
|
|
|
# Blocks here as expected until the root nursery is
|
|
|
|
|
# killed (i.e. this actor is cancelled or signalled by the parent)
|
2024-06-28 18:25:53 +00:00
|
|
|
except Exception as internal_err:
|
2023-09-27 19:19:30 +00:00
|
|
|
if not is_registered:
|
2024-06-28 18:25:53 +00:00
|
|
|
err_report: str = (
|
|
|
|
|
'\n'
|
|
|
|
|
"Actor runtime (internally) failed BEFORE contacting the registry?\n"
|
|
|
|
|
f'registrars -> {actor.reg_addrs} ?!?!\n\n'
|
|
|
|
|
|
|
|
|
|
'^^^ THIS IS PROBABLY AN INTERNAL `tractor` BUG! ^^^\n\n'
|
|
|
|
|
'\t>> CALMLY CANCEL YOUR CHILDREN AND CALL YOUR PARENTS <<\n\n'
|
|
|
|
|
|
|
|
|
|
'\tIf this is a sub-actor hopefully its parent will keep running '
|
|
|
|
|
'and cancel/reap this sub-process..\n'
|
|
|
|
|
'(well, presuming this error was propagated upward)\n\n'
|
|
|
|
|
|
|
|
|
|
'\t---------------------------------------------\n'
|
|
|
|
|
'\tPLEASE REPORT THIS TRACEBACK IN A BUG REPORT @ ' # oneline
|
|
|
|
|
'https://github.com/goodboy/tractor/issues\n'
|
|
|
|
|
'\t---------------------------------------------\n'
|
|
|
|
|
)
|
|
|
|
|
|
2022-08-03 19:29:34 +00:00
|
|
|
# TODO: I guess we could try to connect back
|
|
|
|
|
# to the parent through a channel and engage a debugger
|
|
|
|
|
# once we have that all working with std streams locking?
|
2024-06-28 18:25:53 +00:00
|
|
|
log.exception(err_report)
|
2022-08-03 19:29:34 +00:00
|
|
|
|
|
|
|
|
if actor._parent_chan:
|
2025-04-11 18:30:21 +00:00
|
|
|
await _rpc.try_ship_error_to_remote(
|
2024-02-19 17:25:08 +00:00
|
|
|
actor._parent_chan,
|
2024-06-28 18:25:53 +00:00
|
|
|
internal_err,
|
2024-02-19 17:25:08 +00:00
|
|
|
)
|
2022-08-03 19:29:34 +00:00
|
|
|
|
|
|
|
|
# always!
|
2024-06-28 18:25:53 +00:00
|
|
|
match internal_err:
|
2023-04-07 20:07:26 +00:00
|
|
|
case ContextCancelled():
|
|
|
|
|
log.cancel(
|
|
|
|
|
f'Actor: {actor.uid} was task-context-cancelled with,\n'
|
2024-06-28 18:25:53 +00:00
|
|
|
f'str(internal_err)'
|
2023-04-07 20:07:26 +00:00
|
|
|
)
|
|
|
|
|
case _:
|
2024-06-28 18:25:53 +00:00
|
|
|
log.exception(
|
|
|
|
|
'Main actor-runtime task errored\n'
|
|
|
|
|
f'<x)\n'
|
|
|
|
|
f' |_{actor}\n'
|
|
|
|
|
)
|
|
|
|
|
|
|
|
|
|
raise internal_err
|
2022-08-03 19:29:34 +00:00
|
|
|
|
|
|
|
|
finally:
|
2024-06-28 18:25:53 +00:00
|
|
|
teardown_report: str = (
|
|
|
|
|
'Main actor-runtime task completed\n'
|
2025-07-07 14:37:02 +00:00
|
|
|
'\n'
|
2024-03-03 00:26:40 +00:00
|
|
|
)
|
2024-06-28 18:25:53 +00:00
|
|
|
|
|
|
|
|
# ?TODO? should this be in `._entry`/`._root` mods instead?
|
|
|
|
|
#
|
|
|
|
|
# teardown any actor-lifetime-bound contexts
|
|
|
|
|
ls: ExitStack = actor.lifetime_stack
|
|
|
|
|
# only report if there are any registered
|
|
|
|
|
cbs: list[Callable] = [
|
|
|
|
|
repr(tup[1].__wrapped__)
|
|
|
|
|
for tup in ls._exit_callbacks
|
|
|
|
|
]
|
|
|
|
|
if cbs:
|
|
|
|
|
cbs_str: str = '\n'.join(cbs)
|
|
|
|
|
teardown_report += (
|
|
|
|
|
'-> Closing actor-lifetime-bound callbacks\n\n'
|
|
|
|
|
f'}}>\n'
|
|
|
|
|
f' |_{ls}\n'
|
|
|
|
|
f' |_{cbs_str}\n'
|
|
|
|
|
)
|
|
|
|
|
# XXX NOTE XXX this will cause an error which
|
|
|
|
|
# prevents any `infected_aio` actor from continuing
|
|
|
|
|
# and any callbacks in the `ls` here WILL NOT be
|
|
|
|
|
# called!!
|
2025-05-13 16:13:12 +00:00
|
|
|
# await debug.pause(shield=True)
|
2024-06-28 18:25:53 +00:00
|
|
|
|
|
|
|
|
ls.close()
|
|
|
|
|
|
|
|
|
|
# XXX TODO but hard XXX
|
|
|
|
|
# we can't actually do this bc the debugger uses the
|
Heh, add back `Actor._root_tn`, it has purpose..
Turns out I didn't read my own internals docs/comments and despite it
not being used previously, this adds the real use case: a root,
per-actor, scope which ensures parent comms are the last conc-thing to
be cancelled.
Also, the impl changes here make the test from 6410e45 (or wtv
it's rebased to) pass, i.e. we can support crash handling in the root
actor despite the root-tn having been (self) cancelled.
Superficial adjustments,
- rename `Actor._service_n` -> `._service_tn` everywhere.
- add asserts to `._runtime.async_main()` which ensure that the any
`.trionics.maybe_open_nursery()` calls against optionally passed
`._[root/service]_tn` are allocated-if-not-provided (the
`._service_tn`-case being an i-guess-prep-for-the-future-anti-pattern
Bp).
- obvi adjust all internal usage to match new naming.
Serious/real-use-case changes,
- add (back) a `Actor._root_tn` which sits a scope "above" the
service-tn and is either,
+ assigned in `._runtime.async_main()` for sub-actors OR,
+ assigned in `._root.open_root_actor()` for the root actor.
**THE primary reason** to keep this "upper" tn is that during
a full-`Actor`-cancellation condition (more details below) we want to
ensure that the IPC connection with a sub-actor's parent is **the last
thing to be cancelled**; this is most simply implemented by ensuring
that the `Actor._parent_chan: .ipc.Channel` is handled in an upper
scope in `_rpc.process_messages()`-subtask-terms.
- for the root actor this `root_tn` is allocated in `.open_root_actor()`
body and assigned as such.
- extend `Actor.cancel_soon()` to be cohesive with this entire teardown
"policy" by scheduling a task in the `._root_tn` which,
* waits for the `._service_tn` to complete and then,
* cancels the `._root_tn.cancel_scope`,
* includes "sclangy" console logging throughout.
2025-08-19 23:24:20 +00:00
|
|
|
# _service_tn to spawn the lock task, BUT, in theory if we had
|
2024-06-28 18:25:53 +00:00
|
|
|
# the root nursery surround this finally block it might be
|
|
|
|
|
# actually possible to debug THIS machinery in the same way
|
|
|
|
|
# as user task code?
|
|
|
|
|
#
|
2022-08-03 19:29:34 +00:00
|
|
|
# if actor.name == 'brokerd.ib':
|
2024-01-02 14:08:39 +00:00
|
|
|
# with CancelScope(shield=True):
|
2025-05-13 16:13:12 +00:00
|
|
|
# await debug.breakpoint()
|
2022-08-03 19:29:34 +00:00
|
|
|
|
2024-03-03 00:26:40 +00:00
|
|
|
# Unregister actor from the registry-sys / registrar.
|
2022-11-10 00:22:33 +00:00
|
|
|
if (
|
2023-09-27 19:19:30 +00:00
|
|
|
is_registered
|
2025-07-07 14:37:02 +00:00
|
|
|
and
|
|
|
|
|
not actor.is_registrar
|
2022-08-03 19:29:34 +00:00
|
|
|
):
|
2023-09-27 19:19:30 +00:00
|
|
|
failed: bool = False
|
2023-10-19 16:05:44 +00:00
|
|
|
for addr in actor.reg_addrs:
|
2025-03-23 03:14:04 +00:00
|
|
|
waddr = wrap_address(addr)
|
|
|
|
|
assert waddr.is_valid
|
2023-09-27 19:19:30 +00:00
|
|
|
with trio.move_on_after(0.5) as cs:
|
|
|
|
|
cs.shield = True
|
|
|
|
|
try:
|
|
|
|
|
async with get_registry(
|
2025-03-23 03:14:04 +00:00
|
|
|
addr,
|
2023-09-27 19:19:30 +00:00
|
|
|
) as reg_portal:
|
|
|
|
|
await reg_portal.run_from_ns(
|
|
|
|
|
'self',
|
|
|
|
|
'unregister_actor',
|
|
|
|
|
uid=actor.uid
|
|
|
|
|
)
|
|
|
|
|
except OSError:
|
|
|
|
|
failed = True
|
|
|
|
|
if cs.cancelled_caught:
|
2022-08-03 19:29:34 +00:00
|
|
|
failed = True
|
2023-09-27 19:19:30 +00:00
|
|
|
|
|
|
|
|
if failed:
|
2024-06-28 18:25:53 +00:00
|
|
|
teardown_report += (
|
|
|
|
|
f'-> Failed to unregister {actor.name} from '
|
|
|
|
|
f'registar @ {addr}\n'
|
2023-09-27 19:19:30 +00:00
|
|
|
)
|
2022-08-03 19:29:34 +00:00
|
|
|
|
|
|
|
|
# Ensure all peers (actors connected to us as clients) are finished
|
2025-04-11 20:55:03 +00:00
|
|
|
if (
|
|
|
|
|
(ipc_server := actor.ipc_server)
|
|
|
|
|
and
|
|
|
|
|
ipc_server.has_peers(check_chans=True)
|
|
|
|
|
):
|
|
|
|
|
teardown_report += (
|
2025-07-07 14:37:02 +00:00
|
|
|
f'-> Waiting for remaining peers to clear..\n'
|
|
|
|
|
f' {pformat(ipc_server._peers)}'
|
2025-04-11 20:55:03 +00:00
|
|
|
)
|
|
|
|
|
log.runtime(teardown_report)
|
2025-07-15 21:28:48 +00:00
|
|
|
await ipc_server.wait_for_no_more_peers()
|
2022-08-03 19:29:34 +00:00
|
|
|
|
2024-07-04 19:06:15 +00:00
|
|
|
teardown_report += (
|
2025-07-07 14:37:02 +00:00
|
|
|
'-]> all peer channels are complete.\n'
|
2024-07-04 19:06:15 +00:00
|
|
|
)
|
2024-06-28 18:25:53 +00:00
|
|
|
|
2025-07-07 14:37:02 +00:00
|
|
|
# op_nested_actor_repr: str = _pformat.nest_from_op(
|
|
|
|
|
# input_op=')>',
|
|
|
|
|
# text=actor.pformat(),
|
|
|
|
|
# nest_prefix='|_',
|
|
|
|
|
# nest_indent=1, # under >
|
|
|
|
|
# )
|
2024-07-04 19:06:15 +00:00
|
|
|
teardown_report += (
|
2025-07-07 14:37:02 +00:00
|
|
|
'-)> actor runtime main task exit.\n'
|
|
|
|
|
# f'{op_nested_actor_repr}'
|
2024-07-04 19:06:15 +00:00
|
|
|
)
|
2025-07-07 14:37:02 +00:00
|
|
|
# if _state._runtime_vars['_is_root']:
|
|
|
|
|
# log.info(teardown_report)
|
|
|
|
|
# else:
|
|
|
|
|
log.runtime(teardown_report)
|
2022-08-03 19:29:34 +00:00
|
|
|
|
|
|
|
|
|
2025-04-03 20:35:33 +00:00
|
|
|
# TODO: rename to `Registry` and move to `.discovery._registry`!
|
2018-07-14 20:09:05 +00:00
|
|
|
class Arbiter(Actor):
|
2021-11-04 15:52:08 +00:00
|
|
|
'''
|
2025-04-03 20:35:33 +00:00
|
|
|
A special registrar (and for now..) `Actor` who can contact all
|
|
|
|
|
other actors within its immediate process tree and possibly keeps
|
|
|
|
|
a registry of others meant to be discoverable in a distributed
|
|
|
|
|
application. Normally the registrar is also the "root actor" and
|
|
|
|
|
thus always has access to the top-most-level actor (process)
|
|
|
|
|
nursery.
|
2023-09-27 19:19:30 +00:00
|
|
|
|
|
|
|
|
By default, the registrar is always initialized when and if no
|
|
|
|
|
other registrar socket addrs have been specified to runtime
|
|
|
|
|
init entry-points (such as `open_root_actor()` or
|
|
|
|
|
`open_nursery()`). Any time a new main process is launched (and
|
|
|
|
|
thus thus a new root actor created) and, no existing registrar
|
|
|
|
|
can be contacted at the provided `registry_addr`, then a new
|
|
|
|
|
one is always created; however, if one can be reached it is
|
|
|
|
|
used.
|
|
|
|
|
|
|
|
|
|
Normally a distributed app requires at least registrar per
|
|
|
|
|
logical host where for that given "host space" (aka localhost
|
|
|
|
|
IPC domain of addresses) it is responsible for making all other
|
|
|
|
|
host (local address) bound actors *discoverable* to external
|
|
|
|
|
actor trees running on remote hosts.
|
2021-11-04 15:52:08 +00:00
|
|
|
|
|
|
|
|
'''
|
2018-07-14 20:09:05 +00:00
|
|
|
is_arbiter = True
|
|
|
|
|
|
2025-04-03 20:35:33 +00:00
|
|
|
# TODO, implement this as a read on there existing a `._state` of
|
|
|
|
|
# some sort setup by whenever we impl this all as
|
|
|
|
|
# a `.discovery._registry.open_registry()` API
|
|
|
|
|
def is_registry(self) -> bool:
|
|
|
|
|
return self.is_arbiter
|
|
|
|
|
|
2023-09-27 19:19:30 +00:00
|
|
|
def __init__(
|
|
|
|
|
self,
|
|
|
|
|
*args,
|
|
|
|
|
**kwargs,
|
|
|
|
|
) -> None:
|
2021-09-06 15:41:34 +00:00
|
|
|
|
2021-12-02 17:34:27 +00:00
|
|
|
self._registry: dict[
|
2022-02-15 13:48:07 +00:00
|
|
|
tuple[str, str],
|
2025-03-31 01:36:45 +00:00
|
|
|
UnwrappedAddress,
|
2021-09-08 00:24:02 +00:00
|
|
|
] = {}
|
2022-12-12 18:18:22 +00:00
|
|
|
self._waiters: dict[
|
|
|
|
|
str,
|
|
|
|
|
# either an event to sync to receiving an actor uid (which
|
|
|
|
|
# is filled in once the actor has sucessfully registered),
|
|
|
|
|
# or that uid after registry is complete.
|
|
|
|
|
list[trio.Event | tuple[str, str]]
|
|
|
|
|
] = {}
|
2021-09-06 15:41:34 +00:00
|
|
|
|
2018-07-26 21:29:23 +00:00
|
|
|
super().__init__(*args, **kwargs)
|
|
|
|
|
|
2022-12-12 18:18:22 +00:00
|
|
|
async def find_actor(
|
|
|
|
|
self,
|
|
|
|
|
name: str,
|
|
|
|
|
|
2025-03-31 01:36:45 +00:00
|
|
|
) -> UnwrappedAddress|None:
|
2022-12-12 18:18:22 +00:00
|
|
|
|
2025-03-23 03:14:04 +00:00
|
|
|
for uid, addr in self._registry.items():
|
2018-07-26 21:29:23 +00:00
|
|
|
if name in uid:
|
2025-03-23 03:14:04 +00:00
|
|
|
return addr
|
2018-08-13 03:59:19 +00:00
|
|
|
|
2018-08-31 21:16:24 +00:00
|
|
|
return None
|
|
|
|
|
|
2020-08-03 19:40:41 +00:00
|
|
|
async def get_registry(
|
|
|
|
|
self
|
2022-02-15 13:48:07 +00:00
|
|
|
|
2025-03-31 01:36:45 +00:00
|
|
|
) -> dict[str, UnwrappedAddress]:
|
2022-02-15 13:48:07 +00:00
|
|
|
'''
|
|
|
|
|
Return current name registry.
|
2021-09-06 15:41:34 +00:00
|
|
|
|
|
|
|
|
This method is async to allow for cross-actor invocation.
|
2022-02-15 13:48:07 +00:00
|
|
|
|
2021-09-06 15:41:34 +00:00
|
|
|
'''
|
2020-08-03 22:42:23 +00:00
|
|
|
# NOTE: requires ``strict_map_key=False`` to the msgpack
|
|
|
|
|
# unpacker since we have tuples as keys (not this makes the
|
|
|
|
|
# arbiter suscetible to hashdos):
|
|
|
|
|
# https://github.com/msgpack/msgpack-python#major-breaking-changes-in-msgpack-10
|
2023-09-27 19:19:30 +00:00
|
|
|
return {
|
|
|
|
|
'.'.join(key): val
|
|
|
|
|
for key, val in self._registry.items()
|
|
|
|
|
}
|
2020-08-03 19:40:41 +00:00
|
|
|
|
2018-08-26 17:12:29 +00:00
|
|
|
async def wait_for_actor(
|
2021-09-06 15:41:34 +00:00
|
|
|
self,
|
|
|
|
|
name: str,
|
2022-02-15 13:48:07 +00:00
|
|
|
|
2025-03-31 01:36:45 +00:00
|
|
|
) -> list[UnwrappedAddress]:
|
2022-02-15 13:48:07 +00:00
|
|
|
'''
|
|
|
|
|
Wait for a particular actor to register.
|
2018-08-13 03:59:19 +00:00
|
|
|
|
|
|
|
|
This is a blocking call if no actor by the provided name is currently
|
|
|
|
|
registered.
|
2022-02-15 13:48:07 +00:00
|
|
|
|
2021-09-06 15:41:34 +00:00
|
|
|
'''
|
2025-03-31 01:36:45 +00:00
|
|
|
addrs: list[UnwrappedAddress] = []
|
|
|
|
|
addr: UnwrappedAddress
|
2018-08-13 03:59:19 +00:00
|
|
|
|
2024-04-30 16:15:46 +00:00
|
|
|
mailbox_info: str = 'Actor registry contact infos:\n'
|
2025-03-23 03:14:04 +00:00
|
|
|
for uid, addr in self._registry.items():
|
2024-04-30 16:15:46 +00:00
|
|
|
mailbox_info += (
|
|
|
|
|
f'|_uid: {uid}\n'
|
2025-03-23 03:14:04 +00:00
|
|
|
f'|_addr: {addr}\n\n'
|
2024-03-03 00:26:40 +00:00
|
|
|
)
|
2024-04-30 16:15:46 +00:00
|
|
|
if name == uid[0]:
|
2025-03-23 03:14:04 +00:00
|
|
|
addrs.append(addr)
|
2018-08-13 03:59:19 +00:00
|
|
|
|
2025-03-23 03:14:04 +00:00
|
|
|
if not addrs:
|
2018-08-13 03:59:19 +00:00
|
|
|
waiter = trio.Event()
|
|
|
|
|
self._waiters.setdefault(name, []).append(waiter)
|
|
|
|
|
await waiter.wait()
|
2022-12-12 18:18:22 +00:00
|
|
|
|
2018-08-13 03:59:19 +00:00
|
|
|
for uid in self._waiters[name]:
|
2022-12-12 18:18:22 +00:00
|
|
|
if not isinstance(uid, trio.Event):
|
2025-03-23 03:14:04 +00:00
|
|
|
addrs.append(self._registry[uid])
|
2018-08-13 03:59:19 +00:00
|
|
|
|
2024-04-30 16:15:46 +00:00
|
|
|
log.runtime(mailbox_info)
|
2025-03-23 03:14:04 +00:00
|
|
|
return addrs
|
2018-07-14 20:09:05 +00:00
|
|
|
|
2021-04-27 12:46:54 +00:00
|
|
|
async def register_actor(
|
2021-07-01 18:52:52 +00:00
|
|
|
self,
|
2022-02-15 13:48:07 +00:00
|
|
|
uid: tuple[str, str],
|
2025-03-31 01:36:45 +00:00
|
|
|
addr: UnwrappedAddress
|
2018-08-26 17:12:29 +00:00
|
|
|
) -> None:
|
2023-09-27 19:19:30 +00:00
|
|
|
uid = name, hash = (str(uid[0]), str(uid[1]))
|
2025-03-23 03:14:04 +00:00
|
|
|
waddr: Address = wrap_address(addr)
|
|
|
|
|
if not waddr.is_valid:
|
|
|
|
|
# should never be 0-dynamic-os-alloc
|
2025-05-13 16:13:12 +00:00
|
|
|
await debug.pause()
|
2025-03-23 03:14:04 +00:00
|
|
|
|
2023-09-27 19:19:30 +00:00
|
|
|
self._registry[uid] = addr
|
2018-08-13 03:59:19 +00:00
|
|
|
|
|
|
|
|
# pop and signal all waiter events
|
2022-12-12 18:18:22 +00:00
|
|
|
events = self._waiters.pop(name, [])
|
2018-08-13 03:59:19 +00:00
|
|
|
self._waiters.setdefault(name, []).append(uid)
|
|
|
|
|
for event in events:
|
2018-09-08 13:40:35 +00:00
|
|
|
if isinstance(event, trio.Event):
|
|
|
|
|
event.set()
|
2018-07-14 20:09:05 +00:00
|
|
|
|
2021-09-06 15:41:34 +00:00
|
|
|
async def unregister_actor(
|
|
|
|
|
self,
|
2022-02-15 13:48:07 +00:00
|
|
|
uid: tuple[str, str]
|
|
|
|
|
|
2021-09-06 15:41:34 +00:00
|
|
|
) -> None:
|
2021-09-08 00:24:02 +00:00
|
|
|
uid = (str(uid[0]), str(uid[1]))
|
2023-06-21 20:08:18 +00:00
|
|
|
entry: tuple = self._registry.pop(uid, None)
|
|
|
|
|
if entry is None:
|
|
|
|
|
log.warning(f'Request to de-register {uid} failed?')
|