# tractor: structured concurrent "actors".
# Copyright 2018-eternity Tyler Goodlet.

# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.

# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.

# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
'''
Memory "portal" construct.

"Memory portals" are both an API and set of IPC wrapping primitives
for managing structured concurrency "cancel-scope linked" tasks
running in disparate virtual memory domains - at least in different
OS processes, possibly on different (hardware) hosts.

'''
from __future__ import annotations
from contextlib import asynccontextmanager as acm
import importlib
import inspect
from typing import (
    Any,
    Callable,
    AsyncGenerator,
    TYPE_CHECKING,
)
from functools import partial
from dataclasses import dataclass
import warnings

import trio

from .trionics import maybe_open_nursery
from ._state import (
    current_actor,
)
from ._ipc import Channel
from .log import get_logger
from .msg import (
    # Error,
    PayloadMsg,
    NamespacePath,
    Return,
)
from ._exceptions import (
    # unpack_error,
    NoResult,
)
from ._context import (
    Context,
    open_context_from_portal,
)
from ._streaming import (
    MsgStream,
)

if TYPE_CHECKING:
    from ._runtime import Actor


log = get_logger(__name__)


class Portal:
    '''
    A 'portal' to a memory-domain-separated `Actor`.

    A portal is "opened" (and eventually closed) by one side of an
    inter-actor communication context. The side which opens the portal
    is equivalent to a "caller" in function parlance and usually is
    either the called actor's parent (in process tree hierarchy terms)
    or a client interested in scheduling work to be done remotely in a
    process which has a separate (virtual) memory domain.

    The portal api allows the "caller" actor to invoke remote routines
    and receive results through an underlying ``tractor.Channel`` as
    though the remote (async) function / generator was called locally.
    It may be thought of loosely as an RPC api where native Python
    function calling semantics are supported transparently; hence it is
    like having a "portal" between the separate actor memory spaces.

    '''
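    # Usage sketch (hypothetical names, for illustration only):
    # a portal is typically obtained by spawning a subactor from an
    # `ActorNursery` and is then used to schedule work in that child,
    #
    #   async with tractor.open_nursery() as an:
    #       portal = await an.start_actor(
    #           'worker',
    #           enable_modules=[__name__],
    #       )
    #       async with portal.open_context(some_ctx_fn) as (ctx, first):
    #           ...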

    # global timeout for remote cancel requests sent to
    # connected (peer) actors.
    cancel_timeout: float = 0.5

    def __init__(
        self,
        channel: Channel,

    ) -> None:

        self._chan: Channel = channel

        # during the portal's lifetime
        self._final_result_pld: Any|None = None
        self._final_result_msg: PayloadMsg|None = None

        # When set to a ``Context`` (when _submit_for_result is called)
        # it is expected that ``result()`` will be awaited at some
        # point.
        self._expect_result_ctx: Context|None = None
        self._streams: set[MsgStream] = set()
        self.actor: Actor = current_actor()

    @property
    def chan(self) -> Channel:
        return self._chan

    @property
    def channel(self) -> Channel:
        '''
        Proxy to legacy attr name..

        Consider the shorter `Portal.chan` instead of `.channel` ;)

        '''
        log.debug(
            'Consider the shorter `Portal.chan` instead of `.channel` ;)'
        )
        return self.chan

    # TODO: factor this out into a `.highlevel` API-wrapper that uses
    # a single `.open_context()` call underneath.
    async def _submit_for_result(
        self,
        ns: str,
        func: str,
        **kwargs
    ) -> None:

        if self._expect_result_ctx is not None:
            raise RuntimeError(
                'A pending main result has already been submitted'
            )

        self._expect_result_ctx: Context = await self.actor.start_remote_task(
            self.channel,
            nsf=NamespacePath(f'{ns}:{func}'),
            kwargs=kwargs,
            portal=self,
        )

    # TODO: we should deprecate this API right? since if we remove
    # `.run_in_actor()` (and instead move it to a `.highlevel`
    # wrapper api (around a single `.open_context()` call) we don't
    # really have any notion of a "main" remote task any more?
    #
    # @api_frame
    async def wait_for_result(
        self,
        hide_tb: bool = True,
    ) -> Any:
        '''
        Return the final result delivered by a `Return`-msg from the
        remote peer actor's "main" task's `return` statement.

        '''
        __tracebackhide__: bool = hide_tb
        # Check for non-rpc errors slapped on the
        # channel for which we always raise
        exc = self.channel._exc
        if exc:
            raise exc

        # not expecting a "main" result
        if self._expect_result_ctx is None:
            log.warning(
                f"Portal for {self.channel.uid} not expecting a final"
                " result?\nresult() should only be called if subactor"
                " was spawned with `ActorNursery.run_in_actor()`")
            return NoResult

        # expecting a "main" result
        assert self._expect_result_ctx

        if self._final_result_msg is None:
            try:
                (
                    self._final_result_msg,
                    self._final_result_pld,
                ) = await self._expect_result_ctx._pld_rx.recv_msg_w_pld(
                    ipc=self._expect_result_ctx,
                    expect_msg=Return,
                )
            except BaseException as err:
                # TODO: wrap this into `@api_frame` optionally with
                # some kinda filtering mechanism like log levels?
                __tracebackhide__: bool = False
                raise err

        return self._final_result_pld
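
    # Usage sketch (assuming a peer spawned via the
    # `ActorNursery.run_in_actor()` api mentioned in the warning
    # above; `an` and `some_fn` are hypothetical):
    #
    #   portal = await an.run_in_actor(some_fn)
    #   result = await portal.wait_for_result()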

    # TODO: factor this out into a `.highlevel` API-wrapper that uses
    # a single `.open_context()` call underneath.
    async def result(
        self,
        *args,
        **kwargs,
    ) -> Any|Exception:
        typname: str = type(self).__name__
        log.warning(
            f'`{typname}.result()` is DEPRECATED!\n'
            f'Use `{typname}.wait_for_result()` instead!\n'
        )
        return await self.wait_for_result(
            *args,
            **kwargs,
        )

    async def _cancel_streams(self):
        # terminate all locally running async generator
        # IPC calls
        if self._streams:
            log.cancel(
                f"Cancelling all streams with {self.channel.uid}")
            for stream in self._streams.copy():
                try:
                    await stream.aclose()
                except trio.ClosedResourceError:
                    # don't error the stream having already been closed
                    # (unless of course at some point down the road we
                    # won't expect this to always be the case or need to
                    # detect it for respawning purposes?)
                    log.debug(f"{stream} was already closed.")

    async def aclose(self):
        log.debug(f"Closing {self}")
        # TODO: once we move to implementing our own `ReceiveChannel`
        # (including remote task cancellation inside its `.aclose()`)
        # we'll need to .aclose all those channels here
        await self._cancel_streams()

    async def cancel_actor(
        self,
        timeout: float | None = None,

    ) -> bool:
        '''
        Cancel the actor runtime (and thus process) on the far
        end of this portal.

        **NOTE** THIS CANCELS THE ENTIRE RUNTIME AND THE
        SUBPROCESS, it DOES NOT just cancel the remote task. If you
        want to have a handle to cancel a remote ``trio.Task`` look
        at `.open_context()` and the definition of
        `._context.Context.cancel()` which CAN be used for this
        purpose.

        '''
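        # Contrast sketch (hypothetical names): to cancel just one
        # remote task, hold a context handle instead, e.g.
        #
        #   async with portal.open_context(some_ctx_fn) as (ctx, first):
        #       await ctx.cancel()  # cancels only that remote task
        #
        # whereas this method tears down the peer's entire actor
        # runtime (and its process):
        #
        #   await portal.cancel_actor()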
        __runtimeframe__: int = 1  # noqa

        chan: Channel = self.channel
        if not chan.connected():
            log.runtime(
                'This channel is already closed, skipping cancel request..'
            )
            return False

        reminfo: str = (
            f'c)=> {self.channel.uid}\n'
            f' |_{chan}\n'
        )
        log.cancel(
            f'Requesting actor-runtime cancel for peer\n\n'
            f'{reminfo}'
        )

        # XXX the one spot we set it?
        self.channel._cancel_called: bool = True
        try:
            # send cancel cmd - might not get response
            # XXX: sure would be nice to make this work with
            # a proper shield
            with trio.move_on_after(
                timeout
                or
                self.cancel_timeout
            ) as cs:
                cs.shield: bool = True
                await self.run_from_ns(
                    'self',
                    'cancel',
                )
                return True

            if cs.cancelled_caught:
                # may timeout and we never get an ack (obvi racy)
                # but that doesn't mean it wasn't cancelled.
                log.debug(
                    'May have failed to cancel peer?\n'
                    f'{reminfo}'
                )

            # if we get here some weird cancellation case happened
            return False

        except (
            trio.ClosedResourceError,
            trio.BrokenResourceError,
        ):
            log.debug(
                'IPC chan for actor already closed or broken?\n\n'
                f'{self.channel.uid}\n'
                f' |_{self.channel}\n'
            )
            return False
|
|
|
|
|
2024-06-27 20:25:46 +00:00
|
|
|
# TODO: do we still need this for low level `Actor`-runtime
|
|
|
|
# method calls or can we also remove it?
|
2021-04-28 03:08:45 +00:00
|
|
|
async def run_from_ns(
|
|
|
|
self,
|
|
|
|
namespace_path: str,
|
|
|
|
function_name: str,
|
|
|
|
**kwargs,
|
|
|
|
) -> Any:
|
2021-12-01 23:48:29 +00:00
|
|
|
'''
|
|
|
|
Run a function from a (remote) namespace in a new task on the
|
|
|
|
far-end actor.
|
2021-04-28 03:08:45 +00:00
|
|
|
|
|
|
|
This is a more explitcit way to run tasks in a remote-process
|
|
|
|
actor using explicit object-path syntax. Hint: this is how
|
|
|
|
`.run()` works underneath.
|
|
|
|
|
|
|
|
Note::
|
|
|
|
|
2024-02-22 23:33:18 +00:00
|
|
|
A special namespace `self` can be used to invoke `Actor`
|
|
|
|
instance methods in the remote runtime. Currently this
|
|
|
|
should only ever be used for `Actor` (method) runtime
|
|
|
|
internals!
|
2021-12-01 23:48:29 +00:00
|
|
|
|
|
|
|
'''
|
2024-04-18 19:17:50 +00:00
|
|
|
__runtimeframe__: int = 1 # noqa
|
2024-02-22 23:33:18 +00:00
|
|
|
nsf = NamespacePath(
|
|
|
|
f'{namespace_path}:{function_name}'
|
|
|
|
)
|
2024-04-18 19:17:50 +00:00
|
|
|
ctx: Context = await self.actor.start_remote_task(
|
2024-02-22 23:33:18 +00:00
|
|
|
chan=self.channel,
|
|
|
|
nsf=nsf,
|
|
|
|
kwargs=kwargs,
|
2024-04-18 19:17:50 +00:00
|
|
|
portal=self,
|
2021-04-28 03:08:45 +00:00
|
|
|
)
|
First draft "payload receiver in a new `.msg._ops`
As per much tinkering, re-designs and preceding rubber-ducking via many
"commit msg novelas", **finally** this adds the (hopefully) final
missing layer for typed msg safety: `tractor.msg._ops.PldRx`
(or `PayloadReceiver`? haven't decided how verbose to go..)
Design justification summary:
------ - ------
- need a way to be as-close-as-possible to the `tractor`-application
such that when `MsgType.pld: PayloadT` validation takes place, it is
straightforward and obvious how user code can decide to handle any
resulting `MsgTypeError`.
- there should be a common and optional-yet-modular way to modify
**how** data delivered via IPC (possibly embedded as user defined,
type-constrained `.pld: msgspec.Struct`s) can be handled and processed
during fault conditions and/or IPC "msg attacks".
- support for nested type constraints within a `MsgType.pld` field
should be simple to define, implement and understand at runtime.
- a layer between the app-level IPC primitive APIs
(`Context`/`MsgStream`) and application-task code (consumer code of
those APIs) should be easily customized and prove-to-be-as-such
through demonstrably rigorous internal (sub-sys) use!
-> eg. via seemless runtime RPC eps support like `Actor.cancel()`
-> by correctly implementing our `.devx._debug.Lock` REPL TTY mgmt
dialog prot, via a dead simple payload-as-ctl-msg-spec.
There are some fairly detailed doc strings included so I won't duplicate
that content, the majority of the work here is actually somewhat of
a factoring of many similar blocks that are doing more or less the same
`msg = await Context._rx_chan.receive()` with boilerplate for
`Error`/`Stop` handling via `_raise_from_no_key_in_msg()`. The new
`PldRx` basically provides a shim layer for this common "receive msg,
decode its payload, yield it up to the consuming app task" by pairing
the RPC feeder mem-chan with a msg-payload decoder and expecting IPC API
internals to use **one** API instead of re-implementing the same pattern
all over the place XD
`PldRx` breakdown
------ - ------
- for now only expects a `._msgdec: MsgDec` which allows for
override-able `MsgType.pld` validation and most obviously used in
the impl of `.dec_msg()`, the decode message method.
- provides multiple mem-chan receive options including:
|_ `.recv_pld()` which does the e2e operation of receiving a payload
item.
|_ a sync `.recv_pld_nowait()` version.
|_ a `.recv_msg_w_pld()` which optionally allows retreiving both the
shuttling `MsgType` as well as it's `.pld` body for use cases where
info on both is important (eg. draining a `MsgStream`).
Dirty internal changeover/implementation deatz:
------ - ------
- obvi move over all the IPC "primitives" that previously had the duplicate recv-n-yield
logic:
- `MsgStream.receive[_nowait]()` delegating instead to the equivalent
`PldRx.recv_pld[_nowait]()`.
- add `Context._pld_rx: PldRx`, created and passed in by
`mk_context()`; use it for the `.started()` -> `first: Started`
retrieval inside `open_context_from_portal()`.
- all the relevant `Portal` invocation methods: `.result()`,
`.run_from_ns()`, `.run()`; also allows for dropping `_unwrap_msg()`
and `.Portal_return_once()` outright Bo
- rename `Context.ctx._recv_chan` -> `._rx_chan`.
- add detailed `Context._scope` info for logging whether or not it's
cancelled inside `_maybe_cancel_and_set_remote_error()`.
- move `._context._drain_to_final_msg()` -> `._ops.drain_to_final_msg()`
since it's really not necessarily ctx specific per say, and it does
kinda fit with "msg operations" more abstractly ;)
2024-04-23 21:43:45 +00:00
|
|
|
return await ctx._pld_rx.recv_pld(
|
2024-05-20 20:07:57 +00:00
|
|
|
ipc=ctx,
|
            expect_msg=Return,
        )

    # TODO: factor this out into a `.highlevel` API-wrapper that uses
    # a single `.open_context()` call underneath.
    async def run(
        self,
        func: str|Callable,
        fn_name: str|None = None,
        **kwargs

    ) -> Any:
        '''
        Submit a remote function to be scheduled and run by the actor,
        in a new task, wrap and return its (stream of) result(s).

        This is a blocking call and returns either a value from the
        remote rpc task or a local async generator instance.

        '''
        __runtimeframe__: int = 1  # noqa

        if isinstance(func, str):
            warnings.warn(
                "`Portal.run(namespace: str, funcname: str)` is now "
                "deprecated, pass a function reference directly instead.\n"
                "If you still want to run a remote function by name use "
                "`Portal.run_from_ns()`",
                DeprecationWarning,
                stacklevel=2,
            )
            fn_mod_path: str = func
            assert isinstance(fn_name, str)
            nsf = NamespacePath(f'{fn_mod_path}:{fn_name}')

        else:  # function reference was passed directly
            if (
                not inspect.iscoroutinefunction(func) or
                (
                    inspect.iscoroutinefunction(func) and
                    getattr(func, '_tractor_stream_function', False)
                )
            ):
                raise TypeError(
                    f'{func} must be a non-streaming async function!')

            nsf = NamespacePath.from_ref(func)

        ctx = await self.actor.start_remote_task(
            self.channel,
            nsf=nsf,
            kwargs=kwargs,
            portal=self,
        )
        return await ctx._pld_rx.recv_pld(
            ipc=ctx,
            expect_msg=Return,
        )
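
    # A hedged usage sketch for `.run()` (names hypothetical): given a
    # connected `portal` and an async func `add()` defined in a module
    # already enabled on the remote actor:
    #
    #   async def add(x: int, y: int) -> int:
    #       return x + y
    #
    #   result: int = await portal.run(add, x=1, y=2)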

    # TODO: factor this out into a `.highlevel` API-wrapper that uses
    # a single `.open_context()` call underneath.
    @acm
    async def open_stream_from(
        self,
        async_gen_func: Callable,  # typing: ignore
        **kwargs,

    ) -> AsyncGenerator[MsgStream, None]:
        '''
        Legacy one-way streaming API.

        TODO: re-impl on top of `Portal.open_context()` + an async gen
        around `Context.open_stream()`.

        '''
        __runtimeframe__: int = 1  # noqa

        if not inspect.isasyncgenfunction(async_gen_func):
            if not (
                inspect.iscoroutinefunction(async_gen_func) and
                getattr(async_gen_func, '_tractor_stream_function', False)
            ):
                raise TypeError(
                    f'{async_gen_func} must be an async generator function!')

        ctx: Context = await self.actor.start_remote_task(
            self.channel,
            nsf=NamespacePath.from_ref(async_gen_func),
            kwargs=kwargs,
            portal=self,
        )

        # ensure receive-only stream entrypoint
        assert ctx._remote_func_type == 'asyncgen'

|
|
|
try:
|
2021-05-01 19:10:03 +00:00
|
|
|
# deliver receive only stream
|
2023-01-08 18:00:36 +00:00
|
|
|
async with MsgStream(
|
2024-03-02 21:27:46 +00:00
|
|
|
ctx=ctx,
|
                rx_chan=ctx._rx_chan,
            ) as stream:
                self._streams.add(stream)
                ctx._stream = stream
                yield stream

        finally:
            # cancel the far end task on consumer close
            # NOTE: this is a special case since we assume that if using
            # this ``.open_stream_from()`` api, the stream is a one-time
            # use and we couple the far end task's lifetime to the
            # consumer's scope; we don't ever send a `'stop'` message
            # right now since there shouldn't be a reason to stop and
            # restart the stream, right?
            try:
                with trio.CancelScope(shield=True):
                    await ctx.cancel()

            except trio.ClosedResourceError:
                # if the far end terminates before we send a cancel the
                # underlying transport-channel may already be closed.
                log.cancel(f'Context {ctx} was already closed?')

            # XXX: should this always be done?
            # await recv_chan.aclose()
            self._streams.remove(stream)
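
    # A hedged usage sketch for `.open_stream_from()` (names
    # hypothetical): given an async-gen func `counter()` exposed by
    # the remote actor:
    #
    #   async def counter(to: int):
    #       for i in range(to):
    #           yield i
    #
    #   async with portal.open_stream_from(counter, to=3) as stream:
    #       async for i in stream:
    #           print(i)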

    # NOTE: impl is found in the `._context` mod to make
    # reading/grokking the details simpler code-org-wise. This
    # method does not have to be used over that `@acm` module func
    # directly; it is here for convenience and from the original API
    # design.
    open_context = open_context_from_portal


@dataclass
class LocalPortal:
    '''
    A 'portal' to a local ``Actor``.

    A compatibility shim matching the normal portal API but for
    invoking functions on an in-process actor instance.

    '''
    actor: 'Actor'  # type: ignore # noqa
    channel: Channel

    async def run_from_ns(
        self,
        ns: str,
        func_name: str,
        **kwargs,
    ) -> Any:
        '''
        Run a requested local function from a namespace path and
        return its result.

        '''
        obj = self.actor if ns == 'self' else importlib.import_module(ns)
        func = getattr(obj, func_name)
        return await func(**kwargs)
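
    # A hedged usage sketch for `.run_from_ns()` (module/func names
    # hypothetical): resolve and run an async func from an importable
    # module, in-process:
    #
    #   result = await local_portal.run_from_ns('mylib', 'fetch', url=url)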


@acm
async def open_portal(

    channel: Channel,
    tn: trio.Nursery|None = None,
    start_msg_loop: bool = True,
    shield: bool = False,

) -> AsyncGenerator[Portal, None]:
    '''
    Open a ``Portal`` through the provided ``channel``.

    Spawns a background task to handle RPC processing, normally
    done by the actor-runtime implicitly via a call to
    `._rpc.process_messages()` just after connection establishment.

    '''
    actor = current_actor()
    assert actor
    was_connected: bool = False

    async with maybe_open_nursery(
        tn,
        shield=shield,
    ) as tn:

        if not channel.connected():
            await channel.connect()
            was_connected = True

        if channel.uid is None:
            await actor._do_handshake(channel)

        msg_loop_cs: trio.CancelScope|None = None
        if start_msg_loop:
            from ._runtime import process_messages
            msg_loop_cs = await tn.start(
                partial(
                    process_messages,
                    actor,
                    channel,
                    # if the local task is cancelled we want to keep
                    # the msg loop running until our block ends
                    shield=True,
                )
            )
        portal = Portal(channel)
        try:
            yield portal
        finally:
            await portal.aclose()

            if was_connected:
                await channel.aclose()

            # cancel background msg loop task
            if msg_loop_cs is not None:
                msg_loop_cs.cancel()

            tn.cancel_scope.cancel()
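
# A hedged usage sketch for `open_portal()` (channel construction
# elided, `some_async_func` hypothetical): manually drive a portal
# over an existing IPC channel and invoke a remote func by reference:
#
#   async with open_portal(channel) as portal:
#       result = await portal.run(some_async_func)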