forked from goodboy/tractor
Drop worker pool and add debug example
parent
4a512bc879
commit
71a35aadef
166  docs/README.rst
@@ -71,12 +71,16 @@ Alluring Features
 Example: self-destruct a process tree
 -------------------------------------

 ``tractor`` protects you from zombies, no matter what.

 .. code:: python

     """
-    Run with a process monitor from a terminal using:
-        $TERM -e watch -n 0.1 "pstree -a $$" & python examples/parallelism/we_are_processes.py && kill $!
+    Run with a process monitor from a terminal using::
+
+        $TERM -e watch -n 0.1 "pstree -a $$" \
+            & python examples/parallelism/we_are_processes.py \
+            && kill $!
+
     """
     from multiprocessing import cpu_count
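The hunk above shows only the docstring and first import of
``examples/parallelism/we_are_processes.py``. For orientation, a minimal
sketch of the kind of self-destructing tree that example builds might look
like the following; the ``target()`` task, the one second sleep and the final
``raise`` are illustrative assumptions rather than the file's exact contents:

.. code:: python

    import os
    from multiprocessing import cpu_count

    import tractor
    import trio


    async def target():
        # illustrative worker: report which process we are, then idle
        print(f"{tractor.current_actor().name} running in pid {os.getpid()}")
        await trio.sleep_forever()


    async def main():
        async with tractor.open_nursery() as n:

            # one sub-actor (a separate OS process + trio runtime) per core
            for i in range(cpu_count()):
                await n.run_in_actor(target, name=f'worker_{i}')

            # let ``pstree`` show the tree for a moment, then crash the root;
            # the actor nursery cancels and reaps every child on the way out
            await trio.sleep(1)
            raise RuntimeError('self-destructing!')


    if __name__ == '__main__':
        # the root's traceback is expected; the point is that no child
        # process outlives it
        trio.run(main)

The point is the same one the prose above makes: even when the root actor
blows up, the nursery tears down the whole process tree, which is what the
``pstree`` one-liner lets you watch happen.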
@@ -115,133 +119,81 @@ Example: self-destruct a process tree
 The example you're probably after...
 ------------------------------------
-It seems the initial query from most new users is "how do I make a worker
+It seems the initial ask from most new users is "how do I make a worker
 pool thing?".

 ``tractor`` is built to handle any SC process tree you can
-imagine; the "worker pool" pattern is a trivial special case:
+imagine; the "worker pool" pattern is a trivial special case.
+
+We have a `full re-implementation <concurrent_actors_primes>`_ of the std-lib's
+``concurrent.futures.ProcessPoolExecutor`` example for reference.
+
+You can run it like so (from this dir) to see the process tree in
+real time::
+
+    $TERM -e watch -n 0.1 "pstree -a $$" \
+        & python examples/parallelism/concurrent_actors_primes.py \
+        && kill $!
+
+This uses no extra threads, fancy semaphores or futures; all we need
+is ``tractor``'s IPC!
+
+.. _concurrent_actors_primes: https://github.com/goodboy/tractor/blob/readme_pump/examples/parallelism/concurrent_actors_primes.py
+
+
+"Native" sub-process debugging
+------------------------------
+Using the magic of `pdb++`_ and some IPC tricks we've
+been able to create a native-feeling debugging experience for
+any (sub)-process in your ``tractor`` tree.

 .. code:: python

-    """
-    Demonstration of the prime number detector example from the
-    ``concurrent.futures`` docs:
-
-    https://docs.python.org/3/library/concurrent.futures.html#processpoolexecutor-example
-
-    This uses no extra threads, fancy semaphores or futures; all we need
-    is ``tractor``'s channels.
-
-    """
-    from contextlib import asynccontextmanager
-    from typing import List, Callable
-    import itertools
-    import math
-    import time
+    from os import getpid

     import tractor
     import trio
-    from async_generator import aclosing


-    PRIMES = [
-        112272535095293,
-        112582705942171,
-        112272535095293,
-        115280095190773,
-        115797848077099,
-        1099726899285419,
-    ]
-
-
-    def is_prime(n):
-        if n < 2:
-            return False
-        if n == 2:
-            return True
-        if n % 2 == 0:
-            return False
-
-        sqrt_n = int(math.floor(math.sqrt(n)))
-        for i in range(3, sqrt_n + 1, 2):
-            if n % i == 0:
-                return False
-        return True
-
-
-    @asynccontextmanager
-    async def worker_pool(workers=4):
-        """Though it's a trivial special case for ``tractor``, the well
-        known "worker pool" seems to be the de facto "but, I want this
-        process pattern!" for most parallelism pilgrims.
-
-        Yes, the workers stay alive (and ready for work) until you close
-        the context.
-        """
-        async with tractor.open_nursery() as tn:
-
-            portals = []
-            snd_chan, recv_chan = trio.open_memory_channel(len(PRIMES))
-
-            for i in range(workers):
-
-                # this starts a new sub-actor (process + trio runtime) and
-                # stores its "portal" for later use to "submit jobs" (ugh).
-                portals.append(
-                    await tn.start_actor(
-                        f'worker_{i}',
-                        enable_modules=[__name__],
-                    )
-                )
-
-            async def _map(
-                worker_func: Callable[[int], bool],
-                sequence: List[int]
-            ) -> List[bool]:
-
-                # define an async (local) task to collect results from workers
-                async def send_result(func, value, portal):
-                    await snd_chan.send((value, await portal.run(func, n=value)))
-
-                async with trio.open_nursery() as n:
-
-                    for value, portal in zip(sequence, itertools.cycle(portals)):
-                        n.start_soon(
-                            send_result,
-                            worker_func,
-                            value,
-                            portal
-                        )
-
-                    # deliver results as they arrive
-                    for _ in range(len(sequence)):
-                        yield await recv_chan.receive()
-
-            # deliver the parallel "worker mapper" to user code
-            yield _map
-
-            # tear down all "workers" on pool close
-            await tn.cancel()
+    async def breakpoint_forever():
+        "Indefinitely re-enter debugger in child actor."
+        while True:
+            yield 'yo'
+            await tractor.breakpoint()
+
+
+    async def name_error():
+        "Raise a ``NameError``"
+        getattr(doggypants)


     async def main():
-
-        async with worker_pool() as actor_map:
-
-            start = time.time()
-
-            # retrieve results
-            async with aclosing(actor_map(is_prime, PRIMES)) as results:
-                async for number, prime in results:
-
-                    print(f'{number} is prime: {prime}')
-
-            print(f'processing took {time.time() - start} seconds')
+        """Test breakpoint in a streaming actor.
+        """
+        async with tractor.open_nursery(
+            debug_mode=True,
+            loglevel='error',
+        ) as n:
+
+            p0 = await n.start_actor('bp_forever', enable_modules=[__name__])
+            p1 = await n.start_actor('name_error', enable_modules=[__name__])
+
+            # retrieve results
+            stream = await p0.run(breakpoint_forever)
+            await p1.run(name_error)


     if __name__ == '__main__':
-        start = time.time()
         trio.run(main)
-        print(f'script took {time.time() - start} seconds')
+
+
+You can run this with::
+
+    >>> python examples/debugging/multi_daemon_subactors.py
+
+And, yes, there's a built-in crash handling mode B)
+We're hoping to add a respawn-from-repl system soon!


 Feel like saying hi?