Allow choosing the spawn backend per test session
Add a `--spawn-backend` option which can be set to one of {'mp', 'trio_run_in_process'}, running the test suite with either the `multiprocessing` or `trio-run-in-process` backend respectively. Currently, trying to run both backends in the same session can result in hangs, seemingly due to a lack of cleanup of forkservers / resource trackers from `multiprocessing`, which causes occasional broken-pipe errors (no idea on the details). For `test_cancellation.py::test_nested_multierrors`, use less nesting when mp is used since it breaks if we push the recursive subprocess spawning too hard.
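
As a usage sketch (not part of this commit): with the default backend you run `pytest` as before, while `pytest --spawn-backend=mp` switches the whole session to `multiprocessing`; any test that declares a `start_method` argument is then parametrized per allowed method by the `pytest_generate_tests` hook in the diff below. The test name and assertion here are hypothetical:

```python
# Hypothetical test, not from the suite: `start_method` is injected and
# parametrized by the conftest hooks shown in this commit.
def test_runs_under_each_spawn_method(start_method):
    # 'spawn'/'forkserver' when --spawn-backend=mp is used,
    # 'trio_run_in_process' under the default backend
    assert start_method in ('spawn', 'forkserver', 'trio_run_in_process')
```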
parent 27c9760f96
commit ecced3d09a
@@ -14,8 +14,24 @@ _arb_addr = '127.0.0.1', random.randint(1000, 9999)
 
 
 def pytest_addoption(parser):
-    parser.addoption("--ll", action="store", dest='loglevel',
-                     default=None, help="logging level to set when testing")
+    parser.addoption(
+        "--ll", action="store", dest='loglevel',
+        default=None, help="logging level to set when testing"
+    )
+
+    parser.addoption(
+        "--spawn-backend", action="store", dest='spawn_backend',
+        default='trio_run_in_process',
+        help="Processing spawning backend to use for test run",
+    )
+
+
+def pytest_configure(config):
+    backend = config.option.spawn_backend
+    if backend == 'mp':
+        tractor._spawn.try_set_start_method('spawn')
+    elif backend == 'trio_run_in_process':
+        tractor._spawn.try_set_start_method(backend)
 
 
 @pytest.fixture(scope='session', autouse=True)
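
Not in the diff, but for clarity: since the option is registered with `dest='spawn_backend'`, the parsed value is reachable from any hook or fixture as `config.option.spawn_backend` or via `config.getoption("spawn_backend")` (the same call the `pytest_generate_tests` hook below uses). A minimal, purely illustrative convenience fixture built on that:

```python
import pytest


@pytest.fixture(scope='session')
def spawn_backend(request) -> str:
    # hypothetical helper; equivalent to reading
    # request.config.option.spawn_backend directly
    return request.config.getoption("spawn_backend")
```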
@@ -32,16 +48,21 @@ def arb_addr():
 
 
 def pytest_generate_tests(metafunc):
+    spawn_backend = metafunc.config.getoption("spawn_backend")
+    assert spawn_backend in ('mp', 'trio_run_in_process')
+
     if 'start_method' in metafunc.fixturenames:
-        from multiprocessing import get_all_start_methods
-        methods = get_all_start_methods()
-
-        if platform.system() != "Windows":
-            methods = ['trio_run_in_process']
-
-        if 'fork' in methods:  # fork not available on windows, so check before removing
-            # XXX: the fork method is in general incompatible with
-            # trio's global scheduler state
-            methods.remove('fork')
+        if spawn_backend == 'mp':
+            from multiprocessing import get_all_start_methods
+            methods = get_all_start_methods()
+            if 'fork' in methods:  # fork not available on windows, so check before removing
+                # XXX: the fork method is in general incompatible with
+                # trio's global scheduler state
+                methods.remove('fork')
+        elif spawn_backend == 'trio_run_in_process':
+            if platform.system() == "Windows":
+                pytest.fail("Only `--spawn-backend=mp` is supported on Windows")
         metafunc.parametrize("start_method", methods, scope='module')
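
The `fork` removal above leans on `multiprocessing.get_all_start_methods()`, whose result is platform dependent, which is why the hook checks membership before removing. A quick way to see what the current host reports (the commented output is only a typical Linux example):

```python
from multiprocessing import get_all_start_methods

# 'fork' is absent on Windows; ordering and availability vary by platform
print(get_all_start_methods())  # e.g. ['fork', 'spawn', 'forkserver'] on Linux
```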
@@ -284,21 +284,21 @@ async def spawn_and_error(breadth, depth) -> None:
 
 
 @tractor_test
-async def test_nested_multierrors(loglevel):
+async def test_nested_multierrors(loglevel, start_method):
     """Test that failed actor sets are wrapped in `trio.MultiError`s.
     This test goes only 2 nurseries deep but we should eventually have tests
     for arbitrary n-depth actor trees.
     """
-    # if start_method == 'trio_run_in_process':
-    depth = 2
-    subactor_breadth = 3
-    # else:
-    #     # XXX: multiprocessing can't seem to handle any more then 2 depth
-    #     # process trees for whatever reason.
-    #     # Any more process levels then this and we see bugs that cause
-    #     # hangs and broken pipes all over the place...
-    #     depth = 1
-    #     subactor_breadth = 2
+    if start_method == 'trio_run_in_process':
+        depth = 3
+        subactor_breadth = 2
+    else:
+        # XXX: multiprocessing can't seem to handle any more then 2 depth
+        # process trees for whatever reason.
+        # Any more process levels then this and we see bugs that cause
+        # hangs and broken pipes all over the place...
+        depth = 2
+        subactor_breadth = 2
 
     with trio.fail_after(120):
         try:
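
For background on what the test asserts, here is a rough standalone sketch (not from the suite) of how a set of failing children surfaces as one aggregate error from a `trio` nursery: a `trio.MultiError` on trio releases contemporary with this commit, a `BaseExceptionGroup` on newer ones:

```python
import trio


async def boom(n: int) -> None:
    raise RuntimeError(f"child {n} failed")


async def main() -> None:
    try:
        async with trio.open_nursery() as nursery:
            # several children fail, so the nursery aggregates their errors
            for i in range(3):
                nursery.start_soon(boom, i)
    except BaseException as err:
        # trio.MultiError on older trio, BaseExceptionGroup on newer trio
        print(type(err).__name__, '->', err)


trio.run(main)
```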
@@ -60,7 +60,7 @@ def test_rpc_errors(arb_addr, to_call, testdir):
     if exposed_mods == ['tmp_mod']:
         # create an importable module with a bad import
         testdir.syspathinsert()
-        # module should cause a raise of a ModuleNotFoundError at import
+        # module should raise a ModuleNotFoundError at import
         testdir.makefile('.py', tmp_mod=funcname)
 
         # no need to exposed module to the subactor
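
A minimal standalone sketch of the same `testdir` pattern (hypothetical test and module body, not from the suite; assumes pytest's `pytester` plugin is enabled, as it must already be for the suite's own use of `testdir`):

```python
import importlib

import pytest


def test_bad_import_module(testdir):
    testdir.syspathinsert()  # make the tmp test dir importable
    # hypothetical module body importing a package that doesn't exist
    testdir.makefile('.py', tmp_mod="import doggykickers_2000")
    with pytest.raises(ModuleNotFoundError):
        importlib.import_module('tmp_mod')
```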
@@ -19,6 +19,7 @@ def tractor_test(fn):
 
     - ``arb_addr`` (a socket addr tuple where arbiter is listening)
     - ``loglevel`` (logging level passed to tractor internals)
+    - ``start_method`` (subprocess spawning backend)
 
     are defined in the `pytest` fixture space they will be automatically
     injected to tests declaring these funcargs.
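
Usage-wise, a decorated test simply declares whichever of those funcargs it needs (hypothetical example, not from the diff):

```python
@tractor_test
async def test_example(arb_addr, loglevel, start_method):
    # each argument is filled from the pytest fixture of the same name
    ...
```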
@@ -41,8 +42,7 @@ def tractor_test(fn):
             # that activates the internal logging
             kwargs['loglevel'] = loglevel
         if 'start_method' in inspect.signature(fn).parameters:
-            # allows test suites to define a 'loglevel' fixture
-            # that activates the internal logging
+            # set of subprocess spawning backends
            kwargs['start_method'] = start_method
         return run(
             partial(fn, *args, **kwargs),
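
A rough, self-contained sketch of the injection pattern used here (illustrative only; `tractor`'s real decorator differs): inspect the wrapped coroutine's signature, forward only the funcargs it declares, then drive it with `trio.run` via `functools.partial`:

```python
import inspect
from functools import partial, wraps

import trio


def inject_and_run(fn):
    # hypothetical stand-in for the tractor_test decorator
    @wraps(fn)
    def wrapper(*args, loglevel=None, start_method=None, **kwargs):
        params = inspect.signature(fn).parameters
        if 'loglevel' in params:
            kwargs['loglevel'] = loglevel
        if 'start_method' in params:
            kwargs['start_method'] = start_method
        return trio.run(partial(fn, *args, **kwargs))

    return wrapper


@inject_and_run
async def demo(start_method):
    print("running with start_method =", start_method)


demo(start_method='spawn')
```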