forked from goodboy/tractor

Add timeouts around some context test bodies

Since, with my in-index runtime port to our native msg-spec, it seems
these ones are hanging B(

- `test_one_end_stream_not_opened()`
- `test_maybe_allow_overruns_stream()`

Tossing in some `trio.fail_after()`s seems to at least nab them as
failures B)
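
For reference, `trio.fail_after()` raises `trio.TooSlowError` if its block
does not finish before the deadline, so a hung test body surfaces as a
failure instead of blocking the run forever. A minimal, self-contained
sketch of the pattern (illustrative only, not the test code itself):

    import trio


    async def possibly_hanging_body() -> None:
        # stand-in for a test body that never completes
        await trio.sleep_forever()


    async def main() -> None:
        try:
            # the deadline converts a silent hang into an exception
            with trio.fail_after(0.8):
                await possibly_hanging_body()
        except trio.TooSlowError:
            print('body hung -> reported as a failure')


    if __name__ == '__main__':
        trio.run(main)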
msg_codecs
Tyler Goodlet 2024-04-02 13:33:06 -04:00
parent 3aa964315a
commit f2ce4a3469
1 changed file with 74 additions and 60 deletions


@@ -6,6 +6,7 @@ sync-opening a ``tractor.Context`` beforehand.
 '''
 from itertools import count
+import math
 import platform
 from pprint import pformat
 from typing import (
@@ -845,7 +846,10 @@ async def keep_sending_from_callee(
         ('caller', 1, never_open_stream),
         ('callee', 0, keep_sending_from_callee),
     ],
-    ids='overrun_condition={}'.format,
+    ids=[
+        ('caller_1buf_never_open_stream'),
+        ('callee_0buf_keep_sending_from_callee'),
+    ]
 )
 def test_one_end_stream_not_opened(
     overrun_by: tuple[str, int, Callable],
@@ -869,6 +873,7 @@ def test_one_end_stream_not_opened(
                 enable_modules=[__name__],
             )
+            with trio.fail_after(0.8):
                 async with portal.open_context(
                     entrypoint,
                 ) as (ctx, sent):
@@ -1055,7 +1060,17 @@ def test_maybe_allow_overruns_stream(
         loglevel=loglevel,
         debug_mode=debug_mode,
     )
-    seq = list(range(10))
+
+    # stream-sequence batch info with send delay to determine
+    # approx timeout determining whether test has hung.
+    total_batches: int = 2
+    num_items: int = 10
+    seq = list(range(num_items))
+    parent_send_delay: float = 0.16
+    timeout: float = math.ceil(
+        total_batches * num_items * parent_send_delay
+    )
+    with trio.fail_after(timeout):
        async with portal.open_context(
            echo_back_sequence,
            seq=seq,
@@ -1071,7 +1086,6 @@ def test_maybe_allow_overruns_stream(
                allow_overruns=(allow_overruns_side in {'parent', 'both'}),
            ) as stream:
-               total_batches: int = 2
                for _ in range(total_batches):
                    for msg in seq:
                        # print(f'root tx {msg}')
@@ -1080,7 +1094,7 @@ def test_maybe_allow_overruns_stream(
                        # NOTE: we make the parent slightly
                        # slower, when it is slow, to make sure
                        # that in the overruns everywhere case
-                       await trio.sleep(0.16)
+                       await trio.sleep(parent_send_delay)
                        batch = []
                        async for msg in stream:
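
The timeout added to `test_maybe_allow_overruns_stream()` is derived from
the stream parameters above; restated on its own (same values as in the
diff), the bound works out to roughly 4 seconds:

    import math

    total_batches: int = 2
    num_items: int = 10
    parent_send_delay: float = 0.16

    # ceil(2 * 10 * 0.16) == ceil(3.2) == 4
    timeout: float = math.ceil(
        total_batches * num_items * parent_send_delay
    )
    assert timeout == 4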