import outcome
import pytest
import time

from .._core.tests.tutil import slow
from .. import _core
from ..testing import assert_checkpoints
from .._timeouts import *


async def check_takes_about(f, expected_dur):
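    """Run ``f``, assert that it takes roughly ``expected_dur`` seconds
    (within the tolerances explained below), and return its result,
    re-raising any exception ``f`` raised."""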
    start = time.perf_counter()
    result = await outcome.acapture(f)
    dur = time.perf_counter() - start
    print(dur / expected_dur)
    # 1.5 is an arbitrary fudge factor because there's always some delay
    # between when we become eligible to wake up and when we actually do. We
    # used to sleep for 0.05, and regularly observed overruns of 1.6x on
    # Appveyor, and then started seeing overruns of 2.3x on Travis's macOS, so
    # now we bumped up the sleep to 1 second, marked the tests as slow, and
    # hopefully now the proportional error will be less huge.
    #
    # We also allow for durations that are a hair shorter than expected. For
    # example, here's a run on Windows where a 1.0 second sleep was measured
    # to take 0.9999999999999858 seconds:
    # https://ci.appveyor.com/project/njsmith/trio/build/1.0.768/job/3lbdyxl63q3h9s21
    # I believe that what happened here is that Windows's low clock resolution
    # meant that our calls to time.perf_counter() returned exactly the same
    # values as the calls inside the actual run loop, but the two subtractions
    # returned slightly different values because the run loop's clock adds a
    # random floating point offset to both times, which should cancel out, but
    # lol floating point we got slightly different rounding errors. (That
    # value above is exactly 128 ULPs below 1.0, which would make sense if it
    # started as a 1 ULP error at a different dynamic range.)
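    #
    # A quick sanity check of that 128-ULP figure (floats just below 1.0
    # are spaced 2**-53 apart):
    #
    #     >>> 1.0 - 128 * 2**-53
    #     0.9999999999999858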
    assert (1 - 1e-8) <= (dur / expected_dur) < 1.5
    return result.unwrap()


# How long to (attempt to) sleep for when testing. Smaller numbers make the
# test suite go faster.
TARGET = 1.0


@slow
async def test_sleep():
    async def sleep_1():
        await sleep_until(_core.current_time() + TARGET)

    await check_takes_about(sleep_1, TARGET)

    async def sleep_2():
        await sleep(TARGET)

    await check_takes_about(sleep_2, TARGET)

    with pytest.raises(ValueError):
        await sleep(-1)
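
    # sleep(0) still counts as a full checkpoint: it must yield to the
    # scheduler and act as a cancellation point, which is what
    # assert_checkpoints verifies.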
    with assert_checkpoints():
        await sleep(0)
    # This also serves as a test of the trivial move_on_at: the deadline is
    # already in the past, so the checkpoint in sleep(0) gets cancelled
    # immediately.
    with move_on_at(_core.current_time()):
        with pytest.raises(_core.Cancelled):
            await sleep(0)


@slow
async def test_move_on_after():
    with pytest.raises(ValueError):
        with move_on_after(-1):
            pass  # pragma: no cover

    async def sleep_3():
        with move_on_after(TARGET):
            await sleep(100)

    await check_takes_about(sleep_3, TARGET)


@slow
async def test_fail():
    async def sleep_4():
        with fail_at(_core.current_time() + TARGET):
            await sleep(100)

    with pytest.raises(TooSlowError):
        await check_takes_about(sleep_4, TARGET)

    with fail_at(_core.current_time() + 100):
        await sleep(0)

    async def sleep_5():
        with fail_after(TARGET):
            await sleep(100)

    with pytest.raises(TooSlowError):
        await check_takes_about(sleep_5, TARGET)

    with fail_after(100):
        await sleep(0)

    with pytest.raises(ValueError):
        with fail_after(-1):
            pass  # pragma: no cover