Use support.sleeping_retry() and support.busy_retry() (#93848)

* Replace time.sleep(0.010) with sleeping_retry() to
  use an exponential sleep.
* support.wait_process(): reuse sleeping_retry().
* _test_eintr: remove unused variables.
This commit is contained in:
Victor Stinner 2022-06-15 14:09:56 +02:00 committed by GitHub
parent bddbd80cff
commit 0ba80273f2
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
7 changed files with 51 additions and 58 deletions

View file

@@ -403,11 +403,9 @@ class SignalEINTRTest(EINTRBaseTest):
old_mask = signal.pthread_sigmask(signal.SIG_BLOCK, [signum])
self.addCleanup(signal.pthread_sigmask, signal.SIG_UNBLOCK, [signum])
t0 = time.monotonic()
proc = self.subprocess(code)
with kill_on_error(proc):
wait_func(signum)
dt = time.monotonic() - t0
self.assertEqual(proc.wait(), 0)
@@ -497,16 +495,18 @@ class FNTLEINTRTest(EINTRBaseTest):
proc = self.subprocess(code)
with kill_on_error(proc):
with open(os_helper.TESTFN, 'wb') as f:
while True: # synchronize the subprocess
dt = time.monotonic() - start_time
if dt > 60.0:
raise Exception("failed to sync child in %.1f sec" % dt)
# synchronize the subprocess
start_time = time.monotonic()
for _ in support.sleeping_retry(60.0, error=False):
try:
lock_func(f, fcntl.LOCK_EX | fcntl.LOCK_NB)
lock_func(f, fcntl.LOCK_UN)
time.sleep(0.01)
except BlockingIOError:
break
else:
dt = time.monotonic() - start_time
raise Exception("failed to sync child in %.1f sec" % dt)
# the child locked the file just a moment ago for 'sleep_time' seconds
# that means that the lock below will block for 'sleep_time' minus some
# potential context switch delay

View file

@@ -28,16 +28,15 @@ class InterProcessSignalTests(unittest.TestCase):
# (if set)
child.wait()
timeout = support.SHORT_TIMEOUT
deadline = time.monotonic() + timeout
while time.monotonic() < deadline:
start_time = time.monotonic()
for _ in support.busy_retry(support.SHORT_TIMEOUT, error=False):
if self.got_signals[signame]:
return
signal.pause()
self.fail('signal %s not received after %s seconds'
% (signame, timeout))
else:
dt = time.monotonic() - start_time
self.fail('signal %s not received after %.1f seconds'
% (signame, dt))
def subprocess_send_signal(self, pid, signame):
code = 'import os, signal; os.kill(%s, signal.%s)' % (pid, signame)

View file

@@ -2072,31 +2072,26 @@ def wait_process(pid, *, exitcode, timeout=None):
if timeout is None:
timeout = SHORT_TIMEOUT
t0 = time.monotonic()
sleep = 0.001
max_sleep = 0.1
while True:
start_time = time.monotonic()
for _ in sleeping_retry(timeout, error=False):
pid2, status = os.waitpid(pid, os.WNOHANG)
if pid2 != 0:
break
# process is still running
# retry: the process is still running
else:
try:
os.kill(pid, signal.SIGKILL)
os.waitpid(pid, 0)
except OSError:
# Ignore errors like ChildProcessError or PermissionError
pass
dt = time.monotonic() - t0
if dt > SHORT_TIMEOUT:
try:
os.kill(pid, signal.SIGKILL)
os.waitpid(pid, 0)
except OSError:
# Ignore errors like ChildProcessError or PermissionError
pass
raise AssertionError(f"process {pid} is still running "
f"after {dt:.1f} seconds")
sleep = min(sleep * 2, max_sleep)
time.sleep(sleep)
dt = time.monotonic() - start_time
raise AssertionError(f"process {pid} is still running "
f"after {dt:.1f} seconds")
else:
# Windows implementation
# Windows implementation: don't support timeout :-(
pid2, status = os.waitpid(pid, 0)
exitcode2 = os.waitstatus_to_exitcode(status)

View file

@@ -88,19 +88,17 @@ def wait_threads_exit(timeout=None):
yield
finally:
start_time = time.monotonic()
deadline = start_time + timeout
while True:
for _ in support.sleeping_retry(timeout, error=False):
support.gc_collect()
count = _thread._count()
if count <= old_count:
break
if time.monotonic() > deadline:
dt = time.monotonic() - start_time
msg = (f"wait_threads() failed to cleanup {count - old_count} "
f"threads after {dt:.1f} seconds "
f"(count: {count}, old count: {old_count})")
raise AssertionError(msg)
time.sleep(0.010)
support.gc_collect()
else:
dt = time.monotonic() - start_time
msg = (f"wait_threads() failed to cleanup {count - old_count} "
f"threads after {dt:.1f} seconds "
f"(count: {count}, old count: {old_count})")
raise AssertionError(msg)
def join_thread(thread, timeout=None):

View file

@@ -109,13 +109,12 @@ def run_briefly(loop):
def run_until(loop, pred, timeout=support.SHORT_TIMEOUT):
deadline = time.monotonic() + timeout
while not pred():
if timeout is not None:
timeout = deadline - time.monotonic()
if timeout <= 0:
raise futures.TimeoutError()
for _ in support.busy_retry(timeout, error=False):
if pred():
break
loop.run_until_complete(tasks.sleep(0.001))
else:
raise futures.TimeoutError()
def run_once(loop):

View file

@@ -76,8 +76,7 @@ def capture_server(evt, buf, serv):
pass
else:
n = 200
start = time.monotonic()
while n > 0 and time.monotonic() - start < 3.0:
for _ in support.busy_retry(3.0, error=False):
r, w, e = select.select([conn], [], [], 0.1)
if r:
n -= 1
@@ -86,6 +85,8 @@ def capture_server(evt, buf, serv):
buf.write(data.replace(b'\n', b''))
if b'\n' in data:
break
if n <= 0:
break
time.sleep(0.01)
conn.close()

View file

@@ -3602,7 +3602,6 @@ class ConfigDictTest(BaseTest):
if lspec is not None:
cd['handlers']['ah']['listener'] = lspec
qh = None
delay = 0.01
try:
self.apply_config(cd)
qh = logging.getHandlerByName('ah')
@@ -3612,12 +3611,14 @@ class ConfigDictTest(BaseTest):
logging.debug('foo')
logging.info('bar')
logging.warning('baz')
# Need to let the listener thread finish its work
deadline = time.monotonic() + support.LONG_TIMEOUT
while not qh.listener.queue.empty():
time.sleep(delay)
if time.monotonic() > deadline:
self.fail("queue not empty")
while support.sleeping_retry(support.LONG_TIMEOUT, error=False):
if qh.listener.queue.empty():
break
else:
self.fail("queue not empty")
with open(fn, encoding='utf-8') as f:
data = f.read().splitlines()
self.assertEqual(data, ['foo', 'bar', 'baz'])