diff --git a/.codecov.yml b/.codecov.yml index b66d33a41d..fd577eb919 100644 --- a/.codecov.yml +++ b/.codecov.yml @@ -21,9 +21,13 @@ comment: coverage: # required range - range: 99.6..100 + precision: 5 + round: down + range: 100..100 status: - # require patches to be 100% - patch: + project: default: target: 100% + patch: + default: + target: 100% # require patches to be 100% diff --git a/newsfragments/3159.misc.rst b/newsfragments/3159.misc.rst new file mode 100644 index 0000000000..9460e11c65 --- /dev/null +++ b/newsfragments/3159.misc.rst @@ -0,0 +1 @@ +Get and enforce 100% coverage diff --git a/pyproject.toml b/pyproject.toml index ac8e8fc41d..bbc865ac6d 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -308,18 +308,16 @@ precision = 1 skip_covered = true skip_empty = true show_missing = true -exclude_lines = [ - "pragma: no cover", - "abc.abstractmethod", - "if TYPE_CHECKING.*:", - "if _t.TYPE_CHECKING:", - "if t.TYPE_CHECKING:", - "@overload", - 'class .*\bProtocol\b.*\):', - "raise NotImplementedError", -] exclude_also = [ '^\s*@pytest\.mark\.xfail', + "abc.abstractmethod", + "if TYPE_CHECKING.*:", + "if _t.TYPE_CHECKING:", + "if t.TYPE_CHECKING:", + "@overload", + 'class .*\bProtocol\b.*\):', + "raise NotImplementedError", + 'TODO: test this line' ] partial_branches = [ "pragma: no branch", @@ -329,4 +327,5 @@ partial_branches = [ "if .* or not TYPE_CHECKING:", "if .* or not _t.TYPE_CHECKING:", "if .* or not t.TYPE_CHECKING:", + 'TODO: test this branch', ] diff --git a/src/trio/_core/_io_kqueue.py b/src/trio/_core/_io_kqueue.py index 6c440920d3..9718c4df80 100644 --- a/src/trio/_core/_io_kqueue.py +++ b/src/trio/_core/_io_kqueue.py @@ -81,7 +81,7 @@ def get_events(self, timeout: float) -> EventResult: events += batch if len(batch) < max_events: break - else: + else: # TODO: test this line timeout = 0 # and loop back to the start return events @@ -93,12 +93,12 @@ def process_events(self, events: EventResult) -> None: self._force_wakeup.drain() continue receiver = self._registered[key] - if event.flags & select.KQ_EV_ONESHOT: + if event.flags & select.KQ_EV_ONESHOT: # TODO: test this branch del self._registered[key] if isinstance(receiver, _core.Task): _core.reschedule(receiver, outcome.Value(event)) else: - receiver.put_nowait(event) + receiver.put_nowait(event) # TODO: test this line # kevent registration is complicated -- e.g. aio submission can # implicitly perform a EV_ADD, and EVFILT_PROC with NOTE_TRACK will @@ -162,7 +162,7 @@ async def wait_kevent( def abort(raise_cancel: RaiseCancelT) -> Abort: r = abort_func(raise_cancel) - if r is _core.Abort.SUCCEEDED: + if r is _core.Abort.SUCCEEDED: # TODO: test this branch del self._registered[key] return r diff --git a/src/trio/_core/_io_windows.py b/src/trio/_core/_io_windows.py index 55c4e49e7d..148253ab88 100644 --- a/src/trio/_core/_io_windows.py +++ b/src/trio/_core/_io_windows.py @@ -856,9 +856,9 @@ async def wait_overlapped( `__. 
""" handle = _handle(handle_) - if isinstance(lpOverlapped, int): + if isinstance(lpOverlapped, int): # TODO: test this line lpOverlapped = ffi.cast("LPOVERLAPPED", lpOverlapped) - if lpOverlapped in self._overlapped_waiters: + if lpOverlapped in self._overlapped_waiters: # TODO: test this line raise _core.BusyResourceError( "another task is already waiting on that lpOverlapped", ) diff --git a/src/trio/_core/_run.py b/src/trio/_core/_run.py index bfb38f480f..5dbaa18cab 100644 --- a/src/trio/_core/_run.py +++ b/src/trio/_core/_run.py @@ -115,7 +115,8 @@ def _public(fn: RetT) -> RetT: _r = random.Random() -def _hypothesis_plugin_setup() -> None: +# no cover because we don't check the hypothesis plugin works with hypothesis +def _hypothesis_plugin_setup() -> None: # pragma: no cover from hypothesis import register_random global _ALLOW_DETERMINISTIC_SCHEDULING diff --git a/src/trio/_core/_tests/test_io.py b/src/trio/_core/_tests/test_io.py index 48b99d387d..379daa025e 100644 --- a/src/trio/_core/_tests/test_io.py +++ b/src/trio/_core/_tests/test_io.py @@ -1,7 +1,9 @@ from __future__ import annotations import random +import select import socket as stdlib_socket +import sys from collections.abc import Awaitable, Callable from contextlib import suppress from typing import TYPE_CHECKING, TypeVar @@ -343,6 +345,7 @@ def check(*, expected_readers: int, expected_writers: int) -> None: assert iostats.tasks_waiting_write == expected_writers else: assert iostats.backend == "kqueue" + assert iostats.monitors == 0 assert iostats.tasks_waiting == expected_readers + expected_writers a1, b1 = stdlib_socket.socketpair() @@ -381,6 +384,44 @@ def check(*, expected_readers: int, expected_writers: int) -> None: check(expected_readers=1, expected_writers=0) +@pytest.mark.filterwarnings("ignore:.*UnboundedQueue:trio.TrioDeprecationWarning") +async def test_io_manager_kqueue_monitors_statistics() -> None: + def check( + *, + expected_monitors: int, + expected_readers: int, + expected_writers: int, + ) -> None: + statistics = _core.current_statistics() + print(statistics) + iostats = statistics.io_statistics + assert iostats.backend == "kqueue" + assert iostats.monitors == expected_monitors + assert iostats.tasks_waiting == expected_readers + expected_writers + + a1, b1 = stdlib_socket.socketpair() + for sock in [a1, b1]: + sock.setblocking(False) + + with a1, b1: + # let the call_soon_task settle down + await wait_all_tasks_blocked() + + if sys.platform != "win32" and sys.platform != "linux": + # 1 for call_soon_task + check(expected_monitors=0, expected_readers=1, expected_writers=0) + + with _core.monitor_kevent(a1.fileno(), select.KQ_FILTER_READ): + with ( + pytest.raises(_core.BusyResourceError), + _core.monitor_kevent(a1.fileno(), select.KQ_FILTER_READ), + ): + pass # pragma: no cover + check(expected_monitors=1, expected_readers=1, expected_writers=0) + + check(expected_monitors=0, expected_readers=1, expected_writers=0) + + async def test_can_survive_unnotified_close() -> None: # An "unnotified" close is when the user closes an fd/socket/handle # directly, without calling notify_closing first. 
This should never happen diff --git a/src/trio/_core/_tests/test_run.py b/src/trio/_core/_tests/test_run.py index f7ac155ba9..0d1cf46722 100644 --- a/src/trio/_core/_tests/test_run.py +++ b/src/trio/_core/_tests/test_run.py @@ -11,6 +11,7 @@ from contextlib import ExitStack, contextmanager, suppress from math import inf, nan from typing import TYPE_CHECKING, NoReturn, TypeVar +from unittest import mock import outcome import pytest @@ -26,7 +27,7 @@ assert_checkpoints, wait_all_tasks_blocked, ) -from .._run import DEADLINE_HEAP_MIN_PRUNE_THRESHOLD +from .._run import DEADLINE_HEAP_MIN_PRUNE_THRESHOLD, _count_context_run_tb_frames from .tutil import ( check_sequence_matches, create_asyncio_future_in_new_loop, @@ -371,6 +372,15 @@ async def test_cancel_scope_validation() -> None: match="^Cannot specify both a deadline and a relative deadline$", ): _core.CancelScope(deadline=7, relative_deadline=3) + + with pytest.raises(ValueError, match="^deadline must not be NaN$"): + _core.CancelScope(deadline=nan) + with pytest.raises(ValueError, match="^relative deadline must not be NaN$"): + _core.CancelScope(relative_deadline=nan) + + with pytest.raises(ValueError, match="^timeout must be non-negative$"): + _core.CancelScope(relative_deadline=-3) + scope = _core.CancelScope() with pytest.raises(ValueError, match="^deadline must not be NaN$"): @@ -2836,3 +2846,12 @@ async def handle_error() -> None: assert isinstance(exc, MyException) assert gc.get_referrers(exc) == no_other_refs() + + +def test_context_run_tb_frames() -> None: + class Context: + def run(self, fn: Callable[[], object]) -> object: + return fn() + + with mock.patch("trio._core._run.copy_context", return_value=Context()): + assert _count_context_run_tb_frames() == 1 diff --git a/src/trio/_dtls.py b/src/trio/_dtls.py index 7f4bccc9ed..a7709632a4 100644 --- a/src/trio/_dtls.py +++ b/src/trio/_dtls.py @@ -58,7 +58,7 @@ def worst_case_mtu(sock: SocketType) -> int: if sock.family == trio.socket.AF_INET: return 576 - packet_header_overhead(sock) else: - return 1280 - packet_header_overhead(sock) + return 1280 - packet_header_overhead(sock) # TODO: test this line def best_guess_mtu(sock: SocketType) -> int: @@ -222,7 +222,7 @@ def decode_handshake_fragment_untrusted(payload: bytes) -> HandshakeFragment: frag_offset_bytes, frag_len_bytes, ) = HANDSHAKE_MESSAGE_HEADER.unpack_from(payload) - except struct.error as exc: + except struct.error as exc: # TODO: test this line raise BadPacket("bad handshake message header") from exc # 'struct' doesn't have built-in support for 24-bit integers, so we # have to do it by hand. These can't fail. @@ -425,14 +425,14 @@ def encode_volley( for message in messages: if isinstance(message, OpaqueHandshakeMessage): encoded = encode_record(message.record) - if mtu - len(packet) - len(encoded) <= 0: + if mtu - len(packet) - len(encoded) <= 0: # TODO: test this line packets.append(packet) packet = bytearray() packet += encoded assert len(packet) <= mtu elif isinstance(message, PseudoHandshakeMessage): space = mtu - len(packet) - RECORD_HEADER.size - len(message.payload) - if space <= 0: + if space <= 0: # TODO: test this line packets.append(packet) packet = bytearray() packet += RECORD_HEADER.pack( @@ -1039,7 +1039,7 @@ def read_volley() -> list[_AnyHandshakeMessage]: if ( isinstance(maybe_volley[0], PseudoHandshakeMessage) and maybe_volley[0].content_type == ContentType.alert - ): + ): # TODO: test this line # we're sending an alert (e.g. due to a corrupted # packet). 
We want to send it once, but don't save it to # retransmit -- keep the last volley as the current @@ -1326,9 +1326,8 @@ async def handler(dtls_channel): raise trio.BusyResourceError("another task is already listening") try: self.socket.getsockname() - except OSError: - # TODO: Write test that triggers this - raise RuntimeError( # pragma: no cover + except OSError: # TODO: test this line + raise RuntimeError( "DTLS socket must be bound before it can serve", ) from None self._ensure_receive_loop() diff --git a/src/trio/_tests/test_dtls.py b/src/trio/_tests/test_dtls.py index 3f8ee2f05c..141e891586 100644 --- a/src/trio/_tests/test_dtls.py +++ b/src/trio/_tests/test_dtls.py @@ -75,7 +75,9 @@ async def echo_handler(dtls_channel: DTLSChannel) -> None: print("server starting do_handshake") await dtls_channel.do_handshake() print("server finished do_handshake") - async for packet in dtls_channel: + # no branch for leaving this for loop because we only leave + # a channel by cancellation. + async for packet in dtls_channel: # pragma: no branch print(f"echoing {packet!r} -> {dtls_channel.peer_address!r}") await dtls_channel.send(packet) except trio.BrokenResourceError: # pragma: no cover diff --git a/src/trio/_tests/test_exports.py b/src/trio/_tests/test_exports.py index 7d8a7e3c0b..f89d4105e6 100644 --- a/src/trio/_tests/test_exports.py +++ b/src/trio/_tests/test_exports.py @@ -384,8 +384,10 @@ def lookup_symbol(symbol: str) -> dict[str, str]: elif tool == "mypy": # load the cached type information cached_type_info = cache_json["names"][class_name] - if "node" not in cached_type_info: - cached_type_info = lookup_symbol(cached_type_info["cross_ref"]) + assert ( + "node" not in cached_type_info + ), "previously this was an 'if' but it seems it's no longer possible for this cache to contain 'node', if this assert raises for you please let us know!" + cached_type_info = lookup_symbol(cached_type_info["cross_ref"]) assert "node" in cached_type_info node = cached_type_info["node"] diff --git a/src/trio/_tests/test_socket.py b/src/trio/_tests/test_socket.py index ebe94a6ca6..3e960bd9a4 100644 --- a/src/trio/_tests/test_socket.py +++ b/src/trio/_tests/test_socket.py @@ -376,9 +376,7 @@ async def test_sniff_sockopts() -> None: from socket import AF_INET, AF_INET6, SOCK_DGRAM, SOCK_STREAM # generate the combinations of families/types we're testing: - families = [AF_INET] - if can_create_ipv6: - families.append(AF_INET6) + families = (AF_INET, AF_INET6) if can_create_ipv6 else (AF_INET,) sockets = [ stdlib_socket.socket(family, type_) for family in families diff --git a/src/trio/_tests/test_ssl.py b/src/trio/_tests/test_ssl.py index 2a16a0cd13..d271743c7a 100644 --- a/src/trio/_tests/test_ssl.py +++ b/src/trio/_tests/test_ssl.py @@ -210,27 +210,11 @@ def __init__( # we still have to support versions before that, and that means we # need to test renegotiation support, which means we need to force this # to use a lower version where this test server can trigger - # renegotiations. Of course TLS 1.3 support isn't released yet, but - # I'm told that this will work once it is. (And once it is we can - # remove the pragma: no cover too.) Alternatively, we could switch to - # using TLSv1_2_METHOD. 
- # - # Discussion: https://github.com/pyca/pyopenssl/issues/624 - - # This is the right way, but we can't use it until this PR is in a - # released: - # https://github.com/pyca/pyopenssl/pull/861 - # - # if hasattr(SSL, "OP_NO_TLSv1_3"): - # ctx.set_options(SSL.OP_NO_TLSv1_3) - # - # Fortunately pyopenssl uses cryptography under the hood, so we can be - # confident that they're using the same version of openssl + # renegotiations. from cryptography.hazmat.bindings.openssl.binding import Binding b = Binding() - if hasattr(b.lib, "SSL_OP_NO_TLSv1_3"): - ctx.set_options(b.lib.SSL_OP_NO_TLSv1_3) + ctx.set_options(b.lib.SSL_OP_NO_TLSv1_3) # Unfortunately there's currently no way to say "use 1.3 or worse", we # can only disable specific versions. And if the two sides start diff --git a/src/trio/_tests/test_subprocess.py b/src/trio/_tests/test_subprocess.py index 88623a4304..bf6742064d 100644 --- a/src/trio/_tests/test_subprocess.py +++ b/src/trio/_tests/test_subprocess.py @@ -16,6 +16,7 @@ Any, NoReturn, ) +from unittest import mock import pytest @@ -81,13 +82,6 @@ def SLEEP(seconds: int) -> list[str]: return python(f"import time; time.sleep({seconds})") -def got_signal(proc: Process, sig: SignalType) -> bool: - if (not TYPE_CHECKING and posix) or sys.platform != "win32": - return proc.returncode == -sig - else: - return proc.returncode != 0 - - @asynccontextmanager # type: ignore[misc] # Any in decorated async def open_process_then_kill( *args: Any, @@ -146,6 +140,26 @@ async def test_basic(background_process: BackgroundProcessType) -> None: ) + +@background_process_param +async def test_basic_no_pidfd(background_process: BackgroundProcessType) -> None: + with mock.patch("trio._subprocess.can_try_pidfd_open", new=False): + async with background_process(EXIT_TRUE) as proc: + assert proc._pidfd is None + await proc.wait() + assert isinstance(proc, Process) + assert proc._pidfd is None + assert proc.returncode == 0 + assert repr(proc) == f"<trio.Process {EXIT_TRUE}: exited with status 0>" + + async with background_process(EXIT_FALSE) as proc: + await proc.wait() + assert proc.returncode == 1 + assert repr(proc) == "<trio.Process {!r}: {}>".format( + EXIT_FALSE, + "exited with status 1", + ) + + @background_process_param async def test_auto_update_returncode( background_process: BackgroundProcessType, ) -> None: @@ -181,6 +195,27 @@ async def test_multi_wait(background_process: BackgroundProcessType) -> None: proc.kill() + +@background_process_param +async def test_multi_wait_no_pidfd(background_process: BackgroundProcessType) -> None: + with mock.patch("trio._subprocess.can_try_pidfd_open", new=False): + async with background_process(SLEEP(10)) as proc: + # Check that wait (including multi-wait) tolerates being cancelled + async with _core.open_nursery() as nursery: + nursery.start_soon(proc.wait) + nursery.start_soon(proc.wait) + nursery.start_soon(proc.wait) + await wait_all_tasks_blocked() + nursery.cancel_scope.cancel() + + # Now try waiting for real + async with _core.open_nursery() as nursery: + nursery.start_soon(proc.wait) + nursery.start_soon(proc.wait) + nursery.start_soon(proc.wait) + await wait_all_tasks_blocked() + proc.kill() + + COPY_STDIN_TO_STDOUT_AND_BACKWARD_TO_STDERR = python( "data = sys.stdin.buffer.read(); " "sys.stdout.buffer.write(data); " @@ -524,6 +559,31 @@ async def test_wait_reapable_fails(background_process: BackgroundProcessType) -> signal.signal(signal.SIGCHLD, old_sigchld) + +@pytest.mark.skipif(not posix, reason="POSIX specific") +@background_process_param +async def test_wait_reapable_fails_no_pidfd( + background_process: BackgroundProcessType, +)
-> None: + if TYPE_CHECKING and sys.platform == "win32": + return + with mock.patch("trio._subprocess.can_try_pidfd_open", new=False): + old_sigchld = signal.signal(signal.SIGCHLD, signal.SIG_IGN) + try: + # With SIGCHLD disabled, the wait() syscall will wait for the + # process to exit but then fail with ECHILD. Make sure we + # support this case as the stdlib subprocess module does. + async with background_process(SLEEP(3600)) as proc: + async with _core.open_nursery() as nursery: + nursery.start_soon(proc.wait) + await wait_all_tasks_blocked() + proc.kill() + nursery.cancel_scope.deadline = _core.current_time() + 1.0 + assert not nursery.cancel_scope.cancelled_caught + assert proc.returncode == 0 # exit status unknowable, so... + finally: + signal.signal(signal.SIGCHLD, old_sigchld) + + @slow def test_waitid_eintr() -> None: # This only matters on PyPy (where we're coding EINTR handling diff --git a/src/trio/_tests/test_timeouts.py b/src/trio/_tests/test_timeouts.py index bad439530c..052520b2d9 100644 --- a/src/trio/_tests/test_timeouts.py +++ b/src/trio/_tests/test_timeouts.py @@ -115,7 +115,7 @@ async def test_context_shields_from_outer(scope: TimeoutScope) -> None: outer.cancel() try: await trio.lowlevel.checkpoint() - except trio.Cancelled: + except trio.Cancelled: # pragma: no cover pytest.fail("shield didn't work") inner.shield = False with pytest.raises(trio.Cancelled): diff --git a/src/trio/_tools/gen_exports.py b/src/trio/_tools/gen_exports.py index b4db597b63..5b1affe24a 100755 --- a/src/trio/_tools/gen_exports.py +++ b/src/trio/_tools/gen_exports.py @@ -180,22 +180,13 @@ def run_linters(file: File, source: str) -> str: SystemExit: If either failed. """ - success, response = run_black(file, source) - if not success: - print(response) - sys.exit(1) - - success, response = run_ruff(file, response) - if not success: # pragma: no cover # Test for run_ruff should catch - print(response) - sys.exit(1) - - success, response = run_black(file, response) - if not success: - print(response) - sys.exit(1) + for fn in (run_black, run_ruff): + success, source = fn(file, source) + if not success: + print(source) + sys.exit(1) - return response + return source def gen_public_wrappers_source(file: File) -> str: @@ -204,9 +195,7 @@ def gen_public_wrappers_source(file: File) -> str: """ header = [HEADER] - - if file.imports: - header.append(file.imports) + header.append(file.imports) if file.platform: # Simple checks to avoid repeating imports. If this messes up, type checkers/tests will # just give errors. @@ -304,7 +293,7 @@ def process(files: Iterable[File], *, do_test: bool) -> None: with open(new_path, "w", encoding="utf-8", newline="\n") as fp: fp.write(new_source) print("Regenerated sources successfully.") - if not matches_disk: + if not matches_disk: # TODO: test this branch # With pre-commit integration, show that we edited files. sys.exit(1)