updates
@@ -1,15 +1,14 @@
-# mypy: allow-untyped-defs
 """Support for skip/xfail functions and markers."""
-
-from __future__ import annotations
-
-from collections.abc import Generator
-from collections.abc import Mapping
 import dataclasses
 import os
 import platform
 import sys
 import traceback
+from collections.abc import Mapping
+from typing import Generator
+from typing import Optional
+from typing import Tuple
+from typing import Type
 
 from _pytest.config import Config
 from _pytest.config import hookimpl
@@ -19,9 +18,7 @@ from _pytest.nodes import Item
 from _pytest.outcomes import fail
 from _pytest.outcomes import skip
 from _pytest.outcomes import xfail
-from _pytest.raises import AbstractRaises
 from _pytest.reports import BaseReport
-from _pytest.reports import TestReport
 from _pytest.runner import CallInfo
 from _pytest.stash import StashKey
 
@@ -37,13 +34,11 @@ def pytest_addoption(parser: Parser) -> None:
     )
 
     parser.addini(
-        "strict_xfail",
+        "xfail_strict",
         "Default for the strict parameter of xfail "
-        "markers when not given explicitly (default: False) (alias: xfail_strict)",
+        "markers when not given explicitly (default: False)",
+        default=False,
         type="bool",
-        # None => fallback to `strict`.
-        default=None,
-        aliases=["xfail_strict"],
     )
 
 
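
Note: the hunk above only changes how the ini option is registered; the way a test suite consumes it stays the same. A minimal illustration, with a made-up project config and test body:

    # pytest.ini (hypothetical)
    #   [pytest]
    #   xfail_strict = true

    import pytest

    @pytest.mark.xfail(reason="illustrative known issue")
    def test_unexpected_pass():
        # With xfail_strict = true, an unexpected pass (XPASS) is reported as a
        # failure; passing strict=False on the marker overrides the ini default.
        assert 2 + 2 == 4
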
@@ -76,7 +71,7 @@ def pytest_configure(config: Config) -> None:
     )
     config.addinivalue_line(
         "markers",
-        "xfail(condition, ..., *, reason=..., run=True, raises=None, strict=strict_xfail): "
+        "xfail(condition, ..., *, reason=..., run=True, raises=None, strict=xfail_strict): "
         "mark the test function as an expected failure if any of the conditions "
         "evaluate to True. Optionally specify a reason for better reporting "
         "and run=False if you don't even want to execute the test function. "
@@ -86,7 +81,7 @@ def pytest_configure(config: Config) -> None:
     )
 
 
-def evaluate_condition(item: Item, mark: Mark, condition: object) -> tuple[bool, str]:
+def evaluate_condition(item: Item, mark: Mark, condition: object) -> Tuple[bool, str]:
     """Evaluate a single skipif/xfail condition.
 
     If an old-style string condition is given, it is eval()'d, otherwise the
@@ -108,18 +103,20 @@ def evaluate_condition(item: Item, mark: Mark, condition: object) -> tuple[bool,
         ):
             if not isinstance(dictionary, Mapping):
                 raise ValueError(
-                    f"pytest_markeval_namespace() needs to return a dict, got {dictionary!r}"
+                    "pytest_markeval_namespace() needs to return a dict, got {!r}".format(
+                        dictionary
+                    )
                 )
             globals_.update(dictionary)
         if hasattr(item, "obj"):
-            globals_.update(item.obj.__globals__)
+            globals_.update(item.obj.__globals__)  # type: ignore[attr-defined]
         try:
             filename = f"<{mark.name} condition>"
             condition_code = compile(condition, filename, "eval")
             result = eval(condition_code, globals_)
         except SyntaxError as exc:
             msglines = [
-                f"Error evaluating {mark.name!r} condition",
+                "Error evaluating %r condition" % mark.name,
                 "    " + condition,
                 "    " + " " * (exc.offset or 0) + "^",
                 "SyntaxError: invalid syntax",
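
Note: the isinstance(dictionary, Mapping) check above validates values returned by the pytest_markeval_namespace hook, whose entries become extra globals for string conditions. A rough sketch of a conforming hook (the CI variable is only an example):

    # conftest.py
    import os

    def pytest_markeval_namespace(config):
        # Must return a Mapping; the names become available inside string
        # conditions such as @pytest.mark.skipif("on_ci", reason="...").
        return {"on_ci": os.environ.get("CI") == "true"}
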
@@ -127,7 +124,7 @@ def evaluate_condition(item: Item, mark: Mark, condition: object) -> tuple[bool,
             fail("\n".join(msglines), pytrace=False)
         except Exception as exc:
             msglines = [
-                f"Error evaluating {mark.name!r} condition",
+                "Error evaluating %r condition" % mark.name,
                 "    " + condition,
                 *traceback.format_exception_only(type(exc), exc),
             ]
@@ -139,7 +136,7 @@ def evaluate_condition(item: Item, mark: Mark, condition: object) -> tuple[bool,
             result = bool(condition)
         except Exception as exc:
             msglines = [
-                f"Error evaluating {mark.name!r} condition as a boolean",
+                "Error evaluating %r condition as a boolean" % mark.name,
                 *traceback.format_exception_only(type(exc), exc),
             ]
             fail("\n".join(msglines), pytrace=False)
@@ -151,7 +148,7 @@ def evaluate_condition(item: Item, mark: Mark, condition: object) -> tuple[bool,
         else:
             # XXX better be checked at collection time
             msg = (
-                f"Error evaluating {mark.name!r}: "
+                "Error evaluating %r: " % mark.name
                 + "you need to specify reason=STRING when using booleans as conditions."
             )
             fail(msg, pytrace=False)
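
Note: the branch above is why a boolean condition must carry reason=..., while a string condition can fall back to an auto-generated reason. For example (the platform check is arbitrary):

    import sys

    import pytest

    # String condition: compiled and eval()'d by evaluate_condition();
    # reason is optional because "condition: <string>" is used as a fallback.
    @pytest.mark.skipif("sys.platform == 'win32'")
    def test_string_condition():
        assert True

    # Boolean condition: reason= is required, otherwise the fail() call
    # above errors the item when the mark is evaluated.
    @pytest.mark.skipif(sys.platform == "win32", reason="not supported on Windows")
    def test_boolean_condition():
        assert True
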
@@ -166,7 +163,7 @@ class Skip:
     reason: str = "unconditional skip"
 
 
-def evaluate_skip_marks(item: Item) -> Skip | None:
+def evaluate_skip_marks(item: Item) -> Optional[Skip]:
     """Evaluate skip and skipif marks on item, returning Skip if triggered."""
     for mark in item.iter_markers(name="skipif"):
         if "condition" not in mark.kwargs:
@@ -198,28 +195,19 @@ def evaluate_skip_marks(item: Item) -> Skip | None:
 class Xfail:
     """The result of evaluate_xfail_marks()."""
 
-    __slots__ = ("raises", "reason", "run", "strict")
+    __slots__ = ("reason", "run", "strict", "raises")
 
     reason: str
     run: bool
     strict: bool
-    raises: (
-        type[BaseException]
-        | tuple[type[BaseException], ...]
-        | AbstractRaises[BaseException]
-        | None
-    )
+    raises: Optional[Tuple[Type[BaseException], ...]]
 
 
-def evaluate_xfail_marks(item: Item) -> Xfail | None:
+def evaluate_xfail_marks(item: Item) -> Optional[Xfail]:
     """Evaluate xfail marks on item, returning Xfail if triggered."""
     for mark in item.iter_markers(name="xfail"):
         run = mark.kwargs.get("run", True)
-        strict = mark.kwargs.get("strict")
-        if strict is None:
-            strict = item.config.getini("strict_xfail")
-            if strict is None:
-                strict = item.config.getini("strict")
+        strict = mark.kwargs.get("strict", item.config.getini("xfail_strict"))
         raises = mark.kwargs.get("raises", None)
         if "condition" not in mark.kwargs:
             conditions = mark.args
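
Note: evaluate_xfail_marks() above reads its settings directly from the marker keywords. A sketch of a marker exercising each of them (reason text and exception type are placeholders):

    import sys

    import pytest

    @pytest.mark.xfail(
        sys.platform == "win32",   # condition (positional, may be repeated)
        reason="illustrative reason",
        run=True,                  # run=False -> xfail("[NOTRUN] ...") without executing
        raises=OSError,            # only this exception counts as the expected failure
        strict=False,              # an explicit value wins over the ini default
    )
    def test_expected_failure():
        raise OSError("simulated failure")
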
@@ -241,7 +229,7 @@ def evaluate_xfail_marks(item: Item) -> Xfail | None:
 
 
 # Saves the xfail mark evaluation. Can be refreshed during call if None.
-xfailed_key = StashKey[Xfail | None]()
+xfailed_key = StashKey[Optional[Xfail]]()
 
 
 @hookimpl(tryfirst=True)
@@ -255,8 +243,8 @@ def pytest_runtest_setup(item: Item) -> None:
         xfail("[NOTRUN] " + xfailed.reason)
 
 
-@hookimpl(wrapper=True)
-def pytest_runtest_call(item: Item) -> Generator[None]:
+@hookimpl(hookwrapper=True)
+def pytest_runtest_call(item: Item) -> Generator[None, None, None]:
     xfailed = item.stash.get(xfailed_key, None)
     if xfailed is None:
         item.stash[xfailed_key] = xfailed = evaluate_xfail_marks(item)
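
Note: the two decorators in this hunk correspond to pluggy's two wrapper protocols. A reduced sketch of the mechanics, with the two variants shown as if they lived in separate plugin modules:

    import pytest

    # Variant A: new-style wrapper. The yield expression evaluates to the
    # hook's result (or raises the hook's exception), and the wrapper returns it.
    @pytest.hookimpl(wrapper=True)
    def pytest_runtest_call(item):
        try:
            return (yield)
        finally:
            pass  # post-processing goes here

    # Variant B: old-style hookwrapper. The yield produces a pluggy Result
    # object; inspect or replace it via get_result() / force_result().
    @pytest.hookimpl(hookwrapper=True)
    def pytest_runtest_call(item):  # would live in a different plugin module
        outcome = yield
        outcome.get_result()
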
@@ -264,44 +252,33 @@ def pytest_runtest_call(item: Item) -> Generator[None]:
     if xfailed and not item.config.option.runxfail and not xfailed.run:
         xfail("[NOTRUN] " + xfailed.reason)
 
-    try:
-        return (yield)
-    finally:
-        # The test run may have added an xfail mark dynamically.
-        xfailed = item.stash.get(xfailed_key, None)
-        if xfailed is None:
-            item.stash[xfailed_key] = xfailed = evaluate_xfail_marks(item)
+    yield
+
+    # The test run may have added an xfail mark dynamically.
+    xfailed = item.stash.get(xfailed_key, None)
+    if xfailed is None:
+        item.stash[xfailed_key] = xfailed = evaluate_xfail_marks(item)
 
 
-@hookimpl(wrapper=True)
-def pytest_runtest_makereport(
-    item: Item, call: CallInfo[None]
-) -> Generator[None, TestReport, TestReport]:
-    rep = yield
+@hookimpl(hookwrapper=True)
+def pytest_runtest_makereport(item: Item, call: CallInfo[None]):
+    outcome = yield
+    rep = outcome.get_result()
     xfailed = item.stash.get(xfailed_key, None)
     if item.config.option.runxfail:
         pass  # don't interfere
     elif call.excinfo and isinstance(call.excinfo.value, xfail.Exception):
         assert call.excinfo.value.msg is not None
-        rep.wasxfail = call.excinfo.value.msg
+        rep.wasxfail = "reason: " + call.excinfo.value.msg
         rep.outcome = "skipped"
     elif not rep.skipped and xfailed:
         if call.excinfo:
             raises = xfailed.raises
-            if raises is None or (
-                (
-                    isinstance(raises, type | tuple)
-                    and isinstance(call.excinfo.value, raises)
-                )
-                or (
-                    isinstance(raises, AbstractRaises)
-                    and raises.matches(call.excinfo.value)
-                )
-            ):
+            if raises is not None and not isinstance(call.excinfo.value, raises):
+                rep.outcome = "failed"
+            else:
                 rep.outcome = "skipped"
                 rep.wasxfail = xfailed.reason
-            else:
-                rep.outcome = "failed"
         elif call.when == "call":
             if xfailed.strict:
                 rep.outcome = "failed"
@@ -309,10 +286,9 @@ def pytest_runtest_makereport(
             else:
                 rep.outcome = "passed"
                 rep.wasxfail = xfailed.reason
-    return rep
 
 
-def pytest_report_teststatus(report: BaseReport) -> tuple[str, str, str] | None:
+def pytest_report_teststatus(report: BaseReport) -> Optional[Tuple[str, str, str]]:
     if hasattr(report, "wasxfail"):
         if report.skipped:
             return "xfailed", "x", "XFAIL"