ci(pytest): add plugin pytest-ignore-test-results

Make the test job pass with a warning when its failing cases are ignored.
Author: Fu Hanxi
Date:   2023-06-29 12:07:30 +08:00
Parent: 39f3a5f3ac
Commit: 5e4427ab13

5 changed files with 44 additions and 105 deletions
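For reference, the --ignore-result-files option that replaces --known-failure-cases-file in the CI scripts below still points at known_failure_cases/known_failure_cases.txt. A sample of that file is sketched here; the entries are hypothetical, and the syntax (one case id or fnmatch-style pattern per line, '#' starting a comment) is assumed from the in-repo parser this commit removes — the pytest-ignore-test-results documentation is authoritative for the real format.

    # known_failure_cases/known_failure_cases.txt -- hypothetical entries
    esp32.default.test_known_flaky_case      # exact test case id
    esp32s3.*.test_wifi_*                    # fnmatch-style pattern (assumed to be accepted)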


@@ -328,7 +328,7 @@ test_pytest_qemu:
       -m qemu
       --embedded-services idf,qemu
       --junitxml=XUNIT_RESULT.xml
-      --known-failure-cases-file known_failure_cases/known_failure_cases.txt
+      --ignore-result-files known_failure_cases/known_failure_cases.txt
       --app-info-filepattern \"list_job_*.txt\"

 test_pytest_linux:
@@ -356,5 +356,5 @@ test_pytest_linux:
       --target linux
       -m host_test
       --junitxml=XUNIT_RESULT.xml
-      --known-failure-cases-file known_failure_cases/known_failure_cases.txt
+      --ignore-result-files known_failure_cases/known_failure_cases.txt
       --app-info-filepattern \"list_job_*.txt\"


@@ -35,7 +35,7 @@
     - run_cmd pytest $TEST_DIR
       -m \"${markers}\"
       --junitxml=XUNIT_RESULT.xml
-      --known-failure-cases-file known_failure_cases/known_failure_cases.txt
+      --ignore-result-files known_failure_cases/known_failure_cases.txt
       --parallel-count ${CI_NODE_TOTAL:-1}
       --parallel-index ${CI_NODE_INDEX:-1}
       ${PYTEST_EXTRA_FLAGS}


@@ -251,7 +251,6 @@ def pytest_addoption(parser: pytest.Parser) -> None:
         '--sdkconfig',
         help='sdkconfig postfix, like sdkconfig.ci.<config>. (Default: None, which would build all found apps)',
     )
-    idf_group.addoption('--known-failure-cases-file', help='known failure cases file path')
     idf_group.addoption(
         '--dev-user',
         help='user name associated with some specific device/service used during the test execution',
@@ -313,7 +312,6 @@ def pytest_configure(config: Config) -> None:
     config.stash[IDF_PYTEST_EMBEDDED_KEY] = IdfPytestEmbedded(
         target=target,
         sdkconfig=config.getoption('sdkconfig'),
-        known_failure_cases_file=config.getoption('known_failure_cases_file'),
         apps_list=apps_list,
     )
     config.pluginmanager.register(config.stash[IDF_PYTEST_EMBEDDED_KEY])


@@ -4,18 +4,17 @@
 import logging
 import os
 import typing as t
-from fnmatch import fnmatch
 from xml.etree import ElementTree as ET

 import pytest
 from _pytest.config import ExitCode
 from _pytest.main import Session
 from _pytest.python import Function
-from _pytest.reports import TestReport
 from _pytest.runner import CallInfo
-from _pytest.terminal import TerminalReporter
+from pytest_embedded import Dut
 from pytest_embedded.plugin import parse_multi_dut_args
 from pytest_embedded.utils import find_by_suffix, to_list
+from pytest_ignore_test_results.ignore_results import ChildCase, ChildCasesStashKey

 from .constants import DEFAULT_SDKCONFIG, PREVIEW_TARGETS, SUPPORTED_TARGETS, PytestApp, PytestCase
 from .utils import format_case_id, merge_junit_files
@@ -26,55 +25,25 @@ ITEM_FAILED_KEY = pytest.StashKey[bool]()


 class IdfPytestEmbedded:
+    UNITY_RESULT_MAPPINGS = {
+        'PASS': 'passed',
+        'FAIL': 'failed',
+        'IGNORE': 'skipped',
+    }
+
     def __init__(
         self,
         target: str,
         sdkconfig: t.Optional[str] = None,
-        known_failure_cases_file: t.Optional[str] = None,
         apps_list: t.Optional[t.List[str]] = None,
     ):
         # CLI options to filter the test cases
         self.target = target.lower()
         self.sdkconfig = sdkconfig
-        self.known_failure_patterns = self._parse_known_failure_cases_file(known_failure_cases_file)
         self.apps_list = apps_list

         self.cases: t.List[PytestCase] = []
-        self._failed_cases: t.List[t.Tuple[str, bool, bool]] = []  # (test_case_name, is_known_failure_cases, is_xfail)
-
-    @property
-    def failed_cases(self) -> t.List[str]:
-        return [case for case, is_known, is_xfail in self._failed_cases if not is_known and not is_xfail]
-
-    @property
-    def known_failure_cases(self) -> t.List[str]:
-        return [case for case, is_known, _ in self._failed_cases if is_known]
-
-    @property
-    def xfail_cases(self) -> t.List[str]:
-        return [case for case, _, is_xfail in self._failed_cases if is_xfail]
-
-    @staticmethod
-    def _parse_known_failure_cases_file(
-        known_failure_cases_file: t.Optional[str] = None,
-    ) -> t.List[str]:
-        if not known_failure_cases_file or not os.path.isfile(known_failure_cases_file):
-            return []
-
-        patterns = []
-        with open(known_failure_cases_file) as fr:
-            for line in fr.readlines():
-                if not line:
-                    continue
-                if not line.strip():
-                    continue
-                without_comments = line.split('#')[0].strip()
-                if without_comments:
-                    patterns.append(without_comments)
-
-        return patterns
-
     @staticmethod
     def get_param(item: Function, key: str, default: t.Any = None) -> t.Any:
         # implement like this since this is a limitation of pytest, couldn't get fixture values while collecting
@@ -214,54 +183,46 @@ class IdfPytestEmbedded:
         for item in items:
             self.cases.append(self.item_to_pytest_case(item))

-    def pytest_runtest_makereport(self, item: Function, call: CallInfo[None]) -> t.Optional[TestReport]:
-        report = TestReport.from_item_and_call(item, call)
-        if item.stash.get(ITEM_FAILED_KEY, None) is None:
-            item.stash[ITEM_FAILED_KEY] = False
-
-        if report.outcome == 'failed':
-            # Mark the failed test cases
-            #
-            # This hook function would be called in 3 phases, setup, call, teardown.
-            # the report.outcome is the outcome of the single call of current phase, which is independent
-            # the call phase outcome is the test result
-            item.stash[ITEM_FAILED_KEY] = True
-
-        if call.when == 'teardown':
-            item_failed = item.stash[ITEM_FAILED_KEY]
-            if item_failed:
-                # unity real test cases
-                failed_sub_cases = item.stash.get(ITEM_FAILED_CASES_KEY, [])
-                if failed_sub_cases:
-                    for test_case_name in failed_sub_cases:
-                        self._failed_cases.append((test_case_name, self._is_known_failure(test_case_name), False))
-                else:  # the case itself is failing
-                    test_case_name = item.funcargs.get('test_case_name', '')
-                    if test_case_name:
-                        self._failed_cases.append(
-                            (
-                                test_case_name,
-                                self._is_known_failure(test_case_name),
-                                report.keywords.get('xfail', False),
-                            )
-                        )
-
-        return report
-
-    def _is_known_failure(self, case_id: str) -> bool:
-        for pattern in self.known_failure_patterns:
-            if case_id == pattern:
-                return True
-
-            if fnmatch(case_id, pattern):
-                return True
-
-        return False
+    def pytest_custom_test_case_name(self, item: Function) -> str:
+        return item.funcargs.get('test_case_name', item.nodeid)  # type: ignore
+
+    def pytest_runtest_makereport(self, item: Function, call: CallInfo[None]) -> None:
+        if call.when == 'call':
+            target = item.funcargs['target']
+            config = item.funcargs['config']
+            is_qemu = item.get_closest_marker('qemu') is not None
+
+            dut: t.Union[Dut, t.Tuple[Dut]] = item.funcargs['dut']  # type: ignore
+            if isinstance(dut, (list, tuple)):
+                res = []
+                for i, _dut in enumerate(dut):
+                    res.extend(
+                        [
+                            ChildCase(
+                                format_case_id(target, config, case.name + f' {i}', is_qemu=is_qemu),
+                                self.UNITY_RESULT_MAPPINGS[case.result],
+                            )
+                            for case in _dut.testsuite.testcases
+                        ]
+                    )
+                item.config.stash[ChildCasesStashKey] = {item.nodeid: res}
+            else:
+                item.config.stash[ChildCasesStashKey] = {
+                    item.nodeid: [
+                        ChildCase(
+                            format_case_id(target, config, case.name, is_qemu=is_qemu),
+                            self.UNITY_RESULT_MAPPINGS[case.result],
+                        )
+                        for case in dut.testsuite.testcases
+                    ]
+                }

     @pytest.hookimpl(trylast=True)
     def pytest_runtest_teardown(self, item: Function) -> None:
         """
-        Format the test case generated junit reports
+        Modify the junit reports. Format the unity c test case names.
         """
-        tempdir = item.funcargs.get('test_case_tempdir')
+        tempdir: t.Optional[str] = item.funcargs.get('test_case_tempdir')  # type: ignore
         if not tempdir:
             return
@@ -273,8 +234,8 @@ class IdfPytestEmbedded:
             merge_junit_files(junits, os.path.join(tempdir, 'dut.xml'))
             junits = [os.path.join(tempdir, 'dut.xml')]

-        # unity cases
         is_qemu = item.get_closest_marker('qemu') is not None
-        failed_sub_cases = []
         target = item.funcargs['target']
         config = item.funcargs['config']
         for junit in junits:
@@ -287,30 +248,9 @@ class IdfPytestEmbedded:
                 if 'file' in case.attrib:
                     case.attrib['file'] = case.attrib['file'].replace('/IDF/', '')  # our unity test framework

-                # collect real failure cases
-                if case.find('failure') is not None:
-                    failed_sub_cases.append(new_case_name)
-
             xml.write(junit)

-        item.stash[ITEM_FAILED_CASES_KEY] = failed_sub_cases
-
     def pytest_sessionfinish(self, session: Session, exitstatus: int) -> None:
         if exitstatus != 0:
             if exitstatus == ExitCode.NO_TESTS_COLLECTED:
                 session.exitstatus = 0
-            elif self.known_failure_cases and not self.failed_cases:
-                session.exitstatus = 0
-
-    def pytest_terminal_summary(self, terminalreporter: TerminalReporter) -> None:
-        if self.known_failure_cases:
-            terminalreporter.section('Known failure cases', bold=True, yellow=True)
-            terminalreporter.line('\n'.join(self.known_failure_cases))
-
-        if self.xfail_cases:
-            terminalreporter.section('xfail cases', bold=True, yellow=True)
-            terminalreporter.line('\n'.join(self.xfail_cases))
-
-        if self.failed_cases:
-            terminalreporter.section('Failed cases', bold=True, red=True)
-            terminalreporter.line('\n'.join(self.failed_cases))
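For reference (not part of the commit): with the hooks above, unity sub-case results are handed to pytest-ignore-test-results by stashing a list of ChildCase entries under the parent item's nodeid, with unity results translated through UNITY_RESULT_MAPPINGS. A minimal sketch of that translation, using only names visible in the diff plus an assumed helper and simplified inputs:

    # Sketch only; unity_cases_to_child_cases and the (case_id, unity_result)
    # input shape are assumptions for illustration.
    import typing as t

    from pytest_ignore_test_results.ignore_results import ChildCase

    UNITY_RESULT_MAPPINGS = {'PASS': 'passed', 'FAIL': 'failed', 'IGNORE': 'skipped'}

    def unity_cases_to_child_cases(cases: t.List[t.Tuple[str, str]]) -> t.List[ChildCase]:
        # Each pair is (formatted case id, unity result); the unity result is
        # mapped to the pytest outcome string the ignore plugin understands.
        return [ChildCase(name, UNITY_RESULT_MAPPINGS[result]) for name, result in cases]

    # A unity 'FAIL' is therefore reported as 'failed', and an 'IGNORE'd unity
    # case is reported as 'skipped'.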


@@ -7,6 +7,7 @@ pytest-embedded-jtag
 pytest-embedded-qemu
 pytest-rerunfailures
 pytest-timeout
+pytest-ignore-test-results

 # build
 idf-build-apps