ci: apply idf-ci pytest plugin

Removed

- target markers. Targets must now be specified via the `target` parametrization in esp-idf (see the example below).
- host test markers. These are now added automatically for the linux target and the qemu marker.
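Illustrative migration sketch (a hypothetical test case, mirroring the decorator changes made to the test scripts in this commit):

# before this commit: per-target markers on the test function
#   @pytest.mark.esp32
#   @pytest.mark.esp32s2
#   @pytest.mark.generic
#   def test_hello_world(dut) -> None:
#       dut.expect('Hello world!')
#
# after this commit: targets expressed as parametrization
import pytest
from pytest_embedded_idf.utils import idf_parametrize


@pytest.mark.generic
@idf_parametrize('target', ['esp32', 'esp32s2'], indirect=['target'])
def test_hello_world(dut) -> None:
    dut.expect('Hello world!')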
Fu Hanxi
2025-06-13 14:30:23 +02:00
parent f33469dd63
commit a5257dcc39
39 changed files with 441 additions and 2415 deletions

View File

@@ -34,7 +34,6 @@
variables:
IDF_TOOLCHAIN: clang
TEST_BUILD_OPTS_EXTRA: ""
PYTEST_IGNORE_COLLECT_IMPORT_ERROR: "1"
script:
# CI-specific options start from "--parallel-count xxx"; they can be ignored when running locally
- run_cmd idf-build-apps build
@@ -292,8 +291,6 @@ generate_build_child_pipeline:
- non_test_related_apps.txt
expire_in: 1 week
when: always
variables:
PYTEST_IGNORE_COLLECT_IMPORT_ERROR: "1"
script:
- run_cmd python tools/ci/dynamic_pipelines/scripts/generate_build_child_pipeline.py

View File

@@ -145,6 +145,7 @@ variables:
export IDF_PIP_WHEELS_URL=""
fi
# install.sh
if [[ "${CI_JOB_STAGE}" != "target_test" ]]; then
section_start "running_install_sh" "Running install.sh"
if [[ "${CI_JOB_STAGE}" == "build_doc" ]]; then
@@ -154,8 +155,9 @@ variables:
fi
section_end "running_install_sh"
else
section_start "install_python_env" "Install Python environment"
section_start "install_python_env" "Install Python environment, skip required tools check"
run_cmd python tools/idf_tools.py install-python-env --features ci,test-specific
export IDF_SKIP_TOOLS_CHECK=1
section_end "install_python_env"
fi
@@ -170,11 +172,6 @@ variables:
$IDF_PATH/tools/idf_tools.py --non-interactive install esp-clang
fi
if [[ "${CI_JOB_STAGE}" == "target_test" ]]; then
section_start "IDF_SKIP_TOOLS_CHECK" "Skip required tools check"
export IDF_SKIP_TOOLS_CHECK=1
section_end "IDF_SKIP_TOOLS_CHECK"
fi
section_start "source_export" "Source export.sh"
source ./export.sh
section_end "source_export"

View File

@@ -353,7 +353,6 @@ test_pytest_macos:
junit: XUNIT_RESULT.xml
variables:
PYENV_VERSION: "3.9"
PYTEST_IGNORE_COLLECT_IMPORT_ERROR: "1"
# Workaround for a bug in Parallels executor where CI_PROJECT_DIR is not an absolute path,
# but a relative path to the build directory (builds/espressif/esp-idf instead of ~/builds/espressif/esp-idf).
# GitLab sets the project dir to this template `<builds_dir>/<namespace>/<project_name>`
@@ -364,30 +363,15 @@ test_pytest_macos:
--build-system cmake
--target linux
--only-test-related
-m \"host_test and macos\"
-m macos
--modified-files ${MR_MODIFIED_FILES}
- python tools/ci/get_known_failure_cases_file.py
- run_cmd pytest
--target linux
-m \"host_test and macos\"
-m macos
--junitxml=XUNIT_RESULT.xml
--ignore-result-files ${KNOWN_FAILURE_CASES_FILE_NAME}
test_idf_pytest_plugin:
extends:
- .host_test_template
- .rules:patterns:idf-pytest-plugin
variables:
SUBMODULES_TO_FETCH: "none"
artifacts:
reports:
junit: XUNIT_RESULT.xml
script:
- cd ${IDF_PATH}/tools/ci/dynamic_pipelines/tests/test_report_generator
- python -m unittest test_report_generator.py
- cd ${IDF_PATH}/tools/ci/idf_pytest
- pytest --junitxml=${CI_PROJECT_DIR}/XUNIT_RESULT.xml
test_idf_build_apps_load_soc_caps:
extends: .host_test_template
script:

View File

@@ -116,8 +116,6 @@ check_test_scripts_build_test_rules:
extends:
- .pre_check_template
- .before_script:build
variables:
PYTEST_IGNORE_COLLECT_IMPORT_ERROR: "1"
script:
# requires basic pytest dependencies
- python tools/ci/check_build_test_rules.py check-test-scripts examples/ tools/test_apps components

View File

@@ -162,10 +162,6 @@
- "components/bt/esp_ble_mesh/lib/lib"
- ".gitmodules"
.patterns-idf-pytest-plugin: &patterns-idf-pytest-plugin
- "tools/ci/idf_pytest/**/*"
- "tools/ci/dynamic_pipelines/tests/**/*"
##############
# if anchors #
##############
@@ -265,12 +261,6 @@
# - <<: *if-dev-push
# changes: *patterns-sonarqube-files
.rules:patterns:idf-pytest-plugin:
rules:
- <<: *if-protected-check
- <<: *if-dev-push
changes: *patterns-idf-pytest-plugin
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# DO NOT place comments or maintain any code from this line
#

View File

@@ -20,7 +20,7 @@ ignore_warning_files = [
]
build_dir = "build_@t_@w"
build_log_filename = "build.log"
build_log_filename = "build_log.txt"
size_json_filename = "size.json"
verbose = 1 # INFO
@@ -31,7 +31,7 @@ collect_size_info_filename = "size_info_${CI_JOB_NAME_SLUG}.txt" # TODO remove
junitxml = "build_summary_${CI_JOB_NAME_SLUG}.xml"
# manifest
# check_manifest_rules = true # FIXME
check_manifest_rules = true
manifest_rootpath = "$IDF_PATH"
manifest_filepatterns = [
'**/.build-test-rules.yml',

View File

@@ -10,26 +10,6 @@ repos:
- id: ruff-format
- id: ruff
args: [ "--fix" ]
- repo: local
hooks:
- id: pytest-linter
name: Pytest Linter Check
entry: tools/ci/check_test_files.py
language: python
files: 'pytest_.*\.py$'
require_serial: true
additional_dependencies:
- pytest-embedded-idf[serial]~=1.16
- pytest-embedded-jtag~=1.16
- pytest-embedded-qemu~=1.16
- pytest-ignore-test-results~=0.3
- pytest-rerunfailures
- pytest-timeout
- idf-build-apps~=2.11
- python-gitlab
- minio
- click
- esp-idf-monitor
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v4.5.0
hooks:
@@ -178,15 +158,6 @@ repos:
files: 'tools/ci/sort_yaml\.py$'
additional_dependencies:
- ruamel.yaml
- id: check-build-test-rules-path-exists
name: check path in .build-test-rules.yml exists
entry: tools/ci/check_build_test_rules.py check-exist
language: python
additional_dependencies:
- PyYAML == 5.3.1
always_run: true
pass_filenames: false
require_serial: true
- id: cleanup-ignore-lists
name: Remove non-existing patterns from ignore lists
entry: tools/ci/cleanup_ignore_lists.py

View File

@@ -3,6 +3,7 @@
import pytest
from pytest_embedded import Dut
from pytest_embedded_idf.utils import idf_parametrize
# normal mmu tests
@@ -20,14 +21,6 @@ def test_mmap(dut: Dut) -> None:
# mmu tests with psram enabled
PSRAM_RELEASE_CONFIGS = [
pytest.param('psram_release_esp32', marks=[pytest.mark.esp32]),
pytest.param('psram_release_esp32s2', marks=[pytest.mark.esp32s2]),
pytest.param('psram_release_esp32s3', marks=[pytest.mark.esp32s3]),
pytest.param('psram_release_esp32p4', marks=[pytest.mark.esp32p4]),
]
@pytest.mark.generic
@idf_parametrize(
'config,target',
@@ -44,12 +37,6 @@ def test_mmap_psram(dut: Dut) -> None:
# mmu tests with xip_psram
XIP_CONFIGS = [
pytest.param('xip_psram_esp32s2', marks=[pytest.mark.esp32s2]),
pytest.param('xip_psram_esp32s3', marks=[pytest.mark.esp32s3]),
]
@pytest.mark.generic
@idf_parametrize(
'config,target', [('xip_psram_esp32s2', 'esp32s2'), ('xip_psram_esp32s3', 'esp32s3')], indirect=['config', 'target']

View File

@@ -15,17 +15,6 @@ def not_expect(dut: Dut, output_regex: str) -> None:
raise RuntimeError(f'Found not_expect output {output_regex}')
JTAG_SERIAL_MARKS = [
pytest.mark.esp32s3,
pytest.mark.esp32c2,
pytest.mark.esp32c3,
pytest.mark.esp32c5,
pytest.mark.esp32c6,
pytest.mark.esp32c61,
pytest.mark.esp32h2,
]
@pytest.mark.generic
@idf_parametrize('config', ['console_none'], indirect=['config'])
@idf_parametrize('target', ['supported_targets'], indirect=['target'])

View File

@@ -3,7 +3,6 @@
import pytest
from pytest_embedded import Dut
from pytest_embedded_idf.utils import idf_parametrize
# @pytest.mark.esp32c2 # esp32c2 are using xtal_26mhz
@pytest.mark.generic

View File

@@ -4,30 +4,6 @@ import pytest
from pytest_embedded import Dut
from pytest_embedded_idf.utils import idf_parametrize
CONFIGS = [
pytest.param(
'default',
marks=[
pytest.mark.supported_targets,
],
),
pytest.param('freertos_options', marks=[pytest.mark.supported_targets]),
pytest.param('psram', marks=[pytest.mark.esp32, pytest.mark.esp32s3, pytest.mark.esp32p4, pytest.mark.esp32c5]),
pytest.param('single_core', marks=[pytest.mark.esp32, pytest.mark.esp32p4]),
# TODO: [ESP32C5] IDF-10335
# TODO: [ESP32C61] IDF-11146
pytest.param(
'smp',
marks=[
pytest.mark.supported_targets,
pytest.mark.temp_skip_ci(
targets=['esp32p4', 'esp32c5', 'esp32c61', 'esp32h21'], reason='test failed/TBD IDF-8113'
),
],
),
pytest.param('tickless_idle', marks=[pytest.mark.supported_targets]),
]
@pytest.mark.generic
@idf_parametrize(
@@ -41,6 +17,8 @@ CONFIGS = [
('psram', 'esp32p4'),
('psram', 'esp32s3'),
('single_core', 'esp32'),
# TODO: [ESP32C5] IDF-10335
# TODO: [ESP32C61] IDF-11146
('single_core', 'esp32p4'),
(
'smp',

View File

@@ -4,11 +4,6 @@ import pytest
from pytest_embedded_idf.dut import IdfDut
from pytest_embedded_idf.utils import idf_parametrize
CONFIGS_NVS_ENCR_FLASH_ENC = [
pytest.param('nvs_encr_flash_enc_esp32', marks=[pytest.mark.esp32]),
pytest.param('nvs_encr_flash_enc_esp32c3', marks=[pytest.mark.esp32c3]),
]
@pytest.mark.generic
@pytest.mark.parametrize('config', ['default'], indirect=True)

View File

@@ -1,2 +1,2 @@
| Supported Targets | ESP32 | ESP32-C2 | ESP32-C3 | ESP32-C5 | ESP32-C6 | ESP32-C61 | ESP32-H2 | ESP32-P4 | ESP32-S2 | ESP32-S3 |
| ----------------- | ----- | -------- | -------- | -------- | -------- | --------- | -------- | -------- | -------- | -------- |
| Supported Targets | ESP32 | ESP32-C2 | ESP32-C3 | ESP32-C5 | ESP32-C6 | ESP32-C61 | ESP32-H2 | ESP32-H21 | ESP32-H4 | ESP32-P4 | ESP32-S2 | ESP32-S3 |
| ----------------- | ----- | -------- | -------- | -------- | -------- | --------- | -------- | --------- | -------- | -------- | -------- | -------- |

View File

@@ -4,11 +4,6 @@ import pytest
from pytest_embedded import Dut
from pytest_embedded_idf.utils import idf_parametrize
CONFIGS = [
pytest.param('default', marks=[pytest.mark.esp32, pytest.mark.esp32c3]),
pytest.param('psram_esp32', marks=[pytest.mark.esp32]),
]
@pytest.mark.generic
@idf_parametrize(

View File

@@ -17,7 +17,6 @@ if os.path.join(os.path.dirname(__file__), 'tools', 'ci') not in sys.path:
if os.path.join(os.path.dirname(__file__), 'tools', 'ci', 'python_packages') not in sys.path:
sys.path.append(os.path.join(os.path.dirname(__file__), 'tools', 'ci', 'python_packages'))
import glob
import io
import logging
import os
@@ -36,20 +35,17 @@ from _pytest.config import Config
from _pytest.fixtures import FixtureRequest
from artifacts_handler import ArtifactType
from dynamic_pipelines.constants import TEST_RELATED_APPS_DOWNLOAD_URLS_FILENAME
from idf_ci_local.app import import_apps_from_txt
from idf_ci import PytestCase
from idf_ci.idf_pytest import IDF_CI_PYTEST_CASE_KEY
from idf_ci_local.uploader import AppDownloader
from idf_ci_local.uploader import AppUploader
from idf_ci_utils import IDF_PATH
from idf_ci_utils import idf_relpath
from idf_pytest.constants import DEFAULT_LOGDIR
from idf_pytest.constants import DEFAULT_SDKCONFIG
from idf_pytest.constants import ENV_MARKERS
from idf_pytest.constants import SPECIAL_MARKERS
from idf_pytest.constants import TARGET_MARKERS
from idf_pytest.constants import PytestCase
from idf_pytest.plugin import IDF_PYTEST_EMBEDDED_KEY
from idf_pytest.plugin import ITEM_PYTEST_CASE_KEY
from idf_pytest.plugin import IdfPytestEmbedded
from idf_pytest.plugin import IDF_LOCAL_PLUGIN_KEY
from idf_pytest.plugin import IdfLocalPlugin
from idf_pytest.plugin import requires_elf_or_map
from idf_pytest.utils import format_case_id
from pytest_embedded.plugin import multi_dut_argument
from pytest_embedded.plugin import multi_dut_fixture
@@ -85,7 +81,7 @@ def config(request: FixtureRequest) -> str:
@pytest.fixture
@multi_dut_fixture
def target(request: FixtureRequest, dut_total: int, dut_index: int) -> str:
plugin = request.config.stash[IDF_PYTEST_EMBEDDED_KEY]
plugin = request.config.stash[IDF_LOCAL_PLUGIN_KEY]
if dut_total == 1:
return plugin.target[0] # type: ignore
@@ -205,11 +201,11 @@ def build_dir(
valid build directory
"""
# download from minio on CI
case: PytestCase = request._pyfuncitem.stash[ITEM_PYTEST_CASE_KEY]
case: PytestCase = request.node.stash[IDF_CI_PYTEST_CASE_KEY]
if app_downloader:
# somehow hardcoded...
app_build_path = os.path.join(idf_relpath(app_path), f'build_{target}_{config}')
if case.requires_elf_or_map:
if requires_elf_or_map(case):
app_downloader.download_app(app_build_path)
else:
app_downloader.download_app(app_build_path, ArtifactType.BUILD_DIR_WITHOUT_MAP_AND_ELF_FILES)
@@ -403,10 +399,6 @@ def dev_user(request: FixtureRequest) -> str:
##################
def pytest_addoption(parser: pytest.Parser) -> None:
idf_group = parser.getgroup('idf')
idf_group.addoption(
'--sdkconfig',
help='sdkconfig postfix, like sdkconfig.ci.<config>. (Default: None, which would build all found apps)',
)
idf_group.addoption(
'--dev-user',
help='user name associated with some specific device/service used during the test execution',
@@ -415,12 +407,6 @@ def pytest_addoption(parser: pytest.Parser) -> None:
'--dev-passwd',
help='password associated with some specific device/service used during the test execution',
)
idf_group.addoption(
'--app-info-filepattern',
help='glob pattern to specify the files that include built app info generated by '
'`idf-build-apps --collect-app-info ...`. Will not raise ValueError when binary '
'paths do not exist in the local file system if they are not recorded in the app info.',
)
idf_group.addoption(
'--pipeline-id',
help='main pipeline id, not the child pipeline id. Specify this option to download the artifacts '
@@ -437,63 +423,15 @@ def pytest_configure(config: Config) -> None:
supported_targets.set(SUPPORTED_TARGETS)
preview_targets.set(PREVIEW_TARGETS)
# cli option "--target"
target = [_t.strip().lower() for _t in (config.getoption('target', '') or '').split(',') if _t.strip()]
# add markers based on idf_pytest/constants.py
for name, description in {
**TARGET_MARKERS,
**ENV_MARKERS,
**SPECIAL_MARKERS,
}.items():
config.addinivalue_line('markers', f'{name}: {description}')
help_commands = ['--help', '--fixtures', '--markers', '--version']
for cmd in help_commands:
if cmd in config.invocation_params.args:
target = ['unneeded']
break
markexpr = config.getoption('markexpr') or ''
# check marker expr set via "pytest -m"
if not target and markexpr:
# we use `-m "esp32 and generic"` in our CI to filter the test cases
# this doesn't cover all use cases, but fits what we do in CI.
for marker in markexpr.split('and'):
marker = marker.strip()
if marker in TARGET_MARKERS:
target.append(marker)
# "--target" must be set
if not target:
raise SystemExit(
"""Pass `--target TARGET[,TARGET...]` to specify all targets the test cases are using.
- for single DUT, we run with `pytest --target esp32`
- for multi DUT, we run with `pytest --target esp32,esp32,esp32s2` to indicate all DUTs
"""
)
apps = None
app_info_filepattern = config.getoption('app_info_filepattern')
if app_info_filepattern:
apps = []
for f in glob.glob(os.path.join(IDF_PATH, app_info_filepattern)):
apps.extend(import_apps_from_txt(f))
if '--collect-only' not in config.invocation_params.args:
config.stash[IDF_PYTEST_EMBEDDED_KEY] = IdfPytestEmbedded(
config_name=config.getoption('sdkconfig'),
target=target,
apps=apps,
)
config.pluginmanager.register(config.stash[IDF_PYTEST_EMBEDDED_KEY])
config.stash[IDF_LOCAL_PLUGIN_KEY] = IdfLocalPlugin()
config.pluginmanager.register(config.stash[IDF_LOCAL_PLUGIN_KEY])
def pytest_unconfigure(config: Config) -> None:
_pytest_embedded = config.stash.get(IDF_PYTEST_EMBEDDED_KEY, None)
if _pytest_embedded:
del config.stash[IDF_PYTEST_EMBEDDED_KEY]
config.pluginmanager.unregister(_pytest_embedded)
idf_local_plugin = config.stash.get(IDF_LOCAL_PLUGIN_KEY, None)
if idf_local_plugin:
del config.stash[IDF_LOCAL_PLUGIN_KEY]
config.pluginmanager.unregister(idf_local_plugin)
dut_artifacts_url = []

View File

@@ -91,32 +91,23 @@ Getting Started
.. code-block:: python
@pytest.mark.esp32
@pytest.mark.esp32s2
@pytest.mark.parametrize('target', [
'esp32',
'esp32s2',
], indirect=True)
@pytest.mark.generic
def test_hello_world(dut) -> None:
dut.expect('Hello world!')
This is a simple test script that could run with the ESP-IDF getting-started example :example:`get-started/hello_world`.
First two lines are the target markers:
In this test script, the ``@pytest.mark.parametrize`` decorator is used to parameterize the test case. The ``target`` parameter is a special parameter that indicates the chip target the test case runs on. The ``indirect=True`` argument means this parameter is resolved by its fixture before other fixtures.
* The ``@pytest.mark.esp32`` is a marker that indicates that this test case should be run on the ESP32.
* The ``@pytest.mark.esp32s2`` is a marker that indicates that this test case should be run on the ESP32-S2.
Next is the environment marker. The ``@pytest.mark.generic`` marker indicates that this test case should run on the generic board type.
.. note::
If the test case can be run on all targets officially supported by ESP-IDF (call ``idf.py --list-targets`` for more details), you can use a special marker ``supported_targets`` to apply all of them in one line.
We also support ``preview_targets`` and ``all_targets`` as special target markers (call ``idf.py --list-targets --preview`` for a full list of targets, including preview targets).
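A minimal sketch, assuming the ``idf_parametrize`` helper from ``pytest_embedded_idf.utils`` that the test scripts updated by this commit use, of applying all supported targets in one line:

.. code-block:: python

    import pytest
    from pytest_embedded_idf.utils import idf_parametrize

    # 'supported_targets' is expanded to every officially supported target
    @pytest.mark.generic
    @idf_parametrize('target', ['supported_targets'], indirect=['target'])
    def test_hello_world(dut) -> None:
        dut.expect('Hello world!')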
Next, we have the environment marker:
* The ``@pytest.mark.generic`` is a marker that indicates that this test case should be run on the ``generic`` board type.
.. note::
For the detailed explanation of the environment markers, please refer to :idf_file:`ENV_MARKERS definition <tools/ci/idf_pytest/constants.py>`
For the detailed explanation of the environment markers, please refer to :idf_file:`env_markers definition <pytest.ini>`
Finally, we have the test function, which uses a ``dut`` fixture. In single-DUT test cases, the ``dut`` fixture is an instance of the ``IdfDut`` class; in multi-DUT test cases, it is a tuple of ``IdfDut`` instances. For more details regarding the ``IdfDut`` class, please refer to `pytest-embedded IdfDut API reference <https://docs.espressif.com/projects/pytest-embedded/en/latest/api.html#pytest_embedded_idf.dut.IdfDut>`__.
@@ -142,8 +133,10 @@ If the test case needs to run all supported targets with these two sdkconfig fil
.. code-block:: python
@pytest.mark.esp32
@pytest.mark.esp32s2
@pytest.mark.parametrize('target', [
'esp32', # <-- run with esp32 target
'esp32s2', # <-- run with esp32s2 target
], indirect=True)
@pytest.mark.parametrize('config', [ # <-- parameterize the sdkconfig file
'foo', # <-- run with sdkconfig.ci.foo
'bar', # <-- run with sdkconfig.ci.bar
@@ -180,17 +173,6 @@ The test case ID is used to identify the test case in the JUnit report.
Nearly all the CLI options of pytest-embedded support parameterization. To see all supported CLI options, you may run ``pytest --help`` and check the ``embedded-...`` sections for vanilla pytest-embedded ones, and the ``idf`` sections for ESP-IDF-specific ones.
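As an illustration, a hedged sketch of parameterizing one such option, assuming the ``baud`` option/fixture shipped by pytest-embedded (the values are only examples):

.. code-block:: python

    import pytest

    # run the same test once per baud rate; indirect=True routes each value
    # into the pytest-embedded ``baud`` fixture before the DUT is created
    @pytest.mark.parametrize('baud', ['115200', '921600'], indirect=True)
    @pytest.mark.generic
    def test_hello_world(dut) -> None:
        dut.expect('Hello world!')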
.. note::
The target markers, like ``@pytest.mark.esp32`` and ``@pytest.mark.esp32s2``, are actually syntactic sugar for parameterization. In fact they are defined as:
.. code-block:: python
@pytest.mark.parametrize('target', [
'esp32',
'esp32s2',
], indirect=True)
Same App With Different sdkconfig Files, Different Targets
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -198,9 +180,9 @@ For some test cases, you may need to run the same app with different sdkconfig f
.. code-block:: python
@pytest.mark.parametrize('config', [
pytest.param('foo', marks=[pytest.mark.esp32]),
pytest.param('bar', marks=[pytest.mark.esp32s2]),
@pytest.mark.parametrize('config, target', [
pytest.param('foo', 'esp32'),
pytest.param('bar', 'esp32s2'),
], indirect=True)
Now this test function would be replicated to 2 test cases (represented as test case IDs):
@@ -261,40 +243,6 @@ After setting the param ``count`` to 2, all the fixtures are changed into tuples
For detailed multi-dut parametrization documentation, please refer to `pytest-embedded Multi-DUT documentation <https://docs.espressif.com/projects/pytest-embedded/en/latest/key_concepts.html#multi-duts>`__.
.. warning::
In some test scripts, you may see target markers like ``@pytest.mark.esp32`` and ``@pytest.mark.esp32s2`` used together with multi-DUT test cases. This is deprecated and should be replaced with the ``target`` parametrization.
For example,
.. code-block:: python
@pytest.mark.esp32
@pytest.mark.esp32s2
@pytest.mark.parametrize('count', [
2,
], indirect=True)
def test_hello_world(dut) -> None:
dut[0].expect('Hello world!')
dut[1].expect('Hello world!')
should be replaced with:
.. code-block:: python
@pytest.mark.parametrize('count', [
2,
], indirect=True)
@pytest.mark.parametrize('target', [
'esp32',
'esp32s2',
], indirect=True)
def test_hello_world(dut) -> None:
dut[0].expect('Hello world!')
dut[1].expect('Hello world!')
This helps avoid the ambiguity of the target markers when multi-DUT test cases use different types of targets.
Multi-Target Tests with Different Apps
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -697,9 +645,7 @@ This marker means that the test case could still be run locally with ``pytest --
Add New Markers
---------------
We are using two types of custom markers: target markers, which indicate that the test case should support the given target, and env markers, which indicate that the test case should be assigned to runners with the corresponding tags in CI.
You can add new markers by adding one line under the :idf_file:`conftest.py`. If it is a target marker, it should be added into ``TARGET_MARKERS``. If it is a marker that specifies a type of test environment, it should be added into ``ENV_MARKERS``. The syntax should be: ``<marker_name>: <marker_description>``.
You can add new markers by adding one line in :idf_file:`pytest.ini`. If the marker specifies a type of test environment, it should be added to the ``env_markers`` section; otherwise, it should be added to the ``markers`` section. The syntax is ``<marker_name>: <marker_description>``.
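For example, assuming a hypothetical env marker ``my_test_bench`` were added to the ``env_markers`` section, a test script could then mark a case with it (a sketch, not an existing marker):

.. code-block:: python

    import pytest
    from pytest_embedded_idf.utils import idf_parametrize

    # ``my_test_bench`` is a hypothetical env marker declared in pytest.ini
    @pytest.mark.my_test_bench
    @idf_parametrize('target', ['esp32'], indirect=['target'])
    def test_on_my_bench(dut) -> None:
        dut.expect('Hello world!')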
Skip Auto Flash Binary
----------------------

View File

@@ -91,32 +91,23 @@ ESP-IDF 在主机端使用 pytest 框架(以及一些 pytest 插件)来自
.. code-block:: python
@pytest.mark.esp32
@pytest.mark.esp32s2
@pytest.mark.parametrize('target', [
'esp32',
'esp32s2',
], indirect=True)
@pytest.mark.generic
def test_hello_world(dut) -> None:
dut.expect('Hello world!')
这是一个简单的测试脚本,可以与入门示例 :example:`get-started/hello_world` 一起运行。
前两行是目标标记:
在这个测试脚本中,使用了 ``@pytest.mark.parametrize`` 装饰器来参数化测试用例。``target`` 参数是一个特殊参数,用于指示测试运行的目标芯片。``indirect=True`` 参数表示此参数在其他 fixture 之前被预先计算。
* ``@pytest.mark.esp32`` 是一个标记表示此测试用例应在 ESP32 上运行。
* ``@pytest.mark.esp32s2`` 是一个标记,表示此测试用例应在 ESP32-S2 上运行。
紧接着是环境标记。``@pytest.mark.generic`` 标记表示此测试用例应在 generic 板类型上运行。
.. note::
如果测试用例可以在 ESP-IDF 官方支持的所有目标芯片上运行(调用 ``idf.py --list-targets`` 获取更多详情),可以使用特殊的标记 ``supported_targets`` 来在一行中应用所有目标。
也支持 ``preview_targets`` 和 ``all_targets`` 作为特殊的目标标记(调用 ``idf.py --list-targets --preview`` 获取包括预览目标在内的完整目标列表)。
环境标记:
* ``@pytest.mark.generic`` 标记表示此测试用例应在 generic 板类型上运行。
.. note::
有关环境标记的详细解释,请参阅 :idf_file:`ENV_MARKERS 定义 <tools/ci/idf_pytest/constants.py>`
有关环境标记的详细解释,请参阅 :idf_file:`env_markers 定义 <pytest.ini>`
关于测试函数,使用了一个 ``dut`` fixture。在单一 DUT 测试用例中,``dut`` fixture 是 ``IdfDut`` 类的一个实例,对于多个 DUT 测试用例,它是 ``IdfDut`` 实例的一个元组。有关 ``IdfDut`` 类的更多详细信息,请参阅 `pytest-embedded IdfDut API 参考 <https://docs.espressif.com/projects/pytest-embedded/en/latest/api.html#pytest_embedded_idf.dut.IdfDut>`__
@@ -142,8 +133,10 @@ ESP-IDF 在主机端使用 pytest 框架(以及一些 pytest 插件)来自
.. code-block:: python
@pytest.mark.esp32
@pytest.mark.esp32s2
@pytest.mark.parametrize('target', [
'esp32', # <-- run with esp32 target
'esp32s2', # <-- run with esp32s2 target
], indirect=True)
@pytest.mark.parametrize('config', [ # <-- parameterize the sdkconfig file
'foo', # <-- run with sdkconfig.ci.foo
'bar', # <-- run with sdkconfig.ci.bar
@@ -180,17 +173,6 @@ ESP-IDF 在主机端使用 pytest 框架(以及一些 pytest 插件)来自
几乎所有 pytest-embedded 的 CLI 选项都支持参数化。要查看所有支持的 CLI 选项,您可以运行 ``pytest --help`` 命令,并检查 ``embedded-...`` 部分以查看普通 pytest-embedded 选项,以及 ``idf`` 部分以查看 ESP-IDF 特定选项。
.. note::
目标标记,例如 ``@pytest.mark.esp32`` 和 ``@pytest.mark.esp32s2``,是参数化的一种语法糖。它们被定义为:
.. code-block:: python
@pytest.mark.parametrize('target', [
'esp32',
'esp32s2',
], indirect=True)
使用不同的 sdkconfig 文件运行相同的应用程序,支持不同的目标芯片
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -198,9 +180,9 @@ ESP-IDF 在主机端使用 pytest 框架(以及一些 pytest 插件)来自
.. code-block:: python
@pytest.mark.parametrize('config', [
pytest.param('foo', marks=[pytest.mark.esp32]),
pytest.param('bar', marks=[pytest.mark.esp32s2]),
@pytest.mark.parametrize('config, target', [
pytest.param('foo', 'esp32'),
pytest.param('bar', 'esp32s2'),
], indirect=True)
此时,这个测试函数将被复制为 2 个测试用例(测试用例 ID):
@@ -261,40 +243,6 @@ ESP-IDF 在主机端使用 pytest 框架(以及一些 pytest 插件)来自
有关详细的多个 DUT 参数化文档,请参阅 `pytest-embedded Multi-DUT 文档 <https://docs.espressif.com/projects/pytest-embedded/en/latest/key_concepts.html#multi-duts>`__
.. warning::
在一些测试脚本中,您可能会看到目标标记,如 ``@pytest.mark.esp32`` 和 ``@pytest.mark.esp32s2``,用于多个 DUT 测试用例。这些脚本已被弃用,应该替换为 ``target`` 参数化。
例如,
.. code-block:: python
@pytest.mark.esp32
@pytest.mark.esp32s2
@pytest.mark.parametrize('count', [
2,
], indirect=True)
def test_hello_world(dut) -> None:
dut[0].expect('Hello world!')
dut[1].expect('Hello world!')
应该改为:
.. code-block:: python
@pytest.mark.parametrize('count', [
2,
], indirect=True)
@pytest.mark.parametrize('target', [
'esp32',
'esp32s2',
], indirect=True)
def test_hello_world(dut) -> None:
dut[0].expect('Hello world!')
dut[1].expect('Hello world!')
这有助于避免多个 DUT 测试用例在运行不同目标芯片时造成歧义。
用不同应用程序和目标芯片进行多目标测试
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -699,7 +647,7 @@ Pytest 使用技巧
我们目前使用两种自定义 marker。target marker 是指测试用例支持此目标芯片,env marker 是指测试用例应分配到 CI 中具有相应 tag 的 runner 上。
你可以在 :idf_file:`conftest.py` 文件中添加一行新的 marker。如果该 marker 是 target marker,应将其添加到 ``TARGET_MARKERS`` 中。如果该 marker 指定了一类测试环境,应将其添加到 ``ENV_MARKERS`` 中。自定义 marker 格式为:``<marker_name>: <marker_description>``。
你可以通过在 :idf_file:`pytest.ini` 文件中添加一行来添加新的 marker。如果该 marker 指定了一类测试环境,应将其添加到 ``env_markers`` 部分。否则,应将其添加到 ``markers`` 部分。语法应为:``<marker_name>: <marker_description>``。
跳过自动烧录二进制文件
-------------------------------------

View File

@@ -7,25 +7,6 @@ import pytest
from pytest_embedded import Dut
from pytest_embedded_idf.utils import idf_parametrize
CONFIGS = [
pytest.param('esp32_singlecore', marks=[pytest.mark.esp32]),
pytest.param(
'basic',
marks=[
pytest.mark.esp32,
pytest.mark.esp32s2,
pytest.mark.esp32s3,
pytest.mark.esp32c3,
pytest.mark.esp32c5,
pytest.mark.esp32c6,
pytest.mark.esp32c61,
pytest.mark.esp32h2,
pytest.mark.esp32p4,
pytest.mark.esp32c2,
],
),
]
@pytest.mark.generic
@idf_parametrize(

View File

@@ -7,7 +7,6 @@ python_files = pytest_*.py
addopts =
-s -vv
--embedded-services esp,idf
-p no:idf-ci
--tb short
--strict-markers
--skip-check-coredump y
@@ -36,3 +35,94 @@ junit_family = xunit1
## log all to `system-out` when case fail
junit_logging = stdout
junit_log_passing_tests = False
markers =
temp_skip_ci: mark test to be skipped in CI
temp_skip: mark test to be skipped in CI and locally
require_elf: mark test to be skipped if no elf file is found
env_markers =
qemu: build and test using qemu, not real target
macos: tests should be run on macos hosts
generic: tests should be run on generic runners
flash_suspend: support flash suspend feature
eth_ip101: connected via IP101 ethernet transceiver
eth_lan8720: connected via LAN8720 ethernet transceiver
eth_rtl8201: connected via RTL8201 ethernet transceiver
eth_ksz8041: connected via KSZ8041 ethernet transceiver
eth_dp83848: connected via DP83848 ethernet transceiver
eth_w5500: SPI Ethernet module with two W5500
eth_ksz8851snl: SPI Ethernet module with two KSZ8851SNL
eth_dm9051: SPI Ethernet module with two DM9051
quad_psram: runners with quad psram
octal_psram: runners with octal psram
usb_host_flash_disk: usb host runners with USB flash disk attached
usb_device: usb device runners
ethernet_ota: ethernet OTA runners
flash_encryption: Flash Encryption runners
flash_encryption_f4r8: Flash Encryption runners with 4-line flash and 8-line psram
flash_encryption_f8r8: Flash Encryption runners with 8-line flash and 8-line psram
flash_encryption_ota: Flash Encryption runners with ethernet OTA support with 4mb flash size
flash_multi: Multiple flash chips tests
psram: Chip has 4-line psram
ir_transceiver: runners with a pair of IR transmitter and receiver
twai_transceiver: runners with a TWAI PHY transceiver
flash_encryption_wifi_high_traffic: Flash Encryption runners with wifi high traffic support
ethernet: ethernet runner
ethernet_stress: ethernet runner with stress test
ethernet_flash_8m: ethernet runner with 8mb flash
ethernet_router: both the runner and dut connect to the same router through ethernet NIC
ethernet_vlan: ethernet runner GARM-32-SH-1-R16S5N3
wifi_ap: a wifi AP in the environment
wifi_router: both the runner and dut connect to the same wifi router
wifi_high_traffic: wifi high traffic runners
wifi_wlan: wifi runner with a wireless NIC
wifi_iperf: the AP and ESP dut were placed in a shielded box - for iperf test
Example_ShieldBox: multiple shielded APs connected to shielded ESP DUT via RF cable with programmable attenuator
xtal_26mhz: runner with 26MHz xtal on board
xtal_40mhz: runner with 40MHz xtal on board
external_flash: external flash memory connected via VSPI (FSPI)
sdcard_sdmode: sdcard running in SD mode, to be removed after test migration
sdcard_spimode: sdcard running in SPI mode
emmc: eMMC card
sdcard: sdcard runner
MSPI_F8R8: runner with Octal Flash and Octal PSRAM
MSPI_F4R8: runner with Quad Flash and Octal PSRAM
MSPI_F4R4: runner with Quad Flash and Quad PSRAM
flash_120m: runner with 120M supported Flash
jtag: runner where the chip is accessible through JTAG as well
usb_serial_jtag: runner where the chip is accessible through builtin JTAG as well
adc: ADC related tests should run on adc runners
xtal32k: Runner with external 32k crystal connected
no32kXtal: Runner with no external 32k crystal connected
psramv0: Runner with PSRAM version 0
esp32eco3: Runner with esp32 eco3 connected
ecdsa_efuse: Runner with test ECDSA private keys programmed in efuse
ccs811: Runner with CCS811 connected
nvs_encr_hmac: Runner with test HMAC key programmed in efuse
i2c_oled: Runner with ssd1306 I2C oled connected
httpbin: runner for tests that need to access the httpbin service
flash_4mb: C2 runners with 4 MB flash
jtag_re_enable: Runner to re-enable jtag which is softly disabled by burning bit SOFT_DIS_JTAG on eFuse
es8311: Development board that carries es8311 codec
camera: Runner with camera
ov5647: Runner with camera ov5647
multi_dut_modbus_rs485: a pair of runners connected by RS485 bus
ieee802154: ieee802154 related tests should run on ieee802154 runners.
openthread_br: tests should be used for openthread border router.
openthread_bbr: tests should be used for openthread border router linked to Internet.
openthread_sleep: tests should be used for openthread sleepy device.
zigbee_multi_dut: zigbee runner which have multiple duts.
wifi_two_dut: tests should be run on runners which has two wifi duts connected.
generic_multi_device: generic multiple devices whose corresponding gpio pins are connected to each other.
twai_network: multiple runners form a TWAI network.
sdio_master_slave: Test sdio multi board, esp32+esp32
sdio_multidev_32_c6: Test sdio multi board, esp32+esp32c6
sdio_multidev_p4_c5: Test sdio multi board, esp32p4+esp32c5
usj_device: Test usb_serial_jtag and usb_serial_jtag is used as serial only (not console)
twai_std: twai runner with all twai supported targets connect to usb-can adapter
lp_i2s: lp_i2s runner tested with hp_i2s
ram_app: ram_app runners
esp32c3eco7: esp32c3 major version(v1.1) chips
esp32c2eco4: esp32c2 major version(v2.0) chips
recovery_bootloader: Runner with recovery bootloader offset set in eFuse

View File

@@ -1,29 +1,23 @@
#!/usr/bin/env python
# SPDX-FileCopyrightText: 2022-2024 Espressif Systems (Shanghai) CO LTD
# SPDX-FileCopyrightText: 2022-2025 Espressif Systems (Shanghai) CO LTD
# SPDX-License-Identifier: Apache-2.0
import argparse
import difflib
import inspect
import os
import re
import sys
import typing as t
from collections import defaultdict
from pathlib import Path
from typing import Dict
from typing import List
from typing import Optional
from typing import Tuple
import yaml
from idf_ci_utils import get_all_manifest_files
from idf_ci_utils import IDF_PATH
YES = u'\u2713'
NO = u'\u2717'
from idf_ci_utils import idf_relpath
# | Supported Target | ... |
# | ---------------- | --- |
SUPPORTED_TARGETS_TABLE_REGEX = re.compile(
r'^\|\s*Supported Targets.+$\n^\|(?:\s*|-).+$\n?', re.MULTILINE
)
SUPPORTED_TARGETS_TABLE_REGEX = re.compile(r'^\|\s*Supported Targets.+$\n^\|(?:\s*|-).+$\n?', re.MULTILINE)
USUAL_TO_FORMAL = {
'esp32': 'ESP32',
@@ -41,54 +35,51 @@ USUAL_TO_FORMAL = {
'linux': 'Linux',
}
FORMAL_TO_USUAL = {
'ESP32': 'esp32',
'ESP32-S2': 'esp32s2',
'ESP32-S3': 'esp32s3',
'ESP32-C3': 'esp32c3',
'ESP32-C2': 'esp32c2',
'ESP32-C6': 'esp32c6',
'ESP32-C5': 'esp32c5',
'ESP32-H2': 'esp32h2',
'ESP32-P4': 'esp32p4',
'ESP32-C61': 'esp32c61',
'ESP32-H21': 'esp32h21',
'ESP32-H4': 'esp32h4',
'Linux': 'linux',
}
FORMAL_TO_USUAL = {v: k for k, v in USUAL_TO_FORMAL.items()}
def doublequote(s: str) -> str:
if s.startswith('"') and s.endswith('"'):
return s
def diff_lists(
list1: t.List[str], list2: t.List[str], title1: str, title2: str, excluded: t.Optional[t.List[str]] = None
) -> None:
"""
Compare two lists and print the differences.
"""
diff = difflib.ndiff(list1, list2)
if not diff:
return
return f'"{s}"'
print(f'Difference between {title1} and {title2}:')
for line in diff:
last_part = line.split(' ', 1)[-1]
if excluded and last_part in excluded:
print(line + ' ' + '(excluded)')
else:
print(line)
def check_readme(
paths: List[str],
exclude_dirs: Optional[List[str]] = None,
extra_default_build_targets: Optional[List[str]] = None,
paths: t.List[str],
exclude_dirs: t.Optional[t.List[str]] = None,
extra_default_build_targets: t.Optional[t.List[str]] = None,
) -> None:
from idf_build_apps import App, find_apps
from idf_build_apps import App
from idf_build_apps import find_apps
from idf_build_apps.constants import SUPPORTED_TARGETS
def get_readme_path(_app: App) -> Optional[str]:
_readme_path = os.path.join(_app.app_dir, 'README.md')
def get_readme_path(_app_dir: str) -> t.Optional[str]:
_readme_path = os.path.join(_app_dir, 'README.md')
if not os.path.isfile(_readme_path):
_readme_path = os.path.join(_app.app_dir, '..', 'README.md')
_readme_path = os.path.join(_app_dir, '..', 'README.md')
if not os.path.isfile(_readme_path):
_readme_path = None # type: ignore
return _readme_path
def _generate_new_support_table_str(_app: App) -> str:
def _generate_new_support_table_str(_app_dir: str, _manifest_supported_targets: t.List[str]) -> str:
# extra space here
table_headers = [
f'{USUAL_TO_FORMAL[target]}' for target in _app.supported_targets
]
table_headers = [f'{USUAL_TO_FORMAL[target]}' for target in _manifest_supported_targets]
table_headers = ['Supported Targets'] + table_headers
res = '| ' + ' | '.join(table_headers) + ' |\n'
@@ -96,8 +87,8 @@ def check_readme(
return res
def _parse_existing_support_table_str(_app: App) -> Tuple[Optional[str], List[str]]:
_readme_path = get_readme_path(_app)
def _parse_existing_support_table_str(_app_dir: str) -> t.Tuple[t.Optional[str], t.List[str]]:
_readme_path = get_readme_path(_app_dir)
if not _readme_path:
return None, SUPPORTED_TARGETS
@@ -109,41 +100,31 @@ def check_readme(
return None, SUPPORTED_TARGETS
# old style
parts = [
part.strip()
for part in support_string[0].split('\n', 1)[0].split('|')
if part.strip()
]
parts = [part.strip() for part in support_string[0].split('\n', 1)[0].split('|') if part.strip()]
return support_string[0].strip(), [FORMAL_TO_USUAL[part] for part in parts[1:] if part in FORMAL_TO_USUAL]
def check_enable_build(_app: App, _old_supported_targets: List[str]) -> bool:
if _app.supported_targets == sorted(_old_supported_targets):
def check_enable_build(
_app_dir: str, _manifest_supported_targets: t.List[str], _old_supported_targets: t.List[str]
) -> bool:
if _manifest_supported_targets == sorted(_old_supported_targets):
return True
_readme_path = get_readme_path(_app)
if_clause = f'IDF_TARGET in [{", ".join([doublequote(target) for target in sorted(_old_supported_targets)])}]'
_readme_path = get_readme_path(_app_dir)
diff_lists(
sorted(_manifest_supported_targets),
sorted(_old_supported_targets),
'manifest-enabled targets',
f'supported targets table in {_readme_path}',
)
print(
inspect.cleandoc(
f'''
{_app.app_dir}:
- enable build targets according to the manifest file: {_app.supported_targets}
- enable build targets according to the old Supported Targets table under readme "{_readme_path}": {_old_supported_targets}
f"""
To enable/disable build targets, please modify your manifest file:
{App.MANIFEST.most_suitable_rule(app_dir).by_manifest_file}
If you want to disable some targets, please use the following snippet:
# Please combine this with the original one
#
# Notes:
# - please keep in mind to avoid duplicated folders as yaml keys
# - please use parentheses to group conditions, the "or" and "and" operators could only accept two operands
{_app.app_dir}:
enable:
- if: {if_clause}
temporary: true
reason: <why only enable build jobs for these targets>
'''
Please refer to https://docs.espressif.com/projects/idf-build-apps/en/latest/references/manifest.html#enable-disable-rules
for more details.
"""
)
)
@@ -153,47 +134,52 @@ def check_readme(
find_apps(
paths,
'all',
recursive=True,
exclude_list=exclude_dirs or [],
manifest_files=get_all_manifest_files(),
default_build_targets=SUPPORTED_TARGETS + extra_default_build_targets,
)
)
exit_code = 0
checked_app_dirs = set()
apps_grouped: t.Dict[str, t.List[App]] = defaultdict(list)
for app in apps:
if app.app_dir not in checked_app_dirs:
checked_app_dirs.add(app.app_dir)
else:
continue
apps_grouped[app.app_dir].append(app)
replace_str, old_supported_targets = _parse_existing_support_table_str(app)
success = check_enable_build(app, old_supported_targets)
for app_dir in apps_grouped:
replace_str, old_supported_targets = _parse_existing_support_table_str(app_dir)
# manifest defined ones
manifest_defined_targets = sorted(
{
target
for app in apps_grouped[app_dir]
for target in (
App.MANIFEST.enable_build_targets(app_dir)
+ App.MANIFEST.enable_build_targets(app_dir, config_name=app.config_name)
)
}
)
success = check_enable_build(app_dir, manifest_defined_targets, old_supported_targets)
if not success:
print(f'check_enable_build failed for app: {app}')
print(f'check_enable_build failed for app: {app_dir}')
print('-' * 80)
exit_code = 1
readme_path = get_readme_path(app)
readme_path = get_readme_path(app_dir)
new_readme_str = _generate_new_support_table_str(app_dir, manifest_defined_targets)
# no readme, create a new file
if not readme_path:
with open(os.path.join(app.app_dir, 'README.md'), 'w') as fw:
fw.write(_generate_new_support_table_str(app) + '\n')
print(f'Added new README file: {os.path.join(app.app_dir, "README.md")}')
with open(os.path.join(app_dir, 'README.md'), 'w') as fw:
fw.write(new_readme_str + '\n')
print(f'Added new README file: {os.path.join(app_dir, "README.md")}')
print('-' * 80)
exit_code = 1
# has old table, but different string
elif replace_str and replace_str != _generate_new_support_table_str(app):
elif replace_str and replace_str != new_readme_str:
with open(readme_path) as fr:
readme_str = fr.read()
with open(readme_path, 'w') as fw:
fw.write(
readme_str.replace(
replace_str, _generate_new_support_table_str(app)
)
)
fw.write(readme_str.replace(replace_str, new_readme_str))
print(f'Modified README file: {readme_path}')
print('-' * 80)
exit_code = 1
@@ -203,9 +189,7 @@ def check_readme(
readme_str = fr.read()
with open(readme_path, 'w') as fw:
fw.write(
_generate_new_support_table_str(app) + '\n\n' + readme_str
) # extra new line
fw.write(new_readme_str + '\n\n' + readme_str) # extra new line
print(f'Modified README file: {readme_path}')
print('-' * 80)
@@ -215,90 +199,61 @@ def check_readme(
def check_test_scripts(
paths: List[str],
exclude_dirs: Optional[List[str]] = None,
bypass_check_test_targets: Optional[List[str]] = None,
extra_default_build_targets: Optional[List[str]] = None,
paths: t.List[str],
exclude_dirs: t.Optional[t.List[str]] = None,
bypass_check_test_targets: t.Optional[t.List[str]] = None,
extra_default_build_targets: t.Optional[t.List[str]] = None,
) -> None:
from idf_build_apps import App, find_apps
from idf_build_apps import App
from idf_build_apps import find_apps
from idf_build_apps.constants import SUPPORTED_TARGETS
from idf_pytest.script import get_pytest_cases
from idf_ci import get_pytest_cases
# takes long time, run only in CI
# dict:
# {
# app_dir: {
# 'script_path': 'path/to/script',
# 'targets': ['esp32', 'esp32s2', 'esp32s3', 'esp32c3', 'esp32c2', 'linux'],
# 'script_paths': {'path/to/script1', 'path/to/script2'},
# 'targets': {'esp32', 'esp32s2', 'esp32s3', 'esp32c3', 'esp32c2', 'linux'},
# }
# }
def check_enable_test(
_app: App,
_pytest_app_dir_targets_dict: Dict[str, Dict[str, str]],
_app_dir: str,
_manifest_verified_targets: t.List[str],
_pytest_app_dir_targets_dict: t.Dict[str, t.Dict[str, t.Set[str]]],
) -> bool:
if _app.app_dir in _pytest_app_dir_targets_dict:
test_script_path = _pytest_app_dir_targets_dict[_app.app_dir]['script_path']
actual_verified_targets = sorted(
set(_pytest_app_dir_targets_dict[_app.app_dir]['targets'])
)
if _app_dir in _pytest_app_dir_targets_dict:
test_script_paths = _pytest_app_dir_targets_dict[_app_dir]['script_paths']
actual_verified_targets = sorted(set(_pytest_app_dir_targets_dict[_app_dir]['targets']))
else:
return True # no test case
actual_extra_tested_targets = set(actual_verified_targets) - set(
_app.verified_targets
if _manifest_verified_targets == actual_verified_targets:
return True
elif not (set(_manifest_verified_targets) - set(actual_verified_targets + (bypass_check_test_targets or []))):
return True
_title2 = 'pytest enabled targets in test scripts: \n'
for script_path in test_script_paths:
_title2 += f' - {script_path}\n'
diff_lists(
_manifest_verified_targets,
actual_verified_targets,
'manifest-enabled targets',
_title2.rstrip(),
excluded=bypass_check_test_targets or [],
)
if actual_extra_tested_targets - set(bypass_check_test_targets or []):
print(
inspect.cleandoc(
f'''
{_app.app_dir}:
- enable test targets according to the manifest file: {_app.verified_targets}
- enable test targets according to the test scripts: {actual_verified_targets}
test scripts enabled targets should be a subset of the manifest file declared ones.
Please check the test script: {test_script_path}.
'''
)
)
return False
if _app.verified_targets == actual_verified_targets:
return True
elif not (set(_app.verified_targets) - set(actual_verified_targets + (bypass_check_test_targets or []))):
print(f'WARNING: bypass test script check on {_app.app_dir} for targets {bypass_check_test_targets} ')
return True
if_clause = f'IDF_TARGET in [{", ".join([doublequote(target) for target in sorted(set(_app.verified_targets) - set(actual_verified_targets))])}]'
print(
inspect.cleandoc(
f'''
{_app.app_dir}:
- enable test targets according to the manifest file: {_app.verified_targets}
- enable test targets according to the test scripts: {actual_verified_targets}
f"""
To enable/disable test targets, please modify your manifest file:
{App.MANIFEST.most_suitable_rule(app_dir).by_manifest_file}
the test scripts enabled test targets should be the same with the manifest file enabled ones. Please check
the test script manually: {test_script_path}.
To understand how to enable/disable test targets, please refer to:
https://docs.espressif.com/projects/pytest-embedded/en/latest/usages/markers.html#idf-parametrize
If you want to enable test targets in the pytest test scripts, please add `@pytest.mark.MISSING_TARGET`
marker above the test case function.
If you want to disable the test targets in the manifest file, please modify your manifest file with
the following code snippet:
# Please combine this with the original one
#
# Notes:
# - please keep in mind to avoid duplicated folders as yaml keys
# - please use parentheses to group conditions, the "or" and "and" operators could only accept two operands
{_app.app_dir}:
disable_test:
- if: {if_clause}
temporary: true
reason: <why you disable this test>
'''
"""
)
)
return False
@@ -307,42 +262,50 @@ def check_test_scripts(
find_apps(
paths,
'all',
recursive=True,
exclude_list=exclude_dirs or [],
manifest_files=get_all_manifest_files(),
default_build_targets=SUPPORTED_TARGETS + extra_default_build_targets,
)
)
apps_grouped: t.Dict[str, t.List[App]] = defaultdict(list)
for app in apps:
apps_grouped[app.app_dir].append(app)
exit_code = 0
pytest_cases = get_pytest_cases(paths)
pytest_cases = get_pytest_cases(
paths=paths,
marker_expr=None, # don't filter host_test
)
pytest_app_dir_targets_dict = {}
for case in pytest_cases:
for pytest_app in case.apps:
app_dir = os.path.relpath(pytest_app.path, IDF_PATH)
app_dir = idf_relpath(pytest_app.path)
if app_dir not in pytest_app_dir_targets_dict:
pytest_app_dir_targets_dict[app_dir] = {
'script_path': case.path,
'targets': [pytest_app.target],
'script_paths': {case.path},
'targets': {pytest_app.target},
}
else:
pytest_app_dir_targets_dict[app_dir]['targets'].append(
pytest_app.target
pytest_app_dir_targets_dict[app_dir]['script_paths'].add(case.path)
pytest_app_dir_targets_dict[app_dir]['targets'].add(pytest_app.target)
for app_dir in apps_grouped:
# manifest defined ones
manifest_defined_targets = sorted(
{
target
for app in apps_grouped[app_dir]
for target in (
App.MANIFEST.enable_test_targets(app_dir)
+ App.MANIFEST.enable_test_targets(app_dir, config_name=app.config_name)
)
checked_app_dirs = set()
for app in apps:
if app.app_dir not in checked_app_dirs:
checked_app_dirs.add(app.app_dir)
else:
continue
success = check_enable_test(
app, pytest_app_dir_targets_dict
}
)
success = check_enable_test(app_dir, manifest_defined_targets, pytest_app_dir_targets_dict)
if not success:
print(f'check_enable_test failed for app: {app}')
print(f'check_enable_test failed for app: {app_dir}')
print('-' * 80)
exit_code = 1
continue
@@ -350,26 +313,6 @@ def check_test_scripts(
sys.exit(exit_code)
def check_exist() -> None:
exit_code = 0
config_files = get_all_manifest_files()
for file in config_files:
if 'managed_components' in Path(file).parts:
continue
with open(file) as fr:
configs = yaml.safe_load(fr)
for path in configs.keys():
if path.startswith('.'):
continue
if not os.path.isdir(path):
print(f'Path \'{path}\' referred in \'{file}\' does not exist!')
exit_code = 1
sys.exit(exit_code)
if __name__ == '__main__':
if 'CI_JOB_ID' not in os.environ:
os.environ['CI_JOB_ID'] = 'fake' # this is a CI script
@@ -400,69 +343,60 @@ if __name__ == '__main__':
arg = parser.parse_args()
# Since this script is executed from the pre-commit hook environment, make sure IDF_PATH is set
os.environ['IDF_PATH'] = os.path.realpath(
os.path.join(os.path.dirname(__file__), '..', '..')
)
os.environ['IDF_PATH'] = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..'))
if arg.action == 'check-exist':
check_exist()
else:
check_dirs = set()
check_dirs = set()
# check if *_caps.h files changed
check_all = False
soc_caps_header_files = list(
(Path(IDF_PATH) / 'components' / 'soc').glob('**/*_caps.h')
)
for p in arg.paths:
if Path(p).resolve() in soc_caps_header_files:
check_all = True
break
if os.path.isfile(p):
check_dirs.add(os.path.dirname(p))
else:
check_dirs.add(p)
if 'tools/idf_py_actions/constants.py' in arg.paths or 'tools/ci/check_build_test_rules.py' in arg.paths:
# check if *_caps.h files changed
check_all = False
soc_caps_header_files = list((Path(IDF_PATH) / 'components' / 'soc').glob('**/*_caps.h'))
for p in arg.paths:
if Path(p).resolve() in soc_caps_header_files:
check_all = True
break
if check_all:
check_dirs = {IDF_PATH}
_exclude_dirs = [os.path.join(IDF_PATH, 'tools', 'unit-test-app'),
os.path.join(IDF_PATH, 'tools', 'test_build_system', 'build_test_app'),
os.path.join(IDF_PATH, 'tools', 'templates', 'sample_project')]
if os.path.isfile(p):
check_dirs.add(os.path.dirname(p))
else:
_exclude_dirs = [os.path.join(IDF_PATH, 'tools', 'templates', 'sample_project')]
check_dirs.add(p)
extra_default_build_targets_list: List[str] = []
bypass_check_test_targets_list: List[str] = []
if arg.config:
with open(arg.config) as fr:
configs = yaml.safe_load(fr)
if 'tools/idf_py_actions/constants.py' in arg.paths or 'tools/ci/check_build_test_rules.py' in arg.paths:
check_all = True
if configs:
extra_default_build_targets_list = (
configs.get('extra_default_build_targets') or []
)
bypass_check_test_targets_list = (
configs.get('bypass_check_test_targets') or []
)
if check_all:
check_dirs = {IDF_PATH}
_exclude_dirs = [
os.path.join(IDF_PATH, 'tools', 'unit-test-app'),
os.path.join(IDF_PATH, 'tools', 'test_build_system', 'build_test_app'),
os.path.join(IDF_PATH, 'tools', 'templates', 'sample_project'),
]
else:
_exclude_dirs = [os.path.join(IDF_PATH, 'tools', 'templates', 'sample_project')]
if arg.action == 'check-readmes':
os.environ['INCLUDE_NIGHTLY_RUN'] = '1'
os.environ['NIGHTLY_RUN'] = '1'
check_readme(
list(check_dirs),
exclude_dirs=_exclude_dirs,
extra_default_build_targets=extra_default_build_targets_list,
)
elif arg.action == 'check-test-scripts':
os.environ['INCLUDE_NIGHTLY_RUN'] = '1'
os.environ['NIGHTLY_RUN'] = '1'
check_test_scripts(
list(check_dirs),
exclude_dirs=_exclude_dirs,
bypass_check_test_targets=bypass_check_test_targets_list,
extra_default_build_targets=extra_default_build_targets_list,
)
extra_default_build_targets_list: t.List[str] = []
bypass_check_test_targets_list: t.List[str] = []
if arg.config:
with open(arg.config) as fr:
configs = yaml.safe_load(fr)
if configs:
extra_default_build_targets_list = configs.get('extra_default_build_targets') or []
bypass_check_test_targets_list = configs.get('bypass_check_test_targets') or []
if arg.action == 'check-readmes':
os.environ['INCLUDE_NIGHTLY_RUN'] = '1'
os.environ['NIGHTLY_RUN'] = '1'
check_readme(
list(check_dirs),
exclude_dirs=_exclude_dirs,
extra_default_build_targets=extra_default_build_targets_list,
)
elif arg.action == 'check-test-scripts':
os.environ['INCLUDE_NIGHTLY_RUN'] = '1'
os.environ['NIGHTLY_RUN'] = '1'
check_test_scripts(
list(check_dirs),
exclude_dirs=_exclude_dirs,
bypass_check_test_targets=bypass_check_test_targets_list,
extra_default_build_targets=extra_default_build_targets_list,
)

View File

@@ -1,47 +0,0 @@
#!/usr/bin/env python3
# SPDX-FileCopyrightText: 2025 Espressif Systems (Shanghai) CO LTD
# SPDX-License-Identifier: Apache-2.0
import argparse
import os
import sys
from pathlib import Path
import pytest
sys.path.insert(0, os.path.dirname(__file__))
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
from idf_ci_utils import IDF_PATH # noqa: E402
os.environ['IDF_PATH'] = IDF_PATH
os.environ['PYTEST_IGNORE_COLLECT_IMPORT_ERROR'] = '1'
from idf_pytest.plugin import IdfPytestEmbedded # noqa: E402
def main() -> None:
parser = argparse.ArgumentParser(description='Pytest linter check')
parser.add_argument(
'files',
nargs='*',
help='Python files to check (full paths separated by space)',
)
args = parser.parse_args()
# Convert input files to pytest-compatible paths
pytest_scripts = [str(Path(f).resolve()) for f in args.files]
cmd = [
'--collect-only',
*pytest_scripts,
'--target', 'all',
'-p', 'test_linter',
]
res = pytest.main(cmd, plugins=[IdfPytestEmbedded('all')])
sys.exit(res)
if __name__ == '__main__':
main()

View File

@@ -3,7 +3,7 @@
# internal use only for CI
# get latest MR information by source branch
#
# SPDX-FileCopyrightText: 2020-2024 Espressif Systems (Shanghai) CO LTD
# SPDX-FileCopyrightText: 2020-2025 Espressif Systems (Shanghai) CO LTD
# SPDX-License-Identifier: Apache-2.0
#
import argparse
@@ -17,7 +17,8 @@ from gitlab_api import Gitlab
from idf_ci_utils import IDF_PATH
if t.TYPE_CHECKING:
from gitlab.v4.objects import ProjectCommit, ProjectMergeRequest
from gitlab.v4.objects import ProjectCommit
from gitlab.v4.objects import ProjectMergeRequest
def _get_mr_obj(source_branch: str) -> t.Optional['ProjectMergeRequest']:
@@ -115,16 +116,6 @@ def get_mr_components(
return list(components)
def get_target_in_tags(tags: str) -> str:
from idf_pytest.constants import TARGET_MARKERS
for x in tags.split(','):
if x in TARGET_MARKERS:
return x
raise RuntimeError(f'No target marker found in {tags}')
def _print_list(_list: t.List[str], separator: str = '\n') -> None:
print(separator.join(_list))
@@ -159,7 +150,5 @@ if __name__ == '__main__':
_print_list([commit.id for commit in get_mr_commits(args.src_branch)])
elif args.action == 'components':
_print_list(get_mr_components(args.src_branch, args.modified_files))
elif args.action == 'target_in_tags':
print(get_target_in_tags(args.tags))
else:
raise NotImplementedError('not possible to get here')

View File

@@ -2,7 +2,13 @@
# no_env_marker_test_cases: List of test cases that do not have environment markers.
# each item shall be the test node id, you may check the error message to get the node id.
no_env_marker_test_cases:
- dummy_test_case
- test_examples_security_secure_boot
- test_examples_security_secure_boot_corrupt_app_sig
- test_examples_security_secure_boot_corrupt_bl_sig
- test_examples_security_secure_boot_key_combo
- test_examples_security_secure_boot_key_revoke
- test_usb_enum
- test_usb_ext_port
# no_runner_tags: List of runner tags that has no test runner set.
# each item shall be a comma separated list of runner tags.

View File

@@ -10,7 +10,6 @@ tools/ci/check_*.py
tools/ci/check_*.sh
tools/ci/check_*.txt
tools/ci/check_copyright_config.yaml
tools/ci/check_test_files.py
tools/ci/checkout_project_ref.py
tools/ci/ci_fetch_submodule.py
tools/ci/ci_get_mr_info.py
@@ -30,6 +29,7 @@ tools/ci/gitlab_yaml_linter.py
tools/ci/idf_build_apps_dump_soc_caps.py
tools/ci/idf_ci_local/**/*
tools/ci/idf_ci_utils.py
tools/ci/idf_pytest/**/*
tools/ci/mirror-submodule-update.sh
tools/ci/multirun_with_pyenv.sh
tools/ci/mypy_ignore_list.txt
@@ -42,7 +42,6 @@ tools/ci/python_packages/idf_iperf_test_util/**/*
tools/ci/python_packages/wifi_tools.py
tools/ci/sg_rules/*
tools/ci/sort_yaml.py
tools/ci/test_linter.py
tools/ci/utils.sh
tools/eclipse-code-style.xml
tools/esp_prov/**/*

View File

@@ -67,7 +67,6 @@ tools/ci/check_requirement_files.py
tools/ci/check_rules_components_patterns.py
tools/ci/check_soc_headers_leak.py
tools/ci/check_soc_struct_headers.py
tools/ci/check_test_files.py
tools/ci/check_tools_files_patterns.py
tools/ci/check_type_comments.py
tools/ci/checkout_project_ref.py

View File

@@ -1,4 +1,4 @@
# SPDX-FileCopyrightText: 2020-2024 Espressif Systems (Shanghai) CO LTD
# SPDX-FileCopyrightText: 2020-2025 Espressif Systems (Shanghai) CO LTD
# SPDX-License-Identifier: Apache-2.0
# internal use only for CI
# some CI related util functions
@@ -9,7 +9,6 @@ import subprocess
import sys
import typing as t
from functools import cached_property
from pathlib import Path
IDF_PATH: str = os.path.abspath(os.getenv('IDF_PATH', os.path.join(os.path.dirname(__file__), '..', '..')))
@@ -233,18 +232,6 @@ class GitlabYmlConfig:
return self.config[name] # type: ignore
def get_all_manifest_files() -> t.List[str]:
paths: t.List[str] = []
for p in Path(IDF_PATH).glob('**/.build-test-rules.yml'):
if 'managed_components' in p.parts:
continue
paths.append(str(p))
return paths
def sanitize_job_name(name: str) -> str:
"""
Sanitize the job name from CI_JOB_NAME

View File

@@ -4,18 +4,7 @@
Pytest Related Constants. Don't import third-party packages here.
"""
import os
import typing as t
import warnings
from dataclasses import dataclass
from enum import Enum
from functools import cached_property
from pathlib import Path
from _pytest.python import Function
from idf_ci_utils import IDF_PATH
from idf_ci_utils import idf_relpath
from pytest_embedded.utils import to_list
SUPPORTED_TARGETS = [
'esp32',
@@ -33,304 +22,7 @@ PREVIEW_TARGETS: t.List[str] = [] # this PREVIEW_TARGETS excludes 'linux' targe
DEFAULT_SDKCONFIG = 'default'
DEFAULT_LOGDIR = 'pytest-embedded'
TARGET_MARKERS = {
'esp32': 'support esp32 target',
'esp32s2': 'support esp32s2 target',
'esp32s3': 'support esp32s3 target',
'esp32c3': 'support esp32c3 target',
'esp32c2': 'support esp32c2 target',
'esp32c5': 'support esp32c5 target',
'esp32c6': 'support esp32c6 target',
'esp32h2': 'support esp32h2 target',
'esp32h4': 'support esp32h4 target', # as preview
'esp32h21': 'support esp32h21 target', # as preview
'esp32p4': 'support esp32p4 target',
'esp32c61': 'support esp32c61 target',
'linux': 'support linux target',
}
SPECIAL_MARKERS = {
'supported_targets': 'support all officially announced supported targets, refer to `SUPPORTED_TARGETS`',
'preview_targets': "support all preview targets ('none')",
'all_targets': 'support all targets, including supported ones and preview ones',
'temp_skip_ci': 'temp skip tests for specified targets only in ci',
'temp_skip': 'temp skip tests for specified targets both in ci and locally',
'nightly_run': 'tests should be executed as part of the nightly trigger pipeline',
'host_test': 'tests which should not be built at the build stage, and instead built in host_test stage',
'require_elf': 'tests which require elf file',
}
ENV_MARKERS = {
# special markers
'qemu': 'build and test using qemu, not real target',
'macos': 'tests should be run on macos hosts',
# single-dut markers
'generic': 'tests should be run on generic runners',
'flash_suspend': 'support flash suspend feature',
'eth_ip101': 'connected via IP101 ethernet transceiver',
'eth_lan8720': 'connected via LAN8720 ethernet transceiver',
'eth_rtl8201': 'connected via RTL8201 ethernet transceiver',
'eth_ksz8041': 'connected via KSZ8041 ethernet transceiver',
'eth_dp83848': 'connected via DP83848 ethernet transceiver',
'eth_w5500': 'SPI Ethernet module with two W5500',
'eth_ksz8851snl': 'SPI Ethernet module with two KSZ8851SNL',
'eth_dm9051': 'SPI Ethernet module with two DM9051',
'quad_psram': 'runners with quad psram',
'octal_psram': 'runners with octal psram',
'usb_host_flash_disk': 'usb host runners with USB flash disk attached',
'usb_device': 'usb device runners',
'ethernet_ota': 'ethernet OTA runners',
'flash_encryption': 'Flash Encryption runners',
'flash_encryption_f4r8': 'Flash Encryption runners with 4-line flash and 8-line psram',
'flash_encryption_f8r8': 'Flash Encryption runners with 8-line flash and 8-line psram',
'flash_encryption_ota': 'Flash Encryption runners with ethernet OTA support with 4mb flash size',
'flash_multi': 'Multiple flash chips tests',
'psram': 'Chip has 4-line psram',
'ir_transceiver': 'runners with a pair of IR transmitter and receiver',
'twai_transceiver': 'runners with a TWAI PHY transceiver',
'flash_encryption_wifi_high_traffic': 'Flash Encryption runners with wifi high traffic support',
'ethernet': 'ethernet runner',
'ethernet_stress': 'ethernet runner with stress test',
'ethernet_flash_8m': 'ethernet runner with 8mb flash',
'ethernet_router': 'both the runner and dut connect to the same router through ethernet NIC',
'ethernet_vlan': 'ethernet runner GARM-32-SH-1-R16S5N3',
'wifi_ap': 'a wifi AP in the environment',
'wifi_router': 'both the runner and dut connect to the same wifi router',
'wifi_high_traffic': 'wifi high traffic runners',
'wifi_wlan': 'wifi runner with a wireless NIC',
'wifi_iperf': 'the AP and ESP dut were placed in a shielded box - for iperf test',
'Example_ShieldBox': 'multiple shielded APs connected to shielded ESP DUT via RF cable with programmable attenuator', # noqa E501
'xtal_26mhz': 'runner with 26MHz xtal on board',
'xtal_40mhz': 'runner with 40MHz xtal on board',
'external_flash': 'external flash memory connected via VSPI (FSPI)',
'sdcard_sdmode': 'sdcard running in SD mode, to be removed after test migration',
'sdcard_spimode': 'sdcard running in SPI mode',
'emmc': 'eMMC card',
'sdcard': 'sdcard runner',
'MSPI_F8R8': 'runner with Octal Flash and Octal PSRAM',
'MSPI_F4R8': 'runner with Quad Flash and Octal PSRAM',
'MSPI_F4R4': 'runner with Quad Flash and Quad PSRAM',
'flash_120m': 'runner with 120M supported Flash',
'jtag': 'runner where the chip is accessible through JTAG as well',
'usb_serial_jtag': 'runner where the chip is accessible through builtin JTAG as well',
'adc': 'ADC related tests should run on adc runners',
'xtal32k': 'Runner with external 32k crystal connected',
'no32kXtal': 'Runner with no external 32k crystal connected',
'psramv0': 'Runner with PSRAM version 0',
'esp32eco3': 'Runner with esp32 eco3 connected',
'ecdsa_efuse': 'Runner with test ECDSA private keys programmed in efuse',
'ccs811': 'Runner with CCS811 connected',
'nvs_encr_hmac': 'Runner with test HMAC key programmed in efuse',
'i2c_oled': 'Runner with ssd1306 I2C oled connected',
'httpbin': 'runner for tests that need to access the httpbin service',
'flash_4mb': 'C2 runners with 4 MB flash',
'jtag_re_enable': 'Runner to re-enable jtag which is softly disabled by burning bit SOFT_DIS_JTAG on eFuse',
'es8311': 'Development board that carries es8311 codec',
'camera': 'Runner with camera',
'ov5647': 'Runner with camera ov5647',
# multi-dut markers
'multi_dut_modbus_rs485': 'a pair of runners connected by RS485 bus',
'ieee802154': 'ieee802154 related tests should run on ieee802154 runners.',
'openthread_br': 'tests should be used for openthread border router.',
'openthread_bbr': 'tests should be used for openthread border router linked to Internet.',
'openthread_sleep': 'tests should be used for openthread sleepy device.',
'zigbee_multi_dut': 'zigbee runner which have multiple duts.',
'wifi_two_dut': 'tests should be run on runners which has two wifi duts connected.',
'generic_multi_device': 'generic multiple devices whose corresponding gpio pins are connected to each other.',
'twai_network': 'multiple runners form a TWAI network.',
'sdio_master_slave': 'Test sdio multi board, esp32+esp32',
'sdio_multidev_32_c6': 'Test sdio multi board, esp32+esp32c6',
'sdio_multidev_p4_c5': 'Test sdio multi board, esp32p4+esp32c5',
'usj_device': 'Test usb_serial_jtag and usb_serial_jtag is used as serial only (not console)',
'twai_std': 'twai runner with all twai supported targets connect to usb-can adapter',
'lp_i2s': 'lp_i2s runner tested with hp_i2s',
'ram_app': 'ram_app runners',
'esp32c3eco7': 'esp32c3 major version(v1.1) chips',
'esp32c2eco4': 'esp32c2 major version(v2.0) chips',
'recovery_bootloader': 'Runner with recovery bootloader offset set in eFuse',
}
# by default the timeout is 1h, for some special cases we need to extend it
TIMEOUT_4H_MARKERS = [
'ethernet_stress',
]
DEFAULT_CONFIG_RULES_STR = ['sdkconfig.ci=default', 'sdkconfig.ci.*=', '=default']
DEFAULT_BUILD_TEST_RULES_FILEPATH = os.path.join(IDF_PATH, '.gitlab', 'ci', 'default-build-test-rules.yml')
class CollectMode(str, Enum):
SINGLE_SPECIFIC = 'single_specific'
MULTI_SPECIFIC = 'multi_specific'
MULTI_ALL_WITH_PARAM = 'multi_all_with_param'
ALL = 'all'
class PytestApp:
"""
Pytest App with relative path to IDF_PATH
"""
def __init__(self, path: str, target: str, config: str) -> None:
self.path = idf_relpath(path)
self.target = target
self.config = config
def __hash__(self) -> int:
return hash((self.path, self.target, self.config))
@cached_property
def build_dir(self) -> str:
return os.path.join(self.path, f'build_{self.target}_{self.config}')
@dataclass
class PytestCase:
apps: t.List[PytestApp]
item: Function
multi_dut_without_param: bool
def __hash__(self) -> int:
return hash((self.path, self.name, self.apps, self.all_markers))
@cached_property
def path(self) -> str:
return str(self.item.path)
@cached_property
def name(self) -> str:
return self.item.originalname # type: ignore
@cached_property
def targets(self) -> t.List[str]:
if not self.multi_dut_without_param:
return [app.target for app in self.apps]
# multi-dut test cases without parametrize
skip = True
for _t in [app.target for app in self.apps]:
if _t in self.target_markers:
skip = False
warnings.warn(
f'Defining `pytest.mark.[TARGET]` on multi-dut test cases is deprecated. '  # noqa: W604
f'Please use parametrize instead for test case {self.item.nodeid}'
)
break
if not skip:
return [app.target for app in self.apps]
return [''] * len(self.apps) # this will help to filter these cases out later
@cached_property
def is_single_dut_test_case(self) -> bool:
return len(self.apps) == 1
@cached_property
def is_host_test(self) -> bool:
return 'host_test' in self.all_markers or 'linux' in self.targets
# the following markers could be changed dynamically, don't use cached_property
@property
def all_markers(self) -> t.Set[str]:
return {marker.name for marker in self.item.iter_markers()}
@property
def skip_targets(self) -> t.Set[str]:
def _get_temp_markers_disabled_targets(marker_name: str) -> t.Set[str]:
temp_marker = self.item.get_closest_marker(marker_name)
if not temp_marker:
return set()
# temp markers should always use keyword arguments `targets` and `reason`
if not temp_marker.kwargs.get('targets') or not temp_marker.kwargs.get('reason'):
raise ValueError(
f'`{marker_name}` should always use keyword arguments `targets` and `reason`. ' # noqa: W604
f'For example: '
f'`@pytest.mark.{marker_name}(targets=["esp32"], reason="IDF-xxxx, will fix it ASAP")`'
)
return set(to_list(temp_marker.kwargs['targets'])) # type: ignore
temp_skip_ci_targets = _get_temp_markers_disabled_targets('temp_skip_ci')
temp_skip_targets = _get_temp_markers_disabled_targets('temp_skip')
# in CI we skip the union of `temp_skip` and `temp_skip_ci`
if os.getenv('CI_JOB_ID'):
_skip_targets = temp_skip_ci_targets.union(temp_skip_targets)
else: # we use `temp_skip` locally
_skip_targets = temp_skip_targets
return _skip_targets
@property
def target_markers(self) -> t.Set[str]:
return {marker for marker in self.all_markers if marker in TARGET_MARKERS} - self.skip_targets
@property
def env_markers(self) -> t.Set[str]:
return {marker for marker in self.all_markers if marker in ENV_MARKERS}
@property
def target_selector(self) -> str:
return ','.join(app.target for app in self.apps)
@property
def requires_elf_or_map(self) -> bool:
"""
This property determines whether the test case requires the elf or map file. By default, each app in the test case
only requires its .bin files.
:return: True if the test case requires elf or map file, False otherwise
"""
if 'jtag' in self.env_markers or 'usb_serial_jtag' in self.env_markers:
return True
cases_need_elf = ['panic', 'gdbstub_runtime']
if 'require_elf' in self.all_markers:
return True
for case in cases_need_elf:
if any(case in Path(app.path).parts for app in self.apps):
return True
return False
def all_built_in_app_lists(self, app_lists: t.Optional[t.List[str]] = None) -> t.Optional[str]:
"""
Check if all binaries of the test case are built in the app lists.
:param app_lists: app lists to check
:return: debug string if not all binaries are built in the app lists, None otherwise
"""
if app_lists is None:
# ignore this feature
return None
bin_found = [0] * len(self.apps)
for i, app in enumerate(self.apps):
if app.build_dir in app_lists:
bin_found[i] = 1
if sum(bin_found) == 0:
msg = f'Skip test case {self.name} because none of the following binaries are listed in the app lists: '  # noqa: E713
for app in self.apps:
msg += f'\n - {app.build_dir}'
print(msg)
return msg
if sum(bin_found) == len(self.apps):
return None
# some found, some not, looks suspicious
msg = f'Some binaries of test case {self.name} are not listed in the app lists.'  # noqa: E713
for i, app in enumerate(self.apps):
if bin_found[i] == 0:
msg += f'\n - {app.build_dir}'
msg += '\nMight be an issue with the .build-test-rules.yml files'
print(msg)
return msg
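The `TARGET_MARKERS` removed above were only syntactic sugar over parametrization. A minimal before/after sketch of the migration (the test name and target list are illustrative; `idf_parametrize` is imported from `pytest_embedded_idf.utils`, as seen in the test-script hunks further below):

import pytest
from pytest_embedded_idf.utils import idf_parametrize

# old style, relying on the removed target markers:
#     @pytest.mark.esp32
#     @pytest.mark.esp32s2
#     @pytest.mark.generic
#     def test_hello(dut):
#         ...

# new style, expressing the same target matrix as a parameter:
@pytest.mark.generic
@idf_parametrize('target', ['esp32', 'esp32s2'], indirect=['target'])
def test_hello(dut):
    pass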

View File

@@ -1,108 +1,88 @@
# SPDX-FileCopyrightText: 2023-2025 Espressif Systems (Shanghai) CO LTD
# SPDX-License-Identifier: Apache-2.0
import importlib
import logging
import os
import sys
import typing as t
from collections import defaultdict
from functools import cached_property
from unittest.mock import MagicMock
from pathlib import Path
from xml.etree import ElementTree as ET
import pytest
from _pytest.config import ExitCode
from _pytest.main import Session
from _pytest.config import Config
from _pytest.python import Function
from _pytest.runner import CallInfo
from idf_build_apps import App
from idf_build_apps.constants import BuildStatus
from idf_ci_utils import idf_relpath
from idf_ci import IdfPytestPlugin
from idf_ci import PytestCase
from idf_ci.idf_pytest.plugin import IDF_CI_PYTEST_DEBUG_INFO_KEY
from idf_ci_utils import to_list
from pytest_embedded import Dut
from pytest_embedded.plugin import parse_multi_dut_args
from pytest_embedded.utils import find_by_suffix
from pytest_embedded.utils import to_list
from pytest_ignore_test_results.ignore_results import ChildCase
from pytest_ignore_test_results.ignore_results import ChildCasesStashKey
from .constants import CollectMode
from .constants import DEFAULT_SDKCONFIG
from .constants import PREVIEW_TARGETS
from .constants import PytestApp
from .constants import PytestCase
from .constants import SUPPORTED_TARGETS
from .utils import comma_sep_str_to_list
from .utils import format_case_id
from .utils import merge_junit_files
from .utils import normalize_testcase_file_path
IDF_PYTEST_EMBEDDED_KEY = pytest.StashKey['IdfPytestEmbedded']()
ITEM_FAILED_CASES_KEY = pytest.StashKey[list]()
ITEM_FAILED_KEY = pytest.StashKey[bool]()
ITEM_PYTEST_CASE_KEY = pytest.StashKey[PytestCase]()
IDF_LOCAL_PLUGIN_KEY = pytest.StashKey['IdfLocalPlugin']()
class IdfPytestEmbedded:
def requires_elf_or_map(case: PytestCase) -> bool:
"""
Determines whether the test case requires the elf or map file. By default, each app in the test case
only requires its .bin files.
:return: True if the test case requires elf or map file, False otherwise
"""
if 'jtag' in case.env_markers or 'usb_serial_jtag' in case.env_markers:
return True
folders_need_elf = ['panic', 'gdbstub_runtime']
if 'require_elf' in case.all_markers:
return True
for folder in folders_need_elf:
if any(folder in Path(app.path).parts for app in case.apps):
return True
return False
def skipped_targets(item: Function) -> t.Set[str]:
def _get_temp_markers_disabled_targets(marker_name: str) -> t.Set[str]:
temp_marker = item.get_closest_marker(marker_name)
if not temp_marker:
return set()
# temp markers should always use keyword arguments `targets` and `reason`
if not temp_marker.kwargs.get('targets') or not temp_marker.kwargs.get('reason'):
raise ValueError(
f'`{marker_name}` should always use keyword arguments `targets` and `reason`. ' # noqa: W604
f'For example: '
f'`@pytest.mark.{marker_name}(targets=["esp32"], reason="IDF-xxxx, will fix it ASAP")`'
)
return set(to_list(temp_marker.kwargs['targets']))
temp_skip_ci_targets = _get_temp_markers_disabled_targets('temp_skip_ci')
temp_skip_targets = _get_temp_markers_disabled_targets('temp_skip')
# in CI we skip the union of `temp_skip` and `temp_skip_ci`
if os.getenv('CI_JOB_ID'):
_skip_targets = temp_skip_ci_targets.union(temp_skip_targets)
else: # we use `temp_skip` locally
_skip_targets = temp_skip_targets
return _skip_targets
class IdfLocalPlugin:
UNITY_RESULT_MAPPINGS = {
'PASS': 'passed',
'FAIL': 'failed',
'IGNORE': 'skipped',
}
def __init__(
self,
target: t.Union[t.List[str], str],
*,
config_name: t.Optional[str] = None,
single_target_duplicate_mode: bool = False,
apps: t.Optional[t.List[App]] = None,
):
if isinstance(target, str):
# sequence also matters
self.target = comma_sep_str_to_list(target)
else:
self.target = target
if not self.target:
raise ValueError('`target` should not be empty')
self.config_name = config_name
# these are useful while gathering all the multi-dut test cases
# when this mode is activated,
#
# pytest.mark.esp32
# pytest.mark.parametrize('count', [2], indirect=True)
# def test_foo(dut):
# pass
#
# should be collected when running `pytest --target esp32`
#
# otherwise, it should be collected when running `pytest --target esp32,esp32`
self._single_target_duplicate_mode = single_target_duplicate_mode
self.apps_list = (
[os.path.join(idf_relpath(app.app_dir), app.build_dir) for app in apps if app.build_status == BuildStatus.SUCCESS]
if apps is not None
else None
)
self.cases: t.List[PytestCase] = []
# record the additional info
# test case id: {key: value}
self.additional_info: t.Dict[str, t.Dict[str, t.Any]] = defaultdict(dict)
@cached_property
def collect_mode(self) -> CollectMode:
if len(self.target) == 1:
if self.target[0] == CollectMode.MULTI_ALL_WITH_PARAM:
return CollectMode.MULTI_ALL_WITH_PARAM
else:
return CollectMode.SINGLE_SPECIFIC
else:
return CollectMode.MULTI_SPECIFIC
@staticmethod
def get_param(item: Function, key: str, default: t.Any = None) -> t.Any:
# funcargs is not calculated during collection
@@ -112,210 +92,47 @@ class IdfPytestEmbedded:
return item.callspec.params.get(key, default) or default
def item_to_pytest_case(self, item: Function) -> t.Optional[PytestCase]:
"""
Turn a pytest item into a PytestCase
"""
count = self.get_param(item, 'count', 1)
@pytest.hookimpl(wrapper=True)
def pytest_collection_modifyitems(self, config: Config, items: t.List[Function]) -> t.Generator[None, None, None]:
yield # throw it back to idf-ci
# default app_path is where the test script locates
app_paths = to_list(parse_multi_dut_args(count, self.get_param(item, 'app_path', os.path.dirname(item.path))))
configs = to_list(parse_multi_dut_args(count, self.get_param(item, 'config', DEFAULT_SDKCONFIG)))
targets = to_list(parse_multi_dut_args(count, self.get_param(item, 'target')))
deselected_items = []
multi_dut_without_param = False
if count > 1 and targets == [None] * count:
multi_dut_without_param = True
try:
targets = to_list(parse_multi_dut_args(count, '|'.join(self.target))) # check later while collecting
except ValueError: # count doesn't match
return None
elif targets is None:
targets = self.target
return PytestCase(
apps=[PytestApp(app_paths[i], targets[i], configs[i]) for i in range(count)],
item=item,
multi_dut_without_param=multi_dut_without_param
)
def pytest_collectstart(self) -> None:
# mock the optional packages while collecting locally
if not os.getenv('CI_JOB_ID') or os.getenv('PYTEST_IGNORE_COLLECT_IMPORT_ERROR') == '1':
# optional packages required by test scripts
for p in [
'scapy',
'scapy.all',
'websocket', # websocket-client
'netifaces',
'RangeHTTPServer', # rangehttpserver
'dbus', # dbus-python
'dbus.mainloop',
'dbus.mainloop.glib',
'google.protobuf', # protobuf
'google.protobuf.internal',
'bleak',
'paho', # paho-mqtt
'paho.mqtt',
'paho.mqtt.client',
'paramiko',
'netmiko',
'pyecharts',
'pyecharts.options',
'pyecharts.charts',
'can', # python-can
]:
try:
importlib.import_module(p)
except ImportError:
logging.warning(f'Optional package {p} is not installed, mocking it while collecting...')
sys.modules[p] = MagicMock()
@pytest.hookimpl(tryfirst=True)
def pytest_collection_modifyitems(self, items: t.List[Function]) -> None:
"""
Background info:
We're using `pytest.mark.[TARGET]` as syntactic sugar to indicate that the test is supported by all
the listed targets. For example,
>>> @pytest.mark.esp32
>>> @pytest.mark.esp32s2
should be treated as
>>> @pytest.mark.parametrize('target', [
>>> 'esp32',
>>> 'esp32s2',
>>> ], indirect=True)
All single-dut test cases, and some of the multi-dut test cases with the same targets, are using this
way to indicate the supported targets.
To avoid ambiguity,
- when we're collecting single-dut test cases with esp32, we call
`pytest --collect-only --target esp32`
- when we're collecting multi-dut test cases, we list all the targets, even when they're the same
`pytest --collect-only --target esp32,esp32` for two esp32 connected
`pytest --collect-only --target esp32,esp32s2` for esp32 and esp32s2 connected
therefore, we have two different search strategies for test cases, explained in 3.1 and 3.2 below
"""
# 1. Filter according to nightly_run related markers
if os.getenv('INCLUDE_NIGHTLY_RUN') == '1':
# nightly_run and non-nightly_run cases are both included
pass
elif os.getenv('NIGHTLY_RUN') == '1':
# only nightly_run cases are included
items[:] = [_item for _item in items if _item.get_closest_marker('nightly_run') is not None]
else:
# only non-nightly_run cases are included
items[:] = [_item for _item in items if _item.get_closest_marker('nightly_run') is None]
# 2. Add markers according to special markers
item_to_case_dict: t.Dict[Function, PytestCase] = {}
# Filter
filtered_items = []
for item in items:
case = self.item_to_pytest_case(item)
if case is None:
case = IdfPytestPlugin.get_case_by_item(item)
if not case:
deselected_items.append(item)
continue
item.stash[ITEM_PYTEST_CASE_KEY] = item_to_case_dict[item] = case
if 'supported_targets' in item.keywords:
for _target in SUPPORTED_TARGETS:
item.add_marker(_target)
if 'preview_targets' in item.keywords:
for _target in PREVIEW_TARGETS:
item.add_marker(_target)
if 'all_targets' in item.keywords:
for _target in [*SUPPORTED_TARGETS, *PREVIEW_TARGETS]:
item.add_marker(_target)
if case.target_selector in skipped_targets(item):
deselected_items.append(item)
item.stash[IDF_CI_PYTEST_DEBUG_INFO_KEY] = 'skipped by temp_skip markers'
continue
# add single-dut "target" as param
_item_target_param = self.get_param(item, 'target', None)
if case.is_single_dut_test_case and _item_target_param and _item_target_param not in case.all_markers:
item.add_marker(_item_target_param)
filtered_items.append(item)
items[:] = [_item for _item in items if _item in item_to_case_dict]
items[:] = filtered_items
# 3.1. CollectMode.SINGLE_SPECIFIC, like `pytest --target esp32`
if self.collect_mode == CollectMode.SINGLE_SPECIFIC:
filtered_items = []
for item in items:
case = item_to_case_dict[item]
# single-dut one
if case.is_single_dut_test_case and self.target[0] in case.target_markers:
filtered_items.append(item)
# multi-dut ones and in single_target_duplicate_mode
elif self._single_target_duplicate_mode and not case.is_single_dut_test_case:
# ignore those test cases with `target` defined in parametrize, since these will be covered in 3.3
if self.get_param(item, 'target', None) is None and self.target[0] in case.target_markers:
filtered_items.append(item)
items[:] = filtered_items
# 3.2. CollectMode.MULTI_SPECIFIC, like `pytest --target esp32,esp32`
elif self.collect_mode == CollectMode.MULTI_SPECIFIC:
items[:] = [_item for _item in items if item_to_case_dict[_item].targets == self.target]
# 3.3. CollectMode.MULTI_ALL_WITH_PARAM, intended to be used by `get_pytest_cases`
else:
filtered_items = []
for item in items:
case = item_to_case_dict[item]
target = self.get_param(item, 'target', None)
if (
not case.is_single_dut_test_case and
target is not None and
target not in case.skip_targets
):
filtered_items.append(item)
items[:] = filtered_items
# 4. filter according to the sdkconfig, if there's param 'config' defined
if self.config_name:
_items = []
for item in items:
case = item_to_case_dict[item]
if self.config_name not in set(app.config or DEFAULT_SDKCONFIG for app in case.apps):
self.additional_info[case.name]['skip_reason'] = f'Only run with sdkconfig {self.config_name}'
else:
_items.append(item)
items[:] = _items
# 5. filter by `self.apps_list`, skip the test case if not listed
# should only be used in CI
_items = []
for item in items:
case = item_to_case_dict[item]
if msg := case.all_built_in_app_lists(self.apps_list):
self.additional_info[case.name]['skip_reason'] = msg
else:
_items.append(item)
# Deselect
config.hook.pytest_deselected(items=deselected_items)
# OKAY!!! All left ones will be executed, sort it and add more markers
items[:] = sorted(
_items, key=lambda x: (os.path.dirname(x.path), self.get_param(x, 'config', DEFAULT_SDKCONFIG))
items, key=lambda x: (os.path.dirname(x.path), self.get_param(x, 'config', DEFAULT_SDKCONFIG))
)
for item in items:
case = item_to_case_dict[item]
case = IdfPytestPlugin.get_case_by_item(item)
# set default timeout 10 minutes for each case
if 'timeout' not in item.keywords:
item.add_marker(pytest.mark.timeout(10 * 60))
# add 'xtal_40mhz' tag as a default tag for esp32c2 target
# only add this marker for esp32c2 cases
if 'esp32c2' in self.target and 'esp32c2' in case.targets and 'xtal_26mhz' not in case.all_markers:
if 'esp32c2' in case.targets and 'xtal_26mhz' not in case.all_markers:
item.add_marker('xtal_40mhz')
def pytest_report_collectionfinish(self, items: t.List[Function]) -> None:
self.cases = [item.stash[ITEM_PYTEST_CASE_KEY] for item in items]
def pytest_custom_test_case_name(self, item: Function) -> str:
return item.funcargs.get('test_case_name', item.nodeid) # type: ignore
@@ -388,8 +205,3 @@ class IdfPytestEmbedded:
case.attrib['ci_job_url'] = ci_job_url
xml.write(junit)
def pytest_sessionfinish(self, session: Session, exitstatus: int) -> None:
if exitstatus != 0:
if exitstatus == ExitCode.NO_TESTS_COLLECTED:
session.exitstatus = 0
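For reference, the `temp_skip_ci` / `temp_skip` markers consumed by `skipped_targets()` above must use the keyword arguments `targets` and `reason`; a hedged usage sketch (the target and tracking ticket are placeholders):

import pytest

@pytest.mark.temp_skip_ci(targets=['esp32s3'], reason='IDF-xxxx, flaky on runner, fix in progress')
def test_example(dut):
    pass

In CI (when `CI_JOB_ID` is set) the union of `temp_skip` and `temp_skip_ci` targets is deselected; locally only `temp_skip` applies.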

View File

@@ -1,3 +0,0 @@
[pytest]
addopts = -p no:idf-ci
python_files = test_*.py

View File

@@ -1,236 +0,0 @@
# SPDX-FileCopyrightText: 2023-2025 Espressif Systems (Shanghai) CO LTD
# SPDX-License-Identifier: Apache-2.0
import fnmatch
import io
import logging
import os.path
import typing as t
from contextlib import redirect_stdout
from pathlib import Path
import pytest
from _pytest.config import ExitCode
from idf_build_apps import App
from idf_build_apps import find_apps
from idf_build_apps.constants import SUPPORTED_TARGETS
from idf_build_apps.constants import BuildStatus
from idf_ci_local.app import IdfCMakeApp
from idf_ci_utils import IDF_PATH
from idf_ci_utils import get_all_manifest_files
from idf_ci_utils import idf_relpath
from idf_ci_utils import to_list
from idf_py_actions.constants import PREVIEW_TARGETS as TOOLS_PREVIEW_TARGETS
from idf_py_actions.constants import SUPPORTED_TARGETS as TOOLS_SUPPORTED_TARGETS
from .constants import DEFAULT_CONFIG_RULES_STR
from .constants import CollectMode
from .constants import PytestCase
from .plugin import IdfPytestEmbedded
def get_pytest_files(paths: t.List[str]) -> t.List[str]:
# this is a workaround for the extremely slow pytest collection
# benchmark with
# - time pytest -m esp32 --collect-only
# user=15.57s system=1.35s cpu=95% total=17.741
# - time { find -name 'pytest_*.py'; } | xargs pytest -m esp32 --collect-only
# user=0.11s system=0.63s cpu=36% total=2.044
# user=1.76s system=0.22s cpu=43% total=4.539
# use glob.glob would also save a bunch of time
pytest_scripts: t.Set[str] = set()
for p in paths:
path = Path(p)
pytest_scripts.update(str(_p) for _p in path.glob('**/pytest_*.py') if 'managed_components' not in _p.parts)
return list(pytest_scripts)
def get_pytest_cases(
paths: t.Union[str, t.List[str]],
target: str = CollectMode.ALL,
*,
config_name: t.Optional[str] = None,
marker_expr: t.Optional[str] = None,
filter_expr: t.Optional[str] = None,
apps: t.Optional[t.List[App]] = None,
) -> t.List[PytestCase]:
"""
Return the list of test cases
For single-dut test cases, `target` could be
- [TARGET], e.g. `esp32`, to get the test cases for the given target
- or `single_all`, to get all single-dut test cases
For multi-dut test cases, `target` could be
- [TARGET,[TARGET...]], e.g. `esp32,esp32s2`, to get the test cases for the given targets
- or `multi_all`, to get all multi-dut test cases
:param paths: paths to search for pytest scripts
:param target: target or keywords to get test cases for, detailed above
:param config_name: sdkconfig name
:param marker_expr: pytest marker expression, `-m`
:param filter_expr: pytest filter expression, `-k`
:param apps: built app list, skip the tests required by apps not in the list
:return: list of test cases
"""
paths = to_list(paths)
cases: t.List[PytestCase] = []
pytest_scripts = get_pytest_files(paths) # type: ignore
if not pytest_scripts:
print(f'WARNING: no pytest scripts found for target {target} under paths {", ".join(paths)}')
return cases
def _get_pytest_cases(_target: str, _single_target_duplicate_mode: bool = False) -> t.List[PytestCase]:
collector = IdfPytestEmbedded(
_target, config_name=config_name, single_target_duplicate_mode=_single_target_duplicate_mode, apps=apps
)
with io.StringIO() as buf:
with redirect_stdout(buf):
cmd = ['--collect-only', *pytest_scripts, '--target', _target, '-q']
if marker_expr:
cmd.extend(['-m', marker_expr])
if filter_expr:
cmd.extend(['-k', filter_expr])
res = pytest.main(cmd, plugins=[collector])
if res.value != ExitCode.OK:
if res.value == ExitCode.NO_TESTS_COLLECTED:
print(f'WARNING: no pytest app found for target {_target} under paths {", ".join(paths)}')
else:
print(buf.getvalue())
raise RuntimeError(f'pytest collection failed at {", ".join(paths)} with command "{" ".join(cmd)}"')
return collector.cases # type: ignore
if target == CollectMode.ALL:
targets = TOOLS_SUPPORTED_TARGETS + TOOLS_PREVIEW_TARGETS + [CollectMode.MULTI_ALL_WITH_PARAM]
else:
targets = [target]
for _target in targets:
if target == CollectMode.ALL:
cases.extend(_get_pytest_cases(_target, _single_target_duplicate_mode=True))
else:
cases.extend(_get_pytest_cases(_target))
return sorted(cases, key=lambda x: (x.path, x.name, str(x.targets)))
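# Illustrative calls (paths are hypothetical; target strings follow the docstring above):
#   get_pytest_cases(['examples/foo'], 'esp32')              # single-dut cases for esp32
#   get_pytest_cases(['examples/foo'], 'esp32,esp32s2')      # multi-dut cases with exactly this target sequence
#   get_pytest_cases(['examples/foo'], CollectMode.ALL)      # all cases, single- and multi-dut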
def get_all_apps(
paths: t.List[str],
target: str = CollectMode.ALL,
*,
marker_expr: t.Optional[str] = None,
filter_expr: t.Optional[str] = None,
config_rules_str: t.Optional[t.List[str]] = None,
preserve_all: bool = False,
extra_default_build_targets: t.Optional[t.List[str]] = None,
compare_manifest_sha_filepath: t.Optional[str] = None,
modified_components: t.Optional[t.List[str]] = None,
modified_files: t.Optional[t.List[str]] = None,
ignore_app_dependencies_components: t.Optional[t.List[str]] = None,
ignore_app_dependencies_filepatterns: t.Optional[t.List[str]] = None,
) -> t.Tuple[t.Set[App], t.Set[App]]:
"""
Return the tuple of test-required apps and non-test-related apps
:param paths: paths to search for pytest scripts
:param target: target or keywords to get test cases for, explained in `get_pytest_cases`
:param marker_expr: pytest marker expression, `-m`
:param filter_expr: pytest filter expression, `-k`
:param config_rules_str: config rules string
:param preserve_all: preserve all apps
:param extra_default_build_targets: extra default build targets
:param compare_manifest_sha_filepath: check manifest sha filepath
:param modified_components: modified components
:param modified_files: modified files
:param ignore_app_dependencies_components: ignore app dependencies components
:param ignore_app_dependencies_filepatterns: ignore app dependencies filepatterns
:return: tuple of test-required apps and non-test-related apps
"""
# target could be comma separated list
all_apps: t.List[App] = []
for _t in set(target.split(',')):
all_apps.extend(
find_apps(
paths,
_t,
build_system=IdfCMakeApp,
recursive=True,
build_dir='build_@t_@w',
config_rules_str=config_rules_str or DEFAULT_CONFIG_RULES_STR,
build_log_filename='build_log.txt',
size_json_filename='size.json',
check_warnings=True,
manifest_rootpath=IDF_PATH,
compare_manifest_sha_filepath=compare_manifest_sha_filepath,
manifest_files=get_all_manifest_files(),
default_build_targets=SUPPORTED_TARGETS + (extra_default_build_targets or []),
modified_components=modified_components,
modified_files=modified_files,
ignore_app_dependencies_components=ignore_app_dependencies_components,
ignore_app_dependencies_filepatterns=ignore_app_dependencies_filepatterns,
include_skipped_apps=True,
)
)
pytest_cases = get_pytest_cases(
paths,
target,
marker_expr=marker_expr,
filter_expr=filter_expr,
)
modified_pytest_cases = []
if modified_files:
modified_pytest_scripts = [
os.path.dirname(f) for f in modified_files if fnmatch.fnmatch(os.path.basename(f), 'pytest_*.py')
]
if modified_pytest_scripts:
modified_pytest_cases = get_pytest_cases(
modified_pytest_scripts,
target,
marker_expr=marker_expr,
filter_expr=filter_expr,
)
# app_path, target, config
pytest_app_path_tuple_dict: t.Dict[t.Tuple[str, str, str], PytestCase] = {}
for case in pytest_cases:
for app in case.apps:
pytest_app_path_tuple_dict[(app.path, app.target, app.config)] = case
modified_pytest_app_path_tuple_dict: t.Dict[t.Tuple[str, str, str], PytestCase] = {}
for case in modified_pytest_cases:
for app in case.apps:
modified_pytest_app_path_tuple_dict[(app.path, app.target, app.config)] = case
test_related_apps: t.Set[App] = set()
non_test_related_apps: t.Set[App] = set()
for app in all_apps:
# PytestCase.app.path is idf_relpath
app_path = idf_relpath(app.app_dir)
# override build_status if test script got modified
if case := modified_pytest_app_path_tuple_dict.get((app_path, app.target, app.config_name)):
test_related_apps.add(app)
app.build_status = BuildStatus.SHOULD_BE_BUILT
app.preserve = True
logging.debug('Found app: %s - required by modified test case %s', app, case.path)
elif app.build_status != BuildStatus.SKIPPED:
if case := pytest_app_path_tuple_dict.get((app_path, app.target, app.config_name)):
test_related_apps.add(app)
# build or not should be decided by the build stage
app.preserve = True
logging.debug('Found test-related app: %s - required by %s', app, case.path)
else:
non_test_related_apps.add(app)
app.preserve = preserve_all
logging.debug('Found non-test-related app: %s', app)
print(f'Found {len(test_related_apps)} test-related apps')
print(f'Found {len(non_test_related_apps)} non-test-related apps')
return test_related_apps, non_test_related_apps

View File

@@ -1,71 +0,0 @@
# SPDX-FileCopyrightText: 2023-2024 Espressif Systems (Shanghai) CO LTD
# SPDX-License-Identifier: Apache-2.0
import logging
import os
import shutil
import sys
import tempfile
import typing as t
from pathlib import Path
import pytest
tools_ci_dir = os.path.join(os.path.dirname(__file__), '..', '..')
if tools_ci_dir not in sys.path:
sys.path.append(tools_ci_dir)
tools_dir = os.path.join(os.path.dirname(__file__), '..', '..', '..')
if tools_dir not in sys.path:
sys.path.append(tools_dir)
from idf_ci_utils import IDF_PATH # noqa: E402
from idf_pytest.constants import DEFAULT_LOGDIR # noqa: E402
def create_project(name: str, folder: Path) -> Path:
p = folder / name
p.mkdir(parents=True, exist_ok=True)
(p / 'main').mkdir(parents=True, exist_ok=True)
with open(p / 'CMakeLists.txt', 'w') as fw:
fw.write(
"""cmake_minimum_required(VERSION 3.16)
include($ENV{{IDF_PATH}}/tools/cmake/project.cmake)
project({})
""".format(
name
)
)
with open(p / 'main' / 'CMakeLists.txt', 'w') as fw:
fw.write(
"""idf_component_register(SRCS "{}.c"
INCLUDE_DIRS ".")
""".format(
name
)
)
with open(p / 'main' / f'{name}.c', 'w') as fw:
fw.write(
"""#include <stdio.h>
void app_main(void) {}
"""
)
return p
@pytest.fixture
def work_dirpath() -> t.Generator[Path, None, None]:
os.makedirs(os.path.join(IDF_PATH, DEFAULT_LOGDIR), exist_ok=True)
p = Path(tempfile.mkdtemp(prefix=os.path.join(IDF_PATH, DEFAULT_LOGDIR) + os.sep))
try:
yield p
except Exception:
logging.critical('Test is failing, Please check the log in %s', p)
raise
else:
shutil.rmtree(p)

View File

@@ -1,131 +0,0 @@
# SPDX-FileCopyrightText: 2023-2024 Espressif Systems (Shanghai) CO LTD
# SPDX-License-Identifier: Apache-2.0
from pathlib import Path
from idf_pytest.script import get_all_apps
from idf_pytest.script import SUPPORTED_TARGETS
from conftest import create_project
def test_get_all_apps_non(work_dirpath: Path) -> None:
create_project('foo', work_dirpath)
create_project('bar', work_dirpath)
test_related_apps, non_test_related_apps = get_all_apps([str(work_dirpath)])
assert test_related_apps == set()
assert len(non_test_related_apps) == 2 * len(SUPPORTED_TARGETS)
def test_get_all_apps_single_dut_test_script(work_dirpath: Path) -> None:
create_project('foo', work_dirpath)
with open(work_dirpath / 'foo' / 'pytest_get_all_apps_single_dut_test_script.py', 'w') as fw:
fw.write(
"""import pytest
@pytest.mark.esp32
@pytest.mark.esp32s2
def test_foo(dut):
pass
"""
)
create_project('bar', work_dirpath)
test_related_apps, non_test_related_apps = get_all_apps([str(work_dirpath)], target='all')
assert len(test_related_apps) == 2
assert len(non_test_related_apps) == 2 * len(SUPPORTED_TARGETS) - 2
def test_get_all_apps_multi_dut_with_markers_test_script(work_dirpath: Path) -> None:
create_project('foo', work_dirpath)
(work_dirpath / 'foo' / 'pytest_get_all_apps_multi_dut_with_markers_test_script.py').write_text(
"""import pytest
@pytest.mark.esp32
@pytest.mark.parametrize('count', [2, 3], indirect=True)
def test_foo(dut):
pass
""",
encoding='utf-8',
)
test_related_apps, non_test_related_apps = get_all_apps([str(work_dirpath)], target='all')
assert len(test_related_apps) == 1
assert len(non_test_related_apps) == len(SUPPORTED_TARGETS) - 1
def test_get_all_apps_multi_dut_test_script(work_dirpath: Path) -> None:
create_project('foo', work_dirpath)
with open(work_dirpath / 'foo' / 'pytest_get_all_apps_multi_dut_test_script.py', 'w') as fw:
fw.write(
"""import pytest
@pytest.mark.parametrize(
'count, target', [
(2, 'esp32s2|esp32s3'),
(3, 'esp32|esp32s3|esp32'),
], indirect=True
)
def test_foo(dut):
pass
"""
)
test_related_apps, non_test_related_apps = get_all_apps([str(work_dirpath)], target='esp32s2,esp32s3')
assert len(test_related_apps) == 2
assert len(non_test_related_apps) == 0
test_related_apps, non_test_related_apps = get_all_apps([str(work_dirpath)], target='esp32,esp32s3,esp32')
assert len(test_related_apps) == 2
assert len(non_test_related_apps) == 0
test_related_apps, non_test_related_apps = get_all_apps([str(work_dirpath)], target='all')
assert len(test_related_apps) == 3
assert len(non_test_related_apps) == len(SUPPORTED_TARGETS) - 3
test_related_apps, non_test_related_apps = get_all_apps([str(work_dirpath)], target='foo,bar')
assert len(test_related_apps) == 0
assert len(non_test_related_apps) == 0
def test_get_all_apps_modified_pytest_script(work_dirpath: Path) -> None:
create_project('foo', work_dirpath)
create_project('bar', work_dirpath)
(work_dirpath / 'pytest_get_all_apps_modified_pytest_script.py').write_text(
"""import pytest
import os
@pytest.mark.parametrize('count, target', [(2, 'esp32')], indirect=True)
@pytest.mark.parametrize('app_path', [
'{}|{}'.format(os.path.join(os.path.dirname(__file__), 'foo'), os.path.join(os.path.dirname(__file__), 'bar')),
], indirect=True
)
def test_multi_foo_bar(dut):
pass
""",
encoding='utf-8',
)
test_related_apps, non_test_related_apps = get_all_apps([str(work_dirpath)], target='all')
assert len(test_related_apps) == 2 # foo-esp32, bar-esp32
assert len(non_test_related_apps) == 2 * len(SUPPORTED_TARGETS) - 2
test_related_apps, non_test_related_apps = get_all_apps(
[str(work_dirpath)], target='all', modified_files=[], modified_components=[]
)
assert len(test_related_apps) == 0
assert len(non_test_related_apps) == 0
test_related_apps, non_test_related_apps = get_all_apps(
[str(work_dirpath)],
target='all',
modified_files=[str(work_dirpath / 'pytest_get_all_apps_modified_pytest_script.py')],
modified_components=[],
)
assert len(test_related_apps) == 2
assert len(non_test_related_apps) == 0

View File

@@ -1,179 +0,0 @@
# SPDX-FileCopyrightText: 2023-2024 Espressif Systems (Shanghai) CO LTD
# SPDX-License-Identifier: Apache-2.0
import textwrap
from pathlib import Path
from idf_pytest.constants import CollectMode
from idf_pytest.script import get_pytest_cases
TEMPLATE_SCRIPT = '''
import pytest
@pytest.mark.esp32
@pytest.mark.esp32s2
def test_foo_single(dut):
pass
@pytest.mark.parametrize('target', [
'esp32',
'esp32c3',
])
def test_foo_single_with_param(dut):
pass
@pytest.mark.parametrize(
'count, target', [
(2, 'esp32|esp32s2'),
(3, 'esp32s2|esp32s2|esp32s3'),
], indirect=True
)
def test_foo_multi(dut):
pass
@pytest.mark.esp32
@pytest.mark.esp32s2
@pytest.mark.parametrize(
'count', [2], indirect=True
)
def test_foo_multi_with_marker(dut):
pass
'''
def test_get_pytest_cases_single_specific(work_dirpath: Path) -> None:
script = work_dirpath / 'pytest_get_pytest_cases_single_specific.py'
script.write_text(TEMPLATE_SCRIPT)
cases = get_pytest_cases([str(work_dirpath)], 'esp32')
assert len(cases) == 2
assert cases[0].targets == ['esp32']
assert cases[0].name == 'test_foo_single'
assert cases[1].targets == ['esp32']
assert cases[1].name == 'test_foo_single_with_param'
def test_get_pytest_cases_multi_specific(work_dirpath: Path) -> None:
script = work_dirpath / 'pytest_get_pytest_cases_multi_specific.py'
script.write_text(TEMPLATE_SCRIPT)
cases = get_pytest_cases([str(work_dirpath)], 'esp32s2,esp32s2, esp32s3')
assert len(cases) == 1
assert cases[0].targets == ['esp32s2', 'esp32s2', 'esp32s3']
cases = get_pytest_cases([str(work_dirpath)], 'esp32s3,esp32s2,esp32s2') # order matters
assert len(cases) == 0
def test_get_pytest_cases_multi_all(work_dirpath: Path) -> None:
script = work_dirpath / 'pytest_get_pytest_cases_multi_all.py'
script.write_text(TEMPLATE_SCRIPT)
cases = get_pytest_cases([str(work_dirpath)], CollectMode.MULTI_ALL_WITH_PARAM)
assert len(cases) == 2
assert cases[0].targets == ['esp32', 'esp32s2']
assert cases[1].targets == ['esp32s2', 'esp32s2', 'esp32s3']
def test_get_pytest_cases_all(work_dirpath: Path) -> None:
script = work_dirpath / 'pytest_get_pytest_cases_all.py'
script.write_text(TEMPLATE_SCRIPT)
cases = get_pytest_cases([str(work_dirpath)], CollectMode.ALL)
assert len(cases) == 8
assert cases[0].targets == ['esp32', 'esp32s2']
assert cases[0].name == 'test_foo_multi'
assert cases[1].targets == ['esp32s2', 'esp32s2', 'esp32s3']
assert cases[1].name == 'test_foo_multi'
assert cases[2].targets == ['esp32', 'esp32']
assert cases[2].name == 'test_foo_multi_with_marker'
assert cases[3].targets == ['esp32s2', 'esp32s2']
assert cases[3].name == 'test_foo_multi_with_marker'
assert cases[4].targets == ['esp32']
assert cases[4].name == 'test_foo_single'
assert cases[5].targets == ['esp32s2']
assert cases[5].name == 'test_foo_single'
assert cases[6].targets == ['esp32']
assert cases[6].name == 'test_foo_single_with_param'
assert cases[7].targets == ['esp32c3']
assert cases[7].name == 'test_foo_single_with_param'
def test_multi_with_marker_and_app_path(work_dirpath: Path) -> None:
script = work_dirpath / 'pytest_multi_with_marker_and_app_path.py'
script.write_text(
textwrap.dedent(
'''
import pytest
@pytest.mark.esp32c2
@pytest.mark.parametrize(
'count,app_path', [
(2, 'foo|bar'),
(3, 'foo|bar|baz'),
], indirect=True
)
def test_foo_multi_with_marker_and_app_path(dut):
pass
'''
)
)
cases = get_pytest_cases([str(work_dirpath)], 'esp32c3,esp32c3')
assert len(cases) == 0
cases = get_pytest_cases([str(work_dirpath)], 'esp32c2,esp32c2')
assert len(cases) == 1
assert cases[0].targets == ['esp32c2', 'esp32c2']
cases = get_pytest_cases([str(work_dirpath)], 'esp32c2,esp32c2,esp32c2')
assert len(cases) == 1
assert cases[0].targets == ['esp32c2', 'esp32c2', 'esp32c2']
def test_filter_with_sdkconfig_name(work_dirpath: Path) -> None:
script = work_dirpath / 'pytest_filter_with_sdkconfig_name.py'
script.write_text(
textwrap.dedent(
'''
import pytest
@pytest.mark.esp32
@pytest.mark.parametrize(
'config', [
'foo',
'bar',
], indirect=True
)
def test_filter_with_sdkconfig_name_single_dut(dut):
pass
@pytest.mark.esp32
@pytest.mark.parametrize(
'count', [2], indirect=True
)
@pytest.mark.parametrize(
'config', [
'foo|bar',
'bar|baz',
], indirect=True
)
def test_filter_with_sdkconfig_name_multi_dut(dut):
pass
'''
)
)
cases = get_pytest_cases([str(work_dirpath)], 'esp32', config_name='foo')
assert len(cases) == 1
cases = get_pytest_cases([str(work_dirpath)], 'esp32,esp32', config_name='foo')
assert len(cases) == 1
cases = get_pytest_cases([str(work_dirpath)], 'esp32,esp32', config_name='bar')
assert len(cases) == 2

View File

@@ -1,512 +0,0 @@
# SPDX-FileCopyrightText: 2024-2025 Espressif Systems (Shanghai) CO LTD
# SPDX-License-Identifier: Apache-2.0
import ast
import itertools
import os
import typing as t
import warnings
from collections import defaultdict
import pytest
from idf_pytest.constants import PREVIEW_TARGETS
from idf_pytest.constants import SUPPORTED_TARGETS
from idf_pytest.constants import TARGET_MARKERS
from pytest import Config
from pytest import Function
from pytest import Mark
def is_target_in_marker(mark: Mark) -> bool:
return mark.name in TARGET_MARKERS or mark.name in ('supported_targets', 'preview_targets', 'linux')
def remove_keys(data: t.Dict[str, t.Any], keys_to_remove: t.List[str]) -> t.Dict[str, t.Any]:
"""
Remove specific keys from a dictionary.
"""
return {key: value for key, value in data.items() if key not in keys_to_remove}
def get_values_by_keys(data: t.Dict[str, t.Any], keys: t.List[str]) -> t.Tuple[t.Any, ...]:
"""
Retrieve values from a dictionary for specified keys.
"""
return tuple([data[key] for key in keys if key in data])
def group_by_target(vals: t.List[t.Dict[str, t.Any]]) -> t.List[t.Dict[str, t.Any]]:
"""
Groups rows by non-target keys and modifies targets to 'supported_targets'
if all supported targets are present in a group.
Parameters:
vals: List of dictionaries to process.
Returns:
Processed list of dictionaries with supported targets.
"""
if not vals or 'target' not in vals[0]:
return vals
def _process_group(
_vals: t.List[t.Dict[str, t.Any]], group: t.List[str], group_name: str
) -> t.List[t.Dict[str, t.Any]]:
# Identify keys excluding 'target'
non_target_keys = [key for key in sorted(_vals[0].keys()) if key != 'target']
# Group rows by values of keys excluding 'target'
grouped_rows = defaultdict(list)
for index, row in enumerate(_vals):
key = get_values_by_keys(row, non_target_keys)
grouped_rows[key].append((index, row['target']))
# Identify groups that contain all supported targets
to_skip_lines: t.Set[int] = set()
to_update_lines: t.Set[int] = set()
for _, rows in grouped_rows.items():
lines = []
remaining_targets = set(group)
for index, target in rows:
if target in remaining_targets:
lines.append(index)
remaining_targets.remove(target)
if not remaining_targets:
to_skip_lines.update(lines[1:]) # Skip all but the first matching line
to_update_lines.add(lines[0]) # Update the first matching line
break
# Construct new list of rows with modifications
new_values = []
for ind, row in enumerate(_vals):
if ind in to_update_lines:
row['target'] = group_name
if ind not in to_skip_lines:
new_values.append(row)
return new_values
if SUPPORTED_TARGETS:
vals = _process_group(vals, SUPPORTED_TARGETS, 'supported_targets')
if PREVIEW_TARGETS:
vals = _process_group(vals, PREVIEW_TARGETS, 'preview_targets')
return vals
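# Behaviour sketch (targets here are only an illustration): given rows that differ only in
# 'target' and whose targets together cover every entry of SUPPORTED_TARGETS, only the first
# such row is kept and its target is rewritten to 'supported_targets'; the analogous collapse
# is applied for PREVIEW_TARGETS -> 'preview_targets'. Rows covering only part of a group stay unchanged.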
class CurrentItemContext:
test_name: str
class PathRestore:
# If restored is True, add the `import os` statement when the file is formatted.
restored: bool = False
def __init__(self, path: str) -> None:
PathRestore.restored = True
self.path = path
def __repr__(self) -> str:
return f"f'{self.path}'"
def restore_path(vals: t.List[t.Dict[str, t.Any]], file_path: str) -> t.List[t.Dict[str, t.Any]]:
if 'app_path' not in vals[0].keys():
return vals
file_path = os.path.dirname(os.path.abspath(file_path))
for row in vals:
paths = row['app_path'].split('|')
row['app_path'] = '|'.join([
f'{{os.path.join(os.path.dirname(__file__), "{os.path.relpath(p, file_path)}")}}' for p in paths
])
row['app_path'] = PathRestore(row['app_path'])
return vals
def make_hashable(item: t.Any) -> t.Union[t.Tuple[t.Any, ...], t.Any]:
"""Recursively convert object to a hashable form, storing original values."""
if isinstance(item, (set, list, tuple)):
converted = tuple(make_hashable(i) for i in item)
elif isinstance(item, dict):
converted = tuple(sorted((k, make_hashable(v)) for k, v in item.items()))
else:
converted = item # Primitives are already hashable
return converted
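# Example (a sketch): make_hashable({'app_path': ['foo', 'bar'], 'count': 2})
# -> (('app_path', ('foo', 'bar')), ('count', 2)), i.e. a tuple that can be used as a dict key.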
def restore_params(data: t.List[t.Dict[str, t.Any]]) -> t.List[t.Tuple[t.List[str], t.List[t.Any]]]:
"""
Restore parameters from pytest --collect-only data structure.
"""
# Ensure all dictionaries have the same number of keys
if len({len(d) for d in data}) != 1:
raise ValueError(
f'Inconsistent parameter structure for {CurrentItemContext.test_name}: all rows must have the same number of keys.'
)
all_markers_is_empty = []
for d in data:
if 'markers' in d:
all_markers_is_empty.append(not (d['markers']))
d['markers'] = list(set(d['markers']))
if all(all_markers_is_empty):
for d in data:
del d['markers']
hashable_to_original: t.Dict[t.Tuple[str, t.Any], t.Any] = {}
def save_to_hash(key: str, hashable_value: t.Any, original_value: t.Any) -> t.Any:
"""Stores the mapping of hashable values to their original."""
if isinstance(original_value, list):
original_value = tuple(original_value)
hashable_to_original[(key, hashable_value)] = original_value
return hashable_value
def restore_from_hash(key: str, hashable_value: t.Any) -> t.Any:
"""Restores the original value from its hashable equivalent."""
return hashable_to_original.get((key, hashable_value), hashable_value)
# Convert data to a hashable format
data = [{k: save_to_hash(k, make_hashable(v), v) for k, v in row.items()} for row in data]
unique_data = []
for d in data:
if d not in unique_data:
unique_data.append(d)
data = unique_data
data = group_by_target(data)
params_multiplier: t.List[t.Tuple[t.List[str], t.List[t.Any]]] = []
current_keys: t.List[str] = sorted(data[0].keys(), key=lambda x: (x == 'markers', x))
i = 1
while len(current_keys) > i:
# It should be combinations because we are only concerned with the elements, not their order.
for _ in itertools.combinations(current_keys, i):
perm: t.List[str] = list(_)
if perm == ['markers'] or [k for k in current_keys if k not in perm] == ['markers']:
# The markers entry must be used together with another parameter.
continue
grouped_buckets = defaultdict(list)
for row in data:
grouped_buckets[get_values_by_keys(row, perm)].append(remove_keys(row, perm))
grouped_values = list(grouped_buckets.values())
if all(v == grouped_values[0] for v in grouped_values):
current_keys = [k for k in current_keys if k not in perm]
params_multiplier.append((perm, list(grouped_buckets.keys())))
data = grouped_values[0]
break
else:
i += 1
if data:
remaining_values = [get_values_by_keys(row, current_keys) for row in data]
params_multiplier.append((current_keys, remaining_values))
for key, values in params_multiplier:
values[:] = [tuple(restore_from_hash(key[i], v) for i, v in enumerate(row)) for row in values]
output: t.List[t.Any] = []
if len(key) == 1:
for row in values:
output.extend(row)
values[:] = output
for p in params_multiplier:
if 'markers' in p[0]:
for i, el in enumerate(p[1]):
if el[-1] == ():
p[1][i] = el[:-1]
return params_multiplier
def format_mark(name: str, args: t.Tuple[t.Any, ...], kwargs: t.Dict[str, t.Any]) -> str:
"""Format pytest mark with given arguments and keyword arguments."""
args_str = ', '.join(repr(arg) if isinstance(arg, str) else str(arg) for arg in args)
kwargs_str = ', '.join(f'{key}={repr(value) if isinstance(value, str) else value}' for key, value in kwargs.items())
combined = ', '.join(filter(None, [args_str, kwargs_str]))
return f'@pytest.mark.{name}({combined})\n' if combined else f'@pytest.mark.{name}\n'
def format_parametrize(keys: t.Union[str, t.List[str]], values: t.List[t.Any], indirect: t.Sequence[str]) -> str:
"""Format pytest parametrize for given keys and values."""
# Ensure keys is always a list
if isinstance(keys, str):
keys = [keys]
# Markers will always be at the end, so just remove markers from the keys if it is present
# keys = [k for k in keys if k not in ('__markers',)]
key_str = repr(keys[0]) if len(keys) == 1 else repr(','.join(keys))
# If any value needs to be represented in a special way, the best approach is to wrap it in a class like PathRestore
formatted_values = [' ' + repr(value) for value in values]
values_str = ',\n'.join(formatted_values)
if indirect:
return f'@idf_parametrize({key_str}, [\n{values_str}\n], indirect={indirect})\n'
return f'@idf_parametrize({key_str}, [\n{values_str}\n])\n'
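# Hedged examples of the generated source (indentation approximate):
#   format_mark('generic', (), {})        -> "@pytest.mark.generic\n"
#   format_mark('timeout', (600,), {})    -> "@pytest.mark.timeout(600)\n"
#   format_parametrize('target', ['esp32', 'esp32s2'], indirect=['target'])
#       -> "@idf_parametrize('target', [\n    'esp32',\n    'esp32s2'\n], indirect=['target'])\n"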
def key_for_item(item: Function) -> t.Tuple[str, str]:
return item.originalname, str(item.fspath)
def collect_markers(item: Function) -> t.Tuple[t.List[Mark], t.List[Mark]]:
"""Separate local and global markers for a pytest item."""
local_markers, global_markers = [], []
for mark in item.iter_markers():
if mark.name == 'parametrize':
continue
if 'callspec' in dir(item) and mark in item.callspec.marks:
local_markers.append(mark)
else:
global_markers.append(mark)
return local_markers, global_markers
class MarkerRepr(str):
def __new__(cls, mark_name: str, kwargs_str: str, args_str: str, all_args: str) -> 'MarkerRepr':
if not all_args:
instance = super().__new__(cls, f'pytest.mark.{mark_name}')
else:
instance = super().__new__(cls, f'pytest.mark.{mark_name}({all_args})')
return instance # type: ignore
def __init__(self, mark_name: str, kwargs_str: str, args_str: str, all_args: str) -> None:
super().__init__()
self.kwargs_str = kwargs_str
self.args_str = args_str
self.all_args = all_args
self.mark_name = mark_name
def __hash__(self) -> int:
return hash(repr(self))
def __repr__(self) -> str:
if not self.all_args:
return f'pytest.mark.{self.mark_name}'
return f'pytest.mark.{self.mark_name}({self.all_args})'
def mark_to_source(mark: Mark) -> MarkerRepr:
"""Convert a Mark instance to its pytest.mark source code representation."""
kwargs_str = ', '.join(f'{k}={repr(v)}' for k, v in mark.kwargs.items())
args_str = ', '.join(repr(arg) for arg in mark.args)
all_args = ', '.join(filter(None, [args_str, kwargs_str]))
return MarkerRepr(mark.name, kwargs_str, args_str, all_args)
def process_local_markers(local_markers: t.List[Mark]) -> t.Tuple[t.List[str], t.List[MarkerRepr]]:
"""Process local markers to extract targets and runners."""
local_targets, other_markers = [], []
for mark in local_markers:
if is_target_in_marker(mark):
local_targets.append(mark.name)
else:
other_markers.append(mark_to_source(mark))
return sorted(local_targets), sorted(other_markers)
def validate_global_markers(
global_markers: t.List[Mark], local_targets: t.List[str], function_name: str
) -> t.List[Mark]:
"""Validate and normalize global markers."""
normalized_markers = []
for mark in global_markers:
if is_target_in_marker(mark):
if local_targets:
warnings.warn(f'IN {function_name} IGNORING GLOBAL TARGET {mark.name} DUE TO LOCAL TARGETS')
continue
normalized_markers.append(mark)
return normalized_markers
def filter_target(_targets: t.List[str]) -> t.List[str]:
"""
Filter out individual targets that are already covered by 'supported_targets' or 'preview_targets'.
"""
if len(_targets) == 1:
return _targets
def remove_duplicates(target_list: t.List[str], group: t.List[str], group_name: str) -> t.List[str]:
updated_target = []
for _t in target_list:
if _t in group:
warnings.warn(f'{_t} is already included in {group_name}, no need to specify it separately.')
continue
updated_target.append(_t)
return updated_target
if 'supported_targets' in _targets:
_targets = remove_duplicates(_targets, SUPPORTED_TARGETS, 'supported_targets')
if 'preview_targets' in _targets:
_targets = remove_duplicates(_targets, PREVIEW_TARGETS, 'preview_targets')
return _targets
@pytest.hookimpl(tryfirst=True)
def pytest_collection_modifyitems(config: Config, items: t.List[Function]) -> None:
"""
Local and global marks are handled as follows:
- Local: attached to a parameter inside a parametrize call, like:
pytest.mark.parametrize(..., [pytest.param(..., marks=[...])])
- Global: A regular mark.
"""
test_name_to_params: t.Dict[t.Tuple[str, str], t.List] = defaultdict(list)
test_name_to_global_mark: t.Dict[t.Tuple[str, str], t.List] = defaultdict(list)
test_name_has_local_target_marks = defaultdict(bool)
# Collect all fixtures to determine if a parameter is regular or a fixture
fm = config.pluginmanager.get_plugin('funcmanage')
known_fixtures = set(fm._arg2fixturedefs.keys())
# Collecting data
for item in items:
collected = []
item_key = key_for_item(item)
local_markers, global_markers = collect_markers(item)
# global_markers.sort(key=lambda x: x.name)
global_markers.reverse()  # the item's markers are reversed to preserve their original order
local_targets, other_markers = process_local_markers(local_markers)
if local_targets:
test_name_has_local_target_marks[item_key] = True
local_targets = filter_target(local_targets)
other_markers_dict = {'markers': other_markers} if other_markers else {'markers': []}
if local_targets:
for target in local_targets:
params = item.callspec.params if 'callspec' in dir(item) else {}
collected.append({**params, **other_markers_dict, 'target': target})
else:
if 'callspec' in dir(item):
collected.append({**other_markers_dict, **item.callspec.params})
global_markers = validate_global_markers(global_markers, local_targets, item.name)
# Just warn if the global markers changed
if item_key in test_name_to_global_mark:
if test_name_to_global_mark[item_key] != global_markers:
warnings.warn(
f'{item.originalname} HAS DIFFERENT GLOBAL MARKERS! {test_name_to_global_mark[item_key]} {global_markers}'
)
test_name_to_global_mark[item_key] = global_markers
test_name_to_params[item_key].extend(collected)
# Post-processing: Modify files based on collected data
for (function_name, file_path), function_params in test_name_to_params.items():
CurrentItemContext.test_name = function_name
to_add_lines = []
global_targets = []
for mark in test_name_to_global_mark[(function_name, file_path)]:
if is_target_in_marker(mark):
global_targets.append(mark.name)
continue
to_add_lines.append(format_mark(mark.name, mark.args, mark.kwargs))
function_params_will_not_update = True
if test_name_has_local_target_marks[(function_name, file_path)]:
function_params_will_not_update = False
# filter_target may drop some of them, but the originals are still needed when removing decorators from the file.
original_global_targets = global_targets
global_targets = filter_target(global_targets)
is_target_already_in_params = any({'target' in param for param in function_params})
extra = []
if global_targets:
# If any param already has a target, skip adding the global target marker.
if is_target_already_in_params:
warnings.warn(f'Function {function_name} already has target params! Skipping global target')
else:
extra = [{'target': _t} for _t in global_targets]
def _update_file(file_path: str, to_add_lines: t.List[str], lines: t.List[str]) -> None:
output = []
start_with_comment = True
imports = ['from pytest_embedded_idf.utils import idf_parametrize']
if PathRestore.restored:
imports += ['import os']
for i, line in enumerate(lines):
if line.strip() in imports:
continue
if start_with_comment:
if not line == '\n' and not line.startswith(('from', 'import', '#')):
output.extend([f'{_imp}\n' for _imp in imports])
start_with_comment = False
if i in skip_lines:
continue
if line.startswith(f'def {function_name}('):
output.extend(to_add_lines)
output.append(line)
with open(file_path, 'w+') as file:
file.writelines(output)
if not function_params_will_not_update:
buffered_params: t.List[str] = []
if function_params:
function_params = restore_path(function_params, file_path)
for parameter_names, parameter_values in restore_params(function_params):
buffered_params.append(
format_parametrize(
parameter_names,
parameter_values,
indirect=[p for p in parameter_names if p in known_fixtures],
)
)
to_add_lines.extend(buffered_params)
with open(file_path) as file:
lines = file.readlines()
tree = ast.parse(''.join(lines))
skip_lines: t.Set[int] = set()
for node in ast.walk(tree):
if isinstance(node, ast.FunctionDef) and node.name == function_name:
for dec in node.decorator_list:
assert dec.end_lineno is not None
skip_lines.update(list(range(dec.lineno - 1, dec.end_lineno))) # ast count lines from 1 not 0
break
_update_file(file_path, to_add_lines, lines)
if global_targets:
with open(file_path) as file:
lines = file.readlines()
tree = ast.parse(''.join(lines))
skip_lines = set()
for node in ast.walk(tree):
if isinstance(node, ast.FunctionDef) and node.name == function_name:
for dec in node.decorator_list:
if isinstance(dec, ast.Attribute):
if dec.attr in original_global_targets:
assert dec.end_lineno is not None
skip_lines.update(list(range(dec.lineno - 1, dec.end_lineno)))
break
if extra:
to_add_lines = [format_parametrize('target', [_t['target'] for _t in extra], ['target'])] if extra else []
else:
to_add_lines = []
_update_file(file_path, to_add_lines, lines)
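# Illustrative before/after of the rewrite this hook performs (hypothetical test, not
# taken from this repository). Target markers on a test such as:
#
#     @pytest.mark.esp32
#     @pytest.mark.esp32s3
#     @pytest.mark.generic
#     def test_example(dut) -> None: ...
#
# would be rewritten into an explicit target parametrization, roughly:
#
#     @pytest.mark.generic
#     @idf_parametrize('target', ['esp32', 'esp32s3'], indirect=['target'])
#     def test_example(dut) -> None: ...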

View File

@@ -6,7 +6,7 @@
# https://docs.espressif.com/projects/esp-idf/en/latest/api-guides/tools/idf-tools.html
# ci
idf-ci==0.1.17
idf-ci==0.1.18
coverage
jsonschema

View File

@@ -4,8 +4,6 @@ import pytest
from pytest_embedded import Dut
from pytest_embedded_idf.utils import idf_parametrize
TEST_APP_IN_FLASH = [pytest.param('app_in_flash', marks=pytest.mark.esp32p4)]
@pytest.mark.generic
@idf_parametrize('config', ['app_in_flash'], indirect=['config'])

View File

@@ -4,6 +4,10 @@ tools/test_apps/security/secure_boot:
  disable:
    - if: IDF_ENV_FPGA != 1 and CONFIG_NAME != "qemu"
      reason: the test can only run on an FPGA as efuses need to be reset during the test.
  disable_test:
    - if: IDF_TARGET in ["esp32", "esp32c2", "esp32c6", "esp32h2", "esp32s2", "esp32c61", "esp32p4", "esp32s3"]
      temporary: true
      reason: Can't use Kconfig option IDF_ENV_FPGA in `disable`. IDFCI-2992
tools/test_apps/security/signed_app_no_secure_boot:
  enable:

View File

@@ -1,5 +1,5 @@
| Supported Targets | ESP32 | ESP32-C3 | ESP32-C5 | ESP32-C6 | ESP32-H2 | ESP32-P4 | ESP32-S2 | ESP32-S3 |
| ----------------- | ----- | -------- | -------- | -------- | -------- | -------- | -------- | -------- |
| Supported Targets | ESP32 | ESP32-C2 | ESP32-C3 | ESP32-C5 | ESP32-C6 | ESP32-C61 | ESP32-H2 | ESP32-H21 | ESP32-H4 | ESP32-P4 | ESP32-S2 | ESP32-S3 |
| ----------------- | ----- | -------- | -------- | -------- | -------- | --------- | -------- | --------- | -------- | -------- | -------- | -------- |
# Secure Boot

View File

@@ -7,6 +7,7 @@ import zlib
import pytest
from pytest_embedded import Dut
from pytest_embedded_idf.utils import idf_parametrize
# To prepare a runner for these tests,
# 1. Connect an FPGA with C3 image
# 2. Use a COM port for programming and export it as ESPPORT
@@ -93,16 +94,17 @@ def test_examples_security_secure_boot(dut: Dut) -> None:
# Correctly signed bootloader + correctly signed app should work
@pytest.mark.host_test
@pytest.mark.qemu
@pytest.mark.esp32c3
@pytest.mark.parametrize(
    'qemu_extra_args',
    [
        f'-drive file={os.path.join(os.path.dirname(__file__), "test", "esp32c3_efuses.bin")},if=none,format=raw,id=efuse '
        f'-drive file={os.path.join(os.path.dirname(__file__), "test", "esp32c3_efuses.bin")},'
        f'if=none,format=raw,id=efuse '
        '-global driver=nvram.esp32c3.efuse,property=drive,value=efuse '
        '-global driver=timer.esp32c3.timg,property=wdt_disable,value=true',
    ],
    indirect=True,
)
@pytest.mark.parametrize('target', ['esp32c3'], indirect=True)
@pytest.mark.parametrize('config', ['qemu'], indirect=True)
def test_examples_security_secure_boot_qemu(dut: Dut) -> None:
    try: