Merge branch 'ci/apply-idf-ci-1' into 'master'

ci: apply `idf-ci`

Closes IDFCI-2719

See merge request espressif/esp-idf!38755
Author: Fu Hanxi
Date: 2025-07-11 07:18:32 +02:00
77 changed files with 1036 additions and 4292 deletions

.gitignore
@@ -100,7 +100,7 @@ managed_components
 pytest-embedded/
 # legacy one
 pytest_embedded_log/
-list_job*.txt
+app_info_*.txt
 size_info*.txt
 XUNIT_RESULT*.xml
 .manifest_sha


@@ -8,11 +8,9 @@ workflow:
     - if: $CI_OPEN_MERGE_REQUESTS != null
       variables:
         PIPELINE_COMMIT_SHA: $CI_MERGE_REQUEST_SOURCE_BRANCH_SHA
-        IS_MR_PIPELINE: 1
     - if: $CI_OPEN_MERGE_REQUESTS == null
       variables:
         PIPELINE_COMMIT_SHA: $CI_COMMIT_SHA
-        IS_MR_PIPELINE: 0
     - when: always
 # Place the default settings in `.gitlab/ci/common.yml` instead
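The removed `IS_MR_PIPELINE` variable has no one-to-one replacement; as the `pipeline_variables` hunk further below shows, jobs now query GitLab's predefined variable directly. A minimal sketch of the equivalent check:

```shell
# CI_MERGE_REQUEST_IID is set only in merge-request pipelines
if [ -z "$CI_MERGE_REQUEST_IID" ]; then
    echo "branch/tag pipeline (was IS_MR_PIPELINE=0)"
fi
```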


@@ -250,76 +250,9 @@ We're using the latest version of [idf-build-apps][idf-build-apps]. Please refer
 In ESP-IDF CI, a few more special rules are additionally supported to disable the check app dependencies feature:
 
 - Add MR labels `BUILD_AND_TEST_ALL_APPS`
+- Pipeline variable `IDF_CI_SELECT_ALL_PYTEST_CASES=1`
 - Run in protected branches
 
 ## Upload/Download Artifacts to Internal Minio Server
 
+Please refer to the documentation [here](https://docs.espressif.com/projects/idf-ci/en/latest/guides/cli.html)
-### Users Without Access to Minio
-
-If you don't have access to the internal Minio server, you can still download the artifacts from the shared link in the job log.
-
-The log will look like this:
-
-```shell
-Pipeline ID    : 587355
-Job name       : build_clang_test_apps_esp32
-Job ID         : 40272275
-Created archive file: 40272275.zip, uploading as 587355/build_dir_without_map_and_elf_files/build_clang_test_apps_esp32/40272275.zip
-Please download the archive file includes build_dir_without_map_and_elf_files from [INTERNAL_URL]
-```
-
-### Users With Access to Minio
-
-#### Env Vars for Minio
-
-Minio takes these env vars to connect to the server:
-
-- `IDF_S3_SERVER`
-- `IDF_S3_ACCESS_KEY`
-- `IDF_S3_SECRET_KEY`
-- `IDF_S3_BUCKET`
-
-#### Artifacts Types and File Patterns
-
-The artifacts types and corresponding file patterns are defined in tools/ci/artifacts_handler.py, inside `ArtifactType` and `TYPE_PATTERNS_DICT`.
-
-#### Upload
-
-```shell
-python tools/ci/artifacts_handler.py upload
-```
-
-will upload the files that match the file patterns to minio object storage with name:
-
-`<pipeline_id>/<artifact_type>/<job_name>/<job_id>.zip`
-
-For example, job 39043328 will upload these four files:
-
-- `575500/map_and_elf_files/build_pytest_examples_esp32/39043328.zip`
-- `575500/build_dir_without_map_and_elf_files/build_pytest_examples_esp32/39043328.zip`
-- `575500/logs/build_pytest_examples_esp32/39043328.zip`
-- `575500/size_reports/build_pytest_examples_esp32/39043328.zip`
-
-#### Download
-
-You may run
-
-```shell
-python tools/ci/artifacts_handler.py download --pipeline_id <pipeline_id>
-```
-
-to download all files of the pipeline, or
-
-```shell
-python tools/ci/artifacts_handler.py download --pipeline_id <pipeline_id> --job_name <job_name_or_pattern>
-```
-
-to download all files with the specified job name or pattern, or
-
-```shell
-python tools/ci/artifacts_handler.py download --pipeline_id <pipeline_id> --job_name <job_name_or_pattern> --type <artifact_type> <artifact_type> ...
-```
-
-to download all files with the specified job name or pattern and artifact type(s).
-
-You may check all detailed documentation with `python tools/ci/artifacts_handler.py download -h`
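For day-to-day use, the replacements are the `idf-ci gitlab` subcommands that appear throughout this merge request. A minimal sketch, assuming the `--type` names defined in the new `.idf_ci.toml` (`debug`, `flash`, `log`, ...) and an illustrative app path:

```shell
# upload this job's artifacts that match the configured `log` patterns
idf-ci gitlab upload-artifacts --type log

# download the `flash` artifacts of one app built at a given commit
idf-ci gitlab download-artifacts --commit-sha <sha> --type flash \
    examples/get-started/hello_world
```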


@@ -21,7 +21,7 @@
       - pipeline_variables
   artifacts:
     paths:
-      # The other artifacts patterns are defined under tools/ci/artifacts_handler.py
+      # The other artifacts patterns are defined under .idf_ci.toml
       # Now we're uploading/downloading the binary files from our internal storage server
       #
       # keep the log file to help debug
@@ -34,19 +34,16 @@
   variables:
     IDF_TOOLCHAIN: clang
     TEST_BUILD_OPTS_EXTRA: ""
-    TEST_DIR: tools/test_apps/system/clang_build_test
-    PYTEST_IGNORE_COLLECT_IMPORT_ERROR: "1"
   script:
     # CI specific options start from "--parallel-count xxx". could ignore when running locally
-    - run_cmd python tools/ci/ci_build_apps.py $TEST_DIR -v
+    - run_cmd idf-build-apps build
+      -p tools/test_apps/system/clang_build_test
       -t $IDF_TARGET
-      --copy-sdkconfig
       --parallel-count ${CI_NODE_TOTAL:-1}
       --parallel-index ${CI_NODE_INDEX:-1}
       --modified-components ${MR_MODIFIED_COMPONENTS}
       --modified-files ${MR_MODIFIED_FILES}
       $TEST_BUILD_OPTS_EXTRA
-    - python tools/ci/artifacts_handler.py upload
 
 ######################
 # build_template_app #
@@ -105,7 +102,7 @@ gcc_static_analyzer:
     ANALYZING_APP: "examples/get-started/hello_world"
   script:
     - echo "CONFIG_COMPILER_STATIC_ANALYZER=y" >> ${ANALYZING_APP}/sdkconfig.defaults
-    - python -m idf_build_apps build -v -p ${ANALYZING_APP} -t all
+    - idf-build-apps build -p ${ANALYZING_APP}
 
 ########################################
 # Clang Build Apps Without Tests Cases #
@@ -208,7 +205,7 @@ build_clang_test_apps_esp32p4:
   script:
     - ${IDF_PATH}/tools/ci/test_configure_ci_environment.sh
     - cd ${IDF_PATH}/tools/test_build_system
-    - python ${IDF_PATH}/tools/ci/get_known_failure_cases_file.py
+    - run_cmd idf-ci gitlab download-known-failure-cases-file ${KNOWN_FAILURE_CASES_FILE_NAME}
     - pytest
       --cleanup-idf-copy
       --parallel-count ${CI_NODE_TOTAL:-1}
@@ -293,11 +290,7 @@ generate_build_child_pipeline:
       - non_test_related_apps.txt
     expire_in: 1 week
     when: always
-  variables:
-    PYTEST_IGNORE_COLLECT_IMPORT_ERROR: "1"
   script:
-    # requires basic pytest dependencies
-    - run_cmd bash install.sh --enable-pytest
     - run_cmd python tools/ci/dynamic_pipelines/scripts/generate_build_child_pipeline.py
 
 build_child_pipeline:
@@ -309,12 +302,9 @@ build_child_pipeline:
       - pipeline_variables
       - generate_build_child_pipeline
   variables:
-    IS_MR_PIPELINE: $IS_MR_PIPELINE
     MR_MODIFIED_COMPONENTS: $MR_MODIFIED_COMPONENTS
     MR_MODIFIED_FILES: $MR_MODIFIED_FILES
     PARENT_PIPELINE_ID: $CI_PIPELINE_ID
-    BUILD_AND_TEST_ALL_APPS: $BUILD_AND_TEST_ALL_APPS
-    REPORT_EXIT_CODE: $REPORT_EXIT_CODE
     # https://gitlab.com/gitlab-org/gitlab/-/issues/214340
   inherit:
     variables: false


@@ -120,7 +120,7 @@ variables:
     source tools/ci/configure_ci_environment.sh
 
     # add extra python packages
-    export PYTHONPATH="$IDF_PATH/tools:$IDF_PATH/tools/esp_app_trace:$IDF_PATH/components/partition_table:$IDF_PATH/tools/ci/python_packages:$PYTHONPATH"
+    export PYTHONPATH="$IDF_PATH/tools:$IDF_PATH/tools/ci:$IDF_PATH/tools/esp_app_trace:$IDF_PATH/components/partition_table:$IDF_PATH/tools/ci/python_packages:$PYTHONPATH"
 
 .setup_tools_and_idf_python_venv: &setup_tools_and_idf_python_venv |
     # must use after setup_tools_except_target_test
@@ -145,23 +145,19 @@ variables:
       export IDF_PIP_WHEELS_URL=""
     fi
 
-    # install.sh
     if [[ "${CI_JOB_STAGE}" != "target_test" ]]; then
       section_start "running_install_sh" "Running install.sh"
       if [[ "${CI_JOB_STAGE}" == "build_doc" ]]; then
         run_cmd bash install.sh --enable-ci --enable-docs
-      elif [[ "${CI_JOB_STAGE}" == "build" ]]; then
-        run_cmd bash install.sh --enable-ci
       else
-        if ! echo "${CI_JOB_NAME}" | egrep ".*pytest.*"; then
         run_cmd bash install.sh --enable-ci
-        else
-          run_cmd bash install.sh --enable-ci --enable-pytest --enable-test-specific
-        fi
       fi
       section_end "running_install_sh"
     else
-      section_start "install_python_env" "Install Python environment"
-      run_cmd python tools/idf_tools.py install-python-env --features ci,pytest,test-specific
+      section_start "install_python_env" "Install Python environment, skip required tools check"
+      run_cmd python tools/idf_tools.py install-python-env --features ci,test-specific
+      export IDF_SKIP_TOOLS_CHECK=1
       section_end "install_python_env"
     fi
@@ -176,11 +172,6 @@ variables:
       $IDF_PATH/tools/idf_tools.py --non-interactive install esp-clang
     fi
 
-    if [[ "${CI_JOB_STAGE}" == "target_test" ]]; then
-      section_start "IDF_SKIP_TOOLS_CHECK" "Skip required tools check"
-      export IDF_SKIP_TOOLS_CHECK=1
-      section_end "IDF_SKIP_TOOLS_CHECK"
-    fi
 
     section_start "source_export" "Source export.sh"
     source ./export.sh
     section_end "source_export"
@@ -226,7 +217,7 @@ variables:
 .upload_failed_job_log_artifacts: &upload_failed_job_log_artifacts |
     if [ $CI_JOB_STATUS = "failed" ]; then
-      python tools/ci/artifacts_handler.py upload --type logs
+      run_cmd idf-ci gitlab upload-artifacts --type log
     fi
 
 .before_script:minimal:
@@ -268,11 +259,17 @@ variables:
 .after_script:build:ccache-show-stats:
   after_script:
+    - source tools/ci/utils.sh
+    - section_start "ccache_show_stats" "Show ccache statistics"
     - *show_ccache_statistics
+    - section_end "ccache_show_stats"
 
 .after_script:build:ccache-show-stats:upload-failed-job-logs:
   after_script:
+    - source tools/ci/utils.sh
+    - section_start "ccache_show_stats" "Show ccache statistics"
     - *show_ccache_statistics
+    - section_end "ccache_show_stats"
     - *upload_failed_job_log_artifacts
 
 ##############################
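After these hunks, the install matrix collapses to three cases. As a sketch, reproducing each CI stage's setup locally looks like:

```shell
# docs jobs
bash install.sh --enable-ci --enable-docs
# every other non-target_test job (build, pre-check, host test, ...)
bash install.sh --enable-ci
# target_test jobs: Python env only, required-tools check skipped
python tools/idf_tools.py install-python-env --features ci,test-specific
export IDF_SKIP_TOOLS_CHECK=1
```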


@@ -1,19 +1,3 @@
-.all_targets: &all_targets
-  - esp32
-  - esp32s2
-  - esp32s3
-  - esp32c3
-  - esp32c2
-  - esp32c6
-  - esp32c5
-  - esp32h2
-  - esp32p4
-
-.target_test: &target_test
-  - example_test
-  - custom_test
-  - component_ut
-
 ##############
 # Build Jobs #
 ##############


@@ -297,14 +297,13 @@ test_pytest_qemu:
     INSTALL_EXTRA_TOOLS: "qemu-riscv32"
     IDF_TOOLCHAIN: [gcc, clang]
   script:
-    - run_cmd python tools/ci/ci_build_apps.py . -v
-      --build-system cmake
+    - run_cmd idf-ci build run
       --target $IDF_TARGET
-      --pytest-apps
+      --only-test-related
       -m qemu
-      --collect-app-info "list_job_${CI_JOB_NAME_SLUG}.txt"
-      --modified-components ${MR_MODIFIED_COMPONENTS}
       --modified-files ${MR_MODIFIED_FILES}
-    - python tools/ci/get_known_failure_cases_file.py
+    - run_cmd idf-ci gitlab download-known-failure-cases-file ${KNOWN_FAILURE_CASES_FILE_NAME}
     - run_cmd pytest
       --target $IDF_TARGET
       --log-cli-level DEBUG
@@ -312,7 +311,6 @@ test_pytest_qemu:
       --embedded-services idf,qemu
       --junitxml=XUNIT_RESULT.xml
       --ignore-result-files ${KNOWN_FAILURE_CASES_FILE_NAME}
-      --app-info-filepattern \"list_job_*.txt\"
       --qemu-extra-args \"-global driver=timer.$IDF_TARGET.timg,property=wdt_disable,value=true\"
 
 test_pytest_linux:
@@ -327,21 +325,18 @@ test_pytest_linux:
     reports:
       junit: XUNIT_RESULT.xml
   script:
-    - run_cmd python tools/ci/ci_build_apps.py components examples tools/test_apps -v
-      --build-system cmake
+    - run_cmd idf-ci build run
+      -p components -p examples -p tools/test_apps
       --target linux
-      --pytest-apps
+      --only-test-related
+      -m host_test
-      --collect-app-info "list_job_${CI_JOB_NAME_SLUG}.txt"
-      --modified-components ${MR_MODIFIED_COMPONENTS}
       --modified-files ${MR_MODIFIED_FILES}
-    - python tools/ci/get_known_failure_cases_file.py
+    - run_cmd idf-ci gitlab download-known-failure-cases-file ${KNOWN_FAILURE_CASES_FILE_NAME}
     - run_cmd pytest
       --target linux
-      -m host_test
       --embedded-services idf
       --junitxml=XUNIT_RESULT.xml
       --ignore-result-files ${KNOWN_FAILURE_CASES_FILE_NAME}
-      --app-info-filepattern \"list_job_*.txt\"
 
 test_pytest_macos:
   extends:
@@ -358,41 +353,24 @@ test_pytest_macos:
       junit: XUNIT_RESULT.xml
   variables:
     PYENV_VERSION: "3.9"
-    PYTEST_IGNORE_COLLECT_IMPORT_ERROR: "1"
     # Workaround for a bug in Parallels executor where CI_PROJECT_DIR is not an absolute path,
     # but a relative path to the build directory (builds/espressif/esp-idf instead of ~/builds/espressif/esp-idf.
     # GitLab sets the project dir to this template `<builds_dir>/<namespace>/<project_name>`
     IDF_PATH: "/Users/espressif/builds/espressif/esp-idf"
   script:
-    - run_cmd python tools/ci/ci_build_apps.py components examples tools/test_apps -v
-      --build-system cmake
+    - run_cmd idf-ci build run
+      -p components -p examples -p tools/test_apps
       --target linux
-      --pytest-apps
+      --only-test-related
-      -m \"host_test and macos\"
+      -m macos
-      --collect-app-info "list_job_${CI_JOB_NAME_SLUG}.txt"
-      --modified-components ${MR_MODIFIED_COMPONENTS}
       --modified-files ${MR_MODIFIED_FILES}
-    - python tools/ci/get_known_failure_cases_file.py
+    - run_cmd idf-ci gitlab download-known-failure-cases-file ${KNOWN_FAILURE_CASES_FILE_NAME}
     - run_cmd pytest
       --target linux
-      -m \"host_test and macos\"
+      -m macos
       --junitxml=XUNIT_RESULT.xml
       --ignore-result-files ${KNOWN_FAILURE_CASES_FILE_NAME}
-      --app-info-filepattern \"list_job_*.txt\"
 
-test_idf_pytest_plugin:
-  extends:
-    - .host_test_template
-    - .rules:patterns:idf-pytest-plugin
-  variables:
-    SUBMODULES_TO_FETCH: "none"
-  artifacts:
-    reports:
-      junit: XUNIT_RESULT.xml
-  script:
-    - cd ${IDF_PATH}/tools/ci/dynamic_pipelines/tests/test_report_generator
-    - python -m unittest test_report_generator.py
-    - cd ${IDF_PATH}/tools/ci/idf_pytest
-    - pytest --junitxml=${CI_PROJECT_DIR}/XUNIT_RESULT.xml
 
 test_idf_build_apps_load_soc_caps:
   extends: .host_test_template


@@ -116,22 +116,10 @@ check_test_scripts_build_test_rules:
   extends:
     - .pre_check_template
     - .before_script:build
-  variables:
-    PYTEST_IGNORE_COLLECT_IMPORT_ERROR: "1"
   script:
     # requires basic pytest dependencies
-    - run_cmd bash install.sh --enable-pytest
     - python tools/ci/check_build_test_rules.py check-test-scripts examples/ tools/test_apps components
 
-check_configure_ci_environment_parsing:
-  extends:
-    - .pre_check_template
-    - .before_script:build
-    - .rules:build
-  script:
-    - cd tools/ci
-    - python -m unittest ci_build_apps.py
 
 pipeline_variables:
   extends:
     - .pre_check_template
@@ -141,11 +129,10 @@ pipeline_variables:
     # MODIFIED_FILES is a list of files that changed, could be used everywhere
     - MODIFIED_FILES=$(echo "$GIT_DIFF_OUTPUT" | xargs)
     - echo "MODIFIED_FILES=$MODIFIED_FILES" >> pipeline.env
-    - echo "REPORT_EXIT_CODE=0" >> pipeline.env
     # MR_MODIFIED_FILES and MR_MODIFIED_COMPONENTS are semicolon-separated lists that are used in MR pipelines only
     # for non-MR pipelines, these are empty lists
     - |
-      if [ $IS_MR_PIPELINE == "0" ]; then
+      if [ -z "$CI_MERGE_REQUEST_IID" ]; then
        echo "MR_MODIFIED_FILES=\"\"" >> pipeline.env
        echo "MR_MODIFIED_COMPONENTS=\"\"" >> pipeline.env
      else
@@ -155,20 +142,11 @@ pipeline_variables:
       MR_MODIFIED_COMPONENTS=$(run_cmd python tools/ci/ci_get_mr_info.py components --modified-files $MODIFIED_FILES | tr '\n' ';')
       echo "MR_MODIFIED_COMPONENTS=\"$MR_MODIFIED_COMPONENTS\"" >> pipeline.env
      fi
-    - |
-      if echo "$CI_MERGE_REQUEST_LABELS" | egrep "(^|,)BUILD_AND_TEST_ALL_APPS(,|$)"; then
-        echo "BUILD_AND_TEST_ALL_APPS=1" >> pipeline.env
-      fi
-    # run full pipeline if testing constraint branch
-    - |
-      if [ -n "$CI_PYTHON_CONSTRAINT_BRANCH" ]; then
-        echo "BUILD_AND_TEST_ALL_APPS=1" >> pipeline.env
-      fi
     - echo "OOCD_DISTRO_URL_ARMHF=$OOCD_DISTRO_URL_ARMHF" >> pipeline.env
     - echo "OOCD_DISTRO_URL_ARM64=$OOCD_DISTRO_URL_ARM64" >> pipeline.env
-    - python tools/ci/ci_process_description.py
+    - run_cmd idf-ci gitlab pipeline-variables >> pipeline.env
     - cat pipeline.env
-    - python tools/ci/artifacts_handler.py upload --type modified_files_and_components_report
+    - run_cmd idf-ci gitlab upload-artifacts --type env
   artifacts:
     reports:
       dotenv: pipeline.env


@@ -53,7 +53,6 @@
       - "tools/ci/ignore_build_warnings.txt"
       - "tools/ci/test_build_system*.sh"
       - "tools/ci/test_build_system*.py"
-      - "tools/ci/ci_build_apps.py"
       - "tools/test_build_system/**/*"
 
 .patterns-build_system_win: &patterns-build_system_win
@@ -163,10 +162,6 @@
       - "components/bt/esp_ble_mesh/lib/lib"
       - ".gitmodules"
 
-.patterns-idf-pytest-plugin: &patterns-idf-pytest-plugin
-  - "tools/ci/idf_pytest/**/*"
-  - "tools/ci/dynamic_pipelines/tests/**/*"
-
 ##############
 # if anchors #
 ##############
@@ -266,12 +261,6 @@
 #      - <<: *if-dev-push
 #        changes: *patterns-sonarqube-files
 
-.rules:patterns:idf-pytest-plugin:
-  rules:
-    - <<: *if-protected-check
-    - <<: *if-dev-push
-      changes: *patterns-idf-pytest-plugin
-
 # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
 # DO NOT place comments or maintain any code from this line
 #


@@ -60,7 +60,7 @@ test_tools_win:
     PYTHONPATH: "$PYTHONPATH;$IDF_PATH\\tools;$IDF_PATH\\tools\\esp_app_trace;$IDF_PATH\\components\\partition_table;$IDF_PATH\\tools\\ci\\python_packages"
   script:
     - python -m pip install jsonschema
-    - .\install.ps1 --enable-ci --enable-pytest
+    - .\install.ps1 --enable-ci
     - .\export.ps1
     - python "${SUBMODULE_FETCH_TOOL}" -s "all"
     - cd ${IDF_PATH}/tools/test_idf_py
@@ -78,7 +78,7 @@ test_tools_win:
   after_script: []
   timeout: 4 hours
   script:
-    - .\install.ps1 --enable-ci --enable-pytest
+    - .\install.ps1 --enable-ci
     - . .\export.ps1
     - python "${SUBMODULE_FETCH_TOOL}" -s "all"
     - cd ${IDF_PATH}\tools\test_build_system


@@ -21,7 +21,7 @@ upload-pip-cache:
     policy: push
   script:
     - rm -rf .cache/pip  # clear old packages
-    - bash install.sh --enable-ci --enable-pytest
+    - bash install.sh --enable-ci --enable-test-specific
   parallel:
     matrix:
       - GEO: [ 'shiny', 'brew' ]

.idf_build_apps.toml (new file)
@@ -0,0 +1,64 @@
config_rules = [
'sdkconfig.ci=default',
'sdkconfig.ci.*=',
'=default',
]
extra_pythonpaths = [
'$IDF_PATH/tools/ci/python_packages',
'$IDF_PATH/tools/ci',
'$IDF_PATH/tools',
]
build_system = "idf_ci_local.app:IdfCMakeApp"
recursive = true
check_warnings = true
keep_going = true
copy_sdkconfig = true
ignore_warning_files = [
'$IDF_PATH/tools/ci/ignore_build_warnings.txt',
]
build_dir = "build_@t_@w"
build_log_filename = "build_log.txt"
size_json_filename = "size.json"
verbose = 1 # INFO
# collect
collect_app_info_filename = "app_info_${CI_JOB_NAME_SLUG}.txt"
collect_size_info_filename = "size_info_${CI_JOB_NAME_SLUG}.txt" # TODO remove this file when ci-dashboard is ready
junitxml = "build_summary_${CI_JOB_NAME_SLUG}.xml"
# manifest
check_manifest_rules = true
manifest_rootpath = "$IDF_PATH"
manifest_filepatterns = [
'**/.build-test-rules.yml',
]
# dependency-driven build
deactivate_dependency_driven_build_by_components = [
'cxx',
'esp_common',
'esp_hw_support',
'esp_rom',
'esp_system',
'esp_timer',
'freertos',
'hal',
'heap',
'log',
'newlib',
'riscv',
'soc',
'xtensa',
]
deactivate_dependency_driven_build_by_filepatterns = [
# tools
'tools/cmake/**/*',
'tools/tools.json',
# ci
'tools/ci/ignore_build_warnings.txt',
]
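Because `idf-build-apps` picks this file up from the repository root, the long flag lists in the old CI scripts shrink to short invocations; a sketch with an illustrative path and target:

```shell
# config_rules, build_dir naming, warning and manifest checks all
# come from .idf_build_apps.toml
idf-build-apps build -p tools/test_apps/system/clang_build_test -t esp32
```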

.idf_ci.toml (new file)
@@ -0,0 +1,97 @@
preserve_non_test_related_apps = false
[local_runtime_envs]
EXTRA_CFLAGS = "-Werror -Werror=deprecated-declarations -Werror=unused-variable -Werror=unused-but-set-variable -Werror=unused-function -Wstrict-prototypes"
EXTRA_CXXFLAGS = "-Werror -Werror=deprecated-declarations -Werror=unused-variable -Werror=unused-but-set-variable -Werror=unused-function"
LDGEN_CHECK_MAPPING = "1"
IDF_CI_BUILD = "1"
[gitlab]
[gitlab.build_pipeline]
workflow_name = "build_child_pipeline"
presigned_json_job_name = 'generate_pytest_build_report'
job_tags = ['build', 'shiny']
job_template_name = '.dynamic_build_template'
job_template_jinja = '' # write in tools/ci/dynamic_pipelines/templates/.dynamic_jobs.yml
pre_yaml_jinja = """
include:
- .gitlab/ci/common.yml
- tools/ci/dynamic_pipelines/templates/.dynamic_jobs.yml
- tools/ci/dynamic_pipelines/templates/test_child_pipeline.yml
"""
yaml_jinja = """
{{ settings.gitlab.build_pipeline.pre_yaml_jinja }}
workflow:
name: {{ settings.gitlab.build_pipeline.workflow_name }}
rules:
- when: always
{{ jobs }}
""" # simplified since we included the tools/ci/dynamic_pipelines/templates/test_child_pipeline.yml
[gitlab.test_pipeline]
job_template_name = '.dynamic_target_test_template'
job_template_jinja = '' # write in tools/ci/dynamic_pipelines/templates/.dynamic_jobs.yml
pre_yaml_jinja = """
include:
- .gitlab/ci/common.yml
- tools/ci/dynamic_pipelines/templates/.dynamic_jobs.yml
- tools/ci/dynamic_pipelines/templates/generate_target_test_report.yml
"""
[gitlab.artifacts.s3.debug]
bucket = "idf-artifacts"
patterns = [
'**/build*/bootloader/*.map',
'**/build*/bootloader/*.elf',
'**/build*/*.map',
'**/build*/*.elf',
# customized
'**/build*/esp_tee/*.map',
'**/build*/esp_tee/*.elf',
'**/build*/gdbinit/*',
]
[gitlab.artifacts.s3.flash]
bucket = "idf-artifacts"
patterns = [
'**/build*/bootloader/*.bin',
'**/build*/*.bin',
'**/build*/partition_table/*.bin',
'**/build*/flasher_args.json',
'**/build*/flash_project_args',
'**/build*/config/sdkconfig.json',
'**/build*/sdkconfig',
'**/build*/project_description.json',
# customized
'**/build*/esp_tee/*.bin',
]
[gitlab.artifacts.s3.log]
bucket = "idf-artifacts"
patterns = [
'**/build*/build_log.txt',
'**/build*/size.json',
]
[gitlab.artifacts.s3.junit]
bucket = "idf-artifacts"
patterns = [
'XUNIT_RESULT_*.xml',
]
[gitlab.artifacts.s3.env]
bucket = "idf-artifacts"
patterns = [
'pipeline.env',
]
[gitlab.artifacts.s3.longterm]
bucket = "longterm"
if_clause = '"$CI_COMMIT_REF_NAME" == "master"'
patterns = [
'**/build*/size.json',
]
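Locally, the same file configures `idf-ci build run`, which the CI jobs above now call; a sketch mirroring the documentation changes later in this MR (the target is illustrative):

```shell
# build only the apps that have related pytest cases, then run them
idf-ci build run --target esp32 --only-test-related
pytest --target esp32
```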


@@ -10,26 +10,6 @@ repos:
       - id: ruff-format
       - id: ruff
         args: [ "--fix" ]
-  - repo: local
-    hooks:
-      - id: pytest-linter
-        name: Pytest Linter Check
-        entry: tools/ci/check_test_files.py
-        language: python
-        files: 'pytest_.*\.py$'
-        require_serial: true
-        additional_dependencies:
-          - pytest-embedded-idf[serial]~=1.16
-          - pytest-embedded-jtag~=1.16
-          - pytest-embedded-qemu~=1.16
-          - pytest-ignore-test-results~=0.3
-          - pytest-rerunfailures
-          - pytest-timeout
-          - idf-build-apps~=2.8
-          - python-gitlab
-          - minio
-          - click
-          - esp-idf-monitor
   - repo: https://github.com/pre-commit/pre-commit-hooks
     rev: v4.5.0
     hooks:
@@ -163,7 +143,7 @@ repos:
         require_serial: true
         additional_dependencies:
           - PyYAML == 5.3.1
-          - idf-build-apps>=2.8,<3
+          - idf-build-apps~=2.11
       - id: sort-yaml-files
         name: sort yaml files
         entry: tools/ci/sort_yaml.py
@@ -178,15 +158,6 @@ repos:
         files: 'tools/ci/sort_yaml\.py$'
         additional_dependencies:
           - ruamel.yaml
-      - id: check-build-test-rules-path-exists
-        name: check path in .build-test-rules.yml exists
-        entry: tools/ci/check_build_test_rules.py check-exist
-        language: python
-        additional_dependencies:
-          - PyYAML == 5.3.1
-        always_run: true
-        pass_filenames: false
-        require_serial: true
       - id: cleanup-ignore-lists
         name: Remove non-existing patterns from ignore lists
         entry: tools/ci/cleanup_ignore_lists.py
@@ -205,7 +176,12 @@ repos:
     rev: v4.0.1
     hooks:
       - id: file-contents-sorter
-        files: 'tools\/ci\/(executable-list\.txt|mypy_ignore_list\.txt|check_copyright_ignore\.txt)'
+        files: "tools/ci/(\
+          executable-list\\.txt\
+          |mypy_ignore_list\\.txt\
+          |check_copyright_ignore\\.txt\
+          |exclude_check_tools_files\\.txt\
+          )"
   - repo: https://github.com/espressif/check-copyright/
     rev: v1.1.1
     hooks:
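To check that the rewritten `files` regex still matches the intended lists, the hook can be exercised in isolation with the standard pre-commit CLI (hook id taken from the config above):

```shell
pre-commit run file-contents-sorter --all-files
```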


@@ -3,6 +3,7 @@
 import pytest
 from pytest_embedded import Dut
 from pytest_embedded_idf.utils import idf_parametrize
 
+# normal mmu tests
@@ -20,14 +21,6 @@ def test_mmap(dut: Dut) -> None:
 # mmu tests with psram enabled
-PSRAM_RELEASE_CONFIGS = [
-    pytest.param('psram_release_esp32', marks=[pytest.mark.esp32]),
-    pytest.param('psram_release_esp32s2', marks=[pytest.mark.esp32s2]),
-    pytest.param('psram_release_esp32s3', marks=[pytest.mark.esp32s3]),
-    pytest.param('psram_release_esp32p4', marks=[pytest.mark.esp32p4]),
-]
 @pytest.mark.generic
 @idf_parametrize(
     'config,target',
@@ -44,12 +37,6 @@ def test_mmap_psram(dut: Dut) -> None:
 # mmu tests with xip_psram
-XIP_CONFIGS = [
-    pytest.param('xip_psram_esp32s2', marks=[pytest.mark.esp32s2]),
-    pytest.param('xip_psram_esp32s3', marks=[pytest.mark.esp32s3]),
-]
 @pytest.mark.generic
 @idf_parametrize(
     'config,target', [('xip_psram_esp32s2', 'esp32s2'), ('xip_psram_esp32s3', 'esp32s3')], indirect=['config', 'target']


@@ -15,17 +15,6 @@ def not_expect(dut: Dut, output_regex: str) -> None:
         raise RuntimeError(f'Found not_expect output {output_regex}')
 
-JTAG_SERIAL_MARKS = [
-    pytest.mark.esp32s3,
-    pytest.mark.esp32c2,
-    pytest.mark.esp32c3,
-    pytest.mark.esp32c5,
-    pytest.mark.esp32c6,
-    pytest.mark.esp32c61,
-    pytest.mark.esp32h2,
-]
 @pytest.mark.generic
 @idf_parametrize('config', ['console_none'], indirect=['config'])
 @idf_parametrize('target', ['supported_targets'], indirect=['target'])


@@ -16,13 +16,13 @@
   ```bash
   cd $IDF_PATH
-  bash install.sh --enable-ci --enable-pytest
+  bash install.sh --enable-ci
   . ./export.sh
   ```
 
 - For example, to execute the TEE test suite for ESP32-C6 with all the available `sdkconfig` files, run the following steps. The required test applications will be built and flashed automatically onto the DUT by the `pytest` framework.
 
   ```bash
-  python $IDF_PATH/tools/ci/ci_build_apps.py . --target esp32c6 -v --pytest-apps
+  idf-ci build run --target esp32c6 --only-test-related
   pytest --target esp32c6
   ```


@@ -3,7 +3,6 @@
 import pytest
 from pytest_embedded import Dut
 from pytest_embedded_idf.utils import idf_parametrize
 
-# @pytest.mark.esp32c2  # esp32c2 are using xtal_26mhz
 @pytest.mark.generic


@@ -4,30 +4,6 @@ import pytest
 from pytest_embedded import Dut
 from pytest_embedded_idf.utils import idf_parametrize
 
-CONFIGS = [
-    pytest.param(
-        'default',
-        marks=[
-            pytest.mark.supported_targets,
-        ],
-    ),
-    pytest.param('freertos_options', marks=[pytest.mark.supported_targets]),
-    pytest.param('psram', marks=[pytest.mark.esp32, pytest.mark.esp32s3, pytest.mark.esp32p4, pytest.mark.esp32c5]),
-    pytest.param('single_core', marks=[pytest.mark.esp32, pytest.mark.esp32p4]),
-    # TODO: [ESP32C5] IDF-10335
-    # TODO: [ESP32C61] IDF-11146
-    pytest.param(
-        'smp',
-        marks=[
-            pytest.mark.supported_targets,
-            pytest.mark.temp_skip_ci(
-                targets=['esp32p4', 'esp32c5', 'esp32c61', 'esp32h21'], reason='test failed/TBD IDF-8113'
-            ),
-        ],
-    ),
-    pytest.param('tickless_idle', marks=[pytest.mark.supported_targets]),
-]
 @pytest.mark.generic
 @idf_parametrize(
@@ -41,6 +17,8 @@ CONFIGS = [
         ('psram', 'esp32p4'),
         ('psram', 'esp32s3'),
         ('single_core', 'esp32'),
+        # TODO: [ESP32C5] IDF-10335
+        # TODO: [ESP32C61] IDF-11146
         ('single_core', 'esp32p4'),
         (
             'smp',


@@ -4,11 +4,6 @@ import pytest
 from pytest_embedded_idf.dut import IdfDut
 from pytest_embedded_idf.utils import idf_parametrize
 
-CONFIGS_NVS_ENCR_FLASH_ENC = [
-    pytest.param('nvs_encr_flash_enc_esp32', marks=[pytest.mark.esp32]),
-    pytest.param('nvs_encr_flash_enc_esp32c3', marks=[pytest.mark.esp32c3]),
-]
 @pytest.mark.generic
 @pytest.mark.parametrize('config', ['default'], indirect=True)


@@ -1,2 +1,2 @@
-| Supported Targets | ESP32 | ESP32-C2 | ESP32-C3 | ESP32-C5 | ESP32-C6 | ESP32-C61 | ESP32-H2 | ESP32-P4 | ESP32-S2 | ESP32-S3 |
-| ----------------- | ----- | -------- | -------- | -------- | -------- | --------- | -------- | -------- | -------- | -------- |
+| Supported Targets | ESP32 | ESP32-C2 | ESP32-C3 | ESP32-C5 | ESP32-C6 | ESP32-C61 | ESP32-H2 | ESP32-H21 | ESP32-H4 | ESP32-P4 | ESP32-S2 | ESP32-S3 |
+| ----------------- | ----- | -------- | -------- | -------- | -------- | --------- | -------- | --------- | -------- | -------- | -------- | -------- |


@@ -4,11 +4,6 @@ import pytest
 from pytest_embedded import Dut
 from pytest_embedded_idf.utils import idf_parametrize
 
-CONFIGS = [
-    pytest.param('default', marks=[pytest.mark.esp32, pytest.mark.esp32c3]),
-    pytest.param('psram_esp32', marks=[pytest.mark.esp32]),
-]
 @pytest.mark.generic
 @idf_parametrize(


@@ -213,7 +213,7 @@ When all the hardware have been configured and prepared, the Test can be run via
 The description is provided assuming that the test is run under Linux, that the Host board has a /dev/ttyACM0 path, that the Mocked Device has a /dev/ttyUSB0 path, and that we are in the esp-idf root folder.
 
-To run the pytest, esp-idf must be installed and configured with ```--enable-pytest```.
+To run the pytest, esp-idf must be installed and configured with `--enable-ci`.
 
 ### Prepare Host


@@ -9,6 +9,7 @@
 # please report to https://github.com/espressif/pytest-embedded/issues
 # or discuss at https://github.com/espressif/pytest-embedded/discussions
 import os
+import subprocess
 import sys
 
 if os.path.join(os.path.dirname(__file__), 'tools', 'ci') not in sys.path:
@@ -17,41 +18,26 @@ if os.path.join(os.path.dirname(__file__), 'tools', 'ci') not in sys.path:
 if os.path.join(os.path.dirname(__file__), 'tools', 'ci', 'python_packages') not in sys.path:
     sys.path.append(os.path.join(os.path.dirname(__file__), 'tools', 'ci', 'python_packages'))
 
-import glob
-import io
 import logging
 import os
 import re
 import typing as t
-import zipfile
 from copy import deepcopy
 from urllib.parse import quote
 
 import common_test_methods  # noqa: F401
 import gitlab_api
 import pytest
-import requests
-import yaml
 from _pytest.config import Config
 from _pytest.fixtures import FixtureRequest
-from artifacts_handler import ArtifactType
-from dynamic_pipelines.constants import TEST_RELATED_APPS_DOWNLOAD_URLS_FILENAME
-from idf_ci_local.app import import_apps_from_txt
-from idf_ci_local.uploader import AppDownloader
-from idf_ci_local.uploader import AppUploader
-from idf_ci_utils import IDF_PATH
+from idf_ci import PytestCase
+from idf_ci.idf_pytest import IDF_CI_PYTEST_CASE_KEY
 from idf_ci_utils import idf_relpath
 from idf_pytest.constants import DEFAULT_LOGDIR
-from idf_pytest.constants import DEFAULT_SDKCONFIG
-from idf_pytest.constants import ENV_MARKERS
-from idf_pytest.constants import SPECIAL_MARKERS
-from idf_pytest.constants import TARGET_MARKERS
-from idf_pytest.constants import PytestCase
-from idf_pytest.plugin import IDF_PYTEST_EMBEDDED_KEY
-from idf_pytest.plugin import ITEM_PYTEST_CASE_KEY
-from idf_pytest.plugin import IdfPytestEmbedded
+from idf_pytest.plugin import IDF_LOCAL_PLUGIN_KEY
+from idf_pytest.plugin import IdfLocalPlugin
+from idf_pytest.plugin import requires_elf_or_map
 from idf_pytest.utils import format_case_id
-from pytest_embedded.plugin import multi_dut_argument
 from pytest_embedded.plugin import multi_dut_fixture
 from pytest_embedded_idf.dut import IdfDut
 from pytest_embedded_idf.unity_tester import CaseTester
@@ -76,23 +62,6 @@ def case_tester(unity_tester: CaseTester) -> CaseTester:
     return unity_tester
 
 
-@pytest.fixture
-@multi_dut_argument
-def config(request: FixtureRequest) -> str:
-    return getattr(request, 'param', None) or DEFAULT_SDKCONFIG  # type: ignore
-
-
-@pytest.fixture
-@multi_dut_fixture
-def target(request: FixtureRequest, dut_total: int, dut_index: int) -> str:
-    plugin = request.config.stash[IDF_PYTEST_EMBEDDED_KEY]
-    if dut_total == 1:
-        return plugin.target[0]  # type: ignore
-
-    return plugin.target[dut_index]  # type: ignore
-
-
 @pytest.fixture
 def test_func_name(request: FixtureRequest) -> str:
     return request.node.function.__name__  # type: ignore
@@ -119,69 +88,76 @@ def pipeline_id(request: FixtureRequest) -> t.Optional[str]:
     return request.config.getoption('pipeline_id', None) or os.getenv('PARENT_PIPELINE_ID', None)  # type: ignore
 
 
-class BuildReportDownloader(AppDownloader):
-    def __init__(self, presigned_url_yaml: str) -> None:
-        self.app_presigned_urls_dict: t.Dict[str, t.Dict[str, str]] = yaml.safe_load(presigned_url_yaml)
-
-    def _download_app(self, app_build_path: str, artifact_type: ArtifactType) -> None:
-        url = self.app_presigned_urls_dict[app_build_path][artifact_type.value]
-
-        logging.info('Downloading app from %s', url)
-        with io.BytesIO() as f:
-            for chunk in requests.get(url).iter_content(chunk_size=1024 * 1024):
-                if chunk:
-                    f.write(chunk)
-            f.seek(0)
-
-            with zipfile.ZipFile(f) as zip_ref:
-                zip_ref.extractall(IDF_PATH)
-
-    def download_app(self, app_build_path: str, artifact_type: t.Optional[ArtifactType] = None) -> None:
-        if app_build_path not in self.app_presigned_urls_dict:
-            raise ValueError(
-                f'No presigned url found for {app_build_path}. '
-                f'Usually this should not happen, please re-trigger a pipeline.'
-                f'If this happens again, please report this bug to the CI channel.'
-            )
-
-        super().download_app(app_build_path, artifact_type)
+def get_pipeline_commit_sha_by_pipeline_id(pipeline_id: str) -> t.Optional[str]:
+    gl = gitlab_api.Gitlab(os.getenv('CI_PROJECT_ID', 'espressif/esp-idf'))
+    pipeline = gl.project.pipelines.get(pipeline_id)
+    if not pipeline:
+        return None
+
+    commit = gl.project.commits.get(pipeline.sha)
+    if not commit or not commit.parent_ids:
+        return None
+
+    if len(commit.parent_ids) == 1:
+        return commit.parent_ids[0]  # type: ignore
+
+    for parent_id in commit.parent_ids:
+        parent_commit = gl.project.commits.get(parent_id)
+        if parent_commit.parent_ids and len(parent_commit.parent_ids) == 1:
+            return parent_id  # type: ignore
+
+    return None
+
+
+class AppDownloader:
+    def __init__(
+        self,
+        commit_sha: str,
+        pipeline_id: t.Optional[str] = None,
+    ) -> None:
+        self.commit_sha = commit_sha
+        self.pipeline_id = pipeline_id
+
+    def download_app(self, app_build_path: str, artifact_type: t.Optional[str] = None) -> None:
+        args = [
+            'idf-ci',
+            'gitlab',
+            'download-artifacts',
+            '--commit-sha',
+            self.commit_sha,
+        ]
+        if artifact_type:
+            args.extend(['--type', artifact_type])
+        if self.pipeline_id:
+            args.extend(['--pipeline-id', self.pipeline_id])
+        args.append(app_build_path)
+        subprocess.run(
+            args,
+            stdout=sys.stdout,
+            stderr=sys.stderr,
+        )
+
+
+PRESIGNED_JSON = 'presigned.json'
 
 
 @pytest.fixture(scope='session')
-def app_downloader(pipeline_id: t.Optional[str]) -> t.Optional[AppDownloader]:
+def app_downloader(
+    pipeline_id: t.Optional[str],
+) -> t.Optional[AppDownloader]:
     if not pipeline_id:
         return None
 
-    if (
-        'IDF_S3_BUCKET' in os.environ
-        and 'IDF_S3_ACCESS_KEY' in os.environ
-        and 'IDF_S3_SECRET_KEY' in os.environ
-        and 'IDF_S3_SERVER' in os.environ
-        and 'IDF_S3_BUCKET' in os.environ
-    ):
-        return AppUploader(pipeline_id)
-
-    logging.info('Downloading build report from the build pipeline %s', pipeline_id)
-    test_app_presigned_urls_file = None
-    gl = gitlab_api.Gitlab(os.getenv('CI_PROJECT_ID', 'espressif/esp-idf'))
-    for child_pipeline in gl.project.pipelines.get(pipeline_id, lazy=True).bridges.list(iterator=True):
-        if child_pipeline.name == 'build_child_pipeline':
-            for job in gl.project.pipelines.get(child_pipeline.downstream_pipeline['id'], lazy=True).jobs.list(
-                iterator=True
-            ):
-                if job.name == 'generate_pytest_build_report':
-                    test_app_presigned_urls_file = gl.download_artifact(
-                        job.id, [TEST_RELATED_APPS_DOWNLOAD_URLS_FILENAME]
-                    )[0]
-                    break
-
-    if test_app_presigned_urls_file:
-        return BuildReportDownloader(test_app_presigned_urls_file)
-
-    return None
+    commit_sha = get_pipeline_commit_sha_by_pipeline_id(pipeline_id)
+    if not commit_sha:
+        raise ValueError(
+            'commit sha cannot be found for pipeline id %s. Please check the pipeline id. '
+            'If you think this is a bug, please report it to CI team',
+        )
+    logging.debug('pipeline commit sha of pipeline %s is %s', pipeline_id, commit_sha)
+
+    return AppDownloader(commit_sha, pipeline_id)
 
 
 @pytest.fixture
@@ -205,14 +181,14 @@ def build_dir(
     valid build directory
     """
     # download from minio on CI
-    case: PytestCase = request._pyfuncitem.stash[ITEM_PYTEST_CASE_KEY]
+    case: PytestCase = request.node.stash[IDF_CI_PYTEST_CASE_KEY]
     if app_downloader:
         # somehow hardcoded...
         app_build_path = os.path.join(idf_relpath(app_path), f'build_{target}_{config}')
-        if case.requires_elf_or_map:
+        if requires_elf_or_map(case):
             app_downloader.download_app(app_build_path)
         else:
-            app_downloader.download_app(app_build_path, ArtifactType.BUILD_DIR_WITHOUT_MAP_AND_ELF_FILES)
+            app_downloader.download_app(app_build_path, 'flash')
         check_dirs = [f'build_{target}_{config}']
     else:
         check_dirs = []
@@ -403,10 +379,6 @@ def dev_user(request: FixtureRequest) -> str:
 ##################
 def pytest_addoption(parser: pytest.Parser) -> None:
     idf_group = parser.getgroup('idf')
-    idf_group.addoption(
-        '--sdkconfig',
-        help='sdkconfig postfix, like sdkconfig.ci.<config>. (Default: None, which would build all found apps)',
-    )
     idf_group.addoption(
         '--dev-user',
         help='user name associated with some specific device/service used during the test execution',
@@ -415,16 +387,10 @@ def pytest_addoption(parser: pytest.Parser) -> None:
         '--dev-passwd',
         help='password associated with some specific device/service used during the test execution',
     )
-    idf_group.addoption(
-        '--app-info-filepattern',
-        help='glob pattern to specify the files that include built app info generated by '
-        '`idf-build-apps --collect-app-info ...`. will not raise ValueError when binary '
-        'paths not exist in local file system if not listed recorded in the app info.',
-    )
     idf_group.addoption(
         '--pipeline-id',
-        help='main pipeline id, not the child pipeline id. Specify this option to download the artifacts '
-        'from the minio server for debugging purpose.',
+        help='For users without s3 access. main pipeline id, not the child pipeline id. '
+        'Specify this option to download the artifacts from the minio server for debugging purpose.',
     )
@@ -437,63 +403,15 @@ def pytest_configure(config: Config) -> None:
     supported_targets.set(SUPPORTED_TARGETS)
     preview_targets.set(PREVIEW_TARGETS)
 
-    # cli option "--target"
-    target = [_t.strip().lower() for _t in (config.getoption('target', '') or '').split(',') if _t.strip()]
-
-    # add markers based on idf_pytest/constants.py
-    for name, description in {
-        **TARGET_MARKERS,
-        **ENV_MARKERS,
-        **SPECIAL_MARKERS,
-    }.items():
-        config.addinivalue_line('markers', f'{name}: {description}')
-
-    help_commands = ['--help', '--fixtures', '--markers', '--version']
-    for cmd in help_commands:
-        if cmd in config.invocation_params.args:
-            target = ['unneeded']
-            break
-
-    markexpr = config.getoption('markexpr') or ''
-    # check marker expr set via "pytest -m"
-    if not target and markexpr:
-        # we use `-m "esp32 and generic"` in our CI to filter the test cases
-        # this doesn't cover all use cases, but fit what we do in CI.
-        for marker in markexpr.split('and'):
-            marker = marker.strip()
-            if marker in TARGET_MARKERS:
-                target.append(marker)
-
-    # "--target" must be set
-    if not target:
-        raise SystemExit(
-            """Pass `--target TARGET[,TARGET...]` to specify all targets the test cases are using.
-    - for single DUT, we run with `pytest --target esp32`
-    - for multi DUT, we run with `pytest --target esp32,esp32,esp32s2` to indicate all DUTs
-    """
-        )
-
-    apps = None
-    app_info_filepattern = config.getoption('app_info_filepattern')
-    if app_info_filepattern:
-        apps = []
-        for f in glob.glob(os.path.join(IDF_PATH, app_info_filepattern)):
-            apps.extend(import_apps_from_txt(f))
-
-    if '--collect-only' not in config.invocation_params.args:
-        config.stash[IDF_PYTEST_EMBEDDED_KEY] = IdfPytestEmbedded(
-            config_name=config.getoption('sdkconfig'),
-            target=target,
-            apps=apps,
-        )
-        config.pluginmanager.register(config.stash[IDF_PYTEST_EMBEDDED_KEY])
+    config.stash[IDF_LOCAL_PLUGIN_KEY] = IdfLocalPlugin()
+    config.pluginmanager.register(config.stash[IDF_LOCAL_PLUGIN_KEY])
 
 
 def pytest_unconfigure(config: Config) -> None:
-    _pytest_embedded = config.stash.get(IDF_PYTEST_EMBEDDED_KEY, None)
-    if _pytest_embedded:
-        del config.stash[IDF_PYTEST_EMBEDDED_KEY]
-        config.pluginmanager.unregister(_pytest_embedded)
+    idf_local_plugin = config.stash.get(IDF_LOCAL_PLUGIN_KEY, None)
+    if idf_local_plugin:
+        del config.stash[IDF_LOCAL_PLUGIN_KEY]
+        config.pluginmanager.unregister(idf_local_plugin)
 
     dut_artifacts_url = []
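With this change, a developer without S3 credentials can still reuse CI-built binaries: the session-scoped `app_downloader` fixture resolves the pipeline's commit SHA and shells out to `idf-ci gitlab download-artifacts`. A sketch of the intended local flow, with a placeholder pipeline id:

```shell
# <main_pipeline_id> is the parent pipeline id, not the build child pipeline id
pytest --target esp32 --pipeline-id <main_pipeline_id>
```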


@@ -33,13 +33,13 @@ On the host side, ESP-IDF employs the pytest framework (alongside certain pytest
 Installation
 ============
 
-All basic dependencies can be installed by running the ESP-IDF install script with the ``--enable-pytest`` argument:
+All basic dependencies can be installed by running the ESP-IDF install script with the ``--enable-ci`` argument:
 
 .. code-block:: bash
 
-    $ install.sh --enable-pytest
+    $ install.sh --enable-ci
 
-Additional test-script-specific dependencies can be installed separately by running the ESP-IDF install script with the ``--enable-pytest-specific`` argument:
+Additional test-script-specific dependencies can be installed separately by running the ESP-IDF install script with the ``--enable-test-specific`` argument:
 
 .. code-block:: bash
@@ -91,32 +91,23 @@ Getting Started
 .. code-block:: python
 
-    @pytest.mark.esp32
-    @pytest.mark.esp32s2
+    @pytest.mark.parametrize('target', [
+        'esp32',
+        'esp32s2',
+    ], indirect=True)
     @pytest.mark.generic
     def test_hello_world(dut) -> None:
         dut.expect('Hello world!')
 
 This is a simple test script that could run with the ESP-IDF getting-started example :example:`get-started/hello_world`.
 
-First two lines are the target markers:
-
-* The ``@pytest.mark.esp32`` is a marker that indicates that this test case should be run on the ESP32.
-* The ``@pytest.mark.esp32s2`` is a marker that indicates that this test case should be run on the ESP32-S2.
+In this test script, the ``@pytest.mark.parametrize`` decorator is used to parameterize the test case. The ``target`` parameter is a special parameter that indicates the target board type. The ``indirect=True`` argument indicates that this parameter is pre-calculated before other fixtures.
 
-.. note::
-
-    If the test case can be run on all targets officially supported by ESP-IDF (call ``idf.py --list-targets`` for more details), you can use a special marker ``supported_targets`` to apply all of them in one line. We also support ``preview_targets`` and ``all_targets`` as special target markers (call ``idf.py --list-targets --preview`` for a full targets list including preview targets).
-
-Next, we have the environment marker:
-
-* The ``@pytest.mark.generic`` is a marker that indicates that this test case should be run on the ``generic`` board type.
+Next is the environment marker. The ``@pytest.mark.generic`` marker indicates that this test case should run on the generic board type.
 
 .. note::
 
-    For the detailed explanation of the environment markers, please refer to :idf_file:`ENV_MARKERS definition <tools/ci/idf_pytest/constants.py>`
+    For the detailed explanation of the environment markers, please refer to :idf_file:`env_markers definition <pytest.ini>`
 
 Finally, we have the test function, with a ``dut`` fixture. In single-dut test cases, the ``dut`` fixture is an instance of ``IdfDut`` class; for multi-dut test cases, it is a tuple of ``IdfDut`` instances. For more details regarding the ``IdfDut`` class, please refer to `pytest-embedded IdfDut API reference <https://docs.espressif.com/projects/pytest-embedded/en/latest/api.html#pytest_embedded_idf.dut.IdfDut>`__.
@@ -142,8 +133,10 @@ If the test case needs to run all supported targets with these two sdkconfig fil
 .. code-block:: python
 
-    @pytest.mark.esp32
-    @pytest.mark.esp32s2
+    @pytest.mark.parametrize('target', [
+        'esp32',    # <-- run with esp32 target
+        'esp32s2',  # <-- run with esp32s2 target
+    ], indirect=True)
     @pytest.mark.parametrize('config', [  # <-- parameterize the sdkconfig file
         'foo',  # <-- run with sdkconfig.ci.foo
         'bar',  # <-- run with sdkconfig.ci.bar
@@ -180,17 +173,6 @@ The test case ID is used to identify the test case in the JUnit report.
 Nearly all the CLI options of pytest-embedded support parameterization. To see all supported CLI options, you may run ``pytest --help`` and check the ``embedded-...`` sections for vanilla pytest-embedded ones, and the ``idf`` sections for ESP-IDF specific ones.
 
-.. note::
-
-    The target markers, like ``@pytest.mark.esp32`` and ``@pytest.mark.esp32s2``, are actually syntactic sugar for parameterization. In fact they are defined as:
-
-    .. code-block:: python
-
-        @pytest.mark.parametrize('target', [
-            'esp32',
-            'esp32s2',
-        ], indirect=True)
-
 Same App With Different sdkconfig Files, Different Targets
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -198,9 +180,9 @@ For some test cases, you may need to run the same app with different sdkconfig f
 .. code-block:: python
 
-    @pytest.mark.parametrize('config', [
-        pytest.param('foo', marks=[pytest.mark.esp32]),
-        pytest.param('bar', marks=[pytest.mark.esp32s2]),
+    @pytest.mark.parametrize('config, target', [
+        pytest.param('foo', 'esp32'),
+        pytest.param('bar', 'esp32s2'),
     ], indirect=True)
 
 Now this test function would be replicated to 2 test cases (represented as test case IDs):
@@ -261,40 +243,6 @@ After setting the param ``count`` to 2, all the fixtures are changed into tuples
 For detailed multi-dut parametrization documentation, please refer to `pytest-embedded Multi-DUT documentation <https://docs.espressif.com/projects/pytest-embedded/en/latest/key_concepts.html#multi-duts>`__.
 
-.. warning::
-
-    In some test scripts, you may see target markers like ``@pytest.mark.esp32`` and ``@pytest.mark.esp32s2`` used together with multi-DUT test cases. This is deprecated and should be replaced with the ``target`` parametrization.
-
-    For example,
-
-    .. code-block:: python
-
-        @pytest.mark.esp32
-        @pytest.mark.esp32s2
-        @pytest.mark.parametrize('count', [
-            2,
-        ], indirect=True)
-        def test_hello_world(dut) -> None:
-            dut[0].expect('Hello world!')
-            dut[1].expect('Hello world!')
-
-    should be replaced with:
-
-    .. code-block:: python
-
-        @pytest.mark.parametrize('count', [
-            2,
-        ], indirect=True)
-        @pytest.mark.parametrize('target', [
-            'esp32',
-            'esp32s2',
-        ], indirect=True)
-        def test_hello_world(dut) -> None:
-            dut[0].expect('Hello world!')
-            dut[1].expect('Hello world!')
-
-    This could help avoid the ambiguity of the target markers when multi-DUT test cases are using different types of targets.
-
 Multi-Target Tests with Different Apps
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -494,7 +442,7 @@ First you need to install ESP-IDF with additional Python requirements:
 .. code-block:: shell
 
     $ cd $IDF_PATH
-    $ bash install.sh --enable-ci --enable-pytest
+    $ bash install.sh --enable-ci
     $ . ./export.sh
 
 Build Directories
@@ -544,7 +492,7 @@ If you want to build and test with all sdkconfig files at the same time, you sho
.. code-block:: shell .. code-block:: shell
$ cd $IDF_PATH/examples/system/console/basic $ cd $IDF_PATH/examples/system/console/basic
$ python $IDF_PATH/tools/ci/ci_build_apps.py . --target esp32 -v --pytest-apps $ idf-ci build run --target esp32 --only-test-related
$ pytest --target esp32 $ pytest --target esp32
The app with ``sdkconfig.ci.history`` will be built in ``build_esp32_history``, and the app with ``sdkconfig.ci.nohistory`` will be built in ``build_esp32_nohistory``. ``pytest --target esp32`` will run tests on both apps. The app with ``sdkconfig.ci.history`` will be built in ``build_esp32_history``, and the app with ``sdkconfig.ci.nohistory`` will be built in ``build_esp32_nohistory``. ``pytest --target esp32`` will run tests on both apps.
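As a sketch of how the corresponding test script picks up both build directories, the ``config`` parameter is parametrized with the names taken from the ``sdkconfig.ci.*`` suffixes (the test function body below is a placeholder):

.. code-block:: python

    import pytest


    @pytest.mark.parametrize('config', [
        'history',    # matched against build_esp32_history
        'nohistory',  # matched against build_esp32_nohistory
    ], indirect=True)
    @pytest.mark.generic
    def test_console(dut) -> None:
        # placeholder; the real test logic interacts with the console here
        pass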
@@ -580,8 +528,8 @@ Of course we can build the required binaries manually, but we can also use our C
.. code-block:: shell .. code-block:: shell
$ cd $IDF_PATH/examples/openthread $ cd $IDF_PATH/examples/openthread
$ python $IDF_PATH/tools/ci/ci_build_apps.py . --target all -v --pytest-apps -k test_thread_connect $ idf-ci build run --only-test-related -k test_thread_connect
$ pytest --target esp32c6,esp32h2,esp32s3 -k test_thread_connect $ pytest -k test_thread_connect
.. important:: .. important::
@@ -697,9 +645,7 @@ This marker means that the test case could still be run locally with ``pytest --
Add New Markers Add New Markers
--------------- ---------------
We are using two types of custom markers, target markers which indicate that the test cases should support this target, and env markers which indicate that the test cases should be assigned to runners with these tags in CI. You can add new markers by adding one line to :idf_file:`pytest.ini`. If it is a marker that specifies a type of test environment, it should be added to the ``env_markers`` section. Otherwise it should be added to the ``markers`` section. The syntax should be: ``<marker_name>: <marker_description>``.
You can add new markers by adding one line under the :idf_file:`conftest.py`. If it is a target marker, it should be added into ``TARGET_MARKERS``. If it is a marker that specifies a type of test environment, it should be added into ``ENV_MARKERS``. The syntax should be: ``<marker_name>: <marker_description>``.
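For instance, registering a hypothetical new env marker takes a single line in the ``env_markers`` section (the marker name and description below are made up for illustration):

.. code-block:: ini

    env_markers =
        my_sensor_board: runners with the custom sensor board attached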
Skip Auto Flash Binary Skip Auto Flash Binary
---------------------- ----------------------

View File

@@ -33,13 +33,13 @@ On the host side, ESP-IDF uses the pytest framework (and some pytest plugins) to automate
Installation Installation
============ ============
The basic dependencies can be installed by running the ESP-IDF install script with ``--enable-pytest``: The basic dependencies can be installed by running the ESP-IDF install script with ``--enable-ci``:
.. code-block:: bash .. code-block:: bash
$ install.sh --enable-pytest $ install.sh --enable-ci
Additional test script dependencies can be installed by running the ESP-IDF install script with ``--enable-pytest-specific``: Additional test script dependencies can be installed by running the ESP-IDF install script with ``--enable-test-specific``:
.. code-block:: bash .. code-block:: bash
@@ -91,32 +91,23 @@ On the host side, ESP-IDF uses the pytest framework (and some pytest plugins) to automate
.. code-block:: python .. code-block:: python
@pytest.mark.esp32 @pytest.mark.parametrize('target', [
@pytest.mark.esp32s2 'esp32',
'esp32s2',
], indirect=True)
@pytest.mark.generic @pytest.mark.generic
def test_hello_world(dut) -> None: def test_hello_world(dut) -> None:
dut.expect('Hello world!') dut.expect('Hello world!')
This is a simple test script that can be run with the getting-started example :example:`get-started/hello_world`. This is a simple test script that can be run with the getting-started example :example:`get-started/hello_world`.
The first two lines are the target markers: In this test script, the ``@pytest.mark.parametrize`` decorator is used to parametrize the test case. ``target`` is a special parameter that indicates the target type. ``indirect=True`` indicates that this parameter is pre-calculated before the other fixtures.
* ``@pytest.mark.esp32`` is a marker that indicates this test case should run on ESP32. Next comes the environment marker. The ``@pytest.mark.generic`` marker indicates that this test case should run on the generic board type.
* ``@pytest.mark.esp32s2`` is a marker that indicates this test case should run on ESP32-S2.
.. note:: .. note::
If the test case can run on all targets officially supported by ESP-IDF (run ``idf.py --list-targets`` for more details), you can use the special marker ``supported_targets`` to apply them all in one line. For a detailed explanation of environment markers, please refer to the :idf_file:`env_markers definition <pytest.ini>`.
``preview_targets`` and ``all_targets`` are also supported as special target markers; run ``idf.py --list-targets --preview`` for the complete target list including preview targets.
Environment markers:
* The ``@pytest.mark.generic`` marker indicates that this test case should run on the generic board type.
.. note::
For a detailed explanation of environment markers, please refer to the :idf_file:`ENV_MARKERS definition <tools/ci/idf_pytest/constants.py>`.
As for the test function, a ``dut`` fixture is used. In single-DUT test cases, the ``dut`` fixture is an instance of the ``IdfDut`` class; in multi-DUT test cases, it is a tuple of ``IdfDut`` instances. For more details about the ``IdfDut`` class, please refer to the `pytest-embedded IdfDut API reference <https://docs.espressif.com/projects/pytest-embedded/en/latest/api.html#pytest_embedded_idf.dut.IdfDut>`__. As for the test function, a ``dut`` fixture is used. In single-DUT test cases, the ``dut`` fixture is an instance of the ``IdfDut`` class; in multi-DUT test cases, it is a tuple of ``IdfDut`` instances. For more details about the ``IdfDut`` class, please refer to the `pytest-embedded IdfDut API reference <https://docs.espressif.com/projects/pytest-embedded/en/latest/api.html#pytest_embedded_idf.dut.IdfDut>`__.
@@ -142,8 +133,10 @@ ESP-IDF 在主机端使用 pytest 框架(以及一些 pytest 插件)来自
.. code-block:: python .. code-block:: python
@pytest.mark.esp32 @pytest.mark.parametrize('target', [
@pytest.mark.esp32s2 'esp32', # <-- run with esp32 target
'esp32s2', # <-- run with esp32s2 target
], indirect=True)
@pytest.mark.parametrize('config', [ # <-- parameterize the sdkconfig file @pytest.mark.parametrize('config', [ # <-- parameterize the sdkconfig file
'foo', # <-- run with sdkconfig.ci.foo 'foo', # <-- run with sdkconfig.ci.foo
'bar', # <-- run with sdkconfig.ci.bar 'bar', # <-- run with sdkconfig.ci.bar
@@ -180,17 +173,6 @@ On the host side, ESP-IDF uses the pytest framework (and some pytest plugins) to automate
Nearly all the CLI options of pytest-embedded support parameterization. To see all supported CLI options, you may run ``pytest --help`` and check the ``embedded-...`` sections for vanilla pytest-embedded options, and the ``idf`` sections for ESP-IDF specific ones. Nearly all the CLI options of pytest-embedded support parameterization. To see all supported CLI options, you may run ``pytest --help`` and check the ``embedded-...`` sections for vanilla pytest-embedded options, and the ``idf`` sections for ESP-IDF specific ones.
.. note::
Target markers such as ``@pytest.mark.esp32`` and ``@pytest.mark.esp32s2`` are syntactic sugar for parameterization. In fact they are defined as:
.. code-block:: python
@pytest.mark.parametrize('target', [
'esp32',
'esp32s2',
], indirect=True)
Same App With Different sdkconfig Files, Different Targets Same App With Different sdkconfig Files, Different Targets
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -198,9 +180,9 @@ On the host side, ESP-IDF uses the pytest framework (and some pytest plugins) to automate
.. code-block:: python .. code-block:: python
@pytest.mark.parametrize('config', [ @pytest.mark.parametrize('config, target', [
pytest.param('foo', marks=[pytest.mark.esp32]), pytest.param('foo', 'esp32'),
pytest.param('bar', marks=[pytest.mark.esp32s2]), pytest.param('bar', 'esp32s2'),
], indirect=True) ], indirect=True)
Now this test function would be replicated to 2 test cases (represented as test case IDs): Now this test function would be replicated to 2 test cases (represented as test case IDs):
@@ -261,40 +243,6 @@ On the host side, ESP-IDF uses the pytest framework (and some pytest plugins) to automate
For detailed multi-DUT parametrization documentation, please refer to the `pytest-embedded Multi-DUT documentation <https://docs.espressif.com/projects/pytest-embedded/en/latest/key_concepts.html#multi-duts>`__. For detailed multi-DUT parametrization documentation, please refer to the `pytest-embedded Multi-DUT documentation <https://docs.espressif.com/projects/pytest-embedded/en/latest/key_concepts.html#multi-duts>`__.
.. warning::
In some test scripts, you may see target markers such as ``@pytest.mark.esp32`` and ``@pytest.mark.esp32s2`` used with multi-DUT test cases. This usage is deprecated and should be replaced with ``target`` parametrization.
For example,
.. code-block:: python
@pytest.mark.esp32
@pytest.mark.esp32s2
@pytest.mark.parametrize('count', [
2,
], indirect=True)
def test_hello_world(dut) -> None:
dut[0].expect('Hello world!')
dut[1].expect('Hello world!')
should be replaced with:
.. code-block:: python
@pytest.mark.parametrize('count', [
2,
], indirect=True)
@pytest.mark.parametrize('target', [
'esp32',
'esp32s2',
], indirect=True)
def test_hello_world(dut) -> None:
dut[0].expect('Hello world!')
dut[1].expect('Hello world!')
This helps avoid the ambiguity of target markers when a multi-DUT test case uses different types of targets.
Multi-Target Tests with Different Apps and Targets Multi-Target Tests with Different Apps and Targets
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -494,7 +442,7 @@ The CI workflow is as follows:
.. code-block:: shell .. code-block:: shell
$ cd $IDF_PATH $ cd $IDF_PATH
$ bash install.sh --enable-ci --enable-pytest $ bash install.sh --enable-ci
$ . ./export.sh $ . ./export.sh
Build Directories Build Directories
@@ -544,7 +492,7 @@ The CI workflow is as follows:
.. code-block:: shell .. code-block:: shell
$ cd $IDF_PATH/examples/system/console/basic $ cd $IDF_PATH/examples/system/console/basic
$ python $IDF_PATH/tools/ci/ci_build_apps.py . --target esp32 -v --pytest-apps $ idf-ci build run --target esp32 --only-test-related
$ pytest --target esp32 $ pytest --target esp32
The app with ``sdkconfig.ci.history`` will be built in ``build_esp32_history``, and the app with ``sdkconfig.ci.nohistory`` will be built in ``build_esp32_nohistory``. ``pytest --target esp32`` will run tests on both apps. The app with ``sdkconfig.ci.history`` will be built in ``build_esp32_history``, and the app with ``sdkconfig.ci.nohistory`` will be built in ``build_esp32_nohistory``. ``pytest --target esp32`` will run tests on both apps.
@@ -580,8 +528,8 @@ The CI workflow is as follows:
.. code-block:: shell .. code-block:: shell
$ cd $IDF_PATH/examples/openthread $ cd $IDF_PATH/examples/openthread
$ python $IDF_PATH/tools/ci/ci_build_apps.py . --target all -v --pytest-apps -k test_thread_connect $ idf-ci build run --only-test-related -k test_thread_connect
$ pytest --target esp32c6,esp32h2,esp32s3 -k test_thread_connect $ pytest -k test_thread_connect
.. important:: .. important::
@@ -699,7 +647,7 @@ Pytest Usage Tips
We currently use two types of custom markers: target markers indicate that a test case supports the given target, and env markers indicate that the test case should be assigned to runners with the corresponding tags in CI. We currently use two types of custom markers: target markers indicate that a test case supports the given target, and env markers indicate that the test case should be assigned to runners with the corresponding tags in CI.
You can add a new marker by adding one line in the :idf_file:`conftest.py` file. If the marker is a target marker, it should be added to ``TARGET_MARKERS``. If it specifies a type of test environment, it should be added to ``ENV_MARKERS``. The custom marker format is: ``<marker_name>: <marker_description>`` You can add a new marker by adding one line to the :idf_file:`pytest.ini` file. If the marker specifies a type of test environment, it should be added to the ``env_markers`` section. Otherwise, it should be added to the ``markers`` section. The syntax is: ``<marker_name>: <marker_description>``
Skip Auto Flash Binary Skip Auto Flash Binary
------------------------------------- -------------------------------------

View File

@@ -2,8 +2,8 @@
# BSD Socket API Examples # BSD Socket API Examples
This directory contains simple examples demonstrating the BSD Socket API. This directory contains simple examples demonstrating the BSD Socket API.
Each example, contains README.md file with mode detailed informations about that particular example. Each example contains a README.md file with more detailed information about that particular example.
For more general informations about all examples, see the README.md file in the upper level 'examples' directory. For more general information about all examples, see the README.md file in the upper level 'examples' directory.
Examples: Examples:
* UDP Client - The application creates a UDP socket and sends a message to the predefined port and IP address. After the server's reply, the application prints the received reply as ASCII text, waits for 2 seconds and sends another message. * UDP Client - The application creates a UDP socket and sends a message to the predefined port and IP address. After the server's reply, the application prints the received reply as ASCII text, waits for 2 seconds and sends another message.
@@ -56,10 +56,10 @@ They can also be run locally. Ref: [ESP-IDF Tests with Pytest Guide](https://doc
Example: Example:
```bash ```bash
$ cd $IDF_PATH $ cd $IDF_PATH
$ bash install.sh --enable-pytest $ bash install.sh --enable-ci
$ . ./export.sh $ . ./export.sh
$ cd examples/protocols/sockets/tcp_client $ cd examples/protocols/sockets/tcp_client
$ python $IDF_PATH/tools/ci/ci_build_apps.py . --target esp32 -vv --pytest-apps $ idf-ci build run --target esp32 --only-test-related
$ pytest --target esp32 $ pytest --target esp32
``` ```
@@ -112,7 +112,7 @@ Please make sure that when using the Local Link address, an interface id is incl
* On the host * On the host
- Interface name suffix is present when passing the address as a string, for example `fe80::260a:XXX:XXX:XXX%en0` - Interface name suffix is present when passing the address as a string, for example `fe80::260a:XXX:XXX:XXX%en0`
- The interface id is present when passing the endpoint as tupple, for example `socket.connect(('fd00::260a:XXXX:XXXX:XXXX', 3333, 0, 3))` - The interface id is present when passing the endpoint as tuple, for example `socket.connect(('fd00::260a:XXXX:XXXX:XXXX', 3333, 0, 3))`
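For example, a minimal host-side sketch that resolves the interface name to its scope id before connecting (the address, port, and interface name are placeholders):

```python
import socket

ADDR = 'fe80::260a:c4ff:feaa:bbcc'  # placeholder link-local address of the device
PORT = 3333
IFNAME = 'en0'  # placeholder network interface the device is reachable on

# Resolve the interface name to its numeric scope id and pass it as the
# 4th element of the IPv6 endpoint tuple.
scope_id = socket.if_nametoindex(IFNAME)
with socket.socket(socket.AF_INET6, socket.SOCK_STREAM) as s:
    s.connect((ADDR, PORT, 0, scope_id))
    s.sendall(b'hello')
```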
## Hardware Required ## Hardware Required

View File

@@ -7,25 +7,6 @@ import pytest
from pytest_embedded import Dut from pytest_embedded import Dut
from pytest_embedded_idf.utils import idf_parametrize from pytest_embedded_idf.utils import idf_parametrize
CONFIGS = [
pytest.param('esp32_singlecore', marks=[pytest.mark.esp32]),
pytest.param(
'basic',
marks=[
pytest.mark.esp32,
pytest.mark.esp32s2,
pytest.mark.esp32s3,
pytest.mark.esp32c3,
pytest.mark.esp32c5,
pytest.mark.esp32c6,
pytest.mark.esp32c61,
pytest.mark.esp32h2,
pytest.mark.esp32p4,
pytest.mark.esp32c2,
],
),
]
@pytest.mark.generic @pytest.mark.generic
@idf_parametrize( @idf_parametrize(

View File

@@ -7,7 +7,6 @@ python_files = pytest_*.py
addopts = addopts =
-s -vv -s -vv
--embedded-services esp,idf --embedded-services esp,idf
-p no:idf-ci
--tb short --tb short
--strict-markers --strict-markers
--skip-check-coredump y --skip-check-coredump y
@@ -15,12 +14,14 @@ addopts =
--check-duplicates y --check-duplicates y
--ignore-glob */managed_components/* --ignore-glob */managed_components/*
--ignore pytest-embedded --ignore pytest-embedded
--unity-test-report-mode merge
--ignore-no-tests-collected-error
# ignore DeprecationWarning # ignore DeprecationWarning
filterwarnings = filterwarnings =
ignore::DeprecationWarning:matplotlib.*: ignore::DeprecationWarning:matplotlib.*:
ignore::DeprecationWarning:google.protobuf.*: ignore::DeprecationWarning:google.protobuf.*:
ignore::_pytest.warning_types.PytestExperimentalApiWarning ignore::FutureWarning
# log related # log related
log_cli = True log_cli = True
@@ -34,3 +35,94 @@ junit_family = xunit1
## log all to `system-out` when case fail ## log all to `system-out` when case fail
junit_logging = stdout junit_logging = stdout
junit_log_passing_tests = False junit_log_passing_tests = False
markers =
temp_skip_ci: mark test to be skipped in CI
temp_skip: mark test to be skipped in CI and locally
require_elf: mark test to be skipped if no elf file is found
env_markers =
qemu: build and test using qemu, not real target
macos: tests should be run on macos hosts
generic: tests should be run on generic runners
flash_suspend: support flash suspend feature
eth_ip101: connected via IP101 ethernet transceiver
eth_lan8720: connected via LAN8720 ethernet transceiver
eth_rtl8201: connected via RTL8201 ethernet transceiver
eth_ksz8041: connected via KSZ8041 ethernet transceiver
eth_dp83848: connected via DP83848 ethernet transceiver
eth_w5500: SPI Ethernet module with two W5500
eth_ksz8851snl: SPI Ethernet module with two KSZ8851SNL
eth_dm9051: SPI Ethernet module with two DM9051
quad_psram: runners with quad psram
octal_psram: runners with octal psram
usb_host_flash_disk: usb host runners with USB flash disk attached
usb_device: usb device runners
ethernet_ota: ethernet OTA runners
flash_encryption: Flash Encryption runners
flash_encryption_f4r8: Flash Encryption runners with 4-line flash and 8-line psram
flash_encryption_f8r8: Flash Encryption runners with 8-line flash and 8-line psram
flash_encryption_ota: Flash Encryption runners with ethernet OTA support with 4mb flash size
flash_multi: Multiple flash chips tests
psram: Chip has 4-line psram
ir_transceiver: runners with a pair of IR transmitter and receiver
twai_transceiver: runners with a TWAI PHY transceiver
flash_encryption_wifi_high_traffic: Flash Encryption runners with wifi high traffic support
ethernet: ethernet runner
ethernet_stress: ethernet runner with stress test
ethernet_flash_8m: ethernet runner with 8mb flash
ethernet_router: both the runner and dut connect to the same router through ethernet NIC
ethernet_vlan: ethernet runner GARM-32-SH-1-R16S5N3
wifi_ap: a wifi AP in the environment
wifi_router: both the runner and dut connect to the same wifi router
wifi_high_traffic: wifi high traffic runners
wifi_wlan: wifi runner with a wireless NIC
wifi_iperf: the AP and ESP dut were placed in a shielded box - for iperf test
Example_ShieldBox: multiple shielded APs connected to shielded ESP DUT via RF cable with programmable attenuator
xtal_26mhz: runner with 26MHz xtal on board
xtal_40mhz: runner with 40MHz xtal on board
external_flash: external flash memory connected via VSPI (FSPI)
sdcard_sdmode: sdcard running in SD mode, to be removed after test migration
sdcard_spimode: sdcard running in SPI mode
emmc: eMMC card
sdcard: sdcard runner
MSPI_F8R8: runner with Octal Flash and Octal PSRAM
MSPI_F4R8: runner with Quad Flash and Octal PSRAM
MSPI_F4R4: runner with Quad Flash and Quad PSRAM
flash_120m: runner with 120M supported Flash
jtag: runner where the chip is accessible through JTAG as well
usb_serial_jtag: runner where the chip is accessible through builtin JTAG as well
adc: ADC related tests should run on adc runners
xtal32k: Runner with external 32k crystal connected
no32kXtal: Runner with no external 32k crystal connected
psramv0: Runner with PSRAM version 0
esp32eco3: Runner with esp32 eco3 connected
ecdsa_efuse: Runner with test ECDSA private keys programmed in efuse
ccs811: Runner with CCS811 connected
nvs_encr_hmac: Runner with test HMAC key programmed in efuse
i2c_oled: Runner with ssd1306 I2C oled connected
httpbin: runner for tests that need to access the httpbin service
flash_4mb: C2 runners with 4 MB flash
jtag_re_enable: Runner to re-enable jtag which is softly disabled by burning bit SOFT_DIS_JTAG on eFuse
es8311: Development board that carries es8311 codec
camera: Runner with camera
ov5647: Runner with camera ov5647
multi_dut_modbus_rs485: a pair of runners connected by RS485 bus
ieee802154: ieee802154 related tests should run on ieee802154 runners.
openthread_br: tests should be used for openthread border router.
openthread_bbr: tests should be used for openthread border router linked to Internet.
openthread_sleep: tests should be used for openthread sleepy device.
zigbee_multi_dut: zigbee runner which have multiple duts.
wifi_two_dut: tests should be run on runners which has two wifi duts connected.
generic_multi_device: generic multiple devices whose corresponding gpio pins are connected to each other.
twai_network: multiple runners form a TWAI network.
sdio_master_slave: Test sdio multi board, esp32+esp32
sdio_multidev_32_c6: Test sdio multi board, esp32+esp32c6
sdio_multidev_p4_c5: Test sdio multi board, esp32p4+esp32c5
usj_device: Test usb_serial_jtag and usb_serial_jtag is used as serial only (not console)
twai_std: twai runner with all twai supported targets connect to usb-can adapter
lp_i2s: lp_i2s runner tested with hp_i2s
ram_app: ram_app runners
esp32c3eco7: esp32c3 major version(v1.1) chips
esp32c2eco4: esp32c2 major version(v2.0) chips
recovery_bootloader: Runner with recovery bootloader offset set in eFuse

View File

@@ -1,215 +0,0 @@
# SPDX-FileCopyrightText: 2023-2024 Espressif Systems (Shanghai) CO LTD
# SPDX-License-Identifier: Apache-2.0
import argparse
import fnmatch
import glob
import os
import typing as t
import zipfile
from enum import Enum
from pathlib import Path
from zipfile import ZipFile
import urllib3
from idf_ci_utils import sanitize_job_name
from idf_pytest.constants import DEFAULT_BUILD_LOG_FILENAME
from minio import Minio
class ArtifactType(str, Enum):
MAP_AND_ELF_FILES = 'map_and_elf_files'
BUILD_DIR_WITHOUT_MAP_AND_ELF_FILES = 'build_dir_without_map_and_elf_files'
LOGS = 'logs'
SIZE_REPORTS = 'size_reports'
JUNIT_REPORTS = 'junit_reports'
MODIFIED_FILES_AND_COMPONENTS_REPORT = 'modified_files_and_components_report'
TYPE_PATTERNS_DICT = {
ArtifactType.MAP_AND_ELF_FILES: [
'**/build*/bootloader/*.map',
'**/build*/bootloader/*.elf',
'**/build*/esp_tee/*.map',
'**/build*/esp_tee/*.elf',
'**/build*/*.map',
'**/build*/*.elf',
],
ArtifactType.BUILD_DIR_WITHOUT_MAP_AND_ELF_FILES: [
f'**/build*/{DEFAULT_BUILD_LOG_FILENAME}',
'**/build*/*.bin',
'**/build*/bootloader/*.bin',
'**/build*/esp_tee/*.bin',
'**/build*/partition_table/*.bin',
'**/build*/flasher_args.json',
'**/build*/flash_project_args',
'**/build*/config/sdkconfig.json',
'**/build*/sdkconfig',
'**/build*/project_description.json',
'list_job*.txt',
],
ArtifactType.LOGS: [
f'**/build*/{DEFAULT_BUILD_LOG_FILENAME}',
],
ArtifactType.SIZE_REPORTS: [
'**/build*/size.json',
'size_info.txt',
],
ArtifactType.JUNIT_REPORTS: [
'XUNIT_RESULT*.xml',
'build_summary*.xml',
],
ArtifactType.MODIFIED_FILES_AND_COMPONENTS_REPORT: [
'pipeline.env',
],
}
def getenv(env_var: str) -> str:
try:
return os.environ[env_var]
except KeyError as e:
raise Exception(f'Environment variable {env_var} not set') from e
def get_minio_client() -> Minio:
return Minio(
getenv('IDF_S3_SERVER').replace('https://', ''),
access_key=getenv('IDF_S3_ACCESS_KEY'),
secret_key=getenv('IDF_S3_SECRET_KEY'),
http_client=urllib3.PoolManager(
num_pools=10,
timeout=urllib3.Timeout.DEFAULT_TIMEOUT,
retries=urllib3.Retry(
total=5,
backoff_factor=0.2,
status_forcelist=[500, 502, 503, 504],
),
),
)
def _download_files(
pipeline_id: int,
*,
artifact_type: t.Optional[ArtifactType] = None,
job_name: t.Optional[str] = None,
job_id: t.Optional[int] = None,
) -> None:
if artifact_type:
prefix = f'{pipeline_id}/{artifact_type.value}/'
else:
prefix = f'{pipeline_id}/'
for obj in client.list_objects(getenv('IDF_S3_BUCKET'), prefix=prefix, recursive=True):
obj_name = obj.object_name
obj_p = Path(obj_name)
# <pipeline_id>/<action_type>/<job_name>/<job_id>.zip
if len(obj_p.parts) != 4:
print(f'Invalid object name: {obj_name}')
continue
if job_name:
# could be a pattern
if not fnmatch.fnmatch(obj_p.parts[2], job_name):
print(f'Job name {job_name} does not match {obj_p.parts[2]}')
continue
if job_id:
if obj_p.parts[3] != f'{job_id}.zip':
print(f'Job ID {job_id} does not match {obj_p.parts[3]}')
continue
client.fget_object(getenv('IDF_S3_BUCKET'), obj_name, obj_name)
print(f'Downloaded {obj_name}')
if obj_name.endswith('.zip'):
with ZipFile(obj_name, 'r') as zr:
zr.extractall()
print(f'Extracted {obj_name}')
os.remove(obj_name)
def _upload_files(
pipeline_id: int,
*,
artifact_type: ArtifactType,
job_name: str,
job_id: str,
) -> None:
has_file = False
with ZipFile(
f'{job_id}.zip',
'w',
compression=zipfile.ZIP_DEFLATED,
# 1 is the fastest compression level
# the size differs not much between 1 and 9
compresslevel=1,
) as zw:
for pattern in TYPE_PATTERNS_DICT[artifact_type]:
for file in glob.glob(pattern, recursive=True):
zw.write(file)
has_file = True
try:
if has_file:
obj_name = f'{pipeline_id}/{artifact_type.value}/{sanitize_job_name(job_name)}/{job_id}.zip'
client.fput_object(getenv('IDF_S3_BUCKET'), obj_name, f'{job_id}.zip')
print(f'Created archive file: {job_id}.zip, uploaded as {obj_name}')
finally:
os.remove(f'{job_id}.zip')
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Download or upload files from/to S3, the object name would be '
'[PIPELINE_ID]/[ACTION_TYPE]/[JOB_NAME]/[JOB_ID].zip.'
'\n'
'For example: 123456/binaries/build_pytest_examples_esp32/123456789.zip',
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
common_args = argparse.ArgumentParser(add_help=False, formatter_class=argparse.ArgumentDefaultsHelpFormatter)
common_args.add_argument('--pipeline-id', type=int, help='Pipeline ID')
common_args.add_argument(
'--type', type=str, nargs='+', choices=[a.value for a in ArtifactType], help='Types of files to download'
)
action = parser.add_subparsers(dest='action', help='Download or Upload')
download = action.add_parser('download', help='Download files from S3', parents=[common_args])
upload = action.add_parser('upload', help='Upload files to S3', parents=[common_args])
download.add_argument('--job-name', type=str, help='Job name pattern')
download.add_argument('--job-id', type=int, help='Job ID')
upload.add_argument('--job-name', type=str, help='Job name')
upload.add_argument('--job-id', type=int, help='Job ID')
args = parser.parse_args()
client = get_minio_client()
ci_pipeline_id = args.pipeline_id or getenv('CI_PIPELINE_ID') # required
if args.action == 'download':
method = _download_files
ci_job_name = args.job_name # optional
ci_job_id = args.job_id # optional
else:
method = _upload_files # type: ignore
ci_job_name = args.job_name or getenv('CI_JOB_NAME') # required
ci_job_id = args.job_id or getenv('CI_JOB_ID') # required
if args.type:
types = [ArtifactType(t) for t in args.type]
else:
types = list(ArtifactType)
print(f'{"Pipeline ID":15}: {ci_pipeline_id}')
if ci_job_name:
print(f'{"Job name":15}: {ci_job_name}')
if ci_job_id:
print(f'{"Job ID":15}: {ci_job_id}')
for _t in types:
method(ci_pipeline_id, artifact_type=_t, job_name=ci_job_name, job_id=ci_job_id) # type: ignore

View File

@@ -54,30 +54,23 @@ build_stage2() {
# Override EXTRA_CFLAGS and EXTRA_CXXFLAGS in the environment # Override EXTRA_CFLAGS and EXTRA_CXXFLAGS in the environment
export EXTRA_CFLAGS=${PEDANTIC_CFLAGS/-Werror=unused-variable -Werror=unused-but-set-variable -Werror=unused-function/} export EXTRA_CFLAGS=${PEDANTIC_CFLAGS/-Werror=unused-variable -Werror=unused-but-set-variable -Werror=unused-function/}
export EXTRA_CXXFLAGS=${PEDANTIC_CXXFLAGS/-Werror=unused-variable -Werror=unused-but-set-variable -Werror=unused-function/} export EXTRA_CXXFLAGS=${PEDANTIC_CXXFLAGS/-Werror=unused-variable -Werror=unused-but-set-variable -Werror=unused-function/}
python -m idf_build_apps build -v \ idf-build-apps build \
-p ${TEMPLATE_APP_PATH} \ -p ${TEMPLATE_APP_PATH} \
-t all \
${CONFIG_STR} \ ${CONFIG_STR} \
--work-dir ${BUILD_PATH}/cmake \ --work-dir ${BUILD_PATH}/cmake \
--build-dir ${BUILD_DIR} \ --build-dir ${BUILD_DIR} \
--build-log ${BUILD_LOG_CMAKE} \ --build-log ${BUILD_LOG_CMAKE} \
--size-file size.json \
--keep-going \
--collect-size-info size_info.txt \
--default-build-targets esp32 esp32s2 esp32s3 esp32c2 esp32c3 esp32c5 esp32c6 esp32h2 esp32p4 esp32c61 esp32h21 esp32h4 --default-build-targets esp32 esp32s2 esp32s3 esp32c2 esp32c3 esp32c5 esp32c6 esp32h2 esp32p4 esp32c61 esp32h21 esp32h4
} }
build_stage1() { build_stage1() {
CONFIG_STR=$(get_config_str sdkconfig.ci2.*=) CONFIG_STR=$(get_config_str sdkconfig.ci2.*=)
python -m idf_build_apps build -v \ idf-build-apps build \
-p ${TEMPLATE_APP_PATH} \ -p ${TEMPLATE_APP_PATH} \
-t all \
${CONFIG_STR} \ ${CONFIG_STR} \
--work-dir ${BUILD_PATH}/cmake \ --work-dir ${BUILD_PATH}/cmake \
--build-dir ${BUILD_DIR} \ --build-dir ${BUILD_DIR} \
--build-log ${BUILD_LOG_CMAKE} \ --build-log ${BUILD_LOG_CMAKE} \
--size-file size.json \
--collect-size-info size_info.txt \
--default-build-targets esp32 esp32s2 esp32s3 esp32c2 esp32c3 esp32c5 esp32c6 esp32h2 esp32p4 esp32c61 esp32h21 esp32h4 --default-build-targets esp32 esp32s2 esp32s3 esp32c2 esp32c3 esp32c5 esp32c6 esp32h2 esp32p4 esp32c61 esp32h21 esp32h4
} }

View File

@@ -1,29 +1,23 @@
#!/usr/bin/env python #!/usr/bin/env python
# SPDX-FileCopyrightText: 2022-2024 Espressif Systems (Shanghai) CO LTD # SPDX-FileCopyrightText: 2022-2025 Espressif Systems (Shanghai) CO LTD
# SPDX-License-Identifier: Apache-2.0 # SPDX-License-Identifier: Apache-2.0
import argparse import argparse
import difflib
import inspect import inspect
import os import os
import re import re
import sys import sys
import typing as t
from collections import defaultdict
from pathlib import Path from pathlib import Path
from typing import Dict
from typing import List
from typing import Optional
from typing import Tuple
import yaml import yaml
from idf_ci_utils import get_all_manifest_files
from idf_ci_utils import IDF_PATH from idf_ci_utils import IDF_PATH
from idf_ci_utils import idf_relpath
YES = u'\u2713'
NO = u'\u2717'
# | Supported Target | ... | # | Supported Target | ... |
# | ---------------- | --- | # | ---------------- | --- |
SUPPORTED_TARGETS_TABLE_REGEX = re.compile( SUPPORTED_TARGETS_TABLE_REGEX = re.compile(r'^\|\s*Supported Targets.+$\n^\|(?:\s*|-).+$\n?', re.MULTILINE)
r'^\|\s*Supported Targets.+$\n^\|(?:\s*|-).+$\n?', re.MULTILINE
)
USUAL_TO_FORMAL = { USUAL_TO_FORMAL = {
'esp32': 'ESP32', 'esp32': 'ESP32',
@@ -41,54 +35,51 @@ USUAL_TO_FORMAL = {
'linux': 'Linux', 'linux': 'Linux',
} }
FORMAL_TO_USUAL = { FORMAL_TO_USUAL = {v: k for k, v in USUAL_TO_FORMAL.items()}
'ESP32': 'esp32',
'ESP32-S2': 'esp32s2',
'ESP32-S3': 'esp32s3',
'ESP32-C3': 'esp32c3',
'ESP32-C2': 'esp32c2',
'ESP32-C6': 'esp32c6',
'ESP32-C5': 'esp32c5',
'ESP32-H2': 'esp32h2',
'ESP32-P4': 'esp32p4',
'ESP32-C61': 'esp32c61',
'ESP32-H21': 'esp32h21',
'ESP32-H4': 'esp32h4',
'Linux': 'linux',
}
def doublequote(s: str) -> str: def diff_lists(
if s.startswith('"') and s.endswith('"'): list1: t.List[str], list2: t.List[str], title1: str, title2: str, excluded: t.Optional[t.List[str]] = None
return s ) -> None:
"""
Compare two lists and print the differences.
"""
diff = difflib.ndiff(list1, list2)
if not diff:
return
return f'"{s}"' print(f'Difference between {title1} and {title2}:')
for line in diff:
last_part = line.split(' ', 1)[-1]
if excluded and last_part in excluded:
print(line + ' ' + '(excluded)')
else:
print(line)
def check_readme( def check_readme(
paths: List[str], paths: t.List[str],
exclude_dirs: Optional[List[str]] = None, exclude_dirs: t.Optional[t.List[str]] = None,
extra_default_build_targets: Optional[List[str]] = None, extra_default_build_targets: t.Optional[t.List[str]] = None,
) -> None: ) -> None:
from idf_build_apps import App, find_apps from idf_build_apps import App
from idf_build_apps import find_apps
from idf_build_apps.constants import SUPPORTED_TARGETS from idf_build_apps.constants import SUPPORTED_TARGETS
def get_readme_path(_app: App) -> Optional[str]: def get_readme_path(_app_dir: str) -> t.Optional[str]:
_readme_path = os.path.join(_app.app_dir, 'README.md') _readme_path = os.path.join(_app_dir, 'README.md')
if not os.path.isfile(_readme_path): if not os.path.isfile(_readme_path):
_readme_path = os.path.join(_app.app_dir, '..', 'README.md') _readme_path = os.path.join(_app_dir, '..', 'README.md')
if not os.path.isfile(_readme_path): if not os.path.isfile(_readme_path):
_readme_path = None # type: ignore _readme_path = None # type: ignore
return _readme_path return _readme_path
def _generate_new_support_table_str(_app: App) -> str: def _generate_new_support_table_str(_app_dir: str, _manifest_supported_targets: t.List[str]) -> str:
# extra space here # extra space here
table_headers = [ table_headers = [f'{USUAL_TO_FORMAL[target]}' for target in _manifest_supported_targets]
f'{USUAL_TO_FORMAL[target]}' for target in _app.supported_targets
]
table_headers = ['Supported Targets'] + table_headers table_headers = ['Supported Targets'] + table_headers
res = '| ' + ' | '.join(table_headers) + ' |\n' res = '| ' + ' | '.join(table_headers) + ' |\n'
@@ -96,8 +87,8 @@ def check_readme(
return res return res
def _parse_existing_support_table_str(_app: App) -> Tuple[Optional[str], List[str]]: def _parse_existing_support_table_str(_app_dir: str) -> t.Tuple[t.Optional[str], t.List[str]]:
_readme_path = get_readme_path(_app) _readme_path = get_readme_path(_app_dir)
if not _readme_path: if not _readme_path:
return None, SUPPORTED_TARGETS return None, SUPPORTED_TARGETS
@@ -109,41 +100,31 @@ def check_readme(
return None, SUPPORTED_TARGETS return None, SUPPORTED_TARGETS
# old style # old style
parts = [ parts = [part.strip() for part in support_string[0].split('\n', 1)[0].split('|') if part.strip()]
part.strip()
for part in support_string[0].split('\n', 1)[0].split('|')
if part.strip()
]
return support_string[0].strip(), [FORMAL_TO_USUAL[part] for part in parts[1:] if part in FORMAL_TO_USUAL] return support_string[0].strip(), [FORMAL_TO_USUAL[part] for part in parts[1:] if part in FORMAL_TO_USUAL]
def check_enable_build(_app: App, _old_supported_targets: List[str]) -> bool: def check_enable_build(
if _app.supported_targets == sorted(_old_supported_targets): _app_dir: str, _manifest_supported_targets: t.List[str], _old_supported_targets: t.List[str]
) -> bool:
if _manifest_supported_targets == sorted(_old_supported_targets):
return True return True
_readme_path = get_readme_path(_app) _readme_path = get_readme_path(_app_dir)
if_clause = f'IDF_TARGET in [{", ".join([doublequote(target) for target in sorted(_old_supported_targets)])}]' diff_lists(
sorted(_manifest_supported_targets),
sorted(_old_supported_targets),
'manifest-enabled targets',
f'supported targets table in {_readme_path}',
)
print( print(
inspect.cleandoc( inspect.cleandoc(
f''' f"""
{_app.app_dir}: To enable/disable build targets, please modify your manifest file:
- enable build targets according to the manifest file: {_app.supported_targets} {App.MANIFEST.most_suitable_rule(app_dir).by_manifest_file}
- enable build targets according to the old Supported Targets table under readme "{_readme_path}": {_old_supported_targets}
If you want to disable some targets, please use the following snippet: Please refer to https://docs.espressif.com/projects/idf-build-apps/en/latest/references/manifest.html#enable-disable-rules
for more details.
# Please combine this with the original one """
#
# Notes:
# - please keep in mind to avoid duplicated folders as yaml keys
# - please use parentheses to group conditions, the "or" and "and" operators could only accept two operands
{_app.app_dir}:
enable:
- if: {if_clause}
temporary: true
reason: <why only enable build jobs for these targets>
'''
) )
) )
@@ -153,47 +134,52 @@ def check_readme(
find_apps( find_apps(
paths, paths,
'all', 'all',
recursive=True,
exclude_list=exclude_dirs or [], exclude_list=exclude_dirs or [],
manifest_files=get_all_manifest_files(),
default_build_targets=SUPPORTED_TARGETS + extra_default_build_targets, default_build_targets=SUPPORTED_TARGETS + extra_default_build_targets,
) )
) )
exit_code = 0 exit_code = 0
apps_grouped: t.Dict[str, t.List[App]] = defaultdict(list)
checked_app_dirs = set()
for app in apps: for app in apps:
if app.app_dir not in checked_app_dirs: apps_grouped[app.app_dir].append(app)
checked_app_dirs.add(app.app_dir)
else:
continue
replace_str, old_supported_targets = _parse_existing_support_table_str(app) for app_dir in apps_grouped:
success = check_enable_build(app, old_supported_targets) replace_str, old_supported_targets = _parse_existing_support_table_str(app_dir)
# manifest defined ones
manifest_defined_targets = sorted(
{
target
for app in apps_grouped[app_dir]
for target in (
App.MANIFEST.enable_build_targets(app_dir)
+ App.MANIFEST.enable_build_targets(app_dir, config_name=app.config_name)
)
}
)
success = check_enable_build(app_dir, manifest_defined_targets, old_supported_targets)
if not success: if not success:
print(f'check_enable_build failed for app: {app}') print(f'check_enable_build failed for app: {app_dir}')
print('-' * 80) print('-' * 80)
exit_code = 1 exit_code = 1
readme_path = get_readme_path(app) readme_path = get_readme_path(app_dir)
new_readme_str = _generate_new_support_table_str(app_dir, manifest_defined_targets)
# no readme, create a new file # no readme, create a new file
if not readme_path: if not readme_path:
with open(os.path.join(app.app_dir, 'README.md'), 'w') as fw: with open(os.path.join(app_dir, 'README.md'), 'w') as fw:
fw.write(_generate_new_support_table_str(app) + '\n') fw.write(new_readme_str + '\n')
print(f'Added new README file: {os.path.join(app.app_dir, "README.md")}') print(f'Added new README file: {os.path.join(app_dir, "README.md")}')
print('-' * 80) print('-' * 80)
exit_code = 1 exit_code = 1
# has old table, but different string # has old table, but different string
elif replace_str and replace_str != _generate_new_support_table_str(app): elif replace_str and replace_str != new_readme_str:
with open(readme_path) as fr: with open(readme_path) as fr:
readme_str = fr.read() readme_str = fr.read()
with open(readme_path, 'w') as fw: with open(readme_path, 'w') as fw:
fw.write( fw.write(readme_str.replace(replace_str, new_readme_str))
readme_str.replace(
replace_str, _generate_new_support_table_str(app)
)
)
print(f'Modified README file: {readme_path}') print(f'Modified README file: {readme_path}')
print('-' * 80) print('-' * 80)
exit_code = 1 exit_code = 1
@@ -203,9 +189,7 @@ def check_readme(
readme_str = fr.read() readme_str = fr.read()
with open(readme_path, 'w') as fw: with open(readme_path, 'w') as fw:
fw.write( fw.write(new_readme_str + '\n\n' + readme_str) # extra new line
_generate_new_support_table_str(app) + '\n\n' + readme_str
) # extra new line
print(f'Modified README file: {readme_path}') print(f'Modified README file: {readme_path}')
print('-' * 80) print('-' * 80)
@@ -215,90 +199,61 @@ def check_readme(
def check_test_scripts( def check_test_scripts(
paths: List[str], paths: t.List[str],
exclude_dirs: Optional[List[str]] = None, exclude_dirs: t.Optional[t.List[str]] = None,
bypass_check_test_targets: Optional[List[str]] = None, bypass_check_test_targets: t.Optional[t.List[str]] = None,
extra_default_build_targets: Optional[List[str]] = None, extra_default_build_targets: t.Optional[t.List[str]] = None,
) -> None: ) -> None:
from idf_build_apps import App, find_apps from idf_build_apps import App
from idf_build_apps import find_apps
from idf_build_apps.constants import SUPPORTED_TARGETS from idf_build_apps.constants import SUPPORTED_TARGETS
from idf_pytest.script import get_pytest_cases from idf_ci import get_pytest_cases
# takes long time, run only in CI # takes long time, run only in CI
# dict: # dict:
# { # {
# app_dir: { # app_dir: {
# 'script_path': 'path/to/script', # 'script_paths': {'path/to/script1', 'path/to/script2'},
# 'targets': ['esp32', 'esp32s2', 'esp32s3', 'esp32c3', 'esp32c2', 'linux'], # 'targets': {'esp32', 'esp32s2', 'esp32s3', 'esp32c3', 'esp32c2', 'linux'},
# } # }
# } # }
def check_enable_test( def check_enable_test(
_app: App, _app_dir: str,
_pytest_app_dir_targets_dict: Dict[str, Dict[str, str]], _manifest_verified_targets: t.List[str],
_pytest_app_dir_targets_dict: t.Dict[str, t.Dict[str, t.Set[str]]],
) -> bool: ) -> bool:
if _app.app_dir in _pytest_app_dir_targets_dict: if _app_dir in _pytest_app_dir_targets_dict:
test_script_path = _pytest_app_dir_targets_dict[_app.app_dir]['script_path'] test_script_paths = _pytest_app_dir_targets_dict[_app_dir]['script_paths']
actual_verified_targets = sorted( actual_verified_targets = sorted(set(_pytest_app_dir_targets_dict[_app_dir]['targets']))
set(_pytest_app_dir_targets_dict[_app.app_dir]['targets'])
)
else: else:
return True # no test case return True # no test case
actual_extra_tested_targets = set(actual_verified_targets) - set( if _manifest_verified_targets == actual_verified_targets:
_app.verified_targets
)
if actual_extra_tested_targets - set(bypass_check_test_targets or []):
print(
inspect.cleandoc(
f'''
{_app.app_dir}:
- enable test targets according to the manifest file: {_app.verified_targets}
- enable test targets according to the test scripts: {actual_verified_targets}
test scripts enabled targets should be a subset of the manifest file declared ones.
Please check the test script: {test_script_path}.
'''
)
)
return False
if _app.verified_targets == actual_verified_targets:
return True return True
elif not (set(_app.verified_targets) - set(actual_verified_targets + (bypass_check_test_targets or []))): elif not (set(_manifest_verified_targets) - set(actual_verified_targets + (bypass_check_test_targets or []))):
print(f'WARNING: bypass test script check on {_app.app_dir} for targets {bypass_check_test_targets} ')
return True return True
if_clause = f'IDF_TARGET in [{", ".join([doublequote(target) for target in sorted(set(_app.verified_targets) - set(actual_verified_targets))])}]' _title2 = 'pytest enabled targets in test scripts: \n'
for script_path in test_script_paths:
_title2 += f' - {script_path}\n'
diff_lists(
_manifest_verified_targets,
actual_verified_targets,
'manifest-enabled targets',
_title2.rstrip(),
excluded=bypass_check_test_targets or [],
)
print( print(
inspect.cleandoc( inspect.cleandoc(
f''' f"""
{_app.app_dir}: To enable/disable test targets, please modify your manifest file:
- enable test targets according to the manifest file: {_app.verified_targets} {App.MANIFEST.most_suitable_rule(app_dir).by_manifest_file}
- enable test targets according to the test scripts: {actual_verified_targets}
the test scripts enabled test targets should be the same with the manifest file enabled ones. Please check To understand how to enable/disable test targets, please refer to:
the test script manually: {test_script_path}. https://docs.espressif.com/projects/pytest-embedded/en/latest/usages/markers.html#idf-parametrize
If you want to enable test targets in the pytest test scripts, please add `@pytest.mark.MISSING_TARGET` """
marker above the test case function.
If you want to disable the test targets in the manifest file, please modify your manifest file with
the following code snippet:
# Please combine this with the original one
#
# Notes:
# - please keep in mind to avoid duplicated folders as yaml keys
# - please use parentheses to group conditions, the "or" and "and" operators could only accept two operands
{_app.app_dir}:
disable_test:
- if: {if_clause}
temporary: true
reason: <why you disable this test>
'''
) )
) )
return False return False
@@ -307,42 +262,50 @@ def check_test_scripts(
find_apps( find_apps(
paths, paths,
'all', 'all',
recursive=True,
exclude_list=exclude_dirs or [], exclude_list=exclude_dirs or [],
manifest_files=get_all_manifest_files(),
default_build_targets=SUPPORTED_TARGETS + extra_default_build_targets, default_build_targets=SUPPORTED_TARGETS + extra_default_build_targets,
) )
) )
apps_grouped: t.Dict[str, t.List[App]] = defaultdict(list)
for app in apps:
apps_grouped[app.app_dir].append(app)
exit_code = 0 exit_code = 0
pytest_cases = get_pytest_cases(paths) pytest_cases = get_pytest_cases(
paths=paths,
marker_expr=None, # don't filter host_test
)
pytest_app_dir_targets_dict = {} pytest_app_dir_targets_dict = {}
for case in pytest_cases: for case in pytest_cases:
for pytest_app in case.apps: for pytest_app in case.apps:
app_dir = os.path.relpath(pytest_app.path, IDF_PATH) app_dir = idf_relpath(pytest_app.path)
if app_dir not in pytest_app_dir_targets_dict: if app_dir not in pytest_app_dir_targets_dict:
pytest_app_dir_targets_dict[app_dir] = { pytest_app_dir_targets_dict[app_dir] = {
'script_path': case.path, 'script_paths': {case.path},
'targets': [pytest_app.target], 'targets': {pytest_app.target},
} }
else: else:
pytest_app_dir_targets_dict[app_dir]['targets'].append( pytest_app_dir_targets_dict[app_dir]['script_paths'].add(case.path)
pytest_app.target pytest_app_dir_targets_dict[app_dir]['targets'].add(pytest_app.target)
for app_dir in apps_grouped:
# manifest defined ones
manifest_defined_targets = sorted(
{
target
for app in apps_grouped[app_dir]
for target in (
App.MANIFEST.enable_test_targets(app_dir)
+ App.MANIFEST.enable_test_targets(app_dir, config_name=app.config_name)
)
}
) )
checked_app_dirs = set() success = check_enable_test(app_dir, manifest_defined_targets, pytest_app_dir_targets_dict)
for app in apps:
if app.app_dir not in checked_app_dirs:
checked_app_dirs.add(app.app_dir)
else:
continue
success = check_enable_test(
app, pytest_app_dir_targets_dict
)
if not success: if not success:
print(f'check_enable_test failed for app: {app}') print(f'check_enable_test failed for app: {app_dir}')
print('-' * 80) print('-' * 80)
exit_code = 1 exit_code = 1
continue continue
@@ -350,26 +313,6 @@ def check_test_scripts(
sys.exit(exit_code) sys.exit(exit_code)
def check_exist() -> None:
exit_code = 0
config_files = get_all_manifest_files()
for file in config_files:
if 'managed_components' in Path(file).parts:
continue
with open(file) as fr:
configs = yaml.safe_load(fr)
for path in configs.keys():
if path.startswith('.'):
continue
if not os.path.isdir(path):
print(f'Path \'{path}\' referred in \'{file}\' does not exist!')
exit_code = 1
sys.exit(exit_code)
if __name__ == '__main__': if __name__ == '__main__':
if 'CI_JOB_ID' not in os.environ: if 'CI_JOB_ID' not in os.environ:
os.environ['CI_JOB_ID'] = 'fake' # this is a CI script os.environ['CI_JOB_ID'] = 'fake' # this is a CI script
@@ -400,20 +343,13 @@ if __name__ == '__main__':
arg = parser.parse_args() arg = parser.parse_args()
# Since this script is executed from the pre-commit hook environment, make sure IDF_PATH is set # Since this script is executed from the pre-commit hook environment, make sure IDF_PATH is set
os.environ['IDF_PATH'] = os.path.realpath( os.environ['IDF_PATH'] = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..'))
os.path.join(os.path.dirname(__file__), '..', '..')
)
if arg.action == 'check-exist':
check_exist()
else:
check_dirs = set() check_dirs = set()
# check if *_caps.h files changed # check if *_caps.h files changed
check_all = False check_all = False
soc_caps_header_files = list( soc_caps_header_files = list((Path(IDF_PATH) / 'components' / 'soc').glob('**/*_caps.h'))
(Path(IDF_PATH) / 'components' / 'soc').glob('**/*_caps.h')
)
for p in arg.paths: for p in arg.paths:
if Path(p).resolve() in soc_caps_header_files: if Path(p).resolve() in soc_caps_header_files:
check_all = True check_all = True
@@ -429,25 +365,23 @@ if __name__ == '__main__':
if check_all: if check_all:
check_dirs = {IDF_PATH} check_dirs = {IDF_PATH}
_exclude_dirs = [os.path.join(IDF_PATH, 'tools', 'unit-test-app'), _exclude_dirs = [
os.path.join(IDF_PATH, 'tools', 'unit-test-app'),
os.path.join(IDF_PATH, 'tools', 'test_build_system', 'build_test_app'), os.path.join(IDF_PATH, 'tools', 'test_build_system', 'build_test_app'),
os.path.join(IDF_PATH, 'tools', 'templates', 'sample_project')] os.path.join(IDF_PATH, 'tools', 'templates', 'sample_project'),
]
else: else:
_exclude_dirs = [os.path.join(IDF_PATH, 'tools', 'templates', 'sample_project')] _exclude_dirs = [os.path.join(IDF_PATH, 'tools', 'templates', 'sample_project')]
extra_default_build_targets_list: List[str] = [] extra_default_build_targets_list: t.List[str] = []
bypass_check_test_targets_list: List[str] = [] bypass_check_test_targets_list: t.List[str] = []
if arg.config: if arg.config:
with open(arg.config) as fr: with open(arg.config) as fr:
configs = yaml.safe_load(fr) configs = yaml.safe_load(fr)
if configs: if configs:
extra_default_build_targets_list = ( extra_default_build_targets_list = configs.get('extra_default_build_targets') or []
configs.get('extra_default_build_targets') or [] bypass_check_test_targets_list = configs.get('bypass_check_test_targets') or []
)
bypass_check_test_targets_list = (
configs.get('bypass_check_test_targets') or []
)
if arg.action == 'check-readmes': if arg.action == 'check-readmes':
os.environ['INCLUDE_NIGHTLY_RUN'] = '1' os.environ['INCLUDE_NIGHTLY_RUN'] = '1'

View File

@@ -1,47 +0,0 @@
#!/usr/bin/env python3
# SPDX-FileCopyrightText: 2025 Espressif Systems (Shanghai) CO LTD
# SPDX-License-Identifier: Apache-2.0
import argparse
import os
import sys
from pathlib import Path
import pytest
sys.path.insert(0, os.path.dirname(__file__))
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
from idf_ci_utils import IDF_PATH # noqa: E402
os.environ['IDF_PATH'] = IDF_PATH
os.environ['PYTEST_IGNORE_COLLECT_IMPORT_ERROR'] = '1'
from idf_pytest.plugin import IdfPytestEmbedded # noqa: E402
def main() -> None:
parser = argparse.ArgumentParser(description='Pytest linter check')
parser.add_argument(
'files',
nargs='*',
help='Python files to check (full paths separated by space)',
)
args = parser.parse_args()
# Convert input files to pytest-compatible paths
pytest_scripts = [str(Path(f).resolve()) for f in args.files]
cmd = [
'--collect-only',
*pytest_scripts,
'--target', 'all',
'-p', 'test_linter',
]
res = pytest.main(cmd, plugins=[IdfPytestEmbedded('all')])
sys.exit(res)
if __name__ == '__main__':
main()

View File

@@ -1,293 +0,0 @@
# SPDX-FileCopyrightText: 2021-2024 Espressif Systems (Shanghai) CO LTD
# SPDX-License-Identifier: Apache-2.0
"""
This file is used in CI generate binary files for different kinds of apps
"""
import argparse
import os
import sys
import typing as t
import unittest
from pathlib import Path
import yaml
from dynamic_pipelines.constants import DEFAULT_TEST_PATHS
from idf_build_apps import build_apps
from idf_build_apps import setup_logging
from idf_build_apps.utils import semicolon_separated_str_to_list
from idf_pytest.constants import DEFAULT_BUILD_TEST_RULES_FILEPATH
from idf_pytest.constants import DEFAULT_CONFIG_RULES_STR
from idf_pytest.constants import DEFAULT_FULL_BUILD_TEST_COMPONENTS
from idf_pytest.constants import DEFAULT_FULL_BUILD_TEST_FILEPATTERNS
from idf_pytest.constants import DEFAULT_IGNORE_WARNING_FILEPATH
from idf_pytest.script import get_all_apps
CI_ENV_VARS = {
'EXTRA_CFLAGS': '-Werror -Werror=deprecated-declarations -Werror=unused-variable '
'-Werror=unused-but-set-variable -Werror=unused-function -Wstrict-prototypes',
'EXTRA_CXXFLAGS': '-Werror -Werror=deprecated-declarations -Werror=unused-variable '
'-Werror=unused-but-set-variable -Werror=unused-function',
'LDGEN_CHECK_MAPPING': '1',
'IDF_CI_BUILD': '1',
}
def main(args: argparse.Namespace) -> None:
extra_default_build_targets: t.List[str] = []
if args.default_build_test_rules:
with open(args.default_build_test_rules) as fr:
configs = yaml.safe_load(fr)
if configs:
extra_default_build_targets = configs.get('extra_default_build_targets') or []
test_related_apps, non_test_related_apps = get_all_apps(
args.paths,
args.target,
config_rules_str=args.config,
marker_expr=args.marker_expr,
filter_expr=args.filter_expr,
preserve_all=args.preserve_all,
extra_default_build_targets=extra_default_build_targets,
modified_files=args.modified_files,
modified_components=args.modified_components,
ignore_app_dependencies_components=args.ignore_app_dependencies_components,
ignore_app_dependencies_filepatterns=args.ignore_app_dependencies_filepatterns,
)
if args.pytest_apps:
apps = test_related_apps
else:
apps = non_test_related_apps
if args.extra_preserve_dirs:
for app in apps:
if app.preserve:
continue
for extra_preserve_dir in args.extra_preserve_dirs:
abs_extra_preserve_dir = Path(extra_preserve_dir).resolve()
abs_app_dir = Path(app.app_dir).resolve()
if abs_extra_preserve_dir == abs_app_dir or abs_extra_preserve_dir in abs_app_dir.parents:
app.preserve = True
res = build_apps(
sorted(apps),
parallel_count=args.parallel_count,
parallel_index=args.parallel_index,
dry_run=False,
build_verbose=args.build_verbose,
keep_going=True,
collect_size_info='size_info.txt',
collect_app_info=args.collect_app_info,
ignore_warning_strs=args.ignore_warning_str,
ignore_warning_file=args.ignore_warning_file,
copy_sdkconfig=args.copy_sdkconfig,
modified_components=args.modified_components,
modified_files=args.modified_files,
ignore_app_dependencies_components=args.ignore_app_dependencies_components,
ignore_app_dependencies_filepatterns=args.ignore_app_dependencies_filepatterns,
junitxml=args.junitxml,
)
sys.exit(res)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Build all the apps for different test types. Will auto remove those non-test apps binaries',
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument('paths', nargs='*', help='Paths to the apps to build.')
parser.add_argument(
'-t',
'--target',
default='all',
help='Build apps for given target',
)
parser.add_argument(
'--config',
default=DEFAULT_CONFIG_RULES_STR,
nargs='+',
help='Adds configurations (sdkconfig file names) to build. This can either be '
'FILENAME[=NAME] or FILEPATTERN. FILENAME is the name of the sdkconfig file, '
'relative to the project directory, to be used. Optional NAME can be specified, '
'which can be used as a name of this configuration. FILEPATTERN is the name of '
'the sdkconfig file, relative to the project directory, with at most one wildcard. '
'The part captured by the wildcard is used as the name of the configuration.',
)
parser.add_argument(
'-v',
'--verbose',
action='count',
help='Increase the LOGGER level of the script. Can be specified multiple times.',
)
parser.add_argument(
'--build-verbose',
action='store_true',
help='Enable verbose output from build system.',
)
parser.add_argument(
'--preserve-all',
action='store_true',
help='Preserve the binaries for all apps when specified.',
)
parser.add_argument('--parallel-count', default=1, type=int, help='Number of parallel build jobs.')
parser.add_argument(
'--parallel-index',
default=1,
type=int,
help='Index (1-based) of the job, out of the number specified by --parallel-count.',
)
parser.add_argument(
'--collect-app-info',
default='list_job_@p.txt',
help='If specified, the test case name and app info json will be written to this file',
)
parser.add_argument(
'--ignore-warning-str',
nargs='+',
help='Ignore the warning string that match the specified regex in the build output. space-separated list',
)
parser.add_argument(
'--ignore-warning-file',
default=DEFAULT_IGNORE_WARNING_FILEPATH,
type=argparse.FileType('r'),
help='Ignore the warning strings in the specified file. Each line should be a regex string.',
)
parser.add_argument(
'--copy-sdkconfig',
action='store_true',
help='Copy the sdkconfig file to the build directory.',
)
parser.add_argument(
'--extra-preserve-dirs',
nargs='+',
help='also preserve binaries of the apps under the specified dirs',
)
parser.add_argument(
'--pytest-apps',
action='store_true',
help='Only build apps required by pytest scripts. '
        'Will build non-test-related apps if this flag is unspecified.',
    )
    parser.add_argument(
        '-m',
        '--marker-expr',
        default='not host_test',  # host_test apps would be built and tested under the same job
        help='only build tests matching given mark expression. For example: -m "host_test and generic". '
        'Works only for pytest',
    )
    parser.add_argument(
        '-k',
        '--filter-expr',
        help='only build tests matching given filter expression. For example: -k "test_hello_world". '
        'Works only for pytest',
    )
    parser.add_argument(
        '--default-build-test-rules',
        default=DEFAULT_BUILD_TEST_RULES_FILEPATH,
        help='default build test rules config file',
    )
    parser.add_argument(
        '--skip-setting-flags',
        action='store_true',
        help='by default this script would set the build flags exactly the same as the CI ones. '
        'Set this flag to use your local build flags.',
    )
    parser.add_argument(
        '--modified-components',
        type=semicolon_separated_str_to_list,
        default=os.getenv('MR_MODIFIED_COMPONENTS'),
        help='semicolon-separated string which specifies the modified components. '
        'apps with `depends_components` set in the corresponding manifest files would only be built '
        'if they depend on any of the specified components. '
        'If set to "", the value would be considered as None. '
        'If set to ";", the value would be considered as an empty list',
    )
    parser.add_argument(
        '--modified-files',
        type=semicolon_separated_str_to_list,
        default=os.getenv('MR_MODIFIED_FILES'),
        help='semicolon-separated string which specifies the modified files. '
        'apps with `depends_filepatterns` set in the corresponding manifest files would only be built '
        'if any of the specified file patterns matches any of the specified modified files. '
        'If set to "", the value would be considered as None. '
        'If set to ";", the value would be considered as an empty list',
    )
    parser.add_argument(
        '-ic',
        '--ignore-app-dependencies-components',
        type=semicolon_separated_str_to_list,
        help='semicolon-separated string which specifies the modified components used for '
        'ignoring the app dependency checks. '
        'The `depends_components` and `depends_filepatterns` set in the manifest files will be ignored '
        'when any of the specified components matches any of the modified components. '
        'Must be used together with --modified-components. '
        'If set to "", the value would be considered as None. '
        'If set to ";", the value would be considered as an empty list',
    )
    parser.add_argument(
        '-if',
        '--ignore-app-dependencies-filepatterns',
        type=semicolon_separated_str_to_list,
        help='semicolon-separated string which specifies the file patterns used for '
        'ignoring the app dependency checks. '
        'The `depends_components` and `depends_filepatterns` set in the manifest files will be ignored '
        'when any of the specified file patterns matches any of the modified files. '
        'Must be used together with --modified-files. '
        'If set to "", the value would be considered as None. '
        'If set to ";", the value would be considered as an empty list',
    )
    parser.add_argument(
        '--junitxml',
        default='build_summary_@p.xml',
        help='Path to the junitxml file. If specified, the junitxml file will be generated',
    )

    arguments = parser.parse_args()

    setup_logging(arguments.verbose)

    # set default paths
    if not arguments.paths:
        arguments.paths = DEFAULT_TEST_PATHS

    # set the CI build flags when running locally; CI jobs already have them set
    if not arguments.skip_setting_flags and not os.getenv('CI_JOB_ID'):
        for _k, _v in CI_ENV_VARS.items():
            os.environ[_k] = _v  # type: ignore
            print(f'env var {_k} set to "{_v}"')

    if os.getenv('IS_MR_PIPELINE') == '0' or os.getenv('BUILD_AND_TEST_ALL_APPS') == '1':
        print('Build and run all test cases, and compile all cmake apps')
        arguments.modified_components = None
        arguments.modified_files = None
        arguments.ignore_app_dependencies_components = None
        arguments.ignore_app_dependencies_filepatterns = None
    else:
        print(
            f'Build and run only test cases matching:\n'
            f'- modified components: {arguments.modified_components}\n'
            f'- modified files: {arguments.modified_files}'
        )
        if arguments.modified_components is not None and not arguments.ignore_app_dependencies_components:
            # setting default values
            arguments.ignore_app_dependencies_components = DEFAULT_FULL_BUILD_TEST_COMPONENTS
        if arguments.modified_files is not None and not arguments.ignore_app_dependencies_filepatterns:
            # setting default values
            arguments.ignore_app_dependencies_filepatterns = DEFAULT_FULL_BUILD_TEST_FILEPATTERNS

    main(arguments)


class TestParsingShellScript(unittest.TestCase):
    """
    This test case runs in CI jobs to make sure the CI build flags are the same as the ones recorded in CI_ENV_VARS
    """

    def test_parse_result(self) -> None:
        for k, v in CI_ENV_VARS.items():
            self.assertEqual(os.getenv(k), v)
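
Several of the options above share the same semicolon-separated convention, so it is worth pinning down: a minimal sketch of the expected parsing behaviour, inferred from the help strings (`""` means None, `";"` means an empty list); this is an illustration, not the vendored `idf_build_apps` implementation:

```python
import typing as t


def semicolon_separated_str_to_list(s: t.Optional[str]) -> t.Optional[t.List[str]]:
    """Illustrative re-implementation based on the help strings above (assumption)."""
    if s is None or s.strip() == '':
        return None
    return [part for part in s.strip().split(';') if part]


assert semicolon_separated_str_to_list('') is None
assert semicolon_separated_str_to_list(';') == []
assert semicolon_separated_str_to_list('esp_wifi;lwip') == ['esp_wifi', 'lwip']
```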

View File

@@ -3,7 +3,7 @@
 # internal use only for CI
 # get latest MR information by source branch
 #
-# SPDX-FileCopyrightText: 2020-2024 Espressif Systems (Shanghai) CO LTD
+# SPDX-FileCopyrightText: 2020-2025 Espressif Systems (Shanghai) CO LTD
 # SPDX-License-Identifier: Apache-2.0
 #
 import argparse
@@ -17,7 +17,8 @@ from gitlab_api import Gitlab
 from idf_ci_utils import IDF_PATH

 if t.TYPE_CHECKING:
-    from gitlab.v4.objects import ProjectCommit, ProjectMergeRequest
+    from gitlab.v4.objects import ProjectCommit
+    from gitlab.v4.objects import ProjectMergeRequest


 def _get_mr_obj(source_branch: str) -> t.Optional['ProjectMergeRequest']:
@@ -115,16 +116,6 @@
     return list(components)


-def get_target_in_tags(tags: str) -> str:
-    from idf_pytest.constants import TARGET_MARKERS
-
-    for x in tags.split(','):
-        if x in TARGET_MARKERS:
-            return x
-
-    raise RuntimeError(f'No target marker found in {tags}')
-
-
 def _print_list(_list: t.List[str], separator: str = '\n') -> None:
     print(separator.join(_list))
@@ -159,7 +150,5 @@ if __name__ == '__main__':
         _print_list([commit.id for commit in get_mr_commits(args.src_branch)])
     elif args.action == 'components':
         _print_list(get_mr_components(args.src_branch, args.modified_files))
-    elif args.action == 'target_in_tags':
-        print(get_target_in_tags(args.tags))
     else:
         raise NotImplementedError('not possible to get here')

View File

@@ -1,28 +0,0 @@
# SPDX-FileCopyrightText: 2024 Espressif Systems (Shanghai) CO LTD
# SPDX-License-Identifier: Apache-2.0
import os
import re
import sys

import yaml

if __name__ == '__main__':
    description = os.getenv('CI_MERGE_REQUEST_DESCRIPTION', '')
    if not description:
        sys.exit(0)

    pattern = r'^## Dynamic Pipeline Configuration(?:[^`]*?)```(?:\w+)(.*?)```'
    result = re.search(pattern, description, re.DOTALL | re.MULTILINE)
    if not result:
        sys.exit(0)

    data = yaml.safe_load(result.group(1))

    raise_report_exit_code = False
    with open('pipeline.env', 'a+') as f:
        if 'Test Case Filters' in data:
            raise_report_exit_code = True
            test_case_filters = ' or '.join(data.get('Test Case Filters'))
            f.write(f'TEST_CASE_FILTERS={test_case_filters}\n')

        if raise_report_exit_code:
            f.write('REPORT_EXIT_CODE=30\n')
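
For reference, the removed script above reacted to an MR description section shaped like the invented example below; this sketch exercises the same regex and YAML extraction locally:

```python
import re

import yaml

FENCE = '`' * 3  # avoid a literal line-leading triple backtick inside this example

# invented example of the "Dynamic Pipeline Configuration" MR description section
description = (
    '## Dynamic Pipeline Configuration\n'
    f'{FENCE}yaml\n'
    'Test Case Filters:\n'
    '  - esp32c3 and wifi\n'
    f'{FENCE}\n'
)

pattern = r'^## Dynamic Pipeline Configuration(?:[^`]*?)```(?:\w+)(.*?)```'
result = re.search(pattern, description, re.DOTALL | re.MULTILINE)
assert result is not None

data = yaml.safe_load(result.group(1))
print(' or '.join(data['Test Case Filters']))  # -> esp32c3 and wifi
```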

View File

@@ -4,33 +4,8 @@ import os
 from idf_ci_utils import IDF_PATH

-# use relative path to avoid absolute path in pipeline
-DEFAULT_TEST_PATHS = [
-    'examples',
-    os.path.join('tools', 'test_apps'),
-    'components',
-]
-
-DEFAULT_APPS_BUILD_PER_JOB = 60
-DEFAULT_CASES_TEST_PER_JOB = 30
-
-DEFAULT_BUILD_CHILD_PIPELINE_FILEPATH = os.path.join(IDF_PATH, 'build_child_pipeline.yml')
-DEFAULT_TARGET_TEST_CHILD_PIPELINE_FILEPATH = os.path.join(IDF_PATH, 'target_test_child_pipeline.yml')
-DEFAULT_BUILD_CHILD_PIPELINE_NAME = 'Build Child Pipeline'
-DEFAULT_TARGET_TEST_CHILD_PIPELINE_NAME = 'Target Test Child Pipeline'
-DEFAULT_TARGET_TEST_JOB_TEMPLATE_NAME = '.dynamic_target_test_template'
-TIMEOUT_4H_TEMPLATE_NAME = '.timeout_4h_template'
-
-TEST_RELATED_BUILD_JOB_NAME = 'build_test_related_apps'
-NON_TEST_RELATED_BUILD_JOB_NAME = 'build_non_test_related_apps'
-
 COMMENT_START_MARKER = '### Dynamic Pipeline Report'
-TEST_RELATED_APPS_FILENAME = 'test_related_apps.txt'
-NON_TEST_RELATED_APPS_FILENAME = 'non_test_related_apps.txt'
-
-TEST_RELATED_APPS_DOWNLOAD_URLS_FILENAME = 'test_related_apps_download_urls.yml'
 REPORT_TEMPLATE_FILEPATH = os.path.join(
     IDF_PATH, 'tools', 'ci', 'dynamic_pipelines', 'templates', 'report.template.html'
 )
@@ -44,8 +19,6 @@ RETRY_JOB_PICTURE_PATH = 'tools/ci/dynamic_pipelines/templates/retry-jobs.png'
 RETRY_JOB_TITLE = '\n\nRetry failed jobs with with help of "retry_failed_jobs" stage of the pipeline:'
 RETRY_JOB_PICTURE_LINK = '![Retry Jobs Image]({pic_url})'

-BUILD_ONLY_LABEL = 'For Maintainers: Only Build Tests'
-
 KNOWN_GENERATE_TEST_CHILD_PIPELINE_WARNINGS_FILEPATH = os.path.join(
     IDF_PATH, 'tools', 'ci', 'dynamic_pipelines', 'templates', 'known_generate_test_child_pipeline_warnings.yml'
 )

View File

@@ -1,131 +1,14 @@
 # SPDX-FileCopyrightText: 2024-2025 Espressif Systems (Shanghai) CO LTD
 # SPDX-License-Identifier: Apache-2.0
-import inspect
 import os
 import typing as t
 import urllib.parse
 from dataclasses import dataclass
 from xml.etree.ElementTree import Element

-import yaml
 from idf_ci_utils import IDF_PATH


-class Job:
-    def __init__(
-        self,
-        *,
-        name: str,
-        extends: t.Optional[t.List[str]] = None,
-        tags: t.Optional[t.List[str]] = None,
-        stage: t.Optional[str] = None,
-        parallel: int = 1,
-        variables: t.Optional[t.Dict[str, str]] = None,
-        script: t.Optional[t.List[str]] = None,
-        before_script: t.Optional[t.List[str]] = None,
-        after_script: t.Optional[t.List[str]] = None,
-        needs: t.Optional[t.List[str]] = None,
-        **kwargs: t.Any,
-    ) -> None:
-        self.name = name
-        self.extends = extends
-        self.tags = tags
-        self.stage = stage
-        self.parallel = parallel
-        self.variables = variables or {}
-        self.script = script
-        self.before_script = before_script
-        self.after_script = after_script
-        self.needs = needs
-
-        for k, v in kwargs.items():
-            setattr(self, k, v)
-
-    def __str__(self) -> str:
-        return yaml.dump(self.to_dict())  # type: ignore
-
-    def set_variable(self, key: str, value: str) -> None:
-        self.variables[key] = value
-
-    def to_dict(self) -> t.Dict[str, t.Any]:
-        res = {}
-        for k, v in inspect.getmembers(self):
-            if k.startswith('_'):
-                continue
-
-            # name is the dict key
-            if k == 'name':
-                continue
-
-            # parallel 1 is not allowed
-            if k == 'parallel' and v == 1:
-                continue
-
-            if v is None:
-                continue
-
-            if inspect.ismethod(v) or inspect.isfunction(v):
-                continue
-
-            res[k] = v
-
-        return {self.name: res}
-
-
-class EmptyJob(Job):
-    def __init__(
-        self,
-        *,
-        name: t.Optional[str] = None,
-        tags: t.Optional[t.List[str]] = None,
-        stage: t.Optional[str] = None,
-        before_script: t.Optional[t.List[str]] = None,
-        after_script: t.Optional[t.List[str]] = None,
-        **kwargs: t.Any,
-    ) -> None:
-        super().__init__(
-            name=name or 'fake_pass_job',
-            tags=tags or ['fast_run', 'shiny'],
-            stage=stage or 'build',
-            script=['echo "This is a fake job to pass the pipeline"'],
-            before_script=before_script or [],
-            after_script=after_script or [],
-            **kwargs,
-        )
-
-
-class BuildJob(Job):
-    def __init__(
-        self,
-        *,
-        extends: t.Optional[t.List[str]] = None,
-        tags: t.Optional[t.List[str]] = None,
-        stage: t.Optional[str] = None,
-        **kwargs: t.Any,
-    ) -> None:
-        super().__init__(
-            extends=extends or ['.dynamic_build_template'],
-            tags=tags or ['build', 'shiny'],
-            stage=stage or 'build',
-            **kwargs,
-        )
-
-
-class TargetTestJob(Job):
-    def __init__(
-        self,
-        *,
-        extends: t.Optional[t.List[str]] = None,
-        stage: t.Optional[str] = None,
-        **kwargs: t.Any,
-    ) -> None:
-        super().__init__(
-            extends=extends or ['.dynamic_target_test_template'],
-            stage=stage or 'target_test',
-            **kwargs,
-        )
-
-
 @dataclass
 class TestCase:
     name: str
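
The deleted `Job.to_dict()` logic above is what used to turn these helpers into GitLab job YAML; a simplified, self-contained re-creation (not the removed class itself) shows the shape of the output:

```python
import typing as t

import yaml


class MiniJob:
    """Stripped-down illustration of the deleted Job model's to_dict() behaviour."""

    def __init__(self, name: str, parallel: int = 1, **kwargs: t.Any) -> None:
        self.name = name
        self.parallel = parallel
        for k, v in kwargs.items():
            setattr(self, k, v)

    def to_dict(self) -> t.Dict[str, t.Any]:
        # name becomes the dict key; drop None values and 'parallel: 1'
        res = {k: v for k, v in vars(self).items() if k != 'name' and v is not None}
        if res.get('parallel') == 1:
            res.pop('parallel')  # parallel: 1 is not allowed by GitLab
        return {self.name: res}


job = MiniJob('build_test_related_apps', parallel=2, stage='build', tags=['build', 'shiny'])
print(yaml.dump(job.to_dict()))
# build_test_related_apps:
#   parallel: 2
#   stage: build
#   tags:
#   - build
#   - shiny
```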

View File

@@ -9,14 +9,11 @@ import re
 import typing as t
 from textwrap import dedent

-import yaml
-from artifacts_handler import ArtifactType
 from gitlab import GitlabUpdateError
 from gitlab_api import Gitlab
-from idf_build_apps import App
 from idf_build_apps.constants import BuildStatus
 from idf_ci_local.app import AppWithMetricsInfo
-from idf_ci_local.uploader import AppUploader
+from idf_ci_utils import idf_relpath
 from prettytable import PrettyTable

 from .constants import BINARY_SIZE_METRIC_NAME
@@ -29,7 +26,6 @@ from .constants import RETRY_JOB_PICTURE_LINK
 from .constants import RETRY_JOB_PICTURE_PATH
 from .constants import RETRY_JOB_TITLE
 from .constants import SIZE_DIFFERENCE_BYTES_THRESHOLD
-from .constants import TEST_RELATED_APPS_DOWNLOAD_URLS_FILENAME
 from .constants import TOP_N_APPS_BY_SIZE_DIFF
 from .models import GitlabJob
 from .models import TestCase
@@ -45,7 +41,17 @@ from .utils import load_known_failure_cases
 class ReportGenerator:
     REGEX_PATTERN = r'#### {}\n[\s\S]*?(?=\n#### |$)'

-    def __init__(self, project_id: int, mr_iid: int, pipeline_id: int, job_id: int, commit_id: str, *, title: str):
+    def __init__(
+        self,
+        project_id: int,
+        mr_iid: int,
+        pipeline_id: int,
+        job_id: int,
+        commit_id: str,
+        local_commit_id: str,
+        *,
+        title: str,
+    ):
         gl_project = Gitlab(project_id).project
         if mr_iid is not None:
             self.mr = gl_project.mergerequests.get(mr_iid)
@@ -54,6 +60,7 @@
         self.pipeline_id = pipeline_id
         self.job_id = job_id
         self.commit_id = commit_id
+        self.local_commit_id = local_commit_id
         self.title = title
         self.output_filepath = self.title.lower().replace(' ', '_') + '.html'
@@ -61,11 +68,7 @@
     @property
     def get_commit_summary(self) -> str:
-        """Return a formatted commit summary string."""
-        return (
-            f'with CI commit SHA: {self.commit_id[:8]}, '
-            f'local commit SHA: {os.getenv("CI_MERGE_REQUEST_SOURCE_BRANCH_SHA", "")[:8]}'
-        )
+        return f'with CI commit SHA: {self.commit_id[:8]}, local commit SHA: {self.local_commit_id[:8]}'

     @staticmethod
     def get_download_link_for_url(url: str) -> str:
@@ -89,7 +92,7 @@
         with open(output_filepath, 'w') as file:
             file.write(report_str)

-        # for example, {URL}/-/esp-idf/-/jobs/{id}/artifacts/list_job_84.txt
+        # for example, {URL}/-/esp-idf/-/jobs/{id}/artifacts/app_info_84.txt
         # CI_PAGES_URL is {URL}/esp-idf, which missed one `-`
         report_url: str = get_artifacts_url(job_id, output_filepath)
         return report_url
@@ -344,14 +347,13 @@
         pipeline_id: int,
         job_id: int,
         commit_id: str,
+        local_commit_id: str,
         *,
         title: str = 'Build Report',
-        apps: t.List[App],
-    ):
-        super().__init__(project_id, mr_iid, pipeline_id, job_id, commit_id, title=title)
+        apps: t.List[AppWithMetricsInfo],
+    ) -> None:
+        super().__init__(project_id, mr_iid, pipeline_id, job_id, commit_id, local_commit_id, title=title)
         self.apps = apps
-        self._uploader = AppUploader(self.pipeline_id)
-        self.apps_presigned_url_filepath = TEST_RELATED_APPS_DOWNLOAD_URLS_FILENAME
         self.report_titles_map = {
             'failed_apps': 'Failed Apps',
             'built_test_related_apps': 'Built Apps - Test Related',
@@ -363,7 +365,6 @@
         self.failed_apps_report_file = 'failed_apps.html'
         self.built_apps_report_file = 'built_apps.html'
         self.skipped_apps_report_file = 'skipped_apps.html'
-        self.app_presigned_urls_dict: t.Dict[str, t.Dict[str, str]] = {}

     @staticmethod
     def custom_sort(item: AppWithMetricsInfo) -> t.Tuple[int, t.Any]:
@@ -461,19 +462,13 @@
         sections = []

         if new_test_related_apps:
-            for app in new_test_related_apps:
-                for artifact_type in [ArtifactType.BUILD_DIR_WITHOUT_MAP_AND_ELF_FILES, ArtifactType.MAP_AND_ELF_FILES]:
-                    url = self._uploader.get_app_presigned_url(app, artifact_type)
-                    self.app_presigned_urls_dict.setdefault(app.build_path, {})[artifact_type.value] = url
-
             new_test_related_apps_table_section = self.create_table_section(
                 title=self.report_titles_map['new_test_related_apps'],
                 items=new_test_related_apps,
                 headers=[
                     'App Dir',
                     'Build Dir',
-                    'Bin Files with Build Log (without map and elf)',
-                    'Map and Elf Files',
+                    'Download Command',
                     'Your Branch App Size',
                 ],
                 row_attrs=[
@@ -481,31 +476,17 @@
                     'build_dir',
                 ],
                 value_functions=[
-                    ('Your Branch App Size', lambda app: str(app.metrics[BINARY_SIZE_METRIC_NAME].source_value)),
+                    ('Your Branch App Size', lambda _app: str(_app.metrics[BINARY_SIZE_METRIC_NAME].source_value)),
                     (
-                        'Bin Files with Build Log (without map and elf)',
-                        lambda app: self.get_download_link_for_url(
-                            self.app_presigned_urls_dict[app.build_path][
-                                ArtifactType.BUILD_DIR_WITHOUT_MAP_AND_ELF_FILES.value
-                            ]
-                        ),
-                    ),
-                    (
-                        'Map and Elf Files',
-                        lambda app: self.get_download_link_for_url(
-                            self.app_presigned_urls_dict[app.build_path][ArtifactType.MAP_AND_ELF_FILES.value]
-                        ),
+                        'Download Command',
+                        lambda _app: f'idf-ci gitlab download-artifacts --pipeline-id {self.pipeline_id} '
+                        f'{idf_relpath(_app.build_path)}',
                     ),
                 ],
             )
             sections.extend(new_test_related_apps_table_section)

         if built_test_related_apps:
-            for app in built_test_related_apps:
-                for artifact_type in [ArtifactType.BUILD_DIR_WITHOUT_MAP_AND_ELF_FILES, ArtifactType.MAP_AND_ELF_FILES]:
-                    url = self._uploader.get_app_presigned_url(app, artifact_type)
-                    self.app_presigned_urls_dict.setdefault(app.build_path, {})[artifact_type.value] = url
-
             built_test_related_apps = self._sort_items(
                 built_test_related_apps,
                 key='metrics.binary_size.difference_percentage',
@@ -519,8 +500,7 @@
                 headers=[
                     'App Dir',
                     'Build Dir',
-                    'Bin Files with Build Log (without map and elf)',
-                    'Map and Elf Files',
+                    'Download Command',
                     'Your Branch App Size',
                     'Target Branch App Size',
                     'Size Diff',
@@ -536,18 +516,9 @@
                     ('Size Diff', lambda app: str(app.metrics[BINARY_SIZE_METRIC_NAME].difference)),
                     ('Size Diff, %', lambda app: str(app.metrics[BINARY_SIZE_METRIC_NAME].difference_percentage)),
                     (
-                        'Bin Files with Build Log (without map and elf)',
-                        lambda app: self.get_download_link_for_url(
-                            self.app_presigned_urls_dict[app.build_path][
-                                ArtifactType.BUILD_DIR_WITHOUT_MAP_AND_ELF_FILES.value
-                            ]
-                        ),
-                    ),
-                    (
-                        'Map and Elf Files',
-                        lambda app: self.get_download_link_for_url(
-                            self.app_presigned_urls_dict[app.build_path][ArtifactType.MAP_AND_ELF_FILES.value]
-                        ),
+                        'Download Command',
+                        lambda _app: f'idf-ci gitlab download-artifacts --pipeline-id {self.pipeline_id} '
+                        f'{idf_relpath(_app.build_path)}',
                     ),
                 ],
             )
@@ -560,7 +531,7 @@
                 headers=[
                     'App Dir',
                     'Build Dir',
-                    'Build Log',
+                    'Download Command',
                     'Your Branch App Size',
                 ],
                 row_attrs=[
@@ -568,13 +539,12 @@
                     'build_dir',
                 ],
                 value_functions=[
-                    ('Your Branch App Size', lambda app: str(app.metrics[BINARY_SIZE_METRIC_NAME].source_value)),
                     (
-                        'Build Log',
-                        lambda app: self.get_download_link_for_url(
-                            self._uploader.get_app_presigned_url(app, ArtifactType.LOGS)
-                        ),
+                        'Download Command',
+                        lambda _app: f'idf-ci gitlab download-artifacts --pipeline-id {self.pipeline_id} '
+                        f'{idf_relpath(_app.build_path)}',
                     ),
+                    ('Your Branch App Size', lambda app: str(app.metrics[BINARY_SIZE_METRIC_NAME].source_value)),
                 ],
             )
             sections.extend(new_non_test_related_apps_table_section)
@@ -592,7 +562,7 @@
                 headers=[
                     'App Dir',
                     'Build Dir',
-                    'Build Log',
+                    'Download Command',
                     'Your Branch App Size',
                     'Target Branch App Size',
                     'Size Diff',
@@ -608,10 +578,9 @@
                     ('Size Diff', lambda app: str(app.metrics[BINARY_SIZE_METRIC_NAME].difference)),
                     ('Size Diff, %', lambda app: str(app.metrics[BINARY_SIZE_METRIC_NAME].difference_percentage)),
                     (
-                        'Build Log',
-                        lambda app: self.get_download_link_for_url(
-                            self._uploader.get_app_presigned_url(app, ArtifactType.LOGS)
-                        ),
+                        'Download Command',
+                        lambda _app: f'idf-ci gitlab download-artifacts --pipeline-id {self.pipeline_id} '
+                        f'{idf_relpath(_app.build_path)}',
                     ),
                 ],
             )
@@ -646,11 +615,6 @@
         self.additional_info += self._generate_top_n_apps_by_size_table()

-        # also generate a yaml file that includes the apps and the presigned urls
-        # for helping debugging locally
-        with open(self.apps_presigned_url_filepath, 'w') as fw:
-            yaml.dump(self.app_presigned_urls_dict, fw)
-
         return sections

     def get_failed_apps_report_parts(self) -> t.List[str]:
@@ -661,14 +625,13 @@
         failed_apps_table_section = self.create_table_section(
             title=self.report_titles_map['failed_apps'],
             items=failed_apps,
-            headers=['App Dir', 'Build Dir', 'Failed Reason', 'Build Log'],
+            headers=['App Dir', 'Build Dir', 'Failed Reason', 'Download Command'],
             row_attrs=['app_dir', 'build_dir', 'build_comment'],
             value_functions=[
                 (
-                    'Build Log',
-                    lambda app: self.get_download_link_for_url(
-                        self._uploader.get_app_presigned_url(app, ArtifactType.LOGS)
-                    ),
+                    'Download Command',
+                    lambda _app: f'idf-ci gitlab download-artifacts --pipeline-id {self.pipeline_id} '
+                    f'{idf_relpath(_app.build_path)}',
                 ),
             ],
         )
@@ -690,16 +653,8 @@
         skipped_apps_table_section = self.create_table_section(
             title=self.report_titles_map['skipped_apps'],
             items=skipped_apps,
-            headers=['App Dir', 'Build Dir', 'Skipped Reason', 'Build Log'],
+            headers=['App Dir', 'Build Dir', 'Skipped Reason'],
             row_attrs=['app_dir', 'build_dir', 'build_comment'],
-            value_functions=[
-                (
-                    'Build Log',
-                    lambda app: self.get_download_link_for_url(
-                        self._uploader.get_app_presigned_url(app, ArtifactType.LOGS)
-                    ),
-                ),
-            ],
         )
         skipped_apps_report_url = self.write_report_to_file(
             self.generate_html_report(''.join(skipped_apps_table_section)),
@@ -734,11 +689,12 @@
         pipeline_id: int,
         job_id: int,
         commit_id: str,
+        local_commit_id: str,
         *,
         title: str = 'Target Test Report',
         test_cases: t.List[TestCase],
-    ):
-        super().__init__(project_id, mr_iid, pipeline_id, job_id, commit_id, title=title)
+    ) -> None:
+        super().__init__(project_id, mr_iid, pipeline_id, job_id, commit_id, local_commit_id, title=title)
         self.test_cases = test_cases
         self._known_failure_cases_set = None
@@ -975,11 +931,12 @@
         pipeline_id: int,
         job_id: int,
         commit_id: str,
+        local_commit_id: str,
         *,
         title: str = 'Job Report',
         jobs: t.List[GitlabJob],
     ):
-        super().__init__(project_id, mr_iid, pipeline_id, job_id, commit_id, title=title)
+        super().__init__(project_id, mr_iid, pipeline_id, job_id, commit_id, local_commit_id, title=title)
         self.jobs = jobs
         self.report_titles_map = {
             'failed_jobs': 'Failed Jobs (Excludes "integration_test" and "target_test" jobs)',
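
With this change the report tables carry a copy-pasteable `idf-ci` command instead of presigned links; a sketch of invoking it the same way a reader of the report would, using invented values for the pipeline id and app directory:

```python
import subprocess

# invented pipeline id and app directory, mirroring the 'Download Command' cells above
subprocess.run(
    [
        'idf-ci',
        'gitlab',
        'download-artifacts',
        '--pipeline-id',
        '587355',
        'examples/get-started/hello_world',
    ],
    check=True,
)
```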

View File

@@ -1,75 +0,0 @@
# SPDX-FileCopyrightText: 2024 Espressif Systems (Shanghai) CO LTD
# SPDX-License-Identifier: Apache-2.0
import argparse
import sys

import __init__  # noqa: F401 # inject the system path
from idf_build_apps import build_apps
from idf_build_apps import setup_logging
from idf_build_apps.utils import semicolon_separated_str_to_list
from idf_ci_local.app import import_apps_from_txt
from idf_pytest.constants import DEFAULT_IGNORE_WARNING_FILEPATH

from dynamic_pipelines.constants import TEST_RELATED_APPS_FILENAME

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Build Apps for Dynamic Pipeline')
    parser.add_argument('app_list_file', default=TEST_RELATED_APPS_FILENAME, help='List of apps to build')
    parser.add_argument(
        '--build-verbose',
        action='store_true',
        help='Enable verbose output from build system.',
    )
    parser.add_argument('--parallel-count', default=1, type=int, help='Number of parallel build jobs.')
    parser.add_argument(
        '--parallel-index',
        default=1,
        type=int,
        help='Index (1-based) of the job, out of the number specified by --parallel-count.',
    )
    parser.add_argument(
        '--ignore-warning-file',
        default=DEFAULT_IGNORE_WARNING_FILEPATH,
        type=argparse.FileType('r'),
        help='Ignore the warning strings in the specified file. Each line should be a regex string.',
    )
    parser.add_argument(
        '--modified-components',
        type=semicolon_separated_str_to_list,
        help='semicolon-separated string which specifies the modified components. '
        'apps with `depends_components` set in the corresponding manifest files would only be built '
        'if they depend on any of the specified components. '
        'If set to "", the value would be considered as None. '
        'If set to ";", the value would be considered as an empty list',
    )
    parser.add_argument(
        '--collect-app-info',
        default='list_job_@p.txt',
        help='If specified, the test case name and app info json will be written to this file',
    )
    parser.add_argument(
        '--junitxml',
        default='build_summary_@p.xml',
        help='Path to the junitxml file. If specified, the junitxml file will be generated',
    )

    args = parser.parse_args()

    setup_logging(verbose=1)

    sys.exit(
        build_apps(
            import_apps_from_txt(args.app_list_file),
            build_verbose=args.build_verbose,
            keep_going=True,
            ignore_warning_file=args.ignore_warning_file,
            modified_components=args.modified_components,
            check_app_dependencies=True,
            parallel_count=args.parallel_count,
            parallel_index=args.parallel_index,
            collect_size_info='size_info.txt',
            collect_app_info=args.collect_app_info,
            junitxml=args.junitxml,
            copy_sdkconfig=True,
        )
    )

View File

@@ -1,4 +1,4 @@
-# SPDX-FileCopyrightText: 2024 Espressif Systems (Shanghai) CO LTD
+# SPDX-FileCopyrightText: 2024-2025 Espressif Systems (Shanghai) CO LTD
 # SPDX-License-Identifier: Apache-2.0

 """This file is used for generating the child pipeline for build jobs."""
@@ -8,26 +8,13 @@ import typing as t

 import __init__  # noqa: F401 # inject the system path
 import yaml
+from idf_build_apps.manifest import FolderRule
 from idf_build_apps.utils import semicolon_separated_str_to_list
-from idf_ci_local.app import dump_apps_to_txt
+from idf_ci.idf_gitlab import build_child_pipeline
 from idf_ci_utils import IDF_PATH
-from idf_pytest.constants import DEFAULT_CONFIG_RULES_STR
-from idf_pytest.constants import DEFAULT_FULL_BUILD_TEST_COMPONENTS
-from idf_pytest.constants import DEFAULT_FULL_BUILD_TEST_FILEPATTERNS
-from idf_pytest.constants import CollectMode
-from idf_pytest.script import get_all_apps

-from dynamic_pipelines.constants import DEFAULT_APPS_BUILD_PER_JOB
-from dynamic_pipelines.constants import DEFAULT_BUILD_CHILD_PIPELINE_FILEPATH
-from dynamic_pipelines.constants import DEFAULT_BUILD_CHILD_PIPELINE_NAME
-from dynamic_pipelines.constants import DEFAULT_TEST_PATHS
-from dynamic_pipelines.constants import NON_TEST_RELATED_APPS_FILENAME
-from dynamic_pipelines.constants import NON_TEST_RELATED_BUILD_JOB_NAME
-from dynamic_pipelines.constants import TEST_RELATED_APPS_FILENAME
-from dynamic_pipelines.constants import TEST_RELATED_BUILD_JOB_NAME
-from dynamic_pipelines.models import BuildJob
-from dynamic_pipelines.models import EmptyJob
-from dynamic_pipelines.utils import dump_jobs_to_yaml
+BUILD_CHILD_PIPELINE_FILEPATH = os.path.join(IDF_PATH, 'build_child_pipeline.yml')
+TEST_PATHS = ['examples', os.path.join('tools', 'test_apps'), 'components']


 def _separate_str_to_list(s: str) -> t.List[str]:
@@ -56,90 +43,15 @@
     if configs:
         extra_default_build_targets = configs.get('extra_default_build_targets') or []

-    build_jobs = []
-    ###########################################
-    # special case with -k, ignore other args #
-    ###########################################
-    if arguments.filter_expr:
-        # build only test related apps
-        test_related_apps, _ = get_all_apps(
-            arguments.paths,
-            target=CollectMode.ALL,
-            config_rules_str=DEFAULT_CONFIG_RULES_STR,
-            filter_expr=arguments.filter_expr,
-            marker_expr='not host_test',
-            extra_default_build_targets=extra_default_build_targets,
-        )
-        dump_apps_to_txt(sorted(test_related_apps), TEST_RELATED_APPS_FILENAME)
-        print(f'Generate test related apps file {TEST_RELATED_APPS_FILENAME} with {len(test_related_apps)} apps')
-
-        test_apps_build_job = BuildJob(
-            name=TEST_RELATED_BUILD_JOB_NAME,
-            parallel=len(test_related_apps) // DEFAULT_APPS_BUILD_PER_JOB + 1,
-            variables={
-                'APP_LIST_FILE': TEST_RELATED_APPS_FILENAME,
-            },
-        )
-        build_jobs.append(test_apps_build_job)
-    else:
-        #############
-        # all cases #
-        #############
-        test_related_apps, non_test_related_apps = get_all_apps(
-            arguments.paths,
-            CollectMode.ALL,
-            marker_expr='not host_test',
-            config_rules_str=DEFAULT_CONFIG_RULES_STR,
-            extra_default_build_targets=extra_default_build_targets,
-            compare_manifest_sha_filepath=arguments.compare_manifest_sha_filepath,
-            modified_components=arguments.modified_components,
-            modified_files=arguments.modified_files,
-            ignore_app_dependencies_components=arguments.ignore_app_dependencies_components,
-            ignore_app_dependencies_filepatterns=arguments.ignore_app_dependencies_filepatterns,
-        )
-        dump_apps_to_txt(sorted(test_related_apps), TEST_RELATED_APPS_FILENAME)
-        print(f'Generate test related apps file {TEST_RELATED_APPS_FILENAME} with {len(test_related_apps)} apps')
-        dump_apps_to_txt(sorted(non_test_related_apps), NON_TEST_RELATED_APPS_FILENAME)
-        print(
-            f'Generate non-test related apps file {NON_TEST_RELATED_APPS_FILENAME} '
-            f'with {len(non_test_related_apps)} apps'
-        )
-
-        if test_related_apps:
-            test_apps_build_job = BuildJob(
-                name=TEST_RELATED_BUILD_JOB_NAME,
-                parallel=len(test_related_apps) // DEFAULT_APPS_BUILD_PER_JOB + 1,
-                variables={
-                    'APP_LIST_FILE': TEST_RELATED_APPS_FILENAME,
-                },
-            )
-            build_jobs.append(test_apps_build_job)
-
-        if non_test_related_apps:
-            non_test_apps_build_job = BuildJob(
-                name=NON_TEST_RELATED_BUILD_JOB_NAME,
-                parallel=len(non_test_related_apps) // DEFAULT_APPS_BUILD_PER_JOB + 1,
-                variables={
-                    'APP_LIST_FILE': NON_TEST_RELATED_APPS_FILENAME,
-                },
-            )
-            build_jobs.append(non_test_apps_build_job)
-
-    if mr_labels := os.getenv('CI_MERGE_REQUEST_LABELS'):
-        print(f'MR labels: {mr_labels}')
-
-    # check if there's no jobs
-    if not build_jobs:
-        print('No apps need to be built. Create one empty job instead')
-        build_jobs.append(EmptyJob())
-        extra_include_yml = []
-    else:
-        extra_include_yml = ['tools/ci/dynamic_pipelines/templates/test_child_pipeline.yml']
-
-    dump_jobs_to_yaml(build_jobs, arguments.yaml_output, DEFAULT_BUILD_CHILD_PIPELINE_NAME, extra_include_yml)
-    print(f'Generate child pipeline yaml file {arguments.yaml_output} with {sum(j.parallel for j in build_jobs)} jobs')
+    if extra_default_build_targets:
+        FolderRule.DEFAULT_BUILD_TARGETS.extend(extra_default_build_targets)
+
+    build_child_pipeline(
+        paths=args.paths,
+        modified_files=args.modified_files,
+        compare_manifest_sha_filepath=args.compare_manifest_sha_filepath,
+        yaml_output=args.yaml_output,
+    )


 if __name__ == '__main__':
@@ -150,22 +62,17 @@ if __name__ == '__main__':
     parser.add_argument(
         '-o',
         '--yaml-output',
-        default=DEFAULT_BUILD_CHILD_PIPELINE_FILEPATH,
+        default=BUILD_CHILD_PIPELINE_FILEPATH,
         help='Output YAML path',
     )
+    # use relative path to avoid absolute path in pipeline
     parser.add_argument(
         '-p',
         '--paths',
         nargs='+',
-        default=DEFAULT_TEST_PATHS,
+        default=TEST_PATHS,
         help='Paths to the apps to build.',
     )
-    parser.add_argument(
-        '-k',
-        '--filter-expr',
-        help='only build tests matching given filter expression. For example: -k "test_hello_world". '
-        'Works only for pytest',
-    )
     parser.add_argument(
         '--default-build-test-rules',
        default=os.path.join(IDF_PATH, '.gitlab', 'ci', 'default-build-test-rules.yml'),
@@ -176,16 +83,6 @@
         default=os.path.join(IDF_PATH, '.manifest_sha'),
         help='Path to the recorded manifest sha file generated by `idf-build-apps dump-manifest-sha`',
     )
-    parser.add_argument(
-        '--modified-components',
-        type=_separate_str_to_list,
-        default=os.getenv('MR_MODIFIED_COMPONENTS'),
-        help='semicolon-separated string which specifies the modified components. '
-        'apps with `depends_components` set in the corresponding manifest files would only be built '
-        'if they depend on any of the specified components. '
-        'If set to "", the value would be considered as None. '
-        'If set to ";", the value would be considered as an empty list',
-    )
     parser.add_argument(
         '--modified-files',
         type=_separate_str_to_list,
@@ -196,65 +93,7 @@
         'If set to "", the value would be considered as None. '
         'If set to ";", the value would be considered as an empty list',
     )
-    parser.add_argument(
-        '-ic',
-        '--ignore-app-dependencies-components',
-        type=_separate_str_to_list,
-        help='semicolon-separated string which specifies the modified components used for '
-        'ignoring the app dependency checks. '
-        'The `depends_components` and `depends_filepatterns` set in the manifest files will be ignored '
-        'when any of the specified components matches any of the modified components. '
-        'Must be used together with --modified-components. '
-        'If set to "", the value would be considered as None. '
-        'If set to ";", the value would be considered as an empty list',
-    )
-    parser.add_argument(
-        '-if',
-        '--ignore-app-dependencies-filepatterns',
-        type=_separate_str_to_list,
-        help='semicolon-separated string which specifies the file patterns used for '
-        'ignoring the app dependency checks. '
-        'The `depends_components` and `depends_filepatterns` set in the manifest files will be ignored '
-        'when any of the specified file patterns matches any of the modified files. '
-        'Must be used together with --modified-files. '
-        'If set to "", the value would be considered as None. '
-        'If set to ";", the value would be considered as an empty list',
-    )
     args = parser.parse_args()

-    if test_case_filters := os.getenv('TEST_CASE_FILTERS', None):
-        args.filter_expr = test_case_filters
-
-    if os.getenv('IS_MR_PIPELINE') == '0' or os.getenv('BUILD_AND_TEST_ALL_APPS') == '1':
-        print('Build and run all test cases, and compile all cmake apps')
-        args.modified_components = None
-        args.modified_files = None
-        args.ignore_app_dependencies_components = None
-        args.ignore_app_dependencies_filepatterns = None
-    elif args.filter_expr is not None:
-        print('Build and run only test cases matching "%s"' % args.filter_expr)
-        args.modified_components = None
-        args.modified_files = None
-        args.ignore_app_dependencies_components = None
-        args.ignore_app_dependencies_filepatterns = None
-    else:
-        print(
-            f'Build and run only test cases matching:\n'
-            f'- modified components: {args.modified_components}\n'
-            f'- modified files: {args.modified_files}'
-        )
-        if args.modified_components is not None and not args.ignore_app_dependencies_components:
-            # setting default values
-            args.ignore_app_dependencies_components = DEFAULT_FULL_BUILD_TEST_COMPONENTS
-        if args.modified_files is not None and not args.ignore_app_dependencies_filepatterns:
-            # setting default values
-            args.ignore_app_dependencies_filepatterns = DEFAULT_FULL_BUILD_TEST_FILEPATTERNS
-
-    if not os.path.isfile(args.compare_manifest_sha_filepath):
-        # ignore if the file does not exist
-        args.compare_manifest_sha_filepath = None
-
     main(args)
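
The collection and job-chunking logic that used to live in this script is now delegated to `idf-ci`; a minimal local sketch of the same call the trimmed-down `main()` makes, assuming you run it from an ESP-IDF checkout (the keyword arguments mirror the diff above, the concrete values are illustrative):

```python
import os

from idf_ci.idf_gitlab import build_child_pipeline

# assumption: IDF_PATH points at an ESP-IDF checkout with a recorded .manifest_sha
idf_path = os.environ['IDF_PATH']
build_child_pipeline(
    paths=['examples', os.path.join('tools', 'test_apps'), 'components'],
    modified_files=None,  # None mirrors a full (non-MR) build
    compare_manifest_sha_filepath=os.path.join(idf_path, '.manifest_sha'),
    yaml_output=os.path.join(idf_path, 'build_child_pipeline.yml'),
)
```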

View File

@@ -3,11 +3,13 @@
 import argparse
 import glob
 import os
+import subprocess
 import typing as t

 import __init__  # noqa: F401 # inject the system path
+from idf_build_apps import json_list_files_to_apps
+from idf_ci import GitlabEnvVars
 from idf_ci_local.app import enrich_apps_with_metrics_info
-from idf_ci_local.app import import_apps_from_txt

 from dynamic_pipelines.report import BuildReportGenerator
 from dynamic_pipelines.report import JobReportGenerator
@@ -60,12 +62,13 @@ def common_arguments(parser: argparse.ArgumentParser) -> None:
     parser.add_argument('--mr-iid', type=int, default=os.getenv('CI_MERGE_REQUEST_IID'), help='Merge Request IID')
     parser.add_argument('--pipeline-id', type=int, default=os.getenv('PARENT_PIPELINE_ID'), help='Pipeline ID')
     parser.add_argument('--job-id', type=int, default=os.getenv('CI_JOB_ID'), help='Job ID')
-    parser.add_argument('--commit-id', default=os.getenv('CI_COMMIT_SHA'), help='MR commit ID')
+    parser.add_argument('--commit-id', default=os.getenv('CI_COMMIT_SHA', ''), help='MR merged result commit ID')
+    parser.add_argument('--local-commit-id', default=os.getenv('PIPELINE_COMMIT_SHA', ''), help='local dev commit ID')


 def conditional_arguments(report_type_args: argparse.Namespace, parser: argparse.ArgumentParser) -> None:
     if report_type_args.report_type == 'build':
-        parser.add_argument('--app-list-filepattern', default='list_job_*.txt', help='Pattern to match app list files')
+        parser.add_argument('--app-list-filepattern', default='app_info*.txt', help='Pattern to match app list files')
     elif report_type_args.report_type == 'target_test':
         parser.add_argument(
             '--junit-report-filepattern', default='XUNIT_RESULT*.xml', help='Pattern to match JUnit report files'
@@ -73,16 +76,30 @@


 def generate_build_report(args: argparse.Namespace) -> None:
-    apps: t.List[t.Any] = [
-        app for file_name in glob.glob(args.app_list_filepattern) for app in import_apps_from_txt(file_name)
-    ]
+    # generate presigned url for the artifacts
+    subprocess.check_output(
+        [
+            'idf-ci',
+            'gitlab',
+            'generate-presigned-json',
+            '--commit-sha',
+            args.local_commit_id,
+            '--output',
+            'presigned.json',
+        ],
+    )
+    print('generated presigned.json')

+    # generate report
+    apps = json_list_files_to_apps(glob.glob(args.app_list_filepattern))
+    print(f'loaded {len(apps)} apps')
     app_metrics = fetch_app_metrics(
-        source_commit_sha=os.environ.get('CI_COMMIT_SHA'),
+        source_commit_sha=args.commit_id,
         target_commit_sha=os.environ.get('CI_MERGE_REQUEST_TARGET_BRANCH_SHA'),
     )
     apps = enrich_apps_with_metrics_info(app_metrics, apps)
     report_generator = BuildReportGenerator(
-        args.project_id, args.mr_iid, args.pipeline_id, args.job_id, args.commit_id, apps=apps
+        args.project_id, args.mr_iid, args.pipeline_id, args.job_id, args.commit_id, args.local_commit_id, apps=apps
     )
     report_generator.post_report()
@@ -90,10 +107,20 @@

 def generate_target_test_report(args: argparse.Namespace) -> None:
     test_cases: t.List[t.Any] = parse_testcases_from_filepattern(args.junit_report_filepattern)
     report_generator = TargetTestReportGenerator(
-        args.project_id, args.mr_iid, args.pipeline_id, args.job_id, args.commit_id, test_cases=test_cases
+        args.project_id,
+        args.mr_iid,
+        args.pipeline_id,
+        args.job_id,
+        args.commit_id,
+        args.local_commit_id,
+        test_cases=test_cases,
     )
     report_generator.post_report()

+    if GitlabEnvVars().IDF_CI_IS_DEBUG_PIPELINE:
+        print('Debug pipeline detected, exit non-zero to fail the pipeline in order to block merge')
+        exit(30)
+

 def generate_jobs_report(args: argparse.Namespace) -> None:
     jobs: t.List[t.Any] = fetch_failed_jobs(args.commit_id)
@@ -102,7 +129,7 @@
         return

     report_generator = JobReportGenerator(
-        args.project_id, args.mr_iid, args.pipeline_id, args.job_id, args.commit_id, jobs=jobs
+        args.project_id, args.mr_iid, args.pipeline_id, args.job_id, args.commit_id, args.local_commit_id, jobs=jobs
     )
     report_generator.post_report(print_retry_jobs_message=any(job.is_failed for job in jobs))
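
The new exit-30 guard makes a debug pipeline fail its report job on purpose so the MR cannot be merged; the same pattern in isolation, assuming the flag surfaces as a plain environment variable (a simplification of what `GitlabEnvVars` actually reads):

```python
import os
import sys

# assumption: the IDF_CI_IS_DEBUG_PIPELINE flag is exposed as an env var of the same name
if os.getenv('IDF_CI_IS_DEBUG_PIPELINE') == '1':
    print('Debug pipeline detected, exit non-zero to fail the pipeline in order to block merge')
    sys.exit(30)
```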

View File

@@ -1,4 +1,4 @@
-# SPDX-FileCopyrightText: 2024 Espressif Systems (Shanghai) CO LTD
+# SPDX-FileCopyrightText: 2024-2025 Espressif Systems (Shanghai) CO LTD
 # SPDX-License-Identifier: Apache-2.0

 """This file is used for generating the child pipeline for target test jobs.
@@ -8,120 +8,39 @@
 """

 import argparse
-import glob
 import os
 import typing as t
-from collections import Counter
-from collections import defaultdict

 import __init__  # noqa: F401 # inject the system path
 import yaml
-from idf_build_apps import App
-from idf_ci_local.app import import_apps_from_txt
+from idf_ci import get_pytest_cases
+from idf_ci.idf_gitlab import test_child_pipeline
+from idf_ci.idf_pytest import GroupedPytestCases
+from idf_ci.idf_pytest.models import GroupKey
+from idf_ci_utils import IDF_PATH
 from idf_pytest.constants import TIMEOUT_4H_MARKERS
-from idf_pytest.script import get_pytest_cases

-from dynamic_pipelines.constants import BUILD_ONLY_LABEL
-from dynamic_pipelines.constants import DEFAULT_CASES_TEST_PER_JOB
-from dynamic_pipelines.constants import DEFAULT_TARGET_TEST_CHILD_PIPELINE_FILEPATH
-from dynamic_pipelines.constants import DEFAULT_TARGET_TEST_CHILD_PIPELINE_NAME
-from dynamic_pipelines.constants import DEFAULT_TARGET_TEST_JOB_TEMPLATE_NAME
-from dynamic_pipelines.constants import DEFAULT_TEST_PATHS
 from dynamic_pipelines.constants import KNOWN_GENERATE_TEST_CHILD_PIPELINE_WARNINGS_FILEPATH
-from dynamic_pipelines.constants import TIMEOUT_4H_TEMPLATE_NAME
-from dynamic_pipelines.models import EmptyJob
-from dynamic_pipelines.models import Job
-from dynamic_pipelines.models import TargetTestJob
-from dynamic_pipelines.utils import dump_jobs_to_yaml

+BUILD_ONLY_LABEL = 'For Maintainers: Only Build Tests'
+TIMEOUT_4H_TEMPLATE_NAME = '.timeout_4h_template'
+TARGET_TEST_CHILD_PIPELINE_FILEPATH = os.path.join(IDF_PATH, 'target_test_child_pipeline.yml')

-def get_tags_with_amount(s: str) -> t.List[str]:
-    c: Counter = Counter()
-    for _t in s.split(','):
-        c[_t] += 1
-
-    res = set()
-    for target, amount in c.items():
-        if amount > 1:
-            res.add(f'{target}_{amount}')
-        else:
-            res.add(target)
-
-    return sorted(res)
-
-
-def get_target_test_jobs(
-    paths: str, apps: t.List[App], exclude_runner_tags: t.Set[str]
-) -> t.Tuple[t.List[Job], t.List[str], t.List[str]]:
-    """
-    Return the target test jobs and the extra yaml files to include
-    """
+
+def main(output_filepath: str) -> None:
     if mr_labels := os.getenv('CI_MERGE_REQUEST_LABELS'):
         print(f'MR labels: {mr_labels}')

         if BUILD_ONLY_LABEL in mr_labels.split(','):
             print('MR has build only label, skip generating target test child pipeline')
-            return [EmptyJob()], [], []
-
-    pytest_cases = get_pytest_cases(
-        paths,
-        apps=apps,
-        marker_expr='not host_test',  # since it's generating target-test child pipeline
-    )
-
-    no_env_marker_test_cases: t.List[str] = []
-    res = defaultdict(list)
-    for case in pytest_cases:
-        if not case.env_markers:
-            no_env_marker_test_cases.append(case.item.nodeid)
-            continue
-
-        res[(case.target_selector, tuple(sorted(case.env_markers)))].append(case)
-
-    target_test_jobs: t.List[Job] = []
-    for (target_selector, env_markers), cases in res.items():
-        runner_tags = get_tags_with_amount(target_selector) + list(env_markers)
-        if ','.join(runner_tags) in exclude_runner_tags:
-            print('WARNING: excluding test cases with runner tags:', runner_tags)
-            continue
-
-        _extends = [DEFAULT_TARGET_TEST_JOB_TEMPLATE_NAME]
-        for timeout_4h_marker in TIMEOUT_4H_MARKERS:
-            if timeout_4h_marker in env_markers:
-                _extends.append(TIMEOUT_4H_TEMPLATE_NAME)
-
-        target_test_job = TargetTestJob(
-            extends=_extends,
-            name=f'{target_selector} - {",".join(env_markers)}',
-            tags=runner_tags,
-            parallel=len(cases) // DEFAULT_CASES_TEST_PER_JOB + 1,
-        )
-        target_test_job.set_variable('TARGET_SELECTOR', f"'{target_selector}'")
-        target_test_job.set_variable('ENV_MARKERS', "'" + ' and '.join(env_markers) + "'")
-        target_test_job.set_variable('PYTEST_NODES', ' '.join([f"'{case.item.nodeid}'" for case in cases]))
-
-        target_test_jobs.append(target_test_job)
-
-    extra_include_yml: t.List[str] = []
-    if not target_test_jobs:
-        print('No target test cases required, create one empty job instead')
-        target_test_jobs.append(EmptyJob())
-    else:
-        extra_include_yml = ['tools/ci/dynamic_pipelines/templates/generate_target_test_report.yml']
-
-    fast_pipeline_flag = int(os.getenv('REPORT_EXIT_CODE', 0)) == 30
-    if fast_pipeline_flag:
-        extra_include_yml = ['tools/ci/dynamic_pipelines/templates/fast_pipeline.yml']
-
-    no_env_marker_test_cases.sort()
-    return target_test_jobs, extra_include_yml, no_env_marker_test_cases
-
-
-def generate_target_test_child_pipeline(
-    paths: str,
-    apps: t.List[App],
-    output_filepath: str,
-) -> None:
+            test_child_pipeline(
+                output_filepath,
+                cases=GroupedPytestCases([]),
+            )
+            return
+
+    cases = GroupedPytestCases(get_pytest_cases())
+
     with open(KNOWN_GENERATE_TEST_CHILD_PIPELINE_WARNINGS_FILEPATH) as fr:
         known_warnings_dict = yaml.safe_load(fr) or dict()
@@ -131,37 +50,34 @@
     if exclude_runner_tags := os.getenv('EXCLUDE_RUNNER_TAGS'):
         exclude_runner_tags_set.update(exclude_runner_tags.split(';'))

-    target_test_jobs, extra_include_yml, no_env_marker_test_cases = get_target_test_jobs(
-        paths=paths,
-        apps=apps,
-        exclude_runner_tags=exclude_runner_tags_set,
-    )
-
-    known_no_env_marker_test_cases = set(known_warnings_dict.get('no_env_marker_test_cases', []))
-    no_env_marker_test_cases_set = set(no_env_marker_test_cases)
-
-    no_env_marker_test_cases_fail = False
-    if no_env_marker_test_cases_set - known_no_env_marker_test_cases:
-        print('ERROR: NEW "no_env_marker_test_cases" detected:')
-        for case in no_env_marker_test_cases_set - known_no_env_marker_test_cases:
-            print(f'  - {case}')
-        no_env_marker_test_cases_fail = True
-
-        print(
-            'Please add at least one environment marker to the test cases listed above. '
-            'You may check all the env markers here: tools/ci/idf_pytest/constants.py'
-        )
-
-    if no_env_marker_test_cases_fail:
-        raise SystemExit('Failed to generate target test child pipeline.')
-
-    dump_jobs_to_yaml(
-        target_test_jobs,
-        output_filepath,
-        DEFAULT_TARGET_TEST_CHILD_PIPELINE_NAME,
-        extra_include_yml,
-    )
-    print(f'Generate child pipeline yaml file {output_filepath} with {sum(j.parallel for j in target_test_jobs)} jobs')
+    flattened_cases = []
+    additional_dict: t.Dict[GroupKey, t.Dict[str, t.Any]] = {}
+    for key, grouped_cases in cases.grouped_cases.items():
+        # skip test cases with excluded runner tags
+        if ','.join(sorted(key.runner_tags)) in exclude_runner_tags_set:
+            print(f'WARNING: excluding test cases with runner tags: {key.runner_tags}')
+            continue
+
+        flattened_cases.extend(grouped_cases)
+
+        for case in grouped_cases:
+            for timeout_4h_marker in TIMEOUT_4H_MARKERS:
+                if timeout_4h_marker in case.env_markers:
+                    if key not in additional_dict:
+                        additional_dict[key] = {
+                            'extra_extends': [],
+                        }
+                    if TIMEOUT_4H_TEMPLATE_NAME not in additional_dict[key]['extra_extends']:
+                        additional_dict[key]['extra_extends'].append(TIMEOUT_4H_TEMPLATE_NAME)
+
+    test_child_pipeline(
+        output_filepath,
+        cases=GroupedPytestCases(
+            cases=flattened_cases,
+            additional_dict=additional_dict,
+        ),
+    )


 if __name__ == '__main__':
@@ -169,35 +85,13 @@
         description='Generate Target Test Child Pipeline. Update Build Report in MR pipelines',
         formatter_class=argparse.ArgumentDefaultsHelpFormatter,
     )
-    parser.add_argument(
-        '-p',
-        '--paths',
-        nargs='+',
-        default=DEFAULT_TEST_PATHS,
-        help='Paths to the apps to build.',
-    )
     parser.add_argument(
         '-o',
         '--output',
-        default=DEFAULT_TARGET_TEST_CHILD_PIPELINE_FILEPATH,
+        default=TARGET_TEST_CHILD_PIPELINE_FILEPATH,
         help='Output child pipeline file path',
     )
-    parser.add_argument(
-        '--app-info-filepattern',
-        default='list_job_*.txt',
-        help='glob pattern to specify the files that include built app info generated by '
-        '`idf-build-apps --collect-app-info ...`. Will not raise ValueError when binary '
-        'paths do not exist in the local file system, if they are not recorded in the app info.',
-    )
     args = parser.parse_args()

-    apps = []
-    for f in glob.glob(args.app_info_filepattern):
-        apps.extend(import_apps_from_txt(f))
-
-    generate_target_test_child_pipeline(
-        paths=args.paths,
-        apps=apps,
-        output_filepath=args.output,
-    )
+    main(args.output)
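
The `EXCLUDE_RUNNER_TAGS` matching above is easy to misread, so here is a standalone sketch with invented tag values showing how a group of runner tags is normalised and compared:

```python
import os

# invented example value; real pipelines set EXCLUDE_RUNNER_TAGS as ';'-separated
# groups of ','-joined runner tags
os.environ['EXCLUDE_RUNNER_TAGS'] = 'esp32,ethernet;esp32c3,wifi'

exclude_runner_tags_set = set(os.environ['EXCLUDE_RUNNER_TAGS'].split(';'))

runner_tags = ['wifi', 'esp32c3']  # order does not matter: the check sorts first
if ','.join(sorted(runner_tags)) in exclude_runner_tags_set:
    print(f'WARNING: excluding test cases with runner tags: {runner_tags}')
```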

View File

@@ -9,6 +9,7 @@
     - .before_script:build
     - .after_script:build:ccache-show-stats:upload-failed-job-logs
   image: $ESP_ENV_IMAGE
+  tags: [build, shiny]
   stage: build
   timeout: 1 hour
   variables:
@@ -20,28 +21,27 @@
   needs:
     - pipeline: $PARENT_PIPELINE_ID
       job: generate_build_child_pipeline
+    - pipeline: $PARENT_PIPELINE_ID
+      job: pipeline_variables
   artifacts:
     paths:
-      # The other artifacts patterns are defined under tools/ci/artifacts_handler.py
+      # The other artifacts patterns are defined under .idf_ci.toml
       # Now we're uploading/downloading the binary files from our internal storage server
-      #
       # keep the log file to help debug
       - "**/build*/build_log.txt"
       # build spec files
       - build_summary_*.xml
       # list of built apps
-      - list_job_*.txt
+      - app_info_*.txt
     when: always
     expire_in: 1 week
   script:
     # CI specific options start from "--parallel-count xxx". could ignore when running locally
-    - run_cmd python tools/ci/dynamic_pipelines/scripts/child_pipeline_build_apps.py $APP_LIST_FILE
+    - run_cmd idf-ci build run
       --parallel-count ${CI_NODE_TOTAL:-1}
       --parallel-index ${CI_NODE_INDEX:-1}
-      --collect-app-info "list_job_${CI_JOB_NAME_SLUG}.txt"
-      --modified-components ${MR_MODIFIED_COMPONENTS}
-      --junitxml "build_summary_${CI_JOB_NAME_SLUG}.xml"
-    - run_cmd python tools/ci/artifacts_handler.py upload --type size_reports
+      --modified-files ${MR_MODIFIED_FILES}

 .dynamic_target_test_template:
   extends:
@@ -52,9 +52,7 @@
   variables:
     SUBMODULES_TO_FETCH: "none"
     # set while generating the pipeline
-    PYTEST_NODES: ""
-    TARGET_SELECTOR: ""
-    ENV_MARKERS: ""
+    nodes: ""
     INSTALL_EXTRA_TOOLS: "xtensa-esp-elf-gdb riscv32-esp-elf-gdb openocd-esp32 esp-rom-elfs"
     PYTEST_EXTRA_FLAGS: "--dev-passwd ${ETHERNET_TEST_PASSWORD} --dev-user ${ETHERNET_TEST_USER} --capture=fd --verbosity=0 --unity-test-report-mode merge"
   needs:
@@ -77,15 +75,12 @@
     when: always
     expire_in: 1 week
   script:
-    # get known failure cases
-    - run_cmd python tools/ci/get_known_failure_cases_file.py
+    - run_cmd idf-ci gitlab download-known-failure-cases-file ${KNOWN_FAILURE_CASES_FILE_NAME}
     # get runner env config file
     - retry_failed git clone $TEST_ENV_CONFIG_REPO
     - run_cmd python $CHECKOUT_REF_SCRIPT ci-test-runner-configs ci-test-runner-configs
     # CI specific options start from "--known-failure-cases-file xxx". could ignore when running locally
-    - run_cmd pytest ${PYTEST_NODES}
-      --target ${TARGET_SELECTOR}
-      -m ${ENV_MARKERS}
+    - run_cmd pytest $nodes
       --pipeline-id $PARENT_PIPELINE_ID
       --junitxml=XUNIT_RESULT_${CI_JOB_NAME_SLUG}.xml
       --ignore-result-files ${KNOWN_FAILURE_CASES_FILE_NAME}
@@ -94,9 +89,7 @@
       ${PYTEST_EXTRA_FLAGS}
   after_script:
     - source tools/ci/utils.sh
-    - section_start "upload_junit_reports"
-    - run_cmd python tools/ci/artifacts_handler.py upload --type logs junit_reports
-    - section_end "upload_junit_reports"
+    - run_cmd idf-ci gitlab upload-artifacts --type junit

 .timeout_4h_template:
   timeout: 4 hours

View File

@@ -1,16 +0,0 @@
.generate_pytest_report_base:
  stage: .post
  tags: [build, shiny]
  image: $ESP_ENV_IMAGE
  artifacts:
    paths:
      - target_test_report.html
    expire_in: 2 week
    when: always

fast_pipeline:pipeline_ended:always_failed:
  when: on_success
  extends: .generate_pytest_report_base
  script:
    - python tools/ci/dynamic_pipelines/scripts/generate_report.py --report-type target_test
    - exit 30

View File

@@ -1,8 +1,29 @@
all_test_finished:
stage: .post
tags: [fast_run, shiny]
image: $ESP_ENV_IMAGE
when: always
# this job is used to check if all target test jobs are finished
# because the `needs` make the later jobs run even if the previous stage are not finished
# and there's no `needs: stage` for now in gitlab
# https://gitlab.com/gitlab-org/gitlab/-/issues/220758
artifacts:
untracked: true
expire_in: 1 week
when: always
before_script: []
script:
- echo "all test jobs finished"
 generate_pytest_report:
   stage: .post
   tags: [build, shiny]
   image: $ESP_ENV_IMAGE
   when: always
+  needs:
+    - all_test_finished
+    - pipeline: $PARENT_PIPELINE_ID
+      job: pipeline_variables
   artifacts:
     paths:
       - target_test_report.html
@@ -11,8 +32,7 @@ generate_pytest_report:
       - succeeded_cases.html
     expire_in: 2 week
     when: always
   script:
-    - python tools/ci/get_known_failure_cases_file.py
+    - run_cmd idf-ci gitlab download-known-failure-cases-file ${KNOWN_FAILURE_CASES_FILE_NAME}
     - python tools/ci/dynamic_pipelines/scripts/generate_report.py --report-type target_test
     - python tools/ci/previous_stage_job_status.py --stage target_test
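`all_test_finished` acts purely as a synchronization point: `needs` lets downstream jobs start before a stage completes, and GitLab has no `needs: stage` yet (see the linked issue above). A hedged sketch of the kind of check `previous_stage_job_status.py` performs, using python-gitlab (host, token, and env var names assumed):

```python
# fail if any job of the given stage in the parent pipeline failed;
# values and env var names are illustrative
import os

from gitlab import Gitlab

gl = Gitlab('https://gitlab.example.com', private_token=os.environ['GITLAB_TOKEN'])  # assumed
project = gl.projects.get(os.environ['CI_PROJECT_ID'])
pipeline = project.pipelines.get(os.environ['PARENT_PIPELINE_ID'])
failed = [j.name for j in pipeline.jobs.list(get_all=True) if j.stage == 'target_test' and j.status == 'failed']
if failed:
    raise SystemExit(f'failed target_test jobs: {failed}')
```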


@@ -2,7 +2,13 @@
 # no_env_marker_test_cases: List of test cases that do not have environment markers.
 # each item shall be the test node id, you may check the error message to get the node id.
 no_env_marker_test_cases:
-  - dummy_test_case
+  - test_examples_security_secure_boot
+  - test_examples_security_secure_boot_corrupt_app_sig
+  - test_examples_security_secure_boot_corrupt_bl_sig
+  - test_examples_security_secure_boot_key_combo
+  - test_examples_security_secure_boot_key_revoke
+  - test_usb_enum
+  - test_usb_ext_port

 # no_runner_tags: List of runner tags that has no test runner set.
 # each item shall be a comma separated list of runner tags.
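These node ids feed the local pytest plugin changed later in this MR: a collected case without env markers is deselected if it appears in this list, otherwise collection fails with a `ValueError`. A minimal sketch of that lookup, assuming the YAML layout above (file name assumed):

```python
# mirror the membership check IdfLocalPlugin.__init__ performs on this file
import yaml

with open('known_generate_test_child_pipeline_warnings.yml') as fr:  # assumed name
    known = yaml.safe_load(fr) or {}

no_env_marker_cases = set(known.get('no_env_marker_test_cases') or [])
print('test_usb_enum' in no_env_marker_cases)  # True with the list above
```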


@@ -1,3 +1,20 @@
all_build_finished:
stage: assign_test
tags: [fast_run, shiny]
image: $ESP_ENV_IMAGE
when: always
# this job is used to check if all build jobs are finished
# because the `needs` make the later jobs run even if the previous stage are not finished
# and there's no `needs: stage` for now in gitlab
# https://gitlab.com/gitlab-org/gitlab/-/issues/220758
artifacts:
untracked: true
expire_in: 1 week
when: always
before_script: []
script:
- echo "all test jobs finished"
 generate_pytest_build_report:
   stage: assign_test
   image: $ESP_ENV_IMAGE
@@ -5,18 +22,20 @@ generate_pytest_build_report:
     - build
     - shiny
   when: always
+  needs:
+    - all_build_finished
+    - pipeline: $PARENT_PIPELINE_ID
+      job: pipeline_variables
   artifacts:
     paths:
       - failed_apps.html
       - built_apps.html
       - skipped_apps.html
       - build_report.html
-      - test_related_apps_download_urls.yml
-    expire_in: 2 week
+      - presigned.json
+    expire_in: 1 week
     when: always
   script:
+    - env
     - python tools/ci/dynamic_pipelines/scripts/generate_report.py --report-type build
     - python tools/ci/previous_stage_job_status.py --stage build
@@ -27,6 +46,10 @@ generate_pytest_child_pipeline:
   tags:
     - build
     - shiny
+  needs:
+    - build_test_related_apps # won't work if the parallel count exceeds 100, now it's around 50
+    - pipeline: $PARENT_PIPELINE_ID
+      job: pipeline_variables
   artifacts:
     paths:
       - target_test_child_pipeline.yml
@@ -39,7 +62,6 @@ Pytest Target Test Jobs:
     - generate_pytest_child_pipeline
   variables:
     PARENT_PIPELINE_ID: $PARENT_PIPELINE_ID
-    REPORT_EXIT_CODE: $REPORT_EXIT_CODE
   # https://gitlab.com/gitlab-org/gitlab/-/issues/214340
   inherit:
     variables: false
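The build report job now publishes `presigned.json` in place of `test_related_apps_download_urls.yml`, so download links ship as JSON. A hedged consumer sketch; the exact key layout is not shown in this diff:

```python
# iterate the presigned download URLs emitted by the build report job;
# the mapping structure is assumed, not confirmed by this diff
import json

with open('presigned.json') as f:
    presigned = json.load(f)

for key, url in presigned.items():  # layout assumed
    print(key, url)
```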

View File

@@ -8,12 +8,13 @@ import unittest
 from unittest.mock import MagicMock
 from unittest.mock import patch

+from idf_build_apps import json_list_files_to_apps
+
 sys.path.insert(0, os.path.join(f'{os.environ.get("IDF_PATH")}', 'tools', 'ci', 'python_packages'))
 sys.path.insert(0, os.path.join(f'{os.environ.get("IDF_PATH")}', 'tools', 'ci'))

 from idf_build_apps.constants import BuildStatus  # noqa: E402
 from idf_ci_local.app import enrich_apps_with_metrics_info  # noqa: E402
-from idf_ci_local.app import import_apps_from_txt  # noqa: E402
 from dynamic_pipelines.models import GitlabJob  # noqa: E402
 from dynamic_pipelines.report import BuildReportGenerator  # noqa: E402
@@ -40,7 +41,6 @@ class TestReportGeneration(unittest.TestCase):
     def setup_patches(self) -> None:
         self.gitlab_patcher = patch('dynamic_pipelines.report.Gitlab')
-        self.uploader_patcher = patch('dynamic_pipelines.report.AppUploader')
         self.failure_rate_patcher = patch('dynamic_pipelines.report.fetch_failed_testcases_failure_ratio')
         self.env_patcher = patch.dict(
             'os.environ',
@@ -54,7 +54,6 @@
         self.yaml_dump_patcher = patch('dynamic_pipelines.report.yaml.dump')

         self.MockGitlab = self.gitlab_patcher.start()
-        self.MockUploader = self.uploader_patcher.start()
         self.test_cases_failure_rate = self.failure_rate_patcher.start()
         self.env_patcher.start()
         self.yaml_dump_patcher.start()
@@ -63,10 +62,8 @@
         self.mock_mr = MagicMock()
         self.MockGitlab.return_value.project = self.mock_project
         self.mock_project.mergerequests.get.return_value = self.mock_mr
-        self.MockUploader.return_value.get_app_presigned_url.return_value = 'https://example.com/presigned-url'

         self.addCleanup(self.gitlab_patcher.stop)
-        self.addCleanup(self.uploader_patcher.stop)
         self.addCleanup(self.failure_rate_patcher.stop)
         self.addCleanup(self.env_patcher.stop)
         self.addCleanup(self.yaml_dump_patcher.stop)
@@ -80,7 +77,6 @@
             self.build_report_generator.failed_apps_report_file,
             self.build_report_generator.built_apps_report_file,
             self.build_report_generator.skipped_apps_report_file,
-            self.build_report_generator.apps_presigned_url_filepath,
         ]
         for file_path in files_to_delete:
             if os.path.exists(file_path):
@@ -112,7 +108,8 @@
         ]
         test_cases = parse_testcases_from_filepattern(os.path.join(self.reports_sample_data_path, 'XUNIT_*.xml'))
         apps = enrich_apps_with_metrics_info(
-            built_apps_size_info_response, import_apps_from_txt(os.path.join(self.reports_sample_data_path, 'apps'))
+            built_apps_size_info_response,
+            json_list_files_to_apps([os.path.join(self.reports_sample_data_path, 'apps')]),
         )
         self.target_test_report_generator = TargetTestReportGenerator(
             project_id=123,
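`import_apps_from_txt` gives way to `json_list_files_to_apps` from idf-build-apps, which accepts a list of app-list files rather than a single path. A minimal usage sketch (file name illustrative, matching the `app_info_*.txt` pattern this MR starts ignoring):

```python
# deserialize App objects from one or more JSON-lines app list files
from idf_build_apps import json_list_files_to_apps

apps = json_list_files_to_apps(['app_info_example.txt'])  # illustrative path
print(len(apps))
```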


@@ -10,7 +10,6 @@ from urllib.parse import urlencode
 from urllib.parse import urlparse

 import requests
-import yaml

 from .constants import CI_DASHBOARD_API
 from .constants import CI_JOB_TOKEN
@@ -18,42 +17,9 @@ from .constants import CI_MERGE_REQUEST_SOURCE_BRANCH_SHA
 from .constants import CI_PAGES_URL
 from .constants import CI_PROJECT_URL
 from .models import GitlabJob
-from .models import Job
 from .models import TestCase
def dump_jobs_to_yaml(
jobs: t.List[Job],
output_filepath: str,
pipeline_name: str,
extra_include_yml: t.Optional[t.List[str]] = None,
) -> None:
yaml_dict = {}
for job in jobs:
yaml_dict.update(job.to_dict())
# global stuffs
yaml_dict.update(
{
'include': [
'tools/ci/dynamic_pipelines/templates/.dynamic_jobs.yml',
'.gitlab/ci/common.yml',
],
'workflow': {
'name': pipeline_name,
'rules': [
# always run the child pipeline, if they are created
{'when': 'always'},
],
},
}
)
yaml_dict['include'].extend(extra_include_yml or [])
with open(output_filepath, 'w') as fw:
yaml.dump(yaml_dict, fw, indent=2)
 def parse_testcases_from_filepattern(junit_report_filepattern: str) -> t.List[TestCase]:
     """
     Parses test cases from XML files matching the provided file pattern.


@@ -1,65 +1,61 @@
 tools/ble/**/*
+tools/bt/README.md
+tools/bt/bt_hci_to_btsnoop.py
 tools/catch/**/*
-tools/ci/check_*.py
-tools/ci/check_*.txt
-tools/ci/check_*.sh
-tools/ci/check_copyright_config.yaml
-tools/ci/get_all_test_results.py
-tools/gdb_panic_server.py
 tools/check_term.py
-tools/python_version_checker.py
+tools/ci/*exclude*.txt
 tools/ci/astyle-rules.yml
+tools/ci/check_*.py
+tools/ci/check_*.sh
+tools/ci/check_*.txt
+tools/ci/check_copyright_config.yaml
 tools/ci/checkout_project_ref.py
 tools/ci/ci_fetch_submodule.py
 tools/ci/ci_get_mr_info.py
-tools/ci/ci_process_description.py
+tools/ci/cleanup_ignore_lists.py
 tools/ci/configure_ci_environment.sh
-tools/ci/generate_rules.py
 tools/ci/deploy_docs.py
+tools/ci/dynamic_pipelines/**/*
 tools/ci/envsubst.py
-tools/ci/*exclude*.txt
 tools/ci/executable-list.txt
 tools/ci/fix_empty_prototypes.sh
+tools/ci/generate_rules.py
 tools/ci/get-full-sources.sh
+tools/ci/get_all_test_results.py
+tools/ci/get_supported_examples.sh
+tools/ci/gitlab_yaml_linter.py
+tools/ci/idf_build_apps_dump_soc_caps.py
+tools/ci/idf_ci_local/**/*
 tools/ci/idf_ci_utils.py
+tools/ci/idf_pytest/**/*
 tools/ci/mirror-submodule-update.sh
 tools/ci/multirun_with_pyenv.sh
 tools/ci/mypy_ignore_list.txt
+tools/ci/previous_stage_job_status.py
 tools/ci/push_to_github.sh
-tools/ci/python_packages/wifi_tools.py
-tools/ci/utils.sh
-tools/eclipse-code-style.xml
-tools/format.sh
-tools/mocks/**/*
-tools/set-submodules-to-github.sh
-tools/templates/sample_project/main/main.c
-tools/templates/sample_project/CMakeLists.txt
-tools/templates/sample_project/main/CMakeLists.txt
-tools/templates/sample_component/CMakeLists.txt
-tools/templates/sample_component/include/main.h
-tools/templates/sample_component/main.c
-tools/ci/cleanup_ignore_lists.py
-tools/ci/artifacts_handler.py
-tools/ci/get_known_failure_cases_file.py
-tools/unit-test-app/**/*
-tools/ci/gitlab_yaml_linter.py
-tools/ci/dynamic_pipelines/**/*
-tools/ci/idf_ci_local/**/*
-tools/ci/get_supported_examples.sh
 tools/ci/python_packages/common_test_methods.py
 tools/ci/python_packages/gitlab_api.py
 tools/ci/python_packages/idf_http_server_test/**/*
 tools/ci/python_packages/idf_iperf_test_util/**/*
-tools/esp_prov/**/*
+tools/ci/python_packages/wifi_tools.py
-tools/ci/sort_yaml.py
 tools/ci/sg_rules/*
-tools/ci/previous_stage_job_status.py
+tools/ci/sort_yaml.py
-tools/legacy_exports/export_legacy.fish
+tools/ci/utils.sh
-tools/legacy_exports/export_legacy.sh
+tools/eclipse-code-style.xml
-tools/legacy_exports/export_legacy.ps1
+tools/esp_prov/**/*
+tools/format.sh
+tools/gdb_panic_server.py
 tools/legacy_exports/export_legacy.bat
-tools/ci/idf_build_apps_dump_soc_caps.py
+tools/legacy_exports/export_legacy.fish
-tools/bt/bt_hci_to_btsnoop.py
+tools/legacy_exports/export_legacy.ps1
-tools/bt/README.md
+tools/legacy_exports/export_legacy.sh
-tools/ci/test_linter.py
+tools/mocks/**/*
-tools/ci/check_test_files.py
+tools/python_version_checker.py
+tools/set-submodules-to-github.sh
+tools/templates/sample_component/CMakeLists.txt
+tools/templates/sample_component/include/main.h
+tools/templates/sample_component/main.c
+tools/templates/sample_project/CMakeLists.txt
+tools/templates/sample_project/main/CMakeLists.txt
+tools/templates/sample_project/main/main.c
+tools/unit-test-app/**/*
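The churn above is mostly an alphabetical re-sort of the pattern list, plus dropping entries for scripts deleted in this MR (e.g. `tools/ci/artifacts_handler.py`, `tools/ci/get_known_failure_cases_file.py`) and adding `tools/ci/idf_pytest/**/*`. A quick sorted-order check for such list files (path assumed, adjust to the actual file):

```python
# assert a pattern-list file stays alphabetically sorted
from pathlib import Path

lines = [ln for ln in Path('tools/ci/exclude_check_tools_files.txt').read_text().splitlines() if ln]  # assumed path
assert lines == sorted(lines), 'pattern list is not sorted'
```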


@@ -67,7 +67,6 @@ tools/ci/check_requirement_files.py
 tools/ci/check_rules_components_patterns.py
 tools/ci/check_soc_headers_leak.py
 tools/ci/check_soc_struct_headers.py
-tools/ci/check_test_files.py
 tools/ci/check_tools_files_patterns.py
 tools/ci/check_type_comments.py
 tools/ci/checkout_project_ref.py


@@ -1,22 +0,0 @@
# SPDX-FileCopyrightText: 2023 Espressif Systems (Shanghai) CO LTD
# SPDX-License-Identifier: Apache-2.0
import os
import urllib3
from minio import Minio
from artifacts_handler import get_minio_client
def getenv(env_var: str) -> str:
try:
return os.environ[env_var]
except KeyError as e:
raise Exception(f'Environment variable {env_var} not set') from e
if __name__ == '__main__':
client = get_minio_client()
file_name = getenv('KNOWN_FAILURE_CASES_FILE_NAME')
client.fget_object('ignore-test-result-files', file_name, file_name)


@@ -1,22 +1,21 @@
 # SPDX-FileCopyrightText: 2024-2025 Espressif Systems (Shanghai) CO LTD
 # SPDX-License-Identifier: Apache-2.0
 import os
+import subprocess
 import sys
 import typing as t
-from typing import Literal

 from dynamic_pipelines.constants import BINARY_SIZE_METRIC_NAME
 from idf_build_apps import App
 from idf_build_apps import CMakeApp
-from idf_build_apps import json_to_app
-from idf_ci_local.uploader import AppUploader
-from idf_ci_local.uploader import get_app_uploader
+from idf_build_apps.utils import rmdir
+
+if t.TYPE_CHECKING:
+    pass


 class IdfCMakeApp(CMakeApp):
-    uploader: t.ClassVar[t.Optional['AppUploader']] = get_app_uploader()
-    build_system: Literal['idf_cmake'] = 'idf_cmake'
+    build_system: t.Literal['idf_cmake'] = 'idf_cmake'

     def _initialize_hook(self, **kwargs: t.Any) -> None:
         # ensure this env var exists
@@ -27,8 +26,24 @@ class IdfCMakeApp(CMakeApp):
     def _post_build(self) -> None:
         super()._post_build()
-        if self.uploader:
-            self.uploader.upload_app(self.build_path)
+        # only upload in CI
+        if os.getenv('CI_JOB_ID'):
+            subprocess.run(
+                [
+                    'idf-ci',
+                    'gitlab',
+                    'upload-artifacts',
+                    self.app_dir,
+                ],
+                stdout=sys.stdout,
+                stderr=sys.stderr,
+            )
+            rmdir(
+                self.build_path,
+                exclude_file_patterns=[
+                    'build_log.txt',
+                ],
+            )


 class Metrics:
@@ -74,26 +89,6 @@ class AppWithMetricsInfo(IdfCMakeApp):
         arbitrary_types_allowed = True
def dump_apps_to_txt(apps: t.List[App], output_filepath: str) -> None:
with open(output_filepath, 'w') as fw:
for app in apps:
fw.write(app.model_dump_json() + '\n')
def import_apps_from_txt(input_filepath: str) -> t.List[App]:
apps: t.List[App] = []
with open(input_filepath) as fr:
for line in fr:
if line := line.strip():
try:
apps.append(json_to_app(line, extra_classes=[IdfCMakeApp]))
except Exception: # noqa
print('Failed to deserialize app from line: %s' % line)
sys.exit(1)
return apps
 def enrich_apps_with_metrics_info(
     app_metrics_info_map: t.Dict[str, t.Dict[str, t.Any]], apps: t.List[App]
 ) -> t.List[AppWithMetricsInfo]:


@@ -1,172 +0,0 @@
# SPDX-FileCopyrightText: 2024 Espressif Systems (Shanghai) CO LTD
# SPDX-License-Identifier: Apache-2.0
import abc
import glob
import os
import typing as t
from datetime import timedelta
from zipfile import ZIP_DEFLATED
from zipfile import ZipFile
import minio
from artifacts_handler import ArtifactType
from artifacts_handler import get_minio_client
from artifacts_handler import getenv
from idf_build_apps import App
from idf_build_apps.utils import rmdir
from idf_ci_utils import IDF_PATH
from idf_pytest.constants import DEFAULT_BUILD_LOG_FILENAME
from idf_pytest.constants import DEFAULT_SIZE_JSON_FILENAME
class AppDownloader:
ALL_ARTIFACT_TYPES = [ArtifactType.MAP_AND_ELF_FILES, ArtifactType.BUILD_DIR_WITHOUT_MAP_AND_ELF_FILES]
@abc.abstractmethod
def _download_app(self, app_build_path: str, artifact_type: ArtifactType) -> None:
pass
def download_app(self, app_build_path: str, artifact_type: t.Optional[ArtifactType] = None) -> None:
"""
Download the app
:param app_build_path: the path to the build directory
:param artifact_type: if not specify, download all types of artifacts
:return: None
"""
if not artifact_type:
for _artifact_type in self.ALL_ARTIFACT_TYPES:
self._download_app(app_build_path, _artifact_type)
else:
self._download_app(app_build_path, artifact_type)
class AppUploader(AppDownloader):
TYPE_PATTERNS_DICT = {
ArtifactType.MAP_AND_ELF_FILES: [
'bootloader/*.map',
'bootloader/*.elf',
'esp_tee/*.map',
'esp_tee/*.elf',
'*.map',
'*.elf',
'gdbinit/*',
],
ArtifactType.BUILD_DIR_WITHOUT_MAP_AND_ELF_FILES: [
'*.bin',
'bootloader/*.bin',
'esp_tee/*.bin',
'partition_table/*.bin',
'flasher_args.json',
'flash_project_args',
'config/sdkconfig.json',
'sdkconfig',
'project_description.json',
],
ArtifactType.LOGS: [
DEFAULT_BUILD_LOG_FILENAME,
],
}
def __init__(self, pipeline_id: t.Union[str, int, None] = None) -> None:
self.pipeline_id = str(pipeline_id or '1')
self._client = get_minio_client()
def get_app_object_name(self, app_path: str, zip_name: str, artifact_type: ArtifactType) -> str:
return f'{self.pipeline_id}/{artifact_type.value}/{app_path}/{zip_name}'
def _upload_app(self, app_build_path: str, artifact_type: ArtifactType) -> bool:
app_path, build_dir = os.path.split(app_build_path)
zip_filename = f'{build_dir}.zip'
has_file = False
with ZipFile(
zip_filename,
'w',
compression=ZIP_DEFLATED,
# 1 is the fastest compression level
# the size differs not much between 1 and 9
compresslevel=1,
) as zw:
for pattern in self.TYPE_PATTERNS_DICT[artifact_type]:
for file in glob.glob(os.path.join(app_build_path, pattern), recursive=True):
zw.write(file)
has_file = True
uploaded = False
try:
if has_file:
obj_name = self.get_app_object_name(app_path, zip_filename, artifact_type)
self._client.fput_object(getenv('IDF_S3_BUCKET'), obj_name, zip_filename)
uploaded = True
finally:
os.remove(zip_filename)
return uploaded
def upload_app(self, app_build_path: str, artifact_type: t.Optional[ArtifactType] = None) -> None:
uploaded = False
if not artifact_type:
upload_types: t.Iterable[ArtifactType] = self.TYPE_PATTERNS_DICT.keys()
else:
upload_types = [artifact_type]
# Upload of size.json files is handled by GitLab CI via "artifacts_handler.py" script.
print(f'Uploading {app_build_path} {[k.value for k in upload_types]} to minio server')
for upload_type in upload_types:
uploaded |= self._upload_app(app_build_path, upload_type)
if uploaded:
rmdir(app_build_path, exclude_file_patterns=[DEFAULT_BUILD_LOG_FILENAME, DEFAULT_SIZE_JSON_FILENAME])
def _download_app(self, app_build_path: str, artifact_type: ArtifactType) -> None:
app_path, build_dir = os.path.split(app_build_path)
zip_filename = f'{build_dir}.zip'
# path are relative to IDF_PATH
current_dir = os.getcwd()
os.chdir(IDF_PATH)
try:
obj_name = self.get_app_object_name(app_path, zip_filename, artifact_type)
print(f'Downloading {obj_name}')
try:
try:
self._client.stat_object(getenv('IDF_S3_BUCKET'), obj_name)
except minio.error.S3Error as e:
raise RuntimeError(
f'No such file on minio server: {obj_name}. '
f'Probably the build failed or the artifacts got expired. '
f'Full error message: {str(e)}'
)
else:
self._client.fget_object(getenv('IDF_S3_BUCKET'), obj_name, zip_filename)
print(f'Downloaded to {zip_filename}')
except minio.error.S3Error as e:
raise RuntimeError("Shouldn't happen, please report this bug in the CI channel" + str(e))
with ZipFile(zip_filename, 'r') as zr:
zr.extractall()
os.remove(zip_filename)
finally:
os.chdir(current_dir)
def get_app_presigned_url(self, app: App, artifact_type: ArtifactType) -> str:
obj_name = self.get_app_object_name(app.app_dir, f'{app.build_dir}.zip', artifact_type)
try:
self._client.stat_object(
getenv('IDF_S3_BUCKET'),
obj_name,
)
except minio.error.S3Error:
return ''
else:
return self._client.get_presigned_url( # type: ignore
'GET', getenv('IDF_S3_BUCKET'), obj_name, expires=timedelta(days=4)
)
def get_app_uploader() -> t.Optional['AppUploader']:
if parent_pipeline_id := os.getenv('PARENT_PIPELINE_ID'):
return AppUploader(parent_pipeline_id)
return None


@@ -1,4 +1,4 @@
-# SPDX-FileCopyrightText: 2020-2024 Espressif Systems (Shanghai) CO LTD
+# SPDX-FileCopyrightText: 2020-2025 Espressif Systems (Shanghai) CO LTD
 # SPDX-License-Identifier: Apache-2.0

 # internal use only for CI
 # some CI related util functions
@@ -9,7 +9,6 @@ import subprocess
 import sys
 import typing as t
 from functools import cached_property
-from pathlib import Path

 IDF_PATH: str = os.path.abspath(os.getenv('IDF_PATH', os.path.join(os.path.dirname(__file__), '..', '..')))
@@ -233,18 +232,6 @@ class GitlabYmlConfig:
         return self.config[name]  # type: ignore

-def get_all_manifest_files() -> t.List[str]:
-    paths: t.List[str] = []
-
-    for p in Path(IDF_PATH).glob('**/.build-test-rules.yml'):
-        if 'managed_components' in p.parts:
-            continue
-
-        paths.append(str(p))
-
-    return paths
-
 def sanitize_job_name(name: str) -> str:
     """
     Sanitize the job name from CI_JOB_NAME


@@ -4,18 +4,7 @@
 Pytest Related Constants. Don't import third-party packages here.
 """

-import os
 import typing as t
-import warnings
-from dataclasses import dataclass
-from enum import Enum
-from functools import cached_property
-from pathlib import Path
-
-from _pytest.python import Function
-from idf_ci_utils import IDF_PATH
-from idf_ci_utils import idf_relpath
-from pytest_embedded.utils import to_list

 SUPPORTED_TARGETS = [
     'esp32',
@@ -30,333 +19,10 @@ SUPPORTED_TARGETS = [
     'esp32c61',
 ]

 PREVIEW_TARGETS: t.List[str] = []  # this PREVIEW_TARGETS excludes 'linux' target
-DEFAULT_SDKCONFIG = 'default'
 DEFAULT_LOGDIR = 'pytest-embedded'
TARGET_MARKERS = {
'esp32': 'support esp32 target',
'esp32s2': 'support esp32s2 target',
'esp32s3': 'support esp32s3 target',
'esp32c3': 'support esp32c3 target',
'esp32c2': 'support esp32c2 target',
'esp32c5': 'support esp32c5 target',
'esp32c6': 'support esp32c6 target',
'esp32h2': 'support esp32h2 target',
'esp32h4': 'support esp32h4 target', # as preview
'esp32h21': 'support esp32h21 target', # as preview
'esp32p4': 'support esp32p4 target',
'esp32c61': 'support esp32c61 target',
'linux': 'support linux target',
}
SPECIAL_MARKERS = {
'supported_targets': 'support all officially announced supported targets, refer to `SUPPORTED_TARGETS`',
'preview_targets': "support all preview targets ('none')",
'all_targets': 'support all targets, including supported ones and preview ones',
'temp_skip_ci': 'temp skip tests for specified targets only in ci',
'temp_skip': 'temp skip tests for specified targets both in ci and locally',
'nightly_run': 'tests should be executed as part of the nightly trigger pipeline',
'host_test': 'tests which should not be built at the build stage, and instead built in host_test stage',
'require_elf': 'tests which require elf file',
}
ENV_MARKERS = {
# special markers
'qemu': 'build and test using qemu, not real target',
'macos': 'tests should be run on macos hosts',
# single-dut markers
'generic': 'tests should be run on generic runners',
'flash_suspend': 'support flash suspend feature',
'eth_ip101': 'connected via IP101 ethernet transceiver',
'eth_lan8720': 'connected via LAN8720 ethernet transceiver',
'eth_rtl8201': 'connected via RTL8201 ethernet transceiver',
'eth_ksz8041': 'connected via KSZ8041 ethernet transceiver',
'eth_dp83848': 'connected via DP83848 ethernet transceiver',
'eth_w5500': 'SPI Ethernet module with two W5500',
'eth_ksz8851snl': 'SPI Ethernet module with two KSZ8851SNL',
'eth_dm9051': 'SPI Ethernet module with two DM9051',
'quad_psram': 'runners with quad psram',
'octal_psram': 'runners with octal psram',
'usb_host_flash_disk': 'usb host runners with USB flash disk attached',
'usb_device': 'usb device runners',
'ethernet_ota': 'ethernet OTA runners',
'flash_encryption': 'Flash Encryption runners',
'flash_encryption_f4r8': 'Flash Encryption runners with 4-line flash and 8-line psram',
'flash_encryption_f8r8': 'Flash Encryption runners with 8-line flash and 8-line psram',
'flash_encryption_ota': 'Flash Encryption runners with ethernet OTA support with 4mb flash size',
'flash_multi': 'Multiple flash chips tests',
'psram': 'Chip has 4-line psram',
'ir_transceiver': 'runners with a pair of IR transmitter and receiver',
'twai_transceiver': 'runners with a TWAI PHY transceiver',
'flash_encryption_wifi_high_traffic': 'Flash Encryption runners with wifi high traffic support',
'ethernet': 'ethernet runner',
'ethernet_stress': 'ethernet runner with stress test',
'ethernet_flash_8m': 'ethernet runner with 8mb flash',
'ethernet_router': 'both the runner and dut connect to the same router through ethernet NIC',
'ethernet_vlan': 'ethernet runner GARM-32-SH-1-R16S5N3',
'wifi_ap': 'a wifi AP in the environment',
'wifi_router': 'both the runner and dut connect to the same wifi router',
'wifi_high_traffic': 'wifi high traffic runners',
'wifi_wlan': 'wifi runner with a wireless NIC',
'wifi_iperf': 'the AP and ESP dut were placed in a shielded box - for iperf test',
'Example_ShieldBox': 'multiple shielded APs connected to shielded ESP DUT via RF cable with programmable attenuator', # noqa E501
'xtal_26mhz': 'runner with 26MHz xtal on board',
'xtal_40mhz': 'runner with 40MHz xtal on board',
'external_flash': 'external flash memory connected via VSPI (FSPI)',
'sdcard_sdmode': 'sdcard running in SD mode, to be removed after test migration',
'sdcard_spimode': 'sdcard running in SPI mode',
'emmc': 'eMMC card',
'sdcard': 'sdcard runner',
'MSPI_F8R8': 'runner with Octal Flash and Octal PSRAM',
'MSPI_F4R8': 'runner with Quad Flash and Octal PSRAM',
'MSPI_F4R4': 'runner with Quad Flash and Quad PSRAM',
'flash_120m': 'runner with 120M supported Flash',
'jtag': 'runner where the chip is accessible through JTAG as well',
'usb_serial_jtag': 'runner where the chip is accessible through builtin JTAG as well',
'adc': 'ADC related tests should run on adc runners',
'xtal32k': 'Runner with external 32k crystal connected',
'no32kXtal': 'Runner with no external 32k crystal connected',
'psramv0': 'Runner with PSRAM version 0',
'esp32eco3': 'Runner with esp32 eco3 connected',
'ecdsa_efuse': 'Runner with test ECDSA private keys programmed in efuse',
'ccs811': 'Runner with CCS811 connected',
'nvs_encr_hmac': 'Runner with test HMAC key programmed in efuse',
'i2c_oled': 'Runner with ssd1306 I2C oled connected',
'httpbin': 'runner for tests that need to access the httpbin service',
'flash_4mb': 'C2 runners with 4 MB flash',
'jtag_re_enable': 'Runner to re-enable jtag which is softly disabled by burning bit SOFT_DIS_JTAG on eFuse',
'es8311': 'Development board that carries es8311 codec',
'camera': 'Runner with camera',
'ov5647': 'Runner with camera ov5647',
# multi-dut markers
'multi_dut_modbus_rs485': 'a pair of runners connected by RS485 bus',
'ieee802154': 'ieee802154 related tests should run on ieee802154 runners.',
'openthread_br': 'tests should be used for openthread border router.',
'openthread_bbr': 'tests should be used for openthread border router linked to Internet.',
'openthread_sleep': 'tests should be used for openthread sleepy device.',
'zigbee_multi_dut': 'zigbee runner which have multiple duts.',
'wifi_two_dut': 'tests should be run on runners which has two wifi duts connected.',
'generic_multi_device': 'generic multiple devices whose corresponding gpio pins are connected to each other.',
'twai_network': 'multiple runners form a TWAI network.',
'sdio_master_slave': 'Test sdio multi board, esp32+esp32',
'sdio_multidev_32_c6': 'Test sdio multi board, esp32+esp32c6',
'sdio_multidev_p4_c5': 'Test sdio multi board, esp32p4+esp32c5',
'usj_device': 'Test usb_serial_jtag and usb_serial_jtag is used as serial only (not console)',
'twai_std': 'twai runner with all twai supported targets connect to usb-can adapter',
'lp_i2s': 'lp_i2s runner tested with hp_i2s',
'ram_app': 'ram_app runners',
'esp32c3eco7': 'esp32c3 major version(v1.1) chips',
'esp32c2eco4': 'esp32c2 major version(v2.0) chips',
'recovery_bootloader': 'Runner with recovery bootloader offset set in eFuse',
}
 # by default the timeout is 1h, for some special cases we need to extend it
 TIMEOUT_4H_MARKERS = [
     'ethernet_stress',
 ]
DEFAULT_CONFIG_RULES_STR = ['sdkconfig.ci=default', 'sdkconfig.ci.*=', '=default']
DEFAULT_IGNORE_WARNING_FILEPATH = os.path.join(IDF_PATH, 'tools', 'ci', 'ignore_build_warnings.txt')
DEFAULT_BUILD_TEST_RULES_FILEPATH = os.path.join(IDF_PATH, '.gitlab', 'ci', 'default-build-test-rules.yml')
DEFAULT_FULL_BUILD_TEST_COMPONENTS = [
'cxx',
'esp_common',
'esp_hw_support',
'esp_rom',
'esp_system',
'esp_timer',
'freertos',
'hal',
'heap',
'log',
'newlib',
'riscv',
'soc',
'xtensa',
]
DEFAULT_FULL_BUILD_TEST_FILEPATTERNS = [
# tools
'tools/cmake/**/*',
'tools/tools.json',
# ci
'tools/ci/ignore_build_warnings.txt',
]
DEFAULT_BUILD_LOG_FILENAME = 'build_log.txt'
DEFAULT_SIZE_JSON_FILENAME = 'size.json'
class CollectMode(str, Enum):
SINGLE_SPECIFIC = 'single_specific'
MULTI_SPECIFIC = 'multi_specific'
MULTI_ALL_WITH_PARAM = 'multi_all_with_param'
ALL = 'all'
class PytestApp:
"""
Pytest App with relative path to IDF_PATH
"""
def __init__(self, path: str, target: str, config: str) -> None:
self.path = idf_relpath(path)
self.target = target
self.config = config
def __hash__(self) -> int:
return hash((self.path, self.target, self.config))
@cached_property
def build_dir(self) -> str:
return os.path.join(self.path, f'build_{self.target}_{self.config}')
@dataclass
class PytestCase:
apps: t.List[PytestApp]
item: Function
multi_dut_without_param: bool
def __hash__(self) -> int:
return hash((self.path, self.name, self.apps, self.all_markers))
@cached_property
def path(self) -> str:
return str(self.item.path)
@cached_property
def name(self) -> str:
return self.item.originalname # type: ignore
@cached_property
def targets(self) -> t.List[str]:
if not self.multi_dut_without_param:
return [app.target for app in self.apps]
# multi-dut test cases without parametrize
skip = True
for _t in [app.target for app in self.apps]:
if _t in self.target_markers:
skip = False
warnings.warn(
f'`pytest.mark.[TARGET]` defined in parametrize for multi-dut test cases is deprecated. ' # noqa: W604
f'Please use parametrize instead for test case {self.item.nodeid}'
)
break
if not skip:
return [app.target for app in self.apps]
return [''] * len(self.apps) # this will help to filter these cases out later
@cached_property
def is_single_dut_test_case(self) -> bool:
return True if len(self.apps) == 1 else False
@cached_property
def is_host_test(self) -> bool:
return 'host_test' in self.all_markers or 'linux' in self.targets
# the following markers could be changed dynamically, don't use cached_property
@property
def all_markers(self) -> t.Set[str]:
return {marker.name for marker in self.item.iter_markers()}
@property
def skip_targets(self) -> t.Set[str]:
def _get_temp_markers_disabled_targets(marker_name: str) -> t.Set[str]:
temp_marker = self.item.get_closest_marker(marker_name)
if not temp_marker:
return set()
# temp markers should always use keyword arguments `targets` and `reason`
if not temp_marker.kwargs.get('targets') or not temp_marker.kwargs.get('reason'):
raise ValueError(
f'`{marker_name}` should always use keyword arguments `targets` and `reason`. ' # noqa: W604
f'For example: '
f'`@pytest.mark.{marker_name}(targets=["esp32"], reason="IDF-xxxx, will fix it ASAP")`'
)
return set(to_list(temp_marker.kwargs['targets'])) # type: ignore
temp_skip_ci_targets = _get_temp_markers_disabled_targets('temp_skip_ci')
temp_skip_targets = _get_temp_markers_disabled_targets('temp_skip')
# in CI we skip the union of `temp_skip` and `temp_skip_ci`
if os.getenv('CI_JOB_ID'):
_skip_targets = temp_skip_ci_targets.union(temp_skip_targets)
else: # we use `temp_skip` locally
_skip_targets = temp_skip_targets
return _skip_targets
@property
def target_markers(self) -> t.Set[str]:
return {marker for marker in self.all_markers if marker in TARGET_MARKERS} - self.skip_targets
@property
def env_markers(self) -> t.Set[str]:
return {marker for marker in self.all_markers if marker in ENV_MARKERS}
@property
def target_selector(self) -> str:
return ','.join(app.target for app in self.apps)
@property
def requires_elf_or_map(self) -> bool:
"""
This property determines whether the test case requires elf or map file. By default, one app in the test case
only requires .bin files.
:return: True if the test case requires elf or map file, False otherwise
"""
if 'jtag' in self.env_markers or 'usb_serial_jtag' in self.env_markers:
return True
cases_need_elf = ['panic', 'gdbstub_runtime']
if 'require_elf' in SPECIAL_MARKERS:
return True
for case in cases_need_elf:
if any(case in Path(app.path).parts for app in self.apps):
return True
return False
def all_built_in_app_lists(self, app_lists: t.Optional[t.List[str]] = None) -> t.Optional[str]:
"""
Check if all binaries of the test case are built in the app lists.
:param app_lists: app lists to check
:return: debug string if not all binaries are built in the app lists, None otherwise
"""
if app_lists is None:
# ignore this feature
return None
bin_found = [0] * len(self.apps)
for i, app in enumerate(self.apps):
if app.build_dir in app_lists:
bin_found[i] = 1
if sum(bin_found) == 0:
msg = f'Skip test case {self.name} because all following binaries are not listed in the app lists: ' # noqa: E713
for app in self.apps:
msg += f'\n - {app.build_dir}'
print(msg)
return msg
if sum(bin_found) == len(self.apps):
return None
# some found, some not, looks suspicious
msg = f'Found some binaries of test case {self.name} are not listed in the app lists.' # noqa: E713
for i, app in enumerate(self.apps):
if bin_found[i] == 0:
msg += f'\n - {app.build_dir}'
msg += '\nMight be a issue of .build-test-rules.yml files'
print(msg)
return msg


@@ -1,107 +1,94 @@
 # SPDX-FileCopyrightText: 2023-2025 Espressif Systems (Shanghai) CO LTD
 # SPDX-License-Identifier: Apache-2.0
-import importlib
-import logging
 import os
-import sys
 import typing as t
-from collections import defaultdict
-from functools import cached_property
-from unittest.mock import MagicMock
+from pathlib import Path
 from xml.etree import ElementTree as ET

 import pytest
-from _pytest.config import ExitCode
-from _pytest.main import Session
+import yaml
+from _pytest.config import Config
 from _pytest.python import Function
 from _pytest.runner import CallInfo
-from idf_build_apps import App
-from idf_build_apps.constants import BuildStatus
-from idf_ci_utils import idf_relpath
+from dynamic_pipelines.constants import KNOWN_GENERATE_TEST_CHILD_PIPELINE_WARNINGS_FILEPATH
+from idf_ci import IdfPytestPlugin
+from idf_ci import PytestCase
+from idf_ci.idf_pytest.plugin import IDF_CI_PYTEST_DEBUG_INFO_KEY
+from idf_ci_utils import to_list
 from pytest_embedded import Dut
-from pytest_embedded.plugin import parse_multi_dut_args
 from pytest_embedded.utils import find_by_suffix
-from pytest_embedded.utils import to_list
 from pytest_ignore_test_results.ignore_results import ChildCase
 from pytest_ignore_test_results.ignore_results import ChildCasesStashKey

-from .constants import CollectMode
-from .constants import DEFAULT_SDKCONFIG
-from .constants import PREVIEW_TARGETS
-from .constants import PytestApp
-from .constants import PytestCase
-from .constants import SUPPORTED_TARGETS
-from .utils import comma_sep_str_to_list
 from .utils import format_case_id
 from .utils import merge_junit_files
 from .utils import normalize_testcase_file_path

-IDF_PYTEST_EMBEDDED_KEY = pytest.StashKey['IdfPytestEmbedded']()
-ITEM_FAILED_CASES_KEY = pytest.StashKey[list]()
-ITEM_FAILED_KEY = pytest.StashKey[bool]()
-ITEM_PYTEST_CASE_KEY = pytest.StashKey[PytestCase]()
+IDF_LOCAL_PLUGIN_KEY = pytest.StashKey['IdfLocalPlugin']()


-class IdfPytestEmbedded:
+def requires_elf_or_map(case: PytestCase) -> bool:
"""
Determines whether the test case requires elf or map file. By default, one app in the test case
only requires .bin files.
:return: True if the test case requires elf or map file, False otherwise
"""
if 'jtag' in case.env_markers or 'usb_serial_jtag' in case.env_markers:
return True
folders_need_elf = ['panic', 'gdbstub_runtime']
if 'require_elf' in case.all_markers:
return True
for folder in folders_need_elf:
if any(folder in Path(app.path).parts for app in case.apps):
return True
return False
def skipped_targets(item: Function) -> t.Set[str]:
def _get_temp_markers_disabled_targets(marker_name: str) -> t.Set[str]:
temp_marker = item.get_closest_marker(marker_name)
if not temp_marker:
return set()
# temp markers should always use keyword arguments `targets` and `reason`
if not temp_marker.kwargs.get('targets') or not temp_marker.kwargs.get('reason'):
raise ValueError(
f'`{marker_name}` should always use keyword arguments `targets` and `reason`. ' # noqa: W604
f'For example: '
f'`@pytest.mark.{marker_name}(targets=["esp32"], reason="IDF-xxxx, will fix it ASAP")`'
)
return set(to_list(temp_marker.kwargs['targets']))
temp_skip_ci_targets = _get_temp_markers_disabled_targets('temp_skip_ci')
temp_skip_targets = _get_temp_markers_disabled_targets('temp_skip')
# in CI we skip the union of `temp_skip` and `temp_skip_ci`
if os.getenv('CI_JOB_ID'):
_skip_targets = temp_skip_ci_targets.union(temp_skip_targets)
else: # we use `temp_skip` locally
_skip_targets = temp_skip_targets
return _skip_targets
class IdfLocalPlugin:
     UNITY_RESULT_MAPPINGS = {
         'PASS': 'passed',
         'FAIL': 'failed',
         'IGNORE': 'skipped',
     }

-    def __init__(
-        self,
-        target: t.Union[t.List[str], str],
-        *,
-        config_name: t.Optional[str] = None,
-        single_target_duplicate_mode: bool = False,
-        apps: t.Optional[t.List[App]] = None,
-    ):
-        if isinstance(target, str):
-            # sequence also matters
-            self.target = comma_sep_str_to_list(target)
-        else:
-            self.target = target
-
-        if not self.target:
-            raise ValueError('`target` should not be empty')
-
-        self.config_name = config_name
-        # these are useful while gathering all the multi-dut test cases
-        # when this mode is activated,
-        #
-        # pytest.mark.esp32
-        # pytest.mark.parametrize('count', [2], indirect=True)
-        # def test_foo(dut):
-        #     pass
-        #
-        # should be collected when running `pytest --target esp32`
-        #
-        # otherwise, it should be collected when running `pytest --target esp32,esp32`
-        self._single_target_duplicate_mode = single_target_duplicate_mode
-
-        self.apps_list = (
-            [os.path.join(idf_relpath(app.app_dir), app.build_dir) for app in apps if app.build_status == BuildStatus.SUCCESS]
-            if apps is not None
-            else None
-        )
-
-        self.cases: t.List[PytestCase] = []
-        # record the additional info
-        # test case id: {key: value}
-        self.additional_info: t.Dict[str, t.Dict[str, t.Any]] = defaultdict(dict)
-
-    @cached_property
-    def collect_mode(self) -> CollectMode:
-        if len(self.target) == 1:
-            if self.target[0] == CollectMode.MULTI_ALL_WITH_PARAM:
-                return CollectMode.MULTI_ALL_WITH_PARAM
-            else:
-                return CollectMode.SINGLE_SPECIFIC
-        else:
-            return CollectMode.MULTI_SPECIFIC
+    def __init__(self) -> None:
+        with open(KNOWN_GENERATE_TEST_CHILD_PIPELINE_WARNINGS_FILEPATH) as fr:
+            known_warnings_dict = yaml.safe_load(fr) or dict()
+
+        self.exclude_no_env_markers_test_cases: t.Set[str] = set(known_warnings_dict['no_env_marker_test_cases'])

     @staticmethod
     def get_param(item: Function, key: str, default: t.Any = None) -> t.Any:
@@ -112,210 +99,57 @@ class IdfPytestEmbedded:
         return item.callspec.params.get(key, default) or default

-    def item_to_pytest_case(self, item: Function) -> t.Optional[PytestCase]:
-        """
-        Turn pytest item to PytestCase
-        """
-        count = self.get_param(item, 'count', 1)
-
-        # default app_path is where the test script locates
-        app_paths = to_list(parse_multi_dut_args(count, self.get_param(item, 'app_path', os.path.dirname(item.path))))
-        configs = to_list(parse_multi_dut_args(count, self.get_param(item, 'config', DEFAULT_SDKCONFIG)))
-        targets = to_list(parse_multi_dut_args(count, self.get_param(item, 'target')))
-
-        multi_dut_without_param = False
-        if count > 1 and targets == [None] * count:
-            multi_dut_without_param = True
-            try:
-                targets = to_list(parse_multi_dut_args(count, '|'.join(self.target)))  # check later while collecting
-            except ValueError:  # count doesn't match
-                return None
-        elif targets is None:
-            targets = self.target
-
-        return PytestCase(
-            apps=[PytestApp(app_paths[i], targets[i], configs[i]) for i in range(count)],
-            item=item,
-            multi_dut_without_param=multi_dut_without_param
-        )
+    @pytest.hookimpl(wrapper=True)
+    def pytest_collection_modifyitems(self, config: Config, items: t.List[Function]) -> t.Generator[None, None, None]:
+        yield  # throw it back to idf-ci
+
+        deselected_items = []
+
+        # Filter
+        filtered_items = []
def pytest_collectstart(self) -> None:
# mock the optional packages while collecting locally
if not os.getenv('CI_JOB_ID') or os.getenv('PYTEST_IGNORE_COLLECT_IMPORT_ERROR') == '1':
# optional packages required by test scripts
for p in [
'scapy',
'scapy.all',
'websocket', # websocket-client
'netifaces',
'RangeHTTPServer', # rangehttpserver
'dbus', # dbus-python
'dbus.mainloop',
'dbus.mainloop.glib',
'google.protobuf', # protobuf
'google.protobuf.internal',
'bleak',
'paho', # paho-mqtt
'paho.mqtt',
'paho.mqtt.client',
'paramiko',
'netmiko',
'pyecharts',
'pyecharts.options',
'pyecharts.charts',
'can', # python-can
]:
try:
importlib.import_module(p)
except ImportError:
logging.warning(f'Optional package {p} is not installed, mocking it while collecting...')
sys.modules[p] = MagicMock()
@pytest.hookimpl(tryfirst=True)
def pytest_collection_modifyitems(self, items: t.List[Function]) -> None:
"""
Background info:
We're using `pytest.mark.[TARGET]` as a syntactic sugar to indicate that they are actually supported by all
the listed targets. For example,
>>> @pytest.mark.esp32
>>> @pytest.mark.esp32s2
should be treated as
>>> @pytest.mark.parametrize('target', [
>>> 'esp32',
>>> 'esp32s2',
>>> ], indirect=True)
All single-dut test cases, and some of the multi-dut test cases with the same targets, are using this
way to indicate the supported targets.
To avoid ambiguity,
- when we're collecting single-dut test cases with esp32, we call
`pytest --collect-only --target esp32`
- when we're collecting multi-dut test cases, we list all the targets, even when they're the same
`pytest --collect-only --target esp32,esp32` for two esp32 connected
`pytest --collect-only --target esp32,esp32s2` for esp32 and esp32s2 connected
therefore, we have two different logic for searching test cases, explained in 2.1 and 2.2
"""
# 1. Filter according to nighty_run related markers
if os.getenv('INCLUDE_NIGHTLY_RUN') == '1':
# nightly_run and non-nightly_run cases are both included
pass
elif os.getenv('NIGHTLY_RUN') == '1':
# only nightly_run cases are included
items[:] = [_item for _item in items if _item.get_closest_marker('nightly_run') is not None]
else:
# only non-nightly_run cases are included
items[:] = [_item for _item in items if _item.get_closest_marker('nightly_run') is None]
# 2. Add markers according to special markers
item_to_case_dict: t.Dict[Function, PytestCase] = {}
         for item in items:
-            case = self.item_to_pytest_case(item)
-            if case is None:
+            case = IdfPytestPlugin.get_case_by_item(item)
+            if not case:
+                deselected_items.append(item)
                 continue
-            item.stash[ITEM_PYTEST_CASE_KEY] = item_to_case_dict[item] = case
-            if 'supported_targets' in item.keywords:
-                for _target in SUPPORTED_TARGETS:
-                    item.add_marker(_target)
-            if 'preview_targets' in item.keywords:
-                for _target in PREVIEW_TARGETS:
-                    item.add_marker(_target)
-            if 'all_targets' in item.keywords:
-                for _target in [*SUPPORTED_TARGETS, *PREVIEW_TARGETS]:
-                    item.add_marker(_target)
-
-            # add single-dut "target" as param
-            _item_target_param = self.get_param(item, 'target', None)
-            if case.is_single_dut_test_case and _item_target_param and _item_target_param not in case.all_markers:
-                item.add_marker(_item_target_param)
-
-        items[:] = [_item for _item in items if _item in item_to_case_dict]
+            if case.target_selector in skipped_targets(item):
+                deselected_items.append(item)
+                item.stash[IDF_CI_PYTEST_DEBUG_INFO_KEY] = 'skipped by temp_skip markers'
+                continue
+
+            if not case.env_markers and 'host_test' not in case.all_markers:
+                if case.name in self.exclude_no_env_markers_test_cases:
+                    deselected_items.append(item)
+                    continue
+
+                raise ValueError(
+                    f'Test case {case.name} does not have any env markers. '
+                    f'Please add env markers to the test case or add it to the '
+                    f'`no_env_markers_test_cases` list in {KNOWN_GENERATE_TEST_CHILD_PIPELINE_WARNINGS_FILEPATH}'
+                )

-        # 3.1. CollectMode.SINGLE_SPECIFIC, like `pytest --target esp32`
-        if self.collect_mode == CollectMode.SINGLE_SPECIFIC:
-            filtered_items = []
-            for item in items:
-                case = item_to_case_dict[item]
-
-                # single-dut one
-                if case.is_single_dut_test_case and self.target[0] in case.target_markers:
-                    filtered_items.append(item)
-
-                # multi-dut ones and in single_target_duplicate_mode
-                elif self._single_target_duplicate_mode and not case.is_single_dut_test_case:
-                    # ignore those test cases with `target` defined in parametrize, since these will be covered in 3.3
-                    if self.get_param(item, 'target', None) is None and self.target[0] in case.target_markers:
-                        filtered_items.append(item)
+            filtered_items.append(item)

         items[:] = filtered_items
-        # 3.2. CollectMode.MULTI_SPECIFIC, like `pytest --target esp32,esp32`
-        elif self.collect_mode == CollectMode.MULTI_SPECIFIC:
-            items[:] = [_item for _item in items if item_to_case_dict[_item].targets == self.target]
-        # 3.3. CollectMode.MULTI_ALL_WITH_PARAM, intended to be used by `get_pytest_cases`
-        else:
-            filtered_items = []
-            for item in items:
-                case = item_to_case_dict[item]
-                target = self.get_param(item, 'target', None)
-                if (
-                    not case.is_single_dut_test_case and
-                    target is not None and
-                    target not in case.skip_targets
-                ):
-                    filtered_items.append(item)
-            items[:] = filtered_items
-
-        # 4. filter according to the sdkconfig, if there's param 'config' defined
-        if self.config_name:
-            _items = []
-            for item in items:
-                case = item_to_case_dict[item]
-                if self.config_name not in set(app.config or DEFAULT_SDKCONFIG for app in case.apps):
-                    self.additional_info[case.name]['skip_reason'] = f'Only run with sdkconfig {self.config_name}'
-                else:
-                    _items.append(item)
-            items[:] = _items
-
-        # 5. filter by `self.apps_list`, skip the test case if not listed
-        # should only be used in CI
-        _items = []
-        for item in items:
-            case = item_to_case_dict[item]
-            if msg := case.all_built_in_app_lists(self.apps_list):
-                self.additional_info[case.name]['skip_reason'] = msg
-            else:
-                _items.append(item)
+
+        # Deselect
+        config.hook.pytest_deselected(items=deselected_items)

         # OKAY!!! All left ones will be executed, sort it and add more markers
-        items[:] = sorted(
-            _items, key=lambda x: (os.path.dirname(x.path), self.get_param(x, 'config', DEFAULT_SDKCONFIG))
-        )
+        items[:] = sorted(items, key=lambda x: (os.path.dirname(x.path), self.get_param(x, 'config', 'default')))
         for item in items:
-            case = item_to_case_dict[item]
+            case = IdfPytestPlugin.get_case_by_item(item)
             # set default timeout 10 minutes for each case
             if 'timeout' not in item.keywords:
                 item.add_marker(pytest.mark.timeout(10 * 60))
             # add 'xtal_40mhz' tag as a default tag for esp32c2 target
-            # only add this marker for esp32c2 cases
-            if 'esp32c2' in self.target and 'esp32c2' in case.targets and 'xtal_26mhz' not in case.all_markers:
+            if 'esp32c2' in case.targets and 'xtal_26mhz' not in case.all_markers:
                 item.add_marker('xtal_40mhz')

-    def pytest_report_collectionfinish(self, items: t.List[Function]) -> None:
-        self.cases = [item.stash[ITEM_PYTEST_CASE_KEY] for item in items]
-
     def pytest_custom_test_case_name(self, item: Function) -> str:
         return item.funcargs.get('test_case_name', item.nodeid)  # type: ignore
@@ -388,8 +222,3 @@ class IdfPytestEmbedded:
             case.attrib['ci_job_url'] = ci_job_url

         xml.write(junit)
-
-    def pytest_sessionfinish(self, session: Session, exitstatus: int) -> None:
-        if exitstatus != 0:
-            if exitstatus == ExitCode.NO_TESTS_COLLECTED:
-                session.exitstatus = 0
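The `skipped_targets` helper added in this file keeps the long-standing contract of the temp-skip markers; the usage its `ValueError` spells out looks like this (test body illustrative):

```python
# temp markers must pass keyword arguments `targets` and `reason`
import pytest

@pytest.mark.temp_skip_ci(targets=['esp32'], reason='IDF-xxxx, will fix it ASAP')
def test_something(dut) -> None:
    ...
```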


@@ -1,3 +0,0 @@
[pytest]
addopts = -p no:idf-ci
python_files = test_*.py


@@ -1,238 +0,0 @@
# SPDX-FileCopyrightText: 2023-2024 Espressif Systems (Shanghai) CO LTD
# SPDX-License-Identifier: Apache-2.0
import fnmatch
import io
import logging
import os.path
import typing as t
from contextlib import redirect_stdout
from pathlib import Path
import pytest
from _pytest.config import ExitCode
from idf_build_apps import App
from idf_build_apps import find_apps
from idf_build_apps.constants import SUPPORTED_TARGETS
from idf_build_apps.constants import BuildStatus
from idf_ci_local.app import IdfCMakeApp
from idf_ci_utils import IDF_PATH
from idf_ci_utils import get_all_manifest_files
from idf_ci_utils import idf_relpath
from idf_ci_utils import to_list
from idf_py_actions.constants import PREVIEW_TARGETS as TOOLS_PREVIEW_TARGETS
from idf_py_actions.constants import SUPPORTED_TARGETS as TOOLS_SUPPORTED_TARGETS
from .constants import DEFAULT_BUILD_LOG_FILENAME
from .constants import DEFAULT_CONFIG_RULES_STR
from .constants import DEFAULT_SIZE_JSON_FILENAME
from .constants import CollectMode
from .constants import PytestCase
from .plugin import IdfPytestEmbedded
def get_pytest_files(paths: t.List[str]) -> t.List[str]:
# this is a workaround to solve pytest collector super slow issue
# benchmark with
# - time pytest -m esp32 --collect-only
# user=15.57s system=1.35s cpu=95% total=17.741
# - time { find -name 'pytest_*.py'; } | xargs pytest -m esp32 --collect-only
# user=0.11s system=0.63s cpu=36% total=2.044
# user=1.76s system=0.22s cpu=43% total=4.539
# use glob.glob would also save a bunch of time
pytest_scripts: t.Set[str] = set()
for p in paths:
path = Path(p)
pytest_scripts.update(str(_p) for _p in path.glob('**/pytest_*.py') if 'managed_components' not in _p.parts)
return list(pytest_scripts)
def get_pytest_cases(
paths: t.Union[str, t.List[str]],
target: str = CollectMode.ALL,
*,
config_name: t.Optional[str] = None,
marker_expr: t.Optional[str] = None,
filter_expr: t.Optional[str] = None,
apps: t.Optional[t.List[App]] = None,
) -> t.List[PytestCase]:
"""
Return the list of test cases
For single-dut test cases, `target` could be
- [TARGET], e.g. `esp32`, to get the test cases for the given target
- or `single_all`, to get all single-dut test cases
For multi-dut test cases, `target` could be
- [TARGET,[TARGET...]], e.g. `esp32,esp32s2`, to get the test cases for the given targets
- or `multi_all`, to get all multi-dut test cases
:param paths: paths to search for pytest scripts
:param target: target or keywords to get test cases for, detailed above
:param config_name: sdkconfig name
:param marker_expr: pytest marker expression, `-m`
:param filter_expr: pytest filter expression, `-k`
:param apps: built app list, skip the tests required by apps not in the list
:return: list of test cases
"""
paths = to_list(paths)
cases: t.List[PytestCase] = []
pytest_scripts = get_pytest_files(paths) # type: ignore
if not pytest_scripts:
print(f'WARNING: no pytest scripts found for target {target} under paths {", ".join(paths)}')
return cases
def _get_pytest_cases(_target: str, _single_target_duplicate_mode: bool = False) -> t.List[PytestCase]:
collector = IdfPytestEmbedded(
_target, config_name=config_name, single_target_duplicate_mode=_single_target_duplicate_mode, apps=apps
)
with io.StringIO() as buf:
with redirect_stdout(buf):
cmd = ['--collect-only', *pytest_scripts, '--target', _target, '-q']
if marker_expr:
cmd.extend(['-m', marker_expr])
if filter_expr:
cmd.extend(['-k', filter_expr])
res = pytest.main(cmd, plugins=[collector])
if res.value != ExitCode.OK:
if res.value == ExitCode.NO_TESTS_COLLECTED:
print(f'WARNING: no pytest app found for target {_target} under paths {", ".join(paths)}')
else:
print(buf.getvalue())
raise RuntimeError(f'pytest collection failed at {", ".join(paths)} with command "{" ".join(cmd)}"')
return collector.cases # type: ignore
if target == CollectMode.ALL:
targets = TOOLS_SUPPORTED_TARGETS + TOOLS_PREVIEW_TARGETS + [CollectMode.MULTI_ALL_WITH_PARAM]
else:
targets = [target]
for _target in targets:
if target == CollectMode.ALL:
cases.extend(_get_pytest_cases(_target, _single_target_duplicate_mode=True))
else:
cases.extend(_get_pytest_cases(_target))
return sorted(cases, key=lambda x: (x.path, x.name, str(x.targets)))
def get_all_apps(
paths: t.List[str],
target: str = CollectMode.ALL,
*,
marker_expr: t.Optional[str] = None,
filter_expr: t.Optional[str] = None,
config_rules_str: t.Optional[t.List[str]] = None,
preserve_all: bool = False,
extra_default_build_targets: t.Optional[t.List[str]] = None,
compare_manifest_sha_filepath: t.Optional[str] = None,
modified_components: t.Optional[t.List[str]] = None,
modified_files: t.Optional[t.List[str]] = None,
ignore_app_dependencies_components: t.Optional[t.List[str]] = None,
ignore_app_dependencies_filepatterns: t.Optional[t.List[str]] = None,
) -> t.Tuple[t.Set[App], t.Set[App]]:
"""
Return the tuple of test-required apps and non-test-related apps
:param paths: paths to search for pytest scripts
:param target: target or keywords to get test cases for, explained in `get_pytest_cases`
:param marker_expr: pytest marker expression, `-m`
:param filter_expr: pytest filter expression, `-k`
    :param config_rules_str: list of config rule strings
    :param preserve_all: preserve the build directories of all apps when True
    :param extra_default_build_targets: extra default build targets
    :param compare_manifest_sha_filepath: path to the file recording manifest SHAs, used to detect manifest changes
    :param modified_components: list of modified components
    :param modified_files: list of modified files
    :param ignore_app_dependencies_components: components that disable the app dependency check when modified
    :param ignore_app_dependencies_filepatterns: file patterns that disable the app dependency check when matched by modified files
:return: tuple of test-required apps and non-test-related apps
"""
    # `target` may be a comma-separated list of targets
all_apps: t.List[App] = []
for _t in set(target.split(',')):
all_apps.extend(
find_apps(
paths,
_t,
build_system=IdfCMakeApp,
recursive=True,
build_dir='build_@t_@w',
config_rules_str=config_rules_str or DEFAULT_CONFIG_RULES_STR,
build_log_filename=DEFAULT_BUILD_LOG_FILENAME,
size_json_filename=DEFAULT_SIZE_JSON_FILENAME,
check_warnings=True,
manifest_rootpath=IDF_PATH,
compare_manifest_sha_filepath=compare_manifest_sha_filepath,
manifest_files=get_all_manifest_files(),
default_build_targets=SUPPORTED_TARGETS + (extra_default_build_targets or []),
modified_components=modified_components,
modified_files=modified_files,
ignore_app_dependencies_components=ignore_app_dependencies_components,
ignore_app_dependencies_filepatterns=ignore_app_dependencies_filepatterns,
include_skipped_apps=True,
)
)
pytest_cases = get_pytest_cases(
paths,
target,
marker_expr=marker_expr,
filter_expr=filter_expr,
)
modified_pytest_cases = []
if modified_files:
modified_pytest_scripts = [
os.path.dirname(f) for f in modified_files if fnmatch.fnmatch(os.path.basename(f), 'pytest_*.py')
]
if modified_pytest_scripts:
modified_pytest_cases = get_pytest_cases(
modified_pytest_scripts,
target,
marker_expr=marker_expr,
filter_expr=filter_expr,
)
# app_path, target, config
pytest_app_path_tuple_dict: t.Dict[t.Tuple[str, str, str], PytestCase] = {}
for case in pytest_cases:
for app in case.apps:
pytest_app_path_tuple_dict[(app.path, app.target, app.config)] = case
modified_pytest_app_path_tuple_dict: t.Dict[t.Tuple[str, str, str], PytestCase] = {}
for case in modified_pytest_cases:
for app in case.apps:
modified_pytest_app_path_tuple_dict[(app.path, app.target, app.config)] = case
test_related_apps: t.Set[App] = set()
non_test_related_apps: t.Set[App] = set()
for app in all_apps:
# PytestCase.app.path is idf_relpath
app_path = idf_relpath(app.app_dir)
        # override build_status if the test script was modified
if case := modified_pytest_app_path_tuple_dict.get((app_path, app.target, app.config_name)):
test_related_apps.add(app)
app.build_status = BuildStatus.SHOULD_BE_BUILT
app.preserve = True
logging.debug('Found app: %s - required by modified test case %s', app, case.path)
elif app.build_status != BuildStatus.SKIPPED:
if case := pytest_app_path_tuple_dict.get((app_path, app.target, app.config_name)):
test_related_apps.add(app)
                # whether to build it is decided later by the build stage
app.preserve = True
logging.debug('Found test-related app: %s - required by %s', app, case.path)
else:
non_test_related_apps.add(app)
app.preserve = preserve_all
logging.debug('Found non-test-related app: %s', app)
print(f'Found {len(test_related_apps)} test-related apps')
print(f'Found {len(non_test_related_apps)} non-test-related apps')
return test_related_apps, non_test_related_apps
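# Illustrative usage (paths and component names are hypothetical):
#
#     test_apps, other_apps = get_all_apps(
#         ['examples'],
#         target='esp32',
#         modified_components=['esp_wifi'],
#     )
#     # `test_apps` feed the test jobs; `other_apps` are built (or skipped) without being tested.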

View File

@@ -1,71 +0,0 @@
# SPDX-FileCopyrightText: 2023-2024 Espressif Systems (Shanghai) CO LTD
# SPDX-License-Identifier: Apache-2.0
import logging
import os
import shutil
import sys
import tempfile
import typing as t
from pathlib import Path
import pytest
tools_ci_dir = os.path.join(os.path.dirname(__file__), '..', '..')
if tools_ci_dir not in sys.path:
sys.path.append(tools_ci_dir)
tools_dir = os.path.join(os.path.dirname(__file__), '..', '..', '..')
if tools_dir not in sys.path:
sys.path.append(tools_dir)
from idf_ci_utils import IDF_PATH # noqa: E402
from idf_pytest.constants import DEFAULT_LOGDIR # noqa: E402
def create_project(name: str, folder: Path) -> Path:
p = folder / name
p.mkdir(parents=True, exist_ok=True)
(p / 'main').mkdir(parents=True, exist_ok=True)
with open(p / 'CMakeLists.txt', 'w') as fw:
fw.write(
"""cmake_minimum_required(VERSION 3.16)
include($ENV{{IDF_PATH}}/tools/cmake/project.cmake)
project({})
""".format(
name
)
)
with open(p / 'main' / 'CMakeLists.txt', 'w') as fw:
fw.write(
"""idf_component_register(SRCS "{}.c"
INCLUDE_DIRS ".")
""".format(
name
)
)
with open(p / 'main' / f'{name}.c', 'w') as fw:
fw.write(
"""#include <stdio.h>
void app_main(void) {}
"""
)
return p
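# A call like create_project('foo', tmp_path) yields a minimal buildable project:
#     tmp_path/foo/CMakeLists.txt, tmp_path/foo/main/CMakeLists.txt and
#     tmp_path/foo/main/foo.c, with the project directory returned.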
@pytest.fixture
def work_dirpath() -> t.Generator[Path, None, None]:
os.makedirs(os.path.join(IDF_PATH, DEFAULT_LOGDIR), exist_ok=True)
p = Path(tempfile.mkdtemp(prefix=os.path.join(IDF_PATH, DEFAULT_LOGDIR) + os.sep))
try:
yield p
except Exception:
        logging.critical('Test failed, please check the log in %s', p)
raise
else:
shutil.rmtree(p)

View File

@@ -1,131 +0,0 @@
# SPDX-FileCopyrightText: 2023-2024 Espressif Systems (Shanghai) CO LTD
# SPDX-License-Identifier: Apache-2.0
from pathlib import Path
from idf_pytest.script import get_all_apps
from idf_pytest.script import SUPPORTED_TARGETS
from conftest import create_project
def test_get_all_apps_non(work_dirpath: Path) -> None:
create_project('foo', work_dirpath)
create_project('bar', work_dirpath)
test_related_apps, non_test_related_apps = get_all_apps([str(work_dirpath)])
assert test_related_apps == set()
assert len(non_test_related_apps) == 2 * len(SUPPORTED_TARGETS)
def test_get_all_apps_single_dut_test_script(work_dirpath: Path) -> None:
create_project('foo', work_dirpath)
with open(work_dirpath / 'foo' / 'pytest_get_all_apps_single_dut_test_script.py', 'w') as fw:
fw.write(
"""import pytest
@pytest.mark.esp32
@pytest.mark.esp32s2
def test_foo(dut):
pass
"""
)
create_project('bar', work_dirpath)
test_related_apps, non_test_related_apps = get_all_apps([str(work_dirpath)], target='all')
assert len(test_related_apps) == 2
assert len(non_test_related_apps) == 2 * len(SUPPORTED_TARGETS) - 2
def test_get_all_apps_multi_dut_with_markers_test_script(work_dirpath: Path) -> None:
create_project('foo', work_dirpath)
(work_dirpath / 'foo' / 'pytest_get_all_apps_multi_dut_with_markers_test_script.py').write_text(
"""import pytest
@pytest.mark.esp32
@pytest.mark.parametrize('count', [2, 3], indirect=True)
def test_foo(dut):
pass
""",
encoding='utf-8',
)
test_related_apps, non_test_related_apps = get_all_apps([str(work_dirpath)], target='all')
assert len(test_related_apps) == 1
assert len(non_test_related_apps) == len(SUPPORTED_TARGETS) - 1
def test_get_all_apps_multi_dut_test_script(work_dirpath: Path) -> None:
create_project('foo', work_dirpath)
with open(work_dirpath / 'foo' / 'pytest_get_all_apps_multi_dut_test_script.py', 'w') as fw:
fw.write(
"""import pytest
@pytest.mark.parametrize(
'count, target', [
(2, 'esp32s2|esp32s3'),
(3, 'esp32|esp32s3|esp32'),
], indirect=True
)
def test_foo(dut):
pass
"""
)
test_related_apps, non_test_related_apps = get_all_apps([str(work_dirpath)], target='esp32s2,esp32s3')
assert len(test_related_apps) == 2
assert len(non_test_related_apps) == 0
test_related_apps, non_test_related_apps = get_all_apps([str(work_dirpath)], target='esp32,esp32s3,esp32')
assert len(test_related_apps) == 2
assert len(non_test_related_apps) == 0
test_related_apps, non_test_related_apps = get_all_apps([str(work_dirpath)], target='all')
assert len(test_related_apps) == 3
assert len(non_test_related_apps) == len(SUPPORTED_TARGETS) - 3
test_related_apps, non_test_related_apps = get_all_apps([str(work_dirpath)], target='foo,bar')
assert len(test_related_apps) == 0
assert len(non_test_related_apps) == 0
def test_get_all_apps_modified_pytest_script(work_dirpath: Path) -> None:
create_project('foo', work_dirpath)
create_project('bar', work_dirpath)
(work_dirpath / 'pytest_get_all_apps_modified_pytest_script.py').write_text(
"""import pytest
import os
@pytest.mark.parametrize('count, target', [(2, 'esp32')], indirect=True)
@pytest.mark.parametrize('app_path', [
'{}|{}'.format(os.path.join(os.path.dirname(__file__), 'foo'), os.path.join(os.path.dirname(__file__), 'bar')),
], indirect=True
)
def test_multi_foo_bar(dut):
pass
""",
encoding='utf-8',
)
test_related_apps, non_test_related_apps = get_all_apps([str(work_dirpath)], target='all')
assert len(test_related_apps) == 2 # foo-esp32, bar-esp32
assert len(non_test_related_apps) == 2 * len(SUPPORTED_TARGETS) - 2
test_related_apps, non_test_related_apps = get_all_apps(
[str(work_dirpath)], target='all', modified_files=[], modified_components=[]
)
assert len(test_related_apps) == 0
assert len(non_test_related_apps) == 0
test_related_apps, non_test_related_apps = get_all_apps(
[str(work_dirpath)],
target='all',
modified_files=[str(work_dirpath / 'pytest_get_all_apps_modified_pytest_script.py')],
modified_components=[],
)
assert len(test_related_apps) == 2
assert len(non_test_related_apps) == 0

View File

@@ -1,179 +0,0 @@
# SPDX-FileCopyrightText: 2023-2024 Espressif Systems (Shanghai) CO LTD
# SPDX-License-Identifier: Apache-2.0
import textwrap
from pathlib import Path
from idf_pytest.constants import CollectMode
from idf_pytest.script import get_pytest_cases
TEMPLATE_SCRIPT = '''
import pytest
@pytest.mark.esp32
@pytest.mark.esp32s2
def test_foo_single(dut):
pass
@pytest.mark.parametrize('target', [
'esp32',
'esp32c3',
])
def test_foo_single_with_param(dut):
pass
@pytest.mark.parametrize(
'count, target', [
(2, 'esp32|esp32s2'),
(3, 'esp32s2|esp32s2|esp32s3'),
], indirect=True
)
def test_foo_multi(dut):
pass
@pytest.mark.esp32
@pytest.mark.esp32s2
@pytest.mark.parametrize(
'count', [2], indirect=True
)
def test_foo_multi_with_marker(dut):
pass
'''
def test_get_pytest_cases_single_specific(work_dirpath: Path) -> None:
script = work_dirpath / 'pytest_get_pytest_cases_single_specific.py'
script.write_text(TEMPLATE_SCRIPT)
cases = get_pytest_cases([str(work_dirpath)], 'esp32')
assert len(cases) == 2
assert cases[0].targets == ['esp32']
assert cases[0].name == 'test_foo_single'
assert cases[1].targets == ['esp32']
assert cases[1].name == 'test_foo_single_with_param'
def test_get_pytest_cases_multi_specific(work_dirpath: Path) -> None:
script = work_dirpath / 'pytest_get_pytest_cases_multi_specific.py'
script.write_text(TEMPLATE_SCRIPT)
cases = get_pytest_cases([str(work_dirpath)], 'esp32s2,esp32s2, esp32s3')
assert len(cases) == 1
assert cases[0].targets == ['esp32s2', 'esp32s2', 'esp32s3']
cases = get_pytest_cases([str(work_dirpath)], 'esp32s3,esp32s2,esp32s2') # order matters
assert len(cases) == 0
def test_get_pytest_cases_multi_all(work_dirpath: Path) -> None:
script = work_dirpath / 'pytest_get_pytest_cases_multi_all.py'
script.write_text(TEMPLATE_SCRIPT)
cases = get_pytest_cases([str(work_dirpath)], CollectMode.MULTI_ALL_WITH_PARAM)
assert len(cases) == 2
assert cases[0].targets == ['esp32', 'esp32s2']
assert cases[1].targets == ['esp32s2', 'esp32s2', 'esp32s3']
def test_get_pytest_cases_all(work_dirpath: Path) -> None:
script = work_dirpath / 'pytest_get_pytest_cases_all.py'
script.write_text(TEMPLATE_SCRIPT)
cases = get_pytest_cases([str(work_dirpath)], CollectMode.ALL)
assert len(cases) == 8
assert cases[0].targets == ['esp32', 'esp32s2']
assert cases[0].name == 'test_foo_multi'
assert cases[1].targets == ['esp32s2', 'esp32s2', 'esp32s3']
assert cases[1].name == 'test_foo_multi'
assert cases[2].targets == ['esp32', 'esp32']
assert cases[2].name == 'test_foo_multi_with_marker'
assert cases[3].targets == ['esp32s2', 'esp32s2']
assert cases[3].name == 'test_foo_multi_with_marker'
assert cases[4].targets == ['esp32']
assert cases[4].name == 'test_foo_single'
assert cases[5].targets == ['esp32s2']
assert cases[5].name == 'test_foo_single'
assert cases[6].targets == ['esp32']
assert cases[6].name == 'test_foo_single_with_param'
assert cases[7].targets == ['esp32c3']
assert cases[7].name == 'test_foo_single_with_param'
def test_multi_with_marker_and_app_path(work_dirpath: Path) -> None:
script = work_dirpath / 'pytest_multi_with_marker_and_app_path.py'
script.write_text(
textwrap.dedent(
'''
import pytest
@pytest.mark.esp32c2
@pytest.mark.parametrize(
'count,app_path', [
(2, 'foo|bar'),
(3, 'foo|bar|baz'),
], indirect=True
)
def test_foo_multi_with_marker_and_app_path(dut):
pass
'''
)
)
cases = get_pytest_cases([str(work_dirpath)], 'esp32c3,esp32c3')
assert len(cases) == 0
cases = get_pytest_cases([str(work_dirpath)], 'esp32c2,esp32c2')
assert len(cases) == 1
assert cases[0].targets == ['esp32c2', 'esp32c2']
cases = get_pytest_cases([str(work_dirpath)], 'esp32c2,esp32c2,esp32c2')
assert len(cases) == 1
assert cases[0].targets == ['esp32c2', 'esp32c2', 'esp32c2']
def test_filter_with_sdkconfig_name(work_dirpath: Path) -> None:
script = work_dirpath / 'pytest_filter_with_sdkconfig_name.py'
script.write_text(
textwrap.dedent(
'''
import pytest
@pytest.mark.esp32
@pytest.mark.parametrize(
'config', [
'foo',
'bar',
], indirect=True
)
def test_filter_with_sdkconfig_name_single_dut(dut):
pass
@pytest.mark.esp32
@pytest.mark.parametrize(
'count', [2], indirect=True
)
@pytest.mark.parametrize(
'config', [
'foo|bar',
'bar|baz',
], indirect=True
)
def test_filter_with_sdkconfig_name_multi_dut(dut):
pass
'''
)
)
cases = get_pytest_cases([str(work_dirpath)], 'esp32', config_name='foo')
assert len(cases) == 1
cases = get_pytest_cases([str(work_dirpath)], 'esp32,esp32', config_name='foo')
assert len(cases) == 1
cases = get_pytest_cases([str(work_dirpath)], 'esp32,esp32', config_name='bar')
assert len(cases) == 2

View File

@@ -1,4 +1,4 @@
-# SPDX-FileCopyrightText: 2023-2024 Espressif Systems (Shanghai) CO LTD
+# SPDX-FileCopyrightText: 2023-2025 Espressif Systems (Shanghai) CO LTD
 # SPDX-License-Identifier: Apache-2.0
 import logging
 import os
@@ -62,10 +62,6 @@ def merge_junit_files(junit_files: t.List[str], target_path: str) -> None:
         fw.write(ET.tostring(merged_testsuite))
-def comma_sep_str_to_list(s: str) -> t.List[str]:
-    return [s.strip() for s in s.split(',') if s.strip()]
 def normalize_testcase_file_path(file: str, app_path: t.Union[str, tuple]) -> str:
     """
     Normalize file paths to a consistent format, resolving relative paths based on the `app_path`.
@@ -82,9 +78,7 @@ def normalize_testcase_file_path(file: str, app_path: t.Union[str, tuple]) -> str:
     def normalize_path(file_path: str, app_path: str) -> str:
         """Helper function to normalize a single path."""
         if not os.path.isabs(file_path):
-            resolved_path = os.path.normpath(
-                os.path.join(app_path, file_path.removeprefix('./').removeprefix('../'))
-            )
+            resolved_path = os.path.normpath(os.path.join(app_path, file_path.removeprefix('./').removeprefix('../')))
         else:
             resolved_path = os.path.normpath(file_path)

View File

@@ -1,512 +0,0 @@
# SPDX-FileCopyrightText: 2024-2025 Espressif Systems (Shanghai) CO LTD
# SPDX-License-Identifier: Apache-2.0
import ast
import itertools
import os
import typing as t
import warnings
from collections import defaultdict
import pytest
from idf_pytest.constants import PREVIEW_TARGETS
from idf_pytest.constants import SUPPORTED_TARGETS
from idf_pytest.constants import TARGET_MARKERS
from pytest import Config
from pytest import Function
from pytest import Mark
def is_target_in_marker(mark: Mark) -> bool:
return mark.name in TARGET_MARKERS or mark.name in ('supported_targets', 'preview_targets', 'linux')
def remove_keys(data: t.Dict[str, t.Any], keys_to_remove: t.List[str]) -> t.Dict[str, t.Any]:
"""
Remove specific keys from a dictionary.
"""
return {key: value for key, value in data.items() if key not in keys_to_remove}
def get_values_by_keys(data: t.Dict[str, t.Any], keys: t.List[str]) -> t.Tuple[t.Any, ...]:
"""
Retrieve values from a dictionary for specified keys.
"""
return tuple([data[key] for key in keys if key in data])
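# Worked examples for the two helpers above:
#     remove_keys({'a': 1, 'b': 2}, ['a'])              # -> {'b': 2}
#     get_values_by_keys({'a': 1, 'b': 2}, ['b', 'c'])  # -> (2,)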
def group_by_target(vals: t.List[t.Dict[str, t.Any]]) -> t.List[t.Dict[str, t.Any]]:
"""
Groups rows by non-target keys and modifies targets to 'supported_targets'
if all supported targets are present in a group.
Parameters:
vals: List of dictionaries to process.
Returns:
Processed list of dictionaries with supported targets.
"""
if not vals or 'target' not in vals[0]:
return vals
def _process_group(
_vals: t.List[t.Dict[str, t.Any]], group: t.List[str], group_name: str
) -> t.List[t.Dict[str, t.Any]]:
# Identify keys excluding 'target'
non_target_keys = [key for key in sorted(_vals[0].keys()) if key != 'target']
# Group rows by values of keys excluding 'target'
grouped_rows = defaultdict(list)
for index, row in enumerate(_vals):
key = get_values_by_keys(row, non_target_keys)
grouped_rows[key].append((index, row['target']))
# Identify groups that contain all supported targets
to_skip_lines: t.Set[int] = set()
to_update_lines: t.Set[int] = set()
for _, rows in grouped_rows.items():
lines = []
remaining_targets = set(group)
for index, target in rows:
if target in remaining_targets:
lines.append(index)
remaining_targets.remove(target)
if not remaining_targets:
to_skip_lines.update(lines[1:]) # Skip all but the first matching line
to_update_lines.add(lines[0]) # Update the first matching line
break
# Construct new list of rows with modifications
new_values = []
for ind, row in enumerate(_vals):
if ind in to_update_lines:
row['target'] = group_name
if ind not in to_skip_lines:
new_values.append(row)
return new_values
if SUPPORTED_TARGETS:
vals = _process_group(vals, SUPPORTED_TARGETS, 'supported_targets')
if PREVIEW_TARGETS:
vals = _process_group(vals, PREVIEW_TARGETS, 'preview_targets')
return vals
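# Worked example, assuming SUPPORTED_TARGETS == ['esp32', 'esp32s2'] (hypothetical):
#
#     group_by_target([
#         {'config': 'foo', 'target': 'esp32'},
#         {'config': 'foo', 'target': 'esp32s2'},
#         {'config': 'bar', 'target': 'esp32'},
#     ])
#     # -> [{'config': 'foo', 'target': 'supported_targets'},
#     #     {'config': 'bar', 'target': 'esp32'}]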
class CurrentItemContext:
test_name: str
class PathRestore:
    # If restored is True, add the `import os` line when the file is rewritten.
restored: bool = False
def __init__(self, path: str) -> None:
PathRestore.restored = True
self.path = path
def __repr__(self) -> str:
return f"f'{self.path}'"
def restore_path(vals: t.List[t.Dict[str, t.Any]], file_path: str) -> t.List[t.Dict[str, t.Any]]:
if 'app_path' not in vals[0].keys():
return vals
file_path = os.path.dirname(os.path.abspath(file_path))
for row in vals:
paths = row['app_path'].split('|')
row['app_path'] = '|'.join([
f'{{os.path.join(os.path.dirname(__file__), "{os.path.relpath(p, file_path)}")}}' for p in paths
])
row['app_path'] = PathRestore(row['app_path'])
return vals
def make_hashable(item: t.Any) -> t.Union[t.Tuple[t.Any, ...], t.Any]:
"""Recursively convert object to a hashable form, storing original values."""
if isinstance(item, (set, list, tuple)):
converted = tuple(make_hashable(i) for i in item)
elif isinstance(item, dict):
converted = tuple(sorted((k, make_hashable(v)) for k, v in item.items()))
else:
converted = item # Primitives are already hashable
return converted
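# For example: make_hashable({'b': [1, 2], 'a': {3}})
# -> (('a', (3,)), ('b', (1, 2)))
# Dicts become sorted key/value tuples and lists/sets/tuples become tuples,
# so the converted value can be used as a dict key or set member.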
def restore_params(data: t.List[t.Dict[str, t.Any]]) -> t.List[t.Tuple[t.List[str], t.List[t.Any]]]:
"""
Restore parameters from pytest --collect-only data structure.
"""
# Ensure all dictionaries have the same number of keys
if len({len(d) for d in data}) != 1:
raise ValueError(
            f'Inconsistent parameter structure for {CurrentItemContext.test_name}: '
            'all rows must have the same number of keys.'
)
all_markers_is_empty = []
for d in data:
if 'markers' in d:
all_markers_is_empty.append(not (d['markers']))
d['markers'] = list(set(d['markers']))
if all(all_markers_is_empty):
for d in data:
del d['markers']
hashable_to_original: t.Dict[t.Tuple[str, t.Any], t.Any] = {}
def save_to_hash(key: str, hashable_value: t.Any, original_value: t.Any) -> t.Any:
"""Stores the mapping of hashable values to their original."""
if isinstance(original_value, list):
original_value = tuple(original_value)
hashable_to_original[(key, hashable_value)] = original_value
return hashable_value
def restore_from_hash(key: str, hashable_value: t.Any) -> t.Any:
"""Restores the original value from its hashable equivalent."""
return hashable_to_original.get((key, hashable_value), hashable_value)
# Convert data to a hashable format
data = [{k: save_to_hash(k, make_hashable(v), v) for k, v in row.items()} for row in data]
unique_data = []
for d in data:
if d not in unique_data:
unique_data.append(d)
data = unique_data
data = group_by_target(data)
params_multiplier: t.List[t.Tuple[t.List[str], t.List[t.Any]]] = []
current_keys: t.List[str] = sorted(data[0].keys(), key=lambda x: (x == 'markers', x))
i = 1
while len(current_keys) > i:
# It should be combinations because we are only concerned with the elements, not their order.
for _ in itertools.combinations(current_keys, i):
perm: t.List[str] = list(_)
if perm == ['markers'] or [k for k in current_keys if k not in perm] == ['markers']:
                # The 'markers' key must be used together with another parameter.
continue
grouped_buckets = defaultdict(list)
for row in data:
grouped_buckets[get_values_by_keys(row, perm)].append(remove_keys(row, perm))
grouped_values = list(grouped_buckets.values())
if all(v == grouped_values[0] for v in grouped_values):
current_keys = [k for k in current_keys if k not in perm]
params_multiplier.append((perm, list(grouped_buckets.keys())))
data = grouped_values[0]
break
else:
i += 1
if data:
remaining_values = [get_values_by_keys(row, current_keys) for row in data]
params_multiplier.append((current_keys, remaining_values))
for key, values in params_multiplier:
values[:] = [tuple(restore_from_hash(key[i], v) for i, v in enumerate(row)) for row in values]
output: t.List[t.Any] = []
if len(key) == 1:
for row in values:
output.extend(row)
values[:] = output
for p in params_multiplier:
if 'markers' in p[0]:
for i, el in enumerate(p[1]):
if el[-1] == ():
p[1][i] = el[:-1]
return params_multiplier
def format_mark(name: str, args: t.Tuple[t.Any, ...], kwargs: t.Dict[str, t.Any]) -> str:
"""Format pytest mark with given arguments and keyword arguments."""
args_str = ', '.join(repr(arg) if isinstance(arg, str) else str(arg) for arg in args)
kwargs_str = ', '.join(f'{key}={repr(value) if isinstance(value, str) else value}' for key, value in kwargs.items())
combined = ', '.join(filter(None, [args_str, kwargs_str]))
return f'@pytest.mark.{name}({combined})\n' if combined else f'@pytest.mark.{name}\n'
def format_parametrize(keys: t.Union[str, t.List[str]], values: t.List[t.Any], indirect: t.Sequence[str]) -> str:
"""Format pytest parametrize for given keys and values."""
# Ensure keys is always a list
if isinstance(keys, str):
keys = [keys]
    # Markers will always be at the end, so just remove 'markers' from the keys if present
    # keys = [k for k in keys if k not in ('__markers',)]
key_str = repr(keys[0]) if len(keys) == 1 else repr(','.join(keys))
    # If any value needs a special representation, the best approach is to wrap it in a class like PathRestore
formatted_values = [' ' + repr(value) for value in values]
values_str = ',\n'.join(formatted_values)
if indirect:
return f'@idf_parametrize({key_str}, [\n{values_str}\n], indirect={indirect})\n'
return f'@idf_parametrize({key_str}, [\n{values_str}\n])\n'
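# Output sketch for the two formatters above (per the code as written):
#     format_mark('qemu', (), {})        # -> '@pytest.mark.qemu\n'
#     format_mark('timeout', (30,), {})  # -> '@pytest.mark.timeout(30)\n'
#     format_parametrize('target', ['esp32'], indirect=['target'])
#     # -> "@idf_parametrize('target', [\n 'esp32'\n], indirect=['target'])\n"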
def key_for_item(item: Function) -> t.Tuple[str, str]:
return item.originalname, str(item.fspath)
def collect_markers(item: Function) -> t.Tuple[t.List[Mark], t.List[Mark]]:
"""Separate local and global markers for a pytest item."""
local_markers, global_markers = [], []
for mark in item.iter_markers():
if mark.name == 'parametrize':
continue
if 'callspec' in dir(item) and mark in item.callspec.marks:
local_markers.append(mark)
else:
global_markers.append(mark)
return local_markers, global_markers
class MarkerRepr(str):
def __new__(cls, mark_name: str, kwargs_str: str, args_str: str, all_args: str) -> 'MarkerRepr':
if not all_args:
instance = super().__new__(cls, f'pytest.mark.{mark_name}')
else:
instance = super().__new__(cls, f'pytest.mark.{mark_name}({all_args})')
return instance # type: ignore
def __init__(self, mark_name: str, kwargs_str: str, args_str: str, all_args: str) -> None:
super().__init__()
self.kwargs_str = kwargs_str
self.args_str = args_str
self.all_args = all_args
self.mark_name = mark_name
def __hash__(self) -> int:
return hash(repr(self))
def __repr__(self) -> str:
if not self.all_args:
return f'pytest.mark.{self.mark_name}'
return f'pytest.mark.{self.mark_name}({self.all_args})'
def mark_to_source(mark: Mark) -> MarkerRepr:
"""Convert a Mark instance to its pytest.mark source code representation."""
kwargs_str = ', '.join(f'{k}={repr(v)}' for k, v in mark.kwargs.items())
args_str = ', '.join(repr(arg) for arg in mark.args)
all_args = ', '.join(filter(None, [args_str, kwargs_str]))
return MarkerRepr(mark.name, kwargs_str, args_str, all_args)
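# For example, mark_to_source(pytest.mark.timeout(30, func_only=True).mark)
# returns the MarkerRepr string 'pytest.mark.timeout(30, func_only=True)'.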
def process_local_markers(local_markers: t.List[Mark]) -> t.Tuple[t.List[str], t.List[MarkerRepr]]:
"""Process local markers to extract targets and runners."""
local_targets, other_markers = [], []
for mark in local_markers:
if is_target_in_marker(mark):
local_targets.append(mark.name)
else:
other_markers.append(mark_to_source(mark))
return sorted(local_targets), sorted(other_markers)
def validate_global_markers(
global_markers: t.List[Mark], local_targets: t.List[str], function_name: str
) -> t.List[Mark]:
"""Validate and normalize global markers."""
normalized_markers = []
for mark in global_markers:
if is_target_in_marker(mark):
if local_targets:
warnings.warn(f'IN {function_name} IGNORING GLOBAL TARGET {mark.name} DUE TO LOCAL TARGETS')
continue
normalized_markers.append(mark)
return normalized_markers
def filter_target(_targets: t.List[str]) -> t.List[str]:
"""
Filters the input targets based on certain conditions.
"""
if len(_targets) == 1:
return _targets
def remove_duplicates(target_list: t.List[str], group: t.List[str], group_name: str) -> t.List[str]:
updated_target = []
for _t in target_list:
if _t in group:
warnings.warn(f'{_t} is already included in {group_name}, no need to specify it separately.')
continue
updated_target.append(_t)
return updated_target
if 'supported_targets' in _targets:
_targets = remove_duplicates(_targets, SUPPORTED_TARGETS, 'supported_targets')
if 'preview_targets' in _targets:
_targets = remove_duplicates(_targets, PREVIEW_TARGETS, 'preview_targets')
return _targets
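# For example, assuming 'esp32' is part of SUPPORTED_TARGETS:
#     filter_target(['esp32', 'supported_targets'])
#     # warns that esp32 is already covered and returns ['supported_targets']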
@pytest.hookimpl(tryfirst=True)
def pytest_collection_modifyitems(config: Config, items: t.List[Function]) -> None:
"""
    Local and global marks are distinguished as follows:
    - Local: attached to a parameter inside a parametrized function, like:
      parametrize(param(marks=[...]))
    - Global: a regular mark applied to the whole function.
"""
test_name_to_params: t.Dict[t.Tuple[str, str], t.List] = defaultdict(list)
test_name_to_global_mark: t.Dict[t.Tuple[str, str], t.List] = defaultdict(list)
test_name_has_local_target_marks = defaultdict(bool)
# Collect all fixtures to determine if a parameter is regular or a fixture
fm = config.pluginmanager.get_plugin('funcmanage')
known_fixtures = set(fm._arg2fixturedefs.keys())
# Collecting data
for item in items:
collected = []
item_key = key_for_item(item)
local_markers, global_markers = collect_markers(item)
# global_markers.sort(key=lambda x: x.name)
        global_markers.reverse()  # the item's markers are reversed here to preserve their original order
local_targets, other_markers = process_local_markers(local_markers)
if local_targets:
test_name_has_local_target_marks[item_key] = True
local_targets = filter_target(local_targets)
other_markers_dict = {'markers': other_markers} if other_markers else {'markers': []}
if local_targets:
for target in local_targets:
params = item.callspec.params if 'callspec' in dir(item) else {}
collected.append({**params, **other_markers_dict, 'target': target})
else:
if 'callspec' in dir(item):
collected.append({**other_markers_dict, **item.callspec.params})
global_markers = validate_global_markers(global_markers, local_targets, item.name)
        # Just warn if the global markers have changed
if item_key in test_name_to_global_mark:
if test_name_to_global_mark[item_key] != global_markers:
warnings.warn(
f'{item.originalname} HAS DIFFERENT GLOBAL MARKERS! {test_name_to_global_mark[item_key]} {global_markers}'
)
test_name_to_global_mark[item_key] = global_markers
test_name_to_params[item_key].extend(collected)
# Post-processing: Modify files based on collected data
for (function_name, file_path), function_params in test_name_to_params.items():
CurrentItemContext.test_name = function_name
to_add_lines = []
global_targets = []
for mark in test_name_to_global_mark[(function_name, file_path)]:
if is_target_in_marker(mark):
global_targets.append(mark.name)
continue
to_add_lines.append(format_mark(mark.name, mark.args, mark.kwargs))
function_params_will_not_update = True
if test_name_has_local_target_marks[(function_name, file_path)]:
function_params_will_not_update = False
        # filter_target may drop some of them, but the originals are needed when removing decorators from the file.
original_global_targets = global_targets
global_targets = filter_target(global_targets)
is_target_already_in_params = any({'target' in param for param in function_params})
extra = []
if global_targets:
            # If any param already has a target, skip adding the global marker.
if is_target_already_in_params:
                warnings.warn(f'Function {function_name} already has target params! Skipping the global target')
else:
extra = [{'target': _t} for _t in global_targets]
def _update_file(file_path: str, to_add_lines: t.List[str], lines: t.List[str]) -> None:
output = []
start_with_comment = True
imports = ['from pytest_embedded_idf.utils import idf_parametrize']
if PathRestore.restored:
imports += ['import os']
for i, line in enumerate(lines):
if line.strip() in imports:
continue
if start_with_comment:
if not line == '\n' and not line.startswith(('from', 'import', '#')):
output.extend([f'{_imp}\n' for _imp in imports])
start_with_comment = False
if i in skip_lines:
continue
if line.startswith(f'def {function_name}('):
output.extend(to_add_lines)
output.append(line)
with open(file_path, 'w+') as file:
file.writelines(output)
if not function_params_will_not_update:
buffered_params: t.List[str] = []
if function_params:
function_params = restore_path(function_params, file_path)
for parameter_names, parameter_values in restore_params(function_params):
buffered_params.append(
format_parametrize(
parameter_names,
parameter_values,
indirect=[p for p in parameter_names if p in known_fixtures],
)
)
to_add_lines.extend(buffered_params)
with open(file_path) as file:
lines = file.readlines()
tree = ast.parse(''.join(lines))
skip_lines: t.Set[int] = set()
for node in ast.walk(tree):
if isinstance(node, ast.FunctionDef) and node.name == function_name:
for dec in node.decorator_list:
assert dec.end_lineno is not None
                    skip_lines.update(list(range(dec.lineno - 1, dec.end_lineno)))  # ast counts lines from 1, not 0
break
_update_file(file_path, to_add_lines, lines)
if global_targets:
with open(file_path) as file:
lines = file.readlines()
tree = ast.parse(''.join(lines))
skip_lines = set()
for node in ast.walk(tree):
if isinstance(node, ast.FunctionDef) and node.name == function_name:
for dec in node.decorator_list:
if isinstance(dec, ast.Attribute):
if dec.attr in original_global_targets:
assert dec.end_lineno is not None
skip_lines.update(list(range(dec.lineno - 1, dec.end_lineno)))
break
if extra:
to_add_lines = [format_parametrize('target', [_t['target'] for _t in extra], ['target'])] if extra else []
else:
to_add_lines = []
_update_file(file_path, to_add_lines, lines)

View File

@@ -111,7 +111,7 @@ This requires the following python libraries to run:
 To install the dependency packages needed, please run the following command:

 ```shell
-bash install.sh --enable-pytest
+bash install.sh --enable-ci
 ```

 **Note:** For troubleshooting errors with BLE transport, please refer to this [link](https://bleak.readthedocs.io/en/latest/troubleshooting.html).

View File

@@ -72,13 +72,13 @@ def action_print_help(script_extension: str) -> None:
 optional arguments:
 targets-to-install 'all', a single target (e.g. 'esp32s2'), or a comma-separated list of targets (e.g. 'esp32,esp32c3,esp32h2')
---enable-* a specific feature to enable (e.g. '--enable-pytest' will enable feature pytest)
+--enable-* a specific feature to enable (e.g. '--enable-ci' will enable feature ci)
 --disable-* a specific feature to disable (e.g. '--disable-pytest' will disable feature pytest)
 supported features: {', '.join(features)}
 {help_opts} show this help message and exit

 For more information, please see https://docs.espressif.com/projects/esp-idf/en/latest/api-guides/tools/idf-tools.html#install-scripts
-""")  # noqa: E222
+""")  # noqa: E222, E501

 def main() -> None:

View File

@@ -7,12 +7,6 @@
"optional": false, "optional": false,
"requirement_path": "tools/requirements/requirements.core.txt" "requirement_path": "tools/requirements/requirements.core.txt"
}, },
{
"name": "pytest",
"description": "Packages for CI with pytest",
"optional": true,
"requirement_path": "tools/requirements/requirements.pytest.txt"
},
{ {
"name": "test-specific", "name": "test-specific",
"description": "Packages for specific test scripts", "description": "Packages for specific test scripts",

View File

@@ -6,12 +6,8 @@
 # https://docs.espressif.com/projects/esp-idf/en/latest/api-guides/tools/idf-tools.html

 # ci
+idf-ci>=0.2.3,<1
 coverage
-idf-build-apps
 jsonschema
-junit_xml
-python-gitlab
-pyyaml
-SimpleWebSocketServer
-minio
 prettytable

View File

@@ -1,21 +0,0 @@
# Python package requirements for pytest in ESP-IDF.
# This feature can be enabled by running "install.{sh,bat,ps1,fish} --enable-pytest"
#
# This file lists Python packages without version specifiers. Version details
# are stored in a separate constraints file. For more information, visit:
# https://docs.espressif.com/projects/esp-idf/en/latest/api-guides/tools/idf-tools.html
pytest-embedded-serial-esp
pytest-embedded-idf
pytest-embedded-jtag
pytest-embedded-qemu
pytest-rerunfailures
pytest-timeout
pytest-ignore-test-results
# ci
minio
# build
python-gitlab
idf-build-apps

View File

@@ -4,8 +4,6 @@ import pytest
 from pytest_embedded import Dut
 from pytest_embedded_idf.utils import idf_parametrize
-TEST_APP_IN_FLASH = [pytest.param('app_in_flash', marks=pytest.mark.esp32p4)]
 @pytest.mark.generic
 @idf_parametrize('config', ['app_in_flash'], indirect=['config'])

View File

@@ -4,6 +4,10 @@ tools/test_apps/security/secure_boot:
   disable:
     - if: IDF_ENV_FPGA != 1 and CONFIG_NAME != "qemu"
       reason: the test can only run on an FPGA as efuses need to be reset during the test.
+  disable_test:
+    - if: IDF_TARGET in ["esp32", "esp32c2", "esp32c6", "esp32h2", "esp32s2", "esp32c61", "esp32p4", "esp32s3"]
+      temporary: true
+      reason: Can't use Kconfig option IDF_ENV_FPGA in `disable`. IDFCI-2992

 tools/test_apps/security/signed_app_no_secure_boot:
   enable:

View File

@@ -1,5 +1,5 @@
-| Supported Targets | ESP32 | ESP32-C3 | ESP32-C5 | ESP32-C6 | ESP32-H2 | ESP32-P4 | ESP32-S2 | ESP32-S3 |
-| ----------------- | ----- | -------- | -------- | -------- | -------- | -------- | -------- | -------- |
+| Supported Targets | ESP32 | ESP32-C2 | ESP32-C3 | ESP32-C5 | ESP32-C6 | ESP32-C61 | ESP32-H2 | ESP32-H21 | ESP32-H4 | ESP32-P4 | ESP32-S2 | ESP32-S3 |
+| ----------------- | ----- | -------- | -------- | -------- | -------- | --------- | -------- | --------- | -------- | -------- | -------- | -------- |

 # Secure Boot
@@ -102,7 +102,7 @@ Under `Security features`
 - Install pytest requirements
   ```
-  bash $IDF_PATH/install.sh --enable-pytest
+  bash $IDF_PATH/install.sh --enable-ci
   ```

 ### Build and test

View File

@@ -7,6 +7,7 @@ import zlib
 import pytest
 from pytest_embedded import Dut
 from pytest_embedded_idf.utils import idf_parametrize
+
 # To prepare a runner for these tests,
 # 1. Connect an FPGA with C3 image
 # 2. Use a COM port for programming and export it as ESPPORT
@@ -93,16 +94,17 @@ def test_examples_security_secure_boot(dut: Dut) -> None:
 # Correctly signed bootloader + correctly signed app should work
 @pytest.mark.host_test
 @pytest.mark.qemu
-@pytest.mark.esp32c3
 @pytest.mark.parametrize(
     'qemu_extra_args',
     [
-        f'-drive file={os.path.join(os.path.dirname(__file__), "test", "esp32c3_efuses.bin")},if=none,format=raw,id=efuse '
+        f'-drive file={os.path.join(os.path.dirname(__file__), "test", "esp32c3_efuses.bin")},'
+        f'if=none,format=raw,id=efuse '
         '-global driver=nvram.esp32c3.efuse,property=drive,value=efuse '
         '-global driver=timer.esp32c3.timg,property=wdt_disable,value=true',
     ],
     indirect=True,
 )
+@pytest.mark.parametrize('target', ['esp32c3'], indirect=True)
 @pytest.mark.parametrize('config', ['qemu'], indirect=True)
 def test_examples_security_secure_boot_qemu(dut: Dut) -> None:
     try:

View File

@@ -3,7 +3,6 @@
 import os
 import pytest
-from artifacts_handler import ArtifactType
 from idf_ci_utils import IDF_PATH
 from pytest_embedded import Dut
 from pytest_embedded_idf.utils import idf_parametrize
@@ -21,7 +20,8 @@ def test_app_mmu_page_size_32k_and_bootloader_mmu_page_size_64k(dut: Dut, app_do
     path_to_mmu_page_size_64k_build = os.path.join(dut.app.app_path, f'build_{dut.target}_{app_config}')
     if app_downloader:
         app_downloader.download_app(
-            os.path.relpath(path_to_mmu_page_size_64k_build, IDF_PATH), ArtifactType.BUILD_DIR_WITHOUT_MAP_AND_ELF_FILES
+            os.path.relpath(path_to_mmu_page_size_64k_build, IDF_PATH),
+            'flash',
         )
     dut.serial.bootloader_flash(path_to_mmu_page_size_64k_build)
@@ -43,7 +43,8 @@ def test_app_mmu_page_size_64k_and_bootloader_mmu_page_size_32k(dut: Dut, app_do
     path_to_mmu_page_size_32k_build = os.path.join(dut.app.app_path, f'build_{dut.target}_{app_config}')
     if app_downloader:
         app_downloader.download_app(
-            os.path.relpath(path_to_mmu_page_size_32k_build, IDF_PATH), ArtifactType.BUILD_DIR_WITHOUT_MAP_AND_ELF_FILES
+            os.path.relpath(path_to_mmu_page_size_32k_build, IDF_PATH),
+            'flash',
         )
     dut.serial.bootloader_flash(path_to_mmu_page_size_32k_build)

View File

@@ -4,7 +4,6 @@ import os
 import re
 import pytest
-from artifacts_handler import ArtifactType
 from idf_ci_utils import IDF_PATH
 from pytest_embedded import Dut
 from pytest_embedded_idf.utils import idf_parametrize
@@ -24,7 +23,8 @@ def test_multicore_app_and_unicore_bootloader(dut: Dut, app_downloader, config)
     path_to_unicore_build = os.path.join(dut.app.app_path, f'build_{dut.target}_{app_config}')
     if app_downloader:
         app_downloader.download_app(
-            os.path.relpath(path_to_unicore_build, IDF_PATH), ArtifactType.BUILD_DIR_WITHOUT_MAP_AND_ELF_FILES
+            os.path.relpath(path_to_unicore_build, IDF_PATH),
+            'flash',
        )
     dut.serial.bootloader_flash(path_to_unicore_build)
@@ -50,7 +50,8 @@ def test_unicore_app_and_multicore_bootloader(dut: Dut, app_downloader, config)
     path_to_multicore_build = os.path.join(dut.app.app_path, f'build_{dut.target}_{app_config}')
     if app_downloader:
         app_downloader.download_app(
-            os.path.relpath(path_to_multicore_build, IDF_PATH), ArtifactType.BUILD_DIR_WITHOUT_MAP_AND_ELF_FILES
+            os.path.relpath(path_to_multicore_build, IDF_PATH),
+            'flash',
        )
     dut.serial.bootloader_flash(path_to_multicore_build)

View File

@@ -9,7 +9,7 @@ This directory contains tests for the build system and build-related tools. Thes
 ## Running the tests locally

-1. Install pytest using `install.{sh,bat,ps1,fish} --enable-pytest`.
+1. Install pytest using `install.{sh,bat,ps1,fish} --enable-ci`.
 1. Activate the IDF shell environment using `export.{sh,bat,ps1,fish}`.
 1. To run all the tests, go to `$IDF_PATH/tools/test_build_system` directory, then run:
 ```