ci: rename local idf_ci folder, avoid name collision

Author: Fu Hanxi
Date: 2025-04-25 11:13:17 +02:00
parent fbc38a95ec
commit a1d5d5cbd6
12 changed files with 213 additions and 231 deletions
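Background for the rename: the CI scripts insert tools/ci into sys.path (see the `import __init__  # noqa: F401 # inject the system path` lines in the diffs below), so the local tools/ci/idf_ci/ package shadowed the pip-installed idf-ci distribution whenever `import idf_ci` was resolved. Renaming the local folder to idf_ci_local removes that collision. A minimal sketch of the lookup behaviour (illustrative only; assumes tools/ci is on sys.path, as the CI scripts arrange):

import importlib.util

# Python binds "idf_ci" to the first match on sys.path. With the old layout that
# was the local tools/ci/idf_ci/ folder; after the rename it is the installed
# idf-ci package, while the local helpers are imported as idf_ci_local instead.
spec = importlib.util.find_spec('idf_ci')
if spec is not None:
    print(spec.origin)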


@@ -36,20 +36,23 @@ from _pytest.config import Config
from _pytest.fixtures import FixtureRequest
from artifacts_handler import ArtifactType
from dynamic_pipelines.constants import TEST_RELATED_APPS_DOWNLOAD_URLS_FILENAME
from idf_ci.app import import_apps_from_txt
from idf_ci.uploader import AppDownloader, AppUploader
from idf_ci_utils import IDF_PATH, idf_relpath
from idf_pytest.constants import (
DEFAULT_SDKCONFIG,
ENV_MARKERS,
SPECIAL_MARKERS,
TARGET_MARKERS,
PytestCase,
DEFAULT_LOGDIR,
)
from idf_pytest.plugin import IDF_PYTEST_EMBEDDED_KEY, ITEM_PYTEST_CASE_KEY, IdfPytestEmbedded
from idf_ci_local.app import import_apps_from_txt
from idf_ci_local.uploader import AppDownloader
from idf_ci_local.uploader import AppUploader
from idf_ci_utils import IDF_PATH
from idf_ci_utils import idf_relpath
from idf_pytest.constants import DEFAULT_LOGDIR
from idf_pytest.constants import DEFAULT_SDKCONFIG
from idf_pytest.constants import ENV_MARKERS
from idf_pytest.constants import SPECIAL_MARKERS
from idf_pytest.constants import TARGET_MARKERS
from idf_pytest.constants import PytestCase
from idf_pytest.plugin import IDF_PYTEST_EMBEDDED_KEY
from idf_pytest.plugin import ITEM_PYTEST_CASE_KEY
from idf_pytest.plugin import IdfPytestEmbedded
from idf_pytest.utils import format_case_id
from pytest_embedded.plugin import multi_dut_argument, multi_dut_fixture
from pytest_embedded.plugin import multi_dut_argument
from pytest_embedded.plugin import multi_dut_fixture
from pytest_embedded_idf.dut import IdfDut
from pytest_embedded_idf.unity_tester import CaseTester
@@ -426,8 +429,10 @@ def pytest_addoption(parser: pytest.Parser) -> None:
def pytest_configure(config: Config) -> None:
from pytest_embedded_idf.utils import supported_targets, preview_targets
from idf_pytest.constants import SUPPORTED_TARGETS, PREVIEW_TARGETS
from idf_pytest.constants import PREVIEW_TARGETS
from idf_pytest.constants import SUPPORTED_TARGETS
from pytest_embedded_idf.utils import preview_targets
from pytest_embedded_idf.utils import supported_targets
supported_targets.set(SUPPORTED_TARGETS)
preview_targets.set(PREVIEW_TARGETS)


@@ -15,8 +15,8 @@ from gitlab import GitlabUpdateError
from gitlab_api import Gitlab
from idf_build_apps import App
from idf_build_apps.constants import BuildStatus
from idf_ci.app import AppWithMetricsInfo
from idf_ci.uploader import AppUploader
from idf_ci_local.app import AppWithMetricsInfo
from idf_ci_local.uploader import AppUploader
from prettytable import PrettyTable
from .constants import BINARY_SIZE_METRIC_NAME
@@ -211,7 +211,7 @@ class ReportGenerator:
items: t.List[t.Union[TestCase, GitlabJob, AppWithMetricsInfo]],
key: t.Union[str, t.Callable[[t.Union[TestCase, GitlabJob, AppWithMetricsInfo]], t.Any]],
order: str = 'asc',
sort_function: t.Optional[t.Callable[[t.Any], t.Any]] = None
sort_function: t.Optional[t.Callable[[t.Any], t.Any]] = None,
) -> t.List[t.Union[TestCase, GitlabJob, AppWithMetricsInfo]]:
"""
Sort items based on a given key, order, and optional custom sorting function.
@@ -219,11 +219,13 @@ class ReportGenerator:
:param items: List of items to sort.
:param key: A string representing the attribute name or a function to extract the sorting key.
:param order: Order of sorting ('asc' for ascending, 'desc' for descending).
:param sort_function: A custom function to control sorting logic (e.g., prioritizing positive/negative/zero values).
:param sort_function: A custom function to control sorting logic
(e.g., prioritizing positive/negative/zero values).
:return: List of sorted instances.
"""
key_func = None
if isinstance(key, str):
def key_func(item: t.Any) -> t.Any:
return getattr(item, key)
@@ -249,7 +251,7 @@ class ReportGenerator:
return comment
def _update_mr_comment(self, comment: str, print_retry_jobs_message: bool) -> None:
retry_job_picture_comment = (f'{RETRY_JOB_TITLE}\n\n' f'{RETRY_JOB_PICTURE_LINK}').format(
retry_job_picture_comment = (f'{RETRY_JOB_TITLE}\n\n{RETRY_JOB_PICTURE_LINK}').format(
pic_url=get_repository_file_url(RETRY_JOB_PICTURE_PATH)
)
del_retry_job_pic_pattern = re.escape(RETRY_JOB_TITLE) + r'.*?' + re.escape(f'{RETRY_JOB_PICTURE_PATH})')
@@ -332,7 +334,12 @@ class BuildReportGenerator(ReportGenerator):
2. Sort other items by absolute size_difference_percentage.
"""
# Priority: 0 for zero binaries, 1 for non-zero binaries
zero_binary_priority = 1 if item.metrics[BINARY_SIZE_METRIC_NAME].source_value != 0 or item.metrics[BINARY_SIZE_METRIC_NAME].target_value != 0 else 0
zero_binary_priority = (
1
if item.metrics[BINARY_SIZE_METRIC_NAME].source_value != 0
or item.metrics[BINARY_SIZE_METRIC_NAME].target_value != 0
else 0
)
# Secondary sort: Negative absolute size_difference_percentage for descending order
size_difference_sort = abs(item.metrics[BINARY_SIZE_METRIC_NAME].difference_percentage)
return zero_binary_priority, size_difference_sort
@@ -342,19 +349,23 @@ class BuildReportGenerator(ReportGenerator):
Generate a markdown table for the top N apps by size difference.
Only includes apps with size differences greater than 500 bytes.
"""
filtered_apps = [app for app in self.apps if abs(app.metrics[BINARY_SIZE_METRIC_NAME].difference) > SIZE_DIFFERENCE_BYTES_THRESHOLD]
filtered_apps = [
app
for app in self.apps
if abs(app.metrics[BINARY_SIZE_METRIC_NAME].difference) > SIZE_DIFFERENCE_BYTES_THRESHOLD
]
top_apps = sorted(
filtered_apps,
key=lambda app: abs(app.metrics[BINARY_SIZE_METRIC_NAME].difference_percentage),
reverse=True
filtered_apps, key=lambda app: abs(app.metrics[BINARY_SIZE_METRIC_NAME].difference_percentage), reverse=True
)[:TOP_N_APPS_BY_SIZE_DIFF]
if not top_apps:
return ''
table = (f'\n⚠️⚠️⚠️ Top {len(top_apps)} Apps with Binary Size Sorted by Size Difference\n'
f'Note: Apps with changes of less than {SIZE_DIFFERENCE_BYTES_THRESHOLD} bytes are not shown.\n')
table = (
f'\n⚠️⚠️⚠️ Top {len(top_apps)} Apps with Binary Size Sorted by Size Difference\n'
f'Note: Apps with changes of less than {SIZE_DIFFERENCE_BYTES_THRESHOLD} bytes are not shown.\n'
)
table += '| App Dir | Build Dir | Size Diff (bytes) | Size Diff (%) |\n'
table += '|---------|-----------|-------------------|---------------|\n'
for app in top_apps:
@@ -363,13 +374,17 @@ class BuildReportGenerator(ReportGenerator):
f'{app.metrics[BINARY_SIZE_METRIC_NAME].difference} | '
f'{app.metrics[BINARY_SIZE_METRIC_NAME].difference_percentage}% |\n'
)
table += ('\n**For more details, please click on the numbers in the summary above '
'to view the corresponding report files.** ⬆️⬆️⬆️\n\n')
table += (
'\n**For more details, please click on the numbers in the summary above '
'to view the corresponding report files.** ⬆️⬆️⬆️\n\n'
)
return table
@staticmethod
def split_new_and_existing_apps(apps: t.Iterable[AppWithMetricsInfo]) -> t.Tuple[t.List[AppWithMetricsInfo], t.List[AppWithMetricsInfo]]:
def split_new_and_existing_apps(
apps: t.Iterable[AppWithMetricsInfo],
) -> t.Tuple[t.List[AppWithMetricsInfo], t.List[AppWithMetricsInfo]]:
"""
Splits apps into new apps and existing apps.
@@ -388,10 +403,7 @@ class BuildReportGenerator(ReportGenerator):
:param preserve: Whether to filter preserved apps.
:return: Filtered list of apps.
"""
return [
app for app in self.apps
if app.build_status == build_status and app.preserve == preserve
]
return [app for app in self.apps if app.build_status == build_status and app.preserve == preserve]
def get_built_apps_report_parts(self) -> t.List[str]:
"""
@@ -430,14 +442,13 @@ class BuildReportGenerator(ReportGenerator):
'build_dir',
],
value_functions=[
(
'Your Branch App Size',
lambda app: str(app.metrics[BINARY_SIZE_METRIC_NAME].source_value)
),
('Your Branch App Size', lambda app: str(app.metrics[BINARY_SIZE_METRIC_NAME].source_value)),
(
'Bin Files with Build Log (without map and elf)',
lambda app: self.get_download_link_for_url(
self.app_presigned_urls_dict[app.build_path][ArtifactType.BUILD_DIR_WITHOUT_MAP_AND_ELF_FILES.value]
self.app_presigned_urls_dict[app.build_path][
ArtifactType.BUILD_DIR_WITHOUT_MAP_AND_ELF_FILES.value
]
),
),
(
@@ -481,26 +492,16 @@ class BuildReportGenerator(ReportGenerator):
'build_dir',
],
value_functions=[
(
'Your Branch App Size',
lambda app: str(app.metrics[BINARY_SIZE_METRIC_NAME].source_value)
),
(
'Target Branch App Size',
lambda app: str(app.metrics[BINARY_SIZE_METRIC_NAME].target_value)
),
(
'Size Diff',
lambda app: str(app.metrics[BINARY_SIZE_METRIC_NAME].difference)
),
(
'Size Diff, %',
lambda app: str(app.metrics[BINARY_SIZE_METRIC_NAME].difference_percentage)
),
('Your Branch App Size', lambda app: str(app.metrics[BINARY_SIZE_METRIC_NAME].source_value)),
('Target Branch App Size', lambda app: str(app.metrics[BINARY_SIZE_METRIC_NAME].target_value)),
('Size Diff', lambda app: str(app.metrics[BINARY_SIZE_METRIC_NAME].difference)),
('Size Diff, %', lambda app: str(app.metrics[BINARY_SIZE_METRIC_NAME].difference_percentage)),
(
'Bin Files with Build Log (without map and elf)',
lambda app: self.get_download_link_for_url(
self.app_presigned_urls_dict[app.build_path][ArtifactType.BUILD_DIR_WITHOUT_MAP_AND_ELF_FILES.value]
self.app_presigned_urls_dict[app.build_path][
ArtifactType.BUILD_DIR_WITHOUT_MAP_AND_ELF_FILES.value
]
),
),
(
@@ -528,12 +529,13 @@ class BuildReportGenerator(ReportGenerator):
'build_dir',
],
value_functions=[
('Your Branch App Size', lambda app: str(app.metrics[BINARY_SIZE_METRIC_NAME].source_value)),
(
'Your Branch App Size',
lambda app: str(app.metrics[BINARY_SIZE_METRIC_NAME].source_value)
'Build Log',
lambda app: self.get_download_link_for_url(
self._uploader.get_app_presigned_url(app, ArtifactType.LOGS)
),
),
('Build Log', lambda app: self.get_download_link_for_url(
self._uploader.get_app_presigned_url(app, ArtifactType.LOGS))),
],
)
sections.extend(new_non_test_related_apps_table_section)
@@ -562,24 +564,16 @@ class BuildReportGenerator(ReportGenerator):
'build_dir',
],
value_functions=[
('Your Branch App Size', lambda app: str(app.metrics[BINARY_SIZE_METRIC_NAME].source_value)),
('Target Branch App Size', lambda app: str(app.metrics[BINARY_SIZE_METRIC_NAME].target_value)),
('Size Diff', lambda app: str(app.metrics[BINARY_SIZE_METRIC_NAME].difference)),
('Size Diff, %', lambda app: str(app.metrics[BINARY_SIZE_METRIC_NAME].difference_percentage)),
(
'Your Branch App Size',
lambda app: str(app.metrics[BINARY_SIZE_METRIC_NAME].source_value)
'Build Log',
lambda app: self.get_download_link_for_url(
self._uploader.get_app_presigned_url(app, ArtifactType.LOGS)
),
),
(
'Target Branch App Size',
lambda app: str(app.metrics[BINARY_SIZE_METRIC_NAME].target_value)
),
(
'Size Diff',
lambda app: str(app.metrics[BINARY_SIZE_METRIC_NAME].difference)
),
(
'Size Diff, %',
lambda app: str(app.metrics[BINARY_SIZE_METRIC_NAME].difference_percentage)
),
('Build Log', lambda app: self.get_download_link_for_url(
self._uploader.get_app_presigned_url(app, ArtifactType.LOGS))),
],
)
sections.extend(built_non_test_related_apps_table_section)
@@ -631,7 +625,12 @@ class BuildReportGenerator(ReportGenerator):
headers=['App Dir', 'Build Dir', 'Failed Reason', 'Build Log'],
row_attrs=['app_dir', 'build_dir', 'build_comment'],
value_functions=[
('Build Log', lambda app: self.get_download_link_for_url(self._uploader.get_app_presigned_url(app, ArtifactType.LOGS))),
(
'Build Log',
lambda app: self.get_download_link_for_url(
self._uploader.get_app_presigned_url(app, ArtifactType.LOGS)
),
),
],
)
failed_apps_report_url = self.write_report_to_file(
@@ -655,7 +654,12 @@ class BuildReportGenerator(ReportGenerator):
headers=['App Dir', 'Build Dir', 'Skipped Reason', 'Build Log'],
row_attrs=['app_dir', 'build_dir', 'build_comment'],
value_functions=[
('Build Log', lambda app: self.get_download_link_for_url(self._uploader.get_app_presigned_url(app, ArtifactType.LOGS))),
(
'Build Log',
lambda app: self.get_download_link_for_url(
self._uploader.get_app_presigned_url(app, ArtifactType.LOGS)
),
),
],
)
skipped_apps_report_url = self.write_report_to_file(
@@ -790,7 +794,7 @@ class TargetTestReportGenerator(ReportGenerator):
'Test Case',
'Test App Path',
'Failure Reason',
f'Failures on your branch (40 latest testcases)',
'Failures on your branch (40 latest testcases)',
'Dut Log URL',
'Create Known Failure Case Jira',
'Job URL',
@@ -800,12 +804,10 @@ class TargetTestReportGenerator(ReportGenerator):
value_functions=[
(
'Failures on your branch (40 latest testcases)',
lambda item: f"{getattr(item, 'latest_failed_count', '')} / {getattr(item, 'latest_total_count', '')}",
lambda item: f'{getattr(item, "latest_failed_count", "")} '
f'/ {getattr(item, "latest_total_count", "")}',
),
(
'Create Known Failure Case Jira',
known_failure_issue_jira_fast_link
)
('Create Known Failure Case Jira', known_failure_issue_jira_fast_link),
],
)
other_branch_cases_table_section = self.create_table_section(
@ -825,12 +827,10 @@ class TargetTestReportGenerator(ReportGenerator):
value_functions=[
(
'Cases that failed in other branches as well (40 latest testcases)',
lambda item: f"{getattr(item, 'latest_failed_count', '')} / {getattr(item, 'latest_total_count', '')}",
lambda item: f'{getattr(item, "latest_failed_count", "")} '
f'/ {getattr(item, "latest_total_count", "")}',
),
(
'Create Known Failure Case Jira',
known_failure_issue_jira_fast_link
)
('Create Known Failure Case Jira', known_failure_issue_jira_fast_link),
],
)
known_failures_cases_table_section = self.create_table_section(
@@ -986,7 +986,8 @@ class JobReportGenerator(ReportGenerator):
value_functions=[
(
'Failures across all other branches (10 latest jobs)',
lambda item: f"{getattr(item, 'latest_failed_count', '')} / {getattr(item, 'latest_total_count', '')}",
lambda item: f'{getattr(item, "latest_failed_count", "")} '
f'/ {getattr(item, "latest_total_count", "")}',
)
],
)


@@ -4,13 +4,14 @@ import argparse
import sys
import __init__ # noqa: F401 # inject the system path
from dynamic_pipelines.constants import TEST_RELATED_APPS_FILENAME
from idf_build_apps import build_apps
from idf_build_apps import setup_logging
from idf_build_apps.utils import semicolon_separated_str_to_list
from idf_ci.app import import_apps_from_txt
from idf_ci_local.app import import_apps_from_txt
from idf_pytest.constants import DEFAULT_IGNORE_WARNING_FILEPATH
from dynamic_pipelines.constants import TEST_RELATED_APPS_FILENAME
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Build Apps for Dynamic Pipeline')
parser.add_argument('app_list_file', default=TEST_RELATED_APPS_FILENAME, help='List of apps to build')


@@ -1,12 +1,22 @@
# SPDX-FileCopyrightText: 2024 Espressif Systems (Shanghai) CO LTD
# SPDX-License-Identifier: Apache-2.0
"""This file is used for generating the child pipeline for build jobs."""
import argparse
import os
import typing as t
import __init__ # noqa: F401 # inject the system path
import yaml
from idf_build_apps.utils import semicolon_separated_str_to_list
from idf_ci_local.app import dump_apps_to_txt
from idf_ci_utils import IDF_PATH
from idf_pytest.constants import DEFAULT_CONFIG_RULES_STR
from idf_pytest.constants import DEFAULT_FULL_BUILD_TEST_COMPONENTS
from idf_pytest.constants import DEFAULT_FULL_BUILD_TEST_FILEPATTERNS
from idf_pytest.constants import CollectMode
from idf_pytest.script import get_all_apps
from dynamic_pipelines.constants import DEFAULT_APPS_BUILD_PER_JOB
from dynamic_pipelines.constants import DEFAULT_BUILD_CHILD_PIPELINE_FILEPATH
from dynamic_pipelines.constants import DEFAULT_BUILD_CHILD_PIPELINE_NAME
@@ -18,14 +28,6 @@ from dynamic_pipelines.constants import TEST_RELATED_BUILD_JOB_NAME
from dynamic_pipelines.models import BuildJob
from dynamic_pipelines.models import EmptyJob
from dynamic_pipelines.utils import dump_jobs_to_yaml
from idf_build_apps.utils import semicolon_separated_str_to_list
from idf_ci.app import dump_apps_to_txt
from idf_ci_utils import IDF_PATH
from idf_pytest.constants import CollectMode
from idf_pytest.constants import DEFAULT_CONFIG_RULES_STR
from idf_pytest.constants import DEFAULT_FULL_BUILD_TEST_COMPONENTS
from idf_pytest.constants import DEFAULT_FULL_BUILD_TEST_FILEPATTERNS
from idf_pytest.script import get_all_apps
def _separate_str_to_list(s: str) -> t.List[str]:
@@ -101,7 +103,8 @@ def main(arguments: argparse.Namespace) -> None:
print(f'Generate test related apps file {TEST_RELATED_APPS_FILENAME} with {len(test_related_apps)} apps')
dump_apps_to_txt(sorted(non_test_related_apps), NON_TEST_RELATED_APPS_FILENAME)
print(
f'Generate non-test related apps file {NON_TEST_RELATED_APPS_FILENAME} with {len(non_test_related_apps)} apps'
f'Generate non-test related apps file {NON_TEST_RELATED_APPS_FILENAME} '
f'with {len(non_test_related_apps)} apps'
)
if test_related_apps:
@@ -171,7 +174,7 @@ if __name__ == '__main__':
parser.add_argument(
'--compare-manifest-sha-filepath',
default=os.path.join(IDF_PATH, '.manifest_sha'),
help='Path to the recorded manifest sha file generated by `idf-build-apps dump-manifest-sha`'
help='Path to the recorded manifest sha file generated by `idf-build-apps dump-manifest-sha`',
)
parser.add_argument(
'--modified-components',


@@ -6,14 +6,15 @@ import os
import typing as t
import __init__ # noqa: F401 # inject the system path
from idf_ci_local.app import enrich_apps_with_metrics_info
from idf_ci_local.app import import_apps_from_txt
from dynamic_pipelines.report import BuildReportGenerator
from dynamic_pipelines.report import JobReportGenerator
from dynamic_pipelines.report import TargetTestReportGenerator
from dynamic_pipelines.utils import fetch_app_metrics
from dynamic_pipelines.utils import fetch_failed_jobs
from dynamic_pipelines.utils import parse_testcases_from_filepattern
from idf_ci.app import enrich_apps_with_metrics_info
from idf_ci.app import import_apps_from_txt
def main() -> None:


@@ -6,6 +6,7 @@
2. Post the Build Report if it's running in an MR pipeline.
3. Generate the child pipeline for target test jobs.
"""
import argparse
import glob
import os
@@ -15,24 +16,23 @@ from collections import defaultdict
import __init__ # noqa: F401 # inject the system path
import yaml
from idf_build_apps import App
from idf_ci_local.app import import_apps_from_txt
from idf_pytest.constants import TIMEOUT_4H_MARKERS
from idf_pytest.script import get_pytest_cases
from dynamic_pipelines.constants import BUILD_ONLY_LABEL
from dynamic_pipelines.constants import DEFAULT_CASES_TEST_PER_JOB
from dynamic_pipelines.constants import DEFAULT_TARGET_TEST_CHILD_PIPELINE_FILEPATH
from dynamic_pipelines.constants import DEFAULT_TARGET_TEST_CHILD_PIPELINE_NAME
from dynamic_pipelines.constants import DEFAULT_TARGET_TEST_JOB_TEMPLATE_NAME
from dynamic_pipelines.constants import DEFAULT_TEST_PATHS
from dynamic_pipelines.constants import (
KNOWN_GENERATE_TEST_CHILD_PIPELINE_WARNINGS_FILEPATH,
)
from dynamic_pipelines.constants import KNOWN_GENERATE_TEST_CHILD_PIPELINE_WARNINGS_FILEPATH
from dynamic_pipelines.constants import TIMEOUT_4H_TEMPLATE_NAME
from dynamic_pipelines.models import EmptyJob
from dynamic_pipelines.models import Job
from dynamic_pipelines.models import TargetTestJob
from dynamic_pipelines.utils import dump_jobs_to_yaml
from idf_build_apps import App
from idf_ci.app import import_apps_from_txt
from idf_pytest.constants import TIMEOUT_4H_MARKERS
from idf_pytest.script import get_pytest_cases
def get_tags_with_amount(s: str) -> t.List[str]:


@@ -11,18 +11,28 @@ from unittest.mock import patch
sys.path.insert(0, os.path.join(f'{os.environ.get("IDF_PATH")}', 'tools', 'ci', 'python_packages'))
sys.path.insert(0, os.path.join(f'{os.environ.get("IDF_PATH")}', 'tools', 'ci'))
from dynamic_pipelines.models import GitlabJob # noqa: E402
from dynamic_pipelines.report import JobReportGenerator, TargetTestReportGenerator, BuildReportGenerator # noqa: E402
from dynamic_pipelines.utils import load_file, parse_testcases_from_filepattern # noqa: E402
from idf_build_apps.constants import BuildStatus # noqa: E402
from idf_ci.app import import_apps_from_txt # noqa: E402
from idf_ci.app import enrich_apps_with_metrics_info # noqa: E402
from idf_ci_local.app import enrich_apps_with_metrics_info # noqa: E402
from idf_ci_local.app import import_apps_from_txt # noqa: E402
from dynamic_pipelines.models import GitlabJob # noqa: E402
from dynamic_pipelines.report import BuildReportGenerator # noqa: E402
from dynamic_pipelines.report import JobReportGenerator # noqa: E402
from dynamic_pipelines.report import TargetTestReportGenerator # noqa: E402
from dynamic_pipelines.utils import load_file # noqa: E402
from dynamic_pipelines.utils import parse_testcases_from_filepattern # noqa: E402
class TestReportGeneration(unittest.TestCase):
def setUp(self) -> None:
self.reports_sample_data_path = os.path.join(
os.environ.get('IDF_PATH', ''), 'tools', 'ci', 'dynamic_pipelines', 'tests', 'test_report_generator', 'reports_sample_data'
os.environ.get('IDF_PATH', ''),
'tools',
'ci',
'dynamic_pipelines',
'tests',
'test_report_generator',
'reports_sample_data',
)
self.setup_patches()
self.load_test_and_job_reports()
@@ -32,12 +42,15 @@ class TestReportGeneration(unittest.TestCase):
self.gitlab_patcher = patch('dynamic_pipelines.report.Gitlab')
self.uploader_patcher = patch('dynamic_pipelines.report.AppUploader')
self.failure_rate_patcher = patch('dynamic_pipelines.report.fetch_failed_testcases_failure_ratio')
self.env_patcher = patch.dict('os.environ', {
'CI_DASHBOARD_HOST': 'https://test_dashboard_host',
'CI_PAGES_URL': 'https://artifacts_path',
'CI_JOB_ID': '1',
'JIRA_SERVER': 'https://jira.com',
})
self.env_patcher = patch.dict(
'os.environ',
{
'CI_DASHBOARD_HOST': 'https://test_dashboard_host',
'CI_PAGES_URL': 'https://artifacts_path',
'CI_JOB_ID': '1',
'JIRA_SERVER': 'https://jira.com',
},
)
self.yaml_dump_patcher = patch('dynamic_pipelines.report.yaml.dump')
self.MockGitlab = self.gitlab_patcher.start()
@@ -86,12 +99,21 @@ class TestReportGeneration(unittest.TestCase):
def create_report_generators(self) -> None:
jobs_response_raw = load_file(os.path.join(self.reports_sample_data_path, 'jobs_api_response.json'))
failure_rate_jobs_response = load_file(os.path.join(self.reports_sample_data_path, 'failure_rate_jobs_response.json'))
built_apps_size_info_response = json.loads(load_file(os.path.join(self.reports_sample_data_path, 'apps_size_info_api_response.json')))
failure_rate_jobs_response = load_file(
os.path.join(self.reports_sample_data_path, 'failure_rate_jobs_response.json')
)
built_apps_size_info_response = json.loads(
load_file(os.path.join(self.reports_sample_data_path, 'apps_size_info_api_response.json'))
)
failure_rates = {item['name']: item for item in json.loads(failure_rate_jobs_response).get('jobs', [])}
jobs = [GitlabJob.from_json_data(job_json, failure_rates.get(job_json['name'], {})) for job_json in json.loads(jobs_response_raw)['jobs']]
jobs = [
GitlabJob.from_json_data(job_json, failure_rates.get(job_json['name'], {}))
for job_json in json.loads(jobs_response_raw)['jobs']
]
test_cases = parse_testcases_from_filepattern(os.path.join(self.reports_sample_data_path, 'XUNIT_*.xml'))
apps = enrich_apps_with_metrics_info(built_apps_size_info_response, import_apps_from_txt(os.path.join(self.reports_sample_data_path, 'apps')))
apps = enrich_apps_with_metrics_info(
built_apps_size_info_response, import_apps_from_txt(os.path.join(self.reports_sample_data_path, 'apps'))
)
self.target_test_report_generator = TargetTestReportGenerator(
project_id=123,
mr_iid=1,
@@ -99,25 +121,13 @@ class TestReportGeneration(unittest.TestCase):
job_id=0,
commit_id='cccc',
title='Test Report',
test_cases=test_cases
test_cases=test_cases,
)
self.job_report_generator = JobReportGenerator(
project_id=123,
mr_iid=1,
pipeline_id=456,
job_id=0,
commit_id='cccc',
title='Job Report',
jobs=jobs
project_id=123, mr_iid=1, pipeline_id=456, job_id=0, commit_id='cccc', title='Job Report', jobs=jobs
)
self.build_report_generator = BuildReportGenerator(
project_id=123,
mr_iid=1,
pipeline_id=456,
job_id=0,
commit_id='cccc',
title='Build Report',
apps=apps
project_id=123, mr_iid=1, pipeline_id=456, job_id=0, commit_id='cccc', title='Build Report', apps=apps
)
self.target_test_report_generator._known_failure_cases_set = {
'*.test_wpa_supplicant_ut',
@@ -189,7 +199,7 @@ class TestReportGeneration(unittest.TestCase):
difference=i * 1000,
difference_percentage=i * 0.5,
)
}
},
)
for i in range(1, 6)
]
@@ -200,7 +210,7 @@ class TestReportGeneration(unittest.TestCase):
job_id=0,
commit_id='cccc',
title='Build Report',
apps=apps_with_size_diff
apps=apps_with_size_diff,
)
top_apps_table = build_report_generator._generate_top_n_apps_by_size_table()
@@ -218,12 +228,7 @@ class TestReportGeneration(unittest.TestCase):
size_difference_percentage=1.0,
build_status=BuildStatus.SUCCESS,
preserve=True,
metrics={
'binary_size': MagicMock(
difference=1000,
difference_percentage=1.0
)
}
metrics={'binary_size': MagicMock(difference=1000, difference_percentage=1.0)},
),
MagicMock(
app_dir='test_app_2',
@@ -232,23 +237,12 @@ class TestReportGeneration(unittest.TestCase):
size_difference_percentage=2.0,
build_status=BuildStatus.SUCCESS,
preserve=False,
metrics={
'binary_size': MagicMock(
difference=2000,
difference_percentage=2.0
)
}
metrics={'binary_size': MagicMock(difference=2000, difference_percentage=2.0)},
),
]
build_report_generator = BuildReportGenerator(
project_id=123,
mr_iid=1,
pipeline_id=456,
job_id=0,
commit_id='cccc',
title='Build Report',
apps=apps
project_id=123, mr_iid=1, pipeline_id=456, job_id=0, commit_id='cccc', title='Build Report', apps=apps
)
built_apps_report_parts = build_report_generator.get_built_apps_report_parts()
@@ -264,24 +258,14 @@ class TestReportGeneration(unittest.TestCase):
build_dir='build_dir_1',
build_comment='Compilation error',
build_status=BuildStatus.FAILED,
metrics={
'binary_size': MagicMock(
difference=None,
difference_percentage=None
)
}
metrics={'binary_size': MagicMock(difference=None, difference_percentage=None)},
),
MagicMock(
app_dir='failed_app_2',
build_dir='build_dir_2',
build_comment='Linker error',
build_status=BuildStatus.FAILED,
metrics={
'binary_size': MagicMock(
difference=None,
difference_percentage=None
)
}
metrics={'binary_size': MagicMock(difference=None, difference_percentage=None)},
),
]
@@ -292,7 +276,7 @@ class TestReportGeneration(unittest.TestCase):
job_id=0,
commit_id='cccc',
title='Build Report',
apps=failed_apps
apps=failed_apps,
)
failed_apps_report_parts = build_report_generator.get_failed_apps_report_parts()
@@ -308,24 +292,14 @@ class TestReportGeneration(unittest.TestCase):
build_dir='build_dir_1',
build_comment='Dependencies unmet',
build_status=BuildStatus.SKIPPED,
metrics={
'binary_size': MagicMock(
difference=None,
difference_percentage=None
)
}
metrics={'binary_size': MagicMock(difference=None, difference_percentage=None)},
),
MagicMock(
app_dir='skipped_app_2',
build_dir='build_dir_2',
build_comment='Feature flag disabled',
build_status=BuildStatus.SKIPPED,
metrics={
'binary_size': MagicMock(
difference=None,
difference_percentage=None
)
}
metrics={'binary_size': MagicMock(difference=None, difference_percentage=None)},
),
]
@@ -336,7 +310,7 @@ class TestReportGeneration(unittest.TestCase):
job_id=0,
commit_id='cccc',
title='Build Report',
apps=skipped_apps
apps=skipped_apps,
)
skipped_apps_report_parts = build_report_generator.get_skipped_apps_report_parts()


@@ -44,7 +44,7 @@ tools/ci/get_known_failure_cases_file.py
tools/unit-test-app/**/*
tools/ci/gitlab_yaml_linter.py
tools/ci/dynamic_pipelines/**/*
tools/ci/idf_ci/**/*
tools/ci/idf_ci_local/**/*
tools/ci/get_supported_examples.sh
tools/ci/python_packages/common_test_methods.py
tools/ci/python_packages/gitlab_api.py


@@ -9,8 +9,9 @@ from dynamic_pipelines.constants import BINARY_SIZE_METRIC_NAME
from idf_build_apps import App
from idf_build_apps import CMakeApp
from idf_build_apps import json_to_app
from idf_ci.uploader import AppUploader
from idf_ci.uploader import get_app_uploader
from idf_ci_local.uploader import AppUploader
from idf_ci_local.uploader import get_app_uploader
class IdfCMakeApp(CMakeApp):
@@ -34,6 +35,7 @@ class Metrics:
"""
Represents a metric and its values for source, target, and the differences.
"""
def __init__(
self,
source_value: t.Optional[float] = None,
@@ -65,10 +67,7 @@ class AppWithMetricsInfo(IdfCMakeApp):
def __init__(self, **kwargs: t.Any) -> None:
super().__init__(**kwargs)
self.metrics = {
metric_name: metric_data
for metric_name, metric_data in kwargs.get('metrics', {}).items()
}
self.metrics = {metric_name: metric_data for metric_name, metric_data in kwargs.get('metrics', {}).items()}
self.is_new_app = kwargs.get('is_new_app', False)
class Config:
@@ -96,8 +95,7 @@ def import_apps_from_txt(input_filepath: str) -> t.List[App]:
def enrich_apps_with_metrics_info(
app_metrics_info_map: t.Dict[str, t.Dict[str, t.Any]],
apps: t.List[App]
app_metrics_info_map: t.Dict[str, t.Dict[str, t.Any]], apps: t.List[App]
) -> t.List[AppWithMetricsInfo]:
def _get_full_attributes(obj: App) -> t.Dict[str, t.Any]:
"""
@@ -130,10 +128,7 @@ def enrich_apps_with_metrics_info(
key = f'{app.app_dir}_{app.config_name}_{app.target}'
app_attributes = _get_full_attributes(app)
metrics = {
metric_name: default_metric
for metric_name, default_metric in default_metrics_structure.items()
}
metrics = {metric_name: default_metric for metric_name, default_metric in default_metrics_structure.items()}
is_new_app = False
if key in app_metrics_info_map:


@@ -141,7 +141,7 @@ class AppUploader(AppDownloader):
self._client.fget_object(getenv('IDF_S3_BUCKET'), obj_name, zip_filename)
print(f'Downloaded to {zip_filename}')
except minio.error.S3Error as e:
raise RuntimeError('Shouldn\'t happen, please report this bug in the CI channel' + str(e))
raise RuntimeError("Shouldn't happen, please report this bug in the CI channel" + str(e))
with ZipFile(zip_filename, 'r') as zr:
zr.extractall()


@@ -12,20 +12,20 @@ import pytest
from _pytest.config import ExitCode
from idf_build_apps import App
from idf_build_apps import find_apps
from idf_build_apps.constants import BuildStatus
from idf_build_apps.constants import SUPPORTED_TARGETS
from idf_ci.app import IdfCMakeApp
from idf_ci_utils import get_all_manifest_files
from idf_build_apps.constants import BuildStatus
from idf_ci_local.app import IdfCMakeApp
from idf_ci_utils import IDF_PATH
from idf_ci_utils import get_all_manifest_files
from idf_ci_utils import idf_relpath
from idf_ci_utils import to_list
from idf_py_actions.constants import PREVIEW_TARGETS as TOOLS_PREVIEW_TARGETS
from idf_py_actions.constants import SUPPORTED_TARGETS as TOOLS_SUPPORTED_TARGETS
from .constants import CollectMode
from .constants import DEFAULT_BUILD_LOG_FILENAME
from .constants import DEFAULT_CONFIG_RULES_STR
from .constants import DEFAULT_SIZE_JSON_FILENAME
from .constants import CollectMode
from .constants import PytestCase
from .plugin import IdfPytestEmbedded
@@ -84,7 +84,9 @@ def get_pytest_cases(
return cases
def _get_pytest_cases(_target: str, _single_target_duplicate_mode: bool = False) -> t.List[PytestCase]:
collector = IdfPytestEmbedded(_target, config_name=config_name, single_target_duplicate_mode=_single_target_duplicate_mode, apps=apps)
collector = IdfPytestEmbedded(
_target, config_name=config_name, single_target_duplicate_mode=_single_target_duplicate_mode, apps=apps
)
with io.StringIO() as buf:
with redirect_stdout(buf):
@@ -100,9 +102,7 @@ def get_pytest_cases(
print(f'WARNING: no pytest app found for target {_target} under paths {", ".join(paths)}')
else:
print(buf.getvalue())
raise RuntimeError(
f'pytest collection failed at {", ".join(paths)} with command \"{" ".join(cmd)}\"'
)
raise RuntimeError(f'pytest collection failed at {", ".join(paths)} with command "{" ".join(cmd)}"')
return collector.cases # type: ignore
@@ -155,26 +155,28 @@ def get_all_apps(
# target could be comma separated list
all_apps: t.List[App] = []
for _t in set(target.split(',')):
all_apps.extend(find_apps(
paths,
_t,
build_system=IdfCMakeApp,
recursive=True,
build_dir='build_@t_@w',
config_rules_str=config_rules_str or DEFAULT_CONFIG_RULES_STR,
build_log_filename=DEFAULT_BUILD_LOG_FILENAME,
size_json_filename=DEFAULT_SIZE_JSON_FILENAME,
check_warnings=True,
manifest_rootpath=IDF_PATH,
compare_manifest_sha_filepath=compare_manifest_sha_filepath,
manifest_files=get_all_manifest_files(),
default_build_targets=SUPPORTED_TARGETS + (extra_default_build_targets or []),
modified_components=modified_components,
modified_files=modified_files,
ignore_app_dependencies_components=ignore_app_dependencies_components,
ignore_app_dependencies_filepatterns=ignore_app_dependencies_filepatterns,
include_skipped_apps=True,
))
all_apps.extend(
find_apps(
paths,
_t,
build_system=IdfCMakeApp,
recursive=True,
build_dir='build_@t_@w',
config_rules_str=config_rules_str or DEFAULT_CONFIG_RULES_STR,
build_log_filename=DEFAULT_BUILD_LOG_FILENAME,
size_json_filename=DEFAULT_SIZE_JSON_FILENAME,
check_warnings=True,
manifest_rootpath=IDF_PATH,
compare_manifest_sha_filepath=compare_manifest_sha_filepath,
manifest_files=get_all_manifest_files(),
default_build_targets=SUPPORTED_TARGETS + (extra_default_build_targets or []),
modified_components=modified_components,
modified_files=modified_files,
ignore_app_dependencies_components=ignore_app_dependencies_components,
ignore_app_dependencies_filepatterns=ignore_app_dependencies_filepatterns,
include_skipped_apps=True,
)
)
pytest_cases = get_pytest_cases(
paths,