diff --git a/.gitignore b/.gitignore index 4d3b754290..61e02dae70 100644 --- a/.gitignore +++ b/.gitignore @@ -21,6 +21,11 @@ GPATH # MacOS directory files .DS_Store +# Components Unit Test Apps files +components/**/build +components/**/sdkconfig +components/**/sdkconfig.old + # Example project files examples/**/sdkconfig examples/**/sdkconfig.old diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 4c74067585..bb596d7b37 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -64,22 +64,39 @@ variables: .fetch_submodules: &fetch_submodules | python $SUBMODULE_FETCH_TOOL -s $SUBMODULES_TO_FETCH +.add_ssh_keys: &add_ssh_keys | + mkdir -p ~/.ssh + chmod 700 ~/.ssh + echo -n $GITLAB_KEY > ~/.ssh/id_rsa_base64 + base64 --decode --ignore-garbage ~/.ssh/id_rsa_base64 > ~/.ssh/id_rsa + chmod 600 ~/.ssh/id_rsa + echo -e "Host gitlab.espressif.cn\n\tStrictHostKeyChecking no\n" >> ~/.ssh/config + before_script: - source tools/ci/setup_python.sh # apply bot filter in before script - *apply_bot_filter # add gitlab ssh key - - mkdir -p ~/.ssh - - chmod 700 ~/.ssh - - echo -n $GITLAB_KEY > ~/.ssh/id_rsa_base64 - - base64 --decode --ignore-garbage ~/.ssh/id_rsa_base64 > ~/.ssh/id_rsa - - chmod 600 ~/.ssh/id_rsa - - echo -e "Host gitlab.espressif.cn\n\tStrictHostKeyChecking no\n" >> ~/.ssh/config + - *add_ssh_keys # Set some options and environment for CI - source tools/ci/configure_ci_environment.sh - *setup_tools_unless_target_test - *fetch_submodules +# used for component-based unit test apps +.before_script_for_component_ut: + variables: + COMPONENT_UT_EXCLUDE_LIST_FP: ${CI_PROJECT_DIR}/tools/ci/component_ut_excludes.txt + before_script: + - source tools/ci/setup_python.sh + - *apply_bot_filter + - *add_ssh_keys + - source tools/ci/configure_ci_environment.sh + - *setup_tools_unless_target_test + - *fetch_submodules + - export COMPONENT_UT_DIRS=`find components/ -name test_apps -type d` + - export COMPONENT_UT_EXCLUDES=`[ -r $COMPONENT_UT_EXCLUDE_LIST_FP ] && cat $COMPONENT_UT_EXCLUDE_LIST_FP | xargs` + # used for check scripts which we want to run unconditionally .before_script_lesser_nofilter: variables: diff --git a/components/esp_netif/test_apps/CMakeLists.txt b/components/esp_netif/test_apps/CMakeLists.txt new file mode 100644 index 0000000000..03a8cad939 --- /dev/null +++ b/components/esp_netif/test_apps/CMakeLists.txt @@ -0,0 +1,7 @@ +# This is the project CMakeLists.txt file for the test subproject +cmake_minimum_required(VERSION 3.5) + +set(EXTRA_COMPONENT_DIRS "$ENV{IDF_PATH}/tools/unit-test-app/components") + +include($ENV{IDF_PATH}/tools/cmake/project.cmake) +project(esp_netif_test) diff --git a/components/esp_netif/test_apps/component_ut_test.py b/components/esp_netif/test_apps/component_ut_test.py new file mode 100644 index 0000000000..2d23c0ea11 --- /dev/null +++ b/components/esp_netif/test_apps/component_ut_test.py @@ -0,0 +1,15 @@ +from __future__ import print_function + +import ttfw_idf + + +@ttfw_idf.idf_component_unit_test(env_tag='COMPONENT_UT_GENERIC') +def test_component_ut_esp_netif(env, extra_data): + dut = env.get_dut('esp_netif', 'components/esp_netif/test_app') + dut.start_app() + stdout = dut.expect('Tests finished', full_stdout=True) + ttfw_idf.ComponentUTResult.parse_result(stdout) + + +if __name__ == '__main__': + test_component_ut_esp_netif() diff --git a/components/esp_netif/test_apps/main/CMakeLists.txt b/components/esp_netif/test_apps/main/CMakeLists.txt new file mode 100644 index 0000000000..472bc34f63 --- /dev/null +++ 
b/components/esp_netif/test_apps/main/CMakeLists.txt @@ -0,0 +1,5 @@ +idf_component_register(SRCS "esp_netif_test.c" + REQUIRES test_utils + INCLUDE_DIRS "." + PRIV_INCLUDE_DIRS "$ENV{IDF_PATH}/components/esp_netif/private_include" "." + PRIV_REQUIRES unity esp_netif nvs_flash) diff --git a/components/esp_netif/test_apps/main/esp_netif_test.c b/components/esp_netif/test_apps/main/esp_netif_test.c new file mode 100644 index 0000000000..264435507b --- /dev/null +++ b/components/esp_netif/test_apps/main/esp_netif_test.c @@ -0,0 +1,287 @@ +#include +#include +#include "unity.h" +#include "unity_fixture.h" +#include "esp_netif.h" +#include "esp_wifi.h" +#include "nvs_flash.h" +#include "esp_wifi_netif.h" +#include "sdkconfig.h" +#include "lwip/sockets.h" +#include "test_utils.h" + + +TEST_GROUP(esp_netif); + +TEST_SETUP(esp_netif) +{ +} + +TEST_TEAR_DOWN(esp_netif) +{ +} + +TEST(esp_netif, init_and_destroy) +{ + esp_netif_config_t cfg = ESP_NETIF_DEFAULT_WIFI_STA(); + esp_netif_t *esp_netif = esp_netif_new(NULL); + + TEST_ASSERT_EQUAL(NULL, esp_netif); + esp_netif = esp_netif_new(&cfg); + TEST_ASSERT_NOT_EQUAL(NULL, esp_netif); + + esp_netif_destroy(esp_netif); +} + + +TEST(esp_netif, get_from_if_key) +{ + // init default netif + esp_netif_config_t cfg = ESP_NETIF_DEFAULT_WIFI_STA(); + esp_netif_t *esp_netif = esp_netif_new(&cfg); + TEST_ASSERT_NOT_NULL(esp_netif); + + // check it's accessible by key + TEST_ASSERT_EQUAL(esp_netif, esp_netif_get_handle_from_ifkey("WIFI_STA_DEF")); + + // destroy it + esp_netif_destroy(esp_netif); + + // check it's also destroyed in list + TEST_ASSERT_EQUAL(NULL, esp_netif_get_handle_from_ifkey("WIFI_STA_DEF")); + +} + + +TEST(esp_netif, create_delete_multiple_netifs) +{ + // interface key has to be a unique identifier + const char* if_keys[] = { "if1", "if2", "if3", "if4", "if5", "if6", "if7", "if8", "if9" }; + const int nr_of_netifs = sizeof(if_keys)/sizeof(char*); + esp_netif_t *netifs[nr_of_netifs]; + + // create 10 wifi stations + for (int i=0; i (bool, bool) logging.info('Build all apps') return True, True - if os.getenv(TEST_LABELS[action]): - logging.info('Build test cases apps') - return True, False - else: - logging.info('Skip all') - return False, False + labels = TEST_LABELS[action] + if not isinstance(labels, list): + labels = [labels] + + for label in labels: + if os.getenv(label): + logging.info('Build test cases apps') + return True, False + else: + logging.info('Skip all') + return False, False def output_json(apps_dict_list, target, build_system, output_dir): @@ -59,8 +62,7 @@ def main(): parser.add_argument('test_type', choices=TEST_LABELS.keys(), help='Scan test type') - parser.add_argument('paths', - nargs='+', + parser.add_argument('paths', nargs='+', help='One or more app paths') parser.add_argument('-b', '--build-system', choices=BUILD_SYSTEMS.keys(), @@ -71,8 +73,7 @@ def main(): parser.add_argument('-o', '--output-path', required=True, help="output path of the scan result") - parser.add_argument("--exclude", - action="append", + parser.add_argument("--exclude", nargs="*", help='Ignore specified directory. 
Can be used multiple times.') parser.add_argument('--preserve', action="store_true", help='add this flag to preserve artifacts for all apps') @@ -90,15 +91,17 @@ def main(): raise e if (not build_standalone_apps) and (not build_test_case_apps): - for target in VALID_TARGETS: + for target in SUPPORTED_TARGETS: output_json([], target, args.build_system, args.output_path) SystemExit(0) + paths = set([os.path.join(os.getenv('IDF_PATH'), path) if not os.path.isabs(path) else path for path in args.paths]) + test_cases = [] - for path in set(args.paths): + for path in paths: if args.test_type == 'example_test': assign = ExampleAssignTest(path, args.ci_config_file) - elif args.test_type == 'test_apps': + elif args.test_type in ['test_apps', 'component_ut']: assign = TestAppsAssignTest(path, args.ci_config_file) else: raise SystemExit(1) # which is impossible @@ -123,7 +126,7 @@ def main(): build_system_class = BUILD_SYSTEMS[build_system] if build_test_case_apps: - for target in VALID_TARGETS: + for target in SUPPORTED_TARGETS: target_dict = scan_info_dict[target] test_case_apps = target_dict['test_case_apps'] = set() for case in test_cases: @@ -134,21 +137,21 @@ def main(): test_case_apps.update(find_apps(build_system_class, app_dir, True, default_exclude, target.lower())) exclude_apps.append(app_dir) else: - for target in VALID_TARGETS: + for target in SUPPORTED_TARGETS: scan_info_dict[target]['test_case_apps'] = set() if build_standalone_apps: - for target in VALID_TARGETS: + for target in SUPPORTED_TARGETS: target_dict = scan_info_dict[target] standalone_apps = target_dict['standalone_apps'] = set() - for path in args.paths: + for path in paths: standalone_apps.update(find_apps(build_system_class, path, True, exclude_apps, target.lower())) else: - for target in VALID_TARGETS: + for target in SUPPORTED_TARGETS: scan_info_dict[target]['standalone_apps'] = set() test_case_apps_preserve_default = True if build_system == 'cmake' else False - for target in VALID_TARGETS: + for target in SUPPORTED_TARGETS: apps = [] for app_dir in scan_info_dict[target]['test_case_apps']: apps.append({ diff --git a/tools/ci/python_packages/ttfw_idf/IDFApp.py b/tools/ci/python_packages/ttfw_idf/IDFApp.py index 64c434f2c9..dfa767ea22 100644 --- a/tools/ci/python_packages/ttfw_idf/IDFApp.py +++ b/tools/ci/python_packages/ttfw_idf/IDFApp.py @@ -22,7 +22,7 @@ import sys from abc import abstractmethod from tiny_test_fw import App -from .IDFAssignTest import ExampleGroup, TestAppsGroup, UnitTestGroup, IDFCaseGroup +from .IDFAssignTest import ExampleGroup, TestAppsGroup, UnitTestGroup, IDFCaseGroup, ComponentUTGroup try: import gitlab_api @@ -202,9 +202,9 @@ class IDFApp(App.BaseApp): def __str__(self): parts = ['app<{}>'.format(self.app_path)] if self.config_name: - parts.extend('config<{}>'.format(self.config_name)) + parts.append('config<{}>'.format(self.config_name)) if self.target: - parts.extend('target<{}>'.format(self.target)) + parts.append('target<{}>'.format(self.target)) return ' '.join(parts) @classmethod @@ -447,6 +447,11 @@ class TestApp(Example): super(TestApp, self).__init__(app_path, config_name, target, case_group, artifacts_cls) +class ComponentUTApp(TestApp): + def __init__(self, app_path, config_name='default', target='esp32', case_group=ComponentUTGroup, artifacts_cls=Artifacts): + super(ComponentUTApp, self).__init__(app_path, config_name, target, case_group, artifacts_cls) + + class LoadableElfTestApp(TestApp): def __init__(self, app_path, app_files, config_name='default', target='esp32', 
case_group=TestAppsGroup, artifacts_cls=Artifacts): # add arg `app_files` for loadable elf test_app. diff --git a/tools/ci/python_packages/ttfw_idf/IDFAssignTest.py b/tools/ci/python_packages/ttfw_idf/IDFAssignTest.py index 28e568ebb4..200d3618ad 100644 --- a/tools/ci/python_packages/ttfw_idf/IDFAssignTest.py +++ b/tools/ci/python_packages/ttfw_idf/IDFAssignTest.py @@ -17,7 +17,9 @@ except ImportError: import gitlab_api from tiny_test_fw.Utility import CIAssignTest -IDF_PATH_FROM_ENV = os.getenv("IDF_PATH") +from idf_py_actions.constants import SUPPORTED_TARGETS + +IDF_PATH_FROM_ENV = os.getenv('IDF_PATH') class IDFCaseGroup(CIAssignTest.Group): @@ -28,33 +30,36 @@ class IDFCaseGroup(CIAssignTest.Group): def get_artifact_index_file(cls): assert cls.LOCAL_BUILD_DIR if IDF_PATH_FROM_ENV: - artifact_index_file = os.path.join(IDF_PATH_FROM_ENV, cls.LOCAL_BUILD_DIR, "artifact_index.json") + artifact_index_file = os.path.join(IDF_PATH_FROM_ENV, cls.LOCAL_BUILD_DIR, 'artifact_index.json') else: - artifact_index_file = "artifact_index.json" + artifact_index_file = 'artifact_index.json' return artifact_index_file class IDFAssignTest(CIAssignTest.AssignTest): + def __init__(self, test_case_path, ci_config_file, case_group=IDFCaseGroup): + super(IDFAssignTest, self).__init__(test_case_path, ci_config_file, case_group) + def format_build_log_path(self, parallel_num): - return "{}/list_job_{}.json".format(self.case_group.LOCAL_BUILD_DIR, parallel_num) + return '{}/list_job_{}.json'.format(self.case_group.LOCAL_BUILD_DIR, parallel_num) def create_artifact_index_file(self, project_id=None, pipeline_id=None): if project_id is None: - project_id = os.getenv("CI_PROJECT_ID") + project_id = os.getenv('CI_PROJECT_ID') if pipeline_id is None: - pipeline_id = os.getenv("CI_PIPELINE_ID") + pipeline_id = os.getenv('CI_PIPELINE_ID') gitlab_inst = gitlab_api.Gitlab(project_id) artifact_index_list = [] for build_job_name in self.case_group.BUILD_JOB_NAMES: job_info_list = gitlab_inst.find_job_id(build_job_name, pipeline_id=pipeline_id) for job_info in job_info_list: - parallel_num = job_info["parallel_num"] or 1 # Could be None if "parallel_num" not defined for the job - raw_data = gitlab_inst.download_artifact(job_info["id"], + parallel_num = job_info['parallel_num'] or 1 # Could be None if "parallel_num" not defined for the job + raw_data = gitlab_inst.download_artifact(job_info['id'], [self.format_build_log_path(parallel_num)])[0] build_info_list = [json.loads(line) for line in raw_data.decode().splitlines()] for build_info in build_info_list: - build_info["ci_job_id"] = job_info["id"] + build_info['ci_job_id'] = job_info['id'] artifact_index_list.append(build_info) artifact_index_file = self.case_group.get_artifact_index_file() try: @@ -63,48 +68,47 @@ class IDFAssignTest(CIAssignTest.AssignTest): if e.errno != errno.EEXIST: raise e - with open(artifact_index_file, "w") as f: + with open(artifact_index_file, 'w') as f: json.dump(artifact_index_list, f) -SUPPORTED_TARGETS = [ - 'esp32', - 'esp32s2', -] - - class ExampleGroup(IDFCaseGroup): - SORT_KEYS = CI_JOB_MATCH_KEYS = ["env_tag", "target"] + SORT_KEYS = CI_JOB_MATCH_KEYS = ['env_tag', 'target'] - LOCAL_BUILD_DIR = "build_examples" - BUILD_JOB_NAMES = ["build_examples_cmake_{}".format(target) for target in SUPPORTED_TARGETS] + LOCAL_BUILD_DIR = 'build_examples' + BUILD_JOB_NAMES = ['build_examples_cmake_{}'.format(target) for target in SUPPORTED_TARGETS] class TestAppsGroup(ExampleGroup): - LOCAL_BUILD_DIR = "build_test_apps" - BUILD_JOB_NAMES = 
["build_test_apps_{}".format(target) for target in SUPPORTED_TARGETS] + LOCAL_BUILD_DIR = 'build_test_apps' + BUILD_JOB_NAMES = ['build_test_apps_{}'.format(target) for target in SUPPORTED_TARGETS] + + +class ComponentUTGroup(TestAppsGroup): + LOCAL_BUILD_DIR = 'build_component_ut' + BUILD_JOB_NAMES = ['build_component_ut_{}'.format(target) for target in SUPPORTED_TARGETS] class UnitTestGroup(IDFCaseGroup): - SORT_KEYS = ["test environment", "tags", "chip_target"] - CI_JOB_MATCH_KEYS = ["test environment"] + SORT_KEYS = ['test environment', 'tags', 'chip_target'] + CI_JOB_MATCH_KEYS = ['test environment'] - LOCAL_BUILD_DIR = "tools/unit-test-app/builds" - BUILD_JOB_NAMES = ["build_esp_idf_tests_cmake_{}".format(target) for target in SUPPORTED_TARGETS] + LOCAL_BUILD_DIR = 'tools/unit-test-app/builds' + BUILD_JOB_NAMES = ['build_esp_idf_tests_cmake_{}'.format(target) for target in SUPPORTED_TARGETS] MAX_CASE = 50 ATTR_CONVERT_TABLE = { - "execution_time": "execution time" + 'execution_time': 'execution time' } DUT_CLS_NAME = { - "esp32": "ESP32DUT", - "esp32s2": "ESP32S2DUT", - "esp8266": "ESP8266DUT", + 'esp32': 'ESP32DUT', + 'esp32s2': 'ESP32S2DUT', + 'esp8266': 'ESP8266DUT', } def __init__(self, case): super(UnitTestGroup, self).__init__(case) - for tag in self._get_case_attr(case, "tags"): + for tag in self._get_case_attr(case, 'tags'): self.ci_job_match_keys.add(tag) @staticmethod @@ -119,7 +123,7 @@ class UnitTestGroup(IDFCaseGroup): if self.accept_new_case(): for key in self.filters: if self._get_case_attr(case, key) != self.filters[key]: - if key == "tags": + if key == 'tags': if set(self._get_case_attr(case, key)).issubset(set(self.filters[key])): continue break @@ -136,18 +140,18 @@ class UnitTestGroup(IDFCaseGroup): case_data = [] for case in test_cases: one_case_data = { - "config": self._get_case_attr(case, "config"), - "name": self._get_case_attr(case, "summary"), - "reset": self._get_case_attr(case, "reset"), - "timeout": self._get_case_attr(case, "timeout"), + 'config': self._get_case_attr(case, 'config'), + 'name': self._get_case_attr(case, 'summary'), + 'reset': self._get_case_attr(case, 'reset'), + 'timeout': self._get_case_attr(case, 'timeout'), } - if test_function in ["run_multiple_devices_cases", "run_multiple_stage_cases"]: + if test_function in ['run_multiple_devices_cases', 'run_multiple_stage_cases']: try: - one_case_data["child case num"] = self._get_case_attr(case, "child case num") + one_case_data['child case num'] = self._get_case_attr(case, 'child case num') except KeyError as e: - print("multiple devices/stages cases must contains at least two test functions") - print("case name: {}".format(one_case_data["name"])) + print('multiple devices/stages cases must contains at least two test functions') + print('case name: {}'.format(one_case_data['name'])) raise e case_data.append(one_case_data) @@ -160,18 +164,18 @@ class UnitTestGroup(IDFCaseGroup): :return: dict of list of cases for each test functions """ case_by_test_function = { - "run_multiple_devices_cases": [], - "run_multiple_stage_cases": [], - "run_unit_test_cases": [], + 'run_multiple_devices_cases': [], + 'run_multiple_stage_cases': [], + 'run_unit_test_cases': [], } for case in self.case_list: - if case["multi_device"] == "Yes": - case_by_test_function["run_multiple_devices_cases"].append(case) - elif case["multi_stage"] == "Yes": - case_by_test_function["run_multiple_stage_cases"].append(case) + if case['multi_device'] == 'Yes': + case_by_test_function['run_multiple_devices_cases'].append(case) + 
elif case['multi_stage'] == 'Yes': + case_by_test_function['run_multiple_stage_cases'].append(case) else: - case_by_test_function["run_unit_test_cases"].append(case) + case_by_test_function['run_unit_test_cases'].append(case) return case_by_test_function def output(self): @@ -181,12 +185,12 @@ class UnitTestGroup(IDFCaseGroup): :return: {"Filter": case filter, "CaseConfig": list of case configs for cases in this group} """ - target = self._get_case_attr(self.case_list[0], "chip_target") + target = self._get_case_attr(self.case_list[0], 'chip_target') if target: overwrite = { - "dut": { - "package": "ttfw_idf", - "class": self.DUT_CLS_NAME[target], + 'dut': { + 'package': 'ttfw_idf', + 'class': self.DUT_CLS_NAME[target], } } else: @@ -196,11 +200,11 @@ class UnitTestGroup(IDFCaseGroup): output_data = { # we don't need filter for test function, as UT uses a few test functions for all cases - "CaseConfig": [ + 'CaseConfig': [ { - "name": test_function, - "extra_data": self._create_extra_data(test_cases, test_function), - "overwrite": overwrite, + 'name': test_function, + 'extra_data': self._create_extra_data(test_cases, test_function), + 'overwrite': overwrite, } for test_function, test_cases in case_by_test_function.items() if test_cases ], } @@ -210,22 +214,29 @@ class UnitTestGroup(IDFCaseGroup): class ExampleAssignTest(IDFAssignTest): CI_TEST_JOB_PATTERN = re.compile(r'^example_test_.+') - def __init__(self, est_case_path, ci_config_file): - super(ExampleAssignTest, self).__init__(est_case_path, ci_config_file, case_group=ExampleGroup) + def __init__(self, test_case_path, ci_config_file): + super(ExampleAssignTest, self).__init__(test_case_path, ci_config_file, case_group=ExampleGroup) class TestAppsAssignTest(IDFAssignTest): CI_TEST_JOB_PATTERN = re.compile(r'^test_app_test_.+') - def __init__(self, est_case_path, ci_config_file): - super(TestAppsAssignTest, self).__init__(est_case_path, ci_config_file, case_group=TestAppsGroup) + def __init__(self, test_case_path, ci_config_file): + super(TestAppsAssignTest, self).__init__(test_case_path, ci_config_file, case_group=TestAppsGroup) + + +class ComponentUTAssignTest(IDFAssignTest): + CI_TEST_JOB_PATTERN = re.compile(r'^component_ut_test_.+') + + def __init__(self, test_case_path, ci_config_file): + super(ComponentUTAssignTest, self).__init__(test_case_path, ci_config_file, case_group=ComponentUTGroup) class UnitTestAssignTest(IDFAssignTest): CI_TEST_JOB_PATTERN = re.compile(r'^UT_.+') - def __init__(self, est_case_path, ci_config_file): - super(UnitTestAssignTest, self).__init__(est_case_path, ci_config_file, case_group=UnitTestGroup) + def __init__(self, test_case_path, ci_config_file): + super(UnitTestAssignTest, self).__init__(test_case_path, ci_config_file, case_group=UnitTestGroup) def search_cases(self, case_filter=None): """ @@ -252,13 +263,14 @@ class UnitTestAssignTest(IDFAssignTest): return test_cases test_cases = [] - if os.path.isdir(self.test_case_path): - for yml_file in find_by_suffix('.yml', self.test_case_path): - test_cases.extend(get_test_cases_from_yml(yml_file)) - elif os.path.isfile(self.test_case_path): - test_cases.extend(get_test_cases_from_yml(self.test_case_path)) - else: - print("Test case path is invalid. 
Should only happen when use @bot to skip unit test.") + for path in self.test_case_paths: + if os.path.isdir(path): + for yml_file in find_by_suffix('.yml', path): + test_cases.extend(get_test_cases_from_yml(yml_file)) + elif os.path.isfile(path) and path.endswith('.yml'): + test_cases.extend(get_test_cases_from_yml(path)) + else: + print('Test case path is invalid. Should only happen when use @bot to skip unit test.') # filter keys are lower case. Do map lower case keys with original keys. try: @@ -285,27 +297,30 @@ class UnitTestAssignTest(IDFAssignTest): # sort cases with configs and test functions # in later stage cases with similar attributes are more likely to be assigned to the same job # it will reduce the count of flash DUT operations - test_cases.sort(key=lambda x: x["config"] + x["multi_stage"] + x["multi_device"]) + test_cases.sort(key=lambda x: x['config'] + x['multi_stage'] + x['multi_device']) return test_cases if __name__ == '__main__': parser = argparse.ArgumentParser() - parser.add_argument("case_group", choices=["example_test", "custom_test", "unit_test"]) - parser.add_argument("test_case", help="test case folder or file") - parser.add_argument("ci_config_file", help="gitlab ci config file") - parser.add_argument("output_path", help="output path of config files") - parser.add_argument("--pipeline_id", "-p", type=int, default=None, help="pipeline_id") - parser.add_argument("--test-case-file-pattern", help="file name pattern used to find Python test case files") + parser.add_argument('case_group', choices=['example_test', 'custom_test', 'unit_test', 'component_ut']) + parser.add_argument('test_case_paths', nargs='+', help='test case folder or file') + parser.add_argument('-c', '--config', help='gitlab ci config file') + parser.add_argument('-o', '--output', help='output path of config files') + parser.add_argument('--pipeline_id', '-p', type=int, default=None, help='pipeline_id') + parser.add_argument('--test-case-file-pattern', help='file name pattern used to find Python test case files') args = parser.parse_args() - args_list = [args.test_case, args.ci_config_file] + test_case_paths = [os.path.join(IDF_PATH_FROM_ENV, path) if not os.path.isabs(path) else path for path in args.test_case_paths] + args_list = [test_case_paths, args.config] if args.case_group == 'example_test': assigner = ExampleAssignTest(*args_list) elif args.case_group == 'custom_test': assigner = TestAppsAssignTest(*args_list) elif args.case_group == 'unit_test': assigner = UnitTestAssignTest(*args_list) + elif args.case_group == 'component_ut': + assigner = ComponentUTAssignTest(*args_list) else: raise SystemExit(1) # which is impossible @@ -313,5 +328,5 @@ if __name__ == '__main__': assigner.CI_TEST_JOB_PATTERN = re.compile(r'{}'.format(args.test_case_file_pattern)) assigner.assign_cases() - assigner.output_configs(args.output_path) + assigner.output_configs(args.output) assigner.create_artifact_index_file() diff --git a/tools/ci/python_packages/ttfw_idf/__init__.py b/tools/ci/python_packages/ttfw_idf/__init__.py index 4abcadcd29..95c5e0e200 100644 --- a/tools/ci/python_packages/ttfw_idf/__init__.py +++ b/tools/ci/python_packages/ttfw_idf/__init__.py @@ -17,10 +17,13 @@ import logging import os import re +import junit_xml + from tiny_test_fw import TinyFW, Utility -from .IDFApp import IDFApp, Example, LoadableElfTestApp, UT, TestApp # noqa: export all Apps for users -from .IDFDUT import IDFDUT, ESP32DUT, ESP32S2DUT, ESP8266DUT, ESP32QEMUDUT # noqa: export DUTs for users from .DebugUtils import 
OCDBackend, GDBBackend, CustomProcess # noqa: export DebugUtils for users +from .IDFApp import IDFApp, Example, LoadableElfTestApp, UT, TestApp, ComponentUTApp # noqa: export all Apps for users +from .IDFDUT import IDFDUT, ESP32DUT, ESP32S2DUT, ESP8266DUT, ESP32QEMUDUT # noqa: export DUTs for users +from .unity_test_parser import TestResults, TestFormat # pass TARGET_DUT_CLS_DICT to Env.py to avoid circular dependency issue. TARGET_DUT_CLS_DICT = { @@ -108,6 +111,22 @@ def ci_target_check(func): return wrapper +def test_func_generator(func, app, target, ci_target, module, execution_time, level, erase_nvs, drop_kwargs_dut=False, **kwargs): + test_target = local_test_check(target) + dut = get_dut_class(test_target, erase_nvs) + if drop_kwargs_dut and 'dut' in kwargs: # panic_test() will inject dut, resolve conflicts here + dut = kwargs['dut'] + del kwargs['dut'] + original_method = TinyFW.test_method( + app=app, dut=dut, target=upper_list_or_str(target), ci_target=upper_list_or_str(ci_target), + module=module, execution_time=execution_time, level=level, erase_nvs=erase_nvs, + dut_dict=TARGET_DUT_CLS_DICT, **kwargs + ) + test_func = original_method(func) + test_func.case_info["ID"] = format_case_id(target, test_func.case_info["name"]) + return test_func + + @ci_target_check def idf_example_test(app=Example, target="ESP32", ci_target=None, module="examples", execution_time=1, level="example", erase_nvs=True, config_name=None, **kwargs): @@ -125,19 +144,8 @@ def idf_example_test(app=Example, target="ESP32", ci_target=None, module="exampl :param kwargs: other keyword args :return: test method """ - def test(func): - test_target = local_test_check(target) - dut = get_dut_class(test_target, erase_nvs) - original_method = TinyFW.test_method( - app=app, dut=dut, target=upper_list_or_str(target), ci_target=upper_list_or_str(ci_target), - module=module, execution_time=execution_time, level=level, erase_nvs=erase_nvs, - dut_dict=TARGET_DUT_CLS_DICT, **kwargs - ) - test_func = original_method(func) - test_func.case_info["ID"] = format_case_id(target, test_func.case_info["name"]) - return test_func - + return test_func_generator(func, app, target, ci_target, module, execution_time, level, erase_nvs, **kwargs) return test @@ -157,25 +165,36 @@ def idf_unit_test(app=UT, target="ESP32", ci_target=None, module="unit-test", ex :param kwargs: other keyword args :return: test method """ - def test(func): - test_target = local_test_check(target) - dut = get_dut_class(test_target, erase_nvs) - original_method = TinyFW.test_method( - app=app, dut=dut, target=upper_list_or_str(target), ci_target=upper_list_or_str(ci_target), - module=module, execution_time=execution_time, level=level, erase_nvs=erase_nvs, - dut_dict=TARGET_DUT_CLS_DICT, **kwargs - ) - test_func = original_method(func) - test_func.case_info["ID"] = format_case_id(target, test_func.case_info["name"]) - return test_func - + return test_func_generator(func, app, target, ci_target, module, execution_time, level, erase_nvs, **kwargs) return test @ci_target_check def idf_custom_test(app=TestApp, target="ESP32", ci_target=None, module="misc", execution_time=1, - level="integration", erase_nvs=True, config_name=None, group="test-apps", **kwargs): + level="integration", erase_nvs=True, config_name=None, **kwargs): + """ + decorator for idf custom tests (with default values for some keyword args). 
+ + :param app: test application class + :param target: target supported, string or list + :param ci_target: target auto run in CI, if None than all target will be tested, None, string or list + :param module: module, string + :param execution_time: execution time in minutes, int + :param level: test level, could be used to filter test cases, string + :param erase_nvs: if need to erase_nvs in DUT.start_app() + :param config_name: if specified, name of the app configuration + :param kwargs: other keyword args + :return: test method + """ + def test(func): + return test_func_generator(func, app, target, ci_target, module, execution_time, level, erase_nvs, drop_kwargs_dut=True, **kwargs) + return test + + +@ci_target_check +def idf_component_unit_test(app=ComponentUTApp, target="ESP32", ci_target=None, module="misc", execution_time=1, + level="integration", erase_nvs=True, config_name=None, **kwargs): """ decorator for idf custom tests (with default values for some keyword args). @@ -187,29 +206,41 @@ def idf_custom_test(app=TestApp, target="ESP32", ci_target=None, module="misc", :param level: test level, could be used to filter test cases, string :param erase_nvs: if need to erase_nvs in DUT.start_app() :param config_name: if specified, name of the app configuration - :param group: identifier to group custom tests (unused for now, defaults to "test-apps") :param kwargs: other keyword args :return: test method """ def test(func): - test_target = local_test_check(target) - dut = get_dut_class(test_target, erase_nvs) - if 'dut' in kwargs: # panic_test() will inject dut, resolve conflicts here - dut = kwargs['dut'] - del kwargs['dut'] - original_method = TinyFW.test_method( - app=app, dut=dut, target=upper_list_or_str(target), ci_target=upper_list_or_str(ci_target), - module=module, execution_time=execution_time, level=level, erase_nvs=erase_nvs, - dut_dict=TARGET_DUT_CLS_DICT, **kwargs - ) - test_func = original_method(func) - test_func.case_info["ID"] = format_case_id(target, test_func.case_info["name"]) - return test_func + return test_func_generator(func, app, target, ci_target, module, execution_time, level, erase_nvs, **kwargs) return test +class ComponentUTResult: + """ + Function Class, parse component unit test results + """ + + @staticmethod + def parse_result(stdout): + try: + results = TestResults(stdout, TestFormat.UNITY_FIXTURE_VERBOSE) + except (ValueError, TypeError) as e: + raise ValueError('Error occurs when parsing the component unit test stdout to JUnit report: ' + str(e)) + + group_name = results.tests()[0].group() + with open(os.path.join(os.getenv('LOG_PATH', ''), '{}_XUNIT_RESULT.xml'.format(group_name)), 'w') as fw: + junit_xml.to_xml_report_file(fw, [results.to_junit()]) + + if results.num_failed(): + # raise exception if any case fails + err_msg = 'Failed Cases:\n' + for test_case in results.test_iter(): + if test_case.result() == 'FAIL': + err_msg += '\t{}: {}'.format(test_case.name(), test_case.message()) + raise AssertionError(err_msg) + + def log_performance(item, value): """ do print performance with pre-defined format to console diff --git a/tools/ci/python_packages/ttfw_idf/unity_test_parser.py b/tools/ci/python_packages/ttfw_idf/unity_test_parser.py new file mode 100644 index 0000000000..63c4bb140e --- /dev/null +++ b/tools/ci/python_packages/ttfw_idf/unity_test_parser.py @@ -0,0 +1,375 @@ +""" +Modification version of https://github.com/ETCLabs/unity-test-parser/blob/develop/unity_test_parser.py +since only python 3.6 or higher version have ``enum.auto()`` + 
+unity_test_parser.py + +Parse the output of the Unity Test Framework for C. Parsed results are held in the TestResults +object format, which can then be converted to various XML formats. +""" + +import enum +import re + +import junit_xml + +_NORMAL_TEST_REGEX = re.compile(r"(?P.+):(?P\d+):(?P[^\s:]+):(?PPASS|FAIL|IGNORE)(?:: (?P.+))?") +_UNITY_FIXTURE_VERBOSE_PREFIX_REGEX = re.compile(r"(?PTEST\((?P[^\s,]+), (?P[^\s\)]+)\))(?P.+)?$") +_UNITY_FIXTURE_REMAINDER_REGEX = re.compile(r"^(?P.+):(?P\d+)::(?PPASS|FAIL|IGNORE)(?:: (?P.+))?") +_TEST_SUMMARY_BLOCK_REGEX = re.compile( + r"^(?P\d+) Tests (?P\d+) Failures (?P\d+) Ignored\s*\r?\n(?POK|FAIL)(?:ED)?", re.MULTILINE +) +_TEST_RESULT_ENUM = ["PASS", "FAIL", "IGNORE"] + + +class TestFormat(enum.Enum): + """Represents the flavor of Unity used to produce a given output.""" + + UNITY_BASIC = 0 + # UNITY_FIXTURE = enum.auto() + UNITY_FIXTURE_VERBOSE = 1 + + +globals().update(TestFormat.__members__) + + +class TestStats: + """Statistics about a test collection""" + + def __init__(self): + self.total = 0 + self.passed = 0 + self.failed = 0 + self.ignored = 0 + + def __eq__(self, other): + if isinstance(other, self.__class__): + return (self.total == other.total + and self.passed == other.passed + and self.failed == other.failed + and self.ignored == other.ignored) + return False + + +class TestResult: + """ + Class representing the result of a single test. + + Contains the test name, its result (either PASS, FAIL or IGNORE), the file and line number if + the test result was not PASS, and an optional message. + """ + + def __init__( + self, + test_name, + result, + group="default", + file="", + line=0, + message="", + full_line="", + ): + if result not in _TEST_RESULT_ENUM: + raise ValueError("result must be one of {}.".format(_TEST_RESULT_ENUM)) + + self._test_name = test_name + self._result = result + self._group = group + self._message = message + self._full_line = full_line + + if result != "PASS": + self._file = file + self._line = line + else: + self._file = "" + self._line = 0 + + def file(self): + """The file name - returns empty string if the result is PASS.""" + return self._file + + def line(self): + """The line number - returns 0 if the result is PASS.""" + return self._line + + def name(self): + """The test name.""" + return self._test_name + + def result(self): + """The test result, one of PASS, FAIL or IGNORED.""" + return self._result + + def group(self): + """ + The test group, if applicable. + + For basic Unity output, this will always be "default". + """ + return self._group + + def message(self): + """The accompanying message - returns empty string if the result is PASS.""" + return self._message + + def full_line(self): + """The original, full line of unit test output that this object was created from.""" + return self._full_line + + +class TestResults: + """ + Class representing Unity test results. + + After being initialized with raw test output, it parses the output and represents it as a list + of TestResult objects which can be inspected or converted to other types of output, e.g. JUnit + XML. + """ + + def __init__(self, test_output, test_format=TestFormat.UNITY_BASIC): + """ + Create a new TestResults object from Unity test output. + + Keyword arguments: + test_output -- The full test console output, must contain the overall result and summary + block at the bottom. + + Optional arguments: + test_format -- TestFormat enum representing the flavor of Unity used to create the output. 
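+        Example (illustrative; mirrors the call made in ComponentUTResult.parse_result):
+            results = TestResults(dut_stdout, TestFormat.UNITY_FIXTURE_VERBOSE)
+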
+ + Exceptions: + ValueError, if the test output is not formatted properly. + """ + self._tests = [] + self._test_stats = self._find_summary_block(test_output) + + if test_format is TestFormat.UNITY_BASIC: + self._parse_unity_basic(test_output) + elif test_format is TestFormat.UNITY_FIXTURE_VERBOSE: + self._parse_unity_fixture_verbose(test_output) + else: + raise ValueError( + "test_format must be one of UNITY_BASIC or UNITY_FIXTURE_VERBOSE." + ) + + def num_tests(self): + """The total number of tests parsed.""" + return self._test_stats.total + + def num_passed(self): + """The number of tests with result PASS.""" + return self._test_stats.passed + + def num_failed(self): + """The number of tests with result FAIL.""" + return self._test_stats.failed + + def num_ignored(self): + """The number of tests with result IGNORE.""" + return self._test_stats.ignored + + def test_iter(self): + """Get an iterator for iterating over individual tests. + + Returns an iterator over TestResult objects. + + Example: + for test in unity_results.test_iter(): + print(test.name()) + """ + return iter(self._tests) + + def tests(self): + """Get a list of all the tests (TestResult objects).""" + return self._tests + + def to_junit( + self, suite_name="all_tests", + ): + """ + Convert the tests to JUnit XML. + + Returns a junit_xml.TestSuite containing all of the test cases. One test suite will be + generated with the name given in suite_name. Unity Fixture test groups are mapped to the + classname attribute of test cases; for basic Unity output there will be one class named + "default". + + Optional arguments: + suite_name -- The name to use for the "name" and "package" attributes of the testsuite element. + + Sample output: + + + + + """ + test_case_list = [] + + for test in self._tests: + if test.result() == "PASS": + test_case_list.append( + junit_xml.TestCase(name=test.name(), classname=test.group()) + ) + else: + junit_tc = junit_xml.TestCase( + name=test.name(), + classname=test.group(), + file=test.file(), + line=test.line(), + ) + if test.result() == "FAIL": + junit_tc.add_failure_info( + message=test.message(), output=test.full_line() + ) + elif test.result() == "IGNORE": + junit_tc.add_skipped_info( + message=test.message(), output=test.full_line() + ) + test_case_list.append(junit_tc) + + return junit_xml.TestSuite( + name=suite_name, package=suite_name, test_cases=test_case_list + ) + + def _find_summary_block(self, unity_output): + """ + Find and parse the test summary block. + + Unity prints a test summary block at the end of a test run of the form: + ----------------------- + X Tests Y Failures Z Ignored + [PASS|FAIL] + + Returns the contents of the test summary block as a TestStats object. + """ + match = _TEST_SUMMARY_BLOCK_REGEX.search(unity_output) + if not match: + raise ValueError("A Unity test summary block was not found.") + + try: + stats = TestStats() + stats.total = int(match.group("num_tests")) + stats.failed = int(match.group("num_failures")) + stats.ignored = int(match.group("num_ignored")) + stats.passed = stats.total - stats.failed - stats.ignored + return stats + except ValueError: + raise ValueError("The Unity test summary block was not valid.") + + def _parse_unity_basic(self, unity_output): + """ + Parse basic unity output. 
+ + This is of the form file:line:test_name:result[:optional_message] + """ + found_test_stats = TestStats() + + for test in _NORMAL_TEST_REGEX.finditer(unity_output): + try: + new_test = TestResult( + test.group("test_name"), + test.group("result"), + file=test.group("file"), + line=int(test.group("line")), + message=test.group("message") + if test.group("message") is not None + else "", + full_line=test.group(0), + ) + except ValueError: + continue + + self._add_new_test(new_test, found_test_stats) + + if len(self._tests) == 0: + raise ValueError("No tests were found.") + + if found_test_stats != self._test_stats: + raise ValueError("Test output does not match summary block.") + + def _parse_unity_fixture_verbose(self, unity_output): + """ + Parse the output of the unity_fixture add-in invoked with the -v flag. + + This is a more complex operation than basic unity output, because the output for a single + test can span multiple lines. There is a prefix of the form "TEST(test_group, test_name)" + that always exists on the first line for a given test. Immediately following that can be a + pass or fail message, or some number of diagnostic messages followed by a pass or fail + message. + """ + found_test_stats = TestStats() + + line_iter = iter(unity_output.splitlines()) + try: + line = next(line_iter) + while True: + prefix_match = _UNITY_FIXTURE_VERBOSE_PREFIX_REGEX.search(line) + line = next(line_iter) + if prefix_match: + # Handle the remaining portion of a test case line after the unity_fixture + # prefix. + remainder = prefix_match.group("remainder") + if remainder: + self._parse_unity_fixture_remainder( + prefix_match, remainder, found_test_stats + ) + # Handle any subsequent lines with more information on the same test case. + while not _UNITY_FIXTURE_VERBOSE_PREFIX_REGEX.search(line): + self._parse_unity_fixture_remainder( + prefix_match, line, found_test_stats + ) + line = next(line_iter) + except StopIteration: + pass + + if len(self._tests) == 0: + raise ValueError("No tests were found.") + + if found_test_stats != self._test_stats: + raise ValueError("Test output does not match summary block.") + + def _parse_unity_fixture_remainder(self, prefix_match, remainder, test_stats): + """ + Parse the remainder of a Unity Fixture test case. + + Can be on the same line as the prefix or on subsequent lines. 
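+        For example (illustrative lines matching the regexes above):
+            TEST(esp_netif, init_and_destroy) PASS
+            esp_netif_test.c:42::FAIL: Expected TRUE Was FALSE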
+ """ + new_test = None + + if remainder == " PASS": + new_test = TestResult( + prefix_match.group("test_name"), + "PASS", + group=prefix_match.group("test_group"), + full_line=prefix_match.group(0), + ) + else: + remainder_match = _UNITY_FIXTURE_REMAINDER_REGEX.match(remainder) + if remainder_match: + new_test = TestResult( + prefix_match.group("test_name"), + remainder_match.group("result"), + group=prefix_match.group("test_group"), + file=remainder_match.group("file"), + line=int(remainder_match.group("line")), + message=remainder_match.group("message") + if remainder_match.group("message") is not None + else "", + full_line=prefix_match.group("prefix") + remainder_match.group(0), + ) + + if new_test is not None: + self._add_new_test(new_test, test_stats) + + def _add_new_test(self, new_test, test_stats): + """Add a new test and increment the proper members of test_stats.""" + test_stats.total += 1 + if new_test.result() == "PASS": + test_stats.passed += 1 + elif new_test.result() == "FAIL": + test_stats.failed += 1 + else: + test_stats.ignored += 1 + + self._tests.append(new_test) diff --git a/tools/unit-test-app/components/test_utils/test_runner.c b/tools/unit-test-app/components/test_utils/test_runner.c index 6c17a0640f..daa64d5295 100644 --- a/tools/unit-test-app/components/test_utils/test_runner.c +++ b/tools/unit-test-app/components/test_utils/test_runner.c @@ -18,6 +18,7 @@ #include "freertos/FreeRTOS.h" #include "freertos/task.h" #include "unity.h" +#include "unity_test_runner.h" #include "test_utils.h" #include "esp_newlib.h"
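Taken together, a component unit test run captures the Unity fixture console output from the DUT (as in components/esp_netif/test_apps/component_ut_test.py) and ComponentUTResult.parse_result converts it into a JUnit XML report for CI. Below is a minimal local sketch of that conversion step, assuming tools/ci/python_packages is on PYTHONPATH; the sample console output and the report file name are illustrative only, not produced by this patch:

import junit_xml

from ttfw_idf.unity_test_parser import TestFormat, TestResults

# Illustrative unity_fixture -v style output; in a real run this string comes from
# dut.expect('Tests finished', full_stdout=True).
sample_output = '\n'.join([
    'TEST(esp_netif, init_and_destroy) PASS',
    'TEST(esp_netif, get_from_if_key) PASS',
    '',
    '-----------------------',
    '2 Tests 0 Failures 0 Ignored',
    'OK',
])

results = TestResults(sample_output, TestFormat.UNITY_FIXTURE_VERBOSE)
print(results.num_tests(), results.num_failed())  # -> 2 0

# The same conversion ComponentUTResult.parse_result performs before checking for failed cases.
with open('esp_netif_XUNIT_RESULT.xml', 'w') as fw:
    junit_xml.to_xml_report_file(fw, [results.to_junit()])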