feat(tiny_test_fw): ignore known failure cases result

Fu Hanxi
2021-08-10 10:23:26 +08:00
committed by Zim Kalinowski
parent 45330303d6
commit 7089f62dab
6 changed files with 150 additions and 99 deletions

View File

@@ -43,9 +43,11 @@
     # clone test env configs
     - ./tools/ci/retry_failed.sh git clone $TEST_ENV_CONFIG_REPO
     - python $CHECKOUT_REF_SCRIPT ci-test-runner-configs ci-test-runner-configs
-    - cd tools/ci/python_packages/tiny_test_fw/bin
+    # git clone the known failure cases repo, run test
+    - ./tools/ci/retry_failed.sh git clone $KNOWN_FAILURE_CASES_REPO known_failure_cases
     # run test
-    - python Runner.py $TEST_CASE_PATH -c $CONFIG_FILE -e $ENV_FILE
+    - cd tools/ci/python_packages/tiny_test_fw/bin
+    - python Runner.py $TEST_CASE_PATH -c $CONFIG_FILE -e $ENV_FILE --known_failure_cases_file $CI_PROJECT_DIR/known_failure_cases/known_failure_cases.txt

 .example_debug_template:
   stage: target_test
@@ -82,9 +84,11 @@
     # clone test env configs
     - ./tools/ci/retry_failed.sh git clone $TEST_ENV_CONFIG_REPO
     - python $CHECKOUT_REF_SCRIPT ci-test-runner-configs ci-test-runner-configs
-    - cd tools/ci/python_packages/tiny_test_fw/bin
+    # git clone the known failure cases repo, run test
+    - ./tools/ci/retry_failed.sh git clone $KNOWN_FAILURE_CASES_REPO known_failure_cases
     # run test
-    - python Runner.py $TEST_CASE_PATH -c $CONFIG_FILE -e $ENV_FILE
+    - cd tools/ci/python_packages/tiny_test_fw/bin
+    - python Runner.py $TEST_CASE_PATH -c $CONFIG_FILE -e $ENV_FILE --known_failure_cases_file $CI_PROJECT_DIR/known_failure_cases/known_failure_cases.txt

 .test_app_template:
   extends: .example_test_template
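
Note on the new flag: `--known_failure_cases_file` points `Runner.py` at the text file cloned from `$KNOWN_FAILURE_CASES_REPO`. Judging from the parsing and matching code added to `Runner.py` further down (`#` starts a comment, blank lines are skipped, entries are compared to junit case names with `fnmatch`), the file is expected to hold one case name or wildcard pattern per line. A hypothetical sketch of such a file (the case names below are invented for illustration):

    # flaky cases tracked elsewhere; keep the job green until they are fixed
    esp32.default.UT_FLAKY_TIMER_01
    esp32.*.UT_BT_*    # any config and any case under this prefix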

View File

@@ -13,19 +13,30 @@
 # limitations under the License.

 """ Interface for test cases. """
-import os
-import time
-import traceback
 import functools
+import os
 import socket
+import time
 from datetime import datetime

 import junit_xml

-from . import Env
-from . import DUT
-from . import App
-from . import Utility
+from . import DUT, App, Env, Utility
+from .Utility import format_case_id
+
+
+class TestCaseFailed(AssertionError):
+    def __init__(self, *cases):
+        """
+        Raise this exception if one or more test cases fail in a 'normal' way (ie the test runs but fails, no unexpected exceptions)
+
+        This will avoid dumping the Python stack trace, because the assumption is the junit error info and full job log already has
+        enough information for a developer to debug.
+
+        'cases' argument is the names of one or more test cases
+        """
+        message = 'Test case{} failed: {}'.format('s' if len(cases) > 1 else '', ', '.join(str(c) for c in cases))
+        super(TestCaseFailed, self).__init__(self, message)


 class DefaultEnvConfig(object):
@@ -87,8 +98,8 @@ class JunitReport(object):
     @classmethod
     def output_report(cls, junit_file_path):
         """ Output current test result to file. """
-        with open(os.path.join(junit_file_path, cls.JUNIT_FILE_NAME), "w") as f:
-            cls.JUNIT_TEST_SUITE.to_file(f, [cls.JUNIT_TEST_SUITE], prettyprint=False)
+        with open(os.path.join(junit_file_path, cls.JUNIT_FILE_NAME), 'w') as f:
+            junit_xml.to_xml_report_file(f, [cls.JUNIT_TEST_SUITE], prettyprint=False)

     @classmethod
     def get_current_test_case(cls):
@@ -184,21 +195,20 @@ def test_method(**kwargs):
             env_inst = Env.Env(**env_config)
             # prepare for xunit test results
-            junit_file_path = env_inst.app_cls.get_log_folder(env_config["test_suite_name"])
-            junit_test_case = JunitReport.create_test_case(case_info["ID"])
+            junit_file_path = env_inst.app_cls.get_log_folder(env_config['test_suite_name'])
+            junit_test_case = JunitReport.create_test_case(format_case_id(case_info['ID'],
+                                                                          target=case_info['chip'].lower()))
             result = False
             try:
                 Utility.console_log("starting running test: " + test_func.__name__, color="green")
                 # execute test function
                 test_func(env_inst, extra_data)
                 # if finish without exception, test result is True
                 result = True
+            except TestCaseFailed as e:
+                junit_test_case.add_failure_info(str(e))
             except Exception as e:
-                # handle all the exceptions here
-                traceback.print_exc()
-                # log failure
-                junit_test_case.add_failure_info(str(e) + ":\r\n" + traceback.format_exc())
+                Utility.handle_unexpected_exception(junit_test_case, e)
             finally:
                 # do close all DUTs, if result is False then print DUT debug info
                 close_errors = env_inst.close(dut_debug=(not result))
@@ -210,7 +220,7 @@ def test_method(**kwargs):
             # and raise exception in DUT close to fail test case if reset detected.
             if close_errors:
                 for error in close_errors:
-                    junit_test_case.add_failure_info("env close error: ".format(error))
+                    junit_test_case.add_failure_info(str(error))
                 result = False
             if not case_info["junit_report_by_case"]:
                 JunitReport.test_case_finish(junit_test_case)
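
`TestCaseFailed` gives 'normal' failures a compact report: the wrapper above only attaches the exception message to the junit case, while anything unexpected still goes through `Utility.handle_unexpected_exception()` with a full traceback. A minimal sketch of how a test script could raise it, assuming a list of failed case names collected by the caller (the names below are placeholders):

    from tiny_test_fw.TinyFW import TestCaseFailed

    failed = ['esp32.default.UT_001', 'esp32.default.UT_002']  # hypothetical case names
    if failed:
        # the constructed message reads "Test cases failed: esp32.default.UT_001, esp32.default.UT_002"
        raise TestCaseFailed(*failed)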

View File

@@ -1,7 +1,8 @@
 from __future__ import print_function

 import os.path
 import sys
+import traceback

 _COLOR_CODES = {
     "white": u'\033[0m',
@@ -73,3 +74,20 @@ def load_source(path):
         sys.path.remove(dir)
     __LOADED_MODULES[path] = ret
     return ret
+
+
+def handle_unexpected_exception(junit_test_case, exception):
+    """
+    Helper to log & add junit result details for an unexpected exception encountered
+    when running a test case.
+
+    Should always be called from inside an except: block
+    """
+    traceback.print_exc()
+    # AssertionError caused by an 'assert' statement has an empty string as its 'str' form
+    e_str = str(exception) if str(exception) else repr(exception)
+    junit_test_case.add_failure_info('Unexpected exception: {}\n{}'.format(e_str, traceback.format_exc()))
+
+
+def format_case_id(case_name, target='esp32', config='default'):
+    return '{}.{}.{}'.format(target, config, case_name)
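
`format_case_id()` is now the single place where junit case identifiers are built, in the shape `<target>.<config>.<case name>`, and `handle_unexpected_exception()` replaces the ad-hoc traceback/`add_failure_info` calls previously duplicated in the callers. A quick illustration of the ID helper (the case name is made up):

    from tiny_test_fw.Utility import format_case_id

    print(format_case_id('UT_TIMER_01'))                                    # esp32.default.UT_TIMER_01
    print(format_case_id('UT_TIMER_01', target='esp32s2', config='psram'))  # esp32s2.psram.UT_TIMER_01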

View File

@@ -21,13 +21,14 @@ Command line interface to run test cases from a given path.
 Use ``python Runner.py test_case_path -c config_file -e env_config_file`` to run test cases.
 """
+import argparse
 import os
 import sys
-import argparse
 import threading
+from fnmatch import fnmatch

-from tiny_test_fw import TinyFW
-from tiny_test_fw.Utility import SearchCases, CaseConfig
+from tiny_test_fw.TinyFW import JunitReport, set_default_config
+from tiny_test_fw.Utility import CaseConfig, SearchCases, console_log


 class Runner(threading.Thread):
@@ -37,28 +38,64 @@ class Runner(threading.Thread):
     :param env_config_file: env config file
     """

-    def __init__(self, test_case, case_config, env_config_file=None):
+    def __init__(self, test_case, case_config, env_config_file=None, known_failure_cases_file=None):
         super(Runner, self).__init__()
         self.setDaemon(True)
         if case_config:
             test_suite_name = os.path.splitext(os.path.basename(case_config))[0]
         else:
             test_suite_name = "TestRunner"
-        TinyFW.set_default_config(env_config_file=env_config_file, test_suite_name=test_suite_name)
+        set_default_config(env_config_file=env_config_file, test_suite_name=test_suite_name)
         test_methods = SearchCases.Search.search_test_cases(test_case)
         self.test_cases = CaseConfig.Parser.apply_config(test_methods, case_config)
-        self.test_result = []
+        self.known_failure_cases = self._get_config_cases(known_failure_cases_file)
+
+    @staticmethod
+    def _get_config_cases(config_file):
+        res = set()
+        if not config_file or not os.path.isfile(config_file):
+            return res
+
+        for line in open(config_file).readlines():
+            if not line:
+                continue
+            if not line.strip():
+                continue
+            without_comments = line.split("#")[0].strip()
+            if without_comments:
+                res.add(without_comments)
+        return res

     def run(self):
         for case in self.test_cases:
-            result = case.run()
-            self.test_result.append(result)
+            case.run()
+
+    @staticmethod
+    def is_known_issue(tc_name, known_cases):
+        for case in known_cases:
+            if tc_name == case:
+                return True
+            if fnmatch(tc_name, case):
+                return True
+        return False

     def get_test_result(self):
-        return self.test_result and all(self.test_result)
+        _res = True
+        console_log("Test Results:")
+        for tc in JunitReport.JUNIT_TEST_SUITE.test_cases:
+            if tc.failures:
+                if self.is_known_issue(tc.name, self.known_failure_cases):
+                    console_log(" Known Failure: " + tc.name, color="orange")
+                else:
+                    console_log(" Test Fail: " + tc.name, color="red")
+                    _res = False
+            else:
+                console_log(" Test Succeed: " + tc.name, color="green")
+        return _res


-if __name__ == '__main__':
+if __name__ == "__main__":
     parser = argparse.ArgumentParser()
     parser.add_argument("test_case",
@@ -67,15 +104,17 @@ if __name__ == '__main__':
                         help="case filter/config file")
     parser.add_argument("--env_config_file", "-e", default=None,
                         help="test env config file")
+    parser.add_argument("--known_failure_cases_file", default=None,
+                        help="known failure cases file")
     args = parser.parse_args()

-    runner = Runner(args.test_case, args.case_config, args.env_config_file)
+    runner = Runner(args.test_case, args.case_config, args.env_config_file, args.known_failure_cases_file)

     runner.start()
     while True:
         try:
             runner.join(1)
-            if not runner.isAlive():
+            if not runner.is_alive():
                 break
         except KeyboardInterrupt:
             print("exit by Ctrl-C")
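
`get_test_result()` now derives the verdict from the collected junit test cases rather than a private result list: a failing case whose name matches an entry from the known-failure file is logged as a known failure and does not flip the overall result. Matching is exact name or `fnmatch` wildcard, so the file can carry patterns; a rough, self-contained illustration (all names and patterns here are invented):

    from fnmatch import fnmatch

    known = {'esp32.default.UT_FLAKY_TIMER_01', 'esp32.*.UT_BT_*'}

    def is_known(tc_name):
        # mirrors Runner.is_known_issue(): exact match first, then wildcard match
        return any(tc_name == k or fnmatch(tc_name, k) for k in known)

    print(is_known('esp32.default.UT_FLAKY_TIMER_01'))  # True  (exact entry)
    print(is_known('esp32.psram.UT_BT_SPP_02'))         # True  (wildcard entry)
    print(is_known('esp32.default.UT_WIFI_03'))         # False (still fails the run)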

View File

@@ -20,10 +20,6 @@ from .IDFDUT import IDFDUT, ESP32DUT, ESP32S2DUT, ESP8266DUT, ESP32QEMUDUT  # no
 from .DebugUtils import OCDProcess, GDBProcess, TelnetProcess, CustomProcess  # noqa: export DebugUtils for users


-def format_case_id(chip, case_name):
-    return "{}.{}".format(chip, case_name)
-
-
 def idf_example_test(app=Example, dut=IDFDUT, chip="ESP32", module="examples", execution_time=1,
                      level="example", erase_nvs=True, config_name=None, **kwargs):
     """
@@ -51,7 +47,6 @@ def idf_example_test(app=Example, dut=IDFDUT, chip="ESP32", module="examples", e
     def test(func):
         test_func = original_method(func)
-        test_func.case_info["ID"] = format_case_id(chip, test_func.case_info["name"])
         return test_func

     return test
@@ -83,7 +78,6 @@ def idf_unit_test(app=UT, dut=IDFDUT, chip="ESP32", module="unit-test", executio
     def test(func):
         test_func = original_method(func)
-        test_func.case_info["ID"] = format_case_id(chip, test_func.case_info["name"])
         return test_func

     return test
@@ -118,7 +112,6 @@ def idf_custom_test(app=TestApp, dut=IDFDUT, chip="ESP32", module="misc", execut
     def test(func):
         test_func = original_method(func)
-        test_func.case_info["ID"] = format_case_id(chip, test_func.case_info["name"])
         return test_func

     return test
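
With the local helper removed, the `ttfw_idf` decorators no longer bake a `<chip>.<name>` string into `case_info["ID"]`; instead, `TinyFW.test_method` builds the junit name via the shared `Utility.format_case_id()` using the chip from `case_info['chip']` (see the TinyFW.py hunk above). The visible effect is a change in the reported name shape (values are illustrative only):

    from tiny_test_fw.Utility import format_case_id

    # removed ttfw_idf helper: "{}.{}".format(chip, case_name) -> e.g. 'ESP32.test_ota'
    # shared helper used from now on:
    print(format_case_id('test_ota', target='esp32'))   # esp32.default.test_ota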

View File

@@ -18,17 +18,20 @@
 Test script for unit test case.
 """
-import re
-import time
 import argparse
+import re
 import threading
+import time

-from tiny_test_fw import TinyFW, Utility, Env, DUT
 import ttfw_idf
+from tiny_test_fw import DUT, Env, TinyFW, Utility
+from tiny_test_fw.TinyFW import TestCaseFailed
+from tiny_test_fw.Utility import format_case_id, handle_unexpected_exception

 UT_APP_BOOT_UP_DONE = "Press ENTER to see the list of tests."
+STRIP_CONFIG_PATTERN = re.compile(r'(.+?)(_\d+)?$')

 # matches e.g.: "rst:0xc (SW_CPU_RESET),boot:0x13 (SPI_FAST_FLASH_BOOT)"
 RESET_PATTERN = re.compile(r"(rst:0x[0-9a-fA-F]*\s\([\w].*?\),boot:0x[0-9a-fA-F]*\s\([\w].*?\))")
@@ -70,11 +73,7 @@ def reset_reason_matches(reported_str, expected_str):
     return False


-class TestCaseFailed(AssertionError):
-    pass
-
-
-def format_test_case_config(test_case_data):
+def format_test_case_config(test_case_data, target='esp32'):
     """
     convert the test case data to unified format.
     We need to following info to run unit test cases:
@@ -94,6 +93,7 @@ def format_test_case_config(test_case_data):
     If config is not specified for test case, then

     :param test_case_data: string, list, or a dictionary list
+    :param target: target
     :return: formatted data
     """
@@ -133,6 +133,9 @@ def format_test_case_config(test_case_data):
         if "config" not in _case:
             _case["config"] = "default"

+        if 'target' not in _case:
+            _case['target'] = target
+
         return _case

     if not isinstance(test_case_data, list):
@@ -160,7 +163,11 @@ def replace_app_bin(dut, name, new_app_bin):


 def format_case_name(case):
-    return "[{}] {}".format(case["config"], case["name"])
+    # we could split cases of same config into multiple binaries as we have limited rom space
+    # we should regard those configs like `default` and `default_2` as the same config
+    match = STRIP_CONFIG_PATTERN.match(case['config'])
+    stripped_config_name = match.group(1)
+    return format_case_id(case['name'], target=case['target'], config=stripped_config_name)


 def reset_dut(dut):
@@ -188,8 +195,11 @@ def reset_dut(dut):


 def log_test_case(description, test_case, ut_config):
-    Utility.console_log("Running {} '{}' (config {})".format(description, test_case["name"], ut_config), color="orange")
-    Utility.console_log("Tags: %s" % ", ".join("%s=%s" % (k,v) for (k,v) in test_case.items() if k != "name" and v is not None), color="orange")
+    Utility.console_log("Running {} '{}' (config {})".format(description, test_case['name'], ut_config),
+                        color='orange')
+    Utility.console_log('Tags: %s' % ', '.join('%s=%s' % (k, v) for (k, v) in test_case.items()
+                                               if k != 'name' and v is not None),
+                        color='orange')


 def run_one_normal_case(dut, one_case, junit_test_case):
@@ -287,7 +297,7 @@ def run_unit_test_cases(env, extra_data):

     :return: None
     """
-    case_config = format_test_case_config(extra_data)
+    case_config = format_test_case_config(extra_data, env.default_dut_cls.TARGET)

     # we don't want stop on failed case (unless some special scenarios we can't handle)
     # this flag is used to log if any of the case failed during executing
@@ -306,14 +316,14 @@ def run_unit_test_cases(env, extra_data):
             log_test_case("test case", one_case, ut_config)
             performance_items = []
             # create junit report test case
-            junit_test_case = TinyFW.JunitReport.create_test_case("[{}] {}".format(ut_config, one_case["name"]))
+            junit_test_case = TinyFW.JunitReport.create_test_case(format_case_name(one_case))
             try:
                 run_one_normal_case(dut, one_case, junit_test_case)
                 performance_items = dut.get_performance_items()
             except TestCaseFailed:
                 failed_cases.append(format_case_name(one_case))
             except Exception as e:
-                junit_test_case.add_failure_info("Unexpected exception: " + str(e))
+                handle_unexpected_exception(junit_test_case, e)
                 failed_cases.append(format_case_name(one_case))
             finally:
                 TinyFW.JunitReport.update_performance(performance_items)
@@ -321,13 +331,6 @@ def run_unit_test_cases(env, extra_data):
         # close DUT when finish running all cases for one config
         env.close_dut(dut.name)

-    # raise exception if any case fails
-    if failed_cases:
-        Utility.console_log("Failed Cases:", color="red")
-        for _case_name in failed_cases:
-            Utility.console_log("\t" + _case_name, color="red")
-        raise AssertionError("Unit Test Failed")
-

 class Handler(threading.Thread):
@@ -502,19 +505,21 @@ def run_multiple_devices_cases(env, extra_data):
     """
     failed_cases = []
-    case_config = format_test_case_config(extra_data)
+    case_config = format_test_case_config(extra_data, env.default_dut_cls.TARGET)
     duts = {}
     for ut_config in case_config:
         Utility.console_log("Running unit test for config: " + ut_config, "O")
         for one_case in case_config[ut_config]:
             log_test_case("multi-device test", one_case, ut_config, )
             result = False
-            junit_test_case = TinyFW.JunitReport.create_test_case("[{}] {}".format(ut_config, one_case["name"]))
+            junit_test_case = TinyFW.JunitReport.create_test_case(format_case_name(one_case))
             try:
                 result = run_one_multiple_devices_case(duts, ut_config, env, one_case,
                                                        one_case.get('app_bin'), junit_test_case)
+            except TestCaseFailed:
+                pass  # result is False, this is handled by the finally block
             except Exception as e:
-                junit_test_case.add_failure_info("Unexpected exception: " + str(e))
+                handle_unexpected_exception(junit_test_case, e)
             finally:
                 if result:
                     Utility.console_log("Success: " + format_case_name(one_case), color="green")
@@ -527,12 +532,6 @@ def run_multiple_devices_cases(env, extra_data):
             env.close_dut(dut)
         duts = {}

-    if failed_cases:
-        Utility.console_log("Failed Cases:", color="red")
-        for _case_name in failed_cases:
-            Utility.console_log("\t" + _case_name, color="red")
-        raise AssertionError("Unit Test Failed")
-

 def run_one_multiple_stage_case(dut, one_case, junit_test_case):
     reset_dut(dut)
@@ -641,7 +640,7 @@ def run_one_multiple_stage_case(dut, one_case, junit_test_case):

 def run_multiple_stage_cases(env, extra_data):
     """
     extra_data can be 2 types of value
-    1. as dict: Mandantory keys: "name" and "child case num", optional keys: "reset" and others
+    1. as dict: Mandatory keys: "name" and "child case num", optional keys: "reset" and others
     3. as list of string or dict:
        [case1, case2, case3, {"name": "restart from PRO CPU", "child case num": 2}, ...]
@@ -650,7 +649,7 @@ def run_multiple_stage_cases(env, extra_data):

     :return: None
     """
-    case_config = format_test_case_config(extra_data)
+    case_config = format_test_case_config(extra_data, env.default_dut_cls.TARGET)

     # we don't want stop on failed case (unless some special scenarios we can't handle)
     # this flag is used to log if any of the case failed during executing
@@ -667,14 +666,14 @@
         for one_case in case_config[ut_config]:
             log_test_case("multi-stage test", one_case, ut_config)
             performance_items = []
-            junit_test_case = TinyFW.JunitReport.create_test_case("[{}] {}".format(ut_config, one_case["name"]))
+            junit_test_case = TinyFW.JunitReport.create_test_case(format_case_name(one_case))
             try:
                 run_one_multiple_stage_case(dut, one_case, junit_test_case)
                 performance_items = dut.get_performance_items()
             except TestCaseFailed:
                 failed_cases.append(format_case_name(one_case))
             except Exception as e:
-                junit_test_case.add_failure_info("Unexpected exception: " + str(e))
+                handle_unexpected_exception(junit_test_case, e)
                 failed_cases.append(format_case_name(one_case))
             finally:
                 TinyFW.JunitReport.update_performance(performance_items)
@@ -682,17 +681,9 @@
         # close DUT when finish running all cases for one config
         env.close_dut(dut.name)

-    # raise exception if any case fails
-    if failed_cases:
-        Utility.console_log("Failed Cases:", color="red")
-        for _case_name in failed_cases:
-            Utility.console_log("\t" + _case_name, color="red")
-        raise AssertionError("Unit Test Failed")
-

 def detect_update_unit_test_info(env, extra_data, app_bin):
-    case_config = format_test_case_config(extra_data)
+    case_config = format_test_case_config(extra_data, env.default_dut_cls.TARGET)

     for ut_config in case_config:
         dut = env.get_dut("unit-test-app", app_path=UT_APP_PATH, app_config_name=ut_config)
@@ -762,20 +753,16 @@ if __name__ == '__main__':
         type=int,
         default=1
     )
-    parser.add_argument("--env_config_file", "-e",
-                        help="test env config file",
-                        default=None
-                        )
-    parser.add_argument("--app_bin", "-b",
-                        help="application binary file for flashing the chip",
-                        default=None
-                        )
-    parser.add_argument(
-        'test',
-        help='Comma separated list of <option>:<argument> where option can be "name" (default), "child case num", \
-             "config", "timeout".',
-        nargs='+'
-    )
+    parser.add_argument('--env_config_file', '-e',
+                        help='test env config file',
+                        default=None)
+    parser.add_argument('--app_bin', '-b',
+                        help='application binary file for flashing the chip',
+                        default=None)
+    parser.add_argument('test',
+                        help='Comma separated list of <option>:<argument> where option can be "name" (default), '
+                             '"child case num", "config", "timeout".',
+                        nargs='+')
     args = parser.parse_args()
     list_of_dicts = []
     for test in args.test:
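
`format_case_name()` now routes through `format_case_id()` and strips the numeric suffix that distinguishes split binaries of the same sdkconfig, so `default` and `default_2` report under one config name. A small check of the regex used for that (the config names are examples):

    import re

    STRIP_CONFIG_PATTERN = re.compile(r'(.+?)(_\d+)?$')

    print(STRIP_CONFIG_PATTERN.match('default').group(1))    # default
    print(STRIP_CONFIG_PATTERN.match('default_2').group(1))  # default
    print(STRIP_CONFIG_PATTERN.match('psram_3').group(1))    # psram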