From e6938f8f39428a92ccb5ae930f1af8f30abc3a0e Mon Sep 17 00:00:00 2001
From: Ivan Kravets
Date: Tue, 10 May 2022 20:21:49 +0300
Subject: [PATCH] List available project tests with a new "pio test --list-tests" option
---
 HISTORY.rst                 |   9 +--
 docs                        |   2 +-
 platformio/debug/helpers.py |   4 +-
 platformio/test/command.py  | 108 ++++++++++++------------------------
 platformio/test/helpers.py  |  62 +++++++++++++++++++++
 platformio/test/result.py   |   4 +-
 tests/commands/test_test.py |  21 +++++++
 7 files changed, 130 insertions(+), 80 deletions(-)
 create mode 100644 platformio/test/helpers.py

diff --git a/HISTORY.rst b/HISTORY.rst
index 5ed7fc2c..ddf6f7dd 100644
--- a/HISTORY.rst
+++ b/HISTORY.rst
@@ -9,12 +9,12 @@ Release Notes
 PlatformIO Core 6
 -----------------
 
-**A professional collaborative platform for safety-critical and declarative embedded development**
+**A professional collaborative platform for declarative, safety-critical, and test-driven embedded development.**
 
 6.0.0 (2022-??-??)
 ~~~~~~~~~~~~~~~~~~
 
-Please check `Migration guide from 5.x to 6.0 `__.
+Please check the `Migration guide from 5.x to 6.0 `__.
 
 * **Package Management**
 
@@ -56,9 +56,10 @@ Please check `Migration guide from 5.x to 6.0 `__
 
   - Added support for a `custom Unity library `__ (`issue #3980 `_)
   - Added support for the ``socket://`` and ``rfc2217://`` protocols using `test_port `__ option (`issue #4229 `_)
+  - List available project tests with a new `pio test --list-tests `__ option
   - Pass extra arguments to the testing program with a new `pio test --program-arg `__ option (`issue #3132 `_)
   - Generate reports in JUnit and JSON formats using the `pio test `__ command (`issue #2891 `_)
   - Provide more information when the native program crashed on a host (errored with a non-zero return code) (`issue #3429 `_)
   - Fixed an issue when command line parameters (``--ignore``, ``--filter``) do not override values defined in the |PIOCONF| (`issue #3845 `_)
   - Renamed the "test_build_project_src" project configuration option to the `test_build_src `__
   - Removed the "test_transport" option in favor of the `Custom "unity_config.h" `_
@@ -85,7 +86,7 @@ Please check `Migration guide from 5.x to 6.0 `__
 
-  - Changed default path for compilation database `compile_commands.json `__ to the project root
+  - Changed the default path for the compilation database `compile_commands.json `__ to the project root
 
 * **Project Configuration**

diff --git a/docs b/docs
index 7adff49f..a997e10d 160000
--- a/docs
+++ b/docs
@@ -1 +1 @@
-Subproject commit 7adff49f78939a6e32d65f58bd3bca221399826d
+Subproject commit a997e10df9e5082d8a312bfc9113d363facf4b19
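The headline feature above, `pio test --list-tests`, enumerates a project's test suites without building, uploading, or running anything. A quick way to exercise it outside a terminal is click's test runner, the same mechanism the new test at the end of this patch uses; this is an illustration only, and "path/to/project" is a placeholder for any project with a test directory:

```python
# Illustration only: drive the new flag through click's CliRunner,
# mirroring tests/commands/test_test.py below.
from click.testing import CliRunner

from platformio.test.command import test_cmd

runner = CliRunner()
result = runner.invoke(test_cmd, ["-d", "path/to/project", "--list-tests"])
# The stdout report lists every discovered suite; nothing is built or run.
print(result.output)
```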
diff --git a/platformio/debug/helpers.py b/platformio/debug/helpers.py
index 3797976c..f132356e 100644
--- a/platformio/debug/helpers.py
+++ b/platformio/debug/helpers.py
@@ -26,7 +26,7 @@ from platformio.commands.run.command import cli as cmd_run
 from platformio.commands.run.command import print_processing_header
 from platformio.compat import IS_WINDOWS, is_bytes
 from platformio.debug.exception import DebugInvalidOptionsError
-from platformio.test.command import get_test_names
+from platformio.test.helpers import list_test_names
 from platformio.test.result import TestSuite
 from platformio.test.runners.base import TestRunnerOptions
 from platformio.test.runners.factory import TestRunnerFactory
@@ -82,7 +82,7 @@ def predebug_project(
 ):  # pylint: disable=too-many-arguments
     debug_testname = project_config.get("env:" + env_name, "debug_test")
     if debug_testname:
-        test_names = get_test_names(project_config)
+        test_names = list_test_names(project_config)
         if debug_testname not in test_names:
             raise DebugInvalidOptionsError(
                 "Unknown test name `%s`. Valid names are `%s`"

diff --git a/platformio/test/command.py b/platformio/test/command.py
index 7101bf2c..bce1d7e7 100644
--- a/platformio/test/command.py
+++ b/platformio/test/command.py
@@ -12,7 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import fnmatch
 import os
 import shutil
 
@@ -20,9 +19,9 @@ import click
 
 from platformio import app, exception, fs, util
 from platformio.project.config import ProjectConfig
-from platformio.test.exception import TestDirNotExistsError
+from platformio.test.helpers import list_test_suites
 from platformio.test.reports.base import TestReportFactory
-from platformio.test.result import TestResult, TestStatus, TestSuite
+from platformio.test.result import TestResult, TestStatus
 from platformio.test.runners.base import TestRunnerOptions
 from platformio.test.runners.factory import TestRunnerFactory
 
@@ -83,6 +82,7 @@ from platformio.test.runners.factory import TestRunnerFactory
     multiple=True,
     help="A program argument (multiple are allowed)",
 )
+@click.option("--list-tests", is_flag=True)
 @click.option("--json-output", type=click.Path(resolve_path=True))
 @click.option("--junit-output", type=click.Path(resolve_path=True))
 @click.option("--verbose", "-v", is_flag=True)
@@ -103,6 +103,7 @@ def test_cmd(  # pylint: disable=too-many-arguments,too-many-locals,redefined-bu
     monitor_rts,
     monitor_dtr,
     program_args,
+    list_tests,
     json_output,
     junit_output,
     verbose,
@@ -110,9 +111,14 @@ def test_cmd(  # pylint: disable=too-many-arguments,too-many-locals,redefined-bu
     app.set_session_var("custom_project_conf", project_conf)
 
     with fs.cd(project_dir):
-        config = ProjectConfig.get_instance(project_conf)
-        config.validate(envs=environment)
-        test_names = get_test_names(config)
+        project_config = ProjectConfig.get_instance(project_conf)
+        project_config.validate(envs=environment)
+
+        test_result = TestResult(project_dir)
+        test_suites = list_test_suites(
+            project_config, environments=environment, filters=filter, ignores=ignore
+        )
+        test_names = sorted(set(s.test_name for s in test_suites))
 
         if not verbose:
             click.echo("Verbose mode can be enabled via `-v, --verbose` option")
@@ -120,62 +126,36 @@ def test_cmd(  # pylint: disable=too-many-arguments,too-many-locals,redefined-bu
         if verbose:
             click.echo(" (%s)" % ", ".join(test_names))
 
-        test_result = TestResult(project_dir)
-        default_envs = config.default_envs()
-        for env_name in config.envs():
-            for test_name in test_names:
-                test_suite = TestSuite(env_name, test_name)
-                test_result.add_suite(test_suite)
-
-                # filter and ignore patterns
-                patterns = dict(filter=list(filter), ignore=list(ignore))
-                for key in patterns:
-                    if patterns[key]:  # overriden from CLI
-                        continue
-                    patterns[key].extend(
-                        config.get(f"env:{env_name}", f"test_{key}", [])
-                    )
-
-                skip_conditions = [
-                    environment and env_name not in environment,
-                    not environment and default_envs and env_name not in default_envs,
-                    test_name != "*"
-                    and patterns["filter"]
-                    and not any(
-                        fnmatch.fnmatch(test_name, p) for p in patterns["filter"]
-                    ),
-                    test_name != "*"
-                    and any(fnmatch.fnmatch(test_name, p) for p in patterns["ignore"]),
-                ]
-                if any(skip_conditions):
-                    continue
-
-                runner = TestRunnerFactory.new(
-                    test_suite,
-                    config,
-                    TestRunnerOptions(
-                        verbose=verbose,
-                        without_building=without_building,
-                        without_uploading=without_uploading,
-                        without_testing=without_testing,
-                        upload_port=upload_port,
-                        test_port=test_port,
-                        no_reset=no_reset,
-                        monitor_rts=monitor_rts,
-                        monitor_dtr=monitor_dtr,
-                        program_args=program_args,
-                    ),
-                )
-                click.echo()
-                print_suite_header(test_suite)
-                runner.start(ctx)
-                print_suite_footer(test_suite)
+        for test_suite in test_suites:
+            test_result.add_suite(test_suite)
+            if list_tests or test_suite.is_finished():  # skipped by user
+                continue
+            runner = TestRunnerFactory.new(
+                test_suite,
+                project_config,
+                TestRunnerOptions(
+                    verbose=verbose,
+                    without_building=without_building,
+                    without_uploading=without_uploading,
+                    without_testing=without_testing,
+                    upload_port=upload_port,
+                    test_port=test_port,
+                    no_reset=no_reset,
+                    monitor_rts=monitor_rts,
+                    monitor_dtr=monitor_dtr,
+                    program_args=program_args,
+                ),
+            )
+            click.echo()
+            print_suite_header(test_suite)
+            runner.start(ctx)
+            print_suite_footer(test_suite)
 
     # Reset custom project config
     app.set_session_var("custom_project_conf", None)
 
     stdout_report = TestReportFactory.new("stdout", test_result)
-    stdout_report.generate(verbose=verbose)
+    stdout_report.generate(verbose=verbose or list_tests)
 
     for output_format, output_path in [("json", json_output), ("junit", junit_output)]:
         if not output_path:
@@ -187,20 +167,6 @@ def test_cmd(  # pylint: disable=too-many-arguments,too-many-locals,redefined-bu
     raise exception.ReturnErrorCode(1)
 
 
-def get_test_names(config):
-    test_dir = config.get("platformio", "test_dir")
-    if not os.path.isdir(test_dir):
-        raise TestDirNotExistsError(test_dir)
-    names = []
-    for root, _, __ in os.walk(test_dir):
-        if not os.path.basename(root).startswith("test_"):
-            continue
-        names.append(os.path.relpath(root, test_dir))
-    if not names:
-        names = ["*"]
-    return names
-
-
 def print_suite_header(test_suite):
     click.echo(
         "Processing %s in %s environment"
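With this rewrite, `test_cmd` no longer computes skip conditions inline: it consumes ready-made `TestSuite` objects from `list_test_suites()` and merely skips those that arrive already finished, or all of them under `--list-tests`. The underlying `--filter`/`--ignore` selection is plain `fnmatch` globbing; here is a standalone sketch of how the two pattern lists interact, with invented test names:

```python
# Standalone sketch of the glob semantics used by list_test_suites();
# the names and patterns below are invented for illustration.
from fnmatch import fnmatch

test_names = ["test_desktop", "embedded/test_calc", "embedded/test_io"]
filters = ["embedded/*"]  # e.g. pio test --filter "embedded/*"
ignores = ["*_io"]        # e.g. pio test --ignore "*_io"

for name in test_names:
    # A suite runs if it matches some filter (or no filters are given)
    # and matches no ignore pattern.
    selected = (
        not filters or any(fnmatch(name, p) for p in filters)
    ) and not any(fnmatch(name, p) for p in ignores)
    print(name, "->", "run" if selected else "skip")
# test_desktop -> skip
# embedded/test_calc -> run
# embedded/test_io -> skip
```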
diff --git a/platformio/test/helpers.py b/platformio/test/helpers.py
new file mode 100644
index 00000000..a8e2f818
--- /dev/null
+++ b/platformio/test/helpers.py
@@ -0,0 +1,62 @@
+# Copyright (c) 2014-present PlatformIO
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+from fnmatch import fnmatch
+
+from platformio.test.exception import TestDirNotExistsError
+from platformio.test.result import TestSuite
+
+
+def list_test_names(project_config):
+    test_dir = project_config.get("platformio", "test_dir")
+    if not os.path.isdir(test_dir):
+        raise TestDirNotExistsError(test_dir)
+    names = []
+    for root, _, __ in os.walk(test_dir):
+        if not os.path.basename(root).startswith("test_"):
+            continue
+        names.append(os.path.relpath(root, test_dir).replace("\\", "/"))
+    if not names:
+        names = ["*"]
+    return names
+
+
+def list_test_suites(project_config, environments, filters, ignores):
+    result = []
+    default_envs = project_config.default_envs()
+    test_names = list_test_names(project_config)
+    for env_name in project_config.envs():
+        for test_name in test_names:
+
+            # filter and ignore patterns
+            patterns = dict(filter=list(filters), ignore=list(ignores))
+            for key in patterns:
+                if patterns[key]:  # overridden from CLI
+                    continue
+                patterns[key].extend(
+                    project_config.get(f"env:{env_name}", f"test_{key}", [])
+                )
+
+            skip_conditions = [
+                environments and env_name not in environments,
+                not environments and default_envs and env_name not in default_envs,
+                test_name != "*"
+                and patterns["filter"]
+                and not any(fnmatch(test_name, p) for p in patterns["filter"]),
+                test_name != "*"
+                and any(fnmatch(test_name, p) for p in patterns["ignore"]),
+            ]
+            result.append(TestSuite(env_name, test_name, finished=any(skip_conditions)))
+    return result
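To make the discovery rules above concrete: only directories whose basename starts with `test_` are collected, nested groups keep forward slashes even on Windows (the `replace("\\", "/")`), and a test tree with no `test_*` folders falls back to the implicit `*` suite. A hypothetical layout and the calls that enumerate it, assuming the working directory is a PlatformIO project:

```python
# Hypothetical usage; the layout and the printed ordering are assumptions.
#
#   test/
#   |-- test_desktop/test_main.cpp        -> "test_desktop"
#   |-- embedded/test_calc/test_main.cpp  -> "embedded/test_calc"
#   `-- common/helpers.h                  -> ignored (no "test_" prefix)
from platformio.project.config import ProjectConfig
from platformio.test.helpers import list_test_names, list_test_suites

config = ProjectConfig.get_instance()
print(list_test_names(config))  # e.g. ["embedded/test_calc", "test_desktop"]

# One TestSuite per (environment, test name) pair; suites excluded by
# --filter/--ignore or by the environment selection arrive pre-"finished".
suites = list_test_suites(config, environments=[], filters=[], ignores=[])
print([(s.env_name, s.test_name, s.is_finished()) for s in suites])
```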
diff --git a/platformio/test/result.py b/platformio/test/result.py
index 5ca716df..b2000b2e 100644
--- a/platformio/test/result.py
+++ b/platformio/test/result.py
@@ -93,13 +93,13 @@ class TestCase:
 
 
 class TestSuite:
-    def __init__(self, env_name, test_name):
+    def __init__(self, env_name, test_name, finished=False):
         self.env_name = env_name
         self.test_name = test_name
         self.timestamp = 0
         self.duration = 0
         self._cases = []
-        self._finished = False
+        self._finished = finished
 
     @property
     def cases(self):

diff --git a/tests/commands/test_test.py b/tests/commands/test_test.py
index 376d5fcd..3ce95cd1 100644
--- a/tests/commands/test_test.py
+++ b/tests/commands/test_test.py
@@ -65,6 +65,27 @@ def test_calculator_example(tmp_path: Path):
     assert junit_failed_testcase.find("failure").get("message") == "Expected 32 Was 33"
 
 
+def test_list_tests(clirunner, validate_cliresult, tmp_path: Path):
+    json_output_path = tmp_path / "report.json"
+    result = clirunner.invoke(
+        pio_test_cmd,
+        [
+            "-d",
+            os.path.join("examples", "unit-testing", "calculator"),
+            "--list-tests",
+            "--json-output",
+            str(json_output_path),
+        ],
+    )
+    validate_cliresult(result)
+    # test JSON
+    json_report = load_json(str(json_output_path))
+    assert json_report["testcase_nums"] == 0
+    assert json_report["failure_nums"] == 0
+    assert json_report["skipped_nums"] == 0
+    assert len(json_report["test_suites"]) == 6
+
+
 def test_group_and_custom_runner(clirunner, validate_cliresult, tmp_path: Path):
     project_dir = tmp_path / "project"
     project_dir.mkdir()
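The new test pins down the `--list-tests` contract: suites are enumerated in the JSON report (six for the calculator example), but since no runner ever starts, every case counter stays at zero. A minimal sketch of consuming such a report; the filename is whatever was passed to `--json-output`, and only the top-level keys asserted above are assumed:

```python
# Minimal sketch of reading a pio test --json-output report produced
# alongside --list-tests; per-suite fields beyond the list itself are
# intentionally not assumed here.
import json

with open("report.json", encoding="utf8") as fp:
    report = json.load(fp)

print(len(report["test_suites"]), "suites discovered")
assert report["testcase_nums"] == 0  # --list-tests never runs a case
```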