List available project tests with a new "pio test --list-tests" option

This commit is contained in:
Ivan Kravets
2022-05-10 20:21:49 +03:00
parent 6d705172f5
commit e6938f8f39
7 changed files with 130 additions and 80 deletions
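Usage note: a minimal sketch of exercising the new flag through click's test runner, in the same style as this commit's own functional test below (the example project path and in-project working directory are assumptions):

from click.testing import CliRunner

from platformio.test.command import test_cmd

runner = CliRunner()
# --list-tests only discovers "test_" folders; nothing is built, uploaded, or run
result = runner.invoke(
    test_cmd, ["-d", "examples/unit-testing/calculator", "--list-tests"]
)
print(result.output)  # stdout report of the discovered test suites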

View File

@@ -9,12 +9,12 @@ Release Notes
PlatformIO Core 6
-----------------
**A professional collaborative platform for safety-critical and declarative embedded development**
**A professional collaborative platform for declarative, safety-critical, and test-driven embedded development.**
6.0.0 (2022-??-??)
~~~~~~~~~~~~~~~~~~
Please check `Migration guide from 5.x to 6.0 <https://docs.platformio.org/en/latest/core/migration.html>`__.
Please check the `Migration guide from 5.x to 6.0 <https://docs.platformio.org/en/latest/core/migration.html>`__.
* **Package Management**
@@ -56,9 +56,10 @@ Please check `Migration guide from 5.x to 6.0 <https://docs.platformio.org/en/la
- Added support for a custom `testing command <https://docs.platformio.org/en/latest/projectconf/section_env_test.html#test-testing-command>`__
- Added support for a `custom Unity library <https://docs.platformio.org/en/latest/advanced/unit-testing/frameworks/custom/examples/custom_unity_library.html>`__ (`issue #3980 <https://github.com/platformio/platformio-core/issues/3980>`_)
- Added support for the ``socket://`` and ``rfc2217://`` protocols using `test_port <https://docs.platformio.org/en/latest/projectconf/section_env_test.html#test-port>`__ option (`issue #4229 <https://github.com/platformio/platformio-core/issues/4229>`_)
- List available project tests with a new `pio test --list-tests <https://docs.platformio.org/en/latest/core/userguide/cmd_test.html#cmdoption-pio-test-list-tests>`__ option
- Pass extra arguments to the testing program with a new `pio test --program-arg <https://docs.platformio.org/en/latest/core/userguide/cmd_test.html#cmdoption-pio-test-a>`__ option (`issue #3132 <https://github.com/platformio/platformio-core/issues/3132>`_)
- Generate reports in JUnit and JSON formats using the `pio test <https://docs.platformio.org/en/latest/core/userguide/cmd_test.html>`__ command (`issue #2891 <https://github.com/platformio/platformio-core/issues/2891>`_)
- Provide more information when the native program crashed on a host (errored with a negative return code) (`issue #3429 <https://github.com/platformio/platformio-core/issues/3429>`_)
- Provide more information when the native program crashed on a host (errored with a non-zero return code) (`issue #3429 <https://github.com/platformio/platformio-core/issues/3429>`_)
- Fixed an issue when command line parameters (``--ignore``, ``--filter``) do not override values defined in the |PIOCONF| (`issue #3845 <https://github.com/platformio/platformio-core/issues/3845>`_)
- Renamed the "test_build_project_src" project configuration option to `test_build_src <https://docs.platformio.org/en/latest/projectconf/section_env_test.html#test-build-src>`__
- Removed the "test_transport" option in favor of the `Custom "unity_config.h" <https://docs.platformio.org/en/latest/advanced/unit-testing/frameworks/unity.html>`_
@@ -85,7 +86,7 @@ Please check `Migration guide from 5.x to 6.0 <https://docs.platformio.org/en/la
* **Integration**
- Added a new build variable (``COMPILATIONDB_INCLUDE_TOOLCHAIN``) to include toolchain paths in the compilation database (`issue #3735 <https://github.com/platformio/platformio-core/issues/3735>`_)
- Changed default path for compilation database `compile_commands.json <https://docs.platformio.org/en/latest/integration/compile_commands.html>`__ to the project root
- Changed the default path for the compilation database `compile_commands.json <https://docs.platformio.org/en/latest/integration/compile_commands.html>`__ to the project root
* **Project Configuration**

docs

Submodule docs updated: 7adff49f78...a997e10df9

View File

@@ -26,7 +26,7 @@ from platformio.commands.run.command import cli as cmd_run
from platformio.commands.run.command import print_processing_header
from platformio.compat import IS_WINDOWS, is_bytes
from platformio.debug.exception import DebugInvalidOptionsError
from platformio.test.command import get_test_names
from platformio.test.helpers import list_test_names
from platformio.test.result import TestSuite
from platformio.test.runners.base import TestRunnerOptions
from platformio.test.runners.factory import TestRunnerFactory
@@ -82,7 +82,7 @@ def predebug_project(
):  # pylint: disable=too-many-arguments
    debug_testname = project_config.get("env:" + env_name, "debug_test")
    if debug_testname:
        test_names = get_test_names(project_config)
        test_names = list_test_names(project_config)
        if debug_testname not in test_names:
            raise DebugInvalidOptionsError(
                "Unknown test name `%s`. Valid names are `%s`"

View File

@@ -12,7 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import fnmatch
import os
import shutil
@@ -20,9 +19,9 @@ import click
from platformio import app, exception, fs, util
from platformio.project.config import ProjectConfig
from platformio.test.exception import TestDirNotExistsError
from platformio.test.helpers import list_test_suites
from platformio.test.reports.base import TestReportFactory
from platformio.test.result import TestResult, TestStatus, TestSuite
from platformio.test.result import TestResult, TestStatus
from platformio.test.runners.base import TestRunnerOptions
from platformio.test.runners.factory import TestRunnerFactory
@@ -83,6 +82,7 @@ from platformio.test.runners.factory import TestRunnerFactory
    multiple=True,
    help="A program argument (multiple are allowed)",
)
@click.option("--list-tests", is_flag=True)
@click.option("--json-output", type=click.Path(resolve_path=True))
@click.option("--junit-output", type=click.Path(resolve_path=True))
@click.option("--verbose", "-v", is_flag=True)
@@ -103,6 +103,7 @@ def test_cmd(  # pylint: disable=too-many-arguments,too-many-locals,redefined-bu
    monitor_rts,
    monitor_dtr,
    program_args,
    list_tests,
    json_output,
    junit_output,
    verbose,
@@ -110,9 +111,14 @@ def test_cmd(  # pylint: disable=too-many-arguments,too-many-locals,redefined-bu
    app.set_session_var("custom_project_conf", project_conf)
    with fs.cd(project_dir):
        config = ProjectConfig.get_instance(project_conf)
        config.validate(envs=environment)
        test_names = get_test_names(config)
        project_config = ProjectConfig.get_instance(project_conf)
        project_config.validate(envs=environment)
        test_result = TestResult(project_dir)
        test_suites = list_test_suites(
            project_config, environments=environment, filters=filter, ignores=ignore
        )
        test_names = sorted(set(s.test_name for s in test_suites))
        if not verbose:
            click.echo("Verbose mode can be enabled via `-v, --verbose` option")
@@ -120,62 +126,36 @@ def test_cmd(  # pylint: disable=too-many-arguments,too-many-locals,redefined-bu
        if verbose:
            click.echo(" (%s)" % ", ".join(test_names))
        test_result = TestResult(project_dir)
        default_envs = config.default_envs()
        for env_name in config.envs():
            for test_name in test_names:
                test_suite = TestSuite(env_name, test_name)
                test_result.add_suite(test_suite)
                # filter and ignore patterns
                patterns = dict(filter=list(filter), ignore=list(ignore))
                for key in patterns:
                    if patterns[key]:  # overridden from CLI
                        continue
                    patterns[key].extend(
                        config.get(f"env:{env_name}", f"test_{key}", [])
                    )
                skip_conditions = [
                    environment and env_name not in environment,
                    not environment and default_envs and env_name not in default_envs,
                    test_name != "*"
                    and patterns["filter"]
                    and not any(
                        fnmatch.fnmatch(test_name, p) for p in patterns["filter"]
                    ),
                    test_name != "*"
                    and any(fnmatch.fnmatch(test_name, p) for p in patterns["ignore"]),
                ]
                if any(skip_conditions):
                    continue
                runner = TestRunnerFactory.new(
                    test_suite,
                    config,
                    TestRunnerOptions(
                        verbose=verbose,
                        without_building=without_building,
                        without_uploading=without_uploading,
                        without_testing=without_testing,
                        upload_port=upload_port,
                        test_port=test_port,
                        no_reset=no_reset,
                        monitor_rts=monitor_rts,
                        monitor_dtr=monitor_dtr,
                        program_args=program_args,
                    ),
                )
                click.echo()
                print_suite_header(test_suite)
                runner.start(ctx)
                print_suite_footer(test_suite)
        for test_suite in test_suites:
            test_result.add_suite(test_suite)
            if list_tests or test_suite.is_finished():  # skipped by user
                continue
            runner = TestRunnerFactory.new(
                test_suite,
                project_config,
                TestRunnerOptions(
                    verbose=verbose,
                    without_building=without_building,
                    without_uploading=without_uploading,
                    without_testing=without_testing,
                    upload_port=upload_port,
                    test_port=test_port,
                    no_reset=no_reset,
                    monitor_rts=monitor_rts,
                    monitor_dtr=monitor_dtr,
                    program_args=program_args,
                ),
            )
            click.echo()
            print_suite_header(test_suite)
            runner.start(ctx)
            print_suite_footer(test_suite)

    # Reset custom project config
    app.set_session_var("custom_project_conf", None)

    stdout_report = TestReportFactory.new("stdout", test_result)
    stdout_report.generate(verbose=verbose or list_tests)

    for output_format, output_path in [("json", json_output), ("junit", junit_output)]:
        if not output_path:
@@ -187,20 +167,6 @@ def test_cmd(  # pylint: disable=too-many-arguments,too-many-locals,redefined-bu
        raise exception.ReturnErrorCode(1)

def get_test_names(config):
    test_dir = config.get("platformio", "test_dir")
    if not os.path.isdir(test_dir):
        raise TestDirNotExistsError(test_dir)
    names = []
    for root, _, __ in os.walk(test_dir):
        if not os.path.basename(root).startswith("test_"):
            continue
        names.append(os.path.relpath(root, test_dir))
    if not names:
        names = ["*"]
    return names

def print_suite_header(test_suite):
    click.echo(
        "Processing %s in %s environment"

View File

@@ -0,0 +1,62 @@
# Copyright (c) 2014-present PlatformIO <contact@platformio.org>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from fnmatch import fnmatch
from platformio.test.exception import TestDirNotExistsError
from platformio.test.result import TestSuite
def list_test_names(project_config):
    test_dir = project_config.get("platformio", "test_dir")
    if not os.path.isdir(test_dir):
        raise TestDirNotExistsError(test_dir)
    names = []
    for root, _, __ in os.walk(test_dir):
        if not os.path.basename(root).startswith("test_"):
            continue
        names.append(os.path.relpath(root, test_dir).replace("\\", "/"))
    if not names:
        names = ["*"]
    return names
def list_test_suites(project_config, environments, filters, ignores):
    result = []
    default_envs = project_config.default_envs()
    test_names = list_test_names(project_config)
    for env_name in project_config.envs():
        for test_name in test_names:
            # filter and ignore patterns
            patterns = dict(filter=list(filters), ignore=list(ignores))
            for key in patterns:
                if patterns[key]:  # overridden from CLI
                    continue
                patterns[key].extend(
                    project_config.get(f"env:{env_name}", f"test_{key}", [])
                )
            skip_conditions = [
                environments and env_name not in environments,
                not environments and default_envs and env_name not in default_envs,
                test_name != "*"
                and patterns["filter"]
                and not any(fnmatch(test_name, p) for p in patterns["filter"]),
                test_name != "*"
                and any(fnmatch(test_name, p) for p in patterns["ignore"]),
            ]
            result.append(TestSuite(env_name, test_name, finished=any(skip_conditions)))
    return result
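
For context, a short sketch of how these two helpers compose (the platformio.ini path and the printed values are illustrative assumptions):

from platformio.project.config import ProjectConfig
from platformio.test.helpers import list_test_names, list_test_suites

project_config = ProjectConfig.get_instance("platformio.ini")  # path assumed
print(list_test_names(project_config))  # e.g. ["test_calc", "embedded/test_uart"]

# with no CLI patterns, the per-env test_filter/test_ignore options still apply
for suite in list_test_suites(project_config, environments=[], filters=[], ignores=[]):
    # suites ruled out by the env/filter/ignore checks arrive pre-marked as finished
    print(suite.env_name, suite.test_name, suite.is_finished())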

View File

@@ -93,13 +93,13 @@ class TestCase:
class TestSuite:
    def __init__(self, env_name, test_name):
    def __init__(self, env_name, test_name, finished=False):
        self.env_name = env_name
        self.test_name = test_name
        self.timestamp = 0
        self.duration = 0
        self._cases = []
        self._finished = False
        self._finished = finished

    @property
    def cases(self):
View File

@@ -65,6 +65,27 @@ def test_calculator_example(tmp_path: Path):
    assert junit_failed_testcase.find("failure").get("message") == "Expected 32 Was 33"

def test_list_tests(clirunner, validate_cliresult, tmp_path: Path):
    json_output_path = tmp_path / "report.json"
    result = clirunner.invoke(
        pio_test_cmd,
        [
            "-d",
            os.path.join("examples", "unit-testing", "calculator"),
            "--list-tests",
            "--json-output",
            str(json_output_path),
        ],
    )
    validate_cliresult(result)
    # test JSON
    json_report = load_json(str(json_output_path))
    assert json_report["testcase_nums"] == 0
    assert json_report["failure_nums"] == 0
    assert json_report["skipped_nums"] == 0
    assert len(json_report["test_suites"]) == 6
def test_group_and_custom_runner(clirunner, validate_cliresult, tmp_path: Path):
    project_dir = tmp_path / "project"
    project_dir.mkdir()
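
As an aside, a minimal sketch of reading the JSON report that test_list_tests generates (the report path is an assumption; the field names come from the assertions above):

import json

with open("report.json", encoding="utf8") as fp:  # path assumed
    report = json.load(fp)

# --list-tests executes nothing, so the counters stay at zero while
# every discovered (environment, test) pair is still reported
assert report["testcase_nums"] == 0
assert report["skipped_nums"] == 0
print(len(report["test_suites"]), "test suites discovered")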