Mirror of https://github.com/platformio/platformio-core.git

Improve printing of tabulated results
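
The change drops the hand-rolled, fixed-width format-string tables in the boards, lib stats, run, settings and test commands and renders them with the tabulate package instead (added to install_requires and to the isort known_third_party list below); it also moves the run/test summaries onto tabulate-based tables and adds print_labeled_bar and humanize_elapsed_time helpers to platformio.util. A minimal sketch of the call pattern the new code relies on follows; the board rows are illustrative, not taken from the repository:

import click
from tabulate import tabulate

# Each row is a plain tuple; tabulate computes the column widths and prints
# the headers, so the commands no longer maintain per-column format strings.
rows = [
    (click.style("uno", fg="cyan"), "ATMEGA328P", "16MHz"),
    (click.style("teensy36", fg="cyan"), "MK66FX1M0", "180MHz"),
]
click.echo(tabulate(rows, headers=["ID", "MCU", "Frequency"]))
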
@@ -1,3 +1,3 @@
[settings]
line_length=79
known_third_party=bottle,click,pytest,requests,SCons,semantic_version,serial,twisted,autobahn,jsonrpc
known_third_party=bottle,click,pytest,requests,SCons,semantic_version,serial,twisted,autobahn,jsonrpc,tabulate
@@ -79,10 +79,8 @@ DEFAULT_SETTINGS = {
},
"enable_telemetry": {
"description":
("Telemetry service <https://docs.platformio.org/page/"
"userguide/cmd_settings.html?#enable-telemetry> (Yes/No)"),
"value":
True
("Telemetry service <http://bit.ly/pio-telemetry> (Yes/No)"),
"value": True
},
"force_verbose": {
"description": "Force verbose output when processing environments",
@@ -15,6 +15,7 @@
import json

import click
from tabulate import tabulate

from platformio import fs
from platformio.compat import dump_json_to_unicode
@@ -42,32 +43,18 @@ def cli(query, installed, json_output): # pylint: disable=R0912
click.echo("")
click.echo("Platform: ", nl=False)
click.secho(platform, bold=True)
click.echo("-" * terminal_width)
click.echo("=" * terminal_width)
print_boards(boards)
return True


def print_boards(boards):
terminal_width, _ = click.get_terminal_size()
BOARDLIST_TPL = ("{type:<30} {mcu:<14} {frequency:<8} "
" {flash:<7} {ram:<6} {name}")
click.echo(
BOARDLIST_TPL.format(type=click.style("ID", fg="cyan"),
mcu="MCU",
frequency="Frequency",
flash="Flash",
ram="RAM",
name="Name"))
click.echo("-" * terminal_width)

for board in boards:
click.echo(
BOARDLIST_TPL.format(type=click.style(board['id'], fg="cyan"),
mcu=board['mcu'],
frequency="%dMHz" % (board['fcpu'] / 1000000),
flash=fs.format_filesize(board['rom']),
ram=fs.format_filesize(board['ram']),
name=board['name']))
tabulate([(click.style(b['id'], fg="cyan"), b['mcu'], "%dMHz" %
(b['fcpu'] / 1000000), fs.format_filesize(
b['rom']), fs.format_filesize(b['ram']), b['name'])
for b in boards],
headers=["ID", "MCU", "Frequency", "Flash", "RAM", "Name"]))


def _get_boards(installed=False):
@@ -19,6 +19,7 @@ from os.path import isdir, join

import click
import semantic_version
from tabulate import tabulate

from platformio import exception, fs, util
from platformio.commands import PlatformioCLI
@@ -486,66 +487,48 @@ def lib_stats(json_output):
if json_output:
return click.echo(dump_json_to_unicode(result))

printitem_tpl = "{name:<33} {url}"
printitemdate_tpl = "{name:<33} {date:23} {url}"

def _print_title(title):
click.secho(title.upper(), bold=True)
click.echo("*" * len(title))

def _print_header(with_date=False):
click.echo((printitemdate_tpl if with_date else printitem_tpl).format(
name=click.style("Name", fg="cyan"),
date="Date",
url=click.style("Url", fg="blue")))

terminal_width, _ = click.get_terminal_size()
click.echo("-" * terminal_width)

def _print_lib_item(item):
date = str(
time.strftime("%c", util.parse_date(item['date'])) if "date" in
item else "")
url = click.style("https://platformio.org/lib/show/%s/%s" %
(item['id'], quote(item['name'])),
fg="blue")
click.echo(
(printitemdate_tpl if "date" in item else printitem_tpl).format(
name=click.style(item['name'], fg="cyan"), date=date, url=url))

def _print_tag_item(name):
click.echo(
printitem_tpl.format(
name=click.style(name, fg="cyan"),
url=click.style("https://platformio.org/lib/search?query=" +
quote("keyword:%s" % name),
fg="blue")))

for key in ("updated", "added"):
_print_title("Recently " + key)
_print_header(with_date=True)
for item in result.get(key, []):
_print_lib_item(item)
tabular_data = [(click.style(item['name'], fg="cyan"),
time.strftime("%c", util.parse_date(item['date'])),
"https://platformio.org/lib/show/%s/%s" %
(item['id'], quote(item['name'])))
for item in result.get(key, [])]
table = tabulate(tabular_data,
headers=[
click.style("RECENTLY " + key.upper(), bold=True),
"Date", "URL"
])
click.echo(table)
click.echo()

_print_title("Recent keywords")
_print_header(with_date=False)
for item in result.get("lastkeywords"):
_print_tag_item(item)
click.echo()

_print_title("Popular keywords")
_print_header(with_date=False)
for item in result.get("topkeywords"):
_print_tag_item(item)
click.echo()
for key in ("lastkeywords", "topkeywords"):
tabular_data = [(click.style(name, fg="cyan"),
"https://platformio.org/lib/search?query=" +
quote("keyword:%s" % name))
for name in result.get(key, [])]
table = tabulate(
tabular_data,
headers=[
click.style(
("RECENT" if key == "lastkeywords" else "POPULAR") +
" KEYWORDS",
bold=True), "URL"
])
click.echo(table)
click.echo()

for key, title in (("dlday", "Today"), ("dlweek", "Week"), ("dlmonth",
"Month")):
_print_title("Featured: " + title)
_print_header(with_date=False)
for item in result.get(key, []):
_print_lib_item(item)
tabular_data = [(click.style(item['name'], fg="cyan"),
"https://platformio.org/lib/show/%s/%s" %
(item['id'], quote(item['name'])))
for item in result.get(key, [])]
table = tabulate(tabular_data,
headers=[
click.style("FEATURED: " + title.upper(),
bold=True), "URL"
])
click.echo(table)
click.echo()

return True
@@ -13,4 +13,3 @@
# limitations under the License.

from platformio.commands.run.command import cli
from platformio.commands.run.helpers import print_header
@@ -18,13 +18,14 @@ from os.path import isfile, join
from time import time

import click
from tabulate import tabulate

from platformio import exception, fs
from platformio import exception, fs, util
from platformio.commands.device import device_monitor as cmd_device_monitor
from platformio.commands.run.helpers import (clean_build_dir,
handle_legacy_libdeps,
print_summary)
handle_legacy_libdeps)
from platformio.commands.run.processor import EnvironmentProcessor
from platformio.commands.test.processor import CTX_META_TEST_IS_RUNNING
from platformio.project.config import ProjectConfig
from platformio.project.helpers import (find_project_dir_above,
get_project_build_dir)
@@ -73,6 +74,8 @@ def cli(ctx, environment, target, upload_port, project_dir, project_conf, jobs,
if isfile(project_dir):
project_dir = find_project_dir_above(project_dir)

is_test_running = CTX_META_TEST_IS_RUNNING in ctx.meta

with fs.cd(project_dir):
config = ProjectConfig.get_instance(
project_conf or join(project_dir, "platformio.ini"))
@@ -91,38 +94,112 @@ def cli(ctx, environment, target, upload_port, project_dir, project_conf, jobs,

handle_legacy_libdeps(project_dir, config)

results = []
start_time = time()
default_envs = config.default_envs()
for envname in config.envs():
results = []
for env in config.envs():
skipenv = any([
environment and envname not in environment, not environment
and default_envs and envname not in default_envs
environment and env not in environment, not environment
and default_envs and env not in default_envs
])
if skipenv:
results.append((envname, None))
results.append({"env": env})
continue

if not silent and any(status is not None
for (_, status) in results):
# print empty line between multi environment project
if not silent and any(
r.get("succeeded") is not None for r in results):
click.echo()

ep = EnvironmentProcessor(ctx, envname, config, target,
upload_port, silent, verbose, jobs)
result = (envname, ep.process())
results.append(result)
results.append(
process_env(ctx, env, config, environment, target, upload_port,
silent, verbose, jobs, is_test_running))

if result[1] and "monitor" in ep.get_build_targets() and \
"nobuild" not in ep.get_build_targets():
ctx.invoke(cmd_device_monitor,
environment=environment[0] if environment else None)
command_failed = any(r.get("succeeded") is False for r in results)

found_error = any(status is False for (_, status) in results)
if (not is_test_running and (command_failed or not silent)
and len(results) > 1):
print_processing_summary(results)

if (found_error or not silent) and len(results) > 1:
click.echo()
print_summary(results, start_time)

if found_error:
if command_failed:
raise exception.ReturnErrorCode(1)
return True


def process_env(ctx, name, config, environments, targets, upload_port, silent,
verbose, jobs, is_test_running):
if not is_test_running and not silent:
print_processing_header(name, config, verbose)

ep = EnvironmentProcessor(ctx, name, config, targets, upload_port, silent,
verbose, jobs)
result = {"env": name, "elapsed": time(), "succeeded": ep.process()}
result['elapsed'] = time() - result['elapsed']

# print footer on error or when is not unit testing
if not is_test_running and (not silent or not result['succeeded']):
print_processing_footer(result)

if (result['succeeded'] and "monitor" in ep.get_build_targets()
and "nobuild" not in ep.get_build_targets()):
ctx.invoke(cmd_device_monitor,
environment=environments[0] if environments else None)

return result


def print_processing_header(env, config, verbose=False):
env_dump = []
for k, v in config.items(env=env):
if verbose or k in ("platform", "framework", "board"):
env_dump.append("%s: %s" %
(k, ", ".join(v) if isinstance(v, list) else v))
click.echo("Processing %s (%s)" %
(click.style(env, fg="cyan", bold=True), "; ".join(env_dump)))
terminal_width, _ = click.get_terminal_size()
click.secho("-" * terminal_width, bold=True)


def print_processing_footer(result):
is_failed = not result.get("succeeded")
util.print_labeled_bar(
"[%s] Took %.2f seconds" %
((click.style("FAILED", fg="red", bold=True) if is_failed else
click.style("SUCCESS", fg="green", bold=True)), result['elapsed']),
is_error=is_failed)


def print_processing_summary(results):
tabular_data = []
succeeded_nums = 0
failed_nums = 0
elapsed = 0

for result in results:
elapsed += result.get("elapsed", 0)
if result.get("succeeded") is False:
failed_nums += 1
status_str = click.style("FAILED", fg="red")
elif result.get("succeeded") is None:
status_str = "IGNORED"
else:
succeeded_nums += 1
status_str = click.style("SUCCESS", fg="green")

tabular_data.append(
(click.style(result['env'], fg="cyan"), status_str,
util.humanize_elapsed_time(result.get("elapsed"))))

click.echo()
click.echo(tabulate(tabular_data,
headers=[
click.style(s, bold=True)
for s in ("Environment", "Status", "Time")
]),
err=failed_nums)

util.print_labeled_bar(
"%s%d succeeded in %s" %
("%d failed, " % failed_nums if failed_nums else "", succeeded_nums,
util.humanize_elapsed_time(elapsed)),
is_error=failed_nums,
fg="red" if failed_nums else "green")
@@ -14,7 +14,6 @@

from os import makedirs
from os.path import isdir, isfile, join
from time import time

import click

@@ -63,41 +62,3 @@ def clean_build_dir(build_dir, config):
makedirs(build_dir)
with open(checksum_file, "w") as f:
f.write(checksum)


def print_header(label, is_error=False, fg=None):
terminal_width, _ = click.get_terminal_size()
width = len(click.unstyle(label))
half_line = "=" * int((terminal_width - width - 2) / 2)
click.secho("%s %s %s" % (half_line, label, half_line),
fg=fg,
err=is_error)


def print_summary(results, start_time):
print_header("[%s]" % click.style("SUMMARY"))

succeeded_nums = 0
failed_nums = 0
envname_max_len = max(
[len(click.style(envname, fg="cyan")) for (envname, _) in results])
for (envname, status) in results:
if status is False:
failed_nums += 1
status_str = click.style("FAILED", fg="red")
elif status is None:
status_str = click.style("IGNORED", fg="yellow")
else:
succeeded_nums += 1
status_str = click.style("SUCCESS", fg="green")

format_str = "Environment {0:<%d}\t[{1}]" % envname_max_len
click.echo(format_str.format(click.style(envname, fg="cyan"),
status_str),
err=status is False)

print_header("%s%d succeeded in %.2f seconds" %
("%d failed, " % failed_nums if failed_nums else "",
succeeded_nums, time() - start_time),
is_error=failed_nums,
fg="red" if failed_nums else "green")
@@ -12,16 +12,10 @@
# See the License for the specific language governing permissions and
# limitations under the License.

from time import time

import click

from platformio import exception, telemetry
from platformio.commands.platform import \
platform_install as cmd_platform_install
from platformio.commands.run.helpers import print_header
from platformio.commands.test.processor import (CTX_META_TEST_IS_RUNNING,
CTX_META_TEST_RUNNING_NAME)
from platformio.commands.test.processor import CTX_META_TEST_RUNNING_NAME
from platformio.managers.platform import PlatformFactory

# pylint: disable=too-many-instance-attributes
@@ -29,8 +23,6 @@ from platformio.managers.platform import PlatformFactory

class EnvironmentProcessor(object):

DEFAULT_PRINT_OPTIONS = ("platform", "framework", "board")

def __init__( # pylint: disable=too-many-arguments
self, cmd_ctx, name, config, targets, upload_port, silent, verbose,
jobs):
@@ -44,37 +36,6 @@ class EnvironmentProcessor(object):
self.jobs = jobs
self.options = config.items(env=name, as_dict=True)

def process(self):
terminal_width, _ = click.get_terminal_size()
start_time = time()
env_dump = []

for k, v in self.options.items():
if self.verbose or k in self.DEFAULT_PRINT_OPTIONS:
env_dump.append(
"%s: %s" % (k, ", ".join(v) if isinstance(v, list) else v))

if not self.silent:
click.echo("Processing %s (%s)" % (click.style(
self.name, fg="cyan", bold=True), "; ".join(env_dump)))
click.secho("-" * terminal_width, bold=True)

result = self._run_platform()
is_error = result['returncode'] != 0

if self.silent and not is_error:
return True

if is_error or CTX_META_TEST_IS_RUNNING not in self.cmd_ctx.meta:
print_header(
"[%s] Took %.2f seconds" %
((click.style("ERROR", fg="red", bold=True) if
is_error else click.style("SUCCESS", fg="green", bold=True)),
time() - start_time),
is_error=is_error)

return not is_error

def get_build_variables(self):
variables = {"pioenv": self.name, "project_config": self.config.path}

@@ -92,7 +53,7 @@ class EnvironmentProcessor(object):
return [t for t in self.targets]
return self.config.get("env:" + self.name, "targets", [])

def _run_platform(self):
def process(self):
if "platform" not in self.options:
raise exception.UndefinedEnvPlatform(self.name)

@@ -113,5 +74,6 @@ class EnvironmentProcessor(object):
skip_default_package=True)
p = PlatformFactory.newPlatform(self.options['platform'])

return p.run(build_vars, build_targets, self.silent, self.verbose,
self.jobs)
result = p.run(build_vars, build_targets, self.silent, self.verbose,
self.jobs)
return result['returncode'] == 0
@@ -13,6 +13,7 @@
# limitations under the License.

import click
from tabulate import tabulate

from platformio import app
from platformio.compat import string_types
@@ -26,17 +27,7 @@ def cli():
@cli.command("get", short_help="Get existing setting/-s")
@click.argument("name", required=False)
def settings_get(name):

list_tpl = u"{name:<40} {value:<35} {description}"
terminal_width, _ = click.get_terminal_size()

click.echo(
list_tpl.format(name=click.style("Name", fg="cyan"),
value=(click.style("Value", fg="green") +
click.style(" [Default]", fg="yellow")),
description="Description"))
click.echo("-" * terminal_width)

tabular_data = []
for _name, _data in sorted(app.DEFAULT_SETTINGS.items()):
if name and name != _name:
continue
@@ -46,20 +37,19 @@ def settings_get(name):
if not isinstance(_value, string_types) else _value)
if isinstance(_value, bool):
_value_str = "Yes" if _value else "No"
_value_str = click.style(_value_str, fg="green")

if _value != _data['value']:
_defvalue_str = str(_data['value'])
if isinstance(_data['value'], bool):
_defvalue_str = "Yes" if _data['value'] else "No"
_value_str += click.style(" [%s]" % _defvalue_str, fg="yellow")
else:
_value_str += click.style(" ", fg="yellow")
_value_str += " [%s]" % _defvalue_str
_value_str = click.style(_value_str, fg="yellow")

click.echo(
list_tpl.format(name=click.style(_name, fg="cyan"),
value=_value_str,
description=_data['description']))
tabular_data.append((_name, _value_str, _data['description']))

click.echo(
tabulate(tabular_data,
headers=["Name", "Current value [Default]", "Description"]))


@cli.command("set", short_help="Set new value for the setting")
@@ -20,9 +20,9 @@ from os.path import isdir, join
from time import time

import click
from tabulate import tabulate

from platformio import exception, fs
from platformio.commands.run.helpers import print_header
from platformio import exception, fs, util
from platformio.commands.test.embedded import EmbeddedTestProcessor
from platformio.commands.test.native import NativeTestProcessor
from platformio.project.config import ProjectConfig
@@ -87,12 +87,12 @@ def cli( # pylint: disable=redefined-builtin
config.validate(envs=environment)

click.echo("Verbose mode can be enabled via `-v, --verbose` option")
click.echo("Collected %d items" % len(test_names))
click.secho("Collected %d items" % len(test_names), bold=True)

results = []
start_time = time()
default_envs = config.default_envs()
for testname in test_names:

for envname in config.envs():
section = "env:%s" % envname

@@ -114,9 +114,12 @@ def cli( # pylint: disable=redefined-builtin
for p in patterns['ignore']]),
]
if any(skip_conditions):
results.append((None, testname, envname))
results.append({"env": envname, "test": testname})
continue

click.echo()
print_processing_header(testname, envname)

cls = (NativeTestProcessor
if config.get(section, "platform") == "native" else
EmbeddedTestProcessor)
@@ -133,43 +136,24 @@ def cli( # pylint: disable=redefined-builtin
monitor_rts=monitor_rts,
monitor_dtr=monitor_dtr,
verbose=verbose))
results.append((tp.process(), testname, envname))
result = {
"env": envname,
"test": testname,
"elapsed": time(),
"succeeded": tp.process()
}
result['elapsed'] = time() - result['elapsed']
results.append(result)

print_processing_footer(result)

if without_testing:
return

passed_nums = 0
failed_nums = 0
testname_max_len = max([len(r[1]) for r in results])
envname_max_len = max([len(click.style(r[2], fg="cyan")) for r in results])
print_testing_summary(results)

print_header("[%s]" % click.style("TEST SUMMARY"))
click.echo()

for result in results:
status, testname, envname = result
if status is False:
failed_nums += 1
status_str = click.style("FAILED", fg="red")
elif status is None:
status_str = click.style("IGNORED", fg="yellow")
else:
passed_nums += 1
status_str = click.style("PASSED", fg="green")

format_str = "test/{:<%d} > {:<%d}\t[{}]" % (testname_max_len,
envname_max_len)
click.echo(format_str.format(testname, click.style(envname, fg="cyan"),
status_str),
err=status is False)

print_header("%s%d passed in %.2f seconds" %
("%d failed, " % failed_nums if failed_nums else "",
passed_nums, time() - start_time),
is_error=failed_nums,
fg="red" if failed_nums else "green")

if failed_nums:
command_failed = any(r.get("succeeded") is False for r in results)
if command_failed:
raise exception.ReturnErrorCode(1)


@@ -181,3 +165,58 @@ def get_test_names(test_dir):
if not names:
names = ["*"]
return names


def print_processing_header(test, env):
click.echo("Processing %s in %s environment" % (click.style(
test, fg="yellow", bold=True), click.style(env, fg="cyan", bold=True)))
terminal_width, _ = click.get_terminal_size()
click.secho("-" * terminal_width, bold=True)


def print_processing_footer(result):
is_failed = not result.get("succeeded")
util.print_labeled_bar(
"[%s] Took %.2f seconds" %
((click.style("FAILED", fg="red", bold=True) if is_failed else
click.style("PASSED", fg="green", bold=True)), result['elapsed']),
is_error=is_failed)


def print_testing_summary(results):
click.echo()
# util.print_labeled_bar("SUMMARY")

tabular_data = []
succeeded_nums = 0
failed_nums = 0
elapsed = 0

for result in results:
elapsed += result.get("elapsed", 0)
if result.get("succeeded") is False:
failed_nums += 1
status_str = click.style("FAILED", fg="red")
elif result.get("succeeded") is None:
status_str = "IGNORED"
else:
succeeded_nums += 1
status_str = click.style("PASSED", fg="green")

tabular_data.append(
(result['test'], click.style(result['env'], fg="cyan"), status_str,
util.humanize_elapsed_time(result.get("elapsed"))))

click.echo(tabulate(tabular_data,
headers=[
click.style(s, bold=True)
for s in ("Test", "Environment", "Status", "Time")
]),
err=failed_nums)

util.print_labeled_bar(
"%s%d succeeded in %s" %
("%d failed, " % failed_nums if failed_nums else "", succeeded_nums,
util.humanize_elapsed_time(elapsed)),
is_error=failed_nums,
fg="red" if failed_nums else "green")
@@ -28,7 +28,7 @@ class EmbeddedTestProcessor(TestProcessorBase):

def process(self):
if not self.options['without_building']:
self.print_progress("Building... (1/3)")
self.print_progress("Building...")
target = ["__test"]
if self.options['without_uploading']:
target.append("checkprogsize")
@@ -36,7 +36,7 @@ class EmbeddedTestProcessor(TestProcessorBase):
return False

if not self.options['without_uploading']:
self.print_progress("Uploading... (2/3)")
self.print_progress("Uploading...")
target = ["upload"]
if self.options['without_building']:
target.append("nobuild")
@@ -48,7 +48,7 @@ class EmbeddedTestProcessor(TestProcessorBase):
if self.options['without_testing']:
return None

self.print_progress("Testing... (3/3)")
self.print_progress("Testing...")
return self.run()

def run(self):
@@ -24,12 +24,12 @@ class NativeTestProcessor(TestProcessorBase):

def process(self):
if not self.options['without_building']:
self.print_progress("Building... (1/2)")
self.print_progress("Building...")
if not self.build_or_upload(["__test"]):
return False
if self.options['without_testing']:
return None
self.print_progress("Testing... (2/2)")
self.print_progress("Testing...")
return self.run()

def run(self):
@@ -20,7 +20,6 @@ from string import Template
import click

from platformio import exception
from platformio.commands.run.helpers import print_header
from platformio.project.helpers import get_project_test_dir

TRANSPORT_OPTIONS = {
@@ -100,12 +99,8 @@ class TestProcessorBase(object):
def get_baudrate(self):
return int(self.env_options.get("test_speed", self.DEFAULT_BAUDRATE))

def print_progress(self, text, is_error=False):
click.echo()
print_header("[test/%s > %s] %s" %
(click.style(self.test_name, fg="yellow"),
click.style(self.env_name, fg="cyan"), text),
is_error=is_error)
def print_progress(self, text):
click.secho(text, bold=self.options.get("verbose"))

def build_or_upload(self, target):
if not self._outputcpp_generated:
@@ -115,9 +110,6 @@ class TestProcessorBase(object):
if self.test_name != "*":
self.cmd_ctx.meta[CTX_META_TEST_RUNNING_NAME] = self.test_name

if not self.options['verbose']:
click.echo("Please wait...")

try:
from platformio.commands.run import cli as cmd_run
return self.cmd_ctx.invoke(cmd_run,
@@ -13,6 +13,7 @@
# limitations under the License.

import json
import math
import os
import platform
import re
@@ -445,6 +446,31 @@ def merge_dicts(d1, d2, path=None):
return d1


def print_labeled_bar(label, is_error=False, fg=None):
terminal_width, _ = click.get_terminal_size()
width = len(click.unstyle(label))
half_line = "=" * int((terminal_width - width - 2) / 2)
click.secho("%s %s %s" % (half_line, label, half_line),
fg=fg,
err=is_error)


def humanize_elapsed_time(total):
total = total or 0
constants = ((3600 * 24, "day"), (3600, "hour"), (60, "minute"),
(1, "second"))
tokens = []
for coef, name in constants:
t = (math.floor if total > 60 else round)(total / coef)
if t == 0:
continue
tokens.append("%d %s%s" % (t, name, "s" if t > 1 else ""))
total -= t * coef
if len(tokens) > 1:
tokens[-1] = "and %s" % tokens[-1]
return ", ".join(tokens)


def get_original_version(version):
if version.count(".") != 2:
return None
setup.py
@@ -23,7 +23,8 @@ install_requires = [
"colorama",
"pyserial>=3,<4,!=3.3",
"requests>=2.4.0,<3",
"semantic_version>=2.5.0,<3"
"semantic_version>=2.5.0,<3",
"tabulate>=0.8.3"
]

setup(
@@ -26,5 +26,5 @@ def test_local_env():
])
if result['returncode'] != 1:
pytest.fail(result)
assert all([s in result['out']
assert all([s in result['err']
for s in ("PASSED", "IGNORED", "FAILED")]), result['out']