Merge branch 'ci/test_pytest_embedded' into 'master'

CI: use pytest-embedded

See merge request espressif/esp-idf!15984
Fu Hanxi
2021-11-30 09:50:29 +00:00
16 changed files with 546 additions and 134 deletions


@@ -39,5 +39,7 @@ indent_size = 4
max_line_length = 120
[{*.sh,*.yml,*.yaml}]
indent_style = space
indent_size = 2
[*.ini]
indent_size = 2


@@ -69,6 +69,7 @@ variables:
TEST_ENV_CONFIG_REPO: "https://gitlab-ci-token:${BOT_TOKEN}@${CI_SERVER_HOST}:${CI_SERVER_PORT}/qa/ci-test-runner-configs.git"
CI_AUTO_TEST_SCRIPT_REPO_URL: "https://gitlab-ci-token:${BOT_TOKEN}@${CI_SERVER_HOST}:${CI_SERVER_PORT}/qa/auto_test_script.git"
CI_AUTO_TEST_SCRIPT_REPO_BRANCH: "ci/v3.1"
PYTEST_EMBEDDED_TAG: "v0.4.5"
.setup_tools_unless_target_test: &setup_tools_unless_target_test |
if [[ -n "$IDF_DONT_USE_MIRRORS" ]]; then
@@ -123,6 +124,19 @@ before_script:
- export PYTHONPATH="$IDF_PATH/tools:$IDF_PATH/tools/ci/python_packages:$PYTHONPATH"
- fetch_submodules
.before_script_pytest:
before_script:
- source tools/ci/utils.sh
- source tools/ci/setup_python.sh
- add_gitlab_ssh_keys
- source tools/ci/configure_ci_environment.sh
- *setup_tools_unless_target_test
- fetch_submodules
- cd /tmp
- retry_failed git clone --depth 1 --branch $PYTEST_EMBEDDED_TAG https://gitlab-ci-token:${BOT_TOKEN}@${CI_SERVER_HOST}:${CI_SERVER_PORT}/idf/pytest-embedded.git
- cd pytest-embedded && bash foreach.sh install
- cd $IDF_PATH
default:
retry:
max: 2


@@ -7,6 +7,45 @@
SIZE_INFO_LOCATION: "$CI_PROJECT_DIR/size_info.txt"
dependencies: []
.build_pytest_template:
extends:
- .build_template
- .before_script_pytest
dependencies: # set dependencies to null to avoid missing artifacts issue
needs:
- job: fast_template_app
artifacts: false
variables:
PYTHON_VER: 3.6.13
artifacts:
paths:
- "**/build*/size.json"
- "**/build*/build.log"
- "**/build*/*.bin"
- "**/build*/*.elf"
- "**/build*/*.map"
- "**/build*/flasher_args.json"
- "**/build*/config/sdkconfig.json"
- "**/build*/bootloader/*.bin"
- "**/build*/partition_table/*.bin"
- $SIZE_INFO_LOCATION
when: always
expire_in: 3 days
build_examples_pytest_esp32:
extends:
- .build_pytest_template
- .rules:build:example_test-esp32
script:
- python tools/ci/build_pytest_apps.py --all-pytest-apps --under-dir examples --target esp32 --size-info $SIZE_INFO_LOCATION -vv
build_examples_pytest_esp32c3:
extends:
- .build_pytest_template
- .rules:build:example_test-esp32c3
script:
- python tools/ci/build_pytest_apps.py --all-pytest-apps --under-dir examples --target esp32c3 --size-info $SIZE_INFO_LOCATION -vv
.build_template_app_template:
extends: .build_template
variables:


@@ -1,3 +1,46 @@
.pytest_template:
stage: target_test
extends: .before_script_pytest
artifacts:
when: always
paths:
- XUNIT_RESULT.xml
reports:
junit: XUNIT_RESULT.xml
script:
- pytest $TEST_DIR -m $TARGET_MARKER -m $ENV_MARKER --junitxml=XUNIT_RESULT.xml
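# e.g. for the example_test_pytest_esp32_generic job below, this expands to:
#   pytest examples -m esp32 -m generic --junitxml=XUNIT_RESULT.xml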
.pytest_examples_dir_template:
extends: .pytest_template
variables:
TEST_DIR: examples
example_test_pytest_esp32_generic:
extends:
- .pytest_examples_dir_template
- .rules:test:example_test-esp32
needs:
- build_examples_pytest_esp32
variables:
TARGET_MARKER: esp32
ENV_MARKER: generic
tags: # in gitlab 14.1 or later, we can use `parallel: matrix` with the `tags` keyword. https://docs.gitlab.com/ee/ci/jobs/job_control.html#run-a-matrix-of-parallel-trigger-jobs
- ESP32
- Example_GENERIC
example_test_pytest_esp32c3_generic:
extends:
- .pytest_examples_dir_template
- .rules:test:example_test-esp32c3
needs:
- build_examples_pytest_esp32c3
variables:
TARGET_MARKER: esp32c3
ENV_MARKER: generic
tags:
- ESP32C3
- Example_GENERIC
# for parallel jobs, CI_JOB_NAME will be "job_name index/total" (for example, "IT_001 1/2")
# we need to convert it to the pattern "job_name_index.yml"
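# e.g. (illustrative): "IT_001 1/2" -> "IT_001_1.yml"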
.define_config_file_name: &define_config_file_name |


@@ -150,6 +150,7 @@ disable=print-statement,
too-many-nested-blocks,
too-many-branches,
too-many-statements,
ungrouped-imports, # since we have isort in pre-commit
# Enable the message, report, category or checker with the given id(s). You can
# either give multiple identifier separated by comma (,) or put this option

conftest.py (new file, 131 lines)

@@ -0,0 +1,131 @@
# SPDX-FileCopyrightText: 2021 Espressif Systems (Shanghai) CO LTD
# SPDX-License-Identifier: Apache-2.0
# pylint: disable=W0621
# This file is a pytest root configuration file and provides the following functionality:
# 1. Defines a few fixtures that can be used across the whole project.
# 2. Defines a few hook functions.
#
# IDF uses [pytest](https://github.com/pytest-dev/pytest) and the
# [pytest-embedded plugin](https://github.com/espressif/pytest-embedded) as its example test framework.
#
# This is an experimental feature. If you find any bugs or have questions, please report them at
# https://github.com/espressif/pytest-embedded/issues
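#
# A minimal sketch of such a test, for illustration only (it follows the same pattern as the
# console example tests added in this MR; `dut` is provided by pytest-embedded and the
# markers are declared in pytest.ini; the expected string is hypothetical):
#
#     import pytest
#
#     @pytest.mark.esp32
#     @pytest.mark.generic
#     def test_hello(dut):
#         dut.expect('Hello world!')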
import logging
import os
import sys
from typing import List, Optional
import pytest
from _pytest.config import Config
from _pytest.fixtures import FixtureRequest
from pytest_embedded.plugin import parse_configuration
def _is_target_marker(marker: str) -> bool:
if marker.startswith('esp32'):
return True
if marker.startswith('esp8'):
return True
return False
@pytest.fixture(scope='session')
def target_markers(pytestconfig: Config) -> List[str]:
res = []
for item in pytestconfig.getini('markers'):
marker = item.split(':')[0]
if _is_target_marker(marker):
res.append(marker)
return res
@pytest.fixture(scope='session')
def env_markers(pytestconfig: Config) -> List[str]:
res = []
for item in pytestconfig.getini('markers'):
marker = item.split(':')[0]
if not marker.startswith('esp32'):
res.append(marker)
return res
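# Note: with only the markers declared in pytest.ini, `target_markers` yields
# ['esp32', 'esp32c3'] and `env_markers` yields ['generic']; plugins may register extra markers.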
@pytest.fixture(scope='session')
def param_markers(pytestconfig: Config) -> List[str]:
res: List[str] = []
offset = -1
while True:
try:
offset = pytestconfig.invocation_params.args.index('-m', offset + 1)
except ValueError:
return res
res.append(pytestconfig.invocation_params.args[offset + 1]) # we want the marker after '-m'
@pytest.fixture
def target(request: FixtureRequest, target_markers: List[str], param_markers: List[str]) -> Optional[str]:
param_target_markers = [marker for marker in param_markers if marker in target_markers]
if len(param_target_markers) > 1:
raise ValueError('Please specify only one target marker at a time')
elif len(param_target_markers) == 0:
target = None
else:
target = param_target_markers[0]
return getattr(request, 'param', None) or target
@pytest.fixture
def config(request: FixtureRequest) -> Optional[str]:
return getattr(request, 'param', None) or request.config.option.__dict__.get('config') or None
@pytest.fixture
@parse_configuration
def build_dir(request: FixtureRequest, app_path: str, target: Optional[str], config: Optional[str]) -> str:
"""
Check local build dir with the following priority:
1. build_<target>_<config>
2. build_<target>
3. build_<config>
4. build
Args:
request: pytest fixture
app_path: app path
target: target
config: config
Returns:
valid build directory
"""
param_or_cli: str = getattr(request, 'param', None) or request.config.option.__dict__.get('build_dir')
if param_or_cli is not None: # respect the parametrize and the cli
return param_or_cli
check_dirs = []
if target is not None and config is not None:
check_dirs.append(f'build_{target}_{config}')
if target is not None:
check_dirs.append(f'build_{target}')
if config is not None:
check_dirs.append(f'build_{config}')
check_dirs.append('build')
for check_dir in check_dirs:
binary_path = os.path.join(app_path, check_dir)
if os.path.isdir(binary_path):
logging.info(f'found valid binary path: {binary_path}')
return check_dir
logging.warning(f'binary path {binary_path} not found, trying the next candidate')
recommend_place = check_dirs[0]
logging.error(
f'No valid build directory found. Please build the binary with "idf.py -B {recommend_place} build" and run pytest again')
sys.exit(1)


@@ -1,22 +0,0 @@
# type: ignore
from __future__ import print_function
import ttfw_idf
@ttfw_idf.idf_example_test(env_tag='Example_GENERIC', target=['esp32', 'esp32c3'])
def test_examples_system_console_advanced(env, _):
dut = env.get_dut('console_example', 'examples/system/console/advanced', app_config_name='history')
print('Using binary path: {}'.format(dut.app.binary_path))
dut.start_app()
dut.expect('Command history enabled')
env.close_dut(dut.name)
dut = env.get_dut('console_example', 'examples/system/console/advanced', app_config_name='nohistory')
print('Using binary path: {}'.format(dut.app.binary_path))
dut.start_app()
dut.expect('Command history disabled')
if __name__ == '__main__':
test_examples_system_console_advanced()


@@ -0,0 +1,18 @@
# SPDX-FileCopyrightText: 2021 Espressif Systems (Shanghai) CO LTD
# SPDX-License-Identifier: CC0-1.0
import pytest
@pytest.mark.esp32
@pytest.mark.esp32c3
@pytest.mark.generic
@pytest.mark.parametrize('config', [
'history',
'nohistory',
], indirect=True)
def test_console_advanced(config, dut): # type: ignore
if config == 'history':
dut.expect('Command history enabled')
elif config == 'nohistory':
dut.expect('Command history disabled')


@@ -1,22 +0,0 @@
# type: ignore
from __future__ import print_function
import ttfw_idf
@ttfw_idf.idf_example_test(env_tag='Example_GENERIC', target=['esp32', 'esp32c3'])
def test_examples_system_console_basic(env, _):
dut = env.get_dut('console_example', 'examples/system/console/basic', app_config_name='history')
print('Using binary path: {}'.format(dut.app.binary_path))
dut.start_app()
dut.expect('Command history enabled')
env.close_dut(dut.name)
dut = env.get_dut('console_example', 'examples/system/console/basic', app_config_name='nohistory')
print('Using binary path: {}'.format(dut.app.binary_path))
dut.start_app()
dut.expect('Command history disabled')
if __name__ == '__main__':
test_examples_system_console_basic()


@@ -0,0 +1,18 @@
# SPDX-FileCopyrightText: 2021 Espressif Systems (Shanghai) CO LTD
# SPDX-License-Identifier: CC0-1.0
import pytest
@pytest.mark.esp32
@pytest.mark.esp32c3
@pytest.mark.generic
@pytest.mark.parametrize('config', [
'history',
'nohistory',
], indirect=True)
def test_console_basic(config, dut): # type: ignore
if config == 'history':
dut.expect('Command history enabled')
elif config == 'nohistory':
dut.expect('Command history disabled')

pytest.ini (new file, 17 lines)

@@ -0,0 +1,17 @@
[pytest]
# only files with the prefix `pytest_` are recognized as pytest test scripts.
python_files = pytest_*.py
addopts = --embedded-services esp,idf
markers =
esp32: support esp32 target
esp32c3: support esp32c3 target
generic: tests should be run on generic runners
# log related
log_auto_indent = True
log_cli = True
log_cli_level = INFO
log_cli_format = %(asctime)s %(levelname)s %(message)s
log_cli_date_format = %Y-%m-%d %H:%M:%S


@@ -10,6 +10,7 @@ import logging
import os.path
import re
import sys
from typing import List, Optional, TextIO
from find_build_apps import BUILD_SYSTEMS, BuildError, BuildItem, setup_logging
from find_build_apps.common import SIZE_JSON_FN, rmdir
@@ -21,7 +22,84 @@ LOG_ERROR_WARNING = re.compile(r'(error|warning):', re.IGNORECASE)
LOG_DEBUG_LINES = 25
def main(): # type: () -> None
def build_apps(
build_items: List[BuildItem],
parallel_count: int = 1,
parallel_index: int = 1,
dry_run: bool = False,
build_verbose: bool = False,
keep_going: bool = False,
output_build_list: Optional[TextIO] = None,
size_info: Optional[TextIO] = None
) -> None:
if not build_items:
logging.warning('Empty build list')
sys.exit(0)
num_builds = len(build_items)
num_jobs = parallel_count
job_index = parallel_index - 1 # convert to 0-based index
num_builds_per_job = (num_builds + num_jobs - 1) // num_jobs
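# e.g. (illustrative): 10 builds across 3 parallel jobs -> 4 builds per job;
# job 1 runs builds 1-4, job 2 runs builds 5-8, job 3 runs builds 9-10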
min_job_index = num_builds_per_job * job_index
if min_job_index >= num_builds:
logging.warning(
f'Nothing to do for job {job_index + 1} (build total: {num_builds}, per job: {num_builds_per_job})')
sys.exit(0)
max_job_index = min(num_builds_per_job * (job_index + 1) - 1, num_builds - 1)
logging.info('Total {} builds, max. {} builds per job, running builds {}-{}'.format(
num_builds, num_builds_per_job, min_job_index + 1, max_job_index + 1))
builds_for_current_job = build_items[min_job_index:max_job_index + 1]
for i, build_item in enumerate(builds_for_current_job):
index = i + min_job_index + 1
build_item.index = index
build_item.dry_run = dry_run
build_item.verbose = build_verbose
build_item.keep_going = keep_going
logging.debug('\tBuild {}: {}'.format(index, repr(build_item)))
if output_build_list:
output_build_list.write(build_item.to_json_expanded() + '\n')
failed_builds = []
for build_item in builds_for_current_job:
logging.info('Running build {}: {}'.format(build_item.index, repr(build_item)))
build_system_class = BUILD_SYSTEMS[build_item.build_system]
try:
build_system_class.build(build_item)
except BuildError as e:
logging.error(str(e))
if build_item.build_log_path:
log_filename = os.path.basename(build_item.build_log_path)
with open(build_item.build_log_path, 'r') as f:
lines = [line.rstrip() for line in f.readlines() if line.rstrip()] # non-empty lines
logging.debug('Error and warning lines from {}:'.format(log_filename))
for line in lines:
if LOG_ERROR_WARNING.search(line):
logging.warning('>>> {}'.format(line))
logging.debug('Last {} lines of {}:'.format(LOG_DEBUG_LINES, log_filename))
for line in lines[-LOG_DEBUG_LINES:]:
logging.debug('>>> {}'.format(line))
if keep_going:
failed_builds.append(build_item)
else:
sys.exit(1)
else:
if size_info:
build_item.write_size_info(size_info)
if not build_item.preserve:
logging.info(f'Removing build directory {build_item.build_path}')
# we only remove binaries here, log files are still needed by check_build_warnings.py
rmdir(build_item.build_path, exclude_file_pattern=SIZE_JSON_FN)
if failed_builds:
logging.error('The following builds have failed:')
for build in failed_builds:
logging.error('\t{}'.format(build))
sys.exit(1)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='ESP-IDF app builder')
parser.add_argument(
'-v',
@@ -87,76 +165,7 @@ def main(): # type: () -> None
help='Name of the file to read the list of builds from. If not specified, read from stdin.',
)
args = parser.parse_args()
setup_logging(args)
build_items = [BuildItem.from_json(line) for line in args.build_list]
if not build_items:
logging.warning('Empty build list')
SystemExit(0)
num_builds = len(build_items)
num_jobs = args.parallel_count
job_index = args.parallel_index - 1 # convert to 0-based index
num_builds_per_job = (num_builds + num_jobs - 1) // num_jobs
min_job_index = num_builds_per_job * job_index
if min_job_index >= num_builds:
logging.warn('Nothing to do for job {} (build total: {}, per job: {})'.format(
job_index + 1, num_builds, num_builds_per_job))
raise SystemExit(0)
max_job_index = min(num_builds_per_job * (job_index + 1) - 1, num_builds - 1)
logging.info('Total {} builds, max. {} builds per job, running builds {}-{}'.format(
num_builds, num_builds_per_job, min_job_index + 1, max_job_index + 1))
builds_for_current_job = build_items[min_job_index:max_job_index + 1]
for i, build_info in enumerate(builds_for_current_job):
index = i + min_job_index + 1
build_info.index = index
build_info.dry_run = args.dry_run
build_info.verbose = args.build_verbose
build_info.keep_going = args.keep_going
logging.debug(' Build {}: {}'.format(index, repr(build_info)))
if args.output_build_list:
args.output_build_list.write(build_info.to_json_expanded() + '\n')
failed_builds = []
for build_info in builds_for_current_job:
logging.info('Running build {}: {}'.format(build_info.index, repr(build_info)))
build_system_class = BUILD_SYSTEMS[build_info.build_system]
try:
build_system_class.build(build_info)
except BuildError as e:
logging.error(str(e))
if build_info.build_log_path:
log_filename = os.path.basename(build_info.build_log_path)
with open(build_info.build_log_path, 'r') as f:
lines = [line.rstrip() for line in f.readlines() if line.rstrip()] # non-empty lines
logging.debug('Error and warning lines from {}:'.format(log_filename))
for line in lines:
if LOG_ERROR_WARNING.search(line):
logging.warning('>>> {}'.format(line))
logging.debug('Last {} lines of {}:'.format(LOG_DEBUG_LINES, log_filename))
for line in lines[-LOG_DEBUG_LINES:]:
logging.debug('>>> {}'.format(line))
if args.keep_going:
failed_builds.append(build_info)
else:
raise SystemExit(1)
else:
if args.size_info:
build_info.write_size_info(args.size_info)
if not build_info.preserve:
logging.info('Removing build directory {}'.format(build_info.build_path))
# we only remove binaries here, log files are still needed by check_build_warnings.py
rmdir(build_info.build_path, exclude_file_pattern=SIZE_JSON_FN)
if failed_builds:
logging.error('The following build have failed:')
for build in failed_builds:
logging.error(' {}'.format(build))
raise SystemExit(1)
if __name__ == '__main__':
main()
items = [BuildItem.from_json(line) for line in args.build_list]
build_apps(items, args.parallel_count, args.parallel_index, args.dry_run, args.build_verbose,
args.keep_going, args.output_build_list, args.size_info)


@@ -0,0 +1,133 @@
# SPDX-FileCopyrightText: 2021 Espressif Systems (Shanghai) CO LTD
# SPDX-License-Identifier: Apache-2.0
"""
This file is used to generate binary files for the given paths.
"""
import argparse
import logging
import os
import sys
from typing import List
from idf_ci_utils import IDF_PATH, get_pytest_dirs
try:
from build_apps import build_apps
from find_apps import find_apps, find_builds_for_app
from find_build_apps import BuildItem, CMakeBuildSystem, config_rules_from_str, setup_logging
except ImportError:
sys.path.append(os.path.join(IDF_PATH, 'tools'))
from build_apps import build_apps
from find_apps import find_apps, find_builds_for_app
from find_build_apps import BuildItem, CMakeBuildSystem, config_rules_from_str, setup_logging
def main(args: argparse.Namespace) -> None:
if args.all_pytest_apps:
paths = get_pytest_dirs(IDF_PATH, args.under_dir)
args.recursive = True
elif args.paths is None:
paths = [os.getcwd()]
else:
paths = args.paths
app_dirs = []
for path in paths:
app_dirs += find_apps(CMakeBuildSystem, path, args.recursive, [], args.target)
if not app_dirs:
logging.error('No apps found')
sys.exit(1)
logging.info('Found {} apps'.format(len(app_dirs)))
app_dirs.sort()
# Find compatible configurations of each app, collect them as BuildItems
build_items: List[BuildItem] = []
config_rules = config_rules_from_str(args.config or [])
for app_dir in app_dirs:
app_dir = os.path.realpath(app_dir)
build_items += find_builds_for_app(
app_dir,
app_dir,
'build_@t_@w',
f'{app_dir}/build_@t_@w/build.log',
args.target,
'cmake',
config_rules,
True,
)
logging.info('Found {} builds'.format(len(build_items)))
build_items.sort(key=lambda x: x.build_path) # type: ignore
build_apps(build_items, args.parallel_count, args.parallel_index, False, args.build_verbose, True, None,
args.size_info)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Tool to generate build steps for IDF apps')
parser.add_argument(
'--recursive',
action='store_true',
help='Look for apps in the specified directories recursively.',
)
parser.add_argument('--target', required=True, help='Build apps for given target.')
parser.add_argument(
'--config',
default=['sdkconfig.ci=default', 'sdkconfig.ci.*=', '=default'],
action='append',
help='Adds configurations (sdkconfig file names) to build. This can either be ' +
'FILENAME[=NAME] or FILEPATTERN. FILENAME is the name of the sdkconfig file, ' +
'relative to the project directory, to be used. Optional NAME can be specified, ' +
'which can be used as a name of this configuration. FILEPATTERN is the name of ' +
'the sdkconfig file, relative to the project directory, with at most one wildcard. ' +
'The part captured by the wildcard is used as the name of the configuration.',
)
parser.add_argument(
'-p', '--paths',
nargs='*',
help='One or more app paths. Will use the current path if not specified.'
)
parser.add_argument(
'--all-pytest-apps',
action='store_true',
help='Look for all pytest apps. "--paths" is ignored if this flag is specified.'
)
parser.add_argument(
'--under-dir',
help='Build only the pytest apps under this directory if specified. '
'Ignored unless "--all-pytest-apps" is also given.'
)
parser.add_argument(
'--parallel-count',
default=1,
type=int,
help='Number of parallel build jobs.'
)
parser.add_argument(
'--parallel-index',
default=1,
type=int,
help='Index (1-based) of the job, out of the number specified by --parallel-count.',
)
parser.add_argument(
'--size-info',
type=argparse.FileType('a'),
help='If specified, the test case name and size info json will be written to this file'
)
parser.add_argument(
'-v',
'--verbose',
action='count',
help='Increase the logging level of the script. Can be specified multiple times.',
)
parser.add_argument(
'--build-verbose',
action='store_true',
help='Enable verbose output from build system.',
)
arguments = parser.parse_args()
setup_logging(arguments)
main(arguments)


@@ -10,11 +10,12 @@ import os
import re
import subprocess
import sys
from typing import List, Optional
IDF_PATH = os.path.abspath(os.getenv('IDF_PATH', os.path.join(os.path.dirname(__file__), '..', '..')))
def get_submodule_dirs(full_path=False): # type: (bool) -> list
def get_submodule_dirs(full_path: bool = False) -> List:
"""
To avoid issues that could be introduced by multiple OSes or additional dependencies,
we use python and git to get this output
@@ -49,7 +50,7 @@ def _check_git_filemode(full_path): # type: (str) -> bool
return False
def is_executable(full_path): # type: (str) -> bool
def is_executable(full_path: str) -> bool:
"""
os.X_OK will always return true on windows. Use git to check file mode.
:param full_path: file full path
@@ -60,7 +61,7 @@ def is_executable(full_path): # type: (str) -> bool
return os.access(full_path, os.X_OK)
def get_git_files(path=IDF_PATH, full_path=False): # type: (str, bool) -> list[str]
def get_git_files(path: str = IDF_PATH, full_path: bool = False) -> List[str]:
"""
Get the result of git ls-files
:param path: path to run git ls-files
@@ -87,7 +88,7 @@ def get_git_files(path=IDF_PATH, full_path=False): # type: (str, bool) -> list[
# https://github.com/python/cpython/pull/6299/commits/bfd63120c18bd055defb338c075550f975e3bec1
# In order to solve python https://bugs.python.org/issue9584
# glob pattern does not support brace expansion issue
def _translate(pat): # type: (str) -> str
def _translate(pat: str) -> str:
"""Translate a shell PATTERN to a regular expression.
There is no way to quote meta-characters.
"""
@@ -176,7 +177,7 @@ def _translate(pat): # type: (str) -> str
return res
def translate(pat): # type: (str) -> str
def translate(pat: str) -> str:
res = _translate(pat)
return r'(?s:%s)\Z' % res
@@ -194,3 +195,36 @@ magic_check_bytes = re.compile(b'([*?[{])')
# glob.magic_check = magic_check
# glob.magic_check_bytes = magic_check_bytes
# fnmatch.translate = translate
def is_in_directory(file_path: str, folder: str) -> bool:
return os.path.realpath(file_path).startswith(os.path.realpath(folder) + os.sep)
def get_pytest_dirs(folder: str, under_dir: Optional[str] = None) -> List[str]:
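"""
Run `pytest --collect-only` on `folder` and return the directories containing the
collected test files. If `under_dir` is given, only directories under it are returned.
"""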
from io import StringIO
import pytest
from _pytest.nodes import Item
class CollectPlugin:
def __init__(self) -> None:
self.nodes: List[Item] = []
def pytest_collection_modifyitems(self, items: List[Item]) -> None:
for item in items:
self.nodes.append(item)
collector = CollectPlugin()
sys_stdout = sys.stdout
sys.stdout = StringIO() # swallow the output
pytest.main(['--collect-only', folder], plugins=[collector])
sys.stdout = sys_stdout # restore sys.stdout
test_file_paths = set(node.fspath for node in collector.nodes)
if under_dir:
return [os.path.dirname(file) for file in test_file_paths if is_in_directory(file, under_dir)]
return [os.path.dirname(file) for file in test_file_paths]


@@ -102,7 +102,7 @@ def find_builds_for_app(app_path, work_dir, build_dir, build_log, target_arg,
))
if not build_items:
logging.debug('Adding build: app {}, default sdkconfig, config name "{}"'.format(app_path, default_config_name))
logging.debug('\tAdding build: app {}, default sdkconfig, config name "{}"'.format(app_path, default_config_name))
return [
BuildItem(
app_path,
@@ -134,16 +134,7 @@ def find_apps(build_system_class, path, recursive, exclude_list, target):
"""
build_system_name = build_system_class.NAME
logging.debug('Looking for {} apps in {}{}'.format(build_system_name, path, ' recursively' if recursive else ''))
if not recursive:
if exclude_list:
logging.warning('--exclude option is ignored when used without --recursive')
if not build_system_class.is_app(path):
logging.warning('Path {} specified without --recursive flag, but no {} app found there'.format(
path, build_system_name))
return []
return [path]
# The remaining part is for recursive == True
apps_found = [] # type: typing.List[str]
for root, dirs, _ in os.walk(path, topdown=True):
logging.debug('Entering {}'.format(root))
@@ -167,6 +158,12 @@ def find_apps(build_system_class, path, recursive, exclude_list, target):
logging.debug('Skipping, app has no supported targets')
continue
if not recursive:
if not apps_found:
logging.warning('Path {} specified without --recursive flag, but no {} app found there'.format(
path, build_system_name))
break # only check the top-most dir if "recursive" is unflagged
return apps_found


@@ -462,6 +462,6 @@ def setup_logging(args):
logging.basicConfig(
format='%(levelname)s: %(message)s',
stream=args.log_file or sys.stderr,
stream=getattr(args, 'log_file', None) or sys.stderr,
level=log_level,
)