Mirror of https://github.com/espressif/esp-idf.git (synced 2025-07-30 18:57:19 +02:00)

Merge branch 'bugfix/ci_retries_when_download' into 'master'

CI: retry shell commands when downloading

See merge request espressif/esp-idf!9342

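The change itself is mechanical: each bare "git clone" in the CI job scripts below is wrapped with the new tools/ci/retry_failed.sh helper, which re-runs the command on failure with exponential backoff. A minimal sketch of the calling convention, with the default and an overridden policy (the override values are illustrative, not taken from this commit):

    # Default policy: up to 3 attempts, 1 s initial wait, doubled after each failure
    ./tools/ci/retry_failed.sh git clone $TEST_SCRIPT_REPOSITORY
    # Tune the policy per job via environment variables (example values only)
    RETRY_ATTEMPTS=5 RETRY_TIMEWAIT=2 ./tools/ci/retry_failed.sh git clone $TEST_SCRIPT_REPOSITORY
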
@@ -39,7 +39,7 @@ assign_test:
     # assign unit test cases
     - python tools/ci/python_packages/ttfw_idf/CIAssignUnitTest.py $UNIT_TEST_CASE_FILE $CI_TARGET_TEST_CONFIG_FILE $IDF_PATH/components/idf_test/unit_test/CIConfigs
     # clone test script to assign tests
-    - git clone $TEST_SCRIPT_REPOSITORY
+    - ./tools/ci/retry_failed.sh git clone $TEST_SCRIPT_REPOSITORY
     - python $CHECKOUT_REF_SCRIPT auto_test_script auto_test_script
     - cd auto_test_script
     # assgin integration test cases

@@ -70,7 +70,7 @@ update_test_cases:
     PYTHON_VER: 3
   script:
     - export GIT_SHA=$(echo ${CI_COMMIT_SHA} | cut -c 1-8)
-    - git clone $TEST_MANAGEMENT_REPO
+    - ./tools/ci/retry_failed.sh git clone $TEST_MANAGEMENT_REPO
     - python $CHECKOUT_REF_SCRIPT test-management test-management
     - cd test-management
     - echo $BOT_JIRA_ACCOUNT > ${BOT_ACCOUNT_CONFIG_FILE}

@@ -23,7 +23,7 @@
     - $BOT_LABEL_INTEGRATION_TEST
     - $BOT_LABEL_REGULAR_TEST
   script:
-    - git clone $SSC_REPOSITORY
+    - ./tools/ci/retry_failed.sh git clone $SSC_REPOSITORY
     - python $CHECKOUT_REF_SCRIPT SSC SSC
     - cd SSC
     - MAKEFLAGS= ./ci_build_ssc.sh $TARGET_NAME

@@ -177,7 +177,7 @@ deploy_test_result:
     # artifacts of job update_test_cases creates test-management folder
     # we need to remove it so we can clone test-management folder again
     - rm -rf test-management
-    - git clone $TEST_MANAGEMENT_REPO
+    - ./tools/ci/retry_failed.sh git clone $TEST_MANAGEMENT_REPO
     - python3 $CHECKOUT_REF_SCRIPT test-management test-management
     - cd test-management
     - echo $BOT_JIRA_ACCOUNT > ${BOT_ACCOUNT_CONFIG_FILE}

@@ -50,7 +50,7 @@
       - $IDF_PATH/examples/get-started/hello_world/tidybuild/report/*
     expire_in: 1 day
   script:
-    - git clone $IDF_ANALYSIS_UTILS static_analysis_utils && cd static_analysis_utils
+    - ./tools/ci/retry_failed.sh git clone $IDF_ANALYSIS_UTILS static_analysis_utils && cd static_analysis_utils
     # Setup parameters of triggered/regular job
     - export TRIGGERED_RELATIVE=${BOT_LABEL_STATIC_ANALYSIS-} && export TRIGGERED_ABSOLUTE=${BOT_LABEL_STATIC_ANALYSIS_ALL-} && export TARGET_BRANCH=${BOT_CUSTOMIZED_REVISION-}
     - ./analyze.sh $IDF_PATH/examples/get-started/hello_world/ $IDF_PATH/tools/ci/static-analysis-rules.yml $IDF_PATH/output.xml

@@ -132,7 +132,7 @@ build_template_app:
   script:
     # Set the variable for 'esp-idf-template' testing
     - ESP_IDF_TEMPLATE_GIT=${ESP_IDF_TEMPLATE_GIT:-"https://github.com/espressif/esp-idf-template.git"}
-    - git clone ${ESP_IDF_TEMPLATE_GIT}
+    - ./tools/ci/retry_failed.sh git clone ${ESP_IDF_TEMPLATE_GIT}
     # Try to use the same branch name for esp-idf-template that we're
     # using on esp-idf. If it doesn't exist then just stick to the default branch
     - python $CHECKOUT_REF_SCRIPT esp-idf-template esp-idf-template

@@ -39,7 +39,7 @@
     # first test if config file exists, if not exist, exit 0
     - test -e $CONFIG_FILE || exit 0
     # clone test env configs
-    - git clone $TEST_ENV_CONFIG_REPOSITORY
+    - ./tools/ci/retry_failed.sh git clone $TEST_ENV_CONFIG_REPOSITORY
     - python $CHECKOUT_REF_SCRIPT ci-test-runner-configs ci-test-runner-configs
     - cd tools/ci/python_packages/tiny_test_fw/bin
     # run test

@@ -77,7 +77,7 @@
     # first test if config file exists, if not exist, exit 0
     - test -e $CONFIG_FILE || exit 0
     # clone test env configs
-    - git clone $TEST_ENV_CONFIG_REPOSITORY
+    - ./tools/ci/retry_failed.sh git clone $TEST_ENV_CONFIG_REPOSITORY
     - python $CHECKOUT_REF_SCRIPT ci-test-runner-configs ci-test-runner-configs
     - cd tools/ci/python_packages/tiny_test_fw/bin
     # run test

@@ -164,10 +164,10 @@
     # first test if config file exists, if not exist, exit 0
     - test -e $CONFIG_FILE || exit 0
     # clone local test env configs
-    - git clone $TEST_ENV_CONFIG_REPOSITORY
+    - ./tools/ci/retry_failed.sh git clone $TEST_ENV_CONFIG_REPOSITORY
     - python $CHECKOUT_REF_SCRIPT ci-test-runner-configs ci-test-runner-configs
     # clone test bench
-    - git clone $TEST_SCRIPT_REPOSITORY
+    - ./tools/ci/retry_failed.sh git clone $TEST_SCRIPT_REPOSITORY
     - python $CHECKOUT_REF_SCRIPT auto_test_script auto_test_script
     - cd auto_test_script
     # run test

@@ -240,7 +240,7 @@ example_test_002:
     # first test if config file exists, if not exist, exit 0
     - test -e $CONFIG_FILE || exit 0
     # clone test env configs
-    - git clone $TEST_ENV_CONFIG_REPOSITORY
+    - ./tools/ci/retry_failed.sh git clone $TEST_ENV_CONFIG_REPOSITORY
     - python $CHECKOUT_REF_SCRIPT ci-test-runner-configs ci-test-runner-configs
     - cd tools/ci/python_packages/tiny_test_fw/bin
     # run test

@@ -612,10 +612,10 @@ nvs_compatible_test:
     # first test if config file exists, if not exist, exit 0
     - test -e $CONFIG_FILE || exit 0
     # clone local test env configs
-    - git clone $TEST_ENV_CONFIG_REPOSITORY
+    - ./tools/ci/retry_failed.sh git clone $TEST_ENV_CONFIG_REPOSITORY
     - python $CHECKOUT_REF_SCRIPT ci-test-runner-configs ci-test-runner-configs
     # clone test bench
-    - git clone $TEST_SCRIPT_REPOSITORY
+    - ./tools/ci/retry_failed.sh git clone $TEST_SCRIPT_REPOSITORY
     - python $CHECKOUT_REF_SCRIPT auto_test_script auto_test_script
     - cd auto_test_script
     # prepare nvs bins

@@ -56,6 +56,7 @@ tools/ci/get_supported_examples.sh
 tools/ci/mirror-submodule-update.sh
 tools/ci/multirun_with_pyenv.sh
 tools/ci/push_to_github.sh
+tools/ci/retry_failed.sh
 tools/ci/test_build_system.sh
 tools/ci/test_build_system_cmake.sh
 tools/ci/test_configure_ci_environment.sh

tools/ci/retry_failed.sh (new executable file, 45 lines)
@@ -0,0 +1,45 @@
+#!/usr/bin/env bash
+
+set -euo pipefail
+
+#
+# Retries a command RETRY_ATTEMPTS times in case of failure
+#
+# Inspired by https://stackoverflow.com/a/8351489
+#
+
+max_attempts=${RETRY_ATTEMPTS-3}
+RETRY_TIMEWAIT=${RETRY_TIMEWAIT-1}
+attempt=1
+exitCode=0
+whole_start=$(date +%s)
+attempt_start=whole_start
+
+while true; do
+    if "$@" ; then
+        exitCode=0
+        break
+    else
+        exitCode=$?
+    fi
+
+    if (( $attempt >= $max_attempts )) ; then
+        break
+    fi
+
+    echo "Failed! ("$@") Spent time $(( $(date '+%s') - ${attempt_start} )) sec. Retrying in ${RETRY_TIMEWAIT}..." 1>&2
+    sleep $RETRY_TIMEWAIT
+    attempt=$(( attempt + 1 ))
+    RETRY_TIMEWAIT=$(( RETRY_TIMEWAIT * 2 ))
+    attempt_start=$(date +%s)
+done
+
+if [[ $exitCode != 0 ]] ; then
+    echo -n "Totally failed! ("$@")" 1>&2
+else
+    echo -n "Done ("$@")" 1>&2
+fi
+
+echo " Spent time $(( $(date '+%s') - ${whole_start} )) sec in total" 1>&2
+
+exit $exitCode

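The helper propagates the exit code of the last attempt, so a job that still fails after all retries fails in GitLab exactly as before. A quick local sanity check of the default policy (an illustrative command, not part of this commit):

    # Runs "false" 3 times, sleeping 1 s and then 2 s between attempts,
    # and exits with the last attempt's code (1)
    ./tools/ci/retry_failed.sh false; echo "exit code: $?"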