From 262107691aea5bead1a4fc12f1fdd87e7c2c1e2f Mon Sep 17 00:00:00 2001
From: Angus Gratton
Date: Fri, 21 May 2021 19:09:33 +1000
Subject: [PATCH 1/2] pthread: Fix pthread_cond_timedwait returning early from timeout

Timeouts would sometimes happen before the abstime deadline because of
rounding errors when converting the timeout to milliseconds, and because
vTaskDelay(1) only delays until the start of the next tick, which can be
less than one full tick period.

Closes https://github.com/espressif/esp-idf/issues/6901
---
 components/pthread/pthread_cond_var.c         | 20 ++++-
 components/pthread/test/test_cxx_cond_var.cpp | 58 +++++++++++++++++--
 docs/en/api-reference/system/pthread.rst      |  2 +
 3 files changed, 73 insertions(+), 7 deletions(-)

diff --git a/components/pthread/pthread_cond_var.c b/components/pthread/pthread_cond_var.c
index 78265f85d9..03267a54c0 100644
--- a/components/pthread/pthread_cond_var.c
+++ b/components/pthread/pthread_cond_var.c
@@ -128,7 +128,8 @@ int pthread_cond_timedwait(pthread_cond_t *cv, pthread_mutex_t *mut, const struc

         gettimeofday(&cur_time, NULL);
         abs_time.tv_sec = to->tv_sec;
-        abs_time.tv_usec = to->tv_nsec / 1000;
+        // Round up nanoseconds to the next microsecond
+        abs_time.tv_usec = (to->tv_nsec + 1000 - 1) / 1000;

         if (timercmp(&abs_time, &cur_time, <)) {
             /* As per the pthread spec, if the time has already
@@ -137,14 +138,27 @@ int pthread_cond_timedwait(pthread_cond_t *cv, pthread_mutex_t *mut, const struc
             timeout_msec = 0;
         } else {
             timersub(&abs_time, &cur_time, &diff_time);
-            timeout_msec = (diff_time.tv_sec * 1000) + (diff_time.tv_usec / 1000);
+            // Round up timeout microseconds to the next millisecond
+            timeout_msec = (diff_time.tv_sec * 1000) +
+                           ((diff_time.tv_usec + 1000 - 1) / 1000);
         }

         if (timeout_msec <= 0) {
             return ETIMEDOUT;
         }

-        timeout_ticks = timeout_msec / portTICK_PERIOD_MS;
+        // Round up milliseconds to the next tick
+        timeout_ticks = (timeout_msec + portTICK_PERIOD_MS - 1) / portTICK_PERIOD_MS;
+
+        /* We have to add 1 more tick of delay
+
+           The reason for this is that vTaskDelay(1) will sleep until the start of the next tick,
+           which can be any amount of time up to one tick period. So if we don't add one more tick,
+           we're likely to time out slightly (< 1 tick period) before the requested timeout.
+           If we add 1 tick then we will time out slightly (< 1 tick period) after the
+           requested timeout.
+        */
+        timeout_ticks += 1;
     }

     esp_pthread_cond_waiter_t w;
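
For reference, the conversion implemented above reduces to the standalone sketch below. It is not part of the patch: TICK_PERIOD_MS and remaining_time_to_ticks() are made-up stand-ins for portTICK_PERIOD_MS and the inline code in pthread_cond_timedwait(), shown only to make the ceiling-rounding plus the extra tick concrete.

/* Standalone sketch of the rounding performed above (illustrative only). */
#include <stdio.h>

#define TICK_PERIOD_MS 10  /* e.g. CONFIG_FREERTOS_HZ = 100 */

/* Convert a remaining time of `sec` seconds + `usec` microseconds into ticks,
 * rounding up at each step and adding one extra tick so the wait can never
 * end before the requested deadline. */
static long remaining_time_to_ticks(long sec, long usec)
{
    long msec  = sec * 1000 + (usec + 1000 - 1) / 1000;          /* ceil to ms   */
    long ticks = (msec + TICK_PERIOD_MS - 1) / TICK_PERIOD_MS;   /* ceil to tick */
    return ticks + 1;  /* +1 because a delay of N ticks may expire up to one
                          tick period early relative to wall-clock time */
}

int main(void)
{
    /* With a 10 ms tick: 1 us remaining still waits 2 ticks, 25 ms waits 4 ticks. */
    printf("%ld\n", remaining_time_to_ticks(0, 1));      /* -> 2 */
    printf("%ld\n", remaining_time_to_ticks(0, 25000));  /* -> 4 */
    return 0;
}
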
diff --git a/components/pthread/test/test_cxx_cond_var.cpp b/components/pthread/test/test_cxx_cond_var.cpp
index ccb411ae63..0225f60710 100644
--- a/components/pthread/test/test_cxx_cond_var.cpp
+++ b/components/pthread/test/test_cxx_cond_var.cpp
@@ -4,13 +4,15 @@
 #include
 #include
 #include
+#include
+#include "freertos/FreeRTOS.h"
 #include "unity.h"

 #if __GTHREADS && __GTHREADS_CXX0X

-std::condition_variable cv;
-std::mutex cv_m;
-std::atomic<int> i{0};
+static std::condition_variable cv;
+static std::mutex cv_m;
+static std::atomic<int> i{0};

 static void waits(int idx, int timeout_ms)
 {
@@ -45,4 +47,52 @@ TEST_CASE("C++ condition_variable", "[std::condition_variable]")
     std::cout << "All threads joined\n";
 }

-#endif
+
+
+TEST_CASE("cxx: condition_variable can timeout", "[cxx]")
+{
+    std::condition_variable cv;
+    std::mutex mtx;
+    std::unique_lock<std::mutex> lck(mtx);
+    srand(99);
+    for (int i = 0; i < 10; ++i) {
+        usleep(rand() % 1000);
+        auto status = cv.wait_for(lck, std::chrono::milliseconds(200));
+        TEST_ASSERT_EQUAL(std::cv_status::timeout, status);
+    }
+}
+
+TEST_CASE("cxx: condition_variable timeout never before deadline", "[cxx]")
+{
+    using SysClock = std::chrono::system_clock;
+
+    std::mutex mutex;
+    std::condition_variable cond;
+    std::unique_lock<std::mutex> lock(mutex);
+
+    for (int i = 0; i < 25; ++i) {
+        auto timeout = std::chrono::milliseconds(portTICK_PERIOD_MS * (i+1));
+        auto deadline = SysClock::now() + timeout;
+
+        auto secs = std::chrono::time_point_cast<std::chrono::seconds>(deadline);
+        auto nsecs = std::chrono::duration_cast<std::chrono::nanoseconds>
+                     (deadline - secs);
+        struct timespec ts = {
+            .tv_sec = static_cast<time_t>(secs.time_since_epoch().count()),
+            .tv_nsec = static_cast<long>(nsecs.count())};
+        int rc = ::pthread_cond_timedwait(cond.native_handle(),
+                                          lock.mutex()->native_handle(), &ts);
+        auto status = (rc == ETIMEDOUT) ? std::cv_status::timeout :
+                      std::cv_status::no_timeout;
+        auto end = SysClock::now();
+        auto extra = end - deadline;
+        auto extra_us = extra / std::chrono::microseconds(1);
+        printf("timeout %lldms Extra time: %lldus, status: %s\n", timeout.count(), extra_us,
+               (status == std::cv_status::timeout) ? "timeout" : "no timeout");
+
+        // The timed wait should always return at least 1us after the timeout deadline
+        TEST_ASSERT_GREATER_THAN(0, extra_us);
+    }
+}
+
+#endif // __GTHREADS && __GTHREADS_CXX0X
diff --git a/docs/en/api-reference/system/pthread.rst b/docs/en/api-reference/system/pthread.rst
index c16ebf8a1e..1fb69f2e5f 100644
--- a/docs/en/api-reference/system/pthread.rst
+++ b/docs/en/api-reference/system/pthread.rst
@@ -94,6 +94,8 @@ Condition Variables

 Static initializer constant ``PTHREAD_COND_INITIALIZER`` is supported.

+* The resolution of ``pthread_cond_timedwait()`` timeouts is the RTOS tick period (see :ref:`CONFIG_FREERTOS_HZ`). Timeouts may be delayed up to one tick period after the requested timeout.
+
 .. note:: These functions can be called from tasks created using either pthread or FreeRTOS APIs

 Thread-Specific Data
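
The documentation note added above is easier to follow next to a typical caller. The sketch below is not part of either patch; wait_for_data(), data_ready and the other names are invented for illustration. It shows the usual pattern of building an absolute deadline and re-checking a predicate, which is the situation the tick-resolution caveat applies to: the wait may return up to one tick period after the deadline, never before it.

/* Sketch of typical pthread_cond_timedwait() usage with a relative timeout.
 * All names here are illustrative. */
#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cond = PTHREAD_COND_INITIALIZER;
static bool data_ready = false;

/* Wait up to timeout_ms for data_ready to become true.
 * Returns true if the predicate became true, false on timeout.
 * On ESP-IDF the wait can return up to one RTOS tick period later
 * than the requested deadline, but not earlier. */
static bool wait_for_data(long timeout_ms)
{
    struct timespec deadline;
    clock_gettime(CLOCK_REALTIME, &deadline);   /* abstime uses the realtime clock */
    deadline.tv_sec  += timeout_ms / 1000;
    deadline.tv_nsec += (timeout_ms % 1000) * 1000000L;
    if (deadline.tv_nsec >= 1000000000L) {      /* carry nanoseconds into seconds */
        deadline.tv_sec  += 1;
        deadline.tv_nsec -= 1000000000L;
    }

    pthread_mutex_lock(&lock);
    int rc = 0;
    while (!data_ready && rc != ETIMEDOUT) {    /* re-check the predicate on wakeup */
        rc = pthread_cond_timedwait(&cond, &lock, &deadline);
    }
    bool ok = data_ready;
    pthread_mutex_unlock(&lock);
    return ok;
}

static void *producer(void *arg)
{
    (void) arg;
    pthread_mutex_lock(&lock);
    data_ready = true;
    pthread_cond_signal(&cond);
    pthread_mutex_unlock(&lock);
    return NULL;
}

int main(void)
{
    pthread_t t;
    pthread_create(&t, NULL, producer, NULL);
    printf("%s\n", wait_for_data(500) ? "signalled" : "timed out");
    pthread_join(t, NULL);
    return 0;
}
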
From c685ef303bf739f7a067341da95c8259c478b796 Mon Sep 17 00:00:00 2001
From: Angus Gratton
Date: Wed, 16 Jun 2021 10:43:21 +1000
Subject: [PATCH 2/2] pthread: Fix race conditions in existing pthread UTs

---
 components/pthread/test/test_pthread.c       |  1 +
 components/pthread/test/test_pthread_cxx.cpp | 18 ++++++++++--------
 2 files changed, 11 insertions(+), 8 deletions(-)

diff --git a/components/pthread/test/test_pthread.c b/components/pthread/test/test_pthread.c
index 7453f7a75a..f58bbf849a 100644
--- a/components/pthread/test/test_pthread.c
+++ b/components/pthread/test/test_pthread.c
@@ -12,6 +12,7 @@ static void *compute_square(void *arg)
 {
     int *num = (int *) arg;
     *num = (*num) * (*num);
+    vTaskDelay(2); // ensure the test task has time to continue execution
    pthread_exit((void *) num);
    return NULL;
 }
diff --git a/components/pthread/test/test_pthread_cxx.cpp b/components/pthread/test/test_pthread_cxx.cpp
index cb92f14c84..0943190d37 100644
--- a/components/pthread/test/test_pthread_cxx.cpp
+++ b/components/pthread/test/test_pthread_cxx.cpp
@@ -80,14 +80,16 @@ TEST_CASE("pthread C++", "[pthread]")
     std::thread t3(thread_main);
     std::thread t4(thread_main);

-    if (t3.joinable()) {
-        std::cout << "Join thread " << std::hex << t3.get_id() << std::endl;
-        t3.join();
-    }
-    if (t4.joinable()) {
-        std::cout << "Join thread " << std::hex << t4.get_id() << std::endl;
-        t4.join();
-    }
+    TEST_ASSERT(t3.joinable());
+    TEST_ASSERT(t4.joinable());
+    std::cout << "Join thread " << std::hex << t3.get_id() << std::endl;
+    t3.join();
+    std::cout << "Join thread " << std::hex << t4.get_id() << std::endl;
+    t4.join();
+
+    // we don't know if/when t2 has finished, so delay another 2s before
+    // deleting the common mutexes
+    std::this_thread::sleep_for(std::chrono::seconds(2));

     global_sp_mtx.reset();  // avoid reported leak
     global_sp_recur_mtx.reset();
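
The second patch comes down to an ordering rule: shared test state must not be destroyed while another thread might still use it, so the tests now join the threads they can and add a delay where a join is not possible (the comment about t2 above). A standalone sketch of the join-before-destroy form of that rule, with made-up names (worker(), shared_lock), not taken from the patch:

/* Minimal sketch: a shared object may only be destroyed once every thread
 * that can touch it has been joined (or is otherwise known to be finished). */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t shared_lock = PTHREAD_MUTEX_INITIALIZER;

static void *worker(void *arg)
{
    (void) arg;
    pthread_mutex_lock(&shared_lock);    /* uses the shared mutex */
    puts("worker running");
    pthread_mutex_unlock(&shared_lock);
    return NULL;
}

int main(void)
{
    pthread_t t;
    pthread_create(&t, NULL, worker, NULL);

    /* Join first: after pthread_join() returns, the worker can no longer be
     * inside pthread_mutex_lock()/unlock(), so destroying the mutex is safe.
     * Destroying it before the join would be the kind of race the patch removes. */
    pthread_join(t, NULL);
    pthread_mutex_destroy(&shared_lock);
    return 0;
}
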