Merge "bionic: fix pthread_mutex_timedlock for PI mutexes" into main

commit 46f0ddb29f
Author: Andy Hung
Committer: Gerrit Code Review
Date: 2023-12-14 17:52:55 +00:00

5 changed files with 88 additions and 26 deletions


@@ -28,6 +28,7 @@
 #include "private/bionic_futex.h"
+#include <stdatomic.h>
 #include <time.h>
 #include "private/bionic_time_conversions.h"
@@ -35,7 +36,6 @@
 static inline __always_inline int FutexWithTimeout(volatile void* ftx, int op, int value,
                                                    bool use_realtime_clock,
                                                    const timespec* abs_timeout, int bitset) {
-  const timespec* futex_abs_timeout = abs_timeout;
   // pthread's and semaphore's default behavior is to use CLOCK_REALTIME, however this behavior is
   // essentially never intended, as that clock is prone to change discontinuously.
   //
@@ -46,16 +46,26 @@ static inline __always_inline int FutexWithTimeout(volatile void* ftx, int op, int value,
   // We have seen numerous bugs directly attributable to this difference. Therefore, we provide
   // this general workaround to always use CLOCK_MONOTONIC for waiting, regardless of what the input
   // timespec is.
-  timespec converted_monotonic_abs_timeout;
-  if (abs_timeout && use_realtime_clock) {
-    monotonic_time_from_realtime_time(converted_monotonic_abs_timeout, *abs_timeout);
-    if (converted_monotonic_abs_timeout.tv_sec < 0) {
+  timespec converted_timeout;
+  if (abs_timeout) {
+    if ((op & FUTEX_CMD_MASK) == FUTEX_LOCK_PI) {
+      if (!use_realtime_clock) {
+        realtime_time_from_monotonic_time(converted_timeout, *abs_timeout);
+        abs_timeout = &converted_timeout;
+      }
+    } else {
+      op &= ~FUTEX_CLOCK_REALTIME;
+      if (use_realtime_clock) {
+        monotonic_time_from_realtime_time(converted_timeout, *abs_timeout);
+        abs_timeout = &converted_timeout;
+      }
+    }
+    if (abs_timeout->tv_sec < 0) {
       return -ETIMEDOUT;
     }
-    futex_abs_timeout = &converted_monotonic_abs_timeout;
   }
 
-  return __futex(ftx, op, value, futex_abs_timeout, bitset);
+  return __futex(ftx, op, value, abs_timeout, bitset);
 }
 
 int __futex_wait_ex(volatile void* ftx, bool shared, int value, bool use_realtime_clock,
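
Illustrative caller-level sketch, not part of this change (the helper name is invented): per the comment above, a POSIX wall-clock deadline handed to pthread_mutex_timedlock() reaches FutexWithTimeout() via __futex_wait_ex() for normal mutexes or __futex_pi_lock_ex() for PI mutexes, and is re-expressed on whichever clock the chosen futex operation actually honors.

#include <pthread.h>
#include <time.h>

// Sketch only: try to lock `m` with a deadline one second from now, as POSIX defines it
// (an absolute CLOCK_REALTIME time). Returns 0 on success or ETIMEDOUT on timeout.
int lock_with_one_second_deadline(pthread_mutex_t* m) {
  timespec deadline;
  clock_gettime(CLOCK_REALTIME, &deadline);
  deadline.tv_sec += 1;
  return pthread_mutex_timedlock(m, &deadline);
}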
@@ -66,6 +76,22 @@ int __futex_wait_ex(volatile void* ftx, bool shared, int value, bool use_realtime_clock,
 int __futex_pi_lock_ex(volatile void* ftx, bool shared, bool use_realtime_clock,
                        const timespec* abs_timeout) {
-  return FutexWithTimeout(ftx, (shared ? FUTEX_LOCK_PI : FUTEX_LOCK_PI_PRIVATE), 0,
-                          use_realtime_clock, abs_timeout, 0);
+  // We really want FUTEX_LOCK_PI2, which defaults to CLOCK_MONOTONIC, but it isn't supported
+  // on Linux before 5.14. FUTEX_LOCK_PI uses CLOCK_REALTIME. Here we verify support.
+  static atomic_int lock_op = 0;
+  int op = atomic_load_explicit(&lock_op, memory_order_relaxed);
+  if (op == 0) {
+    uint32_t tmp = 0;
+    if (__futex(&tmp, FUTEX_LOCK_PI2, 0, nullptr, 0) == 0) {
+      __futex(&tmp, FUTEX_UNLOCK_PI, 0, nullptr, 0);
+      op = FUTEX_LOCK_PI2;
+    } else {
+      op = FUTEX_LOCK_PI;
+    }
+    atomic_store_explicit(&lock_op, op, memory_order_relaxed);
+  }
+  if (!shared) op |= FUTEX_PRIVATE_FLAG;
+  return FutexWithTimeout(ftx, op, 0 /* value */, use_realtime_clock, abs_timeout, 0 /* bitset */);
 }
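
The runtime probe above can be reproduced outside bionic as well. A standalone sketch under these assumptions: the wrapper names sys_futex and pi_lock_op are invented for illustration, SYS_futex is available on the target ABI, and FUTEX_LOCK_PI2 (UAPI value 13, Linux 5.14+) may be missing from older headers, hence the fallback definition.

#include <linux/futex.h>
#include <stdint.h>
#include <sys/syscall.h>
#include <time.h>
#include <unistd.h>

#include <atomic>

#ifndef FUTEX_LOCK_PI2
#define FUTEX_LOCK_PI2 13  // only present in UAPI headers from Linux 5.14 onwards
#endif

// Thin wrapper over the raw futex syscall (illustrative; not bionic's __futex).
static long sys_futex(uint32_t* uaddr, int op, uint32_t val, const timespec* timeout) {
  return syscall(SYS_futex, uaddr, op, val, timeout, nullptr, 0);
}

// Decide once per process which PI lock op the kernel supports, mirroring the logic above:
// FUTEX_LOCK_PI2 on an uncontended scratch word succeeds (and is immediately unlocked) on
// 5.14+ kernels, and fails with ENOSYS on older ones, where we fall back to FUTEX_LOCK_PI.
static int pi_lock_op() {
  static std::atomic<int> cached{0};
  int op = cached.load(std::memory_order_relaxed);
  if (op == 0) {
    uint32_t scratch = 0;  // value 0 means "unlocked", so the probe lock succeeds immediately
    if (sys_futex(&scratch, FUTEX_LOCK_PI2, 0, nullptr) == 0) {
      sys_futex(&scratch, FUTEX_UNLOCK_PI, 0, nullptr);
      op = FUTEX_LOCK_PI2;
    } else {
      op = FUTEX_LOCK_PI;
    }
    cached.store(op, std::memory_order_relaxed);
  }
  return op;
}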


@@ -52,23 +52,32 @@ void timeval_from_timespec(timeval& tv, const timespec& ts) {
   tv.tv_usec = ts.tv_nsec / 1000;
 }
 
-void monotonic_time_from_realtime_time(timespec& monotonic_time, const timespec& realtime_time) {
-  monotonic_time = realtime_time;
+static void convert_timespec_clocks(timespec& new_time, clockid_t new_clockbase,
+                                    const timespec& old_time, clockid_t old_clockbase) {
   // get reference clocks
-  timespec cur_monotonic_time;
-  clock_gettime(CLOCK_MONOTONIC, &cur_monotonic_time);
-  timespec cur_realtime_time;
-  clock_gettime(CLOCK_REALTIME, &cur_realtime_time);
+  timespec new_clock;
+  clock_gettime(new_clockbase, &new_clock);
+  timespec old_clock;
+  clock_gettime(old_clockbase, &old_clock);
 
-  monotonic_time.tv_nsec -= cur_realtime_time.tv_nsec;
-  monotonic_time.tv_nsec += cur_monotonic_time.tv_nsec;
-  if (monotonic_time.tv_nsec >= NS_PER_S) {
-    monotonic_time.tv_nsec -= NS_PER_S;
-    monotonic_time.tv_sec += 1;
-  } else if (monotonic_time.tv_nsec < 0) {
-    monotonic_time.tv_nsec += NS_PER_S;
-    monotonic_time.tv_sec -= 1;
+  // compute new time by moving old delta to the new clock.
+  new_time.tv_sec = old_time.tv_sec - old_clock.tv_sec + new_clock.tv_sec;
+  new_time.tv_nsec = old_time.tv_nsec - old_clock.tv_nsec + new_clock.tv_nsec;
+  // correct nsec to second wrap.
+  if (new_time.tv_nsec >= NS_PER_S) {
+    new_time.tv_nsec -= NS_PER_S;
+    new_time.tv_sec += 1;
+  } else if (new_time.tv_nsec < 0) {
+    new_time.tv_nsec += NS_PER_S;
+    new_time.tv_sec -= 1;
   }
-  monotonic_time.tv_sec -= cur_realtime_time.tv_sec;
-  monotonic_time.tv_sec += cur_monotonic_time.tv_sec;
 }
+
+void monotonic_time_from_realtime_time(timespec& monotonic_time, const timespec& realtime_time) {
+  convert_timespec_clocks(monotonic_time, CLOCK_MONOTONIC, realtime_time, CLOCK_REALTIME);
+}
+
+void realtime_time_from_monotonic_time(timespec& realtime_time, const timespec& monotonic_time) {
+  convert_timespec_clocks(realtime_time, CLOCK_REALTIME, monotonic_time, CLOCK_MONOTONIC);
+}
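
To make the delta-shift concrete, a worked example with made-up clock readings: say clock_gettime() reports CLOCK_REALTIME = 1000.9 s and CLOCK_MONOTONIC = 50.2 s at the moment of conversion. A CLOCK_REALTIME deadline of 1002.0 s then becomes 1002.0 - 1000.9 + 50.2 = 51.3 s on the CLOCK_MONOTONIC timebase. In the struct arithmetic, tv_nsec comes out as 0 - 900000000 + 200000000 = -700000000, so the "< 0" branch adds NS_PER_S and borrows one second from tv_sec (1002 - 1000 + 50 = 52, minus the borrow = 51), giving {tv_sec = 51, tv_nsec = 300000000}.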


@@ -16,7 +16,7 @@
 #pragma once
 
-#define NS_PER_S 1000000000
+#define NS_PER_S 1'000'000'000LL
 
 // Size of the shadow call stack. This can be small because these stacks only
 // contain return addresses. This must be a power of 2 so the mask trick works.
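
A plausible reason for the new suffix (my inference, not stated in the change): the tests below compute ts.tv_sec * NS_PER_S, and with a plain int literal that multiplication is performed in 32-bit arithmetic wherever time_t is 32 bits wide, overflowing before the product is widened to int64_t. The LL suffix forces the arithmetic into 64 bits; the digit separators are a purely cosmetic C++14 feature. A minimal illustration with invented names:

#include <stdint.h>
#include <time.h>

constexpr long long kNsPerSec = 1'000'000'000LL;  // same value as the new NS_PER_S

// With a long long constant the multiply happens in 64 bits even if time_t is 32 bits;
// with the old plain-int constant it could wrap first and only then be widened.
int64_t timespec_to_ns(const timespec& ts) {
  return ts.tv_sec * kNsPerSec + ts.tv_nsec;
}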


@@ -45,6 +45,9 @@ __LIBC_HIDDEN__ void timeval_from_timespec(timeval& tv, const timespec& ts);
 __LIBC_HIDDEN__ void monotonic_time_from_realtime_time(timespec& monotonic_time,
                                                        const timespec& realtime_time);
+__LIBC_HIDDEN__ void realtime_time_from_monotonic_time(timespec& realtime_time,
+                                                       const timespec& monotonic_time);
 
 __END_DECLS
 
 static inline int check_timespec(const timespec* ts, bool null_allowed) {


@@ -2398,6 +2398,20 @@ static void pthread_mutex_timedlock_helper(clockid_t clock,
   ts.tv_sec = -1;
   ASSERT_EQ(ETIMEDOUT, lock_function(&m, &ts));
 
+  // check we wait long enough for the lock.
+  ASSERT_EQ(0, clock_gettime(clock, &ts));
+  const int64_t start_ns = ts.tv_sec * NS_PER_S + ts.tv_nsec;
+
+  // add a second to get deadline.
+  ts.tv_sec += 1;
+
+  ASSERT_EQ(ETIMEDOUT, lock_function(&m, &ts));
+
+  // The timedlock must have waited at least 1 second before returning.
+  clock_gettime(clock, &ts);
+  const int64_t end_ns = ts.tv_sec * NS_PER_S + ts.tv_nsec;
+  ASSERT_GT(end_ns - start_ns, NS_PER_S);
+
   // If the mutex is unlocked, pthread_mutex_timedlock should succeed.
   ASSERT_EQ(0, pthread_mutex_unlock(&m));
@@ -2443,7 +2457,11 @@ static void pthread_mutex_timedlock_pi_helper(clockid_t clock,
   timespec ts;
   clock_gettime(clock, &ts);
+  const int64_t start_ns = ts.tv_sec * NS_PER_S + ts.tv_nsec;
+
+  // add a second to get deadline.
   ts.tv_sec += 1;
+
   ASSERT_EQ(0, lock_function(&m.lock, &ts));
 
   struct ThreadArgs {
@@ -2472,6 +2490,12 @@ static void pthread_mutex_timedlock_pi_helper(clockid_t clock,
   void* result;
   ASSERT_EQ(0, pthread_join(thread, &result));
   ASSERT_EQ(ETIMEDOUT, reinterpret_cast<intptr_t>(result));
+
+  // The timedlock must have waited at least 1 second before returning.
+  clock_gettime(clock, &ts);
+  const int64_t end_ns = ts.tv_sec * NS_PER_S + ts.tv_nsec;
+  ASSERT_GT(end_ns - start_ns, NS_PER_S);
+
   ASSERT_EQ(0, pthread_mutex_unlock(&m.lock));
 }
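
Read together, the new timing assertions document the bug being fixed: the kernel measures FUTEX_LOCK_PI timeouts against CLOCK_REALTIME (as the new comment in bionic_futex also notes), but the old FutexWithTimeout() always handed the kernel a deadline expressed on the CLOCK_MONOTONIC timebase, converting realtime inputs and leaving monotonic ones untouched. Because the monotonic clock (roughly time since boot) normally reads far earlier than the realtime clock, that deadline looked long expired, and a contended pthread_mutex_timedlock() on a priority-inheritance mutex would return ETIMEDOUT almost immediately. ASSERT_GT(end_ns - start_ns, NS_PER_S) fails in that case and passes once the timeout is expressed on the clock the chosen futex op actually uses.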