Merge "Clean up __builtin_expect usage."
This commit is contained in:
commit
9562d38df1
4 changed files with 26 additions and 48 deletions
|
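The change is mechanical: the per-file __likely/__unlikely wrapper macros are deleted and every call site switches to the shared __predict_true/__predict_false hints instead. As a rough illustration only (the names below are placeholders and the definitions are an assumption, not bionic's exact headers), hint macros of this kind wrap __builtin_expect, and the !!(cond) forces any non-zero condition to exactly 1 so it can be compared against the expected constant:

/* Illustrative sketch, not bionic's actual definitions. */
#include <stddef.h>
#include <stdio.h>

#define my_predict_true(cond)  __builtin_expect(!!(cond), 1)   /* usually taken */
#define my_predict_false(cond) __builtin_expect(!!(cond), 0)   /* rarely taken  */

static int checked_strlen(const char* s, size_t* out)
{
    if (my_predict_false(s == NULL || out == NULL))   /* error path, marked unlikely */
        return -1;

    size_t n = 0;
    while (s[n] != '\0')
        ++n;
    *out = n;
    return 0;
}

int main(void)
{
    size_t n;
    if (my_predict_true(checked_strlen("hello", &n) == 0))   /* success is the common case */
        printf("length = %zu\n", n);
    return 0;
}

The hint only steers code layout and static branch prediction; it never changes what the condition evaluates to.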
@@ -53,9 +53,6 @@
  *
  */
 
-#define  __likely(cond)    __builtin_expect(!!(cond), 1)
-#define  __unlikely(cond)  __builtin_expect(!!(cond), 0)
-
 #define RWLOCKATTR_DEFAULT     0
 #define RWLOCKATTR_SHARED_MASK 0x0010
 
@@ -212,7 +209,7 @@ int pthread_rwlock_tryrdlock(pthread_rwlock_t *rwlock)
         return EINVAL;
 
     pthread_mutex_lock(&rwlock->lock);
-    if (__unlikely(!read_precondition(rwlock, __get_thread()->tid)))
+    if (__predict_false(!read_precondition(rwlock, __get_thread()->tid)))
         ret = EBUSY;
     else
         rwlock->numLocks ++;
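The tryrdlock hunk shows the shape shared by all four rwlock functions in this file: take the internal mutex, test a read or write precondition, and either fail or account for the new holder. A minimal self-contained sketch of that pattern follows; the field and function names (numLocks, pendingReaders, read_precondition) mirror the diff, but the struct layout and the precondition logic here are simplified assumptions, not bionic's real implementation.

#include <errno.h>
#include <pthread.h>

typedef struct {
    pthread_mutex_t lock;            /* protects the fields below             */
    pthread_cond_t  cond;            /* signalled when the rwlock is released */
    int             numLocks;        /* simplified: >0 readers, -1 a writer   */
    int             writerThreadId;  /* owner tid when write-locked           */
    int             pendingReaders;  /* bumped by the timed variants while waiting */
} my_rwlock_t;

/* Reading is allowed when nobody else holds the lock for writing. */
static int read_precondition(const my_rwlock_t* rwlock, int tid)
{
    return rwlock->numLocks >= 0 || rwlock->writerThreadId == tid;
}

int my_rwlock_tryrdlock(my_rwlock_t* rwlock, int tid)
{
    int ret = 0;

    pthread_mutex_lock(&rwlock->lock);
    if (!read_precondition(rwlock, tid))
        ret = EBUSY;                 /* a writer is active: fail immediately */
    else
        rwlock->numLocks++;          /* one more reader */
    pthread_mutex_unlock(&rwlock->lock);

    return ret;
}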
@@ -230,7 +227,7 @@ int pthread_rwlock_timedrdlock(pthread_rwlock_t *rwlock, const struct timespec *
 
     pthread_mutex_lock(&rwlock->lock);
     int tid = __get_thread()->tid;
-    if (__unlikely(!read_precondition(rwlock, tid))) {
+    if (__predict_false(!read_precondition(rwlock, tid))) {
         rwlock->pendingReaders += 1;
         do {
             ret = pthread_cond_timedwait(&rwlock->cond, &rwlock->lock, abs_timeout);

@@ -260,7 +257,7 @@ int pthread_rwlock_trywrlock(pthread_rwlock_t *rwlock)
 
     pthread_mutex_lock(&rwlock->lock);
     int tid = __get_thread()->tid;
-    if (__unlikely(!write_precondition(rwlock, tid))) {
+    if (__predict_false(!write_precondition(rwlock, tid))) {
         ret = EBUSY;
     } else {
         rwlock->numLocks ++;

@@ -279,7 +276,7 @@ int pthread_rwlock_timedwrlock(pthread_rwlock_t *rwlock, const struct timespec *
 
     pthread_mutex_lock(&rwlock->lock);
     int tid = __get_thread()->tid;
-    if (__unlikely(!write_precondition(rwlock, tid))) {
+    if (__predict_false(!write_precondition(rwlock, tid))) {
         /* If we can't read yet, wait until the rwlock is unlocked
          * and try again. Increment pendingReaders to get the
          * cond broadcast when that happens.
@@ -81,9 +81,6 @@
 /* the maximum value of overrun counters */
 #define DELAYTIMER_MAX    0x7fffffff
 
-#define __likely(x)   __builtin_expect(!!(x),1)
-#define __unlikely(x) __builtin_expect(!!(x),0)
-
 typedef struct thr_timer          thr_timer_t;
 typedef struct thr_timer_table    thr_timer_table_t;
 

@@ -294,7 +291,7 @@ static void* timer_thread_start(void*);
 
 int timer_create(clockid_t clock_id, struct sigevent* evp, timer_t* timer_id) {
   // If not a SIGEV_THREAD timer, the kernel can handle it without our help.
-  if (__likely(evp == NULL || evp->sigev_notify != SIGEV_THREAD)) {
+  if (__predict_true(evp == NULL || evp->sigev_notify != SIGEV_THREAD)) {
     return __timer_create(clock_id, evp, timer_id);
   }
 
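The branch annotated here is the common case: anything other than a SIGEV_THREAD timer is passed straight through to the kernel. For context, this is what the one case that cannot take that shortcut looks like from the caller's side, using only the standard POSIX API (link with -lrt on glibc; the callback and timings are of course just an example):

#include <signal.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

static void on_tick(union sigval sv)
{
    (void)sv;
    puts("timer fired on a helper thread");
}

int main(void)
{
    struct sigevent sev = {0};
    sev.sigev_notify = SIGEV_THREAD;        /* takes the library-managed path */
    sev.sigev_notify_function = on_tick;

    timer_t id;
    if (timer_create(CLOCK_MONOTONIC, &sev, &id) != 0) {
        perror("timer_create");
        return 1;
    }

    struct itimerspec its = {0};
    its.it_value.tv_sec = 1;                /* fire once, after one second */
    timer_settime(id, 0, &its, NULL);

    sleep(2);                               /* give the callback time to run */
    timer_delete(id);
    return 0;
}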
@@ -360,7 +357,7 @@ int timer_create(clockid_t clock_id, struct sigevent* evp, timer_t* timer_id) {
 int
 timer_delete( timer_t  id )
 {
-    if ( __likely(!TIMER_ID_IS_WRAPPED(id)) )
+    if ( __predict_true(!TIMER_ID_IS_WRAPPED(id)) )
         return __timer_delete( id );
     else
     {

@@ -422,7 +419,7 @@ timer_gettime( timer_t  id, struct itimerspec*  ospec )
         return -1;
     }
 
-    if ( __likely(!TIMER_ID_IS_WRAPPED(id)) ) {
+    if ( __predict_true(!TIMER_ID_IS_WRAPPED(id)) ) {
         return __timer_gettime( id, ospec );
     } else {
         thr_timer_t*  timer = thr_timer_from_id(id);

@@ -450,7 +447,7 @@ timer_settime( timer_t  id,
         return -1;
     }
 
-    if ( __likely(!TIMER_ID_IS_WRAPPED(id)) ) {
+    if ( __predict_true(!TIMER_ID_IS_WRAPPED(id)) ) {
         return __timer_settime( id, flags, spec, ospec );
     } else {
         thr_timer_t*  timer = thr_timer_from_id(id);

@@ -494,7 +491,7 @@ timer_settime( timer_t  id,
 int
 timer_getoverrun(timer_t  id)
 {
-    if ( __likely(!TIMER_ID_IS_WRAPPED(id)) ) {
+    if ( __predict_true(!TIMER_ID_IS_WRAPPED(id)) ) {
         return __timer_getoverrun( id );
     } else {
         thr_timer_t*  timer = thr_timer_from_id(id);
@@ -56,19 +56,6 @@ int __futex_wait_ex(volatile void *ftx, int pshared, int val, const struct time
     return __futex_syscall4(ftx, pshared ? FUTEX_WAIT : FUTEX_WAIT_PRIVATE, val, timeout);
 }
 
-#define  __likely(cond)    __builtin_expect(!!(cond), 1)
-#define  __unlikely(cond)  __builtin_expect(!!(cond), 0)
-
-void*
-__get_stack_base(int  *p_stack_size)
-{
-    pthread_internal_t*  thread = __get_thread();
-
-    *p_stack_size = thread->attr.stack_size;
-    return thread->attr.stack_base;
-}
-
-
 /* CAVEAT: our implementation of pthread_cleanup_push/pop doesn't support C++ exceptions
  *         and thread cancelation
  */

@@ -399,7 +386,7 @@ int pthread_mutex_init(pthread_mutex_t *mutex,
     if (mutex == NULL)
         return EINVAL;
 
-    if (__likely(attr == NULL)) {
+    if (__predict_true(attr == NULL)) {
         mutex->value = MUTEX_TYPE_BITS_NORMAL;
         return 0;
     }

@@ -565,7 +552,7 @@ _recursive_increment(pthread_mutex_t* mutex, int mvalue, int mtype)
     for (;;) {
         /* increment counter, overflow was already checked */
         int newval = mvalue + MUTEX_COUNTER_BITS_ONE;
-        if (__likely(__bionic_cmpxchg(mvalue, newval, &mutex->value) == 0)) {
+        if (__predict_true(__bionic_cmpxchg(mvalue, newval, &mutex->value) == 0)) {
             /* mutex is still locked, not need for a memory barrier */
             return 0;
         }
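The _recursive_increment hunk is the first of many call sites in this file built around the same read/compute/compare-and-swap retry loop. A hedged stand-in for the pattern, using GCC's __sync builtin in place of the bionic-internal __bionic_cmpxchg (which returns 0 on success):

/* Not bionic's code: a generic sketch of the cmpxchg retry loop. */

/* Mimic __bionic_cmpxchg's convention: 0 means the swap happened. */
int cmpxchg_like(int old_value, int new_value, volatile int* ptr)
{
    return __sync_bool_compare_and_swap(ptr, old_value, new_value) ? 0 : 1;
}

/* Add `delta` to a shared word, retrying whenever another thread races us. */
int atomic_add_retry(volatile int* word, int delta)
{
    int value = *word;
    for (;;) {
        int newval = value + delta;
        if (cmpxchg_like(value, newval, word) == 0)
            return newval;   /* our swap won: the word now holds newval */
        value = *word;       /* lost the race: reload and try again */
    }
}

The counter increment in the hunk above follows exactly this shape, except that the value being rebuilt packs the recursion counter into a few bits of the mutex word.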
@@ -582,7 +569,7 @@ int pthread_mutex_lock_impl(pthread_mutex_t *mutex)
 {
     int mvalue, mtype, tid, shared;
 
-    if (__unlikely(mutex == NULL))
+    if (__predict_false(mutex == NULL))
         return EINVAL;
 
     mvalue = mutex->value;

@@ -590,7 +577,7 @@ int pthread_mutex_lock_impl(pthread_mutex_t *mutex)
     shared = (mvalue & MUTEX_SHARED_MASK);
 
     /* Handle normal case first */
-    if ( __likely(mtype == MUTEX_TYPE_BITS_NORMAL) ) {
+    if ( __predict_true(mtype == MUTEX_TYPE_BITS_NORMAL) ) {
         _normal_lock(mutex, shared);
         return 0;
     }

@@ -631,7 +618,7 @@ int pthread_mutex_lock_impl(pthread_mutex_t *mutex)
              *       implement it to get rid of the explicit memory
              *       barrier below.
              */
-            if (__unlikely(__bionic_cmpxchg(mvalue, newval, &mutex->value) != 0)) {
+            if (__predict_false(__bionic_cmpxchg(mvalue, newval, &mutex->value) != 0)) {
                 mvalue = mutex->value;
                 continue;
             }

@@ -643,7 +630,7 @@ int pthread_mutex_lock_impl(pthread_mutex_t *mutex)
              * we will change it to 2 to indicate contention. */
             if (MUTEX_STATE_BITS_IS_LOCKED_UNCONTENDED(mvalue)) {
                 newval = MUTEX_STATE_BITS_FLIP_CONTENTION(mvalue); /* locked state 1 => state 2 */
-                if (__unlikely(__bionic_cmpxchg(mvalue, newval, &mutex->value) != 0)) {
+                if (__predict_false(__bionic_cmpxchg(mvalue, newval, &mutex->value) != 0)) {
                     mvalue = mutex->value;
                     continue;
                 }

@@ -676,7 +663,7 @@ int pthread_mutex_unlock_impl(pthread_mutex_t *mutex)
 {
     int mvalue, mtype, tid, shared;
 
-    if (__unlikely(mutex == NULL))
+    if (__predict_false(mutex == NULL))
         return EINVAL;
 
     mvalue = mutex->value;

@@ -684,7 +671,7 @@ int pthread_mutex_unlock_impl(pthread_mutex_t *mutex)
     shared = (mvalue & MUTEX_SHARED_MASK);
 
     /* Handle common case first */
-    if (__likely(mtype == MUTEX_TYPE_BITS_NORMAL)) {
+    if (__predict_true(mtype == MUTEX_TYPE_BITS_NORMAL)) {
         _normal_unlock(mutex, shared);
         return 0;
     }

@@ -701,7 +688,7 @@ int pthread_mutex_unlock_impl(pthread_mutex_t *mutex)
     if (!MUTEX_COUNTER_BITS_IS_ZERO(mvalue)) {
         for (;;) {
             int newval = mvalue - MUTEX_COUNTER_BITS_ONE;
-            if (__likely(__bionic_cmpxchg(mvalue, newval, &mutex->value) == 0)) {
+            if (__predict_true(__bionic_cmpxchg(mvalue, newval, &mutex->value) == 0)) {
                 /* success: we still own the mutex, so no memory barrier */
                 return 0;
             }

@@ -743,7 +730,7 @@ int pthread_mutex_trylock_impl(pthread_mutex_t *mutex)
 {
     int mvalue, mtype, tid, shared;
 
-    if (__unlikely(mutex == NULL))
+    if (__predict_false(mutex == NULL))
         return EINVAL;
 
     mvalue = mutex->value;

@@ -751,7 +738,7 @@ int pthread_mutex_trylock_impl(pthread_mutex_t *mutex)
     shared = (mvalue & MUTEX_SHARED_MASK);
 
     /* Handle common case first */
-    if ( __likely(mtype == MUTEX_TYPE_BITS_NORMAL) )
+    if ( __predict_true(mtype == MUTEX_TYPE_BITS_NORMAL) )
     {
         if (__bionic_cmpxchg(shared|MUTEX_STATE_BITS_UNLOCKED,
                              shared|MUTEX_STATE_BITS_LOCKED_UNCONTENDED,

@@ -775,7 +762,7 @@ int pthread_mutex_trylock_impl(pthread_mutex_t *mutex)
     mtype |= shared | MUTEX_STATE_BITS_UNLOCKED;
     mvalue = MUTEX_OWNER_TO_BITS(tid) | mtype | MUTEX_STATE_BITS_LOCKED_UNCONTENDED;
 
-    if (__likely(__bionic_cmpxchg(mtype, mvalue, &mutex->value) == 0)) {
+    if (__predict_true(__bionic_cmpxchg(mtype, mvalue, &mutex->value) == 0)) {
         ANDROID_MEMBAR_FULL();
         return 0;
     }
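The trylock hunks above share one more detail worth calling out: the word is swapped from unlocked to "locked, uncontended" with a single compare-and-swap, and the full barrier (ANDROID_MEMBAR_FULL, a bionic-internal macro) runs only on the success path so later accesses cannot be hoisted above lock acquisition. A rough stand-alone equivalent, with illustrative state values rather than bionic's bit layout:

#include <errno.h>

#define STATE_UNLOCKED            0   /* illustrative values only */
#define STATE_LOCKED_UNCONTENDED  1

int try_lock_word(volatile int* word)
{
    if (__sync_bool_compare_and_swap(word, STATE_UNLOCKED, STATE_LOCKED_UNCONTENDED)) {
        /* Acquire-style full barrier on success, mirroring ANDROID_MEMBAR_FULL().
         * (The __sync builtin already acts as a full barrier, so this call is
         * redundant here and kept only to match the structure of the original.) */
        __sync_synchronize();
        return 0;
    }
    return EBUSY;   /* already held: the caller decides whether to spin or give up */
}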
@@ -841,7 +828,7 @@ int pthread_mutex_lock_timeout_np_impl(pthread_mutex_t *mutex, unsigned msecs)
     /* compute absolute expiration time */
     __timespec_to_relative_msec(&abstime, msecs, clock);
 
-    if (__unlikely(mutex == NULL))
+    if (__predict_false(mutex == NULL))
         return EINVAL;
 
     mvalue = mutex->value;

@@ -849,7 +836,7 @@ int pthread_mutex_lock_timeout_np_impl(pthread_mutex_t *mutex, unsigned msecs)
     shared = (mvalue & MUTEX_SHARED_MASK);
 
     /* Handle common case first */
-    if ( __likely(mtype == MUTEX_TYPE_BITS_NORMAL) )
+    if ( __predict_true(mtype == MUTEX_TYPE_BITS_NORMAL) )
     {
         const int unlocked           = shared | MUTEX_STATE_BITS_UNLOCKED;
         const int locked_uncontended = shared | MUTEX_STATE_BITS_LOCKED_UNCONTENDED;

@@ -886,7 +873,7 @@ int pthread_mutex_lock_timeout_np_impl(pthread_mutex_t *mutex, unsigned msecs)
     /* first try a quick lock */
     if (mvalue == mtype) {
         mvalue = MUTEX_OWNER_TO_BITS(tid) | mtype | MUTEX_STATE_BITS_LOCKED_UNCONTENDED;
-        if (__likely(__bionic_cmpxchg(mtype, mvalue, &mutex->value) == 0)) {
+        if (__predict_true(__bionic_cmpxchg(mtype, mvalue, &mutex->value) == 0)) {
             ANDROID_MEMBAR_FULL();
             return 0;
         }

@@ -1063,7 +1050,7 @@ __pthread_cond_pulse(pthread_cond_t *cond, int counter)
 {
     long flags;
 
-    if (__unlikely(cond == NULL))
+    if (__predict_false(cond == NULL))
         return EINVAL;
 
     flags = (cond->value & ~COND_COUNTER_MASK);

@@ -1204,7 +1191,7 @@ int pthread_once( pthread_once_t*  once_control,  void (*init_routine)(void) )
      * stores performed by the initialization function are observable on
      * this CPU after we exit.
      */
-    if (__likely((*ocptr & ONCE_COMPLETED) != 0)) {
+    if (__predict_true((*ocptr & ONCE_COMPLETED) != 0)) {
         ANDROID_MEMBAR_FULL();
         return 0;
     }
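The pthread_once hunk is the classic double-checked fast path: if the completion bit is already visible, issue a barrier so the initializer's stores are also visible, and return without touching any lock. A compact sketch of the same idea in C11 atomics (my_once and ONCE_COMPLETED_BIT are made-up names, and the slow path below is deliberately simplified: it does not serialize two first callers racing each other the way the real implementation must):

#include <stdatomic.h>

#define ONCE_COMPLETED_BIT 1   /* illustrative flag value */

typedef atomic_int my_once_t;

int my_once(my_once_t* once_control, void (*init_routine)(void))
{
    if (atomic_load_explicit(once_control, memory_order_relaxed) & ONCE_COMPLETED_BIT) {
        atomic_thread_fence(memory_order_acquire);  /* pairs with the initializer's release */
        return 0;
    }

    /* Slow path, greatly simplified: a real implementation must make racing
     * first callers wait until exactly one of them has run init_routine(). */
    init_routine();
    atomic_thread_fence(memory_order_release);
    atomic_store_explicit(once_control, ONCE_COMPLETED_BIT, memory_order_relaxed);
    return 0;
}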
@@ -105,9 +105,6 @@ extern int __set_tls(void* ptr);
 #error unsupported architecture
 #endif
 
-/* return the stack base and size, used by our malloc debugger */
-extern void* __get_stack_base(int* p_stack_size);
-
 __END_DECLS
 
 #if defined(__cplusplus)