am 6304d8b2: Use private futexes for semaphores, unless they are initialized with pshared != 0.

commit 7d8574cefe
4 changed files with 147 additions and 56 deletions
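For orientation (editor's sketch, not part of the diff): with this change, a semaphore initialized with pshared == 0 is backed by private futex operations (FUTEX_WAIT_PRIVATE / FUTEX_WAKE_PRIVATE), while pshared != 0, which previously failed with ENOSYS, now records a shared flag in the count word and keeps the regular shared opcodes. A minimal, hypothetical call-site sketch:

/* Sketch only -- illustrates the behaviour described above,
 * not code added by this commit. */
#include <semaphore.h>

static sem_t private_sem;   /* pshared == 0: futex ops use FUTEX_*_PRIVATE      */
static sem_t shared_sem;    /* pshared != 0: shared flag set, plain FUTEX_* ops */

static void init_examples(void)
{
    sem_init(&private_sem, 0, 1);   /* process-private semaphore                    */
    sem_init(&shared_sem,  1, 1);   /* process-shared (place it in shared memory)   */
}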
@@ -43,7 +43,7 @@
#include <memory.h>
#include <assert.h>
#include <malloc.h>
#include <linux/futex.h>
#include <bionic_futex.h>
#include <cutils/atomic-inline.h>
#include <sys/prctl.h>
#include <sys/stat.h>
@@ -54,6 +54,16 @@ extern void _exit_with_stack_teardown(void * stackBase, int stackSize, int retCo
extern void _exit_thread(int retCode);
extern int __set_errno(int);

int __futex_wake_ex(volatile void *ftx, int pshared, int val)
{
    return __futex_syscall3(ftx, pshared ? FUTEX_WAKE : FUTEX_WAKE_PRIVATE, val);
}

int __futex_wait_ex(volatile void *ftx, int pshared, int val, const struct timespec *timeout)
{
    return __futex_syscall4(ftx, pshared ? FUTEX_WAIT : FUTEX_WAIT_PRIVATE, val, timeout);
}

#define __likely(cond) __builtin_expect(!!(cond), 1)
#define __unlikely(cond) __builtin_expect(!!(cond), 0)
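The two wrappers above fold the opcode choice into one place; the rest of this file then converts the open-coded call sites. A hedged sketch of that conversion (the function names here are hypothetical, but the bodies mirror lines that appear later in this diff; 'shared' is the mutex's shared flag already extracted by the caller):

/* Sketch only: the call-site pattern before and after this commit. */
static void wake_one_waiter_old(pthread_mutex_t *mutex, int shared)
{
    int wake_op = shared ? FUTEX_WAKE : FUTEX_WAKE_PRIVATE;
    __futex_syscall3(&mutex->value, wake_op, 1);
}

static void wake_one_waiter_new(pthread_mutex_t *mutex, int shared)
{
    __futex_wake_ex(&mutex->value, shared, 1);
}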
@@ -716,24 +726,6 @@ int pthread_setschedparam(pthread_t thid, int policy,
}


int __futex_wait(volatile void *ftx, int val, const struct timespec *timeout);
int __futex_wake(volatile void *ftx, int count);

int __futex_syscall3(volatile void *ftx, int op, int val);
int __futex_syscall4(volatile void *ftx, int op, int val, const struct timespec *timeout);

#ifndef FUTEX_PRIVATE_FLAG
#define FUTEX_PRIVATE_FLAG 128
#endif

#ifndef FUTEX_WAIT_PRIVATE
#define FUTEX_WAIT_PRIVATE (FUTEX_WAIT|FUTEX_PRIVATE_FLAG)
#endif

#ifndef FUTEX_WAKE_PRIVATE
#define FUTEX_WAKE_PRIVATE (FUTEX_WAKE|FUTEX_PRIVATE_FLAG)
#endif

// mutex lock states
//
// 0: unlocked
@@ -935,10 +927,8 @@ _normal_lock(pthread_mutex_t* mutex)
         * that the mutex is in state 2 when we go to sleep on it, which
         * guarantees a wake-up call.
         */
        int wait_op = shared ? FUTEX_WAIT : FUTEX_WAIT_PRIVATE;

        while (__atomic_swap(shared|2, &mutex->value ) != (shared|0))
            __futex_syscall4(&mutex->value, wait_op, shared|2, 0);
            __futex_wait_ex(&mutex->value, shared, shared|2, 0);
    }
    ANDROID_MEMBAR_FULL();
}
@@ -961,7 +951,6 @@ _normal_unlock(pthread_mutex_t* mutex)
     * if it wasn't 1 we have to do some additional work.
     */
    if (__atomic_dec(&mutex->value) != (shared|1)) {
        int wake_op = shared ? FUTEX_WAKE : FUTEX_WAKE_PRIVATE;
        /*
         * Start by releasing the lock. The decrement changed it from
         * "contended lock" to "uncontended lock", which means we still
@@ -999,7 +988,7 @@ _normal_unlock(pthread_mutex_t* mutex)
         * Either way we have correct behavior and nobody is orphaned on
         * the wait queue.
         */
        __futex_syscall3(&mutex->value, wake_op, 1);
        __futex_wake_ex(&mutex->value, shared, 1);
    }
}

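Editor's note (hedged sketch, inferred from the code above and the truncated "mutex lock states" comment): _normal_lock and _normal_unlock implement the usual three-state futex mutex, with the shared flag OR'd into every state value so it survives the atomics.

/* Sketch only -- assumed state machine, not code from this commit:
 *   value == shared|0   unlocked
 *   value == shared|1   locked, no waiters
 *   value == shared|2   locked, possibly contended
 * The locker swaps in shared|2 before sleeping, so an unlocker whose
 * decrement did not start from shared|1 knows it must issue a wake. */
static void normal_lock_sketch(pthread_mutex_t *mutex, int shared)
{
    if (__atomic_cmpxchg(shared|0, shared|1, &mutex->value) != 0) {
        while (__atomic_swap(shared|2, &mutex->value) != (shared|0))
            __futex_wait_ex(&mutex->value, shared, shared|2, 0);
    }
    ANDROID_MEMBAR_FULL();
}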
@@ -1019,7 +1008,7 @@ _recursive_unlock(void)

int pthread_mutex_lock(pthread_mutex_t *mutex)
{
    int mtype, tid, new_lock_type, shared, wait_op;
    int mtype, tid, new_lock_type, shared;

    if (__unlikely(mutex == NULL))
        return EINVAL;
@@ -1064,8 +1053,7 @@ int pthread_mutex_lock(pthread_mutex_t *mutex)
        new_lock_type = 1;

        /* compute futex wait opcode and restore shared flag in mtype */
        wait_op = shared ? FUTEX_WAIT : FUTEX_WAIT_PRIVATE;
        mtype |= shared;
        mtype |= shared;

        for (;;) {
            int oldv;
@@ -1091,7 +1079,7 @@ int pthread_mutex_lock(pthread_mutex_t *mutex)
             */
            new_lock_type = 2;

            __futex_syscall4(&mutex->value, wait_op, oldv, NULL);
            __futex_wait_ex(&mutex->value, shared, oldv, NULL);
        }
        return 0;
    }
@@ -1131,8 +1119,7 @@ int pthread_mutex_unlock(pthread_mutex_t *mutex)

        /* Wake one waiting thread, if any */
        if ((oldv & 3) == 2) {
            int wake_op = shared ? FUTEX_WAKE : FUTEX_WAKE_PRIVATE;
            __futex_syscall3(&mutex->value, wake_op, 1);
            __futex_wake_ex(&mutex->value, shared, 1);
        }
        return 0;
    }
@@ -1234,7 +1221,7 @@ int pthread_mutex_lock_timeout_np(pthread_mutex_t *mutex, unsigned msecs)
    clockid_t clock = CLOCK_MONOTONIC;
    struct timespec abstime;
    struct timespec ts;
    int mtype, tid, oldv, new_lock_type, shared, wait_op;
    int mtype, tid, oldv, new_lock_type, shared;

    /* compute absolute expiration time */
    __timespec_to_relative_msec(&abstime, msecs, clock);
@@ -1248,8 +1235,6 @@ int pthread_mutex_lock_timeout_np(pthread_mutex_t *mutex, unsigned msecs)
    /* Handle common case first */
    if ( __likely(mtype == MUTEX_TYPE_NORMAL) )
    {
        int wait_op = shared ? FUTEX_WAIT : FUTEX_WAIT_PRIVATE;

        /* fast path for uncontended lock */
        if (__atomic_cmpxchg(shared|0, shared|1, &mutex->value) == 0) {
            ANDROID_MEMBAR_FULL();
@@ -1261,7 +1246,7 @@ int pthread_mutex_lock_timeout_np(pthread_mutex_t *mutex, unsigned msecs)
            if (__timespec_to_absolute(&ts, &abstime, clock) < 0)
                return EBUSY;

            __futex_syscall4(&mutex->value, wait_op, shared|2, &ts);
            __futex_wait_ex(&mutex->value, shared, shared|2, &ts);
        }
        ANDROID_MEMBAR_FULL();
        return 0;
@@ -1294,7 +1279,6 @@ int pthread_mutex_lock_timeout_np(pthread_mutex_t *mutex, unsigned msecs)
        new_lock_type = 1;

        /* Compute wait op and restore sharing bit in mtype */
        wait_op = shared ? FUTEX_WAIT : FUTEX_WAIT_PRIVATE;
        mtype |= shared;

        for (;;) {
@@ -1325,7 +1309,7 @@ int pthread_mutex_lock_timeout_np(pthread_mutex_t *mutex, unsigned msecs)
            if (__timespec_to_absolute(&ts, &abstime, clock) < 0)
                return EBUSY;

            __futex_syscall4(&mutex->value, wait_op, oldv, &ts);
            __futex_wait_ex(&mutex->value, shared, oldv, &ts);
        }
        return 0;
    }
@@ -1418,7 +1402,6 @@ static int
__pthread_cond_pulse(pthread_cond_t *cond, int counter)
{
    long flags;
    int wake_op;

    if (__unlikely(cond == NULL))
        return EINVAL;
@@ -1432,8 +1415,7 @@ __pthread_cond_pulse(pthread_cond_t *cond, int counter)
            break;
    }

    wake_op = COND_IS_SHARED(cond) ? FUTEX_WAKE : FUTEX_WAKE_PRIVATE;
    __futex_syscall3(&cond->value, wake_op, counter);
    __futex_wake_ex(&cond->value, COND_IS_SHARED(cond), counter);
    return 0;
}

@@ -1458,10 +1440,9 @@ int __pthread_cond_timedwait_relative(pthread_cond_t *cond,
{
    int status;
    int oldvalue = cond->value;
    int wait_op = COND_IS_SHARED(cond) ? FUTEX_WAIT : FUTEX_WAIT_PRIVATE;

    pthread_mutex_unlock(mutex);
    status = __futex_syscall4(&cond->value, wait_op, oldvalue, reltime);
    status = __futex_wait_ex(&cond->value, COND_IS_SHARED(cond), oldvalue, reltime);
    pthread_mutex_lock(mutex);

    if (status == (-ETIMEDOUT)) return ETIMEDOUT;

@@ -31,6 +31,16 @@
#include <sys/atomics.h>
#include <time.h>
#include <cutils/atomic-inline.h>
#include <bionic_futex.h>

/* Use the lower 31-bits for the counter, and the high bit for
 * the shared flag.
 */
#define SEM_VALUE_MASK  0x7fffffff
#define SEM_SHARED_MASK 0x80000000

#define SEM_GET_SHARED(sem)  ((sem)->count & SEM_SHARED_MASK)
#define SEM_GET_VALUE(sem)   ((sem)->count & SEM_VALUE_MASK)

int sem_init(sem_t *sem, int pshared, unsigned int value)
{
@@ -39,12 +49,16 @@ int sem_init(sem_t *sem, int pshared, unsigned int value)
        return -1;
    }

    if (pshared != 0) {
        errno = ENOSYS;
    /* ensure that 'value' can be stored in the semaphore */
    if ((value & SEM_VALUE_MASK) != value) {
        errno = EINVAL;
        return -1;
    }

    sem->count = value;
    if (pshared != 0)
        sem->count |= SEM_SHARED_MASK;

    return 0;
}

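A worked example of the encoding set up above (editor's sketch, not part of the diff): after sem_init() the 32-bit count word carries the counter in its low 31 bits and the shared flag in the high bit, so every later atomic update has to mask the flag in and out.

/* Sketch only: layout of the count word after sem_init(). */
#include <semaphore.h>

static void encoding_example(void)
{
    sem_t s;
    sem_init(&s, 1, 3);   /* pshared != 0, initial value 3 */
    /* s.count == 0x80000003:
     *   SEM_GET_SHARED(&s) == 0x80000000   (high bit: shared flag)
     *   SEM_GET_VALUE(&s)  == 3            (low 31 bits: counter)  */
}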
@@ -55,10 +69,11 @@ int sem_destroy(sem_t *sem)
        errno = EINVAL;
        return -1;
    }
    if (sem->count == 0) {
    if ((sem->count & SEM_VALUE_MASK) == 0) {
        errno = EBUSY;
        return -1;
    }
    sem->count = 0;
    return 0;
}

@@ -91,32 +106,60 @@ int sem_unlink(const char * name)
}


/* Return 0 if a semaphore's value is 0
 * Otherwise, decrement the value and return the old value.
 */
static int
__atomic_dec_if_positive( volatile unsigned int* pvalue )
__sem_dec_if_positive(volatile unsigned int *pvalue)
{
    unsigned int shared = (*pvalue & SEM_SHARED_MASK);
    unsigned int old;

    do {
        old = *pvalue;
        old = (*pvalue & SEM_VALUE_MASK);
    }
    while ( old != 0 && __atomic_cmpxchg( (int)old, (int)old-1, (volatile int*)pvalue ) != 0 );
    while ( old != 0 &&
            __atomic_cmpxchg((int)(old|shared),
                             (int)((old-1)|shared),
                             (volatile int*)pvalue) != 0 );

    return old;
}

/* Increment the value of a semaphore atomically.
 * NOTE: the value will wrap above SEM_VALUE_MASK
 */
static int
__sem_inc(volatile unsigned int *pvalue)
{
    unsigned int shared = (*pvalue & SEM_SHARED_MASK);
    unsigned int old;

    do {
        old = (*pvalue & SEM_VALUE_MASK);
    } while ( __atomic_cmpxchg((int)(old|shared),
                               (int)(((old+1)&SEM_VALUE_MASK)|shared),
                               (volatile int*)pvalue) != 0);
    return old;
}

/* lock a semaphore */
int sem_wait(sem_t *sem)
{
    unsigned shared;

    if (sem == NULL) {
        errno = EINVAL;
        return -1;
    }

    shared = SEM_GET_SHARED(sem);

    for (;;) {
        if (__atomic_dec_if_positive(&sem->count))
        if (__sem_dec_if_positive(&sem->count))
            break;

        __futex_wait(&sem->count, 0, 0);
        __futex_wait_ex(&sem->count, shared, shared, NULL);
    }
    ANDROID_MEMBAR_FULL();
    return 0;
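Both helpers above follow the same pattern: read only the 31-bit counter, then compare-and-swap the full word with the shared bit re-attached to both the expected and the new value, so the flag survives every update. A hedged walk-through of one __sem_dec_if_positive() iteration, assuming a shared semaphore currently at value 1:

/* Sketch only -- numbers chosen for illustration:
 *   *pvalue        == 0x80000001   (shared flag | value 1)
 *   shared         == 0x80000000
 *   old            == 0x00000001   (value bits only)
 *   cmpxchg expects   0x80000001   (old|shared)
 *   cmpxchg stores    0x80000000   ((old-1)|shared)
 * On success the counter drops to 0 while the shared flag is preserved,
 * and the non-zero 'old' tells the caller the decrement happened. */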
@@ -125,6 +168,7 @@ int sem_wait(sem_t *sem)
int sem_timedwait(sem_t *sem, const struct timespec *abs_timeout)
{
    int ret;
    unsigned int shared;

    if (sem == NULL) {
        errno = EINVAL;
@@ -133,7 +177,7 @@ int sem_timedwait(sem_t *sem, const struct timespec *abs_timeout)

    /* POSIX says we need to try to decrement the semaphore
     * before checking the timeout value */
    if (__atomic_dec_if_positive(&sem->count)) {
    if (__sem_dec_if_positive(&sem->count)) {
        ANDROID_MEMBAR_FULL();
        return 0;
    }
@@ -148,6 +192,8 @@ int sem_timedwait(sem_t *sem, const struct timespec *abs_timeout)
        return -1;
    }

    shared = SEM_GET_SHARED(sem);

    for (;;) {
        struct timespec ts;
        int ret;
@@ -166,7 +212,7 @@ int sem_timedwait(sem_t *sem, const struct timespec *abs_timeout)
            return -1;
        }

        ret = __futex_wait(&sem->count, 0, &ts);
        ret = __futex_wait_ex(&sem->count, shared, shared, &ts);

        /* return in case of timeout or interrupt */
        if (ret == -ETIMEDOUT || ret == -EINTR) {
@@ -174,7 +220,7 @@ int sem_timedwait(sem_t *sem, const struct timespec *abs_timeout)
            return -1;
        }

        if (__atomic_dec_if_positive(&sem->count)) {
        if (__sem_dec_if_positive(&sem->count)) {
            ANDROID_MEMBAR_FULL();
            break;
        }
@@ -185,12 +231,16 @@ int sem_timedwait(sem_t *sem, const struct timespec *abs_timeout)
/* unlock a semaphore */
int sem_post(sem_t *sem)
{
    unsigned int shared;

    if (sem == NULL)
        return EINVAL;

    shared = SEM_GET_SHARED(sem);

    ANDROID_MEMBAR_FULL();
    if (__atomic_inc((volatile int*)&sem->count) >= 0)
        __futex_wake(&sem->count, 1);
    if (__sem_inc(&sem->count) >= 0)
        __futex_wake_ex(&sem->count, shared, 1);

    return 0;
}
@@ -202,7 +252,7 @@ int sem_trywait(sem_t *sem)
        return -1;
    }

    if (__atomic_dec_if_positive(&sem->count) > 0) {
    if (__sem_dec_if_positive(&sem->count) > 0) {
        ANDROID_MEMBAR_FULL();
        return 0;
    } else {
@@ -218,6 +268,6 @@ int sem_getvalue(sem_t *sem, int *sval)
        return -1;
    }

    *sval = sem->count;
    *sval = SEM_GET_VALUE(sem);
    return 0;
}
@@ -5,6 +5,9 @@ Differences between current and Android 2.2:

- <pthread.h>: Add reader/writer locks implementation.

- <semaphore.h>: Use private futexes for semaphore implementation,
  unless you set 'pshared' to non-0 when calling sem_init().

-------------------------------------------------------------------------------
Differences between Android 2.2 and Android 2.1:
libc/private/bionic_futex.h  (new file, 57 lines)
@@ -0,0 +1,57 @@
/*
 * Copyright (C) 2008 The Android Open Source Project
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#ifndef _BIONIC_FUTEX_H
#define _BIONIC_FUTEX_H

#include <linux/futex.h>

extern int __futex_wait(volatile void *ftx, int val, const struct timespec *timeout);
extern int __futex_wake(volatile void *ftx, int count);

extern int __futex_syscall3(volatile void *ftx, int op, int val);
extern int __futex_syscall4(volatile void *ftx, int op, int val, const struct timespec *timeout);

#ifndef FUTEX_PRIVATE_FLAG
#define FUTEX_PRIVATE_FLAG 128
#endif

#ifndef FUTEX_WAIT_PRIVATE
#define FUTEX_WAIT_PRIVATE (FUTEX_WAIT|FUTEX_PRIVATE_FLAG)
#endif

#ifndef FUTEX_WAKE_PRIVATE
#define FUTEX_WAKE_PRIVATE (FUTEX_WAKE|FUTEX_PRIVATE_FLAG)
#endif

/* Like __futex_wait/wake, but take an additional 'pshared' argument.
 * When non-0, this will use normal futexes. Otherwise, private futexes.
 */
extern int __futex_wake_ex(volatile void *ftx, int pshared, int val);
extern int __futex_wait_ex(volatile void *ftx, int pshared, int val, const struct timespec *timeout);

#endif /* _BIONIC_FUTEX_H */
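A minimal usage sketch of the new header (hypothetical caller, not part of the commit): one thread sleeps while a word still holds its expected value, another thread changes it and wakes the sleeper; passing pshared == 0 selects the private opcodes.

/* Sketch only: hypothetical in-process caller of <bionic_futex.h>. */
#include <bionic_futex.h>

static volatile int flag = 0;

static void waiter(void)
{
    /* Sleep while 'flag' is still 0; pshared == 0 -> FUTEX_WAIT_PRIVATE. */
    while (flag == 0)
        __futex_wait_ex(&flag, 0, 0, NULL);
}

static void signaler(void)
{
    flag = 1;
    /* Wake one waiter; pshared == 0 -> FUTEX_WAKE_PRIVATE. */
    __futex_wake_ex(&flag, 0, 1);
}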