Merge "<pthread.h> fixes and pthread cleanup."

This commit is contained in:
Elliott Hughes 2013-10-31 20:36:48 +00:00 committed by Gerrit Code Review
commit 0360e3ab2d
20 changed files with 1006 additions and 959 deletions

View file

@ -133,7 +133,6 @@ libc_common_src_files := \
bionic/system_properties_compat.c \ bionic/system_properties_compat.c \
bionic/tcgetpgrp.c \ bionic/tcgetpgrp.c \
bionic/tcsetpgrp.c \ bionic/tcsetpgrp.c \
bionic/thread_atexit.c \
bionic/time64.c \ bionic/time64.c \
bionic/umount.c \ bionic/umount.c \
bionic/unlockpt.c \ bionic/unlockpt.c \
@ -242,18 +241,27 @@ libc_bionic_src_files := \
bionic/pause.cpp \ bionic/pause.cpp \
bionic/pipe.cpp \ bionic/pipe.cpp \
bionic/poll.cpp \ bionic/poll.cpp \
bionic/pthread_atfork.cpp \
bionic/pthread_attr.cpp \ bionic/pthread_attr.cpp \
bionic/pthread_cond.cpp \
bionic/pthread_create.cpp \
bionic/pthread_detach.cpp \ bionic/pthread_detach.cpp \
bionic/pthread_equal.cpp \ bionic/pthread_equal.cpp \
bionic/pthread_exit.cpp \
bionic/pthread_getcpuclockid.cpp \ bionic/pthread_getcpuclockid.cpp \
bionic/pthread_getschedparam.cpp \ bionic/pthread_getschedparam.cpp \
bionic/pthread_internals.cpp \ bionic/pthread_internals.cpp \
bionic/pthread_join.cpp \ bionic/pthread_join.cpp \
bionic/pthread_key.cpp \
bionic/pthread_kill.cpp \ bionic/pthread_kill.cpp \
bionic/pthread_mutex.cpp \
bionic/pthread_once.cpp \
bionic/pthread_rwlock.cpp \
bionic/pthread_self.cpp \ bionic/pthread_self.cpp \
bionic/pthread_setname_np.cpp \ bionic/pthread_setname_np.cpp \
bionic/pthread_setschedparam.cpp \ bionic/pthread_setschedparam.cpp \
bionic/pthread_sigmask.cpp \ bionic/pthread_sigmask.cpp \
bionic/ptrace.cpp \
bionic/raise.cpp \ bionic/raise.cpp \
bionic/readlink.cpp \ bionic/readlink.cpp \
bionic/rename.cpp \ bionic/rename.cpp \
@ -285,8 +293,10 @@ libc_bionic_src_files := \
bionic/stubs.cpp \ bionic/stubs.cpp \
bionic/symlink.cpp \ bionic/symlink.cpp \
bionic/sysconf.cpp \ bionic/sysconf.cpp \
bionic/thread_atexit.cpp \
bionic/tdestroy.cpp \ bionic/tdestroy.cpp \
bionic/__thread_entry.cpp \ bionic/__thread_entry.cpp \
bionic/timer.cpp \
bionic/tmpfile.cpp \ bionic/tmpfile.cpp \
bionic/unlink.cpp \ bionic/unlink.cpp \
bionic/utimes.cpp \ bionic/utimes.cpp \
@ -414,17 +424,6 @@ libc_upstream_netbsd_src_files := \
upstream-netbsd/libc/string/strxfrm.c \ upstream-netbsd/libc/string/strxfrm.c \
upstream-netbsd/libc/unistd/killpg.c \ upstream-netbsd/libc/unistd/killpg.c \
libc_common_src_files += \
bionic/pthread-atfork.c \
bionic/pthread-rwlocks.c \
bionic/pthread-timers.c \
bionic/ptrace.c \
libc_static_common_src_files += \
bionic/pthread.c \
bionic/pthread_create.cpp \
bionic/pthread_key.cpp \
# Architecture specific source files go here # Architecture specific source files go here
# ========================================================= # =========================================================
ifeq ($(TARGET_ARCH),arm) ifeq ($(TARGET_ARCH),arm)

View file

@ -1,126 +0,0 @@
/*
* Copyright (C) 2008 The Android Open Source Project
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <stdlib.h>
#include <errno.h>
#include <pthread.h>
#include <sys/queue.h>
/* Serializes access to the handler list. Recursive, and deliberately left
 * locked across fork(): the prepare pass takes it, the parent/child passes
 * release (or reinitialize) it. */
static pthread_mutex_t handler_mutex = PTHREAD_RECURSIVE_MUTEX_INITIALIZER;

/* One pthread_atfork() registration, kept in a BSD CIRCLEQ (<sys/queue.h>)
 * in registration order. */
struct atfork_t
{
    CIRCLEQ_ENTRY(atfork_t) entries;

    void (*prepare)(void);
    void (*child)(void);
    void (*parent)(void);
};
static CIRCLEQ_HEAD(atfork_head_t, atfork_t) atfork_head = \
    CIRCLEQ_HEAD_INITIALIZER(atfork_head);

/* Runs all prepare handlers (newest first, per POSIX) with handler_mutex
 * held; the lock is released after fork() by the parent/child functions. */
void __bionic_atfork_run_prepare()
{
    struct atfork_t *cursor;

    /* We will lock this here, and unlock it in the parent and child functions.
     * This ensures that nobody can modify the handler array between the calls
     * to the prepare and parent/child handlers.
     *
     * TODO: If a handler mucks with the list, it could cause problems. Right
     *       now it's ok because all they can do is add new items to the end
     *       of the list, but if/when we implement cleanup in dlclose() things
     *       will get more interesting...
     */
    pthread_mutex_lock(&handler_mutex);

    /* Call pthread_atfork() prepare handlers. Posix states that the prepare
     * handlers should be called in the reverse order of the parent/child
     * handlers, so we iterate backwards.
     */
    for (cursor = atfork_head.cqh_last;
         cursor != (void*)&atfork_head;
         cursor = cursor->entries.cqe_prev) {
        if (cursor->prepare != NULL) {
            cursor->prepare();
        }
    }
}

/* Runs the child handlers in registration order in the new child process,
 * then reinitializes handler_mutex as a fresh recursive mutex instead of
 * unlocking the state inherited from the parent. */
void __bionic_atfork_run_child()
{
    struct atfork_t *cursor;
    pthread_mutexattr_t attr;

    /* Call pthread_atfork() child handlers */
    for (cursor = atfork_head.cqh_first;
         cursor != (void*)&atfork_head;
         cursor = cursor->entries.cqe_next) {
        if (cursor->child != NULL) {
            cursor->child();
        }
    }

    pthread_mutexattr_init(&attr);
    pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
    pthread_mutex_init(&handler_mutex, &attr);
}

/* Runs the parent handlers in registration order in the parent after
 * fork(), then drops the lock taken by the prepare pass. */
void __bionic_atfork_run_parent()
{
    struct atfork_t *cursor;

    /* Call pthread_atfork() parent handlers */
    for (cursor = atfork_head.cqh_first;
         cursor != (void*)&atfork_head;
         cursor = cursor->entries.cqe_next) {
        if (cursor->parent != NULL) {
            cursor->parent();
        }
    }

    pthread_mutex_unlock(&handler_mutex);
}

/* Registers fork() handlers, per POSIX; any of the three may be NULL.
 * Returns 0 on success or ENOMEM if the list node cannot be allocated.
 * Entries are never removed (see the dlclose() TODO above). */
int pthread_atfork(void (*prepare)(void), void (*parent)(void), void(*child)(void))
{
    struct atfork_t *entry = malloc(sizeof(struct atfork_t));

    if (entry == NULL) {
        return ENOMEM;
    }

    entry->prepare = prepare;
    entry->parent = parent;
    entry->child = child;

    pthread_mutex_lock(&handler_mutex);
    CIRCLEQ_INSERT_TAIL(&atfork_head, entry, entries);
    pthread_mutex_unlock(&handler_mutex);

    return 0;
}

View file

@ -0,0 +1,120 @@
/*
* Copyright (C) 2008 The Android Open Source Project
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <errno.h>
#include <pthread.h>
#include <stdlib.h>
// Guards gAtForkList. Recursive, and deliberately left locked across fork():
// __bionic_atfork_run_prepare() locks it and the parent/child run functions
// release (or reinitialize) it afterwards.
static pthread_mutex_t gAtForkListMutex = PTHREAD_RECURSIVE_MUTEX_INITIALIZER;

// One pthread_atfork() registration: the three handler pointers, linked into
// a doubly-linked list kept in registration order.
struct atfork_t {
  atfork_t* next;
  atfork_t* prev;

  void (*prepare)(void);
  void (*child)(void);
  void (*parent)(void);
};

// Head and tail of the handler list; pthread_atfork() appends at 'last'.
struct atfork_list_t {
  atfork_t* first;
  atfork_t* last;
};

static atfork_list_t gAtForkList = { NULL, NULL };
// Runs the registered prepare handlers (newest first, per POSIX) with the
// list lock held; the lock is released later by the parent/child run
// functions. Marked "needed by fork.c" in pthread_internal.h.
void __bionic_atfork_run_prepare() {
  // We will lock this here, and unlock it in the parent and child functions.
  // This ensures that nobody can modify the handler array between the calls
  // to the prepare and parent/child handlers.
  //
  // TODO: If a handler mucks with the list, it could cause problems. Right
  //       now it's ok because all they can do is add new items to the end
  //       of the list, but if/when we implement cleanup in dlclose() things
  //       will get more interesting...
  pthread_mutex_lock(&gAtForkListMutex);

  // Call pthread_atfork() prepare handlers. POSIX states that the prepare
  // handlers should be called in the reverse order of the parent/child
  // handlers, so we iterate backwards.
  for (atfork_t* it = gAtForkList.last; it != NULL; it = it->prev) {
    if (it->prepare != NULL) {
      it->prepare();
    }
  }
}
// Runs the child handlers in registration order in the new child process.
// Instead of unlocking the mutex state inherited from the parent, the list
// mutex is reinitialized to a fresh recursive mutex.
void __bionic_atfork_run_child() {
  for (atfork_t* it = gAtForkList.first; it != NULL; it = it->next) {
    if (it->child != NULL) {
      it->child();
    }
  }

  // Re-create the lock that __bionic_atfork_run_prepare() left held.
  pthread_mutexattr_t attr;
  pthread_mutexattr_init(&attr);
  pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
  pthread_mutex_init(&gAtForkListMutex, &attr);
}
void __bionic_atfork_run_parent() {
for (atfork_t* it = gAtForkList.first; it != NULL; it = it->next) {
if (it->parent != NULL) {
it->parent();
}
}
pthread_mutex_unlock(&gAtForkListMutex);
}
// Registers fork() handlers, per POSIX: 'prepare' runs in the parent before
// fork(), 'parent' in the parent afterwards, and 'child' in the new child.
// Any of the three may be NULL. Returns 0 on success, or ENOMEM if the list
// node cannot be allocated. Entries are never removed (see the dlclose()
// TODO in __bionic_atfork_run_prepare()).
int pthread_atfork(void (*prepare)(void), void (*parent)(void), void(*child)(void)) {
  atfork_t* entry = reinterpret_cast<atfork_t*>(malloc(sizeof(atfork_t)));
  if (entry == NULL) {
    return ENOMEM;
  }

  entry->prepare = prepare;
  entry->parent = parent;
  entry->child = child;

  pthread_mutex_lock(&gAtForkListMutex);

  // Append 'entry' to the list.
  entry->next = NULL;
  entry->prev = gAtForkList.last;
  if (entry->prev != NULL) {
    entry->prev->next = entry;
  }
  if (gAtForkList.first == NULL) {
    gAtForkList.first = entry;
  }
  gAtForkList.last = entry;

  pthread_mutex_unlock(&gAtForkListMutex);

  return 0;
}

View file

@ -56,7 +56,7 @@ int pthread_attr_setdetachstate(pthread_attr_t* attr, int state) {
return 0; return 0;
} }
int pthread_attr_getdetachstate(pthread_attr_t const* attr, int* state) { int pthread_attr_getdetachstate(const pthread_attr_t* attr, int* state) {
*state = (attr->flags & PTHREAD_ATTR_FLAG_DETACHED) ? PTHREAD_CREATE_DETACHED : PTHREAD_CREATE_JOINABLE; *state = (attr->flags & PTHREAD_ATTR_FLAG_DETACHED) ? PTHREAD_CREATE_DETACHED : PTHREAD_CREATE_JOINABLE;
return 0; return 0;
} }
@ -66,17 +66,17 @@ int pthread_attr_setschedpolicy(pthread_attr_t* attr, int policy) {
return 0; return 0;
} }
int pthread_attr_getschedpolicy(pthread_attr_t const* attr, int* policy) { int pthread_attr_getschedpolicy(const pthread_attr_t* attr, int* policy) {
*policy = attr->sched_policy; *policy = attr->sched_policy;
return 0; return 0;
} }
int pthread_attr_setschedparam(pthread_attr_t * attr, struct sched_param const* param) { int pthread_attr_setschedparam(pthread_attr_t* attr, const sched_param* param) {
attr->sched_priority = param->sched_priority; attr->sched_priority = param->sched_priority;
return 0; return 0;
} }
int pthread_attr_getschedparam(pthread_attr_t const* attr, struct sched_param* param) { int pthread_attr_getschedparam(const pthread_attr_t* attr, sched_param* param) {
param->sched_priority = attr->sched_priority; param->sched_priority = attr->sched_priority;
return 0; return 0;
} }
@ -89,7 +89,7 @@ int pthread_attr_setstacksize(pthread_attr_t* attr, size_t stack_size) {
return 0; return 0;
} }
int pthread_attr_getstacksize(pthread_attr_t const* attr, size_t* stack_size) { int pthread_attr_getstacksize(const pthread_attr_t* attr, size_t* stack_size) {
*stack_size = attr->stack_size; *stack_size = attr->stack_size;
return 0; return 0;
} }
@ -100,7 +100,7 @@ int pthread_attr_setstackaddr(pthread_attr_t*, void*) {
return ENOSYS; return ENOSYS;
} }
int pthread_attr_getstackaddr(pthread_attr_t const* attr, void** stack_addr) { int pthread_attr_getstackaddr(const pthread_attr_t* attr, void** stack_addr) {
// This was removed from POSIX.1-2008. // This was removed from POSIX.1-2008.
// Needed for ABI compatibility with the NDK. // Needed for ABI compatibility with the NDK.
*stack_addr = (char*)attr->stack_base + attr->stack_size; *stack_addr = (char*)attr->stack_base + attr->stack_size;
@ -119,7 +119,7 @@ int pthread_attr_setstack(pthread_attr_t* attr, void* stack_base, size_t stack_s
return 0; return 0;
} }
int pthread_attr_getstack(pthread_attr_t const* attr, void** stack_base, size_t* stack_size) { int pthread_attr_getstack(const pthread_attr_t* attr, void** stack_base, size_t* stack_size) {
*stack_base = attr->stack_base; *stack_base = attr->stack_base;
*stack_size = attr->stack_size; *stack_size = attr->stack_size;
return 0; return 0;
@ -130,7 +130,7 @@ int pthread_attr_setguardsize(pthread_attr_t* attr, size_t guard_size) {
return 0; return 0;
} }
int pthread_attr_getguardsize(pthread_attr_t const* attr, size_t* guard_size) { int pthread_attr_getguardsize(const pthread_attr_t* attr, size_t* guard_size) {
*guard_size = attr->guard_size; *guard_size = attr->guard_size;
return 0; return 0;
} }
@ -141,7 +141,7 @@ int pthread_getattr_np(pthread_t thid, pthread_attr_t* attr) {
return 0; return 0;
} }
int pthread_attr_setscope(pthread_attr_t* , int scope) { int pthread_attr_setscope(pthread_attr_t*, int scope) {
if (scope == PTHREAD_SCOPE_SYSTEM) { if (scope == PTHREAD_SCOPE_SYSTEM) {
return 0; return 0;
} }
@ -151,6 +151,7 @@ int pthread_attr_setscope(pthread_attr_t* , int scope) {
return EINVAL; return EINVAL;
} }
int pthread_attr_getscope(pthread_attr_t const*) { int pthread_attr_getscope(const pthread_attr_t*, int* scope) {
return PTHREAD_SCOPE_SYSTEM; *scope = PTHREAD_SCOPE_SYSTEM;
return 0;
} }

View file

@ -0,0 +1,214 @@
/*
* Copyright (C) 2008 The Android Open Source Project
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <pthread.h>
#include <errno.h>
#include <limits.h>
#include <sys/atomics.h>
#include <sys/mman.h>
#include <unistd.h>
#include "pthread_internal.h"
#include "private/bionic_atomic_inline.h"
#include "private/bionic_futex.h"
#include "private/bionic_pthread.h"
#include "private/bionic_time_conversions.h"
#include "private/bionic_tls.h"
#include "private/thread_private.h"
// bionic stores the pshared value directly in the (integral) condattr,
// as the assignments below show.

// Initializes 'attr' to the default: process-private. Returns EINVAL for NULL.
int pthread_condattr_init(pthread_condattr_t* attr) {
  if (attr == NULL) {
    return EINVAL;
  }
  *attr = PTHREAD_PROCESS_PRIVATE;
  return 0;
}

// Reads the process-shared setting back out of 'attr'.
int pthread_condattr_getpshared(const pthread_condattr_t* attr, int* pshared) {
  if (attr == NULL || pshared == NULL) {
    return EINVAL;
  }
  *pshared = *attr;
  return 0;
}

// Sets the process-shared attribute; only the two POSIX values are accepted.
int pthread_condattr_setpshared(pthread_condattr_t* attr, int pshared) {
  if (attr == NULL) {
    return EINVAL;
  }
  if (pshared != PTHREAD_PROCESS_SHARED && pshared != PTHREAD_PROCESS_PRIVATE) {
    return EINVAL;
  }
  *attr = pshared;
  return 0;
}

// Poisons 'attr' (0xdeada11d, "dead alloc'd") so use-after-destroy is visible.
int pthread_condattr_destroy(pthread_condattr_t* attr) {
  if (attr == NULL) {
    return EINVAL;
  }
  *attr = 0xdeada11d;
  return 0;
}
// We use one bit in condition variable values as the 'shared' flag
// The rest is a counter.
#define COND_SHARED_MASK 0x0001
#define COND_COUNTER_INCREMENT 0x0002
#define COND_COUNTER_MASK (~COND_SHARED_MASK)
#define COND_IS_SHARED(c) (((c)->value & COND_SHARED_MASK) != 0)
// XXX *technically* there is a race condition that could allow
// XXX a signal to be missed. If thread A is preempted in _wait()
// XXX after unlocking the mutex and before waiting, and if other
// XXX threads call signal or broadcast UINT_MAX/2 times (exactly),
// XXX before thread A is scheduled again and calls futex_wait(),
// XXX then the signal will be lost.
// Initializes 'cond' with a zero counter; if 'attr' requests
// PTHREAD_PROCESS_SHARED, the COND_SHARED_MASK bit is set in the value word.
// Returns EINVAL for a NULL cond.
int pthread_cond_init(pthread_cond_t* cond, const pthread_condattr_t* attr) {
  if (cond == NULL) {
    return EINVAL;
  }

  cond->value = 0;

  if (attr != NULL && *attr == PTHREAD_PROCESS_SHARED) {
    cond->value |= COND_SHARED_MASK;
  }

  return 0;
}

// Poisons the value word (0xdeadc04d, "dead cond") so use-after-destroy is
// easier to spot. Returns EINVAL for a NULL cond.
int pthread_cond_destroy(pthread_cond_t* cond) {
  if (cond == NULL) {
    return EINVAL;
  }
  cond->value = 0xdeadc04d;
  return 0;
}
// This function is used by pthread_cond_broadcast and
// pthread_cond_signal to atomically decrement the counter
// then wake up 'counter' threads.
static int __pthread_cond_pulse(pthread_cond_t* cond, int counter) {
  if (__predict_false(cond == NULL)) {
    return EINVAL;
  }

  // Preserve the shared flag bit while bumping the counter field with a
  // compare-and-swap retry loop (the decrement wraps within the counter
  // bits; waiters only compare the word for equality, see the XXX note
  // above about the theoretical UINT_MAX/2 wrap race).
  long flags = (cond->value & ~COND_COUNTER_MASK);
  while (true) {
    long old_value = cond->value;
    long new_value = ((old_value - COND_COUNTER_INCREMENT) & COND_COUNTER_MASK) | flags;
    if (__bionic_cmpxchg(old_value, new_value, &cond->value) == 0) {
      break;
    }
  }

  // Ensure that all memory accesses previously made by this thread are
  // visible to the woken thread(s). On the other side, the "wait"
  // code will issue any necessary barriers when locking the mutex.
  //
  // This may not strictly be necessary -- if the caller follows
  // recommended practice and holds the mutex before signaling the cond
  // var, the mutex ops will provide correct semantics. If they don't
  // hold the mutex, they're subject to race conditions anyway.
  ANDROID_MEMBAR_FULL();

  __futex_wake_ex(&cond->value, COND_IS_SHARED(cond), counter);
  return 0;
}
// Core wait primitive: releases 'mutex', futex-waits on the condition's
// value word for up to 'reltime' (NULL = forever), then reacquires 'mutex'.
__LIBC_HIDDEN__
int __pthread_cond_timedwait_relative(pthread_cond_t* cond, pthread_mutex_t* mutex, const timespec* reltime) {
  // Snapshot the value word before unlocking: the futex only blocks if the
  // word still equals 'old_value', which is what closes the window against
  // a pulse that lands between the unlock and the wait.
  int old_value = cond->value;

  pthread_mutex_unlock(mutex);
  int status = __futex_wait_ex(&cond->value, COND_IS_SHARED(cond), old_value, reltime);
  pthread_mutex_lock(mutex);

  // Only a timeout is reported to the caller; interrupted or spurious wakes
  // return 0, so callers must re-check their predicate (standard cond-var
  // contract).
  if (status == (-ETIMEDOUT)) {
    return ETIMEDOUT;
  }
  return 0;
}
// Waits on 'cond' until the absolute deadline 'abstime' (measured against
// 'clock'), or forever when 'abstime' is NULL. The deadline is converted to
// a relative timeout; an already-expired deadline returns ETIMEDOUT
// immediately.
__LIBC_HIDDEN__
int __pthread_cond_timedwait(pthread_cond_t* cond, pthread_mutex_t* mutex, const timespec* abstime, clockid_t clock) {
  if (abstime == NULL) {
    return __pthread_cond_timedwait_relative(cond, mutex, NULL);
  }
  timespec relative_timeout;
  if (__timespec_to_absolute(&relative_timeout, abstime, clock) < 0) {
    return ETIMEDOUT;
  }
  return __pthread_cond_timedwait_relative(cond, mutex, &relative_timeout);
}
// Wakes all current waiters.
int pthread_cond_broadcast(pthread_cond_t* cond) {
  return __pthread_cond_pulse(cond, INT_MAX);
}

// Wakes at most one waiter.
int pthread_cond_signal(pthread_cond_t* cond) {
  return __pthread_cond_pulse(cond, 1);
}

// Untimed wait: NULL deadline means wait forever.
int pthread_cond_wait(pthread_cond_t* cond, pthread_mutex_t* mutex) {
  return __pthread_cond_timedwait(cond, mutex, NULL, CLOCK_REALTIME);
}

// POSIX timed wait; 'abstime' is an absolute CLOCK_REALTIME deadline.
int pthread_cond_timedwait(pthread_cond_t *cond, pthread_mutex_t * mutex, const timespec *abstime) {
  return __pthread_cond_timedwait(cond, mutex, abstime, CLOCK_REALTIME);
}

// TODO: this exists only for backward binary compatibility.
int pthread_cond_timedwait_monotonic(pthread_cond_t* cond, pthread_mutex_t* mutex, const timespec* abstime) {
  return __pthread_cond_timedwait(cond, mutex, abstime, CLOCK_MONOTONIC);
}

// Android extension: absolute deadline against CLOCK_MONOTONIC.
int pthread_cond_timedwait_monotonic_np(pthread_cond_t* cond, pthread_mutex_t* mutex, const timespec* abstime) {
  return __pthread_cond_timedwait(cond, mutex, abstime, CLOCK_MONOTONIC);
}

// Android extension: 'reltime' is a relative timeout, not a deadline.
int pthread_cond_timedwait_relative_np(pthread_cond_t* cond, pthread_mutex_t* mutex, const timespec* reltime) {
  return __pthread_cond_timedwait_relative(cond, mutex, reltime);
}

// Android extension: relative timeout given in milliseconds;
// timespec_from_ms (private/bionic_time_conversions.h) does the conversion.
int pthread_cond_timeout_np(pthread_cond_t* cond, pthread_mutex_t* mutex, unsigned ms) {
  timespec ts;
  timespec_from_ms(ts, ms);
  return __pthread_cond_timedwait_relative(cond, mutex, &ts);
}

View file

@ -82,13 +82,12 @@ void __init_tls(pthread_internal_t* thread) {
} }
} }
__LIBC_ABI_PRIVATE__
int _init_thread(pthread_internal_t* thread, bool add_to_thread_list) { int _init_thread(pthread_internal_t* thread, bool add_to_thread_list) {
int error = 0; int error = 0;
// Set the scheduling policy/priority of the thread. // Set the scheduling policy/priority of the thread.
if (thread->attr.sched_policy != SCHED_NORMAL) { if (thread->attr.sched_policy != SCHED_NORMAL) {
struct sched_param param; sched_param param;
param.sched_priority = thread->attr.sched_priority; param.sched_priority = thread->attr.sched_priority;
if (sched_setscheduler(thread->tid, thread->attr.sched_policy, &param) == -1) { if (sched_setscheduler(thread->tid, thread->attr.sched_policy, &param) == -1) {
#if __LP64__ #if __LP64__

View file

@ -0,0 +1,135 @@
/*
* Copyright (C) 2008 The Android Open Source Project
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <pthread.h>
#include <stdlib.h>
#include <sys/mman.h>
#include "pthread_internal.h"
extern "C" void _exit_with_stack_teardown(void*, size_t, int);
extern "C" void __exit(int);
/* CAVEAT: our implementation of pthread_cleanup_push/pop doesn't support C++ exceptions
 * and thread cancelation
 */

// Pushes a cleanup record onto the calling thread's cleanup stack (a singly
// linked list threaded through the caller-provided __pthread_cleanup_t,
// which lives on the caller's stack frame).
void __pthread_cleanup_push(__pthread_cleanup_t* c, __pthread_cleanup_func_t routine, void* arg) {
  pthread_internal_t* thread = __get_thread();
  c->__cleanup_routine = routine;
  c->__cleanup_arg = arg;
  c->__cleanup_prev = thread->cleanup_stack;
  thread->cleanup_stack = c;
}

// Pops the most recent cleanup record; runs its routine iff 'execute' is
// non-zero. The record is unlinked before the routine runs.
void __pthread_cleanup_pop(__pthread_cleanup_t* c, int execute) {
  pthread_internal_t* thread = __get_thread();
  thread->cleanup_stack = c->__cleanup_prev;
  if (execute) {
    c->__cleanup_routine(c->__cleanup_arg);
  }
}
// Terminates the calling thread: runs outstanding cleanup handlers, then TLS
// key destructors, tears down the alternate signal stack, then either frees
// the thread record (detached) or publishes 'retval' and signals any joiner
// (joinable), and finally exits the kernel thread. Never returns.
// The ordering of these steps matters; see the inline comments.
void pthread_exit(void* retval) {
  pthread_internal_t* thread = __get_thread();

  // Call the cleanup handlers first.
  while (thread->cleanup_stack) {
    __pthread_cleanup_t* c = thread->cleanup_stack;
    thread->cleanup_stack = c->__cleanup_prev;
    c->__cleanup_routine(c->__cleanup_arg);
  }

  // Call the TLS destructors. It is important to do that before removing this
  // thread from the global list. This will ensure that if someone else deletes
  // a TLS key, the corresponding value will be set to NULL in this thread's TLS
  // space (see pthread_key_delete).
  pthread_key_clean_all();

  if (thread->alternate_signal_stack != NULL) {
    // Tell the kernel to stop using the alternate signal stack.
    stack_t ss;
    ss.ss_sp = NULL;
    ss.ss_flags = SS_DISABLE;
    sigaltstack(&ss, NULL);

    // Free it.
    munmap(thread->alternate_signal_stack, SIGSTKSZ);
    thread->alternate_signal_stack = NULL;
  }

  // Keep track of what we need to know about the stack before we lose the pthread_internal_t.
  void* stack_base = thread->attr.stack_base;
  size_t stack_size = thread->attr.stack_size;
  bool user_allocated_stack = ((thread->attr.flags & PTHREAD_ATTR_FLAG_USER_ALLOCATED_STACK) != 0);

  // If the thread is detached, destroy the pthread_internal_t,
  // otherwise keep it in memory and signal any joiners.
  pthread_mutex_lock(&gThreadListLock);
  if (thread->attr.flags & PTHREAD_ATTR_FLAG_DETACHED) {
    _pthread_internal_remove_locked(thread);
  } else {
    // Make sure that the pthread_internal_t doesn't have stale pointers to a stack that
    // will be unmapped after the exit call below.
    if (!user_allocated_stack) {
      thread->attr.stack_base = NULL;
      thread->attr.stack_size = 0;
      thread->tls = NULL;
    }

    // Indicate that the thread has exited for joining threads.
    thread->attr.flags |= PTHREAD_ATTR_FLAG_ZOMBIE;
    thread->return_value = retval;  // Picked up by pthread_join().

    // Signal the joining thread if present.
    if (thread->attr.flags & PTHREAD_ATTR_FLAG_JOINED) {
      pthread_cond_signal(&thread->join_cond);
    }
  }
  pthread_mutex_unlock(&gThreadListLock);

  if (user_allocated_stack) {
    // Cleaning up this thread's stack is the creator's responsibility, not ours.
    __exit(0);
  } else {
    // We need to munmap the stack we're running on before calling exit.
    // That's not something we can do in C.

    // We don't want to take a signal after we've unmapped the stack.
    // That's one last thing we can handle in C.
    sigset_t mask;
    sigfillset(&mask);
    sigprocmask(SIG_SETMASK, &mask, NULL);

    _exit_with_stack_teardown(stack_base, stack_size, 0);
  }

  /* NOTREACHED, but we told the compiler this function is noreturn, and it doesn't believe us. */
  abort();
}

View file

@ -29,13 +29,8 @@
#define _PTHREAD_INTERNAL_H_ #define _PTHREAD_INTERNAL_H_
#include <pthread.h> #include <pthread.h>
#include <stdbool.h>
#include <sys/cdefs.h>
__BEGIN_DECLS struct pthread_internal_t {
typedef struct pthread_internal_t
{
struct pthread_internal_t* next; struct pthread_internal_t* next;
struct pthread_internal_t* prev; struct pthread_internal_t* prev;
pthread_attr_t attr; pthread_attr_t attr;
@ -55,12 +50,12 @@ typedef struct pthread_internal_t
*/ */
#define __BIONIC_DLERROR_BUFFER_SIZE 512 #define __BIONIC_DLERROR_BUFFER_SIZE 512
char dlerror_buffer[__BIONIC_DLERROR_BUFFER_SIZE]; char dlerror_buffer[__BIONIC_DLERROR_BUFFER_SIZE];
} pthread_internal_t; };
int _init_thread(pthread_internal_t* thread, bool add_to_thread_list); __LIBC_HIDDEN__ int _init_thread(pthread_internal_t* thread, bool add_to_thread_list);
void __init_tls(pthread_internal_t* thread); __LIBC_HIDDEN__ void __init_tls(pthread_internal_t* thread);
void _pthread_internal_add(pthread_internal_t* thread); __LIBC_HIDDEN__ void _pthread_internal_add(pthread_internal_t* thread);
pthread_internal_t* __get_thread(void); __LIBC_HIDDEN__ pthread_internal_t* __get_thread(void);
__LIBC_HIDDEN__ void pthread_key_clean_all(void); __LIBC_HIDDEN__ void pthread_key_clean_all(void);
__LIBC_HIDDEN__ void _pthread_internal_remove_locked(pthread_internal_t* thread); __LIBC_HIDDEN__ void _pthread_internal_remove_locked(pthread_internal_t* thread);
@ -91,12 +86,13 @@ __LIBC_HIDDEN__ void _pthread_internal_remove_locked(pthread_internal_t* thread)
__LIBC_HIDDEN__ extern pthread_internal_t* gThreadList; __LIBC_HIDDEN__ extern pthread_internal_t* gThreadList;
__LIBC_HIDDEN__ extern pthread_mutex_t gThreadListLock; __LIBC_HIDDEN__ extern pthread_mutex_t gThreadListLock;
/* needed by fork.c */ __LIBC_HIDDEN__ int __timespec_to_absolute(timespec*, const timespec*, clockid_t);
extern void __timer_table_start_stop(int stop);
extern void __bionic_atfork_run_prepare();
extern void __bionic_atfork_run_child();
extern void __bionic_atfork_run_parent();
__END_DECLS /* needed by fork.c */
__LIBC_HIDDEN__ extern void __timer_table_start_stop(int);
__LIBC_HIDDEN__ extern void __bionic_atfork_run_prepare();
__LIBC_HIDDEN__ extern void __bionic_atfork_run_child();
__LIBC_HIDDEN__ extern void __bionic_atfork_run_parent();
__LIBC_HIDDEN__ extern int __pthread_settid(pthread_t, pid_t);
#endif /* _PTHREAD_INTERNAL_H_ */ #endif /* _PTHREAD_INTERNAL_H_ */

View file

@ -28,11 +28,13 @@
#include "pthread_internal.h" #include "pthread_internal.h"
#include "private/bionic_futex.h"
#include "private/bionic_pthread.h"
#include "private/bionic_tls.h" #include "private/bionic_tls.h"
#include "private/ScopedPthreadMutexLocker.h" #include "private/ScopedPthreadMutexLocker.h"
__LIBC_HIDDEN__ pthread_internal_t* gThreadList = NULL; pthread_internal_t* gThreadList = NULL;
__LIBC_HIDDEN__ pthread_mutex_t gThreadListLock = PTHREAD_MUTEX_INITIALIZER; pthread_mutex_t gThreadListLock = PTHREAD_MUTEX_INITIALIZER;
void _pthread_internal_remove_locked(pthread_internal_t* thread) { void _pthread_internal_remove_locked(pthread_internal_t* thread) {
if (thread->next != NULL) { if (thread->next != NULL) {
@ -51,7 +53,7 @@ void _pthread_internal_remove_locked(pthread_internal_t* thread) {
} }
} }
__LIBC_ABI_PRIVATE__ void _pthread_internal_add(pthread_internal_t* thread) { void _pthread_internal_add(pthread_internal_t* thread) {
ScopedPthreadMutexLocker locker(&gThreadListLock); ScopedPthreadMutexLocker locker(&gThreadListLock);
// We insert at the head. // We insert at the head.
@ -63,6 +65,42 @@ __LIBC_ABI_PRIVATE__ void _pthread_internal_add(pthread_internal_t* thread) {
gThreadList = thread; gThreadList = thread;
} }
__LIBC_ABI_PRIVATE__ pthread_internal_t* __get_thread(void) { pthread_internal_t* __get_thread(void) {
return reinterpret_cast<pthread_internal_t*>(__get_tls()[TLS_SLOT_THREAD_ID]); return reinterpret_cast<pthread_internal_t*>(__get_tls()[TLS_SLOT_THREAD_ID]);
} }
// Returns the kernel tid stored in the thread's descriptor. No validation:
// 't' must be a live pthread_t (which is really a pthread_internal_t*).
pid_t __pthread_gettid(pthread_t t) {
  return reinterpret_cast<pthread_internal_t*>(t)->tid;
}

// Stores 'tid' into the thread's descriptor. Rejects a 0 handle with EINVAL;
// otherwise performs no validation.
int __pthread_settid(pthread_t t, pid_t tid) {
  if (t == 0) {
    return EINVAL;
  }

  reinterpret_cast<pthread_internal_t*>(t)->tid = tid;

  return 0;
}
// Initialize 'ts' with the difference between 'abstime' and the current time
// according to 'clock'. Returns -1 if 'abstime' has already passed, or 0
// otherwise. (Despite the name, this converts an absolute time to a
// relative one.)
int __timespec_to_absolute(timespec* ts, const timespec* abstime, clockid_t clock) {
  clock_gettime(clock, ts);
  // Compute abstime - now, normalizing the nanosecond field into [0, 1e9).
  long nsec = abstime->tv_nsec - ts->tv_nsec;
  long sec = abstime->tv_sec - ts->tv_sec;
  if (nsec < 0) {
    nsec += 1000000000;
    --sec;
  }
  ts->tv_sec = sec;
  ts->tv_nsec = nsec;
  return ((sec < 0) || (nsec < 0)) ? -1 : 0;
}
// Wakes up to 'val' waiters on 'ftx'. 'pshared' selects the cross-process
// futex op versus the cheaper process-private variant.
int __futex_wake_ex(volatile void* ftx, int pshared, int val) {
  return __futex_syscall3(ftx, pshared ? FUTEX_WAKE : FUTEX_WAKE_PRIVATE, val);
}

// Blocks on 'ftx' while its value equals 'val', for up to 'timeout'
// (NULL = forever); returns the raw negative-errno futex result.
int __futex_wait_ex(volatile void* ftx, int pshared, int val, const timespec* timeout) {
  return __futex_syscall4(ftx, pshared ? FUTEX_WAIT : FUTEX_WAIT_PRIVATE, val, timeout);
}

View file

@ -45,119 +45,6 @@
extern void pthread_debug_mutex_lock_check(pthread_mutex_t *mutex); extern void pthread_debug_mutex_lock_check(pthread_mutex_t *mutex);
extern void pthread_debug_mutex_unlock_check(pthread_mutex_t *mutex); extern void pthread_debug_mutex_unlock_check(pthread_mutex_t *mutex);
extern void _exit_with_stack_teardown(void * stackBase, size_t stackSize, int status);
extern void __exit(int status);
int __futex_wake_ex(volatile void *ftx, int pshared, int val)
{
return __futex_syscall3(ftx, pshared ? FUTEX_WAKE : FUTEX_WAKE_PRIVATE, val);
}
int __futex_wait_ex(volatile void *ftx, int pshared, int val, const struct timespec *timeout)
{
return __futex_syscall4(ftx, pshared ? FUTEX_WAIT : FUTEX_WAIT_PRIVATE, val, timeout);
}
/* CAVEAT: our implementation of pthread_cleanup_push/pop doesn't support
 * C++ exceptions and thread cancelation.
 *
 * Push cleanup record 'c' onto the calling thread's cleanup-handler stack.
 */
void __pthread_cleanup_push( __pthread_cleanup_t* c,
                             __pthread_cleanup_func_t routine,
                             void* arg )
{
    pthread_internal_t* self = __get_thread();
    c->__cleanup_routine = routine;
    c->__cleanup_arg = arg;
    /* Link the new record in front of the current stack top. */
    c->__cleanup_prev = self->cleanup_stack;
    self->cleanup_stack = c;
}
/* Pop record 'c' off the calling thread's cleanup stack, running its
 * routine when 'execute' is non-zero.
 */
void __pthread_cleanup_pop( __pthread_cleanup_t* c, int execute )
{
    pthread_internal_t* self = __get_thread();
    self->cleanup_stack = c->__cleanup_prev;
    if (execute) {
        c->__cleanup_routine(c->__cleanup_arg);
    }
}
// Terminate the calling thread: run its cleanup handlers and TLS key
// destructors, release the alternate signal stack, then either destroy the
// thread record (detached) or publish 'retval' for a joiner. Does not return.
void pthread_exit(void* retval) {
  pthread_internal_t* thread = __get_thread();

  // Call the cleanup handlers first.
  while (thread->cleanup_stack) {
    __pthread_cleanup_t* c = thread->cleanup_stack;
    thread->cleanup_stack = c->__cleanup_prev;
    c->__cleanup_routine(c->__cleanup_arg);
  }

  // Call the TLS destructors. It is important to do that before removing this
  // thread from the global list. This will ensure that if someone else deletes
  // a TLS key, the corresponding value will be set to NULL in this thread's TLS
  // space (see pthread_key_delete).
  pthread_key_clean_all();

  if (thread->alternate_signal_stack != NULL) {
    // Tell the kernel to stop using the alternate signal stack.
    stack_t ss;
    ss.ss_sp = NULL;
    ss.ss_flags = SS_DISABLE;
    sigaltstack(&ss, NULL);

    // Free it.
    munmap(thread->alternate_signal_stack, SIGSTKSZ);
    thread->alternate_signal_stack = NULL;
  }

  // Keep track of what we need to know about the stack before we lose the pthread_internal_t.
  void* stack_base = thread->attr.stack_base;
  size_t stack_size = thread->attr.stack_size;
  bool user_allocated_stack = ((thread->attr.flags & PTHREAD_ATTR_FLAG_USER_ALLOCATED_STACK) != 0);

  // If the thread is detached, destroy the pthread_internal_t,
  // otherwise keep it in memory and signal any joiners.
  pthread_mutex_lock(&gThreadListLock);
  if (thread->attr.flags & PTHREAD_ATTR_FLAG_DETACHED) {
    _pthread_internal_remove_locked(thread);
  } else {
    // Make sure that the thread struct doesn't have stale pointers to a stack that
    // will be unmapped after the exit call below.
    if (!user_allocated_stack) {
      thread->attr.stack_base = NULL;
      thread->attr.stack_size = 0;
      thread->tls = NULL;
    }

    // Indicate that the thread has exited for joining threads.
    thread->attr.flags |= PTHREAD_ATTR_FLAG_ZOMBIE;
    thread->return_value = retval;

    // Signal the joining thread if present.
    if (thread->attr.flags & PTHREAD_ATTR_FLAG_JOINED) {
      pthread_cond_signal(&thread->join_cond);
    }
  }
  pthread_mutex_unlock(&gThreadListLock);

  if (user_allocated_stack) {
    // Cleaning up this thread's stack is the creator's responsibility, not ours.
    __exit(0);
  } else {
    // We need to munmap the stack we're running on before calling exit.
    // That's not something we can do in C.
    // We don't want to take a signal after we've unmapped the stack.
    // That's one last thing we can handle in C.
    sigset_t mask;
    sigfillset(&mask);
    sigprocmask(SIG_SETMASK, &mask, NULL);
    _exit_with_stack_teardown(stack_base, stack_size, 0);
  }
}
/* a mutex is implemented as a 32-bit integer holding the following fields /* a mutex is implemented as a 32-bit integer holding the following fields
* *
* bits: name description * bits: name description
@ -387,8 +274,7 @@ int pthread_mutexattr_setpshared(pthread_mutexattr_t *attr, int pshared)
return EINVAL; return EINVAL;
} }
int pthread_mutexattr_getpshared(pthread_mutexattr_t *attr, int *pshared) int pthread_mutexattr_getpshared(const pthread_mutexattr_t* attr, int* pshared) {
{
if (!attr || !pshared) if (!attr || !pshared)
return EINVAL; return EINVAL;
@ -802,31 +688,10 @@ int pthread_mutex_trylock(pthread_mutex_t *mutex)
return err; return err;
} }
/* initialize 'ts' with the difference between 'abstime' and the current time
* according to 'clock'. Returns -1 if abstime already expired, or 0 otherwise.
*/
static int
__timespec_to_absolute(struct timespec* ts, const struct timespec* abstime, clockid_t clock)
{
clock_gettime(clock, ts);
ts->tv_sec = abstime->tv_sec - ts->tv_sec;
ts->tv_nsec = abstime->tv_nsec - ts->tv_nsec;
if (ts->tv_nsec < 0) {
ts->tv_sec--;
ts->tv_nsec += 1000000000;
}
if ((ts->tv_nsec < 0) || (ts->tv_sec < 0))
return -1;
return 0;
}
/* initialize 'abstime' to the current time according to 'clock' plus 'msecs' /* initialize 'abstime' to the current time according to 'clock' plus 'msecs'
* milliseconds. * milliseconds.
*/ */
static void static void __timespec_to_relative_msec(timespec* abstime, unsigned msecs, clockid_t clock) {
__timespec_to_relative_msec(struct timespec* abstime, unsigned msecs, clockid_t clock)
{
clock_gettime(clock, abstime); clock_gettime(clock, abstime);
abstime->tv_sec += msecs/1000; abstime->tv_sec += msecs/1000;
abstime->tv_nsec += (msecs%1000)*1000000; abstime->tv_nsec += (msecs%1000)*1000000;
@ -840,8 +705,8 @@ __LIBC_HIDDEN__
int pthread_mutex_lock_timeout_np_impl(pthread_mutex_t *mutex, unsigned msecs) int pthread_mutex_lock_timeout_np_impl(pthread_mutex_t *mutex, unsigned msecs)
{ {
clockid_t clock = CLOCK_MONOTONIC; clockid_t clock = CLOCK_MONOTONIC;
struct timespec abstime; timespec abstime;
struct timespec ts; timespec ts;
int mvalue, mtype, tid, shared; int mvalue, mtype, tid, shared;
/* compute absolute expiration time */ /* compute absolute expiration time */
@ -900,7 +765,7 @@ int pthread_mutex_lock_timeout_np_impl(pthread_mutex_t *mutex, unsigned msecs)
} }
for (;;) { for (;;) {
struct timespec ts; timespec ts;
/* if the value is 'unlocked', try to acquire it directly */ /* if the value is 'unlocked', try to acquire it directly */
/* NOTE: put state to 2 since we know there is contention */ /* NOTE: put state to 2 since we know there is contention */
@ -977,299 +842,3 @@ int pthread_mutex_destroy(pthread_mutex_t *mutex)
mutex->value = 0xdead10cc; mutex->value = 0xdead10cc;
return 0; return 0;
} }
/* Initialize 'attr' to its default, process-private state. */
int pthread_condattr_init(pthread_condattr_t *attr)
{
    if (attr == NULL) {
        return EINVAL;
    }
    *attr = PTHREAD_PROCESS_PRIVATE;
    return 0;
}
/* Copy the process-shared setting of 'attr' into '*pshared'. */
int pthread_condattr_getpshared(pthread_condattr_t *attr, int *pshared)
{
    if (attr == NULL || pshared == NULL) {
        return EINVAL;
    }
    *pshared = *attr;
    return 0;
}
/* Set the process-shared attribute of 'attr'. Only PTHREAD_PROCESS_SHARED
 * and PTHREAD_PROCESS_PRIVATE are accepted.
 */
int pthread_condattr_setpshared(pthread_condattr_t *attr, int pshared)
{
    if (attr == NULL) {
        return EINVAL;
    }
    if (pshared != PTHREAD_PROCESS_SHARED && pshared != PTHREAD_PROCESS_PRIVATE) {
        return EINVAL;
    }
    *attr = pshared;
    return 0;
}
/* Poison 'attr' so accidental use after destroy is detectable. */
int pthread_condattr_destroy(pthread_condattr_t *attr)
{
    if (attr == NULL) {
        return EINVAL;
    }
    *attr = 0xdeada11d;
    return 0;
}
/* We use one bit in condition variable values as the 'shared' flag
* The rest is a counter.
*/
#define COND_SHARED_MASK 0x0001
#define COND_COUNTER_INCREMENT 0x0002
#define COND_COUNTER_MASK (~COND_SHARED_MASK)
#define COND_IS_SHARED(c) (((c)->value & COND_SHARED_MASK) != 0)
/* XXX *technically* there is a race condition that could allow
* XXX a signal to be missed. If thread A is preempted in _wait()
* XXX after unlocking the mutex and before waiting, and if other
* XXX threads call signal or broadcast UINT_MAX/2 times (exactly),
* XXX before thread A is scheduled again and calls futex_wait(),
* XXX then the signal will be lost.
*/
/* Initialize 'cond': the counter starts at zero, and the shared flag from
 * 'attr' (if any) is recorded in the low bit of the value word.
 */
int pthread_cond_init(pthread_cond_t *cond,
                      const pthread_condattr_t *attr)
{
    if (cond == NULL) {
        return EINVAL;
    }
    cond->value = 0;
    if (attr != NULL && *attr == PTHREAD_PROCESS_SHARED) {
        cond->value |= COND_SHARED_MASK;
    }
    return 0;
}
/* Poison the value word so use-after-destroy shows a recognizable pattern. */
int pthread_cond_destroy(pthread_cond_t *cond)
{
    if (cond == NULL) {
        return EINVAL;
    }
    cond->value = 0xdeadc04d;
    return 0;
}
/* This function is used by pthread_cond_broadcast and
 * pthread_cond_signal to atomically decrement the counter
 * then wake-up 'counter' threads.
 */
static int
__pthread_cond_pulse(pthread_cond_t *cond, int counter)
{
    long flags;

    if (__predict_false(cond == NULL))
        return EINVAL;

    /* Preserve the shared bit; only the counter field is changed below. */
    flags = (cond->value & ~COND_COUNTER_MASK);
    for (;;) {
        long oldval = cond->value;
        long newval = ((oldval - COND_COUNTER_INCREMENT) & COND_COUNTER_MASK)
                      | flags;
        /* Retry until the compare-and-swap publishes the decremented counter. */
        if (__bionic_cmpxchg(oldval, newval, &cond->value) == 0)
            break;
    }

    /*
     * Ensure that all memory accesses previously made by this thread are
     * visible to the woken thread(s). On the other side, the "wait"
     * code will issue any necessary barriers when locking the mutex.
     *
     * This may not strictly be necessary -- if the caller follows
     * recommended practice and holds the mutex before signaling the cond
     * var, the mutex ops will provide correct semantics. If they don't
     * hold the mutex, they're subject to race conditions anyway.
     */
    ANDROID_MEMBAR_FULL();

    __futex_wake_ex(&cond->value, COND_IS_SHARED(cond), counter);
    return 0;
}
/* Wake every thread currently waiting on 'cond'. */
int pthread_cond_broadcast(pthread_cond_t *cond)
{
    return __pthread_cond_pulse(cond, INT_MAX);
}

/* Wake a single thread waiting on 'cond'. */
int pthread_cond_signal(pthread_cond_t *cond)
{
    return __pthread_cond_pulse(cond, 1);
}
/* An untimed wait is just a timed wait with no deadline. */
int pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
{
    return pthread_cond_timedwait(cond, mutex, NULL);
}
int __pthread_cond_timedwait_relative(pthread_cond_t *cond,
                                      pthread_mutex_t * mutex,
                                      const struct timespec *reltime)
{
    int status;
    /* Snapshot the value BEFORE releasing the mutex: if a signal/broadcast
     * changes it between the unlock and the futex wait, the kernel sees a
     * mismatch and returns immediately, so the wake-up is not missed. */
    int oldvalue = cond->value;

    pthread_mutex_unlock(mutex);
    status = __futex_wait_ex(&cond->value, COND_IS_SHARED(cond), oldvalue, reltime);
    /* Re-acquire the mutex before returning, as POSIX requires. */
    pthread_mutex_lock(mutex);

    /* Only a timeout is reported; any other wake-up (including spurious
     * ones) returns 0 and the caller re-checks its predicate. */
    if (status == (-ETIMEDOUT)) return ETIMEDOUT;
    return 0;
}
/* Convert the absolute deadline 'abstime' (measured on 'clock') into a
 * relative timeout and wait. A NULL 'abstime' means wait forever.
 */
int __pthread_cond_timedwait(pthread_cond_t *cond,
                             pthread_mutex_t * mutex,
                             const struct timespec *abstime,
                             clockid_t clock)
{
    struct timespec ts;
    struct timespec *tsp = NULL;

    if (abstime != NULL) {
        /* A deadline already in the past times out immediately. */
        if (__timespec_to_absolute(&ts, abstime, clock) < 0) {
            return ETIMEDOUT;
        }
        tsp = &ts;
    }

    return __pthread_cond_timedwait_relative(cond, mutex, tsp);
}
/* POSIX: the default clock for condition-variable deadlines is CLOCK_REALTIME. */
int pthread_cond_timedwait(pthread_cond_t *cond,
                           pthread_mutex_t * mutex,
                           const struct timespec *abstime)
{
    return __pthread_cond_timedwait(cond, mutex, abstime, CLOCK_REALTIME);
}
/* this one exists only for backward binary compatibility */
int pthread_cond_timedwait_monotonic(pthread_cond_t *cond,
                                     pthread_mutex_t * mutex,
                                     const struct timespec *abstime)
{
    return __pthread_cond_timedwait(cond, mutex, abstime, CLOCK_MONOTONIC);
}

/* Like pthread_cond_timedwait, but 'abstime' is measured against
 * CLOCK_MONOTONIC instead of CLOCK_REALTIME. */
int pthread_cond_timedwait_monotonic_np(pthread_cond_t *cond,
                                        pthread_mutex_t * mutex,
                                        const struct timespec *abstime)
{
    return __pthread_cond_timedwait(cond, mutex, abstime, CLOCK_MONOTONIC);
}
/* Wait with a timeout relative to now rather than an absolute deadline. */
int pthread_cond_timedwait_relative_np(pthread_cond_t *cond,
                                       pthread_mutex_t * mutex,
                                       const struct timespec *reltime)
{
    return __pthread_cond_timedwait_relative(cond, mutex, reltime);
}
/* Android extension: wait with a timeout expressed in milliseconds. */
int pthread_cond_timeout_np(pthread_cond_t *cond,
                            pthread_mutex_t * mutex,
                            unsigned ms)
{
    struct timespec ts;

    timespec_from_ms(ts, ms);
    return __pthread_cond_timedwait_relative(cond, mutex, &ts);
}
/* NOTE: this implementation doesn't support a init function that throws a C++ exception
 * or calls fork()
 */
int pthread_once( pthread_once_t* once_control, void (*init_routine)(void) )
{
    volatile pthread_once_t* ocptr = once_control;

    /* PTHREAD_ONCE_INIT is 0, we use the following bit flags
     *
     *   bit 0 set  -> initialization is under way
     *   bit 1 set  -> initialization is complete
     */
#define ONCE_INITIALIZING           (1 << 0)
#define ONCE_COMPLETED              (1 << 1)

    /* First check if the once is already initialized. This will be the common
     * case and we want to make this as fast as possible. Note that this still
     * requires a load_acquire operation here to ensure that all the
     * stores performed by the initialization function are observable on
     * this CPU after we exit.
     * NOTE(review): ANDROID_MEMBAR_FULL is a full barrier standing in for the
     * acquire/release operations described in these comments.
     */
    if (__predict_true((*ocptr & ONCE_COMPLETED) != 0)) {
        ANDROID_MEMBAR_FULL();
        return 0;
    }

    for (;;) {
        /* Try to atomically set the INITIALIZING flag.
         * This requires a cmpxchg loop, and we may need
         * to exit prematurely if we detect that
         * COMPLETED is now set.
         */
        int32_t oldval, newval;

        do {
            oldval = *ocptr;
            if ((oldval & ONCE_COMPLETED) != 0)
                break;

            newval = oldval | ONCE_INITIALIZING;
        } while (__bionic_cmpxchg(oldval, newval, ocptr) != 0);

        if ((oldval & ONCE_COMPLETED) != 0) {
            /* We detected that COMPLETED was set while in our loop */
            ANDROID_MEMBAR_FULL();
            return 0;
        }

        if ((oldval & ONCE_INITIALIZING) == 0) {
            /* We got there first, we can jump out of the loop to
             * handle the initialization */
            break;
        }

        /* Another thread is running the initialization and hasn't completed
         * yet, so wait for it, then try again. */
        __futex_wait_ex(ocptr, 0, oldval, NULL);
    }

    /* call the initialization function. */
    (*init_routine)();

    /* Do a store_release indicating that initialization is complete */
    ANDROID_MEMBAR_FULL();
    *ocptr = ONCE_COMPLETED;

    /* Wake up any waiters, if any */
    __futex_wake_ex(ocptr, 0, INT_MAX);

    return 0;
}
/* Map an opaque pthread_t handle to the kernel tid it wraps. */
pid_t __pthread_gettid(pthread_t thid) {
  return ((pthread_internal_t*) thid)->tid;
}

/* Store 'tid' into the thread structure behind 'thid'.
 * Returns EINVAL for a null handle, 0 on success. */
int __pthread_settid(pthread_t thid, pid_t tid) {
  if (thid == 0) {
    return EINVAL;
  }
  ((pthread_internal_t*) thid)->tid = tid;
  return 0;
}

View file

@ -0,0 +1,100 @@
/*
* Copyright (C) 2008 The Android Open Source Project
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <pthread.h>
#include "private/bionic_atomic_inline.h"
#include "private/bionic_futex.h"
#define ONCE_INITIALIZING (1 << 0)
#define ONCE_COMPLETED (1 << 1)
/* NOTE: this implementation doesn't support a init function that throws a C++ exception
* or calls fork()
*/
// NOTE: this implementation doesn't support a init function that throws a C++ exception
// or calls fork()
int pthread_once(pthread_once_t* once_control, void (*init_routine)(void)) {
  volatile pthread_once_t* once_control_ptr = once_control;

  // PTHREAD_ONCE_INIT is 0, we use the following bit flags
  //   bit 0 set  -> initialization is under way
  //   bit 1 set  -> initialization is complete

  // First check if the once is already initialized. This will be the common
  // case and we want to make this as fast as possible. Note that this still
  // requires a load_acquire operation here to ensure that all the
  // stores performed by the initialization function are observable on
  // this CPU after we exit.
  // NOTE(review): ANDROID_MEMBAR_FULL is a full barrier standing in for the
  // acquire/release operations described in these comments.
  if (__predict_true((*once_control_ptr & ONCE_COMPLETED) != 0)) {
    ANDROID_MEMBAR_FULL();
    return 0;
  }

  while (true) {
    // Try to atomically set the INITIALIZING flag.
    // This requires a cmpxchg loop, and we may need
    // to exit prematurely if we detect that
    // COMPLETED is now set.
    int32_t old_value, new_value;

    do {
      old_value = *once_control_ptr;
      if ((old_value & ONCE_COMPLETED) != 0) {
        break;
      }

      new_value = old_value | ONCE_INITIALIZING;
    } while (__bionic_cmpxchg(old_value, new_value, once_control_ptr) != 0);

    if ((old_value & ONCE_COMPLETED) != 0) {
      // We detected that COMPLETED was set while in our loop.
      ANDROID_MEMBAR_FULL();
      return 0;
    }

    if ((old_value & ONCE_INITIALIZING) == 0) {
      // We got there first, we can jump out of the loop to handle the initialization.
      break;
    }

    // Another thread is running the initialization and hasn't completed
    // yet, so wait for it, then try again.
    __futex_wait_ex(once_control_ptr, 0, old_value, NULL);
  }

  // Call the initialization function.
  (*init_routine)();

  // Do a store_release indicating that initialization is complete.
  ANDROID_MEMBAR_FULL();
  *once_control_ptr = ONCE_COMPLETED;

  // Wake up any waiters, if any.
  __futex_wake_ex(once_control_ptr, 0, INT_MAX);

  return 0;
}

View file

@ -91,8 +91,7 @@ int pthread_rwlockattr_setpshared(pthread_rwlockattr_t *attr, int pshared)
} }
} }
int pthread_rwlockattr_getpshared(pthread_rwlockattr_t *attr, int *pshared) int pthread_rwlockattr_getpshared(const pthread_rwlockattr_t* attr, int* pshared) {
{
if (!attr || !pshared) if (!attr || !pshared)
return EINVAL; return EINVAL;
@ -195,10 +194,62 @@ static void _pthread_rwlock_pulse(pthread_rwlock_t *rwlock)
pthread_cond_broadcast(&rwlock->cond); pthread_cond_broadcast(&rwlock->cond);
} }
static int __pthread_rwlock_timedrdlock(pthread_rwlock_t* rwlock, const timespec* abs_timeout) {
int ret = 0;
int pthread_rwlock_rdlock(pthread_rwlock_t *rwlock) if (rwlock == NULL) {
{ return EINVAL;
return pthread_rwlock_timedrdlock(rwlock, NULL); }
pthread_mutex_lock(&rwlock->lock);
int tid = __get_thread()->tid;
if (__predict_false(!read_precondition(rwlock, tid))) {
rwlock->pendingReaders += 1;
do {
ret = pthread_cond_timedwait(&rwlock->cond, &rwlock->lock, abs_timeout);
} while (ret == 0 && !read_precondition(rwlock, tid));
rwlock->pendingReaders -= 1;
if (ret != 0) {
goto EXIT;
}
}
++rwlock->numLocks;
EXIT:
pthread_mutex_unlock(&rwlock->lock);
return ret;
}
static int __pthread_rwlock_timedwrlock(pthread_rwlock_t* rwlock, const timespec* abs_timeout) {
int ret = 0;
if (rwlock == NULL) {
return EINVAL;
}
pthread_mutex_lock(&rwlock->lock);
int tid = __get_thread()->tid;
if (__predict_false(!write_precondition(rwlock, tid))) {
// If we can't read yet, wait until the rwlock is unlocked
// and try again. Increment pendingReaders to get the
// cond broadcast when that happens.
rwlock->pendingWriters += 1;
do {
ret = pthread_cond_timedwait(&rwlock->cond, &rwlock->lock, abs_timeout);
} while (ret == 0 && !write_precondition(rwlock, tid));
rwlock->pendingWriters -= 1;
if (ret != 0) {
goto EXIT;
}
}
++rwlock->numLocks;
rwlock->writerThreadId = tid;
EXIT:
pthread_mutex_unlock(&rwlock->lock);
return ret;
}
int pthread_rwlock_rdlock(pthread_rwlock_t* rwlock) {
return __pthread_rwlock_timedrdlock(rwlock, NULL);
} }
int pthread_rwlock_tryrdlock(pthread_rwlock_t *rwlock) int pthread_rwlock_tryrdlock(pthread_rwlock_t *rwlock)
@ -212,40 +263,18 @@ int pthread_rwlock_tryrdlock(pthread_rwlock_t *rwlock)
if (__predict_false(!read_precondition(rwlock, __get_thread()->tid))) if (__predict_false(!read_precondition(rwlock, __get_thread()->tid)))
ret = EBUSY; ret = EBUSY;
else else
rwlock->numLocks ++; ++rwlock->numLocks;
pthread_mutex_unlock(&rwlock->lock); pthread_mutex_unlock(&rwlock->lock);
return ret; return ret;
} }
int pthread_rwlock_timedrdlock(pthread_rwlock_t *rwlock, const struct timespec *abs_timeout) int pthread_rwlock_timedrdlock(pthread_rwlock_t* rwlock, const timespec* abs_timeout) {
{ return __pthread_rwlock_timedrdlock(rwlock, abs_timeout);
int ret = 0;
if (rwlock == NULL)
return EINVAL;
pthread_mutex_lock(&rwlock->lock);
int tid = __get_thread()->tid;
if (__predict_false(!read_precondition(rwlock, tid))) {
rwlock->pendingReaders += 1;
do {
ret = pthread_cond_timedwait(&rwlock->cond, &rwlock->lock, abs_timeout);
} while (ret == 0 && !read_precondition(rwlock, tid));
rwlock->pendingReaders -= 1;
if (ret != 0)
goto EXIT;
}
rwlock->numLocks ++;
EXIT:
pthread_mutex_unlock(&rwlock->lock);
return ret;
} }
int pthread_rwlock_wrlock(pthread_rwlock_t* rwlock) {
int pthread_rwlock_wrlock(pthread_rwlock_t *rwlock) return __pthread_rwlock_timedwrlock(rwlock, NULL);
{
return pthread_rwlock_timedwrlock(rwlock, NULL);
} }
int pthread_rwlock_trywrlock(pthread_rwlock_t *rwlock) int pthread_rwlock_trywrlock(pthread_rwlock_t *rwlock)
@ -260,43 +289,17 @@ int pthread_rwlock_trywrlock(pthread_rwlock_t *rwlock)
if (__predict_false(!write_precondition(rwlock, tid))) { if (__predict_false(!write_precondition(rwlock, tid))) {
ret = EBUSY; ret = EBUSY;
} else { } else {
rwlock->numLocks ++; ++rwlock->numLocks;
rwlock->writerThreadId = tid; rwlock->writerThreadId = tid;
} }
pthread_mutex_unlock(&rwlock->lock); pthread_mutex_unlock(&rwlock->lock);
return ret; return ret;
} }
int pthread_rwlock_timedwrlock(pthread_rwlock_t *rwlock, const struct timespec *abs_timeout) int pthread_rwlock_timedwrlock(pthread_rwlock_t* rwlock, const timespec* abs_timeout) {
{ return __pthread_rwlock_timedwrlock(rwlock, abs_timeout);
int ret = 0;
if (rwlock == NULL)
return EINVAL;
pthread_mutex_lock(&rwlock->lock);
int tid = __get_thread()->tid;
if (__predict_false(!write_precondition(rwlock, tid))) {
/* If we can't read yet, wait until the rwlock is unlocked
* and try again. Increment pendingReaders to get the
* cond broadcast when that happens.
*/
rwlock->pendingWriters += 1;
do {
ret = pthread_cond_timedwait(&rwlock->cond, &rwlock->lock, abs_timeout);
} while (ret == 0 && !write_precondition(rwlock, tid));
rwlock->pendingWriters -= 1;
if (ret != 0)
goto EXIT;
}
rwlock->numLocks ++;
rwlock->writerThreadId = tid;
EXIT:
pthread_mutex_unlock(&rwlock->lock);
return ret;
} }
int pthread_rwlock_unlock(pthread_rwlock_t *rwlock) int pthread_rwlock_unlock(pthread_rwlock_t *rwlock)
{ {
int ret = 0; int ret = 0;

View file

@ -31,7 +31,7 @@
#include "private/ErrnoRestorer.h" #include "private/ErrnoRestorer.h"
#include "pthread_accessor.h" #include "pthread_accessor.h"
int pthread_setschedparam(pthread_t t, int policy, struct sched_param const* param) { int pthread_setschedparam(pthread_t t, int policy, const sched_param* param) {
ErrnoRestorer errno_restorer; ErrnoRestorer errno_restorer;
pthread_accessor thread(t); pthread_accessor thread(t);

View file

@ -25,33 +25,31 @@
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE. * SUCH DAMAGE.
*/ */
#include <sys/types.h> #include <sys/types.h>
#include <sys/ptrace.h> #include <sys/ptrace.h>
extern long __ptrace(int request, pid_t pid, void *addr, void *data); extern "C" long __ptrace(int request, pid_t pid, void* addr, void* data);
long ptrace(int request, pid_t pid, void * addr, void * data) long ptrace(int request, pid_t pid, void* addr, void* data) {
{ switch (request) {
switch (request) { case PTRACE_PEEKUSR:
case PTRACE_PEEKUSR: case PTRACE_PEEKTEXT:
case PTRACE_PEEKTEXT: case PTRACE_PEEKDATA:
case PTRACE_PEEKDATA: {
{ long word;
long word; long ret = __ptrace(request, pid, addr, &word);
long ret; if (ret == 0) {
return word;
ret = __ptrace(request, pid, addr, &word); } else {
if (ret == 0) { // __ptrace already set errno for us.
return word; return -1;
} else { }
// __ptrace will set errno for us
return -1;
}
}
default:
return __ptrace(request, pid, addr, data);
} }
default:
return __ptrace(request, pid, addr, data);
}
} }
/* /*
@ -63,6 +61,7 @@ long ptrace(int request, pid_t pid, void * addr, void * data)
#define ATTRIBUTES __attribute__((noinline)) #define ATTRIBUTES __attribute__((noinline))
#endif #endif
void ATTRIBUTES _thread_created_hook(pid_t thread_id) extern "C" void _thread_created_hook(pid_t) ATTRIBUTES;
{
void _thread_created_hook(pid_t) {
} }

View file

@ -29,17 +29,18 @@
/* some simple glue used to make the BSD atexit code happy */ /* some simple glue used to make the BSD atexit code happy */
#include <pthread.h> #include <pthread.h>
#include "pthread_internal.h"
static pthread_mutex_t gAtExitLock = PTHREAD_MUTEX_INITIALIZER; static pthread_mutex_t gAtExitLock = PTHREAD_MUTEX_INITIALIZER;
void _thread_atexit_lock( void ) __BEGIN_DECLS
{ __LIBC_HIDDEN__ void _thread_atexit_lock();
pthread_mutex_lock( &gAtExitLock ); __LIBC_HIDDEN__ void _thread_atexit_unlock();
__END_DECLS
void _thread_atexit_lock() {
pthread_mutex_lock(&gAtExitLock);
} }
void _thread_atexit_unlock( void ) void _thread_atexit_unlock() {
{ pthread_mutex_unlock(&gAtExitLock);
pthread_mutex_unlock( &gAtExitLock );
} }

View file

@ -33,11 +33,8 @@
#include <stdio.h> #include <stdio.h>
#include <string.h> #include <string.h>
extern int __pthread_cond_timedwait(pthread_cond_t*, pthread_mutex_t*, const struct timespec*, extern int __pthread_cond_timedwait(pthread_cond_t*, pthread_mutex_t*, const timespec*, clockid_t);
clockid_t); extern int __pthread_cond_timedwait_relative(pthread_cond_t*, pthread_mutex_t*, const timespec*);
extern int __pthread_cond_timedwait_relative(pthread_cond_t*, pthread_mutex_t*,
const struct timespec*);
// Normal (i.e. non-SIGEV_THREAD) timers are created directly by the kernel // Normal (i.e. non-SIGEV_THREAD) timers are created directly by the kernel
// and are passed as is to/from the caller. // and are passed as is to/from the caller.
@ -110,8 +107,8 @@ struct thr_timer {
pthread_cond_t cond; /* signal a state change to thread */ pthread_cond_t cond; /* signal a state change to thread */
int volatile done; /* set by timer_delete */ int volatile done; /* set by timer_delete */
int volatile stopped; /* set by _start_stop() */ int volatile stopped; /* set by _start_stop() */
struct timespec volatile expires; /* next expiration time, or 0 */ timespec volatile expires; /* next expiration time, or 0 */
struct timespec volatile period; /* reload value, or 0 */ timespec volatile period; /* reload value, or 0 */
int volatile overruns; /* current number of overruns */ int volatile overruns; /* current number of overruns */
}; };
@ -240,7 +237,7 @@ static pthread_once_t __timer_table_once = PTHREAD_ONCE_INIT;
static thr_timer_table_t* __timer_table; static thr_timer_table_t* __timer_table;
static void __timer_table_init(void) { static void __timer_table_init(void) {
__timer_table = calloc(1, sizeof(*__timer_table)); __timer_table = reinterpret_cast<thr_timer_table_t*>(calloc(1, sizeof(*__timer_table)));
if (__timer_table != NULL) { if (__timer_table != NULL) {
thr_timer_table_init(__timer_table); thr_timer_table_init(__timer_table);
} }
@ -258,7 +255,7 @@ static thr_timer_table_t* __timer_table_get(void) {
** requirements: the timers of fork child processes must be ** requirements: the timers of fork child processes must be
** disarmed but not deleted. ** disarmed but not deleted.
**/ **/
__LIBC_HIDDEN__ void __timer_table_start_stop(int stop) { void __timer_table_start_stop(int stop) {
// We access __timer_table directly so we don't create it if it doesn't yet exist. // We access __timer_table directly so we don't create it if it doesn't yet exist.
thr_timer_table_start_stop(__timer_table, stop); thr_timer_table_start_stop(__timer_table, stop);
} }
@ -286,7 +283,7 @@ thr_timer_unlock( thr_timer_t* t )
} }
static __inline__ void timespec_add(struct timespec* a, const struct timespec* b) { static __inline__ void timespec_add(timespec* a, const timespec* b) {
a->tv_sec += b->tv_sec; a->tv_sec += b->tv_sec;
a->tv_nsec += b->tv_nsec; a->tv_nsec += b->tv_nsec;
if (a->tv_nsec >= 1000000000) { if (a->tv_nsec >= 1000000000) {
@ -295,7 +292,7 @@ static __inline__ void timespec_add(struct timespec* a, const struct timespec* b
} }
} }
static __inline__ void timespec_sub(struct timespec* a, const struct timespec* b) { static __inline__ void timespec_sub(timespec* a, const timespec* b) {
a->tv_sec -= b->tv_sec; a->tv_sec -= b->tv_sec;
a->tv_nsec -= b->tv_nsec; a->tv_nsec -= b->tv_nsec;
if (a->tv_nsec < 0) { if (a->tv_nsec < 0) {
@ -304,15 +301,15 @@ static __inline__ void timespec_sub(struct timespec* a, const struct timespec* b
} }
} }
static __inline__ void timespec_zero(struct timespec* a) { static __inline__ void timespec_zero(timespec* a) {
a->tv_sec = a->tv_nsec = 0; a->tv_sec = a->tv_nsec = 0;
} }
static __inline__ int timespec_is_zero(const struct timespec* a) { static __inline__ int timespec_is_zero(const timespec* a) {
return (a->tv_sec == 0 && a->tv_nsec == 0); return (a->tv_sec == 0 && a->tv_nsec == 0);
} }
static __inline__ int timespec_cmp(const struct timespec* a, const struct timespec* b) { static __inline__ int timespec_cmp(const timespec* a, const timespec* b) {
if (a->tv_sec < b->tv_sec) return -1; if (a->tv_sec < b->tv_sec) return -1;
if (a->tv_sec > b->tv_sec) return +1; if (a->tv_sec > b->tv_sec) return +1;
if (a->tv_nsec < b->tv_nsec) return -1; if (a->tv_nsec < b->tv_nsec) return -1;
@ -320,7 +317,7 @@ static __inline__ int timespec_cmp(const struct timespec* a, const struct timesp
return 0; return 0;
} }
static __inline__ int timespec_cmp0(const struct timespec* a) { static __inline__ int timespec_cmp0(const timespec* a) {
if (a->tv_sec < 0) return -1; if (a->tv_sec < 0) return -1;
if (a->tv_sec > 0) return +1; if (a->tv_sec > 0) return +1;
if (a->tv_nsec < 0) return -1; if (a->tv_nsec < 0) return -1;
@ -330,15 +327,15 @@ static __inline__ int timespec_cmp0(const struct timespec* a) {
/** POSIX TIMERS APIs */ /** POSIX TIMERS APIs */
extern int __timer_create(clockid_t, struct sigevent*, timer_t*); extern "C" int __timer_create(clockid_t, sigevent*, timer_t*);
extern int __timer_delete(timer_t); extern "C" int __timer_delete(timer_t);
extern int __timer_gettime(timer_t, struct itimerspec*); extern "C" int __timer_gettime(timer_t, itimerspec*);
extern int __timer_settime(timer_t, int, const struct itimerspec*, struct itimerspec*); extern "C" int __timer_settime(timer_t, int, const itimerspec*, itimerspec*);
extern int __timer_getoverrun(timer_t); extern "C" int __timer_getoverrun(timer_t);
static void* timer_thread_start(void*); static void* timer_thread_start(void*);
int timer_create(clockid_t clock_id, struct sigevent* evp, timer_t* timer_id) { int timer_create(clockid_t clock_id, sigevent* evp, timer_t* timer_id) {
// If not a SIGEV_THREAD timer, the kernel can handle it without our help. // If not a SIGEV_THREAD timer, the kernel can handle it without our help.
if (__predict_true(evp == NULL || evp->sigev_notify != SIGEV_THREAD)) { if (__predict_true(evp == NULL || evp->sigev_notify != SIGEV_THREAD)) {
return __timer_create(clock_id, evp, timer_id); return __timer_create(clock_id, evp, timer_id);
@ -351,7 +348,7 @@ int timer_create(clockid_t clock_id, struct sigevent* evp, timer_t* timer_id) {
} }
// Check that the clock id is supported by the kernel. // Check that the clock id is supported by the kernel.
struct timespec dummy; timespec dummy;
if (clock_gettime(clock_id, &dummy) < 0 && errno == EINVAL) { if (clock_gettime(clock_id, &dummy) < 0 && errno == EINVAL) {
return -1; return -1;
} }
@ -435,34 +432,26 @@ timer_delete( timer_t id )
/* return the relative time until the next expiration, or 0 if /* return the relative time until the next expiration, or 0 if
* the timer is disarmed */ * the timer is disarmed */
static void static void timer_gettime_internal(thr_timer_t* timer, itimerspec* spec) {
timer_gettime_internal( thr_timer_t* timer, timespec diff = const_cast<timespec&>(timer->expires);
struct itimerspec* spec) if (!timespec_is_zero(&diff)) {
{ timespec now;
struct timespec diff;
diff = timer->expires; clock_gettime(timer->clock, &now);
if (!timespec_is_zero(&diff)) timespec_sub(&diff, &now);
{
struct timespec now;
clock_gettime( timer->clock, &now ); /* in case of overrun, return 0 */
timespec_sub(&diff, &now); if (timespec_cmp0(&diff) < 0) {
timespec_zero(&diff);
/* in case of overrun, return 0 */
if (timespec_cmp0(&diff) < 0) {
timespec_zero(&diff);
}
} }
}
spec->it_value = diff; spec->it_value = diff;
spec->it_interval = timer->period; spec->it_interval = const_cast<timespec&>(timer->period);
} }
int int timer_gettime(timer_t id, itimerspec* ospec) {
timer_gettime( timer_t id, struct itimerspec* ospec )
{
if (ospec == NULL) { if (ospec == NULL) {
errno = EINVAL; errno = EINVAL;
return -1; return -1;
@ -486,11 +475,7 @@ timer_gettime( timer_t id, struct itimerspec* ospec )
int int
timer_settime( timer_t id, timer_settime(timer_t id, int flags, const itimerspec* spec, itimerspec* ospec) {
int flags,
const struct itimerspec* spec,
struct itimerspec* ospec )
{
if (spec == NULL) { if (spec == NULL) {
errno = EINVAL; errno = EINVAL;
return -1; return -1;
@ -500,7 +485,7 @@ timer_settime( timer_t id,
return __timer_settime( id, flags, spec, ospec ); return __timer_settime( id, flags, spec, ospec );
} else { } else {
thr_timer_t* timer = thr_timer_from_id(id); thr_timer_t* timer = thr_timer_from_id(id);
struct timespec expires, now; timespec expires, now;
if (timer == NULL) { if (timer == NULL) {
errno = EINVAL; errno = EINVAL;
@ -526,8 +511,8 @@ timer_settime( timer_t id,
expires = now; expires = now;
} }
} }
timer->expires = expires; const_cast<timespec&>(timer->expires) = expires;
timer->period = spec->it_interval; const_cast<timespec&>(timer->period) = spec->it_interval;
thr_timer_unlock( timer ); thr_timer_unlock( timer );
/* signal the change to the thread */ /* signal the change to the thread */
@ -561,7 +546,7 @@ timer_getoverrun(timer_t id)
static void* timer_thread_start(void* arg) { static void* timer_thread_start(void* arg) {
thr_timer_t* timer = arg; thr_timer_t* timer = reinterpret_cast<thr_timer_t*>(arg);
thr_timer_lock(timer); thr_timer_lock(timer);
@ -572,8 +557,8 @@ static void* timer_thread_start(void* arg) {
// We loop until timer->done is set in timer_delete(). // We loop until timer->done is set in timer_delete().
while (!timer->done) { while (!timer->done) {
struct timespec expires = timer->expires; timespec expires = const_cast<timespec&>(timer->expires);
struct timespec period = timer->period; timespec period = const_cast<timespec&>(timer->period);
// If the timer is stopped or disarmed, wait indefinitely // If the timer is stopped or disarmed, wait indefinitely
// for a state change from timer_settime/_delete/_start_stop. // for a state change from timer_settime/_delete/_start_stop.
@ -584,13 +569,13 @@ static void* timer_thread_start(void* arg) {
// Otherwise, we need to do a timed wait until either a // Otherwise, we need to do a timed wait until either a
// state change of the timer expiration time. // state change of the timer expiration time.
struct timespec now; timespec now;
clock_gettime(timer->clock, &now); clock_gettime(timer->clock, &now);
if (timespec_cmp(&expires, &now) > 0) { if (timespec_cmp(&expires, &now) > 0) {
// Cool, there was no overrun, so compute the // Cool, there was no overrun, so compute the
// relative timeout as 'expires - now', then wait. // relative timeout as 'expires - now', then wait.
struct timespec diff = expires; timespec diff = expires;
timespec_sub(&diff, &now); timespec_sub(&diff, &now);
int ret = __pthread_cond_timedwait_relative(&timer->cond, &timer->mutex, &diff); int ret = __pthread_cond_timedwait_relative(&timer->cond, &timer->mutex, &diff);
@ -627,7 +612,7 @@ static void* timer_thread_start(void* arg) {
} else { } else {
timespec_zero(&expires); timespec_zero(&expires);
} }
timer->expires = expires; const_cast<timespec&>(timer->expires) = expires;
// Now call the timer callback function. Release the // Now call the timer callback function. Release the
// lock to allow the function to modify the timer setting // lock to allow the function to modify the timer setting

View file

@ -25,6 +25,7 @@
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE. * SUCH DAMAGE.
*/ */
#ifndef _PTHREAD_H_ #ifndef _PTHREAD_H_
#define _PTHREAD_H_ #define _PTHREAD_H_
@ -34,12 +35,8 @@
#include <limits.h> #include <limits.h>
#include <sys/types.h> #include <sys/types.h>
/* typedef struct {
* Types int volatile value;
*/
typedef struct
{
int volatile value;
} pthread_mutex_t; } pthread_mutex_t;
#define __PTHREAD_MUTEX_INIT_VALUE 0 #define __PTHREAD_MUTEX_INIT_VALUE 0
@ -61,241 +58,167 @@ enum {
PTHREAD_MUTEX_DEFAULT = PTHREAD_MUTEX_NORMAL PTHREAD_MUTEX_DEFAULT = PTHREAD_MUTEX_NORMAL
}; };
typedef struct {
int volatile value;
typedef struct
{
int volatile value;
} pthread_cond_t; } pthread_cond_t;
typedef struct #define PTHREAD_COND_INITIALIZER {0}
{
uint32_t flags; typedef struct {
void * stack_base; uint32_t flags;
size_t stack_size; void* stack_base;
size_t guard_size; size_t stack_size;
int32_t sched_policy; size_t guard_size;
int32_t sched_priority; int32_t sched_policy;
int32_t sched_priority;
} pthread_attr_t; } pthread_attr_t;
typedef long pthread_mutexattr_t; typedef long pthread_mutexattr_t;
typedef long pthread_condattr_t; typedef long pthread_condattr_t;
typedef int pthread_rwlockattr_t;
typedef struct {
pthread_mutex_t lock;
pthread_cond_t cond;
int numLocks;
int writerThreadId;
int pendingReaders;
int pendingWriters;
void* reserved[4]; /* for future extensibility */
} pthread_rwlock_t;
#define PTHREAD_RWLOCK_INITIALIZER { PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 0, 0, 0, 0, { NULL, NULL, NULL, NULL } }
typedef int pthread_key_t; typedef int pthread_key_t;
typedef long pthread_t; typedef long pthread_t;
typedef volatile int pthread_once_t; typedef volatile int pthread_once_t;
/* #define PTHREAD_ONCE_INIT 0
* Defines
*/
#define PTHREAD_COND_INITIALIZER {0}
#define PTHREAD_STACK_MIN (2 * PAGE_SIZE) #define PTHREAD_STACK_MIN (2 * PAGE_SIZE)
#define PTHREAD_CREATE_DETACHED 0x00000001 #define PTHREAD_CREATE_DETACHED 0x00000001
#define PTHREAD_CREATE_JOINABLE 0x00000000 #define PTHREAD_CREATE_JOINABLE 0x00000000
#define PTHREAD_ONCE_INIT 0
#define PTHREAD_PROCESS_PRIVATE 0 #define PTHREAD_PROCESS_PRIVATE 0
#define PTHREAD_PROCESS_SHARED 1 #define PTHREAD_PROCESS_SHARED 1
#define PTHREAD_SCOPE_SYSTEM 0 #define PTHREAD_SCOPE_SYSTEM 0
#define PTHREAD_SCOPE_PROCESS 1 #define PTHREAD_SCOPE_PROCESS 1
/* __BEGIN_DECLS
* Prototypes
*/
#ifdef __cplusplus
extern "C" {
#endif
int pthread_attr_init(pthread_attr_t * attr); int pthread_atfork(void (*)(void), void (*)(void), void(*)(void));
int pthread_attr_destroy(pthread_attr_t * attr);
int pthread_attr_setdetachstate(pthread_attr_t * attr, int state); int pthread_attr_destroy(pthread_attr_t*) __nonnull((1));
int pthread_attr_getdetachstate(pthread_attr_t const * attr, int * state); int pthread_attr_getdetachstate(const pthread_attr_t*, int*) __nonnull((1, 2));
int pthread_attr_getguardsize(const pthread_attr_t*, size_t*) __nonnull((1, 2));
int pthread_attr_getschedparam(const pthread_attr_t*, struct sched_param*) __nonnull((1, 2));
int pthread_attr_getschedpolicy(const pthread_attr_t*, int*) __nonnull((1, 2));
int pthread_attr_getscope(const pthread_attr_t*, int*) __nonnull((1, 2));
int pthread_attr_getstack(const pthread_attr_t*, void**, size_t*) __nonnull((1, 2, 3));
int pthread_attr_getstacksize(const pthread_attr_t*, size_t*) __nonnull((1, 2));
int pthread_attr_init(pthread_attr_t*) __nonnull((1));
int pthread_attr_setdetachstate(pthread_attr_t*, int) __nonnull((1));
int pthread_attr_setguardsize(pthread_attr_t*, size_t) __nonnull((1));
int pthread_attr_setschedparam(pthread_attr_t*, const struct sched_param*) __nonnull((1, 2));
int pthread_attr_setschedpolicy(pthread_attr_t*, int) __nonnull((1));
int pthread_attr_setscope(pthread_attr_t*, int) __nonnull((1));
int pthread_attr_setstack(pthread_attr_t*, void*, size_t) __nonnull((1));
int pthread_attr_setstacksize(pthread_attr_t * attr, size_t stack_size) __nonnull((1));
int pthread_attr_setschedpolicy(pthread_attr_t * attr, int policy); int pthread_condattr_destroy(pthread_condattr_t*) __nonnull((1));
int pthread_attr_getschedpolicy(pthread_attr_t const * attr, int * policy); int pthread_condattr_getpshared(const pthread_condattr_t*, int*) __nonnull((1, 2));
int pthread_condattr_init(pthread_condattr_t*) __nonnull((1));
int pthread_condattr_setpshared(pthread_condattr_t*, int) __nonnull((1));
int pthread_attr_setschedparam(pthread_attr_t * attr, struct sched_param const * param); int pthread_cond_broadcast(pthread_cond_t*) __nonnull((1));
int pthread_attr_getschedparam(pthread_attr_t const * attr, struct sched_param * param); int pthread_cond_destroy(pthread_cond_t*) __nonnull((1));
int pthread_cond_init(pthread_cond_t*, const pthread_condattr_t*) __nonnull((1));
int pthread_cond_signal(pthread_cond_t*) __nonnull((1));
int pthread_cond_timedwait(pthread_cond_t*, pthread_mutex_t*, const struct timespec*) __nonnull((1, 2, 3));
int pthread_cond_wait(pthread_cond_t*, pthread_mutex_t*) __nonnull((1, 2));
int pthread_attr_setstacksize(pthread_attr_t * attr, size_t stack_size); int pthread_create(pthread_t*, pthread_attr_t const*, void *(*)(void*), void*) __nonnull((1, 3));
int pthread_attr_getstacksize(pthread_attr_t const * attr, size_t * stack_size); int pthread_detach(pthread_t);
void pthread_exit(void*) __noreturn;
int pthread_attr_setstackaddr(pthread_attr_t * attr, void * stackaddr); int pthread_equal(pthread_t, pthread_t);
int pthread_attr_getstackaddr(pthread_attr_t const * attr, void ** stackaddr);
int pthread_attr_setstack(pthread_attr_t * attr, void * stackaddr, size_t stack_size); int pthread_getattr_np(pthread_t, pthread_attr_t*) __nonnull((2));
int pthread_attr_getstack(pthread_attr_t const * attr, void ** stackaddr, size_t * stack_size);
int pthread_attr_setguardsize(pthread_attr_t * attr, size_t guard_size); int pthread_getcpuclockid(pthread_t, clockid_t*) __nonnull((2));
int pthread_attr_getguardsize(pthread_attr_t const * attr, size_t * guard_size);
int pthread_attr_setscope(pthread_attr_t *attr, int scope); int pthread_getschedparam(pthread_t, int*, struct sched_param*) __nonnull((2, 3));
int pthread_attr_getscope(pthread_attr_t const *attr);
int pthread_getattr_np(pthread_t thid, pthread_attr_t * attr); void* pthread_getspecific(pthread_key_t);
int pthread_create(pthread_t *thread, pthread_attr_t const * attr, int pthread_join(pthread_t, void**);
void *(*start_routine)(void *), void * arg);
void pthread_exit(void * retval); int pthread_key_create(pthread_key_t*, void (*)(void*)) __nonnull((1));
int pthread_join(pthread_t thid, void ** ret_val); int pthread_key_delete(pthread_key_t);
int pthread_detach(pthread_t thid);
int pthread_kill(pthread_t, int);
int pthread_mutexattr_destroy(pthread_mutexattr_t*) __nonnull((1));
int pthread_mutexattr_getpshared(const pthread_mutexattr_t*, int*) __nonnull((1, 2));
int pthread_mutexattr_gettype(const pthread_mutexattr_t*, int*) __nonnull((1, 2));
int pthread_mutexattr_init(pthread_mutexattr_t*) __nonnull((1));
int pthread_mutexattr_setpshared(pthread_mutexattr_t*, int) __nonnull((1));
int pthread_mutexattr_settype(pthread_mutexattr_t*, int) __nonnull((1));
int pthread_mutex_destroy(pthread_mutex_t*) __nonnull((1));
int pthread_mutex_init(pthread_mutex_t*, const pthread_mutexattr_t*) __nonnull((1));
int pthread_mutex_lock(pthread_mutex_t*) __nonnull((1));
int pthread_mutex_timedlock(pthread_mutex_t*, struct timespec*) __nonnull((1, 2));
int pthread_mutex_trylock(pthread_mutex_t*) __nonnull((1));
int pthread_mutex_unlock(pthread_mutex_t*) __nonnull((1));
int pthread_once(pthread_once_t*, void (*)(void)) __nonnull((1, 2));
int pthread_rwlockattr_destroy(pthread_rwlockattr_t*) __nonnull((1));
int pthread_rwlockattr_getpshared(const pthread_rwlockattr_t*, int*) __nonnull((1, 2));
int pthread_rwlockattr_init(pthread_rwlockattr_t*) __nonnull((1));
int pthread_rwlockattr_setpshared(pthread_rwlockattr_t*, int) __nonnull((1));
int pthread_rwlock_destroy(pthread_rwlock_t*) __nonnull((1));
int pthread_rwlock_init(pthread_rwlock_t*, const pthread_rwlockattr_t*) __nonnull((1));
int pthread_rwlock_rdlock(pthread_rwlock_t*) __nonnull((1));
int pthread_rwlock_timedrdlock(pthread_rwlock_t*, const struct timespec*) __nonnull((1, 2));
int pthread_rwlock_timedwrlock(pthread_rwlock_t*, const struct timespec*) __nonnull((1, 2));
int pthread_rwlock_tryrdlock(pthread_rwlock_t*) __nonnull((1));
int pthread_rwlock_trywrlock(pthread_rwlock_t*) __nonnull((1));
int pthread_rwlock_unlock(pthread_rwlock_t *rwlock) __nonnull((1));
int pthread_rwlock_wrlock(pthread_rwlock_t*) __nonnull((1));
pthread_t pthread_self(void); pthread_t pthread_self(void);
int pthread_equal(pthread_t one, pthread_t two);
int pthread_getschedparam(pthread_t thid, int * policy, int pthread_setname_np(pthread_t, const char*) __nonnull((2));
struct sched_param * param);
int pthread_setschedparam(pthread_t thid, int policy,
struct sched_param const * param);
int pthread_mutexattr_init(pthread_mutexattr_t *attr); int pthread_setschedparam(pthread_t, int, const struct sched_param*) __nonnull((3));
int pthread_mutexattr_destroy(pthread_mutexattr_t *attr);
int pthread_mutexattr_gettype(const pthread_mutexattr_t *attr, int *type);
int pthread_mutexattr_settype(pthread_mutexattr_t *attr, int type);
int pthread_mutexattr_setpshared(pthread_mutexattr_t *attr, int pshared);
int pthread_mutexattr_getpshared(pthread_mutexattr_t *attr, int *pshared);
int pthread_mutex_init(pthread_mutex_t *mutex, int pthread_setspecific(pthread_key_t, const void*);
const pthread_mutexattr_t *attr);
int pthread_mutex_destroy(pthread_mutex_t *mutex);
int pthread_mutex_lock(pthread_mutex_t *mutex);
int pthread_mutex_unlock(pthread_mutex_t *mutex);
int pthread_mutex_trylock(pthread_mutex_t *mutex);
int pthread_mutex_timedlock(pthread_mutex_t *mutex, struct timespec* ts);
int pthread_condattr_init(pthread_condattr_t *attr); int pthread_sigmask(int, const sigset_t*, sigset_t*);
int pthread_condattr_getpshared(pthread_condattr_t *attr, int *pshared);
int pthread_condattr_setpshared(pthread_condattr_t* attr, int pshared);
int pthread_condattr_destroy(pthread_condattr_t *attr);
int pthread_cond_init(pthread_cond_t *cond, typedef void (*__pthread_cleanup_func_t)(void*);
const pthread_condattr_t *attr);
int pthread_cond_destroy(pthread_cond_t *cond);
int pthread_cond_broadcast(pthread_cond_t *cond);
int pthread_cond_signal(pthread_cond_t *cond);
int pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex);
int pthread_cond_timedwait(pthread_cond_t *cond,
pthread_mutex_t * mutex,
const struct timespec *abstime);
/* BIONIC: same as pthread_cond_timedwait, except the 'abstime' given refers
* to the CLOCK_MONOTONIC clock instead, to avoid any problems when
* the wall-clock time is changed brutally
*/
int pthread_cond_timedwait_monotonic_np(pthread_cond_t *cond,
pthread_mutex_t *mutex,
const struct timespec *abstime);
/* BIONIC: DEPRECATED. same as pthread_cond_timedwait_monotonic_np()
* unfortunately pthread_cond_timedwait_monotonic has shipped already
*/
int pthread_cond_timedwait_monotonic(pthread_cond_t *cond,
pthread_mutex_t *mutex,
const struct timespec *abstime);
#define HAVE_PTHREAD_COND_TIMEDWAIT_MONOTONIC 1
/* BIONIC: same as pthread_cond_timedwait, except the 'reltime' given refers
* is relative to the current time.
*/
int pthread_cond_timedwait_relative_np(pthread_cond_t *cond,
pthread_mutex_t *mutex,
const struct timespec *reltime);
#define HAVE_PTHREAD_COND_TIMEDWAIT_RELATIVE 1
int pthread_cond_timeout_np(pthread_cond_t *cond,
pthread_mutex_t * mutex,
unsigned msecs);
/* same as pthread_mutex_lock(), but will wait up to 'msecs' milli-seconds
* before returning. same return values than pthread_mutex_trylock though, i.e.
* returns EBUSY if the lock could not be acquired after the timeout
* expired.
*/
int pthread_mutex_lock_timeout_np(pthread_mutex_t *mutex, unsigned msecs);
/* read-write lock support */
typedef int pthread_rwlockattr_t;
typedef struct {
pthread_mutex_t lock;
pthread_cond_t cond;
int numLocks;
int writerThreadId;
int pendingReaders;
int pendingWriters;
void* reserved[4]; /* for future extensibility */
} pthread_rwlock_t;
#define PTHREAD_RWLOCK_INITIALIZER { PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 0, 0, 0, 0, { NULL, NULL, NULL, NULL } }
int pthread_rwlockattr_init(pthread_rwlockattr_t *attr);
int pthread_rwlockattr_destroy(pthread_rwlockattr_t *attr);
int pthread_rwlockattr_setpshared(pthread_rwlockattr_t *attr, int pshared);
int pthread_rwlockattr_getpshared(pthread_rwlockattr_t *attr, int *pshared);
int pthread_rwlock_init(pthread_rwlock_t *rwlock, const pthread_rwlockattr_t *attr);
int pthread_rwlock_destroy(pthread_rwlock_t *rwlock);
int pthread_rwlock_rdlock(pthread_rwlock_t *rwlock);
int pthread_rwlock_tryrdlock(pthread_rwlock_t *rwlock);
int pthread_rwlock_timedrdlock(pthread_rwlock_t *rwlock, const struct timespec *abs_timeout);
int pthread_rwlock_wrlock(pthread_rwlock_t *rwlock);
int pthread_rwlock_trywrlock(pthread_rwlock_t *rwlock);
int pthread_rwlock_timedwrlock(pthread_rwlock_t *rwlock, const struct timespec *abs_timeout);
int pthread_rwlock_unlock(pthread_rwlock_t *rwlock);
int pthread_key_create(pthread_key_t *key, void (*destructor_function)(void *));
int pthread_key_delete (pthread_key_t);
int pthread_setspecific(pthread_key_t key, const void *value);
void *pthread_getspecific(pthread_key_t key);
int pthread_kill(pthread_t tid, int sig);
int pthread_sigmask(int how, const sigset_t *set, sigset_t *oset);
int pthread_getcpuclockid(pthread_t tid, clockid_t *clockid);
int pthread_once(pthread_once_t *once_control, void (*init_routine)(void));
int pthread_setname_np(pthread_t thid, const char *thname);
int pthread_atfork(void (*prepare)(void), void (*parent)(void), void(*child)(void));
typedef void (*__pthread_cleanup_func_t)(void*);
typedef struct __pthread_cleanup_t { typedef struct __pthread_cleanup_t {
struct __pthread_cleanup_t* __cleanup_prev; struct __pthread_cleanup_t* __cleanup_prev;
__pthread_cleanup_func_t __cleanup_routine; __pthread_cleanup_func_t __cleanup_routine;
void* __cleanup_arg; void* __cleanup_arg;
} __pthread_cleanup_t; } __pthread_cleanup_t;
extern void __pthread_cleanup_push(__pthread_cleanup_t* c, extern void __pthread_cleanup_push(__pthread_cleanup_t* c, __pthread_cleanup_func_t, void*);
__pthread_cleanup_func_t routine, extern void __pthread_cleanup_pop(__pthread_cleanup_t*, int);
void* arg);
extern void __pthread_cleanup_pop(__pthread_cleanup_t* c,
int execute);
/* Believe or not, the definitions of pthread_cleanup_push and /* Believe or not, the definitions of pthread_cleanup_push and
* pthread_cleanup_pop below are correct. Posix states that these * pthread_cleanup_pop below are correct. Posix states that these
* can be implemented as macros that might introduce opening and * can be implemented as macros that might introduce opening and
* closing braces, and that using setjmp/longjmp/return/break/continue * closing braces, and that using setjmp/longjmp/return/break/continue
* between them results in undefined behaviour. * between them results in undefined behavior.
*
* And indeed, GLibc and other C libraries use a similar definition
*/ */
#define pthread_cleanup_push(routine, arg) \ #define pthread_cleanup_push(routine, arg) \
do { \ do { \
@ -304,10 +227,40 @@ extern void __pthread_cleanup_pop(__pthread_cleanup_t* c,
#define pthread_cleanup_pop(execute) \ #define pthread_cleanup_pop(execute) \
__pthread_cleanup_pop( &__cleanup, (execute)); \ __pthread_cleanup_pop( &__cleanup, (execute)); \
} while (0); } while (0); \
#ifdef __cplusplus
} /* extern "C" */ #if !defined(__LP64__)
#endif
/* Deprecated by POSIX. TODO: support for LP64 but add deprecated attribute instead? */
int pthread_attr_getstackaddr(const pthread_attr_t*, void**) __nonnull((1, 2)); /* deprecated */
int pthread_attr_setstackaddr(pthread_attr_t*, void*) __nonnull((1)); /* deprecated */
/* Bionic additions that are deprecated even in the 32-bit ABI. */
int pthread_cond_timedwait_monotonic_np(pthread_cond_t*, pthread_mutex_t*, const struct timespec*);
int pthread_cond_timedwait_monotonic(pthread_cond_t*, pthread_mutex_t*, const struct timespec*);
#define HAVE_PTHREAD_COND_TIMEDWAIT_MONOTONIC 1
/*
* Like pthread_cond_timedwait except 'reltime' is relative to the current time.
* TODO: not like glibc; include in LP64?
*/
int pthread_cond_timedwait_relative_np(pthread_cond_t*, pthread_mutex_t*, const struct timespec*);
#define HAVE_PTHREAD_COND_TIMEDWAIT_RELATIVE 1
/* TODO: not like glibc; include in LP64? */
int pthread_cond_timeout_np(pthread_cond_t*, pthread_mutex_t*, unsigned);
/* Like pthread_mutex_lock(), but will wait up to 'msecs' milli-seconds
* before returning. Same return values as pthread_mutex_trylock though, i.e.
* returns EBUSY if the lock could not be acquired after the timeout expired.
*
* TODO: replace with pthread_mutex_timedlock_np for LP64.
*/
int pthread_mutex_lock_timeout_np(pthread_mutex_t*, unsigned);
#endif /* !defined(__LP64__) */
__END_DECLS
#endif /* _PTHREAD_H_ */ #endif /* _PTHREAD_H_ */

View file

@ -51,11 +51,11 @@ extern int __futex_syscall4(volatile void *ftx, int op, int val, const struct ti
#define FUTEX_WAKE_PRIVATE (FUTEX_WAKE|FUTEX_PRIVATE_FLAG) #define FUTEX_WAKE_PRIVATE (FUTEX_WAKE|FUTEX_PRIVATE_FLAG)
#endif #endif
/* Like __futex_wait/wake, but take an additionnal 'pshared' argument. /* Like __futex_wait/wake, but take an additional 'pshared' argument.
* when non-0, this will use normal futexes. Otherwise, private futexes. * when non-0, this will use normal futexes. Otherwise, private futexes.
*/ */
extern int __futex_wake_ex(volatile void *ftx, int pshared, int val); extern int __futex_wake_ex(volatile void *ftx, int pshared, int val);
extern int __futex_wait_ex(volatile void *ftx, int pshared, int val, const struct timespec *timeout); extern int __futex_wait_ex(volatile void *ftx, int pshared, int val, const struct timespec *timeout);
__END_DECLS __END_DECLS

View file

@ -35,7 +35,6 @@ __BEGIN_DECLS
/* Internal, not an NDK API */ /* Internal, not an NDK API */
extern pid_t __pthread_gettid(pthread_t thid); extern pid_t __pthread_gettid(pthread_t thid);
extern int __pthread_settid(pthread_t thid, pid_t tid);
__END_DECLS __END_DECLS

View file

@ -464,3 +464,65 @@ TEST(pthread, pthread_attr_setstacksize) {
ASSERT_EQ(GetActualStackSize(attributes), 32*1024U); ASSERT_EQ(GetActualStackSize(attributes), 32*1024U);
#endif #endif
} }
TEST(pthread, pthread_rwlock_smoke) {
pthread_rwlock_t l;
ASSERT_EQ(0, pthread_rwlock_init(&l, NULL));
ASSERT_EQ(0, pthread_rwlock_rdlock(&l));
ASSERT_EQ(0, pthread_rwlock_unlock(&l));
ASSERT_EQ(0, pthread_rwlock_wrlock(&l));
ASSERT_EQ(0, pthread_rwlock_unlock(&l));
ASSERT_EQ(0, pthread_rwlock_destroy(&l));
}
static int gOnceFnCallCount = 0;
static void OnceFn() {
++gOnceFnCallCount;
}
TEST(pthread, pthread_once_smoke) {
pthread_once_t once_control = PTHREAD_ONCE_INIT;
ASSERT_EQ(0, pthread_once(&once_control, OnceFn));
ASSERT_EQ(0, pthread_once(&once_control, OnceFn));
ASSERT_EQ(1, gOnceFnCallCount);
}
static int gAtForkPrepareCalls = 0;
static void AtForkPrepare1() { gAtForkPrepareCalls = (gAtForkPrepareCalls << 4) | 1; }
static void AtForkPrepare2() { gAtForkPrepareCalls = (gAtForkPrepareCalls << 4) | 2; }
static int gAtForkParentCalls = 0;
static void AtForkParent1() { gAtForkParentCalls = (gAtForkParentCalls << 4) | 1; }
static void AtForkParent2() { gAtForkParentCalls = (gAtForkParentCalls << 4) | 2; }
static int gAtForkChildCalls = 0;
static void AtForkChild1() { gAtForkChildCalls = (gAtForkChildCalls << 4) | 1; }
static void AtForkChild2() { gAtForkChildCalls = (gAtForkChildCalls << 4) | 2; }
TEST(pthread, pthread_atfork) {
ASSERT_EQ(0, pthread_atfork(AtForkPrepare1, AtForkParent1, AtForkChild1));
ASSERT_EQ(0, pthread_atfork(AtForkPrepare2, AtForkParent2, AtForkChild2));
int pid = fork();
ASSERT_NE(-1, pid) << strerror(errno);
// Child and parent calls are made in the order they were registered.
if (pid == 0) {
ASSERT_EQ(0x12, gAtForkChildCalls);
_exit(0);
}
ASSERT_EQ(0x12, gAtForkParentCalls);
// Prepare calls are made in the reverse order.
ASSERT_EQ(0x21, gAtForkPrepareCalls);
}
TEST(pthread, pthread_attr_getscope) {
pthread_attr_t attr;
ASSERT_EQ(0, pthread_attr_init(&attr));
int scope;
ASSERT_EQ(0, pthread_attr_getscope(&attr, &scope));
ASSERT_EQ(PTHREAD_SCOPE_SYSTEM, scope);
}