Fix the pthread_setname_np test.

Fix the pthread_setname_np test to take into account that emulator kernels are
so old that they don't support setting the name of other threads.

The CLONE_DETACHED flag has been obsolete since 2.5 kernels.

Rename kernel_id to tid.

Fix the signature of __pthread_clone.

Clean up the clone and pthread_setname_np implementations slightly.

Change-Id: I16c2ff8845b67530544bbda9aa6618058603066d
This commit is contained in:
Elliott Hughes 2013-02-14 18:59:37 -08:00
parent 3e3b239d2b
commit 40eabe24e4
14 changed files with 106 additions and 115 deletions

View file

@ -29,10 +29,10 @@
#include <machine/asm.h>
#include <sys/linux-syscalls.h>
// int __pthread_clone(int (*fn)(void*), void* child_stack, int flags, void* arg);
// int __pthread_clone(void* (*fn)(void*), void* child_stack, int flags, void* arg);
ENTRY(__pthread_clone)
# Copy the args onto the new stack.
stmdb r1!, {r0, r3}
stmdb r1!, {r0, r3}
# The sys_clone system call only takes two arguments: 'flags' and 'child_stack'.
# 'child_stack' is already in r1, but we need to move 'flags' into position.

View file

@ -28,15 +28,15 @@
#include <linux/sched.h>
#include <sys/linux-syscalls.h>
#include <linux/errno.h>
.text
.type __pthread_clone, @function
.global __pthread_clone
.align 4
.ent __pthread_clone
/*
* int __pthread_clone(int (*fn)(void*), void *child_stack,
* int __pthread_clone(void* (*fn)(void*), void *child_stack,
* int flags, void *arg);
*/
@ -69,7 +69,7 @@ __pthread_clone:
bnez $a3,.L__error
beqz $v0,.L__thread_start
j $ra
.L__thread_start:
@ -77,7 +77,7 @@ __pthread_clone:
lw $a1,4($sp) # arg
addu $a2,$sp,16 # tls
# void __thread_entry(int (*func)(void*), void *arg, void *tls)
# void __thread_entry(void* (*func)(void*), void *arg, void *tls)
la $t9, __thread_entry
j $t9
@ -125,7 +125,7 @@ __bionic_clone:
bnez $a3,.L__error_bc
beqz $v0,.L__thread_start_bc
j $ra
.L__thread_start_bc:
@ -142,4 +142,3 @@ __bionic_clone:
j $t9
.end __bionic_clone

View file

@ -1,7 +1,7 @@
#include <machine/asm.h>
#include <sys/linux-syscalls.h>
// int __pthread_clone(int (*fn)(void*), void* tls, int flags, void* arg);
// int __pthread_clone(void* (*fn)(void*), void* tls, int flags, void* arg);
ENTRY(__pthread_clone)
pushl %ebx
pushl %ecx

View file

@ -58,9 +58,8 @@ clone(int (*fn)(void *), void *child_stack, int flags, void* arg, ...)
int *parent_tidptr = NULL;
void *new_tls = NULL;
int *child_tidptr = NULL;
int ret;
/* extract optional parameters - they are cummulative */
/* extract optional parameters - they are cumulative. */
va_start(args, arg);
if (flags & (CLONE_PARENT_SETTID|CLONE_SETTLS|CLONE_CHILD_SETTID)) {
parent_tidptr = va_arg(args, int*);
@ -73,6 +72,5 @@ clone(int (*fn)(void *), void *child_stack, int flags, void* arg, ...)
}
va_end(args);
ret = __bionic_clone(flags, child_stack, parent_tidptr, new_tls, child_tidptr, fn, arg);
return ret;
return __bionic_clone(flags, child_stack, parent_tidptr, new_tls, child_tidptr, fn, arg);
}

View file

@ -9,7 +9,7 @@
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
@ -19,7 +19,7 @@
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
@ -49,8 +49,8 @@ int fork(void)
__timer_table_start_stop(0);
__bionic_atfork_run_parent();
} else {
/* Adjusting the kernel id after a fork */
(void)__pthread_settid(pthread_self(), gettid());
// Fix the tid in the pthread_internal_t struct after a fork.
__pthread_settid(pthread_self(), gettid());
/*
* Newly created process must update cpu accounting.

View file

@ -73,14 +73,15 @@ void __libc_init_tls(KernelArgumentBlock& args) {
unsigned stack_size = 128 * 1024;
unsigned stack_bottom = stack_top - stack_size;
static void* tls[BIONIC_TLS_SLOTS];
static pthread_internal_t thread;
thread.tid = gettid();
thread.tls = tls;
pthread_attr_init(&thread.attr);
pthread_attr_setstack(&thread.attr, (void*) stack_bottom, stack_size);
_init_thread(&thread, gettid(), false);
static void* tls_area[BIONIC_TLS_SLOTS];
__init_tls(tls_area, &thread);
tls_area[TLS_SLOT_BIONIC_PREINIT] = &args;
_init_thread(&thread, false);
__init_tls(&thread);
tls[TLS_SLOT_BIONIC_PREINIT] = &args;
}
void __libc_init_common(KernelArgumentBlock& args) {

View file

@ -42,7 +42,7 @@
* - trying to get the read-lock while there is a writer blocks
* - a single thread can acquire the lock multiple times in the same mode
*
* - Posix states that behaviour is undefined it a thread tries to acquire
* - Posix states that behavior is undefined if a thread tries to acquire
* the lock in two distinct modes (e.g. write after read, or read after write).
*
* - This implementation tries to avoid writer starvation by making the readers
@ -61,12 +61,6 @@
extern pthread_internal_t* __get_thread(void);
/* Return a global kernel ID for the current thread */
static int __get_thread_id(void)
{
return __get_thread()->kernel_id;
}
int pthread_rwlockattr_init(pthread_rwlockattr_t *attr)
{
if (!attr)
@ -150,8 +144,6 @@ int pthread_rwlock_init(pthread_rwlock_t *rwlock, const pthread_rwlockattr_t *at
int pthread_rwlock_destroy(pthread_rwlock_t *rwlock)
{
int ret;
if (rwlock == NULL)
return EINVAL;
@ -164,7 +156,7 @@ int pthread_rwlock_destroy(pthread_rwlock_t *rwlock)
}
/* Returns TRUE iff we can acquire a read lock. */
static __inline__ int read_precondition(pthread_rwlock_t *rwlock, int thread_id)
static __inline__ int read_precondition(pthread_rwlock_t* rwlock, int tid)
{
/* We can't have the lock if any writer is waiting for it (writer bias).
* This tries to avoid starvation when there are multiple readers racing.
@ -174,7 +166,7 @@ static __inline__ int read_precondition(pthread_rwlock_t *rwlock, int thread_id
/* We can have the lock if there is no writer, or if we write-own it */
/* The second test avoids a self-dead lock in case of buggy code. */
if (rwlock->writerThreadId == 0 || rwlock->writerThreadId == thread_id)
if (rwlock->writerThreadId == 0 || rwlock->writerThreadId == tid)
return 1;
/* Otherwise, we can't have it */
@ -182,14 +174,14 @@ static __inline__ int read_precondition(pthread_rwlock_t *rwlock, int thread_id
}
/* returns TRUE iff we can acquire a write lock. */
static __inline__ int write_precondition(pthread_rwlock_t *rwlock, int thread_id)
static __inline__ int write_precondition(pthread_rwlock_t* rwlock, int tid)
{
/* We can get the lock if nobody has it */
if (rwlock->numLocks == 0)
return 1;
/* Or if we already own it */
if (rwlock->writerThreadId == thread_id)
if (rwlock->writerThreadId == tid)
return 1;
/* Otherwise, not */
@ -220,7 +212,7 @@ int pthread_rwlock_tryrdlock(pthread_rwlock_t *rwlock)
return EINVAL;
pthread_mutex_lock(&rwlock->lock);
if (__unlikely(!read_precondition(rwlock, __get_thread_id())))
if (__unlikely(!read_precondition(rwlock, __get_thread()->tid)))
ret = EBUSY;
else
rwlock->numLocks ++;
@ -231,18 +223,18 @@ int pthread_rwlock_tryrdlock(pthread_rwlock_t *rwlock)
int pthread_rwlock_timedrdlock(pthread_rwlock_t *rwlock, const struct timespec *abs_timeout)
{
int thread_id, ret = 0;
int ret = 0;
if (rwlock == NULL)
return EINVAL;
pthread_mutex_lock(&rwlock->lock);
thread_id = __get_thread_id();
if (__unlikely(!read_precondition(rwlock, thread_id))) {
int tid = __get_thread()->tid;
if (__unlikely(!read_precondition(rwlock, tid))) {
rwlock->pendingReaders += 1;
do {
ret = pthread_cond_timedwait(&rwlock->cond, &rwlock->lock, abs_timeout);
} while (ret == 0 && !read_precondition(rwlock, thread_id));
} while (ret == 0 && !read_precondition(rwlock, tid));
rwlock->pendingReaders -= 1;
if (ret != 0)
goto EXIT;
@ -261,18 +253,18 @@ int pthread_rwlock_wrlock(pthread_rwlock_t *rwlock)
int pthread_rwlock_trywrlock(pthread_rwlock_t *rwlock)
{
int thread_id, ret = 0;
int ret = 0;
if (rwlock == NULL)
return EINVAL;
pthread_mutex_lock(&rwlock->lock);
thread_id = __get_thread_id();
if (__unlikely(!write_precondition(rwlock, thread_id))) {
int tid = __get_thread()->tid;
if (__unlikely(!write_precondition(rwlock, tid))) {
ret = EBUSY;
} else {
rwlock->numLocks ++;
rwlock->writerThreadId = thread_id;
rwlock->writerThreadId = tid;
}
pthread_mutex_unlock(&rwlock->lock);
return ret;
@ -280,14 +272,14 @@ int pthread_rwlock_trywrlock(pthread_rwlock_t *rwlock)
int pthread_rwlock_timedwrlock(pthread_rwlock_t *rwlock, const struct timespec *abs_timeout)
{
int thread_id, ret = 0;
int ret = 0;
if (rwlock == NULL)
return EINVAL;
pthread_mutex_lock(&rwlock->lock);
thread_id = __get_thread_id();
if (__unlikely(!write_precondition(rwlock, thread_id))) {
int tid = __get_thread()->tid;
if (__unlikely(!write_precondition(rwlock, tid))) {
/* If we can't read yet, wait until the rwlock is unlocked
* and try again. Increment pendingReaders to get the
* cond broadcast when that happens.
@ -295,13 +287,13 @@ int pthread_rwlock_timedwrlock(pthread_rwlock_t *rwlock, const struct timespec *
rwlock->pendingWriters += 1;
do {
ret = pthread_cond_timedwait(&rwlock->cond, &rwlock->lock, abs_timeout);
} while (ret == 0 && !write_precondition(rwlock, thread_id));
} while (ret == 0 && !write_precondition(rwlock, tid));
rwlock->pendingWriters -= 1;
if (ret != 0)
goto EXIT;
}
rwlock->numLocks ++;
rwlock->writerThreadId = thread_id;
rwlock->writerThreadId = tid;
EXIT:
pthread_mutex_unlock(&rwlock->lock);
return ret;
@ -332,7 +324,7 @@ int pthread_rwlock_unlock(pthread_rwlock_t *rwlock)
* must be ourselves.
*/
else {
if (rwlock->writerThreadId != __get_thread_id()) {
if (rwlock->writerThreadId != __get_thread()->tid) {
ret = EPERM;
goto EXIT;
}

View file

@ -313,9 +313,9 @@ int pthread_getschedparam(pthread_t thid, int * policy,
int old_errno = errno;
pthread_internal_t * thread = (pthread_internal_t *)thid;
int err = sched_getparam(thread->kernel_id, param);
int err = sched_getparam(thread->tid, param);
if (!err) {
*policy = sched_getscheduler(thread->kernel_id);
*policy = sched_getscheduler(thread->tid);
} else {
err = errno;
errno = old_errno;
@ -330,7 +330,7 @@ int pthread_setschedparam(pthread_t thid, int policy,
int old_errno = errno;
int ret;
ret = sched_setscheduler(thread->kernel_id, policy, param);
ret = sched_setscheduler(thread->tid, policy, param);
if (ret < 0) {
ret = errno;
errno = old_errno;
@ -342,7 +342,7 @@ int pthread_setschedparam(pthread_t thid, int policy,
/* a mutex is implemented as a 32-bit integer holding the following fields
*
* bits: name description
* 31-16 tid owner thread's kernel id (recursive and errorcheck only)
* 31-16 tid owner thread's tid (recursive and errorcheck only)
* 15-14 type mutex type
* 13 shared process-shared flag
* 12-2 counter counter of recursive mutexes
@ -452,8 +452,8 @@ int pthread_setschedparam(pthread_t thid, int policy,
/* Mutex owner field:
*
* This is only used for recursive and errorcheck mutexes. It holds the
* kernel TID of the owning thread. Note that this works because the Linux
* kernel _only_ uses 16-bit values for thread ids.
* tid of the owning thread. Note that this works because the Linux
* kernel _only_ uses 16-bit values for tids.
*
* More specifically, it will wrap to 10000 when it reaches over 32768 for
* application processes. You can check this by running the following inside
@ -783,7 +783,7 @@ int pthread_mutex_lock_impl(pthread_mutex_t *mutex)
}
/* Do we already own this recursive or error-check mutex ? */
tid = __get_thread()->kernel_id;
tid = __get_thread()->tid;
if ( tid == MUTEX_OWNER_FROM_BITS(mvalue) )
return _recursive_increment(mutex, mvalue, mtype);
@ -877,7 +877,7 @@ int pthread_mutex_unlock_impl(pthread_mutex_t *mutex)
}
/* Do we already own this recursive or error-check mutex ? */
tid = __get_thread()->kernel_id;
tid = __get_thread()->tid;
if ( tid != MUTEX_OWNER_FROM_BITS(mvalue) )
return EPERM;
@ -951,7 +951,7 @@ int pthread_mutex_trylock_impl(pthread_mutex_t *mutex)
}
/* Do we already own this recursive or error-check mutex ? */
tid = __get_thread()->kernel_id;
tid = __get_thread()->tid;
if ( tid == MUTEX_OWNER_FROM_BITS(mvalue) )
return _recursive_increment(mutex, mvalue, mtype);
@ -1060,7 +1060,7 @@ int pthread_mutex_lock_timeout_np_impl(pthread_mutex_t *mutex, unsigned msecs)
}
/* Do we already own this recursive or error-check mutex ? */
tid = __get_thread()->kernel_id;
tid = __get_thread()->tid;
if ( tid == MUTEX_OWNER_FROM_BITS(mvalue) )
return _recursive_increment(mutex, mvalue, mtype);
@ -1379,7 +1379,7 @@ int pthread_kill(pthread_t tid, int sig)
int old_errno = errno;
pthread_internal_t * thread = (pthread_internal_t *)tid;
ret = tgkill(getpid(), thread->kernel_id, sig);
ret = tgkill(getpid(), thread->tid, sig);
if (ret < 0) {
ret = errno;
errno = old_errno;
@ -1397,7 +1397,7 @@ int pthread_getcpuclockid(pthread_t tid, clockid_t *clockid)
if (!thread)
return ESRCH;
*clockid = CLOCK_THREAD_CPUTIME_ID | (thread->kernel_id << CLOCK_IDTYPE_BITS);
*clockid = CLOCK_THREAD_CPUTIME_ID | (thread->tid << CLOCK_IDTYPE_BITS);
return 0;
}
@ -1474,25 +1474,18 @@ int pthread_once( pthread_once_t* once_control, void (*init_routine)(void) )
return 0;
}
/* Return the kernel thread ID for a pthread.
* This is only defined for implementations where pthread <-> kernel is 1:1, which this is.
* Not the same as pthread_getthreadid_np, which is commonly defined to be opaque.
* Internal, not an NDK API.
*/
pid_t __pthread_gettid(pthread_t thid)
{
pthread_internal_t* thread = (pthread_internal_t*)thid;
return thread->kernel_id;
pid_t __pthread_gettid(pthread_t thid) {
pthread_internal_t* thread = (pthread_internal_t*) thid;
return thread->tid;
}
int __pthread_settid(pthread_t thid, pid_t tid)
{
if (thid == 0)
return EINVAL;
int __pthread_settid(pthread_t thid, pid_t tid) {
if (thid == 0) {
return EINVAL;
}
pthread_internal_t* thread = (pthread_internal_t*)thid;
thread->kernel_id = tid;
pthread_internal_t* thread = (pthread_internal_t*) thid;
thread->tid = tid;
return 0;
return 0;
}

View file

@ -41,7 +41,7 @@
#include "private/ErrnoRestorer.h"
#include "private/ScopedPthreadMutexLocker.h"
extern "C" int __pthread_clone(int (*fn)(void*), void* child_stack, int flags, void* arg);
extern "C" int __pthread_clone(void* (*fn)(void*), void* child_stack, int flags, void* arg);
#ifdef __i386__
#define ATTRIBUTES __attribute__((noinline)) __attribute__((fastcall))
@ -57,25 +57,23 @@ static pthread_mutex_t gPthreadStackCreationLock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t gDebuggerNotificationLock = PTHREAD_MUTEX_INITIALIZER;
void __init_tls(void** tls, void* thread) {
((pthread_internal_t*) thread)->tls = tls;
void __init_tls(pthread_internal_t* thread) {
// Zero-initialize all the slots.
for (size_t i = 0; i < BIONIC_TLS_SLOTS; ++i) {
tls[i] = NULL;
thread->tls[i] = NULL;
}
// Slot 0 must point to itself. The x86 Linux kernel reads the TLS from %fs:0.
tls[TLS_SLOT_SELF] = tls;
tls[TLS_SLOT_THREAD_ID] = thread;
thread->tls[TLS_SLOT_SELF] = thread->tls;
thread->tls[TLS_SLOT_THREAD_ID] = thread;
// GCC looks in the TLS for the stack guard on x86, so copy it there from our global.
tls[TLS_SLOT_STACK_GUARD] = (void*) __stack_chk_guard;
thread->tls[TLS_SLOT_STACK_GUARD] = (void*) __stack_chk_guard;
__set_tls((void*) tls);
__set_tls(thread->tls);
}
// This trampoline is called from the assembly _pthread_clone() function.
extern "C" void __thread_entry(int (*func)(void*), void *arg, void **tls) {
extern "C" void __thread_entry(void* (*func)(void*), void* arg, void** tls) {
// Wait for our creating thread to release us. This lets it have time to
// notify gdb about this thread before we start doing anything.
// This also provides the memory barrier needed to ensure that all memory
@ -85,27 +83,26 @@ extern "C" void __thread_entry(int (*func)(void*), void *arg, void **tls) {
pthread_mutex_destroy(start_mutex);
pthread_internal_t* thread = (pthread_internal_t*) tls[TLS_SLOT_THREAD_ID];
__init_tls(tls, thread);
thread->tls = tls;
__init_tls(thread);
if ((thread->internal_flags & kPthreadInitFailed) != 0) {
pthread_exit(NULL);
}
int result = func(arg);
pthread_exit((void*) result);
void* result = func(arg);
pthread_exit(result);
}
__LIBC_ABI_PRIVATE__
int _init_thread(pthread_internal_t* thread, pid_t kernel_id, bool add_to_thread_list) {
int _init_thread(pthread_internal_t* thread, bool add_to_thread_list) {
int error = 0;
thread->kernel_id = kernel_id;
// Set the scheduling policy/priority of the thread.
if (thread->attr.sched_policy != SCHED_NORMAL) {
struct sched_param param;
param.sched_priority = thread->attr.sched_priority;
if (sched_setscheduler(kernel_id, thread->attr.sched_policy, &param) == -1) {
if (sched_setscheduler(thread->tid, thread->attr.sched_policy, &param) == -1) {
// For backwards compatibility reasons, we just warn about failures here.
// error = errno;
const char* msg = "pthread_create sched_setscheduler call failed: %s\n";
@ -198,9 +195,9 @@ int pthread_create(pthread_t* thread_out, pthread_attr_t const* attr,
tls[TLS_SLOT_THREAD_ID] = thread;
int flags = CLONE_FILES | CLONE_FS | CLONE_VM | CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM | CLONE_DETACHED;
int tid = __pthread_clone((int(*)(void*))start_routine, tls, flags, arg);
int flags = CLONE_FILES | CLONE_FS | CLONE_VM | CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM;
int tid = __pthread_clone(start_routine, tls, flags, arg);
if (tid < 0) {
int clone_errno = errno;
if ((thread->attr.flags & PTHREAD_ATTR_FLAG_USER_STACK) == 0) {
@ -210,7 +207,9 @@ int pthread_create(pthread_t* thread_out, pthread_attr_t const* attr,
return clone_errno;
}
int init_errno = _init_thread(thread, tid, true);
thread->tid = tid;
int init_errno = _init_thread(thread, true);
if (init_errno != 0) {
// Mark the thread detached and let its __thread_entry run to
// completion. (It'll just exit immediately, cleaning up its resources.)
@ -222,7 +221,7 @@ int pthread_create(pthread_t* thread_out, pthread_attr_t const* attr,
// Notify any debuggers about the new thread.
{
ScopedPthreadMutexLocker debugger_locker(&gDebuggerNotificationLock);
_thread_created_hook(tid);
_thread_created_hook(thread->tid);
}
// Publish the pthread_t and let the thread run.

View file

@ -38,7 +38,7 @@ typedef struct pthread_internal_t
struct pthread_internal_t* next;
struct pthread_internal_t* prev;
pthread_attr_t attr;
pid_t kernel_id;
pid_t tid;
bool allocated_on_heap;
pthread_cond_t join_cond;
int join_count;
@ -55,7 +55,8 @@ typedef struct pthread_internal_t
char dlerror_buffer[__BIONIC_DLERROR_BUFFER_SIZE];
} pthread_internal_t;
int _init_thread(pthread_internal_t* thread, pid_t kernel_id, bool add_to_thread_list);
int _init_thread(pthread_internal_t* thread, bool add_to_thread_list);
void __init_tls(pthread_internal_t* thread);
void _pthread_internal_add( pthread_internal_t* thread );
pthread_internal_t* __get_thread(void);

View file

@ -40,7 +40,7 @@
// This value is not exported by kernel headers.
#define MAX_TASK_COMM_LEN 16
#define TASK_COMM_FMT "/proc/self/task/%u/comm"
#define TASK_COMM_FMT "/proc/self/task/%d/comm"
int pthread_setname_np(pthread_t thread, const char* thread_name) {
ErrnoRestorer errno_restorer;
@ -56,14 +56,14 @@ int pthread_setname_np(pthread_t thread, const char* thread_name) {
// Changing our own name is an easy special case.
if (thread == pthread_self()) {
return prctl(PR_SET_NAME, (unsigned long)thread_name, 0, 0, 0) ? errno : 0;
return prctl(PR_SET_NAME, thread_name) ? errno : 0;
}
// Have to change another thread's name.
pthread_internal_t* t = reinterpret_cast<pthread_internal_t*>(thread);
char comm_name[sizeof(TASK_COMM_FMT) + 8];
snprintf(comm_name, sizeof(comm_name), TASK_COMM_FMT, (unsigned int) t->kernel_id);
int fd = open(comm_name, O_RDWR);
snprintf(comm_name, sizeof(comm_name), TASK_COMM_FMT, t->tid);
int fd = open(comm_name, O_WRONLY);
if (fd == -1) {
return errno;
}

View file

@ -78,9 +78,6 @@ enum {
#define GLOBAL_INIT_THREAD_LOCAL_BUFFER_COUNT 4
#define BIONIC_TLS_SLOTS 64
/* set the Thread Local Storage, must contain at least BIONIC_TLS_SLOTS pointers */
extern void __init_tls(void** tls, void* thread_info);
/* syscall only, do not call directly */
extern int __set_tls(void* ptr);

View file

@ -161,7 +161,7 @@ td_err_e
td_thr_get_info(td_thrhandle_t const * handle, td_thrinfo_t * info)
{
info->ti_tid = handle->tid;
info->ti_lid = handle->tid; // Our pthreads uses kernel ids for tids
info->ti_lid = handle->tid;
info->ti_state = TD_THR_SLEEP; /* XXX this needs to be read from /proc/<pid>/task/<tid>.
This is only used to see if the thread is a zombie or not */
return TD_OK;

View file

@ -187,7 +187,7 @@ TEST(pthread, pthread_sigmask) {
}
#if __BIONIC__
extern "C" int __pthread_clone(int (*fn)(void*), void* child_stack, int flags, void* arg);
extern "C" int __pthread_clone(void* (*fn)(void*), void* child_stack, int flags, void* arg);
TEST(pthread, __pthread_clone) {
uintptr_t fake_child_stack[16];
errno = 0;
@ -210,9 +210,20 @@ TEST(pthread, pthread_setname_np__self) {
#if __BIONIC__ // Not all build servers have a new enough glibc? TODO: remove when they're on gprecise.
TEST(pthread, pthread_setname_np__other) {
pthread_t t1;
ASSERT_EQ(0, pthread_create(&t1, NULL, SleepFn, reinterpret_cast<void*>(5)));
ASSERT_EQ(0, pthread_setname_np(t1, "short 2"));
// Emulator kernels don't currently support setting the name of other threads.
char* filename = NULL;
asprintf(&filename, "/proc/self/task/%d/comm", gettid());
struct stat sb;
bool has_comm = (stat(filename, &sb) != -1);
free(filename);
if (has_comm) {
pthread_t t1;
ASSERT_EQ(0, pthread_create(&t1, NULL, SleepFn, reinterpret_cast<void*>(5)));
ASSERT_EQ(0, pthread_setname_np(t1, "short 2"));
} else {
fprintf(stderr, "skipping test: this kernel doesn't have /proc/self/task/tid/comm files!\n");
}
}
#endif