Merge "Remove the global thread list."

Elliott Hughes 2017-02-01 16:16:41 +00:00 committed by Gerrit Code Review
commit 9b9d4ebc8b
15 changed files with 32 additions and 220 deletions

View file

@@ -1421,7 +1421,6 @@ cc_library_static {
"bionic/pthread_getcpuclockid.cpp",
"bionic/pthread_getschedparam.cpp",
"bionic/pthread_gettid_np.cpp",
"bionic/pthread_internal.cpp",
"bionic/pthread_join.cpp",
"bionic/pthread_key.cpp",
"bionic/pthread_kill.cpp",

View file

@@ -114,10 +114,6 @@ void __libc_init_common(KernelArgumentBlock& args) {
__check_max_thread_id();
#endif
- // Get the main thread from TLS and add it to the thread list.
- pthread_internal_t* main_thread = __get_thread();
- __pthread_internal_add(main_thread);
// Register atfork handlers to take and release the arc4random lock.
pthread_atfork(arc4random_fork_handler, _thread_arc4_unlock, _thread_arc4_unlock);

View file

@@ -115,6 +115,13 @@ int __init_thread(pthread_internal_t* thread) {
return error;
}
+ void __free_thread(pthread_internal_t* thread) {
+ if (thread->mmap_size != 0) {
+ // Free mapped space, including thread stack and pthread_internal_t.
+ munmap(thread->attr.stack_base, thread->mmap_size);
+ }
+ }
static void* __create_thread_mapped_space(size_t mmap_size, size_t stack_guard_size) {
// Create a new private anonymous map.
int prot = PROT_READ | PROT_WRITE;
@@ -265,9 +272,7 @@ int pthread_create(pthread_t* thread_out, pthread_attr_t const* attr,
// be unblocked, but we're about to unmap the memory the mutex is stored in, so this serves as a
// reminder that you can't rewrite this function to use a ScopedPthreadMutexLocker.
thread->startup_handshake_lock.unlock();
- if (thread->mmap_size != 0) {
- munmap(thread->attr.stack_base, thread->mmap_size);
- }
+ __free_thread(thread);
__libc_format_log(ANDROID_LOG_WARN, "libc", "pthread_create failed: clone failed: %s", strerror(errno));
return clone_errno;
}
@@ -277,14 +282,13 @@ int pthread_create(pthread_t* thread_out, pthread_attr_t const* attr,
// Mark the thread detached and replace its start_routine with a no-op.
// Letting the thread run is the easiest way to clean up its resources.
atomic_store(&thread->join_state, THREAD_DETACHED);
- __pthread_internal_add(thread);
thread->start_routine = __do_nothing;
thread->startup_handshake_lock.unlock();
return init_errno;
}
// Publish the pthread_t and unlock the mutex to let the new thread start running.
- *thread_out = __pthread_internal_add(thread);
+ *thread_out = reinterpret_cast<pthread_t>(thread);
thread->startup_handshake_lock.unlock();
return 0;
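
With the global list gone, a pthread_t handed back by pthread_create is no longer a key to look up in a registry: it is the pthread_internal_t pointer itself, cast to the opaque handle type. A minimal sketch of the convention the surrounding hunks rely on, assuming only that pthread_t is an integer type wide enough to hold a pointer (as it is on bionic); the helper names are illustrative, not bionic's:

#include <pthread.h>

class pthread_internal_t;  // opaque here; the real definition lives in pthread_internal.h

// Publishing a thread: the pointer itself becomes the caller-visible handle.
static inline pthread_t to_handle(pthread_internal_t* thread) {
  return reinterpret_cast<pthread_t>(thread);
}

// Looking a thread up: a plain cast, with no list walk and no ESRCH path.
static inline pthread_internal_t* from_handle(pthread_t t) {
  return reinterpret_cast<pthread_internal_t*>(t);
}

The trade-off, visible in the tests deleted further down, is that a stale handle can no longer be detected, so the old ESRCH behaviour for already-exited threads goes away.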

View file

@@ -32,10 +32,7 @@
#include "pthread_internal.h"
int pthread_detach(pthread_t t) {
- pthread_internal_t* thread = __pthread_internal_find(t);
- if (thread == NULL) {
- return ESRCH;
- }
+ pthread_internal_t* thread = reinterpret_cast<pthread_internal_t*>(t);
ThreadJoinState old_state = THREAD_NOT_JOINED;
while (old_state == THREAD_NOT_JOINED &&

View file

@@ -104,9 +104,6 @@ void pthread_exit(void* return_value) {
// because we'll have freed the memory before the thread actually exits.
__set_tid_address(NULL);
- // pthread_internal_t is freed below with stack, not here.
- __pthread_internal_remove(thread);
if (thread->mmap_size != 0) {
// We need to free mapped space for detached threads when they exit.
// That's not something we can do in C.

View file

@@ -31,10 +31,7 @@
#include "pthread_internal.h"
int pthread_getcpuclockid(pthread_t t, clockid_t* clockid) {
- pthread_internal_t* thread = __pthread_internal_find(t);
- if (thread == NULL) {
- return ESRCH;
- }
+ pthread_internal_t* thread = reinterpret_cast<pthread_internal_t*>(t);
// The tid is stored in the top bits, but negated.
clockid_t result = ~static_cast<clockid_t>(thread->tid) << 3;
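The single line kept above leans on the Linux CPU-clock ID encoding: the bitwise-negated tid occupies the bits above the low three, and the low bits select which clock is meant. A sketch of that encoding for orientation; the low-bit constants follow the usual Linux CPUCLOCK convention and are an assumption here, not something this diff shows:

#include <time.h>
#include <sys/types.h>

// Illustrative only: building a per-thread CPU-time clockid by hand.
static clockid_t thread_cpu_clockid(pid_t tid) {
  clockid_t result = ~static_cast<clockid_t>(tid) << 3;  // negated tid in the upper bits
  result |= 2;         // clock type: CPUCLOCK_SCHED (assumed constant)
  result |= (1 << 2);  // per-thread rather than per-process (assumed bit)
  return result;
}
// The kernel recovers the tid as ~(clockid >> 3).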

View file

@@ -34,10 +34,7 @@
int pthread_getschedparam(pthread_t t, int* policy, sched_param* param) {
ErrnoRestorer errno_restorer;
- pthread_internal_t* thread = __pthread_internal_find(t);
- if (thread == NULL) {
- return ESRCH;
- }
+ pthread_internal_t* thread = reinterpret_cast<pthread_internal_t*>(t);
int rc = sched_getparam(thread->tid, param);
if (rc == -1) {

View file

@@ -1,98 +0,0 @@
/*
* Copyright (C) 2008 The Android Open Source Project
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include "pthread_internal.h"
#include <errno.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include "private/bionic_futex.h"
#include "private/bionic_tls.h"
#include "private/libc_logging.h"
#include "private/ScopedPthreadMutexLocker.h"
static pthread_internal_t* g_thread_list = NULL;
static pthread_mutex_t g_thread_list_lock = PTHREAD_MUTEX_INITIALIZER;
pthread_t __pthread_internal_add(pthread_internal_t* thread) {
ScopedPthreadMutexLocker locker(&g_thread_list_lock);
// We insert at the head.
thread->next = g_thread_list;
thread->prev = NULL;
if (thread->next != NULL) {
thread->next->prev = thread;
}
g_thread_list = thread;
return reinterpret_cast<pthread_t>(thread);
}
void __pthread_internal_remove(pthread_internal_t* thread) {
ScopedPthreadMutexLocker locker(&g_thread_list_lock);
if (thread->next != NULL) {
thread->next->prev = thread->prev;
}
if (thread->prev != NULL) {
thread->prev->next = thread->next;
} else {
g_thread_list = thread->next;
}
}
static void __pthread_internal_free(pthread_internal_t* thread) {
if (thread->mmap_size != 0) {
// Free mapped space, including thread stack and pthread_internal_t.
munmap(thread->attr.stack_base, thread->mmap_size);
}
}
void __pthread_internal_remove_and_free(pthread_internal_t* thread) {
__pthread_internal_remove(thread);
__pthread_internal_free(thread);
}
pthread_internal_t* __pthread_internal_find(pthread_t thread_id) {
pthread_internal_t* thread = reinterpret_cast<pthread_internal_t*>(thread_id);
// check if thread is pthread_self() before acquiring the lock
if (thread == __get_thread()) {
return thread;
}
ScopedPthreadMutexLocker locker(&g_thread_list_lock);
for (pthread_internal_t* t = g_thread_list; t != NULL; t = t->next) {
if (t == thread) {
return thread;
}
}
return NULL;
}

View file

@@ -56,10 +56,12 @@ enum ThreadJoinState {
class thread_local_dtor;
class pthread_internal_t {
public:
- class pthread_internal_t* next;
- class pthread_internal_t* prev;
+ // These two fields preserve backwards compatibility for code accessing the `tid` field,
+ // since we didn't always offer pthread_gettid_np.
+ void* unused0 __unused;
+ void* unused1 __unused;
public:
pid_t tid;
private:
@@ -112,15 +114,12 @@ class pthread_internal_t {
char dlerror_buffer[__BIONIC_DLERROR_BUFFER_SIZE];
};
- __LIBC_HIDDEN__ int __init_thread(pthread_internal_t* thread);
- __LIBC_HIDDEN__ void __init_tls(pthread_internal_t* thread);
- __LIBC_HIDDEN__ void __init_thread_stack_guard(pthread_internal_t* thread);
- __LIBC_HIDDEN__ void __init_alternate_signal_stack(pthread_internal_t*);
+ __LIBC_HIDDEN__ int __init_thread(pthread_internal_t*);
+ __LIBC_HIDDEN__ void __free_thread(pthread_internal_t*);
- __LIBC_HIDDEN__ pthread_t __pthread_internal_add(pthread_internal_t* thread);
- __LIBC_HIDDEN__ pthread_internal_t* __pthread_internal_find(pthread_t pthread_id);
- __LIBC_HIDDEN__ void __pthread_internal_remove(pthread_internal_t* thread);
- __LIBC_HIDDEN__ void __pthread_internal_remove_and_free(pthread_internal_t* thread);
+ __LIBC_HIDDEN__ void __init_tls(pthread_internal_t*);
+ __LIBC_HIDDEN__ void __init_thread_stack_guard(pthread_internal_t*);
+ __LIBC_HIDDEN__ void __init_alternate_signal_stack(pthread_internal_t*);
// Make __get_thread() inlined for performance reason. See http://b/19825434.
static inline __always_inline pthread_internal_t* __get_thread() {
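
The two __unused padding slots above exist only so that tid stays at the byte offset it had while next and prev occupied that space, for old code that read tid directly before pthread_gettid_np existed. A compile-time check along these lines would make the invariant explicit; the two structs below are illustrative stand-ins, not bionic's real class:

#include <cstddef>
#include <sys/types.h>

// Stand-in layouts for before and after this change (sketch only).
struct layout_with_list { void* next; void* prev; pid_t tid; };
struct layout_with_padding { void* unused0; void* unused1; pid_t tid; };

static_assert(offsetof(layout_with_list, tid) == offsetof(layout_with_padding, tid),
              "tid must stay at the same offset for code that predates pthread_gettid_np");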

View file

@@ -36,10 +36,7 @@ int pthread_join(pthread_t t, void** return_value) {
return EDEADLK;
}
- pthread_internal_t* thread = __pthread_internal_find(t);
- if (thread == NULL) {
- return ESRCH;
- }
+ pthread_internal_t* thread = reinterpret_cast<pthread_internal_t*>(t);
ThreadJoinState old_state = THREAD_NOT_JOINED;
while ((old_state == THREAD_NOT_JOINED || old_state == THREAD_EXITED_NOT_JOINED) &&
@@ -65,6 +62,6 @@ int pthread_join(pthread_t t, void** return_value) {
*return_value = thread->return_value;
}
- __pthread_internal_remove_and_free(thread);
+ __free_thread(thread);
return 0;
}

View file

@@ -35,10 +35,6 @@
int pthread_kill(pthread_t t, int sig) {
ErrnoRestorer errno_restorer;
- pthread_internal_t* thread = __pthread_internal_find(t);
- if (thread == NULL) {
- return ESRCH;
- }
+ pthread_internal_t* thread = reinterpret_cast<pthread_internal_t*>(t);
return (tgkill(getpid(), thread->tid, sig) == -1) ? errno : 0;
}

View file

@@ -43,12 +43,7 @@
#define MAX_TASK_COMM_LEN 16
static int __open_task_comm_fd(pthread_t t, int flags) {
- pthread_internal_t* thread = __pthread_internal_find(t);
- if (thread == nullptr) {
- errno = ENOENT;
- return -1;
- }
+ pthread_internal_t* thread = reinterpret_cast<pthread_internal_t*>(t);
char comm_name[64];
snprintf(comm_name, sizeof(comm_name), "/proc/self/task/%d/comm", thread->tid);
return open(comm_name, O_CLOEXEC | flags);

View file

@@ -34,10 +34,7 @@
int pthread_setschedparam(pthread_t t, int policy, const sched_param* param) {
ErrnoRestorer errno_restorer;
- pthread_internal_t* thread = __pthread_internal_find(t);
- if (thread == NULL) {
- return ESRCH;
- }
+ pthread_internal_t* thread = reinterpret_cast<pthread_internal_t*>(t);
int rc = sched_setscheduler(thread->tid, policy, param);
if (rc == -1) {

View file

@@ -235,11 +235,6 @@ static void AssertDetached(pthread_t t, bool is_detached) {
ASSERT_EQ(is_detached, (detach_state == PTHREAD_CREATE_DETACHED));
}
- static void MakeDeadThread(pthread_t& t) {
- ASSERT_EQ(0, pthread_create(&t, NULL, IdFn, NULL));
- ASSERT_EQ(0, pthread_join(t, NULL));
- }
TEST(pthread, pthread_create) {
void* expected_result = reinterpret_cast<void*>(123);
// Can we create a thread?
@@ -443,16 +438,6 @@ TEST(pthread, pthread_setname_np__pthread_getname_np__other_PR_SET_DUMPABLE) {
ASSERT_EQ(0, pthread_join(t, nullptr));
}
- TEST(pthread, pthread_setname_np__pthread_getname_np__no_such_thread) {
- pthread_t dead_thread;
- MakeDeadThread(dead_thread);
- // Call pthread_getname_np and pthread_setname_np after the thread has already exited.
- ASSERT_EQ(ENOENT, pthread_setname_np(dead_thread, "short 3"));
- char name[64];
- ASSERT_EQ(ENOENT, pthread_getname_np(dead_thread, name, sizeof(name)));
- }
TEST(pthread, pthread_kill__0) {
// Signal 0 just tests that the thread exists, so it's safe to call on ourselves.
ASSERT_EQ(0, pthread_kill(pthread_self(), 0));
@@ -476,13 +461,6 @@ TEST(pthread, pthread_kill__in_signal_handler) {
ASSERT_EQ(0, pthread_kill(pthread_self(), SIGALRM));
}
- TEST(pthread, pthread_detach__no_such_thread) {
- pthread_t dead_thread;
- MakeDeadThread(dead_thread);
- ASSERT_EQ(ESRCH, pthread_detach(dead_thread));
- }
TEST(pthread, pthread_getcpuclockid__clock_gettime) {
SpinFunctionHelper spin_helper;
@@ -497,46 +475,6 @@ TEST(pthread, pthread_getcpuclockid__clock_gettime) {
ASSERT_EQ(0, pthread_join(t, nullptr));
}
- TEST(pthread, pthread_getcpuclockid__no_such_thread) {
- pthread_t dead_thread;
- MakeDeadThread(dead_thread);
- clockid_t c;
- ASSERT_EQ(ESRCH, pthread_getcpuclockid(dead_thread, &c));
- }
- TEST(pthread, pthread_getschedparam__no_such_thread) {
- pthread_t dead_thread;
- MakeDeadThread(dead_thread);
- int policy;
- sched_param param;
- ASSERT_EQ(ESRCH, pthread_getschedparam(dead_thread, &policy, &param));
- }
- TEST(pthread, pthread_setschedparam__no_such_thread) {
- pthread_t dead_thread;
- MakeDeadThread(dead_thread);
- int policy = 0;
- sched_param param;
- ASSERT_EQ(ESRCH, pthread_setschedparam(dead_thread, policy, &param));
- }
- TEST(pthread, pthread_join__no_such_thread) {
- pthread_t dead_thread;
- MakeDeadThread(dead_thread);
- ASSERT_EQ(ESRCH, pthread_join(dead_thread, NULL));
- }
- TEST(pthread, pthread_kill__no_such_thread) {
- pthread_t dead_thread;
- MakeDeadThread(dead_thread);
- ASSERT_EQ(ESRCH, pthread_kill(dead_thread, 0));
- }
TEST(pthread, pthread_join__multijoin) {
SpinFunctionHelper spin_helper;
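
Every *__no_such_thread test deleted above depended on the C library noticing a stale pthread_t and returning ESRCH (or ENOENT for the setname/getname pair). Without a registry that check is gone, and passing the handle of a thread that has already exited and been joined is simply undefined. The only liveness probe left works at the kernel-tid level, which is exactly what the time test below switches to; a hedged sketch of that probe, with an illustrative helper name:

#include <errno.h>
#include <signal.h>
#include <sys/types.h>

// Signal 0 performs only the existence and permission checks, so a kill() that
// fails with ESRCH means the kernel task is gone. Sketch only.
static bool kernel_task_exists(pid_t tid) {
  return kill(tid, 0) != -1 || errno != ESRCH;
}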

View file

@@ -509,14 +509,14 @@ TEST(time, timer_delete_terminates) {
struct TimerDeleteData {
timer_t timer_id;
- pthread_t thread_id;
+ pid_t tid;
volatile bool complete;
};
static void TimerDeleteCallback(sigval_t value) {
TimerDeleteData* tdd = reinterpret_cast<TimerDeleteData*>(value.sival_ptr);
- tdd->thread_id = pthread_self();
+ tdd->tid = gettid();
timer_delete(tdd->timer_id);
tdd->complete = true;
}
@@ -548,8 +548,9 @@ TEST(time, timer_delete_from_timer_thread) {
// Since bionic timers are implemented by creating a thread to handle the
// callback, verify that the thread actually completes.
cur_time = time(NULL);
- while (pthread_detach(tdd.thread_id) != ESRCH && (time(NULL) - cur_time) < 5);
- ASSERT_EQ(ESRCH, pthread_detach(tdd.thread_id));
+ while ((kill(tdd.tid, 0) != -1 || errno != ESRCH) && (time(NULL) - cur_time) < 5);
+ ASSERT_EQ(-1, kill(tdd.tid, 0));
+ ASSERT_EQ(ESRCH, errno);
#endif
}