am 8004f735: Merge "Remove PTHREAD_ATTR_FLAG_USER_ALLOCATED_STACK."

* commit '8004f735f1bad7255268392e2c7ac648f0702e5e':
  Remove PTHREAD_ATTR_FLAG_USER_ALLOCATED_STACK.
Yabin Cui 2015-01-07 01:59:07 +00:00 committed by Android Git Automerger
commit 8e8daba185
5 changed files with 44 additions and 56 deletions
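
In short, the per-thread flag is replaced by a single size field: each thread now records how many bytes libc itself mmap()ed for its stack and pthread_internal_t. A minimal sketch of the resulting convention, using the field names from the diffs below (an illustration, not bionic's code):

#include <stddef.h>

// mmap_size == 0: the stack was supplied by the caller (or this is the main thread),
//                 so libc must not munmap anything and cannot assume the TLS slots
//                 started out zeroed.
// mmap_size != 0: libc mapped the space itself; it covers the guard region, the thread
//                 stack and the pthread_internal_t, and is released with a single
//                 munmap(attr.stack_base, mmap_size).
struct thread_bookkeeping_sketch {
  size_t mmap_size;  // replaces both the removed flag and allocated_stack_size
};

// The removed user_allocated_stack() helper reduces to this check.
static inline bool user_allocated_stack(const thread_bookkeeping_sketch& t) {
  return t.mmap_size == 0;
}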

View file

@@ -85,8 +85,10 @@ void __libc_init_tls(KernelArgumentBlock& args) {
   // because things like environment variables with global scope live on it.
   // We also can't free the pthread_internal_t itself, since that lives on the main
   // thread's stack rather than on the heap.
+  // The main thread has no mmap allocated space for stack or pthread_internal_t.
+  main_thread.mmap_size = 0;
   pthread_attr_init(&main_thread.attr);
-  main_thread.attr.flags = PTHREAD_ATTR_FLAG_USER_ALLOCATED_STACK | PTHREAD_ATTR_FLAG_MAIN_THREAD;
+  main_thread.attr.flags = PTHREAD_ATTR_FLAG_MAIN_THREAD;
   main_thread.attr.guard_size = 0; // The main thread has no guard page.
   main_thread.attr.stack_size = 0; // User code should never see this; we'll compute it when asked.
   // TODO: the main thread's sched_policy and sched_priority need to be queried.

View file

@@ -52,8 +52,9 @@ extern "C" int __isthreaded;
 // This code is used both by each new pthread and the code that initializes the main thread.
 void __init_tls(pthread_internal_t* thread) {
-  if (thread->user_allocated_stack()) {
-    // We don't know where the user got their stack, so assume the worst and zero the TLS area.
+  if (thread->mmap_size == 0) {
+    // If the TLS area was not allocated by mmap(), it may not have been cleared to zero.
+    // So assume the worst and zero the TLS area.
     memset(&thread->tls[0], 0, BIONIC_TLS_SLOTS * sizeof(void*));
   }
@@ -106,62 +107,62 @@ int __init_thread(pthread_internal_t* thread, bool add_to_thread_list) {
   return error;
 }
 
-static void* __create_thread_stack(size_t stack_size, size_t guard_size) {
+static void* __create_thread_mapped_space(size_t mmap_size, size_t stack_guard_size) {
   // Create a new private anonymous map.
   int prot = PROT_READ | PROT_WRITE;
   int flags = MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE;
-  void* stack = mmap(NULL, stack_size, prot, flags, -1, 0);
-  if (stack == MAP_FAILED) {
+  void* space = mmap(NULL, mmap_size, prot, flags, -1, 0);
+  if (space == MAP_FAILED) {
     __libc_format_log(ANDROID_LOG_WARN,
                       "libc",
-                      "pthread_create failed: couldn't allocate %zd-byte stack: %s",
-                      stack_size, strerror(errno));
+                      "pthread_create failed: couldn't allocate %zu-bytes mapped space: %s",
+                      mmap_size, strerror(errno));
     return NULL;
   }
 
-  // Set the guard region at the end of the stack to PROT_NONE.
-  if (mprotect(stack, guard_size, PROT_NONE) == -1) {
+  // Stack is at the lower end of mapped space, stack guard region is at the lower end of stack.
+  // Set the stack guard region to PROT_NONE, so we can detect thread stack overflow.
+  if (mprotect(space, stack_guard_size, PROT_NONE) == -1) {
     __libc_format_log(ANDROID_LOG_WARN, "libc",
-                      "pthread_create failed: couldn't mprotect PROT_NONE %zd-byte stack guard region: %s",
-                      guard_size, strerror(errno));
-    munmap(stack, stack_size);
+                      "pthread_create failed: couldn't mprotect PROT_NONE %zu-byte stack guard region: %s",
+                      stack_guard_size, strerror(errno));
+    munmap(space, mmap_size);
     return NULL;
   }
 
-  return stack;
+  return space;
 }
 
 static int __allocate_thread(pthread_attr_t* attr, pthread_internal_t** threadp, void** child_stack) {
-  size_t allocate_stack_size;
+  size_t mmap_size;
   uint8_t* stack_top;
 
   if (attr->stack_base == NULL) {
     // The caller didn't provide a stack, so allocate one.
     // Make sure the stack size and guard size are multiples of PAGE_SIZE.
-    allocate_stack_size = BIONIC_ALIGN(attr->stack_size + sizeof(pthread_internal_t), PAGE_SIZE);
+    mmap_size = BIONIC_ALIGN(attr->stack_size + sizeof(pthread_internal_t), PAGE_SIZE);
     attr->guard_size = BIONIC_ALIGN(attr->guard_size, PAGE_SIZE);
-    attr->stack_base = __create_thread_stack(allocate_stack_size, attr->guard_size);
+    attr->stack_base = __create_thread_mapped_space(mmap_size, attr->guard_size);
     if (attr->stack_base == NULL) {
       return EAGAIN;
     }
-    stack_top = reinterpret_cast<uint8_t*>(attr->stack_base) + allocate_stack_size;
+    stack_top = reinterpret_cast<uint8_t*>(attr->stack_base) + mmap_size;
   } else {
-    // The caller did provide a stack, so remember we're not supposed to free it.
-    attr->flags |= PTHREAD_ATTR_FLAG_USER_ALLOCATED_STACK;
-    allocate_stack_size = 0;
+    // Remember the mmap size is zero and we don't need to free it.
+    mmap_size = 0;
     stack_top = reinterpret_cast<uint8_t*>(attr->stack_base) + attr->stack_size;
   }
 
-  // Thread stack is used for two sections:
-  //   pthread_internal_t.
-  //   regular stack, from top to down.
+  // Mapped space(or user allocated stack) is used for:
+  //   thread_internal_t (including tls array)
+  //   thread stack (including guard page)
   stack_top -= sizeof(pthread_internal_t);
   pthread_internal_t* thread = reinterpret_cast<pthread_internal_t*>(stack_top);
 
   // No need to check stack_top alignment. The size of pthread_internal_t is 16-bytes aligned,
   // and user allocated stack is guaranteed by pthread_attr_setstack.
 
-  thread->allocated_stack_size = allocate_stack_size;
+  thread->mmap_size = mmap_size;
   thread->attr = *attr;
   __init_tls(thread);
@@ -248,8 +249,8 @@ int pthread_create(pthread_t* thread_out, pthread_attr_t const* attr,
   // be unblocked, but we're about to unmap the memory the mutex is stored in, so this serves as a
   // reminder that you can't rewrite this function to use a ScopedPthreadMutexLocker.
   pthread_mutex_unlock(&thread->startup_handshake_mutex);
-  if (!thread->user_allocated_stack()) {
-    munmap(thread->attr.stack_base, thread->allocated_stack_size);
+  if (thread->mmap_size != 0) {
+    munmap(thread->attr.stack_base, thread->mmap_size);
   }
   __libc_format_log(ANDROID_LOG_WARN, "libc", "pthread_create failed: clone failed: %s", strerror(errno));
   return clone_errno;
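
The mapped space returned by __create_thread_mapped_space has the guard page at the lowest addresses and the pthread_internal_t at the very top; the new thread's stack grows down from just below it. A small standalone sketch of that arithmetic, where kPageSize and align_up stand in for bionic's PAGE_SIZE and BIONIC_ALIGN (an illustration, not the library's code):

#include <stddef.h>

constexpr size_t kPageSize = 4096;
constexpr size_t align_up(size_t v, size_t a) { return (v + a - 1) & ~(a - 1); }

// Layout of the mapped space, low to high addresses:
//   [ guard page(s) | thread stack (grows down) | pthread_internal_t + tls ]
struct MappedSpacePlan {
  size_t mmap_size;        // total mmap()/munmap() length; 0 for user-provided stacks
  size_t guard_bytes;      // mprotect(PROT_NONE) region at the low end
  size_t internal_offset;  // where pthread_internal_t lands; the usable stack tops out just below it
};

// internal_size stands in for sizeof(pthread_internal_t).
MappedSpacePlan plan_mapped_space(size_t stack_size, size_t guard_size, size_t internal_size) {
  MappedSpacePlan p;
  p.mmap_size = align_up(stack_size + internal_size, kPageSize);
  p.guard_bytes = align_up(guard_size, kPageSize);
  p.internal_offset = p.mmap_size - internal_size;
  return p;
}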

View file

@@ -87,30 +87,23 @@ void pthread_exit(void* return_value) {
     thread->alternate_signal_stack = NULL;
   }
 
-  // Keep track of what we need to know about the stack before we lose the pthread_internal_t.
-  void* stack_base = thread->attr.stack_base;
-  size_t stack_size = thread->allocated_stack_size;
-  bool free_stack = false;
+  bool free_mapped_space = false;
 
   pthread_mutex_lock(&g_thread_list_lock);
   if ((thread->attr.flags & PTHREAD_ATTR_FLAG_DETACHED) != 0) {
-    // The thread is detached, so we can free the pthread_internal_t.
+    // The thread is detached, no one will use pthread_internal_t after pthread_exit.
+    // So we can free mapped space, which includes pthread_internal_t and thread stack.
     // First make sure that the kernel does not try to clear the tid field
    // because we'll have freed the memory before the thread actually exits.
     __set_tid_address(NULL);
 
     // pthread_internal_t is freed below with stack, not here.
     _pthread_internal_remove_locked(thread, false);
-    if (!thread->user_allocated_stack()) {
-      free_stack = true;
-    }
+    free_mapped_space = true;
   }
   pthread_mutex_unlock(&g_thread_list_lock);
 
-  // Detached threads exit with stack teardown, and everything deallocated here.
-  // Threads that can be joined exit but leave their stacks for the pthread_join caller to clean up.
-  if (free_stack) {
-    // We need to munmap the stack we're running on before calling exit.
+  if (free_mapped_space && thread->mmap_size != 0) {
+    // We need to free mapped space for detached threads when they exit.
     // That's not something we can do in C.
     // We don't want to take a signal after we've unmapped the stack.
@@ -119,8 +112,10 @@ void pthread_exit(void* return_value) {
     sigfillset(&mask);
     sigprocmask(SIG_SETMASK, &mask, NULL);
-    _exit_with_stack_teardown(stack_base, stack_size);
+    _exit_with_stack_teardown(thread->attr.stack_base, thread->mmap_size);
   } else {
+    // No need to free mapped space. Either there was no space mapped, or it is left for
+    // the pthread_join caller to clean up.
     __exit(0);
   }
 }
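
After this change, pthread_exit only has to look at mmap_size to decide whether anything needs unmapping. A condensed sketch of that decision (illustrative only; in bionic the unmap and the final exit happen in assembly via _exit_with_stack_teardown, because the thread is still running on the stack it is freeing):

#include <stddef.h>
#include <sys/mman.h>

// detached_and_removed corresponds to free_mapped_space in the diff above.
static void exit_cleanup_sketch(bool detached_and_removed, void* stack_base, size_t mmap_size) {
  if (detached_and_removed && mmap_size != 0) {
    // Detached thread on a libc-mapped stack: one munmap frees the guard region,
    // the stack and the pthread_internal_t together.
    munmap(stack_base, mmap_size);
    // The real code issues the exit syscall immediately, without touching C code or the stack.
  }
  // Otherwise nothing to unmap here: either the caller owns the stack (mmap_size == 0)
  // or the thread is joinable and pthread_join frees the mapping later.
}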

View file

@@ -35,11 +35,8 @@
 /* Has the thread been detached by a pthread_join or pthread_detach call? */
 #define PTHREAD_ATTR_FLAG_DETACHED 0x00000001
 
-/* Was the thread's stack allocated by the user rather than by us? */
-#define PTHREAD_ATTR_FLAG_USER_ALLOCATED_STACK 0x00000002
-
 /* Has the thread been joined by another thread? */
-#define PTHREAD_ATTR_FLAG_JOINED 0x00000004
+#define PTHREAD_ATTR_FLAG_JOINED 0x00000002
 
 /* Is this the main thread? */
 #define PTHREAD_ATTR_FLAG_MAIN_THREAD 0x80000000
@@ -70,10 +67,6 @@ struct pthread_internal_t {
     return (*cached_pid != 0);
   }
 
-  bool user_allocated_stack() {
-    return (attr.flags & PTHREAD_ATTR_FLAG_USER_ALLOCATED_STACK) != 0;
-  }
-
   pthread_attr_t attr;
 
   __pthread_cleanup_t* cleanup_stack;
@@ -86,8 +79,7 @@ struct pthread_internal_t {
   pthread_mutex_t startup_handshake_mutex;
 
-  /* Store real allocated stack size, including thread stack and pthread_internal_t. */
-  int allocated_stack_size;
+  size_t mmap_size;
 
   void* tls[BIONIC_TLS_SLOTS];
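
With the 0x00000002 bit freed up, PTHREAD_ATTR_FLAG_JOINED moves down to take its place. A trivial standalone check (not part of the commit) that the renumbered bits remain disjoint:

#include <stdint.h>

// Values mirror the header above.
constexpr uint32_t kFlagDetached   = 0x00000001;  // PTHREAD_ATTR_FLAG_DETACHED
constexpr uint32_t kFlagJoined     = 0x00000002;  // PTHREAD_ATTR_FLAG_JOINED (was 0x00000004)
constexpr uint32_t kFlagMainThread = 0x80000000;  // PTHREAD_ATTR_FLAG_MAIN_THREAD

static_assert((kFlagDetached & kFlagJoined) == 0, "flag bits overlap");
static_assert((kFlagDetached & kFlagMainThread) == 0, "flag bits overlap");
static_assert((kFlagJoined & kFlagMainThread) == 0, "flag bits overlap");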

View file

@@ -51,11 +51,9 @@ void _pthread_internal_remove_locked(pthread_internal_t* thread, bool free_thread
     g_thread_list = thread->next;
   }
 
-  // For threads using user allocated stack (including the main thread), the pthread_internal_t
-  // can't be freed since it is on the stack.
-  if (free_thread && !thread->user_allocated_stack()) {
-    // Use one munmap to free allocated stack size, including thread stack and pthread_internal_t.
-    munmap(thread->attr.stack_base, thread->allocated_stack_size);
+  if (free_thread && thread->mmap_size != 0) {
+    // Free mapped space, including thread stack and pthread_internal_t.
+    munmap(thread->attr.stack_base, thread->mmap_size);
   }
 }