Support larger guard regions.

This also fixes a long-standing bug where the guard region would be taken
out of the stack itself, rather than being -- as POSIX demands -- additional
space after the stack. Historically a 128KiB stack with a 256KiB guard would
have given you an immediate crash.

Bug: http://b/38413813
Test: builds, boots
Change-Id: Idd12a3899be1d92fea3d3e0fa6882ca2216bd79c
Elliott Hughes 2017-06-27 17:01:57 -07:00
parent 0d5d0746e8
commit d6c678ca90
5 changed files with 67 additions and 37 deletions
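The POSIX behavior being restored is easy to demonstrate with the standard attributes API. Here is a minimal sketch (not part of this change) exercising the commit message's example, creating a thread whose guard region is twice the size of its stack; with the old carve-out behavior this crashed immediately, and with this change it runs:

#include <pthread.h>
#include <stdio.h>

static void* no_op(void*) {
  return nullptr;
}

int main() {
  pthread_attr_t attr;
  pthread_attr_init(&attr);
  // POSIX: the guard is extra space after the stack, not part of it,
  // so a guard larger than the stack itself is legal.
  pthread_attr_setstacksize(&attr, 128 * 1024);
  pthread_attr_setguardsize(&attr, 256 * 1024);

  pthread_t t;
  if (pthread_create(&t, &attr, no_op, nullptr) != 0) {
    perror("pthread_create");
    return 1;
  }
  pthread_join(t, nullptr);
  pthread_attr_destroy(&attr);
  return 0;
}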

libc/bionic/pthread_attr.cpp

@@ -43,7 +43,7 @@ int pthread_attr_init(pthread_attr_t* attr) {
   attr->flags = 0;
   attr->stack_base = NULL;
   attr->stack_size = PTHREAD_STACK_SIZE_DEFAULT;
-  attr->guard_size = PAGE_SIZE;
+  attr->guard_size = PTHREAD_GUARD_SIZE;
   attr->sched_policy = SCHED_NORMAL;
   attr->sched_priority = 0;
   return 0;
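The new default is observable through pthread_attr_getguardsize(). A quick sketch; the printed value assumes the usual 4KiB page size:

#include <pthread.h>
#include <stdio.h>

int main() {
  pthread_attr_t attr;
  pthread_attr_init(&attr);
  size_t guard_size;
  pthread_attr_getguardsize(&attr, &guard_size);
  // PTHREAD_GUARD_SIZE is one page, so this typically prints 4096.
  printf("default guard size: %zu\n", guard_size);
  pthread_attr_destroy(&attr);
  return 0;
}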

libc/bionic/pthread_create.cpp

@@ -55,15 +55,16 @@ void __init_tls(pthread_internal_t* thread) {
   thread->tls[TLS_SLOT_SELF] = thread->tls;
   thread->tls[TLS_SLOT_THREAD_ID] = thread;
 
-  // Add a guard page before and after.
-  size_t allocation_size = BIONIC_TLS_SIZE + 2 * PAGE_SIZE;
+  // Add a guard before and after.
+  size_t allocation_size = BIONIC_TLS_SIZE + (2 * PTHREAD_GUARD_SIZE);
   void* allocation = mmap(nullptr, allocation_size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
   if (allocation == MAP_FAILED) {
     async_safe_fatal("failed to allocate TLS");
   }
-  prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, allocation, allocation_size, "bionic TLS guard page");
+  prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, allocation, allocation_size, "bionic TLS guard");
 
-  thread->bionic_tls = reinterpret_cast<bionic_tls*>(static_cast<char*>(allocation) + PAGE_SIZE);
+  thread->bionic_tls = reinterpret_cast<bionic_tls*>(static_cast<char*>(allocation) +
+                                                     PTHREAD_GUARD_SIZE);
   if (mprotect(thread->bionic_tls, BIONIC_TLS_SIZE, PROT_READ | PROT_WRITE) != 0) {
     async_safe_fatal("failed to mprotect TLS");
   }
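The allocation pattern above (map the whole region PROT_NONE, then mprotect only the middle) is what leaves an inaccessible guard on both sides of the TLS area. The same technique in a standalone sketch, using stand-in sizes rather than bionic's internal constants:

#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main() {
  size_t guard = sysconf(_SC_PAGESIZE);  // stand-in for PTHREAD_GUARD_SIZE
  size_t usable = 4 * guard;             // stand-in for BIONIC_TLS_SIZE
  size_t total = usable + 2 * guard;
  // Reserve everything inaccessible first...
  char* base = static_cast<char*>(mmap(nullptr, total, PROT_NONE,
                                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0));
  if (base == MAP_FAILED) { perror("mmap"); return 1; }
  // ...then open up only the middle, leaving a guard on each side.
  if (mprotect(base + guard, usable, PROT_READ | PROT_WRITE) != 0) {
    perror("mprotect"); return 1;
  }
  base[guard] = 1;         // fine: inside the usable region
  // base[0] = 1;          // would fault: leading guard
  // base[total - 1] = 1;  // would fault: trailing guard
  munmap(base, total);
  return 0;
}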
@@ -79,15 +80,14 @@ void __init_alternate_signal_stack(pthread_internal_t* thread) {
   // Create and set an alternate signal stack.
   void* stack_base = mmap(NULL, SIGNAL_STACK_SIZE, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
   if (stack_base != MAP_FAILED) {
-    // Create a guard page to catch stack overflows in signal handlers.
-    if (mprotect(stack_base, PAGE_SIZE, PROT_NONE) == -1) {
+    // Create a guard to catch stack overflows in signal handlers.
+    if (mprotect(stack_base, PTHREAD_GUARD_SIZE, PROT_NONE) == -1) {
       munmap(stack_base, SIGNAL_STACK_SIZE);
       return;
     }
     stack_t ss;
-    ss.ss_sp = reinterpret_cast<uint8_t*>(stack_base) + PAGE_SIZE;
-    ss.ss_size = SIGNAL_STACK_SIZE - PAGE_SIZE;
+    ss.ss_sp = reinterpret_cast<uint8_t*>(stack_base) + PTHREAD_GUARD_SIZE;
+    ss.ss_size = SIGNAL_STACK_SIZE - PTHREAD_GUARD_SIZE;
     ss.ss_flags = 0;
     sigaltstack(&ss, NULL);
     thread->alternate_signal_stack = stack_base;
@@ -95,7 +95,7 @@ void __init_alternate_signal_stack(pthread_internal_t* thread) {
 
     // We can only use const static allocated string for mapped region name, as Android kernel
     // uses the string pointer directly when dumping /proc/pid/maps.
     prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, ss.ss_sp, ss.ss_size, "thread signal stack");
-    prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, stack_base, PAGE_SIZE, "thread signal stack guard page");
+    prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, stack_base, PTHREAD_GUARD_SIZE, "thread signal stack guard");
   }
 }
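sigaltstack() itself knows nothing about guards; the code above simply points ss_sp past the protected first part of the mapping. A self-contained sketch of the same arrangement:

#include <signal.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main() {
  size_t guard = sysconf(_SC_PAGESIZE);
  size_t total = guard + SIGSTKSZ;  // one guard plus a usable stack
  char* base = static_cast<char*>(mmap(nullptr, total, PROT_READ | PROT_WRITE,
                                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0));
  if (base == MAP_FAILED) { perror("mmap"); return 1; }
  if (mprotect(base, guard, PROT_NONE) != 0) { perror("mprotect"); return 1; }
  stack_t ss;
  ss.ss_sp = base + guard;   // usable stack starts after the guard
  ss.ss_size = total - guard;
  ss.ss_flags = 0;
  if (sigaltstack(&ss, nullptr) != 0) { perror("sigaltstack"); return 1; }
  // A SA_ONSTACK handler that overruns this stack now hits PROT_NONE
  // instead of silently trampling adjacent memory.
  return 0;
}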
@@ -149,7 +149,7 @@ static void* __create_thread_mapped_space(size_t mmap_size, size_t stack_guard_size) {
     munmap(space, mmap_size);
     return NULL;
   }
-  prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, space, stack_guard_size, "thread stack guard page");
+  prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, space, stack_guard_size, "thread stack guard");
 
   return space;
 }
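PR_SET_VMA_ANON_NAME is an Android kernel extension (later mainlined in Linux 5.17), and, as the comment above notes, the kernel keeps the string pointer rather than copying it, so only a string with static storage is safe. A sketch, guarded for kernels and headers without the extension:

#include <sys/mman.h>
#include <sys/prctl.h>
#include <unistd.h>

int main() {
  size_t len = sysconf(_SC_PAGESIZE);
  void* p = mmap(nullptr, len, PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (p == MAP_FAILED) return 1;
#ifdef PR_SET_VMA
  // Must be a string literal (static storage): the kernel stores the
  // pointer and reads it when generating /proc/<pid>/maps.
  prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, p, len, "example mapping");
#endif
  munmap(p, len);
  return 0;
}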
@@ -161,7 +161,9 @@ static int __allocate_thread(pthread_attr_t* attr, pthread_internal_t** threadp,
   if (attr->stack_base == NULL) {
     // The caller didn't provide a stack, so allocate one.
     // Make sure the stack size and guard size are multiples of PAGE_SIZE.
-    mmap_size = BIONIC_ALIGN(attr->stack_size + sizeof(pthread_internal_t), PAGE_SIZE);
+    if (__builtin_add_overflow(attr->stack_size, attr->guard_size, &mmap_size)) return EAGAIN;
+    if (__builtin_add_overflow(mmap_size, sizeof(pthread_internal_t), &mmap_size)) return EAGAIN;
+    mmap_size = BIONIC_ALIGN(mmap_size, PAGE_SIZE);
     attr->guard_size = BIONIC_ALIGN(attr->guard_size, PAGE_SIZE);
     attr->stack_base = __create_thread_mapped_space(mmap_size, attr->guard_size);
     if (attr->stack_base == NULL) {
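__builtin_add_overflow() is a GCC/Clang builtin that reports wraparound instead of silently truncating, which is what lets an enormous stack-plus-guard request fail cleanly with EAGAIN rather than under-allocate. A minimal illustration of the sizing logic (hypothetical helper, not bionic's code):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

// Hypothetical stand-in for the sizing logic above.
static int total_size(size_t stack, size_t guard, size_t extra, size_t* out) {
  size_t total;
  if (__builtin_add_overflow(stack, guard, &total)) return EAGAIN;
  if (__builtin_add_overflow(total, extra, &total)) return EAGAIN;
  *out = total;
  return 0;
}

int main() {
  size_t total;
  // Plausible request: succeeds.
  printf("%d\n", total_size(1024 * 1024, 4096, 640, &total));   // 0
  // stack + guard wraps past SIZE_MAX: rejected instead of
  // wrapping around to a tiny mapping.
  printf("%d\n", total_size(SIZE_MAX, SIZE_MAX, 640, &total));  // EAGAIN
  return 0;
}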
@@ -176,7 +178,7 @@ static int __allocate_thread(pthread_attr_t* attr, pthread_internal_t** threadp,
   // Mapped space(or user allocated stack) is used for:
   //   pthread_internal_t
-  //   thread stack (including guard page)
+  //   thread stack (including guard)
   // To safely access the pthread_internal_t and thread stack, we need to find a 16-byte aligned boundary.
   stack_top = reinterpret_cast<uint8_t*>(

libc/bionic/pthread_exit.cpp

@@ -93,8 +93,8 @@ void pthread_exit(void* return_value) {
   }
 
   // Unmap the bionic TLS, including guard pages.
-  void* allocation = reinterpret_cast<char*>(thread->bionic_tls) - PAGE_SIZE;
-  munmap(allocation, BIONIC_TLS_SIZE + 2 * PAGE_SIZE);
+  void* allocation = reinterpret_cast<char*>(thread->bionic_tls) - PTHREAD_GUARD_SIZE;
+  munmap(allocation, BIONIC_TLS_SIZE + 2 * PTHREAD_GUARD_SIZE);
 
   ThreadJoinState old_state = THREAD_NOT_JOINED;
   while (old_state == THREAD_NOT_JOINED &&

libc/bionic/pthread_internal.h

@@ -141,25 +141,29 @@ static inline __always_inline bionic_tls& __get_bionic_tls() {
 __LIBC_HIDDEN__ void pthread_key_clean_all(void);
 
-// SIGSTKSZ (8kB) is not big enough.
-// snprintf to a stack buffer of size PATH_MAX consumes ~7kB of stack.
-// Also, on 64-bit, logging uses more than 8kB by itself:
-// https://code.google.com/p/android/issues/detail?id=187064
-#define SIGNAL_STACK_SIZE_WITHOUT_GUARD_PAGE (16 * 1024)
+// Address space is precious on LP32, so use the minimum unit: one page.
+// On LP64, we could use more but there's no obvious advantage to doing
+// so, and the various media processes use RLIMIT_AS as a way to limit
+// the amount of allocation they'll do.
+#define PTHREAD_GUARD_SIZE PAGE_SIZE
 
-/*
- * Traditionally we gave threads a 1MiB stack. When we started
- * allocating per-thread alternate signal stacks to ease debugging of
- * stack overflows, we subtracted the same amount we were using there
- * from the default thread stack size. This should keep memory usage
- * roughly constant.
- */
-#define PTHREAD_STACK_SIZE_DEFAULT ((1 * 1024 * 1024) - SIGNAL_STACK_SIZE_WITHOUT_GUARD_PAGE)
+// SIGSTKSZ (8KiB) is not big enough.
+// An snprintf to a stack buffer of size PATH_MAX consumes ~7KiB of stack.
+// Also, on 64-bit, logging uses more than 8KiB by itself:
+// https://code.google.com/p/android/issues/detail?id=187064
+#define SIGNAL_STACK_SIZE_WITHOUT_GUARD (16 * 1024)
+
+// Traditionally we gave threads a 1MiB stack. When we started
+// allocating per-thread alternate signal stacks to ease debugging of
+// stack overflows, we subtracted the same amount we were using there
+// from the default thread stack size. This should keep memory usage
+// roughly constant.
+#define PTHREAD_STACK_SIZE_DEFAULT ((1 * 1024 * 1024) - SIGNAL_STACK_SIZE_WITHOUT_GUARD)
 
 // Leave room for a guard page in the internally created signal stacks.
-#define SIGNAL_STACK_SIZE (SIGNAL_STACK_SIZE_WITHOUT_GUARD_PAGE + PAGE_SIZE)
+#define SIGNAL_STACK_SIZE (SIGNAL_STACK_SIZE_WITHOUT_GUARD + PTHREAD_GUARD_SIZE)
 
-/* Needed by fork. */
+// Needed by fork.
 __LIBC_HIDDEN__ extern void __bionic_atfork_run_prepare();
 __LIBC_HIDDEN__ extern void __bionic_atfork_run_child();
 __LIBC_HIDDEN__ extern void __bionic_atfork_run_parent();
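Spelled out, assuming the usual 4KiB page: each internally created signal stack now maps 16KiB usable plus a 4KiB guard (20KiB total), and the default thread stack stays at 1MiB minus 16KiB = 1,032,192 bytes. A compile-time sketch of that arithmetic (local constants under that assumption, not bionic's macros):

#include <cstddef>

// Assumes a 4KiB page; these mirror the macros above, not bionic itself.
constexpr size_t kPageSize = 4096;
constexpr size_t kGuardSize = kPageSize;                     // PTHREAD_GUARD_SIZE
constexpr size_t kSigStackNoGuard = 16 * 1024;               // SIGNAL_STACK_SIZE_WITHOUT_GUARD
constexpr size_t kSigStack = kSigStackNoGuard + kGuardSize;  // SIGNAL_STACK_SIZE
constexpr size_t kDefaultStack = (1 * 1024 * 1024) - kSigStackNoGuard;

static_assert(kSigStack == 20 * 1024, "16KiB usable plus one guard page");
static_assert(kDefaultStack == 1008 * 1024, "1MiB minus the signal stack's 16KiB");

int main() { return 0; }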

tests/pthread_test.cpp

@@ -660,30 +660,54 @@ static size_t GetActualStackSize(const pthread_attr_t& attributes) {
   return result;
 }
 
-TEST(pthread, pthread_attr_setguardsize) {
+TEST(pthread, pthread_attr_setguardsize_tiny) {
   pthread_attr_t attributes;
   ASSERT_EQ(0, pthread_attr_init(&attributes));
 
-  // Get the default guard size.
-  size_t default_guard_size;
-  ASSERT_EQ(0, pthread_attr_getguardsize(&attributes, &default_guard_size));
-
   // No such thing as too small: will be rounded up to one page by pthread_create.
   ASSERT_EQ(0, pthread_attr_setguardsize(&attributes, 128));
   size_t guard_size;
   ASSERT_EQ(0, pthread_attr_getguardsize(&attributes, &guard_size));
   ASSERT_EQ(128U, guard_size);
+  ASSERT_EQ(4096U, GetActualGuardSize(attributes));
+}
 
-  // Large enough and a multiple of the page size.
-  ASSERT_EQ(0, pthread_attr_setguardsize(&attributes, 32*1024));
-  ASSERT_EQ(0, pthread_attr_getguardsize(&attributes, &guard_size));
-  ASSERT_EQ(32*1024U, guard_size);
+TEST(pthread, pthread_attr_setguardsize_reasonable) {
+  pthread_attr_t attributes;
+  ASSERT_EQ(0, pthread_attr_init(&attributes));
+
+  // Large enough and a multiple of the page size.
+  ASSERT_EQ(0, pthread_attr_setguardsize(&attributes, 32*1024));
+  size_t guard_size;
+  ASSERT_EQ(0, pthread_attr_getguardsize(&attributes, &guard_size));
+  ASSERT_EQ(32*1024U, guard_size);
+  ASSERT_EQ(32*1024U, GetActualGuardSize(attributes));
+}
 
-  // Large enough but not a multiple of the page size; will be rounded up by pthread_create.
-  ASSERT_EQ(0, pthread_attr_setguardsize(&attributes, 32*1024 + 1));
-  ASSERT_EQ(0, pthread_attr_getguardsize(&attributes, &guard_size));
-  ASSERT_EQ(32*1024U + 1, guard_size);
+TEST(pthread, pthread_attr_setguardsize_needs_rounding) {
+  pthread_attr_t attributes;
+  ASSERT_EQ(0, pthread_attr_init(&attributes));
+
+  // Large enough but not a multiple of the page size.
+  ASSERT_EQ(0, pthread_attr_setguardsize(&attributes, 32*1024 + 1));
+  size_t guard_size;
+  ASSERT_EQ(0, pthread_attr_getguardsize(&attributes, &guard_size));
+  ASSERT_EQ(32*1024U + 1, guard_size);
+  ASSERT_EQ(36*1024U, GetActualGuardSize(attributes));
+}
+
+TEST(pthread, pthread_attr_setguardsize_enormous) {
+  pthread_attr_t attributes;
+  ASSERT_EQ(0, pthread_attr_init(&attributes));
+
+  // Larger than the stack itself. (Historically we mistakenly carved
+  // the guard out of the stack itself, rather than adding it after the
+  // end.)
+  ASSERT_EQ(0, pthread_attr_setguardsize(&attributes, 32*1024*1024));
+  size_t guard_size;
+  ASSERT_EQ(0, pthread_attr_getguardsize(&attributes, &guard_size));
+  ASSERT_EQ(32*1024*1024U, guard_size);
+  ASSERT_EQ(32*1024*1024U, GetActualGuardSize(attributes));
+}
 
 TEST(pthread, pthread_attr_setstacksize) {
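GetActualGuardSize() itself isn't shown in this diff. A plausible sketch (not necessarily the suite's exact helper): create a thread with the given attributes and read back, via the pthread_getattr_np() extension, the guard size pthread_create() actually applied:

#include <pthread.h>

// Sketch: spawn a thread with `attributes`, then ask for the live
// thread's effective attributes to see the guard size actually used.
static size_t GetActualGuardSize(const pthread_attr_t& attributes) {
  size_t result = 0;
  pthread_t t;
  pthread_create(&t, &attributes,
                 [](void* arg) -> void* {
                   pthread_attr_t attr;
                   pthread_getattr_np(pthread_self(), &attr);
                   pthread_attr_getguardsize(&attr, static_cast<size_t*>(arg));
                   return nullptr;
                 },
                 &result);
  pthread_join(t, nullptr);
  return result;
}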