Merge "Avoid using malloc debug code after exit."

Christopher Ferris 2019-05-07 22:28:50 +00:00 committed by Gerrit Code Review
commit f15a00b1ec
2 changed files with 109 additions and 10 deletions

Changed file 1 of 2

@@ -29,6 +29,7 @@
#include <errno.h>
#include <inttypes.h>
#include <malloc.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
@@ -103,6 +104,32 @@ void* debug_valloc(size_t size);
__END_DECLS
// ------------------------------------------------------------------------
class ScopedConcurrentLock {
public:
ScopedConcurrentLock() {
pthread_rwlock_rdlock(&lock_);
}
~ScopedConcurrentLock() {
pthread_rwlock_unlock(&lock_);
}
static void Init() {
pthread_rwlockattr_t attr;
// Set the attribute so that when a write lock is pending, read locks are no
// longer granted.
pthread_rwlockattr_setkind_np(&attr, PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP);
pthread_rwlock_init(&lock_, &attr);
}
static void BlockAllOperations() {
pthread_rwlock_wrlock(&lock_);
}
private:
static pthread_rwlock_t lock_;
};
pthread_rwlock_t ScopedConcurrentLock::lock_;
static void InitAtfork() {
static pthread_once_t atfork_init = PTHREAD_ONCE_INIT;
pthread_once(&atfork_init, []() {
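ScopedConcurrentLock is the core of the change: every allocator entry point below takes this lock for reading, and debug_finalize takes it for writing, so in-flight operations drain before teardown while new operations are held off. As a minimal standalone sketch of the writer-preference behavior the comment describes (assuming a platform with the GNU/Bionic pthread_rwlockattr_setkind_np extension; everything here is illustrative, not part of the commit):

#include <pthread.h>
#include <stdio.h>

#include <thread>
#include <vector>

static pthread_rwlock_t g_lock;

int main() {
  pthread_rwlockattr_t attr;
  pthread_rwlockattr_init(&attr);
  // Without this kind, a steady stream of readers can starve the writer;
  // with it, a pending writer stops new read locks from being granted.
  pthread_rwlockattr_setkind_np(&attr, PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP);
  pthread_rwlock_init(&g_lock, &attr);

  // Readers: each "operation" holds the lock only for its own duration,
  // mirroring one debug_malloc/debug_free call.
  std::vector<std::thread> readers;
  for (int i = 0; i < 4; i++) {
    readers.emplace_back([] {
      for (int j = 0; j < 100000; j++) {
        pthread_rwlock_rdlock(&g_lock);
        pthread_rwlock_unlock(&g_lock);
      }
    });
  }

  // Writer: once this is granted, no new reader gets in until it is released.
  pthread_rwlock_wrlock(&g_lock);
  printf("writer holds the lock; readers arriving now must wait\n");
  pthread_rwlock_unlock(&g_lock);

  for (auto& t : readers) {
    t.join();
  }
  return 0;
}

In the real code the rdlock/unlock pair is wrapped in the RAII constructor/destructor above, so every early return in the debug_* functions still releases the lock.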
@@ -257,6 +284,8 @@ bool debug_initialize(const MallocDispatch* malloc_dispatch, bool* zygote_child,
info_log("%s: malloc debug enabled", getprogname());
}
ScopedConcurrentLock::Init();
return true;
}
@@ -265,6 +294,10 @@ void debug_finalize() {
return;
}
// Make sure that there are no other threads doing debug allocations
// before we kill everything.
ScopedConcurrentLock::BlockAllOperations();
// Turn off capturing allocation calls.
DebugDisableSet(true);
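debug_finalize runs while other threads may still be inside malloc or free; BlockAllOperations takes the write lock and deliberately never releases it, so any debug operation that starts after this point parks on the read lock instead of racing with the teardown below. A rough standalone sketch of that shutdown shape, using an atexit handler and placeholder names rather than the commit's code:

#include <pthread.h>
#include <stdlib.h>

#include <atomic>
#include <thread>

static pthread_rwlock_t g_op_lock = PTHREAD_RWLOCK_INITIALIZER;
static std::atomic<int>* g_state = new std::atomic<int>(0);  // stands in for the global debug state

static void guarded_operation() {
  pthread_rwlock_rdlock(&g_op_lock);
  g_state->fetch_add(1);  // safe: finalize() cannot delete g_state while a read lock is held
  pthread_rwlock_unlock(&g_op_lock);
}

static void finalize() {
  // Wait for in-flight operations, then keep the lock forever: threads that
  // call guarded_operation() after exit() block here instead of touching
  // state that is about to be destroyed.
  pthread_rwlock_wrlock(&g_op_lock);
  delete g_state;
  g_state = nullptr;
}

int main() {
  atexit(finalize);
  for (int i = 0; i < 8; i++) {
    std::thread([] {
      while (true) {
        guarded_operation();
      }
    }).detach();
  }
  exit(0);  // finalize() runs; the detached threads quietly block, no crash
}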
@@ -292,6 +325,8 @@ void debug_finalize() {
void debug_get_malloc_leak_info(uint8_t** info, size_t* overall_size, size_t* info_size,
size_t* total_memory, size_t* backtrace_size) {
ScopedConcurrentLock lock;
ScopedDisableDebugCalls disable;
// Verify the arguments.
@@ -325,6 +360,7 @@ size_t debug_malloc_usable_size(void* pointer) {
if (DebugCallsDisabled() || pointer == nullptr) {
return g_dispatch->malloc_usable_size(pointer);
}
ScopedConcurrentLock lock;
ScopedDisableDebugCalls disable;
if (!VerifyPointer(pointer, "malloc_usable_size")) {
@@ -388,6 +424,7 @@ void* debug_malloc(size_t size) {
if (DebugCallsDisabled()) {
return g_dispatch->malloc(size);
}
ScopedConcurrentLock lock;
ScopedDisableDebugCalls disable;
void* pointer = InternalMalloc(size);
@@ -463,6 +500,7 @@ void debug_free(void* pointer) {
if (DebugCallsDisabled() || pointer == nullptr) {
return g_dispatch->free(pointer);
}
ScopedConcurrentLock lock;
ScopedDisableDebugCalls disable;
if (g_debug->config().options() & RECORD_ALLOCS) {
@@ -480,6 +518,7 @@ void* debug_memalign(size_t alignment, size_t bytes) {
if (DebugCallsDisabled()) {
return g_dispatch->memalign(alignment, bytes);
}
ScopedConcurrentLock lock;
ScopedDisableDebugCalls disable;
if (bytes == 0) {
@@ -558,6 +597,7 @@ void* debug_realloc(void* pointer, size_t bytes) {
if (DebugCallsDisabled()) {
return g_dispatch->realloc(pointer, bytes);
}
ScopedConcurrentLock lock;
ScopedDisableDebugCalls disable;
if (pointer == nullptr) {
@@ -676,6 +716,7 @@ void* debug_calloc(size_t nmemb, size_t bytes) {
if (DebugCallsDisabled()) {
return g_dispatch->calloc(nmemb, bytes);
}
ScopedConcurrentLock lock;
ScopedDisableDebugCalls disable;
size_t size;
@@ -737,6 +778,8 @@ int debug_malloc_info(int options, FILE* fp) {
if (DebugCallsDisabled() || !g_debug->TrackPointers()) {
return g_dispatch->malloc_info(options, fp);
}
ScopedConcurrentLock lock;
ScopedDisableDebugCalls disable;
MallocXmlElem root(fp, "malloc", "version=\"debug-malloc-1\"");
std::vector<ListInfoType> list;
@@ -786,6 +829,7 @@ int debug_posix_memalign(void** memptr, size_t alignment, size_t size) {
int debug_iterate(uintptr_t base, size_t size, void (*callback)(uintptr_t, size_t, void*),
void* arg) {
ScopedConcurrentLock lock;
if (g_debug->TrackPointers()) {
// Since malloc is disabled, don't bother acquiring any locks.
for (auto it = PointerData::begin(); it != PointerData::end(); ++it) {
@@ -800,6 +844,7 @@ int debug_iterate(uintptr_t base, size_t size, void (*callback)(uintptr_t, size_
}
void debug_malloc_disable() {
ScopedConcurrentLock lock;
g_dispatch->malloc_disable();
if (g_debug->pointer) {
g_debug->pointer->PrepareFork();
@@ -807,6 +852,7 @@ void debug_malloc_disable() {
}
void debug_malloc_enable() {
ScopedConcurrentLock lock;
if (g_debug->pointer) {
g_debug->pointer->PostForkParent();
}
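debug_malloc_disable and debug_malloc_enable bracket the allocator's bookkeeping around fork: the PrepareFork/PostForkParent calls are the classic pthread_atfork-style locking of allocator state before the fork and unlocking afterwards, so a child forked mid-allocation does not inherit a lock owned by a thread that no longer exists in it; with this change they also take the concurrent read lock so an exit racing with a fork cannot tear the state down under them. The general shape of that idiom, sketched with pthread_atfork and an illustrative mutex (not the commit's code):

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

// Illustrative lock protecting some allocator bookkeeping.
static pthread_mutex_t g_tracking_lock = PTHREAD_MUTEX_INITIALIZER;

static void prepare() { pthread_mutex_lock(&g_tracking_lock); }    // runs in the parent, before fork
static void parent()  { pthread_mutex_unlock(&g_tracking_lock); }  // runs in the parent, after fork
static void child()   { pthread_mutex_unlock(&g_tracking_lock); }  // runs in the new child

int main() {
  // Registered once; from then on every fork() brackets the lock, so the
  // child never starts life with g_tracking_lock held by a vanished thread.
  pthread_atfork(prepare, parent, child);

  pid_t pid = fork();
  if (pid == 0) {
    // The child can take the lock without deadlocking.
    pthread_mutex_lock(&g_tracking_lock);
    printf("child acquired the bookkeeping lock\n");
    pthread_mutex_unlock(&g_tracking_lock);
    _exit(0);
  }
  return 0;
}

Some implementations re-initialize the lock in the child handler instead of unlocking it; either way the point is that fork never observes the lock half-held.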
@@ -817,6 +863,7 @@ ssize_t debug_malloc_backtrace(void* pointer, uintptr_t* frames, size_t max_fram
if (DebugCallsDisabled() || pointer == nullptr) {
return 0;
}
ScopedConcurrentLock lock;
ScopedDisableDebugCalls disable;
if (!(g_debug->config().options() & BACKTRACE)) {
@@ -870,6 +917,7 @@ static void write_dump(FILE* fp) {
}
bool debug_write_malloc_leak_info(FILE* fp) {
ScopedConcurrentLock lock;
ScopedDisableDebugCalls disable;
std::lock_guard<std::mutex> guard(g_dump_lock);
@@ -883,6 +931,7 @@ bool debug_write_malloc_leak_info(FILE* fp) {
}
void debug_dump_heap(const char* file_name) {
ScopedConcurrentLock lock;
ScopedDisableDebugCalls disable;
std::lock_guard<std::mutex> guard(g_dump_lock);

Changed file 2 of 2

@@ -42,13 +42,15 @@
#include <log/log.h>
#include <string>
#include <thread>
#include <vector>
#include "private/bionic_malloc.h"
static constexpr time_t kTimeoutSeconds = 5;
static constexpr time_t kTimeoutSeconds = 10;
static void Exec(const char* test_name, const char* debug_options, pid_t* pid) {
static void Exec(const char* test_name, const char* debug_options, pid_t* pid, int exit_code = 0,
time_t timeout_seconds = kTimeoutSeconds) {
int fds[2];
ASSERT_NE(-1, pipe(fds));
ASSERT_NE(-1, fcntl(fds[0], F_SETFL, O_NONBLOCK));
@@ -94,7 +96,8 @@ static void Exec(const char* test_name, const char* debug_options, pid_t* pid) {
output.append(buffer.data(), bytes);
}
if ((time(nullptr) - start_time) > kTimeoutSeconds) {
if ((time(nullptr) - start_time) > timeout_seconds) {
kill(*pid, SIGINT);
break;
}
}
@@ -109,7 +112,7 @@ static void Exec(const char* test_name, const char* debug_options, pid_t* pid) {
done = true;
break;
}
if ((time(nullptr) - start_time) > kTimeoutSeconds) {
if ((time(nullptr) - start_time) > timeout_seconds) {
break;
}
}
@@ -119,21 +122,23 @@ static void Exec(const char* test_name, const char* debug_options, pid_t* pid) {
while (true) {
int kill_status;
int wait_pid = waitpid(*pid, &kill_status, WNOHANG);
if (wait_pid == *pid || (time(nullptr) - start_time) > kTimeoutSeconds) {
if (wait_pid == *pid || (time(nullptr) - start_time) > timeout_seconds) {
break;
}
}
}
ASSERT_TRUE(done) << "Timed out waiting for waitpid, output:\n" << output;
ASSERT_EQ(0, WEXITSTATUS(status)) << "Output:\n" << output;
ASSERT_FALSE(WIFSIGNALED(status))
<< "Failed with signal " << WTERMSIG(status) << "\nOutput:\n" << output;
ASSERT_EQ(exit_code, WEXITSTATUS(status)) << "Output:\n" << output;
}
static void GetLogStr(pid_t pid, std::string* log_str) {
static void GetLogStr(pid_t pid, std::string* log_str, log_id log = LOG_ID_MAIN) {
log_str->clear();
logger_list* list;
list = android_logger_list_open(LOG_ID_MAIN, ANDROID_LOG_RDONLY | ANDROID_LOG_NONBLOCK, 1000, pid);
list = android_logger_list_open(log, ANDROID_LOG_RDONLY | ANDROID_LOG_NONBLOCK, 1000, pid);
ASSERT_TRUE(list != nullptr);
while (true) {
@@ -168,7 +173,8 @@ static void GetLogStr(pid_t pid, std::string* log_str) {
android_logger_list_close(list);
}
static void FindStrings(pid_t pid, std::vector<const char*> match_strings) {
static void FindStrings(pid_t pid, std::vector<const char*> match_strings,
time_t timeout_seconds = kTimeoutSeconds) {
std::string log_str;
time_t start = time(nullptr);
bool found_all;
@@ -184,7 +190,7 @@ static void FindStrings(pid_t pid, std::vector<const char*> match_strings) {
if (found_all) {
return;
}
if ((time(nullptr) - start) > kTimeoutSeconds) {
if ((time(nullptr) - start) > timeout_seconds) {
break;
}
}
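All of the helpers above (Exec, GetLogStr, FindStrings) poll with a deadline instead of blocking, which is what lets the new test below rerun a child process a hundred times without ever hanging the suite; this change just threads an expected exit code and a configurable timeout through Exec. A reduced, standalone sketch of the fork/exec, non-blocking pipe, and waitpid(WNOHANG) pattern Exec is built on (illustrative, not the test code):

#include <fcntl.h>
#include <signal.h>
#include <stdio.h>
#include <sys/wait.h>
#include <time.h>
#include <unistd.h>

#include <string>

int main() {
  int fds[2];
  if (pipe(fds) == -1) return 1;
  fcntl(fds[0], F_SETFL, O_NONBLOCK);  // reads must never block past the deadline

  pid_t pid = fork();
  if (pid == 0) {
    // Child: route stdout/stderr into the pipe, then exec the real program.
    dup2(fds[1], STDOUT_FILENO);
    dup2(fds[1], STDERR_FILENO);
    close(fds[0]);
    close(fds[1]);
    execlp("echo", "echo", "hello from the child", (char*)nullptr);
    _exit(127);
  }
  close(fds[1]);

  const time_t timeout_seconds = 10;
  time_t start = time(nullptr);
  std::string output;
  char buffer[1024];

  // Drain output until EOF (the child exited and closed its end) or the deadline passes.
  while (true) {
    ssize_t bytes = read(fds[0], buffer, sizeof(buffer));
    if (bytes == 0) break;  // EOF
    if (bytes > 0) output.append(buffer, bytes);
    if ((time(nullptr) - start) > timeout_seconds) {
      kill(pid, SIGINT);  // give up on a wedged child
      break;
    }
  }
  close(fds[0]);

  // Reap the child, again polling rather than blocking forever.
  int status = 0;
  while (true) {
    if (waitpid(pid, &status, WNOHANG) == pid) break;
    if ((time(nullptr) - start) > timeout_seconds) break;
  }
  if (WIFEXITED(status)) {
    printf("exit code %d, output: %s", WEXITSTATUS(status), output.c_str());
  }
  return 0;
}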
@@ -414,3 +420,47 @@ TEST(MallocDebugSystemTest, verify_leak) {
TEST(MallocDebugSystemTest, verify_leak_allocation_limit) {
VerifyLeak("leak_memory_limit_");
}
static constexpr int kExpectedExitCode = 30;
TEST(MallocTests, DISABLED_exit_while_threads_allocating) {
std::atomic_uint32_t thread_mask;
thread_mask = 0;
for (size_t i = 0; i < 32; i++) {
std::thread malloc_thread([&thread_mask, i] {
while (true) {
void* ptr = malloc(100);
if (ptr == nullptr) {
exit(1000);
}
free(ptr);
thread_mask.fetch_or(1 << i);
}
});
malloc_thread.detach();
}
// Wait until each thread has done at least one allocation.
while (thread_mask.load() != 0xffffffff)
;
exit(kExpectedExitCode);
}
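This DISABLED_ helper is the program the real test execs: 32 detached threads hammer malloc/free forever, each sets its own bit in thread_mask once it has completed an allocation, and the main thread only calls exit() after every bit is set, which guarantees the process exits while allocations are genuinely in flight. The same rendezvous idiom in isolation (illustrative, outside the gtest harness):

#include <atomic>
#include <cstdint>
#include <cstdio>
#include <cstdlib>
#include <thread>

int main() {
  std::atomic<uint32_t> ready_mask{0};

  for (uint32_t i = 0; i < 32; i++) {
    std::thread([&ready_mask, i] {
      while (true) {
        void* ptr = std::malloc(100);
        if (ptr == nullptr) std::exit(1);
        std::free(ptr);
        // Announce "thread i has made progress"; idempotent after the first time.
        ready_mask.fetch_or(1u << i);
      }
    }).detach();
  }

  // Spin until all 32 bits are set, i.e. every thread has allocated at least once.
  while (ready_mask.load() != 0xffffffff) {
  }
  std::printf("all threads are mid-flight; exiting now\n");
  std::exit(0);
}

A 32-bit mask works because the thread count is fixed at 32; with a different count, an atomic counter compared against the number of threads gives the same guarantee.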
// Verify that exiting while other threads are doing malloc operations does not
// cause any crashes.
TEST(MallocDebugSystemTest, exit_while_threads_allocating) {
for (size_t i = 0; i < 100; i++) {
SCOPED_TRACE(::testing::Message() << "Run " << i);
pid_t pid;
ASSERT_NO_FATAL_FAILURE(Exec("MallocTests.DISABLED_exit_while_threads_allocating",
"verbose backtrace", &pid, kExpectedExitCode));
ASSERT_NO_FATAL_FAILURE(FindStrings(pid, std::vector<const char*>{"malloc debug enabled"}));
std::string log_str;
GetLogStr(pid, &log_str, LOG_ID_CRASH);
ASSERT_TRUE(log_str.find("Fatal signal") == std::string::npos)
<< "Found crash in log.\nLog message: " << log_str;
}
}