Do not hold hash table lock while backtracing.
There is a deadlock if holding the hash table lock while trying to do
a backtrace. Change the code so that the hash table lock is only held
while actually modifying either g_hash_table, or while modifying an
entry from g_hash_table.
Bug: 22423683
(cherry picked from commit 9fee99b060)
Change-Id: I72173bfe6f824ceaceea625c24e7851b87467135
This commit is contained in:
parent
147a50d06e
commit
3a40a0000a
1 changed file with 3 additions and 5 deletions
|
@ -133,8 +133,9 @@ static HashEntry* record_backtrace(uintptr_t* backtrace, size_t numEntries, size
|
|||
size |= SIZE_FLAG_ZYGOTE_CHILD;
|
||||
}
|
||||
|
||||
// Keep the lock held for as little time as possible to prevent deadlocks.
|
||||
ScopedPthreadMutexLocker locker(&g_hash_table->lock);
|
||||
HashEntry* entry = find_entry(g_hash_table, slot, backtrace, numEntries, size);
|
||||
|
||||
if (entry != NULL) {
|
||||
entry->allocations++;
|
||||
} else {
|
||||
|
@ -302,8 +303,6 @@ extern "C" void* leak_malloc(size_t bytes) {
|
|||
|
||||
void* base = g_malloc_dispatch->malloc(size);
|
||||
if (base != NULL) {
|
||||
ScopedPthreadMutexLocker locker(&g_hash_table->lock);
|
||||
|
||||
uintptr_t backtrace[BACKTRACE_SIZE];
|
||||
size_t numEntries = GET_BACKTRACE(backtrace, BACKTRACE_SIZE);
|
||||
|
||||
|
@ -328,8 +327,6 @@ extern "C" void leak_free(void* mem) {
|
|||
return;
|
||||
}
|
||||
|
||||
ScopedPthreadMutexLocker locker(&g_hash_table->lock);
|
||||
|
||||
// check the guard to make sure it is valid
|
||||
AllocationEntry* header = to_header(mem);
|
||||
|
||||
|
@ -342,6 +339,7 @@ extern "C" void leak_free(void* mem) {
|
|||
}
|
||||
}
|
||||
|
||||
ScopedPthreadMutexLocker locker(&g_hash_table->lock);
|
||||
if (header->guard == GUARD || is_valid_entry(header->entry)) {
|
||||
// decrement the allocations
|
||||
HashEntry* entry = header->entry;
|
||||
|
|
Loading…
Reference in a new issue