# More debug malloc fixes.

Merged via the Android Git Automerger and others.

* commit 'f07db75443cce4ab96c93bdaa0793d7b8e87547e':
  More debug malloc fixes.

commit d265d36749
8 changed files with 68 additions and 55 deletions

```diff
@@ -460,6 +460,9 @@ out_vformat(Out *o, const char *format, va_list args)
         if (c == 's') {
             /* string */
             str = va_arg(args, const char*);
+            if (str == NULL) {
+                str = "(null)";
+            }
         } else if (c == 'c') {
             /* character */
             /* NOTE: char is promoted to int when passed through the stack */
```
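The hunk above hardens the `%s` path of the logging formatter so a NULL argument prints as `(null)` instead of being dereferenced. A minimal standalone sketch of the same defensive pattern (`safe_str` is an illustrative name, not part of bionic):

```cpp
#include <cstdio>

// Hypothetical helper mirroring the guard added above: a NULL argument to a
// %s-style conversion would otherwise be dereferenced by the formatter, so
// substitute a literal "(null)" before it is used.
static const char* safe_str(const char* str) {
    return (str != NULL) ? str : "(null)";
}

int main() {
    const char* missing = NULL;
    std::printf("value = %s\n", safe_str(missing));  // prints: value = (null)
    return 0;
}
```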
```diff
@@ -47,7 +47,7 @@ typedef _Unwind_Context __unwind_context;
 static _Unwind_Reason_Code trace_function(__unwind_context* context, void* arg) {
   stack_crawl_state_t* state = static_cast<stack_crawl_state_t*>(arg);
   if (state->count) {
-    intptr_t ip = (intptr_t)_Unwind_GetIP(context);
+    uintptr_t ip = _Unwind_GetIP(context);
     if (ip) {
       state->addrs[0] = ip;
       state->addrs++;
@@ -60,7 +60,7 @@ static _Unwind_Reason_Code trace_function(__unwind_context* context, void* arg)
   return _URC_END_OF_STACK;
 }
 
-__LIBC_HIDDEN__ int get_backtrace(intptr_t* addrs, size_t max_entries) {
+__LIBC_HIDDEN__ int get_backtrace(uintptr_t* addrs, size_t max_entries) {
   stack_crawl_state_t state;
   state.count = max_entries;
   state.addrs = addrs;
@@ -68,8 +68,8 @@ __LIBC_HIDDEN__ int get_backtrace(intptr_t* addrs, size_t max_entries) {
   return max_entries - state.count;
 }
 
-__LIBC_HIDDEN__ void log_backtrace(mapinfo_t* map_info, intptr_t* addrs, size_t c) {
-  intptr_t self_bt[16];
+__LIBC_HIDDEN__ void log_backtrace(mapinfo_t* map_info, uintptr_t* addrs, size_t c) {
+  uintptr_t self_bt[16];
   if (addrs == NULL) {
     c = get_backtrace(self_bt, 16);
     addrs = self_bt;
@@ -101,7 +101,7 @@ __LIBC_HIDDEN__ void log_backtrace(mapinfo_t* map_info, intptr_t* addrs, size_t
     }
     if (symbol) {
       __libc_format_log(ANDROID_LOG_ERROR, "libc", " #%02d pc %08x %s (%s+0x%x)",
-                        index, rel_pc, soname, symbol, addrs[i] - (intptr_t)offset);
+                        index, rel_pc, soname, symbol, addrs[i] - (uintptr_t) offset);
     } else {
       __libc_format_log(ANDROID_LOG_ERROR, "libc", " #%02d pc %08x %s",
                         index, rel_pc, soname);
```

```diff
@@ -34,12 +34,12 @@
 
 struct stack_crawl_state_t {
   size_t count;
-  intptr_t* addrs;
+  uintptr_t* addrs;
 };
 
 struct mapinfo_t;
 
-__LIBC_HIDDEN__ int get_backtrace(intptr_t* stack_frames, size_t max_entries);
-__LIBC_HIDDEN__ void log_backtrace(mapinfo_t* map_info, intptr_t* stack_frames, size_t frame_count);
+__LIBC_HIDDEN__ int get_backtrace(uintptr_t* stack_frames, size_t max_entries);
+__LIBC_HIDDEN__ void log_backtrace(mapinfo_t* map_info, uintptr_t* stack_frames, size_t frame_count);
 
 #endif /* DEBUG_STACKTRACE_H */
```
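The two stack-trace files above move the backtrace API from `intptr_t` to `uintptr_t`, the natural type for program-counter values returned by `_Unwind_GetIP()`. As a rough, self-contained sketch of how such a collector is driven through `<unwind.h>` (the `FrameState`/`collect_frames` names are made up for the example; this is not bionic's exact code):

```cpp
#include <unwind.h>
#include <cstddef>
#include <cstdint>
#include <cstdio>

// Minimal frame collector: each unwind step hands us a context from which we
// read the instruction pointer and store it as a uintptr_t.
struct FrameState {
    uintptr_t* frames;   // output buffer
    size_t remaining;    // slots left in the buffer
    size_t collected;    // frames written so far
};

static _Unwind_Reason_Code on_frame(struct _Unwind_Context* context, void* arg) {
    FrameState* state = static_cast<FrameState*>(arg);
    if (state->remaining == 0) {
        return _URC_END_OF_STACK;   // buffer full, stop unwinding
    }
    uintptr_t ip = _Unwind_GetIP(context);
    if (ip != 0) {
        state->frames[state->collected++] = ip;
        state->remaining--;
    }
    return _URC_NO_REASON;          // keep walking the stack
}

static size_t collect_frames(uintptr_t* frames, size_t max_frames) {
    FrameState state = { frames, max_frames, 0 };
    _Unwind_Backtrace(on_frame, &state);
    return state.collected;
}

int main() {
    uintptr_t frames[16];
    size_t n = collect_frames(frames, 16);
    for (size_t i = 0; i < n; ++i) {
        std::printf("#%02zu pc %p\n", i, reinterpret_cast<void*>(frames[i]));
    }
    return 0;
}
```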
```diff
@@ -78,9 +78,9 @@ struct hdr_t {
     uint32_t tag;
     hdr_t* prev;
     hdr_t* next;
-    intptr_t bt[MAX_BACKTRACE_DEPTH];
+    uintptr_t bt[MAX_BACKTRACE_DEPTH];
     int bt_depth;
-    intptr_t freed_bt[MAX_BACKTRACE_DEPTH];
+    uintptr_t freed_bt[MAX_BACKTRACE_DEPTH];
     int freed_bt_depth;
     size_t size;
     char front_guard[FRONT_GUARD_LEN];
@@ -102,7 +102,7 @@ static inline hdr_t* meta(void* user) {
     return reinterpret_cast<hdr_t*>(user) - 1;
 }
 
-static unsigned num;
+static unsigned gAllocatedBlockCount;
 static hdr_t *tail;
 static hdr_t *head;
 static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
@@ -181,7 +181,7 @@ static inline void add(hdr_t *hdr, size_t size) {
     hdr->size = size;
     init_front_guard(hdr);
     init_rear_guard(hdr);
-    num++;
+    ++gAllocatedBlockCount;
     add_locked(hdr, &tail, &head);
 }
 
@@ -192,7 +192,7 @@ static inline int del(hdr_t *hdr) {
 
     ScopedPthreadMutexLocker locker(&lock);
     del_locked(hdr, &tail, &head);
-    num--;
+    --gAllocatedBlockCount;
     return 0;
 }
 
@@ -298,7 +298,7 @@ static inline void del_from_backlog(hdr_t *hdr) {
 
 static inline int del_leak(hdr_t *hdr, int *safe) {
     ScopedPthreadMutexLocker locker(&lock);
-    return del_and_check_locked(hdr, &tail, &head, &num, safe);
+    return del_and_check_locked(hdr, &tail, &head, &gAllocatedBlockCount, safe);
 }
 
 static inline void add_to_backlog(hdr_t *hdr) {
@@ -342,7 +342,7 @@ extern "C" void chk_free(void *ptr) {
     hdr_t* hdr = meta(ptr);
 
     if (del(hdr) < 0) {
-        intptr_t bt[MAX_BACKTRACE_DEPTH];
+        uintptr_t bt[MAX_BACKTRACE_DEPTH];
         int depth;
         depth = get_backtrace(bt, MAX_BACKTRACE_DEPTH);
         if (hdr->tag == BACKLOG_TAG) {
@@ -387,7 +387,7 @@ extern "C" void *chk_realloc(void *ptr, size_t size) {
     hdr_t* hdr = meta(ptr);
 
     if (del(hdr) < 0) {
-        intptr_t bt[MAX_BACKTRACE_DEPTH];
+        uintptr_t bt[MAX_BACKTRACE_DEPTH];
         int depth;
         depth = get_backtrace(bt, MAX_BACKTRACE_DEPTH);
         if (hdr->tag == BACKLOG_TAG) {
@@ -442,28 +442,36 @@ extern "C" void *chk_calloc(int nmemb, size_t size) {
 }
 
 static void heaptracker_free_leaked_memory() {
-    size_t total = num;
-    if (num) {
-        log_message("+++ Leaked allocations: %d\n", num);
-    }
-
-    hdr_t *del = NULL;
-    while (head) {
-        int safe;
-        del = head;
-        log_message("+++ Leaked block of size %d at %p (leak %d of %d)\n",
-                del->size, user(del), 1 + total - num, total);
-        if (del_leak(del, &safe)) {
-            /* safe == 1, because the allocation is valid */
-            log_backtrace(gMapInfo, del->bt, del->bt_depth);
-        }
-    }
-
-    // log_message("+++ DELETING %d BACKLOGGED ALLOCATIONS\n", backlog_num);
-    while (backlog_head) {
-        del = backlog_tail;
-        del_from_backlog(del);
+    if (gAllocatedBlockCount == 0) {
+        return;
+    }
+
+    // Use /proc/self/exe link to obtain the program name for logging
+    // purposes. If it's not available, we set it to "<unknown>".
+    char exe[PATH_MAX];
+    int count;
+    if ((count = readlink("/proc/self/exe", exe, sizeof(exe) - 1)) == -1) {
+        strlcpy(exe, "<unknown>", sizeof(exe));
+    } else {
+        exe[count] = '\0';
+    }
+
+    size_t index = 1;
+    const size_t total = gAllocatedBlockCount;
+    while (head != NULL) {
+        int safe;
+        hdr_t* block = head;
+        log_message("+++ %s leaked block of size %d at %p (leak %d of %d)",
+                exe, block->size, user(block), index++, total);
+        if (del_leak(block, &safe)) {
+            /* safe == 1, because the allocation is valid */
+            log_backtrace(gMapInfo, block->bt, block->bt_depth);
+        }
+    }
+
+    while (backlog_head != NULL) {
+        del_from_backlog(backlog_tail);
     }
 }
 
 /* Initializes malloc debugging framework.
```
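Besides renaming `num` to `gAllocatedBlockCount`, the rewritten leak report above prefixes each line with the program name read from `/proc/self/exe`. A standalone sketch of that lookup, with `strncpy` standing in for the BSD/bionic `strlcpy` so it builds anywhere:

```cpp
#include <climits>
#include <cstdio>
#include <cstring>
#include <unistd.h>

int main() {
    // Read the /proc/self/exe symlink to learn our own executable path,
    // falling back to "<unknown>" if the link cannot be read.
    char exe[PATH_MAX];
    ssize_t count = readlink("/proc/self/exe", exe, sizeof(exe) - 1);
    if (count == -1) {
        strncpy(exe, "<unknown>", sizeof(exe) - 1);
        exe[sizeof(exe) - 1] = '\0';
    } else {
        exe[count] = '\0';  // readlink() does not NUL-terminate
    }
    std::printf("program: %s\n", exe);
    return 0;
}
```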
```diff
@@ -148,7 +148,7 @@ extern "C" void get_malloc_leak_info(uint8_t** info, size_t* overallSize,
     }
 
     // XXX: the protocol doesn't allow variable size for the stack trace (yet)
-    *infoSize = (sizeof(size_t) * 2) + (sizeof(intptr_t) * BACKTRACE_SIZE);
+    *infoSize = (sizeof(size_t) * 2) + (sizeof(uintptr_t) * BACKTRACE_SIZE);
     *overallSize = *infoSize * gHashTable.count;
     *backtraceSize = BACKTRACE_SIZE;
 
@@ -167,7 +167,7 @@ extern "C" void get_malloc_leak_info(uint8_t** info, size_t* overallSize,
     const int count = gHashTable.count;
     for (int i = 0 ; i < count ; ++i) {
         HashEntry* entry = list[i];
-        size_t entrySize = (sizeof(size_t) * 2) + (sizeof(intptr_t) * entry->numEntries);
+        size_t entrySize = (sizeof(size_t) * 2) + (sizeof(uintptr_t) * entry->numEntries);
         if (entrySize < *infoSize) {
             /* we're writing less than a full entry, clear out the rest */
             memset(head + entrySize, 0, *infoSize - entrySize);
@@ -377,8 +377,10 @@ static void malloc_init_impl() {
     }
 
     // mksh is way too leaky. http://b/7291287.
-    if (debug_level >= 10 && strcmp(__progname, "sh") == 0) {
-        return;
+    if (debug_level >= 10) {
+        if (strcmp(__progname, "sh") == 0 || strcmp(__progname, "/system/bin/sh") == 0) {
+            return;
+        }
     }
 
     // Choose the appropriate .so for the requested debug level.
```
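`get_malloc_leak_info()` above packs one fixed-size record per unique backtrace, and the changed lines keep that record's element type in sync with the new `uintptr_t` arrays. A small illustration of the layout implied by that arithmetic (`LeakRecord` and the `BACKTRACE_SIZE` value of 32 are assumptions for the example, not taken from this change):

```cpp
#include <cstddef>
#include <cstdint>
#include <cstdio>

// Assumed backtrace capacity, for this illustration only.
static const size_t BACKTRACE_SIZE = 32;

// Hypothetical view of one fixed-size record in the leak-info buffer:
// two size_t fields followed by BACKTRACE_SIZE program-counter slots,
// zero-padded when a backtrace is shorter than the maximum.
struct LeakRecord {
    size_t size;                        // allocation size (plus flag bits)
    size_t allocations;                 // allocations sharing this backtrace
    uintptr_t frames[BACKTRACE_SIZE];   // always BACKTRACE_SIZE entries
};

int main() {
    size_t info_size = (sizeof(size_t) * 2) + (sizeof(uintptr_t) * BACKTRACE_SIZE);
    std::printf("per-entry size: %zu bytes, struct size: %zu bytes\n",
                info_size, sizeof(LeakRecord));
    return 0;
}
```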
```diff
@@ -57,7 +57,7 @@ struct HashEntry {
     // fields above "size" are NOT sent to the host
     size_t size;
     size_t allocations;
-    intptr_t backtrace[0];
+    uintptr_t backtrace[0];
 };
 
 struct HashTable {
```
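`HashEntry` above ends in a zero-length array, so each entry is allocated with its backtrace stored inline, as `record_backtrace()` does further down. A hedged sketch of that allocation pattern (`Entry`/`make_entry` are illustrative names, and plain `malloc` stands in for `dlmalloc`):

```cpp
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <cstdlib>
#include <cstring>

// One allocation record with its backtrace stored inline after the struct.
struct Entry {
    size_t numEntries;
    uintptr_t backtrace[0];  // zero-length array: storage follows the struct (GNU extension)
};

static Entry* make_entry(const uintptr_t* backtrace, size_t numEntries) {
    // Allocate the header plus numEntries frame slots in one block.
    Entry* e = static_cast<Entry*>(malloc(sizeof(Entry) + numEntries * sizeof(uintptr_t)));
    if (e == NULL) return NULL;
    e->numEntries = numEntries;
    memcpy(e->backtrace, backtrace, numEntries * sizeof(uintptr_t));
    return e;
}

int main() {
    uintptr_t bt[3] = { 0x1000, 0x2000, 0x3000 };
    Entry* e = make_entry(bt, 3);
    if (e != NULL) {
        std::printf("entry with %zu frames, first pc 0x%zx\n",
                    e->numEntries, (size_t)e->backtrace[0]);
        free(e);
    }
    return 0;
}
```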
```diff
@@ -90,7 +90,7 @@ static AllocationEntry* to_header(void* mem) {
 // Hash Table functions
 // =============================================================================
 
-static uint32_t get_hash(intptr_t* backtrace, size_t numEntries) {
+static uint32_t get_hash(uintptr_t* backtrace, size_t numEntries) {
     if (backtrace == NULL) return 0;
 
     int hash = 0;
@@ -103,7 +103,7 @@ static uint32_t get_hash(intptr_t* backtrace, size_t numEntries) {
 }
 
 static HashEntry* find_entry(HashTable* table, int slot,
-                             intptr_t* backtrace, size_t numEntries, size_t size) {
+                             uintptr_t* backtrace, size_t numEntries, size_t size) {
     HashEntry* entry = table->slots[slot];
     while (entry != NULL) {
         //debug_log("backtrace: %p, entry: %p entry->backtrace: %p\n",
@@ -113,7 +113,7 @@ static HashEntry* find_entry(HashTable* table, int slot,
          * including the flag bits.
          */
         if (entry->size == size && entry->numEntries == numEntries &&
-                !memcmp(backtrace, entry->backtrace, numEntries * sizeof(intptr_t))) {
+                !memcmp(backtrace, entry->backtrace, numEntries * sizeof(uintptr_t))) {
             return entry;
         }
 
@@ -123,7 +123,7 @@ static HashEntry* find_entry(HashTable* table, int slot,
     return NULL;
 }
 
-static HashEntry* record_backtrace(intptr_t* backtrace, size_t numEntries, size_t size) {
+static HashEntry* record_backtrace(uintptr_t* backtrace, size_t numEntries, size_t size) {
     size_t hash = get_hash(backtrace, numEntries);
     size_t slot = hash % HASHTABLE_SIZE;
 
@@ -142,7 +142,7 @@ static HashEntry* record_backtrace(intptr_t* backtrace, size_t numEntries, size_
         entry->allocations++;
     } else {
         // create a new entry
-        entry = static_cast<HashEntry*>(dlmalloc(sizeof(HashEntry) + numEntries*sizeof(intptr_t)));
+        entry = static_cast<HashEntry*>(dlmalloc(sizeof(HashEntry) + numEntries*sizeof(uintptr_t)));
         if (!entry) {
             return NULL;
         }
@@ -153,7 +153,7 @@ static HashEntry* record_backtrace(intptr_t* backtrace, size_t numEntries, size_
         entry->numEntries = numEntries;
         entry->size = size;
 
-        memcpy(entry->backtrace, backtrace, numEntries * sizeof(intptr_t));
+        memcpy(entry->backtrace, backtrace, numEntries * sizeof(uintptr_t));
 
         gHashTable.slots[slot] = entry;
 
@@ -272,7 +272,7 @@ extern "C" void* leak_malloc(size_t bytes) {
     if (base != NULL) {
         ScopedPthreadMutexLocker locker(&gAllocationsMutex);
 
-        intptr_t backtrace[BACKTRACE_SIZE];
+        uintptr_t backtrace[BACKTRACE_SIZE];
         size_t numEntries = get_backtrace(backtrace, BACKTRACE_SIZE);
 
         AllocationEntry* header = reinterpret_cast<AllocationEntry*>(base);
```
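The `find_entry()` change above keeps the `memcmp` byte count consistent with the `uintptr_t` element type of the stored backtrace. A small sketch of that lookup rule (`Site`/`same_site` are illustrative names):

```cpp
#include <cstdint>
#include <cstdio>
#include <cstring>

// Two allocation sites match only if the recorded size, the frame count,
// and every frame value are identical.
struct Site {
    size_t size;
    size_t numEntries;
    uintptr_t backtrace[8];
};

static bool same_site(const Site* a, size_t size, const uintptr_t* bt, size_t n) {
    return a->size == size && a->numEntries == n &&
           // element size in the byte count must match the array's element type
           memcmp(a->backtrace, bt, n * sizeof(uintptr_t)) == 0;
}

int main() {
    Site s = { 64, 2, { 0x1000, 0x2000 } };
    uintptr_t probe[2] = { 0x1000, 0x2000 };
    std::printf("match: %d\n", same_site(&s, 64, probe, 2));  // prints: match: 1
    return 0;
}
```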
```diff
@@ -180,8 +180,8 @@ static void* debug_realloc(void *ptr, size_t size, size_t old_size) {
 struct MutexInfo;
 
 typedef struct CallStack {
-    intptr_t depth;
-    intptr_t* addrs;
+    uintptr_t depth;
+    uintptr_t* addrs;
 } CallStack;
 
 typedef struct MutexInfo* MutexInfoListEntry;
@@ -222,7 +222,7 @@ typedef struct MutexInfo {
     CallStackList stacks;
     // call stack when this lock was acquired last
    int stackDepth;
-    intptr_t stackTrace[STACK_TRACE_DEPTH];
+    uintptr_t stackTrace[STACK_TRACE_DEPTH];
 } MutexInfo;
 
 static void growingListInit(GrowingList* list) {
@@ -285,10 +285,10 @@ static int pthread_mutex_unlock_unchecked(pthread_mutex_t *mutex) {
 
 /****************************************************************************/
 
-static void dup_backtrace(CallStack* stack, size_t count, intptr_t const* addrs) {
+static void dup_backtrace(CallStack* stack, size_t count, uintptr_t const* addrs) {
     stack->depth = count;
-    stack->addrs = DbgAllocLocked<intptr_t>(count);
-    memcpy(stack->addrs, addrs, count * sizeof(intptr_t));
+    stack->addrs = DbgAllocLocked<uintptr_t>(count);
+    memcpy(stack->addrs, addrs, count * sizeof(uintptr_t));
 }
 
 /****************************************************************************/
@@ -343,7 +343,7 @@ static void unlinkParentFromChild(MutexInfo* parent, MutexInfo* child) {
 /****************************************************************************/
 
 static void callstackListAdd(CallStackList* pList,
-        int count, intptr_t const* addrs) {
+        int count, uintptr_t const* addrs) {
     growingListAdd(pList, sizeof(CallStackListEntry));
     dup_backtrace(&pList->stack[pList->count - 1], count, addrs);
 }
@@ -365,7 +365,7 @@ static int traverseTree(MutexInfo* obj, MutexInfo const* objParent)
      */
     if (obj->historyMark) {
         int stackDepth;
-        intptr_t addrs[STACK_TRACE_DEPTH];
+        uintptr_t addrs[STACK_TRACE_DEPTH];
 
         /* Turn off prediction temporarily in this thread while logging */
         sPthreadDebugDisabledThread = gettid();
```
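`dup_backtrace()` above now allocates and copies with the same `uintptr_t` element type, so the allocation size and the `memcpy` byte count cannot drift apart. A minimal sketch of that pattern (`copy_stack` is an illustrative name; `malloc` stands in for the deadlock detector's `DbgAllocLocked` allocator):

```cpp
#include <cstdint>
#include <cstdlib>
#include <cstring>

// Duplicate a backtrace into a heap buffer, computing both the allocation
// size and the copy size from the same element type.
static uintptr_t* copy_stack(const uintptr_t* addrs, size_t count) {
    uintptr_t* copy = static_cast<uintptr_t*>(malloc(count * sizeof(uintptr_t)));
    if (copy != NULL) {
        memcpy(copy, addrs, count * sizeof(uintptr_t));
    }
    return copy;
}

int main() {
    uintptr_t frames[3] = { 0x10, 0x20, 0x30 };
    uintptr_t* dup = copy_stack(frames, 3);
    free(dup);
    return 0;
}
```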