am 0d7177c0: Merge "Clean up warnings in the malloc_debug_* files."

* commit '0d7177c0d47517514c81713427fc28e04dc3cd37':
  Clean up warnings in the malloc_debug_* files.

commit e5b85f45f9

11 changed files with 369 additions and 504 deletions
@@ -505,7 +505,8 @@ libc_common_cflags := \
    -I$(LOCAL_PATH)/private \
    -DPOSIX_MISTAKE \
    -DLOG_ON_HEAP_ERROR \
    -std=gnu99
    -std=gnu99 \
    -Wall -Wextra

# these macro definitions are required to implement the
# 'timezone' and 'daylight' global variables, as well as

@@ -812,7 +813,7 @@ LOCAL_SRC_FILES := \
    $(libc_arch_static_src_files) \
    $(libc_static_common_src_files) \
    bionic/dlmalloc.c \
    bionic/malloc_debug_common.c \
    bionic/malloc_debug_common.cpp \
    bionic/libc_init_static.c

LOCAL_CFLAGS := $(libc_common_cflags) \

@@ -844,7 +845,7 @@ LOCAL_SRC_FILES := \
    $(libc_arch_dynamic_src_files) \
    $(libc_static_common_src_files) \
    bionic/dlmalloc.c \
    bionic/malloc_debug_common.c \
    bionic/malloc_debug_common.cpp \
    bionic/pthread_debug.c \
    bionic/libc_init_dynamic.c

@@ -886,10 +887,10 @@ LOCAL_CFLAGS := \
LOCAL_C_INCLUDES := $(libc_common_c_includes)

LOCAL_SRC_FILES := \
    bionic/malloc_debug_leak.c \
    bionic/malloc_debug_check.c \
    bionic/malloc_debug_check_mapinfo.c \
    bionic/malloc_debug_stacktrace.c
    bionic/malloc_debug_leak.cpp \
    bionic/malloc_debug_check.cpp \
    bionic/malloc_debug_check_mapinfo.cpp \
    bionic/malloc_debug_stacktrace.cpp

LOCAL_MODULE:= libc_malloc_debug_leak
LOCAL_ADDITIONAL_DEPENDENCIES := $(LOCAL_PATH)/Android.mk

@@ -917,7 +918,7 @@ LOCAL_CFLAGS := \
LOCAL_C_INCLUDES := $(libc_common_c_includes)

LOCAL_SRC_FILES := \
    bionic/malloc_debug_qemu.c
    bionic/malloc_debug_qemu.cpp

LOCAL_MODULE:= libc_malloc_debug_qemu
LOCAL_ADDITIONAL_DEPENDENCIES := $(LOCAL_PATH)/Android.mk

@@ -47,7 +47,7 @@ char* getcwd(char* buf, size_t size) {
    // TODO: if we need to support paths longer than that, we'll have to walk the tree ourselves.
    size = getpagesize();
  }
  buf = allocated_buf = reinterpret_cast<char*>(malloc(allocated_size));
  buf = allocated_buf = static_cast<char*>(malloc(allocated_size));
  if (buf == NULL) {
    // malloc set errno.
    return NULL;

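The getcwd hunk above swaps a reinterpret_cast for a static_cast when typing the pointer returned by malloc; in C++ a static_cast is all that a void*-to-T* conversion needs. A minimal standalone sketch of the same idiom (allocate_buffer is an illustrative name, not part of the commit):

#include <stdlib.h>

// static_cast is sufficient for void* -> char* when taking ownership of a
// raw malloc'd buffer; reinterpret_cast is reserved for unrelated types.
char* allocate_buffer(size_t size) {
  char* buf = static_cast<char*>(malloc(size));
  return buf;  // NULL on allocation failure; caller releases with free()
}
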
@@ -67,33 +67,27 @@ extern unsigned int malloc_double_free_backlog;
#define REAR_GUARD 0xbb
#define REAR_GUARD_LEN (1<<5)

static void print_backtrace(const intptr_t *bt, unsigned int depth);

static void log_message(const char* format, ...)
{
    extern pthread_mutex_t gAllocationsMutex;
static void log_message(const char* format, ...) {
    extern const MallocDebug __libc_malloc_default_dispatch;
    extern const MallocDebug* __libc_malloc_dispatch;
    extern pthread_mutex_t gAllocationsMutex;

    va_list args;

    pthread_mutex_lock(&gAllocationsMutex);
    va_list args;
    {
        ScopedPthreadMutexLocker locker(&gAllocationsMutex);
        const MallocDebug* current_dispatch = __libc_malloc_dispatch;
        __libc_malloc_dispatch = &__libc_malloc_default_dispatch;
        va_start(args, format);
        __libc_android_log_vprint(ANDROID_LOG_ERROR, "libc",
                                  format, args);
        __libc_android_log_vprint(ANDROID_LOG_ERROR, "libc", format, args);
        va_end(args);
        __libc_malloc_dispatch = current_dispatch;
    }
    pthread_mutex_unlock(&gAllocationsMutex);
}

struct hdr {
struct hdr_t {
    uint32_t tag;
    struct hdr *prev;
    struct hdr *next;
    hdr_t* prev;
    hdr_t* next;
    intptr_t bt[MAX_BACKTRACE_DEPTH];
    int bt_depth;
    intptr_t freed_bt[MAX_BACKTRACE_DEPTH];

@@ -102,43 +96,35 @@ struct hdr {
    char front_guard[FRONT_GUARD_LEN];
} __attribute__((packed));

struct ftr {
struct ftr_t {
    char rear_guard[REAR_GUARD_LEN];
} __attribute__((packed));

static inline struct ftr * to_ftr(struct hdr *hdr)
{
    return (struct ftr *)(((char *)(hdr + 1)) + hdr->size);
static inline ftr_t* to_ftr(hdr_t* hdr) {
    return reinterpret_cast<ftr_t*>(reinterpret_cast<char*>(hdr + 1) + hdr->size);
}

static inline void *user(struct hdr *hdr)
{
static inline void* user(hdr_t* hdr) {
    return hdr + 1;
}

static inline struct hdr *meta(void *user)
{
    return ((struct hdr *)user) - 1;
static inline hdr_t* meta(void* user) {
    return reinterpret_cast<hdr_t*>(user) - 1;
}

/* Call this on exit() to get leaked memory */
void free_leaked_memory(void);

static unsigned num;
static struct hdr *tail;
static struct hdr *head;
static hdr_t *tail;
static hdr_t *head;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static unsigned backlog_num;
static struct hdr *backlog_tail;
static struct hdr *backlog_head;
static hdr_t *backlog_tail;
static hdr_t *backlog_head;
static pthread_mutex_t backlog_lock = PTHREAD_MUTEX_INITIALIZER;

extern __LIBC_HIDDEN__
int get_backtrace(intptr_t* addrs, size_t max_entries);
extern __LIBC_HIDDEN__ int get_backtrace(intptr_t* addrs, size_t max_entries);

static void print_backtrace(const intptr_t *bt, unsigned int depth)
{
static void print_backtrace(const intptr_t *bt, unsigned int depth) {
    const mapinfo *mi;
    unsigned int cnt;
    unsigned int rel_pc;

@ -158,39 +144,35 @@ static void print_backtrace(const intptr_t *bt, unsigned int depth)
|
|||
}
|
||||
}
|
||||
|
||||
static inline void init_front_guard(struct hdr *hdr)
|
||||
{
|
||||
static inline void init_front_guard(hdr_t *hdr) {
|
||||
memset(hdr->front_guard, FRONT_GUARD, FRONT_GUARD_LEN);
|
||||
}
|
||||
|
||||
static inline bool is_front_guard_valid(struct hdr *hdr)
|
||||
{
|
||||
unsigned i;
|
||||
for (i = 0; i < FRONT_GUARD_LEN; i++)
|
||||
if (hdr->front_guard[i] != FRONT_GUARD)
|
||||
static inline bool is_front_guard_valid(hdr_t *hdr) {
|
||||
for (size_t i = 0; i < FRONT_GUARD_LEN; i++) {
|
||||
if (hdr->front_guard[i] != FRONT_GUARD) {
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
return 1;
|
||||
}
|
||||
|
||||
static inline void init_rear_guard(struct hdr *hdr)
|
||||
{
|
||||
struct ftr *ftr = to_ftr(hdr);
|
||||
static inline void init_rear_guard(hdr_t *hdr) {
|
||||
ftr_t* ftr = to_ftr(hdr);
|
||||
memset(ftr->rear_guard, REAR_GUARD, REAR_GUARD_LEN);
|
||||
}
|
||||
|
||||
static inline bool is_rear_guard_valid(struct hdr *hdr)
|
||||
{
|
||||
static inline bool is_rear_guard_valid(hdr_t *hdr) {
|
||||
unsigned i;
|
||||
int valid = 1;
|
||||
int first_mismatch = -1;
|
||||
struct ftr *ftr = to_ftr(hdr);
|
||||
ftr_t* ftr = to_ftr(hdr);
|
||||
for (i = 0; i < REAR_GUARD_LEN; i++) {
|
||||
if (ftr->rear_guard[i] != REAR_GUARD) {
|
||||
if (first_mismatch < 0)
|
||||
first_mismatch = i;
|
||||
valid = 0;
|
||||
}
|
||||
else if (first_mismatch >= 0) {
|
||||
} else if (first_mismatch >= 0) {
|
||||
log_message("+++ REAR GUARD MISMATCH [%d, %d)\n", first_mismatch, i);
|
||||
first_mismatch = -1;
|
||||
}
|
||||
|
@ -201,8 +183,7 @@ static inline bool is_rear_guard_valid(struct hdr *hdr)
|
|||
return valid;
|
||||
}
|
||||
|
||||
static inline void add_locked(struct hdr *hdr, struct hdr **tail, struct hdr **head)
|
||||
{
|
||||
static inline void add_locked(hdr_t *hdr, hdr_t **tail, hdr_t **head) {
|
||||
hdr->prev = NULL;
|
||||
hdr->next = *head;
|
||||
if (*head)
|
||||
|
@ -212,50 +193,46 @@ static inline void add_locked(struct hdr *hdr, struct hdr **tail, struct hdr **h
|
|||
*head = hdr;
|
||||
}
|
||||
|
||||
static inline int del_locked(struct hdr *hdr, struct hdr **tail, struct hdr **head)
|
||||
{
|
||||
if (hdr->prev)
|
||||
static inline int del_locked(hdr_t *hdr, hdr_t **tail, hdr_t **head) {
|
||||
if (hdr->prev) {
|
||||
hdr->prev->next = hdr->next;
|
||||
else
|
||||
} else {
|
||||
*head = hdr->next;
|
||||
if (hdr->next)
|
||||
}
|
||||
if (hdr->next) {
|
||||
hdr->next->prev = hdr->prev;
|
||||
else
|
||||
} else {
|
||||
*tail = hdr->prev;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline void add(struct hdr *hdr, size_t size)
|
||||
{
|
||||
pthread_mutex_lock(&lock);
|
||||
static inline void add(hdr_t *hdr, size_t size) {
|
||||
ScopedPthreadMutexLocker locker(&lock);
|
||||
hdr->tag = ALLOCATION_TAG;
|
||||
hdr->size = size;
|
||||
init_front_guard(hdr);
|
||||
init_rear_guard(hdr);
|
||||
num++;
|
||||
add_locked(hdr, &tail, &head);
|
||||
pthread_mutex_unlock(&lock);
|
||||
}
|
||||
|
||||
static inline int del(struct hdr *hdr)
|
||||
{
|
||||
if (hdr->tag != ALLOCATION_TAG)
|
||||
static inline int del(hdr_t *hdr) {
|
||||
if (hdr->tag != ALLOCATION_TAG) {
|
||||
return -1;
|
||||
}
|
||||
|
||||
pthread_mutex_lock(&lock);
|
||||
ScopedPthreadMutexLocker locker(&lock);
|
||||
del_locked(hdr, &tail, &head);
|
||||
num--;
|
||||
pthread_mutex_unlock(&lock);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline void poison(struct hdr *hdr)
|
||||
{
|
||||
static inline void poison(hdr_t *hdr) {
|
||||
memset(user(hdr), FREE_POISON, hdr->size);
|
||||
}
|
||||
|
||||
static int was_used_after_free(struct hdr *hdr)
|
||||
{
|
||||
static int was_used_after_free(hdr_t *hdr) {
|
||||
unsigned i;
|
||||
const char *data = (const char *)user(hdr);
|
||||
for (i = 0; i < hdr->size; i++)
|
||||
|
@ -265,8 +242,7 @@ static int was_used_after_free(struct hdr *hdr)
|
|||
}
|
||||
|
||||
/* returns 1 if valid, *safe == 1 if safe to dump stack */
|
||||
static inline int check_guards(struct hdr *hdr, int *safe)
|
||||
{
|
||||
static inline int check_guards(hdr_t *hdr, int *safe) {
|
||||
*safe = 1;
|
||||
if (!is_front_guard_valid(hdr)) {
|
||||
if (hdr->front_guard[0] == FRONT_GUARD) {
|
||||
|
@ -291,17 +267,15 @@ static inline int check_guards(struct hdr *hdr, int *safe)
|
|||
}
|
||||
|
||||
/* returns 1 if valid, *safe == 1 if safe to dump stack */
|
||||
static inline int check_allocation_locked(struct hdr *hdr, int *safe)
|
||||
{
|
||||
static inline int check_allocation_locked(hdr_t *hdr, int *safe) {
|
||||
int valid = 1;
|
||||
*safe = 1;
|
||||
|
||||
if (hdr->tag != ALLOCATION_TAG && hdr->tag != BACKLOG_TAG) {
|
||||
log_message("+++ ALLOCATION %p HAS INVALID TAG %08x (NOT DUMPING STACKTRACE)\n",
|
||||
user(hdr), hdr->tag);
|
||||
/* Allocation header is probably corrupt, do not dequeue or dump stack
|
||||
* trace.
|
||||
*/
|
||||
// Allocation header is probably corrupt, do not dequeue or dump stack
|
||||
// trace.
|
||||
*safe = 0;
|
||||
return 0;
|
||||
}
|
||||
|
@ -310,11 +284,11 @@ static inline int check_allocation_locked(struct hdr *hdr, int *safe)
|
|||
log_message("+++ ALLOCATION %p SIZE %d WAS USED AFTER BEING FREED\n",
|
||||
user(hdr), hdr->size);
|
||||
valid = 0;
|
||||
/* check the guards to see if it's safe to dump a stack trace */
|
||||
(void)check_guards(hdr, safe);
|
||||
}
|
||||
else
|
||||
/* check the guards to see if it's safe to dump a stack trace */
|
||||
check_guards(hdr, safe);
|
||||
} else {
|
||||
valid = check_guards(hdr, safe);
|
||||
}
|
||||
|
||||
if (!valid && *safe) {
|
||||
log_message("+++ ALLOCATION %p SIZE %d ALLOCATED HERE:\n",
|
||||
|
@ -330,12 +304,10 @@ static inline int check_allocation_locked(struct hdr *hdr, int *safe)
|
|||
return valid;
|
||||
}
|
||||
|
||||
static inline int del_and_check_locked(struct hdr *hdr,
|
||||
struct hdr **tail, struct hdr **head, unsigned *cnt,
|
||||
int *safe)
|
||||
{
|
||||
int valid;
|
||||
valid = check_allocation_locked(hdr, safe);
|
||||
static inline int del_and_check_locked(hdr_t *hdr,
|
||||
hdr_t **tail, hdr_t **head, unsigned *cnt,
|
||||
int *safe) {
|
||||
int valid = check_allocation_locked(hdr, safe);
|
||||
if (safe) {
|
||||
(*cnt)--;
|
||||
del_locked(hdr, tail, head);
|
||||
|
@ -343,56 +315,42 @@ static inline int del_and_check_locked(struct hdr *hdr,
|
|||
return valid;
|
||||
}
|
||||
|
||||
static inline void del_from_backlog_locked(struct hdr *hdr)
|
||||
{
|
||||
int safe;
|
||||
(void)del_and_check_locked(hdr,
|
||||
&backlog_tail, &backlog_head, &backlog_num,
|
||||
&safe);
|
||||
hdr->tag = 0; /* clear the tag */
|
||||
static inline void del_from_backlog_locked(hdr_t *hdr) {
|
||||
int safe;
|
||||
del_and_check_locked(hdr,
|
||||
&backlog_tail, &backlog_head, &backlog_num,
|
||||
&safe);
|
||||
hdr->tag = 0; /* clear the tag */
|
||||
}
|
||||
|
||||
static inline void del_from_backlog(struct hdr *hdr)
|
||||
{
|
||||
pthread_mutex_lock(&backlog_lock);
|
||||
static inline void del_from_backlog(hdr_t *hdr) {
|
||||
ScopedPthreadMutexLocker locker(&backlog_lock);
|
||||
del_from_backlog_locked(hdr);
|
||||
pthread_mutex_unlock(&backlog_lock);
|
||||
}
|
||||
|
||||
static inline int del_leak(struct hdr *hdr, int *safe)
|
||||
{
|
||||
int valid;
|
||||
pthread_mutex_lock(&lock);
|
||||
valid = del_and_check_locked(hdr,
|
||||
&tail, &head, &num,
|
||||
safe);
|
||||
pthread_mutex_unlock(&lock);
|
||||
return valid;
|
||||
static inline int del_leak(hdr_t *hdr, int *safe) {
|
||||
ScopedPthreadMutexLocker locker(&lock);
|
||||
return del_and_check_locked(hdr, &tail, &head, &num, safe);
|
||||
}
|
||||
|
||||
static inline void add_to_backlog(struct hdr *hdr)
|
||||
{
|
||||
pthread_mutex_lock(&backlog_lock);
|
||||
static inline void add_to_backlog(hdr_t *hdr) {
|
||||
ScopedPthreadMutexLocker locker(&backlog_lock);
|
||||
hdr->tag = BACKLOG_TAG;
|
||||
backlog_num++;
|
||||
add_locked(hdr, &backlog_tail, &backlog_head);
|
||||
poison(hdr);
|
||||
/* If we've exceeded the maximum backlog, clear it up */
|
||||
while (backlog_num > malloc_double_free_backlog) {
|
||||
struct hdr *gone = backlog_tail;
|
||||
hdr_t *gone = backlog_tail;
|
||||
del_from_backlog_locked(gone);
|
||||
dlfree(gone);
|
||||
}
|
||||
pthread_mutex_unlock(&backlog_lock);
|
||||
}
|
||||
|
||||
void* chk_malloc(size_t size)
|
||||
{
|
||||
struct hdr *hdr;
|
||||
|
||||
extern "C" void* chk_malloc(size_t size) {
|
||||
// log_message("%s: %s\n", __FILE__, __FUNCTION__);
|
||||
|
||||
hdr = dlmalloc(sizeof(struct hdr) + size + sizeof(struct ftr));
|
||||
hdr_t* hdr = static_cast<hdr_t*>(dlmalloc(sizeof(hdr_t) + size + sizeof(ftr_t)));
|
||||
if (hdr) {
|
||||
hdr->bt_depth = get_backtrace(hdr->bt, MAX_BACKTRACE_DEPTH);
|
||||
add(hdr, size);
|
||||
|
@ -401,23 +359,19 @@ void* chk_malloc(size_t size)
|
|||
return NULL;
|
||||
}
|
||||
|
||||
void* chk_memalign(size_t alignment, size_t bytes)
|
||||
{
|
||||
extern "C" void* chk_memalign(size_t, size_t bytes) {
|
||||
// log_message("%s: %s\n", __FILE__, __FUNCTION__);
|
||||
// XXX: it's better to use malloc, than being wrong
|
||||
return chk_malloc(bytes);
|
||||
}
|
||||
|
||||
void chk_free(void *ptr)
|
||||
{
|
||||
struct hdr *hdr;
|
||||
|
||||
extern "C" void chk_free(void *ptr) {
|
||||
// log_message("%s: %s\n", __FILE__, __FUNCTION__);
|
||||
|
||||
if (!ptr) /* ignore free(NULL) */
|
||||
return;
|
||||
|
||||
hdr = meta(ptr);
|
||||
hdr_t* hdr = meta(ptr);
|
||||
|
||||
if (del(hdr) < 0) {
|
||||
intptr_t bt[MAX_BACKTRACE_DEPTH];
|
||||
|
@ -436,26 +390,21 @@ void chk_free(void *ptr)
|
|||
log_message("+++ ALLOCATION %p SIZE %d NOW BEING FREED HERE:\n",
|
||||
user(hdr), hdr->size);
|
||||
print_backtrace(bt, depth);
|
||||
}
|
||||
else {
|
||||
} else {
|
||||
log_message("+++ ALLOCATION %p IS CORRUPTED OR NOT ALLOCATED VIA TRACKER!\n",
|
||||
user(hdr));
|
||||
print_backtrace(bt, depth);
|
||||
/* Leak here so that we do not crash */
|
||||
//dlfree(user(hdr));
|
||||
}
|
||||
}
|
||||
else {
|
||||
} else {
|
||||
hdr->freed_bt_depth = get_backtrace(hdr->freed_bt,
|
||||
MAX_BACKTRACE_DEPTH);
|
||||
add_to_backlog(hdr);
|
||||
}
|
||||
}
|
||||
|
||||
void *chk_realloc(void *ptr, size_t size)
|
||||
{
|
||||
struct hdr *hdr;
|
||||
|
||||
extern "C" void *chk_realloc(void *ptr, size_t size) {
|
||||
// log_message("%s: %s\n", __FILE__, __FUNCTION__);
|
||||
|
||||
if (!size) {
|
||||
|
@ -463,10 +412,11 @@ void *chk_realloc(void *ptr, size_t size)
|
|||
return NULL;
|
||||
}
|
||||
|
||||
if (!ptr)
|
||||
if (!ptr) {
|
||||
return chk_malloc(size);
|
||||
}
|
||||
|
||||
hdr = meta(ptr);
|
||||
hdr_t* hdr = meta(ptr);
|
||||
|
||||
if (del(hdr) < 0) {
|
||||
intptr_t bt[MAX_BACKTRACE_DEPTH];
|
||||
|
@ -491,8 +441,7 @@ void *chk_realloc(void *ptr, size_t size)
|
|||
* can default to this behavior.
|
||||
*/
|
||||
del_from_backlog(hdr);
|
||||
}
|
||||
else {
|
||||
} else {
|
||||
log_message("+++ REALLOCATION %p SIZE %d IS CORRUPTED OR NOT ALLOCATED VIA TRACKER!\n",
|
||||
user(hdr), size);
|
||||
print_backtrace(bt, depth);
|
||||
|
@ -502,7 +451,7 @@ void *chk_realloc(void *ptr, size_t size)
|
|||
}
|
||||
}
|
||||
|
||||
hdr = dlrealloc(hdr, sizeof(struct hdr) + size + sizeof(struct ftr));
|
||||
hdr = static_cast<hdr_t*>(dlrealloc(hdr, sizeof(hdr_t) + size + sizeof(ftr_t)));
|
||||
if (hdr) {
|
||||
hdr->bt_depth = get_backtrace(hdr->bt, MAX_BACKTRACE_DEPTH);
|
||||
add(hdr, size);
|
||||
|
@ -512,12 +461,10 @@ void *chk_realloc(void *ptr, size_t size)
|
|||
return NULL;
|
||||
}
|
||||
|
||||
void *chk_calloc(int nmemb, size_t size)
|
||||
{
|
||||
extern "C" void *chk_calloc(int nmemb, size_t size) {
|
||||
// log_message("%s: %s\n", __FILE__, __FUNCTION__);
|
||||
struct hdr *hdr;
|
||||
size_t total_size = nmemb * size;
|
||||
hdr = dlcalloc(1, sizeof(struct hdr) + total_size + sizeof(struct ftr));
|
||||
hdr_t* hdr = static_cast<hdr_t*>(dlcalloc(1, sizeof(hdr_t) + total_size + sizeof(ftr_t)));
|
||||
if (hdr) {
|
||||
hdr->bt_depth = get_backtrace(
|
||||
hdr->bt, MAX_BACKTRACE_DEPTH);
|
||||
|
@ -527,13 +474,12 @@ void *chk_calloc(int nmemb, size_t size)
|
|||
return NULL;
|
||||
}
|
||||
|
||||
static void heaptracker_free_leaked_memory(void)
|
||||
{
|
||||
struct hdr *del; int cnt;
|
||||
|
||||
if (num)
|
||||
static void heaptracker_free_leaked_memory() {
|
||||
if (num) {
|
||||
log_message("+++ THERE ARE %d LEAKED ALLOCATIONS\n", num);
|
||||
}
|
||||
|
||||
hdr_t *del = NULL;
|
||||
while (head) {
|
||||
int safe;
|
||||
del = head;
|
||||
|
@ -550,7 +496,7 @@ static void heaptracker_free_leaked_memory(void)
|
|||
|
||||
// log_message("+++ DELETING %d BACKLOGGED ALLOCATIONS\n", backlog_num);
|
||||
while (backlog_head) {
|
||||
del = backlog_tail;
|
||||
del = backlog_tail;
|
||||
del_from_backlog(del);
|
||||
dlfree(del);
|
||||
}
|
||||
|
@ -559,16 +505,14 @@ static void heaptracker_free_leaked_memory(void)
|
|||
/* Initializes malloc debugging framework.
|
||||
* See comments on MallocDebugInit in malloc_debug_common.h
|
||||
*/
|
||||
int malloc_debug_initialize(void)
|
||||
{
|
||||
extern "C" int malloc_debug_initialize() {
|
||||
if (!malloc_double_free_backlog)
|
||||
malloc_double_free_backlog = BACKLOG_DEFAULT_LEN;
|
||||
milist = init_mapinfo(getpid());
|
||||
return 0;
|
||||
}
|
||||
|
||||
void malloc_debug_finalize(void)
|
||||
{
|
||||
extern "C" void malloc_debug_finalize() {
|
||||
heaptracker_free_leaked_memory();
|
||||
deinit_mapinfo(milist);
|
||||
}
|
|
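The malloc_debug_check changes above keep the same guard scheme while renaming struct hdr/ftr to hdr_t/ftr_t and converting the casts to C++ style. A simplified sketch of that [header | user data | footer] layout follows; demo_hdr_t is an abbreviated stand-in for hdr_t (no tag, list links, or backtrace fields), and the front-guard value is a placeholder since only REAR_GUARD (0xbb) appears in this excerpt:

#include <stdlib.h>
#include <string.h>

static const size_t kGuardLen = 32;   // mirrors REAR_GUARD_LEN (1<<5)
static const int kFrontGuard = 0x55;  // placeholder, not the real FRONT_GUARD
static const int kRearGuard = 0xbb;   // matches REAR_GUARD in the diff

struct demo_hdr_t {
  size_t size;
  char front_guard[kGuardLen];
} __attribute__((packed));

struct demo_ftr_t {
  char rear_guard[kGuardLen];
} __attribute__((packed));

// Lay out guard bytes on both sides of a size-byte user region.
void* demo_guarded_malloc(size_t size) {
  demo_hdr_t* hdr = static_cast<demo_hdr_t*>(
      malloc(sizeof(demo_hdr_t) + size + sizeof(demo_ftr_t)));
  if (hdr == NULL) {
    return NULL;
  }
  hdr->size = size;
  memset(hdr->front_guard, kFrontGuard, kGuardLen);
  char* user = reinterpret_cast<char*>(hdr + 1);                 // user data follows the header
  demo_ftr_t* ftr = reinterpret_cast<demo_ftr_t*>(user + size);  // footer follows the user data
  memset(ftr->rear_guard, kRearGuard, kGuardLen);
  return user;
}

Checking later whether those bytes are still intact is how is_front_guard_valid and is_rear_guard_valid detect buffer overruns.
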
@ -37,19 +37,17 @@
|
|||
// 012345678901234567890123456789012345678901234567890123456789
|
||||
// 0 1 2 3 4 5
|
||||
|
||||
static mapinfo *parse_maps_line(char *line)
|
||||
{
|
||||
mapinfo *mi;
|
||||
static mapinfo* parse_maps_line(char* line) {
|
||||
int len = strlen(line);
|
||||
|
||||
if(len < 1) return 0;
|
||||
if (len < 1) return 0;
|
||||
line[--len] = 0;
|
||||
|
||||
if(len < 50) return 0;
|
||||
if(line[20] != 'x') return 0;
|
||||
if (len < 50) return 0;
|
||||
if (line[20] != 'x') return 0;
|
||||
|
||||
mi = dlmalloc(sizeof(mapinfo) + (len - 47));
|
||||
if(mi == 0) return 0;
|
||||
mapinfo* mi = static_cast<mapinfo*>(dlmalloc(sizeof(mapinfo) + (len - 47)));
|
||||
if (mi == 0) return 0;
|
||||
|
||||
mi->start = strtoul(line, 0, 16);
|
||||
mi->end = strtoul(line + 9, 0, 16);
|
||||
|
@ -63,16 +61,15 @@ static mapinfo *parse_maps_line(char *line)
|
|||
}
|
||||
|
||||
__LIBC_HIDDEN__
|
||||
mapinfo *init_mapinfo(int pid)
|
||||
{
|
||||
mapinfo *init_mapinfo(int pid) {
|
||||
struct mapinfo *milist = NULL;
|
||||
char data[1024];
|
||||
sprintf(data, "/proc/%d/maps", pid);
|
||||
char data[1024]; // Used to read lines as well as to construct the filename.
|
||||
snprintf(data, sizeof(data), "/proc/%d/maps", pid);
|
||||
FILE *fp = fopen(data, "r");
|
||||
if(fp) {
|
||||
while(fgets(data, sizeof(data), fp)) {
|
||||
if (fp) {
|
||||
while (fgets(data, sizeof(data), fp)) {
|
||||
mapinfo *mi = parse_maps_line(data);
|
||||
if(mi) {
|
||||
if (mi) {
|
||||
mi->next = milist;
|
||||
milist = mi;
|
||||
}
|
||||
|
@ -84,22 +81,20 @@ mapinfo *init_mapinfo(int pid)
|
|||
}
|
||||
|
||||
__LIBC_HIDDEN__
|
||||
void deinit_mapinfo(mapinfo *mi)
|
||||
{
|
||||
mapinfo *del;
|
||||
while(mi) {
|
||||
del = mi;
|
||||
mi = mi->next;
|
||||
dlfree(del);
|
||||
}
|
||||
void deinit_mapinfo(mapinfo *mi) {
|
||||
mapinfo *del;
|
||||
while (mi) {
|
||||
del = mi;
|
||||
mi = mi->next;
|
||||
dlfree(del);
|
||||
}
|
||||
}
|
||||
|
||||
/* Map a pc address to the name of the containing ELF file */
|
||||
__LIBC_HIDDEN__
|
||||
const char *map_to_name(mapinfo *mi, unsigned pc, const char* def)
|
||||
{
|
||||
while(mi) {
|
||||
if((pc >= mi->start) && (pc < mi->end)){
|
||||
const char *map_to_name(mapinfo *mi, unsigned pc, const char* def) {
|
||||
while (mi) {
|
||||
if ((pc >= mi->start) && (pc < mi->end)) {
|
||||
return mi->name;
|
||||
}
|
||||
mi = mi->next;
|
||||
|
@ -109,11 +104,10 @@ const char *map_to_name(mapinfo *mi, unsigned pc, const char* def)
|
|||
|
||||
/* Find the containing map info for the pc */
|
||||
__LIBC_HIDDEN__
|
||||
const mapinfo *pc_to_mapinfo(mapinfo *mi, unsigned pc, unsigned *rel_pc)
|
||||
{
|
||||
const mapinfo *pc_to_mapinfo(mapinfo *mi, unsigned pc, unsigned *rel_pc) {
|
||||
*rel_pc = pc;
|
||||
while(mi) {
|
||||
if((pc >= mi->start) && (pc < mi->end)){
|
||||
while (mi) {
|
||||
if ((pc >= mi->start) && (pc < mi->end)) {
|
||||
// Only calculate the relative offset for shared libraries
|
||||
if (strstr(mi->name, ".so")) {
|
||||
*rel_pc -= mi->start;
|
|
@@ -31,16 +31,16 @@

#include <sys/cdefs.h>

typedef struct mapinfo {
    struct mapinfo *next;
    unsigned start;
    unsigned end;
    char name[];
} mapinfo;
struct mapinfo {
    struct mapinfo* next;
    unsigned start;
    unsigned end;
    char name[];
};

__LIBC_HIDDEN__ mapinfo *init_mapinfo(int pid);
__LIBC_HIDDEN__ void deinit_mapinfo(mapinfo *mi);
__LIBC_HIDDEN__ const char *map_to_name(mapinfo *mi, unsigned pc, const char* def);
__LIBC_HIDDEN__ const mapinfo *pc_to_mapinfo(mapinfo *mi, unsigned pc, unsigned *rel_pc);

#endif/*MALLOC_DEBUG_CHECK_MAPINFO_H*/
#endif /*MALLOC_DEBUG_CHECK_MAPINFO_H*/

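The malloc_debug_common hunks below tighten hash_entry_compare, a qsort comparator that sorts an array of HashEntry pointers. A minimal sketch of the same comparator shape, with illustrative Item and by_size_desc names that are not part of the diff:

#include <stdlib.h>

struct Item {
  size_t size;
};

// qsort hands the comparator pointers *into the array*, so each argument is
// dereferenced once to recover the element (an Item*).
static int by_size_desc(const void* arg1, const void* arg2) {
  const Item* e1 = *static_cast<Item* const*>(arg1);
  const Item* e2 = *static_cast<Item* const*>(arg2);
  if (e1->size == e2->size) {
    return 0;
  }
  return (e1->size < e2->size) ? 1 : -1;  // larger entries sort first
}

void sort_items(Item** list, size_t count) {
  qsort(list, count, sizeof(Item*), by_size_desc);
}
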
@ -58,12 +58,11 @@ HashTable gHashTable;
|
|||
// output functions
|
||||
// =============================================================================
|
||||
|
||||
static int hash_entry_compare(const void* arg1, const void* arg2)
|
||||
{
|
||||
static int hash_entry_compare(const void* arg1, const void* arg2) {
|
||||
int result;
|
||||
|
||||
HashEntry* e1 = *(HashEntry**)arg1;
|
||||
HashEntry* e2 = *(HashEntry**)arg2;
|
||||
const HashEntry* e1 = *static_cast<HashEntry* const*>(arg1);
|
||||
const HashEntry* e2 = *static_cast<HashEntry* const*>(arg2);
|
||||
|
||||
// if one or both arg pointers are null, deal gracefully
|
||||
if (e1 == NULL) {
|
||||
|
@ -111,9 +110,8 @@ static int hash_entry_compare(const void* arg1, const void* arg2)
|
|||
* not include heap overhead
|
||||
* "*backtraceSize" is set to the maximum number of entries in the back trace
|
||||
*/
|
||||
void get_malloc_leak_info(uint8_t** info, size_t* overallSize,
|
||||
size_t* infoSize, size_t* totalMemory, size_t* backtraceSize)
|
||||
{
|
||||
extern "C" void get_malloc_leak_info(uint8_t** info, size_t* overallSize,
|
||||
size_t* infoSize, size_t* totalMemory, size_t* backtraceSize) {
|
||||
// don't do anything if we have invalid arguments
|
||||
if (info == NULL || overallSize == NULL || infoSize == NULL ||
|
||||
totalMemory == NULL || backtraceSize == NULL) {
|
||||
|
@ -121,22 +119,21 @@ void get_malloc_leak_info(uint8_t** info, size_t* overallSize,
|
|||
}
|
||||
*totalMemory = 0;
|
||||
|
||||
pthread_mutex_lock(&gAllocationsMutex);
|
||||
ScopedPthreadMutexLocker locker(&gAllocationsMutex);
|
||||
|
||||
if (gHashTable.count == 0) {
|
||||
*info = NULL;
|
||||
*overallSize = 0;
|
||||
*infoSize = 0;
|
||||
*backtraceSize = 0;
|
||||
goto done;
|
||||
return;
|
||||
}
|
||||
|
||||
void** list = (void**)dlmalloc(sizeof(void*) * gHashTable.count);
|
||||
HashEntry** list = static_cast<HashEntry**>(dlmalloc(sizeof(void*) * gHashTable.count));
|
||||
|
||||
// get the entries into an array to be sorted
|
||||
int index = 0;
|
||||
int i;
|
||||
for (i = 0 ; i < HASHTABLE_SIZE ; i++) {
|
||||
for (size_t i = 0 ; i < HASHTABLE_SIZE ; ++i) {
|
||||
HashEntry* entry = gHashTable.slots[i];
|
||||
while (entry != NULL) {
|
||||
list[index] = entry;
|
||||
|
@ -152,19 +149,20 @@ void get_malloc_leak_info(uint8_t** info, size_t* overallSize,
|
|||
*overallSize = *infoSize * gHashTable.count;
|
||||
*backtraceSize = BACKTRACE_SIZE;
|
||||
|
||||
// now get A byte array big enough for this
|
||||
*info = (uint8_t*)dlmalloc(*overallSize);
|
||||
// now get a byte array big enough for this
|
||||
*info = static_cast<uint8_t*>(dlmalloc(*overallSize));
|
||||
|
||||
if (*info == NULL) {
|
||||
*overallSize = 0;
|
||||
goto out_nomem_info;
|
||||
dlfree(list);
|
||||
return;
|
||||
}
|
||||
|
||||
qsort((void*)list, gHashTable.count, sizeof(void*), hash_entry_compare);
|
||||
qsort(list, gHashTable.count, sizeof(void*), hash_entry_compare);
|
||||
|
||||
uint8_t* head = *info;
|
||||
const int count = gHashTable.count;
|
||||
for (i = 0 ; i < count ; i++) {
|
||||
for (int i = 0 ; i < count ; ++i) {
|
||||
HashEntry* entry = list[i];
|
||||
size_t entrySize = (sizeof(size_t) * 2) + (sizeof(intptr_t) * entry->numEntries);
|
||||
if (entrySize < *infoSize) {
|
||||
|
@ -178,40 +176,30 @@ void get_malloc_leak_info(uint8_t** info, size_t* overallSize,
|
|||
head += *infoSize;
|
||||
}
|
||||
|
||||
out_nomem_info:
|
||||
dlfree(list);
|
||||
|
||||
done:
|
||||
pthread_mutex_unlock(&gAllocationsMutex);
|
||||
}
|
||||
|
||||
void free_malloc_leak_info(uint8_t* info)
|
||||
{
|
||||
extern "C" void free_malloc_leak_info(uint8_t* info) {
|
||||
dlfree(info);
|
||||
}
|
||||
|
||||
struct mallinfo mallinfo()
|
||||
{
|
||||
extern "C" struct mallinfo mallinfo() {
|
||||
return dlmallinfo();
|
||||
}
|
||||
|
||||
size_t malloc_usable_size(void* mem)
|
||||
{
|
||||
extern "C" size_t malloc_usable_size(void* mem) {
|
||||
return dlmalloc_usable_size(mem);
|
||||
}
|
||||
|
||||
void* valloc(size_t bytes)
|
||||
{
|
||||
extern "C" void* valloc(size_t bytes) {
|
||||
return dlvalloc(bytes);
|
||||
}
|
||||
|
||||
void* pvalloc(size_t bytes)
|
||||
{
|
||||
extern "C" void* pvalloc(size_t bytes) {
|
||||
return dlpvalloc(bytes);
|
||||
}
|
||||
|
||||
int posix_memalign(void** memptr, size_t alignment, size_t size)
|
||||
{
|
||||
extern "C" int posix_memalign(void** memptr, size_t alignment, size_t size) {
|
||||
return dlposix_memalign(memptr, alignment, size);
|
||||
}
|
||||
|
||||
|
@ -223,27 +211,31 @@ int posix_memalign(void** memptr, size_t alignment, size_t size)
|
|||
#ifdef USE_DL_PREFIX
|
||||
|
||||
/* Table for dispatching malloc calls, initialized with default dispatchers. */
|
||||
const MallocDebug __libc_malloc_default_dispatch __attribute__((aligned(32))) =
|
||||
{
|
||||
extern const MallocDebug __libc_malloc_default_dispatch;
|
||||
const MallocDebug __libc_malloc_default_dispatch __attribute__((aligned(32))) = {
|
||||
dlmalloc, dlfree, dlcalloc, dlrealloc, dlmemalign
|
||||
};
|
||||
|
||||
/* Selector of dispatch table to use for dispatching malloc calls. */
|
||||
const MallocDebug* __libc_malloc_dispatch = &__libc_malloc_default_dispatch;
|
||||
|
||||
void* malloc(size_t bytes) {
|
||||
extern "C" void* malloc(size_t bytes) {
|
||||
return __libc_malloc_dispatch->malloc(bytes);
|
||||
}
|
||||
void free(void* mem) {
|
||||
|
||||
extern "C" void free(void* mem) {
|
||||
__libc_malloc_dispatch->free(mem);
|
||||
}
|
||||
void* calloc(size_t n_elements, size_t elem_size) {
|
||||
|
||||
extern "C" void* calloc(size_t n_elements, size_t elem_size) {
|
||||
return __libc_malloc_dispatch->calloc(n_elements, elem_size);
|
||||
}
|
||||
void* realloc(void* oldMem, size_t bytes) {
|
||||
|
||||
extern "C" void* realloc(void* oldMem, size_t bytes) {
|
||||
return __libc_malloc_dispatch->realloc(oldMem, bytes);
|
||||
}
|
||||
void* memalign(size_t alignment, size_t bytes) {
|
||||
|
||||
extern "C" void* memalign(size_t alignment, size_t bytes) {
|
||||
return __libc_malloc_dispatch->memalign(alignment, bytes);
|
||||
}
|
||||
|
||||
|
@ -253,6 +245,7 @@ void* memalign(size_t alignment, size_t bytes) {
|
|||
#ifndef LIBC_STATIC
|
||||
#include <sys/system_properties.h>
|
||||
#include <dlfcn.h>
|
||||
#include <stdio.h>
|
||||
#include "logd.h"
|
||||
|
||||
/* Table for dispatching malloc calls, depending on environment. */
|
||||
|
@ -260,7 +253,7 @@ static MallocDebug gMallocUse __attribute__((aligned(32))) = {
|
|||
dlmalloc, dlfree, dlcalloc, dlrealloc, dlmemalign
|
||||
};
|
||||
|
||||
extern char* __progname;
|
||||
extern char* __progname;
|
||||
|
||||
/* Handle to shared library where actual memory allocation is implemented.
|
||||
* This library is loaded and memory allocation calls are redirected there
|
||||
|
@ -296,13 +289,49 @@ static void* libc_malloc_impl_handle = NULL;
|
|||
* when the value of libc.debug.malloc = 10. It determines the size of the
|
||||
* backlog we use to detect multiple frees. If the property is not set, the
|
||||
* backlog length defaults to an internal constant defined in
|
||||
* malloc_debug_check.c
|
||||
* malloc_debug_check.cpp.
|
||||
*/
|
||||
unsigned int malloc_double_free_backlog;
|
||||
|
||||
static void InitMalloc(MallocDebug* table, int debug_level, const char* prefix) {
|
||||
__libc_android_log_print(ANDROID_LOG_INFO, "libc", "%s: using libc.debug.malloc %d (%s)\n",
|
||||
__progname, debug_level, prefix);
|
||||
|
||||
char symbol[128];
|
||||
|
||||
snprintf(symbol, sizeof(symbol), "%s_malloc", prefix);
|
||||
table->malloc = reinterpret_cast<MallocDebugMalloc>(dlsym(libc_malloc_impl_handle, symbol));
|
||||
if (table->malloc == NULL) {
|
||||
error_log("%s: dlsym(\"%s\") failed", __progname, symbol);
|
||||
}
|
||||
|
||||
snprintf(symbol, sizeof(symbol), "%s_free", prefix);
|
||||
table->free = reinterpret_cast<MallocDebugFree>(dlsym(libc_malloc_impl_handle, symbol));
|
||||
if (table->free == NULL) {
|
||||
error_log("%s: dlsym(\"%s\") failed", __progname, symbol);
|
||||
}
|
||||
|
||||
snprintf(symbol, sizeof(symbol), "%s_calloc", prefix);
|
||||
table->calloc = reinterpret_cast<MallocDebugCalloc>(dlsym(libc_malloc_impl_handle, symbol));
|
||||
if (table->calloc == NULL) {
|
||||
error_log("%s: dlsym(\"%s\") failed", __progname, symbol);
|
||||
}
|
||||
|
||||
snprintf(symbol, sizeof(symbol), "%s_realloc", prefix);
|
||||
table->realloc = reinterpret_cast<MallocDebugRealloc>(dlsym(libc_malloc_impl_handle, symbol));
|
||||
if (table->realloc == NULL) {
|
||||
error_log("%s: dlsym(\"%s\") failed", __progname, symbol);
|
||||
}
|
||||
|
||||
snprintf(symbol, sizeof(symbol), "%s_memalign", prefix);
|
||||
table->memalign = reinterpret_cast<MallocDebugMemalign>(dlsym(libc_malloc_impl_handle, symbol));
|
||||
if (table->memalign == NULL) {
|
||||
error_log("%s: dlsym(\"%s\") failed", __progname, symbol);
|
||||
}
|
||||
}
|
||||
|
||||
/* Initializes memory allocation framework once per process. */
|
||||
static void malloc_init_impl(void)
|
||||
{
|
||||
static void malloc_init_impl() {
|
||||
const char* so_name = NULL;
|
||||
MallocDebugInit malloc_debug_initialize = NULL;
|
||||
unsigned int qemu_running = 0;
|
||||
|
@ -328,13 +357,13 @@ static void malloc_init_impl(void)
|
|||
|
||||
/* If debug level has not been set by memcheck option in the emulator,
|
||||
* lets grab it from libc.debug.malloc system property. */
|
||||
if (!debug_level && __system_property_get("libc.debug.malloc", env)) {
|
||||
if (debug_level == 0 && __system_property_get("libc.debug.malloc", env)) {
|
||||
debug_level = atoi(env);
|
||||
}
|
||||
|
||||
/* Debug level 0 means that we should use dlxxx allocation
|
||||
* routines (default). */
|
||||
if (!debug_level) {
|
||||
if (debug_level == 0) {
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -386,14 +415,14 @@ static void malloc_init_impl(void)
|
|||
// Load .so that implements the required malloc debugging functionality.
|
||||
libc_malloc_impl_handle = dlopen(so_name, RTLD_LAZY);
|
||||
if (libc_malloc_impl_handle == NULL) {
|
||||
error_log("%s: Missing module %s required for malloc debug level %d\n",
|
||||
__progname, so_name, debug_level);
|
||||
error_log("%s: Missing module %s required for malloc debug level %d: %s",
|
||||
__progname, so_name, debug_level, dlerror());
|
||||
return;
|
||||
}
|
||||
|
||||
// Initialize malloc debugging in the loaded module.
|
||||
malloc_debug_initialize =
|
||||
dlsym(libc_malloc_impl_handle, "malloc_debug_initialize");
|
||||
malloc_debug_initialize = reinterpret_cast<MallocDebugInit>(dlsym(libc_malloc_impl_handle,
|
||||
"malloc_debug_initialize"));
|
||||
if (malloc_debug_initialize == NULL) {
|
||||
error_log("%s: Initialization routine is not found in %s\n",
|
||||
__progname, so_name);
|
||||
|
@ -407,8 +436,10 @@ static void malloc_init_impl(void)
|
|||
|
||||
if (debug_level == 20) {
|
||||
// For memory checker we need to do extra initialization.
|
||||
int (*memcheck_initialize)(int, const char*) =
|
||||
dlsym(libc_malloc_impl_handle, "memcheck_initialize");
|
||||
typedef int (*MemCheckInit)(int, const char*);
|
||||
MemCheckInit memcheck_initialize =
|
||||
reinterpret_cast<MemCheckInit>(dlsym(libc_malloc_impl_handle,
|
||||
"memcheck_initialize"));
|
||||
if (memcheck_initialize == NULL) {
|
||||
error_log("%s: memcheck_initialize routine is not found in %s\n",
|
||||
__progname, so_name);
|
||||
|
@ -424,63 +455,16 @@ static void malloc_init_impl(void)
|
|||
// Initialize malloc dispatch table with appropriate routines.
|
||||
switch (debug_level) {
|
||||
case 1:
|
||||
__libc_android_log_print(ANDROID_LOG_INFO, "libc",
|
||||
"%s using MALLOC_DEBUG = %d (leak checker)\n",
|
||||
__progname, debug_level);
|
||||
gMallocUse.malloc =
|
||||
dlsym(libc_malloc_impl_handle, "leak_malloc");
|
||||
gMallocUse.free =
|
||||
dlsym(libc_malloc_impl_handle, "leak_free");
|
||||
gMallocUse.calloc =
|
||||
dlsym(libc_malloc_impl_handle, "leak_calloc");
|
||||
gMallocUse.realloc =
|
||||
dlsym(libc_malloc_impl_handle, "leak_realloc");
|
||||
gMallocUse.memalign =
|
||||
dlsym(libc_malloc_impl_handle, "leak_memalign");
|
||||
InitMalloc(&gMallocUse, debug_level, "leak");
|
||||
break;
|
||||
case 5:
|
||||
__libc_android_log_print(ANDROID_LOG_INFO, "libc",
|
||||
"%s using MALLOC_DEBUG = %d (fill)\n",
|
||||
__progname, debug_level);
|
||||
gMallocUse.malloc =
|
||||
dlsym(libc_malloc_impl_handle, "fill_malloc");
|
||||
gMallocUse.free =
|
||||
dlsym(libc_malloc_impl_handle, "fill_free");
|
||||
gMallocUse.calloc = dlcalloc;
|
||||
gMallocUse.realloc =
|
||||
dlsym(libc_malloc_impl_handle, "fill_realloc");
|
||||
gMallocUse.memalign =
|
||||
dlsym(libc_malloc_impl_handle, "fill_memalign");
|
||||
InitMalloc(&gMallocUse, debug_level, "fill");
|
||||
break;
|
||||
case 10:
|
||||
__libc_android_log_print(ANDROID_LOG_INFO, "libc",
|
||||
"%s using MALLOC_DEBUG = %d (sentinels, fill)\n",
|
||||
__progname, debug_level);
|
||||
gMallocUse.malloc =
|
||||
dlsym(libc_malloc_impl_handle, "chk_malloc");
|
||||
gMallocUse.free =
|
||||
dlsym(libc_malloc_impl_handle, "chk_free");
|
||||
gMallocUse.calloc =
|
||||
dlsym(libc_malloc_impl_handle, "chk_calloc");
|
||||
gMallocUse.realloc =
|
||||
dlsym(libc_malloc_impl_handle, "chk_realloc");
|
||||
gMallocUse.memalign =
|
||||
dlsym(libc_malloc_impl_handle, "chk_memalign");
|
||||
InitMalloc(&gMallocUse, debug_level, "chk");
|
||||
break;
|
||||
case 20:
|
||||
__libc_android_log_print(ANDROID_LOG_INFO, "libc",
|
||||
"%s[%u] using MALLOC_DEBUG = %d (instrumented for emulator)\n",
|
||||
__progname, getpid(), debug_level);
|
||||
gMallocUse.malloc =
|
||||
dlsym(libc_malloc_impl_handle, "qemu_instrumented_malloc");
|
||||
gMallocUse.free =
|
||||
dlsym(libc_malloc_impl_handle, "qemu_instrumented_free");
|
||||
gMallocUse.calloc =
|
||||
dlsym(libc_malloc_impl_handle, "qemu_instrumented_calloc");
|
||||
gMallocUse.realloc =
|
||||
dlsym(libc_malloc_impl_handle, "qemu_instrumented_realloc");
|
||||
gMallocUse.memalign =
|
||||
dlsym(libc_malloc_impl_handle, "qemu_instrumented_memalign");
|
||||
InitMalloc(&gMallocUse, debug_level, "qemu_instrumented");
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
|
@ -492,12 +476,8 @@ static void malloc_init_impl(void)
|
|||
(gMallocUse.calloc == NULL) ||
|
||||
(gMallocUse.realloc == NULL) ||
|
||||
(gMallocUse.memalign == NULL)) {
|
||||
error_log("%s: Cannot initialize malloc dispatch table for debug level"
|
||||
" %d: %p, %p, %p, %p, %p\n",
|
||||
__progname, debug_level,
|
||||
gMallocUse.malloc, gMallocUse.free,
|
||||
gMallocUse.calloc, gMallocUse.realloc,
|
||||
gMallocUse.memalign);
|
||||
error_log("%s: some symbols for libc.debug.malloc level %d were not found (see above)",
|
||||
__progname, debug_level);
|
||||
dlclose(libc_malloc_impl_handle);
|
||||
libc_malloc_impl_handle = NULL;
|
||||
} else {
|
||||
|
@ -505,14 +485,14 @@ static void malloc_init_impl(void)
|
|||
}
|
||||
}
|
||||
|
||||
static void malloc_fini_impl(void)
|
||||
{
|
||||
static void malloc_fini_impl() {
|
||||
if (libc_malloc_impl_handle) {
|
||||
MallocDebugFini malloc_debug_finalize = NULL;
|
||||
malloc_debug_finalize =
|
||||
dlsym(libc_malloc_impl_handle, "malloc_debug_finalize");
|
||||
if (malloc_debug_finalize)
|
||||
MallocDebugFini malloc_debug_finalize =
|
||||
reinterpret_cast<MallocDebugFini>(dlsym(libc_malloc_impl_handle,
|
||||
"malloc_debug_finalize"));
|
||||
if (malloc_debug_finalize) {
|
||||
malloc_debug_finalize();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -526,23 +506,21 @@ static pthread_once_t malloc_fini_once_ctl = PTHREAD_ONCE_INIT;
|
|||
* This routine is called from __libc_init routines implemented
|
||||
* in libc_init_static.c and libc_init_dynamic.c files.
|
||||
*/
|
||||
void malloc_debug_init(void)
|
||||
{
|
||||
extern "C" void malloc_debug_init() {
|
||||
/* We need to initialize malloc iff we implement here custom
|
||||
* malloc routines (i.e. USE_DL_PREFIX is defined) for libc.so */
|
||||
#if defined(USE_DL_PREFIX) && !defined(LIBC_STATIC)
|
||||
if (pthread_once(&malloc_init_once_ctl, malloc_init_impl)) {
|
||||
if (pthread_once(&malloc_init_once_ctl, malloc_init_impl)) {
|
||||
error_log("Unable to initialize malloc_debug component.");
|
||||
}
|
||||
#endif // USE_DL_PREFIX && !LIBC_STATIC
|
||||
}
|
||||
|
||||
void malloc_debug_fini(void)
|
||||
{
|
||||
/* We need to finalize malloc iff we implement here custom
|
||||
extern "C" void malloc_debug_fini() {
|
||||
/* We need to finalize malloc iff we implement here custom
|
||||
* malloc routines (i.e. USE_DL_PREFIX is defined) for libc.so */
|
||||
#if defined(USE_DL_PREFIX) && !defined(LIBC_STATIC)
|
||||
if (pthread_once(&malloc_fini_once_ctl, malloc_fini_impl)) {
|
||||
if (pthread_once(&malloc_fini_once_ctl, malloc_fini_impl)) {
|
||||
error_log("Unable to finalize malloc_debug component.");
|
||||
}
|
||||
#endif // USE_DL_PREFIX && !LIBC_STATIC
|
|
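The InitMalloc helper introduced above replaces the per-level dlsym boilerplate with symbol names built from a prefix ("leak", "fill", "chk", "qemu_instrumented"). A reduced sketch of that lookup pattern, assuming an already-dlopen'ed handle; MallocFn and lookup_prefixed_malloc are illustrative names:

#include <dlfcn.h>
#include <stdio.h>

typedef void* (*MallocFn)(size_t);

MallocFn lookup_prefixed_malloc(void* handle, const char* prefix) {
  char symbol[128];
  snprintf(symbol, sizeof(symbol), "%s_malloc", prefix);  // e.g. "leak_malloc"
  // dlsym returns void*, so a reinterpret_cast is needed to recover a function pointer.
  return reinterpret_cast<MallocFn>(dlsym(handle, symbol));
}
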
@ -33,10 +33,6 @@
|
|||
#ifndef MALLOC_DEBUG_COMMON_H
|
||||
#define MALLOC_DEBUG_COMMON_H
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
#define HASHTABLE_SIZE 1543
|
||||
#define BACKTRACE_SIZE 32
|
||||
/* flag definitions, currently sharing storage with "size" */
|
||||
|
@ -49,7 +45,6 @@ extern "C" {
|
|||
// Structures
|
||||
// =============================================================================
|
||||
|
||||
typedef struct HashEntry HashEntry;
|
||||
struct HashEntry {
|
||||
size_t slot;
|
||||
HashEntry* prev;
|
||||
|
@ -61,25 +56,23 @@ struct HashEntry {
|
|||
intptr_t backtrace[0];
|
||||
};
|
||||
|
||||
typedef struct HashTable HashTable;
|
||||
struct HashTable {
|
||||
size_t count;
|
||||
HashEntry* slots[HASHTABLE_SIZE];
|
||||
};
|
||||
|
||||
/* Entry in malloc dispatch table. */
|
||||
typedef struct MallocDebug MallocDebug;
|
||||
typedef void* (*MallocDebugMalloc)(size_t);
|
||||
typedef void (*MallocDebugFree)(void*);
|
||||
typedef void* (*MallocDebugCalloc)(size_t, size_t);
|
||||
typedef void* (*MallocDebugRealloc)(void*, size_t);
|
||||
typedef void* (*MallocDebugMemalign)(size_t, size_t);
|
||||
struct MallocDebug {
|
||||
/* Address of the actual malloc routine. */
|
||||
void* (*malloc)(size_t bytes);
|
||||
/* Address of the actual free routine. */
|
||||
void (*free)(void* mem);
|
||||
/* Address of the actual calloc routine. */
|
||||
void* (*calloc)(size_t n_elements, size_t elem_size);
|
||||
/* Address of the actual realloc routine. */
|
||||
void* (*realloc)(void* oldMem, size_t bytes);
|
||||
/* Address of the actual memalign routine. */
|
||||
void* (*memalign)(size_t alignment, size_t bytes);
|
||||
MallocDebugMalloc malloc;
|
||||
MallocDebugFree free;
|
||||
MallocDebugCalloc calloc;
|
||||
MallocDebugRealloc realloc;
|
||||
MallocDebugMemalign memalign;
|
||||
};
|
||||
|
||||
/* Malloc debugging initialization and finalization routines.
|
||||
|
@ -94,8 +87,8 @@ struct MallocDebug {
|
|||
* MallocDebugInit returns:
|
||||
* 0 on success, -1 on failure.
|
||||
*/
|
||||
typedef int (*MallocDebugInit)(void);
|
||||
typedef void (*MallocDebugFini)(void);
|
||||
typedef int (*MallocDebugInit)();
|
||||
typedef void (*MallocDebugFini)();
|
||||
|
||||
// =============================================================================
|
||||
// log functions
|
||||
|
@@ -108,8 +101,18 @@ typedef void (*MallocDebugFini)(void);
#define info_log(format, ...) \
    __libc_android_log_print(ANDROID_LOG_INFO, "malloc_leak_check", (format), ##__VA_ARGS__ )

#ifdef __cplusplus
}; /* end of extern "C" */
#endif
class ScopedPthreadMutexLocker {
 public:
  explicit ScopedPthreadMutexLocker(pthread_mutex_t* mu) : mu_(mu) {
    pthread_mutex_lock(mu_);
  }

  ~ScopedPthreadMutexLocker() {
    pthread_mutex_unlock(mu_);
  }

 private:
  pthread_mutex_t* mu_;
};

#endif // MALLOC_DEBUG_COMMON_H

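A short usage sketch for the ScopedPthreadMutexLocker defined above, assuming the class is visible; gDemoLock and bump_counter are stand-in names for this example only:

#include <pthread.h>

static pthread_mutex_t gDemoLock = PTHREAD_MUTEX_INITIALIZER;
static int gCounter = 0;

int bump_counter(bool enabled) {
  ScopedPthreadMutexLocker locker(&gDemoLock);  // locks here
  if (!enabled) {
    return gCounter;   // unlocked automatically on this early return...
  }
  return ++gCounter;   // ...and here, when locker goes out of scope.
}

Because unlocking happens in the destructor, the explicit pthread_mutex_unlock calls and the goto-based cleanup paths in the old C code (goto done, out_nomem_info) are no longer needed.
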
@ -75,18 +75,21 @@ extern HashTable gHashTable;
|
|||
// =============================================================================
|
||||
// Structures
|
||||
// =============================================================================
|
||||
typedef struct AllocationEntry AllocationEntry;
|
||||
|
||||
struct AllocationEntry {
|
||||
HashEntry* entry;
|
||||
uint32_t guard;
|
||||
};
|
||||
|
||||
static AllocationEntry* to_header(void* mem) {
|
||||
return reinterpret_cast<AllocationEntry*>(mem) - 1;
|
||||
}
|
||||
|
||||
// =============================================================================
|
||||
// Hash Table functions
|
||||
// =============================================================================
|
||||
static uint32_t get_hash(intptr_t* backtrace, size_t numEntries)
|
||||
{
|
||||
|
||||
static uint32_t get_hash(intptr_t* backtrace, size_t numEntries) {
|
||||
if (backtrace == NULL) return 0;
|
||||
|
||||
int hash = 0;
|
||||
|
@ -99,8 +102,7 @@ static uint32_t get_hash(intptr_t* backtrace, size_t numEntries)
|
|||
}
|
||||
|
||||
static HashEntry* find_entry(HashTable* table, int slot,
|
||||
intptr_t* backtrace, size_t numEntries, size_t size)
|
||||
{
|
||||
intptr_t* backtrace, size_t numEntries, size_t size) {
|
||||
HashEntry* entry = table->slots[slot];
|
||||
while (entry != NULL) {
|
||||
//debug_log("backtrace: %p, entry: %p entry->backtrace: %p\n",
|
||||
|
@ -120,8 +122,7 @@ static HashEntry* find_entry(HashTable* table, int slot,
|
|||
return NULL;
|
||||
}
|
||||
|
||||
static HashEntry* record_backtrace(intptr_t* backtrace, size_t numEntries, size_t size)
|
||||
{
|
||||
static HashEntry* record_backtrace(intptr_t* backtrace, size_t numEntries, size_t size) {
|
||||
size_t hash = get_hash(backtrace, numEntries);
|
||||
size_t slot = hash % HASHTABLE_SIZE;
|
||||
|
||||
|
@ -130,8 +131,9 @@ static HashEntry* record_backtrace(intptr_t* backtrace, size_t numEntries, size_
|
|||
abort();
|
||||
}
|
||||
|
||||
if (gMallocLeakZygoteChild)
|
||||
if (gMallocLeakZygoteChild) {
|
||||
size |= SIZE_FLAG_ZYGOTE_CHILD;
|
||||
}
|
||||
|
||||
HashEntry* entry = find_entry(&gHashTable, slot, backtrace, numEntries, size);
|
||||
|
||||
|
@ -139,9 +141,10 @@ static HashEntry* record_backtrace(intptr_t* backtrace, size_t numEntries, size_
|
|||
entry->allocations++;
|
||||
} else {
|
||||
// create a new entry
|
||||
entry = (HashEntry*)dlmalloc(sizeof(HashEntry) + numEntries*sizeof(intptr_t));
|
||||
if (!entry)
|
||||
entry = static_cast<HashEntry*>(dlmalloc(sizeof(HashEntry) + numEntries*sizeof(intptr_t)));
|
||||
if (!entry) {
|
||||
return NULL;
|
||||
}
|
||||
entry->allocations = 1;
|
||||
entry->slot = slot;
|
||||
entry->prev = NULL;
|
||||
|
@ -164,8 +167,7 @@ static HashEntry* record_backtrace(intptr_t* backtrace, size_t numEntries, size_
|
|||
return entry;
|
||||
}
|
||||
|
||||
static int is_valid_entry(HashEntry* entry)
|
||||
{
|
||||
static int is_valid_entry(HashEntry* entry) {
|
||||
if (entry != NULL) {
|
||||
int i;
|
||||
for (i = 0 ; i < HASHTABLE_SIZE ; i++) {
|
||||
|
@ -184,8 +186,7 @@ static int is_valid_entry(HashEntry* entry)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static void remove_entry(HashEntry* entry)
|
||||
{
|
||||
static void remove_entry(HashEntry* entry) {
|
||||
HashEntry* prev = entry->prev;
|
||||
HashEntry* next = entry->next;
|
||||
|
||||
|
@ -206,10 +207,13 @@ static void remove_entry(HashEntry* entry)
|
|||
// =============================================================================
|
||||
|
||||
#define CHK_FILL_FREE 0xef
|
||||
#define CHK_SENTINEL_VALUE (char)0xeb
|
||||
#define CHK_SENTINEL_VALUE 0xeb
|
||||
|
||||
void* fill_malloc(size_t bytes)
|
||||
{
|
||||
extern "C" void* fill_calloc(size_t n_elements, size_t elem_size) {
|
||||
return dlcalloc(n_elements, elem_size);
|
||||
}
|
||||
|
||||
extern "C" void* fill_malloc(size_t bytes) {
|
||||
void* buffer = dlmalloc(bytes);
|
||||
if (buffer) {
|
||||
memset(buffer, CHK_SENTINEL_VALUE, bytes);
|
||||
|
@ -217,15 +221,13 @@ void* fill_malloc(size_t bytes)
|
|||
return buffer;
|
||||
}
|
||||
|
||||
void fill_free(void* mem)
|
||||
{
|
||||
extern "C" void fill_free(void* mem) {
|
||||
size_t bytes = dlmalloc_usable_size(mem);
|
||||
memset(mem, CHK_FILL_FREE, bytes);
|
||||
dlfree(mem);
|
||||
}
|
||||
|
||||
void* fill_realloc(void* mem, size_t bytes)
|
||||
{
|
||||
extern "C" void* fill_realloc(void* mem, size_t bytes) {
|
||||
void* buffer = fill_malloc(bytes);
|
||||
if (mem == NULL) {
|
||||
return buffer;
|
||||
|
@ -239,8 +241,7 @@ void* fill_realloc(void* mem, size_t bytes)
|
|||
return buffer;
|
||||
}
|
||||
|
||||
void* fill_memalign(size_t alignment, size_t bytes)
|
||||
{
|
||||
extern "C" void* fill_memalign(size_t alignment, size_t bytes) {
|
||||
void* buffer = dlmemalign(alignment, bytes);
|
||||
if (buffer) {
|
||||
memset(buffer, CHK_SENTINEL_VALUE, bytes);
|
||||
|
@ -252,13 +253,11 @@ void* fill_memalign(size_t alignment, size_t bytes)
|
|||
// malloc leak functions
|
||||
// =============================================================================
|
||||
|
||||
#define MEMALIGN_GUARD ((void*)0xA1A41520)
|
||||
static void* MEMALIGN_GUARD = reinterpret_cast<void*>(0xA1A41520);
|
||||
|
||||
extern __LIBC_HIDDEN__
|
||||
int get_backtrace(intptr_t* addrs, size_t max_entries);
|
||||
extern __LIBC_HIDDEN__ int get_backtrace(intptr_t* addrs, size_t max_entries);
|
||||
|
||||
void* leak_malloc(size_t bytes)
|
||||
{
|
||||
extern "C" void* leak_malloc(size_t bytes) {
|
||||
// allocate enough space infront of the allocation to store the pointer for
|
||||
// the alloc structure. This will making free'ing the structer really fast!
|
||||
|
||||
|
@ -272,38 +271,35 @@ void* leak_malloc(size_t bytes)
|
|||
|
||||
void* base = dlmalloc(size);
|
||||
if (base != NULL) {
|
||||
pthread_mutex_lock(&gAllocationsMutex);
|
||||
ScopedPthreadMutexLocker locker(&gAllocationsMutex);
|
||||
|
||||
intptr_t backtrace[BACKTRACE_SIZE];
|
||||
size_t numEntries = get_backtrace(backtrace, BACKTRACE_SIZE);
|
||||
intptr_t backtrace[BACKTRACE_SIZE];
|
||||
size_t numEntries = get_backtrace(backtrace, BACKTRACE_SIZE);
|
||||
|
||||
AllocationEntry* header = (AllocationEntry*)base;
|
||||
header->entry = record_backtrace(backtrace, numEntries, bytes);
|
||||
header->guard = GUARD;
|
||||
AllocationEntry* header = reinterpret_cast<AllocationEntry*>(base);
|
||||
header->entry = record_backtrace(backtrace, numEntries, bytes);
|
||||
header->guard = GUARD;
|
||||
|
||||
// now increment base to point to after our header.
|
||||
// this should just work since our header is 8 bytes.
|
||||
base = (AllocationEntry*)base + 1;
|
||||
|
||||
pthread_mutex_unlock(&gAllocationsMutex);
|
||||
// now increment base to point to after our header.
|
||||
// this should just work since our header is 8 bytes.
|
||||
base = reinterpret_cast<AllocationEntry*>(base) + 1;
|
||||
}
|
||||
|
||||
return base;
|
||||
}
|
||||
|
||||
void leak_free(void* mem)
|
||||
{
|
||||
extern "C" void leak_free(void* mem) {
|
||||
if (mem != NULL) {
|
||||
pthread_mutex_lock(&gAllocationsMutex);
|
||||
ScopedPthreadMutexLocker locker(&gAllocationsMutex);
|
||||
|
||||
// check the guard to make sure it is valid
|
||||
AllocationEntry* header = (AllocationEntry*)mem - 1;
|
||||
AllocationEntry* header = to_header(mem);
|
||||
|
||||
if (header->guard != GUARD) {
|
||||
// could be a memaligned block
|
||||
if (((void**)mem)[-1] == MEMALIGN_GUARD) {
|
||||
mem = ((void**)mem)[-2];
|
||||
header = (AllocationEntry*)mem - 1;
|
||||
if (reinterpret_cast<void**>(mem)[-1] == MEMALIGN_GUARD) {
|
||||
mem = reinterpret_cast<void**>(mem)[-2];
|
||||
header = to_header(mem);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -322,36 +318,29 @@ void leak_free(void* mem)
|
|||
debug_log("WARNING bad header guard: '0x%x'! and invalid entry: %p\n",
|
||||
header->guard, header->entry);
|
||||
}
|
||||
|
||||
pthread_mutex_unlock(&gAllocationsMutex);
|
||||
}
|
||||
}
|
||||
|
||||
void* leak_calloc(size_t n_elements, size_t elem_size)
|
||||
{
|
||||
size_t size;
|
||||
void* ptr;
|
||||
|
||||
extern "C" void* leak_calloc(size_t n_elements, size_t elem_size) {
|
||||
/* Fail on overflow - just to be safe even though this code runs only
|
||||
* within the debugging C library, not the production one */
|
||||
if (n_elements && MAX_SIZE_T / n_elements < elem_size) {
|
||||
return NULL;
|
||||
}
|
||||
size = n_elements * elem_size;
|
||||
ptr = leak_malloc(size);
|
||||
size_t size = n_elements * elem_size;
|
||||
void* ptr = leak_malloc(size);
|
||||
if (ptr != NULL) {
|
||||
memset(ptr, 0, size);
|
||||
}
|
||||
return ptr;
|
||||
}
|
||||
|
||||
void* leak_realloc(void* oldMem, size_t bytes)
|
||||
{
|
||||
extern "C" void* leak_realloc(void* oldMem, size_t bytes) {
|
||||
if (oldMem == NULL) {
|
||||
return leak_malloc(bytes);
|
||||
}
|
||||
void* newMem = NULL;
|
||||
AllocationEntry* header = (AllocationEntry*)oldMem - 1;
|
||||
AllocationEntry* header = to_header(oldMem);
|
||||
if (header && header->guard == GUARD) {
|
||||
size_t oldSize = header->entry->size & ~SIZE_FLAG_MASK;
|
||||
newMem = leak_malloc(bytes);
|
||||
|
@ -366,15 +355,16 @@ void* leak_realloc(void* oldMem, size_t bytes)
|
|||
return newMem;
|
||||
}
|
||||
|
||||
void* leak_memalign(size_t alignment, size_t bytes)
|
||||
{
|
||||
extern "C" void* leak_memalign(size_t alignment, size_t bytes) {
|
||||
// we can just use malloc
|
||||
if (alignment <= MALLOC_ALIGNMENT)
|
||||
if (alignment <= MALLOC_ALIGNMENT) {
|
||||
return leak_malloc(bytes);
|
||||
}
|
||||
|
||||
// need to make sure it's a power of two
|
||||
if (alignment & (alignment-1))
|
||||
if (alignment & (alignment-1)) {
|
||||
alignment = 1L << (31 - __builtin_clz(alignment));
|
||||
}
|
||||
|
||||
// here, aligment is at least MALLOC_ALIGNMENT<<1 bytes
|
||||
// we will align by at least MALLOC_ALIGNMENT bytes
|
||||
|
@ -386,18 +376,19 @@ void* leak_memalign(size_t alignment, size_t bytes)
|
|||
|
||||
void* base = leak_malloc(size);
|
||||
if (base != NULL) {
|
||||
intptr_t ptr = (intptr_t)base;
|
||||
if ((ptr % alignment) == 0)
|
||||
intptr_t ptr = reinterpret_cast<intptr_t>(base);
|
||||
if ((ptr % alignment) == 0) {
|
||||
return base;
|
||||
}
|
||||
|
||||
// align the pointer
|
||||
ptr += ((-ptr) % alignment);
|
||||
|
||||
// there is always enough space for the base pointer and the guard
|
||||
((void**)ptr)[-1] = MEMALIGN_GUARD;
|
||||
((void**)ptr)[-2] = base;
|
||||
reinterpret_cast<void**>(ptr)[-1] = MEMALIGN_GUARD;
|
||||
reinterpret_cast<void**>(ptr)[-2] = base;
|
||||
|
||||
return (void*)ptr;
|
||||
return reinterpret_cast<void*>(ptr);
|
||||
}
|
||||
return base;
|
||||
}
|
|
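leak_malloc above reserves an AllocationEntry immediately before the pointer handed back to the caller, and to_header steps back over it on free. A self-contained sketch of that trick; DemoHeader and the guard value are illustrative, not the real AllocationEntry or GUARD:

#include <stdlib.h>
#include <stdint.h>

struct DemoHeader {
  uint32_t id;
  uint32_t guard;
};

void* demo_alloc(size_t bytes, uint32_t id) {
  DemoHeader* header = static_cast<DemoHeader*>(malloc(sizeof(DemoHeader) + bytes));
  if (header == NULL) {
    return NULL;
  }
  header->id = id;
  header->guard = 0xdecafbadU;  // placeholder guard value
  return header + 1;            // caller sees the bytes just past the header
}

DemoHeader* demo_to_header(void* user_ptr) {
  return reinterpret_cast<DemoHeader*>(user_ptr) - 1;  // step back over the header
}
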
@ -79,7 +79,7 @@
|
|||
* sources (file memcheck/memcheck_common.h). So, every time a change is made to
|
||||
* any of these two declaration, another one must be also updated accordingly.
|
||||
*/
|
||||
typedef struct MallocDesc {
|
||||
struct MallocDesc {
|
||||
/* Pointer to the memory block actually allocated from the heap. Note that
|
||||
* this is not the pointer that is returned to the malloc's caller. Pointer
|
||||
* returned to the caller is calculated by adding value stored in this field
|
||||
|
@ -115,7 +115,7 @@ typedef struct MallocDesc {
|
|||
|
||||
/* Number of access violations detected on this allocation. */
|
||||
uint32_t av_count;
|
||||
} MallocDesc;
|
||||
};
|
||||
|
||||
/* Describes memory block info queried from emulator. This structure is passed
|
||||
* along with TRACE_DEV_REG_QUERY_MALLOC event. When handling free and realloc
|
||||
|
@ -130,7 +130,7 @@ typedef struct MallocDesc {
|
|||
* memcheck/memecheck_common.h). So, every time a change is made to any of these
|
||||
* two declaration, another one must be also updated accordingly.
|
||||
*/
|
||||
typedef struct MallocDescQuery {
|
||||
struct MallocDescQuery {
|
||||
/* Pointer, for which information is queried. Note that this pointer doesn't
|
||||
* have to be exact pointer returned to malloc's caller, but can point
|
||||
* anywhere inside an allocated block, including guarding areas. Emulator
|
||||
|
@ -160,7 +160,7 @@ typedef struct MallocDescQuery {
|
|||
* response to the query.
|
||||
*/
|
||||
MallocDesc* desc;
|
||||
} MallocDescQuery;
|
||||
};
|
||||
|
||||
/* Describes memory block that is being freed back to the heap. This structure
|
||||
* is passed along with TRACE_DEV_REG_FREE_PTR event. The entire structure is
|
||||
|
@ -170,7 +170,7 @@ typedef struct MallocDescQuery {
|
|||
* memcheck/memecheck_common.h). So, every time a change is made to any of these
|
||||
* two declaration, another one must be also updated accordingly.
|
||||
*/
|
||||
typedef struct MallocFree {
|
||||
struct MallocFree {
|
||||
/* Pointer to be freed. */
|
||||
void* ptr;
|
||||
|
||||
|
@ -183,7 +183,7 @@ typedef struct MallocFree {
|
|||
|
||||
/* Process ID in context of which memory is being freed. */
|
||||
uint32_t free_pid;
|
||||
} MallocFree;
|
||||
};
|
||||
|
||||
// =============================================================================
|
||||
// Communication events
|
||||
|
@@ -267,7 +267,7 @@ static void dump_malloc_descriptor(char* str,
#define TR(...) \
do { \
char tr_str[4096]; \
snprintf(tr_str, sizeof(tr_str), __VA_ARGS__ ); \
snprintf(tr_str, sizeof(tr_str), __VA_ARGS__); \
tr_str[sizeof(tr_str) - 1] = '\0'; \
notify_qemu_string(&tr_str[0]); \
} while (0)

@@ -290,27 +290,27 @@ static void dump_malloc_descriptor(char* str,
#define qemu_debug_log(format, ...) \
do { \
__libc_android_log_print(ANDROID_LOG_DEBUG, "memcheck", \
(format), ##__VA_ARGS__ ); \
(format), ##__VA_ARGS__); \
if (tracing_flags & DEBUG_TRACING_ENABLED) { \
qemu_log(ANDROID_LOG_DEBUG, (format), ##__VA_ARGS__ ); \
qemu_log(ANDROID_LOG_DEBUG, (format), ##__VA_ARGS__); \
} \
} while (0)

#define qemu_error_log(format, ...) \
do { \
__libc_android_log_print(ANDROID_LOG_ERROR, "memcheck", \
(format), ##__VA_ARGS__ ); \
(format), ##__VA_ARGS__); \
if (tracing_flags & ERROR_TRACING_ENABLED) { \
qemu_log(ANDROID_LOG_ERROR, (format), ##__VA_ARGS__ ); \
qemu_log(ANDROID_LOG_ERROR, (format), ##__VA_ARGS__); \
} \
} while (0)

#define qemu_info_log(format, ...) \
do { \
__libc_android_log_print(ANDROID_LOG_INFO, "memcheck", \
(format), ##__VA_ARGS__ ); \
(format), ##__VA_ARGS__); \
if (tracing_flags & INFO_TRACING_ENABLED) { \
qemu_log(ANDROID_LOG_INFO, (format), ##__VA_ARGS__ ); \
qemu_log(ANDROID_LOG_INFO, (format), ##__VA_ARGS__); \
} \
} while (0)
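All of these logging macros keep the do { ... } while (0) wrapper, which is what lets a multi-statement macro behave like a single statement. A self-contained sketch of why that matters, using a hypothetical LOG_TWICE macro and plain printf instead of the libc loggers:

    #include <stdio.h>

    #define LOG_TWICE(msg)             \
        do {                           \
            printf("%s\n", (msg));     \
            printf("%s\n", (msg));     \
        } while (0)

    int main() {
        int error = 1;
        // Without the do/while wrapper the second printf would escape this if,
        // and the trailing ';' would break an if/else chain.
        if (error)
            LOG_TWICE("allocation failed");
        else
            printf("ok\n");
        return 0;
    }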
@@ -368,10 +368,8 @@ static uint32_t tracing_flags = 0;
* Return:
* Pointer to the allocated memory returned to the malloc caller.
*/
static inline void*
mallocdesc_user_ptr(const MallocDesc* desc)
{
return (char*)desc->ptr + desc->prefix_size;
static inline void* mallocdesc_user_ptr(const MallocDesc* desc) {
return static_cast<char*>(desc->ptr) + desc->prefix_size;
}

/* Gets size of memory block actually allocated from the heap for the given

@@ -381,9 +379,7 @@ mallocdesc_user_ptr(const MallocDesc* desc)
* Return:
* Size of memory block actually allocated from the heap.
*/
static inline uint32_t
mallocdesc_alloc_size(const MallocDesc* desc)
{
static inline uint32_t mallocdesc_alloc_size(const MallocDesc* desc) {
return desc->prefix_size + desc->requested_bytes + desc->suffix_size;
}

@@ -393,10 +389,8 @@ mallocdesc_alloc_size(const MallocDesc* desc)
* Return:
* Pointer to the end of (one byte past) the allocated block.
*/
static inline void*
mallocdesc_alloc_end(const MallocDesc* desc)
{
return (char*)desc->ptr + mallocdesc_alloc_size(desc);
static inline void* mallocdesc_alloc_end(const MallocDesc* desc) {
return static_cast<char*>(desc->ptr) + mallocdesc_alloc_size(desc);
}

/* Fires up an event in the emulator.
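Taken together, these three helpers encode the guarded layout every instrumented allocation uses; a sketch of that layout with the field names from MallocDesc above:

    // [ prefix guard     ][ caller-visible bytes  ][ suffix guard     ]
    // ^ desc->ptr          ^ mallocdesc_user_ptr()                     ^ mallocdesc_alloc_end()
    //   prefix_size bytes    requested_bytes bytes   suffix_size bytes
    //
    // so mallocdesc_alloc_size() == prefix_size + requested_bytes + suffix_size.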
@@ -404,9 +398,7 @@ mallocdesc_alloc_end(const MallocDesc* desc)
* code - Event code (one of the TRACE_DEV_XXX).
* val - Event's value parameter.
*/
static inline void
notify_qemu(uint32_t code, uint32_t val)
{
static inline void notify_qemu(uint32_t code, uint32_t val) {
if (NULL != qtrace) {
*(volatile uint32_t*)((uint32_t)qtrace + ((code - 1024) << 2)) = val;
}
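notify_qemu() is the whole guest-to-emulator channel: each event code selects a 32-bit slot in the magic page mapped during initialization, and a single volatile store to that slot is what the emulator traps. A hedged restatement of the same pattern (report_event and magic_page are illustrative names, and uintptr_t replaces the uint32_t pointer cast):

    #include <stdint.h>

    static inline void report_event(void* magic_page, uint32_t code, uint32_t val) {
        // Each code (TRACE_DEV_XXX, numbered from 1024) owns one 32-bit slot in the page.
        volatile uint32_t* slot = reinterpret_cast<volatile uint32_t*>(
                reinterpret_cast<uintptr_t>(magic_page) + ((code - 1024) << 2));
        *slot = val;  // the store itself is the notification the emulator sees
    }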
@@ -417,9 +409,7 @@ notify_qemu(uint32_t code, uint32_t val)
* Param:
* str - Zero-terminated string to print.
*/
static void
notify_qemu_string(const char* str)
{
static void notify_qemu_string(const char* str) {
if (str != NULL) {
notify_qemu(TRACE_DEV_REG_PRINT_USER_STR, (uint32_t)str);
}

@@ -429,9 +419,7 @@ notify_qemu_string(const char* str)
* Param:
* pid - ID of the process that initialized libc.
*/
static void
notify_qemu_libc_initialized(uint32_t pid)
{
static void notify_qemu_libc_initialized(uint32_t pid) {
notify_qemu(TRACE_DEV_REG_LIBC_INIT, pid);
}

@@ -444,9 +432,7 @@ notify_qemu_libc_initialized(uint32_t pid)
* the desc parameter passed to this routine has been zeroed out by the
* emulator.
*/
static inline int
notify_qemu_malloc(volatile MallocDesc* desc)
{
static inline int notify_qemu_malloc(volatile MallocDesc* desc) {
desc->libc_pid = malloc_pid;
desc->allocator_pid = getpid();
desc->av_count = 0;

@@ -463,9 +449,7 @@ notify_qemu_malloc(volatile MallocDesc* desc)
* Return:
* Zero on success, or -1 on failure.
*/
static inline int
notify_qemu_free(void* ptr_to_free)
{
static inline int notify_qemu_free(void* ptr_to_free) {
volatile MallocFree free_desc;

free_desc.ptr = ptr_to_free;

@@ -489,9 +473,7 @@ notify_qemu_free(void* ptr_to_free)
* Return:
* Zero on success, or -1 on failure.
*/
static inline int
query_qemu_malloc_info(void* ptr, MallocDesc* desc, uint32_t routine)
{
static inline int query_qemu_malloc_info(void* ptr, MallocDesc* desc, uint32_t routine) {
volatile MallocDescQuery query;

query.ptr = ptr;
@@ -511,9 +493,7 @@ query_qemu_malloc_info(void* ptr, MallocDesc* desc, uint32_t routine)
* prio - Message priority (debug, info, or error)
* fmt + rest - Message format and parameters.
*/
static void
qemu_log(int prio, const char* fmt, ...)
{
static void qemu_log(int prio, const char* fmt, ...) {
va_list ap;
char buf[4096];
const char* prefix;

@@ -555,9 +535,7 @@ qemu_log(int prio, const char* fmt, ...)
* str_buf_size - Size of string's buffer.
* desc - Descriptor to dump.
*/
static void
dump_malloc_descriptor(char* str, size_t str_buf_size, const MallocDesc* desc)
{
static void dump_malloc_descriptor(char* str, size_t str_buf_size, const MallocDesc* desc) {
if (str_buf_size) {
snprintf(str, str_buf_size,
"MDesc: %p: %X <-> %X [%u + %u + %u] by pid=%03u in libc_pid=%03u",

@@ -573,9 +551,7 @@ dump_malloc_descriptor(char* str, size_t str_buf_size, const MallocDesc* desc)
/* Causes an access violation on allocation descriptor, and verifies that
* violation has been detected by memory checker in the emulator.
*/
static void
test_access_violation(const MallocDesc* desc)
{
static void test_access_violation(const MallocDesc* desc) {
MallocDesc desc_chk;
char ch;
volatile char* prefix = (volatile char*)desc->ptr;

@@ -617,9 +593,7 @@ void* qemu_instrumented_memalign(size_t alignment, size_t bytes);
* Return:
* 0 on success, or -1 on failure.
*/
int
malloc_debug_initialize(void)
{
int malloc_debug_initialize() {
/* We will be using emulator's magic page to report memory allocation
* activities. In essence, what magic page does, it translates writes to
* the memory mapped spaces into writes to an I/O port that emulator

@@ -657,9 +631,7 @@ malloc_debug_initialize(void)
* Return:
* 0 on success, or -1 on failure.
*/
int
memcheck_initialize(int alignment, const char* memcheck_param)
{
int memcheck_initialize(int alignment, const char* memcheck_param) {
malloc_alignment = alignment;

/* Parse -memcheck parameter for the guest tracing flags. */
@@ -705,9 +677,7 @@ memcheck_initialize(int alignment, const char* memcheck_param)
* bytes (plus prefix, and suffix guards), and report allocation to the
* emulator.
*/
void*
qemu_instrumented_malloc(size_t bytes)
{
void* qemu_instrumented_malloc(size_t bytes) {
MallocDesc desc;

/* Initialize block descriptor and allocate memory. Note that dlmalloc

@@ -742,9 +712,7 @@ qemu_instrumented_malloc(size_t bytes)
* Primary responsibility of this routine is to free requested memory, and
* report free block to the emulator.
*/
void
qemu_instrumented_free(void* mem)
{
void qemu_instrumented_free(void* mem) {
MallocDesc desc;

if (mem == NULL) {

@@ -787,14 +755,7 @@ qemu_instrumented_free(void* mem)
/* This routine serves as entry point for 'calloc'.
* This routine behaves similarly to qemu_instrumented_malloc.
*/
void*
qemu_instrumented_calloc(size_t n_elements, size_t elem_size)
{
MallocDesc desc;
void* ret;
size_t total_size;
size_t total_elements;

void* qemu_instrumented_calloc(size_t n_elements, size_t elem_size) {
if (n_elements == 0 || elem_size == 0) {
// Just let go zero bytes allocation.
qemu_info_log("::: <libc_pid=%03u, pid=%03u>: Zero calloc redir to malloc",

@@ -808,6 +769,8 @@ qemu_instrumented_calloc(size_t n_elements, size_t elem_size)
return NULL;
}

MallocDesc desc;

/* Calculating prefix size. The trick here is to make sure that
* first element (returned to the caller) is properly aligned. */
if (DEFAULT_PREFIX_SIZE >= elem_size) {

@@ -827,8 +790,8 @@ qemu_instrumented_calloc(size_t n_elements, size_t elem_size)
desc.suffix_size = DEFAULT_SUFFIX_SIZE;
}
desc.requested_bytes = n_elements * elem_size;
total_size = desc.requested_bytes + desc.prefix_size + desc.suffix_size;
total_elements = total_size / elem_size;
size_t total_size = desc.requested_bytes + desc.prefix_size + desc.suffix_size;
size_t total_elements = total_size / elem_size;
total_size %= elem_size;
if (total_size != 0) {
// Add extra to the suffix area.
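The new local declarations simply move the sizing arithmetic next to its first use; the arithmetic itself rounds the guarded size up to a whole number of elements so the underlying calloc() still zero-fills the guards. A hedged restatement with hypothetical standalone names:

    #include <stddef.h>

    // How many elem_size elements are needed to cover requested + prefix + suffix bytes.
    static size_t guarded_element_count(size_t requested_bytes, size_t prefix_size,
                                        size_t suffix_size, size_t elem_size) {
        size_t total_size = requested_bytes + prefix_size + suffix_size;
        size_t total_elements = total_size / elem_size;
        if (total_size % elem_size != 0) {
            ++total_elements;  // the leftover bytes are absorbed by the suffix guard
        }
        return total_elements;
    }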
@@ -864,9 +827,7 @@ qemu_instrumented_calloc(size_t n_elements, size_t elem_size)
* allocation, but overall it doesn't seem to matter, as caller of realloc
* should not expect that pointer returned after shrinking will remain the same.
*/
void*
qemu_instrumented_realloc(void* mem, size_t bytes)
{
void* qemu_instrumented_realloc(void* mem, size_t bytes) {
MallocDesc new_desc;
MallocDesc cur_desc;
size_t to_copy;

@@ -936,7 +897,7 @@ qemu_instrumented_realloc(void* mem, size_t bytes)
}

// Register new block with emulator.
if(notify_qemu_malloc(&new_desc)) {
if (notify_qemu_malloc(&new_desc)) {
log_mdesc(error, &new_desc, "<libc_pid=%03u, pid=%03u>: realloc(%p, %u) notify_malloc failed -> ",
malloc_pid, getpid(), mem, bytes);
log_mdesc(error, &cur_desc, " <- ");

@@ -970,9 +931,7 @@ qemu_instrumented_realloc(void* mem, size_t bytes)
/* This routine serves as entry point for 'memalign'.
* This routine behaves similarly to qemu_instrumented_malloc.
*/
void*
qemu_instrumented_memalign(size_t alignment, size_t bytes)
{
void* qemu_instrumented_memalign(size_t alignment, size_t bytes) {
MallocDesc desc;

if (bytes == 0) {

@@ -32,11 +32,10 @@
// stack trace functions
// =============================================================================

typedef struct
{
struct stack_crawl_state_t {
size_t count;
intptr_t* addrs;
} stack_crawl_state_t;
};

/* depends how the system includes define this */

@@ -46,9 +45,8 @@ typedef struct _Unwind_Context __unwind_context;
typedef _Unwind_Context __unwind_context;
#endif

static _Unwind_Reason_Code trace_function(__unwind_context *context, void *arg)
{
stack_crawl_state_t* state = (stack_crawl_state_t*)arg;
static _Unwind_Reason_Code trace_function(__unwind_context* context, void* arg) {
stack_crawl_state_t* state = static_cast<stack_crawl_state_t*>(arg);
if (state->count) {
intptr_t ip = (intptr_t)_Unwind_GetIP(context);
if (ip) {

@@ -65,12 +63,10 @@ static _Unwind_Reason_Code trace_function(__unwind_context *context, void *arg)
return _URC_END_OF_STACK;
}

__LIBC_HIDDEN__
int get_backtrace(intptr_t* addrs, size_t max_entries)
{
__LIBC_HIDDEN__ int get_backtrace(intptr_t* addrs, size_t max_entries) {
stack_crawl_state_t state;
state.count = max_entries;
state.addrs = (intptr_t*)addrs;
_Unwind_Backtrace(trace_function, (void*)&state);
state.addrs = addrs;
_Unwind_Backtrace(trace_function, &state);
return max_entries - state.count;
}
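get_backtrace() walks the stack by handing _Unwind_Backtrace a callback that records one return address per frame until the caller's buffer is full. A minimal standalone use of the same unwinder API (assumes the GCC-style <unwind.h>; names here are illustrative, not bionic's):

    #include <unwind.h>
    #include <stddef.h>
    #include <stdint.h>

    struct BacktraceState {
        size_t slots_left;
        void** out;
    };

    static _Unwind_Reason_Code collect_frame(struct _Unwind_Context* context, void* arg) {
        BacktraceState* state = static_cast<BacktraceState*>(arg);
        if (state->slots_left == 0) {
            return _URC_END_OF_STACK;   // buffer full; stop unwinding
        }
        uintptr_t ip = _Unwind_GetIP(context);
        if (ip != 0) {
            *state->out++ = reinterpret_cast<void*>(ip);
            --state->slots_left;
        }
        return _URC_NO_REASON;          // keep walking toward the outermost frame
    }

    size_t backtrace_sketch(void** addrs, size_t max_entries) {
        BacktraceState state = { max_entries, addrs };
        _Unwind_Backtrace(collect_frame, &state);
        return max_entries - state.slots_left;  // frames actually captured
    }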
@@ -115,7 +115,7 @@ int getpwuid_r(uid_t uid, passwd* pwd,
}

static stubs_state_t* stubs_state_alloc() {
stubs_state_t* s = reinterpret_cast<stubs_state_t*>(calloc(1, sizeof(*s)));
stubs_state_t* s = static_cast<stubs_state_t*>(calloc(1, sizeof(*s)));
if (s != NULL) {
s->group_.gr_mem = s->group_members_;
}

@@ -123,7 +123,7 @@ static stubs_state_t* stubs_state_alloc() {
}

static void stubs_state_free(void* ptr) {
stubs_state_t* state = reinterpret_cast<stubs_state_t*>(ptr);
stubs_state_t* state = static_cast<stubs_state_t*>(ptr);
free(state);
}

@@ -133,8 +133,7 @@ static void __stubs_key_init() {

static stubs_state_t* __stubs_state() {
pthread_once(&stubs_once, __stubs_key_init);
stubs_state_t* s =
reinterpret_cast<stubs_state_t*>(pthread_getspecific(stubs_key));
stubs_state_t* s = static_cast<stubs_state_t*>(pthread_getspecific(stubs_key));
if (s == NULL) {
s = stubs_state_alloc();
if (s == NULL) {
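These hunks swap reinterpret_cast for static_cast, which is the sufficient (and conventional) cast for a void* coming back from calloc() or pthread_getspecific(), and they keep the usual lazy per-thread state pattern. A self-contained sketch of that pattern with a hypothetical State type, not bionic's stubs_state_t:

    #include <pthread.h>
    #include <stdlib.h>

    struct State { int counter; };

    static pthread_key_t state_key;
    static pthread_once_t state_once = PTHREAD_ONCE_INIT;

    static void state_key_init() {
        // free() is the destructor run for each thread's value at thread exit.
        pthread_key_create(&state_key, free);
    }

    static State* get_state() {
        pthread_once(&state_once, state_key_init);
        State* s = static_cast<State*>(pthread_getspecific(state_key));
        if (s == NULL) {
            // First use on this thread: allocate and remember the per-thread value.
            s = static_cast<State*>(calloc(1, sizeof(State)));
            if (s != NULL) {
                pthread_setspecific(state_key, s);
            }
        }
        return s;
    }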