/*
 * Copyright (C) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <errno.h>
#include <inttypes.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

#include <map>
#include <utility>

#include "Allocator.h"
#include "HeapWalker.h"
#include "LeakFolding.h"
#include "ScopedSignalHandler.h"
#include "log.h"
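
// HeapWalker implements a conservative mark-and-sweep over a snapshot of a
// process's heap. A sketch of the expected call sequence, using only this
// file's API (the constructor argument is assumed; actual call sites live
// elsewhere in this library):
//
//   HeapWalker heap_walker(allocator);          // assumed constructor shape
//   heap_walker.Mapping(begin, end);            // for each valid mapping
//   heap_walker.Allocation(begin, end);         // for each live allocation
//   heap_walker.Root(begin, end);               // for each root range
//   heap_walker.DetectLeaks();                  // mark phase
//   heap_walker.Leaked(ranges, limit, &n, &b);  // sweep and report
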
namespace android {
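
// Record a live allocation [begin, end). Zero-length allocations are widened
// to one byte so they can be stored in the range map. Returns false (logging
// an error on partial overlap) if the range collides with an allocation that
// was already recorded.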
bool HeapWalker::Allocation(uintptr_t begin, uintptr_t end) {
  if (end == begin) {
    end = begin + 1;
  }
  Range range{begin, end};
  if (valid_mappings_range_.end != 0 &&
      (begin < valid_mappings_range_.begin || end > valid_mappings_range_.end)) {
    MEM_LOG_ALWAYS_FATAL("allocation %p-%p is outside mapping range %p-%p",
                         reinterpret_cast<void*>(begin), reinterpret_cast<void*>(end),
                         reinterpret_cast<void*>(valid_mappings_range_.begin),
                         reinterpret_cast<void*>(valid_mappings_range_.end));
  }
  auto inserted = allocations_.insert(std::pair<Range, AllocationInfo>(range, AllocationInfo{}));
  if (inserted.second) {
    valid_allocations_range_.begin = std::min(valid_allocations_range_.begin, begin);
    valid_allocations_range_.end = std::max(valid_allocations_range_.end, end);
    allocation_bytes_ += range.size();
    return true;
  } else {
    Range overlap = inserted.first->first;
    if (overlap != range) {
      MEM_ALOGE("range %p-%p overlaps with existing range %p-%p", reinterpret_cast<void*>(begin),
                reinterpret_cast<void*>(end), reinterpret_cast<void*>(overlap.begin),
                reinterpret_cast<void*>(overlap.end));
    }
    return false;
  }
}
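
// Read the word at word_ptr and check whether its value points into a known
// allocation. walking_ptr_ is set around the read so that HandleSegFault can
// tell whether a fault belongs to the walker.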
bool HeapWalker::WordContainsAllocationPtr(uintptr_t word_ptr, Range* range,
                                           AllocationInfo** info) {
  walking_ptr_ = word_ptr;
  // This access may segfault if the process under test has done something strange,
  // for example mprotect(PROT_NONE) on a native heap page. If so, it will be
  // caught and handled by mmaping a zero page over the faulting page.
  uintptr_t value = *reinterpret_cast<uintptr_t*>(word_ptr);
  walking_ptr_ = 0;
  if (value >= valid_allocations_range_.begin && value < valid_allocations_range_.end) {
    AllocationMap::iterator it = allocations_.find(Range{value, value + 1});
    if (it != allocations_.end()) {
      *range = it->first;
      *info = &it->second;
      return true;
    }
  }
  return false;
}
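
// Mark every allocation transitively reachable from root. An explicit work
// list is used instead of recursion so that deep reference chains cannot
// overflow the stack.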
void HeapWalker::RecurseRoot(const Range& root) {
  allocator::vector<Range> to_do(1, root, allocator_);
  while (!to_do.empty()) {
    Range range = to_do.back();
    to_do.pop_back();

    walking_range_ = range;
    ForEachPtrInRange(range, [&](Range& ref_range, AllocationInfo* ref_info) {
      if (!ref_info->referenced_from_root) {
        ref_info->referenced_from_root = true;
        to_do.push_back(ref_range);
      }
    });
    walking_range_ = Range{0, 0};
  }
}
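
// Extend the range of addresses that may legally contain allocations; once
// any mapping has been recorded, Allocation() fatally logs ranges that fall
// outside these bounds.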
void HeapWalker::Mapping(uintptr_t begin, uintptr_t end) {
  valid_mappings_range_.begin = std::min(valid_mappings_range_.begin, begin);
  valid_mappings_range_.end = std::max(valid_mappings_range_.end, end);
}
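
// Register a range of memory (for example a stack or global data segment) to
// be scanned for pointers into allocations.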
void HeapWalker::Root(uintptr_t begin, uintptr_t end) {
  roots_.push_back(Range{begin, end});
}
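
// Register individual values (for example saved register contents) to be
// treated as potential pointers into allocations.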
void HeapWalker::Root(const allocator::vector<uintptr_t>& vals) {
  root_vals_.insert(root_vals_.end(), vals.begin(), vals.end());
}

size_t HeapWalker::Allocations() {
  return allocations_.size();
}

size_t HeapWalker::AllocationBytes() {
  return allocation_bytes_;
}
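
// Mark phase: walk all registered root ranges, then the collected root
// values (scanned as one contiguous buffer), setting referenced_from_root on
// every reachable allocation. Intended to be called before Leaked().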
bool HeapWalker::DetectLeaks() {
  // Recursively walk pointers from roots to mark referenced allocations
  for (auto it = roots_.begin(); it != roots_.end(); it++) {
    RecurseRoot(*it);
  }

  Range vals;
  vals.begin = reinterpret_cast<uintptr_t>(root_vals_.data());
  vals.end = vals.begin + root_vals_.size() * sizeof(uintptr_t);

  RecurseRoot(vals);

  if (segv_page_count_ > 0) {
    MEM_ALOGE("%zu pages skipped due to segfaults", segv_page_count_);
  }

  return true;
}
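
// Sweep phase: report every allocation that was never reached from a root.
// The leak count and byte total always cover all leaks; at most limit leaked
// ranges are copied into the output vector.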
bool HeapWalker::Leaked(allocator::vector<Range>& leaked, size_t limit, size_t* num_leaks_out,
                        size_t* leak_bytes_out) {
  leaked.clear();

  size_t num_leaks = 0;
  size_t leak_bytes = 0;
  for (auto it = allocations_.begin(); it != allocations_.end(); it++) {
    if (!it->second.referenced_from_root) {
      num_leaks++;
      leak_bytes += it->first.end - it->first.begin;
    }
  }

  size_t n = 0;
  for (auto it = allocations_.begin(); it != allocations_.end(); it++) {
    if (!it->second.referenced_from_root) {
      if (n++ < limit) {
        leaked.push_back(it->first);
      }
    }
  }

  if (num_leaks_out) {
    *num_leaks_out = num_leaks;
  }
  if (leak_bytes_out) {
    *leak_bytes_out = leak_bytes;
  }

  return true;
}
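
// Replace the page containing addr with a fresh zero-filled readable page so
// that the interrupted walk can continue past the fault.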
static bool MapOverPage(void* addr) {
  const size_t page_size = sysconf(_SC_PAGE_SIZE);
  void* page = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(addr) & ~(page_size - 1));

  void* ret = mmap(page, page_size, PROT_READ, MAP_ANONYMOUS | MAP_PRIVATE | MAP_FIXED, -1, 0);
  if (ret == MAP_FAILED) {
    MEM_ALOGE("failed to map page at %p: %s", page, strerror(errno));
    return false;
  }

  return true;
}
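
// SIGSEGV handler installed while walking. Faults at the address currently
// being read are logged once and papered over with a zero page; any other
// fault is passed on by resetting the handler and re-executing the faulting
// instruction.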
void HeapWalker::HandleSegFault(ScopedSignalHandler& handler, int signal, siginfo_t* si,
                                void* /*uctx*/) {
  uintptr_t addr = reinterpret_cast<uintptr_t>(si->si_addr);
  if (addr != walking_ptr_) {
    handler.reset();
    return;
  }
  if (!segv_logged_) {
    MEM_ALOGW("failed to read page at %p, signal %d", si->si_addr, signal);
    if (walking_range_.begin != 0U) {
      MEM_ALOGW("while walking range %p-%p", reinterpret_cast<void*>(walking_range_.begin),
                reinterpret_cast<void*>(walking_range_.end));
    }
    segv_logged_ = true;
  }
  segv_page_count_++;
  if (!MapOverPage(si->si_addr)) {
    handler.reset();
  }
}

ScopedSignalHandler::SignalFn ScopedSignalHandler::handler_;

}  // namespace android