libmemunreachable: clang-format everything
clang-format -i --sort-includes $(find . -name "*.cpp" -o -name "*.h")

Test: builds
Change-Id: Ia8e0677fe7f3f26dddba3a851cd2dfab9f14e421
parent 07a57f0f28
commit a83881e33c
36 changed files with 565 additions and 633 deletions
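The style visible in the diff below (lines wrapped at roughly 100 columns, pointers attached to the type as in `void* ptr`, two-space indentation with access specifiers outdented by one space, short accessors collapsed onto one line, and sorted includes) is what a Google-based clang-format configuration produces. The project's actual .clang-format file is not part of this change, so the following is only an illustrative sketch of options consistent with the output shown here, not the repository's real configuration:

    # Hypothetical .clang-format consistent with the formatting in this commit;
    # the real config used by the project is not shown in this change.
    BasedOnStyle: Google
    ColumnLimit: 100
    IndentWidth: 2
    AccessModifierOffset: -1
    DerivePointerAlignment: false
    PointerAlignment: Left
    SortIncludes: true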
@@ -33,9 +33,9 @@
#include "android-base/macros.h"
#include "anon_vma_naming.h"
#include "Allocator.h"
#include "LinkedList.h"
#include "anon_vma_naming.h"
// runtime interfaces used:
// abort

@@ -57,10 +57,9 @@ static constexpr size_t kChunkSize = 256 * 1024;
static constexpr size_t kUsableChunkSize = kChunkSize - kPageSize;
static constexpr size_t kMaxBucketAllocationSize = kChunkSize / 4;
static constexpr size_t kMinBucketAllocationSize = 8;
static constexpr unsigned int kNumBuckets = const_log2(kMaxBucketAllocationSize)
- const_log2(kMinBucketAllocationSize) + 1;
static constexpr unsigned int kUsablePagesPerChunk = kUsableChunkSize
/ kPageSize;
static constexpr unsigned int kNumBuckets =
const_log2(kMaxBucketAllocationSize) - const_log2(kMinBucketAllocationSize) + 1;
static constexpr unsigned int kUsablePagesPerChunk = kUsableChunkSize / kPageSize;
std::atomic<int> heap_count;

@@ -93,7 +92,7 @@ class HeapImpl {
void FreeLocked(void* ptr);
struct MapAllocation {
void *ptr;
void* ptr;
size_t size;
MapAllocation* next;
};

@@ -107,8 +106,7 @@ static inline unsigned int log2(size_t n) {
}
static inline unsigned int size_to_bucket(size_t size) {
if (size < kMinBucketAllocationSize)
return kMinBucketAllocationSize;
if (size < kMinBucketAllocationSize) return kMinBucketAllocationSize;
return log2(size - 1) + 1 - const_log2(kMinBucketAllocationSize);
}

@@ -140,8 +138,7 @@ static void* MapAligned(size_t size, size_t align) {
// Trim beginning
if (aligned_ptr != ptr) {
ptrdiff_t extra = reinterpret_cast<uintptr_t>(aligned_ptr)
- reinterpret_cast<uintptr_t>(ptr);
ptrdiff_t extra = reinterpret_cast<uintptr_t>(aligned_ptr) - reinterpret_cast<uintptr_t>(ptr);
munmap(ptr, extra);
map_size -= extra;
ptr = aligned_ptr;

@@ -151,14 +148,13 @@ static void* MapAligned(size_t size, size_t align) {
if (map_size != size) {
assert(map_size > size);
assert(ptr != NULL);
munmap(reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(ptr) + size),
map_size - size);
munmap(reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(ptr) + size), map_size - size);
}
#define PR_SET_VMA 0x53564d41
#define PR_SET_VMA_ANON_NAME 0
prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME,
reinterpret_cast<uintptr_t>(ptr), size, "leak_detector_malloc");
#define PR_SET_VMA 0x53564d41
#define PR_SET_VMA_ANON_NAME 0
prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, reinterpret_cast<uintptr_t>(ptr), size,
"leak_detector_malloc");
return ptr;
}

@@ -170,36 +166,31 @@ class Chunk {
Chunk(HeapImpl* heap, int bucket);
~Chunk() {}
void *Alloc();
void* Alloc();
void Free(void* ptr);
void Purge();
bool Empty();
static Chunk* ptr_to_chunk(void* ptr) {
return reinterpret_cast<Chunk*>(reinterpret_cast<uintptr_t>(ptr)
& ~(kChunkSize - 1));
return reinterpret_cast<Chunk*>(reinterpret_cast<uintptr_t>(ptr) & ~(kChunkSize - 1));
}
static bool is_chunk(void* ptr) {
return (reinterpret_cast<uintptr_t>(ptr) & (kChunkSize - 1)) != 0;
}
unsigned int free_count() {
return free_count_;
}
HeapImpl* heap() {
return heap_;
}
LinkedList<Chunk*> node_; // linked list sorted by minimum free count
unsigned int free_count() { return free_count_; }
HeapImpl* heap() { return heap_; }
LinkedList<Chunk*> node_;  // linked list sorted by minimum free count
private:
DISALLOW_COPY_AND_ASSIGN(Chunk);
HeapImpl* heap_;
unsigned int bucket_;
unsigned int allocation_size_; // size of allocations in chunk, min 8 bytes
unsigned int max_allocations_; // maximum number of allocations in the chunk
unsigned int first_free_bitmap_; // index into bitmap for first non-full entry
unsigned int free_count_; // number of available allocations
unsigned int frees_since_purge_; // number of calls to Free since last Purge
unsigned int allocation_size_;    // size of allocations in chunk, min 8 bytes
unsigned int max_allocations_;    // maximum number of allocations in the chunk
unsigned int first_free_bitmap_;  // index into bitmap for first non-full entry
unsigned int free_count_;         // number of available allocations
unsigned int frees_since_purge_;  // number of calls to Free since last Purge
// bitmap of pages that have been dirtied
uint32_t dirty_pages_[div_round_up(kUsablePagesPerChunk, 32)];

@@ -210,13 +201,10 @@ class Chunk {
char data_[0];
unsigned int ptr_to_n(void* ptr) {
ptrdiff_t offset = reinterpret_cast<uintptr_t>(ptr)
- reinterpret_cast<uintptr_t>(data_);
ptrdiff_t offset = reinterpret_cast<uintptr_t>(ptr) - reinterpret_cast<uintptr_t>(data_);
return offset / allocation_size_;
}
void* n_to_ptr(unsigned int n) {
return data_ + n * allocation_size_;
}
void* n_to_ptr(unsigned int n) { return data_ + n * allocation_size_; }
};
static_assert(sizeof(Chunk) <= kPageSize, "header must fit in page");
@@ -225,23 +213,27 @@ void* Chunk::operator new(std::size_t count __attribute__((unused))) noexcept {
assert(count == sizeof(Chunk));
void* mem = MapAligned(kChunkSize, kChunkSize);
if (!mem) {
abort(); //throw std::bad_alloc;
abort();  // throw std::bad_alloc;
}
return mem;
}
// Override new operator on chunk to use mmap to allocate kChunkSize
void Chunk::operator delete(void *ptr) {
void Chunk::operator delete(void* ptr) {
assert(reinterpret_cast<Chunk*>(ptr) == ptr_to_chunk(ptr));
munmap(ptr, kChunkSize);
}
Chunk::Chunk(HeapImpl* heap, int bucket) :
node_(this), heap_(heap), bucket_(bucket), allocation_size_(
bucket_to_size(bucket)), max_allocations_(
kUsableChunkSize / allocation_size_), first_free_bitmap_(0), free_count_(
max_allocations_), frees_since_purge_(0) {
Chunk::Chunk(HeapImpl* heap, int bucket)
: node_(this),
heap_(heap),
bucket_(bucket),
allocation_size_(bucket_to_size(bucket)),
max_allocations_(kUsableChunkSize / allocation_size_),
first_free_bitmap_(0),
free_count_(max_allocations_),
frees_since_purge_(0) {
memset(dirty_pages_, 0, sizeof(dirty_pages_));
memset(free_bitmap_, 0xff, sizeof(free_bitmap_));
}

@@ -254,8 +246,7 @@ void* Chunk::Alloc() {
assert(free_count_ > 0);
unsigned int i = first_free_bitmap_;
while (free_bitmap_[i] == 0)
i++;
while (free_bitmap_[i] == 0) i++;
assert(i < arraysize(free_bitmap_));
unsigned int bit = __builtin_ffs(free_bitmap_[i]) - 1;
assert(free_bitmap_[i] & (1U << bit));

@@ -306,38 +297,35 @@ void Chunk::Free(void* ptr) {
void Chunk::Purge() {
frees_since_purge_ = 0;
//unsigned int allocsPerPage = kPageSize / allocation_size_;
// unsigned int allocsPerPage = kPageSize / allocation_size_;
}
// Override new operator on HeapImpl to use mmap to allocate a page
void* HeapImpl::operator new(std::size_t count __attribute__((unused)))
noexcept {
void* HeapImpl::operator new(std::size_t count __attribute__((unused))) noexcept {
assert(count == sizeof(HeapImpl));
void* mem = MapAligned(kPageSize, kPageSize);
if (!mem) {
abort(); //throw std::bad_alloc;
abort();  // throw std::bad_alloc;
}
heap_count++;
return mem;
}
void HeapImpl::operator delete(void *ptr) {
void HeapImpl::operator delete(void* ptr) {
munmap(ptr, kPageSize);
}
HeapImpl::HeapImpl() :
free_chunks_(), full_chunks_(), map_allocation_list_(NULL) {
}
HeapImpl::HeapImpl() : free_chunks_(), full_chunks_(), map_allocation_list_(NULL) {}
bool HeapImpl::Empty() {
for (unsigned int i = 0; i < kNumBuckets; i++) {
for (LinkedList<Chunk*> *it = free_chunks_[i].next(); it->data() != NULL; it = it->next()) {
for (LinkedList<Chunk*>* it = free_chunks_[i].next(); it->data() != NULL; it = it->next()) {
if (!it->data()->Empty()) {
return false;
}
}
for (LinkedList<Chunk*> *it = full_chunks_[i].next(); it->data() != NULL; it = it->next()) {
for (LinkedList<Chunk*>* it = full_chunks_[i].next(); it->data() != NULL; it = it->next()) {
if (!it->data()->Empty()) {
return false;
}

@@ -350,12 +338,12 @@ bool HeapImpl::Empty() {
HeapImpl::~HeapImpl() {
for (unsigned int i = 0; i < kNumBuckets; i++) {
while (!free_chunks_[i].empty()) {
Chunk *chunk = free_chunks_[i].next()->data();
Chunk* chunk = free_chunks_[i].next()->data();
chunk->node_.remove();
delete chunk;
}
while (!full_chunks_[i].empty()) {
Chunk *chunk = full_chunks_[i].next()->data();
Chunk* chunk = full_chunks_[i].next()->data();
chunk->node_.remove();
delete chunk;
}
@@ -373,18 +361,18 @@ void* HeapImpl::AllocLocked(size_t size) {
}
int bucket = size_to_bucket(size);
if (free_chunks_[bucket].empty()) {
Chunk *chunk = new Chunk(this, bucket);
Chunk* chunk = new Chunk(this, bucket);
free_chunks_[bucket].insert(chunk->node_);
}
return free_chunks_[bucket].next()->data()->Alloc();
}
void HeapImpl::Free(void *ptr) {
void HeapImpl::Free(void* ptr) {
std::lock_guard<std::mutex> lk(m_);
FreeLocked(ptr);
}
void HeapImpl::FreeLocked(void *ptr) {
void HeapImpl::FreeLocked(void* ptr) {
if (!Chunk::is_chunk(ptr)) {
HeapImpl::MapFree(ptr);
} else {

@@ -397,12 +385,11 @@ void HeapImpl::FreeLocked(void *ptr) {
void* HeapImpl::MapAlloc(size_t size) {
size = (size + kPageSize - 1) & ~(kPageSize - 1);
MapAllocation* allocation = reinterpret_cast<MapAllocation*>(AllocLocked(
sizeof(MapAllocation)));
MapAllocation* allocation = reinterpret_cast<MapAllocation*>(AllocLocked(sizeof(MapAllocation)));
void* ptr = MapAligned(size, kChunkSize);
if (!ptr) {
FreeLocked(allocation);
abort(); //throw std::bad_alloc;
abort();  // throw std::bad_alloc;
}
allocation->ptr = ptr;
allocation->size = size;

@@ -412,10 +399,9 @@ void* HeapImpl::MapAlloc(size_t size) {
return ptr;
}
void HeapImpl::MapFree(void *ptr) {
MapAllocation **allocation = &map_allocation_list_;
while (*allocation && (*allocation)->ptr != ptr)
allocation = &(*allocation)->next;
void HeapImpl::MapFree(void* ptr) {
MapAllocation** allocation = &map_allocation_list_;
while (*allocation && (*allocation)->ptr != ptr) allocation = &(*allocation)->next;
assert(*allocation != nullptr);

@@ -425,22 +411,22 @@ void HeapImpl::MapFree(void *ptr) {
*allocation = (*allocation)->next;
}
void HeapImpl::MoveToFreeList(Chunk *chunk, int bucket) {
void HeapImpl::MoveToFreeList(Chunk* chunk, int bucket) {
MoveToList(chunk, &free_chunks_[bucket]);
}
void HeapImpl::MoveToFullList(Chunk *chunk, int bucket) {
void HeapImpl::MoveToFullList(Chunk* chunk, int bucket) {
MoveToList(chunk, &full_chunks_[bucket]);
}
void HeapImpl::MoveToList(Chunk *chunk, LinkedList<Chunk*>* head) {
void HeapImpl::MoveToList(Chunk* chunk, LinkedList<Chunk*>* head) {
// Remove from old list
chunk->node_.remove();
LinkedList<Chunk*> *node = head;
LinkedList<Chunk*>* node = head;
// Insert into new list, sorted by lowest free count
while (node->next() != head && node->data() != nullptr
&& node->data()->free_count() < chunk->free_count())
while (node->next() != head && node->data() != nullptr &&
node->data()->free_count() < chunk->free_count())
node = node->next();
node->insert(chunk->node_);

@@ -469,7 +455,7 @@ void Heap::deallocate(void* ptr) {
impl_->Free(ptr);
}
void Heap::deallocate(HeapImpl*impl, void* ptr) {
void Heap::deallocate(HeapImpl* impl, void* ptr) {
impl->Free(ptr);
}
@@ -31,14 +31,13 @@ extern std::atomic<int> heap_count;
class HeapImpl;
template<typename T>
template <typename T>
class Allocator;
// Non-templated class that implements wraps HeapImpl to keep
// implementation out of the header file
class Heap {
public:
 public:
Heap();
~Heap();

@@ -59,110 +58,99 @@ public:
static void deallocate(HeapImpl* impl, void* ptr);
// Allocate a class of type T
template<class T>
template <class T>
T* allocate() {
return reinterpret_cast<T*>(allocate(sizeof(T)));
}
// Comparators, copied objects will be equal
bool operator ==(const Heap& other) const {
return impl_ == other.impl_;
}
bool operator !=(const Heap& other) const {
return !(*this == other);
}
bool operator==(const Heap& other) const { return impl_ == other.impl_; }
bool operator!=(const Heap& other) const { return !(*this == other); }
// std::unique_ptr wrapper that allocates using allocate and deletes using
// deallocate
template<class T>
template <class T>
using unique_ptr = std::unique_ptr<T, std::function<void(void*)>>;
template<class T, class... Args>
template <class T, class... Args>
unique_ptr<T> make_unique(Args&&... args) {
HeapImpl* impl = impl_;
return unique_ptr<T>(new (allocate<T>()) T(std::forward<Args>(args)...),
[impl](void* ptr) {
reinterpret_cast<T*>(ptr)->~T();
deallocate(impl, ptr);
});
return unique_ptr<T>(new (allocate<T>()) T(std::forward<Args>(args)...), [impl](void* ptr) {
reinterpret_cast<T*>(ptr)->~T();
deallocate(impl, ptr);
});
}
// std::unique_ptr wrapper that allocates using allocate and deletes using
// deallocate
template<class T>
template <class T>
using shared_ptr = std::shared_ptr<T>;
template<class T, class... Args>
template <class T, class... Args>
shared_ptr<T> make_shared(Args&&... args);
protected:
 protected:
HeapImpl* impl_;
bool owns_impl_;
};
// STLAllocator implements the std allocator interface on top of a Heap
template<typename T>
template <typename T>
class STLAllocator {
public:
 public:
using value_type = T;
~STLAllocator() {
}
~STLAllocator() {}
// Construct an STLAllocator on top of a Heap
STLAllocator(const Heap& heap) : // NOLINT, implicit
heap_(heap) {
}
STLAllocator(const Heap& heap)
: // NOLINT, implicit
heap_(heap) {}
// Rebind an STLAllocator from an another STLAllocator
template<typename U>
STLAllocator(const STLAllocator<U>& other) : // NOLINT, implicit
heap_(other.heap_) {
}
template <typename U>
STLAllocator(const STLAllocator<U>& other)
: // NOLINT, implicit
heap_(other.heap_) {}
STLAllocator(const STLAllocator&) = default;
STLAllocator<T>& operator=(const STLAllocator<T>&) = default;
T* allocate(std::size_t n) {
return reinterpret_cast<T*>(heap_.allocate(n * sizeof(T)));
}
T* allocate(std::size_t n) { return reinterpret_cast<T*>(heap_.allocate(n * sizeof(T))); }
void deallocate(T* ptr, std::size_t) {
heap_.deallocate(ptr);
}
void deallocate(T* ptr, std::size_t) { heap_.deallocate(ptr); }
template<typename U>
bool operator ==(const STLAllocator<U>& other) const {
template <typename U>
bool operator==(const STLAllocator<U>& other) const {
return heap_ == other.heap_;
}
template<typename U>
inline bool operator !=(const STLAllocator<U>& other) const {
template <typename U>
inline bool operator!=(const STLAllocator<U>& other) const {
return !(this == other);
}
template<typename U>
template <typename U>
friend class STLAllocator;
protected:
 protected:
Heap heap_;
};
// Allocator extends STLAllocator with some convenience methods for allocating
// a single object and for constructing unique_ptr and shared_ptr objects with
// appropriate deleters.
template<class T>
template <class T>
class Allocator : public STLAllocator<T> {
public:
~Allocator() {}
Allocator(const Heap& other) : // NOLINT, implicit
STLAllocator<T>(other) {
}
Allocator(const Heap& other)
: // NOLINT, implicit
STLAllocator<T>(other) {}
template<typename U>
Allocator(const STLAllocator<U>& other) : // NOLINT, implicit
STLAllocator<T>(other) {
}
template <typename U>
Allocator(const STLAllocator<U>& other)
: // NOLINT, implicit
STLAllocator<T>(other) {}
Allocator(const Allocator&) = default;
Allocator<T>& operator=(const Allocator<T>&) = default;
@@ -171,24 +159,20 @@ class Allocator : public STLAllocator<T> {
using STLAllocator<T>::deallocate;
using STLAllocator<T>::heap_;
T* allocate() {
return STLAllocator<T>::allocate(1);
}
void deallocate(void* ptr) {
heap_.deallocate(ptr);
}
T* allocate() { return STLAllocator<T>::allocate(1); }
void deallocate(void* ptr) { heap_.deallocate(ptr); }
using shared_ptr = Heap::shared_ptr<T>;
template<class... Args>
shared_ptr make_shared(Args&& ...args) {
template <class... Args>
shared_ptr make_shared(Args&&... args) {
return heap_.template make_shared<T>(std::forward<Args>(args)...);
}
using unique_ptr = Heap::unique_ptr<T>;
template<class... Args>
unique_ptr make_unique(Args&& ...args) {
template <class... Args>
unique_ptr make_unique(Args&&... args) {
return heap_.template make_unique<T>(std::forward<Args>(args)...);
}
};

@@ -196,30 +180,31 @@ class Allocator : public STLAllocator<T> {
// std::unique_ptr wrapper that allocates using allocate and deletes using
// deallocate. Implemented outside class definition in order to pass
// Allocator<T> to shared_ptr.
template<class T, class... Args>
template <class T, class... Args>
inline Heap::shared_ptr<T> Heap::make_shared(Args&&... args) {
return std::allocate_shared<T, Allocator<T>, Args...>(Allocator<T>(*this),
std::forward<Args>(args)...);
std::forward<Args>(args)...);
}
namespace allocator {
template<class T>
template <class T>
using vector = std::vector<T, Allocator<T>>;
template<class T>
template <class T>
using list = std::list<T, Allocator<T>>;
template<class Key, class T, class Compare = std::less<Key>>
template <class Key, class T, class Compare = std::less<Key>>
using map = std::map<Key, T, Compare, Allocator<std::pair<const Key, T>>>;
template<class Key, class T, class Hash = std::hash<Key>, class KeyEqual = std::equal_to<Key>>
using unordered_map = std::unordered_map<Key, T, Hash, KeyEqual, Allocator<std::pair<const Key, T>>>;
template <class Key, class T, class Hash = std::hash<Key>, class KeyEqual = std::equal_to<Key>>
using unordered_map =
std::unordered_map<Key, T, Hash, KeyEqual, Allocator<std::pair<const Key, T>>>;
template<class Key, class Hash = std::hash<Key>, class KeyEqual = std::equal_to<Key>>
template <class Key, class Hash = std::hash<Key>, class KeyEqual = std::equal_to<Key>>
using unordered_set = std::unordered_set<Key, Hash, KeyEqual, Allocator<Key>>;
template<class Key, class Compare = std::less<Key>>
template <class Key, class Compare = std::less<Key>>
using set = std::set<Key, Compare, Allocator<Key>>;
using string = std::basic_string<char, std::char_traits<char>, Allocator<char>>;
@@ -114,8 +114,8 @@ bool HeapWalker::DetectLeaks() {
return true;
}
bool HeapWalker::Leaked(allocator::vector<Range>& leaked, size_t limit,
size_t* num_leaks_out, size_t* leak_bytes_out) {
bool HeapWalker::Leaked(allocator::vector<Range>& leaked, size_t limit, size_t* num_leaks_out,
size_t* leak_bytes_out) {
leaked.clear();
size_t num_leaks = 0;

@@ -148,9 +148,9 @@ bool HeapWalker::Leaked(allocator::vector<Range>& leaked, size_t limit,
static bool MapOverPage(void* addr) {
const size_t page_size = sysconf(_SC_PAGE_SIZE);
void *page = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(addr) & ~(page_size-1));
void* page = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(addr) & ~(page_size - 1));
void* ret = mmap(page, page_size, PROT_READ, MAP_ANONYMOUS|MAP_PRIVATE|MAP_FIXED, -1, 0);
void* ret = mmap(page, page_size, PROT_READ, MAP_ANONYMOUS | MAP_PRIVATE | MAP_FIXED, -1, 0);
if (ret == MAP_FAILED) {
MEM_ALOGE("failed to map page at %p: %s", page, strerror(errno));
return false;

@@ -159,7 +159,8 @@ static bool MapOverPage(void* addr) {
return true;
}
void HeapWalker::HandleSegFault(ScopedSignalHandler& handler, int signal, siginfo_t* si, void* /*uctx*/) {
void HeapWalker::HandleSegFault(ScopedSignalHandler& handler, int signal, siginfo_t* si,
void* /*uctx*/) {
uintptr_t addr = reinterpret_cast<uintptr_t>(si->si_addr);
if (addr != walking_ptr_) {
handler.reset();
@@ -34,31 +34,31 @@ struct Range {
bool operator==(const Range& other) const {
return this->begin == other.begin && this->end == other.end;
}
bool operator!=(const Range& other) const {
return !(*this == other);
}
bool operator!=(const Range& other) const { return !(*this == other); }
};
// Comparator for Ranges that returns equivalence for overlapping ranges
struct compare_range {
bool operator()(const Range& a, const Range& b) const {
return a.end <= b.begin;
}
bool operator()(const Range& a, const Range& b) const { return a.end <= b.begin; }
};
class HeapWalker {
public:
explicit HeapWalker(Allocator<HeapWalker> allocator) : allocator_(allocator),
allocations_(allocator), allocation_bytes_(0),
roots_(allocator), root_vals_(allocator),
segv_handler_(allocator), walking_ptr_(0) {
explicit HeapWalker(Allocator<HeapWalker> allocator)
: allocator_(allocator),
allocations_(allocator),
allocation_bytes_(0),
roots_(allocator),
root_vals_(allocator),
segv_handler_(allocator),
walking_ptr_(0) {
valid_allocations_range_.end = 0;
valid_allocations_range_.begin = ~valid_allocations_range_.end;
segv_handler_.install(SIGSEGV,
[=](ScopedSignalHandler& handler, int signal, siginfo_t* siginfo, void* uctx) {
segv_handler_.install(
SIGSEGV, [=](ScopedSignalHandler& handler, int signal, siginfo_t* siginfo, void* uctx) {
this->HandleSegFault(handler, signal, siginfo, uctx);
});
});
}
~HeapWalker() {}

@@ -68,15 +68,14 @@ class HeapWalker {
bool DetectLeaks();
bool Leaked(allocator::vector<Range>&, size_t limit, size_t* num_leaks,
size_t* leak_bytes);
bool Leaked(allocator::vector<Range>&, size_t limit, size_t* num_leaks, size_t* leak_bytes);
size_t Allocations();
size_t AllocationBytes();
template<class F>
template <class F>
void ForEachPtrInRange(const Range& range, F&& f);
template<class F>
template <class F>
void ForEachAllocation(F&& f);
struct AllocationInfo {

@@ -84,7 +83,6 @@ class HeapWalker {
};
private:
void RecurseRoot(const Range& root);
bool WordContainsAllocationPtr(uintptr_t ptr, Range* range, AllocationInfo** info);
void HandleSegFault(ScopedSignalHandler&, int, siginfo_t*, void*);

@@ -103,7 +101,7 @@ class HeapWalker {
uintptr_t walking_ptr_;
};
template<class F>
template <class F>
inline void HeapWalker::ForEachPtrInRange(const Range& range, F&& f) {
uintptr_t begin = (range.begin + (sizeof(uintptr_t) - 1)) & ~(sizeof(uintptr_t) - 1);
// TODO(ccross): we might need to consider a pointer to the end of a buffer

@@ -118,7 +116,7 @@ inline void HeapWalker::ForEachPtrInRange(const Range& range, F&& f) {
}
}
template<class F>
template <class F>
inline void HeapWalker::ForEachAllocation(F&& f) {
for (auto& it : allocations_) {
const Range& range = it.first;
@@ -26,7 +26,7 @@
// as a key in std::unordered_map.
namespace std {
template<>
template <>
struct hash<Leak::Backtrace> {
std::size_t operator()(const Leak::Backtrace& key) const {
std::size_t seed = 0;

@@ -40,7 +40,7 @@ struct hash<Leak::Backtrace> {
}
private:
template<typename T>
template <typename T>
inline void hash_combine(std::size_t& seed, const T& v) const {
std::hash<T> hasher;
seed ^= hasher(v) + 0x9e3779b9 + (seed << 6) + (seed >> 2);

@@ -51,7 +51,7 @@ struct hash<Leak::Backtrace> {
static bool operator==(const Leak::Backtrace& lhs, const Leak::Backtrace& rhs) {
return (lhs.num_frames == rhs.num_frames) &&
memcmp(lhs.frames, rhs.frames, lhs.num_frames * sizeof(lhs.frames[0])) == 0;
memcmp(lhs.frames, rhs.frames, lhs.num_frames * sizeof(lhs.frames[0])) == 0;
}
#endif
@@ -31,11 +31,11 @@ void LeakFolding::ComputeDAG() {
Allocator<SCCInfo> scc_allocator = allocator_;
for (auto& scc_nodes: scc_list) {
for (auto& scc_nodes : scc_list) {
Allocator<SCCInfo>::unique_ptr leak_scc;
leak_scc = scc_allocator.make_unique(scc_allocator);
for (auto& node: scc_nodes) {
for (auto& node : scc_nodes) {
node->ptr->scc = leak_scc.get();
leak_scc->count++;
leak_scc->size += node->ptr->range.size();

@@ -46,7 +46,7 @@ void LeakFolding::ComputeDAG() {
for (auto& it : leak_map_) {
LeakInfo& leak = it.second;
for (auto& ref: leak.node.references_out) {
for (auto& ref : leak.node.references_out) {
if (leak.scc != ref->ptr->scc) {
leak.scc->node.Edge(&ref->ptr->scc->node);
}

@@ -55,17 +55,14 @@ void LeakFolding::ComputeDAG() {
}
void LeakFolding::AccumulateLeaks(SCCInfo* dominator) {
std::function<void(SCCInfo*)> walk(std::allocator_arg, allocator_,
[&](SCCInfo* scc) {
if (scc->accumulator != dominator) {
scc->accumulator = dominator;
dominator->cuumulative_size += scc->size;
dominator->cuumulative_count += scc->count;
scc->node.Foreach([&](SCCInfo* ref) {
walk(ref);
});
}
});
std::function<void(SCCInfo*)> walk(std::allocator_arg, allocator_, [&](SCCInfo* scc) {
if (scc->accumulator != dominator) {
scc->accumulator = dominator;
dominator->cuumulative_size += scc->size;
dominator->cuumulative_count += scc->count;
scc->node.Foreach([&](SCCInfo* ref) { walk(ref); });
}
});
walk(dominator);
}

@@ -73,27 +70,25 @@ bool LeakFolding::FoldLeaks() {
Allocator<LeakInfo> leak_allocator = allocator_;
// Find all leaked allocations insert them into leak_map_ and leak_graph_
heap_walker_.ForEachAllocation(
[&](const Range& range, HeapWalker::AllocationInfo& allocation) {
if (!allocation.referenced_from_root) {
auto it = leak_map_.emplace(std::piecewise_construct,
std::forward_as_tuple(range),
std::forward_as_tuple(range, allocator_));
LeakInfo& leak = it.first->second;
leak_graph_.push_back(&leak.node);
}
});
heap_walker_.ForEachAllocation([&](const Range& range, HeapWalker::AllocationInfo& allocation) {
if (!allocation.referenced_from_root) {
auto it = leak_map_.emplace(std::piecewise_construct, std::forward_as_tuple(range),
std::forward_as_tuple(range, allocator_));
LeakInfo& leak = it.first->second;
leak_graph_.push_back(&leak.node);
}
});
// Find references between leaked allocations and connect them in leak_graph_
for (auto& it : leak_map_) {
LeakInfo& leak = it.second;
heap_walker_.ForEachPtrInRange(leak.range,
[&](Range& ptr_range, HeapWalker::AllocationInfo* ptr_info) {
if (!ptr_info->referenced_from_root) {
LeakInfo* ptr_leak = &leak_map_.at(ptr_range);
leak.node.Edge(&ptr_leak->node);
}
});
[&](Range& ptr_range, HeapWalker::AllocationInfo* ptr_info) {
if (!ptr_info->referenced_from_root) {
LeakInfo* ptr_leak = &leak_map_.at(ptr_range);
leak.node.Edge(&ptr_leak->node);
}
});
}
// Convert the cyclic graph to a DAG by grouping strongly connected components

@@ -110,8 +105,8 @@ bool LeakFolding::FoldLeaks() {
return true;
}
bool LeakFolding::Leaked(allocator::vector<LeakFolding::Leak>& leaked,
size_t* num_leaks_out, size_t* leak_bytes_out) {
bool LeakFolding::Leaked(allocator::vector<LeakFolding::Leak>& leaked, size_t* num_leaks_out,
size_t* leak_bytes_out) {
size_t num_leaks = 0;
size_t leak_bytes = 0;
for (auto& it : leak_map_) {

@@ -123,9 +118,8 @@ bool LeakFolding::Leaked(allocator::vector<LeakFolding::Leak>& leaked,
for (auto& it : leak_map_) {
const LeakInfo& leak = it.second;
if (leak.scc->dominator) {
leaked.emplace_back(Leak{leak.range,
leak.scc->cuumulative_count - 1,
leak.scc->cuumulative_size - leak.range.size()});
leaked.emplace_back(Leak{leak.range, leak.scc->cuumulative_count - 1,
leak.scc->cuumulative_size - leak.range.size()});
}
}
@@ -22,8 +22,11 @@
class LeakFolding {
public:
LeakFolding(Allocator<void> allocator, HeapWalker& heap_walker)
: allocator_(allocator), heap_walker_(heap_walker),
leak_map_(allocator), leak_graph_(allocator), leak_scc_(allocator) {}
: allocator_(allocator),
heap_walker_(heap_walker),
leak_map_(allocator),
leak_graph_(allocator),
leak_scc_(allocator) {}
bool FoldLeaks();

@@ -33,8 +36,7 @@ class LeakFolding {
size_t referenced_size;
};
bool Leaked(allocator::vector<Leak>& leaked,
size_t* num_leaks_out, size_t* leak_bytes_out);
bool Leaked(allocator::vector<Leak>& leaked, size_t* num_leaks_out, size_t* leak_bytes_out);
private:
DISALLOW_COPY_AND_ASSIGN(LeakFolding);

@@ -54,9 +56,15 @@ class LeakFolding {
bool dominator;
SCCInfo* accumulator;
explicit SCCInfo(Allocator<SCCInfo> allocator) : node(this, allocator),
count(0), size(0), cuumulative_count(0), cuumulative_size(0),
dominator(false), accumulator(nullptr) {}
explicit SCCInfo(Allocator<SCCInfo> allocator)
: node(this, allocator),
count(0),
size(0),
cuumulative_count(0),
cuumulative_size(0),
dominator(false),
accumulator(nullptr) {}
private:
SCCInfo(SCCInfo&&) = delete;
DISALLOW_COPY_AND_ASSIGN(SCCInfo);

@@ -71,8 +79,7 @@ class LeakFolding {
SCCInfo* scc;
LeakInfo(const Range& range, Allocator<LeakInfo> allocator)
: node(this, allocator), range(range),
scc(nullptr) {}
: node(this, allocator), range(range), scc(nullptr) {}
private:
DISALLOW_COPY_AND_ASSIGN(LeakInfo);

@@ -86,4 +93,4 @@ class LeakFolding {
allocator::vector<Allocator<SCCInfo>::unique_ptr> leak_scc_;
};
#endif // LIBMEMUNREACHABLE_LEAK_FOLDING_H_
#endif  // LIBMEMUNREACHABLE_LEAK_FOLDING_H_
@@ -22,8 +22,8 @@
#include "log.h"
bool LeakPipe::SendFd(int sock, int fd) {
struct msghdr hdr{};
struct iovec iov{};
struct msghdr hdr {};
struct iovec iov {};
unsigned int data = 0xfdfdfdfd;
alignas(struct cmsghdr) char cmsgbuf[CMSG_SPACE(sizeof(int))];

@@ -56,8 +56,8 @@ bool LeakPipe::SendFd(int sock, int fd) {
}
int LeakPipe::ReceiveFd(int sock) {
struct msghdr hdr{};
struct iovec iov{};
struct msghdr hdr {};
struct iovec iov {};
unsigned int data;
alignas(struct cmsghdr) char cmsgbuf[CMSG_SPACE(sizeof(int))];
@@ -34,15 +34,13 @@
class LeakPipe {
public:
LeakPipe() {
int ret = socketpair(AF_UNIX, SOCK_STREAM|SOCK_CLOEXEC, 0, sv_);
int ret = socketpair(AF_UNIX, SOCK_STREAM | SOCK_CLOEXEC, 0, sv_);
if (ret < 0) {
MEM_LOG_ALWAYS_FATAL("failed to create socketpair: %s", strerror(errno));
}
}
~LeakPipe() {
Close();
}
~LeakPipe() { Close(); }
void Close() {
close(sv_[0]);

@@ -77,13 +75,9 @@ class LeakPipe {
public:
LeakPipeBase() : fd_(-1) {}
~LeakPipeBase() {
Close();
}
~LeakPipeBase() { Close(); }
void SetFd(int fd) {
fd_ = fd;
}
void SetFd(int fd) { fd_ = fd; }
void Close() {
close(fd_);

@@ -101,7 +95,7 @@ class LeakPipe {
public:
using LeakPipeBase::LeakPipeBase;
template<typename T>
template <typename T>
bool Send(const T& value) {
ssize_t ret = TEMP_FAILURE_RETRY(write(fd_, &value, sizeof(T)));
if (ret < 0) {

@@ -115,7 +109,7 @@ class LeakPipe {
return true;
}
template<class T, class Alloc = std::allocator<T>>
template <class T, class Alloc = std::allocator<T>>
bool SendVector(const std::vector<T, Alloc>& vector) {
size_t size = vector.size() * sizeof(T);
if (!Send(size)) {

@@ -139,7 +133,7 @@ class LeakPipe {
public:
using LeakPipeBase::LeakPipeBase;
template<typename T>
template <typename T>
bool Receive(T* value) {
ssize_t ret = TEMP_FAILURE_RETRY(read(fd_, reinterpret_cast<void*>(value), sizeof(T)));
if (ret < 0) {

@@ -153,7 +147,7 @@ class LeakPipe {
return true;
}
template<class T, class Alloc = std::allocator<T>>
template <class T, class Alloc = std::allocator<T>>
bool ReceiveVector(std::vector<T, Alloc>& vector) {
size_t size = 0;
if (!Receive(&size)) {

@@ -178,16 +172,11 @@ class LeakPipe {
return true;
}
};
LeakPipeReceiver& Receiver() {
return receiver_;
}
LeakPipeReceiver& Receiver() { return receiver_; }
LeakPipeSender& Sender() {
return sender_;
}
LeakPipeSender& Sender() { return sender_; }
private:
LeakPipeReceiver receiver_;

@@ -198,4 +187,4 @@ class LeakPipe {
int sv_[2];
};
#endif // LIBMEMUNREACHABLE_LEAK_PIPE_H_
#endif  // LIBMEMUNREACHABLE_LEAK_PIPE_H_
@@ -23,8 +23,8 @@
#include "LineBuffer.h"
LineBuffer::LineBuffer(int fd, char* buffer, size_t buffer_len) : fd_(fd), buffer_(buffer), buffer_len_(buffer_len) {
}
LineBuffer::LineBuffer(int fd, char* buffer, size_t buffer_len)
: fd_(fd), buffer_(buffer), buffer_len_(buffer_len) {}
bool LineBuffer::GetLine(char** line, size_t* line_len) {
while (true) {
@@ -33,4 +33,4 @@ class LineBuffer {
size_t bytes_ = 0;
};
#endif // _LIBMEMUNREACHABLE_LINE_BUFFER_H
#endif  // _LIBMEMUNREACHABLE_LINE_BUFFER_H
@@ -17,44 +17,43 @@
#ifndef LIBMEMUNREACHABLE_LINKED_LIST_H_
#define LIBMEMUNREACHABLE_LINKED_LIST_H_
template<class T>
template <class T>
class LinkedList {
public:
LinkedList() : next_(this), prev_(this), data_() {}
explicit LinkedList(T data) : LinkedList() {
data_ = data;
}
~LinkedList() {}
void insert(LinkedList<T>& node) {
assert(node.empty());
node.next_ = this->next_;
node.next_->prev_ = &node;
this->next_ = &node;
node.prev_ = this;
}
void remove() {
this->next_->prev_ = this->prev_;
this->prev_->next_ = this->next_;
this->next_ = this;
this->prev_ = this;
}
T data() { return data_; }
bool empty() { return next_ == this && prev_ == this; }
LinkedList<T> *next() { return next_; }
private:
LinkedList<T> *next_;
LinkedList<T> *prev_;
T data_;
 public:
LinkedList() : next_(this), prev_(this), data_() {}
explicit LinkedList(T data) : LinkedList() { data_ = data; }
~LinkedList() {}
void insert(LinkedList<T>& node) {
assert(node.empty());
node.next_ = this->next_;
node.next_->prev_ = &node;
this->next_ = &node;
node.prev_ = this;
}
void remove() {
this->next_->prev_ = this->prev_;
this->prev_->next_ = this->next_;
this->next_ = this;
this->prev_ = this;
}
T data() { return data_; }
bool empty() { return next_ == this && prev_ == this; }
LinkedList<T>* next() { return next_; }
 private:
LinkedList<T>* next_;
LinkedList<T>* prev_;
T data_;
};
template<class T>
template <class T>
class LinkedListHead {
public:
LinkedListHead() : node_() {}
~LinkedListHead() {}
 public:
LinkedListHead() : node_() {}
~LinkedListHead() {}
private:
LinkedList<T> node_;
 private:
LinkedList<T> node_;
};
#endif
@@ -19,12 +19,12 @@
#include <functional>
#include <iomanip>
#include <mutex>
#include <string>
#include <sstream>
#include <string>
#include <unordered_map>
#include <backtrace.h>
#include <android-base/macros.h>
#include <backtrace.h>
#include "Allocator.h"
#include "HeapWalker.h"

@@ -37,9 +37,9 @@
#include "Semaphore.h"
#include "ThreadCapture.h"
#include "memunreachable/memunreachable.h"
#include "bionic.h"
#include "log.h"
#include "memunreachable/memunreachable.h"
const size_t Leak::contents_length;

@@ -47,20 +47,21 @@ using namespace std::chrono_literals;
class MemUnreachable {
public:
MemUnreachable(pid_t pid, Allocator<void> allocator) : pid_(pid), allocator_(allocator),
heap_walker_(allocator_) {}
MemUnreachable(pid_t pid, Allocator<void> allocator)
: pid_(pid), allocator_(allocator), heap_walker_(allocator_) {}
bool CollectAllocations(const allocator::vector<ThreadInfo>& threads,
const allocator::vector<Mapping>& mappings);
bool GetUnreachableMemory(allocator::vector<Leak>& leaks, size_t limit,
size_t* num_leaks, size_t* leak_bytes);
const allocator::vector<Mapping>& mappings);
bool GetUnreachableMemory(allocator::vector<Leak>& leaks, size_t limit, size_t* num_leaks,
size_t* leak_bytes);
size_t Allocations() { return heap_walker_.Allocations(); }
size_t AllocationBytes() { return heap_walker_.AllocationBytes(); }
private:
bool ClassifyMappings(const allocator::vector<Mapping>& mappings,
allocator::vector<Mapping>& heap_mappings,
allocator::vector<Mapping>& anon_mappings,
allocator::vector<Mapping>& globals_mappings,
allocator::vector<Mapping>& stack_mappings);
allocator::vector<Mapping>& heap_mappings,
allocator::vector<Mapping>& anon_mappings,
allocator::vector<Mapping>& globals_mappings,
allocator::vector<Mapping>& stack_mappings);
DISALLOW_COPY_AND_ASSIGN(MemUnreachable);
pid_t pid_;
Allocator<void> allocator_;

@@ -68,16 +69,17 @@ class MemUnreachable {
};
static void HeapIterate(const Mapping& heap_mapping,
const std::function<void(uintptr_t, size_t)>& func) {
const std::function<void(uintptr_t, size_t)>& func) {
malloc_iterate(heap_mapping.begin, heap_mapping.end - heap_mapping.begin,
[](uintptr_t base, size_t size, void* arg) {
auto f = reinterpret_cast<const std::function<void(uintptr_t, size_t)>*>(arg);
(*f)(base, size);
}, const_cast<void*>(reinterpret_cast<const void*>(&func)));
[](uintptr_t base, size_t size, void* arg) {
auto f = reinterpret_cast<const std::function<void(uintptr_t, size_t)>*>(arg);
(*f)(base, size);
},
const_cast<void*>(reinterpret_cast<const void*>(&func)));
}
bool MemUnreachable::CollectAllocations(const allocator::vector<ThreadInfo>& threads,
const allocator::vector<Mapping>& mappings) {
const allocator::vector<Mapping>& mappings) {
MEM_ALOGI("searching process %d for allocations", pid_);
allocator::vector<Mapping> heap_mappings{mappings};
allocator::vector<Mapping> anon_mappings{mappings};

@@ -118,8 +120,8 @@ bool MemUnreachable::CollectAllocations(const allocator::vector<ThreadInfo>& thr
return true;
}
bool MemUnreachable::GetUnreachableMemory(allocator::vector<Leak>& leaks,
size_t limit, size_t* num_leaks, size_t* leak_bytes) {
bool MemUnreachable::GetUnreachableMemory(allocator::vector<Leak>& leaks, size_t limit,
size_t* num_leaks, size_t* leak_bytes) {
MEM_ALOGI("sweeping process %d for unreachable memory", pid_);
leaks.clear();

@@ -127,7 +129,6 @@ bool MemUnreachable::GetUnreachableMemory(allocator::vector<Leak>& leaks,
return false;
}
allocator::vector<Range> leaked1{allocator_};
heap_walker_.Leaked(leaked1, 0, num_leaks, leak_bytes);
@@ -152,12 +153,12 @@ bool MemUnreachable::GetUnreachableMemory(allocator::vector<Leak>& leaks,
// in backtrace_map.
leaks.reserve(leaked.size());
for (auto& it: leaked) {
for (auto& it : leaked) {
leaks.emplace_back();
Leak* leak = &leaks.back();
ssize_t num_backtrace_frames = malloc_backtrace(reinterpret_cast<void*>(it.range.begin),
leak->backtrace.frames, leak->backtrace.max_frames);
ssize_t num_backtrace_frames = malloc_backtrace(
reinterpret_cast<void*>(it.range.begin), leak->backtrace.frames, leak->backtrace.max_frames);
if (num_backtrace_frames > 0) {
leak->backtrace.num_frames = num_backtrace_frames;

@@ -183,14 +184,13 @@ bool MemUnreachable::GetUnreachableMemory(allocator::vector<Leak>& leaks,
leak->referenced_size = it.referenced_size;
leak->total_size = leak->size + leak->referenced_size;
memcpy(leak->contents, reinterpret_cast<void*>(it.range.begin),
std::min(leak->size, Leak::contents_length));
std::min(leak->size, Leak::contents_length));
}
MEM_ALOGI("folding done");
std::sort(leaks.begin(), leaks.end(), [](const Leak& a, const Leak& b) {
return a.total_size > b.total_size;
});
std::sort(leaks.begin(), leaks.end(),
[](const Leak& a, const Leak& b) { return a.total_size > b.total_size; });
if (leaks.size() > limit) {
leaks.resize(limit);

@@ -205,11 +205,10 @@ static bool has_prefix(const allocator::string& s, const char* prefix) {
}
bool MemUnreachable::ClassifyMappings(const allocator::vector<Mapping>& mappings,
allocator::vector<Mapping>& heap_mappings,
allocator::vector<Mapping>& anon_mappings,
allocator::vector<Mapping>& globals_mappings,
allocator::vector<Mapping>& stack_mappings)
{
allocator::vector<Mapping>& heap_mappings,
allocator::vector<Mapping>& anon_mappings,
allocator::vector<Mapping>& globals_mappings,
allocator::vector<Mapping>& stack_mappings) {
heap_mappings.clear();
anon_mappings.clear();
globals_mappings.clear();

@@ -245,7 +244,8 @@ bool MemUnreachable::ClassifyMappings(const allocator::vector<Mapping>& mappings
stack_mappings.emplace_back(*it);
} else if (mapping_name.size() == 0) {
globals_mappings.emplace_back(*it);
} else if (has_prefix(mapping_name, "[anon:") && mapping_name != "[anon:leak_detector_malloc]") {
} else if (has_prefix(mapping_name, "[anon:") &&
mapping_name != "[anon:leak_detector_malloc]") {
// TODO(ccross): it would be nice to treat named anonymous mappings as
// possible leaks, but naming something in a .bss or .data section makes
// it impossible to distinguish them from mmaped and then named mappings.

@@ -256,7 +256,7 @@ bool MemUnreachable::ClassifyMappings(const allocator::vector<Mapping>& mappings
return true;
}
template<typename T>
template <typename T>
static inline const char* plural(T val) {
return (val == 1) ? "" : "s";
}

@@ -403,7 +403,6 @@ bool GetUnreachableMemory(UnreachableMemoryInfo& info, size_t limit) {
}
std::string Leak::ToString(bool log_contents) const {
std::ostringstream oss;
oss << "  " << std::dec << size;

@@ -492,8 +491,8 @@ std::string UnreachableMemoryInfo::ToString(bool log_contents) const {
oss << std::endl;
for (auto it = leaks.begin(); it != leaks.end(); it++) {
oss << it->ToString(log_contents);
oss << std::endl;
oss << it->ToString(log_contents);
oss << std::endl;
}
return oss.str();

@@ -523,7 +522,6 @@ bool LogUnreachableMemory(bool log_contents, size_t limit) {
return true;
}
bool NoLeaks() {
UnreachableMemoryInfo info;
if (!GetUnreachableMemory(info, 0)) {
@@ -14,8 +14,8 @@
* limitations under the License.
*/
#include <inttypes.h>
#include <fcntl.h>
#include <inttypes.h>
#include <string.h>
#include <unistd.h>

@@ -42,8 +42,8 @@ bool ProcessMappings(pid_t pid, allocator::vector<Mapping>& mappings) {
int name_pos;
char perms[5];
Mapping mapping{};
if (sscanf(line, "%" SCNxPTR "-%" SCNxPTR " %4s %*x %*x:%*x %*d %n",
&mapping.begin, &mapping.end, perms, &name_pos) == 3) {
if (sscanf(line, "%" SCNxPTR "-%" SCNxPTR " %4s %*x %*x:%*x %*d %n", &mapping.begin,
&mapping.end, perms, &name_pos) == 3) {
if (perms[0] == 'r') {
mapping.read = true;
}
@@ -33,4 +33,4 @@ struct Mapping {
// the line data.
bool ProcessMappings(pid_t pid, allocator::vector<Mapping>& mappings);
#endif // LIBMEMUNREACHABLE_PROCESS_MAPPING_H_
#endif  // LIBMEMUNREACHABLE_PROCESS_MAPPING_H_
@@ -23,17 +23,17 @@
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>
#include "android-base/macros.h"
#include "PtracerThread.h"
#include "anon_vma_naming.h"
#include "log.h"
#include "PtracerThread.h"
class Stack {
public:

@@ -41,7 +41,7 @@ class Stack {
int prot = PROT_READ | PROT_WRITE;
int flags = MAP_PRIVATE | MAP_ANONYMOUS;
page_size_ = sysconf(_SC_PAGE_SIZE);
size_ += page_size_*2; // guard pages
size_ += page_size_ * 2;  // guard pages
base_ = mmap(NULL, size_, prot, flags, -1, 0);
if (base_ == MAP_FAILED) {
base_ = NULL;

@@ -52,22 +52,20 @@ class Stack {
mprotect(base_, page_size_, PROT_NONE);
mprotect(top(), page_size_, PROT_NONE);
};
~Stack() {
munmap(base_, size_);
};
~Stack() { munmap(base_, size_); };
void* top() {
return reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(base_) + size_ - page_size_);
};
private:
DISALLOW_COPY_AND_ASSIGN(Stack);
void *base_;
void* base_;
size_t size_;
size_t page_size_;
};
PtracerThread::PtracerThread(const std::function<int()>& func) :
child_pid_(0) {
PtracerThread::PtracerThread(const std::function<int()>& func) : child_pid_(0) {
stack_ = std::make_unique<Stack>(PTHREAD_STACK_MIN);
if (stack_->top() == nullptr) {
MEM_LOG_ALWAYS_FATAL("failed to mmap child stack: %s", strerror(errno));

@@ -93,14 +91,13 @@ bool PtracerThread::Start() {
std::unique_lock<std::mutex> lk(m_);
// Convert from void(*)(void*) to lambda with captures
auto proxy = [](void *arg) -> int {
auto proxy = [](void* arg) -> int {
prctl(PR_SET_NAME, "libmemunreachable ptrace thread");
return (*reinterpret_cast<std::function<int()>*>(arg))();
};
child_pid_ = clone(proxy, stack_->top(),
CLONE_VM|CLONE_FS|CLONE_FILES/*|CLONE_UNTRACED*/,
reinterpret_cast<void*>(&func_));
child_pid_ = clone(proxy, stack_->top(), CLONE_VM | CLONE_FS | CLONE_FILES /*|CLONE_UNTRACED*/,
reinterpret_cast<void*>(&func_));
if (child_pid_ < 0) {
MEM_ALOGE("failed to clone child: %s", strerror(errno));
return false;
@@ -36,6 +36,7 @@ class PtracerThread {
~PtracerThread();
bool Start();
int Join();
private:
void SetTracer(pid_t);
void ClearTracer();

@@ -47,4 +48,4 @@ class PtracerThread {
pid_t child_pid_;
};
#endif // LIBMEMUNREACHABLE_PTRACER_THREAD_H_
#endif  // LIBMEMUNREACHABLE_PTRACER_THREAD_H_
@@ -27,11 +27,9 @@ class ScopedAlarm {
public:
ScopedAlarm(std::chrono::microseconds us, std::function<void()> func) {
func_ = func;
struct sigaction oldact{};
struct sigaction act{};
act.sa_handler = [](int) {
ScopedAlarm::func_();
};
struct sigaction oldact {};
struct sigaction act {};
act.sa_handler = [](int) { ScopedAlarm::func_(); };
sigaction(SIGALRM, &act, &oldact);
std::chrono::seconds s = std::chrono::duration_cast<std::chrono::seconds>(us);

@@ -43,10 +41,11 @@ class ScopedAlarm {
~ScopedAlarm() {
itimerval t = itimerval{};
setitimer(ITIMER_REAL, &t, NULL);
struct sigaction act{};
struct sigaction act {};
act.sa_handler = SIG_DFL;
sigaction(SIGALRM, &act, NULL);
}
private:
static std::function<void()> func_;
};
@@ -21,16 +21,14 @@
#include "android-base/macros.h"
#include "ScopedAlarm.h"
#include "bionic.h"
#include "log.h"
#include "ScopedAlarm.h"
class DisableMallocGuard{
class DisableMallocGuard {
public:
DisableMallocGuard() : disabled_(false){}
~DisableMallocGuard() {
Enable();
}
DisableMallocGuard() : disabled_(false) {}
~DisableMallocGuard() { Enable(); }
void Disable() {
if (!disabled_) {

@@ -45,6 +43,7 @@ class DisableMallocGuard{
disabled_ = false;
}
}
private:
DISALLOW_COPY_AND_ASSIGN(DisableMallocGuard);
bool disabled_;

@@ -59,13 +58,9 @@ class DisableMallocGuard{
// here.
class ScopedDisableMalloc {
public:
ScopedDisableMalloc() {
disable_malloc_.Disable();
}
ScopedDisableMalloc() { disable_malloc_.Disable(); }
~ScopedDisableMalloc() {
disable_malloc_.Enable();
}
~ScopedDisableMalloc() { disable_malloc_.Enable(); }
private:
DISALLOW_COPY_AND_ASSIGN(ScopedDisableMalloc);

@@ -74,18 +69,15 @@ class ScopedDisableMalloc {
class ScopedDisableMallocTimeout {
public:
explicit ScopedDisableMallocTimeout(std::chrono::milliseconds timeout = std::chrono::milliseconds(2000)) :
timeout_(timeout), timed_out_(false), disable_malloc_() {
explicit ScopedDisableMallocTimeout(
std::chrono::milliseconds timeout = std::chrono::milliseconds(2000))
: timeout_(timeout), timed_out_(false), disable_malloc_() {
Disable();
}
~ScopedDisableMallocTimeout() {
Enable();
}
~ScopedDisableMallocTimeout() { Enable(); }
bool timed_out() {
return timed_out_;
}
bool timed_out() { return timed_out_; }
void Enable() {
disable_malloc_.Enable();

@@ -110,4 +102,4 @@ class ScopedDisableMallocTimeout {
DisableMallocGuard disable_malloc_;
};
#endif // LIBMEMUNREACHABLE_SCOPED_DISABLE_MALLOC_H_
#endif  // LIBMEMUNREACHABLE_SCOPED_DISABLE_MALLOC_H_
@@ -29,28 +29,22 @@ class ScopedPipe {
MEM_LOG_ALWAYS_FATAL("failed to open pipe");
}
}
~ScopedPipe() {
Close();
}
~ScopedPipe() { Close(); }
ScopedPipe(ScopedPipe&& other) {
SetReceiver(other.ReleaseReceiver());
SetSender(other.ReleaseSender());
}
ScopedPipe& operator = (ScopedPipe&& other) {
ScopedPipe& operator=(ScopedPipe&& other) {
SetReceiver(other.ReleaseReceiver());
SetSender(other.ReleaseSender());
return *this;
}
void CloseReceiver() {
close(ReleaseReceiver());
}
void CloseReceiver() { close(ReleaseReceiver()); }
void CloseSender() {
close(ReleaseSender());
}
void CloseSender() { close(ReleaseSender()); }
void Close() {
CloseReceiver();
@@ -31,9 +31,7 @@ class ScopedSignalHandler {
using Fn = std::function<void(ScopedSignalHandler&, int, siginfo_t*, void*)>;
explicit ScopedSignalHandler(Allocator<Fn> allocator) : allocator_(allocator), signal_(-1) {}
~ScopedSignalHandler() {
reset();
}
~ScopedSignalHandler() { reset(); }
template <class F>
void install(int signal, F&& f) {

@@ -65,7 +63,6 @@ class ScopedSignalHandler {
}
}
private:
using SignalFn = std::function<void(int, siginfo_t*, void*)>;
DISALLOW_COPY_AND_ASSIGN(ScopedSignalHandler);

@@ -77,4 +74,4 @@ class ScopedSignalHandler {
static SignalFn handler_;
};
#endif // LIBMEMUNREACHABLE_SCOPED_SIGNAL_HANDLER_H_
#endif  // LIBMEMUNREACHABLE_SCOPED_SIGNAL_HANDLER_H_
@@ -29,7 +29,7 @@ class Semaphore {

  void Wait(std::chrono::milliseconds ms) {
    std::unique_lock<std::mutex> lk(m_);
    cv_.wait_for(lk, ms, [&]{
    cv_.wait_for(lk, ms, [&] {
      if (count_ > 0) {
        count_--;
        return true;

@@ -44,6 +44,7 @@ class Semaphore {
    }
    cv_.notify_one();
  }

 private:
  DISALLOW_COPY_AND_ASSIGN(Semaphore);


@@ -52,5 +53,4 @@ class Semaphore {
  std::condition_variable cv_;
};


#endif // LIBMEMUNREACHABLE_SEMAPHORE_H_
#endif  // LIBMEMUNREACHABLE_SEMAPHORE_H_

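The Wait() hunk above shows the usual condition-variable predicate pattern: wait_for with a lambda that consumes one unit of the count. A self-contained sketch of that pattern for reference; TinySemaphore and its members are illustrative names, not the library's class:

#include <chrono>
#include <condition_variable>
#include <mutex>

// Minimal counting semaphore in the same style as the class above.
class TinySemaphore {
 public:
  explicit TinySemaphore(int count = 0) : count_(count) {}

  // Returns false if the timeout expired before a unit became available.
  bool Wait(std::chrono::milliseconds ms) {
    std::unique_lock<std::mutex> lk(m_);
    return cv_.wait_for(lk, ms, [&] {
      if (count_ > 0) {
        count_--;
        return true;
      }
      return false;
    });
  }

  void Post() {
    {
      std::lock_guard<std::mutex> lk(m_);
      count_++;
    }
    cv_.notify_one();
  }

 private:
  int count_;
  std::mutex m_;
  std::condition_variable cv_;
};
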
@@ -24,7 +24,7 @@

#include "Allocator.h"

template<class T>
template <class T>
class Node {
 public:
  allocator::set<Node<T>*> references_in;

@@ -34,39 +34,41 @@ class Node {

  T* ptr;

  Node(T* ptr, Allocator<Node> allocator) : references_in(allocator), references_out(allocator),
      ptr(ptr) {};
  Node(T* ptr, Allocator<Node> allocator)
      : references_in(allocator), references_out(allocator), ptr(ptr){};
  Node(Node&& rhs) = default;
  void Edge(Node<T>* ref) {
    references_out.emplace(ref);
    ref->references_in.emplace(this);
  }
  template<class F>
  template <class F>
  void Foreach(F&& f) {
    for (auto& node: references_out) {
    for (auto& node : references_out) {
      f(node->ptr);
    }
  }

 private:
  DISALLOW_COPY_AND_ASSIGN(Node<T>);
};

template<class T>
template <class T>
using Graph = allocator::vector<Node<T>*>;

template<class T>
template <class T>
using SCC = allocator::vector<Node<T>*>;

template<class T>
template <class T>
using SCCList = allocator::vector<SCC<T>>;

template<class T>
template <class T>
class TarjanAlgorithm {
 public:
  explicit TarjanAlgorithm(Allocator<void> allocator) : index_(0),
      stack_(allocator), components_(allocator) {}
  explicit TarjanAlgorithm(Allocator<void> allocator)
      : index_(0), stack_(allocator), components_(allocator) {}

  void Execute(Graph<T>& graph, SCCList<T>& out);

 private:
  static constexpr size_t UNDEFINED_INDEX = static_cast<size_t>(-1);
  void Tarjan(Node<T>* vertex, Graph<T>& graph);

@@ -76,17 +78,17 @@ class TarjanAlgorithm {
  SCCList<T> components_;
};

template<class T>
template <class T>
void TarjanAlgorithm<T>::Execute(Graph<T>& graph, SCCList<T>& out) {
  stack_.clear();
  components_.clear();
  index_ = 0;
  for (auto& it: graph) {
  for (auto& it : graph) {
    it->index = UNDEFINED_INDEX;
    it->lowlink = UNDEFINED_INDEX;
  }

  for (auto& it: graph) {
  for (auto& it : graph) {
    if (it->index == UNDEFINED_INDEX) {
      Tarjan(it, graph);
    }

@@ -94,14 +96,14 @@ void TarjanAlgorithm<T>::Execute(Graph<T>& graph, SCCList<T>& out) {
  out.swap(components_);
}

template<class T>
template <class T>
void TarjanAlgorithm<T>::Tarjan(Node<T>* vertex, Graph<T>& graph) {
  assert(vertex->index == UNDEFINED_INDEX);
  vertex->index = index_;
  vertex->lowlink = index_;
  index_++;
  stack_.push_back(vertex);
  for (auto& it: vertex->references_out) {
  for (auto& it : vertex->references_out) {
    Node<T>* vertex_next = it;
    if (vertex_next->index == UNDEFINED_INDEX) {
      Tarjan(vertex_next, graph);

@@ -123,10 +125,10 @@ void TarjanAlgorithm<T>::Tarjan(Node<T>* vertex, Graph<T>& graph) {
  }
}

template<class T>
template <class T>
void Tarjan(Graph<T>& graph, SCCList<T>& out) {
  TarjanAlgorithm<T> tarjan{graph.get_allocator()};
  tarjan.Execute(graph, out);
}

#endif // LIBMEMUNREACHABLE_TARJAN_H_
#endif  // LIBMEMUNREACHABLE_TARJAN_H_

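For orientation, the header above exposes a small API: build Node objects, connect them with Edge(), put the node pointers into a Graph, and call the free Tarjan() helper to collect strongly connected components. A usage sketch under the assumption that Allocator.h and Tarjan.h from this library are available; ExampleSccs and the three integers are invented for illustration:

#include "Allocator.h"
#include "Tarjan.h"

void ExampleSccs() {  // hypothetical helper
  Heap heap;
  Allocator<Node<int>> node_allocator(heap);

  int a = 0, b = 0, c = 0;
  Node<int> na(&a, node_allocator), nb(&b, node_allocator), nc(&c, node_allocator);
  na.Edge(&nb);
  nb.Edge(&na);  // a <-> b form one strongly connected component
  nb.Edge(&nc);  // c is reachable but forms its own component

  Graph<int> graph(heap);
  graph.push_back(&na);
  graph.push_back(&nb);
  graph.push_back(&nc);

  SCCList<int> sccs(heap);
  Tarjan(graph, sccs);  // expected: one SCC containing {a, b} and one containing {c}
}
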
@@ -21,13 +21,13 @@
#include <fcntl.h>
#include <limits.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/ptrace.h>
#include <sys/stat.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <sys/wait.h>
#include <unistd.h>

#include <map>
#include <memory>

@@ -50,12 +50,12 @@
// Convert a pid > 0 to a string. sprintf might allocate, so we can't use it.
// Returns a pointer somewhere in buf to a null terminated string, or NULL
// on error.
static char *pid_to_str(char *buf, size_t len, pid_t pid) {
static char* pid_to_str(char* buf, size_t len, pid_t pid) {
  if (pid <= 0) {
    return nullptr;
  }

  char *ptr = buf + len - 1;
  char* ptr = buf + len - 1;
  *ptr = 0;
  while (pid > 0) {
    ptr--;

@@ -79,6 +79,7 @@ class ThreadCaptureImpl {
  bool ReleaseThread(pid_t tid);
  bool CapturedThreadInfo(ThreadInfoList& threads);
  void InjectTestFunc(std::function<void(pid_t)>&& f) { inject_test_func_ = f; }

 private:
  int CaptureThread(pid_t tid);
  bool ReleaseThread(pid_t tid, unsigned int signal);

@@ -92,9 +93,8 @@ class ThreadCaptureImpl {
  std::function<void(pid_t)> inject_test_func_;
};

ThreadCaptureImpl::ThreadCaptureImpl(pid_t pid, Allocator<ThreadCaptureImpl>& allocator) :
    captured_threads_(allocator), allocator_(allocator), pid_(pid) {
}
ThreadCaptureImpl::ThreadCaptureImpl(pid_t pid, Allocator<ThreadCaptureImpl>& allocator)
    : captured_threads_(allocator), allocator_(allocator), pid_(pid) {}

bool ThreadCaptureImpl::ListThreads(TidList& tids) {
  tids.clear();

@@ -115,11 +115,11 @@ bool ThreadCaptureImpl::ListThreads(TidList& tids) {
  }

  struct linux_dirent64 {
    uint64_t d_ino;
    int64_t d_off;
    uint16_t d_reclen;
    char d_type;
    char d_name[];
    uint64_t d_ino;
    int64_t d_off;
    uint16_t d_reclen;
    char d_type;
    char d_name[];
  } __attribute((packed));
  char dirent_buf[4096];
  ssize_t nread;

@@ -209,7 +209,7 @@ int ThreadCaptureImpl::PtraceAttach(pid_t tid) {
bool ThreadCaptureImpl::PtraceThreadInfo(pid_t tid, ThreadInfo& thread_info) {
  thread_info.tid = tid;

  const unsigned int max_num_regs = 128; // larger than number of registers on any device
  const unsigned int max_num_regs = 128;  // larger than number of registers on any device
  uintptr_t regs[max_num_regs];
  struct iovec iovec;
  iovec.iov_base = &regs;

@@ -243,7 +243,7 @@ bool ThreadCaptureImpl::PtraceThreadInfo(pid_t tid, ThreadInfo& thread_info) {

  thread_info.stack = std::pair<uintptr_t, uintptr_t>(regs[sp], 0);

  return true;
  return true;
}

int ThreadCaptureImpl::CaptureThread(pid_t tid) {

@@ -266,7 +266,7 @@ int ThreadCaptureImpl::CaptureThread(pid_t tid) {

  unsigned int resume_signal = 0;

  unsigned int signal = WSTOPSIG(status);
  unsigned int signal = WSTOPSIG(status);
  if ((status >> 16) == PTRACE_EVENT_STOP) {
    switch (signal) {
      case SIGSTOP:

@@ -307,7 +307,7 @@ bool ThreadCaptureImpl::ReleaseThread(pid_t tid, unsigned int signal) {

bool ThreadCaptureImpl::ReleaseThreads() {
  bool ret = true;
  for (auto it = captured_threads_.begin(); it != captured_threads_.end(); ) {
  for (auto it = captured_threads_.begin(); it != captured_threads_.end();) {
    if (ReleaseThread(it->first, it->second)) {
      it = captured_threads_.erase(it);
    } else {

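The pid_to_str comment in the hunks above explains why the conversion is hand-rolled: sprintf may allocate, and this code runs while malloc is disabled, so the digits are written backwards into a caller-provided buffer instead. A self-contained sketch of that digit loop, with made-up names, for reference:

#include <stddef.h>
#include <sys/types.h>

// Illustrative allocation-free pid -> decimal string, filled from the end of buf.
// Returns a pointer into buf, or nullptr if pid <= 0 or buf is too small.
static char* PidToStrSketch(char* buf, size_t len, pid_t pid) {
  if (pid <= 0 || len < 2) {
    return nullptr;
  }
  char* ptr = buf + len - 1;
  *ptr = '\0';
  while (pid > 0) {
    if (ptr == buf) {
      return nullptr;  // ran out of room
    }
    ptr--;
    *ptr = '0' + (pid % 10);
    pid /= 10;
  }
  return ptr;
}
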
@@ -33,7 +33,7 @@ using ThreadInfoList = allocator::vector<ThreadInfo>;
class ThreadCaptureImpl;

class ThreadCapture {
public:
 public:
  ThreadCapture(pid_t pid, Allocator<ThreadCapture> allocator);
  ~ThreadCapture();


@@ -44,7 +44,7 @@ public:
  bool CapturedThreadInfo(ThreadInfoList& threads);
  void InjectTestFunc(std::function<void(pid_t)>&& f);

private:
 private:
  ThreadCapture(const ThreadCapture&) = delete;
  void operator=(const ThreadCapture&) = delete;

@@ -19,7 +19,7 @@

#include <sys/prctl.h>

#define PR_SET_VMA 0x53564d41
#define PR_SET_VMA_ANON_NAME 0
#define PR_SET_VMA 0x53564d41
#define PR_SET_VMA_ANON_NAME 0

#endif // LIBMEMUNREACHABLE_ANON_VMA_NAMING_H_
#endif  // LIBMEMUNREACHABLE_ANON_VMA_NAMING_H_

@@ -17,9 +17,9 @@
#ifndef LIBMEMUNREACHABLE_BIONIC_H_
#define LIBMEMUNREACHABLE_BIONIC_H_

#include <sys/cdefs.h>
#include <stdint.h>
#include <stdlib.h>
#include <sys/cdefs.h>

__BEGIN_DECLS


@@ -27,9 +27,9 @@ __BEGIN_DECLS
extern void malloc_disable();
extern void malloc_enable();
extern int malloc_iterate(uintptr_t base, size_t size,
    void (*callback)(uintptr_t base, size_t size, void* arg), void* arg);
                          void (*callback)(uintptr_t base, size_t size, void* arg), void* arg);
extern ssize_t malloc_backtrace(void* pointer, uintptr_t* frames, size_t frame_count);

__END_DECLS

#endif // LIBMEMUNREACHABLE_BIONIC_H_
#endif  // LIBMEMUNREACHABLE_BIONIC_H_

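malloc_iterate above takes a C-style callback plus an opaque arg, which is how callers thread state through the walk. A short sketch of that pattern, assuming only the declarations in this header (these are bionic internals, so the sketch is only meaningful in a process linked against bionic; CountHeapRegion and IterateContext are hypothetical names):

#include <stdint.h>
#include <stddef.h>

#include "bionic.h"

// Illustrative: count allocations and bytes in [base, base + size) by passing
// a context struct through malloc_iterate's void* arg.
struct IterateContext {
  size_t count;
  size_t bytes;
};

static void CountAllocation(uintptr_t /*base*/, size_t size, void* arg) {
  IterateContext* ctx = static_cast<IterateContext*>(arg);
  ctx->count++;
  ctx->bytes += size;
}

void CountHeapRegion(uintptr_t region_base, size_t region_size) {  // hypothetical helper
  IterateContext ctx{0, 0};
  malloc_disable();  // keep the heap stable while walking it; the callback must not allocate
  malloc_iterate(region_base, region_size, CountAllocation, &ctx);
  malloc_enable();
  // ctx.count / ctx.bytes now describe the live allocations in the region.
}
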
@@ -21,8 +21,8 @@

#ifdef __cplusplus

#include <vector>
#include <string>
#include <vector>

struct Leak {
  uintptr_t begin;

@@ -83,4 +83,4 @@ bool NoLeaks();

__END_DECLS

#endif // LIBMEMUNREACHABLE_MEMUNREACHABLE_H_
#endif  // LIBMEMUNREACHABLE_MEMUNREACHABLE_H_

@@ -43,4 +43,4 @@

#endif

#endif // LIBMEMUNREACHABLE_LOG_H_
#endif  // LIBMEMUNREACHABLE_LOG_H_

@@ -16,44 +16,42 @@

#include <Allocator.h>

#include <gtest/gtest.h>
#include <ScopedDisableMalloc.h>

#include <gtest/gtest.h>

std::function<void()> ScopedAlarm::func_;

class AllocatorTest : public testing::Test {
 protected:
  AllocatorTest() : heap(), disable_malloc_() {}
  virtual void SetUp() {
    heap_count = 0;
  }
  virtual void SetUp() { heap_count = 0; }
  virtual void TearDown() {
    ASSERT_EQ(heap_count, 0);
    ASSERT_TRUE(heap.empty());
    ASSERT_FALSE(disable_malloc_.timed_out());
  }
  Heap heap;

 private:
  ScopedDisableMallocTimeout disable_malloc_;
};

TEST_F(AllocatorTest, simple) {
  Allocator<char[100]> allocator(heap);
  void *ptr = allocator.allocate();
  void* ptr = allocator.allocate();
  ASSERT_TRUE(ptr != NULL);
  allocator.deallocate(ptr);
}

TEST_F(AllocatorTest, multiple) {
  Allocator<char[100]> allocator(heap);
  void *ptr1 = allocator.allocate();
  void* ptr1 = allocator.allocate();
  ASSERT_TRUE(ptr1 != NULL);
  void *ptr2 = allocator.allocate();
  void* ptr2 = allocator.allocate();
  ASSERT_TRUE(ptr2 != NULL);
  ASSERT_NE(ptr1, ptr2);
  allocator.deallocate(ptr1);
  void *ptr3 = allocator.allocate();
  void* ptr3 = allocator.allocate();
  ASSERT_EQ(ptr1, ptr3);
  allocator.deallocate(ptr3);
  allocator.deallocate(ptr2);

@@ -63,7 +61,7 @@ TEST_F(AllocatorTest, many) {
  const int num = 4096;
  const int size = 128;
  Allocator<char[size]> allocator(heap);
  void *ptr[num];
  void* ptr[num];
  for (int i = 0; i < num; i++) {
    ptr[i] = allocator.allocate();
    memset(ptr[i], 0xaa, size);

@@ -87,7 +85,7 @@ TEST_F(AllocatorTest, many) {
TEST_F(AllocatorTest, large) {
  const size_t size = 1024 * 1024;
  Allocator<char[size]> allocator(heap);
  void *ptr = allocator.allocate();
  void* ptr = allocator.allocate();
  memset(ptr, 0xaa, size);
  allocator.deallocate(ptr);
}

@@ -96,7 +94,7 @@ TEST_F(AllocatorTest, many_large) {
  const int num = 128;
  const int size = 1024 * 1024;
  Allocator<char[size]> allocator(heap);
  void *ptr[num];
  void* ptr[num];
  for (int i = 0; i < num; i++) {
    ptr[i] = allocator.allocate();
    memset(ptr[i], 0xaa, size);

@@ -19,8 +19,8 @@
#include <chrono>
#include <functional>

#include <gtest/gtest.h>
#include <ScopedDisableMalloc.h>
#include <gtest/gtest.h>

using namespace std::chrono_literals;


@@ -36,75 +36,83 @@ class DisableMallocTest : public ::testing::Test {
};

TEST_F(DisableMallocTest, reenable) {
  ASSERT_EXIT({
    alarm(100ms);
    void *ptr1 = malloc(128);
    ASSERT_NE(ptr1, nullptr);
    free(ptr1);
    {
      ScopedDisableMalloc disable_malloc;
    }
    void *ptr2 = malloc(128);
    ASSERT_NE(ptr2, nullptr);
    free(ptr2);
    _exit(1);
  }, ::testing::ExitedWithCode(1), "");
  ASSERT_EXIT(
      {
        alarm(100ms);
        void* ptr1 = malloc(128);
        ASSERT_NE(ptr1, nullptr);
        free(ptr1);
        { ScopedDisableMalloc disable_malloc; }
        void* ptr2 = malloc(128);
        ASSERT_NE(ptr2, nullptr);
        free(ptr2);
        _exit(1);
      },
      ::testing::ExitedWithCode(1), "");
}

TEST_F(DisableMallocTest, deadlock_allocate) {
  ASSERT_DEATH({
    void *ptr = malloc(128);
    ASSERT_NE(ptr, nullptr);
    free(ptr);
    {
      alarm(100ms);
      ScopedDisableMalloc disable_malloc;
      void* ptr = malloc(128);
      ASSERT_NE(ptr, nullptr);
      free(ptr);
    }
  }, "");
  ASSERT_DEATH(
      {
        void* ptr = malloc(128);
        ASSERT_NE(ptr, nullptr);
        free(ptr);
        {
          alarm(100ms);
          ScopedDisableMalloc disable_malloc;
          void* ptr = malloc(128);
          ASSERT_NE(ptr, nullptr);
          free(ptr);
        }
      },
      "");
}

TEST_F(DisableMallocTest, deadlock_new) {
  ASSERT_DEATH({
    char* ptr = new(char);
    ASSERT_NE(ptr, nullptr);
    delete(ptr);
    {
      alarm(100ms);
      ScopedDisableMalloc disable_malloc;
      char* ptr = new (std::nothrow)(char);
      ASSERT_NE(ptr, nullptr);
      delete(ptr);
    }
  }, "");
  ASSERT_DEATH(
      {
        char* ptr = new (char);
        ASSERT_NE(ptr, nullptr);
        delete (ptr);
        {
          alarm(100ms);
          ScopedDisableMalloc disable_malloc;
          char* ptr = new (std::nothrow)(char);
          ASSERT_NE(ptr, nullptr);
          delete (ptr);
        }
      },
      "");
}

TEST_F(DisableMallocTest, deadlock_delete) {
  ASSERT_DEATH({
    char* ptr = new(char);
    ASSERT_NE(ptr, nullptr);
    {
      alarm(250ms);
      ScopedDisableMalloc disable_malloc;
      delete(ptr);
      // Force ptr usage or this code gets optimized away by the arm64 compiler.
      ASSERT_NE(ptr, nullptr);
    }
  }, "");
  ASSERT_DEATH(
      {
        char* ptr = new (char);
        ASSERT_NE(ptr, nullptr);
        {
          alarm(250ms);
          ScopedDisableMalloc disable_malloc;
          delete (ptr);
          // Force ptr usage or this code gets optimized away by the arm64 compiler.
          ASSERT_NE(ptr, nullptr);
        }
      },
      "");
}

TEST_F(DisableMallocTest, deadlock_free) {
  ASSERT_DEATH({
    void *ptr = malloc(128);
    ASSERT_NE(ptr, nullptr);
    {
      alarm(100ms);
      ScopedDisableMalloc disable_malloc;
      free(ptr);
    }
  }, "");
  ASSERT_DEATH(
      {
        void* ptr = malloc(128);
        ASSERT_NE(ptr, nullptr);
        {
          alarm(100ms);
          ScopedDisableMalloc disable_malloc;
          free(ptr);
        }
      },
      "");
}

TEST_F(DisableMallocTest, deadlock_fork) {

@@ -113,6 +121,6 @@ TEST_F(DisableMallocTest, deadlock_fork) {
      alarm(100ms);
      ScopedDisableMalloc disable_malloc;
      fork();
    }
  }, "");
}
  }, "");
}

@@ -19,8 +19,8 @@

#include "HeapWalker.h"

#include <gtest/gtest.h>
#include <ScopedDisableMalloc.h>
#include <gtest/gtest.h>
#include "Allocator.h"

class HeapWalkerTest : public ::testing::Test {

@@ -172,20 +172,20 @@ TEST_F(HeapWalkerTest, cycle) {
  ASSERT_EQ(true, heap_walker.Leaked(leaked, 100, &num_leaks, &leaked_bytes));

  EXPECT_EQ(2U, num_leaks);
  EXPECT_EQ(2*sizeof(uintptr_t), leaked_bytes);
  EXPECT_EQ(2 * sizeof(uintptr_t), leaked_bytes);
  ASSERT_EQ(2U, leaked.size());
}

TEST_F(HeapWalkerTest, segv) {
  const size_t page_size = sysconf(_SC_PAGE_SIZE);
  void* buffer1 = mmap(NULL, page_size, PROT_NONE, MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
  void* buffer1 = mmap(NULL, page_size, PROT_NONE, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
  ASSERT_NE(buffer1, nullptr);
  void* buffer2;

  buffer2 = &buffer1;

  HeapWalker heap_walker(heap_);
  heap_walker.Allocation(buffer_begin(buffer1), buffer_begin(buffer1)+page_size);
  heap_walker.Allocation(buffer_begin(buffer1), buffer_begin(buffer1) + page_size);
  heap_walker.Root(buffer_begin(buffer2), buffer_end(buffer2));

  ASSERT_EQ(true, heap_walker.DetectLeaks());

@@ -16,8 +16,6 @@

#include "bionic.h"

void malloc_disable() {
}
void malloc_disable() {}

void malloc_enable() {
}
void malloc_enable() {}

@@ -14,11 +14,11 @@
 * limitations under the License.
 */

#include "HeapWalker.h"
#include "LeakFolding.h"
#include "HeapWalker.h"

#include <gtest/gtest.h>
#include <ScopedDisableMalloc.h>
#include <gtest/gtest.h>
#include "Allocator.h"

class LeakFoldingTest : public ::testing::Test {

@@ -84,7 +84,7 @@ TEST_F(LeakFoldingTest, two) {
  ASSERT_EQ(true, folding.Leaked(leaked, &num_leaks, &leaked_bytes));

  EXPECT_EQ(2U, num_leaks);
  EXPECT_EQ(2*sizeof(uintptr_t), leaked_bytes);
  EXPECT_EQ(2 * sizeof(uintptr_t), leaked_bytes);
  ASSERT_EQ(2U, leaked.size());
  EXPECT_EQ(0U, leaked[0].referenced_count);
  EXPECT_EQ(0U, leaked[0].referenced_size);

@@ -113,7 +113,7 @@ TEST_F(LeakFoldingTest, dominator) {
  ASSERT_EQ(true, folding.Leaked(leaked, &num_leaks, &leaked_bytes));

  EXPECT_EQ(2U, num_leaks);
  EXPECT_EQ(2*sizeof(uintptr_t), leaked_bytes);
  EXPECT_EQ(2 * sizeof(uintptr_t), leaked_bytes);
  ASSERT_EQ(1U, leaked.size());
  EXPECT_EQ(1U, leaked[0].referenced_count);
  EXPECT_EQ(sizeof(uintptr_t), leaked[0].referenced_size);

@@ -144,10 +144,10 @@ TEST_F(LeakFoldingTest, cycle) {
  ASSERT_EQ(true, folding.Leaked(leaked, &num_leaks, &leaked_bytes));

  EXPECT_EQ(3U, num_leaks);
  EXPECT_EQ(3*sizeof(uintptr_t), leaked_bytes);
  EXPECT_EQ(3 * sizeof(uintptr_t), leaked_bytes);
  ASSERT_EQ(1U, leaked.size());
  EXPECT_EQ(2U, leaked[0].referenced_count);
  EXPECT_EQ(2*sizeof(uintptr_t), leaked[0].referenced_size);
  EXPECT_EQ(2 * sizeof(uintptr_t), leaked[0].referenced_size);
}

TEST_F(LeakFoldingTest, dominator_cycle) {

@@ -175,13 +175,13 @@ TEST_F(LeakFoldingTest, dominator_cycle) {
  ASSERT_EQ(true, folding.Leaked(leaked, &num_leaks, &leaked_bytes));

  EXPECT_EQ(3U, num_leaks);
  EXPECT_EQ(5*sizeof(uintptr_t), leaked_bytes);
  EXPECT_EQ(5 * sizeof(uintptr_t), leaked_bytes);
  ASSERT_EQ(2U, leaked.size());

  EXPECT_EQ(2U, leaked[0].referenced_count);
  EXPECT_EQ(3*sizeof(uintptr_t), leaked[0].referenced_size);
  EXPECT_EQ(3 * sizeof(uintptr_t), leaked[0].referenced_size);
  EXPECT_EQ(2U, leaked[1].referenced_count);
  EXPECT_EQ(3*sizeof(uintptr_t), leaked[1].referenced_size);
  EXPECT_EQ(3 * sizeof(uintptr_t), leaked[1].referenced_size);
}

TEST_F(LeakFoldingTest, two_cycles) {

@@ -218,12 +218,12 @@ TEST_F(LeakFoldingTest, two_cycles) {
  ASSERT_EQ(true, folding.Leaked(leaked, &num_leaks, &leaked_bytes));

  EXPECT_EQ(6U, num_leaks);
  EXPECT_EQ(6*sizeof(uintptr_t), leaked_bytes);
  EXPECT_EQ(6 * sizeof(uintptr_t), leaked_bytes);
  ASSERT_EQ(2U, leaked.size());
  EXPECT_EQ(2U, leaked[0].referenced_count);
  EXPECT_EQ(2*sizeof(uintptr_t), leaked[0].referenced_size);
  EXPECT_EQ(2 * sizeof(uintptr_t), leaked[0].referenced_size);
  EXPECT_EQ(2U, leaked[1].referenced_count);
  EXPECT_EQ(2*sizeof(uintptr_t), leaked[1].referenced_size);
  EXPECT_EQ(2 * sizeof(uintptr_t), leaked[1].referenced_size);
}

TEST_F(LeakFoldingTest, two_dominator_cycles) {

@@ -254,7 +254,7 @@ TEST_F(LeakFoldingTest, two_dominator_cycles) {
  ASSERT_EQ(true, folding.Leaked(leaked, &num_leaks, &leaked_bytes));

  EXPECT_EQ(4U, num_leaks);
  EXPECT_EQ(4*sizeof(uintptr_t), leaked_bytes);
  EXPECT_EQ(4 * sizeof(uintptr_t), leaked_bytes);
  ASSERT_EQ(4U, leaked.size());
  EXPECT_EQ(1U, leaked[0].referenced_count);
  EXPECT_EQ(sizeof(uintptr_t), leaked[0].referenced_size);

@@ -272,13 +272,13 @@ TEST_F(LeakFoldingTest, giant_dominator_cycle) {

  HeapWalker heap_walker(heap_);

  for (size_t i = 0; i < n; i ++) {
  for (size_t i = 0; i < n; i++) {
    ASSERT_TRUE(heap_walker.Allocation(reinterpret_cast<uintptr_t>(&buffer[i]),
        reinterpret_cast<uintptr_t>(&buffer[i+1])));
                                       reinterpret_cast<uintptr_t>(&buffer[i + 1])));
  }

  for (size_t i = 0; i < n - 1; i++) {
    buffer[i] = &buffer[i+1];
    buffer[i] = &buffer[i + 1];
  }
  buffer[n - 1] = &buffer[0];


@@ -306,15 +306,15 @@ TEST_F(LeakFoldingTest, giant_cycle) {
  HeapWalker heap_walker(heap_);

  for (size_t i = 0; i < n - 1; i++) {
    buffer[i] = &buffer[i+1];
    buffer[i] = &buffer[i + 1];
  }
  buffer[n - 1] = &buffer[0];

  buffer1[0] = &buffer[0];

  for (size_t i = 0; i < n; i ++) {
  for (size_t i = 0; i < n; i++) {
    ASSERT_TRUE(heap_walker.Allocation(reinterpret_cast<uintptr_t>(&buffer[i]),
        reinterpret_cast<uintptr_t>(&buffer[i+1])));
                                       reinterpret_cast<uintptr_t>(&buffer[i + 1])));
  }

  ALLOCATION(heap_walker, buffer1);

@@ -16,8 +16,8 @@

#include <fcntl.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/prctl.h>
#include <unistd.h>

#include <gtest/gtest.h>


@@ -25,23 +25,16 @@

class HiddenPointer {
 public:
  explicit HiddenPointer(size_t size = 256) {
    Set(malloc(size));
  }
  ~HiddenPointer() {
    Free();
  }
  void* Get() {
    return reinterpret_cast<void*>(~ptr_);
  }
  explicit HiddenPointer(size_t size = 256) { Set(malloc(size)); }
  ~HiddenPointer() { Free(); }
  void* Get() { return reinterpret_cast<void*>(~ptr_); }
  void Free() {
    free(Get());
    Set(nullptr);
  }

 private:
  void Set(void* ptr) {
    ptr_ = ~reinterpret_cast<uintptr_t>(ptr);
  }
  void Set(void* ptr) { ptr_ = ~reinterpret_cast<uintptr_t>(ptr); }
  volatile uintptr_t ptr_;
};

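HiddenPointer above deliberately stores the allocation as the bitwise complement of its address, so no word in memory holds the real pointer value and a conservative leak scanner cannot see a reference to it. A tiny standalone illustration of that trick (names invented for this note):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

int main() {
  void* p = malloc(16);
  // Store only the complement; scanning memory for the value of `p`
  // will not match `hidden`, so the allocation looks unreachable.
  volatile uintptr_t hidden = ~reinterpret_cast<uintptr_t>(p);
  printf("real:   %p\n", p);
  printf("stored: 0x%zx\n", static_cast<size_t>(hidden));
  // Recover the original pointer by complementing again.
  void* recovered = reinterpret_cast<void*>(~hidden);
  free(recovered);
  return 0;
}
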
@@ -45,12 +45,10 @@ class ThreadListTest : public ::testing::TestWithParam<int> {
    WaitForThreads();
  }

  virtual void TearDown() {
    ASSERT_TRUE(heap.empty());
  }
  virtual void TearDown() { ASSERT_TRUE(heap.empty()); }

 protected:
  template<class Function>
  template <class Function>
  void StartThreads(unsigned int threads, Function&& func) {
    threads_.reserve(threads);
    tids_.reserve(threads);

@@ -68,14 +66,14 @@ class ThreadListTest : public ::testing::TestWithParam<int> {

      {
        std::unique_lock<std::mutex> lk(m_);
        cv_stop_.wait(lk, [&] {return stop_;});
        cv_stop_.wait(lk, [&] { return stop_; });
      }
    });
  }

  {
    std::unique_lock<std::mutex> lk(m_);
    cv_start_.wait(lk, [&]{ return tids_.size() == threads; });
    cv_start_.wait(lk, [&] { return tids_.size() == threads; });
  }
  }

@@ -93,9 +91,7 @@ class ThreadListTest : public ::testing::TestWithParam<int> {
    tids_.clear();
  }

  std::vector<pid_t>& tids() {
    return tids_;
  }
  std::vector<pid_t>& tids() { return tids_; }

  Heap heap;


@@ -143,7 +139,7 @@ TEST_F(ThreadListTest, list_one) {
TEST_P(ThreadListTest, list_some) {
  const unsigned int threads = GetParam() - 1;

  StartThreads(threads, [](){});
  StartThreads(threads, []() {});
  std::vector<pid_t> expected_tids = tids();
  expected_tids.push_back(getpid());

@@ -176,10 +172,8 @@ class ThreadCaptureTest : public ThreadListTest {
 public:
  ThreadCaptureTest() {}
  ~ThreadCaptureTest() {}
  void Fork(std::function<void()>&& child_init,
      std::function<void()>&& child_cleanup,
      std::function<void(pid_t)>&& parent) {

  void Fork(std::function<void()>&& child_init, std::function<void()>&& child_cleanup,
            std::function<void(pid_t)>&& parent) {
    ScopedPipe start_pipe;
    ScopedPipe stop_pipe;


@@ -211,39 +205,40 @@ class ThreadCaptureTest : public ThreadListTest {
TEST_P(ThreadCaptureTest, capture_some) {
  const unsigned int threads = GetParam();

  Fork([&](){
    // child init
    StartThreads(threads - 1, [](){});
  },
  [&](){
    // child cleanup
    StopThreads();
  },
  [&](pid_t child){
    // parent
    ASSERT_GT(child, 0);
  Fork(
      [&]() {
        // child init
        StartThreads(threads - 1, []() {});
      },
      [&]() {
        // child cleanup
        StopThreads();
      },
      [&](pid_t child) {
        // parent
        ASSERT_GT(child, 0);

    {
      ScopedDisableMallocTimeout disable_malloc;
        {
          ScopedDisableMallocTimeout disable_malloc;

      ThreadCapture thread_capture(child, heap);
      auto list_tids = allocator::vector<pid_t>(heap);
          ThreadCapture thread_capture(child, heap);
          auto list_tids = allocator::vector<pid_t>(heap);

      ASSERT_TRUE(thread_capture.ListThreads(list_tids));
      ASSERT_EQ(threads, list_tids.size());
          ASSERT_TRUE(thread_capture.ListThreads(list_tids));
          ASSERT_EQ(threads, list_tids.size());

      ASSERT_TRUE(thread_capture.CaptureThreads());
          ASSERT_TRUE(thread_capture.CaptureThreads());

      auto thread_info = allocator::vector<ThreadInfo>(heap);
      ASSERT_TRUE(thread_capture.CapturedThreadInfo(thread_info));
      ASSERT_EQ(threads, thread_info.size());
      ASSERT_TRUE(thread_capture.ReleaseThreads());
          auto thread_info = allocator::vector<ThreadInfo>(heap);
          ASSERT_TRUE(thread_capture.CapturedThreadInfo(thread_info));
          ASSERT_EQ(threads, thread_info.size());
          ASSERT_TRUE(thread_capture.ReleaseThreads());

      if (!HasFailure()) {
        ASSERT_FALSE(disable_malloc.timed_out());
      }
    }
  });
          if (!HasFailure()) {
            ASSERT_FALSE(disable_malloc.timed_out());
          }
        }
      });
}

INSTANTIATE_TEST_CASE_P(ThreadCaptureTest, ThreadCaptureTest, ::testing::Values(1, 2, 10, 1024));

@@ -262,7 +257,7 @@ TEST_F(ThreadCaptureTest, capture_kill) {
  ScopedDisableMallocTimeout disable_malloc;

  ThreadCapture thread_capture(ret, heap);
  thread_capture.InjectTestFunc([&](pid_t tid){
  thread_capture.InjectTestFunc([&](pid_t tid) {
    syscall(SYS_tgkill, ret, tid, SIGKILL);
    usleep(10000);
  });

@@ -288,62 +283,63 @@ TEST_F(ThreadCaptureTest, capture_signal) {
  // For signal handler
  static ScopedPipe* g_pipe;

  Fork([&](){
    // child init
    pipe.CloseReceiver();
  Fork(
      [&]() {
        // child init
        pipe.CloseReceiver();

    g_pipe = &pipe;
        g_pipe = &pipe;

    struct sigaction act{};
    act.sa_handler = [](int){
      char buf = '+';
      write(g_pipe->Sender(), &buf, 1);
      g_pipe->CloseSender();
    };
    sigaction(sig, &act, NULL);
    sigset_t set;
    sigemptyset(&set);
    sigaddset(&set, sig);
    pthread_sigmask(SIG_UNBLOCK, &set, NULL);
  },
  [&](){
    // child cleanup
    g_pipe = nullptr;
    pipe.Close();
  },
  [&](pid_t child){
    // parent
    ASSERT_GT(child, 0);
    pipe.CloseSender();
        struct sigaction act {};
        act.sa_handler = [](int) {
          char buf = '+';
          write(g_pipe->Sender(), &buf, 1);
          g_pipe->CloseSender();
        };
        sigaction(sig, &act, NULL);
        sigset_t set;
        sigemptyset(&set);
        sigaddset(&set, sig);
        pthread_sigmask(SIG_UNBLOCK, &set, NULL);
      },
      [&]() {
        // child cleanup
        g_pipe = nullptr;
        pipe.Close();
      },
      [&](pid_t child) {
        // parent
        ASSERT_GT(child, 0);
        pipe.CloseSender();

    {
      ScopedDisableMallocTimeout disable_malloc;
        {
          ScopedDisableMallocTimeout disable_malloc;

      ThreadCapture thread_capture(child, heap);
      thread_capture.InjectTestFunc([&](pid_t tid){
        syscall(SYS_tgkill, child, tid, sig);
        usleep(10000);
          ThreadCapture thread_capture(child, heap);
          thread_capture.InjectTestFunc([&](pid_t tid) {
            syscall(SYS_tgkill, child, tid, sig);
            usleep(10000);
          });
      auto list_tids = allocator::vector<pid_t>(heap);

      ASSERT_TRUE(thread_capture.ListThreads(list_tids));
      ASSERT_EQ(1U, list_tids.size());

      ASSERT_TRUE(thread_capture.CaptureThreads());

      auto thread_info = allocator::vector<ThreadInfo>(heap);
      ASSERT_TRUE(thread_capture.CapturedThreadInfo(thread_info));
      ASSERT_EQ(1U, thread_info.size());
      ASSERT_TRUE(thread_capture.ReleaseThreads());

      usleep(100000);
      char buf;
      ASSERT_EQ(1, TEMP_FAILURE_RETRY(read(pipe.Receiver(), &buf, 1)));
      ASSERT_EQ(buf, '+');

      if (!HasFailure()) {
        ASSERT_FALSE(disable_malloc.timed_out());
      }
    }
  });
          auto list_tids = allocator::vector<pid_t>(heap);

          ASSERT_TRUE(thread_capture.ListThreads(list_tids));
          ASSERT_EQ(1U, list_tids.size());

          ASSERT_TRUE(thread_capture.CaptureThreads());

          auto thread_info = allocator::vector<ThreadInfo>(heap);
          ASSERT_TRUE(thread_capture.CapturedThreadInfo(thread_info));
          ASSERT_EQ(1U, thread_info.size());
          ASSERT_TRUE(thread_capture.ReleaseThreads());

          usleep(100000);
          char buf;
          ASSERT_EQ(1, TEMP_FAILURE_RETRY(read(pipe.Receiver(), &buf, 1)));
          ASSERT_EQ(buf, '+');

          if (!HasFailure()) {
            ASSERT_FALSE(disable_malloc.timed_out());
          }
        }
      });
}