From a83881e33ce29ee236c924d669cb41a9d816962d Mon Sep 17 00:00:00 2001
From: Colin Cross <ccross@android.com>
Date: Thu, 22 Jun 2017 10:50:05 -0700
Subject: [PATCH] libmemunreachable: clang-format everything

clang-format -i --sort-includes $(find . -name "*.cpp" -o -name "*.h")

Test: builds
Change-Id: Ia8e0677fe7f3f26dddba3a851cd2dfab9f14e421
---
 libmemunreachable/Allocator.cpp               | 134 ++++++-------
 libmemunreachable/Allocator.h                 | 127 ++++++------
 libmemunreachable/HeapWalker.cpp              |  11 +-
 libmemunreachable/HeapWalker.h                |  38 ++--
 libmemunreachable/Leak.h                      |   6 +-
 libmemunreachable/LeakFolding.cpp             |  64 +++---
 libmemunreachable/LeakFolding.h               |  27 ++-
 libmemunreachable/LeakPipe.cpp                |   8 +-
 libmemunreachable/LeakPipe.h                  |  33 ++--
 libmemunreachable/LineBuffer.cpp              |   4 +-
 libmemunreachable/LineBuffer.h                |   2 +-
 libmemunreachable/LinkedList.h                |  65 +++---
 libmemunreachable/MemUnreachable.cpp          |  76 ++++---
 libmemunreachable/ProcessMappings.cpp         |   6 +-
 libmemunreachable/ProcessMappings.h           |   2 +-
 libmemunreachable/PtracerThread.cpp           |  23 +--
 libmemunreachable/PtracerThread.h             |   3 +-
 libmemunreachable/ScopedAlarm.h               |  11 +-
 libmemunreachable/ScopedDisableMalloc.h       |  34 ++--
 libmemunreachable/ScopedPipe.h                |  14 +-
 libmemunreachable/ScopedSignalHandler.h       |   7 +-
 libmemunreachable/Semaphore.h                 |   6 +-
 libmemunreachable/Tarjan.h                    |  38 ++--
 libmemunreachable/ThreadCapture.cpp           |  30 +--
 libmemunreachable/ThreadCapture.h             |   4 +-
 libmemunreachable/anon_vma_naming.h           |   6 +-
 libmemunreachable/bionic.h                    |   6 +-
 .../include/memunreachable/memunreachable.h   |   4 +-
 libmemunreachable/log.h                       |   2 +-
 libmemunreachable/tests/Allocator_test.cpp    |  22 +--
 .../tests/DisableMalloc_test.cpp              | 128 ++++++------
 libmemunreachable/tests/HeapWalker_test.cpp   |   8 +-
 libmemunreachable/tests/HostMallocStub.cpp    |   6 +-
 libmemunreachable/tests/LeakFolding_test.cpp  |  38 ++--
 .../tests/MemUnreachable_test.cpp             |  19 +-
 .../tests/ThreadCapture_test.cpp              | 186 +++++++++---------
 36 files changed, 565 insertions(+), 633 deletions(-)

diff --git a/libmemunreachable/Allocator.cpp b/libmemunreachable/Allocator.cpp
index 6fe67a41f..da6db20f3 100644
--- a/libmemunreachable/Allocator.cpp
+++ b/libmemunreachable/Allocator.cpp
@@ -33,9 +33,9 @@
 
 #include "android-base/macros.h"
 
-#include "anon_vma_naming.h"
 #include "Allocator.h"
 #include "LinkedList.h"
+#include "anon_vma_naming.h"
 
 // runtime interfaces used:
 // abort
 
 static constexpr size_t kChunkSize = 256 * 1024;
 static constexpr size_t kUsableChunkSize = kChunkSize - kPageSize;
 static constexpr size_t kMaxBucketAllocationSize = kChunkSize / 4;
 static constexpr size_t kMinBucketAllocationSize = 8;
-static constexpr unsigned int kNumBuckets = const_log2(kMaxBucketAllocationSize) -
-    const_log2(kMinBucketAllocationSize) + 1;
-static constexpr unsigned int kUsablePagesPerChunk = kUsableChunkSize -
-    / kPageSize;
+static constexpr unsigned int kNumBuckets =
+    const_log2(kMaxBucketAllocationSize) - const_log2(kMinBucketAllocationSize) + 1;
+static constexpr unsigned int kUsablePagesPerChunk = kUsableChunkSize / kPageSize;
 
 std::atomic<int> heap_count;
 
@@ -93,7 +92,7 @@ class HeapImpl {
   void FreeLocked(void* ptr);
 
   struct MapAllocation {
-    void *ptr;
+    void* ptr;
     size_t size;
     MapAllocation* next;
   };
@@ -107,8 +106,7 @@ static inline unsigned int log2(size_t n) {
 }
 
 static inline unsigned int size_to_bucket(size_t size) {
-  if (size < kMinBucketAllocationSize)
-    return kMinBucketAllocationSize;
+  if (size < kMinBucketAllocationSize) return kMinBucketAllocationSize;
   return log2(size - 1) + 1 - const_log2(kMinBucketAllocationSize);
 }
 
@@ -140,8 +138,7 @@ static void* MapAligned(size_t size, size_t align) {
   // Trim beginning
   if (aligned_ptr != ptr) {
-    ptrdiff_t extra = reinterpret_cast<uintptr_t>(aligned_ptr) -
-        reinterpret_cast<uintptr_t>(ptr);
+    ptrdiff_t extra = reinterpret_cast<uintptr_t>(aligned_ptr) - reinterpret_cast<uintptr_t>(ptr);
     munmap(ptr, extra);
     map_size -= extra;
     ptr = aligned_ptr;
@@ -151,14 +148,13 @@ static void* MapAligned(size_t size, size_t align) {
   if (map_size != size) {
     assert(map_size > size);
     assert(ptr != NULL);
-    munmap(reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(ptr) + size),
-        map_size - size);
+    munmap(reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(ptr) + size), map_size - size);
   }
 
-#define PR_SET_VMA 0x53564d41
-#define PR_SET_VMA_ANON_NAME 0
-  prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME,
-      reinterpret_cast<uintptr_t>(ptr), size, "leak_detector_malloc");
+#define PR_SET_VMA 0x53564d41
+#define PR_SET_VMA_ANON_NAME 0
+  prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, reinterpret_cast<uintptr_t>(ptr), size,
+        "leak_detector_malloc");
 
   return ptr;
 }
@@ -170,36 +166,31 @@ class Chunk {
   Chunk(HeapImpl* heap, int bucket);
   ~Chunk() {}
 
-  void *Alloc();
+  void* Alloc();
   void Free(void* ptr);
   void Purge();
   bool Empty();
 
   static Chunk* ptr_to_chunk(void* ptr) {
-    return reinterpret_cast<Chunk*>(reinterpret_cast<uintptr_t>(ptr)
-        & ~(kChunkSize - 1));
+    return reinterpret_cast<Chunk*>(reinterpret_cast<uintptr_t>(ptr) & ~(kChunkSize - 1));
   }
   static bool is_chunk(void* ptr) {
     return (reinterpret_cast<uintptr_t>(ptr) & (kChunkSize - 1)) != 0;
   }
 
-  unsigned int free_count() {
-    return free_count_;
-  }
-  HeapImpl* heap() {
-    return heap_;
-  }
-  LinkedList<Chunk*> node_; // linked list sorted by minimum free count
+  unsigned int free_count() { return free_count_; }
+  HeapImpl* heap() { return heap_; }
+  LinkedList<Chunk*> node_;  // linked list sorted by minimum free count
 
  private:
   DISALLOW_COPY_AND_ASSIGN(Chunk);
   HeapImpl* heap_;
   unsigned int bucket_;
-  unsigned int allocation_size_; // size of allocations in chunk, min 8 bytes
-  unsigned int max_allocations_; // maximum number of allocations in the chunk
-  unsigned int first_free_bitmap_; // index into bitmap for first non-full entry
-  unsigned int free_count_; // number of available allocations
-  unsigned int frees_since_purge_; // number of calls to Free since last Purge
+  unsigned int allocation_size_;    // size of allocations in chunk, min 8 bytes
+  unsigned int max_allocations_;    // maximum number of allocations in the chunk
+  unsigned int first_free_bitmap_;  // index into bitmap for first non-full entry
+  unsigned int free_count_;         // number of available allocations
+  unsigned int frees_since_purge_;  // number of calls to Free since last Purge
 
   // bitmap of pages that have been dirtied
   uint32_t dirty_pages_[div_round_up(kUsablePagesPerChunk, 32)];
@@ -210,13 +201,10 @@ class Chunk {
   char data_[0];
 
   unsigned int ptr_to_n(void* ptr) {
-    ptrdiff_t offset = reinterpret_cast<uintptr_t>(ptr) -
-        reinterpret_cast<uintptr_t>(data_);
+    ptrdiff_t offset = reinterpret_cast<uintptr_t>(ptr) - reinterpret_cast<uintptr_t>(data_);
     return offset / allocation_size_;
   }
-  void* n_to_ptr(unsigned int n) {
-    return data_ + n * allocation_size_;
-  }
+  void* n_to_ptr(unsigned int n) { return data_ + n * allocation_size_; }
 };
 static_assert(sizeof(Chunk) <= kPageSize, "header must fit in page");
 
@@ -225,23 +213,27 @@ void* Chunk::operator new(std::size_t count __attribute__((unused))) noexcept {
   assert(count == sizeof(Chunk));
   void* mem = MapAligned(kChunkSize, kChunkSize);
   if (!mem) {
-    abort(); //throw std::bad_alloc;
+    abort();  // throw std::bad_alloc;
   }
 
   return mem;
 }
 
 // Override new operator on chunk to use mmap to allocate kChunkSize
-void Chunk::operator delete(void *ptr) {
+void Chunk::operator delete(void* ptr) {
   assert(reinterpret_cast<Chunk*>(ptr) == ptr_to_chunk(ptr));
   munmap(ptr, kChunkSize);
 }
 
-Chunk::Chunk(HeapImpl* heap, int bucket) :
-    node_(this), heap_(heap), bucket_(bucket), allocation_size_(
-        bucket_to_size(bucket)), max_allocations_(
-        kUsableChunkSize / allocation_size_), first_free_bitmap_(0), free_count_(
-        max_allocations_), frees_since_purge_(0) {
+Chunk::Chunk(HeapImpl* heap, int bucket)
+    : node_(this),
+      heap_(heap),
+      bucket_(bucket),
+      allocation_size_(bucket_to_size(bucket)),
+      max_allocations_(kUsableChunkSize / allocation_size_),
+      first_free_bitmap_(0),
+      free_count_(max_allocations_),
+      frees_since_purge_(0) {
   memset(dirty_pages_, 0, sizeof(dirty_pages_));
   memset(free_bitmap_, 0xff, sizeof(free_bitmap_));
 }
@@ -254,8 +246,7 @@ void* Chunk::Alloc() {
   assert(free_count_ > 0);
 
   unsigned int i = first_free_bitmap_;
-  while (free_bitmap_[i] == 0)
-    i++;
+  while (free_bitmap_[i] == 0) i++;
   assert(i < arraysize(free_bitmap_));
   unsigned int bit = __builtin_ffs(free_bitmap_[i]) - 1;
   assert(free_bitmap_[i] & (1U << bit));
@@ -306,38 +297,35 @@ void Chunk::Free(void* ptr) {
 
 void Chunk::Purge() {
   frees_since_purge_ = 0;
 
-  //unsigned int allocsPerPage = kPageSize / allocation_size_;
+  // unsigned int allocsPerPage = kPageSize / allocation_size_;
 }
 
 // Override new operator on HeapImpl to use mmap to allocate a page
-void* HeapImpl::operator new(std::size_t count __attribute__((unused)))
-    noexcept {
+void* HeapImpl::operator new(std::size_t count __attribute__((unused))) noexcept {
   assert(count == sizeof(HeapImpl));
   void* mem = MapAligned(kPageSize, kPageSize);
   if (!mem) {
-    abort(); //throw std::bad_alloc;
+    abort();  // throw std::bad_alloc;
   }
 
   heap_count++;
   return mem;
 }
 
-void HeapImpl::operator delete(void *ptr) {
+void HeapImpl::operator delete(void* ptr) {
   munmap(ptr, kPageSize);
 }
 
-HeapImpl::HeapImpl() :
-    free_chunks_(), full_chunks_(), map_allocation_list_(NULL) {
-}
+HeapImpl::HeapImpl() : free_chunks_(), full_chunks_(), map_allocation_list_(NULL) {}
 
 bool HeapImpl::Empty() {
   for (unsigned int i = 0; i < kNumBuckets; i++) {
-    for (LinkedList<Chunk*> *it = free_chunks_[i].next(); it->data() != NULL; it = it->next()) {
+    for (LinkedList<Chunk*>* it = free_chunks_[i].next(); it->data() != NULL; it = it->next()) {
       if (!it->data()->Empty()) {
         return false;
       }
     }
-    for (LinkedList<Chunk*> *it = full_chunks_[i].next(); it->data() != NULL; it = it->next()) {
+    for (LinkedList<Chunk*>* it = full_chunks_[i].next(); it->data() != NULL; it = it->next()) {
       if (!it->data()->Empty()) {
        return false;
      }
@@ -350,12 +338,12 @@ bool HeapImpl::Empty() {
 HeapImpl::~HeapImpl() {
   for (unsigned int i = 0; i < kNumBuckets; i++) {
     while (!free_chunks_[i].empty()) {
-      Chunk *chunk = free_chunks_[i].next()->data();
+      Chunk* chunk = free_chunks_[i].next()->data();
       chunk->node_.remove();
       delete chunk;
     }
     while (!full_chunks_[i].empty()) {
-      Chunk *chunk = full_chunks_[i].next()->data();
+      Chunk* chunk = full_chunks_[i].next()->data();
       chunk->node_.remove();
       delete chunk;
     }
@@ -373,18 +361,18 @@ void* HeapImpl::AllocLocked(size_t size) {
   }
   int bucket = size_to_bucket(size);
   if (free_chunks_[bucket].empty()) {
-    Chunk *chunk = new Chunk(this, bucket);
+    Chunk* chunk = new Chunk(this, bucket);
     free_chunks_[bucket].insert(chunk->node_);
   }
   return free_chunks_[bucket].next()->data()->Alloc();
 }
 
-void HeapImpl::Free(void *ptr) {
+void HeapImpl::Free(void* ptr) {
   std::lock_guard<std::mutex> lk(m_);
   FreeLocked(ptr);
 }
 
-void HeapImpl::FreeLocked(void *ptr) {
+void HeapImpl::FreeLocked(void* ptr) {
   if (!Chunk::is_chunk(ptr)) {
     HeapImpl::MapFree(ptr);
   } else {
@@ -397,12 +385,11 @@ void HeapImpl::FreeLocked(void *ptr) {
 
 void* HeapImpl::MapAlloc(size_t size) {
   size = (size + kPageSize - 1) & ~(kPageSize - 1);
 
-  MapAllocation* allocation = reinterpret_cast<MapAllocation*>(AllocLocked(
-      sizeof(MapAllocation)));
+  MapAllocation* allocation = reinterpret_cast<MapAllocation*>(AllocLocked(sizeof(MapAllocation)));
   void* ptr = MapAligned(size, kChunkSize);
   if (!ptr) {
     FreeLocked(allocation);
-    abort(); //throw std::bad_alloc;
+    abort();  // throw std::bad_alloc;
   }
 
   allocation->ptr = ptr;
   allocation->size = size;
@@ -412,10 +399,9 @@ void* HeapImpl::MapAlloc(size_t size) {
   return ptr;
 }
 
-void HeapImpl::MapFree(void *ptr) {
-  MapAllocation **allocation = &map_allocation_list_;
-  while (*allocation && (*allocation)->ptr != ptr)
-    allocation = &(*allocation)->next;
+void HeapImpl::MapFree(void* ptr) {
+  MapAllocation** allocation = &map_allocation_list_;
+  while (*allocation && (*allocation)->ptr != ptr) allocation = &(*allocation)->next;
 
   assert(*allocation != nullptr);
 
@@ -425,22 +411,22 @@ void HeapImpl::MapFree(void *ptr) {
   *allocation = (*allocation)->next;
 }
 
-void HeapImpl::MoveToFreeList(Chunk *chunk, int bucket) {
+void HeapImpl::MoveToFreeList(Chunk* chunk, int bucket) {
   MoveToList(chunk, &free_chunks_[bucket]);
 }
 
-void HeapImpl::MoveToFullList(Chunk *chunk, int bucket) {
+void HeapImpl::MoveToFullList(Chunk* chunk, int bucket) {
   MoveToList(chunk, &full_chunks_[bucket]);
 }
 
-void HeapImpl::MoveToList(Chunk *chunk, LinkedList<Chunk*>* head) {
+void HeapImpl::MoveToList(Chunk* chunk, LinkedList<Chunk*>* head) {
   // Remove from old list
   chunk->node_.remove();
 
-  LinkedList<Chunk*> *node = head;
+  LinkedList<Chunk*>* node = head;
   // Insert into new list, sorted by lowest free count
-  while (node->next() != head && node->data() != nullptr
-      && node->data()->free_count() < chunk->free_count())
+  while (node->next() != head && node->data() != nullptr &&
+         node->data()->free_count() < chunk->free_count())
     node = node->next();
 
   node->insert(chunk->node_);
@@ -469,7 +455,7 @@ void Heap::deallocate(void* ptr) {
   impl_->Free(ptr);
 }
 
-void Heap::deallocate(HeapImpl*impl, void* ptr) {
+void Heap::deallocate(HeapImpl* impl, void* ptr) {
   impl->Free(ptr);
 }
 
diff --git a/libmemunreachable/Allocator.h b/libmemunreachable/Allocator.h
index 539073961..67a068f7d 100644
--- a/libmemunreachable/Allocator.h
+++ b/libmemunreachable/Allocator.h
@@ -31,14 +31,13 @@ extern std::atomic<int> heap_count;
 
 class HeapImpl;
 
-template<typename T>
+template <typename T>
 class Allocator;
 
-
 // Non-templated class that implements wraps HeapImpl to keep
 // implementation out of the header file
 class Heap {
-public:
+ public:
   Heap();
   ~Heap();
 
@@ -59,110 +58,99 @@ public:
   static void deallocate(HeapImpl* impl, void* ptr);
 
   // Allocate a class of type T
-  template<class T>
+  template <class T>
   T* allocate() {
     return reinterpret_cast<T*>(allocate(sizeof(T)));
   }
 
   // Comparators, copied objects will be equal
-  bool operator ==(const Heap& other) const {
-    return impl_ == other.impl_;
-  }
-  bool operator !=(const Heap& other) const {
-    return !(*this == other);
-  }
+  bool operator==(const Heap& other) const { return impl_ == other.impl_; }
+  bool operator!=(const Heap& other) const { return !(*this == other); }
 
   // std::unique_ptr wrapper that allocates using allocate and deletes using
   // deallocate
-  template<class T>
+  template <class T>
   using unique_ptr = std::unique_ptr<T, std::function<void(void*)>>;
 
-  template<class T, class... Args>
+  template <class T, class... Args>
   unique_ptr<T> make_unique(Args&&... args) {
     HeapImpl* impl = impl_;
-    return unique_ptr<T>(new (allocate<T>()) T(std::forward<Args>(args)...),
-        [impl](void* ptr) {
-          reinterpret_cast<T*>(ptr)->~T();
-          deallocate(impl, ptr);
-        });
+    return unique_ptr<T>(new (allocate<T>()) T(std::forward<Args>(args)...), [impl](void* ptr) {
+      reinterpret_cast<T*>(ptr)->~T();
+      deallocate(impl, ptr);
+    });
   }
 
   // std::unique_ptr wrapper that allocates using allocate and deletes using
   // deallocate
-  template<class T>
+  template <class T>
   using shared_ptr = std::shared_ptr<T>;
 
-  template<class T, class... Args>
+  template <class T, class... Args>
   shared_ptr<T> make_shared(Args&&... args);
 
-protected:
+ protected:
   HeapImpl* impl_;
   bool owns_impl_;
 };
 
 // STLAllocator implements the std allocator interface on top of a Heap
-template<typename T>
+template <typename T>
 class STLAllocator {
-public:
+ public:
   using value_type = T;
 
-  ~STLAllocator() {
-  }
+  ~STLAllocator() {}
 
   // Construct an STLAllocator on top of a Heap
-  STLAllocator(const Heap& heap) : // NOLINT, implicit
-      heap_(heap) {
-  }
+  STLAllocator(const Heap& heap)
+      :  // NOLINT, implicit
+        heap_(heap) {}
 
   // Rebind an STLAllocator from an another STLAllocator
-  template<typename U>
-  STLAllocator(const STLAllocator<U>& other) : // NOLINT, implicit
-      heap_(other.heap_) {
-  }
+  template <typename U>
+  STLAllocator(const STLAllocator<U>& other)
+      :  // NOLINT, implicit
+        heap_(other.heap_) {}
 
   STLAllocator(const STLAllocator&) = default;
   STLAllocator& operator=(const STLAllocator&) = default;
 
-  T* allocate(std::size_t n) {
-    return reinterpret_cast<T*>(heap_.allocate(n * sizeof(T)));
-  }
+  T* allocate(std::size_t n) { return reinterpret_cast<T*>(heap_.allocate(n * sizeof(T))); }
 
-  void deallocate(T* ptr, std::size_t) {
-    heap_.deallocate(ptr);
-  }
+  void deallocate(T* ptr, std::size_t) { heap_.deallocate(ptr); }
 
-  template<typename U>
-  bool operator ==(const STLAllocator<U>& other) const {
+  template <typename U>
+  bool operator==(const STLAllocator<U>& other) const {
     return heap_ == other.heap_;
   }
-  template<typename U>
-  inline bool operator !=(const STLAllocator<U>& other) const {
+  template <typename U>
+  inline bool operator!=(const STLAllocator<U>& other) const {
     return !(this == other);
   }
 
-  template<typename U>
+  template <typename U>
   friend class STLAllocator;
 
-protected:
+ protected:
   Heap heap_;
 };
 
-
 // Allocator extends STLAllocator with some convenience methods for allocating
 // a single object and for constructing unique_ptr and shared_ptr objects with
 // appropriate deleters.
-template<class T>
+template <class T>
 class Allocator : public STLAllocator<T> {
  public:
   ~Allocator() {}
 
-  Allocator(const Heap& other) : // NOLINT, implicit
-      STLAllocator<T>(other) {
-  }
+  Allocator(const Heap& other)
+      :  // NOLINT, implicit
+        STLAllocator<T>(other) {}
 
-  template<typename U>
-  Allocator(const STLAllocator<U>& other) : // NOLINT, implicit
-      STLAllocator<T>(other) {
-  }
+  template <typename U>
+  Allocator(const STLAllocator<U>& other)
+      :  // NOLINT, implicit
+        STLAllocator<T>(other) {}
 
   Allocator(const Allocator&) = default;
   Allocator& operator=(const Allocator&) = default;
@@ -171,24 +159,20 @@ class Allocator : public STLAllocator<T> {
   using STLAllocator<T>::deallocate;
   using STLAllocator<T>::heap_;
 
-  T* allocate() {
-    return STLAllocator<T>::allocate(1);
-  }
-  void deallocate(void* ptr) {
-    heap_.deallocate(ptr);
-  }
+  T* allocate() { return STLAllocator<T>::allocate(1); }
+  void deallocate(void* ptr) { heap_.deallocate(ptr); }
 
   using shared_ptr = Heap::shared_ptr<T>;
 
-  template<class... Args>
-  shared_ptr make_shared(Args&& ...args) {
+  template <class... Args>
+  shared_ptr make_shared(Args&&... args) {
     return heap_.template make_shared<T>(std::forward<Args>(args)...);
   }
 
   using unique_ptr = Heap::unique_ptr<T>;
 
-  template<class... Args>
-  unique_ptr make_unique(Args&& ...args) {
+  template <class... Args>
+  unique_ptr make_unique(Args&&... args) {
     return heap_.template make_unique<T>(std::forward<Args>(args)...);
   }
 };
 
@@ -196,30 +180,31 @@ class Allocator : public STLAllocator<T> {
 // std::unique_ptr wrapper that allocates using allocate and deletes using
 // deallocate. Implemented outside class definition in order to pass
 // Allocator<T> to shared_ptr.
-template<class T, class... Args>
+template <class T, class... Args>
 inline Heap::shared_ptr<T> Heap::make_shared(Args&&... args) {
   return std::allocate_shared<T, Allocator<T>, Args...>(Allocator<T>(*this),
-      std::forward<Args>(args)...);
+                                                        std::forward<Args>(args)...);
 }
 
 namespace allocator {
 
-template<class T>
+template <class T>
 using vector = std::vector<T, Allocator<T>>;
 
-template<class T>
+template <class T>
 using list = std::list<T, Allocator<T>>;
 
-template<class Key, class T, class Compare = std::less<Key>>
+template <class Key, class T, class Compare = std::less<Key>>
 using map = std::map<Key, T, Compare, Allocator<std::pair<const Key, T>>>;
 
-template<class Key, class T, class Hash = std::hash<Key>, class KeyEqual = std::equal_to<Key>>
-using unordered_map = std::unordered_map<Key, T, Hash, KeyEqual, Allocator<std::pair<const Key, T>>>;
+template <class Key, class T, class Hash = std::hash<Key>, class KeyEqual = std::equal_to<Key>>
+using unordered_map =
+    std::unordered_map<Key, T, Hash, KeyEqual, Allocator<std::pair<const Key, T>>>;
 
-template<class Key, class Hash = std::hash<Key>, class KeyEqual = std::equal_to<Key>>
+template <class Key, class Hash = std::hash<Key>, class KeyEqual = std::equal_to<Key>>
 using unordered_set = std::unordered_set<Key, Hash, KeyEqual, Allocator<Key>>;
 
-template<class Key, class Compare = std::less<Key>>
+template <class Key, class Compare = std::less<Key>>
 using set = std::set<Key, Compare, Allocator<Key>>;
 
 using string = std::basic_string<char, std::char_traits<char>, Allocator<char>>;
 
diff --git a/libmemunreachable/HeapWalker.cpp b/libmemunreachable/HeapWalker.cpp
index c365ae5b5..df16f4078 100644
--- a/libmemunreachable/HeapWalker.cpp
+++ b/libmemunreachable/HeapWalker.cpp
@@ -114,8 +114,8 @@ bool HeapWalker::DetectLeaks() {
   return true;
 }
 
-bool HeapWalker::Leaked(allocator::vector<Range>& leaked, size_t limit,
-    size_t* num_leaks_out, size_t* leak_bytes_out) {
+bool HeapWalker::Leaked(allocator::vector<Range>& leaked, size_t limit, size_t* num_leaks_out,
+                        size_t* leak_bytes_out) {
   leaked.clear();
 
   size_t num_leaks = 0;
@@ -148,9 +148,9 @@ bool HeapWalker::Leaked(allocator::vector<Range>& leaked, size_t limit,
 
 static bool MapOverPage(void* addr) {
   const size_t page_size = sysconf(_SC_PAGE_SIZE);
-  void *page = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(addr) & ~(page_size-1));
+  void* page = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(addr) & ~(page_size - 1));
 
-  void* ret = mmap(page, page_size, PROT_READ, MAP_ANONYMOUS|MAP_PRIVATE|MAP_FIXED, -1, 0);
+  void* ret = mmap(page, page_size, PROT_READ, MAP_ANONYMOUS | MAP_PRIVATE | MAP_FIXED, -1, 0);
   if (ret == MAP_FAILED) {
     MEM_ALOGE("failed to map page at %p: %s", page, strerror(errno));
     return false;
@@ -159,7 +159,8 @@ static bool MapOverPage(void* addr) {
   return true;
 }
 
-void HeapWalker::HandleSegFault(ScopedSignalHandler& handler, int signal, siginfo_t* si, void* /*uctx*/) {
+void HeapWalker::HandleSegFault(ScopedSignalHandler& handler, int signal, siginfo_t* si,
+                                void* /*uctx*/) {
   uintptr_t addr = reinterpret_cast<uintptr_t>(si->si_addr);
   if (addr != walking_ptr_) {
     handler.reset();
diff --git a/libmemunreachable/HeapWalker.h b/libmemunreachable/HeapWalker.h
index b25696fd4..865965a41 100644
--- a/libmemunreachable/HeapWalker.h
+++ b/libmemunreachable/HeapWalker.h
@@ -34,31 +34,31 @@ struct Range {
   bool operator==(const Range& other) const {
     return this->begin == other.begin && this->end == other.end;
   }
-  bool operator!=(const Range& other) const {
-    return !(*this == other);
-  }
+  bool operator!=(const Range& other) const { return !(*this == other); }
 };
 
 // Comparator for Ranges that returns equivalence for overlapping ranges
 struct compare_range {
-  bool operator()(const Range& a, const Range& b) const {
-    return a.end <= b.begin;
-  }
+  bool operator()(const Range& a, const Range& b) const { return a.end <= b.begin; }
 };
 
 class HeapWalker {
  public:
-  explicit HeapWalker(Allocator<HeapWalker> allocator) : allocator_(allocator),
-    allocations_(allocator), allocation_bytes_(0),
-    roots_(allocator), root_vals_(allocator),
-    segv_handler_(allocator), walking_ptr_(0) {
+  explicit HeapWalker(Allocator<HeapWalker> allocator)
+      : allocator_(allocator),
+        allocations_(allocator),
+        allocation_bytes_(0),
+        roots_(allocator),
+        root_vals_(allocator),
+        segv_handler_(allocator),
+        walking_ptr_(0) {
     valid_allocations_range_.end = 0;
     valid_allocations_range_.begin = ~valid_allocations_range_.end;
 
-    segv_handler_.install(SIGSEGV,
-        [=](ScopedSignalHandler& handler, int signal, siginfo_t* siginfo, void* uctx) {
+    segv_handler_.install(
+        SIGSEGV, [=](ScopedSignalHandler& handler, int signal, siginfo_t* siginfo, void* uctx) {
           this->HandleSegFault(handler, signal, siginfo, uctx);
-    });
+        });
   }
 
   ~HeapWalker() {}
@@ -68,15 +68,14 @@ class HeapWalker {
 
   bool DetectLeaks();
 
-  bool Leaked(allocator::vector<Range>&, size_t limit, size_t* num_leaks,
-      size_t* leak_bytes);
+  bool Leaked(allocator::vector<Range>&, size_t limit, size_t* num_leaks, size_t* leak_bytes);
   size_t Allocations();
   size_t AllocationBytes();
 
-  template<class F>
+  template <class F>
   void ForEachPtrInRange(const Range& range, F&& f);
 
-  template<class F>
+  template <class F>
   void ForEachAllocation(F&& f);
 
   struct AllocationInfo {
@@ -84,7 +83,6 @@ class HeapWalker {
   };
 
  private:
-
   void RecurseRoot(const Range& root);
   bool WordContainsAllocationPtr(uintptr_t ptr, Range* range, AllocationInfo** info);
   void HandleSegFault(ScopedSignalHandler&, int, siginfo_t*, void*);
@@ -103,7 +101,7 @@ class HeapWalker {
   uintptr_t walking_ptr_;
 };
 
-template<class F>
+template <class F>
 inline void HeapWalker::ForEachPtrInRange(const Range& range, F&& f) {
   uintptr_t begin = (range.begin + (sizeof(uintptr_t) - 1)) & ~(sizeof(uintptr_t) - 1);
   // TODO(ccross): we might need to consider a pointer to the end of a buffer
@@ -118,7 +116,7 @@ inline void HeapWalker::ForEachPtrInRange(const Range& range, F&& f) {
   }
 }
 
-template<class F>
+template <class F>
 inline void HeapWalker::ForEachAllocation(F&& f) {
   for (auto& it : allocations_) {
     const Range& range = it.first;
diff --git a/libmemunreachable/Leak.h b/libmemunreachable/Leak.h
index eaeeea7cf..db88e2949 100644
--- a/libmemunreachable/Leak.h
+++ b/libmemunreachable/Leak.h
@@ -26,7 +26,7 @@
 // as a key in std::unordered_map.
 namespace std {
 
-template<>
+template <>
 struct hash<Leak::Backtrace> {
   std::size_t operator()(const Leak::Backtrace& key) const {
     std::size_t seed = 0;
@@ -40,7 +40,7 @@ struct hash<Leak::Backtrace> {
   }
 
  private:
-  template<typename T>
+  template <typename T>
   inline void hash_combine(std::size_t& seed, const T& v) const {
     std::hash<T> hasher;
     seed ^= hasher(v) + 0x9e3779b9 + (seed << 6) + (seed >> 2);
@@ -51,7 +51,7 @@ struct hash<Leak::Backtrace> {
 
 static bool operator==(const Leak::Backtrace& lhs, const Leak::Backtrace& rhs) {
   return (lhs.num_frames == rhs.num_frames) &&
-      memcmp(lhs.frames, rhs.frames, lhs.num_frames * sizeof(lhs.frames[0])) == 0;
+         memcmp(lhs.frames, rhs.frames, lhs.num_frames * sizeof(lhs.frames[0])) == 0;
 }
 
 #endif
diff --git a/libmemunreachable/LeakFolding.cpp b/libmemunreachable/LeakFolding.cpp
index be4d20c95..2dff6728f 100644
--- a/libmemunreachable/LeakFolding.cpp
+++ b/libmemunreachable/LeakFolding.cpp
@@ -31,11 +31,11 @@ void LeakFolding::ComputeDAG() {
 
   Allocator<SCCInfo> scc_allocator = allocator_;
 
-  for (auto& scc_nodes: scc_list) {
+  for (auto& scc_nodes : scc_list) {
     Allocator<SCCInfo>::unique_ptr leak_scc;
     leak_scc = scc_allocator.make_unique(scc_allocator);
 
-    for (auto& node: scc_nodes) {
+    for (auto& node : scc_nodes) {
       node->ptr->scc = leak_scc.get();
       leak_scc->count++;
       leak_scc->size += node->ptr->range.size();
@@ -46,7 +46,7 @@ void LeakFolding::ComputeDAG() {
   for (auto& it : leak_map_) {
     LeakInfo& leak = it.second;
-    for (auto& ref: leak.node.references_out) {
+    for (auto& ref : leak.node.references_out) {
       if (leak.scc != ref->ptr->scc) {
         leak.scc->node.Edge(&ref->ptr->scc->node);
       }
@@ -55,17 +55,14 @@ void LeakFolding::ComputeDAG() {
 }
 
 void LeakFolding::AccumulateLeaks(SCCInfo* dominator) {
-  std::function<void(SCCInfo*)> walk(std::allocator_arg, allocator_,
-      [&](SCCInfo* scc) {
-        if (scc->accumulator != dominator) {
-          scc->accumulator = dominator;
-          dominator->cuumulative_size += scc->size;
-          dominator->cuumulative_count += scc->count;
-          scc->node.Foreach([&](SCCInfo* ref) {
-            walk(ref);
-          });
-        }
-      });
+  std::function<void(SCCInfo*)> walk(std::allocator_arg, allocator_, [&](SCCInfo* scc) {
+    if (scc->accumulator != dominator) {
+      scc->accumulator = dominator;
+      dominator->cuumulative_size += scc->size;
+      dominator->cuumulative_count += scc->count;
+      scc->node.Foreach([&](SCCInfo* ref) { walk(ref); });
+    }
+  });
   walk(dominator);
 }
 
@@ -73,27 +70,25 @@ bool LeakFolding::FoldLeaks() {
   Allocator<LeakInfo> leak_allocator = allocator_;
 
   // Find all leaked allocations insert them into leak_map_ and leak_graph_
-  heap_walker_.ForEachAllocation(
-      [&](const Range& range, HeapWalker::AllocationInfo& allocation) {
-        if (!allocation.referenced_from_root) {
-          auto it = leak_map_.emplace(std::piecewise_construct,
-              std::forward_as_tuple(range),
-              std::forward_as_tuple(range, allocator_));
-          LeakInfo& leak = it.first->second;
-          leak_graph_.push_back(&leak.node);
-        }
-      });
+  heap_walker_.ForEachAllocation([&](const Range& range, HeapWalker::AllocationInfo& allocation) {
+    if (!allocation.referenced_from_root) {
+      auto it = leak_map_.emplace(std::piecewise_construct, std::forward_as_tuple(range),
+                                  std::forward_as_tuple(range, allocator_));
+      LeakInfo& leak = it.first->second;
+      leak_graph_.push_back(&leak.node);
+    }
+  });
 
   // Find references between leaked allocations and connect them in leak_graph_
   for (auto& it : leak_map_) {
     LeakInfo& leak = it.second;
     heap_walker_.ForEachPtrInRange(leak.range,
-        [&](Range& ptr_range, HeapWalker::AllocationInfo* ptr_info) {
-          if (!ptr_info->referenced_from_root) {
-            LeakInfo* ptr_leak = &leak_map_.at(ptr_range);
-            leak.node.Edge(&ptr_leak->node);
-          }
-        });
+                                   [&](Range& ptr_range, HeapWalker::AllocationInfo* ptr_info) {
+                                     if (!ptr_info->referenced_from_root) {
+                                       LeakInfo* ptr_leak = &leak_map_.at(ptr_range);
+                                       leak.node.Edge(&ptr_leak->node);
+                                     }
+                                   });
   }
 
   // Convert the cyclic graph to a DAG by grouping strongly connected components
@@ -110,8 +105,8 @@ bool LeakFolding::FoldLeaks() {
   return true;
 }
 
-bool LeakFolding::Leaked(allocator::vector<ReferencedLeak>& leaked,
-    size_t* num_leaks_out, size_t* leak_bytes_out) {
+bool LeakFolding::Leaked(allocator::vector<ReferencedLeak>& leaked, size_t* num_leaks_out,
+                         size_t* leak_bytes_out) {
   size_t num_leaks = 0;
   size_t leak_bytes = 0;
   for (auto& it : leak_map_) {
@@ -123,9 +118,8 @@ bool LeakFolding::Leaked(allocator::vector<ReferencedLeak>& leaked,
   for (auto& it : leak_map_) {
     const LeakInfo& leak = it.second;
     if (leak.scc->dominator) {
-      leaked.emplace_back(Leak{leak.range,
-          leak.scc->cuumulative_count - 1,
-          leak.scc->cuumulative_size - leak.range.size()});
+      leaked.emplace_back(Leak{leak.range, leak.scc->cuumulative_count - 1,
+                               leak.scc->cuumulative_size - leak.range.size()});
     }
   }
 
diff --git a/libmemunreachable/LeakFolding.h b/libmemunreachable/LeakFolding.h
index 9c6a525fc..740b54fae 100644
--- a/libmemunreachable/LeakFolding.h
+++ b/libmemunreachable/LeakFolding.h
@@ -22,8 +22,11 @@ class LeakFolding {
  public:
   LeakFolding(Allocator<void> allocator, HeapWalker& heap_walker)
-      : allocator_(allocator), heap_walker_(heap_walker),
-        leak_map_(allocator), leak_graph_(allocator), leak_scc_(allocator) {}
+      : allocator_(allocator),
+        heap_walker_(heap_walker),
+        leak_map_(allocator),
+        leak_graph_(allocator),
+        leak_scc_(allocator) {}
 
   bool FoldLeaks();
 
@@ -33,8 +36,7 @@ class LeakFolding {
     size_t referenced_size;
   };
 
-  bool Leaked(allocator::vector<ReferencedLeak>& leaked,
-      size_t* num_leaks_out, size_t* leak_bytes_out);
+  bool Leaked(allocator::vector<ReferencedLeak>& leaked, size_t* num_leaks_out,
+              size_t* leak_bytes_out);
 
  private:
   DISALLOW_COPY_AND_ASSIGN(LeakFolding);
@@ -54,9 +56,15 @@ class LeakFolding {
     bool dominator;
     SCCInfo* accumulator;
 
-    explicit SCCInfo(Allocator<SCCInfo> allocator) : node(this, allocator),
-        count(0), size(0), cuumulative_count(0), cuumulative_size(0),
-        dominator(false), accumulator(nullptr) {}
+    explicit SCCInfo(Allocator<SCCInfo> allocator)
+        : node(this, allocator),
+          count(0),
+          size(0),
+          cuumulative_count(0),
+          cuumulative_size(0),
+          dominator(false),
+          accumulator(nullptr) {}
+
    private:
     SCCInfo(SCCInfo&&) = delete;
     DISALLOW_COPY_AND_ASSIGN(SCCInfo);
@@ -71,8 +79,7 @@ class LeakFolding {
     SCCInfo* scc;
 
     LeakInfo(const Range& range, Allocator<LeakInfo> allocator)
-        : node(this, allocator), range(range),
-          scc(nullptr) {}
+        : node(this, allocator), range(range), scc(nullptr) {}
 
    private:
     DISALLOW_COPY_AND_ASSIGN(LeakInfo);
@@ -86,4 +93,4 @@ class LeakFolding {
   allocator::vector<Allocator<SCCInfo>::unique_ptr> leak_scc_;
 };
 
-#endif // LIBMEMUNREACHABLE_LEAK_FOLDING_H_
+#endif  // LIBMEMUNREACHABLE_LEAK_FOLDING_H_
diff --git a/libmemunreachable/LeakPipe.cpp b/libmemunreachable/LeakPipe.cpp
index 78117e2b6..aac5701e1 100644
--- a/libmemunreachable/LeakPipe.cpp
+++ b/libmemunreachable/LeakPipe.cpp
@@ -22,8 +22,8 @@
 #include "log.h"
 
 bool LeakPipe::SendFd(int sock, int fd) {
-  struct msghdr hdr{};
-  struct iovec iov{};
+  struct msghdr hdr {};
+  struct iovec iov {};
   unsigned int data = 0xfdfdfdfd;
 
   alignas(struct cmsghdr) char cmsgbuf[CMSG_SPACE(sizeof(int))];
@@ -56,8 +56,8 @@ bool LeakPipe::SendFd(int sock, int fd) {
 }
 
 int LeakPipe::ReceiveFd(int sock) {
-  struct msghdr hdr{};
-  struct iovec iov{};
+  struct msghdr hdr {};
+  struct iovec iov {};
   unsigned int data;
 
   alignas(struct cmsghdr) char cmsgbuf[CMSG_SPACE(sizeof(int))];
diff --git a/libmemunreachable/LeakPipe.h b/libmemunreachable/LeakPipe.h
index 3ea2d8f3e..e6aee5f6b 100644
--- a/libmemunreachable/LeakPipe.h
+++ b/libmemunreachable/LeakPipe.h
@@ -34,15 +34,13 @@ class LeakPipe {
  public:
   LeakPipe() {
-    int ret = socketpair(AF_UNIX, SOCK_STREAM|SOCK_CLOEXEC, 0, sv_);
+    int ret = socketpair(AF_UNIX, SOCK_STREAM | SOCK_CLOEXEC, 0, sv_);
     if (ret < 0) {
       MEM_LOG_ALWAYS_FATAL("failed to create socketpair: %s", strerror(errno));
     }
   }
 
-  ~LeakPipe() {
-    Close();
-  }
+  ~LeakPipe() { Close(); }
 
   void Close() {
     close(sv_[0]);
@@ -77,13 +75,9 @@ class LeakPipe {
    public:
     LeakPipeBase() : fd_(-1) {}
 
-    ~LeakPipeBase() {
-      Close();
-    }
+    ~LeakPipeBase() { Close(); }
 
-    void SetFd(int fd) {
-      fd_ = fd;
-    }
+    void SetFd(int fd) { fd_ = fd; }
 
     void Close() {
       close(fd_);
@@ -101,7 +95,7 @@ class LeakPipe {
    public:
     using LeakPipeBase::LeakPipeBase;
 
-    template<typename T>
+    template <typename T>
     bool Send(const T& value) {
       ssize_t ret = TEMP_FAILURE_RETRY(write(fd_, &value, sizeof(T)));
       if (ret < 0) {
@@ -115,7 +109,7 @@ class LeakPipe {
       return true;
     }
 
-    template<class T, class Alloc = std::allocator<T>>
+    template <class T, class Alloc = std::allocator<T>>
     bool SendVector(const std::vector<T, Alloc>& vector) {
       size_t size = vector.size() * sizeof(T);
       if (!Send(size)) {
@@ -139,7 +133,7 @@ class LeakPipe {
    public:
     using LeakPipeBase::LeakPipeBase;
 
-    template<typename T>
+    template <typename T>
     bool Receive(T* value) {
       ssize_t ret = TEMP_FAILURE_RETRY(read(fd_, reinterpret_cast<void*>(value), sizeof(T)));
       if (ret < 0) {
@@ -153,7 +147,7 @@ class LeakPipe {
       return true;
     }
 
-    template<class T, class Alloc = std::allocator<T>>
+    template <class T, class Alloc = std::allocator<T>>
     bool ReceiveVector(std::vector<T, Alloc>& vector) {
       size_t size = 0;
       if (!Receive(&size)) {
@@ -178,16 +172,11 @@ class LeakPipe {
 
       return true;
     }
-
   };
 
-  LeakPipeReceiver& Receiver() {
-    return receiver_;
-  }
+  LeakPipeReceiver& Receiver() { return receiver_; }
 
-  LeakPipeSender& Sender() {
-    return sender_;
-  }
+  LeakPipeSender& Sender() { return sender_; }
 
  private:
   LeakPipeReceiver receiver_;
@@ -198,4 +187,4 @@ class LeakPipe {
   int sv_[2];
 };
 
-#endif // LIBMEMUNREACHABLE_LEAK_PIPE_H_
+#endif  // LIBMEMUNREACHABLE_LEAK_PIPE_H_
diff --git a/libmemunreachable/LineBuffer.cpp b/libmemunreachable/LineBuffer.cpp
index d3580c017..0709fdd19 100644
--- a/libmemunreachable/LineBuffer.cpp
+++ b/libmemunreachable/LineBuffer.cpp
@@ -23,8 +23,8 @@
 
 #include "LineBuffer.h"
 
-LineBuffer::LineBuffer(int fd, char* buffer, size_t buffer_len) : fd_(fd), buffer_(buffer), buffer_len_(buffer_len) {
-}
+LineBuffer::LineBuffer(int fd, char* buffer, size_t buffer_len)
+    : fd_(fd), buffer_(buffer), buffer_len_(buffer_len) {}
 
 bool LineBuffer::GetLine(char** line, size_t* line_len) {
   while (true) {
diff --git a/libmemunreachable/LineBuffer.h b/libmemunreachable/LineBuffer.h
index a015c466e..604836bef 100644
--- a/libmemunreachable/LineBuffer.h
+++ b/libmemunreachable/LineBuffer.h
@@ -33,4 +33,4 @@ class LineBuffer {
   size_t bytes_ = 0;
 };
 
-#endif // _LIBMEMUNREACHABLE_LINE_BUFFER_H
+#endif  // _LIBMEMUNREACHABLE_LINE_BUFFER_H
diff --git a/libmemunreachable/LinkedList.h b/libmemunreachable/LinkedList.h
index 132842da1..f1b8a17fe 100644
--- a/libmemunreachable/LinkedList.h
+++ b/libmemunreachable/LinkedList.h
@@ -17,44 +17,43 @@
 #ifndef LIBMEMUNREACHABLE_LINKED_LIST_H_
 #define LIBMEMUNREACHABLE_LINKED_LIST_H_
 
-template<class T>
+template <class T>
 class LinkedList {
-public:
-  LinkedList() : next_(this), prev_(this), data_() {}
-  explicit LinkedList(T data) : LinkedList() {
-    data_ = data;
-  }
-  ~LinkedList() {}
-  void insert(LinkedList<T>& node) {
-    assert(node.empty());
-    node.next_ = this->next_;
-    node.next_->prev_ = &node;
-    this->next_ = &node;
-    node.prev_ = this;
-  }
-  void remove() {
-    this->next_->prev_ = this->prev_;
-    this->prev_->next_ = this->next_;
-    this->next_ = this;
-    this->prev_ = this;
-  }
-  T data() { return data_; }
-  bool empty() { return next_ == this && prev_ == this; }
-  LinkedList<T> *next() { return next_; }
-private:
-  LinkedList<T> *next_;
-  LinkedList<T> *prev_;
-  T data_;
+ public:
+  LinkedList() : next_(this), prev_(this), data_() {}
+  explicit LinkedList(T data) : LinkedList() { data_ = data; }
+  ~LinkedList() {}
+  void insert(LinkedList<T>& node) {
+    assert(node.empty());
+    node.next_ = this->next_;
+    node.next_->prev_ = &node;
+    this->next_ = &node;
+    node.prev_ = this;
+  }
+  void remove() {
+    this->next_->prev_ = this->prev_;
+    this->prev_->next_ = this->next_;
+    this->next_ = this;
+    this->prev_ = this;
+  }
+  T data() { return data_; }
+  bool empty() { return next_ == this && prev_ == this; }
+  LinkedList<T>* next() { return next_; }
+
+ private:
+  LinkedList<T>* next_;
+  LinkedList<T>* prev_;
+  T data_;
 };
 
-template<class T>
+template <class T>
 class LinkedListHead {
-public:
-  LinkedListHead() : node_() {}
-  ~LinkedListHead() {}
+ public:
+  LinkedListHead() : node_() {}
+  ~LinkedListHead() {}
 
-private:
-  LinkedList<T> node_;
+ private:
+  LinkedList<T> node_;
 };
 
 #endif
diff --git a/libmemunreachable/MemUnreachable.cpp b/libmemunreachable/MemUnreachable.cpp
index 1c8474428..870cd1d16 100644
--- a/libmemunreachable/MemUnreachable.cpp
+++ b/libmemunreachable/MemUnreachable.cpp
@@ -19,12 +19,12 @@
 #include
 #include
 #include
-#include
 #include
+#include
 
 #include
-#include
 #include
+#include
 
 #include "Allocator.h"
 #include "HeapWalker.h"
@@ -37,9 +37,9 @@
 #include "Semaphore.h"
 #include "ThreadCapture.h"
 
-#include "memunreachable/memunreachable.h"
 #include "bionic.h"
 #include "log.h"
+#include "memunreachable/memunreachable.h"
 
 const size_t Leak::contents_length;
 
 using namespace std::chrono_literals;
 
 class MemUnreachable {
  public:
-  MemUnreachable(pid_t pid, Allocator<void> allocator) : pid_(pid), allocator_(allocator),
-      heap_walker_(allocator_) {}
+  MemUnreachable(pid_t pid, Allocator<void> allocator)
+      : pid_(pid), allocator_(allocator), heap_walker_(allocator_) {}
   bool CollectAllocations(const allocator::vector<ThreadInfo>& threads,
-      const allocator::vector<Mapping>& mappings);
-  bool GetUnreachableMemory(allocator::vector<Leak>& leaks, size_t limit,
-      size_t* num_leaks, size_t* leak_bytes);
+                          const allocator::vector<Mapping>& mappings);
+  bool GetUnreachableMemory(allocator::vector<Leak>& leaks, size_t limit, size_t* num_leaks,
+                            size_t* leak_bytes);
   size_t Allocations() { return heap_walker_.Allocations(); }
   size_t AllocationBytes() { return heap_walker_.AllocationBytes(); }
+
  private:
   bool ClassifyMappings(const allocator::vector<Mapping>& mappings,
-      allocator::vector<Mapping>& heap_mappings,
-      allocator::vector<Mapping>& anon_mappings,
-      allocator::vector<Mapping>& globals_mappings,
-      allocator::vector<Mapping>& stack_mappings);
+                        allocator::vector<Mapping>& heap_mappings,
+                        allocator::vector<Mapping>& anon_mappings,
+                        allocator::vector<Mapping>& globals_mappings,
+                        allocator::vector<Mapping>& stack_mappings);
   DISALLOW_COPY_AND_ASSIGN(MemUnreachable);
   pid_t pid_;
   Allocator<void> allocator_;
@@ -68,16 +69,17 @@ class MemUnreachable {
 };
 
 static void HeapIterate(const Mapping& heap_mapping,
-    const std::function<void(uintptr_t, size_t)>& func) {
+                        const std::function<void(uintptr_t, size_t)>& func) {
   malloc_iterate(heap_mapping.begin, heap_mapping.end - heap_mapping.begin,
-      [](uintptr_t base, size_t size, void* arg) {
-        auto f = reinterpret_cast<const std::function<void(uintptr_t, size_t)>*>(arg);
-        (*f)(base, size);
-      }, const_cast<void*>(reinterpret_cast<const void*>(&func)));
+                 [](uintptr_t base, size_t size, void* arg) {
+                   auto f = reinterpret_cast<const std::function<void(uintptr_t, size_t)>*>(arg);
+                   (*f)(base, size);
+                 },
+                 const_cast<void*>(reinterpret_cast<const void*>(&func)));
 }
 
 bool MemUnreachable::CollectAllocations(const allocator::vector<ThreadInfo>& threads,
-    const allocator::vector<Mapping>& mappings) {
+                                        const allocator::vector<Mapping>& mappings) {
   MEM_ALOGI("searching process %d for allocations", pid_);
   allocator::vector<Mapping> heap_mappings{mappings};
   allocator::vector<Mapping> anon_mappings{mappings};
@@ -118,8 +120,8 @@ bool MemUnreachable::CollectAllocations(const allocator::vector<ThreadInfo>& thr
   return true;
 }
 
-bool MemUnreachable::GetUnreachableMemory(allocator::vector<Leak>& leaks,
-    size_t limit, size_t* num_leaks, size_t* leak_bytes) {
+bool MemUnreachable::GetUnreachableMemory(allocator::vector<Leak>& leaks, size_t limit,
+                                          size_t* num_leaks, size_t* leak_bytes) {
   MEM_ALOGI("sweeping process %d for unreachable memory", pid_);
   leaks.clear();
 
@@ -127,7 +129,6 @@ bool MemUnreachable::GetUnreachableMemory(allocator::vector<Leak>& leaks,
     return false;
   }
 
-
   allocator::vector<Range> leaked1{allocator_};
   heap_walker_.Leaked(leaked1, 0, num_leaks, leak_bytes);
 
@@ -152,12 +153,12 @@ bool MemUnreachable::GetUnreachableMemory(allocator::vector<Leak>& leaks,
   // in backtrace_map.
   leaks.reserve(leaked.size());
 
-  for (auto& it: leaked) {
+  for (auto& it : leaked) {
     leaks.emplace_back();
     Leak* leak = &leaks.back();
 
-    ssize_t num_backtrace_frames = malloc_backtrace(reinterpret_cast<void*>(it.range.begin),
-        leak->backtrace.frames, leak->backtrace.max_frames);
+    ssize_t num_backtrace_frames = malloc_backtrace(
+        reinterpret_cast<void*>(it.range.begin), leak->backtrace.frames, leak->backtrace.max_frames);
     if (num_backtrace_frames > 0) {
       leak->backtrace.num_frames = num_backtrace_frames;
 
@@ -183,14 +184,13 @@ bool MemUnreachable::GetUnreachableMemory(allocator::vector<Leak>& leaks,
     leak->referenced_size = it.referenced_size;
     leak->total_size = leak->size + leak->referenced_size;
     memcpy(leak->contents, reinterpret_cast<void*>(it.range.begin),
-        std::min(leak->size, Leak::contents_length));
+           std::min(leak->size, Leak::contents_length));
   }
 
   MEM_ALOGI("folding done");
 
-  std::sort(leaks.begin(), leaks.end(), [](const Leak& a, const Leak& b) {
-    return a.total_size > b.total_size;
-  });
+  std::sort(leaks.begin(), leaks.end(),
+            [](const Leak& a, const Leak& b) { return a.total_size > b.total_size; });
 
   if (leaks.size() > limit) {
     leaks.resize(limit);
@@ -205,11 +205,10 @@ static bool has_prefix(const allocator::string& s, const char* prefix) {
 }
 
 bool MemUnreachable::ClassifyMappings(const allocator::vector<Mapping>& mappings,
-    allocator::vector<Mapping>& heap_mappings,
-    allocator::vector<Mapping>& anon_mappings,
-    allocator::vector<Mapping>& globals_mappings,
-    allocator::vector<Mapping>& stack_mappings)
-{
+                                      allocator::vector<Mapping>& heap_mappings,
+                                      allocator::vector<Mapping>& anon_mappings,
+                                      allocator::vector<Mapping>& globals_mappings,
+                                      allocator::vector<Mapping>& stack_mappings) {
   heap_mappings.clear();
   anon_mappings.clear();
   globals_mappings.clear();
@@ -245,7 +244,8 @@ bool MemUnreachable::ClassifyMappings(const allocator::vector<Mapping>& mappings,
       stack_mappings.emplace_back(*it);
     } else if (mapping_name.size() == 0) {
       globals_mappings.emplace_back(*it);
-    } else if (has_prefix(mapping_name, "[anon:") && mapping_name != "[anon:leak_detector_malloc]") {
+    } else if (has_prefix(mapping_name, "[anon:") &&
+               mapping_name != "[anon:leak_detector_malloc]") {
       // TODO(ccross): it would be nice to treat named anonymous mappings as
       // possible leaks, but naming something in a .bss or .data section makes
       // it impossible to distinguish them from mmaped and then named mappings.
@@ -256,7 +256,7 @@ bool MemUnreachable::ClassifyMappings(const allocator::vector<Mapping>& mappings,
   return true;
 }
 
-template<typename T>
+template <typename T>
 static inline const char* plural(T val) {
   return (val == 1) ? "" : "s";
 }
@@ -403,7 +403,6 @@ bool GetUnreachableMemory(UnreachableMemoryInfo& info, size_t limit) {
 }
 
 std::string Leak::ToString(bool log_contents) const {
-
   std::ostringstream oss;
 
   oss << "  " << std::dec << size;
@@ -492,8 +491,8 @@ std::string UnreachableMemoryInfo::ToString(bool log_contents) const {
   oss << std::endl;
 
   for (auto it = leaks.begin(); it != leaks.end(); it++) {
-      oss << it->ToString(log_contents);
-      oss << std::endl;
+    oss << it->ToString(log_contents);
+    oss << std::endl;
   }
 
   return oss.str();
@@ -523,7 +522,6 @@ bool LogUnreachableMemory(bool log_contents, size_t limit) {
   return true;
 }
 
-
 bool NoLeaks() {
   UnreachableMemoryInfo info;
   if (!GetUnreachableMemory(info, 0)) {
diff --git a/libmemunreachable/ProcessMappings.cpp b/libmemunreachable/ProcessMappings.cpp
index 57b232128..42e532627 100644
--- a/libmemunreachable/ProcessMappings.cpp
+++ b/libmemunreachable/ProcessMappings.cpp
@@ -14,8 +14,8 @@
  * limitations under the License.
  */
 
-#include
 #include
+#include
 
 #include
 #include
 
@@ -42,8 +42,8 @@ bool ProcessMappings(pid_t pid, allocator::vector<Mapping>& mappings) {
     int name_pos;
     char perms[5];
     Mapping mapping{};
-    if (sscanf(line, "%" SCNxPTR "-%" SCNxPTR " %4s %*x %*x:%*x %*d %n",
-        &mapping.begin, &mapping.end, perms, &name_pos) == 3) {
+    if (sscanf(line, "%" SCNxPTR "-%" SCNxPTR " %4s %*x %*x:%*x %*d %n", &mapping.begin,
+               &mapping.end, perms, &name_pos) == 3) {
       if (perms[0] == 'r') {
         mapping.read = true;
       }
diff --git a/libmemunreachable/ProcessMappings.h b/libmemunreachable/ProcessMappings.h
index d3b7496df..81b33dc16 100644
--- a/libmemunreachable/ProcessMappings.h
+++ b/libmemunreachable/ProcessMappings.h
@@ -33,4 +33,4 @@ struct Mapping {
 // the line data.
 bool ProcessMappings(pid_t pid, allocator::vector<Mapping>& mappings);
 
-#endif // LIBMEMUNREACHABLE_PROCESS_MAPPING_H_
+#endif  // LIBMEMUNREACHABLE_PROCESS_MAPPING_H_
diff --git a/libmemunreachable/PtracerThread.cpp b/libmemunreachable/PtracerThread.cpp
index 73b0493af..41efa9c6f 100644
--- a/libmemunreachable/PtracerThread.cpp
+++ b/libmemunreachable/PtracerThread.cpp
@@ -23,17 +23,17 @@
 #include
 #include
 #include
-#include
 #include
 #include
 #include
 #include
+#include
 
 #include "android-base/macros.h"
 
+#include "PtracerThread.h"
 #include "anon_vma_naming.h"
 #include "log.h"
-#include "PtracerThread.h"
 
 class Stack {
  public:
@@ -41,7 +41,7 @@ class Stack {
     int prot = PROT_READ | PROT_WRITE;
     int flags = MAP_PRIVATE | MAP_ANONYMOUS;
     page_size_ = sysconf(_SC_PAGE_SIZE);
-    size_ += page_size_*2; // guard pages
+    size_ += page_size_ * 2;  // guard pages
     base_ = mmap(NULL, size_, prot, flags, -1, 0);
     if (base_ == MAP_FAILED) {
       base_ = NULL;
@@ -52,22 +52,20 @@ class Stack {
     mprotect(base_, page_size_, PROT_NONE);
     mprotect(top(), page_size_, PROT_NONE);
   };
-  ~Stack() {
-    munmap(base_, size_);
-  };
+  ~Stack() { munmap(base_, size_); };
   void* top() {
     return reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(base_) + size_ - page_size_);
   };
+
  private:
   DISALLOW_COPY_AND_ASSIGN(Stack);
 
-  void *base_;
+  void* base_;
   size_t size_;
   size_t page_size_;
 };
 
-PtracerThread::PtracerThread(const std::function<int()>& func) :
-    child_pid_(0) {
+PtracerThread::PtracerThread(const std::function<int()>& func) : child_pid_(0) {
   stack_ = std::make_unique<Stack>(PTHREAD_STACK_MIN);
   if (stack_->top() == nullptr) {
     MEM_LOG_ALWAYS_FATAL("failed to mmap child stack: %s", strerror(errno));
@@ -93,14 +91,13 @@ bool PtracerThread::Start() {
   std::unique_lock<std::mutex> lk(m_);
 
   // Convert from void(*)(void*) to lambda with captures
-  auto proxy = [](void *arg) -> int {
+  auto proxy = [](void* arg) -> int {
     prctl(PR_SET_NAME, "libmemunreachable ptrace thread");
     return (*reinterpret_cast<std::function<int()>*>(arg))();
   };
 
-  child_pid_ = clone(proxy, stack_->top(),
-      CLONE_VM|CLONE_FS|CLONE_FILES/*|CLONE_UNTRACED*/,
-      reinterpret_cast<void*>(&func_));
+  child_pid_ = clone(proxy, stack_->top(), CLONE_VM | CLONE_FS | CLONE_FILES /*|CLONE_UNTRACED*/,
+                     reinterpret_cast<void*>(&func_));
   if (child_pid_ < 0) {
     MEM_ALOGE("failed to clone child: %s", strerror(errno));
     return false;
diff --git a/libmemunreachable/PtracerThread.h b/libmemunreachable/PtracerThread.h
index f88b5994f..ddf902677 100644
--- a/libmemunreachable/PtracerThread.h
+++ b/libmemunreachable/PtracerThread.h
@@ -36,6 +36,7 @@ class PtracerThread {
   ~PtracerThread();
   bool Start();
   int Join();
+
  private:
   void SetTracer(pid_t);
   void ClearTracer();
@@ -47,4 +48,4 @@ class PtracerThread {
   pid_t child_pid_;
 };
 
-#endif // LIBMEMUNREACHABLE_PTRACER_THREAD_H_
+#endif  // LIBMEMUNREACHABLE_PTRACER_THREAD_H_
diff --git a/libmemunreachable/ScopedAlarm.h b/libmemunreachable/ScopedAlarm.h
index 287f479a9..53ea11206 100644
--- a/libmemunreachable/ScopedAlarm.h
+++ b/libmemunreachable/ScopedAlarm.h
@@ -27,11 +27,9 @@ class ScopedAlarm {
  public:
   ScopedAlarm(std::chrono::microseconds us, std::function<void()> func) {
     func_ = func;
-    struct sigaction oldact{};
-    struct sigaction act{};
-    act.sa_handler = [](int) {
-      ScopedAlarm::func_();
-    };
+    struct sigaction oldact {};
+    struct sigaction act {};
+    act.sa_handler = [](int) { ScopedAlarm::func_(); };
     sigaction(SIGALRM, &act, &oldact);
 
     std::chrono::seconds s = std::chrono::duration_cast<std::chrono::seconds>(us);
@@ -43,10 +41,11 @@ class ScopedAlarm {
   ~ScopedAlarm() {
     itimerval t = itimerval{};
     setitimer(ITIMER_REAL, &t, NULL);
-    struct sigaction act{};
+    struct sigaction act {};
     act.sa_handler = SIG_DFL;
     sigaction(SIGALRM, &act, NULL);
   }
+
  private:
   static std::function<void()> func_;
 };
diff --git a/libmemunreachable/ScopedDisableMalloc.h b/libmemunreachable/ScopedDisableMalloc.h
index 758d317aa..7d2f630b4 100644
--- a/libmemunreachable/ScopedDisableMalloc.h
+++ b/libmemunreachable/ScopedDisableMalloc.h
@@ -21,16 +21,14 @@
 
 #include "android-base/macros.h"
 
+#include "ScopedAlarm.h"
 #include "bionic.h"
 #include "log.h"
-#include "ScopedAlarm.h"
 
-class DisableMallocGuard{
+class DisableMallocGuard {
  public:
-  DisableMallocGuard() : disabled_(false){}
-  ~DisableMallocGuard() {
-    Enable();
-  }
+  DisableMallocGuard() : disabled_(false) {}
+  ~DisableMallocGuard() { Enable(); }
 
   void Disable() {
     if (!disabled_) {
@@ -45,6 +43,7 @@ class DisableMallocGuard {
       disabled_ = false;
     }
   }
+
  private:
   DISALLOW_COPY_AND_ASSIGN(DisableMallocGuard);
   bool disabled_;
@@ -59,13 +58,9 @@ class DisableMallocGuard {
 // here.
 class ScopedDisableMalloc {
  public:
-  ScopedDisableMalloc() {
-    disable_malloc_.Disable();
-  }
+  ScopedDisableMalloc() { disable_malloc_.Disable(); }
 
-  ~ScopedDisableMalloc() {
-    disable_malloc_.Enable();
-  }
+  ~ScopedDisableMalloc() { disable_malloc_.Enable(); }
 
  private:
   DISALLOW_COPY_AND_ASSIGN(ScopedDisableMalloc);
@@ -74,18 +69,15 @@ class ScopedDisableMalloc {
 
 class ScopedDisableMallocTimeout {
  public:
-  explicit ScopedDisableMallocTimeout(std::chrono::milliseconds timeout = std::chrono::milliseconds(2000)) :
-      timeout_(timeout), timed_out_(false), disable_malloc_() {
+  explicit ScopedDisableMallocTimeout(
+      std::chrono::milliseconds timeout = std::chrono::milliseconds(2000))
+      : timeout_(timeout), timed_out_(false), disable_malloc_() {
     Disable();
   }
 
-  ~ScopedDisableMallocTimeout() {
-    Enable();
-  }
+  ~ScopedDisableMallocTimeout() { Enable(); }
 
-  bool timed_out() {
-    return timed_out_;
-  }
+  bool timed_out() { return timed_out_; }
 
   void Enable() {
     disable_malloc_.Enable();
@@ -110,4 +102,4 @@ class ScopedDisableMallocTimeout {
   DisableMallocGuard disable_malloc_;
 };
 
-#endif // LIBMEMUNREACHABLE_SCOPED_DISABLE_MALLOC_H_
+#endif  // LIBMEMUNREACHABLE_SCOPED_DISABLE_MALLOC_H_
diff --git a/libmemunreachable/ScopedPipe.h b/libmemunreachable/ScopedPipe.h
index 7f449533d..76bd974bb 100644
--- a/libmemunreachable/ScopedPipe.h
+++ b/libmemunreachable/ScopedPipe.h
@@ -29,28 +29,22 @@ class ScopedPipe {
       MEM_LOG_ALWAYS_FATAL("failed to open pipe");
     }
   }
-  ~ScopedPipe() {
-    Close();
-  }
+  ~ScopedPipe() { Close(); }
 
   ScopedPipe(ScopedPipe&& other) {
     SetReceiver(other.ReleaseReceiver());
     SetSender(other.ReleaseSender());
   }
 
-  ScopedPipe& operator = (ScopedPipe&& other) {
+  ScopedPipe& operator=(ScopedPipe&& other) {
     SetReceiver(other.ReleaseReceiver());
     SetSender(other.ReleaseSender());
     return *this;
   }
 
-  void CloseReceiver() {
-    close(ReleaseReceiver());
-  }
+  void CloseReceiver() { close(ReleaseReceiver()); }
 
-  void CloseSender() {
-    close(ReleaseSender());
-  }
+  void CloseSender() { close(ReleaseSender()); }
 
   void Close() {
     CloseReceiver();
diff --git a/libmemunreachable/ScopedSignalHandler.h b/libmemunreachable/ScopedSignalHandler.h
index fab38ed3e..58ac2aaef 100644
--- a/libmemunreachable/ScopedSignalHandler.h
+++ b/libmemunreachable/ScopedSignalHandler.h
@@ -31,9 +31,7 @@ class ScopedSignalHandler {
   using Fn = std::function<void(ScopedSignalHandler&, int, siginfo_t*, void*)>;
 
   explicit ScopedSignalHandler(Allocator<Fn> allocator) : allocator_(allocator), signal_(-1) {}
-  ~ScopedSignalHandler() {
-    reset();
-  }
+  ~ScopedSignalHandler() { reset(); }
 
   template <typename F>
   void install(int signal, F&& f) {
@@ -65,7 +63,6 @@ class ScopedSignalHandler {
     }
   }
 
-
  private:
   using SignalFn = std::function<void(int, siginfo_t*, void*)>;
   DISALLOW_COPY_AND_ASSIGN(ScopedSignalHandler);
@@ -77,4 +74,4 @@ class ScopedSignalHandler {
   static SignalFn handler_;
 };
 
-#endif // LIBMEMUNREACHABLE_SCOPED_SIGNAL_HANDLER_H_
+#endif  // LIBMEMUNREACHABLE_SCOPED_SIGNAL_HANDLER_H_
diff --git a/libmemunreachable/Semaphore.h b/libmemunreachable/Semaphore.h
index 6bcf4ea97..6d39a9316 100644
--- a/libmemunreachable/Semaphore.h
+++ b/libmemunreachable/Semaphore.h
@@ -29,7 +29,7 @@ class Semaphore {
 
   void Wait(std::chrono::milliseconds ms) {
     std::unique_lock<std::mutex> lk(m_);
-    cv_.wait_for(lk, ms, [&]{
+    cv_.wait_for(lk, ms, [&] {
       if (count_ > 0) {
         count_--;
         return true;
@@ -44,6 +44,7 @@ class Semaphore {
     }
     cv_.notify_one();
   }
+
  private:
   DISALLOW_COPY_AND_ASSIGN(Semaphore);
 
@@ -52,5 +53,4 @@ class Semaphore {
   std::condition_variable cv_;
 };
 
-
-#endif // LIBMEMUNREACHABLE_SEMAPHORE_H_
+#endif  // LIBMEMUNREACHABLE_SEMAPHORE_H_
diff --git a/libmemunreachable/Tarjan.h b/libmemunreachable/Tarjan.h
index 2546341d5..86c73605f 100644
--- a/libmemunreachable/Tarjan.h
+++ b/libmemunreachable/Tarjan.h
@@ -24,7 +24,7 @@
 
 #include "Allocator.h"
 
-template<class T>
+template <class T>
 class Node {
  public:
   allocator::set<Node<T>*> references_in;
@@ -34,39 +34,41 @@ class Node {
 
   T* ptr;
 
-  Node(T* ptr, Allocator<Node> allocator) : references_in(allocator), references_out(allocator),
-      ptr(ptr) {};
+  Node(T* ptr, Allocator<Node> allocator)
+      : references_in(allocator), references_out(allocator), ptr(ptr){};
   Node(Node&& rhs) = default;
   void Edge(Node<T>* ref) {
     references_out.emplace(ref);
     ref->references_in.emplace(this);
   }
-  template<class F>
+  template <class F>
   void Foreach(F&& f) {
-    for (auto& node: references_out) {
+    for (auto& node : references_out) {
       f(node->ptr);
     }
   }
+
  private:
   DISALLOW_COPY_AND_ASSIGN(Node);
 };
 
-template<class T>
+template <class T>
 using Graph = allocator::vector<Node<T>*>;
 
-template<class T>
+template <class T>
 using SCC = allocator::vector<Node<T>*>;
 
-template<class T>
+template <class T>
 using SCCList = allocator::vector<SCC<T>>;
 
-template<class T>
+template <class T>
 class TarjanAlgorithm {
  public:
-  explicit TarjanAlgorithm(Allocator<void> allocator) : index_(0),
-      stack_(allocator), components_(allocator) {}
+  explicit TarjanAlgorithm(Allocator<void> allocator)
+      : index_(0), stack_(allocator), components_(allocator) {}
 
   void Execute(Graph<T>& graph, SCCList<T>& out);
+
  private:
   static constexpr size_t UNDEFINED_INDEX = static_cast<size_t>(-1);
   void Tarjan(Node<T>* vertex, Graph<T>& graph);
@@ -76,17 +78,17 @@ class TarjanAlgorithm {
   SCCList<T> components_;
 };
 
-template<class T>
+template <class T>
 void TarjanAlgorithm<T>::Execute(Graph<T>& graph, SCCList<T>& out) {
   stack_.clear();
   components_.clear();
   index_ = 0;
-  for (auto& it: graph) {
+  for (auto& it : graph) {
     it->index = UNDEFINED_INDEX;
     it->lowlink = UNDEFINED_INDEX;
   }
 
-  for (auto& it: graph) {
+  for (auto& it : graph) {
     if (it->index == UNDEFINED_INDEX) {
       Tarjan(it, graph);
     }
@@ -94,14 +96,14 @@ void TarjanAlgorithm<T>::Execute(Graph<T>& graph, SCCList<T>& out) {
   out.swap(components_);
 }
 
-template<class T>
+template <class T>
 void TarjanAlgorithm<T>::Tarjan(Node<T>* vertex, Graph<T>& graph) {
   assert(vertex->index == UNDEFINED_INDEX);
   vertex->index = index_;
   vertex->lowlink = index_;
   index_++;
   stack_.push_back(vertex);
-  for (auto& it: vertex->references_out) {
+  for (auto& it : vertex->references_out) {
     Node<T>* vertex_next = it;
     if (vertex_next->index == UNDEFINED_INDEX) {
       Tarjan(vertex_next, graph);
@@ -123,10 +125,10 @@ void TarjanAlgorithm<T>::Tarjan(Node<T>* vertex, Graph<T>& graph) {
   }
 }
 
-template<class T>
+template <class T>
 void Tarjan(Graph<T>& graph, SCCList<T>& out) {
   TarjanAlgorithm<T> tarjan{graph.get_allocator()};
   tarjan.Execute(graph, out);
 }
 
-#endif // LIBMEMUNREACHABLE_TARJAN_H_
+#endif  // LIBMEMUNREACHABLE_TARJAN_H_
diff --git a/libmemunreachable/ThreadCapture.cpp b/libmemunreachable/ThreadCapture.cpp
index 3891f2d3d..a7bd91cf0 100644
--- a/libmemunreachable/ThreadCapture.cpp
+++ b/libmemunreachable/ThreadCapture.cpp
@@ -21,13 +21,13 @@
 #include
 #include
 #include
-#include
 #include
 #include
 #include
 #include
 #include
 #include
+#include
 
 #include
 #include
@@ -50,12 +50,12 @@
 // Convert a pid > 0 to a string.  sprintf might allocate, so we can't use it.
 // Returns a pointer somewhere in buf to a null terminated string, or NULL
 // on error.
-static char *pid_to_str(char *buf, size_t len, pid_t pid) {
+static char* pid_to_str(char* buf, size_t len, pid_t pid) {
   if (pid <= 0) {
     return nullptr;
   }
 
-  char *ptr = buf + len - 1;
+  char* ptr = buf + len - 1;
   *ptr = 0;
   while (pid > 0) {
     ptr--;
@@ -79,6 +79,7 @@ class ThreadCaptureImpl {
   bool ReleaseThread(pid_t tid);
   bool CapturedThreadInfo(ThreadInfoList& threads);
   void InjectTestFunc(std::function<void(pid_t)>&& f) { inject_test_func_ = f; }
+
  private:
   int CaptureThread(pid_t tid);
   bool ReleaseThread(pid_t tid, unsigned int signal);
@@ -92,9 +93,8 @@ class ThreadCaptureImpl {
   std::function<void(pid_t)> inject_test_func_;
 };
 
-ThreadCaptureImpl::ThreadCaptureImpl(pid_t pid, Allocator<ThreadCaptureImpl>& allocator) :
-    captured_threads_(allocator), allocator_(allocator), pid_(pid) {
-}
+ThreadCaptureImpl::ThreadCaptureImpl(pid_t pid, Allocator<ThreadCaptureImpl>& allocator)
+    : captured_threads_(allocator), allocator_(allocator), pid_(pid) {}
 
 bool ThreadCaptureImpl::ListThreads(TidList& tids) {
   tids.clear();
@@ -115,11 +115,11 @@ bool ThreadCaptureImpl::ListThreads(TidList& tids) {
   }
 
   struct linux_dirent64 {
-    uint64_t  d_ino;
-    int64_t   d_off;
-    uint16_t  d_reclen;
-    char      d_type;
-    char      d_name[];
+    uint64_t d_ino;
+    int64_t d_off;
+    uint16_t d_reclen;
+    char d_type;
+    char d_name[];
   } __attribute((packed));
   char dirent_buf[4096];
   ssize_t nread;
@@ -209,7 +209,7 @@ int ThreadCaptureImpl::PtraceAttach(pid_t tid) {
 bool ThreadCaptureImpl::PtraceThreadInfo(pid_t tid, ThreadInfo& thread_info) {
   thread_info.tid = tid;
 
-  const unsigned int max_num_regs = 128; // larger than number of registers on any device
+  const unsigned int max_num_regs = 128;  // larger than number of registers on any device
   uintptr_t regs[max_num_regs];
   struct iovec iovec;
   iovec.iov_base = &regs;
@@ -243,7 +243,7 @@ bool ThreadCaptureImpl::PtraceThreadInfo(pid_t tid, ThreadInfo& thread_info) {
 
   thread_info.stack = std::pair<uintptr_t, uintptr_t>(regs[sp], 0);
 
-  return  true;
+  return true;
 }
 
 int ThreadCaptureImpl::CaptureThread(pid_t tid) {
@@ -266,7 +266,7 @@ int ThreadCaptureImpl::CaptureThread(pid_t tid) {
 
   unsigned int resume_signal = 0;
 
-  unsigned int signal =  WSTOPSIG(status);
+  unsigned int signal = WSTOPSIG(status);
 
   if ((status >> 16) == PTRACE_EVENT_STOP) {
     switch (signal) {
       case SIGSTOP:
@@ -307,7 +307,7 @@ bool ThreadCaptureImpl::ReleaseThread(pid_t tid, unsigned int signal) {
 
 bool ThreadCaptureImpl::ReleaseThreads() {
   bool ret = true;
-  for (auto it = captured_threads_.begin(); it != captured_threads_.end(); ) {
+  for (auto it = captured_threads_.begin(); it != captured_threads_.end();) {
     if (ReleaseThread(it->first, it->second)) {
       it = captured_threads_.erase(it);
     } else {
diff --git a/libmemunreachable/ThreadCapture.h b/libmemunreachable/ThreadCapture.h
index 1022cad1f..d20966028 100644
--- a/libmemunreachable/ThreadCapture.h
+++ b/libmemunreachable/ThreadCapture.h
@@ -33,7 +33,7 @@ using ThreadInfoList = allocator::vector<ThreadInfo>;
 class ThreadCaptureImpl;
 
 class ThreadCapture {
-public:
+ public:
   ThreadCapture(pid_t pid, Allocator<ThreadCapture> allocator);
   ~ThreadCapture();
@@ -44,7 +44,7 @@ public:
   bool CapturedThreadInfo(ThreadInfoList& threads);
   void InjectTestFunc(std::function<void(pid_t)>&& f);
 
-private:
+ private:
   ThreadCapture(const ThreadCapture&) = delete;
   void operator=(const ThreadCapture&) = delete;
 
diff --git a/libmemunreachable/anon_vma_naming.h b/libmemunreachable/anon_vma_naming.h
index 1e4ade1d3..fb31e4175 100644
--- a/libmemunreachable/anon_vma_naming.h
+++ b/libmemunreachable/anon_vma_naming.h
@@ -19,7 +19,7 @@
 
 #include
 
-#define PR_SET_VMA 0x53564d41
-#define PR_SET_VMA_ANON_NAME 0
+#define PR_SET_VMA 0x53564d41
+#define PR_SET_VMA_ANON_NAME 0
 
-#endif // LIBMEMUNREACHABLE_ANON_VMA_NAMING_H_
+#endif  // LIBMEMUNREACHABLE_ANON_VMA_NAMING_H_
diff --git a/libmemunreachable/bionic.h b/libmemunreachable/bionic.h
index 83d07a8d0..dd1ec79d6 100644
--- a/libmemunreachable/bionic.h
+++ b/libmemunreachable/bionic.h
@@ -17,9 +17,9 @@
 #ifndef LIBMEMUNREACHABLE_BIONIC_H_
 #define LIBMEMUNREACHABLE_BIONIC_H_
 
-#include
 #include
 #include
+#include
 
 __BEGIN_DECLS
 
@@ -27,9 +27,9 @@ __BEGIN_DECLS
 extern void malloc_disable();
 extern void malloc_enable();
 extern int malloc_iterate(uintptr_t base, size_t size,
-    void (*callback)(uintptr_t base, size_t size, void* arg), void* arg);
+                          void (*callback)(uintptr_t base, size_t size, void* arg), void* arg);
 extern ssize_t malloc_backtrace(void* pointer, uintptr_t* frames, size_t frame_count);
 
 __END_DECLS
 
-#endif // LIBMEMUNREACHABLE_BIONIC_H_
+#endif  // LIBMEMUNREACHABLE_BIONIC_H_
diff --git a/libmemunreachable/include/memunreachable/memunreachable.h b/libmemunreachable/include/memunreachable/memunreachable.h
index 9b227fd3b..f6249e3a6 100644
--- a/libmemunreachable/include/memunreachable/memunreachable.h
+++ b/libmemunreachable/include/memunreachable/memunreachable.h
@@ -21,8 +21,8 @@
 
 #ifdef __cplusplus
 
-#include
 #include
+#include
 
 struct Leak {
   uintptr_t begin;
@@ -83,4 +83,4 @@ bool NoLeaks();
 
 __END_DECLS
 
-#endif // LIBMEMUNREACHABLE_MEMUNREACHABLE_H_
+#endif  // LIBMEMUNREACHABLE_MEMUNREACHABLE_H_
diff --git a/libmemunreachable/log.h b/libmemunreachable/log.h
index 10b83db21..0f1bb8a08 100644
--- a/libmemunreachable/log.h
+++ b/libmemunreachable/log.h
@@ -43,4 +43,4 @@
 
 #endif
 
-#endif // LIBMEMUNREACHABLE_LOG_H_
+#endif  // LIBMEMUNREACHABLE_LOG_H_
diff --git a/libmemunreachable/tests/Allocator_test.cpp b/libmemunreachable/tests/Allocator_test.cpp
index 21c821890..0bb4f3134 100644
--- a/libmemunreachable/tests/Allocator_test.cpp
+++ b/libmemunreachable/tests/Allocator_test.cpp
@@ -16,44 +16,42 @@
 
 #include
 
-#include
 #include
-
+#include
 
 std::function<void()> ScopedAlarm::func_;
 
 class AllocatorTest : public testing::Test {
  protected:
   AllocatorTest() : heap(), disable_malloc_() {}
-  virtual void SetUp() {
-    heap_count = 0;
-  }
+  virtual void SetUp() { heap_count = 0; }
   virtual void TearDown() {
     ASSERT_EQ(heap_count, 0);
     ASSERT_TRUE(heap.empty());
     ASSERT_FALSE(disable_malloc_.timed_out());
   }
   Heap heap;
+
  private:
   ScopedDisableMallocTimeout disable_malloc_;
 };
 
 TEST_F(AllocatorTest, simple) {
   Allocator<char> allocator(heap);
-  void *ptr = allocator.allocate();
+  void* ptr = allocator.allocate();
   ASSERT_TRUE(ptr != NULL);
   allocator.deallocate(ptr);
 }
 
 TEST_F(AllocatorTest, multiple) {
   Allocator<char> allocator(heap);
-  void *ptr1 = allocator.allocate();
+  void* ptr1 = allocator.allocate();
   ASSERT_TRUE(ptr1 != NULL);
-  void *ptr2 = allocator.allocate();
+  void* ptr2 = allocator.allocate();
   ASSERT_TRUE(ptr2 != NULL);
   ASSERT_NE(ptr1, ptr2);
   allocator.deallocate(ptr1);
-  void *ptr3 = allocator.allocate();
+  void* ptr3 = allocator.allocate();
   ASSERT_EQ(ptr1, ptr3);
diff --git a/libmemunreachable/include/memunreachable/memunreachable.h b/libmemunreachable/include/memunreachable/memunreachable.h
index 9b227fd3b..f6249e3a6 100644
--- a/libmemunreachable/include/memunreachable/memunreachable.h
+++ b/libmemunreachable/include/memunreachable/memunreachable.h
@@ -21,8 +21,8 @@
 
 #ifdef __cplusplus
 
-#include
 #include
+#include
 
 struct Leak {
   uintptr_t begin;
@@ -83,4 +83,4 @@ bool NoLeaks();
 
 __END_DECLS
 
-#endif // LIBMEMUNREACHABLE_MEMUNREACHABLE_H_
+#endif  // LIBMEMUNREACHABLE_MEMUNREACHABLE_H_
diff --git a/libmemunreachable/log.h b/libmemunreachable/log.h
index 10b83db21..0f1bb8a08 100644
--- a/libmemunreachable/log.h
+++ b/libmemunreachable/log.h
@@ -43,4 +43,4 @@
 
 #endif
 
-#endif // LIBMEMUNREACHABLE_LOG_H_
+#endif  // LIBMEMUNREACHABLE_LOG_H_
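The public header above declares NoLeaks() for callers; in AOSP the same header also exposes GetUnreachableMemoryString(), though the exact signature used here should be treated as assumed rather than quoted from the hunk. A minimal usage sketch:

    #include <memunreachable/memunreachable.h>

    #include <iostream>

    // Debug-build leak check: cheap boolean probe first, detailed dump only
    // on failure. GetUnreachableMemoryString()'s default arguments are assumed.
    void CheckForLeaks() {
      if (!NoLeaks()) {
        std::cout << GetUnreachableMemoryString() << std::endl;
      }
    }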
diff --git a/libmemunreachable/tests/Allocator_test.cpp b/libmemunreachable/tests/Allocator_test.cpp
index 21c821890..0bb4f3134 100644
--- a/libmemunreachable/tests/Allocator_test.cpp
+++ b/libmemunreachable/tests/Allocator_test.cpp
@@ -16,44 +16,42 @@
 
 #include
 
-#include
 #include
-
+#include
 
 std::function<void()> ScopedAlarm::func_;
 
 class AllocatorTest : public testing::Test {
  protected:
   AllocatorTest() : heap(), disable_malloc_() {}
-  virtual void SetUp() {
-    heap_count = 0;
-  }
+  virtual void SetUp() { heap_count = 0; }
   virtual void TearDown() {
     ASSERT_EQ(heap_count, 0);
     ASSERT_TRUE(heap.empty());
     ASSERT_FALSE(disable_malloc_.timed_out());
   }
   Heap heap;
+
  private:
   ScopedDisableMallocTimeout disable_malloc_;
 };
 
 TEST_F(AllocatorTest, simple) {
   Allocator allocator(heap);
-  void *ptr = allocator.allocate();
+  void* ptr = allocator.allocate();
   ASSERT_TRUE(ptr != NULL);
   allocator.deallocate(ptr);
 }
 
 TEST_F(AllocatorTest, multiple) {
   Allocator allocator(heap);
-  void *ptr1 = allocator.allocate();
+  void* ptr1 = allocator.allocate();
   ASSERT_TRUE(ptr1 != NULL);
-  void *ptr2 = allocator.allocate();
+  void* ptr2 = allocator.allocate();
   ASSERT_TRUE(ptr2 != NULL);
   ASSERT_NE(ptr1, ptr2);
   allocator.deallocate(ptr1);
-  void *ptr3 = allocator.allocate();
+  void* ptr3 = allocator.allocate();
   ASSERT_EQ(ptr1, ptr3);
   allocator.deallocate(ptr3);
   allocator.deallocate(ptr2);
@@ -63,7 +61,7 @@ TEST_F(AllocatorTest, many) {
   const int num = 4096;
   const int size = 128;
   Allocator allocator(heap);
-  void *ptr[num];
+  void* ptr[num];
   for (int i = 0; i < num; i++) {
     ptr[i] = allocator.allocate();
     memset(ptr[i], 0xaa, size);
@@ -87,7 +85,7 @@ TEST_F(AllocatorTest, many) {
 TEST_F(AllocatorTest, large) {
   const size_t size = 1024 * 1024;
   Allocator allocator(heap);
-  void *ptr = allocator.allocate();
+  void* ptr = allocator.allocate();
   memset(ptr, 0xaa, size);
   allocator.deallocate(ptr);
 }
@@ -96,7 +94,7 @@ TEST_F(AllocatorTest, many_large) {
   const int num = 128;
   const int size = 1024 * 1024;
   Allocator allocator(heap);
-  void *ptr[num];
+  void* ptr[num];
   for (int i = 0; i < num; i++) {
     ptr[i] = allocator.allocate();
     memset(ptr[i], 0xaa, size);
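The tests above exercise the project's STL-compatible Allocator, whose template argument these hunks elide. A hedged sketch of the usage pattern they rely on: one Heap, typed Allocator<T> handles carved from it, and containers parameterized on it (the std::vector alias shape is assumed from the allocator::vector uses elsewhere in this patch):

    #include <vector>

    void Example(Heap& heap) {
      // Raw typed allocation, as in AllocatorTest::simple.
      Allocator<int> int_allocator(heap);
      void* p = int_allocator.allocate();
      int_allocator.deallocate(p);

      // Container use: assumed to match allocator::vector<T>, i.e.
      // std::vector<T, Allocator<T>>, in this library.
      std::vector<int, Allocator<int>> ints{Allocator<int>(heap)};
      ints.push_back(42);
    }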
diff --git a/libmemunreachable/tests/DisableMalloc_test.cpp b/libmemunreachable/tests/DisableMalloc_test.cpp
index 4e6155b81..2db584839 100644
--- a/libmemunreachable/tests/DisableMalloc_test.cpp
+++ b/libmemunreachable/tests/DisableMalloc_test.cpp
@@ -19,8 +19,8 @@
 
 #include
 #include
 
-#include
 #include
+#include
 
 using namespace std::chrono_literals;
@@ -36,75 +36,83 @@ class DisableMallocTest : public ::testing::Test {
 };
 
 TEST_F(DisableMallocTest, reenable) {
-  ASSERT_EXIT({
-    alarm(100ms);
-    void *ptr1 = malloc(128);
-    ASSERT_NE(ptr1, nullptr);
-    free(ptr1);
-    {
-      ScopedDisableMalloc disable_malloc;
-    }
-    void *ptr2 = malloc(128);
-    ASSERT_NE(ptr2, nullptr);
-    free(ptr2);
-    _exit(1);
-  }, ::testing::ExitedWithCode(1), "");
+  ASSERT_EXIT(
+      {
+        alarm(100ms);
+        void* ptr1 = malloc(128);
+        ASSERT_NE(ptr1, nullptr);
+        free(ptr1);
+        { ScopedDisableMalloc disable_malloc; }
+        void* ptr2 = malloc(128);
+        ASSERT_NE(ptr2, nullptr);
+        free(ptr2);
+        _exit(1);
+      },
+      ::testing::ExitedWithCode(1), "");
 }
 
 TEST_F(DisableMallocTest, deadlock_allocate) {
-  ASSERT_DEATH({
-    void *ptr = malloc(128);
-    ASSERT_NE(ptr, nullptr);
-    free(ptr);
-    {
-      alarm(100ms);
-      ScopedDisableMalloc disable_malloc;
-      void* ptr = malloc(128);
-      ASSERT_NE(ptr, nullptr);
-      free(ptr);
-    }
-  }, "");
+  ASSERT_DEATH(
+      {
+        void* ptr = malloc(128);
+        ASSERT_NE(ptr, nullptr);
+        free(ptr);
+        {
+          alarm(100ms);
+          ScopedDisableMalloc disable_malloc;
+          void* ptr = malloc(128);
+          ASSERT_NE(ptr, nullptr);
+          free(ptr);
+        }
+      },
+      "");
 }
 
 TEST_F(DisableMallocTest, deadlock_new) {
-  ASSERT_DEATH({
-    char* ptr = new(char);
-    ASSERT_NE(ptr, nullptr);
-    delete(ptr);
-    {
-      alarm(100ms);
-      ScopedDisableMalloc disable_malloc;
-      char* ptr = new (std::nothrow)(char);
-      ASSERT_NE(ptr, nullptr);
-      delete(ptr);
-    }
-  }, "");
+  ASSERT_DEATH(
+      {
+        char* ptr = new (char);
+        ASSERT_NE(ptr, nullptr);
+        delete (ptr);
+        {
+          alarm(100ms);
+          ScopedDisableMalloc disable_malloc;
+          char* ptr = new (std::nothrow)(char);
+          ASSERT_NE(ptr, nullptr);
+          delete (ptr);
+        }
+      },
+      "");
 }
 
 TEST_F(DisableMallocTest, deadlock_delete) {
-  ASSERT_DEATH({
-    char* ptr = new(char);
-    ASSERT_NE(ptr, nullptr);
-    {
-      alarm(250ms);
-      ScopedDisableMalloc disable_malloc;
-      delete(ptr);
-      // Force ptr usage or this code gets optimized away by the arm64 compiler.
-      ASSERT_NE(ptr, nullptr);
-    }
-  }, "");
+  ASSERT_DEATH(
+      {
+        char* ptr = new (char);
+        ASSERT_NE(ptr, nullptr);
+        {
+          alarm(250ms);
+          ScopedDisableMalloc disable_malloc;
+          delete (ptr);
+          // Force ptr usage or this code gets optimized away by the arm64 compiler.
+          ASSERT_NE(ptr, nullptr);
+        }
+      },
+      "");
 }
 
 TEST_F(DisableMallocTest, deadlock_free) {
-  ASSERT_DEATH({
-    void *ptr = malloc(128);
-    ASSERT_NE(ptr, nullptr);
-    {
-      alarm(100ms);
-      ScopedDisableMalloc disable_malloc;
-      free(ptr);
-    }
-  }, "");
+  ASSERT_DEATH(
+      {
+        void* ptr = malloc(128);
+        ASSERT_NE(ptr, nullptr);
+        {
+          alarm(100ms);
+          ScopedDisableMalloc disable_malloc;
+          free(ptr);
+        }
+      },
+      "");
 }
 
 TEST_F(DisableMallocTest, deadlock_fork) {
@@ -113,6 +121,6 @@ TEST_F(DisableMallocTest, deadlock_fork) {
       alarm(100ms);
       ScopedDisableMalloc disable_malloc;
       fork();
-    }
-  }, "");
+}
+}, "");
 }
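Each death test above arms alarm(100ms) with a std::chrono duration before taking the malloc lock, so a real deadlock turns into a SIGALRM kill instead of a hung test. The fixture body that makes that call compile lies outside these hunks; a plausible sketch of such a wrapper (in the real file it would be a fixture member rather than a free function):

    #include <sys/time.h>

    #include <chrono>

    // chrono-friendly alarm(): converts the duration and arms ITIMER_REAL so
    // SIGALRM terminates the death-test child if it deadlocks.
    static void alarm(std::chrono::microseconds us) {
      std::chrono::seconds s = std::chrono::duration_cast<std::chrono::seconds>(us);
      itimerval t = itimerval();
      t.it_value.tv_sec = s.count();
      t.it_value.tv_usec = (us - s).count();
      setitimer(ITIMER_REAL, &t, nullptr);
    }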
diff --git a/libmemunreachable/tests/HeapWalker_test.cpp b/libmemunreachable/tests/HeapWalker_test.cpp
index 98e4aa1fd..1b258ee75 100644
--- a/libmemunreachable/tests/HeapWalker_test.cpp
+++ b/libmemunreachable/tests/HeapWalker_test.cpp
@@ -19,8 +19,8 @@
 #include "HeapWalker.h"
 
-#include
 #include
+#include
 
 #include "Allocator.h"
 
 class HeapWalkerTest : public ::testing::Test {
@@ -172,20 +172,20 @@ TEST_F(HeapWalkerTest, cycle) {
   ASSERT_EQ(true, heap_walker.Leaked(leaked, 100, &num_leaks, &leaked_bytes));
 
   EXPECT_EQ(2U, num_leaks);
-  EXPECT_EQ(2*sizeof(uintptr_t), leaked_bytes);
+  EXPECT_EQ(2 * sizeof(uintptr_t), leaked_bytes);
   ASSERT_EQ(2U, leaked.size());
 }
 
 TEST_F(HeapWalkerTest, segv) {
   const size_t page_size = sysconf(_SC_PAGE_SIZE);
-  void* buffer1 = mmap(NULL, page_size, PROT_NONE, MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
+  void* buffer1 = mmap(NULL, page_size, PROT_NONE, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
   ASSERT_NE(buffer1, nullptr);
   void* buffer2;
 
   buffer2 = &buffer1;
 
   HeapWalker heap_walker(heap_);
-  heap_walker.Allocation(buffer_begin(buffer1), buffer_begin(buffer1)+page_size);
+  heap_walker.Allocation(buffer_begin(buffer1), buffer_begin(buffer1) + page_size);
   heap_walker.Root(buffer_begin(buffer2), buffer_end(buffer2));
 
   ASSERT_EQ(true, heap_walker.DetectLeaks());
diff --git a/libmemunreachable/tests/HostMallocStub.cpp b/libmemunreachable/tests/HostMallocStub.cpp
index a7e3f07d3..0ef04870f 100644
--- a/libmemunreachable/tests/HostMallocStub.cpp
+++ b/libmemunreachable/tests/HostMallocStub.cpp
@@ -16,8 +16,6 @@
 
 #include "bionic.h"
 
-void malloc_disable() {
-}
+void malloc_disable() {}
 
-void malloc_enable() {
-}
+void malloc_enable() {}
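The segv test above maps a PROT_NONE page so the walk provably survives touching unreadable memory; buffer_begin()/buffer_end() are test helpers whose definitions fall outside the quoted hunks. A plausible sketch consistent with how the test calls them (assumed, not quoted from the file):

    #include <stdint.h>

    // begin = the pointer value itself; end = one pointer-sized slot past it,
    // which is exactly what the buffer2 = &buffer1 root region above needs.
    static uintptr_t buffer_begin(void* buffer) {
      return reinterpret_cast<uintptr_t>(buffer);
    }
    static uintptr_t buffer_end(void* buffer) {
      return reinterpret_cast<uintptr_t>(buffer) + sizeof(void*);
    }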
diff --git a/libmemunreachable/tests/LeakFolding_test.cpp b/libmemunreachable/tests/LeakFolding_test.cpp
index e85df5f58..7ae7f7607 100644
--- a/libmemunreachable/tests/LeakFolding_test.cpp
+++ b/libmemunreachable/tests/LeakFolding_test.cpp
@@ -14,11 +14,11 @@
  * limitations under the License.
  */
 
-#include "HeapWalker.h"
 #include "LeakFolding.h"
+#include "HeapWalker.h"
 
-#include
 #include
+#include
 
 #include "Allocator.h"
 class LeakFoldingTest : public ::testing::Test {
@@ -84,7 +84,7 @@ TEST_F(LeakFoldingTest, two) {
   ASSERT_EQ(true, folding.Leaked(leaked, &num_leaks, &leaked_bytes));
 
   EXPECT_EQ(2U, num_leaks);
-  EXPECT_EQ(2*sizeof(uintptr_t), leaked_bytes);
+  EXPECT_EQ(2 * sizeof(uintptr_t), leaked_bytes);
   ASSERT_EQ(2U, leaked.size());
   EXPECT_EQ(0U, leaked[0].referenced_count);
   EXPECT_EQ(0U, leaked[0].referenced_size);
@@ -113,7 +113,7 @@ TEST_F(LeakFoldingTest, dominator) {
   ASSERT_EQ(true, folding.Leaked(leaked, &num_leaks, &leaked_bytes));
 
   EXPECT_EQ(2U, num_leaks);
-  EXPECT_EQ(2*sizeof(uintptr_t), leaked_bytes);
+  EXPECT_EQ(2 * sizeof(uintptr_t), leaked_bytes);
   ASSERT_EQ(1U, leaked.size());
   EXPECT_EQ(1U, leaked[0].referenced_count);
   EXPECT_EQ(sizeof(uintptr_t), leaked[0].referenced_size);
@@ -144,10 +144,10 @@ TEST_F(LeakFoldingTest, cycle) {
   ASSERT_EQ(true, folding.Leaked(leaked, &num_leaks, &leaked_bytes));
 
   EXPECT_EQ(3U, num_leaks);
-  EXPECT_EQ(3*sizeof(uintptr_t), leaked_bytes);
+  EXPECT_EQ(3 * sizeof(uintptr_t), leaked_bytes);
   ASSERT_EQ(1U, leaked.size());
   EXPECT_EQ(2U, leaked[0].referenced_count);
-  EXPECT_EQ(2*sizeof(uintptr_t), leaked[0].referenced_size);
+  EXPECT_EQ(2 * sizeof(uintptr_t), leaked[0].referenced_size);
 }
 
 TEST_F(LeakFoldingTest, dominator_cycle) {
@@ -175,13 +175,13 @@ TEST_F(LeakFoldingTest, dominator_cycle) {
   ASSERT_EQ(true, folding.Leaked(leaked, &num_leaks, &leaked_bytes));
 
   EXPECT_EQ(3U, num_leaks);
-  EXPECT_EQ(5*sizeof(uintptr_t), leaked_bytes);
+  EXPECT_EQ(5 * sizeof(uintptr_t), leaked_bytes);
   ASSERT_EQ(2U, leaked.size());
 
   EXPECT_EQ(2U, leaked[0].referenced_count);
-  EXPECT_EQ(3*sizeof(uintptr_t), leaked[0].referenced_size);
+  EXPECT_EQ(3 * sizeof(uintptr_t), leaked[0].referenced_size);
   EXPECT_EQ(2U, leaked[1].referenced_count);
-  EXPECT_EQ(3*sizeof(uintptr_t), leaked[1].referenced_size);
+  EXPECT_EQ(3 * sizeof(uintptr_t), leaked[1].referenced_size);
 }
 
 TEST_F(LeakFoldingTest, two_cycles) {
@@ -218,12 +218,12 @@ TEST_F(LeakFoldingTest, two_cycles) {
   ASSERT_EQ(true, folding.Leaked(leaked, &num_leaks, &leaked_bytes));
 
   EXPECT_EQ(6U, num_leaks);
-  EXPECT_EQ(6*sizeof(uintptr_t), leaked_bytes);
+  EXPECT_EQ(6 * sizeof(uintptr_t), leaked_bytes);
   ASSERT_EQ(2U, leaked.size());
   EXPECT_EQ(2U, leaked[0].referenced_count);
-  EXPECT_EQ(2*sizeof(uintptr_t), leaked[0].referenced_size);
+  EXPECT_EQ(2 * sizeof(uintptr_t), leaked[0].referenced_size);
   EXPECT_EQ(2U, leaked[1].referenced_count);
-  EXPECT_EQ(2*sizeof(uintptr_t), leaked[1].referenced_size);
+  EXPECT_EQ(2 * sizeof(uintptr_t), leaked[1].referenced_size);
 }
 
 TEST_F(LeakFoldingTest, two_dominator_cycles) {
@@ -254,7 +254,7 @@ TEST_F(LeakFoldingTest, two_dominator_cycles) {
   ASSERT_EQ(true, folding.Leaked(leaked, &num_leaks, &leaked_bytes));
 
   EXPECT_EQ(4U, num_leaks);
-  EXPECT_EQ(4*sizeof(uintptr_t), leaked_bytes);
+  EXPECT_EQ(4 * sizeof(uintptr_t), leaked_bytes);
   ASSERT_EQ(4U, leaked.size());
   EXPECT_EQ(1U, leaked[0].referenced_count);
   EXPECT_EQ(sizeof(uintptr_t), leaked[0].referenced_size);
@@ -272,13 +272,13 @@ TEST_F(LeakFoldingTest, giant_dominator_cycle) {
 
   HeapWalker heap_walker(heap_);
 
-  for (size_t i = 0; i < n; i ++) {
+  for (size_t i = 0; i < n; i++) {
     ASSERT_TRUE(heap_walker.Allocation(reinterpret_cast<uintptr_t>(&buffer[i]),
-        reinterpret_cast<uintptr_t>(&buffer[i+1])));
+                                       reinterpret_cast<uintptr_t>(&buffer[i + 1])));
   }
 
   for (size_t i = 0; i < n - 1; i++) {
-    buffer[i] = &buffer[i+1];
+    buffer[i] = &buffer[i + 1];
   }
 
   buffer[n - 1] = &buffer[0];
@@ -306,15 +306,15 @@ TEST_F(LeakFoldingTest, giant_cycle) {
 
   HeapWalker heap_walker(heap_);
 
   for (size_t i = 0; i < n - 1; i++) {
-    buffer[i] = &buffer[i+1];
+    buffer[i] = &buffer[i + 1];
   }
 
   buffer[n - 1] = &buffer[0];
 
   buffer1[0] = &buffer[0];
 
-  for (size_t i = 0; i < n; i ++) {
+  for (size_t i = 0; i < n; i++) {
     ASSERT_TRUE(heap_walker.Allocation(reinterpret_cast<uintptr_t>(&buffer[i]),
-        reinterpret_cast<uintptr_t>(&buffer[i+1])));
+                                       reinterpret_cast<uintptr_t>(&buffer[i + 1])));
   }
   ALLOCATION(heap_walker, buffer1);
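These folding tests register fake allocations both through explicit Allocation(begin, end) calls and through an ALLOCATION(...) macro whose definition is outside the hunks. Judging from the explicit calls in the giant_* tests, it plausibly expands along these lines (assumed, not part of the patch):

    // Register a whole stack buffer with the walker as if it were one heap
    // allocation spanning [&buffer, &buffer + sizeof(buffer)).
    #define ALLOCATION(heap_walker, buffer)                                        \
      ASSERT_TRUE((heap_walker).Allocation(reinterpret_cast<uintptr_t>(&(buffer)), \
                                           reinterpret_cast<uintptr_t>(&(buffer)) + sizeof(buffer)))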
diff --git a/libmemunreachable/tests/MemUnreachable_test.cpp b/libmemunreachable/tests/MemUnreachable_test.cpp
index 71da3655a..a231f4fb6 100644
--- a/libmemunreachable/tests/MemUnreachable_test.cpp
+++ b/libmemunreachable/tests/MemUnreachable_test.cpp
@@ -16,8 +16,8 @@
 
 #include
 #include
 
-#include
 #include
+#include
 
 #include
 
@@ -25,23 +25,16 @@
 
 class HiddenPointer {
  public:
-  explicit HiddenPointer(size_t size = 256) {
-    Set(malloc(size));
-  }
-  ~HiddenPointer() {
-    Free();
-  }
-  void* Get() {
-    return reinterpret_cast<void*>(~ptr_);
-  }
+  explicit HiddenPointer(size_t size = 256) { Set(malloc(size)); }
+  ~HiddenPointer() { Free(); }
+  void* Get() { return reinterpret_cast<void*>(~ptr_); }
   void Free() {
     free(Get());
     Set(nullptr);
   }
+
  private:
-  void Set(void* ptr) {
-    ptr_ = ~reinterpret_cast<uintptr_t>(ptr);
-  }
+  void Set(void* ptr) { ptr_ = ~reinterpret_cast<uintptr_t>(ptr); }
   volatile uintptr_t ptr_;
 };
diff --git a/libmemunreachable/tests/ThreadCapture_test.cpp b/libmemunreachable/tests/ThreadCapture_test.cpp
index 44aabd755..5bcb79e51 100644
--- a/libmemunreachable/tests/ThreadCapture_test.cpp
+++ b/libmemunreachable/tests/ThreadCapture_test.cpp
@@ -45,12 +45,10 @@ class ThreadListTest : public ::testing::TestWithParam<int> {
     WaitForThreads();
   }
 
-  virtual void TearDown() {
-    ASSERT_TRUE(heap.empty());
-  }
+  virtual void TearDown() { ASSERT_TRUE(heap.empty()); }
 
  protected:
-  template<class Function>
+  template <class Function>
   void StartThreads(unsigned int threads, Function&& func) {
     threads_.reserve(threads);
     tids_.reserve(threads);
@@ -68,14 +66,14 @@ class ThreadListTest : public ::testing::TestWithParam<int> {
 
       {
         std::unique_lock<std::mutex> lk(m_);
-        cv_stop_.wait(lk, [&] {return stop_;});
+        cv_stop_.wait(lk, [&] { return stop_; });
       }
     });
   }
 
   {
     std::unique_lock<std::mutex> lk(m_);
-    cv_start_.wait(lk, [&]{ return tids_.size() == threads; });
+    cv_start_.wait(lk, [&] { return tids_.size() == threads; });
   }
 }
 
@@ -93,9 +91,7 @@ class ThreadListTest : public ::testing::TestWithParam<int> {
     tids_.clear();
   }
 
-  std::vector<pid_t>& tids() {
-    return tids_;
-  }
+  std::vector<pid_t>& tids() { return tids_; }
 
   Heap heap;
 
@@ -143,7 +139,7 @@ TEST_F(ThreadListTest, list_one) {
 
 TEST_P(ThreadListTest, list_some) {
   const unsigned int threads = GetParam() - 1;
 
-  StartThreads(threads, [](){});
+  StartThreads(threads, []() {});
   std::vector<pid_t> expected_tids = tids();
   expected_tids.push_back(getpid());
 
@@ -176,10 +172,8 @@ class ThreadCaptureTest : public ThreadListTest {
  public:
   ThreadCaptureTest() {}
   ~ThreadCaptureTest() {}
-  void Fork(std::function<void()>&& child_init,
-      std::function<void()>&& child_cleanup,
-      std::function<void(pid_t)>&& parent) {
-
+  void Fork(std::function<void()>&& child_init, std::function<void()>&& child_cleanup,
+            std::function<void(pid_t)>&& parent) {
     ScopedPipe start_pipe;
     ScopedPipe stop_pipe;
 
@@ -211,39 +205,40 @@ class ThreadCaptureTest : public ThreadListTest {
 TEST_P(ThreadCaptureTest, capture_some) {
   const unsigned int threads = GetParam();
 
-  Fork([&](){
-    // child init
-    StartThreads(threads - 1, [](){});
-  },
-  [&](){
-    // child cleanup
-    StopThreads();
-  },
-  [&](pid_t child){
-    // parent
-    ASSERT_GT(child, 0);
+  Fork(
+      [&]() {
+        // child init
+        StartThreads(threads - 1, []() {});
+      },
+      [&]() {
+        // child cleanup
+        StopThreads();
+      },
+      [&](pid_t child) {
+        // parent
+        ASSERT_GT(child, 0);
 
-    {
-      ScopedDisableMallocTimeout disable_malloc;
+        {
+          ScopedDisableMallocTimeout disable_malloc;
 
-      ThreadCapture thread_capture(child, heap);
-      auto list_tids = allocator::vector<pid_t>(heap);
+          ThreadCapture thread_capture(child, heap);
+          auto list_tids = allocator::vector<pid_t>(heap);
 
-      ASSERT_TRUE(thread_capture.ListThreads(list_tids));
-      ASSERT_EQ(threads, list_tids.size());
+          ASSERT_TRUE(thread_capture.ListThreads(list_tids));
+          ASSERT_EQ(threads, list_tids.size());
 
-      ASSERT_TRUE(thread_capture.CaptureThreads());
+          ASSERT_TRUE(thread_capture.CaptureThreads());
 
-      auto thread_info = allocator::vector<ThreadInfo>(heap);
-      ASSERT_TRUE(thread_capture.CapturedThreadInfo(thread_info));
-      ASSERT_EQ(threads, thread_info.size());
-      ASSERT_TRUE(thread_capture.ReleaseThreads());
+          auto thread_info = allocator::vector<ThreadInfo>(heap);
+          ASSERT_TRUE(thread_capture.CapturedThreadInfo(thread_info));
+          ASSERT_EQ(threads, thread_info.size());
+          ASSERT_TRUE(thread_capture.ReleaseThreads());
 
-      if (!HasFailure()) {
-        ASSERT_FALSE(disable_malloc.timed_out());
-      }
-}
-  });
+          if (!HasFailure()) {
+            ASSERT_FALSE(disable_malloc.timed_out());
+          }
+        }
+      });
 }
 
 INSTANTIATE_TEST_CASE_P(ThreadCaptureTest, ThreadCaptureTest, ::testing::Values(1, 2, 10, 1024));
 
@@ -262,7 +257,7 @@ TEST_F(ThreadCaptureTest, capture_kill) {
     ScopedDisableMallocTimeout disable_malloc;
 
     ThreadCapture thread_capture(ret, heap);
-    thread_capture.InjectTestFunc([&](pid_t tid){
+    thread_capture.InjectTestFunc([&](pid_t tid) {
       syscall(SYS_tgkill, ret, tid, SIGKILL);
       usleep(10000);
     });
@@ -288,62 +283,63 @@ TEST_F(ThreadCaptureTest, capture_signal) {
   // For signal handler
   static ScopedPipe* g_pipe;
 
-  Fork([&](){
-    // child init
-    pipe.CloseReceiver();
+  Fork(
+      [&]() {
+        // child init
+        pipe.CloseReceiver();
 
-    g_pipe = &pipe;
+        g_pipe = &pipe;
 
-    struct sigaction act{};
-    act.sa_handler = [](int){
-      char buf = '+';
-      write(g_pipe->Sender(), &buf, 1);
-      g_pipe->CloseSender();
-    };
-    sigaction(sig, &act, NULL);
-    sigset_t set;
-    sigemptyset(&set);
-    sigaddset(&set, sig);
-    pthread_sigmask(SIG_UNBLOCK, &set, NULL);
-  },
-  [&](){
-    // child cleanup
-    g_pipe = nullptr;
-    pipe.Close();
-  },
-  [&](pid_t child){
-    // parent
-    ASSERT_GT(child, 0);
-    pipe.CloseSender();
+        struct sigaction act {};
+        act.sa_handler = [](int) {
+          char buf = '+';
+          write(g_pipe->Sender(), &buf, 1);
+          g_pipe->CloseSender();
+        };
+        sigaction(sig, &act, NULL);
+        sigset_t set;
+        sigemptyset(&set);
+        sigaddset(&set, sig);
+        pthread_sigmask(SIG_UNBLOCK, &set, NULL);
+      },
+      [&]() {
+        // child cleanup
+        g_pipe = nullptr;
+        pipe.Close();
+      },
+      [&](pid_t child) {
+        // parent
+        ASSERT_GT(child, 0);
+        pipe.CloseSender();
 
-    {
-      ScopedDisableMallocTimeout disable_malloc;
+        {
+          ScopedDisableMallocTimeout disable_malloc;
 
-      ThreadCapture thread_capture(child, heap);
-      thread_capture.InjectTestFunc([&](pid_t tid){
-        syscall(SYS_tgkill, child, tid, sig);
-        usleep(10000);
+          ThreadCapture thread_capture(child, heap);
+          thread_capture.InjectTestFunc([&](pid_t tid) {
+            syscall(SYS_tgkill, child, tid, sig);
+            usleep(10000);
+          });
+          auto list_tids = allocator::vector<pid_t>(heap);
+
+          ASSERT_TRUE(thread_capture.ListThreads(list_tids));
+          ASSERT_EQ(1U, list_tids.size());
+
+          ASSERT_TRUE(thread_capture.CaptureThreads());
+
+          auto thread_info = allocator::vector<ThreadInfo>(heap);
+          ASSERT_TRUE(thread_capture.CapturedThreadInfo(thread_info));
+          ASSERT_EQ(1U, thread_info.size());
+          ASSERT_TRUE(thread_capture.ReleaseThreads());
+
+          usleep(100000);
+          char buf;
+          ASSERT_EQ(1, TEMP_FAILURE_RETRY(read(pipe.Receiver(), &buf, 1)));
+          ASSERT_EQ(buf, '+');
+
+          if (!HasFailure()) {
+            ASSERT_FALSE(disable_malloc.timed_out());
+          }
+        }
       });
-      auto list_tids = allocator::vector<pid_t>(heap);
-
-      ASSERT_TRUE(thread_capture.ListThreads(list_tids));
-      ASSERT_EQ(1U, list_tids.size());
-
-      ASSERT_TRUE(thread_capture.CaptureThreads());
-
-      auto thread_info = allocator::vector<ThreadInfo>(heap);
-      ASSERT_TRUE(thread_capture.CapturedThreadInfo(thread_info));
-      ASSERT_EQ(1U, thread_info.size());
-      ASSERT_TRUE(thread_capture.ReleaseThreads());
-
-      usleep(100000);
-      char buf;
-      ASSERT_EQ(1, TEMP_FAILURE_RETRY(read(pipe.Receiver(), &buf, 1)));
-      ASSERT_EQ(buf, '+');
-
-      if (!HasFailure()) {
-        ASSERT_FALSE(disable_malloc.timed_out());
-      }
-    }
-  });
 }
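A closing note on HiddenPointer from MemUnreachable_test.cpp above: keeping only the bitwise complement of the pointer (in a volatile field) guarantees the raw pointer value never sits anywhere the conservative scanner could find it, so the allocation is reported as unreachable until Free() is called. A hedged usage sketch; the UnreachableMemoryInfo fields are assumed from the AOSP header, and the test name is invented:

    #include <gtest/gtest.h>
    #include <memunreachable/memunreachable.h>

    TEST(MemunreachableExample, hidden_leak) {
      HiddenPointer hidden;  // malloc()s a block, stores only ~ptr
      // No live pointer to the block is visible, so the scan reports a leak.
      UnreachableMemoryInfo info;
      ASSERT_TRUE(GetUnreachableMemory(info));
      EXPECT_NE(0U, info.num_leaks);
      hidden.Free();  // after freeing, later scans no longer report it
    }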