Merge "bionic: tests: Remove PAGE_SIZE usage" into main
commit a05918bd83
5 changed files with 44 additions and 36 deletions
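
The pattern applied across all five files is the same: query the page size at run time and cache it in a kPageSize constant, rather than relying on the compile-time PAGE_SIZE macro, presumably so the tests remain correct on configurations whose page size is not 4096 bytes. A standalone sketch of that pattern (for illustration only, not code taken from this commit):

    #include <stddef.h>  // size_t
    #include <unistd.h>  // getpagesize()

    // Cache the runtime page size once per test file (or as a test-fixture
    // member, as the DlExtTest hunk below does); sysconf(_SC_PAGESIZE) would
    // work equally well.
    static const size_t kPageSize = getpagesize();
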
@@ -238,23 +238,27 @@ TEST(bionic_allocator, test_memalign_small) {
 TEST(bionic_allocator, test_memalign_large) {
   BionicAllocator allocator;
   void* ptr;
+  size_t alignment;
 
-  // a large object with alignment < PAGE_SIZE
-  ptr = allocator.memalign(0x100, 0x2000);
+  // a large object with alignment < kPageSize
+  alignment = kPageSize >> 1;
+  ptr = allocator.memalign(alignment, 0x2000);
   ASSERT_TRUE(ptr != nullptr);
-  ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(ptr) % 0x100);
+  ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(ptr) % alignment);
   allocator.free(ptr);
 
-  // a large object with alignment == PAGE_SIZE
-  ptr = allocator.memalign(0x1000, 0x2000);
+  // a large object with alignment == kPageSize
+  alignment = kPageSize;
+  ptr = allocator.memalign(alignment, 0x2000);
   ASSERT_TRUE(ptr != nullptr);
-  ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(ptr) % 0x1000);
+  ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(ptr) % alignment);
   allocator.free(ptr);
 
-  // A large object with alignment > PAGE_SIZE is only guaranteed to have page
+  // A large object with alignment > kPageSize is only guaranteed to have page
   // alignment.
-  ptr = allocator.memalign(0x2000, 0x4000);
+  alignment = kPageSize << 1;
+  ptr = allocator.memalign(alignment, 0x4000);
   ASSERT_TRUE(ptr != nullptr);
-  ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(ptr) % 0x1000);
+  ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(ptr) % kPageSize);
   allocator.free(ptr);
 }

@@ -85,6 +85,7 @@ protected:
   }
 
   void* handle_;
+  const size_t kPageSize = getpagesize();
 };
 
 TEST_F(DlExtTest, ExtInfoNull) {

@@ -159,12 +160,12 @@ TEST_F(DlExtTest, ExtInfoUseFdWithInvalidOffset) {
   ASSERT_STREQ("dlopen failed: file offset for the library \"libname_placeholder\" is not page-aligned: 17", dlerror());
 
   // Test an address above 2^44, for http://b/18178121 .
-  extinfo.library_fd_offset = (5LL<<48) + PAGE_SIZE;
+  extinfo.library_fd_offset = (5LL << 48) + kPageSize;
   handle_ = android_dlopen_ext("libname_placeholder", RTLD_NOW, &extinfo);
   ASSERT_TRUE(handle_ == nullptr);
   ASSERT_SUBSTR("dlopen failed: file offset for the library \"libname_placeholder\" >= file size", dlerror());
 
-  extinfo.library_fd_offset = 0LL - PAGE_SIZE;
+  extinfo.library_fd_offset = 0LL - kPageSize;
   handle_ = android_dlopen_ext("libname_placeholder", RTLD_NOW, &extinfo);
   ASSERT_TRUE(handle_ == nullptr);
   ASSERT_SUBSTR("dlopen failed: file offset for the library \"libname_placeholder\" is negative", dlerror());

@@ -340,17 +341,17 @@ TEST_F(DlExtTest, Reserved) {
   dlclose(handle_);
   handle_ = nullptr;
 
-  void* new_start = mmap(start, PAGE_SIZE, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+  void* new_start = mmap(start, kPageSize, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
   ASSERT_NE(start, new_start) << "dlclose unmapped reserved space";
 }
 
 TEST_F(DlExtTest, ReservedTooSmall) {
-  void* start = mmap(nullptr, PAGE_SIZE, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+  void* start = mmap(nullptr, kPageSize, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
   ASSERT_TRUE(start != MAP_FAILED);
   android_dlextinfo extinfo;
   extinfo.flags = ANDROID_DLEXT_RESERVED_ADDRESS;
   extinfo.reserved_addr = start;
-  extinfo.reserved_size = PAGE_SIZE;
+  extinfo.reserved_size = kPageSize;
   handle_ = android_dlopen_ext(kLibName, RTLD_NOW, &extinfo);
   EXPECT_EQ(nullptr, handle_);
 }

@@ -389,12 +390,12 @@ TEST_F(DlExtTest, ReservedRecursive) {
 }
 
 TEST_F(DlExtTest, ReservedRecursiveTooSmall) {
-  void* start = mmap(nullptr, PAGE_SIZE, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+  void* start = mmap(nullptr, kPageSize, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
   ASSERT_TRUE(start != MAP_FAILED);
   android_dlextinfo extinfo;
   extinfo.flags = ANDROID_DLEXT_RESERVED_ADDRESS | ANDROID_DLEXT_RESERVED_ADDRESS_RECURSIVE;
   extinfo.reserved_addr = start;
-  extinfo.reserved_size = PAGE_SIZE;
+  extinfo.reserved_size = kPageSize;
   handle_ = android_dlopen_ext(kLibNameRecursive, RTLD_NOW, &extinfo);
   EXPECT_EQ(nullptr, handle_);
 }

@@ -417,19 +418,18 @@ TEST_F(DlExtTest, ReservedHint) {
 }
 
 TEST_F(DlExtTest, ReservedHintTooSmall) {
-  void* start = mmap(nullptr, PAGE_SIZE, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+  void* start = mmap(nullptr, kPageSize, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
   ASSERT_TRUE(start != MAP_FAILED);
   android_dlextinfo extinfo;
   extinfo.flags = ANDROID_DLEXT_RESERVED_ADDRESS_HINT;
   extinfo.reserved_addr = start;
-  extinfo.reserved_size = PAGE_SIZE;
+  extinfo.reserved_size = kPageSize;
   handle_ = android_dlopen_ext(kLibName, RTLD_NOW, &extinfo);
   ASSERT_DL_NOTNULL(handle_);
   fn f = reinterpret_cast<fn>(dlsym(handle_, "getRandomNumber"));
   ASSERT_DL_NOTNULL(f);
   EXPECT_TRUE(reinterpret_cast<void*>(f) < start ||
-              (reinterpret_cast<void*>(f) >=
-                   reinterpret_cast<char*>(start) + PAGE_SIZE));
+              (reinterpret_cast<void*>(f) >= reinterpret_cast<char*>(start) + kPageSize));
   EXPECT_EQ(4, f());
 }
 

@@ -824,6 +824,8 @@ TEST(dlfcn, dlopen_failure) {
 }
 
 TEST(dlfcn, dlclose_unload) {
+  const size_t kPageSize = getpagesize();
+
   void* handle = dlopen("libtest_simple.so", RTLD_NOW);
   ASSERT_TRUE(handle != nullptr) << dlerror();
   uint32_t* taxicab_number = static_cast<uint32_t*>(dlsym(handle, "dlopen_testlib_taxicab_number"));

@@ -833,8 +835,8 @@ TEST(dlfcn, dlclose_unload) {
   // Making sure that the library has been unmapped as part of library unload
   // process. Note that mprotect somewhat counter-intuitively returns ENOMEM in
   // this case.
-  uintptr_t page_start = reinterpret_cast<uintptr_t>(taxicab_number) & ~(PAGE_SIZE - 1);
-  ASSERT_TRUE(mprotect(reinterpret_cast<void*>(page_start), PAGE_SIZE, PROT_NONE) != 0);
+  uintptr_t page_start = reinterpret_cast<uintptr_t>(taxicab_number) & ~(kPageSize - 1);
+  ASSERT_TRUE(mprotect(reinterpret_cast<void*>(page_start), kPageSize, PROT_NONE) != 0);
   ASSERT_ERRNO(ENOMEM);
 }
 

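The page_start mask in the hunk above works only because page sizes are powers of two, so ~(kPageSize - 1) clears exactly the within-page offset bits. A standalone illustration (hypothetical helper, not part of the commit):

    #include <stdint.h>
    #include <unistd.h>

    // Round an address down to the start of its containing page.
    // Example with 4 KiB pages: 0x7f001234 & ~uintptr_t{0xFFF} == 0x7f001000.
    static inline uintptr_t PageStart(uintptr_t addr) {
      const uintptr_t page_size = static_cast<uintptr_t>(getpagesize());
      return addr & ~(page_size - 1);
    }
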
@@ -240,14 +240,14 @@ void test_longjmp() {
 }
 
 void test_longjmp_sigaltstack() {
-  constexpr size_t kAltStackSize = kStackAllocationSize + PAGE_SIZE * 16;
+  const size_t kAltStackSize = kStackAllocationSize + getpagesize() * 16;
   SigAltStackScoped sigAltStackScoped(kAltStackSize);
   SigActionScoped sigActionScoped(
       SIGUSR1, [](int, siginfo_t*, void*) { check_longjmp_restores_tags(); });
   raise(SIGUSR1);
 
   // same for a secondary thread
-  std::thread t([]() {
+  std::thread t([&]() {
     SigAltStackScoped sigAltStackScoped(kAltStackSize);
     raise(SIGUSR1);
   });

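Two details in this hunk follow from the switch to a run-time page size: kAltStackSize can no longer be constexpr because getpagesize() is not a constant expression, and the secondary-thread lambda now captures by reference ([&]) because a local whose value is not a compile-time constant must be captured before the lambda can read it. A minimal illustration of that constraint (standalone sketch, not from the commit):

    #include <stddef.h>
    #include <unistd.h>

    void sketch() {
      // constexpr size_t bad = getpagesize() * 16;  // error: not a constant expression
      const size_t size = getpagesize() * 16;        // computed once at run time instead
      auto use_it = [&]() { return size; };          // the non-constant local must be captured
      (void)use_it();
    }
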
@@ -25,6 +25,8 @@
 
 #include "utils.h"
 
+static const size_t kPageSize = getpagesize();
+
 TEST(sys_mman, mmap_std) {
   void* map = mmap(nullptr, 4096, PROT_READ | PROT_WRITE, MAP_ANON | MAP_PRIVATE, -1, 0);
   ASSERT_NE(MAP_FAILED, map);

@@ -233,42 +235,42 @@ TEST(sys_mman, mmap_PTRDIFF_MAX) {
 }
 
 TEST(sys_mman, mremap_PTRDIFF_MAX) {
-  void* map = mmap(nullptr, PAGE_SIZE, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+  void* map = mmap(nullptr, kPageSize, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
   ASSERT_NE(MAP_FAILED, map);
 
-  ASSERT_EQ(MAP_FAILED, mremap(map, PAGE_SIZE, kHuge, MREMAP_MAYMOVE));
+  ASSERT_EQ(MAP_FAILED, mremap(map, kPageSize, kHuge, MREMAP_MAYMOVE));
 
-  ASSERT_EQ(0, munmap(map, PAGE_SIZE));
+  ASSERT_EQ(0, munmap(map, kPageSize));
 }
 
 TEST(sys_mman, mmap_bug_27265969) {
-  char* base = reinterpret_cast<char*>(mmap(nullptr, PAGE_SIZE * 2, PROT_EXEC | PROT_READ,
-                                            MAP_ANONYMOUS | MAP_PRIVATE, -1, 0));
+  char* base = reinterpret_cast<char*>(
+      mmap(nullptr, kPageSize * 2, PROT_EXEC | PROT_READ, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0));
   // Some kernels had bugs that would cause segfaults here...
-  __builtin___clear_cache(base, base + (PAGE_SIZE * 2));
+  __builtin___clear_cache(base, base + (kPageSize * 2));
 }
 
 TEST(sys_mman, mlock) {
-  void* map = mmap(nullptr, PAGE_SIZE, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+  void* map = mmap(nullptr, kPageSize, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
   ASSERT_NE(MAP_FAILED, map);
 
   // Not really anything we can assert about this.
-  mlock(map, PAGE_SIZE);
+  mlock(map, kPageSize);
 
-  ASSERT_EQ(0, munmap(map, PAGE_SIZE));
+  ASSERT_EQ(0, munmap(map, kPageSize));
 }
 
 TEST(sys_mman, mlock2) {
 #if defined(__GLIBC__)
   GTEST_SKIP() << "needs glibc 2.27";
 #else
-  void* map = mmap(nullptr, PAGE_SIZE, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+  void* map = mmap(nullptr, kPageSize, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
   ASSERT_NE(MAP_FAILED, map);
 
   // Not really anything we can assert about this.
-  mlock2(map, PAGE_SIZE, MLOCK_ONFAULT);
+  mlock2(map, kPageSize, MLOCK_ONFAULT);
 
-  ASSERT_EQ(0, munmap(map, PAGE_SIZE));
+  ASSERT_EQ(0, munmap(map, kPageSize));
 #endif
 }