Merge "Remove PAGE_SIZE call sites." am: 826ea44822 am: 891e3b0aa8

Original change: https://android-review.googlesource.com/c/platform/bionic/+/2083840

Change-Id: I14d8ca6a7d4f9b3aeac0190232e9b6dd00b3d35d
Signed-off-by: Automerger Merge Worker <android-build-automerger-merge-worker@system.gserviceaccount.com>
commit efbf89a550
Author: Elliott Hughes
Date: 2023-06-13 21:54:23 +00:00
Committed by: Automerger Merge Worker

21 changed files with 116 additions and 98 deletions


@@ -73,8 +73,8 @@ class AtexitArray {
   // restart concurrent __cxa_finalize passes.
   uint64_t total_appends_;

-  static size_t page_start_of_index(size_t idx) { return PAGE_START(idx * sizeof(AtexitEntry)); }
-  static size_t page_end_of_index(size_t idx) { return PAGE_END(idx * sizeof(AtexitEntry)); }
+  static size_t page_start_of_index(size_t idx) { return page_start(idx * sizeof(AtexitEntry)); }
+  static size_t page_end_of_index(size_t idx) { return page_end(idx * sizeof(AtexitEntry)); }

   // Recompact the array if it will save at least one page of memory at the end.
   bool needs_recompaction() const {
@@ -167,7 +167,7 @@ void AtexitArray::set_writable(bool writable, size_t start_idx, size_t num_entri
 // than one.
 bool AtexitArray::next_capacity(size_t capacity, size_t* result) {
   if (capacity == 0) {
-    *result = PAGE_END(sizeof(AtexitEntry)) / sizeof(AtexitEntry);
+    *result = page_end(sizeof(AtexitEntry)) / sizeof(AtexitEntry);
     return true;
   }
   size_t num_bytes;
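
The page_*_of_index() helpers above just translate an entry index into the page range covering it. A hedged standalone sketch with made-up sizes (4 KiB pages and a hypothetical 16-byte AtexitEntry; the real code uses page_start()/page_end()):

#include <stddef.h>
#include <stdio.h>

int main() {
  // Assumed values for illustration only.
  const size_t kPageSize = 4096;
  const size_t kEntrySize = 16;  // hypothetical sizeof(AtexitEntry)
  size_t idx = 300;
  size_t byte = idx * kEntrySize;                          // 4800
  size_t start = byte & ~(kPageSize - 1);                  // page start: 4096
  size_t end = (byte + kPageSize - 1) & ~(kPageSize - 1);  // page end: 8192
  printf("entry %zu occupies page range [%zu, %zu)\n", idx, start, end);
  return 0;
}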


@@ -95,12 +95,10 @@ static inline uint16_t log2(size_t number) {
   return result;
 }

-BionicSmallObjectAllocator::BionicSmallObjectAllocator(uint32_t type,
-                                                       size_t block_size)
+BionicSmallObjectAllocator::BionicSmallObjectAllocator(uint32_t type, size_t block_size)
     : type_(type),
       block_size_(block_size),
-      blocks_per_page_((PAGE_SIZE - sizeof(small_object_page_info)) /
-                       block_size),
+      blocks_per_page_((page_size() - sizeof(small_object_page_info)) / block_size),
       free_pages_cnt_(0),
       page_list_(nullptr) {}
@@ -157,14 +155,13 @@ void BionicSmallObjectAllocator::free_page(small_object_page_info* page) {
   if (page_list_ == page) {
     page_list_ = page->next_page;
   }
-  munmap(page, PAGE_SIZE);
+  munmap(page, page_size());
   free_pages_cnt_--;
 }

 void BionicSmallObjectAllocator::free(void* ptr) {
   small_object_page_info* const page =
-      reinterpret_cast<small_object_page_info*>(
-          PAGE_START(reinterpret_cast<uintptr_t>(ptr)));
+      reinterpret_cast<small_object_page_info*>(page_start(reinterpret_cast<uintptr_t>(ptr)));

   if (reinterpret_cast<uintptr_t>(ptr) % block_size_ != 0) {
     async_safe_fatal("invalid pointer: %p (block_size=%zd)", ptr, block_size_);
@@ -192,14 +189,13 @@ void BionicSmallObjectAllocator::free(void* ptr) {
 }

 void BionicSmallObjectAllocator::alloc_page() {
-  void* const map_ptr = mmap(nullptr, PAGE_SIZE, PROT_READ | PROT_WRITE,
-                             MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+  void* const map_ptr =
+      mmap(nullptr, page_size(), PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
   if (map_ptr == MAP_FAILED) {
     async_safe_fatal("mmap failed: %s", strerror(errno));
   }

-  prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, map_ptr, PAGE_SIZE,
-        "bionic_alloc_small_objects");
+  prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, map_ptr, page_size(), "bionic_alloc_small_objects");

   small_object_page_info* const page =
       reinterpret_cast<small_object_page_info*>(map_ptr);
@@ -269,10 +265,10 @@ void* BionicAllocator::alloc_mmap(size_t align, size_t size) {
   size_t header_size = __BIONIC_ALIGN(kPageInfoSize, align);
   size_t allocated_size;
   if (__builtin_add_overflow(header_size, size, &allocated_size) ||
-      PAGE_END(allocated_size) < allocated_size) {
+      page_end(allocated_size) < allocated_size) {
     async_safe_fatal("overflow trying to alloc %zu bytes", size);
   }
-  allocated_size = PAGE_END(allocated_size);
+  allocated_size = page_end(allocated_size);
   void* map_ptr = mmap(nullptr, allocated_size, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS,
                        -1, 0);
@@ -317,7 +313,7 @@ void* BionicAllocator::alloc(size_t size) {
 void* BionicAllocator::memalign(size_t align, size_t size) {
   // The Bionic allocator only supports alignment up to one page, which is good
   // enough for ELF TLS.
-  align = MIN(align, PAGE_SIZE);
+  align = MIN(align, page_size());
   align = MAX(align, 16);
   if (!powerof2(align)) {
     align = BIONIC_ROUND_UP_POWER_OF_2(align);
@@ -327,7 +323,7 @@ void* BionicAllocator::memalign(size_t align, size_t size) {
 }

 inline page_info* BionicAllocator::get_page_info_unchecked(void* ptr) {
-  uintptr_t header_page = PAGE_START(reinterpret_cast<size_t>(ptr) - kPageInfoSize);
+  uintptr_t header_page = page_start(reinterpret_cast<size_t>(ptr) - kPageInfoSize);
   return reinterpret_cast<page_info*>(header_page);
 }
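
To make the blocks_per_page_ arithmetic concrete, a standalone sketch under assumed sizes (4 KiB page, hypothetical 16-byte small_object_page_info header, 32-byte block; the real values come from page_size() and sizeof(small_object_page_info)):

#include <stddef.h>
#include <stdio.h>

int main() {
  const size_t kPageSize = 4096;
  const size_t kHeaderSize = 16;  // hypothetical per-page header size
  const size_t kBlockSize = 32;   // one small-object size class
  // Each page holds the header plus as many whole blocks as fit after it.
  size_t blocks_per_page = (kPageSize - kHeaderSize) / kBlockSize;
  printf("%zu blocks per page\n", blocks_per_page);  // 127 with these numbers
  return 0;
}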


@@ -34,10 +34,11 @@
 #include <sys/param.h>
 #include <unistd.h>

+#include "platform/bionic/macros.h"
+#include "platform/bionic/page.h"
 #include "private/ScopedRWLock.h"
 #include "private/ScopedSignalBlocker.h"
 #include "private/bionic_globals.h"
-#include "platform/bionic/macros.h"
 #include "private/bionic_tls.h"
 #include "pthread_internal.h"
@@ -81,7 +82,7 @@ bool __bionic_check_tls_alignment(size_t* alignment) {
     return false;
   }

   // Bionic only respects TLS alignment up to one page.
-  *alignment = MIN(*alignment, PAGE_SIZE);
+  *alignment = MIN(*alignment, page_size());
   return true;
 }


@@ -40,6 +40,7 @@
 #include <unistd.h>

 #include <async_safe/log.h>
+#include <platform/bionic/page.h>
 #include <platform/bionic/reserved_signals.h>
 #include <sys/system_properties.h>
@@ -80,7 +81,7 @@ FdEntry* FdTableImpl<inline_fds>::at(size_t idx) {
     size_t required_count = max - inline_fds;
     size_t required_size = sizeof(FdTableOverflow) + required_count * sizeof(FdEntry);
-    size_t aligned_size = __BIONIC_ALIGN(required_size, PAGE_SIZE);
+    size_t aligned_size = __BIONIC_ALIGN(required_size, page_size());
     size_t aligned_count = (aligned_size - sizeof(FdTableOverflow)) / sizeof(FdEntry);

     void* allocation =


@@ -27,9 +27,10 @@
  */

 #include <unistd.h>

+#include "platform/bionic/page.h"
+
 // Portable code should use sysconf(_SC_PAGE_SIZE) directly instead.
 int getpagesize() {
   // We don't use sysconf(3) here because that drags in stdio, which makes static binaries fat.
-  return PAGE_SIZE;
+  return page_size();
 }
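
As a sanity check (not part of this change), the three ways of asking for the page size should agree on Linux/bionic; a minimal sketch:

#include <assert.h>
#include <sys/auxv.h>
#include <unistd.h>

int main() {
  long a = getpagesize();                         // this function
  long b = sysconf(_SC_PAGE_SIZE);                // the portable spelling
  long c = (long) getauxval(AT_PAGESZ);           // what page_size() reads
  assert(a == b && b == c);
  return 0;
}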


@@ -129,8 +129,8 @@ static void apply_gnu_relro() {
       continue;
     }

-    ElfW(Addr) seg_page_start = PAGE_START(phdr->p_vaddr);
-    ElfW(Addr) seg_page_end = PAGE_END(phdr->p_vaddr + phdr->p_memsz);
+    ElfW(Addr) seg_page_start = page_start(phdr->p_vaddr);
+    ElfW(Addr) seg_page_end = page_end(phdr->p_vaddr + phdr->p_memsz);

     // Check return value here? What do we do if we fail?
     mprotect(reinterpret_cast<void*>(seg_page_start), seg_page_end - seg_page_start, PROT_READ);
@@ -354,9 +354,9 @@ __attribute__((no_sanitize("hwaddress", "memtag"))) void __libc_init_mte(const v
     __libc_shared_globals()->initial_memtag_stack = memtag_stack;

     if (memtag_stack) {
-      void* page_start =
-          reinterpret_cast<void*>(PAGE_START(reinterpret_cast<uintptr_t>(stack_top)));
-      if (mprotect(page_start, PAGE_SIZE, PROT_READ | PROT_WRITE | PROT_MTE | PROT_GROWSDOWN)) {
+      void* pg_start =
+          reinterpret_cast<void*>(page_start(reinterpret_cast<uintptr_t>(stack_top)));
+      if (mprotect(pg_start, PAGE_SIZE, PROT_READ | PROT_WRITE | PROT_MTE | PROT_GROWSDOWN)) {
         async_safe_fatal("error: failed to set PROT_MTE on main thread stack: %s\n",
                          strerror(errno));
       }


@@ -32,6 +32,7 @@
 #include <unistd.h>

 #include "platform/bionic/macros.h"
+#include "platform/bionic/page.h"
 #include "private/ErrnoRestorer.h"

 // mmap2(2) is like mmap(2), but the offset is in 4096-byte blocks, not bytes.
@@ -46,7 +47,7 @@ void* mmap64(void* addr, size_t size, int prot, int flags, int fd, off64_t offse
   }

   // Prevent allocations large enough for `end - start` to overflow.
-  size_t rounded = __BIONIC_ALIGN(size, PAGE_SIZE);
+  size_t rounded = __BIONIC_ALIGN(size, page_size());
   if (rounded < size || rounded > PTRDIFF_MAX) {
     errno = ENOMEM;
     return MAP_FAILED;


@@ -33,12 +33,13 @@
 #include <unistd.h>

 #include "platform/bionic/macros.h"
+#include "platform/bionic/page.h"

 extern "C" void* __mremap(void*, size_t, size_t, int, void*);

 void* mremap(void* old_address, size_t old_size, size_t new_size, int flags, ...) {
   // prevent allocations large enough for `end - start` to overflow
-  size_t rounded = __BIONIC_ALIGN(new_size, PAGE_SIZE);
+  size_t rounded = __BIONIC_ALIGN(new_size, page_size());
   if (rounded < new_size || rounded > PTRDIFF_MAX) {
     errno = ENOMEM;
     return MAP_FAILED;


@@ -36,8 +36,9 @@
 #include <async_safe/log.h>

-#include "private/bionic_defs.h"
+#include "platform/bionic/page.h"
 #include "private/ErrnoRestorer.h"
+#include "private/bionic_defs.h"
 #include "pthread_internal.h"

 __BIONIC_WEAK_FOR_NATIVE_BRIDGE
@@ -143,10 +144,10 @@ int pthread_attr_getstacksize(const pthread_attr_t* attr, size_t* stack_size) {

 __BIONIC_WEAK_FOR_NATIVE_BRIDGE
 int pthread_attr_setstack(pthread_attr_t* attr, void* stack_base, size_t stack_size) {
-  if ((stack_size & (PAGE_SIZE - 1) || stack_size < PTHREAD_STACK_MIN)) {
+  if ((stack_size & (page_size() - 1) || stack_size < PTHREAD_STACK_MIN)) {
     return EINVAL;
   }
-  if (reinterpret_cast<uintptr_t>(stack_base) & (PAGE_SIZE - 1)) {
+  if (reinterpret_cast<uintptr_t>(stack_base) & (page_size() - 1)) {
     return EINVAL;
   }
   attr->stack_base = stack_base;
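
Both checks rely on the page size being a power of two, so `x & (page_size() - 1)` extracts the misaligned low bits. A hedged sketch of the same test with a hypothetical helper:

#include <stdint.h>
#include <unistd.h>

// Hypothetical helper: true if 'value' is a multiple of the page size.
// Works because the page size is always a power of two.
static bool page_aligned(uintptr_t value) {
  uintptr_t mask = static_cast<uintptr_t>(sysconf(_SC_PAGE_SIZE)) - 1;
  return (value & mask) == 0;
}

int main() {
  // With 4 KiB pages: 0x2000 is aligned, 0x2010 is not.
  return (page_aligned(0x2000) && !page_aligned(0x2010)) ? 0 : 1;
}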


@@ -42,6 +42,7 @@
 #include "platform/bionic/macros.h"
 #include "platform/bionic/mte.h"
+#include "platform/bionic/page.h"
 #include "private/ErrnoRestorer.h"
 #include "private/ScopedRWLock.h"
 #include "private/bionic_constants.h"
@@ -71,7 +72,7 @@ void __init_bionic_tls_ptrs(bionic_tcb* tcb, bionic_tls* tls) {
 // Allocate a temporary bionic_tls that the dynamic linker's main thread can
 // use while it's loading the initial set of ELF modules.
 bionic_tls* __allocate_temp_bionic_tls() {
-  size_t allocation_size = __BIONIC_ALIGN(sizeof(bionic_tls), PAGE_SIZE);
+  size_t allocation_size = __BIONIC_ALIGN(sizeof(bionic_tls), page_size());
   void* allocation = mmap(nullptr, allocation_size,
                           PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS,
@@ -84,7 +85,7 @@ bionic_tls* __allocate_temp_bionic_tls() {
 }

 void __free_temp_bionic_tls(bionic_tls* tls) {
-  munmap(tls, __BIONIC_ALIGN(sizeof(bionic_tls), PAGE_SIZE));
+  munmap(tls, __BIONIC_ALIGN(sizeof(bionic_tls), page_size()));
 }

 static void __init_alternate_signal_stack(pthread_internal_t* thread) {
@@ -203,12 +204,11 @@ int __init_thread(pthread_internal_t* thread) {
   return 0;
 }

 // Allocate a thread's primary mapping. This mapping includes static TLS and
 // optionally a stack. Static TLS includes ELF TLS segments and the bionic_tls
 // struct.
 //
-// The stack_guard_size must be a multiple of the PAGE_SIZE.
+// The stack_guard_size must be a multiple of the page_size().
 ThreadMapping __allocate_thread_mapping(size_t stack_size, size_t stack_guard_size) {
   const StaticTlsLayout& layout = __libc_shared_globals()->static_tls_layout;
@@ -220,7 +220,7 @@ ThreadMapping __allocate_thread_mapping(size_t stack_size, size_t stack_guard_si
   // Align the result to a page size.
   const size_t unaligned_size = mmap_size;
-  mmap_size = __BIONIC_ALIGN(mmap_size, PAGE_SIZE);
+  mmap_size = __BIONIC_ALIGN(mmap_size, page_size());
   if (mmap_size < unaligned_size) return {};

   // Create a new private anonymous map. Make the entire mapping PROT_NONE, then carve out a
@@ -271,9 +271,9 @@ static int __allocate_thread(pthread_attr_t* attr, bionic_tcb** tcbp, void** chi
   if (attr->stack_base == nullptr) {
     // The caller didn't provide a stack, so allocate one.
-    // Make sure the guard size is a multiple of PAGE_SIZE.
+    // Make sure the guard size is a multiple of page_size().
     const size_t unaligned_guard_size = attr->guard_size;
-    attr->guard_size = __BIONIC_ALIGN(attr->guard_size, PAGE_SIZE);
+    attr->guard_size = __BIONIC_ALIGN(attr->guard_size, page_size());
     if (attr->guard_size < unaligned_guard_size) return EAGAIN;

     mapping = __allocate_thread_mapping(attr->stack_size, attr->guard_size);


@@ -38,6 +38,7 @@
 #include <time.h>
 #include <unistd.h>

+#include "platform/bionic/page.h"
 #include "private/bionic_tls.h"

 static long __sysconf_rlimit(int resource) {


@@ -33,8 +33,9 @@
 #include <string.h>
 #include <unistd.h>

-#include "private/get_cpu_count_from_string.h"
+#include "platform/bionic/page.h"
 #include "private/ScopedReaddir.h"
+#include "private/get_cpu_count_from_string.h"

 int __get_cpu_count(const char* sys_file) {
   int cpu_count = 1;
@@ -64,11 +65,11 @@ int get_nprocs() {
 long get_phys_pages() {
   struct sysinfo si;
   sysinfo(&si);
-  return (static_cast<int64_t>(si.totalram) * si.mem_unit) / PAGE_SIZE;
+  return (static_cast<int64_t>(si.totalram) * si.mem_unit) / page_size();
 }

 long get_avphys_pages() {
   struct sysinfo si;
   sysinfo(&si);
-  return ((static_cast<int64_t>(si.freeram) + si.bufferram) * si.mem_unit) / PAGE_SIZE;
+  return ((static_cast<int64_t>(si.freeram) + si.bufferram) * si.mem_unit) / page_size();
 }
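
A hedged usage sketch (hypothetical program, not part of the change): the page counts multiply back into byte totals with the page size:

#include <stdio.h>
#include <sys/sysinfo.h>
#include <unistd.h>

int main() {
  long page = sysconf(_SC_PAGE_SIZE);
  long total = get_phys_pages();    // the functions patched above
  long avail = get_avphys_pages();
  printf("total RAM: %ld MiB, available: %ld MiB\n",
         (total * page) >> 20, (avail * page) >> 20);
  return 0;
}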


@@ -16,15 +16,39 @@

 #pragma once

-// Get PAGE_SIZE and PAGE_MASK.
+#include <stddef.h>
+#include <stdint.h>
+#include <sys/auxv.h>
+
+// For PAGE_SIZE.
 #include <sys/user.h>

+inline size_t page_size() {
+  /*
+   * PAGE_SIZE defines the maximum supported page size. Since 4096 is the
+   * minimum supported page size we can just let it be constant folded if it's
+   * also the maximum.
+   */
+#if PAGE_SIZE == 4096
+  return PAGE_SIZE;
+#else
+  static size_t size = getauxval(AT_PAGESZ);
+  return size;
+#endif
+}
+
 // Returns the address of the page containing address 'x'.
-#define PAGE_START(x) ((x) & PAGE_MASK)
+inline uintptr_t page_start(uintptr_t x) {
+  return x & ~(page_size() - 1);
+}

 // Returns the offset of address 'x' in its page.
-#define PAGE_OFFSET(x) ((x) & ~PAGE_MASK)
+inline uintptr_t page_offset(uintptr_t x) {
+  return x & (page_size() - 1);
+}

 // Returns the address of the next page after address 'x', unless 'x' is
 // itself at the start of a page.
-#define PAGE_END(x) PAGE_START((x) + (PAGE_SIZE-1))
+inline uintptr_t page_end(uintptr_t x) {
+  return page_start(x + page_size() - 1);
+}
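
A hedged standalone demonstration of these helpers' semantics, using local stand-ins (the my_* names are hypothetical) so it builds outside bionic, with the same sample address as the linker_utils test further down:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/auxv.h>

static size_t my_page_size() { return getauxval(AT_PAGESZ); }
static uintptr_t my_page_start(uintptr_t x) { return x & ~(my_page_size() - 1); }
static uintptr_t my_page_offset(uintptr_t x) { return x & (my_page_size() - 1); }
static uintptr_t my_page_end(uintptr_t x) { return my_page_start(x + my_page_size() - 1); }

int main() {
  // With 4 KiB pages, 0x300222f sits in the page starting at 0x3002000,
  // at offset 0x22f; the next page boundary at or above it is 0x3003000.
  printf("start  0x%" PRIxPTR "\n", my_page_start(0x300222f));   // 0x3002000
  printf("offset 0x%" PRIxPTR "\n", my_page_offset(0x300222f));  // 0x22f
  printf("end    0x%" PRIxPTR "\n", my_page_end(0x300222f));     // 0x3003000
  printf("end    0x%" PRIxPTR "\n", my_page_end(0x3002000));     // already aligned: unchanged
  return 0;
}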


@@ -962,7 +962,7 @@ static int open_library_in_zipfile(ZipArchiveCache* zip_archive_cache,
   }

   // Check if it is properly stored
-  if (entry.method != kCompressStored || (entry.offset % PAGE_SIZE) != 0) {
+  if (entry.method != kCompressStored || (entry.offset % page_size()) != 0) {
     close(fd);
     return -1;
   }
@@ -1171,7 +1171,7 @@ static bool load_library(android_namespace_t* ns,
             "load_library(ns=%s, task=%s, flags=0x%x, realpath=%s, search_linked_namespaces=%d)",
             ns->get_name(), name, rtld_flags, realpath.c_str(), search_linked_namespaces);

-  if ((file_offset % PAGE_SIZE) != 0) {
+  if ((file_offset % page_size()) != 0) {
     DL_OPEN_ERR("file offset for the library \"%s\" is not page-aligned: %" PRId64, name, file_offset);
     return false;
   }


@@ -50,8 +50,8 @@ class ShadowWrite {
   ShadowWrite(uint16_t* s, uint16_t* e) {
     shadow_start = reinterpret_cast<char*>(s);
     shadow_end = reinterpret_cast<char*>(e);
-    aligned_start = reinterpret_cast<char*>(PAGE_START(reinterpret_cast<uintptr_t>(shadow_start)));
-    aligned_end = reinterpret_cast<char*>(PAGE_END(reinterpret_cast<uintptr_t>(shadow_end)));
+    aligned_start = reinterpret_cast<char*>(page_start(reinterpret_cast<uintptr_t>(shadow_start)));
+    aligned_end = reinterpret_cast<char*>(page_end(reinterpret_cast<uintptr_t>(shadow_end)));
     tmp_start =
         reinterpret_cast<char*>(mmap(nullptr, aligned_end - aligned_start, PROT_READ | PROT_WRITE,
                                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0));
@@ -204,7 +204,7 @@ bool CFIShadowWriter::NotifyLibDl(soinfo* solist, uintptr_t p) {
   shadow_start = reinterpret_cast<uintptr_t* (*)(uintptr_t)>(cfi_init)(p);
   CHECK(shadow_start != nullptr);
   CHECK(*shadow_start == p);
-  mprotect(shadow_start, PAGE_SIZE, PROT_READ);
+  mprotect(shadow_start, page_size(), PROT_READ);
   return true;
 }


@@ -580,8 +580,8 @@ static void set_bss_vma_name(soinfo* si) {
     }

     ElfW(Addr) seg_start = phdr->p_vaddr + si->load_bias;
-    ElfW(Addr) seg_page_end = PAGE_END(seg_start + phdr->p_memsz);
-    ElfW(Addr) seg_file_end = PAGE_END(seg_start + phdr->p_filesz);
+    ElfW(Addr) seg_page_end = page_end(seg_start + phdr->p_memsz);
+    ElfW(Addr) seg_file_end = page_end(seg_start + phdr->p_filesz);

     if (seg_page_end > seg_file_end) {
       prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME,

@@ -29,6 +29,7 @@

 #include "linker_mapped_file_fragment.h"
 #include "linker_debug.h"
 #include "linker_utils.h"
+#include "platform/bionic/page.h"

 #include <inttypes.h>
 #include <stdlib.h>


@@ -116,7 +116,7 @@ static int GetTargetElfMachine() {
   can only memory-map at page boundaries, this means that the bias is
   computed as:

-       load_bias = phdr0_load_address - PAGE_START(phdr0->p_vaddr)
+       load_bias = phdr0_load_address - page_start(phdr0->p_vaddr)

   (NOTE: The value must be used as a 32-bit unsigned integer, to deal with
   possible wrap around UINT32_MAX for possible large p_vaddr values).
@@ -124,11 +124,11 @@ static int GetTargetElfMachine() {
   And that the phdr0_load_address must start at a page boundary, with
   the segment's real content starting at:

-       phdr0_load_address + PAGE_OFFSET(phdr0->p_vaddr)
+       phdr0_load_address + page_offset(phdr0->p_vaddr)

   Note that ELF requires the following condition to make the mmap()-ing work:

-      PAGE_OFFSET(phdr0->p_vaddr) == PAGE_OFFSET(phdr0->p_offset)
+      page_offset(phdr0->p_vaddr) == page_offset(phdr0->p_offset)

   The load_bias must be added to any p_vaddr value read from the ELF file to
   determine the corresponding memory address.
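
A worked example of the load_bias formula, under assumed numbers (4 KiB pages, a 64-bit address space; both addresses are made up):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main() {
  // Suppose the first loadable segment asks for p_vaddr = 0x1042f but the
  // kernel maps the file at phdr0_load_address = 0x7f0000042000.
  uintptr_t p_vaddr = 0x1042f;
  uintptr_t phdr0_load_address = 0x7f0000042000;
  uintptr_t page_mask = ~static_cast<uintptr_t>(0xfff);  // assuming 4 KiB pages
  // load_bias = phdr0_load_address - page_start(p_vaddr) = 0x7f0000032000.
  uintptr_t load_bias = phdr0_load_address - (p_vaddr & page_mask);
  // The segment's real content starts at load address + page offset.
  uintptr_t content = phdr0_load_address + (p_vaddr & 0xfff);
  printf("load_bias = 0x%" PRIxPTR ", content at 0x%" PRIxPTR "\n", load_bias, content);
  return 0;
}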
@@ -529,8 +529,8 @@ size_t phdr_table_get_load_size(const ElfW(Phdr)* phdr_table, size_t phdr_count,
     min_vaddr = 0;
   }

-  min_vaddr = PAGE_START(min_vaddr);
-  max_vaddr = PAGE_END(max_vaddr);
+  min_vaddr = page_start(min_vaddr);
+  max_vaddr = page_end(max_vaddr);

   if (out_min_vaddr != nullptr) {
     *out_min_vaddr = min_vaddr;
@@ -545,7 +545,7 @@ size_t phdr_table_get_load_size(const ElfW(Phdr)* phdr_table, size_t phdr_count,
 // program header table. Used to determine whether the file should be loaded at
 // a specific virtual address alignment for use with huge pages.
 size_t phdr_table_get_maximum_alignment(const ElfW(Phdr)* phdr_table, size_t phdr_count) {
-  size_t maximum_alignment = PAGE_SIZE;
+  size_t maximum_alignment = page_size();

   for (size_t i = 0; i < phdr_count; ++i) {
     const ElfW(Phdr)* phdr = &phdr_table[i];
@@ -563,7 +563,7 @@ size_t phdr_table_get_maximum_alignment(const ElfW(Phdr)* phdr_table, size_t phd
 #if defined(__LP64__)
   return maximum_alignment;
 #else
-  return PAGE_SIZE;
+  return page_size();
 #endif
 }
@@ -574,7 +574,7 @@ static void* ReserveWithAlignmentPadding(size_t size, size_t mapping_align, size
   int mmap_flags = MAP_PRIVATE | MAP_ANONYMOUS;
   // Reserve enough space to properly align the library's start address.
   mapping_align = std::max(mapping_align, start_align);
-  if (mapping_align == PAGE_SIZE) {
+  if (mapping_align == page_size()) {
     void* mmap_ptr = mmap(nullptr, size, PROT_NONE, mmap_flags, -1, 0);
     if (mmap_ptr == MAP_FAILED) {
       return nullptr;
@@ -593,7 +593,7 @@ static void* ReserveWithAlignmentPadding(size_t size, size_t mapping_align, size
   constexpr size_t kMaxGapUnits = 32;
   // Allocate enough space so that the end of the desired region aligned up is still inside the
   // mapping.
-  size_t mmap_size = align_up(size, mapping_align) + mapping_align - PAGE_SIZE;
+  size_t mmap_size = align_up(size, mapping_align) + mapping_align - page_size();
   uint8_t* mmap_ptr =
       reinterpret_cast<uint8_t*>(mmap(nullptr, mmap_size, PROT_NONE, mmap_flags, -1, 0));
   if (mmap_ptr == MAP_FAILED) {
@@ -610,7 +610,7 @@ static void* ReserveWithAlignmentPadding(size_t size, size_t mapping_align, size
     mapping_align = std::max(mapping_align, kGapAlignment);
     gap_size =
         kGapAlignment * (is_first_stage_init() ? 1 : arc4random_uniform(kMaxGapUnits - 1) + 1);
-    mmap_size = align_up(size + gap_size, mapping_align) + mapping_align - PAGE_SIZE;
+    mmap_size = align_up(size + gap_size, mapping_align) + mapping_align - page_size();
     mmap_ptr = reinterpret_cast<uint8_t*>(mmap(nullptr, mmap_size, PROT_NONE, mmap_flags, -1, 0));
     if (mmap_ptr == MAP_FAILED) {
       return nullptr;
@@ -665,12 +665,12 @@ bool ElfReader::ReserveAddressSpace(address_space_params* address_space) {
                    load_size_ - address_space->reserved_size, load_size_, name_.c_str());
     return false;
   }
-  size_t start_alignment = PAGE_SIZE;
+  size_t start_alignment = page_size();
   if (get_transparent_hugepages_supported() && get_application_target_sdk_version() >= 31) {
     size_t maximum_alignment = phdr_table_get_maximum_alignment(phdr_table_, phdr_num_);
     // Limit alignment to PMD size as other alignments reduce the number of
     // bits available for ASLR for no benefit.
-    start_alignment = maximum_alignment == kPmdSize ? kPmdSize : PAGE_SIZE;
+    start_alignment = maximum_alignment == kPmdSize ? kPmdSize : page_size();
   }
   start = ReserveWithAlignmentPadding(load_size_, kLibraryAlignment, start_alignment, &gap_start_,
                                       &gap_size_);
@@ -706,8 +706,8 @@ bool ElfReader::LoadSegments() {
     ElfW(Addr) seg_start = phdr->p_vaddr + load_bias_;
     ElfW(Addr) seg_end = seg_start + phdr->p_memsz;

-    ElfW(Addr) seg_page_start = PAGE_START(seg_start);
-    ElfW(Addr) seg_page_end = PAGE_END(seg_end);
+    ElfW(Addr) seg_page_start = page_start(seg_start);
+    ElfW(Addr) seg_page_end = page_end(seg_end);

     ElfW(Addr) seg_file_end = seg_start + phdr->p_filesz;
@@ -715,7 +715,7 @@ bool ElfReader::LoadSegments() {
     ElfW(Addr) file_start = phdr->p_offset;
     ElfW(Addr) file_end = file_start + phdr->p_filesz;

-    ElfW(Addr) file_page_start = PAGE_START(file_start);
+    ElfW(Addr) file_page_start = page_start(file_start);
     ElfW(Addr) file_length = file_end - file_page_start;

     if (file_size_ <= 0) {
@@ -768,11 +768,11 @@ bool ElfReader::LoadSegments() {
     // if the segment is writable, and does not end on a page boundary,
     // zero-fill it until the page limit.
-    if ((phdr->p_flags & PF_W) != 0 && PAGE_OFFSET(seg_file_end) > 0) {
-      memset(reinterpret_cast<void*>(seg_file_end), 0, PAGE_SIZE - PAGE_OFFSET(seg_file_end));
+    if ((phdr->p_flags & PF_W) != 0 && page_offset(seg_file_end) > 0) {
+      memset(reinterpret_cast<void*>(seg_file_end), 0, page_size() - page_offset(seg_file_end));
     }

-    seg_file_end = PAGE_END(seg_file_end);
+    seg_file_end = page_end(seg_file_end);

     // seg_file_end is now the first page address after the file
     // content. If seg_end is larger, we need to zero anything
@@ -811,8 +811,8 @@ static int _phdr_table_set_load_prot(const ElfW(Phdr)* phdr_table, size_t phdr_c
       continue;
     }

-    ElfW(Addr) seg_page_start = PAGE_START(phdr->p_vaddr) + load_bias;
-    ElfW(Addr) seg_page_end = PAGE_END(phdr->p_vaddr + phdr->p_memsz) + load_bias;
+    ElfW(Addr) seg_page_start = page_start(phdr->p_vaddr) + load_bias;
+    ElfW(Addr) seg_page_end = page_end(phdr->p_vaddr + phdr->p_memsz) + load_bias;

     int prot = PFLAGS_TO_PROT(phdr->p_flags) | extra_prot_flags;
     if ((prot & PROT_WRITE) != 0) {
@@ -912,8 +912,8 @@ static int _phdr_table_set_gnu_relro_prot(const ElfW(Phdr)* phdr_table, size_t p
     // the program is likely to fail at runtime. So in effect the
     // linker must only emit a PT_GNU_RELRO segment if it ensures
     // that it starts on a page boundary.
-    ElfW(Addr) seg_page_start = PAGE_START(phdr->p_vaddr) + load_bias;
-    ElfW(Addr) seg_page_end = PAGE_END(phdr->p_vaddr + phdr->p_memsz) + load_bias;
+    ElfW(Addr) seg_page_start = page_start(phdr->p_vaddr) + load_bias;
+    ElfW(Addr) seg_page_end = page_end(phdr->p_vaddr + phdr->p_memsz) + load_bias;

     int ret = mprotect(reinterpret_cast<void*>(seg_page_start),
                        seg_page_end - seg_page_start,
@@ -972,8 +972,8 @@ int phdr_table_serialize_gnu_relro(const ElfW(Phdr)* phdr_table,
       continue;
     }

-    ElfW(Addr) seg_page_start = PAGE_START(phdr->p_vaddr) + load_bias;
-    ElfW(Addr) seg_page_end = PAGE_END(phdr->p_vaddr + phdr->p_memsz) + load_bias;
+    ElfW(Addr) seg_page_start = page_start(phdr->p_vaddr) + load_bias;
+    ElfW(Addr) seg_page_end = page_end(phdr->p_vaddr + phdr->p_memsz) + load_bias;

     ssize_t size = seg_page_end - seg_page_start;
     ssize_t written = TEMP_FAILURE_RETRY(write(fd, reinterpret_cast<void*>(seg_page_start), size));
@@ -1035,8 +1035,8 @@ int phdr_table_map_gnu_relro(const ElfW(Phdr)* phdr_table,
       continue;
     }

-    ElfW(Addr) seg_page_start = PAGE_START(phdr->p_vaddr) + load_bias;
-    ElfW(Addr) seg_page_end = PAGE_END(phdr->p_vaddr + phdr->p_memsz) + load_bias;
+    ElfW(Addr) seg_page_start = page_start(phdr->p_vaddr) + load_bias;
+    ElfW(Addr) seg_page_end = page_end(phdr->p_vaddr + phdr->p_memsz) + load_bias;

     char* file_base = static_cast<char*>(temp_mapping) + *file_offset;
     char* mem_base = reinterpret_cast<char*>(seg_page_start);
@@ -1053,15 +1053,15 @@ int phdr_table_map_gnu_relro(const ElfW(Phdr)* phdr_table,
     while (match_offset < size) {
       // Skip over dissimilar pages.
       while (match_offset < size &&
-             memcmp(mem_base + match_offset, file_base + match_offset, PAGE_SIZE) != 0) {
-        match_offset += PAGE_SIZE;
+             memcmp(mem_base + match_offset, file_base + match_offset, page_size()) != 0) {
+        match_offset += page_size();
       }

       // Count similar pages.
       size_t mismatch_offset = match_offset;
       while (mismatch_offset < size &&
-             memcmp(mem_base + mismatch_offset, file_base + mismatch_offset, PAGE_SIZE) == 0) {
-        mismatch_offset += PAGE_SIZE;
+             memcmp(mem_base + mismatch_offset, file_base + mismatch_offset, page_size()) == 0) {
+        mismatch_offset += page_size();
       }

       // Map over similar pages.

@@ -169,12 +169,6 @@ bool parse_zip_path(const char* input_path, std::string* zip_path, std::string*
   return true;
 }

-constexpr off64_t kPageMask = ~static_cast<off64_t>(PAGE_SIZE-1);
-
-off64_t page_start(off64_t offset) {
-  return offset & kPageMask;
-}
-
 bool safe_add(off64_t* out, off64_t a, size_t b) {
   CHECK(a >= 0);
   if (static_cast<uint64_t>(INT64_MAX - a) < b) {
@@ -185,10 +179,6 @@ bool safe_add(off64_t* out, off64_t a, size_t b) {
   return true;
 }

-size_t page_offset(off64_t offset) {
-  return static_cast<size_t>(offset & (PAGE_SIZE-1));
-}
-
 void split_path(const char* path, const char* delimiters,
                 std::vector<std::string>* paths) {
   if (path != nullptr && path[0] != 0) {


@@ -55,7 +55,5 @@ void split_path(const char* path, const char* delimiters, std::vector<std::strin
 std::string dirname(const char* path);

-off64_t page_start(off64_t offset);
-size_t page_offset(off64_t offset);
 bool safe_add(off64_t* out, off64_t a, size_t b);
 bool is_first_stage_init();


@@ -33,6 +33,7 @@
 #include <gtest/gtest.h>

 #include "linker_utils.h"
+#include "platform/bionic/page.h"

 TEST(linker_utils, format_string) {
   std::vector<std::pair<std::string, std::string>> params = {{ "LIB", "lib32"}, { "SDKVER", "42"}};
@@ -104,9 +105,9 @@ TEST(linker_utils, parse_zip_path_smoke) {
 }

 TEST(linker_utils, page_start) {
-  ASSERT_EQ(0x0001000, page_start(0x0001000));
-  ASSERT_EQ(0x3002000, page_start(0x300222f));
-  ASSERT_EQ(0x6001000, page_start(0x6001fff));
+  ASSERT_EQ(0x0001000U, page_start(0x0001000));
+  ASSERT_EQ(0x3002000U, page_start(0x300222f));
+  ASSERT_EQ(0x6001000U, page_start(0x6001fff));
 }

 TEST(linker_utils, page_offset) {