Merge changes from topic "loader_crt_pad_segment" into main
am: 61a90188e5
am: b7826c5cf0

Original change: https://android-review.googlesource.com/c/platform/bionic/+/2803156
Change-Id: I3d4be70552b31a291b5b3d5609fd427c54c24df9
Signed-off-by: Automerger Merge Worker <android-build-automerger-merge-worker@system.gserviceaccount.com>

commit e44260f6bd
6 changed files with 150 additions and 34 deletions

This topic threads a new should_pad_segments flag through the loader: ElfReader::ReadPadSegmentNote() records whether an ELF opted in, and when its LOAD segments are aligned beyond the runtime page size (p_align > kPageSize), the segment mappings and the protect/unprotect/RELRO mprotect ranges are extended to be contiguous with the next LOAD, avoiding gaps in the memory map.

linker/linker.cpp:
@@ -3346,7 +3346,7 @@ bool soinfo::link_image(const SymbolLookupList& lookup_list, soinfo* local_group
               "\"%s\" has text relocations",
               get_realpath());
     add_dlwarning(get_realpath(), "text relocations");
-    if (phdr_table_unprotect_segments(phdr, phnum, load_bias) < 0) {
+    if (phdr_table_unprotect_segments(phdr, phnum, load_bias, should_pad_segments_) < 0) {
       DL_ERR("can't unprotect loadable segments for \"%s\": %s", get_realpath(), strerror(errno));
       return false;
     }
@@ -3362,7 +3362,7 @@ bool soinfo::link_image(const SymbolLookupList& lookup_list, soinfo* local_group
 #if !defined(__LP64__)
   if (has_text_relocations) {
     // All relocations are done, we can protect our segments back to read-only.
-    if (phdr_table_protect_segments(phdr, phnum, load_bias) < 0) {
+    if (phdr_table_protect_segments(phdr, phnum, load_bias, should_pad_segments_) < 0) {
       DL_ERR("can't protect segments for \"%s\": %s",
              get_realpath(), strerror(errno));
       return false;
@@ -3400,7 +3400,7 @@ bool soinfo::link_image(const SymbolLookupList& lookup_list, soinfo* local_group
 }

 bool soinfo::protect_relro() {
-  if (phdr_table_protect_gnu_relro(phdr, phnum, load_bias) < 0) {
+  if (phdr_table_protect_gnu_relro(phdr, phnum, load_bias, should_pad_segments_) < 0) {
     DL_ERR("can't enable GNU RELRO protection for \"%s\": %s",
            get_realpath(), strerror(errno));
     return false;

linker/linker_main.cpp:
@@ -201,6 +201,7 @@ struct ExecutableInfo {
   const ElfW(Phdr)* phdr;
   size_t phdr_count;
   ElfW(Addr) entry_point;
+  bool should_pad_segments;
 };

 static ExecutableInfo get_executable_info(const char* arg_path) {
@@ -293,6 +294,7 @@ static ExecutableInfo load_executable(const char* orig_path) {
   result.phdr = elf_reader.loaded_phdr();
   result.phdr_count = elf_reader.phdr_count();
   result.entry_point = elf_reader.entry_point();
+  result.should_pad_segments = elf_reader.should_pad_segments();
   return result;
 }
@@ -366,6 +368,7 @@ static ElfW(Addr) linker_main(KernelArgumentBlock& args, const char* exe_to_load
   somain = si;
   si->phdr = exe_info.phdr;
   si->phnum = exe_info.phdr_count;
+  si->set_should_pad_segments(exe_info.should_pad_segments);
   get_elf_base_from_phdr(si->phdr, si->phnum, &si->base, &si->load_bias);
   si->size = phdr_table_get_load_size(si->phdr, si->phnum);
   si->dynamic = nullptr;
@@ -399,7 +402,7 @@ static ElfW(Addr) linker_main(KernelArgumentBlock& args, const char* exe_to_load
     auto note_gnu_property = GnuPropertySection(somain);
     if (note_gnu_property.IsBTICompatible() &&
         (phdr_table_protect_segments(somain->phdr, somain->phnum, somain->load_bias,
-                                     &note_gnu_property) < 0)) {
+                                     somain->should_pad_segments(), &note_gnu_property) < 0)) {
       __linker_error("error: can't protect segments for \"%s\": %s", exe_info.path.c_str(),
                      strerror(errno));
     }

linker/linker_phdr.cpp:
@@ -196,7 +196,7 @@ bool ElfReader::Load(address_space_params* address_space) {
     // For Armv8.5-A loaded executable segments may require PROT_BTI.
     if (note_gnu_property_.IsBTICompatible()) {
       did_load_ = (phdr_table_protect_segments(phdr_table_, phdr_num_, load_bias_,
-                                               &note_gnu_property_) == 0);
+                                               should_pad_segments_, &note_gnu_property_) == 0);
     }
 #endif
 }
@@ -748,6 +748,36 @@ bool ElfReader::ReadPadSegmentNote() {
   return true;
 }

+static inline void _extend_load_segment_vma(const ElfW(Phdr)* phdr_table, size_t phdr_count,
+                                            size_t phdr_idx, ElfW(Addr)* p_memsz,
+                                            ElfW(Addr)* p_filesz) {
+  const ElfW(Phdr)* phdr = &phdr_table[phdr_idx];
+  const ElfW(Phdr)* next = nullptr;
+  size_t next_idx = phdr_idx + 1;
+  if (next_idx < phdr_count && phdr_table[next_idx].p_type == PT_LOAD) {
+    next = &phdr_table[next_idx];
+  }
+
+  // If this is the last LOAD segment, no extension is needed
+  if (!next || *p_memsz != *p_filesz) {
+    return;
+  }
+
+  ElfW(Addr) next_start = page_start(next->p_vaddr);
+  ElfW(Addr) curr_end = page_end(phdr->p_vaddr + *p_memsz);
+
+  // If adjacent segment mappings overlap, no extension is needed.
+  if (curr_end >= next_start) {
+    return;
+  }
+
+  // Extend the LOAD segment mapping to be contiguous with that of
+  // the next LOAD segment.
+  ElfW(Addr) extend = next_start - curr_end;
+  *p_memsz += extend;
+  *p_filesz += extend;
+}
+
 bool ElfReader::LoadSegments() {
   for (size_t i = 0; i < phdr_num_; ++i) {
     const ElfW(Phdr)* phdr = &phdr_table_[i];
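A worked example of the gap arithmetic above, as a standalone sketch rather than linker code: assume a 4 KiB runtime page size and an ELF whose LOAD segments are 16 KiB aligned, the p_align > kPageSize case this patch targets. page_start/page_end are reimplemented here, and all addresses are made up.

#include <cinttypes>
#include <cstdint>
#include <cstdio>

constexpr uint64_t kPageSize = 4096;  // assumed runtime page size

constexpr uint64_t page_start(uint64_t addr) { return addr & ~(kPageSize - 1); }
constexpr uint64_t page_end(uint64_t addr) { return page_start(addr + kPageSize - 1); }

int main() {
  // Two adjacent LOAD segments from a 16 KiB-aligned ELF (illustrative values).
  uint64_t curr_vaddr = 0x0, curr_memsz = 0x5000;  // current mapping ends at 0x5000
  uint64_t next_vaddr = 0x8000;                    // next LOAD starts 16 KiB-aligned

  uint64_t curr_end = page_end(curr_vaddr + curr_memsz);  // 0x5000
  uint64_t next_start = page_start(next_vaddr);           // 0x8000

  if (curr_end < next_start) {
    // Without extension, the 0x3000 bytes in between stay unmapped and the
    // kernel may later fill them with unrelated mappings.
    uint64_t extend = next_start - curr_end;
    curr_memsz += extend;  // the mapping now abuts the next LOAD segment
    printf("extended by %#" PRIx64 ", new p_memsz %#" PRIx64 "\n", extend, curr_memsz);
  }
  return 0;
}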
@@ -756,18 +786,24 @@ bool ElfReader::LoadSegments() {
       continue;
     }

+    ElfW(Addr) p_memsz = phdr->p_memsz;
+    ElfW(Addr) p_filesz = phdr->p_filesz;
+    if (phdr->p_align > kPageSize && should_pad_segments_) {
+      _extend_load_segment_vma(phdr_table_, phdr_num_, i, &p_memsz, &p_filesz);
+    }
+
     // Segment addresses in memory.
     ElfW(Addr) seg_start = phdr->p_vaddr + load_bias_;
-    ElfW(Addr) seg_end = seg_start + phdr->p_memsz;
+    ElfW(Addr) seg_end = seg_start + p_memsz;

     ElfW(Addr) seg_page_start = page_start(seg_start);
     ElfW(Addr) seg_page_end = page_end(seg_end);

-    ElfW(Addr) seg_file_end = seg_start + phdr->p_filesz;
+    ElfW(Addr) seg_file_end = seg_start + p_filesz;

     // File offsets.
     ElfW(Addr) file_start = phdr->p_offset;
-    ElfW(Addr) file_end = file_start + phdr->p_filesz;
+    ElfW(Addr) file_end = file_start + p_filesz;

     ElfW(Addr) file_page_start = page_start(file_start);
     ElfW(Addr) file_length = file_end - file_page_start;
@@ -777,12 +813,12 @@ bool ElfReader::LoadSegments() {
       return false;
     }

-    if (file_end > static_cast<size_t>(file_size_)) {
+    if (file_start + phdr->p_filesz > static_cast<size_t>(file_size_)) {
       DL_ERR("invalid ELF file \"%s\" load segment[%zd]:"
              " p_offset (%p) + p_filesz (%p) ( = %p) past end of file (0x%" PRIx64 ")",
              name_.c_str(), i, reinterpret_cast<void*>(phdr->p_offset),
             reinterpret_cast<void*>(phdr->p_filesz),
-             reinterpret_cast<void*>(file_end), file_size_);
+             reinterpret_cast<void*>(file_start + phdr->p_filesz), file_size_);
       return false;
     }
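Note the check deliberately uses the unextended phdr->p_filesz: with padding enabled, file_end above derives from the possibly extended p_filesz and may legitimately point past EOF for a valid ELF. A small sketch of the distinction, with made-up numbers:

#include <cassert>
#include <cstdint>

int main() {
  uint64_t file_size = 0x6000;        // size of the ELF on disk
  uint64_t p_offset = 0x4000;         // segment's offset within the file
  uint64_t p_filesz = 0x1800;         // real file-backed bytes of the segment
  uint64_t extended_filesz = 0x3800;  // p_filesz after _extend_load_segment_vma

  // The old check applied to the extended size would reject this valid ELF...
  assert(p_offset + extended_filesz > file_size);
  // ...while only the unextended bytes actually have to fit within the file.
  assert(p_offset + p_filesz <= file_size);
  return 0;
}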
@@ -822,8 +858,18 @@ bool ElfReader::LoadSegments() {

       // if the segment is writable, and does not end on a page boundary,
       // zero-fill it until the page limit.
-      if ((phdr->p_flags & PF_W) != 0 && page_offset(seg_file_end) > 0) {
-        memset(reinterpret_cast<void*>(seg_file_end), 0, page_size() - page_offset(seg_file_end));
+      //
+      // The intention is to zero the partial page that may exist at the
+      // end of a file backed mapping. With the extended seg_file_end, this
+      // file offset as calculated from the mapping start can overrun the end
+      // of the file. However pages in that range cannot be touched by userspace
+      // because the kernel will not be able to handle a file map fault past the
+      // extent of the file. No need to try zeroing this untouchable region.
+      // Zero the partial page at the end of the original unextended seg_file_end.
+      ElfW(Addr) seg_file_end_orig = seg_start + phdr->p_filesz;
+      if ((phdr->p_flags & PF_W) != 0 && page_offset(seg_file_end_orig) > 0) {
+        memset(reinterpret_cast<void*>(seg_file_end_orig), 0,
+               kPageSize - page_offset(seg_file_end_orig));
       }

       seg_file_end = page_end(seg_file_end);
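The zeroing arithmetic in isolation, as a sketch assuming 4 KiB pages and invented addresses: only the tail of the last file-backed page is cleared, starting at the unextended end of the segment's file image.

#include <cinttypes>
#include <cstdint>
#include <cstdio>

constexpr uint64_t kPageSize = 4096;
constexpr uint64_t page_offset(uint64_t addr) { return addr & (kPageSize - 1); }

int main() {
  uint64_t seg_start = 0x7f0000000000;  // illustrative mapping address
  uint64_t p_filesz = 0x1234;           // unextended file size of the segment

  uint64_t seg_file_end_orig = seg_start + p_filesz;
  if (page_offset(seg_file_end_orig) > 0) {
    // 0x1000 - 0x234 = 0xdcc bytes between the file image's end and the page boundary.
    uint64_t zero_len = kPageSize - page_offset(seg_file_end_orig);
    printf("zero %#" PRIx64 " bytes at %#" PRIx64 "\n", zero_len, seg_file_end_orig);
  }
  return 0;
}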
@@ -856,17 +902,23 @@ bool ElfReader::LoadSegments() {
  * phdr_table_protect_segments and phdr_table_unprotect_segments.
  */
 static int _phdr_table_set_load_prot(const ElfW(Phdr)* phdr_table, size_t phdr_count,
-                                     ElfW(Addr) load_bias, int extra_prot_flags) {
-  const ElfW(Phdr)* phdr = phdr_table;
-  const ElfW(Phdr)* phdr_limit = phdr + phdr_count;
-
-  for (; phdr < phdr_limit; phdr++) {
+                                     ElfW(Addr) load_bias, int extra_prot_flags,
+                                     bool should_pad_segments) {
+  for (size_t i = 0; i < phdr_count; ++i) {
+    const ElfW(Phdr)* phdr = &phdr_table[i];
+
     if (phdr->p_type != PT_LOAD || (phdr->p_flags & PF_W) != 0) {
       continue;
     }

-    ElfW(Addr) seg_page_start = page_start(phdr->p_vaddr) + load_bias;
-    ElfW(Addr) seg_page_end = page_end(phdr->p_vaddr + phdr->p_memsz) + load_bias;
+    ElfW(Addr) p_memsz = phdr->p_memsz;
+    ElfW(Addr) p_filesz = phdr->p_filesz;
+    if (phdr->p_align > kPageSize && should_pad_segments) {
+      _extend_load_segment_vma(phdr_table, phdr_count, i, &p_memsz, &p_filesz);
+    }
+
+    ElfW(Addr) seg_page_start = page_start(phdr->p_vaddr + load_bias);
+    ElfW(Addr) seg_page_end = page_end(phdr->p_vaddr + p_memsz + load_bias);

     int prot = PFLAGS_TO_PROT(phdr->p_flags) | extra_prot_flags;
     if ((prot & PROT_WRITE) != 0) {
@@ -901,19 +953,21 @@ static int _phdr_table_set_load_prot(const ElfW(Phdr)* phdr_table, size_t phdr_c
  *   phdr_table  -> program header table
  *   phdr_count  -> number of entries in tables
  *   load_bias   -> load bias
+ *   should_pad_segments -> Are segments extended to avoid gaps in the memory map
  *   prop        -> GnuPropertySection or nullptr
  * Return:
  *   0 on success, -1 on failure (error code in errno).
  */
 int phdr_table_protect_segments(const ElfW(Phdr)* phdr_table, size_t phdr_count,
-                                ElfW(Addr) load_bias, const GnuPropertySection* prop __unused) {
+                                ElfW(Addr) load_bias, bool should_pad_segments,
+                                const GnuPropertySection* prop __unused) {
   int prot = 0;
 #if defined(__aarch64__)
   if ((prop != nullptr) && prop->IsBTICompatible()) {
     prot |= PROT_BTI;
   }
 #endif
-  return _phdr_table_set_load_prot(phdr_table, phdr_count, load_bias, prot);
+  return _phdr_table_set_load_prot(phdr_table, phdr_count, load_bias, prot, should_pad_segments);
 }

 /* Change the protection of all loaded segments in memory to writable.
@@ -929,19 +983,53 @@ int phdr_table_protect_segments(const ElfW(Phdr)* phdr_table, size_t phdr_count,
  *   phdr_table  -> program header table
  *   phdr_count  -> number of entries in tables
  *   load_bias   -> load bias
+ *   should_pad_segments -> Are segments extended to avoid gaps in the memory map
  * Return:
  *   0 on success, -1 on failure (error code in errno).
  */
 int phdr_table_unprotect_segments(const ElfW(Phdr)* phdr_table,
-                                  size_t phdr_count, ElfW(Addr) load_bias) {
-  return _phdr_table_set_load_prot(phdr_table, phdr_count, load_bias, PROT_WRITE);
+                                  size_t phdr_count, ElfW(Addr) load_bias,
+                                  bool should_pad_segments) {
+  return _phdr_table_set_load_prot(phdr_table, phdr_count, load_bias, PROT_WRITE,
+                                   should_pad_segments);
 }

+static inline void _extend_gnu_relro_prot_end(const ElfW(Phdr)* relro_phdr,
+                                              const ElfW(Phdr)* phdr_table, size_t phdr_count,
+                                              ElfW(Addr) load_bias, ElfW(Addr)* seg_page_end) {
+  // Find the index and phdr of the LOAD containing the GNU_RELRO segment
+  for (size_t index = 0; index < phdr_count; ++index) {
+    const ElfW(Phdr)* phdr = &phdr_table[index];
+
+    if (phdr->p_type == PT_LOAD && phdr->p_vaddr == relro_phdr->p_vaddr) {
+      // If the PT_GNU_RELRO mem size is not at least as large as the corresponding
+      // LOAD segment mem size, we need to protect only a partial region of the
+      // LOAD segment and therefore cannot avoid a VMA split.
+      if (relro_phdr->p_memsz < phdr->p_memsz) {
+        break;
+      }
+
+      ElfW(Addr) p_memsz = phdr->p_memsz;
+      ElfW(Addr) p_filesz = phdr->p_filesz;
+
+      // Attempt extending the VMA (mprotect range). Without extending the range,
+      // mprotect will only RO protect a part of the extended RW LOAD segment, which
+      // will leave an extra split RW VMA (the gap).
+      _extend_load_segment_vma(phdr_table, phdr_count, index, &p_memsz, &p_filesz);
+
+      *seg_page_end = page_end(phdr->p_vaddr + p_memsz + load_bias);
+
+      break;
+    }
+  }
+}
+
 /* Used internally by phdr_table_protect_gnu_relro and
  * phdr_table_unprotect_gnu_relro.
  */
 static int _phdr_table_set_gnu_relro_prot(const ElfW(Phdr)* phdr_table, size_t phdr_count,
-                                          ElfW(Addr) load_bias, int prot_flags) {
+                                          ElfW(Addr) load_bias, int prot_flags,
+                                          bool should_pad_segments) {
   const ElfW(Phdr)* phdr = phdr_table;
   const ElfW(Phdr)* phdr_limit = phdr + phdr_count;
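The VMA split these comments describe is observable outside the linker. A minimal Linux sketch, not bionic code: mprotect over a sub-range of a mapping forces the kernel to split one VMA into two, which is precisely what extending the mprotect range across the padded gap avoids.

#include <sys/mman.h>
#include <unistd.h>
#include <cstdio>
#include <cstdlib>

int main() {
  const long page = sysconf(_SC_PAGESIZE);
  // One 4-page anonymous RW mapping: a single VMA in /proc/self/maps.
  char* m = static_cast<char*>(mmap(nullptr, 4 * page, PROT_READ | PROT_WRITE,
                                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0));
  if (m == MAP_FAILED) return 1;

  // RO-protect only the first two pages: the kernel must now track an r--p
  // VMA followed by an rw-p VMA. Protecting all four pages keeps one VMA.
  if (mprotect(m, 2 * page, PROT_READ) != 0) return 1;

  // Print the two resulting adjacent entries.
  char cmd[128];
  snprintf(cmd, sizeof(cmd), "grep -A1 '^%lx' /proc/self/maps",
           reinterpret_cast<unsigned long>(m));
  return system(cmd);
}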
@@ -966,8 +1054,16 @@ static int _phdr_table_set_gnu_relro_prot(const ElfW(Phdr)* phdr_table, size_t p
     // the program is likely to fail at runtime. So in effect the
     // linker must only emit a PT_GNU_RELRO segment if it ensures
     // that it starts on a page boundary.
-    ElfW(Addr) seg_page_start = page_start(phdr->p_vaddr) + load_bias;
-    ElfW(Addr) seg_page_end = page_end(phdr->p_vaddr + phdr->p_memsz) + load_bias;
+    ElfW(Addr) seg_page_start = page_start(phdr->p_vaddr + load_bias);
+    ElfW(Addr) seg_page_end = page_end(phdr->p_vaddr + phdr->p_memsz + load_bias);
+
+    // Before extending the RO protection, we need to ensure that the segments were extended
+    // by bionic, because the kernel won't map gaps so it usually contains unrelated
+    // mappings which will be incorrectly protected as RO likely leading to
+    // segmentation fault.
+    if (phdr->p_align > kPageSize && should_pad_segments) {
+      _extend_gnu_relro_prot_end(phdr, phdr_table, phdr_count, load_bias, &seg_page_end);
+    }

     int ret = mprotect(reinterpret_cast<void*>(seg_page_start),
                        seg_page_end - seg_page_start,
@@ -992,12 +1088,14 @@ static int _phdr_table_set_gnu_relro_prot(const ElfW(Phdr)* phdr_table, size_t p
  *   phdr_table  -> program header table
  *   phdr_count  -> number of entries in tables
  *   load_bias   -> load bias
+ *   should_pad_segments -> Were segments extended to avoid gaps in the memory map
  * Return:
  *   0 on success, -1 on failure (error code in errno).
  */
-int phdr_table_protect_gnu_relro(const ElfW(Phdr)* phdr_table,
-                                 size_t phdr_count, ElfW(Addr) load_bias) {
-  return _phdr_table_set_gnu_relro_prot(phdr_table, phdr_count, load_bias, PROT_READ);
+int phdr_table_protect_gnu_relro(const ElfW(Phdr)* phdr_table, size_t phdr_count,
+                                 ElfW(Addr) load_bias, bool should_pad_segments) {
+  return _phdr_table_set_gnu_relro_prot(phdr_table, phdr_count, load_bias, PROT_READ,
+                                        should_pad_segments);
 }

 /* Serialize the GNU relro segments to the given file descriptor. This can be

linker/linker_phdr.h:
@@ -128,13 +128,14 @@ size_t phdr_table_get_load_size(const ElfW(Phdr)* phdr_table, size_t phdr_count,
 size_t phdr_table_get_maximum_alignment(const ElfW(Phdr)* phdr_table, size_t phdr_count);

 int phdr_table_protect_segments(const ElfW(Phdr)* phdr_table, size_t phdr_count,
-                                ElfW(Addr) load_bias, const GnuPropertySection* prop = nullptr);
+                                ElfW(Addr) load_bias, bool should_pad_segments,
+                                const GnuPropertySection* prop = nullptr);

 int phdr_table_unprotect_segments(const ElfW(Phdr)* phdr_table, size_t phdr_count,
-                                  ElfW(Addr) load_bias);
+                                  ElfW(Addr) load_bias, bool should_pad_segments);

 int phdr_table_protect_gnu_relro(const ElfW(Phdr)* phdr_table, size_t phdr_count,
-                                 ElfW(Addr) load_bias);
+                                 ElfW(Addr) load_bias, bool should_pad_segments);

 int phdr_table_serialize_gnu_relro(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                                    ElfW(Addr) load_bias, int fd, size_t* file_offset);

linker/linker_relocate.cpp:
@@ -187,7 +187,8 @@ static bool process_relocation_impl(Relocator& relocator, const rel_t& reloc) {
   auto protect_segments = [&]() {
     // Make .text executable.
     if (phdr_table_protect_segments(relocator.si->phdr, relocator.si->phnum,
-                                    relocator.si->load_bias) < 0) {
+                                    relocator.si->load_bias,
+                                    relocator.si->should_pad_segments()) < 0) {
       DL_ERR("can't protect segments for \"%s\": %s",
              relocator.si->get_realpath(), strerror(errno));
       return false;
@@ -197,7 +198,8 @@ static bool process_relocation_impl(Relocator& relocator, const rel_t& reloc) {
   auto unprotect_segments = [&]() {
     // Make .text writable.
     if (phdr_table_unprotect_segments(relocator.si->phdr, relocator.si->phnum,
-                                      relocator.si->load_bias) < 0) {
+                                      relocator.si->load_bias,
+                                      relocator.si->should_pad_segments()) < 0) {
       DL_ERR("can't unprotect loadable segments for \"%s\": %s",
              relocator.si->get_realpath(), strerror(errno));
       return false;

tests/dlext_test.cpp:
@@ -31,6 +31,7 @@
 #include <android-base/test_utils.h>

 #include <sys/mman.h>
+#include <sys/stat.h>
 #include <sys/types.h>
 #include <sys/vfs.h>
 #include <sys/wait.h>
@@ -2046,6 +2047,11 @@ TEST(dlext, ns_anonymous) {
                                                -1, 0));
   ASSERT_TRUE(reinterpret_cast<void*>(reserved_addr) != MAP_FAILED);

+  struct stat file_stat;
+  int ret = TEMP_FAILURE_RETRY(stat(private_library_absolute_path.c_str(), &file_stat));
+  ASSERT_EQ(ret, 0) << "Failed to stat library";
+  size_t file_size = file_stat.st_size;
+
   for (const auto& rec : maps_to_copy) {
     uintptr_t offset = rec.addr_start - addr_start;
     size_t size = rec.addr_end - rec.addr_start;
@@ -2053,7 +2059,13 @@ TEST(dlext, ns_anonymous) {
     void* map = mmap(addr, size, PROT_READ | PROT_WRITE,
                      MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0);
     ASSERT_TRUE(map != MAP_FAILED);
-    memcpy(map, reinterpret_cast<void*>(rec.addr_start), size);
+    size_t seg_size = size;
+    // See comment on file map fault in ElfReader::LoadSegments()
+    // bionic/linker/linker_phdr.cpp
+    if (rec.offset + size > file_size) {
+      seg_size = file_size - rec.offset;
+    }
+    memcpy(map, reinterpret_cast<void*>(rec.addr_start), seg_size);
     mprotect(map, size, rec.perms);
   }

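With padded segments, the tail of a mapped library can extend past the end of the file on disk, and reading that tail faults because the kernel cannot service a file-backed fault beyond EOF (the LoadSegments() comment referenced above). The clamp in isolation, as a sketch using an invented helper name:

#include <algorithm>
#include <cstddef>
#include <cstdint>

// file_backed_size() is a made-up name, not part of the test or bionic: how many
// bytes of a mapping at the given file offset are actually backed by the file.
size_t file_backed_size(uint64_t map_file_offset, size_t map_size, size_t file_size) {
  if (map_file_offset >= file_size) return 0;  // lies entirely past EOF
  return std::min<size_t>(map_size, file_size - map_file_offset);
}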