Merge changes from topic "revert-2966884-NAVMRGEYJW" into main

* changes:
  Revert "RELAND: bionic: loader: Extend LOAD segment VMAs"
  Revert "RELAND: bionic: loader: Extend GNU_RELRO protection"
Treehugger Robot 2024-03-08 03:41:08 +00:00 committed by Gerrit Code Review
commit fd9824d0b1
8 changed files with 34 additions and 182 deletions
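
For orientation, the net effect of the two reverts on the loader's phdr API (visible in the header hunk further down) is to drop the should_pad_segments parameter from the segment-protection helpers. Below is a minimal before/after sketch of just those declarations; it is illustrative only: ElfW() comes from <link.h>, and GnuPropertySection is left opaque here (it is the linker's own type).

#include <cstddef>           // size_t
#include <link.h>            // ElfW()

class GnuPropertySection;    // opaque here; defined inside the dynamic linker

// Before the revert (the RELAND changes being backed out):
int phdr_table_protect_segments(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                                ElfW(Addr) load_bias, bool should_pad_segments,
                                const GnuPropertySection* prop = nullptr);
int phdr_table_unprotect_segments(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                                  ElfW(Addr) load_bias, bool should_pad_segments);
int phdr_table_protect_gnu_relro(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                                 ElfW(Addr) load_bias, bool should_pad_segments);

// After the revert (original signatures restored):
int phdr_table_protect_segments(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                                ElfW(Addr) load_bias,
                                const GnuPropertySection* prop = nullptr);
int phdr_table_unprotect_segments(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                                  ElfW(Addr) load_bias);
int phdr_table_protect_gnu_relro(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                                 ElfW(Addr) load_bias);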

View file

@@ -116,7 +116,6 @@ cc_defaults {
"libziparchive",
"libbase",
"libz",
"libprocinfo", // For procinfo::MappedFileSize()
"libasync_safe",
@@ -574,7 +573,6 @@ cc_test {
"libasync_safe",
"libbase",
"liblog_for_runtime_apex",
"libprocinfo", // For procinfo::MappedFileSize()
],
data_libs: [

View file

@@ -3364,7 +3364,7 @@ bool soinfo::link_image(const SymbolLookupList& lookup_list, soinfo* local_group
"\"%s\" has text relocations",
get_realpath());
add_dlwarning(get_realpath(), "text relocations");
if (phdr_table_unprotect_segments(phdr, phnum, load_bias, should_pad_segments_) < 0) {
if (phdr_table_unprotect_segments(phdr, phnum, load_bias) < 0) {
DL_ERR("can't unprotect loadable segments for \"%s\": %s", get_realpath(), strerror(errno));
return false;
}
@@ -3380,7 +3380,7 @@ bool soinfo::link_image(const SymbolLookupList& lookup_list, soinfo* local_group
#if !defined(__LP64__)
if (has_text_relocations) {
// All relocations are done, we can protect our segments back to read-only.
if (phdr_table_protect_segments(phdr, phnum, load_bias, should_pad_segments_) < 0) {
if (phdr_table_protect_segments(phdr, phnum, load_bias) < 0) {
DL_ERR("can't protect segments for \"%s\": %s",
get_realpath(), strerror(errno));
return false;
@@ -3418,7 +3418,7 @@ bool soinfo::link_image(const SymbolLookupList& lookup_list, soinfo* local_group
}
bool soinfo::protect_relro() {
if (phdr_table_protect_gnu_relro(phdr, phnum, load_bias, should_pad_segments_) < 0) {
if (phdr_table_protect_gnu_relro(phdr, phnum, load_bias) < 0) {
DL_ERR("can't enable GNU RELRO protection for \"%s\": %s",
get_realpath(), strerror(errno));
return false;

View file

@@ -201,7 +201,6 @@ struct ExecutableInfo {
const ElfW(Phdr)* phdr;
size_t phdr_count;
ElfW(Addr) entry_point;
bool should_pad_segments;
};
static ExecutableInfo get_executable_info(const char* arg_path) {
@@ -294,7 +293,6 @@ static ExecutableInfo load_executable(const char* orig_path) {
result.phdr = elf_reader.loaded_phdr();
result.phdr_count = elf_reader.phdr_count();
result.entry_point = elf_reader.entry_point();
result.should_pad_segments = elf_reader.should_pad_segments();
return result;
}
@@ -368,7 +366,6 @@ static ElfW(Addr) linker_main(KernelArgumentBlock& args, const char* exe_to_load
somain = si;
si->phdr = exe_info.phdr;
si->phnum = exe_info.phdr_count;
si->set_should_pad_segments(exe_info.should_pad_segments);
get_elf_base_from_phdr(si->phdr, si->phnum, &si->base, &si->load_bias);
si->size = phdr_table_get_load_size(si->phdr, si->phnum);
si->dynamic = nullptr;
@@ -402,7 +399,7 @@ static ElfW(Addr) linker_main(KernelArgumentBlock& args, const char* exe_to_load
auto note_gnu_property = GnuPropertySection(somain);
if (note_gnu_property.IsBTICompatible() &&
(phdr_table_protect_segments(somain->phdr, somain->phnum, somain->load_bias,
somain->should_pad_segments(), &note_gnu_property) < 0)) {
&note_gnu_property) < 0)) {
__linker_error("error: can't protect segments for \"%s\": %s", exe_info.path.c_str(),
strerror(errno));
}

View file

@@ -46,8 +46,6 @@
#include "private/CFIShadow.h" // For kLibraryAlignment
#include "private/elf_note.h"
#include <procinfo/process_map.h>
static int GetTargetElfMachine() {
#if defined(__arm__)
return EM_ARM;
@@ -198,7 +196,7 @@ bool ElfReader::Load(address_space_params* address_space) {
// For Armv8.5-A loaded executable segments may require PROT_BTI.
if (note_gnu_property_.IsBTICompatible()) {
did_load_ = (phdr_table_protect_segments(phdr_table_, phdr_num_, load_bias_,
should_pad_segments_, &note_gnu_property_) == 0);
&note_gnu_property_) == 0);
}
#endif
}
@@ -758,41 +756,6 @@ bool ElfReader::ReadPadSegmentNote() {
return true;
}
static inline void _extend_load_segment_vma(const ElfW(Phdr)* phdr_table, size_t phdr_count,
size_t phdr_idx, ElfW(Addr)* p_memsz,
ElfW(Addr)* p_filesz, bool should_pad_segments) {
const ElfW(Phdr)* phdr = &phdr_table[phdr_idx];
const ElfW(Phdr)* next = nullptr;
size_t next_idx = phdr_idx + 1;
if (phdr->p_align == kPageSize || !should_pad_segments) {
return;
}
if (next_idx < phdr_count && phdr_table[next_idx].p_type == PT_LOAD) {
next = &phdr_table[next_idx];
}
// If this is the last LOAD segment, no extension is needed
if (!next || *p_memsz != *p_filesz) {
return;
}
ElfW(Addr) next_start = page_start(next->p_vaddr);
ElfW(Addr) curr_end = page_end(phdr->p_vaddr + *p_memsz);
// If adjacent segment mappings overlap, no extension is needed.
if (curr_end >= next_start) {
return;
}
// Extend the LOAD segment mapping to be contiguous with that of
// the next LOAD segment.
ElfW(Addr) extend = next_start - curr_end;
*p_memsz += extend;
*p_filesz += extend;
}
bool ElfReader::LoadSegments() {
for (size_t i = 0; i < phdr_num_; ++i) {
const ElfW(Phdr)* phdr = &phdr_table_[i];
@@ -801,22 +764,18 @@ bool ElfReader::LoadSegments() {
continue;
}
ElfW(Addr) p_memsz = phdr->p_memsz;
ElfW(Addr) p_filesz = phdr->p_filesz;
_extend_load_segment_vma(phdr_table_, phdr_num_, i, &p_memsz, &p_filesz, should_pad_segments_);
// Segment addresses in memory.
ElfW(Addr) seg_start = phdr->p_vaddr + load_bias_;
ElfW(Addr) seg_end = seg_start + p_memsz;
ElfW(Addr) seg_end = seg_start + phdr->p_memsz;
ElfW(Addr) seg_page_start = page_start(seg_start);
ElfW(Addr) seg_page_end = page_end(seg_end);
ElfW(Addr) seg_file_end = seg_start + p_filesz;
ElfW(Addr) seg_file_end = seg_start + phdr->p_filesz;
// File offsets.
ElfW(Addr) file_start = phdr->p_offset;
ElfW(Addr) file_end = file_start + p_filesz;
ElfW(Addr) file_end = file_start + phdr->p_filesz;
ElfW(Addr) file_page_start = page_start(file_start);
ElfW(Addr) file_length = file_end - file_page_start;
@@ -826,12 +785,12 @@ bool ElfReader::LoadSegments() {
return false;
}
if (file_start + phdr->p_filesz > static_cast<size_t>(file_size_)) {
if (file_end > static_cast<size_t>(file_size_)) {
DL_ERR("invalid ELF file \"%s\" load segment[%zd]:"
" p_offset (%p) + p_filesz (%p) ( = %p) past end of file (0x%" PRIx64 ")",
name_.c_str(), i, reinterpret_cast<void*>(phdr->p_offset),
reinterpret_cast<void*>(phdr->p_filesz),
reinterpret_cast<void*>(file_start + phdr->p_filesz), file_size_);
reinterpret_cast<void*>(file_end), file_size_);
return false;
}
@@ -869,25 +828,10 @@ bool ElfReader::LoadSegments() {
}
}
// if the segment is writable, and its memory map extends beyond
// the segment contents on file (p_filesz); zero-fill it until the
// end of the mapping backed by the file, rounded to the next
// page boundary; as this portion of the mapping corresponds to either
// garbage (partial page at the end) or data from other segments.
//
// If any part of the mapping extends beyond the file size there is
// no need to zero it since that region is not touchable by userspace
// and attempting to do so will causes the kernel to throw a SIGBUS.
//
// See: system/libprocinfo/include/procinfo/process_map_size.h
uint64_t file_backed_size = ::android::procinfo::MappedFileSize(seg_page_start,
page_end(seg_page_start + file_length),
file_offset_ + file_page_start, file_size_);
// _seg_file_end = unextended seg_file_end
uint64_t _seg_file_end = seg_start + phdr->p_filesz;
uint64_t zero_fill_len = file_backed_size - (_seg_file_end - seg_page_start);
if ((phdr->p_flags & PF_W) != 0 && zero_fill_len > 0) {
memset(reinterpret_cast<void*>(_seg_file_end), 0, zero_fill_len);
// if the segment is writable, and does not end on a page boundary,
// zero-fill it until the page limit.
if ((phdr->p_flags & PF_W) != 0 && page_offset(seg_file_end) > 0) {
memset(reinterpret_cast<void*>(seg_file_end), 0, page_size() - page_offset(seg_file_end));
}
seg_file_end = page_end(seg_file_end);
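
As an aside on the hunk above: the restored code pads a writable segment's tail out to the next page boundary instead of computing the file-backed size via procinfo::MappedFileSize(). Here is a minimal standalone sketch of the page arithmetic it relies on, assuming a fixed 4 KiB page size (the real loader uses its own page_size()/page_offset()/page_end() helpers and the runtime page size); the addresses are made up for illustration.

#include <cstdint>
#include <cstdio>

constexpr uint64_t kPageSize = 4096;  // assumption for this sketch

constexpr uint64_t page_start(uint64_t addr)  { return addr & ~(kPageSize - 1); }
constexpr uint64_t page_offset(uint64_t addr) { return addr & (kPageSize - 1); }
constexpr uint64_t page_end(uint64_t addr)    { return page_start(addr + kPageSize - 1); }

int main() {
  // Hypothetical writable segment whose file-backed contents end mid-page.
  uint64_t seg_file_end = 0x700000a10;  // seg_start + p_filesz, not page aligned
  if (page_offset(seg_file_end) > 0) {
    // Zero the tail of the partial page so it does not expose unrelated file
    // bytes mapped in past p_filesz (the loader does this with memset()).
    uint64_t zero_fill_len = kPageSize - page_offset(seg_file_end);
    printf("zero-fill %llu bytes, up to %#llx\n",
           static_cast<unsigned long long>(zero_fill_len),
           static_cast<unsigned long long>(page_end(seg_file_end)));
  }
  return 0;
}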
@@ -920,21 +864,17 @@ bool ElfReader::LoadSegments() {
* phdr_table_protect_segments and phdr_table_unprotect_segments.
*/
static int _phdr_table_set_load_prot(const ElfW(Phdr)* phdr_table, size_t phdr_count,
ElfW(Addr) load_bias, int extra_prot_flags,
bool should_pad_segments) {
for (size_t i = 0; i < phdr_count; ++i) {
const ElfW(Phdr)* phdr = &phdr_table[i];
ElfW(Addr) load_bias, int extra_prot_flags) {
const ElfW(Phdr)* phdr = phdr_table;
const ElfW(Phdr)* phdr_limit = phdr + phdr_count;
for (; phdr < phdr_limit; phdr++) {
if (phdr->p_type != PT_LOAD || (phdr->p_flags & PF_W) != 0) {
continue;
}
ElfW(Addr) p_memsz = phdr->p_memsz;
ElfW(Addr) p_filesz = phdr->p_filesz;
_extend_load_segment_vma(phdr_table, phdr_count, i, &p_memsz, &p_filesz, should_pad_segments);
ElfW(Addr) seg_page_start = page_start(phdr->p_vaddr + load_bias);
ElfW(Addr) seg_page_end = page_end(phdr->p_vaddr + p_memsz + load_bias);
ElfW(Addr) seg_page_start = page_start(phdr->p_vaddr) + load_bias;
ElfW(Addr) seg_page_end = page_end(phdr->p_vaddr + phdr->p_memsz) + load_bias;
int prot = PFLAGS_TO_PROT(phdr->p_flags) | extra_prot_flags;
if ((prot & PROT_WRITE) != 0) {
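
One subtlety in the hunk above: the restored code rounds with page_start(phdr->p_vaddr) + load_bias, while the reverted version used page_start(phdr->p_vaddr + load_bias). The two forms agree whenever load_bias is page-aligned, which is the normal case since the load reservation comes from mmap(). A tiny self-check under that assumption (4 KiB pages and made-up values, for illustration only):

#include <cassert>
#include <cstdint>

constexpr uint64_t kPageSize = 4096;
constexpr uint64_t page_start(uint64_t a) { return a & ~(kPageSize - 1); }

int main() {
  const uint64_t p_vaddr = 0x1234;          // hypothetical segment p_vaddr
  const uint64_t load_bias = 0x7000000000;  // page-aligned load bias
  // Rounding before or after adding a page-aligned bias gives the same address.
  assert(page_start(p_vaddr + load_bias) == page_start(p_vaddr) + load_bias);
  return 0;
}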
@@ -969,21 +909,19 @@ static int _phdr_table_set_load_prot(const ElfW(Phdr)* phdr_table, size_t phdr_c
* phdr_table -> program header table
* phdr_count -> number of entries in tables
* load_bias -> load bias
* should_pad_segments -> Are segments extended to avoid gaps in the memory map
* prop -> GnuPropertySection or nullptr
* Return:
* 0 on success, -1 on failure (error code in errno).
*/
int phdr_table_protect_segments(const ElfW(Phdr)* phdr_table, size_t phdr_count,
ElfW(Addr) load_bias, bool should_pad_segments,
const GnuPropertySection* prop __unused) {
ElfW(Addr) load_bias, const GnuPropertySection* prop __unused) {
int prot = 0;
#if defined(__aarch64__)
if ((prop != nullptr) && prop->IsBTICompatible()) {
prot |= PROT_BTI;
}
#endif
return _phdr_table_set_load_prot(phdr_table, phdr_count, load_bias, prot, should_pad_segments);
return _phdr_table_set_load_prot(phdr_table, phdr_count, load_bias, prot);
}
/* Change the protection of all loaded segments in memory to writable.
@@ -999,82 +937,19 @@ int phdr_table_protect_segments(const ElfW(Phdr)* phdr_table, size_t phdr_count,
* phdr_table -> program header table
* phdr_count -> number of entries in tables
* load_bias -> load bias
* should_pad_segments -> Are segments extended to avoid gaps in the memory map
* Return:
* 0 on success, -1 on failure (error code in errno).
*/
int phdr_table_unprotect_segments(const ElfW(Phdr)* phdr_table,
size_t phdr_count, ElfW(Addr) load_bias,
bool should_pad_segments) {
return _phdr_table_set_load_prot(phdr_table, phdr_count, load_bias, PROT_WRITE,
should_pad_segments);
}
static inline void _extend_gnu_relro_prot_end(const ElfW(Phdr)* relro_phdr,
const ElfW(Phdr)* phdr_table, size_t phdr_count,
ElfW(Addr) load_bias, ElfW(Addr)* seg_page_end,
bool should_pad_segments) {
// Find the index and phdr of the LOAD containing the GNU_RELRO segment
for (size_t index = 0; index < phdr_count; ++index) {
const ElfW(Phdr)* phdr = &phdr_table[index];
if (phdr->p_type == PT_LOAD && phdr->p_vaddr == relro_phdr->p_vaddr) {
// If the PT_GNU_RELRO mem size is not at least as large as the corresponding
// LOAD segment mem size, we need to protect only a partial region of the
// LOAD segment and therefore cannot avoid a VMA split.
//
// Note: Don't check the page-aligned mem sizes since the extended protection
// may incorrectly write protect non-relocation data.
//
// Example:
//
// |---- 3K ----|-- 1K --|---- 3K ---- |-- 1K --|
// ----------------------------------------------------------------
// | | | | |
// SEG X | RO | RO | RW | | SEG Y
// | | | | |
// ----------------------------------------------------------------
// | | |
// | | |
// | | |
// relro_vaddr relro_vaddr relro_vaddr
// (load_vaddr) + +
// relro_memsz load_memsz
//
// ----------------------------------------------------------------
// | PAGE | PAGE |
// ----------------------------------------------------------------
// | Potential |
// |----- Extended RO ----|
// | Protection |
//
// If the check below uses page aligned mem sizes it will cause incorrect write
// protection of the 3K RW part of the LOAD segment containing the GNU_RELRO.
if (relro_phdr->p_memsz < phdr->p_memsz) {
return;
}
ElfW(Addr) p_memsz = phdr->p_memsz;
ElfW(Addr) p_filesz = phdr->p_filesz;
// Attempt extending the VMA (mprotect range). Without extending the range,
// mprotect will only RO protect a part of the extended RW LOAD segment, which
// will leave an extra split RW VMA (the gap).
_extend_load_segment_vma(phdr_table, phdr_count, index, &p_memsz, &p_filesz,
should_pad_segments);
*seg_page_end = page_end(phdr->p_vaddr + p_memsz + load_bias);
return;
}
}
size_t phdr_count, ElfW(Addr) load_bias) {
return _phdr_table_set_load_prot(phdr_table, phdr_count, load_bias, PROT_WRITE);
}
/* Used internally by phdr_table_protect_gnu_relro and
* phdr_table_unprotect_gnu_relro.
*/
static int _phdr_table_set_gnu_relro_prot(const ElfW(Phdr)* phdr_table, size_t phdr_count,
ElfW(Addr) load_bias, int prot_flags,
bool should_pad_segments) {
ElfW(Addr) load_bias, int prot_flags) {
const ElfW(Phdr)* phdr = phdr_table;
const ElfW(Phdr)* phdr_limit = phdr + phdr_count;
@@ -1101,8 +976,6 @@ static int _phdr_table_set_gnu_relro_prot(const ElfW(Phdr)* phdr_table, size_t p
// that it starts on a page boundary.
ElfW(Addr) seg_page_start = page_start(phdr->p_vaddr) + load_bias;
ElfW(Addr) seg_page_end = page_end(phdr->p_vaddr + phdr->p_memsz) + load_bias;
_extend_gnu_relro_prot_end(phdr, phdr_table, phdr_count, load_bias, &seg_page_end,
should_pad_segments);
int ret = mprotect(reinterpret_cast<void*>(seg_page_start),
seg_page_end - seg_page_start,
@@ -1127,14 +1000,12 @@ static int _phdr_table_set_gnu_relro_prot(const ElfW(Phdr)* phdr_table, size_t p
* phdr_table -> program header table
* phdr_count -> number of entries in tables
* load_bias -> load bias
* should_pad_segments -> Were segments extended to avoid gaps in the memory map
* Return:
* 0 on success, -1 on failure (error code in errno).
*/
int phdr_table_protect_gnu_relro(const ElfW(Phdr)* phdr_table, size_t phdr_count,
ElfW(Addr) load_bias, bool should_pad_segments) {
return _phdr_table_set_gnu_relro_prot(phdr_table, phdr_count, load_bias, PROT_READ,
should_pad_segments);
int phdr_table_protect_gnu_relro(const ElfW(Phdr)* phdr_table,
size_t phdr_count, ElfW(Addr) load_bias) {
return _phdr_table_set_gnu_relro_prot(phdr_table, phdr_count, load_bias, PROT_READ);
}
/* Serialize the GNU relro segments to the given file descriptor. This can be

View file

@@ -128,14 +128,13 @@ size_t phdr_table_get_load_size(const ElfW(Phdr)* phdr_table, size_t phdr_count,
size_t phdr_table_get_maximum_alignment(const ElfW(Phdr)* phdr_table, size_t phdr_count);
int phdr_table_protect_segments(const ElfW(Phdr)* phdr_table, size_t phdr_count,
ElfW(Addr) load_bias, bool should_pad_segments,
const GnuPropertySection* prop = nullptr);
ElfW(Addr) load_bias, const GnuPropertySection* prop = nullptr);
int phdr_table_unprotect_segments(const ElfW(Phdr)* phdr_table, size_t phdr_count,
ElfW(Addr) load_bias, bool should_pad_segments);
ElfW(Addr) load_bias);
int phdr_table_protect_gnu_relro(const ElfW(Phdr)* phdr_table, size_t phdr_count,
ElfW(Addr) load_bias, bool should_pad_segments);
ElfW(Addr) load_bias);
int phdr_table_serialize_gnu_relro(const ElfW(Phdr)* phdr_table, size_t phdr_count,
ElfW(Addr) load_bias, int fd, size_t* file_offset);

View file

@@ -187,8 +187,7 @@ static bool process_relocation_impl(Relocator& relocator, const rel_t& reloc) {
auto protect_segments = [&]() {
// Make .text executable.
if (phdr_table_protect_segments(relocator.si->phdr, relocator.si->phnum,
relocator.si->load_bias,
relocator.si->should_pad_segments()) < 0) {
relocator.si->load_bias) < 0) {
DL_ERR("can't protect segments for \"%s\": %s",
relocator.si->get_realpath(), strerror(errno));
return false;
@@ -198,8 +197,7 @@ static bool process_relocation_impl(Relocator& relocator, const rel_t& reloc) {
auto unprotect_segments = [&]() {
// Make .text writable.
if (phdr_table_unprotect_segments(relocator.si->phdr, relocator.si->phnum,
relocator.si->load_bias,
relocator.si->should_pad_segments()) < 0) {
relocator.si->load_bias) < 0) {
DL_ERR("can't unprotect loadable segments for \"%s\": %s",
relocator.si->get_realpath(), strerror(errno));
return false;

View file

@@ -785,7 +785,6 @@ cc_test_library {
],
static_libs: [
"libbase",
"libprocinfo",
],
include_dirs: [
"bionic/libc",

View file

@@ -31,7 +31,6 @@
#include <android-base/test_utils.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/vfs.h>
#include <sys/wait.h>
@@ -2047,11 +2046,6 @@ TEST(dlext, ns_anonymous) {
-1, 0));
ASSERT_TRUE(reinterpret_cast<void*>(reserved_addr) != MAP_FAILED);
struct stat file_stat;
int ret = TEMP_FAILURE_RETRY(stat(private_library_absolute_path.c_str(), &file_stat));
ASSERT_EQ(ret, 0) << "Failed to stat library";
size_t file_size = file_stat.st_size;
for (const auto& rec : maps_to_copy) {
uintptr_t offset = rec.addr_start - addr_start;
size_t size = rec.addr_end - rec.addr_start;
@@ -2059,11 +2053,7 @@ TEST(dlext, ns_anonymous) {
void* map = mmap(addr, size, PROT_READ | PROT_WRITE,
MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0);
ASSERT_TRUE(map != MAP_FAILED);
// Attempting the below memcpy from a portion of the map that is off the end of
// the backing file will cause the kernel to throw a SIGBUS
size_t _size = ::android::procinfo::MappedFileSize(rec.addr_start, rec.addr_end,
rec.offset, file_size);
memcpy(map, reinterpret_cast<void*>(rec.addr_start), _size);
memcpy(map, reinterpret_cast<void*>(rec.addr_start), size);
mprotect(map, size, rec.perms);
}