From 4084b555b2658f6bdbcb64781b3ff6d478228221 Mon Sep 17 00:00:00 2001
From: Kalesh Singh <kaleshsingh@google.com>
Date: Wed, 13 Mar 2024 13:35:49 -0700
Subject: [PATCH 1/3] Reapply "RELAND: bionic: loader: Extend LOAD segment
 VMAs"

This reverts commit 7a04fedc78a1355ffad5cedbe7b0195236d26a46.

Test: Dexcom G7 app
Bug: 328797737
Change-Id: I575d626b1313d1c66bf25f29c43a9a101024a4f8
Signed-off-by: Kalesh Singh <kaleshsingh@google.com>
---
 linker/Android.bp          |   2 +
 linker/linker.cpp          |   4 +-
 linker/linker_main.cpp     |   5 +-
 linker/linker_phdr.cpp     | 105 ++++++++++++++++++++++++++++++------
 linker/linker_phdr.h       |   5 +-
 linker/linker_relocate.cpp |   6 ++-
 tests/Android.bp           |   1 +
 tests/dlext_test.cpp       |  12 ++++-
 8 files changed, 112 insertions(+), 28 deletions(-)

diff --git a/linker/Android.bp b/linker/Android.bp
index 0533ae97b..55daf2202 100644
--- a/linker/Android.bp
+++ b/linker/Android.bp
@@ -116,6 +116,7 @@ cc_defaults {
         "libziparchive",
         "libbase",
         "libz",
+        "libprocinfo", // For procinfo::MappedFileSize()

         "libasync_safe",

@@ -573,6 +574,7 @@ cc_test {
         "libasync_safe",
         "libbase",
         "liblog_for_runtime_apex",
+        "libprocinfo", // For procinfo::MappedFileSize()
     ],

     data_libs: [
diff --git a/linker/linker.cpp b/linker/linker.cpp
index b0caeddc1..4fb9d5b53 100644
--- a/linker/linker.cpp
+++ b/linker/linker.cpp
@@ -3364,7 +3364,7 @@ bool soinfo::link_image(const SymbolLookupList& lookup_list, soinfo* local_group
                    "\"%s\" has text relocations", get_realpath());
     add_dlwarning(get_realpath(), "text relocations");

-    if (phdr_table_unprotect_segments(phdr, phnum, load_bias) < 0) {
+    if (phdr_table_unprotect_segments(phdr, phnum, load_bias, should_pad_segments_) < 0) {
       DL_ERR("can't unprotect loadable segments for \"%s\": %s",
              get_realpath(), strerror(errno));
       return false;
@@ -3380,7 +3380,7 @@ bool soinfo::link_image(const SymbolLookupList& lookup_list, soinfo* local_group
 #if !defined(__LP64__)
   if (has_text_relocations) {
     // All relocations are done, we can protect our segments back to read-only.
-    if (phdr_table_protect_segments(phdr, phnum, load_bias) < 0) {
+    if (phdr_table_protect_segments(phdr, phnum, load_bias, should_pad_segments_) < 0) {
       DL_ERR("can't protect segments for \"%s\": %s",
              get_realpath(), strerror(errno));
       return false;
diff --git a/linker/linker_main.cpp b/linker/linker_main.cpp
index d6592af14..1860ccc14 100644
--- a/linker/linker_main.cpp
+++ b/linker/linker_main.cpp
@@ -201,6 +201,7 @@ struct ExecutableInfo {
   const ElfW(Phdr)* phdr;
   size_t phdr_count;
   ElfW(Addr) entry_point;
+  bool should_pad_segments;
 };

 static ExecutableInfo get_executable_info(const char* arg_path) {
@@ -293,6 +294,7 @@ static ExecutableInfo load_executable(const char* orig_path) {
   result.phdr = elf_reader.loaded_phdr();
   result.phdr_count = elf_reader.phdr_count();
   result.entry_point = elf_reader.entry_point();
+  result.should_pad_segments = elf_reader.should_pad_segments();
   return result;
 }

@@ -366,6 +368,7 @@ static ElfW(Addr) linker_main(KernelArgumentBlock& args, const char* exe_to_load
   somain = si;
   si->phdr = exe_info.phdr;
   si->phnum = exe_info.phdr_count;
+  si->set_should_pad_segments(exe_info.should_pad_segments);
   get_elf_base_from_phdr(si->phdr, si->phnum, &si->base, &si->load_bias);
   si->size = phdr_table_get_load_size(si->phdr, si->phnum);
   si->dynamic = nullptr;
@@ -399,7 +402,7 @@ static ElfW(Addr) linker_main(KernelArgumentBlock& args, const char* exe_to_load
   auto note_gnu_property = GnuPropertySection(somain);
   if (note_gnu_property.IsBTICompatible() &&
       (phdr_table_protect_segments(somain->phdr, somain->phnum, somain->load_bias,
-                                   &note_gnu_property) < 0)) {
+                                   somain->should_pad_segments(), &note_gnu_property) < 0)) {
     __linker_error("error: can't protect segments for \"%s\": %s",
                    exe_info.path.c_str(), strerror(errno));
   }
diff --git a/linker/linker_phdr.cpp b/linker/linker_phdr.cpp
index 82b37a44d..07b54c5d2 100644
--- a/linker/linker_phdr.cpp
+++ b/linker/linker_phdr.cpp
@@ -46,6 +46,8 @@
 #include "private/CFIShadow.h" // For kLibraryAlignment
 #include "private/elf_note.h"

+#include <procinfo/process_map.h>
+
 static int GetTargetElfMachine() {
 #if defined(__arm__)
   return EM_ARM;
@@ -196,7 +198,7 @@ bool ElfReader::Load(address_space_params* address_space) {
         // For Armv8.5-A loaded executable segments may require PROT_BTI.
         if (note_gnu_property_.IsBTICompatible()) {
           did_load_ = (phdr_table_protect_segments(phdr_table_, phdr_num_, load_bias_,
-                                                   &note_gnu_property_) == 0);
+                                                   should_pad_segments_, &note_gnu_property_) == 0);
         }
 #endif
   }
@@ -756,6 +758,41 @@ bool ElfReader::ReadPadSegmentNote() {
   return true;
 }

+static inline void _extend_load_segment_vma(const ElfW(Phdr)* phdr_table, size_t phdr_count,
+                                            size_t phdr_idx, ElfW(Addr)* p_memsz,
+                                            ElfW(Addr)* p_filesz, bool should_pad_segments) {
+  const ElfW(Phdr)* phdr = &phdr_table[phdr_idx];
+  const ElfW(Phdr)* next = nullptr;
+  size_t next_idx = phdr_idx + 1;
+
+  if (phdr->p_align == kPageSize || !should_pad_segments) {
+    return;
+  }
+
+  if (next_idx < phdr_count && phdr_table[next_idx].p_type == PT_LOAD) {
+    next = &phdr_table[next_idx];
+  }
+
+  // If this is the last LOAD segment, or if it has a BSS tail
+  // (p_memsz != p_filesz), no extension is needed.
+  if (!next || *p_memsz != *p_filesz) {
+    return;
+  }
+
+  ElfW(Addr) next_start = page_start(next->p_vaddr);
+  ElfW(Addr) curr_end = page_end(phdr->p_vaddr + *p_memsz);
+
+  // If adjacent segment mappings overlap, no extension is needed.
+  if (curr_end >= next_start) {
+    return;
+  }
+
+  // Extend the LOAD segment mapping to be contiguous with that of
+  // the next LOAD segment.
+  ElfW(Addr) extend = next_start - curr_end;
+  *p_memsz += extend;
+  *p_filesz += extend;
+}
+
 bool ElfReader::LoadSegments() {
   for (size_t i = 0; i < phdr_num_; ++i) {
     const ElfW(Phdr)* phdr = &phdr_table_[i];
@@ -764,18 +801,22 @@
       continue;
     }

+    ElfW(Addr) p_memsz = phdr->p_memsz;
+    ElfW(Addr) p_filesz = phdr->p_filesz;
+    _extend_load_segment_vma(phdr_table_, phdr_num_, i, &p_memsz, &p_filesz, should_pad_segments_);
+
     // Segment addresses in memory.
     ElfW(Addr) seg_start = phdr->p_vaddr + load_bias_;
-    ElfW(Addr) seg_end = seg_start + phdr->p_memsz;
+    ElfW(Addr) seg_end = seg_start + p_memsz;

     ElfW(Addr) seg_page_start = page_start(seg_start);
     ElfW(Addr) seg_page_end = page_end(seg_end);

-    ElfW(Addr) seg_file_end = seg_start + phdr->p_filesz;
+    ElfW(Addr) seg_file_end = seg_start + p_filesz;

     // File offsets.
     ElfW(Addr) file_start = phdr->p_offset;
-    ElfW(Addr) file_end = file_start + phdr->p_filesz;
+    ElfW(Addr) file_end = file_start + p_filesz;

     ElfW(Addr) file_page_start = page_start(file_start);
     ElfW(Addr) file_length = file_end - file_page_start;
@@ -785,12 +826,12 @@
       return false;
     }

-    if (file_end > static_cast<size_t>(file_size_)) {
+    if (file_start + phdr->p_filesz > static_cast<size_t>(file_size_)) {
       DL_ERR("invalid ELF file \"%s\" load segment[%zd]:"
              " p_offset (%p) + p_filesz (%p) ( = %p) past end of file (0x%" PRIx64 ")",
              name_.c_str(), i, reinterpret_cast<void*>(phdr->p_offset),
              reinterpret_cast<void*>(phdr->p_filesz),
-             reinterpret_cast<void*>(file_end), file_size_);
+             reinterpret_cast<void*>(file_start + phdr->p_filesz), file_size_);
       return false;
     }

@@ -828,10 +869,25 @@
       }
     }

-    // if the segment is writable, and does not end on a page boundary,
-    // zero-fill it until the page limit.
-    if ((phdr->p_flags & PF_W) != 0 && page_offset(seg_file_end) > 0) {
-      memset(reinterpret_cast<void*>(seg_file_end), 0, page_size() - page_offset(seg_file_end));
+    // if the segment is writable, and its memory map extends beyond
+    // the segment contents on file (p_filesz); zero-fill it until the
+    // end of the mapping backed by the file, rounded to the next
+    // page boundary; as this portion of the mapping corresponds to either
+    // garbage (partial page at the end) or data from other segments.
+    //
+    // If any part of the mapping extends beyond the file size there is
+    // no need to zero it since that region is not touchable by userspace
+    // and attempting to do so will cause the kernel to throw a SIGBUS.
+    //
+    // See: system/libprocinfo/include/procinfo/process_map_size.h
+    uint64_t file_backed_size = ::android::procinfo::MappedFileSize(seg_page_start,
+                                page_end(seg_page_start + file_length),
+                                file_offset_ + file_page_start, file_size_);
+    // _seg_file_end = unextended seg_file_end
+    uint64_t _seg_file_end = seg_start + phdr->p_filesz;
+    uint64_t zero_fill_len = file_backed_size - (_seg_file_end - seg_page_start);
+    if ((phdr->p_flags & PF_W) != 0 && zero_fill_len > 0) {
+      memset(reinterpret_cast<void*>(_seg_file_end), 0, zero_fill_len);
     }

     seg_file_end = page_end(seg_file_end);
@@ -864,17 +920,21 @@
  * phdr_table_protect_segments and phdr_table_unprotect_segments.
  */
 static int _phdr_table_set_load_prot(const ElfW(Phdr)* phdr_table, size_t phdr_count,
-                                     ElfW(Addr) load_bias, int extra_prot_flags) {
-  const ElfW(Phdr)* phdr = phdr_table;
-  const ElfW(Phdr)* phdr_limit = phdr + phdr_count;
+                                     ElfW(Addr) load_bias, int extra_prot_flags,
+                                     bool should_pad_segments) {
+  for (size_t i = 0; i < phdr_count; ++i) {
+    const ElfW(Phdr)* phdr = &phdr_table[i];

-  for (; phdr < phdr_limit; phdr++) {
     if (phdr->p_type != PT_LOAD || (phdr->p_flags & PF_W) != 0) {
       continue;
     }

-    ElfW(Addr) seg_page_start = page_start(phdr->p_vaddr) + load_bias;
-    ElfW(Addr) seg_page_end = page_end(phdr->p_vaddr + phdr->p_memsz) + load_bias;
+    ElfW(Addr) p_memsz = phdr->p_memsz;
+    ElfW(Addr) p_filesz = phdr->p_filesz;
+    _extend_load_segment_vma(phdr_table, phdr_count, i, &p_memsz, &p_filesz, should_pad_segments);
+
+    ElfW(Addr) seg_page_start = page_start(phdr->p_vaddr + load_bias);
+    ElfW(Addr) seg_page_end = page_end(phdr->p_vaddr + p_memsz + load_bias);

     int prot = PFLAGS_TO_PROT(phdr->p_flags) | extra_prot_flags;
     if ((prot & PROT_WRITE) != 0) {
@@ -909,19 +969,21 @@ static int _phdr_table_set_load_prot(const ElfW(Phdr)* phdr_table, size_t phdr_c
  *   phdr_table  -> program header table
  *   phdr_count  -> number of entries in tables
  *   load_bias   -> load bias
+ *   should_pad_segments -> Were segments extended to avoid gaps in the memory map
  *   prop        -> GnuPropertySection or nullptr
  * Return:
  *   0 on success, -1 on failure (error code in errno).
  */
 int phdr_table_protect_segments(const ElfW(Phdr)* phdr_table, size_t phdr_count,
-                                ElfW(Addr) load_bias, const GnuPropertySection* prop __unused) {
+                                ElfW(Addr) load_bias, bool should_pad_segments,
+                                const GnuPropertySection* prop __unused) {
   int prot = 0;
 #if defined(__aarch64__)
   if ((prop != nullptr) && prop->IsBTICompatible()) {
     prot |= PROT_BTI;
   }
 #endif
-  return _phdr_table_set_load_prot(phdr_table, phdr_count, load_bias, prot);
+  return _phdr_table_set_load_prot(phdr_table, phdr_count, load_bias, prot, should_pad_segments);
 }

 /* Change the protection of all loaded segments in memory to writable.
@@ -937,12 +999,15 @@ int phdr_table_protect_segments(const ElfW(Phdr)* phdr_table, size_t phdr_count,
  *   phdr_table  -> program header table
  *   phdr_count  -> number of entries in tables
  *   load_bias   -> load bias
+ *   should_pad_segments -> Were segments extended to avoid gaps in the memory map
  * Return:
  *   0 on success, -1 on failure (error code in errno).
  */
 int phdr_table_unprotect_segments(const ElfW(Phdr)* phdr_table,
-                                  size_t phdr_count, ElfW(Addr) load_bias) {
-  return _phdr_table_set_load_prot(phdr_table, phdr_count, load_bias, PROT_WRITE);
+                                  size_t phdr_count, ElfW(Addr) load_bias,
+                                  bool should_pad_segments) {
+  return _phdr_table_set_load_prot(phdr_table, phdr_count, load_bias, PROT_WRITE,
+                                   should_pad_segments);
 }

 /* Used internally by phdr_table_protect_gnu_relro and
diff --git a/linker/linker_phdr.h b/linker/linker_phdr.h
index e5b87bb8c..239ecb947 100644
--- a/linker/linker_phdr.h
+++ b/linker/linker_phdr.h
@@ -128,10 +128,11 @@ size_t phdr_table_get_load_size(const ElfW(Phdr)* phdr_table, size_t phdr_count,
 size_t phdr_table_get_maximum_alignment(const ElfW(Phdr)* phdr_table, size_t phdr_count);

 int phdr_table_protect_segments(const ElfW(Phdr)* phdr_table, size_t phdr_count,
-                                ElfW(Addr) load_bias, const GnuPropertySection* prop = nullptr);
+                                ElfW(Addr) load_bias, bool should_pad_segments,
+                                const GnuPropertySection* prop = nullptr);

 int phdr_table_unprotect_segments(const ElfW(Phdr)* phdr_table, size_t phdr_count,
-                                  ElfW(Addr) load_bias);
+                                  ElfW(Addr) load_bias, bool should_pad_segments);

 int phdr_table_protect_gnu_relro(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                                  ElfW(Addr) load_bias);
diff --git a/linker/linker_relocate.cpp b/linker/linker_relocate.cpp
index 952dade9e..5b58895ee 100644
--- a/linker/linker_relocate.cpp
+++ b/linker/linker_relocate.cpp
@@ -187,7 +187,8 @@ static bool process_relocation_impl(Relocator& relocator, const rel_t& reloc) {
   auto protect_segments = [&]() {
     // Make .text executable.
     if (phdr_table_protect_segments(relocator.si->phdr, relocator.si->phnum,
-                                    relocator.si->load_bias) < 0) {
+                                    relocator.si->load_bias,
+                                    relocator.si->should_pad_segments()) < 0) {
       DL_ERR("can't protect segments for \"%s\": %s",
              relocator.si->get_realpath(), strerror(errno));
       return false;
@@ -197,7 +198,8 @@ static bool process_relocation_impl(Relocator& relocator, const rel_t& reloc) {
   auto unprotect_segments = [&]() {
     // Make .text writable.
     if (phdr_table_unprotect_segments(relocator.si->phdr, relocator.si->phnum,
-                                      relocator.si->load_bias) < 0) {
+                                      relocator.si->load_bias,
+                                      relocator.si->should_pad_segments()) < 0) {
       DL_ERR("can't unprotect loadable segments for \"%s\": %s",
              relocator.si->get_realpath(), strerror(errno));
       return false;
diff --git a/tests/Android.bp b/tests/Android.bp
index 0f4a9424e..89d226772 100644
--- a/tests/Android.bp
+++ b/tests/Android.bp
@@ -785,6 +785,7 @@ cc_test_library {
     ],
     static_libs: [
         "libbase",
+        "libprocinfo",
     ],
     include_dirs: [
         "bionic/libc",
diff --git a/tests/dlext_test.cpp b/tests/dlext_test.cpp
index d078e500b..b702725a1 100644
--- a/tests/dlext_test.cpp
+++ b/tests/dlext_test.cpp
@@ -31,6 +31,7 @@
 #include
 #include
+#include <procinfo/process_map.h>
 #include
 #include
 #include
@@ -2046,6 +2047,11 @@ TEST(dlext, ns_anonymous) {
                                              -1, 0));
   ASSERT_TRUE(reinterpret_cast<void*>(reserved_addr) != MAP_FAILED);

+  struct stat file_stat;
+  int ret = TEMP_FAILURE_RETRY(stat(private_library_absolute_path.c_str(), &file_stat));
+  ASSERT_EQ(ret, 0) << "Failed to stat library";
+  size_t file_size = file_stat.st_size;
+
   for (const auto& rec : maps_to_copy) {
     uintptr_t offset = rec.addr_start - addr_start;
     size_t size = rec.addr_end - rec.addr_start;
@@ -2053,7 +2059,11 @@ TEST(dlext, ns_anonymous) {
     void* map = mmap(addr, size, PROT_READ | PROT_WRITE,
                      MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0);
     ASSERT_TRUE(map != MAP_FAILED);
-    memcpy(map, reinterpret_cast<void*>(rec.addr_start), size);
+    // Attempting the memcpy below from a portion of the map that is off the end
+    // of the backing file will cause the kernel to throw a SIGBUS.
+    size_t _size = ::android::procinfo::MappedFileSize(rec.addr_start, rec.addr_end,
+                                                       rec.offset, file_size);
+    memcpy(map, reinterpret_cast<void*>(rec.addr_start), _size);
     mprotect(map, size, rec.perms);
   }
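For readers following the page arithmetic in _extend_load_segment_vma() above, the standalone sketch below walks one hypothetical pair of adjacent PT_LOAD segments through the same computation. It is not part of the patch series: the 4 KiB page size, the example addresses and sizes, and the local page_start()/page_end() helpers are all illustrative assumptions.

    // Sketch of the VMA-extension arithmetic in _extend_load_segment_vma(),
    // for a hypothetical 16 KiB-aligned ELF running on a 4 KiB-page kernel.
    #include <cinttypes>
    #include <cstdint>
    #include <cstdio>

    constexpr uint64_t kPageSize = 4096;  // assumed page size

    static uint64_t page_start(uint64_t addr) { return addr & ~(kPageSize - 1); }
    static uint64_t page_end(uint64_t addr) { return page_start(addr + kPageSize - 1); }

    int main() {
      // Hypothetical adjacent PT_LOAD segments (no BSS tail: p_memsz == p_filesz).
      uint64_t curr_vaddr = 0x0, curr_memsz = 0x5678, curr_filesz = 0x5678;
      uint64_t next_vaddr = 0x8000;  // the next LOAD starts 16 KiB-aligned

      uint64_t curr_end = page_end(curr_vaddr + curr_memsz);  // 0x6000
      uint64_t next_start = page_start(next_vaddr);           // 0x8000

      if (curr_end < next_start) {
        // Extend the mapping so it is contiguous with the next LOAD segment,
        // replacing what would otherwise become a separate gap VMA.
        uint64_t extend = next_start - curr_end;              // 0x2000
        curr_memsz += extend;
        curr_filesz += extend;
      }
      printf("p_memsz=0x%" PRIx64 " p_filesz=0x%" PRIx64 "\n", curr_memsz, curr_filesz);
      return 0;
    }

With these numbers, both sizes grow from 0x5678 to 0x7678, so the segment's mapping runs right up to the first page of the next segment.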
From 702d9b0bad7eac3d1d59e8e433f5a6a0345ea5f8 Mon Sep 17 00:00:00 2001
From: Kalesh Singh <kaleshsingh@google.com>
Date: Wed, 13 Mar 2024 13:38:04 -0700
Subject: [PATCH 2/3] Reapply "RELAND: bionic: loader: Extend GNU_RELRO
 protection"

This reverts commit 26de64896cd339ec5e811523b375e3b4ade4860d.

Bug: 328797737
Test: Dexcom G7 app
Change-Id: I98882edd17f0ea5432ab254482ab9508bfaf4f56
Signed-off-by: Kalesh Singh <kaleshsingh@google.com>
---
 linker/linker.cpp      |  2 +-
 linker/linker_phdr.cpp | 72 +++++++++++++++++++++++++++++++++++++++---
 linker/linker_phdr.h   |  2 +-
 3 files changed, 70 insertions(+), 6 deletions(-)

diff --git a/linker/linker.cpp b/linker/linker.cpp
index 4fb9d5b53..e54a52493 100644
--- a/linker/linker.cpp
+++ b/linker/linker.cpp
@@ -3418,7 +3418,7 @@ bool soinfo::link_image(const SymbolLookupList& lookup_list, soinfo* local_group
 }

 bool soinfo::protect_relro() {
-  if (phdr_table_protect_gnu_relro(phdr, phnum, load_bias) < 0) {
+  if (phdr_table_protect_gnu_relro(phdr, phnum, load_bias, should_pad_segments_) < 0) {
     DL_ERR("can't enable GNU RELRO protection for \"%s\": %s",
            get_realpath(), strerror(errno));
     return false;
diff --git a/linker/linker_phdr.cpp b/linker/linker_phdr.cpp
index 07b54c5d2..821f30dff 100644
--- a/linker/linker_phdr.cpp
+++ b/linker/linker_phdr.cpp
@@ -1010,11 +1010,71 @@ int phdr_table_unprotect_segments(const ElfW(Phdr)* phdr_table,
                                    should_pad_segments);
 }

+static inline void _extend_gnu_relro_prot_end(const ElfW(Phdr)* relro_phdr,
+                                              const ElfW(Phdr)* phdr_table, size_t phdr_count,
+                                              ElfW(Addr) load_bias, ElfW(Addr)* seg_page_end,
+                                              bool should_pad_segments) {
+  // Find the index and phdr of the LOAD containing the GNU_RELRO segment
+  for (size_t index = 0; index < phdr_count; ++index) {
+    const ElfW(Phdr)* phdr = &phdr_table[index];
+
+    if (phdr->p_type == PT_LOAD && phdr->p_vaddr == relro_phdr->p_vaddr) {
+      // If the PT_GNU_RELRO mem size is not at least as large as the corresponding
+      // LOAD segment mem size, we need to protect only a partial region of the
+      // LOAD segment and therefore cannot avoid a VMA split.
+      //
+      // Note: Don't check the page-aligned mem sizes since the extended protection
+      // may incorrectly write protect non-relocation data.
+      //
+      // Example:
+      //
+      //          |---- 3K ----|-- 1K --|---- 3K ----|-- 1K --|
+      //          ------------------------------------------------
+      //          |            |        |            |        |
+      //   SEG X  |     RO     |   RO   |     RW     |        |  SEG Y
+      //          |            |        |            |        |
+      //          ------------------------------------------------
+      //          |            |                     |
+      //          |            |                     |
+      //          |            |                     |
+      //     relro_vaddr   relro_vaddr          relro_vaddr
+      //     (load_vaddr)      +                     +
+      //                  relro_memsz           load_memsz
+      //
+      //          ------------------------------------------------
+      //          |         PAGE        |         PAGE          |
+      //          ------------------------------------------------
+      //                                |       Potential       |
+      //                                |----- Extended RO -----|
+      //                                |      Protection       |
+      //
+      // If the check below uses page aligned mem sizes it will cause incorrect write
+      // protection of the 3K RW part of the LOAD segment containing the GNU_RELRO.
+      if (relro_phdr->p_memsz < phdr->p_memsz) {
+        return;
+      }
+
+      ElfW(Addr) p_memsz = phdr->p_memsz;
+      ElfW(Addr) p_filesz = phdr->p_filesz;
+
+      // Attempt extending the VMA (mprotect range). Without extending the range,
+      // mprotect will only RO protect a part of the extended RW LOAD segment, which
+      // will leave an extra split RW VMA (the gap).
+      _extend_load_segment_vma(phdr_table, phdr_count, index, &p_memsz, &p_filesz,
+                               should_pad_segments);
+
+      *seg_page_end = page_end(phdr->p_vaddr + p_memsz + load_bias);
+      return;
+    }
+  }
+}
+
 /* Used internally by phdr_table_protect_gnu_relro and
  * phdr_table_unprotect_gnu_relro.
  */
 static int _phdr_table_set_gnu_relro_prot(const ElfW(Phdr)* phdr_table, size_t phdr_count,
-                                          ElfW(Addr) load_bias, int prot_flags) {
+                                          ElfW(Addr) load_bias, int prot_flags,
+                                          bool should_pad_segments) {
   const ElfW(Phdr)* phdr = phdr_table;
   const ElfW(Phdr)* phdr_limit = phdr + phdr_count;

@@ -1041,6 +1101,8 @@ static int _phdr_table_set_gnu_relro_prot(const ElfW(Phdr)* phdr_table, size_t p
     //       that it starts on a page boundary.
     ElfW(Addr) seg_page_start = page_start(phdr->p_vaddr) + load_bias;
     ElfW(Addr) seg_page_end = page_end(phdr->p_vaddr + phdr->p_memsz) + load_bias;
+    _extend_gnu_relro_prot_end(phdr, phdr_table, phdr_count, load_bias, &seg_page_end,
+                               should_pad_segments);

     int ret = mprotect(reinterpret_cast<void*>(seg_page_start),
                        seg_page_end - seg_page_start,
@@ -1065,12 +1127,14 @@ static int _phdr_table_set_gnu_relro_prot(const ElfW(Phdr)* phdr_table, size_t p
  *   phdr_table  -> program header table
  *   phdr_count  -> number of entries in tables
  *   load_bias   -> load bias
+ *   should_pad_segments -> Were segments extended to avoid gaps in the memory map
  * Return:
  *   0 on success, -1 on failure (error code in errno).
  */
-int phdr_table_protect_gnu_relro(const ElfW(Phdr)* phdr_table,
-                                 size_t phdr_count, ElfW(Addr) load_bias) {
-  return _phdr_table_set_gnu_relro_prot(phdr_table, phdr_count, load_bias, PROT_READ);
+int phdr_table_protect_gnu_relro(const ElfW(Phdr)* phdr_table, size_t phdr_count,
+                                 ElfW(Addr) load_bias, bool should_pad_segments) {
+  return _phdr_table_set_gnu_relro_prot(phdr_table, phdr_count, load_bias, PROT_READ,
+                                        should_pad_segments);
 }

 /* Serialize the GNU relro segments to the given file descriptor. This can be
diff --git a/linker/linker_phdr.h b/linker/linker_phdr.h
index 239ecb947..4deed3349 100644
--- a/linker/linker_phdr.h
+++ b/linker/linker_phdr.h
@@ -135,7 +135,7 @@ int phdr_table_unprotect_segments(const ElfW(Phdr)* phdr_table, size_t phdr_coun
                                   ElfW(Addr) load_bias, bool should_pad_segments);

 int phdr_table_protect_gnu_relro(const ElfW(Phdr)* phdr_table, size_t phdr_count,
-                                 ElfW(Addr) load_bias);
+                                 ElfW(Addr) load_bias, bool should_pad_segments);

 int phdr_table_serialize_gnu_relro(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                                    ElfW(Addr) load_bias, int fd, size_t* file_offset);
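One detail of _extend_gnu_relro_prot_end() above is easy to miss: the partial-RELRO check compares the raw p_memsz values, not page-aligned ones. The standalone sketch below shows a case where the two comparisons disagree. It is not part of the patch series; the 4 KiB page size and the segment sizes are illustrative assumptions.

    // Why _extend_gnu_relro_prot_end() compares raw, not page-aligned, sizes.
    #include <cstdint>
    #include <cstdio>

    constexpr uint64_t kPageSize = 4096;  // assumed page size

    static uint64_t page_end(uint64_t addr) {
      return (addr + kPageSize - 1) & ~(kPageSize - 1);
    }

    int main() {
      // Hypothetical LOAD segment: 3 KiB of RELRO data followed by 1 KiB of
      // ordinary RW data, all within one page.
      uint64_t relro_memsz = 3 * 1024;  // PT_GNU_RELRO covers 3 KiB
      uint64_t load_memsz = 4 * 1024;   // PT_LOAD covers 4 KiB

      // Raw sizes differ: the RELRO only partially covers the LOAD segment,
      // so extending the mprotect(PROT_READ) range would write protect live
      // RW data. The loader bails out and accepts the VMA split instead.
      printf("raw check:     %s\n",
             relro_memsz < load_memsz ? "bail out (correct)" : "extend");

      // Page-aligned sizes are equal (both round up to one 4 KiB page), so an
      // aligned comparison would wrongly conclude the RELRO covers the whole
      // segment and extend read-only protection over writable data.
      printf("aligned check: %s\n",
             page_end(relro_memsz) < page_end(load_memsz) ? "bail out"
                                                          : "extend (wrong)");
      return 0;
    }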
From 1d3ba112ab1b7b1d837f3be7efc032d14822d1aa Mon Sep 17 00:00:00 2001
From: Kalesh Singh <kaleshsingh@google.com>
Date: Wed, 6 Mar 2024 17:33:36 -0800
Subject: [PATCH 3/3] bionic: loader: Only zero the last partial page in RW
 segments

Only zero the partial page at the end of the segment. There may be
entire pages beyond the first page boundary after the segment -- due to
segment padding (VMA extension), but these are not expected to be
touched (faulted).

Do not attempt to zero the extended region past the first partial page,
since doing so may:
  1) Result in a SIGBUS, as the region is not backed by the underlying
     file.
  2) Break the COW backing, faulting in new anon pages for a region
     that will not be used.

Bug: 327600007
Bug: 328797737
Test: Dexcom G7 app
Test: atest -c linker-unit-tests
Test: bootup/idle/system-processes-memory-direct
Change-Id: Ib76b022f94bfc4a4b7eaca2c155af81478741b91
Signed-off-by: Kalesh Singh <kaleshsingh@google.com>
---
 linker/Android.bp      |  1 -
 linker/linker_phdr.cpp | 29 +++++++++++------------------
 2 files changed, 11 insertions(+), 19 deletions(-)

diff --git a/linker/Android.bp b/linker/Android.bp
index 55daf2202..f87a92e56 100644
--- a/linker/Android.bp
+++ b/linker/Android.bp
@@ -116,7 +116,6 @@ cc_defaults {
         "libziparchive",
         "libbase",
         "libz",
-        "libprocinfo", // For procinfo::MappedFileSize()

         "libasync_safe",

diff --git a/linker/linker_phdr.cpp b/linker/linker_phdr.cpp
index 821f30dff..074012d3b 100644
--- a/linker/linker_phdr.cpp
+++ b/linker/linker_phdr.cpp
@@ -46,8 +46,6 @@
 #include "private/CFIShadow.h" // For kLibraryAlignment
 #include "private/elf_note.h"

-#include <procinfo/process_map.h>
-
 static int GetTargetElfMachine() {
 #if defined(__arm__)
   return EM_ARM;
@@ -869,25 +867,20 @@ bool ElfReader::LoadSegments() {
       }
     }

-    // if the segment is writable, and its memory map extends beyond
-    // the segment contents on file (p_filesz); zero-fill it until the
-    // end of the mapping backed by the file, rounded to the next
-    // page boundary; as this portion of the mapping corresponds to either
-    // garbage (partial page at the end) or data from other segments.
+    // if the segment is writable, and does not end on a page boundary,
+    // zero-fill it until the page limit.
     //
-    // If any part of the mapping extends beyond the file size there is
-    // no need to zero it since that region is not touchable by userspace
-    // and attempting to do so will cause the kernel to throw a SIGBUS.
-    //
-    // See: system/libprocinfo/include/procinfo/process_map_size.h
-    uint64_t file_backed_size = ::android::procinfo::MappedFileSize(seg_page_start,
-                                page_end(seg_page_start + file_length),
-                                file_offset_ + file_page_start, file_size_);
+    // Do not attempt to zero the extended region past the first partial page,
+    // since doing so may:
+    //   1) Result in a SIGBUS, as the region is not backed by the underlying
+    //      file.
+    //   2) Break the COW backing, faulting in new anon pages for a region
+    //      that will not be used.
+    //
     // _seg_file_end = unextended seg_file_end
     uint64_t _seg_file_end = seg_start + phdr->p_filesz;
-    uint64_t zero_fill_len = file_backed_size - (_seg_file_end - seg_page_start);
-    if ((phdr->p_flags & PF_W) != 0 && zero_fill_len > 0) {
-      memset(reinterpret_cast<void*>(_seg_file_end), 0, zero_fill_len);
+    if ((phdr->p_flags & PF_W) != 0 && page_offset(_seg_file_end) > 0) {
+      memset(reinterpret_cast<void*>(_seg_file_end), 0, kPageSize - page_offset(_seg_file_end));
     }

     seg_file_end = page_end(seg_file_end);