diff --git a/TEST_MAPPING b/TEST_MAPPING
index 930401324..60a4f616b 100644
--- a/TEST_MAPPING
+++ b/TEST_MAPPING
@@ -9,6 +9,9 @@
     {
       "name": "linker-unit-tests"
     },
+    {
+      "name": "CtsBionicAppTestCases"
+    },
     {
       "name": "CtsBionicTestCases"
     },
@@ -53,6 +56,9 @@
     {
       "name": "linker-unit-tests"
     },
+    {
+      "name": "CtsBionicAppTestCases"
+    },
     {
       "name": "CtsBionicTestCases"
     },
@@ -88,10 +94,5 @@
     {
       "name": "CtsBionicTestCases"
     }
-  ],
-  "postsubmit": [
-    {
-      "name": "CtsBionicAppTestCases"
-    }
   ]
 }
diff --git a/linker/Android.bp b/linker/Android.bp
index 0533ae97b..55daf2202 100644
--- a/linker/Android.bp
+++ b/linker/Android.bp
@@ -116,6 +116,7 @@ cc_defaults {
         "libziparchive",
         "libbase",
         "libz",
+        "libprocinfo", // For procinfo::MappedFileSize()
 
         "libasync_safe",
 
@@ -573,6 +574,7 @@ cc_test {
         "libasync_safe",
         "libbase",
         "liblog_for_runtime_apex",
+        "libprocinfo", // For procinfo::MappedFileSize()
     ],
 
     data_libs: [
diff --git a/linker/linker.cpp b/linker/linker.cpp
index b0caeddc1..e54a52493 100644
--- a/linker/linker.cpp
+++ b/linker/linker.cpp
@@ -3364,7 +3364,7 @@ bool soinfo::link_image(const SymbolLookupList& lookup_list, soinfo* local_group
                               "\"%s\" has text relocations", get_realpath());
     add_dlwarning(get_realpath(), "text relocations");
-    if (phdr_table_unprotect_segments(phdr, phnum, load_bias) < 0) {
+    if (phdr_table_unprotect_segments(phdr, phnum, load_bias, should_pad_segments_) < 0) {
       DL_ERR("can't unprotect loadable segments for \"%s\": %s",
              get_realpath(), strerror(errno));
       return false;
     }
@@ -3380,7 +3380,7 @@ bool soinfo::link_image(const SymbolLookupList& lookup_list, soinfo* local_group
 #if !defined(__LP64__)
   if (has_text_relocations) {
     // All relocations are done, we can protect our segments back to read-only.
-    if (phdr_table_protect_segments(phdr, phnum, load_bias) < 0) {
+    if (phdr_table_protect_segments(phdr, phnum, load_bias, should_pad_segments_) < 0) {
       DL_ERR("can't protect segments for \"%s\": %s",
              get_realpath(), strerror(errno));
       return false;
@@ -3418,7 +3418,7 @@ bool soinfo::link_image(const SymbolLookupList& lookup_list, soinfo* local_group
 }
 
 bool soinfo::protect_relro() {
-  if (phdr_table_protect_gnu_relro(phdr, phnum, load_bias) < 0) {
+  if (phdr_table_protect_gnu_relro(phdr, phnum, load_bias, should_pad_segments_) < 0) {
     DL_ERR("can't enable GNU RELRO protection for \"%s\": %s",
            get_realpath(), strerror(errno));
     return false;
diff --git a/linker/linker_main.cpp b/linker/linker_main.cpp
index d6592af14..1860ccc14 100644
--- a/linker/linker_main.cpp
+++ b/linker/linker_main.cpp
@@ -201,6 +201,7 @@ struct ExecutableInfo {
   const ElfW(Phdr)* phdr;
   size_t phdr_count;
   ElfW(Addr) entry_point;
+  bool should_pad_segments;
 };
 
 static ExecutableInfo get_executable_info(const char* arg_path) {
@@ -293,6 +294,7 @@ static ExecutableInfo load_executable(const char* orig_path) {
   result.phdr = elf_reader.loaded_phdr();
   result.phdr_count = elf_reader.phdr_count();
   result.entry_point = elf_reader.entry_point();
+  result.should_pad_segments = elf_reader.should_pad_segments();
   return result;
 }
 
@@ -366,6 +368,7 @@ static ElfW(Addr) linker_main(KernelArgumentBlock& args, const char* exe_to_load
   somain = si;
   si->phdr = exe_info.phdr;
   si->phnum = exe_info.phdr_count;
+  si->set_should_pad_segments(exe_info.should_pad_segments);
   get_elf_base_from_phdr(si->phdr, si->phnum, &si->base, &si->load_bias);
   si->size = phdr_table_get_load_size(si->phdr, si->phnum);
   si->dynamic = nullptr;
@@ -399,7 +402,7 @@ static ElfW(Addr) linker_main(KernelArgumentBlock& args, const char* exe_to_load
   auto note_gnu_property = GnuPropertySection(somain);
   if (note_gnu_property.IsBTICompatible() &&
       (phdr_table_protect_segments(somain->phdr, somain->phnum, somain->load_bias,
-                                    &note_gnu_property) < 0)) {
+                                    somain->should_pad_segments(), &note_gnu_property) < 0)) {
     __linker_error("error: can't protect segments for \"%s\": %s",
                    exe_info.path.c_str(), strerror(errno));
   }
diff --git a/linker/linker_phdr.cpp b/linker/linker_phdr.cpp
index 82b37a44d..821f30dff 100644
--- a/linker/linker_phdr.cpp
+++ b/linker/linker_phdr.cpp
@@ -46,6 +46,8 @@
 #include "private/CFIShadow.h" // For kLibraryAlignment
 #include "private/elf_note.h"
 
+#include <procinfo/process_map_size.h>
+
 static int GetTargetElfMachine() {
 #if defined(__arm__)
   return EM_ARM;
@@ -196,7 +198,7 @@ bool ElfReader::Load(address_space_params* address_space) {
     // For Armv8.5-A loaded executable segments may require PROT_BTI.
     if (note_gnu_property_.IsBTICompatible()) {
       did_load_ = (phdr_table_protect_segments(phdr_table_, phdr_num_, load_bias_,
-                                               &note_gnu_property_) == 0);
+                                               should_pad_segments_, &note_gnu_property_) == 0);
     }
 #endif
   }
@@ -756,6 +758,41 @@ bool ElfReader::ReadPadSegmentNote() {
   return true;
 }
 
+static inline void _extend_load_segment_vma(const ElfW(Phdr)* phdr_table, size_t phdr_count,
+                                            size_t phdr_idx, ElfW(Addr)* p_memsz,
+                                            ElfW(Addr)* p_filesz, bool should_pad_segments) {
+  const ElfW(Phdr)* phdr = &phdr_table[phdr_idx];
+  const ElfW(Phdr)* next = nullptr;
+  size_t next_idx = phdr_idx + 1;
+
+  if (phdr->p_align == kPageSize || !should_pad_segments) {
+    return;
+  }
+
+  if (next_idx < phdr_count && phdr_table[next_idx].p_type == PT_LOAD) {
+    next = &phdr_table[next_idx];
+  }
+
+  // If this is the last LOAD segment, or it has a .bss tail (p_memsz != p_filesz), no extension is needed.
+  if (!next || *p_memsz != *p_filesz) {
+    return;
+  }
+
+  ElfW(Addr) next_start = page_start(next->p_vaddr);
+  ElfW(Addr) curr_end = page_end(phdr->p_vaddr + *p_memsz);
+
+  // If adjacent segment mappings overlap, no extension is needed.
+  if (curr_end >= next_start) {
+    return;
+  }
+
+  // Extend the LOAD segment mapping to be contiguous with that of
+  // the next LOAD segment.
+  ElfW(Addr) extend = next_start - curr_end;
+  *p_memsz += extend;
+  *p_filesz += extend;
+}
+
 bool ElfReader::LoadSegments() {
   for (size_t i = 0; i < phdr_num_; ++i) {
     const ElfW(Phdr)* phdr = &phdr_table_[i];
@@ -764,18 +801,22 @@ bool ElfReader::LoadSegments() {
       continue;
     }
 
+    ElfW(Addr) p_memsz = phdr->p_memsz;
+    ElfW(Addr) p_filesz = phdr->p_filesz;
+    _extend_load_segment_vma(phdr_table_, phdr_num_, i, &p_memsz, &p_filesz, should_pad_segments_);
+
     // Segment addresses in memory.
     ElfW(Addr) seg_start = phdr->p_vaddr + load_bias_;
-    ElfW(Addr) seg_end = seg_start + phdr->p_memsz;
+    ElfW(Addr) seg_end = seg_start + p_memsz;
 
     ElfW(Addr) seg_page_start = page_start(seg_start);
     ElfW(Addr) seg_page_end = page_end(seg_end);
 
-    ElfW(Addr) seg_file_end = seg_start + phdr->p_filesz;
+    ElfW(Addr) seg_file_end = seg_start + p_filesz;
 
     // File offsets.
     ElfW(Addr) file_start = phdr->p_offset;
-    ElfW(Addr) file_end = file_start + phdr->p_filesz;
+    ElfW(Addr) file_end = file_start + p_filesz;
 
     ElfW(Addr) file_page_start = page_start(file_start);
     ElfW(Addr) file_length = file_end - file_page_start;
@@ -785,12 +826,12 @@ bool ElfReader::LoadSegments() {
       return false;
     }
 
-    if (file_end > static_cast<size_t>(file_size_)) {
+    if (file_start + phdr->p_filesz > static_cast<size_t>(file_size_)) {
       DL_ERR("invalid ELF file \"%s\" load segment[%zd]:"
           " p_offset (%p) + p_filesz (%p) ( = %p) past end of file (0x%" PRIx64 ")",
           name_.c_str(), i, reinterpret_cast<void*>(phdr->p_offset),
           reinterpret_cast<void*>(phdr->p_filesz),
-          reinterpret_cast<void*>(file_end), file_size_);
+          reinterpret_cast<void*>(file_start + phdr->p_filesz), file_size_);
       return false;
     }
 
@@ -828,10 +869,25 @@ bool ElfReader::LoadSegments() {
       }
     }
 
-    // if the segment is writable, and does not end on a page boundary,
-    // zero-fill it until the page limit.
-    if ((phdr->p_flags & PF_W) != 0 && page_offset(seg_file_end) > 0) {
-      memset(reinterpret_cast<void*>(seg_file_end), 0, page_size() - page_offset(seg_file_end));
+    // if the segment is writable, and its memory map extends beyond
+    // the segment contents on file (p_filesz), zero-fill it until the
+    // end of the mapping backed by the file, rounded to the next
+    // page boundary, as this portion of the mapping corresponds to either
+    // garbage (partial page at the end) or data from other segments.
+    //
+    // If any part of the mapping extends beyond the file size, there is
+    // no need to zero it since that region is not touchable by userspace
+    // and attempting to do so will cause the kernel to throw a SIGBUS.
+    //
+    // See: system/libprocinfo/include/procinfo/process_map_size.h
+    uint64_t file_backed_size = ::android::procinfo::MappedFileSize(seg_page_start,
+                                    page_end(seg_page_start + file_length),
+                                    file_offset_ + file_page_start, file_size_);
+    // _seg_file_end = unextended seg_file_end
+    uint64_t _seg_file_end = seg_start + phdr->p_filesz;
+    uint64_t zero_fill_len = file_backed_size - (_seg_file_end - seg_page_start);
+    if ((phdr->p_flags & PF_W) != 0 && zero_fill_len > 0) {
+      memset(reinterpret_cast<void*>(_seg_file_end), 0, zero_fill_len);
     }
 
     seg_file_end = page_end(seg_file_end);
@@ -864,17 +920,21 @@
  * phdr_table_protect_segments and phdr_table_unprotect_segments.
 */
 static int _phdr_table_set_load_prot(const ElfW(Phdr)* phdr_table, size_t phdr_count,
-                                     ElfW(Addr) load_bias, int extra_prot_flags) {
-  const ElfW(Phdr)* phdr = phdr_table;
-  const ElfW(Phdr)* phdr_limit = phdr + phdr_count;
+                                     ElfW(Addr) load_bias, int extra_prot_flags,
+                                     bool should_pad_segments) {
+  for (size_t i = 0; i < phdr_count; ++i) {
+    const ElfW(Phdr)* phdr = &phdr_table[i];
 
-  for (; phdr < phdr_limit; phdr++) {
     if (phdr->p_type != PT_LOAD || (phdr->p_flags & PF_W) != 0) {
       continue;
     }
 
-    ElfW(Addr) seg_page_start = page_start(phdr->p_vaddr) + load_bias;
-    ElfW(Addr) seg_page_end = page_end(phdr->p_vaddr + phdr->p_memsz) + load_bias;
+    ElfW(Addr) p_memsz = phdr->p_memsz;
+    ElfW(Addr) p_filesz = phdr->p_filesz;
+    _extend_load_segment_vma(phdr_table, phdr_count, i, &p_memsz, &p_filesz, should_pad_segments);
+
+    ElfW(Addr) seg_page_start = page_start(phdr->p_vaddr + load_bias);
+    ElfW(Addr) seg_page_end = page_end(phdr->p_vaddr + p_memsz + load_bias);
 
     int prot = PFLAGS_TO_PROT(phdr->p_flags) | extra_prot_flags;
     if ((prot & PROT_WRITE) != 0) {
@@ -909,19 +969,21 @@ static int _phdr_table_set_load_prot(const ElfW(Phdr)* phdr_table, size_t phdr_c
  *   phdr_table  -> program header table
  *   phdr_count  -> number of entries in tables
  *   load_bias   -> load bias
+ *   should_pad_segments -> Are segments extended to avoid gaps in the memory map
  *   prop        -> GnuPropertySection or nullptr
  * Return:
  *   0 on success, -1 on failure (error code in errno).
  */
 int phdr_table_protect_segments(const ElfW(Phdr)* phdr_table, size_t phdr_count,
-                                ElfW(Addr) load_bias, const GnuPropertySection* prop __unused) {
+                                ElfW(Addr) load_bias, bool should_pad_segments,
+                                const GnuPropertySection* prop __unused) {
   int prot = 0;
 #if defined(__aarch64__)
   if ((prop != nullptr) && prop->IsBTICompatible()) {
     prot |= PROT_BTI;
   }
 #endif
-  return _phdr_table_set_load_prot(phdr_table, phdr_count, load_bias, prot);
+  return _phdr_table_set_load_prot(phdr_table, phdr_count, load_bias, prot, should_pad_segments);
 }
 
 /* Change the protection of all loaded segments in memory to writable.
@@ -937,19 +999,82 @@ int phdr_table_protect_segments(const ElfW(Phdr)* phdr_table, size_t phdr_count,
  *   phdr_table  -> program header table
  *   phdr_count  -> number of entries in tables
  *   load_bias   -> load bias
+ *   should_pad_segments -> Are segments extended to avoid gaps in the memory map
  * Return:
  *   0 on success, -1 on failure (error code in errno).
  */
 int phdr_table_unprotect_segments(const ElfW(Phdr)* phdr_table,
-                                  size_t phdr_count, ElfW(Addr) load_bias) {
-  return _phdr_table_set_load_prot(phdr_table, phdr_count, load_bias, PROT_WRITE);
+                                  size_t phdr_count, ElfW(Addr) load_bias,
+                                  bool should_pad_segments) {
+  return _phdr_table_set_load_prot(phdr_table, phdr_count, load_bias, PROT_WRITE,
+                                   should_pad_segments);
+}
+
+static inline void _extend_gnu_relro_prot_end(const ElfW(Phdr)* relro_phdr,
+                                              const ElfW(Phdr)* phdr_table, size_t phdr_count,
+                                              ElfW(Addr) load_bias, ElfW(Addr)* seg_page_end,
+                                              bool should_pad_segments) {
+  // Find the index and phdr of the LOAD containing the GNU_RELRO segment
+  for (size_t index = 0; index < phdr_count; ++index) {
+    const ElfW(Phdr)* phdr = &phdr_table[index];
+
+    if (phdr->p_type == PT_LOAD && phdr->p_vaddr == relro_phdr->p_vaddr) {
+      // If the PT_GNU_RELRO mem size is not at least as large as the corresponding
+      // LOAD segment mem size, we need to protect only a partial region of the
+      // LOAD segment and therefore cannot avoid a VMA split.
+      //
+      // Note: Don't check the page-aligned mem sizes since the extended protection
+      // may incorrectly write-protect non-relocation data.
+      //
+      // Example:
+      //
+      //               |---- 3K ----|-- 1K --|---- 3K ---- |-- 1K --|
+      //      ----------------------------------------------------------------
+      //               |            |        |             |        |
+      //        SEG X  |     RO     |   RO   |     RW      |        |  SEG Y
+      //               |            |        |             |        |
+      //      ----------------------------------------------------------------
+      //                            |        |             |
+      //                            |        |             |
+      //                            |        |             |
+      //                      relro_vaddr  relro_vaddr   relro_vaddr
+      //                      (load_vaddr)      +             +
+      //                                   relro_memsz   load_memsz
+      //
+      //      ----------------------------------------------------------------
+      //               |         PAGE        |         PAGE         |
+      //      ----------------------------------------------------------------
+      //                                         |     Potential    |
+      //                                         |----- Extended RO ----|
+      //                                         |     Protection   |
+      //
+      // If the check below uses page-aligned mem sizes, it will cause incorrect write
+      // protection of the 3K RW part of the LOAD segment containing the GNU_RELRO.
+      if (relro_phdr->p_memsz < phdr->p_memsz) {
+        return;
+      }
+
+      ElfW(Addr) p_memsz = phdr->p_memsz;
+      ElfW(Addr) p_filesz = phdr->p_filesz;
+
+      // Attempt extending the VMA (mprotect range). Without extending the range,
+      // mprotect will only RO protect a part of the extended RW LOAD segment, which
+      // will leave an extra split RW VMA (the gap).
+      _extend_load_segment_vma(phdr_table, phdr_count, index, &p_memsz, &p_filesz,
+                               should_pad_segments);
+
+      *seg_page_end = page_end(phdr->p_vaddr + p_memsz + load_bias);
+      return;
+    }
+  }
 }
 
 /* Used internally by phdr_table_protect_gnu_relro and
  * phdr_table_unprotect_gnu_relro.
  */
 static int _phdr_table_set_gnu_relro_prot(const ElfW(Phdr)* phdr_table, size_t phdr_count,
-                                          ElfW(Addr) load_bias, int prot_flags) {
+                                          ElfW(Addr) load_bias, int prot_flags,
+                                          bool should_pad_segments) {
   const ElfW(Phdr)* phdr = phdr_table;
   const ElfW(Phdr)* phdr_limit = phdr + phdr_count;
 
@@ -976,6 +1101,8 @@ static int _phdr_table_set_gnu_relro_prot(const ElfW(Phdr)* phdr_table, size_t p
     //       that it starts on a page boundary.
     ElfW(Addr) seg_page_start = page_start(phdr->p_vaddr) + load_bias;
     ElfW(Addr) seg_page_end = page_end(phdr->p_vaddr + phdr->p_memsz) + load_bias;
+    _extend_gnu_relro_prot_end(phdr, phdr_table, phdr_count, load_bias, &seg_page_end,
+                               should_pad_segments);
 
     int ret = mprotect(reinterpret_cast<void*>(seg_page_start),
                        seg_page_end - seg_page_start,
@@ -1000,12 +1127,14 @@ static int _phdr_table_set_gnu_relro_prot(const ElfW(Phdr)* phdr_table, size_t p
  *   phdr_table  -> program header table
  *   phdr_count  -> number of entries in tables
  *   load_bias   -> load bias
+ *   should_pad_segments -> Were segments extended to avoid gaps in the memory map
  * Return:
  *   0 on success, -1 on failure (error code in errno).
  */
-int phdr_table_protect_gnu_relro(const ElfW(Phdr)* phdr_table,
-                                 size_t phdr_count, ElfW(Addr) load_bias) {
-  return _phdr_table_set_gnu_relro_prot(phdr_table, phdr_count, load_bias, PROT_READ);
+int phdr_table_protect_gnu_relro(const ElfW(Phdr)* phdr_table, size_t phdr_count,
+                                 ElfW(Addr) load_bias, bool should_pad_segments) {
+  return _phdr_table_set_gnu_relro_prot(phdr_table, phdr_count, load_bias, PROT_READ,
+                                        should_pad_segments);
 }
 
 /* Serialize the GNU relro segments to the given file descriptor. This can be
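Aside on the zero-fill arithmetic in LoadSegments() above, before the header changes: the sketch below is not part of the patch. It re-derives the zero_fill_len computation with made-up numbers, using mapped_file_size() as a simplified stand-in written for this note with the semantics the patch relies on from procinfo::MappedFileSize() (how many bytes of a mapping are actually backed by the file). The 16KB page size, the assumption that load_bias and file_offset_ are zero, and every address and size below are invented for illustration.

```cpp
#include <algorithm>
#include <cinttypes>
#include <cstdint>
#include <cstdio>

static constexpr uint64_t kPageSize = 0x4000;  // assumed 16KB runtime page size
static uint64_t page_start(uint64_t a) { return a & ~(kPageSize - 1); }
static uint64_t page_end(uint64_t a) { return page_start(a + kPageSize - 1); }

// Simplified stand-in for procinfo::MappedFileSize(): how many bytes of the
// mapping [map_start, map_end) are backed by a file of file_size bytes when
// the mapping starts at file_offset within that file.
static uint64_t mapped_file_size(uint64_t map_start, uint64_t map_end,
                                 uint64_t file_offset, uint64_t file_size) {
  uint64_t map_size = map_end - map_start;
  if (file_offset >= file_size) return 0;
  return std::min(map_size, file_size - file_offset);
}

int main() {
  // Made-up writable segment: 4KB-aligned, already extended by 16KB to meet the
  // next LOAD segment, loaded with load_bias == 0 and p_vaddr == p_offset.
  const uint64_t file_size = 0x9000;    // the ELF file is 36KB long
  const uint64_t seg_start = 0x6000;    // phdr->p_vaddr + load_bias
  const uint64_t orig_filesz = 0x1800;  // phdr->p_filesz before extension
  const uint64_t ext_filesz = 0x5800;   // p_filesz after _extend_load_segment_vma()

  const uint64_t seg_page_start = page_start(seg_start);                  // 0x4000
  const uint64_t file_page_start = page_start(seg_start);                 // mapping's offset in the file
  const uint64_t file_length = seg_start + ext_filesz - file_page_start;  // bytes wanted from the file

  // How much of the page-rounded mapping the file can actually back.
  const uint64_t file_backed_size = mapped_file_size(
      seg_page_start, page_end(seg_page_start + file_length), file_page_start, file_size);  // 0x5000

  // Zero only from the end of this segment's real file contents up to the end
  // of the file-backed region; pages past EOF would SIGBUS if written to.
  const uint64_t seg_file_end = seg_start + orig_filesz;  // 0x7800
  const uint64_t zero_fill_len = file_backed_size - (seg_file_end - seg_page_start);

  printf("zero-fill [0x%" PRIx64 ", 0x%" PRIx64 "): 0x%" PRIx64 " bytes\n",
         seg_file_end, seg_file_end + zero_fill_len, zero_fill_len);  // [0x7800, 0x9000): 0x1800 bytes
  return 0;
}
```

The clamp is the whole point: the extended mapping runs to 0xC000, but zeroing stops at 0x9000 (end of file), because writes into the portion of a file mapping that lies beyond EOF fault with SIGBUS.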
diff --git a/linker/linker_phdr.h b/linker/linker_phdr.h
index e5b87bb8c..4deed3349 100644
--- a/linker/linker_phdr.h
+++ b/linker/linker_phdr.h
@@ -128,13 +128,14 @@ size_t phdr_table_get_load_size(const ElfW(Phdr)* phdr_table, size_t phdr_count,
 size_t phdr_table_get_maximum_alignment(const ElfW(Phdr)* phdr_table, size_t phdr_count);
 
 int phdr_table_protect_segments(const ElfW(Phdr)* phdr_table, size_t phdr_count,
-                                ElfW(Addr) load_bias, const GnuPropertySection* prop = nullptr);
+                                ElfW(Addr) load_bias, bool should_pad_segments,
+                                const GnuPropertySection* prop = nullptr);
 
 int phdr_table_unprotect_segments(const ElfW(Phdr)* phdr_table, size_t phdr_count,
-                                  ElfW(Addr) load_bias);
+                                  ElfW(Addr) load_bias, bool should_pad_segments);
 
 int phdr_table_protect_gnu_relro(const ElfW(Phdr)* phdr_table, size_t phdr_count,
-                                 ElfW(Addr) load_bias);
+                                 ElfW(Addr) load_bias, bool should_pad_segments);
 
 int phdr_table_serialize_gnu_relro(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                                    ElfW(Addr) load_bias, int fd, size_t* file_offset);
diff --git a/linker/linker_relocate.cpp b/linker/linker_relocate.cpp
index 952dade9e..5b58895ee 100644
--- a/linker/linker_relocate.cpp
+++ b/linker/linker_relocate.cpp
@@ -187,7 +187,8 @@ static bool process_relocation_impl(Relocator& relocator, const rel_t& reloc) {
   auto protect_segments = [&]() {
     // Make .text executable.
     if (phdr_table_protect_segments(relocator.si->phdr, relocator.si->phnum,
-                                    relocator.si->load_bias) < 0) {
+                                    relocator.si->load_bias,
+                                    relocator.si->should_pad_segments()) < 0) {
       DL_ERR("can't protect segments for \"%s\": %s",
              relocator.si->get_realpath(), strerror(errno));
       return false;
@@ -197,7 +198,8 @@ static bool process_relocation_impl(Relocator& relocator, const rel_t& reloc) {
   auto unprotect_segments = [&]() {
     // Make .text writable.
     if (phdr_table_unprotect_segments(relocator.si->phdr, relocator.si->phnum,
-                                      relocator.si->load_bias) < 0) {
+                                      relocator.si->load_bias,
+                                      relocator.si->should_pad_segments()) < 0) {
       DL_ERR("can't unprotect loadable segments for \"%s\": %s",
              relocator.si->get_realpath(), strerror(errno));
       return false;
diff --git a/tests/Android.bp b/tests/Android.bp
index e9683ab9a..0297bf551 100644
--- a/tests/Android.bp
+++ b/tests/Android.bp
@@ -788,6 +788,7 @@ cc_test_library {
     ],
     static_libs: [
         "libbase",
+        "libprocinfo",
     ],
     include_dirs: [
         "bionic/libc",
diff --git a/tests/dlext_test.cpp b/tests/dlext_test.cpp
index d078e500b..b702725a1 100644
--- a/tests/dlext_test.cpp
+++ b/tests/dlext_test.cpp
@@ -31,6 +31,7 @@
 #include
 #include
+#include <procinfo/process_map_size.h>
 #include
 #include
 #include
 
@@ -2046,6 +2047,11 @@ TEST(dlext, ns_anonymous) {
                                              -1, 0));
   ASSERT_TRUE(reinterpret_cast<void*>(reserved_addr) != MAP_FAILED);
 
+  struct stat file_stat;
+  int ret = TEMP_FAILURE_RETRY(stat(private_library_absolute_path.c_str(), &file_stat));
+  ASSERT_EQ(ret, 0) << "Failed to stat library";
+  size_t file_size = file_stat.st_size;
+
   for (const auto& rec : maps_to_copy) {
     uintptr_t offset = rec.addr_start - addr_start;
     size_t size = rec.addr_end - rec.addr_start;
@@ -2053,7 +2059,11 @@ TEST(dlext, ns_anonymous) {
     void* map = mmap(addr, size, PROT_READ | PROT_WRITE,
                      MAP_ANON | MAP_PRIVATE | MAP_FIXED,
                      -1, 0);
     ASSERT_TRUE(map != MAP_FAILED);
-    memcpy(map, reinterpret_cast<void*>(rec.addr_start), size);
+    // Attempting the memcpy below from a portion of the map that is past the end
+    // of the backing file would cause the kernel to throw a SIGBUS.
+    size_t _size = ::android::procinfo::MappedFileSize(rec.addr_start, rec.addr_end,
+                                                       rec.offset, file_size);
+    memcpy(map, reinterpret_cast<void*>(rec.addr_start), _size);
     mprotect(map, size, rec.perms);
   }
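To summarize the core mechanism the patch adds, here is a small self-contained sketch of the _extend_load_segment_vma() arithmetic (not the linker code itself): when should_pad_segments is set and a LOAD segment's p_align is smaller than the runtime page size, its mapping is stretched so it ends exactly where the next LOAD segment's mapping begins. The Phdr struct, the kPageSize value, and the sample program headers are assumptions invented for the example; the real implementation operates on ElfW(Phdr) entries and also checks that the next table entry is PT_LOAD.

```cpp
#include <cinttypes>
#include <cstddef>
#include <cstdint>
#include <cstdio>

static constexpr uint64_t kPageSize = 0x4000;  // assumed 16KB runtime page size

static uint64_t page_start(uint64_t addr) { return addr & ~(kPageSize - 1); }
static uint64_t page_end(uint64_t addr) { return page_start(addr + kPageSize - 1); }

// Minimal stand-in for ElfW(Phdr); only the fields this sketch needs.
struct Phdr {
  uint64_t p_vaddr;
  uint64_t p_memsz;
  uint64_t p_filesz;
  uint64_t p_align;
};

// Mirrors the shape of _extend_load_segment_vma(): grow this LOAD segment's
// mapping so it ends exactly where the next LOAD segment's mapping begins.
static void extend_load_segment_vma(const Phdr* table, size_t count, size_t idx,
                                    uint64_t* p_memsz, uint64_t* p_filesz,
                                    bool should_pad_segments) {
  const Phdr* phdr = &table[idx];

  // Already aligned to the runtime page size, or padding disabled: nothing to do.
  if (phdr->p_align == kPageSize || !should_pad_segments) return;

  // Last LOAD segment, or a .bss tail (p_memsz != p_filesz): don't extend.
  if (idx + 1 >= count || *p_memsz != *p_filesz) return;

  const Phdr* next = &table[idx + 1];
  uint64_t next_start = page_start(next->p_vaddr);
  uint64_t curr_end = page_end(phdr->p_vaddr + *p_memsz);
  if (curr_end >= next_start) return;  // mappings already touch or overlap

  uint64_t extend = next_start - curr_end;
  *p_memsz += extend;
  *p_filesz += extend;
}

int main() {
  // Two 4KB-aligned LOAD segments as a 16KB-page device would see them.
  const Phdr phdrs[] = {
      {0x0000, 0x3000, 0x3000, 0x1000},  // e.g. r-x text
      {0x8000, 0x2000, 0x1000, 0x1000},  // e.g. rw- data with a small .bss tail
  };

  uint64_t memsz = phdrs[0].p_memsz;
  uint64_t filesz = phdrs[0].p_filesz;
  extend_load_segment_vma(phdrs, 2, 0, &memsz, &filesz, /*should_pad_segments=*/true);

  printf("p_memsz:  0x%" PRIx64 " -> 0x%" PRIx64 "\n", phdrs[0].p_memsz, memsz);
  printf("p_filesz: 0x%" PRIx64 " -> 0x%" PRIx64 "\n", phdrs[0].p_filesz, filesz);
  return 0;
}
```

Running it prints `p_memsz: 0x3000 -> 0x7000` (and the same for p_filesz): page_end(seg_start + p_memsz) now coincides with page_start of the next segment, so no unmapped gap, and no extra VMA, is left between the two mappings.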