Merge changes Ia08e1b5c,I60e589dd,Ib7edb665,Ibf1bf5ec,Ibd623857 am: 9a238653c1 am: b387a9270c

am: 582280600e

Change-Id: I7548beb2cc0b92561566044ad3e3d9c5f26c5c7b
This commit is contained in:
Ryan Prichard 2019-01-17 15:53:29 -08:00 committed by android-build-merger
commit 8ad8abf233
13 changed files with 400 additions and 48 deletions

View file

@ -126,6 +126,7 @@ extern "C" void __libc_init_main_thread_final() {
auto new_tcb = reinterpret_cast<bionic_tcb*>(mapping.static_tls + layout.offset_bionic_tcb());
auto new_tls = reinterpret_cast<bionic_tls*>(mapping.static_tls + layout.offset_bionic_tls());
__init_static_tls(mapping.static_tls);
new_tcb->copy_from_bootstrap(temp_tcb);
new_tls->copy_from_bootstrap(temp_tls);
__init_tcb(new_tcb, &main_thread);

View file

@ -29,33 +29,25 @@
#include "private/bionic_elf_tls.h"
#include <async_safe/log.h>
#include <string.h>
#include <sys/param.h>
#include <unistd.h>
#include "private/ScopedRWLock.h"
#include "private/bionic_globals.h"
#include "private/bionic_macros.h"
#include "private/bionic_tls.h"
// Search for a TLS segment in the given phdr table. Returns true if it has a
// TLS segment and false otherwise.
bool __bionic_get_tls_segment(const ElfW(Phdr)* phdr_table, size_t phdr_count,
ElfW(Addr) load_bias, const char* mod_name,
TlsSegment* out) {
ElfW(Addr) load_bias, TlsSegment* out) {
for (size_t i = 0; i < phdr_count; ++i) {
const ElfW(Phdr)& phdr = phdr_table[i];
if (phdr.p_type == PT_TLS) {
// N.B. The size does not need to be a multiple of the alignment. With
// ld.bfd (or after using binutils' strip), the TLS segment's size isn't
// rounded up.
size_t alignment = phdr.p_align;
if (alignment == 0 || !powerof2(alignment)) {
async_safe_fatal("error: \"%s\": TLS segment alignment is not a power of 2: %zu",
mod_name, alignment);
}
// Bionic only respects TLS alignment up to one page.
alignment = MIN(alignment, PAGE_SIZE);
*out = TlsSegment {
phdr.p_memsz,
alignment,
phdr.p_align,
reinterpret_cast<void*>(load_bias + phdr.p_vaddr),
phdr.p_filesz,
};
@ -65,8 +57,75 @@ bool __bionic_get_tls_segment(const ElfW(Phdr)* phdr_table, size_t phdr_count,
return false;
}
void StaticTlsLayout::reserve_tcb() {
offset_bionic_tcb_ = reserve_type<bionic_tcb>();
// Return true if the alignment of a TLS segment is a valid power-of-two. Also
// cap the alignment if it's too high.
bool __bionic_check_tls_alignment(size_t* alignment) {
  // N.B. The size does not need to be a multiple of the alignment. With
  // ld.bfd (or after using binutils' strip), the TLS segment's size isn't
  // rounded up.
  const size_t value = *alignment;
  if (value != 0 && powerof2(value)) {
    // Bionic only respects TLS alignment up to one page.
    *alignment = MIN(value, PAGE_SIZE);
    return true;
  }
  return false;
}
// Offset of the thread pointer (TP) within the static TLS block. The TP sits
// -MIN_TLS_SLOT words past the start of the bionic_tcb (MIN_TLS_SLOT is 0 on
// x86 — see the static_assert in reserve_exe_segment_and_tcb — and
// non-positive on other targets, so this offset is >= offset_bionic_tcb_).
size_t StaticTlsLayout::offset_thread_pointer() const {
  return offset_bionic_tcb_ + (-MIN_TLS_SLOT * sizeof(void*));
}
// Reserves space for the Bionic TCB and the executable's TLS segment. Returns
// the offset of the executable's TLS segment within the static TLS block.
// Aborts via async_safe_fatal (ARM only) if the executable's TLS alignment is
// too small to leave room for Bionic's TCB slots between the thread pointer
// and the segment.
size_t StaticTlsLayout::reserve_exe_segment_and_tcb(const TlsSegment* exe_segment,
                                                    const char* progname __attribute__((unused))) {
  // Special case: if the executable has no TLS segment, then just allocate a
  // TCB and skip the minimum alignment check on ARM.
  if (exe_segment == nullptr) {
    offset_bionic_tcb_ = reserve_type<bionic_tcb>();
    return 0;
  }

#if defined(__arm__) || defined(__aarch64__)
  // Variant 1 layout: the TCB precedes the executable's segment.
  // First reserve enough space for the TCB before the executable segment.
  reserve(sizeof(bionic_tcb), 1);

  // Then reserve the segment itself.
  const size_t result = reserve(exe_segment->size, exe_segment->alignment);

  // The variant 1 ABI that ARM linkers follow specifies a 2-word TCB between
  // the thread pointer and the start of the executable's TLS segment, but both
  // the thread pointer and the TLS segment are aligned appropriately for the
  // TLS segment. Calculate the distance between the thread pointer and the
  // EXE's segment.
  const size_t exe_tpoff = __BIONIC_ALIGN(sizeof(void*) * 2, exe_segment->alignment);

  // All of bionic_tcb's slots (MIN_TLS_SLOT..MAX_TLS_SLOT) must fit in the gap
  // between the TP and the EXE segment, so the gap must be at least this big.
  const size_t min_bionic_alignment = BIONIC_ROUND_UP_POWER_OF_2(MAX_TLS_SLOT) * sizeof(void*);
  if (exe_tpoff < min_bionic_alignment) {
    async_safe_fatal("error: \"%s\": executable's TLS segment is underaligned: "
                     "alignment is %zu, needs to be at least %zu for %s Bionic",
                     progname, exe_segment->alignment, min_bionic_alignment,
                     (sizeof(void*) == 4 ? "ARM" : "ARM64"));
  }

  // Place the TCB so that the thread pointer lands exe_tpoff bytes before the
  // EXE segment (the TP is -MIN_TLS_SLOT words past the start of the TCB).
  offset_bionic_tcb_ = result - exe_tpoff - (-MIN_TLS_SLOT * sizeof(void*));
  return result;
#elif defined(__i386__) || defined(__x86_64__)
  // x86 uses variant 2 TLS layout. The executable's segment is located just
  // before the TCB.
  static_assert(MIN_TLS_SLOT == 0, "First slot of bionic_tcb must be slot #0 on x86");
  // Round the segment's size up to its alignment so the TCB (and thus the TP)
  // is still aligned for the segment, then place the TCB right after it.
  const size_t exe_size = round_up_with_overflow_check(exe_segment->size, exe_segment->alignment);
  reserve(exe_size, 1);
  const size_t max_align = MAX(alignof(bionic_tcb), exe_segment->alignment);
  offset_bionic_tcb_ = reserve(sizeof(bionic_tcb), max_align);
  return offset_bionic_tcb_ - exe_size;
#else
#error "Unrecognized architecture"
#endif
}
void StaticTlsLayout::reserve_bionic_tls() {
@ -76,6 +135,10 @@ void StaticTlsLayout::reserve_bionic_tls() {
// Finalize the layout: pad the total size out to the overall alignment, then
// abort if any reservation arithmetic overflowed size_t along the way.
void StaticTlsLayout::finish_layout() {
  offset_ = round_up_with_overflow_check(offset_, alignment_);
  if (!overflowed_) return;
  async_safe_fatal("error: TLS segments in static TLS overflowed");
}
// The size is not required to be a multiple of the alignment. The alignment
@ -94,3 +157,33 @@ size_t StaticTlsLayout::round_up_with_overflow_check(size_t value, size_t alignm
if (value < old_value) overflowed_ = true;
return value;
}
// Copy each TLS module's initialization image into a newly-allocated block of
// static TLS memory. To reduce dirty pages, this function only writes to pages
// within the static TLS that need initialization. The memory should already be
// zero-initialized on entry.
void __init_static_tls(void* static_tls) {
  // The part of the table we care about (i.e. static TLS modules) never changes
  // after startup, but we still need the mutex because the table could grow,
  // moving the initial part. If this locking is too slow, we can duplicate the
  // static part of the table.
  TlsModules& modules = __libc_shared_globals()->tls_modules;
  ScopedReadLock locker(&modules.rwlock);

  for (size_t i = 0; i < modules.module_count; ++i) {
    TlsModule& module = modules.module_table[i];
    if (module.static_offset == SIZE_MAX) {
      // All of the static modules come before all of the dynamic modules, so
      // once we see the first dynamic module, we're done.
      break;
    }
    if (module.segment.init_size == 0) {
      // Skip the memcpy call for TLS segments with no initializer, which is
      // common.
      continue;
    }
    // static_offset is the module's offset within the static TLS block.
    memcpy(static_cast<char*>(static_tls) + module.static_offset,
           module.segment.init_ptr,
           module.segment.init_size);
  }
}

View file

@ -83,10 +83,29 @@ static void apply_gnu_relro() {
}
}
static void layout_static_tls() {
static void layout_static_tls(KernelArgumentBlock& args) {
StaticTlsLayout& layout = __libc_shared_globals()->static_tls_layout;
layout.reserve_bionic_tls();
layout.reserve_tcb();
const char* progname = args.argv[0];
ElfW(Phdr)* phdr_start = reinterpret_cast<ElfW(Phdr)*>(getauxval(AT_PHDR));
size_t phdr_ct = getauxval(AT_PHNUM);
static TlsModule mod;
if (__bionic_get_tls_segment(phdr_start, phdr_ct, 0, &mod.segment)) {
if (!__bionic_check_tls_alignment(&mod.segment.alignment)) {
async_safe_fatal("error: TLS segment alignment in \"%s\" is not a power of 2: %zu\n",
progname, mod.segment.alignment);
}
mod.static_offset = layout.reserve_exe_segment_and_tcb(&mod.segment, progname);
mod.first_generation = 1;
__libc_shared_globals()->tls_modules.generation = 1;
__libc_shared_globals()->tls_modules.module_count = 1;
__libc_shared_globals()->tls_modules.module_table = &mod;
} else {
layout.reserve_exe_segment_and_tcb(nullptr, progname);
}
layout.finish_layout();
}
@ -111,7 +130,7 @@ __noreturn static void __real_libc_init(void *raw_args,
__libc_init_globals();
__libc_shared_globals()->init_progname = args.argv[0];
__libc_init_AT_SECURE(args.envp);
layout_static_tls();
layout_static_tls(args);
__libc_init_main_thread_final();
__libc_init_common();

View file

@ -288,7 +288,8 @@ static int __allocate_thread(pthread_attr_t* attr, bionic_tcb** tcbp, void** chi
auto tcb = reinterpret_cast<bionic_tcb*>(mapping.static_tls + layout.offset_bionic_tcb());
auto tls = reinterpret_cast<bionic_tls*>(mapping.static_tls + layout.offset_bionic_tls());
// (Re)initialize TLS pointers.
// Initialize TLS memory.
__init_static_tls(mapping.static_tls);
__init_tcb(tcb, thread);
__init_tcb_stack_guard(tcb);
__init_bionic_tls_ptrs(tcb, tls);

View file

@ -29,6 +29,8 @@
#pragma once
#include <link.h>
#include <pthread.h>
#include <stdatomic.h>
#include <stdint.h>
#include <sys/cdefs.h>
@ -40,8 +42,9 @@ struct TlsSegment {
};
__LIBC_HIDDEN__ bool __bionic_get_tls_segment(const ElfW(Phdr)* phdr_table, size_t phdr_count,
ElfW(Addr) load_bias, const char* mod_name,
TlsSegment* out);
ElfW(Addr) load_bias, TlsSegment* out);
__LIBC_HIDDEN__ bool __bionic_check_tls_alignment(size_t* alignment);
struct StaticTlsLayout {
constexpr StaticTlsLayout() {}
@ -58,13 +61,17 @@ private:
public:
size_t offset_bionic_tcb() const { return offset_bionic_tcb_; }
size_t offset_bionic_tls() const { return offset_bionic_tls_; }
size_t offset_thread_pointer() const;
size_t size() const { return offset_; }
size_t alignment() const { return alignment_; }
bool overflowed() const { return overflowed_; }
void reserve_tcb();
size_t reserve_exe_segment_and_tcb(const TlsSegment* exe_segment, const char* progname);
void reserve_bionic_tls();
size_t reserve_solib_segment(const TlsSegment& segment) {
return reserve(segment.size, segment.alignment);
}
void finish_layout();
private:
@ -76,3 +83,39 @@ private:
size_t round_up_with_overflow_check(size_t value, size_t alignment);
};
// A descriptor for a single ELF TLS module.
struct TlsModule {
  // Location, size, and alignment of the module's PT_TLS segment.
  TlsSegment segment;

  // Offset into the static TLS block or SIZE_MAX for a dynamic module.
  size_t static_offset = SIZE_MAX;

  // The generation in which this module was loaded. Dynamic TLS lookups use
  // this field to detect when a module has been unloaded.
  size_t first_generation = 0;

  // Used by the dynamic linker to track the associated soinfo* object. A null
  // value also marks this table slot as unused.
  void* soinfo_ptr = nullptr;
};
// Table of the ELF TLS modules. Either the dynamic linker or the static
// initialization code prepares this table, and it's then used during thread
// creation and for dynamic TLS lookups.
struct TlsModules {
  constexpr TlsModules() {}

  // A generation counter. The value is incremented each time an solib is loaded
  // or unloaded.
  _Atomic(size_t) generation = 0;

  // Access to the TlsModule[] table requires taking this lock.
  pthread_rwlock_t rwlock = PTHREAD_RWLOCK_INITIALIZER;

  // Number of entries in module_table.
  size_t module_count = 0;

  // Pointer to a block of TlsModule objects. The first module has ID 1 and
  // is stored at index 0 in this table.
  TlsModule* module_table = nullptr;
};
void __init_static_tls(void* static_tls);

View file

@ -69,6 +69,7 @@ struct libc_shared_globals {
abort_msg_t* abort_msg = nullptr;
StaticTlsLayout static_tls_layout;
TlsModules tls_modules;
// Values passed from the linker to libc.so.
const char* init_progname = nullptr;

View file

@ -65,8 +65,10 @@
#include "linker_phdr.h"
#include "linker_relocs.h"
#include "linker_reloc_iterators.h"
#include "linker_tls.h"
#include "linker_utils.h"
#include "private/bionic_globals.h"
#include "android-base/macros.h"
#include "android-base/strings.h"
#include "android-base/stringprintf.h"
@ -1655,6 +1657,7 @@ bool find_libraries(android_namespace_t* ns,
if (!si->is_linked() && !si->prelink_image()) {
return false;
}
register_soinfo_tls(si);
}
// Step 4: Construct the global group. Note: DF_1_GLOBAL bit of a library is
@ -1890,6 +1893,7 @@ static void soinfo_unload_impl(soinfo* root) {
si->get_realpath(),
si);
notify_gdb_of_unload(si);
unregister_soinfo_tls(si);
get_cfi_shadow()->BeforeUnload(si);
soinfo_free(si);
}
@ -2669,16 +2673,32 @@ static ElfW(Addr) get_addend(ElfW(Rela)* rela, ElfW(Addr) reloc_addr __unused) {
#else
static ElfW(Addr) get_addend(ElfW(Rel)* rel, ElfW(Addr) reloc_addr) {
if (ELFW(R_TYPE)(rel->r_info) == R_GENERIC_RELATIVE ||
ELFW(R_TYPE)(rel->r_info) == R_GENERIC_IRELATIVE) {
ELFW(R_TYPE)(rel->r_info) == R_GENERIC_IRELATIVE ||
ELFW(R_TYPE)(rel->r_info) == R_GENERIC_TLS_DTPREL ||
ELFW(R_TYPE)(rel->r_info) == R_GENERIC_TLS_TPREL) {
return *reinterpret_cast<ElfW(Addr)*>(reloc_addr);
}
return 0;
}
#endif
// Returns true for the four generic ELF TLS relocation types; all other
// relocation types are non-TLS.
static bool is_tls_reloc(ElfW(Word) type) {
  return type == R_GENERIC_TLS_DTPMOD ||
         type == R_GENERIC_TLS_DTPREL ||
         type == R_GENERIC_TLS_TPREL ||
         type == R_GENERIC_TLSDESC;
}
template<typename ElfRelIteratorT>
bool soinfo::relocate(const VersionTracker& version_tracker, ElfRelIteratorT&& rel_iterator,
const soinfo_list_t& global_group, const soinfo_list_t& local_group) {
const size_t tls_tp_base = __libc_shared_globals()->static_tls_layout.offset_thread_pointer();
for (size_t idx = 0; rel_iterator.has_next(); ++idx) {
const auto rel = rel_iterator.next();
if (rel == nullptr) {
@ -2701,7 +2721,22 @@ bool soinfo::relocate(const VersionTracker& version_tracker, ElfRelIteratorT&& r
const ElfW(Sym)* s = nullptr;
soinfo* lsi = nullptr;
if (sym != 0) {
if (sym == 0) {
// Do nothing.
} else if (ELF_ST_BIND(symtab_[sym].st_info) == STB_LOCAL && is_tls_reloc(type)) {
// In certain situations, the Gold linker accesses a TLS symbol using a
// relocation to an STB_LOCAL symbol in .dynsym of either STT_SECTION or
// STT_TLS type. Bionic doesn't support these relocations, so issue an
// error. References:
// - https://groups.google.com/d/topic/generic-abi/dJ4_Y78aQ2M/discussion
// - https://sourceware.org/bugzilla/show_bug.cgi?id=17699
s = &symtab_[sym];
sym_name = get_string(s->st_name);
DL_ERR("unexpected TLS reference to local symbol \"%s\": "
"sym type %d, rel type %u (idx %zu of \"%s\")",
sym_name, ELF_ST_TYPE(s->st_info), type, idx, get_realpath());
return false;
} else {
sym_name = get_string(symtab_[sym].st_name);
const version_info* vi = nullptr;
@ -2738,6 +2773,10 @@ bool soinfo::relocate(const VersionTracker& version_tracker, ElfRelIteratorT&& r
case R_GENERIC_GLOB_DAT:
case R_GENERIC_RELATIVE:
case R_GENERIC_IRELATIVE:
case R_GENERIC_TLS_DTPMOD:
case R_GENERIC_TLS_DTPREL:
case R_GENERIC_TLS_TPREL:
case R_GENERIC_TLSDESC:
#if defined(__aarch64__)
case R_AARCH64_ABS64:
case R_AARCH64_ABS32:
@ -2785,12 +2824,21 @@ bool soinfo::relocate(const VersionTracker& version_tracker, ElfRelIteratorT&& r
}
}
#endif
if (ELF_ST_TYPE(s->st_info) == STT_TLS) {
DL_ERR("unsupported ELF TLS symbol \"%s\" referenced by \"%s\"",
sym_name, get_realpath());
return false;
if (is_tls_reloc(type)) {
if (ELF_ST_TYPE(s->st_info) != STT_TLS) {
DL_ERR("reference to non-TLS symbol \"%s\" from TLS relocation in \"%s\"",
sym_name, get_realpath());
return false;
}
sym_addr = s->st_value;
} else {
if (ELF_ST_TYPE(s->st_info) == STT_TLS) {
DL_ERR("reference to TLS symbol \"%s\" from non-TLS relocation in \"%s\"",
sym_name, get_realpath());
return false;
}
sym_addr = lsi->resolve_symbol_address(s);
}
sym_addr = lsi->resolve_symbol_address(s);
#if !defined(__LP64__)
if (protect_segments) {
if (phdr_table_unprotect_segments(phdr, phnum, load_bias) < 0) {
@ -2863,6 +2911,39 @@ bool soinfo::relocate(const VersionTracker& version_tracker, ElfRelIteratorT&& r
*reinterpret_cast<ElfW(Addr)*>(reloc) = ifunc_addr;
}
break;
case R_GENERIC_TLS_TPREL:
count_relocation(kRelocRelative);
MARK(rel->r_offset);
{
ElfW(Addr) tpoff = 0;
if (sym == 0) {
// By convention in ld.bfd and lld, an omitted symbol
// (ELFW(R_SYM) == 0) refers to the local module.
lsi = this;
}
if (lsi == nullptr) {
// Unresolved weak relocation. Leave tpoff at 0 to resolve
// &weak_tls_symbol to __get_tls().
} else if (soinfo_tls* lsi_tls = lsi->get_tls()) {
if (lsi_tls->module->static_offset != SIZE_MAX) {
tpoff += lsi_tls->module->static_offset - tls_tp_base;
} else {
DL_ERR("TLS symbol \"%s\" in dlopened \"%s\" referenced from \"%s\" using IE access model",
sym_name, lsi->get_realpath(), get_realpath());
return false;
}
} else {
DL_ERR("TLS relocation refers to symbol \"%s\" in solib \"%s\" with no TLS segment",
sym_name, lsi->get_realpath());
return false;
}
tpoff += sym_addr + addend;
TRACE_TYPE(RELO, "RELO TLS_TPREL %16p <- %16p %s\n",
reinterpret_cast<void*>(reloc),
reinterpret_cast<void*>(tpoff), sym_name);
*reinterpret_cast<ElfW(Addr)*>(reloc) = tpoff;
}
break;
#if defined(__aarch64__)
case R_AARCH64_ABS64:
@ -2964,14 +3045,6 @@ bool soinfo::relocate(const VersionTracker& version_tracker, ElfRelIteratorT&& r
*/
DL_ERR("%s R_AARCH64_COPY relocations are not supported", get_realpath());
return false;
case R_AARCH64_TLS_TPREL64:
TRACE_TYPE(RELO, "RELO TLS_TPREL64 *** %16llx <- %16llx - %16llx\n",
reloc, (sym_addr + addend), rel->r_offset);
break;
case R_AARCH64_TLSDESC:
TRACE_TYPE(RELO, "RELO TLSDESC *** %16llx <- %16llx - %16llx\n",
reloc, (sym_addr + addend), rel->r_offset);
break;
#elif defined(__x86_64__)
case R_X86_64_32:
count_relocation(kRelocRelative);
@ -3076,6 +3149,19 @@ bool soinfo::prelink_image() {
&ARM_exidx, &ARM_exidx_count);
#endif
TlsSegment tls_segment;
if (__bionic_get_tls_segment(phdr, phnum, load_bias, &tls_segment)) {
if (!__bionic_check_tls_alignment(&tls_segment.alignment)) {
if (!relocating_linker) {
DL_ERR("TLS segment alignment in \"%s\" is not a power of 2: %zu",
get_realpath(), tls_segment.alignment);
}
return false;
}
tls_ = std::make_unique<soinfo_tls>();
tls_->segment = tls_segment;
}
// Extract useful information from dynamic section.
// Note that: "Except for the DT_NULL element at the end of the array,
// and the relative order of DT_NEEDED elements, entries may appear in any order."

View file

@ -415,6 +415,8 @@ static ElfW(Addr) linker_main(KernelArgumentBlock& args, const char* exe_to_load
}
}
linker_setup_exe_static_tls(g_argv[0]);
// Load ld_preloads and dependencies.
std::vector<const char*> needed_library_name_list;
size_t ld_preloads_count = 0;
@ -452,8 +454,7 @@ static ElfW(Addr) linker_main(KernelArgumentBlock& args, const char* exe_to_load
si->increment_ref_count();
}
layout_linker_static_tls();
linker_finalize_static_tls();
__libc_init_main_thread_final();
if (!get_cfi_shadow()->InitialLinkDone(solist)) __linker_cannot_link(g_argv[0]);

View file

@ -628,6 +628,10 @@ android_namespace_list_t& soinfo::get_secondary_namespaces() {
return secondary_namespaces_;
}
// Returns this solib's TLS info, or nullptr if it has no TLS segment or the
// soinfo predates version 5 (which introduced the tls_ field).
soinfo_tls* soinfo::get_tls() const {
  if (!has_min_version(5)) {
    return nullptr;
  }
  return tls_.get();
}
ElfW(Addr) soinfo::resolve_symbol_address(const ElfW(Sym)* s) const {
if (ELF_ST_TYPE(s->st_info) == STT_GNU_IFUNC) {
return call_ifunc_resolver(s->st_value + load_bias);

View file

@ -30,8 +30,10 @@
#include <link.h>
#include <memory>
#include <string>
#include "private/bionic_elf_tls.h"
#include "linker_namespaces.h"
#define FLAG_LINKED 0x00000001
@ -61,7 +63,7 @@
// unset.
#define FLAG_NEW_SOINFO 0x40000000 // new soinfo format
#define SOINFO_VERSION 4
#define SOINFO_VERSION 5
typedef void (*linker_dtor_function_t)();
typedef void (*linker_ctor_function_t)(int, char**, char**);
@ -100,6 +102,17 @@ struct version_info {
// TODO(dimitry): remove reference from soinfo member functions to this class.
class VersionTracker;
// The first ELF TLS module has ID 1. Zero is reserved for the first word of
// the DTV, a generation count, and unresolved weak symbols also use module
// ID 0.
static constexpr size_t kUninitializedModuleId = 0;

// TLS information attached to an soinfo (version >= 5): the solib's PT_TLS
// segment plus the module ID and table entry assigned during registration.
struct soinfo_tls {
  // Location, size, and alignment of the solib's PT_TLS segment.
  TlsSegment segment;
  // 1-based TLS module ID, or kUninitializedModuleId before registration.
  size_t module_id = kUninitializedModuleId;
  // The module's entry in the global TlsModule table; null before
  // registration.
  TlsModule* module = nullptr;
};
#if defined(__work_around_b_24465209__)
#define SOINFO_NAME_LEN 128
#endif
@ -284,6 +297,8 @@ struct soinfo {
void add_secondary_namespace(android_namespace_t* secondary_ns);
android_namespace_list_t& get_secondary_namespaces();
soinfo_tls* get_tls() const;
void set_mapped_by_caller(bool reserved_map);
bool is_mapped_by_caller() const;
@ -366,6 +381,9 @@ struct soinfo {
// version >= 4
ElfW(Relr)* relr_;
size_t relr_count_;
// version >= 5
std::unique_ptr<soinfo_tls> tls_;
};
// This function is used by dlvsym() to calculate hash of sym_ver

View file

@ -28,20 +28,75 @@
#include "linker_tls.h"
#include <vector>
#include "private/ScopedRWLock.h"
#include "private/bionic_defs.h"
#include "private/bionic_elf_tls.h"
#include "private/bionic_globals.h"
#include "private/linker_native_bridge.h"
#include "linker_main.h"
#include "linker_soinfo.h"
static bool g_static_tls_finished;
static std::vector<TlsModule> g_tls_modules;
// Returns the index of an unused slot in g_tls_modules (a slot is unused when
// its soinfo_ptr is null). If every slot is in use, grows the vector and
// republishes its data pointer and size into the global TlsModules table.
// Caller must hold the tls_modules write lock: push_back can reallocate the
// table that readers are walking.
static size_t get_unused_module_index() {
  for (size_t i = 0; i < g_tls_modules.size(); ++i) {
    if (g_tls_modules[i].soinfo_ptr == nullptr) {
      return i;
    }
  }
  g_tls_modules.push_back({});
  __libc_shared_globals()->tls_modules.module_count = g_tls_modules.size();
  __libc_shared_globals()->tls_modules.module_table = g_tls_modules.data();
  return g_tls_modules.size() - 1;
}
// Assigns the solib a module ID and a TlsModule table entry describing its
// TLS segment. static_offset is the segment's offset within the static TLS
// block, or SIZE_MAX for a dynamically-loaded module.
static void register_tls_module(soinfo* si, size_t static_offset) {
  // The global TLS module table points at the std::vector of modules declared
  // in this file, so acquire a write lock before modifying the std::vector.
  ScopedWriteLock locker(&__libc_shared_globals()->tls_modules.rwlock);

  size_t module_idx = get_unused_module_index();
  TlsModule* module = &g_tls_modules[module_idx];

  soinfo_tls* si_tls = si->get_tls();
  // Module IDs are 1-based; 0 is kUninitializedModuleId.
  si_tls->module_id = module_idx + 1;
  si_tls->module = module;

  *module = {
    .segment = si_tls->segment,
    .static_offset = static_offset,
    // Bump the global generation counter so dynamic TLS lookups can detect
    // that the table changed.
    .first_generation = ++__libc_shared_globals()->tls_modules.generation,
    .soinfo_ptr = si,
  };
}
// Releases the solib's TlsModule table entry so the slot can be reused. Only
// dynamic modules (static_offset == SIZE_MAX) may be unregistered; modules
// with static TLS offsets live for the life of the process.
static void unregister_tls_module(soinfo* si) {
  ScopedWriteLock locker(&__libc_shared_globals()->tls_modules.rwlock);

  soinfo_tls* si_tls = si->get_tls();
  CHECK(si_tls->module->static_offset == SIZE_MAX);
  CHECK(si_tls->module->soinfo_ptr == si);
  // Clearing the entry (soinfo_ptr in particular) marks the slot unused; see
  // get_unused_module_index.
  *si_tls->module = {};
  si_tls->module_id = kUninitializedModuleId;
  si_tls->module = nullptr;
}
// Reserve space for the bionic_tls struct within the static TLS layout.
// NOTE(review): __BIONIC_WEAK_FOR_NATIVE_BRIDGE presumably allows a native
// bridge implementation to interpose this symbol — confirm against
// private/bionic_defs.h.
__BIONIC_WEAK_FOR_NATIVE_BRIDGE
extern "C" void __linker_reserve_bionic_tls_in_static_tls() {
  __libc_shared_globals()->static_tls_layout.reserve_bionic_tls();
}
// Stub for linker static TLS layout.
void layout_linker_static_tls() {
void linker_setup_exe_static_tls(const char* progname) {
soinfo* somain = solist_get_somain();
StaticTlsLayout& layout = __libc_shared_globals()->static_tls_layout;
layout.reserve_tcb();
if (somain->get_tls() == nullptr) {
layout.reserve_exe_segment_and_tcb(nullptr, progname);
} else {
register_tls_module(somain, layout.reserve_exe_segment_and_tcb(&somain->get_tls()->segment, progname));
}
// The pthread key data is located at the very front of bionic_tls. As a
// temporary workaround, allocate bionic_tls just after the thread pointer so
@ -49,8 +104,32 @@ void layout_linker_static_tls() {
// small enough. Specifically, Golang scans forward 384 words from the TP on
// ARM.
// - http://b/118381796
// - https://groups.google.com/d/msg/golang-dev/yVrkFnYrYPE/2G3aFzYqBgAJ
// - https://github.com/golang/go/issues/29674
__linker_reserve_bionic_tls_in_static_tls();
layout.finish_layout();
}
// Closes the static TLS layout: after this, register_soinfo_tls gives newly
// loaded solibs SIZE_MAX (dynamic) offsets instead of static TLS space, and
// the layout's final size and alignment are fixed.
void linker_finalize_static_tls() {
  g_static_tls_finished = true;
  __libc_shared_globals()->static_tls_layout.finish_layout();
}
// Register an solib's TLS segment in the global module table. If the static
// TLS layout is still open, the segment is also given a static TLS offset.
// No-op for solibs without a TLS segment or that are already registered.
void register_soinfo_tls(soinfo* si) {
  soinfo_tls* tls_info = si->get_tls();
  if (tls_info == nullptr) return;
  if (tls_info->module_id != kUninitializedModuleId) return;

  // Modules loaded after static TLS is finalized stay dynamic (SIZE_MAX).
  size_t offset = SIZE_MAX;
  if (!g_static_tls_finished) {
    offset = __libc_shared_globals()->static_tls_layout.reserve_solib_segment(tls_info->segment);
  }
  register_tls_module(si, offset);
}
// Unregister an solib's TLS module on unload. No-op for solibs without a TLS
// segment or whose segment was never registered.
void unregister_soinfo_tls(soinfo* si) {
  soinfo_tls* tls_info = si->get_tls();
  if (tls_info != nullptr && tls_info->module_id != kUninitializedModuleId) {
    unregister_tls_module(si);
  }
}

View file

@ -28,4 +28,10 @@
#pragma once
void layout_linker_static_tls();
struct soinfo;
void linker_setup_exe_static_tls(const char* progname);
void linker_finalize_static_tls();
void register_soinfo_tls(soinfo* si);
void unregister_soinfo_tls(soinfo* si);

View file

@ -1086,7 +1086,7 @@ TEST(dlfcn, dlopen_library_with_ELF_TLS) {
dlerror(); // Clear any pending errors.
void* handle = dlopen("libelf-tls-library.so", RTLD_NOW);
ASSERT_TRUE(handle == nullptr);
ASSERT_SUBSTR("unsupported ELF TLS", dlerror());
ASSERT_SUBSTR("unknown reloc type ", dlerror());
}
TEST(dlfcn, dlopen_bad_flags) {