platform_system_core/libunwindstack/MapInfo.cpp

/*
* Copyright (C) 2017 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include <sys/mman.h>
#include <sys/types.h>
#include <unistd.h>

#include <memory>
#include <mutex>
#include <string>

#include <unwindstack/Elf.h>
#include <unwindstack/MapInfo.h>
#include <unwindstack/Maps.h>
#include <unwindstack/Memory.h>

namespace unwindstack {
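
// Attempt to initialize |memory| from the backing file when the start of the
// elf data actually lives in the preceding read-only map. On success this
// sets elf_offset and elf_start_offset for this map.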
bool MapInfo::InitFileMemoryFromPreviousReadOnlyMap(MemoryFileAtOffset* memory) {
  // One last attempt, see if the previous map is read-only with the
  // same name and stretches across this map.
  if (prev_map == nullptr || prev_map->flags != PROT_READ) {
    return false;
  }

  uint64_t map_size = end - prev_map->end;
  if (!memory->Init(name, prev_map->offset, map_size)) {
    return false;
  }

  uint64_t max_size;
  if (!Elf::GetInfo(memory, &max_size) || max_size < map_size) {
    return false;
  }

  if (!memory->Init(name, prev_map->offset, max_size)) {
    return false;
  }

  elf_offset = offset - prev_map->offset;
  elf_start_offset = prev_map->offset;
  return true;
}
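
// Create a Memory object backed by this map's file. Handles an elf embedded
// at a non-zero offset in the file, an elf whose read-only start lives in the
// previous map, and the case where the whole file is an elf. Returns nullptr
// if the file cannot be mapped in.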
Memory* MapInfo::GetFileMemory() {
  std::unique_ptr<MemoryFileAtOffset> memory(new MemoryFileAtOffset);
  if (offset == 0) {
    if (memory->Init(name, 0)) {
      return memory.release();
    }
    return nullptr;
  }

  // These are the possibilities when the offset is non-zero.
  // - There is an elf file embedded in a file, and the offset is the
  //   start of the elf in the file.
  // - There is an elf file embedded in a file, and the offset is the
  //   start of the executable part of the file. The actual start
  //   of the elf is in the read-only segment preceding this map.
  // - The whole file is an elf file, and the offset needs to be saved.
  //
  // Map in just the part of the file covered by this map. If this is not
  // a valid elf, then reinit as if the whole file is an elf file.
  // If the offset does point at a valid elf, determine the full size of the
  // elf and reinit to that size. This is needed because the dynamic linker
  // only maps in a portion of the original elf, and never the symbol
  // file data.
  uint64_t map_size = end - start;
  if (!memory->Init(name, offset, map_size)) {
    return nullptr;
  }

  // Check if the start of this map is an embedded elf.
  uint64_t max_size = 0;
  if (Elf::GetInfo(memory.get(), &max_size)) {
    if (max_size > map_size) {
      if (memory->Init(name, offset, max_size)) {
        return memory.release();
      }
      // Try to reinit using the default map_size.
      if (memory->Init(name, offset, map_size)) {
        return memory.release();
      }
      return nullptr;
    }
    return memory.release();
  }

  // No elf at offset, try to init as if the whole file is an elf.
  if (memory->Init(name, 0) && Elf::IsValidElf(memory.get())) {
    elf_offset = offset;
    // Need to check how to set the elf start offset. If this map is not
    // the r-x map that follows an r-- map with the same name, use the real
    // offset value. Otherwise, use 0.
    if (prev_map == nullptr || prev_map->offset != 0 || prev_map->flags != PROT_READ ||
        prev_map->name != name) {
      elf_start_offset = offset;
    }
    return memory.release();
  }

  // See if the map previous to this one contains a read-only map
  // that represents the real start of the elf data.
  if (InitFileMemoryFromPreviousReadOnlyMap(memory.get())) {
    return memory.release();
  }

  // Failed to find an elf at the start of the file or in the read-only map,
  // so return a file object for the current map.
  if (memory->Init(name, offset, map_size)) {
    return memory.release();
  }
  return nullptr;
}
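
// Create the Memory object that will be used to read this map's elf data.
// Prefers the backing file when one exists and can be used; otherwise falls
// back to reading the elf directly out of process memory, including the case
// where the read-only start of the elf lives in the previous map.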
Memory* MapInfo::CreateMemory(const std::shared_ptr<Memory>& process_memory) {
  if (end <= start) {
    return nullptr;
  }

  elf_offset = 0;

  // Fail on device maps.
  if (flags & MAPS_FLAGS_DEVICE_MAP) {
    return nullptr;
  }

  // First try and use the file associated with the info.
  if (!name.empty()) {
    Memory* memory = GetFileMemory();
    if (memory != nullptr) {
      return memory;
    }
  }

  // Need to verify that this elf is valid. It's possible that only part of
  // the elf file mapped into memory is in the executable map. In that case,
  // there will be another read-only map that includes the first part of the
  // elf file. This is done if the linker rosegment option is used.
  std::unique_ptr<MemoryRange> memory(new MemoryRange(process_memory, start, end - start, 0));
  if (Elf::IsValidElf(memory.get())) {
    return memory.release();
  }

  // Find the read-only map by looking at the previous map. The linker
  // doesn't guarantee that this invariant will always be true. However,
  // if that changes, there is likely something else that will change and
  // break something.
  if (offset == 0 || name.empty() || prev_map == nullptr || prev_map->name != name ||
      prev_map->offset >= offset) {
    return nullptr;
  }

  // Make sure that relative pc values are corrected properly.
  elf_offset = offset - prev_map->offset;
  // Use this as the elf start offset, otherwise, you always get offsets into
  // the r-x section, which is not quite the right information.
  elf_start_offset = prev_map->offset;
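
  // Stitch together two ranges so that elf-relative reads resolve correctly:
  // the read-only previous map supplies the data starting at offset 0
  // (including the elf headers), and this map supplies the data starting at
  // elf_offset.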
  MemoryRanges* ranges = new MemoryRanges;
  ranges->Insert(
      new MemoryRange(process_memory, prev_map->start, prev_map->end - prev_map->start, 0));
  ranges->Insert(new MemoryRange(process_memory, start, end - start, elf_offset));

  return ranges;
}
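
// Return the Elf object for this map, creating it on first use. The elf is
// cached on the MapInfo (and optionally in the global elf cache), so repeated
// calls return the same object. If the created elf does not match
// expected_arch, it is marked invalid.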
Elf* MapInfo::GetElf(const std::shared_ptr<Memory>& process_memory, ArchEnum expected_arch) {
  // Make sure no other thread is trying to add the elf to this map.
  std::lock_guard<std::mutex> guard(mutex_);

  if (elf.get() != nullptr) {
    return elf.get();
  }
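
  // If the global elf cache is enabled, see whether an elf object for this
  // same file has already been created for another map and can be reused.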
  bool locked = false;
  if (Elf::CachingEnabled() && !name.empty()) {
    Elf::CacheLock();
    locked = true;
    if (Elf::CacheGet(this)) {
      Elf::CacheUnlock();
      return elf.get();
    }
  }

  Memory* memory = CreateMemory(process_memory);
  if (locked) {
    if (Elf::CacheAfterCreateMemory(this)) {
      delete memory;
      Elf::CacheUnlock();
      return elf.get();
    }
  }
  elf.reset(new Elf(memory));
  // If the init fails, keep the elf around as an invalid object so we
  // don't try to reinit the object.
  elf->Init();
  if (elf->valid() && expected_arch != elf->arch()) {
    // Make the elf invalid, mismatch between arch and expected arch.
    elf->Invalidate();
  }

  if (locked) {
    Elf::CacheAdd(this);
    Elf::CacheUnlock();
  }
  return elf.get();
}
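
// Look up the function name and function offset for addr using this map's
// elf. Returns false if the elf has not been created yet or the lookup fails.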
bool MapInfo::GetFunctionName(uint64_t addr, std::string* name, uint64_t* func_offset) {
  {
    // Make sure no other thread is trying to update this elf object.
    std::lock_guard<std::mutex> guard(mutex_);
    if (elf == nullptr) {
      return false;
    }
  }
  // The lock is no longer needed: once the elf object has been created, it is
  // not deleted until this object is deleted.
  return elf->GetFunctionName(addr, name, func_offset);
}
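
// Return the load bias for this map's elf, computing it on the first call and
// caching the result in load_bias. Uses the already-created elf object when
// there is one; otherwise reads just enough of the elf data to get the bias.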
uint64_t MapInfo::GetLoadBias(const std::shared_ptr<Memory>& process_memory) {
  uint64_t cur_load_bias = load_bias.load();
  if (cur_load_bias != static_cast<uint64_t>(-1)) {
    return cur_load_bias;
  }

  {
    // Make sure no other thread is trying to add the elf to this map.
    std::lock_guard<std::mutex> guard(mutex_);
    if (elf != nullptr) {
      if (elf->valid()) {
        cur_load_bias = elf->GetLoadBias();
        load_bias = cur_load_bias;
        return cur_load_bias;
      } else {
        load_bias = 0;
        return 0;
      }
    }
  }

  // Call the lightweight static function that reads only enough of the elf
  // data to get the load bias.
  std::unique_ptr<Memory> memory(CreateMemory(process_memory));
  cur_load_bias = Elf::GetLoadBias(memory.get());
  load_bias = cur_load_bias;
  return cur_load_bias;
}

}  // namespace unwindstack