Revert "Add tagged pointers to bionic."

This reverts commit 43d5f9d4dd.

Bug: 135754954
Bug: 147147490

Exempt-From-Owner-Approval: clean revert

Reason for revert: Breaks ART gtest, see:
https://ci.chromium.org/p/art/builders/ci/angler-armv8-non-gen-cc/561

The crash happens on an mprotect of a page: the call fails with ENOMEM and the test crashes.
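
For context, a minimal standalone sketch of that failure mode (this is not the ART test itself; the 0x3C tag and the shift of 56 come from the reverted malloc_tagged_pointers.h). Under ARM TBI the CPU ignores the top byte on loads and stores, but the kernel's mapping lookup for mprotect() does not strip the tag, so a tagged address matches no VMA and the call fails with ENOMEM:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main() {
  // Map one page, standing in for a page that ART wants to reprotect.
  size_t page_size = sysconf(_SC_PAGESIZE);
  void* page = mmap(nullptr, page_size, PROT_READ | PROT_WRITE,
                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (page == MAP_FAILED) return 1;

  // Apply the same top-byte tag the reverted code used (POINTER_TAG 0x3C at
  // TAG_SHIFT 56), as a heap pointer returned by the tagged bionic malloc would carry.
  void* tagged = reinterpret_cast<void*>(
      reinterpret_cast<uintptr_t>(page) | (static_cast<uintptr_t>(0x3C) << 56));

  // On kernels that do not untag addresses for memory-management syscalls,
  // no mapping covers the tagged address, so mprotect() fails with ENOMEM.
  if (mprotect(tagged, page_size, PROT_READ) != 0) {
    perror("mprotect on tagged address");  // typically: Cannot allocate memory
  }

  munmap(page, page_size);
  return 0;
}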

Change-Id: I52eea1abbfaf8d8e2226f92d30aa55aba3810528
Author: Nicolas Geoffray, 2020-01-21 13:56:21 +00:00
parent 43d5f9d4dd
commit ce4c42e09f
8 changed files with 24 additions and 277 deletions

View file

@@ -27,7 +27,6 @@
*/
#include "libc_init_common.h"
#include "malloc_tagged_pointers.h"
#include <elf.h>
#include <errno.h>
@@ -40,7 +39,6 @@
#include <sys/auxv.h>
#include <sys/personality.h>
#include <sys/time.h>
#include <sys/utsname.h>
#include <unistd.h>
#include <async_safe/log.h>
@@ -60,24 +58,6 @@ __LIBC_HIDDEN__ WriteProtected<libc_globals> __libc_globals;
// Not public, but well-known in the BSDs.
const char* __progname;
#ifdef __aarch64__
static bool KernelSupportsTaggedPointers() {
utsname buf;
utsname* tagged_buf =
reinterpret_cast<utsname*>(reinterpret_cast<uintptr_t>(&buf) |
(static_cast<uintptr_t>(0xAA) << TAG_SHIFT));
// We use `uname()` here as a system call to determine if the kernel supports
// tagged pointers. If the kernel supports tagged pointers, it will truncate the
// tag before populating `buf`, and `uname()` should return zero (indicating
// no error). If ARM TBI isn't enabled, the kernel should return an error code
// that indicates that the tagged memory couldn't be accessed. The exact
// system call that we use here isn't important, it's just a convenient system
// call that validates a pointer.
return uname(tagged_buf) == 0;
}
#endif
void __libc_init_globals() {
// Initialize libc globals that are needed in both the linker and in libc.
// In dynamic binaries, this is run at least twice for different copies of the
@@ -86,13 +66,6 @@ void __libc_init_globals() {
__libc_globals.mutate([](libc_globals* globals) {
__libc_init_vdso(globals);
__libc_init_setjmp_cookie(globals);
#ifdef __aarch64__
globals->heap_pointer_tag = KernelSupportsTaggedPointers()
? (reinterpret_cast<uintptr_t>(POINTER_TAG) << TAG_SHIFT)
: 0;
#else
globals->heap_pointer_tag = 0;
#endif
});
}

View file

@@ -43,7 +43,6 @@
#include "malloc_common.h"
#include "malloc_limit.h"
#include "malloc_tagged_pointers.h"
// =============================================================================
// Global variable instantiations.
@@ -62,18 +61,17 @@ void* (*volatile __memalign_hook)(size_t, size_t, const void*);
extern "C" void* calloc(size_t n_elements, size_t elem_size) {
auto dispatch_table = GetDispatchTable();
if (__predict_false(dispatch_table != nullptr)) {
return MaybeTagPointer(dispatch_table->calloc(n_elements, elem_size));
return dispatch_table->calloc(n_elements, elem_size);
}
void* result = Malloc(calloc)(n_elements, elem_size);
if (__predict_false(result == nullptr)) {
warning_log("calloc(%zu, %zu) failed: returning null pointer", n_elements, elem_size);
}
return MaybeTagPointer(result);
return result;
}
extern "C" void free(void* mem) {
auto dispatch_table = GetDispatchTable();
mem = MaybeUntagAndCheckPointer(mem);
if (__predict_false(dispatch_table != nullptr)) {
dispatch_table->free(mem);
} else {
@@ -107,22 +105,18 @@ extern "C" int mallopt(int param, int value) {
extern "C" void* malloc(size_t bytes) {
auto dispatch_table = GetDispatchTable();
void *result;
if (__predict_false(dispatch_table != nullptr)) {
result = dispatch_table->malloc(bytes);
} else {
result = Malloc(malloc)(bytes);
return dispatch_table->malloc(bytes);
}
void* result = Malloc(malloc)(bytes);
if (__predict_false(result == nullptr)) {
warning_log("malloc(%zu) failed: returning null pointer", bytes);
return nullptr;
}
return MaybeTagPointer(result);
return result;
}
extern "C" size_t malloc_usable_size(const void* mem) {
auto dispatch_table = GetDispatchTable();
mem = MaybeUntagAndCheckPointer(mem);
if (__predict_false(dispatch_table != nullptr)) {
return dispatch_table->malloc_usable_size(mem);
}
@@ -132,52 +126,45 @@ extern "C" size_t malloc_usable_size(const void* mem) {
extern "C" void* memalign(size_t alignment, size_t bytes) {
auto dispatch_table = GetDispatchTable();
if (__predict_false(dispatch_table != nullptr)) {
return MaybeTagPointer(dispatch_table->memalign(alignment, bytes));
return dispatch_table->memalign(alignment, bytes);
}
void* result = Malloc(memalign)(alignment, bytes);
if (__predict_false(result == nullptr)) {
warning_log("memalign(%zu, %zu) failed: returning null pointer", alignment, bytes);
}
return MaybeTagPointer(result);
return result;
}
extern "C" int posix_memalign(void** memptr, size_t alignment, size_t size) {
auto dispatch_table = GetDispatchTable();
int result;
if (__predict_false(dispatch_table != nullptr)) {
result = dispatch_table->posix_memalign(memptr, alignment, size);
} else {
result = Malloc(posix_memalign)(memptr, alignment, size);
return dispatch_table->posix_memalign(memptr, alignment, size);
}
if (result == 0) {
*memptr = MaybeTagPointer(*memptr);
}
return result;
return Malloc(posix_memalign)(memptr, alignment, size);
}
extern "C" void* aligned_alloc(size_t alignment, size_t size) {
auto dispatch_table = GetDispatchTable();
if (__predict_false(dispatch_table != nullptr)) {
return MaybeTagPointer(dispatch_table->aligned_alloc(alignment, size));
return dispatch_table->aligned_alloc(alignment, size);
}
void* result = Malloc(aligned_alloc)(alignment, size);
if (__predict_false(result == nullptr)) {
warning_log("aligned_alloc(%zu, %zu) failed: returning null pointer", alignment, size);
}
return MaybeTagPointer(result);
return result;
}
extern "C" __attribute__((__noinline__)) void* realloc(void* old_mem, size_t bytes) {
auto dispatch_table = GetDispatchTable();
old_mem = MaybeUntagAndCheckPointer(old_mem);
if (__predict_false(dispatch_table != nullptr)) {
return MaybeTagPointer(dispatch_table->realloc(old_mem, bytes));
return dispatch_table->realloc(old_mem, bytes);
}
void* result = Malloc(realloc)(old_mem, bytes);
if (__predict_false(result == nullptr && bytes != 0)) {
warning_log("realloc(%p, %zu) failed: returning null pointer", old_mem, bytes);
}
return MaybeTagPointer(result);
return result;
}
extern "C" void* reallocarray(void* old_mem, size_t item_count, size_t item_size) {
@@ -195,66 +182,42 @@ extern "C" void* reallocarray(void* old_mem, size_t item_count, size_t item_size
extern "C" void* pvalloc(size_t bytes) {
auto dispatch_table = GetDispatchTable();
if (__predict_false(dispatch_table != nullptr)) {
return MaybeTagPointer(dispatch_table->pvalloc(bytes));
return dispatch_table->pvalloc(bytes);
}
void* result = Malloc(pvalloc)(bytes);
if (__predict_false(result == nullptr)) {
warning_log("pvalloc(%zu) failed: returning null pointer", bytes);
}
return MaybeTagPointer(result);
return result;
}
extern "C" void* valloc(size_t bytes) {
auto dispatch_table = GetDispatchTable();
if (__predict_false(dispatch_table != nullptr)) {
return MaybeTagPointer(dispatch_table->valloc(bytes));
return dispatch_table->valloc(bytes);
}
void* result = Malloc(valloc)(bytes);
if (__predict_false(result == nullptr)) {
warning_log("valloc(%zu) failed: returning null pointer", bytes);
}
return MaybeTagPointer(result);
return result;
}
#endif
// =============================================================================
struct CallbackWrapperArg {
void (*callback)(uintptr_t base, size_t size, void* arg);
void* arg;
};
void CallbackWrapper(uintptr_t base, size_t size, void* arg) {
CallbackWrapperArg* wrapper_arg = reinterpret_cast<CallbackWrapperArg*>(arg);
wrapper_arg->callback(
reinterpret_cast<uintptr_t>(MaybeTagPointer(reinterpret_cast<void*>(base))),
size, wrapper_arg->arg);
}
// =============================================================================
// Exported for use by libmemunreachable.
// =============================================================================
// Calls callback for every allocation in the anonymous heap mapping
// [base, base+size). Must be called between malloc_disable and malloc_enable.
// `base` here can be either a tagged or untagged pointer, but we always
// provide a tagged pointer to the `base` argument of `callback` if the kernel
// supports tagged pointers.
// [base, base+size). Must be called between malloc_disable and malloc_enable.
extern "C" int malloc_iterate(uintptr_t base, size_t size,
void (*callback)(uintptr_t base, size_t size, void* arg), void* arg) {
auto dispatch_table = GetDispatchTable();
// Wrap the malloc_iterate callback we were provided, in order to provide
// pointer tagging support.
CallbackWrapperArg wrapper_arg;
wrapper_arg.callback = callback;
wrapper_arg.arg = arg;
uintptr_t untagged_base =
reinterpret_cast<uintptr_t>(UntagPointer(reinterpret_cast<void*>(base)));
if (__predict_false(dispatch_table != nullptr)) {
return dispatch_table->malloc_iterate(
untagged_base, size, CallbackWrapper, &wrapper_arg);
return dispatch_table->malloc_iterate(base, size, callback, arg);
}
return Malloc(malloc_iterate)(
untagged_base, size, CallbackWrapper, &wrapper_arg);
return Malloc(malloc_iterate)(base, size, callback, arg);
}
// Disable calls to malloc so malloc_iterate gets a consistent view of

View file

@@ -1,128 +0,0 @@
/*
* Copyright (C) 2019 The Android Open Source Project
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#pragma once
#include <stdlib.h>
#include <stdint.h>
#include <sys/reboot.h>
#include <unistd.h>
#include <async_safe/log.h>
#include <private/bionic_globals.h>
// We choose a static pointer tag here for performance reasons. Dynamic tagging
// doesn't improve our detection, and simply hurts performance. This tag is
// deliberately chosen to always point to inaccessible memory on a standard
// 64-bit userspace process, and be easily identifiable by developers. This tag
// is also deliberately different from the standard pattern-init tag (0xAA), so
// as to be distinguishable from an uninitialized-pointer access. The first and
// second nibbles are also deliberately designed to be the bitset-mirror of each
// other (0b1100, 0b0011) in order to reduce incidental matches. Users must not
// rely on the implementation-defined value of this pointer tag, as it may
// change.
static constexpr uintptr_t POINTER_TAG = 0x3C;
static constexpr unsigned TAG_SHIFT = 56;
#if defined(__aarch64__)
static constexpr uintptr_t ADDRESS_MASK = (static_cast<uintptr_t>(1) << TAG_SHIFT) - 1;
static constexpr uintptr_t TAG_MASK = static_cast<uintptr_t>(0xFF) << TAG_SHIFT;
#endif // defined(__aarch64__)
// Return a forcibly-tagged pointer.
static inline void* TagPointer(void* ptr) {
#if defined(__aarch64__)
return reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(ptr) |
reinterpret_cast<uintptr_t>(__libc_globals->heap_pointer_tag));
#else
async_safe_fatal("Attempting to tag a pointer (%p) on non-aarch64.", ptr);
#endif
}
#if defined(__aarch64__) && !__has_feature(hwaddress_sanitizer)
// Return a forcibly-untagged pointer. The pointer tag is not checked for
// validity.
static inline void* UntagPointer(const volatile void* ptr) {
return reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(ptr) & ADDRESS_MASK);
}
static void* SlowPathPointerCheck(const volatile void* ptr) {
uintptr_t ptr_tag = reinterpret_cast<uintptr_t>(ptr) & TAG_MASK;
uintptr_t heap_tag = reinterpret_cast<uintptr_t>(__libc_globals->heap_pointer_tag);
// Applications may disable pointer tagging, which will be propagated to
// libc in the zygote. This means that there may already be tagged heap
// allocations that will fail when checked against the zeroed heap tag. The
// check below allows us to turn *off* pointer tagging and still allow
// tagged heap allocations to be freed, as long as they're using *our* tag.
if (__predict_false(heap_tag != 0 || ptr_tag != (POINTER_TAG << TAG_SHIFT))) {
// TODO(b/145604058) - Upstream tagged pointers documentation and provide
// a link to it in the abort message here.
async_safe_fatal("Pointer tag for %p was truncated.", ptr);
}
return UntagPointer(ptr);
}
// Return a tagged pointer iff the kernel supports tagged pointers, and `ptr` is
// non-null.
static inline void* MaybeTagPointer(void* ptr) {
if (__predict_true(ptr != nullptr)) {
return TagPointer(ptr);
}
return ptr;
}
// Untag the pointer, and check the pointer tag iff the kernel supports tagged
// pointers. If the tag is incorrect, trap.
static inline void* MaybeUntagAndCheckPointer(const volatile void* ptr) {
if (__predict_false(ptr == nullptr)) {
return nullptr;
}
uintptr_t ptr_tag = reinterpret_cast<uintptr_t>(ptr) & TAG_MASK;
uintptr_t heap_tag = reinterpret_cast<uintptr_t>(__libc_globals->heap_pointer_tag);
if (__predict_false(heap_tag != ptr_tag)) {
return SlowPathPointerCheck(ptr);
}
return UntagPointer(ptr);
}
#else // defined(__aarch64__) && !__has_feature(hwaddress_sanitizer)
static inline void* UntagPointer(const volatile void* ptr) {
return const_cast<void*>(ptr);
}
static inline void* MaybeTagPointer(void* ptr) {
return ptr;
}
static inline void* MaybeUntagAndCheckPointer(const volatile void* ptr) {
return const_cast<void *>(ptr);
}
#endif // defined(__aarch64__) && !__has_feature(hwaddress_sanitizer)

View file

@@ -43,7 +43,6 @@
#include <android-base/file.h>
#include <android-base/properties.h>
#include <android-base/stringprintf.h>
#include <bionic/malloc_tagged_pointers.h>
#include <private/bionic_malloc_dispatch.h>
#include <private/MallocXmlElem.h>
@@ -884,7 +883,6 @@ ssize_t debug_malloc_backtrace(void* pointer, uintptr_t* frames, size_t max_fram
if (!(g_debug->config().options() & BACKTRACE)) {
return 0;
}
pointer = UntagPointer(pointer);
return PointerData::GetFrames(pointer, frames, max_frames);
}

View file

@@ -44,7 +44,6 @@
struct libc_globals {
vdso_entry vdso[VDSO_END];
long setjmp_cookie;
uintptr_t heap_pointer_tag;
// In order to allow a complete switch between dispatch tables without
// the need for copying each function by function in the structure,

View file

@@ -211,9 +211,6 @@ cc_test_library {
"libprocinfo",
"libsystemproperties",
],
srcs: [
"tagged_pointers_test.cpp",
],
},
},

View file

@@ -1,57 +0,0 @@
/*
* Copyright (C) 2014 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include <sys/utsname.h>
#include "utils.h"
#include <bionic/malloc_tagged_pointers.h>
static bool KernelSupportsTaggedPointers() {
#ifdef __aarch64__
utsname buf;
utsname* tagged_buf =
reinterpret_cast<utsname*>(reinterpret_cast<uintptr_t>(&buf) |
(static_cast<uintptr_t>(0xAA) << TAG_SHIFT));
// We use `uname()` here as a system call to determine if the kernel supports
// tagged pointers. If ARM TBI is enabled, the kernel should truncate the tag
// before populating `buf`, and `uname()` should return zero (indicating no
// error). If ARM TBI isn't enabled, the kernel should return an error code
// that indicates that the tagged memory couldn't be accessed. The exact
// system call that we use here isn't important, it's just a convenient system
// call that validates a pointer.
return uname(tagged_buf) == 0;
#else
return false;
#endif
}
TEST(tagged_pointers, check_tagged_pointer_dies) {
if (!KernelSupportsTaggedPointers()) {
GTEST_SKIP() << "Kernel doesn't support tagged pointers.";
}
#ifdef __aarch64__
void *x = malloc(1);
// Ensure that `x` has a pointer tag.
EXPECT_NE(reinterpret_cast<uintptr_t>(x) >> 56, 0u);
x = untag_address(x);
EXPECT_DEATH(free(x), "Pointer tag for 0x[a-zA-Z0-9]* was truncated");
#endif // defined(__aarch64__)
}

View file

@@ -68,8 +68,10 @@ static inline bool running_with_hwasan() {
static inline void* untag_address(void* addr) {
#if defined(__LP64__)
constexpr uintptr_t mask = (static_cast<uintptr_t>(1) << 56) - 1;
addr = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(addr) & mask);
if (running_with_hwasan()) {
constexpr uintptr_t mask = (static_cast<uintptr_t>(1) << 56) - 1;
addr = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(addr) & mask);
}
#endif
return addr;
}