Merge "Add tagged pointers to bionic."

This commit is contained in:
Evgenii Stepanov 2020-01-24 00:26:32 +00:00 committed by Gerrit Code Review
commit 3ff4245c70
11 changed files with 313 additions and 41 deletions

View file

@ -28,11 +28,37 @@
#include "heap_tagging.h"
#include "malloc_common.h"
#include "malloc_tagged_pointers.h"
#include <platform/bionic/malloc.h>
static HeapTaggingLevel heap_tagging_level = M_HEAP_TAGGING_LEVEL_NONE;
// Establishes the process-default heap tagging state during libc startup.
// On aarch64: tries to enable MTE (when compiled in), otherwise falls back to
// TBI-style address tagging and publishes the static pointer tag so the
// allocator wrappers can tag/untag heap pointers. No-op on other targets.
void SetDefaultHeapTaggingLevel() {
// Allow the kernel to accept tagged pointers in syscall arguments. This is a no-op (kernel
// returns -EINVAL) if the kernel doesn't understand the prctl.
#if defined(__aarch64__)
// Local fallback definitions: these prctl constants may be absent from older
// kernel uapi headers. Values match the upstream Linux definitions.
#define PR_SET_TAGGED_ADDR_CTRL 55
#define PR_TAGGED_ADDR_ENABLE (1UL << 0)
#ifdef ANDROID_EXPERIMENTAL_MTE
// First, try enabling MTE in asynchronous mode, with tag 0 excluded. This will fail if the kernel
// or hardware doesn't support MTE, and we will fall back to just enabling tagged pointers in
// syscall arguments.
if (prctl(PR_SET_TAGGED_ADDR_CTRL,
PR_TAGGED_ADDR_ENABLE | PR_MTE_TCF_ASYNC | (1 << PR_MTE_EXCL_SHIFT), 0, 0, 0) == 0) {
// MTE accepted: leave heap_tagging_level at NONE and heap_pointer_tag at 0
// -- libc does not apply its own software pointer tag in this mode.
return;
}
#endif // ANDROID_EXPERIMENTAL_MTE
// MTE unavailable (or not compiled in): enable plain tagged-address (TBI)
// support. On success, record the level and publish the static tag consumed
// by MaybeTagPointer()/MaybeUntagAndCheckPointer().
if (prctl(PR_SET_TAGGED_ADDR_CTRL, PR_TAGGED_ADDR_ENABLE, 0, 0, 0) == 0) {
heap_tagging_level = M_HEAP_TAGGING_LEVEL_TBI;
__libc_globals.mutate([](libc_globals* globals) {
globals->heap_pointer_tag = reinterpret_cast<uintptr_t>(POINTER_TAG) << TAG_SHIFT;
});
}
#endif // aarch64
}
bool SetHeapTaggingLevel(void* arg, size_t arg_size) {
if (arg_size != sizeof(HeapTaggingLevel)) {
return false;
@ -42,11 +68,23 @@ bool SetHeapTaggingLevel(void* arg, size_t arg_size) {
switch (tag_level) {
case M_HEAP_TAGGING_LEVEL_NONE:
break;
case M_HEAP_TAGGING_LEVEL_TBI:
if (heap_tagging_level == M_HEAP_TAGGING_LEVEL_NONE) {
error_log(
"SetHeapTaggingLevel: re-enabling tagging after it was disabled is not supported");
return false;
}
break;
default:
error_log("SetHeapTaggingLevel: unknown tagging level");
return false;
}
heap_tagging_level = tag_level;
info_log("SetHeapTaggingLevel: tag level set to %d", tag_level);
if (heap_tagging_level == M_HEAP_TAGGING_LEVEL_NONE && __libc_globals->heap_pointer_tag != 0) {
__libc_globals.mutate([](libc_globals* globals) { globals->heap_pointer_tag = 0; });
}
return true;
}

View file

@ -30,4 +30,5 @@
#include <stddef.h>
void SetDefaultHeapTaggingLevel();
bool SetHeapTaggingLevel(void* arg, size_t arg_size);

View file

@ -27,6 +27,7 @@
*/
#include "libc_init_common.h"
#include "heap_tagging.h"
#include <elf.h>
#include <errno.h>
@ -105,23 +106,7 @@ void __libc_init_common() {
__system_properties_init(); // Requires 'environ'.
__libc_init_fdsan(); // Requires system properties (for debug.fdsan).
// Allow the kernel to accept tagged pointers in syscall arguments. This is a no-op (kernel
// returns -EINVAL) if the kernel doesn't understand the prctl.
#if defined(__aarch64__)
#define PR_SET_TAGGED_ADDR_CTRL 55
#define PR_TAGGED_ADDR_ENABLE (1UL << 0)
#ifdef ANDROID_EXPERIMENTAL_MTE
// First, try enabling MTE in asynchronous mode, with tag 0 excluded. This will fail if the kernel
// or hardware doesn't support MTE, and we will fall back to just enabling tagged pointers in
// syscall arguments.
if (prctl(PR_SET_TAGGED_ADDR_CTRL,
PR_TAGGED_ADDR_ENABLE | PR_MTE_TCF_ASYNC | (1 << PR_MTE_EXCL_SHIFT), 0, 0, 0)) {
prctl(PR_SET_TAGGED_ADDR_CTRL, PR_TAGGED_ADDR_ENABLE, 0, 0, 0);
}
#else
prctl(PR_SET_TAGGED_ADDR_CTRL, PR_TAGGED_ADDR_ENABLE, 0, 0, 0);
#endif
#endif
SetDefaultHeapTaggingLevel();
}
void __libc_init_fork_handler() {

View file

@ -44,6 +44,7 @@
#include "heap_tagging.h"
#include "malloc_common.h"
#include "malloc_limit.h"
#include "malloc_tagged_pointers.h"
// =============================================================================
// Global variables instantiations.
@ -62,17 +63,18 @@ void* (*volatile __memalign_hook)(size_t, size_t, const void*);
extern "C" void* calloc(size_t n_elements, size_t elem_size) {
auto dispatch_table = GetDispatchTable();
if (__predict_false(dispatch_table != nullptr)) {
return dispatch_table->calloc(n_elements, elem_size);
return MaybeTagPointer(dispatch_table->calloc(n_elements, elem_size));
}
void* result = Malloc(calloc)(n_elements, elem_size);
if (__predict_false(result == nullptr)) {
warning_log("calloc(%zu, %zu) failed: returning null pointer", n_elements, elem_size);
}
return result;
return MaybeTagPointer(result);
}
extern "C" void free(void* mem) {
auto dispatch_table = GetDispatchTable();
mem = MaybeUntagAndCheckPointer(mem);
if (__predict_false(dispatch_table != nullptr)) {
dispatch_table->free(mem);
} else {
@ -106,18 +108,22 @@ extern "C" int mallopt(int param, int value) {
extern "C" void* malloc(size_t bytes) {
auto dispatch_table = GetDispatchTable();
void *result;
if (__predict_false(dispatch_table != nullptr)) {
return dispatch_table->malloc(bytes);
result = dispatch_table->malloc(bytes);
} else {
result = Malloc(malloc)(bytes);
}
void* result = Malloc(malloc)(bytes);
if (__predict_false(result == nullptr)) {
warning_log("malloc(%zu) failed: returning null pointer", bytes);
return nullptr;
}
return result;
return MaybeTagPointer(result);
}
extern "C" size_t malloc_usable_size(const void* mem) {
auto dispatch_table = GetDispatchTable();
mem = MaybeUntagAndCheckPointer(mem);
if (__predict_false(dispatch_table != nullptr)) {
return dispatch_table->malloc_usable_size(mem);
}
@ -127,45 +133,52 @@ extern "C" size_t malloc_usable_size(const void* mem) {
extern "C" void* memalign(size_t alignment, size_t bytes) {
auto dispatch_table = GetDispatchTable();
if (__predict_false(dispatch_table != nullptr)) {
return dispatch_table->memalign(alignment, bytes);
return MaybeTagPointer(dispatch_table->memalign(alignment, bytes));
}
void* result = Malloc(memalign)(alignment, bytes);
if (__predict_false(result == nullptr)) {
warning_log("memalign(%zu, %zu) failed: returning null pointer", alignment, bytes);
}
return result;
return MaybeTagPointer(result);
}
extern "C" int posix_memalign(void** memptr, size_t alignment, size_t size) {
auto dispatch_table = GetDispatchTable();
int result;
if (__predict_false(dispatch_table != nullptr)) {
return dispatch_table->posix_memalign(memptr, alignment, size);
result = dispatch_table->posix_memalign(memptr, alignment, size);
} else {
result = Malloc(posix_memalign)(memptr, alignment, size);
}
return Malloc(posix_memalign)(memptr, alignment, size);
if (result == 0) {
*memptr = MaybeTagPointer(*memptr);
}
return result;
}
extern "C" void* aligned_alloc(size_t alignment, size_t size) {
auto dispatch_table = GetDispatchTable();
if (__predict_false(dispatch_table != nullptr)) {
return dispatch_table->aligned_alloc(alignment, size);
return MaybeTagPointer(dispatch_table->aligned_alloc(alignment, size));
}
void* result = Malloc(aligned_alloc)(alignment, size);
if (__predict_false(result == nullptr)) {
warning_log("aligned_alloc(%zu, %zu) failed: returning null pointer", alignment, size);
}
return result;
return MaybeTagPointer(result);
}
extern "C" __attribute__((__noinline__)) void* realloc(void* old_mem, size_t bytes) {
auto dispatch_table = GetDispatchTable();
old_mem = MaybeUntagAndCheckPointer(old_mem);
if (__predict_false(dispatch_table != nullptr)) {
return dispatch_table->realloc(old_mem, bytes);
return MaybeTagPointer(dispatch_table->realloc(old_mem, bytes));
}
void* result = Malloc(realloc)(old_mem, bytes);
if (__predict_false(result == nullptr && bytes != 0)) {
warning_log("realloc(%p, %zu) failed: returning null pointer", old_mem, bytes);
}
return result;
return MaybeTagPointer(result);
}
extern "C" void* reallocarray(void* old_mem, size_t item_count, size_t item_size) {
@ -183,42 +196,66 @@ extern "C" void* reallocarray(void* old_mem, size_t item_count, size_t item_size
extern "C" void* pvalloc(size_t bytes) {
auto dispatch_table = GetDispatchTable();
if (__predict_false(dispatch_table != nullptr)) {
return dispatch_table->pvalloc(bytes);
return MaybeTagPointer(dispatch_table->pvalloc(bytes));
}
void* result = Malloc(pvalloc)(bytes);
if (__predict_false(result == nullptr)) {
warning_log("pvalloc(%zu) failed: returning null pointer", bytes);
}
return result;
return MaybeTagPointer(result);
}
extern "C" void* valloc(size_t bytes) {
auto dispatch_table = GetDispatchTable();
if (__predict_false(dispatch_table != nullptr)) {
return dispatch_table->valloc(bytes);
return MaybeTagPointer(dispatch_table->valloc(bytes));
}
void* result = Malloc(valloc)(bytes);
if (__predict_false(result == nullptr)) {
warning_log("valloc(%zu) failed: returning null pointer", bytes);
}
return result;
return MaybeTagPointer(result);
}
#endif
// =============================================================================
// Bundles a client-provided malloc_iterate callback together with its opaque
// argument, so CallbackWrapper() can forward both through the single void*
// cookie accepted by the underlying allocator's iterate API.
struct CallbackWrapperArg {
void (*callback)(uintptr_t base, size_t size, void* arg);
void* arg;
};
// Trampoline used by malloc_iterate(): re-applies the heap pointer tag to the
// allocation base (when tagging is active) before invoking the client's
// callback, so callers observe the same tagged addresses malloc() returns.
void CallbackWrapper(uintptr_t base, size_t size, void* arg) {
CallbackWrapperArg* wrapper_arg = reinterpret_cast<CallbackWrapperArg*>(arg);
wrapper_arg->callback(
reinterpret_cast<uintptr_t>(MaybeTagPointer(reinterpret_cast<void*>(base))),
size, wrapper_arg->arg);
}
// =============================================================================
// Exported for use by libmemunreachable.
// =============================================================================
// Calls callback for every allocation in the anonymous heap mapping
// [base, base+size). Must be called between malloc_disable and malloc_enable.
// [base, base+size). Must be called between malloc_disable and malloc_enable.
// `base` in this can take either a tagged or untagged pointer, but we always
// provide a tagged pointer to the `base` argument of `callback` if the kernel
// supports tagged pointers.
extern "C" int malloc_iterate(uintptr_t base, size_t size,
void (*callback)(uintptr_t base, size_t size, void* arg), void* arg) {
auto dispatch_table = GetDispatchTable();
// Wrap the malloc_iterate callback we were provided, in order to provide
// pointer tagging support.
CallbackWrapperArg wrapper_arg;
wrapper_arg.callback = callback;
wrapper_arg.arg = arg;
uintptr_t untagged_base =
reinterpret_cast<uintptr_t>(UntagPointer(reinterpret_cast<void*>(base)));
if (__predict_false(dispatch_table != nullptr)) {
return dispatch_table->malloc_iterate(base, size, callback, arg);
return dispatch_table->malloc_iterate(
untagged_base, size, CallbackWrapper, &wrapper_arg);
}
return Malloc(malloc_iterate)(base, size, callback, arg);
return Malloc(malloc_iterate)(
untagged_base, size, CallbackWrapper, &wrapper_arg);
}
// Disable calls to malloc so malloc_iterate gets a consistent view of

View file

@ -0,0 +1,128 @@
/*
* Copyright (C) 2019 The Android Open Source Project
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#pragma once
#include <stdlib.h>
#include <stdint.h>
#include <sys/reboot.h>
#include <unistd.h>
#include <async_safe/log.h>
#include <private/bionic_globals.h>
// We choose a static pointer tag here for performance reasons. Dynamic tagging
// doesn't improve our detection, and simply hurts performance. This tag is
// deliberately chosen to always point to inaccessible memory on a standard
// 64-bit userspace process, and be easily identifiable by developers. This tag
// is also deliberately different from the standard pattern-init tag (0xAA), as
// to be distinguishable from an uninitialized-pointer access. The first and
// second nibbles are also deliberately designed to be the bitset-mirror of each
// other (0b1100, 0b0011) in order to reduce incidental matches. Users must not
// rely on the implementation-defined value of this pointer tag, as it may
// change.
// The tag value OR-ed into the top byte of heap pointers; see rationale above.
static constexpr uintptr_t POINTER_TAG = 0x3C;
// Bit position of the tag: the most significant byte of a 64-bit pointer
// (bits 56-63), i.e. the byte ignored by aarch64 top-byte-ignore.
static constexpr unsigned TAG_SHIFT = 56;
#if defined(__aarch64__)
// Low 56 bits select the real address; the top 8 bits hold the tag.
static constexpr uintptr_t ADDRESS_MASK = (static_cast<uintptr_t>(1) << TAG_SHIFT) - 1;
static constexpr uintptr_t TAG_MASK = static_cast<uintptr_t>(0xFF) << TAG_SHIFT;
#endif // defined(__aarch64__)
// Return a forcibly-tagged pointer. The tag applied is the process-wide
// heap_pointer_tag published in __libc_globals (zero when tagging is
// disabled, making this an identity transform). aarch64-only; other
// architectures abort.
static inline void* TagPointer(void* ptr) {
#if defined(__aarch64__)
// OR the tag into the top byte; assumes `ptr` carries no tag on entry.
return reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(ptr) |
reinterpret_cast<uintptr_t>(__libc_globals->heap_pointer_tag));
#else
// NOTE(review): relies on async_safe_fatal not returning (no return value
// follows it on this path) -- it aborts the process.
async_safe_fatal("Attempting to tag a pointer (%p) on non-aarch64.", ptr);
#endif
}
#if defined(__aarch64__) && !__has_feature(hwaddress_sanitizer)
// Strip the top-byte tag from `ptr` unconditionally. The existing tag is not
// validated in any way; callers wanting a checked untag should use
// MaybeUntagAndCheckPointer() instead.
static inline void* UntagPointer(const volatile void* ptr) {
  const uintptr_t raw_address = reinterpret_cast<uintptr_t>(ptr);
  return reinterpret_cast<void*>(raw_address & ADDRESS_MASK);
}
// Called by MaybeUntagAndCheckPointer() when a pointer's tag does not match
// the current heap tag. Aborts on a genuine mismatch, but tolerates the one
// legitimate case: tagging has since been disabled (heap tag zeroed) and the
// pointer still carries the tag we originally applied.
static void* SlowPathPointerCheck(const volatile void* ptr) {
uintptr_t ptr_tag = reinterpret_cast<uintptr_t>(ptr) & TAG_MASK;
uintptr_t heap_tag = reinterpret_cast<uintptr_t>(__libc_globals->heap_pointer_tag);
// Applications may disable pointer tagging, which will be propagated to
// libc in the zygote. This means that there may already be tagged heap
// allocations that will fail when checked against the zero-ed heap tag. The
// check below allows us to turn *off* pointer tagging and still allow
// tagged heap allocations to be freed, as long as they're using *our* tag.
if (__predict_false(heap_tag != 0 || ptr_tag != (POINTER_TAG << TAG_SHIFT))) {
// TODO(b/145604058) - Upstream tagged pointers documentation and provide
// a link to it in the abort message here.
async_safe_fatal("Pointer tag for %p was truncated.", ptr);
}
return UntagPointer(ptr);
}
// Return a tagged pointer iff the kernel supports tagged pointers, and `ptr` is
// non-null.
static inline void* MaybeTagPointer(void* ptr) {
if (__predict_true(ptr != nullptr)) {
return TagPointer(ptr);
}
return ptr;
}
// Untag the pointer, and check the pointer tag iff the kernel supports tagged
// pointers. If the tag is incorrect, trap.
static inline void* MaybeUntagAndCheckPointer(const volatile void* ptr) {
// free(nullptr) and friends must remain valid: never tag-check null.
if (__predict_false(ptr == nullptr)) {
return nullptr;
}
uintptr_t ptr_tag = reinterpret_cast<uintptr_t>(ptr) & TAG_MASK;
uintptr_t heap_tag = reinterpret_cast<uintptr_t>(__libc_globals->heap_pointer_tag);
if (__predict_false(heap_tag != ptr_tag)) {
// A mismatch is either fatal or the one allowed disable-tagging case;
// SlowPathPointerCheck() decides, aborting if appropriate.
return SlowPathPointerCheck(ptr);
}
return UntagPointer(ptr);
}
#else // defined(__aarch64__) && !__has_feature(hwaddress_sanitizer)
// Tagged pointers are unsupported in this configuration (non-aarch64 or
// HWASan builds): just shed the cv-qualifiers and return the address as-is.
static inline void* UntagPointer(const volatile void* ptr) {
  void* unqualified = const_cast<void*>(ptr);
  return unqualified;
}
// No-op in this configuration: pointers are never tagged when TBI support is
// compiled out.
static inline void* MaybeTagPointer(void* ptr) {
  void* untouched = ptr;
  return untouched;
}
// No-op in this configuration: with tagging compiled out there is no tag to
// check, so simply drop the cv-qualifiers and hand the pointer back.
static inline void* MaybeUntagAndCheckPointer(const volatile void* ptr) {
  void* unchecked = const_cast<void*>(ptr);
  return unchecked;
}
#endif // defined(__aarch64__) && !__has_feature(hwaddress_sanitizer)

View file

@ -43,6 +43,7 @@
#include <android-base/file.h>
#include <android-base/properties.h>
#include <android-base/stringprintf.h>
#include <bionic/malloc_tagged_pointers.h>
#include <private/bionic_malloc_dispatch.h>
#include <private/MallocXmlElem.h>
@ -883,6 +884,7 @@ ssize_t debug_malloc_backtrace(void* pointer, uintptr_t* frames, size_t max_fram
if (!(g_debug->config().options() & BACKTRACE)) {
return 0;
}
pointer = UntagPointer(pointer);
return PointerData::GetFrames(pointer, frames, max_frames);
}

View file

@ -103,6 +103,9 @@ enum HeapTaggingLevel {
// Disable heap tagging. The program must use prctl(PR_SET_TAGGED_ADDR_CTRL) to disable memory tag
// checks before disabling heap tagging. Heap tagging may not be re-enabled after being disabled.
M_HEAP_TAGGING_LEVEL_NONE = 0,
// Address-only tagging. Heap pointers have a non-zero tag in the most significant byte which is
// checked in free(). Memory accesses ignore the tag.
M_HEAP_TAGGING_LEVEL_TBI = 1,
};
// Manipulates bionic-specific handling of memory allocation APIs such as

View file

@ -44,6 +44,7 @@
struct libc_globals {
vdso_entry vdso[VDSO_END];
long setjmp_cookie;
uintptr_t heap_pointer_tag;
// In order to allow a complete switch between dispatch tables without
// the need for copying each function by function in the structure,

View file

@ -211,6 +211,9 @@ cc_test_library {
"libprocinfo",
"libsystemproperties",
],
srcs: [
"tagged_pointers_test.cpp",
],
},
},

View file

@ -0,0 +1,76 @@
/*
* Copyright (C) 2014 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include <sys/prctl.h>
#include "platform/bionic/malloc.h"
#include "utils.h"
#include <bionic/malloc_tagged_pointers.h>
// Returns true iff the kernel currently accepts tagged addresses in syscall
// arguments for this process (aarch64 TBI support enabled via prctl).
// Always false on non-aarch64 targets.
static bool KernelSupportsTaggedPointers() {
#ifdef __aarch64__
// Fallback definitions for toolchains whose uapi headers predate the tagged
// address ABI (Linux 5.4). Guarded so newer headers win without warnings.
// Fix: the original defined only PR_SET_TAGGED_ADDR_CTRL (which this
// function never uses) yet called prctl with PR_GET_TAGGED_ADDR_CTRL,
// which old headers also lack -- define the constant actually used.
#ifndef PR_SET_TAGGED_ADDR_CTRL
#define PR_SET_TAGGED_ADDR_CTRL 55
#endif
#ifndef PR_GET_TAGGED_ADDR_CTRL
#define PR_GET_TAGGED_ADDR_CTRL 56
#endif
#ifndef PR_TAGGED_ADDR_ENABLE
#define PR_TAGGED_ADDR_ENABLE (1UL << 0)
#endif
  // Negative result: prctl unsupported by this kernel; otherwise the return
  // value is a flag word containing PR_TAGGED_ADDR_ENABLE when active.
  int res = prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0);
  return res >= 0 && (res & PR_TAGGED_ADDR_ENABLE);
#else
  return false;
#endif
}
// End-to-end check of the heap pointer-tagging lifecycle on aarch64:
//  1. fresh allocations carry a non-zero top-byte tag,
//  2. freeing a manually-untagged pointer aborts with a tag-truncation error,
//  3. tagging can be disabled via android_mallopt, after which previously
//     tagged pointers remain freeable (tag checks relaxed) and new
//     allocations come back untagged,
//  4. tagging cannot be re-enabled once disabled.
TEST(tagged_pointers, check_tagged_pointer_dies) {
if (!KernelSupportsTaggedPointers()) {
GTEST_SKIP() << "Kernel doesn't support tagged pointers.";
}
#ifdef __aarch64__
void *x = malloc(1);
// Ensure that `x` has a pointer tag.
EXPECT_NE(reinterpret_cast<uintptr_t>(x) >> 56, 0u);
x = untag_address(x);
EXPECT_DEATH(free(x), "Pointer tag for 0x[a-zA-Z0-9]* was truncated");
// Explicitly setting the TBI level keeps checked-tagging behavior active.
HeapTaggingLevel tag_level = M_HEAP_TAGGING_LEVEL_TBI;
EXPECT_TRUE(android_mallopt(M_SET_HEAP_TAGGING_LEVEL, &tag_level, sizeof(tag_level)));
EXPECT_DEATH(free(untag_address(malloc(1))), "Pointer tag for 0x[a-zA-Z0-9]* was truncated");
// Allocate two tagged pointers before disabling tagging, to verify
// backward compatibility of free() afterwards.
x = malloc(1);
void *y = malloc(1);
// Disable heap tagging.
tag_level = M_HEAP_TAGGING_LEVEL_NONE;
EXPECT_TRUE(android_mallopt(M_SET_HEAP_TAGGING_LEVEL, &tag_level, sizeof(tag_level)));
// Ensure an older tagged pointer can still be freed.
free(x);
// Tag mismatch is not detected on old pointers.
free(untag_address(y));
// New pointers are not tagged.
x = malloc(1);
EXPECT_EQ(untag_address(x), x);
free(x);
// Switching back to checked mode is not possible.
tag_level = M_HEAP_TAGGING_LEVEL_TBI;
EXPECT_FALSE(android_mallopt(M_SET_HEAP_TAGGING_LEVEL, &tag_level, sizeof(tag_level)));
// We remain in the unchecked mode.
x = malloc(1);
EXPECT_EQ(untag_address(x), x);
free(x);
#endif // defined(__aarch64__)
}

View file

@ -68,10 +68,8 @@ static inline bool running_with_hwasan() {
static inline void* untag_address(void* addr) {
#if defined(__LP64__)
if (running_with_hwasan()) {
constexpr uintptr_t mask = (static_cast<uintptr_t>(1) << 56) - 1;
addr = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(addr) & mask);
}
constexpr uintptr_t mask = (static_cast<uintptr_t>(1) << 56) - 1;
addr = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(addr) & mask);
#endif
return addr;
}