Merge "Allow android_mallopt(M_SET_HEAP_TAGGING_LEVEL) to control scudo heap tagging." am: 935aae907c
Change-Id: I1429607de3bf0e60a282a48549432ee7d7050afb
commit f052569012
6 changed files with 144 additions and 35 deletions
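For context, a minimal sketch of how a process would drive the new control path. This example is not part of the change; it assumes a bionic platform build where android_mallopt() and the HeapTaggingLevel enum from <platform/bionic/malloc.h> are visible, mirroring the usage exercised by the tests further down in this diff:

#include <platform/bionic/malloc.h>

#include <stdio.h>

int main() {
  // Tagging can only be lowered: re-enabling after NONE, or switching between
  // TBI and ASYNC, is rejected by SetHeapTaggingLevel() (see the diff below).
  HeapTaggingLevel level = M_HEAP_TAGGING_LEVEL_NONE;
  if (!android_mallopt(M_SET_HEAP_TAGGING_LEVEL, &level, sizeof(level))) {
    fprintf(stderr, "M_SET_HEAP_TAGGING_LEVEL rejected\n");
    return 1;
  }
  // From here on, scudo stops tagging new allocations and free() no longer
  // checks pointer tags.
  return 0;
}
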
@@ -33,11 +33,11 @@
#include <platform/bionic/malloc.h>
#include <platform/bionic/mte_kernel.h>

extern "C" void scudo_malloc_disable_memory_tagging();

static HeapTaggingLevel heap_tagging_level = M_HEAP_TAGGING_LEVEL_NONE;

void SetDefaultHeapTaggingLevel() {
// Allow the kernel to accept tagged pointers in syscall arguments. This is a no-op (kernel
// returns -EINVAL) if the kernel doesn't understand the prctl.
#if defined(__aarch64__)
#define PR_SET_TAGGED_ADDR_CTRL 55
#define PR_TAGGED_ADDR_ENABLE (1UL << 0)
@@ -47,15 +47,23 @@ void SetDefaultHeapTaggingLevel() {
// syscall arguments.
if (prctl(PR_SET_TAGGED_ADDR_CTRL,
PR_TAGGED_ADDR_ENABLE | PR_MTE_TCF_ASYNC | (1 << PR_MTE_EXCL_SHIFT), 0, 0, 0) == 0) {
heap_tagging_level = M_HEAP_TAGGING_LEVEL_ASYNC;
return;
}
#endif // ANDROID_EXPERIMENTAL_MTE

// Allow the kernel to accept tagged pointers in syscall arguments. This is a no-op (kernel
// returns -EINVAL) if the kernel doesn't understand the prctl.
if (prctl(PR_SET_TAGGED_ADDR_CTRL, PR_TAGGED_ADDR_ENABLE, 0, 0, 0) == 0) {
#if !__has_feature(hwaddress_sanitizer)
heap_tagging_level = M_HEAP_TAGGING_LEVEL_TBI;
__libc_globals.mutate([](libc_globals* globals) {
globals->heap_pointer_tag = reinterpret_cast<uintptr_t>(POINTER_TAG) << TAG_SHIFT;
// Arrange for us to set pointer tags to POINTER_TAG, check tags on
// deallocation and untag when passing pointers to the allocator.
globals->heap_pointer_tag = (reinterpret_cast<uintptr_t>(POINTER_TAG) << TAG_SHIFT) |
(0xffull << CHECK_SHIFT) | (0xffull << UNTAG_SHIFT);
});
#endif // hwaddress_sanitizer
}
#endif // aarch64
}
@@ -66,16 +74,22 @@ bool SetHeapTaggingLevel(void* arg, size_t arg_size) {
}

auto tag_level = *reinterpret_cast<HeapTaggingLevel*>(arg);
if (tag_level == heap_tagging_level) {
return true;
}

switch (tag_level) {
case M_HEAP_TAGGING_LEVEL_NONE:
break;
case M_HEAP_TAGGING_LEVEL_TBI:
case M_HEAP_TAGGING_LEVEL_ASYNC:
if (heap_tagging_level == M_HEAP_TAGGING_LEVEL_NONE) {
error_log(
"SetHeapTaggingLevel: re-enabling tagging after it was disabled is not supported");
return false;
} else {
error_log("SetHeapTaggingLevel: switching between TBI and ASYNC is not supported");
}
break;
return false;
default:
error_log("SetHeapTaggingLevel: unknown tagging level");
return false;
@@ -83,8 +97,16 @@ bool SetHeapTaggingLevel(void* arg, size_t arg_size) {
heap_tagging_level = tag_level;
info_log("SetHeapTaggingLevel: tag level set to %d", tag_level);

if (heap_tagging_level == M_HEAP_TAGGING_LEVEL_NONE && __libc_globals->heap_pointer_tag != 0) {
__libc_globals.mutate([](libc_globals* globals) { globals->heap_pointer_tag = 0; });
if (heap_tagging_level == M_HEAP_TAGGING_LEVEL_NONE) {
#if defined(USE_SCUDO)
scudo_malloc_disable_memory_tagging();
#endif
__libc_globals.mutate([](libc_globals* globals) {
// Preserve the untag mask (we still want to untag pointers when passing them to the
// allocator if we were doing so before), but clear the fixed tag and the check mask,
// so that pointers are no longer tagged and checks no longer happen.
globals->heap_pointer_tag &= 0xffull << UNTAG_SHIFT;
});
}

return true;
@@ -47,44 +47,62 @@
// rely on the implementation-defined value of this pointer tag, as it may
// change.
static constexpr uintptr_t POINTER_TAG = 0x3C;
static constexpr unsigned UNTAG_SHIFT = 40;
static constexpr unsigned CHECK_SHIFT = 48;
static constexpr unsigned TAG_SHIFT = 56;
#if defined(__aarch64__)
static constexpr uintptr_t ADDRESS_MASK = (static_cast<uintptr_t>(1) << TAG_SHIFT) - 1;
static constexpr uintptr_t TAG_MASK = static_cast<uintptr_t>(0xFF) << TAG_SHIFT;

static inline uintptr_t FixedPointerTag() {
return __libc_globals->heap_pointer_tag & TAG_MASK;
}

static inline uintptr_t PointerCheckMask() {
return (__libc_globals->heap_pointer_tag << (TAG_SHIFT - CHECK_SHIFT)) & TAG_MASK;
}

static inline uintptr_t PointerUntagMask() {
return ~(__libc_globals->heap_pointer_tag << (TAG_SHIFT - UNTAG_SHIFT));
}
#endif // defined(__aarch64__)

// Return a forcibly-tagged pointer.
static inline void* TagPointer(void* ptr) {
#if defined(__aarch64__)
return reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(ptr) |
reinterpret_cast<uintptr_t>(__libc_globals->heap_pointer_tag));
return reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(ptr) | FixedPointerTag());
#else
async_safe_fatal("Attempting to tag a pointer (%p) on non-aarch64.", ptr);
#endif
}

#if defined(__aarch64__) && !__has_feature(hwaddress_sanitizer)
#if defined(__aarch64__)
// Return a forcibly-untagged pointer. The pointer tag is not checked for
// validity.
static inline void* UntagPointer(const volatile void* ptr) {
return reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(ptr) & ADDRESS_MASK);
}

static void* SlowPathPointerCheck(const volatile void* ptr) {
uintptr_t ptr_tag = reinterpret_cast<uintptr_t>(ptr) & TAG_MASK;
uintptr_t heap_tag = reinterpret_cast<uintptr_t>(__libc_globals->heap_pointer_tag);
// Untag the pointer, and check the pointer tag iff the kernel supports tagged pointers and the
// pointer tag isn't being used by HWASAN or MTE. If the tag is incorrect, trap.
static inline void* MaybeUntagAndCheckPointer(const volatile void* ptr) {
if (__predict_false(ptr == nullptr)) {
return nullptr;
}

uintptr_t ptr_int = reinterpret_cast<uintptr_t>(ptr);

// Applications may disable pointer tagging, which will be propagated to
// libc in the zygote. This means that there may already be tagged heap
// allocations that will fail when checked against the zero-ed heap tag. The
// check bellow allows us to turn *off* pointer tagging and still allow
// tagged heap allocations to be freed, as long as they're using *our* tag.
if (__predict_false(heap_tag != 0 || ptr_tag != (POINTER_TAG << TAG_SHIFT))) {
// check below allows us to turn *off* pointer tagging (by setting PointerCheckMask() and
// FixedPointerTag() to zero) and still allow tagged heap allocations to be freed.
if ((ptr_int & PointerCheckMask()) != FixedPointerTag()) {
// TODO(b/145604058) - Upstream tagged pointers documentation and provide
// a link to it in the abort message here.
async_safe_fatal("Pointer tag for %p was truncated.", ptr);
}
return UntagPointer(ptr);
return reinterpret_cast<void*>(ptr_int & PointerUntagMask());
}

// Return a tagged pointer iff the kernel supports tagged pointers, and `ptr` is
@@ -96,23 +114,7 @@ static inline void* MaybeTagPointer(void* ptr) {
return ptr;
}

// Untag the pointer, and check the pointer tag iff the kernel supports tagged
// pointers. If the tag is incorrect, trap.
static inline void* MaybeUntagAndCheckPointer(const volatile void* ptr) {
if (__predict_false(ptr == nullptr)) {
return nullptr;
}

uintptr_t ptr_tag = reinterpret_cast<uintptr_t>(ptr) & TAG_MASK;
uintptr_t heap_tag = reinterpret_cast<uintptr_t>(__libc_globals->heap_pointer_tag);

if (__predict_false(heap_tag != ptr_tag)) {
return SlowPathPointerCheck(ptr);
}
return UntagPointer(ptr);
}

#else // defined(__aarch64__) && !__has_feature(hwaddress_sanitizer)
#else // defined(__aarch64__)
static inline void* UntagPointer(const volatile void* ptr) {
return const_cast<void*>(ptr);
}
@@ -125,4 +127,4 @@ static inline void* MaybeUntagAndCheckPointer(const volatile void* ptr) {
return const_cast<void *>(ptr);
}

#endif // defined(__aarch64__) && !__has_feature(hwaddress_sanitizer)
#endif // defined(__aarch64__)
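As an aside, the mask arithmetic in FixedPointerTag()/PointerCheckMask()/PointerUntagMask() above packs three fields into the single heap_pointer_tag word. A standalone sketch (illustration only, reusing the constants from this header; the real code reads __libc_globals and this assumes a 64-bit uintptr_t as on aarch64):

#include <cstdint>
#include <cstdio>

int main() {
  constexpr uintptr_t POINTER_TAG = 0x3C;
  constexpr unsigned UNTAG_SHIFT = 40, CHECK_SHIFT = 48, TAG_SHIFT = 56;
  constexpr uintptr_t TAG_MASK = static_cast<uintptr_t>(0xFF) << TAG_SHIFT;

  // TBI mode packs: fixed tag in bits 56-63, check mask in bits 48-55,
  // untag mask in bits 40-47.
  uintptr_t heap_pointer_tag = (POINTER_TAG << TAG_SHIFT) |
                               (0xffull << CHECK_SHIFT) | (0xffull << UNTAG_SHIFT);

  uintptr_t fixed_tag  = heap_pointer_tag & TAG_MASK;                                 // 0x3C << 56
  uintptr_t check_mask = (heap_pointer_tag << (TAG_SHIFT - CHECK_SHIFT)) & TAG_MASK;  // 0xFF << 56
  uintptr_t untag_mask = ~(heap_pointer_tag << (TAG_SHIFT - UNTAG_SHIFT));            // clears bits 56-63

  printf("fixed=%016zx check=%016zx untag=%016zx\n",
         static_cast<size_t>(fixed_tag), static_cast<size_t>(check_mask),
         static_cast<size_t>(untag_mask));
  return 0;
}

Dropping to M_HEAP_TAGGING_LEVEL_NONE then amounts to clearing the tag and check fields while keeping the untag field, which is what the globals->heap_pointer_tag &= 0xffull << UNTAG_SHIFT line in SetHeapTaggingLevel above does.
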
@@ -114,6 +114,8 @@ enum HeapTaggingLevel {
// Address-only tagging. Heap pointers have a non-zero tag in the most significant byte which is
// checked in free(). Memory accesses ignore the tag.
M_HEAP_TAGGING_LEVEL_TBI = 1,
// Enable heap tagging if supported, at a level appropriate for asynchronous memory tag checks.
M_HEAP_TAGGING_LEVEL_ASYNC = 2,
};

// Manipulates bionic-specific handling of memory allocation APIs such as
@@ -48,4 +48,7 @@
#define PR_MTE_EXCL_SHIFT 3
#define PR_MTE_EXCL_MASK (0xffffUL << PR_MTE_EXCL_SHIFT)

#define SEGV_MTEAERR 6
#define SEGV_MTESERR 7

#endif
@@ -25,6 +25,8 @@
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/auxv.h>
#include <sys/prctl.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>
@@ -40,7 +42,10 @@

#if defined(__BIONIC__)

#include "SignalUtils.h"

#include "platform/bionic/malloc.h"
#include "platform/bionic/mte_kernel.h"
#include "platform/bionic/reserved_signals.h"
#include "private/bionic_config.h"
@@ -1196,3 +1201,70 @@ TEST(android_mallopt, set_allocation_limit_multiple_threads) {
GTEST_SKIP() << "bionic extension";
#endif
}

#if defined(__BIONIC__) && defined(__aarch64__) && defined(ANDROID_EXPERIMENTAL_MTE)
template <int SiCode> void CheckSiCode(int, siginfo_t* info, void*) {
if (info->si_code != SiCode) {
_exit(2);
}
_exit(1);
}

static bool SetTagCheckingLevel(int level) {
int tagged_addr_ctrl = prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0);
if (tagged_addr_ctrl < 0) {
return false;
}

tagged_addr_ctrl = (tagged_addr_ctrl & ~PR_MTE_TCF_MASK) | level;
return prctl(PR_SET_TAGGED_ADDR_CTRL, tagged_addr_ctrl, 0, 0, 0) == 0;
}
#endif

TEST(android_mallopt, tag_level) {
#if defined(__BIONIC__) && defined(__aarch64__) && defined(ANDROID_EXPERIMENTAL_MTE)
if (!(getauxval(AT_HWCAP2) & HWCAP2_MTE)) {
GTEST_SKIP() << "requires MTE support";
return;
}

std::unique_ptr<int[]> p = std::make_unique<int[]>(4);

// First, check that memory tagging is enabled and the default tag checking level is async.
// We assume that scudo is used on all MTE enabled hardware; scudo inserts a header with a
// mismatching tag before each allocation.
EXPECT_EXIT(
{
ScopedSignalHandler ssh(SIGSEGV, CheckSiCode<SEGV_MTEAERR>, SA_SIGINFO);
p[-1] = 42;
},
testing::ExitedWithCode(1), "");

EXPECT_TRUE(SetTagCheckingLevel(PR_MTE_TCF_SYNC));
EXPECT_EXIT(
{
ScopedSignalHandler ssh(SIGSEGV, CheckSiCode<SEGV_MTESERR>, SA_SIGINFO);
p[-1] = 42;
},
testing::ExitedWithCode(1), "");

EXPECT_TRUE(SetTagCheckingLevel(PR_MTE_TCF_NONE));
volatile int oob ATTRIBUTE_UNUSED = p[-1];

HeapTaggingLevel tag_level = M_HEAP_TAGGING_LEVEL_TBI;
EXPECT_FALSE(android_mallopt(M_SET_HEAP_TAGGING_LEVEL, &tag_level, sizeof(tag_level)));

tag_level = M_HEAP_TAGGING_LEVEL_NONE;
EXPECT_TRUE(android_mallopt(M_SET_HEAP_TAGGING_LEVEL, &tag_level, sizeof(tag_level)));
std::unique_ptr<int[]> p2 = std::make_unique<int[]>(4);
EXPECT_EQ(0u, reinterpret_cast<uintptr_t>(p2.get()) >> 56);

tag_level = M_HEAP_TAGGING_LEVEL_ASYNC;
EXPECT_FALSE(android_mallopt(M_SET_HEAP_TAGGING_LEVEL, &tag_level, sizeof(tag_level)));

tag_level = M_HEAP_TAGGING_LEVEL_NONE;
EXPECT_TRUE(android_mallopt(M_SET_HEAP_TAGGING_LEVEL, &tag_level, sizeof(tag_level)));
#else
GTEST_SKIP() << "arm64 only";
#endif
}
@@ -18,6 +18,7 @@
#include <sys/prctl.h>

#include "platform/bionic/malloc.h"
#include "platform/bionic/mte.h"
#include "utils.h"

#include <bionic/malloc_tagged_pointers.h>
@@ -39,6 +40,10 @@ TEST(tagged_pointers, check_tagged_pointer_dies) {
}

#ifdef __aarch64__
if (mte_supported()) {
GTEST_SKIP() << "Tagged pointers are not used on MTE hardware.";
}

void *x = malloc(1);

// Ensure that `x` has a pointer tag.
@@ -51,6 +56,9 @@ TEST(tagged_pointers, check_tagged_pointer_dies) {
EXPECT_TRUE(android_mallopt(M_SET_HEAP_TAGGING_LEVEL, &tag_level, sizeof(tag_level)));
EXPECT_DEATH(free(untag_address(malloc(1))), "Pointer tag for 0x[a-zA-Z0-9]* was truncated");

tag_level = M_HEAP_TAGGING_LEVEL_ASYNC;
EXPECT_FALSE(android_mallopt(M_SET_HEAP_TAGGING_LEVEL, &tag_level, sizeof(tag_level)));

x = malloc(1);
void *y = malloc(1);
// Disable heap tagging.