diff --git a/common/support/Android.bp b/common/support/Android.bp
index 8aea306dae..730798d840 100644
--- a/common/support/Android.bp
+++ b/common/support/Android.bp
@@ -18,6 +18,11 @@ cc_library_static {
         "android.hardware.common-V2-ndk_platform",
         "libcutils",
     ],
+    apex_available: [
+        "//apex_available:platform",
+        "com.android.neuralnetworks",
+    ],
+    min_sdk_version: "29",
 }
 
 cc_test {
diff --git a/neuralnetworks/aidl/utils/Android.bp b/neuralnetworks/aidl/utils/Android.bp
index ad961cfe99..0ccc711ecf 100644
--- a/neuralnetworks/aidl/utils/Android.bp
+++ b/neuralnetworks/aidl/utils/Android.bp
@@ -31,6 +31,8 @@ cc_library_static {
     export_include_dirs: ["include"],
    cflags: ["-Wthread-safety"],
     static_libs: [
+        "android.hardware.graphics.common-V2-ndk_platform",
+        "libaidlcommonsupport",
         "libarect",
         "neuralnetworks_types",
         "neuralnetworks_utils_hal_common",
@@ -51,7 +53,9 @@ cc_test {
     ],
     static_libs: [
         "android.hardware.common-V2-ndk_platform",
+        "android.hardware.graphics.common-V2-ndk_platform",
         "android.hardware.neuralnetworks-V1-ndk_platform",
+        "libaidlcommonsupport",
         "libgmock",
         "libneuralnetworks_common",
         "neuralnetworks_types",
diff --git a/neuralnetworks/aidl/utils/src/Conversions.cpp b/neuralnetworks/aidl/utils/src/Conversions.cpp
index d5f7f81663..93ac51c233 100644
--- a/neuralnetworks/aidl/utils/src/Conversions.cpp
+++ b/neuralnetworks/aidl/utils/src/Conversions.cpp
@@ -16,8 +16,13 @@
 
 #include "Conversions.h"
 
+#include
+#include
 #include
+#include
+#include
 #include
+#include
 #include
 #include
 #include
@@ -125,28 +130,17 @@ struct NativeHandleDeleter {
 
 using UniqueNativeHandle = std::unique_ptr<native_handle_t, NativeHandleDeleter>;
 
-static GeneralResult<UniqueNativeHandle> nativeHandleFromAidlHandle(const NativeHandle& handle) {
-    std::vector<base::unique_fd> fds;
-    fds.reserve(handle.fds.size());
-    for (const auto& fd : handle.fds) {
-        auto duplicatedFd = NN_TRY(dupFd(fd.get()));
-        fds.emplace_back(duplicatedFd.release());
+GeneralResult<UniqueNativeHandle> nativeHandleFromAidlHandle(const NativeHandle& handle) {
+    auto nativeHandle = UniqueNativeHandle(dupFromAidl(handle));
+    if (nativeHandle.get() == nullptr) {
+        return NN_ERROR() << "android::dupFromAidl failed to convert the common::NativeHandle to a "
+                             "native_handle_t";
     }
-
-    constexpr size_t kIntMax = std::numeric_limits<int>::max();
-    CHECK_LE(handle.fds.size(), kIntMax);
-    CHECK_LE(handle.ints.size(), kIntMax);
-    native_handle_t* nativeHandle = native_handle_create(static_cast<int>(handle.fds.size()),
-                                                         static_cast<int>(handle.ints.size()));
-    if (nativeHandle == nullptr) {
-        return NN_ERROR() << "Failed to create native_handle";
+    if (!std::all_of(nativeHandle->data + 0, nativeHandle->data + nativeHandle->numFds,
+                     [](int fd) { return fd >= 0; })) {
+        return NN_ERROR() << "android::dupFromAidl returned an invalid native_handle_t";
     }
-    for (size_t i = 0; i < fds.size(); ++i) {
-        nativeHandle->data[i] = fds[i].release();
-    }
-    std::copy(handle.ints.begin(), handle.ints.end(), &nativeHandle->data[nativeHandle->numFds]);
-
-    return UniqueNativeHandle(nativeHandle);
+    return nativeHandle;
 }
 
 }  // anonymous namespace
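The library touched in the first hunk is presumably libaidlcommonsupport (the new static_libs entries below name it), now marked apex_available and min_sdk_version: "29" so it can be linked into the com.android.neuralnetworks APEX. It is what provides ::android::dupFromAidl() and ::android::dupToAidl() used in this file. Below is a minimal sketch of the ownership pattern the new nativeHandleFromAidlHandle() relies on; the NativeHandleDeleter body and the header path are not part of this diff, so both are assumptions here.

    // Sketch only, assuming NativeHandleDeleter closes and frees the cloned handle.
    // dupFromAidl() duplicates every fd in the AIDL NativeHandle and returns a freshly
    // allocated native_handle_t that the caller owns.
    #include <aidlcommonsupport/NativeHandle.h>  // assumed header for ::android::dupFromAidl / dupToAidl
    #include <cutils/native_handle.h>

    #include <memory>

    struct NativeHandleDeleter {
        void operator()(native_handle_t* handle) const {
            if (handle != nullptr) {
                native_handle_close(handle);   // close the duplicated fds
                native_handle_delete(handle);  // free the native_handle_t allocation
            }
        }
    };
    using UniqueNativeHandle = std::unique_ptr<native_handle_t, NativeHandleDeleter>;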
@@ -353,67 +347,66 @@ GeneralResult<MeasureTiming> unvalidatedConvert(bool measureTiming) {
     return measureTiming ? MeasureTiming::YES : MeasureTiming::NO;
 }
 
-static uint32_t roundUpToMultiple(uint32_t value, uint32_t multiple) {
-    return (value + multiple - 1) / multiple * multiple;
-}
-
 GeneralResult<SharedMemory> unvalidatedConvert(const aidl_hal::Memory& memory) {
-    VERIFY_NON_NEGATIVE(memory.size) << "Memory size must not be negative";
-    if (memory.size > std::numeric_limits<size_t>::max()) {
-        return NN_ERROR() << "Memory: size must be <= std::numeric_limits<size_t>::max()";
-    }
+    using Tag = aidl_hal::Memory::Tag;
+    switch (memory.getTag()) {
+        case Tag::ashmem: {
+            const auto& ashmem = memory.get<Tag::ashmem>();
+            VERIFY_NON_NEGATIVE(ashmem.size) << "Memory size must not be negative";
+            if (ashmem.size > std::numeric_limits<size_t>::max()) {
+                return NN_ERROR() << "Memory: size must be <= std::numeric_limits<size_t>::max()";
+            }
 
-    if (memory.name != "hardware_buffer_blob") {
-        return std::make_shared<const Memory>(Memory{
-                .handle = NN_TRY(unvalidatedConvertHelper(memory.handle)),
-                .size = static_cast<size_t>(memory.size),
-                .name = memory.name,
-        });
-    }
+            auto handle = Memory::Ashmem{
+                    .fd = NN_TRY(dupFd(ashmem.fd.get())),
+                    .size = static_cast<size_t>(ashmem.size),
+            };
+            return std::make_shared<const Memory>(Memory{.handle = std::move(handle)});
+        }
+        case Tag::mappableFile: {
+            const auto& mappableFile = memory.get<Tag::mappableFile>();
+            VERIFY_NON_NEGATIVE(mappableFile.length) << "Memory size must not be negative";
+            VERIFY_NON_NEGATIVE(mappableFile.offset) << "Memory offset must not be negative";
+            if (mappableFile.length > std::numeric_limits<size_t>::max()) {
+                return NN_ERROR() << "Memory: size must be <= std::numeric_limits<size_t>::max()";
+            }
+            if (mappableFile.offset > std::numeric_limits<size_t>::max()) {
+                return NN_ERROR() << "Memory: offset must be <= std::numeric_limits<size_t>::max()";
+            }
 
-    const auto size = static_cast<uint32_t>(memory.size);
-    const auto format = AHARDWAREBUFFER_FORMAT_BLOB;
-    const auto usage = AHARDWAREBUFFER_USAGE_CPU_READ_OFTEN | AHARDWAREBUFFER_USAGE_CPU_WRITE_OFTEN;
-    const uint32_t width = size;
-    const uint32_t height = 1;  // height is always 1 for BLOB mode AHardwareBuffer.
-    const uint32_t layers = 1;  // layers is always 1 for BLOB mode AHardwareBuffer.
+            const size_t size = static_cast<size_t>(mappableFile.length);
+            const int prot = mappableFile.prot;
+            const int fd = mappableFile.fd.get();
+            const size_t offset = static_cast<size_t>(mappableFile.offset);
 
-    const UniqueNativeHandle handle = NN_TRY(nativeHandleFromAidlHandle(memory.handle));
-    const native_handle_t* nativeHandle = handle.get();
+            return createSharedMemoryFromFd(size, prot, fd, offset);
+        }
+        case Tag::hardwareBuffer: {
+            const auto& hardwareBuffer = memory.get<Tag::hardwareBuffer>();
 
-    // AHardwareBuffer_createFromHandle() might fail because an allocator
-    // expects a specific stride value. In that case, we try to guess it by
-    // aligning the width to small powers of 2.
-    // TODO(b/174120849): Avoid stride assumptions.
-    AHardwareBuffer* hardwareBuffer = nullptr;
-    status_t status = UNKNOWN_ERROR;
-    for (uint32_t alignment : {1, 4, 32, 64, 128, 2, 8, 16}) {
-        const uint32_t stride = roundUpToMultiple(width, alignment);
-        AHardwareBuffer_Desc desc{
-                .width = width,
-                .height = height,
-                .layers = layers,
-                .format = format,
-                .usage = usage,
-                .stride = stride,
-        };
-        status = AHardwareBuffer_createFromHandle(&desc, nativeHandle,
-                                                  AHARDWAREBUFFER_CREATE_FROM_HANDLE_METHOD_CLONE,
-                                                  &hardwareBuffer);
-        if (status == NO_ERROR) {
-            break;
+            const UniqueNativeHandle handle =
+                    NN_TRY(nativeHandleFromAidlHandle(hardwareBuffer.handle));
+            const native_handle_t* nativeHandle = handle.get();
+
+            const AHardwareBuffer_Desc desc{
+                    .width = static_cast<uint32_t>(hardwareBuffer.description.width),
+                    .height = static_cast<uint32_t>(hardwareBuffer.description.height),
+                    .layers = static_cast<uint32_t>(hardwareBuffer.description.layers),
+                    .format = static_cast<uint32_t>(hardwareBuffer.description.format),
+                    .usage = static_cast<uint64_t>(hardwareBuffer.description.usage),
+                    .stride = static_cast<uint32_t>(hardwareBuffer.description.stride),
+            };
+            AHardwareBuffer* ahwb = nullptr;
+            const status_t status = AHardwareBuffer_createFromHandle(
+                    &desc, nativeHandle, AHARDWAREBUFFER_CREATE_FROM_HANDLE_METHOD_CLONE, &ahwb);
+            if (status != NO_ERROR) {
+                return NN_ERROR() << "createFromHandle failed";
+            }
+
+            return createSharedMemoryFromAHWB(ahwb, /*takeOwnership=*/true);
         }
     }
-    if (status != NO_ERROR) {
-        return NN_ERROR(ErrorStatus::GENERAL_FAILURE)
-               << "Can't create AHardwareBuffer from handle. Error: " << status;
-    }
-
-    return std::make_shared<const Memory>(Memory{
-            .handle = HardwareBufferHandle(hardwareBuffer, /*takeOwnership=*/true),
-            .size = static_cast<size_t>(memory.size),
-            .name = memory.name,
-    });
+    return NN_ERROR() << "Unrecognized Memory::Tag: " << memory.getTag();
 }
 
 GeneralResult<Timing> unvalidatedConvert(const aidl_hal::Timing& timing) {
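For reference, a short sketch of how the tagged union introduced by this change is built and queried on the AIDL side, using only the accessors exercised in the hunk above (Memory::make<Tag>, getTag(), get<Tag>()). The helper names and the standalone ashmem fd are hypothetical, and the generated AIDL header paths are assumptions.

    #include <aidl/android/hardware/common/Ashmem.h>              // assumed generated header
    #include <aidl/android/hardware/neuralnetworks/Memory.h>      // assumed generated header
    #include <android/binder_auto_utils.h>

    using aidl::android::hardware::common::Ashmem;
    using aidl::android::hardware::neuralnetworks::Memory;

    // Wrap an already-duplicated ashmem fd (hypothetical input) in the new union.
    Memory wrapAshmem(ndk::ScopedFileDescriptor fd, int64_t size) {
        Ashmem ashmem;
        ashmem.fd = std::move(fd);
        ashmem.size = size;
        return Memory::make<Memory::Tag::ashmem>(std::move(ashmem));
    }

    // Query the union the same way the converter above does: check the tag, then get<Tag>().
    int64_t ashmemSizeOrZero(const Memory& memory) {
        if (memory.getTag() != Memory::Tag::ashmem) {
            return 0;
        }
        return memory.get<Memory::Tag::ashmem>().size;
    }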
@@ -645,20 +638,95 @@ struct overloaded : Ts... {
 template <typename... Ts>
 overloaded(Ts...)->overloaded<Ts...>;
 
-static nn::GeneralResult<common::NativeHandle> aidlHandleFromNativeHandle(
-        const native_handle_t& handle) {
-    common::NativeHandle aidlNativeHandle;
+nn::GeneralResult<common::NativeHandle> aidlHandleFromNativeHandle(
+        const native_handle_t& nativeHandle) {
+    auto handle = ::android::dupToAidl(&nativeHandle);
+    if (!std::all_of(handle.fds.begin(), handle.fds.end(),
+                     [](const ndk::ScopedFileDescriptor& fd) { return fd.get() >= 0; })) {
+        return NN_ERROR() << "android::dupToAidl returned an invalid common::NativeHandle";
+    }
+    return handle;
+}
 
-    aidlNativeHandle.fds.reserve(handle.numFds);
-    for (int i = 0; i < handle.numFds; ++i) {
-        auto duplicatedFd = NN_TRY(nn::dupFd(handle.data[i]));
-        aidlNativeHandle.fds.emplace_back(duplicatedFd.release());
+nn::GeneralResult<Memory> unvalidatedConvert(const nn::Memory::Ashmem& memory) {
+    if constexpr (std::numeric_limits<size_t>::max() > std::numeric_limits<int64_t>::max()) {
+        if (memory.size > std::numeric_limits<int64_t>::max()) {
+            return (
+                           NN_ERROR()
+                           << "Memory::Ashmem: size must be <= std::numeric_limits<int64_t>::max()")
+                    .
+                    operator nn::GeneralResult<Memory>();
+        }
     }
-    aidlNativeHandle.ints = std::vector<int32_t>(&handle.data[handle.numFds],
-                                                 &handle.data[handle.numFds + handle.numInts]);
+    auto fd = NN_TRY(nn::dupFd(memory.fd));
+    auto handle = common::Ashmem{
+            .fd = ndk::ScopedFileDescriptor(fd.release()),
+            .size = static_cast<int64_t>(memory.size),
+    };
+    return Memory::make<Memory::Tag::ashmem>(std::move(handle));
+}
 
-    return aidlNativeHandle;
+nn::GeneralResult<Memory> unvalidatedConvert(const nn::Memory::Fd& memory) {
+    if constexpr (std::numeric_limits<size_t>::max() > std::numeric_limits<int64_t>::max()) {
+        if (memory.size > std::numeric_limits<int64_t>::max()) {
+            return (NN_ERROR() << "Memory::Fd: size must be <= std::numeric_limits<int64_t>::max()")
+                    .
+                    operator nn::GeneralResult<Memory>();
+        }
+        if (memory.offset > std::numeric_limits<int64_t>::max()) {
+            return (
+                           NN_ERROR()
+                           << "Memory::Fd: offset must be <= std::numeric_limits<int64_t>::max()")
+                    .
+                    operator nn::GeneralResult<Memory>();
+        }
+    }
+
+    auto fd = NN_TRY(nn::dupFd(memory.fd));
+    auto handle = common::MappableFile{
+            .length = static_cast<int64_t>(memory.size),
+            .prot = memory.prot,
+            .fd = ndk::ScopedFileDescriptor(fd.release()),
+            .offset = static_cast<int64_t>(memory.offset),
+    };
+    return Memory::make<Memory::Tag::mappableFile>(std::move(handle));
+}
+
+nn::GeneralResult<Memory> unvalidatedConvert(const nn::Memory::HardwareBuffer& memory) {
+    const native_handle_t* nativeHandle = AHardwareBuffer_getNativeHandle(memory.handle.get());
+    if (nativeHandle == nullptr) {
+        return (NN_ERROR() << "unvalidatedConvert failed because AHardwareBuffer_getNativeHandle "
+                              "returned nullptr")
+                .
+                operator nn::GeneralResult<Memory>();
+    }
+
+    auto handle = NN_TRY(aidlHandleFromNativeHandle(*nativeHandle));
+
+    AHardwareBuffer_Desc desc;
+    AHardwareBuffer_describe(memory.handle.get(), &desc);
+
+    const auto description = graphics::common::HardwareBufferDescription{
+            .width = static_cast<int32_t>(desc.width),
+            .height = static_cast<int32_t>(desc.height),
+            .layers = static_cast<int32_t>(desc.layers),
+            .format = static_cast<graphics::common::PixelFormat>(desc.format),
+            .usage = static_cast<graphics::common::BufferUsage>(desc.usage),
+            .stride = static_cast<int32_t>(desc.stride),
+    };
+
+    auto hardwareBuffer = graphics::common::HardwareBuffer{
+            .description = std::move(description),
+            .handle = std::move(handle),
+    };
+    return Memory::make<Memory::Tag::hardwareBuffer>(std::move(hardwareBuffer));
+}
+
+nn::GeneralResult<Memory> unvalidatedConvert(const nn::Memory::Unknown& /*memory*/) {
+    return (NN_ERROR() << "Unable to convert Unknown memory type")
+            .
+            operator nn::GeneralResult<Memory>();
+}
 
 }  // namespace
@@ -693,41 +761,12 @@ nn::GeneralResult unvalidatedConvert(const nn::SharedHandl
 }
 
 nn::GeneralResult<Memory> unvalidatedConvert(const nn::SharedMemory& memory) {
-    CHECK(memory != nullptr);
-    if (memory->size > std::numeric_limits<int64_t>::max()) {
-        return NN_ERROR() << "Memory size doesn't fit into int64_t.";
+    if (memory == nullptr) {
+        return (NN_ERROR() << "Unable to convert nullptr memory")
+                .
+                operator nn::GeneralResult<Memory>();
     }
-
-    if (const auto* handle = std::get_if(&memory->handle)) {
-        return Memory{
-                .handle = NN_TRY(unvalidatedConvert(*handle)),
-                .size = static_cast<int64_t>(memory->size),
-                .name = memory->name,
-        };
-    }
-
-    const auto* ahwb = std::get(memory->handle).get();
-    AHardwareBuffer_Desc bufferDesc;
-    AHardwareBuffer_describe(ahwb, &bufferDesc);
-
-    if (bufferDesc.format == AHARDWAREBUFFER_FORMAT_BLOB) {
-        CHECK_EQ(memory->size, bufferDesc.width);
-        CHECK_EQ(memory->name, "hardware_buffer_blob");
-    } else {
-        CHECK_EQ(memory->size, 0u);
-        CHECK_EQ(memory->name, "hardware_buffer");
-    }
-
-    const native_handle_t* nativeHandle = AHardwareBuffer_getNativeHandle(ahwb);
-    if (nativeHandle == nullptr) {
-        return NN_ERROR() << "unvalidatedConvert failed because AHardwareBuffer_getNativeHandle "
-                             "returned nullptr";
-    }
-
-    return Memory{
-            .handle = NN_TRY(aidlHandleFromNativeHandle(*nativeHandle)),
-            .size = static_cast<int64_t>(memory->size),
-            .name = memory->name,
-    };
+    return std::visit([](const auto& x) { return unvalidatedConvert(x); }, memory->handle);
 }
 
 nn::GeneralResult<ErrorStatus> unvalidatedConvert(const nn::ErrorStatus& errorStatus) {
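The last hunk above replaces the hand-written std::get_if/std::get branching with a single std::visit over the handle variant, dispatching to one unvalidatedConvert() overload per alternative. The snippet below is a self-contained illustration of that dispatch pattern with placeholder types; it is not the real nn::Memory definition.

    #include <cstddef>
    #include <string>
    #include <variant>

    // Placeholder alternatives standing in for nn::Memory::Ashmem, nn::Memory::Fd, etc.
    struct AshmemLike { int fd = -1; };
    struct FdLike { int fd = -1; std::size_t offset = 0; };

    std::string convert(const AshmemLike&) { return "ashmem"; }
    std::string convert(const FdLike&) { return "mmap_fd"; }

    // The generic lambda routes each alternative to the matching overload above,
    // which is exactly the shape of the std::visit call used in the hunk.
    std::string convert(const std::variant<AshmemLike, FdLike>& handle) {
        return std::visit([](const auto& x) { return convert(x); }, handle);
    }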
diff --git a/neuralnetworks/aidl/utils/src/Utils.cpp b/neuralnetworks/aidl/utils/src/Utils.cpp
index 95516c854b..03407be4ce 100644
--- a/neuralnetworks/aidl/utils/src/Utils.cpp
+++ b/neuralnetworks/aidl/utils/src/Utils.cpp
@@ -16,12 +16,20 @@
 
 #include "Utils.h"
 
+#include
+#include
+#include
+#include
 #include
 #include
+#include
 
 namespace aidl::android::hardware::neuralnetworks::utils {
 namespace {
 
+nn::GeneralResult<ndk::ScopedFileDescriptor> clone(const ndk::ScopedFileDescriptor& fd);
+using utils::clone;
+
 template <typename Type>
 nn::GeneralResult<std::vector<Type>> cloneVec(const std::vector<Type>& arguments) {
     std::vector<Type> clonedObjects;
@@ -37,24 +45,52 @@ nn::GeneralResult<std::vector<Type>> clone(const std::vector<Type>& arguments) {
     return cloneVec(arguments);
 }
 
+nn::GeneralResult<ndk::ScopedFileDescriptor> clone(const ndk::ScopedFileDescriptor& fd) {
+    auto duplicatedFd = NN_TRY(nn::dupFd(fd.get()));
+    return ndk::ScopedFileDescriptor(duplicatedFd.release());
+}
+
+nn::GeneralResult<common::NativeHandle> clone(const common::NativeHandle& handle) {
+    return common::NativeHandle{
+            .fds = NN_TRY(cloneVec(handle.fds)),
+            .ints = handle.ints,
+    };
+}
+
 }  // namespace
 
 nn::GeneralResult<Memory> clone(const Memory& memory) {
-    common::NativeHandle nativeHandle;
-    nativeHandle.ints = memory.handle.ints;
-    nativeHandle.fds.reserve(memory.handle.fds.size());
-    for (const auto& fd : memory.handle.fds) {
-        const int newFd = dup(fd.get());
-        if (newFd < 0) {
-            return NN_ERROR() << "Couldn't dup a file descriptor";
+    switch (memory.getTag()) {
+        case Memory::Tag::ashmem: {
+            const auto& ashmem = memory.get<Memory::Tag::ashmem>();
+            auto handle = common::Ashmem{
+                    .fd = NN_TRY(clone(ashmem.fd)),
+                    .size = ashmem.size,
+            };
+            return Memory::make<Memory::Tag::ashmem>(std::move(handle));
+        }
+        case Memory::Tag::mappableFile: {
+            const auto& memFd = memory.get<Memory::Tag::mappableFile>();
+            auto handle = common::MappableFile{
+                    .length = memFd.length,
+                    .prot = memFd.prot,
+                    .fd = NN_TRY(clone(memFd.fd)),
+                    .offset = memFd.offset,
+            };
+            return Memory::make<Memory::Tag::mappableFile>(std::move(handle));
+        }
+        case Memory::Tag::hardwareBuffer: {
+            const auto& hardwareBuffer = memory.get<Memory::Tag::hardwareBuffer>();
+            auto handle = graphics::common::HardwareBuffer{
+                    .description = hardwareBuffer.description,
+                    .handle = NN_TRY(clone(hardwareBuffer.handle)),
+            };
+            return Memory::make<Memory::Tag::hardwareBuffer>(std::move(handle));
         }
-        nativeHandle.fds.emplace_back(newFd);
     }
-    return Memory{
-            .handle = std::move(nativeHandle),
-            .size = memory.size,
-            .name = memory.name,
-    };
+    return (NN_ERROR() << "Unrecognized Memory::Tag: " << memory.getTag())
+            .
+            operator nn::GeneralResult<Memory>();
 }
 
 nn::GeneralResult<RequestMemoryPool> clone(const RequestMemoryPool& requestPool) {
diff --git a/neuralnetworks/aidl/vts/functional/Android.bp b/neuralnetworks/aidl/vts/functional/Android.bp
index 7804c2a765..d5b150a934 100644
--- a/neuralnetworks/aidl/vts/functional/Android.bp
+++ b/neuralnetworks/aidl/vts/functional/Android.bp
@@ -50,9 +50,11 @@ cc_test {
     ],
     static_libs: [
         "android.hardware.common-V2-ndk_platform",
+        "android.hardware.graphics.common-V2-ndk_platform",
         "android.hardware.neuralnetworks-V1-ndk_platform",
         "android.hidl.allocator@1.0",
         "android.hidl.memory@1.0",
+        "libaidlcommonsupport",
         "libgmock",
         "libhidlmemory",
         "libneuralnetworks_generated_test_harness",
diff --git a/neuralnetworks/aidl/vts/functional/MemoryDomainTests.cpp b/neuralnetworks/aidl/vts/functional/MemoryDomainTests.cpp
index 596f8ae58e..e8313f19eb 100644
--- a/neuralnetworks/aidl/vts/functional/MemoryDomainTests.cpp
+++ b/neuralnetworks/aidl/vts/functional/MemoryDomainTests.cpp
@@ -16,6 +16,7 @@
 
 #define LOG_TAG "neuralnetworks_aidl_hal_test"
 
+#include
 #include
 #include
 #include
@@ -659,10 +660,26 @@ class MemoryDomainCopyTestBase : public MemoryDomainTestBase {
         return allocateBuffer(preparedModel, inputIndexes, outputIndexes, {});
     }
 
+    size_t getSize(const Memory& memory) {
+        switch (memory.getTag()) {
+            case Memory::Tag::ashmem:
+                return memory.get<Memory::Tag::ashmem>().size;
+            case Memory::Tag::mappableFile:
+                return memory.get<Memory::Tag::mappableFile>().length;
+            case Memory::Tag::hardwareBuffer: {
+                const auto& hardwareBuffer = memory.get<Memory::Tag::hardwareBuffer>();
+                const bool isBlob =
+                        hardwareBuffer.description.format == graphics::common::PixelFormat::BLOB;
+                return isBlob ? hardwareBuffer.description.width : 0;
+            }
+        }
+        return 0;
+    }
+
     Memory allocateSharedMemory(uint32_t size) {
         const auto sharedMemory = nn::createSharedMemory(size).value();
         auto memory = utils::convert(sharedMemory).value();
-        EXPECT_EQ(memory.size, size);
+        EXPECT_EQ(getSize(memory), size);
         return memory;
     }
 
@@ -690,7 +707,7 @@ class MemoryDomainCopyTestBase : public MemoryDomainTestBase {
 
     void initializeDeviceMemory(const std::shared_ptr<IBuffer>& buffer) {
         Memory memory = allocateSharedMemory(kTestOperandDataSize);
-        ASSERT_EQ(memory.size, kTestOperandDataSize);
+        ASSERT_EQ(getSize(memory), kTestOperandDataSize);
         testCopyFrom(buffer, memory, utils::toSigned(kTestOperand.dimensions).value(),
                      ErrorStatus::NONE);
     }
diff --git a/neuralnetworks/aidl/vts/functional/ValidateModel.cpp b/neuralnetworks/aidl/vts/functional/ValidateModel.cpp
index 94d3daf6bb..698c054941 100644
--- a/neuralnetworks/aidl/vts/functional/ValidateModel.cpp
+++ b/neuralnetworks/aidl/vts/functional/ValidateModel.cpp
@@ -259,12 +259,16 @@ template <>
 size_t sizeForBinder(const Memory& memory) {
     // This is just a guess.
 
-    size_t size = 0;
-    const NativeHandle& handle = memory.handle;
-    size += sizeof(decltype(handle.fds)::value_type) * handle.fds.size();
-    size += sizeof(decltype(handle.ints)::value_type) * handle.ints.size();
-    size += sizeForBinder(memory.name);
-    size += sizeof(memory);
+    size_t size = sizeof(Memory);
+
+    // Only hardwareBuffer type memory has dynamic memory that needs to be accounted for (in the
+    // form of a NativeHandle type). The other types of memory (MappableFile, Ashmem) use a
+    // single file descriptor (with metadata) instead.
+    if (memory.getTag() == Memory::Tag::hardwareBuffer) {
+        const NativeHandle& handle = memory.get<Memory::Tag::hardwareBuffer>().handle;
+        size += sizeof(decltype(handle.fds)::value_type) * handle.fds.size();
+        size += sizeof(decltype(handle.ints)::value_type) * handle.ints.size();
+    }
 
     return size;
 }
diff --git a/neuralnetworks/utils/common/src/CommonUtils.cpp b/neuralnetworks/utils/common/src/CommonUtils.cpp
index 924ecb2d1b..4d26795d89 100644
--- a/neuralnetworks/utils/common/src/CommonUtils.cpp
+++ b/neuralnetworks/utils/common/src/CommonUtils.cpp
@@ -89,6 +89,59 @@ void copyPointersToSharedMemory(nn::Model::Subgraph* subgraph,
                                 });
 }
 
+nn::GeneralResult<hidl_handle> createNativeHandleFrom(base::unique_fd fd,
+                                                      const std::vector<int>& ints) {
+    constexpr size_t kIntMax = std::numeric_limits<int>::max();
+    CHECK_LE(ints.size(), kIntMax);
+    native_handle_t* nativeHandle = native_handle_create(1, static_cast<int>(ints.size()));
+    if (nativeHandle == nullptr) {
+        return NN_ERROR() << "Failed to create native_handle";
+    }
+
+    nativeHandle->data[0] = fd.release();
+    std::copy(ints.begin(), ints.end(), nativeHandle->data + 1);
+
+    hidl_handle handle;
+    handle.setTo(nativeHandle, /*shouldOwn=*/true);
+    return handle;
+}
+
+nn::GeneralResult<hidl_memory> createHidlMemoryFrom(const nn::Memory::Ashmem& memory) {
+    auto fd = NN_TRY(nn::dupFd(memory.fd));
+    auto handle = NN_TRY(createNativeHandleFrom(std::move(fd), {}));
+    return hidl_memory("ashmem", std::move(handle), memory.size);
+}
+
+nn::GeneralResult<hidl_memory> createHidlMemoryFrom(const nn::Memory::Fd& memory) {
+    auto fd = NN_TRY(nn::dupFd(memory.fd));
+
+    const auto [lowOffsetBits, highOffsetBits] = nn::getIntsFromOffset(memory.offset);
+    const std::vector<int> ints = {memory.prot, lowOffsetBits, highOffsetBits};
+
+    auto handle = NN_TRY(createNativeHandleFrom(std::move(fd), ints));
+    return hidl_memory("mmap_fd", std::move(handle), memory.size);
+}
+
+nn::GeneralResult<hidl_memory> createHidlMemoryFrom(const nn::Memory::HardwareBuffer& memory) {
+    const auto* ahwb = memory.handle.get();
+    AHardwareBuffer_Desc bufferDesc;
+    AHardwareBuffer_describe(ahwb, &bufferDesc);
+
+    const bool isBlob = bufferDesc.format == AHARDWAREBUFFER_FORMAT_BLOB;
+    const size_t size = isBlob ? bufferDesc.width : 0;
+    const char* const name = isBlob ? "hardware_buffer_blob" : "hardware_buffer";
+
+    const native_handle_t* nativeHandle = AHardwareBuffer_getNativeHandle(ahwb);
+    const hidl_handle hidlHandle(nativeHandle);
+    hidl_handle copiedHandle(hidlHandle);
+
+    return hidl_memory(name, std::move(copiedHandle), size);
+}
+
+nn::GeneralResult<hidl_memory> createHidlMemoryFrom(const nn::Memory::Unknown& memory) {
+    return hidl_memory(memory.name, NN_TRY(hidlHandleFromSharedHandle(memory.handle)), memory.size);
+}
+
 }  // anonymous namespace
 
 nn::Capabilities::OperandPerformanceTable makeQuantized8PerformanceConsistentWithP(
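createHidlMemoryFrom(const nn::Memory::Fd&) above packs the 64-bit mmap offset into two of the native handle's ints via nn::getIntsFromOffset(), and the "mmap_fd" parsing later in this file reverses it with nn::getOffsetFromInts(). Those helpers are not part of this diff; the sketch below shows the low/high 32-bit split they are assumed to perform, with stand-in names.

    #include <cstdint>
    #include <utility>

    // Stand-ins for nn::getIntsFromOffset / nn::getOffsetFromInts (assumed semantics).
    std::pair<int32_t, int32_t> intsFromOffset(uint64_t offset) {
        const auto low = static_cast<int32_t>(static_cast<uint32_t>(offset & 0xffffffffu));
        const auto high = static_cast<int32_t>(static_cast<uint32_t>(offset >> 32));
        return {low, high};
    }

    uint64_t offsetFromInts(int32_t low, int32_t high) {
        const auto lowBits = static_cast<uint64_t>(static_cast<uint32_t>(low));
        const auto highBits = static_cast<uint64_t>(static_cast<uint32_t>(high));
        return (highBits << 32) | lowBits;  // recombine into the original 64-bit offset
    }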
"hardware_buffer_blob" : "hardware_buffer"; + + const native_handle_t* nativeHandle = AHardwareBuffer_getNativeHandle(ahwb); + const hidl_handle hidlHandle(nativeHandle); + hidl_handle copiedHandle(hidlHandle); + + return hidl_memory(name, std::move(copiedHandle), size); +} + +nn::GeneralResult createHidlMemoryFrom(const nn::Memory::Unknown& memory) { + return hidl_memory(memory.name, NN_TRY(hidlHandleFromSharedHandle(memory.handle)), memory.size); +} + } // anonymous namespace nn::Capabilities::OperandPerformanceTable makeQuantized8PerformanceConsistentWithP( @@ -255,27 +308,7 @@ nn::GeneralResult createHidlMemoryFromSharedMemory(const nn::Shared if (memory == nullptr) { return NN_ERROR() << "Memory must be non-empty"; } - if (const auto* handle = std::get_if(&memory->handle)) { - return hidl_memory(memory->name, NN_TRY(hidlHandleFromSharedHandle(*handle)), memory->size); - } - - const auto* ahwb = std::get(memory->handle).get(); - AHardwareBuffer_Desc bufferDesc; - AHardwareBuffer_describe(ahwb, &bufferDesc); - - if (bufferDesc.format == AHARDWAREBUFFER_FORMAT_BLOB) { - CHECK_EQ(memory->size, bufferDesc.width); - CHECK_EQ(memory->name, "hardware_buffer_blob"); - } else { - CHECK_EQ(memory->size, 0u); - CHECK_EQ(memory->name, "hardware_buffer"); - } - - const native_handle_t* nativeHandle = AHardwareBuffer_getNativeHandle(ahwb); - const hidl_handle hidlHandle(nativeHandle); - hidl_handle handle(hidlHandle); - - return hidl_memory(memory->name, std::move(handle), memory->size); + return std::visit([](const auto& x) { return createHidlMemoryFrom(x); }, memory->handle); } static uint32_t roundUpToMultiple(uint32_t value, uint32_t multiple) { @@ -283,14 +316,53 @@ static uint32_t roundUpToMultiple(uint32_t value, uint32_t multiple) { } nn::GeneralResult createSharedMemoryFromHidlMemory(const hidl_memory& memory) { - CHECK_LE(memory.size(), std::numeric_limits::max()); + CHECK_LE(memory.size(), std::numeric_limits::max()); + if (!memory.valid()) { + return NN_ERROR() << "Unable to convert invalid hidl_memory"; + } + + if (memory.name() == "ashmem") { + if (memory.handle()->numFds != 1) { + return NN_ERROR() << "Unable to convert invalid ashmem memory object with " + << memory.handle()->numFds << " numFds, but expected 1"; + } + if (memory.handle()->numInts != 0) { + return NN_ERROR() << "Unable to convert invalid ashmem memory object with " + << memory.handle()->numInts << " numInts, but expected 0"; + } + auto handle = nn::Memory::Ashmem{ + .fd = NN_TRY(nn::dupFd(memory.handle()->data[0])), + .size = static_cast(memory.size()), + }; + return std::make_shared(nn::Memory{.handle = std::move(handle)}); + } + + if (memory.name() == "mmap_fd") { + if (memory.handle()->numFds != 1) { + return NN_ERROR() << "Unable to convert invalid mmap_fd memory object with " + << memory.handle()->numFds << " numFds, but expected 1"; + } + if (memory.handle()->numInts != 3) { + return NN_ERROR() << "Unable to convert invalid mmap_fd memory object with " + << memory.handle()->numInts << " numInts, but expected 3"; + } + + const int fd = memory.handle()->data[0]; + const int prot = memory.handle()->data[1]; + const int lower = memory.handle()->data[2]; + const int higher = memory.handle()->data[3]; + const size_t offset = nn::getOffsetFromInts(lower, higher); + + return nn::createSharedMemoryFromFd(static_cast(memory.size()), prot, fd, offset); + } if (memory.name() != "hardware_buffer_blob") { - return std::make_shared(nn::Memory{ + auto handle = nn::Memory::Unknown{ .handle = 
NN_TRY(sharedHandleFromNativeHandle(memory.handle())), - .size = static_cast(memory.size()), + .size = static_cast(memory.size()), .name = memory.name(), - }); + }; + return std::make_shared(nn::Memory{.handle = std::move(handle)}); } const auto size = memory.size(); @@ -328,11 +400,7 @@ nn::GeneralResult createSharedMemoryFromHidlMemory(const hidl_ << "Can't create AHardwareBuffer from handle. Error: " << status; } - return std::make_shared(nn::Memory{ - .handle = nn::HardwareBufferHandle(hardwareBuffer, /*takeOwnership=*/true), - .size = static_cast(memory.size()), - .name = memory.name(), - }); + return nn::createSharedMemoryFromAHWB(hardwareBuffer, /*takeOwnership=*/true); } nn::GeneralResult hidlHandleFromSharedHandle(const nn::Handle& handle) {
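Finally, a short usage sketch tying the pieces together, using only calls that appear in this patch (nn::createSharedMemory from the VTS test and utils::convert from the AIDL utils). The namespace aliases and header locations are assumptions, and error handling is elided with .value() as in the test code.

    #include <cstdint>
    #include <nnapi/SharedMemory.h>           // assumed location of nn::createSharedMemory
    #include <nnapi/hal/aidl/Conversions.h>   // assumed location of utils::convert

    namespace nn = ::android::nn;
    namespace utils = ::aidl::android::hardware::neuralnetworks::utils;
    using aidl::android::hardware::neuralnetworks::Memory;

    // Allocate canonical shared memory and convert it to the new tagged AIDL Memory union.
    Memory toAidlMemory(uint32_t size) {
        const nn::SharedMemory sharedMemory = nn::createSharedMemory(size).value();
        return utils::convert(sharedMemory).value();
    }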