Merge changes from topic "nnapi-aidl-burst" am: e24a7e57e2

Original change: https://android-review.googlesource.com/c/platform/hardware/interfaces/+/1645423

Change-Id: Iee324e1209c70e255d336cf6823e505346bba960
This commit is contained in:
Przemyslaw Szczepaniak 2021-03-22 19:19:58 +00:00 committed by Automerger Merge Worker
commit 7f4c0a220d
7 changed files with 237 additions and 13 deletions

View file

@ -39,6 +39,8 @@ class MockPreparedModel final : public BnPreparedModel {
bool measureTiming, int64_t deadline, int64_t loopTimeoutDuration,
int64_t duration, FencedExecutionResult* fencedExecutionResult),
(override));
MOCK_METHOD(ndk::ScopedAStatus, configureExecutionBurst, (std::shared_ptr<IBurst> * burst),
(override));
};
inline std::shared_ptr<MockPreparedModel> MockPreparedModel::create() {

View file

@ -17,6 +17,7 @@
#include "GeneratedTestHarness.h"
#include <aidl/android/hardware/neuralnetworks/ErrorStatus.h>
#include <aidl/android/hardware/neuralnetworks/RequestMemoryPool.h>
#include <android-base/logging.h>
#include <android/binder_auto_utils.h>
#include <android/sync.h>
@ -582,6 +583,53 @@ void EvaluatePreparedModel(const std::shared_ptr<IDevice>& device,
}
break;
}
case Executor::BURST: {
SCOPED_TRACE("burst");
// create burst
std::shared_ptr<IBurst> burst;
auto ret = preparedModel->configureExecutionBurst(&burst);
ASSERT_TRUE(ret.isOk()) << ret.getDescription();
ASSERT_NE(nullptr, burst.get());
// associate a unique slot with each memory pool
int64_t currentSlot = 0;
std::vector<int64_t> slots;
slots.reserve(request.pools.size());
for (const auto& pool : request.pools) {
if (pool.getTag() == RequestMemoryPool::Tag::pool) {
slots.push_back(currentSlot++);
} else {
EXPECT_EQ(pool.getTag(), RequestMemoryPool::Tag::token);
slots.push_back(-1);
}
}
ExecutionResult executionResult;
// execute
ret = burst->executeSynchronously(request, slots, testConfig.measureTiming, kNoDeadline,
loopTimeoutDuration, &executionResult);
ASSERT_TRUE(ret.isOk() || ret.getExceptionCode() == EX_SERVICE_SPECIFIC)
<< ret.getDescription();
if (ret.isOk()) {
executionStatus = executionResult.outputSufficientSize
? ErrorStatus::NONE
: ErrorStatus::OUTPUT_INSUFFICIENT_SIZE;
outputShapes = std::move(executionResult.outputShapes);
timing = executionResult.timing;
} else {
executionStatus = static_cast<ErrorStatus>(ret.getServiceSpecificError());
}
// Mark each slot as unused after the execution. This is unnecessary because the burst
// is freed after this scope ends, but this is here to test the functionality.
for (int64_t slot : slots) {
ret = burst->releaseMemoryResource(slot);
ASSERT_TRUE(ret.isOk()) << ret.getDescription();
}
break;
}
case Executor::FENCED: {
SCOPED_TRACE("fenced");
ErrorStatus result = ErrorStatus::NONE;
@ -727,19 +775,19 @@ void EvaluatePreparedModel(const std::shared_ptr<IDevice>& device,
case TestKind::GENERAL: {
outputTypesList = {OutputType::FULLY_SPECIFIED};
measureTimingList = {false, true};
executorList = {Executor::SYNC};
executorList = {Executor::SYNC, Executor::BURST};
memoryTypeList = {MemoryType::ASHMEM};
} break;
case TestKind::DYNAMIC_SHAPE: {
outputTypesList = {OutputType::UNSPECIFIED, OutputType::INSUFFICIENT};
measureTimingList = {false, true};
executorList = {Executor::SYNC, Executor::FENCED};
executorList = {Executor::SYNC, Executor::BURST, Executor::FENCED};
memoryTypeList = {MemoryType::ASHMEM};
} break;
case TestKind::MEMORY_DOMAIN: {
outputTypesList = {OutputType::FULLY_SPECIFIED};
measureTimingList = {false};
executorList = {Executor::SYNC, Executor::FENCED};
executorList = {Executor::SYNC, Executor::BURST, Executor::FENCED};
memoryTypeList = {MemoryType::BLOB_AHWB, MemoryType::DEVICE};
} break;
case TestKind::FENCED_COMPUTE: {
@ -755,7 +803,7 @@ void EvaluatePreparedModel(const std::shared_ptr<IDevice>& device,
case TestKind::INTINITE_LOOP_TIMEOUT: {
outputTypesList = {OutputType::MISSED_DEADLINE};
measureTimingList = {false, true};
executorList = {Executor::SYNC, Executor::FENCED};
executorList = {Executor::SYNC, Executor::BURST, Executor::FENCED};
memoryTypeList = {MemoryType::ASHMEM};
} break;
}
@ -779,7 +827,7 @@ void EvaluatePreparedCoupledModels(const std::shared_ptr<IDevice>& device,
const TestModel& coupledModel) {
const std::vector<OutputType> outputTypesList = {OutputType::FULLY_SPECIFIED};
const std::vector<bool> measureTimingList = {false, true};
const std::vector<Executor> executorList = {Executor::SYNC, Executor::FENCED};
const std::vector<Executor> executorList = {Executor::SYNC, Executor::BURST, Executor::FENCED};
for (const OutputType outputType : outputTypesList) {
for (const bool measureTiming : measureTimingList) {

View file

@ -203,6 +203,10 @@ class InvalidPreparedModel : public BnPreparedModel {
return ndk::ScopedAStatus::fromServiceSpecificError(
static_cast<int32_t>(ErrorStatus::GENERAL_FAILURE));
}
// Burst entry point of this always-failing prepared model: like the other
// overrides in this class, it reports GENERAL_FAILURE instead of producing a
// burst object.
ndk::ScopedAStatus configureExecutionBurst(std::shared_ptr<IBurst>*) override {
    return ndk::ScopedAStatus::fromServiceSpecificError(
            static_cast<int32_t>(ErrorStatus::GENERAL_FAILURE));
}
};
template <typename... Args>
@ -866,6 +870,9 @@ class MemoryDomainExecutionTest
case Executor::SYNC:
EXPECT_EQ(executeSync(preparedModel, request), expectedStatus);
break;
case Executor::BURST:
EXPECT_EQ(executeBurst(preparedModel, request), expectedStatus);
break;
case Executor::FENCED:
EXPECT_EQ(executeFenced(preparedModel, request), expectedStatus);
break;
@ -916,6 +923,35 @@ class MemoryDomainExecutionTest
return executionStatus;
}
// Runs |request| once through a freshly configured execution burst and maps
// the outcome onto an ErrorStatus. Every memory identifier token is -1, i.e.
// no burst-managed memory slot is used.
ErrorStatus executeBurst(const std::shared_ptr<IPreparedModel>& preparedModel,
                         const Request& request) {
    // Obtain a burst object from the prepared model.
    std::shared_ptr<IBurst> burst;
    const auto configureStatus = preparedModel->configureExecutionBurst(&burst);
    EXPECT_TRUE(configureStatus.isOk()) << configureStatus.getDescription();
    EXPECT_NE(nullptr, burst.get());
    if (!configureStatus.isOk() || burst.get() == nullptr) {
        return ErrorStatus::GENERAL_FAILURE;
    }

    // No memory is cached in the burst, so pass -1 for every pool.
    const std::vector<int64_t> memoryIdentifierTokens(request.pools.size(), -1);
    ExecutionResult executionResult;
    const auto executeStatus =
            burst->executeSynchronously(request, memoryIdentifierTokens, false, kNoDeadline,
                                        kOmittedTimeoutDuration, &executionResult);
    if (!executeStatus.isOk()) {
        // A failed execution must surface as a service-specific error code.
        EXPECT_EQ(executeStatus.getExceptionCode(), EX_SERVICE_SPECIFIC);
        return static_cast<ErrorStatus>(executeStatus.getServiceSpecificError());
    }

    EXPECT_EQ(executionResult.timing, kNoTiming);
    return executionResult.outputSufficientSize ? ErrorStatus::NONE
                                                : ErrorStatus::OUTPUT_INSUFFICIENT_SIZE;
}
const Executor kExecutor = std::get<Executor>(GetParam());
};
@ -1159,7 +1195,7 @@ TEST_P(MemoryDomainExecutionTest, InvalidDimensions) {
ErrorStatus::GENERAL_FAILURE);
}
const auto kExecutorChoices = testing::Values(Executor::SYNC, Executor::FENCED);
const auto kExecutorChoices = testing::Values(Executor::SYNC, Executor::BURST, Executor::FENCED);
std::string printMemoryDomainExecutionTest(
const testing::TestParamInfo<MemoryDomainExecutionTestParam>& info) {

View file

@ -51,6 +51,10 @@ constexpr auto kShortDuration = std::chrono::milliseconds{5};
using Results = std::tuple<ErrorStatus, std::vector<OutputShape>, Timing>;
using MaybeResults = std::optional<Results>;
using ExecutionFunction =
std::function<MaybeResults(const std::shared_ptr<IPreparedModel>& preparedModel,
const Request& request, int64_t deadline)>;
static int64_t makeDeadline(DeadlineBoundType deadlineBoundType) {
const auto getNanosecondsSinceEpoch = [](const auto& time) -> int64_t {
const auto timeSinceEpoch = time.time_since_epoch();
@ -177,13 +181,53 @@ static MaybeResults executeSynchronously(const std::shared_ptr<IPreparedModel>&
std::move(executionResult.outputShapes), executionResult.timing});
}
// Burst-based counterpart of executeSynchronously(): configures a burst on
// |preparedModel| and performs one synchronous execution against |deadline|,
// returning the unpacked results, or std::nullopt on a non-service-specific
// failure.
static MaybeResults executeBurst(const std::shared_ptr<IPreparedModel>& preparedModel,
                                 const Request& request, int64_t deadline) {
    SCOPED_TRACE("burst");
    const bool measure = false;

    // Configure the burst; bail out if the prepared model cannot provide one.
    std::shared_ptr<IBurst> burst;
    const auto configureStatus = preparedModel->configureExecutionBurst(&burst);
    EXPECT_TRUE(configureStatus.isOk()) << configureStatus.getDescription();
    EXPECT_NE(nullptr, burst.get());
    if (!configureStatus.isOk() || burst.get() == nullptr) {
        return std::nullopt;
    }

    // No memory is cached in the burst: every identifier token is -1.
    const std::vector<int64_t> slots(request.pools.size(), -1);

    // Perform the execution.
    ExecutionResult executionResult;
    const auto executeStatus = burst->executeSynchronously(
            request, slots, measure, deadline, kOmittedTimeoutDuration, &executionResult);
    EXPECT_TRUE(executeStatus.isOk() || executeStatus.getExceptionCode() == EX_SERVICE_SPECIFIC)
            << executeStatus.getDescription();

    if (executeStatus.isOk()) {
        // Successful transaction: status depends on output buffer sufficiency.
        const ErrorStatus status = executionResult.outputSufficientSize
                                           ? ErrorStatus::NONE
                                           : ErrorStatus::OUTPUT_INSUFFICIENT_SIZE;
        return MaybeResults(
                {status, std::move(executionResult.outputShapes), executionResult.timing});
    }
    if (executeStatus.getExceptionCode() != EX_SERVICE_SPECIFIC) {
        return std::nullopt;
    }
    // Service-specific failure: propagate the driver-reported error status.
    return MaybeResults(
            {static_cast<ErrorStatus>(executeStatus.getServiceSpecificError()), {}, kNoTiming});
}
void runExecutionTest(const std::shared_ptr<IPreparedModel>& preparedModel,
const TestModel& testModel, const Request& request,
const ExecutionContext& context, DeadlineBoundType deadlineBound) {
const ExecutionContext& context, bool synchronous,
DeadlineBoundType deadlineBound) {
const ExecutionFunction execute = synchronous ? executeSynchronously : executeBurst;
const auto deadline = makeDeadline(deadlineBound);
// Perform execution and unpack results.
const auto results = executeSynchronously(preparedModel, request, deadline);
const auto results = execute(preparedModel, request, deadline);
if (!results.has_value()) return;
const auto& [status, outputShapes, timing] = results.value();
@ -235,8 +279,11 @@ void runExecutionTest(const std::shared_ptr<IPreparedModel>& preparedModel,
void runExecutionTests(const std::shared_ptr<IPreparedModel>& preparedModel,
const TestModel& testModel, const Request& request,
const ExecutionContext& context) {
for (auto deadlineBound : deadlineBounds) {
runExecutionTest(preparedModel, testModel, request, context, deadlineBound);
for (bool synchronous : {false, true}) {
for (auto deadlineBound : deadlineBounds) {
runExecutionTest(preparedModel, testModel, request, context, synchronous,
deadlineBound);
}
}
}

View file

@ -16,7 +16,9 @@
#define LOG_TAG "neuralnetworks_aidl_hal_test"
#include <aidl/android/hardware/neuralnetworks/RequestMemoryPool.h>
#include <android/binder_auto_utils.h>
#include <variant>
#include <chrono>
@ -77,6 +79,35 @@ static void validate(const std::shared_ptr<IPreparedModel>& preparedModel,
ASSERT_EQ(static_cast<ErrorStatus>(executeStatus.getServiceSpecificError()),
ErrorStatus::INVALID_ARGUMENT);
}
// burst
{
SCOPED_TRACE(message + " [burst]");
// create burst
std::shared_ptr<IBurst> burst;
auto ret = preparedModel->configureExecutionBurst(&burst);
ASSERT_TRUE(ret.isOk()) << ret.getDescription();
ASSERT_NE(nullptr, burst.get());
// use -1 for all memory identifier tokens
const std::vector<int64_t> slots(request.pools.size(), -1);
ExecutionResult executionResult;
const auto executeStatus = burst->executeSynchronously(
request, slots, measure, kNoDeadline, kOmittedTimeoutDuration, &executionResult);
ASSERT_FALSE(executeStatus.isOk());
ASSERT_EQ(executeStatus.getExceptionCode(), EX_SERVICE_SPECIFIC);
ASSERT_EQ(static_cast<ErrorStatus>(executeStatus.getServiceSpecificError()),
ErrorStatus::INVALID_ARGUMENT);
}
}
// Convenience helper: returns the burst configured on |preparedModel|, or
// nullptr if configuration fails.
std::shared_ptr<IBurst> createBurst(const std::shared_ptr<IPreparedModel>& preparedModel) {
    std::shared_ptr<IBurst> burst;
    const auto status = preparedModel->configureExecutionBurst(&burst);
    return status.isOk() ? burst : nullptr;
}
///////////////////////// REMOVE INPUT ////////////////////////////////////
@ -110,6 +141,65 @@ void validateRequest(const std::shared_ptr<IPreparedModel>& preparedModel, const
removeOutputTest(preparedModel, request);
}
// Exercises IBurst argument validation on |preparedModel|: every execution
// with an invalid memory identifier token, or with a token count that does not
// match request.pools.size(), must fail with ErrorStatus::INVALID_ARGUMENT.
void validateBurst(const std::shared_ptr<IPreparedModel>& preparedModel, const Request& request) {
    // create burst
    std::shared_ptr<IBurst> burst;
    auto ret = preparedModel->configureExecutionBurst(&burst);
    ASSERT_TRUE(ret.isOk()) << ret.getDescription();
    ASSERT_NE(nullptr, burst.get());

    // Runs one synchronous execution with the given tokens and checks that it
    // is rejected with INVALID_ARGUMENT (as a service-specific error).
    const auto test = [&burst, &request](const std::vector<int64_t>& slots) {
        ExecutionResult executionResult;
        const auto executeStatus =
                burst->executeSynchronously(request, slots, /*measure=*/false, kNoDeadline,
                                            kOmittedTimeoutDuration, &executionResult);
        ASSERT_FALSE(executeStatus.isOk());
        ASSERT_EQ(executeStatus.getExceptionCode(), EX_SERVICE_SPECIFIC);
        ASSERT_EQ(static_cast<ErrorStatus>(executeStatus.getServiceSpecificError()),
                  ErrorStatus::INVALID_ARGUMENT);
    };

    // Build a baseline token list: a fresh slot per "pool"-tagged memory pool,
    // and -1 (no cached memory) for every other pool kind.
    int64_t currentSlot = 0;
    std::vector<int64_t> slots;
    slots.reserve(request.pools.size());
    for (const auto& pool : request.pools) {
        if (pool.getTag() == RequestMemoryPool::Tag::pool) {
            slots.push_back(currentSlot++);
        } else {
            slots.push_back(-1);
        }
    }

    constexpr int64_t invalidSlot = -2;

    // validate failure when an invalid memory identifier token value is used:
    // corrupt each position in turn, execute, then restore it
    for (size_t i = 0; i < request.pools.size(); ++i) {
        const int64_t oldSlotValue = slots[i];

        slots[i] = invalidSlot;
        test(slots);
        slots[i] = oldSlotValue;
    }

    // validate failure when memoryIdentifierTokens.size() is one FEWER than
    // request.pools.size()
    if (request.pools.size() > 0) {
        slots = std::vector<int64_t>(request.pools.size() - 1, -1);
        test(slots);
    }

    // validate failure when memoryIdentifierTokens.size() is one MORE than
    // request.pools.size()
    slots = std::vector<int64_t>(request.pools.size() + 1, -1);
    test(slots);

    // validate failure when releasing an invalid memory identifier token
    const auto freeStatus = burst->releaseMemoryResource(invalidSlot);
    ASSERT_FALSE(freeStatus.isOk());
    ASSERT_EQ(freeStatus.getExceptionCode(), EX_SERVICE_SPECIFIC);
    ASSERT_EQ(static_cast<ErrorStatus>(freeStatus.getServiceSpecificError()),
              ErrorStatus::INVALID_ARGUMENT);
}
void validateRequestFailure(const std::shared_ptr<IPreparedModel>& preparedModel,
const Request& request) {
SCOPED_TRACE("Expecting request to fail [executeSynchronously]");

View file

@ -127,6 +127,8 @@ void validateModel(const std::shared_ptr<IDevice>& device, const Model& model);
// Forward declaration from ValidateRequest.cpp
void validateRequest(const std::shared_ptr<IPreparedModel>& preparedModel, const Request& request);
// Forward declaration from ValidateRequest.cpp
void validateBurst(const std::shared_ptr<IPreparedModel>& preparedModel, const Request& request);
// Forward declaration from ValidateRequest.cpp
void validateRequestFailure(const std::shared_ptr<IPreparedModel>& preparedModel,
const Request& request);
@ -140,6 +142,7 @@ void validateEverything(const std::shared_ptr<IDevice>& device, const Model& mod
if (preparedModel == nullptr) return;
validateRequest(preparedModel, request);
validateBurst(preparedModel, request);
// HIDL also had test that expected executeFenced to fail on received null fd (-1). This is not
// allowed in AIDL and will result in EX_TRANSACTION_FAILED.
}
@ -178,8 +181,6 @@ INSTANTIATE_GENERATED_TEST(ValidationTest, [](const std::string& testName) {
std::string toString(Executor executor) {
switch (executor) {
case Executor::ASYNC:
return "ASYNC";
case Executor::SYNC:
return "SYNC";
case Executor::BURST:

View file

@ -52,7 +52,7 @@ void createPreparedModel(const std::shared_ptr<IDevice>& device, const Model& mo
std::shared_ptr<IPreparedModel>* preparedModel,
bool reportSkipping = true);
enum class Executor { ASYNC, SYNC, BURST, FENCED };
enum class Executor { SYNC, BURST, FENCED };
std::string toString(Executor executor);