Merge "Follow up CL to "Add validation tests for NNAPI Burst serialized format"" into qt-dev
commit e84f442c4c
9 changed files with 206 additions and 189 deletions
@@ -34,7 +34,6 @@ namespace vts {
namespace functional {

using ::android::hardware::neuralnetworks::V1_2::implementation::ExecutionCallback;
using ::android::hardware::neuralnetworks::V1_2::implementation::PreparedModelCallback;
using ::android::hidl::memory::V1_0::IMemory;
using test_helper::for_all;
using test_helper::MixedTyped;
@@ -42,53 +41,6 @@ using test_helper::MixedTypedExample;

///////////////////////// UTILITY FUNCTIONS /////////////////////////

static void createPreparedModel(const sp<IDevice>& device, const V1_0::Model& model,
                                sp<IPreparedModel>* preparedModel) {
    ASSERT_NE(nullptr, preparedModel);

    // see if service can handle model
    bool fullySupportsModel = false;
    Return<void> supportedOpsLaunchStatus = device->getSupportedOperations(
        model, [&fullySupportsModel](ErrorStatus status, const hidl_vec<bool>& supported) {
            ASSERT_EQ(ErrorStatus::NONE, status);
            ASSERT_NE(0ul, supported.size());
            fullySupportsModel =
                std::all_of(supported.begin(), supported.end(), [](bool valid) { return valid; });
        });
    ASSERT_TRUE(supportedOpsLaunchStatus.isOk());

    // launch prepare model
    sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
    ASSERT_NE(nullptr, preparedModelCallback.get());
    Return<ErrorStatus> prepareLaunchStatus = device->prepareModel(model, preparedModelCallback);
    ASSERT_TRUE(prepareLaunchStatus.isOk());
    ASSERT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(prepareLaunchStatus));

    // retrieve prepared model
    preparedModelCallback->wait();
    ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
    *preparedModel = preparedModelCallback->getPreparedModel();

    // The getSupportedOperations call returns a list of operations that are
    // guaranteed not to fail if prepareModel is called, and
    // 'fullySupportsModel' is true i.f.f. the entire model is guaranteed.
    // If a driver has any doubt that it can prepare an operation, it must
    // return false. So here, if a driver isn't sure if it can support an
    // operation, but reports that it successfully prepared the model, the test
    // can continue.
    if (!fullySupportsModel && prepareReturnStatus != ErrorStatus::NONE) {
        ASSERT_EQ(nullptr, preparedModel->get());
        LOG(INFO) << "NN VTS: Unable to test Request validation because vendor service cannot "
                     "prepare model that it does not support.";
        std::cout << "[ ] Unable to test Request validation because vendor service "
                     "cannot prepare model that it does not support."
                  << std::endl;
        return;
    }
    ASSERT_EQ(ErrorStatus::NONE, prepareReturnStatus);
    ASSERT_NE(nullptr, preparedModel->get());
}

// Primary validation function. This function will take a valid request, apply a
// mutation to it to invalidate the request, then pass it to interface calls
// that use the request. Note that the request here is passed by value, and any
@@ -237,15 +189,8 @@ std::vector<Request> createRequests(const std::vector<MixedTypedExample>& exampl
    return requests;
}

void ValidationTest::validateRequests(const V1_0::Model& model,
void ValidationTest::validateRequests(const sp<IPreparedModel>& preparedModel,
                                      const std::vector<Request>& requests) {
    // create IPreparedModel
    sp<IPreparedModel> preparedModel;
    ASSERT_NO_FATAL_FAILURE(createPreparedModel(device, model, &preparedModel));
    if (preparedModel == nullptr) {
        return;
    }

    // validate each request
    for (const Request& request : requests) {
        removeInputTest(preparedModel, request);
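As an aside, a minimal sketch of how a generated test case might exercise the updated validateEverything/validateRequests flow; the TEST_F body and the createModel/get_examples helpers are assumed here (they come from the VTS test generator, not from this change), while createRequests appears in the hunk above:

    TEST_F(ValidationTest, HypotheticalGeneratedCase) {
        // 'createModel' and 'get_examples' are assumed generator-provided helpers.
        const Model model = createModel(get_examples());
        const std::vector<Request> requests = createRequests(get_examples());
        // Prepares the model once, then runs model and request validation.
        validateEverything(model, requests);
    }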
@@ -18,6 +18,10 @@

#include "VtsHalNeuralnetworks.h"

#include <android-base/logging.h>

#include "Callbacks.h"

namespace android {
namespace hardware {
namespace neuralnetworks {
@@ -25,6 +29,55 @@ namespace V1_0 {
namespace vts {
namespace functional {

using ::android::hardware::neuralnetworks::V1_2::implementation::PreparedModelCallback;

static void createPreparedModel(const sp<IDevice>& device, const V1_0::Model& model,
                                sp<IPreparedModel>* preparedModel) {
    ASSERT_NE(nullptr, preparedModel);

    // see if service can handle model
    bool fullySupportsModel = false;
    Return<void> supportedOpsLaunchStatus = device->getSupportedOperations(
        model, [&fullySupportsModel](ErrorStatus status, const hidl_vec<bool>& supported) {
            ASSERT_EQ(ErrorStatus::NONE, status);
            ASSERT_NE(0ul, supported.size());
            fullySupportsModel = std::all_of(supported.begin(), supported.end(),
                                             [](bool valid) { return valid; });
        });
    ASSERT_TRUE(supportedOpsLaunchStatus.isOk());

    // launch prepare model
    sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
    ASSERT_NE(nullptr, preparedModelCallback.get());
    Return<ErrorStatus> prepareLaunchStatus = device->prepareModel(model, preparedModelCallback);
    ASSERT_TRUE(prepareLaunchStatus.isOk());
    ASSERT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(prepareLaunchStatus));

    // retrieve prepared model
    preparedModelCallback->wait();
    ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
    *preparedModel = preparedModelCallback->getPreparedModel();

    // The getSupportedOperations call returns a list of operations that are
    // guaranteed not to fail if prepareModel is called, and
    // 'fullySupportsModel' is true i.f.f. the entire model is guaranteed.
    // If a driver has any doubt that it can prepare an operation, it must
    // return false. So here, if a driver isn't sure if it can support an
    // operation, but reports that it successfully prepared the model, the test
    // can continue.
    if (!fullySupportsModel && prepareReturnStatus != ErrorStatus::NONE) {
        ASSERT_EQ(nullptr, preparedModel->get());
        LOG(INFO) << "NN VTS: Unable to test Request validation because vendor service cannot "
                     "prepare model that it does not support.";
        std::cout << "[ ] Unable to test Request validation because vendor service "
                     "cannot prepare model that it does not support."
                  << std::endl;
        return;
    }
    ASSERT_EQ(ErrorStatus::NONE, prepareReturnStatus);
    ASSERT_NE(nullptr, preparedModel->get());
}

// A class for test environment setup
NeuralnetworksHidlEnvironment::NeuralnetworksHidlEnvironment() {}
@@ -68,9 +121,17 @@ void NeuralnetworksHidlTest::TearDown() {
    ::testing::VtsHalHidlTargetTestBase::TearDown();
}

void ValidationTest::validateEverything(const Model& model, const std::vector<Request>& request) {
void ValidationTest::validateEverything(const Model& model, const std::vector<Request>& requests) {
    validateModel(model);
    validateRequests(model, request);

    // create IPreparedModel
    sp<IPreparedModel> preparedModel;
    ASSERT_NO_FATAL_FAILURE(createPreparedModel(device, model, &preparedModel));
    if (preparedModel == nullptr) {
        return;
    }

    validateRequests(preparedModel, requests);
}

} // namespace functional
@@ -67,7 +67,8 @@ class ValidationTest : public NeuralnetworksHidlTest {

   private:
    void validateModel(const Model& model);
    void validateRequests(const Model& model, const std::vector<Request>& request);
    void validateRequests(const sp<IPreparedModel>& preparedModel,
                          const std::vector<Request>& requests);
};

// Tag for the generated tests
@@ -34,7 +34,6 @@ namespace vts {
namespace functional {

using ::android::hardware::neuralnetworks::V1_2::implementation::ExecutionCallback;
using ::android::hardware::neuralnetworks::V1_2::implementation::PreparedModelCallback;
using ::android::hidl::memory::V1_0::IMemory;
using test_helper::for_all;
using test_helper::MixedTyped;
@@ -42,54 +41,6 @@ using test_helper::MixedTypedExample;

///////////////////////// UTILITY FUNCTIONS /////////////////////////

static void createPreparedModel(const sp<IDevice>& device, const V1_1::Model& model,
                                sp<IPreparedModel>* preparedModel) {
    ASSERT_NE(nullptr, preparedModel);

    // see if service can handle model
    bool fullySupportsModel = false;
    Return<void> supportedOpsLaunchStatus = device->getSupportedOperations_1_1(
        model, [&fullySupportsModel](ErrorStatus status, const hidl_vec<bool>& supported) {
            ASSERT_EQ(ErrorStatus::NONE, status);
            ASSERT_NE(0ul, supported.size());
            fullySupportsModel =
                std::all_of(supported.begin(), supported.end(), [](bool valid) { return valid; });
        });
    ASSERT_TRUE(supportedOpsLaunchStatus.isOk());

    // launch prepare model
    sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
    ASSERT_NE(nullptr, preparedModelCallback.get());
    Return<ErrorStatus> prepareLaunchStatus = device->prepareModel_1_1(
        model, ExecutionPreference::FAST_SINGLE_ANSWER, preparedModelCallback);
    ASSERT_TRUE(prepareLaunchStatus.isOk());
    ASSERT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(prepareLaunchStatus));

    // retrieve prepared model
    preparedModelCallback->wait();
    ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
    *preparedModel = preparedModelCallback->getPreparedModel();

    // The getSupportedOperations_1_1 call returns a list of operations that are
    // guaranteed not to fail if prepareModel_1_1 is called, and
    // 'fullySupportsModel' is true i.f.f. the entire model is guaranteed.
    // If a driver has any doubt that it can prepare an operation, it must
    // return false. So here, if a driver isn't sure if it can support an
    // operation, but reports that it successfully prepared the model, the test
    // can continue.
    if (!fullySupportsModel && prepareReturnStatus != ErrorStatus::NONE) {
        ASSERT_EQ(nullptr, preparedModel->get());
        LOG(INFO) << "NN VTS: Unable to test Request validation because vendor service cannot "
                     "prepare model that it does not support.";
        std::cout << "[ ] Unable to test Request validation because vendor service "
                     "cannot prepare model that it does not support."
                  << std::endl;
        return;
    }
    ASSERT_EQ(ErrorStatus::NONE, prepareReturnStatus);
    ASSERT_NE(nullptr, preparedModel->get());
}

// Primary validation function. This function will take a valid request, apply a
// mutation to it to invalidate the request, then pass it to interface calls
// that use the request. Note that the request here is passed by value, and any
@@ -238,15 +189,8 @@ std::vector<Request> createRequests(const std::vector<MixedTypedExample>& exampl
    return requests;
}

void ValidationTest::validateRequests(const V1_1::Model& model,
void ValidationTest::validateRequests(const sp<IPreparedModel>& preparedModel,
                                      const std::vector<Request>& requests) {
    // create IPreparedModel
    sp<IPreparedModel> preparedModel;
    ASSERT_NO_FATAL_FAILURE(createPreparedModel(device, model, &preparedModel));
    if (preparedModel == nullptr) {
        return;
    }

    // validate each request
    for (const Request& request : requests) {
        removeInputTest(preparedModel, request);
@@ -18,6 +18,10 @@

#include "VtsHalNeuralnetworks.h"

#include <android-base/logging.h>

#include "Callbacks.h"

namespace android {
namespace hardware {
namespace neuralnetworks {
@@ -25,6 +29,56 @@ namespace V1_1 {
namespace vts {
namespace functional {

using ::android::hardware::neuralnetworks::V1_2::implementation::PreparedModelCallback;

static void createPreparedModel(const sp<IDevice>& device, const V1_1::Model& model,
                                sp<IPreparedModel>* preparedModel) {
    ASSERT_NE(nullptr, preparedModel);

    // see if service can handle model
    bool fullySupportsModel = false;
    Return<void> supportedOpsLaunchStatus = device->getSupportedOperations_1_1(
        model, [&fullySupportsModel](ErrorStatus status, const hidl_vec<bool>& supported) {
            ASSERT_EQ(ErrorStatus::NONE, status);
            ASSERT_NE(0ul, supported.size());
            fullySupportsModel = std::all_of(supported.begin(), supported.end(),
                                             [](bool valid) { return valid; });
        });
    ASSERT_TRUE(supportedOpsLaunchStatus.isOk());

    // launch prepare model
    sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
    ASSERT_NE(nullptr, preparedModelCallback.get());
    Return<ErrorStatus> prepareLaunchStatus = device->prepareModel_1_1(
        model, ExecutionPreference::FAST_SINGLE_ANSWER, preparedModelCallback);
    ASSERT_TRUE(prepareLaunchStatus.isOk());
    ASSERT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(prepareLaunchStatus));

    // retrieve prepared model
    preparedModelCallback->wait();
    ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
    *preparedModel = preparedModelCallback->getPreparedModel();

    // The getSupportedOperations_1_1 call returns a list of operations that are
    // guaranteed not to fail if prepareModel_1_1 is called, and
    // 'fullySupportsModel' is true i.f.f. the entire model is guaranteed.
    // If a driver has any doubt that it can prepare an operation, it must
    // return false. So here, if a driver isn't sure if it can support an
    // operation, but reports that it successfully prepared the model, the test
    // can continue.
    if (!fullySupportsModel && prepareReturnStatus != ErrorStatus::NONE) {
        ASSERT_EQ(nullptr, preparedModel->get());
        LOG(INFO) << "NN VTS: Unable to test Request validation because vendor service cannot "
                     "prepare model that it does not support.";
        std::cout << "[ ] Unable to test Request validation because vendor service "
                     "cannot prepare model that it does not support."
                  << std::endl;
        return;
    }
    ASSERT_EQ(ErrorStatus::NONE, prepareReturnStatus);
    ASSERT_NE(nullptr, preparedModel->get());
}

// A class for test environment setup
NeuralnetworksHidlEnvironment::NeuralnetworksHidlEnvironment() {}
@@ -68,9 +122,17 @@ void NeuralnetworksHidlTest::TearDown() {
    ::testing::VtsHalHidlTargetTestBase::TearDown();
}

void ValidationTest::validateEverything(const Model& model, const std::vector<Request>& request) {
void ValidationTest::validateEverything(const Model& model, const std::vector<Request>& requests) {
    validateModel(model);
    validateRequests(model, request);

    // create IPreparedModel
    sp<IPreparedModel> preparedModel;
    ASSERT_NO_FATAL_FAILURE(createPreparedModel(device, model, &preparedModel));
    if (preparedModel == nullptr) {
        return;
    }

    validateRequests(preparedModel, requests);
}

} // namespace functional
@@ -76,7 +76,8 @@ class ValidationTest : public NeuralnetworksHidlTest {

   private:
    void validateModel(const Model& model);
    void validateRequests(const Model& model, const std::vector<Request>& request);
    void validateRequests(const sp<IPreparedModel>& preparedModel,
                          const std::vector<Request>& requests);
};

// Tag for the generated tests
@@ -1,5 +1,5 @@
/*
 * Copyright (C) 2018 The Android Open Source Project
 * Copyright (C) 2019 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@@ -39,7 +39,13 @@ using ::android::nn::RequestChannelSender;
using ::android::nn::ResultChannelReceiver;
using ExecutionBurstCallback = ::android::nn::ExecutionBurstController::ExecutionBurstCallback;

// This constant value represents the length of an FMQ that is large enough to
// return a result from a burst execution for all of the generated test cases.
constexpr size_t kExecutionBurstChannelLength = 1024;

// This constant value represents a length of an FMQ that is not large enough
// to return a result from a burst execution for some of the generated test
// cases.
constexpr size_t kExecutionBurstChannelSmallLength = 8;

///////////////////////// UTILITY FUNCTIONS /////////////////////////
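A rough sketch of what these two lengths imply (illustration only, not code from this change): a burst result can be delivered only if its serialized form fits in the result FMQ, so the 1024-entry channel is expected to hold every generated result while the 8-entry channel is expected to be too small for most of them. The helper below is hypothetical; the serialize call is the same one used later in this file.

    // Hypothetical helper: does a serialized execution result fit in a result
    // channel of the given length?
    static bool resultFitsInChannel(ErrorStatus status, const std::vector<OutputShape>& outputShapes,
                                    Timing timing, size_t channelLength) {
        const std::vector<FmqResultDatum> serialized =
                ::android::nn::serialize(status, outputShapes, timing);
        return serialized.size() <= channelLength;
    }
    // Expectation: fits for kExecutionBurstChannelLength (1024) on all generated
    // cases, but not for kExecutionBurstChannelSmallLength (8) on most of them.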
@@ -51,7 +57,8 @@ static bool badTiming(Timing timing) {
static void createBurst(const sp<IPreparedModel>& preparedModel, const sp<IBurstCallback>& callback,
                        std::unique_ptr<RequestChannelSender>* sender,
                        std::unique_ptr<ResultChannelReceiver>* receiver,
                        sp<IBurstContext>* context) {
                        sp<IBurstContext>* context,
                        size_t resultChannelLength = kExecutionBurstChannelLength) {
    ASSERT_NE(nullptr, preparedModel.get());
    ASSERT_NE(nullptr, sender);
    ASSERT_NE(nullptr, receiver);
@@ -61,7 +68,7 @@ static void createBurst(const sp<IPreparedModel>& preparedModel, const sp<IBurst
    auto [fmqRequestChannel, fmqRequestDescriptor] =
        RequestChannelSender::create(kExecutionBurstChannelLength, /*blocking=*/true);
    auto [fmqResultChannel, fmqResultDescriptor] =
        ResultChannelReceiver::create(kExecutionBurstChannelLength, /*blocking=*/true);
        ResultChannelReceiver::create(resultChannelLength, /*blocking=*/true);
    ASSERT_NE(nullptr, fmqRequestChannel.get());
    ASSERT_NE(nullptr, fmqResultChannel.get());
    ASSERT_NE(nullptr, fmqRequestDescriptor);
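Because the new resultChannelLength parameter defaults to kExecutionBurstChannelLength, existing callers of createBurst keep their current behavior; only tests that want a deliberately undersized result FMQ pass the extra argument. A minimal usage sketch, wrapped in a hypothetical helper so the preparedModel and callback values are explicit:

    // Hypothetical helper (not part of this change) showing both call forms.
    static void createDefaultAndSmallBursts(const sp<IPreparedModel>& preparedModel,
                                            const sp<IBurstCallback>& callback) {
        std::unique_ptr<RequestChannelSender> sender;
        std::unique_ptr<ResultChannelReceiver> receiver;
        sp<IBurstContext> context;
        // Default: result channel of kExecutionBurstChannelLength (1024) entries.
        createBurst(preparedModel, callback, &sender, &receiver, &context);
        // Explicit: result channel too small to hold most serialized results.
        createBurst(preparedModel, callback, &sender, &receiver, &context,
                    kExecutionBurstChannelSmallLength);
    }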
@@ -87,38 +94,25 @@ static void createBurst(const sp<IPreparedModel>& preparedModel, const sp<IBurst
}

static void createBurstWithResultChannelLength(
        const sp<IPreparedModel>& preparedModel,
        std::shared_ptr<ExecutionBurstController>* controller, size_t resultChannelLength) {
        const sp<IPreparedModel>& preparedModel, size_t resultChannelLength,
        std::shared_ptr<ExecutionBurstController>* controller) {
    ASSERT_NE(nullptr, preparedModel.get());
    ASSERT_NE(nullptr, controller);

    // create FMQ objects
    auto [fmqRequestChannel, fmqRequestDescriptor] =
        RequestChannelSender::create(kExecutionBurstChannelLength, /*blocking=*/true);
    auto [fmqResultChannel, fmqResultDescriptor] =
        ResultChannelReceiver::create(resultChannelLength, /*blocking=*/true);
    ASSERT_NE(nullptr, fmqRequestChannel.get());
    ASSERT_NE(nullptr, fmqResultChannel.get());
    ASSERT_NE(nullptr, fmqRequestDescriptor);
    ASSERT_NE(nullptr, fmqResultDescriptor);

    // configure burst
    std::unique_ptr<RequestChannelSender> sender;
    std::unique_ptr<ResultChannelReceiver> receiver;
    sp<ExecutionBurstCallback> callback = new ExecutionBurstCallback();
    ErrorStatus errorStatus;
    sp<IBurstContext> burstContext;
    const Return<void> ret = preparedModel->configureExecutionBurst(
        callback, *fmqRequestDescriptor, *fmqResultDescriptor,
        [&errorStatus, &burstContext](ErrorStatus status, const sp<IBurstContext>& context) {
            errorStatus = status;
            burstContext = context;
        });
    ASSERT_TRUE(ret.isOk());
    ASSERT_EQ(ErrorStatus::NONE, errorStatus);
    ASSERT_NE(nullptr, burstContext.get());
    sp<IBurstContext> context;
    ASSERT_NO_FATAL_FAILURE(createBurst(preparedModel, callback, &sender, &receiver, &context,
                                        resultChannelLength));
    ASSERT_NE(nullptr, sender.get());
    ASSERT_NE(nullptr, receiver.get());
    ASSERT_NE(nullptr, context.get());

    // return values
    *controller = std::make_shared<ExecutionBurstController>(
        std::move(fmqRequestChannel), std::move(fmqResultChannel), burstContext, callback);
    *controller = std::make_shared<ExecutionBurstController>(std::move(sender), std::move(receiver),
                                                             context, callback);
}

// Primary validation function. This function will take a valid serialized
@@ -139,7 +133,7 @@ static void validate(RequestChannelSender* sender, ResultChannelReceiver* receiv
    SCOPED_TRACE(message);

    // send invalid packet
    sender->sendPacket(serialized);
    ASSERT_TRUE(sender->sendPacket(serialized));

    // receive error
    auto results = receiver->getBlocking();
@@ -150,27 +144,34 @@ static void validate(RequestChannelSender* sender, ResultChannelReceiver* receiv
    EXPECT_TRUE(badTiming(timing));
}

static std::vector<FmqRequestDatum> createUniqueDatum() {
// For validation, valid packet entries are mutated to invalid packet entries,
// or invalid packet entries are inserted into valid packets. This function
// creates pre-set invalid packet entries for convenience.
static std::vector<FmqRequestDatum> createBadRequestPacketEntries() {
    const FmqRequestDatum::PacketInformation packetInformation = {
        /*.packetSize=*/10, /*.numberOfInputOperands=*/10, /*.numberOfOutputOperands=*/10,
        /*.numberOfPools=*/10};
    const FmqRequestDatum::OperandInformation operandInformation = {
        /*.hasNoValue=*/false, /*.location=*/{}, /*.numberOfDimensions=*/10};
    const int32_t invalidPoolIdentifier = std::numeric_limits<int32_t>::max();
    std::vector<FmqRequestDatum> unique(7);
    unique[0].packetInformation(packetInformation);
    unique[1].inputOperandInformation(operandInformation);
    unique[2].inputOperandDimensionValue(0);
    unique[3].outputOperandInformation(operandInformation);
    unique[4].outputOperandDimensionValue(0);
    unique[5].poolIdentifier(invalidPoolIdentifier);
    unique[6].measureTiming(MeasureTiming::YES);
    return unique;
    std::vector<FmqRequestDatum> bad(7);
    bad[0].packetInformation(packetInformation);
    bad[1].inputOperandInformation(operandInformation);
    bad[2].inputOperandDimensionValue(0);
    bad[3].outputOperandInformation(operandInformation);
    bad[4].outputOperandDimensionValue(0);
    bad[5].poolIdentifier(invalidPoolIdentifier);
    bad[6].measureTiming(MeasureTiming::YES);
    return bad;
}

static const std::vector<FmqRequestDatum>& getUniqueDatum() {
    static const std::vector<FmqRequestDatum> unique = createUniqueDatum();
    return unique;
// For validation, valid packet entries are mutated to invalid packet entries,
// or invalid packet entries are inserted into valid packets. This function
// retrieves pre-set invalid packet entries for convenience. This function
// caches these data so they can be reused on subsequent validation checks.
static const std::vector<FmqRequestDatum>& getBadRequestPacketEntries() {
    static const std::vector<FmqRequestDatum> bad = createBadRequestPacketEntries();
    return bad;
}

///////////////////////// REMOVE DATUM ////////////////////////////////////
@@ -190,7 +191,7 @@ static void removeDatumTest(RequestChannelSender* sender, ResultChannelReceiver*

static void addDatumTest(RequestChannelSender* sender, ResultChannelReceiver* receiver,
                         const std::vector<FmqRequestDatum>& serialized) {
    const std::vector<FmqRequestDatum>& extra = getUniqueDatum();
    const std::vector<FmqRequestDatum>& extra = getBadRequestPacketEntries();
    for (size_t index = 0; index <= serialized.size(); ++index) {
        for (size_t type = 0; type < extra.size(); ++type) {
            const std::string message = "addDatum: added datum type " + std::to_string(type) +
@@ -209,17 +210,17 @@ static bool interestingCase(const FmqRequestDatum& lhs, const FmqRequestDatum& r
    using Discriminator = FmqRequestDatum::hidl_discriminator;

    const bool differentValues = (lhs != rhs);
    const bool sameSumType = (lhs.getDiscriminator() == rhs.getDiscriminator());
    const bool sameDiscriminator = (lhs.getDiscriminator() == rhs.getDiscriminator());
    const auto discriminator = rhs.getDiscriminator();
    const bool isDimensionValue = (discriminator == Discriminator::inputOperandDimensionValue ||
                                   discriminator == Discriminator::outputOperandDimensionValue);

    return differentValues && !(sameSumType && isDimensionValue);
    return differentValues && !(sameDiscriminator && isDimensionValue);
}

static void mutateDatumTest(RequestChannelSender* sender, ResultChannelReceiver* receiver,
                            const std::vector<FmqRequestDatum>& serialized) {
    const std::vector<FmqRequestDatum>& change = getUniqueDatum();
    const std::vector<FmqRequestDatum>& change = getBadRequestPacketEntries();
    for (size_t index = 0; index < serialized.size(); ++index) {
        for (size_t type = 0; type < change.size(); ++type) {
            if (interestingCase(serialized[index], change[type])) {
@@ -252,17 +253,17 @@ static void validateBurstSerialization(const sp<IPreparedModel>& preparedModel,
    // validate each request
    for (const Request& request : requests) {
        // load memory into callback slots
        std::vector<intptr_t> keys(request.pools.size());
        for (size_t i = 0; i < keys.size(); ++i) {
            keys[i] = reinterpret_cast<intptr_t>(&request.pools[i]);
        }
        std::vector<intptr_t> keys;
        keys.reserve(request.pools.size());
        std::transform(request.pools.begin(), request.pools.end(), std::back_inserter(keys),
                       [](const auto& pool) { return reinterpret_cast<intptr_t>(&pool); });
        const std::vector<int32_t> slots = callback->getSlots(request.pools, keys);

        // ensure slot std::numeric_limits<int32_t>::max() doesn't exist (for
        // subsequent slot validation testing)
        const auto maxElement = std::max_element(slots.begin(), slots.end());
        ASSERT_NE(slots.end(), maxElement);
        ASSERT_NE(std::numeric_limits<int32_t>::max(), *maxElement);
        ASSERT_TRUE(std::all_of(slots.begin(), slots.end(), [](int32_t slot) {
            return slot != std::numeric_limits<int32_t>::max();
        }));

        // serialize the request
        const auto serialized = ::android::nn::serialize(request, MeasureTiming::YES, slots);
@@ -274,18 +275,20 @@ static void validateBurstSerialization(const sp<IPreparedModel>& preparedModel,
    }
}

// This test validates that when the Result message size exceeds length of the
// result FMQ, the service instance gracefully fails and returns an error.
static void validateBurstFmqLength(const sp<IPreparedModel>& preparedModel,
                                   const std::vector<Request>& requests) {
    // create regular burst
    std::shared_ptr<ExecutionBurstController> controllerRegular;
    ASSERT_NO_FATAL_FAILURE(createBurstWithResultChannelLength(preparedModel, &controllerRegular,
                                                               kExecutionBurstChannelLength));
    ASSERT_NO_FATAL_FAILURE(createBurstWithResultChannelLength(
        preparedModel, kExecutionBurstChannelLength, &controllerRegular));
    ASSERT_NE(nullptr, controllerRegular.get());

    // create burst with small output channel
    std::shared_ptr<ExecutionBurstController> controllerSmall;
    ASSERT_NO_FATAL_FAILURE(createBurstWithResultChannelLength(preparedModel, &controllerSmall,
                                                               kExecutionBurstChannelSmallLength));
    ASSERT_NO_FATAL_FAILURE(createBurstWithResultChannelLength(
        preparedModel, kExecutionBurstChannelSmallLength, &controllerSmall));
    ASSERT_NE(nullptr, controllerSmall.get());

    // validate each request
@@ -297,24 +300,25 @@ static void validateBurstFmqLength(const sp<IPreparedModel>& preparedModel,
        }

        // collect serialized result by running regular burst
        const auto [status1, outputShapes1, timing1] =
        const auto [statusRegular, outputShapesRegular, timingRegular] =
            controllerRegular->compute(request, MeasureTiming::NO, keys);

        // skip test if synchronous output isn't useful
        // skip test if regular burst output isn't useful for testing a failure
        // caused by having too small of a length for the result FMQ
        const std::vector<FmqResultDatum> serialized =
            ::android::nn::serialize(status1, outputShapes1, timing1);
        if (status1 != ErrorStatus::NONE ||
            ::android::nn::serialize(statusRegular, outputShapesRegular, timingRegular);
        if (statusRegular != ErrorStatus::NONE ||
            serialized.size() <= kExecutionBurstChannelSmallLength) {
            continue;
        }

        // by this point, execution should fail because the result channel isn't
        // large enough to return the serialized result
        const auto [status2, outputShapes2, timing2] =
        const auto [statusSmall, outputShapesSmall, timingSmall] =
            controllerSmall->compute(request, MeasureTiming::NO, keys);
        EXPECT_NE(ErrorStatus::NONE, status2);
        EXPECT_EQ(0u, outputShapes2.size());
        EXPECT_TRUE(badTiming(timing2));
        EXPECT_NE(ErrorStatus::NONE, statusSmall);
        EXPECT_EQ(0u, outputShapesSmall.size());
        EXPECT_TRUE(badTiming(timingSmall));
    }
}
@@ -29,7 +29,6 @@ namespace V1_2 {
namespace vts {
namespace functional {

using ::android::hardware::neuralnetworks::V1_2::implementation::ExecutionCallback;
using ::android::hardware::neuralnetworks::V1_2::implementation::PreparedModelCallback;
using HidlToken = hidl_array<uint8_t, static_cast<uint32_t>(Constant::BYTE_SIZE_OF_CACHE_TOKEN)>;
using V1_1::ExecutionPreference;
@@ -127,7 +126,7 @@ void NeuralnetworksHidlTest::TearDown() {
    ::testing::VtsHalHidlTargetTestBase::TearDown();
}

void ValidationTest::validateEverything(const Model& model, const std::vector<Request>& request) {
void ValidationTest::validateEverything(const Model& model, const std::vector<Request>& requests) {
    validateModel(model);

    // create IPreparedModel
@@ -137,8 +136,8 @@ void ValidationTest::validateEverything(const Model& model, const std::vector<Re
        return;
    }

    validateRequests(preparedModel, request);
    validateBurst(preparedModel, request);
    validateRequests(preparedModel, requests);
    validateBurst(preparedModel, requests);
}

sp<IPreparedModel> getPreparedModel_1_2(
@@ -72,7 +72,7 @@ class NeuralnetworksHidlTest : public ::testing::VtsHalHidlTargetTestBase {
// Tag for the validation tests
class ValidationTest : public NeuralnetworksHidlTest {
   protected:
    void validateEverything(const Model& model, const std::vector<Request>& request);
    void validateEverything(const Model& model, const std::vector<Request>& requests);

   private:
    void validateModel(const Model& model);