Merge changes from topic "quant_coupling"

* changes:
  Add QUANT8_ASYMM_SIGNED support to SELECT op
  Add quantization coupling test
  Combine test parameters into TestConfig structure
commit 76f5263d6f
Slava Shklyaev 2019-11-26 17:44:27 +00:00, committed by Gerrit Code Review
9 changed files with 259 additions and 153 deletions

@ -578,7 +578,7 @@ f1109cbb10297b7429a11fab42afa912710b303c9bf20bd5cdb8bd57b9c84186 android.hardwar
9d8ee57c490ffeaa28f702eaea8d198cb510e4bbfb99e6cb5f63e73341057c7c android.hardware.neuralnetworks@1.1::types
fb382e986c10b8fbb797a8546e8f9ea6d1107bfe6f3fb7e57f6bbbf1f807a906 android.hardware.neuralnetworks@1.2::IDevice
40e71cd693de5b832325c5d8f081f2ff20a7ba2b89d401cee5b4b3eb0e241681 android.hardware.neuralnetworks@1.2::IPreparedModel
71c0f7127335e5b74d1615d5e7f129831b43ffbae5318ad0924d7d8d8910a859 android.hardware.neuralnetworks@1.2::types
72de91c3feba4b19c159cd1c413cbea596b78240caa43e31194e20e6f5b05c49 android.hardware.neuralnetworks@1.2::types
a785a57447a81e9c130eef6904c3a5c256076c6a04588c40620ebd6fa2660d77 android.hardware.radio@1.2::types
1a6e2bd289f22931c526b21916910f1d4c436b7acb9556e4243de4ce8e6cc2e4 android.hardware.soundtrigger@2.0::ISoundTriggerHwCallback
fd65298e1e09e0e3c781ab18305920d757dbe55a3b459ce17814ec5cf6dfee99 android.hardware.wifi@1.0::IWifiP2pIface
@ -597,7 +597,7 @@ adb0efdf1462e9b2e742c0dcadd598666aac551f178be06e755bfcdf5797abd0 android.hardwar
9e59fffceed0dd72a9799e04505db5f777bbbea1af0695ba4107ef6d967c6fda android.hardware.neuralnetworks@1.3::IDevice
4a6c3b3556da951b4def21ba579a227c022980fe4465df6cdfbe20628fa75f5a android.hardware.neuralnetworks@1.3::IPreparedModel
94e803236398bed1febb11cc21051bc42ec003700139b099d6c479e02a7ca3c3 android.hardware.neuralnetworks@1.3::IPreparedModelCallback
c511b1427b1c3f76af90967bbddaaf250db983a8d3abb9ff189fb5a807cf3d4d android.hardware.neuralnetworks@1.3::types
554baa3b317e077b850afcbaac99daeef56861b1786540e56275a4fcad1f43e3 android.hardware.neuralnetworks@1.3::types
274fb1254a6d1a97824ec5c880eeefc0e410dc6d3a2a4c34052201169d2b7de0 android.hardware.radio@1.5::types
c8e81d912827a5d49b2ddcdc4eb4556c5d231a899a1dca879309e04210daa4a0 android.hardware.radio@1.5::IRadio
a62a93faf173b14a6175b683ebf61ffa568dc61f81e369d2dce7b1265e86cf2f android.hardware.radio@1.5::IRadioIndication

@ -2448,15 +2448,17 @@ enum OperationType : int32_t {
* then clipping is disabled.
* If all the input tensors have type {@link OperandType::TENSOR_FLOAT32},
* this scalar must be of the type {@link OperandType::FLOAT32},
* otherwise if all the input tensors have the type {@link OperandType::TENSOR_FLOAT16},
* this scalar must be of type {@link OperandType::FLOAT16}.
* otherwise if all the input tensors have the type
* {@link OperandType::TENSOR_FLOAT16}, this scalar must be
* of type {@link OperandType::FLOAT16}.
* * 50: The clipping threshold for the output from the
* projection layer, such that values are bound within
* [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
* If all the input tensors have type {@link OperandType::TENSOR_FLOAT32},
* this scalar must be of the type {@link OperandType::FLOAT32},
* otherwise if all the input tensors have the type {@link OperandType::TENSOR_FLOAT16},
* this scalar must be of type {@link OperandType::FLOAT16}.
* otherwise if all the input tensors have the type
* {@link OperandType::TENSOR_FLOAT16}, this scalar must be
* of type {@link OperandType::FLOAT16}.
* * 51: merge_outputs
* An {@link OperandType::BOOL} scalar specifying if the outputs
* from forward and backward cells should be merged.
@ -4124,7 +4126,6 @@ enum OperationType : int32_t {
* * 0: A tensor of the same type and shape as input1 and input2.
* For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
* the scale and zeroPoint can be different from inputs' scale and zeroPoint.
*
*/
SELECT = 84,

@ -58,8 +58,20 @@ using V1_0::Request;
using V1_1::ExecutionPreference;
using HidlToken = hidl_array<uint8_t, static_cast<uint32_t>(Constant::BYTE_SIZE_OF_CACHE_TOKEN)>;
namespace {
enum class Executor { ASYNC, SYNC, BURST };
enum class OutputType { FULLY_SPECIFIED, UNSPECIFIED, INSUFFICIENT };
struct TestConfig {
Executor executor;
MeasureTiming measureTiming;
OutputType outputType;
};
} // namespace
Model createModel(const TestModel& testModel) {
// Model operands.
hidl_vec<Operand> operands(testModel.operands.size());
@ -194,31 +206,31 @@ static std::shared_ptr<::android::nn::ExecutionBurstController> CreateBurst(
return android::nn::ExecutionBurstController::create(preparedModel,
std::chrono::microseconds{0});
}
enum class Executor { ASYNC, SYNC, BURST };
void EvaluatePreparedModel(const sp<IPreparedModel>& preparedModel, const TestModel& testModel,
Executor executor, MeasureTiming measure, OutputType outputType) {
const TestConfig& testConfig) {
// If output0 is not larger than one byte, we cannot test with an insufficient buffer.
if (outputType == OutputType::INSUFFICIENT && !isOutputSizeGreaterThanOne(testModel, 0)) {
if (testConfig.outputType == OutputType::INSUFFICIENT &&
!isOutputSizeGreaterThanOne(testModel, 0)) {
return;
}
Request request = createRequest(testModel);
if (outputType == OutputType::INSUFFICIENT) {
if (testConfig.outputType == OutputType::INSUFFICIENT) {
makeOutputInsufficientSize(/*outputIndex=*/0, &request);
}
ErrorStatus executionStatus;
hidl_vec<OutputShape> outputShapes;
Timing timing;
switch (executor) {
switch (testConfig.executor) {
case Executor::ASYNC: {
SCOPED_TRACE("asynchronous");
// launch execution
sp<ExecutionCallback> executionCallback = new ExecutionCallback();
Return<ErrorStatus> executionLaunchStatus =
ExecutePreparedModel(preparedModel, request, measure, executionCallback);
Return<ErrorStatus> executionLaunchStatus = ExecutePreparedModel(
preparedModel, request, testConfig.measureTiming, executionCallback);
ASSERT_TRUE(executionLaunchStatus.isOk());
EXPECT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(executionLaunchStatus));
@ -234,8 +246,8 @@ void EvaluatePreparedModel(const sp<IPreparedModel>& preparedModel, const TestMo
SCOPED_TRACE("synchronous");
// execute
Return<ErrorStatus> executionReturnStatus =
ExecutePreparedModel(preparedModel, request, measure, &outputShapes, &timing);
Return<ErrorStatus> executionReturnStatus = ExecutePreparedModel(
preparedModel, request, testConfig.measureTiming, &outputShapes, &timing);
ASSERT_TRUE(executionReturnStatus.isOk());
executionStatus = static_cast<ErrorStatus>(executionReturnStatus);
@ -258,14 +270,14 @@ void EvaluatePreparedModel(const sp<IPreparedModel>& preparedModel, const TestMo
// execute burst
int n;
std::tie(n, outputShapes, timing, std::ignore) =
controller->compute(request, measure, keys);
controller->compute(request, testConfig.measureTiming, keys);
executionStatus = nn::convertResultCodeToErrorStatus(n);
break;
}
}
if (outputType != OutputType::FULLY_SPECIFIED &&
if (testConfig.outputType != OutputType::FULLY_SPECIFIED &&
executionStatus == ErrorStatus::GENERAL_FAILURE) {
LOG(INFO) << "NN VTS: Early termination of test because vendor service cannot "
"execute model that it does not support.";
@ -274,7 +286,7 @@ void EvaluatePreparedModel(const sp<IPreparedModel>& preparedModel, const TestMo
<< std::endl;
GTEST_SKIP();
}
if (measure == MeasureTiming::NO) {
if (testConfig.measureTiming == MeasureTiming::NO) {
EXPECT_EQ(UINT64_MAX, timing.timeOnDevice);
EXPECT_EQ(UINT64_MAX, timing.timeInDriver);
} else {
@ -283,7 +295,7 @@ void EvaluatePreparedModel(const sp<IPreparedModel>& preparedModel, const TestMo
}
}
switch (outputType) {
switch (testConfig.outputType) {
case OutputType::FULLY_SPECIFIED:
// If the model output operands are fully specified, outputShapes must be
// either empty, or have the same number of elements as the number of outputs.
@ -321,44 +333,29 @@ void EvaluatePreparedModel(const sp<IPreparedModel>& preparedModel, const TestMo
void EvaluatePreparedModel(const sp<IPreparedModel>& preparedModel, const TestModel& testModel,
bool testDynamicOutputShape) {
std::initializer_list<OutputType> outputTypesList;
std::initializer_list<MeasureTiming> measureTimingList;
std::initializer_list<Executor> executorList;
if (testDynamicOutputShape) {
EvaluatePreparedModel(preparedModel, testModel, Executor::ASYNC, MeasureTiming::NO,
OutputType::UNSPECIFIED);
EvaluatePreparedModel(preparedModel, testModel, Executor::SYNC, MeasureTiming::NO,
OutputType::UNSPECIFIED);
EvaluatePreparedModel(preparedModel, testModel, Executor::BURST, MeasureTiming::NO,
OutputType::UNSPECIFIED);
EvaluatePreparedModel(preparedModel, testModel, Executor::ASYNC, MeasureTiming::YES,
OutputType::UNSPECIFIED);
EvaluatePreparedModel(preparedModel, testModel, Executor::SYNC, MeasureTiming::YES,
OutputType::UNSPECIFIED);
EvaluatePreparedModel(preparedModel, testModel, Executor::BURST, MeasureTiming::YES,
OutputType::UNSPECIFIED);
EvaluatePreparedModel(preparedModel, testModel, Executor::ASYNC, MeasureTiming::NO,
OutputType::INSUFFICIENT);
EvaluatePreparedModel(preparedModel, testModel, Executor::SYNC, MeasureTiming::NO,
OutputType::INSUFFICIENT);
EvaluatePreparedModel(preparedModel, testModel, Executor::BURST, MeasureTiming::NO,
OutputType::INSUFFICIENT);
EvaluatePreparedModel(preparedModel, testModel, Executor::ASYNC, MeasureTiming::YES,
OutputType::INSUFFICIENT);
EvaluatePreparedModel(preparedModel, testModel, Executor::SYNC, MeasureTiming::YES,
OutputType::INSUFFICIENT);
EvaluatePreparedModel(preparedModel, testModel, Executor::BURST, MeasureTiming::YES,
OutputType::INSUFFICIENT);
outputTypesList = {OutputType::UNSPECIFIED, OutputType::INSUFFICIENT};
measureTimingList = {MeasureTiming::NO, MeasureTiming::YES};
executorList = {Executor::ASYNC, Executor::SYNC, Executor::BURST};
} else {
EvaluatePreparedModel(preparedModel, testModel, Executor::ASYNC, MeasureTiming::NO,
OutputType::FULLY_SPECIFIED);
EvaluatePreparedModel(preparedModel, testModel, Executor::SYNC, MeasureTiming::NO,
OutputType::FULLY_SPECIFIED);
EvaluatePreparedModel(preparedModel, testModel, Executor::BURST, MeasureTiming::NO,
OutputType::FULLY_SPECIFIED);
EvaluatePreparedModel(preparedModel, testModel, Executor::ASYNC, MeasureTiming::YES,
OutputType::FULLY_SPECIFIED);
EvaluatePreparedModel(preparedModel, testModel, Executor::SYNC, MeasureTiming::YES,
OutputType::FULLY_SPECIFIED);
EvaluatePreparedModel(preparedModel, testModel, Executor::BURST, MeasureTiming::YES,
OutputType::FULLY_SPECIFIED);
outputTypesList = {OutputType::FULLY_SPECIFIED};
measureTimingList = {MeasureTiming::NO, MeasureTiming::YES};
executorList = {Executor::ASYNC, Executor::SYNC, Executor::BURST};
}
for (const OutputType outputType : outputTypesList) {
for (const MeasureTiming measureTiming : measureTimingList) {
for (const Executor executor : executorList) {
const TestConfig testConfig = {.executor = executor,
.measureTiming = measureTiming,
.outputType = outputType};
EvaluatePreparedModel(preparedModel, testModel, testConfig);
}
}
}
}
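
With the TestConfig structure in place, the twelve hand-written invocations collapse into the nested loop above over every combination of output type, timing mode, and executor. If a single combination ever needs to be exercised on its own (for example while bisecting a driver failure), a config can be aggregate-initialized directly. A minimal sketch, assuming the preparedModel and testModel objects from the surrounding test fixture:

// Run one explicit configuration instead of the full cross product.
const TestConfig debugConfig = {.executor = Executor::SYNC,
                                .measureTiming = MeasureTiming::YES,
                                .outputType = OutputType::FULLY_SPECIFIED};
EvaluatePreparedModel(preparedModel, testModel, debugConfig);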

@ -2375,15 +2375,17 @@ enum OperationType : int32_t {
* then clipping is disabled.
* If all the input tensors have type {@link OperandType::TENSOR_FLOAT32},
* this scalar must be of the type {@link OperandType::FLOAT32},
* otherwise if all the input tensors have the type {@link OperandType::TENSOR_FLOAT16},
* this scalar must be of type {@link OperandType::FLOAT16}.
* otherwise if all the input tensors have the type
* {@link OperandType::TENSOR_FLOAT16}, this scalar must be
* of type {@link OperandType::FLOAT16}.
* * 50: The clipping threshold for the output from the
* projection layer, such that values are bound within
* [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
* If all the input tensors have type {@link OperandType::TENSOR_FLOAT32},
* this scalar must be of the type {@link OperandType::FLOAT32},
* otherwise if all the input tensors have the type {@link OperandType::TENSOR_FLOAT16},
* this scalar must be of type {@link OperandType::FLOAT16}.
* otherwise if all the input tensors have the type
* {@link OperandType::TENSOR_FLOAT16}, this scalar must be
* of type {@link OperandType::FLOAT16}.
* * 51: merge_outputs
* An {@link OperandType::BOOL} scalar specifying if the outputs
* from forward and backward cells should be merged.
@ -4034,6 +4036,7 @@ enum OperationType : int32_t {
* * {@link OperandType::TENSOR_FLOAT32}
* * {@link OperandType::TENSOR_INT32}
* * {@link OperandType::TENSOR_QUANT8_ASYMM}
* * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
*
* Supported tensor rank: from 1
*
@ -4044,14 +4047,14 @@ enum OperationType : int32_t {
* true) or input2 (if false).
* * 1: An input tensor of the same shape as input0.
* * 2: An input tensor of the same shape and type as input1.
* For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
* For a {@link OperandType::TENSOR_QUANT8_ASYMM}
* and {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor,
* the scale and zeroPoint can be different from input1's scale and zeroPoint.
*
* Outputs:
* * 0: A tensor of the same type and shape as input1 and input2.
* For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
* the scale and zeroPoint can be different from inputs' scale and zeroPoint.
*
*/
SELECT = @1.2::OperationType:SELECT,
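
The SELECT description above is purely element-wise: a boolean condition tensor chooses between input1 and input2. The lines below are an illustrative sketch of that behaviour for TENSOR_QUANT8_ASYMM_SIGNED data, assuming the output reuses the inputs' scale and zeroPoint; this is not the NNAPI reference implementation, and if the output quantization differs (which the description permits) an extra requantization step would be needed.

#include <cstdint>
#include <vector>

// Illustrative element-wise semantics of SELECT for signed 8-bit asymmetric
// quantized tensors with matching quantization parameters (hypothetical helper).
std::vector<int8_t> SelectQuant8Signed(const std::vector<bool>& condition,
                                       const std::vector<int8_t>& input1,
                                       const std::vector<int8_t>& input2) {
    std::vector<int8_t> output(condition.size());
    for (size_t i = 0; i < condition.size(); ++i) {
        // Pick the element from input1 when the condition is true, else from input2.
        output[i] = condition[i] ? input1[i] : input2[i];
    }
    return output;
}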

@ -456,8 +456,7 @@ TEST_P(CompilationCachingTest, CacheSavingAndRetrieval) {
}
// Execute and verify results.
EvaluatePreparedModel(preparedModel, testModel,
/*testDynamicOutputShape=*/false);
EvaluatePreparedModel(preparedModel, testModel, /*testKind=*/TestKind::GENERAL);
}
TEST_P(CompilationCachingTest, CacheSavingAndRetrievalNonZeroOffset) {
@ -519,8 +518,7 @@ TEST_P(CompilationCachingTest, CacheSavingAndRetrievalNonZeroOffset) {
}
// Execute and verify results.
EvaluatePreparedModel(preparedModel, testModel,
/*testDynamicOutputShape=*/false);
EvaluatePreparedModel(preparedModel, testModel, /*testKind=*/TestKind::GENERAL);
}
TEST_P(CompilationCachingTest, SaveToCacheInvalidNumCache) {
@ -541,8 +539,7 @@ TEST_P(CompilationCachingTest, SaveToCacheInvalidNumCache) {
saveModelToCache(model, modelCache, dataCache, &preparedModel);
ASSERT_NE(preparedModel, nullptr);
// Execute and verify results.
EvaluatePreparedModel(preparedModel, testModel,
/*testDynamicOutputShape=*/false);
EvaluatePreparedModel(preparedModel, testModel, /*testKind=*/TestKind::GENERAL);
// Check if prepareModelFromCache fails.
preparedModel = nullptr;
ErrorStatus status;
@ -566,8 +563,7 @@ TEST_P(CompilationCachingTest, SaveToCacheInvalidNumCache) {
saveModelToCache(model, modelCache, dataCache, &preparedModel);
ASSERT_NE(preparedModel, nullptr);
// Execute and verify results.
EvaluatePreparedModel(preparedModel, testModel,
/*testDynamicOutputShape=*/false);
EvaluatePreparedModel(preparedModel, testModel, /*testKind=*/TestKind::GENERAL);
// Check if prepareModelFromCache fails.
preparedModel = nullptr;
ErrorStatus status;
@ -590,8 +586,7 @@ TEST_P(CompilationCachingTest, SaveToCacheInvalidNumCache) {
saveModelToCache(model, modelCache, dataCache, &preparedModel);
ASSERT_NE(preparedModel, nullptr);
// Execute and verify results.
EvaluatePreparedModel(preparedModel, testModel,
/*testDynamicOutputShape=*/false);
EvaluatePreparedModel(preparedModel, testModel, /*testKind=*/TestKind::GENERAL);
// Check if prepareModelFromCache fails.
preparedModel = nullptr;
ErrorStatus status;
@ -615,8 +610,7 @@ TEST_P(CompilationCachingTest, SaveToCacheInvalidNumCache) {
saveModelToCache(model, modelCache, dataCache, &preparedModel);
ASSERT_NE(preparedModel, nullptr);
// Execute and verify results.
EvaluatePreparedModel(preparedModel, testModel,
/*testDynamicOutputShape=*/false);
EvaluatePreparedModel(preparedModel, testModel, /*testKind=*/TestKind::GENERAL);
// Check if prepareModelFromCache fails.
preparedModel = nullptr;
ErrorStatus status;
@ -727,8 +721,7 @@ TEST_P(CompilationCachingTest, SaveToCacheInvalidNumFd) {
saveModelToCache(model, modelCache, dataCache, &preparedModel);
ASSERT_NE(preparedModel, nullptr);
// Execute and verify results.
EvaluatePreparedModel(preparedModel, testModel,
/*testDynamicOutputShape=*/false);
EvaluatePreparedModel(preparedModel, testModel, /*testKind=*/TestKind::GENERAL);
// Check if prepareModelFromCache fails.
preparedModel = nullptr;
ErrorStatus status;
@ -752,8 +745,7 @@ TEST_P(CompilationCachingTest, SaveToCacheInvalidNumFd) {
saveModelToCache(model, modelCache, dataCache, &preparedModel);
ASSERT_NE(preparedModel, nullptr);
// Execute and verify results.
EvaluatePreparedModel(preparedModel, testModel,
/*testDynamicOutputShape=*/false);
EvaluatePreparedModel(preparedModel, testModel, /*testKind=*/TestKind::GENERAL);
// Check if prepareModelFromCache fails.
preparedModel = nullptr;
ErrorStatus status;
@ -776,8 +768,7 @@ TEST_P(CompilationCachingTest, SaveToCacheInvalidNumFd) {
saveModelToCache(model, modelCache, dataCache, &preparedModel);
ASSERT_NE(preparedModel, nullptr);
// Execute and verify results.
EvaluatePreparedModel(preparedModel, testModel,
/*testDynamicOutputShape=*/false);
EvaluatePreparedModel(preparedModel, testModel, /*testKind=*/TestKind::GENERAL);
// Check if prepareModelFromCache fails.
preparedModel = nullptr;
ErrorStatus status;
@ -801,8 +792,7 @@ TEST_P(CompilationCachingTest, SaveToCacheInvalidNumFd) {
saveModelToCache(model, modelCache, dataCache, &preparedModel);
ASSERT_NE(preparedModel, nullptr);
// Execute and verify results.
EvaluatePreparedModel(preparedModel, testModel,
/*testDynamicOutputShape=*/false);
EvaluatePreparedModel(preparedModel, testModel, /*testKind=*/TestKind::GENERAL);
// Check if prepareModelFromCache fails.
preparedModel = nullptr;
ErrorStatus status;
@ -914,8 +904,7 @@ TEST_P(CompilationCachingTest, SaveToCacheInvalidAccessMode) {
saveModelToCache(model, modelCache, dataCache, &preparedModel);
ASSERT_NE(preparedModel, nullptr);
// Execute and verify results.
EvaluatePreparedModel(preparedModel, testModel,
/*testDynamicOutputShape=*/false);
EvaluatePreparedModel(preparedModel, testModel, /*testKind=*/TestKind::GENERAL);
// Check if prepareModelFromCache fails.
preparedModel = nullptr;
ErrorStatus status;
@ -937,8 +926,7 @@ TEST_P(CompilationCachingTest, SaveToCacheInvalidAccessMode) {
saveModelToCache(model, modelCache, dataCache, &preparedModel);
ASSERT_NE(preparedModel, nullptr);
// Execute and verify results.
EvaluatePreparedModel(preparedModel, testModel,
/*testDynamicOutputShape=*/false);
EvaluatePreparedModel(preparedModel, testModel, /*testKind=*/TestKind::GENERAL);
// Check if prepareModelFromCache fails.
preparedModel = nullptr;
ErrorStatus status;
@ -1082,8 +1070,7 @@ TEST_P(CompilationCachingTest, SaveToCache_TOCTOU) {
ASSERT_EQ(preparedModel, nullptr);
} else {
ASSERT_NE(preparedModel, nullptr);
EvaluatePreparedModel(preparedModel, testModelAdd,
/*testDynamicOutputShape=*/false);
EvaluatePreparedModel(preparedModel, testModelAdd, /*testKind=*/TestKind::GENERAL);
}
}
}
@ -1144,8 +1131,7 @@ TEST_P(CompilationCachingTest, PrepareFromCache_TOCTOU) {
ASSERT_EQ(preparedModel, nullptr);
} else {
ASSERT_NE(preparedModel, nullptr);
EvaluatePreparedModel(preparedModel, testModelAdd,
/*testDynamicOutputShape=*/false);
EvaluatePreparedModel(preparedModel, testModelAdd, /*testKind=*/TestKind::GENERAL);
}
}
}

@ -69,8 +69,35 @@ using V1_2::Timing;
using V1_2::implementation::ExecutionCallback;
using HidlToken = hidl_array<uint8_t, static_cast<uint32_t>(Constant::BYTE_SIZE_OF_CACHE_TOKEN)>;
namespace {
enum class Executor { ASYNC, SYNC, BURST };
enum class OutputType { FULLY_SPECIFIED, UNSPECIFIED, INSUFFICIENT };
struct TestConfig {
Executor executor;
MeasureTiming measureTiming;
OutputType outputType;
// `reportSkipping` indicates whether a test should print an info message when it
// is skipped. It is true by default and is set to false in quantization coupling
// tests, which handle the skip decision themselves.
bool reportSkipping;
TestConfig(Executor executor, MeasureTiming measureTiming, OutputType outputType)
: executor(executor),
measureTiming(measureTiming),
outputType(outputType),
reportSkipping(true) {}
TestConfig(Executor executor, MeasureTiming measureTiming, OutputType outputType,
bool reportSkipping)
: executor(executor),
measureTiming(measureTiming),
outputType(outputType),
reportSkipping(reportSkipping) {}
};
} // namespace
Model createModel(const TestModel& testModel) {
// Model operands.
hidl_vec<Operand> operands(testModel.operands.size());
@ -205,31 +232,34 @@ static std::shared_ptr<::android::nn::ExecutionBurstController> CreateBurst(
return android::nn::ExecutionBurstController::create(preparedModel,
std::chrono::microseconds{0});
}
enum class Executor { ASYNC, SYNC, BURST };
void EvaluatePreparedModel(const sp<IPreparedModel>& preparedModel, const TestModel& testModel,
Executor executor, MeasureTiming measure, OutputType outputType) {
const TestConfig& testConfig, bool* skipped = nullptr) {
if (skipped != nullptr) {
*skipped = false;
}
// If output0 is not larger than one byte, we cannot test with an insufficient buffer.
if (outputType == OutputType::INSUFFICIENT && !isOutputSizeGreaterThanOne(testModel, 0)) {
if (testConfig.outputType == OutputType::INSUFFICIENT &&
!isOutputSizeGreaterThanOne(testModel, 0)) {
return;
}
Request request = createRequest(testModel);
if (outputType == OutputType::INSUFFICIENT) {
if (testConfig.outputType == OutputType::INSUFFICIENT) {
makeOutputInsufficientSize(/*outputIndex=*/0, &request);
}
ErrorStatus executionStatus;
hidl_vec<OutputShape> outputShapes;
Timing timing;
switch (executor) {
switch (testConfig.executor) {
case Executor::ASYNC: {
SCOPED_TRACE("asynchronous");
// launch execution
sp<ExecutionCallback> executionCallback = new ExecutionCallback();
Return<ErrorStatus> executionLaunchStatus =
ExecutePreparedModel(preparedModel, request, measure, executionCallback);
Return<ErrorStatus> executionLaunchStatus = ExecutePreparedModel(
preparedModel, request, testConfig.measureTiming, executionCallback);
ASSERT_TRUE(executionLaunchStatus.isOk());
EXPECT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(executionLaunchStatus));
@ -245,8 +275,8 @@ void EvaluatePreparedModel(const sp<IPreparedModel>& preparedModel, const TestMo
SCOPED_TRACE("synchronous");
// execute
Return<ErrorStatus> executionReturnStatus =
ExecutePreparedModel(preparedModel, request, measure, &outputShapes, &timing);
Return<ErrorStatus> executionReturnStatus = ExecutePreparedModel(
preparedModel, request, testConfig.measureTiming, &outputShapes, &timing);
ASSERT_TRUE(executionReturnStatus.isOk());
executionStatus = static_cast<ErrorStatus>(executionReturnStatus);
@ -269,15 +299,21 @@ void EvaluatePreparedModel(const sp<IPreparedModel>& preparedModel, const TestMo
// execute burst
int n;
std::tie(n, outputShapes, timing, std::ignore) =
controller->compute(request, measure, keys);
controller->compute(request, testConfig.measureTiming, keys);
executionStatus = nn::convertResultCodeToErrorStatus(n);
break;
}
}
if (outputType != OutputType::FULLY_SPECIFIED &&
if (testConfig.outputType != OutputType::FULLY_SPECIFIED &&
executionStatus == ErrorStatus::GENERAL_FAILURE) {
if (skipped != nullptr) {
*skipped = true;
}
if (!testConfig.reportSkipping) {
return;
}
LOG(INFO) << "NN VTS: Early termination of test because vendor service cannot "
"execute model that it does not support.";
std::cout << "[ ] Early termination of test because vendor service cannot "
@ -285,7 +321,7 @@ void EvaluatePreparedModel(const sp<IPreparedModel>& preparedModel, const TestMo
<< std::endl;
GTEST_SKIP();
}
if (measure == MeasureTiming::NO) {
if (testConfig.measureTiming == MeasureTiming::NO) {
EXPECT_EQ(UINT64_MAX, timing.timeOnDevice);
EXPECT_EQ(UINT64_MAX, timing.timeInDriver);
} else {
@ -294,7 +330,7 @@ void EvaluatePreparedModel(const sp<IPreparedModel>& preparedModel, const TestMo
}
}
switch (outputType) {
switch (testConfig.outputType) {
case OutputType::FULLY_SPECIFIED:
// If the model output operands are fully specified, outputShapes must be
// either empty, or have the same number of elements as the number of outputs.
@ -331,59 +367,117 @@ void EvaluatePreparedModel(const sp<IPreparedModel>& preparedModel, const TestMo
}
void EvaluatePreparedModel(const sp<IPreparedModel>& preparedModel, const TestModel& testModel,
bool testDynamicOutputShape) {
if (testDynamicOutputShape) {
EvaluatePreparedModel(preparedModel, testModel, Executor::ASYNC, MeasureTiming::NO,
OutputType::UNSPECIFIED);
EvaluatePreparedModel(preparedModel, testModel, Executor::SYNC, MeasureTiming::NO,
OutputType::UNSPECIFIED);
EvaluatePreparedModel(preparedModel, testModel, Executor::BURST, MeasureTiming::NO,
OutputType::UNSPECIFIED);
EvaluatePreparedModel(preparedModel, testModel, Executor::ASYNC, MeasureTiming::YES,
OutputType::UNSPECIFIED);
EvaluatePreparedModel(preparedModel, testModel, Executor::SYNC, MeasureTiming::YES,
OutputType::UNSPECIFIED);
EvaluatePreparedModel(preparedModel, testModel, Executor::BURST, MeasureTiming::YES,
OutputType::UNSPECIFIED);
EvaluatePreparedModel(preparedModel, testModel, Executor::ASYNC, MeasureTiming::NO,
OutputType::INSUFFICIENT);
EvaluatePreparedModel(preparedModel, testModel, Executor::SYNC, MeasureTiming::NO,
OutputType::INSUFFICIENT);
EvaluatePreparedModel(preparedModel, testModel, Executor::BURST, MeasureTiming::NO,
OutputType::INSUFFICIENT);
EvaluatePreparedModel(preparedModel, testModel, Executor::ASYNC, MeasureTiming::YES,
OutputType::INSUFFICIENT);
EvaluatePreparedModel(preparedModel, testModel, Executor::SYNC, MeasureTiming::YES,
OutputType::INSUFFICIENT);
EvaluatePreparedModel(preparedModel, testModel, Executor::BURST, MeasureTiming::YES,
OutputType::INSUFFICIENT);
} else {
EvaluatePreparedModel(preparedModel, testModel, Executor::ASYNC, MeasureTiming::NO,
OutputType::FULLY_SPECIFIED);
EvaluatePreparedModel(preparedModel, testModel, Executor::SYNC, MeasureTiming::NO,
OutputType::FULLY_SPECIFIED);
EvaluatePreparedModel(preparedModel, testModel, Executor::BURST, MeasureTiming::NO,
OutputType::FULLY_SPECIFIED);
EvaluatePreparedModel(preparedModel, testModel, Executor::ASYNC, MeasureTiming::YES,
OutputType::FULLY_SPECIFIED);
EvaluatePreparedModel(preparedModel, testModel, Executor::SYNC, MeasureTiming::YES,
OutputType::FULLY_SPECIFIED);
EvaluatePreparedModel(preparedModel, testModel, Executor::BURST, MeasureTiming::YES,
OutputType::FULLY_SPECIFIED);
TestKind testKind) {
std::initializer_list<OutputType> outputTypesList;
std::initializer_list<MeasureTiming> measureTimingList;
std::initializer_list<Executor> executorList;
switch (testKind) {
case TestKind::GENERAL: {
outputTypesList = {OutputType::FULLY_SPECIFIED};
measureTimingList = {MeasureTiming::NO, MeasureTiming::YES};
executorList = {Executor::ASYNC, Executor::SYNC, Executor::BURST};
} break;
case TestKind::DYNAMIC_SHAPE: {
outputTypesList = {OutputType::UNSPECIFIED, OutputType::INSUFFICIENT};
measureTimingList = {MeasureTiming::NO, MeasureTiming::YES};
executorList = {Executor::ASYNC, Executor::SYNC, Executor::BURST};
} break;
case TestKind::QUANTIZATION_COUPLING: {
LOG(FATAL) << "Wrong TestKind for EvaluatePreparedModel";
return;
} break;
}
for (const OutputType outputType : outputTypesList) {
for (const MeasureTiming measureTiming : measureTimingList) {
for (const Executor executor : executorList) {
const TestConfig testConfig(executor, measureTiming, outputType);
EvaluatePreparedModel(preparedModel, testModel, testConfig);
}
}
}
}
void Execute(const sp<IDevice>& device, const TestModel& testModel, bool testDynamicOutputShape) {
void EvaluatePreparedCoupledModels(const sp<IPreparedModel>& preparedModel,
const TestModel& testModel,
const sp<IPreparedModel>& preparedCoupledModel,
const TestModel& coupledModel) {
std::initializer_list<OutputType> outputTypesList = {OutputType::FULLY_SPECIFIED};
std::initializer_list<MeasureTiming> measureTimingList = {MeasureTiming::NO,
MeasureTiming::YES};
std::initializer_list<Executor> executorList = {Executor::ASYNC, Executor::SYNC,
Executor::BURST};
for (const OutputType outputType : outputTypesList) {
for (const MeasureTiming measureTiming : measureTimingList) {
for (const Executor executor : executorList) {
const TestConfig testConfig(executor, measureTiming, outputType,
/*reportSkipping=*/false);
bool baseSkipped = false;
EvaluatePreparedModel(preparedModel, testModel, testConfig, &baseSkipped);
bool coupledSkipped = false;
EvaluatePreparedModel(preparedCoupledModel, coupledModel, testConfig,
&coupledSkipped);
ASSERT_EQ(baseSkipped, coupledSkipped);
if (baseSkipped) {
LOG(INFO) << "NN VTS: Early termination of test because vendor service cannot "
"execute model that it does not support.";
std::cout << "[ ] Early termination of test because vendor service "
"cannot "
"execute model that it does not support."
<< std::endl;
GTEST_SKIP();
}
}
}
}
}
void Execute(const sp<IDevice>& device, const TestModel& testModel, TestKind testKind) {
Model model = createModel(testModel);
if (testDynamicOutputShape) {
if (testKind == TestKind::DYNAMIC_SHAPE) {
makeOutputDimensionsUnspecified(&model);
}
sp<IPreparedModel> preparedModel;
createPreparedModel(device, model, &preparedModel);
if (preparedModel == nullptr) return;
EvaluatePreparedModel(preparedModel, testModel, testDynamicOutputShape);
switch (testKind) {
case TestKind::GENERAL: {
createPreparedModel(device, model, &preparedModel);
if (preparedModel == nullptr) return;
EvaluatePreparedModel(preparedModel, testModel, TestKind::GENERAL);
} break;
case TestKind::DYNAMIC_SHAPE: {
createPreparedModel(device, model, &preparedModel);
if (preparedModel == nullptr) return;
EvaluatePreparedModel(preparedModel, testModel, TestKind::DYNAMIC_SHAPE);
} break;
case TestKind::QUANTIZATION_COUPLING: {
ASSERT_TRUE(testModel.hasQuant8AsymmOperands());
createPreparedModel(device, model, &preparedModel, /*reportSkipping*/ false);
TestModel signedQuantizedModel = convertQuant8AsymmOperandsToSigned(testModel);
sp<IPreparedModel> preparedCoupledModel;
createPreparedModel(device, createModel(signedQuantizedModel), &preparedCoupledModel,
/*reportSkipping*/ false);
// If we couldn't prepare a model with unsigned quantization, we must
// fail to prepare a model with signed quantization as well.
if (preparedModel == nullptr) {
ASSERT_EQ(preparedCoupledModel, nullptr);
// If we failed to prepare both of the models, we can safely skip
// the test.
LOG(INFO) << "NN VTS: Early termination of test because vendor service cannot "
"prepare model that it does not support.";
std::cout
<< "[ ] Early termination of test because vendor service cannot "
"prepare model that it does not support."
<< std::endl;
GTEST_SKIP();
}
ASSERT_NE(preparedCoupledModel, nullptr);
EvaluatePreparedCoupledModels(preparedModel, testModel, preparedCoupledModel,
signedQuantizedModel);
} break;
}
}
void GeneratedTestBase::SetUp() {
@ -406,12 +500,19 @@ class GeneratedTest : public GeneratedTestBase {};
// Tag for the dynamic output shape tests
class DynamicOutputShapeTest : public GeneratedTest {};
// Tag for the quantization coupling tests
class DISABLED_QuantizationCouplingTest : public GeneratedTest {};
TEST_P(GeneratedTest, Test) {
Execute(kDevice, kTestModel, /*testDynamicOutputShape=*/false);
Execute(kDevice, kTestModel, /*testKind=*/TestKind::GENERAL);
}
TEST_P(DynamicOutputShapeTest, Test) {
Execute(kDevice, kTestModel, /*testDynamicOutputShape=*/true);
Execute(kDevice, kTestModel, /*testKind=*/TestKind::DYNAMIC_SHAPE);
}
TEST_P(DISABLED_QuantizationCouplingTest, Test) {
Execute(kDevice, kTestModel, /*testKind=*/TestKind::QUANTIZATION_COUPLING);
}
INSTANTIATE_GENERATED_TEST(GeneratedTest,
@ -420,4 +521,8 @@ INSTANTIATE_GENERATED_TEST(GeneratedTest,
INSTANTIATE_GENERATED_TEST(DynamicOutputShapeTest,
[](const TestModel& testModel) { return !testModel.expectFailure; });
INSTANTIATE_GENERATED_TEST(DISABLED_QuantizationCouplingTest, [](const TestModel& testModel) {
return testModel.hasQuant8AsymmOperands() && testModel.operations.size() == 1;
});
} // namespace android::hardware::neuralnetworks::V1_3::vts::functional
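
The coupling path above depends on convertQuant8AsymmOperandsToSigned, which is not part of this diff. The sketch below only illustrates the arithmetic such a conversion is generally expected to perform: keep the scale, shift the zero point and every stored value by -128 so the represented real values are unchanged. The struct and function names are placeholders, not the actual test_helper API.

#include <cstdint>
#include <vector>

// Placeholder operand holder; the real test_helper::TestModel layout differs.
struct FakeQuantOperand {
    float scale = 0.0f;
    int32_t zeroPoint = 0;
    std::vector<uint8_t> data;  // raw quantized storage bytes
};

// ASYMM (uint8, zeroPoint in [0, 255]) -> ASYMM_SIGNED (int8, zeroPoint in [-128, 127]):
// the scale is unchanged, the zero point and every value shift by -128, so the
// dequantized (real) values are identical before and after the conversion.
void ConvertAsymmToSigned(FakeQuantOperand* operand) {
    operand->zeroPoint -= 128;
    for (uint8_t& byte : operand->data) {
        byte ^= 0x80;  // subtract 128 modulo 256, i.e. reinterpret as int8 shifted by -128
    }
}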

@ -57,8 +57,19 @@ Model createModel(const test_helper::TestModel& testModel);
void PrepareModel(const sp<IDevice>& device, const Model& model, sp<IPreparedModel>* preparedModel);
enum class TestKind {
// Runs a test model and compares the results against golden data.
GENERAL,
// Same as GENERAL, but with the output tensor dimensions left unspecified (set to zero).
DYNAMIC_SHAPE,
// Tests whether a model with TENSOR_QUANT8_ASYMM operands produces the same result
// (OK/SKIPPED/FAILED) as the same model with all such operands converted to
// TENSOR_QUANT8_ASYMM_SIGNED.
QUANTIZATION_COUPLING
};
void EvaluatePreparedModel(const sp<IPreparedModel>& preparedModel,
const test_helper::TestModel& testModel, bool testDynamicOutputShape);
const test_helper::TestModel& testModel, TestKind testKind);
} // namespace android::hardware::neuralnetworks::V1_3::vts::functional

@ -37,7 +37,7 @@ using V1_1::ExecutionPreference;
// internal helper function
void createPreparedModel(const sp<IDevice>& device, const Model& model,
sp<IPreparedModel>* preparedModel) {
sp<IPreparedModel>* preparedModel, bool reportSkipping) {
ASSERT_NE(nullptr, preparedModel);
*preparedModel = nullptr;
@ -74,6 +74,9 @@ void createPreparedModel(const sp<IDevice>& device, const Model& model,
// can continue.
if (!fullySupportsModel && prepareReturnStatus != ErrorStatus::NONE) {
ASSERT_EQ(nullptr, preparedModel->get());
if (!reportSkipping) {
return;
}
LOG(INFO) << "NN VTS: Early termination of test because vendor service cannot prepare "
"model that it does not support.";
std::cout << "[ ] Early termination of test because vendor service cannot "

@ -47,7 +47,7 @@ std::string printNeuralnetworksHidlTest(
// Create an IPreparedModel object. If the model cannot be prepared,
// "preparedModel" will be nullptr instead.
void createPreparedModel(const sp<IDevice>& device, const Model& model,
sp<IPreparedModel>* preparedModel);
sp<IPreparedModel>* preparedModel, bool reportSkipping = true);
// Utility function to get PreparedModel from callback and downcast to V1_3.
sp<IPreparedModel> getPreparedModel_1_3(const sp<implementation::PreparedModelCallback>& callback);
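
Because reportSkipping defaults to true, existing callers of createPreparedModel are unaffected; passing false hands the skip decision back to the caller, which is what the quantization coupling flow above does. A minimal usage sketch, assuming the device and model variables come from the enclosing test:

sp<IPreparedModel> preparedModel;
createPreparedModel(device, model, &preparedModel, /*reportSkipping=*/false);
if (preparedModel == nullptr) {
    // The driver does not support the model; decide here whether to skip or to
    // assert something about a coupled model instead of relying on GTEST_SKIP()
    // inside the helper.
}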