Add tests to make sure executeFenced validates unspecified output shapes am: 406890769a
am: 7274c0d40b
Change-Id: I5f5ddc1ec3ed87ce0d1cb42412d916cbc4a2aa25
commit 717fdba917
1 changed file with 20 additions and 7 deletions
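In short, the change below makes the generated dynamic-shape tests drive executeFenced with output shapes that are not fully specified, and requires the driver to reject such requests with INVALID_ARGUMENT rather than letting the harness skip on GENERAL_FAILURE; Executor::FENCED is also added to the DYNAMIC_SHAPE executor list so these cases actually run. A minimal, self-contained sketch of the expectation the new assertions encode follows; the enum values mirror the harness, but expectedStatus and main() are hypothetical stand-ins for illustration only, and the skip-on-driver-rejection handling is omitted.

// Sketch of the expected driver status per test configuration after this change.
// The enums mirror the VTS harness; expectedStatus is a hypothetical helper, not real test code.
#include <cassert>

enum class OutputType { FULLY_SPECIFIED, UNSPECIFIED, INSUFFICIENT };
enum class Executor { ASYNC, SYNC, BURST, FENCED };
enum class ErrorStatus { NONE, INVALID_ARGUMENT, OUTPUT_INSUFFICIENT_SIZE, GENERAL_FAILURE };

ErrorStatus expectedStatus(OutputType outputType, Executor executor) {
    if (executor == Executor::FENCED && outputType != OutputType::FULLY_SPECIFIED) {
        // executeFenced requires fully specified output shapes, so the request must be rejected.
        return ErrorStatus::INVALID_ARGUMENT;
    }
    if (outputType == OutputType::INSUFFICIENT) {
        // Too-small output buffers are reported as OUTPUT_INSUFFICIENT_SIZE on the other executors.
        return ErrorStatus::OUTPUT_INSUFFICIENT_SIZE;
    }
    return ErrorStatus::NONE;
}

int main() {
    assert(expectedStatus(OutputType::UNSPECIFIED, Executor::FENCED) ==
           ErrorStatus::INVALID_ARGUMENT);
    assert(expectedStatus(OutputType::INSUFFICIENT, Executor::FENCED) ==
           ErrorStatus::INVALID_ARGUMENT);
    assert(expectedStatus(OutputType::UNSPECIFIED, Executor::SYNC) == ErrorStatus::NONE);
    assert(expectedStatus(OutputType::INSUFFICIENT, Executor::SYNC) ==
           ErrorStatus::OUTPUT_INSUFFICIENT_SIZE);
    return 0;
}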
@@ -640,7 +640,7 @@ void EvaluatePreparedModel(const sp<IDevice>& device, const sp<IPreparedModel>&
             if (result != ErrorStatus::NONE) {
                 ASSERT_EQ(syncFenceHandle.getNativeHandle(), nullptr);
                 ASSERT_EQ(fencedCallback, nullptr);
-                executionStatus = ErrorStatus::GENERAL_FAILURE;
+                executionStatus = result;
             } else if (syncFenceHandle.getNativeHandle()) {
                 // If a sync fence is returned, try start another run waiting for the sync fence.
                 ret = preparedModel->executeFenced(request, {syncFenceHandle},
@@ -663,9 +663,7 @@ void EvaluatePreparedModel(const sp<IDevice>& device, const sp<IPreparedModel>&
         }
     }

-    // The driver is allowed to reject executeFenced, and if they do, we should skip.
-    if ((testConfig.outputType != OutputType::FULLY_SPECIFIED ||
-         testConfig.executor == Executor::FENCED) &&
+    if (testConfig.outputType != OutputType::FULLY_SPECIFIED &&
         executionStatus == ErrorStatus::GENERAL_FAILURE) {
         if (skipped != nullptr) {
             *skipped = true;
@@ -698,12 +696,22 @@ void EvaluatePreparedModel(const sp<IDevice>& device, const sp<IPreparedModel>&
                         outputShapes.size() == testModel.main.outputIndexes.size());
             break;
         case OutputType::UNSPECIFIED:
+            if (testConfig.executor == Executor::FENCED) {
+                // For Executor::FENCED, the output shape must be fully specified.
+                ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, executionStatus);
+                return;
+            }
             // If the model output operands are not fully specified, outputShapes must have
             // the same number of elements as the number of outputs.
             ASSERT_EQ(ErrorStatus::NONE, executionStatus);
             ASSERT_EQ(outputShapes.size(), testModel.main.outputIndexes.size());
             break;
         case OutputType::INSUFFICIENT:
+            if (testConfig.executor == Executor::FENCED) {
+                // For Executor::FENCED, the output shape must be fully specified.
+                ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, executionStatus);
+                return;
+            }
             ASSERT_EQ(ErrorStatus::OUTPUT_INSUFFICIENT_SIZE, executionStatus);
             ASSERT_EQ(outputShapes.size(), testModel.main.outputIndexes.size());
             ASSERT_FALSE(outputShapes[0].isSufficient);
@@ -746,7 +754,7 @@ void EvaluatePreparedModel(const sp<IDevice>& device, const sp<IPreparedModel>&
         case TestKind::DYNAMIC_SHAPE: {
             outputTypesList = {OutputType::UNSPECIFIED, OutputType::INSUFFICIENT};
             measureTimingList = {MeasureTiming::NO, MeasureTiming::YES};
-            executorList = {Executor::ASYNC, Executor::SYNC, Executor::BURST};
+            executorList = {Executor::ASYNC, Executor::SYNC, Executor::BURST, Executor::FENCED};
         } break;
         case TestKind::MEMORY_DOMAIN: {
             outputTypesList = {OutputType::FULLY_SPECIFIED};
@@ -928,8 +936,13 @@ INSTANTIATE_GENERATED_TEST(DynamicOutputShapeTest, [](const TestModel& testModel
 INSTANTIATE_GENERATED_TEST(MemoryDomainTest,
                            [](const TestModel& testModel) { return !testModel.expectFailure; });

-INSTANTIATE_GENERATED_TEST(FencedComputeTest,
-                           [](const TestModel& testModel) { return !testModel.expectFailure; });
+INSTANTIATE_GENERATED_TEST(FencedComputeTest, [](const TestModel& testModel) {
+    return !testModel.expectFailure &&
+           std::all_of(testModel.main.outputIndexes.begin(), testModel.main.outputIndexes.end(),
+                       [&testModel](uint32_t index) {
+                           return testModel.main.operands[index].data.size() > 0;
+                       });
+});

 INSTANTIATE_GENERATED_TEST(QuantizationCouplingTest, [](const TestModel& testModel) {
     return testModel.hasQuant8CoupledOperands() && testModel.main.operations.size() == 1;
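The reworked FencedComputeTest instantiation above keeps only models whose output operands all carry reference data; an output with no golden data is one whose shape is not known up front, so it cannot be exercised through the fenced path. A standalone sketch of that filter predicate is below; TestOperand, TestSubgraph and TestModel are illustrative stand-ins for the generated-test types, not the real definitions.

#include <algorithm>
#include <cstdint>
#include <vector>

// Illustrative stand-ins for the generated-test model types (not the real headers).
struct TestOperand {
    std::vector<uint8_t> data;  // golden/reference bytes for this operand
};
struct TestSubgraph {
    std::vector<TestOperand> operands;
    std::vector<uint32_t> outputIndexes;
};
struct TestModel {
    TestSubgraph main;
    bool expectFailure = false;
};

// Mirrors the filter passed to INSTANTIATE_GENERATED_TEST(FencedComputeTest, ...):
// keep a model only if it is expected to succeed and every output has reference data.
bool includeInFencedComputeTest(const TestModel& testModel) {
    return !testModel.expectFailure &&
           std::all_of(testModel.main.outputIndexes.begin(), testModel.main.outputIndexes.end(),
                       [&testModel](uint32_t index) {
                           return testModel.main.operands[index].data.size() > 0;
                       });
}

int main() {
    TestModel model;
    model.main.operands = {TestOperand{{1, 2, 3, 4}}};
    model.main.outputIndexes = {0};
    return includeInFencedComputeTest(model) ? 0 : 1;  // included: the single output has data
}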