Add @1.2::IPreparedModel::executeSynchronously() and corresponding VTS tests.
Bug: 119274127
Test: all of the following, with the appropriate android.hardware.neuralnetworks@1.${X}::IDevice/sample-all
VtsHalNeuralnetworksV1_0TargetTest
VtsHalNeuralnetworksV1_1CompatV1_0TargetTest
VtsHalNeuralnetworksV1_1TargetTest
VtsHalNeuralnetworksV1_2CompatV1_0TargetTest
VtsHalNeuralnetworksV1_2CompatV1_1TargetTest
VtsHalNeuralnetworksV1_2TargetTest
Change-Id: Iedfa485b4008d9cec3b81ff4c0ce3ebc0b83c823
(cherry picked from commit 49e41678f5)
parent ed2eb3de0a
commit 4592ed15cd
3 changed files with 106 additions and 27 deletions
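
For orientation, here is a minimal client-side sketch of the two execution paths this change covers: the existing asynchronous execute_1_2() driven through an ExecutionCallback, and the new synchronous executeSynchronously(). It is illustrative only; runAsync/runSync are invented names, and the includes and using-declarations are assumed to match what the VTS harness in the diff below already has in scope.

    // Illustrative sketch only; not code from this change.
    #include <android/hardware/neuralnetworks/1.2/IPreparedModel.h>
    #include "Callbacks.h"  // VTS ExecutionCallback helper (assumed in scope, as in the harness)

    using ::android::sp;
    using ::android::hardware::Return;
    using ::android::hardware::neuralnetworks::V1_0::ErrorStatus;
    using ::android::hardware::neuralnetworks::V1_0::Request;
    using ::android::hardware::neuralnetworks::V1_2::IPreparedModel;
    using ::android::hardware::neuralnetworks::V1_2::implementation::ExecutionCallback;

    // Asynchronous path: launch via execute_1_2(), then block on the callback for the result.
    ErrorStatus runAsync(const sp<IPreparedModel>& preparedModel, const Request& request) {
        sp<ExecutionCallback> callback = new ExecutionCallback();
        Return<ErrorStatus> launchStatus = preparedModel->execute_1_2(request, callback);
        if (!launchStatus.isOk() || static_cast<ErrorStatus>(launchStatus) != ErrorStatus::NONE) {
            return ErrorStatus::GENERAL_FAILURE;  // transport error or failed launch
        }
        callback->wait();              // returns only after the driver reports completion
        return callback->getStatus();  // final status of the execution
    }

    // Synchronous path: the call itself does not return until execution has completed.
    ErrorStatus runSync(const sp<IPreparedModel>& preparedModel, const Request& request) {
        Return<ErrorStatus> status = preparedModel->executeSynchronously(request);
        return status.isOk() ? static_cast<ErrorStatus>(status) : ErrorStatus::GENERAL_FAILURE;
    }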
@@ -88,11 +88,22 @@ static Return<ErrorStatus> ExecutePreparedModel(sp<V1_2::IPreparedModel>& prepar
                                                 sp<ExecutionCallback>& callback) {
     return preparedModel->execute_1_2(request, callback);
 }
+static Return<ErrorStatus> ExecutePreparedModel(sp<V1_0::IPreparedModel>&, const Request&) {
+    ADD_FAILURE() << "asking for synchronous execution at V1_0";
+    return ErrorStatus::GENERAL_FAILURE;
+}
+static Return<ErrorStatus> ExecutePreparedModel(sp<V1_2::IPreparedModel>& preparedModel,
+                                                const Request& request) {
+    return preparedModel->executeSynchronously(request);
+}
+enum class Synchronously { NO, YES };
+const float kDefaultAtol = 1e-5f;
+const float kDefaultRtol = 1e-5f;
 template <typename T_IPreparedModel>
 void EvaluatePreparedModel(sp<T_IPreparedModel>& preparedModel, std::function<bool(int)> is_ignored,
                            const std::vector<MixedTypedExample>& examples,
-                           bool hasRelaxedFloat32Model = false, float fpAtol = 1e-5f,
-                           float fpRtol = 1e-5f) {
+                           bool hasRelaxedFloat32Model = false, float fpAtol = kDefaultAtol,
+                           float fpRtol = kDefaultRtol, Synchronously sync = Synchronously::NO) {
     const uint32_t INPUT = 0;
     const uint32_t OUTPUT = 1;
 
@@ -185,19 +196,31 @@ void EvaluatePreparedModel(sp<T_IPreparedModel>& preparedModel, std::function<bo
     inputMemory->commit();
     outputMemory->commit();
 
-    // launch execution
-    sp<ExecutionCallback> executionCallback = new ExecutionCallback();
-    ASSERT_NE(nullptr, executionCallback.get());
-    Return<ErrorStatus> executionLaunchStatus = ExecutePreparedModel(
-            preparedModel, {.inputs = inputs_info, .outputs = outputs_info, .pools = pools},
-            executionCallback);
-    ASSERT_TRUE(executionLaunchStatus.isOk());
-    EXPECT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(executionLaunchStatus));
-
-    // retrieve execution status
-    executionCallback->wait();
-    ErrorStatus executionReturnStatus = executionCallback->getStatus();
-    EXPECT_EQ(ErrorStatus::NONE, executionReturnStatus);
+    if (sync == Synchronously::NO) {
+        SCOPED_TRACE("asynchronous");
+
+        // launch execution
+        sp<ExecutionCallback> executionCallback = new ExecutionCallback();
+        ASSERT_NE(nullptr, executionCallback.get());
+        Return<ErrorStatus> executionLaunchStatus = ExecutePreparedModel(
+                preparedModel, {.inputs = inputs_info, .outputs = outputs_info, .pools = pools},
+                executionCallback);
+        ASSERT_TRUE(executionLaunchStatus.isOk());
+        EXPECT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(executionLaunchStatus));
+
+        // retrieve execution status
+        executionCallback->wait();
+        ErrorStatus executionReturnStatus = executionCallback->getStatus();
+        EXPECT_EQ(ErrorStatus::NONE, executionReturnStatus);
+    } else {
+        SCOPED_TRACE("synchronous");
+
+        // execute
+        Return<ErrorStatus> executionStatus = ExecutePreparedModel(
+                preparedModel, {.inputs = inputs_info, .outputs = outputs_info, .pools = pools});
+        ASSERT_TRUE(executionStatus.isOk());
+        EXPECT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(executionStatus));
+    }
 
     // validate results
     outputMemory->read();
@@ -215,6 +238,13 @@ void EvaluatePreparedModel(sp<T_IPreparedModel>& preparedModel, std::function<bo
         }
     }
 }
+template <typename T_IPreparedModel>
+void EvaluatePreparedModel(sp<T_IPreparedModel>& preparedModel, std::function<bool(int)> is_ignored,
+                           const std::vector<MixedTypedExample>& examples,
+                           bool hasRelaxedFloat32Model, Synchronously sync) {
+    EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model, kDefaultAtol,
+                          kDefaultRtol, sync);
+}
 
 static void getPreparedModel(sp<PreparedModelCallback> callback,
                              sp<V1_0::IPreparedModel>* preparedModel) {
@@ -362,7 +392,9 @@ void Execute(const sp<V1_2::IDevice>& device, std::function<V1_2::Model(void)> c
     ASSERT_NE(nullptr, preparedModel.get());
 
     EvaluatePreparedModel(preparedModel, is_ignored, examples,
-                          model.relaxComputationFloat32toFloat16);
+                          model.relaxComputationFloat32toFloat16, Synchronously::NO);
+    EvaluatePreparedModel(preparedModel, is_ignored, examples,
+                          model.relaxComputationFloat32toFloat16, Synchronously::YES);
 }
 
 } // namespace generated_tests

@@ -51,8 +51,9 @@ interface IPreparedModel extends @1.0::IPreparedModel {
      * and complete successfully (ErrorStatus::NONE). There must be
      * no failure unless the device itself is in a bad state.
      *
-     * Multiple threads can call the execute_1_2 function on the same IPreparedModel
-     * object concurrently with different requests.
+     * Any number of calls to the execute, execute_1_2, and executeSynchronously
+     * functions, in any combination, may be made concurrently, even on the same
+     * IPreparedModel object.
      *
      * @param request The input and output information on which the prepared
      *                model is to be executed.
@@ -71,4 +72,39 @@ interface IPreparedModel extends @1.0::IPreparedModel {
      */
     execute_1_2(Request request, IExecutionCallback callback)
         generates (ErrorStatus status);
+
+    /**
+     * Performs a synchronous execution on a prepared model.
+     *
+     * The execution is performed synchronously with respect to the caller.
+     * executeSynchronously must verify the inputs to the function are
+     * correct. If there is an error, executeSynchronously must immediately
+     * return with the appropriate ErrorStatus value. If the inputs to the
+     * function are valid and there is no error, executeSynchronously must
+     * perform the execution, and must not return until the execution is
+     * complete.
+     *
+     * If the prepared model was prepared from a model wherein all tensor
+     * operands have fully specified dimensions, and the inputs to the function
+     * are valid, then the execution should complete successfully
+     * (ErrorStatus::NONE). There must be no failure unless the device itself is
+     * in a bad state.
+     *
+     * Any number of calls to the execute, execute_1_2, and executeSynchronously
+     * functions, in any combination, may be made concurrently, even on the same
+     * IPreparedModel object.
+     *
+     * @param request The input and output information on which the prepared
+     *                model is to be executed.
+     * @return status Error status of the execution, must be:
+     *                - NONE if execution is performed successfully
+     *                - DEVICE_UNAVAILABLE if driver is offline or busy
+     *                - GENERAL_FAILURE if there is an unspecified error
+     *                - OUTPUT_INSUFFICIENT_SIZE if provided output buffer is
+     *                  not large enough to store the resultant values
+     *                - INVALID_ARGUMENT if one of the input arguments is
+     *                  invalid
+     */
+    executeSynchronously(Request request)
+        generates (ErrorStatus status);
 };

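To make the contract above concrete, here is a hypothetical driver-side sketch (not part of this change): validate the request first, return INVALID_ARGUMENT immediately on bad input, and otherwise run the computation to completion on the caller's thread before returning. SamplePreparedModel, validateRequest, and computeOnCpu are assumed names, not APIs defined by this interface.

    // Hypothetical implementation sketch of executeSynchronously(); names are assumptions.
    Return<ErrorStatus> SamplePreparedModel::executeSynchronously(const Request& request) {
        // Verify the inputs to the function are correct; fail fast if they are not.
        if (!validateRequest(request, mModel)) {
            return ErrorStatus::INVALID_ARGUMENT;
        }
        // Perform the execution on the calling thread; do not return until it completes.
        const ErrorStatus status = computeOnCpu(mModel, request);
        // NONE on success, otherwise one of the documented failure codes.
        return status;
    }
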
@@ -97,18 +97,29 @@ static void createPreparedModel(const sp<IDevice>& device, const Model& model,
 static void validate(const sp<IPreparedModel>& preparedModel, const std::string& message,
                      Request request, const std::function<void(Request*)>& mutation) {
     mutation(&request);
-    SCOPED_TRACE(message + " [execute]");
-
-    sp<ExecutionCallback> executionCallback = new ExecutionCallback();
-    ASSERT_NE(nullptr, executionCallback.get());
-    Return<ErrorStatus> executeLaunchStatus =
-            preparedModel->execute_1_2(request, executionCallback);
-    ASSERT_TRUE(executeLaunchStatus.isOk());
-    ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, static_cast<ErrorStatus>(executeLaunchStatus));
-
-    executionCallback->wait();
-    ErrorStatus executionReturnStatus = executionCallback->getStatus();
-    ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, executionReturnStatus);
+
+    {
+        SCOPED_TRACE(message + " [execute_1_2]");
+
+        sp<ExecutionCallback> executionCallback = new ExecutionCallback();
+        ASSERT_NE(nullptr, executionCallback.get());
+        Return<ErrorStatus> executeLaunchStatus =
+                preparedModel->execute_1_2(request, executionCallback);
+        ASSERT_TRUE(executeLaunchStatus.isOk());
+        ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, static_cast<ErrorStatus>(executeLaunchStatus));
+
+        executionCallback->wait();
+        ErrorStatus executionReturnStatus = executionCallback->getStatus();
+        ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, executionReturnStatus);
+    }
+
+    {
+        SCOPED_TRACE(message + " [executeSynchronously]");
+
+        Return<ErrorStatus> executeStatus = preparedModel->executeSynchronously(request);
+        ASSERT_TRUE(executeStatus.isOk());
+        ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, static_cast<ErrorStatus>(executeStatus));
+    }
 }
 
 // Delete element from hidl_vec. hidl_vec doesn't support a "remove" operation,
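
For context, here is an illustrative caller of the validate() helper above, using the kind of request-mutation helper the truncated hidl_vec comment introduces. removeInputTest and hidl_vec_removeAt are assumptions about the surrounding test code, not lines shown in this diff.

    // Illustrative caller of validate(): drop each input in turn and expect INVALID_ARGUMENT.
    static void removeInputTest(const sp<IPreparedModel>& preparedModel, const Request& request) {
        for (size_t input = 0; input < request.inputs.size(); ++input) {
            const std::string message = "removeInput: removed input " + std::to_string(input);
            validate(preparedModel, message, request,
                     [input](Request* request) { hidl_vec_removeAt(&request->inputs, input); });
        }
    }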