NN HAL: Upgrade IPreparedModel::execute to 1.3.

Bug: 143242728
Test: 1.3 VTS with sample driver
Change-Id: I9ca1e28ddc97fe880a72885afe7afb6c93903697
Merged-In: I9ca1e28ddc97fe880a72885afe7afb6c93903697
(cherry picked from commit 62a760c32d)
Xusong Wang 2019-10-25 12:07:17 -07:00
parent cc47dffa57
commit 1b3f426648
14 changed files with 109 additions and 24 deletions

@@ -590,7 +590,8 @@ ce8dbe76eb9ee94b46ef98f725be992e760a5751073d4f4912484026541371f3 android.hardwar
 26f04510a0b57aba5167c5c0a7c2f077c2acbb98b81902a072517829fd9fd67f android.hardware.health@2.1::IHealthInfoCallback
 db47f4ceceb1f06c656f39caa70c557b0f8471ef59fd58611bea667ffca20101 android.hardware.health@2.1::types
 9e59fffceed0dd72a9799e04505db5f777bbbea1af0695ba4107ef6d967c6fda android.hardware.neuralnetworks@1.3::IDevice
-fd5a2b723b75acbdd9f31bd07e0f83293c52f99f8d9b87bf58eeb6018f665fde android.hardware.neuralnetworks@1.3::IPreparedModelCallback
+4a6c3b3556da951b4def21ba579a227c022980fe4465df6cdfbe20628fa75f5a android.hardware.neuralnetworks@1.3::IPreparedModel
+94e803236398bed1febb11cc21051bc42ec003700139b099d6c479e02a7ca3c3 android.hardware.neuralnetworks@1.3::IPreparedModelCallback
 b74fe72cfe438f50e772e6a307657ff449d5bde83c15dd1f140ff2edbe73499c android.hardware.neuralnetworks@1.3::types
 274fb1254a6d1a97824ec5c880eeefc0e410dc6d3a2a4c34052201169d2b7de0 android.hardware.radio@1.5::types
 c8e81d912827a5d49b2ddcdc4eb4556c5d231a899a1dca879309e04210daa4a0 android.hardware.radio@1.5::IRadio

@@ -9,6 +9,7 @@ hidl_interface {
     srcs: [
         "types.hal",
         "IDevice.hal",
+        "IPreparedModel.hal",
         "IPreparedModelCallback.hal",
     ],
     interfaces: [

@@ -0,0 +1,90 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.hardware.neuralnetworks@1.3;
+
+import @1.0::ErrorStatus;
+import @1.0::Request;
+import @1.2::MeasureTiming;
+import @1.2::IExecutionCallback;
+import @1.2::IPreparedModel;
+
+/**
+ * IPreparedModel describes a model that has been prepared for execution and
+ * is used to launch executions.
+ */
+interface IPreparedModel extends @1.2::IPreparedModel {
+    /**
+     * Launches an asynchronous execution on a prepared model.
+     *
+     * The execution is performed asynchronously with respect to the caller.
+     * execute_1_3 must verify the inputs to the function are correct. If there is
+     * an error, execute_1_3 must immediately invoke the callback with the
+     * appropriate ErrorStatus value, then return with the same ErrorStatus. If
+     * the inputs to the function are valid and there is no error, execute_1_3 must
+     * launch an asynchronous task to perform the execution in the background,
+     * and immediately return with ErrorStatus::NONE. If the asynchronous task
+     * fails to launch, execute_1_3 must immediately invoke the callback with
+     * ErrorStatus::GENERAL_FAILURE, then return with
+     * ErrorStatus::GENERAL_FAILURE.
+     *
+     * When the asynchronous task has finished its execution, it must
+     * immediately invoke the callback object provided as an input to the
+     * execute_1_3 function. This callback must be provided with the ErrorStatus of
+     * the execution.
+     *
+     * If the launch is successful, the caller must not change the content of
+     * any data object referenced by 'request' (described by the
+     * {@link @1.0::DataLocation} of a {@link @1.0::RequestArgument}) until the
+     * asynchronous task has invoked the callback object. The asynchronous task
+     * must not change the content of any of the data objects corresponding to
+     * 'request' inputs.
+     *
+     * If the prepared model was prepared from a model wherein all tensor
+     * operands have fully specified dimensions, and the inputs to the function
+     * are valid, then:
+     * - the execution should launch successfully (ErrorStatus::NONE): There
+     *   must be no failure unless the device itself is in a bad state.
+     * - if at execution time every operation's input operands have legal
+     *   values, the execution should complete successfully (ErrorStatus::NONE):
+     *   There must be no failure unless the device itself is in a bad state.
+     *
+     * Any number of calls to the execute, execute_1_2, execute_1_3, and executeSynchronously
+     * functions, in any combination, may be made concurrently, even on the same
+     * IPreparedModel object.
+     *
+     * @param request The input and output information on which the prepared
+     *                model is to be executed.
+     * @param measure Specifies whether or not to measure duration of the execution.
+     *                The duration runs from the time the driver sees the call
+     *                to the execute_1_3 function to the time the driver invokes
+     *                the callback.
+     * @param callback A callback object used to return the error status of
+     *                 the execution. The callback object's notify function must
+     *                 be called exactly once, even if the execution was
+     *                 unsuccessful.
+     * @return status Error status of the call, must be:
+     *                - NONE if task is successfully launched
+     *                - DEVICE_UNAVAILABLE if driver is offline or busy
+     *                - GENERAL_FAILURE if there is an unspecified error
+     *                - OUTPUT_INSUFFICIENT_SIZE if provided output buffer is
+     *                  not large enough to store the resultant values
+     *                - INVALID_ARGUMENT if one of the input arguments is
+     *                  invalid
+     */
+    execute_1_3(Request request, MeasureTiming measure, IExecutionCallback callback)
+        generates (ErrorStatus status);
+};
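For orientation, a minimal caller-side sketch of the asynchronous path this method adds. It is not part of the change; it assumes the ExecutionCallback helper (with wait() and getStatus()) and the "1.2/Callbacks.h" utility header already used by the VTS code, and the runAsync wrapper name is purely illustrative.

// Illustrative sketch only; see the assumptions stated above.
#include <android/hardware/neuralnetworks/1.3/IPreparedModel.h>
#include "1.2/Callbacks.h"  // assumed VTS utility header providing ExecutionCallback

using ::android::sp;
using ::android::hardware::Return;
using ::android::hardware::neuralnetworks::V1_0::ErrorStatus;
using ::android::hardware::neuralnetworks::V1_0::Request;
using ::android::hardware::neuralnetworks::V1_2::MeasureTiming;
using ::android::hardware::neuralnetworks::V1_2::implementation::ExecutionCallback;
using ::android::hardware::neuralnetworks::V1_3::IPreparedModel;

static ErrorStatus runAsync(const sp<IPreparedModel>& preparedModel, const Request& request) {
    // The driver must invoke notify* on this callback exactly once.
    sp<ExecutionCallback> callback = new ExecutionCallback();
    Return<ErrorStatus> launchStatus =
            preparedModel->execute_1_3(request, MeasureTiming::NO, callback);
    if (!launchStatus.isOk() || static_cast<ErrorStatus>(launchStatus) != ErrorStatus::NONE) {
        return ErrorStatus::GENERAL_FAILURE;  // transport error or failed launch
    }
    callback->wait();              // block until the asynchronous task fires the callback
    return callback->getStatus();  // error status of the execution itself
}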

@@ -18,7 +18,7 @@ package android.hardware.neuralnetworks@1.3;
 
 import @1.0::ErrorStatus;
 import @1.2::IPreparedModelCallback;
-import @1.2::IPreparedModel;
+import IPreparedModel;
 
 /**
  * IPreparedModelCallback must be used to return a prepared model produced by an

@@ -54,7 +54,7 @@ Return<void> PreparedModelCallback::notify_1_2(ErrorStatus errorStatus,
 }
 
 Return<void> PreparedModelCallback::notify_1_3(ErrorStatus errorStatus,
-                                               const sp<V1_2::IPreparedModel>& preparedModel) {
+                                               const sp<V1_3::IPreparedModel>& preparedModel) {
     return notify(errorStatus, preparedModel);
 }

@@ -52,7 +52,6 @@ using implementation::PreparedModelCallback;
 using V1_0::ErrorStatus;
 using V1_1::ExecutionPreference;
 using V1_2::Constant;
-using V1_2::IPreparedModel;
 using V1_2::OperationType;
 
 namespace float32_model {

@@ -29,6 +29,7 @@
 #include <android/hardware/neuralnetworks/1.2/IPreparedModelCallback.h>
 #include <android/hardware/neuralnetworks/1.2/types.h>
 #include <android/hardware/neuralnetworks/1.3/IDevice.h>
+#include <android/hardware/neuralnetworks/1.3/IPreparedModel.h>
 #include <android/hardware/neuralnetworks/1.3/IPreparedModelCallback.h>
 #include <android/hardware/neuralnetworks/1.3/types.h>
 #include <android/hidl/allocator/1.0/IAllocator.h>
@@ -61,7 +62,6 @@ using V1_0::OperandLifeTime;
 using V1_0::Request;
 using V1_1::ExecutionPreference;
 using V1_2::Constant;
-using V1_2::IPreparedModel;
 using V1_2::MeasureTiming;
 using V1_2::OperationType;
 using V1_2::OutputShape;
@@ -181,7 +181,7 @@ static void makeOutputDimensionsUnspecified(Model* model) {
 static Return<ErrorStatus> ExecutePreparedModel(const sp<IPreparedModel>& preparedModel,
                                                 const Request& request, MeasureTiming measure,
                                                 sp<ExecutionCallback>& callback) {
-    return preparedModel->execute_1_2(request, measure, callback);
+    return preparedModel->execute_1_3(request, measure, callback);
 }
 static Return<ErrorStatus> ExecutePreparedModel(const sp<IPreparedModel>& preparedModel,
                                                 const Request& request, MeasureTiming measure,
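The second ExecutePreparedModel overload is cut off by the hunk context above. For comparison with the asynchronous overload, here is a rough sketch of a synchronous counterpart built on the @1.2 executeSynchronously method, which the 1.3 interface inherits; it reuses the using-declarations visible in the hunks above, and the exact body in this file may differ.

// Rough sketch, not the literal file contents.
static Return<ErrorStatus> ExecutePreparedModel(const sp<IPreparedModel>& preparedModel,
                                                const Request& request, MeasureTiming measure,
                                                hidl_vec<OutputShape>* outputShapes,
                                                Timing* timing) {
    ErrorStatus result = ErrorStatus::GENERAL_FAILURE;
    // executeSynchronously reports status, output shapes, and timing through a lambda.
    Return<void> ret = preparedModel->executeSynchronously(
            request, measure,
            [&result, outputShapes, timing](ErrorStatus status,
                                            const hidl_vec<OutputShape>& shapes,
                                            const Timing& time) {
                result = status;
                *outputShapes = shapes;
                *timing = time;
            });
    return ret.isOk() ? result : ErrorStatus::GENERAL_FAILURE;
}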

@@ -17,8 +17,8 @@
 #ifndef ANDROID_HARDWARE_NEURALNETWORKS_V1_3_GENERATED_TEST_HARNESS_H
 #define ANDROID_HARDWARE_NEURALNETWORKS_V1_3_GENERATED_TEST_HARNESS_H
 
-#include <android/hardware/neuralnetworks/1.2/IPreparedModel.h>
 #include <android/hardware/neuralnetworks/1.3/IDevice.h>
+#include <android/hardware/neuralnetworks/1.3/IPreparedModel.h>
 #include <android/hardware/neuralnetworks/1.3/types.h>
 #include <functional>
 #include <vector>
@@ -55,10 +55,9 @@ class ValidationTest : public GeneratedTestBase {};
 
 Model createModel(const test_helper::TestModel& testModel);
 
-void PrepareModel(const sp<IDevice>& device, const Model& model,
-                  sp<V1_2::IPreparedModel>* preparedModel);
+void PrepareModel(const sp<IDevice>& device, const Model& model, sp<IPreparedModel>* preparedModel);
 
-void EvaluatePreparedModel(const sp<V1_2::IPreparedModel>& preparedModel,
+void EvaluatePreparedModel(const sp<IPreparedModel>& preparedModel,
                            const test_helper::TestModel& testModel, bool testDynamicOutputShape);
 
 } // namespace android::hardware::neuralnetworks::V1_3::vts::functional

@@ -40,7 +40,6 @@ using V1_2::FmqRequestDatum;
 using V1_2::FmqResultDatum;
 using V1_2::IBurstCallback;
 using V1_2::IBurstContext;
-using V1_2::IPreparedModel;
 using V1_2::MeasureTiming;
 using V1_2::Timing;
 using ExecutionBurstCallback = ExecutionBurstController::ExecutionBurstCallback;

@@ -27,7 +27,6 @@ using implementation::PreparedModelCallback;
 using V1_0::ErrorStatus;
 using V1_0::OperandLifeTime;
 using V1_1::ExecutionPreference;
-using V1_2::IPreparedModel;
 using V1_2::OperationType;
 using V1_2::OperationTypeRange;
 using V1_2::SymmPerChannelQuantParams;
@@ -61,7 +60,7 @@ static void validatePrepareModel(const sp<IDevice>& device, const std::string& m
     preparedModelCallback->wait();
     ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
     ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, prepareReturnStatus);
-    sp<IPreparedModel> preparedModel = getPreparedModel_1_2(preparedModelCallback);
+    sp<IPreparedModel> preparedModel = getPreparedModel_1_3(preparedModelCallback);
     ASSERT_EQ(nullptr, preparedModel.get());
 }

@@ -29,7 +29,6 @@ namespace android::hardware::neuralnetworks::V1_3::vts::functional {
 
 using V1_0::ErrorStatus;
 using V1_0::Request;
-using V1_2::IPreparedModel;
 using V1_2::MeasureTiming;
 using V1_2::OutputShape;
 using V1_2::Timing;
@@ -61,11 +60,11 @@ static void validate(const sp<IPreparedModel>& preparedModel, const std::string&
 
     // asynchronous
     {
-        SCOPED_TRACE(message + " [execute_1_2]");
+        SCOPED_TRACE(message + " [execute_1_3]");
 
         sp<ExecutionCallback> executionCallback = new ExecutionCallback();
         Return<ErrorStatus> executeLaunchStatus =
-                preparedModel->execute_1_2(request, measure, executionCallback);
+                preparedModel->execute_1_3(request, measure, executionCallback);
         ASSERT_TRUE(executeLaunchStatus.isOk());
         ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, static_cast<ErrorStatus>(executeLaunchStatus));

@@ -34,7 +34,6 @@ using implementation::PreparedModelCallback;
 using V1_0::ErrorStatus;
 using V1_0::Request;
 using V1_1::ExecutionPreference;
-using V1_2::IPreparedModel;
 
 // internal helper function
 void createPreparedModel(const sp<IDevice>& device, const Model& model,
@@ -64,7 +63,7 @@ void createPreparedModel(const sp<IDevice>& device, const Model& model,
     // retrieve prepared model
     preparedModelCallback->wait();
     const ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
-    *preparedModel = getPreparedModel_1_2(preparedModelCallback);
+    *preparedModel = getPreparedModel_1_3(preparedModelCallback);
 
     // The getSupportedOperations_1_3 call returns a list of operations that are
     // guaranteed not to fail if prepareModel_1_3 is called, and
@@ -165,7 +164,7 @@ TEST_P(ValidationTest, Test) {
 
 INSTANTIATE_GENERATED_TEST(ValidationTest, [](const test_helper::TestModel&) { return true; });
 
-sp<IPreparedModel> getPreparedModel_1_2(const sp<PreparedModelCallback>& callback) {
+sp<IPreparedModel> getPreparedModel_1_3(const sp<PreparedModelCallback>& callback) {
     sp<V1_0::IPreparedModel> preparedModelV1_0 = callback->getPreparedModel();
     return IPreparedModel::castFrom(preparedModelV1_0).withDefault(nullptr);
 }

@@ -17,8 +17,8 @@
 #ifndef ANDROID_HARDWARE_NEURALNETWORKS_V1_3_VTS_HAL_NEURALNETWORKS_H
 #define ANDROID_HARDWARE_NEURALNETWORKS_V1_3_VTS_HAL_NEURALNETWORKS_H
 
-#include <android/hardware/neuralnetworks/1.2/IPreparedModel.h>
 #include <android/hardware/neuralnetworks/1.3/IDevice.h>
+#include <android/hardware/neuralnetworks/1.3/IPreparedModel.h>
 #include <android/hardware/neuralnetworks/1.3/types.h>
 #include <gtest/gtest.h>
 #include "1.0/Utils.h"
@@ -47,11 +47,10 @@ std::string printNeuralnetworksHidlTest(
 
 // Create an IPreparedModel object. If the model cannot be prepared,
 // "preparedModel" will be nullptr instead.
 void createPreparedModel(const sp<IDevice>& device, const Model& model,
-                         sp<V1_2::IPreparedModel>* preparedModel);
+                         sp<IPreparedModel>* preparedModel);
 
 // Utility function to get PreparedModel from callback and downcast to V1_2.
-sp<V1_2::IPreparedModel> getPreparedModel_1_2(
-        const sp<implementation::PreparedModelCallback>& callback);
+sp<IPreparedModel> getPreparedModel_1_3(const sp<implementation::PreparedModelCallback>& callback);
 
 } // namespace android::hardware::neuralnetworks::V1_3::vts::functional

@@ -137,7 +137,7 @@ class PreparedModelCallback : public IPreparedModelCallback {
      * nullptr if the model was unable to be prepared.
      */
     Return<void> notify_1_3(V1_0::ErrorStatus status,
-                            const sp<V1_2::IPreparedModel>& preparedModel) override;
+                            const sp<V1_3::IPreparedModel>& preparedModel) override;
 
     /**
      * PreparedModelCallback::wait blocks until notify* has been called on the