Add 1.2 NN HAL: IPreparedModel & callbacks.
Create 1.2 versions of IPreparedModel, IPreparedModelCallback, and
IExecutionCallback.
Currently the new interfaces are identical to their 1.0 counterparts,
but more methods will be introduced in later CLs.
Bug: 73506513
Test: VtsHalNeuralnetworksV1_xTargetTest with 1.2 sample driver
Change-Id: Icf4d04c22f88e825d87562f1489377fdf6bf585d
Merged-In: Icf4d04c22f88e825d87562f1489377fdf6bf585d
(cherry picked from commit b5cb8f7632)
parent 9820845e61
commit 1a06e77831
23 changed files with 347 additions and 71 deletions
@@ -1,10 +1,26 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
 #include "Callbacks.h"
 #include <android-base/logging.h>
 
 namespace android {
 namespace hardware {
 namespace neuralnetworks {
-namespace V1_0 {
+namespace V1_2 {
 namespace implementation {
 
 CallbackBase::CallbackBase() : mNotified(false) {}
@@ -88,7 +104,15 @@ PreparedModelCallback::PreparedModelCallback() :
 PreparedModelCallback::~PreparedModelCallback() {}
 
 Return<void> PreparedModelCallback::notify(ErrorStatus errorStatus,
-                                           const sp<IPreparedModel>& preparedModel) {
+                                           const sp<V1_0::IPreparedModel>& preparedModel) {
     mErrorStatus = errorStatus;
     mPreparedModel = preparedModel;
     CallbackBase::notify();
+    return Void();
+}
+
+Return<void> PreparedModelCallback::notify_1_2(ErrorStatus errorStatus,
+                                               const sp<V1_2::IPreparedModel>& preparedModel) {
+    mErrorStatus = errorStatus;
+    mPreparedModel = preparedModel;
+    CallbackBase::notify();
@@ -100,7 +124,7 @@ ErrorStatus PreparedModelCallback::getStatus() {
     return mErrorStatus;
 }
 
-sp<IPreparedModel> PreparedModelCallback::getPreparedModel() {
+sp<V1_0::IPreparedModel> PreparedModelCallback::getPreparedModel() {
     wait();
     return mPreparedModel;
 }
@@ -115,13 +139,19 @@ Return<void> ExecutionCallback::notify(ErrorStatus errorStatus) {
     return Void();
 }
 
+Return<void> ExecutionCallback::notify_1_2(ErrorStatus errorStatus) {
+    mErrorStatus = errorStatus;
+    CallbackBase::notify();
+    return Void();
+}
+
 ErrorStatus ExecutionCallback::getStatus() {
     wait();
     return mErrorStatus;
 }
 
 } // namespace implementation
-} // namespace V1_0
+} // namespace V1_2
 } // namespace neuralnetworks
 } // namespace hardware
 } // namespace android
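
For reference, a minimal sketch of how the runtime or a test consumes the updated PreparedModelCallback. This is illustrative only: the helper name waitForPreparation is not part of this change, and the callback is assumed to have already been handed to the driver's prepare call (e.g. IDevice::prepareModel_1_2, which is outside the scope of this CL).

// Illustrative consumer-side sketch of the 1.2 PreparedModelCallback.
#include "Callbacks.h"

using ::android::sp;
using namespace ::android::hardware::neuralnetworks;

V1_0::ErrorStatus waitForPreparation(
        const sp<V1_2::implementation::PreparedModelCallback>& callback,
        sp<V1_0::IPreparedModel>* outModel) {
    callback->wait();                          // blocks until notify()/notify_1_2()
    *outModel = callback->getPreparedModel();  // model delivered by either notify
    return callback->getStatus();
}
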
@@ -1,22 +1,42 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
 #ifndef ANDROID_HARDWARE_NEURALNETWORKS_V1_0_CALLBACKS_H
 #define ANDROID_HARDWARE_NEURALNETWORKS_V1_0_CALLBACKS_H
 
 #include <android/hardware/neuralnetworks/1.0/IExecutionCallback.h>
 #include <android/hardware/neuralnetworks/1.0/IPreparedModelCallback.h>
+#include <android/hardware/neuralnetworks/1.2/IExecutionCallback.h>
+#include <android/hardware/neuralnetworks/1.2/IPreparedModelCallback.h>
+#include <hidl/MQDescriptor.h>
+#include <hidl/Status.h>
 #include <chrono>
 #include <condition_variable>
 #include <functional>
-#include <hidl/MQDescriptor.h>
-#include <hidl/Status.h>
 #include <mutex>
 #include <thread>
 
 namespace android {
 namespace hardware {
 namespace neuralnetworks {
-namespace V1_0 {
+namespace V1_2 {
 namespace implementation {
 
+using V1_0::ErrorStatus;
+
 /**
  * The CallbackBase class is used internally by the NeuralNetworks runtime to
  * synchronize between different threads. An asynchronous task is launched
@@ -156,11 +176,11 @@ class CallbackBase {
 * asynchronously with respect to the runtime. If a calling thread calls wait*
 * or get* on a PreparedModelCallback object and the corresponding asynchronous
 * task has not finished preparing the model, the calling thread will block
-* until the asynchronous task has called notify. For more information on the
-* synchronization behavior, refer to the CallbackBase class.
+* until the asynchronous task has either called notify or notify_1_2. For more
+* information on the synchronization behavior, refer to the CallbackBase class.
 *
 * This class inherits the basic blocking and signaling calls from
-* CallbackBase, and implements the HIDL notify call from
+* CallbackBase, and implements the HIDL notify and notify_1_2 calls from
 * IPreparedModelCallback. This callback object is passed as an argument to
 * IDevice::prepareModel.
 */
@@ -170,15 +190,15 @@ class PreparedModelCallback : public CallbackBase, public IPreparedModelCallback
     ~PreparedModelCallback() override;
 
     /**
-     * IPreparedModelCallback::notify marks the callback object with the return
-     * status of the asynchronous model preparation along with the prepared
-     * model, and calls CallbackBase::notify, enabling all prior and future
-     * wait* calls on the PreparedModelCallback object to proceed. For more
-     * information on the synchronization behavior, refer to the CallbackBase
-     * class.
+     * IPreparedModelCallback::notify and IPreparedModelCallback::notify_1_2
+     * mark the callback object with the return status of the asynchronous
+     * model preparation along with the prepared model, and call
+     * CallbackBase::notify, enabling all prior and future wait* calls on the
+     * PreparedModelCallback object to proceed. For more information on the
+     * synchronization behavior, refer to the CallbackBase class.
      *
-     * IPreparedModelCallback::notify must be called exactly once on a given
-     * PreparedModelCallback object.
+     * Either IPreparedModelCallback::notify or IPreparedModelCallback::notify_1_2
+     * must be called exactly once on a given PreparedModelCallback object.
      *
     * @param status Error status returned from asynchronously preparing the
     *               model; will be:
@@ -189,7 +209,9 @@ class PreparedModelCallback : public CallbackBase, public IPreparedModelCallback
     * @param preparedModel Returned model that has been prepared for execution,
     *                      nullptr if the model was unable to be prepared.
     */
-    Return<void> notify(ErrorStatus status, const sp<IPreparedModel>& preparedModel) override;
+    Return<void> notify(ErrorStatus status, const sp<V1_0::IPreparedModel>& preparedModel) override;
+    Return<void> notify_1_2(ErrorStatus status,
+                            const sp<V1_2::IPreparedModel>& preparedModel) override;
 
     /**
     * Retrieves the error status returned from the asynchronous task launched
@@ -217,11 +239,11 @@ class PreparedModelCallback : public CallbackBase, public IPreparedModelCallback
     *                       execution, nullptr if the model was unable to be
     *                       prepared.
     */
-    sp<IPreparedModel> getPreparedModel();
+    sp<V1_0::IPreparedModel> getPreparedModel();
 
    private:
     ErrorStatus mErrorStatus;
-    sp<IPreparedModel> mPreparedModel;
+    sp<V1_0::IPreparedModel> mPreparedModel;
 };
 
 /**
@@ -229,12 +251,12 @@ class PreparedModelCallback : public CallbackBase, public IPreparedModelCallback
 * execution from a task executing asynchronously with respect to the runtime.
 * If a calling thread calls wait* or get* on a PreparedModelCallback object and
 * the corresponding asynchronous task has not finished the execution, the
-* calling thread will block until the asynchronous task has called notify. For
-* more information on the synchronization behavior, refer to the CallbackBase
-* class.
+* calling thread will block until the asynchronous task has either called notify
+* or notify_1_2. For more information on the synchronization behavior, refer to
+* the CallbackBase class.
 *
 * This class inherits the basic blocking and signaling calls from
-* CallbackBase, and implements the HIDL notify call from
+* CallbackBase, and implements the HIDL notify and notify_1_2 calls from
 * IExecutionCallback. This callback object is passed as an argument to
 * IPreparedModel::execute.
 */
@@ -244,14 +266,14 @@ class ExecutionCallback : public CallbackBase, public IExecutionCallback {
     ~ExecutionCallback() override;
 
     /**
-     * IExecutionCallback::notify marks the callback object with the return
-     * status of the asynchronous execution that held this callback and enables
-     * all prior and future wait* calls on the ExecutionCallback object to
-     * proceed. For more information on the synchronization behavior, refer to
-     * the CallbackBase class.
+     * IExecutionCallback::notify and IExecutionCallback::notify_1_2 mark the
+     * callback object with the return status of the asynchronous execution that
+     * held this callback and enable all prior and future wait* calls on the
+     * ExecutionCallback object to proceed. For more information on the
+     * synchronization behavior, refer to the CallbackBase class.
      *
-     * IExecutionCallback::notify must be called exactly once on a given
-     * ExecutionCallback object.
+     * Either IExecutionCallback::notify or IExecutionCallback::notify_1_2 must
+     * be called exactly once on a given ExecutionCallback object.
      *
     * @param status Error status returned from asynchronously preparing the
     *               model; will be:
@@ -263,6 +285,7 @@ class ExecutionCallback : public CallbackBase, public IExecutionCallback {
     *               - INVALID_ARGUMENT if the input request is invalid
     */
     Return<void> notify(ErrorStatus status) override;
+    Return<void> notify_1_2(ErrorStatus status) override;
 
     /**
     * Retrieves the error status returned from the asynchronous task launched
@@ -299,7 +322,7 @@ std::cv_status CallbackBase::wait_for(const std::chrono::duration<Rep,Period>& t
 }
 
 } // namespace implementation
-} // namespace V1_0
+} // namespace V1_2
 } // namespace neuralnetworks
 } // namespace hardware
 } // namespace android
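
The synchronization contract documented in this header can also be viewed from the asynchronous task's side. The sketch below is hypothetical driver-style code (the lambda body is a placeholder, and launchAsyncExecution is not part of this change); it shows the single required notify_1_2 call that releases any thread blocked in wait*.

// Hypothetical asynchronous task honoring the "notify exactly once" contract.
#include "Callbacks.h"
#include <thread>

using ::android::sp;
using namespace ::android::hardware::neuralnetworks;

void launchAsyncExecution(const sp<V1_2::implementation::ExecutionCallback>& callback) {
    std::thread([callback] {
        // ... perform the actual execution here (placeholder) ...
        bool success = true;
        // Exactly one of notify()/notify_1_2() must be invoked, exactly once;
        // this unblocks every past and future wait*/get* call on the object.
        callback->notify_1_2(success ? V1_0::ErrorStatus::NONE
                                     : V1_0::ErrorStatus::GENERAL_FAILURE);
    }).detach();
}
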
@@ -24,6 +24,11 @@
 #include <android/hardware/neuralnetworks/1.0/IPreparedModel.h>
 #include <android/hardware/neuralnetworks/1.0/IPreparedModelCallback.h>
 #include <android/hardware/neuralnetworks/1.0/types.h>
+#include <android/hardware/neuralnetworks/1.1/IDevice.h>
+#include <android/hardware/neuralnetworks/1.2/IDevice.h>
+#include <android/hardware/neuralnetworks/1.2/IExecutionCallback.h>
+#include <android/hardware/neuralnetworks/1.2/IPreparedModel.h>
+#include <android/hardware/neuralnetworks/1.2/IPreparedModelCallback.h>
 #include <android/hidl/allocator/1.0/IAllocator.h>
 #include <android/hidl/memory/1.0/IMemory.h>
 #include <hidlmemory/mapping.h>
@@ -34,8 +39,8 @@ namespace hardware {
 namespace neuralnetworks {
 
 namespace generated_tests {
-using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback;
-using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback;
+using ::android::hardware::neuralnetworks::V1_2::implementation::ExecutionCallback;
+using ::android::hardware::neuralnetworks::V1_2::implementation::PreparedModelCallback;
 using ::test_helper::bool8;
 using ::test_helper::compare;
 using ::test_helper::expectMultinomialDistributionWithinTolerance;
@@ -73,7 +78,18 @@ void copy_back(MixedTyped* dst, const std::vector<RequestArgument>& ra, char* sr
 
 // Top level driver for models and examples generated by test_generator.py
 // Test driver for those generated from ml/nn/runtime/test/spec
-void EvaluatePreparedModel(sp<IPreparedModel>& preparedModel, std::function<bool(int)> is_ignored,
+static Return<ErrorStatus> ExecutePreparedModel(sp<V1_0::IPreparedModel>& preparedModel,
+                                                const Request& request,
+                                                sp<ExecutionCallback>& callback) {
+    return preparedModel->execute(request, callback);
+}
+static Return<ErrorStatus> ExecutePreparedModel(sp<V1_2::IPreparedModel>& preparedModel,
+                                                const Request& request,
+                                                sp<ExecutionCallback>& callback) {
+    return preparedModel->execute_1_2(request, callback);
+}
+template <typename T_IPreparedModel>
+void EvaluatePreparedModel(sp<T_IPreparedModel>& preparedModel, std::function<bool(int)> is_ignored,
                            const std::vector<MixedTypedExample>& examples,
                            bool hasRelaxedFloat32Model = false, float fpAtol = 1e-5f,
                            float fpRtol = 1e-5f) {
@@ -172,8 +188,9 @@ void EvaluatePreparedModel(sp<IPreparedModel>& preparedModel, std::function<bool
         // launch execution
         sp<ExecutionCallback> executionCallback = new ExecutionCallback();
         ASSERT_NE(nullptr, executionCallback.get());
-        Return<ErrorStatus> executionLaunchStatus = preparedModel->execute(
-            {.inputs = inputs_info, .outputs = outputs_info, .pools = pools}, executionCallback);
+        Return<ErrorStatus> executionLaunchStatus = ExecutePreparedModel(
+            preparedModel, {.inputs = inputs_info, .outputs = outputs_info, .pools = pools},
+            executionCallback);
         ASSERT_TRUE(executionLaunchStatus.isOk());
         EXPECT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(executionLaunchStatus));
 
@@ -199,6 +216,16 @@ void EvaluatePreparedModel(sp<IPreparedModel>& preparedModel, std::function<bool
     }
 }
 
+static void getPreparedModel(sp<PreparedModelCallback> callback,
+                             sp<V1_0::IPreparedModel>* preparedModel) {
+    *preparedModel = callback->getPreparedModel();
+}
+static void getPreparedModel(sp<PreparedModelCallback> callback,
+                             sp<V1_2::IPreparedModel>* preparedModel) {
+    sp<V1_0::IPreparedModel> preparedModelV1_0 = callback->getPreparedModel();
+    *preparedModel = V1_2::IPreparedModel::castFrom(preparedModelV1_0).withDefault(nullptr);
+}
+
 void Execute(const sp<V1_0::IDevice>& device, std::function<V1_0::Model(void)> create_model,
              std::function<bool(int)> is_ignored, const std::vector<MixedTypedExample>& examples) {
     V1_0::Model model = create_model();
@@ -224,7 +251,8 @@ void Execute(const sp<V1_0::IDevice>& device, std::function<V1_0::Model(void)> c
     // retrieve prepared model
     preparedModelCallback->wait();
     ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
-    sp<IPreparedModel> preparedModel = preparedModelCallback->getPreparedModel();
+    sp<V1_0::IPreparedModel> preparedModel;
+    getPreparedModel(preparedModelCallback, &preparedModel);
 
     // early termination if vendor service cannot fully prepare model
     if (!fullySupportsModel && prepareReturnStatus != ErrorStatus::NONE) {
@@ -270,7 +298,8 @@ void Execute(const sp<V1_1::IDevice>& device, std::function<V1_1::Model(void)> c
     // retrieve prepared model
     preparedModelCallback->wait();
     ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
-    sp<IPreparedModel> preparedModel = preparedModelCallback->getPreparedModel();
+    sp<V1_0::IPreparedModel> preparedModel;
+    getPreparedModel(preparedModelCallback, &preparedModel);
 
     // early termination if vendor service cannot fully prepare model
     if (!fullySupportsModel && prepareReturnStatus != ErrorStatus::NONE) {
@@ -316,7 +345,8 @@ void Execute(const sp<V1_2::IDevice>& device, std::function<V1_2::Model(void)> c
     // retrieve prepared model
     preparedModelCallback->wait();
     ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
-    sp<IPreparedModel> preparedModel = preparedModelCallback->getPreparedModel();
+    sp<V1_2::IPreparedModel> preparedModel;
+    getPreparedModel(preparedModelCallback, &preparedModel);
 
     // early termination if vendor service cannot fully prepare model
    if (!fullySupportsModel && prepareReturnStatus != ErrorStatus::NONE) {
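
The harness changes above lean on a small idiom: a single EvaluatePreparedModel template stays HAL-version-agnostic, while the ExecutePreparedModel and getPreparedModel overloads select the 1.0 or 1.2 entry point at compile time. A stripped-down, self-contained illustration of that idiom (not the actual test code):

// Overload-based dispatch: the template never mentions a HAL version.
#include <iostream>

struct PreparedModelV1_0 {};
struct PreparedModelV1_2 {};

static void executePreparedModel(const PreparedModelV1_0&) { std::cout << "execute()\n"; }
static void executePreparedModel(const PreparedModelV1_2&) { std::cout << "execute_1_2()\n"; }

template <typename T_IPreparedModel>
void evaluatePreparedModel(const T_IPreparedModel& preparedModel) {
    executePreparedModel(preparedModel);  // overload resolution picks the right call
}

int main() {
    evaluatePreparedModel(PreparedModelV1_0{});  // prints "execute()"
    evaluatePreparedModel(PreparedModelV1_2{});  // prints "execute_1_2()"
}
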
@@ -40,8 +40,8 @@ namespace V1_0 {
 namespace vts {
 namespace functional {
 
-using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback;
-using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback;
+using ::android::hardware::neuralnetworks::V1_2::implementation::ExecutionCallback;
+using ::android::hardware::neuralnetworks::V1_2::implementation::PreparedModelCallback;
 using ::android::nn::allocateSharedMemory;
 using ::test_helper::MixedTypedExample;
 

@@ -27,8 +27,8 @@ namespace V1_0 {
 namespace vts {
 namespace functional {
 
-using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback;
-using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback;
+using ::android::hardware::neuralnetworks::V1_2::implementation::ExecutionCallback;
+using ::android::hardware::neuralnetworks::V1_2::implementation::PreparedModelCallback;
 
 ///////////////////////// UTILITY FUNCTIONS /////////////////////////
 

@@ -33,8 +33,8 @@ namespace V1_0 {
 namespace vts {
 namespace functional {
 
-using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback;
-using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback;
+using ::android::hardware::neuralnetworks::V1_2::implementation::ExecutionCallback;
+using ::android::hardware::neuralnetworks::V1_2::implementation::PreparedModelCallback;
 using ::android::hidl::memory::V1_0::IMemory;
 using test_helper::for_all;
 using test_helper::MixedTyped;

@@ -40,8 +40,8 @@ namespace V1_1 {
 namespace vts {
 namespace functional {
 
-using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback;
-using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback;
+using ::android::hardware::neuralnetworks::V1_2::implementation::ExecutionCallback;
+using ::android::hardware::neuralnetworks::V1_2::implementation::PreparedModelCallback;
 using ::android::nn::allocateSharedMemory;
 using ::test_helper::MixedTypedExample;
 

@@ -40,8 +40,8 @@ namespace V1_1 {
 namespace vts {
 namespace functional {
 
-using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback;
-using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback;
+using ::android::hardware::neuralnetworks::V1_2::implementation::ExecutionCallback;
+using ::android::hardware::neuralnetworks::V1_2::implementation::PreparedModelCallback;
 using ::android::nn::allocateSharedMemory;
 using ::test_helper::MixedTypedExample;
 

@@ -33,8 +33,8 @@ using V1_0::OperandType;
 namespace vts {
 namespace functional {
 
-using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback;
-using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback;
+using ::android::hardware::neuralnetworks::V1_2::implementation::ExecutionCallback;
+using ::android::hardware::neuralnetworks::V1_2::implementation::PreparedModelCallback;
 
 ///////////////////////// UTILITY FUNCTIONS /////////////////////////
 

@@ -33,8 +33,8 @@ namespace V1_1 {
 namespace vts {
 namespace functional {
 
-using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback;
-using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback;
+using ::android::hardware::neuralnetworks::V1_2::implementation::ExecutionCallback;
+using ::android::hardware::neuralnetworks::V1_2::implementation::PreparedModelCallback;
 using ::android::hidl::memory::V1_0::IMemory;
 using test_helper::for_all;
 using test_helper::MixedTyped;

@@ -36,6 +36,7 @@ namespace V1_1 {
 
 using V1_0::DeviceStatus;
 using V1_0::ErrorStatus;
+using V1_0::IPreparedModel;
 using V1_0::Operand;
 using V1_0::OperandType;
 using V1_0::Request;

@@ -9,6 +9,9 @@ hidl_interface {
     srcs: [
         "types.hal",
         "IDevice.hal",
+        "IExecutionCallback.hal",
+        "IPreparedModel.hal",
+        "IPreparedModelCallback.hal",
     ],
     interfaces: [
         "android.hardware.neuralnetworks@1.0",

@@ -17,9 +17,9 @@
 package android.hardware.neuralnetworks@1.2;
 
 import @1.0::ErrorStatus;
-import @1.0::IPreparedModelCallback;
 import @1.1::ExecutionPreference;
 import @1.1::IDevice;
+import IPreparedModelCallback;
 
 /**
 * This interface represents a device driver.
neuralnetworks/1.2/IExecutionCallback.hal (new file, 48 lines)
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.hardware.neuralnetworks@1.2;
+
+import @1.0::ErrorStatus;
+import @1.0::IExecutionCallback;
+
+/**
+ * IExecutionCallback must be used to return the error status result from an
+ * execution asynchronously launched from IPreparedModel::execute.
+ */
+interface IExecutionCallback extends @1.0::IExecutionCallback {
+
+    /**
+     * Either notify_1_2 or notify must be invoked immediately after the asynchronous
+     * task has finished performing the execution. Either notify_1_2 or notify must be
+     * provided with the ErrorStatus from the execution. If the asynchronous task is
+     * not launched, either notify_1_2 or notify must be invoked with the appropriate
+     * error.
+     *
+     * @param status Error status returned from launching the asynchronous task
+     *               (if the launch fails) or from the asynchronous task itself
+     *               (if the launch succeeds). Must be:
+     *               - NONE if the asynchronous execution was successful
+     *               - DEVICE_UNAVAILABLE if driver is offline or busy
+     *               - GENERAL_FAILURE if the asynchronous task resulted in an
+     *                 unspecified error
+     *               - OUTPUT_INSUFFICIENT_SIZE if provided output buffer is
+     *                 not large enough to store the resultant values
+     *               - INVALID_ARGUMENT if one of the input arguments to
+     *                 prepareModel is invalid
+     */
+    oneway notify_1_2(ErrorStatus status);
+};
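
From the client's side, this callback pairs with the 1.2 execute_1_2 method added in the same change. A hedged sketch of launching an execution and collecting the status (the helper name runOnce is illustrative, not part of this change):

// Illustrative client-side use of execute_1_2 with the 1.2 ExecutionCallback.
#include "Callbacks.h"
#include <android/hardware/neuralnetworks/1.2/IPreparedModel.h>

using ::android::sp;
using namespace ::android::hardware::neuralnetworks;

V1_0::ErrorStatus runOnce(const sp<V1_2::IPreparedModel>& preparedModel,
                          const V1_0::Request& request) {
    sp<V1_2::implementation::ExecutionCallback> callback =
            new V1_2::implementation::ExecutionCallback();

    auto launchStatus = preparedModel->execute_1_2(request, callback);
    if (!launchStatus.isOk()) {
        return V1_0::ErrorStatus::GENERAL_FAILURE;  // transport error
    }
    if (static_cast<V1_0::ErrorStatus>(launchStatus) != V1_0::ErrorStatus::NONE) {
        return static_cast<V1_0::ErrorStatus>(launchStatus);  // launch failed
    }
    callback->wait();              // released by notify_1_2 (or notify)
    return callback->getStatus();  // status of the execution itself
}
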
neuralnetworks/1.2/IPreparedModel.hal (new file, 74 lines)
@@ -0,0 +1,74 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.hardware.neuralnetworks@1.2;
+
+import @1.0::ErrorStatus;
+import @1.0::IPreparedModel;
+import @1.0::Request;
+import IExecutionCallback;
+
+/**
+ * IPreparedModel describes a model that has been prepared for execution and
+ * is used to launch executions.
+ */
+interface IPreparedModel extends @1.0::IPreparedModel {
+    /**
+     * Launches an asynchronous execution on a prepared model.
+     *
+     * The execution is performed asynchronously with respect to the caller.
+     * execute_1_2 must verify the inputs to the function are correct. If there is
+     * an error, execute_1_2 must immediately invoke the callback with the
+     * appropriate ErrorStatus value, then return with the same ErrorStatus. If
+     * the inputs to the function are valid and there is no error, execute_1_2 must
+     * launch an asynchronous task to perform the execution in the background,
+     * and immediately return with ErrorStatus::NONE. If the asynchronous task
+     * fails to launch, execute_1_2 must immediately invoke the callback with
+     * ErrorStatus::GENERAL_FAILURE, then return with
+     * ErrorStatus::GENERAL_FAILURE.
+     *
+     * When the asynchronous task has finished its execution, it must
+     * immediately invoke the callback object provided as an input to the
+     * execute_1_2 function. This callback must be provided with the ErrorStatus of
+     * the execution.
+     *
+     * If the prepared model was prepared from a model wherein all
+     * tensor operands have fully specified dimensions, and the inputs
+     * to the function are valid, then the execution should launch
+     * and complete successfully (ErrorStatus::NONE). There must be
+     * no failure unless the device itself is in a bad state.
+     *
+     * Multiple threads can call the execute_1_2 function on the same IPreparedModel
+     * object concurrently with different requests.
+     *
+     * @param request The input and output information on which the prepared
+     *                model is to be executed.
+     * @param callback A callback object used to return the error status of
+     *                 the execution. The callback object's notify function must
+     *                 be called exactly once, even if the execution was
+     *                 unsuccessful.
+     * @return status Error status of the call, must be:
+     *                - NONE if task is successfully launched
+     *                - DEVICE_UNAVAILABLE if driver is offline or busy
+     *                - GENERAL_FAILURE if there is an unspecified error
+     *                - OUTPUT_INSUFFICIENT_SIZE if provided output buffer is
+     *                  not large enough to store the resultant values
+     *                - INVALID_ARGUMENT if one of the input arguments is
+     *                  invalid
+     */
+    execute_1_2(Request request, IExecutionCallback callback)
+        generates (ErrorStatus status);
+};
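
The contract above maps naturally onto a driver implementation. The following is a hypothetical SampleDriver-style sketch (SamplePreparedModel, validate, and computeAsynchronously are placeholders, not code from this change): validate the request, report errors through both the callback and the return value, and otherwise launch the work asynchronously and return NONE immediately.

// Hypothetical driver-side skeleton for execute_1_2.
#include <android/hardware/neuralnetworks/1.2/IPreparedModel.h>
#include <thread>

using ::android::sp;
using ::android::hardware::Return;
using namespace ::android::hardware::neuralnetworks;

struct SamplePreparedModel : public V1_2::IPreparedModel {
    // 1.0 entry point elided in this sketch.
    Return<V1_0::ErrorStatus> execute(const V1_0::Request&,
                                      const sp<V1_0::IExecutionCallback>&) override {
        return V1_0::ErrorStatus::GENERAL_FAILURE;
    }

    Return<V1_0::ErrorStatus> execute_1_2(const V1_0::Request& request,
                                          const sp<V1_2::IExecutionCallback>& callback) override {
        if (!validate(request)) {
            // Invalid input: report the error through the callback AND the return value.
            callback->notify_1_2(V1_0::ErrorStatus::INVALID_ARGUMENT);
            return V1_0::ErrorStatus::INVALID_ARGUMENT;
        }
        // Valid input: run in the background and return NONE right away.
        std::thread([this, request, callback] {
            V1_0::ErrorStatus status = computeAsynchronously(request);
            callback->notify_1_2(status);
        }).detach();
        return V1_0::ErrorStatus::NONE;
    }

    bool validate(const V1_0::Request&) const { return true; }             // placeholder
    V1_0::ErrorStatus computeAsynchronously(const V1_0::Request&) const {  // placeholder
        return V1_0::ErrorStatus::NONE;
    }
};
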
neuralnetworks/1.2/IPreparedModelCallback.hal (new file, 55 lines)
@@ -0,0 +1,55 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.hardware.neuralnetworks@1.2;
+
+import @1.0::ErrorStatus;
+import @1.0::IPreparedModelCallback;
+import IPreparedModel;
+
+/**
+ * IPreparedModelCallback must be used to return a prepared model produced by an
+ * asynchronous task launched from IDevice::prepareModel.
+ */
+interface IPreparedModelCallback extends @1.0::IPreparedModelCallback {
+
+    /**
+     * Either notify_1_2 or notify must be invoked immediately after the asynchronous
+     * task holding this callback has finished preparing the model. If the model was
+     * successfully prepared, either notify_1_2 or notify must be invoked with
+     * ErrorStatus::NONE and the prepared model. If the model was not able to be
+     * successfully prepared, either notify_1_2 or notify must be invoked with the
+     * appropriate ErrorStatus and nullptr as the IPreparedModel. If the asynchronous
+     * task holding this callback fails to launch or if the model provided to
+     * IDevice::prepareModel is invalid, either notify_1_2 or notify must be invoked
+     * with the appropriate error as well as nullptr for the IPreparedModel.
+     *
+     * @param status Error status returned from the asynchronous model
+     *               preparation task; must be:
+     *               - NONE if the asynchronous task successfully prepared the
+     *                 model
+     *               - DEVICE_UNAVAILABLE if driver is offline or busy
+     *               - GENERAL_FAILURE if the asynchronous task resulted in an
+     *                 unspecified error
+     *               - INVALID_ARGUMENT if one of the input arguments to
+     *                 prepareModel is invalid
+     * @param preparedModel A model that has been asynchronously prepared for
+     *                      execution. If the model was unable to be prepared
+     *                      due to an error, nullptr must be passed in place of
+     *                      the IPreparedModel object.
+     */
+    oneway notify_1_2(ErrorStatus status, IPreparedModel preparedModel);
+};
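
On the model-preparation path the same rule applies: the driver must invoke the callback exactly once, with nullptr standing in for the prepared model on failure. A hypothetical helper sketch (finishPreparation is illustrative, not part of this change):

// Hypothetical driver-side completion of model preparation.
#include <android/hardware/neuralnetworks/1.2/IPreparedModel.h>
#include <android/hardware/neuralnetworks/1.2/IPreparedModelCallback.h>

using ::android::sp;
using namespace ::android::hardware::neuralnetworks;

void finishPreparation(bool valid, const sp<V1_2::IPreparedModel>& result,
                       const sp<V1_2::IPreparedModelCallback>& callback) {
    if (!valid) {
        // Invalid model: the callback must still be invoked, with nullptr.
        callback->notify_1_2(V1_0::ErrorStatus::INVALID_ARGUMENT, nullptr);
        return;
    }
    // Success: hand the prepared model back exactly once.
    callback->notify_1_2(V1_0::ErrorStatus::NONE, result);
}
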
@@ -40,8 +40,8 @@ namespace V1_2 {
 namespace vts {
 namespace functional {
 
-using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback;
-using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback;
+using ::android::hardware::neuralnetworks::V1_2::implementation::ExecutionCallback;
+using ::android::hardware::neuralnetworks::V1_2::implementation::PreparedModelCallback;
 using ::android::nn::allocateSharedMemory;
 using ::test_helper::MixedTypedExample;
 

@@ -40,8 +40,8 @@ namespace V1_2 {
 namespace vts {
 namespace functional {
 
-using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback;
-using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback;
+using ::android::hardware::neuralnetworks::V1_2::implementation::ExecutionCallback;
+using ::android::hardware::neuralnetworks::V1_2::implementation::PreparedModelCallback;
 using ::android::nn::allocateSharedMemory;
 using ::test_helper::MixedTypedExample;
 

@@ -40,8 +40,8 @@ namespace V1_2 {
 namespace vts {
 namespace functional {
 
-using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback;
-using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback;
+using ::android::hardware::neuralnetworks::V1_2::implementation::ExecutionCallback;
+using ::android::hardware::neuralnetworks::V1_2::implementation::PreparedModelCallback;
 using ::android::nn::allocateSharedMemory;
 using ::test_helper::MixedTypedExample;
 

@@ -25,15 +25,14 @@ namespace hardware {
 namespace neuralnetworks {
 namespace V1_2 {
 
-using V1_0::IPreparedModel;
 using V1_0::OperandLifeTime;
 using V1_1::ExecutionPreference;
 
 namespace vts {
 namespace functional {
 
-using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback;
-using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback;
+using ::android::hardware::neuralnetworks::V1_2::implementation::ExecutionCallback;
+using ::android::hardware::neuralnetworks::V1_2::implementation::PreparedModelCallback;
 
 ///////////////////////// UTILITY FUNCTIONS /////////////////////////
 
@@ -62,7 +61,7 @@ static void validatePrepareModel(const sp<IDevice>& device, const std::string& m
     preparedModelCallback->wait();
     ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
     ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, prepareReturnStatus);
-    sp<IPreparedModel> preparedModel = preparedModelCallback->getPreparedModel();
+    sp<IPreparedModel> preparedModel = getPreparedModel_1_2(preparedModelCallback);
     ASSERT_EQ(nullptr, preparedModel.get());
 }
 

@@ -33,8 +33,8 @@ namespace V1_2 {
 namespace vts {
 namespace functional {
 
-using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback;
-using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback;
+using ::android::hardware::neuralnetworks::V1_2::implementation::ExecutionCallback;
+using ::android::hardware::neuralnetworks::V1_2::implementation::PreparedModelCallback;
 using ::android::hidl::memory::V1_0::IMemory;
 using test_helper::for_all;
 using test_helper::MixedTyped;
@@ -68,7 +68,7 @@ static void createPreparedModel(const sp<IDevice>& device, const Model& model,
     // retrieve prepared model
     preparedModelCallback->wait();
     ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
-    *preparedModel = preparedModelCallback->getPreparedModel();
+    *preparedModel = getPreparedModel_1_2(preparedModelCallback);
 
     // The getSupportedOperations_1_2 call returns a list of operations that are
     // guaranteed not to fail if prepareModel_1_2 is called, and
@@ -101,7 +101,8 @@ static void validate(const sp<IPreparedModel>& preparedModel, const std::string&
 
     sp<ExecutionCallback> executionCallback = new ExecutionCallback();
     ASSERT_NE(nullptr, executionCallback.get());
-    Return<ErrorStatus> executeLaunchStatus = preparedModel->execute(request, executionCallback);
+    Return<ErrorStatus> executeLaunchStatus =
+        preparedModel->execute_1_2(request, executionCallback);
     ASSERT_TRUE(executeLaunchStatus.isOk());
     ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, static_cast<ErrorStatus>(executeLaunchStatus));
 

@@ -58,6 +58,12 @@ void NeuralnetworksHidlTest::TearDown() {
     ::testing::VtsHalHidlTargetTestBase::TearDown();
 }
 
+sp<IPreparedModel> getPreparedModel_1_2(
+    const sp<V1_2::implementation::PreparedModelCallback>& callback) {
+    sp<V1_0::IPreparedModel> preparedModelV1_0 = callback->getPreparedModel();
+    return V1_2::IPreparedModel::castFrom(preparedModelV1_0).withDefault(nullptr);
+}
+
 } // namespace functional
 } // namespace vts
 

@@ -17,6 +17,8 @@
 #ifndef VTS_HAL_NEURALNETWORKS_V1_2_H
 #define VTS_HAL_NEURALNETWORKS_V1_2_H
 
+#include "Callbacks.h"
+
 #include <android/hardware/neuralnetworks/1.0/types.h>
 #include <android/hardware/neuralnetworks/1.1/types.h>
 #include <android/hardware/neuralnetworks/1.2/IDevice.h>
@@ -77,6 +79,10 @@ class ValidationTest : public NeuralnetworksHidlTest {
 // Tag for the generated tests
 class GeneratedTest : public NeuralnetworksHidlTest {};
 
+// Utility function to get PreparedModel from callback and downcast to V1_2.
+sp<IPreparedModel> getPreparedModel_1_2(
+    const sp<V1_2::implementation::PreparedModelCallback>& callback);
+
 } // namespace functional
 } // namespace vts
 