Merge changes from topic "nnapi-QoS"

* changes:
  Create VTS tests for QoS in NNAPI
  Update NNAPI 1.3 VTS tests with new types
  Add Quality of Service to NNAPI HAL
Michael Butler 2020-01-22 08:52:25 +00:00 committed by Android (Google) Code Review
commit 9e638b54a0
26 changed files with 1086 additions and 88 deletions

View file

@ -650,11 +650,12 @@ adb0efdf1462e9b2e742c0dcadd598666aac551f178be06e755bfcdf5797abd0 android.hardwar
ac429fca0da4ce91218768ec31b64ded88251f8a26d8c4f27c06abdc5b1926d9 android.hardware.keymaster@4.1::types
df9c79c4fdde2821550c6d5c3d07f5ec0adfb1b702561ce543c906ddef698703 android.hardware.media.c2@1.1::IComponent
a3eddd9bbdc87e8c22764070037dd1154f1cf006e6fba93364c4f85d4c134a19 android.hardware.media.c2@1.1::IComponentStore
4b5c8546533db9412fec6d32c0ef42b22e5e68dbf390c775ec3c22bb2d501102 android.hardware.neuralnetworks@1.3::IBuffer
5a6b75f13f0e010a4268defa4f627b862ab2899fb04f9d985194a25bd8f9fe0d android.hardware.neuralnetworks@1.3::IDevice
058b48f0e2e725bb2b3fa2b7917b0f0a696383d03a4c57afe26f0eadb6a7af28 android.hardware.neuralnetworks@1.3::IPreparedModel
94e803236398bed1febb11cc21051bc42ec003700139b099d6c479e02a7ca3c3 android.hardware.neuralnetworks@1.3::IPreparedModelCallback
12c51f9d04a52324510419aeee3e37bb3607e6900556cdde79774d80ed989855 android.hardware.neuralnetworks@1.3::types
65c16331e57f6dd68b3971f06f78fe9e3209afb60630c31705aa355f9a52bf0d android.hardware.neuralnetworks@1.3::IBuffer
d1f382d14e1384b907d5bb5780df7f01934650d556fedbed2f15a90773c657d6 android.hardware.neuralnetworks@1.3::IDevice
4167dc3ad35e9cd0d2057d4868c7675ae2c3c9d05bbd614c1f5dccfa5fd68797 android.hardware.neuralnetworks@1.3::IExecutionCallback
7d23020248194abbee8091cc624f39a5a6d7ccba338b172d5d2d3df0cceffbee android.hardware.neuralnetworks@1.3::IPreparedModel
0439a1fbbec7f16e5e4c653d85ac685d51bfafbae15b8f8cca530acdd7d6a8ce android.hardware.neuralnetworks@1.3::IPreparedModelCallback
ee65638f8af3f9f4f222e7208eaa9f1f8e7f8e0a21545846ba67d0e27624efa1 android.hardware.neuralnetworks@1.3::types
3e01d4446cd69fd1c48f8572efd97487bc179564b32bd795800b97bbe10be37b android.hardware.wifi@1.4::IWifi
c67aaf26a7a40d14ea61e70e20afacbd0bb906df1704d585ac8599fbb69dd44b android.hardware.wifi.hostapd@1.2::IHostapd
11f6448d15336361180391c8ebcdfd2d7cf77b3782d577e594d583aadc9c2877 android.hardware.wifi.hostapd@1.2::types

View file

@ -272,7 +272,7 @@ void EvaluatePreparedModel(const sp<IPreparedModel>& preparedModel, const TestMo
int n;
std::tie(n, outputShapes, timing, std::ignore) =
controller->compute(request, testConfig.measureTiming, keys);
executionStatus = nn::convertResultCodeToErrorStatus(n);
executionStatus = nn::convertToV1_0(nn::convertResultCodeToErrorStatus(n));
break;
}
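This pattern recurs throughout the updated tests: convertResultCodeToErrorStatus now produces the new V1_3::ErrorStatus, so call sites that still consume the 1.0 type convert back down with convertToV1_0. A hedged sketch of the helper shapes implied by the diff (the exact declarations are an assumption, not verbatim from the change):

// Assumed declarations of the helpers involved (illustrative only):
V1_3::ErrorStatus convertResultCodeToErrorStatus(int resultCode);  // maps result codes to HAL statuses
V1_0::ErrorStatus convertToV1_0(V1_3::ErrorStatus status);         // narrows a 1.3 status for 1.0 consumers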

View file

@ -296,7 +296,8 @@ static void validateBurstFmqLength(const sp<IPreparedModel>& preparedModel,
// collect serialized result by running regular burst
const auto [nRegular, outputShapesRegular, timingRegular, fallbackRegular] =
controllerRegular->compute(request, MeasureTiming::NO, keys);
const ErrorStatus statusRegular = nn::convertResultCodeToErrorStatus(nRegular);
const ErrorStatus statusRegular =
nn::convertToV1_0(nn::convertResultCodeToErrorStatus(nRegular));
EXPECT_FALSE(fallbackRegular);
// skip test if regular burst output isn't useful for testing a failure
@ -312,7 +313,7 @@ static void validateBurstFmqLength(const sp<IPreparedModel>& preparedModel,
// large enough to return the serialized result
const auto [nSmall, outputShapesSmall, timingSmall, fallbackSmall] =
controllerSmall->compute(request, MeasureTiming::NO, keys);
const ErrorStatus statusSmall = nn::convertResultCodeToErrorStatus(nSmall);
const ErrorStatus statusSmall = nn::convertToV1_0(nn::convertResultCodeToErrorStatus(nSmall));
EXPECT_NE(ErrorStatus::NONE, statusSmall);
EXPECT_EQ(0u, outputShapesSmall.size());
EXPECT_TRUE(badTiming(timingSmall));

View file

@ -107,7 +107,7 @@ static void validate(const sp<IPreparedModel>& preparedModel, const std::string&
// execute and verify
const auto [n, outputShapes, timing, fallback] = burst->compute(request, measure, keys);
const ErrorStatus status = nn::convertResultCodeToErrorStatus(n);
const ErrorStatus status = nn::convertToV1_0(nn::convertResultCodeToErrorStatus(n));
EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, status);
EXPECT_EQ(outputShapes.size(), 0);
EXPECT_TRUE(badTiming(timing));

View file

@ -10,6 +10,7 @@ hidl_interface {
"types.hal",
"IBuffer.hal",
"IDevice.hal",
"IExecutionCallback.hal",
"IPreparedModel.hal",
"IPreparedModelCallback.hal",
],

View file

@ -16,7 +16,7 @@
package android.hardware.neuralnetworks@1.3;
import @1.0::ErrorStatus;
import ErrorStatus;
/**
* This interface represents a device memory buffer.

View file

@ -16,7 +16,6 @@
package android.hardware.neuralnetworks@1.3;
import @1.0::ErrorStatus;
import @1.1::ExecutionPreference;
import @1.2::Constant;
import @1.2::DeviceType;
@ -25,7 +24,10 @@ import @1.2::IDevice;
import BufferDesc;
import BufferRole;
import Capabilities;
import ErrorStatus;
import Model;
import OptionalTimePoint;
import Priority;
import IBuffer;
import IPreparedModel;
import IPreparedModelCallback;
@ -45,6 +47,19 @@ interface IDevice extends @1.2::IDevice {
*/
getCapabilities_1_3() generates (ErrorStatus status, Capabilities capabilities);
/**
* Returns whether the device is able to complete or abort a task within a
* specified duration.
*
* @return prepareModelDeadline 'true' if the device supports completing or
* aborting model preparation by the deadline when the deadline is supplied,
* 'false' otherwise.
* @return executionDeadline 'true' if the device supports completing or
* aborting an execution by the deadline when the deadline is supplied,
* 'false' otherwise.
*/
supportsDeadlines() generates (bool prepareModelDeadline, bool executionDeadline);
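On the driver side this is a static capability report. A minimal sketch, assuming a hypothetical Device class implementing IDevice and the HIDL-generated supportsDeadlines_cb callback type (neither is part of this change):

Return<void> Device::supportsDeadlines(supportsDeadlines_cb cb) {
    // Example: this driver can abort model preparation on a deadline but
    // cannot abort an in-flight execution.
    cb(/*prepareModelDeadline=*/true, /*executionDeadline=*/false);
    return Void();
}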
/**
* Gets the supported operations in a model.
*
@ -118,6 +133,22 @@ interface IDevice extends @1.2::IDevice {
* the callback object must be invoked with the appropriate ErrorStatus
* value and nullptr for the IPreparedModel.
*
* The model is prepared with a priority. This priority is relative to other
* prepared models owned by the same client. Higher priority executions may
* use more compute resources than lower priority executions, and may
* preempt or starve lower priority executions.
*
* prepareModel_1_3 can be called with an optional deadline. If the model
* is not able to be prepared before the provided deadline, the model
* preparation must be aborted, and either {@link
* ErrorStatus::MISSED_DEADLINE_TRANSIENT} or {@link
* ErrorStatus::MISSED_DEADLINE_PERSISTENT} must be returned. The error due
* to an abort must be sent the same way as other errors, described above.
* If the service reports that it does not support preparation deadlines via
* IDevice::supportsDeadlines, and prepareModel_1_3 is called with a
* deadline, then the argument is invalid, and {@link
* ErrorStatus::INVALID_ARGUMENT} must be returned.
*
* Optionally, the driver may save the prepared model to cache during the
* asynchronous preparation. Any error that occurs when saving to cache must
* not affect the status of preparing the model. Even if the input arguments
@ -139,6 +170,11 @@ interface IDevice extends @1.2::IDevice {
* @param model The model to be prepared for execution.
* @param preference Indicates the intended execution behavior of a prepared
* model.
* @param priority The priority of the prepared model relative to other
* prepared models owned by the client.
* @param deadline The time by which the model must be prepared. If the
* model cannot be prepared by the deadline, the preparation must be
* aborted.
* @param modelCache A vector of handles with each entry holding exactly one
* cache file descriptor for the security-sensitive cache. The length of
* the vector must either be 0 indicating that caching information is
@ -173,8 +209,12 @@ interface IDevice extends @1.2::IDevice {
* - GENERAL_FAILURE if there is an unspecified error
* - INVALID_ARGUMENT if one of the input arguments related to preparing
* the model is invalid
* - MISSED_DEADLINE_* if the deadline for preparing a model cannot be
* met
* - RESOURCE_EXHAUSTED_* if the task was aborted by the driver
*/
prepareModel_1_3(Model model, ExecutionPreference preference,
Priority priority, OptionalTimePoint deadline,
vec<handle> modelCache, vec<handle> dataCache,
uint8_t[Constant:BYTE_SIZE_OF_CACHE_TOKEN] token,
IPreparedModelCallback callback)
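A driver honoring the deadline needs to compare it against the steady clock before (and during) expensive work. A minimal C++ sketch, assuming the HIDL-generated safe_union accessors for OptionalTimePoint; the helper name is hypothetical:

#include <chrono>

static bool deadlineHasPassed(const OptionalTimePoint& deadline) {
    // No deadline supplied: the task never times out.
    if (deadline.getDiscriminator() == OptionalTimePoint::hidl_discriminator::none) {
        return false;
    }
    const uint64_t nowNs = std::chrono::duration_cast<std::chrono::nanoseconds>(
                                   std::chrono::steady_clock::now().time_since_epoch())
                                   .count();
    return nowNs > deadline.nanoseconds();
}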
@ -220,6 +260,22 @@ interface IDevice extends @1.2::IDevice {
* the model, the callback object must be invoked with the appropriate
* ErrorStatus value and nullptr for the IPreparedModel.
*
* The model is prepared with a priority. This priority is relative to other
* prepared models owned by the same client. Higher priority executions may
* use more compute resources than lower priority executions, and may
* preempt or starve lower priority executions.
*
* prepareModelFromCache_1_3 can be called with an optional deadline. If the
* model is not able to be prepared before the provided deadline, the model
* preparation must be aborted, and either {@link
* ErrorStatus::MISSED_DEADLINE_TRANSIENT}
* or {@link ErrorStatus::MISSED_DEADLINE_PERSISTENT} must be returned. The
* error due to an abort must be sent the same way as other errors,
* described above. If the service reports that it does not support
* preparation deadlines via IDevice::supportsDeadlines, and
* prepareModelFromCache_1_3 is called with a deadline, then the argument is
* invalid, and {@link ErrorStatus::INVALID_ARGUMENT} must be returned.
*
* The only information that may be unknown to the model at this stage is
* the shape of the tensors, which may only be known at execution time. As
* such, some driver services may return partially prepared models, where
@ -228,6 +284,11 @@ interface IDevice extends @1.2::IDevice {
* used with different shapes of inputs on different (possibly concurrent)
* executions.
*
* @param priority The priority of the prepared model relative to other
* prepared models owned by the client.
* @param deadline The time by which the model must be prepared. If the
* model cannot be prepared by the deadline, the preparation must be
* aborted.
* @param modelCache A vector of handles with each entry holding exactly one
* cache file descriptor for the security-sensitive cache. The length of
* the vector must match the numModelCache returned from getNumberOfCacheFilesNeeded.
@ -253,8 +314,12 @@ interface IDevice extends @1.2::IDevice {
* - GENERAL_FAILURE if caching is not supported or if there is an
* unspecified error
* - INVALID_ARGUMENT if one of the input arguments is invalid
* - MISSED_DEADLINE_* if the deadline for preparing a model cannot be
* met
* - RESOURCE_EXHAUSTED_* if the task was aborted by the driver
*/
prepareModelFromCache_1_3(vec<handle> modelCache, vec<handle> dataCache,
prepareModelFromCache_1_3(Priority priority, OptionalTimePoint deadline,
vec<handle> modelCache, vec<handle> dataCache,
uint8_t[Constant:BYTE_SIZE_OF_CACHE_TOKEN] token,
IPreparedModelCallback callback)
generates (ErrorStatus status);

View file

@ -0,0 +1,64 @@
/*
* Copyright (C) 2019 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package android.hardware.neuralnetworks@1.3;
import @1.2::IExecutionCallback;
import @1.2::OutputShape;
import @1.2::Timing;
/**
* IExecutionCallback must be used to return the error status result from an
* execution asynchronously launched from IPreparedModel::execute*.
*/
interface IExecutionCallback extends @1.2::IExecutionCallback {
/**
* There are three notify methods declared for the IExecutionCallback
* interface: notify_1_3, notify_1_2, and notify. One of the three notify
* methods must be invoked immediately after the asynchronous task has
* finished performing the execution. One of the notify methods must be
* provided with the ErrorStatus from the execution. If the asynchronous
* task is not launched, one of the notify methods must be invoked with the
* appropriate error.
*
* @param status Error status returned from launching the asynchronous task
* (if the launch fails) or from the asynchronous task itself
* (if the launch succeeds). Must be:
* - NONE if the asynchronous execution was successful
* - DEVICE_UNAVAILABLE if driver is offline or busy
* - GENERAL_FAILURE if the asynchronous task resulted in an
* unspecified error
* - OUTPUT_INSUFFICIENT_SIZE if at least one output
* operand buffer is not large enough to store the
* corresponding output
* - INVALID_ARGUMENT if one of the input arguments to
* prepareModel is invalid
* - MISSED_DEADLINE_* if the deadline could not be met
* - RESOURCE_EXHAUSTED_* if the task was aborted by the driver
* @param outputShapes A list of shape information of model output operands.
The index into "outputShapes" corresponds to the index
* of the output operand in the Request outputs vector.
* outputShapes must be empty unless the status is either
* NONE or OUTPUT_INSUFFICIENT_SIZE.
* @param timing Duration of execution. Unless MeasureTiming::YES was passed when
* launching the execution and status is NONE, all times must
* be reported as UINT64_MAX. A driver may choose to report
* any time as UINT64_MAX, indicating that particular measurement is
* not available.
*/
oneway notify_1_3(ErrorStatus status, vec<OutputShape> outputShapes, Timing timing);
};
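An execution aborted for a missed deadline reports through the same callback path as any other error. A hedged driver-side sketch of that call (kNoTiming mirrors the constant the test callbacks define later in this change):

const Timing kNoTiming = {.timeOnDevice = UINT64_MAX, .timeInDriver = UINT64_MAX};
Return<void> ret = callback->notify_1_3(ErrorStatus::MISSED_DEADLINE_TRANSIENT,
                                        /*outputShapes=*/{}, kNoTiming);
if (!ret.isOk()) {
    LOG(ERROR) << "notify_1_3 transport error: " << ret.description();
}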

View file

@ -16,13 +16,14 @@
package android.hardware.neuralnetworks@1.3;
import @1.0::ErrorStatus;
import @1.2::IExecutionCallback;
import @1.2::IPreparedModel;
import @1.2::MeasureTiming;
import @1.2::OutputShape;
import @1.2::Timing;
import ErrorStatus;
import OptionalTimePoint;
import Request;
import IExecutionCallback;
/**
* IPreparedModel describes a model that has been prepared for execution and
@ -65,6 +66,17 @@ interface IPreparedModel extends @1.2::IPreparedModel {
* values, the execution should complete successfully (ErrorStatus::NONE):
* There must be no failure unless the device itself is in a bad state.
*
* execute_1_3 can be called with an optional deadline. If the execution
* is not able to be completed before the provided deadline, the execution
* must be aborted, and either {@link
* ErrorStatus::MISSED_DEADLINE_TRANSIENT} or {@link
* ErrorStatus::MISSED_DEADLINE_PERSISTENT} must be returned. The error due
* to an abort must be sent the same way as other errors, described above.
* If the service reports that it does not support execution deadlines via
* IDevice::supportsDeadlines, and execute_1_3 is called with a deadline,
* then the argument is invalid, and {@link ErrorStatus::INVALID_ARGUMENT}
* must be returned.
*
* Any number of calls to the execute* and executeSynchronously* functions,
* in any combination, may be made concurrently, even on the same
* IPreparedModel object.
@ -75,6 +87,9 @@ interface IPreparedModel extends @1.2::IPreparedModel {
* The duration runs from the time the driver sees the call
* to the execute_1_3 function to the time the driver invokes
* the callback.
* @param deadline The time by which execution must complete. If the
* execution cannot be finished by the deadline, the
* execution must be aborted.
* @param callback A callback object used to return the error status of
* the execution. The callback object's notify function must
* be called exactly once, even if the execution was
@ -87,8 +102,13 @@ interface IPreparedModel extends @1.2::IPreparedModel {
* not large enough to store the resultant values
* - INVALID_ARGUMENT if one of the input arguments is
* invalid
* - MISSED_DEADLINE_* if the deadline for executing a model
* cannot be met
* - RESOURCE_EXHAUSTED_* if the task was aborted by the
* driver
*/
execute_1_3(Request request, MeasureTiming measure, IExecutionCallback callback)
execute_1_3(Request request, MeasureTiming measure, OptionalTimePoint deadline,
IExecutionCallback callback)
generates (ErrorStatus status);
/**
@ -116,6 +136,17 @@ interface IPreparedModel extends @1.2::IPreparedModel {
* (ErrorStatus::NONE): There must be no failure unless the device itself is
* in a bad state.
*
* executeSynchronously_1_3 can be called with an optional deadline. If the
* execution is not able to be completed before the provided deadline, the
* execution must be aborted, and either {@link
* ErrorStatus::MISSED_DEADLINE_TRANSIENT} or {@link
* ErrorStatus::MISSED_DEADLINE_PERSISTENT} must be returned. The error due
* to an abort must be sent the same way as other errors, described above.
* If the service reports that it does not support execution deadlines via
* IDevice::supportsDeadlines, and executeSynchronously_1_3 is called with a
* deadline, then the argument is invalid, and
* {@link ErrorStatus::INVALID_ARGUMENT} must be returned.
*
* Any number of calls to the execute* and executeSynchronously* functions,
* in any combination, may be made concurrently, even on the same
* IPreparedModel object.
@ -126,6 +157,9 @@ interface IPreparedModel extends @1.2::IPreparedModel {
* The duration runs from the time the driver sees the call
* to the executeSynchronously_1_3 function to the time the driver
* returns from the function.
* @param deadline The time by which execution must complete. If the
* execution cannot be finished by the deadline, the
* execution must be aborted.
* @return status Error status of the execution, must be:
* - NONE if execution is performed successfully
* - DEVICE_UNAVAILABLE if driver is offline or busy
@ -135,16 +169,22 @@ interface IPreparedModel extends @1.2::IPreparedModel {
* corresponding output
* - INVALID_ARGUMENT if one of the input arguments is
* invalid
* - MISSED_DEADLINE_* if the deadline for executing a model
* cannot be met
* - RESOURCE_EXHAUSTED_* if the task was aborted by the
* driver
* @return outputShapes A list of shape information of model output operands.
* The index into "outputShapes" corresponds to the index
* of the output operand in the Request outputs vector.
* outputShapes must be empty unless the status is either
* NONE or OUTPUT_INSUFFICIENT_SIZE.
* @return Timing Duration of execution. Unless measure is YES and status is
* @return timing Duration of execution. Unless measure is YES and status is
* NONE, all times must be reported as UINT64_MAX. A driver may
* choose to report any time as UINT64_MAX, indicating that
* measurement is not available.
*/
executeSynchronously_1_3(Request request, MeasureTiming measure)
generates (ErrorStatus status, vec<OutputShape> outputShapes, Timing timing);
executeSynchronously_1_3(Request request, MeasureTiming measure,
OptionalTimePoint deadline)
generates (ErrorStatus status, vec<OutputShape> outputShapes,
Timing timing);
};
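A client-side sketch of the new synchronous entry point, mirroring how the VTS tests later in this change invoke it (the lambda body is illustrative):

OptionalTimePoint deadline;  // discriminator 'none': run without a deadline
const Return<void> ret = preparedModel->executeSynchronously_1_3(
        request, MeasureTiming::NO, deadline,
        [](ErrorStatus status, const hidl_vec<OutputShape>& outputShapes,
           const Timing& timing) {
            // Inspect status, outputShapes, and timing here.
        });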

View file

@ -16,7 +16,6 @@
package android.hardware.neuralnetworks@1.3;
import @1.0::ErrorStatus;
import @1.2::IPreparedModelCallback;
import IPreparedModel;
@ -48,6 +47,10 @@ interface IPreparedModelCallback extends @1.2::IPreparedModelCallback {
* unspecified error
* - INVALID_ARGUMENT if one of the input arguments to
* prepareModel is invalid
* - MISSED_DEADLINE_* if the deadline for executing a model
* cannot be met
* - RESOURCE_EXHAUSTED_* if the task was aborted by the
* driver
* @param preparedModel A model that has been asynchronously prepared for
* execution. If the model was unable to be prepared
* due to an error, nullptr must be passed in place of

View file

@ -17,6 +17,7 @@
package android.hardware.neuralnetworks@1.3;
import @1.0::DataLocation;
import @1.0::ErrorStatus;
import @1.0::PerformanceInfo;
import @1.0::RequestArgument;
import @1.2::Model.ExtensionNameAndPrefix;
@ -4998,6 +4999,16 @@ enum OperationTypeRange : uint32_t {
BASE_MAX = 0xFFFF,
};
/**
* Priority given to a prepared model for execution.
*/
enum Priority : int32_t {
LOW,
MEDIUM,
HIGH,
};
/**
* The capabilities of a driver.
*
@ -5434,3 +5445,49 @@ struct Request {
*/
vec<MemoryPool> pools;
};
/**
* Optional time point of the steady clock (as from std::chrono::steady_clock)
* measured in nanoseconds.
*/
safe_union OptionalTimePoint {
/** No time point provided. */
Monostate none;
/**
* Time point of the steady clock (as from std::chrono::steady_clock)
* measured in nanoseconds.
*/
uint64_t nanoseconds;
};
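Populating the safe_union from std::chrono looks like the following client-side sketch (it mirrors the makeOptionalTimePoint helper added to the VTS tests below; the 10 ms bound is arbitrary):

OptionalTimePoint deadline;
const auto timePoint = std::chrono::steady_clock::now() + std::chrono::milliseconds(10);
const uint64_t nanosecondsSinceEpoch =
        std::chrono::time_point_cast<std::chrono::nanoseconds>(timePoint)
                .time_since_epoch()
                .count();
deadline.nanoseconds(nanosecondsSinceEpoch);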
/**
* Return status of a function.
*/
enum ErrorStatus : @1.0::ErrorStatus {
/**
* Failure because a deadline could not be met for a task, but future
* deadlines may still be met for the same task after a short delay.
*/
MISSED_DEADLINE_TRANSIENT,
/**
* Failure because a deadline could not be met for a task, and future
* deadlines will likely also not be met for the same task even after a
* short delay.
*/
MISSED_DEADLINE_PERSISTENT,
/**
* Failure because of a resource limitation within the driver, but future
* calls for the same task may still succeed after a short delay.
*/
RESOURCE_EXHAUSTED_TRANSIENT,
/**
* Failure because of a resource limitation within the driver, and future
* calls for the same task will likely also fail even after a short
* delay.
*/
RESOURCE_EXHAUSTED_PERSISTENT,
};
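The transient/persistent split lets a client decide whether retrying is worthwhile, e.g. via a small helper like this illustrative one (not part of the change):

bool isRetryable(ErrorStatus status) {
    // Transient failures may succeed after a short delay; persistent
    // failures will likely keep failing for the same task.
    return status == ErrorStatus::MISSED_DEADLINE_TRANSIENT ||
           status == ErrorStatus::RESOURCE_EXHAUSTED_TRANSIENT;
}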

View file

@ -19,6 +19,7 @@
package android.hardware.neuralnetworks@1.3;
import @1.0::DataLocation;
import @1.0::ErrorStatus;
import @1.0::PerformanceInfo;
import @1.0::RequestArgument;
import @1.2::Model.ExtensionNameAndPrefix;
@ -89,6 +90,16 @@ enum OperationTypeRange : uint32_t {
BASE_MAX = 0xFFFF,
};
/**
* Priority given to a prepared model for execution.
*/
enum Priority : int32_t {
LOW,
MEDIUM,
HIGH,
};
/**
* The capabilities of a driver.
*
@ -525,3 +536,49 @@ struct Request {
*/
vec<MemoryPool> pools;
};
/**
* Optional time point of the steady clock (as from std::chrono::steady_clock)
* measured in nanoseconds.
*/
safe_union OptionalTimePoint {
/** No time point provided. */
Monostate none;
/**
* Time point of the steady clock (as from std::chrono::steady_clock)
* measured in nanoseconds.
*/
uint64_t nanoseconds;
};
/**
* Return status of a function.
*/
enum ErrorStatus : @1.0::ErrorStatus {
/**
* Failure because a deadline could not be met for a task, but future
* deadlines may still be met for the same task after a short delay.
*/
MISSED_DEADLINE_TRANSIENT,
/**
* Failure because a deadline could not be met for a task, and future
* deadlines will likely also not be met for the same task even after a
* short delay.
*/
MISSED_DEADLINE_PERSISTENT,
/**
* Failure because of a resource limitation within the driver, but future
* calls for the same task may still succeed after a short delay.
*/
RESOURCE_EXHAUSTED_TRANSIENT,
/**
* Failure because of a resource limitation within the driver, and future
* calls for the same task will likely also fail even after a short
* delay.
*/
RESOURCE_EXHAUSTED_PERSISTENT,
};

View file

@ -15,11 +15,12 @@
//
cc_library_static {
name: "VtsHalNeuralNetworksV1_3Callbacks",
name: "VtsHalNeuralNetworksV1_3_utils",
defaults: ["VtsHalTargetTestDefaults"],
export_include_dirs: ["include"],
srcs: [
"Callbacks.cpp",
"Utils.cpp",
],
static_libs: [
"android.hardware.neuralnetworks@1.0",
@ -29,7 +30,7 @@ cc_library_static {
],
header_libs: [
"libbase_headers",
]
],
}
cc_test {
@ -39,6 +40,7 @@ cc_test {
"BasicTests.cpp",
"CompilationCachingTests.cpp",
"GeneratedTestHarness.cpp",
"QualityOfServiceTests.cpp",
"TestAssertions.cpp",
"ValidateBurst.cpp",
"ValidateModel.cpp",
@ -50,6 +52,9 @@ cc_test {
"libnativewindow",
],
static_libs: [
"VtsHalNeuralNetworksV1_0_utils",
"VtsHalNeuralNetworksV1_2Callbacks",
"VtsHalNeuralNetworksV1_3_utils",
"android.hardware.neuralnetworks@1.0",
"android.hardware.neuralnetworks@1.1",
"android.hardware.neuralnetworks@1.2",
@ -60,9 +65,6 @@ cc_test {
"libhidlmemory",
"libneuralnetworks_generated_test_harness",
"libneuralnetworks_utils",
"VtsHalNeuralNetworksV1_0_utils",
"VtsHalNeuralNetworksV1_2Callbacks",
"VtsHalNeuralNetworksV1_3Callbacks",
],
whole_static_libs: [
"neuralnetworks_generated_V1_0_example",

View file

@ -21,7 +21,6 @@
namespace android::hardware::neuralnetworks::V1_3::vts::functional {
using V1_0::DeviceStatus;
using V1_0::ErrorStatus;
using V1_0::PerformanceInfo;
using V1_2::Constant;
using V1_2::DeviceType;

View file

@ -24,12 +24,16 @@
namespace android::hardware::neuralnetworks::V1_3::implementation {
using V1_0::ErrorStatus;
using V1_2::OutputShape;
using V1_2::Timing;
constexpr Timing kNoTiming = {.timeOnDevice = std::numeric_limits<uint64_t>::max(),
.timeInDriver = std::numeric_limits<uint64_t>::max()};
// PreparedModelCallback methods begin here
Return<void> PreparedModelCallback::notify(ErrorStatus errorStatus,
const sp<V1_0::IPreparedModel>& preparedModel) {
Return<void> PreparedModelCallback::notifyInternal(ErrorStatus errorStatus,
const sp<V1_0::IPreparedModel>& preparedModel) {
{
std::lock_guard<std::mutex> hold(mMutex);
@ -48,14 +52,19 @@ Return<void> PreparedModelCallback::notify(ErrorStatus errorStatus,
return Void();
}
Return<void> PreparedModelCallback::notify_1_2(ErrorStatus errorStatus,
const sp<V1_2::IPreparedModel>& preparedModel) {
return notify(errorStatus, preparedModel);
Return<void> PreparedModelCallback::notify(V1_0::ErrorStatus errorStatus,
const sp<V1_0::IPreparedModel>& preparedModel) {
return notifyInternal(static_cast<ErrorStatus>(errorStatus), preparedModel);
}
Return<void> PreparedModelCallback::notify_1_3(ErrorStatus errorStatus,
Return<void> PreparedModelCallback::notify_1_2(V1_0::ErrorStatus errorStatus,
const sp<V1_2::IPreparedModel>& preparedModel) {
return notifyInternal(static_cast<ErrorStatus>(errorStatus), preparedModel);
}
Return<void> PreparedModelCallback::notify_1_3(V1_3::ErrorStatus errorStatus,
const sp<V1_3::IPreparedModel>& preparedModel) {
return notify(errorStatus, preparedModel);
return notifyInternal(errorStatus, preparedModel);
}
void PreparedModelCallback::wait() const {
@ -73,4 +82,82 @@ sp<V1_0::IPreparedModel> PreparedModelCallback::getPreparedModel() const {
return mPreparedModel;
}
// ExecutionCallback methods begin here
Return<void> ExecutionCallback::notify(V1_0::ErrorStatus errorStatus) {
return notifyInternal(static_cast<ErrorStatus>(errorStatus), {}, kNoTiming);
}
Return<void> ExecutionCallback::notify_1_2(V1_0::ErrorStatus errorStatus,
const hidl_vec<OutputShape>& outputShapes,
const Timing& timing) {
return notifyInternal(static_cast<ErrorStatus>(errorStatus), outputShapes, timing);
}
Return<void> ExecutionCallback::notify_1_3(V1_3::ErrorStatus errorStatus,
const hidl_vec<OutputShape>& outputShapes,
const Timing& timing) {
return notifyInternal(errorStatus, outputShapes, timing);
}
void ExecutionCallback::wait() const {
std::unique_lock<std::mutex> lock(mMutex);
mCondition.wait(lock, [this] { return mNotified; });
}
ErrorStatus ExecutionCallback::getStatus() const {
wait();
return mErrorStatus;
}
const std::vector<OutputShape>& ExecutionCallback::getOutputShapes() const {
wait();
return mOutputShapes;
}
Timing ExecutionCallback::getTiming() const {
wait();
return mTiming;
}
Return<void> ExecutionCallback::notifyInternal(ErrorStatus errorStatus,
hidl_vec<OutputShape> outputShapes, Timing timing) {
// check results
if (errorStatus == ErrorStatus::OUTPUT_INSUFFICIENT_SIZE) {
// outputShapes must not be empty if OUTPUT_INSUFFICIENT_SIZE.
if (outputShapes.size() == 0) {
LOG(ERROR) << "Notifid with empty output shape vector when OUTPUT_INSUFFICIENT_SIZE";
errorStatus = ErrorStatus::GENERAL_FAILURE;
outputShapes = {};
timing = kNoTiming;
}
} else if (errorStatus != ErrorStatus::NONE) {
// outputShapes must be empty if errorStatus is neither NONE nor OUTPUT_INSUFFICIENT_SIZE.
if (outputShapes.size() != 0) {
LOG(ERROR) << "Notified with non-empty output shape vector when error status is "
"neither NONE nor OUTPUT_INSUFFICIENT_SIZE";
errorStatus = ErrorStatus::GENERAL_FAILURE;
outputShapes = {};
timing = kNoTiming;
}
}
// store results
{
std::lock_guard<std::mutex> hold(mMutex);
// quick-return if object has already been notified
if (mNotified) {
return Void();
}
mErrorStatus = errorStatus;
mOutputShapes = std::move(outputShapes);
mTiming = timing;
mNotified = true;
}
mCondition.notify_all();
return Void();
}
} // namespace android::hardware::neuralnetworks::V1_3::implementation

View file

@ -29,6 +29,7 @@
#include <thread>
#include "1.3/Callbacks.h"
#include "1.3/Utils.h"
#include "GeneratedTestHarness.h"
#include "MemoryUtils.h"
#include "TestHarness.h"
@ -49,7 +50,6 @@ namespace android::hardware::neuralnetworks::V1_3::vts::functional {
using namespace test_helper;
using implementation::PreparedModelCallback;
using V1_0::ErrorStatus;
using V1_1::ExecutionPreference;
using V1_2::Constant;
using V1_2::OperationType;
@ -238,8 +238,8 @@ class CompilationCachingTestBase : public testing::Test {
mCacheDir.push_back('/');
Return<void> ret = kDevice->getNumberOfCacheFilesNeeded(
[this](ErrorStatus status, uint32_t numModelCache, uint32_t numDataCache) {
EXPECT_EQ(ErrorStatus::NONE, status);
[this](V1_0::ErrorStatus status, uint32_t numModelCache, uint32_t numDataCache) {
EXPECT_EQ(V1_0::ErrorStatus::NONE, status);
mNumModelCache = numModelCache;
mNumDataCache = numDataCache;
});
@ -324,9 +324,9 @@ class CompilationCachingTestBase : public testing::Test {
// Launch prepare model.
sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
hidl_array<uint8_t, sizeof(mToken)> cacheToken(mToken);
Return<ErrorStatus> prepareLaunchStatus =
kDevice->prepareModel_1_3(model, ExecutionPreference::FAST_SINGLE_ANSWER,
modelCache, dataCache, cacheToken, preparedModelCallback);
Return<ErrorStatus> prepareLaunchStatus = kDevice->prepareModel_1_3(
model, ExecutionPreference::FAST_SINGLE_ANSWER, kDefaultPriority, {}, modelCache,
dataCache, cacheToken, preparedModelCallback);
ASSERT_TRUE(prepareLaunchStatus.isOk());
ASSERT_EQ(static_cast<ErrorStatus>(prepareLaunchStatus), ErrorStatus::NONE);
@ -370,7 +370,7 @@ class CompilationCachingTestBase : public testing::Test {
sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
hidl_array<uint8_t, sizeof(mToken)> cacheToken(mToken);
Return<ErrorStatus> prepareLaunchStatus = kDevice->prepareModelFromCache_1_3(
modelCache, dataCache, cacheToken, preparedModelCallback);
kDefaultPriority, {}, modelCache, dataCache, cacheToken, preparedModelCallback);
ASSERT_TRUE(prepareLaunchStatus.isOk());
if (static_cast<ErrorStatus>(prepareLaunchStatus) != ErrorStatus::NONE) {
*preparedModel = nullptr;

View file

@ -44,8 +44,8 @@
#include <vector>
#include "1.0/Utils.h"
#include "1.2/Callbacks.h"
#include "1.3/Callbacks.h"
#include "1.3/Utils.h"
#include "ExecutionBurstController.h"
#include "MemoryUtils.h"
#include "TestHarness.h"
@ -56,9 +56,9 @@ namespace android::hardware::neuralnetworks::V1_3::vts::functional {
using namespace test_helper;
using hidl::memory::V1_0::IMemory;
using implementation::ExecutionCallback;
using implementation::PreparedModelCallback;
using V1_0::DataLocation;
using V1_0::ErrorStatus;
using V1_0::RequestArgument;
using V1_1::ExecutionPreference;
using V1_2::Constant;
@ -66,7 +66,6 @@ using V1_2::MeasureTiming;
using V1_2::OutputShape;
using V1_2::SymmPerChannelQuantParams;
using V1_2::Timing;
using V1_2::implementation::ExecutionCallback;
using HidlToken = hidl_array<uint8_t, static_cast<uint32_t>(Constant::BYTE_SIZE_OF_CACHE_TOKEN)>;
namespace {
@ -453,7 +452,7 @@ static std::vector<TestBuffer> getOutputBuffers(const TestModel& testModel, cons
static Return<ErrorStatus> ExecutePreparedModel(const sp<IPreparedModel>& preparedModel,
const Request& request, MeasureTiming measure,
sp<ExecutionCallback>& callback) {
return preparedModel->execute_1_3(request, measure, callback);
return preparedModel->execute_1_3(request, measure, {}, callback);
}
static Return<ErrorStatus> ExecutePreparedModel(const sp<IPreparedModel>& preparedModel,
const Request& request, MeasureTiming measure,
@ -461,7 +460,7 @@ static Return<ErrorStatus> ExecutePreparedModel(const sp<IPreparedModel>& prepar
Timing* timing) {
ErrorStatus result;
Return<void> ret = preparedModel->executeSynchronously_1_3(
request, measure,
request, measure, {},
[&result, outputShapes, timing](ErrorStatus error, const hidl_vec<OutputShape>& shapes,
const Timing& time) {
result = error;
@ -716,7 +715,8 @@ void Execute(const sp<IDevice>& device, const TestModel& testModel, TestKind tes
} break;
case TestKind::QUANTIZATION_COUPLING: {
ASSERT_TRUE(testModel.hasQuant8CoupledOperands());
createPreparedModel(device, model, &preparedModel, /*reportSkipping*/ false);
createPreparedModel(device, model, &preparedModel,
/*reportSkipping*/ false);
TestModel signedQuantizedModel = convertQuant8AsymmOperandsToSigned(testModel);
sp<IPreparedModel> preparedCoupledModel;
createPreparedModel(device, createModel(signedQuantizedModel), &preparedCoupledModel,
@ -745,6 +745,12 @@ void Execute(const sp<IDevice>& device, const TestModel& testModel, TestKind tes
void GeneratedTestBase::SetUp() {
testing::TestWithParam<GeneratedTestParam>::SetUp();
ASSERT_NE(kDevice, nullptr);
const Return<void> ret =
kDevice->supportsDeadlines([this](bool prepareModelDeadline, bool executionDeadline) {
mSupportsDeadlines = {prepareModelDeadline, executionDeadline};
});
ASSERT_TRUE(ret.isOk());
}
std::vector<NamedModel> getNamedModels(const FilterFn& filter) {

View file

@ -36,6 +36,7 @@ class GeneratedTestBase : public testing::TestWithParam<GeneratedTestParam> {
void SetUp() override;
const sp<IDevice> kDevice = getData(std::get<NamedDevice>(GetParam()));
const test_helper::TestModel& kTestModel = *getData(std::get<NamedModel>(GetParam()));
std::pair<bool, bool> mSupportsDeadlines;
};
using FilterFn = std::function<bool(const test_helper::TestModel&)>;

View file

@ -0,0 +1,299 @@
/*
* Copyright (C) 2019 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "1.0/Utils.h"
#include "1.3/Callbacks.h"
#include "1.3/Utils.h"
#include "GeneratedTestHarness.h"
#include "Utils.h"
namespace android::hardware::neuralnetworks::V1_3::vts::functional {
using implementation::ExecutionCallback;
using implementation::PreparedModelCallback;
using test_helper::TestBuffer;
using test_helper::TestModel;
using V1_1::ExecutionPreference;
using V1_2::MeasureTiming;
using V1_2::OutputShape;
using V1_2::Timing;
using HidlToken =
hidl_array<uint8_t, static_cast<uint32_t>(V1_2::Constant::BYTE_SIZE_OF_CACHE_TOKEN)>;
enum class DeadlineBoundType { NOW, UNLIMITED };
constexpr std::array<DeadlineBoundType, 2> deadlineBounds = {DeadlineBoundType::NOW,
DeadlineBoundType::UNLIMITED};
std::string toString(DeadlineBoundType type) {
switch (type) {
case DeadlineBoundType::NOW:
return "NOW";
case DeadlineBoundType::UNLIMITED:
return "UNLIMITED";
}
LOG(FATAL) << "Unrecognized DeadlineBoundType: " << static_cast<int>(type);
return {};
}
using Results = std::tuple<ErrorStatus, hidl_vec<OutputShape>, Timing>;
using MaybeResults = std::optional<Results>;
using ExecutionFunction =
std::function<MaybeResults(const sp<IPreparedModel>& preparedModel, const Request& request,
DeadlineBoundType deadlineBound)>;
static OptionalTimePoint makeOptionalTimePoint(DeadlineBoundType deadlineBoundType) {
OptionalTimePoint deadline;
switch (deadlineBoundType) {
case DeadlineBoundType::NOW: {
const auto currentTime = std::chrono::steady_clock::now();
const auto currentTimeInNanoseconds =
std::chrono::time_point_cast<std::chrono::nanoseconds>(currentTime);
const uint64_t nanosecondsSinceEpoch =
currentTimeInNanoseconds.time_since_epoch().count();
deadline.nanoseconds(nanosecondsSinceEpoch);
} break;
case DeadlineBoundType::UNLIMITED: {
uint64_t unlimited = std::numeric_limits<uint64_t>::max();
deadline.nanoseconds(unlimited);
} break;
}
return deadline;
}
void runPrepareModelTest(const sp<IDevice>& device, const Model& model, Priority priority,
std::optional<DeadlineBoundType> deadlineBound) {
OptionalTimePoint deadline;
if (deadlineBound.has_value()) {
deadline = makeOptionalTimePoint(deadlineBound.value());
}
// see if service can handle model
bool fullySupportsModel = false;
const Return<void> supportedCall = device->getSupportedOperations_1_3(
model, [&fullySupportsModel](ErrorStatus status, const hidl_vec<bool>& supported) {
ASSERT_EQ(ErrorStatus::NONE, status);
ASSERT_NE(0ul, supported.size());
fullySupportsModel = std::all_of(supported.begin(), supported.end(),
[](bool valid) { return valid; });
});
ASSERT_TRUE(supportedCall.isOk());
// launch prepare model
const sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
const Return<ErrorStatus> prepareLaunchStatus = device->prepareModel_1_3(
model, ExecutionPreference::FAST_SINGLE_ANSWER, priority, deadline,
hidl_vec<hidl_handle>(), hidl_vec<hidl_handle>(), HidlToken(), preparedModelCallback);
ASSERT_TRUE(prepareLaunchStatus.isOk());
ASSERT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(prepareLaunchStatus));
// retrieve prepared model
preparedModelCallback->wait();
const ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
const sp<V1_0::IPreparedModel> preparedModelV1_0 = preparedModelCallback->getPreparedModel();
const sp<IPreparedModel> preparedModel =
IPreparedModel::castFrom(preparedModelV1_0).withDefault(nullptr);
// The getSupportedOperations_1_3 call returns a list of operations that are
// guaranteed not to fail if prepareModel_1_3 is called, and
// 'fullySupportsModel' is true if and only if the entire model is
// guaranteed to be supported.
// If a driver has any doubt that it can prepare an operation, it must
// return false. So here, if a driver isn't sure if it can support an
// operation, but reports that it successfully prepared the model, the test
// can continue.
if (!fullySupportsModel && prepareReturnStatus != ErrorStatus::NONE) {
ASSERT_EQ(nullptr, preparedModel.get());
return;
}
// verify return status
if (!deadlineBound.has_value()) {
EXPECT_EQ(ErrorStatus::NONE, prepareReturnStatus);
} else {
switch (deadlineBound.value()) {
case DeadlineBoundType::NOW:
// If the execution was launched with a deadline of NOW, the
// deadline has already passed when the driver would launch the
// execution. In this case, the driver must return
// MISSED_DEADLINE_*.
EXPECT_TRUE(prepareReturnStatus == ErrorStatus::MISSED_DEADLINE_TRANSIENT ||
prepareReturnStatus == ErrorStatus::MISSED_DEADLINE_PERSISTENT);
break;
case DeadlineBoundType::UNLIMITED:
// If an unlimited deadline is supplied, we expect the execution to
// proceed normally. In this case, check it normally by breaking out
// of the switch statement.
EXPECT_EQ(ErrorStatus::NONE, prepareReturnStatus);
break;
}
}
ASSERT_EQ(prepareReturnStatus == ErrorStatus::NONE, preparedModel.get() != nullptr);
}
void runPrepareModelTests(const sp<IDevice>& device, const Model& model,
bool supportsPrepareModelDeadline) {
// test priority
for (auto priority : hidl_enum_range<Priority>{}) {
SCOPED_TRACE("priority: " + toString(priority));
if (priority == kDefaultPriority) continue;
runPrepareModelTest(device, model, priority, {});
}
// test deadline
if (supportsPrepareModelDeadline) {
for (auto deadlineBound : deadlineBounds) {
SCOPED_TRACE("deadlineBound: " + toString(deadlineBound));
runPrepareModelTest(device, model, kDefaultPriority, deadlineBound);
}
}
}
static MaybeResults executeAsynchronously(const sp<IPreparedModel>& preparedModel,
const Request& request, DeadlineBoundType deadlineBound) {
SCOPED_TRACE("asynchronous");
const MeasureTiming measure = MeasureTiming::NO;
const OptionalTimePoint deadline = makeOptionalTimePoint(deadlineBound);
// launch execution
const sp<ExecutionCallback> callback = new ExecutionCallback();
Return<ErrorStatus> ret = preparedModel->execute_1_3(request, measure, deadline, callback);
EXPECT_TRUE(ret.isOk());
EXPECT_EQ(ErrorStatus::NONE, ret.withDefault(ErrorStatus::GENERAL_FAILURE));
if (!ret.isOk() || ret != ErrorStatus::NONE) return std::nullopt;
// retrieve execution results
callback->wait();
const ErrorStatus status = callback->getStatus();
hidl_vec<OutputShape> outputShapes = callback->getOutputShapes();
const Timing timing = callback->getTiming();
// return results
return Results{status, std::move(outputShapes), timing};
}
static MaybeResults executeSynchronously(const sp<IPreparedModel>& preparedModel,
const Request& request, DeadlineBoundType deadlineBound) {
SCOPED_TRACE("synchronous");
const MeasureTiming measure = MeasureTiming::NO;
const OptionalTimePoint deadline = makeOptionalTimePoint(deadlineBound);
// configure results callback
MaybeResults results;
const auto cb = [&results](const auto&... args) { results.emplace(args...); };
// run execution
const Return<void> ret =
preparedModel->executeSynchronously_1_3(request, measure, deadline, cb);
EXPECT_TRUE(ret.isOk());
if (!ret.isOk()) return std::nullopt;
// return results
return results;
}
void runExecutionTest(const sp<IPreparedModel>& preparedModel, const TestModel& testModel,
const Request& request, bool synchronous, DeadlineBoundType deadlineBound) {
const ExecutionFunction execute = synchronous ? executeSynchronously : executeAsynchronously;
// Perform execution and unpack results.
const auto results = execute(preparedModel, request, deadlineBound);
if (!results.has_value()) return;
const auto& [status, outputShapes, timing] = results.value();
// Verify no timing information was returned
EXPECT_EQ(UINT64_MAX, timing.timeOnDevice);
EXPECT_EQ(UINT64_MAX, timing.timeInDriver);
// Validate deadline information if applicable.
switch (deadlineBound) {
case DeadlineBoundType::NOW:
// If the execution was launched with a deadline of NOW, the
// deadline has already passed when the driver would launch the
// execution. In this case, the driver must return
// MISSED_DEADLINE_*.
ASSERT_TRUE(status == ErrorStatus::MISSED_DEADLINE_TRANSIENT ||
status == ErrorStatus::MISSED_DEADLINE_PERSISTENT);
return;
case DeadlineBoundType::UNLIMITED:
// If an unlimited deadline is supplied, we expect the execution to
// proceed normally. In this case, check it normally by breaking out
// of the switch statement.
ASSERT_EQ(ErrorStatus::NONE, status);
break;
}
// If the model output operands are fully specified, outputShapes must be
// either empty or have the same number of elements as the number of outputs.
ASSERT_TRUE(outputShapes.size() == 0 || outputShapes.size() == testModel.outputIndexes.size());
// Go through all outputs, check returned output shapes.
for (uint32_t i = 0; i < outputShapes.size(); i++) {
EXPECT_TRUE(outputShapes[i].isSufficient);
const auto& expect = testModel.operands[testModel.outputIndexes[i]].dimensions;
const std::vector<uint32_t> actual = outputShapes[i].dimensions;
EXPECT_EQ(expect, actual);
}
// Retrieve execution results.
ASSERT_TRUE(nn::compliantWithV1_0(request));
const V1_0::Request request10 = nn::convertToV1_0(request);
const std::vector<TestBuffer> outputs = getOutputBuffers(request10);
// We want "close-enough" results.
checkResults(testModel, outputs);
}
void runExecutionTests(const sp<IPreparedModel>& preparedModel, const TestModel& testModel,
const Request& request) {
for (bool synchronous : {false, true}) {
for (auto deadlineBound : deadlineBounds) {
runExecutionTest(preparedModel, testModel, request, synchronous, deadlineBound);
}
}
}
void runTests(const sp<IDevice>& device, const TestModel& testModel,
std::pair<bool, bool> supportsDeadlines) {
// setup
const auto [supportsPrepareModelDeadline, supportsExecutionDeadline] = supportsDeadlines;
if (!supportsPrepareModelDeadline && !supportsExecutionDeadline) return;
const Model model = createModel(testModel);
// run prepare model tests
runPrepareModelTests(device, model, supportsPrepareModelDeadline);
if (supportsExecutionDeadline) {
// prepare model
sp<IPreparedModel> preparedModel;
createPreparedModel(device, model, &preparedModel);
if (preparedModel == nullptr) return;
// run execution tests
const Request request = nn::convertToV1_3(createRequest(testModel));
runExecutionTests(preparedModel, testModel, request);
}
}
class DeadlineTest : public GeneratedTestBase {};
TEST_P(DeadlineTest, Test) {
runTests(kDevice, kTestModel, mSupportsDeadlines);
}
INSTANTIATE_GENERATED_TEST(DeadlineTest,
[](const TestModel& testModel) { return !testModel.expectFailure; });
} // namespace android::hardware::neuralnetworks::V1_3::vts::functional

View file

@ -0,0 +1,27 @@
/*
* Copyright (C) 2019 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "1.3/Utils.h"
#include <iostream>
namespace android::hardware::neuralnetworks::V1_3 {
::std::ostream& operator<<(::std::ostream& os, ErrorStatus errorStatus) {
return os << toString(errorStatus);
}
} // namespace android::hardware::neuralnetworks::V1_3
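With the operator in scope, gtest assertions in this namespace print the enum's name via toString() rather than a raw integer; for example (illustrative):

EXPECT_EQ(ErrorStatus::NONE, status);
// On failure, the message shows e.g. "Actual: MISSED_DEADLINE_TRANSIENT".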

View file

@ -34,7 +34,6 @@ namespace android::hardware::neuralnetworks::V1_3::vts::functional {
using nn::ExecutionBurstController;
using nn::RequestChannelSender;
using nn::ResultChannelReceiver;
using V1_0::ErrorStatus;
using V1_0::Request;
using V1_2::FmqRequestDatum;
using V1_2::FmqResultDatum;
@ -80,16 +79,17 @@ static void createBurst(const sp<IPreparedModel>& preparedModel, const sp<IBurst
ASSERT_NE(nullptr, fmqResultDescriptor);
// configure burst
ErrorStatus errorStatus;
V1_0::ErrorStatus errorStatus;
sp<IBurstContext> burstContext;
const Return<void> ret = preparedModel->configureExecutionBurst(
callback, *fmqRequestDescriptor, *fmqResultDescriptor,
[&errorStatus, &burstContext](ErrorStatus status, const sp<IBurstContext>& context) {
[&errorStatus, &burstContext](V1_0::ErrorStatus status,
const sp<IBurstContext>& context) {
errorStatus = status;
burstContext = context;
});
ASSERT_TRUE(ret.isOk());
ASSERT_EQ(ErrorStatus::NONE, errorStatus);
ASSERT_EQ(V1_0::ErrorStatus::NONE, errorStatus);
ASSERT_NE(nullptr, burstContext.get());
// return values
@ -144,7 +144,7 @@ static void validate(RequestChannelSender* sender, ResultChannelReceiver* receiv
auto results = receiver->getBlocking();
ASSERT_TRUE(results.has_value());
const auto [status, outputShapes, timing] = std::move(*results);
EXPECT_NE(ErrorStatus::NONE, status);
EXPECT_NE(V1_0::ErrorStatus::NONE, status);
EXPECT_EQ(0u, outputShapes.size());
EXPECT_TRUE(badTiming(timing));
}
@ -302,14 +302,15 @@ static void validateBurstFmqLength(const sp<IPreparedModel>& preparedModel,
// collect serialized result by running regular burst
const auto [nRegular, outputShapesRegular, timingRegular, fallbackRegular] =
controllerRegular->compute(request, MeasureTiming::NO, keys);
const ErrorStatus statusRegular = nn::convertResultCodeToErrorStatus(nRegular);
const V1_0::ErrorStatus statusRegular =
nn::convertToV1_0(nn::convertResultCodeToErrorStatus(nRegular));
EXPECT_FALSE(fallbackRegular);
// skip test if regular burst output isn't useful for testing a failure
// caused by having too small of a length for the result FMQ
const std::vector<FmqResultDatum> serialized =
android::nn::serialize(statusRegular, outputShapesRegular, timingRegular);
if (statusRegular != ErrorStatus::NONE ||
if (statusRegular != V1_0::ErrorStatus::NONE ||
serialized.size() <= kExecutionBurstChannelSmallLength) {
return;
}
@ -318,8 +319,9 @@ static void validateBurstFmqLength(const sp<IPreparedModel>& preparedModel,
// large enough to return the serialized result
const auto [nSmall, outputShapesSmall, timingSmall, fallbackSmall] =
controllerSmall->compute(request, MeasureTiming::NO, keys);
const ErrorStatus statusSmall = nn::convertResultCodeToErrorStatus(nSmall);
EXPECT_NE(ErrorStatus::NONE, statusSmall);
const V1_0::ErrorStatus statusSmall =
nn::convertToV1_0(nn::convertResultCodeToErrorStatus(nSmall));
EXPECT_NE(V1_0::ErrorStatus::NONE, statusSmall);
EXPECT_EQ(0u, outputShapesSmall.size());
EXPECT_TRUE(badTiming(timingSmall));
EXPECT_FALSE(fallbackSmall);

View file

@ -18,13 +18,13 @@
#include "1.0/Utils.h"
#include "1.3/Callbacks.h"
#include "1.3/Utils.h"
#include "GeneratedTestHarness.h"
#include "VtsHalNeuralnetworks.h"
namespace android::hardware::neuralnetworks::V1_3::vts::functional {
using implementation::PreparedModelCallback;
using V1_0::ErrorStatus;
using V1_1::ExecutionPreference;
using V1_2::SymmPerChannelQuantParams;
using HidlToken =
@ -44,13 +44,19 @@ static void validateGetSupportedOperations(const sp<IDevice>& device, const std:
}
static void validatePrepareModel(const sp<IDevice>& device, const std::string& message,
const Model& model, ExecutionPreference preference) {
const Model& model, ExecutionPreference preference,
bool testDeadline) {
SCOPED_TRACE(message + " [prepareModel_1_3]");
OptionalTimePoint deadline;
if (testDeadline) {
deadline.nanoseconds(std::numeric_limits<uint64_t>::max());
}
sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
Return<ErrorStatus> prepareLaunchStatus =
device->prepareModel_1_3(model, preference, hidl_vec<hidl_handle>(),
hidl_vec<hidl_handle>(), HidlToken(), preparedModelCallback);
Return<ErrorStatus> prepareLaunchStatus = device->prepareModel_1_3(
model, preference, kDefaultPriority, deadline, hidl_vec<hidl_handle>(),
hidl_vec<hidl_handle>(), HidlToken(), preparedModelCallback);
ASSERT_TRUE(prepareLaunchStatus.isOk());
ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, static_cast<ErrorStatus>(prepareLaunchStatus));
@ -73,12 +79,13 @@ static bool validExecutionPreference(ExecutionPreference preference) {
// to the model does not leave this function.
static void validate(const sp<IDevice>& device, const std::string& message, Model model,
const std::function<void(Model*)>& mutation,
ExecutionPreference preference = ExecutionPreference::FAST_SINGLE_ANSWER) {
ExecutionPreference preference = ExecutionPreference::FAST_SINGLE_ANSWER,
bool testDeadline = false) {
mutation(&model);
if (validExecutionPreference(preference)) {
if (validExecutionPreference(preference) && !testDeadline) {
validateGetSupportedOperations(device, message, model);
}
validatePrepareModel(device, message, model, preference);
validatePrepareModel(device, message, model, preference, testDeadline);
}
static uint32_t addOperand(Model* model) {
@ -714,9 +721,19 @@ static void mutateExecutionPreferenceTest(const sp<IDevice>& device, const Model
}
}
///////////////////////// DEADLINE /////////////////////////
static void deadlineTest(const sp<IDevice>& device, const Model& model) {
const std::string message = "deadlineTest: deadline not supported";
const auto noop = [](Model*) {};
validate(device, message, model, noop, ExecutionPreference::FAST_SINGLE_ANSWER,
/*testDeadline=*/true);
}
////////////////////////// ENTRY POINT //////////////////////////////
void validateModel(const sp<IDevice>& device, const Model& model) {
void validateModel(const sp<IDevice>& device, const Model& model,
bool prepareModelDeadlineSupported) {
mutateOperandTypeTest(device, model);
mutateOperandRankTest(device, model);
mutateOperandScaleTest(device, model);
@ -732,6 +749,9 @@ void validateModel(const sp<IDevice>& device, const Model& model) {
addOperationInputTest(device, model);
addOperationOutputTest(device, model);
mutateExecutionPreferenceTest(device, model);
if (!prepareModelDeadlineSupported) {
deadlineTest(device, model);
}
}
} // namespace android::hardware::neuralnetworks::V1_3::vts::functional

View file

@ -18,7 +18,7 @@
#include <chrono>
#include "1.0/Utils.h"
#include "1.2/Callbacks.h"
#include "1.3/Callbacks.h"
#include "ExecutionBurstController.h"
#include "GeneratedTestHarness.h"
#include "TestHarness.h"
@ -27,11 +27,10 @@
namespace android::hardware::neuralnetworks::V1_3::vts::functional {
using V1_0::ErrorStatus;
using implementation::ExecutionCallback;
using V1_2::MeasureTiming;
using V1_2::OutputShape;
using V1_2::Timing;
using V1_2::implementation::ExecutionCallback;
///////////////////////// UTILITY FUNCTIONS /////////////////////////
@ -44,7 +43,8 @@ static bool badTiming(Timing timing) {
// that use the request. Note that the request here is passed by value, and any
// mutation to the request does not leave this function.
static void validate(const sp<IPreparedModel>& preparedModel, const std::string& message,
Request request, const std::function<void(Request*)>& mutation) {
Request request, const std::function<void(Request*)>& mutation,
bool testDeadline = false) {
mutation(&request);
// We'd like to test both with timing requested and without timing
@ -57,13 +57,18 @@ static void validate(const sp<IPreparedModel>& preparedModel, const std::string&
};
MeasureTiming measure = (hash & 1) ? MeasureTiming::YES : MeasureTiming::NO;
OptionalTimePoint deadline;
if (testDeadline) {
deadline.nanoseconds(std::numeric_limits<uint64_t>::max());
}
// asynchronous
{
SCOPED_TRACE(message + " [execute_1_3]");
sp<ExecutionCallback> executionCallback = new ExecutionCallback();
Return<ErrorStatus> executeLaunchStatus =
preparedModel->execute_1_3(request, measure, executionCallback);
preparedModel->execute_1_3(request, measure, deadline, executionCallback);
ASSERT_TRUE(executeLaunchStatus.isOk());
ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, static_cast<ErrorStatus>(executeLaunchStatus));
@ -81,7 +86,7 @@ static void validate(const sp<IPreparedModel>& preparedModel, const std::string&
SCOPED_TRACE(message + " [executeSynchronously_1_3]");
Return<void> executeStatus = preparedModel->executeSynchronously_1_3(
request, measure,
request, measure, deadline,
[](ErrorStatus error, const hidl_vec<OutputShape>& outputShapes,
const Timing& timing) {
ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, error);
@ -93,7 +98,7 @@ static void validate(const sp<IPreparedModel>& preparedModel, const std::string&
// burst
// TODO(butlermichael): Check if we need to test burst in V1_3 if the interface remains V1_2.
{
if (!testDeadline) {
SCOPED_TRACE(message + " [burst]");
ASSERT_TRUE(nn::compliantWithV1_0(request));
@ -153,17 +158,29 @@ static void removeOutputTest(const sp<IPreparedModel>& preparedModel, const Requ
}
}
///////////////////////// DEADLINE ////////////////////////////////////
static void deadlineTest(const sp<IPreparedModel>& preparedModel, const Request& request) {
const std::string message = "deadlineTest: deadline not supported";
const auto noop = [](Request*) {};
validate(preparedModel, message, request, noop, /*testDeadline=*/true);
}
///////////////////////////// ENTRY POINT //////////////////////////////////
void validateRequest(const sp<IPreparedModel>& preparedModel, const Request& request) {
void validateRequest(const sp<IPreparedModel>& preparedModel, const Request& request,
bool executionDeadlineSupported) {
removeInputTest(preparedModel, request);
removeOutputTest(preparedModel, request);
if (!executionDeadlineSupported) {
deadlineTest(preparedModel, request);
}
}
void validateRequestFailure(const sp<IPreparedModel>& preparedModel, const Request& request) {
SCOPED_TRACE("Expecting request to fail [executeSynchronously_1_3]");
Return<void> executeStatus = preparedModel->executeSynchronously_1_3(
request, MeasureTiming::NO,
request, MeasureTiming::NO, {},
[](ErrorStatus error, const hidl_vec<OutputShape>& outputShapes, const Timing& timing) {
ASSERT_NE(ErrorStatus::NONE, error);
EXPECT_EQ(outputShapes.size(), 0);

View file

@ -23,6 +23,7 @@
#include <utility>
#include "1.0/Utils.h"
#include "1.3/Callbacks.h"
#include "1.3/Utils.h"
#include "GeneratedTestHarness.h"
#include "TestHarness.h"
#include "Utils.h"
@ -32,7 +33,6 @@ namespace android::hardware::neuralnetworks::V1_3::vts::functional {
using HidlToken =
hidl_array<uint8_t, static_cast<uint32_t>(V1_2::Constant::BYTE_SIZE_OF_CACHE_TOKEN)>;
using implementation::PreparedModelCallback;
using V1_0::ErrorStatus;
using V1_1::ExecutionPreference;
// internal helper function
@ -55,8 +55,8 @@ void createPreparedModel(const sp<IDevice>& device, const Model& model,
// launch prepare model
const sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
const Return<ErrorStatus> prepareLaunchStatus = device->prepareModel_1_3(
model, ExecutionPreference::FAST_SINGLE_ANSWER, hidl_vec<hidl_handle>(),
hidl_vec<hidl_handle>(), HidlToken(), preparedModelCallback);
model, ExecutionPreference::FAST_SINGLE_ANSWER, kDefaultPriority, {},
hidl_vec<hidl_handle>(), hidl_vec<hidl_handle>(), HidlToken(), preparedModelCallback);
ASSERT_TRUE(prepareLaunchStatus.isOk());
ASSERT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(prepareLaunchStatus));
@ -84,6 +84,7 @@ void createPreparedModel(const sp<IDevice>& device, const Model& model,
<< std::endl;
GTEST_SKIP();
}
ASSERT_EQ(ErrorStatus::NONE, prepareReturnStatus);
ASSERT_NE(nullptr, preparedModel->get());
}
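The two new arguments slot in between the execution preference and the cache handles: a Priority and an OptionalTimePoint deadline. For reference, a sketch of the updated launch assembled from the hunk above (the bare {} is an unset deadline; kDefaultPriority comes from the new 1.3/Utils.h):
// Sketch of the QoS-aware prepare call.
const sp<PreparedModelCallback> callback = new PreparedModelCallback();
const Return<ErrorStatus> launchStatus = device->prepareModel_1_3(
        model,
        ExecutionPreference::FAST_SINGLE_ANSWER,  // V1_1 execution preference
        kDefaultPriority,                         // new: Priority::MEDIUM by default
        {},                                       // new: OptionalTimePoint deadline (unset)
        hidl_vec<hidl_handle>(),                  // model cache
        hidl_vec<hidl_handle>(),                  // data cache
        HidlToken(),                              // cache token
        callback);
ASSERT_TRUE(launchStatus.isOk());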
@ -122,23 +123,27 @@ std::string printNeuralnetworksHidlTest(
INSTANTIATE_DEVICE_TEST(NeuralnetworksHidlTest);
// Forward declaration from ValidateModel.cpp
void validateModel(const sp<IDevice>& device, const Model& model);
void validateModel(const sp<IDevice>& device, const Model& model,
bool prepareModelDeadlineSupported);
// Forward declaration from ValidateRequest.cpp
void validateRequest(const sp<IPreparedModel>& preparedModel, const Request& request);
void validateRequest(const sp<IPreparedModel>& preparedModel, const Request& request,
bool executionDeadlineSupported);
// Forward declaration from ValidateRequest.cpp
void validateRequestFailure(const sp<IPreparedModel>& preparedModel, const Request& request);
// Forward declaration from ValidateBurst.cpp
void validateBurst(const sp<IPreparedModel>& preparedModel, const V1_0::Request& request);
void validateEverything(const sp<IDevice>& device, const Model& model, const Request& request) {
validateModel(device, model);
void validateEverything(const sp<IDevice>& device, const Model& model, const Request& request,
std::pair<bool, bool> supportsDeadlines) {
const auto [prepareModelDeadlineSupported, executionDeadlineSupported] = supportsDeadlines;
validateModel(device, model, prepareModelDeadlineSupported);
// Create IPreparedModel.
sp<IPreparedModel> preparedModel;
createPreparedModel(device, model, &preparedModel);
if (preparedModel == nullptr) return;
validateRequest(preparedModel, request);
validateRequest(preparedModel, request, executionDeadlineSupported);
// TODO(butlermichael): Check if we need to test burst in V1_3 if the interface remains V1_2.
ASSERT_TRUE(nn::compliantWithV1_0(request));
@ -146,10 +151,12 @@ void validateEverything(const sp<IDevice>& device, const Model& model, const Req
validateBurst(preparedModel, request10);
}
void validateFailure(const sp<IDevice>& device, const Model& model, const Request& request) {
void validateFailure(const sp<IDevice>& device, const Model& model, const Request& request,
std::pair<bool, bool> supportsDeadlines) {
const bool prepareModelDeadlineSupported = supportsDeadlines.first;
// TODO: Should this always succeed?
// What if the invalid input is part of the model (i.e., a parameter).
validateModel(device, model);
validateModel(device, model, prepareModelDeadlineSupported);
// Create IPreparedModel.
sp<IPreparedModel> preparedModel;
@ -163,9 +170,9 @@ TEST_P(ValidationTest, Test) {
const Model model = createModel(kTestModel);
const Request request = nn::convertToV1_3(createRequest(kTestModel));
if (kTestModel.expectFailure) {
validateFailure(kDevice, model, request);
validateFailure(kDevice, model, request, mSupportsDeadlines);
} else {
validateEverything(kDevice, model, request);
validateEverything(kDevice, model, request, mSupportsDeadlines);
}
}
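mSupportsDeadlines is the pair threaded through both validators. A hedged sketch of how a test might populate it, assuming the supportsDeadlines() query this change adds to IDevice; the actual fixture plumbing is defined elsewhere in the commit:
// Sketch: fetch the (prepareModel, execution) deadline-support pair once per device.
std::pair<bool, bool> supportsDeadlines = {false, false};
const Return<void> ret = device->supportsDeadlines(
        [&supportsDeadlines](bool prepareModelDeadline, bool executionDeadline) {
            supportsDeadlines = {prepareModelDeadline, executionDeadline};
        });
EXPECT_TRUE(ret.isOk());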


@ -18,8 +18,11 @@
#define ANDROID_HARDWARE_NEURALNETWORKS_V1_3_CALLBACKS_H
#include <android-base/thread_annotations.h>
#include <android/hardware/neuralnetworks/1.0/IExecutionCallback.h>
#include <android/hardware/neuralnetworks/1.0/IPreparedModelCallback.h>
#include <android/hardware/neuralnetworks/1.2/IExecutionCallback.h>
#include <android/hardware/neuralnetworks/1.2/IPreparedModelCallback.h>
#include <android/hardware/neuralnetworks/1.3/IExecutionCallback.h>
#include <android/hardware/neuralnetworks/1.3/IPreparedModelCallback.h>
#include <hidl/Status.h>
#include <condition_variable>
@ -136,7 +139,7 @@ class PreparedModelCallback : public IPreparedModelCallback {
* @param preparedModel Returned model that has been prepared for execution,
* nullptr if the model was unable to be prepared.
*/
Return<void> notify_1_3(V1_0::ErrorStatus status,
Return<void> notify_1_3(V1_3::ErrorStatus status,
const sp<V1_3::IPreparedModel>& preparedModel) override;
/**
@ -158,7 +161,7 @@ class PreparedModelCallback : public IPreparedModelCallback {
* - GENERAL_FAILURE if there is an unspecified error
* - INVALID_ARGUMENT if the input model is invalid
*/
V1_0::ErrorStatus getStatus() const;
ErrorStatus getStatus() const;
/**
* Retrieves the model that has been prepared for execution from the
@ -173,13 +176,216 @@ class PreparedModelCallback : public IPreparedModelCallback {
sp<V1_0::IPreparedModel> getPreparedModel() const;
private:
Return<void> notifyInternal(ErrorStatus status, const sp<V1_0::IPreparedModel>& preparedModel);
mutable std::mutex mMutex;
mutable std::condition_variable mCondition;
bool mNotified GUARDED_BY(mMutex) = false;
V1_0::ErrorStatus mErrorStatus = V1_0::ErrorStatus::GENERAL_FAILURE;
ErrorStatus mErrorStatus = ErrorStatus::GENERAL_FAILURE;
sp<V1_0::IPreparedModel> mPreparedModel;
};
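A typical test-side sequence for PreparedModelCallback, condensed from createPreparedModel() earlier in this commit (sketch only; error handling trimmed):
sp<PreparedModelCallback> callback = new PreparedModelCallback();
// ... device->prepareModel_1_3(model, preference, kDefaultPriority, {}, ..., callback) ...
callback->wait();                                    // blocks until notify/notify_1_2/notify_1_3
ASSERT_EQ(ErrorStatus::NONE, callback->getStatus());
sp<IPreparedModel> prepared =
        IPreparedModel::castFrom(callback->getPreparedModel()).withDefault(nullptr);
ASSERT_NE(nullptr, prepared.get());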
/**
* The ExecutionCallback class is used to receive the results of the execution
* from a task executing asynchronously with respect to the runtime. If a
* calling thread calls wait or get* on an ExecutionCallback object and the
* corresponding asynchronous task has not finished the execution, the calling
* thread will block until the asynchronous task has called one of the
* notify* methods.
*
* If the callback object is notified more than once, only the results of the
* first call to notify* are used, and the results from subsequent calls are
* discarded.
*
* This callback object is passed as an argument to IPreparedModel::execute*.
*/
class ExecutionCallback : public IExecutionCallback {
public:
/**
* IExecutionCallback::notify marks the callback object with the return
* status of the asynchronous execution that held this callback and enables
* all prior and future wait calls on the ExecutionCallback object to
* proceed.
*
* One of the IExecutionCallback::notify* methods must be called on a given
* ExecutionCallback object.
*
* If the callback object is notified more than once, only the results of
* the first call to notify* are used, and the results from subsequent calls
* are discarded.
*
* @param status Error status returned from launching the asynchronous task
* (if the launch fails) or from the asynchronous task itself (if the
* launch succeeds). Must be:
* - NONE if the asynchronous execution was successful
* - DEVICE_UNAVAILABLE if driver is offline or busy
* - GENERAL_FAILURE if there is an unspecified error
* - OUTPUT_INSUFFICIENT_SIZE if provided output buffer is not large
* enough to store the resultant values
* - INVALID_ARGUMENT if the input request is invalid
*/
Return<void> notify(V1_0::ErrorStatus status) override;
/**
* IExecutionCallback::notify_1_2 marks the callback object with the results
* (error status, dynamic output shapes, and timing information) of the
* asynchronous execution that held this callback and enables all prior and
* future wait calls on the ExecutionCallback object to proceed.
*
* One of the IExecutionCallback::notify* methods must be called on a given
* ExecutionCallback object.
*
* If the callback object is notified more than once, only the results of
* the first call to notify* are used, and the results from subsequent calls
* are discarded.
*
* @param status Error status returned from launching the asynchronous task
* (if the launch fails) or from the asynchronous task itself (if the
* launch succeeds). Must be:
* - NONE if the asynchronous execution was successful
* - DEVICE_UNAVAILABLE if driver is offline or busy
* - GENERAL_FAILURE if the asynchronous task resulted in an unspecified
* error
* - OUTPUT_INSUFFICIENT_SIZE if at least one output operand buffer is
* not large enough to store the corresponding output
* - INVALID_ARGUMENT if one of the input arguments to execute is
* invalid
* @param outputShapes A list of shape information of model output operands.
* The index into "outputShapes" corresponds to the index of the output
* operand in the Request outputs vector. outputShapes must be empty
* unless the status is either NONE or OUTPUT_INSUFFICIENT_SIZE.
* @param timing Duration of execution. Unless MeasureTiming::YES was passed
* when launching the execution and status is NONE, all times must be
* reported as UINT64_MAX. A driver may choose to report any time as
* UINT64_MAX, indicating that particular measurement is not available.
*/
Return<void> notify_1_2(V1_0::ErrorStatus status,
const hidl_vec<V1_2::OutputShape>& outputShapes,
const V1_2::Timing& timing) override;
/**
* IExecutionCallback::notify_1_3 marks the callback object with the results
* (error status, dynamic output shapes, and timing information) of the
* asynchronous execution that held this callback and enables all prior and
* future wait calls on the ExecutionCallback object to proceed.
*
* One of the IExecutionCallback::notify* methods must be called on a given
* ExecutionCallback object.
*
* If the callback object is notified more than once, only the results of
* the first call to notify* are used, and the results from subsequent calls
* are discarded.
*
* @param status Error status returned from launching the asynchronous task
* (if the launch fails) or from the asynchronous task itself (if the
* launch succeeds). Must be:
* - NONE if the asynchronous execution was successful
* - DEVICE_UNAVAILABLE if driver is offline or busy
* - GENERAL_FAILURE if the asynchronous task resulted in an unspecified
* error
* - OUTPUT_INSUFFICIENT_SIZE if at least one output operand buffer is
* not large enough to store the corresponding output
* - INVALID_ARGUMENT if one of the input arguments to execute is
* invalid
* - MISSED_DEADLINE_* if the deadline was not met
* @param outputShapes A list of shape information of model output operands.
* The index into "outputShapes" corresponds to the index of the output
* operand in the Request outputs vector. outputShapes must be empty
* unless the status is either NONE or OUTPUT_INSUFFICIENT_SIZE.
* @param timing Duration of execution. Unless MeasureTiming::YES was passed
* when launching the execution and status is NONE, all times must be
* reported as UINT64_MAX. A driver may choose to report any time as
* UINT64_MAX, indicating that particular measurement is not available.
*/
Return<void> notify_1_3(V1_3::ErrorStatus status,
const hidl_vec<V1_2::OutputShape>& outputShapes,
const V1_2::Timing& timing) override;
/**
* ExecutionCallback::wait blocks until notify* has been called on the
* callback object.
*/
void wait() const;
/**
* Retrieves the error status returned from the asynchronous task launched
* by one of the IPreparedModel::execute* methods. If
* IPreparedModel::execute* (but not IPreparedModel::executeSynchronously*)
* has not finished asynchronously executing, this call will block until the
* asynchronous task notifies the object.
*
* @return status Error status returned from launching the asynchronous task
* (if the launch fails) or from the asynchronous task itself (if the
* launch succeeds). Must be:
* - NONE if the asynchronous execution was successful
* - DEVICE_UNAVAILABLE if driver is offline or busy
* - GENERAL_FAILURE if the asynchronous task resulted in an unspecified
* error
* - OUTPUT_INSUFFICIENT_SIZE if at least one output operand buffer is
* not large enough to store the corresponding output
* - INVALID_ARGUMENT if one of the input arguments to execute is
* invalid
* - MISSED_DEADLINE_* if the deadline could not be met
*/
V1_3::ErrorStatus getStatus() const;
/**
* Retrieves the output shapes returned from the asynchronous task launched
* by one of the IPreparedModel::execute* methods. If
* IPreparedModel::execute* (but not IPreparedModel::executeSynchronously*)
* has not finished asynchronously executing, this call will block until the
* asynchronous task notifies the object.
*
* If the asynchronous task was launched by IPreparedModel::execute, an
* empty vector will be returned.
*
* @return outputShapes A list of shape information of model output
* operands. The index into "outputShapes" corresponds to the index of
* the output operand in the Request outputs vector. outputShapes must
* be empty unless the status is either NONE or
* OUTPUT_INSUFFICIENT_SIZE. outputShapes may be empty if the status is
* NONE and all model output operands are fully-specified at execution
* time. outputShapes must have the same number of elements as the
* number of model output operands if the status is
* OUTPUT_INSUFFICIENT_SIZE, or if the status is NONE and the model has
* at least one output operand that is not fully-specified.
*/
const std::vector<V1_2::OutputShape>& getOutputShapes() const;
/**
* Retrieves the timing information returned from the asynchronous task launched
* by one of the IPreparedModel::execute* methods. If
* IPreparedModel::execute* (but not IPreparedModel::executeSynchronously*)
* has not finished asynchronously executing, this call will block until the
* asynchronous task notifies the object.
*
* If the asynchronous task was launched by IPreparedModel::execute, all
* times must be UINT64_MAX.
*
* @return timing Duration of the execution. All times must be UINT64_MAX
* unless the status is NONE.
*/
V1_2::Timing getTiming() const;
private:
/*
* ExecutionCallback::notifyInternal stores the results of the execution
* (status, output shapes, and timing information) in the ExecutionCallback
* object before any call to wait or get* returns. It then enables all prior
* and future wait calls on the ExecutionCallback object to proceed.
*/
Return<void> notifyInternal(V1_3::ErrorStatus errorStatus,
hidl_vec<V1_2::OutputShape> outputShapes, V1_2::Timing timing);
// members
mutable std::mutex mMutex;
mutable std::condition_variable mCondition;
bool mNotified GUARDED_BY(mMutex) = false;
V1_3::ErrorStatus mErrorStatus = V1_3::ErrorStatus::GENERAL_FAILURE;
std::vector<V1_2::OutputShape> mOutputShapes = {};
V1_2::Timing mTiming = {};
};
} // namespace android::hardware::neuralnetworks::V1_3::implementation
#endif // ANDROID_HARDWARE_NEURALNETWORKS_V1_3_CALLBACKS_H
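Putting the ExecutionCallback pieces together, a hedged end-to-end sketch against the new 1.3 asynchronous path (names follow the declarations above; the empty braces are an unset deadline):
sp<ExecutionCallback> callback = new ExecutionCallback();
Return<ErrorStatus> launch =
        preparedModel->execute_1_3(request, MeasureTiming::YES, /*deadline=*/{}, callback);
ASSERT_TRUE(launch.isOk());
callback->wait();                                  // returns once any notify* has fired
ErrorStatus status = callback->getStatus();
const std::vector<V1_2::OutputShape>& shapes = callback->getOutputShapes();
V1_2::Timing timing = callback->getTiming();       // {UINT64_MAX, UINT64_MAX} unless measured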


@ -0,0 +1,36 @@
/*
* Copyright (C) 2019 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef ANDROID_HARDWARE_NEURALNETWORKS_V1_3_UTILS_H
#define ANDROID_HARDWARE_NEURALNETWORKS_V1_3_UTILS_H
#include <android/hardware/neuralnetworks/1.3/types.h>
#include <iosfwd>
namespace android::hardware::neuralnetworks {
inline constexpr V1_3::Priority kDefaultPriority = V1_3::Priority::MEDIUM;
} // namespace android::hardware::neuralnetworks
namespace android::hardware::neuralnetworks::V1_3 {
// pretty-print values for error messages
::std::ostream& operator<<(::std::ostream& os, ErrorStatus errorStatus);
} // namespace android::hardware::neuralnetworks::V1_3
#endif // ANDROID_HARDWARE_NEURALNETWORKS_V1_3_UTILS_H
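The matching Utils.cpp definition is not shown in this hunk; a likely sketch, relying on the toString() overload that hidl-gen emits for every enum (an assumption; the real implementation may differ):
#include <android/hardware/neuralnetworks/1.3/types.h>
#include <ostream>

namespace android::hardware::neuralnetworks::V1_3 {

// Pretty-print an ErrorStatus by delegating to the generated toString().
::std::ostream& operator<<(::std::ostream& os, ErrorStatus errorStatus) {
    return os << toString(errorStatus);
}

}  // namespace android::hardware::neuralnetworks::V1_3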