Merge changes from topic "cp-resize-params"

* changes:
  Add align_corners and half_pixel_centers parameters to resize ops
  Fix NNAPI QoS Deadline test
  Relax NNAPI QoS deadline parameter
Slava Shklyaev 2020-03-09 10:20:27 +00:00 committed by Gerrit Code Review
commit 37cf4fd14b
13 changed files with 196 additions and 220 deletions

View file

@ -631,12 +631,12 @@ bbeee9604128ede83ee755b67e73b5ad29e6e1dbac9ec41fea6ffe2745b0c50a android.hardwar
adb0efdf1462e9b2e742c0dcadd598666aac551f178be06e755bfcdf5797abd0 android.hardware.keymaster@4.1::IOperation
ddcf89cd8ee2df0d32aee55050826446fb64f7aafde0a7cd946c64f61b1a364c android.hardware.keymaster@4.1::types
65c16331e57f6dd68b3971f06f78fe9e3209afb60630c31705aa355f9a52bf0d android.hardware.neuralnetworks@1.3::IBuffer
9db064ee44268a876be0367ff771e618362d39ec603b6ecab17e1575725fcd87 android.hardware.neuralnetworks@1.3::IDevice
4167dc3ad35e9cd0d2057d4868c7675ae2c3c9d05bbd614c1f5dccfa5fd68797 android.hardware.neuralnetworks@1.3::IExecutionCallback
2fa3679ad7c94b5e88724adcd560c561041068a4ca565c63830e68101988746a android.hardware.neuralnetworks@1.3::IFencedExecutionCallback
43088ffc71945b463a7279262cfe2e290f6ed2f15d3fd6032798a3be299fb08f android.hardware.neuralnetworks@1.3::IPreparedModel
0439a1fbbec7f16e5e4c653d85ac685d51bfafbae15b8f8cca530acdd7d6a8ce android.hardware.neuralnetworks@1.3::IPreparedModelCallback
dd39887aa4fb60ce60ea9cc043edeadbbae6e922d09d3946311b0b410024ae14 android.hardware.neuralnetworks@1.3::types
278817920bfd5292a7713f97f1832cca53de3de640f7670e413d97c6e7fd581c android.hardware.neuralnetworks@1.3::IDevice
127ba11efb8220dc3aec9a8f441b59eaf1c68d7f03f577833e1824de75a36b17 android.hardware.neuralnetworks@1.3::IExecutionCallback
6e904be0ddca5ae1de8eba020e6c38ed935ea7d80cd08f47787f137a0ca58555 android.hardware.neuralnetworks@1.3::IFencedExecutionCallback
2b0b10d2ea7a18a4048cd0eb83d35c19a817aeee95f65807fc31f4ef21381397 android.hardware.neuralnetworks@1.3::IPreparedModel
eee3430cc86c97c7b407495863d8fb61da6f1a64b7721e77b9b4909b11b174e9 android.hardware.neuralnetworks@1.3::IPreparedModelCallback
c9320b04ec302624985180a02d591bea5e435601fc411a6cabb58878e4e1ad68 android.hardware.neuralnetworks@1.3::types
3e01d4446cd69fd1c48f8572efd97487bc179564b32bd795800b97bbe10be37b android.hardware.wifi@1.4::IWifi
a64467bae843569f0d465c5be7f0c7a5b987985b55a3ef4794dd5afc68538650 android.hardware.wifi.supplicant@1.3::ISupplicant
44445b8a03d7b9e68b2fbd954672c18a8fce9e32851b0692f4f4ab3407f86ecb android.hardware.wifi.supplicant@1.3::ISupplicantStaIface

View file

@ -47,19 +47,6 @@ interface IDevice extends @1.2::IDevice {
*/
getCapabilities_1_3() generates (ErrorStatus status, Capabilities capabilities);
/**
* Returns whether the device is able to complete or abort a task within a
* specified duration.
*
* @return prepareModelDeadline 'true' if the device supports completing or
* aborting model preparation by the deadline when the deadline is supplied,
* 'false' otherwise.
* @return executionDeadline 'true' if the device supports completing or
* aborting an execution by the deadline when the deadline is supplied,
* 'false' otherwise.
*/
supportsDeadlines() generates (bool prepareModelDeadline, bool executionDeadline);
/**
* Gets the supported operations in a model.
*
@ -140,14 +127,10 @@ interface IDevice extends @1.2::IDevice {
*
* prepareModel_1_3 can be called with an optional deadline. If the model
* is not able to be prepared before the provided deadline, the model
* preparation must be aborted, and either {@link
* preparation may be aborted, and either {@link
* ErrorStatus::MISSED_DEADLINE_TRANSIENT} or {@link
* ErrorStatus::MISSED_DEADLINE_PERSISTENT} must be returned. The error due
* ErrorStatus::MISSED_DEADLINE_PERSISTENT} may be returned. The error due
* to an abort must be sent the same way as other errors, described above.
* If the service reports that it does not support preparation deadlines via
* IDevice::supportsDeadlines, and prepareModel_1_3 is called with a
* deadline, then the argument is invalid, and {@link
* ErrorStatus::INVALID_ARGUMENT} must be returned.
*
* Optionally, the driver may save the prepared model to cache during the
* asynchronous preparation. Any error that occurs when saving to cache must
@ -172,9 +155,9 @@ interface IDevice extends @1.2::IDevice {
* model.
* @param priority The priority of the prepared model relative to other
* prepared models owned by the client.
* @param deadline The time by which the model must be prepared. If the
* model cannot be prepared by the deadline, the preparation must be
* aborted.
* @param deadline The time by which the model is expected to be prepared.
* If the model cannot be prepared by the deadline, the preparation may
* be aborted.
* @param modelCache A vector of handles with each entry holding exactly one
* cache file descriptor for the security-sensitive cache. The length of
* the vector must either be 0 indicating that caching information is
@ -209,8 +192,8 @@ interface IDevice extends @1.2::IDevice {
* - GENERAL_FAILURE if there is an unspecified error
* - INVALID_ARGUMENT if one of the input arguments related to preparing
* the model is invalid
* - MISSED_DEADLINE_* if the deadline for preparing a model cannot be
* met
* - MISSED_DEADLINE_* if the preparation is aborted because the model
* cannot be prepared by the deadline
* - RESOURCE_EXHAUSTED_* if the task was aborted by the driver
*/
prepareModel_1_3(Model model, ExecutionPreference preference,
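
The relaxed wording above leaves the abort decision to the driver: when a supplied deadline has already passed, the driver may either finish the preparation or abort with MISSED_DEADLINE_*. A minimal sketch of the kind of check a driver implementation might perform; it is not part of this change, and it assumes the generated HIDL safe_union accessors for OptionalTimePoint (getDiscriminator / hidl_discriminator) plus std::chrono:

#include <chrono>

// Illustrative helper: true if an optional deadline was supplied and lies in the past.
// A driver MAY use this to decide to abort with MISSED_DEADLINE_*, but it is equally
// free to keep going and finish the preparation late.
static bool deadlineHasPassed(const OptionalTimePoint& deadline) {
    if (deadline.getDiscriminator() !=
        OptionalTimePoint::hidl_discriminator::nanosecondsSinceEpoch) {
        return false;  // no deadline supplied, nothing to enforce
    }
    const auto now = std::chrono::steady_clock::now().time_since_epoch();
    const uint64_t nowNs =
            std::chrono::duration_cast<std::chrono::nanoseconds>(now).count();
    return nowNs > deadline.nanosecondsSinceEpoch();
}
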
@ -262,14 +245,11 @@ interface IDevice extends @1.2::IDevice {
*
* prepareModelFromCache_1_3 can be called with an optional deadline. If the
* model is not able to be prepared before the provided deadline, the model
* preparation must be aborted, and either {@link
* preparation may be aborted, and either {@link
* ErrorStatus::MISSED_DEADLINE_TRANSIENT}
* or {@link ErrorStatus::MISSED_DEADLINE_PERSISTENT} must be returned. The
* or {@link ErrorStatus::MISSED_DEADLINE_PERSISTENT} may be returned. The
* error due to an abort must be sent the same way as other errors,
* described above. If the service reports that it does not support
* preparation deadlines via IDevice::supportsDeadlines, and
* prepareModelFromCache_1_3 is called with a deadline, then the argument is
* invalid, and {@link ErrorStatus::INVALID_ARGUMENT} must be returned.
* described above.
*
* The only information that may be unknown to the model at this stage is
* the shape of the tensors, which may only be known at execution time. As
@ -279,9 +259,9 @@ interface IDevice extends @1.2::IDevice {
* used with different shapes of inputs on different (possibly concurrent)
* executions.
*
* @param deadline The time by which the model must be prepared. If the
* model cannot be prepared by the deadline, the preparation must be
* aborted.
* @param deadline The time by which the model is expected to be prepared.
* If the model cannot be prepared by the deadline, the preparation may
* be aborted.
* @param modelCache A vector of handles with each entry holding exactly one
* cache file descriptor for the security-sensitive cache. The length of
* the vector must match the numModelCache returned from getNumberOfCacheFilesNeeded.
@ -307,8 +287,8 @@ interface IDevice extends @1.2::IDevice {
* - GENERAL_FAILURE if caching is not supported or if there is an
* unspecified error
* - INVALID_ARGUMENT if one of the input arguments is invalid
* - MISSED_DEADLINE_* if the deadline for preparing a model cannot be
* met
* - MISSED_DEADLINE_* if the preparation is aborted because the model
* cannot be prepared by the deadline
* - RESOURCE_EXHAUSTED_* if the task was aborted by the driver
*/
prepareModelFromCache_1_3(OptionalTimePoint deadline,

View file

@ -47,7 +47,8 @@ interface IExecutionCallback extends @1.2::IExecutionCallback {
* corresponding output
* - INVALID_ARGUMENT if one of the input arguments to
* prepareModel is invalid
* - MISSED_DEADLINE_* if the deadline could not be met
* - MISSED_DEADLINE_* if the execution is aborted because it
* cannot be completed by the deadline
* - RESOURCE_EXHAUSTED_* if the task was aborted by the driver
* @param outputShapes A list of shape information of model output operands.
The index into "outputShapes" corresponds to the index

View file

@ -38,8 +38,8 @@ interface IFencedExecutionCallback {
* - DEVICE_UNAVAILABLE if driver is offline or busy
* - GENERAL_FAILURE if the asynchronous task resulted in an
* unspecified error
* - MISSED_DEADLINE_* if the deadline for executing a model
* cannot be met
* - MISSED_DEADLINE_* if the execution is aborted because it
* cannot be completed by the deadline
* - RESOURCE_EXHAUSTED_* if the task was aborted by the
* driver
* @return timingLaunched The duration starts when executeFenced is called and ends when

View file

@ -70,14 +70,10 @@ interface IPreparedModel extends @1.2::IPreparedModel {
*
* execute_1_3 can be called with an optional deadline. If the execution
* is not able to be completed before the provided deadline, the execution
* must be aborted, and either {@link
* may be aborted, and either {@link
* ErrorStatus::MISSED_DEADLINE_TRANSIENT} or {@link
* ErrorStatus::MISSED_DEADLINE_PERSISTENT} must be returned. The error due
* ErrorStatus::MISSED_DEADLINE_PERSISTENT} may be returned. The error due
* to an abort must be sent the same way as other errors, described above.
* If the service reports that it does not support execution deadlines via
* IDevice::supportsDeadlines, and execute_1_3 is called with a deadline,
* then the argument is invalid, and {@link ErrorStatus::INVALID_ARGUMENT}
* must be returned.
*
* Any number of calls to the execute* and executeSynchronously* functions,
* in any combination, may be made concurrently, even on the same
@ -89,9 +85,9 @@ interface IPreparedModel extends @1.2::IPreparedModel {
* The duration runs from the time the driver sees the call
* to the execute_1_3 function to the time the driver invokes
* the callback.
* @param deadline The time by which the execution must complete. If the
* execution cannot be finished by the deadline, the
* execution must be aborted.
* @param deadline The time by which the execution is expected to complete.
* If the execution cannot be completed by the deadline, the
* execution may be aborted.
* @param loopTimeoutDuration The maximum amount of time that should be spent
* executing a {@link OperationType::WHILE}
* operation. If a loop condition model does not
@ -116,8 +112,8 @@ interface IPreparedModel extends @1.2::IPreparedModel {
* not large enough to store the resultant values
* - INVALID_ARGUMENT if one of the input arguments is
* invalid
* - MISSED_DEADLINE_* if the deadline for executing a model
* cannot be met
* - MISSED_DEADLINE_* if the execution is aborted because it
* cannot be completed by the deadline
* - RESOURCE_EXHAUSTED_* if the task was aborted by the
* driver
*/
@ -150,16 +146,12 @@ interface IPreparedModel extends @1.2::IPreparedModel {
* (ErrorStatus::NONE): There must be no failure unless the device itself is
* in a bad state.
*
* executeSynchronously_1_3 can be called with an optional deadline. If the
* executeSynchronously_1_3 may be called with an optional deadline. If the
* execution is not able to be completed before the provided deadline, the
* execution must be aborted, and either {@link
* execution may be aborted, and either {@link
* ErrorStatus::MISSED_DEADLINE_TRANSIENT} or {@link
* ErrorStatus::MISSED_DEADLINE_PERSISTENT} must be returned. The error due
* ErrorStatus::MISSED_DEADLINE_PERSISTENT} may be returned. The error due
* to an abort must be sent the same way as other errors, described above.
* If the service reports that it does not support execution deadlines via
* IDevice::supportsDeadlines, and executeSynchronously_1_3 is called with a
* deadline, then the argument is invalid, and
* {@link ErrorStatus::INVALID_ARGUMENT} must be returned.
*
* Any number of calls to the execute* and executeSynchronously* functions,
* in any combination, may be made concurrently, even on the same
@ -171,9 +163,9 @@ interface IPreparedModel extends @1.2::IPreparedModel {
* The duration runs from the time the driver sees the call
* to the executeSynchronously_1_3 function to the time the driver
* returns from the function.
* @param deadline The time by which the execution must complete. If the
* execution cannot be finished by the deadline, the
* execution must be aborted.
* @param deadline The time by which the execution is expected to complete.
* If the execution cannot be finished by the deadline, the
* execution may be aborted.
* @param loopTimeoutDuration The maximum amount of time that should be spent
* executing a {@link OperationType::WHILE}
* operation. If a loop condition model does not
@ -194,8 +186,8 @@ interface IPreparedModel extends @1.2::IPreparedModel {
* corresponding output
* - INVALID_ARGUMENT if one of the input arguments is
* invalid
* - MISSED_DEADLINE_* if the deadline for executing a model
* cannot be met
* - MISSED_DEADLINE_* if the execution is aborted because it
* cannot be completed by the deadline
* - RESOURCE_EXHAUSTED_* if the task was aborted by the
* driver
* @return outputShapes A list of shape information of model output operands.
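
Taken together with the parameter documentation above, a caller that wants to bound a synchronous execution supplies a steady_clock-based time point and must now be prepared for either outcome. A small client-side sketch (not part of this change): preparedModel and request are assumed to exist, the 10 ms budget is arbitrary, and android-base logging is assumed for CHECK/LOG:

#include <android-base/logging.h>
#include <chrono>

// Give the driver roughly 10 ms. Under the relaxed semantics the execution may still
// finish after the deadline, so the caller accepts NONE as well as MISSED_DEADLINE_*.
OptionalTimePoint deadline;
const auto budget = std::chrono::steady_clock::now() + std::chrono::milliseconds{10};
deadline.nanosecondsSinceEpoch(
        std::chrono::duration_cast<std::chrono::nanoseconds>(budget.time_since_epoch())
                .count());
const Return<void> ret = preparedModel->executeSynchronously_1_3(
        request, MeasureTiming::NO, deadline, /*loopTimeoutDuration=*/{},
        [](ErrorStatus status, const hidl_vec<OutputShape>&, const Timing&) {
            if (status != ErrorStatus::NONE &&
                status != ErrorStatus::MISSED_DEADLINE_TRANSIENT &&
                status != ErrorStatus::MISSED_DEADLINE_PERSISTENT) {
                LOG(ERROR) << "unexpected status: " << static_cast<int>(status);
            }
        });
CHECK(ret.isOk());
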
@ -236,17 +228,13 @@ interface IPreparedModel extends @1.2::IPreparedModel {
* any data object referenced by 'request' (described by the
* {@link @1.0::DataLocation} of a {@link @1.0::RequestArgument}).
*
* executeFenced can be called with an optional deadline and an optional duration.
* executeFenced may be called with an optional deadline and an optional duration.
* If the execution is not able to be completed before the provided deadline or
* within the timeout duration (measured from when all sync fences in waitFor are
* signaled), whichever comes earlier, the execution must be aborted, and either
* signaled), whichever comes earlier, the execution may be aborted, and either
* {@link ErrorStatus::MISSED_DEADLINE_TRANSIENT} or {@link
* ErrorStatus::MISSED_DEADLINE_PERSISTENT} must be returned. The error due
* ErrorStatus::MISSED_DEADLINE_PERSISTENT} may be returned. The error due
* to an abort must be sent the same way as other errors, described above.
* If the service reports that it does not support execution deadlines via
* IDevice::supportsDeadlines, and executeFenced is called with a
* deadline or duration, then the argument is invalid, and
* {@link ErrorStatus::INVALID_ARGUMENT} must be returned.
*
* If any of the sync fences in waitFor changes to error status after the executeFenced
* call succeeds, or the execution is aborted because it cannot finish before the deadline
@ -263,9 +251,9 @@ interface IPreparedModel extends @1.2::IPreparedModel {
* @param waitFor A vector of sync fence file descriptors.
* Execution must not start until all sync fences have been signaled.
* @param measure Specifies whether or not to measure duration of the execution.
* @param deadline The time by which the execution must complete. If the
* execution cannot be finished by the deadline, the
* execution must be aborted.
* @param deadline The time by which the execution is expected to complete.
* If the execution cannot be finished by the deadline, the
* execution may be aborted.
* @param loopTimeoutDuration The maximum amount of time that should be spent
* executing a {@link OperationType::WHILE}
* operation. If a loop condition model does not
@ -277,18 +265,18 @@ interface IPreparedModel extends @1.2::IPreparedModel {
* LoopTimeoutDurationNs::DEFAULT}. When
* provided, the duration must not exceed {@link
* LoopTimeoutDurationNs::MAXIMUM}.
* @param duration The length of time within which the execution must
* complete after all sync fences in waitFor are signaled. If the
* execution cannot be finished within the duration, the execution
* must be aborted.
* @param duration The length of time within which the execution is expected
* to complete after all sync fences in waitFor are signaled.
* If the execution cannot be finished within the duration,
* the execution may be aborted.
* @return status Error status of the call, must be:
* - NONE if task is successfully launched
* - DEVICE_UNAVAILABLE if driver is offline or busy
* - GENERAL_FAILURE if there is an unspecified error
* - INVALID_ARGUMENT if one of the input arguments is invalid, including
* fences in error states.
* - MISSED_DEADLINE_* if the deadline for executing a model
* cannot be met
* - MISSED_DEADLINE_* if the execution is aborted because it
* cannot be completed by the deadline
* - RESOURCE_EXHAUSTED_* if the task was aborted by the
* driver
* @return syncFence The sync fence that will be signaled when the task is completed.
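
The executeFenced bound described above combines an absolute deadline with a relative duration that only starts counting once every fence in waitFor has signaled, and the earlier of the two wins. A one-line sketch of that rule; the function and the allFencesSignaledAt parameter are illustrative names, not an API from this change:

#include <algorithm>
#include <chrono>

// Effective cutoff for executeFenced: whichever of the absolute deadline or
// "duration after all waitFor fences signaled" comes first.
static std::chrono::steady_clock::time_point effectiveCutoff(
        std::chrono::steady_clock::time_point deadline,
        std::chrono::steady_clock::time_point allFencesSignaledAt,
        std::chrono::nanoseconds duration) {
    return std::min(deadline, allFencesSignaledAt + duration);
}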

View file

@ -47,8 +47,8 @@ interface IPreparedModelCallback extends @1.2::IPreparedModelCallback {
* unspecified error
* - INVALID_ARGUMENT if one of the input arguments to
* prepareModel is invalid
* - MISSED_DEADLINE_* if the deadline for executing a model
* cannot be met
* - MISSED_DEADLINE_* if the preparation is aborted because
* the model cannot be prepared by the deadline
* - RESOURCE_EXHAUSTED_* if the task was aborted by the
* driver
* @param preparedModel A model that has been asynchronously prepared for

View file

@ -1584,6 +1584,17 @@ enum OperationType : int32_t {
* * 3: An optional {@link OperandType::BOOL} scalar, default to false.
* Set to true to specify NCHW data layout for input0 and output0.
* Available since HAL version 1.2.
* * 4: Align corners. An optional {@link OperandType::BOOL}
* scalar, default to false. If True, the centers of the 4 corner
* pixels of the input and output tensors are aligned, preserving the
* values at the corner pixels.
* Available since HAL version 1.3.
* * 5: Half pixel centers. An optional {@link OperandType::BOOL}
* scalar, default to false. If True, the pixel centers are assumed to
* be at (0.5, 0.5). This is the default behavior of image.resize in
* TF 2.0. If this parameter is True, then align_corners parameter
* must be False.
* Available since HAL version 1.3.
*
* Inputs (resizing by scale, since HAL version 1.2):
* * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
@ -1602,6 +1613,17 @@ enum OperationType : int32_t {
* {@link OperandType::FLOAT32} otherwise.
* * 3: An optional {@link OperandType::BOOL} scalar, default to false.
* Set to true to specify NCHW data layout for input0 and output0.
* * 4: Align corners. An optional {@link OperandType::BOOL}
* scalar, default to false. If True, the centers of the 4 corner
* pixels of the input and output tensors are aligned, preserving the
* values at the corner pixels.
* Available since HAL version 1.3.
* * 5: Half pixel centers. An optional {@link OperandType::BOOL}
* scalar, default to false. If True, the pixel centers are assumed to
* be at (0.5, 0.5). This is the default behavior of image.resize in
* TF 2.0. If this parameter is True, then align_corners parameter
* must be False.
* Available since HAL version 1.3.
*
* Outputs:
* * 0: The output 4-D tensor, of shape
@ -4870,6 +4892,17 @@ enum OperationType : int32_t {
* height of the output tensor.
* * 3: An {@link OperandType::BOOL} scalar, default to false.
* Set to true to specify NCHW data layout for input0 and output0.
* * 4: Align corners. An optional {@link OperandType::BOOL}
* scalar, default to false. If True, the centers of the 4 corner
* pixels of the input and output tensors are aligned, preserving the
* values at the corner pixels.
* Available since HAL version 1.3.
* * 5: Half pixel centers. An optional {@link OperandType::BOOL}
* scalar, default to false. If True, the pixel centers are assumed to
* be at (0.5, 0.5). This is the default behavior of image.resize in
* TF 2.0. If this parameter is True, then align_corners parameter
* must be False.
* Available since HAL version 1.3.
*
* Inputs (resizing by scale):
* * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
@ -4888,6 +4921,17 @@ enum OperationType : int32_t {
* {@link OperandType::FLOAT32} otherwise.
* * 3: An {@link OperandType::BOOL} scalar, default to false.
* Set to true to specify NCHW data layout for input0 and output0.
* * 4: Align corners. An optional {@link OperandType::BOOL}
* scalar, default to false. If True, the centers of the 4 corner
* pixels of the input and output tensors are aligned, preserving the
* values at the corner pixels.
* Available since HAL version 1.3.
* * 5: Half pixel centers. An optional {@link OperandType::BOOL}
* scalar, default to false. If True, the pixel centers are assumed to
* be at (0.5, 0.5). This is the default behavior of image.resize in
* TF 2.0. If this parameter is True, then align_corners parameter
* must be False.
* Available since HAL version 1.3.
*
* Outputs:
* * 0: The output 4-D tensor, of shape
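
The two new flags select among the coordinate-transformation conventions used by TensorFlow's image resizing. A sketch of the source-coordinate computation they imply along one axis; this illustrates the documented semantics and is not code taken from this change:

// Maps an output pixel index to a (fractional) input coordinate along one axis.
// half_pixel_centers takes precedence; the documentation above requires that
// align_corners be false whenever half_pixel_centers is true.
static float sourceCoordinate(int outIndex, int inSize, int outSize,
                              bool alignCorners, bool halfPixelCenters) {
    if (halfPixelCenters) {
        // Pixel centers sit at half-integer positions (TF 2.0 image.resize default).
        const float scale = static_cast<float>(inSize) / static_cast<float>(outSize);
        return (static_cast<float>(outIndex) + 0.5f) * scale - 0.5f;
    }
    if (alignCorners && outSize > 1) {
        // The corner pixels of input and output are mapped onto each other.
        const float scale =
                static_cast<float>(inSize - 1) / static_cast<float>(outSize - 1);
        return static_cast<float>(outIndex) * scale;
    }
    // Legacy behavior when both flags are false (or omitted).
    return static_cast<float>(outIndex) * static_cast<float>(inSize) /
           static_cast<float>(outSize);
}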

View file

@ -858,12 +858,6 @@ void Execute(const sp<IDevice>& device, const TestModel& testModel, TestKind tes
void GeneratedTestBase::SetUp() {
testing::TestWithParam<GeneratedTestParam>::SetUp();
ASSERT_NE(kDevice, nullptr);
const Return<void> ret =
kDevice->supportsDeadlines([this](bool prepareModelDeadline, bool executionDeadline) {
mSupportsDeadlines = {prepareModelDeadline, executionDeadline};
});
ASSERT_TRUE(ret.isOk());
}
std::vector<NamedModel> getNamedModels(const FilterFn& filter) {

View file

@ -36,7 +36,6 @@ class GeneratedTestBase : public testing::TestWithParam<GeneratedTestParam> {
void SetUp() override;
const sp<IDevice> kDevice = getData(std::get<NamedDevice>(GetParam()));
const test_helper::TestModel& kTestModel = *getData(std::get<NamedModel>(GetParam()));
std::pair<bool, bool> mSupportsDeadlines;
};
using FilterFn = std::function<bool(const test_helper::TestModel&)>;

View file

@ -34,45 +34,52 @@ using V1_2::Timing;
using HidlToken =
hidl_array<uint8_t, static_cast<uint32_t>(V1_2::Constant::BYTE_SIZE_OF_CACHE_TOKEN)>;
enum class DeadlineBoundType { NOW, UNLIMITED };
constexpr std::array<DeadlineBoundType, 2> deadlineBounds = {DeadlineBoundType::NOW,
DeadlineBoundType::UNLIMITED};
enum class DeadlineBoundType { NOW, UNLIMITED, SHORT };
constexpr std::array<DeadlineBoundType, 3> deadlineBounds = {
DeadlineBoundType::NOW, DeadlineBoundType::UNLIMITED, DeadlineBoundType::SHORT};
std::string toString(DeadlineBoundType type) {
switch (type) {
case DeadlineBoundType::NOW:
return "NOW";
case DeadlineBoundType::UNLIMITED:
return "UNLIMITED";
case DeadlineBoundType::SHORT:
return "SHORT";
}
LOG(FATAL) << "Unrecognized DeadlineBoundType: " << static_cast<int>(type);
return {};
}
constexpr auto kShortDuration = std::chrono::milliseconds{5};
using Results = std::tuple<ErrorStatus, hidl_vec<OutputShape>, Timing>;
using MaybeResults = std::optional<Results>;
using ExecutionFunction =
std::function<MaybeResults(const sp<IPreparedModel>& preparedModel, const Request& request,
DeadlineBoundType deadlineBound)>;
const OptionalTimePoint& deadline)>;
static OptionalTimePoint makeOptionalTimePoint(DeadlineBoundType deadlineBoundType) {
OptionalTimePoint deadline;
static OptionalTimePoint makeDeadline(DeadlineBoundType deadlineBoundType) {
const auto getNanosecondsSinceEpoch = [](const auto& time) -> uint64_t {
const auto timeSinceEpoch = time.time_since_epoch();
return std::chrono::duration_cast<std::chrono::nanoseconds>(timeSinceEpoch).count();
};
std::chrono::steady_clock::time_point timePoint;
switch (deadlineBoundType) {
case DeadlineBoundType::NOW: {
const auto currentTime = std::chrono::steady_clock::now();
const auto currentTimeInNanoseconds =
std::chrono::time_point_cast<std::chrono::nanoseconds>(currentTime);
const uint64_t nanosecondsSinceEpoch =
currentTimeInNanoseconds.time_since_epoch().count();
deadline.nanosecondsSinceEpoch(nanosecondsSinceEpoch);
} break;
case DeadlineBoundType::UNLIMITED: {
const auto maxTime = std::chrono::time_point<std::chrono::steady_clock,
std::chrono::nanoseconds>::max();
const uint64_t nanosecondsSinceEpoch = maxTime.time_since_epoch().count();
deadline.nanosecondsSinceEpoch(nanosecondsSinceEpoch);
} break;
case DeadlineBoundType::NOW:
timePoint = std::chrono::steady_clock::now();
break;
case DeadlineBoundType::UNLIMITED:
timePoint = std::chrono::steady_clock::time_point::max();
break;
case DeadlineBoundType::SHORT:
timePoint = std::chrono::steady_clock::now() + kShortDuration;
break;
}
OptionalTimePoint deadline;
deadline.nanosecondsSinceEpoch(getNanosecondsSinceEpoch(timePoint));
return deadline;
}
@ -80,7 +87,7 @@ void runPrepareModelTest(const sp<IDevice>& device, const Model& model, Priority
std::optional<DeadlineBoundType> deadlineBound) {
OptionalTimePoint deadline;
if (deadlineBound.has_value()) {
deadline = makeOptionalTimePoint(deadlineBound.value());
deadline = makeDeadline(deadlineBound.value());
}
// see if service can handle model
@ -127,11 +134,11 @@ void runPrepareModelTest(const sp<IDevice>& device, const Model& model, Priority
} else {
switch (deadlineBound.value()) {
case DeadlineBoundType::NOW:
// If the execution was launched with a deadline of NOW, the
// deadline has already passed when the driver would launch the
// execution. In this case, the driver must return
// MISSED_DEADLINE_*.
EXPECT_TRUE(prepareReturnStatus == ErrorStatus::MISSED_DEADLINE_TRANSIENT ||
case DeadlineBoundType::SHORT:
// Either the driver successfully completed the task or it
// aborted and returned MISSED_DEADLINE_*.
EXPECT_TRUE(prepareReturnStatus == ErrorStatus::NONE ||
prepareReturnStatus == ErrorStatus::MISSED_DEADLINE_TRANSIENT ||
prepareReturnStatus == ErrorStatus::MISSED_DEADLINE_PERSISTENT);
break;
case DeadlineBoundType::UNLIMITED:
@ -145,8 +152,7 @@ void runPrepareModelTest(const sp<IDevice>& device, const Model& model, Priority
ASSERT_EQ(prepareReturnStatus == ErrorStatus::NONE, preparedModel.get() != nullptr);
}
void runPrepareModelTests(const sp<IDevice>& device, const Model& model,
bool supportsPrepareModelDeadline) {
void runPrepareModelTests(const sp<IDevice>& device, const Model& model) {
// test priority
for (auto priority : hidl_enum_range<Priority>{}) {
SCOPED_TRACE("priority: " + toString(priority));
@ -155,19 +161,17 @@ void runPrepareModelTests(const sp<IDevice>& device, const Model& model,
}
// test deadline
if (supportsPrepareModelDeadline) {
for (auto deadlineBound : deadlineBounds) {
SCOPED_TRACE("deadlineBound: " + toString(deadlineBound));
runPrepareModelTest(device, model, kDefaultPriority, deadlineBound);
}
for (auto deadlineBound : deadlineBounds) {
SCOPED_TRACE("deadlineBound: " + toString(deadlineBound));
runPrepareModelTest(device, model, kDefaultPriority, deadlineBound);
}
}
static MaybeResults executeAsynchronously(const sp<IPreparedModel>& preparedModel,
const Request& request, DeadlineBoundType deadlineBound) {
const Request& request,
const OptionalTimePoint& deadline) {
SCOPED_TRACE("asynchronous");
const MeasureTiming measure = MeasureTiming::NO;
const OptionalTimePoint deadline = makeOptionalTimePoint(deadlineBound);
// launch execution
const sp<ExecutionCallback> callback = new ExecutionCallback();
@ -187,14 +191,17 @@ static MaybeResults executeAsynchronously(const sp<IPreparedModel>& preparedMode
}
static MaybeResults executeSynchronously(const sp<IPreparedModel>& preparedModel,
const Request& request, DeadlineBoundType deadlineBound) {
const Request& request,
const OptionalTimePoint& deadline) {
SCOPED_TRACE("synchronous");
const MeasureTiming measure = MeasureTiming::NO;
const OptionalTimePoint deadline = makeOptionalTimePoint(deadlineBound);
// configure results callback
MaybeResults results;
const auto cb = [&results](const auto&... args) { *results = {args...}; };
const auto cb = [&results](ErrorStatus status, const hidl_vec<OutputShape>& outputShapes,
const Timing& timing) {
results.emplace(status, outputShapes, timing);
};
// run execution
const Return<void> ret =
@ -209,9 +216,10 @@ static MaybeResults executeSynchronously(const sp<IPreparedModel>& preparedModel
void runExecutionTest(const sp<IPreparedModel>& preparedModel, const TestModel& testModel,
const Request& request, bool synchronous, DeadlineBoundType deadlineBound) {
const ExecutionFunction execute = synchronous ? executeSynchronously : executeAsynchronously;
const auto deadline = makeDeadline(deadlineBound);
// Perform execution and unpack results.
const auto results = execute(preparedModel, request, deadlineBound);
const auto results = execute(preparedModel, request, deadline);
if (!results.has_value()) return;
const auto& [status, outputShapes, timing] = results.value();
@ -222,13 +230,13 @@ void runExecutionTest(const sp<IPreparedModel>& preparedModel, const TestModel&
// Validate deadline information if applicable.
switch (deadlineBound) {
case DeadlineBoundType::NOW:
// If the execution was launched with a deadline of NOW, the
// deadline has already passed when the driver would launch the
// execution. In this case, the driver must return
// MISSED_DEADLINE_*.
ASSERT_TRUE(status == ErrorStatus::MISSED_DEADLINE_TRANSIENT ||
case DeadlineBoundType::SHORT:
// Either the driver successfully completed the task or it
// aborted and returned MISSED_DEADLINE_*.
ASSERT_TRUE(status == ErrorStatus::NONE ||
status == ErrorStatus::MISSED_DEADLINE_TRANSIENT ||
status == ErrorStatus::MISSED_DEADLINE_PERSISTENT);
return;
break;
case DeadlineBoundType::UNLIMITED:
// If an unlimited deadline is supplied, we expect the execution to
// proceed normally. In this case, check it normally by breaking out
@ -256,7 +264,9 @@ void runExecutionTest(const sp<IPreparedModel>& preparedModel, const TestModel&
const std::vector<TestBuffer> outputs = getOutputBuffers(request10);
// We want "close-enough" results.
checkResults(testModel, outputs);
if (status == ErrorStatus::NONE) {
checkResults(testModel, outputs);
}
}
void runExecutionTests(const sp<IPreparedModel>& preparedModel, const TestModel& testModel,
@ -268,32 +278,27 @@ void runExecutionTests(const sp<IPreparedModel>& preparedModel, const TestModel&
}
}
void runTests(const sp<IDevice>& device, const TestModel& testModel,
std::pair<bool, bool> supportsDeadlines) {
void runTests(const sp<IDevice>& device, const TestModel& testModel) {
// setup
const auto [supportsPrepareModelDeadline, supportsExecutionDeadline] = supportsDeadlines;
if (!supportsPrepareModelDeadline && !supportsExecutionDeadline) return;
const Model model = createModel(testModel);
// run prepare model tests
runPrepareModelTests(device, model, supportsPrepareModelDeadline);
runPrepareModelTests(device, model);
if (supportsExecutionDeadline) {
// prepare model
sp<IPreparedModel> preparedModel;
createPreparedModel(device, model, &preparedModel);
if (preparedModel == nullptr) return;
// prepare model
sp<IPreparedModel> preparedModel;
createPreparedModel(device, model, &preparedModel);
if (preparedModel == nullptr) return;
// run execution tests
const Request request = nn::convertToV1_3(createRequest(testModel));
runExecutionTests(preparedModel, testModel, request);
}
// run execution tests
const Request request = nn::convertToV1_3(createRequest(testModel));
runExecutionTests(preparedModel, testModel, request);
}
class DeadlineTest : public GeneratedTestBase {};
TEST_P(DeadlineTest, Test) {
runTests(kDevice, kTestModel, mSupportsDeadlines);
runTests(kDevice, kTestModel);
}
INSTANTIATE_GENERATED_TEST(DeadlineTest,

View file

@ -44,18 +44,12 @@ static void validateGetSupportedOperations(const sp<IDevice>& device, const std:
}
static void validatePrepareModel(const sp<IDevice>& device, const std::string& message,
const Model& model, ExecutionPreference preference,
bool testDeadline) {
const Model& model, ExecutionPreference preference) {
SCOPED_TRACE(message + " [prepareModel_1_3]");
OptionalTimePoint deadline;
if (testDeadline) {
deadline.nanosecondsSinceEpoch(std::numeric_limits<uint64_t>::max());
}
sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
Return<ErrorStatus> prepareLaunchStatus = device->prepareModel_1_3(
model, preference, kDefaultPriority, deadline, hidl_vec<hidl_handle>(),
model, preference, kDefaultPriority, {}, hidl_vec<hidl_handle>(),
hidl_vec<hidl_handle>(), HidlToken(), preparedModelCallback);
ASSERT_TRUE(prepareLaunchStatus.isOk());
ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, static_cast<ErrorStatus>(prepareLaunchStatus));
@ -79,13 +73,12 @@ static bool validExecutionPreference(ExecutionPreference preference) {
// to the model does not leave this function.
static void validate(const sp<IDevice>& device, const std::string& message, Model model,
const std::function<void(Model*)>& mutation,
ExecutionPreference preference = ExecutionPreference::FAST_SINGLE_ANSWER,
bool testDeadline = false) {
ExecutionPreference preference = ExecutionPreference::FAST_SINGLE_ANSWER) {
mutation(&model);
if (validExecutionPreference(preference) && !testDeadline) {
if (validExecutionPreference(preference)) {
validateGetSupportedOperations(device, message, model);
}
validatePrepareModel(device, message, model, preference, testDeadline);
validatePrepareModel(device, message, model, preference);
}
static uint32_t addOperand(Model* model) {
@ -585,6 +578,8 @@ static bool removeOperationInputSkip(const Operation& op, size_t input) {
// - CONV_2D, DEPTHWISE_CONV_2D, MAX_POOL_2D, AVERAGE_POOL_2D, L2_POOL_2D, RESIZE_BILINEAR,
// SPACE_TO_DEPTH, SPACE_TO_BATCH_ND, BATCH_TO_SPACE_ND can have an optional
// layout parameter.
// RESIZE_BILINEAR and RESIZE_NEAREST_NEIGHBOR can have optional
// align_corners and half_pixel_centers parameters.
// - L2_NORMALIZATION, LOCAL_RESPONSE_NORMALIZATION, SOFTMAX can have an optional axis
// parameter.
switch (op.type) {
@ -607,7 +602,12 @@ static bool removeOperationInputSkip(const Operation& op, size_t input) {
}
} break;
case OperationType::RESIZE_BILINEAR: {
if (op.inputs.size() == 4 && input == 3) {
if (op.inputs.size() >= 4 && input >= 3) {
return true;
}
} break;
case OperationType::RESIZE_NEAREST_NEIGHBOR: {
if (op.inputs.size() >= 5 && input >= 3) {
return true;
}
} break;
@ -693,7 +693,9 @@ static bool addOperationInputSkip(const Operation& op) {
// parameter.
if ((op.type == OperationType::L2_NORMALIZATION && op.inputs.size() == 1) ||
(op.type == OperationType::LOCAL_RESPONSE_NORMALIZATION && op.inputs.size() == 5) ||
(op.type == OperationType::SOFTMAX && op.inputs.size() == 2)) {
(op.type == OperationType::SOFTMAX && op.inputs.size() == 2) ||
(op.type == OperationType::RESIZE_BILINEAR && op.inputs.size() < 6) ||
(op.type == OperationType::RESIZE_NEAREST_NEIGHBOR && op.inputs.size() < 6)) {
return true;
}
return false;
@ -744,19 +746,9 @@ static void mutateExecutionPreferenceTest(const sp<IDevice>& device, const Model
}
}
///////////////////////// DEADLINE /////////////////////////
static void deadlineTest(const sp<IDevice>& device, const Model& model) {
const std::string message = "deadlineTest: deadline not supported";
const auto noop = [](Model*) {};
validate(device, message, model, noop, ExecutionPreference::FAST_SINGLE_ANSWER,
/*testDeadline=*/true);
}
////////////////////////// ENTRY POINT //////////////////////////////
void validateModel(const sp<IDevice>& device, const Model& model,
bool prepareModelDeadlineSupported) {
void validateModel(const sp<IDevice>& device, const Model& model) {
mutateOperandTypeTest(device, model);
mutateOperandRankTest(device, model);
mutateOperandScaleTest(device, model);
@ -772,9 +764,6 @@ void validateModel(const sp<IDevice>& device, const Model& model,
addOperationInputTest(device, model);
addOperationOutputTest(device, model);
mutateExecutionPreferenceTest(device, model);
if (!prepareModelDeadlineSupported) {
deadlineTest(device, model);
}
}
} // namespace android::hardware::neuralnetworks::V1_3::vts::functional

View file

@ -45,8 +45,7 @@ static bool badTiming(Timing timing) {
// that use the request. Note that the request here is passed by value, and any
// mutation to the request does not leave this function.
static void validate(const sp<IPreparedModel>& preparedModel, const std::string& message,
Request request, const std::function<void(Request*)>& mutation,
bool testDeadline = false) {
Request request, const std::function<void(Request*)>& mutation) {
mutation(&request);
// We'd like to test both with timing requested and without timing
@ -59,18 +58,13 @@ static void validate(const sp<IPreparedModel>& preparedModel, const std::string&
};
MeasureTiming measure = (hash & 1) ? MeasureTiming::YES : MeasureTiming::NO;
OptionalTimePoint deadline;
if (testDeadline) {
deadline.nanosecondsSinceEpoch(std::numeric_limits<uint64_t>::max());
}
// asynchronous
{
SCOPED_TRACE(message + " [execute_1_3]");
sp<ExecutionCallback> executionCallback = new ExecutionCallback();
Return<ErrorStatus> executeLaunchStatus =
preparedModel->execute_1_3(request, measure, deadline, {}, executionCallback);
preparedModel->execute_1_3(request, measure, {}, {}, executionCallback);
ASSERT_TRUE(executeLaunchStatus.isOk());
ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, static_cast<ErrorStatus>(executeLaunchStatus));
@ -88,7 +82,7 @@ static void validate(const sp<IPreparedModel>& preparedModel, const std::string&
SCOPED_TRACE(message + " [executeSynchronously_1_3]");
Return<void> executeStatus = preparedModel->executeSynchronously_1_3(
request, measure, deadline, {},
request, measure, {}, {},
[](ErrorStatus error, const hidl_vec<OutputShape>& outputShapes,
const Timing& timing) {
ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, error);
@ -100,7 +94,7 @@ static void validate(const sp<IPreparedModel>& preparedModel, const std::string&
// burst
// TODO(butlermichael): Check if we need to test burst in V1_3 if the interface remains V1_2.
if (!testDeadline) {
{
SCOPED_TRACE(message + " [burst]");
ASSERT_TRUE(nn::compliantWithV1_0(request));
@ -143,7 +137,7 @@ static void validate(const sp<IPreparedModel>& preparedModel, const std::string&
{
SCOPED_TRACE(message + " [executeFenced]");
Return<void> ret =
preparedModel->executeFenced(request, {}, MeasureTiming::NO, deadline, {}, {},
preparedModel->executeFenced(request, {}, MeasureTiming::NO, {}, {}, {},
[](ErrorStatus error, const hidl_handle& handle,
const sp<IFencedExecutionCallback>& callback) {
ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, error);
@ -174,23 +168,11 @@ static void removeOutputTest(const sp<IPreparedModel>& preparedModel, const Requ
}
}
///////////////////////// DEADLINE ////////////////////////////////////
static void deadlineTest(const sp<IPreparedModel>& preparedModel, const Request& request) {
const std::string message = "deadlineTest: deadline not supported";
const auto noop = [](Request*) {};
validate(preparedModel, message, request, noop, /*testDeadline=*/true);
}
///////////////////////////// ENTRY POINT //////////////////////////////////
void validateRequest(const sp<IPreparedModel>& preparedModel, const Request& request,
bool executionDeadlineSupported) {
void validateRequest(const sp<IPreparedModel>& preparedModel, const Request& request) {
removeInputTest(preparedModel, request);
removeOutputTest(preparedModel, request);
if (!executionDeadlineSupported) {
deadlineTest(preparedModel, request);
}
}
void validateRequestFailure(const sp<IPreparedModel>& preparedModel, const Request& request) {

View file

@ -123,11 +123,9 @@ std::string printNeuralnetworksHidlTest(
INSTANTIATE_DEVICE_TEST(NeuralnetworksHidlTest);
// Forward declaration from ValidateModel.cpp
void validateModel(const sp<IDevice>& device, const Model& model,
bool prepareModelDeadlineSupported);
void validateModel(const sp<IDevice>& device, const Model& model);
// Forward declaration from ValidateRequest.cpp
void validateRequest(const sp<IPreparedModel>& preparedModel, const Request& request,
bool executionDeadlineSupported);
void validateRequest(const sp<IPreparedModel>& preparedModel, const Request& request);
// Forward declaration from ValidateRequest.cpp
void validateRequestFailure(const sp<IPreparedModel>& preparedModel, const Request& request);
// Forward declaration from ValidateBurst.cpp
@ -147,17 +145,15 @@ void validateExecuteFenced(const sp<IPreparedModel>& preparedModel, const Reques
ASSERT_TRUE(ret_null.isOk());
}
void validateEverything(const sp<IDevice>& device, const Model& model, const Request& request,
std::pair<bool, bool> supportsDeadlines) {
const auto [prepareModelDeadlineSupported, executionDeadlineSupported] = supportsDeadlines;
validateModel(device, model, prepareModelDeadlineSupported);
void validateEverything(const sp<IDevice>& device, const Model& model, const Request& request) {
validateModel(device, model);
// Create IPreparedModel.
sp<IPreparedModel> preparedModel;
createPreparedModel(device, model, &preparedModel);
if (preparedModel == nullptr) return;
validateRequest(preparedModel, request, executionDeadlineSupported);
validateRequest(preparedModel, request);
validateExecuteFenced(preparedModel, request);
// TODO(butlermichael): Check if we need to test burst in V1_3 if the interface remains V1_2.
@ -166,12 +162,10 @@ void validateEverything(const sp<IDevice>& device, const Model& model, const Req
validateBurst(preparedModel, request10);
}
void validateFailure(const sp<IDevice>& device, const Model& model, const Request& request,
std::pair<bool, bool> supportsDeadlines) {
const bool prepareModelDeadlineSupported = supportsDeadlines.first;
void validateFailure(const sp<IDevice>& device, const Model& model, const Request& request) {
// TODO: Should this always succeed?
// What if the invalid input is part of the model (i.e., a parameter).
validateModel(device, model, prepareModelDeadlineSupported);
validateModel(device, model);
// Create IPreparedModel.
sp<IPreparedModel> preparedModel;
@ -185,9 +179,9 @@ TEST_P(ValidationTest, Test) {
const Model model = createModel(kTestModel);
const Request request = nn::convertToV1_3(createRequest(kTestModel));
if (kTestModel.expectFailure) {
validateFailure(kDevice, model, request, mSupportsDeadlines);
validateFailure(kDevice, model, request);
} else {
validateEverything(kDevice, model, request, mSupportsDeadlines);
validateEverything(kDevice, model, request);
}
}