From 55a3d328b72f93cdbc0c2ac987c85677e0f1d826 Mon Sep 17 00:00:00 2001
From: David Gross
Date: Wed, 23 Jan 2019 14:01:52 -0800
Subject: [PATCH] Update neuralnetworks HAL to allow collecting execution
 duration.

Test: VtsHalNeuralnetworksV1_0TargetTest --hal_service_instance=android.hardware.neuralnetworks@1.0::IDevice/sample-all
Test: VtsHalNeuralnetworksV1_1TargetTest --hal_service_instance=android.hardware.neuralnetworks@1.1::IDevice/sample-all
Test: VtsHalNeuralnetworksV1_2TargetTest --hal_service_instance=android.hardware.neuralnetworks@1.2::IDevice/sample-all
Bug: 115390094

Change-Id: If67a5ffe39cfdd78498e01f26251734fdc8e66c7
Merged-In: If67a5ffe39cfdd78498e01f26251734fdc8e66c7
(cherry picked from commit e301349b0597f2cc136703f2aaa182e6a546b50c)
---
 .../1.0/vts/functional/Callbacks.cpp          |  11 +-
 neuralnetworks/1.0/vts/functional/Callbacks.h |  33 ++++-
 .../vts/functional/GeneratedTestHarness.cpp   | 121 ++++++++++++------
 neuralnetworks/1.2/IExecutionCallback.hal     |   8 +-
 neuralnetworks/1.2/IPreparedModel.hal         |  18 ++-
 neuralnetworks/1.2/types.hal                  |  55 +++++++-
 .../1.2/vts/functional/ValidateRequest.cpp    |  29 ++++-
 7 files changed, 214 insertions(+), 61 deletions(-)

diff --git a/neuralnetworks/1.0/vts/functional/Callbacks.cpp b/neuralnetworks/1.0/vts/functional/Callbacks.cpp
index 03afcd0751..c30702cd99 100644
--- a/neuralnetworks/1.0/vts/functional/Callbacks.cpp
+++ b/neuralnetworks/1.0/vts/functional/Callbacks.cpp
@@ -135,14 +135,18 @@ ExecutionCallback::~ExecutionCallback() {}
 
 Return<void> ExecutionCallback::notify(ErrorStatus errorStatus) {
     mErrorStatus = errorStatus;
+    mOutputShapes = {};
+    mTiming = {.timeOnDevice = UINT64_MAX, .timeInDriver = UINT64_MAX};
     CallbackBase::notify();
     return Void();
 }
 
 Return<void> ExecutionCallback::notify_1_2(ErrorStatus errorStatus,
-                                           const hidl_vec<OutputShape>& outputShapes) {
+                                           const hidl_vec<OutputShape>& outputShapes,
+                                           const Timing& timing) {
     mErrorStatus = errorStatus;
     mOutputShapes = outputShapes;
+    mTiming = timing;
     CallbackBase::notify();
     return Void();
 }
@@ -157,6 +161,11 @@ const std::vector<OutputShape>& ExecutionCallback::getOutputShapes() {
     return mOutputShapes;
 }
 
+Timing ExecutionCallback::getTiming() {
+    wait();
+    return mTiming;
+}
+
 }  // namespace implementation
 }  // namespace V1_2
 }  // namespace neuralnetworks
diff --git a/neuralnetworks/1.0/vts/functional/Callbacks.h b/neuralnetworks/1.0/vts/functional/Callbacks.h
index 46f29a60e7..4707d0a251 100644
--- a/neuralnetworks/1.0/vts/functional/Callbacks.h
+++ b/neuralnetworks/1.0/vts/functional/Callbacks.h
@@ -308,8 +308,20 @@ class ExecutionCallback : public CallbackBase, public IExecutionCallback {
      *                     of the output operand in the Request outputs vector.
      *                     outputShapes must be empty unless the status is either
      *                     NONE or OUTPUT_INSUFFICIENT_SIZE.
+     * @return Timing Duration of execution. Unless MeasureTiming::YES was passed when
+     *                launching the execution and status is NONE, all times must
+     *                be reported as UINT64_MAX. A driver may choose to report
+     *                any time as UINT64_MAX, indicating that particular measurement is
+     *                not available.
      */
-    Return<void> notify_1_2(ErrorStatus status, const hidl_vec<OutputShape>& outputShapes) override;
+    Return<void> notify_1_2(ErrorStatus status, const hidl_vec<OutputShape>& outputShapes,
+                            const Timing& timing) override;
+
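For context, a minimal sketch of how a test consumes this extended callback (illustrative only, not part of the patch; it assumes a prepared model `preparedModel` and a populated `request`, and uses only methods declared in this header):

    sp<ExecutionCallback> callback = new ExecutionCallback();
    Return<ErrorStatus> launch = preparedModel->execute_1_2(request, MeasureTiming::YES, callback);
    if (launch.isOk() && static_cast<ErrorStatus>(launch) == ErrorStatus::NONE) {
        callback->wait();                                   // blocks until notify/notify_1_2 arrives
        ErrorStatus status = callback->getStatus();
        std::vector<OutputShape> shapes = callback->getOutputShapes();
        Timing timing = callback->getTiming();              // both fields UINT64_MAX when not measured
    }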
+    // An overload of the latest notify interface to hide the version from ExecutionBuilder.
+    Return<void> notify(ErrorStatus status, const hidl_vec<OutputShape>& outputShapes,
+                        const Timing& timing) {
+        return notify_1_2(status, outputShapes, timing);
+    }
 
     /**
      * Retrieves the error status returned from the asynchronous task launched
@@ -350,9 +362,24 @@ class ExecutionCallback : public CallbackBase, public IExecutionCallback {
      */
     const std::vector<OutputShape>& getOutputShapes();
 
+    /**
+     * Retrieves the duration of execution of the asynchronous task launched
+     * by IPreparedModel::execute_1_2. If IPreparedModel::execute_1_2 has not finished
+     * asynchronously executing, this call will block until the asynchronous task
+     * notifies the object.
+     *
+     * If the asynchronous task was launched by IPreparedModel::execute, every time
+     * must be UINT64_MAX.
+     *
+     * @return timing Duration of the execution. Every time must be UINT64_MAX unless
+     *                the status is NONE.
+     */
+    Timing getTiming();
+
   private:
-    ErrorStatus mErrorStatus;
-    std::vector<OutputShape> mOutputShapes;
+    ErrorStatus mErrorStatus = ErrorStatus::GENERAL_FAILURE;
+    std::vector<OutputShape> mOutputShapes = {};
+    Timing mTiming = {};
 };
 
diff --git a/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp b/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp
index b5a860771f..65c425ee1e 100644
--- a/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp
+++ b/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp
@@ -45,20 +45,16 @@ using ::test_helper::bool8;
 using ::test_helper::compare;
 using ::test_helper::expectMultinomialDistributionWithinTolerance;
 using ::test_helper::filter;
-using ::test_helper::Float32Operands;
 using ::test_helper::for_all;
 using ::test_helper::for_each;
-using ::test_helper::Int32Operands;
 using ::test_helper::MixedTyped;
 using ::test_helper::MixedTypedExample;
-using ::test_helper::MixedTypedIndex;
-using ::test_helper::Quant8Operands;
 using ::test_helper::resize_accordingly;
 
 template <typename T>
-void copy_back_(MixedTyped* dst, const std::vector<RequestArgument>& ra, char* src) {
-    MixedTyped& test = *dst;
-    for_each<T>(test, [&ra, src](int index, std::vector<T>& m) {
+void copy_back_(std::map<int, std::vector<T>>* dst, const std::vector<RequestArgument>& ra,
+                char* src) {
+    for_each<T>(*dst, [&ra, src](int index, std::vector<T>& m) {
         ASSERT_EQ(m.size(), ra[index].location.length / sizeof(T));
         char* begin = src + ra[index].location.offset;
         memcpy(m.data(), begin, ra[index].location.length);
@@ -66,43 +62,48 @@ void copy_back_(MixedTyped* dst, const std::vector<RequestArgument>& ra, char* s
 }
 
 void copy_back(MixedTyped* dst, const std::vector<RequestArgument>& ra, char* src) {
-    copy_back_<float>(dst, ra, src);
-    copy_back_<int32_t>(dst, ra, src);
-    copy_back_<uint8_t>(dst, ra, src);
-    copy_back_<int16_t>(dst, ra, src);
-    copy_back_<_Float16>(dst, ra, src);
-    copy_back_<bool8>(dst, ra, src);
-    copy_back_<int8_t>(dst, ra, src);
-    static_assert(7 == std::tuple_size<MixedTyped>::value,
+    copy_back_(&dst->float32Operands, ra, src);
+    copy_back_(&dst->int32Operands, ra, src);
+    copy_back_(&dst->quant8AsymmOperands, ra, src);
+    copy_back_(&dst->quant16SymmOperands, ra, src);
+    copy_back_(&dst->float16Operands, ra, src);
+    copy_back_(&dst->bool8Operands, ra, src);
+    copy_back_(&dst->quant8ChannelOperands, ra, src);
+    copy_back_(&dst->quant16AsymmOperands, ra, src);
+    static_assert(8 == MixedTyped::kNumTypes,
                   "Number of types in MixedTyped changed, but copy_back function wasn't updated");
 }
 
 // Top level driver for models and examples generated by test_generator.py
 // Test driver for those generated from ml/nn/runtime/test/spec
 static Return<ErrorStatus> ExecutePreparedModel(sp<V1_0::IPreparedModel>& preparedModel,
-                                                const Request& request,
+                                                const Request& request, MeasureTiming,
                                                 sp<ExecutionCallback>& callback) {
     return preparedModel->execute(request, callback);
 }
 
 static Return<ErrorStatus> ExecutePreparedModel(sp<V1_2::IPreparedModel>& preparedModel,
-                                                const Request& request,
+                                                const Request& request, MeasureTiming measure,
                                                 sp<ExecutionCallback>& callback) {
-    return preparedModel->execute_1_2(request, callback);
+    return preparedModel->execute_1_2(request, measure, callback);
 }
 
 static Return<ErrorStatus> ExecutePreparedModel(sp<V1_0::IPreparedModel>&, const Request&,
-                                                hidl_vec<OutputShape>*) {
+                                                MeasureTiming, hidl_vec<OutputShape>*, Timing*) {
     ADD_FAILURE() << "asking for synchronous execution at V1_0";
     return ErrorStatus::GENERAL_FAILURE;
 }
 
 static Return<ErrorStatus> ExecutePreparedModel(sp<V1_2::IPreparedModel>& preparedModel,
-                                                const Request& request,
-                                                hidl_vec<OutputShape>* outputShapes) {
+                                                const Request& request, MeasureTiming measure,
+                                                hidl_vec<OutputShape>* outputShapes,
+                                                Timing* timing) {
     ErrorStatus result;
     Return<void> ret = preparedModel->executeSynchronously(
-            request, [&result, &outputShapes](ErrorStatus error, const hidl_vec<OutputShape>& shapes) {
-                result = error;
-                *outputShapes = shapes;
-            });
+            request, measure,
+            [&result, outputShapes, timing](ErrorStatus error, const hidl_vec<OutputShape>& shapes,
+                                            const Timing& time) {
+                result = error;
+                *outputShapes = shapes;
+                *timing = time;
+            });
     if (!ret.isOk()) {
         return ErrorStatus::GENERAL_FAILURE;
     }
@@ -114,8 +115,8 @@ const float kDefaultRtol = 1e-5f;
 template <typename T_IPreparedModel>
 void EvaluatePreparedModel(sp<T_IPreparedModel>& preparedModel, std::function<bool(int)> is_ignored,
                            const std::vector<MixedTypedExample>& examples,
-                           bool hasRelaxedFloat32Model = false, float fpAtol = kDefaultAtol,
-                           float fpRtol = kDefaultRtol, Synchronously sync = Synchronously::NO) {
+                           bool hasRelaxedFloat32Model, float fpAtol, float fpRtol,
+                           Synchronously sync, MeasureTiming measure, bool testDynamicOutputShape) {
     const uint32_t INPUT = 0;
     const uint32_t OUTPUT = 1;
 
@@ -125,7 +126,7 @@ void EvaluatePreparedModel(sp<T_IPreparedModel>& preparedModel, std::function<b
-    const bool hasFloat16Inputs = !std::get<MixedTypedIndex<_Float16>::index>(inputs).empty();
+    const bool hasFloat16Inputs = !inputs.float16Operands.empty();
     if (hasRelaxedFloat32Model || hasFloat16Inputs) {
         // TODO: Adjust the error limit based on testing.
         // If in relaxed mode, set the absolute tolerance to be 5ULP of FP16.
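A brief aside on the tolerance mentioned in the comment above: one ULP of FP16 in [1, 2) is 2^-10 = 0.0009765625, so a 5-ULP absolute tolerance is about 0.0049. A worked illustration (the relaxed-mode constant itself sits outside the hunks shown here):

    constexpr float kFp16Ulp = 0.0009765625f;        // 2^-10, FP16 ULP at 1.0
    constexpr float kRelaxedAtol = 5.0f * kFp16Ulp;  // 0.0048828125, i.e. "5 ULP of FP16"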
@@ -210,6 +211,7 @@ void EvaluatePreparedModel(sp<T_IPreparedModel>& preparedModel, std::function<b
         hidl_vec<OutputShape> outputShapes;
+        Timing timing;
         if (sync == Synchronously::NO) {
             SCOPED_TRACE("asynchronous");
 
@@ -217,8 +219,8 @@ void EvaluatePreparedModel(sp<T_IPreparedModel>& preparedModel, std::function<b
             sp<ExecutionCallback> executionCallback = new ExecutionCallback();
             ASSERT_NE(nullptr, executionCallback.get());
             Return<ErrorStatus> executionLaunchStatus = ExecutePreparedModel(
-                preparedModel, {.inputs = inputs_info, .outputs = outputs_info, .pools = pools},
-                executionCallback);
+                preparedModel, {.inputs = inputs_info, .outputs = outputs_info, .pools = pools},
+                measure, executionCallback);
 
             ASSERT_TRUE(executionLaunchStatus.isOk());
             EXPECT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(executionLaunchStatus));
 
@@ -226,21 +228,44 @@ void EvaluatePreparedModel(sp<T_IPreparedModel>& preparedModel, std::function<b
             executionCallback->wait();
             executionStatus = executionCallback->getStatus();
             outputShapes = executionCallback->getOutputShapes();
+            timing = executionCallback->getTiming();
         } else {
             SCOPED_TRACE("synchronous");
 
             // execute
             Return<ErrorStatus> executionReturnStatus = ExecutePreparedModel(
-                preparedModel, {.inputs = inputs_info, .outputs = outputs_info, .pools = pools},
-                &outputShapes);
+                preparedModel, {.inputs = inputs_info, .outputs = outputs_info, .pools = pools},
+                measure, &outputShapes, &timing);
 
             ASSERT_TRUE(executionReturnStatus.isOk());
             executionStatus = static_cast<ErrorStatus>(executionReturnStatus);
         }
 
+        if (testDynamicOutputShape && executionStatus != ErrorStatus::NONE) {
+            LOG(INFO) << "NN VTS: Early termination of test because vendor service cannot "
+                         "execute model that it does not support.";
+            std::cout << "[          ] Early termination of test because vendor service cannot "
+                         "execute model that it does not support."
+                      << std::endl;
+            return;
+        }
         ASSERT_EQ(ErrorStatus::NONE, executionStatus);
 
-        // TODO(xusongw): Check if the returned output shapes match with expectation once the
-        // sample driver implementation of dynamic output shape is finished.
-        ASSERT_EQ(outputShapes.size(), 0);
+        if (measure == MeasureTiming::NO) {
+            EXPECT_EQ(UINT64_MAX, timing.timeOnDevice);
+            EXPECT_EQ(UINT64_MAX, timing.timeInDriver);
+        } else {
+            if (timing.timeOnDevice != UINT64_MAX && timing.timeInDriver != UINT64_MAX) {
+                EXPECT_LE(timing.timeOnDevice, timing.timeInDriver);
+            }
+        }
+
+        // Go through all outputs, overwrite output dimensions with returned output shapes
+        if (testDynamicOutputShape) {
+            ASSERT_NE(outputShapes.size(), 0);
+            for_each<uint32_t>(test.operandDimensions,
+                               [&outputShapes](int idx, std::vector<uint32_t>& dim) {
+                                   dim = outputShapes[idx].dimensions;
+                               });
+        }
 
         // validate results
         outputMemory->read();
@@ -261,9 +286,10 @@ void EvaluatePreparedModel(sp<T_IPreparedModel>& preparedModel, std::function<b
 template <typename T_IPreparedModel>
 void EvaluatePreparedModel(sp<T_IPreparedModel>& preparedModel, std::function<bool(int)> is_ignored,
                            const std::vector<MixedTypedExample>& examples,
-                           bool hasRelaxedFloat32Model, Synchronously sync) {
+                           bool hasRelaxedFloat32Model, Synchronously sync, MeasureTiming measure,
+                           bool testDynamicOutputShape) {
     EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model, kDefaultAtol,
-                          kDefaultRtol, sync);
+                          kDefaultRtol, sync, measure, testDynamicOutputShape);
 }
 
 static void getPreparedModel(sp<PreparedModelCallback> callback,
@@ -319,7 +345,8 @@ void Execute(const sp<V1_0::IDevice>& device, std::function<V1_0::Model(void)> c
     float fpAtol = 1e-5f, fpRtol = 5.0f * 1.1920928955078125e-7f;
     EvaluatePreparedModel(preparedModel, is_ignored, examples,
-                          /*hasRelaxedFloat32Model=*/false, fpAtol, fpRtol);
+                          /*hasRelaxedFloat32Model=*/false, fpAtol, fpRtol, Synchronously::NO,
+                          MeasureTiming::NO, /*testDynamicOutputShape=*/false);
 }
 
 void Execute(const sp<V1_1::IDevice>& device, std::function<V1_1::Model(void)> create_model,
@@ -365,12 +392,14 @@ void Execute(const sp<V1_1::IDevice>& device, std::function<V1_1::Model(void)> c
     ASSERT_NE(nullptr, preparedModel.get());
 
     EvaluatePreparedModel(preparedModel, is_ignored, examples,
-                          model.relaxComputationFloat32toFloat16);
+                          model.relaxComputationFloat32toFloat16, 1e-5f, 1e-5f, Synchronously::NO,
+                          MeasureTiming::NO, /*testDynamicOutputShape=*/false);
 }
 
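The timing assertions just added follow a simple rule. Expressed as a standalone helper, the same checks look like this (an illustrative sketch only; the harness above inlines the EXPECT macros rather than using such a helper):

    // Mirrors the timing checks added to EvaluatePreparedModel above.
    static void CheckTiming(MeasureTiming measure, const Timing& timing) {
        if (measure == MeasureTiming::NO) {
            // Nothing was measured, so both durations must be the UINT64_MAX sentinel.
            EXPECT_EQ(UINT64_MAX, timing.timeOnDevice);
            EXPECT_EQ(UINT64_MAX, timing.timeInDriver);
        } else if (timing.timeOnDevice != UINT64_MAX && timing.timeInDriver != UINT64_MAX) {
            // When both durations are reported, time on the device is contained in driver time.
            EXPECT_LE(timing.timeOnDevice, timing.timeInDriver);
        }
    }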
 // TODO: Reduce code duplication.
 void Execute(const sp<V1_2::IDevice>& device, std::function<V1_2::Model(void)> create_model,
-             std::function<bool(int)> is_ignored, const std::vector<MixedTypedExample>& examples) {
+             std::function<bool(int)> is_ignored, const std::vector<MixedTypedExample>& examples,
+             bool testDynamicOutputShape) {
     V1_2::Model model = create_model();
 
     // see if service can handle model
@@ -412,9 +441,17 @@ void Execute(const sp<V1_2::IDevice>& device, std::function<V1_2::Model(void)> c
     ASSERT_NE(nullptr, preparedModel.get());
 
     EvaluatePreparedModel(preparedModel, is_ignored, examples,
-                          model.relaxComputationFloat32toFloat16, Synchronously::NO);
+                          model.relaxComputationFloat32toFloat16, Synchronously::NO,
+                          MeasureTiming::NO, testDynamicOutputShape);
     EvaluatePreparedModel(preparedModel, is_ignored, examples,
-                          model.relaxComputationFloat32toFloat16, Synchronously::YES);
+                          model.relaxComputationFloat32toFloat16, Synchronously::YES,
+                          MeasureTiming::NO, testDynamicOutputShape);
+    EvaluatePreparedModel(preparedModel, is_ignored, examples,
+                          model.relaxComputationFloat32toFloat16, Synchronously::NO,
+                          MeasureTiming::YES, testDynamicOutputShape);
+    EvaluatePreparedModel(preparedModel, is_ignored, examples,
+                          model.relaxComputationFloat32toFloat16, Synchronously::YES,
+                          MeasureTiming::YES, testDynamicOutputShape);
 }
 
 }  // namespace generated_tests
diff --git a/neuralnetworks/1.2/IExecutionCallback.hal b/neuralnetworks/1.2/IExecutionCallback.hal
index 47de1b60ec..7f6c9eeffe 100644
--- a/neuralnetworks/1.2/IExecutionCallback.hal
+++ b/neuralnetworks/1.2/IExecutionCallback.hal
@@ -18,7 +18,6 @@ package android.hardware.neuralnetworks@1.2;
 
 import @1.0::ErrorStatus;
 import @1.0::IExecutionCallback;
-import OutputShape;
 
 /**
  * IExecutionCallback must be used to return the error status result from an
@@ -50,6 +49,11 @@ interface IExecutionCallback extends @1.0::IExecutionCallback {
      *                     of the output operand in the Request outputs vector.
      *                     outputShapes must be empty unless the status is either
      *                     NONE or OUTPUT_INSUFFICIENT_SIZE.
+     * @return Timing Duration of execution. Unless MeasureTiming::YES was passed when
+     *                launching the execution and status is NONE, all times must
+     *                be reported as UINT64_MAX. A driver may choose to report
+     *                any time as UINT64_MAX, indicating that particular measurement is
+     *                not available.
      */
-    oneway notify_1_2(ErrorStatus status, vec<OutputShape> outputShapes);
+    oneway notify_1_2(ErrorStatus status, vec<OutputShape> outputShapes, Timing timing);
 };
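For a sense of how a driver would satisfy this contract, here is a hypothetical implementation-side fragment (not from this patch; the `driverStart`/`driverEnd` time points and the `callback` proxy are placeholders, and device time is simply left unmeasured, which the interface allows):

    // Convert a host-side steady_clock interval to the microsecond durations used by Timing.
    uint64_t toMicroseconds(std::chrono::steady_clock::duration d) {
        auto us = std::chrono::duration_cast<std::chrono::microseconds>(d).count();
        return us < 0 ? UINT64_MAX : static_cast<uint64_t>(us);
    }

    Timing timing = {.timeOnDevice = UINT64_MAX, .timeInDriver = UINT64_MAX};
    if (measure == MeasureTiming::YES && status == ErrorStatus::NONE) {
        timing.timeInDriver = toMicroseconds(driverEnd - driverStart);
    }
    callback->notify_1_2(status, outputShapes, timing);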
diff --git a/neuralnetworks/1.2/IPreparedModel.hal b/neuralnetworks/1.2/IPreparedModel.hal
index 2d4e572333..5d2d80ff71 100644
--- a/neuralnetworks/1.2/IPreparedModel.hal
+++ b/neuralnetworks/1.2/IPreparedModel.hal
@@ -59,6 +59,10 @@ interface IPreparedModel extends @1.0::IPreparedModel {
      *
      * @param request The input and output information on which the prepared
      *                model is to be executed.
+     * @param measure Specifies whether or not to measure duration of the execution.
+     *                The duration runs from the time the driver sees the call
+     *                to the execute_1_2 function to the time the driver invokes
+     *                the callback.
      * @param callback A callback object used to return the error status of
      *                 the execution. The callback object's notify function must
      *                 be called exactly once, even if the execution was
      *                 unsuccessful.
@@ -72,7 +76,7 @@ interface IPreparedModel extends @1.0::IPreparedModel {
      *                - INVALID_ARGUMENT if one of the input arguments is
      *                  invalid
      */
-    execute_1_2(Request request, IExecutionCallback callback)
+    execute_1_2(Request request, MeasureTiming measure, IExecutionCallback callback)
         generates (ErrorStatus status);
 
     /**
@@ -98,6 +102,10 @@ interface IPreparedModel extends @1.0::IPreparedModel {
      *
      * @param request The input and output information on which the prepared
      *                model is to be executed.
+     * @param measure Specifies whether or not to measure duration of the execution.
+     *                The duration runs from the time the driver sees the call
+     *                to the executeSynchronously function to the time the driver
+     *                returns from the function.
      * @return status Error status of the execution, must be:
      *                - NONE if execution is performed successfully
      *                - DEVICE_UNAVAILABLE if driver is offline or busy
      *
@@ -112,9 +120,13 @@ interface IPreparedModel extends @1.0::IPreparedModel {
      *                     of the output operand in the Request outputs vector.
      *                     outputShapes must be empty unless the status is either
      *                     NONE or OUTPUT_INSUFFICIENT_SIZE.
+     * @return Timing Duration of execution. Unless measure is YES and status is
+     *                NONE, all times must be reported as UINT64_MAX. A driver may
+     *                choose to report any time as UINT64_MAX, indicating that
+     *                measurement is not available.
      */
-    executeSynchronously(Request request)
-        generates (ErrorStatus status, vec<OutputShape> outputShapes);
+    executeSynchronously(Request request, MeasureTiming measure)
+        generates (ErrorStatus status, vec<OutputShape> outputShapes, Timing timing);
 
     /**
      * Configure a Burst object used to execute multiple inferences on a
diff --git a/neuralnetworks/1.2/types.hal b/neuralnetworks/1.2/types.hal
index 5bf21a986d..bd8354fecf 100644
--- a/neuralnetworks/1.2/types.hal
+++ b/neuralnetworks/1.2/types.hal
@@ -443,8 +443,34 @@ struct OutputShape {
 };
 
 /**
- * FmqRequestDatum is a single element of a serialized representation of a
- * {@link @1.0::Request} object which is sent across FastMessageQueue.
+ * Specifies whether or not to measure timing information during execution.
+ */
+enum MeasureTiming : int32_t {
+    NO = 0,
+    YES = 1,
+};
+
+/**
+ * Timing information measured during execution. Each time is a duration from
+ * the beginning of some task to the end of that task, including time when that
+ * task is not active (for example, preempted by some other task, or
+ * waiting for some resource to become available).
+ *
+ * Times are measured in microseconds.
+ * When a time is not available, it must be reported as UINT64_MAX.
+ */
+struct Timing {
+    /** Execution time on device (not driver, which runs on host processor). */
+    uint64_t timeOnDevice;
+    /** Execution time in driver (including time on device). */
+    uint64_t timeInDriver;
+};
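To make the Timing semantics concrete, here are three representative values a driver might legitimately return (illustrative numbers only, not from the patch; durations are in microseconds):

    Timing notMeasured = {.timeOnDevice = UINT64_MAX, .timeInDriver = UINT64_MAX};  // measure == NO, or measurement unavailable
    Timing driverOnly  = {.timeOnDevice = UINT64_MAX, .timeInDriver = 1500};        // only driver-side time measured
    Timing full        = {.timeOnDevice = 1200,       .timeInDriver = 1500};        // device time is a subset of driver time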
+
+/**
+ * FmqRequestDatum is a single element of a serialized representation of an
+ * execution request (a {@link @1.0::Request} object and a {@link MeasureTiming}
+ * value) which is sent across FastMessageQueue.
  *
  * The serialized representation for a particular execution is referred to later
  * in these descriptions as a 'packet'.
  *
  * FastMessageQueue can only pass HIDL-defined types that do not involve nested
  * buffers, handles, or interfaces.
  *
- * The {@link @1.0::Request} is serialized as follows:
+ * The request is serialized as follows:
  *     1) 'packetInformation'
  *     2) For each input operand:
  *         2.1) 'inputOperandInformation'
  *         2.2) For each dimension element of the operand:
  *             2.2.1) 'inputOperandDimensionValue'
  *     3) For each output operand:
  *         3.1) 'outputOperandInformation'
  *         3.2) For each dimension element of the operand:
  *             3.2.1) 'outputOperandDimensionValue'
  *     4) For each pool:
  *         4.1) 'poolIdentifier'
+ *     5) 'measureTiming'
  */
 safe_union FmqRequestDatum {
     /**
@@ -557,12 +584,21 @@ safe_union FmqRequestDatum {
      * identifier.
      */
     int32_t poolIdentifier;
+
+    /**
+     * Specifies whether or not to measure duration of the execution. The
+     * duration runs from the time the driver dequeues the request from a
+     * FastMessageQueue to the time the driver enqueues results to a
+     * FastMessageQueue.
+     */
+    MeasureTiming measureTiming;
 };
 
 /**
  * FmqResultDatum is a single element of a serialized representation of the
- * values returned from an execution ({@link @1.0::ErrorStatus} and
- * vec<{@link OutputShape}>) which is returned via FastMessageQueue.
+ * values returned from an execution ({@link @1.0::ErrorStatus},
+ * vec<{@link OutputShape}>, and {@link Timing}) which is returned via
+ * FastMessageQueue.
  *
  * The serialized representation for a particular execution is referred to later
  * in these descriptions as a 'packet'.
  *
@@ -577,6 +613,7 @@ safe_union FmqRequestDatum {
  *         2.1) 'operandInformation'
  *         2.2) For each dimension element of the operand:
  *             2.2.1) 'operandDimensionValue'
+ *     3) 'executionTiming'
  */
 safe_union FmqResultDatum {
     /**
@@ -632,4 +669,12 @@ safe_union FmqResultDatum {
      * Element of the dimensions vector.
      */
     uint32_t operandDimensionValue;
+
+    /**
+     * Duration of execution. Unless measurement was requested and execution
+     * succeeds, all times must be reported as UINT64_MAX. A driver may choose
+     * to report any time as UINT64_MAX, indicating that measurement is not
+     * available.
+     */
+    Timing executionTiming;
 };
diff --git a/neuralnetworks/1.2/vts/functional/ValidateRequest.cpp b/neuralnetworks/1.2/vts/functional/ValidateRequest.cpp
index 1eaea4b9a6..00a7c3ec4f 100644
--- a/neuralnetworks/1.2/vts/functional/ValidateRequest.cpp
+++ b/neuralnetworks/1.2/vts/functional/ValidateRequest.cpp
@@ -42,6 +42,10 @@ using test_helper::MixedTypedExample;
 
 ///////////////////////// UTILITY FUNCTIONS /////////////////////////
 
+static bool badTiming(Timing timing) {
+    return timing.timeOnDevice == UINT64_MAX && timing.timeInDriver == UINT64_MAX;
+}
+
 static void createPreparedModel(const sp<IDevice>& device, const Model& model,
                                 sp<IPreparedModel>* preparedModel) {
     ASSERT_NE(nullptr, preparedModel);
@@ -98,31 +102,46 @@ static void validate(const sp<IPreparedModel>& preparedModel, const std::string&
                      Request request, const std::function<void(Request*)>& mutation) {
     mutation(&request);
 
+    // We'd like to test both with timing requested and without timing
+    // requested. Rather than running each test both ways, we'll decide whether
+    // to request timing by hashing the message. We do not use std::hash because
+    // it is not guaranteed stable across executions.
+    char hash = 0;
+    for (auto c : message) {
+        hash ^= c;
+    };
+    MeasureTiming measure = (hash & 1) ? MeasureTiming::YES : MeasureTiming::NO;
+
     {
         SCOPED_TRACE(message + " [execute_1_2]");
 
         sp<ExecutionCallback> executionCallback = new ExecutionCallback();
         ASSERT_NE(nullptr, executionCallback.get());
         Return<ErrorStatus> executeLaunchStatus =
-                preparedModel->execute_1_2(request, executionCallback);
+                preparedModel->execute_1_2(request, measure, executionCallback);
         ASSERT_TRUE(executeLaunchStatus.isOk());
         ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, static_cast<ErrorStatus>(executeLaunchStatus));
 
         executionCallback->wait();
         ErrorStatus executionReturnStatus = executionCallback->getStatus();
         const auto& outputShapes = executionCallback->getOutputShapes();
+        Timing timing = executionCallback->getTiming();
         ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, executionReturnStatus);
         ASSERT_EQ(outputShapes.size(), 0);
+        ASSERT_TRUE(badTiming(timing));
     }
 
     {
         SCOPED_TRACE(message + " [executeSynchronously]");
 
         Return<void> executeStatus = preparedModel->executeSynchronously(
-                request, [](ErrorStatus error, const hidl_vec<OutputShape>& outputShapes) {
-                    ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, error);
-                    EXPECT_EQ(outputShapes.size(), 0);
-                });
+                request, measure,
+                [](ErrorStatus error, const hidl_vec<OutputShape>& outputShapes,
+                   const Timing& timing) {
+                    ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, error);
+                    EXPECT_EQ(outputShapes.size(), 0);
+                    EXPECT_TRUE(badTiming(timing));
+                });
         ASSERT_TRUE(executeStatus.isOk());
     }
 }
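Taken together, a 1.2 client sees the measured duration like this (an illustrative synchronous-path sketch, not part of the patch; `preparedModel` and `request` are assumed to exist and error handling is abbreviated):

    ErrorStatus status;
    hidl_vec<OutputShape> outputShapes;
    Timing timing = {.timeOnDevice = UINT64_MAX, .timeInDriver = UINT64_MAX};

    Return<void> ret = preparedModel->executeSynchronously(
            request, MeasureTiming::YES,
            [&](ErrorStatus error, const hidl_vec<OutputShape>& shapes, const Timing& t) {
                status = error;
                outputShapes = shapes;
                timing = t;
            });

    if (ret.isOk() && status == ErrorStatus::NONE && timing.timeInDriver != UINT64_MAX) {
        // Time spent in the driver for this successful execution, in microseconds.
        LOG(INFO) << "executeSynchronously took " << timing.timeInDriver << " us in the driver";
    }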