diff --git a/neuralnetworks/1.2/vts/functional/GeneratedTestHarness.cpp b/neuralnetworks/1.2/vts/functional/GeneratedTestHarness.cpp
index 4909214a3f..599fd1d9be 100644
--- a/neuralnetworks/1.2/vts/functional/GeneratedTestHarness.cpp
+++ b/neuralnetworks/1.2/vts/functional/GeneratedTestHarness.cpp
@@ -272,7 +272,7 @@ void EvaluatePreparedModel(const sp<IPreparedModel>& preparedModel, const TestMo
             int n;
             std::tie(n, outputShapes, timing, std::ignore) =
                     controller->compute(request, testConfig.measureTiming, keys);
-            executionStatus = nn::convertResultCodeToErrorStatus(n);
+            executionStatus = nn::convertToV1_0(nn::convertResultCodeToErrorStatus(n));
             break;
         }
diff --git a/neuralnetworks/1.2/vts/functional/ValidateBurst.cpp b/neuralnetworks/1.2/vts/functional/ValidateBurst.cpp
index 416744f902..ec9629bccb 100644
--- a/neuralnetworks/1.2/vts/functional/ValidateBurst.cpp
+++ b/neuralnetworks/1.2/vts/functional/ValidateBurst.cpp
@@ -296,7 +296,8 @@ static void validateBurstFmqLength(const sp<IPreparedModel>& preparedModel,
     // collect serialized result by running regular burst
     const auto [nRegular, outputShapesRegular, timingRegular, fallbackRegular] =
             controllerRegular->compute(request, MeasureTiming::NO, keys);
-    const ErrorStatus statusRegular = nn::convertResultCodeToErrorStatus(nRegular);
+    const ErrorStatus statusRegular =
+            nn::convertToV1_0(nn::convertResultCodeToErrorStatus(nRegular));
     EXPECT_FALSE(fallbackRegular);
 
     // skip test if regular burst output isn't useful for testing a failure
@@ -312,7 +313,7 @@ static void validateBurstFmqLength(const sp<IPreparedModel>& preparedModel,
     // large enough to return the serialized result
     const auto [nSmall, outputShapesSmall, timingSmall, fallbackSmall] =
             controllerSmall->compute(request, MeasureTiming::NO, keys);
-    const ErrorStatus statusSmall = nn::convertResultCodeToErrorStatus(nSmall);
+    const ErrorStatus statusSmall = nn::convertToV1_0(nn::convertResultCodeToErrorStatus(nSmall));
     EXPECT_NE(ErrorStatus::NONE, statusSmall);
     EXPECT_EQ(0u, outputShapesSmall.size());
     EXPECT_TRUE(badTiming(timingSmall));
diff --git a/neuralnetworks/1.2/vts/functional/ValidateRequest.cpp b/neuralnetworks/1.2/vts/functional/ValidateRequest.cpp
index 2d83b8186c..7b5ff9b8e4 100644
--- a/neuralnetworks/1.2/vts/functional/ValidateRequest.cpp
+++ b/neuralnetworks/1.2/vts/functional/ValidateRequest.cpp
@@ -107,7 +107,7 @@ static void validate(const sp<IPreparedModel>& preparedModel, const std::string&
     // execute and verify
     const auto [n, outputShapes, timing, fallback] = burst->compute(request, measure, keys);
-    const ErrorStatus status = nn::convertResultCodeToErrorStatus(n);
+    const ErrorStatus status = nn::convertToV1_0(nn::convertResultCodeToErrorStatus(n));
     EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, status);
     EXPECT_EQ(outputShapes.size(), 0);
     EXPECT_TRUE(badTiming(timing));
diff --git a/neuralnetworks/1.3/vts/functional/Android.bp b/neuralnetworks/1.3/vts/functional/Android.bp
index e2795de420..e7a9fd34c3 100644
--- a/neuralnetworks/1.3/vts/functional/Android.bp
+++ b/neuralnetworks/1.3/vts/functional/Android.bp
@@ -15,11 +15,12 @@
 //
 
 cc_library_static {
-    name: "VtsHalNeuralNetworksV1_3Callbacks",
+    name: "VtsHalNeuralNetworksV1_3_utils",
     defaults: ["VtsHalTargetTestDefaults"],
     export_include_dirs: ["include"],
     srcs: [
         "Callbacks.cpp",
+        "Utils.cpp",
     ],
     static_libs: [
         "android.hardware.neuralnetworks@1.0",
@@ -29,7 +30,7 @@ cc_library_static {
     ],
     header_libs: [
         "libbase_headers",
-    ]
+    ],
 }
 
 cc_test {
@@ -50,6 +51,9 @@ cc_test {
         "libnativewindow",
     ],
     static_libs: [
+        "VtsHalNeuralNetworksV1_0_utils",
"VtsHalNeuralNetworksV1_2Callbacks", + "VtsHalNeuralNetworksV1_3_utils", "android.hardware.neuralnetworks@1.0", "android.hardware.neuralnetworks@1.1", "android.hardware.neuralnetworks@1.2", @@ -60,9 +64,6 @@ cc_test { "libhidlmemory", "libneuralnetworks_generated_test_harness", "libneuralnetworks_utils", - "VtsHalNeuralNetworksV1_0_utils", - "VtsHalNeuralNetworksV1_2Callbacks", - "VtsHalNeuralNetworksV1_3Callbacks", ], whole_static_libs: [ "neuralnetworks_generated_V1_0_example", diff --git a/neuralnetworks/1.3/vts/functional/BasicTests.cpp b/neuralnetworks/1.3/vts/functional/BasicTests.cpp index b64dc2f61b..891850cfa4 100644 --- a/neuralnetworks/1.3/vts/functional/BasicTests.cpp +++ b/neuralnetworks/1.3/vts/functional/BasicTests.cpp @@ -21,7 +21,6 @@ namespace android::hardware::neuralnetworks::V1_3::vts::functional { using V1_0::DeviceStatus; -using V1_0::ErrorStatus; using V1_0::PerformanceInfo; using V1_2::Constant; using V1_2::DeviceType; diff --git a/neuralnetworks/1.3/vts/functional/Callbacks.cpp b/neuralnetworks/1.3/vts/functional/Callbacks.cpp index 4f08e72a86..5768e3794a 100644 --- a/neuralnetworks/1.3/vts/functional/Callbacks.cpp +++ b/neuralnetworks/1.3/vts/functional/Callbacks.cpp @@ -24,12 +24,16 @@ namespace android::hardware::neuralnetworks::V1_3::implementation { -using V1_0::ErrorStatus; +using V1_2::OutputShape; +using V1_2::Timing; + +constexpr Timing kNoTiming = {.timeOnDevice = std::numeric_limits::max(), + .timeInDriver = std::numeric_limits::max()}; // PreparedModelCallback methods begin here -Return PreparedModelCallback::notify(ErrorStatus errorStatus, - const sp& preparedModel) { +Return PreparedModelCallback::notifyInternal(ErrorStatus errorStatus, + const sp& preparedModel) { { std::lock_guard hold(mMutex); @@ -48,14 +52,19 @@ Return PreparedModelCallback::notify(ErrorStatus errorStatus, return Void(); } -Return PreparedModelCallback::notify_1_2(ErrorStatus errorStatus, - const sp& preparedModel) { - return notify(errorStatus, preparedModel); +Return PreparedModelCallback::notify(V1_0::ErrorStatus errorStatus, + const sp& preparedModel) { + return notifyInternal(static_cast(errorStatus), preparedModel); } -Return PreparedModelCallback::notify_1_3(ErrorStatus errorStatus, +Return PreparedModelCallback::notify_1_2(V1_0::ErrorStatus errorStatus, + const sp& preparedModel) { + return notifyInternal(static_cast(errorStatus), preparedModel); +} + +Return PreparedModelCallback::notify_1_3(V1_3::ErrorStatus errorStatus, const sp& preparedModel) { - return notify(errorStatus, preparedModel); + return notifyInternal(errorStatus, preparedModel); } void PreparedModelCallback::wait() const { @@ -73,4 +82,82 @@ sp PreparedModelCallback::getPreparedModel() const { return mPreparedModel; } +// ExecutionCallback methods begin here + +Return ExecutionCallback::notify(V1_0::ErrorStatus errorStatus) { + return notifyInternal(static_cast(errorStatus), {}, kNoTiming); +} + +Return ExecutionCallback::notify_1_2(V1_0::ErrorStatus errorStatus, + const hidl_vec& outputShapes, + const Timing& timing) { + return notifyInternal(static_cast(errorStatus), outputShapes, timing); +} + +Return ExecutionCallback::notify_1_3(V1_3::ErrorStatus errorStatus, + const hidl_vec& outputShapes, + const Timing& timing) { + return notifyInternal(errorStatus, outputShapes, timing); +} + +void ExecutionCallback::wait() const { + std::unique_lock lock(mMutex); + mCondition.wait(lock, [this] { return mNotified; }); +} + +ErrorStatus ExecutionCallback::getStatus() const { + wait(); + return mErrorStatus; +} + 
+const std::vector<OutputShape>& ExecutionCallback::getOutputShapes() const {
+    wait();
+    return mOutputShapes;
+}
+
+Timing ExecutionCallback::getTiming() const {
+    wait();
+    return mTiming;
+}
+
+Return<void> ExecutionCallback::notifyInternal(ErrorStatus errorStatus,
+                                               hidl_vec<OutputShape> outputShapes, Timing timing) {
+    // check results
+    if (errorStatus == ErrorStatus::OUTPUT_INSUFFICIENT_SIZE) {
+        // outputShapes must not be empty if OUTPUT_INSUFFICIENT_SIZE.
+        if (outputShapes.size() == 0) {
+            LOG(ERROR) << "Notified with empty output shape vector when OUTPUT_INSUFFICIENT_SIZE";
+            errorStatus = ErrorStatus::GENERAL_FAILURE;
+            outputShapes = {};
+            timing = kNoTiming;
+        }
+    } else if (errorStatus != ErrorStatus::NONE) {
+        // outputShapes must be empty if errorStatus is neither NONE nor OUTPUT_INSUFFICIENT_SIZE.
+        if (outputShapes.size() != 0) {
+            LOG(ERROR) << "Notified with non-empty output shape vector when error status is "
+                          "neither NONE nor OUTPUT_INSUFFICIENT_SIZE";
+            errorStatus = ErrorStatus::GENERAL_FAILURE;
+            outputShapes = {};
+            timing = kNoTiming;
+        }
+    }
+
+    // store results
+    {
+        std::lock_guard<std::mutex> hold(mMutex);
+
+        // quick-return if object has already been notified
+        if (mNotified) {
+            return Void();
+        }
+
+        mErrorStatus = errorStatus;
+        mOutputShapes = std::move(outputShapes);
+        mTiming = timing;
+        mNotified = true;
+    }
+    mCondition.notify_all();
+    return Void();
+}
+
 }  // namespace android::hardware::neuralnetworks::V1_3::implementation
diff --git a/neuralnetworks/1.3/vts/functional/CompilationCachingTests.cpp b/neuralnetworks/1.3/vts/functional/CompilationCachingTests.cpp
index 5cb466fe28..576e5240b0 100644
--- a/neuralnetworks/1.3/vts/functional/CompilationCachingTests.cpp
+++ b/neuralnetworks/1.3/vts/functional/CompilationCachingTests.cpp
@@ -29,6 +29,7 @@
 #include
 
 #include "1.3/Callbacks.h"
+#include "1.3/Utils.h"
 #include "GeneratedTestHarness.h"
 #include "MemoryUtils.h"
 #include "TestHarness.h"
@@ -49,7 +50,6 @@ namespace android::hardware::neuralnetworks::V1_3::vts::functional {
 
 using namespace test_helper;
 using implementation::PreparedModelCallback;
-using V1_0::ErrorStatus;
 using V1_1::ExecutionPreference;
 using V1_2::Constant;
 using V1_2::OperationType;
@@ -238,8 +238,8 @@ class CompilationCachingTestBase : public testing::Test {
         mCacheDir.push_back('/');
 
         Return<void> ret = kDevice->getNumberOfCacheFilesNeeded(
-                [this](ErrorStatus status, uint32_t numModelCache, uint32_t numDataCache) {
-                    EXPECT_EQ(ErrorStatus::NONE, status);
+                [this](V1_0::ErrorStatus status, uint32_t numModelCache, uint32_t numDataCache) {
+                    EXPECT_EQ(V1_0::ErrorStatus::NONE, status);
                     mNumModelCache = numModelCache;
                     mNumDataCache = numDataCache;
                 });
@@ -324,9 +324,9 @@ class CompilationCachingTestBase : public testing::Test {
         // Launch prepare model.
         sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
         hidl_array<uint8_t, sizeof(mToken)> cacheToken(mToken);
-        Return<ErrorStatus> prepareLaunchStatus =
-                kDevice->prepareModel_1_3(model, ExecutionPreference::FAST_SINGLE_ANSWER,
-                                          modelCache, dataCache, cacheToken, preparedModelCallback);
+        Return<ErrorStatus> prepareLaunchStatus = kDevice->prepareModel_1_3(
+                model, ExecutionPreference::FAST_SINGLE_ANSWER, kDefaultPriority, {}, modelCache,
+                dataCache, cacheToken, preparedModelCallback);
         ASSERT_TRUE(prepareLaunchStatus.isOk());
         ASSERT_EQ(static_cast<ErrorStatus>(prepareLaunchStatus), ErrorStatus::NONE);
@@ -370,7 +370,7 @@ class CompilationCachingTestBase : public testing::Test {
         sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
         hidl_array<uint8_t, sizeof(mToken)> cacheToken(mToken);
         Return<ErrorStatus> prepareLaunchStatus = kDevice->prepareModelFromCache_1_3(
-                modelCache, dataCache, cacheToken, preparedModelCallback);
+                kDefaultPriority, {}, modelCache, dataCache, cacheToken, preparedModelCallback);
         ASSERT_TRUE(prepareLaunchStatus.isOk());
         if (static_cast<ErrorStatus>(prepareLaunchStatus) != ErrorStatus::NONE) {
             *preparedModel = nullptr;
diff --git a/neuralnetworks/1.3/vts/functional/GeneratedTestHarness.cpp b/neuralnetworks/1.3/vts/functional/GeneratedTestHarness.cpp
index 805d5b53aa..82e63ac546 100644
--- a/neuralnetworks/1.3/vts/functional/GeneratedTestHarness.cpp
+++ b/neuralnetworks/1.3/vts/functional/GeneratedTestHarness.cpp
@@ -44,7 +44,6 @@
 #include
 
 #include "1.0/Utils.h"
-#include "1.2/Callbacks.h"
 #include "1.3/Callbacks.h"
 #include "ExecutionBurstController.h"
 #include "MemoryUtils.h"
@@ -56,9 +55,9 @@ namespace android::hardware::neuralnetworks::V1_3::vts::functional {
 
 using namespace test_helper;
 using hidl::memory::V1_0::IMemory;
+using implementation::ExecutionCallback;
 using implementation::PreparedModelCallback;
 using V1_0::DataLocation;
-using V1_0::ErrorStatus;
 using V1_0::RequestArgument;
 using V1_1::ExecutionPreference;
 using V1_2::Constant;
@@ -66,7 +65,6 @@ using V1_2::MeasureTiming;
 using V1_2::OutputShape;
 using V1_2::SymmPerChannelQuantParams;
 using V1_2::Timing;
-using V1_2::implementation::ExecutionCallback;
 using HidlToken = hidl_array<uint8_t, static_cast<uint32_t>(Constant::BYTE_SIZE_OF_CACHE_TOKEN)>;
 
 namespace {
@@ -453,7 +451,7 @@ static std::vector getOutputBuffers(const TestModel& testModel, cons
 static Return<ErrorStatus> ExecutePreparedModel(const sp<IPreparedModel>& preparedModel,
                                                 const Request& request, MeasureTiming measure,
                                                 sp<ExecutionCallback>& callback) {
-    return preparedModel->execute_1_3(request, measure, callback);
+    return preparedModel->execute_1_3(request, measure, {}, callback);
 }
 static Return<ErrorStatus> ExecutePreparedModel(const sp<IPreparedModel>& preparedModel,
                                                 const Request& request, MeasureTiming measure,
@@ -461,7 +459,7 @@ static Return<ErrorStatus> ExecutePreparedModel(const sp<IPreparedModel>& prepar
                                                 Timing* timing) {
     ErrorStatus result;
     Return<void> ret = preparedModel->executeSynchronously_1_3(
-            request, measure,
+            request, measure, {},
             [&result, outputShapes, timing](ErrorStatus error, const hidl_vec<OutputShape>& shapes,
                                             const Timing& time) {
                 result = error;
diff --git a/neuralnetworks/1.3/vts/functional/Utils.cpp b/neuralnetworks/1.3/vts/functional/Utils.cpp
new file mode 100644
index 0000000000..23e2af823e
--- /dev/null
+++ b/neuralnetworks/1.3/vts/functional/Utils.cpp
@@ -0,0 +1,27 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "1.3/Utils.h"
+
+#include
+
+namespace android::hardware::neuralnetworks::V1_3 {
+
+::std::ostream& operator<<(::std::ostream& os, ErrorStatus errorStatus) {
+    return os << toString(errorStatus);
+}
+
+}  // namespace android::hardware::neuralnetworks::V1_3
diff --git a/neuralnetworks/1.3/vts/functional/ValidateBurst.cpp b/neuralnetworks/1.3/vts/functional/ValidateBurst.cpp
index 7df804645a..6ff9dfd3a8 100644
--- a/neuralnetworks/1.3/vts/functional/ValidateBurst.cpp
+++ b/neuralnetworks/1.3/vts/functional/ValidateBurst.cpp
@@ -34,7 +34,6 @@ namespace android::hardware::neuralnetworks::V1_3::vts::functional {
 using nn::ExecutionBurstController;
 using nn::RequestChannelSender;
 using nn::ResultChannelReceiver;
-using V1_0::ErrorStatus;
 using V1_0::Request;
 using V1_2::FmqRequestDatum;
 using V1_2::FmqResultDatum;
@@ -80,16 +79,17 @@ static void createBurst(const sp<IPreparedModel>& preparedModel, const sp
     sp<IBurstContext> burstContext;
     const Return<void> ret = preparedModel->configureExecutionBurst(
             callback, *fmqRequestDescriptor, *fmqResultDescriptor,
-            [&errorStatus, &burstContext](ErrorStatus status, const sp<IBurstContext>& context) {
+            [&errorStatus, &burstContext](V1_0::ErrorStatus status,
+                                          const sp<IBurstContext>& context) {
                 errorStatus = status;
                 burstContext = context;
             });
     ASSERT_TRUE(ret.isOk());
-    ASSERT_EQ(ErrorStatus::NONE, errorStatus);
+    ASSERT_EQ(V1_0::ErrorStatus::NONE, errorStatus);
     ASSERT_NE(nullptr, burstContext.get());
 
     // return values
@@ -144,7 +144,7 @@ static void validate(RequestChannelSender* sender, ResultChannelReceiver* receiv
     auto results = receiver->getBlocking();
     ASSERT_TRUE(results.has_value());
     const auto [status, outputShapes, timing] = std::move(*results);
-    EXPECT_NE(ErrorStatus::NONE, status);
+    EXPECT_NE(V1_0::ErrorStatus::NONE, status);
     EXPECT_EQ(0u, outputShapes.size());
     EXPECT_TRUE(badTiming(timing));
 }
@@ -302,14 +302,15 @@ static void validateBurstFmqLength(const sp<IPreparedModel>& preparedModel,
     // collect serialized result by running regular burst
     const auto [nRegular, outputShapesRegular, timingRegular, fallbackRegular] =
             controllerRegular->compute(request, MeasureTiming::NO, keys);
-    const ErrorStatus statusRegular = nn::convertResultCodeToErrorStatus(nRegular);
+    const V1_0::ErrorStatus statusRegular =
+            nn::convertToV1_0(nn::convertResultCodeToErrorStatus(nRegular));
     EXPECT_FALSE(fallbackRegular);
 
     // skip test if regular burst output isn't useful for testing a failure
     // caused by having too small of a length for the result FMQ
     const std::vector serialized =
             android::nn::serialize(statusRegular, outputShapesRegular, timingRegular);
-    if (statusRegular != ErrorStatus::NONE ||
+    if (statusRegular != V1_0::ErrorStatus::NONE ||
         serialized.size() <= kExecutionBurstChannelSmallLength) {
         return;
     }
@@ -318,8 +319,9 @@ static void validateBurstFmqLength(const sp<IPreparedModel>& preparedModel,
     // large enough to return the serialized result
     const auto [nSmall, outputShapesSmall, timingSmall, fallbackSmall] =
             controllerSmall->compute(request, MeasureTiming::NO, keys);
-    const ErrorStatus statusSmall = nn::convertResultCodeToErrorStatus(nSmall);
-    EXPECT_NE(ErrorStatus::NONE, statusSmall);
+    const V1_0::ErrorStatus statusSmall =
+            nn::convertToV1_0(nn::convertResultCodeToErrorStatus(nSmall));
+    EXPECT_NE(V1_0::ErrorStatus::NONE, statusSmall);
     EXPECT_EQ(0u, outputShapesSmall.size());
     EXPECT_TRUE(badTiming(timingSmall));
     EXPECT_FALSE(fallbackSmall);
diff --git a/neuralnetworks/1.3/vts/functional/ValidateModel.cpp b/neuralnetworks/1.3/vts/functional/ValidateModel.cpp
index cc862645a5..43e53ef55d 100644
--- a/neuralnetworks/1.3/vts/functional/ValidateModel.cpp
+++ b/neuralnetworks/1.3/vts/functional/ValidateModel.cpp
@@ -18,13 +18,13 @@
 
 #include "1.0/Utils.h"
 #include "1.3/Callbacks.h"
+#include "1.3/Utils.h"
 #include "GeneratedTestHarness.h"
 #include "VtsHalNeuralnetworks.h"
 
 namespace android::hardware::neuralnetworks::V1_3::vts::functional {
 
 using implementation::PreparedModelCallback;
-using V1_0::ErrorStatus;
 using V1_1::ExecutionPreference;
 using V1_2::SymmPerChannelQuantParams;
 using HidlToken =
@@ -48,9 +48,9 @@ static void validatePrepareModel(const sp<IDevice>& device, const std::string& m
     SCOPED_TRACE(message + " [prepareModel_1_3]");
 
     sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
-    Return<ErrorStatus> prepareLaunchStatus =
-            device->prepareModel_1_3(model, preference, hidl_vec<hidl_handle>(),
-                                     hidl_vec<hidl_handle>(), HidlToken(), preparedModelCallback);
+    Return<ErrorStatus> prepareLaunchStatus = device->prepareModel_1_3(
+            model, preference, kDefaultPriority, {}, hidl_vec<hidl_handle>(),
+            hidl_vec<hidl_handle>(), HidlToken(), preparedModelCallback);
     ASSERT_TRUE(prepareLaunchStatus.isOk());
     ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, static_cast<ErrorStatus>(prepareLaunchStatus));
diff --git a/neuralnetworks/1.3/vts/functional/ValidateRequest.cpp b/neuralnetworks/1.3/vts/functional/ValidateRequest.cpp
index 96dc589d50..9fb4c6e55b 100644
--- a/neuralnetworks/1.3/vts/functional/ValidateRequest.cpp
+++ b/neuralnetworks/1.3/vts/functional/ValidateRequest.cpp
@@ -18,7 +18,7 @@
 #include
 
 #include "1.0/Utils.h"
-#include "1.2/Callbacks.h"
+#include "1.3/Callbacks.h"
 #include "ExecutionBurstController.h"
 #include "GeneratedTestHarness.h"
 #include "TestHarness.h"
@@ -27,11 +27,10 @@
 
 namespace android::hardware::neuralnetworks::V1_3::vts::functional {
 
-using V1_0::ErrorStatus;
+using implementation::ExecutionCallback;
 using V1_2::MeasureTiming;
 using V1_2::OutputShape;
 using V1_2::Timing;
-using V1_2::implementation::ExecutionCallback;
 
 ///////////////////////// UTILITY FUNCTIONS /////////////////////////
@@ -63,7 +62,7 @@ static void validate(const sp<IPreparedModel>& preparedModel, const std::string&
     sp<ExecutionCallback> executionCallback = new ExecutionCallback();
     Return<ErrorStatus> executeLaunchStatus =
-            preparedModel->execute_1_3(request, measure, executionCallback);
+            preparedModel->execute_1_3(request, measure, {}, executionCallback);
     ASSERT_TRUE(executeLaunchStatus.isOk());
     ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, static_cast<ErrorStatus>(executeLaunchStatus));
@@ -81,7 +80,7 @@ static void validate(const sp<IPreparedModel>& preparedModel, const std::string&
     {
         SCOPED_TRACE(message + " [executeSynchronously_1_3]");
         Return<void> executeStatus = preparedModel->executeSynchronously_1_3(
-                request, measure,
+                request, measure, {},
                 [](ErrorStatus error, const hidl_vec<OutputShape>& outputShapes,
                    const Timing& timing) {
                     ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, error);
@@ -163,7 +162,7 @@ void validateRequest(const sp<IPreparedModel>& preparedModel, const Request& req
 void validateRequestFailure(const sp<IPreparedModel>& preparedModel, const Request& request) {
     SCOPED_TRACE("Expecting request to fail [executeSynchronously_1_3]");
     Return<void> executeStatus = preparedModel->executeSynchronously_1_3(
-            request, MeasureTiming::NO,
+            request, MeasureTiming::NO, {},
             [](ErrorStatus error, const hidl_vec<OutputShape>& outputShapes,
                const Timing& timing) {
                 ASSERT_NE(ErrorStatus::NONE, error);
                 EXPECT_EQ(outputShapes.size(), 0);
diff --git a/neuralnetworks/1.3/vts/functional/VtsHalNeuralnetworks.cpp b/neuralnetworks/1.3/vts/functional/VtsHalNeuralnetworks.cpp
index 1140b68635..7a32b0441c 100644
--- a/neuralnetworks/1.3/vts/functional/VtsHalNeuralnetworks.cpp
+++ b/neuralnetworks/1.3/vts/functional/VtsHalNeuralnetworks.cpp
@@ -23,6 +23,7 @@
 #include
 #include "1.0/Utils.h"
 #include "1.3/Callbacks.h"
+#include "1.3/Utils.h"
 #include "GeneratedTestHarness.h"
 #include "TestHarness.h"
 #include "Utils.h"
@@ -32,7 +33,6 @@ namespace android::hardware::neuralnetworks::V1_3::vts::functional {
 
 using HidlToken = hidl_array<uint8_t, static_cast<uint32_t>(V1_2::Constant::BYTE_SIZE_OF_CACHE_TOKEN)>;
 using implementation::PreparedModelCallback;
-using V1_0::ErrorStatus;
 using V1_1::ExecutionPreference;
 
 // internal helper function
@@ -55,8 +55,8 @@ void createPreparedModel(const sp<IDevice>& device, const Model& model,
 
     // launch prepare model
     const sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
     const Return<ErrorStatus> prepareLaunchStatus = device->prepareModel_1_3(
-            model, ExecutionPreference::FAST_SINGLE_ANSWER, hidl_vec<hidl_handle>(),
-            hidl_vec<hidl_handle>(), HidlToken(), preparedModelCallback);
+            model, ExecutionPreference::FAST_SINGLE_ANSWER, kDefaultPriority, {},
+            hidl_vec<hidl_handle>(), hidl_vec<hidl_handle>(), HidlToken(), preparedModelCallback);
     ASSERT_TRUE(prepareLaunchStatus.isOk());
     ASSERT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(prepareLaunchStatus));
diff --git a/neuralnetworks/1.3/vts/functional/include/1.3/Callbacks.h b/neuralnetworks/1.3/vts/functional/include/1.3/Callbacks.h
index fb19a841e1..e9dec2d7ae 100644
--- a/neuralnetworks/1.3/vts/functional/include/1.3/Callbacks.h
+++ b/neuralnetworks/1.3/vts/functional/include/1.3/Callbacks.h
@@ -18,8 +18,11 @@
 #define ANDROID_HARDWARE_NEURALNETWORKS_V1_3_CALLBACKS_H
 
 #include
+#include
 #include
+#include
 #include
+#include
 #include
 #include
 #include
@@ -136,7 +139,7 @@ class PreparedModelCallback : public IPreparedModelCallback {
      * @param preparedModel Returned model that has been prepared for execution,
      *     nullptr if the model was unable to be prepared.
      */
-    Return<void> notify_1_3(V1_0::ErrorStatus status,
+    Return<void> notify_1_3(V1_3::ErrorStatus status,
                             const sp<V1_3::IPreparedModel>& preparedModel) override;
 
     /**
@@ -158,7 +161,7 @@ class PreparedModelCallback : public IPreparedModelCallback {
      *     - GENERAL_FAILURE if there is an unspecified error
      *     - INVALID_ARGUMENT if the input model is invalid
      */
-    V1_0::ErrorStatus getStatus() const;
+    ErrorStatus getStatus() const;
 
     /**
      * Retrieves the model that has been prepared for execution from the
@@ -173,13 +176,216 @@ class PreparedModelCallback : public IPreparedModelCallback {
     sp<V1_0::IPreparedModel> getPreparedModel() const;
 
   private:
+    Return<void> notifyInternal(ErrorStatus status, const sp<V1_0::IPreparedModel>& preparedModel);
+
     mutable std::mutex mMutex;
     mutable std::condition_variable mCondition;
     bool mNotified GUARDED_BY(mMutex) = false;
-    V1_0::ErrorStatus mErrorStatus = V1_0::ErrorStatus::GENERAL_FAILURE;
+    ErrorStatus mErrorStatus = ErrorStatus::GENERAL_FAILURE;
     sp<V1_0::IPreparedModel> mPreparedModel;
 };
 
+/**
+ * The ExecutionCallback class is used to receive the results of the execution
+ * from a task executing asynchronously with respect to the runtime. If a
+ * calling thread calls wait or get* on an ExecutionCallback object and the
+ * corresponding asynchronous task has not finished the execution, the calling
+ * thread will block until the asynchronous task has called one of the notify*
+ * methods.
+ *
+ * If the callback object is notified more than once, only the results of the
+ * first call to notify* are used, and the results from subsequent calls are
+ * discarded.
+ *
+ * This callback object is passed as an argument to IPreparedModel::execute*.
+ */
+class ExecutionCallback : public IExecutionCallback {
+  public:
+    /**
+     * IExecutionCallback::notify marks the callback object with the return
+     * status of the asynchronous execution that held this callback and enables
+     * all prior and future wait calls on the ExecutionCallback object to
+     * proceed.
+     *
+     * One of the IExecutionCallback::notify* methods must be called on a given
+     * ExecutionCallback object.
+     *
+     * If the callback object is notified more than once, only the results of
+     * the first call to notify* are used, and the results from subsequent calls
+     * are discarded.
+     *
+     * @param status Error status returned from launching the asynchronous task
+     *     (if the launch fails) or from the asynchronous task itself (if the
+     *     launch succeeds). Must be:
+     *     - NONE if the asynchronous execution was successful
+     *     - DEVICE_UNAVAILABLE if driver is offline or busy
+     *     - GENERAL_FAILURE if there is an unspecified error
+     *     - OUTPUT_INSUFFICIENT_SIZE if provided output buffer is not large
+     *       enough to store the resultant values
+     *     - INVALID_ARGUMENT if the input request is invalid
+     */
+    Return<void> notify(V1_0::ErrorStatus status) override;
+
+    /**
+     * IExecutionCallback::notify_1_2 marks the callback object with the results
+     * (error status, dynamic output shapes, and timing information) of the
+     * asynchronous execution that held this callback and enables all prior and
+     * future wait calls on the ExecutionCallback object to proceed.
+     *
+     * One of the IExecutionCallback::notify* methods must be called on a given
+     * ExecutionCallback object.
+     *
+     * If the callback object is notified more than once, only the results of
+     * the first call to notify* are used, and the results from subsequent calls
+     * are discarded.
+     *
+     * @param status Error status returned from launching the asynchronous task
+     *     (if the launch fails) or from the asynchronous task itself (if the
+     *     launch succeeds). Must be:
+     *     - NONE if the asynchronous execution was successful
+     *     - DEVICE_UNAVAILABLE if driver is offline or busy
+     *     - GENERAL_FAILURE if the asynchronous task resulted in an unspecified
+     *       error
+     *     - OUTPUT_INSUFFICIENT_SIZE if at least one output operand buffer is
+     *       not large enough to store the corresponding output
+     *     - INVALID_ARGUMENT if one of the input arguments to prepareModel is
+     *       invalid
+     * @param outputShapes A list of shape information of model output operands.
+     *     The index into "outputShapes" corresponds to the index of the output
+     *     operand in the Request outputs vector. outputShapes must be empty
+     *     unless the status is either NONE or OUTPUT_INSUFFICIENT_SIZE.
+     * @param timing Duration of execution. Unless MeasureTiming::YES was passed
+     *     when launching the execution and status is NONE, all times must be
+     *     reported as UINT64_MAX. A driver may choose to report any time as
+     *     UINT64_MAX, indicating that particular measurement is not available.
+     */
+    Return<void> notify_1_2(V1_0::ErrorStatus status,
+                            const hidl_vec<V1_2::OutputShape>& outputShapes,
+                            const V1_2::Timing& timing) override;
+
+    /**
+     * IExecutionCallback::notify_1_3 marks the callback object with the results
+     * (error status, dynamic output shapes, and timing information) of the
+     * asynchronous execution that held this callback and enables all prior and
+     * future wait calls on the ExecutionCallback object to proceed.
+     *
+     * One of the IExecutionCallback::notify* methods must be called on a given
+     * ExecutionCallback object.
+     *
+     * If the callback object is notified more than once, only the results of
+     * the first call to notify* are used, and the results from subsequent calls
+     * are discarded.
+     *
+     * @param status Error status returned from launching the asynchronous task
+     *     (if the launch fails) or from the asynchronous task itself (if the
+     *     launch succeeds). Must be:
+     *     - NONE if the asynchronous execution was successful
+     *     - DEVICE_UNAVAILABLE if driver is offline or busy
+     *     - GENERAL_FAILURE if the asynchronous task resulted in an unspecified
+     *       error
+     *     - OUTPUT_INSUFFICIENT_SIZE if at least one output operand buffer is
+     *       not large enough to store the corresponding output
+     *     - INVALID_ARGUMENT if one of the input arguments to prepareModel is
+     *       invalid
+     *     - MISSED_DEADLINE_* if the deadline was not met
+     * @param outputShapes A list of shape information of model output operands.
+     *     The index into "outputShapes" corresponds to the index of the output
+     *     operand in the Request outputs vector. outputShapes must be empty
+     *     unless the status is either NONE or OUTPUT_INSUFFICIENT_SIZE.
+     * @param timing Duration of execution. Unless MeasureTiming::YES was passed
+     *     when launching the execution and status is NONE, all times must be
+     *     reported as UINT64_MAX. A driver may choose to report any time as
+     *     UINT64_MAX, indicating that particular measurement is not available.
+     */
+    Return<void> notify_1_3(V1_3::ErrorStatus status,
+                            const hidl_vec<V1_2::OutputShape>& outputShapes,
+                            const V1_2::Timing& timing) override;
+
+    /**
+     * ExecutionCallback::wait blocks until notify* has been called on the
+     * callback object.
+     */
+    void wait() const;
+
+    /**
+     * Retrieves the error status returned from the asynchronous task launched
+     * by one of the IPreparedModel::execute* methods. If
+     * IPreparedModel::execute* (but not IPreparedModel::executeSynchronously*)
+     * has not finished asynchronously executing, this call will block until the
+     * asynchronous task notifies the object.
+     *
+     * @return status Error status returned from launching the asynchronous task
+     *     (if the launch fails) or from the asynchronous task itself (if the
+     *     launch succeeds). Must be:
+     *     - NONE if the asynchronous execution was successful
+     *     - DEVICE_UNAVAILABLE if driver is offline or busy
+     *     - GENERAL_FAILURE if the asynchronous task resulted in an unspecified
+     *       error
+     *     - OUTPUT_INSUFFICIENT_SIZE if at least one output operand buffer is
+     *       not large enough to store the corresponding output
+     *     - INVALID_ARGUMENT if one of the input arguments to prepareModel is
+     *       invalid
+     *     - MISSED_DEADLINE_* if the deadline could not be met
+     */
+    V1_3::ErrorStatus getStatus() const;
+
+    /**
+     * Retrieves the output shapes returned from the asynchronous task launched
+     * by one of the IPreparedModel::execute* methods. If
+     * IPreparedModel::execute* (but not IPreparedModel::executeSynchronously*)
+     * has not finished asynchronously executing, this call will block until the
+     * asynchronous task notifies the object.
+     *
+     * If the asynchronous task was launched by IPreparedModel::execute, an
+     * empty vector will be returned.
+     *
+     * @return outputShapes A list of shape information of model output
+     *     operands. The index into "outputShapes" corresponds to the index of
+     *     the output operand in the Request outputs vector. outputShapes must
+     *     be empty unless the status is either NONE or
+     *     OUTPUT_INSUFFICIENT_SIZE. outputShapes may be empty if the status is
+     *     NONE and all model output operands are fully-specified at execution
+     *     time. outputShapes must have the same number of elements as the
+     *     number of model output operands if the status is
+     *     OUTPUT_INSUFFICIENT_SIZE, or if the status is NONE and the model has
+     *     at least one output operand that is not fully-specified.
+     */
+    const std::vector<V1_2::OutputShape>& getOutputShapes() const;
+
+    /**
+     * Retrieves the timing information returned from the asynchronous task
+     * launched by one of the IPreparedModel::execute* methods. If
+     * IPreparedModel::execute* (but not IPreparedModel::executeSynchronously*)
+     * has not finished asynchronously executing, this call will block until the
+     * asynchronous task notifies the object.
+     *
+     * If the asynchronous task was launched by IPreparedModel::execute, every
+     * time must be UINT64_MAX.
+     *
+     * @return timing Duration of the execution. Every time must be UINT64_MAX
+     *     unless the status is NONE.
+     */
+    V1_2::Timing getTiming() const;
+
+  private:
+    /*
+     * ExecutionCallback::notifyInternal stores the results of the execution
+     * (status, output shapes, and timing information) in the ExecutionCallback
+     * object before any call to wait or get* returns. It then enables all prior
+     * and future wait calls on the ExecutionCallback object to proceed.
+     */
+    Return<void> notifyInternal(V1_3::ErrorStatus errorStatus,
+                                hidl_vec<V1_2::OutputShape> outputShapes, V1_2::Timing timing);
+
+    // members
+    mutable std::mutex mMutex;
+    mutable std::condition_variable mCondition;
+    bool mNotified GUARDED_BY(mMutex) = false;
+    V1_3::ErrorStatus mErrorStatus = V1_3::ErrorStatus::GENERAL_FAILURE;
+    std::vector<V1_2::OutputShape> mOutputShapes = {};
+    V1_2::Timing mTiming = {};
+};
+
 }  // namespace android::hardware::neuralnetworks::V1_3::implementation
 
 #endif  // ANDROID_HARDWARE_NEURALNETWORKS_V1_3_CALLBACKS_H
diff --git a/neuralnetworks/1.3/vts/functional/include/1.3/Utils.h b/neuralnetworks/1.3/vts/functional/include/1.3/Utils.h
new file mode 100644
index 0000000000..3661b66445
--- /dev/null
+++ b/neuralnetworks/1.3/vts/functional/include/1.3/Utils.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_NEURALNETWORKS_V1_3_UTILS_H
+#define ANDROID_HARDWARE_NEURALNETWORKS_V1_3_UTILS_H
+
+#include
+#include
+
+namespace android::hardware::neuralnetworks {
+
+inline constexpr V1_3::Priority kDefaultPriority = V1_3::Priority::MEDIUM;
+
+}  // namespace android::hardware::neuralnetworks
+
+namespace android::hardware::neuralnetworks::V1_3 {
+
+// pretty-print values for error messages
+::std::ostream& operator<<(::std::ostream& os, ErrorStatus errorStatus);
+
+}  // namespace android::hardware::neuralnetworks::V1_3
+
+#endif  // ANDROID_HARDWARE_NEURALNETWORKS_V1_3_UTILS_H
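
For context, a minimal sketch of how a 1.3 VTS test drives the signatures touched by this patch. It is illustrative only and not part of the change: it assumes the same includes and using-declarations as VtsHalNeuralnetworks.cpp and ValidateRequest.cpp above, `device`, `model`, and `request` are placeholder variables, and the `{}` arguments are the new, empty deadline parameters.

    // Prepare the model with the default priority from 1.3/Utils.h and no deadline ({}).
    const sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
    const Return<ErrorStatus> prepareLaunchStatus = device->prepareModel_1_3(
            model, ExecutionPreference::FAST_SINGLE_ANSWER, kDefaultPriority, {},
            hidl_vec<hidl_handle>(), hidl_vec<hidl_handle>(), HidlToken(), preparedModelCallback);
    ASSERT_TRUE(prepareLaunchStatus.isOk());
    preparedModelCallback->wait();
    ASSERT_EQ(ErrorStatus::NONE, preparedModelCallback->getStatus());
    const sp<IPreparedModel> preparedModel =
            IPreparedModel::castFrom(preparedModelCallback->getPreparedModel()).withDefault(nullptr);
    ASSERT_NE(nullptr, preparedModel.get());

    // Execute asynchronously; the V1_3 ExecutionCallback added above records the status,
    // output shapes, and timing from whichever notify* overload the driver invokes.
    const sp<ExecutionCallback> executionCallback = new ExecutionCallback();
    const Return<ErrorStatus> executeLaunchStatus =
            preparedModel->execute_1_3(request, MeasureTiming::NO, {}, executionCallback);
    ASSERT_TRUE(executeLaunchStatus.isOk());
    executionCallback->wait();
    EXPECT_EQ(ErrorStatus::NONE, executionCallback->getStatus());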