Modify NNAPI VTS tests to run on version 1.3

Bug: 139120468
Test: VtsHalNeuralnetworksV1_3TargetTest
Change-Id: I4654dc75c17f8801103015dc1da91663dfa28d52
Merged-In: I4654dc75c17f8801103015dc1da91663dfa28d52
(cherry picked from commit b49dadfb64)
Author: Lev Proleev
Date:   2019-08-30 11:57:18 +01:00
parent 4d00307c5c
commit 5ef23f16ea
14 changed files with 170 additions and 577 deletions

1.2/vts/functional/Android.bp

@@ -14,12 +14,28 @@
// limitations under the License.
//
+cc_library_static {
+name: "VtsHalNeuralNetworksV1_2Callbacks",
+defaults: ["VtsHalTargetTestDefaults"],
+export_include_dirs: ["include"],
+srcs: [
+"Callbacks.cpp",
+],
+static_libs: [
+"android.hardware.neuralnetworks@1.0",
+"android.hardware.neuralnetworks@1.1",
+"android.hardware.neuralnetworks@1.2",
+],
+header_libs: [
+"libbase_headers",
+]
+}
cc_test {
name: "VtsHalNeuralnetworksV1_2TargetTest",
defaults: ["VtsHalTargetTestDefaults"],
srcs: [
"BasicTests.cpp",
"Callbacks.cpp",
"CompilationCachingTests.cpp",
"GeneratedTestHarness.cpp",
"TestAssertions.cpp",
@@ -45,6 +61,7 @@ cc_test {
"libneuralnetworks_generated_test_harness",
"libneuralnetworks_utils",
"VtsHalNeuralNetworksV1_0_utils",
"VtsHalNeuralNetworksV1_2Callbacks",
],
whole_static_libs: [
"neuralnetworks_generated_V1_0_example",

1.3/vts/functional/Android.bp (new file)

@@ -0,0 +1,58 @@
//
// Copyright (C) 2019 The Android Open Source Project
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
cc_test {
name: "VtsHalNeuralNetworksV1_3TargetTest",
defaults: ["VtsHalTargetTestDefaults"],
srcs: [
"BasicTests.cpp",
"CompilationCachingTests.cpp",
"GeneratedTestHarness.cpp",
"TestAssertions.cpp",
"ValidateBurst.cpp",
"ValidateModel.cpp",
"ValidateRequest.cpp",
"VtsHalNeuralnetworks.cpp",
],
shared_libs: [
"libfmq",
"libnativewindow",
],
static_libs: [
"android.hardware.neuralnetworks@1.0",
"android.hardware.neuralnetworks@1.1",
"android.hardware.neuralnetworks@1.2",
"android.hardware.neuralnetworks@1.3",
"android.hidl.allocator@1.0",
"android.hidl.memory@1.0",
"libgmock",
"libhidlmemory",
"libneuralnetworks_generated_test_harness",
"libneuralnetworks_utils",
"VtsHalNeuralNetworksV1_0_utils",
"VtsHalNeuralNetworksV1_2Callbacks",
],
whole_static_libs: [
"neuralnetworks_generated_V1_0_example",
"neuralnetworks_generated_V1_1_example",
"neuralnetworks_generated_V1_2_example",
"neuralnetworks_generated_V1_3_example",
],
header_libs: [
"libneuralnetworks_headers",
],
test_suites: ["general-tests"],
}

1.3/vts/functional/BasicTests.cpp

@@ -18,11 +18,14 @@
#include "VtsHalNeuralnetworks.h"
-namespace android::hardware::neuralnetworks::V1_2::vts::functional {
+namespace android::hardware::neuralnetworks::V1_3::vts::functional {
using V1_0::DeviceStatus;
using V1_0::ErrorStatus;
using V1_0::PerformanceInfo;
+using V1_2::Constant;
+using V1_2::DeviceType;
+using V1_2::Extension;
// create device test
TEST_P(NeuralnetworksHidlTest, CreateDevice) {}
@@ -37,7 +40,7 @@ TEST_P(NeuralnetworksHidlTest, StatusTest) {
// initialization
TEST_P(NeuralnetworksHidlTest, GetCapabilitiesTest) {
using OperandPerformance = Capabilities::OperandPerformance;
-Return<void> ret = kDevice->getCapabilities_1_2([](ErrorStatus status,
+Return<void> ret = kDevice->getCapabilities_1_3([](ErrorStatus status,
const Capabilities& capabilities) {
EXPECT_EQ(ErrorStatus::NONE, status);
@@ -58,57 +61,4 @@ TEST_P(NeuralnetworksHidlTest, GetCapabilitiesTest) {
});
EXPECT_TRUE(ret.isOk());
}
-// device version test
-TEST_P(NeuralnetworksHidlTest, GetDeviceVersionStringTest) {
-Return<void> ret =
-kDevice->getVersionString([](ErrorStatus status, const hidl_string& version) {
-EXPECT_EQ(ErrorStatus::NONE, status);
-EXPECT_LT(0, version.size());
-});
-EXPECT_TRUE(ret.isOk());
-}
-// device type test
-TEST_P(NeuralnetworksHidlTest, GetDeviceTypeTest) {
-Return<void> ret = kDevice->getType([](ErrorStatus status, DeviceType type) {
-EXPECT_EQ(ErrorStatus::NONE, status);
-EXPECT_TRUE(type == DeviceType::OTHER || type == DeviceType::CPU ||
-type == DeviceType::GPU || type == DeviceType::ACCELERATOR);
-});
-EXPECT_TRUE(ret.isOk());
-}
-// device supported extensions test
-TEST_P(NeuralnetworksHidlTest, GetDeviceSupportedExtensionsTest) {
-Return<void> ret = kDevice->getSupportedExtensions(
-[](ErrorStatus status, const hidl_vec<Extension>& extensions) {
-EXPECT_EQ(ErrorStatus::NONE, status);
-for (auto& extension : extensions) {
-std::string extensionName = extension.name;
-EXPECT_FALSE(extensionName.empty());
-for (char c : extensionName) {
-EXPECT_TRUE(('a' <= c && c <= 'z') || ('0' <= c && c <= '9') || c == '_' ||
-c == '.')
-<< "Extension name contains an illegal character: " << c;
-}
-EXPECT_NE(extensionName.find('.'), std::string::npos)
-<< "Extension name must start with the reverse domain name of the "
-"vendor";
-}
-});
-EXPECT_TRUE(ret.isOk());
-}
-// getNumberOfCacheFilesNeeded test
-TEST_P(NeuralnetworksHidlTest, getNumberOfCacheFilesNeeded) {
-Return<void> ret = kDevice->getNumberOfCacheFilesNeeded(
-[](ErrorStatus status, uint32_t numModelCache, uint32_t numDataCache) {
-EXPECT_EQ(ErrorStatus::NONE, status);
-EXPECT_LE(numModelCache,
-static_cast<uint32_t>(Constant::MAX_NUMBER_OF_CACHE_FILES));
-EXPECT_LE(numDataCache, static_cast<uint32_t>(Constant::MAX_NUMBER_OF_CACHE_FILES));
-});
-EXPECT_TRUE(ret.isOk());
-}
-} // namespace android::hardware::neuralnetworks::V1_2::vts::functional
+} // namespace android::hardware::neuralnetworks::V1_3::vts::functional
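The V1_2-only device queries above (version string, device type, extensions, cache-file counts) are dropped from the 1.3 copy; they stay covered by the 1.2 VTS suite. For reference, a minimal sketch of the synchronous HIDL callback idiom the remaining test uses, assuming a bound V1_3 IDevice; queryCapabilities is an illustrative name, not part of the tests:

#include <android/hardware/neuralnetworks/1.3/IDevice.h>

using ::android::sp;
using ::android::hardware::Return;
using ::android::hardware::neuralnetworks::V1_0::ErrorStatus;
using ::android::hardware::neuralnetworks::V1_3::Capabilities;
using ::android::hardware::neuralnetworks::V1_3::IDevice;

// HIDL methods that return results take a callback; it is invoked
// synchronously before getCapabilities_1_3 returns, so capturing locals
// by reference is safe.
bool queryCapabilities(const sp<IDevice>& device) {
    bool ok = false;
    Return<void> ret = device->getCapabilities_1_3(
            [&ok](ErrorStatus status, const Capabilities& /*capabilities*/) {
                ok = (status == ErrorStatus::NONE);
            });
    // ret.isOk() reports transport-level success, independent of the HAL status.
    return ret.isOk() && ok;
}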

1.3/vts/functional/Callbacks.cpp (deleted)

@@ -1,143 +0,0 @@
/*
* Copyright (C) 2019 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#define LOG_TAG "Callbacks"
#include "1.2/Callbacks.h"
#include <android-base/logging.h>
#include <limits>
namespace android::hardware::neuralnetworks::V1_2::implementation {
using V1_0::ErrorStatus;
constexpr Timing kNoTiming = {.timeOnDevice = std::numeric_limits<uint64_t>::max(),
.timeInDriver = std::numeric_limits<uint64_t>::max()};
// PreparedModelCallback methods begin here
Return<void> PreparedModelCallback::notify(ErrorStatus errorStatus,
const sp<V1_0::IPreparedModel>& preparedModel) {
{
std::lock_guard<std::mutex> hold(mMutex);
// quick-return if object has already been notified
if (mNotified) {
return Void();
}
// store results and mark as notified
mErrorStatus = errorStatus;
mPreparedModel = preparedModel;
mNotified = true;
}
mCondition.notify_all();
return Void();
}
Return<void> PreparedModelCallback::notify_1_2(ErrorStatus errorStatus,
const sp<V1_2::IPreparedModel>& preparedModel) {
return notify(errorStatus, preparedModel);
}
void PreparedModelCallback::wait() const {
std::unique_lock<std::mutex> lock(mMutex);
mCondition.wait(lock, [this] { return mNotified; });
}
ErrorStatus PreparedModelCallback::getStatus() const {
wait();
return mErrorStatus;
}
sp<V1_0::IPreparedModel> PreparedModelCallback::getPreparedModel() const {
wait();
return mPreparedModel;
}
// ExecutionCallback methods begin here
Return<void> ExecutionCallback::notify(ErrorStatus errorStatus) {
notifyInternal(errorStatus, {}, kNoTiming);
return Void();
}
Return<void> ExecutionCallback::notify_1_2(ErrorStatus errorStatus,
const hidl_vec<OutputShape>& outputShapes,
const Timing& timing) {
if (errorStatus == ErrorStatus::OUTPUT_INSUFFICIENT_SIZE) {
// outputShapes must not be empty if OUTPUT_INSUFFICIENT_SIZE.
if (outputShapes.size() == 0) {
LOG(ERROR) << "Notified with empty output shape vector when OUTPUT_INSUFFICIENT_SIZE";
notifyInternal(ErrorStatus::GENERAL_FAILURE, {}, kNoTiming);
return Void();
}
} else if (errorStatus != ErrorStatus::NONE) {
// outputShapes must be empty if errorStatus is neither NONE nor OUTPUT_INSUFFICIENT_SIZE.
if (outputShapes.size() != 0) {
LOG(ERROR) << "Notified with non-empty output shape vector when error status is "
"neither NONE nor OUTPUT_INSUFFICIENT_SIZE";
notifyInternal(ErrorStatus::GENERAL_FAILURE, {}, kNoTiming);
return Void();
}
}
notifyInternal(errorStatus, outputShapes, timing);
return Void();
}
void ExecutionCallback::wait() const {
std::unique_lock<std::mutex> lock(mMutex);
mCondition.wait(lock, [this] { return mNotified; });
}
ErrorStatus ExecutionCallback::getStatus() const {
wait();
return mErrorStatus;
}
const std::vector<OutputShape>& ExecutionCallback::getOutputShapes() const {
wait();
return mOutputShapes;
}
Timing ExecutionCallback::getTiming() const {
wait();
return mTiming;
}
void ExecutionCallback::notifyInternal(ErrorStatus errorStatus,
const hidl_vec<OutputShape>& outputShapes,
const Timing& timing) {
{
std::lock_guard<std::mutex> hold(mMutex);
// quick-return if object has already been notified
if (mNotified) {
return;
}
mErrorStatus = errorStatus;
mOutputShapes = outputShapes;
mTiming = timing;
mNotified = true;
}
mCondition.notify_all();
}
} // namespace android::hardware::neuralnetworks::V1_2::implementation
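This file was a verbatim copy of the 1.2 callback implementation; the 1.3 tests now link the new VtsHalNeuralNetworksV1_2Callbacks static library instead. A condensed sketch of the prepare handshake these callbacks implement, using the names from the surrounding tests (a bound device and a valid model are assumed):

// Launch is asynchronous: prepareModel_1_3 returns only a launch status,
// and the final status plus the prepared model arrive via the callback.
sp<PreparedModelCallback> callback = new PreparedModelCallback();
Return<ErrorStatus> launchStatus = device->prepareModel_1_3(
        model, ExecutionPreference::FAST_SINGLE_ANSWER,
        hidl_vec<hidl_handle>(), hidl_vec<hidl_handle>(),  // no caching
        HidlToken(), callback);
if (!launchStatus.isOk() || static_cast<ErrorStatus>(launchStatus) != ErrorStatus::NONE) {
    return;  // launch failed; the driver must still notify the callback
}
callback->wait();  // blocks until the driver calls notify or notify_1_2
ErrorStatus status = callback->getStatus();
sp<V1_0::IPreparedModel> prepared = callback->getPreparedModel();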

1.3/vts/functional/CompilationCachingTests.cpp

@@ -45,12 +45,15 @@ namespace generated_tests::mobilenet_quantized {
const test_helper::TestModel& get_test_model();
} // namespace generated_tests::mobilenet_quantized
-namespace android::hardware::neuralnetworks::V1_2::vts::functional {
+namespace android::hardware::neuralnetworks::V1_3::vts::functional {
using namespace test_helper;
-using implementation::PreparedModelCallback;
using V1_0::ErrorStatus;
using V1_1::ExecutionPreference;
+using V1_2::Constant;
+using V1_2::IPreparedModel;
+using V1_2::OperationType;
+using V1_2::implementation::PreparedModelCallback;
namespace float32_model {
@@ -302,7 +305,7 @@ class CompilationCachingTestBase : public testing::Test {
// See if the service can handle the model.
bool isModelFullySupported(const Model& model) {
bool fullySupportsModel = false;
-Return<void> supportedCall = kDevice->getSupportedOperations_1_2(
+Return<void> supportedCall = kDevice->getSupportedOperations_1_3(
model,
[&fullySupportsModel, &model](ErrorStatus status, const hidl_vec<bool>& supported) {
ASSERT_EQ(ErrorStatus::NONE, status);
@@ -323,7 +326,7 @@ class CompilationCachingTestBase : public testing::Test {
sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
hidl_array<uint8_t, sizeof(mToken)> cacheToken(mToken);
Return<ErrorStatus> prepareLaunchStatus =
-kDevice->prepareModel_1_2(model, ExecutionPreference::FAST_SINGLE_ANSWER,
+kDevice->prepareModel_1_3(model, ExecutionPreference::FAST_SINGLE_ANSWER,
modelCache, dataCache, cacheToken, preparedModelCallback);
ASSERT_TRUE(prepareLaunchStatus.isOk());
ASSERT_EQ(static_cast<ErrorStatus>(prepareLaunchStatus), ErrorStatus::NONE);
@@ -1371,4 +1374,4 @@ INSTANTIATE_TEST_CASE_P(TestCompilationCaching, CompilationCachingSecurityTest,
testing::Range(0U, 10U)),
printCompilationCachingSecurityTest);
-} // namespace android::hardware::neuralnetworks::V1_2::vts::functional
+} // namespace android::hardware::neuralnetworks::V1_3::vts::functional
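The caching tests first probe the driver and bail out when the model is not fully supported. A condensed sketch of that guard, mirroring isModelFullySupported above (kDevice and model come from the test fixture; an early return stands in for the test macros):

#include <algorithm>

bool fullySupported = false;
Return<void> ret = kDevice->getSupportedOperations_1_3(
        model, [&](ErrorStatus status, const hidl_vec<bool>& supported) {
            // one flag per operation, in model order
            fullySupported = status == ErrorStatus::NONE && !supported.empty() &&
                             std::all_of(supported.begin(), supported.end(),
                                         [](bool v) { return v; });
        });
if (!ret.isOk() || !fullySupported) {
    return;  // nothing to cache: the driver cannot prepare this model
}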

1.3/vts/functional/GeneratedTestHarness.cpp

@@ -27,6 +27,9 @@
#include <android/hardware/neuralnetworks/1.2/IExecutionCallback.h>
#include <android/hardware/neuralnetworks/1.2/IPreparedModel.h>
#include <android/hardware/neuralnetworks/1.2/IPreparedModelCallback.h>
+#include <android/hardware/neuralnetworks/1.2/types.h>
+#include <android/hardware/neuralnetworks/1.3/IDevice.h>
+#include <android/hardware/neuralnetworks/1.3/types.h>
#include <android/hidl/allocator/1.0/IAllocator.h>
#include <android/hidl/memory/1.0/IMemory.h>
#include <hidlmemory/mapping.h>
@@ -44,17 +47,24 @@
#include "Utils.h"
#include "VtsHalNeuralnetworks.h"
-namespace android::hardware::neuralnetworks::V1_2::vts::functional {
+namespace android::hardware::neuralnetworks::V1_3::vts::functional {
using namespace test_helper;
using hidl::memory::V1_0::IMemory;
-using implementation::ExecutionCallback;
-using implementation::PreparedModelCallback;
using V1_0::DataLocation;
using V1_0::ErrorStatus;
using V1_0::OperandLifeTime;
using V1_0::Request;
using V1_1::ExecutionPreference;
+using V1_2::Constant;
+using V1_2::IPreparedModel;
+using V1_2::MeasureTiming;
+using V1_2::OperationType;
+using V1_2::OutputShape;
+using V1_2::SymmPerChannelQuantParams;
+using V1_2::Timing;
+using V1_2::implementation::ExecutionCallback;
+using V1_2::implementation::PreparedModelCallback;
using HidlToken = hidl_array<uint8_t, static_cast<uint32_t>(Constant::BYTE_SIZE_OF_CACHE_TOKEN)>;
enum class OutputType { FULLY_SPECIFIED, UNSPECIFIED, INSUFFICIENT };
@@ -405,4 +415,4 @@ INSTANTIATE_GENERATED_TEST(GeneratedTest,
INSTANTIATE_GENERATED_TEST(DynamicOutputShapeTest,
[](const TestModel& testModel) { return !testModel.expectFailure; });
-} // namespace android::hardware::neuralnetworks::V1_2::vts::functional
+} // namespace android::hardware::neuralnetworks::V1_3::vts::functional

1.3/vts/functional/GeneratedTestHarness.h

@@ -14,19 +14,19 @@
* limitations under the License.
*/
-#ifndef ANDROID_HARDWARE_NEURALNETWORKS_V1_2_GENERATED_TEST_HARNESS_H
-#define ANDROID_HARDWARE_NEURALNETWORKS_V1_2_GENERATED_TEST_HARNESS_H
+#ifndef ANDROID_HARDWARE_NEURALNETWORKS_V1_3_GENERATED_TEST_HARNESS_H
+#define ANDROID_HARDWARE_NEURALNETWORKS_V1_3_GENERATED_TEST_HARNESS_H
-#include <android/hardware/neuralnetworks/1.2/IDevice.h>
#include <android/hardware/neuralnetworks/1.2/IPreparedModel.h>
-#include <android/hardware/neuralnetworks/1.2/types.h>
+#include <android/hardware/neuralnetworks/1.3/IDevice.h>
+#include <android/hardware/neuralnetworks/1.3/types.h>
#include <functional>
#include <vector>
#include "1.0/Utils.h"
#include "TestHarness.h"
#include "VtsHalNeuralnetworks.h"
-namespace android::hardware::neuralnetworks::V1_2::vts::functional {
+namespace android::hardware::neuralnetworks::V1_3::vts::functional {
using NamedModel = Named<const test_helper::TestModel*>;
using GeneratedTestParam = std::tuple<NamedDevice, NamedModel>;
@@ -55,11 +55,12 @@ class ValidationTest : public GeneratedTestBase {};
Model createModel(const test_helper::TestModel& testModel);
-void PrepareModel(const sp<IDevice>& device, const Model& model, sp<IPreparedModel>* preparedModel);
+void PrepareModel(const sp<IDevice>& device, const Model& model,
+sp<V1_2::IPreparedModel>* preparedModel);
-void EvaluatePreparedModel(const sp<IPreparedModel>& preparedModel,
+void EvaluatePreparedModel(const sp<V1_2::IPreparedModel>& preparedModel,
const test_helper::TestModel& testModel, bool testDynamicOutputShape);
-} // namespace android::hardware::neuralnetworks::V1_2::vts::functional
+} // namespace android::hardware::neuralnetworks::V1_3::vts::functional
-#endif // ANDROID_HARDWARE_NEURALNETWORKS_V1_2_GENERATED_TEST_HARNESS_H
+#endif // ANDROID_HARDWARE_NEURALNETWORKS_V1_3_GENERATED_TEST_HARNESS_H

1.3/vts/functional/TestAssertions.cpp

@@ -14,10 +14,10 @@
* limitations under the License.
*/
-#include <android/hardware/neuralnetworks/1.2/types.h>
+#include <android/hardware/neuralnetworks/1.3/types.h>
#include "TestHarness.h"
-namespace android::hardware::neuralnetworks::V1_2 {
+namespace android::hardware::neuralnetworks::V1_3 {
// Make sure that the HIDL enums are compatible with the values defined in
// frameworks/ml/nn/tools/test_generator/test_harness/include/TestHarness.h.
@@ -25,6 +25,8 @@ using namespace test_helper;
#define CHECK_TEST_ENUM(EnumType, enumValue) \
static_assert(static_cast<EnumType>(Test##EnumType::enumValue) == EnumType::enumValue)
+using V1_2::OperationType;
CHECK_TEST_ENUM(OperandType, FLOAT32);
CHECK_TEST_ENUM(OperandType, INT32);
CHECK_TEST_ENUM(OperandType, UINT32);
@@ -39,6 +41,7 @@ CHECK_TEST_ENUM(OperandType, FLOAT16);
CHECK_TEST_ENUM(OperandType, TENSOR_QUANT8_SYMM_PER_CHANNEL);
CHECK_TEST_ENUM(OperandType, TENSOR_QUANT16_ASYMM);
CHECK_TEST_ENUM(OperandType, TENSOR_QUANT8_SYMM);
+CHECK_TEST_ENUM(OperandType, TENSOR_QUANT8_ASYMM_SIGNED);
CHECK_TEST_ENUM(OperationType, ADD);
CHECK_TEST_ENUM(OperationType, AVERAGE_POOL_2D);
@@ -138,4 +141,4 @@ CHECK_TEST_ENUM(OperationType, RESIZE_NEAREST_NEIGHBOR);
#undef CHECK_TEST_ENUM
-} // namespace android::hardware::neuralnetworks::V1_2
+} // namespace android::hardware::neuralnetworks::V1_3
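The new static_assert guards the TENSOR_QUANT8_ASYMM_SIGNED operand type introduced in 1.3. A self-contained illustration of the CHECK_TEST_ENUM pattern, with two hypothetical mirrored enums standing in for the HAL and test-harness types:

#include <cstdint>

enum class OperandType : int32_t { FLOAT32 = 0, INT32 = 1 };      // HAL-side enum
enum class TestOperandType : int32_t { FLOAT32 = 0, INT32 = 1 };  // test-side mirror

// Fails to compile if the two enums ever disagree on a value, so the test
// harness can convert between them with a plain static_cast.
#define CHECK_TEST_ENUM(EnumType, enumValue) \
    static_assert(static_cast<EnumType>(Test##EnumType::enumValue) == EnumType::enumValue)

CHECK_TEST_ENUM(OperandType, FLOAT32);
CHECK_TEST_ENUM(OperandType, INT32);

#undef CHECK_TEST_ENUM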

1.3/vts/functional/ValidateBurst.cpp

@@ -28,13 +28,20 @@
#include <android-base/logging.h>
#include <cstring>
-namespace android::hardware::neuralnetworks::V1_2::vts::functional {
+namespace android::hardware::neuralnetworks::V1_3::vts::functional {
using nn::ExecutionBurstController;
using nn::RequestChannelSender;
using nn::ResultChannelReceiver;
using V1_0::ErrorStatus;
using V1_0::Request;
+using V1_2::FmqRequestDatum;
+using V1_2::FmqResultDatum;
+using V1_2::IBurstCallback;
+using V1_2::IBurstContext;
+using V1_2::IPreparedModel;
+using V1_2::MeasureTiming;
+using V1_2::Timing;
using ExecutionBurstCallback = ExecutionBurstController::ExecutionBurstCallback;
// This constant value represents the length of an FMQ that is large enough to
@@ -397,4 +404,4 @@ void validateBurst(const sp<IPreparedModel>& preparedModel, const Request& reque
ASSERT_NO_FATAL_FAILURE(validateBurstSanitized(preparedModel, request));
}
-} // namespace android::hardware::neuralnetworks::V1_2::vts::functional
+} // namespace android::hardware::neuralnetworks::V1_3::vts::functional

1.3/vts/functional/ValidateModel.cpp

@@ -21,21 +21,26 @@
#include "GeneratedTestHarness.h"
#include "VtsHalNeuralnetworks.h"
-namespace android::hardware::neuralnetworks::V1_2::vts::functional {
+namespace android::hardware::neuralnetworks::V1_3::vts::functional {
-using implementation::PreparedModelCallback;
using V1_0::ErrorStatus;
using V1_0::OperandLifeTime;
using V1_1::ExecutionPreference;
-using HidlToken = hidl_array<uint8_t, static_cast<uint32_t>(Constant::BYTE_SIZE_OF_CACHE_TOKEN)>;
+using V1_2::IPreparedModel;
+using V1_2::OperationType;
+using V1_2::OperationTypeRange;
+using V1_2::SymmPerChannelQuantParams;
+using V1_2::implementation::PreparedModelCallback;
+using HidlToken =
+hidl_array<uint8_t, static_cast<uint32_t>(V1_2::Constant::BYTE_SIZE_OF_CACHE_TOKEN)>;
///////////////////////// UTILITY FUNCTIONS /////////////////////////
static void validateGetSupportedOperations(const sp<IDevice>& device, const std::string& message,
const Model& model) {
SCOPED_TRACE(message + " [getSupportedOperations_1_2]");
SCOPED_TRACE(message + " [getSupportedOperations_1_3]");
Return<void> ret = device->getSupportedOperations_1_2(
Return<void> ret = device->getSupportedOperations_1_3(
model, [&](ErrorStatus status, const hidl_vec<bool>&) {
EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, status);
});
@@ -44,11 +49,11 @@ static void validateGetSupportedOperations(const sp<IDevice>& device, const std:
static void validatePrepareModel(const sp<IDevice>& device, const std::string& message,
const Model& model, ExecutionPreference preference) {
SCOPED_TRACE(message + " [prepareModel_1_2]");
SCOPED_TRACE(message + " [prepareModel_1_3]");
sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
Return<ErrorStatus> prepareLaunchStatus =
device->prepareModel_1_2(model, preference, hidl_vec<hidl_handle>(),
device->prepareModel_1_3(model, preference, hidl_vec<hidl_handle>(),
hidl_vec<hidl_handle>(), HidlToken(), preparedModelCallback);
ASSERT_TRUE(prepareLaunchStatus.isOk());
ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, static_cast<ErrorStatus>(prepareLaunchStatus));
@@ -710,4 +715,4 @@ void validateModel(const sp<IDevice>& device, const Model& model) {
mutateExecutionPreferenceTest(device, model);
}
-} // namespace android::hardware::neuralnetworks::V1_2::vts::functional
+} // namespace android::hardware::neuralnetworks::V1_3::vts::functional

1.3/vts/functional/ValidateRequest.cpp

@@ -24,11 +24,15 @@
#include "Utils.h"
#include "VtsHalNeuralnetworks.h"
-namespace android::hardware::neuralnetworks::V1_2::vts::functional {
+namespace android::hardware::neuralnetworks::V1_3::vts::functional {
-using implementation::ExecutionCallback;
using V1_0::ErrorStatus;
using V1_0::Request;
+using V1_2::IPreparedModel;
+using V1_2::MeasureTiming;
+using V1_2::OutputShape;
+using V1_2::Timing;
+using V1_2::implementation::ExecutionCallback;
///////////////////////// UTILITY FUNCTIONS /////////////////////////
@@ -165,4 +169,4 @@ void validateRequestFailure(const sp<IPreparedModel>& preparedModel, const Reque
ASSERT_TRUE(executeStatus.isOk());
}
-} // namespace android::hardware::neuralnetworks::V1_2::vts::functional
+} // namespace android::hardware::neuralnetworks::V1_3::vts::functional

1.3/vts/functional/VtsHalNeuralnetworks.cpp

@@ -26,13 +26,15 @@
#include "GeneratedTestHarness.h"
#include "TestHarness.h"
-namespace android::hardware::neuralnetworks::V1_2::vts::functional {
+namespace android::hardware::neuralnetworks::V1_3::vts::functional {
-using implementation::PreparedModelCallback;
-using HidlToken = hidl_array<uint8_t, static_cast<uint32_t>(Constant::BYTE_SIZE_OF_CACHE_TOKEN)>;
+using HidlToken =
+hidl_array<uint8_t, static_cast<uint32_t>(V1_2::Constant::BYTE_SIZE_OF_CACHE_TOKEN)>;
using V1_0::ErrorStatus;
using V1_0::Request;
using V1_1::ExecutionPreference;
+using V1_2::IPreparedModel;
+using V1_2::implementation::PreparedModelCallback;
// internal helper function
void createPreparedModel(const sp<IDevice>& device, const Model& model,
@@ -42,7 +44,7 @@ void createPreparedModel(const sp<IDevice>& device, const Model& model,
// see if service can handle model
bool fullySupportsModel = false;
-const Return<void> supportedCall = device->getSupportedOperations_1_2(
+const Return<void> supportedCall = device->getSupportedOperations_1_3(
model, [&fullySupportsModel](ErrorStatus status, const hidl_vec<bool>& supported) {
ASSERT_EQ(ErrorStatus::NONE, status);
ASSERT_NE(0ul, supported.size());
@@ -53,7 +55,7 @@ void createPreparedModel(const sp<IDevice>& device, const Model& model,
// launch prepare model
const sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
-const Return<ErrorStatus> prepareLaunchStatus = device->prepareModel_1_2(
+const Return<ErrorStatus> prepareLaunchStatus = device->prepareModel_1_3(
model, ExecutionPreference::FAST_SINGLE_ANSWER, hidl_vec<hidl_handle>(),
hidl_vec<hidl_handle>(), HidlToken(), preparedModelCallback);
ASSERT_TRUE(prepareLaunchStatus.isOk());
@@ -64,8 +66,8 @@ void createPreparedModel(const sp<IDevice>& device, const Model& model,
const ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
*preparedModel = getPreparedModel_1_2(preparedModelCallback);
-// The getSupportedOperations_1_2 call returns a list of operations that are
-// guaranteed not to fail if prepareModel_1_2 is called, and
+// The getSupportedOperations_1_3 call returns a list of operations that are
+// guaranteed not to fail if prepareModel_1_3 is called, and
// 'fullySupportsModel' is true i.f.f. the entire model is guaranteed.
// If a driver has any doubt that it can prepare an operation, it must
// return false. So here, if a driver isn't sure if it can support an
@@ -163,9 +165,9 @@ TEST_P(ValidationTest, Test) {
INSTANTIATE_GENERATED_TEST(ValidationTest, [](const test_helper::TestModel&) { return true; });
-sp<IPreparedModel> getPreparedModel_1_2(const sp<implementation::PreparedModelCallback>& callback) {
+sp<IPreparedModel> getPreparedModel_1_2(const sp<PreparedModelCallback>& callback) {
sp<V1_0::IPreparedModel> preparedModelV1_0 = callback->getPreparedModel();
return IPreparedModel::castFrom(preparedModelV1_0).withDefault(nullptr);
}
-} // namespace android::hardware::neuralnetworks::V1_2::vts::functional
+} // namespace android::hardware::neuralnetworks::V1_3::vts::functional
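getPreparedModel_1_2 relies on HIDL's checked downcast. A minimal sketch of that idiom, assuming callback is a PreparedModelCallback that has already been notified:

// castFrom performs a runtime interface query across the binder boundary;
// withDefault(nullptr) folds both a failed cast and a transport error into
// nullptr, which the tests treat as "model could not be prepared".
sp<V1_0::IPreparedModel> base = callback->getPreparedModel();
sp<V1_2::IPreparedModel> preparedModel =
        V1_2::IPreparedModel::castFrom(base).withDefault(nullptr);
if (preparedModel == nullptr) {
    // the driver returned a pre-1.2 prepared model, or the cast failed
}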

1.3/vts/functional/VtsHalNeuralnetworks.h

@@ -14,17 +14,17 @@
* limitations under the License.
*/
-#ifndef ANDROID_HARDWARE_NEURALNETWORKS_V1_2_VTS_HAL_NEURALNETWORKS_H
-#define ANDROID_HARDWARE_NEURALNETWORKS_V1_2_VTS_HAL_NEURALNETWORKS_H
+#ifndef ANDROID_HARDWARE_NEURALNETWORKS_V1_3_VTS_HAL_NEURALNETWORKS_H
+#define ANDROID_HARDWARE_NEURALNETWORKS_V1_3_VTS_HAL_NEURALNETWORKS_H
-#include <android/hardware/neuralnetworks/1.2/IDevice.h>
#include <android/hardware/neuralnetworks/1.2/IPreparedModel.h>
-#include <android/hardware/neuralnetworks/1.2/types.h>
+#include <android/hardware/neuralnetworks/1.3/IDevice.h>
+#include <android/hardware/neuralnetworks/1.3/types.h>
#include <gtest/gtest.h>
#include "1.0/Utils.h"
#include "1.2/Callbacks.h"
-namespace android::hardware::neuralnetworks::V1_2::vts::functional {
+namespace android::hardware::neuralnetworks::V1_3::vts::functional {
using NamedDevice = Named<sp<IDevice>>;
using NeuralnetworksHidlTestParam = NamedDevice;
@@ -47,11 +47,12 @@ std::string printNeuralnetworksHidlTest(
// Create an IPreparedModel object. If the model cannot be prepared,
// "preparedModel" will be nullptr instead.
void createPreparedModel(const sp<IDevice>& device, const Model& model,
-sp<IPreparedModel>* preparedModel);
+sp<V1_2::IPreparedModel>* preparedModel);
// Utility function to get PreparedModel from callback and downcast to V1_2.
-sp<IPreparedModel> getPreparedModel_1_2(const sp<implementation::PreparedModelCallback>& callback);
+sp<V1_2::IPreparedModel> getPreparedModel_1_2(
+const sp<V1_2::implementation::PreparedModelCallback>& callback);
-} // namespace android::hardware::neuralnetworks::V1_2::vts::functional
+} // namespace android::hardware::neuralnetworks::V1_3::vts::functional
-#endif // ANDROID_HARDWARE_NEURALNETWORKS_V1_2_VTS_HAL_NEURALNETWORKS_H
+#endif // ANDROID_HARDWARE_NEURALNETWORKS_V1_3_VTS_HAL_NEURALNETWORKS_H

1.3/vts/functional/include/1.2/Callbacks.h (deleted)

@@ -1,325 +0,0 @@
/*
* Copyright (C) 2018 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef ANDROID_HARDWARE_NEURALNETWORKS_V1_2_CALLBACKS_H
#define ANDROID_HARDWARE_NEURALNETWORKS_V1_2_CALLBACKS_H
#include <android-base/thread_annotations.h>
#include <android/hardware/neuralnetworks/1.0/IExecutionCallback.h>
#include <android/hardware/neuralnetworks/1.0/IPreparedModelCallback.h>
#include <android/hardware/neuralnetworks/1.2/IExecutionCallback.h>
#include <android/hardware/neuralnetworks/1.2/IPreparedModelCallback.h>
#include <hidl/Status.h>
#include <condition_variable>
#include <mutex>
/*
* The Callback classes are used internally by the NeuralNetworks runtime to
* synchronize between different threads. An asynchronous task is launched
* paired with a callback object. When a client thread requires the output being
* generated by the asynchronous task, the client thread can wait for the result
* and be blocked until it has completed. Any wait may safely be called
* concurrently, even on the same callback object. When the asynchronous task
* has finished its workload, it must immediately call "notify*". If the
* asynchronous task has failed to launch, the function that tried to launch the
* asynchronous task must immediately call "notify*". This "notify*" call
* awakens any client threads waiting on the callback object.
*
* These classes exist to enable synchronization across HIDL. When
* synchronization is only required in the same process, consider using
* std::future, std::mutex, std::condition_variable, or std::experimental::latch
* instead.
*/
namespace android::hardware::neuralnetworks::V1_2::implementation {
/**
* The PreparedModelCallback class is used to receive the error status of
* preparing a model as well as the prepared model from a task executing
* asynchronously with respect to the runtime. If a calling thread calls wait
* or get* on a PreparedModelCallback object and the corresponding asynchronous
* task has not finished preparing the model, the calling thread will block
* until the asynchronous task has either called notify or notify_1_2.
*
* If the callback object is notified more than once, only the results of the
* first call to notify* are used, and the results from subsequent calls are
* discarded.
*
* This callback object is passed as an argument to IDevice::prepareModel*.
*/
class PreparedModelCallback : public IPreparedModelCallback {
public:
/**
* IPreparedModelCallback::notify marks the callback object with the return
* status of the asynchronous model preparation along with the prepared
* model, and allows all prior and future wait calls on the
* PreparedModelCallback object to proceed.
*
* Either IPreparedModelCallback::notify or
* IPreparedModelCallback::notify_1_2 must be called on a given
* PreparedModelCallback object.
*
* If the callback object is notified more than once, only the results of
* the first call to notify* are used, and the results from subsequent calls
* are discarded.
*
* @param status Error status returned from asynchronously preparing the
* model; will be:
* - NONE if the asynchronous preparation was successful
* - DEVICE_UNAVAILABLE if driver is offline or busy
* - GENERAL_FAILURE if there is an unspecified error
* - INVALID_ARGUMENT if the input model is invalid
* @param preparedModel Returned model that has been prepared for execution,
* nullptr if the model was unable to be prepared.
*/
Return<void> notify(V1_0::ErrorStatus status,
const sp<V1_0::IPreparedModel>& preparedModel) override;
/**
* IPreparedModelCallback::notify_1_2 marks the callback object with the
* return status of the asynchronous model preparation along with the
* prepared model, and allows all prior and future wait calls on the
* PreparedModelCallback object to proceed.
*
* Either IPreparedModelCallback::notify or
* IPreparedModelCallback::notify_1_2 must be called on a given
* PreparedModelCallback object.
*
* If the callback object is notified more than once, only the results of
* the first call to notify* are used, and the results from subsequent calls
* are discarded.
*
* @param status Error status returned from asynchronously preparing the
* model; will be:
* - NONE if the asynchronous preparation was successful
* - DEVICE_UNAVAILABLE if driver is offline or busy
* - GENERAL_FAILURE if there is an unspecified error
* - INVALID_ARGUMENT if the input model is invalid
* @param preparedModel Returned model that has been prepared for execution,
* nullptr if the model was unable to be prepared.
*/
Return<void> notify_1_2(V1_0::ErrorStatus status,
const sp<V1_2::IPreparedModel>& preparedModel) override;
/**
* PreparedModelCallback::wait blocks until notify* has been called on the
* callback object.
*/
void wait() const;
/**
* Retrieves the error status returned from the asynchronous task launched
* by IDevice::prepareModel*. If IDevice::prepareModel* has not finished
* asynchronously preparing the model, this call will block until the
* asynchronous task notifies the object.
*
* @return status Error status returned from asynchronously preparing the
* model; will be:
* - NONE if the asynchronous preparation was successful
* - DEVICE_UNAVAILABLE if driver is offline or busy
* - GENERAL_FAILURE if there is an unspecified error
* - INVALID_ARGUMENT if the input model is invalid
*/
V1_0::ErrorStatus getStatus() const;
/**
* Retrieves the model that has been prepared for execution from the
* asynchronous task launched by IDevice::prepareModel*. If
* IDevice::prepareModel* has not finished asynchronously preparing the
* model, this call will block until the asynchronous task notifies the
* object.
*
* @return preparedModel Returned model that has been prepared for
* execution, nullptr if the model was unable to be prepared.
*/
sp<V1_0::IPreparedModel> getPreparedModel() const;
private:
mutable std::mutex mMutex;
mutable std::condition_variable mCondition;
bool mNotified GUARDED_BY(mMutex) = false;
V1_0::ErrorStatus mErrorStatus = V1_0::ErrorStatus::GENERAL_FAILURE;
sp<V1_0::IPreparedModel> mPreparedModel;
};
/**
* The ExecutionCallback class is used to receive the results of the execution
* from a task executing asynchronously with respect to the runtime. If a
* calling thread calls wait or get* on a ExecutionCallback object and the
* corresponding asynchronous task has not finished the execution, the calling
* thread will block until the asynchronous task has either called notify or
* notify_1_2.
*
* If the callback object is notified more than once, only the results of the
* first call to notify* are used, and the results from subsequent calls are
* discarded.
*
* This callback object is passed as an argument to IPreparedModel::execute*.
*/
class ExecutionCallback : public IExecutionCallback {
public:
/**
* IExecutionCallback::notify marks the callback object with the return
* status of the asynchronous execution that held this callback and enables
* all prior and future wait calls on the ExecutionCallback object to
* proceed.
*
* Either IExecutionCallback::notify or IExecutionCallback::notify_1_2 must
* be called on a given ExecutionCallback object.
*
* If the callback object is notified more than once, only the results of
* the first call to notify* are used, and the results from subsequent calls
* are discarded.
*
* @param status Error status returned from launching the asynchronous task
* (if the launch fails) or from the asynchronous task itself (if the
* launch succeeds). Must be:
* - NONE if the asynchronous execution was successful
* - DEVICE_UNAVAILABLE if driver is offline or busy
* - GENERAL_FAILURE if there is an unspecified error
* - OUTPUT_INSUFFICIENT_SIZE if provided output buffer is not large
* enough to store the resultant values
* - INVALID_ARGUMENT if the input request is invalid
*/
Return<void> notify(V1_0::ErrorStatus status) override;
/**
* IExecutionCallback::notify_1_2 marks the callback object with the results
* (error status, dynamic output shapes, and timing information) of the
* asynchronous execution that held this callback and enables all prior and
* future wait calls on the ExecutionCallback object to proceed.
*
* Either IExecutionCallback::notify or IExecutionCallback::notify_1_2 must
* be called on a given ExecutionCallback object.
*
* If the callback object is notified more than once, only the results of
* the first call to notify* are used, and the results from subsequent calls
* are discarded.
*
* @param status Error status returned from launching the asynchronous task
* (if the launch fails) or from the asynchronous task itself (if the
* launch succeeds). Must be:
* - NONE if the asynchronous execution was successful
* - DEVICE_UNAVAILABLE if driver is offline or busy
* - GENERAL_FAILURE if the asynchronous task resulted in an unspecified
* error
* - OUTPUT_INSUFFICIENT_SIZE if at least one output operand buffer is
* not large enough to store the corresponding output
* - INVALID_ARGUMENT if one of the input arguments to prepareModel is
* invalid
* @param outputShapes A list of shape information of model output operands.
* The index into "outputShapes" corresponds to the index of the output
* operand in the Request outputs vector. outputShapes must be empty
* unless the status is either NONE or OUTPUT_INSUFFICIENT_SIZE.
* @param timing Duration of execution. Unless MeasureTiming::YES was passed
* when launching the execution and status is NONE, all times must be
* reported as UINT64_MAX. A driver may choose to report any time as
* UINT64_MAX, indicating that particular measurement is not available.
*/
Return<void> notify_1_2(V1_0::ErrorStatus status, const hidl_vec<OutputShape>& outputShapes,
const Timing& timing) override;
// An overload of the latest notify interface to hide the version from ExecutionBuilder.
Return<void> notify(V1_0::ErrorStatus status, const hidl_vec<OutputShape>& outputShapes,
const Timing& timing) {
return notify_1_2(status, outputShapes, timing);
}
/**
* ExecutionCallback::wait blocks until notify* has been called on the
* callback object.
*/
void wait() const;
/**
* Retrieves the error status returned from the asynchronous task launched
* by either IPreparedModel::execute or IPreparedModel::execute_1_2. If
* IPreparedModel::execute or IPreparedModel::execute_1_2 has not finished
* asynchronously executing, this call will block until the asynchronous
* task notifies the object.
*
* @return status Error status returned from launching the asynchronous task
* (if the launch fails) or from the asynchronous task itself (if the
* launch succeeds). Must be:
* - NONE if the asynchronous execution was successful
* - DEVICE_UNAVAILABLE if driver is offline or busy
* - GENERAL_FAILURE if the asynchronous task resulted in an unspecified
* error
* - OUTPUT_INSUFFICIENT_SIZE if at least one output operand buffer is
* not large enough to store the corresponding output
* - INVALID_ARGUMENT if one of the input arguments to prepareModel is
* invalid
*/
V1_0::ErrorStatus getStatus() const;
/**
* Retrieves the output shapes returned from the asynchronous task launched
* by IPreparedModel::execute_1_2. If IPreparedModel::execute_1_2 has not
* finished asynchronously executing, this call will block until the
* asynchronous task notifies the object.
*
* If the asynchronous task was launched by IPreparedModel::execute, an
* empty vector will be returned.
*
* @return outputShapes A list of shape information of model output
* operands. The index into "outputShapes" corresponds to the index of
* the output operand in the Request outputs vector. outputShapes must
* be empty unless the status is either NONE or
OUTPUT_INSUFFICIENT_SIZE. outputShapes may be empty if the status is
* NONE and all model output operands are fully-specified at execution
* time. outputShapes must have the same number of elements as the
* number of model output operands if the status is
* OUTPUT_INSUFFICIENT_SIZE, or if the status is NONE and the model has
* at least one output operand that is not fully-specified.
*/
const std::vector<OutputShape>& getOutputShapes() const;
/**
* Retrieves the duration of execution of the asynchronous task launched by
* IPreparedModel::execute_1_2. If IPreparedModel::execute_1_2 has not
* finished asynchronously executing, this call will block until the
* asynchronous task notifies the object.
*
* If the asynchronous task was launched by IPreparedModel::execute, every
* time must be UINT64_MAX.
*
* @return timing Duration of the execution. Every time must be UINT64_MAX
* unless the status is NONE.
*/
Timing getTiming() const;
private:
/*
* ExecutionCallback::notifyInternal stores the results of the execution
* (status, output shapes, and timing information) in the ExecutionCallback
* object before any call to wait or get* return. It then enables all prior
* and future wait calls on the ExecutionCallback object to proceed.
*/
void notifyInternal(V1_0::ErrorStatus errorStatus, const hidl_vec<OutputShape>& outputShapes,
const Timing& timing);
// members
mutable std::mutex mMutex;
mutable std::condition_variable mCondition;
bool mNotified GUARDED_BY(mMutex) = false;
V1_0::ErrorStatus mErrorStatus = V1_0::ErrorStatus::GENERAL_FAILURE;
std::vector<OutputShape> mOutputShapes = {};
Timing mTiming = {};
};
} // namespace android::hardware::neuralnetworks::V1_2::implementation
#endif // ANDROID_HARDWARE_NEURALNETWORKS_V1_2_CALLBACKS_H
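Both callbacks in this deleted header follow the same one-shot wait/notify discipline described in their comments. A minimal, self-contained sketch of that pattern, independent of HIDL (OneShotEvent is illustrative, not part of the tree):

#include <condition_variable>
#include <mutex>

class OneShotEvent {
  public:
    void notify(int result) {
        {
            std::lock_guard<std::mutex> hold(mMutex);
            if (mNotified) return;  // quick-return: the first notify* wins
            mResult = result;       // store results before waking waiters
            mNotified = true;
        }
        mCondition.notify_all();  // wake waiters after dropping the lock
    }
    int wait() const {
        std::unique_lock<std::mutex> lock(mMutex);
        mCondition.wait(lock, [this] { return mNotified; });
        return mResult;
    }
  private:
    mutable std::mutex mMutex;
    mutable std::condition_variable mCondition;
    bool mNotified = false;
    int mResult = 0;
};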