Cleanup NNAPI VTS tests
This CL includes the following cleanups:
* namespace compression
* remove "using" from header files
* remove no-op code, default no-op constructors
* clang-formats the code
Bug: N/A
Test: mma
Test: VtsHalNeuralnetworksV1_*TargetTest
Change-Id: I023997d8686ca65223858eed3a0881f5444ed2d6
Merged-In: I023997d8686ca65223858eed3a0881f5444ed2d6
(cherry picked from commit bbe5dad266)
This commit is contained in:
parent
15b826ad6a
commit
62749b917e
27 changed files with 220 additions and 529 deletions
|
@ -18,12 +18,7 @@
|
|||
|
||||
#include "VtsHalNeuralnetworks.h"
|
||||
|
||||
namespace android {
|
||||
namespace hardware {
|
||||
namespace neuralnetworks {
|
||||
namespace V1_0 {
|
||||
namespace vts {
|
||||
namespace functional {
|
||||
namespace android::hardware::neuralnetworks::V1_0::vts::functional {
|
||||
|
||||
// create device test
|
||||
TEST_F(NeuralnetworksHidlTest, CreateDevice) {}
|
||||
|
@ -38,19 +33,14 @@ TEST_F(NeuralnetworksHidlTest, StatusTest) {
|
|||
// initialization
|
||||
TEST_F(NeuralnetworksHidlTest, GetCapabilitiesTest) {
|
||||
Return<void> ret =
|
||||
device->getCapabilities([](ErrorStatus status, const Capabilities& capabilities) {
|
||||
EXPECT_EQ(ErrorStatus::NONE, status);
|
||||
EXPECT_LT(0.0f, capabilities.float32Performance.execTime);
|
||||
EXPECT_LT(0.0f, capabilities.float32Performance.powerUsage);
|
||||
EXPECT_LT(0.0f, capabilities.quantized8Performance.execTime);
|
||||
EXPECT_LT(0.0f, capabilities.quantized8Performance.powerUsage);
|
||||
});
|
||||
device->getCapabilities([](ErrorStatus status, const Capabilities& capabilities) {
|
||||
EXPECT_EQ(ErrorStatus::NONE, status);
|
||||
EXPECT_LT(0.0f, capabilities.float32Performance.execTime);
|
||||
EXPECT_LT(0.0f, capabilities.float32Performance.powerUsage);
|
||||
EXPECT_LT(0.0f, capabilities.quantized8Performance.execTime);
|
||||
EXPECT_LT(0.0f, capabilities.quantized8Performance.powerUsage);
|
||||
});
|
||||
EXPECT_TRUE(ret.isOk());
|
||||
}
|
||||
|
||||
} // namespace functional
|
||||
} // namespace vts
|
||||
} // namespace V1_0
|
||||
} // namespace neuralnetworks
|
||||
} // namespace hardware
|
||||
} // namespace android
|
||||
} // namespace android::hardware::neuralnetworks::V1_0::vts::functional
|
||||
|
|
|
@ -33,23 +33,12 @@
|
|||
#include <gtest/gtest.h>
|
||||
#include <iostream>
|
||||
|
||||
namespace android {
|
||||
namespace hardware {
|
||||
namespace neuralnetworks {
|
||||
namespace V1_0 {
|
||||
namespace vts {
|
||||
namespace functional {
|
||||
namespace android::hardware::neuralnetworks::V1_0::vts::functional {
|
||||
|
||||
using namespace test_helper;
|
||||
using ::android::hardware::neuralnetworks::V1_0::ErrorStatus;
|
||||
using ::android::hardware::neuralnetworks::V1_0::IDevice;
|
||||
using ::android::hardware::neuralnetworks::V1_0::IPreparedModel;
|
||||
using ::android::hardware::neuralnetworks::V1_0::Model;
|
||||
using ::android::hardware::neuralnetworks::V1_0::Request;
|
||||
using ::android::hardware::neuralnetworks::V1_0::RequestArgument;
|
||||
using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback;
|
||||
using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback;
|
||||
using ::android::hidl::memory::V1_0::IMemory;
|
||||
using hidl::memory::V1_0::IMemory;
|
||||
using implementation::ExecutionCallback;
|
||||
using implementation::PreparedModelCallback;
|
||||
|
||||
Model createModel(const TestModel& testModel) {
|
||||
// Model operands.
|
||||
|
@ -206,9 +195,4 @@ TEST_P(GeneratedTest, Test) {
|
|||
INSTANTIATE_GENERATED_TEST(GeneratedTest,
|
||||
[](const TestModel& testModel) { return !testModel.expectFailure; });
|
||||
|
||||
} // namespace functional
|
||||
} // namespace vts
|
||||
} // namespace V1_0
|
||||
} // namespace neuralnetworks
|
||||
} // namespace hardware
|
||||
} // namespace android
|
||||
} // namespace android::hardware::neuralnetworks::V1_0::vts::functional
|
||||
|
|
|
@ -21,12 +21,7 @@
|
|||
#include "TestHarness.h"
|
||||
#include "VtsHalNeuralnetworks.h"
|
||||
|
||||
namespace android {
|
||||
namespace hardware {
|
||||
namespace neuralnetworks {
|
||||
namespace V1_0 {
|
||||
namespace vts {
|
||||
namespace functional {
|
||||
namespace android::hardware::neuralnetworks::V1_0::vts::functional {
|
||||
|
||||
class GeneratedTestBase
|
||||
: public NeuralnetworksHidlTest,
|
||||
|
@ -59,11 +54,6 @@ class ValidationTest : public GeneratedTestBase {
|
|||
|
||||
Model createModel(const ::test_helper::TestModel& testModel);
|
||||
|
||||
} // namespace functional
|
||||
} // namespace vts
|
||||
} // namespace V1_0
|
||||
} // namespace neuralnetworks
|
||||
} // namespace hardware
|
||||
} // namespace android
|
||||
} // namespace android::hardware::neuralnetworks::V1_0::vts::functional
|
||||
|
||||
#endif // ANDROID_HARDWARE_NEURALNETWORKS_V1_0_GENERATED_TEST_HARNESS_H
|
||||
|
|
|
@ -28,15 +28,13 @@
|
|||
#include <algorithm>
|
||||
#include <vector>
|
||||
|
||||
namespace android {
|
||||
namespace hardware {
|
||||
namespace neuralnetworks {
|
||||
namespace android::hardware::neuralnetworks {
|
||||
|
||||
using namespace test_helper;
|
||||
using ::android::hardware::neuralnetworks::V1_0::DataLocation;
|
||||
using ::android::hardware::neuralnetworks::V1_0::Request;
|
||||
using ::android::hardware::neuralnetworks::V1_0::RequestArgument;
|
||||
using ::android::hidl::memory::V1_0::IMemory;
|
||||
using hidl::memory::V1_0::IMemory;
|
||||
using V1_0::DataLocation;
|
||||
using V1_0::Request;
|
||||
using V1_0::RequestArgument;
|
||||
|
||||
constexpr uint32_t kInputPoolIndex = 0;
|
||||
constexpr uint32_t kOutputPoolIndex = 1;
|
||||
|
@ -118,6 +116,4 @@ std::vector<TestBuffer> getOutputBuffers(const Request& request) {
|
|||
return outputBuffers;
|
||||
}
|
||||
|
||||
} // namespace neuralnetworks
|
||||
} // namespace hardware
|
||||
} // namespace android
|
||||
} // namespace android::hardware::neuralnetworks
|
||||
|
|
|
@ -20,15 +20,9 @@
|
|||
#include "GeneratedTestHarness.h"
|
||||
#include "VtsHalNeuralnetworks.h"
|
||||
|
||||
namespace android {
|
||||
namespace hardware {
|
||||
namespace neuralnetworks {
|
||||
namespace V1_0 {
|
||||
namespace vts {
|
||||
namespace functional {
|
||||
namespace android::hardware::neuralnetworks::V1_0::vts::functional {
|
||||
|
||||
using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback;
|
||||
using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback;
|
||||
using implementation::PreparedModelCallback;
|
||||
|
||||
///////////////////////// UTILITY FUNCTIONS /////////////////////////
|
||||
|
||||
|
@ -37,9 +31,9 @@ static void validateGetSupportedOperations(const sp<IDevice>& device, const std:
|
|||
SCOPED_TRACE(message + " [getSupportedOperations]");
|
||||
|
||||
Return<void> ret =
|
||||
device->getSupportedOperations(model, [&](ErrorStatus status, const hidl_vec<bool>&) {
|
||||
EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, status);
|
||||
});
|
||||
device->getSupportedOperations(model, [&](ErrorStatus status, const hidl_vec<bool>&) {
|
||||
EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, status);
|
||||
});
|
||||
EXPECT_TRUE(ret.isOk());
|
||||
}
|
||||
|
||||
|
@ -48,7 +42,6 @@ static void validatePrepareModel(const sp<IDevice>& device, const std::string& m
|
|||
SCOPED_TRACE(message + " [prepareModel]");
|
||||
|
||||
sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
|
||||
ASSERT_NE(nullptr, preparedModelCallback.get());
|
||||
Return<ErrorStatus> prepareLaunchStatus = device->prepareModel(model, preparedModelCallback);
|
||||
ASSERT_TRUE(prepareLaunchStatus.isOk());
|
||||
ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, static_cast<ErrorStatus>(prepareLaunchStatus));
|
||||
|
@ -94,13 +87,13 @@ static uint32_t hidl_vec_push_back(hidl_vec<Type>* vec, const Type& value) {
|
|||
static uint32_t addOperand(Model* model) {
|
||||
return hidl_vec_push_back(&model->operands,
|
||||
{
|
||||
.type = OperandType::INT32,
|
||||
.dimensions = {},
|
||||
.numberOfConsumers = 0,
|
||||
.scale = 0.0f,
|
||||
.zeroPoint = 0,
|
||||
.lifetime = OperandLifeTime::MODEL_INPUT,
|
||||
.location = {.poolIndex = 0, .offset = 0, .length = 0},
|
||||
.type = OperandType::INT32,
|
||||
.dimensions = {},
|
||||
.numberOfConsumers = 0,
|
||||
.scale = 0.0f,
|
||||
.zeroPoint = 0,
|
||||
.lifetime = OperandLifeTime::MODEL_INPUT,
|
||||
.location = {.poolIndex = 0, .offset = 0, .length = 0},
|
||||
});
|
||||
}
|
||||
|
||||
|
@ -114,10 +107,10 @@ static uint32_t addOperand(Model* model, OperandLifeTime lifetime) {
|
|||
///////////////////////// VALIDATE MODEL OPERAND TYPE /////////////////////////
|
||||
|
||||
static const int32_t invalidOperandTypes[] = {
|
||||
static_cast<int32_t>(OperandType::FLOAT32) - 1, // lower bound fundamental
|
||||
static_cast<int32_t>(OperandType::TENSOR_QUANT8_ASYMM) + 1, // upper bound fundamental
|
||||
static_cast<int32_t>(OperandType::OEM) - 1, // lower bound OEM
|
||||
static_cast<int32_t>(OperandType::TENSOR_OEM_BYTE) + 1, // upper bound OEM
|
||||
static_cast<int32_t>(OperandType::FLOAT32) - 1, // lower bound fundamental
|
||||
static_cast<int32_t>(OperandType::TENSOR_QUANT8_ASYMM) + 1, // upper bound fundamental
|
||||
static_cast<int32_t>(OperandType::OEM) - 1, // lower bound OEM
|
||||
static_cast<int32_t>(OperandType::TENSOR_OEM_BYTE) + 1, // upper bound OEM
|
||||
};
|
||||
|
||||
static void mutateOperandTypeTest(const sp<IDevice>& device, const V1_0::Model& model) {
|
||||
|
@ -210,7 +203,7 @@ static std::vector<int32_t> getInvalidZeroPoints(OperandType type) {
|
|||
static void mutateOperandZeroPointTest(const sp<IDevice>& device, const V1_0::Model& model) {
|
||||
for (size_t operand = 0; operand < model.operands.size(); ++operand) {
|
||||
const std::vector<int32_t> invalidZeroPoints =
|
||||
getInvalidZeroPoints(model.operands[operand].type);
|
||||
getInvalidZeroPoints(model.operands[operand].type);
|
||||
for (int32_t invalidZeroPoint : invalidZeroPoints) {
|
||||
const std::string message = "mutateOperandZeroPointTest: operand " +
|
||||
std::to_string(operand) + " has zero point of " +
|
||||
|
@ -242,18 +235,18 @@ static void mutateOperand(Operand* operand, OperandType type) {
|
|||
break;
|
||||
case OperandType::TENSOR_FLOAT32:
|
||||
newOperand.dimensions =
|
||||
operand->dimensions.size() > 0 ? operand->dimensions : hidl_vec<uint32_t>({1});
|
||||
operand->dimensions.size() > 0 ? operand->dimensions : hidl_vec<uint32_t>({1});
|
||||
newOperand.scale = 0.0f;
|
||||
newOperand.zeroPoint = 0;
|
||||
break;
|
||||
case OperandType::TENSOR_INT32:
|
||||
newOperand.dimensions =
|
||||
operand->dimensions.size() > 0 ? operand->dimensions : hidl_vec<uint32_t>({1});
|
||||
operand->dimensions.size() > 0 ? operand->dimensions : hidl_vec<uint32_t>({1});
|
||||
newOperand.zeroPoint = 0;
|
||||
break;
|
||||
case OperandType::TENSOR_QUANT8_ASYMM:
|
||||
newOperand.dimensions =
|
||||
operand->dimensions.size() > 0 ? operand->dimensions : hidl_vec<uint32_t>({1});
|
||||
operand->dimensions.size() > 0 ? operand->dimensions : hidl_vec<uint32_t>({1});
|
||||
newOperand.scale = operand->scale != 0.0f ? operand->scale : 1.0f;
|
||||
break;
|
||||
case OperandType::OEM:
|
||||
|
@ -303,10 +296,10 @@ static void mutateOperationOperandTypeTest(const sp<IDevice>& device, const V1_0
|
|||
///////////////////////// VALIDATE MODEL OPERATION TYPE /////////////////////////
|
||||
|
||||
static const int32_t invalidOperationTypes[] = {
|
||||
static_cast<int32_t>(OperationType::ADD) - 1, // lower bound fundamental
|
||||
static_cast<int32_t>(OperationType::TANH) + 1, // upper bound fundamental
|
||||
static_cast<int32_t>(OperationType::OEM_OPERATION) - 1, // lower bound OEM
|
||||
static_cast<int32_t>(OperationType::OEM_OPERATION) + 1, // upper bound OEM
|
||||
static_cast<int32_t>(OperationType::ADD) - 1, // lower bound fundamental
|
||||
static_cast<int32_t>(OperationType::TANH) + 1, // upper bound fundamental
|
||||
static_cast<int32_t>(OperationType::OEM_OPERATION) - 1, // lower bound OEM
|
||||
static_cast<int32_t>(OperationType::OEM_OPERATION) + 1, // upper bound OEM
|
||||
};
|
||||
|
||||
static void mutateOperationTypeTest(const sp<IDevice>& device, const V1_0::Model& model) {
|
||||
|
@ -317,7 +310,7 @@ static void mutateOperationTypeTest(const sp<IDevice>& device, const V1_0::Model
|
|||
std::to_string(invalidOperationType);
|
||||
validate(device, message, model, [operation, invalidOperationType](Model* model) {
|
||||
model->operations[operation].type =
|
||||
static_cast<OperationType>(invalidOperationType);
|
||||
static_cast<OperationType>(invalidOperationType);
|
||||
});
|
||||
}
|
||||
}
|
||||
|
@ -470,7 +463,7 @@ static void addOperationInputTest(const sp<IDevice>& device, const V1_0::Model&
|
|||
static void addOperationOutputTest(const sp<IDevice>& device, const V1_0::Model& model) {
|
||||
for (size_t operation = 0; operation < model.operations.size(); ++operation) {
|
||||
const std::string message =
|
||||
"addOperationOutputTest: operation " + std::to_string(operation);
|
||||
"addOperationOutputTest: operation " + std::to_string(operation);
|
||||
validate(device, message, model, [operation](Model* model) {
|
||||
uint32_t index = addOperand(model, OperandLifeTime::MODEL_OUTPUT);
|
||||
hidl_vec_push_back(&model->operations[operation].outputs, index);
|
||||
|
@ -498,9 +491,4 @@ void ValidationTest::validateModel(const V1_0::Model& model) {
|
|||
addOperationOutputTest(device, model);
|
||||
}
|
||||
|
||||
} // namespace functional
|
||||
} // namespace vts
|
||||
} // namespace V1_0
|
||||
} // namespace neuralnetworks
|
||||
} // namespace hardware
|
||||
} // namespace android
|
||||
} // namespace android::hardware::neuralnetworks::V1_0::vts::functional
|
||||
|
|
|
@ -20,14 +20,9 @@
|
|||
#include "GeneratedTestHarness.h"
|
||||
#include "VtsHalNeuralnetworks.h"
|
||||
|
||||
namespace android {
|
||||
namespace hardware {
|
||||
namespace neuralnetworks {
|
||||
namespace V1_0 {
|
||||
namespace vts {
|
||||
namespace functional {
|
||||
namespace android::hardware::neuralnetworks::V1_0::vts::functional {
|
||||
|
||||
using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback;
|
||||
using implementation::ExecutionCallback;
|
||||
|
||||
///////////////////////// UTILITY FUNCTIONS /////////////////////////
|
||||
|
||||
|
@ -41,7 +36,6 @@ static void validate(const sp<IPreparedModel>& preparedModel, const std::string&
|
|||
SCOPED_TRACE(message + " [execute]");
|
||||
|
||||
sp<ExecutionCallback> executionCallback = new ExecutionCallback();
|
||||
ASSERT_NE(nullptr, executionCallback.get());
|
||||
Return<ErrorStatus> executeLaunchStatus = preparedModel->execute(request, executionCallback);
|
||||
ASSERT_TRUE(executeLaunchStatus.isOk());
|
||||
ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, static_cast<ErrorStatus>(executeLaunchStatus));
|
||||
|
@ -99,9 +93,4 @@ void ValidationTest::validateRequest(const sp<IPreparedModel>& preparedModel,
|
|||
removeOutputTest(preparedModel, request);
|
||||
}
|
||||
|
||||
} // namespace functional
|
||||
} // namespace vts
|
||||
} // namespace V1_0
|
||||
} // namespace neuralnetworks
|
||||
} // namespace hardware
|
||||
} // namespace android
|
||||
} // namespace android::hardware::neuralnetworks::V1_0::vts::functional
|
||||
|
|
|
@ -24,16 +24,11 @@
|
|||
|
||||
#include <android-base/logging.h>
|
||||
|
||||
namespace android {
|
||||
namespace hardware {
|
||||
namespace neuralnetworks {
|
||||
namespace V1_0 {
|
||||
namespace vts {
|
||||
namespace functional {
|
||||
namespace android::hardware::neuralnetworks::V1_0::vts::functional {
|
||||
|
||||
using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback;
|
||||
using implementation::PreparedModelCallback;
|
||||
|
||||
static void createPreparedModel(const sp<IDevice>& device, const V1_0::Model& model,
|
||||
static void createPreparedModel(const sp<IDevice>& device, const Model& model,
|
||||
sp<IPreparedModel>* preparedModel) {
|
||||
ASSERT_NE(nullptr, preparedModel);
|
||||
|
||||
|
@ -50,7 +45,6 @@ static void createPreparedModel(const sp<IDevice>& device, const V1_0::Model& mo
|
|||
|
||||
// launch prepare model
|
||||
sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
|
||||
ASSERT_NE(nullptr, preparedModelCallback.get());
|
||||
Return<ErrorStatus> prepareLaunchStatus = device->prepareModel(model, preparedModelCallback);
|
||||
ASSERT_TRUE(prepareLaunchStatus.isOk());
|
||||
ASSERT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(prepareLaunchStatus));
|
||||
|
@ -81,10 +75,6 @@ static void createPreparedModel(const sp<IDevice>& device, const V1_0::Model& mo
|
|||
}
|
||||
|
||||
// A class for test environment setup
|
||||
NeuralnetworksHidlEnvironment::NeuralnetworksHidlEnvironment() {}
|
||||
|
||||
NeuralnetworksHidlEnvironment::~NeuralnetworksHidlEnvironment() {}
|
||||
|
||||
NeuralnetworksHidlEnvironment* NeuralnetworksHidlEnvironment::getInstance() {
|
||||
// This has to return a "new" object because it is freed inside
|
||||
// ::testing::AddGlobalTestEnvironment when the gtest is being torn down
|
||||
|
@ -97,14 +87,8 @@ void NeuralnetworksHidlEnvironment::registerTestServices() {
|
|||
}
|
||||
|
||||
// The main test class for NEURALNETWORK HIDL HAL.
|
||||
NeuralnetworksHidlTest::NeuralnetworksHidlTest() {}
|
||||
|
||||
NeuralnetworksHidlTest::~NeuralnetworksHidlTest() {}
|
||||
|
||||
void NeuralnetworksHidlTest::SetUp() {
|
||||
::testing::VtsHalHidlTargetTestBase::SetUp();
|
||||
device = ::testing::VtsHalHidlTargetTestBase::getService<IDevice>(
|
||||
NeuralnetworksHidlEnvironment::getInstance());
|
||||
|
||||
#ifdef PRESUBMIT_NOT_VTS
|
||||
const std::string name =
|
||||
|
@ -119,7 +103,6 @@ void NeuralnetworksHidlTest::SetUp() {
|
|||
}
|
||||
|
||||
void NeuralnetworksHidlTest::TearDown() {
|
||||
device = nullptr;
|
||||
::testing::VtsHalHidlTargetTestBase::TearDown();
|
||||
}
|
||||
|
||||
|
@ -128,10 +111,8 @@ void ValidationTest::validateEverything(const Model& model, const Request& reque
|
|||
|
||||
// create IPreparedModel
|
||||
sp<IPreparedModel> preparedModel;
|
||||
ASSERT_NO_FATAL_FAILURE(createPreparedModel(device, model, &preparedModel));
|
||||
if (preparedModel == nullptr) {
|
||||
return;
|
||||
}
|
||||
createPreparedModel(device, model, &preparedModel);
|
||||
if (preparedModel == nullptr) return;
|
||||
|
||||
validateRequest(preparedModel, request);
|
||||
}
|
||||
|
@ -145,12 +126,7 @@ TEST_P(ValidationTest, Test) {
|
|||
|
||||
INSTANTIATE_GENERATED_TEST(ValidationTest, [](const test_helper::TestModel&) { return true; });
|
||||
|
||||
} // namespace functional
|
||||
} // namespace vts
|
||||
} // namespace V1_0
|
||||
} // namespace neuralnetworks
|
||||
} // namespace hardware
|
||||
} // namespace android
|
||||
} // namespace android::hardware::neuralnetworks::V1_0::vts::functional
|
||||
|
||||
namespace android::hardware::neuralnetworks::V1_0 {
|
||||
|
||||
|
|
|
@ -30,20 +30,14 @@
|
|||
|
||||
#include "TestHarness.h"
|
||||
|
||||
namespace android {
|
||||
namespace hardware {
|
||||
namespace neuralnetworks {
|
||||
namespace V1_0 {
|
||||
namespace vts {
|
||||
namespace functional {
|
||||
namespace android::hardware::neuralnetworks::V1_0::vts::functional {
|
||||
|
||||
// A class for test environment setup
|
||||
class NeuralnetworksHidlEnvironment : public ::testing::VtsHalHidlTargetTestEnvBase {
|
||||
DISALLOW_COPY_AND_ASSIGN(NeuralnetworksHidlEnvironment);
|
||||
NeuralnetworksHidlEnvironment();
|
||||
~NeuralnetworksHidlEnvironment() override;
|
||||
NeuralnetworksHidlEnvironment() = default;
|
||||
|
||||
public:
|
||||
public:
|
||||
static NeuralnetworksHidlEnvironment* getInstance();
|
||||
void registerTestServices() override;
|
||||
};
|
||||
|
@ -52,22 +46,17 @@ class NeuralnetworksHidlEnvironment : public ::testing::VtsHalHidlTargetTestEnvB
|
|||
class NeuralnetworksHidlTest : public ::testing::VtsHalHidlTargetTestBase {
|
||||
DISALLOW_COPY_AND_ASSIGN(NeuralnetworksHidlTest);
|
||||
|
||||
public:
|
||||
NeuralnetworksHidlTest();
|
||||
~NeuralnetworksHidlTest() override;
|
||||
public:
|
||||
NeuralnetworksHidlTest() = default;
|
||||
void SetUp() override;
|
||||
void TearDown() override;
|
||||
|
||||
protected:
|
||||
sp<IDevice> device;
|
||||
protected:
|
||||
const sp<IDevice> device = ::testing::VtsHalHidlTargetTestBase::getService<IDevice>(
|
||||
NeuralnetworksHidlEnvironment::getInstance());
|
||||
};
|
||||
|
||||
} // namespace functional
|
||||
} // namespace vts
|
||||
} // namespace V1_0
|
||||
} // namespace neuralnetworks
|
||||
} // namespace hardware
|
||||
} // namespace android
|
||||
} // namespace android::hardware::neuralnetworks::V1_0::vts::functional
|
||||
|
||||
namespace android::hardware::neuralnetworks::V1_0 {
|
||||
|
||||
|
|
|
@ -17,14 +17,13 @@
|
|||
#ifndef ANDROID_HARDWARE_NEURALNETWORKS_V1_0_UTILS_H
|
||||
#define ANDROID_HARDWARE_NEURALNETWORKS_V1_0_UTILS_H
|
||||
|
||||
#include <android-base/logging.h>
|
||||
#include <android/hardware/neuralnetworks/1.0/types.h>
|
||||
#include <algorithm>
|
||||
#include <vector>
|
||||
#include "TestHarness.h"
|
||||
|
||||
namespace android {
|
||||
namespace hardware {
|
||||
namespace neuralnetworks {
|
||||
namespace android::hardware::neuralnetworks {
|
||||
|
||||
// Create HIDL Request from the TestModel struct.
|
||||
V1_0::Request createRequest(const ::test_helper::TestModel& testModel);
|
||||
|
@ -37,23 +36,20 @@ std::vector<::test_helper::TestBuffer> getOutputBuffers(const V1_0::Request& req
|
|||
// resizing the hidl_vec to one less.
|
||||
template <typename Type>
|
||||
inline void hidl_vec_removeAt(hidl_vec<Type>* vec, uint32_t index) {
|
||||
if (vec) {
|
||||
std::rotate(vec->begin() + index, vec->begin() + index + 1, vec->end());
|
||||
vec->resize(vec->size() - 1);
|
||||
}
|
||||
CHECK(vec != nullptr);
|
||||
std::rotate(vec->begin() + index, vec->begin() + index + 1, vec->end());
|
||||
vec->resize(vec->size() - 1);
|
||||
}
|
||||
|
||||
template <typename Type>
|
||||
inline uint32_t hidl_vec_push_back(hidl_vec<Type>* vec, const Type& value) {
|
||||
// assume vec is valid
|
||||
CHECK(vec != nullptr);
|
||||
const uint32_t index = vec->size();
|
||||
vec->resize(index + 1);
|
||||
(*vec)[index] = value;
|
||||
return index;
|
||||
}
|
||||
|
||||
} // namespace neuralnetworks
|
||||
} // namespace hardware
|
||||
} // namespace android
|
||||
} // namespace android::hardware::neuralnetworks
|
||||
|
||||
#endif // ANDROID_HARDWARE_NEURALNETWORKS_V1_0_UTILS_H
|
||||
|
|
|
@ -18,12 +18,10 @@
|
|||
|
||||
#include "VtsHalNeuralnetworks.h"
|
||||
|
||||
namespace android {
|
||||
namespace hardware {
|
||||
namespace neuralnetworks {
|
||||
namespace V1_1 {
|
||||
namespace vts {
|
||||
namespace functional {
|
||||
namespace android::hardware::neuralnetworks::V1_1::vts::functional {
|
||||
|
||||
using V1_0::DeviceStatus;
|
||||
using V1_0::ErrorStatus;
|
||||
|
||||
// create device test
|
||||
TEST_F(NeuralnetworksHidlTest, CreateDevice) {}
|
||||
|
@ -38,21 +36,16 @@ TEST_F(NeuralnetworksHidlTest, StatusTest) {
|
|||
// initialization
|
||||
TEST_F(NeuralnetworksHidlTest, GetCapabilitiesTest) {
|
||||
Return<void> ret =
|
||||
device->getCapabilities_1_1([](ErrorStatus status, const Capabilities& capabilities) {
|
||||
EXPECT_EQ(ErrorStatus::NONE, status);
|
||||
EXPECT_LT(0.0f, capabilities.float32Performance.execTime);
|
||||
EXPECT_LT(0.0f, capabilities.float32Performance.powerUsage);
|
||||
EXPECT_LT(0.0f, capabilities.quantized8Performance.execTime);
|
||||
EXPECT_LT(0.0f, capabilities.quantized8Performance.powerUsage);
|
||||
EXPECT_LT(0.0f, capabilities.relaxedFloat32toFloat16Performance.execTime);
|
||||
EXPECT_LT(0.0f, capabilities.relaxedFloat32toFloat16Performance.powerUsage);
|
||||
});
|
||||
device->getCapabilities_1_1([](ErrorStatus status, const Capabilities& capabilities) {
|
||||
EXPECT_EQ(ErrorStatus::NONE, status);
|
||||
EXPECT_LT(0.0f, capabilities.float32Performance.execTime);
|
||||
EXPECT_LT(0.0f, capabilities.float32Performance.powerUsage);
|
||||
EXPECT_LT(0.0f, capabilities.quantized8Performance.execTime);
|
||||
EXPECT_LT(0.0f, capabilities.quantized8Performance.powerUsage);
|
||||
EXPECT_LT(0.0f, capabilities.relaxedFloat32toFloat16Performance.execTime);
|
||||
EXPECT_LT(0.0f, capabilities.relaxedFloat32toFloat16Performance.powerUsage);
|
||||
});
|
||||
EXPECT_TRUE(ret.isOk());
|
||||
}
|
||||
|
||||
} // namespace functional
|
||||
} // namespace vts
|
||||
} // namespace V1_1
|
||||
} // namespace neuralnetworks
|
||||
} // namespace hardware
|
||||
} // namespace android
|
||||
} // namespace android::hardware::neuralnetworks::V1_1::vts::functional
|
||||
|
|
|
@ -33,28 +33,19 @@
|
|||
#include "TestHarness.h"
|
||||
#include "VtsHalNeuralnetworks.h"
|
||||
|
||||
namespace android {
|
||||
namespace hardware {
|
||||
namespace neuralnetworks {
|
||||
namespace V1_1 {
|
||||
namespace vts {
|
||||
namespace functional {
|
||||
namespace android::hardware::neuralnetworks::V1_1::vts::functional {
|
||||
|
||||
using namespace test_helper;
|
||||
using ::android::hardware::neuralnetworks::V1_0::DataLocation;
|
||||
using ::android::hardware::neuralnetworks::V1_0::ErrorStatus;
|
||||
using ::android::hardware::neuralnetworks::V1_0::IPreparedModel;
|
||||
using ::android::hardware::neuralnetworks::V1_0::Operand;
|
||||
using ::android::hardware::neuralnetworks::V1_0::OperandLifeTime;
|
||||
using ::android::hardware::neuralnetworks::V1_0::OperandType;
|
||||
using ::android::hardware::neuralnetworks::V1_0::Request;
|
||||
using ::android::hardware::neuralnetworks::V1_0::RequestArgument;
|
||||
using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback;
|
||||
using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback;
|
||||
using ::android::hardware::neuralnetworks::V1_1::ExecutionPreference;
|
||||
using ::android::hardware::neuralnetworks::V1_1::IDevice;
|
||||
using ::android::hardware::neuralnetworks::V1_1::Model;
|
||||
using ::android::hidl::memory::V1_0::IMemory;
|
||||
using hidl::memory::V1_0::IMemory;
|
||||
using V1_0::DataLocation;
|
||||
using V1_0::ErrorStatus;
|
||||
using V1_0::IPreparedModel;
|
||||
using V1_0::Operand;
|
||||
using V1_0::OperandLifeTime;
|
||||
using V1_0::OperandType;
|
||||
using V1_0::Request;
|
||||
using V1_0::implementation::ExecutionCallback;
|
||||
using V1_0::implementation::PreparedModelCallback;
|
||||
|
||||
Model createModel(const TestModel& testModel) {
|
||||
// Model operands.
|
||||
|
@ -212,9 +203,4 @@ TEST_P(GeneratedTest, Test) {
|
|||
INSTANTIATE_GENERATED_TEST(GeneratedTest,
|
||||
[](const TestModel& testModel) { return !testModel.expectFailure; });
|
||||
|
||||
} // namespace functional
|
||||
} // namespace vts
|
||||
} // namespace V1_1
|
||||
} // namespace neuralnetworks
|
||||
} // namespace hardware
|
||||
} // namespace android
|
||||
} // namespace android::hardware::neuralnetworks::V1_1::vts::functional
|
||||
|
|
|
@ -21,12 +21,7 @@
|
|||
#include "TestHarness.h"
|
||||
#include "VtsHalNeuralnetworks.h"
|
||||
|
||||
namespace android {
|
||||
namespace hardware {
|
||||
namespace neuralnetworks {
|
||||
namespace V1_1 {
|
||||
namespace vts {
|
||||
namespace functional {
|
||||
namespace android::hardware::neuralnetworks::V1_1::vts::functional {
|
||||
|
||||
class GeneratedTestBase
|
||||
: public NeuralnetworksHidlTest,
|
||||
|
@ -50,20 +45,16 @@ class GeneratedTestBase
|
|||
// TODO: Clean up the hierarchy for ValidationTest.
|
||||
class ValidationTest : public GeneratedTestBase {
|
||||
protected:
|
||||
void validateEverything(const Model& model, const Request& request);
|
||||
void validateEverything(const Model& model, const V1_0::Request& request);
|
||||
|
||||
private:
|
||||
void validateModel(const Model& model);
|
||||
void validateRequest(const sp<IPreparedModel>& preparedModel, const Request& request);
|
||||
void validateRequest(const sp<V1_0::IPreparedModel>& preparedModel,
|
||||
const V1_0::Request& request);
|
||||
};
|
||||
|
||||
Model createModel(const ::test_helper::TestModel& testModel);
|
||||
|
||||
} // namespace functional
|
||||
} // namespace vts
|
||||
} // namespace V1_1
|
||||
} // namespace neuralnetworks
|
||||
} // namespace hardware
|
||||
} // namespace android
|
||||
} // namespace android::hardware::neuralnetworks::V1_1::vts::functional
|
||||
|
||||
#endif // ANDROID_HARDWARE_NEURALNETWORKS_V1_1_GENERATED_TEST_HARNESS_H
|
||||
|
|
|
@ -21,18 +21,14 @@
|
|||
#include "GeneratedTestHarness.h"
|
||||
#include "VtsHalNeuralnetworks.h"
|
||||
|
||||
namespace android {
|
||||
namespace hardware {
|
||||
namespace neuralnetworks {
|
||||
namespace V1_1 {
|
||||
namespace vts {
|
||||
namespace functional {
|
||||
namespace android::hardware::neuralnetworks::V1_1::vts::functional {
|
||||
|
||||
using ::android::hardware::neuralnetworks::V1_0::IPreparedModel;
|
||||
using ::android::hardware::neuralnetworks::V1_0::Operand;
|
||||
using ::android::hardware::neuralnetworks::V1_0::OperandLifeTime;
|
||||
using ::android::hardware::neuralnetworks::V1_0::OperandType;
|
||||
using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback;
|
||||
using V1_0::ErrorStatus;
|
||||
using V1_0::IPreparedModel;
|
||||
using V1_0::Operand;
|
||||
using V1_0::OperandLifeTime;
|
||||
using V1_0::OperandType;
|
||||
using V1_0::implementation::PreparedModelCallback;
|
||||
|
||||
///////////////////////// UTILITY FUNCTIONS /////////////////////////
|
||||
|
||||
|
@ -52,7 +48,6 @@ static void validatePrepareModel(const sp<IDevice>& device, const std::string& m
|
|||
SCOPED_TRACE(message + " [prepareModel_1_1]");
|
||||
|
||||
sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
|
||||
ASSERT_NE(nullptr, preparedModelCallback.get());
|
||||
Return<ErrorStatus> prepareLaunchStatus =
|
||||
device->prepareModel_1_1(model, preference, preparedModelCallback);
|
||||
ASSERT_TRUE(prepareLaunchStatus.isOk());
|
||||
|
@ -484,8 +479,9 @@ static void mutateExecutionPreferenceTest(const sp<IDevice>& device, const V1_1:
|
|||
for (int32_t preference : invalidExecutionPreferences) {
|
||||
const std::string message =
|
||||
"mutateExecutionPreferenceTest: preference " + std::to_string(preference);
|
||||
validate(device, message, model, [](Model*) {},
|
||||
static_cast<ExecutionPreference>(preference));
|
||||
validate(
|
||||
device, message, model, [](Model*) {},
|
||||
static_cast<ExecutionPreference>(preference));
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -509,9 +505,4 @@ void ValidationTest::validateModel(const V1_1::Model& model) {
|
|||
mutateExecutionPreferenceTest(device, model);
|
||||
}
|
||||
|
||||
} // namespace functional
|
||||
} // namespace vts
|
||||
} // namespace V1_1
|
||||
} // namespace neuralnetworks
|
||||
} // namespace hardware
|
||||
} // namespace android
|
||||
} // namespace android::hardware::neuralnetworks::V1_1::vts::functional
|
||||
|
|
|
@ -21,17 +21,12 @@
|
|||
#include "GeneratedTestHarness.h"
|
||||
#include "VtsHalNeuralnetworks.h"
|
||||
|
||||
namespace android {
|
||||
namespace hardware {
|
||||
namespace neuralnetworks {
|
||||
namespace V1_1 {
|
||||
namespace vts {
|
||||
namespace functional {
|
||||
namespace android::hardware::neuralnetworks::V1_1::vts::functional {
|
||||
|
||||
using ::android::hardware::neuralnetworks::V1_0::ErrorStatus;
|
||||
using ::android::hardware::neuralnetworks::V1_0::Request;
|
||||
using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback;
|
||||
using ::android::hardware::neuralnetworks::V1_1::IPreparedModel;
|
||||
using V1_0::ErrorStatus;
|
||||
using V1_0::IPreparedModel;
|
||||
using V1_0::Request;
|
||||
using V1_0::implementation::ExecutionCallback;
|
||||
|
||||
///////////////////////// UTILITY FUNCTIONS /////////////////////////
|
||||
|
||||
|
@ -45,7 +40,6 @@ static void validate(const sp<IPreparedModel>& preparedModel, const std::string&
|
|||
SCOPED_TRACE(message + " [execute]");
|
||||
|
||||
sp<ExecutionCallback> executionCallback = new ExecutionCallback();
|
||||
ASSERT_NE(nullptr, executionCallback.get());
|
||||
Return<ErrorStatus> executeLaunchStatus = preparedModel->execute(request, executionCallback);
|
||||
ASSERT_TRUE(executeLaunchStatus.isOk());
|
||||
ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, static_cast<ErrorStatus>(executeLaunchStatus));
|
||||
|
@ -83,9 +77,4 @@ void ValidationTest::validateRequest(const sp<IPreparedModel>& preparedModel,
|
|||
removeOutputTest(preparedModel, request);
|
||||
}
|
||||
|
||||
} // namespace functional
|
||||
} // namespace vts
|
||||
} // namespace V1_1
|
||||
} // namespace neuralnetworks
|
||||
} // namespace hardware
|
||||
} // namespace android
|
||||
} // namespace android::hardware::neuralnetworks::V1_1::vts::functional
|
||||
|
|
|
@ -24,16 +24,14 @@
|
|||
|
||||
#include <android-base/logging.h>
|
||||
|
||||
namespace android {
|
||||
namespace hardware {
|
||||
namespace neuralnetworks {
|
||||
namespace V1_1 {
|
||||
namespace vts {
|
||||
namespace functional {
|
||||
namespace android::hardware::neuralnetworks::V1_1::vts::functional {
|
||||
|
||||
using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback;
|
||||
using V1_0::ErrorStatus;
|
||||
using V1_0::IPreparedModel;
|
||||
using V1_0::Request;
|
||||
using V1_0::implementation::PreparedModelCallback;
|
||||
|
||||
static void createPreparedModel(const sp<IDevice>& device, const V1_1::Model& model,
|
||||
static void createPreparedModel(const sp<IDevice>& device, const Model& model,
|
||||
sp<IPreparedModel>* preparedModel) {
|
||||
ASSERT_NE(nullptr, preparedModel);
|
||||
|
||||
|
@ -50,7 +48,6 @@ static void createPreparedModel(const sp<IDevice>& device, const V1_1::Model& mo
|
|||
|
||||
// launch prepare model
|
||||
sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
|
||||
ASSERT_NE(nullptr, preparedModelCallback.get());
|
||||
Return<ErrorStatus> prepareLaunchStatus = device->prepareModel_1_1(
|
||||
model, ExecutionPreference::FAST_SINGLE_ANSWER, preparedModelCallback);
|
||||
ASSERT_TRUE(prepareLaunchStatus.isOk());
|
||||
|
@ -82,10 +79,6 @@ static void createPreparedModel(const sp<IDevice>& device, const V1_1::Model& mo
|
|||
}
|
||||
|
||||
// A class for test environment setup
|
||||
NeuralnetworksHidlEnvironment::NeuralnetworksHidlEnvironment() {}
|
||||
|
||||
NeuralnetworksHidlEnvironment::~NeuralnetworksHidlEnvironment() {}
|
||||
|
||||
NeuralnetworksHidlEnvironment* NeuralnetworksHidlEnvironment::getInstance() {
|
||||
// This has to return a "new" object because it is freed inside
|
||||
// ::testing::AddGlobalTestEnvironment when the gtest is being torn down
|
||||
|
@ -98,14 +91,8 @@ void NeuralnetworksHidlEnvironment::registerTestServices() {
|
|||
}
|
||||
|
||||
// The main test class for NEURALNETWORK HIDL HAL.
|
||||
NeuralnetworksHidlTest::NeuralnetworksHidlTest() {}
|
||||
|
||||
NeuralnetworksHidlTest::~NeuralnetworksHidlTest() {}
|
||||
|
||||
void NeuralnetworksHidlTest::SetUp() {
|
||||
::testing::VtsHalHidlTargetTestBase::SetUp();
|
||||
device = ::testing::VtsHalHidlTargetTestBase::getService<IDevice>(
|
||||
NeuralnetworksHidlEnvironment::getInstance());
|
||||
|
||||
#ifdef PRESUBMIT_NOT_VTS
|
||||
const std::string name =
|
||||
|
@ -120,7 +107,6 @@ void NeuralnetworksHidlTest::SetUp() {
|
|||
}
|
||||
|
||||
void NeuralnetworksHidlTest::TearDown() {
|
||||
device = nullptr;
|
||||
::testing::VtsHalHidlTargetTestBase::TearDown();
|
||||
}
|
||||
|
||||
|
@ -146,12 +132,7 @@ TEST_P(ValidationTest, Test) {
|
|||
|
||||
INSTANTIATE_GENERATED_TEST(ValidationTest, [](const test_helper::TestModel&) { return true; });
|
||||
|
||||
} // namespace functional
|
||||
} // namespace vts
|
||||
} // namespace V1_1
|
||||
} // namespace neuralnetworks
|
||||
} // namespace hardware
|
||||
} // namespace android
|
||||
} // namespace android::hardware::neuralnetworks::V1_1::vts::functional
|
||||
|
||||
namespace android::hardware::neuralnetworks::V1_0 {
|
||||
|
||||
|
|
|
@ -31,28 +31,14 @@
|
|||
|
||||
#include "TestHarness.h"
|
||||
|
||||
namespace android {
|
||||
namespace hardware {
|
||||
namespace neuralnetworks {
|
||||
namespace V1_1 {
|
||||
|
||||
using V1_0::DeviceStatus;
|
||||
using V1_0::ErrorStatus;
|
||||
using V1_0::IPreparedModel;
|
||||
using V1_0::Operand;
|
||||
using V1_0::OperandType;
|
||||
using V1_0::Request;
|
||||
|
||||
namespace vts {
|
||||
namespace functional {
|
||||
namespace android::hardware::neuralnetworks::V1_1::vts::functional {
|
||||
|
||||
// A class for test environment setup
|
||||
class NeuralnetworksHidlEnvironment : public ::testing::VtsHalHidlTargetTestEnvBase {
|
||||
DISALLOW_COPY_AND_ASSIGN(NeuralnetworksHidlEnvironment);
|
||||
NeuralnetworksHidlEnvironment();
|
||||
~NeuralnetworksHidlEnvironment() override;
|
||||
NeuralnetworksHidlEnvironment() = default;
|
||||
|
||||
public:
|
||||
public:
|
||||
static NeuralnetworksHidlEnvironment* getInstance();
|
||||
void registerTestServices() override;
|
||||
};
|
||||
|
@ -61,22 +47,17 @@ class NeuralnetworksHidlEnvironment : public ::testing::VtsHalHidlTargetTestEnvB
|
|||
class NeuralnetworksHidlTest : public ::testing::VtsHalHidlTargetTestBase {
|
||||
DISALLOW_COPY_AND_ASSIGN(NeuralnetworksHidlTest);
|
||||
|
||||
public:
|
||||
NeuralnetworksHidlTest();
|
||||
~NeuralnetworksHidlTest() override;
|
||||
public:
|
||||
NeuralnetworksHidlTest() = default;
|
||||
void SetUp() override;
|
||||
void TearDown() override;
|
||||
|
||||
protected:
|
||||
sp<IDevice> device;
|
||||
protected:
|
||||
const sp<IDevice> device = ::testing::VtsHalHidlTargetTestBase::getService<IDevice>(
|
||||
NeuralnetworksHidlEnvironment::getInstance());
|
||||
};
|
||||
|
||||
} // namespace functional
|
||||
} // namespace vts
|
||||
} // namespace V1_1
|
||||
} // namespace neuralnetworks
|
||||
} // namespace hardware
|
||||
} // namespace android
|
||||
} // namespace android::hardware::neuralnetworks::V1_1::vts::functional
|
||||
|
||||
namespace android::hardware::neuralnetworks::V1_0 {
|
||||
|
||||
|
|
|
@ -18,13 +18,10 @@
|
|||
|
||||
#include "VtsHalNeuralnetworks.h"
|
||||
|
||||
namespace android {
|
||||
namespace hardware {
|
||||
namespace neuralnetworks {
|
||||
namespace V1_2 {
|
||||
namespace vts {
|
||||
namespace functional {
|
||||
namespace android::hardware::neuralnetworks::V1_2::vts::functional {
|
||||
|
||||
using V1_0::DeviceStatus;
|
||||
using V1_0::ErrorStatus;
|
||||
using V1_0::PerformanceInfo;
|
||||
|
||||
// create device test
|
||||
|
@ -113,9 +110,4 @@ TEST_F(NeuralnetworksHidlTest, getNumberOfCacheFilesNeeded) {
|
|||
});
|
||||
EXPECT_TRUE(ret.isOk());
|
||||
}
|
||||
} // namespace functional
|
||||
} // namespace vts
|
||||
} // namespace V1_2
|
||||
} // namespace neuralnetworks
|
||||
} // namespace hardware
|
||||
} // namespace android
|
||||
} // namespace android::hardware::neuralnetworks::V1_2::vts::functional
|
||||
|
|
|
@ -24,6 +24,8 @@
|
|||
|
||||
namespace android::hardware::neuralnetworks::V1_2::implementation {
|
||||
|
||||
using V1_0::ErrorStatus;
|
||||
|
||||
constexpr Timing kNoTiming = {.timeOnDevice = std::numeric_limits<uint64_t>::max(),
|
||||
.timeInDriver = std::numeric_limits<uint64_t>::max()};
|
||||
|
||||
|
|
|
@ -17,7 +17,6 @@
|
|||
#define LOG_TAG "neuralnetworks_hidl_hal_test"
|
||||
|
||||
#include <android-base/logging.h>
|
||||
#include <android/hidl/memory/1.0/IMemory.h>
|
||||
#include <ftw.h>
|
||||
#include <gtest/gtest.h>
|
||||
#include <hidlmemory/mapping.h>
|
||||
|
@ -45,20 +44,12 @@ namespace generated_tests::mobilenet_quantized {
|
|||
const ::test_helper::TestModel& get_test_model();
|
||||
} // namespace generated_tests::mobilenet_quantized
|
||||
|
||||
namespace android {
|
||||
namespace hardware {
|
||||
namespace neuralnetworks {
|
||||
namespace V1_2 {
|
||||
namespace vts {
|
||||
namespace functional {
|
||||
namespace android::hardware::neuralnetworks::V1_2::vts::functional {
|
||||
|
||||
using namespace test_helper;
|
||||
using ::android::hardware::neuralnetworks::V1_0::OperandLifeTime;
|
||||
using ::android::hardware::neuralnetworks::V1_1::ExecutionPreference;
|
||||
using ::android::hardware::neuralnetworks::V1_2::implementation::ExecutionCallback;
|
||||
using ::android::hardware::neuralnetworks::V1_2::implementation::PreparedModelCallback;
|
||||
using ::android::hidl::memory::V1_0::IMemory;
|
||||
using ::android::nn::allocateSharedMemory;
|
||||
using implementation::PreparedModelCallback;
|
||||
using V1_0::ErrorStatus;
|
||||
using V1_1::ExecutionPreference;
|
||||
|
||||
namespace float32_model {
|
||||
|
||||
|
@ -307,7 +298,7 @@ class CompilationCachingTestBase : public NeuralnetworksHidlTest {
|
|||
}
|
||||
|
||||
// See if the service can handle the model.
|
||||
bool isModelFullySupported(const V1_2::Model& model) {
|
||||
bool isModelFullySupported(const Model& model) {
|
||||
bool fullySupportsModel = false;
|
||||
Return<void> supportedCall = device->getSupportedOperations_1_2(
|
||||
model,
|
||||
|
@ -321,14 +312,13 @@ class CompilationCachingTestBase : public NeuralnetworksHidlTest {
|
|||
return fullySupportsModel;
|
||||
}
|
||||
|
||||
void saveModelToCache(const V1_2::Model& model, const hidl_vec<hidl_handle>& modelCache,
|
||||
void saveModelToCache(const Model& model, const hidl_vec<hidl_handle>& modelCache,
|
||||
const hidl_vec<hidl_handle>& dataCache,
|
||||
sp<IPreparedModel>* preparedModel = nullptr) {
|
||||
if (preparedModel != nullptr) *preparedModel = nullptr;
|
||||
|
||||
// Launch prepare model.
|
||||
sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
|
||||
ASSERT_NE(nullptr, preparedModelCallback.get());
|
||||
hidl_array<uint8_t, sizeof(mToken)> cacheToken(mToken);
|
||||
Return<ErrorStatus> prepareLaunchStatus =
|
||||
device->prepareModel_1_2(model, ExecutionPreference::FAST_SINGLE_ANSWER, modelCache,
|
||||
|
@ -340,9 +330,8 @@ class CompilationCachingTestBase : public NeuralnetworksHidlTest {
|
|||
preparedModelCallback->wait();
|
||||
ASSERT_EQ(preparedModelCallback->getStatus(), ErrorStatus::NONE);
|
||||
if (preparedModel != nullptr) {
|
||||
*preparedModel =
|
||||
V1_2::IPreparedModel::castFrom(preparedModelCallback->getPreparedModel())
|
||||
.withDefault(nullptr);
|
||||
*preparedModel = IPreparedModel::castFrom(preparedModelCallback->getPreparedModel())
|
||||
.withDefault(nullptr);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -358,7 +347,7 @@ class CompilationCachingTestBase : public NeuralnetworksHidlTest {
|
|||
return false;
|
||||
}
|
||||
|
||||
bool checkEarlyTermination(const V1_2::Model& model) {
|
||||
bool checkEarlyTermination(const Model& model) {
|
||||
if (!isModelFullySupported(model)) {
|
||||
LOG(INFO) << "NN VTS: Early termination of test because vendor service cannot "
|
||||
"prepare model that it does not support.";
|
||||
|
@ -375,7 +364,6 @@ class CompilationCachingTestBase : public NeuralnetworksHidlTest {
|
|||
sp<IPreparedModel>* preparedModel, ErrorStatus* status) {
|
||||
// Launch prepare model from cache.
|
||||
sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
|
||||
ASSERT_NE(nullptr, preparedModelCallback.get());
|
||||
hidl_array<uint8_t, sizeof(mToken)> cacheToken(mToken);
|
||||
Return<ErrorStatus> prepareLaunchStatus = device->prepareModelFromCache(
|
||||
modelCache, dataCache, cacheToken, preparedModelCallback);
|
||||
|
@ -389,7 +377,7 @@ class CompilationCachingTestBase : public NeuralnetworksHidlTest {
|
|||
// Retrieve prepared model.
|
||||
preparedModelCallback->wait();
|
||||
*status = preparedModelCallback->getStatus();
|
||||
*preparedModel = V1_2::IPreparedModel::castFrom(preparedModelCallback->getPreparedModel())
|
||||
*preparedModel = IPreparedModel::castFrom(preparedModelCallback->getPreparedModel())
|
||||
.withDefault(nullptr);
|
||||
}
|
||||
|
||||
|
@ -1353,9 +1341,4 @@ TEST_P(CompilationCachingSecurityTest, WrongToken) {
|
|||
INSTANTIATE_TEST_CASE_P(TestCompilationCaching, CompilationCachingSecurityTest,
|
||||
::testing::Combine(kOperandTypeChoices, ::testing::Range(0U, 10U)));
|
||||
|
||||
} // namespace functional
|
||||
} // namespace vts
|
||||
} // namespace V1_2
|
||||
} // namespace neuralnetworks
|
||||
} // namespace hardware
|
||||
} // namespace android
|
||||
} // namespace android::hardware::neuralnetworks::V1_2::vts::functional
|
||||
|
|
|
@ -44,30 +44,17 @@
|
|||
#include "Utils.h"
|
||||
#include "VtsHalNeuralnetworks.h"
|
||||
|
||||
namespace android {
|
||||
namespace hardware {
|
||||
namespace neuralnetworks {
|
||||
namespace V1_2 {
|
||||
namespace vts {
|
||||
namespace functional {
|
||||
namespace android::hardware::neuralnetworks::V1_2::vts::functional {
|
||||
|
||||
using namespace test_helper;
|
||||
using ::android::hardware::neuralnetworks::V1_0::DataLocation;
|
||||
using ::android::hardware::neuralnetworks::V1_0::ErrorStatus;
|
||||
using ::android::hardware::neuralnetworks::V1_0::OperandLifeTime;
|
||||
using ::android::hardware::neuralnetworks::V1_0::Request;
|
||||
using ::android::hardware::neuralnetworks::V1_0::RequestArgument;
|
||||
using ::android::hardware::neuralnetworks::V1_1::ExecutionPreference;
|
||||
using ::android::hardware::neuralnetworks::V1_2::Constant;
|
||||
using ::android::hardware::neuralnetworks::V1_2::IDevice;
|
||||
using ::android::hardware::neuralnetworks::V1_2::IPreparedModel;
|
||||
using ::android::hardware::neuralnetworks::V1_2::MeasureTiming;
|
||||
using ::android::hardware::neuralnetworks::V1_2::Model;
|
||||
using ::android::hardware::neuralnetworks::V1_2::OutputShape;
|
||||
using ::android::hardware::neuralnetworks::V1_2::Timing;
|
||||
using ::android::hardware::neuralnetworks::V1_2::implementation::ExecutionCallback;
|
||||
using ::android::hardware::neuralnetworks::V1_2::implementation::PreparedModelCallback;
|
||||
using ::android::hidl::memory::V1_0::IMemory;
|
||||
using hidl::memory::V1_0::IMemory;
|
||||
using implementation::ExecutionCallback;
|
||||
using implementation::PreparedModelCallback;
|
||||
using V1_0::DataLocation;
|
||||
using V1_0::ErrorStatus;
|
||||
using V1_0::OperandLifeTime;
|
||||
using V1_0::Request;
|
||||
using V1_1::ExecutionPreference;
|
||||
using HidlToken = hidl_array<uint8_t, static_cast<uint32_t>(Constant::BYTE_SIZE_OF_CACHE_TOKEN)>;
|
||||
|
||||
enum class OutputType { FULLY_SPECIFIED, UNSPECIFIED, INSUFFICIENT };
|
||||
|
@ -447,9 +434,4 @@ INSTANTIATE_GENERATED_TEST(GeneratedTest,
|
|||
INSTANTIATE_GENERATED_TEST(DynamicOutputShapeTest,
|
||||
[](const TestModel& testModel) { return !testModel.expectFailure; });
|
||||
|
||||
} // namespace functional
|
||||
} // namespace vts
|
||||
} // namespace V1_2
|
||||
} // namespace neuralnetworks
|
||||
} // namespace hardware
|
||||
} // namespace android
|
||||
} // namespace android::hardware::neuralnetworks::V1_2::vts::functional
|
||||
|
|
|
@ -25,12 +25,7 @@
|
|||
#include "TestHarness.h"
|
||||
#include "VtsHalNeuralnetworks.h"
|
||||
|
||||
namespace android {
|
||||
namespace hardware {
|
||||
namespace neuralnetworks {
|
||||
namespace V1_2 {
|
||||
namespace vts {
|
||||
namespace functional {
|
||||
namespace android::hardware::neuralnetworks::V1_2::vts::functional {
|
||||
|
||||
class GeneratedTestBase
|
||||
: public NeuralnetworksHidlTest,
|
||||
|
@ -54,29 +49,24 @@ class GeneratedTestBase
|
|||
// TODO: Clean up the hierarchy for ValidationTest.
|
||||
class ValidationTest : public GeneratedTestBase {
|
||||
protected:
|
||||
void validateEverything(const Model& model, const Request& request);
|
||||
void validateFailure(const Model& model, const Request& request);
|
||||
void validateEverything(const Model& model, const V1_0::Request& request);
|
||||
void validateFailure(const Model& model, const V1_0::Request& request);
|
||||
|
||||
private:
|
||||
void validateModel(const Model& model);
|
||||
void validateRequest(const sp<IPreparedModel>& preparedModel, const Request& request);
|
||||
void validateRequestFailure(const sp<IPreparedModel>& preparedModel, const Request& request);
|
||||
void validateBurst(const sp<IPreparedModel>& preparedModel, const Request& request);
|
||||
void validateRequest(const sp<IPreparedModel>& preparedModel, const V1_0::Request& request);
|
||||
void validateRequestFailure(const sp<IPreparedModel>& preparedModel,
|
||||
const V1_0::Request& request);
|
||||
void validateBurst(const sp<IPreparedModel>& preparedModel, const V1_0::Request& request);
|
||||
};
|
||||
|
||||
Model createModel(const ::test_helper::TestModel& testModel);
|
||||
|
||||
void PrepareModel(const sp<V1_2::IDevice>& device, const V1_2::Model& model,
|
||||
sp<V1_2::IPreparedModel>* preparedModel);
|
||||
void PrepareModel(const sp<IDevice>& device, const Model& model, sp<IPreparedModel>* preparedModel);
|
||||
|
||||
void EvaluatePreparedModel(const sp<V1_2::IPreparedModel>& preparedModel,
|
||||
void EvaluatePreparedModel(const sp<IPreparedModel>& preparedModel,
|
||||
const ::test_helper::TestModel& testModel, bool testDynamicOutputShape);
|
||||
|
||||
} // namespace functional
|
||||
} // namespace vts
|
||||
} // namespace V1_2
|
||||
} // namespace neuralnetworks
|
||||
} // namespace hardware
|
||||
} // namespace android
|
||||
} // namespace android::hardware::neuralnetworks::V1_2::vts::functional
|
||||
|
||||
#endif // ANDROID_HARDWARE_NEURALNETWORKS_V1_2_GENERATED_TEST_HARNESS_H
|
||||
|
|
|
@ -27,17 +27,14 @@
|
|||
|
||||
#include <android-base/logging.h>
|
||||
|
||||
namespace android {
|
||||
namespace hardware {
|
||||
namespace neuralnetworks {
|
||||
namespace V1_2 {
|
||||
namespace vts {
|
||||
namespace functional {
|
||||
namespace android::hardware::neuralnetworks::V1_2::vts::functional {
|
||||
|
||||
using ::android::nn::ExecutionBurstController;
|
||||
using ::android::nn::RequestChannelSender;
|
||||
using ::android::nn::ResultChannelReceiver;
|
||||
using ExecutionBurstCallback = ::android::nn::ExecutionBurstController::ExecutionBurstCallback;
|
||||
using nn::ExecutionBurstController;
|
||||
using nn::RequestChannelSender;
|
||||
using nn::ResultChannelReceiver;
|
||||
using V1_0::ErrorStatus;
|
||||
using V1_0::Request;
|
||||
using ExecutionBurstCallback = ExecutionBurstController::ExecutionBurstCallback;
|
||||
|
||||
// This constant value represents the length of an FMQ that is large enough to
|
||||
// return a result from a burst execution for all of the generated test cases.
|
||||
|
@ -324,9 +321,4 @@ void ValidationTest::validateBurst(const sp<IPreparedModel>& preparedModel,
|
|||
ASSERT_NO_FATAL_FAILURE(validateBurstFmqLength(preparedModel, request));
|
||||
}
|
||||
|
||||
} // namespace functional
|
||||
} // namespace vts
|
||||
} // namespace V1_2
|
||||
} // namespace neuralnetworks
|
||||
} // namespace hardware
|
||||
} // namespace android
|
||||
} // namespace android::hardware::neuralnetworks::V1_2::vts::functional
|
||||
|
|
|
@ -21,19 +21,12 @@
|
|||
#include "GeneratedTestHarness.h"
|
||||
#include "VtsHalNeuralnetworks.h"
|
||||
|
||||
namespace android {
|
||||
namespace hardware {
|
||||
namespace neuralnetworks {
|
||||
namespace V1_2 {
|
||||
namespace android::hardware::neuralnetworks::V1_2::vts::functional {
|
||||
|
||||
using implementation::PreparedModelCallback;
|
||||
using V1_0::ErrorStatus;
|
||||
using V1_0::OperandLifeTime;
|
||||
using V1_1::ExecutionPreference;
|
||||
|
||||
namespace vts {
|
||||
namespace functional {
|
||||
|
||||
using ::android::hardware::neuralnetworks::V1_2::implementation::ExecutionCallback;
|
||||
using ::android::hardware::neuralnetworks::V1_2::implementation::PreparedModelCallback;
|
||||
using HidlToken = hidl_array<uint8_t, static_cast<uint32_t>(Constant::BYTE_SIZE_OF_CACHE_TOKEN)>;
|
||||
|
||||
///////////////////////// UTILITY FUNCTIONS /////////////////////////
|
||||
|
@ -54,7 +47,6 @@ static void validatePrepareModel(const sp<IDevice>& device, const std::string& m
|
|||
SCOPED_TRACE(message + " [prepareModel_1_2]");
|
||||
|
||||
sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
|
||||
ASSERT_NE(nullptr, preparedModelCallback.get());
|
||||
Return<ErrorStatus> prepareLaunchStatus =
|
||||
device->prepareModel_1_2(model, preference, hidl_vec<hidl_handle>(),
|
||||
hidl_vec<hidl_handle>(), HidlToken(), preparedModelCallback);
|
||||
|
@ -692,8 +684,9 @@ static void mutateExecutionPreferenceTest(const sp<IDevice>& device, const Model
|
|||
for (int32_t preference : invalidExecutionPreferences) {
|
||||
const std::string message =
|
||||
"mutateExecutionPreferenceTest: preference " + std::to_string(preference);
|
||||
validate(device, message, model, [](Model*) {},
|
||||
static_cast<ExecutionPreference>(preference));
|
||||
validate(
|
||||
device, message, model, [](Model*) {},
|
||||
static_cast<ExecutionPreference>(preference));
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -717,9 +710,4 @@ void ValidationTest::validateModel(const Model& model) {
|
|||
mutateExecutionPreferenceTest(device, model);
|
||||
}
|
||||
|
||||
} // namespace functional
|
||||
} // namespace vts
|
||||
} // namespace V1_2
|
||||
} // namespace neuralnetworks
|
||||
} // namespace hardware
|
||||
} // namespace android
|
||||
} // namespace android::hardware::neuralnetworks::V1_2::vts::functional
|
||||
|
|
|
@ -24,14 +24,11 @@
|
|||
#include "Utils.h"
|
||||
#include "VtsHalNeuralnetworks.h"
|
||||
|
||||
namespace android {
|
||||
namespace hardware {
|
||||
namespace neuralnetworks {
|
||||
namespace V1_2 {
|
||||
namespace vts {
|
||||
namespace functional {
|
||||
namespace android::hardware::neuralnetworks::V1_2::vts::functional {
|
||||
|
||||
using ::android::hardware::neuralnetworks::V1_2::implementation::ExecutionCallback;
|
||||
using implementation::ExecutionCallback;
|
||||
using V1_0::ErrorStatus;
|
||||
using V1_0::Request;
|
||||
|
||||
///////////////////////// UTILITY FUNCTIONS /////////////////////////
|
||||
|
||||
|
@ -62,7 +59,6 @@ static void validate(const sp<IPreparedModel>& preparedModel, const std::string&
|
|||
SCOPED_TRACE(message + " [execute_1_2]");
|
||||
|
||||
sp<ExecutionCallback> executionCallback = new ExecutionCallback();
|
||||
ASSERT_NE(nullptr, executionCallback.get());
|
||||
Return<ErrorStatus> executeLaunchStatus =
|
||||
preparedModel->execute_1_2(request, measure, executionCallback);
|
||||
ASSERT_TRUE(executeLaunchStatus.isOk());
|
||||
|
@ -171,9 +167,4 @@ void ValidationTest::validateRequestFailure(const sp<IPreparedModel>& preparedMo
|
|||
ASSERT_TRUE(executeStatus.isOk());
|
||||
}
|
||||
|
||||
} // namespace functional
|
||||
} // namespace vts
|
||||
} // namespace V1_2
|
||||
} // namespace neuralnetworks
|
||||
} // namespace hardware
|
||||
} // namespace android
|
||||
} // namespace android::hardware::neuralnetworks::V1_2::vts::functional
|
||||
|
|
|
@ -24,15 +24,12 @@
|
|||
|
||||
#include <android-base/logging.h>
|
||||
|
||||
namespace android {
|
||||
namespace hardware {
|
||||
namespace neuralnetworks {
|
||||
namespace V1_2 {
|
||||
namespace vts {
|
||||
namespace functional {
|
||||
namespace android::hardware::neuralnetworks::V1_2::vts::functional {
|
||||
|
||||
using ::android::hardware::neuralnetworks::V1_2::implementation::PreparedModelCallback;
|
||||
using implementation::PreparedModelCallback;
|
||||
using HidlToken = hidl_array<uint8_t, static_cast<uint32_t>(Constant::BYTE_SIZE_OF_CACHE_TOKEN)>;
|
||||
using V1_0::ErrorStatus;
|
||||
using V1_0::Request;
|
||||
using V1_1::ExecutionPreference;
|
||||
|
||||
// internal helper function
|
||||
|
@ -53,7 +50,6 @@ static void createPreparedModel(const sp<IDevice>& device, const Model& model,
|
|||
|
||||
// launch prepare model
|
||||
sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
|
||||
ASSERT_NE(nullptr, preparedModelCallback.get());
|
||||
Return<ErrorStatus> prepareLaunchStatus = device->prepareModel_1_2(
|
||||
model, ExecutionPreference::FAST_SINGLE_ANSWER, hidl_vec<hidl_handle>(),
|
||||
hidl_vec<hidl_handle>(), HidlToken(), preparedModelCallback);
|
||||
|
@ -86,10 +82,6 @@ static void createPreparedModel(const sp<IDevice>& device, const Model& model,
|
|||
}
|
||||
|
||||
// A class for test environment setup
|
||||
NeuralnetworksHidlEnvironment::NeuralnetworksHidlEnvironment() {}
|
||||
|
||||
NeuralnetworksHidlEnvironment::~NeuralnetworksHidlEnvironment() {}
|
||||
|
||||
NeuralnetworksHidlEnvironment* NeuralnetworksHidlEnvironment::getInstance() {
|
||||
// This has to return a "new" object because it is freed inside
|
||||
// ::testing::AddGlobalTestEnvironment when the gtest is being torn down
|
||||
|
@ -102,14 +94,8 @@ void NeuralnetworksHidlEnvironment::registerTestServices() {
|
|||
}
|
||||
|
||||
// The main test class for NEURALNETWORK HIDL HAL.
|
||||
NeuralnetworksHidlTest::NeuralnetworksHidlTest() {}
|
||||
|
||||
NeuralnetworksHidlTest::~NeuralnetworksHidlTest() {}
|
||||
|
||||
void NeuralnetworksHidlTest::SetUp() {
|
||||
::testing::VtsHalHidlTargetTestBase::SetUp();
|
||||
device = ::testing::VtsHalHidlTargetTestBase::getService<IDevice>(
|
||||
NeuralnetworksHidlEnvironment::getInstance());
|
||||
|
||||
#ifdef PRESUBMIT_NOT_VTS
|
||||
const std::string name =
|
||||
|
@ -124,7 +110,6 @@ void NeuralnetworksHidlTest::SetUp() {
|
|||
}
|
||||
|
||||
void NeuralnetworksHidlTest::TearDown() {
|
||||
device = nullptr;
|
||||
::testing::VtsHalHidlTargetTestBase::TearDown();
|
||||
}
|
||||
|
||||
|
@ -168,18 +153,12 @@ TEST_P(ValidationTest, Test) {
|
|||
|
||||
INSTANTIATE_GENERATED_TEST(ValidationTest, [](const test_helper::TestModel&) { return true; });
|
||||
|
||||
sp<IPreparedModel> getPreparedModel_1_2(
|
||||
const sp<V1_2::implementation::PreparedModelCallback>& callback) {
|
||||
sp<IPreparedModel> getPreparedModel_1_2(const sp<implementation::PreparedModelCallback>& callback) {
|
||||
sp<V1_0::IPreparedModel> preparedModelV1_0 = callback->getPreparedModel();
|
||||
return V1_2::IPreparedModel::castFrom(preparedModelV1_0).withDefault(nullptr);
|
||||
return IPreparedModel::castFrom(preparedModelV1_0).withDefault(nullptr);
|
||||
}
|
||||
|
||||
} // namespace functional
|
||||
} // namespace vts
|
||||
} // namespace V1_2
|
||||
} // namespace neuralnetworks
|
||||
} // namespace hardware
|
||||
} // namespace android
|
||||
} // namespace android::hardware::neuralnetworks::V1_2::vts::functional
|
||||
|
||||
namespace android::hardware::neuralnetworks::V1_0 {
|
||||
|
||||
|
|
|
@ -32,23 +32,12 @@
|
|||
#include "1.2/Callbacks.h"
|
||||
#include "TestHarness.h"
|
||||
|
||||
namespace android {
|
||||
namespace hardware {
|
||||
namespace neuralnetworks {
|
||||
namespace V1_2 {
|
||||
|
||||
using V1_0::DeviceStatus;
|
||||
using V1_0::ErrorStatus;
|
||||
using V1_0::Request;
|
||||
|
||||
namespace vts {
|
||||
namespace functional {
|
||||
namespace android::hardware::neuralnetworks::V1_2::vts::functional {
|
||||
|
||||
// A class for test environment setup
|
||||
class NeuralnetworksHidlEnvironment : public ::testing::VtsHalHidlTargetTestEnvBase {
|
||||
DISALLOW_COPY_AND_ASSIGN(NeuralnetworksHidlEnvironment);
|
||||
NeuralnetworksHidlEnvironment();
|
||||
~NeuralnetworksHidlEnvironment() override;
|
||||
NeuralnetworksHidlEnvironment() = default;
|
||||
|
||||
public:
|
||||
static NeuralnetworksHidlEnvironment* getInstance();
|
||||
|
@ -60,25 +49,19 @@ class NeuralnetworksHidlTest : public ::testing::VtsHalHidlTargetTestBase {
|
|||
DISALLOW_COPY_AND_ASSIGN(NeuralnetworksHidlTest);
|
||||
|
||||
public:
|
||||
NeuralnetworksHidlTest();
|
||||
~NeuralnetworksHidlTest() override;
|
||||
NeuralnetworksHidlTest() = default;
|
||||
void SetUp() override;
|
||||
void TearDown() override;
|
||||
|
||||
protected:
|
||||
sp<IDevice> device;
|
||||
const sp<IDevice> device = ::testing::VtsHalHidlTargetTestBase::getService<IDevice>(
|
||||
NeuralnetworksHidlEnvironment::getInstance());
|
||||
};
|
||||
|
||||
// Utility function to get PreparedModel from callback and downcast to V1_2.
|
||||
sp<IPreparedModel> getPreparedModel_1_2(
|
||||
const sp<V1_2::implementation::PreparedModelCallback>& callback);
|
||||
sp<IPreparedModel> getPreparedModel_1_2(const sp<implementation::PreparedModelCallback>& callback);
|
||||
|
||||
} // namespace functional
|
||||
} // namespace vts
|
||||
} // namespace V1_2
|
||||
} // namespace neuralnetworks
|
||||
} // namespace hardware
|
||||
} // namespace android
|
||||
} // namespace android::hardware::neuralnetworks::V1_2::vts::functional
|
||||
|
||||
namespace android::hardware::neuralnetworks::V1_0 {
|
||||
|
||||
|
|
|
@ -46,8 +46,6 @@
|
|||
|
||||
namespace android::hardware::neuralnetworks::V1_2::implementation {
|
||||
|
||||
using V1_0::ErrorStatus;
|
||||
|
||||
/**
|
||||
* The PreparedModelCallback class is used to receive the error status of
|
||||
* preparing a model as well as the prepared model from a task executing
|
||||
|
@ -87,7 +85,8 @@ class PreparedModelCallback : public IPreparedModelCallback {
|
|||
* @param preparedModel Returned model that has been prepared for execution,
|
||||
* nullptr if the model was unable to be prepared.
|
||||
*/
|
||||
Return<void> notify(ErrorStatus status, const sp<V1_0::IPreparedModel>& preparedModel) override;
|
||||
Return<void> notify(V1_0::ErrorStatus status,
|
||||
const sp<V1_0::IPreparedModel>& preparedModel) override;
|
||||
|
||||
/**
|
||||
* IPreparedModelCallback::notify_1_2 marks the callback object with the
|
||||
|
@ -112,7 +111,7 @@ class PreparedModelCallback : public IPreparedModelCallback {
|
|||
* @param preparedModel Returned model that has been prepared for execution,
|
||||
* nullptr if the model was unable to be prepared.
|
||||
*/
|
||||
Return<void> notify_1_2(ErrorStatus status,
|
||||
Return<void> notify_1_2(V1_0::ErrorStatus status,
|
||||
const sp<V1_2::IPreparedModel>& preparedModel) override;
|
||||
|
||||
/**
|
||||
|
@ -134,7 +133,7 @@ class PreparedModelCallback : public IPreparedModelCallback {
|
|||
* - GENERAL_FAILURE if there is an unspecified error
|
||||
* - INVALID_ARGUMENT if the input model is invalid
|
||||
*/
|
||||
ErrorStatus getStatus() const;
|
||||
V1_0::ErrorStatus getStatus() const;
|
||||
|
||||
/**
|
||||
* Retrieves the model that has been prepared for execution from the
|
||||
|
@ -152,7 +151,7 @@ class PreparedModelCallback : public IPreparedModelCallback {
|
|||
mutable std::mutex mMutex;
|
||||
mutable std::condition_variable mCondition;
|
||||
bool mNotified GUARDED_BY(mMutex) = false;
|
||||
ErrorStatus mErrorStatus = ErrorStatus::GENERAL_FAILURE;
|
||||
V1_0::ErrorStatus mErrorStatus = V1_0::ErrorStatus::GENERAL_FAILURE;
|
||||
sp<V1_0::IPreparedModel> mPreparedModel;
|
||||
};
|
||||
|
||||
|
@ -195,7 +194,7 @@ class ExecutionCallback : public IExecutionCallback {
|
|||
* enough to store the resultant values
|
||||
* - INVALID_ARGUMENT if the input request is invalid
|
||||
*/
|
||||
Return<void> notify(ErrorStatus status) override;
|
||||
Return<void> notify(V1_0::ErrorStatus status) override;
|
||||
|
||||
/**
|
||||
* IExecutionCallback::notify_1_2 marks the callback object with the results
|
||||
|
@ -230,11 +229,11 @@ class ExecutionCallback : public IExecutionCallback {
|
|||
* reported as UINT64_MAX. A driver may choose to report any time as
|
||||
* UINT64_MAX, indicating that particular measurement is not available.
|
||||
*/
|
||||
Return<void> notify_1_2(ErrorStatus status, const hidl_vec<OutputShape>& outputShapes,
|
||||
Return<void> notify_1_2(V1_0::ErrorStatus status, const hidl_vec<OutputShape>& outputShapes,
|
||||
const Timing& timing) override;
|
||||
|
||||
// An overload of the latest notify interface to hide the version from ExecutionBuilder.
|
||||
Return<void> notify(ErrorStatus status, const hidl_vec<OutputShape>& outputShapes,
|
||||
Return<void> notify(V1_0::ErrorStatus status, const hidl_vec<OutputShape>& outputShapes,
|
||||
const Timing& timing) {
|
||||
return notify_1_2(status, outputShapes, timing);
|
||||
}
|
||||
|
@ -264,7 +263,7 @@ class ExecutionCallback : public IExecutionCallback {
|
|||
* - INVALID_ARGUMENT if one of the input arguments to prepareModel is
|
||||
* invalid
|
||||
*/
|
||||
ErrorStatus getStatus() const;
|
||||
V1_0::ErrorStatus getStatus() const;
|
||||
|
||||
/**
|
||||
* Retrieves the output shapes returned from the asynchronous task launched
|
||||
|
@ -309,14 +308,14 @@ class ExecutionCallback : public IExecutionCallback {
|
|||
* object before any call to wait or get* return. It then enables all prior
|
||||
* and future wait calls on the ExecutionCallback object to proceed.
|
||||
*/
|
||||
void notifyInternal(ErrorStatus errorStatus, const hidl_vec<OutputShape>& outputShapes,
|
||||
void notifyInternal(V1_0::ErrorStatus errorStatus, const hidl_vec<OutputShape>& outputShapes,
|
||||
const Timing& timing);
|
||||
|
||||
// members
|
||||
mutable std::mutex mMutex;
|
||||
mutable std::condition_variable mCondition;
|
||||
bool mNotified GUARDED_BY(mMutex) = false;
|
||||
ErrorStatus mErrorStatus = ErrorStatus::GENERAL_FAILURE;
|
||||
V1_0::ErrorStatus mErrorStatus = V1_0::ErrorStatus::GENERAL_FAILURE;
|
||||
std::vector<OutputShape> mOutputShapes = {};
|
||||
Timing mTiming = {};
|
||||
};
|
||||
|
|
Loading…
Reference in a new issue