Implement VTS tests for NNAPI AIDL interface

The tests are copied from the HIDL 1.0-1.3 VTS tests and updated to use AIDL.

Bug: 172922059
Test: VtsHalNeuralnetworksTargetTest
Change-Id: Ife08409e9b46420685a1ccb0b3256286c973dbf5
Merged-In: Ife08409e9b46420685a1ccb0b3256286c973dbf5
(cherry picked from commit b38bb4f12a)
Commit c185e88ccf (parent 6b6dfcd439)
23 changed files with 6543 additions and 24 deletions
@@ -57,6 +57,7 @@ cc_test {
        "VtsHalNeuralNetworksV1_0_utils",
        "VtsHalNeuralNetworksV1_2_utils",
        "VtsHalNeuralNetworksV1_3_utils",
        "android.hardware.neuralnetworks-V1-ndk_platform",
        "android.hardware.neuralnetworks@1.0",
        "android.hardware.neuralnetworks@1.1",
        "android.hardware.neuralnetworks@1.2",
@@ -47,7 +47,10 @@ bool valid(const Type& halObject) {
    return result.has_value();
}

nn::GeneralResult<Model> copyModel(const Model& model);
nn::GeneralResult<Memory> clone(const Memory& memory);
nn::GeneralResult<Request> clone(const Request& request);
nn::GeneralResult<RequestMemoryPool> clone(const RequestMemoryPool& requestPool);
nn::GeneralResult<Model> clone(const Model& model);

}  // namespace aidl::android::hardware::neuralnetworks::utils
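A minimal sketch of the error-propagation pattern behind these declarations: nn::GeneralResult behaves like an expected<value, error> type, and NN_TRY unwraps a result or early-returns its error from the enclosing function. The MiniResult/MINI_TRY/parsePositive/doubled names below are illustrative stand-ins, not the real nnapi API from nnapi/Result.h.

// Illustrative stand-ins only; the real types live in nnapi/Result.h.
#include <optional>
#include <string>
#include <utility>

template <typename T>
struct MiniResult {
    std::optional<T> value;  // engaged on success
    std::string error;       // meaningful only on failure
    bool has_value() const { return value.has_value(); }
};

// Unwrap a MiniResult or early-return its error from the enclosing function.
// Uses the GNU statement-expression extension available on the Android toolchain.
#define MINI_TRY(expr)                                       \
    ({                                                       \
        auto result_ = (expr);                               \
        if (!result_.has_value()) {                          \
            return {std::nullopt, std::move(result_.error)}; \
        }                                                    \
        std::move(*result_.value);                           \
    })

MiniResult<int> parsePositive(int raw) {
    if (raw <= 0) return {std::nullopt, "value must be positive"};
    return {raw, {}};
}

MiniResult<int> doubled(int raw) {
    const int value = MINI_TRY(parsePositive(raw));  // propagates the error on failure
    return {value * 2, {}};
}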
@@ -19,38 +19,77 @@
#include <nnapi/Result.h>

namespace aidl::android::hardware::neuralnetworks::utils {
namespace {

using ::android::nn::GeneralResult;

GeneralResult<Model> copyModel(const Model& model) {
    Model newModel{
template <typename Type>
nn::GeneralResult<std::vector<Type>> cloneVec(const std::vector<Type>& arguments) {
    std::vector<Type> clonedObjects;
    clonedObjects.reserve(arguments.size());
    for (const auto& argument : arguments) {
        clonedObjects.push_back(NN_TRY(clone(argument)));
    }
    return clonedObjects;
}

template <typename Type>
GeneralResult<std::vector<Type>> clone(const std::vector<Type>& arguments) {
    return cloneVec(arguments);
}

}  // namespace

GeneralResult<Memory> clone(const Memory& memory) {
    common::NativeHandle nativeHandle;
    nativeHandle.ints = memory.handle.ints;
    nativeHandle.fds.reserve(memory.handle.fds.size());
    for (const auto& fd : memory.handle.fds) {
        const int newFd = dup(fd.get());
        if (newFd < 0) {
            return NN_ERROR() << "Couldn't dup a file descriptor";
        }
        nativeHandle.fds.emplace_back(newFd);
    }
    return Memory{
            .handle = std::move(nativeHandle),
            .size = memory.size,
            .name = memory.name,
    };
}

GeneralResult<RequestMemoryPool> clone(const RequestMemoryPool& requestPool) {
    using Tag = RequestMemoryPool::Tag;
    switch (requestPool.getTag()) {
        case Tag::pool:
            return RequestMemoryPool::make<Tag::pool>(NN_TRY(clone(requestPool.get<Tag::pool>())));
        case Tag::token:
            return RequestMemoryPool::make<Tag::token>(requestPool.get<Tag::token>());
    }
    // Using explicit type conversion because std::variant inside the RequestMemoryPool confuses the
    // compiler.
    return (NN_ERROR() << "Unrecognized request pool tag: " << requestPool.getTag())
            .operator GeneralResult<RequestMemoryPool>();
}

GeneralResult<Request> clone(const Request& request) {
    return Request{
            .inputs = request.inputs,
            .outputs = request.outputs,
            .pools = NN_TRY(clone(request.pools)),
    };
}

GeneralResult<Model> clone(const Model& model) {
    return Model{
            .main = model.main,
            .referenced = model.referenced,
            .operandValues = model.operandValues,
            .pools = {},
            .pools = NN_TRY(clone(model.pools)),
            .relaxComputationFloat32toFloat16 = model.relaxComputationFloat32toFloat16,
            .extensionNameToPrefix = model.extensionNameToPrefix,
    };
    newModel.pools.reserve(model.pools.size());
    for (const auto& pool : model.pools) {
        common::NativeHandle nativeHandle;
        nativeHandle.ints = pool.handle.ints;
        nativeHandle.fds.reserve(pool.handle.fds.size());
        for (const auto& fd : pool.handle.fds) {
            const int newFd = dup(fd.get());
            if (newFd == -1) {
                return NN_ERROR() << "Couldn't dup a file descriptor.";
            }
            nativeHandle.fds.emplace_back(newFd);
        }
        Memory memory = {
                .handle = std::move(nativeHandle),
                .size = pool.size,
                .name = pool.name,
        };
        newModel.pools.push_back(std::move(memory));
    }
    return newModel;
}

}  // namespace aidl::android::hardware::neuralnetworks::utils
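A minimal usage sketch of the clone() helpers above: Memory carries duped file descriptors, so a Request cannot simply be copy-constructed, and validation-style tests that mutate one field per iteration need a deep copy. The example namespace, withMutatedRequest helper, and Mutator parameter are hypothetical; clone() and the header path come from this patch.

#include <aidl/android/hardware/neuralnetworks/Request.h>
#include <nnapi/hal/aidl/Utils.h>  // declares the clone() helpers shown above

namespace example {  // hypothetical namespace, for illustration only

using aidl::android::hardware::neuralnetworks::Request;
namespace nn_utils = aidl::android::hardware::neuralnetworks::utils;

// Run `mutate` on a defensive deep copy of `request`; the copy dup()s the
// file descriptors inside the request pools, so the original stays usable.
template <typename Mutator>
bool withMutatedRequest(const Request& request, Mutator mutate) {
    auto copied = nn_utils::clone(request);
    if (!copied.has_value()) return false;  // dup() or allocation failed
    mutate(&copied.value());
    // ...hand copied.value() to IPreparedModel::executeSynchronously(), etc.
    return true;
}

}  // namespace example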
neuralnetworks/aidl/vts/OWNERS (new file, 12 lines)
@@ -0,0 +1,12 @@
# Neuralnetworks team
butlermichael@google.com
dgross@google.com
jeanluc@google.com
levp@google.com
miaowang@google.com
mikie@google.com
mks@google.com
pszczepaniak@google.com
slavash@google.com
vddang@google.com
xusongw@google.com
neuralnetworks/aidl/vts/functional/Android.bp (new file, 68 lines)
@@ -0,0 +1,68 @@
//
// Copyright (C) 2021 The Android Open Source Project
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//

cc_test {
    name: "VtsHalNeuralnetworksTargetTest",
    defaults: [
        "neuralnetworks_vts_functional_defaults",
        "use_libaidlvintf_gtest_helper_static",
    ],
    srcs: [
        "BasicTests.cpp",
        "Callbacks.cpp",
        "CompilationCachingTests.cpp",
        "GeneratedTestHarness.cpp",
        "MemoryDomainTests.cpp",
        "QualityOfServiceTests.cpp",
        "TestAssertions.cpp",
        "TestMain.cpp",
        "Utils.cpp",
        "ValidateModel.cpp",
        "ValidateRequest.cpp",
        "VtsHalNeuralnetworks.cpp",
    ],
    shared_libs: [
        "libbinder_ndk",
        "libnativewindow",
        "libvndksupport",
    ],
    static_libs: [
        "android.hardware.common-V2-ndk_platform",
        "android.hardware.neuralnetworks-V1-ndk_platform",
        "android.hidl.allocator@1.0",
        "android.hidl.memory@1.0",
        "libgmock",
        "libhidlmemory",
        "libneuralnetworks_generated_test_harness",
        "libneuralnetworks_utils",
        "libsync",
        "neuralnetworks_utils_hal_aidl",
    ],
    whole_static_libs: [
        "neuralnetworks_generated_V1_0_example",
        "neuralnetworks_generated_V1_1_example",
        "neuralnetworks_generated_V1_2_example",
        "neuralnetworks_generated_V1_3_example",
    ],
    header_libs: [
        "libbase_headers",
        "libneuralnetworks_headers",
    ],
    test_suites: [
        "general-tests",
        "vts",
    ],
}
neuralnetworks/aidl/vts/functional/AndroidTest.xml (new file, 33 lines)
@@ -0,0 +1,33 @@
<?xml version="1.0" encoding="utf-8"?>
<!-- Copyright (C) 2020 The Android Open Source Project

     Licensed under the Apache License, Version 2.0 (the "License");
     you may not use this file except in compliance with the License.
     You may obtain a copy of the License at

          http://www.apache.org/licenses/LICENSE-2.0

     Unless required by applicable law or agreed to in writing, software
     distributed under the License is distributed on an "AS IS" BASIS,
     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
     See the License for the specific language governing permissions and
     limitations under the License.
-->
<configuration description="Runs VtsHalNeuralnetworksTargetTest.">
    <option name="test-suite-tag" value="apct" />
    <option name="test-suite-tag" value="apct-native" />

    <target_preparer class="com.android.tradefed.targetprep.RootTargetPreparer">
    </target_preparer>

    <target_preparer class="com.android.tradefed.targetprep.PushFilePreparer">
        <option name="cleanup" value="true" />
        <option name="push" value="VtsHalNeuralnetworksTargetTest->/data/local/tmp/VtsHalNeuralnetworksTargetTest" />
    </target_preparer>

    <test class="com.android.tradefed.testtype.GTest" >
        <option name="native-test-device-path" value="/data/local/tmp" />
        <option name="module-name" value="VtsHalNeuralnetworksTargetTest" />
        <option name="native-test-timeout" value="20m" />
    </test>
</configuration>
neuralnetworks/aidl/vts/functional/BasicTests.cpp (new file, 193 lines)
@@ -0,0 +1,193 @@
/*
 * Copyright (C) 2021 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define LOG_TAG "neuralnetworks_aidl_hal_test"

#include <aidl/android/hardware/neuralnetworks/Capabilities.h>
#include <aidl/android/hardware/neuralnetworks/IDevice.h>
#include <aidl/android/hardware/neuralnetworks/Operand.h>
#include <aidl/android/hardware/neuralnetworks/OperandType.h>
#include <aidl/android/hardware/neuralnetworks/Priority.h>
#include <android/binder_interface_utils.h>

#include "Utils.h"
#include "VtsHalNeuralnetworks.h"

namespace aidl::android::hardware::neuralnetworks::vts::functional {

using implementation::PreparedModelCallback;

// create device test
TEST_P(NeuralNetworksAidlTest, CreateDevice) {}

// initialization
TEST_P(NeuralNetworksAidlTest, GetCapabilitiesTest) {
    Capabilities capabilities;
    const auto retStatus = kDevice->getCapabilities(&capabilities);
    ASSERT_TRUE(retStatus.isOk());

    auto isPositive = [](const PerformanceInfo& perf) {
        return perf.execTime > 0.0f && perf.powerUsage > 0.0f;
    };

    EXPECT_TRUE(isPositive(capabilities.relaxedFloat32toFloat16PerformanceScalar));
    EXPECT_TRUE(isPositive(capabilities.relaxedFloat32toFloat16PerformanceTensor));
    const auto& opPerf = capabilities.operandPerformance;
    EXPECT_TRUE(
            std::all_of(opPerf.begin(), opPerf.end(),
                        [isPositive](const OperandPerformance& a) { return isPositive(a.info); }));
    EXPECT_TRUE(std::is_sorted(opPerf.begin(), opPerf.end(),
                               [](const OperandPerformance& a, const OperandPerformance& b) {
                                   return a.type < b.type;
                               }));
    EXPECT_TRUE(std::all_of(opPerf.begin(), opPerf.end(), [](const OperandPerformance& a) {
        return a.type != OperandType::SUBGRAPH;
    }));
    EXPECT_TRUE(isPositive(capabilities.ifPerformance));
    EXPECT_TRUE(isPositive(capabilities.whilePerformance));
}

// detect cycle
TEST_P(NeuralNetworksAidlTest, CycleTest) {
    // opnd0 = TENSOR_FLOAT32            // model input
    // opnd1 = TENSOR_FLOAT32            // model input
    // opnd2 = INT32                     // model input
    // opnd3 = ADD(opnd0, opnd4, opnd2)
    // opnd4 = ADD(opnd1, opnd3, opnd2)
    // opnd5 = ADD(opnd4, opnd0, opnd2)  // model output
    //
    //               +-----+
    //               |     |
    //               v     |
    //    3 = ADD(0, 4, 2) |
    //    |                |
    //    +----------+     |
    //               |     |
    //               v     |
    //    4 = ADD(1, 3, 2) |
    //    |                |
    //    +----------------+
    //    |
    //    |
    //    +-------+
    //            |
    //            v
    //    5 = ADD(4, 0, 2)

    const std::vector<Operand> operands = {
            {
                    // operands[0]
                    .type = OperandType::TENSOR_FLOAT32,
                    .dimensions = {1},
                    .scale = 0.0f,
                    .zeroPoint = 0,
                    .lifetime = OperandLifeTime::SUBGRAPH_INPUT,
                    .location = {.poolIndex = 0, .offset = 0, .length = 0},
            },
            {
                    // operands[1]
                    .type = OperandType::TENSOR_FLOAT32,
                    .dimensions = {1},
                    .scale = 0.0f,
                    .zeroPoint = 0,
                    .lifetime = OperandLifeTime::SUBGRAPH_INPUT,
                    .location = {.poolIndex = 0, .offset = 0, .length = 0},
            },
            {
                    // operands[2]
                    .type = OperandType::INT32,
                    .dimensions = {},
                    .scale = 0.0f,
                    .zeroPoint = 0,
                    .lifetime = OperandLifeTime::SUBGRAPH_INPUT,
                    .location = {.poolIndex = 0, .offset = 0, .length = 0},
            },
            {
                    // operands[3]
                    .type = OperandType::TENSOR_FLOAT32,
                    .dimensions = {1},
                    .scale = 0.0f,
                    .zeroPoint = 0,
                    .lifetime = OperandLifeTime::TEMPORARY_VARIABLE,
                    .location = {.poolIndex = 0, .offset = 0, .length = 0},
            },
            {
                    // operands[4]
                    .type = OperandType::TENSOR_FLOAT32,
                    .dimensions = {1},
                    .scale = 0.0f,
                    .zeroPoint = 0,
                    .lifetime = OperandLifeTime::TEMPORARY_VARIABLE,
                    .location = {.poolIndex = 0, .offset = 0, .length = 0},
            },
            {
                    // operands[5]
                    .type = OperandType::TENSOR_FLOAT32,
                    .dimensions = {1},
                    .scale = 0.0f,
                    .zeroPoint = 0,
                    .lifetime = OperandLifeTime::SUBGRAPH_OUTPUT,
                    .location = {.poolIndex = 0, .offset = 0, .length = 0},
            },
    };

    const std::vector<Operation> operations = {
            {.type = OperationType::ADD, .inputs = {0, 4, 2}, .outputs = {3}},
            {.type = OperationType::ADD, .inputs = {1, 3, 2}, .outputs = {4}},
            {.type = OperationType::ADD, .inputs = {4, 0, 2}, .outputs = {5}},
    };

    Subgraph subgraph = {
            .operands = operands,
            .operations = operations,
            .inputIndexes = {0, 1, 2},
            .outputIndexes = {5},
    };
    const Model model = {
            .main = std::move(subgraph),
            .referenced = {},
            .operandValues = {},
            .pools = {},
    };

    // ensure that getSupportedOperations() checks model validity
    std::vector<bool> supportedOps;
    const auto supportedOpsStatus = kDevice->getSupportedOperations(model, &supportedOps);
    ASSERT_FALSE(supportedOpsStatus.isOk());
    ASSERT_EQ(supportedOpsStatus.getExceptionCode(), EX_SERVICE_SPECIFIC);
    ASSERT_EQ(static_cast<ErrorStatus>(supportedOpsStatus.getServiceSpecificError()),
              ErrorStatus::INVALID_ARGUMENT);

    // ensure that prepareModel() checks model validity
    auto preparedModelCallback = ndk::SharedRefBase::make<PreparedModelCallback>();
    auto prepareLaunchStatus =
            kDevice->prepareModel(model, ExecutionPreference::FAST_SINGLE_ANSWER, kDefaultPriority,
                                  kNoDeadline, {}, {}, kEmptyCacheToken, preparedModelCallback);
    // Note that preparation can fail for reasons other than an
    // invalid model (invalid model should result in
    // INVALID_ARGUMENT) -- for example, perhaps not all
    // operations are supported, or perhaps the device hit some
    // kind of capacity limit.
    ASSERT_FALSE(prepareLaunchStatus.isOk());
    EXPECT_EQ(prepareLaunchStatus.getExceptionCode(), EX_SERVICE_SPECIFIC);
    EXPECT_NE(static_cast<ErrorStatus>(prepareLaunchStatus.getServiceSpecificError()),
              ErrorStatus::NONE);

    EXPECT_NE(preparedModelCallback->getStatus(), ErrorStatus::NONE);
    EXPECT_EQ(preparedModelCallback->getPreparedModel(), nullptr);
}

}  // namespace aidl::android::hardware::neuralnetworks::vts::functional
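CycleTest relies on the driver's validator noticing that operations producing operands 3 and 4 each consume the other's output, so neither can ever run. A compact way to check that property, shown here only as an illustration (the production validator lives in the NNAPI runtime, not in this patch; Op, hasCycle, and modelInputs are hypothetical names), is a simple fixed-point readiness check over the operand/operation dependency graph:

#include <cstddef>
#include <vector>

// Each operation lists the operands it reads and the operands it writes.
struct Op {
    std::vector<size_t> inputs;
    std::vector<size_t> outputs;
};

// Returns true if some operation can never run because it (transitively)
// waits on one of its own outputs. Model inputs and constants are "ready"
// from the start; an operation becomes runnable once all its inputs are ready.
bool hasCycle(const std::vector<Op>& ops, size_t operandCount,
              const std::vector<size_t>& modelInputs) {
    std::vector<bool> ready(operandCount, false);
    for (size_t idx : modelInputs) ready[idx] = true;

    std::vector<bool> done(ops.size(), false);
    size_t remaining = ops.size();
    bool progressed = true;
    while (progressed && remaining > 0) {
        progressed = false;
        for (size_t i = 0; i < ops.size(); ++i) {
            if (done[i]) continue;
            bool runnable = true;
            for (size_t in : ops[i].inputs) runnable = runnable && ready[in];
            if (!runnable) continue;
            for (size_t out : ops[i].outputs) ready[out] = true;
            done[i] = true;
            --remaining;
            progressed = true;
        }
    }
    return remaining > 0;  // some operation never became runnable => cycle
}

// The CycleTest graph: ADD(0,4,2)->3, ADD(1,3,2)->4, ADD(4,0,2)->5.
// hasCycle(ops, /*operandCount=*/6, /*modelInputs=*/{0, 1, 2}) returns true.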
neuralnetworks/aidl/vts/functional/Callbacks.cpp (new file, 59 lines)
@@ -0,0 +1,59 @@
/*
 * Copyright (C) 2021 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define LOG_TAG "Callbacks"

#include "Callbacks.h"

#include <android-base/logging.h>
#include <android/binder_auto_utils.h>
#include <limits>

namespace aidl::android::hardware::neuralnetworks::implementation {

ndk::ScopedAStatus PreparedModelCallback::notify(
        ErrorStatus errorStatus, const std::shared_ptr<IPreparedModel>& preparedModel) {
    {
        std::lock_guard<std::mutex> hold(mMutex);
        // quick-return if object has already been notified
        if (mNotified) {
            return ndk::ScopedAStatus::ok();
        }
        // store results and mark as notified
        mErrorStatus = errorStatus;
        mPreparedModel = preparedModel;
        mNotified = true;
    }
    mCondition.notify_all();
    return ndk::ScopedAStatus::ok();
}

void PreparedModelCallback::wait() const {
    std::unique_lock<std::mutex> lock(mMutex);
    mCondition.wait(lock, [this] { return mNotified; });
}

ErrorStatus PreparedModelCallback::getStatus() const {
    wait();
    return mErrorStatus;
}

std::shared_ptr<IPreparedModel> PreparedModelCallback::getPreparedModel() const {
    wait();
    return mPreparedModel;
}

}  // namespace aidl::android::hardware::neuralnetworks::implementation
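A condensed usage sketch of this callback, pieced together from the BasicTests.cpp calls elsewhere in this patch; it assumes a test fixture providing kDevice, kDefaultPriority, kNoDeadline, kEmptyCacheToken, and a valid model, and shows the success path rather than the failure path asserted in CycleTest:

// Prepare a model asynchronously and block until the driver reports back.
auto callback = ndk::SharedRefBase::make<PreparedModelCallback>();
const auto launchStatus =
        kDevice->prepareModel(model, ExecutionPreference::FAST_SINGLE_ANSWER, kDefaultPriority,
                              kNoDeadline, {}, {}, kEmptyCacheToken, callback);
ASSERT_TRUE(launchStatus.isOk());

// getStatus()/getPreparedModel() call wait() internally, so they only return
// once the driver has invoked notify() on the callback object.
ASSERT_EQ(callback->getStatus(), ErrorStatus::NONE);
std::shared_ptr<IPreparedModel> preparedModel = callback->getPreparedModel();
ASSERT_NE(preparedModel, nullptr);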
neuralnetworks/aidl/vts/functional/Callbacks.h (new file, 131 lines)
@@ -0,0 +1,131 @@
/*
 * Copyright (C) 2021 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ANDROID_HARDWARE_NEURALNETWORKS_AIDL_CALLBACKS_H
#define ANDROID_HARDWARE_NEURALNETWORKS_AIDL_CALLBACKS_H

#include <android-base/thread_annotations.h>
#include <condition_variable>
#include <mutex>

#include <aidl/android/hardware/neuralnetworks/BnPreparedModelCallback.h>
#include <aidl/android/hardware/neuralnetworks/ErrorStatus.h>
#include <aidl/android/hardware/neuralnetworks/IPreparedModel.h>

/*
 * The Callback classes are used internally by the NeuralNetworks runtime to
 * synchronize between different threads. An asynchronous task is launched
 * paired with a callback object. When a client thread requires the output being
 * generated by the asynchronous task, the client thread can wait for the result
 * and be blocked until it has completed. Any wait may safely be called
 * concurrently, even on the same callback object. When the asynchronous task
 * has finished its workload, it must immediately call "notify". If the
 * asynchronous task has failed to launch, the function that tried to launch the
 * asynchronous task must immediately call "notify". This "notify" call
 * awakens any client threads waiting on the callback object.
 *
 * These classes exist to enable synchronization across AIDL. When
 * synchronization is only required in the same process, consider using
 * std::future, std::mutex, std::condition_variable, or std::experimental::latch
 * instead.
 */

namespace aidl::android::hardware::neuralnetworks::implementation {

/**
 * The PreparedModelCallback class is used to receive the error status of
 * preparing a model as well as the prepared model from a task executing
 * asynchronously with respect to the runtime. If a calling thread calls wait
 * or get* on a PreparedModelCallback object and the corresponding asynchronous
 * task has not finished preparing the model, the calling thread will block
 * until the asynchronous task has called notify.
 *
 * If the callback object is notified more than once, only the results of the
 * first call to notify are used, and the results from subsequent calls are
 * discarded.
 *
 * This callback object is passed as an argument to IDevice::prepareModel*.
 */
class PreparedModelCallback : public BnPreparedModelCallback {
  public:
    /**
     * IPreparedModelCallback::notify marks the callback object with the return
     * status of the asynchronous model preparation along with the prepared
     * model, and allows all prior and future wait calls on the
     * PreparedModelCallback object to proceed.
     *
     * IPreparedModelCallback::notify must be called on a given PreparedModelCallback object.
     *
     * If the callback object is notified more than once, only the results of
     * the first call to notify are used, and the results from subsequent calls
     * are discarded.
     *
     * @param status Error status returned from asynchronously preparing the
     *     model; will be:
     *     - NONE if the asynchronous preparation was successful
     *     - DEVICE_UNAVAILABLE if driver is offline or busy
     *     - GENERAL_FAILURE if there is an unspecified error
     *     - INVALID_ARGUMENT if the input model is invalid
     * @param preparedModel Returned model that has been prepared for execution,
     *     nullptr if the model was unable to be prepared.
     */
    ndk::ScopedAStatus notify(ErrorStatus status,
                              const std::shared_ptr<IPreparedModel>& preparedModel) override;

    /**
     * PreparedModelCallback::wait blocks until notify has been called on the
     * callback object.
     */
    void wait() const;

    /**
     * Retrieves the error status returned from the asynchronous task launched
     * by IDevice::prepareModel*. If IDevice::prepareModel* has not finished
     * asynchronously preparing the model, this call will block until the
     * asynchronous task notifies the object.
     *
     * @return status Error status returned from asynchronously preparing the
     *     model; will be:
     *     - NONE if the asynchronous preparation was successful
     *     - DEVICE_UNAVAILABLE if driver is offline or busy
     *     - GENERAL_FAILURE if there is an unspecified error
     *     - INVALID_ARGUMENT if the input model is invalid
     */
    ErrorStatus getStatus() const;

    /**
     * Retrieves the model that has been prepared for execution from the
     * asynchronous task launched by IDevice::prepareModel*. If
     * IDevice::prepareModel* has not finished asynchronously preparing the
     * model, this call will block until the asynchronous task notifies the
     * object.
     *
     * @return preparedModel Returned model that has been prepared for
     *     execution, nullptr if the model was unable to be prepared.
     */
    std::shared_ptr<IPreparedModel> getPreparedModel() const;

  private:
    mutable std::mutex mMutex;
    mutable std::condition_variable mCondition;
    bool mNotified GUARDED_BY(mMutex) = false;
    ErrorStatus mErrorStatus = ErrorStatus::GENERAL_FAILURE;
    std::shared_ptr<IPreparedModel> mPreparedModel;
};

}  // namespace aidl::android::hardware::neuralnetworks::implementation

#endif  // ANDROID_HARDWARE_NEURALNETWORKS_AIDL_CALLBACKS_H
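The block comment in Callbacks.h above suggests std::future for purely in-process waiting; for comparison, this sketch expresses the same wait-for-one-result contract with std::promise/std::future (illustrative only; the AIDL callback is still required across the binder boundary, and InProcessPreparedModelResult/main_example are hypothetical names):

#include <future>
#include <memory>

// In-process equivalent of notify()/wait(): the producer fulfills the promise
// once, and any number of consumers can block on the shared_future.
struct InProcessPreparedModelResult {
    int errorStatus = 0;                  // stand-in for ErrorStatus
    std::shared_ptr<void> preparedModel;  // stand-in for IPreparedModel
};

int main_example() {
    std::promise<InProcessPreparedModelResult> promise;
    std::shared_future<InProcessPreparedModelResult> future = promise.get_future().share();

    // The producer would call this exactly once (a second fulfillment throws,
    // unlike PreparedModelCallback::notify, which silently ignores repeats).
    promise.set_value({/*errorStatus=*/0, /*preparedModel=*/nullptr});

    // Consumers block until the value is available, like wait()/getStatus().
    const InProcessPreparedModelResult& result = future.get();
    return result.errorStatus;
}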
neuralnetworks/aidl/vts/functional/CompilationCachingTests.cpp (new file, 1177 lines)
File diff suppressed because it is too large.
neuralnetworks/aidl/vts/functional/GeneratedTestHarness.cpp (new file, 925 lines)
@@ -0,0 +1,925 @@
/*
|
||||
* Copyright (C) 2021 The Android Open Source Project
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#include "GeneratedTestHarness.h"
|
||||
|
||||
#include <aidl/android/hardware/neuralnetworks/ErrorStatus.h>
|
||||
#include <android-base/logging.h>
|
||||
#include <android/binder_auto_utils.h>
|
||||
#include <android/sync.h>
|
||||
#include <gtest/gtest.h>
|
||||
|
||||
#include <algorithm>
|
||||
#include <chrono>
|
||||
#include <iostream>
|
||||
#include <iterator>
|
||||
#include <numeric>
|
||||
#include <vector>
|
||||
|
||||
#include <MemoryUtils.h>
|
||||
#include <android/binder_status.h>
|
||||
#include <nnapi/Result.h>
|
||||
#include <nnapi/SharedMemory.h>
|
||||
#include <nnapi/Types.h>
|
||||
#include <nnapi/hal/aidl/Conversions.h>
|
||||
#include <nnapi/hal/aidl/Utils.h>
|
||||
|
||||
#include "Callbacks.h"
|
||||
#include "TestHarness.h"
|
||||
#include "Utils.h"
|
||||
#include "VtsHalNeuralnetworks.h"
|
||||
|
||||
namespace aidl::android::hardware::neuralnetworks::vts::functional {
|
||||
|
||||
namespace nn = ::android::nn;
|
||||
using namespace test_helper;
|
||||
using implementation::PreparedModelCallback;
|
||||
|
||||
namespace {
|
||||
|
||||
enum class OutputType { FULLY_SPECIFIED, UNSPECIFIED, INSUFFICIENT, MISSED_DEADLINE };
|
||||
|
||||
struct TestConfig {
|
||||
Executor executor;
|
||||
bool measureTiming;
|
||||
OutputType outputType;
|
||||
MemoryType memoryType;
|
||||
// `reportSkipping` indicates if a test should print an info message in case
|
||||
// it is skipped. The field is set to true by default and is set to false in
|
||||
// quantization coupling tests to suppress skipping a test
|
||||
bool reportSkipping;
|
||||
TestConfig(Executor executor, bool measureTiming, OutputType outputType, MemoryType memoryType)
|
||||
: executor(executor),
|
||||
measureTiming(measureTiming),
|
||||
outputType(outputType),
|
||||
memoryType(memoryType),
|
||||
reportSkipping(true) {}
|
||||
TestConfig(Executor executor, bool measureTiming, OutputType outputType, MemoryType memoryType,
|
||||
bool reportSkipping)
|
||||
: executor(executor),
|
||||
measureTiming(measureTiming),
|
||||
outputType(outputType),
|
||||
memoryType(memoryType),
|
||||
reportSkipping(reportSkipping) {}
|
||||
};
|
||||
|
||||
enum class IOType { INPUT, OUTPUT };
|
||||
|
||||
class DeviceMemoryAllocator {
|
||||
public:
|
||||
DeviceMemoryAllocator(const std::shared_ptr<IDevice>& device,
|
||||
const std::shared_ptr<IPreparedModel>& preparedModel,
|
||||
const TestModel& testModel)
|
||||
: kDevice(device), kPreparedModel(preparedModel), kTestModel(testModel) {}
|
||||
|
||||
// Allocate device memory for a target input/output operand.
|
||||
// Return {IBuffer object, token} if successful.
|
||||
// Return {nullptr, 0} if device memory is not supported.
|
||||
template <IOType ioType>
|
||||
std::pair<std::shared_ptr<IBuffer>, int32_t> allocate(uint32_t index) {
|
||||
std::pair<std::shared_ptr<IBuffer>, int32_t> buffer;
|
||||
allocateInternal<ioType>(index, &buffer);
|
||||
return buffer;
|
||||
}
|
||||
|
||||
private:
|
||||
template <IOType ioType>
|
||||
void allocateInternal(int32_t index, std::pair<std::shared_ptr<IBuffer>, int32_t>* result) {
|
||||
ASSERT_NE(result, nullptr);
|
||||
|
||||
// Prepare arguments.
|
||||
BufferRole role = {.modelIndex = 0, .ioIndex = index, .frequency = 1.0f};
|
||||
std::vector<BufferRole> inputRoles, outputRoles;
|
||||
if constexpr (ioType == IOType::INPUT) {
|
||||
inputRoles = {role};
|
||||
} else {
|
||||
outputRoles = {role};
|
||||
}
|
||||
|
||||
// Allocate device memory.
|
||||
DeviceBuffer buffer;
|
||||
IPreparedModelParcel parcel;
|
||||
parcel.preparedModel = kPreparedModel;
|
||||
const auto ret = kDevice->allocate({}, {parcel}, inputRoles, outputRoles, &buffer);
|
||||
|
||||
// Check allocation results.
|
||||
if (ret.isOk()) {
|
||||
ASSERT_NE(buffer.buffer, nullptr);
|
||||
ASSERT_GT(buffer.token, 0);
|
||||
} else {
|
||||
ASSERT_EQ(ret.getExceptionCode(), EX_SERVICE_SPECIFIC);
|
||||
ASSERT_EQ(static_cast<ErrorStatus>(ret.getServiceSpecificError()),
|
||||
ErrorStatus::GENERAL_FAILURE);
|
||||
buffer.buffer = nullptr;
|
||||
buffer.token = 0;
|
||||
}
|
||||
|
||||
// Initialize input data from TestBuffer.
|
||||
if constexpr (ioType == IOType::INPUT) {
|
||||
if (buffer.buffer != nullptr) {
|
||||
// TestBuffer -> Shared memory.
|
||||
const auto& testBuffer =
|
||||
kTestModel.main.operands[kTestModel.main.inputIndexes[index]].data;
|
||||
ASSERT_GT(testBuffer.size(), 0);
|
||||
const auto sharedMemory = nn::createSharedMemory(testBuffer.size()).value();
|
||||
const auto memory = utils::convert(sharedMemory).value();
|
||||
const auto mapping = nn::map(sharedMemory).value();
|
||||
uint8_t* inputPtr = static_cast<uint8_t*>(std::get<void*>(mapping.pointer));
|
||||
ASSERT_NE(inputPtr, nullptr);
|
||||
const uint8_t* begin = testBuffer.get<uint8_t>();
|
||||
const uint8_t* end = begin + testBuffer.size();
|
||||
std::copy(begin, end, inputPtr);
|
||||
|
||||
// Shared memory -> IBuffer.
|
||||
auto ret = buffer.buffer->copyFrom(memory, {});
|
||||
ASSERT_TRUE(ret.isOk());
|
||||
}
|
||||
}
|
||||
*result = {std::move(buffer.buffer), buffer.token};
|
||||
}
|
||||
|
||||
const std::shared_ptr<IDevice> kDevice;
|
||||
const std::shared_ptr<IPreparedModel> kPreparedModel;
|
||||
const TestModel& kTestModel;
|
||||
};
|
||||
|
||||
Subgraph createSubgraph(const TestSubgraph& testSubgraph, uint32_t* constCopySize,
|
||||
std::vector<const TestBuffer*>* constCopies, uint32_t* constRefSize,
|
||||
std::vector<const TestBuffer*>* constReferences) {
|
||||
CHECK(constCopySize != nullptr);
|
||||
CHECK(constCopies != nullptr);
|
||||
CHECK(constRefSize != nullptr);
|
||||
CHECK(constReferences != nullptr);
|
||||
|
||||
// Operands.
|
||||
std::vector<Operand> operands(testSubgraph.operands.size());
|
||||
for (uint32_t i = 0; i < testSubgraph.operands.size(); i++) {
|
||||
const auto& op = testSubgraph.operands[i];
|
||||
|
||||
DataLocation loc = {};
|
||||
if (op.lifetime == TestOperandLifeTime::CONSTANT_COPY) {
|
||||
loc = {
|
||||
.poolIndex = 0,
|
||||
.offset = *constCopySize,
|
||||
.length = static_cast<int64_t>(op.data.size()),
|
||||
};
|
||||
constCopies->push_back(&op.data);
|
||||
*constCopySize += op.data.alignedSize();
|
||||
} else if (op.lifetime == TestOperandLifeTime::CONSTANT_REFERENCE) {
|
||||
loc = {
|
||||
.poolIndex = 0,
|
||||
.offset = *constRefSize,
|
||||
.length = static_cast<int64_t>(op.data.size()),
|
||||
};
|
||||
constReferences->push_back(&op.data);
|
||||
*constRefSize += op.data.alignedSize();
|
||||
} else if (op.lifetime == TestOperandLifeTime::SUBGRAPH) {
|
||||
loc = {
|
||||
.poolIndex = 0,
|
||||
.offset = *op.data.get<uint32_t>(),
|
||||
.length = 0,
|
||||
};
|
||||
}
|
||||
|
||||
std::optional<OperandExtraParams> extraParams;
|
||||
if (op.type == TestOperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL) {
|
||||
using Tag = OperandExtraParams::Tag;
|
||||
extraParams = OperandExtraParams::make<Tag::channelQuant>(SymmPerChannelQuantParams{
|
||||
.scales = op.channelQuant.scales,
|
||||
.channelDim = static_cast<int32_t>(op.channelQuant.channelDim)});
|
||||
}
|
||||
|
||||
operands[i] = {.type = static_cast<OperandType>(op.type),
|
||||
.dimensions = utils::toSigned(op.dimensions).value(),
|
||||
.scale = op.scale,
|
||||
.zeroPoint = op.zeroPoint,
|
||||
.lifetime = static_cast<OperandLifeTime>(op.lifetime),
|
||||
.location = loc,
|
||||
.extraParams = std::move(extraParams)};
|
||||
}
|
||||
|
||||
// Operations.
|
||||
std::vector<Operation> operations(testSubgraph.operations.size());
|
||||
std::transform(testSubgraph.operations.begin(), testSubgraph.operations.end(),
|
||||
operations.begin(), [](const TestOperation& op) -> Operation {
|
||||
return {.type = static_cast<OperationType>(op.type),
|
||||
.inputs = utils::toSigned(op.inputs).value(),
|
||||
.outputs = utils::toSigned(op.outputs).value()};
|
||||
});
|
||||
|
||||
return {.operands = std::move(operands),
|
||||
.operations = std::move(operations),
|
||||
.inputIndexes = utils::toSigned(testSubgraph.inputIndexes).value(),
|
||||
.outputIndexes = utils::toSigned(testSubgraph.outputIndexes).value()};
|
||||
}
|
||||
|
||||
void copyTestBuffers(const std::vector<const TestBuffer*>& buffers, uint8_t* output) {
|
||||
uint32_t offset = 0;
|
||||
for (const TestBuffer* buffer : buffers) {
|
||||
const uint8_t* begin = buffer->get<uint8_t>();
|
||||
const uint8_t* end = begin + buffer->size();
|
||||
std::copy(begin, end, output + offset);
|
||||
offset += buffer->alignedSize();
|
||||
}
|
||||
}
|
||||
|
||||
} // namespace
|
||||
|
||||
void waitForSyncFence(int syncFd) {
|
||||
constexpr int kInfiniteTimeout = -1;
|
||||
ASSERT_GT(syncFd, 0);
|
||||
int r = sync_wait(syncFd, kInfiniteTimeout);
|
||||
ASSERT_GE(r, 0);
|
||||
}
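waitForSyncFence above blocks indefinitely. A bounded variant, sketched here under the assumption that libsync's sync_wait(fd, timeout) returns a negative value on timeout or error (the only call used above), could report the outcome instead of failing the test; waitForSyncFenceWithTimeout is a hypothetical helper, not part of the patch:

// Hypothetical helper: wait up to `timeoutMs` for the fence to signal and
// report whether it did, instead of asserting.
bool waitForSyncFenceWithTimeout(int syncFd, int timeoutMs) {
    if (syncFd <= 0) return false;       // no valid fence was returned
    const int r = sync_wait(syncFd, timeoutMs);
    return r >= 0;                       // negative => timeout or error
}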
|
||||
|
||||
Model createModel(const TestModel& testModel) {
|
||||
uint32_t constCopySize = 0;
|
||||
uint32_t constRefSize = 0;
|
||||
std::vector<const TestBuffer*> constCopies;
|
||||
std::vector<const TestBuffer*> constReferences;
|
||||
|
||||
Subgraph mainSubgraph = createSubgraph(testModel.main, &constCopySize, &constCopies,
|
||||
&constRefSize, &constReferences);
|
||||
std::vector<Subgraph> refSubgraphs(testModel.referenced.size());
|
||||
std::transform(testModel.referenced.begin(), testModel.referenced.end(), refSubgraphs.begin(),
|
||||
[&constCopySize, &constCopies, &constRefSize,
|
||||
&constReferences](const TestSubgraph& testSubgraph) {
|
||||
return createSubgraph(testSubgraph, &constCopySize, &constCopies,
|
||||
&constRefSize, &constReferences);
|
||||
});
|
||||
|
||||
// Constant copies.
|
||||
std::vector<uint8_t> operandValues(constCopySize);
|
||||
copyTestBuffers(constCopies, operandValues.data());
|
||||
|
||||
// Shared memory.
|
||||
std::vector<nn::Memory> pools = {};
|
||||
if (constRefSize > 0) {
|
||||
const auto pool = nn::createSharedMemory(constRefSize).value();
|
||||
pools.push_back(pool);
|
||||
|
||||
// load data
|
||||
const auto mappedMemory = nn::map(pool).value();
|
||||
uint8_t* mappedPtr = static_cast<uint8_t*>(std::get<void*>(mappedMemory.pointer));
|
||||
CHECK(mappedPtr != nullptr);
|
||||
|
||||
copyTestBuffers(constReferences, mappedPtr);
|
||||
}
|
||||
|
||||
std::vector<Memory> aidlPools;
|
||||
aidlPools.reserve(pools.size());
|
||||
for (auto& pool : pools) {
|
||||
auto aidlPool = utils::convert(pool).value();
|
||||
aidlPools.push_back(std::move(aidlPool));
|
||||
}
|
||||
|
||||
return {.main = std::move(mainSubgraph),
|
||||
.referenced = std::move(refSubgraphs),
|
||||
.operandValues = std::move(operandValues),
|
||||
.pools = std::move(aidlPools),
|
||||
.relaxComputationFloat32toFloat16 = testModel.isRelaxed};
|
||||
}
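The offset bookkeeping in createSubgraph/createModel packs every CONSTANT_COPY operand into operandValues and every CONSTANT_REFERENCE operand into pools[0], advancing by alignedSize() each time. A worked example of the resulting layout, assuming an alignment of 16 bytes purely for illustration (the real value comes from TestBuffer::kAlignment); kAssumedAlignment and alignedSizeOf are hypothetical:

// Hypothetical packing walk-through (alignment value assumed, not from the patch):
//   operand A: 6 bytes,  CONSTANT_COPY      -> offset 0,  alignedSize = 16
//   operand B: 20 bytes, CONSTANT_COPY      -> offset 16, alignedSize = 32
//   operand C: 4 bytes,  CONSTANT_REFERENCE -> pools[0] offset 0
// operandValues ends up 48 bytes long; pools[0] holds the 4-byte reference blob.
constexpr size_t kAssumedAlignment = 16;
constexpr size_t alignedSizeOf(size_t bytes) {
    return ((bytes + kAssumedAlignment - 1) / kAssumedAlignment) * kAssumedAlignment;
}
static_assert(alignedSizeOf(6) == 16 && alignedSizeOf(20) == 32);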
|
||||
|
||||
static bool isOutputSizeGreaterThanOne(const TestModel& testModel, uint32_t index) {
|
||||
const auto byteSize = testModel.main.operands[testModel.main.outputIndexes[index]].data.size();
|
||||
return byteSize > 1u;
|
||||
}
|
||||
|
||||
static void makeOutputInsufficientSize(uint32_t outputIndex, Request* request) {
|
||||
auto& length = request->outputs[outputIndex].location.length;
|
||||
ASSERT_GT(length, 1u);
|
||||
length -= 1u;
|
||||
}
|
||||
|
||||
static void makeOutputDimensionsUnspecified(Model* model) {
|
||||
for (auto i : model->main.outputIndexes) {
|
||||
auto& dims = model->main.operands[i].dimensions;
|
||||
std::fill(dims.begin(), dims.end(), 0);
|
||||
}
|
||||
}
|
||||
|
||||
// Manages the lifetime of memory resources used in an execution.
|
||||
class ExecutionContext {
|
||||
public:
|
||||
ExecutionContext(std::shared_ptr<IDevice> device, std::shared_ptr<IPreparedModel> preparedModel)
|
||||
: kDevice(std::move(device)), kPreparedModel(std::move(preparedModel)) {}
|
||||
|
||||
std::optional<Request> createRequest(const TestModel& testModel, MemoryType memoryType);
|
||||
std::vector<TestBuffer> getOutputBuffers(const TestModel& testModel,
|
||||
const Request& request) const;
|
||||
|
||||
private:
|
||||
// Get a TestBuffer with data copied from an IBuffer object.
|
||||
void getBuffer(const std::shared_ptr<IBuffer>& buffer, size_t size,
|
||||
TestBuffer* testBuffer) const;
|
||||
|
||||
static constexpr uint32_t kInputPoolIndex = 0;
|
||||
static constexpr uint32_t kOutputPoolIndex = 1;
|
||||
static constexpr uint32_t kDeviceMemoryBeginIndex = 2;
|
||||
|
||||
const std::shared_ptr<IDevice> kDevice;
|
||||
const std::shared_ptr<IPreparedModel> kPreparedModel;
|
||||
std::unique_ptr<TestMemoryBase> mInputMemory, mOutputMemory;
|
||||
std::vector<std::shared_ptr<IBuffer>> mBuffers;
|
||||
};
|
||||
|
||||
std::optional<Request> ExecutionContext::createRequest(const TestModel& testModel,
|
||||
MemoryType memoryType) {
|
||||
// Memory pools are organized as:
|
||||
// - 0: Input shared memory pool
|
||||
// - 1: Output shared memory pool
|
||||
// - [2, 2+i): Input device memories
|
||||
// - [2+i, 2+i+o): Output device memories
|
||||
DeviceMemoryAllocator allocator(kDevice, kPreparedModel, testModel);
|
||||
std::vector<int32_t> tokens;
|
||||
mBuffers.clear();
|
||||
|
||||
// Model inputs.
|
||||
std::vector<RequestArgument> inputs(testModel.main.inputIndexes.size());
|
||||
size_t inputSize = 0;
|
||||
for (uint32_t i = 0; i < testModel.main.inputIndexes.size(); i++) {
|
||||
const auto& op = testModel.main.operands[testModel.main.inputIndexes[i]];
|
||||
if (op.data.size() == 0) {
|
||||
// Omitted input.
|
||||
inputs[i] = {.hasNoValue = true};
|
||||
continue;
|
||||
} else if (memoryType == MemoryType::DEVICE) {
|
||||
SCOPED_TRACE("Input index = " + std::to_string(i));
|
||||
auto [buffer, token] = allocator.allocate<IOType::INPUT>(i);
|
||||
if (buffer != nullptr) {
|
||||
DataLocation loc = {.poolIndex = static_cast<int32_t>(mBuffers.size() +
|
||||
kDeviceMemoryBeginIndex)};
|
||||
mBuffers.push_back(std::move(buffer));
|
||||
tokens.push_back(token);
|
||||
inputs[i] = {.hasNoValue = false, .location = loc, .dimensions = {}};
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
||||
// Reserve shared memory for input.
|
||||
DataLocation loc = {.poolIndex = kInputPoolIndex,
|
||||
.offset = static_cast<int64_t>(inputSize),
|
||||
.length = static_cast<int64_t>(op.data.size())};
|
||||
inputSize += op.data.alignedSize();
|
||||
inputs[i] = {.hasNoValue = false, .location = loc, .dimensions = {}};
|
||||
}
|
||||
|
||||
// Model outputs.
|
||||
std::vector<RequestArgument> outputs(testModel.main.outputIndexes.size());
|
||||
size_t outputSize = 0;
|
||||
for (uint32_t i = 0; i < testModel.main.outputIndexes.size(); i++) {
|
||||
const auto& op = testModel.main.operands[testModel.main.outputIndexes[i]];
|
||||
if (memoryType == MemoryType::DEVICE) {
|
||||
SCOPED_TRACE("Output index = " + std::to_string(i));
|
||||
auto [buffer, token] = allocator.allocate<IOType::OUTPUT>(i);
|
||||
if (buffer != nullptr) {
|
||||
DataLocation loc = {.poolIndex = static_cast<int32_t>(mBuffers.size() +
|
||||
kDeviceMemoryBeginIndex)};
|
||||
mBuffers.push_back(std::move(buffer));
|
||||
tokens.push_back(token);
|
||||
outputs[i] = {.hasNoValue = false, .location = loc, .dimensions = {}};
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
||||
// In the case of zero-sized output, we should at least provide a one-byte buffer.
|
||||
// This is because zero-sized tensors are only supported internally to the driver, or
|
||||
// reported in output shapes. It is illegal for the client to pre-specify a zero-sized
|
||||
// tensor as model output. Otherwise, we will have two semantic conflicts:
|
||||
// - "Zero dimension" conflicts with "unspecified dimension".
|
||||
// - "Omitted operand buffer" conflicts with "zero-sized operand buffer".
|
||||
size_t bufferSize = std::max<size_t>(op.data.size(), 1);
|
||||
|
||||
// Reserve shared memory for output.
|
||||
DataLocation loc = {.poolIndex = kOutputPoolIndex,
|
||||
.offset = static_cast<int64_t>(outputSize),
|
||||
.length = static_cast<int64_t>(bufferSize)};
|
||||
outputSize += op.data.size() == 0 ? TestBuffer::kAlignment : op.data.alignedSize();
|
||||
outputs[i] = {.hasNoValue = false, .location = loc, .dimensions = {}};
|
||||
}
|
||||
|
||||
if (memoryType == MemoryType::DEVICE && mBuffers.empty()) {
|
||||
return std::nullopt;
|
||||
}
|
||||
|
||||
// Memory pools.
|
||||
if (memoryType == MemoryType::BLOB_AHWB) {
|
||||
mInputMemory = TestBlobAHWB::create(std::max<size_t>(inputSize, 1));
|
||||
mOutputMemory = TestBlobAHWB::create(std::max<size_t>(outputSize, 1));
|
||||
} else {
|
||||
mInputMemory = TestAshmem::create(std::max<size_t>(inputSize, 1));
|
||||
mOutputMemory = TestAshmem::create(std::max<size_t>(outputSize, 1));
|
||||
}
|
||||
CHECK_NE(mInputMemory, nullptr);
|
||||
CHECK_NE(mOutputMemory, nullptr);
|
||||
std::vector<RequestMemoryPool> pools;
|
||||
pools.reserve(kDeviceMemoryBeginIndex + mBuffers.size());
|
||||
|
||||
auto copiedInputMemory = utils::clone(*mInputMemory->getAidlMemory());
|
||||
CHECK(copiedInputMemory.has_value()) << copiedInputMemory.error().message;
|
||||
auto copiedOutputMemory = utils::clone(*mOutputMemory->getAidlMemory());
|
||||
CHECK(copiedOutputMemory.has_value()) << copiedOutputMemory.error().message;
|
||||
|
||||
pools.push_back(RequestMemoryPool::make<RequestMemoryPool::Tag::pool>(
|
||||
std::move(copiedInputMemory).value()));
|
||||
pools.push_back(RequestMemoryPool::make<RequestMemoryPool::Tag::pool>(
|
||||
std::move(copiedOutputMemory).value()));
|
||||
for (const auto& token : tokens) {
|
||||
pools.push_back(RequestMemoryPool::make<RequestMemoryPool::Tag::token>(token));
|
||||
}
|
||||
|
||||
// Copy input data to the input shared memory pool.
|
||||
uint8_t* inputPtr = mInputMemory->getPointer();
|
||||
for (uint32_t i = 0; i < testModel.main.inputIndexes.size(); i++) {
|
||||
if (!inputs[i].hasNoValue && inputs[i].location.poolIndex == kInputPoolIndex) {
|
||||
const auto& op = testModel.main.operands[testModel.main.inputIndexes[i]];
|
||||
const uint8_t* begin = op.data.get<uint8_t>();
|
||||
const uint8_t* end = begin + op.data.size();
|
||||
std::copy(begin, end, inputPtr + inputs[i].location.offset);
|
||||
}
|
||||
}
|
||||
return Request{
|
||||
.inputs = std::move(inputs), .outputs = std::move(outputs), .pools = std::move(pools)};
|
||||
}
|
||||
|
||||
std::vector<TestBuffer> ExecutionContext::getOutputBuffers(const TestModel& testModel,
|
||||
const Request& request) const {
|
||||
// Copy out output results.
|
||||
uint8_t* outputPtr = mOutputMemory->getPointer();
|
||||
std::vector<TestBuffer> outputBuffers;
|
||||
for (uint32_t i = 0; i < request.outputs.size(); i++) {
|
||||
const auto& outputLoc = request.outputs[i].location;
|
||||
if (outputLoc.poolIndex == kOutputPoolIndex) {
|
||||
outputBuffers.emplace_back(outputLoc.length, outputPtr + outputLoc.offset);
|
||||
} else {
|
||||
const auto& op = testModel.main.operands[testModel.main.outputIndexes[i]];
|
||||
if (op.data.size() == 0) {
|
||||
outputBuffers.emplace_back(0, nullptr);
|
||||
} else {
|
||||
SCOPED_TRACE("Output index = " + std::to_string(i));
|
||||
const uint32_t bufferIndex = outputLoc.poolIndex - kDeviceMemoryBeginIndex;
|
||||
TestBuffer buffer;
|
||||
getBuffer(mBuffers[bufferIndex], op.data.size(), &buffer);
|
||||
outputBuffers.push_back(std::move(buffer));
|
||||
}
|
||||
}
|
||||
}
|
||||
return outputBuffers;
|
||||
}
|
||||
|
||||
// Get a TestBuffer with data copied from an IBuffer object.
|
||||
void ExecutionContext::getBuffer(const std::shared_ptr<IBuffer>& buffer, size_t size,
|
||||
TestBuffer* testBuffer) const {
|
||||
// IBuffer -> Shared memory.
|
||||
auto sharedMemory = nn::createSharedMemory(size).value();
|
||||
auto aidlMemory = utils::convert(sharedMemory).value();
|
||||
const auto ret = buffer->copyTo(aidlMemory);
|
||||
ASSERT_TRUE(ret.isOk());
|
||||
|
||||
// Shared memory -> TestBuffer.
|
||||
const auto outputMemory = nn::map(sharedMemory).value();
|
||||
const uint8_t* outputPtr = std::visit(
|
||||
[](auto* ptr) { return static_cast<const uint8_t*>(ptr); }, outputMemory.pointer);
|
||||
ASSERT_NE(outputPtr, nullptr);
|
||||
ASSERT_NE(testBuffer, nullptr);
|
||||
*testBuffer = TestBuffer(size, outputPtr);
|
||||
}
|
||||
|
||||
static bool hasZeroSizedOutput(const TestModel& testModel) {
|
||||
return std::any_of(testModel.main.outputIndexes.begin(), testModel.main.outputIndexes.end(),
|
||||
[&testModel](uint32_t index) {
|
||||
return testModel.main.operands[index].data.size() == 0;
|
||||
});
|
||||
}
|
||||
|
||||
void EvaluatePreparedModel(const std::shared_ptr<IDevice>& device,
|
||||
const std::shared_ptr<IPreparedModel>& preparedModel,
|
||||
const TestModel& testModel, const TestConfig& testConfig,
|
||||
bool* skipped = nullptr) {
|
||||
if (skipped != nullptr) {
|
||||
*skipped = false;
|
||||
}
|
||||
// If output0 does not have size larger than one byte, we can not test with insufficient buffer.
|
||||
if (testConfig.outputType == OutputType::INSUFFICIENT &&
|
||||
!isOutputSizeGreaterThanOne(testModel, 0)) {
|
||||
return;
|
||||
}
|
||||
|
||||
ExecutionContext context(device, preparedModel);
|
||||
auto maybeRequest = context.createRequest(testModel, testConfig.memoryType);
|
||||
// Skip if testing memory domain but no device memory has been allocated.
|
||||
if (!maybeRequest.has_value()) {
|
||||
return;
|
||||
}
|
||||
|
||||
Request request = std::move(maybeRequest).value();
|
||||
|
||||
constexpr uint32_t kInsufficientOutputIndex = 0;
|
||||
if (testConfig.outputType == OutputType::INSUFFICIENT) {
|
||||
makeOutputInsufficientSize(kInsufficientOutputIndex, &request);
|
||||
}
|
||||
|
||||
int64_t loopTimeoutDuration = kOmittedTimeoutDuration;
|
||||
// OutputType::MISSED_DEADLINE is only used by
|
||||
// TestKind::INTINITE_LOOP_TIMEOUT tests to verify that an infinite loop is
|
||||
// aborted after a timeout.
|
||||
if (testConfig.outputType == OutputType::MISSED_DEADLINE) {
|
||||
// Override the default loop timeout duration with a small value to
|
||||
// speed up test execution.
|
||||
constexpr int64_t kMillisecond = 1'000'000;
|
||||
loopTimeoutDuration = 1 * kMillisecond;
|
||||
}
|
||||
|
||||
ErrorStatus executionStatus;
|
||||
std::vector<OutputShape> outputShapes;
|
||||
Timing timing = kNoTiming;
|
||||
switch (testConfig.executor) {
|
||||
case Executor::SYNC: {
|
||||
SCOPED_TRACE("synchronous");
|
||||
|
||||
ExecutionResult executionResult;
|
||||
// execute
|
||||
const auto ret = preparedModel->executeSynchronously(request, testConfig.measureTiming,
|
||||
kNoDeadline, loopTimeoutDuration,
|
||||
&executionResult);
|
||||
ASSERT_TRUE(ret.isOk() || ret.getExceptionCode() == EX_SERVICE_SPECIFIC)
|
||||
<< ret.getDescription();
|
||||
if (ret.isOk()) {
|
||||
executionStatus = executionResult.outputSufficientSize
|
||||
? ErrorStatus::NONE
|
||||
: ErrorStatus::OUTPUT_INSUFFICIENT_SIZE;
|
||||
outputShapes = std::move(executionResult.outputShapes);
|
||||
timing = executionResult.timing;
|
||||
} else {
|
||||
executionStatus = static_cast<ErrorStatus>(ret.getServiceSpecificError());
|
||||
}
|
||||
break;
|
||||
}
|
||||
case Executor::FENCED: {
|
||||
SCOPED_TRACE("fenced");
|
||||
ErrorStatus result = ErrorStatus::NONE;
|
||||
ndk::ScopedFileDescriptor syncFenceFd;
|
||||
std::shared_ptr<IFencedExecutionCallback> fencedCallback;
|
||||
auto ret = preparedModel->executeFenced(request, {}, testConfig.measureTiming,
|
||||
kNoDeadline, loopTimeoutDuration, kNoDuration,
|
||||
&syncFenceFd, &fencedCallback);
|
||||
ASSERT_TRUE(ret.isOk() || ret.getExceptionCode() == EX_SERVICE_SPECIFIC)
|
||||
<< ret.getDescription();
|
||||
if (!ret.isOk()) {
|
||||
result = static_cast<ErrorStatus>(ret.getServiceSpecificError());
|
||||
executionStatus = result;
|
||||
} else if (syncFenceFd.get() != -1) {
|
||||
std::vector<ndk::ScopedFileDescriptor> waitFor;
|
||||
auto dupFd = dup(syncFenceFd.get());
|
||||
ASSERT_NE(dupFd, -1);
|
||||
waitFor.emplace_back(dupFd);
|
||||
// If a sync fence is returned, try to start another run waiting for the sync fence.
|
||||
ret = preparedModel->executeFenced(request, waitFor, testConfig.measureTiming,
|
||||
kNoDeadline, loopTimeoutDuration, kNoDuration,
|
||||
&syncFenceFd, &fencedCallback);
|
||||
ASSERT_TRUE(ret.isOk());
|
||||
waitForSyncFence(syncFenceFd.get());
|
||||
}
|
||||
if (result == ErrorStatus::NONE) {
|
||||
ASSERT_NE(fencedCallback, nullptr);
|
||||
Timing timingFenced;
|
||||
auto ret =
|
||||
fencedCallback->getExecutionInfo(&timing, &timingFenced, &executionStatus);
|
||||
ASSERT_TRUE(ret.isOk());
|
||||
}
|
||||
break;
|
||||
}
|
||||
default: {
|
||||
FAIL() << "Unsupported execution mode for AIDL interface.";
|
||||
}
|
||||
}
|
||||
|
||||
if (testConfig.outputType != OutputType::FULLY_SPECIFIED &&
|
||||
executionStatus == ErrorStatus::GENERAL_FAILURE) {
|
||||
if (skipped != nullptr) {
|
||||
*skipped = true;
|
||||
}
|
||||
if (!testConfig.reportSkipping) {
|
||||
return;
|
||||
}
|
||||
LOG(INFO) << "NN VTS: Early termination of test because vendor service cannot "
|
||||
"execute model that it does not support.";
|
||||
std::cout << "[ ] Early termination of test because vendor service cannot "
|
||||
"execute model that it does not support."
|
||||
<< std::endl;
|
||||
GTEST_SKIP();
|
||||
}
|
||||
if (!testConfig.measureTiming) {
|
||||
EXPECT_EQ(timing, kNoTiming);
|
||||
} else {
|
||||
if (timing.timeOnDevice != -1 && timing.timeInDriver != -1) {
|
||||
EXPECT_LE(timing.timeOnDevice, timing.timeInDriver);
|
||||
}
|
||||
}
|
||||
|
||||
switch (testConfig.outputType) {
|
||||
case OutputType::FULLY_SPECIFIED:
|
||||
if (testConfig.executor == Executor::FENCED && hasZeroSizedOutput(testModel)) {
|
||||
// Executor::FENCED does not support zero-sized output.
|
||||
ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, executionStatus);
|
||||
return;
|
||||
}
|
||||
// If the model output operands are fully specified, outputShapes must be either
|
||||
// empty, or have the same number of elements as the number of outputs.
|
||||
ASSERT_EQ(ErrorStatus::NONE, executionStatus);
|
||||
ASSERT_TRUE(outputShapes.size() == 0 ||
|
||||
outputShapes.size() == testModel.main.outputIndexes.size());
|
||||
break;
|
||||
case OutputType::UNSPECIFIED:
|
||||
if (testConfig.executor == Executor::FENCED) {
|
||||
// For Executor::FENCED, the output shape must be fully specified.
|
||||
ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, executionStatus);
|
||||
return;
|
||||
}
|
||||
// If the model output operands are not fully specified, outputShapes must have
|
||||
// the same number of elements as the number of outputs.
|
||||
ASSERT_EQ(ErrorStatus::NONE, executionStatus);
|
||||
ASSERT_EQ(outputShapes.size(), testModel.main.outputIndexes.size());
|
||||
break;
|
||||
case OutputType::INSUFFICIENT:
|
||||
if (testConfig.executor == Executor::FENCED) {
|
||||
// For Executor::FENCED, the output shape must be fully specified.
|
||||
ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, executionStatus);
|
||||
return;
|
||||
}
|
||||
ASSERT_EQ(ErrorStatus::OUTPUT_INSUFFICIENT_SIZE, executionStatus);
|
||||
ASSERT_EQ(outputShapes.size(), testModel.main.outputIndexes.size());
|
||||
// Check that all returned output dimensions are at least as fully specified as the
|
||||
// union of the information about the corresponding operand in the model and in the
|
||||
// request. In this test, all model outputs have known rank with all dimensions
|
||||
// unspecified, and no dimensional information is provided in the request.
|
||||
for (uint32_t i = 0; i < outputShapes.size(); i++) {
|
||||
ASSERT_EQ(outputShapes[i].isSufficient, i != kInsufficientOutputIndex);
|
||||
const auto& actual = outputShapes[i].dimensions;
|
||||
const auto& golden =
|
||||
testModel.main.operands[testModel.main.outputIndexes[i]].dimensions;
|
||||
ASSERT_EQ(actual.size(), golden.size());
|
||||
for (uint32_t j = 0; j < actual.size(); j++) {
|
||||
if (actual[j] == 0) continue;
|
||||
EXPECT_EQ(actual[j], golden[j]) << "index: " << j;
|
||||
}
|
||||
}
|
||||
return;
|
||||
case OutputType::MISSED_DEADLINE:
|
||||
ASSERT_TRUE(executionStatus == ErrorStatus::MISSED_DEADLINE_TRANSIENT ||
|
||||
executionStatus == ErrorStatus::MISSED_DEADLINE_PERSISTENT)
|
||||
<< "executionStatus = " << executionStatus;
|
||||
return;
|
||||
}
|
||||
|
||||
// Go through all outputs, check returned output shapes.
|
||||
for (uint32_t i = 0; i < outputShapes.size(); i++) {
|
||||
EXPECT_TRUE(outputShapes[i].isSufficient);
|
||||
const auto& expect = testModel.main.operands[testModel.main.outputIndexes[i]].dimensions;
|
||||
const auto unsignedActual = nn::toUnsigned(outputShapes[i].dimensions);
|
||||
ASSERT_TRUE(unsignedActual.has_value());
|
||||
const std::vector<uint32_t>& actual = unsignedActual.value();
|
||||
EXPECT_EQ(expect, actual);
|
||||
}
|
||||
|
||||
// Retrieve execution results.
|
||||
const std::vector<TestBuffer> outputs = context.getOutputBuffers(testModel, request);
|
||||
|
||||
// We want "close-enough" results.
|
||||
checkResults(testModel, outputs);
|
||||
}
|
||||
|
||||
void EvaluatePreparedModel(const std::shared_ptr<IDevice>& device,
|
||||
const std::shared_ptr<IPreparedModel>& preparedModel,
|
||||
const TestModel& testModel, TestKind testKind) {
|
||||
std::vector<OutputType> outputTypesList;
|
||||
std::vector<bool> measureTimingList;
|
||||
std::vector<Executor> executorList;
|
||||
std::vector<MemoryType> memoryTypeList;
|
||||
|
||||
switch (testKind) {
|
||||
case TestKind::GENERAL: {
|
||||
outputTypesList = {OutputType::FULLY_SPECIFIED};
|
||||
measureTimingList = {false, true};
|
||||
executorList = {Executor::SYNC};
|
||||
memoryTypeList = {MemoryType::ASHMEM};
|
||||
} break;
|
||||
case TestKind::DYNAMIC_SHAPE: {
|
||||
outputTypesList = {OutputType::UNSPECIFIED, OutputType::INSUFFICIENT};
|
||||
measureTimingList = {false, true};
|
||||
executorList = {Executor::SYNC, Executor::FENCED};
|
||||
memoryTypeList = {MemoryType::ASHMEM};
|
||||
} break;
|
||||
case TestKind::MEMORY_DOMAIN: {
|
||||
outputTypesList = {OutputType::FULLY_SPECIFIED};
|
||||
measureTimingList = {false};
|
||||
executorList = {Executor::SYNC, Executor::FENCED};
|
||||
memoryTypeList = {MemoryType::BLOB_AHWB, MemoryType::DEVICE};
|
||||
} break;
|
||||
case TestKind::FENCED_COMPUTE: {
|
||||
outputTypesList = {OutputType::FULLY_SPECIFIED};
|
||||
measureTimingList = {false, true};
|
||||
executorList = {Executor::FENCED};
|
||||
memoryTypeList = {MemoryType::ASHMEM};
|
||||
} break;
|
||||
case TestKind::QUANTIZATION_COUPLING: {
|
||||
LOG(FATAL) << "Wrong TestKind for EvaluatePreparedModel";
|
||||
return;
|
||||
} break;
|
||||
case TestKind::INTINITE_LOOP_TIMEOUT: {
|
||||
outputTypesList = {OutputType::MISSED_DEADLINE};
|
||||
measureTimingList = {false, true};
|
||||
executorList = {Executor::SYNC, Executor::FENCED};
|
||||
memoryTypeList = {MemoryType::ASHMEM};
|
||||
} break;
|
||||
}
|
||||
|
||||
for (const OutputType outputType : outputTypesList) {
|
||||
for (const bool measureTiming : measureTimingList) {
|
||||
for (const Executor executor : executorList) {
|
||||
for (const MemoryType memoryType : memoryTypeList) {
|
||||
const TestConfig testConfig(executor, measureTiming, outputType, memoryType);
|
||||
EvaluatePreparedModel(device, preparedModel, testModel, testConfig);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void EvaluatePreparedCoupledModels(const std::shared_ptr<IDevice>& device,
|
||||
const std::shared_ptr<IPreparedModel>& preparedModel,
|
||||
const TestModel& testModel,
|
||||
const std::shared_ptr<IPreparedModel>& preparedCoupledModel,
|
||||
const TestModel& coupledModel) {
|
||||
const std::vector<OutputType> outputTypesList = {OutputType::FULLY_SPECIFIED};
|
||||
const std::vector<bool> measureTimingList = {false, true};
|
||||
const std::vector<Executor> executorList = {Executor::SYNC, Executor::FENCED};
|
||||
|
||||
for (const OutputType outputType : outputTypesList) {
|
||||
for (const bool measureTiming : measureTimingList) {
|
||||
for (const Executor executor : executorList) {
|
||||
const TestConfig testConfig(executor, measureTiming, outputType, MemoryType::ASHMEM,
|
||||
/*reportSkipping=*/false);
|
||||
bool baseSkipped = false;
|
||||
EvaluatePreparedModel(device, preparedModel, testModel, testConfig, &baseSkipped);
|
||||
bool coupledSkipped = false;
|
||||
EvaluatePreparedModel(device, preparedCoupledModel, coupledModel, testConfig,
|
||||
&coupledSkipped);
|
||||
ASSERT_EQ(baseSkipped, coupledSkipped);
|
||||
if (baseSkipped) {
|
||||
LOG(INFO) << "NN VTS: Early termination of test because vendor service cannot "
|
||||
"execute model that it does not support.";
|
||||
std::cout << "[ ] Early termination of test because vendor service "
|
||||
"cannot "
|
||||
"execute model that it does not support."
|
||||
<< std::endl;
|
||||
GTEST_SKIP();
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void Execute(const std::shared_ptr<IDevice>& device, const TestModel& testModel,
|
||||
TestKind testKind) {
|
||||
Model model = createModel(testModel);
|
||||
if (testKind == TestKind::DYNAMIC_SHAPE) {
|
||||
makeOutputDimensionsUnspecified(&model);
|
||||
}
|
||||
|
||||
std::shared_ptr<IPreparedModel> preparedModel;
|
||||
switch (testKind) {
|
||||
case TestKind::GENERAL:
|
||||
case TestKind::DYNAMIC_SHAPE:
|
||||
case TestKind::MEMORY_DOMAIN:
|
||||
case TestKind::FENCED_COMPUTE:
|
||||
case TestKind::INTINITE_LOOP_TIMEOUT: {
|
||||
createPreparedModel(device, model, &preparedModel);
|
||||
if (preparedModel == nullptr) return;
|
||||
EvaluatePreparedModel(device, preparedModel, testModel, testKind);
|
||||
} break;
|
||||
case TestKind::QUANTIZATION_COUPLING: {
|
||||
ASSERT_TRUE(testModel.hasQuant8CoupledOperands());
|
||||
createPreparedModel(device, model, &preparedModel,
|
||||
/*reportSkipping*/ false);
|
||||
TestModel signedQuantizedModel = convertQuant8AsymmOperandsToSigned(testModel);
|
||||
std::shared_ptr<IPreparedModel> preparedCoupledModel;
|
||||
createPreparedModel(device, createModel(signedQuantizedModel), &preparedCoupledModel,
|
||||
/*reportSkipping*/ false);
|
||||
// If we couldn't prepare a model with unsigned quantization, we must
|
||||
// fail to prepare a model with signed quantization as well.
|
||||
if (preparedModel == nullptr) {
|
||||
ASSERT_EQ(preparedCoupledModel, nullptr);
|
||||
// If we failed to prepare both of the models, we can safely skip
|
||||
// the test.
|
||||
LOG(INFO) << "NN VTS: Early termination of test because vendor service cannot "
|
||||
"prepare model that it does not support.";
|
||||
std::cout
|
||||
<< "[ ] Early termination of test because vendor service cannot "
|
||||
"prepare model that it does not support."
|
||||
<< std::endl;
|
||||
GTEST_SKIP();
|
||||
}
|
||||
ASSERT_NE(preparedCoupledModel, nullptr);
|
||||
EvaluatePreparedCoupledModels(device, preparedModel, testModel, preparedCoupledModel,
|
||||
signedQuantizedModel);
|
||||
} break;
|
||||
}
|
||||
}
|
||||
|
||||
void GeneratedTestBase::SetUp() {
|
||||
testing::TestWithParam<GeneratedTestParam>::SetUp();
|
||||
ASSERT_NE(kDevice, nullptr);
|
||||
}
|
||||
|
||||
std::vector<NamedModel> getNamedModels(const FilterFn& filter) {
|
||||
return TestModelManager::get().getTestModels(filter);
|
||||
}
|
||||
|
||||
std::vector<NamedModel> getNamedModels(const FilterNameFn& filter) {
|
||||
return TestModelManager::get().getTestModels(filter);
|
||||
}
|
||||
|
||||
std::string printGeneratedTest(const testing::TestParamInfo<GeneratedTestParam>& info) {
|
||||
const auto& [namedDevice, namedModel] = info.param;
|
||||
return gtestCompliantName(getName(namedDevice) + "_" + getName(namedModel));
|
||||
}
|
||||
|
||||
// Tag for the generated tests
|
||||
class GeneratedTest : public GeneratedTestBase {};
|
||||
|
||||
// Tag for the dynamic output shape tests
|
||||
class DynamicOutputShapeTest : public GeneratedTest {};
|
||||
|
||||
// Tag for the memory domain tests
|
||||
class MemoryDomainTest : public GeneratedTest {};
|
||||
|
||||
// Tag for the fenced compute tests
|
||||
class FencedComputeTest : public GeneratedTest {};
|
||||
|
||||
// Tag for the quantization coupling tests
|
||||
class QuantizationCouplingTest : public GeneratedTest {};
|
||||
|
||||
// Tag for the loop timeout tests
|
||||
class InfiniteLoopTimeoutTest : public GeneratedTest {};
|
||||
|
||||
TEST_P(GeneratedTest, Test) {
|
||||
Execute(kDevice, kTestModel, TestKind::GENERAL);
|
||||
}
|
||||
|
||||
TEST_P(DynamicOutputShapeTest, Test) {
|
||||
Execute(kDevice, kTestModel, TestKind::DYNAMIC_SHAPE);
|
||||
}
|
||||
|
||||
TEST_P(MemoryDomainTest, Test) {
|
||||
Execute(kDevice, kTestModel, TestKind::MEMORY_DOMAIN);
|
||||
}
|
||||
|
||||
TEST_P(FencedComputeTest, Test) {
|
||||
Execute(kDevice, kTestModel, TestKind::FENCED_COMPUTE);
|
||||
}
|
||||
|
||||
TEST_P(QuantizationCouplingTest, Test) {
|
||||
Execute(kDevice, kTestModel, TestKind::QUANTIZATION_COUPLING);
|
||||
}
|
||||
|
||||
TEST_P(InfiniteLoopTimeoutTest, Test) {
|
||||
Execute(kDevice, kTestModel, TestKind::INTINITE_LOOP_TIMEOUT);
|
||||
}
|
||||
|
||||
INSTANTIATE_GENERATED_TEST(GeneratedTest,
|
||||
[](const TestModel& testModel) { return !testModel.expectFailure; });
|
||||
|
||||
INSTANTIATE_GENERATED_TEST(DynamicOutputShapeTest, [](const TestModel& testModel) {
|
||||
return !testModel.expectFailure && !testModel.hasScalarOutputs();
|
||||
});
|
||||
|
||||
INSTANTIATE_GENERATED_TEST(MemoryDomainTest,
|
||||
[](const TestModel& testModel) { return !testModel.expectFailure; });
|
||||
|
||||
INSTANTIATE_GENERATED_TEST(FencedComputeTest,
|
||||
[](const TestModel& testModel) { return !testModel.expectFailure; });
|
||||
|
||||
INSTANTIATE_GENERATED_TEST(QuantizationCouplingTest, [](const TestModel& testModel) {
|
||||
return !testModel.expectFailure && testModel.hasQuant8CoupledOperands() &&
|
||||
testModel.main.operations.size() == 1;
|
||||
});
|
||||
|
||||
INSTANTIATE_GENERATED_TEST(InfiniteLoopTimeoutTest, [](const TestModel& testModel) {
|
||||
return testModel.isInfiniteLoopTimeoutTest();
|
||||
});
|
||||
|
||||
} // namespace aidl::android::hardware::neuralnetworks::vts::functional
|
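The nested loops above expand each TestKind into the full cross product of output type, timing measurement, executor, and memory type. As an illustration only (a sketch, not part of this change), TestKind::DYNAMIC_SHAPE flattens to the following eight configurations, using the four-argument TestConfig constructor seen above; variable names mirror the enclosing function and the inline comment annotations are descriptive rather than authoritative parameter names:
const std::vector<TestConfig> kDynamicShapeConfigs = {
        TestConfig(Executor::SYNC, /*measureTiming=*/false, OutputType::UNSPECIFIED, MemoryType::ASHMEM),
        TestConfig(Executor::SYNC, /*measureTiming=*/false, OutputType::INSUFFICIENT, MemoryType::ASHMEM),
        TestConfig(Executor::SYNC, /*measureTiming=*/true, OutputType::UNSPECIFIED, MemoryType::ASHMEM),
        TestConfig(Executor::SYNC, /*measureTiming=*/true, OutputType::INSUFFICIENT, MemoryType::ASHMEM),
        TestConfig(Executor::FENCED, /*measureTiming=*/false, OutputType::UNSPECIFIED, MemoryType::ASHMEM),
        TestConfig(Executor::FENCED, /*measureTiming=*/false, OutputType::INSUFFICIENT, MemoryType::ASHMEM),
        TestConfig(Executor::FENCED, /*measureTiming=*/true, OutputType::UNSPECIFIED, MemoryType::ASHMEM),
        TestConfig(Executor::FENCED, /*measureTiming=*/true, OutputType::INSUFFICIENT, MemoryType::ASHMEM),
};
for (const TestConfig& config : kDynamicShapeConfigs) {
    EvaluatePreparedModel(device, preparedModel, testModel, config);
}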
neuralnetworks/aidl/vts/functional/GeneratedTestHarness.h (new file, 88 lines)
|
@ -0,0 +1,88 @@
|
|||
/*
|
||||
* Copyright (C) 2021 The Android Open Source Project
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#ifndef ANDROID_HARDWARE_NEURALNETWORKS_AIDL_GENERATED_TEST_HARNESS_H
|
||||
#define ANDROID_HARDWARE_NEURALNETWORKS_AIDL_GENERATED_TEST_HARNESS_H
|
||||
|
||||
#include <functional>
|
||||
#include <vector>
|
||||
|
||||
#include <TestHarness.h>
|
||||
#include "Utils.h"
|
||||
#include "VtsHalNeuralnetworks.h"
|
||||
|
||||
namespace aidl::android::hardware::neuralnetworks::vts::functional {
|
||||
|
||||
using NamedModel = Named<const test_helper::TestModel*>;
|
||||
using GeneratedTestParam = std::tuple<NamedDevice, NamedModel>;
|
||||
|
||||
class GeneratedTestBase : public testing::TestWithParam<GeneratedTestParam> {
|
||||
protected:
|
||||
void SetUp() override;
|
||||
const std::shared_ptr<IDevice> kDevice = getData(std::get<NamedDevice>(GetParam()));
|
||||
const test_helper::TestModel& kTestModel = *getData(std::get<NamedModel>(GetParam()));
|
||||
};
|
||||
|
||||
using FilterFn = std::function<bool(const test_helper::TestModel&)>;
|
||||
std::vector<NamedModel> getNamedModels(const FilterFn& filter);
|
||||
|
||||
using FilterNameFn = std::function<bool(const std::string&)>;
|
||||
std::vector<NamedModel> getNamedModels(const FilterNameFn& filter);
|
||||
|
||||
std::string printGeneratedTest(const testing::TestParamInfo<GeneratedTestParam>& info);
|
||||
|
||||
#define INSTANTIATE_GENERATED_TEST(TestSuite, filter) \
|
||||
GTEST_ALLOW_UNINSTANTIATED_PARAMETERIZED_TEST(TestSuite); \
|
||||
INSTANTIATE_TEST_SUITE_P(TestGenerated, TestSuite, \
|
||||
testing::Combine(testing::ValuesIn(getNamedDevices()), \
|
||||
testing::ValuesIn(getNamedModels(filter))), \
|
||||
printGeneratedTest)
|
||||
|
||||
// Tag for the validation tests, instantiated in VtsHalNeuralnetworks.cpp.
|
||||
// TODO: Clean up the hierarchy for ValidationTest.
|
||||
class ValidationTest : public GeneratedTestBase {};
|
||||
|
||||
Model createModel(const test_helper::TestModel& testModel);
|
||||
|
||||
void PrepareModel(const std::shared_ptr<IDevice>& device, const Model& model,
|
||||
std::shared_ptr<IPreparedModel>* preparedModel);
|
||||
|
||||
enum class TestKind {
|
||||
// Runs a test model and compares the results to golden data
|
||||
GENERAL,
|
||||
// Same as GENERAL but sets dimensions for the output tensors to zeros
|
||||
DYNAMIC_SHAPE,
|
||||
// Same as GENERAL but uses device memories for inputs and outputs
|
||||
MEMORY_DOMAIN,
|
||||
// Same as GENERAL but uses executeFenced for execution
|
||||
FENCED_COMPUTE,
|
||||
// Tests if quantized model with TENSOR_QUANT8_ASYMM produces the same result
|
||||
// (OK/SKIPPED/FAILED) as the model with all such tensors converted to
|
||||
// TENSOR_QUANT8_ASYMM_SIGNED.
|
||||
QUANTIZATION_COUPLING,
|
||||
// Runs a test model and verifies that MISSED_DEADLINE_* is returned.
|
||||
INTINITE_LOOP_TIMEOUT
|
||||
};
|
||||
|
||||
void EvaluatePreparedModel(const std::shared_ptr<IDevice>& device,
|
||||
const std::shared_ptr<IPreparedModel>& preparedModel,
|
||||
const test_helper::TestModel& testModel, TestKind testKind);
|
||||
|
||||
void waitForSyncFence(int syncFd);
|
||||
|
||||
} // namespace aidl::android::hardware::neuralnetworks::vts::functional
|
||||
|
||||
#endif // ANDROID_HARDWARE_NEURALNETWORKS_AIDL_GENERATED_TEST_HARNESS_H
|
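For reference, a minimal sketch of how the fenced path declared above is typically driven (illustrative only, not part of this change). The call shape matches the executeFenced usage in ValidateRequest.cpp and the constants come from Utils.h in this change; a valid request and preparedModel are assumed to be in scope, and the inline argument comments are descriptive rather than authoritative parameter names:
ndk::ScopedFileDescriptor syncFence;
std::shared_ptr<IFencedExecutionCallback> callback;
const auto status = preparedModel->executeFenced(request, /*waitFor=*/{}, /*measureTiming=*/false,
                                                 kNoDeadline, kOmittedTimeoutDuration, kNoDuration,
                                                 &syncFence, &callback);
ASSERT_TRUE(status.isOk());
if (syncFence.get() != -1) {
    // Block until the driver signals the fence; results can then be read back.
    waitForSyncFence(syncFence.get());
}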
neuralnetworks/aidl/vts/functional/LogTestCaseToLogcat.h (new file, 40 lines)
|
@ -0,0 +1,40 @@
|
|||
/*
|
||||
* Copyright (C) 2021 The Android Open Source Project
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#ifndef ANDROID_HARDWARE_NEURALNETWORKS_AIDL_LOG_TEST_CASE_TO_LOGCAT_H
|
||||
#define ANDROID_HARDWARE_NEURALNETWORKS_AIDL_LOG_TEST_CASE_TO_LOGCAT_H
|
||||
|
||||
#include <android-base/logging.h>
|
||||
#include <gtest/gtest.h>
|
||||
|
||||
namespace aidl::android::hardware::neuralnetworks {
|
||||
|
||||
class LogTestCaseToLogcat : public ::testing::EmptyTestEventListener {
|
||||
public:
|
||||
void OnTestStart(const ::testing::TestInfo& test_info) override {
|
||||
LOG(INFO) << "[Test Case] " << test_info.test_suite_name() << "." << test_info.name()
|
||||
<< " BEGIN";
|
||||
}
|
||||
|
||||
void OnTestEnd(const ::testing::TestInfo& test_info) override {
|
||||
LOG(INFO) << "[Test Case] " << test_info.test_suite_name() << "." << test_info.name()
|
||||
<< " END";
|
||||
}
|
||||
};
|
||||
|
||||
} // namespace aidl::android::hardware::neuralnetworks
|
||||
|
||||
#endif // ANDROID_HARDWARE_NEURALNETWORKS_AIDL_LOG_TEST_CASE_TO_LOGCAT_H
|
neuralnetworks/aidl/vts/functional/MemoryDomainTests.cpp (new file, 1176 lines)
File diff suppressed because it is too large
neuralnetworks/aidl/vts/functional/QualityOfServiceTests.cpp (new file, 270 lines)
|
@ -0,0 +1,270 @@
|
|||
/*
|
||||
* Copyright (C) 2021 The Android Open Source Project
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#include <android/binder_enums.h>
|
||||
#include <android/binder_interface_utils.h>
|
||||
#include <android/binder_status.h>
|
||||
|
||||
#include <nnapi/hal/aidl/Conversions.h>
|
||||
|
||||
#include "Callbacks.h"
|
||||
#include "GeneratedTestHarness.h"
|
||||
#include "Utils.h"
|
||||
|
||||
namespace aidl::android::hardware::neuralnetworks::vts::functional {
|
||||
|
||||
using implementation::PreparedModelCallback;
|
||||
using test_helper::TestBuffer;
|
||||
using test_helper::TestModel;
|
||||
|
||||
enum class DeadlineBoundType { NOW, UNLIMITED, SHORT };
|
||||
constexpr std::array<DeadlineBoundType, 3> deadlineBounds = {
|
||||
DeadlineBoundType::NOW, DeadlineBoundType::UNLIMITED, DeadlineBoundType::SHORT};
|
||||
std::string toString(DeadlineBoundType type) {
|
||||
switch (type) {
|
||||
case DeadlineBoundType::NOW:
|
||||
return "NOW";
|
||||
case DeadlineBoundType::UNLIMITED:
|
||||
return "UNLIMITED";
|
||||
case DeadlineBoundType::SHORT:
|
||||
return "SHORT";
|
||||
}
|
||||
LOG(FATAL) << "Unrecognized DeadlineBoundType: " << static_cast<int>(type);
|
||||
return {};
|
||||
}
|
||||
|
||||
constexpr auto kShortDuration = std::chrono::milliseconds{5};
|
||||
|
||||
using Results = std::tuple<ErrorStatus, std::vector<OutputShape>, Timing>;
|
||||
using MaybeResults = std::optional<Results>;
|
||||
|
||||
static int64_t makeDeadline(DeadlineBoundType deadlineBoundType) {
|
||||
const auto getNanosecondsSinceEpoch = [](const auto& time) -> int64_t {
|
||||
const auto timeSinceEpoch = time.time_since_epoch();
|
||||
return std::chrono::duration_cast<std::chrono::nanoseconds>(timeSinceEpoch).count();
|
||||
};
|
||||
|
||||
std::chrono::steady_clock::time_point timePoint;
|
||||
switch (deadlineBoundType) {
|
||||
case DeadlineBoundType::NOW:
|
||||
timePoint = std::chrono::steady_clock::now();
|
||||
break;
|
||||
case DeadlineBoundType::UNLIMITED:
|
||||
timePoint = std::chrono::steady_clock::time_point::max();
|
||||
break;
|
||||
case DeadlineBoundType::SHORT:
|
||||
timePoint = std::chrono::steady_clock::now() + kShortDuration;
|
||||
break;
|
||||
}
|
||||
|
||||
return getNanosecondsSinceEpoch(timePoint);
|
||||
}
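// For example (a sketch, not part of this change), with kShortDuration == 5ms:
//   const int64_t soon = makeDeadline(DeadlineBoundType::SHORT);
//   // 'soon' is steady_clock::now() + 5ms, expressed as nanoseconds since the clock's epoch,
//   // so the driver may legitimately finish in time or abort with MISSED_DEADLINE_*.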
|
||||
|
||||
void runPrepareModelTest(const std::shared_ptr<IDevice>& device, const Model& model,
|
||||
Priority priority, std::optional<DeadlineBoundType> deadlineBound) {
|
||||
int64_t deadline = kNoDeadline;
|
||||
if (deadlineBound.has_value()) {
|
||||
deadline = makeDeadline(deadlineBound.value());
|
||||
}
|
||||
|
||||
// see if service can handle model
|
||||
std::vector<bool> supportedOps;
|
||||
const auto supportedCallStatus = device->getSupportedOperations(model, &supportedOps);
|
||||
ASSERT_TRUE(supportedCallStatus.isOk());
|
||||
ASSERT_NE(0ul, supportedOps.size());
|
||||
const bool fullySupportsModel =
|
||||
std::all_of(supportedOps.begin(), supportedOps.end(), [](bool valid) { return valid; });
|
||||
|
||||
// launch prepare model
|
||||
const std::shared_ptr<PreparedModelCallback> preparedModelCallback =
|
||||
ndk::SharedRefBase::make<PreparedModelCallback>();
|
||||
const auto prepareLaunchStatus =
|
||||
device->prepareModel(model, ExecutionPreference::FAST_SINGLE_ANSWER, priority, deadline,
|
||||
{}, {}, kEmptyCacheToken, preparedModelCallback);
|
||||
ASSERT_TRUE(prepareLaunchStatus.isOk())
|
||||
<< "prepareLaunchStatus: " << prepareLaunchStatus.getDescription();
|
||||
|
||||
// retrieve prepared model
|
||||
preparedModelCallback->wait();
|
||||
const ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
|
||||
const std::shared_ptr<IPreparedModel> preparedModel = preparedModelCallback->getPreparedModel();
|
||||
|
||||
// The getSupportedOperations call returns a list of operations that are guaranteed not to fail
|
||||
// if prepareModel is called, and 'fullySupportsModel' is true i.f.f. the entire model is
|
||||
// guaranteed. If a driver has any doubt that it can prepare an operation, it must return false.
|
||||
// So here, if a driver isn't sure if it can support an operation, but reports that it
|
||||
// successfully prepared the model, the test can continue.
|
||||
if (!fullySupportsModel && prepareReturnStatus != ErrorStatus::NONE) {
|
||||
ASSERT_EQ(nullptr, preparedModel.get());
|
||||
return;
|
||||
}
|
||||
|
||||
// verify return status
|
||||
if (!deadlineBound.has_value()) {
|
||||
EXPECT_EQ(ErrorStatus::NONE, prepareReturnStatus);
|
||||
} else {
|
||||
switch (deadlineBound.value()) {
|
||||
case DeadlineBoundType::NOW:
|
||||
case DeadlineBoundType::SHORT:
|
||||
// Either the driver successfully completed the task or it
|
||||
// aborted and returned MISSED_DEADLINE_*.
|
||||
EXPECT_TRUE(prepareReturnStatus == ErrorStatus::NONE ||
|
||||
prepareReturnStatus == ErrorStatus::MISSED_DEADLINE_TRANSIENT ||
|
||||
prepareReturnStatus == ErrorStatus::MISSED_DEADLINE_PERSISTENT);
|
||||
break;
|
||||
case DeadlineBoundType::UNLIMITED:
|
||||
// If an unlimited deadline is supplied, we expect model preparation to
|
||||
// proceed normally. In this case, check it normally by breaking out
|
||||
// of the switch statement.
|
||||
EXPECT_EQ(ErrorStatus::NONE, prepareReturnStatus);
|
||||
break;
|
||||
}
|
||||
}
|
||||
ASSERT_EQ(prepareReturnStatus == ErrorStatus::NONE, preparedModel.get() != nullptr);
|
||||
}
|
||||
|
||||
void runPrepareModelTests(const std::shared_ptr<IDevice>& device, const Model& model) {
|
||||
// test priority
|
||||
for (auto priority : ndk::enum_range<Priority>{}) {
|
||||
SCOPED_TRACE("priority: " + toString(priority));
|
||||
if (priority == kDefaultPriority) continue;
|
||||
runPrepareModelTest(device, model, priority, {});
|
||||
}
|
||||
|
||||
// test deadline
|
||||
for (auto deadlineBound : deadlineBounds) {
|
||||
SCOPED_TRACE("deadlineBound: " + toString(deadlineBound));
|
||||
runPrepareModelTest(device, model, kDefaultPriority, deadlineBound);
|
||||
}
|
||||
}
|
||||
|
||||
static MaybeResults executeSynchronously(const std::shared_ptr<IPreparedModel>& preparedModel,
|
||||
const Request& request, int64_t deadline) {
|
||||
SCOPED_TRACE("synchronous");
|
||||
const bool measure = false;
|
||||
|
||||
// run execution
|
||||
ExecutionResult executionResult;
|
||||
const auto ret = preparedModel->executeSynchronously(request, measure, deadline,
|
||||
kOmittedTimeoutDuration, &executionResult);
|
||||
EXPECT_TRUE(ret.isOk() || ret.getExceptionCode() == EX_SERVICE_SPECIFIC)
|
||||
<< ret.getDescription();
|
||||
if (!ret.isOk()) {
|
||||
if (ret.getExceptionCode() != EX_SERVICE_SPECIFIC) {
|
||||
return std::nullopt;
|
||||
}
|
||||
return MaybeResults(
|
||||
{static_cast<ErrorStatus>(ret.getServiceSpecificError()), {}, kNoTiming});
|
||||
}
|
||||
|
||||
// return results
|
||||
return MaybeResults({executionResult.outputSufficientSize
|
||||
? ErrorStatus::NONE
|
||||
: ErrorStatus::OUTPUT_INSUFFICIENT_SIZE,
|
||||
std::move(executionResult.outputShapes), executionResult.timing});
|
||||
}
|
||||
|
||||
void runExecutionTest(const std::shared_ptr<IPreparedModel>& preparedModel,
|
||||
const TestModel& testModel, const Request& request,
|
||||
const ExecutionContext& context, DeadlineBoundType deadlineBound) {
|
||||
const auto deadline = makeDeadline(deadlineBound);
|
||||
|
||||
// Perform execution and unpack results.
|
||||
const auto results = executeSynchronously(preparedModel, request, deadline);
|
||||
if (!results.has_value()) return;
|
||||
const auto& [status, outputShapes, timing] = results.value();
|
||||
|
||||
// Verify no timing information was returned
|
||||
EXPECT_EQ(timing, kNoTiming);
|
||||
|
||||
// Validate deadline information if applicable.
|
||||
switch (deadlineBound) {
|
||||
case DeadlineBoundType::NOW:
|
||||
case DeadlineBoundType::SHORT:
|
||||
// Either the driver successfully completed the task or it
|
||||
// aborted and returned MISSED_DEADLINE_*.
|
||||
ASSERT_TRUE(status == ErrorStatus::NONE ||
|
||||
status == ErrorStatus::MISSED_DEADLINE_TRANSIENT ||
|
||||
status == ErrorStatus::MISSED_DEADLINE_PERSISTENT);
|
||||
break;
|
||||
case DeadlineBoundType::UNLIMITED:
|
||||
// If an unlimited deadline is supplied, we expect the execution to
|
||||
// proceed normally. In this case, check it normally by breaking out
|
||||
// of the switch statement.
|
||||
ASSERT_EQ(ErrorStatus::NONE, status);
|
||||
break;
|
||||
}
|
||||
|
||||
// If the model output operands are fully specified, outputShapes must be either
|
||||
// either empty, or have the same number of elements as the number of outputs.
|
||||
ASSERT_TRUE(outputShapes.size() == 0 ||
|
||||
outputShapes.size() == testModel.main.outputIndexes.size());
|
||||
|
||||
// Go through all outputs, check returned output shapes.
|
||||
for (uint32_t i = 0; i < outputShapes.size(); i++) {
|
||||
EXPECT_TRUE(outputShapes[i].isSufficient);
|
||||
const auto expect =
|
||||
utils::toSigned(testModel.main.operands[testModel.main.outputIndexes[i]].dimensions)
|
||||
.value();
|
||||
const std::vector<int32_t>& actual = outputShapes[i].dimensions;
|
||||
EXPECT_EQ(expect, actual);
|
||||
}
|
||||
|
||||
// Retrieve execution results.
|
||||
const std::vector<TestBuffer> outputs = context.getOutputBuffers(request);
|
||||
|
||||
// We want "close-enough" results.
|
||||
if (status == ErrorStatus::NONE) {
|
||||
checkResults(testModel, outputs);
|
||||
}
|
||||
}
|
||||
|
||||
void runExecutionTests(const std::shared_ptr<IPreparedModel>& preparedModel,
|
||||
const TestModel& testModel, const Request& request,
|
||||
const ExecutionContext& context) {
|
||||
for (auto deadlineBound : deadlineBounds) {
|
||||
runExecutionTest(preparedModel, testModel, request, context, deadlineBound);
|
||||
}
|
||||
}
|
||||
|
||||
void runTests(const std::shared_ptr<IDevice>& device, const TestModel& testModel) {
|
||||
// setup
|
||||
const Model model = createModel(testModel);
|
||||
|
||||
// run prepare model tests
|
||||
runPrepareModelTests(device, model);
|
||||
|
||||
// prepare model
|
||||
std::shared_ptr<IPreparedModel> preparedModel;
|
||||
createPreparedModel(device, model, &preparedModel);
|
||||
if (preparedModel == nullptr) return;
|
||||
|
||||
// run execution tests
|
||||
ExecutionContext context;
|
||||
const Request request = context.createRequest(testModel);
|
||||
runExecutionTests(preparedModel, testModel, request, context);
|
||||
}
|
||||
|
||||
class DeadlineTest : public GeneratedTestBase {};
|
||||
|
||||
TEST_P(DeadlineTest, Test) {
|
||||
runTests(kDevice, kTestModel);
|
||||
}
|
||||
|
||||
INSTANTIATE_GENERATED_TEST(DeadlineTest,
|
||||
[](const TestModel& testModel) { return !testModel.expectFailure; });
|
||||
|
||||
} // namespace aidl::android::hardware::neuralnetworks::vts::functional
|
neuralnetworks/aidl/vts/functional/TestAssertions.cpp (new file, 153 lines)
|
@ -0,0 +1,153 @@
|
|||
/*
|
||||
* Copyright (C) 2021 The Android Open Source Project
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#include <aidl/android/hardware/neuralnetworks/IPreparedModel.h>
|
||||
#include <aidl/android/hardware/neuralnetworks/OperandType.h>
|
||||
#include <aidl/android/hardware/neuralnetworks/OperationType.h>
|
||||
|
||||
#include <ControlFlow.h>
|
||||
#include <TestHarness.h>
|
||||
|
||||
namespace aidl::android::hardware::neuralnetworks {
|
||||
|
||||
namespace nn = ::android::nn;
|
||||
|
||||
static_assert(static_cast<uint64_t>(IPreparedModel::DEFAULT_LOOP_TIMEOUT_DURATION_NS) ==
|
||||
nn::operation_while::kTimeoutNsDefault);
|
||||
static_assert(static_cast<uint64_t>(IPreparedModel::MAXIMUM_LOOP_TIMEOUT_DURATION_NS) ==
|
||||
nn::operation_while::kTimeoutNsMaximum);
|
||||
|
||||
// Make sure that the AIDL enums are compatible with the values defined in
|
||||
// frameworks/ml/nn/tools/test_generator/test_harness/include/TestHarness.h.
|
||||
using namespace test_helper;
|
||||
#define CHECK_TEST_ENUM(EnumType, enumValue) \
|
||||
static_assert(static_cast<EnumType>(Test##EnumType::enumValue) == EnumType::enumValue)
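// For example (a sketch, not part of this change), CHECK_TEST_ENUM(OperandType, FLOAT32) expands to:
//   static_assert(static_cast<OperandType>(TestOperandType::FLOAT32) == OperandType::FLOAT32);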
|
||||
|
||||
CHECK_TEST_ENUM(OperandType, FLOAT32);
|
||||
CHECK_TEST_ENUM(OperandType, INT32);
|
||||
CHECK_TEST_ENUM(OperandType, UINT32);
|
||||
CHECK_TEST_ENUM(OperandType, TENSOR_FLOAT32);
|
||||
CHECK_TEST_ENUM(OperandType, TENSOR_INT32);
|
||||
CHECK_TEST_ENUM(OperandType, TENSOR_QUANT8_ASYMM);
|
||||
CHECK_TEST_ENUM(OperandType, BOOL);
|
||||
CHECK_TEST_ENUM(OperandType, TENSOR_QUANT16_SYMM);
|
||||
CHECK_TEST_ENUM(OperandType, TENSOR_FLOAT16);
|
||||
CHECK_TEST_ENUM(OperandType, TENSOR_BOOL8);
|
||||
CHECK_TEST_ENUM(OperandType, FLOAT16);
|
||||
CHECK_TEST_ENUM(OperandType, TENSOR_QUANT8_SYMM_PER_CHANNEL);
|
||||
CHECK_TEST_ENUM(OperandType, TENSOR_QUANT16_ASYMM);
|
||||
CHECK_TEST_ENUM(OperandType, TENSOR_QUANT8_SYMM);
|
||||
CHECK_TEST_ENUM(OperandType, TENSOR_QUANT8_ASYMM_SIGNED);
|
||||
|
||||
CHECK_TEST_ENUM(OperationType, ADD);
|
||||
CHECK_TEST_ENUM(OperationType, AVERAGE_POOL_2D);
|
||||
CHECK_TEST_ENUM(OperationType, CONCATENATION);
|
||||
CHECK_TEST_ENUM(OperationType, CONV_2D);
|
||||
CHECK_TEST_ENUM(OperationType, DEPTHWISE_CONV_2D);
|
||||
CHECK_TEST_ENUM(OperationType, DEPTH_TO_SPACE);
|
||||
CHECK_TEST_ENUM(OperationType, DEQUANTIZE);
|
||||
CHECK_TEST_ENUM(OperationType, EMBEDDING_LOOKUP);
|
||||
CHECK_TEST_ENUM(OperationType, FLOOR);
|
||||
CHECK_TEST_ENUM(OperationType, FULLY_CONNECTED);
|
||||
CHECK_TEST_ENUM(OperationType, HASHTABLE_LOOKUP);
|
||||
CHECK_TEST_ENUM(OperationType, L2_NORMALIZATION);
|
||||
CHECK_TEST_ENUM(OperationType, L2_POOL_2D);
|
||||
CHECK_TEST_ENUM(OperationType, LOCAL_RESPONSE_NORMALIZATION);
|
||||
CHECK_TEST_ENUM(OperationType, LOGISTIC);
|
||||
CHECK_TEST_ENUM(OperationType, LSH_PROJECTION);
|
||||
CHECK_TEST_ENUM(OperationType, LSTM);
|
||||
CHECK_TEST_ENUM(OperationType, MAX_POOL_2D);
|
||||
CHECK_TEST_ENUM(OperationType, MUL);
|
||||
CHECK_TEST_ENUM(OperationType, RELU);
|
||||
CHECK_TEST_ENUM(OperationType, RELU1);
|
||||
CHECK_TEST_ENUM(OperationType, RELU6);
|
||||
CHECK_TEST_ENUM(OperationType, RESHAPE);
|
||||
CHECK_TEST_ENUM(OperationType, RESIZE_BILINEAR);
|
||||
CHECK_TEST_ENUM(OperationType, RNN);
|
||||
CHECK_TEST_ENUM(OperationType, SOFTMAX);
|
||||
CHECK_TEST_ENUM(OperationType, SPACE_TO_DEPTH);
|
||||
CHECK_TEST_ENUM(OperationType, SVDF);
|
||||
CHECK_TEST_ENUM(OperationType, TANH);
|
||||
CHECK_TEST_ENUM(OperationType, BATCH_TO_SPACE_ND);
|
||||
CHECK_TEST_ENUM(OperationType, DIV);
|
||||
CHECK_TEST_ENUM(OperationType, MEAN);
|
||||
CHECK_TEST_ENUM(OperationType, PAD);
|
||||
CHECK_TEST_ENUM(OperationType, SPACE_TO_BATCH_ND);
|
||||
CHECK_TEST_ENUM(OperationType, SQUEEZE);
|
||||
CHECK_TEST_ENUM(OperationType, STRIDED_SLICE);
|
||||
CHECK_TEST_ENUM(OperationType, SUB);
|
||||
CHECK_TEST_ENUM(OperationType, TRANSPOSE);
|
||||
CHECK_TEST_ENUM(OperationType, ABS);
|
||||
CHECK_TEST_ENUM(OperationType, ARGMAX);
|
||||
CHECK_TEST_ENUM(OperationType, ARGMIN);
|
||||
CHECK_TEST_ENUM(OperationType, AXIS_ALIGNED_BBOX_TRANSFORM);
|
||||
CHECK_TEST_ENUM(OperationType, BIDIRECTIONAL_SEQUENCE_LSTM);
|
||||
CHECK_TEST_ENUM(OperationType, BIDIRECTIONAL_SEQUENCE_RNN);
|
||||
CHECK_TEST_ENUM(OperationType, BOX_WITH_NMS_LIMIT);
|
||||
CHECK_TEST_ENUM(OperationType, CAST);
|
||||
CHECK_TEST_ENUM(OperationType, CHANNEL_SHUFFLE);
|
||||
CHECK_TEST_ENUM(OperationType, DETECTION_POSTPROCESSING);
|
||||
CHECK_TEST_ENUM(OperationType, EQUAL);
|
||||
CHECK_TEST_ENUM(OperationType, EXP);
|
||||
CHECK_TEST_ENUM(OperationType, EXPAND_DIMS);
|
||||
CHECK_TEST_ENUM(OperationType, GATHER);
|
||||
CHECK_TEST_ENUM(OperationType, GENERATE_PROPOSALS);
|
||||
CHECK_TEST_ENUM(OperationType, GREATER);
|
||||
CHECK_TEST_ENUM(OperationType, GREATER_EQUAL);
|
||||
CHECK_TEST_ENUM(OperationType, GROUPED_CONV_2D);
|
||||
CHECK_TEST_ENUM(OperationType, HEATMAP_MAX_KEYPOINT);
|
||||
CHECK_TEST_ENUM(OperationType, INSTANCE_NORMALIZATION);
|
||||
CHECK_TEST_ENUM(OperationType, LESS);
|
||||
CHECK_TEST_ENUM(OperationType, LESS_EQUAL);
|
||||
CHECK_TEST_ENUM(OperationType, LOG);
|
||||
CHECK_TEST_ENUM(OperationType, LOGICAL_AND);
|
||||
CHECK_TEST_ENUM(OperationType, LOGICAL_NOT);
|
||||
CHECK_TEST_ENUM(OperationType, LOGICAL_OR);
|
||||
CHECK_TEST_ENUM(OperationType, LOG_SOFTMAX);
|
||||
CHECK_TEST_ENUM(OperationType, MAXIMUM);
|
||||
CHECK_TEST_ENUM(OperationType, MINIMUM);
|
||||
CHECK_TEST_ENUM(OperationType, NEG);
|
||||
CHECK_TEST_ENUM(OperationType, NOT_EQUAL);
|
||||
CHECK_TEST_ENUM(OperationType, PAD_V2);
|
||||
CHECK_TEST_ENUM(OperationType, POW);
|
||||
CHECK_TEST_ENUM(OperationType, PRELU);
|
||||
CHECK_TEST_ENUM(OperationType, QUANTIZE);
|
||||
CHECK_TEST_ENUM(OperationType, QUANTIZED_16BIT_LSTM);
|
||||
CHECK_TEST_ENUM(OperationType, RANDOM_MULTINOMIAL);
|
||||
CHECK_TEST_ENUM(OperationType, REDUCE_ALL);
|
||||
CHECK_TEST_ENUM(OperationType, REDUCE_ANY);
|
||||
CHECK_TEST_ENUM(OperationType, REDUCE_MAX);
|
||||
CHECK_TEST_ENUM(OperationType, REDUCE_MIN);
|
||||
CHECK_TEST_ENUM(OperationType, REDUCE_PROD);
|
||||
CHECK_TEST_ENUM(OperationType, REDUCE_SUM);
|
||||
CHECK_TEST_ENUM(OperationType, ROI_ALIGN);
|
||||
CHECK_TEST_ENUM(OperationType, ROI_POOLING);
|
||||
CHECK_TEST_ENUM(OperationType, RSQRT);
|
||||
CHECK_TEST_ENUM(OperationType, SELECT);
|
||||
CHECK_TEST_ENUM(OperationType, SIN);
|
||||
CHECK_TEST_ENUM(OperationType, SLICE);
|
||||
CHECK_TEST_ENUM(OperationType, SPLIT);
|
||||
CHECK_TEST_ENUM(OperationType, SQRT);
|
||||
CHECK_TEST_ENUM(OperationType, TILE);
|
||||
CHECK_TEST_ENUM(OperationType, TOPK_V2);
|
||||
CHECK_TEST_ENUM(OperationType, TRANSPOSE_CONV_2D);
|
||||
CHECK_TEST_ENUM(OperationType, UNIDIRECTIONAL_SEQUENCE_LSTM);
|
||||
CHECK_TEST_ENUM(OperationType, UNIDIRECTIONAL_SEQUENCE_RNN);
|
||||
CHECK_TEST_ENUM(OperationType, RESIZE_NEAREST_NEIGHBOR);
|
||||
|
||||
#undef CHECK_TEST_ENUM
|
||||
|
||||
} // namespace aidl::android::hardware::neuralnetworks
|
neuralnetworks/aidl/vts/functional/TestMain.cpp (new file, 27 lines)
|
@ -0,0 +1,27 @@
|
|||
/*
|
||||
* Copyright (C) 2021 The Android Open Source Project
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#include <android/binder_process.h>
|
||||
#include <gtest/gtest.h>
|
||||
#include "LogTestCaseToLogcat.h"
|
||||
|
||||
int main(int argc, char** argv) {
|
||||
testing::InitGoogleTest(&argc, argv);
|
||||
testing::UnitTest::GetInstance()->listeners().Append(
|
||||
new aidl::android::hardware::neuralnetworks::LogTestCaseToLogcat());
|
||||
ABinderProcess_startThreadPool();
|
||||
return RUN_ALL_TESTS();
|
||||
}
|
neuralnetworks/aidl/vts/functional/Utils.cpp (new file, 252 lines)
|
@ -0,0 +1,252 @@
|
|||
/*
|
||||
* Copyright (C) 2021 The Android Open Source Project
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#include "Utils.h"
|
||||
|
||||
#include <aidl/android/hardware/neuralnetworks/IPreparedModelParcel.h>
|
||||
#include <aidl/android/hardware/neuralnetworks/Operand.h>
|
||||
#include <aidl/android/hardware/neuralnetworks/OperandType.h>
|
||||
#include <android-base/logging.h>
|
||||
#include <android/binder_status.h>
|
||||
#include <android/hardware_buffer.h>
|
||||
|
||||
#include <iostream>
|
||||
#include <limits>
|
||||
#include <numeric>
|
||||
|
||||
#include <MemoryUtils.h>
|
||||
#include <nnapi/SharedMemory.h>
|
||||
#include <nnapi/hal/aidl/Conversions.h>
|
||||
#include <nnapi/hal/aidl/Utils.h>
|
||||
|
||||
namespace aidl::android::hardware::neuralnetworks {
|
||||
|
||||
using test_helper::TestBuffer;
|
||||
using test_helper::TestModel;
|
||||
|
||||
uint32_t sizeOfData(OperandType type) {
|
||||
switch (type) {
|
||||
case OperandType::FLOAT32:
|
||||
case OperandType::INT32:
|
||||
case OperandType::UINT32:
|
||||
case OperandType::TENSOR_FLOAT32:
|
||||
case OperandType::TENSOR_INT32:
|
||||
return 4;
|
||||
case OperandType::TENSOR_QUANT16_SYMM:
|
||||
case OperandType::TENSOR_FLOAT16:
|
||||
case OperandType::FLOAT16:
|
||||
case OperandType::TENSOR_QUANT16_ASYMM:
|
||||
return 2;
|
||||
case OperandType::TENSOR_QUANT8_ASYMM:
|
||||
case OperandType::BOOL:
|
||||
case OperandType::TENSOR_BOOL8:
|
||||
case OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL:
|
||||
case OperandType::TENSOR_QUANT8_SYMM:
|
||||
case OperandType::TENSOR_QUANT8_ASYMM_SIGNED:
|
||||
return 1;
|
||||
case OperandType::SUBGRAPH:
|
||||
return 0;
|
||||
default:
|
||||
CHECK(false) << "Invalid OperandType " << static_cast<uint32_t>(type);
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
static bool isTensor(OperandType type) {
|
||||
switch (type) {
|
||||
case OperandType::FLOAT32:
|
||||
case OperandType::INT32:
|
||||
case OperandType::UINT32:
|
||||
case OperandType::FLOAT16:
|
||||
case OperandType::BOOL:
|
||||
case OperandType::SUBGRAPH:
|
||||
return false;
|
||||
case OperandType::TENSOR_FLOAT32:
|
||||
case OperandType::TENSOR_INT32:
|
||||
case OperandType::TENSOR_QUANT16_SYMM:
|
||||
case OperandType::TENSOR_FLOAT16:
|
||||
case OperandType::TENSOR_QUANT16_ASYMM:
|
||||
case OperandType::TENSOR_QUANT8_ASYMM:
|
||||
case OperandType::TENSOR_BOOL8:
|
||||
case OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL:
|
||||
case OperandType::TENSOR_QUANT8_SYMM:
|
||||
case OperandType::TENSOR_QUANT8_ASYMM_SIGNED:
|
||||
return true;
|
||||
default:
|
||||
CHECK(false) << "Invalid OperandType " << static_cast<uint32_t>(type);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
uint32_t sizeOfData(const Operand& operand) {
|
||||
const uint32_t dataSize = sizeOfData(operand.type);
|
||||
if (isTensor(operand.type) && operand.dimensions.size() == 0) return 0;
|
||||
return std::accumulate(operand.dimensions.begin(), operand.dimensions.end(), dataSize,
|
||||
std::multiplies<>{});
|
||||
}
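// Worked example (a sketch, not part of this change):
//   - a TENSOR_FLOAT32 operand with dimensions {2, 3} occupies 4 * 2 * 3 = 24 bytes;
//   - a TENSOR_FLOAT32 operand with an empty dimensions vector (unspecified rank) yields 0,
//     via the isTensor() early return above.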
|
||||
|
||||
std::unique_ptr<TestAshmem> TestAshmem::create(uint32_t size) {
|
||||
auto ashmem = std::make_unique<TestAshmem>(size);
|
||||
return ashmem->mIsValid ? std::move(ashmem) : nullptr;
|
||||
}
|
||||
|
||||
void TestAshmem::initialize(uint32_t size) {
|
||||
mIsValid = false;
|
||||
ASSERT_GT(size, 0);
|
||||
const auto sharedMemory = nn::createSharedMemory(size).value();
|
||||
mMappedMemory = nn::map(sharedMemory).value();
|
||||
mPtr = static_cast<uint8_t*>(std::get<void*>(mMappedMemory.pointer));
|
||||
CHECK_NE(mPtr, nullptr);
|
||||
mAidlMemory = utils::convert(sharedMemory).value();
|
||||
mIsValid = true;
|
||||
}
|
||||
|
||||
std::unique_ptr<TestBlobAHWB> TestBlobAHWB::create(uint32_t size) {
|
||||
auto ahwb = std::make_unique<TestBlobAHWB>(size);
|
||||
return ahwb->mIsValid ? std::move(ahwb) : nullptr;
|
||||
}
|
||||
|
||||
void TestBlobAHWB::initialize(uint32_t size) {
|
||||
mIsValid = false;
|
||||
ASSERT_GT(size, 0);
|
||||
const auto usage = AHARDWAREBUFFER_USAGE_CPU_READ_OFTEN | AHARDWAREBUFFER_USAGE_CPU_WRITE_OFTEN;
|
||||
const AHardwareBuffer_Desc desc = {
|
||||
.width = size,
|
||||
.height = 1,
|
||||
.layers = 1,
|
||||
.format = AHARDWAREBUFFER_FORMAT_BLOB,
|
||||
.usage = usage,
|
||||
.stride = size,
|
||||
};
|
||||
|
||||
ASSERT_EQ(AHardwareBuffer_allocate(&desc, &mAhwb), 0);
|
||||
ASSERT_NE(mAhwb, nullptr);
|
||||
|
||||
const auto sharedMemory = nn::createSharedMemoryFromAHWB(*mAhwb).value();
|
||||
mMapping = nn::map(sharedMemory).value();
|
||||
mPtr = static_cast<uint8_t*>(std::get<void*>(mMapping.pointer));
|
||||
CHECK_NE(mPtr, nullptr);
|
||||
mAidlMemory = utils::convert(sharedMemory).value();
|
||||
|
||||
mIsValid = true;
|
||||
}
|
||||
|
||||
TestBlobAHWB::~TestBlobAHWB() {
|
||||
if (mAhwb) {
|
||||
AHardwareBuffer_unlock(mAhwb, nullptr);
|
||||
AHardwareBuffer_release(mAhwb);
|
||||
}
|
||||
}
|
||||
|
||||
std::string gtestCompliantName(std::string name) {
|
||||
// gtest test names must only contain alphanumeric characters
|
||||
std::replace_if(
|
||||
name.begin(), name.end(), [](char c) { return !std::isalnum(c); }, '_');
|
||||
return name;
|
||||
}
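// For example (a sketch, not part of this change; the instance name is illustrative):
//   gtestCompliantName("android.hardware.neuralnetworks.IDevice/default")
//       == "android_hardware_neuralnetworks_IDevice_default"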
|
||||
|
||||
::std::ostream& operator<<(::std::ostream& os, ErrorStatus errorStatus) {
|
||||
return os << toString(errorStatus);
|
||||
}
|
||||
|
||||
Request ExecutionContext::createRequest(const TestModel& testModel, MemoryType memoryType) {
|
||||
CHECK(memoryType == MemoryType::ASHMEM || memoryType == MemoryType::BLOB_AHWB);
|
||||
|
||||
// Model inputs.
|
||||
std::vector<RequestArgument> inputs(testModel.main.inputIndexes.size());
|
||||
size_t inputSize = 0;
|
||||
for (uint32_t i = 0; i < testModel.main.inputIndexes.size(); i++) {
|
||||
const auto& op = testModel.main.operands[testModel.main.inputIndexes[i]];
|
||||
if (op.data.size() == 0) {
|
||||
// Omitted input.
|
||||
inputs[i] = {.hasNoValue = true};
|
||||
} else {
|
||||
DataLocation loc = {.poolIndex = kInputPoolIndex,
|
||||
.offset = static_cast<int64_t>(inputSize),
|
||||
.length = static_cast<int64_t>(op.data.size())};
|
||||
inputSize += op.data.alignedSize();
|
||||
inputs[i] = {.hasNoValue = false, .location = loc, .dimensions = {}};
|
||||
}
|
||||
}
|
||||
|
||||
// Model outputs.
|
||||
std::vector<RequestArgument> outputs(testModel.main.outputIndexes.size());
|
||||
size_t outputSize = 0;
|
||||
for (uint32_t i = 0; i < testModel.main.outputIndexes.size(); i++) {
|
||||
const auto& op = testModel.main.operands[testModel.main.outputIndexes[i]];
|
||||
|
||||
// In the case of zero-sized output, we should at least provide a one-byte buffer.
|
||||
// This is because zero-sized tensors are only supported internally to the driver, or
|
||||
// reported in output shapes. It is illegal for the client to pre-specify a zero-sized
|
||||
// tensor as model output. Otherwise, we will have two semantic conflicts:
|
||||
// - "Zero dimension" conflicts with "unspecified dimension".
|
||||
// - "Omitted operand buffer" conflicts with "zero-sized operand buffer".
|
||||
size_t bufferSize = std::max<size_t>(op.data.size(), 1);
|
||||
|
||||
DataLocation loc = {.poolIndex = kOutputPoolIndex,
|
||||
.offset = static_cast<int64_t>(outputSize),
|
||||
.length = static_cast<int64_t>(bufferSize)};
|
||||
outputSize += op.data.size() == 0 ? TestBuffer::kAlignment : op.data.alignedSize();
|
||||
outputs[i] = {.hasNoValue = false, .location = loc, .dimensions = {}};
|
||||
}
|
||||
|
||||
// Allocate memory pools.
|
||||
if (memoryType == MemoryType::ASHMEM) {
|
||||
mInputMemory = TestAshmem::create(inputSize);
|
||||
mOutputMemory = TestAshmem::create(outputSize);
|
||||
} else {
|
||||
mInputMemory = TestBlobAHWB::create(inputSize);
|
||||
mOutputMemory = TestBlobAHWB::create(outputSize);
|
||||
}
|
||||
CHECK_NE(mInputMemory, nullptr);
|
||||
CHECK_NE(mOutputMemory, nullptr);
|
||||
|
||||
auto copiedInputMemory = utils::clone(*mInputMemory->getAidlMemory());
|
||||
CHECK(copiedInputMemory.has_value()) << copiedInputMemory.error().message;
|
||||
auto copiedOutputMemory = utils::clone(*mOutputMemory->getAidlMemory());
|
||||
CHECK(copiedOutputMemory.has_value()) << copiedOutputMemory.error().message;
|
||||
|
||||
std::vector<RequestMemoryPool> pools;
|
||||
pools.push_back(RequestMemoryPool::make<RequestMemoryPool::Tag::pool>(
|
||||
std::move(copiedInputMemory).value()));
|
||||
pools.push_back(RequestMemoryPool::make<RequestMemoryPool::Tag::pool>(
|
||||
std::move(copiedOutputMemory).value()));
|
||||
|
||||
// Copy input data to the memory pool.
|
||||
uint8_t* inputPtr = mInputMemory->getPointer();
|
||||
for (uint32_t i = 0; i < testModel.main.inputIndexes.size(); i++) {
|
||||
const auto& op = testModel.main.operands[testModel.main.inputIndexes[i]];
|
||||
if (op.data.size() > 0) {
|
||||
const uint8_t* begin = op.data.get<uint8_t>();
|
||||
const uint8_t* end = begin + op.data.size();
|
||||
std::copy(begin, end, inputPtr + inputs[i].location.offset);
|
||||
}
|
||||
}
|
||||
|
||||
return {.inputs = std::move(inputs), .outputs = std::move(outputs), .pools = std::move(pools)};
|
||||
}
|
||||
|
||||
std::vector<TestBuffer> ExecutionContext::getOutputBuffers(const Request& request) const {
|
||||
// Copy out output results.
|
||||
uint8_t* outputPtr = mOutputMemory->getPointer();
|
||||
std::vector<TestBuffer> outputBuffers;
|
||||
for (const auto& output : request.outputs) {
|
||||
outputBuffers.emplace_back(output.location.length, outputPtr + output.location.offset);
|
||||
}
|
||||
return outputBuffers;
|
||||
}
|
||||
|
||||
} // namespace aidl::android::hardware::neuralnetworks
|
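Taken together, the helpers above support the execution pattern used throughout these tests. A condensed sketch follows (illustrative only, not part of this change); it assumes a prepared model and a TestModel in scope and mirrors the flow of runTests in QualityOfServiceTests.cpp:
ExecutionContext context;
const Request request = context.createRequest(testModel);  // MemoryType::ASHMEM by default

ExecutionResult executionResult;
const auto ret = preparedModel->executeSynchronously(request, /*measureTiming=*/false, kNoDeadline,
                                                     kOmittedTimeoutDuration, &executionResult);
ASSERT_TRUE(ret.isOk());

// Copy the outputs back out of the output memory pool and compare against the golden buffers.
const std::vector<TestBuffer> outputs = context.getOutputBuffers(request);
checkResults(testModel, outputs);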
neuralnetworks/aidl/vts/functional/Utils.h (new file, 153 lines)
|
@ -0,0 +1,153 @@
|
|||
/*
|
||||
* Copyright (C) 2021 The Android Open Source Project
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#ifndef ANDROID_HARDWARE_NEURALNETWORKS_AIDL_UTILS_H
|
||||
#define ANDROID_HARDWARE_NEURALNETWORKS_AIDL_UTILS_H
|
||||
|
||||
#include <android-base/logging.h>
|
||||
#include <android/hardware_buffer.h>
|
||||
#include <gtest/gtest.h>
|
||||
|
||||
#include <algorithm>
|
||||
#include <iosfwd>
|
||||
#include <string>
|
||||
#include <utility>
|
||||
#include <vector>
|
||||
|
||||
#include <aidl/android/hardware/neuralnetworks/IDevice.h>
|
||||
#include <aidl/android/hardware/neuralnetworks/Memory.h>
|
||||
#include <aidl/android/hardware/neuralnetworks/Operand.h>
|
||||
#include <aidl/android/hardware/neuralnetworks/OperandType.h>
|
||||
#include <aidl/android/hardware/neuralnetworks/Priority.h>
|
||||
#include <aidl/android/hardware/neuralnetworks/Request.h>
|
||||
|
||||
#include <TestHarness.h>
|
||||
#include <nnapi/SharedMemory.h>
|
||||
|
||||
namespace aidl::android::hardware::neuralnetworks {
|
||||
|
||||
namespace nn = ::android::nn;
|
||||
|
||||
inline constexpr Priority kDefaultPriority = Priority::MEDIUM;
|
||||
|
||||
inline constexpr Timing kNoTiming = {.timeOnDevice = -1, .timeInDriver = -1};
|
||||
inline constexpr int64_t kNoDeadline = -1;
|
||||
inline constexpr int64_t kOmittedTimeoutDuration = -1;
|
||||
inline constexpr int64_t kNoDuration = -1;
|
||||
inline const std::vector<uint8_t> kEmptyCacheToken(IDevice::BYTE_SIZE_OF_CACHE_TOKEN);
|
||||
|
||||
// Returns the amount of space needed to store a value of the specified type.
|
||||
//
|
||||
// Aborts if the specified type is an extension type or OEM type.
|
||||
uint32_t sizeOfData(OperandType type);
|
||||
|
||||
// Returns the amount of space needed to store a value of the dimensions and
|
||||
// type of this operand. For a non-extension, non-OEM tensor with unspecified
|
||||
// rank or at least one unspecified dimension, returns zero.
|
||||
//
|
||||
// Aborts if the specified type is an extension type or OEM type.
|
||||
uint32_t sizeOfData(const Operand& operand);
|
||||
|
||||
// Convenience class to manage the lifetime of memory resources.
|
||||
class TestMemoryBase {
|
||||
DISALLOW_COPY_AND_ASSIGN(TestMemoryBase);
|
||||
|
||||
public:
|
||||
TestMemoryBase() = default;
|
||||
virtual ~TestMemoryBase() = default;
|
||||
uint8_t* getPointer() const { return mPtr; }
|
||||
const Memory* getAidlMemory() const { return &mAidlMemory; }
|
||||
|
||||
protected:
|
||||
uint8_t* mPtr = nullptr;
|
||||
Memory mAidlMemory;
|
||||
bool mIsValid = false;
|
||||
};
|
||||
|
||||
class TestAshmem : public TestMemoryBase {
|
||||
public:
|
||||
static std::unique_ptr<TestAshmem> create(uint32_t size);
|
||||
|
||||
// Prefer TestAshmem::create.
|
||||
// The constructor calls initialize, which constructs the memory resources. This is a workaround
|
||||
// for the fact that gtest macros cannot be used directly in a constructor.
|
||||
TestAshmem(uint32_t size) { initialize(size); }
|
||||
|
||||
private:
|
||||
void initialize(uint32_t size);
|
||||
nn::Mapping mMappedMemory;
|
||||
};
|
||||
|
||||
class TestBlobAHWB : public TestMemoryBase {
|
||||
public:
|
||||
static std::unique_ptr<TestBlobAHWB> create(uint32_t size);
|
||||
|
||||
// Prefer TestBlobAHWB::create.
|
||||
// The constructor calls initialize, which constructs the memory resources. This is a
|
||||
// workaround for the fact that gtest macros cannot be used directly in a constructor.
|
||||
TestBlobAHWB(uint32_t size) { initialize(size); }
|
||||
~TestBlobAHWB();
|
||||
|
||||
private:
|
||||
void initialize(uint32_t size);
|
||||
AHardwareBuffer* mAhwb = nullptr;
|
||||
nn::Mapping mMapping;
|
||||
};
|
||||
|
||||
enum class MemoryType { ASHMEM, BLOB_AHWB, DEVICE };
|
||||
|
||||
// Manages the lifetime of memory resources used in an execution.
|
||||
class ExecutionContext {
|
||||
DISALLOW_COPY_AND_ASSIGN(ExecutionContext);
|
||||
|
||||
public:
|
||||
static constexpr uint32_t kInputPoolIndex = 0;
|
||||
static constexpr uint32_t kOutputPoolIndex = 1;
|
||||
|
||||
ExecutionContext() = default;
|
||||
|
||||
// Create an AIDL Request from the TestModel struct.
|
||||
Request createRequest(const test_helper::TestModel& testModel,
|
||||
MemoryType memoryType = MemoryType::ASHMEM);
|
||||
|
||||
// After execution, copy out output results from the output memory pool.
|
||||
std::vector<test_helper::TestBuffer> getOutputBuffers(const Request& request) const;
|
||||
|
||||
private:
|
||||
std::unique_ptr<TestMemoryBase> mInputMemory, mOutputMemory;
|
||||
};
|
||||
|
||||
template <typename Type>
|
||||
using Named = std::pair<std::string, Type>;
|
||||
|
||||
template <typename Type>
|
||||
const std::string& getName(const Named<Type>& namedData) {
|
||||
return namedData.first;
|
||||
}
|
||||
|
||||
template <typename Type>
|
||||
const Type& getData(const Named<Type>& namedData) {
|
||||
return namedData.second;
|
||||
}
|
||||
|
||||
std::string gtestCompliantName(std::string name);
|
||||
|
||||
// pretty-print values for error messages
|
||||
::std::ostream& operator<<(::std::ostream& os, ErrorStatus errorStatus);
|
||||
|
||||
} // namespace aidl::android::hardware::neuralnetworks
|
||||
|
||||
#endif // ANDROID_HARDWARE_NEURALNETWORKS_AIDL_UTILS_H
|
neuralnetworks/aidl/vts/functional/ValidateModel.cpp (new file, 1338 lines)
File diff suppressed because it is too large
neuralnetworks/aidl/vts/functional/ValidateRequest.cpp (new file, 126 lines)
|
@ -0,0 +1,126 @@
|
|||
/*
|
||||
* Copyright (C) 2021 The Android Open Source Project
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#define LOG_TAG "neuralnetworks_aidl_hal_test"
|
||||
|
||||
#include <android/binder_auto_utils.h>
|
||||
|
||||
#include <chrono>
|
||||
|
||||
#include <TestHarness.h>
|
||||
#include <nnapi/hal/aidl/Utils.h>
|
||||
|
||||
#include "Callbacks.h"
|
||||
#include "GeneratedTestHarness.h"
|
||||
#include "Utils.h"
|
||||
#include "VtsHalNeuralnetworks.h"
|
||||
|
||||
namespace aidl::android::hardware::neuralnetworks::vts::functional {
|
||||
|
||||
using ExecutionMutation = std::function<void(Request*)>;
|
||||
|
||||
///////////////////////// UTILITY FUNCTIONS /////////////////////////
|
||||
|
||||
// Primary validation function. This function will take a valid request, apply a
|
||||
// mutation to it to invalidate the request, then pass it to interface calls
|
||||
// that use the request.
|
||||
static void validate(const std::shared_ptr<IPreparedModel>& preparedModel,
|
||||
const std::string& message, const Request& originalRequest,
|
||||
const ExecutionMutation& mutate) {
|
||||
Request request = utils::clone(originalRequest).value();
|
||||
mutate(&request);
|
||||
|
||||
// We'd like to test both with timing requested and without timing
|
||||
// requested. Rather than running each test both ways, we'll decide whether
|
||||
// to request timing by hashing the message. We do not use std::hash because
|
||||
// it is not guaranteed stable across executions.
|
||||
char hash = 0;
|
||||
for (auto c : message) {
|
||||
hash ^= c;
|
||||
};
|
||||
bool measure = (hash & 1);
|
||||
|
||||
// synchronous
|
||||
{
|
||||
SCOPED_TRACE(message + " [executeSynchronously]");
|
||||
ExecutionResult executionResult;
|
||||
const auto executeStatus = preparedModel->executeSynchronously(
|
||||
request, measure, kNoDeadline, kOmittedTimeoutDuration, &executionResult);
|
||||
ASSERT_FALSE(executeStatus.isOk());
|
||||
ASSERT_EQ(executeStatus.getExceptionCode(), EX_SERVICE_SPECIFIC);
|
||||
ASSERT_EQ(static_cast<ErrorStatus>(executeStatus.getServiceSpecificError()),
|
||||
ErrorStatus::INVALID_ARGUMENT);
|
||||
}
|
||||
|
||||
// fenced
|
||||
{
|
||||
SCOPED_TRACE(message + " [executeFenced]");
|
||||
ndk::ScopedFileDescriptor syncFence;
|
||||
std::shared_ptr<IFencedExecutionCallback> callback;
|
||||
const auto executeStatus = preparedModel->executeFenced(request, {}, false, kNoDeadline,
|
||||
kOmittedTimeoutDuration,
|
||||
kNoDuration, &syncFence, &callback);
|
||||
ASSERT_FALSE(executeStatus.isOk());
|
||||
ASSERT_EQ(executeStatus.getExceptionCode(), EX_SERVICE_SPECIFIC);
|
||||
ASSERT_EQ(static_cast<ErrorStatus>(executeStatus.getServiceSpecificError()),
|
||||
ErrorStatus::INVALID_ARGUMENT);
|
||||
}
|
||||
}
|
||||
|
||||
///////////////////////// REMOVE INPUT ////////////////////////////////////
|
||||
|
||||
static void removeInputTest(const std::shared_ptr<IPreparedModel>& preparedModel,
|
||||
const Request& request) {
|
||||
for (size_t input = 0; input < request.inputs.size(); ++input) {
|
||||
const std::string message = "removeInput: removed input " + std::to_string(input);
|
||||
validate(preparedModel, message, request, [input](Request* request) {
|
||||
request->inputs.erase(request->inputs.begin() + input);
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
///////////////////////// REMOVE OUTPUT ////////////////////////////////////
|
||||
|
||||
static void removeOutputTest(const std::shared_ptr<IPreparedModel>& preparedModel,
|
||||
const Request& request) {
|
||||
for (size_t output = 0; output < request.outputs.size(); ++output) {
|
||||
const std::string message = "removeOutput: removed Output " + std::to_string(output);
|
||||
validate(preparedModel, message, request, [output](Request* request) {
|
||||
request->outputs.erase(request->outputs.begin() + output);
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
///////////////////////////// ENTRY POINT //////////////////////////////////

void validateRequest(const std::shared_ptr<IPreparedModel>& preparedModel, const Request& request) {
    removeInputTest(preparedModel, request);
    removeOutputTest(preparedModel, request);
}

void validateRequestFailure(const std::shared_ptr<IPreparedModel>& preparedModel,
                            const Request& request) {
    SCOPED_TRACE("Expecting request to fail [executeSynchronously]");
    ExecutionResult executionResult;
    const auto executeStatus = preparedModel->executeSynchronously(
            request, false, kNoDeadline, kOmittedTimeoutDuration, &executionResult);

    ASSERT_FALSE(executeStatus.isOk());
    ASSERT_EQ(executeStatus.getExceptionCode(), EX_SERVICE_SPECIFIC);
    ASSERT_NE(static_cast<ErrorStatus>(executeStatus.getServiceSpecificError()), ErrorStatus::NONE);
}

} // namespace aidl::android::hardware::neuralnetworks::vts::functional

neuralnetworks/aidl/vts/functional/VtsHalNeuralnetworks.cpp (new file, 194 lines)
@ -0,0 +1,194 @@
/*
 * Copyright (C) 2021 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define LOG_TAG "neuralnetworks_aidl_hal_test"
#include "VtsHalNeuralnetworks.h"

#include <android-base/logging.h>
#include <android/binder_auto_utils.h>
#include <android/binder_interface_utils.h>
#include <android/binder_manager.h>
#include <android/binder_status.h>
#include <gtest/gtest.h>
#include <memory>
#include <string>
#include <utility>

#include <TestHarness.h>
#include <aidl/Vintf.h>
#include <nnapi/hal/aidl/Conversions.h>

#include "Callbacks.h"
#include "GeneratedTestHarness.h"
#include "Utils.h"

namespace aidl::android::hardware::neuralnetworks::vts::functional {

using implementation::PreparedModelCallback;

// internal helper function
void createPreparedModel(const std::shared_ptr<IDevice>& device, const Model& model,
                         std::shared_ptr<IPreparedModel>* preparedModel, bool reportSkipping) {
    ASSERT_NE(nullptr, preparedModel);
    *preparedModel = nullptr;

    // see if service can handle model
    std::vector<bool> supportedOperations;
    const auto supportedCallStatus = device->getSupportedOperations(model, &supportedOperations);
    ASSERT_TRUE(supportedCallStatus.isOk());
    ASSERT_NE(0ul, supportedOperations.size());
    const bool fullySupportsModel = std::all_of(
            supportedOperations.begin(), supportedOperations.end(), [](bool v) { return v; });

    // launch prepare model
    const std::shared_ptr<PreparedModelCallback> preparedModelCallback =
            ndk::SharedRefBase::make<PreparedModelCallback>();
    const auto prepareLaunchStatus =
            device->prepareModel(model, ExecutionPreference::FAST_SINGLE_ANSWER, kDefaultPriority,
                                 kNoDeadline, {}, {}, kEmptyCacheToken, preparedModelCallback);
    ASSERT_TRUE(prepareLaunchStatus.isOk()) << prepareLaunchStatus.getDescription();

    // retrieve prepared model
    preparedModelCallback->wait();
    const ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
    *preparedModel = preparedModelCallback->getPreparedModel();

    // The getSupportedOperations call returns a list of operations that are guaranteed not to
    // fail if prepareModel is called, and 'fullySupportsModel' is true if and only if the entire
    // model is guaranteed. If a driver has any doubt that it can prepare an operation, it must
    // return false. So here, if a driver isn't sure whether it can support an operation, but
    // reports that it successfully prepared the model, the test can continue.
    if (!fullySupportsModel && prepareReturnStatus != ErrorStatus::NONE) {
        ASSERT_EQ(nullptr, preparedModel->get());
        if (!reportSkipping) {
            return;
        }
        LOG(INFO) << "NN VTS: Early termination of test because vendor service cannot prepare "
                     "model that it does not support.";
        std::cout << "[          ]   Early termination of test because vendor service cannot "
                     "prepare model that it does not support."
                  << std::endl;
        GTEST_SKIP();
    }

    ASSERT_EQ(ErrorStatus::NONE, prepareReturnStatus);
    ASSERT_NE(nullptr, preparedModel->get());
}

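When a driver skips because the model is only partially supported, it can be useful to see which operations were rejected. A minimal sketch follows (not part of this change; it assumes the AIDL Model's main subgraph carries the operation list, with one getSupportedOperations entry per operation).

// Illustrative sketch (not part of this change): log which operations the
// driver reported as unsupported for a given model.
static void logUnsupportedOperations(const Model& model,
                                     const std::vector<bool>& supportedOperations) {
    // getSupportedOperations returns one entry per operation in model.main.
    ASSERT_EQ(model.main.operations.size(), supportedOperations.size());
    for (size_t i = 0; i < supportedOperations.size(); ++i) {
        if (!supportedOperations[i]) {
            LOG(INFO) << "NN VTS: operation #" << i << " (type "
                      << static_cast<int32_t>(model.main.operations[i].type)
                      << ") is not supported by this driver";
        }
    }
}
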
void NeuralNetworksAidlTest::SetUp() {
    testing::TestWithParam<NeuralNetworksAidlTestParam>::SetUp();
    ASSERT_NE(kDevice, nullptr);
}

static NamedDevice makeNamedDevice(const std::string& name) {
    ndk::SpAIBinder binder(AServiceManager_getService(name.c_str()));
    return {name, IDevice::fromBinder(binder)};
}

static std::vector<NamedDevice> getNamedDevicesImpl() {
    // Retrieves the names of all service instances that implement IDevice,
    // including any lazy HAL instances.
    const std::vector<std::string> names = ::android::getAidlHalInstanceNames(IDevice::descriptor);

    // Get a handle to each device and pair it with its name.
    std::vector<NamedDevice> namedDevices;
    namedDevices.reserve(names.size());
    std::transform(names.begin(), names.end(), std::back_inserter(namedDevices), makeNamedDevice);
    return namedDevices;
}

const std::vector<NamedDevice>& getNamedDevices() {
    const static std::vector<NamedDevice> devices = getNamedDevicesImpl();
    return devices;
}

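The parameterized harness runs every suite against each registered instance. Outside the harness, the same list can be used to pick a single device by its instance name; a small sketch (illustrative only, not part of this change):

// Illustrative sketch (not part of this change): look up one device from the
// already-enumerated list by its registered instance name.
static std::shared_ptr<IDevice> findDeviceByName(const std::string& wantedName) {
    for (const NamedDevice& namedDevice : getNamedDevices()) {
        if (getName(namedDevice) == wantedName) {
            return getData(namedDevice);
        }
    }
    return nullptr;  // no such instance registered
}
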
std::string printNeuralNetworksAidlTest(
        const testing::TestParamInfo<NeuralNetworksAidlTestParam>& info) {
    return gtestCompliantName(getName(info.param));
}

INSTANTIATE_DEVICE_TEST(NeuralNetworksAidlTest);

// Forward declaration from ValidateModel.cpp
void validateModel(const std::shared_ptr<IDevice>& device, const Model& model);
// Forward declaration from ValidateRequest.cpp
void validateRequest(const std::shared_ptr<IPreparedModel>& preparedModel, const Request& request);
// Forward declaration from ValidateRequest.cpp
void validateRequestFailure(const std::shared_ptr<IPreparedModel>& preparedModel,
                            const Request& request);

void validateEverything(const std::shared_ptr<IDevice>& device, const Model& model,
                        const Request& request) {
    validateModel(device, model);

    // Create IPreparedModel.
    std::shared_ptr<IPreparedModel> preparedModel;
    createPreparedModel(device, model, &preparedModel);
    if (preparedModel == nullptr) return;

    validateRequest(preparedModel, request);
    // HIDL also had a test that expected executeFenced to fail on a received null fd (-1). This
    // is not allowed in AIDL and will result in EX_TRANSACTION_FAILED.
}

void validateFailure(const std::shared_ptr<IDevice>& device, const Model& model,
                     const Request& request) {
    // TODO: Should this always succeed?
    // What if the invalid input is part of the model (i.e., a parameter)?
    validateModel(device, model);

    // Create IPreparedModel.
    std::shared_ptr<IPreparedModel> preparedModel;
    createPreparedModel(device, model, &preparedModel);
    if (preparedModel == nullptr) return;

    validateRequestFailure(preparedModel, request);
}

TEST_P(ValidationTest, Test) {
    const Model model = createModel(kTestModel);
    ExecutionContext context;
    const Request request = context.createRequest(kTestModel);
    if (kTestModel.expectFailure) {
        validateFailure(kDevice, model, request);
    } else {
        validateEverything(kDevice, model, request);
    }
}

INSTANTIATE_GENERATED_TEST(ValidationTest, [](const std::string& testName) {
    // Skip validation for the "inputs_as_internal" and "all_tensors_as_inputs"
    // generated tests.
    return testName.find("inputs_as_internal") == std::string::npos &&
           testName.find("all_tensors_as_inputs") == std::string::npos;
});

std::string toString(Executor executor) {
    switch (executor) {
        case Executor::ASYNC:
            return "ASYNC";
        case Executor::SYNC:
            return "SYNC";
        case Executor::BURST:
            return "BURST";
        case Executor::FENCED:
            return "FENCED";
        default:
            CHECK(false);
    }
}

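toString(Executor) exists mainly to label test output. One way it might be used, sketched here as an illustration (forEachExecutor and runWith are hypothetical, not part of this change), is to tag a trace scope while iterating over every execution path:

// Illustrative sketch (not part of this change): label a trace scope for each
// executor variant so failures report which execution path was exercised.
template <typename RunWithExecutor>
void forEachExecutor(const RunWithExecutor& runWith) {
    constexpr Executor kExecutorList[] = {Executor::ASYNC, Executor::SYNC, Executor::BURST,
                                          Executor::FENCED};
    for (const Executor executor : kExecutorList) {
        SCOPED_TRACE("executor: " + toString(executor));
        runWith(executor);
    }
}
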
} // namespace aidl::android::hardware::neuralnetworks::vts::functional

neuralnetworks/aidl/vts/functional/VtsHalNeuralnetworks.h (new file, 61 lines)
@ -0,0 +1,61 @@
/*
 * Copyright (C) 2021 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ANDROID_HARDWARE_NEURALNETWORKS_AIDL_VTS_HAL_NEURALNETWORKS_H
#define ANDROID_HARDWARE_NEURALNETWORKS_AIDL_VTS_HAL_NEURALNETWORKS_H

#include <gtest/gtest.h>
#include <vector>

#include <aidl/android/hardware/neuralnetworks/IDevice.h>

#include "Callbacks.h"
#include "Utils.h"

namespace aidl::android::hardware::neuralnetworks::vts::functional {

using NamedDevice = Named<std::shared_ptr<IDevice>>;
using NeuralNetworksAidlTestParam = NamedDevice;

class NeuralNetworksAidlTest : public testing::TestWithParam<NeuralNetworksAidlTestParam> {
  protected:
    void SetUp() override;
    const std::shared_ptr<IDevice> kDevice = getData(GetParam());
};

const std::vector<NamedDevice>& getNamedDevices();

std::string printNeuralNetworksAidlTest(
        const testing::TestParamInfo<NeuralNetworksAidlTestParam>& info);

#define INSTANTIATE_DEVICE_TEST(TestSuite)                                                 \
    GTEST_ALLOW_UNINSTANTIATED_PARAMETERIZED_TEST(TestSuite);                              \
    INSTANTIATE_TEST_SUITE_P(PerInstance, TestSuite, testing::ValuesIn(getNamedDevices()), \
                             printNeuralNetworksAidlTest)

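Any fixture derived from NeuralNetworksAidlTest can be fanned out across all registered IDevice instances with this macro. A usage sketch (the fixture, test name, and version check are hypothetical, not part of this header; it would live in a test .cpp):

// Illustrative sketch (not part of this header): derive a fixture and
// instantiate it once per registered IDevice instance.
class MyAidlDeviceTest : public NeuralNetworksAidlTest {};

TEST_P(MyAidlDeviceTest, DeviceReportsInterfaceVersion) {
    int32_t version = 0;
    ASSERT_TRUE(kDevice->getInterfaceVersion(&version).isOk());
    ASSERT_GE(version, 1);
}

INSTANTIATE_DEVICE_TEST(MyAidlDeviceTest);
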
// Create an IPreparedModel object. If the model cannot be prepared,
// "preparedModel" will be nullptr instead.
void createPreparedModel(const std::shared_ptr<IDevice>& device, const Model& model,
                         std::shared_ptr<IPreparedModel>* preparedModel,
                         bool reportSkipping = true);

enum class Executor { ASYNC, SYNC, BURST, FENCED };

std::string toString(Executor executor);

} // namespace aidl::android::hardware::neuralnetworks::vts::functional

#endif // ANDROID_HARDWARE_NEURALNETWORKS_AIDL_VTS_HAL_NEURALNETWORKS_H