Merge "NNAPI VTS update in response to utility function change"

Author: Michael Butler
Date: 2019-10-28 21:53:52 +00:00
Committer: Gerrit Code Review
commit 7261ba7d10
6 changed files with 44 additions and 24 deletions

View file

@@ -33,6 +33,7 @@
 #include <gtest/gtest.h>
 #include <algorithm>
+#include <chrono>
 #include <iostream>
 #include <numeric>
@@ -190,7 +191,8 @@ static Return<ErrorStatus> ExecutePreparedModel(const sp<IPreparedModel>& prepar
 }
 static std::shared_ptr<::android::nn::ExecutionBurstController> CreateBurst(
         const sp<IPreparedModel>& preparedModel) {
-    return android::nn::ExecutionBurstController::create(preparedModel, /*blocking=*/true);
+    return android::nn::ExecutionBurstController::create(preparedModel,
+                                                         std::chrono::microseconds{0});
 }
 enum class Executor { ASYNC, SYNC, BURST };
@@ -254,8 +256,10 @@ void EvaluatePreparedModel(const sp<IPreparedModel>& preparedModel, const TestMo
             }
             // execute burst
-            std::tie(executionStatus, outputShapes, timing) =
+            int n;
+            std::tie(n, outputShapes, timing, std::ignore) =
                     controller->compute(request, measure, keys);
+            executionStatus = nn::convertResultCodeToErrorStatus(n);
             break;
         }
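
Note on the hunk above: ExecutionBurstController::compute() now returns (int resultCode, outputShapes, timing, fallback) instead of (ErrorStatus, outputShapes, timing), and the result code is mapped back with nn::convertResultCodeToErrorStatus(). A minimal sketch of a wrapper that restores the old triple for harness code, assuming the test file's existing includes and that the burst memory keys are a std::vector<intptr_t> (the key type is not visible in this diff):

// Sketch only, not part of the patch.
static std::tuple<ErrorStatus, std::vector<OutputShape>, Timing> computeAsErrorStatus(
        const std::shared_ptr<::android::nn::ExecutionBurstController>& controller,
        const Request& request, MeasureTiming measure, const std::vector<intptr_t>& keys) {
    // compute() now reports a plain int result code plus a "fallback" hint.
    const auto [n, outputShapes, timing, fallback] = controller->compute(request, measure, keys);
    (void)fallback;  // the harness above discards this via std::ignore
    // Map the result code back onto the HAL ErrorStatus used by the assertions.
    return {nn::convertResultCodeToErrorStatus(n), outputShapes, timing};
}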

View file

@@ -26,6 +26,7 @@
 #include "Utils.h"
 #include <android-base/logging.h>
+#include <chrono>
 #include <cstring>
 namespace android::hardware::neuralnetworks::V1_2::vts::functional {
@@ -64,9 +65,9 @@ static void createBurst(const sp<IPreparedModel>& preparedModel, const sp<IBurst
     // create FMQ objects
     auto [fmqRequestChannel, fmqRequestDescriptor] =
-            RequestChannelSender::create(kExecutionBurstChannelLength, /*blocking=*/true);
+            RequestChannelSender::create(kExecutionBurstChannelLength);
     auto [fmqResultChannel, fmqResultDescriptor] =
-            ResultChannelReceiver::create(resultChannelLength, /*blocking=*/true);
+            ResultChannelReceiver::create(resultChannelLength, std::chrono::microseconds{0});
     ASSERT_NE(nullptr, fmqRequestChannel.get());
     ASSERT_NE(nullptr, fmqResultChannel.get());
     ASSERT_NE(nullptr, fmqRequestDescriptor);
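
For reference, the new FMQ setup above restated as a small helper with the polling window named. Assumptions not shown in this diff: create() hands back a std::unique_ptr-style handle (only .get() appears here), and a zero-microsecond window means the receiver blocks on the FMQ rather than busy-polling.

// Sketch only, not part of the patch.
constexpr std::chrono::microseconds kNoPollingTimeWindow{0};

static void checkBurstChannelCreation(size_t resultChannelLength) {
    // The request channel sender no longer takes a /*blocking=*/ flag.
    auto [fmqRequestChannel, fmqRequestDescriptor] =
            RequestChannelSender::create(kExecutionBurstChannelLength);
    // The result channel receiver now takes a polling time window instead.
    auto [fmqResultChannel, fmqResultDescriptor] =
            ResultChannelReceiver::create(resultChannelLength, kNoPollingTimeWindow);
    ASSERT_NE(nullptr, fmqRequestChannel.get());
    ASSERT_NE(nullptr, fmqResultChannel.get());
    ASSERT_NE(nullptr, fmqRequestDescriptor);
    ASSERT_NE(nullptr, fmqResultDescriptor);
}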
@@ -293,8 +294,10 @@ static void validateBurstFmqLength(const sp<IPreparedModel>& preparedModel,
     }
     // collect serialized result by running regular burst
-    const auto [statusRegular, outputShapesRegular, timingRegular] =
+    const auto [nRegular, outputShapesRegular, timingRegular, fallbackRegular] =
             controllerRegular->compute(request, MeasureTiming::NO, keys);
+    const ErrorStatus statusRegular = nn::convertResultCodeToErrorStatus(nRegular);
+    EXPECT_FALSE(fallbackRegular);
     // skip test if regular burst output isn't useful for testing a failure
     // caused by having too small of a length for the result FMQ
@@ -307,11 +310,13 @@ static void validateBurstFmqLength(const sp<IPreparedModel>& preparedModel,
     // by this point, execution should fail because the result channel isn't
     // large enough to return the serialized result
-    const auto [statusSmall, outputShapesSmall, timingSmall] =
+    const auto [nSmall, outputShapesSmall, timingSmall, fallbackSmall] =
             controllerSmall->compute(request, MeasureTiming::NO, keys);
+    const ErrorStatus statusSmall = nn::convertResultCodeToErrorStatus(nSmall);
     EXPECT_NE(ErrorStatus::NONE, statusSmall);
     EXPECT_EQ(0u, outputShapesSmall.size());
     EXPECT_TRUE(badTiming(timingSmall));
+    EXPECT_FALSE(fallbackSmall);
 }
 static bool isSanitized(const FmqResultDatum& datum) {

View file

@@ -16,6 +16,7 @@
 #define LOG_TAG "neuralnetworks_hidl_hal_test"
+#include <chrono>
 #include "1.0/Utils.h"
 #include "1.2/Callbacks.h"
 #include "ExecutionBurstController.h"
@@ -94,7 +95,8 @@ static void validate(const sp<IPreparedModel>& preparedModel, const std::string&
     // create burst
     std::shared_ptr<::android::nn::ExecutionBurstController> burst =
-            android::nn::ExecutionBurstController::create(preparedModel, /*blocking=*/true);
+            android::nn::ExecutionBurstController::create(preparedModel,
+                                                          std::chrono::microseconds{0});
     ASSERT_NE(nullptr, burst.get());
     // create memory keys
@@ -104,13 +106,12 @@ static void validate(const sp<IPreparedModel>& preparedModel, const std::string&
     }
     // execute and verify
-    ErrorStatus error;
-    std::vector<OutputShape> outputShapes;
-    Timing timing;
-    std::tie(error, outputShapes, timing) = burst->compute(request, measure, keys);
-    EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, error);
+    const auto [n, outputShapes, timing, fallback] = burst->compute(request, measure, keys);
+    const ErrorStatus status = nn::convertResultCodeToErrorStatus(n);
+    EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, status);
     EXPECT_EQ(outputShapes.size(), 0);
     EXPECT_TRUE(badTiming(timing));
+    EXPECT_FALSE(fallback);
     // additional burst testing
     if (request.pools.size() > 0) {
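
The validation path above now also receives a fourth tuple element, checked with EXPECT_FALSE(fallback). A sketch of that check factored into a helper, assuming (this diff does not say so) that the flag is a hint to retry through a non-burst path and that the memory keys are a std::vector<intptr_t>:

// Sketch only, not part of the patch.
static void expectInvalidBurstExecution(
        const std::shared_ptr<::android::nn::ExecutionBurstController>& burst,
        const Request& request, MeasureTiming measure, const std::vector<intptr_t>& keys) {
    const auto [n, outputShapes, timing, fallback] = burst->compute(request, measure, keys);
    const ErrorStatus status = nn::convertResultCodeToErrorStatus(n);
    EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, status);  // the invalid request must be rejected
    EXPECT_EQ(outputShapes.size(), 0u);                // no output shapes on failure
    EXPECT_TRUE(badTiming(timing));                    // timing stays at the "no timing" sentinel
    EXPECT_FALSE(fallback);                            // and no fallback/retry is suggested
}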

View file

@@ -36,6 +36,7 @@
 #include <gtest/gtest.h>
 #include <algorithm>
+#include <chrono>
 #include <iostream>
 #include <numeric>
@@ -200,7 +201,8 @@ static Return<ErrorStatus> ExecutePreparedModel(const sp<IPreparedModel>& prepar
 }
 static std::shared_ptr<::android::nn::ExecutionBurstController> CreateBurst(
         const sp<IPreparedModel>& preparedModel) {
-    return android::nn::ExecutionBurstController::create(preparedModel, /*blocking=*/true);
+    return android::nn::ExecutionBurstController::create(preparedModel,
+                                                         std::chrono::microseconds{0});
 }
 enum class Executor { ASYNC, SYNC, BURST };
@@ -264,8 +266,10 @@ void EvaluatePreparedModel(const sp<IPreparedModel>& preparedModel, const TestMo
             }
             // execute burst
-            std::tie(executionStatus, outputShapes, timing) =
+            int n;
+            std::tie(n, outputShapes, timing, std::ignore) =
                     controller->compute(request, measure, keys);
+            executionStatus = nn::convertResultCodeToErrorStatus(n);
            break;
         }

View file

@@ -26,6 +26,7 @@
 #include "Utils.h"
 #include <android-base/logging.h>
+#include <chrono>
 #include <cstring>
 namespace android::hardware::neuralnetworks::V1_3::vts::functional {
@@ -71,9 +72,9 @@ static void createBurst(const sp<IPreparedModel>& preparedModel, const sp<IBurst
     // create FMQ objects
     auto [fmqRequestChannel, fmqRequestDescriptor] =
-            RequestChannelSender::create(kExecutionBurstChannelLength, /*blocking=*/true);
+            RequestChannelSender::create(kExecutionBurstChannelLength);
     auto [fmqResultChannel, fmqResultDescriptor] =
-            ResultChannelReceiver::create(resultChannelLength, /*blocking=*/true);
+            ResultChannelReceiver::create(resultChannelLength, std::chrono::microseconds{0});
     ASSERT_NE(nullptr, fmqRequestChannel.get());
     ASSERT_NE(nullptr, fmqResultChannel.get());
     ASSERT_NE(nullptr, fmqRequestDescriptor);
@@ -300,8 +301,10 @@ static void validateBurstFmqLength(const sp<IPreparedModel>& preparedModel,
     }
     // collect serialized result by running regular burst
-    const auto [statusRegular, outputShapesRegular, timingRegular] =
+    const auto [nRegular, outputShapesRegular, timingRegular, fallbackRegular] =
             controllerRegular->compute(request, MeasureTiming::NO, keys);
+    const ErrorStatus statusRegular = nn::convertResultCodeToErrorStatus(nRegular);
+    EXPECT_FALSE(fallbackRegular);
     // skip test if regular burst output isn't useful for testing a failure
     // caused by having too small of a length for the result FMQ
@@ -314,11 +317,13 @@ static void validateBurstFmqLength(const sp<IPreparedModel>& preparedModel,
     // by this point, execution should fail because the result channel isn't
     // large enough to return the serialized result
-    const auto [statusSmall, outputShapesSmall, timingSmall] =
+    const auto [nSmall, outputShapesSmall, timingSmall, fallbackSmall] =
             controllerSmall->compute(request, MeasureTiming::NO, keys);
+    const ErrorStatus statusSmall = nn::convertResultCodeToErrorStatus(nSmall);
     EXPECT_NE(ErrorStatus::NONE, statusSmall);
     EXPECT_EQ(0u, outputShapesSmall.size());
     EXPECT_TRUE(badTiming(timingSmall));
+    EXPECT_FALSE(fallbackSmall);
 }
 static bool isSanitized(const FmqResultDatum& datum) {

View file

@@ -16,6 +16,7 @@
 #define LOG_TAG "neuralnetworks_hidl_hal_test"
+#include <chrono>
 #include "1.0/Utils.h"
 #include "1.2/Callbacks.h"
 #include "ExecutionBurstController.h"
@@ -98,7 +99,8 @@ static void validate(const sp<IPreparedModel>& preparedModel, const std::string&
     // create burst
     std::shared_ptr<::android::nn::ExecutionBurstController> burst =
-            android::nn::ExecutionBurstController::create(preparedModel, /*blocking=*/true);
+            android::nn::ExecutionBurstController::create(preparedModel,
+                                                          std::chrono::microseconds{0});
     ASSERT_NE(nullptr, burst.get());
     // create memory keys
@@ -108,13 +110,12 @@ static void validate(const sp<IPreparedModel>& preparedModel, const std::string&
     }
     // execute and verify
-    ErrorStatus error;
-    std::vector<OutputShape> outputShapes;
-    Timing timing;
-    std::tie(error, outputShapes, timing) = burst->compute(request, measure, keys);
-    EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, error);
+    const auto [n, outputShapes, timing, fallback] = burst->compute(request, measure, keys);
+    const ErrorStatus status = nn::convertResultCodeToErrorStatus(n);
+    EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, status);
     EXPECT_EQ(outputShapes.size(), 0);
     EXPECT_TRUE(badTiming(timing));
+    EXPECT_FALSE(fallback);
     // additional burst testing
     if (request.pools.size() > 0) {