Merge "Allow implicit conversions for NN errors -- hal" am: 68e98f40dc
am: d236dbf3b3
am: 36c599d513
am: 0e0dc63cff

Original change: https://android-review.googlesource.com/c/platform/hardware/interfaces/+/1859936
Change-Id: I1eb53c2749850f8442ef6aba8089294590068c51

commit 2542d951d0
22 changed files with 99 additions and 149 deletions
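This change removes the hal::utils::makeGeneralFailure and hal::utils::makeExecutionFailure adapter calls throughout the neuralnetworks HAL utility code. The NN error types now convert implicitly across the nn::Result / nn::GeneralResult / nn::ExecutionResult families, so NN_TRY can propagate a failure from one result type into a function returning another without an explicit rewrap. The mechanism, in a minimal self-contained sketch (simplified stand-ins, not the actual NN types):

#include <string>
#include <utility>
#include <variant>

// Stand-ins for the NN result machinery. A plain validation Error carries
// only a message; a GeneralError adds a status code.
struct Error {
    std::string message;
};

enum class ErrorStatus { NONE, GENERAL_FAILURE, INVALID_ARGUMENT };

struct GeneralError {
    // Implicit conversion: a plain Error becomes a GeneralError with the
    // default status. This is what lets callers drop makeGeneralFailure.
    GeneralError(Error e) : message(std::move(e.message)) {}
    GeneralError(std::string m, ErrorStatus c) : message(std::move(m)), code(c) {}

    std::string message;
    ErrorStatus code = ErrorStatus::GENERAL_FAILURE;
};

template <typename T> using Result = std::variant<T, Error>;
template <typename T> using GeneralResult = std::variant<T, GeneralError>;

// A GeneralResult-returning function can now hand back a Result's failure
// directly; the Error -> GeneralError conversion happens implicitly.
GeneralResult<int> toGeneral(Result<int> r) {
    if (auto* e = std::get_if<Error>(&r)) {
        return std::move(*e);
    }
    return std::get<int>(r);
}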
@@ -50,8 +50,8 @@ bool valid(const Type& halObject) {
 }
 
 template <typename Type>
-nn::GeneralResult<void> compliantVersion(const Type& canonical) {
-    const auto version = NN_TRY(hal::utils::makeGeneralFailure(nn::validate(canonical)));
+nn::Result<void> compliantVersion(const Type& canonical) {
+    const auto version = NN_TRY(nn::validate(canonical));
     if (version > kVersion) {
         return NN_ERROR() << "Insufficient version: " << version << " vs required " << kVersion;
     }
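NN_TRY is the early-return macro these hunks lean on: it evaluates an expression yielding a result, returns the failure from the enclosing function, and otherwise unwraps the value in place. With implicit error conversions, the enclosing function's result family no longer has to match the expression's, which is why the wrapper calls above could be deleted. Roughly, based on the statement-expression fragment visible in the header hunk near the end of this diff (the exact AOSP definition may differ):

// Sketch of an NN_TRY-style macro built on a GNU statement expression.
// The early return relies on the error converting implicitly into the
// enclosing function's result type.
#define NN_TRY(expr)                            \
    ({                                          \
        auto result = (expr);                   \
        if (!result.has_value()) {              \
            return std::move(result).error();   \
        }                                       \
        std::move(result).value();              \
    })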
@@ -63,12 +63,11 @@ nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> Prepare
     // Ensure that request is ready for IPC.
     std::optional<nn::Request> maybeRequestInShared;
     hal::utils::RequestRelocation relocation;
-    const nn::Request& requestInShared =
-            NN_TRY(hal::utils::makeExecutionFailure(hal::utils::convertRequestFromPointerToShared(
-                    &request, nn::kDefaultRequestMemoryAlignment, nn::kMinMemoryPadding,
-                    &maybeRequestInShared, &relocation)));
+    const nn::Request& requestInShared = NN_TRY(hal::utils::convertRequestFromPointerToShared(
+            &request, nn::kDefaultRequestMemoryAlignment, nn::kMinMemoryPadding,
+            &maybeRequestInShared, &relocation));
 
-    const auto hidlRequest = NN_TRY(hal::utils::makeExecutionFailure(convert(requestInShared)));
+    const auto hidlRequest = NN_TRY(convert(requestInShared));
 
     return executeInternal(hidlRequest, relocation);
 }
@@ -52,8 +52,8 @@ bool valid(const Type& halObject) {
 }
 
 template <typename Type>
-nn::GeneralResult<void> compliantVersion(const Type& canonical) {
-    const auto version = NN_TRY(hal::utils::makeGeneralFailure(nn::validate(canonical)));
+nn::Result<void> compliantVersion(const Type& canonical) {
+    const auto version = NN_TRY(nn::validate(canonical));
     if (version > kVersion) {
         return NN_ERROR() << "Insufficient version: " << version << " vs required " << kVersion;
     }
@@ -61,8 +61,8 @@ bool valid(const Type& halObject) {
 }
 
 template <typename Type>
-nn::GeneralResult<void> compliantVersion(const Type& canonical) {
-    const auto version = NN_TRY(hal::utils::makeGeneralFailure(nn::validate(canonical)));
+nn::Result<void> compliantVersion(const Type& canonical) {
+    const auto version = NN_TRY(nn::validate(canonical));
     if (version > kVersion) {
         return NN_ERROR() << "Insufficient version: " << version << " vs required " << kVersion;
     }
@@ -75,8 +75,7 @@ nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> executi
                << "execution failed with " << toString(status);
     }
     HANDLE_HAL_STATUS(status) << "execution failed with " << toString(status);
-    return hal::utils::makeExecutionFailure(
-            convertExecutionGeneralResultsHelper(outputShapes, timing));
+    return convertExecutionGeneralResultsHelper(outputShapes, timing);
 }
 
 Return<void> PreparedModelCallback::notify(V1_0::ErrorStatus status,
@@ -120,9 +120,8 @@ GeneralResult<Capabilities> unvalidatedConvert(const hal::V1_2::Capabilities& ca
             NN_TRY(unvalidatedConvert(capabilities.relaxedFloat32toFloat16PerformanceTensor));
     auto operandPerformance = NN_TRY(unvalidatedConvert(capabilities.operandPerformance));
 
-    auto table = NN_TRY(hal::utils::makeGeneralFailure(
-            Capabilities::OperandPerformanceTable::create(std::move(operandPerformance)),
-            nn::ErrorStatus::GENERAL_FAILURE));
+    auto table =
+            NN_TRY(Capabilities::OperandPerformanceTable::create(std::move(operandPerformance)));
 
     return Capabilities{
             .relaxedFloat32toFloat16PerformanceScalar = relaxedFloat32toFloat16PerformanceScalar,
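The removed call above passed nn::ErrorStatus::GENERAL_FAILURE explicitly, but that is the same default the deleted makeGeneralFailure overload applied (see the header hunk near the end of this diff), so dropping the wrapper presumably leaves the reported status unchanged: a status-less error is promoted with the default code. In terms of the sketch types above:

// Equivalent outcomes (sketch types from the first example, not the NN API):
GeneralError explicitStatus{"table creation failed", ErrorStatus::GENERAL_FAILURE};
GeneralError implicitStatus = Error{"table creation failed"};  // code defaults to GENERAL_FAILURE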
@@ -320,8 +320,7 @@ ExecutionBurstController::execute(const nn::Request& request, nn::MeasureTiming
 
     // if the request is valid but of a higher version than what's supported in burst execution,
     // fall back to another execution path
-    if (const auto version = NN_TRY(hal::utils::makeExecutionFailure(nn::validate(request)));
-        version > nn::Version::ANDROID_Q) {
+    if (const auto version = NN_TRY(nn::validate(request)); version > nn::Version::ANDROID_Q) {
         // fallback to another execution path if the packet could not be sent
         return kPreparedModel->execute(request, measure, deadline, loopTimeoutDuration);
     }
@@ -329,17 +328,15 @@ ExecutionBurstController::execute(const nn::Request& request, nn::MeasureTiming
     // ensure that request is ready for IPC
     std::optional<nn::Request> maybeRequestInShared;
     hal::utils::RequestRelocation relocation;
-    const nn::Request& requestInShared =
-            NN_TRY(hal::utils::makeExecutionFailure(hal::utils::convertRequestFromPointerToShared(
-                    &request, nn::kDefaultRequestMemoryAlignment, nn::kMinMemoryPadding,
-                    &maybeRequestInShared, &relocation)));
+    const nn::Request& requestInShared = NN_TRY(hal::utils::convertRequestFromPointerToShared(
+            &request, nn::kDefaultRequestMemoryAlignment, nn::kMinMemoryPadding,
+            &maybeRequestInShared, &relocation));
 
     // clear pools field of request, as they will be provided via slots
     const auto requestWithoutPools = nn::Request{
             .inputs = requestInShared.inputs, .outputs = requestInShared.outputs, .pools = {}};
-    auto hidlRequest = NN_TRY(
-            hal::utils::makeExecutionFailure(V1_0::utils::unvalidatedConvert(requestWithoutPools)));
-    const auto hidlMeasure = NN_TRY(hal::utils::makeExecutionFailure(convert(measure)));
+    auto hidlRequest = NN_TRY(V1_0::utils::unvalidatedConvert(requestWithoutPools));
+    const auto hidlMeasure = NN_TRY(convert(measure));
 
     std::vector<int32_t> slots;
     std::vector<OptionalCacheHold> holds;
@@ -367,8 +364,7 @@ nn::GeneralResult<nn::SharedExecution> ExecutionBurstController::createReusableE
 
     // if the request is valid but of a higher version than what's supported in burst execution,
     // fall back to another execution path
-    if (const auto version = NN_TRY(hal::utils::makeGeneralFailure(nn::validate(request)));
-        version > nn::Version::ANDROID_Q) {
+    if (const auto version = NN_TRY(nn::validate(request)); version > nn::Version::ANDROID_Q) {
         // fallback to another execution path if the packet could not be sent
         return kPreparedModel->createReusableExecution(request, measure, loopTimeoutDuration);
     }
@@ -430,8 +426,7 @@ ExecutionBurstController::executeInternal(const std::vector<FmqRequestDatum>& re
     }
 
     // get result packet
-    const auto [status, outputShapes, timing] =
-            NN_TRY(hal::utils::makeExecutionFailure(mResultChannelReceiver->getBlocking()));
+    const auto [status, outputShapes, timing] = NN_TRY(mResultChannelReceiver->getBlocking());
 
     if (relocation.output) {
         relocation.output->flush();
@@ -45,8 +45,6 @@
 namespace android::hardware::neuralnetworks::V1_2::utils {
 namespace {
 
-using neuralnetworks::utils::makeExecutionFailure;
-
 constexpr V1_2::Timing kNoTiming = {std::numeric_limits<uint64_t>::max(),
                                     std::numeric_limits<uint64_t>::max()};
 
@@ -241,28 +239,25 @@ nn::ExecutionResult<std::pair<hidl_vec<OutputShape>, Timing>> ExecutionBurstServ
             "ExecutionBurstServer getting memory, executing, and returning results");
 
     // ensure executor with cache has required memory
-    const auto cacheEntries =
-            NN_TRY(makeExecutionFailure(mMemoryCache.getCacheEntries(slotsOfPools)));
+    const auto cacheEntries = NN_TRY(mMemoryCache.getCacheEntries(slotsOfPools));
 
     // convert request, populating its pools
     // This code performs an unvalidated convert because the request object without its pools is
     // invalid because it is incomplete. Instead, the validation is performed after the memory pools
     // have been added to the request.
-    auto canonicalRequest =
-            NN_TRY(makeExecutionFailure(nn::unvalidatedConvert(requestWithoutPools)));
+    auto canonicalRequest = NN_TRY(nn::unvalidatedConvert(requestWithoutPools));
     CHECK(canonicalRequest.pools.empty());
     std::transform(cacheEntries.begin(), cacheEntries.end(),
                    std::back_inserter(canonicalRequest.pools),
                    [](const auto& cacheEntry) { return cacheEntry.first; });
-    NN_TRY(makeExecutionFailure(validate(canonicalRequest)));
+    NN_TRY(validate(canonicalRequest));
 
-    nn::MeasureTiming canonicalMeasure = NN_TRY(makeExecutionFailure(nn::convert(measure)));
+    nn::MeasureTiming canonicalMeasure = NN_TRY(nn::convert(measure));
 
     const auto [outputShapes, timing] =
             NN_TRY(mBurstExecutor->execute(canonicalRequest, canonicalMeasure, {}, {}));
 
-    return std::make_pair(NN_TRY(makeExecutionFailure(convert(outputShapes))),
-                          NN_TRY(makeExecutionFailure(convert(timing))));
+    return std::make_pair(NN_TRY(convert(outputShapes)), NN_TRY(convert(timing)));
 }
 
 }  // namespace android::hardware::neuralnetworks::V1_2::utils
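The ordering in the hunk above is deliberate and survives the cleanup: the request is converted with nn::unvalidatedConvert precisely because it is incomplete until std::transform splices the cached memory pools back into canonicalRequest.pools, and validate runs only once the object is whole. The change itself only strips the now-redundant makeExecutionFailure wrapper from each step; the convert-populate-validate sequence is untouched.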
@@ -95,13 +95,12 @@ nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> Prepare
     // Ensure that request is ready for IPC.
     std::optional<nn::Request> maybeRequestInShared;
     hal::utils::RequestRelocation relocation;
-    const nn::Request& requestInShared =
-            NN_TRY(hal::utils::makeExecutionFailure(hal::utils::convertRequestFromPointerToShared(
-                    &request, nn::kDefaultRequestMemoryAlignment, nn::kMinMemoryPadding,
-                    &maybeRequestInShared, &relocation)));
+    const nn::Request& requestInShared = NN_TRY(hal::utils::convertRequestFromPointerToShared(
+            &request, nn::kDefaultRequestMemoryAlignment, nn::kMinMemoryPadding,
+            &maybeRequestInShared, &relocation));
 
-    const auto hidlRequest = NN_TRY(hal::utils::makeExecutionFailure(convert(requestInShared)));
-    const auto hidlMeasure = NN_TRY(hal::utils::makeExecutionFailure(convert(measure)));
+    const auto hidlRequest = NN_TRY(convert(requestInShared));
+    const auto hidlMeasure = NN_TRY(convert(measure));
 
     return executeInternal(hidlRequest, hidlMeasure, relocation);
 }
@@ -61,8 +61,8 @@ bool valid(const Type& halObject) {
 }
 
 template <typename Type>
-nn::GeneralResult<void> compliantVersion(const Type& canonical) {
-    const auto version = NN_TRY(hal::utils::makeGeneralFailure(nn::validate(canonical)));
+nn::Result<void> compliantVersion(const Type& canonical) {
+    const auto version = NN_TRY(nn::validate(canonical));
     if (version > kVersion) {
         return NN_ERROR() << "Insufficient version: " << version << " vs required " << kVersion;
     }
@@ -91,8 +91,7 @@ nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> executi
                << "execution failed with " << toString(status);
     }
     HANDLE_HAL_STATUS(status) << "execution failed with " << toString(status);
-    return hal::utils::makeExecutionFailure(
-            convertExecutionGeneralResultsHelper(outputShapes, timing));
+    return convertExecutionGeneralResultsHelper(outputShapes, timing);
 }
 
 Return<void> PreparedModelCallback::notify(V1_0::ErrorStatus status,
@@ -131,9 +131,8 @@ GeneralResult<Capabilities> unvalidatedConvert(const hal::V1_3::Capabilities& ca
     }
 
     auto operandPerformance = NN_TRY(unvalidatedConvert(capabilities.operandPerformance));
-    auto table = NN_TRY(hal::utils::makeGeneralFailure(
-            Capabilities::OperandPerformanceTable::create(std::move(operandPerformance)),
-            nn::ErrorStatus::GENERAL_FAILURE));
+    auto table =
+            NN_TRY(Capabilities::OperandPerformanceTable::create(std::move(operandPerformance)));
 
     return Capabilities{
             .relaxedFloat32toFloat16PerformanceScalar = NN_TRY(
@@ -65,7 +65,7 @@ Execution::Execution(PrivateConstructorTag /*tag*/,
 
 nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> Execution::compute(
         const nn::OptionalTimePoint& deadline) const {
-    const auto hidlDeadline = NN_TRY(hal::utils::makeExecutionFailure(convert(deadline)));
+    const auto hidlDeadline = NN_TRY(convert(deadline));
     return kPreparedModel->executeInternal(kRequest, kMeasure, hidlDeadline, kLoopTimeoutDuration,
                                            kRelocation);
 }
@@ -62,8 +62,7 @@ nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>> fence
     auto resultSyncFence = nn::SyncFence::createAsSignaled();
     if (syncFence.getNativeHandle() != nullptr) {
         auto sharedHandle = NN_TRY(nn::convert(syncFence));
-        resultSyncFence = NN_TRY(hal::utils::makeGeneralFailure(
-                nn::SyncFence::create(std::move(sharedHandle)), nn::ErrorStatus::GENERAL_FAILURE));
+        resultSyncFence = NN_TRY(nn::SyncFence::create(std::move(sharedHandle)));
     }
 
     if (callback == nullptr) {
@@ -141,16 +140,14 @@ nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> Prepare
     // Ensure that request is ready for IPC.
     std::optional<nn::Request> maybeRequestInShared;
     hal::utils::RequestRelocation relocation;
-    const nn::Request& requestInShared =
-            NN_TRY(hal::utils::makeExecutionFailure(hal::utils::convertRequestFromPointerToShared(
-                    &request, nn::kDefaultRequestMemoryAlignment, nn::kMinMemoryPadding,
-                    &maybeRequestInShared, &relocation)));
+    const nn::Request& requestInShared = NN_TRY(hal::utils::convertRequestFromPointerToShared(
+            &request, nn::kDefaultRequestMemoryAlignment, nn::kMinMemoryPadding,
+            &maybeRequestInShared, &relocation));
 
-    const auto hidlRequest = NN_TRY(hal::utils::makeExecutionFailure(convert(requestInShared)));
-    const auto hidlMeasure = NN_TRY(hal::utils::makeExecutionFailure(convert(measure)));
-    const auto hidlDeadline = NN_TRY(hal::utils::makeExecutionFailure(convert(deadline)));
-    const auto hidlLoopTimeoutDuration =
-            NN_TRY(hal::utils::makeExecutionFailure(convert(loopTimeoutDuration)));
+    const auto hidlRequest = NN_TRY(convert(requestInShared));
+    const auto hidlMeasure = NN_TRY(convert(measure));
+    const auto hidlDeadline = NN_TRY(convert(deadline));
+    const auto hidlLoopTimeoutDuration = NN_TRY(convert(loopTimeoutDuration));
 
     return executeInternal(hidlRequest, hidlMeasure, hidlDeadline, hidlLoopTimeoutDuration,
                            relocation);
@@ -50,9 +50,8 @@ bool valid(const Type& halObject) {
 }
 
 template <typename Type>
-nn::GeneralResult<void> compliantVersion(const Type& canonical) {
-    const auto version = NN_TRY(::android::hardware::neuralnetworks::utils::makeGeneralFailure(
-            nn::validate(canonical)));
+nn::Result<void> compliantVersion(const Type& canonical) {
+    const auto version = NN_TRY(nn::validate(canonical));
     if (version > kVersion) {
         return NN_ERROR() << "Insufficient version: " << version << " vs required " << kVersion;
     }
@@ -176,16 +176,14 @@ nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> Burst::
     // Ensure that request is ready for IPC.
     std::optional<nn::Request> maybeRequestInShared;
     hal::utils::RequestRelocation relocation;
-    const nn::Request& requestInShared =
-            NN_TRY(hal::utils::makeExecutionFailure(hal::utils::convertRequestFromPointerToShared(
-                    &request, nn::kDefaultRequestMemoryAlignment, nn::kDefaultRequestMemoryPadding,
-                    &maybeRequestInShared, &relocation)));
+    const nn::Request& requestInShared = NN_TRY(hal::utils::convertRequestFromPointerToShared(
+            &request, nn::kDefaultRequestMemoryAlignment, nn::kDefaultRequestMemoryPadding,
+            &maybeRequestInShared, &relocation));
 
-    const auto aidlRequest = NN_TRY(hal::utils::makeExecutionFailure(convert(requestInShared)));
-    const auto aidlMeasure = NN_TRY(hal::utils::makeExecutionFailure(convert(measure)));
-    const auto aidlDeadline = NN_TRY(hal::utils::makeExecutionFailure(convert(deadline)));
-    const auto aidlLoopTimeoutDuration =
-            NN_TRY(hal::utils::makeExecutionFailure(convert(loopTimeoutDuration)));
+    const auto aidlRequest = NN_TRY(convert(requestInShared));
+    const auto aidlMeasure = NN_TRY(convert(measure));
+    const auto aidlDeadline = NN_TRY(convert(deadline));
+    const auto aidlLoopTimeoutDuration = NN_TRY(convert(loopTimeoutDuration));
 
     std::vector<int64_t> memoryIdentifierTokens;
     std::vector<OptionalCacheHold> holds;
@@ -233,8 +231,8 @@ nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> Burst::
         return NN_ERROR(nn::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE, std::move(canonicalOutputShapes))
                << "execution failed with " << nn::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE;
     }
-    auto [outputShapes, timing] = NN_TRY(hal::utils::makeExecutionFailure(
-            convertExecutionResults(executionResult.outputShapes, executionResult.timing)));
+    auto [outputShapes, timing] =
+            NN_TRY(convertExecutionResults(executionResult.outputShapes, executionResult.timing));
 
     if (relocation.output) {
         relocation.output->flush();
@@ -308,7 +306,7 @@ BurstExecution::BurstExecution(PrivateConstructorTag /*tag*/, std::shared_ptr<co
 
 nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> BurstExecution::compute(
         const nn::OptionalTimePoint& deadline) const {
-    const auto aidlDeadline = NN_TRY(hal::utils::makeExecutionFailure(convert(deadline)));
+    const auto aidlDeadline = NN_TRY(convert(deadline));
     return kBurst->executeInternal(kRequest, kMemoryIdentifierTokens, kMeasure, aidlDeadline,
                                    kLoopTimeoutDuration, kRelocation);
 }
@@ -178,9 +178,8 @@ GeneralResult<Capabilities> unvalidatedConvert(const aidl_hal::Capabilities& cap
     }
 
     auto operandPerformance = NN_TRY(unvalidatedConvert(capabilities.operandPerformance));
-    auto table = NN_TRY(hal::utils::makeGeneralFailure(
-            Capabilities::OperandPerformanceTable::create(std::move(operandPerformance)),
-            nn::ErrorStatus::GENERAL_FAILURE));
+    auto table =
+            NN_TRY(Capabilities::OperandPerformanceTable::create(std::move(operandPerformance)));
 
     return Capabilities{
             .relaxedFloat32toFloat16PerformanceScalar = NN_TRY(
@@ -60,7 +60,7 @@ Execution::Execution(PrivateConstructorTag /*tag*/,
 
 nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> Execution::compute(
         const nn::OptionalTimePoint& deadline) const {
-    const auto aidlDeadline = NN_TRY(hal::utils::makeExecutionFailure(convert(deadline)));
+    const auto aidlDeadline = NN_TRY(convert(deadline));
     return kPreparedModel->executeInternal(kRequest, kMeasure, aidlDeadline, kLoopTimeoutDuration,
                                            kRelocation);
 }
@@ -78,16 +78,14 @@ nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> Prepare
     // Ensure that request is ready for IPC.
     std::optional<nn::Request> maybeRequestInShared;
     hal::utils::RequestRelocation relocation;
-    const nn::Request& requestInShared =
-            NN_TRY(hal::utils::makeExecutionFailure(hal::utils::convertRequestFromPointerToShared(
-                    &request, nn::kDefaultRequestMemoryAlignment, nn::kDefaultRequestMemoryPadding,
-                    &maybeRequestInShared, &relocation)));
+    const nn::Request& requestInShared = NN_TRY(hal::utils::convertRequestFromPointerToShared(
+            &request, nn::kDefaultRequestMemoryAlignment, nn::kDefaultRequestMemoryPadding,
+            &maybeRequestInShared, &relocation));
 
-    const auto aidlRequest = NN_TRY(hal::utils::makeExecutionFailure(convert(requestInShared)));
-    const auto aidlMeasure = NN_TRY(hal::utils::makeExecutionFailure(convert(measure)));
-    const auto aidlDeadline = NN_TRY(hal::utils::makeExecutionFailure(convert(deadline)));
-    const auto aidlLoopTimeoutDuration =
-            NN_TRY(hal::utils::makeExecutionFailure(convert(loopTimeoutDuration)));
+    const auto aidlRequest = NN_TRY(convert(requestInShared));
+    const auto aidlMeasure = NN_TRY(convert(measure));
+    const auto aidlDeadline = NN_TRY(convert(deadline));
+    const auto aidlLoopTimeoutDuration = NN_TRY(convert(loopTimeoutDuration));
     return executeInternal(aidlRequest, aidlMeasure, aidlDeadline, aidlLoopTimeoutDuration,
                            relocation);
 }
@@ -110,8 +108,8 @@ PreparedModel::executeInternal(const Request& request, bool measure, int64_t dea
         return NN_ERROR(nn::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE, std::move(canonicalOutputShapes))
                << "execution failed with " << nn::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE;
     }
-    auto [outputShapes, timing] = NN_TRY(hal::utils::makeExecutionFailure(
-            convertExecutionResults(executionResult.outputShapes, executionResult.timing)));
+    auto [outputShapes, timing] =
+            NN_TRY(convertExecutionResults(executionResult.outputShapes, executionResult.timing));
 
     if (relocation.output) {
         relocation.output->flush();
@@ -57,6 +57,15 @@ auto convertInput(const Type& object) -> decltype(nn::convert(std::declval<Type>
     return result;
 }
 
+nn::GeneralResult<nn::Version> validateRequestForModel(const nn::Request& request,
+                                                       const nn::Model& model) {
+    nn::GeneralResult<nn::Version> version = nn::validateRequestForModel(request, model);
+    if (!version.ok()) {
+        version.error().code = nn::ErrorStatus::INVALID_ARGUMENT;
+    }
+    return version;
+}
+
 class FencedExecutionCallback final : public V1_3::IFencedExecutionCallback {
   public:
     explicit FencedExecutionCallback(const nn::ExecuteFencedInfoCallback& callback)
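The validateRequestForModel wrapper added above centralizes an error-code remap: the canonical validator reports its own status, but at this service boundary every validation failure should surface as INVALID_ARGUMENT, so the wrapper rewrites version.error().code in place. The next three hunks replace per-call-site makeGeneralFailure(..., nn::ErrorStatus::INVALID_ARGUMENT) invocations with calls to it. The same idea in generic form (a sketch; it assumes a GeneralResult-like type exposing ok() and a mutable error().code, as the hunk above does):

// Remap any failure in `result` to a fixed status, keeping the message.
template <typename T>
nn::GeneralResult<T> remapError(nn::GeneralResult<T> result, nn::ErrorStatus code) {
    if (!result.ok()) {
        result.error().code = code;
    }
    return result;
}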
@@ -148,8 +157,7 @@ nn::GeneralResult<void> execute(const nn::SharedPreparedModel& preparedModel, ui
     const std::any resource = preparedModel->getUnderlyingResource();
     if (const auto* model = std::any_cast<const nn::Model*>(&resource)) {
         CHECK(*model != nullptr);
-        NN_TRY(utils::makeGeneralFailure(nn::validateRequestForModel(nnRequest, **model),
-                                         nn::ErrorStatus::INVALID_ARGUMENT));
+        NN_TRY(adapter::validateRequestForModel(nnRequest, **model));
     }
 
     Task task = [preparedModel, nnRequest = std::move(nnRequest), callback] {
@@ -175,8 +183,7 @@ nn::GeneralResult<void> execute_1_2(const nn::SharedPreparedModel& preparedModel
     const std::any resource = preparedModel->getUnderlyingResource();
     if (const auto* model = std::any_cast<const nn::Model*>(&resource)) {
         CHECK(*model != nullptr);
-        NN_TRY(utils::makeGeneralFailure(nn::validateRequestForModel(nnRequest, **model),
-                                         nn::ErrorStatus::INVALID_ARGUMENT));
+        NN_TRY(adapter::validateRequestForModel(nnRequest, **model));
     }
 
     Task task = [preparedModel, nnRequest = std::move(nnRequest), nnMeasure, callback] {
@@ -206,8 +213,7 @@ nn::GeneralResult<void> execute_1_3(const nn::SharedPreparedModel& preparedModel
     const std::any resource = preparedModel->getUnderlyingResource();
     if (const auto* model = std::any_cast<const nn::Model*>(&resource)) {
         CHECK(*model != nullptr);
-        NN_TRY(utils::makeGeneralFailure(nn::validateRequestForModel(nnRequest, **model),
-                                         nn::ErrorStatus::INVALID_ARGUMENT));
+        NN_TRY(adapter::validateRequestForModel(nnRequest, **model));
     }
 
     Task task = [preparedModel, nnRequest = std::move(nnRequest), nnMeasure, nnDeadline,
@@ -224,14 +230,14 @@ nn::GeneralResult<void> execute_1_3(const nn::SharedPreparedModel& preparedModel
 nn::ExecutionResult<std::pair<hidl_vec<V1_2::OutputShape>, V1_2::Timing>> executeSynchronously(
         const nn::SharedPreparedModel& preparedModel, const V1_0::Request& request,
         V1_2::MeasureTiming measure) {
-    const auto nnRequest = NN_TRY(utils::makeExecutionFailure(convertInput(request)));
-    const auto nnMeasure = NN_TRY(utils::makeExecutionFailure(convertInput(measure)));
+    const auto nnRequest = NN_TRY(convertInput(request));
+    const auto nnMeasure = NN_TRY(convertInput(measure));
 
     const auto [outputShapes, timing] =
             NN_TRY(preparedModel->execute(nnRequest, nnMeasure, {}, {}));
 
-    auto hidlOutputShapes = NN_TRY(utils::makeExecutionFailure(V1_2::utils::convert(outputShapes)));
-    const auto hidlTiming = NN_TRY(utils::makeExecutionFailure(V1_2::utils::convert(timing)));
+    auto hidlOutputShapes = NN_TRY(V1_2::utils::convert(outputShapes));
+    const auto hidlTiming = NN_TRY(V1_2::utils::convert(timing));
     return std::make_pair(std::move(hidlOutputShapes), hidlTiming);
 }
 
@@ -239,29 +245,30 @@ nn::ExecutionResult<std::pair<hidl_vec<V1_2::OutputShape>, V1_2::Timing>> execut
         const nn::SharedPreparedModel& preparedModel, const V1_3::Request& request,
         V1_2::MeasureTiming measure, const V1_3::OptionalTimePoint& deadline,
         const V1_3::OptionalTimeoutDuration& loopTimeoutDuration) {
-    const auto nnRequest = NN_TRY(utils::makeExecutionFailure(convertInput(request)));
-    const auto nnMeasure = NN_TRY(utils::makeExecutionFailure(convertInput(measure)));
-    const auto nnDeadline = NN_TRY(utils::makeExecutionFailure(convertInput(deadline)));
-    const auto nnLoopTimeoutDuration =
-            NN_TRY(utils::makeExecutionFailure(convertInput(loopTimeoutDuration)));
+    const auto nnRequest = NN_TRY(convertInput(request));
+    const auto nnMeasure = NN_TRY(convertInput(measure));
+    const auto nnDeadline = NN_TRY(convertInput(deadline));
+    const auto nnLoopTimeoutDuration = NN_TRY(convertInput(loopTimeoutDuration));
 
     const auto [outputShapes, timing] =
             NN_TRY(preparedModel->execute(nnRequest, nnMeasure, nnDeadline, nnLoopTimeoutDuration));
 
-    auto hidlOutputShapes = NN_TRY(utils::makeExecutionFailure(V1_3::utils::convert(outputShapes)));
-    const auto hidlTiming = NN_TRY(utils::makeExecutionFailure(V1_3::utils::convert(timing)));
+    auto hidlOutputShapes = NN_TRY(V1_3::utils::convert(outputShapes));
+    const auto hidlTiming = NN_TRY(V1_3::utils::convert(timing));
     return std::make_pair(std::move(hidlOutputShapes), hidlTiming);
 }
 
 nn::GeneralResult<std::vector<nn::SyncFence>> convertSyncFences(
         const hidl_vec<hidl_handle>& handles) {
+    auto nnHandles = NN_TRY(convertInput(handles));
     std::vector<nn::SyncFence> syncFences;
     syncFences.reserve(handles.size());
-    for (const auto& handle : handles) {
-        auto nativeHandle = NN_TRY(convertInput(handle));
-        auto syncFence = NN_TRY(utils::makeGeneralFailure(
-                nn::SyncFence::create(std::move(nativeHandle)), nn::ErrorStatus::INVALID_ARGUMENT));
-        syncFences.push_back(std::move(syncFence));
+    for (auto&& handle : nnHandles) {
+        if (auto syncFence = nn::SyncFence::create(std::move(handle)); !syncFence.ok()) {
+            return nn::error(nn::ErrorStatus::INVALID_ARGUMENT) << std::move(syncFence).error();
+        } else {
+            syncFences.push_back(std::move(syncFence).value());
+        }
     }
     return syncFences;
 }
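convertSyncFences gets a deeper rewrite than most call sites: the handles are now all converted up front with convertInput(handles), and each nn::SyncFence::create failure is remapped to INVALID_ARGUMENT by hand via nn::error(...) << std::move(syncFence).error(). The explicit if/else remains necessary because the desired status differs from the default that an implicit conversion would apply.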
@@ -52,38 +52,6 @@ nn::GeneralResult<Type> handleTransportError(const hardware::Return<Type>& ret)
         std::move(result).value(); \
     })
 
-template <typename Type>
-nn::GeneralResult<Type> makeGeneralFailure(
-        nn::Result<Type> result, nn::ErrorStatus status = nn::ErrorStatus::GENERAL_FAILURE) {
-    if (!result.has_value()) {
-        return nn::error(status) << std::move(result).error();
-    }
-    if constexpr (!std::is_same_v<Type, void>) {
-        return std::move(result).value();
-    } else {
-        return {};
-    }
-}
-
-template <typename Type>
-nn::ExecutionResult<Type> makeExecutionFailure(nn::GeneralResult<Type> result) {
-    if (!result.has_value()) {
-        const auto [message, status] = std::move(result).error();
-        return nn::error(status) << message;
-    }
-    if constexpr (!std::is_same_v<Type, void>) {
-        return std::move(result).value();
-    } else {
-        return {};
-    }
-}
-
-template <typename Type>
-nn::ExecutionResult<Type> makeExecutionFailure(
-        nn::Result<Type> result, nn::ErrorStatus status = nn::ErrorStatus::GENERAL_FAILURE) {
-    return makeExecutionFailure(makeGeneralFailure(result, status));
-}
-
 #define HANDLE_HAL_STATUS(status) \
     if (const auto canonical = ::android::nn::convert(status).value_or( \
                 ::android::nn::ErrorStatus::GENERAL_FAILURE); \
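This header hunk deletes the adapters themselves. Both did the same mechanical work: unwrap a result, rewrap its error in the target result family (optionally forcing a status code), and pass the value through; once the error types convert implicitly, NN_TRY alone covers every call site, which is how the 22 files shed a net 50 lines. The retained HANDLE_HAL_STATUS macro, whose first lines are visible at the bottom of the hunk, is a separate utility: per the visible fragment it converts a HAL status to the canonical ErrorStatus, falling back to GENERAL_FAILURE. A plausible completion of the truncated definition (the tail shown here is an assumption; only the first three lines appear in this diff):

// HANDLE_HAL_STATUS(status) << "..." usage in earlier hunks suggests the
// macro ends in a return statement that the caller's << appends to.
#define HANDLE_HAL_STATUS(status)                                        \
    if (const auto canonical = ::android::nn::convert(status).value_or(  \
                ::android::nn::ErrorStatus::GENERAL_FAILURE);            \
        canonical == ::android::nn::ErrorStatus::NONE) {                 \
    } else                                                               \
        return NN_ERROR(canonical)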
@@ -333,7 +333,7 @@ nn::GeneralResult<std::reference_wrapper<const nn::Request>> convertRequestFromP
 
 nn::GeneralResult<std::vector<uint32_t>> countNumberOfConsumers(
         size_t numberOfOperands, const std::vector<nn::Operation>& operations) {
-    return makeGeneralFailure(nn::countNumberOfConsumers(numberOfOperands, operations));
+    return nn::countNumberOfConsumers(numberOfOperands, operations);
 }
 
 nn::GeneralResult<hidl_memory> createHidlMemoryFromSharedMemory(const nn::SharedMemory& memory) {