Merge changes from topic "aosp-nnapi-reusable-execution-canonical"
* changes: Introduce reusable burst to canonical interface -- HAL. Introduce reusable execution to canonical interface -- HAL.
This commit is contained in:
commit
43ae2ecfe4
44 changed files with 2935 additions and 161 deletions
|
@ -48,6 +48,10 @@ class Burst final : public nn::IBurst {
|
|||
const nn::OptionalTimePoint& deadline,
|
||||
const nn::OptionalDuration& loopTimeoutDuration) const override;
|
||||
|
||||
nn::GeneralResult<nn::SharedExecution> createReusableExecution(
|
||||
const nn::Request& request, nn::MeasureTiming measure,
|
||||
const nn::OptionalDuration& loopTimeoutDuration) const override;
|
||||
|
||||
private:
|
||||
const nn::SharedPreparedModel kPreparedModel;
|
||||
};
|
||||
|
|
64
neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Execution.h
Normal file
64
neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Execution.h
Normal file
|
@ -0,0 +1,64 @@
|
|||
/*
|
||||
* Copyright (C) 2021 The Android Open Source Project
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_0_UTILS_EXECUTION_H
|
||||
#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_0_UTILS_EXECUTION_H
|
||||
|
||||
#include <android/hardware/neuralnetworks/1.0/IPreparedModel.h>
|
||||
#include <nnapi/IExecution.h>
|
||||
#include <nnapi/Result.h>
|
||||
#include <nnapi/Types.h>
|
||||
#include <nnapi/hal/CommonUtils.h>
|
||||
#include <nnapi/hal/ProtectCallback.h>
|
||||
|
||||
#include "PreparedModel.h"
|
||||
|
||||
#include <memory>
|
||||
#include <utility>
|
||||
#include <vector>
|
||||
|
||||
// See hardware/interfaces/neuralnetworks/utils/README.md for more information on HIDL interface
|
||||
// lifetimes across processes and for protecting asynchronous calls across HIDL.
|
||||
|
||||
namespace android::hardware::neuralnetworks::V1_0::utils {
|
||||
|
||||
class Execution final : public nn::IExecution, public std::enable_shared_from_this<Execution> {
|
||||
struct PrivateConstructorTag {};
|
||||
|
||||
public:
|
||||
static nn::GeneralResult<std::shared_ptr<const Execution>> create(
|
||||
std::shared_ptr<const PreparedModel> preparedModel, Request request,
|
||||
hal::utils::RequestRelocation relocation);
|
||||
|
||||
Execution(PrivateConstructorTag tag, std::shared_ptr<const PreparedModel> preparedModel,
|
||||
Request request, hal::utils::RequestRelocation relocation);
|
||||
|
||||
nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> compute(
|
||||
const nn::OptionalTimePoint& deadline) const override;
|
||||
|
||||
nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>> computeFenced(
|
||||
const std::vector<nn::SyncFence>& waitFor, const nn::OptionalTimePoint& deadline,
|
||||
const nn::OptionalDuration& timeoutDurationAfterFence) const override;
|
||||
|
||||
private:
|
||||
const std::shared_ptr<const PreparedModel> kPreparedModel;
|
||||
const Request kRequest;
|
||||
const hal::utils::RequestRelocation kRelocation;
|
||||
};
|
||||
|
||||
} // namespace android::hardware::neuralnetworks::V1_0::utils
|
||||
|
||||
#endif // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_0_UTILS_EXECUTION_H
|
|
@ -57,10 +57,17 @@ class PreparedModel final : public nn::IPreparedModel,
|
|||
const nn::OptionalDuration& loopTimeoutDuration,
|
||||
const nn::OptionalDuration& timeoutDurationAfterFence) const override;
|
||||
|
||||
nn::GeneralResult<nn::SharedExecution> createReusableExecution(
|
||||
const nn::Request& request, nn::MeasureTiming measure,
|
||||
const nn::OptionalDuration& loopTimeoutDuration) const override;
|
||||
|
||||
nn::GeneralResult<nn::SharedBurst> configureExecutionBurst() const override;
|
||||
|
||||
std::any getUnderlyingResource() const override;
|
||||
|
||||
nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> executeInternal(
|
||||
const V1_0::Request& request, const hal::utils::RequestRelocation& relocation) const;
|
||||
|
||||
private:
|
||||
const sp<V1_0::IPreparedModel> kPreparedModel;
|
||||
const hal::utils::DeathHandler kDeathHandler;
|
||||
|
|
|
@ -55,4 +55,10 @@ nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> Burst::
|
|||
return kPreparedModel->execute(request, measure, deadline, loopTimeoutDuration);
|
||||
}
|
||||
|
||||
nn::GeneralResult<nn::SharedExecution> Burst::createReusableExecution(
|
||||
const nn::Request& request, nn::MeasureTiming measure,
|
||||
const nn::OptionalDuration& loopTimeoutDuration) const {
|
||||
return kPreparedModel->createReusableExecution(request, measure, loopTimeoutDuration);
|
||||
}
|
||||
|
||||
} // namespace android::hardware::neuralnetworks::V1_0::utils
|
||||
|
|
72
neuralnetworks/1.0/utils/src/Execution.cpp
Normal file
72
neuralnetworks/1.0/utils/src/Execution.cpp
Normal file
|
@ -0,0 +1,72 @@
|
|||
/*
|
||||
* Copyright (C) 2021 The Android Open Source Project
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#include "Execution.h"
|
||||
|
||||
#include "Callbacks.h"
|
||||
#include "Conversions.h"
|
||||
#include "Utils.h"
|
||||
|
||||
#include <android/hardware/neuralnetworks/1.0/IPreparedModel.h>
|
||||
#include <android/hardware/neuralnetworks/1.0/types.h>
|
||||
#include <nnapi/IExecution.h>
|
||||
#include <nnapi/Result.h>
|
||||
#include <nnapi/TypeUtils.h>
|
||||
#include <nnapi/Types.h>
|
||||
#include <nnapi/hal/CommonUtils.h>
|
||||
#include <nnapi/hal/HandleError.h>
|
||||
#include <nnapi/hal/ProtectCallback.h>
|
||||
|
||||
#include <memory>
|
||||
#include <utility>
|
||||
#include <vector>
|
||||
|
||||
// See hardware/interfaces/neuralnetworks/utils/README.md for more information on HIDL interface
|
||||
// lifetimes across processes and for protecting asynchronous calls across HIDL.
|
||||
|
||||
namespace android::hardware::neuralnetworks::V1_0::utils {
|
||||
|
||||
nn::GeneralResult<std::shared_ptr<const Execution>> Execution::create(
|
||||
std::shared_ptr<const PreparedModel> preparedModel, Request request,
|
||||
hal::utils::RequestRelocation relocation) {
|
||||
if (preparedModel == nullptr) {
|
||||
return NN_ERROR() << "V1_0::utils::Execution::create must have non-null preparedModel";
|
||||
}
|
||||
|
||||
return std::make_shared<const Execution>(PrivateConstructorTag{}, std::move(preparedModel),
|
||||
std::move(request), std::move(relocation));
|
||||
}
|
||||
|
||||
Execution::Execution(PrivateConstructorTag /*tag*/,
|
||||
std::shared_ptr<const PreparedModel> preparedModel, Request request,
|
||||
hal::utils::RequestRelocation relocation)
|
||||
: kPreparedModel(std::move(preparedModel)),
|
||||
kRequest(std::move(request)),
|
||||
kRelocation(std::move(relocation)) {}
|
||||
|
||||
nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> Execution::compute(
|
||||
const nn::OptionalTimePoint& /*deadline*/) const {
|
||||
return kPreparedModel->executeInternal(kRequest, kRelocation);
|
||||
}
|
||||
|
||||
nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>> Execution::computeFenced(
|
||||
const std::vector<nn::SyncFence>& /*waitFor*/, const nn::OptionalTimePoint& /*deadline*/,
|
||||
const nn::OptionalDuration& /*timeoutDurationAfterFence*/) const {
|
||||
return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
|
||||
<< "IExecution::computeFenced is not supported on 1.0 HAL service";
|
||||
}
|
||||
|
||||
} // namespace android::hardware::neuralnetworks::V1_0::utils
|
|
@ -19,6 +19,7 @@
|
|||
#include "Burst.h"
|
||||
#include "Callbacks.h"
|
||||
#include "Conversions.h"
|
||||
#include "Execution.h"
|
||||
#include "Utils.h"
|
||||
|
||||
#include <android/hardware/neuralnetworks/1.0/IPreparedModel.h>
|
||||
|
@ -61,22 +62,34 @@ nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> Prepare
|
|||
const nn::OptionalDuration& /*loopTimeoutDuration*/) const {
|
||||
// Ensure that request is ready for IPC.
|
||||
std::optional<nn::Request> maybeRequestInShared;
|
||||
const nn::Request& requestInShared = NN_TRY(hal::utils::makeExecutionFailure(
|
||||
hal::utils::flushDataFromPointerToShared(&request, &maybeRequestInShared)));
|
||||
hal::utils::RequestRelocation relocation;
|
||||
const nn::Request& requestInShared =
|
||||
NN_TRY(hal::utils::makeExecutionFailure(hal::utils::convertRequestFromPointerToShared(
|
||||
&request, &maybeRequestInShared, &relocation)));
|
||||
|
||||
const auto hidlRequest = NN_TRY(hal::utils::makeExecutionFailure(convert(requestInShared)));
|
||||
|
||||
return executeInternal(hidlRequest, relocation);
|
||||
}
|
||||
|
||||
nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>>
|
||||
PreparedModel::executeInternal(const V1_0::Request& request,
|
||||
const hal::utils::RequestRelocation& relocation) const {
|
||||
if (relocation.input) {
|
||||
relocation.input->flush();
|
||||
}
|
||||
|
||||
const auto cb = sp<ExecutionCallback>::make();
|
||||
const auto scoped = kDeathHandler.protectCallback(cb.get());
|
||||
|
||||
const auto ret = kPreparedModel->execute(hidlRequest, cb);
|
||||
const auto ret = kPreparedModel->execute(request, cb);
|
||||
const auto status = HANDLE_TRANSPORT_FAILURE(ret);
|
||||
HANDLE_HAL_STATUS(status) << "execution failed with " << toString(status);
|
||||
|
||||
auto result = NN_TRY(cb->get());
|
||||
NN_TRY(hal::utils::makeExecutionFailure(
|
||||
hal::utils::unflushDataFromSharedToPointer(request, maybeRequestInShared)));
|
||||
|
||||
if (relocation.output) {
|
||||
relocation.output->flush();
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
|
@ -91,6 +104,19 @@ PreparedModel::executeFenced(const nn::Request& /*request*/,
|
|||
<< "IPreparedModel::executeFenced is not supported on 1.0 HAL service";
|
||||
}
|
||||
|
||||
nn::GeneralResult<nn::SharedExecution> PreparedModel::createReusableExecution(
|
||||
const nn::Request& request, nn::MeasureTiming /*measure*/,
|
||||
const nn::OptionalDuration& /*loopTimeoutDuration*/) const {
|
||||
// Ensure that request is ready for IPC.
|
||||
std::optional<nn::Request> maybeRequestInShared;
|
||||
hal::utils::RequestRelocation relocation;
|
||||
const nn::Request& requestInShared = NN_TRY(hal::utils::convertRequestFromPointerToShared(
|
||||
&request, &maybeRequestInShared, &relocation));
|
||||
|
||||
auto hidlRequest = NN_TRY(convert(requestInShared));
|
||||
return Execution::create(shared_from_this(), std::move(hidlRequest), std::move(relocation));
|
||||
}
|
||||
|
||||
nn::GeneralResult<nn::SharedBurst> PreparedModel::configureExecutionBurst() const {
|
||||
return Burst::create(shared_from_this());
|
||||
}
|
||||
|
|
|
@ -19,6 +19,7 @@
|
|||
#include <android/hardware/neuralnetworks/1.0/IExecutionCallback.h>
|
||||
#include <gmock/gmock.h>
|
||||
#include <gtest/gtest.h>
|
||||
#include <nnapi/IExecution.h>
|
||||
#include <nnapi/IPreparedModel.h>
|
||||
#include <nnapi/TypeUtils.h>
|
||||
#include <nnapi/Types.h>
|
||||
|
@ -224,6 +225,150 @@ TEST(PreparedModelTest, executeFencedNotSupported) {
|
|||
EXPECT_EQ(result.error().code, nn::ErrorStatus::GENERAL_FAILURE);
|
||||
}
|
||||
|
||||
TEST(PreparedModelTest, reusableExecute) {
|
||||
// setup call
|
||||
const uint32_t kNumberOfComputations = 2;
|
||||
const auto mockPreparedModel = createMockPreparedModel();
|
||||
const auto preparedModel = PreparedModel::create(mockPreparedModel).value();
|
||||
EXPECT_CALL(*mockPreparedModel, execute(_, _))
|
||||
.Times(kNumberOfComputations)
|
||||
.WillRepeatedly(Invoke(makeExecute(V1_0::ErrorStatus::NONE, V1_0::ErrorStatus::NONE)));
|
||||
|
||||
// create execution
|
||||
const auto createResult = preparedModel->createReusableExecution({}, {}, {});
|
||||
ASSERT_TRUE(createResult.has_value())
|
||||
<< "Failed with " << createResult.error().code << ": " << createResult.error().message;
|
||||
ASSERT_NE(createResult.value(), nullptr);
|
||||
|
||||
// invoke compute repeatedly
|
||||
for (uint32_t i = 0; i < kNumberOfComputations; i++) {
|
||||
const auto computeResult = createResult.value()->compute({});
|
||||
EXPECT_TRUE(computeResult.has_value()) << "Failed with " << computeResult.error().code
|
||||
<< ": " << computeResult.error().message;
|
||||
}
|
||||
}
|
||||
|
||||
TEST(PreparedModelTest, reusableExecuteLaunchError) {
|
||||
// setup test
|
||||
const auto mockPreparedModel = createMockPreparedModel();
|
||||
const auto preparedModel = PreparedModel::create(mockPreparedModel).value();
|
||||
EXPECT_CALL(*mockPreparedModel, execute(_, _))
|
||||
.Times(1)
|
||||
.WillOnce(Invoke(makeExecute(V1_0::ErrorStatus::GENERAL_FAILURE,
|
||||
V1_0::ErrorStatus::GENERAL_FAILURE)));
|
||||
|
||||
// create execution
|
||||
const auto createResult = preparedModel->createReusableExecution({}, {}, {});
|
||||
ASSERT_TRUE(createResult.has_value())
|
||||
<< "Failed with " << createResult.error().code << ": " << createResult.error().message;
|
||||
ASSERT_NE(createResult.value(), nullptr);
|
||||
|
||||
// invoke compute
|
||||
const auto computeResult = createResult.value()->compute({});
|
||||
ASSERT_FALSE(computeResult.has_value());
|
||||
EXPECT_EQ(computeResult.error().code, nn::ErrorStatus::GENERAL_FAILURE);
|
||||
}
|
||||
|
||||
TEST(PreparedModelTest, reusableExecuteReturnError) {
|
||||
// setup test
|
||||
const auto mockPreparedModel = createMockPreparedModel();
|
||||
const auto preparedModel = PreparedModel::create(mockPreparedModel).value();
|
||||
EXPECT_CALL(*mockPreparedModel, execute(_, _))
|
||||
.Times(1)
|
||||
.WillOnce(Invoke(
|
||||
makeExecute(V1_0::ErrorStatus::NONE, V1_0::ErrorStatus::GENERAL_FAILURE)));
|
||||
|
||||
// create execution
|
||||
const auto createResult = preparedModel->createReusableExecution({}, {}, {});
|
||||
ASSERT_TRUE(createResult.has_value())
|
||||
<< "Failed with " << createResult.error().code << ": " << createResult.error().message;
|
||||
ASSERT_NE(createResult.value(), nullptr);
|
||||
|
||||
// invoke compute
|
||||
const auto computeResult = createResult.value()->compute({});
|
||||
ASSERT_FALSE(computeResult.has_value());
|
||||
EXPECT_EQ(computeResult.error().code, nn::ErrorStatus::GENERAL_FAILURE);
|
||||
}
|
||||
|
||||
TEST(PreparedModelTest, reusableExecuteTransportFailure) {
|
||||
// setup test
|
||||
const auto mockPreparedModel = createMockPreparedModel();
|
||||
const auto preparedModel = PreparedModel::create(mockPreparedModel).value();
|
||||
EXPECT_CALL(*mockPreparedModel, execute(_, _))
|
||||
.Times(1)
|
||||
.WillOnce(InvokeWithoutArgs(makeGeneralTransportFailure));
|
||||
|
||||
// create execution
|
||||
const auto createResult = preparedModel->createReusableExecution({}, {}, {});
|
||||
ASSERT_TRUE(createResult.has_value())
|
||||
<< "Failed with " << createResult.error().code << ": " << createResult.error().message;
|
||||
ASSERT_NE(createResult.value(), nullptr);
|
||||
|
||||
// invoke compute
|
||||
const auto computeResult = createResult.value()->compute({});
|
||||
ASSERT_FALSE(computeResult.has_value());
|
||||
EXPECT_EQ(computeResult.error().code, nn::ErrorStatus::GENERAL_FAILURE);
|
||||
}
|
||||
|
||||
TEST(PreparedModelTest, reusableExecuteDeadObject) {
|
||||
// setup test
|
||||
const auto mockPreparedModel = createMockPreparedModel();
|
||||
const auto preparedModel = PreparedModel::create(mockPreparedModel).value();
|
||||
EXPECT_CALL(*mockPreparedModel, execute(_, _))
|
||||
.Times(1)
|
||||
.WillOnce(InvokeWithoutArgs(makeDeadObjectFailure));
|
||||
|
||||
// create execution
|
||||
const auto createResult = preparedModel->createReusableExecution({}, {}, {});
|
||||
ASSERT_TRUE(createResult.has_value())
|
||||
<< "Failed with " << createResult.error().code << ": " << createResult.error().message;
|
||||
ASSERT_NE(createResult.value(), nullptr);
|
||||
|
||||
// invoke compute
|
||||
const auto computeResult = createResult.value()->compute({});
|
||||
ASSERT_FALSE(computeResult.has_value());
|
||||
EXPECT_EQ(computeResult.error().code, nn::ErrorStatus::DEAD_OBJECT);
|
||||
}
|
||||
|
||||
TEST(PreparedModelTest, reusableExecuteCrash) {
|
||||
// setup test
|
||||
const auto mockPreparedModel = createMockPreparedModel();
|
||||
const auto preparedModel = PreparedModel::create(mockPreparedModel).value();
|
||||
const auto ret = [&mockPreparedModel]() -> hardware::Return<V1_0::ErrorStatus> {
|
||||
mockPreparedModel->simulateCrash();
|
||||
return V1_0::ErrorStatus::NONE;
|
||||
};
|
||||
EXPECT_CALL(*mockPreparedModel, execute(_, _)).Times(1).WillOnce(InvokeWithoutArgs(ret));
|
||||
|
||||
// create execution
|
||||
const auto createResult = preparedModel->createReusableExecution({}, {}, {});
|
||||
ASSERT_TRUE(createResult.has_value())
|
||||
<< "Failed with " << createResult.error().code << ": " << createResult.error().message;
|
||||
ASSERT_NE(createResult.value(), nullptr);
|
||||
|
||||
// invoke compute
|
||||
const auto computeResult = createResult.value()->compute({});
|
||||
ASSERT_FALSE(computeResult.has_value());
|
||||
EXPECT_EQ(computeResult.error().code, nn::ErrorStatus::DEAD_OBJECT);
|
||||
}
|
||||
|
||||
TEST(PreparedModelTest, reusableExecuteFencedNotSupported) {
|
||||
// setup test
|
||||
const auto mockPreparedModel = createMockPreparedModel();
|
||||
const auto preparedModel = PreparedModel::create(mockPreparedModel).value();
|
||||
|
||||
// create execution
|
||||
const auto createResult = preparedModel->createReusableExecution({}, {}, {});
|
||||
ASSERT_TRUE(createResult.has_value())
|
||||
<< "Failed with " << createResult.error().code << ": " << createResult.error().message;
|
||||
ASSERT_NE(createResult.value(), nullptr);
|
||||
|
||||
// invoke compute
|
||||
const auto computeResult = createResult.value()->computeFenced({}, {}, {});
|
||||
ASSERT_FALSE(computeResult.has_value());
|
||||
EXPECT_EQ(computeResult.error().code, nn::ErrorStatus::GENERAL_FAILURE);
|
||||
}
|
||||
|
||||
TEST(PreparedModelTest, configureExecutionBurst) {
|
||||
// setup test
|
||||
const auto mockPreparedModel = MockPreparedModel::create();
|
||||
|
|
66
neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Execution.h
Normal file
66
neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Execution.h
Normal file
|
@ -0,0 +1,66 @@
|
|||
/*
|
||||
* Copyright (C) 2021 The Android Open Source Project
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_2_UTILS_EXECUTION_H
|
||||
#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_2_UTILS_EXECUTION_H
|
||||
|
||||
#include <android/hardware/neuralnetworks/1.2/IPreparedModel.h>
|
||||
#include <nnapi/IExecution.h>
|
||||
#include <nnapi/Result.h>
|
||||
#include <nnapi/Types.h>
|
||||
#include <nnapi/hal/CommonUtils.h>
|
||||
#include <nnapi/hal/ProtectCallback.h>
|
||||
|
||||
#include "PreparedModel.h"
|
||||
|
||||
#include <memory>
|
||||
#include <utility>
|
||||
#include <vector>
|
||||
|
||||
// See hardware/interfaces/neuralnetworks/utils/README.md for more information on HIDL interface
|
||||
// lifetimes across processes and for protecting asynchronous calls across HIDL.
|
||||
|
||||
namespace android::hardware::neuralnetworks::V1_2::utils {
|
||||
|
||||
class Execution final : public nn::IExecution, public std::enable_shared_from_this<Execution> {
|
||||
struct PrivateConstructorTag {};
|
||||
|
||||
public:
|
||||
static nn::GeneralResult<std::shared_ptr<const Execution>> create(
|
||||
std::shared_ptr<const PreparedModel> preparedModel, V1_0::Request request,
|
||||
hal::utils::RequestRelocation relocation, V1_2::MeasureTiming measure);
|
||||
|
||||
Execution(PrivateConstructorTag tag, std::shared_ptr<const PreparedModel> preparedModel,
|
||||
V1_0::Request request, hal::utils::RequestRelocation relocation,
|
||||
V1_2::MeasureTiming measure);
|
||||
|
||||
nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> compute(
|
||||
const nn::OptionalTimePoint& deadline) const override;
|
||||
|
||||
nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>> computeFenced(
|
||||
const std::vector<nn::SyncFence>& waitFor, const nn::OptionalTimePoint& deadline,
|
||||
const nn::OptionalDuration& timeoutDurationAfterFence) const override;
|
||||
|
||||
private:
|
||||
const std::shared_ptr<const PreparedModel> kPreparedModel;
|
||||
const V1_0::Request kRequest;
|
||||
const hal::utils::RequestRelocation kRelocation;
|
||||
const MeasureTiming kMeasure;
|
||||
};
|
||||
|
||||
} // namespace android::hardware::neuralnetworks::V1_2::utils
|
||||
|
||||
#endif // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_2_UTILS_EXECUTION_H
|
|
@ -28,9 +28,11 @@
|
|||
#include <fmq/MessageQueue.h>
|
||||
#include <hidl/MQDescriptor.h>
|
||||
#include <nnapi/IBurst.h>
|
||||
#include <nnapi/IExecution.h>
|
||||
#include <nnapi/IPreparedModel.h>
|
||||
#include <nnapi/Result.h>
|
||||
#include <nnapi/Types.h>
|
||||
#include <nnapi/hal/CommonUtils.h>
|
||||
#include <nnapi/hal/ProtectCallback.h>
|
||||
|
||||
#include <atomic>
|
||||
|
@ -51,14 +53,14 @@ namespace android::hardware::neuralnetworks::V1_2::utils {
|
|||
* across FMQ, making it appear to the runtime as a regular synchronous inference. Additionally,
|
||||
* this class manages the burst's memory cache.
|
||||
*/
|
||||
class ExecutionBurstController final : public nn::IBurst {
|
||||
class ExecutionBurstController final
|
||||
: public nn::IBurst,
|
||||
public std::enable_shared_from_this<ExecutionBurstController> {
|
||||
struct PrivateConstructorTag {};
|
||||
|
||||
public:
|
||||
using FallbackFunction =
|
||||
std::function<nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>>(
|
||||
const nn::Request&, nn::MeasureTiming, const nn::OptionalTimePoint&,
|
||||
const nn::OptionalDuration&)>;
|
||||
using FallbackFunction = std::function<
|
||||
nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>>()>;
|
||||
|
||||
/**
|
||||
* NN runtime memory cache.
|
||||
|
@ -154,10 +156,10 @@ class ExecutionBurstController final : public nn::IBurst {
|
|||
* @return ExecutionBurstController Execution burst controller object.
|
||||
*/
|
||||
static nn::GeneralResult<std::shared_ptr<const ExecutionBurstController>> create(
|
||||
const sp<IPreparedModel>& preparedModel, FallbackFunction fallback,
|
||||
nn::SharedPreparedModel preparedModel, const sp<IPreparedModel>& hidlPreparedModel,
|
||||
std::chrono::microseconds pollingTimeWindow);
|
||||
|
||||
ExecutionBurstController(PrivateConstructorTag tag, FallbackFunction fallback,
|
||||
ExecutionBurstController(PrivateConstructorTag tag, nn::SharedPreparedModel preparedModel,
|
||||
std::unique_ptr<RequestChannelSender> requestChannelSender,
|
||||
std::unique_ptr<ResultChannelReceiver> resultChannelReceiver,
|
||||
sp<ExecutionBurstCallback> callback, sp<IBurstContext> burstContext,
|
||||
|
@ -173,9 +175,21 @@ class ExecutionBurstController final : public nn::IBurst {
|
|||
const nn::OptionalTimePoint& deadline,
|
||||
const nn::OptionalDuration& loopTimeoutDuration) const override;
|
||||
|
||||
// See IBurst::createReusableExecution for information on this method.
|
||||
nn::GeneralResult<nn::SharedExecution> createReusableExecution(
|
||||
const nn::Request& request, nn::MeasureTiming measure,
|
||||
const nn::OptionalDuration& loopTimeoutDuration) const override;
|
||||
|
||||
// If fallback is not nullptr, this method will invoke the fallback function to try another
|
||||
// execution path if the packet could not be sent. Otherwise, failing to send the packet will
|
||||
// result in an error.
|
||||
nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> executeInternal(
|
||||
const std::vector<FmqRequestDatum>& requestPacket,
|
||||
const hal::utils::RequestRelocation& relocation, FallbackFunction fallback) const;
|
||||
|
||||
private:
|
||||
mutable std::atomic_flag mExecutionInFlight = ATOMIC_FLAG_INIT;
|
||||
const FallbackFunction kFallback;
|
||||
const nn::SharedPreparedModel kPreparedModel;
|
||||
const std::unique_ptr<RequestChannelSender> mRequestChannelSender;
|
||||
const std::unique_ptr<ResultChannelReceiver> mResultChannelReceiver;
|
||||
const sp<ExecutionBurstCallback> mBurstCallback;
|
||||
|
|
|
@ -58,10 +58,18 @@ class PreparedModel final : public nn::IPreparedModel,
|
|||
const nn::OptionalDuration& loopTimeoutDuration,
|
||||
const nn::OptionalDuration& timeoutDurationAfterFence) const override;
|
||||
|
||||
nn::GeneralResult<nn::SharedExecution> createReusableExecution(
|
||||
const nn::Request& request, nn::MeasureTiming measure,
|
||||
const nn::OptionalDuration& loopTimeoutDuration) const override;
|
||||
|
||||
nn::GeneralResult<nn::SharedBurst> configureExecutionBurst() const override;
|
||||
|
||||
std::any getUnderlyingResource() const override;
|
||||
|
||||
nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> executeInternal(
|
||||
const V1_0::Request& request, MeasureTiming measure,
|
||||
const hal::utils::RequestRelocation& relocation) const;
|
||||
|
||||
private:
|
||||
nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> executeSynchronously(
|
||||
const V1_0::Request& request, MeasureTiming measure) const;
|
||||
|
|
74
neuralnetworks/1.2/utils/src/Execution.cpp
Normal file
74
neuralnetworks/1.2/utils/src/Execution.cpp
Normal file
|
@ -0,0 +1,74 @@
|
|||
/*
|
||||
* Copyright (C) 2021 The Android Open Source Project
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#include "Execution.h"
|
||||
|
||||
#include "Callbacks.h"
|
||||
#include "Conversions.h"
|
||||
#include "Utils.h"
|
||||
|
||||
#include <android/hardware/neuralnetworks/1.0/types.h>
|
||||
#include <android/hardware/neuralnetworks/1.1/types.h>
|
||||
#include <android/hardware/neuralnetworks/1.2/IPreparedModel.h>
|
||||
#include <android/hardware/neuralnetworks/1.2/types.h>
|
||||
#include <nnapi/IExecution.h>
|
||||
#include <nnapi/Result.h>
|
||||
#include <nnapi/TypeUtils.h>
|
||||
#include <nnapi/Types.h>
|
||||
#include <nnapi/hal/CommonUtils.h>
|
||||
#include <nnapi/hal/HandleError.h>
|
||||
|
||||
#include <memory>
|
||||
#include <utility>
|
||||
#include <vector>
|
||||
|
||||
// See hardware/interfaces/neuralnetworks/utils/README.md for more information on HIDL interface
|
||||
// lifetimes across processes and for protecting asynchronous calls across HIDL.
|
||||
|
||||
namespace android::hardware::neuralnetworks::V1_2::utils {
|
||||
|
||||
nn::GeneralResult<std::shared_ptr<const Execution>> Execution::create(
|
||||
std::shared_ptr<const PreparedModel> preparedModel, V1_0::Request request,
|
||||
hal::utils::RequestRelocation relocation, V1_2::MeasureTiming measure) {
|
||||
if (preparedModel == nullptr) {
|
||||
return NN_ERROR() << "V1_2::utils::Execution::create must have non-null preparedModel";
|
||||
}
|
||||
|
||||
return std::make_shared<const Execution>(PrivateConstructorTag{}, std::move(preparedModel),
|
||||
std::move(request), std::move(relocation), measure);
|
||||
}
|
||||
|
||||
Execution::Execution(PrivateConstructorTag /*tag*/,
|
||||
std::shared_ptr<const PreparedModel> preparedModel, V1_0::Request request,
|
||||
hal::utils::RequestRelocation relocation, V1_2::MeasureTiming measure)
|
||||
: kPreparedModel(std::move(preparedModel)),
|
||||
kRequest(std::move(request)),
|
||||
kRelocation(std::move(relocation)),
|
||||
kMeasure(measure) {}
|
||||
|
||||
nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> Execution::compute(
|
||||
const nn::OptionalTimePoint& /*deadline*/) const {
|
||||
return kPreparedModel->executeInternal(kRequest, kMeasure, kRelocation);
|
||||
}
|
||||
|
||||
nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>> Execution::computeFenced(
|
||||
const std::vector<nn::SyncFence>& /*waitFor*/, const nn::OptionalTimePoint& /*deadline*/,
|
||||
const nn::OptionalDuration& /*timeoutDurationAfterFence*/) const {
|
||||
return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
|
||||
<< "IExecution::computeFenced is not supported on 1.2 HAL service";
|
||||
}
|
||||
|
||||
} // namespace android::hardware::neuralnetworks::V1_2::utils
|
|
@ -28,6 +28,7 @@
|
|||
#include <nnapi/Types.h>
|
||||
#include <nnapi/Validation.h>
|
||||
#include <nnapi/hal/1.0/Conversions.h>
|
||||
#include <nnapi/hal/CommonUtils.h>
|
||||
#include <nnapi/hal/HandleError.h>
|
||||
#include <nnapi/hal/ProtectCallback.h>
|
||||
#include <nnapi/hal/TransferValue.h>
|
||||
|
@ -50,6 +51,35 @@
|
|||
namespace android::hardware::neuralnetworks::V1_2::utils {
|
||||
namespace {
|
||||
|
||||
class BurstExecution final : public nn::IExecution,
|
||||
public std::enable_shared_from_this<BurstExecution> {
|
||||
struct PrivateConstructorTag {};
|
||||
|
||||
public:
|
||||
static nn::GeneralResult<std::shared_ptr<const BurstExecution>> create(
|
||||
std::shared_ptr<const ExecutionBurstController> controller,
|
||||
std::vector<FmqRequestDatum> request, hal::utils::RequestRelocation relocation,
|
||||
std::vector<ExecutionBurstController::OptionalCacheHold> cacheHolds);
|
||||
|
||||
BurstExecution(PrivateConstructorTag tag,
|
||||
std::shared_ptr<const ExecutionBurstController> controller,
|
||||
std::vector<FmqRequestDatum> request, hal::utils::RequestRelocation relocation,
|
||||
std::vector<ExecutionBurstController::OptionalCacheHold> cacheHolds);
|
||||
|
||||
nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> compute(
|
||||
const nn::OptionalTimePoint& deadline) const override;
|
||||
|
||||
nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>> computeFenced(
|
||||
const std::vector<nn::SyncFence>& waitFor, const nn::OptionalTimePoint& deadline,
|
||||
const nn::OptionalDuration& timeoutDurationAfterFence) const override;
|
||||
|
||||
private:
|
||||
const std::shared_ptr<const ExecutionBurstController> kController;
|
||||
const std::vector<FmqRequestDatum> kRequest;
|
||||
const hal::utils::RequestRelocation kRelocation;
|
||||
const std::vector<ExecutionBurstController::OptionalCacheHold> kCacheHolds;
|
||||
};
|
||||
|
||||
nn::GeneralResult<sp<IBurstContext>> executionBurstResultCallback(
|
||||
V1_0::ErrorStatus status, const sp<IBurstContext>& burstContext) {
|
||||
HANDLE_HAL_STATUS(status) << "IPreparedModel::configureExecutionBurst failed with status "
|
||||
|
@ -209,10 +239,10 @@ Return<void> ExecutionBurstController::ExecutionBurstCallback::getMemories(
|
|||
// ExecutionBurstController methods
|
||||
|
||||
nn::GeneralResult<std::shared_ptr<const ExecutionBurstController>> ExecutionBurstController::create(
|
||||
const sp<V1_2::IPreparedModel>& preparedModel, FallbackFunction fallback,
|
||||
nn::SharedPreparedModel preparedModel, const sp<V1_2::IPreparedModel>& hidlPreparedModel,
|
||||
std::chrono::microseconds pollingTimeWindow) {
|
||||
// check inputs
|
||||
if (preparedModel == nullptr) {
|
||||
if (preparedModel == nullptr || hidlPreparedModel == nullptr) {
|
||||
return NN_ERROR() << "ExecutionBurstController::create passed a nullptr";
|
||||
}
|
||||
|
||||
|
@ -236,7 +266,7 @@ nn::GeneralResult<std::shared_ptr<const ExecutionBurstController>> ExecutionBurs
|
|||
auto cb = hal::utils::CallbackValue(executionBurstResultCallback);
|
||||
|
||||
// configure burst
|
||||
const Return<void> ret = preparedModel->configureExecutionBurst(
|
||||
const Return<void> ret = hidlPreparedModel->configureExecutionBurst(
|
||||
burstCallback, *requestChannelDescriptor, *resultChannelDescriptor, cb);
|
||||
HANDLE_TRANSPORT_FAILURE(ret);
|
||||
|
||||
|
@ -250,18 +280,18 @@ nn::GeneralResult<std::shared_ptr<const ExecutionBurstController>> ExecutionBurs
|
|||
|
||||
// make and return controller
|
||||
return std::make_shared<const ExecutionBurstController>(
|
||||
PrivateConstructorTag{}, std::move(fallback), std::move(requestChannelSender),
|
||||
PrivateConstructorTag{}, std::move(preparedModel), std::move(requestChannelSender),
|
||||
std::move(resultChannelReceiver), std::move(burstCallback), std::move(burstContext),
|
||||
std::move(memoryCache), std::move(deathHandler));
|
||||
}
|
||||
|
||||
ExecutionBurstController::ExecutionBurstController(
|
||||
PrivateConstructorTag /*tag*/, FallbackFunction fallback,
|
||||
PrivateConstructorTag /*tag*/, nn::SharedPreparedModel preparedModel,
|
||||
std::unique_ptr<RequestChannelSender> requestChannelSender,
|
||||
std::unique_ptr<ResultChannelReceiver> resultChannelReceiver,
|
||||
sp<ExecutionBurstCallback> callback, sp<IBurstContext> burstContext,
|
||||
std::shared_ptr<MemoryCache> memoryCache, neuralnetworks::utils::DeathHandler deathHandler)
|
||||
: kFallback(std::move(fallback)),
|
||||
: kPreparedModel(std::move(preparedModel)),
|
||||
mRequestChannelSender(std::move(requestChannelSender)),
|
||||
mResultChannelReceiver(std::move(resultChannelReceiver)),
|
||||
mBurstCallback(std::move(callback)),
|
||||
|
@ -283,26 +313,96 @@ ExecutionBurstController::execute(const nn::Request& request, nn::MeasureTiming
|
|||
// systraces. Note that the first point we can begin collecting systraces in
|
||||
// ExecutionBurstServer is when the RequestChannelReceiver realizes there is data in the FMQ, so
|
||||
// ExecutionBurstServer collects systraces at different points in the code.
|
||||
NNTRACE_FULL(NNTRACE_LAYER_IPC, NNTRACE_PHASE_EXECUTION, "ExecutionBurstController::execute");
|
||||
NNTRACE_RT(NNTRACE_PHASE_EXECUTION, "ExecutionBurstController::execute");
|
||||
|
||||
// if the request is valid but of a higher version than what's supported in burst execution,
|
||||
// fall back to another execution path
|
||||
if (const auto version = NN_TRY(hal::utils::makeExecutionFailure(nn::validate(request)));
|
||||
version > nn::Version::ANDROID_Q) {
|
||||
// fallback to another execution path if the packet could not be sent
|
||||
if (kFallback) {
|
||||
return kFallback(request, measure, deadline, loopTimeoutDuration);
|
||||
}
|
||||
return NN_ERROR() << "Request object has features not supported by IBurst::execute";
|
||||
return kPreparedModel->execute(request, measure, deadline, loopTimeoutDuration);
|
||||
}
|
||||
|
||||
// ensure that request is ready for IPC
|
||||
std::optional<nn::Request> maybeRequestInShared;
|
||||
hal::utils::RequestRelocation relocation;
|
||||
const nn::Request& requestInShared =
|
||||
NN_TRY(hal::utils::makeExecutionFailure(hal::utils::convertRequestFromPointerToShared(
|
||||
&request, &maybeRequestInShared, &relocation)));
|
||||
|
||||
// clear pools field of request, as they will be provided via slots
|
||||
const auto requestWithoutPools =
|
||||
nn::Request{.inputs = request.inputs, .outputs = request.outputs, .pools = {}};
|
||||
const auto requestWithoutPools = nn::Request{
|
||||
.inputs = requestInShared.inputs, .outputs = requestInShared.outputs, .pools = {}};
|
||||
auto hidlRequest = NN_TRY(
|
||||
hal::utils::makeExecutionFailure(V1_0::utils::unvalidatedConvert(requestWithoutPools)));
|
||||
const auto hidlMeasure = NN_TRY(hal::utils::makeExecutionFailure(convert(measure)));
|
||||
|
||||
std::vector<int32_t> slots;
|
||||
std::vector<OptionalCacheHold> holds;
|
||||
slots.reserve(requestInShared.pools.size());
|
||||
holds.reserve(requestInShared.pools.size());
|
||||
for (const auto& memoryPool : requestInShared.pools) {
|
||||
auto [slot, hold] = mMemoryCache->cacheMemory(std::get<nn::SharedMemory>(memoryPool));
|
||||
slots.push_back(slot);
|
||||
holds.push_back(std::move(hold));
|
||||
}
|
||||
|
||||
// send request packet
|
||||
const auto requestPacket = serialize(hidlRequest, hidlMeasure, slots);
|
||||
const auto fallback = [this, &request, measure, &deadline, &loopTimeoutDuration] {
|
||||
return kPreparedModel->execute(request, measure, deadline, loopTimeoutDuration);
|
||||
};
|
||||
return executeInternal(requestPacket, relocation, fallback);
|
||||
}
|
||||
|
||||
// See IBurst::createReusableExecution for information on this method.
|
||||
nn::GeneralResult<nn::SharedExecution> ExecutionBurstController::createReusableExecution(
|
||||
const nn::Request& request, nn::MeasureTiming measure,
|
||||
const nn::OptionalDuration& loopTimeoutDuration) const {
|
||||
NNTRACE_RT(NNTRACE_PHASE_EXECUTION, "ExecutionBurstController::createReusableExecution");
|
||||
|
||||
// if the request is valid but of a higher version than what's supported in burst execution,
|
||||
// fall back to another execution path
|
||||
if (const auto version = NN_TRY(hal::utils::makeGeneralFailure(nn::validate(request)));
|
||||
version > nn::Version::ANDROID_Q) {
|
||||
// fallback to another execution path if the packet could not be sent
|
||||
return kPreparedModel->createReusableExecution(request, measure, loopTimeoutDuration);
|
||||
}
|
||||
|
||||
// ensure that request is ready for IPC
|
||||
std::optional<nn::Request> maybeRequestInShared;
|
||||
hal::utils::RequestRelocation relocation;
|
||||
const nn::Request& requestInShared = NN_TRY(hal::utils::convertRequestFromPointerToShared(
|
||||
&request, &maybeRequestInShared, &relocation));
|
||||
|
||||
// clear pools field of request, as they will be provided via slots
|
||||
const auto requestWithoutPools = nn::Request{
|
||||
.inputs = requestInShared.inputs, .outputs = requestInShared.outputs, .pools = {}};
|
||||
auto hidlRequest = NN_TRY(V1_0::utils::unvalidatedConvert(requestWithoutPools));
|
||||
const auto hidlMeasure = NN_TRY(convert(measure));
|
||||
|
||||
std::vector<int32_t> slots;
|
||||
std::vector<OptionalCacheHold> holds;
|
||||
slots.reserve(requestInShared.pools.size());
|
||||
holds.reserve(requestInShared.pools.size());
|
||||
for (const auto& memoryPool : requestInShared.pools) {
|
||||
auto [slot, hold] = mMemoryCache->cacheMemory(std::get<nn::SharedMemory>(memoryPool));
|
||||
slots.push_back(slot);
|
||||
holds.push_back(std::move(hold));
|
||||
}
|
||||
|
||||
const auto requestPacket = serialize(hidlRequest, hidlMeasure, slots);
|
||||
return BurstExecution::create(shared_from_this(), std::move(requestPacket),
|
||||
std::move(relocation), std::move(holds));
|
||||
}
|
||||
|
||||
nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>>
|
||||
ExecutionBurstController::executeInternal(const std::vector<FmqRequestDatum>& requestPacket,
|
||||
const hal::utils::RequestRelocation& relocation,
|
||||
FallbackFunction fallback) const {
|
||||
NNTRACE_FULL(NNTRACE_LAYER_IPC, NNTRACE_PHASE_EXECUTION,
|
||||
"ExecutionBurstController::executeInternal");
|
||||
|
||||
// Ensure that at most one execution is in flight at any given time.
|
||||
const bool alreadyInFlight = mExecutionInFlight.test_and_set();
|
||||
if (alreadyInFlight) {
|
||||
|
@ -310,22 +410,16 @@ ExecutionBurstController::execute(const nn::Request& request, nn::MeasureTiming
|
|||
}
|
||||
const auto guard = base::make_scope_guard([this] { mExecutionInFlight.clear(); });
|
||||
|
||||
std::vector<int32_t> slots;
|
||||
std::vector<OptionalCacheHold> holds;
|
||||
slots.reserve(request.pools.size());
|
||||
holds.reserve(request.pools.size());
|
||||
for (const auto& memoryPool : request.pools) {
|
||||
auto [slot, hold] = mMemoryCache->cacheMemory(std::get<nn::SharedMemory>(memoryPool));
|
||||
slots.push_back(slot);
|
||||
holds.push_back(std::move(hold));
|
||||
if (relocation.input) {
|
||||
relocation.input->flush();
|
||||
}
|
||||
|
||||
// send request packet
|
||||
const auto sendStatus = mRequestChannelSender->send(hidlRequest, hidlMeasure, slots);
|
||||
const auto sendStatus = mRequestChannelSender->sendPacket(requestPacket);
|
||||
if (!sendStatus.ok()) {
|
||||
// fallback to another execution path if the packet could not be sent
|
||||
if (kFallback) {
|
||||
return kFallback(request, measure, deadline, loopTimeoutDuration);
|
||||
if (fallback) {
|
||||
return fallback();
|
||||
}
|
||||
return NN_ERROR() << "Error sending FMQ packet: " << sendStatus.error();
|
||||
}
|
||||
|
@ -333,7 +427,47 @@ ExecutionBurstController::execute(const nn::Request& request, nn::MeasureTiming
|
|||
// get result packet
|
||||
const auto [status, outputShapes, timing] =
|
||||
NN_TRY(hal::utils::makeExecutionFailure(mResultChannelReceiver->getBlocking()));
|
||||
|
||||
if (relocation.output) {
|
||||
relocation.output->flush();
|
||||
}
|
||||
return executionCallback(status, outputShapes, timing);
|
||||
}
|
||||
|
||||
nn::GeneralResult<std::shared_ptr<const BurstExecution>> BurstExecution::create(
|
||||
std::shared_ptr<const ExecutionBurstController> controller,
|
||||
std::vector<FmqRequestDatum> request, hal::utils::RequestRelocation relocation,
|
||||
std::vector<ExecutionBurstController::OptionalCacheHold> cacheHolds) {
|
||||
if (controller == nullptr) {
|
||||
return NN_ERROR() << "V1_2::utils::BurstExecution::create must have non-null controller";
|
||||
}
|
||||
|
||||
return std::make_shared<const BurstExecution>(PrivateConstructorTag{}, std::move(controller),
|
||||
std::move(request), std::move(relocation),
|
||||
std::move(cacheHolds));
|
||||
}
|
||||
|
||||
BurstExecution::BurstExecution(PrivateConstructorTag /*tag*/,
|
||||
std::shared_ptr<const ExecutionBurstController> controller,
|
||||
std::vector<FmqRequestDatum> request,
|
||||
hal::utils::RequestRelocation relocation,
|
||||
std::vector<ExecutionBurstController::OptionalCacheHold> cacheHolds)
|
||||
: kController(std::move(controller)),
|
||||
kRequest(std::move(request)),
|
||||
kRelocation(std::move(relocation)),
|
||||
kCacheHolds(std::move(cacheHolds)) {}
|
||||
|
||||
nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> BurstExecution::compute(
|
||||
const nn::OptionalTimePoint& /*deadline*/) const {
|
||||
return kController->executeInternal(kRequest, kRelocation, /*fallback=*/nullptr);
|
||||
}
|
||||
|
||||
nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>>
|
||||
BurstExecution::computeFenced(const std::vector<nn::SyncFence>& /*waitFor*/,
|
||||
const nn::OptionalTimePoint& /*deadline*/,
|
||||
const nn::OptionalDuration& /*timeoutDurationAfterFence*/) const {
|
||||
return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
|
||||
<< "IExecution::computeFenced is not supported on burst object";
|
||||
}
|
||||
|
||||
} // namespace android::hardware::neuralnetworks::V1_2::utils
|
||||
|
|
|
@ -18,6 +18,7 @@
|
|||
|
||||
#include "Callbacks.h"
|
||||
#include "Conversions.h"
|
||||
#include "Execution.h"
|
||||
#include "ExecutionBurstController.h"
|
||||
#include "ExecutionBurstUtils.h"
|
||||
#include "Utils.h"
|
||||
|
@ -93,19 +94,31 @@ nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> Prepare
|
|||
const nn::OptionalDuration& /*loopTimeoutDuration*/) const {
|
||||
// Ensure that request is ready for IPC.
|
||||
std::optional<nn::Request> maybeRequestInShared;
|
||||
const nn::Request& requestInShared = NN_TRY(hal::utils::makeExecutionFailure(
|
||||
hal::utils::flushDataFromPointerToShared(&request, &maybeRequestInShared)));
|
||||
hal::utils::RequestRelocation relocation;
|
||||
const nn::Request& requestInShared =
|
||||
NN_TRY(hal::utils::makeExecutionFailure(hal::utils::convertRequestFromPointerToShared(
|
||||
&request, &maybeRequestInShared, &relocation)));
|
||||
|
||||
const auto hidlRequest = NN_TRY(hal::utils::makeExecutionFailure(convert(requestInShared)));
|
||||
const auto hidlMeasure = NN_TRY(hal::utils::makeExecutionFailure(convert(measure)));
|
||||
|
||||
auto result = kExecuteSynchronously ? executeSynchronously(hidlRequest, hidlMeasure)
|
||||
: executeAsynchronously(hidlRequest, hidlMeasure);
|
||||
return executeInternal(hidlRequest, hidlMeasure, relocation);
|
||||
}
|
||||
|
||||
nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>>
|
||||
PreparedModel::executeInternal(const V1_0::Request& request, MeasureTiming measure,
|
||||
const hal::utils::RequestRelocation& relocation) const {
|
||||
if (relocation.input) {
|
||||
relocation.input->flush();
|
||||
}
|
||||
|
||||
auto result = kExecuteSynchronously ? executeSynchronously(request, measure)
|
||||
: executeAsynchronously(request, measure);
|
||||
auto [outputShapes, timing] = NN_TRY(std::move(result));
|
||||
|
||||
NN_TRY(hal::utils::makeExecutionFailure(
|
||||
hal::utils::unflushDataFromSharedToPointer(request, maybeRequestInShared)));
|
||||
|
||||
if (relocation.output) {
|
||||
relocation.output->flush();
|
||||
}
|
||||
return std::make_pair(std::move(outputShapes), timing);
|
||||
}
|
||||
|
||||
|
@ -120,6 +133,21 @@ PreparedModel::executeFenced(const nn::Request& /*request*/,
|
|||
<< "IPreparedModel::executeFenced is not supported on 1.2 HAL service";
|
||||
}
|
||||
|
||||
nn::GeneralResult<nn::SharedExecution> PreparedModel::createReusableExecution(
|
||||
const nn::Request& request, nn::MeasureTiming measure,
|
||||
const nn::OptionalDuration& /*loopTimeoutDuration*/) const {
|
||||
// Ensure that request is ready for IPC.
|
||||
std::optional<nn::Request> maybeRequestInShared;
|
||||
hal::utils::RequestRelocation relocation;
|
||||
const nn::Request& requestInShared = NN_TRY(hal::utils::convertRequestFromPointerToShared(
|
||||
&request, &maybeRequestInShared, &relocation));
|
||||
|
||||
auto hidlRequest = NN_TRY(convert(requestInShared));
|
||||
auto hidlMeasure = NN_TRY(convert(measure));
|
||||
return Execution::create(shared_from_this(), std::move(hidlRequest), std::move(relocation),
|
||||
hidlMeasure);
|
||||
}
|
||||
|
||||
nn::GeneralResult<nn::SharedBurst> PreparedModel::configureExecutionBurst() const {
|
||||
auto self = shared_from_this();
|
||||
auto fallback = [preparedModel = std::move(self)](
|
||||
|
@ -130,7 +158,7 @@ nn::GeneralResult<nn::SharedBurst> PreparedModel::configureExecutionBurst() cons
|
|||
return preparedModel->execute(request, measure, deadline, loopTimeoutDuration);
|
||||
};
|
||||
const auto pollingTimeWindow = getBurstControllerPollingTimeWindow();
|
||||
return ExecutionBurstController::create(kPreparedModel, std::move(fallback), pollingTimeWindow);
|
||||
return ExecutionBurstController::create(shared_from_this(), kPreparedModel, pollingTimeWindow);
|
||||
}
|
||||
|
||||
std::any PreparedModel::getUnderlyingResource() const {
|
||||
|
|
|
@ -21,6 +21,7 @@
|
|||
#include <android/hardware/neuralnetworks/1.2/IExecutionCallback.h>
|
||||
#include <gmock/gmock.h>
|
||||
#include <gtest/gtest.h>
|
||||
#include <nnapi/IExecution.h>
|
||||
#include <nnapi/IPreparedModel.h>
|
||||
#include <nnapi/TypeUtils.h>
|
||||
#include <nnapi/Types.h>
|
||||
|
@ -334,6 +335,248 @@ TEST(PreparedModelTest, executeFencedNotSupported) {
|
|||
EXPECT_EQ(result.error().code, nn::ErrorStatus::GENERAL_FAILURE);
|
||||
}
|
||||
|
||||
TEST(PreparedModelTest, reusableExecuteSync) {
|
||||
// setup call
|
||||
const uint32_t kNumberOfComputations = 2;
|
||||
const auto mockPreparedModel = createMockPreparedModel();
|
||||
const auto preparedModel =
|
||||
PreparedModel::create(mockPreparedModel, /*executeSynchronously=*/true).value();
|
||||
EXPECT_CALL(*mockPreparedModel, executeSynchronously(_, _, _))
|
||||
.Times(kNumberOfComputations)
|
||||
.WillRepeatedly(
|
||||
Invoke(makeExecuteSynchronously(V1_0::ErrorStatus::NONE, {}, kNoTiming)));
|
||||
|
||||
// create execution
|
||||
const auto createResult = preparedModel->createReusableExecution({}, {}, {});
|
||||
ASSERT_TRUE(createResult.has_value())
|
||||
<< "Failed with " << createResult.error().code << ": " << createResult.error().message;
|
||||
ASSERT_NE(createResult.value(), nullptr);
|
||||
|
||||
// invoke compute repeatedly
|
||||
for (uint32_t i = 0; i < kNumberOfComputations; i++) {
|
||||
const auto computeResult = createResult.value()->compute({});
|
||||
EXPECT_TRUE(computeResult.has_value()) << "Failed with " << computeResult.error().code
|
||||
<< ": " << computeResult.error().message;
|
||||
}
|
||||
}
|
||||
|
||||
TEST(PreparedModelTest, reusableExecuteSyncError) {
|
||||
// setup test
|
||||
const auto mockPreparedModel = createMockPreparedModel();
|
||||
const auto preparedModel =
|
||||
PreparedModel::create(mockPreparedModel, /*executeSynchronously=*/true).value();
|
||||
EXPECT_CALL(*mockPreparedModel, executeSynchronously(_, _, _))
|
||||
.Times(1)
|
||||
.WillOnce(Invoke(
|
||||
makeExecuteSynchronously(V1_0::ErrorStatus::GENERAL_FAILURE, {}, kNoTiming)));
|
||||
|
||||
// create execution
|
||||
const auto createResult = preparedModel->createReusableExecution({}, {}, {});
|
||||
ASSERT_TRUE(createResult.has_value())
|
||||
<< "Failed with " << createResult.error().code << ": " << createResult.error().message;
|
||||
ASSERT_NE(createResult.value(), nullptr);
|
||||
|
||||
// invoke compute
|
||||
const auto computeResult = createResult.value()->compute({});
|
||||
ASSERT_FALSE(computeResult.has_value());
|
||||
EXPECT_EQ(computeResult.error().code, nn::ErrorStatus::GENERAL_FAILURE);
|
||||
}
|
||||
|
||||
TEST(PreparedModelTest, reusableExecuteSyncTransportFailure) {
|
||||
// setup test
|
||||
const auto mockPreparedModel = createMockPreparedModel();
|
||||
const auto preparedModel =
|
||||
PreparedModel::create(mockPreparedModel, /*executeSynchronously=*/true).value();
|
||||
EXPECT_CALL(*mockPreparedModel, executeSynchronously(_, _, _))
|
||||
.Times(1)
|
||||
.WillOnce(InvokeWithoutArgs(makeGeneralTransportFailure));
|
||||
|
||||
// create execution
|
||||
const auto createResult = preparedModel->createReusableExecution({}, {}, {});
|
||||
ASSERT_TRUE(createResult.has_value())
|
||||
<< "Failed with " << createResult.error().code << ": " << createResult.error().message;
|
||||
ASSERT_NE(createResult.value(), nullptr);
|
||||
|
||||
// invoke compute
|
||||
const auto computeResult = createResult.value()->compute({});
|
||||
ASSERT_FALSE(computeResult.has_value());
|
||||
EXPECT_EQ(computeResult.error().code, nn::ErrorStatus::GENERAL_FAILURE);
|
||||
}
|
||||
|
||||
TEST(PreparedModelTest, reusableExecuteSyncDeadObject) {
|
||||
// setup test
|
||||
const auto mockPreparedModel = createMockPreparedModel();
|
||||
const auto preparedModel =
|
||||
PreparedModel::create(mockPreparedModel, /*executeSynchronously=*/true).value();
|
||||
EXPECT_CALL(*mockPreparedModel, executeSynchronously(_, _, _))
|
||||
.Times(1)
|
||||
.WillOnce(InvokeWithoutArgs(makeDeadObjectFailure));
|
||||
|
||||
// create execution
|
||||
const auto createResult = preparedModel->createReusableExecution({}, {}, {});
|
||||
ASSERT_TRUE(createResult.has_value())
|
||||
<< "Failed with " << createResult.error().code << ": " << createResult.error().message;
|
||||
ASSERT_NE(createResult.value(), nullptr);
|
||||
|
||||
// invoke compute
|
||||
const auto computeResult = createResult.value()->compute({});
|
||||
ASSERT_FALSE(computeResult.has_value());
|
||||
EXPECT_EQ(computeResult.error().code, nn::ErrorStatus::DEAD_OBJECT);
|
||||
}
|
||||
|
||||
TEST(PreparedModelTest, reusableExecuteAsync) {
|
||||
// setup call
|
||||
const uint32_t kNumberOfComputations = 2;
|
||||
const auto mockPreparedModel = createMockPreparedModel();
|
||||
const auto preparedModel =
|
||||
PreparedModel::create(mockPreparedModel, /*executeSynchronously=*/false).value();
|
||||
EXPECT_CALL(*mockPreparedModel, execute_1_2(_, _, _))
|
||||
.Times(kNumberOfComputations)
|
||||
.WillRepeatedly(Invoke(makeExecuteAsynchronously(
|
||||
V1_0::ErrorStatus::NONE, V1_0::ErrorStatus::NONE, {}, kNoTiming)));
|
||||
|
||||
// create execution
|
||||
const auto createResult = preparedModel->createReusableExecution({}, {}, {});
|
||||
ASSERT_TRUE(createResult.has_value())
|
||||
<< "Failed with " << createResult.error().code << ": " << createResult.error().message;
|
||||
ASSERT_NE(createResult.value(), nullptr);
|
||||
|
||||
// invoke compute repeatedly
|
||||
for (uint32_t i = 0; i < kNumberOfComputations; i++) {
|
||||
const auto computeResult = createResult.value()->compute({});
|
||||
EXPECT_TRUE(computeResult.has_value()) << "Failed with " << computeResult.error().code
|
||||
<< ": " << computeResult.error().message;
|
||||
}
|
||||
}
|
||||
|
||||
TEST(PreparedModelTest, reusableExecuteAsyncLaunchError) {
|
||||
// setup test
|
||||
const auto mockPreparedModel = createMockPreparedModel();
|
||||
const auto preparedModel =
|
||||
PreparedModel::create(mockPreparedModel, /*executeSynchronously=*/false).value();
|
||||
EXPECT_CALL(*mockPreparedModel, execute_1_2(_, _, _))
|
||||
.Times(1)
|
||||
.WillOnce(Invoke(makeExecuteAsynchronously(V1_0::ErrorStatus::GENERAL_FAILURE,
|
||||
V1_0::ErrorStatus::GENERAL_FAILURE, {},
|
||||
kNoTiming)));
|
||||
|
||||
// create execution
|
||||
const auto createResult = preparedModel->createReusableExecution({}, {}, {});
|
||||
ASSERT_TRUE(createResult.has_value())
|
||||
<< "Failed with " << createResult.error().code << ": " << createResult.error().message;
|
||||
ASSERT_NE(createResult.value(), nullptr);
|
||||
|
||||
// invoke compute
|
||||
const auto computeResult = createResult.value()->compute({});
|
||||
ASSERT_FALSE(computeResult.has_value());
|
||||
EXPECT_EQ(computeResult.error().code, nn::ErrorStatus::GENERAL_FAILURE);
|
||||
}
|
||||
|
||||
TEST(PreparedModelTest, reusableExecuteAsyncReturnError) {
|
||||
// setup test
|
||||
const auto mockPreparedModel = createMockPreparedModel();
|
||||
const auto preparedModel =
|
||||
PreparedModel::create(mockPreparedModel, /*executeSynchronously=*/false).value();
|
||||
EXPECT_CALL(*mockPreparedModel, execute_1_2(_, _, _))
|
||||
.Times(1)
|
||||
.WillOnce(Invoke(makeExecuteAsynchronously(
|
||||
V1_0::ErrorStatus::NONE, V1_0::ErrorStatus::GENERAL_FAILURE, {}, kNoTiming)));
|
||||
|
||||
// create execution
|
||||
const auto createResult = preparedModel->createReusableExecution({}, {}, {});
|
||||
ASSERT_TRUE(createResult.has_value())
|
||||
<< "Failed with " << createResult.error().code << ": " << createResult.error().message;
|
||||
ASSERT_NE(createResult.value(), nullptr);
|
||||
|
||||
// invoke compute
|
||||
const auto computeResult = createResult.value()->compute({});
|
||||
ASSERT_FALSE(computeResult.has_value());
|
||||
EXPECT_EQ(computeResult.error().code, nn::ErrorStatus::GENERAL_FAILURE);
|
||||
}
|
||||
|
||||
TEST(PreparedModelTest, reusableExecuteAsyncTransportFailure) {
|
||||
// setup test
|
||||
const auto mockPreparedModel = createMockPreparedModel();
|
||||
const auto preparedModel =
|
||||
PreparedModel::create(mockPreparedModel, /*executeSynchronously=*/false).value();
|
||||
EXPECT_CALL(*mockPreparedModel, execute_1_2(_, _, _))
|
||||
.Times(1)
|
||||
.WillOnce(InvokeWithoutArgs(makeGeneralTransportFailure));
|
||||
|
||||
// create execution
|
||||
const auto createResult = preparedModel->createReusableExecution({}, {}, {});
|
||||
ASSERT_TRUE(createResult.has_value())
|
||||
<< "Failed with " << createResult.error().code << ": " << createResult.error().message;
|
||||
ASSERT_NE(createResult.value(), nullptr);
|
||||
|
||||
// invoke compute
|
||||
const auto computeResult = createResult.value()->compute({});
|
||||
ASSERT_FALSE(computeResult.has_value());
|
||||
EXPECT_EQ(computeResult.error().code, nn::ErrorStatus::GENERAL_FAILURE);
|
||||
}
|
||||
|
||||
TEST(PreparedModelTest, reusableExecuteAsyncDeadObject) {
|
||||
// setup test
|
||||
const auto mockPreparedModel = createMockPreparedModel();
|
||||
const auto preparedModel =
|
||||
PreparedModel::create(mockPreparedModel, /*executeSynchronously=*/false).value();
|
||||
EXPECT_CALL(*mockPreparedModel, execute_1_2(_, _, _))
|
||||
.Times(1)
|
||||
.WillOnce(InvokeWithoutArgs(makeDeadObjectFailure));
|
||||
|
||||
// create execution
|
||||
const auto createResult = preparedModel->createReusableExecution({}, {}, {});
|
||||
ASSERT_TRUE(createResult.has_value())
|
||||
<< "Failed with " << createResult.error().code << ": " << createResult.error().message;
|
||||
ASSERT_NE(createResult.value(), nullptr);
|
||||
|
||||
// invoke compute
|
||||
const auto computeResult = createResult.value()->compute({});
|
||||
ASSERT_FALSE(computeResult.has_value());
|
||||
EXPECT_EQ(computeResult.error().code, nn::ErrorStatus::DEAD_OBJECT);
|
||||
}
|
||||
|
||||
TEST(PreparedModelTest, reusableExecuteAsyncCrash) {
|
||||
// setup test
|
||||
const auto mockPreparedModel = createMockPreparedModel();
|
||||
const auto preparedModel =
|
||||
PreparedModel::create(mockPreparedModel, /*executeSynchronously=*/false).value();
|
||||
const auto ret = [&mockPreparedModel]() -> hardware::Return<V1_0::ErrorStatus> {
|
||||
mockPreparedModel->simulateCrash();
|
||||
return V1_0::ErrorStatus::NONE;
|
||||
};
|
||||
EXPECT_CALL(*mockPreparedModel, execute_1_2(_, _, _)).Times(1).WillOnce(InvokeWithoutArgs(ret));
|
||||
|
||||
// create execution
|
||||
const auto createResult = preparedModel->createReusableExecution({}, {}, {});
|
||||
ASSERT_TRUE(createResult.has_value())
|
||||
<< "Failed with " << createResult.error().code << ": " << createResult.error().message;
|
||||
ASSERT_NE(createResult.value(), nullptr);
|
||||
|
||||
// invoke compute
|
||||
const auto computeResult = createResult.value()->compute({});
|
||||
ASSERT_FALSE(computeResult.has_value());
|
||||
EXPECT_EQ(computeResult.error().code, nn::ErrorStatus::DEAD_OBJECT);
|
||||
}
|
||||
|
||||
TEST(PreparedModelTest, reusableExecuteFencedNotSupported) {
|
||||
// setup test
|
||||
const auto mockPreparedModel = createMockPreparedModel();
|
||||
const auto preparedModel =
|
||||
PreparedModel::create(mockPreparedModel, /*executeSynchronously=*/true).value();
|
||||
|
||||
// create execution
|
||||
const auto createResult = preparedModel->createReusableExecution({}, {}, {});
|
||||
ASSERT_TRUE(createResult.has_value())
|
||||
<< "Failed with " << createResult.error().code << ": " << createResult.error().message;
|
||||
ASSERT_NE(createResult.value(), nullptr);
|
||||
|
||||
// invoke compute
|
||||
const auto computeResult = createResult.value()->computeFenced({}, {}, {});
|
||||
ASSERT_FALSE(computeResult.has_value());
|
||||
EXPECT_EQ(computeResult.error().code, nn::ErrorStatus::GENERAL_FAILURE);
|
||||
}
|
||||
|
||||
TEST(PreparedModelTest, configureExecutionBurst) {
|
||||
// setup test
|
||||
const auto mockPreparedModel = MockPreparedModel::create();
|
||||
|
|
66
neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Execution.h
Normal file
66
neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Execution.h
Normal file
|
@ -0,0 +1,66 @@
|
|||
/*
|
||||
* Copyright (C) 2021 The Android Open Source Project
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_3_UTILS_EXECUTION_H
|
||||
#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_3_UTILS_EXECUTION_H
|
||||
|
||||
#include <nnapi/IExecution.h>
|
||||
#include <nnapi/Result.h>
|
||||
#include <nnapi/Types.h>
|
||||
#include <nnapi/hal/CommonUtils.h>
|
||||
|
||||
#include "PreparedModel.h"
|
||||
|
||||
#include <memory>
|
||||
#include <utility>
|
||||
#include <vector>
|
||||
|
||||
// See hardware/interfaces/neuralnetworks/utils/README.md for more information on HIDL interface
|
||||
// lifetimes across processes and for protecting asynchronous calls across HIDL.
|
||||
|
||||
namespace android::hardware::neuralnetworks::V1_3::utils {
|
||||
|
||||
class Execution final : public nn::IExecution, public std::enable_shared_from_this<Execution> {
|
||||
struct PrivateConstructorTag {};
|
||||
|
||||
public:
|
||||
static nn::GeneralResult<std::shared_ptr<const Execution>> create(
|
||||
std::shared_ptr<const PreparedModel> preparedModel, Request request,
|
||||
hal::utils::RequestRelocation relocation, V1_2::MeasureTiming measure,
|
||||
OptionalTimeoutDuration loopTimeoutDuration);
|
||||
|
||||
Execution(PrivateConstructorTag tag, std::shared_ptr<const PreparedModel> preparedModel,
|
||||
Request request, hal::utils::RequestRelocation relocation,
|
||||
V1_2::MeasureTiming measure, OptionalTimeoutDuration loopTimeoutDuration);
|
||||
|
||||
nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> compute(
|
||||
const nn::OptionalTimePoint& deadline) const override;
|
||||
|
||||
nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>> computeFenced(
|
||||
const std::vector<nn::SyncFence>& waitFor, const nn::OptionalTimePoint& deadline,
|
||||
const nn::OptionalDuration& timeoutDurationAfterFence) const override;
|
||||
|
||||
private:
|
||||
const std::shared_ptr<const PreparedModel> kPreparedModel;
|
||||
const Request kRequest;
|
||||
const hal::utils::RequestRelocation kRelocation;
|
||||
const V1_2::MeasureTiming kMeasure;
|
||||
const OptionalTimeoutDuration kLoopTimeoutDuration;
|
||||
};
|
||||
|
||||
} // namespace android::hardware::neuralnetworks::V1_3::utils
|
||||
|
||||
#endif // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_3_UTILS_EXECUTION_H
|
|
@ -57,10 +57,26 @@ class PreparedModel final : public nn::IPreparedModel,
|
|||
const nn::OptionalDuration& loopTimeoutDuration,
|
||||
const nn::OptionalDuration& timeoutDurationAfterFence) const override;
|
||||
|
||||
nn::GeneralResult<nn::SharedExecution> createReusableExecution(
|
||||
const nn::Request& request, nn::MeasureTiming measure,
|
||||
const nn::OptionalDuration& loopTimeoutDuration) const override;
|
||||
|
||||
nn::GeneralResult<nn::SharedBurst> configureExecutionBurst() const override;
|
||||
|
||||
std::any getUnderlyingResource() const override;
|
||||
|
||||
nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> executeInternal(
|
||||
const Request& request, V1_2::MeasureTiming measure, const OptionalTimePoint& deadline,
|
||||
const OptionalTimeoutDuration& loopTimeoutDuration,
|
||||
const hal::utils::RequestRelocation& relocation) const;
|
||||
|
||||
nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>>
|
||||
executeFencedInternal(const Request& request, const hidl_vec<hidl_handle>& waitFor,
|
||||
V1_2::MeasureTiming measure, const OptionalTimePoint& deadline,
|
||||
const OptionalTimeoutDuration& loopTimeoutDuration,
|
||||
const OptionalTimeoutDuration& timeoutDurationAfterFence,
|
||||
const hal::utils::RequestRelocation& relocation) const;
|
||||
|
||||
private:
|
||||
nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> executeSynchronously(
|
||||
const Request& request, V1_2::MeasureTiming measure, const OptionalTimePoint& deadline,
|
||||
|
|
84
neuralnetworks/1.3/utils/src/Execution.cpp
Normal file
84
neuralnetworks/1.3/utils/src/Execution.cpp
Normal file
|
@ -0,0 +1,84 @@
|
|||
/*
|
||||
* Copyright (C) 2021 The Android Open Source Project
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#include "Execution.h"
|
||||
|
||||
#include "Conversions.h"
|
||||
#include "PreparedModel.h"
|
||||
#include "Utils.h"
|
||||
|
||||
#include <android/hardware/neuralnetworks/1.0/types.h>
|
||||
#include <android/hardware/neuralnetworks/1.1/types.h>
|
||||
#include <android/hardware/neuralnetworks/1.2/types.h>
|
||||
#include <android/hardware/neuralnetworks/1.3/IPreparedModel.h>
|
||||
#include <android/hardware/neuralnetworks/1.3/types.h>
|
||||
#include <nnapi/IExecution.h>
|
||||
#include <nnapi/Result.h>
|
||||
#include <nnapi/Types.h>
|
||||
#include <nnapi/hal/CommonUtils.h>
|
||||
#include <nnapi/hal/HandleError.h>
|
||||
|
||||
#include <memory>
|
||||
#include <utility>
|
||||
#include <vector>
|
||||
|
||||
// See hardware/interfaces/neuralnetworks/utils/README.md for more information on HIDL interface
|
||||
// lifetimes across processes and for protecting asynchronous calls across HIDL.
|
||||
|
||||
namespace android::hardware::neuralnetworks::V1_3::utils {
|
||||
|
||||
// Factory for Execution objects: rejects a null prepared model, then builds an
// immutable Execution that owns the (already HIDL-converted) request and the
// fixed execution parameters.
nn::GeneralResult<std::shared_ptr<const Execution>> Execution::create(
        std::shared_ptr<const PreparedModel> preparedModel, Request request,
        hal::utils::RequestRelocation relocation, V1_2::MeasureTiming measure,
        OptionalTimeoutDuration loopTimeoutDuration) {
    if (preparedModel == nullptr) {
        return NN_ERROR() << "V1_3::utils::Execution::create must have non-null preparedModel";
    }

    // PrivateConstructorTag keeps the ctor effectively private while still
    // allowing std::make_shared to call it.
    return std::make_shared<const Execution>(PrivateConstructorTag{}, std::move(preparedModel),
                                             std::move(request), std::move(relocation), measure,
                                             std::move(loopTimeoutDuration));
}
|
||||
|
||||
// Stores all execution state by move; no validation here — create() is the
// only intended entry point and has already checked the arguments.
Execution::Execution(PrivateConstructorTag /*tag*/,
                     std::shared_ptr<const PreparedModel> preparedModel, Request request,
                     hal::utils::RequestRelocation relocation, V1_2::MeasureTiming measure,
                     OptionalTimeoutDuration loopTimeoutDuration)
    : kPreparedModel(std::move(preparedModel)),
      kRequest(std::move(request)),
      kRelocation(std::move(relocation)),
      kMeasure(measure),
      kLoopTimeoutDuration(std::move(loopTimeoutDuration)) {}
|
||||
|
||||
// Runs the stored request synchronously. Only the deadline varies per call; it
// is converted to its HIDL form (wrapped as an execution failure on error) and
// the heavy lifting is delegated to PreparedModel::executeInternal, which also
// performs the input/output relocation flushes.
nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> Execution::compute(
        const nn::OptionalTimePoint& deadline) const {
    const auto hidlDeadline = NN_TRY(hal::utils::makeExecutionFailure(convert(deadline)));
    return kPreparedModel->executeInternal(kRequest, kMeasure, hidlDeadline, kLoopTimeoutDuration,
                                           kRelocation);
}
|
||||
|
||||
// Runs the stored request as a fenced execution. The per-call arguments (wait
// fences, deadline, post-fence timeout) are converted to their HIDL
// counterparts — each conversion propagates failure via NN_TRY — and the call
// is delegated to PreparedModel::executeFencedInternal together with the
// fixed request, measure setting, loop timeout, and relocation.
nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>> Execution::computeFenced(
        const std::vector<nn::SyncFence>& waitFor, const nn::OptionalTimePoint& deadline,
        const nn::OptionalDuration& timeoutDurationAfterFence) const {
    const auto fenceHandles = NN_TRY(hal::utils::convertSyncFences(waitFor));
    const auto halDeadline = NN_TRY(convert(deadline));
    const auto halTimeoutAfterFence = NN_TRY(convert(timeoutDurationAfterFence));
    return kPreparedModel->executeFencedInternal(kRequest, fenceHandles, kMeasure, halDeadline,
                                                 kLoopTimeoutDuration, halTimeoutAfterFence,
                                                 kRelocation);
}
|
||||
|
||||
} // namespace android::hardware::neuralnetworks::V1_3::utils
|
|
@ -18,6 +18,7 @@
|
|||
|
||||
#include "Callbacks.h"
|
||||
#include "Conversions.h"
|
||||
#include "Execution.h"
|
||||
#include "Utils.h"
|
||||
|
||||
#include <android/hardware/neuralnetworks/1.0/types.h>
|
||||
|
@ -139,8 +140,10 @@ nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> Prepare
|
|||
const nn::OptionalDuration& loopTimeoutDuration) const {
|
||||
// Ensure that request is ready for IPC.
|
||||
std::optional<nn::Request> maybeRequestInShared;
|
||||
const nn::Request& requestInShared = NN_TRY(hal::utils::makeExecutionFailure(
|
||||
hal::utils::flushDataFromPointerToShared(&request, &maybeRequestInShared)));
|
||||
hal::utils::RequestRelocation relocation;
|
||||
const nn::Request& requestInShared =
|
||||
NN_TRY(hal::utils::makeExecutionFailure(hal::utils::convertRequestFromPointerToShared(
|
||||
&request, &maybeRequestInShared, &relocation)));
|
||||
|
||||
const auto hidlRequest = NN_TRY(hal::utils::makeExecutionFailure(convert(requestInShared)));
|
||||
const auto hidlMeasure = NN_TRY(hal::utils::makeExecutionFailure(convert(measure)));
|
||||
|
@ -148,16 +151,27 @@ nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> Prepare
|
|||
const auto hidlLoopTimeoutDuration =
|
||||
NN_TRY(hal::utils::makeExecutionFailure(convert(loopTimeoutDuration)));
|
||||
|
||||
return executeInternal(hidlRequest, hidlMeasure, hidlDeadline, hidlLoopTimeoutDuration,
|
||||
relocation);
|
||||
}
|
||||
|
||||
nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>>
|
||||
PreparedModel::executeInternal(const Request& request, V1_2::MeasureTiming measure,
|
||||
const OptionalTimePoint& deadline,
|
||||
const OptionalTimeoutDuration& loopTimeoutDuration,
|
||||
const hal::utils::RequestRelocation& relocation) const {
|
||||
if (relocation.input) {
|
||||
relocation.input->flush();
|
||||
}
|
||||
|
||||
auto result = kExecuteSynchronously
|
||||
? executeSynchronously(hidlRequest, hidlMeasure, hidlDeadline,
|
||||
hidlLoopTimeoutDuration)
|
||||
: executeAsynchronously(hidlRequest, hidlMeasure, hidlDeadline,
|
||||
hidlLoopTimeoutDuration);
|
||||
? executeSynchronously(request, measure, deadline, loopTimeoutDuration)
|
||||
: executeAsynchronously(request, measure, deadline, loopTimeoutDuration);
|
||||
auto [outputShapes, timing] = NN_TRY(std::move(result));
|
||||
|
||||
NN_TRY(hal::utils::makeExecutionFailure(
|
||||
hal::utils::unflushDataFromSharedToPointer(request, maybeRequestInShared)));
|
||||
|
||||
if (relocation.output) {
|
||||
relocation.output->flush();
|
||||
}
|
||||
return std::make_pair(std::move(outputShapes), timing);
|
||||
}
|
||||
|
||||
|
@ -168,8 +182,9 @@ PreparedModel::executeFenced(const nn::Request& request, const std::vector<nn::S
|
|||
const nn::OptionalDuration& timeoutDurationAfterFence) const {
|
||||
// Ensure that request is ready for IPC.
|
||||
std::optional<nn::Request> maybeRequestInShared;
|
||||
const nn::Request& requestInShared =
|
||||
NN_TRY(hal::utils::flushDataFromPointerToShared(&request, &maybeRequestInShared));
|
||||
hal::utils::RequestRelocation relocation;
|
||||
const nn::Request& requestInShared = NN_TRY(hal::utils::convertRequestFromPointerToShared(
|
||||
&request, &maybeRequestInShared, &relocation));
|
||||
|
||||
const auto hidlRequest = NN_TRY(convert(requestInShared));
|
||||
const auto hidlWaitFor = NN_TRY(hal::utils::convertSyncFences(waitFor));
|
||||
|
@ -178,27 +193,58 @@ PreparedModel::executeFenced(const nn::Request& request, const std::vector<nn::S
|
|||
const auto hidlLoopTimeoutDuration = NN_TRY(convert(loopTimeoutDuration));
|
||||
const auto hidlTimeoutDurationAfterFence = NN_TRY(convert(timeoutDurationAfterFence));
|
||||
|
||||
return executeFencedInternal(hidlRequest, hidlWaitFor, hidlMeasure, hidlDeadline,
|
||||
hidlLoopTimeoutDuration, hidlTimeoutDurationAfterFence,
|
||||
relocation);
|
||||
}
|
||||
|
||||
nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>>
|
||||
PreparedModel::executeFencedInternal(const Request& request, const hidl_vec<hidl_handle>& waitFor,
|
||||
V1_2::MeasureTiming measure, const OptionalTimePoint& deadline,
|
||||
const OptionalTimeoutDuration& loopTimeoutDuration,
|
||||
const OptionalTimeoutDuration& timeoutDurationAfterFence,
|
||||
const hal::utils::RequestRelocation& relocation) const {
|
||||
if (relocation.input) {
|
||||
relocation.input->flush();
|
||||
}
|
||||
|
||||
auto cb = hal::utils::CallbackValue(fencedExecutionCallback);
|
||||
|
||||
const auto ret = kPreparedModel->executeFenced(hidlRequest, hidlWaitFor, hidlMeasure,
|
||||
hidlDeadline, hidlLoopTimeoutDuration,
|
||||
hidlTimeoutDurationAfterFence, cb);
|
||||
const auto ret =
|
||||
kPreparedModel->executeFenced(request, waitFor, measure, deadline, loopTimeoutDuration,
|
||||
timeoutDurationAfterFence, cb);
|
||||
HANDLE_TRANSPORT_FAILURE(ret);
|
||||
auto [syncFence, callback] = NN_TRY(cb.take());
|
||||
|
||||
// If executeFenced required the request memory to be moved into shared memory, block here until
|
||||
// the fenced execution has completed and flush the memory back.
|
||||
if (maybeRequestInShared.has_value()) {
|
||||
if (relocation.output) {
|
||||
const auto state = syncFence.syncWait({});
|
||||
if (state != nn::SyncFence::FenceState::SIGNALED) {
|
||||
return NN_ERROR() << "syncWait failed with " << state;
|
||||
}
|
||||
NN_TRY(hal::utils::unflushDataFromSharedToPointer(request, maybeRequestInShared));
|
||||
relocation.output->flush();
|
||||
}
|
||||
|
||||
return std::make_pair(std::move(syncFence), std::move(callback));
|
||||
}
|
||||
|
||||
// Creates a reusable execution object that caches the IPC-ready form of the
// request so repeated computations avoid re-converting and re-copying it.
nn::GeneralResult<nn::SharedExecution> PreparedModel::createReusableExecution(
        const nn::Request& request, nn::MeasureTiming measure,
        const nn::OptionalDuration& loopTimeoutDuration) const {
    // Ensure that request is ready for IPC: pointer-based buffers are moved
    // into shared memory, and the relocation records how to flush/unflush
    // them around each computation.
    std::optional<nn::Request> sharedRequestStorage;
    hal::utils::RequestRelocation bufferRelocation;
    const nn::Request& ipcRequest = NN_TRY(hal::utils::convertRequestFromPointerToShared(
            &request, &sharedRequestStorage, &bufferRelocation));

    // Convert the canonical arguments to HIDL once, up front.
    auto halRequest = NN_TRY(convert(ipcRequest));
    auto halMeasure = NN_TRY(convert(measure));
    auto halLoopTimeout = NN_TRY(convert(loopTimeoutDuration));
    return Execution::create(shared_from_this(), std::move(halRequest),
                             std::move(bufferRelocation), halMeasure, std::move(halLoopTimeout));
}
|
||||
|
||||
nn::GeneralResult<nn::SharedBurst> PreparedModel::configureExecutionBurst() const {
|
||||
auto self = shared_from_this();
|
||||
auto fallback = [preparedModel = std::move(self)](
|
||||
|
@ -209,7 +255,7 @@ nn::GeneralResult<nn::SharedBurst> PreparedModel::configureExecutionBurst() cons
|
|||
return preparedModel->execute(request, measure, deadline, loopTimeoutDuration);
|
||||
};
|
||||
const auto pollingTimeWindow = V1_2::utils::getBurstControllerPollingTimeWindow();
|
||||
return V1_2::utils::ExecutionBurstController::create(kPreparedModel, std::move(fallback),
|
||||
return V1_2::utils::ExecutionBurstController::create(shared_from_this(), kPreparedModel,
|
||||
pollingTimeWindow);
|
||||
}
|
||||
|
||||
|
|
|
@ -22,6 +22,7 @@
|
|||
#include <android/hardware/neuralnetworks/1.3/IFencedExecutionCallback.h>
|
||||
#include <gmock/gmock.h>
|
||||
#include <gtest/gtest.h>
|
||||
#include <nnapi/IExecution.h>
|
||||
#include <nnapi/IPreparedModel.h>
|
||||
#include <nnapi/TypeUtils.h>
|
||||
#include <nnapi/Types.h>
|
||||
|
@ -462,6 +463,363 @@ TEST(PreparedModelTest, executeFencedDeadObject) {
|
|||
EXPECT_EQ(result.error().code, nn::ErrorStatus::DEAD_OBJECT);
|
||||
}
|
||||
|
||||
// Verifies that a reusable execution on a synchronous-path PreparedModel can
// be computed multiple times, issuing one executeSynchronously_1_3 call per
// compute() invocation.
TEST(PreparedModelTest, reusableExecuteSync) {
    // setup call
    const uint32_t kNumberOfComputations = 2;
    const auto mockPreparedModel = createMockPreparedModel();
    const auto preparedModel =
            PreparedModel::create(mockPreparedModel, /*executeSynchronously=*/true).value();
    EXPECT_CALL(*mockPreparedModel, executeSynchronously_1_3(_, _, _, _, _))
            .Times(kNumberOfComputations)
            .WillRepeatedly(
                    Invoke(makeExecuteSynchronously(V1_3::ErrorStatus::NONE, {}, kNoTiming)));

    // create execution
    const auto createResult = preparedModel->createReusableExecution({}, {}, {});
    ASSERT_TRUE(createResult.has_value())
            << "Failed with " << createResult.error().code << ": " << createResult.error().message;
    ASSERT_NE(createResult.value(), nullptr);

    // invoke compute repeatedly
    for (uint32_t i = 0; i < kNumberOfComputations; i++) {
        const auto computeResult = createResult.value()->compute({});
        EXPECT_TRUE(computeResult.has_value()) << "Failed with " << computeResult.error().code
                                               << ": " << computeResult.error().message;
    }
}
|
||||
|
||||
// Verifies that a driver-reported GENERAL_FAILURE from the synchronous
// execution path is propagated through a reusable execution's compute().
TEST(PreparedModelTest, reusableExecuteSyncError) {
    // setup test
    const auto mockPreparedModel = createMockPreparedModel();
    const auto preparedModel =
            PreparedModel::create(mockPreparedModel, /*executeSynchronously=*/true).value();
    EXPECT_CALL(*mockPreparedModel, executeSynchronously_1_3(_, _, _, _, _))
            .Times(1)
            .WillOnce(Invoke(
                    makeExecuteSynchronously(V1_3::ErrorStatus::GENERAL_FAILURE, {}, kNoTiming)));

    // create execution
    const auto createResult = preparedModel->createReusableExecution({}, {}, {});
    ASSERT_TRUE(createResult.has_value())
            << "Failed with " << createResult.error().code << ": " << createResult.error().message;
    ASSERT_NE(createResult.value(), nullptr);

    // invoke compute
    const auto computeResult = createResult.value()->compute({});
    ASSERT_FALSE(computeResult.has_value());
    EXPECT_EQ(computeResult.error().code, nn::ErrorStatus::GENERAL_FAILURE);
}
|
||||
|
||||
// Verifies that a HIDL transport failure during synchronous execution surfaces
// as GENERAL_FAILURE from a reusable execution's compute().
TEST(PreparedModelTest, reusableExecuteSyncTransportFailure) {
    // setup test
    const auto mockPreparedModel = createMockPreparedModel();
    const auto preparedModel =
            PreparedModel::create(mockPreparedModel, /*executeSynchronously=*/true).value();
    EXPECT_CALL(*mockPreparedModel, executeSynchronously_1_3(_, _, _, _, _))
            .Times(1)
            .WillOnce(InvokeWithoutArgs(makeGeneralTransportFailure));

    // create execution
    const auto createResult = preparedModel->createReusableExecution({}, {}, {});
    ASSERT_TRUE(createResult.has_value())
            << "Failed with " << createResult.error().code << ": " << createResult.error().message;
    ASSERT_NE(createResult.value(), nullptr);

    // invoke compute
    const auto computeResult = createResult.value()->compute({});
    ASSERT_FALSE(computeResult.has_value());
    EXPECT_EQ(computeResult.error().code, nn::ErrorStatus::GENERAL_FAILURE);
}
|
||||
|
||||
// Verifies that a dead-object transport error during synchronous execution
// surfaces as DEAD_OBJECT from a reusable execution's compute().
TEST(PreparedModelTest, reusableExecuteSyncDeadObject) {
    // setup test
    const auto mockPreparedModel = createMockPreparedModel();
    const auto preparedModel =
            PreparedModel::create(mockPreparedModel, /*executeSynchronously=*/true).value();
    EXPECT_CALL(*mockPreparedModel, executeSynchronously_1_3(_, _, _, _, _))
            .Times(1)
            .WillOnce(InvokeWithoutArgs(makeDeadObjectFailure));

    // create execution
    const auto createResult = preparedModel->createReusableExecution({}, {}, {});
    ASSERT_TRUE(createResult.has_value())
            << "Failed with " << createResult.error().code << ": " << createResult.error().message;
    ASSERT_NE(createResult.value(), nullptr);

    // invoke compute
    const auto computeResult = createResult.value()->compute({});
    ASSERT_FALSE(computeResult.has_value());
    EXPECT_EQ(computeResult.error().code, nn::ErrorStatus::DEAD_OBJECT);
}
|
||||
|
||||
// Verifies that a reusable execution on an asynchronous-path PreparedModel can
// be computed multiple times, issuing one execute_1_3 call per compute().
TEST(PreparedModelTest, reusableExecuteAsync) {
    // setup call
    const uint32_t kNumberOfComputations = 2;
    const auto mockPreparedModel = createMockPreparedModel();
    const auto preparedModel =
            PreparedModel::create(mockPreparedModel, /*executeSynchronously=*/false).value();
    EXPECT_CALL(*mockPreparedModel, execute_1_3(_, _, _, _, _))
            .Times(kNumberOfComputations)
            .WillRepeatedly(Invoke(makeExecuteAsynchronously(
                    V1_3::ErrorStatus::NONE, V1_3::ErrorStatus::NONE, {}, kNoTiming)));

    // create execution
    const auto createResult = preparedModel->createReusableExecution({}, {}, {});
    ASSERT_TRUE(createResult.has_value())
            << "Failed with " << createResult.error().code << ": " << createResult.error().message;
    ASSERT_NE(createResult.value(), nullptr);

    // invoke compute repeatedly
    for (uint32_t i = 0; i < kNumberOfComputations; i++) {
        const auto computeResult = createResult.value()->compute({});
        EXPECT_TRUE(computeResult.has_value()) << "Failed with " << computeResult.error().code
                                               << ": " << computeResult.error().message;
    }
}
|
||||
|
||||
// Verifies that a launch-time GENERAL_FAILURE from the asynchronous execution
// path is propagated through a reusable execution's compute().
TEST(PreparedModelTest, reusableExecuteAsyncLaunchError) {
    // setup test
    const auto mockPreparedModel = createMockPreparedModel();
    const auto preparedModel =
            PreparedModel::create(mockPreparedModel, /*executeSynchronously=*/false).value();
    EXPECT_CALL(*mockPreparedModel, execute_1_3(_, _, _, _, _))
            .Times(1)
            .WillOnce(Invoke(makeExecuteAsynchronously(V1_3::ErrorStatus::GENERAL_FAILURE,
                                                       V1_3::ErrorStatus::GENERAL_FAILURE, {},
                                                       kNoTiming)))

    // create execution
    const auto createResult = preparedModel->createReusableExecution({}, {}, {});
    ASSERT_TRUE(createResult.has_value())
            << "Failed with " << createResult.error().code << ": " << createResult.error().message;
    ASSERT_NE(createResult.value(), nullptr);

    // invoke compute
    const auto computeResult = createResult.value()->compute({});
    ASSERT_FALSE(computeResult.has_value());
    EXPECT_EQ(computeResult.error().code, nn::ErrorStatus::GENERAL_FAILURE);
}
|
||||
|
||||
// Verifies that a callback-reported GENERAL_FAILURE (launch succeeds with
// NONE, the asynchronous result returns GENERAL_FAILURE) is propagated through
// a reusable execution's compute().
//
// Fix: the original body never exercised the reusable path — it called
// preparedModel->execute(...) directly (a copy of the non-reusable
// executeAsyncReturnError test), so despite its name it did not cover
// createReusableExecution/compute at all. It now follows the same
// create-then-compute pattern as the sibling reusable tests.
TEST(PreparedModelTest, reusableExecuteAsyncReturnError) {
    // setup test
    const auto mockPreparedModel = createMockPreparedModel();
    const auto preparedModel =
            PreparedModel::create(mockPreparedModel, /*executeSynchronously=*/false).value();
    EXPECT_CALL(*mockPreparedModel, execute_1_3(_, _, _, _, _))
            .Times(1)
            .WillOnce(Invoke(makeExecuteAsynchronously(
                    V1_3::ErrorStatus::NONE, V1_3::ErrorStatus::GENERAL_FAILURE, {}, kNoTiming)));

    // create execution
    const auto createResult = preparedModel->createReusableExecution({}, {}, {});
    ASSERT_TRUE(createResult.has_value())
            << "Failed with " << createResult.error().code << ": " << createResult.error().message;
    ASSERT_NE(createResult.value(), nullptr);

    // invoke compute
    const auto computeResult = createResult.value()->compute({});
    ASSERT_FALSE(computeResult.has_value());
    EXPECT_EQ(computeResult.error().code, nn::ErrorStatus::GENERAL_FAILURE);
}
|
||||
|
||||
// Verifies that a HIDL transport failure during asynchronous execution
// surfaces as GENERAL_FAILURE from a reusable execution's compute().
TEST(PreparedModelTest, reusableExecuteAsyncTransportFailure) {
    // setup test
    const auto mockPreparedModel = createMockPreparedModel();
    const auto preparedModel =
            PreparedModel::create(mockPreparedModel, /*executeSynchronously=*/false).value();
    EXPECT_CALL(*mockPreparedModel, execute_1_3(_, _, _, _, _))
            .Times(1)
            .WillOnce(InvokeWithoutArgs(makeGeneralTransportFailure));

    // create execution
    const auto createResult = preparedModel->createReusableExecution({}, {}, {});
    ASSERT_TRUE(createResult.has_value())
            << "Failed with " << createResult.error().code << ": " << createResult.error().message;
    ASSERT_NE(createResult.value(), nullptr);

    // invoke compute
    const auto computeResult = createResult.value()->compute({});
    ASSERT_FALSE(computeResult.has_value());
    EXPECT_EQ(computeResult.error().code, nn::ErrorStatus::GENERAL_FAILURE);
}
|
||||
|
||||
// Verifies that a dead-object transport error during asynchronous execution
// surfaces as DEAD_OBJECT from a reusable execution's compute().
TEST(PreparedModelTest, reusableExecuteAsyncDeadObject) {
    // setup test
    const auto mockPreparedModel = createMockPreparedModel();
    const auto preparedModel =
            PreparedModel::create(mockPreparedModel, /*executeSynchronously=*/false).value();
    EXPECT_CALL(*mockPreparedModel, execute_1_3(_, _, _, _, _))
            .Times(1)
            .WillOnce(InvokeWithoutArgs(makeDeadObjectFailure));

    // create execution
    const auto createResult = preparedModel->createReusableExecution({}, {}, {});
    ASSERT_TRUE(createResult.has_value())
            << "Failed with " << createResult.error().code << ": " << createResult.error().message;
    ASSERT_NE(createResult.value(), nullptr);

    // invoke compute
    const auto computeResult = createResult.value()->compute({});
    ASSERT_FALSE(computeResult.has_value());
    EXPECT_EQ(computeResult.error().code, nn::ErrorStatus::DEAD_OBJECT);
}
|
||||
|
||||
// Verifies that a service crash mid-call (simulated via the mock) surfaces as
// DEAD_OBJECT from a reusable execution's compute().
TEST(PreparedModelTest, reusableExecuteAsyncCrash) {
    // setup test
    const auto mockPreparedModel = createMockPreparedModel();
    const auto preparedModel =
            PreparedModel::create(mockPreparedModel, /*executeSynchronously=*/false).value();
    // The mock action reports a successful launch but kills the (simulated)
    // service first, so the transport layer observes a dead object.
    const auto ret = [&mockPreparedModel]() -> hardware::Return<V1_3::ErrorStatus> {
        mockPreparedModel->simulateCrash();
        return V1_3::ErrorStatus::NONE;
    };
    EXPECT_CALL(*mockPreparedModel, execute_1_3(_, _, _, _, _))
            .Times(1)
            .WillOnce(InvokeWithoutArgs(ret));

    // create execution
    const auto createResult = preparedModel->createReusableExecution({}, {}, {});
    ASSERT_TRUE(createResult.has_value())
            << "Failed with " << createResult.error().code << ": " << createResult.error().message;
    ASSERT_NE(createResult.value(), nullptr);

    // invoke compute
    const auto computeResult = createResult.value()->compute({});
    ASSERT_FALSE(computeResult.has_value());
    EXPECT_EQ(computeResult.error().code, nn::ErrorStatus::DEAD_OBJECT);
}
|
||||
|
||||
// Verifies that a reusable execution can run computeFenced multiple times:
// each call issues one executeFenced on the driver, yields a signaled fence
// and a non-null callback, and the callback returns successfully.
TEST(PreparedModelTest, reusableExecuteFenced) {
    // setup call
    const uint32_t kNumberOfComputations = 2;
    const auto mockPreparedModel = createMockPreparedModel();
    const auto preparedModel =
            PreparedModel::create(mockPreparedModel, /*executeSynchronously=*/true).value();
    const auto mockCallback = MockFencedExecutionCallback::create();
    EXPECT_CALL(*mockCallback, getExecutionInfo(_))
            .Times(kNumberOfComputations)
            .WillRepeatedly(Invoke(makeExecuteFencedCallbackReturn(V1_3::ErrorStatus::NONE,
                                                                   kNoTiming, kNoTiming)));
    EXPECT_CALL(*mockPreparedModel, executeFenced(_, _, _, _, _, _, _))
            .Times(kNumberOfComputations)
            .WillRepeatedly(
                    Invoke(makeExecuteFencedReturn(V1_3::ErrorStatus::NONE, {}, mockCallback)));

    // create execution
    const auto createResult = preparedModel->createReusableExecution({}, {}, {});
    ASSERT_TRUE(createResult.has_value())
            << "Failed with " << createResult.error().code << ": " << createResult.error().message;
    ASSERT_NE(createResult.value(), nullptr);

    // invoke compute repeatedly
    for (uint32_t i = 0; i < kNumberOfComputations; i++) {
        const auto computeResult = createResult.value()->computeFenced({}, {}, {});
        ASSERT_TRUE(computeResult.has_value()) << "Failed with " << computeResult.error().code
                                               << ": " << computeResult.error().message;
        const auto& [syncFence, callback] = computeResult.value();
        EXPECT_EQ(syncFence.syncWait({}), nn::SyncFence::FenceState::SIGNALED);
        ASSERT_NE(callback, nullptr);

        // get results from callback
        const auto callbackResult = callback();
        ASSERT_TRUE(callbackResult.has_value()) << "Failed with " << callbackResult.error().code
                                                << ": " << callbackResult.error().message;
    }
}
|
||||
|
||||
// Verifies that when the fenced execution itself launches successfully but
// its info callback later reports GENERAL_FAILURE, computeFenced succeeds and
// the failure is observed only when the returned callback is invoked.
TEST(PreparedModelTest, reusableExecuteFencedCallbackError) {
    // setup call
    const auto mockPreparedModel = createMockPreparedModel();
    const auto preparedModel =
            PreparedModel::create(mockPreparedModel, /*executeSynchronously=*/true).value();
    const auto mockCallback = MockFencedExecutionCallback::create();
    EXPECT_CALL(*mockCallback, getExecutionInfo(_))
            .Times(1)
            .WillOnce(Invoke(makeExecuteFencedCallbackReturn(V1_3::ErrorStatus::GENERAL_FAILURE,
                                                             kNoTiming, kNoTiming)));
    EXPECT_CALL(*mockPreparedModel, executeFenced(_, _, _, _, _, _, _))
            .Times(1)
            .WillOnce(Invoke(makeExecuteFencedReturn(V1_3::ErrorStatus::NONE, {}, mockCallback)));

    // create execution
    const auto createResult = preparedModel->createReusableExecution({}, {}, {});
    ASSERT_TRUE(createResult.has_value())
            << "Failed with " << createResult.error().code << ": " << createResult.error().message;
    ASSERT_NE(createResult.value(), nullptr);

    // invoke compute
    const auto computeResult = createResult.value()->computeFenced({}, {}, {});
    ASSERT_TRUE(computeResult.has_value()) << "Failed with " << computeResult.error().code << ": "
                                           << computeResult.error().message;
    const auto& [syncFence, callback] = computeResult.value();
    EXPECT_NE(syncFence.syncWait({}), nn::SyncFence::FenceState::ACTIVE);
    ASSERT_NE(callback, nullptr);

    // verify callback failure
    const auto callbackResult = callback();
    ASSERT_FALSE(callbackResult.has_value());
    EXPECT_EQ(callbackResult.error().code, nn::ErrorStatus::GENERAL_FAILURE);
}
|
||||
|
||||
// Verifies that a driver-reported GENERAL_FAILURE from executeFenced is
// propagated through a reusable execution's computeFenced().
TEST(PreparedModelTest, reusableExecuteFencedError) {
    // setup test
    const auto mockPreparedModel = createMockPreparedModel();
    const auto preparedModel =
            PreparedModel::create(mockPreparedModel, /*executeSynchronously=*/true).value();
    EXPECT_CALL(*mockPreparedModel, executeFenced(_, _, _, _, _, _, _))
            .Times(1)
            .WillOnce(Invoke(
                    makeExecuteFencedReturn(V1_3::ErrorStatus::GENERAL_FAILURE, {}, nullptr)));

    // create execution
    const auto createResult = preparedModel->createReusableExecution({}, {}, {});
    ASSERT_TRUE(createResult.has_value())
            << "Failed with " << createResult.error().code << ": " << createResult.error().message;
    ASSERT_NE(createResult.value(), nullptr);

    // invoke compute
    const auto computeResult = createResult.value()->computeFenced({}, {}, {});
    ASSERT_FALSE(computeResult.has_value());
    EXPECT_EQ(computeResult.error().code, nn::ErrorStatus::GENERAL_FAILURE);
}
|
||||
|
||||
// Verifies that a HIDL transport failure during executeFenced surfaces as
// GENERAL_FAILURE from a reusable execution's computeFenced().
TEST(PreparedModelTest, reusableExecuteFencedTransportFailure) {
    // setup test
    const auto mockPreparedModel = createMockPreparedModel();
    const auto preparedModel =
            PreparedModel::create(mockPreparedModel, /*executeSynchronously=*/true).value();
    EXPECT_CALL(*mockPreparedModel, executeFenced(_, _, _, _, _, _, _))
            .Times(1)
            .WillOnce(InvokeWithoutArgs(makeGeneralTransportFailure));

    // create execution
    const auto createResult = preparedModel->createReusableExecution({}, {}, {});
    ASSERT_TRUE(createResult.has_value())
            << "Failed with " << createResult.error().code << ": " << createResult.error().message;
    ASSERT_NE(createResult.value(), nullptr);

    // invoke compute
    const auto computeResult = createResult.value()->computeFenced({}, {}, {});
    ASSERT_FALSE(computeResult.has_value());
    EXPECT_EQ(computeResult.error().code, nn::ErrorStatus::GENERAL_FAILURE);
}
|
||||
|
||||
// Verifies that a dead-object transport error during executeFenced surfaces
// as DEAD_OBJECT from a reusable execution's computeFenced().
TEST(PreparedModelTest, reusableExecuteFencedDeadObject) {
    // setup test
    const auto mockPreparedModel = createMockPreparedModel();
    const auto preparedModel =
            PreparedModel::create(mockPreparedModel, /*executeSynchronously=*/true).value();
    EXPECT_CALL(*mockPreparedModel, executeFenced(_, _, _, _, _, _, _))
            .Times(1)
            .WillOnce(InvokeWithoutArgs(makeDeadObjectFailure));

    // create execution
    const auto createResult = preparedModel->createReusableExecution({}, {}, {});
    ASSERT_TRUE(createResult.has_value())
            << "Failed with " << createResult.error().code << ": " << createResult.error().message;
    ASSERT_NE(createResult.value(), nullptr);

    // invoke compute
    const auto computeResult = createResult.value()->computeFenced({}, {}, {});
    ASSERT_FALSE(computeResult.has_value());
    EXPECT_EQ(computeResult.error().code, nn::ErrorStatus::DEAD_OBJECT);
}
|
||||
TEST(PreparedModelTest, configureExecutionBurst) {
|
||||
// setup test
|
||||
const auto mockPreparedModel = MockPreparedModel::create();
|
||||
|
|
|
@ -38,7 +38,7 @@
|
|||
namespace aidl::android::hardware::neuralnetworks::utils {
|
||||
|
||||
// Class that adapts aidl_hal::IBurst to nn::IBurst.
|
||||
class Burst final : public nn::IBurst {
|
||||
class Burst final : public nn::IBurst, public std::enable_shared_from_this<Burst> {
|
||||
struct PrivateConstructorTag {};
|
||||
|
||||
public:
|
||||
|
@ -100,6 +100,16 @@ class Burst final : public nn::IBurst {
|
|||
const nn::OptionalTimePoint& deadline,
|
||||
const nn::OptionalDuration& loopTimeoutDuration) const override;
|
||||
|
||||
// See IBurst::createReusableExecution for information.
|
||||
nn::GeneralResult<nn::SharedExecution> createReusableExecution(
|
||||
const nn::Request& request, nn::MeasureTiming measure,
|
||||
const nn::OptionalDuration& loopTimeoutDuration) const override;
|
||||
|
||||
nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> executeInternal(
|
||||
const aidl_hal::Request& request, const std::vector<int64_t>& memoryIdentifierTokens,
|
||||
bool measure, int64_t deadline, int64_t loopTimeoutDuration,
|
||||
const hal::utils::RequestRelocation& relocation) const;
|
||||
|
||||
private:
|
||||
mutable std::atomic_flag mExecutionInFlight = ATOMIC_FLAG_INIT;
|
||||
const std::shared_ptr<aidl_hal::IBurst> kBurst;
|
||||
|
|
65
neuralnetworks/aidl/utils/include/nnapi/hal/aidl/Execution.h
Normal file
65
neuralnetworks/aidl/utils/include/nnapi/hal/aidl/Execution.h
Normal file
|
@ -0,0 +1,65 @@
|
|||
/*
|
||||
* Copyright (C) 2021 The Android Open Source Project
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_AIDL_UTILS_EXECUTION_H
|
||||
#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_AIDL_UTILS_EXECUTION_H
|
||||
|
||||
#include <nnapi/IExecution.h>
|
||||
#include <nnapi/Result.h>
|
||||
#include <nnapi/Types.h>
|
||||
#include <nnapi/hal/CommonUtils.h>
|
||||
|
||||
#include "PreparedModel.h"
|
||||
|
||||
#include <memory>
|
||||
#include <utility>
|
||||
#include <vector>
|
||||
|
||||
// See hardware/interfaces/neuralnetworks/utils/README.md for more information on HIDL interface
|
||||
// lifetimes across processes and for protecting asynchronous calls across HIDL.
|
||||
|
||||
namespace aidl::android::hardware::neuralnetworks::utils {
|
||||
|
||||
class Execution final : public nn::IExecution, public std::enable_shared_from_this<Execution> {
|
||||
struct PrivateConstructorTag {};
|
||||
|
||||
public:
|
||||
static nn::GeneralResult<std::shared_ptr<const Execution>> create(
|
||||
std::shared_ptr<const PreparedModel> preparedModel, Request request,
|
||||
hal::utils::RequestRelocation relocation, bool measure, int64_t loopTimeoutDuration);
|
||||
|
||||
Execution(PrivateConstructorTag tag, std::shared_ptr<const PreparedModel> preparedModel,
|
||||
Request request, hal::utils::RequestRelocation relocation, bool measure,
|
||||
int64_t loopTimeoutDuration);
|
||||
|
||||
nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> compute(
|
||||
const nn::OptionalTimePoint& deadline) const override;
|
||||
|
||||
nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>> computeFenced(
|
||||
const std::vector<nn::SyncFence>& waitFor, const nn::OptionalTimePoint& deadline,
|
||||
const nn::OptionalDuration& timeoutDurationAfterFence) const override;
|
||||
|
||||
private:
|
||||
const std::shared_ptr<const PreparedModel> kPreparedModel;
|
||||
const Request kRequest;
|
||||
const hal::utils::RequestRelocation kRelocation;
|
||||
const bool kMeasure;
|
||||
const int64_t kLoopTimeoutDuration;
|
||||
};
|
||||
|
||||
} // namespace aidl::android::hardware::neuralnetworks::utils
|
||||
|
||||
#endif // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_AIDL_UTILS_EXECUTION_H
|
|
@ -18,6 +18,7 @@
|
|||
#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_AIDL_UTILS_PREPARED_MODEL_H
|
||||
|
||||
#include <aidl/android/hardware/neuralnetworks/IPreparedModel.h>
|
||||
#include <aidl/android/hardware/neuralnetworks/Request.h>
|
||||
#include <nnapi/IPreparedModel.h>
|
||||
#include <nnapi/Result.h>
|
||||
#include <nnapi/Types.h>
|
||||
|
@ -34,7 +35,8 @@
|
|||
namespace aidl::android::hardware::neuralnetworks::utils {
|
||||
|
||||
// Class that adapts aidl_hal::IPreparedModel to nn::IPreparedModel.
|
||||
class PreparedModel final : public nn::IPreparedModel {
|
||||
class PreparedModel final : public nn::IPreparedModel,
|
||||
public std::enable_shared_from_this<PreparedModel> {
|
||||
struct PrivateConstructorTag {};
|
||||
|
||||
public:
|
||||
|
@ -55,10 +57,25 @@ class PreparedModel final : public nn::IPreparedModel {
|
|||
const nn::OptionalDuration& loopTimeoutDuration,
|
||||
const nn::OptionalDuration& timeoutDurationAfterFence) const override;
|
||||
|
||||
nn::GeneralResult<nn::SharedExecution> createReusableExecution(
|
||||
const nn::Request& request, nn::MeasureTiming measure,
|
||||
const nn::OptionalDuration& loopTimeoutDuration) const override;
|
||||
|
||||
nn::GeneralResult<nn::SharedBurst> configureExecutionBurst() const override;
|
||||
|
||||
std::any getUnderlyingResource() const override;
|
||||
|
||||
nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> executeInternal(
|
||||
const Request& request, bool measure, int64_t deadline, int64_t loopTimeoutDuration,
|
||||
const hal::utils::RequestRelocation& relocation) const;
|
||||
|
||||
nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>>
|
||||
executeFencedInternal(const Request& request,
|
||||
const std::vector<ndk::ScopedFileDescriptor>& waitFor, bool measure,
|
||||
int64_t deadline, int64_t loopTimeoutDuration,
|
||||
int64_t timeoutDurationAfterFence,
|
||||
const hal::utils::RequestRelocation& relocation) const;
|
||||
|
||||
private:
|
||||
const std::shared_ptr<aidl_hal::IPreparedModel> kPreparedModel;
|
||||
};
|
||||
|
|
|
@ -22,6 +22,7 @@
|
|||
#include <android-base/logging.h>
|
||||
#include <android/binder_auto_utils.h>
|
||||
#include <nnapi/IBurst.h>
|
||||
#include <nnapi/IExecution.h>
|
||||
#include <nnapi/Result.h>
|
||||
#include <nnapi/TypeUtils.h>
|
||||
#include <nnapi/Types.h>
|
||||
|
@ -35,6 +36,39 @@
|
|||
namespace aidl::android::hardware::neuralnetworks::utils {
|
||||
namespace {
|
||||
|
||||
class BurstExecution final : public nn::IExecution,
|
||||
public std::enable_shared_from_this<BurstExecution> {
|
||||
struct PrivateConstructorTag {};
|
||||
|
||||
public:
|
||||
static nn::GeneralResult<std::shared_ptr<const BurstExecution>> create(
|
||||
std::shared_ptr<const Burst> burst, Request request,
|
||||
std::vector<int64_t> memoryIdentifierTokens, bool measure, int64_t loopTimeoutDuration,
|
||||
hal::utils::RequestRelocation relocation,
|
||||
std::vector<Burst::OptionalCacheHold> cacheHolds);
|
||||
|
||||
BurstExecution(PrivateConstructorTag tag, std::shared_ptr<const Burst> burst, Request request,
|
||||
std::vector<int64_t> memoryIdentifierTokens, bool measure,
|
||||
int64_t loopTimeoutDuration, hal::utils::RequestRelocation relocation,
|
||||
std::vector<Burst::OptionalCacheHold> cacheHolds);
|
||||
|
||||
nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> compute(
|
||||
const nn::OptionalTimePoint& deadline) const override;
|
||||
|
||||
nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>> computeFenced(
|
||||
const std::vector<nn::SyncFence>& waitFor, const nn::OptionalTimePoint& deadline,
|
||||
const nn::OptionalDuration& timeoutDurationAfterFence) const override;
|
||||
|
||||
private:
|
||||
const std::shared_ptr<const Burst> kBurst;
|
||||
const Request kRequest;
|
||||
const std::vector<int64_t>& kMemoryIdentifierTokens;
|
||||
const bool kMeasure;
|
||||
const int64_t kLoopTimeoutDuration;
|
||||
const hal::utils::RequestRelocation kRelocation;
|
||||
const std::vector<Burst::OptionalCacheHold> kCacheHolds;
|
||||
};
|
||||
|
||||
nn::GeneralResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> convertExecutionResults(
|
||||
const std::vector<OutputShape>& outputShapes, const Timing& timing) {
|
||||
return std::make_pair(NN_TRY(nn::convert(outputShapes)), NN_TRY(nn::convert(timing)));
|
||||
|
@ -139,17 +173,12 @@ nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> Burst::
|
|||
const nn::Request& request, nn::MeasureTiming measure,
|
||||
const nn::OptionalTimePoint& deadline,
|
||||
const nn::OptionalDuration& loopTimeoutDuration) const {
|
||||
// Ensure that at most one execution is in flight at any given time.
|
||||
const bool alreadyInFlight = mExecutionInFlight.test_and_set();
|
||||
if (alreadyInFlight) {
|
||||
return NN_ERROR() << "IBurst already has an execution in flight";
|
||||
}
|
||||
const auto guard = ::android::base::make_scope_guard([this] { mExecutionInFlight.clear(); });
|
||||
|
||||
// Ensure that request is ready for IPC.
|
||||
std::optional<nn::Request> maybeRequestInShared;
|
||||
const nn::Request& requestInShared = NN_TRY(hal::utils::makeExecutionFailure(
|
||||
hal::utils::flushDataFromPointerToShared(&request, &maybeRequestInShared)));
|
||||
hal::utils::RequestRelocation relocation;
|
||||
const nn::Request& requestInShared =
|
||||
NN_TRY(hal::utils::makeExecutionFailure(hal::utils::convertRequestFromPointerToShared(
|
||||
&request, &maybeRequestInShared, &relocation)));
|
||||
|
||||
const auto aidlRequest = NN_TRY(hal::utils::makeExecutionFailure(convert(requestInShared)));
|
||||
const auto aidlMeasure = NN_TRY(hal::utils::makeExecutionFailure(convert(measure)));
|
||||
|
@ -159,9 +188,9 @@ nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> Burst::
|
|||
|
||||
std::vector<int64_t> memoryIdentifierTokens;
|
||||
std::vector<OptionalCacheHold> holds;
|
||||
memoryIdentifierTokens.reserve(request.pools.size());
|
||||
holds.reserve(request.pools.size());
|
||||
for (const auto& memoryPool : request.pools) {
|
||||
memoryIdentifierTokens.reserve(requestInShared.pools.size());
|
||||
holds.reserve(requestInShared.pools.size());
|
||||
for (const auto& memoryPool : requestInShared.pools) {
|
||||
if (const auto* memory = std::get_if<nn::SharedMemory>(&memoryPool)) {
|
||||
if (auto cached = kMemoryCache->getMemoryIfAvailable(*memory)) {
|
||||
auto& [identifier, hold] = *cached;
|
||||
|
@ -172,12 +201,30 @@ nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> Burst::
|
|||
}
|
||||
memoryIdentifierTokens.push_back(-1);
|
||||
}
|
||||
CHECK_EQ(request.pools.size(), memoryIdentifierTokens.size());
|
||||
CHECK_EQ(requestInShared.pools.size(), memoryIdentifierTokens.size());
|
||||
|
||||
return executeInternal(aidlRequest, memoryIdentifierTokens, aidlMeasure, aidlDeadline,
|
||||
aidlLoopTimeoutDuration, relocation);
|
||||
}
|
||||
|
||||
nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> Burst::executeInternal(
|
||||
const Request& request, const std::vector<int64_t>& memoryIdentifierTokens, bool measure,
|
||||
int64_t deadline, int64_t loopTimeoutDuration,
|
||||
const hal::utils::RequestRelocation& relocation) const {
|
||||
// Ensure that at most one execution is in flight at any given time.
|
||||
const bool alreadyInFlight = mExecutionInFlight.test_and_set();
|
||||
if (alreadyInFlight) {
|
||||
return NN_ERROR() << "IBurst already has an execution in flight";
|
||||
}
|
||||
const auto guard = ::android::base::make_scope_guard([this] { mExecutionInFlight.clear(); });
|
||||
|
||||
if (relocation.input) {
|
||||
relocation.input->flush();
|
||||
}
|
||||
|
||||
ExecutionResult executionResult;
|
||||
const auto ret =
|
||||
kBurst->executeSynchronously(aidlRequest, memoryIdentifierTokens, aidlMeasure,
|
||||
aidlDeadline, aidlLoopTimeoutDuration, &executionResult);
|
||||
const auto ret = kBurst->executeSynchronously(request, memoryIdentifierTokens, measure,
|
||||
deadline, loopTimeoutDuration, &executionResult);
|
||||
HANDLE_ASTATUS(ret) << "execute failed";
|
||||
if (!executionResult.outputSufficientSize) {
|
||||
auto canonicalOutputShapes =
|
||||
|
@ -188,10 +235,88 @@ nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> Burst::
|
|||
auto [outputShapes, timing] = NN_TRY(hal::utils::makeExecutionFailure(
|
||||
convertExecutionResults(executionResult.outputShapes, executionResult.timing)));
|
||||
|
||||
NN_TRY(hal::utils::makeExecutionFailure(
|
||||
hal::utils::unflushDataFromSharedToPointer(request, maybeRequestInShared)));
|
||||
|
||||
if (relocation.output) {
|
||||
relocation.output->flush();
|
||||
}
|
||||
return std::make_pair(std::move(outputShapes), timing);
|
||||
}
|
||||
|
||||
nn::GeneralResult<nn::SharedExecution> Burst::createReusableExecution(
|
||||
const nn::Request& request, nn::MeasureTiming measure,
|
||||
const nn::OptionalDuration& loopTimeoutDuration) const {
|
||||
// Ensure that request is ready for IPC.
|
||||
std::optional<nn::Request> maybeRequestInShared;
|
||||
hal::utils::RequestRelocation relocation;
|
||||
const nn::Request& requestInShared = NN_TRY(hal::utils::convertRequestFromPointerToShared(
|
||||
&request, &maybeRequestInShared, &relocation));
|
||||
|
||||
auto aidlRequest = NN_TRY(convert(requestInShared));
|
||||
const auto aidlMeasure = NN_TRY(convert(measure));
|
||||
const auto aidlLoopTimeoutDuration = NN_TRY(convert(loopTimeoutDuration));
|
||||
|
||||
std::vector<int64_t> memoryIdentifierTokens;
|
||||
std::vector<OptionalCacheHold> holds;
|
||||
memoryIdentifierTokens.reserve(requestInShared.pools.size());
|
||||
holds.reserve(requestInShared.pools.size());
|
||||
for (const auto& memoryPool : requestInShared.pools) {
|
||||
if (const auto* memory = std::get_if<nn::SharedMemory>(&memoryPool)) {
|
||||
if (auto cached = kMemoryCache->getMemoryIfAvailable(*memory)) {
|
||||
auto& [identifier, hold] = *cached;
|
||||
memoryIdentifierTokens.push_back(identifier);
|
||||
holds.push_back(std::move(hold));
|
||||
continue;
|
||||
}
|
||||
}
|
||||
memoryIdentifierTokens.push_back(-1);
|
||||
}
|
||||
CHECK_EQ(requestInShared.pools.size(), memoryIdentifierTokens.size());
|
||||
|
||||
return BurstExecution::create(shared_from_this(), std::move(aidlRequest),
|
||||
std::move(memoryIdentifierTokens), aidlMeasure,
|
||||
aidlLoopTimeoutDuration, std::move(relocation), std::move(holds));
|
||||
}
|
||||
|
||||
nn::GeneralResult<std::shared_ptr<const BurstExecution>> BurstExecution::create(
|
||||
std::shared_ptr<const Burst> burst, Request request,
|
||||
std::vector<int64_t> memoryIdentifierTokens, bool measure, int64_t loopTimeoutDuration,
|
||||
hal::utils::RequestRelocation relocation,
|
||||
std::vector<Burst::OptionalCacheHold> cacheHolds) {
|
||||
if (burst == nullptr) {
|
||||
return NN_ERROR() << "aidl::utils::BurstExecution::create must have non-null burst";
|
||||
}
|
||||
|
||||
return std::make_shared<const BurstExecution>(
|
||||
PrivateConstructorTag{}, std::move(burst), std::move(request),
|
||||
std::move(memoryIdentifierTokens), measure, loopTimeoutDuration, std::move(relocation),
|
||||
std::move(cacheHolds));
|
||||
}
|
||||
|
||||
BurstExecution::BurstExecution(PrivateConstructorTag /*tag*/, std::shared_ptr<const Burst> burst,
|
||||
Request request, std::vector<int64_t> memoryIdentifierTokens,
|
||||
bool measure, int64_t loopTimeoutDuration,
|
||||
hal::utils::RequestRelocation relocation,
|
||||
std::vector<Burst::OptionalCacheHold> cacheHolds)
|
||||
: kBurst(std::move(burst)),
|
||||
kRequest(std::move(request)),
|
||||
kMemoryIdentifierTokens(std::move(memoryIdentifierTokens)),
|
||||
kMeasure(measure),
|
||||
kLoopTimeoutDuration(loopTimeoutDuration),
|
||||
kRelocation(std::move(relocation)),
|
||||
kCacheHolds(std::move(cacheHolds)) {}
|
||||
|
||||
nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> BurstExecution::compute(
|
||||
const nn::OptionalTimePoint& deadline) const {
|
||||
const auto aidlDeadline = NN_TRY(hal::utils::makeExecutionFailure(convert(deadline)));
|
||||
return kBurst->executeInternal(kRequest, kMemoryIdentifierTokens, kMeasure, aidlDeadline,
|
||||
kLoopTimeoutDuration, kRelocation);
|
||||
}
|
||||
|
||||
nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>>
|
||||
BurstExecution::computeFenced(const std::vector<nn::SyncFence>& /*waitFor*/,
|
||||
const nn::OptionalTimePoint& /*deadline*/,
|
||||
const nn::OptionalDuration& /*timeoutDurationAfterFence*/) const {
|
||||
return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
|
||||
<< "IExecution::computeFenced is not supported on burst object";
|
||||
}
|
||||
|
||||
} // namespace aidl::android::hardware::neuralnetworks::utils
|
||||
|
|
79
neuralnetworks/aidl/utils/src/Execution.cpp
Normal file
79
neuralnetworks/aidl/utils/src/Execution.cpp
Normal file
|
@ -0,0 +1,79 @@
|
|||
/*
|
||||
* Copyright (C) 2021 The Android Open Source Project
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#include "Execution.h"
|
||||
|
||||
#include "Conversions.h"
|
||||
#include "PreparedModel.h"
|
||||
#include "Utils.h"
|
||||
|
||||
#include <aidl/android/hardware/neuralnetworks/Request.h>
|
||||
#include <nnapi/IExecution.h>
|
||||
#include <nnapi/Result.h>
|
||||
#include <nnapi/Types.h>
|
||||
#include <nnapi/hal/CommonUtils.h>
|
||||
#include <nnapi/hal/HandleError.h>
|
||||
|
||||
#include <memory>
|
||||
#include <utility>
|
||||
#include <vector>
|
||||
|
||||
// See hardware/interfaces/neuralnetworks/utils/README.md for more information on HIDL interface
|
||||
// lifetimes across processes and for protecting asynchronous calls across HIDL.
|
||||
|
||||
namespace aidl::android::hardware::neuralnetworks::utils {
|
||||
|
||||
nn::GeneralResult<std::shared_ptr<const Execution>> Execution::create(
|
||||
std::shared_ptr<const PreparedModel> preparedModel, Request request,
|
||||
hal::utils::RequestRelocation relocation, bool measure, int64_t loopTimeoutDuration) {
|
||||
if (preparedModel == nullptr) {
|
||||
return NN_ERROR() << "aidl::utils::Execution::create must have non-null preparedModel";
|
||||
}
|
||||
|
||||
return std::make_shared<const Execution>(PrivateConstructorTag{}, std::move(preparedModel),
|
||||
std::move(request), std::move(relocation), measure,
|
||||
loopTimeoutDuration);
|
||||
}
|
||||
|
||||
Execution::Execution(PrivateConstructorTag /*tag*/,
|
||||
std::shared_ptr<const PreparedModel> preparedModel, Request request,
|
||||
hal::utils::RequestRelocation relocation, bool measure,
|
||||
int64_t loopTimeoutDuration)
|
||||
: kPreparedModel(std::move(preparedModel)),
|
||||
kRequest(std::move(request)),
|
||||
kRelocation(std::move(relocation)),
|
||||
kMeasure(measure),
|
||||
kLoopTimeoutDuration(loopTimeoutDuration) {}
|
||||
|
||||
nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> Execution::compute(
|
||||
const nn::OptionalTimePoint& deadline) const {
|
||||
const auto aidlDeadline = NN_TRY(hal::utils::makeExecutionFailure(convert(deadline)));
|
||||
return kPreparedModel->executeInternal(kRequest, kMeasure, aidlDeadline, kLoopTimeoutDuration,
|
||||
kRelocation);
|
||||
}
|
||||
|
||||
nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>> Execution::computeFenced(
|
||||
const std::vector<nn::SyncFence>& waitFor, const nn::OptionalTimePoint& deadline,
|
||||
const nn::OptionalDuration& timeoutDurationAfterFence) const {
|
||||
const auto aidlWaitFor = NN_TRY(convert(waitFor));
|
||||
const auto aidlDeadline = NN_TRY(convert(deadline));
|
||||
const auto aidlTimeoutDurationAfterFence = NN_TRY(convert(timeoutDurationAfterFence));
|
||||
return kPreparedModel->executeFencedInternal(kRequest, aidlWaitFor, kMeasure, aidlDeadline,
|
||||
kLoopTimeoutDuration,
|
||||
aidlTimeoutDurationAfterFence, kRelocation);
|
||||
}
|
||||
|
||||
} // namespace aidl::android::hardware::neuralnetworks::utils
|
|
@ -19,8 +19,11 @@
|
|||
#include "Burst.h"
|
||||
#include "Callbacks.h"
|
||||
#include "Conversions.h"
|
||||
#include "Execution.h"
|
||||
#include "ProtectCallback.h"
|
||||
#include "Utils.h"
|
||||
|
||||
#include <aidl/android/hardware/neuralnetworks/Request.h>
|
||||
#include <android/binder_auto_utils.h>
|
||||
#include <nnapi/IPreparedModel.h>
|
||||
#include <nnapi/Result.h>
|
||||
|
@ -74,18 +77,31 @@ nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> Prepare
|
|||
const nn::OptionalDuration& loopTimeoutDuration) const {
|
||||
// Ensure that request is ready for IPC.
|
||||
std::optional<nn::Request> maybeRequestInShared;
|
||||
const nn::Request& requestInShared = NN_TRY(hal::utils::makeExecutionFailure(
|
||||
hal::utils::flushDataFromPointerToShared(&request, &maybeRequestInShared)));
|
||||
hal::utils::RequestRelocation relocation;
|
||||
const nn::Request& requestInShared =
|
||||
NN_TRY(hal::utils::makeExecutionFailure(hal::utils::convertRequestFromPointerToShared(
|
||||
&request, &maybeRequestInShared, &relocation)));
|
||||
|
||||
const auto aidlRequest = NN_TRY(hal::utils::makeExecutionFailure(convert(requestInShared)));
|
||||
const auto aidlMeasure = NN_TRY(hal::utils::makeExecutionFailure(convert(measure)));
|
||||
const auto aidlDeadline = NN_TRY(hal::utils::makeExecutionFailure(convert(deadline)));
|
||||
const auto aidlLoopTimeoutDuration =
|
||||
NN_TRY(hal::utils::makeExecutionFailure(convert(loopTimeoutDuration)));
|
||||
return executeInternal(aidlRequest, aidlMeasure, aidlDeadline, aidlLoopTimeoutDuration,
|
||||
relocation);
|
||||
}
|
||||
|
||||
nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>>
|
||||
PreparedModel::executeInternal(const Request& request, bool measure, int64_t deadline,
|
||||
int64_t loopTimeoutDuration,
|
||||
const hal::utils::RequestRelocation& relocation) const {
|
||||
if (relocation.input) {
|
||||
relocation.input->flush();
|
||||
}
|
||||
|
||||
ExecutionResult executionResult;
|
||||
const auto ret = kPreparedModel->executeSynchronously(
|
||||
aidlRequest, aidlMeasure, aidlDeadline, aidlLoopTimeoutDuration, &executionResult);
|
||||
const auto ret = kPreparedModel->executeSynchronously(request, measure, deadline,
|
||||
loopTimeoutDuration, &executionResult);
|
||||
HANDLE_ASTATUS(ret) << "executeSynchronously failed";
|
||||
if (!executionResult.outputSufficientSize) {
|
||||
auto canonicalOutputShapes =
|
||||
|
@ -96,9 +112,9 @@ nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> Prepare
|
|||
auto [outputShapes, timing] = NN_TRY(hal::utils::makeExecutionFailure(
|
||||
convertExecutionResults(executionResult.outputShapes, executionResult.timing)));
|
||||
|
||||
NN_TRY(hal::utils::makeExecutionFailure(
|
||||
hal::utils::unflushDataFromSharedToPointer(request, maybeRequestInShared)));
|
||||
|
||||
if (relocation.output) {
|
||||
relocation.output->flush();
|
||||
}
|
||||
return std::make_pair(std::move(outputShapes), timing);
|
||||
}
|
||||
|
||||
|
@ -109,8 +125,9 @@ PreparedModel::executeFenced(const nn::Request& request, const std::vector<nn::S
|
|||
const nn::OptionalDuration& timeoutDurationAfterFence) const {
|
||||
// Ensure that request is ready for IPC.
|
||||
std::optional<nn::Request> maybeRequestInShared;
|
||||
const nn::Request& requestInShared =
|
||||
NN_TRY(hal::utils::flushDataFromPointerToShared(&request, &maybeRequestInShared));
|
||||
hal::utils::RequestRelocation relocation;
|
||||
const nn::Request& requestInShared = NN_TRY(hal::utils::convertRequestFromPointerToShared(
|
||||
&request, &maybeRequestInShared, &relocation));
|
||||
|
||||
const auto aidlRequest = NN_TRY(convert(requestInShared));
|
||||
const auto aidlWaitFor = NN_TRY(convert(waitFor));
|
||||
|
@ -118,11 +135,25 @@ PreparedModel::executeFenced(const nn::Request& request, const std::vector<nn::S
|
|||
const auto aidlDeadline = NN_TRY(convert(deadline));
|
||||
const auto aidlLoopTimeoutDuration = NN_TRY(convert(loopTimeoutDuration));
|
||||
const auto aidlTimeoutDurationAfterFence = NN_TRY(convert(timeoutDurationAfterFence));
|
||||
return executeFencedInternal(aidlRequest, aidlWaitFor, aidlMeasure, aidlDeadline,
|
||||
aidlLoopTimeoutDuration, aidlTimeoutDurationAfterFence,
|
||||
relocation);
|
||||
}
|
||||
|
||||
nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>>
|
||||
PreparedModel::executeFencedInternal(const Request& request,
|
||||
const std::vector<ndk::ScopedFileDescriptor>& waitFor,
|
||||
bool measure, int64_t deadline, int64_t loopTimeoutDuration,
|
||||
int64_t timeoutDurationAfterFence,
|
||||
const hal::utils::RequestRelocation& relocation) const {
|
||||
if (relocation.input) {
|
||||
relocation.input->flush();
|
||||
}
|
||||
|
||||
FencedExecutionResult result;
|
||||
const auto ret = kPreparedModel->executeFenced(aidlRequest, aidlWaitFor, aidlMeasure,
|
||||
aidlDeadline, aidlLoopTimeoutDuration,
|
||||
aidlTimeoutDurationAfterFence, &result);
|
||||
const auto ret =
|
||||
kPreparedModel->executeFenced(request, waitFor, measure, deadline, loopTimeoutDuration,
|
||||
timeoutDurationAfterFence, &result);
|
||||
HANDLE_ASTATUS(ret) << "executeFenced failed";
|
||||
|
||||
auto resultSyncFence = nn::SyncFence::createAsSignaled();
|
||||
|
@ -137,12 +168,12 @@ PreparedModel::executeFenced(const nn::Request& request, const std::vector<nn::S
|
|||
|
||||
// If executeFenced required the request memory to be moved into shared memory, block here until
|
||||
// the fenced execution has completed and flush the memory back.
|
||||
if (maybeRequestInShared.has_value()) {
|
||||
if (relocation.output) {
|
||||
const auto state = resultSyncFence.syncWait({});
|
||||
if (state != nn::SyncFence::FenceState::SIGNALED) {
|
||||
return NN_ERROR() << "syncWait failed with " << state;
|
||||
}
|
||||
NN_TRY(hal::utils::unflushDataFromSharedToPointer(request, maybeRequestInShared));
|
||||
relocation.output->flush();
|
||||
}
|
||||
|
||||
// Create callback which can be used to retrieve the execution error status and timings.
|
||||
|
@ -159,6 +190,22 @@ PreparedModel::executeFenced(const nn::Request& request, const std::vector<nn::S
|
|||
return std::make_pair(std::move(resultSyncFence), std::move(resultCallback));
|
||||
}
|
||||
|
||||
nn::GeneralResult<nn::SharedExecution> PreparedModel::createReusableExecution(
|
||||
const nn::Request& request, nn::MeasureTiming measure,
|
||||
const nn::OptionalDuration& loopTimeoutDuration) const {
|
||||
// Ensure that request is ready for IPC.
|
||||
std::optional<nn::Request> maybeRequestInShared;
|
||||
hal::utils::RequestRelocation relocation;
|
||||
const nn::Request& requestInShared = NN_TRY(hal::utils::convertRequestFromPointerToShared(
|
||||
&request, &maybeRequestInShared, &relocation));
|
||||
|
||||
auto aidlRequest = NN_TRY(convert(requestInShared));
|
||||
auto aidlMeasure = NN_TRY(convert(measure));
|
||||
auto aidlLoopTimeoutDuration = NN_TRY(convert(loopTimeoutDuration));
|
||||
return Execution::create(shared_from_this(), std::move(aidlRequest), std::move(relocation),
|
||||
aidlMeasure, aidlLoopTimeoutDuration);
|
||||
}
|
||||
|
||||
nn::GeneralResult<nn::SharedBurst> PreparedModel::configureExecutionBurst() const {
|
||||
std::shared_ptr<IBurst> burst;
|
||||
const auto ret = kPreparedModel->configureExecutionBurst(&burst);
|
||||
|
|
|
@ -21,6 +21,7 @@
|
|||
#include <aidl/android/hardware/neuralnetworks/IFencedExecutionCallback.h>
|
||||
#include <gmock/gmock.h>
|
||||
#include <gtest/gtest.h>
|
||||
#include <nnapi/IExecution.h>
|
||||
#include <nnapi/IPreparedModel.h>
|
||||
#include <nnapi/TypeUtils.h>
|
||||
#include <nnapi/Types.h>
|
||||
|
@ -253,6 +254,225 @@ TEST(PreparedModelTest, executeFencedDeadObject) {
|
|||
EXPECT_EQ(result.error().code, nn::ErrorStatus::DEAD_OBJECT);
|
||||
}
|
||||
|
||||
TEST(PreparedModelTest, reusableExecuteSync) {
|
||||
// setup call
|
||||
const uint32_t kNumberOfComputations = 2;
|
||||
const auto mockPreparedModel = MockPreparedModel::create();
|
||||
const auto preparedModel = PreparedModel::create(mockPreparedModel).value();
|
||||
const auto mockExecutionResult = ExecutionResult{
|
||||
.outputSufficientSize = true,
|
||||
.outputShapes = {},
|
||||
.timing = kNoTiming,
|
||||
};
|
||||
EXPECT_CALL(*mockPreparedModel, executeSynchronously(_, _, _, _, _))
|
||||
.Times(kNumberOfComputations)
|
||||
.WillRepeatedly(
|
||||
DoAll(SetArgPointee<4>(mockExecutionResult), InvokeWithoutArgs(makeStatusOk)));
|
||||
|
||||
// create execution
|
||||
const auto createResult = preparedModel->createReusableExecution({}, {}, {});
|
||||
ASSERT_TRUE(createResult.has_value())
|
||||
<< "Failed with " << createResult.error().code << ": " << createResult.error().message;
|
||||
ASSERT_NE(createResult.value(), nullptr);
|
||||
|
||||
// invoke compute repeatedly
|
||||
for (uint32_t i = 0; i < kNumberOfComputations; i++) {
|
||||
const auto computeResult = createResult.value()->compute({});
|
||||
EXPECT_TRUE(computeResult.has_value()) << "Failed with " << computeResult.error().code
|
||||
<< ": " << computeResult.error().message;
|
||||
}
|
||||
}
|
||||
|
||||
TEST(PreparedModelTest, reusableExecuteSyncError) {
|
||||
// setup test
|
||||
const auto mockPreparedModel = MockPreparedModel::create();
|
||||
const auto preparedModel = PreparedModel::create(mockPreparedModel).value();
|
||||
EXPECT_CALL(*mockPreparedModel, executeSynchronously(_, _, _, _, _))
|
||||
.Times(1)
|
||||
.WillOnce(Invoke(makeGeneralFailure));
|
||||
|
||||
// create execution
|
||||
const auto createResult = preparedModel->createReusableExecution({}, {}, {});
|
||||
ASSERT_TRUE(createResult.has_value())
|
||||
<< "Failed with " << createResult.error().code << ": " << createResult.error().message;
|
||||
ASSERT_NE(createResult.value(), nullptr);
|
||||
|
||||
// invoke compute
|
||||
const auto computeResult = createResult.value()->compute({});
|
||||
ASSERT_FALSE(computeResult.has_value());
|
||||
EXPECT_EQ(computeResult.error().code, nn::ErrorStatus::GENERAL_FAILURE);
|
||||
}
|
||||
|
||||
TEST(PreparedModelTest, reusableExecuteSyncTransportFailure) {
|
||||
// setup test
|
||||
const auto mockPreparedModel = MockPreparedModel::create();
|
||||
const auto preparedModel = PreparedModel::create(mockPreparedModel).value();
|
||||
EXPECT_CALL(*mockPreparedModel, executeSynchronously(_, _, _, _, _))
|
||||
.Times(1)
|
||||
.WillOnce(InvokeWithoutArgs(makeGeneralTransportFailure));
|
||||
|
||||
// create execution
|
||||
const auto createResult = preparedModel->createReusableExecution({}, {}, {});
|
||||
ASSERT_TRUE(createResult.has_value())
|
||||
<< "Failed with " << createResult.error().code << ": " << createResult.error().message;
|
||||
ASSERT_NE(createResult.value(), nullptr);
|
||||
|
||||
// invoke compute
|
||||
const auto computeResult = createResult.value()->compute({});
|
||||
ASSERT_FALSE(computeResult.has_value());
|
||||
EXPECT_EQ(computeResult.error().code, nn::ErrorStatus::GENERAL_FAILURE);
|
||||
}
|
||||
|
||||
TEST(PreparedModelTest, reusableExecuteSyncDeadObject) {
|
||||
// setup test
|
||||
const auto mockPreparedModel = MockPreparedModel::create();
|
||||
const auto preparedModel = PreparedModel::create(mockPreparedModel).value();
|
||||
EXPECT_CALL(*mockPreparedModel, executeSynchronously(_, _, _, _, _))
|
||||
.Times(1)
|
||||
.WillOnce(InvokeWithoutArgs(makeDeadObjectFailure));
|
||||
|
||||
// create execution
|
||||
const auto createResult = preparedModel->createReusableExecution({}, {}, {});
|
||||
ASSERT_TRUE(createResult.has_value())
|
||||
<< "Failed with " << createResult.error().code << ": " << createResult.error().message;
|
||||
ASSERT_NE(createResult.value(), nullptr);
|
||||
|
||||
// invoke compute
|
||||
const auto computeResult = createResult.value()->compute({});
|
||||
ASSERT_FALSE(computeResult.has_value());
|
||||
EXPECT_EQ(computeResult.error().code, nn::ErrorStatus::DEAD_OBJECT);
|
||||
}
|
||||
|
||||
TEST(PreparedModelTest, reusableExecuteFenced) {
|
||||
// setup call
|
||||
const uint32_t kNumberOfComputations = 2;
|
||||
const auto mockPreparedModel = MockPreparedModel::create();
|
||||
const auto preparedModel = PreparedModel::create(mockPreparedModel).value();
|
||||
const auto mockCallback = MockFencedExecutionCallback::create();
|
||||
EXPECT_CALL(*mockCallback, getExecutionInfo(_, _, _))
|
||||
.Times(kNumberOfComputations)
|
||||
.WillRepeatedly(DoAll(SetArgPointee<0>(kNoTiming), SetArgPointee<1>(kNoTiming),
|
||||
SetArgPointee<2>(ErrorStatus::NONE), Invoke(makeStatusOk)));
|
||||
EXPECT_CALL(*mockPreparedModel, executeFenced(_, _, _, _, _, _, _))
|
||||
.Times(kNumberOfComputations)
|
||||
.WillRepeatedly(Invoke(makeFencedExecutionResult(mockCallback)));
|
||||
|
||||
// create execution
|
||||
const auto createResult = preparedModel->createReusableExecution({}, {}, {});
|
||||
ASSERT_TRUE(createResult.has_value())
|
||||
<< "Failed with " << createResult.error().code << ": " << createResult.error().message;
|
||||
ASSERT_NE(createResult.value(), nullptr);
|
||||
|
||||
// invoke compute repeatedly
|
||||
for (uint32_t i = 0; i < kNumberOfComputations; i++) {
|
||||
const auto computeResult = createResult.value()->computeFenced({}, {}, {});
|
||||
ASSERT_TRUE(computeResult.has_value()) << "Failed with " << computeResult.error().code
|
||||
<< ": " << computeResult.error().message;
|
||||
const auto& [syncFence, callback] = computeResult.value();
|
||||
EXPECT_EQ(syncFence.syncWait({}), nn::SyncFence::FenceState::SIGNALED);
|
||||
ASSERT_NE(callback, nullptr);
|
||||
|
||||
// get results from callback
|
||||
const auto callbackResult = callback();
|
||||
ASSERT_TRUE(callbackResult.has_value()) << "Failed with " << callbackResult.error().code
|
||||
<< ": " << callbackResult.error().message;
|
||||
}
|
||||
}
|
||||
|
||||
TEST(PreparedModelTest, reusableExecuteFencedCallbackError) {
|
||||
// setup call
|
||||
const auto mockPreparedModel = MockPreparedModel::create();
|
||||
const auto preparedModel = PreparedModel::create(mockPreparedModel).value();
|
||||
const auto mockCallback = MockFencedExecutionCallback::create();
|
||||
EXPECT_CALL(*mockCallback, getExecutionInfo(_, _, _))
|
||||
.Times(1)
|
||||
.WillOnce(Invoke(DoAll(SetArgPointee<0>(kNoTiming), SetArgPointee<1>(kNoTiming),
|
||||
SetArgPointee<2>(ErrorStatus::GENERAL_FAILURE),
|
||||
Invoke(makeStatusOk))));
|
||||
EXPECT_CALL(*mockPreparedModel, executeFenced(_, _, _, _, _, _, _))
|
||||
.Times(1)
|
||||
.WillOnce(Invoke(makeFencedExecutionResult(mockCallback)));
|
||||
|
||||
// create execution
|
||||
const auto createResult = preparedModel->createReusableExecution({}, {}, {});
|
||||
ASSERT_TRUE(createResult.has_value())
|
||||
<< "Failed with " << createResult.error().code << ": " << createResult.error().message;
|
||||
ASSERT_NE(createResult.value(), nullptr);
|
||||
|
||||
// invoke compute
|
||||
const auto computeResult = createResult.value()->computeFenced({}, {}, {});
|
||||
ASSERT_TRUE(computeResult.has_value()) << "Failed with " << computeResult.error().code << ": "
|
||||
<< computeResult.error().message;
|
||||
const auto& [syncFence, callback] = computeResult.value();
|
||||
EXPECT_NE(syncFence.syncWait({}), nn::SyncFence::FenceState::ACTIVE);
|
||||
ASSERT_NE(callback, nullptr);
|
||||
|
||||
// verify callback failure
|
||||
const auto callbackResult = callback();
|
||||
ASSERT_FALSE(callbackResult.has_value());
|
||||
EXPECT_EQ(callbackResult.error().code, nn::ErrorStatus::GENERAL_FAILURE);
|
||||
}
|
||||
|
||||
TEST(PreparedModelTest, reusableExecuteFencedError) {
|
||||
// setup test
|
||||
const auto mockPreparedModel = MockPreparedModel::create();
|
||||
const auto preparedModel = PreparedModel::create(mockPreparedModel).value();
|
||||
EXPECT_CALL(*mockPreparedModel, executeFenced(_, _, _, _, _, _, _))
|
||||
.Times(1)
|
||||
.WillOnce(InvokeWithoutArgs(makeGeneralFailure));
|
||||
|
||||
// create execution
|
||||
const auto createResult = preparedModel->createReusableExecution({}, {}, {});
|
||||
ASSERT_TRUE(createResult.has_value())
|
||||
<< "Failed with " << createResult.error().code << ": " << createResult.error().message;
|
||||
ASSERT_NE(createResult.value(), nullptr);
|
||||
|
||||
// invoke compute
|
||||
const auto computeResult = createResult.value()->computeFenced({}, {}, {});
|
||||
ASSERT_FALSE(computeResult.has_value());
|
||||
EXPECT_EQ(computeResult.error().code, nn::ErrorStatus::GENERAL_FAILURE);
|
||||
}
|
||||
|
||||
TEST(PreparedModelTest, reusableExecuteFencedTransportFailure) {
|
||||
// setup test
|
||||
const auto mockPreparedModel = MockPreparedModel::create();
|
||||
const auto preparedModel = PreparedModel::create(mockPreparedModel).value();
|
||||
EXPECT_CALL(*mockPreparedModel, executeFenced(_, _, _, _, _, _, _))
|
||||
.Times(1)
|
||||
.WillOnce(InvokeWithoutArgs(makeGeneralTransportFailure));
|
||||
|
||||
// create execution
|
||||
const auto createResult = preparedModel->createReusableExecution({}, {}, {});
|
||||
ASSERT_TRUE(createResult.has_value())
|
||||
<< "Failed with " << createResult.error().code << ": " << createResult.error().message;
|
||||
ASSERT_NE(createResult.value(), nullptr);
|
||||
|
||||
// invoke compute
|
||||
const auto computeResult = createResult.value()->computeFenced({}, {}, {});
|
||||
ASSERT_FALSE(computeResult.has_value());
|
||||
EXPECT_EQ(computeResult.error().code, nn::ErrorStatus::GENERAL_FAILURE);
|
||||
}
|
||||
|
||||
TEST(PreparedModelTest, reusableExecuteFencedDeadObject) {
|
||||
// setup test
|
||||
const auto mockPreparedModel = MockPreparedModel::create();
|
||||
const auto preparedModel = PreparedModel::create(mockPreparedModel).value();
|
||||
EXPECT_CALL(*mockPreparedModel, executeFenced(_, _, _, _, _, _, _))
|
||||
.Times(1)
|
||||
.WillOnce(InvokeWithoutArgs(makeDeadObjectFailure));
|
||||
|
||||
// create execution
|
||||
const auto createResult = preparedModel->createReusableExecution({}, {}, {});
|
||||
ASSERT_TRUE(createResult.has_value())
|
||||
<< "Failed with " << createResult.error().code << ": " << createResult.error().message;
|
||||
ASSERT_NE(createResult.value(), nullptr);
|
||||
|
||||
// invoke compute
|
||||
const auto computeResult = createResult.value()->computeFenced({}, {}, {});
|
||||
ASSERT_FALSE(computeResult.has_value());
|
||||
EXPECT_EQ(computeResult.error().code, nn::ErrorStatus::DEAD_OBJECT);
|
||||
}
|
||||
|
||||
TEST(PreparedModelTest, configureExecutionBurst) {
|
||||
// setup test
|
||||
const auto mockPreparedModel = MockPreparedModel::create();
|
||||
|
|
|
@ -20,6 +20,7 @@
|
|||
#include <cutils/native_handle.h>
|
||||
#include <hidl/HidlSupport.h>
|
||||
#include <nnapi/Result.h>
|
||||
#include <nnapi/SharedMemory.h>
|
||||
#include <nnapi/Types.h>
|
||||
#include <functional>
|
||||
#include <vector>
|
||||
|
@ -59,19 +60,70 @@ bool hasNoPointerData(const nn::Request& request);
|
|||
nn::GeneralResult<std::reference_wrapper<const nn::Model>> flushDataFromPointerToShared(
|
||||
const nn::Model* model, std::optional<nn::Model>* maybeModelInSharedOut);
|
||||
|
||||
// Record a relocation mapping between pointer-based data and shared memory.
|
||||
// Only two specializations of this template may exist:
|
||||
// - RelocationInfo<const void*> for request inputs
|
||||
// - RelocationInfo<void*> for request outputs
|
||||
template <typename PointerType>
|
||||
struct RelocationInfo {
|
||||
PointerType data;
|
||||
size_t length;
|
||||
size_t offset;
|
||||
};
|
||||
using InputRelocationInfo = RelocationInfo<const void*>;
|
||||
using OutputRelocationInfo = RelocationInfo<void*>;
|
||||
|
||||
// Keep track of the relocation mapping between pointer-based data and shared memory pool,
|
||||
// and provide method to copy the data between pointers and the shared memory pool.
|
||||
// Only two specializations of this template may exist:
|
||||
// - RelocationTracker<InputRelocationInfo> for request inputs
|
||||
// - RelocationTracker<OutputRelocationInfo> for request outputs
|
||||
template <typename RelocationInfoType>
|
||||
class RelocationTracker {
|
||||
public:
|
||||
static nn::GeneralResult<std::unique_ptr<RelocationTracker>> create(
|
||||
std::vector<RelocationInfoType> relocationInfos, nn::SharedMemory memory) {
|
||||
auto mapping = NN_TRY(map(memory));
|
||||
return std::make_unique<RelocationTracker<RelocationInfoType>>(
|
||||
std::move(relocationInfos), std::move(memory), std::move(mapping));
|
||||
}
|
||||
|
||||
RelocationTracker(std::vector<RelocationInfoType> relocationInfos, nn::SharedMemory memory,
|
||||
nn::Mapping mapping)
|
||||
: kRelocationInfos(std::move(relocationInfos)),
|
||||
kMemory(std::move(memory)),
|
||||
kMapping(std::move(mapping)) {}
|
||||
|
||||
// Specializations defined in CommonUtils.cpp.
|
||||
// For InputRelocationTracker, this method will copy pointer data to the shared memory pool.
|
||||
// For OutputRelocationTracker, this method will copy shared memory data to the pointers.
|
||||
void flush() const;
|
||||
|
||||
private:
|
||||
const std::vector<RelocationInfoType> kRelocationInfos;
|
||||
const nn::SharedMemory kMemory;
|
||||
const nn::Mapping kMapping;
|
||||
};
|
||||
using InputRelocationTracker = RelocationTracker<InputRelocationInfo>;
|
||||
using OutputRelocationTracker = RelocationTracker<OutputRelocationInfo>;
|
||||
|
||||
struct RequestRelocation {
|
||||
std::unique_ptr<InputRelocationTracker> input;
|
||||
std::unique_ptr<OutputRelocationTracker> output;
|
||||
};
|
||||
|
||||
// Relocate pointer-based data to shared memory. If `request` has no
|
||||
// Request::Argument::LifeTime::POINTER data, the function returns with a reference to `request`. If
|
||||
// `request` has Request::Argument::LifeTime::POINTER data, the request is copied to
|
||||
// `maybeRequestInSharedOut` with the POINTER data relocated to a memory pool, and the function
|
||||
// returns with a reference to `*maybeRequestInSharedOut`.
|
||||
nn::GeneralResult<std::reference_wrapper<const nn::Request>> flushDataFromPointerToShared(
|
||||
const nn::Request* request, std::optional<nn::Request>* maybeRequestInSharedOut);
|
||||
|
||||
// Undoes `flushDataFromPointerToShared` on a Request object. More specifically,
|
||||
// `unflushDataFromSharedToPointer` copies the output shared memory data from the transformed
|
||||
// Request object back to the output pointer-based memory in the original Request object.
|
||||
nn::GeneralResult<void> unflushDataFromSharedToPointer(
|
||||
const nn::Request& request, const std::optional<nn::Request>& maybeRequestInShared);
|
||||
// returns with a reference to `*maybeRequestInSharedOut`. The `relocationOut` will be set to track
|
||||
// the input and output relocations.
|
||||
//
|
||||
// Unlike `flushDataFromPointerToShared`, this method will not copy the input pointer data to the
|
||||
// shared memory pool. Use `relocationOut` to flush the input or output data after the call.
|
||||
nn::GeneralResult<std::reference_wrapper<const nn::Request>> convertRequestFromPointerToShared(
|
||||
const nn::Request* request, std::optional<nn::Request>* maybeRequestInSharedOut,
|
||||
RequestRelocation* relocationOut);
|
||||
|
||||
nn::GeneralResult<std::vector<uint32_t>> countNumberOfConsumers(
|
||||
size_t numberOfOperands, const std::vector<nn::Operation>& operations);
|
||||
|
|
|
@ -35,6 +35,10 @@ class InvalidBurst final : public nn::IBurst {
|
|||
const nn::Request& request, nn::MeasureTiming measure,
|
||||
const nn::OptionalTimePoint& deadline,
|
||||
const nn::OptionalDuration& loopTimeoutDuration) const override;
|
||||
|
||||
nn::GeneralResult<nn::SharedExecution> createReusableExecution(
|
||||
const nn::Request& request, nn::MeasureTiming measure,
|
||||
const nn::OptionalDuration& loopTimeoutDuration) const override;
|
||||
};
|
||||
|
||||
} // namespace android::hardware::neuralnetworks::utils
|
||||
|
|
|
@ -0,0 +1,41 @@
|
|||
/*
|
||||
* Copyright (C) 2021 The Android Open Source Project
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_COMMON_INVALID_EXECUTION_H
|
||||
#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_COMMON_INVALID_EXECUTION_H
|
||||
|
||||
#include <nnapi/IExecution.h>
|
||||
#include <nnapi/Result.h>
|
||||
#include <nnapi/Types.h>
|
||||
|
||||
#include <utility>
|
||||
#include <vector>
|
||||
|
||||
namespace android::hardware::neuralnetworks::utils {
|
||||
|
||||
class InvalidExecution final : public nn::IExecution {
|
||||
public:
|
||||
nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> compute(
|
||||
const nn::OptionalTimePoint& deadline) const override;
|
||||
|
||||
nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>> computeFenced(
|
||||
const std::vector<nn::SyncFence>& waitFor, const nn::OptionalTimePoint& deadline,
|
||||
const nn::OptionalDuration& timeoutDurationAfterFence) const override;
|
||||
};
|
||||
|
||||
} // namespace android::hardware::neuralnetworks::utils
|
||||
|
||||
#endif // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_COMMON_INVALID_EXECUTION_H
|
|
@ -40,6 +40,10 @@ class InvalidPreparedModel final : public nn::IPreparedModel {
|
|||
const nn::OptionalDuration& loopTimeoutDuration,
|
||||
const nn::OptionalDuration& timeoutDurationAfterFence) const override;
|
||||
|
||||
nn::GeneralResult<nn::SharedExecution> createReusableExecution(
|
||||
const nn::Request& request, nn::MeasureTiming measure,
|
||||
const nn::OptionalDuration& loopTimeoutDuration) const override;
|
||||
|
||||
nn::GeneralResult<nn::SharedBurst> configureExecutionBurst() const override;
|
||||
|
||||
std::any getUnderlyingResource() const override;
|
||||
|
|
|
@ -51,7 +51,16 @@ class ResilientBurst final : public nn::IBurst,
|
|||
const nn::OptionalTimePoint& deadline,
|
||||
const nn::OptionalDuration& loopTimeoutDuration) const override;
|
||||
|
||||
nn::GeneralResult<nn::SharedExecution> createReusableExecution(
|
||||
const nn::Request& request, nn::MeasureTiming measure,
|
||||
const nn::OptionalDuration& loopTimeoutDuration) const override;
|
||||
|
||||
private:
|
||||
bool isValidInternal() const EXCLUDES(mMutex);
|
||||
nn::GeneralResult<nn::SharedExecution> createReusableExecutionInternal(
|
||||
const nn::Request& request, nn::MeasureTiming measure,
|
||||
const nn::OptionalDuration& loopTimeoutDuration) const;
|
||||
|
||||
const Factory kMakeBurst;
|
||||
mutable std::mutex mMutex;
|
||||
mutable nn::SharedBurst mBurst GUARDED_BY(mMutex);
|
||||
|
|
|
@ -0,0 +1,66 @@
|
|||
/*
|
||||
* Copyright (C) 2021 The Android Open Source Project
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_COMMON_RESILIENT_EXECUTION_H
|
||||
#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_COMMON_RESILIENT_EXECUTION_H
|
||||
|
||||
#include <android-base/thread_annotations.h>
|
||||
#include <nnapi/IExecution.h>
|
||||
#include <nnapi/Result.h>
|
||||
#include <nnapi/Types.h>
|
||||
|
||||
#include <functional>
|
||||
#include <memory>
|
||||
#include <mutex>
|
||||
#include <utility>
|
||||
#include <vector>
|
||||
|
||||
namespace android::hardware::neuralnetworks::utils {
|
||||
|
||||
class ResilientExecution final : public nn::IExecution,
|
||||
public std::enable_shared_from_this<ResilientExecution> {
|
||||
struct PrivateConstructorTag {};
|
||||
|
||||
public:
|
||||
using Factory = std::function<nn::GeneralResult<nn::SharedExecution>()>;
|
||||
|
||||
static nn::GeneralResult<std::shared_ptr<const ResilientExecution>> create(
|
||||
Factory makeExecution);
|
||||
|
||||
ResilientExecution(PrivateConstructorTag tag, Factory makeExecution,
|
||||
nn::SharedExecution execution);
|
||||
|
||||
nn::SharedExecution getExecution() const;
|
||||
nn::GeneralResult<nn::SharedExecution> recover(const nn::IExecution* failingExecution) const;
|
||||
|
||||
nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> compute(
|
||||
const nn::OptionalTimePoint& deadline) const override;
|
||||
|
||||
nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>> computeFenced(
|
||||
const std::vector<nn::SyncFence>& waitFor, const nn::OptionalTimePoint& deadline,
|
||||
const nn::OptionalDuration& timeoutDurationAfterFence) const override;
|
||||
|
||||
private:
|
||||
bool isValidInternal() const EXCLUDES(mMutex);
|
||||
|
||||
const Factory kMakeExecution;
|
||||
mutable std::mutex mMutex;
|
||||
mutable nn::SharedExecution mExecution GUARDED_BY(mMutex);
|
||||
};
|
||||
|
||||
} // namespace android::hardware::neuralnetworks::utils
|
||||
|
||||
#endif // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_COMMON_RESILIENT_EXECUTION_H
|
|
@ -58,12 +58,19 @@ class ResilientPreparedModel final : public nn::IPreparedModel,
|
|||
const nn::OptionalDuration& loopTimeoutDuration,
|
||||
const nn::OptionalDuration& timeoutDurationAfterFence) const override;
|
||||
|
||||
nn::GeneralResult<nn::SharedExecution> createReusableExecution(
|
||||
const nn::Request& request, nn::MeasureTiming measure,
|
||||
const nn::OptionalDuration& loopTimeoutDuration) const override;
|
||||
|
||||
nn::GeneralResult<nn::SharedBurst> configureExecutionBurst() const override;
|
||||
|
||||
std::any getUnderlyingResource() const override;
|
||||
|
||||
private:
|
||||
bool isValidInternal() const EXCLUDES(mMutex);
|
||||
nn::GeneralResult<nn::SharedExecution> createReusableExecutionInternal(
|
||||
const nn::Request& request, nn::MeasureTiming measure,
|
||||
const nn::OptionalDuration& loopTimeoutDuration) const;
|
||||
nn::GeneralResult<nn::SharedBurst> configureExecutionBurstInternal() const;
|
||||
|
||||
const Factory kMakePreparedModel;
|
||||
|
|
|
@ -200,10 +200,31 @@ nn::GeneralResult<std::reference_wrapper<const nn::Model>> flushDataFromPointerT
|
|||
return **maybeModelInSharedOut;
|
||||
}
|
||||
|
||||
nn::GeneralResult<std::reference_wrapper<const nn::Request>> flushDataFromPointerToShared(
|
||||
const nn::Request* request, std::optional<nn::Request>* maybeRequestInSharedOut) {
|
||||
template <>
|
||||
void InputRelocationTracker::flush() const {
|
||||
// Copy from pointers to shared memory.
|
||||
uint8_t* memoryPtr = static_cast<uint8_t*>(std::get<void*>(kMapping.pointer));
|
||||
for (const auto& [data, length, offset] : kRelocationInfos) {
|
||||
std::memcpy(memoryPtr + offset, data, length);
|
||||
}
|
||||
}
|
||||
|
||||
template <>
|
||||
void OutputRelocationTracker::flush() const {
|
||||
// Copy from shared memory to pointers.
|
||||
const uint8_t* memoryPtr = static_cast<const uint8_t*>(
|
||||
std::visit([](auto ptr) { return static_cast<const void*>(ptr); }, kMapping.pointer));
|
||||
for (const auto& [data, length, offset] : kRelocationInfos) {
|
||||
std::memcpy(data, memoryPtr + offset, length);
|
||||
}
|
||||
}
|
||||
|
||||
nn::GeneralResult<std::reference_wrapper<const nn::Request>> convertRequestFromPointerToShared(
|
||||
const nn::Request* request, std::optional<nn::Request>* maybeRequestInSharedOut,
|
||||
RequestRelocation* relocationOut) {
|
||||
CHECK(request != nullptr);
|
||||
CHECK(maybeRequestInSharedOut != nullptr);
|
||||
CHECK(relocationOut != nullptr);
|
||||
|
||||
if (hasNoPointerData(*request)) {
|
||||
return *request;
|
||||
|
@ -213,8 +234,11 @@ nn::GeneralResult<std::reference_wrapper<const nn::Request>> flushDataFromPointe
|
|||
// to the caller through `maybeRequestInSharedOut` if the function succeeds.
|
||||
nn::Request requestInShared = *request;
|
||||
|
||||
RequestRelocation relocation;
|
||||
|
||||
// Change input pointers to shared memory.
|
||||
nn::ConstantMemoryBuilder inputBuilder(requestInShared.pools.size());
|
||||
nn::MutableMemoryBuilder inputBuilder(requestInShared.pools.size());
|
||||
std::vector<InputRelocationInfo> inputRelocationInfos;
|
||||
for (auto& input : requestInShared.inputs) {
|
||||
const auto& location = input.location;
|
||||
if (input.lifetime != nn::Request::Argument::LifeTime::POINTER) {
|
||||
|
@ -225,17 +249,21 @@ nn::GeneralResult<std::reference_wrapper<const nn::Request>> flushDataFromPointe
|
|||
const void* data = std::visit([](auto ptr) { return static_cast<const void*>(ptr); },
|
||||
location.pointer);
|
||||
CHECK(data != nullptr);
|
||||
input.location = inputBuilder.append(data, location.length);
|
||||
input.location = inputBuilder.append(location.length);
|
||||
inputRelocationInfos.push_back({data, input.location.length, input.location.offset});
|
||||
}
|
||||
|
||||
// Allocate input memory.
|
||||
if (!inputBuilder.empty()) {
|
||||
auto memory = NN_TRY(inputBuilder.finish());
|
||||
requestInShared.pools.push_back(std::move(memory));
|
||||
requestInShared.pools.push_back(memory);
|
||||
relocation.input = NN_TRY(
|
||||
InputRelocationTracker::create(std::move(inputRelocationInfos), std::move(memory)));
|
||||
}
|
||||
|
||||
// Change output pointers to shared memory.
|
||||
nn::MutableMemoryBuilder outputBuilder(requestInShared.pools.size());
|
||||
std::vector<OutputRelocationInfo> outputRelocationInfos;
|
||||
for (auto& output : requestInShared.outputs) {
|
||||
const auto& location = output.location;
|
||||
if (output.lifetime != nn::Request::Argument::LifeTime::POINTER) {
|
||||
|
@ -243,62 +271,25 @@ nn::GeneralResult<std::reference_wrapper<const nn::Request>> flushDataFromPointe
|
|||
}
|
||||
|
||||
output.lifetime = nn::Request::Argument::LifeTime::POOL;
|
||||
void* data = std::get<void*>(location.pointer);
|
||||
CHECK(data != nullptr);
|
||||
output.location = outputBuilder.append(location.length);
|
||||
outputRelocationInfos.push_back({data, output.location.length, output.location.offset});
|
||||
}
|
||||
|
||||
// Allocate output memory.
|
||||
if (!outputBuilder.empty()) {
|
||||
auto memory = NN_TRY(outputBuilder.finish());
|
||||
requestInShared.pools.push_back(std::move(memory));
|
||||
requestInShared.pools.push_back(memory);
|
||||
relocation.output = NN_TRY(OutputRelocationTracker::create(std::move(outputRelocationInfos),
|
||||
std::move(memory)));
|
||||
}
|
||||
|
||||
*maybeRequestInSharedOut = requestInShared;
|
||||
*relocationOut = std::move(relocation);
|
||||
return **maybeRequestInSharedOut;
|
||||
}
|
||||
|
||||
nn::GeneralResult<void> unflushDataFromSharedToPointer(
|
||||
const nn::Request& request, const std::optional<nn::Request>& maybeRequestInShared) {
|
||||
if (!maybeRequestInShared.has_value() || maybeRequestInShared->pools.empty() ||
|
||||
!std::holds_alternative<nn::SharedMemory>(maybeRequestInShared->pools.back())) {
|
||||
return {};
|
||||
}
|
||||
const auto& requestInShared = *maybeRequestInShared;
|
||||
|
||||
// Map the memory.
|
||||
const auto& outputMemory = std::get<nn::SharedMemory>(requestInShared.pools.back());
|
||||
const auto [pointer, size, context] = NN_TRY(map(outputMemory));
|
||||
const uint8_t* constantPointer =
|
||||
std::visit([](const auto& o) { return static_cast<const uint8_t*>(o); }, pointer);
|
||||
|
||||
// Flush each output pointer.
|
||||
CHECK_EQ(request.outputs.size(), requestInShared.outputs.size());
|
||||
for (size_t i = 0; i < request.outputs.size(); ++i) {
|
||||
const auto& location = request.outputs[i].location;
|
||||
const auto& locationInShared = requestInShared.outputs[i].location;
|
||||
if (!std::holds_alternative<void*>(location.pointer)) {
|
||||
continue;
|
||||
}
|
||||
|
||||
// Get output pointer and size.
|
||||
void* data = std::get<void*>(location.pointer);
|
||||
CHECK(data != nullptr);
|
||||
const size_t length = location.length;
|
||||
|
||||
// Get output pool location.
|
||||
CHECK(requestInShared.outputs[i].lifetime == nn::Request::Argument::LifeTime::POOL);
|
||||
const size_t index = locationInShared.poolIndex;
|
||||
const size_t offset = locationInShared.offset;
|
||||
const size_t outputPoolIndex = requestInShared.pools.size() - 1;
|
||||
CHECK(locationInShared.length == length);
|
||||
CHECK(index == outputPoolIndex);
|
||||
|
||||
// Flush memory.
|
||||
std::memcpy(data, constantPointer + offset, length);
|
||||
}
|
||||
|
||||
return {};
|
||||
}
|
||||
|
||||
nn::GeneralResult<std::vector<uint32_t>> countNumberOfConsumers(
|
||||
size_t numberOfOperands, const std::vector<nn::Operation>& operations) {
|
||||
return makeGeneralFailure(nn::countNumberOfConsumers(numberOfOperands, operations));
|
||||
|
|
|
@ -38,4 +38,10 @@ nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> Invalid
|
|||
return NN_ERROR() << "InvalidBurst";
|
||||
}
|
||||
|
||||
nn::GeneralResult<nn::SharedExecution> InvalidBurst::createReusableExecution(
|
||||
const nn::Request& /*request*/, nn::MeasureTiming /*measure*/,
|
||||
const nn::OptionalDuration& /*loopTimeoutDuration*/) const {
|
||||
return NN_ERROR() << "InvalidBurst";
|
||||
}
|
||||
|
||||
} // namespace android::hardware::neuralnetworks::utils
|
||||
|
|
40
neuralnetworks/utils/common/src/InvalidExecution.cpp
Normal file
40
neuralnetworks/utils/common/src/InvalidExecution.cpp
Normal file
|
@ -0,0 +1,40 @@
|
|||
/*
|
||||
* Copyright (C) 2021 The Android Open Source Project
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#include "InvalidExecution.h"
|
||||
|
||||
#include <nnapi/IExecution.h>
|
||||
#include <nnapi/Result.h>
|
||||
#include <nnapi/Types.h>
|
||||
|
||||
#include <utility>
|
||||
#include <vector>
|
||||
|
||||
namespace android::hardware::neuralnetworks::utils {
|
||||
|
||||
nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> InvalidExecution::compute(
|
||||
const nn::OptionalTimePoint& /*deadline*/) const {
|
||||
return NN_ERROR() << "InvalidExecution";
|
||||
}
|
||||
|
||||
nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>>
|
||||
InvalidExecution::computeFenced(const std::vector<nn::SyncFence>& /*waitFor*/,
|
||||
const nn::OptionalTimePoint& /*deadline*/,
|
||||
const nn::OptionalDuration& /*timeoutDurationAfterFence*/) const {
|
||||
return NN_ERROR() << "InvalidExecution";
|
||||
}
|
||||
|
||||
} // namespace android::hardware::neuralnetworks::utils
|
|
@ -42,6 +42,12 @@ InvalidPreparedModel::executeFenced(
|
|||
return NN_ERROR() << "InvalidPreparedModel";
|
||||
}
|
||||
|
||||
nn::GeneralResult<nn::SharedExecution> InvalidPreparedModel::createReusableExecution(
|
||||
const nn::Request& /*request*/, nn::MeasureTiming /*measure*/,
|
||||
const nn::OptionalDuration& /*loopTimeoutDuration*/) const {
|
||||
return NN_ERROR() << "InvalidPreparedModel";
|
||||
}
|
||||
|
||||
nn::GeneralResult<nn::SharedBurst> InvalidPreparedModel::configureExecutionBurst() const {
|
||||
return NN_ERROR() << "InvalidPreparedModel";
|
||||
}
|
||||
|
|
|
@ -19,6 +19,7 @@
|
|||
#include <android-base/logging.h>
|
||||
#include <android-base/thread_annotations.h>
|
||||
#include <nnapi/IBurst.h>
|
||||
#include <nnapi/IPreparedModel.h>
|
||||
#include <nnapi/Result.h>
|
||||
#include <nnapi/TypeUtils.h>
|
||||
#include <nnapi/Types.h>
|
||||
|
@ -29,6 +30,9 @@
|
|||
#include <optional>
|
||||
#include <utility>
|
||||
|
||||
#include "InvalidExecution.h"
|
||||
#include "ResilientExecution.h"
|
||||
|
||||
namespace android::hardware::neuralnetworks::utils {
|
||||
namespace {
|
||||
|
||||
|
@ -46,11 +50,11 @@ auto protect(const ResilientBurst& resilientBurst, const FnType& fn)
|
|||
// Attempt recovery and return if it fails.
|
||||
auto maybeBurst = resilientBurst.recover(burst.get());
|
||||
if (!maybeBurst.has_value()) {
|
||||
auto [resultErrorMessage, resultErrorCode, resultOutputShapes] = std::move(result).error();
|
||||
const auto& [recoveryErrorMessage, recoveryErrorCode] = maybeBurst.error();
|
||||
return nn::error(resultErrorCode, std::move(resultOutputShapes))
|
||||
<< resultErrorMessage << ", and failed to recover dead burst object with error "
|
||||
<< recoveryErrorCode << ": " << recoveryErrorMessage;
|
||||
const auto& [message, code] = maybeBurst.error();
|
||||
std::ostringstream oss;
|
||||
oss << ", and failed to recover dead burst object with error " << code << ": " << message;
|
||||
result.error().message += oss.str();
|
||||
return result;
|
||||
}
|
||||
burst = std::move(maybeBurst).value();
|
||||
|
||||
|
@ -109,4 +113,35 @@ nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> Resilie
|
|||
return protect(*this, fn);
|
||||
}
|
||||
|
||||
nn::GeneralResult<nn::SharedExecution> ResilientBurst::createReusableExecution(
|
||||
const nn::Request& request, nn::MeasureTiming measure,
|
||||
const nn::OptionalDuration& loopTimeoutDuration) const {
|
||||
#if 0
|
||||
auto self = shared_from_this();
|
||||
ResilientExecution::Factory makeExecution =
|
||||
[burst = std::move(self), request, measure, loopTimeoutDuration] {
|
||||
return burst->createReusableExecutionInternal(request, measure, loopTimeoutDuration);
|
||||
};
|
||||
return ResilientExecution::create(std::move(makeExecution));
|
||||
#else
|
||||
return createReusableExecutionInternal(request, measure, loopTimeoutDuration);
|
||||
#endif
|
||||
}
|
||||
|
||||
nn::GeneralResult<nn::SharedExecution> ResilientBurst::createReusableExecutionInternal(
|
||||
const nn::Request& request, nn::MeasureTiming measure,
|
||||
const nn::OptionalDuration& loopTimeoutDuration) const {
|
||||
if (!isValidInternal()) {
|
||||
return std::make_shared<const InvalidExecution>();
|
||||
}
|
||||
const auto fn = [&request, measure, &loopTimeoutDuration](const nn::IBurst& burst) {
|
||||
return burst.createReusableExecution(request, measure, loopTimeoutDuration);
|
||||
};
|
||||
return protect(*this, fn);
|
||||
}
|
||||
|
||||
bool ResilientBurst::isValidInternal() const {
|
||||
return true;
|
||||
}
|
||||
|
||||
} // namespace android::hardware::neuralnetworks::utils
|
||||
|
|
126
neuralnetworks/utils/common/src/ResilientExecution.cpp
Normal file
126
neuralnetworks/utils/common/src/ResilientExecution.cpp
Normal file
|
@ -0,0 +1,126 @@
|
|||
/*
|
||||
* Copyright (C) 2021 The Android Open Source Project
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#include "ResilientExecution.h"
|
||||
|
||||
#include "InvalidBurst.h"
|
||||
#include "ResilientBurst.h"
|
||||
|
||||
#include <android-base/logging.h>
|
||||
#include <android-base/thread_annotations.h>
|
||||
#include <nnapi/IExecution.h>
|
||||
#include <nnapi/Result.h>
|
||||
#include <nnapi/TypeUtils.h>
|
||||
#include <nnapi/Types.h>
|
||||
|
||||
#include <functional>
|
||||
#include <memory>
|
||||
#include <mutex>
|
||||
#include <sstream>
|
||||
#include <utility>
|
||||
#include <vector>
|
||||
|
||||
namespace android::hardware::neuralnetworks::utils {
|
||||
namespace {
|
||||
|
||||
template <typename FnType>
|
||||
auto protect(const ResilientExecution& resilientExecution, const FnType& fn)
|
||||
-> decltype(fn(*resilientExecution.getExecution())) {
|
||||
auto execution = resilientExecution.getExecution();
|
||||
auto result = fn(*execution);
|
||||
|
||||
// Immediately return if prepared model is not dead.
|
||||
if (result.has_value() || result.error().code != nn::ErrorStatus::DEAD_OBJECT) {
|
||||
return result;
|
||||
}
|
||||
|
||||
// Attempt recovery and return if it fails.
|
||||
auto maybeExecution = resilientExecution.recover(execution.get());
|
||||
if (!maybeExecution.has_value()) {
|
||||
const auto& [message, code] = maybeExecution.error();
|
||||
std::ostringstream oss;
|
||||
oss << ", and failed to recover dead prepared model with error " << code << ": " << message;
|
||||
result.error().message += oss.str();
|
||||
return result;
|
||||
}
|
||||
execution = std::move(maybeExecution).value();
|
||||
|
||||
return fn(*execution);
|
||||
}
|
||||
|
||||
} // namespace
|
||||
|
||||
nn::GeneralResult<std::shared_ptr<const ResilientExecution>> ResilientExecution::create(
|
||||
Factory makeExecution) {
|
||||
if (makeExecution == nullptr) {
|
||||
return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT)
|
||||
<< "utils::ResilientExecution::create must have non-empty makeExecution";
|
||||
}
|
||||
auto execution = NN_TRY(makeExecution());
|
||||
CHECK(execution != nullptr);
|
||||
return std::make_shared<ResilientExecution>(PrivateConstructorTag{}, std::move(makeExecution),
|
||||
std::move(execution));
|
||||
}
|
||||
|
||||
ResilientExecution::ResilientExecution(PrivateConstructorTag /*tag*/, Factory makeExecution,
|
||||
nn::SharedExecution execution)
|
||||
: kMakeExecution(std::move(makeExecution)), mExecution(std::move(execution)) {
|
||||
CHECK(kMakeExecution != nullptr);
|
||||
CHECK(mExecution != nullptr);
|
||||
}
|
||||
|
||||
nn::SharedExecution ResilientExecution::getExecution() const {
|
||||
std::lock_guard guard(mMutex);
|
||||
return mExecution;
|
||||
}
|
||||
|
||||
nn::GeneralResult<nn::SharedExecution> ResilientExecution::recover(
|
||||
const nn::IExecution* failingExecution) const {
|
||||
std::lock_guard guard(mMutex);
|
||||
|
||||
// Another caller updated the failing prepared model.
|
||||
if (mExecution.get() != failingExecution) {
|
||||
return mExecution;
|
||||
}
|
||||
|
||||
mExecution = NN_TRY(kMakeExecution());
|
||||
return mExecution;
|
||||
}
|
||||
|
||||
nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>>
|
||||
ResilientExecution::compute(const nn::OptionalTimePoint& deadline) const {
|
||||
const auto fn = [&deadline](const nn::IExecution& execution) {
|
||||
return execution.compute(deadline);
|
||||
};
|
||||
return protect(*this, fn);
|
||||
}
|
||||
|
||||
nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>>
|
||||
ResilientExecution::computeFenced(const std::vector<nn::SyncFence>& waitFor,
|
||||
const nn::OptionalTimePoint& deadline,
|
||||
const nn::OptionalDuration& timeoutDurationAfterFence) const {
|
||||
const auto fn = [&waitFor, &deadline,
|
||||
&timeoutDurationAfterFence](const nn::IExecution& execution) {
|
||||
return execution.computeFenced(waitFor, deadline, timeoutDurationAfterFence);
|
||||
};
|
||||
return protect(*this, fn);
|
||||
}
|
||||
|
||||
bool ResilientExecution::isValidInternal() const {
|
||||
return true;
|
||||
}
|
||||
|
||||
} // namespace android::hardware::neuralnetworks::utils
|
|
@ -17,7 +17,9 @@
|
|||
#include "ResilientPreparedModel.h"
|
||||
|
||||
#include "InvalidBurst.h"
|
||||
#include "InvalidExecution.h"
|
||||
#include "ResilientBurst.h"
|
||||
#include "ResilientExecution.h"
|
||||
|
||||
#include <android-base/logging.h>
|
||||
#include <android-base/thread_annotations.h>
|
||||
|
@ -127,6 +129,21 @@ ResilientPreparedModel::executeFenced(const nn::Request& request,
|
|||
return protect(*this, fn);
|
||||
}
|
||||
|
||||
nn::GeneralResult<nn::SharedExecution> ResilientPreparedModel::createReusableExecution(
|
||||
const nn::Request& request, nn::MeasureTiming measure,
|
||||
const nn::OptionalDuration& loopTimeoutDuration) const {
|
||||
#if 0
|
||||
auto self = shared_from_this();
|
||||
ResilientExecution::Factory makeExecution =
|
||||
[preparedModel = std::move(self), request, measure, loopTimeoutDuration] {
|
||||
return preparedModel->createReusableExecutionInternal(request, measure, loopTimeoutDuration);
|
||||
};
|
||||
return ResilientExecution::create(std::move(makeExecution));
|
||||
#else
|
||||
return createReusableExecutionInternal(request, measure, loopTimeoutDuration);
|
||||
#endif
|
||||
}
|
||||
|
||||
nn::GeneralResult<nn::SharedBurst> ResilientPreparedModel::configureExecutionBurst() const {
|
||||
#if 0
|
||||
auto self = shared_from_this();
|
||||
|
@ -140,6 +157,19 @@ nn::GeneralResult<nn::SharedBurst> ResilientPreparedModel::configureExecutionBur
|
|||
#endif
|
||||
}
|
||||
|
||||
nn::GeneralResult<nn::SharedExecution> ResilientPreparedModel::createReusableExecutionInternal(
|
||||
const nn::Request& request, nn::MeasureTiming measure,
|
||||
const nn::OptionalDuration& loopTimeoutDuration) const {
|
||||
if (!isValidInternal()) {
|
||||
return std::make_shared<const InvalidExecution>();
|
||||
}
|
||||
const auto fn = [&request, measure,
|
||||
&loopTimeoutDuration](const nn::IPreparedModel& preparedModel) {
|
||||
return preparedModel.createReusableExecution(request, measure, loopTimeoutDuration);
|
||||
};
|
||||
return protect(*this, fn);
|
||||
}
|
||||
|
||||
std::any ResilientPreparedModel::getUnderlyingResource() const {
|
||||
return getPreparedModel()->getUnderlyingResource();
|
||||
}
|
||||
|
|
38
neuralnetworks/utils/common/test/MockExecution.h
Normal file
38
neuralnetworks/utils/common/test/MockExecution.h
Normal file
|
@ -0,0 +1,38 @@
|
|||
/*
|
||||
* Copyright (C) 2021 The Android Open Source Project
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_COMMON_TEST_MOCK_EXECUTION
|
||||
#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_COMMON_TEST_MOCK_EXECUTION
|
||||
|
||||
#include <gmock/gmock.h>
|
||||
#include <gtest/gtest.h>
|
||||
#include <nnapi/IExecution.h>
|
||||
|
||||
namespace android::nn {
|
||||
|
||||
class MockExecution final : public IExecution {
|
||||
public:
|
||||
MOCK_METHOD((ExecutionResult<std::pair<std::vector<OutputShape>, Timing>>), compute,
|
||||
(const OptionalTimePoint& deadline), (const, override));
|
||||
MOCK_METHOD((GeneralResult<std::pair<SyncFence, ExecuteFencedInfoCallback>>), computeFenced,
|
||||
(const std::vector<SyncFence>& waitFor, const OptionalTimePoint& deadline,
|
||||
const OptionalDuration& timeoutDurationAfterFence),
|
||||
(const, override));
|
||||
};
|
||||
|
||||
} // namespace android::nn
|
||||
|
||||
#endif // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_COMMON_TEST_MOCK_EXECUTION
|
|
@ -35,6 +35,10 @@ class MockPreparedModel final : public IPreparedModel {
|
|||
const OptionalDuration& loopTimeoutDuration,
|
||||
const OptionalDuration& timeoutDurationAfterFence),
|
||||
(const, override));
|
||||
MOCK_METHOD((GeneralResult<SharedExecution>), createReusableExecution,
|
||||
(const nn::Request& request, nn::MeasureTiming measure,
|
||||
const nn::OptionalDuration& loopTimeoutDuration),
|
||||
(const, override));
|
||||
MOCK_METHOD(GeneralResult<SharedBurst>, configureExecutionBurst, (), (const, override));
|
||||
MOCK_METHOD(std::any, getUnderlyingResource, (), (const, override));
|
||||
};
|
||||
|
|
260
neuralnetworks/utils/common/test/ResilientExecution.cpp
Normal file
260
neuralnetworks/utils/common/test/ResilientExecution.cpp
Normal file
|
@ -0,0 +1,260 @@
|
|||
/*
|
||||
* Copyright (C) 2021 The Android Open Source Project
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#include <gmock/gmock.h>
|
||||
#include <nnapi/TypeUtils.h>
|
||||
#include <nnapi/Types.h>
|
||||
#include <nnapi/hal/ResilientExecution.h>
|
||||
#include <utility>
|
||||
#include "MockExecution.h"
|
||||
|
||||
namespace android::hardware::neuralnetworks::utils {
|
||||
namespace {
|
||||
|
||||
using ::testing::_;
|
||||
using ::testing::InvokeWithoutArgs;
|
||||
using ::testing::Return;
|
||||
|
||||
using SharedMockExecution = std::shared_ptr<const nn::MockExecution>;
|
||||
using MockExecutionFactory = ::testing::MockFunction<nn::GeneralResult<nn::SharedExecution>()>;
|
||||
|
||||
SharedMockExecution createMockExecution() {
|
||||
return std::make_shared<const nn::MockExecution>();
|
||||
}
|
||||
|
||||
std::tuple<SharedMockExecution, std::unique_ptr<MockExecutionFactory>,
|
||||
std::shared_ptr<const ResilientExecution>>
|
||||
setup() {
|
||||
auto mockExecution = std::make_shared<const nn::MockExecution>();
|
||||
|
||||
auto mockExecutionFactory = std::make_unique<MockExecutionFactory>();
|
||||
EXPECT_CALL(*mockExecutionFactory, Call()).Times(1).WillOnce(Return(mockExecution));
|
||||
|
||||
auto buffer = ResilientExecution::create(mockExecutionFactory->AsStdFunction()).value();
|
||||
return std::make_tuple(std::move(mockExecution), std::move(mockExecutionFactory),
|
||||
std::move(buffer));
|
||||
}
|
||||
|
||||
constexpr auto makeError = [](nn::ErrorStatus status) {
|
||||
return [status](const auto&... /*args*/) { return nn::error(status); };
|
||||
};
|
||||
const auto kReturnGeneralFailure = makeError(nn::ErrorStatus::GENERAL_FAILURE);
|
||||
const auto kReturnDeadObject = makeError(nn::ErrorStatus::DEAD_OBJECT);
|
||||
|
||||
const auto kNoExecutionError =
|
||||
nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>>{};
|
||||
const auto kNoFencedExecutionError =
|
||||
nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>>(
|
||||
std::make_pair(nn::SyncFence::createAsSignaled(), nullptr));
|
||||
|
||||
} // namespace
|
||||
|
||||
TEST(ResilientExecutionTest, invalidExecutionFactory) {
|
||||
// setup call
|
||||
const auto invalidExecutionFactory = ResilientExecution::Factory{};
|
||||
|
||||
// run test
|
||||
const auto result = ResilientExecution::create(invalidExecutionFactory);
|
||||
|
||||
// verify result
|
||||
ASSERT_FALSE(result.has_value());
|
||||
EXPECT_EQ(result.error().code, nn::ErrorStatus::INVALID_ARGUMENT);
|
||||
}
|
||||
|
||||
TEST(ResilientExecutionTest, executionFactoryFailure) {
|
||||
// setup call
|
||||
const auto invalidExecutionFactory = kReturnGeneralFailure;
|
||||
|
||||
// run test
|
||||
const auto result = ResilientExecution::create(invalidExecutionFactory);
|
||||
|
||||
// verify result
|
||||
ASSERT_FALSE(result.has_value());
|
||||
EXPECT_EQ(result.error().code, nn::ErrorStatus::GENERAL_FAILURE);
|
||||
}
|
||||
|
||||
TEST(ResilientExecutionTest, getExecution) {
|
||||
// setup call
|
||||
const auto [mockExecution, mockExecutionFactory, execution] = setup();
|
||||
|
||||
// run test
|
||||
const auto result = execution->getExecution();
|
||||
|
||||
// verify result
|
||||
EXPECT_TRUE(result == mockExecution);
|
||||
}
|
||||
|
||||
TEST(ResilientExecutionTest, compute) {
|
||||
// setup call
|
||||
const auto [mockExecution, mockExecutionFactory, execution] = setup();
|
||||
EXPECT_CALL(*mockExecution, compute(_)).Times(1).WillOnce(Return(kNoExecutionError));
|
||||
|
||||
// run test
|
||||
const auto result = execution->compute({});
|
||||
|
||||
// verify result
|
||||
ASSERT_TRUE(result.has_value())
|
||||
<< "Failed with " << result.error().code << ": " << result.error().message;
|
||||
}
|
||||
|
||||
TEST(ResilientExecutionTest, computeError) {
|
||||
// setup call
|
||||
const auto [mockExecution, mockExecutionFactory, execution] = setup();
|
||||
EXPECT_CALL(*mockExecution, compute(_)).Times(1).WillOnce(kReturnGeneralFailure);
|
||||
|
||||
// run test
|
||||
const auto result = execution->compute({});
|
||||
|
||||
// verify result
|
||||
ASSERT_FALSE(result.has_value());
|
||||
EXPECT_EQ(result.error().code, nn::ErrorStatus::GENERAL_FAILURE);
|
||||
}
|
||||
|
||||
TEST(ResilientExecutionTest, computeDeadObjectFailedRecovery) {
|
||||
// setup call
|
||||
const auto [mockExecution, mockExecutionFactory, execution] = setup();
|
||||
EXPECT_CALL(*mockExecution, compute(_)).Times(1).WillOnce(kReturnDeadObject);
|
||||
EXPECT_CALL(*mockExecutionFactory, Call()).Times(1).WillOnce(kReturnGeneralFailure);
|
||||
|
||||
// run test
|
||||
const auto result = execution->compute({});
|
||||
|
||||
// verify result
|
||||
ASSERT_FALSE(result.has_value());
|
||||
EXPECT_EQ(result.error().code, nn::ErrorStatus::DEAD_OBJECT);
|
||||
}
|
||||
|
||||
TEST(ResilientExecutionTest, computeDeadObjectSuccessfulRecovery) {
|
||||
// setup call
|
||||
const auto [mockExecution, mockExecutionFactory, execution] = setup();
|
||||
EXPECT_CALL(*mockExecution, compute(_)).Times(1).WillOnce(kReturnDeadObject);
|
||||
const auto recoveredMockExecution = createMockExecution();
|
||||
EXPECT_CALL(*recoveredMockExecution, compute(_)).Times(1).WillOnce(Return(kNoExecutionError));
|
||||
EXPECT_CALL(*mockExecutionFactory, Call()).Times(1).WillOnce(Return(recoveredMockExecution));
|
||||
|
||||
// run test
|
||||
const auto result = execution->compute({});
|
||||
|
||||
// verify result
|
||||
ASSERT_TRUE(result.has_value())
|
||||
<< "Failed with " << result.error().code << ": " << result.error().message;
|
||||
}
|
||||
|
||||
TEST(ResilientExecutionTest, computeFenced) {
|
||||
// setup call
|
||||
const auto [mockExecution, mockExecutionFactory, execution] = setup();
|
||||
EXPECT_CALL(*mockExecution, computeFenced(_, _, _))
|
||||
.Times(1)
|
||||
.WillOnce(Return(kNoFencedExecutionError));
|
||||
|
||||
// run test
|
||||
const auto result = execution->computeFenced({}, {}, {});
|
||||
|
||||
// verify result
|
||||
ASSERT_TRUE(result.has_value())
|
||||
<< "Failed with " << result.error().code << ": " << result.error().message;
|
||||
}
|
||||
|
||||
TEST(ResilientExecutionTest, computeFencedError) {
|
||||
// setup call
|
||||
const auto [mockExecution, mockExecutionFactory, execution] = setup();
|
||||
EXPECT_CALL(*mockExecution, computeFenced(_, _, _)).Times(1).WillOnce(kReturnGeneralFailure);
|
||||
|
||||
// run test
|
||||
const auto result = execution->computeFenced({}, {}, {});
|
||||
|
||||
// verify result
|
||||
ASSERT_FALSE(result.has_value());
|
||||
EXPECT_EQ(result.error().code, nn::ErrorStatus::GENERAL_FAILURE);
|
||||
}
|
||||
|
||||
TEST(ResilientExecutionTest, computeFencedDeadObjectFailedRecovery) {
|
||||
// setup call
|
||||
const auto [mockExecution, mockExecutionFactory, execution] = setup();
|
||||
EXPECT_CALL(*mockExecution, computeFenced(_, _, _)).Times(1).WillOnce(kReturnDeadObject);
|
||||
EXPECT_CALL(*mockExecutionFactory, Call()).Times(1).WillOnce(kReturnGeneralFailure);
|
||||
|
||||
// run test
|
||||
const auto result = execution->computeFenced({}, {}, {});
|
||||
|
||||
// verify result
|
||||
ASSERT_FALSE(result.has_value());
|
||||
EXPECT_EQ(result.error().code, nn::ErrorStatus::DEAD_OBJECT);
|
||||
}
|
||||
|
||||
TEST(ResilientExecutionTest, computeFencedDeadObjectSuccessfulRecovery) {
|
||||
// setup call
|
||||
const auto [mockExecution, mockExecutionFactory, execution] = setup();
|
||||
EXPECT_CALL(*mockExecution, computeFenced(_, _, _)).Times(1).WillOnce(kReturnDeadObject);
|
||||
const auto recoveredMockExecution = createMockExecution();
|
||||
EXPECT_CALL(*recoveredMockExecution, computeFenced(_, _, _))
|
||||
.Times(1)
|
||||
.WillOnce(Return(kNoFencedExecutionError));
|
||||
EXPECT_CALL(*mockExecutionFactory, Call()).Times(1).WillOnce(Return(recoveredMockExecution));
|
||||
|
||||
// run test
|
||||
const auto result = execution->computeFenced({}, {}, {});
|
||||
|
||||
// verify result
|
||||
ASSERT_TRUE(result.has_value())
|
||||
<< "Failed with " << result.error().code << ": " << result.error().message;
|
||||
}
|
||||
|
||||
TEST(ResilientExecutionTest, recover) {
|
||||
// setup call
|
||||
const auto [mockExecution, mockExecutionFactory, execution] = setup();
|
||||
const auto recoveredMockExecution = createMockExecution();
|
||||
EXPECT_CALL(*mockExecutionFactory, Call()).Times(1).WillOnce(Return(recoveredMockExecution));
|
||||
|
||||
// run test
|
||||
const auto result = execution->recover(mockExecution.get());
|
||||
|
||||
// verify result
|
||||
ASSERT_TRUE(result.has_value())
|
||||
<< "Failed with " << result.error().code << ": " << result.error().message;
|
||||
EXPECT_TRUE(result.value() == recoveredMockExecution);
|
||||
}
|
||||
|
||||
TEST(ResilientExecutionTest, recoverFailure) {
|
||||
// setup call
|
||||
const auto [mockExecution, mockExecutionFactory, execution] = setup();
|
||||
const auto recoveredMockExecution = createMockExecution();
|
||||
EXPECT_CALL(*mockExecutionFactory, Call()).Times(1).WillOnce(kReturnGeneralFailure);
|
||||
|
||||
// run test
|
||||
const auto result = execution->recover(mockExecution.get());
|
||||
|
||||
// verify result
|
||||
EXPECT_FALSE(result.has_value());
|
||||
}
|
||||
|
||||
TEST(ResilientExecutionTest, someoneElseRecovered) {
|
||||
// setup call
|
||||
const auto [mockExecution, mockExecutionFactory, execution] = setup();
|
||||
const auto recoveredMockExecution = createMockExecution();
|
||||
EXPECT_CALL(*mockExecutionFactory, Call()).Times(1).WillOnce(Return(recoveredMockExecution));
|
||||
execution->recover(mockExecution.get());
|
||||
|
||||
// run test
|
||||
const auto result = execution->recover(mockExecution.get());
|
||||
|
||||
// verify result
|
||||
ASSERT_TRUE(result.has_value())
|
||||
<< "Failed with " << result.error().code << ": " << result.error().message;
|
||||
EXPECT_TRUE(result.value() == recoveredMockExecution);
|
||||
}
|
||||
|
||||
} // namespace android::hardware::neuralnetworks::utils
|
|
@ -55,6 +55,7 @@ constexpr auto makeError = [](nn::ErrorStatus status) {
|
|||
const auto kReturnGeneralFailure = makeError(nn::ErrorStatus::GENERAL_FAILURE);
|
||||
const auto kReturnDeadObject = makeError(nn::ErrorStatus::DEAD_OBJECT);
|
||||
|
||||
const auto kNoCreateReusableExecutionError = nn::GeneralResult<nn::SharedExecution>{};
|
||||
const auto kNoExecutionError =
|
||||
nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>>{};
|
||||
const auto kNoFencedExecutionError =
|
||||
|
@ -231,6 +232,36 @@ TEST(ResilientPreparedModelTest, executeFencedDeadObjectSuccessfulRecovery) {
|
|||
<< "Failed with " << result.error().code << ": " << result.error().message;
|
||||
}
|
||||
|
||||
TEST(ResilientPreparedModelTest, createReusableExecution) {
|
||||
// setup call
|
||||
const auto [mockPreparedModel, mockPreparedModelFactory, preparedModel] = setup();
|
||||
EXPECT_CALL(*mockPreparedModel, createReusableExecution(_, _, _))
|
||||
.Times(1)
|
||||
.WillOnce(Return(kNoCreateReusableExecutionError));
|
||||
|
||||
// run test
|
||||
const auto result = preparedModel->createReusableExecution({}, {}, {});
|
||||
|
||||
// verify result
|
||||
ASSERT_TRUE(result.has_value())
|
||||
<< "Failed with " << result.error().code << ": " << result.error().message;
|
||||
}
|
||||
|
||||
TEST(ResilientPreparedModelTest, createReusableExecutionError) {
|
||||
// setup call
|
||||
const auto [mockPreparedModel, mockPreparedModelFactory, preparedModel] = setup();
|
||||
EXPECT_CALL(*mockPreparedModel, createReusableExecution(_, _, _))
|
||||
.Times(1)
|
||||
.WillOnce(kReturnGeneralFailure);
|
||||
|
||||
// run test
|
||||
const auto result = preparedModel->createReusableExecution({}, {}, {});
|
||||
|
||||
// verify result
|
||||
ASSERT_FALSE(result.has_value());
|
||||
EXPECT_EQ(result.error().code, nn::ErrorStatus::GENERAL_FAILURE);
|
||||
}
|
||||
|
||||
TEST(ResilientPreparedModelTest, getUnderlyingResource) {
|
||||
// setup call
|
||||
const auto [mockPreparedModel, mockPreparedModelFactory, preparedModel] = setup();
|
||||
|
|
Loading…
Reference in a new issue