Merge changes Ibd460229,I589668ef am: 7815893786

Original change: https://android-review.googlesource.com/c/platform/hardware/interfaces/+/1877839
Change-Id: Ia9887ecbd116066d9f9549986bf2c99730e30171

commit 00e983df29
9 changed files with 182 additions and 201 deletions
@@ -14,10 +14,10 @@
  * limitations under the License.
  */
 
-#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_2_UTILS_EXECUTION_BURST_CONTROLLER_H
-#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_2_UTILS_EXECUTION_BURST_CONTROLLER_H
+#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_2_UTILS_BURST_H
+#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_2_UTILS_BURST_H
 
-#include "ExecutionBurstUtils.h"
+#include "nnapi/hal/1.2/BurstUtils.h"
 
 #include <android-base/thread_annotations.h>
 #include <android/hardware/neuralnetworks/1.0/types.h>

@@ -49,13 +49,11 @@
 namespace android::hardware::neuralnetworks::V1_2::utils {
 
 /**
- * The ExecutionBurstController class manages both the serialization and deserialization of data
- * across FMQ, making it appear to the runtime as a regular synchronous inference. Additionally,
- * this class manages the burst's memory cache.
+ * The Burst class manages both the serialization and deserialization of data across FMQ, making it
+ * appear to the runtime as a regular synchronous inference. Additionally, this class manages the
+ * burst's memory cache.
  */
-class ExecutionBurstController final
-    : public nn::IBurst,
-      public std::enable_shared_from_this<ExecutionBurstController> {
+class Burst final : public nn::IBurst, public std::enable_shared_from_this<Burst> {
     struct PrivateConstructorTag {};
 
   public:

@@ -150,21 +148,21 @@ class ExecutionBurstController final
     * Creates a burst controller on a prepared model.
     *
     * @param preparedModel Model prepared for execution to execute on.
-    * @param pollingTimeWindow How much time (in microseconds) the ExecutionBurstController is
-    *     allowed to poll the FMQ before waiting on the blocking futex. Polling may result in lower
-    *     latencies at the potential cost of more power usage.
-    * @return ExecutionBurstController Execution burst controller object.
+    * @param pollingTimeWindow How much time (in microseconds) the Burst is allowed to poll the FMQ
+    *     before waiting on the blocking futex. Polling may result in lower latencies at the
+    *     potential cost of more power usage.
+    * @return Burst Execution burst controller object.
     */
-    static nn::GeneralResult<std::shared_ptr<const ExecutionBurstController>> create(
+    static nn::GeneralResult<std::shared_ptr<const Burst>> create(
            nn::SharedPreparedModel preparedModel, const sp<IPreparedModel>& hidlPreparedModel,
            std::chrono::microseconds pollingTimeWindow);
 
-    ExecutionBurstController(PrivateConstructorTag tag, nn::SharedPreparedModel preparedModel,
-                             std::unique_ptr<RequestChannelSender> requestChannelSender,
-                             std::unique_ptr<ResultChannelReceiver> resultChannelReceiver,
-                             sp<ExecutionBurstCallback> callback, sp<IBurstContext> burstContext,
-                             std::shared_ptr<MemoryCache> memoryCache,
-                             neuralnetworks::utils::DeathHandler deathHandler);
+    Burst(PrivateConstructorTag tag, nn::SharedPreparedModel preparedModel,
+          std::unique_ptr<RequestChannelSender> requestChannelSender,
+          std::unique_ptr<ResultChannelReceiver> resultChannelReceiver,
+          sp<ExecutionBurstCallback> callback, sp<IBurstContext> burstContext,
+          std::shared_ptr<MemoryCache> memoryCache,
+          neuralnetworks::utils::DeathHandler deathHandler);
 
     // See IBurst::cacheMemory for information on this method.
     OptionalCacheHold cacheMemory(const nn::SharedMemory& memory) const override;

@@ -202,4 +200,4 @@ class ExecutionBurstController final
 
 } // namespace android::hardware::neuralnetworks::V1_2::utils
 
-#endif // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_2_UTILS_EXECUTION_BURST_CONTROLLER_H
+#endif // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_2_UTILS_BURST_H
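For orientation only, here is a minimal sketch of how a caller could drive the renamed class, using just the declarations visible in the hunks above: Burst::create takes the canonical prepared model, the HIDL prepared model, and a polling window, and the returned object's execute method runs one synchronous inference over the burst's FMQ pair. The helper name, variable names, and the zero polling window are illustrative assumptions, not part of this change.

// Illustrative sketch only; assumes an in-tree build where these headers and a
// valid (preparedModel, hidlPreparedModel) pair already exist.
#include <nnapi/hal/1.2/Burst.h>

#include <chrono>
#include <utility>

namespace V1_2_utils = android::hardware::neuralnetworks::V1_2::utils;
namespace nn = android::nn;

// Hypothetical helper, not code from this CL.
void runSingleBurstExecution(
        nn::SharedPreparedModel preparedModel,
        const android::sp<android::hardware::neuralnetworks::V1_2::IPreparedModel>& hidlModel,
        const nn::Request& request) {
    // A 0us polling window means "always block on the futex"; larger values trade power for latency.
    auto burst = V1_2_utils::Burst::create(std::move(preparedModel), hidlModel,
                                           std::chrono::microseconds{0});
    if (!burst.has_value()) return;

    // One synchronous execution serialized across the burst's FMQs.
    auto result = burst.value()->execute(request, nn::MeasureTiming::NO,
                                         /*deadline=*/{}, /*loopTimeoutDuration=*/{});
    if (result.has_value()) {
        const auto& [outputShapes, timing] = result.value();
        (void)outputShapes;
        (void)timing;
    }
}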
@@ -14,8 +14,8 @@
  * limitations under the License.
  */
 
-#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_2_UTILS_EXECUTION_BURST_UTILS_H
-#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_2_UTILS_EXECUTION_BURST_UTILS_H
+#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_2_UTILS_BURST_UTILS_H
+#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_2_UTILS_BURST_UTILS_H
 
 #include <android/hardware/neuralnetworks/1.0/types.h>
 #include <android/hardware/neuralnetworks/1.2/types.h>

@@ -298,4 +298,4 @@ class ResultChannelReceiver final : public neuralnetworks::utils::IProtectedCall
 
 } // namespace android::hardware::neuralnetworks::V1_2::utils
 
-#endif // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_2_UTILS_EXECUTION_BURST_UTILS_H
+#endif // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_2_UTILS_BURST_UTILS_H
@@ -14,10 +14,8 @@
  * limitations under the License.
  */
 
-#define LOG_TAG "ExecutionBurstController"
-
-#include "ExecutionBurstController.h"
-#include "ExecutionBurstUtils.h"
+#include "Burst.h"
+#include "BurstUtils.h"
 
 #include <android-base/logging.h>
 #include <android-base/thread_annotations.h>

@@ -57,14 +55,13 @@ class BurstExecution final : public nn::IExecution,
 
   public:
     static nn::GeneralResult<std::shared_ptr<const BurstExecution>> create(
-            std::shared_ptr<const ExecutionBurstController> controller,
-            std::vector<FmqRequestDatum> request, hal::utils::RequestRelocation relocation,
-            std::vector<ExecutionBurstController::OptionalCacheHold> cacheHolds);
+            std::shared_ptr<const Burst> controller, std::vector<FmqRequestDatum> request,
+            hal::utils::RequestRelocation relocation,
+            std::vector<Burst::OptionalCacheHold> cacheHolds);
 
-    BurstExecution(PrivateConstructorTag tag,
-                   std::shared_ptr<const ExecutionBurstController> controller,
+    BurstExecution(PrivateConstructorTag tag, std::shared_ptr<const Burst> controller,
                    std::vector<FmqRequestDatum> request, hal::utils::RequestRelocation relocation,
-                   std::vector<ExecutionBurstController::OptionalCacheHold> cacheHolds);
+                   std::vector<Burst::OptionalCacheHold> cacheHolds);
 
     nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> compute(
             const nn::OptionalTimePoint& deadline) const override;

@@ -74,10 +71,10 @@ class BurstExecution final : public nn::IExecution,
             const nn::OptionalDuration& timeoutDurationAfterFence) const override;
 
   private:
-    const std::shared_ptr<const ExecutionBurstController> kController;
+    const std::shared_ptr<const Burst> kController;
     const std::vector<FmqRequestDatum> kRequest;
     const hal::utils::RequestRelocation kRelocation;
-    const std::vector<ExecutionBurstController::OptionalCacheHold> kCacheHolds;
+    const std::vector<Burst::OptionalCacheHold> kCacheHolds;
 };
 
 nn::GeneralResult<sp<IBurstContext>> executionBurstResultCallback(

@@ -92,8 +89,7 @@ nn::GeneralResult<sp<IBurstContext>> executionBurstResultCallback(
 }
 
 nn::GeneralResult<hidl_vec<hidl_memory>> getMemoriesHelper(
-        const hidl_vec<int32_t>& slots,
-        const std::shared_ptr<ExecutionBurstController::MemoryCache>& memoryCache) {
+        const hidl_vec<int32_t>& slots, const std::shared_ptr<Burst::MemoryCache>& memoryCache) {
     hidl_vec<hidl_memory> memories(slots.size());
     for (size_t i = 0; i < slots.size(); ++i) {
         const int32_t slot = slots[i];

@@ -110,7 +106,7 @@ nn::GeneralResult<hidl_vec<hidl_memory>> getMemoriesHelper(
 
 // MemoryCache methods
 
-ExecutionBurstController::MemoryCache::MemoryCache() {
+Burst::MemoryCache::MemoryCache() {
     constexpr size_t kPreallocatedCount = 1024;
     std::vector<int32_t> freeSlotsSpace;
     freeSlotsSpace.reserve(kPreallocatedCount);

@@ -119,13 +115,13 @@ ExecutionBurstController::MemoryCache::MemoryCache() {
     mCacheCleaner.reserve(kPreallocatedCount);
 }
 
-void ExecutionBurstController::MemoryCache::setBurstContext(sp<IBurstContext> burstContext) {
+void Burst::MemoryCache::setBurstContext(sp<IBurstContext> burstContext) {
     std::lock_guard guard(mMutex);
     mBurstContext = std::move(burstContext);
 }
 
-std::pair<int32_t, ExecutionBurstController::MemoryCache::SharedCleanup>
-ExecutionBurstController::MemoryCache::cacheMemory(const nn::SharedMemory& memory) {
+std::pair<int32_t, Burst::MemoryCache::SharedCleanup> Burst::MemoryCache::cacheMemory(
+        const nn::SharedMemory& memory) {
     std::unique_lock lock(mMutex);
     base::ScopedLockAssertion lockAssert(mMutex);
 

@@ -163,7 +159,7 @@ ExecutionBurstController::MemoryCache::cacheMemory(const nn::SharedMemory& memor
     return std::make_pair(slot, std::move(cleaner));
 }
 
-nn::GeneralResult<nn::SharedMemory> ExecutionBurstController::MemoryCache::getMemory(int32_t slot) {
+nn::GeneralResult<nn::SharedMemory> Burst::MemoryCache::getMemory(int32_t slot) {
     std::lock_guard guard(mMutex);
     if (slot < 0 || static_cast<size_t>(slot) >= mMemoryCache.size()) {
         return NN_ERROR() << "Invalid slot: " << slot << " vs " << mMemoryCache.size();

@@ -171,7 +167,7 @@ nn::GeneralResult<nn::SharedMemory> ExecutionBurstController::MemoryCache::getMe
     return mMemoryCache[slot];
 }
 
-void ExecutionBurstController::MemoryCache::freeMemory(const nn::SharedMemory& memory) {
+void Burst::MemoryCache::freeMemory(const nn::SharedMemory& memory) {
     {
         std::lock_guard guard(mMutex);
         const int32_t slot = mMemoryIdToSlot.at(memory);

@@ -189,7 +185,7 @@ void ExecutionBurstController::MemoryCache::freeMemory(const nn::SharedMemory& m
     mCond.notify_all();
 }
 
-int32_t ExecutionBurstController::MemoryCache::allocateSlotLocked() {
+int32_t Burst::MemoryCache::allocateSlotLocked() {
     constexpr size_t kMaxNumberOfSlots = std::numeric_limits<int32_t>::max();
 
     // If there is a free slot, use it.

@@ -210,18 +206,18 @@ int32_t ExecutionBurstController::MemoryCache::allocateSlotLocked() {
 
 // ExecutionBurstCallback methods
 
-ExecutionBurstController::ExecutionBurstCallback::ExecutionBurstCallback(
+Burst::ExecutionBurstCallback::ExecutionBurstCallback(
         const std::shared_ptr<MemoryCache>& memoryCache)
     : kMemoryCache(memoryCache) {
     CHECK(memoryCache != nullptr);
 }
 
-Return<void> ExecutionBurstController::ExecutionBurstCallback::getMemories(
-        const hidl_vec<int32_t>& slots, getMemories_cb cb) {
+Return<void> Burst::ExecutionBurstCallback::getMemories(const hidl_vec<int32_t>& slots,
+                                                        getMemories_cb cb) {
     const auto memoryCache = kMemoryCache.lock();
     if (memoryCache == nullptr) {
-        LOG(ERROR) << "ExecutionBurstController::ExecutionBurstCallback::getMemories called after "
-                      "the MemoryCache has been freed";
+        LOG(ERROR) << "Burst::ExecutionBurstCallback::getMemories called after the MemoryCache has "
+                      "been freed";
         cb(V1_0::ErrorStatus::GENERAL_FAILURE, {});
         return Void();
     }

@@ -229,8 +225,8 @@ Return<void> ExecutionBurstController::ExecutionBurstCallback::getMemories(
     const auto maybeMemories = getMemoriesHelper(slots, memoryCache);
     if (!maybeMemories.has_value()) {
         const auto& [message, code] = maybeMemories.error();
-        LOG(ERROR) << "ExecutionBurstController::ExecutionBurstCallback::getMemories failed with "
-                   << code << ": " << message;
+        LOG(ERROR) << "Burst::ExecutionBurstCallback::getMemories failed with " << code << ": "
+                   << message;
         cb(V1_0::ErrorStatus::INVALID_ARGUMENT, {});
         return Void();
     }

@@ -239,14 +235,14 @@ Return<void> ExecutionBurstController::ExecutionBurstCallback::getMemories(
     return Void();
 }
 
-// ExecutionBurstController methods
+// Burst methods
 
-nn::GeneralResult<std::shared_ptr<const ExecutionBurstController>> ExecutionBurstController::create(
+nn::GeneralResult<std::shared_ptr<const Burst>> Burst::create(
         nn::SharedPreparedModel preparedModel, const sp<V1_2::IPreparedModel>& hidlPreparedModel,
         std::chrono::microseconds pollingTimeWindow) {
     // check inputs
     if (preparedModel == nullptr || hidlPreparedModel == nullptr) {
-        return NN_ERROR() << "ExecutionBurstController::create passed a nullptr";
+        return NN_ERROR() << "Burst::create passed a nullptr";
     }
 
     // create FMQ objects

@@ -282,18 +278,18 @@ nn::GeneralResult<std::shared_ptr<const ExecutionBurstController>> ExecutionBurs
     deathHandler.protectCallbackForLifetimeOfDeathHandler(resultChannelReceiver.get());
 
     // make and return controller
-    return std::make_shared<const ExecutionBurstController>(
+    return std::make_shared<const Burst>(
             PrivateConstructorTag{}, std::move(preparedModel), std::move(requestChannelSender),
            std::move(resultChannelReceiver), std::move(burstCallback), std::move(burstContext),
            std::move(memoryCache), std::move(deathHandler));
 }
 
-ExecutionBurstController::ExecutionBurstController(
-        PrivateConstructorTag /*tag*/, nn::SharedPreparedModel preparedModel,
-        std::unique_ptr<RequestChannelSender> requestChannelSender,
-        std::unique_ptr<ResultChannelReceiver> resultChannelReceiver,
-        sp<ExecutionBurstCallback> callback, sp<IBurstContext> burstContext,
-        std::shared_ptr<MemoryCache> memoryCache, neuralnetworks::utils::DeathHandler deathHandler)
+Burst::Burst(PrivateConstructorTag /*tag*/, nn::SharedPreparedModel preparedModel,
+             std::unique_ptr<RequestChannelSender> requestChannelSender,
+             std::unique_ptr<ResultChannelReceiver> resultChannelReceiver,
+             sp<ExecutionBurstCallback> callback, sp<IBurstContext> burstContext,
+             std::shared_ptr<MemoryCache> memoryCache,
+             neuralnetworks::utils::DeathHandler deathHandler)
     : kPreparedModel(std::move(preparedModel)),
      mRequestChannelSender(std::move(requestChannelSender)),
      mResultChannelReceiver(std::move(resultChannelReceiver)),

@@ -302,21 +298,20 @@ ExecutionBurstController::ExecutionBurstController(
      mMemoryCache(std::move(memoryCache)),
      kDeathHandler(std::move(deathHandler)) {}
 
-ExecutionBurstController::OptionalCacheHold ExecutionBurstController::cacheMemory(
-        const nn::SharedMemory& memory) const {
+Burst::OptionalCacheHold Burst::cacheMemory(const nn::SharedMemory& memory) const {
     auto [slot, hold] = mMemoryCache->cacheMemory(memory);
     return hold;
 }
 
-nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>>
-ExecutionBurstController::execute(const nn::Request& request, nn::MeasureTiming measure,
-                                  const nn::OptionalTimePoint& deadline,
-                                  const nn::OptionalDuration& loopTimeoutDuration) const {
+nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> Burst::execute(
+        const nn::Request& request, nn::MeasureTiming measure,
+        const nn::OptionalTimePoint& deadline,
+        const nn::OptionalDuration& loopTimeoutDuration) const {
     // This is the first point when we know an execution is occurring, so begin to collect
     // systraces. Note that the first point we can begin collecting systraces in
     // ExecutionBurstServer is when the RequestChannelReceiver realizes there is data in the FMQ, so
     // ExecutionBurstServer collects systraces at different points in the code.
-    NNTRACE_RT(NNTRACE_PHASE_EXECUTION, "ExecutionBurstController::execute");
+    NNTRACE_RT(NNTRACE_PHASE_EXECUTION, "Burst::execute");
 
     // if the request is valid but of a higher version than what's supported in burst execution,
     // fall back to another execution path

@@ -357,10 +352,10 @@ ExecutionBurstController::execute(const nn::Request& request, nn::MeasureTiming
 }
 
 // See IBurst::createReusableExecution for information on this method.
-nn::GeneralResult<nn::SharedExecution> ExecutionBurstController::createReusableExecution(
+nn::GeneralResult<nn::SharedExecution> Burst::createReusableExecution(
         const nn::Request& request, nn::MeasureTiming measure,
         const nn::OptionalDuration& loopTimeoutDuration) const {
-    NNTRACE_RT(NNTRACE_PHASE_EXECUTION, "ExecutionBurstController::createReusableExecution");
+    NNTRACE_RT(NNTRACE_PHASE_EXECUTION, "Burst::createReusableExecution");
 
     // if the request is valid but of a higher version than what's supported in burst execution,
     // fall back to another execution path

@@ -397,12 +392,10 @@ nn::GeneralResult<nn::SharedExecution> ExecutionBurstController::createReusableE
             std::move(relocation), std::move(holds));
 }
 
-nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>>
-ExecutionBurstController::executeInternal(const std::vector<FmqRequestDatum>& requestPacket,
-                                          const hal::utils::RequestRelocation& relocation,
-                                          FallbackFunction fallback) const {
-    NNTRACE_FULL(NNTRACE_LAYER_IPC, NNTRACE_PHASE_EXECUTION,
-                 "ExecutionBurstController::executeInternal");
+nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> Burst::executeInternal(
+        const std::vector<FmqRequestDatum>& requestPacket,
+        const hal::utils::RequestRelocation& relocation, FallbackFunction fallback) const {
+    NNTRACE_FULL(NNTRACE_LAYER_IPC, NNTRACE_PHASE_EXECUTION, "Burst::executeInternal");
 
     // Ensure that at most one execution is in flight at any given time.
     const bool alreadyInFlight = mExecutionInFlight.test_and_set();

@@ -435,9 +428,9 @@ ExecutionBurstController::executeInternal(const std::vector<FmqRequestDatum>& re
 }
 
 nn::GeneralResult<std::shared_ptr<const BurstExecution>> BurstExecution::create(
-        std::shared_ptr<const ExecutionBurstController> controller,
-        std::vector<FmqRequestDatum> request, hal::utils::RequestRelocation relocation,
-        std::vector<ExecutionBurstController::OptionalCacheHold> cacheHolds) {
+        std::shared_ptr<const Burst> controller, std::vector<FmqRequestDatum> request,
+        hal::utils::RequestRelocation relocation,
+        std::vector<Burst::OptionalCacheHold> cacheHolds) {
     if (controller == nullptr) {
         return NN_ERROR() << "V1_2::utils::BurstExecution::create must have non-null controller";
     }

@@ -448,10 +441,10 @@ nn::GeneralResult<std::shared_ptr<const BurstExecution>> BurstExecution::create(
 }
 
 BurstExecution::BurstExecution(PrivateConstructorTag /*tag*/,
-                               std::shared_ptr<const ExecutionBurstController> controller,
+                               std::shared_ptr<const Burst> controller,
                                std::vector<FmqRequestDatum> request,
                                hal::utils::RequestRelocation relocation,
-                               std::vector<ExecutionBurstController::OptionalCacheHold> cacheHolds)
+                               std::vector<Burst::OptionalCacheHold> cacheHolds)
     : kController(std::move(controller)),
      kRequest(std::move(request)),
      kRelocation(std::move(relocation)),
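The memory-cache logic above is unchanged apart from the rename: cacheMemory hands back a slot id plus a ref-counted hold, and the slot is released once the last hold goes away. As a standalone reference for that pattern only, here is a compilable sketch with simplified stand-in types; it has none of the real MemoryCache's locking, deduplication, or burst-context plumbing.

// Self-contained illustration of the slot + ref-counted "hold" idea used by
// Burst::MemoryCache; SlotCache, Hold, and the std::string "memory" are
// simplified stand-ins, not the real implementation.
#include <cstdio>
#include <map>
#include <memory>
#include <string>
#include <utility>

class SlotCache {
  public:
    // Destroying the last copy of a Hold releases the slot.
    using Hold = std::shared_ptr<void>;

    std::pair<int, Hold> cache(std::string memory) {
        const int slot = mNextSlot++;
        mSlots.emplace(slot, std::move(memory));
        // The custom deleter runs when the final Hold is dropped and frees the slot.
        Hold hold(nullptr, [this, slot](void*) { release(slot); });
        return {slot, std::move(hold)};
    }

  private:
    void release(int slot) {
        std::printf("releasing slot %d (%s)\n", slot, mSlots.at(slot).c_str());
        mSlots.erase(slot);
    }

    std::map<int, std::string> mSlots;
    int mNextSlot = 0;
};

int main() {
    SlotCache cache;                           // must outlive every hold it hands out
    auto [slot, hold] = cache.cache("pool0");
    std::printf("cached pool0 in slot %d\n", slot);
    hold.reset();                              // last hold dropped -> slot released
}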
@@ -14,9 +14,7 @@
  * limitations under the License.
  */
 
-#define LOG_TAG "ExecutionBurstUtils"
-
-#include "ExecutionBurstUtils.h"
+#include "BurstUtils.h"
 
 #include <android-base/logging.h>
 #include <android-base/properties.h>
@@ -16,11 +16,11 @@
 
 #include "PreparedModel.h"
 
+#include "Burst.h"
+#include "BurstUtils.h"
 #include "Callbacks.h"
 #include "Conversions.h"
 #include "Execution.h"
-#include "ExecutionBurstController.h"
-#include "ExecutionBurstUtils.h"
 #include "Utils.h"
 
 #include <android/hardware/neuralnetworks/1.0/types.h>

@@ -150,16 +150,8 @@ nn::GeneralResult<nn::SharedExecution> PreparedModel::createReusableExecution(
 }
 
 nn::GeneralResult<nn::SharedBurst> PreparedModel::configureExecutionBurst() const {
-    auto self = shared_from_this();
-    auto fallback = [preparedModel = std::move(self)](
-                            const nn::Request& request, nn::MeasureTiming measure,
-                            const nn::OptionalTimePoint& deadline,
-                            const nn::OptionalDuration& loopTimeoutDuration)
-            -> nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> {
-        return preparedModel->execute(request, measure, deadline, loopTimeoutDuration);
-    };
     const auto pollingTimeWindow = getBurstControllerPollingTimeWindow();
-    return ExecutionBurstController::create(shared_from_this(), kPreparedModel, pollingTimeWindow);
+    return Burst::create(shared_from_this(), kPreparedModel, pollingTimeWindow);
 }
 
 std::any PreparedModel::getUnderlyingResource() const {
@@ -32,9 +32,9 @@
 #include <nnapi/Types.h>
 #include <nnapi/hal/1.0/HandleError.h>
 #include <nnapi/hal/1.0/ProtectCallback.h>
+#include <nnapi/hal/1.2/Burst.h>
+#include <nnapi/hal/1.2/BurstUtils.h>
 #include <nnapi/hal/1.2/Conversions.h>
-#include <nnapi/hal/1.2/ExecutionBurstController.h>
-#include <nnapi/hal/1.2/ExecutionBurstUtils.h>
 #include <nnapi/hal/CommonUtils.h>
 
 #include <memory>

@@ -246,17 +246,8 @@ nn::GeneralResult<nn::SharedExecution> PreparedModel::createReusableExecution(
 }
 
 nn::GeneralResult<nn::SharedBurst> PreparedModel::configureExecutionBurst() const {
-    auto self = shared_from_this();
-    auto fallback = [preparedModel = std::move(self)](
-                            const nn::Request& request, nn::MeasureTiming measure,
-                            const nn::OptionalTimePoint& deadline,
-                            const nn::OptionalDuration& loopTimeoutDuration)
-            -> nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> {
-        return preparedModel->execute(request, measure, deadline, loopTimeoutDuration);
-    };
     const auto pollingTimeWindow = V1_2::utils::getBurstControllerPollingTimeWindow();
-    return V1_2::utils::ExecutionBurstController::create(shared_from_this(), kPreparedModel,
-                                                         pollingTimeWindow);
+    return V1_2::utils::Burst::create(shared_from_this(), kPreparedModel, pollingTimeWindow);
 }
 
 std::any PreparedModel::getUnderlyingResource() const {
@@ -14,14 +14,13 @@
  * limitations under the License.
  */
 
-#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_2_UTILS_EXECUTION_BURST_SERVER_H
-#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_2_UTILS_EXECUTION_BURST_SERVER_H
-
-#include "ExecutionBurstUtils.h"
+#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_ADAPTER_BURST_H
+#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_ADAPTER_BURST_H
 
 #include <android-base/thread_annotations.h>
 #include <android/hardware/neuralnetworks/1.0/types.h>
 #include <android/hardware/neuralnetworks/1.2/IBurstCallback.h>
 #include <android/hardware/neuralnetworks/1.2/IBurstContext.h>
 #include <android/hardware/neuralnetworks/1.2/IPreparedModel.h>
 #include <android/hardware/neuralnetworks/1.2/types.h>
 #include <fmq/MessageQueue.h>

@@ -30,6 +29,7 @@
 #include <nnapi/Result.h>
 #include <nnapi/Types.h>
 #include <nnapi/hal/1.0/ProtectCallback.h>
+#include <nnapi/hal/1.2/BurstUtils.h>
 
 #include <atomic>
 #include <chrono>

@@ -39,13 +39,13 @@
 #include <tuple>
 #include <vector>
 
-namespace android::hardware::neuralnetworks::V1_2::utils {
+namespace android::hardware::neuralnetworks::adapter {
 
 /**
- * The ExecutionBurstServer class is responsible for waiting for and deserializing a request object
- * from a FMQ, performing the inference, and serializing the result back across another FMQ.
+ * The Burst class is responsible for waiting for and deserializing a request object from a FMQ,
+ * performing the inference, and serializing the result back across another FMQ.
  */
-class ExecutionBurstServer : public IBurstContext {
+class Burst : public V1_2::IBurstContext {
     struct PrivateConstructorTag {};
 
   public:

@@ -58,13 +58,13 @@ class ExecutionBurstServer : public IBurstContext {
       public:
        // Precondition: burstExecutor != nullptr
        // Precondition: burstCallback != nullptr
-       MemoryCache(nn::SharedBurst burstExecutor, sp<IBurstCallback> burstCallback);
+       MemoryCache(nn::SharedBurst burstExecutor, sp<V1_2::IBurstCallback> burstCallback);
 
        /**
         * Get the cached memory objects corresponding to provided slot identifiers.
         *
-        * If the slot entry is not present in the cache, this class will use IBurstCallback to
-        * retrieve those entries that are not present in the cache, then cache them.
+        * If the slot entry is not present in the cache, this class will use V1_2::IBurstCallback
+        * to retrieve those entries that are not present in the cache, then cache them.
         *
        * @param slots Identifiers of memory objects to be retrieved.
        * @return A vector where each element is the memory object and a ref-counted cache "hold"

@@ -93,7 +93,7 @@ class ExecutionBurstServer : public IBurstContext {
        std::map<int32_t, std::pair<nn::SharedMemory, nn::IBurst::OptionalCacheHold>> mCache
                GUARDED_BY(mMutex);
        nn::SharedBurst kBurstExecutor;
-       const sp<IBurstCallback> kBurstCallback;
+       const sp<V1_2::IBurstCallback> kBurstCallback;
    };
 
    /**

@@ -111,45 +111,45 @@ class ExecutionBurstServer : public IBurstContext {
     * execution.
     * @param burstExecutor Object which maintains a local cache of the memory pools and executes
     *     using the cached memory pools.
-    * @param pollingTimeWindow How much time (in microseconds) the ExecutionBurstServer is allowed
-    *     to poll the FMQ before waiting on the blocking futex. Polling may result in lower
-    *     latencies at the potential cost of more power usage.
-    * @return IBurstContext Handle to the burst context.
+    * @param pollingTimeWindow How much time (in microseconds) the Burst is allowed to poll the FMQ
+    *     before waiting on the blocking futex. Polling may result in lower latencies at the
+    *     potential cost of more power usage.
+    * @return V1_2::IBurstContext Handle to the burst context.
     */
-    static nn::GeneralResult<sp<ExecutionBurstServer>> create(
-            const sp<IBurstCallback>& callback,
-            const MQDescriptorSync<FmqRequestDatum>& requestChannel,
-            const MQDescriptorSync<FmqResultDatum>& resultChannel, nn::SharedBurst burstExecutor,
+    static nn::GeneralResult<sp<Burst>> create(
+            const sp<V1_2::IBurstCallback>& callback,
+            const MQDescriptorSync<V1_2::FmqRequestDatum>& requestChannel,
+            const MQDescriptorSync<V1_2::FmqResultDatum>& resultChannel,
+            nn::SharedBurst burstExecutor,
            std::chrono::microseconds pollingTimeWindow = std::chrono::microseconds{0});
 
-    ExecutionBurstServer(PrivateConstructorTag tag, const sp<IBurstCallback>& callback,
-                         std::unique_ptr<RequestChannelReceiver> requestChannel,
-                         std::unique_ptr<ResultChannelSender> resultChannel,
-                         nn::SharedBurst burstExecutor);
-    ~ExecutionBurstServer();
+    Burst(PrivateConstructorTag tag, const sp<V1_2::IBurstCallback>& callback,
+          std::unique_ptr<V1_2::utils::RequestChannelReceiver> requestChannel,
+          std::unique_ptr<V1_2::utils::ResultChannelSender> resultChannel,
+          nn::SharedBurst burstExecutor);
+    ~Burst();
 
     // Used by the NN runtime to preemptively remove any stored memory. See
-    // IBurstContext::freeMemory for more information.
+    // V1_2::IBurstContext::freeMemory for more information.
     Return<void> freeMemory(int32_t slot) override;
 
   private:
-    // Work loop that will continue processing execution requests until the ExecutionBurstServer
-    // object is freed.
+    // Work loop that will continue processing execution requests until the Burst object is freed.
     void task();
 
-    nn::ExecutionResult<std::pair<hidl_vec<OutputShape>, Timing>> execute(
+    nn::ExecutionResult<std::pair<hidl_vec<V1_2::OutputShape>, V1_2::Timing>> execute(
            const V1_0::Request& requestWithoutPools, const std::vector<int32_t>& slotsOfPools,
-            MeasureTiming measure);
+            V1_2::MeasureTiming measure);
 
     std::thread mWorker;
     std::atomic<bool> mTeardown{false};
-    const sp<IBurstCallback> mCallback;
-    const std::unique_ptr<RequestChannelReceiver> mRequestChannelReceiver;
-    const std::unique_ptr<ResultChannelSender> mResultChannelSender;
+    const sp<V1_2::IBurstCallback> mCallback;
+    const std::unique_ptr<V1_2::utils::RequestChannelReceiver> mRequestChannelReceiver;
+    const std::unique_ptr<V1_2::utils::ResultChannelSender> mResultChannelSender;
     const nn::SharedBurst mBurstExecutor;
     MemoryCache mMemoryCache;
 };
 
-} // namespace android::hardware::neuralnetworks::V1_2::utils
+} // namespace android::hardware::neuralnetworks::adapter
 
-#endif // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_2_UTILS_EXECUTION_BURST_SERVER_H
+#endif // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_ADAPTER_BURST_H
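The adapter-side class keeps the same lifecycle it had before the move: a worker thread blocks on the request channel, executes, and sends results until the object is torn down (mWorker, mTeardown, the receiver invalidation, and the join in the destructor). As a language-level reference for that lifecycle only, the following standalone sketch uses a plain queue and condition variable in place of the FMQ machinery; none of it is the adapter code itself.

// Standalone sketch of the worker-loop shape seen in adapter::Burst::task():
// block for a request, execute, report, until torn down.
#include <atomic>
#include <condition_variable>
#include <cstdio>
#include <mutex>
#include <optional>
#include <queue>
#include <string>
#include <thread>

class Worker {
  public:
    Worker() : mThread([this] { task(); }) {}
    ~Worker() {
        mTeardown = true;
        mCondition.notify_all();  // plays the role of invalidating the request channel
        mThread.join();
    }

    void send(std::string request) {
        {
            std::lock_guard guard(mMutex);
            mQueue.push(std::move(request));
        }
        mCondition.notify_all();
    }

  private:
    std::optional<std::string> receive() {
        std::unique_lock lock(mMutex);
        mCondition.wait(lock, [this] { return mTeardown || !mQueue.empty(); });
        if (mQueue.empty()) return std::nullopt;  // woken only for teardown
        auto request = std::move(mQueue.front());
        mQueue.pop();
        return request;
    }

    void task() {
        // Loop until the object is being destroyed, mirroring Burst::task().
        while (!mTeardown) {
            auto request = receive();
            if (!request.has_value()) continue;  // teardown, nothing to execute
            std::printf("executed request: %s\n", request->c_str());
        }
    }

    std::mutex mMutex;
    std::condition_variable mCondition;
    std::queue<std::string> mQueue;
    std::atomic<bool> mTeardown{false};
    std::thread mThread;  // declared last so it starts after the other members exist
};

int main() {
    Worker worker;
    worker.send("add");
    worker.send("mul");
}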
@@ -14,11 +14,7 @@
  * limitations under the License.
  */
 
-#define LOG_TAG "ExecutionBurstServer"
-
-#include "ExecutionBurstServer.h"
-#include "Conversions.h"
-#include "ExecutionBurstUtils.h"
+#include "Burst.h"
 
 #include <android-base/logging.h>
 #include <nnapi/IBurst.h>

@@ -29,6 +25,8 @@
 #include <nnapi/hal/1.0/Conversions.h>
 #include <nnapi/hal/1.0/HandleError.h>
 #include <nnapi/hal/1.0/ProtectCallback.h>
+#include <nnapi/hal/1.2/BurstUtils.h>
+#include <nnapi/hal/1.2/Conversions.h>
 #include <nnapi/hal/TransferValue.h>
 
 #include <algorithm>

@@ -42,11 +40,11 @@
 
 #include "Tracing.h"
 
-namespace android::hardware::neuralnetworks::V1_2::utils {
+namespace android::hardware::neuralnetworks::adapter {
 namespace {
 
-constexpr V1_2::Timing kNoTiming = {std::numeric_limits<uint64_t>::max(),
-                                    std::numeric_limits<uint64_t>::max()};
+constexpr V1_2::Timing kTiming = {std::numeric_limits<uint64_t>::max(),
+                                  std::numeric_limits<uint64_t>::max()};
 
 nn::GeneralResult<std::vector<nn::SharedMemory>> getMemoriesCallback(
         V1_0::ErrorStatus status, const hidl_vec<hidl_memory>& memories) {

@@ -61,15 +59,15 @@ nn::GeneralResult<std::vector<nn::SharedMemory>> getMemoriesCallback(
 
 } // anonymous namespace
 
-ExecutionBurstServer::MemoryCache::MemoryCache(nn::SharedBurst burstExecutor,
-                                               sp<IBurstCallback> burstCallback)
+Burst::MemoryCache::MemoryCache(nn::SharedBurst burstExecutor,
+                                sp<V1_2::IBurstCallback> burstCallback)
     : kBurstExecutor(std::move(burstExecutor)), kBurstCallback(std::move(burstCallback)) {
     CHECK(kBurstExecutor != nullptr);
     CHECK(kBurstCallback != nullptr);
 }
 
 nn::GeneralResult<std::vector<std::pair<nn::SharedMemory, nn::IBurst::OptionalCacheHold>>>
-ExecutionBurstServer::MemoryCache::getCacheEntries(const std::vector<int32_t>& slots) {
+Burst::MemoryCache::getCacheEntries(const std::vector<int32_t>& slots) {
     std::lock_guard guard(mMutex);
     NN_TRY(ensureCacheEntriesArePresentLocked(slots));
 

@@ -82,7 +80,7 @@ ExecutionBurstServer::MemoryCache::getCacheEntries(const std::vector<int32_t>& s
     return results;
 }
 
-nn::GeneralResult<void> ExecutionBurstServer::MemoryCache::ensureCacheEntriesArePresentLocked(
+nn::GeneralResult<void> Burst::MemoryCache::ensureCacheEntriesArePresentLocked(
        const std::vector<int32_t>& slots) {
     const auto slotIsKnown = [this](int32_t slot)
            REQUIRES(mMutex) { return mCache.count(slot) > 0; };

@@ -107,11 +105,10 @@ nn::GeneralResult<void> ExecutionBurstServer::MemoryCache::ensureCacheEntriesAre
     auto returnedMemories = NN_TRY(cb.take());
 
     if (returnedMemories.size() != unknownSlots.size()) {
-        return NN_ERROR()
-               << "ExecutionBurstServer::MemoryCache::ensureCacheEntriesArePresentLocked: Error "
-                  "retrieving memories -- count mismatch between requested memories ("
-               << unknownSlots.size() << ") and returned memories (" << returnedMemories.size()
-               << ")";
+        return NN_ERROR() << "Burst::MemoryCache::ensureCacheEntriesArePresentLocked: Error "
+                             "retrieving memories -- count mismatch between requested memories ("
+                          << unknownSlots.size() << ") and returned memories ("
+                          << returnedMemories.size() << ")";
     }
 
     // add memories to unknown slots

@@ -123,56 +120,54 @@ nn::GeneralResult<void> ExecutionBurstServer::MemoryCache::ensureCacheEntriesAre
 }
 
 nn::GeneralResult<std::pair<nn::SharedMemory, nn::IBurst::OptionalCacheHold>>
-ExecutionBurstServer::MemoryCache::getCacheEntryLocked(int32_t slot) {
+Burst::MemoryCache::getCacheEntryLocked(int32_t slot) {
     if (const auto iter = mCache.find(slot); iter != mCache.end()) {
         return iter->second;
     }
-    return NN_ERROR()
-           << "ExecutionBurstServer::MemoryCache::getCacheEntryLocked failed because slot " << slot
-           << " is not present in the cache";
+    return NN_ERROR() << "Burst::MemoryCache::getCacheEntryLocked failed because slot " << slot
+                      << " is not present in the cache";
 }
 
-void ExecutionBurstServer::MemoryCache::addCacheEntryLocked(int32_t slot, nn::SharedMemory memory) {
+void Burst::MemoryCache::addCacheEntryLocked(int32_t slot, nn::SharedMemory memory) {
     auto hold = kBurstExecutor->cacheMemory(memory);
     mCache.emplace(slot, std::make_pair(std::move(memory), std::move(hold)));
 }
 
-void ExecutionBurstServer::MemoryCache::removeCacheEntry(int32_t slot) {
+void Burst::MemoryCache::removeCacheEntry(int32_t slot) {
     std::lock_guard guard(mMutex);
     mCache.erase(slot);
 }
 
-// ExecutionBurstServer methods
+// Burst methods
 
-nn::GeneralResult<sp<ExecutionBurstServer>> ExecutionBurstServer::create(
-        const sp<IBurstCallback>& callback, const MQDescriptorSync<FmqRequestDatum>& requestChannel,
-        const MQDescriptorSync<FmqResultDatum>& resultChannel, nn::SharedBurst burstExecutor,
+nn::GeneralResult<sp<Burst>> Burst::create(
+        const sp<V1_2::IBurstCallback>& callback,
+        const MQDescriptorSync<V1_2::FmqRequestDatum>& requestChannel,
+        const MQDescriptorSync<V1_2::FmqResultDatum>& resultChannel, nn::SharedBurst burstExecutor,
        std::chrono::microseconds pollingTimeWindow) {
     // check inputs
     if (callback == nullptr || burstExecutor == nullptr) {
-        return NN_ERROR() << "ExecutionBurstServer::create passed a nullptr";
+        return NN_ERROR() << "Burst::create passed a nullptr";
     }
 
     // create FMQ objects
     auto requestChannelReceiver =
-            NN_TRY(RequestChannelReceiver::create(requestChannel, pollingTimeWindow));
-    auto resultChannelSender = NN_TRY(ResultChannelSender::create(resultChannel));
+            NN_TRY(V1_2::utils::RequestChannelReceiver::create(requestChannel, pollingTimeWindow));
+    auto resultChannelSender = NN_TRY(V1_2::utils::ResultChannelSender::create(resultChannel));
 
     // check FMQ objects
     CHECK(requestChannelReceiver != nullptr);
     CHECK(resultChannelSender != nullptr);
 
     // make and return context
-    return sp<ExecutionBurstServer>::make(PrivateConstructorTag{}, callback,
-                                          std::move(requestChannelReceiver),
-                                          std::move(resultChannelSender), std::move(burstExecutor));
+    return sp<Burst>::make(PrivateConstructorTag{}, callback, std::move(requestChannelReceiver),
+                           std::move(resultChannelSender), std::move(burstExecutor));
 }
 
-ExecutionBurstServer::ExecutionBurstServer(PrivateConstructorTag /*tag*/,
-                                           const sp<IBurstCallback>& callback,
-                                           std::unique_ptr<RequestChannelReceiver> requestChannel,
-                                           std::unique_ptr<ResultChannelSender> resultChannel,
-                                           nn::SharedBurst burstExecutor)
+Burst::Burst(PrivateConstructorTag /*tag*/, const sp<V1_2::IBurstCallback>& callback,
+             std::unique_ptr<V1_2::utils::RequestChannelReceiver> requestChannel,
+             std::unique_ptr<V1_2::utils::ResultChannelSender> resultChannel,
+             nn::SharedBurst burstExecutor)
     : mCallback(callback),
      mRequestChannelReceiver(std::move(requestChannel)),
      mResultChannelSender(std::move(resultChannel)),

@@ -182,7 +177,7 @@ ExecutionBurstServer::ExecutionBurstServer(PrivateConstructorTag /*tag*/,
     mWorker = std::thread([this] { task(); });
 }
 
-ExecutionBurstServer::~ExecutionBurstServer() {
+Burst::~Burst() {
     // set teardown flag
     mTeardown = true;
     mRequestChannelReceiver->invalidate();

@@ -191,12 +186,12 @@ ExecutionBurstServer::~ExecutionBurstServer() {
     mWorker.join();
 }
 
-Return<void> ExecutionBurstServer::freeMemory(int32_t slot) {
+Return<void> Burst::freeMemory(int32_t slot) {
     mMemoryCache.removeCacheEntry(slot);
     return Void();
 }
 
-void ExecutionBurstServer::task() {
+void Burst::task() {
     // loop until the burst object is being destroyed
     while (!mTeardown) {
         // receive request

@@ -208,12 +203,12 @@ void ExecutionBurstServer::task() {
         // if the burst is being torn down, skip the execution so the "task" function can end
         if (!arguments.has_value()) {
             if (!mTeardown) {
-                mResultChannelSender->send(V1_0::ErrorStatus::GENERAL_FAILURE, {}, kNoTiming);
+                mResultChannelSender->send(V1_0::ErrorStatus::GENERAL_FAILURE, {}, kTiming);
             }
             continue;
         }
 
-        // unpack the arguments; types are Request, std::vector<int32_t>, and MeasureTiming,
+        // unpack the arguments; types are Request, std::vector<int32_t>, and V1_2::MeasureTiming,
        // respectively
        const auto [requestWithoutPools, slotsOfPools, measure] = std::move(arguments).value();
 

@@ -226,17 +221,17 @@ void ExecutionBurstServer::task() {
         } else {
             const auto& [message, code, outputShapes] = result.error();
             LOG(ERROR) << "IBurst::execute failed with " << code << ": " << message;
-            mResultChannelSender->send(convert(code).value(), convert(outputShapes).value(),
-                                       kNoTiming);
+            mResultChannelSender->send(V1_2::utils::convert(code).value(),
+                                       V1_2::utils::convert(outputShapes).value(), kTiming);
         }
     }
 }
 
-nn::ExecutionResult<std::pair<hidl_vec<OutputShape>, Timing>> ExecutionBurstServer::execute(
+nn::ExecutionResult<std::pair<hidl_vec<V1_2::OutputShape>, V1_2::Timing>> Burst::execute(
        const V1_0::Request& requestWithoutPools, const std::vector<int32_t>& slotsOfPools,
-        MeasureTiming measure) {
+        V1_2::MeasureTiming measure) {
     NNTRACE_FULL(NNTRACE_LAYER_IPC, NNTRACE_PHASE_EXECUTION,
-                 "ExecutionBurstServer getting memory, executing, and returning results");
+                 "Burst getting memory, executing, and returning results");
 
     // ensure executor with cache has required memory
     const auto cacheEntries = NN_TRY(mMemoryCache.getCacheEntries(slotsOfPools));

@@ -257,7 +252,8 @@ nn::ExecutionResult<std::pair<hidl_vec<OutputShape>, Timing>> ExecutionBurstServ
     const auto [outputShapes, timing] =
            NN_TRY(mBurstExecutor->execute(canonicalRequest, canonicalMeasure, {}, {}));
 
-    return std::make_pair(NN_TRY(convert(outputShapes)), NN_TRY(convert(timing)));
+    return std::make_pair(NN_TRY(V1_2::utils::convert(outputShapes)),
+                          NN_TRY(V1_2::utils::convert(timing)));
 }
 
-} // namespace android::hardware::neuralnetworks::V1_2::utils
+} // namespace android::hardware::neuralnetworks::adapter
@@ -16,7 +16,8 @@
 
 #include "PreparedModel.h"
 
-#include <ExecutionBurstServer.h>
+#include "Burst.h"
+
 #include <android-base/logging.h>
 #include <android/hardware/neuralnetworks/1.0/IExecutionCallback.h>
 #include <android/hardware/neuralnetworks/1.0/types.h>

@@ -272,6 +273,15 @@ nn::GeneralResult<std::vector<nn::SyncFence>> convertSyncFences(
     return syncFences;
 }
 
+nn::GeneralResult<sp<V1_2::IBurstContext>> configureExecutionBurst(
+        const nn::SharedPreparedModel& preparedModel, const sp<V1_2::IBurstCallback>& callback,
+        const MQDescriptorSync<V1_2::FmqRequestDatum>& requestChannel,
+        const MQDescriptorSync<V1_2::FmqResultDatum>& resultChannel) {
+    auto burstExecutor = NN_TRY(preparedModel->configureExecutionBurst());
+    return Burst::create(callback, requestChannel, resultChannel, std::move(burstExecutor),
+                         V1_2::utils::getBurstServerPollingTimeWindow());
+}
+
 nn::GeneralResult<std::pair<hidl_handle, sp<V1_3::IFencedExecutionCallback>>> executeFenced(
         const nn::SharedPreparedModel& preparedModel, const V1_3::Request& request,
         const hidl_vec<hidl_handle>& waitFor, V1_2::MeasureTiming measure,

@@ -388,14 +398,17 @@ Return<void> PreparedModel::configureExecutionBurst(
         const MQDescriptorSync<V1_2::FmqRequestDatum>& requestChannel,
         const MQDescriptorSync<V1_2::FmqResultDatum>& resultChannel,
         configureExecutionBurst_cb cb) {
-    const sp<V1_2::IBurstContext> burst = nn::ExecutionBurstServer::create(
-            callback, requestChannel, resultChannel, this, std::chrono::microseconds{0});
-
-    if (burst == nullptr) {
-        cb(V1_0::ErrorStatus::GENERAL_FAILURE, {});
-    } else {
-        cb(V1_0::ErrorStatus::NONE, burst);
+    auto result = adapter::configureExecutionBurst(kPreparedModel, callback, requestChannel,
+                                                   resultChannel);
+    if (!result.has_value()) {
+        auto [message, code] = std::move(result).error();
+        LOG(ERROR) << "adapter::PreparedModel::configureExecutionBurst failed with " << code << ": "
+                   << message;
+        cb(V1_2::utils::convert(code).value(), nullptr);
+        return Void();
     }
+    auto burstContext = std::move(result).value();
+    cb(V1_0::ErrorStatus::NONE, std::move(burstContext));
     return Void();
 }
 