diff --git a/neuralnetworks/utils/adapter/Android.bp b/neuralnetworks/utils/adapter/Android.bp
new file mode 100644
index 0000000000..e8dc3e7dd7
--- /dev/null
+++ b/neuralnetworks/utils/adapter/Android.bp
@@ -0,0 +1,37 @@
+//
+// Copyright (C) 2020 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+cc_library_static {
+    name: "neuralnetworks_utils_hal_adapter",
+    defaults: ["neuralnetworks_utils_defaults"],
+    srcs: ["src/*"],
+    local_include_dirs: ["include/nnapi/hal"],
+    export_include_dirs: ["include"],
+    static_libs: [
+        "neuralnetworks_types",
+        "neuralnetworks_utils_hal_1_0",
+        "neuralnetworks_utils_hal_1_1",
+        "neuralnetworks_utils_hal_1_2",
+        "neuralnetworks_utils_hal_1_3",
+    ],
+    shared_libs: [
+        "android.hardware.neuralnetworks@1.0",
+        "android.hardware.neuralnetworks@1.1",
+        "android.hardware.neuralnetworks@1.2",
+        "android.hardware.neuralnetworks@1.3",
+        "libfmq",
+    ],
+}
diff --git a/neuralnetworks/utils/adapter/include/nnapi/hal/Adapter.h b/neuralnetworks/utils/adapter/include/nnapi/hal/Adapter.h
new file mode 100644
index 0000000000..da00a090ed
--- /dev/null
+++ b/neuralnetworks/utils/adapter/include/nnapi/hal/Adapter.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_ADAPTER_ADAPTER_H
+#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_ADAPTER_ADAPTER_H
+
+#include <android/hardware/neuralnetworks/1.3/IDevice.h>
+#include <nnapi/IDevice.h>
+#include <nnapi/Types.h>
+#include <sys/types.h>
+#include <functional>
+#include <memory>
+
+// See hardware/interfaces/neuralnetworks/utils/README.md for more information on HIDL interface
+// lifetimes across processes and for protecting asynchronous calls across HIDL.
+
+namespace android::hardware::neuralnetworks::adapter {
+
+/**
+ * A self-contained unit of work to be executed.
+ */
+using Task = std::function<void()>;
+
+/**
+ * A type-erased executor which executes a task asynchronously.
+ *
+ * This executor is also provided with an Application ID (Android User ID) and an optional
+ * deadline, which the caller expects to be an upper bound on how long the task will take to
+ * complete.
+ */
+using Executor = std::function<void(Task, uid_t, nn::OptionalTimePoint)>;
+
+/**
+ * Adapt an NNAPI canonical interface object to a HIDL NN HAL interface object.
+ *
+ * The IPreparedModel object created from IDevice::prepareModel or IDevice::preparedModelFromCache
+ * must return "const nn::Model*" from IPreparedModel::getUnderlyingResource().
+ *
+ * @param device NNAPI canonical IDevice interface object to be adapted.
+ * @param executor Type-erased executor to handle executing tasks asynchronously.
+ * @return HIDL NN HAL IDevice interface object.
+ */
+sp<V1_3::IDevice> adapt(nn::SharedDevice device, Executor executor);
+
+/**
+ * Adapt an NNAPI canonical interface object to a HIDL NN HAL interface object.
+ *
+ * The IPreparedModel object created from IDevice::prepareModel or IDevice::preparedModelFromCache
+ * must return "const nn::Model*" from IPreparedModel::getUnderlyingResource().
+ *
+ * This function uses a default executor, which will execute tasks from a detached thread.
+ *
+ * @param device NNAPI canonical IDevice interface object to be adapted.
+ * @return HIDL NN HAL IDevice interface object.
+ */
+sp<V1_3::IDevice> adapt(nn::SharedDevice device);
+
+}  // namespace android::hardware::neuralnetworks::adapter
+
+#endif  // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_ADAPTER_ADAPTER_H
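The Task/Executor pair above is the extension point of this library: every potentially long-running HAL call (model preparation, asynchronous execution) is handed to the injected Executor. Below is a minimal sketch, not part of this change, of a conforming executor that serializes all tasks onto one worker thread; "SerialExecutor" is a hypothetical name, and it ignores the user ID and deadline, which a real executor might use for prioritization or load shedding.

// Illustrative sketch (not part of this change): a conforming Executor that runs
// all submitted tasks in FIFO order on a single worker thread.
#include <nnapi/hal/Adapter.h>

#include <condition_variable>
#include <deque>
#include <mutex>
#include <thread>

namespace adapter = android::hardware::neuralnetworks::adapter;

class SerialExecutor {
  public:
    SerialExecutor() : mWorker([this] { loop(); }) {}

    ~SerialExecutor() {
        {
            std::lock_guard<std::mutex> guard(mMutex);
            mDone = true;
        }
        mCondition.notify_one();
        mWorker.join();
    }

    // Matches the adapter::Executor call signature. The user id and deadline are
    // ignored in this sketch.
    void operator()(adapter::Task task, uid_t /*userId*/,
                    android::nn::OptionalTimePoint /*deadline*/) {
        {
            std::lock_guard<std::mutex> guard(mMutex);
            mTasks.push_back(std::move(task));
        }
        mCondition.notify_one();
    }

  private:
    void loop() {
        while (true) {
            adapter::Task task;
            {
                std::unique_lock<std::mutex> lock(mMutex);
                mCondition.wait(lock, [this] { return mDone || !mTasks.empty(); });
                if (mDone && mTasks.empty()) return;
                task = std::move(mTasks.front());
                mTasks.pop_front();
            }
            task();  // Run outside the lock so new tasks can be enqueued meanwhile.
        }
    }

    std::mutex mMutex;
    std::condition_variable mCondition;
    std::deque<adapter::Task> mTasks;
    bool mDone = false;
    std::thread mWorker;  // Declared last so loop() only sees fully-initialized members.
};

// Usage (hypothetical; the executor must outlive the adapted device):
//     SerialExecutor serialExecutor;
//     auto hidlDevice = adapter::adapt(std::move(canonicalDevice), std::ref(serialExecutor));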
diff --git a/neuralnetworks/utils/adapter/include/nnapi/hal/Buffer.h b/neuralnetworks/utils/adapter/include/nnapi/hal/Buffer.h
new file mode 100644
index 0000000000..e53c7d4f09
--- /dev/null
+++ b/neuralnetworks/utils/adapter/include/nnapi/hal/Buffer.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_ADAPTER_BUFFER_H
+#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_ADAPTER_BUFFER_H
+
+#include <android/hardware/neuralnetworks/1.3/IBuffer.h>
+#include <android/hardware/neuralnetworks/1.3/types.h>
+#include <nnapi/IBuffer.h>
+#include <nnapi/Types.h>
+#include <memory>
+
+// See hardware/interfaces/neuralnetworks/utils/README.md for more information on HIDL interface
+// lifetimes across processes and for protecting asynchronous calls across HIDL.
+
+namespace android::hardware::neuralnetworks::adapter {
+
+// Class that adapts nn::IBuffer to V1_3::IBuffer.
+class Buffer final : public V1_3::IBuffer {
+  public:
+    explicit Buffer(nn::SharedBuffer buffer);
+
+    Return<V1_3::ErrorStatus> copyTo(const hidl_memory& dst) override;
+    Return<V1_3::ErrorStatus> copyFrom(const hidl_memory& src,
+                                       const hidl_vec<uint32_t>& dimensions) override;
+
+  private:
+    const nn::SharedBuffer kBuffer;
+};
+
+}  // namespace android::hardware::neuralnetworks::adapter
+
+#endif  // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_ADAPTER_BUFFER_H
diff --git a/neuralnetworks/utils/adapter/include/nnapi/hal/Device.h b/neuralnetworks/utils/adapter/include/nnapi/hal/Device.h
new file mode 100644
index 0000000000..148d0a0341
--- /dev/null
+++ b/neuralnetworks/utils/adapter/include/nnapi/hal/Device.h
@@ -0,0 +1,96 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_ADAPTER_DEVICE_H
+#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_ADAPTER_DEVICE_H
+
+#include "nnapi/hal/Adapter.h"
+
+#include <android/hardware/neuralnetworks/1.0/types.h>
+#include <android/hardware/neuralnetworks/1.1/types.h>
+#include <android/hardware/neuralnetworks/1.2/types.h>
+#include <android/hardware/neuralnetworks/1.3/IDevice.h>
+#include <android/hardware/neuralnetworks/1.3/types.h>
+#include <hidl/HidlSupport.h>
+#include <hidl/Status.h>
+#include <nnapi/IDevice.h>
+#include <nnapi/Types.h>
+#include <sys/types.h>
+#include <memory>
+
+// See hardware/interfaces/neuralnetworks/utils/README.md for more information on HIDL interface
+// lifetimes across processes and for protecting asynchronous calls across HIDL.
+
+namespace android::hardware::neuralnetworks::adapter {
+
+using CacheToken = hidl_array<uint8_t, nn::kByteSizeOfCacheToken>;
+
+// Class that adapts nn::IDevice to V1_3::IDevice.
+class Device final : public V1_3::IDevice {
+  public:
+    Device(nn::SharedDevice device, Executor executor);
+
+    Return<void> getCapabilities(getCapabilities_cb cb) override;
+    Return<void> getCapabilities_1_1(getCapabilities_1_1_cb cb) override;
+    Return<void> getCapabilities_1_2(getCapabilities_1_2_cb cb) override;
+    Return<void> getCapabilities_1_3(getCapabilities_1_3_cb cb) override;
+    Return<void> getVersionString(getVersionString_cb cb) override;
+    Return<void> getType(getType_cb cb) override;
+    Return<void> getSupportedExtensions(getSupportedExtensions_cb) override;
+    Return<void> getSupportedOperations(const V1_0::Model& model,
+                                        getSupportedOperations_cb cb) override;
+    Return<void> getSupportedOperations_1_1(const V1_1::Model& model,
+                                            getSupportedOperations_1_1_cb cb) override;
+    Return<void> getSupportedOperations_1_2(const V1_2::Model& model,
+                                            getSupportedOperations_1_2_cb cb) override;
+    Return<void> getSupportedOperations_1_3(const V1_3::Model& model,
+                                            getSupportedOperations_1_3_cb cb) override;
+    Return<void> getNumberOfCacheFilesNeeded(getNumberOfCacheFilesNeeded_cb cb) override;
+    Return<V1_0::ErrorStatus> prepareModel(
+            const V1_0::Model& model, const sp<V1_0::IPreparedModelCallback>& callback) override;
+    Return<V1_0::ErrorStatus> prepareModel_1_1(
+            const V1_1::Model& model, V1_1::ExecutionPreference preference,
+            const sp<V1_0::IPreparedModelCallback>& callback) override;
+    Return<V1_0::ErrorStatus> prepareModel_1_2(
+            const V1_2::Model& model, V1_1::ExecutionPreference preference,
+            const hidl_vec<hidl_handle>& modelCache, const hidl_vec<hidl_handle>& dataCache,
+            const CacheToken& token, const sp<V1_2::IPreparedModelCallback>& callback) override;
+    Return<V1_3::ErrorStatus> prepareModel_1_3(
+            const V1_3::Model& model, V1_1::ExecutionPreference preference, V1_3::Priority priority,
+            const V1_3::OptionalTimePoint& deadline, const hidl_vec<hidl_handle>& modelCache,
+            const hidl_vec<hidl_handle>& dataCache, const CacheToken& token,
+            const sp<V1_3::IPreparedModelCallback>& callback) override;
+    Return<V1_0::ErrorStatus> prepareModelFromCache(
+            const hidl_vec<hidl_handle>& modelCache, const hidl_vec<hidl_handle>& dataCache,
+            const CacheToken& token, const sp<V1_2::IPreparedModelCallback>& callback) override;
+    Return<V1_3::ErrorStatus> prepareModelFromCache_1_3(
+            const V1_3::OptionalTimePoint& deadline, const hidl_vec<hidl_handle>& modelCache,
+            const hidl_vec<hidl_handle>& dataCache, const CacheToken& token,
+            const sp<V1_3::IPreparedModelCallback>& callback) override;
+    Return<V1_0::DeviceStatus> getStatus() override;
+    Return<void> allocate(const V1_3::BufferDesc& desc,
+                          const hidl_vec<sp<V1_3::IPreparedModel>>& preparedModels,
+                          const hidl_vec<V1_3::BufferRole>& inputRoles,
+                          const hidl_vec<V1_3::BufferRole>& outputRoles, allocate_cb cb) override;
+
+  private:
+    const nn::SharedDevice kDevice;
+    const Executor kExecutor;
+};
+
+}  // namespace android::hardware::neuralnetworks::adapter
+
+#endif  // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_ADAPTER_DEVICE_H
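With Device declared, the typical use of this library is to wrap a canonical driver implementation and publish it as a versioned HIDL service. The following is a hedged sketch of such a service entry point, not part of this change: getExampleCanonicalDevice() and the "example" instance name are hypothetical, and thread-pool sizing depends on the HAL process.

// Illustrative sketch (not part of this change): publishing an adapted canonical
// device as an android.hardware.neuralnetworks@1.3::IDevice service.
#include <hidl/HidlTransportSupport.h>
#include <nnapi/IDevice.h>
#include <nnapi/hal/Adapter.h>

using android::sp;
using android::hardware::configureRpcThreadpool;
using android::hardware::joinRpcThreadpool;
namespace adapter = android::hardware::neuralnetworks::adapter;
namespace V1_3 = android::hardware::neuralnetworks::V1_3;

android::nn::SharedDevice getExampleCanonicalDevice();  // Hypothetical, provided elsewhere.

int main() {
    android::nn::SharedDevice canonicalDevice = getExampleCanonicalDevice();
    sp<V1_3::IDevice> hidlDevice = adapter::adapt(std::move(canonicalDevice));

    configureRpcThreadpool(4, /*callerWillJoin=*/true);
    if (hidlDevice->registerAsService("example") != android::OK) {
        return 1;  // Registration failed (e.g., missing VINTF entry or permissions).
    }
    joinRpcThreadpool();
    return 0;  // joinRpcThreadpool does not return under normal operation.
}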
diff --git a/neuralnetworks/utils/adapter/include/nnapi/hal/PreparedModel.h b/neuralnetworks/utils/adapter/include/nnapi/hal/PreparedModel.h
new file mode 100644
index 0000000000..65763b8d19
--- /dev/null
+++ b/neuralnetworks/utils/adapter/include/nnapi/hal/PreparedModel.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_ADAPTER_PREPARED_MODEL_H
+#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_ADAPTER_PREPARED_MODEL_H
+
+#include "nnapi/hal/Adapter.h"
+
+#include <android/hardware/neuralnetworks/1.0/types.h>
+#include <android/hardware/neuralnetworks/1.2/types.h>
+#include <android/hardware/neuralnetworks/1.3/IPreparedModel.h>
+#include <android/hardware/neuralnetworks/1.3/types.h>
+#include <fmq/MessageQueue.h>
+#include <hidl/MQDescriptor.h>
+#include <hidl/Status.h>
+#include <nnapi/IPreparedModel.h>
+#include <nnapi/Types.h>
+#include <sys/types.h>
+#include <memory>
+
+// See hardware/interfaces/neuralnetworks/utils/README.md for more information on HIDL interface
+// lifetimes across processes and for protecting asynchronous calls across HIDL.
+
+namespace android::hardware::neuralnetworks::adapter {
+
+// Class that adapts nn::IPreparedModel to V1_3::IPreparedModel.
+class PreparedModel final : public V1_3::IPreparedModel {
+  public:
+    PreparedModel(nn::SharedPreparedModel preparedModel, Executor executor, uid_t userId);
+
+    Return<V1_0::ErrorStatus> execute(const V1_0::Request& request,
+                                      const sp<V1_0::IExecutionCallback>& callback) override;
+    Return<V1_0::ErrorStatus> execute_1_2(const V1_0::Request& request,
+                                          V1_2::MeasureTiming measure,
+                                          const sp<V1_2::IExecutionCallback>& callback) override;
+    Return<V1_3::ErrorStatus> execute_1_3(const V1_3::Request& request,
+                                          V1_2::MeasureTiming measure,
+                                          const V1_3::OptionalTimePoint& deadline,
+                                          const V1_3::OptionalTimeoutDuration& loopTimeoutDuration,
+                                          const sp<V1_3::IExecutionCallback>& callback) override;
+    Return<void> executeSynchronously(const V1_0::Request& request, V1_2::MeasureTiming measure,
+                                      executeSynchronously_cb cb) override;
+    Return<void> executeSynchronously_1_3(const V1_3::Request& request,
+                                          V1_2::MeasureTiming measure,
+                                          const V1_3::OptionalTimePoint& deadline,
+                                          const V1_3::OptionalTimeoutDuration& loopTimeoutDuration,
+                                          executeSynchronously_1_3_cb cb) override;
+    Return<void> configureExecutionBurst(
+            const sp<V1_2::IBurstCallback>& callback,
+            const MQDescriptorSync<V1_2::FmqRequestDatum>& requestChannel,
+            const MQDescriptorSync<V1_2::FmqResultDatum>& resultChannel,
+            configureExecutionBurst_cb cb) override;
+    Return<void> executeFenced(const V1_3::Request& request, const hidl_vec<hidl_handle>& waitFor,
+                               V1_2::MeasureTiming measure, const V1_3::OptionalTimePoint& deadline,
+                               const V1_3::OptionalTimeoutDuration& loopTimeoutDuration,
+                               const V1_3::OptionalTimeoutDuration& duration,
+                               executeFenced_cb callback) override;
+
+    nn::SharedPreparedModel getUnderlyingPreparedModel() const;
+
+  private:
+    const nn::SharedPreparedModel kPreparedModel;
+    const Executor kExecutor;
+    const uid_t kUserId;
+};
+
+}  // namespace android::hardware::neuralnetworks::adapter
+
+#endif  // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_ADAPTER_PREPARED_MODEL_H
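The asynchronous entry points above (execute, execute_1_2, execute_1_3) report completion through HIDL callback interfaces rather than return values, so a caller supplies an object like the following client-side sketch. It is illustrative only and not part of this change; "WaitingExecutionCallback" is a hypothetical name.

// Illustrative sketch (not part of this change): a client-side callback for the
// asynchronous V1_0 execute() path that blocks until a terminal status arrives.
#include <android/hardware/neuralnetworks/1.0/IExecutionCallback.h>

#include <condition_variable>
#include <mutex>
#include <optional>

namespace V1_0 = android::hardware::neuralnetworks::V1_0;
using android::hardware::Return;
using android::hardware::Void;

class WaitingExecutionCallback : public V1_0::IExecutionCallback {
  public:
    Return<void> notify(V1_0::ErrorStatus status) override {
        {
            std::lock_guard<std::mutex> guard(mMutex);
            mStatus = status;
        }
        mCondition.notify_all();
        return Void();
    }

    // Blocks until notify() has been called, then returns the reported status.
    V1_0::ErrorStatus wait() {
        std::unique_lock<std::mutex> lock(mMutex);
        mCondition.wait(lock, [this] { return mStatus.has_value(); });
        return *mStatus;
    }

  private:
    std::mutex mMutex;
    std::condition_variable mCondition;
    std::optional<V1_0::ErrorStatus> mStatus;
};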
diff --git a/neuralnetworks/utils/adapter/src/Adapter.cpp b/neuralnetworks/utils/adapter/src/Adapter.cpp
new file mode 100644
index 0000000000..d6f53f05a5
--- /dev/null
+++ b/neuralnetworks/utils/adapter/src/Adapter.cpp
@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Adapter.h"
+
+#include "Device.h"
+
+#include <android/hardware/neuralnetworks/1.3/IDevice.h>
+#include <nnapi/IDevice.h>
+#include <nnapi/Types.h>
+#include <sys/types.h>
+
+#include <functional>
+#include <memory>
+#include <thread>
+
+// See hardware/interfaces/neuralnetworks/utils/README.md for more information on HIDL interface
+// lifetimes across processes and for protecting asynchronous calls across HIDL.
+
+namespace android::hardware::neuralnetworks::adapter {
+
+sp<V1_3::IDevice> adapt(nn::SharedDevice device, Executor executor) {
+    return sp<Device>::make(std::move(device), std::move(executor));
+}
+
+sp<V1_3::IDevice> adapt(nn::SharedDevice device) {
+    Executor defaultExecutor = [](Task task, uid_t /*uid*/, nn::OptionalTimePoint /*deadline*/) {
+        std::thread(std::move(task)).detach();
+    };
+    return adapt(std::move(device), std::move(defaultExecutor));
+}
+
+}  // namespace android::hardware::neuralnetworks::adapter
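The default executor above detaches a new thread per task, so completion order is unspecified and there is no backpressure. Any callable with the right signature can be substituted; the simplest conforming alternative, sketched below under the assumption that it is acceptable to block the calling binder thread for the duration of the task, runs each task inline. "adaptInline" is a hypothetical helper, not part of this change.

// Illustrative sketch (not part of this change): an Executor that runs each task
// synchronously on the calling thread.
#include <nnapi/hal/Adapter.h>

namespace adapter = android::hardware::neuralnetworks::adapter;

android::sp<android::hardware::neuralnetworks::V1_3::IDevice> adaptInline(
        android::nn::SharedDevice canonicalDevice) {
    adapter::Executor inlineExecutor = [](adapter::Task task, uid_t /*uid*/,
                                          android::nn::OptionalTimePoint /*deadline*/) {
        task();  // No concurrency: the HIDL call does not return until the task finishes.
    };
    return adapter::adapt(std::move(canonicalDevice), std::move(inlineExecutor));
}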
diff --git a/neuralnetworks/utils/adapter/src/Buffer.cpp b/neuralnetworks/utils/adapter/src/Buffer.cpp
new file mode 100644
index 0000000000..3a04bf6b79
--- /dev/null
+++ b/neuralnetworks/utils/adapter/src/Buffer.cpp
@@ -0,0 +1,83 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Buffer.h"
+
+#include <android-base/logging.h>
+#include <android/hardware/neuralnetworks/1.3/IBuffer.h>
+#include <android/hardware/neuralnetworks/1.3/types.h>
+#include <nnapi/IBuffer.h>
+#include <nnapi/Result.h>
+#include <nnapi/Types.h>
+#include <nnapi/hal/1.3/Conversions.h>
+#include <nnapi/hal/1.3/Utils.h>
+
+// See hardware/interfaces/neuralnetworks/utils/README.md for more information on HIDL interface
+// lifetimes across processes and for protecting asynchronous calls across HIDL.
+
+namespace android::hardware::neuralnetworks::adapter {
+namespace {
+
+template <typename Type>
+auto convertInput(const Type& object) -> decltype(nn::convert(std::declval<Type>())) {
+    auto result = nn::convert(object);
+    if (!result.has_value()) {
+        result.error().code = nn::ErrorStatus::INVALID_ARGUMENT;
+    }
+    return result;
+}
+
+nn::GeneralResult<void> copyTo(const nn::SharedBuffer& buffer, const hidl_memory& dst) {
+    const auto memory = NN_TRY(convertInput(dst));
+    NN_TRY(buffer->copyTo(memory));
+    return {};
+}
+
+nn::GeneralResult<void> copyFrom(const nn::SharedBuffer& buffer, const hidl_memory& src,
+                                 const hidl_vec<uint32_t>& dimensions) {
+    const auto memory = NN_TRY(convertInput(src));
+    NN_TRY(buffer->copyFrom(memory, dimensions));
+    return {};
+}
+
+}  // namespace
+
+Buffer::Buffer(nn::SharedBuffer buffer) : kBuffer(std::move(buffer)) {
+    CHECK(kBuffer != nullptr);
+}
+
+Return<V1_3::ErrorStatus> Buffer::copyTo(const hidl_memory& dst) {
+    auto result = adapter::copyTo(kBuffer, dst);
+    if (!result.has_value()) {
+        const auto [message, code] = std::move(result).error();
+        LOG(ERROR) << "adapter::Buffer::copyTo failed with " << code << ": " << message;
+        return V1_3::utils::convert(code).value();
+    }
+    return V1_3::ErrorStatus::NONE;
+}
+
+Return<V1_3::ErrorStatus> Buffer::copyFrom(const hidl_memory& src,
+                                           const hidl_vec<uint32_t>& dimensions) {
+    auto result = adapter::copyFrom(kBuffer, src, dimensions);
+    if (!result.has_value()) {
+        const auto [message, code] = std::move(result).error();
+        LOG(ERROR) << "adapter::Buffer::copyFrom failed with " << code << ": " << message;
+        return V1_3::utils::convert(code).value();
+    }
+    return V1_3::ErrorStatus::NONE;
+}
+
+}  // namespace android::hardware::neuralnetworks::adapter
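The source files in this change lean heavily on the nn::GeneralResult/NN_TRY idiom from nnapi/Result.h: NN_TRY unwraps a successful result or early-returns the error, and convertInput additionally downgrades conversion failures to INVALID_ARGUMENT so that malformed HIDL input is reported as a caller error rather than a driver fault. A toy illustration of the idiom, not part of this change and assuming only nnapi/Result.h, follows.

// Toy illustration (not part of this change) of the nn::GeneralResult/NN_TRY idiom.
#include <nnapi/Result.h>

namespace nn = android::nn;

nn::GeneralResult<int> parsePositive(int value) {
    if (value < 0) {
        // NN_ERROR builds an error result; the streamed text becomes the message.
        return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT) << "negative value " << value;
    }
    return value;
}

nn::GeneralResult<int> doubled(int value) {
    // NN_TRY either yields the contained value or propagates the error out of
    // this function, mirroring how convertInput results are consumed above.
    const int parsed = NN_TRY(parsePositive(value));
    return parsed * 2;
}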
diff --git a/neuralnetworks/utils/adapter/src/Device.cpp b/neuralnetworks/utils/adapter/src/Device.cpp
new file mode 100644
index 0000000000..96142c3577
--- /dev/null
+++ b/neuralnetworks/utils/adapter/src/Device.cpp
@@ -0,0 +1,556 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Device.h"
+
+#include "Buffer.h"
+#include "PreparedModel.h"
+
+#include <android-base/logging.h>
+#include <android/hardware/neuralnetworks/1.0/IPreparedModelCallback.h>
+#include <android/hardware/neuralnetworks/1.0/types.h>
+#include <android/hardware/neuralnetworks/1.1/types.h>
+#include <android/hardware/neuralnetworks/1.2/IPreparedModelCallback.h>
+#include <android/hardware/neuralnetworks/1.2/types.h>
+#include <android/hardware/neuralnetworks/1.3/IPreparedModelCallback.h>
+#include <android/hardware/neuralnetworks/1.3/types.h>
+#include <hwbinder/IPCThreadState.h>
+#include <nnapi/IBuffer.h>
+#include <nnapi/IDevice.h>
+#include <nnapi/IPreparedModel.h>
+#include <nnapi/Result.h>
+#include <nnapi/TypeUtils.h>
+#include <nnapi/Types.h>
+#include <nnapi/Validation.h>
+#include <nnapi/hal/1.0/Conversions.h>
+#include <nnapi/hal/1.0/Utils.h>
+#include <nnapi/hal/1.1/Conversions.h>
+#include <nnapi/hal/1.1/Utils.h>
+#include <nnapi/hal/1.2/Conversions.h>
+#include <nnapi/hal/1.2/Utils.h>
+#include <nnapi/hal/1.3/Conversions.h>
+#include <nnapi/hal/1.3/Utils.h>
+#include <sys/types.h>
+
+#include <memory>
+
+// See hardware/interfaces/neuralnetworks/utils/README.md for more information on HIDL interface
+// lifetimes across processes and for protecting asynchronous calls across HIDL.
+
+namespace android::hardware::neuralnetworks::adapter {
+namespace {
+
+template <typename Type>
+auto convertInput(const Type& object) -> decltype(nn::convert(std::declval<Type>())) {
+    auto result = nn::convert(object);
+    if (!result.has_value()) {
+        result.error().code = nn::ErrorStatus::INVALID_ARGUMENT;
+    }
+    return result;
+}
+
+using PrepareModelResult = nn::GeneralResult<nn::SharedPreparedModel>;
+
+sp<PreparedModel> adaptPreparedModel(nn::SharedPreparedModel preparedModel, Executor executor,
+                                     uid_t userId) {
+    if (preparedModel == nullptr) {
+        return nullptr;
+    }
+    return sp<PreparedModel>::make(std::move(preparedModel), std::move(executor), userId);
+}
+
+void notify(V1_0::IPreparedModelCallback* callback, nn::ErrorStatus status,
+            const sp<PreparedModel>& hidlPreparedModel) {
+    if (callback != nullptr) {
+        const auto hidlStatus = V1_0::utils::convert(status).value();
+        const auto ret = callback->notify(hidlStatus, hidlPreparedModel);
+        if (!ret.isOk()) {
+            LOG(ERROR) << "V1_0::IPreparedModelCallback::notify failed with " << ret.description();
+        }
+    }
+}
+
+void notify(V1_2::IPreparedModelCallback* callback, nn::ErrorStatus status,
+            const sp<PreparedModel>& hidlPreparedModel) {
+    if (callback != nullptr) {
+        const auto hidlStatus = V1_2::utils::convert(status).value();
+        const auto ret = callback->notify_1_2(hidlStatus, hidlPreparedModel);
+        if (!ret.isOk()) {
+            LOG(ERROR) << "V1_2::IPreparedModelCallback::notify_1_2 failed with "
+                       << ret.description();
+        }
+    }
+}
+
+void notify(V1_3::IPreparedModelCallback* callback, nn::ErrorStatus status,
+            const sp<PreparedModel>& hidlPreparedModel) {
+    if (callback != nullptr) {
+        const auto hidlStatus = V1_3::utils::convert(status).value();
+        const auto ret = callback->notify_1_3(hidlStatus, hidlPreparedModel);
+        if (!ret.isOk()) {
+            LOG(ERROR) << "V1_3::IPreparedModelCallback::notify_1_3 failed with "
+                       << ret.description();
+        }
+    }
+}
+
+template <typename CallbackType>
+void notify(CallbackType* callback, PrepareModelResult result, Executor executor, uid_t userId) {
+    if (!result.has_value()) {
+        const auto [message, status] = std::move(result).error();
+        LOG(ERROR) << message;
+        notify(callback, status, nullptr);
+    } else {
+        auto preparedModel = std::move(result).value();
+        auto hidlPreparedModel =
+                adaptPreparedModel(std::move(preparedModel), std::move(executor), userId);
+        notify(callback, nn::ErrorStatus::NONE, std::move(hidlPreparedModel));
+    }
+}
+
+template <typename ModelType>
+nn::GeneralResult<std::vector<bool>> getSupportedOperations(const nn::SharedDevice& device,
+                                                            const ModelType& model) {
+    const auto nnModel = NN_TRY(convertInput(model));
+    return NN_TRY(device->getSupportedOperations(nnModel));
+}
+
+nn::GeneralResult<void> prepareModel(const nn::SharedDevice& device, const Executor& executor,
+                                     const V1_0::Model& model,
+                                     const sp<V1_0::IPreparedModelCallback>& callback) {
+    if (callback.get() == nullptr) {
+        return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT) << "Invalid callback";
+    }
+
+    auto nnModel = NN_TRY(convertInput(model));
+
+    const uid_t userId = hardware::IPCThreadState::self()->getCallingUid();
+    Task task = [device, nnModel = std::move(nnModel), userId, executor, callback] {
+        auto result = device->prepareModel(nnModel, nn::ExecutionPreference::DEFAULT,
+                                           nn::Priority::DEFAULT, {}, {}, {}, {});
+        notify(callback.get(), std::move(result), executor, userId);
+    };
+    executor(std::move(task), userId, {});
+
+    return {};
+}
+
+nn::GeneralResult<void> prepareModel_1_1(const nn::SharedDevice& device, const Executor& executor,
+                                         const V1_1::Model& model,
+                                         V1_1::ExecutionPreference preference,
+                                         const sp<V1_0::IPreparedModelCallback>& callback) {
+    if (callback.get() == nullptr) {
+        return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT) << "Invalid callback";
+    }
+
+    auto nnModel = NN_TRY(convertInput(model));
+    const auto nnPreference = NN_TRY(convertInput(preference));
+
+    const uid_t userId = hardware::IPCThreadState::self()->getCallingUid();
+    Task task = [device, nnModel = std::move(nnModel), nnPreference, userId, executor, callback] {
+        auto result =
+                device->prepareModel(nnModel, nnPreference, nn::Priority::DEFAULT, {}, {}, {}, {});
+        notify(callback.get(), std::move(result), executor, userId);
+    };
+    executor(std::move(task), userId, {});
+
+    return {};
+}
+
+nn::GeneralResult<void> prepareModel_1_2(const nn::SharedDevice& device, const Executor& executor,
+                                         const V1_2::Model& model,
+                                         V1_1::ExecutionPreference preference,
+                                         const hidl_vec<hidl_handle>& modelCache,
+                                         const hidl_vec<hidl_handle>& dataCache,
+                                         const CacheToken& token,
+                                         const sp<V1_2::IPreparedModelCallback>& callback) {
+    if (callback.get() == nullptr) {
+        return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT) << "Invalid callback";
+    }
+
+    auto nnModel = NN_TRY(convertInput(model));
+    const auto nnPreference = NN_TRY(convertInput(preference));
+    auto nnModelCache = NN_TRY(convertInput(modelCache));
+    auto nnDataCache = NN_TRY(convertInput(dataCache));
+    const auto nnToken = nn::CacheToken(token);
+
+    const uid_t userId = hardware::IPCThreadState::self()->getCallingUid();
+    Task task = [device, nnModel = std::move(nnModel), nnPreference,
+                 nnModelCache = std::move(nnModelCache), nnDataCache = std::move(nnDataCache),
+                 nnToken, userId, executor, callback] {
+        auto result = device->prepareModel(nnModel, nnPreference, nn::Priority::DEFAULT, {},
+                                           nnModelCache, nnDataCache, nnToken);
+        notify(callback.get(), std::move(result), executor, userId);
+    };
+    executor(std::move(task), userId, {});
+
+    return {};
+}
+
+nn::GeneralResult<void> prepareModel_1_3(
+        const nn::SharedDevice& device, const Executor& executor, const V1_3::Model& model,
+        V1_1::ExecutionPreference preference, V1_3::Priority priority,
+        const V1_3::OptionalTimePoint& deadline, const hidl_vec<hidl_handle>& modelCache,
+        const hidl_vec<hidl_handle>& dataCache, const CacheToken& token,
+        const sp<V1_3::IPreparedModelCallback>& callback) {
+    if (callback.get() == nullptr) {
+        return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT) << "Invalid callback";
+    }
+
+    auto nnModel = NN_TRY(convertInput(model));
+    const auto nnPreference = NN_TRY(convertInput(preference));
+    const auto nnPriority = NN_TRY(convertInput(priority));
+    const auto nnDeadline = NN_TRY(convertInput(deadline));
+    auto nnModelCache = NN_TRY(convertInput(modelCache));
+    auto nnDataCache = NN_TRY(convertInput(dataCache));
+    const auto nnToken = nn::CacheToken(token);
+
+    const uid_t userId = hardware::IPCThreadState::self()->getCallingUid();
+    Task task = [device, nnModel = std::move(nnModel), nnPreference, nnPriority, nnDeadline,
+                 nnModelCache = std::move(nnModelCache), nnDataCache = std::move(nnDataCache),
+                 nnToken, userId, executor, callback] {
+        auto result = device->prepareModel(nnModel, nnPreference, nnPriority, nnDeadline,
+                                           nnModelCache, nnDataCache, nnToken);
+        notify(callback.get(), std::move(result), executor, userId);
+    };
+    executor(std::move(task), userId, nnDeadline);
+
+    return {};
+}
+
+nn::GeneralResult<void> prepareModelFromCache(const nn::SharedDevice& device,
+                                              const Executor& executor,
+                                              const hidl_vec<hidl_handle>& modelCache,
+                                              const hidl_vec<hidl_handle>& dataCache,
+                                              const CacheToken& token,
+                                              const sp<V1_2::IPreparedModelCallback>& callback) {
+    if (callback.get() == nullptr) {
+        return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT) << "Invalid callback";
+    }
+
+    auto nnModelCache = NN_TRY(convertInput(modelCache));
+    auto nnDataCache = NN_TRY(convertInput(dataCache));
+    const auto nnToken = nn::CacheToken(token);
+
+    const uid_t userId = hardware::IPCThreadState::self()->getCallingUid();
+    Task task = [device, nnModelCache = std::move(nnModelCache),
+                 nnDataCache = std::move(nnDataCache), nnToken, userId, executor, callback] {
+        auto result = device->prepareModelFromCache({}, nnModelCache, nnDataCache, nnToken);
+        notify(callback.get(), std::move(result), executor, userId);
+    };
+    executor(std::move(task), userId, {});
+
+    return {};
+}
+
+nn::GeneralResult<void> prepareModelFromCache_1_3(
+        const nn::SharedDevice& device, const Executor& executor,
+        const V1_3::OptionalTimePoint& deadline, const hidl_vec<hidl_handle>& modelCache,
+        const hidl_vec<hidl_handle>& dataCache, const CacheToken& token,
+        const sp<V1_3::IPreparedModelCallback>& callback) {
+    if (callback.get() == nullptr) {
+        return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT) << "Invalid callback";
+    }
+
+    const auto nnDeadline = NN_TRY(convertInput(deadline));
+    auto nnModelCache = NN_TRY(convertInput(modelCache));
+    auto nnDataCache = NN_TRY(convertInput(dataCache));
+    const auto nnToken = nn::CacheToken(token);
+
+    const uid_t userId = hardware::IPCThreadState::self()->getCallingUid();
+    auto task = [device, nnDeadline, nnModelCache = std::move(nnModelCache),
+                 nnDataCache = std::move(nnDataCache), nnToken, userId, executor, callback] {
+        auto result = device->prepareModelFromCache(nnDeadline, nnModelCache, nnDataCache, nnToken);
+        notify(callback.get(), std::move(result), executor, userId);
+    };
+    executor(std::move(task), userId, nnDeadline);
+
+    return {};
+}
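Each prepareModel* helper above validates and converts its arguments on the binder thread, then defers the actual preparation to the Executor and reports the outcome through the HIDL prepared-model callback. A hedged client-side sketch of such a callback, not part of this change ("WaitingPreparedModelCallback" is a hypothetical name), is:

// Illustrative sketch (not part of this change): a client-side callback that
// captures the prepared model produced by the asynchronous prepareModel_1_3 path.
#include <android/hardware/neuralnetworks/1.3/IPreparedModelCallback.h>

#include <condition_variable>
#include <mutex>

namespace V1_0 = android::hardware::neuralnetworks::V1_0;
namespace V1_2 = android::hardware::neuralnetworks::V1_2;
namespace V1_3 = android::hardware::neuralnetworks::V1_3;
using android::sp;
using android::hardware::Return;
using android::hardware::Void;

class WaitingPreparedModelCallback : public V1_3::IPreparedModelCallback {
  public:
    Return<void> notify(V1_0::ErrorStatus, const sp<V1_0::IPreparedModel>&) override {
        // Not expected on the 1.3 path; treat as failure.
        return signal(V1_3::ErrorStatus::GENERAL_FAILURE, nullptr);
    }
    Return<void> notify_1_2(V1_0::ErrorStatus, const sp<V1_2::IPreparedModel>&) override {
        // Not expected on the 1.3 path; treat as failure.
        return signal(V1_3::ErrorStatus::GENERAL_FAILURE, nullptr);
    }
    Return<void> notify_1_3(V1_3::ErrorStatus status,
                            const sp<V1_3::IPreparedModel>& preparedModel) override {
        return signal(status, preparedModel);
    }

    // Blocks until a notify* call arrives; returns the prepared model (may be null).
    sp<V1_3::IPreparedModel> wait(V1_3::ErrorStatus* status) {
        std::unique_lock<std::mutex> lock(mMutex);
        mCondition.wait(lock, [this] { return mNotified; });
        *status = mStatus;
        return mPreparedModel;
    }

  private:
    Return<void> signal(V1_3::ErrorStatus status, const sp<V1_3::IPreparedModel>& preparedModel) {
        {
            std::lock_guard<std::mutex> guard(mMutex);
            mStatus = status;
            mPreparedModel = preparedModel;
            mNotified = true;
        }
        mCondition.notify_all();
        return Void();
    }

    std::mutex mMutex;
    std::condition_variable mCondition;
    bool mNotified = false;
    V1_3::ErrorStatus mStatus = V1_3::ErrorStatus::GENERAL_FAILURE;
    sp<V1_3::IPreparedModel> mPreparedModel;
};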
+
+nn::GeneralResult<nn::SharedPreparedModel> downcast(const sp<V1_3::IPreparedModel>& preparedModel) {
+    if (preparedModel == nullptr) {
+        return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT) << "preparedModel is nullptr";
+    }
+    if (preparedModel->isRemote()) {
+        return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT) << "Cannot convert remote models";
+    }
+
+    // This static_cast is safe because adapter::PreparedModel is the only class that implements
+    // the IPreparedModel interface in the adapter service code.
+    const auto* casted = static_cast<const PreparedModel*>(preparedModel.get());
+    return casted->getUnderlyingPreparedModel();
+}
+
+nn::GeneralResult<std::vector<nn::SharedPreparedModel>> downcastAll(
+        const hidl_vec<sp<V1_3::IPreparedModel>>& preparedModels) {
+    std::vector<nn::SharedPreparedModel> canonical;
+    canonical.reserve(preparedModels.size());
+    for (const auto& preparedModel : preparedModels) {
+        canonical.push_back(NN_TRY(downcast(preparedModel)));
+    }
+    return canonical;
+}
+
+nn::GeneralResult<std::pair<sp<V1_3::IBuffer>, uint32_t>> allocate(
+        const nn::SharedDevice& device, const V1_3::BufferDesc& desc,
+        const hidl_vec<sp<V1_3::IPreparedModel>>& preparedModels,
+        const hidl_vec<V1_3::BufferRole>& inputRoles,
+        const hidl_vec<V1_3::BufferRole>& outputRoles) {
+    auto nnDesc = NN_TRY(convertInput(desc));
+    auto nnPreparedModels = NN_TRY(downcastAll(preparedModels));
+    auto nnInputRoles = NN_TRY(convertInput(inputRoles));
+    auto nnOutputRoles = NN_TRY(convertInput(outputRoles));
+
+    auto buffer = NN_TRY(device->allocate(nnDesc, nnPreparedModels, nnInputRoles, nnOutputRoles));
+
+    const nn::Request::MemoryDomainToken token = buffer->getToken();
+    auto hidlBuffer = sp<Buffer>::make(std::move(buffer));
+    return std::make_pair(std::move(hidlBuffer), static_cast<uint32_t>(token));
+}
+
+}  // namespace
+
+Device::Device(nn::SharedDevice device, Executor executor)
+    : kDevice(std::move(device)), kExecutor(std::move(executor)) {
+    CHECK(kDevice != nullptr);
+    CHECK(kExecutor != nullptr);
+}
+
+Return<void> Device::getCapabilities(getCapabilities_cb cb) {
+    const auto capabilities = V1_0::utils::convert(kDevice->getCapabilities()).value();
+    cb(V1_0::ErrorStatus::NONE, capabilities);
+    return Void();
+}
+
+Return<void> Device::getCapabilities_1_1(getCapabilities_1_1_cb cb) {
+    const auto capabilities = V1_1::utils::convert(kDevice->getCapabilities()).value();
+    cb(V1_0::ErrorStatus::NONE, capabilities);
+    return Void();
+}
+
+Return<void> Device::getCapabilities_1_2(getCapabilities_1_2_cb cb) {
+    const auto capabilities = V1_2::utils::convert(kDevice->getCapabilities()).value();
+    cb(V1_0::ErrorStatus::NONE, capabilities);
+    return Void();
+}
+
+Return<void> Device::getCapabilities_1_3(getCapabilities_1_3_cb cb) {
+    const auto capabilities = V1_3::utils::convert(kDevice->getCapabilities()).value();
+    cb(V1_3::ErrorStatus::NONE, capabilities);
+    return Void();
+}
+
+Return<void> Device::getVersionString(getVersionString_cb cb) {
+    cb(V1_0::ErrorStatus::NONE, kDevice->getVersionString());
+    return Void();
+}
+
+Return<void> Device::getType(getType_cb cb) {
+    const auto maybeDeviceType = V1_2::utils::convert(kDevice->getType());
+    if (!maybeDeviceType.has_value()) {
+        const auto& [message, code] = maybeDeviceType.error();
+        LOG(ERROR) << "adapter::Device::getType failed with " << code << ": " << message;
+        cb(V1_2::utils::convert(code).value(), {});
+    } else {
+        cb(V1_0::ErrorStatus::NONE, maybeDeviceType.value());
+    }
+    return Void();
+}
+
+Return<void> Device::getSupportedExtensions(getSupportedExtensions_cb cb) {
+    const auto maybeSupportedExtensions = V1_2::utils::convert(kDevice->getSupportedExtensions());
+    if (!maybeSupportedExtensions.has_value()) {
+        const auto& [message, code] = maybeSupportedExtensions.error();
+        LOG(ERROR) << "adapter::Device::getSupportedExtensions failed with " << code << ": "
+                   << message;
+        cb(V1_2::utils::convert(code).value(), {});
+    } else {
+        cb(V1_0::ErrorStatus::NONE, maybeSupportedExtensions.value());
+    }
+    return Void();
+}
"adapter::Device::getSupportedOperations_1_0 failed with " << code << ": " + << message; + cb(V1_0::utils::convert(code).value(), {}); + } else { + cb(V1_0::ErrorStatus::NONE, result.value()); + } + return Void(); +} + +Return Device::getSupportedOperations_1_1(const V1_1::Model& model, + getSupportedOperations_1_1_cb cb) { + const auto result = adapter::getSupportedOperations(kDevice, model); + if (!result.has_value()) { + const auto& [message, code] = result.error(); + LOG(ERROR) << "adapter::Device::getSupportedOperations_1_1 failed with " << code << ": " + << message; + cb(V1_1::utils::convert(code).value(), {}); + } else { + cb(V1_0::ErrorStatus::NONE, result.value()); + } + return Void(); +} + +Return Device::getSupportedOperations_1_2(const V1_2::Model& model, + getSupportedOperations_1_2_cb cb) { + const auto result = adapter::getSupportedOperations(kDevice, model); + if (!result.has_value()) { + const auto& [message, code] = result.error(); + LOG(ERROR) << "adapter::Device::getSupportedOperations_1_2 failed with " << code << ": " + << message; + cb(V1_2::utils::convert(code).value(), {}); + } else { + cb(V1_0::ErrorStatus::NONE, result.value()); + } + return Void(); +} + +Return Device::getSupportedOperations_1_3(const V1_3::Model& model, + getSupportedOperations_1_3_cb cb) { + const auto result = adapter::getSupportedOperations(kDevice, model); + if (!result.has_value()) { + const auto& [message, code] = result.error(); + LOG(ERROR) << "adapter::Device::getSupportedOperations_1_3 failed with " << code << ": " + << message; + cb(V1_3::utils::convert(code).value(), {}); + } else { + cb(V1_3::ErrorStatus::NONE, result.value()); + } + return Void(); +} + +Return Device::getNumberOfCacheFilesNeeded(getNumberOfCacheFilesNeeded_cb cb) { + const auto [numModelCache, numDataCache] = kDevice->getNumberOfCacheFilesNeeded(); + cb(V1_0::ErrorStatus::NONE, numModelCache, numDataCache); + return Void(); +} + +Return Device::prepareModel(const V1_0::Model& model, + const sp& callback) { + auto result = adapter::prepareModel(kDevice, kExecutor, model, callback); + if (!result.has_value()) { + auto [message, code] = std::move(result).error(); + LOG(ERROR) << "adapter::Device::prepareModel failed with " << code << ": " << message; + notify(callback.get(), code, nullptr); + return V1_0::utils::convert(code).value(); + } + return V1_0::ErrorStatus::NONE; +} + +Return Device::prepareModel_1_1( + const V1_1::Model& model, V1_1::ExecutionPreference preference, + const sp& callback) { + auto result = adapter::prepareModel_1_1(kDevice, kExecutor, model, preference, callback); + if (!result.has_value()) { + auto [message, code] = std::move(result).error(); + LOG(ERROR) << "adapter::Device::prepareModel_1_1 failed with " << code << ": " << message; + notify(callback.get(), code, nullptr); + return V1_1::utils::convert(code).value(); + } + return V1_0::ErrorStatus::NONE; +} + +Return Device::prepareModel_1_2( + const V1_2::Model& model, V1_1::ExecutionPreference preference, + const hidl_vec& modelCache, const hidl_vec& dataCache, + const CacheToken& token, const sp& callback) { + auto result = adapter::prepareModel_1_2(kDevice, kExecutor, model, preference, modelCache, + dataCache, token, callback); + if (!result.has_value()) { + auto [message, code] = std::move(result).error(); + LOG(ERROR) << "adapter::Device::prepareModel_1_2 failed with " << code << ": " << message; + notify(callback.get(), code, nullptr); + return V1_2::utils::convert(code).value(); + } + return V1_0::ErrorStatus::NONE; +} + +Return 
+
+Return<V1_3::ErrorStatus> Device::prepareModel_1_3(
+        const V1_3::Model& model, V1_1::ExecutionPreference preference, V1_3::Priority priority,
+        const V1_3::OptionalTimePoint& deadline, const hidl_vec<hidl_handle>& modelCache,
+        const hidl_vec<hidl_handle>& dataCache, const CacheToken& token,
+        const sp<V1_3::IPreparedModelCallback>& callback) {
+    auto result = adapter::prepareModel_1_3(kDevice, kExecutor, model, preference, priority,
+                                            deadline, modelCache, dataCache, token, callback);
+    if (!result.has_value()) {
+        auto [message, code] = std::move(result).error();
+        LOG(ERROR) << "adapter::Device::prepareModel_1_3 failed with " << code << ": " << message;
+        notify(callback.get(), code, nullptr);
+        return V1_3::utils::convert(code).value();
+    }
+    return V1_3::ErrorStatus::NONE;
+}
+
+Return<V1_0::ErrorStatus> Device::prepareModelFromCache(
+        const hidl_vec<hidl_handle>& modelCache, const hidl_vec<hidl_handle>& dataCache,
+        const CacheToken& token, const sp<V1_2::IPreparedModelCallback>& callback) {
+    auto result = adapter::prepareModelFromCache(kDevice, kExecutor, modelCache, dataCache, token,
+                                                 callback);
+    if (!result.has_value()) {
+        auto [message, code] = std::move(result).error();
+        LOG(ERROR) << "adapter::Device::prepareModelFromCache failed with " << code << ": "
+                   << message;
+        notify(callback.get(), code, nullptr);
+        return V1_2::utils::convert(code).value();
+    }
+    return V1_0::ErrorStatus::NONE;
+}
+
+Return<V1_3::ErrorStatus> Device::prepareModelFromCache_1_3(
+        const V1_3::OptionalTimePoint& deadline, const hidl_vec<hidl_handle>& modelCache,
+        const hidl_vec<hidl_handle>& dataCache, const CacheToken& token,
+        const sp<V1_3::IPreparedModelCallback>& callback) {
+    auto result = adapter::prepareModelFromCache_1_3(kDevice, kExecutor, deadline, modelCache,
+                                                     dataCache, token, callback);
+    if (!result.has_value()) {
+        auto [message, code] = std::move(result).error();
+        LOG(ERROR) << "adapter::Device::prepareModelFromCache_1_3 failed with " << code << ": "
+                   << message;
+        notify(callback.get(), code, nullptr);
+        return V1_3::utils::convert(code).value();
+    }
+    return V1_3::ErrorStatus::NONE;
+}
+
+Return<V1_0::DeviceStatus> Device::getStatus() {
+    return V1_0::DeviceStatus::AVAILABLE;
+}
+
+Return<void> Device::allocate(const V1_3::BufferDesc& desc,
+                              const hidl_vec<sp<V1_3::IPreparedModel>>& preparedModels,
+                              const hidl_vec<V1_3::BufferRole>& inputRoles,
+                              const hidl_vec<V1_3::BufferRole>& outputRoles, allocate_cb cb) {
+    auto result = adapter::allocate(kDevice, desc, preparedModels, inputRoles, outputRoles);
+    if (!result.has_value()) {
+        const auto [message, code] = std::move(result).error();
+        LOG(ERROR) << "adapter::Device::allocate failed with " << code << ": " << message;
+        cb(V1_3::utils::convert(code).value(), nullptr, /*token=*/0);
+        return Void();
+    }
+    auto [buffer, token] = std::move(result).value();
+    cb(V1_3::ErrorStatus::NONE, buffer, token);
+    return Void();
+}
+
+}  // namespace android::hardware::neuralnetworks::adapter
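Device::allocate is the memory-domain path: the helper downcasts the HIDL prepared models back to their canonical counterparts, asks the canonical device for an nn::IBuffer, wraps it in adapter::Buffer, and returns the buffer plus its MemoryDomainToken. A hedged client-side sketch follows; it is not part of this change, "hidlDevice"/"preparedModel" are assumed to exist in surrounding code, and the V1_3 struct layouts used (BufferDesc::dimensions; BufferRole::{modelIndex, ioIndex, frequency}) are assumptions from the 1.3 HAL types.

// Illustrative sketch (not part of this change): requesting a driver-managed
// buffer for output 0 of a single prepared model.
#include <android/hardware/neuralnetworks/1.3/IBuffer.h>
#include <android/hardware/neuralnetworks/1.3/IDevice.h>
#include <android/hardware/neuralnetworks/1.3/IPreparedModel.h>

namespace V1_3 = android::hardware::neuralnetworks::V1_3;
using android::sp;
using android::hardware::hidl_vec;

void allocateOutputBuffer(const sp<V1_3::IDevice>& hidlDevice,
                          const sp<V1_3::IPreparedModel>& preparedModel) {
    V1_3::BufferDesc desc = {};
    desc.dimensions = hidl_vec<uint32_t>{};  // Left unspecified; the driver may infer them.

    V1_3::BufferRole outputRole = {};
    outputRole.modelIndex = 0;  // Index into the preparedModels argument below.
    outputRole.ioIndex = 0;     // Output 0 of that model.
    outputRole.frequency = 1.0f;

    hidl_vec<V1_3::BufferRole> inputRoles;  // The buffer is not used as an input here.
    hidl_vec<V1_3::BufferRole> outputRoles = {outputRole};

    hidlDevice->allocate(desc, {preparedModel}, inputRoles, outputRoles,
                         [](V1_3::ErrorStatus status, const sp<V1_3::IBuffer>& buffer,
                            uint32_t token) {
                             // On success, "buffer" supports copyTo/copyFrom and "token" can be
                             // referenced from a Request memory pool.
                             (void)status;
                             (void)buffer;
                             (void)token;
                         });
}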
diff --git a/neuralnetworks/utils/adapter/src/PreparedModel.cpp b/neuralnetworks/utils/adapter/src/PreparedModel.cpp
new file mode 100644
index 0000000000..8968c2cc91
--- /dev/null
+++ b/neuralnetworks/utils/adapter/src/PreparedModel.cpp
@@ -0,0 +1,417 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "PreparedModel.h"
+
+#include <ExecutionBurstServer.h>
+#include <android-base/logging.h>
+#include <android/hardware/neuralnetworks/1.0/IExecutionCallback.h>
+#include <android/hardware/neuralnetworks/1.0/types.h>
+#include <android/hardware/neuralnetworks/1.2/IExecutionCallback.h>
+#include <android/hardware/neuralnetworks/1.2/types.h>
+#include <android/hardware/neuralnetworks/1.3/IExecutionCallback.h>
+#include <android/hardware/neuralnetworks/1.3/IFencedExecutionCallback.h>
+#include <android/hardware/neuralnetworks/1.3/types.h>
+#include <nnapi/IPreparedModel.h>
+#include <nnapi/Result.h>
+#include <nnapi/TypeUtils.h>
+#include <nnapi/Types.h>
+#include <nnapi/Validation.h>
+#include <nnapi/hal/1.0/Conversions.h>
+#include <nnapi/hal/1.0/Utils.h>
+#include <nnapi/hal/1.2/Conversions.h>
+#include <nnapi/hal/1.2/Utils.h>
+#include <nnapi/hal/1.3/Conversions.h>
+#include <nnapi/hal/1.3/Utils.h>
+#include <nnapi/hal/HandleError.h>
+#include <sys/types.h>
+
+#include <memory>
+#include <utility>
+
+// See hardware/interfaces/neuralnetworks/utils/README.md for more information on HIDL interface
+// lifetimes across processes and for protecting asynchronous calls across HIDL.
+
+namespace android::hardware::neuralnetworks::adapter {
+namespace {
+
+template <typename Type>
+auto convertInput(const Type& object) -> decltype(nn::convert(std::declval<Type>())) {
+    auto result = nn::convert(object);
+    if (!result.has_value()) {
+        result.error().code = nn::ErrorStatus::INVALID_ARGUMENT;
+    }
+    return result;
+}
+
+class FencedExecutionCallback final : public V1_3::IFencedExecutionCallback {
+  public:
+    explicit FencedExecutionCallback(const nn::ExecuteFencedInfoCallback& callback)
+        : kCallback(callback) {
+        CHECK(callback != nullptr);
+    }
+
+    Return<void> getExecutionInfo(getExecutionInfo_cb cb) override {
+        const auto result = kCallback();
+        if (!result.has_value()) {
+            const auto& [message, code] = result.error();
+            const auto status =
+                    V1_3::utils::convert(code).value_or(V1_3::ErrorStatus::GENERAL_FAILURE);
+            LOG(ERROR) << message;
+            cb(status, V1_2::utils::kNoTiming, V1_2::utils::kNoTiming);
+            return Void();
+        }
+        const auto [timingLaunched, timingFenced] = result.value();
+        const auto hidlTimingLaunched = V1_3::utils::convert(timingLaunched).value();
+        const auto hidlTimingFenced = V1_3::utils::convert(timingFenced).value();
+        cb(V1_3::ErrorStatus::NONE, hidlTimingLaunched, hidlTimingFenced);
+        return Void();
+    }
+
+  private:
+    const nn::ExecuteFencedInfoCallback kCallback;
+};
+
+using ExecutionResult = nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>>;
+
+void notify(V1_0::IExecutionCallback* callback, nn::ErrorStatus status,
+            const std::vector<nn::OutputShape>& /*outputShapes*/, const nn::Timing& /*timing*/) {
+    if (callback != nullptr) {
+        const auto hidlStatus = V1_0::utils::convert(status).value();
+        const auto ret = callback->notify(hidlStatus);
+        if (!ret.isOk()) {
+            LOG(ERROR) << "V1_0::IExecutionCallback::notify failed with " << ret.description();
+        }
+    }
+}
+
+void notify(V1_2::IExecutionCallback* callback, nn::ErrorStatus status,
+            const std::vector<nn::OutputShape>& outputShapes, const nn::Timing& timing) {
+    if (callback != nullptr) {
+        const auto hidlStatus = V1_2::utils::convert(status).value();
+        const auto hidlOutputShapes = V1_2::utils::convert(outputShapes).value();
+        const auto hidlTiming = V1_2::utils::convert(timing).value();
+        const auto ret = callback->notify_1_2(hidlStatus, hidlOutputShapes, hidlTiming);
+        if (!ret.isOk()) {
+            LOG(ERROR) << "V1_2::IExecutionCallback::notify_1_2 failed with " << ret.description();
+        }
+    }
+}
+
+void notify(V1_3::IExecutionCallback* callback, nn::ErrorStatus status,
+            const std::vector<nn::OutputShape>& outputShapes, const nn::Timing& timing) {
+    if (callback != nullptr) {
+        const auto hidlStatus = V1_3::utils::convert(status).value();
+        const auto hidlOutputShapes = V1_3::utils::convert(outputShapes).value();
+        const auto hidlTiming = V1_3::utils::convert(timing).value();
+        const auto ret = callback->notify_1_3(hidlStatus, hidlOutputShapes, hidlTiming);
+        if (!ret.isOk()) {
+            LOG(ERROR) << "V1_3::IExecutionCallback::notify_1_3 failed with " << ret.description();
+        }
+    }
+}
+
+template <typename CallbackType>
+void notify(CallbackType* callback, ExecutionResult result) {
+    if (!result.has_value()) {
+        const auto [message, status, outputShapes] = std::move(result).error();
+        LOG(ERROR) << message;
+        notify(callback, status, outputShapes, {});
+    } else {
+        const auto [outputShapes, timing] = std::move(result).value();
+        notify(callback, nn::ErrorStatus::NONE, outputShapes, timing);
+    }
+}
+
+nn::GeneralResult<void> execute(const nn::SharedPreparedModel& preparedModel, uid_t userId,
+                                const Executor& executor, const V1_0::Request& request,
+                                const sp<V1_0::IExecutionCallback>& callback) {
+    if (callback.get() == nullptr) {
+        return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT) << "Invalid callback";
+    }
+
+    auto nnRequest = NN_TRY(convertInput(request));
+
+    const std::any resource = preparedModel->getUnderlyingResource();
+    if (const auto* model = std::any_cast<const nn::Model*>(&resource)) {
+        CHECK(*model != nullptr);
+        NN_TRY(utils::makeGeneralFailure(nn::validateRequestForModel(nnRequest, **model),
+                                         nn::ErrorStatus::INVALID_ARGUMENT));
+    }
+
+    Task task = [preparedModel, nnRequest = std::move(nnRequest), callback] {
+        auto result = preparedModel->execute(nnRequest, nn::MeasureTiming::NO, {}, {});
+        notify(callback.get(), std::move(result));
+    };
+    executor(std::move(task), userId, {});
+
+    return {};
+}
+
+nn::GeneralResult<void> execute_1_2(const nn::SharedPreparedModel& preparedModel, uid_t userId,
+                                    const Executor& executor, const V1_0::Request& request,
+                                    V1_2::MeasureTiming measure,
+                                    const sp<V1_2::IExecutionCallback>& callback) {
+    if (callback.get() == nullptr) {
+        return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT) << "Invalid callback";
+    }
+
+    auto nnRequest = NN_TRY(convertInput(request));
+    const auto nnMeasure = NN_TRY(convertInput(measure));
+
+    const std::any resource = preparedModel->getUnderlyingResource();
+    if (const auto* model = std::any_cast<const nn::Model*>(&resource)) {
+        CHECK(*model != nullptr);
+        NN_TRY(utils::makeGeneralFailure(nn::validateRequestForModel(nnRequest, **model),
+                                         nn::ErrorStatus::INVALID_ARGUMENT));
+    }
+
+    Task task = [preparedModel, nnRequest = std::move(nnRequest), nnMeasure, callback] {
+        auto result = preparedModel->execute(nnRequest, nnMeasure, {}, {});
+        notify(callback.get(), std::move(result));
+    };
+    executor(std::move(task), userId, {});
+
+    return {};
+}
+
+nn::GeneralResult<void> execute_1_3(const nn::SharedPreparedModel& preparedModel, uid_t userId,
+                                    const Executor& executor, const V1_3::Request& request,
+                                    V1_2::MeasureTiming measure,
+                                    const V1_3::OptionalTimePoint& deadline,
+                                    const V1_3::OptionalTimeoutDuration& loopTimeoutDuration,
+                                    const sp<V1_3::IExecutionCallback>& callback) {
+    if (callback.get() == nullptr) {
+        return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT) << "Invalid callback";
+    }
+
+    auto nnRequest = NN_TRY(convertInput(request));
+    const auto nnMeasure = NN_TRY(convertInput(measure));
+    const auto nnDeadline = NN_TRY(convertInput(deadline));
+    const auto nnLoopTimeoutDuration = NN_TRY(convertInput(loopTimeoutDuration));
+
+    const std::any resource = preparedModel->getUnderlyingResource();
+    if (const auto* model = std::any_cast<const nn::Model*>(&resource)) {
+        CHECK(*model != nullptr);
+        NN_TRY(utils::makeGeneralFailure(nn::validateRequestForModel(nnRequest, **model),
+                                         nn::ErrorStatus::INVALID_ARGUMENT));
+    }
+
+    Task task = [preparedModel, nnRequest = std::move(nnRequest), nnMeasure, nnDeadline,
+                 nnLoopTimeoutDuration, callback] {
+        auto result =
+                preparedModel->execute(nnRequest, nnMeasure, nnDeadline, nnLoopTimeoutDuration);
+        notify(callback.get(), std::move(result));
+    };
+    executor(std::move(task), userId, nnDeadline);
+
+    return {};
+}
+
+nn::ExecutionResult<std::pair<hidl_vec<V1_2::OutputShape>, V1_2::Timing>> executeSynchronously(
+        const nn::SharedPreparedModel& preparedModel, const V1_0::Request& request,
+        V1_2::MeasureTiming measure) {
+    const auto nnRequest = NN_TRY(utils::makeExecutionFailure(convertInput(request)));
+    const auto nnMeasure = NN_TRY(utils::makeExecutionFailure(convertInput(measure)));
+
+    const auto [outputShapes, timing] =
+            NN_TRY(preparedModel->execute(nnRequest, nnMeasure, {}, {}));
+
+    auto hidlOutputShapes = NN_TRY(utils::makeExecutionFailure(V1_2::utils::convert(outputShapes)));
+    const auto hidlTiming = NN_TRY(utils::makeExecutionFailure(V1_2::utils::convert(timing)));
+    return std::make_pair(std::move(hidlOutputShapes), hidlTiming);
+}
+
+nn::ExecutionResult<std::pair<hidl_vec<V1_2::OutputShape>, V1_2::Timing>> executeSynchronously_1_3(
+        const nn::SharedPreparedModel& preparedModel, const V1_3::Request& request,
+        V1_2::MeasureTiming measure, const V1_3::OptionalTimePoint& deadline,
+        const V1_3::OptionalTimeoutDuration& loopTimeoutDuration) {
+    const auto nnRequest = NN_TRY(utils::makeExecutionFailure(convertInput(request)));
+    const auto nnMeasure = NN_TRY(utils::makeExecutionFailure(convertInput(measure)));
+    const auto nnDeadline = NN_TRY(utils::makeExecutionFailure(convertInput(deadline)));
+    const auto nnLoopTimeoutDuration =
+            NN_TRY(utils::makeExecutionFailure(convertInput(loopTimeoutDuration)));
+
+    const auto [outputShapes, timing] =
+            NN_TRY(preparedModel->execute(nnRequest, nnMeasure, nnDeadline, nnLoopTimeoutDuration));
+
+    auto hidlOutputShapes = NN_TRY(utils::makeExecutionFailure(V1_3::utils::convert(outputShapes)));
+    const auto hidlTiming = NN_TRY(utils::makeExecutionFailure(V1_3::utils::convert(timing)));
+    return std::make_pair(std::move(hidlOutputShapes), hidlTiming);
+}
+
+nn::GeneralResult<std::vector<nn::SyncFence>> convertSyncFences(
+        const hidl_vec<hidl_handle>& handles) {
+    std::vector<nn::SyncFence> syncFences;
+    syncFences.reserve(handles.size());
+    for (const auto& handle : handles) {
+        auto nativeHandle = NN_TRY(convertInput(handle));
+        auto syncFence = NN_TRY(utils::makeGeneralFailure(
+                nn::SyncFence::create(std::move(nativeHandle)), nn::ErrorStatus::INVALID_ARGUMENT));
+        syncFences.push_back(std::move(syncFence));
+    }
+    return syncFences;
+}
+
+nn::GeneralResult<std::pair<hidl_handle, sp<V1_3::IFencedExecutionCallback>>> executeFenced(
+        const nn::SharedPreparedModel& preparedModel, const V1_3::Request& request,
+        const hidl_vec<hidl_handle>& waitFor, V1_2::MeasureTiming measure,
+        const V1_3::OptionalTimePoint& deadline,
+        const V1_3::OptionalTimeoutDuration& loopTimeoutDuration,
+        const V1_3::OptionalTimeoutDuration& duration) {
+    const auto nnRequest = NN_TRY(convertInput(request));
+    const auto nnWaitFor = NN_TRY(convertSyncFences(waitFor));
+    const auto nnMeasure = NN_TRY(convertInput(measure));
+    const auto nnDeadline = NN_TRY(convertInput(deadline));
+    const auto nnLoopTimeoutDuration = NN_TRY(convertInput(loopTimeoutDuration));
+    const auto nnDuration = NN_TRY(convertInput(duration));
+
+    auto [syncFence, executeFencedCallback] = NN_TRY(preparedModel->executeFenced(
+            nnRequest, nnWaitFor, nnMeasure, nnDeadline, nnLoopTimeoutDuration, nnDuration));
+
+    auto hidlSyncFence = NN_TRY(V1_3::utils::convert(syncFence.getSharedHandle()));
+    auto hidlExecuteFencedCallback = sp<FencedExecutionCallback>::make(executeFencedCallback);
+    return std::make_pair(std::move(hidlSyncFence), std::move(hidlExecuteFencedCallback));
+}
+
+}  // namespace
+
+PreparedModel::PreparedModel(nn::SharedPreparedModel preparedModel, Executor executor, uid_t userId)
+    : kPreparedModel(std::move(preparedModel)), kExecutor(std::move(executor)), kUserId(userId) {
+    CHECK(kPreparedModel != nullptr);
+    CHECK(kExecutor != nullptr);
+}
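The fenced path above returns a sync-fence handle plus an IFencedExecutionCallback through which timing can be queried after the fence signals. A hedged client-side sketch of consuming that callback follows; it is illustrative only, and "reportFencedTiming" plus the assumption that "fencedCallback" came from an executeFenced_cb invocation are hypothetical.

// Illustrative sketch (not part of this change): querying launch/fence timing
// from the callback returned by executeFenced.
#include <android/hardware/neuralnetworks/1.3/IFencedExecutionCallback.h>

namespace V1_2 = android::hardware::neuralnetworks::V1_2;
namespace V1_3 = android::hardware::neuralnetworks::V1_3;
using android::sp;

void reportFencedTiming(const sp<V1_3::IFencedExecutionCallback>& fencedCallback) {
    fencedCallback->getExecutionInfo(
            [](V1_3::ErrorStatus status, const V1_2::Timing& timingLaunched,
               const V1_2::Timing& timingFenced) {
                // Timings are meaningful only when status is NONE and timing measurement
                // was requested; otherwise the kNoTiming sentinels are reported.
                (void)status;
                (void)timingLaunched;
                (void)timingFenced;
            });
}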
+
+nn::SharedPreparedModel PreparedModel::getUnderlyingPreparedModel() const {
+    return kPreparedModel;
+}
+
+Return<V1_0::ErrorStatus> PreparedModel::execute(const V1_0::Request& request,
+                                                 const sp<V1_0::IExecutionCallback>& callback) {
+    auto result = adapter::execute(kPreparedModel, kUserId, kExecutor, request, callback);
+    if (!result.has_value()) {
+        auto [message, code] = std::move(result).error();
+        LOG(ERROR) << "adapter::PreparedModel::execute failed with " << code << ": " << message;
+        notify(callback.get(), code, {}, {});
+        return V1_0::utils::convert(code).value();
+    }
+    return V1_0::ErrorStatus::NONE;
+}
+
+Return<V1_0::ErrorStatus> PreparedModel::execute_1_2(const V1_0::Request& request,
+                                                     V1_2::MeasureTiming measure,
+                                                     const sp<V1_2::IExecutionCallback>& callback) {
+    auto result =
+            adapter::execute_1_2(kPreparedModel, kUserId, kExecutor, request, measure, callback);
+    if (!result.has_value()) {
+        auto [message, code] = std::move(result).error();
+        LOG(ERROR) << "adapter::PreparedModel::execute_1_2 failed with " << code << ": " << message;
+        notify(callback.get(), code, {}, {});
+        return V1_2::utils::convert(code).value();
+    }
+    return V1_0::ErrorStatus::NONE;
+}
+
+Return<V1_3::ErrorStatus> PreparedModel::execute_1_3(
+        const V1_3::Request& request, V1_2::MeasureTiming measure,
+        const V1_3::OptionalTimePoint& deadline,
+        const V1_3::OptionalTimeoutDuration& loopTimeoutDuration,
+        const sp<V1_3::IExecutionCallback>& callback) {
+    auto result = adapter::execute_1_3(kPreparedModel, kUserId, kExecutor, request, measure,
+                                       deadline, loopTimeoutDuration, callback);
+    if (!result.has_value()) {
+        auto [message, code] = std::move(result).error();
+        LOG(ERROR) << "adapter::PreparedModel::execute_1_3 failed with " << code << ": " << message;
+        notify(callback.get(), code, {}, {});
+        return V1_3::utils::convert(code).value();
+    }
+    return V1_3::ErrorStatus::NONE;
+}
+
+Return<void> PreparedModel::executeSynchronously(const V1_0::Request& request,
+                                                 V1_2::MeasureTiming measure,
+                                                 executeSynchronously_cb cb) {
+    auto result = adapter::executeSynchronously(kPreparedModel, request, measure);
+    if (!result.has_value()) {
+        auto [message, code, outputShapes] = std::move(result).error();
+        LOG(ERROR) << "adapter::PreparedModel::executeSynchronously failed with " << code << ": "
+                   << message;
+        cb(V1_2::utils::convert(code).value(), V1_2::utils::convert(outputShapes).value(),
+           V1_2::utils::kNoTiming);
+        return Void();
+    }
+    auto [outputShapes, timing] = std::move(result).value();
+    cb(V1_0::ErrorStatus::NONE, outputShapes, timing);
+    return Void();
+}
+
+Return<void> PreparedModel::executeSynchronously_1_3(
+        const V1_3::Request& request, V1_2::MeasureTiming measure,
+        const V1_3::OptionalTimePoint& deadline,
+        const V1_3::OptionalTimeoutDuration& loopTimeoutDuration, executeSynchronously_1_3_cb cb) {
+    auto result = adapter::executeSynchronously_1_3(kPreparedModel, request, measure, deadline,
+                                                    loopTimeoutDuration);
+    if (!result.has_value()) {
+        auto [message, code, outputShapes] = std::move(result).error();
+        LOG(ERROR) << "adapter::PreparedModel::executeSynchronously_1_3 failed with " << code
+                   << ": " << message;
+        cb(V1_3::utils::convert(code).value(), V1_3::utils::convert(outputShapes).value(),
+           V1_2::utils::kNoTiming);
+        return Void();
+    }
+    auto [outputShapes, timing] = std::move(result).value();
+    cb(V1_3::ErrorStatus::NONE, outputShapes, timing);
+    return Void();
+}
+
+Return<void> PreparedModel::configureExecutionBurst(
+        const sp<V1_2::IBurstCallback>& callback,
+        const MQDescriptorSync<V1_2::FmqRequestDatum>& requestChannel,
+        const MQDescriptorSync<V1_2::FmqResultDatum>& resultChannel,
+        configureExecutionBurst_cb cb) {
+    const sp<V1_2::IBurstContext> burst = nn::ExecutionBurstServer::create(
+            callback, requestChannel, resultChannel, this, std::chrono::microseconds{0});
+
+    if (burst == nullptr) {
+        cb(V1_0::ErrorStatus::GENERAL_FAILURE, {});
+    } else {
+        cb(V1_0::ErrorStatus::NONE, burst);
+    }
+    return Void();
+}
+
+Return<void> PreparedModel::executeFenced(const V1_3::Request& request,
+                                          const hidl_vec<hidl_handle>& waitFor,
+                                          V1_2::MeasureTiming measure,
+                                          const V1_3::OptionalTimePoint& deadline,
+                                          const V1_3::OptionalTimeoutDuration& loopTimeoutDuration,
+                                          const V1_3::OptionalTimeoutDuration& duration,
+                                          executeFenced_cb callback) {
+    auto result = adapter::executeFenced(kPreparedModel, request, waitFor, measure, deadline,
+                                         loopTimeoutDuration, duration);
+    if (!result.has_value()) {
+        auto [message, code] = std::move(result).error();
+        LOG(ERROR) << "adapter::PreparedModel::executeFenced failed with " << code << ": "
+                   << message;
+        callback(V1_3::utils::convert(code).value(), {}, nullptr);
+        return Void();
+    }
+    auto [syncFence, executeFencedCallback] = std::move(result).value();
+    callback(V1_3::ErrorStatus::NONE, syncFence, executeFencedCallback);
+    return Void();
+}
+
+}  // namespace android::hardware::neuralnetworks::adapter
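Taken together, the change lets a canonical driver be exercised through the full V1_3 HIDL surface. A hedged end-to-end smoke test, not part of this change, is sketched below; getExampleCanonicalDevice() is hypothetical, and any nn::SharedDevice would do.

// Illustrative sketch (not part of this change): wrap a canonical device and
// query it through the adapted HIDL surface.
#include <nnapi/hal/Adapter.h>

namespace adapter = android::hardware::neuralnetworks::adapter;
namespace V1_3 = android::hardware::neuralnetworks::V1_3;

android::nn::SharedDevice getExampleCanonicalDevice();  // Hypothetical, provided elsewhere.

bool smokeTestAdapter() {
    android::sp<V1_3::IDevice> device = adapter::adapt(getExampleCanonicalDevice());
    bool ok = false;
    const auto ret = device->getCapabilities_1_3(
            [&ok](V1_3::ErrorStatus status, const V1_3::Capabilities& /*capabilities*/) {
                ok = (status == V1_3::ErrorStatus::NONE);
            });
    // ret.isOk() checks the transport; "ok" checks the reported HAL status.
    return ret.isOk() && ok;
}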