Merge "Address ANAPIC review comments -- hal."

Authored by Xusong Wang on 2022-02-09 18:49:21 +00:00; committed by Gerrit Code Review
commit 8b389e1123
12 changed files with 41 additions and 12 deletions

View file

@@ -39,7 +39,8 @@ parcelable PrepareModelConfig {
long deadlineNs;
ParcelFileDescriptor[] modelCache;
ParcelFileDescriptor[] dataCache;
-byte[] cacheToken;
+byte[32] cacheToken;
android.hardware.neuralnetworks.TokenValuePair[] compilationHints;
android.hardware.neuralnetworks.ExtensionNameAndPrefix[] extensionNameToPrefix;
+const int BYTE_SIZE_OF_CACHE_TOKEN = 32;
}
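A note on the type change above: in the AIDL C++/NDK backend, a fixed-size array such as byte[32] is generated as a std::array rather than the std::vector used for byte[], so the 32-byte token length is enforced at compile time. A minimal sketch of the generated field shape (simplified and assumed, not the actual generated header):

    // Assumed shape of the generated NDK parcelable (simplified sketch):
    #include <array>
    #include <cstdint>

    struct PrepareModelConfig {
        static constexpr int32_t BYTE_SIZE_OF_CACHE_TOKEN = 32;
        // byte[32] in AIDL: a fixed-size std::array instead of std::vector<uint8_t>,
        // so a wrongly sized token fails to compile rather than at runtime.
        std::array<uint8_t, BYTE_SIZE_OF_CACHE_TOKEN> cacheToken;
        // ... remaining fields elided ...
    };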

View file

@@ -39,7 +39,7 @@ interface IDevice {
/**
* The byte size of the cache token.
*/
-const int BYTE_SIZE_OF_CACHE_TOKEN = 32;
+const int BYTE_SIZE_OF_CACHE_TOKEN = PrepareModelConfig.BYTE_SIZE_OF_CACHE_TOKEN;
/**
* The maximum number of files for each type of cache in compilation caching.
*/

View file

@@ -204,6 +204,12 @@ interface IPreparedModel {
* appropriate ErrorStatus value. If the inputs to the function are valid and there is no error,
* createReusableExecution must construct a reusable execution.
*
+* This method will be called when a client requests a reusable execution with consistent
+* request and execution config. For single-time execution,
+* {@link IPreparedModel::executeSynchronouslyWithConfig} or
+* {@link IPreparedModel::executeFencedWithConfig} is preferred, because the overhead of
+* setting up a reusable execution can be avoided.
+*
* @param request The input and output information on which the prepared model is to be
* executed.
* @param config Specifies the execution configuration parameters.
@@ -223,6 +229,10 @@ interface IPreparedModel {
* ExecutionConfig} instead of a list of configuration parameters, and ExecutionConfig contains
* more configuration parameters than are passed to executeSynchronously.
*
+* This method is preferred when a client requests a single-time synchronous execution.
+* For reusable execution with consistent request and execution config,
+* {@link IPreparedModel::createReusableExecution} must be called.
+*
* @param request The input and output information on which the prepared model is to be
* executed.
* @param config Specifies the execution configuration parameters.
@@ -246,6 +256,10 @@ interface IPreparedModel {
* ExecutionConfig} instead of a list of configuration parameters, and ExecutionConfig contains
* more configuration parameters than are passed to executeFenced.
*
+* This method is preferred when a client requests a single-time fenced execution.
+* For reusable execution with consistent request and execution config,
+* {@link IPreparedModel::createReusableExecution} must be called.
+*
* @param request The input and output information on which the prepared model is to be
* executed. The outputs in the request must have fully specified dimensions.
* @param waitFor A vector of sync fence file descriptors. Execution must not start until all

View file

@@ -27,6 +27,11 @@ import android.hardware.neuralnetworks.TokenValuePair;
*/
@VintfStability
parcelable PrepareModelConfig {
+/**
+* The byte size of the cache token.
+*/
+const int BYTE_SIZE_OF_CACHE_TOKEN = 32;
/**
* Indicates the intended execution behavior of a prepared model.
*/
@@ -66,7 +71,7 @@ parcelable PrepareModelConfig {
*/
ParcelFileDescriptor[] dataCache;
/**
-* A caching token of length IDevice::BYTE_SIZE_OF_CACHE_TOKEN identifying
+* A caching token of length BYTE_SIZE_OF_CACHE_TOKEN identifying
* the prepared model. The same token will be provided when
* retrieving the prepared model from the cache files with
* IDevice::prepareModelFromCache. Tokens should be chosen to have a low
@@ -77,7 +82,7 @@ parcelable PrepareModelConfig {
* indicating that caching information is not provided, this
* token must be ignored.
*/
-byte[] cacheToken;
+byte[BYTE_SIZE_OF_CACHE_TOKEN] cacheToken;
/**
* A vector of token / value pairs represent vendor specific
* compilation hints or metadata. The provided TokenValuePairs must not
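The reworded comment keeps the existing contract: an all-zero token means the client provided no caching information and the token must be ignored. A hedged sketch of how a service might test for that sentinel (the helper name is illustrative, not part of the HAL):

    // Illustrative helper (not part of the HAL): true when the caller sent
    // the all-zero token, i.e. caching information was not provided.
    #include <algorithm>
    #include <array>
    #include <cstdint>

    bool cacheTokenOmitted(const std::array<uint8_t, 32>& cacheToken) {
        return std::all_of(cacheToken.begin(), cacheToken.end(),
                           [](uint8_t b) { return b == 0; });
    }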

View file

@@ -27,6 +27,7 @@
#include <aidl/android/hardware/neuralnetworks/Extension.h>
#include <aidl/android/hardware/neuralnetworks/ExtensionNameAndPrefix.h>
#include <aidl/android/hardware/neuralnetworks/ExtensionOperandTypeInformation.h>
+#include <aidl/android/hardware/neuralnetworks/IDevice.h>
#include <aidl/android/hardware/neuralnetworks/Memory.h>
#include <aidl/android/hardware/neuralnetworks/Model.h>
#include <aidl/android/hardware/neuralnetworks/Operand.h>
@@ -219,6 +220,7 @@ nn::GeneralResult<std::vector<TokenValuePair>> convert(
#endif // NN_AIDL_V4_OR_ABOVE
nn::GeneralResult<std::vector<int32_t>> toSigned(const std::vector<uint32_t>& vec);
+std::vector<uint8_t> toVec(const std::array<uint8_t, IDevice::BYTE_SIZE_OF_CACHE_TOKEN>& token);
} // namespace aidl::android::hardware::neuralnetworks::utils

View file

@@ -614,7 +614,7 @@ struct overloaded : Ts... {
using Ts::operator()...;
};
template <class... Ts>
-overloaded(Ts...)->overloaded<Ts...>;
+overloaded(Ts...) -> overloaded<Ts...>;
#ifdef __ANDROID__
nn::GeneralResult<common::NativeHandle> aidlHandleFromNativeHandle(
@@ -1190,4 +1190,8 @@ nn::GeneralResult<std::vector<int32_t>> toSigned(const std::vector<uint32_t>& vec
return std::vector<int32_t>(vec.begin(), vec.end());
}
+std::vector<uint8_t> toVec(const std::array<uint8_t, IDevice::BYTE_SIZE_OF_CACHE_TOKEN>& token) {
+return std::vector<uint8_t>(token.begin(), token.end());
+}
} // namespace aidl::android::hardware::neuralnetworks::utils
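toVec is the shim that lets existing std::vector-based helper signatures keep working now that the AIDL field is a fixed std::array; the InvalidDevice and adapter hunks below are its call sites. Usage, sketched (config stands for an incoming PrepareModelConfig):

    // Sketch: bridge the fixed-size AIDL token to the vector-based legacy paths.
    const std::array<uint8_t, IDevice::BYTE_SIZE_OF_CACHE_TOKEN> token = config.cacheToken;
    const std::vector<uint8_t> legacyToken = utils::toVec(token);  // 32-byte copy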

View file

@@ -229,7 +229,6 @@ nn::GeneralResult<nn::SharedPreparedModel> Device::prepareModel(
const auto aidlDeadline = NN_TRY(convert(deadline));
auto aidlModelCache = NN_TRY(convert(modelCache));
auto aidlDataCache = NN_TRY(convert(dataCache));
-const auto aidlToken = NN_TRY(convert(token));
const auto cb = ndk::SharedRefBase::make<PreparedModelCallback>(kFeatureLevel);
const auto scoped = kDeathHandler.protectCallback(cb.get());
@@ -240,12 +239,13 @@
const auto ret = kDevice->prepareModelWithConfig(
aidlModel,
{aidlPreference, aidlPriority, aidlDeadline, std::move(aidlModelCache),
-std::move(aidlDataCache), aidlToken, std::move(aidlHints),
+std::move(aidlDataCache), token, std::move(aidlHints),
std::move(aidlExtensionPrefix)},
cb);
HANDLE_ASTATUS(ret) << "prepareModel failed";
return cb->get();
}
+const auto aidlToken = NN_TRY(convert(token));
const auto ret = kDevice->prepareModel(aidlModel, aidlPreference, aidlPriority, aidlDeadline,
aidlModelCache, aidlDataCache, aidlToken, cb);
HANDLE_ASTATUS(ret) << "prepareModel failed";

View file

@@ -189,7 +189,8 @@ ndk::ScopedAStatus InvalidDevice::prepareModelWithConfig(
}
}
return prepareModel(model, config.preference, config.priority, config.deadlineNs,
-config.modelCache, config.dataCache, config.cacheToken, callback);
+config.modelCache, config.dataCache, utils::toVec(config.cacheToken),
+callback);
}
ndk::ScopedAStatus InvalidDevice::prepareModelFromCache(

View file

@@ -21,6 +21,7 @@
#include <gtest/gtest.h>
#include <algorithm>
+#include <array>
#include <iosfwd>
#include <string>
#include <utility>
@@ -47,6 +48,7 @@ inline constexpr int64_t kNoDeadline = -1;
inline constexpr int64_t kOmittedTimeoutDuration = -1;
inline constexpr int64_t kNoDuration = -1;
inline const std::vector<uint8_t> kEmptyCacheToken(IDevice::BYTE_SIZE_OF_CACHE_TOKEN);
+inline const std::array<uint8_t, IDevice::BYTE_SIZE_OF_CACHE_TOKEN> kEmptyCacheTokenArray{};
// Returns the amount of space needed to store a value of the specified type.
//
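Since value-initialization zero-fills a std::array, kEmptyCacheTokenArray holds the same all-zero "no caching" sentinel as the older kEmptyCacheToken vector; only the container type differs, to match the new byte[32] field. For illustration:

    // Illustration: both empty tokens are 32 zero bytes; only the container differs.
    #include <array>
    #include <cstdint>
    #include <vector>

    constexpr int kTokenSize = 32;  // IDevice::BYTE_SIZE_OF_CACHE_TOKEN
    const std::vector<uint8_t> emptyVec(kTokenSize);       // 32 zeros
    const std::array<uint8_t, kTokenSize> emptyArr{};      // 32 zeros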

View file

@@ -85,7 +85,7 @@ static void validatePrepareModelWithConfig(const std::shared_ptr<IDevice>& device
std::shared_ptr<PreparedModelCallback> preparedModelCallback =
ndk::SharedRefBase::make<PreparedModelCallback>();
const auto prepareLaunchStatus = device->prepareModelWithConfig(
-model, {preference, priority, kNoDeadline, {}, {}, kEmptyCacheToken, {}, {}},
+model, {preference, priority, kNoDeadline, {}, {}, kEmptyCacheTokenArray, {}, {}},
preparedModelCallback);
ASSERT_FALSE(prepareLaunchStatus.isOk());
ASSERT_EQ(prepareLaunchStatus.getExceptionCode(), EX_SERVICE_SPECIFIC);

View file

@@ -72,7 +72,7 @@ void createPreparedModel(const std::shared_ptr<IDevice>& device, const Model& model
kNoDeadline,
{},
{},
-kEmptyCacheToken,
+kEmptyCacheTokenArray,
{},
{}},
preparedModelCallback);

View file

@@ -312,8 +312,8 @@ ndk::ScopedAStatus Device::prepareModelWithConfig(
const std::shared_ptr<IPreparedModelCallback>& callback) {
const auto result = adapter::prepareModel(
kDevice, kExecutor, model, config.preference, config.priority, config.deadlineNs,
-config.modelCache, config.dataCache, config.cacheToken, config.compilationHints,
-config.extensionNameToPrefix, callback);
+config.modelCache, config.dataCache, utils::toVec(config.cacheToken),
+config.compilationHints, config.extensionNameToPrefix, callback);
if (!result.has_value()) {
const auto& [message, code] = result.error();
const auto aidlCode = utils::convert(code).value_or(ErrorStatus::GENERAL_FAILURE);