Fix caching interface according to vendor feedback.

- Instead of isCachingSupported returning a single boolean, switch to
  getNumberOfCacheFilesNeeded returning the number of cache files. This
  is to support use cases when driver needs more than one cache file for
  each type, or when driver does not need data cache.

- Instead of a separate saveToCache, pass cache info along with
  prepareModel_1_2 to save into cache as well as perform compilation.
  This is to avoid a potential additional copy of cache files.

Bug: 123780248
Test: VtsHalNeuralnetworksV1_xTargetTest with 1.2 sample driver
Test: VtsHalNeuralnetworksV1_xTargetTest with a test driver that can
      read and write cache entries
Change-Id: I921b7b8ccc3c66af19f6589f7213c6870d6f07bf
This commit is contained in:
Xusong Wang 2019-02-25 16:58:58 -08:00
parent b6ab74810a
commit b61ba1ed0b
9 changed files with 820 additions and 450 deletions

View file

@ -506,11 +506,11 @@ b9422a9aca84df1ff9623dc12c0562abce97716e28d63a965f2bfb88f9ad9607 android.hardwar
4cb139f729c29d8d6f4ecdab149c4feb571dad8a06e56cd57fcb52e70208bab4 android.hardware.media.c2@1.0::types 4cb139f729c29d8d6f4ecdab149c4feb571dad8a06e56cd57fcb52e70208bab4 android.hardware.media.c2@1.0::types
4880af120fc1640225abdc2c60bda6d79617d73484d5124913c7278af3b11e2d android.hardware.neuralnetworks@1.2::IBurstCallback 4880af120fc1640225abdc2c60bda6d79617d73484d5124913c7278af3b11e2d android.hardware.neuralnetworks@1.2::IBurstCallback
19877e466ad8c6ed42b38050b77bd010cf7800ff365fdc8574f45bbfda03a758 android.hardware.neuralnetworks@1.2::IBurstContext 19877e466ad8c6ed42b38050b77bd010cf7800ff365fdc8574f45bbfda03a758 android.hardware.neuralnetworks@1.2::IBurstContext
96249c852dabeefa3a9496ecdfc44681a071c665bfbf88527bf775c88bf1ab1b android.hardware.neuralnetworks@1.2::IDevice 363821d1b71147b896a08e2a570946db9b9d46f90d9f91b085bd8d3013a2b4d5 android.hardware.neuralnetworks@1.2::IDevice
92714960d1a53fc2ec557302b41c7cc93d2636d8364a44bd0f85be0c92927ff8 android.hardware.neuralnetworks@1.2::IExecutionCallback 92714960d1a53fc2ec557302b41c7cc93d2636d8364a44bd0f85be0c92927ff8 android.hardware.neuralnetworks@1.2::IExecutionCallback
83885d366f22ada42c00d8854f0b7e7ba4cf73ddf80bb0d8e168ce132cec57ea android.hardware.neuralnetworks@1.2::IPreparedModel 36e1064c869965dee533c537cefbe87e54db8bd8cd45be7e0e93e00e8a43863a android.hardware.neuralnetworks@1.2::IPreparedModel
e1c734d1545e1a4ae749ff1dd9704a8e594c59aea7c8363159dc258e93e0df3b android.hardware.neuralnetworks@1.2::IPreparedModelCallback e1c734d1545e1a4ae749ff1dd9704a8e594c59aea7c8363159dc258e93e0df3b android.hardware.neuralnetworks@1.2::IPreparedModelCallback
2ef1bab554ea484523b396e48033117dbbefc2f90269f9e7e3eb5a58ba50bfb9 android.hardware.neuralnetworks@1.2::types 39a6d7cf9bc7290bd90739e971ccad5f35f5cc0faea4a417b59f22c9ca9f1f2a android.hardware.neuralnetworks@1.2::types
cf7a4ba516a638f9b82a249c91fb603042c2d9ca43fd5aad9cf6c0401ed2a5d7 android.hardware.nfc@1.2::INfc cf7a4ba516a638f9b82a249c91fb603042c2d9ca43fd5aad9cf6c0401ed2a5d7 android.hardware.nfc@1.2::INfc
abf98c2ae08bf765db54edc8068e36d52eb558cff6706b6fd7c18c65a1f3fc18 android.hardware.nfc@1.2::types abf98c2ae08bf765db54edc8068e36d52eb558cff6706b6fd7c18c65a1f3fc18 android.hardware.nfc@1.2::types
4cb252dc6372a874aef666b92a6e9529915aa187521a700f0789065c3c702ead android.hardware.power.stats@1.0::IPowerStats 4cb252dc6372a874aef666b92a6e9529915aa187521a700f0789065c3c702ead android.hardware.power.stats@1.0::IPowerStats

View file

@ -52,6 +52,7 @@ using ::test_helper::for_each;
using ::test_helper::MixedTyped; using ::test_helper::MixedTyped;
using ::test_helper::MixedTypedExample; using ::test_helper::MixedTypedExample;
using ::test_helper::resize_accordingly; using ::test_helper::resize_accordingly;
using HidlToken = hidl_array<uint8_t, static_cast<uint32_t>(Constant::BYTE_SIZE_OF_CACHE_TOKEN)>;
template <typename T> template <typename T>
void copy_back_(std::map<int, std::vector<T>>* dst, const std::vector<RequestArgument>& ra, void copy_back_(std::map<int, std::vector<T>>* dst, const std::vector<RequestArgument>& ra,
@ -540,7 +541,8 @@ void PrepareModel(const sp<V1_2::IDevice>& device, const V1_2::Model& model,
sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback(); sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
ASSERT_NE(nullptr, preparedModelCallback.get()); ASSERT_NE(nullptr, preparedModelCallback.get());
Return<ErrorStatus> prepareLaunchStatus = device->prepareModel_1_2( Return<ErrorStatus> prepareLaunchStatus = device->prepareModel_1_2(
model, ExecutionPreference::FAST_SINGLE_ANSWER, preparedModelCallback); model, ExecutionPreference::FAST_SINGLE_ANSWER, hidl_vec<hidl_handle>(),
hidl_vec<hidl_handle>(), HidlToken(), preparedModelCallback);
ASSERT_TRUE(prepareLaunchStatus.isOk()); ASSERT_TRUE(prepareLaunchStatus.isOk());
ASSERT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(prepareLaunchStatus)); ASSERT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(prepareLaunchStatus));

View file

@ -113,44 +113,83 @@ interface IDevice extends @1.1::IDevice {
generates (ErrorStatus status, vec<bool> supportedOperations); generates (ErrorStatus status, vec<bool> supportedOperations);
/** /**
* Gets whether the driver supports compilation caching. * Gets the caching requirements of the driver implementation.
* *
* isCachingSupported indicates whether the driver supports compilation caching. * There are two types of cache file descriptors provided to the driver: model cache
* Even if so, the driver may still choose not to cache certain compiled models. * and data cache.
* *
* If the device reports the caching is not supported, the user may avoid calling * The data cache is for caching constant data, possibly including preprocessed
* IDevice::prepareModelFromCache and IPreparedModel::saveToCache. * and transformed tensor buffers. Any modification to the data cache should
* have no worse effect than generating bad output values at execution time.
*
* The model cache is for caching security-sensitive data such as compiled
* executable machine code in the device's native binary format. A modification
* to the model cache may affect the driver's execution behavior, and a malicious
* client could make use of this to execute beyond the granted permission. Thus,
* the driver must always check whether the model cache is corrupted before
* preparing the model from cache.
*
* getNumberOfCacheFilesNeeded returns how many of each type of cache files the driver
* implementation needs to cache a single prepared model. Returning 0 for both types
* indicates compilation caching is not supported by this driver. The driver may
* still choose not to cache certain compiled models even if it reports that caching
* is supported.
*
* If the device reports that caching is not supported, the user may avoid calling
* IDevice::prepareModelFromCache or providing cache file descriptors to
* IDevice::prepareModel_1_2.
* *
* @return status Error status of the call, must be: * @return status Error status of the call, must be:
* - NONE if successful * - NONE if successful
* - DEVICE_UNAVAILABLE if driver is offline or busy * - DEVICE_UNAVAILABLE if driver is offline or busy
* - GENERAL_FAILURE if there is an unspecified error * - GENERAL_FAILURE if there is an unspecified error
* @return supported A boolean indicating whether the driver supports compilation * @return numModelCache An unsigned integer indicating how many files for model cache
* caching. Even on returning true, the driver may still choose * the driver needs to cache a single prepared model. It must
* not to cache certain compiled models. * be less than or equal to Constant::MAX_NUMBER_OF_CACHE_FILES.
* @return numDataCache An unsigned integer indicating how many files for data cache
* the driver needs to cache a single prepared model. It must
* be less than or equal to Constant::MAX_NUMBER_OF_CACHE_FILES.
*/ */
isCachingSupported() generates (ErrorStatus status, bool supported); getNumberOfCacheFilesNeeded()
generates (ErrorStatus status, uint32_t numModelCache, uint32_t numDataCache);
/** /**
* Creates a prepared model for execution. * Asynchronously creates a prepared model for execution and optionally saves it
* into cache files.
* *
* prepareModel is used to make any necessary transformations or alternative * prepareModel is used to make any necessary transformations to or alternative
* representations to a model for execution, possibly including * representations to a model for execution, possibly including
* transformations on the constant data, optimization on the model's graph, * transformations on the constant data, optimization on the model's graph,
* or compilation into the device's native binary format. The model itself * or compilation into the device's native binary format. The model itself
* is not changed. * is not changed.
* *
* Optionally, caching information may be provided for the driver to save
* the prepared model to cache files for faster model compilation time
* when the same model preparation is requested in the future. There are
* two types of cache file handles provided to the driver: model cache
* and data cache. For more information on the two types of cache handles,
* refer to getNumberOfCacheFilesNeeded.
*
* The file descriptors must be opened with read and write permission. A file may
* have any size, and the corresponding file descriptor may have any offset. The
* driver must truncate a file to zero size before writing to that file. The file
* descriptors may be closed by the client once the asynchronous preparation has
* finished. The driver must dup a file descriptor if it wants to get access to
* the cache file later.
*
* The model is prepared asynchronously with respect to the caller. The * The model is prepared asynchronously with respect to the caller. The
* prepareModel function must verify the inputs to the prepareModel function * prepareModel function must verify the inputs to the preparedModel function
* are correct. If there is an error, prepareModel must immediately invoke * related to preparing the model (as opposed to saving the prepared model to
* cache) are correct. If there is an error, prepareModel must immediately invoke
* the callback with the appropriate ErrorStatus value and nullptr for the * the callback with the appropriate ErrorStatus value and nullptr for the
* IPreparedModel, then return with the same ErrorStatus. If the inputs to * IPreparedModel, then return with the same ErrorStatus. If the inputs to the
* the prepareModel function are valid and there is no error, prepareModel * prepareModel function that are related to preparing the model are valid and
* must launch an asynchronous task to prepare the model in the background, * there is no error, prepareModel must launch an asynchronous task
* and immediately return from prepareModel with ErrorStatus::NONE. If the * to prepare the model in the background, and immediately return from
* asynchronous task fails to launch, prepareModel must immediately invoke * prepareModel with ErrorStatus::NONE. If the asynchronous task fails to launch,
* the callback with ErrorStatus::GENERAL_FAILURE and nullptr for the * prepareModel must immediately invoke the callback with
* IPreparedModel, then return with ErrorStatus::GENERAL_FAILURE. * ErrorStatus::GENERAL_FAILURE and nullptr for the IPreparedModel, then return
* with ErrorStatus::GENERAL_FAILURE.
* *
* When the asynchronous task has finished preparing the model, it must * When the asynchronous task has finished preparing the model, it must
* immediately invoke the callback function provided as an input to * immediately invoke the callback function provided as an input to
@ -160,6 +199,14 @@ interface IDevice extends @1.1::IDevice {
* the callback object must be invoked with the appropriate ErrorStatus * the callback object must be invoked with the appropriate ErrorStatus
* value and nullptr for the IPreparedModel. * value and nullptr for the IPreparedModel.
* *
* Optionally, the driver may save the prepared model to cache during the
* asynchronous preparation. Any error that occurs when saving to cache must
* not affect the status of preparing the model. Even if the input arguments
* related to the cache may be invalid, or the driver may fail to save to cache,
* the prepareModel function must finish preparing the model. The driver
* may choose not to save to cache even if the caching information is
* provided and valid.
*
* The only information that may be unknown to the model at this stage is * The only information that may be unknown to the model at this stage is
* the shape of the tensors, which may only be known at execution time. As * the shape of the tensors, which may only be known at execution time. As
* such, some driver services may return partially prepared models, where * such, some driver services may return partially prepared models, where
@ -173,6 +220,26 @@ interface IDevice extends @1.1::IDevice {
* @param model The model to be prepared for execution. * @param model The model to be prepared for execution.
* @param preference Indicates the intended execution behavior of a prepared * @param preference Indicates the intended execution behavior of a prepared
* model. * model.
* @param modelCache A vector of handles with each entry holding exactly one
* cache file descriptor for the security-sensitive cache. The length of
* the vector must either be 0 indicating that caching information is not provided,
* or match the numModelCache returned from getNumberOfCacheFilesNeeded. The cache
* handles will be provided in the same order when retrieving the
* preparedModel from cache files with prepareModelFromCache.
* @param dataCache A vector of handles with each entry holding exactly one
* cache file descriptor for the constants' cache. The length of
* the vector must either be 0 indicating that caching information is not provided,
* or match the numDataCache returned from getNumberOfCacheFilesNeeded. The cache
* handles will be provided in the same order when retrieving the
* preparedModel from cache files with prepareModelFromCache.
* @param token A caching token of length Constant::BYTE_SIZE_OF_CACHE_TOKEN
* identifying the prepared model. The same token will be provided when retrieving
* the prepared model from the cache files with prepareModelFromCache.
* Tokens should be chosen to have a low rate of collision for a particular
* application. The driver cannot detect a collision; a collision will result
* in a failed execution or in a successful execution that produces incorrect
* output values. If both modelCache and dataCache are empty indicating that
* caching information is not provided, this token must be ignored.
* @param callback A callback object used to return the error status of * @param callback A callback object used to return the error status of
* preparing the model for execution and the prepared model if * preparing the model for execution and the prepared model if
* successful, nullptr otherwise. The callback object's notify function * successful, nullptr otherwise. The callback object's notify function
@ -182,9 +249,12 @@ interface IDevice extends @1.1::IDevice {
* - NONE if preparation task is successfully launched * - NONE if preparation task is successfully launched
* - DEVICE_UNAVAILABLE if driver is offline or busy * - DEVICE_UNAVAILABLE if driver is offline or busy
* - GENERAL_FAILURE if there is an unspecified error * - GENERAL_FAILURE if there is an unspecified error
* - INVALID_ARGUMENT if one of the input arguments is invalid * - INVALID_ARGUMENT if one of the input arguments related to preparing the
* model is invalid
*/ */
prepareModel_1_2(Model model, ExecutionPreference preference, prepareModel_1_2(Model model, ExecutionPreference preference,
vec<handle> modelCache, vec<handle> dataCache,
uint8_t[Constant:BYTE_SIZE_OF_CACHE_TOKEN] token,
IPreparedModelCallback callback) IPreparedModelCallback callback)
generates (ErrorStatus status); generates (ErrorStatus status);
@ -192,22 +262,17 @@ interface IDevice extends @1.1::IDevice {
* Creates a prepared model from cache files for execution. * Creates a prepared model from cache files for execution.
* *
* prepareModelFromCache is used to retrieve a prepared model directly from * prepareModelFromCache is used to retrieve a prepared model directly from
* cache files to avoid slow model compilation time. There are exactly two * cache files to avoid slow model compilation time. There are
* cache file descriptors provided to the driver: modelCache and dataCache. * two types of cache file handles provided to the driver: model cache
* and data cache. For more information on the two types of cache handles,
* refer to getNumberOfCacheFilesNeeded.
* *
* The dataCache is for caching constant data, possibly including preprocessed * The file descriptors must be opened with read and write permission. A file may
* and transformed tensor buffers. Any modification to the dataCache should * have any size, and the corresponding file descriptor may have any offset. The
* have no worse effect than generating bad output values at execution time. * driver must truncate a file to zero size before writing to that file. The file
* * descriptors may be closed by the client once the asynchronous preparation has
* The modelCache is for caching security-sensitive data such as compiled * finished. The driver must dup a file descriptor if it wants to get access to
* executable machine code in the device's native binary format. A modification * the cache file later.
* to the modelCache may affect the driver's execution behavior, and a malicious
* client could make use of this to execute beyond the granted permission. Thus,
* the driver must always check whether the modelCache is corrupted before preparing
* the model from cache.
*
* The two file descriptors may be closed by the client once the asynchronous
* preparation has finished. The driver has to copy all the data it needs.
* *
* The model is prepared asynchronously with respect to the caller. The * The model is prepared asynchronously with respect to the caller. The
* prepareModelFromCache function must verify the inputs to the * prepareModelFromCache function must verify the inputs to the
@ -241,13 +306,17 @@ interface IDevice extends @1.1::IDevice {
* used with different shapes of inputs on different (possibly concurrent) * used with different shapes of inputs on different (possibly concurrent)
* executions. * executions.
* *
* @param modelCache A handle holding exactly one cache file descriptor for the * @param modelCache A vector of handles with each entry holding exactly one
* security-sensitive cache. * cache file descriptor for the security-sensitive cache. The length of
* @param dataCache A handle holding exactly one cache file descriptor for the * the vector must match the numModelCache returned from getNumberOfCacheFilesNeeded.
* constants' cache. * The cache handles will be provided in the same order as with prepareModel_1_2.
* @param dataCache A vector of handles with each entry holding exactly one
* cache file descriptor for the constants' cache. The length of the vector
* must match the numDataCache returned from getNumberOfCacheFilesNeeded.
* The cache handles will be provided in the same order as with prepareModel_1_2.
* @param token A caching token of length Constant::BYTE_SIZE_OF_CACHE_TOKEN * @param token A caching token of length Constant::BYTE_SIZE_OF_CACHE_TOKEN
* identifying the prepared model. It is the same token provided when saving * identifying the prepared model. It is the same token provided when saving
* the cache files with IPreparedModel::saveToCache. Tokens should be chosen * the cache files with prepareModel_1_2. Tokens should be chosen
* to have a low rate of collision for a particular application. The driver * to have a low rate of collision for a particular application. The driver
* cannot detect a collision; a collision will result in a failed execution * cannot detect a collision; a collision will result in a failed execution
* or in a successful execution that produces incorrect output values. * or in a successful execution that produces incorrect output values.
@ -263,7 +332,7 @@ interface IDevice extends @1.1::IDevice {
* unspecified error * unspecified error
* - INVALID_ARGUMENT if one of the input arguments is invalid * - INVALID_ARGUMENT if one of the input arguments is invalid
*/ */
prepareModelFromCache(handle modelCache, handle dataCache, prepareModelFromCache(vec<handle> modelCache, vec<handle> dataCache,
uint8_t[Constant:BYTE_SIZE_OF_CACHE_TOKEN] token, uint8_t[Constant:BYTE_SIZE_OF_CACHE_TOKEN] token,
IPreparedModelCallback callback) IPreparedModelCallback callback)
generates (ErrorStatus status); generates (ErrorStatus status);

View file

@ -157,62 +157,4 @@ interface IPreparedModel extends @1.0::IPreparedModel {
fmq_sync<FmqRequestDatum> requestChannel, fmq_sync<FmqRequestDatum> requestChannel,
fmq_sync<FmqResultDatum> resultChannel) fmq_sync<FmqResultDatum> resultChannel)
generates (ErrorStatus status, IBurstContext context); generates (ErrorStatus status, IBurstContext context);
/*
* Saves the prepared model to cache files.
*
* saveToCache is used to save a prepared model to cache files for faster
* model compilation time when the same model preparation is requested in
* the future. There are exactly two cache file descriptors provided to the
* driver: modelCache and dataCache.
*
* The dataCache is for caching constant data, possibly including preprocessed
* and transformed tensor buffers. Any modification to the dataCache should
* have no worse effect than generating bad output values at execution time.
*
* The modelCache is for caching security-sensitive data such as compiled
* executable machine code in the device's native binary format. A modification
* to the modelCache may affect the driver's execution behavior, and a malicious
* client could make use of this to execute beyond the granted permission. Thus,
* the driver must always check whether the modelCache is corrupted before preparing
* the model from cache.
*
* The two file descriptors must point to two zero-length files with offset
* positioned at the beginning of the file. The file descriptors may be closed
* by the client once the method has returned.
*
* If the driver decides not to save the prepared model without looking at the
* input arguments to the saveToCache function, saveToCache must return with
* ErrorStatus::GENERAL_FAILURE. Otherwise, the saveToCache function must verify
* the input arguments to the saveToCache function are valid, and return with
* ErrorStatus::INVALID_ARGUMENT if not. If the inputs are valid but the driver
* could not save the prepared model, saveToCache must return with the appropriate
* ErrorStatus. Otherwise, it must write the cache files and return
* ErrorStatus::NONE. Unless saveToCache returns ErrorStatus::NONE, the contents
* of the cache files are undefined.
*
* @param modelCache A handle holding exactly one cache file descriptor for the
* security-sensitive cache.
* @param dataCache A handle holding exactly one cache file descriptor for the
* constants' cache.
* @param token A caching token of length Constant::BYTE_SIZE_OF_CACHE_TOKEN
* identifying the prepared model. The same token will be provided
* when retrieving the prepared model from cache files with
* IDevice::prepareModelFromCache. Tokens should be chosen to have
* a low rate of collision for a particular application. The driver
* cannot detect a collision; a collision will result in a failed
* execution or in a successful execution that produces incorrect
* output values.
* @return status Error status of saveToCache, must be:
* - NONE if saveToCache is performed successfully
* - DEVICE_UNAVAILABLE if driver is offline or busy
* - GENERAL_FAILURE if the driver could not save the
* prepared model or if there is an unspecified error
* - INVALID_ARGUMENT if one of the input arguments is invalid,
* unless the driver decides not to save the prepared model
* without looking at the input arguments
*/
saveToCache(handle modelCache, handle dataCache,
uint8_t[Constant:BYTE_SIZE_OF_CACHE_TOKEN] token)
generates (ErrorStatus status);
}; };

View file

@ -30,6 +30,11 @@ enum Constant : uint32_t {
* The byte size of the cache token. * The byte size of the cache token.
*/ */
BYTE_SIZE_OF_CACHE_TOKEN = 32, BYTE_SIZE_OF_CACHE_TOKEN = 32,
/**
* The maximum number of files for each type of cache in compilation caching.
*/
MAX_NUMBER_OF_CACHE_FILES = 32,
}; };
enum OperandType : @1.0::OperandType { enum OperandType : @1.0::OperandType {

View file

@ -77,10 +77,15 @@ TEST_F(NeuralnetworksHidlTest, GetDeviceSupportedExtensionsTest) {
EXPECT_TRUE(ret.isOk()); EXPECT_TRUE(ret.isOk());
} }
// isCachingSupported test // getNumberOfCacheFilesNeeded test
TEST_F(NeuralnetworksHidlTest, IsCachingSupported) { TEST_F(NeuralnetworksHidlTest, getNumberOfCacheFilesNeeded) {
Return<void> ret = device->isCachingSupported( Return<void> ret = device->getNumberOfCacheFilesNeeded(
[](ErrorStatus status, bool) { EXPECT_EQ(ErrorStatus::NONE, status); }); [](ErrorStatus status, uint32_t numModelCache, uint32_t numDataCache) {
EXPECT_EQ(ErrorStatus::NONE, status);
EXPECT_LE(numModelCache,
static_cast<uint32_t>(Constant::MAX_NUMBER_OF_CACHE_FILES));
EXPECT_LE(numDataCache, static_cast<uint32_t>(Constant::MAX_NUMBER_OF_CACHE_FILES));
});
EXPECT_TRUE(ret.isOk()); EXPECT_TRUE(ret.isOk());
} }
} // namespace functional } // namespace functional

File diff suppressed because it is too large Load diff

View file

@ -33,6 +33,7 @@ namespace functional {
using ::android::hardware::neuralnetworks::V1_2::implementation::ExecutionCallback; using ::android::hardware::neuralnetworks::V1_2::implementation::ExecutionCallback;
using ::android::hardware::neuralnetworks::V1_2::implementation::PreparedModelCallback; using ::android::hardware::neuralnetworks::V1_2::implementation::PreparedModelCallback;
using HidlToken = hidl_array<uint8_t, static_cast<uint32_t>(Constant::BYTE_SIZE_OF_CACHE_TOKEN)>;
///////////////////////// UTILITY FUNCTIONS ///////////////////////// ///////////////////////// UTILITY FUNCTIONS /////////////////////////
@ -54,7 +55,8 @@ static void validatePrepareModel(const sp<IDevice>& device, const std::string& m
sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback(); sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
ASSERT_NE(nullptr, preparedModelCallback.get()); ASSERT_NE(nullptr, preparedModelCallback.get());
Return<ErrorStatus> prepareLaunchStatus = Return<ErrorStatus> prepareLaunchStatus =
device->prepareModel_1_2(model, preference, preparedModelCallback); device->prepareModel_1_2(model, preference, hidl_vec<hidl_handle>(),
hidl_vec<hidl_handle>(), HidlToken(), preparedModelCallback);
ASSERT_TRUE(prepareLaunchStatus.isOk()); ASSERT_TRUE(prepareLaunchStatus.isOk());
ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, static_cast<ErrorStatus>(prepareLaunchStatus)); ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, static_cast<ErrorStatus>(prepareLaunchStatus));

View file

@ -37,6 +37,7 @@ namespace functional {
using ::android::hardware::neuralnetworks::V1_2::implementation::ExecutionCallback; using ::android::hardware::neuralnetworks::V1_2::implementation::ExecutionCallback;
using ::android::hardware::neuralnetworks::V1_2::implementation::PreparedModelCallback; using ::android::hardware::neuralnetworks::V1_2::implementation::PreparedModelCallback;
using ::android::hidl::memory::V1_0::IMemory; using ::android::hidl::memory::V1_0::IMemory;
using HidlToken = hidl_array<uint8_t, static_cast<uint32_t>(Constant::BYTE_SIZE_OF_CACHE_TOKEN)>;
using test_helper::for_all; using test_helper::for_all;
using test_helper::MixedTyped; using test_helper::MixedTyped;
using test_helper::MixedTypedExample; using test_helper::MixedTypedExample;
@ -66,7 +67,8 @@ static void createPreparedModel(const sp<IDevice>& device, const Model& model,
sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback(); sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
ASSERT_NE(nullptr, preparedModelCallback.get()); ASSERT_NE(nullptr, preparedModelCallback.get());
Return<ErrorStatus> prepareLaunchStatus = device->prepareModel_1_2( Return<ErrorStatus> prepareLaunchStatus = device->prepareModel_1_2(
model, ExecutionPreference::FAST_SINGLE_ANSWER, preparedModelCallback); model, ExecutionPreference::FAST_SINGLE_ANSWER, hidl_vec<hidl_handle>(),
hidl_vec<hidl_handle>(), HidlToken(), preparedModelCallback);
ASSERT_TRUE(prepareLaunchStatus.isOk()); ASSERT_TRUE(prepareLaunchStatus.isOk());
ASSERT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(prepareLaunchStatus)); ASSERT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(prepareLaunchStatus));