NNAPI HAL: Change IEvent to explicit callbacks

IEvent was a synchronization primitive which caused some confusion
in the interface. Originally the event object was paired with an
asynchronous task, and the asynchronous task would signal this event
when the corresponding output was ready to be used.

In the case of IDevice::prepareModel, the function call would return an
IPreparedModel object that was not guaranteed to be prepared until the
runtime had returned from waiting on the corresponding event object.
The event object has been changed to two explicit callbacks--
IPreparedModelCallback and IExecutionCallback. Now,
IDevice::prepareModel no longer returns an unfinished IPreparedModel;
instead, it will pass the IPreparedModel object to the runtime through
IPreparedModelCallback::notify. When the runtime retrieves the
IPreparedModel object, the asynchronous task has already finished
preparing the model.

The two callbacks are used for different purposes. Each has its own
version of notify to pass the data back to the runtime:
* IPreparedModelCallback::notify(ErrorStatus, IPreparedModel)
* IExecutionCallback::notify(ErrorStatus)

Bug: 63905942
Test: mm, vts, ml/nn/runtime/tests
Change-Id: I0c88cd262ba762e0af15e9da31ebe813a5d150b2
This commit is contained in:
Michael Butler 2017-09-22 13:26:12 -07:00 committed by Steven Moreland
parent 7e0286404a
commit cf22a57c1a
14 changed files with 779 additions and 498 deletions

View file

@ -5,8 +5,9 @@ filegroup {
srcs: [
"types.hal",
"IDevice.hal",
"IEvent.hal",
"IExecutionCallback.hal",
"IPreparedModel.hal",
"IPreparedModelCallback.hal",
],
}
@ -20,8 +21,9 @@ genrule {
out: [
"android/hardware/neuralnetworks/1.0/types.cpp",
"android/hardware/neuralnetworks/1.0/DeviceAll.cpp",
"android/hardware/neuralnetworks/1.0/EventAll.cpp",
"android/hardware/neuralnetworks/1.0/ExecutionCallbackAll.cpp",
"android/hardware/neuralnetworks/1.0/PreparedModelAll.cpp",
"android/hardware/neuralnetworks/1.0/PreparedModelCallbackAll.cpp",
],
}
@ -40,16 +42,21 @@ genrule {
"android/hardware/neuralnetworks/1.0/BnHwDevice.h",
"android/hardware/neuralnetworks/1.0/BpHwDevice.h",
"android/hardware/neuralnetworks/1.0/BsDevice.h",
"android/hardware/neuralnetworks/1.0/IEvent.h",
"android/hardware/neuralnetworks/1.0/IHwEvent.h",
"android/hardware/neuralnetworks/1.0/BnHwEvent.h",
"android/hardware/neuralnetworks/1.0/BpHwEvent.h",
"android/hardware/neuralnetworks/1.0/BsEvent.h",
"android/hardware/neuralnetworks/1.0/IExecutionCallback.h",
"android/hardware/neuralnetworks/1.0/IHwExecutionCallback.h",
"android/hardware/neuralnetworks/1.0/BnHwExecutionCallback.h",
"android/hardware/neuralnetworks/1.0/BpHwExecutionCallback.h",
"android/hardware/neuralnetworks/1.0/BsExecutionCallback.h",
"android/hardware/neuralnetworks/1.0/IPreparedModel.h",
"android/hardware/neuralnetworks/1.0/IHwPreparedModel.h",
"android/hardware/neuralnetworks/1.0/BnHwPreparedModel.h",
"android/hardware/neuralnetworks/1.0/BpHwPreparedModel.h",
"android/hardware/neuralnetworks/1.0/BsPreparedModel.h",
"android/hardware/neuralnetworks/1.0/IPreparedModelCallback.h",
"android/hardware/neuralnetworks/1.0/IHwPreparedModelCallback.h",
"android/hardware/neuralnetworks/1.0/BnHwPreparedModelCallback.h",
"android/hardware/neuralnetworks/1.0/BpHwPreparedModelCallback.h",
"android/hardware/neuralnetworks/1.0/BsPreparedModelCallback.h",
],
}

View file

@ -16,8 +16,7 @@
package android.hardware.neuralnetworks@1.0;
import IEvent;
import IPreparedModel;
import IPreparedModelCallback;
/**
* This interface represents a device driver.
@ -37,10 +36,9 @@ interface IDevice {
/**
* Gets the supported operations in a model.
*
* getSupportedSubgraph provides a more nuanced indication on whether a
* model is able to be compiled by the driver. Having the entire model
* allows for additional information such as tensor shapes to inputs or
* tensor strides, information which is not known in "initialize".
* getSupportedOperations indicates which operations of a model are fully
* supported by the vendor driver. If an operation may not be supported for
* any reason, getSupportedOperations must return false for that operation.
*
* @param model A model whose operations--and their corresponding
* operands--are to be verified by the driver.
@ -48,7 +46,7 @@ interface IDevice {
* - NONE if successful
* - DEVICE_UNAVAILABLE if driver is offline or busy
* - GENERAL_FAILURE if there is an unspecified error
* - INVALID_ARGUMENT when provided model is invalid
* - INVALID_ARGUMENT if provided model is invalid
* @return supportedOperations A list of supported operations, where true
* indicates the operation is supported and
* false indicates the operation is not
@ -60,29 +58,60 @@ interface IDevice {
generates (ErrorStatus status, vec<bool> supportedOperations);
/**
* Prepares a model for execution.
* Creates a prepared model for execution.
*
* prepareModel is used to make any necessary transformations or alternative
* representations to a model for execution, possible including
* representations to a model for execution, possibly including
* transformations on the constant data, optimization on the model's graph,
* or compilation into the device's native binary format.
* or compilation into the device's native binary format. The model itself
* is not changed.
*
* The model is prepared asynchronously with respect to the caller. The
* prepareModel function must verify the inputs to the prepareModel function
* are correct. If there is an error, prepareModel must immediately invoke
* the callback with the appropriate ErrorStatus value and nullptr for the
* IPreparedModel, then return with the same ErrorStatus. If the inputs to
* the prepareModel function are valid and there is no error, prepareModel
* must launch an asynchronous task to prepare the model in the background,
* and immediately return from prepareModel with ErrorStatus::NONE. If the
* asynchronous task fails to launch, prepareModel must immediately invoke
* the callback with ErrorStatus::GENERAL_FAILURE and nullptr for the
* IPreparedModel, then return with ErrorStatus::GENERAL_FAILURE.
*
* When the asynchronous task has finished preparing the model, it must
* immediately invoke the callback function provided as an input to
* prepareModel. If the model was prepared successfully, the callback object
* must be invoked with an error status of ErrorStatus::NONE and the
* produced IPreparedModel object. If an error occurred preparing the model,
* the callback object must be invoked with the appropriate ErrorStatus
* value and nullptr for the IPreparedModel.
*
* The only information that may be unknown to the model at this stage is
* the shape of the tensors, which may only be known at execution time.
* the shape of the tensors, which may only be known at execution time. As
* such, some driver services may return partially prepared models, where
* the prepared model can only be finished when it is paired with a set of
* inputs to the model. Note that the same prepared model object can be
* used with different shapes of inputs on different (possibly concurrent)
* executions.
*
* Multiple threads can call prepareModel on the same model concurrently.
*
* @param model The model to be prepared for execution.
* @param event A synchronization callback that must be signaled once the
* execution has finished.
* @return status Error status of the call, must be:
* @param callback A callback object used to return the error status of
* preparing the model for execution and the prepared model
* if successful, nullptr otherwise. The callback object's
* notify function must be called exactly once, even if the
* model could not be prepared.
* @return status Error status of launching a task which prepares the model
* in the background; must be:
* - NONE if preparation task is successfully launched
* - DEVICE_UNAVAILABLE if driver is offline or busy
* - GENERAL_FAILURE if there is an unspecified error
* - INVALID_ARGUMENT when one of the input arguments is
* - INVALID_ARGUMENT if one of the input arguments is
* invalid
* @return preparedModel A handle to the resultant prepared model.
*/
prepareModel(Model model, IEvent event)
generates (ErrorStatus status, IPreparedModel preparedModel);
prepareModel(Model model, IPreparedModelCallback callback)
generates (ErrorStatus status);
/**
* Returns the current status of a driver.

View file

@ -1,45 +0,0 @@
/*
* Copyright (C) 2017 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package android.hardware.neuralnetworks@1.0;
/**
* The IEvent interface is a callback object passed by the
* Neuralnetworks runtime to the vendor service. It is used as a
* synchronization primitive between one or more runtime threads and a
* single asynchronous vendor thread. An event object is passed as an
* argument to a HIDL call that is expected to take a non-trivial
* amount of time. When the asynchronous execution thread has
* completed its computation, it must call "notify" on the event to
* indicate to the Neuralnetworks runtime whether the computation was
* successful or not, and that the corresponding output is ready to be
* consumed if the execution was successful.
*/
interface IEvent {
/**
* IEvent::notify is called by the server thread (i.e., the thread doing
* the work) to mark the event as completed so that any threads requiring
* the corresponding output can continue executing.
*
* @param status Error status returned from the asynchronous task, must be:
* - NONE if asynchronous task was successful
* - DEVICE_UNAVAILABLE if driver is offline or busy
* - GENERAL_FAILURE if the asynchronous task resulted in an
* unspecified error
*/
oneway notify(ErrorStatus status);
};

View file

@ -0,0 +1,44 @@
/*
* Copyright (C) 2017 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package android.hardware.neuralnetworks@1.0;
/**
* IExecutionCallback must be used to return the error status result from an
* execution asynchronously launched from IPreparedModel::execute.
*/
interface IExecutionCallback {
/**
* notify must be invoked immediately after the asynchronous task has
* finished performing the execution. notify must be provided with the
* ErrorStatus resulting from the execution. If the asynchronous task
* is not launched, notify must be invoked with the appropriate error.
*
* @return param Error status returned from launching the asynchronous task
* (if the launch fails) or from the asynchronous task itself
* (if the launch succeeds). Must be:
* - NONE if the asynchronous execution was successful
* - DEVICE_UNAVAILABLE if driver is offline or busy
* - GENERAL_FAILURE if the asynchronous task resulted in an
* unspecified error
* - OUTPUT_INSUFFICIENT_SIZE if provided output buffer is
* not large enough to store the resultant values
* - INVALID_ARGUMENT if one of the input arguments to
* prepareModel is invalid
*/
oneway notify(ErrorStatus status);
};

View file

@ -16,7 +16,7 @@
package android.hardware.neuralnetworks@1.0;
import IEvent;
import IExecutionCallback;
/**
* IPreparedModel describes a model that has been prepared for execution and
@ -24,28 +24,42 @@ import IEvent;
*/
interface IPreparedModel {
/**
* Spawns an asynchronous execution on a prepared model.
* Launches an asynchronous execution on a prepared model.
*
* Executions are asynchronous with respect to the Neuralnetworks runtime.
* To support this, IPreparedModel::execute must spawn a new task and return
* whether the task was successfully launched. The asynchronous task which
* performs the execution must call event's IEvent::notify with the status
* of the execution immediately after the execution has finished.
* The execution is performed asynchronously with respect to the caller.
* execute must verify the inputs to the function are correct. If there is
* an error, execute must immediately invoke the callback with the
* appropriate ErrorStatus value, then return with the same ErrorStatus. If
* the inputs to the function are valid and there is no error, execute must
* launch an asynchronous task to perform the execution in the background,
* and immediately return with ErrorStatus::NONE. If the asynchronous task
* fails to launch, execute must immediately invoke the callback with
* ErrorStatus::GENERAL_FAILURE, then return with
* ErrorStatus::GENERAL_FAILURE.
*
* Multiple threads can call this execute function concurrently.
* When the asynchronous task has finished its execution, it must
* immediately invoke the callback object provided as an input to the
* execute function. This callback must be provided with the ErrorStatus of
* the execution.
*
* Multiple threads can call the execute function on the same IPreparedModel
* object concurrently with different requests.
*
* @param request The input and output information on which the prepared
* model is to be executed.
* @param event A callback used for synchronization that must be signaled
* once the execution has finished.
* @param callback A callback object used to return the error status of
* the execution. The callback object's notify function must
* be called exactly once, even if the execution was
* unsuccessful.
* @return status Error status of the call, must be:
* - NONE if task is successfully launched
* - DEVICE_UNAVAILABLE if driver is offline or busy
* - GENERAL_FAILURE if there is an unspecified error
* - OUTPUT_INSUFFICIENT_SIZE if provided output buffer is
* not large enough to store the resultant values
* - INVALID_ARGUMENT when one of the input arguments is
* - INVALID_ARGUMENT if one of the input arguments is
* invalid
*/
execute(Request request, IEvent event) generates (ErrorStatus status);
execute(Request request, IExecutionCallback callback)
generates (ErrorStatus status);
};

View file

@ -0,0 +1,53 @@
/*
* Copyright (C) 2017 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package android.hardware.neuralnetworks@1.0;
import IPreparedModel;
/**
* IPreparedModelCallback must be used to return a prepared model produced by an
* asynchronous task launched from IDevice::prepareModel.
*/
interface IPreparedModelCallback {
/**
* notify must be invoked immediately after the asynchronous task holding
* this callback has finished preparing the model. If the model was
* successfully prepared, notify must be invoked with ErrorStatus::NONE and
* the prepared model. If the model was not able to be successfully
* prepared, notify must be invoked with the appropriate ErrorStatus and
* nullptr as the IPreparedModel. If the asynchronous task holding this
* callback fails to launch or if the model provided to
* IDevice::prepareModel is invalid, notify must be invoked with the
* appropriate error as well as nullptr for the IPreparedModel.
*
* @param status Error status returned from the asynchronous model
* preparation task; must be:
* - NONE if the asynchronous task successfully prepared the
* model
* - DEVICE_UNAVAILABLE if driver is offline or busy
* - GENERAL_FAILURE if the asynchronous task resulted in an
* unspecified error
* - INVALID_ARGUMENT if one of the input arguments to
* prepareModel is invalid
* @param preparedModel A model that has been asynchronously prepared for
* execution. If the model was unable to be prepared
* due to an error, nullptr must be passed in place of
* the IPreparedModel object.
*/
oneway notify(ErrorStatus status, IPreparedModel preparedModel);
};

View file

@ -17,7 +17,7 @@
cc_test {
name: "VtsHalNeuralnetworksV1_0TargetTest",
srcs: [
"Event.cpp",
"Callbacks.cpp",
"GeneratedTestHarness.cpp",
"Models.cpp",
"VtsHalNeuralnetworksV1_0TargetTest.cpp",

View file

@ -0,0 +1,127 @@
#include "Callbacks.h"
#include <android-base/logging.h>
namespace android {
namespace hardware {
namespace neuralnetworks {
namespace V1_0 {
namespace implementation {
CallbackBase::CallbackBase() : mNotified(false) {}
CallbackBase::~CallbackBase() {
// Note that we cannot call CallbackBase::join_thread from here:
// CallbackBase is intended to be reference counted, and it is possible that
// the reference count drops to zero in the bound thread, causing the
// bound thread to call this destructor. If a thread tries to join
// itself, it throws an exception, producing a message like the
// following:
//
// terminating with uncaught exception of type std::__1::system_error:
// thread::join failed: Resource deadlock would occur
}
void CallbackBase::wait() {
std::unique_lock<std::mutex> lock(mMutex);
mCondition.wait(lock, [this]{return mNotified;});
join_thread_locked();
}
bool CallbackBase::on_finish(std::function<bool(void)> post_work) {
std::lock_guard<std::mutex> lock(mMutex);
if (mPostWork != nullptr) {
LOG(ERROR) << "CallbackBase::on_finish -- a post-work function has already been bound to "
"this callback object";
return false;
}
if (post_work == nullptr) {
LOG(ERROR) << "CallbackBase::on_finish -- the new post-work function is invalid";
return false;
}
mPostWork = std::move(post_work);
return true;
}
bool CallbackBase::bind_thread(std::thread&& asyncThread) {
std::lock_guard<std::mutex> lock(mMutex);
if (mThread.joinable()) {
LOG(ERROR) << "CallbackBase::bind_thread -- a thread has already been bound to this "
"callback object";
return false;
}
if (!asyncThread.joinable()) {
LOG(ERROR) << "CallbackBase::bind_thread -- the new thread is not joinable";
return false;
}
mThread = std::move(asyncThread);
return true;
}
void CallbackBase::join_thread() {
std::lock_guard<std::mutex> lock(mMutex);
join_thread_locked();
}
void CallbackBase::notify() {
{
std::lock_guard<std::mutex> lock(mMutex);
mNotified = true;
if (mPostWork != nullptr) {
bool success = mPostWork();
if (!success) {
LOG(ERROR) << "CallbackBase::notify -- post work failed";
}
}
}
mCondition.notify_all();
}
// Joins the bound thread, if one exists. Caller must already hold mMutex.
void CallbackBase::join_thread_locked() {
    if (!mThread.joinable()) {
        return;
    }
    mThread.join();
}
PreparedModelCallback::PreparedModelCallback() :
mErrorStatus(ErrorStatus::GENERAL_FAILURE), mPreparedModel(nullptr) {}
PreparedModelCallback::~PreparedModelCallback() {}
Return<void> PreparedModelCallback::notify(ErrorStatus errorStatus,
const sp<IPreparedModel>& preparedModel) {
mErrorStatus = errorStatus;
mPreparedModel = preparedModel;
CallbackBase::notify();
return Void();
}
ErrorStatus PreparedModelCallback::getStatus() {
wait();
return mErrorStatus;
}
sp<IPreparedModel> PreparedModelCallback::getPreparedModel() {
wait();
return mPreparedModel;
}
ExecutionCallback::ExecutionCallback() : mErrorStatus(ErrorStatus::GENERAL_FAILURE) {}
ExecutionCallback::~ExecutionCallback() {}
Return<void> ExecutionCallback::notify(ErrorStatus errorStatus) {
mErrorStatus = errorStatus;
CallbackBase::notify();
return Void();
}
ErrorStatus ExecutionCallback::getStatus() {
wait();
return mErrorStatus;
}
} // namespace implementation
} // namespace V1_0
} // namespace neuralnetworks
} // namespace hardware
} // namespace android

View file

@ -0,0 +1,319 @@
#ifndef ANDROID_HARDWARE_NEURALNETWORKS_V1_0_CALLBACKS_H
#define ANDROID_HARDWARE_NEURALNETWORKS_V1_0_CALLBACKS_H
#include <android/hardware/neuralnetworks/1.0/IExecutionCallback.h>
#include <android/hardware/neuralnetworks/1.0/IPreparedModelCallback.h>
#include <chrono>
#include <condition_variable>
#include <functional>
#include <hidl/MQDescriptor.h>
#include <hidl/Status.h>
#include <mutex>
#include <thread>
namespace android {
namespace hardware {
namespace neuralnetworks {
namespace V1_0 {
namespace implementation {
using ::android::hardware::hidl_array;
using ::android::hardware::hidl_memory;
using ::android::hardware::hidl_string;
using ::android::hardware::hidl_vec;
using ::android::hardware::Return;
using ::android::hardware::Void;
using ::android::sp;
/**
* The CallbackBase class is used internally by the NeuralNetworks runtime to
* synchronize between different threads. An asynchronous task is launched
* paired with a callback object. When a client thread requires the output being
* generated by the asynchronous task, the client thread can wait for the result
* and be blocked until it has completed or a timeout condition has been
* reached. Any wait* may safely be called concurrently, even on the same
* callback object. When the asynchronous task has finished its workload or has
* failed to launch, it must immediately call "notify", awakening any client
* threads waiting on the callback object.
*
* The CallbackBase class implements some of the base synchronization common to
* both PrepareModelCallback and ExecutionCallback. For consistency, any HIDL
* callback class must inherit from CallbackBase as well as the HIDL callback
* interface it implements.
*
* This class exists to enable synchronization across HIDL. When synchronization
* is only required in the same process, consider using std::future, std::mutex,
* std::condition_variable, or std::experimental::latch instead.
*/
class CallbackBase {
public:
CallbackBase();
~CallbackBase();
/**
* CallbackBase::wait blocks until notify has been called on the callback
* object.
*/
void wait();
/**
* CallbackBase::wait_for blocks until notify has been called on the
* callback object or the time duration from the time the wait_for function
* was called has expired, whichever comes first.
*
* @return Status std::cv_status::no_timeout if the callback was notified
* before the time duration expired, std::cv_status::timeout
* otherwise.
*/
template<class Rep, class Period>
std::cv_status wait_for(const std::chrono::duration<Rep,Period>& timeout_duration);
/**
* CallbackBase::on_finish binds a function to the callback object. This
* bound function will be executed when CallbackBase::notify is called,
* before any calls to wait* return. (Note that CallbackBase::wait_for can
* return std::cv_status::timeout before CallbackBase::notify is called for
* the first time, and hence before the bound function is executed.)
*
* The bound function must not synchronize with or otherwise access the
* callback object it is bound to, as this could cause a deadlock.
*
* CallbackBase::on_finish can be called at most once on a given callback
* object, and the call to CallbackBase::on_finish must finish before
* CallbackBase::notify is called.
*
* @param post_work Function to be invoked the first time
* CallbackBase::notify is called. Must have a target --
* i.e., must not compare equal to nullptr. post_work
* returns true if it successfully completes, false if it
* fails.
* @return bool True if the function was successfully bound, false if
* unsuccessful.
*
* TODO: Why does the return value of the callback matter?
*/
bool on_finish(std::function<bool(void)> post_work);
/**
* CallbackBase::bind_thread binds a thread to the event for later use by
* CallbackBase::join_thread.
*
* The thread must be passed using std::move.
*
* Once a thread is bound with CallbackBase::bind_thread, the client code
* should ensure that one of the following occurs before the event is
* destroyed:
* - CallbackBase::join_thread has been called.
* - CallbackBase::wait has been called.
* - CallbackBase::wait_for has been called and returned other than
* std::cv_status::no_timeout.
*
* The bound thread shall not call any CallbackBase method with the
* exception of CallbackBase::notify, which it must call when the thread has
* finished its computation.
*
* CallbackBase::bind_thread can be called at most once on a given callback
* object.
*
* @param asyncThread Thread to be bound to the callback object. The thread
* object must represent a thread of execution -- i.e.,
* asyncThread.joinable() must be true.
* @return bool True if successful, false if thread was not properly bound.
*/
bool bind_thread(std::thread&& asyncThread);
/**
* CallbackBase::join_thread ensures that the thread (if any) bound to this
* event with CallbackBase::bind_thread has fully finished and cleaned its
* resources. It is legal to call this function multiple times, concurrently
* or sequentially.
*/
void join_thread();
protected:
/**
* CallbackBase::notify enables all prior and future wait* calls on the
* callback object to proceed. The call to CallbackBase::notify happens
* before any wait* calls on this callback object return (except in the case
* of wait_for timing out). The asynchronous call the callback object is
* paired with must ensure that any update to state that should be visible
* to the caller of wait* happens before the call to CallbackBase::notify.
*
* CallbackBase::notify must be called exactly once on a given callback
* object.
*/
void notify();
private:
// Same as CallbackBase::join_thread but assumes we already hold a lock on
// mMutex.
void join_thread_locked();
bool mNotified;
std::mutex mMutex;
std::condition_variable mCondition;
std::function<bool(void)> mPostWork;
std::thread mThread;
};
/**
* The PreparedModelCallback class is used to receive the error status of
* preparing a model as well as the prepared model from a task executing
* asynchronously with respect to the runtime. If a calling thread calls wait*
* or get* on a PreparedModelCallback object and the corresponding asynchronous
* task has not finished preparing the model, the calling thread will block
* until the asynchronous task has called notify. For more information on the
* synchronization behavior, refer to the CallbackBase class.
*
* This class inherits the basic blocking and signaling calls from
* CallbackBase, and implements the HIDL notify call from
* IPreparedModelCallback. This callback object is passed as an argument to
* IDevice::prepareModel.
*/
class PreparedModelCallback : public CallbackBase, public IPreparedModelCallback {
public:
PreparedModelCallback();
~PreparedModelCallback() override;
/**
* IPreparedModelCallback::notify marks the callback object with the return
* status of the asynchronous model preparation along with the prepared
* model, and calls CallbackBase::notify, enabling all prior and future
* wait* calls on the PreparedModelCallback object to proceed. For more
* information on the synchronization behavior, refer to the CallbackBase
* class.
*
* IPreparedModelCallback::notify must be called exactly once on a given
* PreparedModelCallback object.
*
* @param status Error status returned from asynchronously preparing the
* model; will be:
* - NONE if the asynchronous preparation was successful
* - DEVICE_UNAVAILABLE if driver is offline or busy
* - GENERAL_FAILURE if there is an unspecified error
* - INVALID_ARGUMENT if the input model is invalid
* @param preparedModel Returned model that has been prepared for execution,
* nullptr if the model was unable to be prepared.
*/
Return<void> notify(ErrorStatus status, const sp<IPreparedModel>& preparedModel) override;
/**
* Retrieves the error status returned from the asynchronous task launched
* by IDevice::prepareModel. If IDevice::prepareModel has not finished
* asynchronously preparing the model, this call will block until the
* asynchronous task notifies the object.
*
* @return status Error status returned from asynchronously preparing the
* model; will be:
* - NONE if the asynchronous preparation was successful
* - DEVICE_UNAVAILABLE if driver is offline or busy
* - GENERAL_FAILURE if there is an unspecified error
* - INVALID_ARGUMENT if the input model is invalid
*/
ErrorStatus getStatus();
/**
* Retrieves the model that has been prepared for execution from the
* asynchronous task launched by IDevice::prepareModel. If
* IDevice::prepareModel has not finished asynchronously preparing the
* model, this call will block until the asynchronous task notifies the
* object.
*
* @return preparedModel Returned model that has been prepared for
* execution, nullptr if the model was unable to be
* prepared.
*/
sp<IPreparedModel> getPreparedModel();
private:
ErrorStatus mErrorStatus;
sp<IPreparedModel> mPreparedModel;
};
/**
* The ExecutionCallback class is used to receive the error status of the
* execution from a task executing asynchronously with respect to the runtime.
* If a calling thread calls wait* or get* on an ExecutionCallback object and
* the corresponding asynchronous task has not finished the execution, the
* calling thread will block until the asynchronous task has called notify. For
* more information on the synchronization behavior, refer to the CallbackBase
* class.
*
* This class inherits the basic blocking and signaling calls from
* CallbackBase, and implements the HIDL notify call from
* IExecutionCallback. This callback object is passed as an argument to
* IPreparedModel::execute.
*/
class ExecutionCallback : public CallbackBase, public IExecutionCallback {
public:
ExecutionCallback();
~ExecutionCallback() override;
/**
* IExecutionCallback::notify marks the callback object with the return
* status of the asynchronous execution that held this callback and enables
* all prior and future wait* calls on the ExecutionCallback object to
* proceed. For more information on the synchronization behavior, refer to
* the CallbackBase class.
*
* IExecutionCallback::notify must be called exactly once on a given
* ExecutionCallback object.
*
* @param status Error status returned from asynchronously preparing the
* model; will be:
* - NONE if the asynchronous execution was successful
* - DEVICE_UNAVAILABLE if driver is offline or busy
* - GENERAL_FAILURE if there is an unspecified error
* - OUTPUT_INSUFFICIENT_SIZE if provided output buffer is
* not large enough to store the resultant values
* - INVALID_ARGUMENT if the input request is invalid
*/
Return<void> notify(ErrorStatus status) override;
/**
* Retrieves the error status returned from the asynchronous task launched
* by IPreparedModel::execute. If IPreparedModel::execute has not finished
* asynchronously executing, this call will block until the asynchronous task
* notifies the object.
*
* @return status Error status returned from asynchronously preparing the
* model; will be:
* - NONE if the asynchronous execution was successful
* - DEVICE_UNAVAILABLE if driver is offline or busy
* - GENERAL_FAILURE if there is an unspecified error
* - OUTPUT_INSUFFICIENT_SIZE if provided output buffer is
* not large enough to store the resultant values
* - INVALID_ARGUMENT if the input request is invalid
*/
ErrorStatus getStatus();
private:
ErrorStatus mErrorStatus;
};
// template function implementation(s) below this point
template<class Rep, class Period>
std::cv_status CallbackBase::wait_for(const std::chrono::duration<Rep,Period>& timeout_duration) {
std::unique_lock<std::mutex> lock(mMutex);
std::cv_status status = mCondition.wait_for(lock, timeout_duration, [this]{return mNotified;});
if (status != std::cv_status::timeout) {
join_thread_locked();
}
return status;
}
} // namespace implementation
} // namespace V1_0
} // namespace neuralnetworks
} // namespace hardware
} // namespace android
#endif // ANDROID_HARDWARE_NEURALNETWORKS_V1_0_CALLBACKS_H

View file

@ -1,94 +0,0 @@
#include "Event.h"
#include <android-base/logging.h>
namespace android {
namespace hardware {
namespace neuralnetworks {
namespace V1_0 {
namespace implementation {
Event::Event() : mStatus(Status::WAITING) {}
Event::~Event() {
    // Deliberately do NOT call Event::join_thread here. Event is reference
    // counted, so the final reference may well be dropped by the bound
    // thread itself, which would run this destructor on that thread. A
    // thread that attempts to join itself throws, e.g.:
    //
    //     terminating with uncaught exception of type std::__1::system_error:
    //     thread::join failed: Resource deadlock would occur
}
// Record the outcome of the paired asynchronous call, run the bound
// callback (successful completions only), and wake every waiter.
Return<void> Event::notify(ErrorStatus status) {
    {
        std::lock_guard<std::mutex> guard(mMutex);
        if (status == ErrorStatus::NONE) {
            mStatus = Status::SUCCESS;
        } else {
            mStatus = Status::ERROR;
        }
        // The callback executes under the lock, so its effects are visible
        // to any wait* caller before that caller can observe the new status.
        if (mStatus == Status::SUCCESS && mCallback != nullptr) {
            if (!mCallback()) {
                LOG(ERROR) << "Event::notify -- callback failed";
            }
        }
    }
    // Wake waiters after releasing the lock so they can make progress
    // immediately.
    mCondition.notify_all();
    return Void();
}
// Non-blocking query of the current event state.
Event::Status Event::poll() {
    std::lock_guard<std::mutex> guard(mMutex);
    return mStatus;
}
// Block until the paired asynchronous call signals this event, then reap
// any thread bound via Event::bind_thread before returning the outcome.
Event::Status Event::wait() {
    std::unique_lock<std::mutex> guard(mMutex);
    mCondition.wait(guard, [this] { return mStatus != Status::WAITING; });
    join_thread_locked();
    return mStatus;
}
// Bind a completion hook, invoked by Event::notify on success. At most one
// callback may ever be bound, and it must have a callable target.
bool Event::on_finish(std::function<bool(void)> callback) {
    std::lock_guard<std::mutex> guard(mMutex);
    if (mCallback != nullptr) {
        LOG(ERROR) << "Event::on_finish -- a callback has already been bound to this event";
        return false;
    }
    if (callback == nullptr) {
        LOG(ERROR) << "Event::on_finish -- the new callback is invalid";
        return false;
    }
    mCallback = std::move(callback);
    return true;
}
// Take ownership of the asynchronous worker thread so a later wait*/
// join_thread can reap it. At most one thread may ever be bound, and the
// incoming thread must represent a live thread of execution.
bool Event::bind_thread(std::thread&& asyncThread) {
    std::lock_guard<std::mutex> guard(mMutex);
    if (mThread.joinable()) {
        LOG(ERROR) << "Event::bind_thread -- a thread has already been bound to this event";
        return false;
    }
    if (!asyncThread.joinable()) {
        LOG(ERROR) << "Event::bind_thread -- the new thread is not joinable";
        return false;
    }
    mThread = std::move(asyncThread);
    return true;
}
// Public entry point: take the lock, then reap the bound thread (if any).
void Event::join_thread() {
    std::lock_guard<std::mutex> guard(mMutex);
    join_thread_locked();
}
// Caller must hold mMutex. A thread that was never bound, or was already
// joined, is not joinable, so repeated calls are harmless no-ops.
void Event::join_thread_locked() {
    if (!mThread.joinable()) {
        return;
    }
    mThread.join();
}
} // namespace implementation
} // namespace V1_0
} // namespace neuralnetworks
} // namespace hardware
} // namespace android

View file

@ -1,216 +0,0 @@
#ifndef ANDROID_HARDWARE_NEURALNETWORKS_V1_0_EVENT_H
#define ANDROID_HARDWARE_NEURALNETWORKS_V1_0_EVENT_H
#include <android/hardware/neuralnetworks/1.0/IEvent.h>
#include <chrono>
#include <condition_variable>
#include <functional>
#include <hidl/MQDescriptor.h>
#include <hidl/Status.h>
#include <mutex>
#include <thread>
namespace android {
namespace hardware {
namespace neuralnetworks {
namespace V1_0 {
namespace implementation {
using ::android::hardware::hidl_array;
using ::android::hardware::hidl_memory;
using ::android::hardware::hidl_string;
using ::android::hardware::hidl_vec;
using ::android::hardware::Return;
using ::android::hardware::Void;
using ::android::sp;
/**
* The Event class is used internally by the Neuralnetworks runtime to
* synchronize between different threads. An asynchronous task is launched
* paired with an event object. When a client thread requires the output being
* processed by the asynchronous task, the client thread can wait for the result
* and be blocked until it has completed or a timeout condition has been
* reached, or poll the result periodically. Both poll and wait* may safely be
* called concurrently, even on the same event. When the server thread has
* completed, it should immediately call "notify" to indicate the corresponding
* output has been produced and awaken any client threads waiting on the event.
*
* This class exists to enable synchronization across HIDL. When synchronization
* is only required in the same process, consider using std::future, std::mutex,
* std::condition_variable, or std::experimental::latch instead.
*/
struct Event : public IEvent {
    Event();
    ~Event() override;

    /**
     * Event::Status::WAITING -- The corresponding asynchronous execution has
     *                           not yet finished.
     * Event::Status::SUCCESS -- The corresponding asynchronous execution has
     *                           succeeded and the output is ready to be
     *                           consumed.
     * Event::Status::TIMEOUT -- The calling thread has waited longer than the
     *                           user has specified. This only applies to the
     *                           methods Event::wait_for and Event::wait_until.
     * Event::Status::ERROR   -- The corresponding asynchronous execution has
     *                           failed to properly execute.
     */
    enum class Status : uint32_t {
        WAITING,
        SUCCESS,
        TIMEOUT,
        ERROR,
    };

    /**
     * IEvent::notify marks the event with the return status of the
     * asynchronous call the event is paired with and enables all
     * prior and future wait calls on the Event object to proceed. The
     * call to IEvent::notify happens before any wait* calls on
     * this event return (except in the case of TIMEOUT) and before
     * any poll calls that see the resulting status. The asynchronous
     * call the event is paired with must ensure that any update to
     * state that should be visible to the caller of wait* or poll
     * happens before the call to IEvent::notify.
     *
     * IEvent::notify can be called at most once on a given event.
     *
     * @param status ErrorStatus::NONE if the paired asynchronous call
     *               succeeded (the event is marked SUCCESS); any other
     *               value marks the event as ERROR.
     */
    Return<void> notify(ErrorStatus status) override;

    /**
     * Event::poll returns the current status of the event without blocking.
     *
     * @return Status SUCCESS, ERROR, or WAITING
     */
    Event::Status poll();

    /**
     * Event::wait blocks until the event has been signaled.
     *
     * @return Status SUCCESS or ERROR
     */
    Event::Status wait();

    /**
     * Event::wait_for blocks until the event has been signaled or the time
     * duration from the time the wait_for function was called has expired,
     * whichever comes first.
     *
     * @return Status SUCCESS, ERROR, or TIMEOUT
     */
    template<class Rep, class Period>
    Event::Status wait_for(const std::chrono::duration<Rep,Period>& timeout_duration);

    /**
     * Event::wait_until blocks until the event has been signaled or a certain
     * time has been reached, whichever comes first.
     *
     * @return Status SUCCESS, ERROR, or TIMEOUT
     */
    template<class Clock, class Duration>
    Event::Status wait_until(const std::chrono::time_point<Clock,Duration>& timeout_duration);

    /**
     * Event::on_finish binds a callback function to the event. The
     * callback will be executed when IEvent::notify is called, before
     * any calls to wait* return. (Note that wait_for or wait_until
     * can return TIMEOUT before IEvent::notify is called for the
     * first time, and hence before the callback is executed.)
     *
     * The callback function must not synchronize with or otherwise
     * access the event object it is bound to.
     *
     * Event::on_finish can be called at most once on a given event.
     *
     * @param callback Function to be invoked the first time IEvent::notify is
     *                 called. Must have a target -- i.e., must not compare
     *                 equal to nullptr. Callback returns true if it
     *                 successfully completes, false if it fails.
     * @return bool True if the callback was successfully bound, false if
     *              unsuccessful.
     *
     * TODO: What if notify has already been called before on_finish?
     * TODO: Why does the return value of the callback matter?
     */
    bool on_finish(std::function<bool(void)> callback);

    /**
     * Event::bind_thread binds a thread to the event for later use by
     * Event::join_thread.
     *
     * The thread must be passed using std::move.
     *
     * Once a thread is bound with Event::bind_thread, the client code
     * should ensure that one of the following occurs before the event is
     * destroyed:
     * - Event::join_thread has been called.
     * - Event::wait has been called.
     * - Event::wait_for has been called and returned other than TIMEOUT.
     * - Event::wait_until has been called and returned other than TIMEOUT.
     *
     * The bound thread shall not call any Event method with the exception of
     * IEvent::notify, which it will call when the thread has finished its
     * computation.
     *
     * Event::bind_thread can be called at most once on a given event.
     *
     * @param asyncThread Thread to be bound to the event. The thread object
     *                    must represent a thread of execution -- i.e.,
     *                    asyncThread.joinable() must be true.
     * @return bool True if successful, false if thread was not properly bound.
     */
    bool bind_thread(std::thread&& asyncThread);

    /**
     * Event::join_thread ensures that the thread (if any) bound to
     * this event with Event::bind_thread has fully finished and
     * cleaned its resources. It is legal to call this function
     * multiple times, concurrently or sequentially.
     */
    void join_thread();

 private:
    // Same as Event::join_thread but assumes the caller already holds a lock
    // on mMutex.
    void join_thread_locked();

    Status mStatus;                       // current event state; guarded by mMutex
    std::mutex mMutex;                    // guards all members below
    std::condition_variable mCondition;   // signaled by notify
    std::function<bool(void)> mCallback;  // optional completion hook (on_finish)
    std::thread mThread;                  // optional bound worker (bind_thread)
};
// template function implementations
/**
 * Event::wait_for blocks until the event has been signaled or the timeout
 * duration expires, whichever comes first. On a non-timeout return, any
 * thread bound via Event::bind_thread is joined.
 *
 * @param timeout_duration Maximum time to wait for the event to be signaled.
 * @return Status SUCCESS, ERROR, or TIMEOUT
 */
template<class Rep, class Period>
Event::Status Event::wait_for(const std::chrono::duration<Rep,Period>& timeout_duration) {
    std::unique_lock<std::mutex> lock(mMutex);
    // NOTE: the predicate overload of condition_variable::wait_for returns a
    // bool (true if the predicate held, false on timeout), not std::cv_status,
    // so it cannot be stored in a std::cv_status directly.
    const bool signaled = mCondition.wait_for(lock, timeout_duration,
                                              [this]{return mStatus != Status::WAITING;});
    if (signaled) {
        join_thread_locked();
    }
    return signaled ? mStatus : Status::TIMEOUT;
}
/**
 * Event::wait_until blocks until the event has been signaled or the deadline
 * passes, whichever comes first. On a non-timeout return, any thread bound
 * via Event::bind_thread is joined.
 *
 * @param timeout_time Deadline after which the wait gives up.
 * @return Status SUCCESS, ERROR, or TIMEOUT
 */
template<class Clock, class Duration>
Event::Status Event::wait_until(const std::chrono::time_point<Clock,Duration>& timeout_time) {
    std::unique_lock<std::mutex> lock(mMutex);
    // NOTE: the predicate overload of condition_variable::wait_until returns
    // a bool (true if the predicate held, false on timeout), not
    // std::cv_status, so it cannot be stored in a std::cv_status directly.
    const bool signaled = mCondition.wait_until(lock, timeout_time,
                                                [this]{return mStatus != Status::WAITING;});
    if (signaled) {
        join_thread_locked();
    }
    return signaled ? mStatus : Status::TIMEOUT;
}
} // namespace implementation
} // namespace V1_0
} // namespace neuralnetworks
} // namespace hardware
} // namespace android
#endif // ANDROID_HARDWARE_NEURALNETWORKS_V1_0_EVENT_H

View file

@ -14,7 +14,7 @@
* limitations under the License.
*/
#include "Event.h"
#include "Callbacks.h"
#include "TestHarness.h"
#include "VtsHalNeuralnetworksV1_0TargetTest.h"
@ -32,7 +32,8 @@ namespace functional {
hidl_memory allocateSharedMemory(int64_t size, const std::string& type = "ashmem");
namespace generated_tests {
using ::android::hardware::neuralnetworks::V1_0::implementation::Event;
using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback;
using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback;
using ::generated_tests::filter;
using ::generated_tests::for_all;
using ::generated_tests::for_each;
@ -65,22 +66,22 @@ void copy_back(MixedTyped* dst, const std::vector<RequestArgument>& ra, char* sr
void Execute(const sp<IDevice>& device, std::function<Model(void)> create_model,
std::function<bool(int)> is_ignored,
const std::vector<MixedTypedExampleType>& examples) {
Model model = create_model();
sp<IPreparedModel> preparedModel;
sp<Event> preparationEvent = new Event();
ASSERT_NE(nullptr, preparationEvent.get());
Return<void> prepareRet = device->prepareModel(
model, preparationEvent, [&](ErrorStatus status, const sp<IPreparedModel>& prepared) {
EXPECT_EQ(ErrorStatus::NONE, status);
preparedModel = prepared;
});
ASSERT_TRUE(prepareRet.isOk());
ASSERT_NE(nullptr, preparedModel.get());
Event::Status preparationStatus = preparationEvent->wait();
EXPECT_EQ(Event::Status::SUCCESS, preparationStatus);
const uint32_t INPUT = 0;
const uint32_t OUTPUT = 1;
Model model = create_model();
// launch prepare model
sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
ASSERT_NE(nullptr, preparedModelCallback.get());
Return<ErrorStatus> prepareLaunchStatus = device->prepareModel(model, preparedModelCallback);
ASSERT_TRUE(prepareLaunchStatus.isOk());
// retrieve prepared model
preparedModelCallback->wait();
ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
EXPECT_EQ(ErrorStatus::NONE, prepareReturnStatus);
sp<IPreparedModel> preparedModel = preparedModelCallback->getPreparedModel();
ASSERT_NE(nullptr, preparedModel.get());
int example_no = 1;
for (auto& example : examples) {
@ -160,15 +161,19 @@ void Execute(const sp<IDevice>& device, std::function<Model(void)> create_model,
inputMemory->commit();
outputMemory->commit();
// execute request
sp<Event> executionEvent = new Event();
ASSERT_NE(nullptr, executionEvent.get());
Return<ErrorStatus> executeStatus = preparedModel->execute(
{.inputs = inputs_info, .outputs = outputs_info, .pools = pools}, executionEvent);
ASSERT_TRUE(executeStatus.isOk());
EXPECT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(executeStatus));
Event::Status eventStatus = executionEvent->wait();
EXPECT_EQ(Event::Status::SUCCESS, eventStatus);
// launch execution
sp<ExecutionCallback> executionCallback = new ExecutionCallback();
ASSERT_NE(nullptr, executionCallback.get());
Return<ErrorStatus> executionLaunchStatus = preparedModel->execute(
{.inputs = inputs_info, .outputs = outputs_info, .pools = pools}, executionCallback);
ASSERT_TRUE(executionLaunchStatus.isOk());
EXPECT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(executionLaunchStatus));
// retrieve execution status
executionCallback->wait();
ErrorStatus executionReturnStatus = executionCallback->getStatus();
EXPECT_EQ(ErrorStatus::NONE, executionReturnStatus);
// validate results
outputMemory->read();

View file

@ -17,7 +17,8 @@
#define LOG_TAG "neuralnetworks_hidl_hal_test"
#include "VtsHalNeuralnetworksV1_0TargetTest.h"
#include "Event.h"
#include "Callbacks.h"
#include "Models.h"
#include "TestHarness.h"
@ -32,8 +33,10 @@ namespace V1_0 {
namespace vts {
namespace functional {
using ::android::hardware::neuralnetworks::V1_0::implementation::Event;
using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback;
using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback;
using ::generated_tests::MixedTypedExampleType;
namespace generated_tests {
extern void Execute(const sp<IDevice>&, std::function<Model(void)>, std::function<bool(int)>,
const std::vector<MixedTypedExampleType>&);
@ -66,26 +69,22 @@ void NeuralnetworksHidlTest::SetUp() {
void NeuralnetworksHidlTest::TearDown() {}
sp<IPreparedModel> NeuralnetworksHidlTest::doPrepareModelShortcut(const Model& model) {
sp<IPreparedModel> preparedModel;
ErrorStatus prepareStatus;
sp<Event> preparationEvent = new Event();
if (preparationEvent.get() == nullptr) {
sp<IPreparedModel> NeuralnetworksHidlTest::doPrepareModelShortcut() {
Model model = createValidTestModel();
sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
if (preparedModelCallback == nullptr) {
return nullptr;
}
Return<ErrorStatus> prepareLaunchStatus = device->prepareModel(model, preparedModelCallback);
if (!prepareLaunchStatus.isOk() || prepareLaunchStatus != ErrorStatus::NONE) {
return nullptr;
}
Return<void> prepareRet = device->prepareModel(
model, preparationEvent, [&](ErrorStatus status, const sp<IPreparedModel>& prepared) {
prepareStatus = status;
preparedModel = prepared;
});
if (!prepareRet.isOk() || prepareStatus != ErrorStatus::NONE ||
preparedModel.get() == nullptr) {
return nullptr;
}
Event::Status eventStatus = preparationEvent->wait();
if (eventStatus != Event::Status::SUCCESS) {
preparedModelCallback->wait();
ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
sp<IPreparedModel> preparedModel = preparedModelCallback->getPreparedModel();
if (prepareReturnStatus != ErrorStatus::NONE || preparedModel == nullptr) {
return nullptr;
}
@ -151,99 +150,121 @@ TEST_F(NeuralnetworksHidlTest, SupportedOperationsNegativeTest2) {
// prepare simple model positive test
// prepare simple model positive test
// NOTE(review): this span interleaved the removed Event-based statements with
// the new callback-based ones; only the callback-based flow is kept.
TEST_F(NeuralnetworksHidlTest, SimplePrepareModelPositiveTest) {
    // launch preparation of a valid model
    Model model = createValidTestModel();
    sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
    ASSERT_NE(nullptr, preparedModelCallback.get());
    Return<ErrorStatus> prepareLaunchStatus = device->prepareModel(model, preparedModelCallback);
    ASSERT_TRUE(prepareLaunchStatus.isOk());
    EXPECT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(prepareLaunchStatus));

    // the callback must report success and deliver a non-null prepared model
    preparedModelCallback->wait();
    ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
    EXPECT_EQ(ErrorStatus::NONE, prepareReturnStatus);
    sp<IPreparedModel> preparedModel = preparedModelCallback->getPreparedModel();
    EXPECT_NE(nullptr, preparedModel.get());
}
// prepare simple model negative test 1
// prepare simple model negative test 1
// NOTE(review): stale Event-based lines from the pre-refactor version were
// interleaved here; only the callback-based flow is kept.
TEST_F(NeuralnetworksHidlTest, SimplePrepareModelNegativeTest1) {
    // launch preparation of a structurally invalid model
    Model model = createInvalidTestModel1();
    sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
    ASSERT_NE(nullptr, preparedModelCallback.get());
    Return<ErrorStatus> prepareLaunchStatus = device->prepareModel(model, preparedModelCallback);
    ASSERT_TRUE(prepareLaunchStatus.isOk());
    EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, static_cast<ErrorStatus>(prepareLaunchStatus));

    // the callback must report the same error and deliver no prepared model
    preparedModelCallback->wait();
    ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
    EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, prepareReturnStatus);
    sp<IPreparedModel> preparedModel = preparedModelCallback->getPreparedModel();
    EXPECT_EQ(nullptr, preparedModel.get());
}
// prepare simple model negative test 2
// prepare simple model negative test 2
// NOTE(review): stale Event-based lines from the pre-refactor version were
// interleaved here; only the callback-based flow is kept.
TEST_F(NeuralnetworksHidlTest, SimplePrepareModelNegativeTest2) {
    // launch preparation of a second kind of invalid model
    Model model = createInvalidTestModel2();
    sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
    ASSERT_NE(nullptr, preparedModelCallback.get());
    Return<ErrorStatus> prepareLaunchStatus = device->prepareModel(model, preparedModelCallback);
    ASSERT_TRUE(prepareLaunchStatus.isOk());
    EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, static_cast<ErrorStatus>(prepareLaunchStatus));

    // the callback must report the same error and deliver no prepared model
    preparedModelCallback->wait();
    ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
    EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, prepareReturnStatus);
    sp<IPreparedModel> preparedModel = preparedModelCallback->getPreparedModel();
    EXPECT_EQ(nullptr, preparedModel.get());
}
// execute simple graph positive test
// execute simple graph positive test
// NOTE(review): this span interleaved the removed Event-based statements with
// the new callback-based ones; only the callback-based flow is kept.
TEST_F(NeuralnetworksHidlTest, SimpleExecuteGraphPositiveTest) {
    std::vector<float> outputData = {-1.0f, -1.0f, -1.0f, -1.0f};
    std::vector<float> expectedData = {6.0f, 8.0f, 10.0f, 12.0f};
    const uint32_t OUTPUT = 1;

    sp<IPreparedModel> preparedModel = doPrepareModelShortcut();
    ASSERT_NE(nullptr, preparedModel.get());
    Request request = createValidTestRequest();

    // postWork runs from ExecutionCallback::notify and copies the results out
    // of shared memory before any wait on the callback returns
    auto postWork = [&] {
        sp<IMemory> outputMemory = mapMemory(request.pools[OUTPUT]);
        if (outputMemory == nullptr) {
            return false;
        }
        float* outputPtr = reinterpret_cast<float*>(static_cast<void*>(outputMemory->getPointer()));
        if (outputPtr == nullptr) {
            return false;
        }
        outputMemory->read();
        std::copy(outputPtr, outputPtr + outputData.size(), outputData.begin());
        outputMemory->commit();
        return true;
    };

    // launch execution with the post-processing hook attached
    sp<ExecutionCallback> executionCallback = new ExecutionCallback();
    ASSERT_NE(nullptr, executionCallback.get());
    executionCallback->on_finish(postWork);
    Return<ErrorStatus> executeLaunchStatus = preparedModel->execute(request, executionCallback);
    ASSERT_TRUE(executeLaunchStatus.isOk());
    EXPECT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(executeLaunchStatus));

    // retrieve execution status and validate the copied-out results
    executionCallback->wait();
    ErrorStatus executionReturnStatus = executionCallback->getStatus();
    EXPECT_EQ(ErrorStatus::NONE, executionReturnStatus);
    EXPECT_EQ(expectedData, outputData);
}
// execute simple graph negative test 1
// execute simple graph negative test 1
// NOTE(review): stale Event-based lines from the pre-refactor version were
// interleaved here; only the callback-based flow is kept.
TEST_F(NeuralnetworksHidlTest, SimpleExecuteGraphNegativeTest1) {
    sp<IPreparedModel> preparedModel = doPrepareModelShortcut();
    ASSERT_NE(nullptr, preparedModel.get());
    Request request = createInvalidTestRequest1();

    // launch execution of an invalid request
    sp<ExecutionCallback> executionCallback = new ExecutionCallback();
    ASSERT_NE(nullptr, executionCallback.get());
    Return<ErrorStatus> executeLaunchStatus = preparedModel->execute(request, executionCallback);
    ASSERT_TRUE(executeLaunchStatus.isOk());
    EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, static_cast<ErrorStatus>(executeLaunchStatus));

    // the callback must report the same error
    executionCallback->wait();
    ErrorStatus executionReturnStatus = executionCallback->getStatus();
    EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, executionReturnStatus);
}
// execute simple graph negative test 2
// execute simple graph negative test 2
// NOTE(review): stale Event-based lines from the pre-refactor version were
// interleaved here; only the callback-based flow is kept.
TEST_F(NeuralnetworksHidlTest, SimpleExecuteGraphNegativeTest2) {
    sp<IPreparedModel> preparedModel = doPrepareModelShortcut();
    ASSERT_NE(nullptr, preparedModel.get());
    Request request = createInvalidTestRequest2();

    // launch execution of a second kind of invalid request
    sp<ExecutionCallback> executionCallback = new ExecutionCallback();
    ASSERT_NE(nullptr, executionCallback.get());
    Return<ErrorStatus> executeLaunchStatus = preparedModel->execute(request, executionCallback);
    ASSERT_TRUE(executeLaunchStatus.isOk());
    EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, static_cast<ErrorStatus>(executeLaunchStatus));

    // the callback must report the same error
    executionCallback->wait();
    ErrorStatus executionReturnStatus = executionCallback->getStatus();
    EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, executionReturnStatus);
}
// Mixed-typed examples

View file

@ -18,7 +18,9 @@
#define VTS_HAL_NEURALNETWORKS_V1_0_TARGET_TESTS_H
#include <android/hardware/neuralnetworks/1.0/IDevice.h>
#include <android/hardware/neuralnetworks/1.0/IExecutionCallback.h>
#include <android/hardware/neuralnetworks/1.0/IPreparedModel.h>
#include <android/hardware/neuralnetworks/1.0/IPreparedModelCallback.h>
#include <android/hardware/neuralnetworks/1.0/types.h>
#include <android/hidl/allocator/1.0/IAllocator.h>
@ -72,13 +74,28 @@ class NeuralnetworksHidlTest : public ::testing::VtsHalHidlTargetTestBase {
void SetUp() override;
void TearDown() override;
sp<IPreparedModel> doPrepareModelShortcut(const Model& model);
sp<IPreparedModel> doPrepareModelShortcut();
sp<IDevice> device;
};
} // namespace functional
} // namespace vts
// pretty-print values for error messages
// Streams the symbolic name of an ErrorStatus (via the HIDL-generated
// toString) so test-failure messages show e.g. "INVALID_ARGUMENT" instead of
// a raw integer.
template<typename CharT, typename Traits>
::std::basic_ostream<CharT, Traits>& operator<<(::std::basic_ostream<CharT, Traits>& os,
                                                ErrorStatus errorStatus) {
    return os << toString(errorStatus);
}
// Streams the symbolic name of a DeviceStatus (via the HIDL-generated
// toString) so test-failure messages show the enumerator name instead of a
// raw integer.
template<typename CharT, typename Traits>
::std::basic_ostream<CharT, Traits>& operator<<(::std::basic_ostream<CharT, Traits>& os,
                                                DeviceStatus deviceStatus) {
    return os << toString(deviceStatus);
}
} // namespace V1_0
} // namespace neuralnetworks
} // namespace hardware