Merge "NNAPI Concurrent Query Management -- HAL and VTS update" into oc-mr1-dev

Authored by David Gross on 2017-09-02 22:56:48 +00:00; committed by Android (Google) Code Review
commit 6ebe4bd842
9 changed files with 361 additions and 8 deletions

neuralnetworks/1.0/Android.bp

@@ -5,6 +5,7 @@ filegroup {
srcs: [
"types.hal",
"IDevice.hal",
"IEvent.hal",
"IPreparedModel.hal",
],
}
@@ -19,6 +20,7 @@ genrule {
out: [
"android/hardware/neuralnetworks/1.0/types.cpp",
"android/hardware/neuralnetworks/1.0/DeviceAll.cpp",
"android/hardware/neuralnetworks/1.0/EventAll.cpp",
"android/hardware/neuralnetworks/1.0/PreparedModelAll.cpp",
],
}
@@ -38,6 +40,11 @@ genrule {
"android/hardware/neuralnetworks/1.0/BnHwDevice.h",
"android/hardware/neuralnetworks/1.0/BpHwDevice.h",
"android/hardware/neuralnetworks/1.0/BsDevice.h",
"android/hardware/neuralnetworks/1.0/IEvent.h",
"android/hardware/neuralnetworks/1.0/IHwEvent.h",
"android/hardware/neuralnetworks/1.0/BnHwEvent.h",
"android/hardware/neuralnetworks/1.0/BpHwEvent.h",
"android/hardware/neuralnetworks/1.0/BsEvent.h",
"android/hardware/neuralnetworks/1.0/IPreparedModel.h",
"android/hardware/neuralnetworks/1.0/IHwPreparedModel.h",
"android/hardware/neuralnetworks/1.0/BnHwPreparedModel.h",

neuralnetworks/1.0/IEvent.hal (new file)

@@ -0,0 +1,49 @@
/*
* Copyright (C) 2017 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/* This HAL is a work in progress */
package android.hardware.neuralnetworks@1.0;
/**
* The IEvent interface is a callback object passed by the
* Neuralnetworks runtime to the vendor service. It is used as a
* synchronization primitive between one or more runtime threads and a
* single asynchronous vendor thread. An event object is passed as an
* argument to a HIDL call that is expected to take a non-trivial
* amount of time. When the asynchronous execution thread has
* completed its computation, it must call "notify" on the event to
* tell the Neuralnetworks runtime whether the computation succeeded and,
* if it did, that the corresponding output is ready to be consumed.
*
* TODO: Mention that "notify" is also called by a runtime thread
* during CPU fallback execution? Depends on whether the HIDL comments
* are strictly for vendors or not.
*/
interface IEvent {
/**
* IEvent::notify is called by the server thread (i.e. the thread doing the
* work) to mark the event as completed so that any threads requiring the
* corresponding resources can continue executing.
*
* @param status Status of the execution associated with the Event.
* Should be SUCCESS or ERROR.
*/
oneway notify(Status status);
};
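
To make the flow described above concrete, here is a minimal vendor-side sketch, not part of this change: the driver receives the event together with the request, runs the computation on its own thread, and reports the outcome through notify. SamplePreparedModel and runOnDevice are hypothetical names, and the C++ signature of execute is assumed from the HIDL declaration in IPreparedModel.hal below.

// Hypothetical vendor-side sketch (SamplePreparedModel and runOnDevice are
// illustrative only). The driver performs the computation asynchronously and
// calls IEvent::notify exactly once with SUCCESS or ERROR.
#include <android/hardware/neuralnetworks/1.0/IEvent.h>
#include <android/hardware/neuralnetworks/1.0/IPreparedModel.h>
#include <thread>

using ::android::sp;
using ::android::hardware::Return;
using namespace ::android::hardware::neuralnetworks::V1_0;

// Hypothetical device computation; a real driver would run the model here.
static bool runOnDevice(const Request& /*request*/) { return true; }

struct SamplePreparedModel : public IPreparedModel {
    Return<bool> execute(const Request& request, const sp<IEvent>& event) override {
        std::thread([request, event] {
            bool ok = runOnDevice(request);
            event->notify(ok ? Status::SUCCESS : Status::ERROR);
        }).detach();  // a production driver would track or join this thread
        return true;  // the request was accepted; the result arrives via the event
    }
};

In this sketch the boolean returned by execute only means the request was accepted for asynchronous processing; the success or failure of the execution itself is reported later through the event.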

neuralnetworks/1.0/IPreparedModel.hal

@@ -18,8 +18,9 @@
package android.hardware.neuralnetworks@1.0;
import IEvent;
interface IPreparedModel {
// TODO: The execution is synchronous. Change that to have a callback on completion.
// Multiple threads can call this execute function concurrently.
execute(Request request) generates(bool success);
execute(Request request, IEvent event) generates(bool success);
};
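
Because multiple threads may call execute concurrently, each in-flight request is paired with its own event. The hedged client-side sketch below illustrates this; runTwoRequestsConcurrently is a hypothetical name, and Event is the VTS helper class added later in this change.

// Hypothetical client-side sketch: two concurrent executions on the same
// prepared model, each paired with a distinct event object.
void runTwoRequestsConcurrently(const sp<IPreparedModel>& preparedModel,
                                const Request& request1, const Request& request2) {
    sp<Event> event1 = new Event();
    sp<Event> event2 = new Event();

    bool launched1 = preparedModel->execute(request1, event1);
    bool launched2 = preparedModel->execute(request2, event2);

    if (launched1 && launched2 &&
        event1->wait() == Event::Status::SUCCESS &&
        event2->wait() == Event::Status::SUCCESS) {
        // Both outputs are now ready to be read from their memory pools.
    }
}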

neuralnetworks/1.0/types.hal

@@ -181,3 +181,8 @@ struct Request {
vec<InputOutputInfo> outputs;
vec<memory> pools;
};
enum Status : uint32_t {
SUCCESS,
ERROR,
};

neuralnetworks/1.0/vts/functional/Android.bp

@@ -16,7 +16,10 @@
cc_test {
name: "VtsHalNeuralnetworksV1_0TargetTest",
srcs: ["VtsHalNeuralnetworksV1_0TargetTest.cpp"],
srcs: [
"Event.cpp",
"VtsHalNeuralnetworksV1_0TargetTest.cpp",
],
defaults: ["VtsHalTargetTestDefaults"],
static_libs: [
"android.hardware.neuralnetworks@1.0",

neuralnetworks/1.0/vts/functional/Event.cpp (new file)

@@ -0,0 +1,76 @@
#include "Event.h"
#include <android-base/logging.h>
namespace android {
namespace hardware {
namespace neuralnetworks {
namespace V1_0 {
namespace implementation {
Event::Event() : mStatus(Status::WAITING) {}
Event::~Event() {
if (mThread.joinable()) {
mThread.join();
}
}
Return<void> Event::notify(ReturnedStatus status) {
{
std::lock_guard<std::mutex> lock(mMutex);
mStatus = status == ReturnedStatus::SUCCESS ? Status::SUCCESS : Status::ERROR;
if (mStatus == Status::SUCCESS && mCallback != nullptr) {
bool success = mCallback();
if (!success) {
LOG(ERROR) << "Event::notify -- callback failed";
}
}
}
mCondition.notify_all();
return Void();
}
Event::Status Event::poll() {
std::lock_guard<std::mutex> lock(mMutex);
return mStatus;
}
Event::Status Event::wait() {
std::unique_lock<std::mutex> lock(mMutex);
mCondition.wait(lock, [this]{return mStatus != Status::WAITING;});
return mStatus;
}
bool Event::on_finish(std::function<bool(void)> callback) {
std::lock_guard<std::mutex> lock(mMutex);
if (mCallback != nullptr) {
LOG(ERROR) << "Event::on_finish -- a callback has already been bound to this event";
return false;
}
if (callback == nullptr) {
LOG(ERROR) << "Event::on_finish -- the new callback is invalid";
return false;
}
mCallback = std::move(callback);
return true;
}
bool Event::bind_thread(std::thread&& asyncThread) {
std::lock_guard<std::mutex> lock(mMutex);
if (mThread.joinable()) {
LOG(ERROR) << "Event::bind_thread -- a thread has already been bound to this event";
return false;
}
if (!asyncThread.joinable()) {
LOG(ERROR) << "Event::bind_thread -- the new thread is not joinable";
return false;
}
mThread = std::move(asyncThread);
return true;
}
} // namespace implementation
} // namespace V1_0
} // namespace neuralnetworks
} // namespace hardware
} // namespace android
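
A short usage sketch of this helper follows. doAsyncWork and runAndWait are hypothetical names, ReturnedStatus is the alias for V1_0::Status declared in Event.h (the next file), and the snippet is assumed to live inside the same implementation namespace.

// Hypothetical usage of the Event helper implemented above.
bool doAsyncWork();  // hypothetical asynchronous computation

Event::Status runAndWait() {
    sp<Event> event = new Event();

    // Bind the completion callback before any worker can call notify.
    event->on_finish([]{ return true; });  // e.g. commit or unmap output memory

    std::thread worker([event] {
        bool ok = doAsyncWork();
        event->notify(ok ? ReturnedStatus::SUCCESS : ReturnedStatus::ERROR);
    });
    event->bind_thread(std::move(worker));  // ~Event() will join this thread

    return event->wait();  // SUCCESS or ERROR once the worker has notified
}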

neuralnetworks/1.0/vts/functional/Event.h (new file)

@@ -0,0 +1,192 @@
#ifndef ANDROID_HARDWARE_NEURALNETWORKS_V1_0_EVENT_H
#define ANDROID_HARDWARE_NEURALNETWORKS_V1_0_EVENT_H
#include <android/hardware/neuralnetworks/1.0/IEvent.h>
#include <chrono>
#include <condition_variable>
#include <functional>
#include <hidl/MQDescriptor.h>
#include <hidl/Status.h>
#include <mutex>
#include <thread>
namespace android {
namespace hardware {
namespace neuralnetworks {
namespace V1_0 {
namespace implementation {
using ::android::hardware::hidl_array;
using ::android::hardware::hidl_memory;
using ::android::hardware::hidl_string;
using ::android::hardware::hidl_vec;
using ::android::hardware::Return;
using ::android::hardware::Void;
using ::android::sp;
using ReturnedStatus = ::android::hardware::neuralnetworks::V1_0::Status;
/**
* The Event class is used internally by the Neuralnetworks runtime to
* synchronize between different threads. An asynchronous task is launched
* paired with an event object. When a client thread requires the output that is
* being produced by the asynchronous task, it can either block on a wait* call
* until the task has completed (or a timeout has been reached), or poll the
* result periodically. Both poll and wait* may safely be
* called concurrently, even on the same event. When the server thread has
* completed, it should immediately call "notify" to indicate the corresponding
* output has been produced and awaken any client threads waiting on the event.
*
* This class exists to enable synchronization across HIDL. When synchronization
* is only required in the same process, consider using std::future, std::mutex,
* std::condition_variable, or std::experimental::latch instead.
*/
struct Event : public IEvent {
Event();
~Event() override;
/**
* Event::Status::WAITING -- The corresponding asynchronous execution has
* not yet finished.
* Event::Status::SUCCESS -- The corresponding asynchronous execution has
* succeeded and the output is ready to be
* consumed.
* Event::Status::TIMEOUT -- The calling thread has waited longer than the
* user has specified. This only applies to the
* methods Event::wait_for and Event::wait_until.
* Event::Status::ERROR -- The corresponding asynchronous execution has
* failed to properly execute.
*/
enum class Status : uint32_t {
WAITING,
SUCCESS,
TIMEOUT,
ERROR,
};
/**
* IEvent::notify marks the event with the return status of the
* asynchronous call the event is paired with and enables all
* prior and future wait calls on the Event object to proceed. The
* call to IEvent::notify happens before any wait* calls on
* this event return (except in the case of TIMEOUT) and before
* any poll calls that see the resulting status. The asynchronous
* call the event is paired with must ensure that any update to
* state that should be visible to the caller of wait* or poll
* happens before the call to IEvent::notify.
*
* IEvent::notify can be called at most once on a given event.
*
* @param status Status of the execution; either SUCCESS or ERROR.
*/
Return<void> notify(ReturnedStatus status) override;
/**
* Event::poll returns the current status of the event.
*
* @return Status SUCCESS, ERROR, or WAITING
*/
Event::Status poll();
/**
* Event::wait blocks until the event has been signaled.
*
* @return Status SUCCESS or ERROR
*/
Event::Status wait();
/**
* Event::wait_for blocks until the event has been signaled or the time
* duration from the time the wait_for function was called has expired,
* whichever comes first.
*
* @return Status SUCCESS, ERROR, or TIMEOUT
*/
template<class Rep, class Period>
Event::Status wait_for(const std::chrono::duration<Rep,Period>& timeout_duration);
/**
* Event::wait_until blocks until the event has been signaled or a certain
* time has been reached, whichever comes first.
*
* @return Status SUCCESS, ERROR, or TIMEOUT
*/
template<class Clock, class Duration>
Event::Status wait_until(const std::chrono::time_point<Clock,Duration>& timeout_time);
/**
* Event::on_finish binds a callback function to the event. The
* callback will be executed when IEvent::notify is called, before
* any calls to wait* return. (Note that wait_for or wait_until
* can return TIMEOUT before IEvent::notify is called for the
* first time, and hence before the callback is executed.)
*
* The callback function must not synchronize with or otherwise
* access the event object it is bound to.
*
* Event::on_finish can be called at most once on a given event.
*
* @param callback Function to be invoked the first time IEvent::notify is
* called. Must have a target -- i.e., must not compare equal
* to nullptr. Callback returns true if it successfully
* completes, false if it fails.
* @return bool True if the callback was successfully bound, false if
* unsuccessful.
*
* TODO: What if notify has already been called before on_finish?
* TODO: Why does the return value of the callback matter?
*/
bool on_finish(std::function<bool(void)> callback);
/**
* Event::bind_thread binds a thread to the event ensuring that the thread
* has fully finished and cleaned its resources before the event is
* destroyed. The thread should be bound using std::move.
*
* The bound thread shall not call any Event method with the exception of
* IEvent::notify, which it will call when the thread has finished its
* computation.
*
* Event::bind_thread can be called at most once on a given event.
*
* @param asyncThread Thread to be bound to the event. The thread object
* must represent a thread of execution -- i.e.,
* asyncThread.joinable() must be true.
* @return bool True if successful, false if thread was not properly bound.
*/
bool bind_thread(std::thread&& asyncThread);
private:
Status mStatus;
std::mutex mMutex;
std::condition_variable mCondition;
std::function<bool(void)> mCallback;
std::thread mThread;
};
// template function implementations
template<class Rep, class Period>
Event::Status Event::wait_for(const std::chrono::duration<Rep,Period>& timeout_duration) {
std::unique_lock<std::mutex> lock(mMutex);
// The predicate overload of wait_for returns bool (not std::cv_status):
// true if the event was notified before the timeout expired, false otherwise.
bool notified = mCondition.wait_for(lock, timeout_duration,
[this]{return mStatus != Status::WAITING;});
return notified ? mStatus : Status::TIMEOUT;
}
template<class Clock, class Duration>
Event::Status Event::wait_until(const std::chrono::time_point<Clock,Duration>& timeout_time) {
std::unique_lock<std::mutex> lock(mMutex);
bool notified = mCondition.wait_until(lock, timeout_time,
[this]{return mStatus != Status::WAITING;});
return notified ? mStatus : Status::TIMEOUT;
}
} // namespace implementation
} // namespace V1_0
} // namespace neuralnetworks
} // namespace hardware
} // namespace android
#endif // ANDROID_HARDWARE_NEURALNETWORKS_V1_0_EVENT_H
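
As a brief illustration of the timed variants defined above, the sketch below bounds how long a caller blocks; checkWithBudget and the 100 ms budget are illustrative assumptions, not values required by this interface.

// Hypothetical use of wait_for. It never returns WAITING: it yields TIMEOUT
// if the budget expires first, otherwise the status set by notify.
void checkWithBudget(const sp<Event>& event) {
    switch (event->wait_for(std::chrono::milliseconds(100))) {
        case Event::Status::SUCCESS:
            break;  // the result is ready within the budget
        case Event::Status::TIMEOUT:
            break;  // still running; poll() later or fall back to wait()
        case Event::Status::ERROR:
            break;  // the asynchronous execution reported failure
        case Event::Status::WAITING:
            break;  // not returned by wait_for; listed for exhaustiveness
    }
}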

neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworksV1_0TargetTest.cpp

@@ -16,6 +16,7 @@
#define LOG_TAG "neuralnetworks_hidl_hal_test"
#include "Event.h"
#include "VtsHalNeuralnetworksV1_0TargetTest.h"
#include <android-base/logging.h>
#include <android/hidl/memory/1.0/IMemory.h>
@@ -29,9 +30,13 @@ namespace V1_0 {
namespace vts {
namespace functional {
using ::android::hardware::neuralnetworks::V1_0::implementation::Event;
// A class for test environment setup
NeuralnetworksHidlEnvironment::NeuralnetworksHidlEnvironment() {}
NeuralnetworksHidlEnvironment::~NeuralnetworksHidlEnvironment() {}
NeuralnetworksHidlEnvironment* NeuralnetworksHidlEnvironment::getInstance() {
// This has to return a "new" object because it is freed inside
// ::testing::AddGlobalTestEnvironment when the gtest is being torn down
@@ -44,6 +49,8 @@ void NeuralnetworksHidlEnvironment::registerTestServices() {
}
// The main test class for NEURALNETWORKS HIDL HAL.
NeuralnetworksHidlTest::~NeuralnetworksHidlTest() {}
void NeuralnetworksHidlTest::SetUp() {
device = ::testing::VtsHalHidlTargetTestBase::getService<IDevice>(
NeuralnetworksHidlEnvironment::getInstance());
@@ -226,21 +233,32 @@ TEST_F(NeuralnetworksHidlTest, SimpleExecuteGraphTest) {
float* outputPtr = reinterpret_cast<float*>(static_cast<void*>(outputMemory->getPointer()));
ASSERT_NE(nullptr, inputPtr);
ASSERT_NE(nullptr, outputPtr);
inputMemory->update();
outputMemory->update();
std::copy(inputData.begin(), inputData.end(), inputPtr);
std::copy(outputData.begin(), outputData.end(), outputPtr);
inputMemory->commit();
outputMemory->commit();
// execute request
bool success = preparedModel->execute({.inputs = inputs, .outputs = outputs, .pools = pools});
sp<Event> event = sp<Event>(new Event());
ASSERT_NE(nullptr, event.get());
bool success = preparedModel->execute({.inputs = inputs, .outputs = outputs, .pools = pools},
event);
EXPECT_TRUE(success);
Event::Status status = event->wait();
EXPECT_EQ(Event::Status::SUCCESS, status);
// validate results { 1+5, 2+6, 3+7, 4+8 }
outputMemory->update();
outputMemory->read();
std::copy(outputPtr, outputPtr + outputData.size(), outputData.begin());
outputMemory->commit();
EXPECT_EQ(expectedData, outputData);
}
// TODO: Add tests for execution failure, or wait_for/wait_until timeout.
// Discussion: https://googleplex-android-review.git.corp.google.com/#/c/platform/hardware/interfaces/+/2654636/5/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworksV1_0TargetTest.cpp@222
} // namespace functional
} // namespace vts
} // namespace V1_0

neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworksV1_0TargetTest.h

@@ -59,15 +59,17 @@ class NeuralnetworksHidlEnvironment : public ::testing::VtsHalHidlTargetTestEnvB
NeuralnetworksHidlEnvironment& operator=(NeuralnetworksHidlEnvironment&&) = delete;
public:
~NeuralnetworksHidlEnvironment() override;
static NeuralnetworksHidlEnvironment* getInstance();
virtual void registerTestServices() override;
void registerTestServices() override;
};
// The main test class for NEURALNETWORKS HIDL HAL.
class NeuralnetworksHidlTest : public ::testing::VtsHalHidlTargetTestBase {
public:
virtual void SetUp() override;
virtual void TearDown() override;
~NeuralnetworksHidlTest() override;
void SetUp() override;
void TearDown() override;
sp<IDevice> device;
};