Merge changes I3999f4e9,Icc1f90b8

* changes:
  Avoid holding lock while calling recurrent actions.
  Remove lock for fakeVehicleHardware callbacks.

commit 8f0958eb88
6 changed files with 80 additions and 41 deletions
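Both changes apply the same rule: a component must not invoke externally supplied callbacks while holding its own internal lock, otherwise a caller that holds its own lock while registering can deadlock against a callback that tries to take that caller's lock. Below is a minimal, generic sketch of the pattern the timer moves to (snapshot the callbacks under the lock, invoke them after releasing it); the Notifier class and its names are illustrative only, not part of the VHAL code.

#include <functional>
#include <memory>
#include <mutex>
#include <vector>

class Notifier {
  public:
    using Callback = std::function<void()>;

    void registerCallback(std::shared_ptr<Callback> cb) {
        std::scoped_lock<std::mutex> lockGuard(mLock);
        mCallbacks.push_back(std::move(cb));
    }

    // Snapshot the callbacks under mLock, but invoke them after the lock is
    // released, so a callback that grabs a caller-side lock cannot deadlock
    // against a caller that holds that lock while calling registerCallback().
    void notifyAll() {
        std::vector<std::shared_ptr<Callback>> toRun;
        {
            std::scoped_lock<std::mutex> lockGuard(mLock);
            toRun = mCallbacks;
        }
        for (const auto& cb : toRun) {
            (*cb)();
        }
    }

  private:
    std::mutex mLock;
    std::vector<std::shared_ptr<Callback>> mCallbacks;
};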
@@ -138,9 +138,12 @@ class FakeVehicleHardware : public IVehicleHardware {
     std::unique_ptr<RecurrentTimer> mRecurrentTimer;
+    // GeneratorHub is thread-safe.
     std::unique_ptr<GeneratorHub> mGeneratorHub;
 
+    // Only allowed to set once.
+    std::unique_ptr<const PropertyChangeCallback> mOnPropertyChangeCallback;
+    std::unique_ptr<const PropertySetErrorCallback> mOnPropertySetErrorCallback;
+
     std::mutex mLock;
-    std::unique_ptr<const PropertyChangeCallback> mOnPropertyChangeCallback GUARDED_BY(mLock);
-    std::unique_ptr<const PropertySetErrorCallback> mOnPropertySetErrorCallback GUARDED_BY(mLock);
     std::unordered_map<PropIdAreaId, std::shared_ptr<RecurrentTimer::Callback>, PropIdAreaIdHash>
             mRecurrentActions GUARDED_BY(mLock);
     std::unordered_map<PropIdAreaId, VehiclePropValuePool::RecyclableType, PropIdAreaIdHash>
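The header hunk separates the members into callbacks that are set once during initialization and then only read, and mutable state that stays annotated with GUARDED_BY(mLock). A rough sketch of that split with simplified types; FakeHardwareSketch and its members are stand-ins, and the GUARDED_BY comment stands in for the Clang thread-safety annotation used in the real header.

#include <cstdint>
#include <functional>
#include <memory>
#include <mutex>
#include <string>
#include <unordered_map>

class FakeHardwareSketch {
  private:
    using PropertyChangeCallback = std::function<void(int32_t propId)>;

    // Set exactly once while the object is still effectively single-threaded
    // (during initialization) and never reassigned afterwards, so readers do
    // not need mLock. The const element type documents that the callback
    // itself is immutable once installed.
    std::unique_ptr<const PropertyChangeCallback> mOnPropertyChangeCallback;

    // State that is read and written after initialization stays behind the
    // lock (the real header expresses this with GUARDED_BY(mLock)).
    std::mutex mLock;
    std::unordered_map<int32_t, std::string> mRecurrentActions;  // GUARDED_BY(mLock)
};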
@@ -1228,13 +1228,19 @@ StatusCode FakeVehicleHardware::checkHealth() {
 
 void FakeVehicleHardware::registerOnPropertyChangeEvent(
         std::unique_ptr<const PropertyChangeCallback> callback) {
-    std::scoped_lock<std::mutex> lockGuard(mLock);
+    if (mOnPropertyChangeCallback != nullptr) {
+        ALOGE("registerOnPropertyChangeEvent must only be called once");
+        return;
+    }
     mOnPropertyChangeCallback = std::move(callback);
 }
 
 void FakeVehicleHardware::registerOnPropertySetErrorEvent(
         std::unique_ptr<const PropertySetErrorCallback> callback) {
-    std::scoped_lock<std::mutex> lockGuard(mLock);
+    if (mOnPropertySetErrorCallback != nullptr) {
+        ALOGE("registerOnPropertySetErrorEvent must only be called once");
+        return;
+    }
     mOnPropertySetErrorCallback = std::move(callback);
 }
 
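The register functions now enforce the set-once contract themselves instead of serializing registration with mLock. A compact, hypothetical holder class (not the real FakeVehicleHardware) showing the same behavior:

#include <cstdint>
#include <functional>
#include <iostream>
#include <memory>
#include <utility>

using PropertyChangeCallback = std::function<void(int32_t propId)>;

class CallbackHolder {
  public:
    // Mirrors the register-once contract: the first call installs the
    // callback, any later call is rejected and logged (ALOGE in the real
    // implementation, std::cerr here).
    void registerOnPropertyChangeEvent(std::unique_ptr<const PropertyChangeCallback> callback) {
        if (mOnPropertyChangeCallback != nullptr) {
            std::cerr << "registerOnPropertyChangeEvent must only be called once\n";
            return;
        }
        mOnPropertyChangeCallback = std::move(callback);
    }

  private:
    std::unique_ptr<const PropertyChangeCallback> mOnPropertyChangeCallback;
};

int main() {
    CallbackHolder holder;
    holder.registerOnPropertyChangeEvent(
            std::make_unique<const PropertyChangeCallback>([](int32_t propId) {
                std::cout << "property changed: " << propId << "\n";
            }));
    // Second registration is ignored with an error message.
    holder.registerOnPropertyChangeEvent(
            std::make_unique<const PropertyChangeCallback>([](int32_t) {}));
    return 0;
}

std::call_once with a std::once_flag would give the same guarantee; the explicit null check used in the diff additionally lets the implementation log the misuse.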
@@ -1278,8 +1284,6 @@ StatusCode FakeVehicleHardware::updateSampleRate(int32_t propId, int32_t areaId,
 }
 
 void FakeVehicleHardware::onValueChangeCallback(const VehiclePropValue& value) {
-    std::scoped_lock<std::mutex> lockGuard(mLock);
-
     if (mOnPropertyChangeCallback == nullptr) {
         return;
     }
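Dropping the lock in onValueChangeCallback relies on the callback pointer being written only during initialization, before the threads that deliver property events are started; thread creation provides the necessary happens-before edge. A minimal illustration of that ordering (not the HAL's code):

#include <cstdint>
#include <functional>
#include <iostream>
#include <memory>
#include <thread>

int main() {
    using Callback = std::function<void(int32_t)>;

    // Install the callback while still single-threaded ...
    std::unique_ptr<const Callback> onChange =
            std::make_unique<const Callback>([](int32_t propId) {
                std::cout << "value changed for prop " << propId << "\n";
            });

    // ... then start the thread that reads it. Creating the thread
    // synchronizes-with the start of the new thread, so the fully
    // constructed callback is visible there without any lock.
    std::thread eventThread([&onChange] {
        if (onChange != nullptr) {
            (*onChange)(1234);
        }
    });
    eventThread.join();
    return 0;
}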
@@ -116,11 +116,12 @@ class IVehicleHardware {
     virtual aidl::android::hardware::automotive::vehicle::StatusCode checkHealth() = 0;
 
     // Register a callback that would be called when there is a property change event from vehicle.
+    // Must only be called once during initialization.
     virtual void registerOnPropertyChangeEvent(
             std::unique_ptr<const PropertyChangeCallback> callback) = 0;
 
     // Register a callback that would be called when there is a property set error event from
-    // vehicle.
+    // vehicle. Must only be called once during initialization.
     virtual void registerOnPropertySetErrorEvent(
             std::unique_ptr<const PropertySetErrorCallback> callback) = 0;
 };
@@ -83,8 +83,9 @@ class RecurrentTimer final {
     // each time we might introduce outdated elements to the top. We must make sure the heap is
     // always valid from the top.
     void removeInvalidCallbackLocked() REQUIRES(mLock);
-    // Pops the next closest callback (must be valid) from the heap.
-    std::unique_ptr<CallbackInfo> popNextCallbackLocked() REQUIRES(mLock);
+    // Gets the next callback to run (must be valid) from the heap, updates its nextTime and puts
+    // it back to the heap.
+    std::shared_ptr<Callback> getNextCallbackLocked(int64_t now) REQUIRES(mLock);
 };
 
 } // namespace vehicle
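The old popNextCallbackLocked() removed the nearest entry and let the loop re-insert it after bumping nextTime; the new getNextCallbackLocked(now) updates the entry in place at the back of the heap and restores the heap before returning the callback. A self-contained sketch of that heap manipulation with simplified types (not the real RecurrentTimer internals):

#include <algorithm>
#include <cstdint>
#include <functional>
#include <memory>
#include <vector>

struct CallbackInfo {
    std::shared_ptr<std::function<void()>> callback;
    int64_t interval;  // nanoseconds between runs
    int64_t nextTime;  // absolute time of the next run

    // Comparator for a min-heap on nextTime: std::push_heap/std::pop_heap
    // build a max-heap with respect to the comparator, so an entry with a
    // larger nextTime must compare as "less".
    static bool cmp(const std::unique_ptr<CallbackInfo>& lhs,
                    const std::unique_ptr<CallbackInfo>& rhs) {
        return lhs->nextTime > rhs->nextTime;
    }
};

// Takes the entry with the smallest nextTime (assumed to be due, i.e.
// nextTime <= now), advances it past `now`, restores the heap, and returns
// the callback to run. Sketch of the idea behind getNextCallbackLocked();
// the real class also prunes unregistered entries from the heap top.
std::shared_ptr<std::function<void()>> getNextCallback(
        std::vector<std::unique_ptr<CallbackInfo>>& queue, int64_t now) {
    std::pop_heap(queue.begin(), queue.end(), CallbackInfo::cmp);
    auto& info = queue.back();
    auto callback = info->callback;
    int64_t intervalCount = (now - info->nextTime) / info->interval + 1;
    info->nextTime += intervalCount * info->interval;
    std::push_heap(queue.begin(), queue.end(), CallbackInfo::cmp);
    return callback;
}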
@@ -103,68 +103,71 @@ void RecurrentTimer::removeInvalidCallbackLocked() {
    }
}

std::unique_ptr<RecurrentTimer::CallbackInfo> RecurrentTimer::popNextCallbackLocked() {
std::shared_ptr<RecurrentTimer::Callback> RecurrentTimer::getNextCallbackLocked(int64_t now) {
    std::pop_heap(mCallbackQueue.begin(), mCallbackQueue.end(), CallbackInfo::cmp);
    std::unique_ptr<CallbackInfo> info = std::move(mCallbackQueue[mCallbackQueue.size() - 1]);
    mCallbackQueue.pop_back();
    auto& callbackInfo = mCallbackQueue[mCallbackQueue.size() - 1];
    auto nextCallback = callbackInfo->callback;
    // intervalCount is the number of intervals we have to advance until we pass now.
    size_t intervalCount = (now - callbackInfo->nextTime) / callbackInfo->interval + 1;
    callbackInfo->nextTime += intervalCount * callbackInfo->interval;
    std::push_heap(mCallbackQueue.begin(), mCallbackQueue.end(), CallbackInfo::cmp);

    // Make sure the first element is always valid.
    removeInvalidCallbackLocked();
    return info;

    return nextCallback;
}

void RecurrentTimer::loop() {
    std::unique_lock<std::mutex> uniqueLock(mLock);

    std::vector<std::shared_ptr<Callback>> callbacksToRun;
    while (true) {
        // Wait until the timer exits or we have at least one recurrent callback.
        mCond.wait(uniqueLock, [this] {
            ScopedLockAssertion lockAssertion(mLock);
            return mStopRequested || mCallbackQueue.size() != 0;
        });

        int64_t interval;
        {
            std::unique_lock<std::mutex> uniqueLock(mLock);
            ScopedLockAssertion lockAssertion(mLock);
            // Wait until the timer exits or we have at least one recurrent callback.
            mCond.wait(uniqueLock, [this] {
                ScopedLockAssertion lockAssertion(mLock);
                return mStopRequested || mCallbackQueue.size() != 0;
            });

            int64_t interval;
        if (mStopRequested) {
            return;
        }
        // The first element is the nearest next event.
        int64_t nextTime = mCallbackQueue[0]->nextTime;
        int64_t now = uptimeNanos();

        if (nextTime > now) {
            interval = nextTime - now;
        } else {
            interval = 0;
        }
        }

        // Wait for the next event or the timer exits.
        if (mCond.wait_for(uniqueLock, std::chrono::nanoseconds(interval), [this] {
                ScopedLockAssertion lockAssertion(mLock);
                return mStopRequested;
            })) {
            return;
        }
            // Wait for the next event or the timer exits.
            if (mCond.wait_for(uniqueLock, std::chrono::nanoseconds(interval), [this] {
                    ScopedLockAssertion lockAssertion(mLock);
                    return mStopRequested;
                })) {
                return;
            }

        {
            ScopedLockAssertion lockAssertion(mLock);
            int64_t now = uptimeNanos();
            now = uptimeNanos();
            callbacksToRun.clear();
            while (mCallbackQueue.size() > 0) {
                int64_t nextTime = mCallbackQueue[0]->nextTime;
                if (nextTime > now) {
                    break;
                }

                std::unique_ptr<CallbackInfo> info = popNextCallbackLocked();
                info->nextTime += info->interval;

                auto callback = info->callback;
                mCallbackQueue.push_back(std::move(info));
                std::push_heap(mCallbackQueue.begin(), mCallbackQueue.end(), CallbackInfo::cmp);

                (*callback)();
                callbacksToRun.push_back(getNextCallbackLocked(now));
            }
        }

        // Do not execute the callback while holding the lock.
        for (size_t i = 0; i < callbacksToRun.size(); i++) {
            (*callbacksToRun[i])();
        }
    }
}
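The hunk above interleaves the old loop(), which invoked each due callback while still holding mLock, with the new loop(), which collects the due callbacks into callbacksToRun and invokes them only after the lock scope ends. The other notable piece is the catch-up arithmetic shown in the hunk: if the loop overslept by several periods, nextTime is advanced past now in a single step instead of firing once per missed period. A small worked example of that arithmetic (advancePastNow is an illustrative helper, not part of the source):

#include <cassert>
#include <cstdint>

// Catch-up arithmetic from getNextCallbackLocked(): if the loop fell behind
// by several periods, advance nextTime past `now` in one step.
int64_t advancePastNow(int64_t nextTime, int64_t interval, int64_t now) {
    int64_t intervalCount = (now - nextTime) / interval + 1;
    return nextTime + intervalCount * interval;
}

int main() {
    // Callback was due at t=100 with a period of 10, but we only woke up at
    // t=137: the runs at 110, 120 and 130 are skipped and the next run is
    // scheduled at 140.
    assert(advancePastNow(/*nextTime=*/100, /*interval=*/10, /*now=*/137) == 140);
    // Exactly on time: (137 - 137) / 10 + 1 == 1, so the next run is one
    // period later.
    assert(advancePastNow(/*nextTime=*/137, /*interval=*/10, /*now=*/137) == 147);
    return 0;
}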
@@ -186,6 +186,33 @@ TEST_F(RecurrentTimerTest, testRegisterSameCallbackMultipleTimes) {
     ASSERT_EQ(countTimerCallbackQueue(&timer), static_cast<size_t>(0));
 }
 
+TEST_F(RecurrentTimerTest, testRegisterCallbackMultipleTimesNoDeadLock) {
+    // We want to avoid the following situation:
+    // Caller holds a lock while calling registerTimerCallback, registerTimerCallback will try
+    // to obtain an internal lock inside timer.
+    // Meanwhile a recurrent action happens with timer holding an internal lock. The action
+    // tries to obtain the lock currently held by the caller.
+    // The solution is that while calling recurrent actions, timer must not hold the internal lock.
+
+    std::unique_ptr<RecurrentTimer> timer = std::make_unique<RecurrentTimer>();
+    std::mutex lock;
+    for (size_t i = 0; i < 1000; i++) {
+        std::scoped_lock<std::mutex> lockGuard(lock);
+        auto action = std::make_shared<RecurrentTimer::Callback>([&lock] {
+            // While calling this function, the timer must not hold its internal lock; otherwise
+            // this would deadlock.
+            std::scoped_lock<std::mutex> lockGuard(lock);
+        });
+        // 10ms
+        int64_t interval = 10'000'000;
+        timer->registerTimerCallback(interval, action);
+        // Sleep for a little while to let the recurrent actions begin.
+        std::this_thread::sleep_for(std::chrono::milliseconds(1));
+    }
+    // Make sure we stop the timer before we destroy lock.
+    timer.reset();
+}
+
 } // namespace vehicle
 } // namespace automotive
 } // namespace hardware