AIDL BufferPool implementation (HIDL -> AIDL)

Bug: 254050250
Test: m
Merged-In: I0d7bae2c01bd480d1e99f4b39c4a9013a2828897
Change-Id: I0d7bae2c01bd480d1e99f4b39c4a9013a2828897
This commit is contained in:
Sungtak Lee 2022-12-07 11:42:03 +00:00 committed by Wonsik Kim
parent 97e1dfb99d
commit 8878a13271
26 changed files with 2199 additions and 2864 deletions

View file

@ -1,5 +1,5 @@
/* /*
* Copyright (C) 2018 The Android Open Source Project * Copyright (C) 2022 The Android Open Source Project
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License. * you may not use this file except in compliance with the License.
@ -13,22 +13,60 @@
* See the License for the specific language governing permissions and * See the License for the specific language governing permissions and
* limitations under the License. * limitations under the License.
*/ */
#define LOG_TAG "BufferPoolConnection" #define LOG_TAG "AidlBufferPoolAcc"
//#define LOG_NDEBUG 0
#include <sys/types.h>
#include <stdint.h>
#include <time.h>
#include <unistd.h>
#include <utils/Log.h>
#include <thread>
#include "Accessor.h" #include "Accessor.h"
#include "AccessorImpl.h"
#include "Connection.h" #include "Connection.h"
#include "DataHelper.h"
namespace android { namespace aidl::android::hardware::media::bufferpool2::implementation {
namespace hardware {
namespace media { namespace {
namespace bufferpool { static constexpr nsecs_t kEvictGranularityNs = 1000000000; // 1 sec
namespace V2_0 { static constexpr nsecs_t kEvictDurationNs = 5000000000; // 5 secs
namespace implementation { }
#ifdef __ANDROID_VNDK__
static constexpr uint32_t kSeqIdVndkBit = 1U << 31;
#else
static constexpr uint32_t kSeqIdVndkBit = 0;
#endif
static constexpr uint32_t kSeqIdMax = 0x7fffffff;
uint32_t Accessor::sSeqId = time(nullptr) & kSeqIdMax;
namespace {
// anonymous namespace
static std::shared_ptr<ConnectionDeathRecipient> sConnectionDeathRecipient =
std::make_shared<ConnectionDeathRecipient>();
void serviceDied(void *cookie) {
if (sConnectionDeathRecipient) {
sConnectionDeathRecipient->onDead(cookie);
}
}
}
std::shared_ptr<ConnectionDeathRecipient> Accessor::getConnectionDeathRecipient() {
return sConnectionDeathRecipient;
}
ConnectionDeathRecipient::ConnectionDeathRecipient() {
mDeathRecipient = ndk::ScopedAIBinder_DeathRecipient(
AIBinder_DeathRecipient_new(serviceDied));
}
void ConnectionDeathRecipient::add( void ConnectionDeathRecipient::add(
int64_t connectionId, int64_t connectionId,
const sp<Accessor> &accessor) { const std::shared_ptr<Accessor> &accessor) {
std::lock_guard<std::mutex> lock(mLock); std::lock_guard<std::mutex> lock(mLock);
if (mAccessors.find(connectionId) == mAccessors.end()) { if (mAccessors.find(connectionId) == mAccessors.end()) {
mAccessors.insert(std::make_pair(connectionId, accessor)); mAccessors.insert(std::make_pair(connectionId, accessor));
@ -40,7 +78,7 @@ void ConnectionDeathRecipient::remove(int64_t connectionId) {
mAccessors.erase(connectionId); mAccessors.erase(connectionId);
auto it = mConnectionToCookie.find(connectionId); auto it = mConnectionToCookie.find(connectionId);
if (it != mConnectionToCookie.end()) { if (it != mConnectionToCookie.end()) {
uint64_t cookie = it->second; void * cookie = it->second;
mConnectionToCookie.erase(it); mConnectionToCookie.erase(it);
auto cit = mCookieToConnections.find(cookie); auto cit = mCookieToConnections.find(cookie);
if (cit != mCookieToConnections.end()) { if (cit != mCookieToConnections.end()) {
@ -53,7 +91,7 @@ void ConnectionDeathRecipient::remove(int64_t connectionId) {
} }
void ConnectionDeathRecipient::addCookieToConnection( void ConnectionDeathRecipient::addCookieToConnection(
uint64_t cookie, void *cookie,
int64_t connectionId) { int64_t connectionId) {
std::lock_guard<std::mutex> lock(mLock); std::lock_guard<std::mutex> lock(mLock);
if (mAccessors.find(connectionId) == mAccessors.end()) { if (mAccessors.find(connectionId) == mAccessors.end()) {
@ -69,11 +107,8 @@ void ConnectionDeathRecipient::addCookieToConnection(
} }
} }
void ConnectionDeathRecipient::serviceDied( void ConnectionDeathRecipient::onDead(void *cookie) {
uint64_t cookie, std::map<int64_t, const std::weak_ptr<Accessor>> connectionsToClose;
const wp<::android::hidl::base::V1_0::IBase>& /* who */
) {
std::map<int64_t, const wp<Accessor>> connectionsToClose;
{ {
std::lock_guard<std::mutex> lock(mLock); std::lock_guard<std::mutex> lock(mLock);
@ -92,9 +127,9 @@ void ConnectionDeathRecipient::serviceDied(
} }
if (connectionsToClose.size() > 0) { if (connectionsToClose.size() > 0) {
sp<Accessor> accessor; std::shared_ptr<Accessor> accessor;
for (auto it = connectionsToClose.begin(); it != connectionsToClose.end(); ++it) { for (auto it = connectionsToClose.begin(); it != connectionsToClose.end(); ++it) {
accessor = it->second.promote(); accessor = it->second.lock();
if (accessor) { if (accessor) {
accessor->close(it->first); accessor->close(it->first);
@ -104,127 +139,371 @@ void ConnectionDeathRecipient::serviceDied(
} }
} }
namespace { AIBinder_DeathRecipient *ConnectionDeathRecipient::getRecipient() {
static sp<ConnectionDeathRecipient> sConnectionDeathRecipient = return mDeathRecipient.get();
new ConnectionDeathRecipient();
} }
sp<ConnectionDeathRecipient> Accessor::getConnectionDeathRecipient() { ::ndk::ScopedAStatus Accessor::connect(const std::shared_ptr<::aidl::android::hardware::media::bufferpool2::IObserver>& in_observer, ::aidl::android::hardware::media::bufferpool2::IAccessor::ConnectionInfo* _aidl_return) {
return sConnectionDeathRecipient; std::shared_ptr<Connection> connection;
}
void Accessor::createInvalidator() {
Accessor::Impl::createInvalidator();
}
void Accessor::createEvictor() {
Accessor::Impl::createEvictor();
}
// Methods from ::android::hardware::media::bufferpool::V2_0::IAccessor follow.
Return<void> Accessor::connect(
const sp<::android::hardware::media::bufferpool::V2_0::IObserver>& observer,
connect_cb _hidl_cb) {
sp<Connection> connection;
ConnectionId connectionId; ConnectionId connectionId;
uint32_t msgId; uint32_t msgId;
const StatusDescriptor* fmqDesc; StatusDescriptor statusDesc;
const InvalidationDescriptor* invDesc; InvalidationDescriptor invDesc;
BufferPoolStatus status = connect(
ResultStatus status = connect( in_observer, false, &connection, &connectionId, &msgId, &statusDesc, &invDesc);
observer, false, &connection, &connectionId, &msgId, &fmqDesc, &invDesc);
if (status == ResultStatus::OK) { if (status == ResultStatus::OK) {
_hidl_cb(status, connection, connectionId, msgId, *fmqDesc, *invDesc); _aidl_return->connection = connection;
} else { _aidl_return->connectionId = connectionId;
_hidl_cb(status, nullptr, -1LL, 0, _aidl_return->msgId = msgId;
android::hardware::MQDescriptorSync<BufferStatusMessage>( _aidl_return->toFmqDesc = std::move(statusDesc);
std::vector<android::hardware::GrantorDescriptor>(), _aidl_return->fromFmqDesc = std::move(invDesc);
nullptr /* nhandle */, 0 /* size */), return ::ndk::ScopedAStatus::ok();
android::hardware::MQDescriptorUnsync<BufferInvalidationMessage>(
std::vector<android::hardware::GrantorDescriptor>(),
nullptr /* nhandle */, 0 /* size */));
} }
return Void(); return ::ndk::ScopedAStatus::fromServiceSpecificError(status);
} }
Accessor::Accessor(const std::shared_ptr<BufferPoolAllocator> &allocator) Accessor::Accessor(const std::shared_ptr<BufferPoolAllocator> &allocator)
: mImpl(new Impl(allocator)) {} : mAllocator(allocator), mScheduleEvictTs(0) {}
Accessor::~Accessor() { Accessor::~Accessor() {
} }
bool Accessor::isValid() { bool Accessor::isValid() {
return (bool)mImpl && mImpl->isValid(); return mBufferPool.isValid();
} }
ResultStatus Accessor::flush() { BufferPoolStatus Accessor::flush() {
if (mImpl) { std::lock_guard<std::mutex> lock(mBufferPool.mMutex);
mImpl->flush(); mBufferPool.processStatusMessages();
return ResultStatus::OK; mBufferPool.flush(ref<Accessor>());
} return ResultStatus::OK;
return ResultStatus::CRITICAL_ERROR;
} }
ResultStatus Accessor::allocate( BufferPoolStatus Accessor::allocate(
ConnectionId connectionId, ConnectionId connectionId,
const std::vector<uint8_t> &params, const std::vector<uint8_t> &params,
BufferId *bufferId, const native_handle_t** handle) { BufferId *bufferId, const native_handle_t** handle) {
if (mImpl) { std::unique_lock<std::mutex> lock(mBufferPool.mMutex);
return mImpl->allocate(connectionId, params, bufferId, handle); mBufferPool.processStatusMessages();
BufferPoolStatus status = ResultStatus::OK;
if (!mBufferPool.getFreeBuffer(mAllocator, params, bufferId, handle)) {
lock.unlock();
std::shared_ptr<BufferPoolAllocation> alloc;
size_t allocSize;
status = mAllocator->allocate(params, &alloc, &allocSize);
lock.lock();
if (status == ResultStatus::OK) {
status = mBufferPool.addNewBuffer(alloc, allocSize, params, bufferId, handle);
}
ALOGV("create a buffer %d : %u %p",
status == ResultStatus::OK, *bufferId, *handle);
} }
return ResultStatus::CRITICAL_ERROR; if (status == ResultStatus::OK) {
// TODO: handle ownBuffer failure
mBufferPool.handleOwnBuffer(connectionId, *bufferId);
}
mBufferPool.cleanUp();
scheduleEvictIfNeeded();
return status;
} }
ResultStatus Accessor::fetch( BufferPoolStatus Accessor::fetch(
ConnectionId connectionId, TransactionId transactionId, ConnectionId connectionId, TransactionId transactionId,
BufferId bufferId, const native_handle_t** handle) { BufferId bufferId, const native_handle_t** handle) {
if (mImpl) { std::lock_guard<std::mutex> lock(mBufferPool.mMutex);
return mImpl->fetch(connectionId, transactionId, bufferId, handle); mBufferPool.processStatusMessages();
} auto found = mBufferPool.mTransactions.find(transactionId);
return ResultStatus::CRITICAL_ERROR; if (found != mBufferPool.mTransactions.end() &&
} contains(&mBufferPool.mPendingTransactions,
connectionId, transactionId)) {
ResultStatus Accessor::connect( if (found->second->mSenderValidated &&
const sp<IObserver> &observer, bool local, found->second->mStatus == BufferStatus::TRANSFER_FROM &&
sp<Connection> *connection, ConnectionId *pConnectionId, found->second->mBufferId == bufferId) {
uint32_t *pMsgId, found->second->mStatus = BufferStatus::TRANSFER_FETCH;
const StatusDescriptor** statusDescPtr, auto bufferIt = mBufferPool.mBuffers.find(bufferId);
const InvalidationDescriptor** invDescPtr) { if (bufferIt != mBufferPool.mBuffers.end()) {
if (mImpl) { mBufferPool.mStats.onBufferFetched();
ResultStatus status = mImpl->connect( *handle = bufferIt->second->handle();
this, observer, connection, pConnectionId, pMsgId, return ResultStatus::OK;
statusDescPtr, invDescPtr); }
if (!local && status == ResultStatus::OK) {
sp<Accessor> accessor(this);
sConnectionDeathRecipient->add(*pConnectionId, accessor);
} }
return status;
} }
mBufferPool.cleanUp();
scheduleEvictIfNeeded();
return ResultStatus::CRITICAL_ERROR; return ResultStatus::CRITICAL_ERROR;
} }
ResultStatus Accessor::close(ConnectionId connectionId) { BufferPoolStatus Accessor::connect(
if (mImpl) { const std::shared_ptr<IObserver> &observer, bool local,
ResultStatus status = mImpl->close(connectionId); std::shared_ptr<Connection> *connection, ConnectionId *pConnectionId,
sConnectionDeathRecipient->remove(connectionId); uint32_t *pMsgId,
return status; StatusDescriptor* statusDescPtr,
InvalidationDescriptor* invDescPtr) {
std::shared_ptr<Connection> newConnection = ::ndk::SharedRefBase::make<Connection>();
BufferPoolStatus status = ResultStatus::CRITICAL_ERROR;
{
std::lock_guard<std::mutex> lock(mBufferPool.mMutex);
if (newConnection) {
int32_t pid = getpid();
ConnectionId id = (int64_t)pid << 32 | sSeqId | kSeqIdVndkBit;
status = mBufferPool.mObserver.open(id, statusDescPtr);
if (status == ResultStatus::OK) {
newConnection->initialize(ref<Accessor>(), id);
*connection = newConnection;
*pConnectionId = id;
*pMsgId = mBufferPool.mInvalidation.mInvalidationId;
mBufferPool.mConnectionIds.insert(id);
mBufferPool.mInvalidationChannel.getDesc(invDescPtr);
mBufferPool.mInvalidation.onConnect(id, observer);
if (sSeqId == kSeqIdMax) {
sSeqId = 0;
} else {
++sSeqId;
}
}
}
mBufferPool.processStatusMessages();
mBufferPool.cleanUp();
scheduleEvictIfNeeded();
} }
return ResultStatus::CRITICAL_ERROR; if (!local && status == ResultStatus::OK) {
std::shared_ptr<Accessor> accessor(ref<Accessor>());
sConnectionDeathRecipient->add(*pConnectionId, accessor);
}
return status;
}
BufferPoolStatus Accessor::close(ConnectionId connectionId) {
{
std::lock_guard<std::mutex> lock(mBufferPool.mMutex);
ALOGV("connection close %lld: %u", (long long)connectionId, mBufferPool.mInvalidation.mId);
mBufferPool.processStatusMessages();
mBufferPool.handleClose(connectionId);
mBufferPool.mObserver.close(connectionId);
mBufferPool.mInvalidation.onClose(connectionId);
// Since close# will be called after all works are finished, it is OK to
// evict unused buffers.
mBufferPool.cleanUp(true);
scheduleEvictIfNeeded();
}
sConnectionDeathRecipient->remove(connectionId);
return ResultStatus::OK;
} }
void Accessor::cleanUp(bool clearCache) { void Accessor::cleanUp(bool clearCache) {
if (mImpl) { // transaction timeout, buffer caching TTL handling
mImpl->cleanUp(clearCache); std::lock_guard<std::mutex> lock(mBufferPool.mMutex);
mBufferPool.processStatusMessages();
mBufferPool.cleanUp(clearCache);
}
void Accessor::handleInvalidateAck() {
std::map<ConnectionId, const std::shared_ptr<IObserver>> observers;
uint32_t invalidationId;
{
std::lock_guard<std::mutex> lock(mBufferPool.mMutex);
mBufferPool.processStatusMessages();
mBufferPool.mInvalidation.onHandleAck(&observers, &invalidationId);
}
// Do not hold lock for send invalidations
size_t deadClients = 0;
for (auto it = observers.begin(); it != observers.end(); ++it) {
const std::shared_ptr<IObserver> observer = it->second;
if (observer) {
::ndk::ScopedAStatus status = observer->onMessage(it->first, invalidationId);
if (!status.isOk()) {
++deadClients;
}
}
}
if (deadClients > 0) {
ALOGD("During invalidation found %zu dead clients", deadClients);
} }
} }
//IAccessor* HIDL_FETCH_IAccessor(const char* /* name */) { void Accessor::invalidatorThread(
// return new Accessor(); std::map<uint32_t, const std::weak_ptr<Accessor>> &accessors,
//} std::mutex &mutex,
std::condition_variable &cv,
bool &ready) {
constexpr uint32_t NUM_SPIN_TO_INCREASE_SLEEP = 1024;
constexpr uint32_t NUM_SPIN_TO_LOG = 1024*8;
constexpr useconds_t MAX_SLEEP_US = 10000;
uint32_t numSpin = 0;
useconds_t sleepUs = 1;
} // namespace implementation while(true) {
} // namespace V2_0 std::map<uint32_t, const std::weak_ptr<Accessor>> copied;
} // namespace bufferpool {
} // namespace media std::unique_lock<std::mutex> lock(mutex);
} // namespace hardware while (!ready) {
} // namespace android numSpin = 0;
sleepUs = 1;
cv.wait(lock);
}
copied.insert(accessors.begin(), accessors.end());
}
std::list<ConnectionId> erased;
for (auto it = copied.begin(); it != copied.end(); ++it) {
const std::shared_ptr<Accessor> acc = it->second.lock();
if (!acc) {
erased.push_back(it->first);
} else {
acc->handleInvalidateAck();
}
}
{
std::unique_lock<std::mutex> lock(mutex);
for (auto it = erased.begin(); it != erased.end(); ++it) {
accessors.erase(*it);
}
if (accessors.size() == 0) {
ready = false;
} else {
// N.B. Since there is not a efficient way to wait over FMQ,
// polling over the FMQ is the current way to prevent draining
// CPU.
lock.unlock();
++numSpin;
if (numSpin % NUM_SPIN_TO_INCREASE_SLEEP == 0 &&
sleepUs < MAX_SLEEP_US) {
sleepUs *= 10;
}
if (numSpin % NUM_SPIN_TO_LOG == 0) {
ALOGW("invalidator thread spinning");
}
::usleep(sleepUs);
}
}
}
}
// Spawns the detached worker thread that polls registered accessors for
// buffer-invalidation ACKs. The thread holds references to this object's
// members, so the invalidator must outlive it — it is only ever created
// as the process-lifetime singleton sInvalidator.
Accessor::AccessorInvalidator::AccessorInvalidator() : mReady(false) {
    std::thread worker(invalidatorThread,
                       std::ref(mAccessors),
                       std::ref(mMutex),
                       std::ref(mCv),
                       std::ref(mReady));
    worker.detach();
}
// Registers an accessor for invalidation-ACK polling. The worker thread
// needs a wake-up only on the idle -> busy transition (mReady was false
// and a new entry was actually inserted); notify_one() is issued after
// the lock is released to avoid waking the worker into a held mutex.
void Accessor::AccessorInvalidator::addAccessor(
    uint32_t accessorId, const std::weak_ptr<Accessor> &accessor) {
    bool shouldWake = false;
    {
        std::unique_lock<std::mutex> guard(mMutex);
        if (mAccessors.find(accessorId) == mAccessors.end()) {
            if (!mReady) {
                mReady = true;
                shouldWake = true;
            }
            mAccessors.emplace(accessorId, accessor);
            ALOGV("buffer invalidation added bp:%u %d", accessorId, shouldWake);
        }
    }
    if (shouldWake) {
        mCv.notify_one();
    }
}
// Unregisters an accessor. When the registry drains, mReady is cleared so
// the worker thread parks on the condition variable until the next
// addAccessor() call.
void Accessor::AccessorInvalidator::delAccessor(uint32_t accessorId) {
    std::lock_guard<std::mutex> guard(mMutex);
    mAccessors.erase(accessorId);
    ALOGV("buffer invalidation deleted bp:%u", accessorId);
    if (mAccessors.empty()) {
        mReady = false;
    }
}
std::unique_ptr<Accessor::AccessorInvalidator> Accessor::sInvalidator;

// Lazily creates the process-wide invalidator singleton. Idempotent:
// subsequent calls are no-ops.
void Accessor::createInvalidator() {
    if (sInvalidator) {
        return;
    }
    sInvalidator = std::make_unique<Accessor::AccessorInvalidator>();
}
// Worker loop for the evictor singleton (runs on a detached thread and
// never returns). Periodically scans the registered accessors; an entry
// whose last-scheduled timestamp is older than kEvictDurationNs is
// removed from the registry and asked to free its cached unused buffers.
void Accessor::evictorThread(
        std::map<const std::weak_ptr<Accessor>, nsecs_t, std::owner_less<>> &accessors,
        std::mutex &mutex,
        std::condition_variable &cv) {
    std::list<const std::weak_ptr<Accessor>> evictList;
    while (true) {
        int expired = 0;
        int evicted = 0;
        {
            nsecs_t now = systemTime();
            std::unique_lock<std::mutex> lock(mutex);
            // Park until at least one accessor is registered.
            while (accessors.size() == 0) {
                cv.wait(lock);
            }
            // Collect stale entries while holding the registry lock ...
            auto it = accessors.begin();
            while (it != accessors.end()) {
                if (now > (it->second + kEvictDurationNs)) {
                    ++expired;
                    evictList.push_back(it->first);
                    it = accessors.erase(it);
                } else {
                    ++it;
                }
            }
        }
        // ... but run the eviction outside of it: cleanUp(true) acquires
        // the accessor's own pool mutex (see Accessor::cleanUp).
        for (auto it = evictList.begin(); it != evictList.end(); ++it) {
            const std::shared_ptr<Accessor> accessor = it->lock();
            if (accessor) {
                accessor->cleanUp(true);
                ++evicted;
            }
        }
        if (expired > 0) {
            ALOGD("evictor expired: %d, evicted: %d", expired, evicted);
        }
        evictList.clear();
        // kEvictGranularityNs is in nanoseconds; usleep takes microseconds.
        ::usleep(kEvictGranularityNs / 1000);
    }
}
// Starts the detached eviction worker thread. The thread receives
// references to this object's members, so the evictor must outlive it —
// it is only ever created as the process-lifetime singleton sEvictor.
Accessor::AccessorEvictor::AccessorEvictor() {
    std::thread worker(evictorThread,
                       std::ref(mAccessors),
                       std::ref(mMutex),
                       std::ref(mCv));
    worker.detach();
}
// Records (or refreshes) the schedule timestamp of an accessor. The
// worker thread only needs a wake-up on the empty -> non-empty
// transition; otherwise it re-scans on its own polling cadence.
void Accessor::AccessorEvictor::addAccessor(
    const std::weak_ptr<Accessor> &accessor, nsecs_t ts) {
    std::lock_guard<std::mutex> guard(mMutex);
    const bool wakeWorker = mAccessors.empty();
    auto entry = mAccessors.find(accessor);
    if (entry != mAccessors.end()) {
        entry->second = ts;
    } else {
        mAccessors.emplace(accessor, ts);
    }
    if (wakeWorker) {
        mCv.notify_one();
    }
}
std::unique_ptr<Accessor::AccessorEvictor> Accessor::sEvictor;

// Lazily creates the process-wide evictor singleton. Idempotent:
// subsequent calls are no-ops.
void Accessor::createEvictor() {
    if (sEvictor) {
        return;
    }
    sEvictor = std::make_unique<Accessor::AccessorEvictor>();
}
void Accessor::scheduleEvictIfNeeded() {
nsecs_t now = systemTime();
if (now > (mScheduleEvictTs + kEvictGranularityNs)) {
mScheduleEvictTs = now;
sEvictor->addAccessor(ref<Accessor>(), now);
}
}
} // namespace aidl::android::hardware::media::bufferpool2::implementation

View file

@ -1,5 +1,5 @@
/* /*
* Copyright (C) 2018 The Android Open Source Project * Copyright (C) 2022 The Android Open Source Project
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License. * you may not use this file except in compliance with the License.
@ -14,73 +14,65 @@
* limitations under the License. * limitations under the License.
*/ */
#ifndef ANDROID_HARDWARE_MEDIA_BUFFERPOOL_V2_0_ACCESSOR_H #pragma once
#define ANDROID_HARDWARE_MEDIA_BUFFERPOOL_V2_0_ACCESSOR_H
#include <android/hardware/media/bufferpool/2.0/IAccessor.h> #include <aidl/android/hardware/media/bufferpool2/BnAccessor.h>
#include <android/hardware/media/bufferpool/2.0/IObserver.h> #include <aidl/android/hardware/media/bufferpool2/IObserver.h>
#include <bufferpool/BufferPoolTypes.h> #include <bufferpool2/BufferPoolTypes.h>
#include <hidl/MQDescriptor.h>
#include <hidl/Status.h>
#include "BufferStatus.h"
#include <memory>
#include <map>
#include <set> #include <set>
#include <condition_variable>
namespace android { #include "BufferPool.h"
namespace hardware {
namespace media {
namespace bufferpool {
namespace V2_0 {
namespace implementation {
using ::android::hardware::hidl_array; namespace aidl::android::hardware::media::bufferpool2::implementation {
using ::android::hardware::hidl_memory;
using ::android::hardware::hidl_string;
using ::android::hardware::hidl_vec;
using ::android::hardware::Return;
using ::android::hardware::Void;
using ::android::sp;
struct Accessor;
struct Connection; struct Connection;
using ::aidl::android::hardware::media::bufferpool2::IObserver;
using ::aidl::android::hardware::media::bufferpool2::IAccessor;
/** /**
* Receives death notifications from remote connections. * Receives death notifications from remote connections.
* On death notifications, the connections are closed and used resources * On death notifications, the connections are closed and used resources
* are released. * are released.
*/ */
struct ConnectionDeathRecipient : public hardware::hidl_death_recipient { struct ConnectionDeathRecipient {
ConnectionDeathRecipient();
/** /**
* Registers a newly connected connection from remote processes. * Registers a newly connected connection from remote processes.
*/ */
void add(int64_t connectionId, const sp<Accessor> &accessor); void add(int64_t connectionId, const std::shared_ptr<Accessor> &accessor);
/** /**
* Removes a connection. * Removes a connection.
*/ */
void remove(int64_t connectionId); void remove(int64_t connectionId);
void addCookieToConnection(uint64_t cookie, int64_t connectionId); void addCookieToConnection(void *cookie, int64_t connectionId);
virtual void serviceDied( void onDead(void *cookie);
uint64_t /* cookie */,
const wp<::android::hidl::base::V1_0::IBase>& /* who */ AIBinder_DeathRecipient *getRecipient();
) override;
private: private:
::ndk::ScopedAIBinder_DeathRecipient mDeathRecipient;
std::mutex mLock; std::mutex mLock;
std::map<uint64_t, std::set<int64_t>> mCookieToConnections; std::map<void *, std::set<int64_t>> mCookieToConnections;
std::map<int64_t, uint64_t> mConnectionToCookie; std::map<int64_t, void *> mConnectionToCookie;
std::map<int64_t, const wp<Accessor>> mAccessors; std::map<int64_t, const std::weak_ptr<Accessor>> mAccessors;
}; };
/** /**
* A buffer pool accessor which enables a buffer pool to communicate with buffer * A buffer pool accessor which enables a buffer pool to communicate with buffer
* pool clients. 1:1 correspondence holds between a buffer pool and an accessor. * pool clients. 1:1 correspondence holds between a buffer pool and an accessor.
*/ */
struct Accessor : public IAccessor { struct Accessor : public BnAccessor {
// Methods from ::android::hardware::media::bufferpool::V2_0::IAccessor follow. // Methods from ::aidl::android::hardware::media::bufferpool2::IAccessor.
Return<void> connect(const sp<::android::hardware::media::bufferpool::V2_0::IObserver>& observer, connect_cb _hidl_cb) override; ::ndk::ScopedAStatus connect(const std::shared_ptr<IObserver>& in_observer,
IAccessor::ConnectionInfo* _aidl_return) override;
/** /**
* Creates a buffer pool accessor which uses the specified allocator. * Creates a buffer pool accessor which uses the specified allocator.
@ -96,7 +88,7 @@ struct Accessor : public IAccessor {
bool isValid(); bool isValid();
/** Invalidates all buffers which are owned by bufferpool */ /** Invalidates all buffers which are owned by bufferpool */
ResultStatus flush(); BufferPoolStatus flush();
/** Allocates a buffer from a buffer pool. /** Allocates a buffer from a buffer pool.
* *
@ -109,7 +101,7 @@ struct Accessor : public IAccessor {
* NO_MEMORY when there is no memory. * NO_MEMORY when there is no memory.
* CRITICAL_ERROR otherwise. * CRITICAL_ERROR otherwise.
*/ */
ResultStatus allocate( BufferPoolStatus allocate(
ConnectionId connectionId, ConnectionId connectionId,
const std::vector<uint8_t>& params, const std::vector<uint8_t>& params,
BufferId *bufferId, BufferId *bufferId,
@ -127,7 +119,7 @@ struct Accessor : public IAccessor {
* NO_MEMORY when there is no memory. * NO_MEMORY when there is no memory.
* CRITICAL_ERROR otherwise. * CRITICAL_ERROR otherwise.
*/ */
ResultStatus fetch( BufferPoolStatus fetch(
ConnectionId connectionId, ConnectionId connectionId,
TransactionId transactionId, TransactionId transactionId,
BufferId bufferId, BufferId bufferId,
@ -153,13 +145,13 @@ struct Accessor : public IAccessor {
* NO_MEMORY when there is no memory. * NO_MEMORY when there is no memory.
* CRITICAL_ERROR otherwise. * CRITICAL_ERROR otherwise.
*/ */
ResultStatus connect( BufferPoolStatus connect(
const sp<IObserver>& observer, const std::shared_ptr<IObserver>& observer,
bool local, bool local,
sp<Connection> *connection, ConnectionId *pConnectionId, std::shared_ptr<Connection> *connection, ConnectionId *pConnectionId,
uint32_t *pMsgId, uint32_t *pMsgId,
const StatusDescriptor** statusDescPtr, StatusDescriptor* statusDescPtr,
const InvalidationDescriptor** invDescPtr); InvalidationDescriptor* invDescPtr);
/** /**
* Closes the specified connection to the client. * Closes the specified connection to the client.
@ -169,10 +161,10 @@ struct Accessor : public IAccessor {
* @return OK when the connection is closed. * @return OK when the connection is closed.
* CRITICAL_ERROR otherwise. * CRITICAL_ERROR otherwise.
*/ */
ResultStatus close(ConnectionId connectionId); BufferPoolStatus close(ConnectionId connectionId);
/** /**
* Processes pending buffer status messages and perfoms periodic cache * Processes pending buffer status messages and performs periodic cache
* cleaning. * cleaning.
* *
* @param clearCache if clearCache is true, it frees all buffers waiting * @param clearCache if clearCache is true, it frees all buffers waiting
@ -181,24 +173,66 @@ struct Accessor : public IAccessor {
void cleanUp(bool clearCache); void cleanUp(bool clearCache);
/** /**
* Gets a hidl_death_recipient for remote connection death. * ACK on buffer invalidation messages
*/ */
static sp<ConnectionDeathRecipient> getConnectionDeathRecipient(); void handleInvalidateAck();
/**
* Gets a death_recipient for remote connection death.
*/
static std::shared_ptr<ConnectionDeathRecipient> getConnectionDeathRecipient();
static void createInvalidator(); static void createInvalidator();
static void createEvictor(); static void createEvictor();
private: private:
class Impl; // ConnectionId = pid : (timestamp_created + seqId)
std::shared_ptr<Impl> mImpl; // in order to guarantee uniqueness for each connection
static uint32_t sSeqId;
const std::shared_ptr<BufferPoolAllocator> mAllocator;
nsecs_t mScheduleEvictTs;
BufferPool mBufferPool;
struct AccessorInvalidator {
std::map<uint32_t, const std::weak_ptr<Accessor>> mAccessors;
std::mutex mMutex;
std::condition_variable mCv;
bool mReady;
AccessorInvalidator();
void addAccessor(uint32_t accessorId, const std::weak_ptr<Accessor> &accessor);
void delAccessor(uint32_t accessorId);
};
static std::unique_ptr<AccessorInvalidator> sInvalidator;
static void invalidatorThread(
std::map<uint32_t, const std::weak_ptr<Accessor>> &accessors,
std::mutex &mutex,
std::condition_variable &cv,
bool &ready);
struct AccessorEvictor {
std::map<const std::weak_ptr<Accessor>, nsecs_t, std::owner_less<>> mAccessors;
std::mutex mMutex;
std::condition_variable mCv;
AccessorEvictor();
void addAccessor(const std::weak_ptr<Accessor> &accessor, nsecs_t ts);
};
static std::unique_ptr<AccessorEvictor> sEvictor;
static void evictorThread(
std::map<const std::weak_ptr<Accessor>, nsecs_t, std::owner_less<>> &accessors,
std::mutex &mutex,
std::condition_variable &cv);
void scheduleEvictIfNeeded();
friend struct BufferPool;
}; };
} // namespace implementation } // namespace aidl::android::hardware::media::bufferpool2::implementation
} // namespace V2_0
} // namespace bufferpool
} // namespace media
} // namespace hardware
} // namespace android
#endif // ANDROID_HARDWARE_MEDIA_BUFFERPOOL_V2_0_ACCESSOR_H

View file

@ -1,993 +0,0 @@
/*
* Copyright (C) 2018 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#define LOG_TAG "BufferPoolAccessor2.0"
//#define LOG_NDEBUG 0
#include <sys/types.h>
#include <stdint.h>
#include <time.h>
#include <unistd.h>
#include <utils/Log.h>
#include <thread>
#include "AccessorImpl.h"
#include "Connection.h"
namespace android {
namespace hardware {
namespace media {
namespace bufferpool {
namespace V2_0 {
namespace implementation {
namespace {
static constexpr int64_t kCleanUpDurationUs = 500000; // TODO tune 0.5 sec
static constexpr int64_t kLogDurationUs = 5000000; // 5 secs
static constexpr size_t kMinAllocBytesForEviction = 1024*1024*15;
static constexpr size_t kMinBufferCountForEviction = 25;
static constexpr size_t kMaxUnusedBufferCount = 64;
static constexpr size_t kUnusedBufferCountTarget = kMaxUnusedBufferCount - 16;
static constexpr nsecs_t kEvictGranularityNs = 1000000000; // 1 sec
static constexpr nsecs_t kEvictDurationNs = 5000000000; // 5 secs
}
// Buffer structure in bufferpool process
struct InternalBuffer {
BufferId mId;
size_t mOwnerCount;
size_t mTransactionCount;
const std::shared_ptr<BufferPoolAllocation> mAllocation;
const size_t mAllocSize;
const std::vector<uint8_t> mConfig;
bool mInvalidated;
InternalBuffer(
BufferId id,
const std::shared_ptr<BufferPoolAllocation> &alloc,
const size_t allocSize,
const std::vector<uint8_t> &allocConfig)
: mId(id), mOwnerCount(0), mTransactionCount(0),
mAllocation(alloc), mAllocSize(allocSize), mConfig(allocConfig),
mInvalidated(false) {}
const native_handle_t *handle() {
return mAllocation->handle();
}
void invalidate() {
mInvalidated = true;
}
};
// State of a single buffer-transfer transaction between two connections.
struct TransactionStatus {
    TransactionId mId;       // transaction id from the status message
    BufferId mBufferId;      // buffer being transferred
    ConnectionId mSender;    // sending connection; -1LL until validated
    ConnectionId mReceiver;  // receiving connection
    BufferStatus mStatus;    // last observed status of the transfer
    int64_t mTimestampUs;    // timestamp supplied with the first message
    bool mSenderValidated;   // true once the sender posted TRANSFER_TO

    // Builds the record from the first status message seen for this
    // transaction. A TRANSFER_TO message originates from the sender, so
    // both endpoints are known immediately; any other first message comes
    // from the receiver, leaving the sender unknown (-1) and unvalidated
    // until the sender's own message arrives.
    TransactionStatus(const BufferStatusMessage &message, int64_t timestampUs) {
        mId = message.transactionId;
        mBufferId = message.bufferId;
        mStatus = message.newStatus;
        mTimestampUs = timestampUs;
        if (mStatus == BufferStatus::TRANSFER_TO) {
            mSender = message.connectionId;
            mReceiver = message.targetConnectionId;
            mSenderValidated = true;
        } else {
            mSender = -1LL;
            mReceiver = message.connectionId;
            mSenderValidated = false;
        }
    }
};
// Helper template methods for handling map of set.
// Adds `value` to the set stored under `key`, creating the set on demand.
// Returns true iff the (key, value) pair was not already present.
template<class T, class U>
bool insert(std::map<T, std::set<U>> *mapOfSet, T key, U value) {
    auto found = mapOfSet->find(key);
    if (found == mapOfSet->end()) {
        mapOfSet->emplace(key, std::set<U>{value});
        return true;
    }
    // set::insert reports via .second whether the value was newly added.
    return found->second.insert(value).second;
}
// Removes `value` from the set stored under `key`; drops the key entirely
// when its set becomes empty. Returns true iff the value was removed.
template<class T, class U>
bool erase(std::map<T, std::set<U>> *mapOfSet, T key, U value) {
    auto found = mapOfSet->find(key);
    if (found == mapOfSet->end()) {
        return false;
    }
    const bool removed = found->second.erase(value) > 0;
    if (found->second.empty()) {
        mapOfSet->erase(found);
    }
    return removed;
}
// Returns true iff `value` is present in the set stored under `key`.
template<class T, class U>
bool contains(std::map<T, std::set<U>> *mapOfSet, T key, U value) {
    auto found = mapOfSet->find(key);
    return found != mapOfSet->end() &&
            found->second.find(value) != found->second.end();
}
#ifdef __ANDROID_VNDK__
static constexpr uint32_t kSeqIdVndkBit = 1U << 31;
#else
static constexpr uint32_t kSeqIdVndkBit = 0;
#endif
static constexpr uint32_t kSeqIdMax = 0x7fffffff;
uint32_t Accessor::Impl::sSeqId = time(nullptr) & kSeqIdMax;
// Pimpl of Accessor: keeps the allocator used to create buffers and the
// timestamp of the last eviction scheduling (0 = never scheduled).
Accessor::Impl::Impl(
        const std::shared_ptr<BufferPoolAllocator> &allocator)
        : mAllocator(allocator), mScheduleEvictTs(0) {}

Accessor::Impl::~Impl() {
}
// Establishes a new connection to this accessor's buffer pool: generates
// a connection id, opens the buffer-status FMQ for the client, exposes
// the invalidation channel descriptor, and registers the observer for
// invalidation callbacks. Locks the pool mutex internally.
ResultStatus Accessor::Impl::connect(
        const sp<Accessor> &accessor, const sp<IObserver> &observer,
        sp<Connection> *connection,
        ConnectionId *pConnectionId,
        uint32_t *pMsgId,
        const StatusDescriptor** statusDescPtr,
        const InvalidationDescriptor** invDescPtr) {
    sp<Connection> newConnection = new Connection();
    ResultStatus status = ResultStatus::CRITICAL_ERROR;
    {
        std::lock_guard<std::mutex> lock(mBufferPool.mMutex);
        if (newConnection) {
            int32_t pid = getpid();
            // ConnectionId layout: pid in the high 32 bits, sequence
            // number in the low 31 bits, top bit flags a VNDK caller.
            ConnectionId id = (int64_t)pid << 32 | sSeqId | kSeqIdVndkBit;
            status = mBufferPool.mObserver.open(id, statusDescPtr);
            if (status == ResultStatus::OK) {
                newConnection->initialize(accessor, id);
                *connection = newConnection;
                *pConnectionId = id;
                *pMsgId = mBufferPool.mInvalidation.mInvalidationId;
                mBufferPool.mConnectionIds.insert(id);
                mBufferPool.mInvalidationChannel.getDesc(invDescPtr);
                mBufferPool.mInvalidation.onConnect(id, observer);
                // Wrap the sequence counter at kSeqIdMax instead of
                // overflowing into the VNDK bit.
                if (sSeqId == kSeqIdMax) {
                    sSeqId = 0;
                } else {
                    ++sSeqId;
                }
            }
        }
        mBufferPool.processStatusMessages();
        mBufferPool.cleanUp();
        scheduleEvictIfNeeded();
    }
    return status;
}
// Tears down a client connection: drains pending status messages, releases
// all buffers/transactions owned by the connection, closes its status FMQ
// and unregisters it from invalidation tracking.
ResultStatus Accessor::Impl::close(ConnectionId connectionId) {
    std::lock_guard<std::mutex> lock(mBufferPool.mMutex);
    ALOGV("connection close %lld: %u", (long long)connectionId, mBufferPool.mInvalidation.mId);
    mBufferPool.processStatusMessages();
    mBufferPool.handleClose(connectionId);
    mBufferPool.mObserver.close(connectionId);
    mBufferPool.mInvalidation.onClose(connectionId);
    // Since close# will be called after all works are finished, it is OK to
    // evict unused buffers.
    mBufferPool.cleanUp(true);
    scheduleEvictIfNeeded();
    return ResultStatus::OK;
}
// Allocates a buffer for |connectionId|, preferring to recycle a compatible
// free buffer from the cache. The pool lock is deliberately dropped around
// the allocator call so a slow allocation does not block other pool
// operations; it is re-taken before mutating pool state.
ResultStatus Accessor::Impl::allocate(
        ConnectionId connectionId, const std::vector<uint8_t>& params,
        BufferId *bufferId, const native_handle_t** handle) {
    std::unique_lock<std::mutex> lock(mBufferPool.mMutex);
    mBufferPool.processStatusMessages();
    ResultStatus status = ResultStatus::OK;
    if (!mBufferPool.getFreeBuffer(mAllocator, params, bufferId, handle)) {
        // No compatible cached buffer; allocate a fresh one outside the lock.
        lock.unlock();
        std::shared_ptr<BufferPoolAllocation> alloc;
        size_t allocSize;
        status = mAllocator->allocate(params, &alloc, &allocSize);
        lock.lock();
        if (status == ResultStatus::OK) {
            status = mBufferPool.addNewBuffer(alloc, allocSize, params, bufferId, handle);
        }
        ALOGV("create a buffer %d : %u %p",
              status == ResultStatus::OK, *bufferId, *handle);
    }
    if (status == ResultStatus::OK) {
        // TODO: handle ownBuffer failure
        mBufferPool.handleOwnBuffer(connectionId, *bufferId);
    }
    mBufferPool.cleanUp();
    scheduleEvictIfNeeded();
    return status;
}
// Resolves a pending transfer into a native handle for the receiver.
// Succeeds only when the transaction is pending for |connectionId|, the
// sender has been validated (TRANSFER_TO observed), the transaction is in
// TRANSFER_FROM state, and the buffer id matches; the state then advances
// to TRANSFER_FETCH. Note the success path returns early and skips the
// cleanUp/evict housekeeping (fast path).
ResultStatus Accessor::Impl::fetch(
        ConnectionId connectionId, TransactionId transactionId,
        BufferId bufferId, const native_handle_t** handle) {
    std::lock_guard<std::mutex> lock(mBufferPool.mMutex);
    mBufferPool.processStatusMessages();
    auto found = mBufferPool.mTransactions.find(transactionId);
    if (found != mBufferPool.mTransactions.end() &&
            contains(&mBufferPool.mPendingTransactions,
                     connectionId, transactionId)) {
        if (found->second->mSenderValidated &&
                found->second->mStatus == BufferStatus::TRANSFER_FROM &&
                found->second->mBufferId == bufferId) {
            found->second->mStatus = BufferStatus::TRANSFER_FETCH;
            auto bufferIt = mBufferPool.mBuffers.find(bufferId);
            if (bufferIt != mBufferPool.mBuffers.end()) {
                mBufferPool.mStats.onBufferFetched();
                *handle = bufferIt->second->handle();
                return ResultStatus::OK;
            }
        }
    }
    mBufferPool.cleanUp();
    scheduleEvictIfNeeded();
    return ResultStatus::CRITICAL_ERROR;
}
// Drains pending status messages and evicts cached buffers; with
// |clearCache| set, every currently-unused buffer is freed.
void Accessor::Impl::cleanUp(bool clearCache) {
    // transaction timeout, buffer caching TTL handling
    std::lock_guard<std::mutex> lock(mBufferPool.mMutex);
    mBufferPool.processStatusMessages();
    mBufferPool.cleanUp(clearCache);
}
// Requests invalidation of all buffers allocated since the last flush.
void Accessor::Impl::flush() {
    std::lock_guard<std::mutex> lock(mBufferPool.mMutex);
    mBufferPool.processStatusMessages();
    mBufferPool.flush(shared_from_this());
}
// Notifies observers of clients that have not yet acked the latest
// invalidation id. Snapshot of the observers is taken under the pool lock;
// the (potentially slow, cross-process) onMessage calls are made with the
// lock released. |invalidationId| is only read when |observers| is
// non-empty, in which case onHandleAck has set it.
void Accessor::Impl::handleInvalidateAck() {
    std::map<ConnectionId, const sp<IObserver>> observers;
    uint32_t invalidationId;
    {
        std::lock_guard<std::mutex> lock(mBufferPool.mMutex);
        mBufferPool.processStatusMessages();
        mBufferPool.mInvalidation.onHandleAck(&observers, &invalidationId);
    }
    // Do not hold lock for send invalidations
    size_t deadClients = 0;
    for (auto it = observers.begin(); it != observers.end(); ++it) {
        const sp<IObserver> observer = it->second;
        if (observer) {
            // Transport failure implies the client process is gone.
            Return<void> transResult = observer->onMessage(it->first, invalidationId);
            if (!transResult.isOk()) {
                ++deadClients;
            }
        }
    }
    if (deadClients > 0) {
        ALOGD("During invalidation found %zu dead clients", deadClients);
    }
}
// Returns whether the pool's invalidation FMQ was created successfully.
bool Accessor::Impl::isValid() {
    return mBufferPool.isValid();
}
// Constructs an empty buffer pool; validity reflects whether the
// buffer-invalidation FMQ could be created. (The redundant injected-class-
// name qualifier "Impl::Impl::" is dropped for consistency with the other
// member definitions in this file.)
Accessor::Impl::BufferPool::BufferPool()
    : mTimestampUs(getTimestampNow()),
      mLastCleanUpUs(mTimestampUs),
      mLastLogUs(mTimestampUs),
      mSeq(0),
      mStartSeq(0) {
    mValid = mInvalidationChannel.isValid();
}
// Statistics helper: integer percentage of |base| over |total|, rounded to
// the nearest whole percent; 0 when |total| is zero.
template<typename T, typename S>
int percentage(T base, S total) {
    if (!total) {
        return 0;
    }
    const double ratio = 100. * static_cast<S>(base) / total;
    return static_cast<int>(ratio + 0.5);
}
// Process-wide counter handing out a unique id per BufferPool instance.
std::atomic<std::uint32_t> Accessor::Impl::BufferPool::Invalidation::sInvSeqId(0);
// Logs lifetime statistics of the pool on destruction. (The redundant
// injected-class-name qualifier "Impl::Impl::" is dropped for consistency
// with the other member definitions in this file.)
Accessor::Impl::BufferPool::~BufferPool() {
    std::lock_guard<std::mutex> lock(mMutex);
    ALOGD("Destruction - bufferpool2 %p "
          "cached: %zu/%zuM, %zu/%d%% in use; "
          "allocs: %zu, %d%% recycled; "
          "transfers: %zu, %d%% unfetched",
          this, mStats.mBuffersCached, mStats.mSizeCached >> 20,
          mStats.mBuffersInUse, percentage(mStats.mBuffersInUse, mStats.mBuffersCached),
          mStats.mTotalAllocations, percentage(mStats.mTotalRecycles, mStats.mTotalAllocations),
          mStats.mTotalTransfers,
          percentage(mStats.mTotalTransfers - mStats.mTotalFetches, mStats.mTotalTransfers));
}
// Starts invalidation tracking for a new connection and registers its
// observer.
void Accessor::Impl::BufferPool::Invalidation::onConnect(
        ConnectionId conId, const sp<IObserver>& observer) {
    mAcks[conId] = mInvalidationId; // starts from current invalidationId
    mObservers.insert(std::make_pair(conId, observer));
}
// Stops invalidation tracking for a closed (or dead) connection.
void Accessor::Impl::BufferPool::Invalidation::onClose(ConnectionId conId) {
    mAcks.erase(conId);
    mObservers.erase(conId);
}
// Records the most recent invalidation message id acked by |conId|.
// isMessageLater guards against out-of-order acks (with wrap-around).
void Accessor::Impl::BufferPool::Invalidation::onAck(
        ConnectionId conId,
        uint32_t msgId) {
    auto it = mAcks.find(conId);
    if (it == mAcks.end()) {
        ALOGW("ACK from inconsistent connection! %lld", (long long)conId);
        return;
    }
    if (isMessageLater(msgId, it->second)) {
        mAcks[conId] = msgId;
    }
}
// Called when |bufferId| has actually been destroyed. Any pending
// invalidation request whose last covered buffer this was is now complete:
// post it on the invalidation channel (allocating a non-zero msg id only if
// an ack was requested) and drop it from the pending list.
void Accessor::Impl::BufferPool::Invalidation::onBufferInvalidated(
        BufferId bufferId,
        BufferInvalidationChannel &channel) {
    for (auto it = mPendings.begin(); it != mPendings.end();) {
        if (it->isInvalidated(bufferId)) {
            uint32_t msgId = 0;
            if (it->mNeedsAck) {
                msgId = ++mInvalidationId;
                if (msgId == 0) {
                    // wrap happens; 0 is reserved for "no ack needed"
                    msgId = ++mInvalidationId;
                }
            }
            channel.postInvalidation(msgId, it->mFrom, it->mTo);
            it = mPendings.erase(it);
            continue;
        }
        ++it;
    }
}
// Queues (or immediately posts) an invalidation message for the buffer id
// range |from|..|to|. When |left| is zero no owned buffer remains in the
// range and the message is posted right away; otherwise the request stays
// pending until the remaining |left| buffers are released (see
// onBufferInvalidated). A non-zero msg id is allocated only when an ack is
// requested. The accessor is registered with the invalidator thread so the
// resulting acks get processed.
// Fixes: log-message typo ("bufferpoo2" -> "bufferpool2") and the
// mis-indented declaration of msgId.
void Accessor::Impl::BufferPool::Invalidation::onInvalidationRequest(
        bool needsAck,
        uint32_t from,
        uint32_t to,
        size_t left,
        BufferInvalidationChannel &channel,
        const std::shared_ptr<Accessor::Impl> &impl) {
    uint32_t msgId = 0;
    if (needsAck) {
        msgId = ++mInvalidationId;
        if (msgId == 0) {
            // wrap happens; 0 is reserved for "no ack needed"
            msgId = ++mInvalidationId;
        }
    }
    ALOGV("bufferpool2 invalidation requested and queued");
    if (left == 0) {
        channel.postInvalidation(msgId, from, to);
    } else {
        // TODO: sending hint message?
        ALOGV("bufferpool2 invalidation requested and pending");
        Pending pending(needsAck, from, to, left, impl);
        mPendings.push_back(pending);
    }
    sInvalidator->addAccessor(mId, impl);
}
// Collects into |observers| the observers of connections that have not yet
// acked the latest invalidation id, pre-advancing each recorded ack so an
// observer is notified at most once per invalidation (the actual onMessage
// calls happen later, outside the pool lock). Dead observers are closed.
// Once nothing is pending, this accessor is removed from the invalidator
// thread's watch list. |invalidationId| is set whenever mInvalidationId is
// non-zero.
void Accessor::Impl::BufferPool::Invalidation::onHandleAck(
        std::map<ConnectionId, const sp<IObserver>> *observers,
        uint32_t *invalidationId) {
    if (mInvalidationId != 0) {
        *invalidationId = mInvalidationId;
        // BUG FIX: ConnectionId is 64 bits wide; the previous std::set<int>
        // truncated ids, so onClose() below could target the wrong
        // connection. Use the full-width type.
        std::set<ConnectionId> deads;
        for (auto it = mAcks.begin(); it != mAcks.end(); ++it) {
            if (it->second != mInvalidationId) {
                const sp<IObserver> observer = mObservers[it->first];
                if (observer) {
                    observers->emplace(it->first, observer);
                    ALOGV("connection %lld will call observer (%u: %u)",
                          (long long)it->first, it->second, mInvalidationId);
                    // N.B: onMessage will be called later. ignore possibility of
                    // onMessage# oneway call being lost.
                    it->second = mInvalidationId;
                } else {
                    ALOGV("bufferpool2 observer died %lld", (long long)it->first);
                    deads.insert(it->first);
                }
            }
        }
        if (deads.size() > 0) {
            for (auto it = deads.begin(); it != deads.end(); ++it) {
                onClose(*it);
            }
        }
    }
    if (mPendings.size() == 0) {
        // All invalidation Ids are synced and no more pending invalidations.
        sInvalidator->delAccessor(mId);
    }
}
// Records that |connectionId| owns |bufferId|, bumping the buffer's owner
// count on the first ownership by this connection.
// NOTE(review): assumes |bufferId| exists in mBuffers — the find() result is
// dereferenced unchecked; callers must guarantee this.
bool Accessor::Impl::BufferPool::handleOwnBuffer(
        ConnectionId connectionId, BufferId bufferId) {
    bool added = insert(&mUsingBuffers, connectionId, bufferId);
    if (added) {
        auto iter = mBuffers.find(bufferId);
        iter->second->mOwnerCount++;
    }
    insert(&mUsingConnections, bufferId, connectionId);
    return added;
}
// Releases |connectionId|'s ownership of |bufferId|. When the buffer ends up
// with no owner and no in-flight transaction, it either returns to the free
// list or — if it was invalidated — is destroyed and the invalidation
// bookkeeping is advanced.
bool Accessor::Impl::BufferPool::handleReleaseBuffer(
        ConnectionId connectionId, BufferId bufferId) {
    bool deleted = erase(&mUsingBuffers, connectionId, bufferId);
    if (deleted) {
        auto iter = mBuffers.find(bufferId);
        iter->second->mOwnerCount--;
        if (iter->second->mOwnerCount == 0 &&
                iter->second->mTransactionCount == 0) {
            if (!iter->second->mInvalidated) {
                mStats.onBufferUnused(iter->second->mAllocSize);
                mFreeBuffers.insert(bufferId);
            } else {
                mStats.onBufferUnused(iter->second->mAllocSize);
                mStats.onBufferEvicted(iter->second->mAllocSize);
                mBuffers.erase(iter);
                mInvalidation.onBufferInvalidated(bufferId, mInvalidationChannel);
            }
        }
    }
    erase(&mUsingConnections, bufferId, connectionId);
    ALOGV("release buffer %u : %d", bufferId, deleted);
    return deleted;
}
// Handles a TRANSFER_TO message from the sending connection.
// Order of arrival with the receiver's TRANSFER_FROM/result messages is not
// guaranteed, hence the completed-transaction short-circuit and the
// validate-existing-transaction path.
bool Accessor::Impl::BufferPool::handleTransferTo(const BufferStatusMessage &message) {
    auto completed = mCompletedTransactions.find(
            message.transactionId);
    if (completed != mCompletedTransactions.end()) {
        // already completed
        mCompletedTransactions.erase(completed);
        return true;
    }
    // the buffer should exist and be owned.
    auto bufferIter = mBuffers.find(message.bufferId);
    if (bufferIter == mBuffers.end() ||
            !contains(&mUsingBuffers, message.connectionId, message.bufferId)) {
        return false;
    }
    auto found = mTransactions.find(message.transactionId);
    if (found != mTransactions.end()) {
        // transfer_from was received earlier.
        found->second->mSender = message.connectionId;
        found->second->mSenderValidated = true;
        return true;
    }
    if (mConnectionIds.find(message.targetConnectionId) == mConnectionIds.end()) {
        // N.B: it could be fake or receive connection already closed.
        ALOGD("bufferpool2 %p receiver connection %lld is no longer valid",
              this, (long long)message.targetConnectionId);
        return false;
    }
    mStats.onBufferSent();
    mTransactions.insert(std::make_pair(
            message.transactionId,
            std::make_unique<TransactionStatus>(message, mTimestampUs)));
    insert(&mPendingTransactions, message.targetConnectionId,
           message.transactionId);
    bufferIter->second->mTransactionCount++;
    return true;
}
// Handles a TRANSFER_FROM message from the receiving connection. If the
// sender's TRANSFER_TO has not arrived yet, a provisional (unvalidated)
// transaction is created; otherwise the existing transaction advances to
// TRANSFER_FROM.
// NOTE(review): on the provisional path mBuffers.find() is dereferenced
// without an end() check — assumes the buffer id is valid; confirm callers.
bool Accessor::Impl::BufferPool::handleTransferFrom(const BufferStatusMessage &message) {
    auto found = mTransactions.find(message.transactionId);
    if (found == mTransactions.end()) {
        // TODO: is it feasible to check ownership here?
        mStats.onBufferSent();
        mTransactions.insert(std::make_pair(
                message.transactionId,
                std::make_unique<TransactionStatus>(message, mTimestampUs)));
        insert(&mPendingTransactions, message.connectionId,
               message.transactionId);
        auto bufferIter = mBuffers.find(message.bufferId);
        bufferIter->second->mTransactionCount++;
    } else {
        if (message.connectionId == found->second->mReceiver) {
            found->second->mStatus = BufferStatus::TRANSFER_FROM;
        }
    }
    return true;
}
// Handles TRANSFER_OK / TRANSFER_ERROR from the receiver, finishing the
// transaction: ownership moves to the receiver on OK, and an unowned,
// transaction-free buffer returns to the free list (or is destroyed when
// invalidated). If the sender was never validated the transaction id is
// remembered so a late TRANSFER_TO can be ignored.
bool Accessor::Impl::BufferPool::handleTransferResult(const BufferStatusMessage &message) {
    auto found = mTransactions.find(message.transactionId);
    if (found != mTransactions.end()) {
        bool deleted = erase(&mPendingTransactions, message.connectionId,
                             message.transactionId);
        if (deleted) {
            if (!found->second->mSenderValidated) {
                mCompletedTransactions.insert(message.transactionId);
            }
            auto bufferIter = mBuffers.find(message.bufferId);
            if (message.newStatus == BufferStatus::TRANSFER_OK) {
                handleOwnBuffer(message.connectionId, message.bufferId);
            }
            bufferIter->second->mTransactionCount--;
            if (bufferIter->second->mOwnerCount == 0
                && bufferIter->second->mTransactionCount == 0) {
                if (!bufferIter->second->mInvalidated) {
                    mStats.onBufferUnused(bufferIter->second->mAllocSize);
                    mFreeBuffers.insert(message.bufferId);
                } else {
                    mStats.onBufferUnused(bufferIter->second->mAllocSize);
                    mStats.onBufferEvicted(bufferIter->second->mAllocSize);
                    mBuffers.erase(bufferIter);
                    mInvalidation.onBufferInvalidated(message.bufferId, mInvalidationChannel);
                }
            }
            mTransactions.erase(found);
        }
        ALOGV("transfer finished %llu %u - %d", (unsigned long long)message.transactionId,
              message.bufferId, deleted);
        return deleted;
    }
    ALOGV("transfer not found %llu %u", (unsigned long long)message.transactionId,
          message.bufferId);
    return false;
}
// Drains the status FMQ and dispatches each message to its handler.
// Also refreshes the pool timestamp used for transaction/cache bookkeeping.
void Accessor::Impl::BufferPool::processStatusMessages() {
    std::vector<BufferStatusMessage> messages;
    mObserver.getBufferStatusChanges(messages);
    mTimestampUs = getTimestampNow();
    for (BufferStatusMessage& message: messages) {
        bool ret = false;
        switch (message.newStatus) {
            case BufferStatus::NOT_USED:
                ret = handleReleaseBuffer(
                        message.connectionId, message.bufferId);
                break;
            case BufferStatus::USED:
                // not happening
                break;
            case BufferStatus::TRANSFER_TO:
                ret = handleTransferTo(message);
                break;
            case BufferStatus::TRANSFER_FROM:
                ret = handleTransferFrom(message);
                break;
            case BufferStatus::TRANSFER_TIMEOUT:
                // TODO
                break;
            case BufferStatus::TRANSFER_LOST:
                // TODO
                break;
            case BufferStatus::TRANSFER_FETCH:
                // not happening
                break;
            case BufferStatus::TRANSFER_OK:
            case BufferStatus::TRANSFER_ERROR:
                ret = handleTransferResult(message);
                break;
            case BufferStatus::INVALIDATION_ACK:
                // For ACK messages the bufferId field carries the acked
                // invalidation msg id (see onAck's msgId parameter).
                mInvalidation.onAck(message.connectionId, message.bufferId);
                ret = true;
                break;
        }
        if (ret == false) {
            ALOGW("buffer status message processing failure - message : %d connection : %lld",
                  message.newStatus, (long long)message.connectionId);
        }
    }
    messages.clear();
}
// Cleans up all state for a closing connection: drops its buffer ownership
// and its pending transactions; buffers left with no owner and no
// transaction go back to the free list (or are destroyed when invalidated).
bool Accessor::Impl::BufferPool::handleClose(ConnectionId connectionId) {
    // Cleaning buffers
    auto buffers = mUsingBuffers.find(connectionId);
    if (buffers != mUsingBuffers.end()) {
        for (const BufferId& bufferId : buffers->second) {
            bool deleted = erase(&mUsingConnections, bufferId, connectionId);
            if (deleted) {
                auto bufferIter = mBuffers.find(bufferId);
                bufferIter->second->mOwnerCount--;
                if (bufferIter->second->mOwnerCount == 0 &&
                        bufferIter->second->mTransactionCount == 0) {
                    // TODO: handle freebuffer insert fail
                    if (!bufferIter->second->mInvalidated) {
                        mStats.onBufferUnused(bufferIter->second->mAllocSize);
                        mFreeBuffers.insert(bufferId);
                    } else {
                        mStats.onBufferUnused(bufferIter->second->mAllocSize);
                        mStats.onBufferEvicted(bufferIter->second->mAllocSize);
                        mBuffers.erase(bufferIter);
                        mInvalidation.onBufferInvalidated(bufferId, mInvalidationChannel);
                    }
                }
            }
        }
        mUsingBuffers.erase(buffers);
    }
    // Cleaning transactions
    auto pending = mPendingTransactions.find(connectionId);
    if (pending != mPendingTransactions.end()) {
        for (const TransactionId& transactionId : pending->second) {
            auto iter = mTransactions.find(transactionId);
            if (iter != mTransactions.end()) {
                if (!iter->second->mSenderValidated) {
                    // A late TRANSFER_TO for this id must be ignored.
                    mCompletedTransactions.insert(transactionId);
                }
                BufferId bufferId = iter->second->mBufferId;
                auto bufferIter = mBuffers.find(bufferId);
                bufferIter->second->mTransactionCount--;
                if (bufferIter->second->mOwnerCount == 0 &&
                        bufferIter->second->mTransactionCount == 0) {
                    // TODO: handle freebuffer insert fail
                    if (!bufferIter->second->mInvalidated) {
                        mStats.onBufferUnused(bufferIter->second->mAllocSize);
                        mFreeBuffers.insert(bufferId);
                    } else {
                        mStats.onBufferUnused(bufferIter->second->mAllocSize);
                        mStats.onBufferEvicted(bufferIter->second->mAllocSize);
                        mBuffers.erase(bufferIter);
                        mInvalidation.onBufferInvalidated(bufferId, mInvalidationChannel);
                    }
                }
                mTransactions.erase(iter);
            }
        }
    }
    mConnectionIds.erase(connectionId);
    return true;
}
// Scans the free list for a buffer whose stored config is compatible with
// |params| (per the allocator); on a hit, removes it from the free list and
// returns its id/handle. Returns false when nothing compatible is cached.
bool Accessor::Impl::BufferPool::getFreeBuffer(
        const std::shared_ptr<BufferPoolAllocator> &allocator,
        const std::vector<uint8_t> &params, BufferId *pId,
        const native_handle_t** handle) {
    auto bufferIt = mFreeBuffers.begin();
    for (;bufferIt != mFreeBuffers.end(); ++bufferIt) {
        BufferId bufferId = *bufferIt;
        // Free-list ids are expected to exist in mBuffers; operator[] relies
        // on that invariant.
        if (allocator->compatible(params, mBuffers[bufferId]->mConfig)) {
            break;
        }
    }
    if (bufferIt != mFreeBuffers.end()) {
        BufferId id = *bufferIt;
        mFreeBuffers.erase(bufferIt);
        mStats.onBufferRecycled(mBuffers[id]->mAllocSize);
        *handle = mBuffers[id]->handle();
        *pId = id;
        ALOGV("recycle a buffer %u %p", id, *handle);
        return true;
    }
    return false;
}
// Registers a freshly allocated buffer under the next sequential id
// (wrapping before the reserved SYNC_BUFFERID) and updates statistics.
//
// Returns OK on success; NO_MEMORY when the id could not be inserted
// (id collision). The previous null-check on the make_unique result was
// dead code: std::make_unique never returns null — it throws (or aborts
// under -fno-exceptions) on allocation failure.
ResultStatus Accessor::Impl::BufferPool::addNewBuffer(
        const std::shared_ptr<BufferPoolAllocation> &alloc,
        const size_t allocSize,
        const std::vector<uint8_t> &params,
        BufferId *pId,
        const native_handle_t** handle) {
    BufferId bufferId = mSeq++;
    if (mSeq == Connection::SYNC_BUFFERID) {
        mSeq = 0;
    }
    std::unique_ptr<InternalBuffer> buffer =
            std::make_unique<InternalBuffer>(
                    bufferId, alloc, allocSize, params);
    auto res = mBuffers.insert(std::make_pair(
            bufferId, std::move(buffer)));
    if (res.second) {
        mStats.onBufferAllocated(allocSize);
        *handle = alloc->handle();
        *pId = bufferId;
        return ResultStatus::OK;
    }
    return ResultStatus::NO_MEMORY;
}
// Periodically (or forcibly with |clearCache|) evicts unused cached buffers.
// Eviction stops early once the unused-buffer count is back under target and
// the cache is small enough, unless a full clear was requested. Also emits a
// rate-limited statistics log.
void Accessor::Impl::BufferPool::cleanUp(bool clearCache) {
    if (clearCache || mTimestampUs > mLastCleanUpUs + kCleanUpDurationUs ||
            mStats.buffersNotInUse() > kMaxUnusedBufferCount) {
        mLastCleanUpUs = mTimestampUs;
        if (mTimestampUs > mLastLogUs + kLogDurationUs ||
                mStats.buffersNotInUse() > kMaxUnusedBufferCount) {
            mLastLogUs = mTimestampUs;
            ALOGD("bufferpool2 %p : %zu(%zu size) total buffers - "
                  "%zu(%zu size) used buffers - %zu/%zu (recycle/alloc) - "
                  "%zu/%zu (fetch/transfer)",
                  this, mStats.mBuffersCached, mStats.mSizeCached,
                  mStats.mBuffersInUse, mStats.mSizeInUse,
                  mStats.mTotalRecycles, mStats.mTotalAllocations,
                  mStats.mTotalFetches, mStats.mTotalTransfers);
        }
        for (auto freeIt = mFreeBuffers.begin(); freeIt != mFreeBuffers.end();) {
            if (!clearCache && mStats.buffersNotInUse() <= kUnusedBufferCountTarget &&
                    (mStats.mSizeCached < kMinAllocBytesForEviction ||
                     mBuffers.size() < kMinBufferCountForEviction)) {
                break;
            }
            auto it = mBuffers.find(*freeIt);
            if (it != mBuffers.end() &&
                    it->second->mOwnerCount == 0 && it->second->mTransactionCount == 0) {
                mStats.onBufferEvicted(it->second->mAllocSize);
                mBuffers.erase(it);
                freeIt = mFreeBuffers.erase(freeIt);
            } else {
                // Free list and buffer map disagree; skip this entry.
                ++freeIt;
                ALOGW("bufferpool2 inconsistent!");
            }
        }
    }
}
// Invalidates all buffers in the id range |from|..|to|: free (unowned)
// buffers are destroyed immediately; buffers still owned or in transit are
// flagged invalidated and counted, deferring the invalidation message until
// the last of them is released.
void Accessor::Impl::BufferPool::invalidate(
        bool needsAck, BufferId from, BufferId to,
        const std::shared_ptr<Accessor::Impl> &impl) {
    for (auto freeIt = mFreeBuffers.begin(); freeIt != mFreeBuffers.end();) {
        if (isBufferInRange(from, to, *freeIt)) {
            auto it = mBuffers.find(*freeIt);
            if (it != mBuffers.end() &&
                    it->second->mOwnerCount == 0 && it->second->mTransactionCount == 0) {
                mStats.onBufferEvicted(it->second->mAllocSize);
                mBuffers.erase(it);
                freeIt = mFreeBuffers.erase(freeIt);
                continue;
            } else {
                ALOGW("bufferpool2 inconsistent!");
            }
        }
        ++freeIt;
    }
    size_t left = 0;
    for (auto it = mBuffers.begin(); it != mBuffers.end(); ++it) {
        if (isBufferInRange(from, to, it->first)) {
            it->second->invalidate();
            ++left;
        }
    }
    mInvalidation.onInvalidationRequest(needsAck, from, to, left, mInvalidationChannel, impl);
}
// Invalidates every buffer id allocated since the previous flush
// (the range [mStartSeq, mSeq)) and advances the flush watermark.
void Accessor::Impl::BufferPool::flush(const std::shared_ptr<Accessor::Impl> &impl) {
    BufferId from = mStartSeq;
    BufferId to = mSeq;
    mStartSeq = mSeq;
    // TODO: needsAck params
    ALOGV("buffer invalidation request bp:%u %u %u", mInvalidation.mId, from, to);
    if (from != to) {
        invalidate(true, from, to, impl);
    }
}
// Background loop that polls registered accessors for invalidation acks.
// Sleeps on |cv| while no accessor is registered; otherwise polls each live
// accessor (dropping expired weak_ptrs) with an exponentially growing sleep
// (capped at MAX_SLEEP_US) because there is no efficient way to wait on the
// ack FMQ.
void Accessor::Impl::invalidatorThread(
        std::map<uint32_t, const std::weak_ptr<Accessor::Impl>> &accessors,
        std::mutex &mutex,
        std::condition_variable &cv,
        bool &ready) {
    constexpr uint32_t NUM_SPIN_TO_INCREASE_SLEEP = 1024;
    constexpr uint32_t NUM_SPIN_TO_LOG = 1024*8;
    constexpr useconds_t MAX_SLEEP_US = 10000;
    uint32_t numSpin = 0;
    useconds_t sleepUs = 1;

    while(true) {
        std::map<uint32_t, const std::weak_ptr<Accessor::Impl>> copied;
        {
            std::unique_lock<std::mutex> lock(mutex);
            if (!ready) {
                // Reset backoff when going idle.
                numSpin = 0;
                sleepUs = 1;
                cv.wait(lock);
            }
            copied.insert(accessors.begin(), accessors.end());
        }
        std::list<ConnectionId> erased;
        // Poll outside the lock; handleInvalidateAck may do binder calls.
        for (auto it = copied.begin(); it != copied.end(); ++it) {
            const std::shared_ptr<Accessor::Impl> impl = it->second.lock();
            if (!impl) {
                erased.push_back(it->first);
            } else {
                impl->handleInvalidateAck();
            }
        }
        {
            std::unique_lock<std::mutex> lock(mutex);
            for (auto it = erased.begin(); it != erased.end(); ++it) {
                accessors.erase(*it);
            }
            if (accessors.size() == 0) {
                ready = false;
            } else {
                // TODO Use an efficient way to wait over FMQ.
                // N.B. Since there is not a efficient way to wait over FMQ,
                // polling over the FMQ is the current way to prevent draining
                // CPU.
                lock.unlock();
                ++numSpin;
                if (numSpin % NUM_SPIN_TO_INCREASE_SLEEP == 0 &&
                    sleepUs < MAX_SLEEP_US) {
                    sleepUs *= 10;
                }
                if (numSpin % NUM_SPIN_TO_LOG == 0) {
                    ALOGW("invalidator thread spinning");
                }
                ::usleep(sleepUs);
            }
        }
    }
}
// Starts the detached invalidator thread; it lives for the whole process.
Accessor::Impl::AccessorInvalidator::AccessorInvalidator() : mReady(false) {
    std::thread invalidator(
            invalidatorThread,
            std::ref(mAccessors),
            std::ref(mMutex),
            std::ref(mCv),
            std::ref(mReady));
    invalidator.detach();
}
// Registers an accessor for ack polling; wakes the invalidator thread when
// the watch list transitions from empty. Re-adding an existing id is a
// no-op.
void Accessor::Impl::AccessorInvalidator::addAccessor(
        uint32_t accessorId, const std::weak_ptr<Accessor::Impl> &impl) {
    bool notify = false;
    std::unique_lock<std::mutex> lock(mMutex);
    if (mAccessors.find(accessorId) == mAccessors.end()) {
        if (!mReady) {
            mReady = true;
            notify = true;
        }
        mAccessors.insert(std::make_pair(accessorId, impl));
        ALOGV("buffer invalidation added bp:%u %d", accessorId, notify);
    }
    lock.unlock();
    if (notify) {
        mCv.notify_one();
    }
}
// Removes an accessor from ack polling; the invalidator thread goes idle
// when the watch list becomes empty.
void Accessor::Impl::AccessorInvalidator::delAccessor(uint32_t accessorId) {
    std::lock_guard<std::mutex> lock(mMutex);
    mAccessors.erase(accessorId);
    ALOGV("buffer invalidation deleted bp:%u", accessorId);
    if (mAccessors.size() == 0) {
        mReady = false;
    }
}
// Process-wide invalidator instance; created lazily by createInvalidator().
std::unique_ptr<Accessor::Impl::AccessorInvalidator> Accessor::Impl::sInvalidator;

// Creates the process-wide invalidator on first use (idempotent).
void Accessor::Impl::createInvalidator() {
    if (!sInvalidator) {
        sInvalidator = std::make_unique<Accessor::Impl::AccessorInvalidator>();
    }
}
// Background loop that clears the cache of accessors which have been idle
// for longer than kEvictDurationNs. |accessors| maps each registered
// accessor to its last-activity timestamp; |cv| is signaled when the map
// becomes non-empty.
//
// Fix: the current time is now sampled *after* the (potentially unbounded)
// cv.wait; previously it was read before the wait, so expiry was compared
// against a stale timestamp and eviction could be delayed.
void Accessor::Impl::evictorThread(
        std::map<const std::weak_ptr<Accessor::Impl>, nsecs_t, std::owner_less<>> &accessors,
        std::mutex &mutex,
        std::condition_variable &cv) {
    std::list<const std::weak_ptr<Accessor::Impl>> evictList;
    while (true) {
        int expired = 0;
        int evicted = 0;
        {
            std::unique_lock<std::mutex> lock(mutex);
            if (accessors.size() == 0) {
                // A spurious wakeup only causes an empty scan below.
                cv.wait(lock);
            }
            nsecs_t now = systemTime();
            auto it = accessors.begin();
            while (it != accessors.end()) {
                if (now > (it->second + kEvictDurationNs)) {
                    ++expired;
                    evictList.push_back(it->first);
                    it = accessors.erase(it);
                } else {
                    ++it;
                }
            }
        }
        // evict idle accessors (outside the lock);
        for (auto it = evictList.begin(); it != evictList.end(); ++it) {
            const std::shared_ptr<Accessor::Impl> accessor = it->lock();
            if (accessor) {
                accessor->cleanUp(true);
                ++evicted;
            }
        }
        if (expired > 0) {
            ALOGD("evictor expired: %d, evicted: %d", expired, evicted);
        }
        evictList.clear();
        ::usleep(kEvictGranularityNs / 1000);
    }
}
// Starts the detached evictor thread; it lives for the whole process.
Accessor::Impl::AccessorEvictor::AccessorEvictor() {
    std::thread evictor(
            evictorThread,
            std::ref(mAccessors),
            std::ref(mMutex),
            std::ref(mCv));
    evictor.detach();
}
// Registers |impl| for idle-eviction tracking, or refreshes its
// last-activity timestamp when already tracked. Wakes the evictor thread
// when the watch list transitions from empty to non-empty.
void Accessor::Impl::AccessorEvictor::addAccessor(
        const std::weak_ptr<Accessor::Impl> &impl, nsecs_t ts) {
    std::lock_guard<std::mutex> lock(mMutex);
    const bool wasEmpty = mAccessors.empty();
    // operator[] inserts (or finds) the entry; either way the timestamp is
    // refreshed to |ts|.
    mAccessors[impl] = ts;
    if (wasEmpty) {
        mCv.notify_one();
    }
}
// Process-wide evictor instance; created lazily by createEvictor().
std::unique_ptr<Accessor::Impl::AccessorEvictor> Accessor::Impl::sEvictor;

// Creates the process-wide evictor on first use (idempotent).
void Accessor::Impl::createEvictor() {
    if (!sEvictor) {
        sEvictor = std::make_unique<Accessor::Impl::AccessorEvictor>();
    }
}
// Refreshes this accessor's idle-eviction timestamp, rate-limited to once
// per kEvictGranularityNs to avoid hammering the evictor's lock.
void Accessor::Impl::scheduleEvictIfNeeded() {
    nsecs_t now = systemTime();
    if (now > (mScheduleEvictTs + kEvictGranularityNs)) {
        mScheduleEvictTs = now;
        sEvictor->addAccessor(shared_from_this(), now);
    }
}
} // namespace implementation
} // namespace V2_0
} // namespace bufferpool
} // namespace media
} // namespace hardware
} // namespace android

View file

@ -1,431 +0,0 @@
/*
* Copyright (C) 2018 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef ANDROID_HARDWARE_MEDIA_BUFFERPOOL_V2_0_ACCESSORIMPL_H
#define ANDROID_HARDWARE_MEDIA_BUFFERPOOL_V2_0_ACCESSORIMPL_H
#include <map>
#include <set>
#include <condition_variable>
#include <utils/Timers.h>
#include "Accessor.h"
namespace android {
namespace hardware {
namespace media {
namespace bufferpool {
namespace V2_0 {
namespace implementation {
struct InternalBuffer;
struct TransactionStatus;
/**
 * An implementation of a buffer pool accessor (or a buffer pool implementation).
 */
class Accessor::Impl
: public std::enable_shared_from_this<Accessor::Impl> {
public:
Impl(const std::shared_ptr<BufferPoolAllocator> &allocator);
~Impl();
ResultStatus connect(
const sp<Accessor> &accessor, const sp<IObserver> &observer,
sp<Connection> *connection,
ConnectionId *pConnectionId,
uint32_t *pMsgId,
const StatusDescriptor** statusDescPtr,
const InvalidationDescriptor** invDescPtr);
ResultStatus close(ConnectionId connectionId);
ResultStatus allocate(ConnectionId connectionId,
const std::vector<uint8_t>& params,
BufferId *bufferId,
const native_handle_t** handle);
ResultStatus fetch(ConnectionId connectionId,
TransactionId transactionId,
BufferId bufferId,
const native_handle_t** handle);
void flush();
void cleanUp(bool clearCache);
bool isValid();
void handleInvalidateAck();
static void createInvalidator();
static void createEvictor();
private:
// ConnectionId = pid : (timestamp_created + seqId)
// in order to guarantee uniqueness for each connection
static uint32_t sSeqId;
const std::shared_ptr<BufferPoolAllocator> mAllocator;
nsecs_t mScheduleEvictTs;
/**
* Buffer pool implementation.
*
* Handles buffer status messages. Handles buffer allocation/recycling.
* Handles buffer transfer between buffer pool clients.
*/
struct BufferPool {
private:
std::mutex mMutex;
int64_t mTimestampUs;
int64_t mLastCleanUpUs;
int64_t mLastLogUs;
BufferId mSeq;
BufferId mStartSeq;
bool mValid;
BufferStatusObserver mObserver;
BufferInvalidationChannel mInvalidationChannel;
std::map<ConnectionId, std::set<BufferId>> mUsingBuffers;
std::map<BufferId, std::set<ConnectionId>> mUsingConnections;
std::map<ConnectionId, std::set<TransactionId>> mPendingTransactions;
// Transactions completed before TRANSFER_TO message arrival.
// Fetch does not occur for the transactions.
// Only transaction id is kept for the transactions in short duration.
std::set<TransactionId> mCompletedTransactions;
        // Currently active (pending) transactions' status & information.
std::map<TransactionId, std::unique_ptr<TransactionStatus>>
mTransactions;
std::map<BufferId, std::unique_ptr<InternalBuffer>> mBuffers;
std::set<BufferId> mFreeBuffers;
std::set<ConnectionId> mConnectionIds;
struct Invalidation {
static std::atomic<std::uint32_t> sInvSeqId;
struct Pending {
bool mNeedsAck;
uint32_t mFrom;
uint32_t mTo;
size_t mLeft;
const std::weak_ptr<Accessor::Impl> mImpl;
Pending(bool needsAck, uint32_t from, uint32_t to, size_t left,
const std::shared_ptr<Accessor::Impl> &impl)
: mNeedsAck(needsAck),
mFrom(from),
mTo(to),
mLeft(left),
mImpl(impl)
{}
bool isInvalidated(uint32_t bufferId) {
return isBufferInRange(mFrom, mTo, bufferId) && --mLeft == 0;
}
};
std::list<Pending> mPendings;
std::map<ConnectionId, uint32_t> mAcks;
std::map<ConnectionId, const sp<IObserver>> mObservers;
uint32_t mInvalidationId;
uint32_t mId;
Invalidation() : mInvalidationId(0), mId(sInvSeqId.fetch_add(1)) {}
void onConnect(ConnectionId conId, const sp<IObserver> &observer);
void onClose(ConnectionId conId);
void onAck(ConnectionId conId, uint32_t msgId);
void onBufferInvalidated(
BufferId bufferId,
BufferInvalidationChannel &channel);
void onInvalidationRequest(
bool needsAck, uint32_t from, uint32_t to, size_t left,
BufferInvalidationChannel &channel,
const std::shared_ptr<Accessor::Impl> &impl);
void onHandleAck(
std::map<ConnectionId, const sp<IObserver>> *observers,
uint32_t *invalidationId);
} mInvalidation;
/// Buffer pool statistics which tracks allocation and transfer statistics.
struct Stats {
/// Total size of allocations which are used or available to use.
/// (bytes or pixels)
size_t mSizeCached;
/// # of cached buffers which are used or available to use.
size_t mBuffersCached;
/// Total size of allocations which are currently used. (bytes or pixels)
size_t mSizeInUse;
/// # of currently used buffers
size_t mBuffersInUse;
/// # of allocations called on bufferpool. (# of fetched from BlockPool)
size_t mTotalAllocations;
/// # of allocations that were served from the cache.
/// (# of allocator alloc prevented)
size_t mTotalRecycles;
/// # of buffer transfers initiated.
size_t mTotalTransfers;
/// # of transfers that had to be fetched.
size_t mTotalFetches;
Stats()
: mSizeCached(0), mBuffersCached(0), mSizeInUse(0), mBuffersInUse(0),
mTotalAllocations(0), mTotalRecycles(0), mTotalTransfers(0), mTotalFetches(0) {}
/// # of currently unused buffers
size_t buffersNotInUse() const {
ALOG_ASSERT(mBuffersCached >= mBuffersInUse);
return mBuffersCached - mBuffersInUse;
}
/// A new buffer is allocated on an allocation request.
void onBufferAllocated(size_t allocSize) {
mSizeCached += allocSize;
mBuffersCached++;
mSizeInUse += allocSize;
mBuffersInUse++;
mTotalAllocations++;
}
/// A buffer is evicted and destroyed.
void onBufferEvicted(size_t allocSize) {
mSizeCached -= allocSize;
mBuffersCached--;
}
/// A buffer is recycled on an allocation request.
void onBufferRecycled(size_t allocSize) {
mSizeInUse += allocSize;
mBuffersInUse++;
mTotalAllocations++;
mTotalRecycles++;
}
/// A buffer is available to be recycled.
void onBufferUnused(size_t allocSize) {
mSizeInUse -= allocSize;
mBuffersInUse--;
}
/// A buffer transfer is initiated.
void onBufferSent() {
mTotalTransfers++;
}
/// A buffer fetch is invoked by a buffer transfer.
void onBufferFetched() {
mTotalFetches++;
}
} mStats;
bool isValid() {
return mValid;
}
void invalidate(bool needsAck, BufferId from, BufferId to,
const std::shared_ptr<Accessor::Impl> &impl);
static void createInvalidator();
public:
/** Creates a buffer pool. */
BufferPool();
/** Destroys a buffer pool. */
~BufferPool();
/**
* Processes all pending buffer status messages, and returns the result.
* Each status message is handled by methods with 'handle' prefix.
*/
void processStatusMessages();
/**
* Handles a buffer being owned by a connection.
*
* @param connectionId the id of the buffer owning connection.
* @param bufferId the id of the buffer.
*
* @return {@code true} when the buffer is owned,
* {@code false} otherwise.
*/
bool handleOwnBuffer(ConnectionId connectionId, BufferId bufferId);
/**
* Handles a buffer being released by a connection.
*
* @param connectionId the id of the buffer owning connection.
* @param bufferId the id of the buffer.
*
* @return {@code true} when the buffer ownership is released,
* {@code false} otherwise.
*/
bool handleReleaseBuffer(ConnectionId connectionId, BufferId bufferId);
/**
* Handles a transfer transaction start message from the sender.
*
* @param message a buffer status message for the transaction.
*
* @result {@code true} when transfer_to message is acknowledged,
* {@code false} otherwise.
*/
bool handleTransferTo(const BufferStatusMessage &message);
/**
* Handles a transfer transaction being acked by the receiver.
*
* @param message a buffer status message for the transaction.
*
* @result {@code true} when transfer_from message is acknowledged,
* {@code false} otherwise.
*/
bool handleTransferFrom(const BufferStatusMessage &message);
/**
* Handles a transfer transaction result message from the receiver.
*
* @param message a buffer status message for the transaction.
*
 * @result {@code true} when the existing transaction is finished,
* {@code false} otherwise.
*/
bool handleTransferResult(const BufferStatusMessage &message);
/**
* Handles a connection being closed, and returns the result. All the
* buffers and transactions owned by the connection will be cleaned up.
* The related FMQ will be cleaned up too.
*
* @param connectionId the id of the connection.
*
* @result {@code true} when the connection existed,
* {@code false} otherwise.
*/
bool handleClose(ConnectionId connectionId);
/**
 * Recycles an existing free buffer if it is possible.
*
* @param allocator the buffer allocator
* @param params the allocation parameters.
* @param pId the id of the recycled buffer.
* @param handle the native handle of the recycled buffer.
*
* @return {@code true} when a buffer is recycled, {@code false}
* otherwise.
*/
bool getFreeBuffer(
const std::shared_ptr<BufferPoolAllocator> &allocator,
const std::vector<uint8_t> &params,
BufferId *pId, const native_handle_t **handle);
/**
* Adds a newly allocated buffer to bufferpool.
*
* @param alloc the newly allocated buffer.
* @param allocSize the size of the newly allocated buffer.
* @param params the allocation parameters.
* @param pId the buffer id for the newly allocated buffer.
* @param handle the native handle for the newly allocated buffer.
*
* @return OK when an allocation is successfully allocated.
* NO_MEMORY when there is no memory.
* CRITICAL_ERROR otherwise.
*/
ResultStatus addNewBuffer(
const std::shared_ptr<BufferPoolAllocation> &alloc,
const size_t allocSize,
const std::vector<uint8_t> &params,
BufferId *pId,
const native_handle_t **handle);
/**
* Processes pending buffer status messages and performs periodic cache
* cleaning.
*
* @param clearCache if clearCache is true, it frees all buffers
* waiting to be recycled.
*/
void cleanUp(bool clearCache = false);
/**
* Processes pending buffer status messages and invalidate all current
* free buffers. Active buffers are invalidated after being inactive.
*/
void flush(const std::shared_ptr<Accessor::Impl> &impl);
friend class Accessor::Impl;
} mBufferPool;
/// Registry of accessors with outstanding buffer invalidations, keyed by
/// accessor id. Serviced by invalidatorThread (declared below); mCv/mReady
/// wake the thread when the accessor set changes.
struct AccessorInvalidator {
    std::map<uint32_t, const std::weak_ptr<Accessor::Impl>> mAccessors;
    std::mutex mMutex;
    std::condition_variable mCv;
    bool mReady;  // signals the worker thread there is work pending
    AccessorInvalidator();
    void addAccessor(uint32_t accessorId, const std::weak_ptr<Accessor::Impl> &impl);
    void delAccessor(uint32_t accessorId);
};
static std::unique_ptr<AccessorInvalidator> sInvalidator;
static void invalidatorThread(
std::map<uint32_t, const std::weak_ptr<Accessor::Impl>> &accessors,
std::mutex &mutex,
std::condition_variable &cv,
bool &ready);
/// Registry of accessors scheduled for periodic cache eviction, with the
/// timestamp of each request. Serviced by evictorThread (declared below).
/// std::owner_less<> lets weak_ptrs act as map keys.
struct AccessorEvictor {
    std::map<const std::weak_ptr<Accessor::Impl>, nsecs_t, std::owner_less<>> mAccessors;
    std::mutex mMutex;
    std::condition_variable mCv;
    AccessorEvictor();
    void addAccessor(const std::weak_ptr<Accessor::Impl> &impl, nsecs_t ts);
};
static std::unique_ptr<AccessorEvictor> sEvictor;
static void evictorThread(
std::map<const std::weak_ptr<Accessor::Impl>, nsecs_t, std::owner_less<>> &accessors,
std::mutex &mutex,
std::condition_variable &cv);
void scheduleEvictIfNeeded();
};
} // namespace implementation
} // namespace V2_0
} // namespace bufferpool
} // namespace media
} // namespace hardware
} // namespace android
#endif // ANDROID_HARDWARE_MEDIA_BUFFERPOOL_V2_0_ACCESSORIMPL_H

View file

@ -0,0 +1,50 @@
package {
// See: http://go/android-license-faq
// A large-scale-change added 'default_applicable_licenses' to import
// all of the 'license_kinds' from "hardware_interfaces_license"
// to get the below license kinds:
// SPDX-license-identifier-Apache-2.0
default_applicable_licenses: ["hardware_interfaces_license"],
}
// AIDL (bufferpool2) buffer pool implementation library.
// Loadable both from the platform and the media.swcodec APEX, hence
// vendor_available + double_loadable and the min_sdk_version pin.
cc_library {
    name: "libstagefright_aidl_bufferpool2",
    vendor_available: true,
    min_sdk_version: "29",
    apex_available: [
        "//apex_available:platform",
        "com.android.media.swcodec",
        "test_com.android.media.swcodec",
    ],
    srcs: [
        "Accessor.cpp",
        "BufferPool.cpp",
        "BufferPoolClient.cpp",
        "BufferStatus.cpp",
        "ClientManager.cpp",
        "Connection.cpp",
        "Observer.cpp",
    ],
    export_include_dirs: [
        "include",
    ],
    shared_libs: [
        "libbinder_ndk",
        "libcutils",
        "libfmq",
        "liblog",
        "libutils",
        "android.hardware.media.bufferpool2-V1-ndk",
    ],
    static_libs: [
        "libaidlcommonsupport",
    ],
    export_shared_lib_headers: [
        "libfmq",
        "android.hardware.media.bufferpool2-V1-ndk",
    ],
    double_loadable: true,
    // Handles crossing process boundaries are cloned rather than shared.
    cflags: [
        "-DBUFFERPOOL_CLONE_HANDLES",
    ],
}

View file

@ -0,0 +1,540 @@
/*
* Copyright (C) 2022 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#define LOG_TAG "AidlBufferPool"
//#define LOG_NDEBUG 0
#include <sys/types.h>
#include <stdint.h>
#include <time.h>
#include <unistd.h>
#include <utils/Log.h>
#include <thread>
#include "Accessor.h"
#include "BufferPool.h"
#include "Connection.h"
#include "DataHelper.h"
namespace aidl::android::hardware::media::bufferpool2::implementation {
namespace {
// Minimum interval between periodic cleanUp() passes and between stats logs.
static constexpr int64_t kCleanUpDurationMs = 500; // 0.5 sec
static constexpr int64_t kLogDurationMs = 5000; // 5 secs
// Free buffers are evicted only once the cache exceeds BOTH of these floors
// (see cleanUp()), so small pools keep their whole cache.
static constexpr size_t kMinAllocBytesForEviction = 1024*1024*15;
static constexpr size_t kMinBufferCountForEviction = 25;
// Hard cap on idle buffers; when exceeded, cleanUp() trims down to the target.
static constexpr size_t kMaxUnusedBufferCount = 64;
static constexpr size_t kUnusedBufferCountTarget = kMaxUnusedBufferCount - 16;
}
// Initializes the bookkeeping timestamps to "now" and starts the buffer id
// sequence at 0. The pool is usable only if the invalidation channel was
// created successfully.
BufferPool::BufferPool()
    : mTimestampMs(::android::elapsedRealtime()),
      mLastCleanUpMs(mTimestampMs),
      mLastLogMs(mTimestampMs),
      mSeq(0),
      mStartSeq(0) {
    mValid = mInvalidationChannel.isValid();
}
// Statistics helper
// Statistics helper: integer percentage of `base` over `total`, rounded to
// the nearest whole percent. A zero total yields 0 instead of dividing by 0.
template<typename T, typename S>
int percentage(T base, S total) {
    if (!total) {
        return 0;
    }
    double scaled = 100. * static_cast<S>(base) / total;
    return int(scaled + 0.5);
}
std::atomic<std::uint32_t> BufferPool::Invalidation::sInvSeqId(0);
// Logs lifetime statistics of the pool (cache footprint, recycle and fetch
// ratios) on destruction. Takes mMutex to read mStats consistently.
BufferPool::~BufferPool() {
    std::lock_guard<std::mutex> lock(mMutex);
    ALOGD("Destruction - bufferpool2 %p "
          "cached: %zu/%zuM, %zu/%d%% in use; "
          "allocs: %zu, %d%% recycled; "
          "transfers: %zu, %d%% unfetched",
          this, mStats.mBuffersCached, mStats.mSizeCached >> 20,
          mStats.mBuffersInUse, percentage(mStats.mBuffersInUse, mStats.mBuffersCached),
          mStats.mTotalAllocations, percentage(mStats.mTotalRecycles, mStats.mTotalAllocations),
          mStats.mTotalTransfers,
          percentage(mStats.mTotalTransfers - mStats.mTotalFetches, mStats.mTotalTransfers));
}
// Registers a newly connected client for invalidation tracking. The client
// starts in sync with the current invalidation id, so it only needs to ack
// messages issued after this point.
void BufferPool::Invalidation::onConnect(
        ConnectionId conId, const std::shared_ptr<IObserver>& observer) {
    mAcks[conId] = mInvalidationId; // starts from current invalidationId
    mObservers.emplace(conId, observer);
}
// Drops all invalidation bookkeeping for a closed connection: both its ack
// watermark and its observer proxy.
void BufferPool::Invalidation::onClose(ConnectionId conId) {
    mObservers.erase(conId);
    mAcks.erase(conId);
}
// Records that connection `conId` has processed invalidation message `msgId`.
// The watermark only moves forward; isMessageLater() handles id wrap-around.
void BufferPool::Invalidation::onAck(
        ConnectionId conId,
        uint32_t msgId) {
    auto ackIt = mAcks.find(conId);
    if (ackIt == mAcks.end()) {
        ALOGW("ACK from inconsistent connection! %lld", (long long)conId);
        return;
    }
    if (isMessageLater(msgId, ackIt->second)) {
        ackIt->second = msgId;
    }
}
// Called when `bufferId` is finally destroyed. Completes every pending
// invalidation request that was waiting on this buffer by posting it to the
// invalidation channel and removing it from the pending list.
void BufferPool::Invalidation::onBufferInvalidated(
        BufferId bufferId,
        BufferInvalidationChannel &channel) {
    auto pendingIt = mPendings.begin();
    while (pendingIt != mPendings.end()) {
        // N.B. isInvalidated() decrements the pending buffer count as a side
        // effect, so it must be evaluated exactly once per entry.
        if (!pendingIt->isInvalidated(bufferId)) {
            ++pendingIt;
            continue;
        }
        uint32_t msgId = 0;
        if (pendingIt->mNeedsAck) {
            msgId = ++mInvalidationId;
            if (msgId == 0) {
                // wrap-around: 0 is reserved for "no ack required"
                msgId = ++mInvalidationId;
            }
        }
        channel.postInvalidation(msgId, pendingIt->mFrom, pendingIt->mTo);
        pendingIt = mPendings.erase(pendingIt);
    }
}
// Queues or posts a buffer invalidation for the id range [from, to).
//
// @param needsAck  whether clients must acknowledge the invalidation
//                  (allocates a non-zero message id when true).
// @param left      number of buffers in the range still owned by clients;
//                  when 0 the invalidation is posted immediately, otherwise
//                  it stays pending until onBufferInvalidated() drains it.
// @param channel   FMQ-backed channel the invalidation is posted on.
// @param impl      the owning accessor, registered with the invalidator
//                  thread so acks/pendings keep being processed.
void BufferPool::Invalidation::onInvalidationRequest(
        bool needsAck,
        uint32_t from,
        uint32_t to,
        size_t left,
        BufferInvalidationChannel &channel,
        const std::shared_ptr<Accessor> &impl) {
    uint32_t msgId = 0;
    if (needsAck) {
        msgId = ++mInvalidationId;
        if (msgId == 0) {
            // wrap happens; 0 is reserved for "no ack required"
            msgId = ++mInvalidationId;
        }
    }
    ALOGV("bufferpool2 invalidation requested and queued");
    if (left == 0) {
        channel.postInvalidation(msgId, from, to);
    } else {
        // Fixed log typo: "bufferpoo2" -> "bufferpool2".
        ALOGV("bufferpool2 invalidation requested and pending");
        Pending pending(needsAck, from, to, left, impl);
        mPendings.push_back(pending);
    }
    Accessor::sInvalidator->addAccessor(mId, impl);
}
void BufferPool::Invalidation::onHandleAck(
std::map<ConnectionId, const std::shared_ptr<IObserver>> *observers,
uint32_t *invalidationId) {
if (mInvalidationId != 0) {
*invalidationId = mInvalidationId;
std::set<int> deads;
for (auto it = mAcks.begin(); it != mAcks.end(); ++it) {
if (it->second != mInvalidationId) {
const std::shared_ptr<IObserver> observer = mObservers[it->first];
if (observer) {
observers->emplace(it->first, observer);
ALOGV("connection %lld will call observer (%u: %u)",
(long long)it->first, it->second, mInvalidationId);
// N.B: onMessage will be called later. ignore possibility of
// onMessage# oneway call being lost.
it->second = mInvalidationId;
} else {
ALOGV("bufferpool2 observer died %lld", (long long)it->first);
deads.insert(it->first);
}
}
}
if (deads.size() > 0) {
for (auto it = deads.begin(); it != deads.end(); ++it) {
onClose(*it);
}
}
}
if (mPendings.size() == 0) {
// All invalidation Ids are synced and no more pending invalidations.
Accessor::sInvalidator->delAccessor(mId);
}
}
// Marks `bufferId` as owned by `connectionId`, bumping the buffer's owner
// count on a new ownership edge. Returns false if the connection already
// owned the buffer (insert() reports no change).
bool BufferPool::handleOwnBuffer(
        ConnectionId connectionId, BufferId bufferId) {
    bool added = insert(&mUsingBuffers, connectionId, bufferId);
    if (added) {
        auto iter = mBuffers.find(bufferId);
        // NOTE(review): iter is not checked against mBuffers.end(); callers
        // are expected to pass only ids present in mBuffers — confirm.
        iter->second->mOwnerCount++;
    }
    insert(&mUsingConnections, bufferId, connectionId);
    return added;
}
// Releases `connectionId`'s ownership of `bufferId`. When the last owner
// releases and no transfer is in flight, the buffer either returns to the
// free list or — if it was invalidated while in use — is destroyed and the
// pending invalidation is advanced.
bool BufferPool::handleReleaseBuffer(
        ConnectionId connectionId, BufferId bufferId) {
    bool deleted = erase(&mUsingBuffers, connectionId, bufferId);
    if (deleted) {
        auto iter = mBuffers.find(bufferId);
        iter->second->mOwnerCount--;
        if (iter->second->mOwnerCount == 0 &&
                iter->second->mTransactionCount == 0) {
            if (!iter->second->mInvalidated) {
                // Idle and still valid: keep it cached for recycling.
                mStats.onBufferUnused(iter->second->mAllocSize);
                mFreeBuffers.insert(bufferId);
            } else {
                // Invalidated while in use: destroy now that it is idle.
                mStats.onBufferUnused(iter->second->mAllocSize);
                mStats.onBufferEvicted(iter->second->mAllocSize);
                mBuffers.erase(iter);
                mInvalidation.onBufferInvalidated(bufferId, mInvalidationChannel);
            }
        }
    }
    erase(&mUsingConnections, bufferId, connectionId);
    ALOGV("release buffer %u : %d", bufferId, deleted);
    return deleted;
}
// Handles a TRANSFER_TO message from the sending connection. Creates (or
// completes) the transaction record and bumps the buffer's in-flight
// transaction count. Returns false when the buffer is unknown/unowned or the
// receiver connection no longer exists.
bool BufferPool::handleTransferTo(const BufferStatusMessage &message) {
    auto completed = mCompletedTransactions.find(
            message.transactionId);
    if (completed != mCompletedTransactions.end()) {
        // already completed
        mCompletedTransactions.erase(completed);
        return true;
    }
    // the buffer should exist and be owned.
    auto bufferIter = mBuffers.find(message.bufferId);
    if (bufferIter == mBuffers.end() ||
            !contains(&mUsingBuffers, message.connectionId, FromAidl(message.bufferId))) {
        return false;
    }
    auto found = mTransactions.find(message.transactionId);
    if (found != mTransactions.end()) {
        // transfer_from was received earlier.
        found->second->mSender = message.connectionId;
        found->second->mSenderValidated = true;
        return true;
    }
    if (mConnectionIds.find(message.targetConnectionId) == mConnectionIds.end()) {
        // N.B: it could be fake or receive connection already closed.
        ALOGD("bufferpool2 %p receiver connection %lld is no longer valid",
              this, (long long)message.targetConnectionId);
        return false;
    }
    mStats.onBufferSent();
    mTransactions.insert(std::make_pair(
            message.transactionId,
            std::make_unique<TransactionStatus>(message, mTimestampMs)));
    // The receiver must ack (or time out) before this transaction is removed.
    insert(&mPendingTransactions, message.targetConnectionId,
           FromAidl(message.transactionId));
    bufferIter->second->mTransactionCount++;
    return true;
}
// Handles a TRANSFER_FROM message from the receiving connection. If the
// sender's TRANSFER_TO was already seen, marks the existing transaction as
// being received; otherwise creates the transaction record first.
//
// BUGFIX: the buffer lookup is now validated before use. The previous code
// dereferenced mBuffers.end() when the message referenced an unknown
// bufferId, which is undefined behavior; such a message is now rejected
// without registering a transaction.
bool BufferPool::handleTransferFrom(const BufferStatusMessage &message) {
    auto found = mTransactions.find(message.transactionId);
    if (found == mTransactions.end()) {
        // TODO: is it feasible to check ownership here?
        auto bufferIter = mBuffers.find(message.bufferId);
        if (bufferIter == mBuffers.end()) {
            // Unknown buffer; do not create a transaction for it.
            return false;
        }
        mStats.onBufferSent();
        mTransactions.insert(std::make_pair(
                message.transactionId,
                std::make_unique<TransactionStatus>(message, mTimestampMs)));
        insert(&mPendingTransactions, message.connectionId,
               FromAidl(message.transactionId));
        bufferIter->second->mTransactionCount++;
    } else {
        if (message.connectionId == found->second->mReceiver) {
            found->second->mStatus = BufferStatus::TRANSFER_FROM;
        }
    }
    return true;
}
// Handles TRANSFER_OK / TRANSFER_ERROR from the receiver, finishing the
// transaction: ownership moves to the receiver on success, the in-flight
// count drops, and an idle buffer is either cached or (if invalidated)
// destroyed. Returns false when the transaction or its pending entry is
// unknown.
bool BufferPool::handleTransferResult(const BufferStatusMessage &message) {
    auto found = mTransactions.find(message.transactionId);
    if (found != mTransactions.end()) {
        bool deleted = erase(&mPendingTransactions, message.connectionId,
                             FromAidl(message.transactionId));
        if (deleted) {
            if (!found->second->mSenderValidated) {
                // Result arrived before the sender's TRANSFER_TO; remember the
                // id so the late TRANSFER_TO is treated as already completed.
                mCompletedTransactions.insert(message.transactionId);
            }
            auto bufferIter = mBuffers.find(message.bufferId);
            // NOTE(review): bufferIter is not checked against mBuffers.end()
            // before dereference below — relies on transaction bookkeeping
            // keeping the buffer alive; confirm.
            if (message.status == BufferStatus::TRANSFER_OK) {
                handleOwnBuffer(message.connectionId, message.bufferId);
            }
            bufferIter->second->mTransactionCount--;
            if (bufferIter->second->mOwnerCount == 0
                && bufferIter->second->mTransactionCount == 0) {
                if (!bufferIter->second->mInvalidated) {
                    mStats.onBufferUnused(bufferIter->second->mAllocSize);
                    mFreeBuffers.insert(message.bufferId);
                } else {
                    mStats.onBufferUnused(bufferIter->second->mAllocSize);
                    mStats.onBufferEvicted(bufferIter->second->mAllocSize);
                    mBuffers.erase(bufferIter);
                    mInvalidation.onBufferInvalidated(message.bufferId, mInvalidationChannel);
                }
            }
            mTransactions.erase(found);
        }
        ALOGV("transfer finished %llu %u - %d", (unsigned long long)message.transactionId,
              message.bufferId, deleted);
        return deleted;
    }
    ALOGV("transfer not found %llu %u", (unsigned long long)message.transactionId,
          message.bufferId);
    return false;
}
// Drains all queued buffer status messages from the status observer and
// dispatches each to its 'handle' method. Also refreshes mTimestampMs, which
// the handlers use as the current time.
void BufferPool::processStatusMessages() {
    std::vector<BufferStatusMessage> messages;
    mObserver.getBufferStatusChanges(messages);
    mTimestampMs = ::android::elapsedRealtime();
    for (BufferStatusMessage& message: messages) {
        bool ret = false;
        switch (message.status) {
            case BufferStatus::NOT_USED:
                ret = handleReleaseBuffer(
                        message.connectionId, message.bufferId);
                break;
            case BufferStatus::USED:
                // not happening
                break;
            case BufferStatus::TRANSFER_TO:
                ret = handleTransferTo(message);
                break;
            case BufferStatus::TRANSFER_FROM:
                ret = handleTransferFrom(message);
                break;
            case BufferStatus::TRANSFER_TIMEOUT:
                // TODO
                break;
            case BufferStatus::TRANSFER_LOST:
                // TODO
                break;
            case BufferStatus::TRANSFER_FETCH:
                // not happening
                break;
            case BufferStatus::TRANSFER_OK:
            case BufferStatus::TRANSFER_ERROR:
                ret = handleTransferResult(message);
                break;
            case BufferStatus::INVALIDATION_ACK:
                // For acks the bufferId field carries the invalidation msgId.
                mInvalidation.onAck(message.connectionId, message.bufferId);
                ret = true;
                break;
        }
        if (ret == false) {
            ALOGW("buffer status message processing failure - message : %d connection : %lld",
                  message.status, (long long)message.connectionId);
        }
    }
    messages.clear();
}
// Tears down all state for a closed connection: releases every buffer it
// owned and aborts every transaction pending on it, recycling or destroying
// buffers that become idle as a result. Always returns true.
bool BufferPool::handleClose(ConnectionId connectionId) {
    // Cleaning buffers
    auto buffers = mUsingBuffers.find(connectionId);
    if (buffers != mUsingBuffers.end()) {
        for (const BufferId& bufferId : buffers->second) {
            bool deleted = erase(&mUsingConnections, bufferId, connectionId);
            if (deleted) {
                auto bufferIter = mBuffers.find(bufferId);
                bufferIter->second->mOwnerCount--;
                if (bufferIter->second->mOwnerCount == 0 &&
                        bufferIter->second->mTransactionCount == 0) {
                    // TODO: handle freebuffer insert fail
                    if (!bufferIter->second->mInvalidated) {
                        // Idle and valid: return to the free list.
                        mStats.onBufferUnused(bufferIter->second->mAllocSize);
                        mFreeBuffers.insert(bufferId);
                    } else {
                        // Invalidated while owned: destroy now.
                        mStats.onBufferUnused(bufferIter->second->mAllocSize);
                        mStats.onBufferEvicted(bufferIter->second->mAllocSize);
                        mBuffers.erase(bufferIter);
                        mInvalidation.onBufferInvalidated(bufferId, mInvalidationChannel);
                    }
                }
            }
        }
        mUsingBuffers.erase(buffers);
    }
    // Cleaning transactions
    auto pending = mPendingTransactions.find(connectionId);
    if (pending != mPendingTransactions.end()) {
        for (const TransactionId& transactionId : pending->second) {
            auto iter = mTransactions.find(transactionId);
            if (iter != mTransactions.end()) {
                if (!iter->second->mSenderValidated) {
                    // A late TRANSFER_TO for this id must be ignored.
                    mCompletedTransactions.insert(transactionId);
                }
                BufferId bufferId = iter->second->mBufferId;
                auto bufferIter = mBuffers.find(bufferId);
                bufferIter->second->mTransactionCount--;
                if (bufferIter->second->mOwnerCount == 0 &&
                        bufferIter->second->mTransactionCount == 0) {
                    // TODO: handle freebuffer insert fail
                    if (!bufferIter->second->mInvalidated) {
                        mStats.onBufferUnused(bufferIter->second->mAllocSize);
                        mFreeBuffers.insert(bufferId);
                    } else {
                        mStats.onBufferUnused(bufferIter->second->mAllocSize);
                        mStats.onBufferEvicted(bufferIter->second->mAllocSize);
                        mBuffers.erase(bufferIter);
                        mInvalidation.onBufferInvalidated(bufferId, mInvalidationChannel);
                    }
                }
                mTransactions.erase(iter);
            }
        }
    }
    mConnectionIds.erase(connectionId);
    return true;
}
bool BufferPool::getFreeBuffer(
const std::shared_ptr<BufferPoolAllocator> &allocator,
const std::vector<uint8_t> &params, BufferId *pId,
const native_handle_t** handle) {
auto bufferIt = mFreeBuffers.begin();
for (;bufferIt != mFreeBuffers.end(); ++bufferIt) {
BufferId bufferId = *bufferIt;
if (allocator->compatible(params, mBuffers[bufferId]->mConfig)) {
break;
}
}
if (bufferIt != mFreeBuffers.end()) {
BufferId id = *bufferIt;
mFreeBuffers.erase(bufferIt);
mStats.onBufferRecycled(mBuffers[id]->mAllocSize);
*handle = mBuffers[id]->handle();
*pId = id;
ALOGV("recycle a buffer %u %p", id, *handle);
return true;
}
return false;
}
// Registers a newly allocated buffer with the pool under the next sequence
// id (skipping the reserved SYNC_BUFFERID) and records it in stats.
//
// @param alloc     the newly allocated buffer.
// @param allocSize the size of the newly allocated buffer.
// @param params    the allocation parameters.
// @param pId       out: the buffer id assigned to the new buffer.
// @param handle    out: the native handle of the new buffer.
// @return OK on success, NO_MEMORY when the id is already taken.
BufferPoolStatus BufferPool::addNewBuffer(
        const std::shared_ptr<BufferPoolAllocation> &alloc,
        const size_t allocSize,
        const std::vector<uint8_t> &params,
        BufferId *pId,
        const native_handle_t** handle) {
    BufferId bufferId = mSeq++;
    if (mSeq == Connection::SYNC_BUFFERID) {
        // SYNC_BUFFERID is reserved; wrap the sequence past it.
        mSeq = 0;
    }
    // std::make_unique throws on allocation failure and never returns null,
    // so the previous `if (buffer)` null-check was dead code.
    auto res = mBuffers.insert(std::make_pair(
            bufferId,
            std::make_unique<InternalBuffer>(bufferId, alloc, allocSize, params)));
    if (res.second) {
        mStats.onBufferAllocated(allocSize);
        *handle = alloc->handle();
        *pId = bufferId;
        return ResultStatus::OK;
    }
    return ResultStatus::NO_MEMORY;
}
// Periodically (or on demand with clearCache) evicts idle buffers from the
// free list. Eviction stops early once the idle count is at or below the
// target AND the cache is below the size/count floors — unless clearCache
// forces a full sweep. Also emits throttled statistics logs.
void BufferPool::cleanUp(bool clearCache) {
    if (clearCache || mTimestampMs > mLastCleanUpMs + kCleanUpDurationMs ||
            mStats.buffersNotInUse() > kMaxUnusedBufferCount) {
        mLastCleanUpMs = mTimestampMs;
        if (mTimestampMs > mLastLogMs + kLogDurationMs ||
                mStats.buffersNotInUse() > kMaxUnusedBufferCount) {
            mLastLogMs = mTimestampMs;
            ALOGD("bufferpool2 %p : %zu(%zu size) total buffers - "
                  "%zu(%zu size) used buffers - %zu/%zu (recycle/alloc) - "
                  "%zu/%zu (fetch/transfer)",
                  this, mStats.mBuffersCached, mStats.mSizeCached,
                  mStats.mBuffersInUse, mStats.mSizeInUse,
                  mStats.mTotalRecycles, mStats.mTotalAllocations,
                  mStats.mTotalFetches, mStats.mTotalTransfers);
        }
        for (auto freeIt = mFreeBuffers.begin(); freeIt != mFreeBuffers.end();) {
            if (!clearCache && mStats.buffersNotInUse() <= kUnusedBufferCountTarget &&
                    (mStats.mSizeCached < kMinAllocBytesForEviction ||
                     mBuffers.size() < kMinBufferCountForEviction)) {
                break;
            }
            auto it = mBuffers.find(*freeIt);
            if (it != mBuffers.end() &&
                    it->second->mOwnerCount == 0 && it->second->mTransactionCount == 0) {
                mStats.onBufferEvicted(it->second->mAllocSize);
                mBuffers.erase(it);
                freeIt = mFreeBuffers.erase(freeIt);
            } else {
                // Free-list entry with no idle backing buffer: bookkeeping is
                // out of sync; skip the entry but leave it in place.
                ++freeIt;
                ALOGW("bufferpool2 inconsistent!");
            }
        }
    }
}
// Invalidates every buffer whose id lies in [from, to): free buffers are
// destroyed immediately; in-use buffers are marked invalidated and counted,
// so the invalidation request stays pending until they are released.
void BufferPool::invalidate(
        bool needsAck, BufferId from, BufferId to,
        const std::shared_ptr<Accessor> &impl) {
    // Pass 1: evict free buffers in range right away.
    for (auto freeIt = mFreeBuffers.begin(); freeIt != mFreeBuffers.end();) {
        if (isBufferInRange(from, to, *freeIt)) {
            auto it = mBuffers.find(*freeIt);
            if (it != mBuffers.end() &&
                    it->second->mOwnerCount == 0 && it->second->mTransactionCount == 0) {
                mStats.onBufferEvicted(it->second->mAllocSize);
                mBuffers.erase(it);
                freeIt = mFreeBuffers.erase(freeIt);
                continue;
            } else {
                ALOGW("bufferpool2 inconsistent!");
            }
        }
        ++freeIt;
    }
    // Pass 2: mark remaining in-range buffers; `left` is how many are still
    // alive and must be released before the invalidation can complete.
    size_t left = 0;
    for (auto it = mBuffers.begin(); it != mBuffers.end(); ++it) {
        if (isBufferInRange(from, to, it->first)) {
            it->second->invalidate();
            ++left;
        }
    }
    mInvalidation.onInvalidationRequest(needsAck, from, to, left, mInvalidationChannel, impl);
}
void BufferPool::flush(const std::shared_ptr<Accessor> &impl) {
BufferId from = mStartSeq;
BufferId to = mSeq;
mStartSeq = mSeq;
// TODO: needsAck params
ALOGV("buffer invalidation request bp:%u %u %u", mInvalidation.mId, from, to);
if (from != to) {
invalidate(true, from, to, impl);
}
}
} // namespace aidl::android::hardware::media::bufferpool2::implementation

View file

@ -0,0 +1,337 @@
/*
* Copyright (C) 2022 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <map>
#include <set>
#include <vector>
#include <mutex>
#include <condition_variable>
#include <utils/Timers.h>
#include "BufferStatus.h"
namespace aidl::android::hardware::media::bufferpool2::implementation {
using BufferStatus = aidl::android::hardware::media::bufferpool2::BufferStatus;
using BufferStatusMessage = aidl::android::hardware::media::bufferpool2::BufferStatusMessage;
struct Accessor;
struct InternalBuffer;
struct TransactionStatus;
/**
* Buffer pool implementation.
*
* Handles buffer status messages. Handles buffer allocation/recycling.
* Handles buffer transfer between buffer pool clients.
*/
struct BufferPool {
private:
std::mutex mMutex;
int64_t mTimestampMs;
int64_t mLastCleanUpMs;
int64_t mLastLogMs;
BufferId mSeq;
BufferId mStartSeq;
bool mValid;
BufferStatusObserver mObserver;
BufferInvalidationChannel mInvalidationChannel;
std::map<ConnectionId, std::set<BufferId>> mUsingBuffers;
std::map<BufferId, std::set<ConnectionId>> mUsingConnections;
std::map<ConnectionId, std::set<TransactionId>> mPendingTransactions;
// Transactions completed before TRANSFER_TO message arrival.
// Fetch does not occur for the transactions.
// Only transaction id is kept for the transactions in short duration.
std::set<TransactionId> mCompletedTransactions;
// Currently active (pending) transactions' status & information.
std::map<TransactionId, std::unique_ptr<TransactionStatus>>
mTransactions;
std::map<BufferId, std::unique_ptr<InternalBuffer>> mBuffers;
std::set<BufferId> mFreeBuffers;
std::set<ConnectionId> mConnectionIds;
struct Invalidation {
static std::atomic<std::uint32_t> sInvSeqId;
struct Pending {
bool mNeedsAck;
uint32_t mFrom;
uint32_t mTo;
size_t mLeft;
const std::weak_ptr<Accessor> mImpl;
Pending(bool needsAck, uint32_t from, uint32_t to, size_t left,
const std::shared_ptr<Accessor> &impl)
: mNeedsAck(needsAck),
mFrom(from),
mTo(to),
mLeft(left),
mImpl(impl)
{}
bool isInvalidated(uint32_t bufferId) {
return isBufferInRange(mFrom, mTo, bufferId) && --mLeft == 0;
}
};
std::list<Pending> mPendings;
std::map<ConnectionId, uint32_t> mAcks;
std::map<ConnectionId, const std::shared_ptr<IObserver>> mObservers;
uint32_t mInvalidationId;
uint32_t mId;
Invalidation() : mInvalidationId(0), mId(sInvSeqId.fetch_add(1)) {}
void onConnect(ConnectionId conId, const std::shared_ptr<IObserver> &observer);
void onClose(ConnectionId conId);
void onAck(ConnectionId conId, uint32_t msgId);
void onBufferInvalidated(
BufferId bufferId,
BufferInvalidationChannel &channel);
void onInvalidationRequest(
bool needsAck, uint32_t from, uint32_t to, size_t left,
BufferInvalidationChannel &channel,
const std::shared_ptr<Accessor> &impl);
void onHandleAck(
std::map<ConnectionId, const std::shared_ptr<IObserver>> *observers,
uint32_t *invalidationId);
} mInvalidation;
/// Buffer pool statistics which tracks allocation and transfer statistics.
struct Stats {
/// Total size of allocations which are used or available to use.
/// (bytes or pixels)
size_t mSizeCached;
/// # of cached buffers which are used or available to use.
size_t mBuffersCached;
/// Total size of allocations which are currently used. (bytes or pixels)
size_t mSizeInUse;
/// # of currently used buffers
size_t mBuffersInUse;
/// # of allocations called on bufferpool. (# of fetched from BlockPool)
size_t mTotalAllocations;
/// # of allocations that were served from the cache.
/// (# of allocator alloc prevented)
size_t mTotalRecycles;
/// # of buffer transfers initiated.
size_t mTotalTransfers;
/// # of transfers that had to be fetched.
size_t mTotalFetches;
Stats()
: mSizeCached(0), mBuffersCached(0), mSizeInUse(0), mBuffersInUse(0),
mTotalAllocations(0), mTotalRecycles(0), mTotalTransfers(0), mTotalFetches(0) {}
/// # of currently unused buffers
size_t buffersNotInUse() const {
ALOG_ASSERT(mBuffersCached >= mBuffersInUse);
return mBuffersCached - mBuffersInUse;
}
/// A new buffer is allocated on an allocation request.
void onBufferAllocated(size_t allocSize) {
mSizeCached += allocSize;
mBuffersCached++;
mSizeInUse += allocSize;
mBuffersInUse++;
mTotalAllocations++;
}
/// A buffer is evicted and destroyed.
void onBufferEvicted(size_t allocSize) {
mSizeCached -= allocSize;
mBuffersCached--;
}
/// A buffer is recycled on an allocation request.
void onBufferRecycled(size_t allocSize) {
mSizeInUse += allocSize;
mBuffersInUse++;
mTotalAllocations++;
mTotalRecycles++;
}
/// A buffer is available to be recycled.
void onBufferUnused(size_t allocSize) {
mSizeInUse -= allocSize;
mBuffersInUse--;
}
/// A buffer transfer is initiated.
void onBufferSent() {
mTotalTransfers++;
}
/// A buffer fetch is invoked by a buffer transfer.
void onBufferFetched() {
mTotalFetches++;
}
} mStats;
bool isValid() {
return mValid;
}
void invalidate(bool needsAck, BufferId from, BufferId to,
const std::shared_ptr<Accessor> &impl);
static void createInvalidator();
public:
/** Creates a buffer pool. */
BufferPool();
/** Destroys a buffer pool. */
~BufferPool();
/**
* Processes all pending buffer status messages, and returns the result.
* Each status message is handled by methods with 'handle' prefix.
*/
void processStatusMessages();
/**
* Handles a buffer being owned by a connection.
*
* @param connectionId the id of the buffer owning connection.
* @param bufferId the id of the buffer.
*
* @return {@code true} when the buffer is owned,
* {@code false} otherwise.
*/
bool handleOwnBuffer(ConnectionId connectionId, BufferId bufferId);
/**
* Handles a buffer being released by a connection.
*
* @param connectionId the id of the buffer owning connection.
* @param bufferId the id of the buffer.
*
* @return {@code true} when the buffer ownership is released,
* {@code false} otherwise.
*/
bool handleReleaseBuffer(ConnectionId connectionId, BufferId bufferId);
/**
* Handles a transfer transaction start message from the sender.
*
* @param message a buffer status message for the transaction.
*
* @result {@code true} when transfer_to message is acknowledged,
* {@code false} otherwise.
*/
bool handleTransferTo(const BufferStatusMessage &message);
/**
* Handles a transfer transaction being acked by the receiver.
*
* @param message a buffer status message for the transaction.
*
* @result {@code true} when transfer_from message is acknowledged,
* {@code false} otherwise.
*/
bool handleTransferFrom(const BufferStatusMessage &message);
/**
* Handles a transfer transaction result message from the receiver.
*
* @param message a buffer status message for the transaction.
*
* @result {@code true} when the existing transaction is finished,
* {@code false} otherwise.
*/
bool handleTransferResult(const BufferStatusMessage &message);
/**
* Handles a connection being closed, and returns the result. All the
* buffers and transactions owned by the connection will be cleaned up.
* The related FMQ will be cleaned up too.
*
* @param connectionId the id of the connection.
*
* @result {@code true} when the connection existed,
* {@code false} otherwise.
*/
bool handleClose(ConnectionId connectionId);
/**
 * Recycles an existing free buffer if it is possible.
*
* @param allocator the buffer allocator
* @param params the allocation parameters.
* @param pId the id of the recycled buffer.
* @param handle the native handle of the recycled buffer.
*
* @return {@code true} when a buffer is recycled, {@code false}
* otherwise.
*/
bool getFreeBuffer(
const std::shared_ptr<BufferPoolAllocator> &allocator,
const std::vector<uint8_t> &params,
BufferId *pId, const native_handle_t **handle);
/**
* Adds a newly allocated buffer to bufferpool.
*
* @param alloc the newly allocated buffer.
* @param allocSize the size of the newly allocated buffer.
* @param params the allocation parameters.
* @param pId the buffer id for the newly allocated buffer.
* @param handle the native handle for the newly allocated buffer.
*
* @return OK when an allocation is successfully allocated.
* NO_MEMORY when there is no memory.
* CRITICAL_ERROR otherwise.
*/
BufferPoolStatus addNewBuffer(
const std::shared_ptr<BufferPoolAllocation> &alloc,
const size_t allocSize,
const std::vector<uint8_t> &params,
BufferId *pId,
const native_handle_t **handle);
/**
* Processes pending buffer status messages and performs periodic cache
* cleaning.
*
* @param clearCache if clearCache is true, it frees all buffers
* waiting to be recycled.
*/
void cleanUp(bool clearCache = false);
/**
* Processes pending buffer status messages and invalidate all current
* free buffers. Active buffers are invalidated after being inactive.
*/
void flush(const std::shared_ptr<Accessor> &impl);
friend struct Accessor;
};
} // namespace aidl::android::hardware::media::bufferpool2::implementation

View file

@ -1,5 +1,5 @@
/* /*
* Copyright (C) 2018 The Android Open Source Project * Copyright (C) 2022 The Android Open Source Project
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License. * you may not use this file except in compliance with the License.
@ -14,33 +14,37 @@
* limitations under the License. * limitations under the License.
*/ */
#define LOG_TAG "BufferPoolClient" #define LOG_TAG "AidlBufferPoolCli"
//#define LOG_NDEBUG 0 //#define LOG_NDEBUG 0
#include <thread> #include <thread>
#include <aidlcommonsupport/NativeHandle.h>
#include <utils/Log.h> #include <utils/Log.h>
#include "BufferPoolClient.h" #include "BufferPoolClient.h"
#include "Accessor.h"
#include "Connection.h" #include "Connection.h"
namespace android { namespace aidl::android::hardware::media::bufferpool2::implementation {
namespace hardware {
namespace media {
namespace bufferpool {
namespace V2_0 {
namespace implementation {
static constexpr int64_t kReceiveTimeoutUs = 2000000; // 2s using aidl::android::hardware::media::bufferpool2::IConnection;
using aidl::android::hardware::media::bufferpool2::ResultStatus;
using FetchInfo = aidl::android::hardware::media::bufferpool2::IConnection::FetchInfo;
using FetchResult = aidl::android::hardware::media::bufferpool2::IConnection::FetchResult;
static constexpr int64_t kReceiveTimeoutMs = 2000; // 2s
static constexpr int kPostMaxRetry = 3; static constexpr int kPostMaxRetry = 3;
static constexpr int kCacheTtlUs = 1000000; // TODO: tune static constexpr int kCacheTtlMs = 1000;
static constexpr size_t kMaxCachedBufferCount = 64; static constexpr size_t kMaxCachedBufferCount = 64;
static constexpr size_t kCachedBufferCountTarget = kMaxCachedBufferCount - 16; static constexpr size_t kCachedBufferCountTarget = kMaxCachedBufferCount - 16;
class BufferPoolClient::Impl class BufferPoolClient::Impl
: public std::enable_shared_from_this<BufferPoolClient::Impl> { : public std::enable_shared_from_this<BufferPoolClient::Impl> {
public: public:
explicit Impl(const sp<Accessor> &accessor, const sp<IObserver> &observer); explicit Impl(const std::shared_ptr<Accessor> &accessor,
const std::shared_ptr<IObserver> &observer);
explicit Impl(const sp<IAccessor> &accessor, const sp<IObserver> &observer); explicit Impl(const std::shared_ptr<IAccessor> &accessor,
const std::shared_ptr<IObserver> &observer);
bool isValid() { bool isValid() {
return mValid; return mValid;
@ -54,35 +58,35 @@ public:
return mConnectionId; return mConnectionId;
} }
sp<IAccessor> &getAccessor() { std::shared_ptr<IAccessor> &getAccessor() {
return mAccessor; return mAccessor;
} }
bool isActive(int64_t *lastTransactionUs, bool clearCache); bool isActive(int64_t *lastTransactionMs, bool clearCache);
void receiveInvalidation(uint32_t msgID); void receiveInvalidation(uint32_t msgID);
ResultStatus flush(); BufferPoolStatus flush();
ResultStatus allocate(const std::vector<uint8_t> &params, BufferPoolStatus allocate(const std::vector<uint8_t> &params,
native_handle_t **handle, native_handle_t **handle,
std::shared_ptr<BufferPoolData> *buffer); std::shared_ptr<BufferPoolData> *buffer);
ResultStatus receive( BufferPoolStatus receive(
TransactionId transactionId, BufferId bufferId, TransactionId transactionId, BufferId bufferId,
int64_t timestampUs, int64_t timestampMs,
native_handle_t **handle, std::shared_ptr<BufferPoolData> *buffer); native_handle_t **handle, std::shared_ptr<BufferPoolData> *buffer);
void postBufferRelease(BufferId bufferId); void postBufferRelease(BufferId bufferId);
bool postSend( bool postSend(
BufferId bufferId, ConnectionId receiver, BufferId bufferId, ConnectionId receiver,
TransactionId *transactionId, int64_t *timestampUs); TransactionId *transactionId, int64_t *timestampMs);
private: private:
bool postReceive( bool postReceive(
BufferId bufferId, TransactionId transactionId, BufferId bufferId, TransactionId transactionId,
int64_t timestampUs); int64_t timestampMs);
bool postReceiveResult( bool postReceiveResult(
BufferId bufferId, TransactionId transactionId, bool result, bool *needsSync); BufferId bufferId, TransactionId transactionId, bool result, bool *needsSync);
@ -97,11 +101,11 @@ private:
void invalidateRange(BufferId from, BufferId to); void invalidateRange(BufferId from, BufferId to);
ResultStatus allocateBufferHandle( BufferPoolStatus allocateBufferHandle(
const std::vector<uint8_t>& params, BufferId *bufferId, const std::vector<uint8_t>& params, BufferId *bufferId,
native_handle_t **handle); native_handle_t **handle);
ResultStatus fetchBufferHandle( BufferPoolStatus fetchBufferHandle(
TransactionId transactionId, BufferId bufferId, TransactionId transactionId, BufferId bufferId,
native_handle_t **handle); native_handle_t **handle);
@ -110,12 +114,12 @@ private:
bool mLocal; bool mLocal;
bool mValid; bool mValid;
sp<IAccessor> mAccessor; std::shared_ptr<IAccessor> mAccessor;
sp<Connection> mLocalConnection; std::shared_ptr<Connection> mLocalConnection;
sp<IConnection> mRemoteConnection; std::shared_ptr<IConnection> mRemoteConnection;
uint32_t mSeqId; uint32_t mSeqId;
ConnectionId mConnectionId; ConnectionId mConnectionId;
int64_t mLastEvictCacheUs; int64_t mLastEvictCacheMs;
std::unique_ptr<BufferInvalidationListener> mInvalidationListener; std::unique_ptr<BufferInvalidationListener> mInvalidationListener;
// CachedBuffers // CachedBuffers
@ -125,18 +129,19 @@ private:
std::condition_variable mCreateCv; std::condition_variable mCreateCv;
std::map<BufferId, std::unique_ptr<ClientBuffer>> mBuffers; std::map<BufferId, std::unique_ptr<ClientBuffer>> mBuffers;
int mActive; int mActive;
int64_t mLastChangeUs; int64_t mLastChangeMs;
BufferCache() : mCreating(false), mActive(0), mLastChangeUs(getTimestampNow()) {} BufferCache() : mCreating(false), mActive(0),
mLastChangeMs(::android::elapsedRealtime()) {}
void incActive_l() { void incActive_l() {
++mActive; ++mActive;
mLastChangeUs = getTimestampNow(); mLastChangeMs = ::android::elapsedRealtime();
} }
void decActive_l() { void decActive_l() {
--mActive; --mActive;
mLastChangeUs = getTimestampNow(); mLastChangeMs = ::android::elapsedRealtime();
} }
int cachedBufferCount() const { int cachedBufferCount() const {
@ -147,7 +152,6 @@ private:
// FMQ - release notifier // FMQ - release notifier
struct ReleaseCache { struct ReleaseCache {
std::mutex mLock; std::mutex mLock;
// TODO: use only one list?(using one list may dealy sending messages?)
std::list<BufferId> mReleasingIds; std::list<BufferId> mReleasingIds;
std::list<BufferId> mReleasedIds; std::list<BufferId> mReleasedIds;
uint32_t mInvalidateId; // TODO: invalidation ACK to bufferpool uint32_t mInvalidateId; // TODO: invalidation ACK to bufferpool
@ -158,7 +162,7 @@ private:
} mReleasing; } mReleasing;
// This lock is held during synchronization from remote side. // This lock is held during synchronization from remote side.
// In order to minimize remote calls and locking durtaion, this lock is held // In order to minimize remote calls and locking duration, this lock is held
// by best effort approach using try_lock(). // by best effort approach using try_lock().
std::mutex mRemoteSyncLock; std::mutex mRemoteSyncLock;
}; };
@ -181,7 +185,7 @@ struct BufferPoolClient::Impl::BlockPoolDataDtor {
struct BufferPoolClient::Impl::ClientBuffer { struct BufferPoolClient::Impl::ClientBuffer {
private: private:
int64_t mExpireUs; int64_t mExpireMs;
bool mHasCache; bool mHasCache;
ConnectionId mConnectionId; ConnectionId mConnectionId;
BufferId mId; BufferId mId;
@ -189,7 +193,7 @@ private:
std::weak_ptr<BufferPoolData> mCache; std::weak_ptr<BufferPoolData> mCache;
void updateExpire() { void updateExpire() {
mExpireUs = getTimestampNow() + kCacheTtlUs; mExpireMs = ::android::elapsedRealtime() + kCacheTtlMs;
} }
public: public:
@ -197,7 +201,7 @@ public:
ConnectionId connectionId, BufferId id, native_handle_t *handle) ConnectionId connectionId, BufferId id, native_handle_t *handle)
: mHasCache(false), mConnectionId(connectionId), : mHasCache(false), mConnectionId(connectionId),
mId(id), mHandle(handle) { mId(id), mHandle(handle) {
mExpireUs = getTimestampNow() + kCacheTtlUs; mExpireMs = ::android::elapsedRealtime() + kCacheTtlMs;
} }
~ClientBuffer() { ~ClientBuffer() {
@ -212,8 +216,8 @@ public:
} }
bool expire() const { bool expire() const {
int64_t now = getTimestampNow(); int64_t now = ::android::elapsedRealtime();
return now >= mExpireUs; return now >= mExpireMs;
} }
bool hasCache() const { bool hasCache() const {
@ -265,20 +269,21 @@ public:
} }
}; };
BufferPoolClient::Impl::Impl(const sp<Accessor> &accessor, const sp<IObserver> &observer) BufferPoolClient::Impl::Impl(const std::shared_ptr<Accessor> &accessor,
const std::shared_ptr<IObserver> &observer)
: mLocal(true), mValid(false), mAccessor(accessor), mSeqId(0), : mLocal(true), mValid(false), mAccessor(accessor), mSeqId(0),
mLastEvictCacheUs(getTimestampNow()) { mLastEvictCacheMs(::android::elapsedRealtime()) {
const StatusDescriptor *statusDesc; StatusDescriptor statusDesc;
const InvalidationDescriptor *invDesc; InvalidationDescriptor invDesc;
ResultStatus status = accessor->connect( BufferPoolStatus status = accessor->connect(
observer, true, observer, true,
&mLocalConnection, &mConnectionId, &mReleasing.mInvalidateId, &mLocalConnection, &mConnectionId, &mReleasing.mInvalidateId,
&statusDesc, &invDesc); &statusDesc, &invDesc);
if (status == ResultStatus::OK) { if (status == ResultStatus::OK) {
mReleasing.mStatusChannel = mReleasing.mStatusChannel =
std::make_unique<BufferStatusChannel>(*statusDesc); std::make_unique<BufferStatusChannel>(statusDesc);
mInvalidationListener = mInvalidationListener =
std::make_unique<BufferInvalidationListener>(*invDesc); std::make_unique<BufferInvalidationListener>(invDesc);
mValid = mReleasing.mStatusChannel && mValid = mReleasing.mStatusChannel &&
mReleasing.mStatusChannel->isValid() && mReleasing.mStatusChannel->isValid() &&
mInvalidationListener && mInvalidationListener &&
@ -286,46 +291,36 @@ BufferPoolClient::Impl::Impl(const sp<Accessor> &accessor, const sp<IObserver> &
} }
} }
BufferPoolClient::Impl::Impl(const sp<IAccessor> &accessor, const sp<IObserver> &observer) BufferPoolClient::Impl::Impl(const std::shared_ptr<IAccessor> &accessor,
const std::shared_ptr<IObserver> &observer)
: mLocal(false), mValid(false), mAccessor(accessor), mSeqId(0), : mLocal(false), mValid(false), mAccessor(accessor), mSeqId(0),
mLastEvictCacheUs(getTimestampNow()) { mLastEvictCacheMs(::android::elapsedRealtime()) {
IAccessor::ConnectionInfo conInfo;
bool valid = false; bool valid = false;
sp<IConnection>& outConnection = mRemoteConnection; if(accessor->connect(observer, &conInfo).isOk()) {
ConnectionId& id = mConnectionId; auto channel = std::make_unique<BufferStatusChannel>(conInfo.toFmqDesc);
uint32_t& outMsgId = mReleasing.mInvalidateId; auto observer = std::make_unique<BufferInvalidationListener>(conInfo.fromFmqDesc);
std::unique_ptr<BufferStatusChannel>& outChannel =
mReleasing.mStatusChannel; if (channel && channel->isValid()
std::unique_ptr<BufferInvalidationListener>& outObserver = && observer && observer->isValid()) {
mInvalidationListener; mRemoteConnection = conInfo.connection;
Return<void> transResult = accessor->connect( mConnectionId = conInfo.connectionId;
observer, mReleasing.mInvalidateId = conInfo.msgId;
[&valid, &outConnection, &id, &outMsgId, &outChannel, &outObserver] mReleasing.mStatusChannel = std::move(channel);
(ResultStatus status, sp<IConnection> connection, mInvalidationListener = std::move(observer);
ConnectionId connectionId, uint32_t msgId, valid = true;
const StatusDescriptor& statusDesc, }
const InvalidationDescriptor& invDesc) { }
if (status == ResultStatus::OK) { mValid = valid;
outConnection = connection;
id = connectionId;
outMsgId = msgId;
outChannel = std::make_unique<BufferStatusChannel>(statusDesc);
outObserver = std::make_unique<BufferInvalidationListener>(invDesc);
if (outChannel && outChannel->isValid() &&
outObserver && outObserver->isValid()) {
valid = true;
}
}
});
mValid = transResult.isOk() && valid;
} }
bool BufferPoolClient::Impl::isActive(int64_t *lastTransactionUs, bool clearCache) { bool BufferPoolClient::Impl::isActive(int64_t *lastTransactionMs, bool clearCache) {
bool active = false; bool active = false;
{ {
std::lock_guard<std::mutex> lock(mCache.mLock); std::lock_guard<std::mutex> lock(mCache.mLock);
syncReleased(); syncReleased();
evictCaches(clearCache); evictCaches(clearCache);
*lastTransactionUs = mCache.mLastChangeUs; *lastTransactionMs = mCache.mLastChangeMs;
active = mCache.mActive > 0; active = mCache.mActive > 0;
} }
if (mValid && mLocal && mLocalConnection) { if (mValid && mLocal && mLocalConnection) {
@ -341,7 +336,7 @@ void BufferPoolClient::Impl::receiveInvalidation(uint32_t messageId) {
// TODO: evict cache required? // TODO: evict cache required?
} }
ResultStatus BufferPoolClient::Impl::flush() { BufferPoolStatus BufferPoolClient::Impl::flush() {
if (!mLocal || !mLocalConnection || !mValid) { if (!mLocal || !mLocalConnection || !mValid) {
return ResultStatus::CRITICAL_ERROR; return ResultStatus::CRITICAL_ERROR;
} }
@ -353,7 +348,7 @@ ResultStatus BufferPoolClient::Impl::flush() {
} }
} }
ResultStatus BufferPoolClient::Impl::allocate( BufferPoolStatus BufferPoolClient::Impl::allocate(
const std::vector<uint8_t> &params, const std::vector<uint8_t> &params,
native_handle_t **pHandle, native_handle_t **pHandle,
std::shared_ptr<BufferPoolData> *buffer) { std::shared_ptr<BufferPoolData> *buffer) {
@ -363,7 +358,7 @@ ResultStatus BufferPoolClient::Impl::allocate(
BufferId bufferId; BufferId bufferId;
native_handle_t *handle = nullptr; native_handle_t *handle = nullptr;
buffer->reset(); buffer->reset();
ResultStatus status = allocateBufferHandle(params, &bufferId, &handle); BufferPoolStatus status = allocateBufferHandle(params, &bufferId, &handle);
if (status == ResultStatus::OK) { if (status == ResultStatus::OK) {
if (handle) { if (handle) {
std::unique_lock<std::mutex> lock(mCache.mLock); std::unique_lock<std::mutex> lock(mCache.mLock);
@ -398,20 +393,20 @@ ResultStatus BufferPoolClient::Impl::allocate(
return status; return status;
} }
ResultStatus BufferPoolClient::Impl::receive( BufferPoolStatus BufferPoolClient::Impl::receive(
TransactionId transactionId, BufferId bufferId, int64_t timestampUs, TransactionId transactionId, BufferId bufferId, int64_t timestampMs,
native_handle_t **pHandle, native_handle_t **pHandle,
std::shared_ptr<BufferPoolData> *buffer) { std::shared_ptr<BufferPoolData> *buffer) {
if (!mValid) { if (!mValid) {
return ResultStatus::CRITICAL_ERROR; return ResultStatus::CRITICAL_ERROR;
} }
if (timestampUs != 0) { if (timestampMs != 0) {
timestampUs += kReceiveTimeoutUs; timestampMs += kReceiveTimeoutMs;
} }
if (!postReceive(bufferId, transactionId, timestampUs)) { if (!postReceive(bufferId, transactionId, timestampMs)) {
return ResultStatus::CRITICAL_ERROR; return ResultStatus::CRITICAL_ERROR;
} }
ResultStatus status = ResultStatus::CRITICAL_ERROR; BufferPoolStatus status = ResultStatus::CRITICAL_ERROR;
buffer->reset(); buffer->reset();
while(1) { while(1) {
std::unique_lock<std::mutex> lock(mCache.mLock); std::unique_lock<std::mutex> lock(mCache.mLock);
@ -505,7 +500,7 @@ void BufferPoolClient::Impl::postBufferRelease(BufferId bufferId) {
// TODO: revise ad-hoc posting data structure // TODO: revise ad-hoc posting data structure
bool BufferPoolClient::Impl::postSend( bool BufferPoolClient::Impl::postSend(
BufferId bufferId, ConnectionId receiver, BufferId bufferId, ConnectionId receiver,
TransactionId *transactionId, int64_t *timestampUs) { TransactionId *transactionId, int64_t *timestampMs) {
{ {
// TODO: don't need to call syncReleased every time // TODO: don't need to call syncReleased every time
std::lock_guard<std::mutex> lock(mCache.mLock); std::lock_guard<std::mutex> lock(mCache.mLock);
@ -515,7 +510,7 @@ bool BufferPoolClient::Impl::postSend(
bool needsSync = false; bool needsSync = false;
{ {
std::lock_guard<std::mutex> lock(mReleasing.mLock); std::lock_guard<std::mutex> lock(mReleasing.mLock);
*timestampUs = getTimestampNow(); *timestampMs = ::android::elapsedRealtime();
*transactionId = (mConnectionId << 32) | mSeqId++; *transactionId = (mConnectionId << 32) | mSeqId++;
// TODO: retry, add timeout, target? // TODO: retry, add timeout, target?
ret = mReleasing.mStatusChannel->postBufferStatusMessage( ret = mReleasing.mStatusChannel->postBufferStatusMessage(
@ -533,11 +528,11 @@ bool BufferPoolClient::Impl::postSend(
} }
bool BufferPoolClient::Impl::postReceive( bool BufferPoolClient::Impl::postReceive(
BufferId bufferId, TransactionId transactionId, int64_t timestampUs) { BufferId bufferId, TransactionId transactionId, int64_t timestampMs) {
for (int i = 0; i < kPostMaxRetry; ++i) { for (int i = 0; i < kPostMaxRetry; ++i) {
std::unique_lock<std::mutex> lock(mReleasing.mLock); std::unique_lock<std::mutex> lock(mReleasing.mLock);
int64_t now = getTimestampNow(); int64_t now = ::android::elapsedRealtime();
if (timestampUs == 0 || now < timestampUs) { if (timestampMs == 0 || now < timestampMs) {
bool result = mReleasing.mStatusChannel->postBufferStatusMessage( bool result = mReleasing.mStatusChannel->postBufferStatusMessage(
transactionId, bufferId, BufferStatus::TRANSFER_FROM, transactionId, bufferId, BufferStatus::TRANSFER_FROM,
mConnectionId, -1, mReleasing.mReleasingIds, mConnectionId, -1, mReleasing.mReleasingIds,
@ -579,16 +574,7 @@ void BufferPoolClient::Impl::trySyncFromRemote() {
needsSync = mReleasing.mStatusChannel->needsSync(); needsSync = mReleasing.mStatusChannel->needsSync();
} }
if (needsSync) { if (needsSync) {
TransactionId transactionId = (mConnectionId << 32); if (!mRemoteConnection->sync().isOk()) {
BufferId bufferId = Connection::SYNC_BUFFERID;
Return<void> transResult = mRemoteConnection->fetch(
transactionId, bufferId,
[]
(ResultStatus outStatus, Buffer outBuffer) {
(void) outStatus;
(void) outBuffer;
});
if (!transResult.isOk()) {
ALOGD("sync from client %lld failed: bufferpool process died.", ALOGD("sync from client %lld failed: bufferpool process died.",
(long long)mConnectionId); (long long)mConnectionId);
} }
@ -616,12 +602,12 @@ bool BufferPoolClient::Impl::syncReleased(uint32_t messageId) {
mCache.decActive_l(); mCache.decActive_l();
} else { } else {
// should not happen! // should not happen!
ALOGW("client %lld cache release status inconsitent!", ALOGW("client %lld cache release status inconsistent!",
(long long)mConnectionId); (long long)mConnectionId);
} }
} else { } else {
// should not happen! // should not happen!
ALOGW("client %lld cache status inconsitent!", (long long)mConnectionId); ALOGW("client %lld cache status inconsistent!", (long long)mConnectionId);
} }
} }
mReleasing.mReleasedIds.clear(); mReleasing.mReleasedIds.clear();
@ -673,8 +659,8 @@ bool BufferPoolClient::Impl::syncReleased(uint32_t messageId) {
// should have mCache.mLock // should have mCache.mLock
void BufferPoolClient::Impl::evictCaches(bool clearCache) { void BufferPoolClient::Impl::evictCaches(bool clearCache) {
int64_t now = getTimestampNow(); int64_t now = ::android::elapsedRealtime();
if (now >= mLastEvictCacheUs + kCacheTtlUs || if (now >= mLastEvictCacheMs + kCacheTtlMs ||
clearCache || mCache.cachedBufferCount() > kMaxCachedBufferCount) { clearCache || mCache.cachedBufferCount() > kMaxCachedBufferCount) {
size_t evicted = 0; size_t evicted = 0;
for (auto it = mCache.mBuffers.begin(); it != mCache.mBuffers.end();) { for (auto it = mCache.mBuffers.begin(); it != mCache.mBuffers.end();) {
@ -688,7 +674,7 @@ void BufferPoolClient::Impl::evictCaches(bool clearCache) {
} }
ALOGV("cache count %lld : total %zu, active %d, evicted %zu", ALOGV("cache count %lld : total %zu, active %d, evicted %zu",
(long long)mConnectionId, mCache.mBuffers.size(), mCache.mActive, evicted); (long long)mConnectionId, mCache.mBuffers.size(), mCache.mActive, evicted);
mLastEvictCacheUs = now; mLastEvictCacheMs = now;
} }
} }
@ -701,7 +687,7 @@ void BufferPoolClient::Impl::invalidateBuffer(BufferId id) {
ALOGV("cache invalidated %lld : buffer %u", ALOGV("cache invalidated %lld : buffer %u",
(long long)mConnectionId, id); (long long)mConnectionId, id);
} else { } else {
ALOGW("Inconsitent invalidation %lld : activer buffer!! %u", ALOGW("Inconsistent invalidation %lld : activer buffer!! %u",
(long long)mConnectionId, (unsigned int)id); (long long)mConnectionId, (unsigned int)id);
} }
break; break;
@ -735,12 +721,12 @@ void BufferPoolClient::Impl::invalidateRange(BufferId from, BufferId to) {
(long long)mConnectionId, invalidated); (long long)mConnectionId, invalidated);
} }
ResultStatus BufferPoolClient::Impl::allocateBufferHandle( BufferPoolStatus BufferPoolClient::Impl::allocateBufferHandle(
const std::vector<uint8_t>& params, BufferId *bufferId, const std::vector<uint8_t>& params, BufferId *bufferId,
native_handle_t** handle) { native_handle_t** handle) {
if (mLocalConnection) { if (mLocalConnection) {
const native_handle_t* allocHandle = nullptr; const native_handle_t* allocHandle = nullptr;
ResultStatus status = mLocalConnection->allocate( BufferPoolStatus status = mLocalConnection->allocate(
params, bufferId, &allocHandle); params, bufferId, &allocHandle);
if (status == ResultStatus::OK) { if (status == ResultStatus::OK) {
*handle = native_handle_clone(allocHandle); *handle = native_handle_clone(allocHandle);
@ -753,37 +739,38 @@ ResultStatus BufferPoolClient::Impl::allocateBufferHandle(
return ResultStatus::CRITICAL_ERROR; return ResultStatus::CRITICAL_ERROR;
} }
ResultStatus BufferPoolClient::Impl::fetchBufferHandle( BufferPoolStatus BufferPoolClient::Impl::fetchBufferHandle(
TransactionId transactionId, BufferId bufferId, TransactionId transactionId, BufferId bufferId,
native_handle_t **handle) { native_handle_t **handle) {
sp<IConnection> connection; std::shared_ptr<IConnection> connection;
if (mLocal) { if (mLocal) {
connection = mLocalConnection; connection = mLocalConnection;
} else { } else {
connection = mRemoteConnection; connection = mRemoteConnection;
} }
ResultStatus status; std::vector<FetchInfo> infos;
Return<void> transResult = connection->fetch( std::vector<FetchResult> results;
transactionId, bufferId, infos.emplace_back(FetchInfo{ToAidl(transactionId), ToAidl(bufferId)});
[&status, &handle] ndk::ScopedAStatus status = connection->fetch(infos, &results);
(ResultStatus outStatus, Buffer outBuffer) { if (!status.isOk()) {
status = outStatus; BufferPoolStatus svcSpecific = status.getServiceSpecificError();
if (status == ResultStatus::OK) { return svcSpecific ? svcSpecific : ResultStatus::CRITICAL_ERROR;
*handle = native_handle_clone( }
outBuffer.buffer.getNativeHandle()); if (results[0].getTag() == FetchResult::buffer) {
} *handle = ::android::dupFromAidl(results[0].get<FetchResult::buffer>().buffer);
}); return ResultStatus::OK;
return transResult.isOk() ? status : ResultStatus::CRITICAL_ERROR; }
return results[0].get<FetchResult::failure>();
} }
BufferPoolClient::BufferPoolClient(const sp<Accessor> &accessor, BufferPoolClient::BufferPoolClient(const std::shared_ptr<Accessor> &accessor,
const sp<IObserver> &observer) { const std::shared_ptr<IObserver> &observer) {
mImpl = std::make_shared<Impl>(accessor, observer); mImpl = std::make_shared<Impl>(accessor, observer);
} }
BufferPoolClient::BufferPoolClient(const sp<IAccessor> &accessor, BufferPoolClient::BufferPoolClient(const std::shared_ptr<IAccessor> &accessor,
const sp<IObserver> &observer) { const std::shared_ptr<IObserver> &observer) {
mImpl = std::make_shared<Impl>(accessor, observer); mImpl = std::make_shared<Impl>(accessor, observer);
} }
@ -799,12 +786,12 @@ bool BufferPoolClient::isLocal() {
return mImpl && mImpl->isLocal(); return mImpl && mImpl->isLocal();
} }
bool BufferPoolClient::isActive(int64_t *lastTransactionUs, bool clearCache) { bool BufferPoolClient::isActive(int64_t *lastTransactionMs, bool clearCache) {
if (!isValid()) { if (!isValid()) {
*lastTransactionUs = 0; *lastTransactionMs = 0;
return false; return false;
} }
return mImpl->isActive(lastTransactionUs, clearCache); return mImpl->isActive(lastTransactionMs, clearCache);
} }
ConnectionId BufferPoolClient::getConnectionId() { ConnectionId BufferPoolClient::getConnectionId() {
@ -814,7 +801,7 @@ ConnectionId BufferPoolClient::getConnectionId() {
return -1; return -1;
} }
ResultStatus BufferPoolClient::getAccessor(sp<IAccessor> *accessor) { BufferPoolStatus BufferPoolClient::getAccessor(std::shared_ptr<IAccessor> *accessor) {
if (isValid()) { if (isValid()) {
*accessor = mImpl->getAccessor(); *accessor = mImpl->getAccessor();
return ResultStatus::OK; return ResultStatus::OK;
@ -829,14 +816,14 @@ void BufferPoolClient::receiveInvalidation(uint32_t msgId) {
} }
} }
ResultStatus BufferPoolClient::flush() { BufferPoolStatus BufferPoolClient::flush() {
if (isValid()) { if (isValid()) {
return mImpl->flush(); return mImpl->flush();
} }
return ResultStatus::CRITICAL_ERROR; return ResultStatus::CRITICAL_ERROR;
} }
ResultStatus BufferPoolClient::allocate( BufferPoolStatus BufferPoolClient::allocate(
const std::vector<uint8_t> &params, const std::vector<uint8_t> &params,
native_handle_t **handle, native_handle_t **handle,
std::shared_ptr<BufferPoolData> *buffer) { std::shared_ptr<BufferPoolData> *buffer) {
@ -846,31 +833,26 @@ ResultStatus BufferPoolClient::allocate(
return ResultStatus::CRITICAL_ERROR; return ResultStatus::CRITICAL_ERROR;
} }
ResultStatus BufferPoolClient::receive( BufferPoolStatus BufferPoolClient::receive(
TransactionId transactionId, BufferId bufferId, int64_t timestampUs, TransactionId transactionId, BufferId bufferId, int64_t timestampMs,
native_handle_t **handle, std::shared_ptr<BufferPoolData> *buffer) { native_handle_t **handle, std::shared_ptr<BufferPoolData> *buffer) {
if (isValid()) { if (isValid()) {
return mImpl->receive(transactionId, bufferId, timestampUs, handle, buffer); return mImpl->receive(transactionId, bufferId, timestampMs, handle, buffer);
} }
return ResultStatus::CRITICAL_ERROR; return ResultStatus::CRITICAL_ERROR;
} }
ResultStatus BufferPoolClient::postSend( BufferPoolStatus BufferPoolClient::postSend(
ConnectionId receiverId, ConnectionId receiverId,
const std::shared_ptr<BufferPoolData> &buffer, const std::shared_ptr<BufferPoolData> &buffer,
TransactionId *transactionId, TransactionId *transactionId,
int64_t *timestampUs) { int64_t *timestampMs) {
if (isValid()) { if (isValid()) {
bool result = mImpl->postSend( bool result = mImpl->postSend(
buffer->mId, receiverId, transactionId, timestampUs); buffer->mId, receiverId, transactionId, timestampMs);
return result ? ResultStatus::OK : ResultStatus::CRITICAL_ERROR; return result ? ResultStatus::OK : ResultStatus::CRITICAL_ERROR;
} }
return ResultStatus::CRITICAL_ERROR; return ResultStatus::CRITICAL_ERROR;
} }
} // namespace implementation } // namespace aidl::android::hardware::media::bufferpool2::implementation
} // namespace V2_0
} // namespace bufferpool
} // namespace media
} // namespace hardware
} // namespace android

View file

@ -1,5 +1,5 @@
/* /*
* Copyright (C) 2018 The Android Open Source Project * Copyright (C) 2022 The Android Open Source Project
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License. * you may not use this file except in compliance with the License.
@ -14,29 +14,19 @@
* limitations under the License. * limitations under the License.
*/ */
#ifndef ANDROID_HARDWARE_MEDIA_BUFFERPOOL_V2_0_BUFFERPOOLCLIENT_H #pragma once
#define ANDROID_HARDWARE_MEDIA_BUFFERPOOL_V2_0_BUFFERPOOLCLIENT_H
#include <memory> #include <memory>
#include <android/hardware/media/bufferpool/2.0/IAccessor.h> #include <aidl/android/hardware/media/bufferpool2/IAccessor.h>
#include <android/hardware/media/bufferpool/2.0/IConnection.h> #include <aidl/android/hardware/media/bufferpool2/IObserver.h>
#include <android/hardware/media/bufferpool/2.0/IObserver.h> #include <bufferpool2/BufferPoolTypes.h>
#include <bufferpool/BufferPoolTypes.h>
#include <cutils/native_handle.h>
#include "Accessor.h"
namespace android { namespace aidl::android::hardware::media::bufferpool2::implementation {
namespace hardware {
namespace media {
namespace bufferpool {
namespace V2_0 {
namespace implementation {
using ::android::hardware::media::bufferpool::V2_0::IAccessor; using aidl::android::hardware::media::bufferpool2::IAccessor;
using ::android::hardware::media::bufferpool::V2_0::IConnection; using aidl::android::hardware::media::bufferpool2::IObserver;
using ::android::hardware::media::bufferpool::V2_0::IObserver;
using ::android::hardware::media::bufferpool::V2_0::ResultStatus; struct Accessor;
using ::android::sp;
/** /**
* A buffer pool client for a buffer pool. For a specific buffer pool, at most * A buffer pool client for a buffer pool. For a specific buffer pool, at most
@ -49,8 +39,8 @@ public:
* Creates a buffer pool client from a local buffer pool * Creates a buffer pool client from a local buffer pool
* (via ClientManager#create). * (via ClientManager#create).
*/ */
explicit BufferPoolClient(const sp<Accessor> &accessor, explicit BufferPoolClient(const std::shared_ptr<Accessor> &accessor,
const sp<IObserver> &observer); const std::shared_ptr<IObserver> &observer);
/** /**
* Creates a buffer pool client from a remote buffer pool * Creates a buffer pool client from a remote buffer pool
@ -58,8 +48,8 @@ public:
* Note: A buffer pool client created with remote buffer pool cannot * Note: A buffer pool client created with remote buffer pool cannot
* allocate a buffer. * allocate a buffer.
*/ */
explicit BufferPoolClient(const sp<IAccessor> &accessor, explicit BufferPoolClient(const std::shared_ptr<IAccessor> &accessor,
const sp<IObserver> &observer); const std::shared_ptr<IObserver> &observer);
/** Destructs a buffer pool client. */ /** Destructs a buffer pool client. */
~BufferPoolClient(); ~BufferPoolClient();
@ -69,30 +59,30 @@ private:
bool isLocal(); bool isLocal();
bool isActive(int64_t *lastTransactionUs, bool clearCache); bool isActive(int64_t *lastTransactionMs, bool clearCache);
ConnectionId getConnectionId(); ConnectionId getConnectionId();
ResultStatus getAccessor(sp<IAccessor> *accessor); BufferPoolStatus getAccessor(std::shared_ptr<IAccessor> *accessor);
void receiveInvalidation(uint32_t msgId); void receiveInvalidation(uint32_t msgId);
ResultStatus flush(); BufferPoolStatus flush();
ResultStatus allocate(const std::vector<uint8_t> &params, BufferPoolStatus allocate(const std::vector<uint8_t> &params,
native_handle_t **handle, native_handle_t **handle,
std::shared_ptr<BufferPoolData> *buffer); std::shared_ptr<BufferPoolData> *buffer);
ResultStatus receive(TransactionId transactionId, BufferPoolStatus receive(TransactionId transactionId,
BufferId bufferId, BufferId bufferId,
int64_t timestampUs, int64_t timestampMs,
native_handle_t **handle, native_handle_t **handle,
std::shared_ptr<BufferPoolData> *buffer); std::shared_ptr<BufferPoolData> *buffer);
ResultStatus postSend(ConnectionId receiver, BufferPoolStatus postSend(ConnectionId receiver,
const std::shared_ptr<BufferPoolData> &buffer, const std::shared_ptr<BufferPoolData> &buffer,
TransactionId *transactionId, TransactionId *transactionId,
int64_t *timestampUs); int64_t *timestampMs);
class Impl; class Impl;
std::shared_ptr<Impl> mImpl; std::shared_ptr<Impl> mImpl;
@ -101,11 +91,4 @@ private:
friend struct Observer; friend struct Observer;
}; };
} // namespace implementation } // namespace aidl::android::hardware::bufferpool2::implementation
} // namespace V2_0
} // namespace bufferpool
} // namespace media
} // namespace hardware
} // namespace android
#endif // ANDROID_HARDWARE_MEDIA_BUFFERPOOL_V2_0_BUFFERPOOLCLIENT_H

View file

@ -1,5 +1,5 @@
/* /*
* Copyright (C) 2018 The Android Open Source Project * Copyright (C) 2022 The Android Open Source Project
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License. * you may not use this file except in compliance with the License.
@ -14,29 +14,17 @@
* limitations under the License. * limitations under the License.
*/ */
#define LOG_TAG "BufferPoolStatus" #define LOG_TAG "AidlBufferPoolStatus"
//#define LOG_NDEBUG 0 //#define LOG_NDEBUG 0
#include <thread> #include <thread>
#include <time.h> #include <time.h>
#include <aidl/android/hardware/media/bufferpool2/BufferStatus.h>
#include "BufferStatus.h" #include "BufferStatus.h"
namespace android { namespace aidl::android::hardware::media::bufferpool2::implementation {
namespace hardware {
namespace media {
namespace bufferpool {
namespace V2_0 {
namespace implementation {
int64_t getTimestampNow() { using aidl::android::hardware::media::bufferpool2::BufferStatus;
int64_t stamp;
struct timespec ts;
// TODO: CLOCK_MONOTONIC_COARSE?
clock_gettime(CLOCK_MONOTONIC, &ts);
stamp = ts.tv_nsec / 1000;
stamp += (ts.tv_sec * 1000000LL);
return stamp;
}
bool isMessageLater(uint32_t curMsgId, uint32_t prevMsgId) { bool isMessageLater(uint32_t curMsgId, uint32_t prevMsgId) {
return curMsgId != prevMsgId && curMsgId - prevMsgId < prevMsgId - curMsgId; return curMsgId != prevMsgId && curMsgId - prevMsgId < prevMsgId - curMsgId;
@ -53,30 +41,26 @@ bool isBufferInRange(BufferId from, BufferId to, BufferId bufferId) {
static constexpr int kNumElementsInQueue = 1024*16; static constexpr int kNumElementsInQueue = 1024*16;
static constexpr int kMinElementsToSyncInQueue = 128; static constexpr int kMinElementsToSyncInQueue = 128;
ResultStatus BufferStatusObserver::open( BufferPoolStatus BufferStatusObserver::open(
ConnectionId id, const StatusDescriptor** fmqDescPtr) { ConnectionId id, StatusDescriptor* fmqDescPtr) {
if (mBufferStatusQueues.find(id) != mBufferStatusQueues.end()) { if (mBufferStatusQueues.find(id) != mBufferStatusQueues.end()) {
// TODO: id collision log? ALOGE("connection id collision %lld", (unsigned long long)id);
return ResultStatus::CRITICAL_ERROR; return ResultStatus::CRITICAL_ERROR;
} }
std::unique_ptr<BufferStatusQueue> queue = auto queue = std::make_unique<BufferStatusQueue>(kNumElementsInQueue);
std::make_unique<BufferStatusQueue>(kNumElementsInQueue);
if (!queue || queue->isValid() == false) { if (!queue || queue->isValid() == false) {
*fmqDescPtr = nullptr;
return ResultStatus::NO_MEMORY; return ResultStatus::NO_MEMORY;
} else {
*fmqDescPtr = queue->getDesc();
} }
*fmqDescPtr = queue->dupeDesc();
auto result = mBufferStatusQueues.insert( auto result = mBufferStatusQueues.insert(
std::make_pair(id, std::move(queue))); std::make_pair(id, std::move(queue)));
if (!result.second) { if (!result.second) {
*fmqDescPtr = nullptr;
return ResultStatus::NO_MEMORY; return ResultStatus::NO_MEMORY;
} }
return ResultStatus::OK; return ResultStatus::OK;
} }
ResultStatus BufferStatusObserver::close(ConnectionId id) { BufferPoolStatus BufferStatusObserver::close(ConnectionId id) {
if (mBufferStatusQueues.find(id) == mBufferStatusQueues.end()) { if (mBufferStatusQueues.find(id) == mBufferStatusQueues.end()) {
return ResultStatus::CRITICAL_ERROR; return ResultStatus::CRITICAL_ERROR;
} }
@ -90,7 +74,7 @@ void BufferStatusObserver::getBufferStatusChanges(std::vector<BufferStatusMessag
size_t avail = it->second->availableToRead(); size_t avail = it->second->availableToRead();
while (avail > 0) { while (avail > 0) {
if (!it->second->read(&message, 1)) { if (!it->second->read(&message, 1)) {
// Since avaliable # of reads are already confirmed, // Since available # of reads are already confirmed,
// this should not happen. // this should not happen.
// TODO: error handling (spurious client?) // TODO: error handling (spurious client?)
ALOGW("FMQ message cannot be read from %lld", (long long)it->first); ALOGW("FMQ message cannot be read from %lld", (long long)it->first);
@ -105,8 +89,7 @@ void BufferStatusObserver::getBufferStatusChanges(std::vector<BufferStatusMessag
BufferStatusChannel::BufferStatusChannel( BufferStatusChannel::BufferStatusChannel(
const StatusDescriptor &fmqDesc) { const StatusDescriptor &fmqDesc) {
std::unique_ptr<BufferStatusQueue> queue = auto queue = std::make_unique<BufferStatusQueue>(fmqDesc);
std::make_unique<BufferStatusQueue>(fmqDesc);
if (!queue || queue->isValid() == false) { if (!queue || queue->isValid() == false) {
mValid = false; mValid = false;
return; return;
@ -136,11 +119,11 @@ void BufferStatusChannel::postBufferRelease(
BufferStatusMessage message; BufferStatusMessage message;
for (size_t i = 0 ; i < avail; ++i) { for (size_t i = 0 ; i < avail; ++i) {
BufferId id = pending.front(); BufferId id = pending.front();
message.newStatus = BufferStatus::NOT_USED; message.status = BufferStatus::NOT_USED;
message.bufferId = id; message.bufferId = id;
message.connectionId = connectionId; message.connectionId = connectionId;
if (!mBufferStatusQueue->write(&message, 1)) { if (!mBufferStatusQueue->write(&message, 1)) {
// Since avaliable # of writes are already confirmed, // Since available # of writes are already confirmed,
// this should not happen. // this should not happen.
// TODO: error handing? // TODO: error handing?
ALOGW("FMQ message cannot be sent from %lld", (long long)connectionId); ALOGW("FMQ message cannot be sent from %lld", (long long)connectionId);
@ -160,11 +143,11 @@ void BufferStatusChannel::postBufferInvalidateAck(
size_t avail = mBufferStatusQueue->availableToWrite(); size_t avail = mBufferStatusQueue->availableToWrite();
if (avail > 0) { if (avail > 0) {
BufferStatusMessage message; BufferStatusMessage message;
message.newStatus = BufferStatus::INVALIDATION_ACK; message.status = BufferStatus::INVALIDATION_ACK;
message.bufferId = invalidateId; message.bufferId = invalidateId;
message.connectionId = connectionId; message.connectionId = connectionId;
if (!mBufferStatusQueue->write(&message, 1)) { if (!mBufferStatusQueue->write(&message, 1)) {
// Since avaliable # of writes are already confirmed, // Since available # of writes are already confirmed,
// this should not happen. // this should not happen.
// TODO: error handing? // TODO: error handing?
ALOGW("FMQ message cannot be sent from %lld", (long long)connectionId); ALOGW("FMQ message cannot be sent from %lld", (long long)connectionId);
@ -186,11 +169,11 @@ bool BufferStatusChannel::postBufferStatusMessage(
BufferStatusMessage release, message; BufferStatusMessage release, message;
for (size_t i = 0; i < numPending; ++i) { for (size_t i = 0; i < numPending; ++i) {
BufferId id = pending.front(); BufferId id = pending.front();
release.newStatus = BufferStatus::NOT_USED; release.status = BufferStatus::NOT_USED;
release.bufferId = id; release.bufferId = id;
release.connectionId = connectionId; release.connectionId = connectionId;
if (!mBufferStatusQueue->write(&release, 1)) { if (!mBufferStatusQueue->write(&release, 1)) {
// Since avaliable # of writes are already confirmed, // Since available # of writes are already confirmed,
// this should not happen. // this should not happen.
// TODO: error handling? // TODO: error handling?
ALOGW("FMQ message cannot be sent from %lld", (long long)connectionId); ALOGW("FMQ message cannot be sent from %lld", (long long)connectionId);
@ -201,13 +184,13 @@ bool BufferStatusChannel::postBufferStatusMessage(
} }
message.transactionId = transactionId; message.transactionId = transactionId;
message.bufferId = bufferId; message.bufferId = bufferId;
message.newStatus = status; message.status = status;
message.connectionId = connectionId; message.connectionId = connectionId;
message.targetConnectionId = targetId; message.targetConnectionId = targetId;
// TODO : timesatamp // TODO : timesatamp
message.timestampUs = 0; message.timestampUs = 0;
if (!mBufferStatusQueue->write(&message, 1)) { if (!mBufferStatusQueue->write(&message, 1)) {
// Since avaliable # of writes are already confirmed, // Since available # of writes are already confirmed,
// this should not happen. // this should not happen.
ALOGW("FMQ message cannot be sent from %lld", (long long)connectionId); ALOGW("FMQ message cannot be sent from %lld", (long long)connectionId);
return false; return false;
@ -276,12 +259,11 @@ bool BufferInvalidationChannel::isValid() {
return mValid; return mValid;
} }
void BufferInvalidationChannel::getDesc(const InvalidationDescriptor **fmqDescPtr) { void BufferInvalidationChannel::getDesc(InvalidationDescriptor *fmqDescPtr) {
if (mValid) { if (mValid) {
*fmqDescPtr = mBufferInvalidationQueue->getDesc(); *fmqDescPtr = mBufferInvalidationQueue->dupeDesc();
} else {
*fmqDescPtr = nullptr;
} }
// TODO: writing invalid descriptor?
} }
void BufferInvalidationChannel::postInvalidation( void BufferInvalidationChannel::postInvalidation(
@ -295,10 +277,5 @@ void BufferInvalidationChannel::postInvalidation(
mBufferInvalidationQueue->write(&message); mBufferInvalidationQueue->write(&message);
} }
} // namespace implementation } // namespace ::aidl::android::hardware::media::bufferpool2::implementation
} // namespace V2_0
} // namespace bufferpool
} // namespace media
} // namespace hardware
} // namespace android

View file

@ -1,5 +1,5 @@
/* /*
* Copyright (C) 2018 The Android Open Source Project * Copyright (C) 2022 The Android Open Source Project
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License. * you may not use this file except in compliance with the License.
@ -14,28 +14,16 @@
* limitations under the License. * limitations under the License.
*/ */
#ifndef ANDROID_HARDWARE_MEDIA_BUFFERPOOL_V2_0_BUFFERSTATUS_H #pragma once
#define ANDROID_HARDWARE_MEDIA_BUFFERPOOL_V2_0_BUFFERSTATUS_H
#include <android/hardware/media/bufferpool/2.0/types.h> #include <bufferpool2/BufferPoolTypes.h>
#include <bufferpool/BufferPoolTypes.h> #include <map>
#include <fmq/MessageQueue.h>
#include <hidl/MQDescriptor.h>
#include <hidl/Status.h>
#include <memory> #include <memory>
#include <mutex> #include <mutex>
#include <vector> #include <vector>
#include <list> #include <list>
namespace android { namespace aidl::android::hardware::media::bufferpool2::implementation {
namespace hardware {
namespace media {
namespace bufferpool {
namespace V2_0 {
namespace implementation {
/** Returns monotonic timestamp in Us since fixed point in time. */
int64_t getTimestampNow();
bool isMessageLater(uint32_t curMsgId, uint32_t prevMsgId); bool isMessageLater(uint32_t curMsgId, uint32_t prevMsgId);
@ -55,13 +43,13 @@ public:
* connection(client). * connection(client).
* *
* @param connectionId connection Id of the specified client. * @param connectionId connection Id of the specified client.
* @param fmqDescPtr double ptr of created FMQ's descriptor. * @param fmqDescPtr ptr of created FMQ's descriptor.
* *
* @return OK if FMQ is created successfully. * @return OK if FMQ is created successfully.
* NO_MEMORY when there is no memory. * NO_MEMORY when there is no memory.
* CRITICAL_ERROR otherwise. * CRITICAL_ERROR otherwise.
*/ */
ResultStatus open(ConnectionId id, const StatusDescriptor** fmqDescPtr); BufferPoolStatus open(ConnectionId id, StatusDescriptor* _Nonnull fmqDescPtr);
/** Closes a buffer status message FMQ for the specified /** Closes a buffer status message FMQ for the specified
* connection(client). * connection(client).
@ -71,7 +59,7 @@ public:
* @return OK if the specified connection is closed successfully. * @return OK if the specified connection is closed successfully.
* CRITICAL_ERROR otherwise. * CRITICAL_ERROR otherwise.
*/ */
ResultStatus close(ConnectionId id); BufferPoolStatus close(ConnectionId id);
/** Retrieves all pending FMQ buffer status messages from clients. /** Retrieves all pending FMQ buffer status messages from clients.
* *
@ -140,7 +128,7 @@ public:
std::list<BufferId> &pending, std::list<BufferId> &posted); std::list<BufferId> &pending, std::list<BufferId> &posted);
/** /**
* Posts a buffer invaliadation messge to the buffer pool. * Posts a buffer invaliadation message to the buffer pool.
* *
* @param connectionId connection Id of the client. * @param connectionId connection Id of the client.
* @param invalidateId invalidation ack to the buffer pool. * @param invalidateId invalidation ack to the buffer pool.
@ -152,7 +140,7 @@ public:
void postBufferInvalidateAck( void postBufferInvalidateAck(
ConnectionId connectionId, ConnectionId connectionId,
uint32_t invalidateId, uint32_t invalidateId,
bool *invalidated); bool* _Nonnull invalidated);
}; };
/** /**
@ -179,7 +167,7 @@ public:
*/ */
void getInvalidations(std::vector<BufferInvalidationMessage> &messages); void getInvalidations(std::vector<BufferInvalidationMessage> &messages);
/** Returns whether the FMQ is connected succesfully. */ /** Returns whether the FMQ is connected successfully. */
bool isValid(); bool isValid();
}; };
@ -199,16 +187,16 @@ public:
*/ */
BufferInvalidationChannel(); BufferInvalidationChannel();
/** Returns whether the FMQ is connected succesfully. */ /** Returns whether the FMQ is connected successfully. */
bool isValid(); bool isValid();
/** /**
* Retrieves the descriptor of a buffer invalidation FMQ. the descriptor may * Retrieves the descriptor of a buffer invalidation FMQ. the descriptor may
* be passed to the client for buffer invalidation handling. * be passed to the client for buffer invalidation handling.
* *
* @param fmqDescPtr double ptr of created FMQ's descriptor. * @param fmqDescPtr ptr of created FMQ's descriptor.
*/ */
void getDesc(const InvalidationDescriptor **fmqDescPtr); void getDesc(InvalidationDescriptor* _Nonnull fmqDescPtr);
/** Posts a buffer invalidation for invalidated buffers. /** Posts a buffer invalidation for invalidated buffers.
* *
@ -220,11 +208,4 @@ public:
void postInvalidation(uint32_t msgId, BufferId fromId, BufferId toId); void postInvalidation(uint32_t msgId, BufferId fromId, BufferId toId);
}; };
} // namespace implementation } // namespace aidl::android::hardware::media::bufferpool2::implementation
} // namespace V2_0
} // namespace bufferpool
} // namespace media
} // namespace hardware
} // namespace android
#endif // ANDROID_HARDWARE_MEDIA_BUFFERPOOL_V2_0_BUFFERSTATUS_H

View file

@ -1,5 +1,5 @@
/* /*
* Copyright (C) 2018 The Android Open Source Project * Copyright (C) 2022 The Android Open Source Project
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License. * you may not use this file except in compliance with the License.
@ -13,122 +13,74 @@
* See the License for the specific language governing permissions and * See the License for the specific language governing permissions and
* limitations under the License. * limitations under the License.
*/ */
#define LOG_TAG "BufferPoolManager" #define LOG_TAG "AidlBufferPoolMgr"
//#define LOG_NDEBUG 0 //#define LOG_NDEBUG 0
#include <bufferpool/ClientManager.h> #include <aidl/android/hardware/media/bufferpool2/ResultStatus.h>
#include <hidl/HidlTransportSupport.h> #include <bufferpool2/ClientManager.h>
#include <sys/types.h> #include <sys/types.h>
#include <time.h> #include <utils/SystemClock.h>
#include <unistd.h> #include <unistd.h>
#include <utils/Log.h> #include <utils/Log.h>
#include <chrono>
#include "BufferPoolClient.h" #include "BufferPoolClient.h"
#include "Observer.h" #include "Observer.h"
#include "Accessor.h" #include "Accessor.h"
namespace android { namespace aidl::android::hardware::media::bufferpool2::implementation {
namespace hardware {
namespace media {
namespace bufferpool {
namespace V2_0 {
namespace implementation {
static constexpr int64_t kRegisterTimeoutUs = 500000; // 0.5 sec using namespace std::chrono_literals;
static constexpr int64_t kCleanUpDurationUs = 1000000; // TODO: 1 sec tune
static constexpr int64_t kClientTimeoutUs = 5000000; // TODO: 5 secs tune
/** using Registration = aidl::android::hardware::media::bufferpool2::IClientManager::Registration;
* The holder of the cookie of remote IClientManager. using aidl::android::hardware::media::bufferpool2::ResultStatus;
* The cookie is process locally unique for each IClientManager.
* (The cookie is used to notify death of clients to bufferpool process.)
*/
class ClientManagerCookieHolder {
public:
/**
* Creates a cookie holder for remote IClientManager(s).
*/
ClientManagerCookieHolder();
/** static constexpr int64_t kRegisterTimeoutMs = 500; // 0.5 sec
* Gets a cookie for a remote IClientManager. static constexpr int64_t kCleanUpDurationMs = 1000; // TODO: 1 sec tune
* static constexpr int64_t kClientTimeoutMs = 5000; // TODO: 5 secs tune
* @param manager the specified remote IClientManager.
* @param added true when the specified remote IClientManager is added
* newly, false otherwise.
*
* @return the process locally unique cookie for the specified IClientManager.
*/
uint64_t getCookie(const sp<IClientManager> &manager, bool *added);
private:
uint64_t mSeqId;
std::mutex mLock;
std::list<std::pair<const wp<IClientManager>, uint64_t>> mManagers;
};
ClientManagerCookieHolder::ClientManagerCookieHolder() : mSeqId(0){}
uint64_t ClientManagerCookieHolder::getCookie(
const sp<IClientManager> &manager,
bool *added) {
std::lock_guard<std::mutex> lock(mLock);
for (auto it = mManagers.begin(); it != mManagers.end();) {
const sp<IClientManager> key = it->first.promote();
if (key) {
if (interfacesEqual(key, manager)) {
*added = false;
return it->second;
}
++it;
} else {
it = mManagers.erase(it);
}
}
uint64_t id = mSeqId++;
*added = true;
mManagers.push_back(std::make_pair(manager, id));
return id;
}
class ClientManager::Impl { class ClientManager::Impl {
public: public:
Impl(); Impl();
// BnRegisterSender // BnRegisterSender
ResultStatus registerSender(const sp<IAccessor> &accessor, BufferPoolStatus registerSender(const std::shared_ptr<IAccessor> &accessor,
ConnectionId *pConnectionId); Registration *pRegistration);
// BpRegisterSender // BpRegisterSender
ResultStatus registerSender(const sp<IClientManager> &receiver, BufferPoolStatus registerSender(const std::shared_ptr<IClientManager> &receiver,
ConnectionId senderId, ConnectionId senderId,
ConnectionId *receiverId); ConnectionId *receiverId,
bool *isNew);
ResultStatus create(const std::shared_ptr<BufferPoolAllocator> &allocator, BufferPoolStatus create(const std::shared_ptr<BufferPoolAllocator> &allocator,
ConnectionId *pConnectionId); ConnectionId *pConnectionId);
ResultStatus close(ConnectionId connectionId); BufferPoolStatus close(ConnectionId connectionId);
ResultStatus flush(ConnectionId connectionId); BufferPoolStatus flush(ConnectionId connectionId);
ResultStatus allocate(ConnectionId connectionId, BufferPoolStatus allocate(ConnectionId connectionId,
const std::vector<uint8_t> &params, const std::vector<uint8_t> &params,
native_handle_t **handle, native_handle_t **handle,
std::shared_ptr<BufferPoolData> *buffer); std::shared_ptr<BufferPoolData> *buffer);
ResultStatus receive(ConnectionId connectionId, BufferPoolStatus receive(ConnectionId connectionId,
TransactionId transactionId, TransactionId transactionId,
BufferId bufferId, BufferId bufferId,
int64_t timestampUs, int64_t timestampMs,
native_handle_t **handle, native_handle_t **handle,
std::shared_ptr<BufferPoolData> *buffer); std::shared_ptr<BufferPoolData> *buffer);
ResultStatus postSend(ConnectionId receiverId, BufferPoolStatus postSend(ConnectionId receiverId,
const std::shared_ptr<BufferPoolData> &buffer, const std::shared_ptr<BufferPoolData> &buffer,
TransactionId *transactionId, TransactionId *transactionId,
int64_t *timestampUs); int64_t *timestampMs);
ResultStatus getAccessor(ConnectionId connectionId, BufferPoolStatus getAccessor(ConnectionId connectionId,
sp<IAccessor> *accessor); std::shared_ptr<IAccessor> *accessor);
void cleanUp(bool clearCache = false); void cleanUp(bool clearCache = false);
@ -139,13 +91,13 @@ private:
// This lock is held for brief duration. // This lock is held for brief duration.
// Blocking operation is not performed while holding the lock. // Blocking operation is not performed while holding the lock.
std::mutex mMutex; std::mutex mMutex;
std::list<std::pair<const wp<IAccessor>, const std::weak_ptr<BufferPoolClient>>> std::list<std::pair<const std::weak_ptr<IAccessor>, const std::weak_ptr<BufferPoolClient>>>
mClients; mClients;
std::condition_variable mConnectCv; std::condition_variable mConnectCv;
bool mConnecting; bool mConnecting;
int64_t mLastCleanUpUs; int64_t mLastCleanUpMs;
ClientCache() : mConnecting(false), mLastCleanUpUs(getTimestampNow()) {} ClientCache() : mConnecting(false), mLastCleanUpMs(::android::elapsedRealtime()) {}
} mCache; } mCache;
// Active clients which can be retrieved via ConnectionId // Active clients which can be retrieved via ConnectionId
@ -157,30 +109,31 @@ private:
mClients; mClients;
} mActive; } mActive;
sp<Observer> mObserver; std::shared_ptr<Observer> mObserver;
ClientManagerCookieHolder mRemoteClientCookies;
}; };
ClientManager::Impl::Impl() ClientManager::Impl::Impl()
: mObserver(new Observer()) {} : mObserver(::ndk::SharedRefBase::make<Observer>()) {}
ResultStatus ClientManager::Impl::registerSender( BufferPoolStatus ClientManager::Impl::registerSender(
const sp<IAccessor> &accessor, ConnectionId *pConnectionId) { const std::shared_ptr<IAccessor> &accessor, Registration *pRegistration) {
cleanUp(); cleanUp();
int64_t timeoutUs = getTimestampNow() + kRegisterTimeoutUs; int64_t timeoutMs = ::android::elapsedRealtime() + kRegisterTimeoutMs;
do { do {
std::unique_lock<std::mutex> lock(mCache.mMutex); std::unique_lock<std::mutex> lock(mCache.mMutex);
for (auto it = mCache.mClients.begin(); it != mCache.mClients.end(); ++it) { for (auto it = mCache.mClients.begin(); it != mCache.mClients.end(); ++it) {
sp<IAccessor> sAccessor = it->first.promote(); std::shared_ptr<IAccessor> sAccessor = it->first.lock();
if (sAccessor && interfacesEqual(sAccessor, accessor)) { if (sAccessor && sAccessor.get() == accessor.get()) {
const std::shared_ptr<BufferPoolClient> client = it->second.lock(); const std::shared_ptr<BufferPoolClient> client = it->second.lock();
if (client) { if (client) {
std::lock_guard<std::mutex> lock(mActive.mMutex); std::lock_guard<std::mutex> lock(mActive.mMutex);
*pConnectionId = client->getConnectionId(); pRegistration->connectionId = client->getConnectionId();
if (mActive.mClients.find(*pConnectionId) != mActive.mClients.end()) { if (mActive.mClients.find(pRegistration->connectionId)
ALOGV("register existing connection %lld", (long long)*pConnectionId); != mActive.mClients.end()) {
return ResultStatus::ALREADY_EXISTS; ALOGV("register existing connection %lld",
(long long)pRegistration->connectionId);
pRegistration->isNew = false;
return ResultStatus::OK;
} }
} }
mCache.mClients.erase(it); mCache.mClients.erase(it);
@ -190,7 +143,7 @@ ResultStatus ClientManager::Impl::registerSender(
if (!mCache.mConnecting) { if (!mCache.mConnecting) {
mCache.mConnecting = true; mCache.mConnecting = true;
lock.unlock(); lock.unlock();
ResultStatus result = ResultStatus::OK; BufferPoolStatus result = ResultStatus::OK;
const std::shared_ptr<BufferPoolClient> client = const std::shared_ptr<BufferPoolClient> client =
std::make_shared<BufferPoolClient>(accessor, mObserver); std::make_shared<BufferPoolClient>(accessor, mObserver);
lock.lock(); lock.lock();
@ -209,26 +162,27 @@ ResultStatus ClientManager::Impl::registerSender(
std::lock_guard<std::mutex> lock(mActive.mMutex); std::lock_guard<std::mutex> lock(mActive.mMutex);
mActive.mClients.insert(std::make_pair(conId, client)); mActive.mClients.insert(std::make_pair(conId, client));
} }
*pConnectionId = conId; pRegistration->connectionId = conId;
ALOGV("register new connection %lld", (long long)*pConnectionId); pRegistration->isNew = true;
ALOGV("register new connection %lld", (long long)conId);
} }
mCache.mConnecting = false; mCache.mConnecting = false;
lock.unlock(); lock.unlock();
mCache.mConnectCv.notify_all(); mCache.mConnectCv.notify_all();
return result; return result;
} }
mCache.mConnectCv.wait_for( mCache.mConnectCv.wait_for(lock, kRegisterTimeoutMs*1ms);
lock, std::chrono::microseconds(kRegisterTimeoutUs)); } while (::android::elapsedRealtime() < timeoutMs);
} while (getTimestampNow() < timeoutUs);
// TODO: return timeout error // TODO: return timeout error
return ResultStatus::CRITICAL_ERROR; return ResultStatus::CRITICAL_ERROR;
} }
ResultStatus ClientManager::Impl::registerSender( BufferPoolStatus ClientManager::Impl::registerSender(
const sp<IClientManager> &receiver, const std::shared_ptr<IClientManager> &receiver,
ConnectionId senderId, ConnectionId senderId,
ConnectionId *receiverId) { ConnectionId *receiverId,
sp<IAccessor> accessor; bool *isNew) {
std::shared_ptr<IAccessor> accessor;
bool local = false; bool local = false;
{ {
std::lock_guard<std::mutex> lock(mActive.mMutex); std::lock_guard<std::mutex> lock(mActive.mMutex);
@ -239,38 +193,32 @@ ResultStatus ClientManager::Impl::registerSender(
it->second->getAccessor(&accessor); it->second->getAccessor(&accessor);
local = it->second->isLocal(); local = it->second->isLocal();
} }
ResultStatus rs = ResultStatus::CRITICAL_ERROR;
if (accessor) { if (accessor) {
Return<void> transResult = receiver->registerSender( Registration registration;
accessor, ::ndk::ScopedAStatus status = receiver->registerSender(accessor, &registration);
[&rs, receiverId]( if (!status.isOk()) {
ResultStatus status,
int64_t connectionId) {
rs = status;
*receiverId = connectionId;
});
if (!transResult.isOk()) {
return ResultStatus::CRITICAL_ERROR; return ResultStatus::CRITICAL_ERROR;
} else if (local && rs == ResultStatus::OK) { } else if (local) {
sp<ConnectionDeathRecipient> recipient = Accessor::getConnectionDeathRecipient(); std::shared_ptr<ConnectionDeathRecipient> recipient =
Accessor::getConnectionDeathRecipient();
if (recipient) { if (recipient) {
ALOGV("client death recipient registered %lld", (long long)*receiverId); ALOGV("client death recipient registered %lld", (long long)*receiverId);
bool added; recipient->addCookieToConnection(receiver->asBinder().get(), *receiverId);
uint64_t cookie = mRemoteClientCookies.getCookie(receiver, &added); AIBinder_linkToDeath(receiver->asBinder().get(), recipient->getRecipient(),
recipient->addCookieToConnection(cookie, *receiverId); receiver->asBinder().get());
if (added) {
Return<bool> transResult = receiver->linkToDeath(recipient, cookie);
}
} }
} }
*receiverId = registration.connectionId;
*isNew = registration.isNew;
return ResultStatus::OK;
} }
return rs; return ResultStatus::CRITICAL_ERROR;
} }
ResultStatus ClientManager::Impl::create( BufferPoolStatus ClientManager::Impl::create(
const std::shared_ptr<BufferPoolAllocator> &allocator, const std::shared_ptr<BufferPoolAllocator> &allocator,
ConnectionId *pConnectionId) { ConnectionId *pConnectionId) {
const sp<Accessor> accessor = new Accessor(allocator); std::shared_ptr<Accessor> accessor = ::ndk::SharedRefBase::make<Accessor>(allocator);
if (!accessor || !accessor->isValid()) { if (!accessor || !accessor->isValid()) {
return ResultStatus::CRITICAL_ERROR; return ResultStatus::CRITICAL_ERROR;
} }
@ -300,19 +248,19 @@ ResultStatus ClientManager::Impl::create(
return ResultStatus::OK; return ResultStatus::OK;
} }
ResultStatus ClientManager::Impl::close(ConnectionId connectionId) { BufferPoolStatus ClientManager::Impl::close(ConnectionId connectionId) {
std::unique_lock<std::mutex> lock1(mCache.mMutex); std::unique_lock<std::mutex> lock1(mCache.mMutex);
std::unique_lock<std::mutex> lock2(mActive.mMutex); std::unique_lock<std::mutex> lock2(mActive.mMutex);
auto it = mActive.mClients.find(connectionId); auto it = mActive.mClients.find(connectionId);
if (it != mActive.mClients.end()) { if (it != mActive.mClients.end()) {
sp<IAccessor> accessor; std::shared_ptr<IAccessor> accessor;
it->second->getAccessor(&accessor); it->second->getAccessor(&accessor);
std::shared_ptr<BufferPoolClient> closing = it->second; std::shared_ptr<BufferPoolClient> closing = it->second;
mActive.mClients.erase(connectionId); mActive.mClients.erase(connectionId);
for (auto cit = mCache.mClients.begin(); cit != mCache.mClients.end();) { for (auto cit = mCache.mClients.begin(); cit != mCache.mClients.end();) {
// clean up dead client caches // clean up dead client caches
sp<IAccessor> cAccessor = cit->first.promote(); std::shared_ptr<IAccessor> cAccessor = cit->first.lock();
if (!cAccessor || (accessor && interfacesEqual(cAccessor, accessor))) { if (!cAccessor || (accessor && cAccessor.get() == accessor.get())) {
cit = mCache.mClients.erase(cit); cit = mCache.mClients.erase(cit);
} else { } else {
cit++; cit++;
@ -326,7 +274,7 @@ ResultStatus ClientManager::Impl::close(ConnectionId connectionId) {
return ResultStatus::NOT_FOUND; return ResultStatus::NOT_FOUND;
} }
ResultStatus ClientManager::Impl::flush(ConnectionId connectionId) { BufferPoolStatus ClientManager::Impl::flush(ConnectionId connectionId) {
std::shared_ptr<BufferPoolClient> client; std::shared_ptr<BufferPoolClient> client;
{ {
std::lock_guard<std::mutex> lock(mActive.mMutex); std::lock_guard<std::mutex> lock(mActive.mMutex);
@ -339,7 +287,7 @@ ResultStatus ClientManager::Impl::flush(ConnectionId connectionId) {
return client->flush(); return client->flush();
} }
ResultStatus ClientManager::Impl::allocate( BufferPoolStatus ClientManager::Impl::allocate(
ConnectionId connectionId, const std::vector<uint8_t> &params, ConnectionId connectionId, const std::vector<uint8_t> &params,
native_handle_t **handle, std::shared_ptr<BufferPoolData> *buffer) { native_handle_t **handle, std::shared_ptr<BufferPoolData> *buffer) {
std::shared_ptr<BufferPoolClient> client; std::shared_ptr<BufferPoolClient> client;
@ -353,7 +301,7 @@ ResultStatus ClientManager::Impl::allocate(
} }
#ifdef BUFFERPOOL_CLONE_HANDLES #ifdef BUFFERPOOL_CLONE_HANDLES
native_handle_t *origHandle; native_handle_t *origHandle;
ResultStatus res = client->allocate(params, &origHandle, buffer); BufferPoolStatus res = client->allocate(params, &origHandle, buffer);
if (res != ResultStatus::OK) { if (res != ResultStatus::OK) {
return res; return res;
} }
@ -368,9 +316,9 @@ ResultStatus ClientManager::Impl::allocate(
#endif #endif
} }
ResultStatus ClientManager::Impl::receive( BufferPoolStatus ClientManager::Impl::receive(
ConnectionId connectionId, TransactionId transactionId, ConnectionId connectionId, TransactionId transactionId,
BufferId bufferId, int64_t timestampUs, BufferId bufferId, int64_t timestampMs,
native_handle_t **handle, std::shared_ptr<BufferPoolData> *buffer) { native_handle_t **handle, std::shared_ptr<BufferPoolData> *buffer) {
std::shared_ptr<BufferPoolClient> client; std::shared_ptr<BufferPoolClient> client;
{ {
@ -383,8 +331,8 @@ ResultStatus ClientManager::Impl::receive(
} }
#ifdef BUFFERPOOL_CLONE_HANDLES #ifdef BUFFERPOOL_CLONE_HANDLES
native_handle_t *origHandle; native_handle_t *origHandle;
ResultStatus res = client->receive( BufferPoolStatus res = client->receive(
transactionId, bufferId, timestampUs, &origHandle, buffer); transactionId, bufferId, timestampMs, &origHandle, buffer);
if (res != ResultStatus::OK) { if (res != ResultStatus::OK) {
return res; return res;
} }
@ -395,13 +343,13 @@ ResultStatus ClientManager::Impl::receive(
} }
return ResultStatus::OK; return ResultStatus::OK;
#else #else
return client->receive(transactionId, bufferId, timestampUs, handle, buffer); return client->receive(transactionId, bufferId, timestampMs, handle, buffer);
#endif #endif
} }
ResultStatus ClientManager::Impl::postSend( BufferPoolStatus ClientManager::Impl::postSend(
ConnectionId receiverId, const std::shared_ptr<BufferPoolData> &buffer, ConnectionId receiverId, const std::shared_ptr<BufferPoolData> &buffer,
TransactionId *transactionId, int64_t *timestampUs) { TransactionId *transactionId, int64_t *timestampMs) {
ConnectionId connectionId = buffer->mConnectionId; ConnectionId connectionId = buffer->mConnectionId;
std::shared_ptr<BufferPoolClient> client; std::shared_ptr<BufferPoolClient> client;
{ {
@ -412,11 +360,11 @@ ResultStatus ClientManager::Impl::postSend(
} }
client = it->second; client = it->second;
} }
return client->postSend(receiverId, buffer, transactionId, timestampUs); return client->postSend(receiverId, buffer, transactionId, timestampMs);
} }
ResultStatus ClientManager::Impl::getAccessor( BufferPoolStatus ClientManager::Impl::getAccessor(
ConnectionId connectionId, sp<IAccessor> *accessor) { ConnectionId connectionId, std::shared_ptr<IAccessor> *accessor) {
std::shared_ptr<BufferPoolClient> client; std::shared_ptr<BufferPoolClient> client;
{ {
std::lock_guard<std::mutex> lock(mActive.mMutex); std::lock_guard<std::mutex> lock(mActive.mMutex);
@ -430,16 +378,16 @@ ResultStatus ClientManager::Impl::getAccessor(
} }
void ClientManager::Impl::cleanUp(bool clearCache) { void ClientManager::Impl::cleanUp(bool clearCache) {
int64_t now = getTimestampNow(); int64_t now = ::android::elapsedRealtime();
int64_t lastTransactionUs; int64_t lastTransactionMs;
std::lock_guard<std::mutex> lock1(mCache.mMutex); std::lock_guard<std::mutex> lock1(mCache.mMutex);
if (clearCache || mCache.mLastCleanUpUs + kCleanUpDurationUs < now) { if (clearCache || mCache.mLastCleanUpMs + kCleanUpDurationMs < now) {
std::lock_guard<std::mutex> lock2(mActive.mMutex); std::lock_guard<std::mutex> lock2(mActive.mMutex);
int cleaned = 0; int cleaned = 0;
for (auto it = mActive.mClients.begin(); it != mActive.mClients.end();) { for (auto it = mActive.mClients.begin(); it != mActive.mClients.end();) {
if (!it->second->isActive(&lastTransactionUs, clearCache)) { if (!it->second->isActive(&lastTransactionMs, clearCache)) {
if (lastTransactionUs + kClientTimeoutUs < now) { if (lastTransactionMs + kClientTimeoutMs < now) {
sp<IAccessor> accessor; std::shared_ptr<IAccessor> accessor;
it->second->getAccessor(&accessor); it->second->getAccessor(&accessor);
it = mActive.mClients.erase(it); it = mActive.mClients.erase(it);
++cleaned; ++cleaned;
@ -450,7 +398,7 @@ void ClientManager::Impl::cleanUp(bool clearCache) {
} }
for (auto cit = mCache.mClients.begin(); cit != mCache.mClients.end();) { for (auto cit = mCache.mClients.begin(); cit != mCache.mClients.end();) {
// clean up dead client caches // clean up dead client caches
sp<IAccessor> cAccessor = cit->first.promote(); std::shared_ptr<IAccessor> cAccessor = cit->first.lock();
if (!cAccessor) { if (!cAccessor) {
cit = mCache.mClients.erase(cit); cit = mCache.mClients.erase(cit);
} else { } else {
@ -458,30 +406,32 @@ void ClientManager::Impl::cleanUp(bool clearCache) {
} }
} }
ALOGV("# of cleaned connections: %d", cleaned); ALOGV("# of cleaned connections: %d", cleaned);
mCache.mLastCleanUpUs = now; mCache.mLastCleanUpMs = now;
} }
} }
// Methods from ::android::hardware::media::bufferpool::V2_0::IClientManager follow. ::ndk::ScopedAStatus ClientManager::registerSender(
Return<void> ClientManager::registerSender(const sp<::android::hardware::media::bufferpool::V2_0::IAccessor>& bufferPool, registerSender_cb _hidl_cb) { const std::shared_ptr<IAccessor>& in_bufferPool, Registration* _aidl_return) {
BufferPoolStatus status = ResultStatus::CRITICAL_ERROR;
if (mImpl) { if (mImpl) {
ConnectionId connectionId = -1; status = mImpl->registerSender(in_bufferPool, _aidl_return);
ResultStatus status = mImpl->registerSender(bufferPool, &connectionId);
_hidl_cb(status, connectionId);
} else {
_hidl_cb(ResultStatus::CRITICAL_ERROR, -1);
} }
return Void(); if (status != ResultStatus::OK) {
return ::ndk::ScopedAStatus::fromServiceSpecificError(status);
}
return ::ndk::ScopedAStatus::ok();
} }
// Methods for local use. // Methods for local use.
sp<ClientManager> ClientManager::sInstance; std::shared_ptr<ClientManager> ClientManager::sInstance;
std::mutex ClientManager::sInstanceLock; std::mutex ClientManager::sInstanceLock;
sp<ClientManager> ClientManager::getInstance() { std::shared_ptr<ClientManager> ClientManager::getInstance() {
std::lock_guard<std::mutex> lock(sInstanceLock); std::lock_guard<std::mutex> lock(sInstanceLock);
if (!sInstance) { if (!sInstance) {
sInstance = new ClientManager(); sInstance = ::ndk::SharedRefBase::make<ClientManager>();
// TODO: configure thread count for threadpool properly
// after b/261652496 is resolved.
} }
Accessor::createInvalidator(); Accessor::createInvalidator();
Accessor::createEvictor(); Accessor::createEvictor();
@ -493,7 +443,7 @@ ClientManager::ClientManager() : mImpl(new Impl()) {}
ClientManager::~ClientManager() { ClientManager::~ClientManager() {
} }
ResultStatus ClientManager::create( BufferPoolStatus ClientManager::create(
const std::shared_ptr<BufferPoolAllocator> &allocator, const std::shared_ptr<BufferPoolAllocator> &allocator,
ConnectionId *pConnectionId) { ConnectionId *pConnectionId) {
if (mImpl) { if (mImpl) {
@ -502,31 +452,32 @@ ResultStatus ClientManager::create(
return ResultStatus::CRITICAL_ERROR; return ResultStatus::CRITICAL_ERROR;
} }
ResultStatus ClientManager::registerSender( BufferPoolStatus ClientManager::registerSender(
const sp<IClientManager> &receiver, const std::shared_ptr<IClientManager> &receiver,
ConnectionId senderId, ConnectionId senderId,
ConnectionId *receiverId) { ConnectionId *receiverId,
bool *isNew) {
if (mImpl) { if (mImpl) {
return mImpl->registerSender(receiver, senderId, receiverId); return mImpl->registerSender(receiver, senderId, receiverId, isNew);
} }
return ResultStatus::CRITICAL_ERROR; return ResultStatus::CRITICAL_ERROR;
} }
ResultStatus ClientManager::close(ConnectionId connectionId) { BufferPoolStatus ClientManager::close(ConnectionId connectionId) {
if (mImpl) { if (mImpl) {
return mImpl->close(connectionId); return mImpl->close(connectionId);
} }
return ResultStatus::CRITICAL_ERROR; return ResultStatus::CRITICAL_ERROR;
} }
ResultStatus ClientManager::flush(ConnectionId connectionId) { BufferPoolStatus ClientManager::flush(ConnectionId connectionId) {
if (mImpl) { if (mImpl) {
return mImpl->flush(connectionId); return mImpl->flush(connectionId);
} }
return ResultStatus::CRITICAL_ERROR; return ResultStatus::CRITICAL_ERROR;
} }
ResultStatus ClientManager::allocate( BufferPoolStatus ClientManager::allocate(
ConnectionId connectionId, const std::vector<uint8_t> &params, ConnectionId connectionId, const std::vector<uint8_t> &params,
native_handle_t **handle, std::shared_ptr<BufferPoolData> *buffer) { native_handle_t **handle, std::shared_ptr<BufferPoolData> *buffer) {
if (mImpl) { if (mImpl) {
@ -535,22 +486,22 @@ ResultStatus ClientManager::allocate(
return ResultStatus::CRITICAL_ERROR; return ResultStatus::CRITICAL_ERROR;
} }
ResultStatus ClientManager::receive( BufferPoolStatus ClientManager::receive(
ConnectionId connectionId, TransactionId transactionId, ConnectionId connectionId, TransactionId transactionId,
BufferId bufferId, int64_t timestampUs, BufferId bufferId, int64_t timestampMs,
native_handle_t **handle, std::shared_ptr<BufferPoolData> *buffer) { native_handle_t **handle, std::shared_ptr<BufferPoolData> *buffer) {
if (mImpl) { if (mImpl) {
return mImpl->receive(connectionId, transactionId, bufferId, return mImpl->receive(connectionId, transactionId, bufferId,
timestampUs, handle, buffer); timestampMs, handle, buffer);
} }
return ResultStatus::CRITICAL_ERROR; return ResultStatus::CRITICAL_ERROR;
} }
ResultStatus ClientManager::postSend( BufferPoolStatus ClientManager::postSend(
ConnectionId receiverId, const std::shared_ptr<BufferPoolData> &buffer, ConnectionId receiverId, const std::shared_ptr<BufferPoolData> &buffer,
TransactionId *transactionId, int64_t* timestampUs) { TransactionId *transactionId, int64_t* timestampMs) {
if (mImpl && buffer) { if (mImpl && buffer) {
return mImpl->postSend(receiverId, buffer, transactionId, timestampUs); return mImpl->postSend(receiverId, buffer, transactionId, timestampMs);
} }
return ResultStatus::CRITICAL_ERROR; return ResultStatus::CRITICAL_ERROR;
} }
@ -561,9 +512,4 @@ void ClientManager::cleanUp() {
} }
} }
} // namespace implementation } // namespace ::aidl::android::hardware::media::bufferpool2::implementation
} // namespace V2_0
} // namespace bufferpool
} // namespace media
} // namespace hardware
} // namespace android

View file

@ -1,5 +1,5 @@
/* /*
* Copyright (C) 2018 The Android Open Source Project * Copyright (C) 2022 The Android Open Source Project
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License. * you may not use this file except in compliance with the License.
@ -13,42 +13,63 @@
* See the License for the specific language governing permissions and * See the License for the specific language governing permissions and
* limitations under the License. * limitations under the License.
*/ */
#define LOG_TAG "AidlBufferPoolCon"
//#define LOG_NDEBUG 0
#include <aidlcommonsupport/NativeHandle.h>
#include "Connection.h" #include "Connection.h"
#include "Accessor.h"
namespace android { namespace aidl::android::hardware::media::bufferpool2::implementation {
namespace hardware {
namespace media {
namespace bufferpool {
namespace V2_0 {
namespace implementation {
// Methods from ::android::hardware::media::bufferpool::V2_0::IConnection follow. using aidl::android::hardware::media::bufferpool2::ResultStatus;
Return<void> Connection::fetch(uint64_t transactionId, uint32_t bufferId, fetch_cb _hidl_cb) { using Buffer = aidl::android::hardware::media::bufferpool2::Buffer;
ResultStatus status = ResultStatus::CRITICAL_ERROR; using FetchInfo = aidl::android::hardware::media::bufferpool2::IConnection::FetchInfo;
using FetchResult = aidl::android::hardware::media::bufferpool2::IConnection::FetchResult;
::ndk::ScopedAStatus Connection::fetch(const std::vector<FetchInfo>& in_fetchInfos,
std::vector<FetchResult>* _aidl_return) {
int success = 0;
int failure = 0;
if (mInitialized && mAccessor) { if (mInitialized && mAccessor) {
if (bufferId != SYNC_BUFFERID) { for (auto it = in_fetchInfos.begin(); it != in_fetchInfos.end(); ++it) {
const native_handle_t *handle = nullptr; if (fetch(it->transactionId, it->bufferId, _aidl_return)) {
status = mAccessor->fetch( success++;
mConnectionId, transactionId, bufferId, &handle); } else {
if (status == ResultStatus::OK) { failure++;
Buffer buffer = {};
buffer.id = bufferId;
buffer.buffer = handle;
_hidl_cb(status, buffer);
return Void();
} }
} else {
mAccessor->cleanUp(false);
} }
if (failure > 0) {
ALOGD("total fetch %d, failure %d", success + failure, failure);
}
return ::ndk::ScopedAStatus::ok();
} }
return ::ndk::ScopedAStatus::fromServiceSpecificError(ResultStatus::CRITICAL_ERROR);
}
Buffer buffer = {}; ::ndk::ScopedAStatus Connection::sync() {
buffer.id = 0; if (mInitialized && mAccessor) {
buffer.buffer = nullptr; mAccessor->cleanUp(false);
}
return ::ndk::ScopedAStatus::ok();
}
_hidl_cb(status, buffer);
return Void(); bool Connection::fetch(TransactionId transactionId, BufferId bufferId,
std::vector<FetchResult> *result) {
BufferPoolStatus status = ResultStatus::CRITICAL_ERROR;
const native_handle_t *handle = nullptr;
status = mAccessor->fetch(
mConnectionId, transactionId, bufferId, &handle);
if (status == ResultStatus::OK) {
result->emplace_back(FetchResult::make<FetchResult::buffer>());
result->back().get<FetchResult::buffer>().id = bufferId;
result->back().get<FetchResult::buffer>().buffer = ::android::dupToAidl(handle);
return true;
}
result->emplace_back(FetchResult::make<FetchResult::failure>(status));
return false;
} }
Connection::Connection() : mInitialized(false), mConnectionId(-1LL) {} Connection::Connection() : mInitialized(false), mConnectionId(-1LL) {}
@ -60,7 +81,7 @@ Connection::~Connection() {
} }
void Connection::initialize( void Connection::initialize(
const sp<Accessor>& accessor, ConnectionId connectionId) { const std::shared_ptr<Accessor>& accessor, ConnectionId connectionId) {
if (!mInitialized) { if (!mInitialized) {
mAccessor = accessor; mAccessor = accessor;
mConnectionId = connectionId; mConnectionId = connectionId;
@ -68,14 +89,14 @@ void Connection::initialize(
} }
} }
ResultStatus Connection::flush() { BufferPoolStatus Connection::flush() {
if (mInitialized && mAccessor) { if (mInitialized && mAccessor) {
return mAccessor->flush(); return mAccessor->flush();
} }
return ResultStatus::CRITICAL_ERROR; return ResultStatus::CRITICAL_ERROR;
} }
ResultStatus Connection::allocate( BufferPoolStatus Connection::allocate(
const std::vector<uint8_t> &params, BufferId *bufferId, const std::vector<uint8_t> &params, BufferId *bufferId,
const native_handle_t **handle) { const native_handle_t **handle) {
if (mInitialized && mAccessor) { if (mInitialized && mAccessor) {
@ -90,15 +111,4 @@ void Connection::cleanUp(bool clearCache) {
} }
} }
// Methods from ::android::hidl::base::V1_0::IBase follow. } // namespace ::aidl::android::hardware::media::bufferpool2::implementation
//IConnection* HIDL_FETCH_IConnection(const char* /* name */) {
// return new Connection();
//}
} // namespace implementation
} // namespace V2_0
} // namespace bufferpool
} // namespace media
} // namespace hardware
} // namespace android

View file

@ -1,5 +1,5 @@
/* /*
* Copyright (C) 2018 The Android Open Source Project * Copyright (C) 2022 The Android Open Source Project
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License. * you may not use this file except in compliance with the License.
@ -14,39 +14,28 @@
* limitations under the License. * limitations under the License.
*/ */
#ifndef ANDROID_HARDWARE_MEDIA_BUFFERPOOL_V2_0_CONNECTION_H #pragma once
#define ANDROID_HARDWARE_MEDIA_BUFFERPOOL_V2_0_CONNECTION_H
#include <android/hardware/media/bufferpool/2.0/IConnection.h> #include <memory>
#include <bufferpool/BufferPoolTypes.h>
#include <hidl/MQDescriptor.h>
#include <hidl/Status.h>
#include "Accessor.h"
namespace android { #include <aidl/android/hardware/media/bufferpool2/BnConnection.h>
namespace hardware { #include <bufferpool2/BufferPoolTypes.h>
namespace media {
namespace bufferpool {
namespace V2_0 {
namespace implementation {
using ::android::hardware::hidl_array; namespace aidl::android::hardware::media::bufferpool2::implementation {
using ::android::hardware::hidl_memory;
using ::android::hardware::hidl_string;
using ::android::hardware::hidl_vec;
using ::android::hardware::media::bufferpool::V2_0::implementation::Accessor;
using ::android::hardware::Return;
using ::android::hardware::Void;
using ::android::sp;
struct Connection : public IConnection { struct Accessor;
// Methods from ::android::hardware::media::bufferpool::V2_0::IConnection follow.
Return<void> fetch(uint64_t transactionId, uint32_t bufferId, fetch_cb _hidl_cb) override; struct Connection : public BnConnection {
// Methods from ::aidl::android::hardware::media::bufferpool2::IConnection.
::ndk::ScopedAStatus fetch(const std::vector<::aidl::android::hardware::media::bufferpool2::IConnection::FetchInfo>& in_fetchInfos, std::vector<::aidl::android::hardware::media::bufferpool2::IConnection::FetchResult>* _aidl_return) override;
// Methods from ::aidl::android::hardware::media::bufferpool2::IConnection.
::ndk::ScopedAStatus sync() override;
/** /**
* Invalidates all buffers which are active and/or are ready to be recycled. * Invalidates all buffers which are active and/or are ready to be recycled.
*/ */
ResultStatus flush(); BufferPoolStatus flush();
/** /**
* Allocates a buffer using the specified parameters. Recycles a buffer if * Allocates a buffer using the specified parameters. Recycles a buffer if
@ -61,7 +50,7 @@ struct Connection : public IConnection {
* NO_MEMORY when there is no memory. * NO_MEMORY when there is no memory.
* CRITICAL_ERROR otherwise. * CRITICAL_ERROR otherwise.
*/ */
ResultStatus allocate(const std::vector<uint8_t> &params, BufferPoolStatus allocate(const std::vector<uint8_t> &params,
BufferId *bufferId, const native_handle_t **handle); BufferId *bufferId, const native_handle_t **handle);
/** /**
@ -86,7 +75,7 @@ struct Connection : public IConnection {
* @param accessor the specified buffer pool. * @param accessor the specified buffer pool.
* @param connectionId Id. * @param connectionId Id.
*/ */
void initialize(const sp<Accessor> &accessor, ConnectionId connectionId); void initialize(const std::shared_ptr<Accessor> &accessor, ConnectionId connectionId);
enum : uint32_t { enum : uint32_t {
SYNC_BUFFERID = UINT32_MAX, SYNC_BUFFERID = UINT32_MAX,
@ -94,15 +83,14 @@ struct Connection : public IConnection {
private: private:
bool mInitialized; bool mInitialized;
sp<Accessor> mAccessor; std::shared_ptr<Accessor> mAccessor;
ConnectionId mConnectionId; ConnectionId mConnectionId;
bool fetch(
uint64_t transactionId,
uint32_t bufferId,
std::vector<::aidl::android::hardware::media::bufferpool2::IConnection::FetchResult>
*result);
}; };
} // namespace implementation } // namespace aidl::android::hardware::media::bufferpool2::implementation
} // namespace V2_0
} // namespace bufferpool
} // namespace media
} // namespace hardware
} // namespace android
#endif // ANDROID_HARDWARE_MEDIA_BUFFERPOOL_V2_0_CONNECTION_H

View file

@ -0,0 +1,124 @@
/*
* Copyright (C) 2022 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <aidl/android/hardware/media/bufferpool2/BufferStatusMessage.h>
#include <bufferpool2/BufferPoolTypes.h>
#include <map>
#include <set>
namespace aidl::android::hardware::media::bufferpool2::implementation {
// Helper template methods for handling map of set.
template<class T, class U>
bool insert(std::map<T, std::set<U>> *mapOfSet, T key, U value) {
auto iter = mapOfSet->find(key);
if (iter == mapOfSet->end()) {
std::set<U> valueSet{value};
mapOfSet->insert(std::make_pair(key, valueSet));
return true;
} else if (iter->second.find(value) == iter->second.end()) {
iter->second.insert(value);
return true;
}
return false;
}
// Helper template methods for handling map of set.
template<class T, class U>
bool erase(std::map<T, std::set<U>> *mapOfSet, T key, U value) {
bool ret = false;
auto iter = mapOfSet->find(key);
if (iter != mapOfSet->end()) {
if (iter->second.erase(value) > 0) {
ret = true;
}
if (iter->second.size() == 0) {
mapOfSet->erase(iter);
}
}
return ret;
}
// Helper template methods for handling map of set.
template<class T, class U>
bool contains(std::map<T, std::set<U>> *mapOfSet, T key, U value) {
auto iter = mapOfSet->find(key);
if (iter != mapOfSet->end()) {
auto setIter = iter->second.find(value);
return setIter != iter->second.end();
}
return false;
}
// Buffer data structure for internal BufferPool use.(storage/fetching)
struct InternalBuffer {
BufferId mId;
size_t mOwnerCount;
size_t mTransactionCount;
const std::shared_ptr<BufferPoolAllocation> mAllocation;
const size_t mAllocSize;
const std::vector<uint8_t> mConfig;
bool mInvalidated;
InternalBuffer(
BufferId id,
const std::shared_ptr<BufferPoolAllocation> &alloc,
const size_t allocSize,
const std::vector<uint8_t> &allocConfig)
: mId(id), mOwnerCount(0), mTransactionCount(0),
mAllocation(alloc), mAllocSize(allocSize), mConfig(allocConfig),
mInvalidated(false) {}
const native_handle_t *handle() {
return mAllocation->handle();
}
void invalidate() {
mInvalidated = true;
}
};
// Buffer transacion status/message data structure for internal BufferPool use.
struct TransactionStatus {
TransactionId mId;
BufferId mBufferId;
ConnectionId mSender;
ConnectionId mReceiver;
BufferStatus mStatus;
int64_t mTimestampMs;
bool mSenderValidated;
TransactionStatus(const BufferStatusMessage &message, int64_t timestampMs) {
mId = message.transactionId;
mBufferId = message.bufferId;
mStatus = message.status;
mTimestampMs = timestampMs;
if (mStatus == BufferStatus::TRANSFER_TO) {
mSender = message.connectionId;
mReceiver = message.targetConnectionId;
mSenderValidated = true;
} else {
mSender = -1LL;
mReceiver = message.connectionId;
mSenderValidated = false;
}
}
};
} // namespace aidl::android::hardware::media::bufferpool2::implementation

View file

@ -1,5 +1,5 @@
/* /*
* Copyright (C) 2018 The Android Open Source Project * Copyright (C) 2022 The Android Open Source Project
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License. * you may not use this file except in compliance with the License.
@ -15,13 +15,9 @@
*/ */
#include "Observer.h" #include "Observer.h"
#include "BufferPoolClient.h"
namespace android { namespace aidl::android::hardware::media::bufferpool2::implementation {
namespace hardware {
namespace media {
namespace bufferpool {
namespace V2_0 {
namespace implementation {
Observer::Observer() { Observer::Observer() {
} }
@ -29,20 +25,19 @@ Observer::Observer() {
Observer::~Observer() { Observer::~Observer() {
} }
// Methods from ::android::hardware::media::bufferpool::V2_0::IObserver follow. ::ndk::ScopedAStatus Observer::onMessage(int64_t in_connectionId, int32_t in_msgId) {
Return<void> Observer::onMessage(int64_t connectionId, uint32_t msgId) {
std::unique_lock<std::mutex> lock(mLock); std::unique_lock<std::mutex> lock(mLock);
auto it = mClients.find(connectionId); auto it = mClients.find(in_connectionId);
if (it != mClients.end()) { if (it != mClients.end()) {
const std::shared_ptr<BufferPoolClient> client = it->second.lock(); const std::shared_ptr<BufferPoolClient> client = it->second.lock();
if (!client) { if (!client) {
mClients.erase(it); mClients.erase(it);
} else { } else {
lock.unlock(); lock.unlock();
client->receiveInvalidation(msgId); client->receiveInvalidation(in_msgId);
} }
} }
return Void(); return ::ndk::ScopedAStatus::ok();
} }
void Observer::addClient(ConnectionId connectionId, void Observer::addClient(ConnectionId connectionId,
@ -65,9 +60,4 @@ void Observer::delClient(ConnectionId connectionId) {
} }
} // namespace implementation } // namespace aidl::android::hardware::media::bufferpool2::implementation
} // namespace V2_0
} // namespace bufferpool
} // namespace media
} // namespace hardware
} // namespace android

View file

@ -1,5 +1,5 @@
/* /*
* Copyright (C) 2018 The Android Open Source Project * Copyright (C) 2022 The Android Open Source Project
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License. * you may not use this file except in compliance with the License.
@ -14,32 +14,20 @@
* limitations under the License. * limitations under the License.
*/ */
#ifndef ANDROID_HARDWARE_MEDIA_BUFFERPOOL_V2_0_OBSERVER_H #pragma once
#define ANDROID_HARDWARE_MEDIA_BUFFERPOOL_V2_0_OBSERVER_H
#include <android/hardware/media/bufferpool/2.0/IObserver.h> #include <map>
#include <hidl/MQDescriptor.h> #include <memory>
#include <hidl/Status.h> #include <mutex>
#include "BufferPoolClient.h" #include <aidl/android/hardware/media/bufferpool2/BnObserver.h>
#include <bufferpool2/BufferPoolTypes.h>
namespace android { namespace aidl::android::hardware::media::bufferpool2::implementation {
namespace hardware {
namespace media {
namespace bufferpool {
namespace V2_0 {
namespace implementation {
using ::android::hardware::hidl_array; class BufferPoolClient;
using ::android::hardware::hidl_memory;
using ::android::hardware::hidl_string;
using ::android::hardware::hidl_vec;
using ::android::hardware::Return;
using ::android::hardware::Void;
using ::android::sp;
struct Observer : public IObserver { struct Observer : public BnObserver {
// Methods from ::android::hardware::media::bufferpool::V2_0::IObserver follow. ::ndk::ScopedAStatus onMessage(int64_t in_connectionId, int32_t in_msgId) override;
Return<void> onMessage(int64_t connectionId, uint32_t msgId) override;
~Observer(); ~Observer();
@ -51,17 +39,11 @@ struct Observer : public IObserver {
private: private:
Observer(); Observer();
friend struct ClientManager; friend class ::ndk::SharedRefBase;
std::mutex mLock; std::mutex mLock;
std::map<ConnectionId, const std::weak_ptr<BufferPoolClient>> mClients; std::map<ConnectionId, const std::weak_ptr<BufferPoolClient>> mClients;
}; };
} // namespace implementation } // namespace aidl::android::hardware::media::bufferpool2::implementation
} // namespace V2_0
} // namespace bufferpool
} // namespace media
} // namespace hardware
} // namespace android
#endif // ANDROID_HARDWARE_MEDIA_BUFFERPOOL_V2_0_OBSERVER_H

View file

@ -1,5 +1,5 @@
/* /*
* Copyright (C) 2018 The Android Open Source Project * Copyright (C) 2022 The Android Open Source Project
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License. * you may not use this file except in compliance with the License.
@ -14,23 +14,19 @@
* limitations under the License. * limitations under the License.
*/ */
#ifndef ANDROID_HARDWARE_MEDIA_BUFFERPOOL_V2_0_BUFFERPOOLTYPES_H #pragma once
#define ANDROID_HARDWARE_MEDIA_BUFFERPOOL_V2_0_BUFFERPOOLTYPES_H
#include <android/hardware/media/bufferpool/2.0/types.h>
#include <cutils/native_handle.h> #include <cutils/native_handle.h>
#include <fmq/MessageQueue.h> #include <fmq/AidlMessageQueue.h>
#include <hidl/MQDescriptor.h> #include <aidl/android/hardware/media/bufferpool2/BufferStatusMessage.h>
#include <hidl/Status.h> #include <aidl/android/hardware/media/bufferpool2/BufferInvalidationMessage.h>
#include <aidl/android/hardware/media/bufferpool2/ResultStatus.h>
namespace android { namespace aidl::android::hardware::media::bufferpool2 {
namespace hardware {
namespace media {
namespace bufferpool {
struct BufferPoolData { struct BufferPoolData {
// For local use, to specify a bufferpool (client connection) for buffers. // For local use, to specify a bufferpool (client connection) for buffers.
// Return value from connect#IAccessor(android.hardware.media.bufferpool@2.0). // Retrieved from returned info of IAccessor#connect(android.hardware.media.bufferpool@2.0).
int64_t mConnectionId; int64_t mConnectionId;
// BufferId // BufferId
uint32_t mId; uint32_t mId;
@ -44,26 +40,38 @@ struct BufferPoolData {
~BufferPoolData() {} ~BufferPoolData() {}
}; };
namespace V2_0 {
namespace implementation { namespace implementation {
using ::android::hardware::kSynchronizedReadWrite; using aidl::android::hardware::common::fmq::SynchronizedReadWrite;
using ::android::hardware::kUnsynchronizedWrite; using aidl::android::hardware::common::fmq::UnsynchronizedWrite;
using aidl::android::hardware::media::bufferpool2::BufferStatusMessage;
using aidl::android::hardware::media::bufferpool2::BufferInvalidationMessage;
typedef uint32_t BufferId; typedef uint32_t BufferId;
typedef uint64_t TransactionId; typedef uint64_t TransactionId;
typedef int64_t ConnectionId; typedef int64_t ConnectionId;
typedef int32_t BufferPoolStatus;
// AIDL hal description language does not support unsigned.
int32_t static inline ToAidl(BufferId id) {return static_cast<int32_t>(id);}
int64_t static inline ToAidl(TransactionId id) {return static_cast<int64_t>(id);}
BufferId static inline FromAidl(int32_t id) {return static_cast<BufferId>(id);}
TransactionId static inline FromAidl(int64_t id) {return static_cast<TransactionId>(id);}
enum : ConnectionId { enum : ConnectionId {
INVALID_CONNECTIONID = 0, INVALID_CONNECTIONID = 0,
}; };
typedef android::hardware::MessageQueue<BufferStatusMessage, kSynchronizedReadWrite> BufferStatusQueue; typedef ::android::AidlMessageQueue<BufferStatusMessage, SynchronizedReadWrite> BufferStatusQueue;
typedef BufferStatusQueue::Descriptor StatusDescriptor; typedef aidl::android::hardware::common::fmq::MQDescriptor<BufferStatusMessage, SynchronizedReadWrite>
StatusDescriptor;
typedef android::hardware::MessageQueue<BufferInvalidationMessage, kUnsynchronizedWrite> typedef ::android::AidlMessageQueue<BufferInvalidationMessage, UnsynchronizedWrite>
BufferInvalidationQueue; BufferInvalidationQueue;
typedef BufferInvalidationQueue::Descriptor InvalidationDescriptor; typedef aidl::android::hardware::common::fmq::MQDescriptor<BufferInvalidationMessage, UnsynchronizedWrite>
InvalidationDescriptor;
/** /**
* Allocation wrapper class for buffer pool. * Allocation wrapper class for buffer pool.
@ -95,7 +103,7 @@ public:
* *
* @return OK when an allocation is created successfully. * @return OK when an allocation is created successfully.
*/ */
virtual ResultStatus allocate( virtual BufferPoolStatus allocate(
const std::vector<uint8_t> &params, const std::vector<uint8_t> &params,
std::shared_ptr<BufferPoolAllocation> *alloc, std::shared_ptr<BufferPoolAllocation> *alloc,
size_t *allocSize) = 0; size_t *allocSize) = 0;
@ -114,10 +122,5 @@ protected:
}; };
} // namespace implementation } // namespace implementation
} // namespace V2_0 } // namespace aidl::android::hareware::media::bufferpool2
} // namespace bufferpool
} // namespace media
} // namespace hardware
} // namespace android
#endif // ANDROID_HARDWARE_MEDIA_BUFFERPOOL_V2_0_BUFFERPOOLTYPES_H

View file

@ -1,5 +1,5 @@
/* /*
* Copyright (C) 2018 The Android Open Source Project * Copyright (C) 2022 The Android Open Source Project
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License. * you may not use this file except in compliance with the License.
@ -14,38 +14,28 @@
* limitations under the License. * limitations under the License.
*/ */
#ifndef ANDROID_HARDWARE_MEDIA_BUFFERPOOL_V2_0_CLIENTMANAGER_H #pragma once
#define ANDROID_HARDWARE_MEDIA_BUFFERPOOL_V2_0_CLIENTMANAGER_H
#include <android/hardware/media/bufferpool/2.0/IClientManager.h> #include <aidl/android/hardware/media/bufferpool2/IAccessor.h>
#include <hidl/MQDescriptor.h> #include <aidl/android/hardware/media/bufferpool2/BnClientManager.h>
#include <hidl/Status.h>
#include <memory> #include <memory>
#include "BufferPoolTypes.h" #include "BufferPoolTypes.h"
namespace android { namespace aidl::android::hardware::media::bufferpool2::implementation {
namespace hardware {
namespace media {
namespace bufferpool {
namespace V2_0 {
namespace implementation {
using ::android::hardware::hidl_array; using aidl::android::hardware::media::bufferpool2::BnClientManager;
using ::android::hardware::hidl_memory; using aidl::android::hardware::media::bufferpool2::IClientManager;
using ::android::hardware::hidl_string; using aidl::android::hardware::media::bufferpool2::IAccessor;
using ::android::hardware::hidl_vec;
using ::android::hardware::media::bufferpool::V2_0::IAccessor;
using ::android::hardware::media::bufferpool::V2_0::ResultStatus;
using ::android::hardware::Return;
using ::android::hardware::Void;
using ::android::sp;
struct ClientManager : public IClientManager { struct ClientManager : public BnClientManager {
// Methods from ::android::hardware::media::bufferpool::V2_0::IClientManager follow. // Methods from ::aidl::android::hardware::media::bufferpool2::IClientManager follow.
Return<void> registerSender(const sp<::android::hardware::media::bufferpool::V2_0::IAccessor>& bufferPool, registerSender_cb _hidl_cb) override; ::ndk::ScopedAStatus registerSender(
const std::shared_ptr<IAccessor>& in_bufferPool,
::aidl::android::hardware::media::bufferpool2::IClientManager::Registration* _aidl_return)
override;
/** Gets an instance. */ /** Gets an instance. */
static sp<ClientManager> getInstance(); static std::shared_ptr<ClientManager> getInstance();
/** /**
* Creates a local connection with a newly created buffer pool. * Creates a local connection with a newly created buffer pool.
@ -56,10 +46,10 @@ struct ClientManager : public IClientManager {
* *
* @return OK when a buffer pool and a local connection is successfully * @return OK when a buffer pool and a local connection is successfully
* created. * created.
* NO_MEMORY when there is no memory. * ResultStatus::NO_MEMORY when there is no memory.
* CRITICAL_ERROR otherwise. * CRITICAL_ERROR otherwise.
*/ */
ResultStatus create(const std::shared_ptr<BufferPoolAllocator> &allocator, BufferPoolStatus create(const std::shared_ptr<BufferPoolAllocator> &allocator,
ConnectionId *pConnectionId); ConnectionId *pConnectionId);
/** /**
@ -69,16 +59,17 @@ struct ClientManager : public IClientManager {
* @param senderId A local connection which will send buffers to. * @param senderId A local connection which will send buffers to.
* @param receiverId Id of the created receiving connection on the receiver * @param receiverId Id of the created receiving connection on the receiver
* process. * process.
* @param isNew @true when the receiving connection is newly created.
* *
* @return OK when the receiving connection is successfully created on the * @return OK when the receiving connection is successfully created on the
* receiver process. * receiver process.
* NOT_FOUND when the sender connection was not found. * NOT_FOUND when the sender connection was not found.
* ALREADY_EXISTS the receiving connection is already made.
* CRITICAL_ERROR otherwise. * CRITICAL_ERROR otherwise.
*/ */
ResultStatus registerSender(const sp<IClientManager> &receiver, BufferPoolStatus registerSender(const std::shared_ptr<IClientManager> &receiver,
ConnectionId senderId, ConnectionId senderId,
ConnectionId *receiverId); ConnectionId *receiverId,
bool *isNew);
/** /**
* Closes the specified connection. * Closes the specified connection.
@ -89,7 +80,7 @@ struct ClientManager : public IClientManager {
* NOT_FOUND when the specified connection was not found. * NOT_FOUND when the specified connection was not found.
* CRITICAL_ERROR otherwise. * CRITICAL_ERROR otherwise.
*/ */
ResultStatus close(ConnectionId connectionId); BufferPoolStatus close(ConnectionId connectionId);
/** /**
* Evicts cached allocations. If it's local connection, release the * Evicts cached allocations. If it's local connection, release the
@ -101,7 +92,7 @@ struct ClientManager : public IClientManager {
* NOT_FOUND when the specified connection was not found. * NOT_FOUND when the specified connection was not found.
* CRITICAL_ERROR otherwise. * CRITICAL_ERROR otherwise.
*/ */
ResultStatus flush(ConnectionId connectionId); BufferPoolStatus flush(ConnectionId connectionId);
/** /**
* Allocates a buffer from the specified connection. The output parameter * Allocates a buffer from the specified connection. The output parameter
@ -119,7 +110,7 @@ struct ClientManager : public IClientManager {
* NO_MEMORY when there is no memory. * NO_MEMORY when there is no memory.
* CRITICAL_ERROR otherwise. * CRITICAL_ERROR otherwise.
*/ */
ResultStatus allocate(ConnectionId connectionId, BufferPoolStatus allocate(ConnectionId connectionId,
const std::vector<uint8_t> &params, const std::vector<uint8_t> &params,
native_handle_t **handle, native_handle_t **handle,
std::shared_ptr<BufferPoolData> *buffer); std::shared_ptr<BufferPoolData> *buffer);
@ -132,7 +123,7 @@ struct ClientManager : public IClientManager {
* @param connectionId The id of the receiving connection. * @param connectionId The id of the receiving connection.
* @param transactionId The id for the transaction. * @param transactionId The id for the transaction.
* @param bufferId The id for the buffer. * @param bufferId The id for the buffer.
* @param timestampUs The timestamp of the buffer is being sent. * @param timestampMs The timestamp of the buffer is being sent.
* @param handle The native handle to the allocated buffer. handle * @param handle The native handle to the allocated buffer. handle
* should be cloned before use. * should be cloned before use.
* @param buffer The received buffer. * @param buffer The received buffer.
@ -142,10 +133,10 @@ struct ClientManager : public IClientManager {
* NO_MEMORY when there is no memory. * NO_MEMORY when there is no memory.
* CRITICAL_ERROR otherwise. * CRITICAL_ERROR otherwise.
*/ */
ResultStatus receive(ConnectionId connectionId, BufferPoolStatus receive(ConnectionId connectionId,
TransactionId transactionId, TransactionId transactionId,
BufferId bufferId, BufferId bufferId,
int64_t timestampUs, int64_t timestampMs,
native_handle_t **handle, native_handle_t **handle,
std::shared_ptr<BufferPoolData> *buffer); std::shared_ptr<BufferPoolData> *buffer);
@ -156,17 +147,17 @@ struct ClientManager : public IClientManager {
* @param receiverId The id of the receiving connection. * @param receiverId The id of the receiving connection.
* @param buffer to transfer * @param buffer to transfer
* @param transactionId Id of the transfer transaction. * @param transactionId Id of the transfer transaction.
* @param timestampUs The timestamp of the buffer transaction is being * @param timestampMs The timestamp of the buffer transaction is being
* posted. * posted.
* *
* @return OK when a buffer transaction was posted successfully. * @return OK when a buffer transaction was posted successfully.
* NOT_FOUND when the sending connection was not found. * NOT_FOUND when the sending connection was not found.
* CRITICAL_ERROR otherwise. * CRITICAL_ERROR otherwise.
*/ */
ResultStatus postSend(ConnectionId receiverId, BufferPoolStatus postSend(ConnectionId receiverId,
const std::shared_ptr<BufferPoolData> &buffer, const std::shared_ptr<BufferPoolData> &buffer,
TransactionId *transactionId, TransactionId *transactionId,
int64_t *timestampUs); int64_t *timestampMs);
/** /**
* Time out inactive lingering connections and close. * Time out inactive lingering connections and close.
@ -176,20 +167,16 @@ struct ClientManager : public IClientManager {
/** Destructs the manager of buffer pool clients. */ /** Destructs the manager of buffer pool clients. */
~ClientManager(); ~ClientManager();
private: private:
static sp<ClientManager> sInstance; static std::shared_ptr<ClientManager> sInstance;
static std::mutex sInstanceLock; static std::mutex sInstanceLock;
class Impl; class Impl;
const std::unique_ptr<Impl> mImpl; const std::unique_ptr<Impl> mImpl;
friend class ::ndk::SharedRefBase;
ClientManager(); ClientManager();
}; };
} // namespace implementation } // namespace aidl::android::hardware::media::bufferpool2::implementation
} // namespace V2_0
} // namespace bufferpool
} // namespace media
} // namespace hardware
} // namespace android
#endif // ANDROID_HARDWARE_MEDIA_BUFFERPOOL_V2_0_CLIENTMANAGER_H

View file

@ -0,0 +1,93 @@
/*
* Copyright (C) 2022 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package {
// See: http://go/android-license-faq
// A large-scale-change added 'default_applicable_licenses' to import
// all of the 'license_kinds' from "frameworks_av_license"
// to get the below license kinds:
// SPDX-license-identifier-Apache-2.0
default_applicable_licenses: ["hardware_interfaces_license"],
}
cc_test {
name: "VtsVndkAidlBufferpool2V1_0TargetSingleTest",
test_suites: ["device-tests"],
defaults: ["VtsHalTargetTestDefaults"],
srcs: [
"allocator.cpp",
"single.cpp",
],
shared_libs: [
"libbinder_ndk",
"libcutils",
"libfmq",
"liblog",
"libutils",
"android.hardware.media.bufferpool2-V1-ndk",
],
static_libs: [
"libaidlcommonsupport",
"libstagefright_aidl_bufferpool2"
],
compile_multilib: "both",
}
cc_test {
name: "VtsVndkAidlBufferpool2V1_0TargetMultiTest",
test_suites: ["device-tests"],
defaults: ["VtsHalTargetTestDefaults"],
srcs: [
"allocator.cpp",
"multi.cpp",
],
shared_libs: [
"libbinder_ndk",
"libcutils",
"libfmq",
"liblog",
"libutils",
"android.hardware.media.bufferpool2-V1-ndk",
],
static_libs: [
"libaidlcommonsupport",
"libstagefright_aidl_bufferpool2"
],
compile_multilib: "both",
}
cc_test {
name: "VtsVndkAidlBufferpool2V1_0TargetCondTest",
test_suites: ["device-tests"],
defaults: ["VtsHalTargetTestDefaults"],
srcs: [
"allocator.cpp",
"cond.cpp",
],
shared_libs: [
"libbinder_ndk",
"libcutils",
"libfmq",
"liblog",
"libutils",
"android.hardware.media.bufferpool2-V1-ndk",
],
static_libs: [
"libaidlcommonsupport",
"libstagefright_aidl_bufferpool2"
],
compile_multilib: "both",
}

View file

@ -1,541 +0,0 @@
/*
* Copyright (C) 2021 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
//#define LOG_NDEBUG 0
#define LOG_TAG "BufferpoolUnitTest"
#include <utils/Log.h>
#include <binder/ProcessState.h>
#include <bufferpool/ClientManager.h>
#include <gtest/gtest.h>
#include <hidl/LegacySupport.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unordered_set>
#include <vector>
#include "allocator.h"
using android::hardware::configureRpcThreadpool;
using android::hardware::media::bufferpool::BufferPoolData;
using android::hardware::media::bufferpool::V2_0::IClientManager;
using android::hardware::media::bufferpool::V2_0::ResultStatus;
using android::hardware::media::bufferpool::V2_0::implementation::BufferId;
using android::hardware::media::bufferpool::V2_0::implementation::ClientManager;
using android::hardware::media::bufferpool::V2_0::implementation::ConnectionId;
using android::hardware::media::bufferpool::V2_0::implementation::TransactionId;
using namespace android;
// communication message types between processes.
enum PipeCommand : int32_t {
INIT,
TRANSFER,
STOP,
INIT_OK,
INIT_ERROR,
TRANSFER_OK,
TRANSFER_ERROR,
STOP_OK,
STOP_ERROR,
};
// communication message between processes.
union PipeMessage {
struct {
int32_t command;
int32_t memsetValue;
BufferId bufferId;
ConnectionId connectionId;
TransactionId transactionId;
int64_t timestampUs;
} data;
char array[0];
};
static int32_t kNumIterationCount = 10;
class BufferpoolTest {
public:
BufferpoolTest() : mConnectionValid(false), mManager(nullptr), mAllocator(nullptr) {
mConnectionId = -1;
mReceiverId = -1;
}
~BufferpoolTest() {
if (mConnectionValid) {
mManager->close(mConnectionId);
}
}
protected:
bool mConnectionValid;
ConnectionId mConnectionId;
ConnectionId mReceiverId;
android::sp<ClientManager> mManager;
std::shared_ptr<BufferPoolAllocator> mAllocator;
void setupBufferpoolManager();
};
void BufferpoolTest::setupBufferpoolManager() {
// retrieving per process bufferpool object sp<ClientManager>
mManager = ClientManager::getInstance();
ASSERT_NE(mManager, nullptr) << "unable to get ClientManager\n";
mAllocator = std::make_shared<TestBufferPoolAllocator>();
ASSERT_NE(mAllocator, nullptr) << "unable to create TestBufferPoolAllocator\n";
// set-up local bufferpool connection for sender
ResultStatus status = mManager->create(mAllocator, &mConnectionId);
ASSERT_EQ(status, ResultStatus::OK)
<< "unable to set-up local bufferpool connection for sender\n";
mConnectionValid = true;
}
class BufferpoolUnitTest : public BufferpoolTest, public ::testing::Test {
public:
virtual void SetUp() override { setupBufferpoolManager(); }
virtual void TearDown() override {}
};
class BufferpoolFunctionalityTest : public BufferpoolTest, public ::testing::Test {
public:
virtual void SetUp() override {
mReceiverPid = -1;
ASSERT_TRUE(pipe(mCommandPipeFds) == 0) << "pipe connection failed for commandPipe\n";
ASSERT_TRUE(pipe(mResultPipeFds) == 0) << "pipe connection failed for resultPipe\n";
mReceiverPid = fork();
ASSERT_TRUE(mReceiverPid >= 0) << "fork failed\n";
if (mReceiverPid == 0) {
doReceiver();
// In order to ignore gtest behaviour, wait for being killed from tearDown
pause();
}
setupBufferpoolManager();
}
virtual void TearDown() override {
if (mReceiverPid > 0) {
kill(mReceiverPid, SIGKILL);
int wstatus;
wait(&wstatus);
}
}
protected:
pid_t mReceiverPid;
int mCommandPipeFds[2];
int mResultPipeFds[2];
bool sendMessage(int* pipes, const PipeMessage& message) {
int ret = write(pipes[1], message.array, sizeof(PipeMessage));
return ret == sizeof(PipeMessage);
}
bool receiveMessage(int* pipes, PipeMessage* message) {
int ret = read(pipes[0], message->array, sizeof(PipeMessage));
return ret == sizeof(PipeMessage);
}
void doReceiver();
};
void BufferpoolFunctionalityTest::doReceiver() {
// Configures the threadpool used for handling incoming RPC calls in this process.
configureRpcThreadpool(1 /*threads*/, false /*willJoin*/);
bool receiverRunning = true;
while (receiverRunning) {
PipeMessage message;
receiveMessage(mCommandPipeFds, &message);
ResultStatus err = ResultStatus::OK;
switch (message.data.command) {
case PipeCommand::INIT: {
// receiver manager creation
mManager = ClientManager::getInstance();
if (!mManager) {
message.data.command = PipeCommand::INIT_ERROR;
sendMessage(mResultPipeFds, message);
return;
}
android::status_t status = mManager->registerAsService();
if (status != android::OK) {
message.data.command = PipeCommand::INIT_ERROR;
sendMessage(mResultPipeFds, message);
return;
}
message.data.command = PipeCommand::INIT_OK;
sendMessage(mResultPipeFds, message);
break;
}
case PipeCommand::TRANSFER: {
native_handle_t* receiveHandle = nullptr;
std::shared_ptr<BufferPoolData> receiveBuffer;
err = mManager->receive(message.data.connectionId, message.data.transactionId,
message.data.bufferId, message.data.timestampUs,
&receiveHandle, &receiveBuffer);
if (err != ResultStatus::OK) {
message.data.command = PipeCommand::TRANSFER_ERROR;
sendMessage(mResultPipeFds, message);
return;
}
if (!TestBufferPoolAllocator::Verify(receiveHandle, message.data.memsetValue)) {
message.data.command = PipeCommand::TRANSFER_ERROR;
sendMessage(mResultPipeFds, message);
return;
}
if (receiveHandle) {
native_handle_close(receiveHandle);
native_handle_delete(receiveHandle);
}
receiveHandle = nullptr;
receiveBuffer.reset();
message.data.command = PipeCommand::TRANSFER_OK;
sendMessage(mResultPipeFds, message);
break;
}
case PipeCommand::STOP: {
err = mManager->close(message.data.connectionId);
if (err != ResultStatus::OK) {
message.data.command = PipeCommand::STOP_ERROR;
sendMessage(mResultPipeFds, message);
return;
}
message.data.command = PipeCommand::STOP_OK;
sendMessage(mResultPipeFds, message);
receiverRunning = false;
break;
}
default:
ALOGE("unknown command. try again");
break;
}
}
}
// Buffer allocation test.
// Check whether each buffer allocation is done successfully with unique buffer id.
TEST_F(BufferpoolUnitTest, AllocateBuffer) {
std::vector<uint8_t> vecParams;
getTestAllocatorParams(&vecParams);
std::vector<std::shared_ptr<BufferPoolData>> buffers{};
std::vector<native_handle_t*> allocHandle{};
ResultStatus status;
for (int i = 0; i < kNumIterationCount; ++i) {
native_handle_t* handle = nullptr;
std::shared_ptr<BufferPoolData> buffer{};
status = mManager->allocate(mConnectionId, vecParams, &handle, &buffer);
ASSERT_EQ(status, ResultStatus::OK) << "allocate failed for " << i << "iteration";
buffers.push_back(std::move(buffer));
if (handle) {
allocHandle.push_back(std::move(handle));
}
}
for (int i = 0; i < kNumIterationCount; ++i) {
for (int j = i + 1; j < kNumIterationCount; ++j) {
ASSERT_TRUE(buffers[i]->mId != buffers[j]->mId) << "allocated buffers are not unique";
}
}
// delete the buffer handles
for (auto handle : allocHandle) {
native_handle_close(handle);
native_handle_delete(handle);
}
// clear the vectors
buffers.clear();
allocHandle.clear();
}
// Buffer recycle test.
// Check whether de-allocated buffers are recycled.
TEST_F(BufferpoolUnitTest, RecycleBuffer) {
std::vector<uint8_t> vecParams;
getTestAllocatorParams(&vecParams);
ResultStatus status;
std::vector<BufferId> bid{};
std::vector<native_handle_t*> allocHandle{};
for (int i = 0; i < kNumIterationCount; ++i) {
native_handle_t* handle = nullptr;
std::shared_ptr<BufferPoolData> buffer;
status = mManager->allocate(mConnectionId, vecParams, &handle, &buffer);
ASSERT_EQ(status, ResultStatus::OK) << "allocate failed for " << i << "iteration";
bid.push_back(buffer->mId);
if (handle) {
allocHandle.push_back(std::move(handle));
}
buffer.reset();
}
std::unordered_set<BufferId> set(bid.begin(), bid.end());
ASSERT_EQ(set.size(), 1) << "buffers are not recycled properly";
// delete the buffer handles
for (auto handle : allocHandle) {
native_handle_close(handle);
native_handle_delete(handle);
}
allocHandle.clear();
}
// Validate cache evict and invalidate APIs.
TEST_F(BufferpoolUnitTest, FlushTest) {
std::vector<uint8_t> vecParams;
getTestAllocatorParams(&vecParams);
ResultStatus status = mManager->registerSender(mManager, mConnectionId, &mReceiverId);
ASSERT_TRUE(status == ResultStatus::ALREADY_EXISTS && mReceiverId == mConnectionId);
// testing empty flush
status = mManager->flush(mConnectionId);
ASSERT_EQ(status, ResultStatus::OK) << "failed to flush connection : " << mConnectionId;
std::vector<std::shared_ptr<BufferPoolData>> senderBuffer{};
std::vector<native_handle_t*> allocHandle{};
std::vector<TransactionId> tid{};
std::vector<int64_t> timestampUs{};
std::map<TransactionId, BufferId> bufferMap{};
for (int i = 0; i < kNumIterationCount; i++) {
int64_t postUs;
TransactionId transactionId;
native_handle_t* handle = nullptr;
std::shared_ptr<BufferPoolData> buffer{};
status = mManager->allocate(mConnectionId, vecParams, &handle, &buffer);
ASSERT_EQ(status, ResultStatus::OK) << "allocate failed for " << i << " iteration";
ASSERT_TRUE(TestBufferPoolAllocator::Fill(handle, i));
status = mManager->postSend(mReceiverId, buffer, &transactionId, &postUs);
ASSERT_EQ(status, ResultStatus::OK) << "unable to post send transaction on bufferpool";
timestampUs.push_back(postUs);
tid.push_back(transactionId);
bufferMap.insert({transactionId, buffer->mId});
senderBuffer.push_back(std::move(buffer));
if (handle) {
allocHandle.push_back(std::move(handle));
}
buffer.reset();
}
status = mManager->flush(mConnectionId);
ASSERT_EQ(status, ResultStatus::OK) << "failed to flush connection : " << mConnectionId;
std::shared_ptr<BufferPoolData> receiverBuffer{};
native_handle_t* recvHandle = nullptr;
for (int i = 0; i < kNumIterationCount; i++) {
status = mManager->receive(mReceiverId, tid[i], senderBuffer[i]->mId, timestampUs[i],
&recvHandle, &receiverBuffer);
ASSERT_EQ(status, ResultStatus::OK) << "receive failed for buffer " << senderBuffer[i]->mId;
// find the buffer id from transaction id
auto findIt = bufferMap.find(tid[i]);
ASSERT_NE(findIt, bufferMap.end()) << "inconsistent buffer mapping";
// buffer id received must be same as the buffer id sent
ASSERT_EQ(findIt->second, receiverBuffer->mId) << "invalid buffer received";
ASSERT_TRUE(TestBufferPoolAllocator::Verify(recvHandle, i))
<< "Message received not same as that sent";
bufferMap.erase(findIt);
if (recvHandle) {
native_handle_close(recvHandle);
native_handle_delete(recvHandle);
}
recvHandle = nullptr;
receiverBuffer.reset();
}
ASSERT_EQ(bufferMap.size(), 0) << "buffers received is less than the number of buffers sent";
for (auto handle : allocHandle) {
native_handle_close(handle);
native_handle_delete(handle);
}
allocHandle.clear();
senderBuffer.clear();
timestampUs.clear();
}
// Buffer transfer test between processes.
TEST_F(BufferpoolFunctionalityTest, TransferBuffer) {
// initialize the receiver
PipeMessage message;
message.data.command = PipeCommand::INIT;
sendMessage(mCommandPipeFds, message);
ASSERT_TRUE(receiveMessage(mResultPipeFds, &message)) << "receiveMessage failed\n";
ASSERT_EQ(message.data.command, PipeCommand::INIT_OK) << "receiver init failed";
android::sp<IClientManager> receiver = IClientManager::getService();
ASSERT_NE(receiver, nullptr) << "getService failed for receiver\n";
ConnectionId receiverId;
ResultStatus status = mManager->registerSender(receiver, mConnectionId, &receiverId);
ASSERT_EQ(status, ResultStatus::OK)
<< "registerSender failed for connection id " << mConnectionId << "\n";
std::vector<uint8_t> vecParams;
getTestAllocatorParams(&vecParams);
for (int i = 0; i < kNumIterationCount; ++i) {
native_handle_t* handle = nullptr;
std::shared_ptr<BufferPoolData> buffer;
status = mManager->allocate(mConnectionId, vecParams, &handle, &buffer);
ASSERT_EQ(status, ResultStatus::OK) << "allocate failed for " << i << "iteration";
ASSERT_TRUE(TestBufferPoolAllocator::Fill(handle, i))
<< "Fill fail for buffer handle " << handle << "\n";
// send the buffer to the receiver
int64_t postUs;
TransactionId transactionId;
status = mManager->postSend(receiverId, buffer, &transactionId, &postUs);
ASSERT_EQ(status, ResultStatus::OK)
<< "postSend failed for receiver " << receiverId << "\n";
// PipeMessage message;
message.data.command = PipeCommand::TRANSFER;
message.data.memsetValue = i;
message.data.bufferId = buffer->mId;
message.data.connectionId = receiverId;
message.data.transactionId = transactionId;
message.data.timestampUs = postUs;
sendMessage(mCommandPipeFds, message);
// delete buffer handle
if (handle) {
native_handle_close(handle);
native_handle_delete(handle);
}
ASSERT_TRUE(receiveMessage(mResultPipeFds, &message)) << "receiveMessage failed\n";
ASSERT_EQ(message.data.command, PipeCommand::TRANSFER_OK)
<< "received error during buffer transfer\n";
}
message.data.command = PipeCommand::STOP;
sendMessage(mCommandPipeFds, message);
ASSERT_TRUE(receiveMessage(mResultPipeFds, &message)) << "receiveMessage failed\n";
ASSERT_EQ(message.data.command, PipeCommand::STOP_OK)
<< "received error during buffer transfer\n";
}
/* Validate bufferpool for following corner cases:
1. invalid connectionID
2. invalid receiver
3. when sender is not registered
4. when connection is closed
*/
// TODO: Enable when the issue in b/212196495 is fixed
TEST_F(BufferpoolFunctionalityTest, DISABLED_ValidityTest) {
std::vector<uint8_t> vecParams;
getTestAllocatorParams(&vecParams);
std::shared_ptr<BufferPoolData> senderBuffer;
native_handle_t* allocHandle = nullptr;
// call allocate() on a random connection id
ConnectionId randomId = rand();
ResultStatus status = mManager->allocate(randomId, vecParams, &allocHandle, &senderBuffer);
EXPECT_TRUE(status == ResultStatus::NOT_FOUND);
// initialize the receiver
PipeMessage message;
message.data.command = PipeCommand::INIT;
sendMessage(mCommandPipeFds, message);
ASSERT_TRUE(receiveMessage(mResultPipeFds, &message)) << "receiveMessage failed\n";
ASSERT_EQ(message.data.command, PipeCommand::INIT_OK) << "receiver init failed";
allocHandle = nullptr;
senderBuffer.reset();
status = mManager->allocate(mConnectionId, vecParams, &allocHandle, &senderBuffer);
ASSERT_TRUE(TestBufferPoolAllocator::Fill(allocHandle, 0x77));
// send buffers w/o registering sender
int64_t postUs;
TransactionId transactionId;
// random receiver
status = mManager->postSend(randomId, senderBuffer, &transactionId, &postUs);
ASSERT_NE(status, ResultStatus::OK) << "bufferpool shouldn't allow send on random receiver";
// establish connection
android::sp<IClientManager> receiver = IClientManager::getService();
ASSERT_NE(receiver, nullptr) << "getService failed for receiver\n";
ConnectionId receiverId;
status = mManager->registerSender(receiver, mConnectionId, &receiverId);
ASSERT_EQ(status, ResultStatus::OK)
<< "registerSender failed for connection id " << mConnectionId << "\n";
allocHandle = nullptr;
senderBuffer.reset();
status = mManager->allocate(mConnectionId, vecParams, &allocHandle, &senderBuffer);
ASSERT_EQ(status, ResultStatus::OK) << "allocate failed for connection " << mConnectionId;
ASSERT_TRUE(TestBufferPoolAllocator::Fill(allocHandle, 0x88));
// send the buffer to the receiver
status = mManager->postSend(receiverId, senderBuffer, &transactionId, &postUs);
ASSERT_EQ(status, ResultStatus::OK) << "postSend failed for receiver " << receiverId << "\n";
// PipeMessage message;
message.data.command = PipeCommand::TRANSFER;
message.data.memsetValue = 0x88;
message.data.bufferId = senderBuffer->mId;
message.data.connectionId = receiverId;
message.data.transactionId = transactionId;
message.data.timestampUs = postUs;
sendMessage(mCommandPipeFds, message);
ASSERT_TRUE(receiveMessage(mResultPipeFds, &message)) << "receiveMessage failed\n";
ASSERT_EQ(message.data.command, PipeCommand::TRANSFER_OK)
<< "received error during buffer transfer\n";
if (allocHandle) {
native_handle_close(allocHandle);
native_handle_delete(allocHandle);
}
message.data.command = PipeCommand::STOP;
sendMessage(mCommandPipeFds, message);
ASSERT_TRUE(receiveMessage(mResultPipeFds, &message)) << "receiveMessage failed\n";
ASSERT_EQ(message.data.command, PipeCommand::STOP_OK)
<< "received error during buffer transfer\n";
// try to send msg to closed connection
status = mManager->postSend(receiverId, senderBuffer, &transactionId, &postUs);
ASSERT_NE(status, ResultStatus::OK) << "bufferpool shouldn't allow send on closed connection";
}
int main(int argc, char** argv) {
android::hardware::details::setTrebleTestingOverride(true);
::testing::InitGoogleTest(&argc, argv);
int status = RUN_ALL_TESTS();
ALOGV("Test result = %d\n", status);
return status;
}

View file

@ -1,5 +1,5 @@
/* /*
* Copyright (C) 2018 The Android Open Source Project * Copyright (C) 2022 The Android Open Source Project
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License. * you may not use this file except in compliance with the License.
@ -139,7 +139,7 @@ IpcMutex *IpcMutex::Import(void *pMutex) {
} }
ResultStatus TestBufferPoolAllocator::allocate( BufferPoolStatus TestBufferPoolAllocator::allocate(
const std::vector<uint8_t> &params, const std::vector<uint8_t> &params,
std::shared_ptr<BufferPoolAllocation> *alloc, std::shared_ptr<BufferPoolAllocation> *alloc,
size_t *allocSize) { size_t *allocSize) {

View file

@ -1,5 +1,5 @@
/* /*
* Copyright (C) 2018 The Android Open Source Project * Copyright (C) 2022 The Android Open Source Project
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License. * you may not use this file except in compliance with the License.
@ -14,17 +14,18 @@
* limitations under the License. * limitations under the License.
*/ */
#ifndef VNDK_HIDL_BUFFERPOOL_V2_0_ALLOCATOR_H #pragma once
#define VNDK_HIDL_BUFFERPOOL_V2_0_ALLOCATOR_H
#include <pthread.h> #include <pthread.h>
#include <bufferpool/BufferPoolTypes.h> #include <bufferpool2/BufferPoolTypes.h>
using android::hardware::media::bufferpool::V2_0::ResultStatus; using aidl::android::hardware::media::bufferpool2::implementation::
using android::hardware::media::bufferpool::V2_0::implementation:: BufferPoolStatus;
using aidl::android::hardware::media::bufferpool2::implementation::
BufferPoolAllocation; BufferPoolAllocation;
using android::hardware::media::bufferpool::V2_0::implementation:: using aidl::android::hardware::media::bufferpool2::implementation::
BufferPoolAllocator; BufferPoolAllocator;
using aidl::android::hardware::media::bufferpool2::ResultStatus;
struct IpcMutex { struct IpcMutex {
pthread_mutex_t lock; pthread_mutex_t lock;
@ -44,7 +45,7 @@ class TestBufferPoolAllocator : public BufferPoolAllocator {
~TestBufferPoolAllocator() override {} ~TestBufferPoolAllocator() override {}
ResultStatus allocate(const std::vector<uint8_t> &params, BufferPoolStatus allocate(const std::vector<uint8_t> &params,
std::shared_ptr<BufferPoolAllocation> *alloc, std::shared_ptr<BufferPoolAllocation> *alloc,
size_t *allocSize) override; size_t *allocSize) override;
@ -60,9 +61,7 @@ class TestBufferPoolAllocator : public BufferPoolAllocator {
static bool UnmapMemoryForMutex(void *mem); static bool UnmapMemoryForMutex(void *mem);
}; };
// retrieve buffer allocator paramters // retrieve buffer allocator parameters
void getTestAllocatorParams(std::vector<uint8_t> *params); void getTestAllocatorParams(std::vector<uint8_t> *params);
void getIpcMutexParams(std::vector<uint8_t> *params); void getIpcMutexParams(std::vector<uint8_t> *params);
#endif // VNDK_HIDL_BUFFERPOOL_V2_0_ALLOCATOR_H

View file

@ -1,5 +1,5 @@
/* /*
* Copyright (C) 2018 The Android Open Source Project * Copyright (C) 2022 The Android Open Source Project
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License. * you may not use this file except in compliance with the License.
@ -18,35 +18,36 @@
#include <gtest/gtest.h> #include <gtest/gtest.h>
#include <android/binder_manager.h>
#include <android/binder_process.h>
#include <android/binder_stability.h>
#include <android-base/logging.h> #include <android-base/logging.h>
#include <binder/ProcessState.h> #include <bufferpool2/ClientManager.h>
#include <bufferpool/ClientManager.h>
#include <errno.h> #include <errno.h>
#include <hidl/HidlSupport.h>
#include <hidl/HidlTransportSupport.h>
#include <hidl/LegacySupport.h>
#include <hidl/Status.h>
#include <signal.h> #include <signal.h>
#include <sys/types.h> #include <sys/types.h>
#include <sys/wait.h> #include <sys/wait.h>
#include <unistd.h> #include <unistd.h>
#include <iostream> #include <iostream>
#include <memory> #include <memory>
#include <vector> #include <vector>
#include "allocator.h" #include "allocator.h"
using android::hardware::configureRpcThreadpool; using aidl::android::hardware::media::bufferpool2::IClientManager;
using android::hardware::hidl_handle; using aidl::android::hardware::media::bufferpool2::ResultStatus;
using android::hardware::media::bufferpool::V2_0::IClientManager; using aidl::android::hardware::media::bufferpool2::implementation::BufferId;
using android::hardware::media::bufferpool::V2_0::ResultStatus; using aidl::android::hardware::media::bufferpool2::implementation::ClientManager;
using android::hardware::media::bufferpool::V2_0::implementation::BufferId; using aidl::android::hardware::media::bufferpool2::implementation::ConnectionId;
using android::hardware::media::bufferpool::V2_0::implementation::ClientManager; using aidl::android::hardware::media::bufferpool2::implementation::TransactionId;
using android::hardware::media::bufferpool::V2_0::implementation::ConnectionId; using aidl::android::hardware::media::bufferpool2::BufferPoolData;
using android::hardware::media::bufferpool::V2_0::implementation::TransactionId;
using android::hardware::media::bufferpool::BufferPoolData;
namespace { namespace {
const std::string testInstance = std::string() + ClientManager::descriptor + "/condtest";
// communication message types between processes. // communication message types between processes.
enum PipeCommand : int32_t { enum PipeCommand : int32_t {
INIT_OK = 0, INIT_OK = 0,
@ -74,7 +75,7 @@ constexpr int kSignalInt = 200;
class BufferpoolMultiTest : public ::testing::Test { class BufferpoolMultiTest : public ::testing::Test {
public: public:
virtual void SetUp() override { virtual void SetUp() override {
ResultStatus status; BufferPoolStatus status;
mReceiverPid = -1; mReceiverPid = -1;
mConnectionValid = false; mConnectionValid = false;
@ -119,7 +120,7 @@ class BufferpoolMultiTest : public ::testing::Test {
RecordProperty("description", description); RecordProperty("description", description);
} }
android::sp<ClientManager> mManager; std::shared_ptr<ClientManager> mManager;
std::shared_ptr<BufferPoolAllocator> mAllocator; std::shared_ptr<BufferPoolAllocator> mAllocator;
bool mConnectionValid; bool mConnectionValid;
ConnectionId mConnectionId; ConnectionId mConnectionId;
@ -138,7 +139,8 @@ class BufferpoolMultiTest : public ::testing::Test {
} }
void doReceiver() { void doReceiver() {
configureRpcThreadpool(1, false); ABinderProcess_setThreadPoolMaxThreadCount(1);
ABinderProcess_startThreadPool();
PipeMessage message; PipeMessage message;
mManager = ClientManager::getInstance(); mManager = ClientManager::getInstance();
if (!mManager) { if (!mManager) {
@ -146,7 +148,11 @@ class BufferpoolMultiTest : public ::testing::Test {
sendMessage(mResultPipeFds, message); sendMessage(mResultPipeFds, message);
return; return;
} }
android::status_t status = mManager->registerAsService(); auto binder = mManager->asBinder();
AIBinder_forceDowngradeToSystemStability(binder.get());
binder_status_t status =
AServiceManager_addService(binder.get(), testInstance.c_str());
CHECK_EQ(status, STATUS_OK);
if (status != android::OK) { if (status != android::OK) {
message.data.command = PipeCommand::INIT_ERROR; message.data.command = PipeCommand::INIT_ERROR;
sendMessage(mResultPipeFds, message); sendMessage(mResultPipeFds, message);
@ -162,7 +168,7 @@ class BufferpoolMultiTest : public ::testing::Test {
std::shared_ptr<BufferPoolData> rbuffer; std::shared_ptr<BufferPoolData> rbuffer;
void *mem = nullptr; void *mem = nullptr;
IpcMutex *mutex = nullptr; IpcMutex *mutex = nullptr;
ResultStatus status = mManager->receive( BufferPoolStatus status = mManager->receive(
message.data.connectionId, message.data.transactionId, message.data.connectionId, message.data.transactionId,
message.data.bufferId, message.data.timestampUs, &rhandle, &rbuffer); message.data.bufferId, message.data.timestampUs, &rhandle, &rbuffer);
mManager->close(message.data.connectionId); mManager->close(message.data.connectionId);
@ -201,16 +207,22 @@ class BufferpoolMultiTest : public ::testing::Test {
// Buffer transfer test between processes. // Buffer transfer test between processes.
TEST_F(BufferpoolMultiTest, TransferBuffer) { TEST_F(BufferpoolMultiTest, TransferBuffer) {
ResultStatus status; BufferPoolStatus status;
PipeMessage message; PipeMessage message;
ASSERT_TRUE(receiveMessage(mResultPipeFds, &message)); ASSERT_TRUE(receiveMessage(mResultPipeFds, &message));
ABinderProcess_setThreadPoolMaxThreadCount(1);
ABinderProcess_startThreadPool();
android::sp<IClientManager> receiver = IClientManager::getService();
std::shared_ptr<IClientManager> receiver =
IClientManager::fromBinder(
ndk::SpAIBinder(AServiceManager_waitForService(testInstance.c_str())));
ASSERT_NE(receiver, nullptr);
ConnectionId receiverId; ConnectionId receiverId;
ASSERT_TRUE((bool)receiver);
status = mManager->registerSender(receiver, mConnectionId, &receiverId); bool isNew = true;
status = mManager->registerSender(receiver, mConnectionId, &receiverId, &isNew);
ASSERT_TRUE(status == ResultStatus::OK); ASSERT_TRUE(status == ResultStatus::OK);
{ {
native_handle_t *shandle = nullptr; native_handle_t *shandle = nullptr;
@ -261,7 +273,6 @@ TEST_F(BufferpoolMultiTest, TransferBuffer) {
} // anonymous namespace } // anonymous namespace
int main(int argc, char** argv) { int main(int argc, char** argv) {
android::hardware::details::setTrebleTestingOverride(true);
::testing::InitGoogleTest(&argc, argv); ::testing::InitGoogleTest(&argc, argv);
int status = RUN_ALL_TESTS(); int status = RUN_ALL_TESTS();
LOG(INFO) << "Test result = " << status; LOG(INFO) << "Test result = " << status;

View file

@ -1,5 +1,5 @@
/* /*
* Copyright (C) 2018 The Android Open Source Project * Copyright (C) 2022 The Android Open Source Project
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License. * you may not use this file except in compliance with the License.
@ -18,34 +18,35 @@
#include <gtest/gtest.h> #include <gtest/gtest.h>
#include <android/binder_manager.h>
#include <android/binder_process.h>
#include <android/binder_stability.h>
#include <android-base/logging.h> #include <android-base/logging.h>
#include <binder/ProcessState.h> #include <bufferpool2/ClientManager.h>
#include <bufferpool/ClientManager.h>
#include <hidl/HidlSupport.h>
#include <hidl/HidlTransportSupport.h>
#include <hidl/LegacySupport.h>
#include <hidl/Status.h>
#include <signal.h> #include <signal.h>
#include <sys/types.h> #include <sys/types.h>
#include <sys/wait.h> #include <sys/wait.h>
#include <unistd.h> #include <unistd.h>
#include <iostream> #include <iostream>
#include <memory> #include <memory>
#include <vector> #include <vector>
#include "allocator.h" #include "allocator.h"
using android::hardware::configureRpcThreadpool; using aidl::android::hardware::media::bufferpool2::IClientManager;
using android::hardware::hidl_handle; using aidl::android::hardware::media::bufferpool2::ResultStatus;
using android::hardware::media::bufferpool::V2_0::IClientManager; using aidl::android::hardware::media::bufferpool2::implementation::BufferId;
using android::hardware::media::bufferpool::V2_0::ResultStatus; using aidl::android::hardware::media::bufferpool2::implementation::ClientManager;
using android::hardware::media::bufferpool::V2_0::implementation::BufferId; using aidl::android::hardware::media::bufferpool2::implementation::ConnectionId;
using android::hardware::media::bufferpool::V2_0::implementation::ClientManager; using aidl::android::hardware::media::bufferpool2::implementation::TransactionId;
using android::hardware::media::bufferpool::V2_0::implementation::ConnectionId; using aidl::android::hardware::media::bufferpool2::BufferPoolData;
using android::hardware::media::bufferpool::V2_0::implementation::TransactionId;
using android::hardware::media::bufferpool::BufferPoolData;
namespace { namespace {
const std::string testInstance = std::string() + ClientManager::descriptor + "/multitest";
// communication message types between processes. // communication message types between processes.
enum PipeCommand : int32_t { enum PipeCommand : int32_t {
INIT_OK = 0, INIT_OK = 0,
@ -71,7 +72,7 @@ union PipeMessage {
class BufferpoolMultiTest : public ::testing::Test { class BufferpoolMultiTest : public ::testing::Test {
public: public:
virtual void SetUp() override { virtual void SetUp() override {
ResultStatus status; BufferPoolStatus status;
mReceiverPid = -1; mReceiverPid = -1;
mConnectionValid = false; mConnectionValid = false;
@ -87,7 +88,6 @@ class BufferpoolMultiTest : public ::testing::Test {
// tearDown // tearDown
pause(); pause();
} }
mManager = ClientManager::getInstance(); mManager = ClientManager::getInstance();
ASSERT_NE(mManager, nullptr); ASSERT_NE(mManager, nullptr);
@ -116,7 +116,7 @@ class BufferpoolMultiTest : public ::testing::Test {
RecordProperty("description", description); RecordProperty("description", description);
} }
android::sp<ClientManager> mManager; std::shared_ptr<ClientManager> mManager;
std::shared_ptr<BufferPoolAllocator> mAllocator; std::shared_ptr<BufferPoolAllocator> mAllocator;
bool mConnectionValid; bool mConnectionValid;
ConnectionId mConnectionId; ConnectionId mConnectionId;
@ -135,7 +135,8 @@ class BufferpoolMultiTest : public ::testing::Test {
} }
void doReceiver() { void doReceiver() {
configureRpcThreadpool(1, false); ABinderProcess_setThreadPoolMaxThreadCount(1);
ABinderProcess_startThreadPool();
PipeMessage message; PipeMessage message;
mManager = ClientManager::getInstance(); mManager = ClientManager::getInstance();
if (!mManager) { if (!mManager) {
@ -143,7 +144,11 @@ class BufferpoolMultiTest : public ::testing::Test {
sendMessage(mResultPipeFds, message); sendMessage(mResultPipeFds, message);
return; return;
} }
android::status_t status = mManager->registerAsService(); auto binder = mManager->asBinder();
AIBinder_forceDowngradeToSystemStability(binder.get());
binder_status_t status =
AServiceManager_addService(binder.get(), testInstance.c_str());
CHECK_EQ(status, STATUS_OK);
if (status != android::OK) { if (status != android::OK) {
message.data.command = PipeCommand::INIT_ERROR; message.data.command = PipeCommand::INIT_ERROR;
sendMessage(mResultPipeFds, message); sendMessage(mResultPipeFds, message);
@ -156,7 +161,7 @@ class BufferpoolMultiTest : public ::testing::Test {
{ {
native_handle_t *rhandle = nullptr; native_handle_t *rhandle = nullptr;
std::shared_ptr<BufferPoolData> rbuffer; std::shared_ptr<BufferPoolData> rbuffer;
ResultStatus status = mManager->receive( BufferPoolStatus status = mManager->receive(
message.data.connectionId, message.data.transactionId, message.data.connectionId, message.data.transactionId,
message.data.bufferId, message.data.timestampUs, &rhandle, &rbuffer); message.data.bufferId, message.data.timestampUs, &rhandle, &rbuffer);
mManager->close(message.data.connectionId); mManager->close(message.data.connectionId);
@ -182,16 +187,20 @@ class BufferpoolMultiTest : public ::testing::Test {
// Buffer transfer test between processes. // Buffer transfer test between processes.
TEST_F(BufferpoolMultiTest, TransferBuffer) { TEST_F(BufferpoolMultiTest, TransferBuffer) {
ResultStatus status; BufferPoolStatus status;
PipeMessage message; PipeMessage message;
ASSERT_TRUE(receiveMessage(mResultPipeFds, &message)); ASSERT_TRUE(receiveMessage(mResultPipeFds, &message));
ABinderProcess_setThreadPoolMaxThreadCount(1);
ABinderProcess_startThreadPool();
android::sp<IClientManager> receiver = IClientManager::getService(); std::shared_ptr<IClientManager> receiver = IClientManager::fromBinder(ndk::SpAIBinder(
AServiceManager_waitForService(testInstance.c_str())));
ASSERT_NE(receiver, nullptr);
ConnectionId receiverId; ConnectionId receiverId;
ASSERT_TRUE((bool)receiver);
status = mManager->registerSender(receiver, mConnectionId, &receiverId); bool isNew = true;
status = mManager->registerSender(receiver, mConnectionId, &receiverId, &isNew);
ASSERT_TRUE(status == ResultStatus::OK); ASSERT_TRUE(status == ResultStatus::OK);
{ {
native_handle_t *shandle = nullptr; native_handle_t *shandle = nullptr;
@ -227,7 +236,6 @@ TEST_F(BufferpoolMultiTest, TransferBuffer) {
} // anonymous namespace } // anonymous namespace
int main(int argc, char** argv) { int main(int argc, char** argv) {
android::hardware::details::setTrebleTestingOverride(true);
::testing::InitGoogleTest(&argc, argv); ::testing::InitGoogleTest(&argc, argv);
int status = RUN_ALL_TESTS(); int status = RUN_ALL_TESTS();
LOG(INFO) << "Test result = " << status; LOG(INFO) << "Test result = " << status;

View file

@ -1,5 +1,5 @@
/* /*
* Copyright (C) 2018 The Android Open Source Project * Copyright (C) 2022 The Android Open Source Project
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License. * you may not use this file except in compliance with the License.
@ -20,24 +20,19 @@
#include <android-base/logging.h> #include <android-base/logging.h>
#include <binder/ProcessState.h> #include <binder/ProcessState.h>
#include <bufferpool/ClientManager.h> #include <bufferpool2/ClientManager.h>
#include <hidl/HidlSupport.h>
#include <hidl/HidlTransportSupport.h>
#include <hidl/LegacySupport.h>
#include <hidl/Status.h>
#include <unistd.h> #include <unistd.h>
#include <iostream> #include <iostream>
#include <memory> #include <memory>
#include <vector> #include <vector>
#include "allocator.h" #include "allocator.h"
using android::hardware::hidl_handle; using aidl::android::hardware::media::bufferpool2::implementation::BufferId;
using android::hardware::media::bufferpool::V2_0::ResultStatus; using aidl::android::hardware::media::bufferpool2::implementation::BufferPoolStatus;
using android::hardware::media::bufferpool::V2_0::implementation::BufferId; using aidl::android::hardware::media::bufferpool2::implementation::ClientManager;
using android::hardware::media::bufferpool::V2_0::implementation::ClientManager; using aidl::android::hardware::media::bufferpool2::implementation::ConnectionId;
using android::hardware::media::bufferpool::V2_0::implementation::ConnectionId; using aidl::android::hardware::media::bufferpool2::implementation::TransactionId;
using android::hardware::media::bufferpool::V2_0::implementation::TransactionId; using aidl::android::hardware::media::bufferpool2::BufferPoolData;
using android::hardware::media::bufferpool::BufferPoolData;
namespace { namespace {
@ -51,7 +46,7 @@ constexpr static int kNumRecycleTest = 3;
class BufferpoolSingleTest : public ::testing::Test { class BufferpoolSingleTest : public ::testing::Test {
public: public:
virtual void SetUp() override { virtual void SetUp() override {
ResultStatus status; BufferPoolStatus status;
mConnectionValid = false; mConnectionValid = false;
mManager = ClientManager::getInstance(); mManager = ClientManager::getInstance();
@ -65,8 +60,9 @@ class BufferpoolSingleTest : public ::testing::Test {
mConnectionValid = true; mConnectionValid = true;
status = mManager->registerSender(mManager, mConnectionId, &mReceiverId); bool isNew = true;
ASSERT_TRUE(status == ResultStatus::ALREADY_EXISTS && status = mManager->registerSender(mManager, mConnectionId, &mReceiverId, &isNew);
ASSERT_TRUE(status == ResultStatus::OK && isNew == false &&
mReceiverId == mConnectionId); mReceiverId == mConnectionId);
} }
@ -81,7 +77,7 @@ class BufferpoolSingleTest : public ::testing::Test {
RecordProperty("description", description); RecordProperty("description", description);
} }
android::sp<ClientManager> mManager; std::shared_ptr<ClientManager> mManager;
std::shared_ptr<BufferPoolAllocator> mAllocator; std::shared_ptr<BufferPoolAllocator> mAllocator;
bool mConnectionValid; bool mConnectionValid;
ConnectionId mConnectionId; ConnectionId mConnectionId;
@ -93,7 +89,7 @@ class BufferpoolSingleTest : public ::testing::Test {
// Check whether each buffer allocation is done successfully with // Check whether each buffer allocation is done successfully with
// unique buffer id. // unique buffer id.
TEST_F(BufferpoolSingleTest, AllocateBuffer) { TEST_F(BufferpoolSingleTest, AllocateBuffer) {
ResultStatus status; BufferPoolStatus status;
std::vector<uint8_t> vecParams; std::vector<uint8_t> vecParams;
getTestAllocatorParams(&vecParams); getTestAllocatorParams(&vecParams);
@ -118,7 +114,7 @@ TEST_F(BufferpoolSingleTest, AllocateBuffer) {
// Buffer recycle test. // Buffer recycle test.
// Check whether de-allocated buffers are recycled. // Check whether de-allocated buffers are recycled.
TEST_F(BufferpoolSingleTest, RecycleBuffer) { TEST_F(BufferpoolSingleTest, RecycleBuffer) {
ResultStatus status; BufferPoolStatus status;
std::vector<uint8_t> vecParams; std::vector<uint8_t> vecParams;
getTestAllocatorParams(&vecParams); getTestAllocatorParams(&vecParams);
@ -143,7 +139,7 @@ TEST_F(BufferpoolSingleTest, RecycleBuffer) {
// Buffer transfer test. // Buffer transfer test.
// Check whether buffer is transferred to another client successfully. // Check whether buffer is transferred to another client successfully.
TEST_F(BufferpoolSingleTest, TransferBuffer) { TEST_F(BufferpoolSingleTest, TransferBuffer) {
ResultStatus status; BufferPoolStatus status;
std::vector<uint8_t> vecParams; std::vector<uint8_t> vecParams;
getTestAllocatorParams(&vecParams); getTestAllocatorParams(&vecParams);
std::shared_ptr<BufferPoolData> sbuffer, rbuffer; std::shared_ptr<BufferPoolData> sbuffer, rbuffer;
@ -151,14 +147,14 @@ TEST_F(BufferpoolSingleTest, TransferBuffer) {
native_handle_t *recvHandle = nullptr; native_handle_t *recvHandle = nullptr;
TransactionId transactionId; TransactionId transactionId;
int64_t postUs; int64_t postMs;
status = mManager->allocate(mConnectionId, vecParams, &allocHandle, &sbuffer); status = mManager->allocate(mConnectionId, vecParams, &allocHandle, &sbuffer);
ASSERT_TRUE(status == ResultStatus::OK); ASSERT_TRUE(status == ResultStatus::OK);
ASSERT_TRUE(TestBufferPoolAllocator::Fill(allocHandle, 0x77)); ASSERT_TRUE(TestBufferPoolAllocator::Fill(allocHandle, 0x77));
status = mManager->postSend(mReceiverId, sbuffer, &transactionId, &postUs); status = mManager->postSend(mReceiverId, sbuffer, &transactionId, &postMs);
ASSERT_TRUE(status == ResultStatus::OK); ASSERT_TRUE(status == ResultStatus::OK);
status = mManager->receive(mReceiverId, transactionId, sbuffer->mId, postUs, status = mManager->receive(mReceiverId, transactionId, sbuffer->mId, postMs,
&recvHandle, &rbuffer); &recvHandle, &rbuffer);
EXPECT_TRUE(status == ResultStatus::OK); EXPECT_TRUE(status == ResultStatus::OK);
ASSERT_TRUE(TestBufferPoolAllocator::Verify(recvHandle, 0x77)); ASSERT_TRUE(TestBufferPoolAllocator::Verify(recvHandle, 0x77));