pmem allocator in gralloc. Enabled for all surfaces. Currently it uses a lot more address space than needed.

Mathias Agopian 2009-06-11 16:32:05 -07:00
parent 72c8508db9
commit 8c4ab1fa14
7 changed files with 453 additions and 46 deletions
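The commit works by carving each graphics buffer out of one large pmem heap that is opened and mmap()ed once per process, with a small best-fit sub-allocator handing out page-aligned offsets inside it. Below is a minimal sketch of that per-buffer path as gralloc_alloc_buffer() implements it further down; the function name alloc_pmem_buffer and its parameters are illustrative only, error handling and the ashmem fallback are left out, and PMEM_CONNECT/PMEM_MAP come from <linux/android_pmem.h>.

#include <errno.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/android_pmem.h>
#include "allocator.h"

// One process-wide sub-allocator over an 8 MiB pmem heap, as in gralloc.cpp.
static SimpleBestFitAllocator sAllocator(8*1024*1024);

// master_fd/master_base come from a one-time init_pmem_area()-style setup:
// open("/dev/pmem") once, then mmap() the whole heap.
static int alloc_pmem_buffer(int master_fd, void* master_base, size_t size,
                             int* out_fd, void** out_vaddr)
{
    size = (size + 4095) & ~size_t(4095);      // roundUpToPageSize()
    int offset = sAllocator.allocate(size);    // sub-allocate inside the heap
    if (offset < 0)
        return -ENOMEM;

    int fd = open("/dev/pmem", O_RDWR, 0);     // one fd per buffer
    ioctl(fd, PMEM_CONNECT, master_fd);        // tie it to the master region
    struct pmem_region sub = { offset, size }; // then expose only this window
    ioctl(fd, PMEM_MAP, &sub);

    *out_fd = fd;
    *out_vaddr = (char*)master_base + offset;  // already mapped via the master mmap
    return 0;
}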

hardware.c

@@ -52,7 +52,7 @@ static const char *variant_keys[] = {
#define HAL_VARIANT_KEYS_COUNT (sizeof(variant_keys)/sizeof(variant_keys[0]))
/**
* Load the file defined by the variant and if succesfull
* Load the file defined by the variant and if successful
* return the dlopen handle and the hmi.
* @return 0 = success, !0 = failure.
*/

modules/gralloc/Android.mk

@@ -21,7 +21,13 @@ include $(CLEAR_VARS)
LOCAL_PRELINK_MODULE := false
LOCAL_MODULE_PATH := $(TARGET_OUT_SHARED_LIBRARIES)/hw
LOCAL_SHARED_LIBRARIES := liblog libcutils
LOCAL_SRC_FILES := gralloc.cpp mapper.cpp framebuffer.cpp
LOCAL_SRC_FILES := \
allocator.cpp \
gralloc.cpp \
framebuffer.cpp \
mapper.cpp
LOCAL_MODULE := gralloc.default
LOCAL_CFLAGS:= -DLOG_TAG=\"gralloc\"
include $(BUILD_SHARED_LIBRARY)

modules/gralloc/allocator.cpp

@@ -0,0 +1,154 @@
/*
* Copyright (C) 2009 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cutils/log.h>
#include "allocator.h"
// align all the memory blocks on a cache-line boundary
const int SimpleBestFitAllocator::kMemoryAlign = 32;
SimpleBestFitAllocator::SimpleBestFitAllocator(size_t size)
{
size_t pagesize = getpagesize();
mHeapSize = ((size + pagesize-1) & ~(pagesize-1));
chunk_t* node = new chunk_t(0, mHeapSize / kMemoryAlign);
mList.insertHead(node);
}
SimpleBestFitAllocator::~SimpleBestFitAllocator()
{
while(!mList.isEmpty()) {
delete mList.remove(mList.head());
}
}
size_t SimpleBestFitAllocator::size() const
{
return mHeapSize;
}
size_t SimpleBestFitAllocator::allocate(size_t size, uint32_t flags)
{
Locker::Autolock _l(mLock);
ssize_t offset = alloc(size, flags);
return offset;
}
ssize_t SimpleBestFitAllocator::deallocate(size_t offset)
{
Locker::Autolock _l(mLock);
chunk_t const * const freed = dealloc(offset);
if (freed) {
return 0;
}
return -ENOENT;
}
ssize_t SimpleBestFitAllocator::alloc(size_t size, uint32_t flags)
{
if (size == 0) {
return 0;
}
size = (size + kMemoryAlign-1) / kMemoryAlign;
chunk_t* free_chunk = 0;
chunk_t* cur = mList.head();
size_t pagesize = getpagesize();
while (cur) {
int extra = ( -cur->start & ((pagesize/kMemoryAlign)-1) ) ;
// best fit
if (cur->free && (cur->size >= (size+extra))) {
if ((!free_chunk) || (cur->size < free_chunk->size)) {
free_chunk = cur;
}
if (cur->size == size) {
break;
}
}
cur = cur->next;
}
if (free_chunk) {
const size_t free_size = free_chunk->size;
free_chunk->free = 0;
free_chunk->size = size;
if (free_size > size) {
int extra = ( -free_chunk->start & ((pagesize/kMemoryAlign)-1) ) ;
if (extra) {
chunk_t* split = new chunk_t(free_chunk->start, extra);
free_chunk->start += extra;
mList.insertBefore(free_chunk, split);
}
LOGE_IF(((free_chunk->start*kMemoryAlign)&(pagesize-1)),
"page is not aligned!!!");
const ssize_t tail_free = free_size - (size+extra);
if (tail_free > 0) {
chunk_t* split = new chunk_t(
free_chunk->start + free_chunk->size, tail_free);
mList.insertAfter(free_chunk, split);
}
}
return (free_chunk->start)*kMemoryAlign;
}
return -ENOMEM;
}
SimpleBestFitAllocator::chunk_t* SimpleBestFitAllocator::dealloc(size_t start)
{
start = start / kMemoryAlign;
chunk_t* cur = mList.head();
while (cur) {
if (cur->start == start) {
LOG_FATAL_IF(cur->free,
"block at offset 0x%08lX of size 0x%08lX already freed",
cur->start*kMemoryAlign, cur->size*kMemoryAlign);
// merge freed blocks together
chunk_t* freed = cur;
cur->free = 1;
do {
chunk_t* const p = cur->prev;
chunk_t* const n = cur->next;
if (p && (p->free || !cur->size)) {
freed = p;
p->size += cur->size;
mList.remove(cur);
delete cur;
}
cur = n;
} while (cur && cur->free);
#ifndef NDEBUG
if (!freed->free) {
dump_l("dealloc (!freed->free)");
}
#endif
LOG_FATAL_IF(!freed->free,
"freed block at offset 0x%08lX of size 0x%08lX is not free!",
freed->start * kMemoryAlign, freed->size * kMemoryAlign);
return freed;
}
cur = cur->next;
}
return 0;
}
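To make the policy above concrete, here is a small stand-alone exercise of the allocator (not part of the commit, and assuming it is compiled next to allocator.cpp/allocator.h inside the module). Sizes are rounded up to 32-byte units, and whenever the chosen block does not begin on a page boundary an "extra" chunk is split off first, so offsets normally come back page-aligned, which is exactly what the LOGE_IF in alloc() checks.

#include <stdio.h>
#include "allocator.h"

int main() {
    SimpleBestFitAllocator heap(8*1024*1024);  // same size as the gralloc heap

    int a = heap.allocate(1000);   // 1000 bytes -> 32 units; returns offset 0
    int b = heap.allocate(4096);   // starts on the next page boundary (4096)
    printf("a=%d b=%d heap=%zu\n", a, b, heap.size());

    heap.deallocate(a);            // freed chunks are merged with free neighbours
    heap.deallocate(b);            // heap is back to a single free chunk
    return 0;
}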

modules/gralloc/allocator.h

@@ -0,0 +1,127 @@
/*
* Copyright (C) 2009 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef GRALLOC_ALLOCATOR_H_
#define GRALLOC_ALLOCATOR_H_
#include <stdint.h>
#include <sys/types.h>
#include "gralloc_priv.h"
// ----------------------------------------------------------------------------
/*
* A simple templatized doubly linked-list implementation
*/
template <typename NODE>
class LinkedList
{
NODE* mFirst;
NODE* mLast;
public:
LinkedList() : mFirst(0), mLast(0) { }
bool isEmpty() const { return mFirst == 0; }
NODE const* head() const { return mFirst; }
NODE* head() { return mFirst; }
NODE const* tail() const { return mLast; }
NODE* tail() { return mLast; }
void insertAfter(NODE* node, NODE* newNode) {
newNode->prev = node;
newNode->next = node->next;
if (node->next == 0) mLast = newNode;
else node->next->prev = newNode;
node->next = newNode;
}
void insertBefore(NODE* node, NODE* newNode) {
newNode->prev = node->prev;
newNode->next = node;
if (node->prev == 0) mFirst = newNode;
else node->prev->next = newNode;
node->prev = newNode;
}
void insertHead(NODE* newNode) {
if (mFirst == 0) {
mFirst = mLast = newNode;
newNode->prev = newNode->next = 0;
} else {
newNode->prev = 0;
newNode->next = mFirst;
mFirst->prev = newNode;
mFirst = newNode;
}
}
void insertTail(NODE* newNode) {
if (mLast == 0) {
insertHead(newNode);
} else {
newNode->prev = mLast;
newNode->next = 0;
mLast->next = newNode;
mLast = newNode;
}
}
NODE* remove(NODE* node) {
if (node->prev == 0) mFirst = node->next;
else node->prev->next = node->next;
if (node->next == 0) mLast = node->prev;
else node->next->prev = node->prev;
return node;
}
};
class SimpleBestFitAllocator
{
public:
SimpleBestFitAllocator(size_t size);
virtual ~SimpleBestFitAllocator();
virtual size_t allocate(size_t size, uint32_t flags = 0);
virtual ssize_t deallocate(size_t offset);
virtual size_t size() const;
private:
struct chunk_t {
chunk_t(size_t start, size_t size)
: start(start), size(size), free(1), prev(0), next(0) {
}
size_t start;
size_t size : 28;
int free : 4;
mutable chunk_t* prev;
mutable chunk_t* next;
};
ssize_t alloc(size_t size, uint32_t flags);
chunk_t* dealloc(size_t start);
static const int kMemoryAlign;
mutable Locker mLock;
LinkedList<chunk_t> mList;
size_t mHeapSize;
};
#endif /* GRALLOC_ALLOCATOR_H_ */
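The intrusive LinkedList<> above works with any node type that exposes public prev/next pointers, which is how chunk_t plugs into it. A tiny illustrative use (not part of the commit; IntNode is a made-up node type, and the snippet assumes allocator.h is included):

struct IntNode {
    int value;
    IntNode* prev;
    IntNode* next;
    explicit IntNode(int v) : value(v), prev(0), next(0) { }
};

void linked_list_demo() {
    LinkedList<IntNode> list;
    IntNode* a = new IntNode(1);
    IntNode* b = new IntNode(2);
    list.insertHead(a);                // list: a
    list.insertAfter(a, b);            // list: a, b
    delete list.remove(a);             // remove() only unlinks; the caller owns the node
    delete list.remove(list.head());   // removes b, list is empty again
}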

modules/gralloc/gralloc.cpp

@@ -15,25 +15,29 @@
*/
#include <limits.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <cutils/ashmem.h>
#include <cutils/log.h>
#include <hardware/hardware.h>
#include <hardware/gralloc.h>
#include <unistd.h>
#include <fcntl.h>
#include <errno.h>
#include <pthread.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/ioctl.h>
#include <cutils/ashmem.h>
#include <cutils/log.h>
#include <cutils/atomic.h>
#include <hardware/hardware.h>
#include <hardware/gralloc.h>
#include "gralloc_priv.h"
#include "allocator.h"
#if HAVE_ANDROID_OS
#include <linux/android_pmem.h>
#endif
/*****************************************************************************/
@@ -94,7 +98,9 @@ struct private_module_t HAL_MODULE_INFO_SYM = {
numBuffers: 0,
bufferMask: 0,
lock: PTHREAD_MUTEX_INITIALIZER,
currentBuffer: 0
currentBuffer: 0,
pmem_master: -1,
pmem_master_base: 0
};
/*****************************************************************************/
@@ -164,32 +170,104 @@ static int gralloc_alloc_framebuffer(alloc_device_t* dev,
return err;
}
static SimpleBestFitAllocator sAllocator(8*1024*1024);
static int init_pmem_area(private_module_t* m)
{
int err = 0;
int master_fd = open("/dev/pmem", O_RDWR, 0);
if (master_fd >= 0) {
void* base = mmap(0, sAllocator.size(),
PROT_READ|PROT_WRITE, MAP_SHARED, master_fd, 0);
if (base == MAP_FAILED) {
err = -errno;
base = 0;
close(master_fd);
master_fd = -1;
}
m->pmem_master = master_fd;
m->pmem_master_base = base;
} else {
err = -errno;
}
return err;
}
static int gralloc_alloc_buffer(alloc_device_t* dev,
size_t size, int usage, buffer_handle_t* pHandle)
{
size = roundUpToPageSize(size);
int err = 0;
int flags = 0;
int fd = -1;
void* base = 0;
int offset = 0;
int lockState = 0;
size = roundUpToPageSize(size);
if (usage & GRALLOC_USAGE_HW_TEXTURE) {
// enable pmem in that case, so our software GL can fallback to
// the copybit module.
flags |= private_handle_t::PRIV_FLAGS_USES_PMEM;
}
if (usage & GRALLOC_USAGE_HW_2D) {
flags |= private_handle_t::PRIV_FLAGS_USES_PMEM;
}
int fd;
if ((flags & private_handle_t::PRIV_FLAGS_USES_PMEM) == 0) {
fd = ashmem_create_region("Buffer", size);
if (fd < 0) {
err = -errno;
}
} else {
fd = open("/dev/pmem", O_RDWR, 0);
// Note: Currently pmem get sized when it is mmaped.
// This means that just doing the open doesn't actually allocate
// anything. We basically need to do an implicit "mmap"
// (under the hood) for pmem regions. However, the current
// code will work okay for now thanks to the reference-counting.
private_module_t* m = reinterpret_cast<private_module_t*>(
dev->common.module);
pthread_mutex_lock(&m->lock);
if (m->pmem_master == -1)
err = init_pmem_area(m);
pthread_mutex_unlock(&m->lock);
if (m->pmem_master >= 0) {
// PMEM buffers are always mmapped
base = m->pmem_master_base;
lockState |= private_handle_t::LOCK_STATE_MAPPED;
offset = sAllocator.allocate(size);
if (offset < 0) {
err = -ENOMEM;
} else {
fd = open("/dev/pmem", O_RDWR, 0);
err = ioctl(fd, PMEM_CONNECT, m->pmem_master);
if (err < 0) {
err = -errno;
} else {
struct pmem_region sub = { offset, size };
err = ioctl(fd, PMEM_MAP, &sub);
}
if (err < 0) {
close(fd);
sAllocator.deallocate(offset);
fd = -1;
}
LOGD_IF(!err, "allocating pmem size=%d, offset=%d", size, offset);
}
}
}
if (fd < 0) {
return -errno;
if (err == 0) {
private_handle_t* hnd = new private_handle_t(fd, size, flags);
hnd->offset = offset;
hnd->base = int(base)+offset;
hnd->lockState = lockState;
*pHandle = hnd;
}
private_handle_t* hnd = new private_handle_t(fd, size, flags);
*pHandle = hnd;
return 0;
LOGE_IF(err, "gralloc failed err=%s", strerror(-err));
return err;
}
/*****************************************************************************/
@@ -249,6 +327,10 @@ static int gralloc_free(alloc_device_t* dev,
const size_t bufferSize = m->finfo.line_length * m->info.yres;
int index = (hnd->base - m->framebuffer->base) / bufferSize;
m->bufferMask &= ~(1<<index);
} else if (true || hnd->flags & private_handle_t::PRIV_FLAGS_USES_PMEM) {
if (hnd->fd >= 0) {
sAllocator.deallocate(hnd->offset);
}
}
gralloc_module_t* m = reinterpret_cast<gralloc_module_t*>(
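In the allocation path above, the backing store is chosen from the usage bits: buffers the hardware may touch (textures, so the software GL stack can fall back to the copybit module, and 2D blit targets) are placed in pmem, everything else goes to an ashmem region. A hedged restatement of just that decision (wantsPmem is an illustrative name, not part of the commit):

#include <hardware/gralloc.h>

// Mirrors the flag selection at the top of gralloc_alloc_buffer().
static bool wantsPmem(int usage) {
    return (usage & (GRALLOC_USAGE_HW_TEXTURE | GRALLOC_USAGE_HW_2D)) != 0;
}
// Any other usage ends up in ashmem_create_region("Buffer", size).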

modules/gralloc/gralloc_priv.h

@@ -23,6 +23,8 @@
#include <sys/cdefs.h>
#include <hardware/gralloc.h>
#include <pthread.h>
#include <errno.h>
#include <unistd.h>
#include <cutils/native_handle.h>
@@ -40,6 +42,23 @@ int mapFrameBufferLocked(struct private_module_t* module);
/*****************************************************************************/
class Locker {
pthread_mutex_t mutex;
public:
class Autolock {
Locker& locker;
public:
inline Autolock(Locker& locker) : locker(locker) { locker.lock(); }
inline ~Autolock() { locker.unlock(); }
};
inline Locker() { pthread_mutex_init(&mutex, 0); }
inline ~Locker() { pthread_mutex_destroy(&mutex); }
inline void lock() { pthread_mutex_lock(&mutex); }
inline void unlock() { pthread_mutex_unlock(&mutex); }
};
/*****************************************************************************/
struct private_handle_t;
struct private_module_t {
@@ -51,6 +70,9 @@ struct private_module_t {
uint32_t bufferMask;
pthread_mutex_t lock;
buffer_handle_t currentBuffer;
int pmem_master;
void* pmem_master_base;
struct fb_var_screeninfo info;
struct fb_fix_screeninfo finfo;
float xdpi;
@@ -82,25 +104,25 @@ struct private_handle_t : public native_handle
int magic;
int flags;
int size;
int offset; // used with copybit
int offset;
// FIXME: the attributes below should be out-of-line
int base;
int lockState;
int writeOwner;
int pid;
static const int sNumInts = 7;
static const int sNumInts = 8;
static const int sNumFds = 1;
static const int sMagic = 0x3141592;
private_handle_t(int fd, int size, int flags) :
fd(fd), magic(sMagic), flags(flags), size(size), offset(0),
base(0), lockState(0), writeOwner(0)
base(0), lockState(0), writeOwner(0), pid(getpid())
{
version = sizeof(native_handle);
numInts = sNumInts;
numFds = sNumFds;
}
~private_handle_t() {
magic = 0;
}
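The bump from sNumInts = 7 to 8 tracks the new pid member: a native_handle is marshalled as numFds file descriptors followed by numInts plain integers, so every int field added after fd has to be counted. An illustrative tally (not part of the commit):

// ints carried by private_handle_t after the single fd:
//   magic, flags, size, offset, base, lockState, writeOwner, pid   -> sNumInts = 8
// file descriptors:
//   fd                                                             -> sNumFds  = 1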

modules/gralloc/mapper.cpp

@@ -17,6 +17,7 @@
#include <limits.h>
#include <errno.h>
#include <pthread.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/stat.h>
@@ -30,6 +31,10 @@
#include "gralloc_priv.h"
// we need this for now because pmem cannot mmap at an offset
#define PMEM_HACK 1
/*****************************************************************************/
static int gralloc_map(gralloc_module_t const* module,
@@ -38,12 +43,19 @@ static int gralloc_map(gralloc_module_t const* module,
{
private_handle_t* hnd = (private_handle_t*)handle;
if (!(hnd->flags & private_handle_t::PRIV_FLAGS_FRAMEBUFFER)) {
void* mappedAddress = mmap(0, hnd->size, PROT_READ|PROT_WRITE,
MAP_SHARED, hnd->fd, 0);
size_t size = hnd->size;
#if PMEM_HACK
size += hnd->offset;
#endif
void* mappedAddress = mmap(0, size,
PROT_READ|PROT_WRITE, MAP_SHARED, hnd->fd, 0);
if (mappedAddress == MAP_FAILED) {
LOGE("Could not mmap %s", strerror(errno));
return -errno;
}
hnd->base = intptr_t(mappedAddress);
hnd->base = intptr_t(mappedAddress) + hnd->offset;
//LOGD("gralloc_map() succeeded fd=%d, off=%d, size=%d, vaddr=%p",
// hnd->fd, hnd->offset, hnd->size, mappedAddress);
}
*vaddr = (void*)hnd->base;
return 0;
@@ -55,7 +67,7 @@ static int gralloc_unmap(gralloc_module_t const* module,
private_handle_t* hnd = (private_handle_t*)handle;
if (!(hnd->flags & private_handle_t::PRIV_FLAGS_FRAMEBUFFER)) {
if (munmap((void*)hnd->base, hnd->size) < 0) {
LOGE("Could not unmap %d", errno);
LOGE("Could not unmap %s", strerror(errno));
}
}
hnd->base = 0;
@@ -82,10 +94,14 @@ int gralloc_register_buffer(gralloc_module_t const* module,
* handle, but instead maintained in the kernel or at least
* out-of-line
*/
// if this handle was created in this process, then we keep it as is.
private_handle_t* hnd = (private_handle_t*)handle;
hnd->base = 0;
hnd->lockState = 0;
hnd->writeOwner = 0;
if (hnd->pid != getpid()) {
hnd->base = 0;
hnd->lockState = 0;
hnd->writeOwner = 0;
}
return 0;
}
@@ -102,20 +118,20 @@ int gralloc_unregister_buffer(gralloc_module_t const* module,
*/
private_handle_t* hnd = (private_handle_t*)handle;
int32_t current_value, new_value;
int retry;
LOGE_IF(hnd->lockState & private_handle_t::LOCK_STATE_READ_MASK,
"handle %p still locked (state=%08x)",
hnd, hnd->lockState);
if (hnd->lockState & private_handle_t::LOCK_STATE_MAPPED) {
gralloc_unmap(module, handle);
// never unmap buffers that were created in this process
if (hnd->pid != getpid()) {
if (hnd->lockState & private_handle_t::LOCK_STATE_MAPPED) {
gralloc_unmap(module, handle);
}
hnd->base = 0;
hnd->lockState = 0;
hnd->writeOwner = 0;
}
hnd->base = 0;
hnd->lockState = 0;
hnd->writeOwner = 0;
return 0;
}
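The PMEM_HACK above is also where the commit message's caveat about address space comes from: since pmem cannot be mmap()ed at an offset, a client process maps everything from the start of the heap up to offset+size for each buffer it registers, and the allocating process keeps the entire 8 MiB master region mapped as well. A small illustrative calculation (the numbers are examples, not from the commit):

#include <stdio.h>

int main() {
    const unsigned heap   = 8u*1024*1024;  // sAllocator heap size in gralloc.cpp
    const unsigned size   = 4096;          // a one-page buffer
    const unsigned offset = heap - size;   // worst case: it sits at the end of the heap
    // gralloc_map() maps [0, offset+size) because of PMEM_HACK.
    printf("buffer: %u bytes, client address space used: %u bytes\n",
           size, offset + size);
    return 0;
}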