Merge "Add YUV support in EVS VTS tests" into oc-dev

Authored by TreeHugger Robot on 2017-05-02 18:59:28 +00:00; committed by Android (Google) Code Review
commit b3d108de74
5 changed files with 298 additions and 36 deletions

Android.bp

@@ -19,7 +19,8 @@ cc_test {
srcs: [
"VtsEvsV1_0TargetTest.cpp",
"FrameHandler.cpp"
"FrameHandler.cpp",
"FormatConvert.cpp"
],
defaults: [

FormatConvert.cpp (new file)

@@ -0,0 +1,173 @@
/*
* Copyright (C) 2017 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#define LOG_TAG "VtsHalEvsTest"
#include "FormatConvert.h"
#include <algorithm>    // std::min
#include <string.h>     // memcpy (needed by copyMatchedInterleavedFormats)
// Round up to the nearest multiple of the given alignment value
template<unsigned alignment>
int align(int value) {
static_assert((alignment && !(alignment & (alignment - 1))),
"alignment must be a power of 2");
unsigned mask = alignment - 1;
return (value + mask) & ~mask;
}
// Limit the given value to the provided range. :)
static inline float clamp(float v, float min, float max) {
if (v < min) return min;
if (v > max) return max;
return v;
}
static uint32_t yuvToRgbx(const unsigned char Y, const unsigned char Uin, const unsigned char Vin) {
// Don't use this if you want to see the best performance. :)
// Better to do this in a pixel shader if we really have to, but on actual
// embedded hardware we expect to be able to texture directly from the YUV data
float U = Uin - 128.0f;
float V = Vin - 128.0f;
float Rf = Y + 1.140f*V;
float Gf = Y - 0.395f*U - 0.581f*V;
float Bf = Y + 2.032f*U;
unsigned char R = (unsigned char)clamp(Rf, 0.0f, 255.0f);
unsigned char G = (unsigned char)clamp(Gf, 0.0f, 255.0f);
unsigned char B = (unsigned char)clamp(Bf, 0.0f, 255.0f);
return (R ) |
(G << 8) |
(B << 16) |
0xFF000000; // Fill the alpha channel with ones
}
void copyNV21toRGB32(unsigned width, unsigned height,
uint8_t* src,
uint32_t* dst, unsigned dstStridePixels)
{
// The NV21 format provides a Y array of 8bit values, followed by a 1/2 x 1/2 interleaved
// U/V array. It assumes an even width and height for the overall image, and a horizontal
// stride that is an even multiple of 16 bytes for both the Y and UV arrays.
unsigned strideLum = align<16>(width);
unsigned sizeY = strideLum * height;
unsigned strideColor = strideLum; // 1/2 the samples, but two interleaved channels
unsigned offsetUV = sizeY;
uint8_t* srcY = src;
uint8_t* srcUV = src+offsetUV;
for (unsigned r = 0; r < height; r++) {
// Note that we're walking the same UV row twice for even/odd luminance rows
uint8_t* rowY = srcY + r*strideLum;
uint8_t* rowUV = srcUV + (r/2 * strideColor);
uint32_t* rowDest = dst + r*dstStridePixels;
for (unsigned c = 0; c < width; c++) {
unsigned uCol = (c & ~1); // uCol is always even and repeats 1:2 with Y values
unsigned vCol = uCol | 1; // vCol is always odd
rowDest[c] = yuvToRgbx(rowY[c], rowUV[uCol], rowUV[vCol]);
}
}
}
void copyYV12toRGB32(unsigned width, unsigned height,
uint8_t* src,
uint32_t* dst, unsigned dstStridePixels)
{
// The YV12 format provides a Y array of 8bit values, followed by a 1/2 x 1/2 U array, followed
// by another 1/2 x 1/2 V array. It assumes an even width and height for the overall image,
// and a horizontal stride that is an even multiple of 16 bytes for each of the Y, U,
// and V arrays.
unsigned strideLum = align<16>(width);
unsigned sizeY = strideLum * height;
unsigned strideColor = align<16>(strideLum/2);
unsigned sizeColor = strideColor * height/2;
unsigned offsetU = sizeY;
unsigned offsetV = sizeY + sizeColor;
uint8_t* srcY = src;
uint8_t* srcU = src+offsetU;
uint8_t* srcV = src+offsetV;
for (unsigned r = 0; r < height; r++) {
// Note that we're walking the same U and V rows twice for even/odd luminance rows
uint8_t* rowY = srcY + r*strideLum;
uint8_t* rowU = srcU + (r/2 * strideColor);
uint8_t* rowV = srcV + (r/2 * strideColor);
uint32_t* rowDest = dst + r*dstStridePixels;
for (unsigned c = 0; c < width; c++) {
rowDest[c] = yuvToRgbx(rowY[c], rowU[c], rowV[c]);
}
}
}
void copyYUYVtoRGB32(unsigned width, unsigned height,
uint8_t* src, unsigned srcStridePixels,
uint32_t* dst, unsigned dstStridePixels)
{
uint32_t* srcWords = (uint32_t*)src;
const int srcRowPadding32 = srcStridePixels/2 - width/2; // 2 bytes per pixel, 4 bytes per word
const int dstRowPadding32 = dstStridePixels - width; // 4 bytes per pixel, 4 bytes per word
for (unsigned r = 0; r < height; r++) {
for (unsigned c = 0; c < width/2; c++) {
// Note: we're walking two pixels at a time here (even/odd)
uint32_t srcPixel = *srcWords++;
uint8_t Y1 = (srcPixel) & 0xFF;
uint8_t U = (srcPixel >> 8) & 0xFF;
uint8_t Y2 = (srcPixel >> 16) & 0xFF;
uint8_t V = (srcPixel >> 24) & 0xFF;
// On the RGB output, we're writing one pixel at a time
*(dst+0) = yuvToRgbx(Y1, U, V);
*(dst+1) = yuvToRgbx(Y2, U, V);
dst += 2;
}
// Skip over any extra data or end of row alignment padding
srcWords += srcRowPadding32;
dst += dstRowPadding32;
}
}
void copyMatchedInterleavedFormats(unsigned width, unsigned height,
void* src, unsigned srcStridePixels,
void* dst, unsigned dstStridePixels,
unsigned pixelSize) {
for (unsigned row = 0; row < height; row++) {
// Copy the entire row of pixel data
memcpy(dst, src, width * pixelSize);
// Advance to the next row (keeping in mind that stride here is in units of pixels)
src = (uint8_t*)src + srcStridePixels * pixelSize;
dst = (uint8_t*)dst + dstStridePixels * pixelSize;
}
}

FormatConvert.h (new file)

@@ -0,0 +1,60 @@
/*
* Copyright (C) 2017 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef EVS_VTS_FORMATCONVERT_H
#define EVS_VTS_FORMATCONVERT_H
#include <queue>
#include <stdint.h>
// Given an image buffer in NV21 format (HAL_PIXEL_FORMAT_YCRCB_420_SP), output 32bit RGBx values.
// The NV21 format provides a Y array of 8bit values, followed by a 1/2 x 1/2 interleaved
// U/V array. It assumes an even width and height for the overall image, and a horizontal
// stride that is an even multiple of 16 bytes for both the Y and UV arrays.
void copyNV21toRGB32(unsigned width, unsigned height,
uint8_t* src,
uint32_t* dst, unsigned dstStridePixels);
// Given an image buffer in YV12 format (HAL_PIXEL_FORMAT_YV12), output 32bit RGBx values.
// The YV12 format provides a Y array of 8bit values, followed by a 1/2 x 1/2 U array, followed
// by another 1/2 x 1/2 V array. It assumes an even width and height for the overall image,
// and a horizontal stride that is an even multiple of 16 bytes for each of the Y, U,
// and V arrays.
void copyYV12toRGB32(unsigned width, unsigned height,
uint8_t* src,
uint32_t* dst, unsigned dstStridePixels);
// Given an image buffer in YUYV format (HAL_PIXEL_FORMAT_YCBCR_422_I), output 32bit RGBx values.
// The YUYV format provides an interleaved array of 8bit values in which each pair of pixels is
// packed as Y1, U, Y2, V, so U and V are shared by two horizontally adjacent Y samples. It
// assumes an even width for the overall image; strides are passed explicitly, in pixels.
void copyYUYVtoRGB32(unsigned width, unsigned height,
uint8_t* src, unsigned srcStridePixels,
uint32_t* dst, unsigned dstStridePixels);
// Given a simple rectangular image buffer with an integer number of bytes per pixel,
// copy the pixel values into a new rectangular buffer (potentially with a different stride).
// This is typically used to copy RGBx data into an RGBx output buffer.
void copyMatchedInterleavedFormats(unsigned width, unsigned height,
void* src, unsigned srcStridePixels,
void* dst, unsigned dstStridePixels,
unsigned pixelSize);
#endif // EVS_VTS_FORMATCONVERT_H

FrameHandler.cpp

@@ -17,6 +17,7 @@
#define LOG_TAG "VtsHalEvsTest"
#include "FrameHandler.h"
#include "FormatConvert.h"
#include <stdio.h>
#include <string.h>
@@ -25,14 +26,6 @@
#include <cutils/native_handle.h>
#include <ui/GraphicBuffer.h>
#include <algorithm> // std::min
- // For the moment, we're assuming that the underlying EVS driver we're working with
- // is providing 4 byte RGBx data. This is fine for loopback testing, although
- // real hardware is expected to provide YUV data -- most likely formatted as YV12
- static const unsigned kBytesPerPixel = 4; // assuming 4 byte RGBx pixels
FrameHandler::FrameHandler(android::sp <IEvsCamera> pCamera, CameraDesc cameraInfo,
android::sp <IEvsDisplay> pDisplay,
@@ -58,14 +51,18 @@ void FrameHandler::shutdown()
bool FrameHandler::startStream() {
+ // Tell the camera to start streaming
+ Return<EvsResult> result = mCamera->startVideoStream(this);
+ if (result != EvsResult::OK) {
+     return false;
+ }
// Mark ourselves as running
mLock.lock();
mRunning = true;
mLock.unlock();
- // Tell the camera to start streaming
- Return<EvsResult> result = mCamera->startVideoStream(this);
- return (result == EvsResult::OK);
+ return true;
}
@@ -82,7 +79,9 @@ void FrameHandler::blockingStopStream() {
// Wait until the stream has actually stopped
std::unique_lock<std::mutex> lock(mLock);
- mSignal.wait(lock, [this](){ return !mRunning; });
+ if (mRunning) {
+     mSignal.wait(lock, [this]() { return !mRunning; });
+ }
}
@@ -179,13 +178,13 @@ Return<void> FrameHandler::deliverFrame(const BufferDesc& bufferArg) {
switch (mReturnMode) {
case eAutoReturn:
- // Send the camera buffer back now that we're done with it
+ // Send the camera buffer back now that the client has seen it
ALOGD("Calling doneWithFrame");
// TODO: Why is it that we get a HIDL crash if we pass back the cloned buffer?
mCamera->doneWithFrame(bufferArg);
break;
case eNoAutoReturn:
- // Hang onto the buffer handle for now -- we'll return it explicitly later
+ // Hang onto the buffer handle for now -- the client will return it explicitly later
mHeldBuffers.push(bufferArg);
}
@@ -228,25 +227,41 @@ bool FrameHandler::copyBufferContents(const BufferDesc& tgtBuffer,
srcBuffer.width, srcBuffer.height, srcBuffer.format, 1, srcBuffer.usage,
srcBuffer.stride);
- // Lock our source buffer for reading
- unsigned char* srcPixels = nullptr;
+ // Lock our source buffer for reading (current expectation is for this to be NV21 format)
+ uint8_t* srcPixels = nullptr;
src->lock(GRALLOC_USAGE_SW_READ_OFTEN, (void**)&srcPixels);
- // Lock our target buffer for writing
- unsigned char* tgtPixels = nullptr;
+ // Lock our target buffer for writing (should be RGBA8888 format)
+ uint32_t* tgtPixels = nullptr;
tgt->lock(GRALLOC_USAGE_SW_WRITE_OFTEN, (void**)&tgtPixels);
if (srcPixels && tgtPixels) {
- for (unsigned row = 0; row < height; row++) {
-     // Copy the entire row of pixel data
-     memcpy(tgtPixels, srcPixels, width * kBytesPerPixel);
-     // Advance to the next row (keeping in mind that stride here is in units of pixels)
-     tgtPixels += tgtBuffer.stride * kBytesPerPixel;
-     srcPixels += srcBuffer.stride * kBytesPerPixel;
+ if (tgtBuffer.format != HAL_PIXEL_FORMAT_RGBA_8888) {
+     // We always expect 32 bit RGB for the display output for now. Is there a need for 565?
+     ALOGE("Display buffer is always expected to be 32bit RGBA");
+     success = false;
+ } else {
+     if (srcBuffer.format == HAL_PIXEL_FORMAT_YCRCB_420_SP) {   // 420SP == NV21
+         copyNV21toRGB32(width, height,
+                         srcPixels,
+                         tgtPixels, tgtBuffer.stride);
+     } else if (srcBuffer.format == HAL_PIXEL_FORMAT_YV12) {    // YUV_420P == YV12
+         copyYV12toRGB32(width, height,
+                         srcPixels,
+                         tgtPixels, tgtBuffer.stride);
+     } else if (srcBuffer.format == HAL_PIXEL_FORMAT_YCBCR_422_I) {  // YUYV
+         copyYUYVtoRGB32(width, height,
+                         srcPixels, srcBuffer.stride,
+                         tgtPixels, tgtBuffer.stride);
+     } else if (srcBuffer.format == tgtBuffer.format) {  // 32bit RGBA
+         copyMatchedInterleavedFormats(width, height,
+                                       srcPixels, srcBuffer.stride,
+                                       tgtPixels, tgtBuffer.stride,
+                                       tgtBuffer.pixelSize);
+     }
+ }
} else {
ALOGE("Failed to copy buffer contents");
ALOGE("Failed to lock buffer contents for contents transfer");
success = false;
}

VtsEvsV1_0TargetTest.cpp

@@ -107,6 +107,8 @@ protected:
* call to closeCamera. Then repeats the test to ensure all cameras can be reopened.
*/
TEST_F(EvsHidlTest, CameraOpenClean) {
ALOGI("Starting CameraOpenClean test");
// Get the camera list
loadCameraList();
@@ -137,6 +139,8 @@ TEST_F(EvsHidlTest, CameraOpenClean) {
* the system to be tolerant of shutdown/restart race conditions.
*/
TEST_F(EvsHidlTest, CameraOpenAggressive) {
ALOGI("Starting CameraOpenAggressive test");
// Get the camera list
loadCameraList();
@@ -183,6 +187,8 @@ TEST_F(EvsHidlTest, CameraOpenAggressive) {
* Test both clean shut down and "aggressive open" device stealing behavior.
*/
TEST_F(EvsHidlTest, DisplayOpen) {
ALOGI("Starting DisplayOpen test");
// Request exclusive access to the EVS display, then let it go
{
sp<IEvsDisplay> pDisplay = pEnumerator->openDisplay();
@@ -229,6 +235,8 @@
* object itself or the owning enumerator.
*/
TEST_F(EvsHidlTest, DisplayStates) {
ALOGI("Starting DisplayStates test");
// Ensure the display starts in the expected state
EXPECT_EQ((DisplayState)pEnumerator->getDisplayState(), DisplayState::NOT_OPEN);
@@ -270,15 +278,14 @@
}
// TODO: This hack shouldn't be necessary. b/36122635
// NOTE: Calling flushCommand here did not avoid the race. Going back to sleep... :(
// android::hardware::IPCThreadState::self()->flushCommands();
sleep(1);
// Now that the display pointer has gone out of scope, causing the IEvsDisplay interface
// object to be destroyed, we should be back to the "not open" state.
// NOTE: If we want this to pass without the sleep above, we'd have to add the
// (now recommended) closeDisplay() call instead of relying on the smart pointer
- // going out of scope.
+ // going out of scope. I've not done that because I want to verify that the deletion
+ // of the object does actually clean up (eventually).
EXPECT_EQ((DisplayState)pEnumerator->getDisplayState(), DisplayState::NOT_OPEN);
}
@@ -288,6 +295,8 @@
* Measure and qualify the stream start up time and streaming frame rate of each reported camera
*/
TEST_F(EvsHidlTest, CameraStreamPerformance) {
ALOGI("Starting CameraStreamPerformance test");
// Get the camera list
loadCameraList();
@@ -304,7 +313,7 @@
// Start the camera's video stream
nsecs_t start = systemTime(SYSTEM_TIME_MONOTONIC);
bool startResult = frameHandler->startStream();
- EXPECT_EQ(startResult, true);
+ ASSERT_TRUE(startResult);
// Ensure the first frame arrived within the expected time
frameHandler->waitForFrameCount(1);
@@ -344,6 +353,8 @@
* than one frame time. The camera must cleanly skip frames until the client is ready again.
*/
TEST_F(EvsHidlTest, CameraStreamBuffering) {
ALOGI("Starting CameraStreamBuffering test");
// Arbitrary constant (should be > 1 and less than crazy)
static const unsigned int kBuffersToHold = 6;
@@ -372,14 +383,14 @@
// Start the camera's video stream
bool startResult = frameHandler->startStream();
- EXPECT_TRUE(startResult);
+ ASSERT_TRUE(startResult);
// Check that the video stream stalls once we've gotten exactly the number of buffers
// we requested since we told the frameHandler not to return them.
- sleep(1); // 1 second would be enough for at least 5 frames to be delivered worst case
+ sleep(2); // 2 seconds should be enough for at least 5 frames to be delivered worst case
unsigned framesReceived = 0;
frameHandler->getFramesCounters(&framesReceived, nullptr);
- EXPECT_EQ(kBuffersToHold, framesReceived);
+ ASSERT_EQ(kBuffersToHold, framesReceived) << "Stream didn't stall at expected buffer limit";
// Give back one buffer
@@ -390,7 +401,7 @@
// filled since we require 10fps minimum -- but give a 10% allowance just in case.
usleep(110 * kMillisecondsToMicroseconds);
frameHandler->getFramesCounters(&framesReceived, nullptr);
- EXPECT_EQ(kBuffersToHold+1, framesReceived);
+ EXPECT_EQ(kBuffersToHold+1, framesReceived) << "Stream should've resumed";
// Even when the camera pointer goes out of scope, the FrameHandler object will
// keep the stream alive unless we tell it to shutdown.
@@ -411,6 +422,8 @@
* which a human could observe to see the operation of the system on the physical display.
*/
TEST_F(EvsHidlTest, CameraToDisplayRoundTrip) {
ALOGI("Starting CameraToDisplayRoundTrip test");
// Get the camera list
loadCameraList();
@@ -434,7 +447,7 @@
// Start the camera's video stream
bool startResult = frameHandler->startStream();
- EXPECT_EQ(startResult, true);
+ ASSERT_TRUE(startResult);
// Wait a while to let the data flow
static const int kSecondsToWait = 5;