audio: Add StreamDescriptor.frameSizeBytes am: a2c714129e am: c8df971609 am: e579d10f70

Original change: https://android-review.googlesource.com/c/platform/hardware/interfaces/+/2199852

Change-Id: Ie004fd658bda3f94276b4c52bd2dc4d2e06cc172
Signed-off-by: Automerger Merge Worker <android-build-automerger-merge-worker@system.gserviceaccount.com>
Mikhail Naganov 2022-08-30 18:22:02 +00:00 committed by Automerger Merge Worker
commit af8ca6aedd
7 changed files with 223 additions and 16 deletions

@@ -36,6 +36,7 @@ package android.hardware.audio.core;
parcelable StreamDescriptor {
android.hardware.common.fmq.MQDescriptor<android.hardware.audio.core.StreamDescriptor.Command,android.hardware.common.fmq.SynchronizedReadWrite> command;
android.hardware.common.fmq.MQDescriptor<android.hardware.audio.core.StreamDescriptor.Reply,android.hardware.common.fmq.SynchronizedReadWrite> reply;
int frameSizeBytes;
long bufferSizeFrames;
android.hardware.audio.core.StreamDescriptor.AudioBuffer audio;
const int COMMAND_BURST = 1;

@@ -144,6 +144,14 @@ parcelable StreamDescriptor {
}
MQDescriptor<Reply, SynchronizedReadWrite> reply;
/**
* The size of one frame of audio data in bytes. For PCM formats this is
* usually equal to the size of a sample multiplied by the number of
* channels used. For encoded bitstreams encapsulated into PCM the sample
* size of the underlying PCM stream is used. For encoded bitstreams that
* are passed without encapsulation, the frame size is usually 1 byte.
*/
int frameSizeBytes;
/**
* Total buffer size in frames. This applies both to the size of the 'audio.fmq'
* queue and to the size of the shared memory buffer for MMap No IRQ streams.
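
The two fields are meant to be used together by the client: the capacity of the 'audio.fmq' data queue in bytes is the product of the frame size and the buffer size in frames. A minimal client-side sketch, assuming the NDK backend parcelable; it mirrors the getBufferSizeBytes() helper in the VTS test further down:

#include <cstddef>
#include <aidl/android/hardware/audio/core/StreamDescriptor.h>

// Illustrative helper only; 'desc' is the StreamDescriptor returned when the
// stream was opened by the HAL module.
size_t getDataBufferSizeBytes(
        const ::aidl::android::hardware::audio::core::StreamDescriptor& desc) {
    // Total capacity of the data queue, always a whole number of frames.
    return static_cast<size_t>(desc.frameSizeBytes) *
            static_cast<size_t>(desc.bufferSizeFrames);
}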

@@ -46,6 +46,7 @@ cc_test {
host_supported: true,
vendor_available: true,
static_libs: [
"android.media.audio.common.types-V1-ndk",
"libaudioaidlcommon",
],
shared_libs: [
@@ -59,6 +60,7 @@ cc_test {
],
srcs: [
"tests/streamworker_tests.cpp",
"tests/utils_tests.cpp",
],
test_suites: [
"general-tests",

@@ -62,12 +62,20 @@ constexpr size_t getChannelCount(
constexpr size_t getFrameSizeInBytes(
const ::aidl::android::media::audio::common::AudioFormatDescription& format,
const ::aidl::android::media::audio::common::AudioChannelLayout& layout) {
if (format == ::aidl::android::media::audio::common::AudioFormatDescription{}) {
// Unspecified format.
return 0;
}
using ::aidl::android::media::audio::common::AudioFormatType;
if (format.type == AudioFormatType::PCM) {
return getPcmSampleSizeInBytes(format.pcm) * getChannelCount(layout);
} else if (format.type == AudioFormatType::NON_PCM) {
// For non-PCM formats always use the underlying PCM size. The default value for
// PCM is "UINT_8_BIT", thus non-encapsulated streams have the frame size of 1.
return getPcmSampleSizeInBytes(format.pcm);
}
// For non-PCM formats always use frame size of 1.
return 1;
// Something unexpected.
return 0;
}
} // namespace android::hardware::audio::common
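
As a quick illustration of the conventions spelled out in the comments above (the new unit tests below exercise the same cases), a sketch using the same helper and AIDL types; the encoding strings are arbitrary placeholders, just as in the tests:

#include <cstddef>
#include <Utils.h>

using ::aidl::android::media::audio::common::AudioChannelLayout;
using ::aidl::android::media::audio::common::AudioFormatDescription;
using ::aidl::android::media::audio::common::AudioFormatType;
using ::aidl::android::media::audio::common::PcmType;
using ::android::hardware::audio::common::getFrameSizeInBytes;

size_t rawBitstreamFrameSize() {
    AudioFormatDescription bitstream;
    bitstream.type = AudioFormatType::NON_PCM;
    bitstream.encoding = "bitstream";  // placeholder encoding string
    // 'pcm' keeps its default (UINT_8_BIT), so the frame size is 1 byte.
    return getFrameSizeInBytes(bitstream, AudioChannelLayout{});
}

size_t encapsulatedBitstreamFrameSize() {
    AudioFormatDescription encapsulated;
    encapsulated.type = AudioFormatType::NON_PCM;
    encapsulated.encoding = "encapsulated";  // placeholder encoding string
    encapsulated.pcm = PcmType::INT_16_BIT;  // carried over a 16-bit PCM transport
    // The sample size of the underlying PCM transport is used: 2 bytes per frame.
    return getFrameSizeInBytes(encapsulated, AudioChannelLayout{});
}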

@@ -0,0 +1,190 @@
/*
* Copyright (C) 2022 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cstdint>
#include <limits>
#include <type_traits>
#include <utility>
#include <vector>
#include <Utils.h>
#include <gtest/gtest.h>
#define LOG_TAG "Utils_Test"
#include <log/log.h>
using aidl::android::media::audio::common::AudioChannelLayout;
using aidl::android::media::audio::common::AudioFormatDescription;
using aidl::android::media::audio::common::AudioFormatType;
using aidl::android::media::audio::common::PcmType;
using android::hardware::audio::common::getChannelCount;
using android::hardware::audio::common::getFrameSizeInBytes;
using android::hardware::audio::common::getPcmSampleSizeInBytes;
TEST(UtilsTest, ChannelCountOddCases) {
using Tag = AudioChannelLayout::Tag;
EXPECT_EQ(0UL, getChannelCount(AudioChannelLayout{}));
EXPECT_EQ(0UL, getChannelCount(AudioChannelLayout::make<Tag::invalid>(0)));
EXPECT_EQ(0UL, getChannelCount(AudioChannelLayout::make<Tag::invalid>(-1)));
}
TEST(UtilsTest, ChannelCountForIndexMask) {
using Tag = AudioChannelLayout::Tag;
EXPECT_EQ(0UL, getChannelCount(AudioChannelLayout::make<Tag::indexMask>(0)));
#define VERIFY_INDEX_MASK(N) \
{ \
const auto l = \
AudioChannelLayout::make<Tag::indexMask>(AudioChannelLayout::INDEX_MASK_##N); \
EXPECT_EQ(N##UL, getChannelCount(l)) << l.toString(); \
}
VERIFY_INDEX_MASK(1);
VERIFY_INDEX_MASK(2);
VERIFY_INDEX_MASK(3);
VERIFY_INDEX_MASK(4);
VERIFY_INDEX_MASK(5);
VERIFY_INDEX_MASK(6);
VERIFY_INDEX_MASK(7);
VERIFY_INDEX_MASK(8);
VERIFY_INDEX_MASK(9);
VERIFY_INDEX_MASK(10);
VERIFY_INDEX_MASK(11);
VERIFY_INDEX_MASK(12);
VERIFY_INDEX_MASK(13);
VERIFY_INDEX_MASK(14);
VERIFY_INDEX_MASK(15);
VERIFY_INDEX_MASK(16);
VERIFY_INDEX_MASK(17);
VERIFY_INDEX_MASK(18);
VERIFY_INDEX_MASK(19);
VERIFY_INDEX_MASK(20);
VERIFY_INDEX_MASK(21);
VERIFY_INDEX_MASK(22);
VERIFY_INDEX_MASK(23);
VERIFY_INDEX_MASK(24);
#undef VERIFY_INDEX_MASK
}
TEST(UtilsTest, ChannelCountForLayoutMask) {
using Tag = AudioChannelLayout::Tag;
const std::vector<std::pair<size_t, int32_t>> kTestLayouts = {
std::make_pair(0UL, 0),
std::make_pair(1UL, AudioChannelLayout::LAYOUT_MONO),
std::make_pair(2UL, AudioChannelLayout::LAYOUT_STEREO),
std::make_pair(6UL, AudioChannelLayout::LAYOUT_5POINT1),
std::make_pair(8UL, AudioChannelLayout::LAYOUT_7POINT1),
std::make_pair(16UL, AudioChannelLayout::LAYOUT_9POINT1POINT6),
std::make_pair(13UL, AudioChannelLayout::LAYOUT_13POINT_360RA),
std::make_pair(24UL, AudioChannelLayout::LAYOUT_22POINT2),
std::make_pair(3UL, AudioChannelLayout::LAYOUT_STEREO_HAPTIC_A),
std::make_pair(4UL, AudioChannelLayout::LAYOUT_STEREO_HAPTIC_AB)};
for (const auto& [expected_count, layout] : kTestLayouts) {
const auto l = AudioChannelLayout::make<Tag::layoutMask>(layout);
EXPECT_EQ(expected_count, getChannelCount(l)) << l.toString();
}
}
TEST(UtilsTest, ChannelCountForVoiceMask) {
using Tag = AudioChannelLayout::Tag;
// clang-format off
const std::vector<std::pair<size_t, int32_t>> kTestLayouts = {
std::make_pair(0UL, 0),
std::make_pair(1UL, AudioChannelLayout::VOICE_UPLINK_MONO),
std::make_pair(1UL, AudioChannelLayout::VOICE_DNLINK_MONO),
std::make_pair(2UL, AudioChannelLayout::VOICE_CALL_MONO)};
// clang-format on
for (const auto& [expected_count, layout] : kTestLayouts) {
const auto l = AudioChannelLayout::make<Tag::voiceMask>(layout);
EXPECT_EQ(expected_count, getChannelCount(l)) << l.toString();
}
}
namespace {
AudioChannelLayout make_AudioChannelLayout_Mono() {
return AudioChannelLayout::make<AudioChannelLayout::Tag::layoutMask>(
AudioChannelLayout::LAYOUT_MONO);
}
AudioChannelLayout make_AudioChannelLayout_Stereo() {
return AudioChannelLayout::make<AudioChannelLayout::Tag::layoutMask>(
AudioChannelLayout::LAYOUT_STEREO);
}
AudioFormatDescription make_AudioFormatDescription(AudioFormatType type) {
AudioFormatDescription result;
result.type = type;
return result;
}
AudioFormatDescription make_AudioFormatDescription(PcmType pcm) {
auto result = make_AudioFormatDescription(AudioFormatType::PCM);
result.pcm = pcm;
return result;
}
AudioFormatDescription make_AudioFormatDescription(const std::string& encoding) {
AudioFormatDescription result;
result.encoding = encoding;
return result;
}
AudioFormatDescription make_AudioFormatDescription(PcmType transport, const std::string& encoding) {
auto result = make_AudioFormatDescription(encoding);
result.pcm = transport;
return result;
}
} // namespace
TEST(UtilsTest, FrameSize) {
EXPECT_EQ(0UL, getFrameSizeInBytes(AudioFormatDescription{}, AudioChannelLayout{}));
EXPECT_EQ(sizeof(int16_t), getFrameSizeInBytes(make_AudioFormatDescription(PcmType::INT_16_BIT),
make_AudioChannelLayout_Mono()));
EXPECT_EQ(2 * sizeof(int16_t),
getFrameSizeInBytes(make_AudioFormatDescription(PcmType::INT_16_BIT),
make_AudioChannelLayout_Stereo()));
EXPECT_EQ(sizeof(int32_t), getFrameSizeInBytes(make_AudioFormatDescription(PcmType::INT_32_BIT),
make_AudioChannelLayout_Mono()));
EXPECT_EQ(2 * sizeof(int32_t),
getFrameSizeInBytes(make_AudioFormatDescription(PcmType::INT_32_BIT),
make_AudioChannelLayout_Stereo()));
EXPECT_EQ(sizeof(float), getFrameSizeInBytes(make_AudioFormatDescription(PcmType::FLOAT_32_BIT),
make_AudioChannelLayout_Mono()));
EXPECT_EQ(2 * sizeof(float),
getFrameSizeInBytes(make_AudioFormatDescription(PcmType::FLOAT_32_BIT),
make_AudioChannelLayout_Stereo()));
EXPECT_EQ(sizeof(uint8_t),
getFrameSizeInBytes(make_AudioFormatDescription("bitstream"), AudioChannelLayout{}));
EXPECT_EQ(sizeof(int16_t),
getFrameSizeInBytes(make_AudioFormatDescription(PcmType::INT_16_BIT, "encapsulated"),
AudioChannelLayout{}));
}
TEST(UtilsTest, PcmSampleSize) {
EXPECT_EQ(1UL, getPcmSampleSizeInBytes(PcmType{}));
EXPECT_EQ(sizeof(uint8_t), getPcmSampleSizeInBytes(PcmType::UINT_8_BIT));
EXPECT_EQ(sizeof(int16_t), getPcmSampleSizeInBytes(PcmType::INT_16_BIT));
EXPECT_EQ(sizeof(int32_t), getPcmSampleSizeInBytes(PcmType::INT_32_BIT));
EXPECT_EQ(sizeof(int32_t), getPcmSampleSizeInBytes(PcmType::FIXED_Q_8_24));
EXPECT_EQ(sizeof(float), getPcmSampleSizeInBytes(PcmType::FLOAT_32_BIT));
EXPECT_EQ(3UL, getPcmSampleSizeInBytes(PcmType::INT_24_BIT));
EXPECT_EQ(0UL, getPcmSampleSizeInBytes(PcmType(-1)));
using PcmTypeUnderlyingType = std::underlying_type_t<PcmType>;
EXPECT_EQ(0UL,
getPcmSampleSizeInBytes(PcmType(std::numeric_limits<PcmTypeUnderlyingType>::min())));
EXPECT_EQ(0UL,
getPcmSampleSizeInBytes(PcmType(std::numeric_limits<PcmTypeUnderlyingType>::max())));
}

@@ -35,6 +35,7 @@ void StreamContext::fillDescriptor(StreamDescriptor* desc) {
desc->reply = mReplyMQ->dupeDesc();
}
if (mDataMQ) {
desc->frameSizeBytes = mFrameSize;
desc->bufferSizeFrames =
mDataMQ->getQuantumCount() * mDataMQ->getQuantumSize() / mFrameSize;
desc->audio.set<StreamDescriptor::AudioBuffer::Tag::fmq>(mDataMQ->dupeDesc());
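
For a concrete sense of the arithmetic above: the data MQ is byte-oriented (its element type is int8_t elsewhere in this change, so getQuantumSize() == 1), and the descriptor advertises its capacity in whole frames. A worked example with illustrative numbers, not values mandated by the interface:

#include <cstddef>
#include <cstdint>

int64_t exampleBufferSizeFrames() {
    const size_t quantumCount = 2048;  // hypothetical data MQ capacity, in quanta
    const size_t quantumSize = 1;      // sizeof(int8_t) for a byte-oriented data MQ
    const size_t frameSize = 4;        // 16-bit stereo PCM: 2 bytes/sample * 2 channels
    // Same formula as mDataMQ->getQuantumCount() * mDataMQ->getQuantumSize() / mFrameSize above.
    return static_cast<int64_t>(quantumCount * quantumSize / frameSize);  // 512 frames
}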

@@ -28,7 +28,6 @@
#include <android-base/logging.h>
#include <StreamWorker.h>
#include <Utils.h>
#include <aidl/Gtest.h>
#include <aidl/Vintf.h>
#include <aidl/android/hardware/audio/core/IConfig.h>
@@ -55,6 +54,7 @@ using aidl::android::hardware::audio::core::IStreamIn;
using aidl::android::hardware::audio::core::IStreamOut;
using aidl::android::hardware::audio::core::ModuleDebug;
using aidl::android::hardware::audio::core::StreamDescriptor;
using aidl::android::hardware::common::fmq::SynchronizedReadWrite;
using aidl::android::media::audio::common::AudioContentType;
using aidl::android::media::audio::common::AudioDevice;
using aidl::android::media::audio::common::AudioDeviceAddress;
@@ -68,7 +68,6 @@ using aidl::android::media::audio::common::AudioPortDeviceExt;
using aidl::android::media::audio::common::AudioPortExt;
using aidl::android::media::audio::common::AudioSource;
using aidl::android::media::audio::common::AudioUsage;
using android::hardware::audio::common::getFrameSizeInBytes;
using android::hardware::audio::common::StreamLogic;
using android::hardware::audio::common::StreamWorker;
using ndk::ScopedAStatus;
@@ -368,18 +367,12 @@ class WithDevicePortConnectedState {
class StreamContext {
public:
typedef AidlMessageQueue<StreamDescriptor::Command,
::aidl::android::hardware::common::fmq::SynchronizedReadWrite>
CommandMQ;
typedef AidlMessageQueue<StreamDescriptor::Reply,
::aidl::android::hardware::common::fmq::SynchronizedReadWrite>
ReplyMQ;
typedef AidlMessageQueue<int8_t, ::aidl::android::hardware::common::fmq::SynchronizedReadWrite>
DataMQ;
typedef AidlMessageQueue<StreamDescriptor::Command, SynchronizedReadWrite> CommandMQ;
typedef AidlMessageQueue<StreamDescriptor::Reply, SynchronizedReadWrite> ReplyMQ;
typedef AidlMessageQueue<int8_t, SynchronizedReadWrite> DataMQ;
StreamContext(const AudioPortConfig& portConfig, const StreamDescriptor& descriptor)
: mFrameSizeBytes(
getFrameSizeInBytes(portConfig.format.value(), portConfig.channelMask.value())),
explicit StreamContext(const StreamDescriptor& descriptor)
: mFrameSizeBytes(descriptor.frameSizeBytes),
mCommandMQ(new CommandMQ(descriptor.command)),
mReplyMQ(new ReplyMQ(descriptor.reply)),
mBufferSizeFrames(descriptor.bufferSizeFrames),
@@ -392,6 +385,10 @@ class StreamContext {
EXPECT_TRUE(mReplyMQ->isValid());
if (mDataMQ != nullptr) {
EXPECT_TRUE(mDataMQ->isValid());
EXPECT_GE(mDataMQ->getQuantumCount() * mDataMQ->getQuantumSize(),
mFrameSizeBytes * mBufferSizeFrames)
<< "Data MQ actual buffer size is "
"less than the buffer size as specified by the descriptor";
}
}
size_t getBufferSizeBytes() const { return mFrameSizeBytes * mBufferSizeFrames; }
@@ -605,7 +602,7 @@ class WithStream {
ASSERT_NE(nullptr, mStream) << "; port config id " << getPortId();
EXPECT_GE(mDescriptor.bufferSizeFrames, bufferSizeFrames)
<< "actual buffer size must be no less than requested";
mContext.emplace(mPortConfig.get(), mDescriptor);
mContext.emplace(mDescriptor);
ASSERT_NO_FATAL_FAILURE(mContext.value().checkIsValid());
}
Stream* get() const { return mStream.get(); }