Merge changes from topic "audio-v5-api"

* changes:
  Audio V5: move Stream Metadata to common
  Audio HAL V5: Introduce HAL V5, equal to V4 for now
Treehugger Robot 2018-12-04 17:44:07 +00:00 committed by Gerrit Code Review
commit d8ebd25073
29 changed files with 4861 additions and 0 deletions

audio/5.0/Android.bp Normal file

@@ -0,0 +1,48 @@
// This file is autogenerated by hidl-gen -Landroidbp.
hidl_interface {
name: "android.hardware.audio@5.0",
root: "android.hardware",
vndk: {
enabled: true,
},
srcs: [
"types.hal",
"IDevice.hal",
"IDevicesFactory.hal",
"IPrimaryDevice.hal",
"IStream.hal",
"IStreamIn.hal",
"IStreamOut.hal",
"IStreamOutCallback.hal",
],
interfaces: [
"android.hardware.audio.common@5.0",
"android.hardware.audio.effect@5.0",
"android.hidl.base@1.0",
],
types: [
"AudioDrain",
"AudioFrequencyResponsePoint",
"AudioMicrophoneChannelMapping",
"AudioMicrophoneCoordinate",
"AudioMicrophoneDirectionality",
"AudioMicrophoneLocation",
"DeviceAddress",
"MessageQueueFlagBits",
"MicrophoneInfo",
"MmapBufferFlag",
"MmapBufferInfo",
"MmapPosition",
"ParameterValue",
"PlaybackTrackMetadata",
"RecordTrackMetadata",
"Result",
"SinkMetadata",
"SourceMetadata",
"TimeSpec",
],
gen_java: false,
gen_java_constants: true,
}
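
The hidl_interface module above generates a C++ proxy library with the same name. As a minimal, hypothetical sketch (not part of this change), a client links that library and locates the registered HAL instance like this:

```cpp
// Minimal sketch: locate the default audio HAL v5 factory service.
// Assumes the hidl-gen'd headers from "android.hardware.audio@5.0".
#include <android/hardware/audio/5.0/IDevicesFactory.h>
#include <utils/StrongPointer.h>

using ::android::sp;
using ::android::hardware::audio::V5_0::IDevicesFactory;

int main() {
    // getService() asks hwservicemanager for the "default" instance.
    sp<IDevicesFactory> factory = IDevicesFactory::getService();
    if (factory == nullptr) {
        return 1;  // HAL not registered on this device
    }
    return 0;
}
```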

audio/5.0/IDevice.hal Normal file

@@ -0,0 +1,282 @@
/*
* Copyright (C) 2018 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package android.hardware.audio@5.0;
import android.hardware.audio.common@5.0;
import IStreamIn;
import IStreamOut;
interface IDevice {
/**
* Returns whether the audio hardware interface has been initialized.
*
* @return retval OK on success, NOT_INITIALIZED on failure.
*/
initCheck() generates (Result retval);
/**
* Sets the audio volume for all audio activities other than voice call. If
* NOT_SUPPORTED is returned, the software mixer will emulate this
* capability.
*
* @param volume 1.0f means unity, 0.0f is zero.
* @return retval operation completion status.
*/
setMasterVolume(float volume) generates (Result retval);
/**
* Get the current master volume value for the HAL, if the HAL supports
* master volume control. For example, AudioFlinger will query this value
* from the primary audio HAL when the service starts and use the value for
* setting the initial master volume across all HALs. HALs which do not
* support this method must return NOT_SUPPORTED in 'retval'.
*
* @return retval operation completion status.
* @return volume 1.0f means unity, 0.0f is zero.
*/
getMasterVolume() generates (Result retval, float volume);
/**
* Sets microphone muting state.
*
* @param mute whether microphone is muted.
* @return retval operation completion status.
*/
setMicMute(bool mute) generates (Result retval);
/**
* Gets whether microphone is muted.
*
* @return retval operation completion status.
* @return mute whether microphone is muted.
*/
getMicMute() generates (Result retval, bool mute);
/**
* Set the audio mute status for all audio activities. If the return value
* is NOT_SUPPORTED, the software mixer will emulate this capability.
*
* @param mute whether audio is muted.
* @return retval operation completion status.
*/
setMasterMute(bool mute) generates (Result retval);
/**
* Get the current master mute status for the HAL, if the HAL supports
* master mute control. AudioFlinger will query this value from the primary
* audio HAL when the service starts and use the value for setting the
* initial master mute across all HALs. HAL must indicate that the feature
* is not supported by returning NOT_SUPPORTED status.
*
* @return retval operation completion status.
* @return mute whether audio is muted.
*/
getMasterMute() generates (Result retval, bool mute);
/**
* Returns audio input buffer size according to parameters passed or
* INVALID_ARGUMENTS if one of the parameters is not supported.
*
* @param config audio configuration.
* @return retval operation completion status.
* @return bufferSize input buffer size in bytes.
*/
getInputBufferSize(AudioConfig config)
generates (Result retval, uint64_t bufferSize);
/**
* This method creates and opens the audio hardware output stream.
* If the stream cannot be opened with the proposed audio config,
* the HAL must provide suggested values for the audio config.
*
* @param ioHandle handle assigned by AudioFlinger.
* @param device device type and (if needed) address.
* @param config stream configuration.
* @param flags additional flags.
* @param sourceMetadata Description of the audio that will be played.
*                       May be used by implementations to configure hardware effects.
* @return retval operation completion status.
* @return outStream created output stream.
* @return suggestedConfig in case of invalid parameters, suggested config.
*/
openOutputStream(
AudioIoHandle ioHandle,
DeviceAddress device,
AudioConfig config,
bitfield<AudioOutputFlag> flags,
SourceMetadata sourceMetadata) generates (
Result retval,
IStreamOut outStream,
AudioConfig suggestedConfig);
/**
* This method creates and opens the audio hardware input stream.
* If the stream cannot be opened with the proposed audio config,
* the HAL must provide suggested values for the audio config.
*
* @param ioHandle handle assigned by AudioFlinger.
* @param device device type and (if needed) address.
* @param config stream configuration.
* @param flags additional flags.
* @param sinkMetadata Description of the audio that is suggested by the client.
* May be used by implementations to configure hardware effects.
* @return retval operation completion status.
* @return inStream in case of success, created input stream.
* @return suggestedConfig in case of invalid parameters, suggested config.
*/
openInputStream(
AudioIoHandle ioHandle,
DeviceAddress device,
AudioConfig config,
bitfield<AudioInputFlag> flags,
SinkMetadata sinkMetadata) generates (
Result retval,
IStreamIn inStream,
AudioConfig suggestedConfig);
/**
* Returns whether HAL supports audio patches.
*
* @return supports true if audio patches are supported.
*/
supportsAudioPatches() generates (bool supports);
/**
* Creates an audio patch between several source and sink ports. The handle
* is allocated by the HAL and must be unique for this audio HAL module.
*
* @param sources patch sources.
* @param sinks patch sinks.
* @return retval operation completion status.
* @return patch created patch handle.
*/
createAudioPatch(vec<AudioPortConfig> sources, vec<AudioPortConfig> sinks)
generates (Result retval, AudioPatchHandle patch);
/**
* Release an audio patch.
*
* @param patch patch handle.
* @return retval operation completion status.
*/
releaseAudioPatch(AudioPatchHandle patch) generates (Result retval);
/**
* Returns the list of supported attributes for a given audio port.
*
* As input, 'port' contains the information (type, role, address, etc.)
* needed by the HAL to identify the port.
*
* As output, 'resultPort' contains possible attributes (sampling rates,
* formats, channel masks, gain controllers...) for this port.
*
* @param port port identifier.
* @return retval operation completion status.
* @return resultPort port descriptor with all parameters filled up.
*/
getAudioPort(AudioPort port)
generates (Result retval, AudioPort resultPort);
/**
* Set audio port configuration.
*
* @param config audio port configuration.
* @return retval operation completion status.
*/
setAudioPortConfig(AudioPortConfig config) generates (Result retval);
/**
* Gets the HW synchronization source of the device. Calling this method is
* equivalent to getting AUDIO_PARAMETER_HW_AV_SYNC on the legacy HAL.
* Optional method
*
* @return retval operation completion status: OK or NOT_SUPPORTED.
* @return hwAvSync HW synchronization source
*/
getHwAvSync() generates (Result retval, AudioHwSync hwAvSync);
/**
* Sets whether the screen is on. Calling this method is equivalent to
* setting AUDIO_PARAMETER_KEY_SCREEN_STATE on the legacy HAL.
* Optional method
*
* @param turnedOn whether the screen is turned on.
* @return retval operation completion status.
*/
setScreenState(bool turnedOn) generates (Result retval);
/**
* Generic method for retrieving vendor-specific parameter values.
* The framework does not interpret the parameters, they are passed
* in an opaque manner between a vendor application and HAL.
*
* Multiple parameters can be retrieved at the same time.
* The implementation should return as many requested parameters
* as possible, even if one or more are not supported.
*
* @param context provides more information about the request
* @param keys keys of the requested parameters
* @return retval operation completion status.
* OK must be returned if keys is empty.
* NOT_SUPPORTED must be returned if at least one key is unknown.
* @return parameters parameter key value pairs.
* Must contain the value of all requested keys if retval == OK
*/
getParameters(vec<ParameterValue> context, vec<string> keys)
generates (Result retval, vec<ParameterValue> parameters);
/**
* Generic method for setting vendor-specific parameter values.
* The framework does not interpret the parameters, they are passed
* in an opaque manner between a vendor application and HAL.
*
* Multiple parameters can be set at the same time, though this is
* discouraged as it makes failure analysis harder.
*
* If possible, a failed setParameters should not impact the platform state.
*
* @param context provides more information about the request
* @param parameters parameter key value pairs.
* @return retval operation completion status.
* All parameters must be successfully set for OK to be returned
*/
setParameters(vec<ParameterValue> context, vec<ParameterValue> parameters)
generates (Result retval);
/**
* Returns an array with available microphones in the device.
*
* @return retval INVALID_STATE if the call is not successful,
* OK otherwise.
*
* @return microphones array with microphone info
*/
getMicrophones()
generates(Result retval, vec<MicrophoneInfo> microphones);
/**
* Notifies the device module about the connection state of an input/output
* device attached to it. Calling this method is equivalent to setting
* AUDIO_PARAMETER_DEVICE_[DIS]CONNECT on the legacy HAL.
*
* @param address audio device specification.
* @param connected whether the device is connected.
* @return retval operation completion status.
*/
setConnectedState(DeviceAddress address, bool connected)
generates (Result retval);
};
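
As an illustration of how a client drives IDevice, the hypothetical C++ sketch below opens an output stream. HIDL methods with multiple results deliver them through a callback argument; the helper name, the placeholder configuration values, and the bitfield casts here are assumptions for illustration only.

```cpp
// Hypothetical sketch: open an output stream on an IDevice proxy.
#include <android/hardware/audio/5.0/IDevice.h>
#include <android/hardware/audio/5.0/IStreamOut.h>

using ::android::sp;
using ::android::hardware::hidl_bitfield;
using namespace ::android::hardware::audio::V5_0;
using namespace ::android::hardware::audio::common::V5_0;

Result openSpeakerStream(const sp<IDevice>& device, sp<IStreamOut>* outStream) {
    DeviceAddress address{};
    address.device = AudioDevice::OUT_SPEAKER;

    AudioConfig config{};
    config.sampleRateHz = 48000;
    // Bitfield fields are plain integers in the generated code, hence the casts.
    config.channelMask =
        static_cast<hidl_bitfield<AudioChannelMask>>(AudioChannelMask::OUT_STEREO);
    config.format = AudioFormat::PCM_16_BIT;

    SourceMetadata metadata{};  // empty: no active tracks yet

    Result result = Result::NOT_INITIALIZED;
    // Multi-value HIDL methods return their results through a callback.
    auto ret = device->openOutputStream(
        /* ioHandle */ 1, address, config,
        static_cast<hidl_bitfield<AudioOutputFlag>>(AudioOutputFlag::NONE),
        metadata,
        [&](Result r, const sp<IStreamOut>& stream, const AudioConfig& suggested) {
            result = r;
            *outStream = stream;
            (void)suggested;  // on failure, carries the HAL's suggested config
        });
    return ret.isOk() ? result : Result::NOT_INITIALIZED;
}
```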

audio/5.0/IDevicesFactory.hal Normal file

@@ -0,0 +1,70 @@
/*
* Copyright (C) 2018 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package android.hardware.audio@5.0;
import android.hardware.audio.common@5.0;
import IDevice;
import IPrimaryDevice;
/** This factory allows a HAL implementation to be split into multiple independent
* devices (called modules in the pre-Treble API).
* Note that this division is arbitrary and implementations are free
* to only have a Primary device.
* The framework will query the devices according to audio_policy_configuration.xml.
*
* Each device name is arbitrary, provided by the vendor's audio_policy_configuration.xml,
* and only used to identify a device in this factory.
* The framework must not interpret the name, treating it as opaque vendor data,
* with the following exception:
* - the "r_submix" device, which must be present to support policyMixes (e.g. Android projected).
* Note that this device is included by default in a build derived from AOSP.
*
* Note that on AOSP Oreo (including MR1) the "a2dp" module does not use this API
* but is loaded directly from the system partition using the legacy API,
* due to limitations with the Bluetooth framework.
*/
interface IDevicesFactory {
/**
* Opens an audio device. To close the device, it is necessary to release
* references to the returned device object.
*
* @param device device name.
* @return retval operation completion status. Returns INVALID_ARGUMENTS
* if there is no corresponding hardware module found,
NOT_INITIALIZED if an error occurred while opening the hardware
* module.
* @return result the interface for the created device.
*/
openDevice(string device) generates (Result retval, IDevice result);
/**
* Opens the Primary audio device, which must be present.
* This method is not optional and must successfully return the primary device.
*
* This device must have the name "primary".
*
* The telephony stack uses this device to control the audio during a voice call.
*
* @return retval operation completion status. Must be OK.
* For debugging, return INVALID_ARGUMENTS if there is no corresponding
* hardware module found, NOT_INITIALIZED if an error occurred
* while opening the hardware module.
* @return result the interface for the created device.
*/
openPrimaryDevice() generates (Result retval, IPrimaryDevice result);
};
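
A hypothetical sketch of the openPrimaryDevice flow from C++ (the helper name is illustrative):

```cpp
// Sketch: open the mandatory "primary" device through the factory.
#include <android/hardware/audio/5.0/IDevicesFactory.h>
#include <android/hardware/audio/5.0/IPrimaryDevice.h>

using ::android::sp;
using namespace ::android::hardware::audio::V5_0;

sp<IPrimaryDevice> openPrimary(const sp<IDevicesFactory>& factory) {
    sp<IPrimaryDevice> primary;
    auto ret = factory->openPrimaryDevice(
        [&](Result retval, const sp<IPrimaryDevice>& result) {
            if (retval == Result::OK) {
                primary = result;  // hold a reference to keep the device open
            }
        });
    if (!ret.isOk()) {
        return nullptr;  // transport-level (Binder) failure
    }
    return primary;
}
```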

audio/5.0/IPrimaryDevice.hal Normal file

@@ -0,0 +1,195 @@
/*
* Copyright (C) 2018 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package android.hardware.audio@5.0;
import android.hardware.audio.common@5.0;
import IDevice;
interface IPrimaryDevice extends IDevice {
/**
* Sets the audio volume of a voice call.
*
* @param volume 1.0f means unity, 0.0f is zero.
* @return retval operation completion status.
*/
setVoiceVolume(float volume) generates (Result retval);
/**
* This method is used to notify the HAL about audio mode changes.
*
* @param mode new mode.
* @return retval operation completion status.
*/
setMode(AudioMode mode) generates (Result retval);
/**
* Sets the name of the current BT SCO headset. Calling this method
* is equivalent to setting legacy "bt_headset_name" parameter.
* The BT SCO headset name must only be used for debugging purposes.
* Optional method
*
* @param name the name of the current BT SCO headset (can be empty).
* @return retval operation completion status.
*/
setBtScoHeadsetDebugName(string name) generates (Result retval);
/**
* Gets whether BT SCO Noise Reduction and Echo Cancellation are enabled.
* Calling this method is equivalent to getting AUDIO_PARAMETER_KEY_BT_NREC
* on the legacy HAL.
*
* @return retval operation completion status.
* @return enabled whether BT SCO NR + EC are enabled.
*/
getBtScoNrecEnabled() generates (Result retval, bool enabled);
/**
* Sets whether BT SCO Noise Reduction and Echo Cancellation are enabled.
* Calling this method is equivalent to setting AUDIO_PARAMETER_KEY_BT_NREC
* on the legacy HAL.
* Optional method
*
* @param enabled whether BT SCO NR + EC are enabled.
* @return retval operation completion status.
*/
setBtScoNrecEnabled(bool enabled) generates (Result retval);
/**
* Gets whether BT SCO Wideband mode is enabled. Calling this method is
* equivalent to getting AUDIO_PARAMETER_KEY_BT_SCO_WB on the legacy HAL.
*
* @return retval operation completion status.
* @return enabled whether BT Wideband is enabled.
*/
getBtScoWidebandEnabled() generates (Result retval, bool enabled);
/**
* Sets whether BT SCO Wideband mode is enabled. Calling this method is
* equivalent to setting AUDIO_PARAMETER_KEY_BT_SCO_WB on the legacy HAL.
* Optional method
*
* @param enabled whether BT Wideband is enabled.
* @return retval operation completion status.
*/
setBtScoWidebandEnabled(bool enabled) generates (Result retval);
/**
* Gets whether BT HFP (Hands-Free Profile) is enabled. Calling this method
* is equivalent to getting "hfp_enable" parameter value on the legacy HAL.
*
* @return retval operation completion status.
* @return enabled whether BT HFP is enabled.
*/
getBtHfpEnabled() generates (Result retval, bool enabled);
/**
* Sets whether BT HFP (Hands-Free Profile) is enabled. Calling this method
* is equivalent to setting "hfp_enable" parameter on the legacy HAL.
* Optional method
*
* @param enabled whether BT HFP is enabled.
* @return retval operation completion status.
*/
setBtHfpEnabled(bool enabled) generates (Result retval);
/**
* Sets the sampling rate of BT HFP (Hands-Free Profile). Calling this
* method is equivalent to setting "hfp_set_sampling_rate" parameter
* on the legacy HAL.
* Optional method
*
* @param sampleRateHz sample rate in Hz.
* @return retval operation completion status.
*/
setBtHfpSampleRate(uint32_t sampleRateHz) generates (Result retval);
/**
* Sets the current output volume for BT HFP (Hands-Free Profile).
* Calling this method is equivalent to setting "hfp_volume" parameter value
* on the legacy HAL (except that legacy HAL implementations expect
* an integer value in the range from 0 to 15.)
* Optional method
*
* @param volume 1.0f means unity, 0.0f is zero.
* @return retval operation completion status.
*/
setBtHfpVolume(float volume) generates (Result retval);
enum TtyMode : int32_t {
OFF,
VCO,
HCO,
FULL
};
/**
* Gets current TTY mode selection. Calling this method is equivalent to
* getting AUDIO_PARAMETER_KEY_TTY_MODE on the legacy HAL.
*
* @return retval operation completion status.
* @return mode TTY mode.
*/
getTtyMode() generates (Result retval, TtyMode mode);
/**
* Sets current TTY mode. Calling this method is equivalent to setting
* AUDIO_PARAMETER_KEY_TTY_MODE on the legacy HAL.
*
* @param mode TTY mode.
* @return retval operation completion status.
*/
setTtyMode(TtyMode mode) generates (Result retval);
/**
* Gets whether Hearing Aid Compatibility - Telecoil (HAC-T) mode is
* enabled. Calling this method is equivalent to getting
* AUDIO_PARAMETER_KEY_HAC on the legacy HAL.
*
* @return retval operation completion status.
* @return enabled whether HAC mode is enabled.
*/
getHacEnabled() generates (Result retval, bool enabled);
/**
* Sets whether Hearing Aid Compatibility - Telecoil (HAC-T) mode is
* enabled. Calling this method is equivalent to setting
* AUDIO_PARAMETER_KEY_HAC on the legacy HAL.
* Optional method
*
* @param enabled whether HAC mode is enabled.
* @return retval operation completion status.
*/
setHacEnabled(bool enabled) generates (Result retval);
enum Rotation : int32_t {
DEG_0,
DEG_90,
DEG_180,
DEG_270
};
/**
* Updates HAL on the current rotation of the device relative to natural
* orientation. Calling this method is equivalent to setting legacy
* parameter "rotation".
*
* @param rotation rotation in degrees relative to natural device
* orientation.
* @return retval operation completion status.
*/
updateRotation(Rotation rotation) generates (Result retval);
};
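
A hypothetical snippet chaining the telephony-related methods above (the helper name and volume value are illustrative):

```cpp
// Sketch: a telephony-style sequence against the primary device.
#include <android/hardware/audio/5.0/IPrimaryDevice.h>

using ::android::sp;
using ::android::hardware::audio::common::V5_0::AudioMode;
using namespace ::android::hardware::audio::V5_0;

void enterVoiceCall(const sp<IPrimaryDevice>& primary) {
    // Switch the HAL into voice-call mode, then set the call volume.
    Result r = primary->setMode(AudioMode::IN_CALL);
    if (r == Result::OK) {
        primary->setVoiceVolume(0.8f);  // 1.0f is unity, 0.0f is zero
    }
}
```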

audio/5.0/IStream.hal Normal file

@@ -0,0 +1,310 @@
/*
* Copyright (C) 2018 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package android.hardware.audio@5.0;
import android.hardware.audio.common@5.0;
import android.hardware.audio.effect@5.0::IEffect;
interface IStream {
/**
* Return the frame size (number of bytes per audio frame).
*
* @return frameSize frame size in bytes.
*/
getFrameSize() generates (uint64_t frameSize);
/**
* Return the frame count of the buffer. Calling this method is equivalent
* to getting AUDIO_PARAMETER_STREAM_FRAME_COUNT on the legacy HAL.
*
* @return count frame count.
*/
getFrameCount() generates (uint64_t count);
/**
* Return the size of the input/output buffer in bytes for this stream.
* It must be a multiple of the frame size.
*
* @return bufferSize buffer size in bytes.
*/
getBufferSize() generates (uint64_t bufferSize);
/**
* Return the sampling rate in Hz.
*
* @return sampleRateHz sample rate in Hz.
*/
getSampleRate() generates (uint32_t sampleRateHz);
/**
* Return supported native sampling rates of the stream for a given format.
* A supported native sample rate is a sample rate that can be efficiently
* played by the hardware (typically without sample-rate conversions).
*
* This method is only called for dynamic profiles. If called for
* a non-dynamic profile, it should return NOT_SUPPORTED or the same list
* as in audio_policy_configuration.xml.
*
* Calling this method is equivalent to getting
* AUDIO_PARAMETER_STREAM_SUP_SAMPLING_RATES on the legacy HAL.
*
* @param format audio format for which the sample rates are supported.
* @return retval operation completion status.
* Must be OK if the format is supported.
* @return sampleRates supported sample rates.
*/
getSupportedSampleRates(AudioFormat format)
generates (Result retval, vec<uint32_t> sampleRates);
/**
* Sets the sampling rate of the stream. Calling this method is equivalent
* to setting AUDIO_PARAMETER_STREAM_SAMPLING_RATE on the legacy HAL.
* Optional method. If implemented, only called on a stopped stream.
*
* @param sampleRateHz sample rate in Hz.
* @return retval operation completion status.
*/
setSampleRate(uint32_t sampleRateHz) generates (Result retval);
/**
* Return the channel mask of the stream.
*
* @return mask channel mask.
*/
getChannelMask() generates (bitfield<AudioChannelMask> mask);
/**
* Return supported channel masks of the stream. Calling this method is
* equivalent to getting AUDIO_PARAMETER_STREAM_SUP_CHANNELS on the legacy
* HAL.
*
* @param format audio format for which the channel masks are supported.
* @return retval operation completion status.
* Must be OK if the format is supported.
* @return masks supported audio masks.
*/
getSupportedChannelMasks(AudioFormat format)
generates (Result retval, vec<bitfield<AudioChannelMask>> masks);
/**
* Sets the channel mask of the stream. Calling this method is equivalent to
* setting AUDIO_PARAMETER_STREAM_CHANNELS on the legacy HAL.
* Optional method
*
* @param mask channel mask.
* @return retval operation completion status.
*/
setChannelMask(bitfield<AudioChannelMask> mask) generates (Result retval);
/**
* Return the audio format of the stream.
*
* @return format audio format.
*/
getFormat() generates (AudioFormat format);
/**
* Return supported audio formats of the stream. Calling this method is
* equivalent to getting AUDIO_PARAMETER_STREAM_SUP_FORMATS on the legacy
* HAL.
*
* @return formats supported audio formats.
*/
getSupportedFormats() generates (vec<AudioFormat> formats);
/**
* Sets the audio format of the stream. Calling this method is equivalent to
* setting AUDIO_PARAMETER_STREAM_FORMAT on the legacy HAL.
* Optional method
*
* @param format audio format.
* @return retval operation completion status.
*/
setFormat(AudioFormat format) generates (Result retval);
/**
* Convenience method for retrieving several stream parameters in
* one transaction.
*
* @return sampleRateHz sample rate in Hz.
* @return mask channel mask.
* @return format audio format.
*/
getAudioProperties() generates (
uint32_t sampleRateHz, bitfield<AudioChannelMask> mask, AudioFormat format);
/**
* Applies audio effect to the stream.
*
* @param effectId effect ID (obtained from IEffectsFactory.createEffect) of
* the effect to apply.
* @return retval operation completion status.
*/
addEffect(uint64_t effectId) generates (Result retval);
/**
* Stops application of the effect to the stream.
*
* @param effectId effect ID (obtained from IEffectsFactory.createEffect) of
* the effect to remove.
* @return retval operation completion status.
*/
removeEffect(uint64_t effectId) generates (Result retval);
/**
* Put the audio hardware input/output into standby mode.
* Driver must exit from standby mode at the next I/O operation.
*
* @return retval operation completion status.
*/
standby() generates (Result retval);
/**
* Return the set of devices which this stream is connected to.
* Optional method
*
* @return retval operation completion status: OK or NOT_SUPPORTED.
* @return devices set of devices which this stream is connected to.
*/
getDevices() generates (Result retval, vec<DeviceAddress> devices);
/**
* Connects the stream to one or multiple devices.
*
* This method must only be used for HALs that do not support
* the 'IDevice.createAudioPatch' method. Calling this method is
* equivalent to setting AUDIO_PARAMETER_STREAM_ROUTING preceded
* by a device address in the legacy HAL interface.
*
* @param address device to connect the stream to.
* @return retval operation completion status.
*/
setDevices(vec<DeviceAddress> devices) generates (Result retval);
/**
* Sets the HW synchronization source. Calling this method is equivalent to
* setting AUDIO_PARAMETER_STREAM_HW_AV_SYNC on the legacy HAL.
* Optional method
*
* @param hwAvSync HW synchronization source
* @return retval operation completion status.
*/
setHwAvSync(AudioHwSync hwAvSync) generates (Result retval);
/**
* Generic method for retrieving vendor-specific parameter values.
* The framework does not interpret the parameters, they are passed
* in an opaque manner between a vendor application and HAL.
*
* Multiple parameters can be retrieved at the same time.
* The implementation should return as many requested parameters
* as possible, even if one or more are not supported.
*
* @param context provides more information about the request
* @param keys keys of the requested parameters
* @return retval operation completion status.
* OK must be returned if keys is empty.
* NOT_SUPPORTED must be returned if at least one key is unknown.
* @return parameters parameter key value pairs.
* Must contain the value of all requested keys if retval == OK
*/
getParameters(vec<ParameterValue> context, vec<string> keys)
generates (Result retval, vec<ParameterValue> parameters);
/**
* Generic method for setting vendor-specific parameter values.
* The framework does not interpret the parameters, they are passed
* in an opaque manner between a vendor application and HAL.
*
* Multiple parameters can be set at the same time, though this is
* discouraged as it makes failure analysis harder.
*
* If possible, a failed setParameters should not impact the platform state.
*
* @param context provides more information about the request
* @param parameters parameter key value pairs.
* @return retval operation completion status.
* All parameters must be successfully set for OK to be returned
*/
setParameters(vec<ParameterValue> context, vec<ParameterValue> parameters)
generates (Result retval);
/**
* Called by the framework to start a stream operating in mmap mode.
* createMmapBuffer() must be called before calling start().
* Function only implemented by streams operating in mmap mode.
*
* @return retval OK in case of success.
* NOT_SUPPORTED on non-mmap mode streams
* INVALID_STATE if called out of sequence
*/
start() generates (Result retval);
/**
* Called by the framework to stop a stream operating in mmap mode.
* Function only implemented by streams operating in mmap mode.
*
* @return retval OK in case of success.
* NOT_SUPPORTED on non-mmap mode streams
* INVALID_STATE if called out of sequence
*/
stop() generates (Result retval);
/**
* Called by the framework to retrieve information on the mmap buffer used for audio
* samples transfer.
* Function only implemented by streams operating in mmap mode.
*
* @param minSizeFrames minimum buffer size requested. The actual buffer
* size returned in struct MmapBufferInfo can be larger.
* @return retval OK in case of success.
* NOT_SUPPORTED on non-mmap mode streams
* NOT_INITIALIZED in case of memory allocation error
* INVALID_ARGUMENTS if the requested buffer size is too large
* INVALID_STATE if called out of sequence
* @return info a MmapBufferInfo struct containing information on the MMAP buffer created.
*/
createMmapBuffer(int32_t minSizeFrames)
generates (Result retval, MmapBufferInfo info);
/**
* Called by the framework to read current read/write position in the mmap buffer
* with associated time stamp.
* Function only implemented by streams operating in mmap mode.
*
* @return retval OK in case of success.
* NOT_SUPPORTED on non-mmap mode streams
* INVALID_STATE if called out of sequence
* @return position a MmapPosition struct containing current HW read/write position in frames
* with associated time stamp.
*/
getMmapPosition()
generates (Result retval, MmapPosition position);
/**
* Called by the framework to deinitialize the stream and free up
* all the currently allocated resources. It is recommended to close
* the stream on the client side as soon as it becomes unused.
*
* @return retval OK in case of success.
* NOT_SUPPORTED if called on IStream instead of input or
* output stream interface.
* INVALID_STATE if the stream was already closed.
*/
close() generates (Result retval);
};
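
Since getAudioProperties bundles three results into one transaction, a client reads them through a single callback, as in this hypothetical sketch:

```cpp
// Sketch: retrieve sample rate, channel mask and format in one call.
#include <cstdint>

#include <android/hardware/audio/5.0/IStream.h>

using ::android::sp;
using ::android::hardware::audio::common::V5_0::AudioFormat;
using namespace ::android::hardware::audio::V5_0;

void queryProperties(const sp<IStream>& stream) {
    stream->getAudioProperties(
        [](uint32_t sampleRateHz, auto channelMask, AudioFormat format) {
            // channelMask arrives as hidl_bitfield<AudioChannelMask>,
            // which is a plain integer type in the generated code.
            (void)sampleRateHz;
            (void)channelMask;
            (void)format;
        });
}
```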

audio/5.0/IStreamIn.hal Normal file

@@ -0,0 +1,168 @@
/*
* Copyright (C) 2018 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package android.hardware.audio@5.0;
import android.hardware.audio.common@5.0;
import IStream;
interface IStreamIn extends IStream {
/**
* Returns the source descriptor of the input stream. Calling this method is
* equivalent to getting AUDIO_PARAMETER_STREAM_INPUT_SOURCE on the legacy
* HAL.
* Optional method
*
* @return retval operation completion status.
* @return source audio source.
*/
getAudioSource() generates (Result retval, AudioSource source);
/**
* Set the input gain for the audio driver.
* Optional method
*
* @param gain 1.0f is unity, 0.0f is zero.
* @return retval operation completion status.
*/
setGain(float gain) generates (Result retval);
/**
* Commands that can be executed on the driver reader thread.
*/
enum ReadCommand : int32_t {
READ,
GET_CAPTURE_POSITION
};
/**
* Data structure passed to the driver for executing commands
* on the driver reader thread.
*/
struct ReadParameters {
ReadCommand command; // discriminator
union Params {
uint64_t read; // READ command, amount of bytes to read, >= 0.
// No parameters for GET_CAPTURE_POSITION.
} params;
};
/**
* Data structure passed back to the client via status message queue
* of 'read' operation.
*
* Possible values of 'retval' field:
* - OK, read operation was successful;
* - INVALID_ARGUMENTS, stream was not configured properly;
* - INVALID_STATE, stream is in a state that doesn't allow reads.
*/
struct ReadStatus {
Result retval;
ReadCommand replyTo; // discriminator
union Reply {
uint64_t read; // READ command, amount of bytes read, >= 0.
struct CapturePosition { // same as generated by getCapturePosition.
uint64_t frames;
uint64_t time;
} capturePosition;
} reply;
};
/**
* Called when the metadata of the stream's sink has been changed.
* @param sinkMetadata Description of the audio that is suggested by the clients.
*/
updateSinkMetadata(SinkMetadata sinkMetadata);
/**
* Set up required transports for receiving audio buffers from the driver.
*
* The transport consists of three message queues:
* -- command queue is used to instruct the reader thread what operation
* to perform;
* -- data queue is used for passing audio data from the driver
* to the client;
* -- status queue is used for reporting operation status
* (e.g. amount of bytes actually read or error code).
*
* The driver operates on a dedicated thread. The client must ensure that
* the thread is given an appropriate priority and assigned to correct
* scheduler and cgroup. For this purpose, the method returns identifiers
* of the driver thread.
*
* @param frameSize the size of a single frame, in bytes.
* @param framesCount the number of frames in a buffer.
* @param threadPriority priority of the driver thread.
* @return retval OK if both message queues were created successfully.
* INVALID_STATE if the method was already called.
* INVALID_ARGUMENTS if there was a problem setting up
* the queues.
* @return commandMQ a message queue used for passing commands.
* @return dataMQ a message queue used for passing audio data in the format
* specified at the stream opening.
* @return statusMQ a message queue used for passing status from the driver
* using ReadStatus structures.
* @return threadInfo identifiers of the driver's dedicated thread.
*/
prepareForReading(uint32_t frameSize, uint32_t framesCount)
generates (
Result retval,
fmq_sync<ReadParameters> commandMQ,
fmq_sync<uint8_t> dataMQ,
fmq_sync<ReadStatus> statusMQ,
ThreadInfo threadInfo);
/**
* Return the amount of input frames lost in the audio driver since the last
* call of this function.
*
* The audio driver is expected to reset the value to 0 and restart counting
* upon returning the current value from this call. Such loss
* typically occurs when the user space process is blocked longer than the
* capacity of audio driver buffers.
*
* @return framesLost the number of input audio frames lost.
*/
getInputFramesLost() generates (uint32_t framesLost);
/**
* Return a recent count of the number of audio frames received and the
* clock time associated with that frame count.
*
* @return retval INVALID_STATE if the device is not ready/available,
* NOT_SUPPORTED if the command is not supported,
* OK otherwise.
* @return frames the total frame count received. This must be as early in
* the capture pipeline as possible. In general, frames
* must be non-negative and must not go "backwards".
* @return time is the clock monotonic time when frames was measured. In
* general, time must be a positive quantity and must not
* go "backwards".
*/
getCapturePosition()
generates (Result retval, uint64_t frames, uint64_t time);
/**
* Returns an array with active microphones in the stream.
*
* @return retval INVALID_STATE if the call is not successful,
* OK otherwise.
*
* @return microphones array with microphones info
*/
getActiveMicrophones()
generates(Result retval, vec<MicrophoneInfo> microphones);
};
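
A hypothetical client-side sketch of the read protocol above: obtain the command queue from prepareForReading, then enqueue a READ. The real framework additionally wakes the reader thread through an EventFlag built on MessageQueueFlagBits; that signaling (and the data/status queues) is omitted here for brevity, so treat this as a simplified illustration.

```cpp
// Simplified sketch: set up the capture transport and request one read.
#include <memory>

#include <android/hardware/audio/5.0/IStreamIn.h>
#include <fmq/MessageQueue.h>

using ::android::sp;
using ::android::hardware::kSynchronizedReadWrite;
using ::android::hardware::MessageQueue;
using namespace ::android::hardware::audio::V5_0;

using CommandMQ = MessageQueue<IStreamIn::ReadParameters, kSynchronizedReadWrite>;

std::unique_ptr<CommandMQ> startReading(const sp<IStreamIn>& stream,
                                        uint32_t frameSize, uint32_t framesCount) {
    std::unique_ptr<CommandMQ> commandMQ;
    stream->prepareForReading(
        frameSize, framesCount,
        [&](Result retval, const auto& commandDesc, const auto& dataDesc,
            const auto& statusDesc, const auto& threadInfo) {
            (void)dataDesc; (void)statusDesc; (void)threadInfo;
            if (retval == Result::OK) {
                // Rebuild the client end of the queue from its descriptor.
                commandMQ = std::make_unique<CommandMQ>(commandDesc);
            }
        });
    if (commandMQ != nullptr && commandMQ->isValid()) {
        IStreamIn::ReadParameters cmd{};
        cmd.command = IStreamIn::ReadCommand::READ;
        cmd.params.read = uint64_t{frameSize} * framesCount;  // bytes to capture
        commandMQ->write(&cmd);  // the driver's reader thread consumes this
    }
    return commandMQ;
}
```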

audio/5.0/IStreamOut.hal Normal file

@@ -0,0 +1,279 @@
/*
* Copyright (C) 2018 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package android.hardware.audio@5.0;
import android.hardware.audio.common@5.0;
import IStream;
import IStreamOutCallback;
interface IStreamOut extends IStream {
/**
* Return the audio hardware driver's estimated latency in milliseconds.
*
* @return latencyMs latency in milliseconds.
*/
getLatency() generates (uint32_t latencyMs);
/**
* This method is used in situations where audio mixing is done in the
* hardware. It serves as a direct interface with the hardware,
* allowing the volume to be set directly rather than via the framework.
* It might be used for streams that produce multiple PCM outputs or use
* hardware accelerated codecs, such as MP3 or AAC.
* Optional method
*
* @param left left channel attenuation, 1.0f is unity, 0.0f is zero.
* @param right right channel attenuation, 1.0f is unity, 0.0f is zero.
* @return retval operation completion status.
* If a volume is outside [0,1], return INVALID_ARGUMENTS
*/
setVolume(float left, float right) generates (Result retval);
/**
* Commands that can be executed on the driver writer thread.
*/
enum WriteCommand : int32_t {
WRITE,
GET_PRESENTATION_POSITION,
GET_LATENCY
};
/**
* Data structure passed back to the client via status message queue
* of 'write' operation.
*
* Possible values of 'retval' field:
* - OK, write operation was successful;
* - INVALID_ARGUMENTS, stream was not configured properly;
* - INVALID_STATE, stream is in a state that doesn't allow writes;
* - INVALID_OPERATION, retrieving presentation position isn't supported.
*/
struct WriteStatus {
Result retval;
WriteCommand replyTo; // discriminator
union Reply {
uint64_t written; // WRITE command, amount of bytes written, >= 0.
struct PresentationPosition { // same as generated by
uint64_t frames; // getPresentationPosition.
TimeSpec timeStamp;
} presentationPosition;
uint32_t latencyMs; // Same as generated by getLatency.
} reply;
};
/**
* Called when the metadata of the stream's source has been changed.
* @param sourceMetadata Description of the audio that is played by the clients.
*/
updateSourceMetadata(SourceMetadata sourceMetadata);
/**
* Set up required transports for passing audio buffers to the driver.
*
* The transport consists of three message queues:
* -- command queue is used to instruct the writer thread what operation
* to perform;
* -- data queue is used for passing audio data from the client
* to the driver;
* -- status queue is used for reporting operation status
* (e.g. amount of bytes actually written or error code).
*
* The driver operates on a dedicated thread. The client must ensure that
* the thread is given an appropriate priority and assigned to correct
* scheduler and cgroup. For this purpose, the method returns identifiers
* of the driver thread.
*
* @param frameSize the size of a single frame, in bytes.
* @param framesCount the number of frames in a buffer.
* @return retval OK if both message queues were created successfully.
* INVALID_STATE if the method was already called.
* INVALID_ARGUMENTS if there was a problem setting up
* the queues.
* @return commandMQ a message queue used for passing commands.
* @return dataMQ a message queue used for passing audio data in the format
* specified at the stream opening.
* @return statusMQ a message queue used for passing status from the driver
* using WriteStatus structures.
* @return threadInfo identifiers of the driver's dedicated thread.
*/
prepareForWriting(uint32_t frameSize, uint32_t framesCount)
generates (
Result retval,
fmq_sync<WriteCommand> commandMQ,
fmq_sync<uint8_t> dataMQ,
fmq_sync<WriteStatus> statusMQ,
ThreadInfo threadInfo);
/**
* Return the number of audio frames written by the audio DSP to DAC since
* the output has exited standby.
* Optional method
*
* @return retval operation completion status.
* @return dspFrames number of audio frames written.
*/
getRenderPosition() generates (Result retval, uint32_t dspFrames);
/**
* Get the local time at which the next write to the audio driver will be
* presented. The units are microseconds, where the epoch is decided by the
* local audio HAL.
* Optional method
*
* @return retval operation completion status.
* @return timestampUs time of the next write.
*/
getNextWriteTimestamp() generates (Result retval, int64_t timestampUs);
/**
* Set the callback interface for notifying completion of non-blocking
* write and drain.
*
* Calling this function implies that all future 'write' and 'drain'
* must be non-blocking and use the callback to signal completion.
*
* 'clearCallback' method needs to be called in order to release the local
* callback proxy on the server side and thus dereference the callback
* implementation on the client side.
*
* @return retval operation completion status.
*/
setCallback(IStreamOutCallback callback) generates (Result retval);
/**
* Clears the callback previously set via 'setCallback' method.
*
* Warning: failure to call this method results in the callback implementation
* on the client side being held until the HAL server terminates.
*
* If no callback was previously set, the method should be a no-op
* and return OK.
*
* @return retval operation completion status: OK or NOT_SUPPORTED.
*/
clearCallback() generates (Result retval);
/**
* Returns whether HAL supports pausing and resuming of streams.
*
* @return supportsPause true if pausing is supported.
* @return supportsResume true if resume is supported.
*/
supportsPauseAndResume()
generates (bool supportsPause, bool supportsResume);
/**
* Notifies the audio driver to stop playback; however, the queued buffers
* are retained by the hardware. Useful for implementing pause/resume. An
* empty implementation is allowed if not supported; however, it must be
* implemented for hardware with non-trivial latency. In the pause state,
* some audio hardware may still be using power. Client code may consider
* calling 'suspend' after a timeout to prevent that excess power usage.
*
* Implementation of this function is mandatory for offloaded playback.
*
* @return retval operation completion status.
*/
pause() generates (Result retval);
/**
* Notifies the audio driver to resume playback following a pause.
* Returns error INVALID_STATE if called without matching pause.
*
* Implementation of this function is mandatory for offloaded playback.
*
* @return retval operation completion status.
*/
resume() generates (Result retval);
/**
* Returns whether HAL supports draining of streams.
*
* @return supports true if draining is supported.
*/
supportsDrain() generates (bool supports);
/**
* Requests notification when data buffered by the driver/hardware has been
* played. If 'setCallback' has previously been called to enable
* non-blocking mode, then 'drain' must not block, instead it must return
* quickly and completion of the drain is notified through the callback. If
* 'setCallback' has not been called, then 'drain' must block until
* completion.
*
* If 'type' is 'ALL', the drain completes when all previously written data
* has been played.
*
* If 'type' is 'EARLY_NOTIFY', the drain completes shortly before all data
* for the current track has played to allow time for the framework to
* perform a gapless track switch.
*
* Drain must return immediately on 'stop' and 'flush' calls.
*
* Implementation of this function is mandatory for offloaded playback.
*
* @param type type of drain.
* @return retval operation completion status.
*/
drain(AudioDrain type) generates (Result retval);
/**
* Notifies the audio driver to flush the queued data. The stream must
* already be paused before calling 'flush'.
* Optional method
*
* Implementation of this function is mandatory for offloaded playback.
*
* @return retval operation completion status.
*/
flush() generates (Result retval);
/**
* Return a recent count of the number of audio frames presented to an
* external observer. This excludes frames which have been written but are
* still in the pipeline. The count is not reset to zero when output enters
* standby. Also returns the value of CLOCK_MONOTONIC as of this
* presentation count. The returned count is expected to be 'recent', but
* does not need to be the most recent possible value. However, the
* associated time must correspond to whatever count is returned.
*
* Example: assume that N+M frames have been presented, where M is a 'small'
* number. Then it is permissible to return N instead of N+M, and the
* timestamp must correspond to N rather than N+M. The terms 'recent' and
* 'small' are not defined. They reflect the quality of the implementation.
*
* Optional method
*
* @return retval operation completion status.
* @return frames count of presented audio frames.
* @return timeStamp associated clock time.
*/
getPresentationPosition()
generates (Result retval, uint64_t frames, TimeSpec timeStamp);
/**
* Selects a presentation for decoding from a next generation media stream
* (as defined per ETSI TS 103 190-2) and a program within the presentation.
* Optional method
*
* @param presentationId selected audio presentation.
* @param programId refinement for the presentation.
* @return retval operation completion status.
*/
selectPresentation(int32_t presentationId, int32_t programId)
generates (Result retval);
};
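
A hypothetical sketch of the pause/resume contract above; per the documentation, resume must only follow a matching pause:

```cpp
// Sketch: pause and later resume an output stream, if supported.
#include <android/hardware/audio/5.0/IStreamOut.h>

using ::android::sp;
using namespace ::android::hardware::audio::V5_0;

void pauseThenResume(const sp<IStreamOut>& stream) {
    bool canPause = false;
    bool canResume = false;
    stream->supportsPauseAndResume([&](bool p, bool r) {
        canPause = p;
        canResume = r;
    });
    if (canPause) {
        Result pr = stream->pause();
        if (pr == Result::OK && canResume) {
            // ... later, resume only after a successful pause.
            stream->resume();  // INVALID_STATE without a matching pause
        }
    }
}
```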

audio/5.0/IStreamOutCallback.hal Normal file

@@ -0,0 +1,37 @@
/*
* Copyright (C) 2018 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package android.hardware.audio@5.0;
/**
* Asynchronous write callback interface.
*/
interface IStreamOutCallback {
/**
* Non-blocking write completed.
*/
oneway onWriteReady();
/**
* Drain completed.
*/
oneway onDrainReady();
/**
* Stream hit an error.
*/
oneway onError();
};
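
A hypothetical client-side implementation of this callback; in the generated C++, each oneway method returns Return<void>:

```cpp
// Sketch: implement the asynchronous write/drain callback.
#include <android/hardware/audio/5.0/IStreamOutCallback.h>

using ::android::hardware::Return;
using ::android::hardware::Void;
using namespace ::android::hardware::audio::V5_0;

struct WriteCallback : public IStreamOutCallback {
    Return<void> onWriteReady() override {
        // A non-blocking write completed; more data can be queued.
        return Void();
    }
    Return<void> onDrainReady() override {
        // Drain finished; e.g. start the next track for a gapless switch.
        return Void();
    }
    Return<void> onError() override {
        // The stream hit an error; the client should tear down and reopen.
        return Void();
    }
};
// Registered via streamOut->setCallback(new WriteCallback()) and released
// with streamOut->clearCallback() when no longer needed.
```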

audio/5.0/config/audio_policy_configuration.xsd Normal file

@@ -0,0 +1,595 @@
<?xml version="1.0" encoding="UTF-8"?>
<!-- Copyright (C) 2017 The Android Open Source Project
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<!-- TODO: define a targetNamespace. Note that it will break backward compatibility -->
<xs:schema version="2.0"
elementFormDefault="qualified"
attributeFormDefault="unqualified"
xmlns:xs="http://www.w3.org/2001/XMLSchema">
<!-- List the config versions supported by audio policy. -->
<xs:simpleType name="version">
<xs:restriction base="xs:decimal">
<xs:enumeration value="1.0"/>
</xs:restriction>
</xs:simpleType>
<xs:simpleType name="halVersion">
<xs:annotation>
<xs:documentation xml:lang="en">
Version of the interface the HAL implements.
</xs:documentation>
</xs:annotation>
<xs:restriction base="xs:decimal">
<!-- List of HAL versions supported by the framework. -->
<xs:enumeration value="2.0"/>
<xs:enumeration value="3.0"/>
</xs:restriction>
</xs:simpleType>
<xs:element name="audioPolicyConfiguration">
<xs:complexType>
<xs:sequence>
<xs:element name="globalConfiguration" type="globalConfiguration"/>
<xs:element name="modules" type="modules" maxOccurs="unbounded"/>
<xs:element name="volumes" type="volumes" maxOccurs="unbounded"/>
<xs:element name="surroundSound" type="surroundSound" />
</xs:sequence>
<xs:attribute name="version" type="version"/>
</xs:complexType>
<xs:key name="moduleNameKey">
<xs:selector xpath="modules/module"/>
<xs:field xpath="@name"/>
</xs:key>
<xs:unique name="volumeTargetUniqueness">
<xs:selector xpath="volumes/volume"/>
<xs:field xpath="@stream"/>
<xs:field xpath="@deviceCategory"/>
</xs:unique>
<xs:key name="volumeCurveNameKey">
<xs:selector xpath="volumes/reference"/>
<xs:field xpath="@name"/>
</xs:key>
<xs:keyref name="volumeCurveRef" refer="volumeCurveNameKey">
<xs:selector xpath="volumes/volume"/>
<xs:field xpath="@ref"/>
</xs:keyref>
</xs:element>
<xs:complexType name="globalConfiguration">
<xs:attribute name="speaker_drc_enabled" type="xs:boolean" use="required"/>
</xs:complexType>
<xs:complexType name="modules">
<xs:annotation>
<xs:documentation xml:lang="en">
There should be one section per audio HW module present on the platform.
Each <module/> contains two mandatory tags: “halVersion” and “name”.
The module "name" is the same as in previous .conf file.
Each module must contain the following sections:
- <devicePorts/>: a list of device descriptors for all
input and output devices accessible via this module.
This contains both permanently attached devices and removable devices.
- <mixPorts/>: listing all output and input streams exposed by the audio HAL
- <routes/>: list of possible connections between input
and output devices or between stream and devices.
A <route/> is defined by a set of 3 attributes:
-"type": mux|mix means all sources are mutual exclusive (mux) or can be mixed (mix)
-"sink": the sink involved in this route
-"sources": all the sources than can be connected to the sink via this route
- <attachedDevices/>: permanently attached devices.
The attachedDevices section is a list of devices names.
Their names correspond to device names defined in "devicePorts" section.
- <defaultOutputDevice/> is the device to be used when no policy rule applies
</xs:documentation>
</xs:annotation>
<xs:sequence>
<xs:element name="module" maxOccurs="unbounded">
<xs:complexType>
<xs:sequence>
<xs:element name="attachedDevices" type="attachedDevices" minOccurs="0">
<xs:unique name="attachedDevicesUniqueness">
<xs:selector xpath="item"/>
<xs:field xpath="."/>
</xs:unique>
</xs:element>
<xs:element name="defaultOutputDevice" type="xs:token" minOccurs="0"/>
<xs:element name="mixPorts" type="mixPorts" minOccurs="0"/>
<xs:element name="devicePorts" type="devicePorts" minOccurs="0"/>
<xs:element name="routes" type="routes" minOccurs="0"/>
</xs:sequence>
<xs:attribute name="name" type="xs:string" use="required"/>
<xs:attribute name="halVersion" type="halVersion" use="required"/>
</xs:complexType>
<xs:unique name="mixPortNameUniqueness">
<xs:selector xpath="mixPorts/mixPort"/>
<xs:field xpath="@name"/>
</xs:unique>
<xs:key name="devicePortNameKey">
<xs:selector xpath="devicePorts/devicePort"/>
<xs:field xpath="@tagName"/>
</xs:key>
<xs:unique name="devicePortUniqueness">
<xs:selector xpath="devicePorts/devicePort"/>
<xs:field xpath="@type"/>
<xs:field xpath="@address"/>
</xs:unique>
<xs:keyref name="defaultOutputDeviceRef" refer="devicePortNameKey">
<xs:selector xpath="defaultOutputDevice"/>
<xs:field xpath="."/>
</xs:keyref>
<xs:keyref name="attachedDeviceRef" refer="devicePortNameKey">
<xs:selector xpath="attachedDevices/item"/>
<xs:field xpath="."/>
</xs:keyref>
<!-- The following 3 constraints try to make sure each sink port
is referenced in one and only one route. -->
<xs:key name="routeSinkKey">
<!-- predicate [@type='sink'] does not work in xsd 1.0 -->
<xs:selector xpath="devicePorts/devicePort|mixPorts/mixPort"/>
<xs:field xpath="@tagName|@name"/>
</xs:key>
<xs:keyref name="routeSinkRef" refer="routeSinkKey">
<xs:selector xpath="routes/route"/>
<xs:field xpath="@sink"/>
</xs:keyref>
<xs:unique name="routeUniqueness">
<xs:selector xpath="routes/route"/>
<xs:field xpath="@sink"/>
</xs:unique>
</xs:element>
</xs:sequence>
</xs:complexType>
<xs:complexType name="attachedDevices">
<xs:sequence>
<xs:element name="item" type="xs:token" minOccurs="0" maxOccurs="unbounded"/>
</xs:sequence>
</xs:complexType>
<!-- TODO: separate values by space for better xsd validations. -->
<xs:simpleType name="audioInOutFlags">
<xs:annotation>
<xs:documentation xml:lang="en">
"|" separated list of audio_output_flags_t or audio_input_flags_t.
</xs:documentation>
</xs:annotation>
<xs:restriction base="xs:string">
<xs:pattern value="|[_A-Z]+(\|[_A-Z]+)*"/>
</xs:restriction>
</xs:simpleType>
<xs:simpleType name="role">
<xs:restriction base="xs:string">
<xs:enumeration value="sink"/>
<xs:enumeration value="source"/>
</xs:restriction>
</xs:simpleType>
<xs:complexType name="mixPorts">
<xs:sequence>
<xs:element name="mixPort" minOccurs="0" maxOccurs="unbounded">
<xs:complexType>
<xs:sequence>
<xs:element name="profile" type="profile" minOccurs="0" maxOccurs="unbounded"/>
<xs:element name="gains" type="gains" minOccurs="0"/>
</xs:sequence>
<xs:attribute name="name" type="xs:token" use="required"/>
<xs:attribute name="role" type="role" use="required"/>
<xs:attribute name="flags" type="audioInOutFlags"/>
<xs:attribute name="maxOpenCount" type="xs:unsignedInt"/>
<xs:attribute name="maxActiveCount" type="xs:unsignedInt"/>
<xs:attribute name="preferredUsage" type="audioUsageList">
<xs:annotation>
<xs:documentation xml:lang="en">
When choosing the mixPort of an audio track, the audioPolicy
first considers the mixPorts whose preferredUsage includes
the track's AudioUsage.
If none support the track format, the other mixPorts are considered.
Eg: a <mixPort preferredUsage="AUDIO_USAGE_MEDIA" /> will receive
the audio of all apps playing with a MEDIA usage.
It may receive audio from ALARM if there is no audio-compatible
<mixPort preferredUsage="AUDIO_USAGE_ALARM" />.
</xs:documentation>
</xs:annotation>
</xs:attribute>
</xs:complexType>
<xs:unique name="mixPortProfileUniqueness">
<xs:selector xpath="profile"/>
<xs:field xpath="format"/>
<xs:field xpath="samplingRate"/>
<xs:field xpath="channelMasks"/>
</xs:unique>
<xs:unique name="mixPortGainUniqueness">
<xs:selector xpath="gains/gain"/>
<xs:field xpath="@name"/>
</xs:unique>
</xs:element>
</xs:sequence>
</xs:complexType>
<!-- Enum values of audio_device_t in audio.h
TODO: generate from hidl to avoid manual sync.
TODO: separate source and sink in the xml for better xsd validations. -->
<xs:simpleType name="audioDevice">
<xs:restriction base="xs:string">
<xs:enumeration value="AUDIO_DEVICE_NONE"/>
<xs:enumeration value="AUDIO_DEVICE_OUT_EARPIECE"/>
<xs:enumeration value="AUDIO_DEVICE_OUT_SPEAKER"/>
<xs:enumeration value="AUDIO_DEVICE_OUT_WIRED_HEADSET"/>
<xs:enumeration value="AUDIO_DEVICE_OUT_WIRED_HEADPHONE"/>
<xs:enumeration value="AUDIO_DEVICE_OUT_BLUETOOTH_SCO"/>
<xs:enumeration value="AUDIO_DEVICE_OUT_BLUETOOTH_SCO_HEADSET"/>
<xs:enumeration value="AUDIO_DEVICE_OUT_BLUETOOTH_SCO_CARKIT"/>
<xs:enumeration value="AUDIO_DEVICE_OUT_BLUETOOTH_A2DP"/>
<xs:enumeration value="AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES"/>
<xs:enumeration value="AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_SPEAKER"/>
<xs:enumeration value="AUDIO_DEVICE_OUT_AUX_DIGITAL"/>
<xs:enumeration value="AUDIO_DEVICE_OUT_HDMI"/>
<xs:enumeration value="AUDIO_DEVICE_OUT_ANLG_DOCK_HEADSET"/>
<xs:enumeration value="AUDIO_DEVICE_OUT_DGTL_DOCK_HEADSET"/>
<xs:enumeration value="AUDIO_DEVICE_OUT_USB_ACCESSORY"/>
<xs:enumeration value="AUDIO_DEVICE_OUT_USB_DEVICE"/>
<xs:enumeration value="AUDIO_DEVICE_OUT_REMOTE_SUBMIX"/>
<xs:enumeration value="AUDIO_DEVICE_OUT_TELEPHONY_TX"/>
<xs:enumeration value="AUDIO_DEVICE_OUT_LINE"/>
<xs:enumeration value="AUDIO_DEVICE_OUT_HDMI_ARC"/>
<xs:enumeration value="AUDIO_DEVICE_OUT_SPDIF"/>
<xs:enumeration value="AUDIO_DEVICE_OUT_FM"/>
<xs:enumeration value="AUDIO_DEVICE_OUT_AUX_LINE"/>
<xs:enumeration value="AUDIO_DEVICE_OUT_SPEAKER_SAFE"/>
<xs:enumeration value="AUDIO_DEVICE_OUT_IP"/>
<xs:enumeration value="AUDIO_DEVICE_OUT_BUS"/>
<xs:enumeration value="AUDIO_DEVICE_OUT_PROXY"/>
<xs:enumeration value="AUDIO_DEVICE_OUT_USB_HEADSET"/>
<xs:enumeration value="AUDIO_DEVICE_OUT_HEARING_AID"/>
<xs:enumeration value="AUDIO_DEVICE_OUT_ECHO_CANCELLER"/>
<xs:enumeration value="AUDIO_DEVICE_OUT_DEFAULT"/>
<xs:enumeration value="AUDIO_DEVICE_OUT_STUB"/>
<!-- Due to the xml format, IN types cannot be separated from OUT types -->
<xs:enumeration value="AUDIO_DEVICE_IN_COMMUNICATION"/>
<xs:enumeration value="AUDIO_DEVICE_IN_AMBIENT"/>
<xs:enumeration value="AUDIO_DEVICE_IN_BUILTIN_MIC"/>
<xs:enumeration value="AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET"/>
<xs:enumeration value="AUDIO_DEVICE_IN_WIRED_HEADSET"/>
<xs:enumeration value="AUDIO_DEVICE_IN_AUX_DIGITAL"/>
<xs:enumeration value="AUDIO_DEVICE_IN_HDMI"/>
<xs:enumeration value="AUDIO_DEVICE_IN_VOICE_CALL"/>
<xs:enumeration value="AUDIO_DEVICE_IN_TELEPHONY_RX"/>
<xs:enumeration value="AUDIO_DEVICE_IN_BACK_MIC"/>
<xs:enumeration value="AUDIO_DEVICE_IN_REMOTE_SUBMIX"/>
<xs:enumeration value="AUDIO_DEVICE_IN_ANLG_DOCK_HEADSET"/>
<xs:enumeration value="AUDIO_DEVICE_IN_DGTL_DOCK_HEADSET"/>
<xs:enumeration value="AUDIO_DEVICE_IN_USB_ACCESSORY"/>
<xs:enumeration value="AUDIO_DEVICE_IN_USB_DEVICE"/>
<xs:enumeration value="AUDIO_DEVICE_IN_FM_TUNER"/>
<xs:enumeration value="AUDIO_DEVICE_IN_TV_TUNER"/>
<xs:enumeration value="AUDIO_DEVICE_IN_LINE"/>
<xs:enumeration value="AUDIO_DEVICE_IN_SPDIF"/>
<xs:enumeration value="AUDIO_DEVICE_IN_BLUETOOTH_A2DP"/>
<xs:enumeration value="AUDIO_DEVICE_IN_LOOPBACK"/>
<xs:enumeration value="AUDIO_DEVICE_IN_IP"/>
<xs:enumeration value="AUDIO_DEVICE_IN_BUS"/>
<xs:enumeration value="AUDIO_DEVICE_IN_PROXY"/>
<xs:enumeration value="AUDIO_DEVICE_IN_USB_HEADSET"/>
<xs:enumeration value="AUDIO_DEVICE_IN_BLUETOOTH_BLE"/>
<xs:enumeration value="AUDIO_DEVICE_IN_DEFAULT"/>
<xs:enumeration value="AUDIO_DEVICE_IN_STUB"/>
</xs:restriction>
</xs:simpleType>
<!-- Enum values of audio_format_t in audio.h
TODO: generate from hidl to avoid manual sync. -->
<xs:simpleType name="audioFormat">
<xs:restriction base="xs:string">
<xs:enumeration value="AUDIO_FORMAT_PCM_16_BIT" />
<xs:enumeration value="AUDIO_FORMAT_PCM_8_BIT"/>
<xs:enumeration value="AUDIO_FORMAT_PCM_32_BIT"/>
<xs:enumeration value="AUDIO_FORMAT_PCM_8_24_BIT"/>
<xs:enumeration value="AUDIO_FORMAT_PCM_FLOAT"/>
<xs:enumeration value="AUDIO_FORMAT_PCM_24_BIT_PACKED"/>
<xs:enumeration value="AUDIO_FORMAT_MP3"/>
<xs:enumeration value="AUDIO_FORMAT_AMR_NB"/>
<xs:enumeration value="AUDIO_FORMAT_AMR_WB"/>
<xs:enumeration value="AUDIO_FORMAT_AAC"/>
<xs:enumeration value="AUDIO_FORMAT_AAC_MAIN"/>
<xs:enumeration value="AUDIO_FORMAT_AAC_LC"/>
<xs:enumeration value="AUDIO_FORMAT_AAC_SSR"/>
<xs:enumeration value="AUDIO_FORMAT_AAC_LTP"/>
<xs:enumeration value="AUDIO_FORMAT_AAC_HE_V1"/>
<xs:enumeration value="AUDIO_FORMAT_AAC_SCALABLE"/>
<xs:enumeration value="AUDIO_FORMAT_AAC_ERLC"/>
<xs:enumeration value="AUDIO_FORMAT_AAC_LD"/>
<xs:enumeration value="AUDIO_FORMAT_AAC_HE_V2"/>
<xs:enumeration value="AUDIO_FORMAT_AAC_ELD"/>
<xs:enumeration value="AUDIO_FORMAT_AAC_ADTS_MAIN"/>
<xs:enumeration value="AUDIO_FORMAT_AAC_ADTS_LC"/>
<xs:enumeration value="AUDIO_FORMAT_AAC_ADTS_SSR"/>
<xs:enumeration value="AUDIO_FORMAT_AAC_ADTS_LTP"/>
<xs:enumeration value="AUDIO_FORMAT_AAC_ADTS_HE_V1"/>
<xs:enumeration value="AUDIO_FORMAT_AAC_ADTS_SCALABLE"/>
<xs:enumeration value="AUDIO_FORMAT_AAC_ADTS_ERLC"/>
<xs:enumeration value="AUDIO_FORMAT_AAC_ADTS_LD"/>
<xs:enumeration value="AUDIO_FORMAT_AAC_ADTS_HE_V2"/>
<xs:enumeration value="AUDIO_FORMAT_AAC_ADTS_ELD"/>
<xs:enumeration value="AUDIO_FORMAT_VORBIS"/>
<xs:enumeration value="AUDIO_FORMAT_HE_AAC_V1"/>
<xs:enumeration value="AUDIO_FORMAT_HE_AAC_V2"/>
<xs:enumeration value="AUDIO_FORMAT_OPUS"/>
<xs:enumeration value="AUDIO_FORMAT_AC3"/>
<xs:enumeration value="AUDIO_FORMAT_E_AC3"/>
<xs:enumeration value="AUDIO_FORMAT_DTS"/>
<xs:enumeration value="AUDIO_FORMAT_DTS_HD"/>
<xs:enumeration value="AUDIO_FORMAT_IEC61937"/>
<xs:enumeration value="AUDIO_FORMAT_DOLBY_TRUEHD"/>
<xs:enumeration value="AUDIO_FORMAT_EVRC"/>
<xs:enumeration value="AUDIO_FORMAT_EVRCB"/>
<xs:enumeration value="AUDIO_FORMAT_EVRCWB"/>
<xs:enumeration value="AUDIO_FORMAT_EVRCNW"/>
<xs:enumeration value="AUDIO_FORMAT_AAC_ADIF"/>
<xs:enumeration value="AUDIO_FORMAT_WMA"/>
<xs:enumeration value="AUDIO_FORMAT_WMA_PRO"/>
<xs:enumeration value="AUDIO_FORMAT_AMR_WB_PLUS"/>
<xs:enumeration value="AUDIO_FORMAT_MP2"/>
<xs:enumeration value="AUDIO_FORMAT_QCELP"/>
<xs:enumeration value="AUDIO_FORMAT_DSD"/>
<xs:enumeration value="AUDIO_FORMAT_FLAC"/>
<xs:enumeration value="AUDIO_FORMAT_ALAC"/>
<xs:enumeration value="AUDIO_FORMAT_APE"/>
<xs:enumeration value="AUDIO_FORMAT_AAC_ADTS"/>
<xs:enumeration value="AUDIO_FORMAT_SBC"/>
<xs:enumeration value="AUDIO_FORMAT_APTX"/>
<xs:enumeration value="AUDIO_FORMAT_APTX_HD"/>
<xs:enumeration value="AUDIO_FORMAT_AC4"/>
<xs:enumeration value="AUDIO_FORMAT_LDAC"/>
<xs:enumeration value="AUDIO_FORMAT_E_AC3_JOC"/>
<xs:enumeration value="AUDIO_FORMAT_MAT_1_0"/>
<xs:enumeration value="AUDIO_FORMAT_MAT_2_0"/>
<xs:enumeration value="AUDIO_FORMAT_MAT_2_1"/>
<xs:enumeration value="AUDIO_FORMAT_AAC_XHE"/>
<xs:enumeration value="AUDIO_FORMAT_AAC_ADTS_XHE"/>
</xs:restriction>
</xs:simpleType>
<!-- Enum values of audio::common::5_0::AudioUsage
TODO: generate from HIDL to avoid manual sync. -->
<xs:simpleType name="audioUsage">
<xs:restriction base="xs:string">
<xs:enumeration value="AUDIO_USAGE_UNKNOWN" />
<xs:enumeration value="AUDIO_USAGE_MEDIA" />
<xs:enumeration value="AUDIO_USAGE_VOICE_COMMUNICATION" />
<xs:enumeration value="AUDIO_USAGE_VOICE_COMMUNICATION_SIGNALLING" />
<xs:enumeration value="AUDIO_USAGE_ALARM" />
<xs:enumeration value="AUDIO_USAGE_NOTIFICATION" />
<xs:enumeration value="AUDIO_USAGE_NOTIFICATION_TELEPHONY_RINGTONE" />
<xs:enumeration value="AUDIO_USAGE_ASSISTANCE_ACCESSIBILITY" />
<xs:enumeration value="AUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE" />
<xs:enumeration value="AUDIO_USAGE_ASSISTANCE_SONIFICATION" />
<xs:enumeration value="AUDIO_USAGE_GAME" />
<xs:enumeration value="AUDIO_USAGE_VIRTUAL_SOURCE" />
<xs:enumeration value="AUDIO_USAGE_ASSISTANT" />
</xs:restriction>
</xs:simpleType>
<xs:simpleType name="audioUsageList">
<xs:list itemType="audioUsage"/>
</xs:simpleType>
<!-- TODO: Change to a space-separated list so that the xsd can enforce correctness. -->
<xs:simpleType name="samplingRates">
<xs:restriction base="xs:string">
<xs:pattern value="[0-9]+(,[0-9]+)*"/>
</xs:restriction>
</xs:simpleType>
<!-- TODO: Change to a space-separated list so that the xsd can enforce correctness. -->
<xs:simpleType name="channelMask">
<xs:annotation>
<xs:documentation xml:lang="en">
Comma (",") separated list of channel flags
from audio_channel_mask_t.
</xs:documentation>
</xs:annotation>
<xs:restriction base="xs:string">
<xs:pattern value="[_A-Z][_A-Z0-9]*(,[_A-Z][_A-Z0-9]*)*"/>
</xs:restriction>
</xs:simpleType>
<xs:complexType name="profile">
<xs:attribute name="name" type="xs:token" use="optional"/>
<xs:attribute name="format" type="audioFormat" use="optional"/>
<xs:attribute name="samplingRates" type="samplingRates" use="optional"/>
<xs:attribute name="channelMasks" type="channelMask" use="optional"/>
</xs:complexType>
<xs:simpleType name="gainMode">
<xs:restriction base="xs:string">
<xs:enumeration value="AUDIO_GAIN_MODE_JOINT"/>
<xs:enumeration value="AUDIO_GAIN_MODE_CHANNELS"/>
<xs:enumeration value="AUDIO_GAIN_MODE_RAMP"/>
</xs:restriction>
</xs:simpleType>
<xs:complexType name="gains">
<xs:sequence>
<xs:element name="gain" minOccurs="0" maxOccurs="unbounded">
<xs:complexType>
<xs:attribute name="name" type="xs:token" use="required"/>
<xs:attribute name="mode" type="gainMode" use="required"/>
<xs:attribute name="channel_mask" type="channelMask" use="optional"/>
<xs:attribute name="minValueMB" type="xs:int" use="optional"/>
<xs:attribute name="maxValueMB" type="xs:int" use="optional"/>
<xs:attribute name="defaultValueMB" type="xs:int" use="optional"/>
<xs:attribute name="stepValueMB" type="xs:int" use="optional"/>
<xs:attribute name="minRampMs" type="xs:int" use="optional"/>
<xs:attribute name="maxRampMs" type="xs:int" use="optional"/>
</xs:complexType>
</xs:element>
</xs:sequence>
</xs:complexType>
<xs:complexType name="devicePorts">
<xs:sequence>
<xs:element name="devicePort" minOccurs="0" maxOccurs="unbounded">
<xs:complexType>
<xs:sequence>
<xs:element name="profile" type="profile" minOccurs="0" maxOccurs="unbounded"/>
<xs:element name="gains" type="gains" minOccurs="0"/>
</xs:sequence>
<xs:attribute name="tagName" type="xs:token" use="required"/>
<xs:attribute name="type" type="audioDevice" use="required"/>
<xs:attribute name="role" type="role" use="required"/>
<xs:attribute name="address" type="xs:string" use="optional" default=""/>
<!-- Note that XSD 1.0 can not check that a type only has one default. -->
<xs:attribute name="default" type="xs:boolean" use="optional">
<xs:annotation>
<xs:documentation xml:lang="en">
The default device will be used if multiple have the same type
and no explicit route request exists for a specific device of
that type.
</xs:documentation>
</xs:annotation>
</xs:attribute>
</xs:complexType>
<xs:unique name="devicePortProfileUniqueness">
<xs:selector xpath="profile"/>
<xs:field xpath="format"/>
<xs:field xpath="samplingRate"/>
<xs:field xpath="channelMasks"/>
</xs:unique>
<xs:unique name="devicePortGainUniqueness">
<xs:selector xpath="gains/gain"/>
<xs:field xpath="@name"/>
</xs:unique>
</xs:element>
</xs:sequence>
</xs:complexType>
<xs:simpleType name="mixType">
<xs:restriction base="xs:string">
<xs:enumeration value="mix"/>
<xs:enumeration value="mux"/>
</xs:restriction>
</xs:simpleType>
<xs:complexType name="routes">
<xs:sequence>
<xs:element name="route" minOccurs="0" maxOccurs="unbounded">
<xs:annotation>
<xs:documentation xml:lang="en">
List all available sources for a given sink.
</xs:documentation>
</xs:annotation>
<xs:complexType>
<xs:attribute name="type" type="mixType" use="required"/>
<xs:attribute name="sink" type="xs:string" use="required"/>
<xs:attribute name="sources" type="xs:string" use="required"/>
</xs:complexType>
</xs:element>
</xs:sequence>
</xs:complexType>
<xs:complexType name="volumes">
<xs:sequence>
<xs:element name="volume" type="volume" minOccurs="0" maxOccurs="unbounded"/>
<xs:element name="reference" type="reference" minOccurs="0" maxOccurs="unbounded">
</xs:element>
</xs:sequence>
</xs:complexType>
<!-- TODO: Always require a ref for better xsd validations.
     Currently a volume may have neither points nor a ref,
     as this cannot be forbidden by xsd 1.0.-->
<xs:simpleType name="volumePoint">
<xs:annotation>
<xs:documentation xml:lang="en">
Comma-separated pair of numbers.
The first one is the framework level (between 0 and 100).
The second one is the volume to send to the HAL.
The framework will interpolate volumes not specified.
There MUST be at least 2 points specified.
</xs:documentation>
</xs:annotation>
<xs:restriction base="xs:string">
<xs:pattern value="([0-9]{1,2}|100),-?[0-9]+"/>
</xs:restriction>
</xs:simpleType>
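
For illustration, the interpolation the framework performs between the
declared points can be sketched as follows. A minimal C++ sketch; the function
name and curve layout are assumptions, not framework API. For a curve with
points (0,-9600) and (100,0), index 50 yields -4800 mB:

    #include <cstddef>
    #include <utility>
    #include <vector>

    // Each pair mirrors one "<point>index,millibels</point>" entry:
    // (framework volume index 0..100, attenuation in millibels).
    using Curve = std::vector<std::pair<int, int>>;

    // Linearly interpolates the millibel value for an index that is not
    // listed in the curve. Assumes at least 2 points, sorted by index.
    int volumeMbForIndex(const Curve& curve, int index) {
        if (index <= curve.front().first) return curve.front().second;
        if (index >= curve.back().first) return curve.back().second;
        for (size_t i = 1; i < curve.size(); ++i) {
            if (index <= curve[i].first) {
                const int x0 = curve[i - 1].first, y0 = curve[i - 1].second;
                const int x1 = curve[i].first, y1 = curve[i].second;
                return y0 + (y1 - y0) * (index - x0) / (x1 - x0);
            }
        }
        return curve.back().second;  // not reached for sorted input
    }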
<!-- Enum values of audio_stream_type_t in audio-base.h
TODO: generate from hidl to avoid manual sync. -->
<xs:simpleType name="stream">
<xs:restriction base="xs:string">
<xs:enumeration value="AUDIO_STREAM_VOICE_CALL"/>
<xs:enumeration value="AUDIO_STREAM_SYSTEM"/>
<xs:enumeration value="AUDIO_STREAM_RING"/>
<xs:enumeration value="AUDIO_STREAM_MUSIC"/>
<xs:enumeration value="AUDIO_STREAM_ALARM"/>
<xs:enumeration value="AUDIO_STREAM_NOTIFICATION"/>
<xs:enumeration value="AUDIO_STREAM_BLUETOOTH_SCO"/>
<xs:enumeration value="AUDIO_STREAM_ENFORCED_AUDIBLE"/>
<xs:enumeration value="AUDIO_STREAM_DTMF"/>
<xs:enumeration value="AUDIO_STREAM_TTS"/>
<xs:enumeration value="AUDIO_STREAM_ACCESSIBILITY"/>
<xs:enumeration value="AUDIO_STREAM_REROUTING"/>
<xs:enumeration value="AUDIO_STREAM_PATCH"/>
</xs:restriction>
</xs:simpleType>
<!-- Enum values of device_category from Volume.h.
TODO: generate from hidl to avoid manual sync. -->
<xs:simpleType name="deviceCategory">
<xs:restriction base="xs:string">
<xs:enumeration value="DEVICE_CATEGORY_HEADSET"/>
<xs:enumeration value="DEVICE_CATEGORY_SPEAKER"/>
<xs:enumeration value="DEVICE_CATEGORY_EARPIECE"/>
<xs:enumeration value="DEVICE_CATEGORY_EXT_MEDIA"/>
<xs:enumeration value="DEVICE_CATEGORY_HEARING_AID"/>
</xs:restriction>
</xs:simpleType>
<xs:complexType name="volume">
<xs:annotation>
<xs:documentation xml:lang="en">
The volume section defines a volume curve for a given use case and device category.
It contains a list of points of this curve expressing the attenuation in millibels
for a given volume index from 0 to 100.
<volume stream="AUDIO_STREAM_MUSIC" deviceCategory="DEVICE_CATEGORY_SPEAKER">
<point>0,-9600</point>
<point>100,0</point>
</volume>
It may also reference a reference/@name to avoid duplicating curves.
<volume stream="AUDIO_STREAM_MUSIC" deviceCategory="DEVICE_CATEGORY_SPEAKER"
ref="DEFAULT_MEDIA_VOLUME_CURVE"/>
<reference name="DEFAULT_MEDIA_VOLUME_CURVE">
<point>0,-9600</point>
<point>100,0</point>
</reference>
</xs:documentation>
</xs:annotation>
<xs:sequence>
<xs:element name="point" type="volumePoint" minOccurs="0" maxOccurs="unbounded"/>
</xs:sequence>
<xs:attribute name="stream" type="stream"/>
<xs:attribute name="deviceCategory" type="deviceCategory"/>
<xs:attribute name="ref" type="xs:token" use="optional"/>
</xs:complexType>
<xs:complexType name="reference">
<xs:sequence>
<xs:element name="point" type="volumePoint" minOccurs="2" maxOccurs="unbounded"/>
</xs:sequence>
<xs:attribute name="name" type="xs:token" use="required"/>
</xs:complexType>
<xs:complexType name="surroundSound">
<xs:annotation>
<xs:documentation xml:lang="en">
Surround Sound section provides configuration related to handling of
multi-channel formats.
</xs:documentation>
</xs:annotation>
<xs:sequence>
<xs:element name="formats" type="surroundFormats"/>
</xs:sequence>
</xs:complexType>
<xs:simpleType name="surroundFormatsList">
<xs:list itemType="audioFormat" />
</xs:simpleType>
<xs:complexType name="surroundFormats">
<xs:sequence>
<xs:element name="format" minOccurs="0" maxOccurs="unbounded">
<xs:complexType>
<xs:attribute name="name" type="audioFormat" use="required"/>
<xs:attribute name="subformats" type="surroundFormatsList" />
</xs:complexType>
</xs:element>
</xs:sequence>
</xs:complexType>
</xs:schema>

246
audio/5.0/types.hal Normal file
View file

@ -0,0 +1,246 @@
/*
* Copyright (C) 2018 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package android.hardware.audio@5.0;
import android.hardware.audio.common@5.0;
enum Result : int32_t {
OK,
NOT_INITIALIZED,
INVALID_ARGUMENTS,
INVALID_STATE,
/**
* Methods marked as "Optional method" must return this result value
* if the operation is not supported by HAL.
*/
NOT_SUPPORTED
};
@export(name="audio_drain_type_t", value_prefix="AUDIO_DRAIN_")
enum AudioDrain : int32_t {
/** drain() returns when all data has been played. */
ALL,
/**
* drain() returns a short time before all data from the current track has
* been played to give time for gapless track switch.
*/
EARLY_NOTIFY
};
/**
* A substitute for POSIX timespec.
*/
struct TimeSpec {
uint64_t tvSec; // seconds
uint64_t tvNSec; // nanoseconds
};
/**
* IEEE 802 MAC address.
*/
typedef uint8_t[6] MacAddress;
struct ParameterValue {
string key;
string value;
};
/**
* Specifies a device in cases when several devices of the same type
* can be connected (e.g. BT A2DP, USB).
*/
struct DeviceAddress {
AudioDevice device; // discriminator
union Address {
MacAddress mac; // used for BLUETOOTH_A2DP_*
uint8_t[4] ipv4; // used for IP
struct Alsa {
int32_t card;
int32_t device;
} alsa; // used for USB_*
} address;
string busAddress; // used for BUS
string rSubmixAddress; // used for REMOTE_SUBMIX
};
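
For illustration, a client could fill this structure for a Bluetooth A2DP sink
as follows. A minimal sketch assuming the C++ types generated by hidl-gen for
this package; the include paths and the MAC value are illustrative:

    #include <cstddef>
    #include <android/hardware/audio/5.0/types.h>          // generated types
    #include <android/hardware/audio/common/5.0/types.h>   // generated types

    using ::android::hardware::audio::V5_0::DeviceAddress;
    using ::android::hardware::audio::common::V5_0::AudioDevice;

    DeviceAddress makeA2dpAddress() {
        DeviceAddress addr{};
        addr.device = AudioDevice::OUT_BLUETOOTH_A2DP;  // the discriminator
        const uint8_t mac[6] = {0x00, 0x11, 0x22, 0x33, 0x44, 0x55};
        for (size_t i = 0; i < 6; ++i) {
            addr.address.mac[i] = mac[i];  // only the mac member is meaningful
        }
        return addr;
    }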
enum MmapBufferFlag : uint32_t {
NONE = 0x0,
/**
* If the buffer can be securely shared to untrusted applications
* through the AAudio exclusive mode.
* Only set this flag if applications are restricted from accessing the
* memory surrounding the audio data buffer by a kernel mechanism.
* See Linux kernel's dma_buf.
*/
APPLICATION_SHAREABLE = 0x1,
};
/**
* Mmap buffer descriptor returned by IStream.createMmapBuffer().
* Used by streams opened in mmap mode.
*/
struct MmapBufferInfo {
/** Mmap memory buffer */
memory sharedMemory;
/** Total buffer size in frames */
uint32_t bufferSizeFrames;
/** Transfer size granularity in frames */
uint32_t burstSizeFrames;
/** Attributes describing the buffer. */
bitfield<MmapBufferFlag> flags;
};
/**
* Mmap buffer read/write position returned by IStream.getMmapPosition().
* Used by streams opened in mmap mode.
*/
struct MmapPosition {
int64_t timeNanoseconds; // time stamp in ns, CLOCK_MONOTONIC
int32_t positionFrames; // increasing 32 bit frame count reset when IStream.stop() is called
};
/**
* The message queue flags used to synchronize reads and writes from
* message queues used by StreamIn and StreamOut.
*/
enum MessageQueueFlagBits : uint32_t {
NOT_EMPTY = 1 << 0,
NOT_FULL = 1 << 1
};
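
These bits are typically used with EventFlag to synchronize the reader and
writer of a stream's data queue. A minimal reader-side sketch, assuming a
synchronized byte queue already obtained from the stream; variable names are
illustrative, and real code would also retry on spurious wake-ups:

    #include <fmq/EventFlag.h>
    #include <fmq/MessageQueue.h>

    using ::android::hardware::EventFlag;
    using ::android::hardware::kSynchronizedReadWrite;
    using ::android::hardware::MessageQueue;
    using ::android::hardware::audio::V5_0::MessageQueueFlagBits;
    using DataMQ = MessageQueue<uint8_t, kSynchronizedReadWrite>;

    // Waits until the writer signals NOT_EMPTY, reads 'bytes' bytes, then
    // tells the writer that space is available again.
    bool readOnce(DataMQ& dataMQ, EventFlag* efGroup, uint8_t* buf, size_t bytes) {
        uint32_t efState = 0;
        efGroup->wait(static_cast<uint32_t>(MessageQueueFlagBits::NOT_EMPTY),
                      &efState);
        if (!dataMQ.read(buf, bytes)) return false;
        efGroup->wake(static_cast<uint32_t>(MessageQueueFlagBits::NOT_FULL));
        return true;
    }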
/*
*
* Microphone information
*
*/
/**
* A 3D point used to represent position or orientation of a microphone.
*
* Position: Coordinates of the microphone's capsule, in meters, from the
* bottom-left-back corner of the bounding box of the android device in natural
* orientation (PORTRAIT for phones, LANDSCAPE for tablets, TVs, etc).
* The orientation must match that reported by the Display.getRotation() API.
*
* Orientation: Normalized vector to signal the main orientation of the
* microphone's capsule. Magnitude = sqrt(x^2 + y^2 + z^2) = 1
*/
struct AudioMicrophoneCoordinate {
float x;
float y;
float z;
};
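
Because the orientation vector must have unit magnitude, an implementation
could normalize a raw capsule direction before reporting it. A minimal C++
sketch; the helper and the standalone struct are illustrative, not part of
the HAL:

    #include <cmath>

    // Mirrors the HIDL struct above for a standalone example.
    struct Coordinate { float x, y, z; };

    // Scales a raw direction so that sqrt(x^2 + y^2 + z^2) == 1.
    Coordinate normalize(Coordinate v) {
        const float mag = std::sqrt(v.x * v.x + v.y * v.y + v.z * v.z);
        if (mag > 0.0f) {
            v.x /= mag;
            v.y /= mag;
            v.z /= mag;
        }
        return v;
    }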
/**
* Enum to identify the type of channel mapping for active microphones.
* Used channels further identify whether the microphone signal has undergone
* any significant processing (e.g. high-pass filtering, dynamic compression).
* Simple processing, such as constant gain adjustment, must be reported as DIRECT.
*/
enum AudioMicrophoneChannelMapping : uint32_t {
UNUSED = 0, /* Channel not used */
DIRECT = 1, /* Channel used and signal not processed */
PROCESSED = 2, /* Channel used and signal has some process */
};
/**
* Enum to identify locations of microphones with regard to the body of the
* android device.
*/
enum AudioMicrophoneLocation : uint32_t {
UNKNOWN = 0,
MAINBODY = 1,
MAINBODY_MOVABLE = 2,
PERIPHERAL = 3,
};
/**
* Identifier to help group related microphones together
* e.g. microphone arrays should belong to the same group
*/
typedef int32_t AudioMicrophoneGroup;
/**
* Enum with standard polar patterns of microphones
*/
enum AudioMicrophoneDirectionality : uint32_t {
UNKNOWN = 0,
OMNI = 1,
BI_DIRECTIONAL = 2,
CARDIOID = 3,
HYPER_CARDIOID = 4,
SUPER_CARDIOID = 5,
};
/**
* A (frequency, level) pair. Used to represent frequency response.
*/
struct AudioFrequencyResponsePoint {
/** In Hz */
float frequency;
/** In dB */
float level;
};
/**
* Structure used by the HAL to describe a microphone's characteristics.
* Used by StreamIn and Device.
*/
struct MicrophoneInfo {
/** Unique alphanumeric id for the microphone. Guaranteed to remain the same
* even after rebooting.
*/
string deviceId;
/**
* Device specific information
*/
DeviceAddress deviceAddress;
/** Each element of the vector must describe the channel with the same
* index.
*/
vec<AudioMicrophoneChannelMapping> channelMapping;
/** Location of the microphone in regard to the body of the device */
AudioMicrophoneLocation location;
/** Identifier to help group related microphones together
* e.g. microphone arrays should belong to the same group
*/
AudioMicrophoneGroup group;
/** Index of this microphone within the group.
* (group, index) must be unique within the same device.
*/
uint32_t indexInTheGroup;
/** Level in dBFS produced by a 1000 Hz tone at 94 dB SPL */
float sensitivity;
/** Level in dB of the max SPL supported at 1000 Hz */
float maxSpl;
/** Level in dB of the min SPL supported at 1000 Hz */
float minSpl;
/** Standard polar pattern of the microphone */
AudioMicrophoneDirectionality directionality;
/** Vector with ordered frequency responses (from low to high frequencies)
* with the frequency response of the microphone.
* Levels are in dB, relative to level at 1000 Hz
*/
vec<AudioFrequencyResponsePoint> frequencyResponse;
/** Position of the microphone's capsule in meters, from the
* bottom-left-back corner of the bounding box of the device.
*/
AudioMicrophoneCoordinate position;
/** Normalized point to signal the main orientation of the microphone's
* capsule. sqrt(x^2 + y^2 + z^2) = 1
*/
AudioMicrophoneCoordinate orientation;
};
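
For illustration, a HAL could describe a single built-in microphone like this.
A minimal sketch assuming the generated C++ types; all values are made up for
the example:

    using ::android::hardware::audio::V5_0::AudioMicrophoneChannelMapping;
    using ::android::hardware::audio::V5_0::AudioMicrophoneDirectionality;
    using ::android::hardware::audio::V5_0::AudioMicrophoneLocation;
    using ::android::hardware::audio::V5_0::MicrophoneInfo;
    using ::android::hardware::audio::common::V5_0::AudioDevice;

    MicrophoneInfo makeBuiltinMic() {
        MicrophoneInfo mic{};
        mic.deviceId = "builtin_mic_bottom";  // must be stable across reboots
        mic.deviceAddress.device = AudioDevice::IN_BUILTIN_MIC;
        mic.channelMapping = {AudioMicrophoneChannelMapping::DIRECT};
        mic.location = AudioMicrophoneLocation::MAINBODY;
        mic.group = 0;
        mic.indexInTheGroup = 0;
        mic.sensitivity = -37.0f;  // dBFS for a 1 kHz tone at 94 dB SPL
        mic.directionality = AudioMicrophoneDirectionality::OMNI;
        mic.position = {0.01f, 0.0f, 0.005f};  // meters from the reference corner
        mic.orientation = {0.0f, 0.0f, 1.0f};  // unit vector
        return mic;
    }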

48
audio/common/5.0/Android.bp Normal file
View file

@ -0,0 +1,48 @@
// This file is autogenerated by hidl-gen -Landroidbp.
hidl_interface {
name: "android.hardware.audio.common@5.0",
root: "android.hardware",
vndk: {
enabled: true,
},
srcs: [
"types.hal",
],
types: [
"AudioChannelMask",
"AudioConfig",
"AudioContentType",
"AudioDevice",
"AudioFormat",
"AudioGain",
"AudioGainConfig",
"AudioGainMode",
"AudioHandleConsts",
"AudioInputFlag",
"AudioMixLatencyClass",
"AudioMode",
"AudioOffloadInfo",
"AudioOutputFlag",
"AudioPort",
"AudioPortConfig",
"AudioPortConfigDeviceExt",
"AudioPortConfigMask",
"AudioPortConfigSessionExt",
"AudioPortDeviceExt",
"AudioPortMixExt",
"AudioPortRole",
"AudioPortSessionExt",
"AudioPortType",
"AudioSessionConsts",
"AudioSource",
"AudioStreamType",
"AudioUsage",
"FixedChannelCount",
"ThreadInfo",
"Uuid",
],
gen_java: false,
gen_java_constants: true,
}

936
audio/common/5.0/types.hal Normal file
View file

@ -0,0 +1,936 @@
/*
* Copyright (C) 2018 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package android.hardware.audio.common@5.0;
/*
*
* IDs and Handles
*
*/
/**
* Handle type for identifying audio sources and sinks.
*/
typedef int32_t AudioIoHandle;
/**
* Handle for an audio HW module, used by functions or structures referencing a module.
*/
typedef int32_t AudioModuleHandle;
/**
* Each port has a unique ID or handle allocated by policy manager.
*/
typedef int32_t AudioPortHandle;
/**
* Each patch is identified by a handle at the interface used to create that
* patch. For instance, when a patch is created by the audio HAL, the HAL
* allocates and returns a handle. This handle is unique to a given audio HAL
* hardware module. But the same patch receives another system wide unique
* handle allocated by the framework. This unique handle is used for all
* transactions inside the framework.
*/
typedef int32_t AudioPatchHandle;
/**
* A HW synchronization source returned by the audio HAL.
*/
typedef uint32_t AudioHwSync;
/**
* Each port has a unique ID or handle allocated by policy manager.
*/
@export(name="")
enum AudioHandleConsts : int32_t {
AUDIO_IO_HANDLE_NONE = 0,
AUDIO_MODULE_HANDLE_NONE = 0,
AUDIO_PORT_HANDLE_NONE = 0,
AUDIO_PATCH_HANDLE_NONE = 0,
};
/**
* Commonly used structure for passing unique identifiers (UUID).
* For the definition of UUID, refer to ITU-T X.667 spec.
*/
struct Uuid {
uint32_t timeLow;
uint16_t timeMid;
uint16_t versionAndTimeHigh;
uint16_t variantAndClockSeqHigh;
uint8_t[6] node;
};
/*
*
* Audio streams
*
*/
/**
* Audio stream type describing the intended use case of a stream.
*/
@export(name="audio_stream_type_t", value_prefix="AUDIO_STREAM_")
enum AudioStreamType : int32_t {
// These values must be kept in sync with
// frameworks/base/media/java/android/media/AudioSystem.java
DEFAULT = -1,
MIN = 0,
VOICE_CALL = 0,
SYSTEM = 1,
RING = 2,
MUSIC = 3,
ALARM = 4,
NOTIFICATION = 5,
BLUETOOTH_SCO = 6,
ENFORCED_AUDIBLE = 7, // Sounds that cannot be muted by the user and must
// be routed to speaker
DTMF = 8,
TTS = 9, // Transmitted Through Speaker. Plays over speaker
// only, silent on other devices
ACCESSIBILITY = 10, // For accessibility talk back prompts
};
@export(name="audio_source_t", value_prefix="AUDIO_SOURCE_")
enum AudioSource : int32_t {
// These values must be kept in sync with
// frameworks/base/media/java/android/media/MediaRecorder.java,
// frameworks/av/services/audiopolicy/AudioPolicyService.cpp,
// system/media/audio_effects/include/audio_effects/audio_effects_conf.h
DEFAULT = 0,
MIC = 1,
VOICE_UPLINK = 2,
VOICE_DOWNLINK = 3,
VOICE_CALL = 4,
CAMCORDER = 5,
VOICE_RECOGNITION = 6,
VOICE_COMMUNICATION = 7,
/**
* Source for the mix to be presented remotely. An example of remote
* presentation is Wifi Display where a dongle attached to a TV can be used
* to play the mix captured by this audio source.
*/
REMOTE_SUBMIX = 8,
/**
* Source for unprocessed sound. Usage examples include level measurement
* and raw signal analysis.
*/
UNPROCESSED = 9,
FM_TUNER = 1998,
};
typedef int32_t AudioSession;
/**
* Special audio session values.
*/
@export(name="audio_session_t", value_prefix="AUDIO_SESSION_")
enum AudioSessionConsts : int32_t {
/**
* Session for effects attached to a particular output stream
* (value must be less than 0)
*/
OUTPUT_STAGE = -1,
/**
* Session for effects applied to output mix. These effects can
* be moved by audio policy manager to another output stream
* (value must be 0)
*/
OUTPUT_MIX = 0,
/**
* Application does not specify an explicit session ID to be used, and
* requests a new session ID to be allocated. Corresponds to
* AudioManager.AUDIO_SESSION_ID_GENERATE and
* AudioSystem.AUDIO_SESSION_ALLOCATE.
*/
ALLOCATE = 0,
/**
* For use with AudioRecord::start(), this indicates no trigger session.
* It is also used with output tracks and patch tracks, which never have a
* session.
*/
NONE = 0
};
/**
* Audio format is a 32-bit word that consists of:
* main format field (upper 8 bits)
* sub format field (lower 24 bits).
*
* The main format indicates the main codec type. The sub format field indicates
* options and parameters for each format. The sub format is mainly used for
* recording to indicate for instance the requested bitrate or profile. It can
* also be used for certain formats to give information not present in the
* encoded audio stream (e.g. octet alignment for AMR).
*/
@export(name="audio_format_t", value_prefix="AUDIO_FORMAT_")
enum AudioFormat : uint32_t {
INVALID = 0xFFFFFFFFUL,
DEFAULT = 0,
PCM = 0x00000000UL,
MP3 = 0x01000000UL,
AMR_NB = 0x02000000UL,
AMR_WB = 0x03000000UL,
AAC = 0x04000000UL,
/** Deprecated, Use AAC_HE_V1 */
HE_AAC_V1 = 0x05000000UL,
/** Deprecated, Use AAC_HE_V2 */
HE_AAC_V2 = 0x06000000UL,
VORBIS = 0x07000000UL,
OPUS = 0x08000000UL,
AC3 = 0x09000000UL,
E_AC3 = 0x0A000000UL,
DTS = 0x0B000000UL,
DTS_HD = 0x0C000000UL,
/** IEC61937 is encoded audio wrapped in 16-bit PCM. */
IEC61937 = 0x0D000000UL,
DOLBY_TRUEHD = 0x0E000000UL,
EVRC = 0x10000000UL,
EVRCB = 0x11000000UL,
EVRCWB = 0x12000000UL,
EVRCNW = 0x13000000UL,
AAC_ADIF = 0x14000000UL,
WMA = 0x15000000UL,
WMA_PRO = 0x16000000UL,
AMR_WB_PLUS = 0x17000000UL,
MP2 = 0x18000000UL,
QCELP = 0x19000000UL,
DSD = 0x1A000000UL,
FLAC = 0x1B000000UL,
ALAC = 0x1C000000UL,
APE = 0x1D000000UL,
AAC_ADTS = 0x1E000000UL,
SBC = 0x1F000000UL,
APTX = 0x20000000UL,
APTX_HD = 0x21000000UL,
AC4 = 0x22000000UL,
LDAC = 0x23000000UL,
/** Dolby Metadata-enhanced Audio Transmission */
MAT = 0x24000000UL,
/** Deprecated */
MAIN_MASK = 0xFF000000UL,
SUB_MASK = 0x00FFFFFFUL,
/* Subformats */
PCM_SUB_16_BIT = 0x1, // PCM signed 16 bits
PCM_SUB_8_BIT = 0x2, // PCM unsigned 8 bits
PCM_SUB_32_BIT = 0x3, // PCM signed .31 fixed point
PCM_SUB_8_24_BIT = 0x4, // PCM signed 8.23 fixed point
PCM_SUB_FLOAT = 0x5, // PCM single-precision float pt
PCM_SUB_24_BIT_PACKED = 0x6, // PCM signed .23 fix pt (3 bytes)
MP3_SUB_NONE = 0x0,
AMR_SUB_NONE = 0x0,
AAC_SUB_MAIN = 0x1,
AAC_SUB_LC = 0x2,
AAC_SUB_SSR = 0x4,
AAC_SUB_LTP = 0x8,
AAC_SUB_HE_V1 = 0x10,
AAC_SUB_SCALABLE = 0x20,
AAC_SUB_ERLC = 0x40,
AAC_SUB_LD = 0x80,
AAC_SUB_HE_V2 = 0x100,
AAC_SUB_ELD = 0x200,
AAC_SUB_XHE = 0x300,
VORBIS_SUB_NONE = 0x0,
E_AC3_SUB_JOC = 0x1,
MAT_SUB_1_0 = 0x1,
MAT_SUB_2_0 = 0x2,
MAT_SUB_2_1 = 0x3,
/* Aliases */
/** note != AudioFormat.ENCODING_PCM_16BIT */
PCM_16_BIT = (PCM | PCM_SUB_16_BIT),
/** note != AudioFormat.ENCODING_PCM_8BIT */
PCM_8_BIT = (PCM | PCM_SUB_8_BIT),
PCM_32_BIT = (PCM | PCM_SUB_32_BIT),
PCM_8_24_BIT = (PCM | PCM_SUB_8_24_BIT),
PCM_FLOAT = (PCM | PCM_SUB_FLOAT),
PCM_24_BIT_PACKED = (PCM | PCM_SUB_24_BIT_PACKED),
AAC_MAIN = (AAC | AAC_SUB_MAIN),
AAC_LC = (AAC | AAC_SUB_LC),
AAC_SSR = (AAC | AAC_SUB_SSR),
AAC_LTP = (AAC | AAC_SUB_LTP),
AAC_HE_V1 = (AAC | AAC_SUB_HE_V1),
AAC_SCALABLE = (AAC | AAC_SUB_SCALABLE),
AAC_ERLC = (AAC | AAC_SUB_ERLC),
AAC_LD = (AAC | AAC_SUB_LD),
AAC_HE_V2 = (AAC | AAC_SUB_HE_V2),
AAC_ELD = (AAC | AAC_SUB_ELD),
AAC_XHE = (AAC | AAC_SUB_XHE),
AAC_ADTS_MAIN = (AAC_ADTS | AAC_SUB_MAIN),
AAC_ADTS_LC = (AAC_ADTS | AAC_SUB_LC),
AAC_ADTS_SSR = (AAC_ADTS | AAC_SUB_SSR),
AAC_ADTS_LTP = (AAC_ADTS | AAC_SUB_LTP),
AAC_ADTS_HE_V1 = (AAC_ADTS | AAC_SUB_HE_V1),
AAC_ADTS_SCALABLE = (AAC_ADTS | AAC_SUB_SCALABLE),
AAC_ADTS_ERLC = (AAC_ADTS | AAC_SUB_ERLC),
AAC_ADTS_LD = (AAC_ADTS | AAC_SUB_LD),
AAC_ADTS_HE_V2 = (AAC_ADTS | AAC_SUB_HE_V2),
AAC_ADTS_ELD = (AAC_ADTS | AAC_SUB_ELD),
AAC_ADTS_XHE = (AAC_ADTS | AAC_SUB_XHE),
E_AC3_JOC = (E_AC3 | E_AC3_SUB_JOC),
MAT_1_0 = (MAT | MAT_SUB_1_0),
MAT_2_0 = (MAT | MAT_SUB_2_0),
MAT_2_1 = (MAT | MAT_SUB_2_1),
};
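
For illustration, the main/sub split means a format's family can be tested by
masking the top byte. A minimal C++ sketch using the generated enum; the
helper name is illustrative:

    #include <cstdint>

    using ::android::hardware::audio::common::V5_0::AudioFormat;

    // True when 'format' is any AAC profile encapsulated in ADTS.
    bool isAacAdts(AudioFormat format) {
        const uint32_t main = static_cast<uint32_t>(format) &
                              static_cast<uint32_t>(AudioFormat::MAIN_MASK);
        return main == static_cast<uint32_t>(AudioFormat::AAC_ADTS);
    }
    // isAacAdts(AudioFormat::AAC_ADTS_LC) == true
    // isAacAdts(AudioFormat::AAC_LC)      == false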
/**
* Usage of these values highlights places in the code that use 2- or 8-channel
* assumptions.
*/
@export(name="")
enum FixedChannelCount : int32_t {
FCC_2 = 2, // This is typically due to legacy implementation of stereo I/O
FCC_8 = 8 // This is typically due to audio mixer and resampler limitations
};
/**
* A channel mask per se only defines the presence or absence of a channel, not
* the order. See AUDIO_INTERLEAVE_* for the platform convention of order.
*
* AudioChannelMask is an opaque type and its internal layout should not be
* assumed as it may change in the future. Instead, always use functions
* to examine it.
*
* These are the current representations:
*
* REPRESENTATION_POSITION
* is a channel mask representation for position assignment. Each low-order
* bit corresponds to the spatial position of a transducer (output), or
* interpretation of channel (input). The user of a channel mask needs to
* know the context of whether it is for output or input. The constants
* OUT_* or IN_* apply to the bits portion. It is not permitted for no bits
* to be set.
*
* REPRESENTATION_INDEX
* is a channel mask representation for index assignment. Each low-order
* bit corresponds to a selected channel. There is no platform
* interpretation of the various bits. There is no concept of output or
* input. It is not permitted for no bits to be set.
*
* All other representations are reserved for future use.
*
* Warning: current representation distinguishes between input and output, but
* this will not be the case in future revisions of the platform. Wherever there
* is an ambiguity between input and output that is currently resolved by
* checking the channel mask, the implementer should look for ways to fix it
* with additional information outside of the mask.
*/
@export(name="", value_prefix="AUDIO_CHANNEL_")
enum AudioChannelMask : uint32_t {
/** must be 0 for compatibility */
REPRESENTATION_POSITION = 0,
/** 1 is reserved for future use */
REPRESENTATION_INDEX = 2,
/* 3 is reserved for future use */
/** These can be a complete value of AudioChannelMask */
NONE = 0x0,
INVALID = 0xC0000000,
/*
* These can be the bits portion of an AudioChannelMask
* with representation REPRESENTATION_POSITION.
*/
/** output channels */
OUT_FRONT_LEFT = 0x1,
OUT_FRONT_RIGHT = 0x2,
OUT_FRONT_CENTER = 0x4,
OUT_LOW_FREQUENCY = 0x8,
OUT_BACK_LEFT = 0x10,
OUT_BACK_RIGHT = 0x20,
OUT_FRONT_LEFT_OF_CENTER = 0x40,
OUT_FRONT_RIGHT_OF_CENTER = 0x80,
OUT_BACK_CENTER = 0x100,
OUT_SIDE_LEFT = 0x200,
OUT_SIDE_RIGHT = 0x400,
OUT_TOP_CENTER = 0x800,
OUT_TOP_FRONT_LEFT = 0x1000,
OUT_TOP_FRONT_CENTER = 0x2000,
OUT_TOP_FRONT_RIGHT = 0x4000,
OUT_TOP_BACK_LEFT = 0x8000,
OUT_TOP_BACK_CENTER = 0x10000,
OUT_TOP_BACK_RIGHT = 0x20000,
OUT_TOP_SIDE_LEFT = 0x40000,
OUT_TOP_SIDE_RIGHT = 0x80000,
OUT_MONO = OUT_FRONT_LEFT,
OUT_STEREO = (OUT_FRONT_LEFT | OUT_FRONT_RIGHT),
OUT_2POINT1 = (OUT_FRONT_LEFT | OUT_FRONT_RIGHT | OUT_LOW_FREQUENCY),
OUT_2POINT0POINT2 = (OUT_FRONT_LEFT | OUT_FRONT_RIGHT |
OUT_TOP_SIDE_LEFT | OUT_TOP_SIDE_RIGHT),
OUT_2POINT1POINT2 = (OUT_FRONT_LEFT | OUT_FRONT_RIGHT |
OUT_TOP_SIDE_LEFT | OUT_TOP_SIDE_RIGHT |
OUT_LOW_FREQUENCY),
OUT_3POINT0POINT2 = (OUT_FRONT_LEFT | OUT_FRONT_CENTER | OUT_FRONT_RIGHT |
OUT_TOP_SIDE_LEFT | OUT_TOP_SIDE_RIGHT),
OUT_3POINT1POINT2 = (OUT_FRONT_LEFT | OUT_FRONT_CENTER | OUT_FRONT_RIGHT |
OUT_TOP_SIDE_LEFT | OUT_TOP_SIDE_RIGHT |
OUT_LOW_FREQUENCY),
OUT_QUAD = (OUT_FRONT_LEFT | OUT_FRONT_RIGHT |
OUT_BACK_LEFT | OUT_BACK_RIGHT),
OUT_QUAD_BACK = OUT_QUAD,
/** like OUT_QUAD_BACK with *_SIDE_* instead of *_BACK_* */
OUT_QUAD_SIDE = (OUT_FRONT_LEFT | OUT_FRONT_RIGHT |
OUT_SIDE_LEFT | OUT_SIDE_RIGHT),
OUT_SURROUND = (OUT_FRONT_LEFT | OUT_FRONT_RIGHT |
OUT_FRONT_CENTER | OUT_BACK_CENTER),
OUT_PENTA = (OUT_QUAD | OUT_FRONT_CENTER),
OUT_5POINT1 = (OUT_FRONT_LEFT | OUT_FRONT_RIGHT |
OUT_FRONT_CENTER | OUT_LOW_FREQUENCY |
OUT_BACK_LEFT | OUT_BACK_RIGHT),
OUT_5POINT1_BACK = OUT_5POINT1,
/** like OUT_5POINT1_BACK with *_SIDE_* instead of *_BACK_* */
OUT_5POINT1_SIDE = (OUT_FRONT_LEFT | OUT_FRONT_RIGHT |
OUT_FRONT_CENTER | OUT_LOW_FREQUENCY |
OUT_SIDE_LEFT | OUT_SIDE_RIGHT),
OUT_5POINT1POINT2 = (OUT_5POINT1 | OUT_TOP_SIDE_LEFT | OUT_TOP_SIDE_RIGHT),
OUT_5POINT1POINT4 = (OUT_5POINT1 |
OUT_TOP_FRONT_LEFT | OUT_TOP_FRONT_RIGHT |
OUT_TOP_BACK_LEFT | OUT_TOP_BACK_RIGHT),
OUT_6POINT1 = (OUT_FRONT_LEFT | OUT_FRONT_RIGHT |
OUT_FRONT_CENTER | OUT_LOW_FREQUENCY |
OUT_BACK_LEFT | OUT_BACK_RIGHT |
OUT_BACK_CENTER),
/** matches the correct AudioFormat.CHANNEL_OUT_7POINT1_SURROUND */
OUT_7POINT1 = (OUT_FRONT_LEFT | OUT_FRONT_RIGHT |
OUT_FRONT_CENTER | OUT_LOW_FREQUENCY |
OUT_BACK_LEFT | OUT_BACK_RIGHT |
OUT_SIDE_LEFT | OUT_SIDE_RIGHT),
OUT_7POINT1POINT2 = (OUT_7POINT1 | OUT_TOP_SIDE_LEFT | OUT_TOP_SIDE_RIGHT),
OUT_7POINT1POINT4 = (OUT_7POINT1 |
OUT_TOP_FRONT_LEFT | OUT_TOP_FRONT_RIGHT |
OUT_TOP_BACK_LEFT | OUT_TOP_BACK_RIGHT),
// Note that the 2.0 OUT_ALL* have been moved to helper functions
/* These are bits only, not complete values */
/** input channels */
IN_LEFT = 0x4,
IN_RIGHT = 0x8,
IN_FRONT = 0x10,
IN_BACK = 0x20,
IN_LEFT_PROCESSED = 0x40,
IN_RIGHT_PROCESSED = 0x80,
IN_FRONT_PROCESSED = 0x100,
IN_BACK_PROCESSED = 0x200,
IN_PRESSURE = 0x400,
IN_X_AXIS = 0x800,
IN_Y_AXIS = 0x1000,
IN_Z_AXIS = 0x2000,
IN_BACK_LEFT = 0x10000,
IN_BACK_RIGHT = 0x20000,
IN_CENTER = 0x40000,
IN_LOW_FREQUENCY = 0x100000,
IN_TOP_LEFT = 0x200000,
IN_TOP_RIGHT = 0x400000,
IN_VOICE_UPLINK = 0x4000,
IN_VOICE_DNLINK = 0x8000,
IN_MONO = IN_FRONT,
IN_STEREO = (IN_LEFT | IN_RIGHT),
IN_FRONT_BACK = (IN_FRONT | IN_BACK),
IN_6 = (IN_LEFT | IN_RIGHT |
IN_FRONT | IN_BACK |
IN_LEFT_PROCESSED | IN_RIGHT_PROCESSED),
IN_2POINT0POINT2 = (IN_LEFT | IN_RIGHT | IN_TOP_LEFT | IN_TOP_RIGHT),
IN_2POINT1POINT2 = (IN_LEFT | IN_RIGHT | IN_TOP_LEFT | IN_TOP_RIGHT |
IN_LOW_FREQUENCY),
IN_3POINT0POINT2 = (IN_LEFT | IN_CENTER | IN_RIGHT | IN_TOP_LEFT | IN_TOP_RIGHT),
IN_3POINT1POINT2 = (IN_LEFT | IN_CENTER | IN_RIGHT |
IN_TOP_LEFT | IN_TOP_RIGHT | IN_LOW_FREQUENCY),
IN_5POINT1 = (IN_LEFT | IN_CENTER | IN_RIGHT |
IN_BACK_LEFT | IN_BACK_RIGHT | IN_LOW_FREQUENCY),
IN_VOICE_UPLINK_MONO = (IN_VOICE_UPLINK | IN_MONO),
IN_VOICE_DNLINK_MONO = (IN_VOICE_DNLINK | IN_MONO),
IN_VOICE_CALL_MONO = (IN_VOICE_UPLINK_MONO |
IN_VOICE_DNLINK_MONO),
// Note that the 2.0 IN_ALL* have been moved to helper functions
COUNT_MAX = 30,
INDEX_HDR = REPRESENTATION_INDEX << COUNT_MAX,
INDEX_MASK_1 = INDEX_HDR | ((1 << 1) - 1),
INDEX_MASK_2 = INDEX_HDR | ((1 << 2) - 1),
INDEX_MASK_3 = INDEX_HDR | ((1 << 3) - 1),
INDEX_MASK_4 = INDEX_HDR | ((1 << 4) - 1),
INDEX_MASK_5 = INDEX_HDR | ((1 << 5) - 1),
INDEX_MASK_6 = INDEX_HDR | ((1 << 6) - 1),
INDEX_MASK_7 = INDEX_HDR | ((1 << 7) - 1),
INDEX_MASK_8 = INDEX_HDR | ((1 << 8) - 1)
};
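
For illustration, the channel count of a position mask is its number of set
bits, and an index mask for N channels is built exactly like the INDEX_MASK_*
aliases above. A minimal C++ sketch; helper names are illustrative:

    #include <bitset>
    #include <cstdint>

    using ::android::hardware::audio::common::V5_0::AudioChannelMask;

    // Channel count of a REPRESENTATION_POSITION mask: one bit per channel.
    uint32_t positionMaskChannelCount(uint32_t bits) {
        return std::bitset<30>(bits).count();  // COUNT_MAX = 30 usable bits
    }

    // Index mask for 'count' channels: INDEX_HDR | ((1 << count) - 1).
    uint32_t indexMaskForCount(uint32_t count) {
        const uint32_t indexHdr =
            static_cast<uint32_t>(AudioChannelMask::REPRESENTATION_INDEX) << 30;
        return indexHdr | ((1u << count) - 1);
    }
    // indexMaskForCount(2) equals the value of AudioChannelMask::INDEX_MASK_2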
/**
* Major modes for a mobile device. The current mode setting affects audio
* routing.
*/
@export(name="audio_mode_t", value_prefix="AUDIO_MODE_")
enum AudioMode : int32_t {
NORMAL = 0,
RINGTONE = 1,
/** Calls handled by the telephony stack (Eg: PSTN). */
IN_CALL = 2,
/** Calls handled by apps (Eg: Hangout). */
IN_COMMUNICATION = 3,
};
@export(name="", value_prefix="AUDIO_DEVICE_")
enum AudioDevice : uint32_t {
NONE = 0x0,
/** reserved bits */
BIT_IN = 0x80000000,
BIT_DEFAULT = 0x40000000,
/** output devices */
OUT_EARPIECE = 0x1,
OUT_SPEAKER = 0x2,
OUT_WIRED_HEADSET = 0x4,
OUT_WIRED_HEADPHONE = 0x8,
OUT_BLUETOOTH_SCO = 0x10,
OUT_BLUETOOTH_SCO_HEADSET = 0x20,
OUT_BLUETOOTH_SCO_CARKIT = 0x40,
OUT_BLUETOOTH_A2DP = 0x80,
OUT_BLUETOOTH_A2DP_HEADPHONES = 0x100,
OUT_BLUETOOTH_A2DP_SPEAKER = 0x200,
OUT_AUX_DIGITAL = 0x400,
OUT_HDMI = OUT_AUX_DIGITAL,
/** uses an analog connection (multiplexed over the USB pins for instance) */
OUT_ANLG_DOCK_HEADSET = 0x800,
OUT_DGTL_DOCK_HEADSET = 0x1000,
/** USB accessory mode: Android device is USB device and dock is USB host */
OUT_USB_ACCESSORY = 0x2000,
/** USB host mode: Android device is USB host and dock is USB device */
OUT_USB_DEVICE = 0x4000,
OUT_REMOTE_SUBMIX = 0x8000,
/** Telephony voice TX path */
OUT_TELEPHONY_TX = 0x10000,
/** Analog jack with line impedance detected */
OUT_LINE = 0x20000,
/** HDMI Audio Return Channel */
OUT_HDMI_ARC = 0x40000,
/** S/PDIF out */
OUT_SPDIF = 0x80000,
/** FM transmitter out */
OUT_FM = 0x100000,
/** Line out for av devices */
OUT_AUX_LINE = 0x200000,
/** limited-output speaker device for acoustic safety */
OUT_SPEAKER_SAFE = 0x400000,
OUT_IP = 0x800000,
/** audio bus implemented by the audio system (e.g. a MOST stereo channel) */
OUT_BUS = 0x1000000,
OUT_PROXY = 0x2000000,
OUT_USB_HEADSET = 0x4000000,
OUT_HEARING_AID = 0x8000000,
OUT_ECHO_CANCELLER = 0x10000000,
OUT_DEFAULT = BIT_DEFAULT,
// Note that the 2.0 OUT_ALL* have been moved to helper functions
/** input devices */
IN_COMMUNICATION = BIT_IN | 0x1,
IN_AMBIENT = BIT_IN | 0x2,
IN_BUILTIN_MIC = BIT_IN | 0x4,
IN_BLUETOOTH_SCO_HEADSET = BIT_IN | 0x8,
IN_WIRED_HEADSET = BIT_IN | 0x10,
IN_AUX_DIGITAL = BIT_IN | 0x20,
IN_HDMI = IN_AUX_DIGITAL,
/** Telephony voice RX path */
IN_VOICE_CALL = BIT_IN | 0x40,
IN_TELEPHONY_RX = IN_VOICE_CALL,
IN_BACK_MIC = BIT_IN | 0x80,
IN_REMOTE_SUBMIX = BIT_IN | 0x100,
IN_ANLG_DOCK_HEADSET = BIT_IN | 0x200,
IN_DGTL_DOCK_HEADSET = BIT_IN | 0x400,
IN_USB_ACCESSORY = BIT_IN | 0x800,
IN_USB_DEVICE = BIT_IN | 0x1000,
/** FM tuner input */
IN_FM_TUNER = BIT_IN | 0x2000,
/** TV tuner input */
IN_TV_TUNER = BIT_IN | 0x4000,
/** Analog jack with line impedance detected */
IN_LINE = BIT_IN | 0x8000,
/** S/PDIF in */
IN_SPDIF = BIT_IN | 0x10000,
IN_BLUETOOTH_A2DP = BIT_IN | 0x20000,
IN_LOOPBACK = BIT_IN | 0x40000,
IN_IP = BIT_IN | 0x80000,
/** audio bus implemented by the audio system (e.g. a MOST stereo channel) */
IN_BUS = BIT_IN | 0x100000,
IN_PROXY = BIT_IN | 0x1000000,
IN_USB_HEADSET = BIT_IN | 0x2000000,
IN_BLUETOOTH_BLE = BIT_IN | 0x4000000,
IN_DEFAULT = BIT_IN | BIT_DEFAULT,
// Note that the 2.0 IN_ALL* have been moved to helper functions
};
/**
* The audio output flags serve two purposes:
*
* - when an AudioTrack is created they indicate a "wish" to be connected to an
* output stream with attributes corresponding to the specified flags;
*
* - when present in an output profile descriptor listed for a particular audio
* hardware module, they indicate that an output stream can be opened that
* supports the attributes indicated by the flags.
*
* The audio policy manager will try to match the flags in the request
* (when getOutput() is called) to an available output stream.
*/
@export(name="audio_output_flags_t", value_prefix="AUDIO_OUTPUT_FLAG_")
enum AudioOutputFlag : int32_t {
NONE = 0x0, // no attributes
DIRECT = 0x1, // this output directly connects a track
// to one output stream: no software mixer
PRIMARY = 0x2, // this output is the primary output of the device. It is
// unique and must be present. It is opened by default and
// receives routing, audio mode and volume controls related
// to voice calls.
FAST = 0x4, // output supports "fast tracks", defined elsewhere
DEEP_BUFFER = 0x8, // use deep audio buffers
COMPRESS_OFFLOAD = 0x10, // offload playback of compressed streams to
// hardware codec
NON_BLOCKING = 0x20, // use non-blocking write
HW_AV_SYNC = 0x40, // output uses a hardware A/V sync
TTS = 0x80, // output for streams transmitted through speaker at a
// sample rate high enough to accommodate lower-range
// ultrasonic playback
RAW = 0x100, // minimize signal processing
SYNC = 0x200, // synchronize I/O streams
IEC958_NONAUDIO = 0x400, // Audio stream contains compressed audio in SPDIF
// data bursts, not PCM.
DIRECT_PCM = 0x2000, // Audio stream containing PCM data that needs
// to pass through compress path for DSP post proc.
MMAP_NOIRQ = 0x4000, // output operates in MMAP no IRQ mode.
VOIP_RX = 0x8000, // preferred output for VoIP calls.
/** preferred output for call music */
INCALL_MUSIC = 0x10000,
};
/**
* The audio input flags are analogous to audio output flags.
* Currently they are used only when an AudioRecord is created,
* to indicate a preference to be connected to an input stream with
* attributes corresponding to the specified flags.
*/
@export(name="audio_input_flags_t", value_prefix="AUDIO_INPUT_FLAG_")
enum AudioInputFlag : int32_t {
NONE = 0x0, // no attributes
FAST = 0x1, // prefer an input that supports "fast tracks"
HW_HOTWORD = 0x2, // prefer an input that captures from hw hotword source
RAW = 0x4, // minimize signal processing
SYNC = 0x8, // synchronize I/O streams
MMAP_NOIRQ = 0x10, // input operates in MMAP no IRQ mode.
VOIP_TX = 0x20, // preferred input for VoIP calls.
HW_AV_SYNC = 0x40, // input connected to an output that uses a hardware A/V sync
};
@export(name="audio_usage_t", value_prefix="AUDIO_USAGE_")
enum AudioUsage : int32_t {
// These values must be kept in sync with
// frameworks/base/media/java/android/media/AudioAttributes.java
// Note that not all framework values are exposed
UNKNOWN = 0,
MEDIA = 1,
VOICE_COMMUNICATION = 2,
VOICE_COMMUNICATION_SIGNALLING = 3,
ALARM = 4,
NOTIFICATION = 5,
NOTIFICATION_TELEPHONY_RINGTONE = 6,
ASSISTANCE_ACCESSIBILITY = 11,
ASSISTANCE_NAVIGATION_GUIDANCE = 12,
ASSISTANCE_SONIFICATION = 13,
GAME = 14,
VIRTUAL_SOURCE = 15,
ASSISTANT = 16,
};
/** Type of audio generated by an application. */
@export(name="audio_content_type_t", value_prefix="AUDIO_CONTENT_TYPE_")
enum AudioContentType : uint32_t {
// Do not change these values without updating their counterparts
// in frameworks/base/media/java/android/media/AudioAttributes.java
UNKNOWN = 0,
SPEECH = 1,
MUSIC = 2,
MOVIE = 3,
SONIFICATION = 4,
};
/**
* Additional information about the stream passed to hardware decoders.
*/
struct AudioOffloadInfo {
uint32_t sampleRateHz;
bitfield<AudioChannelMask> channelMask;
AudioFormat format;
AudioStreamType streamType;
uint32_t bitRatePerSecond;
int64_t durationMicroseconds; // -1 if unknown
bool hasVideo;
bool isStreaming;
uint32_t bitWidth;
uint32_t bufferSize;
AudioUsage usage;
};
/**
* Commonly used audio stream configuration parameters.
*/
struct AudioConfig {
uint32_t sampleRateHz;
bitfield<AudioChannelMask> channelMask;
AudioFormat format;
AudioOffloadInfo offloadInfo;
uint64_t frameCount;
};
/** Metadata of a playback track for a StreamOut. */
struct PlaybackTrackMetadata {
AudioUsage usage;
AudioContentType contentType;
/**
* Positive linear gain applied to the track samples. 0 means muted, 1 means
* no attenuation, 2 means double amplification, and so on.
* Must not be negative.
*/
float gain;
};
/** Metadata of the source of a StreamOut. */
struct SourceMetadata {
vec<PlaybackTrackMetadata> tracks;
};
/** Metadata of a record track for a StreamIn. */
struct RecordTrackMetadata {
AudioSource source;
/**
* Positive linear gain applied to the track samples. 0 means muted, 1 means
* no attenuation, 2 means double amplification, and so on.
* Must not be negative.
*/
float gain;
};
/** Metadata of the sink of a StreamIn. */
struct SinkMetadata {
vec<RecordTrackMetadata> tracks;
};
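
These structures are what the framework passes to
IStreamOut::updateSourceMetadata and IStreamIn::updateSinkMetadata whenever
the set of active tracks changes. A minimal caller-side sketch for the
playback case, assuming an already-opened stream; variable names are
illustrative and error handling is omitted:

    #include <android/hardware/audio/5.0/IStreamOut.h>  // generated header

    using ::android::hardware::audio::V5_0::IStreamOut;
    using ::android::hardware::audio::common::V5_0::AudioContentType;
    using ::android::hardware::audio::common::V5_0::AudioUsage;
    using ::android::hardware::audio::common::V5_0::PlaybackTrackMetadata;
    using ::android::hardware::audio::common::V5_0::SourceMetadata;

    void notifyOneMediaTrack(const android::sp<IStreamOut>& stream) {
        PlaybackTrackMetadata track{};
        track.usage = AudioUsage::MEDIA;
        track.contentType = AudioContentType::MUSIC;
        track.gain = 1.0f;  // unity: no attenuation, no amplification
        SourceMetadata metadata;
        metadata.tracks = {track};  // one entry per active track
        stream->updateSourceMetadata(metadata);  // status check omitted
    }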
/*
*
* Volume control
*
*/
/**
* Type of gain control exposed by an audio port.
*/
@export(name="", value_prefix="AUDIO_GAIN_MODE_")
enum AudioGainMode : uint32_t {
JOINT = 0x1, // supports joint channel gain control
CHANNELS = 0x2, // supports separate channel gain control
RAMP = 0x4 // supports gain ramps
};
/**
* An audio_gain struct is a representation of a gain stage.
* A gain stage is always attached to an audio port.
*/
struct AudioGain {
bitfield<AudioGainMode> mode;
bitfield<AudioChannelMask> channelMask; // channels whose gain can be controlled
int32_t minValue; // minimum gain value in millibels
int32_t maxValue; // maximum gain value in millibels
int32_t defaultValue; // default gain value in millibels
uint32_t stepValue; // gain step in millibels
uint32_t minRampMs; // minimum ramp duration in ms
uint32_t maxRampMs; // maximum ramp duration in ms
};
/**
* The gain configuration structure is used to get or set the gain values of a
* given port.
*/
struct AudioGainConfig {
int32_t index; // index of the corresponding AudioGain in AudioPort.gains
AudioGainMode mode;
AudioChannelMask channelMask; // channels to which the gain values apply
/**
* 4 = sizeof(AudioChannelMask),
* 8 is not "FCC_8", so it won't need to be changed for > 8 channels.
* Gain values in millibels for each channel ordered from LSb to MSb in
* channel mask. The number of values is 1 in joint mode or
* popcount(channel_mask).
*/
int32_t[4 * 8] values;
uint32_t rampDurationMs; // ramp duration in ms
};
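
For illustration, in CHANNELS mode there is one entry in values per bit set in
channelMask, ordered from LSb to MSb. A minimal C++ sketch configuring a
per-channel stereo gain; the helper is illustrative:

    using ::android::hardware::audio::common::V5_0::AudioChannelMask;
    using ::android::hardware::audio::common::V5_0::AudioGainConfig;
    using ::android::hardware::audio::common::V5_0::AudioGainMode;

    AudioGainConfig stereoGain(int32_t leftMb, int32_t rightMb) {
        AudioGainConfig cfg{};
        cfg.index = 0;  // refers to the first AudioGain in AudioPort.gains
        cfg.mode = AudioGainMode::CHANNELS;
        cfg.channelMask = AudioChannelMask::OUT_STEREO;
        cfg.values[0] = leftMb;   // OUT_FRONT_LEFT  (lowest set bit)
        cfg.values[1] = rightMb;  // OUT_FRONT_RIGHT (next set bit)
        return cfg;
    }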
/*
*
* Routing control
*
*/
/*
* Types defined here are used to describe an audio source or sink at internal
* framework interfaces (audio policy, patch panel) or at the audio HAL.
* Sinks and sources are grouped under the concept of an “audio port” representing an
* audio end point at the edge of the system managed by the module exposing
* the interface.
*/
/** Audio port role: either source or sink */
@export(name="audio_port_role_t", value_prefix="AUDIO_PORT_ROLE_")
enum AudioPortRole : int32_t {
NONE,
SOURCE,
SINK,
};
/**
* Audio port type indicates if it is a session (e.g. AudioTrack), a mix (e.g.
* PlaybackThread output) or a physical device (e.g. OUT_SPEAKER)
*/
@export(name="audio_port_type_t", value_prefix="AUDIO_PORT_TYPE_")
enum AudioPortType : int32_t {
NONE,
DEVICE,
MIX,
SESSION,
};
/**
* Extension for audio port configuration structure when the audio port is a
* hardware device.
*/
struct AudioPortConfigDeviceExt {
AudioModuleHandle hwModule; // module the device is attached to
AudioDevice type; // device type (e.g OUT_SPEAKER)
uint8_t[32] address; // device address. "" if N/A
};
/**
* Extension for audio port configuration structure when the audio port is an
* audio session.
*/
struct AudioPortConfigSessionExt {
AudioSession session;
};
/**
* Flags indicating which fields are to be considered in AudioPortConfig.
*/
@export(name="", value_prefix="AUDIO_PORT_CONFIG_")
enum AudioPortConfigMask : uint32_t {
SAMPLE_RATE = 0x1,
CHANNEL_MASK = 0x2,
FORMAT = 0x4,
GAIN = 0x8,
};
/**
* Audio port configuration structure used to specify a particular configuration
* of an audio port.
*/
struct AudioPortConfig {
AudioPortHandle id;
bitfield<AudioPortConfigMask> configMask;
uint32_t sampleRateHz;
bitfield<AudioChannelMask> channelMask;
AudioFormat format;
AudioGainConfig gain;
AudioPortType type; // type is used as a discriminator for Ext union
AudioPortRole role; // role is used as a discriminator for UseCase union
union Ext {
AudioPortConfigDeviceExt device;
struct AudioPortConfigMixExt {
AudioModuleHandle hwModule; // module the stream is attached to
AudioIoHandle ioHandle; // I/O handle of the input/output stream
union UseCase {
AudioStreamType stream;
AudioSource source;
} useCase;
} mix;
AudioPortConfigSessionExt session;
} ext;
};
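
Since type and role discriminate the unions, a writer has to set them
consistently with the union members it fills. A minimal C++ sketch configuring
the sample rate of an output mix port; the helper and values are illustrative:

    using ::android::hardware::audio::common::V5_0::AudioPortConfig;
    using ::android::hardware::audio::common::V5_0::AudioPortConfigMask;
    using ::android::hardware::audio::common::V5_0::AudioPortRole;
    using ::android::hardware::audio::common::V5_0::AudioPortType;
    using ::android::hardware::audio::common::V5_0::AudioStreamType;

    AudioPortConfig mixSampleRateConfig(int32_t portId, int32_t ioHandle) {
        AudioPortConfig cfg{};
        cfg.id = portId;
        // Only the sample rate field below is to be considered.
        cfg.configMask = static_cast<uint32_t>(AudioPortConfigMask::SAMPLE_RATE);
        cfg.sampleRateHz = 48000;
        cfg.type = AudioPortType::MIX;     // selects Ext.mix
        cfg.role = AudioPortRole::SOURCE;  // selects UseCase.stream
        cfg.ext.mix.ioHandle = ioHandle;
        cfg.ext.mix.useCase.stream = AudioStreamType::MUSIC;
        return cfg;
    }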
/**
* Extension for audio port structure when the audio port is a hardware device.
*/
struct AudioPortDeviceExt {
AudioModuleHandle hwModule; // module the device is attached to
AudioDevice type;
/** 32 byte string identifying the port. */
uint8_t[32] address;
};
/**
* Latency class of the audio mix.
*/
@export(name="audio_mix_latency_class_t", value_prefix="AUDIO_LATENCY_")
enum AudioMixLatencyClass : int32_t {
LOW,
NORMAL
};
struct AudioPortMixExt {
AudioModuleHandle hwModule; // module the stream is attached to
AudioIoHandle ioHandle; // I/O handle of the stream
AudioMixLatencyClass latencyClass;
};
/**
* Extension for audio port structure when the audio port is an audio session.
*/
struct AudioPortSessionExt {
AudioSession session;
};
struct AudioPort {
AudioPortHandle id;
AudioPortRole role;
string name;
vec<uint32_t> sampleRates;
vec<bitfield<AudioChannelMask>> channelMasks;
vec<AudioFormat> formats;
vec<AudioGain> gains;
AudioPortConfig activeConfig; // current audio port configuration
AudioPortType type; // type is used as a discriminator
union Ext {
AudioPortDeviceExt device;
AudioPortMixExt mix;
AudioPortSessionExt session;
} ext;
};
struct ThreadInfo {
int64_t pid;
int64_t tid;
};

47
audio/effect/5.0/Android.bp Normal file
View file

@ -0,0 +1,47 @@
// This file is autogenerated by hidl-gen -Landroidbp.
hidl_interface {
name: "android.hardware.audio.effect@5.0",
root: "android.hardware",
vndk: {
enabled: true,
},
srcs: [
"types.hal",
"IAcousticEchoCancelerEffect.hal",
"IAutomaticGainControlEffect.hal",
"IBassBoostEffect.hal",
"IDownmixEffect.hal",
"IEffect.hal",
"IEffectBufferProviderCallback.hal",
"IEffectsFactory.hal",
"IEnvironmentalReverbEffect.hal",
"IEqualizerEffect.hal",
"ILoudnessEnhancerEffect.hal",
"INoiseSuppressionEffect.hal",
"IPresetReverbEffect.hal",
"IVirtualizerEffect.hal",
"IVisualizerEffect.hal",
],
interfaces: [
"android.hardware.audio.common@5.0",
"android.hidl.base@1.0",
],
types: [
"AudioBuffer",
"EffectAuxChannelsConfig",
"EffectBufferAccess",
"EffectBufferConfig",
"EffectConfig",
"EffectConfigParameters",
"EffectDescriptor",
"EffectFeature",
"EffectFlags",
"EffectOffloadParameter",
"MessageQueueFlagBits",
"Result",
],
gen_java: false,
gen_java_constants: true,
}

32
audio/effect/5.0/IAcousticEchoCancelerEffect.hal Normal file
View file

@ -0,0 +1,32 @@
/*
* Copyright (C) 2018 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package android.hardware.audio.effect@5.0;
import android.hardware.audio.common@5.0;
import IEffect;
interface IAcousticEchoCancelerEffect extends IEffect {
/**
* Sets echo delay value in milliseconds.
*/
setEchoDelay(uint32_t echoDelayMs) generates (Result retval);
/**
* Gets echo delay value in milliseconds.
*/
getEchoDelay() generates (Result retval, uint32_t echoDelayMs);
};

68
audio/effect/5.0/IAutomaticGainControlEffect.hal Normal file
View file

@ -0,0 +1,68 @@
/*
* Copyright (C) 2018 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package android.hardware.audio.effect@5.0;
import android.hardware.audio.common@5.0;
import IEffect;
interface IAutomaticGainControlEffect extends IEffect {
/**
* Sets target level in millibels.
*/
setTargetLevel(int16_t targetLevelMb) generates (Result retval);
/**
* Gets target level.
*/
getTargetLevel() generates (Result retval, int16_t targetLevelMb);
/**
* Sets gain in the compression range in millibels.
*/
setCompGain(int16_t compGainMb) generates (Result retval);
/**
* Gets gain in the compression range.
*/
getCompGain() generates (Result retval, int16_t compGainMb);
/**
* Enables or disables limiter.
*/
setLimiterEnabled(bool enabled) generates (Result retval);
/**
* Returns whether limiter is enabled.
*/
isLimiterEnabled() generates (Result retval, bool enabled);
struct AllProperties {
int16_t targetLevelMb;
int16_t compGainMb;
bool limiterEnabled;
};
/**
* Sets all properties at once.
*/
setAllProperties(AllProperties properties) generates (Result retval);
/**
* Gets all properties at once.
*/
getAllProperties() generates (Result retval, AllProperties properties);
};

48
audio/effect/5.0/IBassBoostEffect.hal Normal file
View file

@ -0,0 +1,48 @@
/*
* Copyright (C) 2018 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package android.hardware.audio.effect@5.0;
import android.hardware.audio.common@5.0;
import IEffect;
interface IBassBoostEffect extends IEffect {
/**
* Returns whether setting bass boost strength is supported.
*/
isStrengthSupported() generates (Result retval, bool strengthSupported);
enum StrengthRange : uint16_t {
MIN = 0,
MAX = 1000
};
/**
* Sets bass boost strength.
*
* @param strength strength of the effect. The valid range for strength
* strength is [0, 1000], where 0 per mille designates the
* mildest effect and 1000 per mille designates the
* strongest.
* @return retval operation completion status.
*/
setStrength(uint16_t strength) generates (Result retval);
/**
* Gets bass boost strength.
*/
getStrength() generates (Result retval, uint16_t strength);
};

37
audio/effect/5.0/IDownmixEffect.hal Normal file
View file

@ -0,0 +1,37 @@
/*
* Copyright (C) 2018 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package android.hardware.audio.effect@5.0;
import android.hardware.audio.common@5.0;
import IEffect;
interface IDownmixEffect extends IEffect {
enum Type : int32_t {
STRIP, // throw away the extra channels
FOLD // mix the extra channels with FL/FR
};
/**
* Sets the current downmix preset.
*/
setType(Type preset) generates (Result retval);
/**
* Gets the current downmix preset.
*/
getType() generates (Result retval, Type preset);
};

418
audio/effect/5.0/IEffect.hal Normal file
View file

@ -0,0 +1,418 @@
/*
* Copyright (C) 2018 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package android.hardware.audio.effect@5.0;
import android.hardware.audio.common@5.0;
import IEffectBufferProviderCallback;
interface IEffect {
/**
* Initialize the effect engine. All configurations return to default.
*
* @return retval operation completion status.
*/
@entry
init() generates (Result retval);
/**
* Apply new audio parameters configurations for input and output buffers.
* The provider callbacks may be empty, but in this case the buffer
* must be provided in the EffectConfig structure.
*
* @param config configuration descriptor.
* @param inputBufferProvider optional buffer provider reference.
* @param outputBufferProvider optional buffer provider reference.
* @return retval operation completion status.
*/
setConfig(EffectConfig config,
IEffectBufferProviderCallback inputBufferProvider,
IEffectBufferProviderCallback outputBufferProvider)
generates (Result retval);
/**
* Reset the effect engine. Keeps the configuration but resets state and
* buffer content.
*
* @return retval operation completion status.
*/
reset() generates (Result retval);
/**
* Enable processing.
*
* @return retval operation completion status.
*/
@callflow(next={"prepareForProcessing"})
enable() generates (Result retval);
/**
* Disable processing.
*
* @return retval operation completion status.
*/
@callflow(next={"close"})
disable() generates (Result retval);
/**
* Set the rendering device the audio output path is connected to. The
* effect implementation must set EFFECT_FLAG_DEVICE_IND flag in its
* descriptor to receive this command when the device changes.
*
* Note: this method is only supported for effects inserted into
* the output chain.
*
* @param device output device specification.
* @return retval operation completion status.
*/
setDevice(bitfield<AudioDevice> device) generates (Result retval);
/**
* Set and get volume. Used by audio framework to delegate volume control to
* effect engine. The effect implementation must set EFFECT_FLAG_VOLUME_CTRL
* flag in its descriptor to receive this command. The effect engine must
* return the volume that should be applied before the effect is
* processed. The overall volume (the volume actually applied by the effect
* engine multiplied by the returned value) should match the value indicated
* in the command.
*
* @param volumes vector containing volume for each channel defined in
* EffectConfig for output buffer expressed in 8.24 fixed
* point format.
* @return result updated volume values.
* @return retval operation completion status.
*/
setAndGetVolume(vec<uint32_t> volumes)
generates (Result retval, vec<uint32_t> result);
/**
* Notify the effect of the volume change. The effect implementation must
* set EFFECT_FLAG_VOLUME_IND flag in its descriptor to receive this
* command.
*
* @param volumes vector containing volume for each channel defined in
* EffectConfig for output buffer expressed in 8.24 fixed
* point format.
* @return retval operation completion status.
*/
volumeChangeNotification(vec<uint32_t> volumes)
generates (Result retval);
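The 8.24 fixed-point encoding used by both volume methods above is easy to get wrong; a small conversion sketch, assuming non-negative gains:

    // 8.24 fixed point: 8 integer bits, 24 fractional bits; 1.0f == 1 << 24.
    #include <cstdint>

    uint32_t gainTo8_24(float gain) {
        return static_cast<uint32_t>(gain * (1 << 24));
    }

    float gainFrom8_24(uint32_t fixed) {
        return static_cast<float>(fixed) / (1 << 24);
    }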
/**
* Set the audio mode. The effect implementation must set
* EFFECT_FLAG_AUDIO_MODE_IND flag in its descriptor to receive this command
* when the audio mode changes.
*
* @param mode desired audio mode.
* @return retval operation completion status.
*/
setAudioMode(AudioMode mode) generates (Result retval);
/**
* Apply new audio parameter configurations for input and output buffers of
* reverse stream. An example of reverse stream is the echo reference
* supplied to an Acoustic Echo Canceler.
*
* @param config configuration descriptor.
* @param inputBufferProvider optional buffer provider reference.
* @param outputBufferProvider optional buffer provider reference.
* @return retval operation completion status.
*/
setConfigReverse(EffectConfig config,
IEffectBufferProviderCallback inputBufferProvider,
IEffectBufferProviderCallback outputBufferProvider)
generates (Result retval);
/**
* Set the capture device the audio input path is connected to. The effect
* implementation must set EFFECT_FLAG_DEVICE_IND flag in its descriptor to
* receive this command when the device changes.
*
* Note: this method is only supported for effects inserted into
* the input chain.
*
* @param device input device specification.
* @return retval operation completion status.
*/
setInputDevice(bitfield<AudioDevice> device) generates (Result retval);
/**
* Read audio parameter configurations for input and output buffers.
*
* @return retval operation completion status.
* @return config configuration descriptor.
*/
getConfig() generates (Result retval, EffectConfig config);
/**
* Read audio parameter configurations for input and output buffers of
* reverse stream.
*
* @return retval operation completion status.
* @return config configuration descriptor.
*/
getConfigReverse() generates (Result retval, EffectConfig config);
/**
* Queries for supported combinations of main and auxiliary channels
* (e.g. for a multi-microphone noise suppressor).
*
* @param maxConfigs maximum number of the combinations to return.
* @return retval absence of the feature support is indicated using
* NOT_SUPPORTED code. RESULT_TOO_BIG is returned if
* the number of supported combinations exceeds 'maxConfigs'.
* @return result list of configuration descriptors.
*/
getSupportedAuxChannelsConfigs(uint32_t maxConfigs)
generates (Result retval, vec<EffectAuxChannelsConfig> result);
/**
* Retrieves the current configuration of main and auxiliary channels.
*
* @return retval absence of the feature support is indicated using
* NOT_SUPPORTED code.
* @return result configuration descriptor.
*/
getAuxChannelsConfig()
generates (Result retval, EffectAuxChannelsConfig result);
/**
* Sets the current configuration of main and auxiliary channels.
*
* @return retval operation completion status; absence of the feature
* support is indicated using NOT_SUPPORTED code.
*/
setAuxChannelsConfig(EffectAuxChannelsConfig config)
generates (Result retval);
/**
* Set the audio source the capture path is configured for (Camcorder, voice
* recognition...).
*
* Note: this method is only supported for effects inserted into
* the input chain.
*
* @param source source descriptor.
* @return retval operation completion status.
*/
setAudioSource(AudioSource source) generates (Result retval);
/**
* Indicates whether the playback thread the effect is attached to is
* offloaded, and updates the I/O handle of that playback thread.
*
* @param param effect offload descriptor.
* @return retval operation completion status.
*/
offload(EffectOffloadParameter param) generates (Result retval);
/**
* Returns the effect descriptor.
*
* @return retval operation completion status.
* @return descriptor effect descriptor.
*/
getDescriptor() generates (Result retval, EffectDescriptor descriptor);
/**
* Set up required transports for passing audio buffers to the effect.
*
* The transport consists of shared memory and a message queue for reporting
* effect processing operation status. The shared memory is set up
* separately using 'setProcessBuffers' method.
*
* Processing is requested by setting 'REQUEST_PROCESS' or
* 'REQUEST_PROCESS_REVERSE' EventFlags associated with the status message
* queue. The result of processing may be one of the following:
* OK if there were no errors during processing;
* INVALID_ARGUMENTS if audio buffers are invalid;
* INVALID_STATE if the engine has finished the disable phase;
* NOT_INITIALIZED if the audio buffers were not set;
* NOT_SUPPORTED if the requested processing type is not supported by
* the effect.
*
* @return retval OK if both message queues were created successfully.
* INVALID_STATE if the method was already called.
* INVALID_ARGUMENTS if there was a problem setting up
* the queue.
* @return statusMQ a message queue used for passing status from the effect.
*/
@callflow(next={"setProcessBuffers"})
prepareForProcessing() generates (Result retval, fmq_sync<Result> statusMQ);
/**
* Set up input and output buffers for processing audio data. The effect
* may modify both the input and the output buffer during the operation.
* Buffers may be set multiple times during effect lifetime.
*
* The input and the output buffer may be reused between different effects,
* and the input buffer may be used as an output buffer. Buffers are
* distinguished using 'AudioBuffer.id' field.
*
* @param inBuffer input audio buffer.
* @param outBuffer output audio buffer.
* @return retval OK if both buffers were mapped successfully.
* INVALID_ARGUMENTS if there was a problem with mapping
* any of the buffers.
*/
setProcessBuffers(AudioBuffer inBuffer, AudioBuffer outBuffer)
generates (Result retval);
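Putting the two methods above together, here is a client-side sketch of one processing pass. It assumes libfmq's MessageQueue/EventFlag types; error handling and the shared-memory setup behind the AudioBuffers are omitted:

    #include <fmq/EventFlag.h>
    #include <fmq/MessageQueue.h>
    #include <android/hardware/audio/effect/5.0/types.h>

    using ::android::hardware::EventFlag;
    using ::android::hardware::MessageQueue;
    using ::android::hardware::kSynchronizedReadWrite;
    using namespace ::android::hardware::audio::effect::V5_0;

    // 'statusMQ' wraps the descriptor returned by prepareForProcessing();
    // setProcessBuffers() is assumed to have already returned OK.
    void processOnce(MessageQueue<Result, kSynchronizedReadWrite>* statusMQ) {
        EventFlag* flag = nullptr;
        EventFlag::createEventFlag(statusMQ->getEventFlagWord(), &flag);

        // Ask the engine for one processing pass...
        flag->wake(static_cast<uint32_t>(MessageQueueFlagBits::REQUEST_PROCESS));

        // ...wait for it to signal completion, then read the status.
        uint32_t state = 0;
        flag->wait(static_cast<uint32_t>(MessageQueueFlagBits::DONE_PROCESSING),
                   &state);
        Result status;
        statusMQ->read(&status);  // OK, INVALID_STATE, NOT_INITIALIZED, ...

        EventFlag::deleteEventFlag(&flag);
    }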
/**
* Execute a vendor specific command on the effect. The command code
* and data, as well as result data are not interpreted by Android
* Framework and are passed as-is between the application and the effect.
*
* The effect must use standard POSIX.1-2001 error codes for the operation
* completion status.
*
* Use this method only if the effect is provided by a third party, and
* there is no interface defined for it. This method only works for effects
* implemented in software.
*
* @param commandId the ID of the command.
* @param data command data.
* @param resultMaxSize maximum size in bytes of the result; can be 0.
* @return status command completion status.
* @return result result data.
*/
command(uint32_t commandId, vec<uint8_t> data, uint32_t resultMaxSize)
generates (int32_t status, vec<uint8_t> result);
/**
* Set a vendor-specific parameter and apply it immediately. The parameter
* code and data are not interpreted by Android Framework and are passed
* as-is between the application and the effect.
*
* The effect must use INVALID_ARGUMENTS return code if the parameter ID is
* unknown or if provided parameter data is invalid. If the effect does not
* support setting vendor-specific parameters, it must return NOT_SUPPORTED.
*
* Use this method only if the effect is provided by a third party, and
* there is no interface defined for it. This method only works for effects
* implemented in software.
*
* @param parameter identifying data of the parameter.
* @param value the value of the parameter.
* @return retval operation completion status.
*/
setParameter(vec<uint8_t> parameter, vec<uint8_t> value)
generates (Result retval);
/**
* Get a vendor-specific parameter value. The parameter code and returned
* data are not interpreted by Android Framework and are passed as-is
* between the application and the effect.
*
* The effect must use INVALID_ARGUMENTS return code if the parameter ID is
* unknown. If the effect does not support getting vendor-specific
* parameters, it must return NOT_SUPPORTED.
*
* Use this method only if the effect is provided by a third party, and
* there is no interface defined for it. This method only works for effects
* implemented in software.
*
* @param parameter identifying data of the parameter.
* @param valueMaxSize maximum size in bytes of the value.
* @return retval operation completion status.
* @return value the value of the parameter.
*/
getParameter(vec<uint8_t> parameter, uint32_t valueMaxSize)
generates (Result retval, vec<uint8_t> value);
/**
* Get supported configs for a vendor-specific feature. The configs returned
* are not interpreted by Android Framework and are passed as-is between the
* application and the effect.
*
* The effect must use INVALID_ARGUMENTS return code if the feature ID is
* unknown. If the effect does not support getting vendor-specific feature
* configs, it must return NOT_SUPPORTED. If the feature is supported but
* the total number of supported configurations exceeds the maximum number
* indicated by the caller, the method must return RESULT_TOO_BIG.
*
* Use this method only if the effect is provided by a third party, and
* there is no interface defined for it. This method only works for effects
* implemented in software.
*
* @param featureId feature identifier.
* @param maxConfigs maximum number of configs to return.
* @param configSize size of each config in bytes.
* @return retval operation completion status.
* @return configsCount number of configs returned.
* @return configsData data for all the configs returned.
*/
getSupportedConfigsForFeature(
uint32_t featureId,
uint32_t maxConfigs,
uint32_t configSize) generates (
Result retval,
uint32_t configsCount,
vec<uint8_t> configsData);
/**
* Get the current config for a vendor-specific feature. The config returned
* is not interpreted by Android Framework and is passed as-is between the
* application and the effect.
*
* The effect must use INVALID_ARGUMENTS return code if the feature ID is
* unknown. If the effect does not support getting vendor-specific
* feature configs, it must return NOT_SUPPORTED.
*
* Use this method only if the effect is provided by a third party, and
* there is no interface defined for it. This method only works for effects
* implemented in software.
*
* @param featureId feature identifier.
* @param configSize size of the config in bytes.
* @return retval operation completion status.
* @return configData config data.
*/
getCurrentConfigForFeature(uint32_t featureId, uint32_t configSize)
generates (Result retval, vec<uint8_t> configData);
/**
* Set the current config for a vendor-specific feature. The config data
* is not interpreted by Android Framework and is passed as-is between the
* application and the effect.
*
* The effect must use INVALID_ARGUMENTS return code if the feature ID is
* unknown. If the effect does not support setting vendor-specific
* feature configs, it must return NOT_SUPPORTED.
*
* Use this method only if the effect is provided by a third party, and
* there is no interface defined for it. This method only works for effects
* implemented in software.
*
* @param featureId feature identifier.
* @param configData config data.
* @return retval operation completion status.
*/
setCurrentConfigForFeature(uint32_t featureId, vec<uint8_t> configData)
generates (Result retval);
/**
* Called by the framework to deinitialize the effect and free up
* all the currently allocated resources. It is recommended to close
* the effect on the client side as soon as it becomes unused.
*
* @return retval OK in case of success.
* INVALID_STATE if the effect was already closed.
*/
@exit
close() generates (Result retval);
};

View file

@ -0,0 +1,38 @@
/*
* Copyright (C) 2018 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package android.hardware.audio.effect@5.0;
/**
* This callback interface contains functions that can be used by the effect
* engine 'process' function to exchange input and output audio buffers.
*/
interface IEffectBufferProviderCallback {
/**
* Called to retrieve a buffer from which the 'process' function
* should read data.
*
* @return buffer audio buffer for processing
*/
getBuffer() generates (AudioBuffer buffer);
/**
* Called to provide a buffer with the data written by 'process' function.
*
* @param buffer audio buffer for processing
*/
putBuffer(AudioBuffer buffer);
};

View file

@ -0,0 +1,58 @@
/*
* Copyright (C) 2018 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package android.hardware.audio.effect@5.0;
import android.hardware.audio.common@5.0;
import IEffect;
interface IEffectsFactory {
/**
* Returns descriptors of different effects in all loaded libraries.
*
* @return retval operation completion status.
* @return result list of effect descriptors.
*/
getAllDescriptors() generates(Result retval, vec<EffectDescriptor> result);
/**
* Returns a descriptor of a particular effect.
*
* @return retval operation completion status.
* @return result effect descriptor.
*/
getDescriptor(Uuid uid) generates(Result retval, EffectDescriptor result);
/**
* Creates an effect engine of the specified type. To release the effect
* engine, it is necessary to release references to the returned effect
* object.
*
* @param uid effect uuid.
* @param session audio session to which this effect instance will be
* attached. All effects created with the same session ID
* are connected in series and process the same signal
* stream.
* @param ioHandle identifies the output or input stream this effect is
* directed to in audio HAL.
* @return retval operation completion status.
* @return result the interface for the created effect.
* @return effectId the unique ID of the effect to be used with
* IStream::addEffect and IStream::removeEffect methods.
*/
createEffect(Uuid uid, AudioSession session, AudioIoHandle ioHandle)
generates (Result retval, IEffect result, uint64_t effectId);
};
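A client-side sketch of creating an effect through the generated @5.0 proxy. The function name is hypothetical, the UUID is a caller-supplied placeholder, and transport-error checking on the Return objects is omitted for brevity:

    #include <android/hardware/audio/effect/5.0/IEffectsFactory.h>

    using ::android::sp;
    using namespace ::android::hardware::audio::effect::V5_0;
    using ::android::hardware::audio::common::V5_0::Uuid;

    sp<IEffect> createEffectSketch(const Uuid& uuid, int32_t session,
                                   int32_t ioHandle) {
        sp<IEffectsFactory> factory = IEffectsFactory::getService();
        if (factory == nullptr) return nullptr;  // service not registered

        sp<IEffect> effect;
        factory->createEffect(uuid, session, ioHandle,
                [&](Result retval, const sp<IEffect>& e, uint64_t /*effectId*/) {
                    if (retval == Result::OK) effect = e;
                });
        return effect;
    }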

View file

@ -0,0 +1,178 @@
/*
* Copyright (C) 2018 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package android.hardware.audio.effect@5.0;
import android.hardware.audio.common@5.0;
import IEffect;
interface IEnvironmentalReverbEffect extends IEffect {
/**
* Sets whether the effect should be bypassed.
*/
setBypass(bool bypass) generates (Result retval);
/**
* Gets whether the effect should be bypassed.
*/
getBypass() generates (Result retval, bool bypass);
enum ParamRange : int16_t {
ROOM_LEVEL_MIN = -6000,
ROOM_LEVEL_MAX = 0,
ROOM_HF_LEVEL_MIN = -4000,
ROOM_HF_LEVEL_MAX = 0,
DECAY_TIME_MIN = 100,
DECAY_TIME_MAX = 20000,
DECAY_HF_RATIO_MIN = 100,
DECAY_HF_RATIO_MAX = 1000,
REFLECTIONS_LEVEL_MIN = -6000,
REFLECTIONS_LEVEL_MAX = 0,
REFLECTIONS_DELAY_MIN = 0,
REFLECTIONS_DELAY_MAX = 65,
REVERB_LEVEL_MIN = -6000,
REVERB_LEVEL_MAX = 0,
REVERB_DELAY_MIN = 0,
REVERB_DELAY_MAX = 65,
DIFFUSION_MIN = 0,
DIFFUSION_MAX = 1000,
DENSITY_MIN = 0,
DENSITY_MAX = 1000
};
/**
* Sets the room level.
*/
setRoomLevel(int16_t roomLevel) generates (Result retval);
/**
* Gets the room level.
*/
getRoomLevel() generates (Result retval, int16_t roomLevel);
/**
* Sets the room high frequencies level.
*/
setRoomHfLevel(int16_t roomHfLevel) generates (Result retval);
/**
* Gets the room high frequencies level.
*/
getRoomHfLevel() generates (Result retval, int16_t roomHfLevel);
/**
* Sets the room decay time.
*/
setDecayTime(uint32_t decayTime) generates (Result retval);
/**
* Gets the room decay time.
*/
getDecayTime() generates (Result retval, uint32_t decayTime);
/**
* Sets the ratio of high frequencies decay.
*/
setDecayHfRatio(int16_t decayHfRatio) generates (Result retval);
/**
* Gets the ratio of high frequencies decay.
*/
getDecayHfRatio() generates (Result retval, int16_t decayHfRatio);
/**
* Sets the level of reflections in the room.
*/
setReflectionsLevel(int16_t reflectionsLevel) generates (Result retval);
/**
* Gets the level of reflections in the room.
*/
getReflectionsLevel() generates (Result retval, int16_t reflectionsLevel);
/**
* Sets the reflections delay in the room.
*/
setReflectionsDelay(uint32_t reflectionsDelay) generates (Result retval);
/**
* Gets the reflections delay in the room.
*/
getReflectionsDelay() generates (Result retval, uint32_t reflectionsDelay);
/**
* Sets the reverb level of the room.
*/
setReverbLevel(int16_t reverbLevel) generates (Result retval);
/**
* Gets the reverb level of the room.
*/
getReverbLevel() generates (Result retval, int16_t reverbLevel);
/**
* Sets the reverb delay of the room.
*/
setReverbDelay(uint32_t reverbDelay) generates (Result retval);
/**
* Gets the reverb delay of the room.
*/
getReverbDelay() generates (Result retval, uint32_t reverbDelay);
/**
* Sets room diffusion.
*/
setDiffusion(int16_t diffusion) generates (Result retval);
/**
* Gets room diffusion.
*/
getDiffusion() generates (Result retval, int16_t diffusion);
/**
* Sets room wall density.
*/
setDensity(int16_t density) generates (Result retval);
/**
* Gets room wall density.
*/
getDensity() generates (Result retval, int16_t density);
struct AllProperties {
int16_t roomLevel; // in millibels, range -6000 to 0
int16_t roomHfLevel; // in millibels, range -4000 to 0
uint32_t decayTime; // in milliseconds, range 100 to 20000
int16_t decayHfRatio; // in permilles, range 100 to 1000
int16_t reflectionsLevel; // in millibels, range -6000 to 0
uint32_t reflectionsDelay; // in milliseconds, range 0 to 65
int16_t reverbLevel; // in millibels, range -6000 to 0
uint32_t reverbDelay; // in milliseconds, range 0 to 65
int16_t diffusion; // in permilles, range 0 to 1000
int16_t density; // in permilles, range 0 to 1000
};
/**
* Sets all properties at once.
*/
setAllProperties(AllProperties properties) generates (Result retval);
/**
* Gets all properties at once.
*/
getAllProperties() generates (Result retval, AllProperties properties);
};
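The units in AllProperties are easy to misread, so here is a sketch with purely illustrative values for a damped, medium-sized room. The values are not a tuned preset, just unit demonstrations:

    #include <android/hardware/audio/effect/5.0/IEnvironmentalReverbEffect.h>

    using ::android::sp;
    using namespace ::android::hardware::audio::effect::V5_0;

    void configureRoom(const sp<IEnvironmentalReverbEffect>& reverb) {
        IEnvironmentalReverbEffect::AllProperties props{};
        props.roomLevel        = -1000;  // millibels: -10 dB
        props.roomHfLevel      = -600;   // high frequencies 6 dB quieter
        props.decayTime        = 1500;   // milliseconds: 1.5 s
        props.decayHfRatio     = 830;    // per mille
        props.reflectionsLevel = -2000;  // millibels
        props.reflectionsDelay = 7;      // milliseconds
        props.reverbLevel      = -1400;  // millibels
        props.reverbDelay      = 11;     // milliseconds
        props.diffusion        = 1000;   // per mille: fully diffuse
        props.density          = 1000;   // per mille
        reverb->setAllProperties(props);
    }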

View file

@ -0,0 +1,93 @@
/*
* Copyright (C) 2018 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package android.hardware.audio.effect@5.0;
import android.hardware.audio.common@5.0;
import IEffect;
interface IEqualizerEffect extends IEffect {
/**
* Gets the number of frequency bands that the equalizer supports.
*/
getNumBands() generates (Result retval, uint16_t numBands);
/**
* Returns the minimum and maximum band levels supported.
*/
getLevelRange()
generates (Result retval, int16_t minLevel, int16_t maxLevel);
/**
* Sets the gain for the given equalizer band.
*/
setBandLevel(uint16_t band, int16_t level) generates (Result retval);
/**
* Gets the gain for the given equalizer band.
*/
getBandLevel(uint16_t band) generates (Result retval, int16_t level);
/**
* Gets the center frequency of the given band, in milliHertz.
*/
getBandCenterFrequency(uint16_t band)
generates (Result retval, uint32_t centerFreqmHz);
/**
* Gets the frequency range of the given frequency band, in milliHertz.
*/
getBandFrequencyRange(uint16_t band)
generates (Result retval, uint32_t minFreqmHz, uint32_t maxFreqmHz);
/**
* Gets the band that has the most effect on the given frequency
* in milliHertz.
*/
getBandForFrequency(uint32_t freqmHz)
generates (Result retval, uint16_t band);
/**
* Gets the names of all presets the equalizer supports.
*/
getPresetNames() generates (Result retval, vec<string> names);
/**
* Sets the current preset using the index of the preset in the names
* vector returned via 'getPresetNames'.
*/
setCurrentPreset(uint16_t preset) generates (Result retval);
/**
* Gets the current preset.
*/
getCurrentPreset() generates (Result retval, uint16_t preset);
struct AllProperties {
uint16_t curPreset;
vec<int16_t> bandLevels;
};
/**
* Sets all properties at once.
*/
setAllProperties(AllProperties properties) generates (Result retval);
/**
* Gets all properties at once.
*/
getAllProperties() generates (Result retval, AllProperties properties);
};
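A sketch tying these methods together: find the band covering 100 Hz and raise it to half of the maximum supported gain. 'eq' is an assumed, already-created proxy; Return status checks are omitted:

    #include <android/hardware/audio/effect/5.0/IEqualizerEffect.h>

    using ::android::sp;
    using namespace ::android::hardware::audio::effect::V5_0;

    void boostBass(const sp<IEqualizerEffect>& eq) {
        uint16_t band = 0;
        eq->getBandForFrequency(100000,  // 100 Hz expressed in milliHertz
                [&](Result r, uint16_t b) { if (r == Result::OK) band = b; });

        int16_t maxLevel = 0;
        eq->getLevelRange([&](Result r, int16_t /*min*/, int16_t max) {
            if (r == Result::OK) maxLevel = max;
        });

        eq->setBandLevel(band, maxLevel / 2);  // band gain in millibels
    }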

View file

@ -0,0 +1,32 @@
/*
* Copyright (C) 2018 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package android.hardware.audio.effect@5.0;
import android.hardware.audio.common@5.0;
import IEffect;
interface ILoudnessEnhancerEffect extends IEffect {
/**
* Sets target gain expressed in millibels.
*/
setTargetGain(int32_t targetGainMb) generates (Result retval);
/**
* Gets target gain expressed in millibels.
*/
getTargetGain() generates (Result retval, int32_t targetGainMb);
};
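Millibels are hundredths of a decibel, so a +6 dB target is 600 mB. A hypothetical convenience wrapper:

    #include <android/hardware/audio/effect/5.0/ILoudnessEnhancerEffect.h>

    using ::android::sp;
    using namespace ::android::hardware::audio::effect::V5_0;

    void setGainDb(const sp<ILoudnessEnhancerEffect>& le, float db) {
        le->setTargetGain(static_cast<int32_t>(db * 100));  // dB -> millibels
    }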

View file

@ -0,0 +1,68 @@
/*
* Copyright (C) 2018 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package android.hardware.audio.effect@5.0;
import android.hardware.audio.common@5.0;
import IEffect;
interface INoiseSuppressionEffect extends IEffect {
enum Level : int32_t {
LOW,
MEDIUM,
HIGH
};
/**
* Sets suppression level.
*/
setSuppressionLevel(Level level) generates (Result retval);
/**
* Gets suppression level.
*/
getSuppressionLevel() generates (Result retval, Level level);
enum Type : int32_t {
SINGLE_CHANNEL,
MULTI_CHANNEL
};
/**
* Set suppression type.
*/
setSuppressionType(Type type) generates (Result retval);
/**
* Get suppression type.
*/
getSuppressionType() generates (Result retval, Type type);
struct AllProperties {
Level level;
Type type;
};
/**
* Sets all properties at once.
*/
setAllProperties(AllProperties properties) generates (Result retval);
/**
* Gets all properties at once.
*/
getAllProperties() generates (Result retval, AllProperties properties);
};

View file

@ -0,0 +1,43 @@
/*
* Copyright (C) 2018 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package android.hardware.audio.effect@5.0;
import android.hardware.audio.common@5.0;
import IEffect;
interface IPresetReverbEffect extends IEffect {
enum Preset : int32_t {
NONE, // no reverb or reflections
SMALLROOM, // a small room less than five meters in length
MEDIUMROOM, // a medium room with a length of ten meters or less
LARGEROOM, // a large-sized room suitable for live performances
MEDIUMHALL, // a medium-sized hall
LARGEHALL, // a large-sized hall suitable for a full orchestra
PLATE, // synthesis of the traditional plate reverb
LAST = PLATE
};
/**
* Sets the current preset.
*/
setPreset(Preset preset) generates (Result retval);
/**
* Gets the current preset.
*/
getPreset() generates (Result retval, Preset preset);
};

View file

@ -0,0 +1,77 @@
/*
* Copyright (C) 2018 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package android.hardware.audio.effect@5.0;
import android.hardware.audio.common@5.0;
import IEffect;
interface IVirtualizerEffect extends IEffect {
/**
* Returns whether setting virtualization strength is supported.
*/
isStrengthSupported() generates (bool strengthSupported);
enum StrengthRange : uint16_t {
MIN = 0,
MAX = 1000
};
/**
* Sets virtualization strength.
*
* @param strength strength of the effect. The valid range for strength
* is [0, 1000], where 0 per mille designates the
* mildest effect and 1000 per mille designates the
* strongest.
* @return retval operation completion status.
*/
setStrength(uint16_t strength) generates (Result retval);
/**
* Gets virtualization strength.
*/
getStrength() generates (Result retval, uint16_t strength);
struct SpeakerAngle {
/** Speaker channel mask */
bitfield<AudioChannelMask> mask;
// all angles are expressed in degrees and
// are relative to the listener.
int16_t azimuth; // 0 is the direction the listener faces
// 180 is behind the listener
// -90 is to their left
int16_t elevation; // 0 is the horizontal plane
// +90 is above the listener, -90 is below
};
/**
* Retrieves virtual speaker angles for the given channel mask on the
* specified device.
*/
getVirtualSpeakerAngles(bitfield<AudioChannelMask> mask, AudioDevice device)
generates (Result retval, vec<SpeakerAngle> speakerAngles);
/**
* Forces the virtualizer effect for the given output device.
*/
forceVirtualizationMode(AudioDevice device) generates (Result retval);
/**
* Returns audio device reflecting the current virtualization mode,
* AUDIO_DEVICE_NONE when not virtualizing.
*/
getVirtualizationMode() generates (Result retval, AudioDevice device);
};
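A sketch of querying the virtual speaker placement for stereo content on wired headphones. Enum values come from audio.common@5.0; 'virt' is an assumed, already-created proxy:

    #include <android/hardware/audio/effect/5.0/IVirtualizerEffect.h>
    #include <cstdint>
    #include <cstdio>

    using ::android::sp;
    using ::android::hardware::hidl_vec;
    using namespace ::android::hardware::audio::effect::V5_0;
    using ::android::hardware::audio::common::V5_0::AudioChannelMask;
    using ::android::hardware::audio::common::V5_0::AudioDevice;

    void dumpAngles(const sp<IVirtualizerEffect>& virt) {
        virt->getVirtualSpeakerAngles(
                static_cast<uint32_t>(AudioChannelMask::OUT_STEREO),
                AudioDevice::OUT_WIRED_HEADPHONE,
                [](Result r,
                   const hidl_vec<IVirtualizerEffect::SpeakerAngle>& angles) {
                    if (r != Result::OK) return;
                    for (const auto& a : angles) {
                        // azimuth: 0 = front, 180 = behind, -90 = left (degrees)
                        std::printf("mask=%#x azimuth=%d elevation=%d\n",
                                    a.mask, a.azimuth, a.elevation);
                    }
                });
    }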

View file

@ -0,0 +1,110 @@
/*
* Copyright (C) 2018 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package android.hardware.audio.effect@5.0;
import android.hardware.audio.common@5.0;
import IEffect;
interface IVisualizerEffect extends IEffect {
enum CaptureSizeRange : int32_t {
MAX = 1024, // maximum capture size in samples
MIN = 128 // minimum capture size in samples
};
/**
* Sets the number of PCM samples in the capture.
*/
setCaptureSize(uint16_t captureSize) generates (Result retval);
/**
* Gets the number of PCM samples in the capture.
*/
getCaptureSize() generates (Result retval, uint16_t captureSize);
enum ScalingMode : int32_t {
// Keep in sync with SCALING_MODE_... in
// frameworks/base/media/java/android/media/audiofx/Visualizer.java
NORMALIZED = 0,
AS_PLAYED = 1
};
/**
* Specifies the way the captured data is scaled.
*/
setScalingMode(ScalingMode scalingMode) generates (Result retval);
/**
* Retrieves the way the captured data is scaled.
*/
getScalingMode() generates (Result retval, ScalingMode scalingMode);
/**
* Informs the visualizer about the downstream latency.
*/
setLatency(uint32_t latencyMs) generates (Result retval);
/**
* Gets the downstream latency.
*/
getLatency() generates (Result retval, uint32_t latencyMs);
enum MeasurementMode : int32_t {
// Keep in sync with MEASUREMENT_MODE_... in
// frameworks/base/media/java/android/media/audiofx/Visualizer.java
NONE = 0x0,
PEAK_RMS = 0x1
};
/**
* Specifies which measurements are to be made.
*/
setMeasurementMode(MeasurementMode measurementMode)
generates (Result retval);
/**
* Retrieves which measurements are to be made.
*/
getMeasurementMode() generates (
Result retval, MeasurementMode measurementMode);
/**
* Retrieves the latest PCM snapshot captured by the visualizer engine. The
* number of samples to capture is specified by the 'setCaptureSize' parameter.
*
* @return retval operation completion status.
* @return samples samples in 8 bit unsigned format (0x80 represents zero).
*/
capture() generates (Result retval, vec<uint8_t> samples);
struct Measurement {
MeasurementMode mode; // discriminator
union Values {
struct PeakAndRms {
int32_t peakMb; // millibels
int32_t rmsMb; // millibels
} peakAndRms;
} value;
};
/**
* Retrieves the latest measurements. The measurements to be made
* are specified by the 'setMeasurementMode' parameter.
*
* @return retval operation completion status.
* @return result measurement.
*/
measure() generates (Result retval, Measurement result);
};
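Since capture() returns 8-bit unsigned samples centered on 0x80, a conversion sketch to floats in [-1.0, 1.0):

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    std::vector<float> toFloat(const std::vector<uint8_t>& samples) {
        std::vector<float> out(samples.size());
        for (size_t i = 0; i < samples.size(); ++i) {
            out[i] = (static_cast<int>(samples[i]) - 0x80) / 128.0f;  // 0x80 -> 0.0
        }
        return out;
    }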

299
audio/effect/5.0/types.hal Normal file
View file

@ -0,0 +1,299 @@
/*
* Copyright (C) 2018 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package android.hardware.audio.effect@5.0;
import android.hardware.audio.common@5.0;
enum Result : int32_t {
OK,
NOT_INITIALIZED,
INVALID_ARGUMENTS,
INVALID_STATE,
NOT_SUPPORTED,
RESULT_TOO_BIG
};
/**
* Effect engine capabilities/requirements flags.
*
* Definitions for flags field of effect descriptor.
*
* +----------------+--------+--------------------------------------------------
* | description | bits | values
* +----------------+--------+--------------------------------------------------
* | connection | 0..2 | 0 insert: after track process
* | mode | | 1 auxiliary: connect to track auxiliary
* | | | output and use send level
* | | | 2 replace: replaces track process function;
* | | | must implement SRC, volume and mono to stereo.
* | | | 3 pre processing: applied below audio HAL on input
* | | | 4 post processing: applied below audio HAL on output
* | | | 5 - 7 reserved
* +----------------+--------+--------------------------------------------------
* | insertion | 3..5 | 0 none
* | preference | | 1 first of the chain
* | | | 2 last of the chain
* | | | 3 exclusive (only effect in the insert chain)
* | | | 4..7 reserved
* +----------------+--------+--------------------------------------------------
* | Volume | 6..8 | 0 none
* | management | | 1 implements volume control
* | | | 2 requires volume indication
* | | | 4 reserved
* +----------------+--------+--------------------------------------------------
* | Device | 9..11 | 0 none
* | indication | | 1 requires device updates
* | | | 2, 4 reserved
* +----------------+--------+--------------------------------------------------
* | Sample input | 12..13 | 1 direct: process() function or
* | mode | | EFFECT_CMD_SET_CONFIG command must specify
* | | | a buffer descriptor
* | | | 2 provider: process() function uses the
* | | | bufferProvider indicated by the
* | | | EFFECT_CMD_SET_CONFIG command to request input
* | | | buffers.
* | | | 3 both: both input modes are supported
* +----------------+--------+--------------------------------------------------
* | Sample output | 14..15 | 1 direct: process() function or
* | mode | | EFFECT_CMD_SET_CONFIG command must specify
* | | | a buffer descriptor
* | | | 2 provider: process() function uses the
* | | | bufferProvider indicated by the
* | | | EFFECT_CMD_SET_CONFIG command to request output
* | | | buffers.
* | | | 3 both: both output modes are supported
* +----------------+--------+--------------------------------------------------
* | Hardware | 16..17 | 0 No hardware acceleration
* | acceleration | | 1 non tunneled hw acceleration: the process()
* | | | function reads the samples, sends them to the HW
* | | | accelerated effect processor, reads back
* | | | the processed samples and returns them
* | | | to the output buffer.
* | | | 2 tunneled hw acceleration: the process()
* | | | function is transparent. The effect interface
* | | | is only used to control the effect engine.
* | | | This mode is relevant for global effects
* | | | actually applied by the audio hardware on
* | | | the output stream.
* +----------------+--------+--------------------------------------------------
* | Audio Mode | 18..19 | 0 none
* | indication | | 1 requires audio mode updates
* | | | 2..3 reserved
* +----------------+--------+--------------------------------------------------
* | Audio source | 20..21 | 0 none
* | indication | | 1 requires audio source updates
* | | | 2..3 reserved
* +----------------+--------+--------------------------------------------------
* | Effect offload | 22 | 0 The effect cannot be offloaded to an audio DSP
* | supported | | 1 The effect can be offloaded to an audio DSP
* +----------------+--------+--------------------------------------------------
* | Process | 23 | 0 The effect implements a process function.
* | function | | 1 The effect does not implement a process
* | not | | function: enabling the effect has no impact
* | implemented | | on latency or CPU load.
* | | | Effect implementations setting this flag do not
* | | | have to implement a process function.
* +----------------+--------+--------------------------------------------------
*/
@export(name="", value_prefix="EFFECT_FLAG_")
enum EffectFlags : int32_t {
// Insert mode
TYPE_SHIFT = 0,
TYPE_SIZE = 3,
TYPE_MASK = ((1 << TYPE_SIZE) -1) << TYPE_SHIFT,
TYPE_INSERT = 0 << TYPE_SHIFT,
TYPE_AUXILIARY = 1 << TYPE_SHIFT,
TYPE_REPLACE = 2 << TYPE_SHIFT,
TYPE_PRE_PROC = 3 << TYPE_SHIFT,
TYPE_POST_PROC = 4 << TYPE_SHIFT,
// Insert preference
INSERT_SHIFT = TYPE_SHIFT + TYPE_SIZE,
INSERT_SIZE = 3,
INSERT_MASK = ((1 << INSERT_SIZE) -1) << INSERT_SHIFT,
INSERT_ANY = 0 << INSERT_SHIFT,
INSERT_FIRST = 1 << INSERT_SHIFT,
INSERT_LAST = 2 << INSERT_SHIFT,
INSERT_EXCLUSIVE = 3 << INSERT_SHIFT,
// Volume control
VOLUME_SHIFT = INSERT_SHIFT + INSERT_SIZE,
VOLUME_SIZE = 3,
VOLUME_MASK = ((1 << VOLUME_SIZE) -1) << VOLUME_SHIFT,
VOLUME_CTRL = 1 << VOLUME_SHIFT,
VOLUME_IND = 2 << VOLUME_SHIFT,
VOLUME_NONE = 0 << VOLUME_SHIFT,
// Device indication
DEVICE_SHIFT = VOLUME_SHIFT + VOLUME_SIZE,
DEVICE_SIZE = 3,
DEVICE_MASK = ((1 << DEVICE_SIZE) -1) << DEVICE_SHIFT,
DEVICE_IND = 1 << DEVICE_SHIFT,
DEVICE_NONE = 0 << DEVICE_SHIFT,
// Sample input modes
INPUT_SHIFT = DEVICE_SHIFT + DEVICE_SIZE,
INPUT_SIZE = 2,
INPUT_MASK = ((1 << INPUT_SIZE) -1) << INPUT_SHIFT,
INPUT_DIRECT = 1 << INPUT_SHIFT,
INPUT_PROVIDER = 2 << INPUT_SHIFT,
INPUT_BOTH = 3 << INPUT_SHIFT,
// Sample output modes
OUTPUT_SHIFT = INPUT_SHIFT + INPUT_SIZE,
OUTPUT_SIZE = 2,
OUTPUT_MASK = ((1 << OUTPUT_SIZE) -1) << OUTPUT_SHIFT,
OUTPUT_DIRECT = 1 << OUTPUT_SHIFT,
OUTPUT_PROVIDER = 2 << OUTPUT_SHIFT,
OUTPUT_BOTH = 3 << OUTPUT_SHIFT,
// Hardware acceleration mode
HW_ACC_SHIFT = OUTPUT_SHIFT + OUTPUT_SIZE,
HW_ACC_SIZE = 2,
HW_ACC_MASK = ((1 << HW_ACC_SIZE) -1) << HW_ACC_SHIFT,
HW_ACC_SIMPLE = 1 << HW_ACC_SHIFT,
HW_ACC_TUNNEL = 2 << HW_ACC_SHIFT,
// Audio mode indication
AUDIO_MODE_SHIFT = HW_ACC_SHIFT + HW_ACC_SIZE,
AUDIO_MODE_SIZE = 2,
AUDIO_MODE_MASK = ((1 << AUDIO_MODE_SIZE) -1) << AUDIO_MODE_SHIFT,
AUDIO_MODE_IND = 1 << AUDIO_MODE_SHIFT,
AUDIO_MODE_NONE = 0 << AUDIO_MODE_SHIFT,
// Audio source indication
AUDIO_SOURCE_SHIFT = AUDIO_MODE_SHIFT + AUDIO_MODE_SIZE,
AUDIO_SOURCE_SIZE = 2,
AUDIO_SOURCE_MASK = ((1 << AUDIO_SOURCE_SIZE) -1) << AUDIO_SOURCE_SHIFT,
AUDIO_SOURCE_IND = 1 << AUDIO_SOURCE_SHIFT,
AUDIO_SOURCE_NONE = 0 << AUDIO_SOURCE_SHIFT,
// Effect offload indication
OFFLOAD_SHIFT = AUDIO_SOURCE_SHIFT + AUDIO_SOURCE_SIZE,
OFFLOAD_SIZE = 1,
OFFLOAD_MASK = ((1 << OFFLOAD_SIZE) -1) << OFFLOAD_SHIFT,
OFFLOAD_SUPPORTED = 1 << OFFLOAD_SHIFT,
// Effect has no process indication
NO_PROCESS_SHIFT = OFFLOAD_SHIFT + OFFLOAD_SIZE,
NO_PROCESS_SIZE = 1,
NO_PROCESS_MASK = ((1 << NO_PROCESS_SIZE) -1) << NO_PROCESS_SHIFT,
NO_PROCESS = 1 << NO_PROCESS_SHIFT
};
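The shift/size/mask triples make the flags word self-describing; a sketch composing a flags value and decoding the connection mode back out, using the generated C++ enum:

    #include <android/hardware/audio/effect/5.0/types.h>
    #include <cstdint>

    using namespace ::android::hardware::audio::effect::V5_0;

    // Software insert effect, prefers last position, wants volume indication.
    constexpr int32_t kFlags =
            static_cast<int32_t>(EffectFlags::TYPE_INSERT) |
            static_cast<int32_t>(EffectFlags::INSERT_LAST) |
            static_cast<int32_t>(EffectFlags::VOLUME_IND);

    // Decode the connection mode: (flags & TYPE_MASK) >> TYPE_SHIFT.
    constexpr int32_t kConnectionMode =
            (kFlags & static_cast<int32_t>(EffectFlags::TYPE_MASK)) >>
            static_cast<int32_t>(EffectFlags::TYPE_SHIFT);
    static_assert(kConnectionMode == 0, "TYPE_INSERT occupies value 0");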
/**
* The effect descriptor contains necessary information to facilitate the
* enumeration of the effect engines present in a library.
*/
struct EffectDescriptor {
Uuid type; // UUID of the OpenSL ES interface implemented
// by this effect
Uuid uuid; // UUID for this particular implementation
bitfield<EffectFlags> flags; // effect engine capabilities/requirements flags
uint16_t cpuLoad; // CPU load indication expressed in 0.1 MIPS units
// as estimated on an ARM9E core (ARMv5TE) with 0 WS
uint16_t memoryUsage; // data memory usage expressed in KB and includes
// only dynamically allocated memory
uint8_t[64] name; // human readable effect name
uint8_t[64] implementor; // human readable effect implementor name
};
/**
* A buffer is a chunk of audio data for processing. Multi-channel audio is
* always interleaved. The channel order is from LSB to MSB with regard to the
* channel mask definition in audio.h, audio_channel_mask_t, e.g.:
* Stereo: L, R; 5.1: FL, FR, FC, LFE, BL, BR.
*
* The buffer size is expressed in frame count, a frame being composed of
* samples for all channels at a given time. Frame size for unspecified format
* (AUDIO_FORMAT_OTHER) is 8 bit by definition.
*/
struct AudioBuffer {
uint64_t id;
uint32_t frameCount;
memory data;
};
@export(name="effect_buffer_access_e", value_prefix="EFFECT_BUFFER_")
enum EffectBufferAccess : int32_t {
ACCESS_WRITE,
ACCESS_READ,
ACCESS_ACCUMULATE
};
/**
* Determines what fields of EffectBufferConfig need to be considered.
*/
@export(name="", value_prefix="EFFECT_CONFIG_")
enum EffectConfigParameters : int32_t {
BUFFER = 0x0001, // buffer field
SMP_RATE = 0x0002, // samplingRate
CHANNELS = 0x0004, // channels
FORMAT = 0x0008, // format
ACC_MODE = 0x0010, // accessMode
// Note: the 'ALL' constant from version 2.0 has been moved to a helper function
};
/**
* The buffer config structure specifies the input or output audio format
* to be used by the effect engine.
*/
struct EffectBufferConfig {
AudioBuffer buffer;
uint32_t samplingRateHz;
bitfield<AudioChannelMask> channels;
AudioFormat format;
EffectBufferAccess accessMode;
bitfield<EffectConfigParameters> mask;
};
struct EffectConfig {
EffectBufferConfig inputCfg;
EffectBufferConfig outputCfg;
};
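The 'mask' field lets a caller mark which fields it actually filled in; a sketch of a partial config carrying only sample rate and format. AudioFormat comes from audio.common@5.0 and the helper name is hypothetical:

    #include <android/hardware/audio/effect/5.0/types.h>

    using namespace ::android::hardware::audio::effect::V5_0;
    using ::android::hardware::audio::common::V5_0::AudioFormat;

    EffectBufferConfig makePartialConfig() {
        EffectBufferConfig cfg{};
        cfg.samplingRateHz = 48000;
        cfg.format = AudioFormat::PCM_16_BIT;
        // Only these two fields are meaningful; the engine ignores the rest.
        cfg.mask = static_cast<int32_t>(EffectConfigParameters::SMP_RATE) |
                   static_cast<int32_t>(EffectConfigParameters::FORMAT);
        return cfg;
    }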
@export(name="effect_feature_e", value_prefix="EFFECT_FEATURE_")
enum EffectFeature : int32_t {
AUX_CHANNELS, // supports auxiliary channels
// (e.g. dual mic noise suppressor)
CNT
};
struct EffectAuxChannelsConfig {
bitfield<AudioChannelMask> mainChannels; // channel mask for main channels
bitfield<AudioChannelMask> auxChannels; // channel mask for auxiliary channels
};
struct EffectOffloadParameter {
bool isOffload; // true if the playback thread the effect
// is attached to is offloaded
AudioIoHandle ioHandle; // io handle of the playback thread
// the effect is attached to
};
/**
* The message queue flags used to synchronize reads and writes from
* the status message queue used by effects.
*/
enum MessageQueueFlagBits : uint32_t {
DONE_PROCESSING = 1 << 0,
REQUEST_PROCESS = 1 << 1,
REQUEST_PROCESS_REVERSE = 1 << 2,
REQUEST_QUIT = 1 << 3,
REQUEST_PROCESS_ALL =
REQUEST_PROCESS | REQUEST_PROCESS_REVERSE | REQUEST_QUIT
};

View file

@ -0,0 +1 @@
../../2.0/xml/audio_effects_conf.xsd