Audio: Copy HAL V6 into V7 am: 60ced768f3
Original change: https://android-review.googlesource.com/c/platform/hardware/interfaces/+/1475340
Change-Id: I90cd551bd26717ea19fa9b5f01a33eb6c211bc17
commit ccf6fefbe5
48 changed files with 6699 additions and 0 deletions
audio/7.0/Android.bp (new file, 25 lines)
@@ -0,0 +1,25 @@
// This file is autogenerated by hidl-gen -Landroidbp.

hidl_interface {
    name: "android.hardware.audio@7.0",
    root: "android.hardware",
    srcs: [
        "types.hal",
        "IDevice.hal",
        "IDevicesFactory.hal",
        "IPrimaryDevice.hal",
        "IStream.hal",
        "IStreamIn.hal",
        "IStreamOut.hal",
        "IStreamOutCallback.hal",
        "IStreamOutEventCallback.hal",
    ],
    interfaces: [
        "android.hardware.audio.common@7.0",
        "android.hardware.audio.effect@7.0",
        "android.hidl.base@1.0",
        "android.hidl.safe_union@1.0",
    ],
    gen_java: false,
    gen_java_constants: true,
}
audio/7.0/IDevice.hal (new file, 346 lines)
@@ -0,0 +1,346 @@
/*
 * Copyright (C) 2020 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package android.hardware.audio@7.0;

import android.hardware.audio.common@7.0;
import IStreamIn;
import IStreamOut;

interface IDevice {
    /**
     * Returns whether the audio hardware interface has been initialized.
     *
     * @return retval OK on success, NOT_INITIALIZED on failure.
     */
    initCheck() generates (Result retval);

    /**
     * Sets the audio volume for all audio activities other than voice call. If
     * NOT_SUPPORTED is returned, the software mixer will emulate this
     * capability.
     *
     * @param volume 1.0f means unity, 0.0f is zero.
     * @return retval operation completion status.
     */
    setMasterVolume(float volume) generates (Result retval);

    /**
     * Gets the current master volume value for the HAL, if the HAL supports
     * master volume control. For example, AudioFlinger will query this value
     * from the primary audio HAL when the service starts and use the value for
     * setting the initial master volume across all HALs. HALs which do not
     * support this method must return NOT_SUPPORTED in 'retval'.
     *
     * @return retval operation completion status.
     * @return volume 1.0f means unity, 0.0f is zero.
     */
    getMasterVolume() generates (Result retval, float volume);

    /**
     * Sets the microphone muting state.
     *
     * @param mute whether the microphone is muted.
     * @return retval operation completion status.
     */
    setMicMute(bool mute) generates (Result retval);

    /**
     * Gets whether the microphone is muted.
     *
     * @return retval operation completion status.
     * @return mute whether the microphone is muted.
     */
    getMicMute() generates (Result retval, bool mute);

    /**
     * Sets the audio mute status for all audio activities. If the return value
     * is NOT_SUPPORTED, the software mixer will emulate this capability.
     *
     * @param mute whether audio is muted.
     * @return retval operation completion status.
     */
    setMasterMute(bool mute) generates (Result retval);

    /**
     * Gets the current master mute status for the HAL, if the HAL supports
     * master mute control. AudioFlinger will query this value from the primary
     * audio HAL when the service starts and use the value for setting the
     * initial master mute across all HALs. The HAL must indicate that the feature
     * is not supported by returning NOT_SUPPORTED status.
     *
     * @return retval operation completion status.
     * @return mute whether audio is muted.
     */
    getMasterMute() generates (Result retval, bool mute);

    /**
     * Returns the audio input buffer size according to the parameters passed, or
     * INVALID_ARGUMENTS if one of the parameters is not supported.
     *
     * @param config audio configuration.
     * @return retval operation completion status.
     * @return bufferSize input buffer size in bytes.
     */
    getInputBufferSize(AudioConfig config)
            generates (Result retval, uint64_t bufferSize);

    /**
     * This method creates and opens the audio hardware output stream.
     * If the stream cannot be opened with the proposed audio config,
     * the HAL must provide suggested values for the audio config.
     *
     * @param ioHandle handle assigned by AudioFlinger.
     * @param device device type and (if needed) address.
     * @param config stream configuration.
     * @param flags additional flags.
     * @param sourceMetadata description of the audio that will be played.
     *                       May be used by implementations to configure hardware effects.
     * @return retval operation completion status.
     * @return outStream created output stream.
     * @return suggestedConfig in case of invalid parameters, suggested config.
     */
    openOutputStream(
            AudioIoHandle ioHandle,
            DeviceAddress device,
            AudioConfig config,
            bitfield<AudioOutputFlag> flags,
            SourceMetadata sourceMetadata) generates (
                    Result retval,
                    IStreamOut outStream,
                    AudioConfig suggestedConfig);

    /**
     * This method creates and opens the audio hardware input stream.
     * If the stream cannot be opened with the proposed audio config,
     * the HAL must provide suggested values for the audio config.
     *
     * @param ioHandle handle assigned by AudioFlinger.
     * @param device device type and (if needed) address.
     * @param config stream configuration.
     * @param flags additional flags.
     * @param sinkMetadata description of the audio that is suggested by the client.
     *                     May be used by implementations to configure processing effects.
     * @return retval operation completion status.
     * @return inStream in case of success, created input stream.
     * @return suggestedConfig in case of invalid parameters, suggested config.
     */
    openInputStream(
            AudioIoHandle ioHandle,
            DeviceAddress device,
            AudioConfig config,
            bitfield<AudioInputFlag> flags,
            SinkMetadata sinkMetadata) generates (
                    Result retval,
                    IStreamIn inStream,
                    AudioConfig suggestedConfig);

    /**
     * Returns whether the HAL supports audio patches. A patch represents a connection
     * between signal source(s) and signal sink(s). If the HAL doesn't support
     * patches natively (in hardware), the audio system will need to establish
     * them in software.
     *
     * @return supports true if audio patches are supported.
     */
    supportsAudioPatches() generates (bool supports);

    /**
     * Creates an audio patch between several source and sink ports. The handle
     * is allocated by the HAL and must be unique for this audio HAL module.
     *
     * @param sources patch sources.
     * @param sinks patch sinks.
     * @return retval operation completion status.
     * @return patch created patch handle.
     */
    createAudioPatch(vec<AudioPortConfig> sources, vec<AudioPortConfig> sinks)
            generates (Result retval, AudioPatchHandle patch);

    /**
     * Updates an audio patch.
     *
     * Use of this function is preferred to releasing and re-creating a patch
     * as the HAL module can figure out a way of switching the route without
     * causing audio disruption.
     *
     * @param previousPatch handle of the previous patch to update.
     * @param sources new patch sources.
     * @param sinks new patch sinks.
     * @return retval operation completion status.
     * @return patch updated patch handle.
     */
    updateAudioPatch(
            AudioPatchHandle previousPatch,
            vec<AudioPortConfig> sources,
            vec<AudioPortConfig> sinks) generates (
                    Result retval, AudioPatchHandle patch);

    /**
     * Releases an audio patch.
     *
     * @param patch patch handle.
     * @return retval operation completion status.
     */
    releaseAudioPatch(AudioPatchHandle patch) generates (Result retval);

    /**
     * Returns the list of supported attributes for a given audio port.
     *
     * As input, 'port' contains the information (type, role, address etc...)
     * needed by the HAL to identify the port.
     *
     * As output, 'resultPort' contains possible attributes (sampling rates,
     * formats, channel masks, gain controllers...) for this port.
     *
     * @param port port identifier.
     * @return retval operation completion status.
     * @return resultPort port descriptor with all parameters filled up.
     */
    getAudioPort(AudioPort port)
            generates (Result retval, AudioPort resultPort);

    /**
     * Sets the audio port configuration.
     *
     * @param config audio port configuration.
     * @return retval operation completion status.
     */
    setAudioPortConfig(AudioPortConfig config) generates (Result retval);

    /**
     * Gets the HW synchronization source of the device. Calling this method is
     * equivalent to getting AUDIO_PARAMETER_HW_AV_SYNC on the legacy HAL.
     * Optional method.
     *
     * @return retval operation completion status: OK or NOT_SUPPORTED.
     * @return hwAvSync HW synchronization source.
     */
    getHwAvSync() generates (Result retval, AudioHwSync hwAvSync);

    /**
     * Sets whether the screen is on. Calling this method is equivalent to
     * setting AUDIO_PARAMETER_KEY_SCREEN_STATE on the legacy HAL.
     * Optional method.
     *
     * @param turnedOn whether the screen is turned on.
     * @return retval operation completion status.
     */
    setScreenState(bool turnedOn) generates (Result retval);

    /**
     * Generic method for retrieving vendor-specific parameter values.
     * The framework does not interpret the parameters; they are passed
     * in an opaque manner between a vendor application and the HAL.
     *
     * Multiple parameters can be retrieved at the same time.
     * The implementation should return as many requested parameters
     * as possible, even if one or more are not supported.
     *
     * @param context provides more information about the request.
     * @param keys keys of the requested parameters.
     * @return retval operation completion status.
     *         OK must be returned if keys is empty.
     *         NOT_SUPPORTED must be returned if at least one key is unknown.
     * @return parameters parameter key value pairs.
     *         Must contain the value of all requested keys if retval == OK.
     */
    getParameters(vec<ParameterValue> context, vec<string> keys)
            generates (Result retval, vec<ParameterValue> parameters);

    /**
     * Generic method for setting vendor-specific parameter values.
     * The framework does not interpret the parameters; they are passed
     * in an opaque manner between a vendor application and the HAL.
     *
     * Multiple parameters can be set at the same time, though this is
     * discouraged as it makes failure analysis harder.
     *
     * If possible, a failed setParameters should not impact the platform state.
     *
     * @param context provides more information about the request.
     * @param parameters parameter key value pairs.
     * @return retval operation completion status.
     *         All parameters must be successfully set for OK to be returned.
     */
    setParameters(vec<ParameterValue> context, vec<ParameterValue> parameters)
            generates (Result retval);

    /**
     * Returns an array with the available microphones of the device.
     *
     * @return retval NOT_SUPPORTED if there are no microphones on this device,
     *                INVALID_STATE if the call is not successful,
     *                OK otherwise.
     *
     * @return microphones array with microphones info.
     */
    getMicrophones()
            generates (Result retval, vec<MicrophoneInfo> microphones);

    /**
     * Notifies the device module about the connection state of an input/output
     * device attached to it. Calling this method is equivalent to setting
     * AUDIO_PARAMETER_DEVICE_[DIS]CONNECT on the legacy HAL.
     *
     * @param address audio device specification.
     * @param connected whether the device is connected.
     * @return retval operation completion status.
     */
    setConnectedState(DeviceAddress address, bool connected)
            generates (Result retval);

    /**
     * Called by the framework to deinitialize the device and free up
     * all currently allocated resources. It is recommended to close
     * the device on the client side as soon as it becomes unused.
     *
     * Note that all streams must be closed by the client before
     * attempting to close the device they belong to.
     *
     * @return retval OK in case of success.
     *                INVALID_STATE if the device was already closed
     *                or there are streams currently opened.
     */
    @exit
    close() generates (Result retval);

    /**
     * Applies an audio effect to an audio device. The effect is inserted
     * according to its insertion preference specified by INSERT_... EffectFlags
     * in the EffectDescriptor.
     *
     * @param device identifies the sink or source device this effect must be applied to.
     *               "device" is the AudioPortHandle indicated for the device when the audio
     *               patch connecting that device was created.
     * @param effectId effect ID (obtained from IEffectsFactory.createEffect) of
     *                 the effect to add.
     * @return retval operation completion status.
     */
    addDeviceEffect(AudioPortHandle device, uint64_t effectId) generates (Result retval);

    /**
     * Stops applying an audio effect to an audio device.
     *
     * @param device identifies the sink or source device this effect was applied to.
     *               "device" is the AudioPortHandle indicated for the device when the audio
     *               patch is created at the audio HAL.
     * @param effectId effect ID (obtained from IEffectsFactory.createEffect) of
     *                 the effect.
     * @return retval operation completion status.
     */
    removeDeviceEffect(AudioPortHandle device, uint64_t effectId) generates (Result retval);
};
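The "suggested config" handshake documented for openOutputStream above can be exercised from client code roughly as follows. This is a hypothetical sketch against the generated C++ HIDL bindings, not part of this change; the ioHandle, device address, flags and metadata values are placeholders supplied by the caller.

#include <android/hardware/audio/7.0/IDevice.h>

using ::android::sp;
using ::android::hardware::hidl_bitfield;
using ::android::hardware::audio::common::V7_0::AudioConfig;
using ::android::hardware::audio::common::V7_0::AudioOutputFlag;
using ::android::hardware::audio::common::V7_0::DeviceAddress;
using ::android::hardware::audio::common::V7_0::SourceMetadata;
using ::android::hardware::audio::V7_0::IDevice;
using ::android::hardware::audio::V7_0::IStreamOut;
using ::android::hardware::audio::V7_0::Result;

// 'device' is an already opened IDevice; all other arguments are placeholders.
void openWithFallback(const sp<IDevice>& device,
                      int32_t ioHandle,  // AudioIoHandle
                      const DeviceAddress& deviceAddress,
                      AudioConfig config,  // desired rate/mask/format from types.hal
                      hidl_bitfield<AudioOutputFlag> flags,
                      const SourceMetadata& metadata) {
    Result res = Result::NOT_INITIALIZED;
    sp<IStreamOut> stream;
    AudioConfig suggested{};
    auto tryOpen = [&](const AudioConfig& cfg) {
        device->openOutputStream(ioHandle, deviceAddress, cfg, flags, metadata,
                [&](Result r, const sp<IStreamOut>& s, const AudioConfig& sc) {
                    res = r;
                    stream = s;
                    suggested = sc;
                });
    };
    tryOpen(config);
    if (res != Result::OK) {
        // The HAL rejected the proposed config but reported what it can do;
        // retry once with the suggested values.
        tryOpen(suggested);
    }
}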
audio/7.0/IDevicesFactory.hal (new file, 70 lines)
@@ -0,0 +1,70 @@
/*
 * Copyright (C) 2020 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package android.hardware.audio@7.0;

import android.hardware.audio.common@7.0;
import IDevice;
import IPrimaryDevice;

/** This factory allows a HAL implementation to be split into multiple independent
 * devices (called modules in the pre-Treble API).
 * Note that this division is arbitrary and implementations are free
 * to only have a Primary device.
 * The framework will query the devices according to audio_policy_configuration.xml.
 *
 * Each device name is arbitrary, provided by the vendor's audio_policy_configuration.xml
 * and only used to identify a device in this factory.
 * The framework must not interpret the name, treating it as vendor opaque data,
 * with the following exception:
 * - the "r_submix" device must be present to support policy mixes (e.g. Android projected).
 *   Note that this device is included by default in a build derived from AOSP.
 *
 * Note that on AOSP Oreo (including MR1) the "a2dp" module is not using this API
 * but is loaded directly from the system partition using the legacy API
 * due to limitations with the Bluetooth framework.
 */
interface IDevicesFactory {

    /**
     * Opens an audio device. To close the device, it is necessary to release
     * references to the returned device object.
     *
     * @param device device name.
     * @return retval operation completion status. Returns INVALID_ARGUMENTS
     *         if there is no corresponding hardware module found,
     *         NOT_INITIALIZED if an error occurred while opening the hardware
     *         module.
     * @return result the interface for the created device.
     */
    openDevice(string device) generates (Result retval, IDevice result);

    /**
     * Opens the Primary audio device, which must be present.
     * This function is not optional and must return the primary device successfully.
     *
     * This device must have the name "primary".
     *
     * The telephony stack uses this device to control the audio during a voice call.
     *
     * @return retval operation completion status. Must be SUCCESS.
     *         For debugging, return INVALID_ARGUMENTS if there is no corresponding
     *         hardware module found, NOT_INITIALIZED if an error occurred
     *         while opening the hardware module.
     * @return result the interface for the created device.
     */
    openPrimaryDevice() generates (Result retval, IPrimaryDevice result);
};
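A minimal client-side sketch of this factory, assuming the generated C++ bindings and a registered instance of the 7.0 audio HAL; error handling is mostly elided and the "r_submix" name is just the example called out in the comment above.

#include <android/hardware/audio/7.0/IDevicesFactory.h>
#include <utils/StrongPointer.h>

using ::android::sp;
using ::android::hardware::audio::V7_0::IDevice;
using ::android::hardware::audio::V7_0::IDevicesFactory;
using ::android::hardware::audio::V7_0::IPrimaryDevice;
using ::android::hardware::audio::V7_0::Result;

int main() {
    // Fetch the default registered instance of the factory.
    sp<IDevicesFactory> factory = IDevicesFactory::getService();
    if (factory == nullptr) return 1;

    // The mandatory primary device, used e.g. by the telephony stack.
    sp<IPrimaryDevice> primary;
    factory->openPrimaryDevice([&](Result r, const sp<IPrimaryDevice>& dev) {
        if (r == Result::OK) primary = dev;
    });

    // Any other device name comes from the vendor's audio_policy_configuration.xml,
    // e.g. the "r_submix" device mentioned above.
    sp<IDevice> submix;
    factory->openDevice("r_submix", [&](Result r, const sp<IDevice>& dev) {
        if (r == Result::OK) submix = dev;
    });
    return 0;
}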
audio/7.0/IPrimaryDevice.hal (new file, 195 lines)
@@ -0,0 +1,195 @@
/*
 * Copyright (C) 2020 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package android.hardware.audio@7.0;

import android.hardware.audio.common@7.0;
import IDevice;

interface IPrimaryDevice extends IDevice {
    /**
     * Sets the audio volume of a voice call.
     *
     * @param volume 1.0f means unity, 0.0f is zero.
     * @return retval operation completion status.
     */
    setVoiceVolume(float volume) generates (Result retval);

    /**
     * This method is used to notify the HAL about audio mode changes.
     *
     * @param mode new mode.
     * @return retval operation completion status.
     */
    setMode(AudioMode mode) generates (Result retval);

    /**
     * Sets the name of the current BT SCO headset. Calling this method
     * is equivalent to setting the legacy "bt_headset_name" parameter.
     * The BT SCO headset name must only be used for debugging purposes.
     * Optional method.
     *
     * @param name the name of the current BT SCO headset (can be empty).
     * @return retval operation completion status.
     */
    setBtScoHeadsetDebugName(string name) generates (Result retval);

    /**
     * Gets whether BT SCO Noise Reduction and Echo Cancellation are enabled.
     * Calling this method is equivalent to getting AUDIO_PARAMETER_KEY_BT_NREC
     * on the legacy HAL.
     *
     * @return retval operation completion status.
     * @return enabled whether BT SCO NR + EC are enabled.
     */
    getBtScoNrecEnabled() generates (Result retval, bool enabled);

    /**
     * Sets whether BT SCO Noise Reduction and Echo Cancellation are enabled.
     * Calling this method is equivalent to setting AUDIO_PARAMETER_KEY_BT_NREC
     * on the legacy HAL.
     * Optional method.
     *
     * @param enabled whether BT SCO NR + EC are enabled.
     * @return retval operation completion status.
     */
    setBtScoNrecEnabled(bool enabled) generates (Result retval);

    /**
     * Gets whether BT SCO Wideband mode is enabled. Calling this method is
     * equivalent to getting AUDIO_PARAMETER_KEY_BT_SCO_WB on the legacy HAL.
     *
     * @return retval operation completion status.
     * @return enabled whether BT Wideband is enabled.
     */
    getBtScoWidebandEnabled() generates (Result retval, bool enabled);

    /**
     * Sets whether BT SCO Wideband mode is enabled. Calling this method is
     * equivalent to setting AUDIO_PARAMETER_KEY_BT_SCO_WB on the legacy HAL.
     * Optional method.
     *
     * @param enabled whether BT Wideband is enabled.
     * @return retval operation completion status.
     */
    setBtScoWidebandEnabled(bool enabled) generates (Result retval);

    /**
     * Gets whether BT HFP (Hands-Free Profile) is enabled. Calling this method
     * is equivalent to getting the "hfp_enable" parameter value on the legacy HAL.
     *
     * @return retval operation completion status.
     * @return enabled whether BT HFP is enabled.
     */
    getBtHfpEnabled() generates (Result retval, bool enabled);

    /**
     * Sets whether BT HFP (Hands-Free Profile) is enabled. Calling this method
     * is equivalent to setting the "hfp_enable" parameter on the legacy HAL.
     * Optional method.
     *
     * @param enabled whether BT HFP is enabled.
     * @return retval operation completion status.
     */
    setBtHfpEnabled(bool enabled) generates (Result retval);

    /**
     * Sets the sampling rate of BT HFP (Hands-Free Profile). Calling this
     * method is equivalent to setting the "hfp_set_sampling_rate" parameter
     * on the legacy HAL.
     * Optional method.
     *
     * @param sampleRateHz sample rate in Hz.
     * @return retval operation completion status.
     */
    setBtHfpSampleRate(uint32_t sampleRateHz) generates (Result retval);

    /**
     * Sets the current output volume for BT HFP (Hands-Free Profile).
     * Calling this method is equivalent to setting the "hfp_volume" parameter value
     * on the legacy HAL (except that legacy HAL implementations expect
     * an integer value in the range from 0 to 15.)
     * Optional method.
     *
     * @param volume 1.0f means unity, 0.0f is zero.
     * @return retval operation completion status.
     */
    setBtHfpVolume(float volume) generates (Result retval);

    enum TtyMode : int32_t {
        OFF,
        VCO,
        HCO,
        FULL
    };

    /**
     * Gets the current TTY mode selection. Calling this method is equivalent to
     * getting AUDIO_PARAMETER_KEY_TTY_MODE on the legacy HAL.
     *
     * @return retval operation completion status.
     * @return mode TTY mode.
     */
    getTtyMode() generates (Result retval, TtyMode mode);

    /**
     * Sets the current TTY mode. Calling this method is equivalent to setting
     * AUDIO_PARAMETER_KEY_TTY_MODE on the legacy HAL.
     *
     * @param mode TTY mode.
     * @return retval operation completion status.
     */
    setTtyMode(TtyMode mode) generates (Result retval);

    /**
     * Gets whether Hearing Aid Compatibility - Telecoil (HAC-T) mode is
     * enabled. Calling this method is equivalent to getting
     * AUDIO_PARAMETER_KEY_HAC on the legacy HAL.
     *
     * @return retval operation completion status.
     * @return enabled whether HAC mode is enabled.
     */
    getHacEnabled() generates (Result retval, bool enabled);

    /**
     * Sets whether Hearing Aid Compatibility - Telecoil (HAC-T) mode is
     * enabled. Calling this method is equivalent to setting
     * AUDIO_PARAMETER_KEY_HAC on the legacy HAL.
     * Optional method.
     *
     * @param enabled whether HAC mode is enabled.
     * @return retval operation completion status.
     */
    setHacEnabled(bool enabled) generates (Result retval);

    enum Rotation : int32_t {
        DEG_0,
        DEG_90,
        DEG_180,
        DEG_270
    };

    /**
     * Updates the HAL on the current rotation of the device relative to its natural
     * orientation. Calling this method is equivalent to setting the legacy
     * parameter "rotation".
     *
     * @param rotation rotation in degrees relative to natural device
     *                 orientation.
     * @return retval operation completion status.
     */
    updateRotation(Rotation rotation) generates (Result retval);
};
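For illustration, a hedged sketch of how a telephony-style client might drive the primary device through the generated C++ bindings; 'primary' is an already opened IPrimaryDevice (see the factory sketch earlier), and the concrete values are placeholders.

using ::android::sp;
using ::android::hardware::audio::common::V7_0::AudioMode;
using ::android::hardware::audio::V7_0::IPrimaryDevice;

void enterVoiceCall(const sp<IPrimaryDevice>& primary) {
    primary->setMode(AudioMode::IN_CALL);    // notify the HAL of the mode change
    primary->setVoiceVolume(0.8f);           // 1.0f is unity, 0.0f is zero
    primary->setBtScoWidebandEnabled(true);  // optional; HAL may return NOT_SUPPORTED
    primary->setTtyMode(IPrimaryDevice::TtyMode::OFF);
}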
audio/7.0/IStream.hal (new file, 317 lines)
@@ -0,0 +1,317 @@
/*
 * Copyright (C) 2020 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package android.hardware.audio@7.0;

import android.hardware.audio.common@7.0;
import android.hardware.audio.effect@7.0::IEffect;

interface IStream {
    /**
     * Return the frame size (number of bytes per sample).
     *
     * @return frameSize frame size in bytes.
     */
    getFrameSize() generates (uint64_t frameSize);

    /**
     * Return the frame count of the buffer. Calling this method is equivalent
     * to getting AUDIO_PARAMETER_STREAM_FRAME_COUNT on the legacy HAL.
     *
     * @return count frame count.
     */
    getFrameCount() generates (uint64_t count);

    /**
     * Return the size of the input/output buffer in bytes for this stream.
     * It must be a multiple of the frame size.
     *
     * @return bufferSize buffer size in bytes.
     */
    getBufferSize() generates (uint64_t bufferSize);

    /**
     * Return the sampling rate in Hz.
     *
     * @return sampleRateHz sample rate in Hz.
     */
    getSampleRate() generates (uint32_t sampleRateHz);

    /**
     * Return supported native sampling rates of the stream for a given format.
     * A supported native sample rate is a sample rate that can be efficiently
     * played by the hardware (typically without sample-rate conversions).
     *
     * This function is only called for dynamic profiles. If called for a
     * non-dynamic profile, it should return NOT_SUPPORTED or the same list
     * as in audio_policy_configuration.xml.
     *
     * Calling this method is equivalent to getting
     * AUDIO_PARAMETER_STREAM_SUP_SAMPLING_RATES on the legacy HAL.
     *
     * @param format audio format for which the sample rates are supported.
     * @return retval operation completion status.
     *                Must be OK if the format is supported.
     * @return sampleRates supported sample rates.
     */
    getSupportedSampleRates(AudioFormat format)
            generates (Result retval, vec<uint32_t> sampleRates);

    /**
     * Sets the sampling rate of the stream. Calling this method is equivalent
     * to setting AUDIO_PARAMETER_STREAM_SAMPLING_RATE on the legacy HAL.
     * Optional method. If implemented, only called on a stopped stream.
     *
     * @param sampleRateHz sample rate in Hz.
     * @return retval operation completion status.
     */
    setSampleRate(uint32_t sampleRateHz) generates (Result retval);

    /**
     * Return the channel mask of the stream.
     *
     * @return mask channel mask.
     */
    getChannelMask() generates (bitfield<AudioChannelMask> mask);

    /**
     * Return supported channel masks of the stream. Calling this method is
     * equivalent to getting AUDIO_PARAMETER_STREAM_SUP_CHANNELS on the legacy
     * HAL.
     *
     * @param format audio format for which the channel masks are supported.
     * @return retval operation completion status.
     *                Must be OK if the format is supported.
     * @return masks supported audio masks.
     */
    getSupportedChannelMasks(AudioFormat format)
            generates (Result retval, vec<bitfield<AudioChannelMask>> masks);

    /**
     * Sets the channel mask of the stream. Calling this method is equivalent to
     * setting AUDIO_PARAMETER_STREAM_CHANNELS on the legacy HAL.
     * Optional method.
     *
     * @param mask channel mask.
     * @return retval operation completion status.
     */
    setChannelMask(bitfield<AudioChannelMask> mask) generates (Result retval);

    /**
     * Return the audio format of the stream.
     *
     * @return format audio format.
     */
    getFormat() generates (AudioFormat format);

    /**
     * Return supported audio formats of the stream. Calling this method is
     * equivalent to getting AUDIO_PARAMETER_STREAM_SUP_FORMATS on the legacy
     * HAL.
     *
     * @return retval operation completion status.
     * @return formats supported audio formats.
     *                 Must be non empty if retval is OK.
     */
    getSupportedFormats() generates (Result retval, vec<AudioFormat> formats);

    /**
     * Sets the audio format of the stream. Calling this method is equivalent to
     * setting AUDIO_PARAMETER_STREAM_FORMAT on the legacy HAL.
     * Optional method.
     *
     * @param format audio format.
     * @return retval operation completion status.
     */
    setFormat(AudioFormat format) generates (Result retval);

    /**
     * Convenience method for retrieving several stream parameters in
     * one transaction.
     *
     * @return sampleRateHz sample rate in Hz.
     * @return mask channel mask.
     * @return format audio format.
     */
    getAudioProperties() generates (
            uint32_t sampleRateHz, bitfield<AudioChannelMask> mask, AudioFormat format);

    /**
     * Applies an audio effect to the stream.
     *
     * @param effectId effect ID (obtained from IEffectsFactory.createEffect) of
     *                 the effect to apply.
     * @return retval operation completion status.
     */
    addEffect(uint64_t effectId) generates (Result retval);

    /**
     * Stops application of the effect to the stream.
     *
     * @param effectId effect ID (obtained from IEffectsFactory.createEffect) of
     *                 the effect to remove.
     * @return retval operation completion status.
     */
    removeEffect(uint64_t effectId) generates (Result retval);

    /**
     * Puts the audio hardware input/output into standby mode.
     * The driver must exit from standby mode at the next I/O operation.
     *
     * @return retval operation completion status.
     */
    standby() generates (Result retval);

    /**
     * Return the set of devices which this stream is connected to.
     * Optional method.
     *
     * @return retval operation completion status: OK or NOT_SUPPORTED.
     * @return devices set of devices which this stream is connected to.
     */
    getDevices() generates (Result retval, vec<DeviceAddress> devices);

    /**
     * Connects the stream to one or multiple devices.
     *
     * This method must only be used for HALs that do not support the
     * 'IDevice.createAudioPatch' method. Calling this method is
     * equivalent to setting AUDIO_PARAMETER_STREAM_ROUTING preceded
     * with a device address in the legacy HAL interface.
     *
     * @param devices devices to connect the stream to.
     * @return retval operation completion status.
     */
    setDevices(vec<DeviceAddress> devices) generates (Result retval);

    /**
     * Sets the HW synchronization source. Calling this method is equivalent to
     * setting AUDIO_PARAMETER_STREAM_HW_AV_SYNC on the legacy HAL.
     * Optional method.
     *
     * @param hwAvSync HW synchronization source.
     * @return retval operation completion status.
     */
    setHwAvSync(AudioHwSync hwAvSync) generates (Result retval);

    /**
     * Generic method for retrieving vendor-specific parameter values.
     * The framework does not interpret the parameters; they are passed
     * in an opaque manner between a vendor application and the HAL.
     *
     * Multiple parameters can be retrieved at the same time.
     * The implementation should return as many requested parameters
     * as possible, even if one or more are not supported.
     *
     * @param context provides more information about the request.
     * @param keys keys of the requested parameters.
     * @return retval operation completion status.
     *         OK must be returned if keys is empty.
     *         NOT_SUPPORTED must be returned if at least one key is unknown.
     * @return parameters parameter key value pairs.
     *         Must contain the value of all requested keys if retval == OK.
     */
    getParameters(vec<ParameterValue> context, vec<string> keys)
            generates (Result retval, vec<ParameterValue> parameters);

    /**
     * Generic method for setting vendor-specific parameter values.
     * The framework does not interpret the parameters; they are passed
     * in an opaque manner between a vendor application and the HAL.
     *
     * Multiple parameters can be set at the same time, though this is
     * discouraged as it makes failure analysis harder.
     *
     * If possible, a failed setParameters should not impact the platform state.
     *
     * @param context provides more information about the request.
     * @param parameters parameter key value pairs.
     * @return retval operation completion status.
     *         All parameters must be successfully set for OK to be returned.
     */
    setParameters(vec<ParameterValue> context, vec<ParameterValue> parameters)
            generates (Result retval);

    /**
     * Called by the framework to start a stream operating in mmap mode.
     * createMmapBuffer() must be called before calling start().
     * Only implemented by streams operating in mmap mode.
     *
     * @return retval OK in case of success.
     *                NOT_SUPPORTED on non mmap mode streams.
     *                INVALID_STATE if called out of sequence.
     */
    start() generates (Result retval);

    /**
     * Called by the framework to stop a stream operating in mmap mode.
     * Only implemented by streams operating in mmap mode.
     *
     * @return retval OK in case of success.
     *                NOT_SUPPORTED on non mmap mode streams.
     *                INVALID_STATE if called out of sequence.
     */
    stop() generates (Result retval);

    /**
     * Called by the framework to retrieve information on the mmap buffer used
     * for audio sample transfer.
     * Only implemented by streams operating in mmap mode.
     *
     * @param minSizeFrames minimum buffer size requested. The actual buffer
     *                      size returned in struct MmapBufferInfo can be larger.
     *                      The size must be a positive value.
     * @return retval OK in case of success.
     *                NOT_SUPPORTED on non mmap mode streams.
     *                NOT_INITIALIZED in case of memory allocation error.
     *                INVALID_ARGUMENTS if the requested buffer size is invalid.
     *                INVALID_STATE if called out of sequence.
     * @return info a MmapBufferInfo struct containing information on the MMAP buffer created.
     */
    createMmapBuffer(int32_t minSizeFrames)
            generates (Result retval, MmapBufferInfo info);

    /**
     * Called by the framework to read the current read/write position in the mmap
     * buffer with the associated time stamp.
     * Only implemented by streams operating in mmap mode.
     *
     * @return retval OK in case of success.
     *                NOT_SUPPORTED on non mmap mode streams.
     *                INVALID_STATE if called out of sequence.
     * @return position a MmapPosition struct containing the current HW read/write
     *                  position in frames with the associated time stamp.
     */
    getMmapPosition()
            generates (Result retval, MmapPosition position);

    /**
     * Called by the framework to deinitialize the stream and free up
     * all currently allocated resources. It is recommended to close
     * the stream on the client side as soon as it becomes unused.
     *
     * The client must ensure that this function is not called while
     * audio data is being transferred through the stream's message queues.
     *
     * @return retval OK in case of success.
     *                NOT_SUPPORTED if called on IStream instead of the input or
     *                output stream interface.
     *                INVALID_STATE if the stream was already closed.
     */
    @exit
    close() generates (Result retval);
};
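A hedged sketch of the convenience getter and the opaque parameter mechanism above, against the generated C++ bindings; 'stream' is any opened stream, and the parameter key shown is a made-up vendor key, not one defined by this HAL.

#include <android/hardware/audio/7.0/IStream.h>

using ::android::sp;
using ::android::hardware::hidl_string;
using ::android::hardware::hidl_vec;
using ::android::hardware::audio::V7_0::IStream;
using ::android::hardware::audio::V7_0::ParameterValue;
using ::android::hardware::audio::V7_0::Result;

void queryStream(const sp<IStream>& stream) {
    // One transaction instead of three separate getters.
    stream->getAudioProperties([&](uint32_t sampleRateHz, auto mask, auto format) {
        // e.g. cache sampleRateHz / mask / format for buffer sizing
        (void)sampleRateHz; (void)mask; (void)format;
    });

    // Vendor parameters are opaque key/value strings; the framework never
    // interprets them. "vendor.example.some_key" is purely illustrative.
    hidl_vec<hidl_string> keys = {hidl_string("vendor.example.some_key")};
    stream->getParameters({} /* context */, keys,
            [&](Result r, const hidl_vec<ParameterValue>& values) {
                (void)r; (void)values;
            });
}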
audio/7.0/IStreamIn.hal (new file, 199 lines)
@@ -0,0 +1,199 @@
/*
 * Copyright (C) 2020 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package android.hardware.audio@7.0;

import android.hardware.audio.common@7.0;
import IStream;

interface IStreamIn extends IStream {
    /**
     * Returns the source descriptor of the input stream. Calling this method is
     * equivalent to getting AUDIO_PARAMETER_STREAM_INPUT_SOURCE on the legacy
     * HAL.
     * Optional method.
     *
     * @return retval operation completion status.
     * @return source audio source.
     */
    getAudioSource() generates (Result retval, AudioSource source);

    /**
     * Sets the input gain for the audio driver.
     * Optional method.
     *
     * @param gain 1.0f is unity, 0.0f is zero.
     * @return retval operation completion status.
     */
    setGain(float gain) generates (Result retval);

    /**
     * Commands that can be executed on the driver reader thread.
     */
    enum ReadCommand : int32_t {
        READ,
        GET_CAPTURE_POSITION
    };

    /**
     * Data structure passed to the driver for executing commands
     * on the driver reader thread.
     */
    struct ReadParameters {
        ReadCommand command; // discriminator
        union Params {
            uint64_t read; // READ command, amount of bytes to read, >= 0.
            // No parameters for GET_CAPTURE_POSITION.
        } params;
    };

    /**
     * Data structure passed back to the client via the status message queue
     * of the 'read' operation.
     *
     * Possible values of the 'retval' field:
     *  - OK, read operation was successful;
     *  - INVALID_ARGUMENTS, stream was not configured properly;
     *  - INVALID_STATE, stream is in a state that doesn't allow reads.
     */
    struct ReadStatus {
        Result retval;
        ReadCommand replyTo; // discriminator
        union Reply {
            uint64_t read; // READ command, amount of bytes read, >= 0.
            struct CapturePosition { // same as generated by getCapturePosition.
                uint64_t frames;
                uint64_t time;
            } capturePosition;
        } reply;
    };

    /**
     * Called when the metadata of the stream's sink has been changed.
     * @param sinkMetadata description of the audio that is suggested by the clients.
     */
    updateSinkMetadata(SinkMetadata sinkMetadata);

    /**
     * Sets up the required transports for receiving audio buffers from the driver.
     *
     * The transport consists of three message queues:
     *  -- command queue is used to instruct the reader thread what operation
     *     to perform;
     *  -- data queue is used for passing audio data from the driver
     *     to the client;
     *  -- status queue is used for reporting operation status
     *     (e.g. amount of bytes actually read or error code).
     *
     * The driver operates on a dedicated thread. The client must ensure that
     * the thread is given an appropriate priority and assigned to the correct
     * scheduler and cgroup. For this purpose, the method returns identifiers
     * of the driver thread.
     *
     * @param frameSize the size of a single frame, in bytes.
     * @param framesCount the number of frames in a buffer.
     * @param threadPriority priority of the driver thread.
     * @return retval OK if both message queues were created successfully.
     *                INVALID_STATE if the method was already called.
     *                INVALID_ARGUMENTS if there was a problem setting up
     *                                  the queues.
     * @return commandMQ a message queue used for passing commands.
     * @return dataMQ a message queue used for passing audio data in the format
     *                specified at the stream opening.
     * @return statusMQ a message queue used for passing status from the driver
     *                  using ReadStatus structures.
     * @return threadInfo identifiers of the driver's dedicated thread.
     */
    prepareForReading(uint32_t frameSize, uint32_t framesCount)
            generates (
                    Result retval,
                    fmq_sync<ReadParameters> commandMQ,
                    fmq_sync<uint8_t> dataMQ,
                    fmq_sync<ReadStatus> statusMQ,
                    ThreadInfo threadInfo);

    /**
     * Returns the amount of input frames lost in the audio driver since the last
     * call of this function.
     *
     * The audio driver is expected to reset the value to 0 and restart counting
     * upon returning the current value by this function call. Such loss
     * typically occurs when the user space process is blocked longer than the
     * capacity of the audio driver buffers.
     *
     * @return framesLost the number of input audio frames lost.
     */
    getInputFramesLost() generates (uint32_t framesLost);

    /**
     * Returns a recent count of the number of audio frames received and the
     * clock time associated with that frame count.
     *
     * @return retval INVALID_STATE if the device is not ready/available,
     *                NOT_SUPPORTED if the command is not supported,
     *                OK otherwise.
     * @return frames the total frame count received. This must be as early in
     *                the capture pipeline as possible. In general, frames
     *                must be non-negative and must not go "backwards".
     * @return time the clock monotonic time when frames was measured. In
     *              general, time must be a positive quantity and must not
     *              go "backwards".
     */
    getCapturePosition()
            generates (Result retval, uint64_t frames, uint64_t time);

    /**
     * Returns an array with the active microphones in the stream.
     *
     * @return retval INVALID_STATE if the call is not successful,
     *                OK otherwise.
     *
     * @return microphones array with microphones info.
     */
    getActiveMicrophones()
            generates (Result retval, vec<MicrophoneInfo> microphones);

    /**
     * Specifies the logical microphone (for processing).
     *
     * If the feature is not supported, an error should be returned.
     * If multiple microphones are present, this should be treated as a preference
     * for their combined direction.
     *
     * Optional method.
     *
     * @param direction the desired direction of microphone capture.
     * @return retval OK if the call is successful, an error code otherwise.
     */
    setMicrophoneDirection(MicrophoneDirection direction)
            generates (Result retval);

    /**
     * Specifies the zoom factor for the selected microphone (for processing).
     *
     * If the feature is not supported, an error should be returned.
     * If multiple microphones are present, this should be treated as a preference
     * for their combined field dimension.
     *
     * Optional method.
     *
     * @param zoom the desired field dimension of microphone capture. Range is from -1 (wide angle),
     *             through 0 (no zoom), to 1 (maximum zoom).
     *
     * @return retval OK if the call is successful, an error code otherwise.
     */
    setMicrophoneFieldDimension(float zoom) generates (Result retval);
};
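A sketch of the three-queue capture transport described above, assuming libfmq and the generated C++ bindings; 'stream' (an opened IStreamIn), the frame size/count and the queue handling shown here are illustrative placeholders, not the framework's actual implementation.

#include <memory>
#include <vector>

#include <android/hardware/audio/7.0/IStreamIn.h>
#include <fmq/MessageQueue.h>

using ::android::sp;
using ::android::hardware::kSynchronizedReadWrite;
using ::android::hardware::MessageQueue;
using ::android::hardware::audio::V7_0::IStreamIn;
using ::android::hardware::audio::V7_0::Result;

using CommandMQ = MessageQueue<IStreamIn::ReadParameters, kSynchronizedReadWrite>;
using DataMQ = MessageQueue<uint8_t, kSynchronizedReadWrite>;
using StatusMQ = MessageQueue<IStreamIn::ReadStatus, kSynchronizedReadWrite>;

std::unique_ptr<CommandMQ> commandMQ;
std::unique_ptr<DataMQ> dataMQ;
std::unique_ptr<StatusMQ> statusMQ;

void setUpCapture(const sp<IStreamIn>& stream, uint32_t frameSize, uint32_t framesCount) {
    stream->prepareForReading(frameSize, framesCount,
            [&](Result r, const auto& commandDesc, const auto& dataDesc,
                const auto& statusDesc, const auto& /*threadInfo*/) {
                if (r != Result::OK) return;
                // Wrap the returned descriptors into local queue objects.
                commandMQ = std::make_unique<CommandMQ>(commandDesc);
                dataMQ = std::make_unique<DataMQ>(dataDesc);
                statusMQ = std::make_unique<StatusMQ>(statusDesc);
            });
}

void readOnce(std::vector<uint8_t>* buffer) {
    // 1. Ask the driver reader thread to capture 'buffer->size()' bytes.
    IStreamIn::ReadParameters cmd{};
    cmd.command = IStreamIn::ReadCommand::READ;
    cmd.params.read = buffer->size();
    commandMQ->write(&cmd);
    // 2. In the real protocol the reader thread is additionally woken via an
    //    EventFlag tied to the data queue (omitted here); once it replies, the
    //    captured bytes land in dataMQ and a ReadStatus in statusMQ.
    IStreamIn::ReadStatus status{};
    if (statusMQ->read(&status) && status.retval == Result::OK) {
        dataMQ->read(buffer->data(), static_cast<size_t>(status.reply.read));
    }
}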
378
audio/7.0/IStreamOut.hal
Normal file
378
audio/7.0/IStreamOut.hal
Normal file
|
@ -0,0 +1,378 @@
|
|||
/*
|
||||
* Copyright (C) 2020 The Android Open Source Project
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package android.hardware.audio@7.0;
|
||||
|
||||
import android.hardware.audio.common@7.0;
|
||||
import IStream;
|
||||
import IStreamOutCallback;
|
||||
import IStreamOutEventCallback;
|
||||
|
||||
interface IStreamOut extends IStream {
|
||||
/**
|
||||
* Return the audio hardware driver estimated latency in milliseconds.
|
||||
*
|
||||
* @return latencyMs latency in milliseconds.
|
||||
*/
|
||||
getLatency() generates (uint32_t latencyMs);
|
||||
|
||||
/**
|
||||
* This method is used in situations where audio mixing is done in the
|
||||
* hardware. This method serves as a direct interface with hardware,
|
||||
* allowing to directly set the volume as apposed to via the framework.
|
||||
* This method might produce multiple PCM outputs or hardware accelerated
|
||||
* codecs, such as MP3 or AAC.
|
||||
* Optional method
|
||||
*
|
||||
* @param left left channel attenuation, 1.0f is unity, 0.0f is zero.
|
||||
* @param right right channel attenuation, 1.0f is unity, 0.0f is zero.
|
||||
* @return retval operation completion status.
|
||||
* If a volume is outside [0,1], return INVALID_ARGUMENTS
|
||||
*/
|
||||
setVolume(float left, float right) generates (Result retval);
|
||||
|
||||
/**
|
||||
* Commands that can be executed on the driver writer thread.
|
||||
*/
|
||||
enum WriteCommand : int32_t {
|
||||
WRITE,
|
||||
GET_PRESENTATION_POSITION,
|
||||
GET_LATENCY
|
||||
};
|
||||
|
||||
/**
|
||||
* Data structure passed back to the client via status message queue
|
||||
* of 'write' operation.
|
||||
*
|
||||
* Possible values of 'retval' field:
|
||||
* - OK, write operation was successful;
|
||||
* - INVALID_ARGUMENTS, stream was not configured properly;
|
||||
* - INVALID_STATE, stream is in a state that doesn't allow writes;
|
||||
* - INVALID_OPERATION, retrieving presentation position isn't supported.
|
||||
*/
|
||||
struct WriteStatus {
|
||||
Result retval;
|
||||
WriteCommand replyTo; // discriminator
|
||||
union Reply {
|
||||
uint64_t written; // WRITE command, amount of bytes written, >= 0.
|
||||
struct PresentationPosition { // same as generated by
|
||||
uint64_t frames; // getPresentationPosition.
|
||||
TimeSpec timeStamp;
|
||||
} presentationPosition;
|
||||
uint32_t latencyMs; // Same as generated by getLatency.
|
||||
} reply;
|
||||
};
|
||||
|
||||
/**
|
||||
* Called when the metadata of the stream's source has been changed.
|
||||
* @param sourceMetadata Description of the audio that is played by the clients.
|
||||
*/
|
||||
updateSourceMetadata(SourceMetadata sourceMetadata);
|
||||
|
||||
/**
|
||||
* Set up required transports for passing audio buffers to the driver.
|
||||
*
|
||||
* The transport consists of three message queues:
|
||||
* -- command queue is used to instruct the writer thread what operation
|
||||
* to perform;
|
||||
* -- data queue is used for passing audio data from the client
|
||||
* to the driver;
|
||||
* -- status queue is used for reporting operation status
|
||||
* (e.g. amount of bytes actually written or error code).
|
||||
*
|
||||
* The driver operates on a dedicated thread. The client must ensure that
|
||||
* the thread is given an appropriate priority and assigned to correct
|
||||
* scheduler and cgroup. For this purpose, the method returns identifiers
|
||||
* of the driver thread.
|
||||
*
|
||||
* @param frameSize the size of a single frame, in bytes.
|
||||
* @param framesCount the number of frames in a buffer.
|
||||
* @return retval OK if both message queues were created successfully.
|
||||
* INVALID_STATE if the method was already called.
|
||||
* INVALID_ARGUMENTS if there was a problem setting up
|
||||
* the queues.
|
||||
* @return commandMQ a message queue used for passing commands.
|
||||
* @return dataMQ a message queue used for passing audio data in the format
|
||||
* specified at the stream opening.
|
||||
* @return statusMQ a message queue used for passing status from the driver
|
||||
* using WriteStatus structures.
|
||||
* @return threadInfo identifiers of the driver's dedicated thread.
|
||||
*/
|
||||
prepareForWriting(uint32_t frameSize, uint32_t framesCount)
|
||||
generates (
|
||||
Result retval,
|
||||
fmq_sync<WriteCommand> commandMQ,
|
||||
fmq_sync<uint8_t> dataMQ,
|
||||
fmq_sync<WriteStatus> statusMQ,
|
||||
ThreadInfo threadInfo);
|
||||
|
||||
/**
|
||||
* Return the number of audio frames written by the audio DSP to DAC since
|
||||
* the output has exited standby.
|
||||
* Optional method
|
||||
*
|
||||
* @return retval operation completion status.
|
||||
* @return dspFrames number of audio frames written.
|
||||
*/
|
||||
getRenderPosition() generates (Result retval, uint32_t dspFrames);
|
||||
|
||||
/**
|
||||
* Get the local time at which the next write to the audio driver will be
|
||||
* presented. The units are microseconds, where the epoch is decided by the
|
||||
* local audio HAL.
|
||||
* Optional method
|
||||
*
|
||||
* @return retval operation completion status.
|
||||
* @return timestampUs time of the next write.
|
||||
*/
|
||||
getNextWriteTimestamp() generates (Result retval, int64_t timestampUs);
|
||||
|
||||
/**
|
||||
* Set the callback interface for notifying completion of non-blocking
|
||||
* write and drain.
|
||||
*
|
||||
* Calling this function implies that all future 'write' and 'drain'
|
||||
* must be non-blocking and use the callback to signal completion.
|
||||
*
|
||||
* 'clearCallback' method needs to be called in order to release the local
|
||||
* callback proxy on the server side and thus dereference the callback
|
||||
* implementation on the client side.
|
||||
*
|
||||
* @return retval operation completion status.
|
||||
*/
|
||||
setCallback(IStreamOutCallback callback) generates (Result retval);
|
||||
|
||||
/**
|
||||
* Clears the callback previously set via 'setCallback' method.
|
||||
*
|
||||
* Warning: failure to call this method results in callback implementation
|
||||
* on the client side being held until the HAL server termination.
|
||||
*
|
||||
* If no callback was previously set, the method should be a no-op
|
||||
* and return OK.
|
||||
*
|
||||
* @return retval operation completion status: OK or NOT_SUPPORTED.
|
||||
*/
|
||||
clearCallback() generates (Result retval);
|
||||
|
||||
/**
|
||||
* Set the callback interface for notifying about an output stream event.
|
||||
*
|
||||
* Calling this method with a null pointer will result in releasing
|
||||
* the local callback proxy on the server side and thus dereference
|
||||
* the callback implementation on the client side.
|
||||
*
|
||||
* @return retval operation completion status.
|
||||
*/
|
||||
setEventCallback(IStreamOutEventCallback callback)
|
||||
generates (Result retval);
|
||||
|
||||
/**
|
||||
* Returns whether HAL supports pausing and resuming of streams.
|
||||
*
|
||||
* @return supportsPause true if pausing is supported.
|
||||
* @return supportsResume true if resume is supported.
|
||||
*/
|
||||
supportsPauseAndResume()
|
||||
generates (bool supportsPause, bool supportsResume);
|
||||
|
||||
/**
|
||||
* Notifies to the audio driver to stop playback however the queued buffers
|
||||
* are retained by the hardware. Useful for implementing pause/resume. Empty
|
||||
* implementation if not supported however must be implemented for hardware
|
||||
* with non-trivial latency. In the pause state, some audio hardware may
|
||||
* still be using power. Client code may consider calling 'suspend' after a
|
||||
* timeout to prevent that excess power usage.
|
||||
*
|
||||
* Implementation of this function is mandatory for offloaded playback.
|
||||
*
|
||||
* @return retval operation completion status.
|
||||
*/
|
||||
pause() generates (Result retval);
|
||||
|
||||
/**
|
||||
* Notifies to the audio driver to resume playback following a pause.
|
||||
* Returns error INVALID_STATE if called without matching pause.
|
||||
*
|
||||
* Implementation of this function is mandatory for offloaded playback.
|
||||
*
|
||||
* @return retval operation completion status.
|
||||
*/
|
||||
resume() generates (Result retval);
|
||||
|
||||
/**
|
||||
* Returns whether HAL supports draining of streams.
|
||||
*
|
||||
* @return supports true if draining is supported.
|
||||
*/
|
||||
supportsDrain() generates (bool supports);
|
||||
|
||||
/**
|
||||
* Requests notification when data buffered by the driver/hardware has been
|
||||
* played. If 'setCallback' has previously been called to enable
|
||||
* non-blocking mode, then 'drain' must not block, instead it must return
|
||||
* quickly and completion of the drain is notified through the callback. If
|
||||
* 'setCallback' has not been called, then 'drain' must block until
|
||||
* completion.
|
||||
*
|
||||
* If 'type' is 'ALL', the drain completes when all previously written data
|
||||
* has been played.
|
||||
*
|
||||
* If 'type' is 'EARLY_NOTIFY', the drain completes shortly before all data
|
||||
* for the current track has played to allow time for the framework to
|
||||
* perform a gapless track switch.
|
||||
*
|
||||
* Drain must return immediately on 'stop' and 'flush' calls.
|
||||
*
|
||||
* Implementation of this function is mandatory for offloaded playback.
|
||||
*
|
||||
* @param type type of drain.
|
||||
* @return retval operation completion status.
|
||||
*/
|
||||
drain(AudioDrain type) generates (Result retval);
|
||||
|
||||
/**
|
||||
* Notifies the audio driver to flush the queued data. The stream must
* already be paused before calling 'flush'.
|
||||
* Optional method
|
||||
*
|
||||
* Implementation of this function is mandatory for offloaded playback.
|
||||
*
|
||||
* @return retval operation completion status.
|
||||
*/
|
||||
flush() generates (Result retval);
|
||||
|
||||
/**
|
||||
* Return a recent count of the number of audio frames presented to an
|
||||
* external observer. This excludes frames which have been written but are
|
||||
* still in the pipeline. The count is not reset to zero when output enters
|
||||
* standby. Also returns the value of CLOCK_MONOTONIC as of this
|
||||
* presentation count. The returned count is expected to be 'recent', but
|
||||
* does not need to be the most recent possible value. However, the
|
||||
* associated time must correspond to whatever count is returned.
|
||||
*
|
||||
* Example: assume that N+M frames have been presented, where M is a 'small'
|
||||
* number. Then it is permissible to return N instead of N+M, and the
|
||||
* timestamp must correspond to N rather than N+M. The terms 'recent' and
|
||||
* 'small' are not defined. They reflect the quality of the implementation.
|
||||
*
|
||||
* Optional method
|
||||
*
|
||||
* @return retval operation completion status.
|
||||
* @return frames count of presented audio frames.
|
||||
* @return timeStamp associated clock time.
|
||||
*/
|
||||
getPresentationPosition()
|
||||
generates (Result retval, uint64_t frames, TimeSpec timeStamp);
|
||||
|
||||
/**
|
||||
* Selects a presentation for decoding from a next generation media stream
|
||||
* (as defined per ETSI TS 103 190-2) and a program within the presentation.
|
||||
* Optional method
|
||||
*
|
||||
* @param presentationId selected audio presentation.
|
||||
* @param programId refinement for the presentation.
|
||||
* @return retval operation completion status.
|
||||
*/
|
||||
selectPresentation(int32_t presentationId, int32_t programId)
|
||||
generates (Result retval);
|
||||
|
||||
/**
|
||||
* Returns the Dual Mono mode presentation setting.
|
||||
*
|
||||
* Optional method
|
||||
*
|
||||
* @return retval operation completion status.
|
||||
* @return mode current setting of Dual Mono mode.
|
||||
*/
|
||||
getDualMonoMode() generates (Result retval, DualMonoMode mode);
|
||||
|
||||
/**
|
||||
* Sets the Dual Mono mode presentation on the output device.
|
||||
*
|
||||
* The Dual Mono mode is generally applied to stereo audio streams
|
||||
* where the left and right channels come from separate sources.
|
||||
*
|
||||
* Optional method
|
||||
*
|
||||
* @param mode selected Dual Mono mode.
|
||||
* @return retval operation completion status.
|
||||
*/
|
||||
setDualMonoMode(DualMonoMode mode) generates (Result retval);
|
||||
|
||||
/**
|
||||
* Returns the Audio Description Mix level in dB.
|
||||
*
|
||||
* The level is applied to streams incorporating a secondary Audio
* Description stream. It specifies the level of mixing of the Audio
* Description relative to the Main Audio.
|
||||
*
|
||||
* Optional method
|
||||
*
|
||||
* The value of the relative level is in the range from negative infinity
|
||||
* to +48.
|
||||
*
|
||||
* @return retval operation completion status.
|
||||
* @return leveldB the current Audio Description Mix Level in dB.
|
||||
*/
|
||||
getAudioDescriptionMixLevel() generates (Result retval, float leveldB);
|
||||
|
||||
/**
|
||||
* Sets the Audio Description Mix level in dB.
|
||||
*
|
||||
* For streams incorporating a secondary Audio Description stream,
* this method controls the relative level of mixing of the Audio
* Description with the Main Audio.
|
||||
*
|
||||
* Optional method
|
||||
*
|
||||
* The value of the relative level must be in the range from negative
|
||||
* infinity to +48.
|
||||
*
|
||||
* @param leveldB Audio Description Mix Level in dB
|
||||
* @return retval operation completion status.
|
||||
*/
|
||||
setAudioDescriptionMixLevel(float leveldB) generates (Result retval);
|
||||
|
||||
/**
|
||||
* Retrieves current playback rate parameters.
|
||||
*
|
||||
* Optional method
|
||||
*
|
||||
* @return retval operation completion status.
|
||||
* @return playbackRate current playback parameters
|
||||
*/
|
||||
getPlaybackRateParameters()
|
||||
generates (Result retval, PlaybackRate playbackRate);
|
||||
|
||||
/**
|
||||
* Sets the playback rate parameters that control playback behavior.
|
||||
* This is normally used when playing encoded content and decoding
|
||||
* is performed in hardware. Otherwise, the framework can apply
|
||||
* necessary transformations.
|
||||
*
|
||||
* Optional method
|
||||
*
|
||||
* If the HAL supports setting the playback rate, it is recommended
|
||||
* to support speed and pitch values at least in the range
|
||||
* from 0.5f to 2.0f, inclusive (see the definition of PlaybackRate struct).
|
||||
*
|
||||
* @param playbackRate playback parameters
|
||||
* @return retval operation completion status.
|
||||
*/
|
||||
setPlaybackRateParameters(PlaybackRate playbackRate)
|
||||
generates (Result retval);
|
||||
};
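For context on how the (frames, timeStamp) pair returned by getPresentationPosition() above is typically consumed: a minimal, self-contained sketch of turning it into a latency estimate. This is not AudioFlinger code; the frame counts and the 48 kHz sample rate are illustrative assumptions.

// Hypothetical example: estimate how much written audio is still buffered,
// given a frames-written counter kept by the client and the 'frames' value
// returned by getPresentationPosition(). All numbers below are made up.
public class PresentationPositionExample {
    public static void main(String[] args) {
        long framesWritten = 480_000;    // frames handed to the stream so far
        long framesPresented = 470_400;  // 'frames' from getPresentationPosition()
        int sampleRateHz = 48_000;       // stream sample rate (assumed)

        long pendingFrames = framesWritten - framesPresented;
        double latencyMs = pendingFrames * 1000.0 / sampleRateHz;
        // 9600 pending frames at 48 kHz -> about 200 ms still in the pipeline.
        System.out.printf("pending=%d frames (~%.1f ms)%n", pendingFrames, latencyMs);
    }
}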
37
audio/7.0/IStreamOutCallback.hal
Normal file
@@ -0,0 +1,37 @@
/*
|
||||
* Copyright (C) 2020 The Android Open Source Project
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package android.hardware.audio@7.0;
|
||||
|
||||
/**
|
||||
* Asynchronous write callback interface.
|
||||
*/
|
||||
interface IStreamOutCallback {
|
||||
/**
|
||||
* Non blocking write completed.
|
||||
*/
|
||||
oneway onWriteReady();
|
||||
|
||||
/**
|
||||
* Drain completed.
|
||||
*/
|
||||
oneway onDrainReady();
|
||||
|
||||
/**
|
||||
* Stream hit an error.
|
||||
*/
|
||||
oneway onError();
|
||||
};
140
audio/7.0/IStreamOutEventCallback.hal
Normal file
@@ -0,0 +1,140 @@
/*
|
||||
* Copyright (C) 2020 The Android Open Source Project
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package android.hardware.audio@7.0;
|
||||
|
||||
/**
|
||||
* Asynchronous stream out event callback interface. The interface provides
|
||||
* a way for the HAL to notify the platform when there are changes, e.g. codec
|
||||
* format change, from the lower layer.
|
||||
*/
|
||||
interface IStreamOutEventCallback {
|
||||
/**
|
||||
* Codec format changed.
|
||||
*
|
||||
* onCodecFormatChanged returns an AudioMetadata object in read-only ByteString format.
|
||||
* It represents the most recent codec format decoded by a HW audio decoder.
|
||||
*
|
||||
* Codec format is an optional message from HW audio decoders. It serves to
|
||||
* notify the application about the codec format and audio objects contained
|
||||
* within the compressed audio stream for control, informational,
|
||||
* and display purposes.
|
||||
*
|
||||
* audioMetadata ByteString is convertible to an AudioMetadata object through
|
||||
* both a C++ and a C API present in Metadata.h [1], or through a Java API present
|
||||
* in AudioMetadata.java [2].
|
||||
*
|
||||
* The ByteString format is a stable format used for parcelling (marshalling) across
|
||||
* JNI, AIDL, and HIDL interfaces. The test for R compatibility for native marshalling
|
||||
* is TEST(metadata_tests, compatibility_R) [3]. The test for R compatibility for JNI
|
||||
* marshalling is android.media.cts.AudioMetadataTest#testCompatibilityR [4].
|
||||
*
|
||||
* R (audio HAL 7.0) defined keys are as follows [2]:
|
||||
* "bitrate", int32
|
||||
* "channel-mask", int32
|
||||
* "mime", string
|
||||
* "sample-rate", int32
|
||||
* "bit-width", int32
|
||||
* "has-atmos", int32
|
||||
* "audio-encoding", int32
|
||||
*
|
||||
* Parceling Format:
|
||||
* All values are native endian order. [1]
|
||||
*
|
||||
* using type_size_t = uint32_t;
|
||||
* using index_size_t = uint32_t;
|
||||
* using datum_size_t = uint32_t;
|
||||
*
|
||||
* Permitted type indexes are
|
||||
* TYPE_NONE = 0, // Reserved
|
||||
* TYPE_INT32 = 1,
|
||||
* TYPE_INT64 = 2,
|
||||
* TYPE_FLOAT = 3,
|
||||
* TYPE_DOUBLE = 4,
|
||||
* TYPE_STRING = 5,
|
||||
* TYPE_DATA = 6, // A data table of <String, Datum>
|
||||
*
|
||||
* Datum = {
|
||||
* (type_size_t) Type (the type index from type_as_value<T>.)
|
||||
* (datum_size_t) Size (size of the Payload)
|
||||
* (byte string) Payload<Type>
|
||||
* }
|
||||
*
|
||||
* The data is specified in native endian order.
|
||||
* Since the size of the Payload is always present, unknown types may be skipped.
|
||||
*
|
||||
* Payload<Fixed-size Primitive_Value>
|
||||
* [ sizeof(Primitive_Value) in raw bytes ]
|
||||
*
|
||||
* Example of Payload<Int32> of 123:
|
||||
* Payload<Int32>
|
||||
* [ value of 123 ] = 0x7b 0x00 0x00 0x00 123
|
||||
*
|
||||
* Payload<String>
|
||||
* [ (index_size_t) length, not including zero terminator.]
|
||||
* [ (length) raw bytes ]
|
||||
*
|
||||
* Example of Payload<String> of std::string("hi"):
|
||||
* [ (index_size_t) length ] = 0x02 0x00 0x00 0x00 2 strlen("hi")
|
||||
* [ raw bytes "hi" ] = 0x68 0x69 "hi"
|
||||
*
|
||||
* Payload<Data>
|
||||
* [ (index_size_t) entries ]
|
||||
* [ raw bytes (entry 1) Key (Payload<String>)
|
||||
* Value (Datum)
|
||||
* ... (until #entries) ]
|
||||
*
|
||||
* Example of Payload<Data> of {{"hello", "world"},
|
||||
* {"value", (int32_t)1000}};
|
||||
* [ (index_size_t) #entries ] = 0x02 0x00 0x00 0x00 2 entries
|
||||
* Key (Payload<String>)
|
||||
* [ index_size_t length ] = 0x05 0x00 0x00 0x00 5 strlen("hello")
|
||||
* [ raw bytes "hello" ] = 0x68 0x65 0x6c 0x6c 0x6f "hello"
|
||||
* Value (Datum)
|
||||
* [ (type_size_t) type ] = 0x05 0x00 0x00 0x00 5 (TYPE_STRING)
|
||||
* [ (datum_size_t) size ] = 0x09 0x00 0x00 0x00 sizeof(index_size_t) +
|
||||
* strlen("world")
|
||||
* Payload<String>
|
||||
* [ (index_size_t) length ] = 0x05 0x00 0x00 0x00 5 strlen("world")
|
||||
* [ raw bytes "world" ] = 0x77 0x6f 0x72 0x6c 0x64 "world"
|
||||
* Key (Payload<String>)
|
||||
* [ index_size_t length ] = 0x05 0x00 0x00 0x00 5 strlen("value")
|
||||
* [ raw bytes "value" ] = 0x76 0x61 0x6c 0x75 0x65 "value"
|
||||
* Value (Datum)
|
||||
* [ (type_size_t) type ] = 0x01 0x00 0x00 0x00 1 (TYPE_INT32)
|
||||
* [ (datum_size_t) size ] = 0x04 0x00 0x00 0x00 4 sizeof(int32_t)
|
||||
* Payload<Int32>
|
||||
* [ raw bytes 1000 ] = 0xe8 0x03 0x00 0x00 1000
|
||||
*
|
||||
* The contents of audioMetadata is a Payload<Data>.
|
||||
* An implementation dependent detail is that the Keys are always
|
||||
* stored sorted, so the byte string representation generated is unique.
|
||||
*
|
||||
* Vendor keys are allowed for informational and debugging purposes.
|
||||
* Vendor keys should consist of the vendor company name followed
|
||||
* by a dot; for example, "vendorCompany.someVolume" [2].
|
||||
*
|
||||
* [1] system/media/audio_utils/include/audio_utils/Metadata.h
|
||||
* [2] frameworks/base/media/java/android/media/AudioMetadata.java
|
||||
* [3] system/media/audio_utils/tests/metadata_tests.cpp
|
||||
* [4] cts/tests/tests/media/src/android/media/cts/AudioMetadataTest.java
|
||||
*
|
||||
* @param audioMetadata is a buffer containing decoded format changes
|
||||
* reported by codec. The buffer contains data that can be transformed
|
||||
* to audio metadata, which is a C++ object based map.
|
||||
*/
|
||||
oneway onCodecFormatChanged(vec<uint8_t> audioMetadata);
|
||||
};
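The parceling format documented above is self-describing, so the byte string can be walked without the audio_utils helpers. Below is a hedged, self-contained sketch of such a decoder: the class and method names are illustrative (the real conversion lives in Metadata.h / AudioMetadata.java), it assumes native byte order as stated above, and it only materializes the int32 and string types used by the R keys, skipping everything else by its declared size.

import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.nio.charset.StandardCharsets;
import java.util.LinkedHashMap;
import java.util.Map;

public class AudioMetadataBlobReader {
    private static final int TYPE_INT32 = 1;
    private static final int TYPE_STRING = 5;

    // Payload<String>: [ (index_size_t) length ][ length raw bytes ]
    private static String readString(ByteBuffer b) {
        byte[] raw = new byte[b.getInt()];
        b.get(raw);
        return new String(raw, StandardCharsets.UTF_8);
    }

    // Payload<Data>: [ #entries ] then per entry: Key (Payload<String>), Value (Datum).
    // Unknown Datum types are skipped using their declared size, as the format allows.
    public static Map<String, Object> readData(byte[] blob) {
        ByteBuffer b = ByteBuffer.wrap(blob).order(ByteOrder.nativeOrder());
        Map<String, Object> out = new LinkedHashMap<>();
        int entries = b.getInt();
        for (int i = 0; i < entries; i++) {
            String key = readString(b);
            int type = b.getInt();   // (type_size_t) Type
            int size = b.getInt();   // (datum_size_t) Size of the Payload
            if (type == TYPE_INT32) {
                out.put(key, b.getInt());
            } else if (type == TYPE_STRING) {
                out.put(key, readString(b));
            } else {
                b.position(b.position() + size);  // skip unknown payload
            }
        }
        return out;
    }

    public static void main(String[] args) {
        // Rebuild the documented example {{"hello", "world"}, {"value", (int32_t)1000}}.
        ByteBuffer b = ByteBuffer.allocate(64).order(ByteOrder.nativeOrder());
        b.putInt(2);                                                // 2 entries
        b.putInt(5).put("hello".getBytes(StandardCharsets.UTF_8));  // key "hello"
        b.putInt(TYPE_STRING).putInt(4 + 5);                        // Datum: type, size
        b.putInt(5).put("world".getBytes(StandardCharsets.UTF_8));  // Payload<String> "world"
        b.putInt(5).put("value".getBytes(StandardCharsets.UTF_8));  // key "value"
        b.putInt(TYPE_INT32).putInt(4).putInt(1000);                // Datum: type, size, 1000
        byte[] blob = new byte[b.position()];
        b.flip();
        b.get(blob);
        System.out.println(readData(blob));  // prints {hello=world, value=1000}
    }
}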
5
audio/7.0/config/Android.bp
Normal file
@@ -0,0 +1,5 @@
xsd_config {
|
||||
name: "audio_policy_configuration_V7_0",
|
||||
srcs: ["audio_policy_configuration.xsd"],
|
||||
package_name: "audio.policy.configuration.V7_0",
|
||||
}
435
audio/7.0/config/api/current.txt
Normal file
@@ -0,0 +1,435 @@
// Signature format: 2.0
|
||||
package audio.policy.configuration.V7_0 {
|
||||
|
||||
public class AttachedDevices {
|
||||
ctor public AttachedDevices();
|
||||
method public java.util.List<java.lang.String> getItem();
|
||||
}
|
||||
|
||||
public enum AudioDevice {
|
||||
method public String getRawName();
|
||||
enum_constant public static final audio.policy.configuration.V7_0.AudioDevice AUDIO_DEVICE_IN_AMBIENT;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.AudioDevice AUDIO_DEVICE_IN_ANLG_DOCK_HEADSET;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.AudioDevice AUDIO_DEVICE_IN_AUX_DIGITAL;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.AudioDevice AUDIO_DEVICE_IN_BACK_MIC;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.AudioDevice AUDIO_DEVICE_IN_BLUETOOTH_A2DP;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.AudioDevice AUDIO_DEVICE_IN_BLUETOOTH_BLE;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.AudioDevice AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.AudioDevice AUDIO_DEVICE_IN_BUILTIN_MIC;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.AudioDevice AUDIO_DEVICE_IN_BUS;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.AudioDevice AUDIO_DEVICE_IN_COMMUNICATION;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.AudioDevice AUDIO_DEVICE_IN_DEFAULT;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.AudioDevice AUDIO_DEVICE_IN_DGTL_DOCK_HEADSET;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.AudioDevice AUDIO_DEVICE_IN_ECHO_REFERENCE;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.AudioDevice AUDIO_DEVICE_IN_FM_TUNER;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.AudioDevice AUDIO_DEVICE_IN_HDMI;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.AudioDevice AUDIO_DEVICE_IN_HDMI_ARC;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.AudioDevice AUDIO_DEVICE_IN_IP;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.AudioDevice AUDIO_DEVICE_IN_LINE;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.AudioDevice AUDIO_DEVICE_IN_LOOPBACK;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.AudioDevice AUDIO_DEVICE_IN_PROXY;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.AudioDevice AUDIO_DEVICE_IN_REMOTE_SUBMIX;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.AudioDevice AUDIO_DEVICE_IN_SPDIF;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.AudioDevice AUDIO_DEVICE_IN_STUB;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.AudioDevice AUDIO_DEVICE_IN_TELEPHONY_RX;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.AudioDevice AUDIO_DEVICE_IN_TV_TUNER;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.AudioDevice AUDIO_DEVICE_IN_USB_ACCESSORY;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.AudioDevice AUDIO_DEVICE_IN_USB_DEVICE;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.AudioDevice AUDIO_DEVICE_IN_USB_HEADSET;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.AudioDevice AUDIO_DEVICE_IN_VOICE_CALL;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.AudioDevice AUDIO_DEVICE_IN_WIRED_HEADSET;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.AudioDevice AUDIO_DEVICE_NONE;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.AudioDevice AUDIO_DEVICE_OUT_ANLG_DOCK_HEADSET;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.AudioDevice AUDIO_DEVICE_OUT_AUX_DIGITAL;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.AudioDevice AUDIO_DEVICE_OUT_AUX_LINE;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.AudioDevice AUDIO_DEVICE_OUT_BLUETOOTH_A2DP;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.AudioDevice AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.AudioDevice AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_SPEAKER;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.AudioDevice AUDIO_DEVICE_OUT_BLUETOOTH_SCO;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.AudioDevice AUDIO_DEVICE_OUT_BLUETOOTH_SCO_CARKIT;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.AudioDevice AUDIO_DEVICE_OUT_BLUETOOTH_SCO_HEADSET;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.AudioDevice AUDIO_DEVICE_OUT_BUS;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.AudioDevice AUDIO_DEVICE_OUT_DEFAULT;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.AudioDevice AUDIO_DEVICE_OUT_DGTL_DOCK_HEADSET;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.AudioDevice AUDIO_DEVICE_OUT_EARPIECE;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.AudioDevice AUDIO_DEVICE_OUT_ECHO_CANCELLER;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.AudioDevice AUDIO_DEVICE_OUT_FM;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.AudioDevice AUDIO_DEVICE_OUT_HDMI;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.AudioDevice AUDIO_DEVICE_OUT_HDMI_ARC;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.AudioDevice AUDIO_DEVICE_OUT_HEARING_AID;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.AudioDevice AUDIO_DEVICE_OUT_IP;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.AudioDevice AUDIO_DEVICE_OUT_LINE;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.AudioDevice AUDIO_DEVICE_OUT_PROXY;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.AudioDevice AUDIO_DEVICE_OUT_REMOTE_SUBMIX;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.AudioDevice AUDIO_DEVICE_OUT_SPDIF;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.AudioDevice AUDIO_DEVICE_OUT_SPEAKER;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.AudioDevice AUDIO_DEVICE_OUT_SPEAKER_SAFE;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.AudioDevice AUDIO_DEVICE_OUT_STUB;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.AudioDevice AUDIO_DEVICE_OUT_TELEPHONY_TX;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.AudioDevice AUDIO_DEVICE_OUT_USB_ACCESSORY;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.AudioDevice AUDIO_DEVICE_OUT_USB_DEVICE;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.AudioDevice AUDIO_DEVICE_OUT_USB_HEADSET;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.AudioDevice AUDIO_DEVICE_OUT_WIRED_HEADPHONE;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.AudioDevice AUDIO_DEVICE_OUT_WIRED_HEADSET;
|
||||
}
|
||||
|
||||
public enum AudioFormat {
|
||||
method public String getRawName();
|
||||
enum_constant public static final audio.policy.configuration.V7_0.AudioFormat AUDIO_FORMAT_AAC;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.AudioFormat AUDIO_FORMAT_AAC_ADIF;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.AudioFormat AUDIO_FORMAT_AAC_ADTS;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.AudioFormat AUDIO_FORMAT_AAC_ADTS_ELD;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.AudioFormat AUDIO_FORMAT_AAC_ADTS_ERLC;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.AudioFormat AUDIO_FORMAT_AAC_ADTS_HE_V1;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.AudioFormat AUDIO_FORMAT_AAC_ADTS_HE_V2;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.AudioFormat AUDIO_FORMAT_AAC_ADTS_LC;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.AudioFormat AUDIO_FORMAT_AAC_ADTS_LD;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.AudioFormat AUDIO_FORMAT_AAC_ADTS_LTP;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.AudioFormat AUDIO_FORMAT_AAC_ADTS_MAIN;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.AudioFormat AUDIO_FORMAT_AAC_ADTS_SCALABLE;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.AudioFormat AUDIO_FORMAT_AAC_ADTS_SSR;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.AudioFormat AUDIO_FORMAT_AAC_ADTS_XHE;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.AudioFormat AUDIO_FORMAT_AAC_ELD;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.AudioFormat AUDIO_FORMAT_AAC_ERLC;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.AudioFormat AUDIO_FORMAT_AAC_HE_V1;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.AudioFormat AUDIO_FORMAT_AAC_HE_V2;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.AudioFormat AUDIO_FORMAT_AAC_LATM;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.AudioFormat AUDIO_FORMAT_AAC_LATM_HE_V1;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.AudioFormat AUDIO_FORMAT_AAC_LATM_HE_V2;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.AudioFormat AUDIO_FORMAT_AAC_LATM_LC;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.AudioFormat AUDIO_FORMAT_AAC_LC;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.AudioFormat AUDIO_FORMAT_AAC_LD;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.AudioFormat AUDIO_FORMAT_AAC_LTP;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.AudioFormat AUDIO_FORMAT_AAC_MAIN;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.AudioFormat AUDIO_FORMAT_AAC_SCALABLE;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.AudioFormat AUDIO_FORMAT_AAC_SSR;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.AudioFormat AUDIO_FORMAT_AAC_XHE;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.AudioFormat AUDIO_FORMAT_AC3;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.AudioFormat AUDIO_FORMAT_AC4;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.AudioFormat AUDIO_FORMAT_ALAC;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.AudioFormat AUDIO_FORMAT_AMR_NB;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.AudioFormat AUDIO_FORMAT_AMR_WB;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.AudioFormat AUDIO_FORMAT_AMR_WB_PLUS;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.AudioFormat AUDIO_FORMAT_APE;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.AudioFormat AUDIO_FORMAT_APTX;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.AudioFormat AUDIO_FORMAT_APTX_ADAPTIVE;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.AudioFormat AUDIO_FORMAT_APTX_HD;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.AudioFormat AUDIO_FORMAT_APTX_TWSP;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.AudioFormat AUDIO_FORMAT_CELT;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.AudioFormat AUDIO_FORMAT_DOLBY_TRUEHD;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.AudioFormat AUDIO_FORMAT_DSD;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.AudioFormat AUDIO_FORMAT_DTS;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.AudioFormat AUDIO_FORMAT_DTS_HD;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.AudioFormat AUDIO_FORMAT_EVRC;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.AudioFormat AUDIO_FORMAT_EVRCB;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.AudioFormat AUDIO_FORMAT_EVRCNW;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.AudioFormat AUDIO_FORMAT_EVRCWB;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.AudioFormat AUDIO_FORMAT_E_AC3;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.AudioFormat AUDIO_FORMAT_E_AC3_JOC;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.AudioFormat AUDIO_FORMAT_FLAC;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.AudioFormat AUDIO_FORMAT_HE_AAC_V1;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.AudioFormat AUDIO_FORMAT_HE_AAC_V2;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.AudioFormat AUDIO_FORMAT_IEC61937;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.AudioFormat AUDIO_FORMAT_LDAC;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.AudioFormat AUDIO_FORMAT_LHDC;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.AudioFormat AUDIO_FORMAT_LHDC_LL;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.AudioFormat AUDIO_FORMAT_MAT_1_0;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.AudioFormat AUDIO_FORMAT_MAT_2_0;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.AudioFormat AUDIO_FORMAT_MAT_2_1;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.AudioFormat AUDIO_FORMAT_MP2;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.AudioFormat AUDIO_FORMAT_MP3;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.AudioFormat AUDIO_FORMAT_OPUS;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.AudioFormat AUDIO_FORMAT_PCM_16_BIT;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.AudioFormat AUDIO_FORMAT_PCM_24_BIT_PACKED;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.AudioFormat AUDIO_FORMAT_PCM_32_BIT;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.AudioFormat AUDIO_FORMAT_PCM_8_24_BIT;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.AudioFormat AUDIO_FORMAT_PCM_8_BIT;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.AudioFormat AUDIO_FORMAT_PCM_FLOAT;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.AudioFormat AUDIO_FORMAT_QCELP;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.AudioFormat AUDIO_FORMAT_SBC;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.AudioFormat AUDIO_FORMAT_VORBIS;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.AudioFormat AUDIO_FORMAT_WMA;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.AudioFormat AUDIO_FORMAT_WMA_PRO;
|
||||
}
|
||||
|
||||
public class AudioPolicyConfiguration {
|
||||
ctor public AudioPolicyConfiguration();
|
||||
method public audio.policy.configuration.V7_0.GlobalConfiguration getGlobalConfiguration();
|
||||
method public java.util.List<audio.policy.configuration.V7_0.Modules> getModules();
|
||||
method public audio.policy.configuration.V7_0.SurroundSound getSurroundSound();
|
||||
method public audio.policy.configuration.V7_0.Version getVersion();
|
||||
method public java.util.List<audio.policy.configuration.V7_0.Volumes> getVolumes();
|
||||
method public void setGlobalConfiguration(audio.policy.configuration.V7_0.GlobalConfiguration);
|
||||
method public void setSurroundSound(audio.policy.configuration.V7_0.SurroundSound);
|
||||
method public void setVersion(audio.policy.configuration.V7_0.Version);
|
||||
}
|
||||
|
||||
public enum AudioUsage {
|
||||
method public String getRawName();
|
||||
enum_constant public static final audio.policy.configuration.V7_0.AudioUsage AUDIO_USAGE_ALARM;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.AudioUsage AUDIO_USAGE_ASSISTANCE_ACCESSIBILITY;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.AudioUsage AUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.AudioUsage AUDIO_USAGE_ASSISTANCE_SONIFICATION;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.AudioUsage AUDIO_USAGE_ASSISTANT;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.AudioUsage AUDIO_USAGE_GAME;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.AudioUsage AUDIO_USAGE_MEDIA;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.AudioUsage AUDIO_USAGE_NOTIFICATION;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.AudioUsage AUDIO_USAGE_NOTIFICATION_TELEPHONY_RINGTONE;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.AudioUsage AUDIO_USAGE_UNKNOWN;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.AudioUsage AUDIO_USAGE_VIRTUAL_SOURCE;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.AudioUsage AUDIO_USAGE_VOICE_COMMUNICATION;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.AudioUsage AUDIO_USAGE_VOICE_COMMUNICATION_SIGNALLING;
|
||||
}
|
||||
|
||||
public enum DeviceCategory {
|
||||
method public String getRawName();
|
||||
enum_constant public static final audio.policy.configuration.V7_0.DeviceCategory DEVICE_CATEGORY_EARPIECE;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.DeviceCategory DEVICE_CATEGORY_EXT_MEDIA;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.DeviceCategory DEVICE_CATEGORY_HEADSET;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.DeviceCategory DEVICE_CATEGORY_HEARING_AID;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.DeviceCategory DEVICE_CATEGORY_SPEAKER;
|
||||
}
|
||||
|
||||
public class DevicePorts {
|
||||
ctor public DevicePorts();
|
||||
method public java.util.List<audio.policy.configuration.V7_0.DevicePorts.DevicePort> getDevicePort();
|
||||
}
|
||||
|
||||
public static class DevicePorts.DevicePort {
|
||||
ctor public DevicePorts.DevicePort();
|
||||
method public String getAddress();
|
||||
method public java.util.List<audio.policy.configuration.V7_0.AudioFormat> getEncodedFormats();
|
||||
method public audio.policy.configuration.V7_0.Gains getGains();
|
||||
method public java.util.List<audio.policy.configuration.V7_0.Profile> getProfile();
|
||||
method public audio.policy.configuration.V7_0.Role getRole();
|
||||
method public String getTagName();
|
||||
method public String getType();
|
||||
method public boolean get_default();
|
||||
method public void setAddress(String);
|
||||
method public void setEncodedFormats(java.util.List<audio.policy.configuration.V7_0.AudioFormat>);
|
||||
method public void setGains(audio.policy.configuration.V7_0.Gains);
|
||||
method public void setRole(audio.policy.configuration.V7_0.Role);
|
||||
method public void setTagName(String);
|
||||
method public void setType(String);
|
||||
method public void set_default(boolean);
|
||||
}
|
||||
|
||||
public enum EngineSuffix {
|
||||
method public String getRawName();
|
||||
enum_constant public static final audio.policy.configuration.V7_0.EngineSuffix _default;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.EngineSuffix configurable;
|
||||
}
|
||||
|
||||
public enum GainMode {
|
||||
method public String getRawName();
|
||||
enum_constant public static final audio.policy.configuration.V7_0.GainMode AUDIO_GAIN_MODE_CHANNELS;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.GainMode AUDIO_GAIN_MODE_JOINT;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.GainMode AUDIO_GAIN_MODE_RAMP;
|
||||
}
|
||||
|
||||
public class Gains {
|
||||
ctor public Gains();
|
||||
method public java.util.List<audio.policy.configuration.V7_0.Gains.Gain> getGain();
|
||||
}
|
||||
|
||||
public static class Gains.Gain {
|
||||
ctor public Gains.Gain();
|
||||
method public String getChannel_mask();
|
||||
method public int getDefaultValueMB();
|
||||
method public int getMaxRampMs();
|
||||
method public int getMaxValueMB();
|
||||
method public int getMinRampMs();
|
||||
method public int getMinValueMB();
|
||||
method public audio.policy.configuration.V7_0.GainMode getMode();
|
||||
method public String getName();
|
||||
method public int getStepValueMB();
|
||||
method public boolean getUseForVolume();
|
||||
method public void setChannel_mask(String);
|
||||
method public void setDefaultValueMB(int);
|
||||
method public void setMaxRampMs(int);
|
||||
method public void setMaxValueMB(int);
|
||||
method public void setMinRampMs(int);
|
||||
method public void setMinValueMB(int);
|
||||
method public void setMode(audio.policy.configuration.V7_0.GainMode);
|
||||
method public void setName(String);
|
||||
method public void setStepValueMB(int);
|
||||
method public void setUseForVolume(boolean);
|
||||
}
|
||||
|
||||
public class GlobalConfiguration {
|
||||
ctor public GlobalConfiguration();
|
||||
method public boolean getCall_screen_mode_supported();
|
||||
method public audio.policy.configuration.V7_0.EngineSuffix getEngine_library();
|
||||
method public boolean getSpeaker_drc_enabled();
|
||||
method public void setCall_screen_mode_supported(boolean);
|
||||
method public void setEngine_library(audio.policy.configuration.V7_0.EngineSuffix);
|
||||
method public void setSpeaker_drc_enabled(boolean);
|
||||
}
|
||||
|
||||
public enum HalVersion {
|
||||
method public String getRawName();
|
||||
enum_constant public static final audio.policy.configuration.V7_0.HalVersion _2_0;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.HalVersion _3_0;
|
||||
}
|
||||
|
||||
public class MixPorts {
|
||||
ctor public MixPorts();
|
||||
method public java.util.List<audio.policy.configuration.V7_0.MixPorts.MixPort> getMixPort();
|
||||
}
|
||||
|
||||
public static class MixPorts.MixPort {
|
||||
ctor public MixPorts.MixPort();
|
||||
method public String getFlags();
|
||||
method public audio.policy.configuration.V7_0.Gains getGains();
|
||||
method public long getMaxActiveCount();
|
||||
method public long getMaxOpenCount();
|
||||
method public String getName();
|
||||
method public java.util.List<audio.policy.configuration.V7_0.AudioUsage> getPreferredUsage();
|
||||
method public java.util.List<audio.policy.configuration.V7_0.Profile> getProfile();
|
||||
method public audio.policy.configuration.V7_0.Role getRole();
|
||||
method public void setFlags(String);
|
||||
method public void setGains(audio.policy.configuration.V7_0.Gains);
|
||||
method public void setMaxActiveCount(long);
|
||||
method public void setMaxOpenCount(long);
|
||||
method public void setName(String);
|
||||
method public void setPreferredUsage(java.util.List<audio.policy.configuration.V7_0.AudioUsage>);
|
||||
method public void setRole(audio.policy.configuration.V7_0.Role);
|
||||
}
|
||||
|
||||
public enum MixType {
|
||||
method public String getRawName();
|
||||
enum_constant public static final audio.policy.configuration.V7_0.MixType mix;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.MixType mux;
|
||||
}
|
||||
|
||||
public class Modules {
|
||||
ctor public Modules();
|
||||
method public java.util.List<audio.policy.configuration.V7_0.Modules.Module> getModule();
|
||||
}
|
||||
|
||||
public static class Modules.Module {
|
||||
ctor public Modules.Module();
|
||||
method public audio.policy.configuration.V7_0.AttachedDevices getAttachedDevices();
|
||||
method public String getDefaultOutputDevice();
|
||||
method public audio.policy.configuration.V7_0.DevicePorts getDevicePorts();
|
||||
method public audio.policy.configuration.V7_0.HalVersion getHalVersion();
|
||||
method public audio.policy.configuration.V7_0.MixPorts getMixPorts();
|
||||
method public String getName();
|
||||
method public audio.policy.configuration.V7_0.Routes getRoutes();
|
||||
method public void setAttachedDevices(audio.policy.configuration.V7_0.AttachedDevices);
|
||||
method public void setDefaultOutputDevice(String);
|
||||
method public void setDevicePorts(audio.policy.configuration.V7_0.DevicePorts);
|
||||
method public void setHalVersion(audio.policy.configuration.V7_0.HalVersion);
|
||||
method public void setMixPorts(audio.policy.configuration.V7_0.MixPorts);
|
||||
method public void setName(String);
|
||||
method public void setRoutes(audio.policy.configuration.V7_0.Routes);
|
||||
}
|
||||
|
||||
public class Profile {
|
||||
ctor public Profile();
|
||||
method public String getChannelMasks();
|
||||
method public String getFormat();
|
||||
method public String getName();
|
||||
method public String getSamplingRates();
|
||||
method public void setChannelMasks(String);
|
||||
method public void setFormat(String);
|
||||
method public void setName(String);
|
||||
method public void setSamplingRates(String);
|
||||
}
|
||||
|
||||
public class Reference {
|
||||
ctor public Reference();
|
||||
method public String getName();
|
||||
method public java.util.List<java.lang.String> getPoint();
|
||||
method public void setName(String);
|
||||
}
|
||||
|
||||
public enum Role {
|
||||
method public String getRawName();
|
||||
enum_constant public static final audio.policy.configuration.V7_0.Role sink;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.Role source;
|
||||
}
|
||||
|
||||
public class Routes {
|
||||
ctor public Routes();
|
||||
method public java.util.List<audio.policy.configuration.V7_0.Routes.Route> getRoute();
|
||||
}
|
||||
|
||||
public static class Routes.Route {
|
||||
ctor public Routes.Route();
|
||||
method public String getSink();
|
||||
method public String getSources();
|
||||
method public audio.policy.configuration.V7_0.MixType getType();
|
||||
method public void setSink(String);
|
||||
method public void setSources(String);
|
||||
method public void setType(audio.policy.configuration.V7_0.MixType);
|
||||
}
|
||||
|
||||
public enum Stream {
|
||||
method public String getRawName();
|
||||
enum_constant public static final audio.policy.configuration.V7_0.Stream AUDIO_STREAM_ACCESSIBILITY;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.Stream AUDIO_STREAM_ALARM;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.Stream AUDIO_STREAM_ASSISTANT;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.Stream AUDIO_STREAM_BLUETOOTH_SCO;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.Stream AUDIO_STREAM_DTMF;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.Stream AUDIO_STREAM_ENFORCED_AUDIBLE;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.Stream AUDIO_STREAM_MUSIC;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.Stream AUDIO_STREAM_NOTIFICATION;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.Stream AUDIO_STREAM_PATCH;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.Stream AUDIO_STREAM_REROUTING;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.Stream AUDIO_STREAM_RING;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.Stream AUDIO_STREAM_SYSTEM;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.Stream AUDIO_STREAM_TTS;
|
||||
enum_constant public static final audio.policy.configuration.V7_0.Stream AUDIO_STREAM_VOICE_CALL;
|
||||
}
|
||||
|
||||
public class SurroundFormats {
|
||||
ctor public SurroundFormats();
|
||||
method public java.util.List<audio.policy.configuration.V7_0.SurroundFormats.Format> getFormat();
|
||||
}
|
||||
|
||||
public static class SurroundFormats.Format {
|
||||
ctor public SurroundFormats.Format();
|
||||
method public audio.policy.configuration.V7_0.AudioFormat getName();
|
||||
method public java.util.List<audio.policy.configuration.V7_0.AudioFormat> getSubformats();
|
||||
method public void setName(audio.policy.configuration.V7_0.AudioFormat);
|
||||
method public void setSubformats(java.util.List<audio.policy.configuration.V7_0.AudioFormat>);
|
||||
}
|
||||
|
||||
public class SurroundSound {
|
||||
ctor public SurroundSound();
|
||||
method public audio.policy.configuration.V7_0.SurroundFormats getFormats();
|
||||
method public void setFormats(audio.policy.configuration.V7_0.SurroundFormats);
|
||||
}
|
||||
|
||||
public enum Version {
|
||||
method public String getRawName();
|
||||
enum_constant public static final audio.policy.configuration.V7_0.Version _1_0;
|
||||
}
|
||||
|
||||
public class Volume {
|
||||
ctor public Volume();
|
||||
method public audio.policy.configuration.V7_0.DeviceCategory getDeviceCategory();
|
||||
method public java.util.List<java.lang.String> getPoint();
|
||||
method public String getRef();
|
||||
method public audio.policy.configuration.V7_0.Stream getStream();
|
||||
method public void setDeviceCategory(audio.policy.configuration.V7_0.DeviceCategory);
|
||||
method public void setRef(String);
|
||||
method public void setStream(audio.policy.configuration.V7_0.Stream);
|
||||
}
|
||||
|
||||
public class Volumes {
|
||||
ctor public Volumes();
|
||||
method public java.util.List<audio.policy.configuration.V7_0.Reference> getReference();
|
||||
method public java.util.List<audio.policy.configuration.V7_0.Volume> getVolume();
|
||||
}
|
||||
|
||||
public class XmlParser {
|
||||
ctor public XmlParser();
|
||||
method public static audio.policy.configuration.V7_0.AudioPolicyConfiguration read(java.io.InputStream) throws javax.xml.datatype.DatatypeConfigurationException, java.io.IOException, org.xmlpull.v1.XmlPullParserException;
|
||||
method public static String readText(org.xmlpull.v1.XmlPullParser) throws java.io.IOException, org.xmlpull.v1.XmlPullParserException;
|
||||
method public static void skip(org.xmlpull.v1.XmlPullParser) throws java.io.IOException, org.xmlpull.v1.XmlPullParserException;
|
||||
}
|
||||
|
||||
}
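The XmlParser entry point listed above is the intended way to consume files that validate against the V7_0 schema. A minimal sketch of reading one and dumping its modules, assuming the generated audio_policy_configuration_V7_0 classes are on the classpath; the /vendor/etc path and the class name DumpAudioPolicy are illustrative assumptions.

import audio.policy.configuration.V7_0.AudioPolicyConfiguration;
import audio.policy.configuration.V7_0.Modules;
import audio.policy.configuration.V7_0.XmlParser;

import java.io.FileInputStream;
import java.io.InputStream;

public class DumpAudioPolicy {
    public static void main(String[] args) throws Exception {
        String path = args.length > 0 ? args[0]
                : "/vendor/etc/audio_policy_configuration.xml";  // assumed location
        try (InputStream in = new FileInputStream(path)) {
            AudioPolicyConfiguration config = XmlParser.read(in);
            if (config.getVersion() != null) {
                System.out.println("config version: " + config.getVersion().getRawName());
            }
            for (Modules modules : config.getModules()) {
                for (Modules.Module module : modules.getModule()) {
                    // "name" and "halVersion" are required attributes per the schema.
                    System.out.println("module " + module.getName()
                            + " halVersion=" + module.getHalVersion().getRawName());
                }
            }
        }
    }
}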
0
audio/7.0/config/api/last_current.txt
Normal file
0
audio/7.0/config/api/last_removed.txt
Normal file
1
audio/7.0/config/api/removed.txt
Normal file
@@ -0,0 +1 @@
// Signature format: 2.0
634
audio/7.0/config/audio_policy_configuration.xsd
Normal file
@@ -0,0 +1,634 @@
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<!-- Copyright (C) 2020 The Android Open Source Project
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
-->
|
||||
<!-- TODO: define a targetNamespace. Note that it will break retrocompatibility -->
|
||||
<xs:schema version="2.0"
|
||||
elementFormDefault="qualified"
|
||||
attributeFormDefault="unqualified"
|
||||
xmlns:xs="http://www.w3.org/2001/XMLSchema">
|
||||
<!-- List the config versions supported by audio policy. -->
|
||||
<xs:simpleType name="version">
|
||||
<xs:restriction base="xs:decimal">
|
||||
<xs:enumeration value="1.0"/>
|
||||
</xs:restriction>
|
||||
</xs:simpleType>
|
||||
<xs:simpleType name="halVersion">
|
||||
<xs:annotation>
|
||||
<xs:documentation xml:lang="en">
|
||||
Version of the interface the HAL implements.
|
||||
</xs:documentation>
|
||||
</xs:annotation>
|
||||
<xs:restriction base="xs:decimal">
|
||||
<!-- List of HAL versions supported by the framework. -->
|
||||
<xs:enumeration value="2.0"/>
|
||||
<xs:enumeration value="3.0"/>
|
||||
</xs:restriction>
|
||||
</xs:simpleType>
|
||||
<xs:element name="audioPolicyConfiguration">
|
||||
<xs:complexType>
|
||||
<xs:sequence>
|
||||
<xs:element name="globalConfiguration" type="globalConfiguration"/>
|
||||
<xs:element name="modules" type="modules" maxOccurs="unbounded"/>
|
||||
<xs:element name="volumes" type="volumes" maxOccurs="unbounded"/>
|
||||
<xs:element name="surroundSound" type="surroundSound" minOccurs="0" />
|
||||
</xs:sequence>
|
||||
<xs:attribute name="version" type="version"/>
|
||||
</xs:complexType>
|
||||
<xs:key name="moduleNameKey">
|
||||
<xs:selector xpath="modules/module"/>
|
||||
<xs:field xpath="@name"/>
|
||||
</xs:key>
|
||||
<xs:unique name="volumeTargetUniqueness">
|
||||
<xs:selector xpath="volumes/volume"/>
|
||||
<xs:field xpath="@stream"/>
|
||||
<xs:field xpath="@deviceCategory"/>
|
||||
</xs:unique>
|
||||
<xs:key name="volumeCurveNameKey">
|
||||
<xs:selector xpath="volumes/reference"/>
|
||||
<xs:field xpath="@name"/>
|
||||
</xs:key>
|
||||
<xs:keyref name="volumeCurveRef" refer="volumeCurveNameKey">
|
||||
<xs:selector xpath="volumes/volume"/>
|
||||
<xs:field xpath="@ref"/>
|
||||
</xs:keyref>
|
||||
</xs:element>
|
||||
<xs:complexType name="globalConfiguration">
|
||||
<xs:attribute name="speaker_drc_enabled" type="xs:boolean" use="required"/>
|
||||
<xs:attribute name="call_screen_mode_supported" type="xs:boolean" use="optional"/>
|
||||
<xs:attribute name="engine_library" type="engineSuffix" use="optional"/>
|
||||
</xs:complexType>
|
||||
<xs:complexType name="modules">
|
||||
<xs:annotation>
|
||||
<xs:documentation xml:lang="en">
|
||||
There should be one section per audio HW module present on the platform.
|
||||
Each <module/> contains two mandatory tags: “halVersion” and “name”.
|
||||
The module "name" is the same as in the previous .conf file.
|
||||
Each module must contain the following sections:
|
||||
- <devicePorts/>: a list of device descriptors for all
|
||||
input and output devices accessible via this module.
|
||||
This contains both permanently attached devices and removable devices.
|
||||
- <mixPorts/>: listing all output and input streams exposed by the audio HAL
|
||||
- <routes/>: list of possible connections between input
|
||||
and output devices or between stream and devices.
|
||||
A <route/> is defined by a set of 3 attributes:
|
||||
-"type": mux|mix means all sources are mutual exclusive (mux) or can be mixed (mix)
|
||||
-"sink": the sink involved in this route
|
||||
-"sources": all the sources than can be connected to the sink via this route
|
||||
- <attachedDevices/>: permanently attached devices.
|
||||
The attachedDevices section is a list of device names.
|
||||
Their names correspond to device names defined in "devicePorts" section.
|
||||
- <defaultOutputDevice/> is the device to be used when no policy rule applies
|
||||
</xs:documentation>
|
||||
</xs:annotation>
|
||||
<xs:sequence>
|
||||
<xs:element name="module" maxOccurs="unbounded">
|
||||
<xs:complexType>
|
||||
<xs:sequence>
|
||||
<xs:element name="attachedDevices" type="attachedDevices" minOccurs="0">
|
||||
<xs:unique name="attachedDevicesUniqueness">
|
||||
<xs:selector xpath="item"/>
|
||||
<xs:field xpath="."/>
|
||||
</xs:unique>
|
||||
</xs:element>
|
||||
<xs:element name="defaultOutputDevice" type="xs:token" minOccurs="0"/>
|
||||
<xs:element name="mixPorts" type="mixPorts" minOccurs="0"/>
|
||||
<xs:element name="devicePorts" type="devicePorts" minOccurs="0"/>
|
||||
<xs:element name="routes" type="routes" minOccurs="0"/>
|
||||
</xs:sequence>
|
||||
<xs:attribute name="name" type="xs:string" use="required"/>
|
||||
<xs:attribute name="halVersion" type="halVersion" use="required"/>
|
||||
</xs:complexType>
|
||||
<xs:unique name="mixPortNameUniqueness">
|
||||
<xs:selector xpath="mixPorts/mixPort"/>
|
||||
<xs:field xpath="@name"/>
|
||||
</xs:unique>
|
||||
<xs:key name="devicePortNameKey">
|
||||
<xs:selector xpath="devicePorts/devicePort"/>
|
||||
<xs:field xpath="@tagName"/>
|
||||
</xs:key>
|
||||
<xs:unique name="devicePortUniqueness">
|
||||
<xs:selector xpath="devicePorts/devicePort"/>
|
||||
<xs:field xpath="@type"/>
|
||||
<xs:field xpath="@address"/>
|
||||
</xs:unique>
|
||||
<xs:keyref name="defaultOutputDeviceRef" refer="devicePortNameKey">
|
||||
<xs:selector xpath="defaultOutputDevice"/>
|
||||
<xs:field xpath="."/>
|
||||
</xs:keyref>
|
||||
<xs:keyref name="attachedDeviceRef" refer="devicePortNameKey">
|
||||
<xs:selector xpath="attachedDevices/item"/>
|
||||
<xs:field xpath="."/>
|
||||
</xs:keyref>
|
||||
<!-- The following 3 constraints try to make sure each sink port
|
||||
is referenced in one and only one route. -->
|
||||
<xs:key name="routeSinkKey">
|
||||
<!-- predicate [@type='sink'] does not work in xsd 1.0 -->
|
||||
<xs:selector xpath="devicePorts/devicePort|mixPorts/mixPort"/>
|
||||
<xs:field xpath="@tagName|@name"/>
|
||||
</xs:key>
|
||||
<xs:keyref name="routeSinkRef" refer="routeSinkKey">
|
||||
<xs:selector xpath="routes/route"/>
|
||||
<xs:field xpath="@sink"/>
|
||||
</xs:keyref>
|
||||
<xs:unique name="routeUniqueness">
|
||||
<xs:selector xpath="routes/route"/>
|
||||
<xs:field xpath="@sink"/>
|
||||
</xs:unique>
|
||||
</xs:element>
|
||||
</xs:sequence>
|
||||
</xs:complexType>
|
||||
<xs:complexType name="attachedDevices">
|
||||
<xs:sequence>
|
||||
<xs:element name="item" type="xs:token" minOccurs="0" maxOccurs="unbounded"/>
|
||||
</xs:sequence>
|
||||
</xs:complexType>
|
||||
<!-- TODO: separate values by space for better xsd validations. -->
|
||||
<xs:simpleType name="audioInOutFlags">
|
||||
<xs:annotation>
|
||||
<xs:documentation xml:lang="en">
|
||||
"|" separated list of audio_output_flags_t or audio_input_flags_t.
|
||||
</xs:documentation>
|
||||
</xs:annotation>
|
||||
<xs:restriction base="xs:string">
|
||||
<xs:pattern value="|[_A-Z]+(\|[_A-Z]+)*"/>
|
||||
</xs:restriction>
|
||||
</xs:simpleType>
|
||||
<xs:simpleType name="role">
|
||||
<xs:restriction base="xs:string">
|
||||
<xs:enumeration value="sink"/>
|
||||
<xs:enumeration value="source"/>
|
||||
</xs:restriction>
|
||||
</xs:simpleType>
|
||||
<xs:complexType name="mixPorts">
|
||||
<xs:sequence>
|
||||
<xs:element name="mixPort" minOccurs="0" maxOccurs="unbounded">
|
||||
<xs:complexType>
|
||||
<xs:sequence>
|
||||
<xs:element name="profile" type="profile" minOccurs="0" maxOccurs="unbounded"/>
|
||||
<xs:element name="gains" type="gains" minOccurs="0"/>
|
||||
</xs:sequence>
|
||||
<xs:attribute name="name" type="xs:token" use="required"/>
|
||||
<xs:attribute name="role" type="role" use="required"/>
|
||||
<xs:attribute name="flags" type="audioInOutFlags"/>
|
||||
<xs:attribute name="maxOpenCount" type="xs:unsignedInt"/>
|
||||
<xs:attribute name="maxActiveCount" type="xs:unsignedInt"/>
|
||||
<xs:attribute name="preferredUsage" type="audioUsageList">
|
||||
<xs:annotation>
|
||||
<xs:documentation xml:lang="en">
|
||||
When choosing the mixPort of an audio track, the audioPolicy
|
||||
first considers the mixPorts with a preferredUsage including
|
||||
the track's AudioUsage.
|
||||
If none support the track format, the other mixPorts are considered.
|
||||
E.g. a <mixPort preferredUsage="AUDIO_USAGE_MEDIA" /> will receive
|
||||
the audio of all apps playing with a MEDIA usage.
|
||||
It may receive audio from ALARM if there is no compatible
|
||||
<mixPort preferredUsage="AUDIO_USAGE_ALARM" />.
|
||||
</xs:documentation>
|
||||
</xs:annotation>
|
||||
</xs:attribute>
|
||||
</xs:complexType>
|
||||
<xs:unique name="mixPortProfileUniqueness">
|
||||
<xs:selector xpath="profile"/>
|
||||
<xs:field xpath="format"/>
|
||||
<xs:field xpath="samplingRate"/>
|
||||
<xs:field xpath="channelMasks"/>
|
||||
</xs:unique>
|
||||
<xs:unique name="mixPortGainUniqueness">
|
||||
<xs:selector xpath="gains/gain"/>
|
||||
<xs:field xpath="@name"/>
|
||||
</xs:unique>
|
||||
</xs:element>
|
||||
</xs:sequence>
|
||||
</xs:complexType>
|
||||
<!-- Enum values of audio_device_t in audio.h
|
||||
TODO: generate from hidl to avoid manual sync.
|
||||
TODO: separate source and sink in the xml for better xsd validations. -->
|
||||
<xs:simpleType name="audioDevice">
|
||||
<xs:restriction base="xs:string">
|
||||
<xs:enumeration value="AUDIO_DEVICE_NONE"/>
|
||||
|
||||
<xs:enumeration value="AUDIO_DEVICE_OUT_EARPIECE"/>
|
||||
<xs:enumeration value="AUDIO_DEVICE_OUT_SPEAKER"/>
|
||||
<xs:enumeration value="AUDIO_DEVICE_OUT_WIRED_HEADSET"/>
|
||||
<xs:enumeration value="AUDIO_DEVICE_OUT_WIRED_HEADPHONE"/>
|
||||
<xs:enumeration value="AUDIO_DEVICE_OUT_BLUETOOTH_SCO"/>
|
||||
<xs:enumeration value="AUDIO_DEVICE_OUT_BLUETOOTH_SCO_HEADSET"/>
|
||||
<xs:enumeration value="AUDIO_DEVICE_OUT_BLUETOOTH_SCO_CARKIT"/>
|
||||
<xs:enumeration value="AUDIO_DEVICE_OUT_BLUETOOTH_A2DP"/>
|
||||
<xs:enumeration value="AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES"/>
|
||||
<xs:enumeration value="AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_SPEAKER"/>
|
||||
<xs:enumeration value="AUDIO_DEVICE_OUT_AUX_DIGITAL"/>
|
||||
<xs:enumeration value="AUDIO_DEVICE_OUT_HDMI"/>
|
||||
<xs:enumeration value="AUDIO_DEVICE_OUT_ANLG_DOCK_HEADSET"/>
|
||||
<xs:enumeration value="AUDIO_DEVICE_OUT_DGTL_DOCK_HEADSET"/>
|
||||
<xs:enumeration value="AUDIO_DEVICE_OUT_USB_ACCESSORY"/>
|
||||
<xs:enumeration value="AUDIO_DEVICE_OUT_USB_DEVICE"/>
|
||||
<xs:enumeration value="AUDIO_DEVICE_OUT_REMOTE_SUBMIX"/>
|
||||
<xs:enumeration value="AUDIO_DEVICE_OUT_TELEPHONY_TX"/>
|
||||
<xs:enumeration value="AUDIO_DEVICE_OUT_LINE"/>
|
||||
<xs:enumeration value="AUDIO_DEVICE_OUT_HDMI_ARC"/>
|
||||
<xs:enumeration value="AUDIO_DEVICE_OUT_SPDIF"/>
|
||||
<xs:enumeration value="AUDIO_DEVICE_OUT_FM"/>
|
||||
<xs:enumeration value="AUDIO_DEVICE_OUT_AUX_LINE"/>
|
||||
<xs:enumeration value="AUDIO_DEVICE_OUT_SPEAKER_SAFE"/>
|
||||
<xs:enumeration value="AUDIO_DEVICE_OUT_IP"/>
|
||||
<xs:enumeration value="AUDIO_DEVICE_OUT_BUS"/>
|
||||
<xs:enumeration value="AUDIO_DEVICE_OUT_PROXY"/>
|
||||
<xs:enumeration value="AUDIO_DEVICE_OUT_USB_HEADSET"/>
|
||||
<xs:enumeration value="AUDIO_DEVICE_OUT_HEARING_AID"/>
|
||||
<xs:enumeration value="AUDIO_DEVICE_OUT_ECHO_CANCELLER"/>
|
||||
<xs:enumeration value="AUDIO_DEVICE_OUT_DEFAULT"/>
|
||||
<xs:enumeration value="AUDIO_DEVICE_OUT_STUB"/>
|
||||
|
||||
<!-- Due to the XML format, IN types cannot be separated from OUT types -->
|
||||
<xs:enumeration value="AUDIO_DEVICE_IN_COMMUNICATION"/>
|
||||
<xs:enumeration value="AUDIO_DEVICE_IN_AMBIENT"/>
|
||||
<xs:enumeration value="AUDIO_DEVICE_IN_BUILTIN_MIC"/>
|
||||
<xs:enumeration value="AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET"/>
|
||||
<xs:enumeration value="AUDIO_DEVICE_IN_WIRED_HEADSET"/>
|
||||
<xs:enumeration value="AUDIO_DEVICE_IN_AUX_DIGITAL"/>
|
||||
<xs:enumeration value="AUDIO_DEVICE_IN_HDMI"/>
|
||||
<xs:enumeration value="AUDIO_DEVICE_IN_VOICE_CALL"/>
|
||||
<xs:enumeration value="AUDIO_DEVICE_IN_TELEPHONY_RX"/>
|
||||
<xs:enumeration value="AUDIO_DEVICE_IN_BACK_MIC"/>
|
||||
<xs:enumeration value="AUDIO_DEVICE_IN_REMOTE_SUBMIX"/>
|
||||
<xs:enumeration value="AUDIO_DEVICE_IN_ANLG_DOCK_HEADSET"/>
|
||||
<xs:enumeration value="AUDIO_DEVICE_IN_DGTL_DOCK_HEADSET"/>
|
||||
<xs:enumeration value="AUDIO_DEVICE_IN_USB_ACCESSORY"/>
|
||||
<xs:enumeration value="AUDIO_DEVICE_IN_USB_DEVICE"/>
|
||||
<xs:enumeration value="AUDIO_DEVICE_IN_FM_TUNER"/>
|
||||
<xs:enumeration value="AUDIO_DEVICE_IN_TV_TUNER"/>
|
||||
<xs:enumeration value="AUDIO_DEVICE_IN_LINE"/>
|
||||
<xs:enumeration value="AUDIO_DEVICE_IN_SPDIF"/>
|
||||
<xs:enumeration value="AUDIO_DEVICE_IN_BLUETOOTH_A2DP"/>
|
||||
<xs:enumeration value="AUDIO_DEVICE_IN_LOOPBACK"/>
|
||||
<xs:enumeration value="AUDIO_DEVICE_IN_IP"/>
|
||||
<xs:enumeration value="AUDIO_DEVICE_IN_BUS"/>
|
||||
<xs:enumeration value="AUDIO_DEVICE_IN_PROXY"/>
|
||||
<xs:enumeration value="AUDIO_DEVICE_IN_USB_HEADSET"/>
|
||||
<xs:enumeration value="AUDIO_DEVICE_IN_BLUETOOTH_BLE"/>
|
||||
<xs:enumeration value="AUDIO_DEVICE_IN_HDMI_ARC"/>
|
||||
<xs:enumeration value="AUDIO_DEVICE_IN_ECHO_REFERENCE"/>
|
||||
<xs:enumeration value="AUDIO_DEVICE_IN_DEFAULT"/>
|
||||
<xs:enumeration value="AUDIO_DEVICE_IN_STUB"/>
|
||||
</xs:restriction>
|
||||
</xs:simpleType>
|
||||
<xs:simpleType name="vendorExtension">
|
||||
<!-- Vendor extension names must be prefixed by "VX_" to distinguish them from AOSP values.
|
||||
Vendors are encouraged to namespace their module names to avoid conflicts.
|
||||
Example for a hypothetical Google virtual reality device:
|
||||
<devicePort tagName="VR" type="VX_GOOGLE_VR" role="sink">
|
||||
-->
|
||||
<xs:restriction base="xs:string">
|
||||
<xs:pattern value="VX_[_a-zA-Z0-9]+"/>
|
||||
</xs:restriction>
|
||||
</xs:simpleType>
|
||||
<xs:simpleType name="extendableAudioDevice">
|
||||
<xs:union memberTypes="audioDevice vendorExtension"/>
|
||||
</xs:simpleType>
|
||||
<!-- Enum values of audio_format_t in audio.h
|
||||
TODO: generate from hidl to avoid manual sync. -->
|
||||
<xs:simpleType name="audioFormat">
|
||||
<xs:restriction base="xs:string">
|
||||
<xs:enumeration value="AUDIO_FORMAT_PCM_16_BIT" />
|
||||
<xs:enumeration value="AUDIO_FORMAT_PCM_8_BIT"/>
|
||||
<xs:enumeration value="AUDIO_FORMAT_PCM_32_BIT"/>
|
||||
<xs:enumeration value="AUDIO_FORMAT_PCM_8_24_BIT"/>
|
||||
<xs:enumeration value="AUDIO_FORMAT_PCM_FLOAT"/>
|
||||
<xs:enumeration value="AUDIO_FORMAT_PCM_24_BIT_PACKED"/>
|
||||
<xs:enumeration value="AUDIO_FORMAT_MP3"/>
|
||||
<xs:enumeration value="AUDIO_FORMAT_AMR_NB"/>
|
||||
<xs:enumeration value="AUDIO_FORMAT_AMR_WB"/>
|
||||
<xs:enumeration value="AUDIO_FORMAT_AAC"/>
|
||||
<xs:enumeration value="AUDIO_FORMAT_AAC_MAIN"/>
|
||||
<xs:enumeration value="AUDIO_FORMAT_AAC_LC"/>
|
||||
<xs:enumeration value="AUDIO_FORMAT_AAC_SSR"/>
|
||||
<xs:enumeration value="AUDIO_FORMAT_AAC_LTP"/>
|
||||
<xs:enumeration value="AUDIO_FORMAT_AAC_HE_V1"/>
|
||||
<xs:enumeration value="AUDIO_FORMAT_AAC_SCALABLE"/>
|
||||
<xs:enumeration value="AUDIO_FORMAT_AAC_ERLC"/>
|
||||
<xs:enumeration value="AUDIO_FORMAT_AAC_LD"/>
|
||||
<xs:enumeration value="AUDIO_FORMAT_AAC_HE_V2"/>
|
||||
<xs:enumeration value="AUDIO_FORMAT_AAC_ELD"/>
|
||||
<xs:enumeration value="AUDIO_FORMAT_AAC_ADTS_MAIN"/>
|
||||
<xs:enumeration value="AUDIO_FORMAT_AAC_ADTS_LC"/>
|
||||
<xs:enumeration value="AUDIO_FORMAT_AAC_ADTS_SSR"/>
|
||||
<xs:enumeration value="AUDIO_FORMAT_AAC_ADTS_LTP"/>
|
||||
<xs:enumeration value="AUDIO_FORMAT_AAC_ADTS_HE_V1"/>
|
||||
<xs:enumeration value="AUDIO_FORMAT_AAC_ADTS_SCALABLE"/>
|
||||
<xs:enumeration value="AUDIO_FORMAT_AAC_ADTS_ERLC"/>
|
||||
<xs:enumeration value="AUDIO_FORMAT_AAC_ADTS_LD"/>
|
||||
<xs:enumeration value="AUDIO_FORMAT_AAC_ADTS_HE_V2"/>
|
||||
<xs:enumeration value="AUDIO_FORMAT_AAC_ADTS_ELD"/>
|
||||
<xs:enumeration value="AUDIO_FORMAT_VORBIS"/>
|
||||
<xs:enumeration value="AUDIO_FORMAT_HE_AAC_V1"/>
|
||||
<xs:enumeration value="AUDIO_FORMAT_HE_AAC_V2"/>
|
||||
<xs:enumeration value="AUDIO_FORMAT_OPUS"/>
|
||||
<xs:enumeration value="AUDIO_FORMAT_AC3"/>
|
||||
<xs:enumeration value="AUDIO_FORMAT_E_AC3"/>
|
||||
<xs:enumeration value="AUDIO_FORMAT_DTS"/>
|
||||
<xs:enumeration value="AUDIO_FORMAT_DTS_HD"/>
|
||||
<xs:enumeration value="AUDIO_FORMAT_IEC61937"/>
|
||||
<xs:enumeration value="AUDIO_FORMAT_DOLBY_TRUEHD"/>
|
||||
<xs:enumeration value="AUDIO_FORMAT_EVRC"/>
|
||||
<xs:enumeration value="AUDIO_FORMAT_EVRCB"/>
|
||||
<xs:enumeration value="AUDIO_FORMAT_EVRCWB"/>
|
||||
<xs:enumeration value="AUDIO_FORMAT_EVRCNW"/>
|
||||
<xs:enumeration value="AUDIO_FORMAT_AAC_ADIF"/>
|
||||
<xs:enumeration value="AUDIO_FORMAT_WMA"/>
|
||||
<xs:enumeration value="AUDIO_FORMAT_WMA_PRO"/>
|
||||
<xs:enumeration value="AUDIO_FORMAT_AMR_WB_PLUS"/>
|
||||
<xs:enumeration value="AUDIO_FORMAT_MP2"/>
|
||||
<xs:enumeration value="AUDIO_FORMAT_QCELP"/>
|
||||
<xs:enumeration value="AUDIO_FORMAT_DSD"/>
|
||||
<xs:enumeration value="AUDIO_FORMAT_FLAC"/>
|
||||
<xs:enumeration value="AUDIO_FORMAT_ALAC"/>
|
||||
<xs:enumeration value="AUDIO_FORMAT_APE"/>
|
||||
<xs:enumeration value="AUDIO_FORMAT_AAC_ADTS"/>
|
||||
<xs:enumeration value="AUDIO_FORMAT_SBC"/>
|
||||
<xs:enumeration value="AUDIO_FORMAT_APTX"/>
|
||||
<xs:enumeration value="AUDIO_FORMAT_APTX_HD"/>
|
||||
<xs:enumeration value="AUDIO_FORMAT_AC4"/>
|
||||
<xs:enumeration value="AUDIO_FORMAT_LDAC"/>
|
||||
<xs:enumeration value="AUDIO_FORMAT_E_AC3_JOC"/>
|
||||
<xs:enumeration value="AUDIO_FORMAT_MAT_1_0"/>
|
||||
<xs:enumeration value="AUDIO_FORMAT_MAT_2_0"/>
|
||||
<xs:enumeration value="AUDIO_FORMAT_MAT_2_1"/>
|
||||
<xs:enumeration value="AUDIO_FORMAT_AAC_XHE"/>
|
||||
<xs:enumeration value="AUDIO_FORMAT_AAC_ADTS_XHE"/>
|
||||
<xs:enumeration value="AUDIO_FORMAT_AAC_LATM"/>
|
||||
<xs:enumeration value="AUDIO_FORMAT_AAC_LATM_LC"/>
|
||||
<xs:enumeration value="AUDIO_FORMAT_AAC_LATM_HE_V1"/>
|
||||
<xs:enumeration value="AUDIO_FORMAT_AAC_LATM_HE_V2"/>
|
||||
<xs:enumeration value="AUDIO_FORMAT_CELT"/>
|
||||
<xs:enumeration value="AUDIO_FORMAT_APTX_ADAPTIVE"/>
|
||||
<xs:enumeration value="AUDIO_FORMAT_LHDC"/>
|
||||
<xs:enumeration value="AUDIO_FORMAT_LHDC_LL"/>
|
||||
<xs:enumeration value="AUDIO_FORMAT_APTX_TWSP"/>
|
||||
</xs:restriction>
|
||||
</xs:simpleType>
|
||||
<xs:simpleType name="extendableAudioFormat">
|
||||
<xs:union memberTypes="audioFormat vendorExtension"/>
|
||||
</xs:simpleType>
|
||||
<!-- Enum values of audio::common::4_0::AudioUsage
|
||||
TODO: generate from HIDL to avoid manual sync. -->
|
||||
<xs:simpleType name="audioUsage">
|
||||
<xs:restriction base="xs:string">
|
||||
<xs:enumeration value="AUDIO_USAGE_UNKNOWN" />
|
||||
<xs:enumeration value="AUDIO_USAGE_MEDIA" />
|
||||
<xs:enumeration value="AUDIO_USAGE_VOICE_COMMUNICATION" />
|
||||
<xs:enumeration value="AUDIO_USAGE_VOICE_COMMUNICATION_SIGNALLING" />
|
||||
<xs:enumeration value="AUDIO_USAGE_ALARM" />
|
||||
<xs:enumeration value="AUDIO_USAGE_NOTIFICATION" />
|
||||
<xs:enumeration value="AUDIO_USAGE_NOTIFICATION_TELEPHONY_RINGTONE" />
|
||||
<xs:enumeration value="AUDIO_USAGE_ASSISTANCE_ACCESSIBILITY" />
|
||||
<xs:enumeration value="AUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE" />
|
||||
<xs:enumeration value="AUDIO_USAGE_ASSISTANCE_SONIFICATION" />
|
||||
<xs:enumeration value="AUDIO_USAGE_GAME" />
|
||||
<xs:enumeration value="AUDIO_USAGE_VIRTUAL_SOURCE" />
|
||||
<xs:enumeration value="AUDIO_USAGE_ASSISTANT" />
|
||||
</xs:restriction>
|
||||
</xs:simpleType>
|
||||
<xs:simpleType name="audioUsageList">
|
||||
<xs:list itemType="audioUsage"/>
|
||||
</xs:simpleType>
|
||||
<!-- TODO: Change to a space-separated list so that the XSD can enforce correctness. -->
|
||||
<xs:simpleType name="samplingRates">
|
||||
<xs:restriction base="xs:string">
|
||||
<xs:pattern value="[0-9]+(,[0-9]+)*"/>
|
||||
</xs:restriction>
|
||||
</xs:simpleType>
|
||||
<!-- TODO: Change to a space-separated list so that the XSD can enforce correctness. -->
|
||||
<xs:simpleType name="channelMask">
|
||||
<xs:annotation>
|
||||
<xs:documentation xml:lang="en">
|
||||
Comma (",") separated list of channel flags
|
||||
from audio_channel_mask_t.
|
||||
</xs:documentation>
|
||||
</xs:annotation>
|
||||
<xs:restriction base="xs:string">
|
||||
<xs:pattern value="[_A-Z][_A-Z0-9]*(,[_A-Z][_A-Z0-9]*)*"/>
|
||||
</xs:restriction>
|
||||
</xs:simpleType>
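Both samplingRates and channelMask above are plain comma-separated strings (hence the TODO notes about switching to space-separated lists), so a configuration parser has to split them itself. The following is a purely illustrative C++ sketch of such a split; the actual AOSP config parser is not shown here and may differ.

// Illustrative only: split a comma-separated attribute value such as
// "48000,44100" or "AUDIO_CHANNEL_OUT_STEREO,AUDIO_CHANNEL_OUT_MONO"
// into its individual tokens.
#include <sstream>
#include <string>
#include <vector>

std::vector<std::string> splitCommaList(const std::string& value) {
    std::vector<std::string> tokens;
    std::stringstream ss(value);
    std::string item;
    while (std::getline(ss, item, ',')) {
        if (!item.empty()) tokens.push_back(item);
    }
    return tokens;
}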
|
||||
<xs:complexType name="profile">
|
||||
<xs:attribute name="name" type="xs:token" use="optional"/>
|
||||
<xs:attribute name="format" type="extendableAudioFormat" use="optional"/>
|
||||
<xs:attribute name="samplingRates" type="samplingRates" use="optional"/>
|
||||
<xs:attribute name="channelMasks" type="channelMask" use="optional"/>
|
||||
</xs:complexType>
|
||||
<xs:simpleType name="gainMode">
|
||||
<xs:restriction base="xs:string">
|
||||
<xs:enumeration value="AUDIO_GAIN_MODE_JOINT"/>
|
||||
<xs:enumeration value="AUDIO_GAIN_MODE_CHANNELS"/>
|
||||
<xs:enumeration value="AUDIO_GAIN_MODE_RAMP"/>
|
||||
</xs:restriction>
|
||||
</xs:simpleType>
|
||||
<xs:complexType name="gains">
|
||||
<xs:sequence>
|
||||
<xs:element name="gain" minOccurs="0" maxOccurs="unbounded">
|
||||
<xs:complexType>
|
||||
<xs:attribute name="name" type="xs:token" use="required"/>
|
||||
<xs:attribute name="mode" type="gainMode" use="required"/>
|
||||
<xs:attribute name="channel_mask" type="channelMask" use="optional"/>
|
||||
<xs:attribute name="minValueMB" type="xs:int" use="optional"/>
|
||||
<xs:attribute name="maxValueMB" type="xs:int" use="optional"/>
|
||||
<xs:attribute name="defaultValueMB" type="xs:int" use="optional"/>
|
||||
<xs:attribute name="stepValueMB" type="xs:int" use="optional"/>
|
||||
<xs:attribute name="minRampMs" type="xs:int" use="optional"/>
|
||||
<xs:attribute name="maxRampMs" type="xs:int" use="optional"/>
|
||||
<xs:attribute name="useForVolume" type="xs:boolean" use="optional"/>
|
||||
</xs:complexType>
|
||||
</xs:element>
|
||||
</xs:sequence>
|
||||
</xs:complexType>
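All of the *ValueMB attributes above are expressed in millibels, i.e. hundredths of a decibel. Below is a hedged sketch of the conversions an implementation might perform on these values; the helper names are made up for illustration and are not part of AOSP.

// Illustrative only: convert a gain in millibels (1 mB = 1/100 dB) into a
// linear amplitude factor, and clamp a requested gain to the range declared
// in the <gain> element.
#include <algorithm>
#include <cmath>

float millibelsToLinear(int gainMb) {
    // Amplitude uses 20 dB per decade, so divide by 100 * 20 = 2000.
    return std::pow(10.0f, gainMb / 2000.0f);
}

int clampGainMb(int requestedMb, int minValueMb, int maxValueMb) {
    return std::min(std::max(requestedMb, minValueMb), maxValueMb);
}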
|
||||
<xs:complexType name="devicePorts">
|
||||
<xs:sequence>
|
||||
<xs:element name="devicePort" minOccurs="0" maxOccurs="unbounded">
|
||||
<xs:complexType>
|
||||
<xs:sequence>
|
||||
<xs:element name="profile" type="profile" minOccurs="0" maxOccurs="unbounded"/>
|
||||
<xs:element name="gains" type="gains" minOccurs="0"/>
|
||||
</xs:sequence>
|
||||
<xs:attribute name="tagName" type="xs:token" use="required"/>
|
||||
<xs:attribute name="type" type="extendableAudioDevice" use="required"/>
|
||||
<xs:attribute name="role" type="role" use="required"/>
|
||||
<xs:attribute name="address" type="xs:string" use="optional" default=""/>
|
||||
<!-- Note that XSD 1.0 cannot check that only one device of a given type is marked as default. -->
|
||||
<xs:attribute name="default" type="xs:boolean" use="optional">
|
||||
<xs:annotation>
|
||||
<xs:documentation xml:lang="en">
|
||||
The default device will be used if multiple devices have the same type
|
||||
and no explicit route request exists for a specific device of
|
||||
that type.
|
||||
</xs:documentation>
|
||||
</xs:annotation>
|
||||
</xs:attribute>
|
||||
<xs:attribute name="encodedFormats" type="audioFormatsList" use="optional"
|
||||
default="" />
|
||||
</xs:complexType>
|
||||
<xs:unique name="devicePortProfileUniqueness">
|
||||
<xs:selector xpath="profile"/>
|
||||
<xs:field xpath="format"/>
|
||||
<xs:field xpath="samplingRate"/>
|
||||
<xs:field xpath="channelMasks"/>
|
||||
</xs:unique>
|
||||
<xs:unique name="devicePortGainUniqueness">
|
||||
<xs:selector xpath="gains/gain"/>
|
||||
<xs:field xpath="@name"/>
|
||||
</xs:unique>
|
||||
</xs:element>
|
||||
</xs:sequence>
|
||||
</xs:complexType>
|
||||
<xs:simpleType name="mixType">
|
||||
<xs:restriction base="xs:string">
|
||||
<xs:enumeration value="mix"/>
|
||||
<xs:enumeration value="mux"/>
|
||||
</xs:restriction>
|
||||
</xs:simpleType>
|
||||
<xs:complexType name="routes">
|
||||
<xs:sequence>
|
||||
<xs:element name="route" minOccurs="0" maxOccurs="unbounded">
|
||||
<xs:annotation>
|
||||
<xs:documentation xml:lang="en">
|
||||
List all available sources for a given sink.
|
||||
</xs:documentation>
|
||||
</xs:annotation>
|
||||
<xs:complexType>
|
||||
<xs:attribute name="type" type="mixType" use="required"/>
|
||||
<xs:attribute name="sink" type="xs:string" use="required"/>
|
||||
<xs:attribute name="sources" type="xs:string" use="required"/>
|
||||
</xs:complexType>
|
||||
</xs:element>
|
||||
</xs:sequence>
|
||||
</xs:complexType>
|
||||
<xs:complexType name="volumes">
|
||||
<xs:sequence>
|
||||
<xs:element name="volume" type="volume" minOccurs="0" maxOccurs="unbounded"/>
|
||||
<xs:element name="reference" type="reference" minOccurs="0" maxOccurs="unbounded">
|
||||
</xs:element>
|
||||
</xs:sequence>
|
||||
</xs:complexType>
|
||||
<!-- TODO: Always require a ref for better xsd validations.
|
||||
Currently a volume may have neither points nor a ref,
|
||||
as this cannot be forbidden by XSD 1.0. -->
|
||||
<xs:simpleType name="volumePoint">
|
||||
<xs:annotation>
|
||||
<xs:documentation xml:lang="en">
|
||||
Comma-separated pair of numbers.
|
||||
The first one is the framework volume level (between 0 and 100).
|
||||
The second one is the volume to send to the HAL.
|
||||
The framework will interpolate volumes not specified.
|
||||
There MUST be at least 2 points specified.
|
||||
</xs:documentation>
|
||||
</xs:annotation>
|
||||
<xs:restriction base="xs:string">
|
||||
<xs:pattern value="([0-9]{1,2}|100),-?[0-9]+"/>
|
||||
</xs:restriction>
|
||||
</xs:simpleType>
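Because the framework interpolates between the declared points, two points already describe a complete straight-line curve. The sketch below is a purely illustrative linear interpolation over such (index, millibel) pairs; the framework's actual curve handling is not shown here.

// Illustrative only: linearly interpolate an attenuation in millibels for a
// framework volume index, given curve points declared in the XML
// (index in [0, 100], attenuation in mB). Assumes points sorted by index.
#include <utility>
#include <vector>

int interpolateVolumeMb(const std::vector<std::pair<int, int>>& points, int index) {
    if (points.empty()) return 0;
    if (index <= points.front().first) return points.front().second;
    if (index >= points.back().first) return points.back().second;
    for (size_t i = 1; i < points.size(); ++i) {
        if (index <= points[i].first) {
            const auto& [x0, y0] = points[i - 1];
            const auto& [x1, y1] = points[i];
            return y0 + (y1 - y0) * (index - x0) / (x1 - x0);
        }
    }
    return points.back().second;
}

With the example points (0,-9600) and (100,0) used in the volume documentation further below, index 50 maps to -4800 mB, i.e. -48 dB of attenuation.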
|
||||
<!-- Enum values of audio_stream_type_t in audio-base.h
|
||||
TODO: generate from hidl to avoid manual sync. -->
|
||||
<xs:simpleType name="stream">
|
||||
<xs:restriction base="xs:string">
|
||||
<xs:enumeration value="AUDIO_STREAM_VOICE_CALL"/>
|
||||
<xs:enumeration value="AUDIO_STREAM_SYSTEM"/>
|
||||
<xs:enumeration value="AUDIO_STREAM_RING"/>
|
||||
<xs:enumeration value="AUDIO_STREAM_MUSIC"/>
|
||||
<xs:enumeration value="AUDIO_STREAM_ALARM"/>
|
||||
<xs:enumeration value="AUDIO_STREAM_NOTIFICATION"/>
|
||||
<xs:enumeration value="AUDIO_STREAM_BLUETOOTH_SCO"/>
|
||||
<xs:enumeration value="AUDIO_STREAM_ENFORCED_AUDIBLE"/>
|
||||
<xs:enumeration value="AUDIO_STREAM_DTMF"/>
|
||||
<xs:enumeration value="AUDIO_STREAM_TTS"/>
|
||||
<xs:enumeration value="AUDIO_STREAM_ACCESSIBILITY"/>
|
||||
<xs:enumeration value="AUDIO_STREAM_ASSISTANT"/>
|
||||
<xs:enumeration value="AUDIO_STREAM_REROUTING"/>
|
||||
<xs:enumeration value="AUDIO_STREAM_PATCH"/>
|
||||
</xs:restriction>
|
||||
</xs:simpleType>
|
||||
<!-- Enum values of device_category from Volume.h.
|
||||
TODO: generate from hidl to avoid manual sync. -->
|
||||
<xs:simpleType name="deviceCategory">
|
||||
<xs:restriction base="xs:string">
|
||||
<xs:enumeration value="DEVICE_CATEGORY_HEADSET"/>
|
||||
<xs:enumeration value="DEVICE_CATEGORY_SPEAKER"/>
|
||||
<xs:enumeration value="DEVICE_CATEGORY_EARPIECE"/>
|
||||
<xs:enumeration value="DEVICE_CATEGORY_EXT_MEDIA"/>
|
||||
<xs:enumeration value="DEVICE_CATEGORY_HEARING_AID"/>
|
||||
</xs:restriction>
|
||||
</xs:simpleType>
|
||||
<xs:complexType name="volume">
|
||||
<xs:annotation>
|
||||
<xs:documentation xml:lang="en">
|
||||
Volume section defines a volume curve for a given use case and device category.
|
||||
It contains a list of points of this curve expressing the attenuation in millibels
|
||||
for a given volume index from 0 to 100.
|
||||
<volume stream="AUDIO_STREAM_MUSIC" deviceCategory="DEVICE_CATEGORY_SPEAKER">
|
||||
<point>0,-9600</point>
|
||||
<point>100,0</point>
|
||||
</volume>
|
||||
|
||||
It may also reference a reference/@name to avoid duplicating curves.
|
||||
<volume stream="AUDIO_STREAM_MUSIC" deviceCategory="DEVICE_CATEGORY_SPEAKER"
|
||||
ref="DEFAULT_MEDIA_VOLUME_CURVE"/>
|
||||
<reference name="DEFAULT_MEDIA_VOLUME_CURVE">
|
||||
<point>0,-9600</point>
|
||||
<point>100,0</point>
|
||||
</reference>
|
||||
</xs:documentation>
|
||||
</xs:annotation>
|
||||
<xs:sequence>
|
||||
<xs:element name="point" type="volumePoint" minOccurs="0" maxOccurs="unbounded"/>
|
||||
</xs:sequence>
|
||||
<xs:attribute name="stream" type="stream"/>
|
||||
<xs:attribute name="deviceCategory" type="deviceCategory"/>
|
||||
<xs:attribute name="ref" type="xs:token" use="optional"/>
|
||||
</xs:complexType>
|
||||
<xs:complexType name="reference">
|
||||
<xs:sequence>
|
||||
<xs:element name="point" type="volumePoint" minOccurs="2" maxOccurs="unbounded"/>
|
||||
</xs:sequence>
|
||||
<xs:attribute name="name" type="xs:token" use="required"/>
|
||||
</xs:complexType>
|
||||
<xs:complexType name="surroundSound">
|
||||
<xs:annotation>
|
||||
<xs:documentation xml:lang="en">
|
||||
Surround Sound section provides configuration related to handling of
|
||||
multi-channel formats.
|
||||
</xs:documentation>
|
||||
</xs:annotation>
|
||||
<xs:sequence>
|
||||
<xs:element name="formats" type="surroundFormats"/>
|
||||
</xs:sequence>
|
||||
</xs:complexType>
|
||||
<xs:simpleType name="audioFormatsList">
|
||||
<xs:list itemType="audioFormat" />
|
||||
</xs:simpleType>
|
||||
<xs:complexType name="surroundFormats">
|
||||
<xs:sequence>
|
||||
<xs:element name="format" minOccurs="0" maxOccurs="unbounded">
|
||||
<xs:complexType>
|
||||
<xs:attribute name="name" type="audioFormat" use="required"/>
|
||||
<xs:attribute name="subformats" type="audioFormatsList" />
|
||||
</xs:complexType>
|
||||
</xs:element>
|
||||
</xs:sequence>
|
||||
</xs:complexType>
|
||||
<xs:simpleType name="engineSuffix">
|
||||
<xs:restriction base="xs:string">
|
||||
<xs:enumeration value="default"/>
|
||||
<xs:enumeration value="configurable"/>
|
||||
</xs:restriction>
|
||||
</xs:simpleType>
|
||||
</xs:schema>
|
357
audio/7.0/types.hal
Normal file
|
@ -0,0 +1,357 @@
|
|||
/*
|
||||
* Copyright (C) 2020 The Android Open Source Project
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package android.hardware.audio@7.0;
|
||||
|
||||
import android.hardware.audio.common@7.0;
|
||||
|
||||
enum Result : int32_t {
|
||||
OK,
|
||||
NOT_INITIALIZED,
|
||||
INVALID_ARGUMENTS,
|
||||
INVALID_STATE,
|
||||
/**
|
||||
* Methods marked as "Optional method" must return this result value
|
||||
* if the operation is not supported by HAL.
|
||||
*/
|
||||
NOT_SUPPORTED
|
||||
};
|
||||
|
||||
@export(name="audio_drain_type_t", value_prefix="AUDIO_DRAIN_")
|
||||
enum AudioDrain : int32_t {
|
||||
/** drain() returns when all data has been played. */
|
||||
ALL,
|
||||
/**
|
||||
* drain() returns a short time before all data from the current track has
|
||||
* been played to give time for gapless track switch.
|
||||
*/
|
||||
EARLY_NOTIFY
|
||||
};
|
||||
|
||||
/**
|
||||
* A substitute for POSIX timespec.
|
||||
*/
|
||||
struct TimeSpec {
|
||||
uint64_t tvSec; // seconds
|
||||
uint64_t tvNSec; // nanoseconds
|
||||
};
|
||||
|
||||
struct ParameterValue {
|
||||
string key;
|
||||
string value;
|
||||
};
|
||||
|
||||
enum MmapBufferFlag : uint32_t {
|
||||
NONE = 0x0,
|
||||
/**
|
||||
* If the buffer can be securely shared with untrusted applications
|
||||
* through the AAudio exclusive mode.
|
||||
* Only set this flag if applications are restricted from accessing the
|
||||
* memory surrounding the audio data buffer by a kernel mechanism.
|
||||
* See Linux kernel's dma_buf.
|
||||
*/
|
||||
APPLICATION_SHAREABLE = 0x1,
|
||||
};
|
||||
|
||||
/**
|
||||
* Mmap buffer descriptor returned by IStream.createMmapBuffer().
|
||||
* Used by streams opened in mmap mode.
|
||||
*/
|
||||
struct MmapBufferInfo {
|
||||
/** Mmap memory buffer */
|
||||
memory sharedMemory;
|
||||
/** Total buffer size in frames */
|
||||
uint32_t bufferSizeFrames;
|
||||
/** Transfer size granularity in frames */
|
||||
uint32_t burstSizeFrames;
|
||||
/** Attributes describing the buffer. */
|
||||
bitfield<MmapBufferFlag> flags;
|
||||
};
|
||||
|
||||
/**
|
||||
* Mmap buffer read/write position returned by IStream.getMmapPosition().
|
||||
* Used by streams opened in mmap mode.
|
||||
*/
|
||||
struct MmapPosition {
|
||||
int64_t timeNanoseconds; // time stamp in ns, CLOCK_MONOTONIC
|
||||
int32_t positionFrames; // increasing 32 bit frame count reset when IStream.stop() is called
|
||||
};
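Two MmapPosition samples taken while the stream is running are enough to estimate the rate at which the hardware is actually consuming or producing frames. A hedged sketch using a hypothetical plain-C++ mirror of the structure:

// Illustrative only: estimate the observed frame rate from two position
// samples. positionFrames is a frame count and timeNanoseconds a
// CLOCK_MONOTONIC timestamp, as documented above.
#include <cstdint>

struct MmapPositionSample {
    int64_t timeNanoseconds;  // hypothetical mirror of MmapPosition
    int32_t positionFrames;
};

double observedFrameRateHz(const MmapPositionSample& earlier,
                           const MmapPositionSample& later) {
    const double deltaFrames = later.positionFrames - earlier.positionFrames;
    const double deltaSeconds =
            (later.timeNanoseconds - earlier.timeNanoseconds) / 1e9;
    return deltaSeconds > 0 ? deltaFrames / deltaSeconds : 0.0;
}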
|
||||
|
||||
/**
|
||||
* The message queue flags used to synchronize reads and writes from
|
||||
* message queues used by StreamIn and StreamOut.
|
||||
*/
|
||||
enum MessageQueueFlagBits : uint32_t {
|
||||
NOT_EMPTY = 1 << 0,
|
||||
NOT_FULL = 1 << 1
|
||||
};
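NOT_EMPTY and NOT_FULL are independent bits and may be set simultaneously. A tiny illustrative sketch of testing them on a raw state word; the real synchronization goes through the fast message queue's event flag mechanism, which is not shown here.

// Illustrative only: testing MessageQueueFlagBits-style flags on a raw
// 32-bit word. Values mirror the enum above.
#include <cstdint>

constexpr uint32_t kNotEmpty = 1u << 0;  // MessageQueueFlagBits::NOT_EMPTY
constexpr uint32_t kNotFull = 1u << 1;   // MessageQueueFlagBits::NOT_FULL

bool canRead(uint32_t flags) { return (flags & kNotEmpty) != 0; }
bool canWrite(uint32_t flags) { return (flags & kNotFull) != 0; }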
|
||||
|
||||
/*
|
||||
* Microphone information
|
||||
*
|
||||
*/
|
||||
|
||||
/**
|
||||
* A 3D point used to represent position or orientation of a microphone.
|
||||
*
|
||||
* Position: Coordinates of the microphone's capsule, in meters, from the
|
||||
* bottom-left-back corner of the bounding box of android device in natural
|
||||
* orientation (PORTRAIT for phones, LANDSCAPE for tablets, TVs, etc.).
|
||||
* The orientation must match the one reported by the API Display.getRotation().
|
||||
*
|
||||
* Orientation: Normalized vector to signal the main orientation of the
|
||||
* microphone's capsule. Magnitude = sqrt(x^2 + y^2 + z^2) = 1
|
||||
*/
|
||||
struct AudioMicrophoneCoordinate {
|
||||
float x;
|
||||
float y;
|
||||
float z;
|
||||
};
|
||||
|
||||
/**
|
||||
* Enum to identify the type of channel mapping for active microphones.
|
||||
* Used channels further identify if the microphone has any significant
|
||||
* processing (e.g. high-pass filtering, dynamic compression).
|
||||
* Simple processing such as constant gain adjustment must be reported as DIRECT.
|
||||
*/
|
||||
enum AudioMicrophoneChannelMapping : uint32_t {
|
||||
UNUSED = 0, /* Channel not used */
|
||||
DIRECT = 1, /* Channel used and signal not processed */
|
||||
PROCESSED = 2, /* Channel used and signal has some process */
|
||||
};
|
||||
|
||||
/**
|
||||
* Enum to identify locations of microphones with respect to the body of the
|
||||
* android device.
|
||||
*/
|
||||
enum AudioMicrophoneLocation : uint32_t {
|
||||
UNKNOWN = 0,
|
||||
MAINBODY = 1,
|
||||
MAINBODY_MOVABLE = 2,
|
||||
PERIPHERAL = 3,
|
||||
};
|
||||
|
||||
/**
|
||||
* Identifier to help group related microphones together
|
||||
* e.g. microphone arrays should belong to the same group
|
||||
*/
|
||||
typedef int32_t AudioMicrophoneGroup;
|
||||
|
||||
/**
|
||||
* Enum with standard polar patterns of microphones
|
||||
*/
|
||||
enum AudioMicrophoneDirectionality : uint32_t {
|
||||
UNKNOWN = 0,
|
||||
OMNI = 1,
|
||||
BI_DIRECTIONAL = 2,
|
||||
CARDIOID = 3,
|
||||
HYPER_CARDIOID = 4,
|
||||
SUPER_CARDIOID = 5,
|
||||
};
|
||||
|
||||
/**
|
||||
* A (frequency, level) pair. Used to represent frequency response.
|
||||
*/
|
||||
struct AudioFrequencyResponsePoint {
|
||||
/** In Hz */
|
||||
float frequency;
|
||||
/** In dB */
|
||||
float level;
|
||||
};
|
||||
|
||||
/**
|
||||
* Structure used by the HAL to describe microphone's characteristics
|
||||
* Used by StreamIn and Device
|
||||
*/
|
||||
struct MicrophoneInfo {
|
||||
/** Unique alphanumeric id for microphone. Guaranteed to be the same
|
||||
* even after rebooting.
|
||||
*/
|
||||
string deviceId;
|
||||
/**
|
||||
* Device specific information
|
||||
*/
|
||||
DeviceAddress deviceAddress;
|
||||
/** Each element of the vector must describe the channel with the same
|
||||
* index.
|
||||
*/
|
||||
vec<AudioMicrophoneChannelMapping> channelMapping;
|
||||
/** Location of the microphone in regard to the body of the device */
|
||||
AudioMicrophoneLocation location;
|
||||
/** Identifier to help group related microphones together
|
||||
* e.g. microphone arrays should belong to the same group
|
||||
*/
|
||||
AudioMicrophoneGroup group;
|
||||
/** Index of this microphone within the group.
|
||||
* (group, index) must be unique within the same device.
|
||||
*/
|
||||
uint32_t indexInTheGroup;
|
||||
/** Level in dBFS produced by a 1000 Hz tone at 94 dB SPL */
|
||||
float sensitivity;
|
||||
/** Level in dB of the max SPL supported at 1000 Hz */
|
||||
float maxSpl;
|
||||
/** Level in dB of the min SPL supported at 1000 Hz */
|
||||
float minSpl;
|
||||
/** Standard polar pattern of the microphone */
|
||||
AudioMicrophoneDirectionality directionality;
|
||||
/** Vector of frequency response points, ordered from low to high frequency,
|
||||
* describing the frequency response of the microphone.
|
||||
* Levels are in dB, relative to level at 1000 Hz
|
||||
*/
|
||||
vec<AudioFrequencyResponsePoint> frequencyResponse;
|
||||
/** Position of the microphone's capsule in meters, from the
|
||||
* bottom-left-back corner of the bounding box of device.
|
||||
*/
|
||||
AudioMicrophoneCoordinate position;
|
||||
/** Normalized point to signal the main orientation of the microphone's
|
||||
* capsule. sqrt(x^2 + y^2 + z^2) = 1
|
||||
*/
|
||||
AudioMicrophoneCoordinate orientation;
|
||||
};
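Several fields above carry invariants that are easy to violate when populating the structure: orientation must be a unit vector, and channelMapping must have one entry per channel of the stream. A hedged validation sketch over a hypothetical plain-C++ mirror of the relevant fields:

// Illustrative only: sanity checks for microphone metadata, using a
// hypothetical plain struct instead of the generated HIDL types.
#include <cmath>
#include <cstddef>
#include <vector>

struct MicCoordinate { float x, y, z; };

struct MicInfoSketch {
    std::vector<int> channelMapping;  // one entry per audio channel
    MicCoordinate orientation;        // must be a unit vector
};

bool isUnitVector(const MicCoordinate& v, float tolerance = 1e-3f) {
    const float magnitude = std::sqrt(v.x * v.x + v.y * v.y + v.z * v.z);
    return std::fabs(magnitude - 1.0f) <= tolerance;
}

bool looksValid(const MicInfoSketch& mic, size_t channelCount) {
    return mic.channelMapping.size() == channelCount && isUnitVector(mic.orientation);
}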
|
||||
|
||||
/**
|
||||
* Constants used by the HAL to determine how to select microphones and process those inputs in
|
||||
* order to optimize for capture in the specified direction.
|
||||
*
|
||||
* MicrophoneDirection constants are defined in MicrophoneDirection.java.
|
||||
*/
|
||||
@export(name="audio_microphone_direction_t", value_prefix="MIC_DIRECTION_")
|
||||
enum MicrophoneDirection : int32_t {
|
||||
/**
|
||||
* Don't do any directionality processing of the activated microphone(s).
|
||||
*/
|
||||
UNSPECIFIED = 0,
|
||||
/**
|
||||
* Optimize capture for audio coming from the screen-side of the device.
|
||||
*/
|
||||
FRONT = 1,
|
||||
/**
|
||||
* Optimize capture for audio coming from the side of the device opposite the screen.
|
||||
*/
|
||||
BACK = 2,
|
||||
/**
|
||||
* Optimize capture for audio coming from an off-device microphone.
|
||||
*/
|
||||
EXTERNAL = 3,
|
||||
};
|
||||
|
||||
|
||||
/* Dual Mono handling is used when a stereo audio stream
|
||||
* contains separate audio content on the left and right channels.
|
||||
* Such information about the content of the stream may be found, for example,
|
||||
* in ITU T-REC-J.94-201610 A.6.2.3 Component descriptor.
|
||||
*/
|
||||
@export(name="audio_dual_mono_mode_t", value_prefix="AUDIO_DUAL_MONO_MODE_")
|
||||
enum DualMonoMode : int32_t {
|
||||
// Need to be in sync with DUAL_MONO_MODE* constants in
|
||||
// frameworks/base/media/java/android/media/AudioTrack.java
|
||||
/**
|
||||
* Disable any Dual Mono presentation effect.
|
||||
*
|
||||
*/
|
||||
OFF = 0,
|
||||
/**
|
||||
* This mode indicates that a stereo stream should be presented
|
||||
* with the left and right audio channels blended together
|
||||
* and delivered to both channels.
|
||||
*
|
||||
* Behavior for non-stereo streams is implementation defined.
|
||||
* A suggested guideline is that the left-right stereo symmetric
|
||||
* channels are pairwise blended, while other channels such as the center
|
||||
* are left alone.
|
||||
*/
|
||||
LR = 1,
|
||||
/**
|
||||
* This mode indicates that a stereo stream should be presented
|
||||
* with the left audio channel replicated into the right audio channel.
|
||||
*
|
||||
* Behavior for non-stereo streams is implementation defined.
|
||||
* A suggested guideline is that all channels with left-right
|
||||
* stereo symmetry will have the left channel position replicated
|
||||
* into the right channel position. The center channels (with no
|
||||
* left/right symmetry) or unbalanced channels are left alone.
|
||||
*/
|
||||
LL = 2,
|
||||
/**
|
||||
* This mode indicates that a stereo stream should be presented
|
||||
* with the right audio channel replicated into the left audio channel.
|
||||
*
|
||||
* Behavior for non-stereo streams is implementation defined.
|
||||
* A suggested guideline is that all channels with left-right
|
||||
* stereo symmetry will have the right channel position replicated
|
||||
* into the left channel position. The center channels (with no
|
||||
* left/right symmetry) or unbalanced channels are left alone.
|
||||
*/
|
||||
RR = 3,
|
||||
};
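For an interleaved stereo buffer the three active modes reduce to simple per-frame operations. A purely illustrative sketch of that reduction, using a hypothetical mirror of the enum; the suggested handling of non-stereo layouts described above is not covered.

// Illustrative only: apply a DualMonoMode-like policy to an interleaved
// stereo float buffer. LR blends both channels, LL copies left into right,
// RR copies right into left; OFF leaves the buffer untouched.
#include <cstddef>

enum class DualMono { OFF, LR, LL, RR };  // hypothetical mirror of the enum

void applyDualMono(float* interleaved, size_t frameCount, DualMono mode) {
    for (size_t i = 0; i < frameCount; ++i) {
        float& left = interleaved[2 * i];
        float& right = interleaved[2 * i + 1];
        switch (mode) {
            case DualMono::LR: left = right = 0.5f * (left + right); break;
            case DualMono::LL: right = left; break;
            case DualMono::RR: left = right; break;
            case DualMono::OFF: break;
        }
    }
}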
|
||||
|
||||
/**
|
||||
* Algorithms used for timestretching (preserving pitch while playing audio
|
||||
* content at different speed).
|
||||
*/
|
||||
@export(name="audio_timestretch_stretch_mode_t", value_prefix="AUDIO_TIMESTRETCH_STRETCH_")
|
||||
enum TimestretchMode : int32_t {
|
||||
// Need to be in sync with AUDIO_STRETCH_MODE_* constants in
|
||||
// frameworks/base/media/java/android/media/PlaybackParams.java
|
||||
DEFAULT = 0,
|
||||
/** Selects timestretch algorithm best suitable for voice (speech) content. */
|
||||
VOICE = 1,
|
||||
};
|
||||
|
||||
/**
|
||||
* Behavior when the values for speed and / or pitch are out
|
||||
* of applicable range.
|
||||
*/
|
||||
@export(name="audio_timestretch_fallback_mode_t", value_prefix="AUDIO_TIMESTRETCH_FALLBACK_")
|
||||
enum TimestretchFallbackMode : int32_t {
|
||||
// Need to be in sync with AUDIO_FALLBACK_MODE_* constants in
|
||||
// frameworks/base/media/java/android/media/PlaybackParams.java
|
||||
/** Play silence for parameter values that are out of range. */
|
||||
MUTE = 1,
|
||||
/** Return an error while trying to set the parameters. */
|
||||
FAIL = 2,
|
||||
};
|
||||
|
||||
/**
|
||||
* Parameters determining playback behavior. They are used to speed up or
|
||||
* slow down playback and / or change the tonal frequency of the audio content
|
||||
* (pitch).
|
||||
*/
|
||||
struct PlaybackRate {
|
||||
/**
|
||||
* Speed factor (multiplier). Normal speed has the value of 1.0f.
|
||||
* Values less than 1.0f slow down playback, values greater than 1.0f
|
||||
* speed it up.
|
||||
*/
|
||||
float speed;
|
||||
/**
|
||||
* Pitch factor (multiplier). Setting pitch value to 1.0f together
|
||||
* with changing playback speed preserves the pitch; this is often
|
||||
* called "timestretching." Setting the pitch value equal to speed produces
|
||||
* the same effect as playing audio content at a different sampling rate.
|
||||
*/
|
||||
float pitch;
|
||||
/**
|
||||
* Selects the algorithm used for timestretching (preserving pitch while
|
||||
* playing audio at different speed).
|
||||
*/
|
||||
TimestretchMode timestretchMode;
|
||||
/**
|
||||
* Selects the behavior when the specified values for speed and / or pitch
|
||||
* are out of applicable range.
|
||||
*/
|
||||
TimestretchFallbackMode fallbackMode;
|
||||
};
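The separation of speed and pitch is what distinguishes timestretching from a plain rate change. A short illustrative sketch of the two typical parameter sets, using a hypothetical plain-C++ mirror of the structure:

// Illustrative only: two common playback-rate configurations, expressed on a
// hypothetical plain struct (the mode enums are omitted for brevity).
struct PlaybackRateSketch {
    float speed;
    float pitch;
};

// 1.25x playback that preserves pitch ("timestretching"): pitch stays 1.0f.
constexpr PlaybackRateSketch kTimestretch{1.25f, 1.0f};

// 1.25x playback that behaves like playing at a different sampling rate:
// pitch is set equal to speed, so the tonal frequency shifts as well.
constexpr PlaybackRateSketch kRateChange{1.25f, 1.25f};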
|
14
audio/common/7.0/Android.bp
Normal file
|
@ -0,0 +1,14 @@
|
|||
// This file is autogenerated by hidl-gen -Landroidbp.
|
||||
|
||||
hidl_interface {
|
||||
name: "android.hardware.audio.common@7.0",
|
||||
root: "android.hardware",
|
||||
srcs: [
|
||||
"types.hal",
|
||||
],
|
||||
interfaces: [
|
||||
"android.hidl.safe_union@1.0",
|
||||
],
|
||||
gen_java: true,
|
||||
gen_java_constants: true,
|
||||
}
|
1191
audio/common/7.0/types.hal
Normal file
File diff suppressed because it is too large
|
@ -117,3 +117,16 @@ cc_library_shared {
|
|||
"-include common/all-versions/VersionMacro.h",
|
||||
]
|
||||
}
|
||||
|
||||
cc_library_shared {
|
||||
name: "android.hardware.audio.common@7.0-util",
|
||||
defaults: ["android.hardware.audio.common-util_default"],
|
||||
shared_libs: [
|
||||
"android.hardware.audio.common@7.0",
|
||||
],
|
||||
cflags: [
|
||||
"-DMAJOR_VERSION=7",
|
||||
"-DMINOR_VERSION=0",
|
||||
"-include common/all-versions/VersionMacro.h",
|
||||
]
|
||||
}
|
||||
|
|
|
@ -123,3 +123,18 @@ cc_library_shared {
|
|||
name: "android.hardware.audio@6.0-impl",
|
||||
defaults: ["android.hardware.audio@6.0-impl_default"],
|
||||
}
|
||||
|
||||
cc_library_shared {
|
||||
name: "android.hardware.audio@7.0-impl",
|
||||
defaults: ["android.hardware.audio-impl_default"],
|
||||
shared_libs: [
|
||||
"android.hardware.audio@7.0",
|
||||
"android.hardware.audio.common@7.0",
|
||||
"android.hardware.audio.common@7.0-util",
|
||||
],
|
||||
cflags: [
|
||||
"-DMAJOR_VERSION=7",
|
||||
"-DMINOR_VERSION=0",
|
||||
"-include common/all-versions/VersionMacro.h",
|
||||
],
|
||||
}
|
||||
|
|
|
@ -0,0 +1,18 @@
|
|||
/*
|
||||
* Copyright (C) 2020 The Android Open Source Project
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
// pull in all the <= 6.0 tests
|
||||
#include "6.0/AudioPrimaryHidlHalTest.cpp"
|
|
@ -128,3 +128,26 @@ cc_test {
|
|||
// TODO(b/146104851): Add auto-gen rules and remove it.
|
||||
test_config: "VtsHalAudioV6_0TargetTest.xml",
|
||||
}
|
||||
|
||||
cc_test {
|
||||
name: "VtsHalAudioV7_0TargetTest",
|
||||
defaults: ["VtsHalAudioTargetTest_defaults"],
|
||||
srcs: [
|
||||
"7.0/AudioPrimaryHidlHalTest.cpp",
|
||||
],
|
||||
static_libs: [
|
||||
"android.hardware.audio@7.0",
|
||||
"android.hardware.audio.common@7.0",
|
||||
],
|
||||
cflags: [
|
||||
"-DMAJOR_VERSION=7",
|
||||
"-DMINOR_VERSION=0",
|
||||
"-include common/all-versions/VersionMacro.h",
|
||||
],
|
||||
data: [
|
||||
":audio_policy_configuration_V7_0",
|
||||
],
|
||||
// Use test_config for vts suite.
|
||||
// TODO(b/146104851): Add auto-gen rules and remove it.
|
||||
test_config: "VtsHalAudioV7_0TargetTest.xml",
|
||||
}
|
||||
|
|
|
@ -0,0 +1,38 @@
|
|||
<?xml version="1.0" encoding="utf-8"?>
|
||||
<!-- Copyright (C) 2020 The Android Open Source Project
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
-->
|
||||
<configuration description="Runs VtsHalAudioV7_0TargetTest.">
|
||||
<option name="test-suite-tag" value="apct" />
|
||||
<option name="test-suite-tag" value="apct-native" />
|
||||
|
||||
<target_preparer class="com.android.tradefed.targetprep.RootTargetPreparer"/>
|
||||
<target_preparer class="com.android.tradefed.targetprep.StopServicesSetup"/>
|
||||
|
||||
<target_preparer class="com.android.tradefed.targetprep.RunCommandTargetPreparer">
|
||||
<option name="run-command" value="setprop vts.native_server.on 1"/>
|
||||
<option name="teardown-command" value="setprop vts.native_server.on 0"/>
|
||||
</target_preparer>
|
||||
|
||||
<target_preparer class="com.android.tradefed.targetprep.PushFilePreparer">
|
||||
<option name="cleanup" value="true" />
|
||||
<option name="push" value="VtsHalAudioV7_0TargetTest->/data/local/tmp/VtsHalAudioV7_0TargetTest" />
|
||||
<option name="push" value="audio_policy_configuration_V7_0.xsd->/data/local/tmp/audio_policy_configuration_V7_0.xsd" />
|
||||
</target_preparer>
|
||||
|
||||
<test class="com.android.tradefed.testtype.GTest" >
|
||||
<option name="native-test-device-path" value="/data/local/tmp" />
|
||||
<option name="module-name" value="VtsHalAudioV7_0TargetTest" />
|
||||
</test>
|
||||
</configuration>
|
30
audio/effect/7.0/Android.bp
Normal file
|
@ -0,0 +1,30 @@
|
|||
// This file is autogenerated by hidl-gen -Landroidbp.
|
||||
|
||||
hidl_interface {
|
||||
name: "android.hardware.audio.effect@7.0",
|
||||
root: "android.hardware",
|
||||
srcs: [
|
||||
"types.hal",
|
||||
"IAcousticEchoCancelerEffect.hal",
|
||||
"IAutomaticGainControlEffect.hal",
|
||||
"IBassBoostEffect.hal",
|
||||
"IDownmixEffect.hal",
|
||||
"IEffect.hal",
|
||||
"IEffectBufferProviderCallback.hal",
|
||||
"IEffectsFactory.hal",
|
||||
"IEnvironmentalReverbEffect.hal",
|
||||
"IEqualizerEffect.hal",
|
||||
"ILoudnessEnhancerEffect.hal",
|
||||
"INoiseSuppressionEffect.hal",
|
||||
"IPresetReverbEffect.hal",
|
||||
"IVirtualizerEffect.hal",
|
||||
"IVisualizerEffect.hal",
|
||||
],
|
||||
interfaces: [
|
||||
"android.hardware.audio.common@7.0",
|
||||
"android.hidl.base@1.0",
|
||||
"android.hidl.safe_union@1.0",
|
||||
],
|
||||
gen_java: false,
|
||||
gen_java_constants: true,
|
||||
}
|
32
audio/effect/7.0/IAcousticEchoCancelerEffect.hal
Normal file
|
@ -0,0 +1,32 @@
|
|||
/*
|
||||
* Copyright (C) 2020 The Android Open Source Project
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package android.hardware.audio.effect@7.0;
|
||||
|
||||
import android.hardware.audio.common@7.0;
|
||||
import IEffect;
|
||||
|
||||
interface IAcousticEchoCancelerEffect extends IEffect {
|
||||
/**
|
||||
* Sets echo delay value in milliseconds.
|
||||
*/
|
||||
setEchoDelay(uint32_t echoDelayMs) generates (Result retval);
|
||||
|
||||
/**
|
||||
* Gets echo delay value in milliseconds.
|
||||
*/
|
||||
getEchoDelay() generates (Result retval, uint32_t echoDelayMs);
|
||||
};
|
68
audio/effect/7.0/IAutomaticGainControlEffect.hal
Normal file
|
@ -0,0 +1,68 @@
|
|||
/*
|
||||
* Copyright (C) 2020 The Android Open Source Project
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package android.hardware.audio.effect@7.0;
|
||||
|
||||
import android.hardware.audio.common@7.0;
|
||||
import IEffect;
|
||||
|
||||
interface IAutomaticGainControlEffect extends IEffect {
|
||||
/**
|
||||
* Sets target level in millibels.
|
||||
*/
|
||||
setTargetLevel(int16_t targetLevelMb) generates (Result retval);
|
||||
|
||||
/**
|
||||
* Gets target level.
|
||||
*/
|
||||
getTargetLevel() generates (Result retval, int16_t targetLevelMb);
|
||||
|
||||
/**
|
||||
* Sets gain in the compression range in millibels.
|
||||
*/
|
||||
setCompGain(int16_t compGainMb) generates (Result retval);
|
||||
|
||||
/**
|
||||
* Gets gain in the compression range.
|
||||
*/
|
||||
getCompGain() generates (Result retval, int16_t compGainMb);
|
||||
|
||||
/**
|
||||
* Enables or disables limiter.
|
||||
*/
|
||||
setLimiterEnabled(bool enabled) generates (Result retval);
|
||||
|
||||
/**
|
||||
* Returns whether limiter is enabled.
|
||||
*/
|
||||
isLimiterEnabled() generates (Result retval, bool enabled);
|
||||
|
||||
struct AllProperties {
|
||||
int16_t targetLevelMb;
|
||||
int16_t compGainMb;
|
||||
bool limiterEnabled;
|
||||
};
|
||||
|
||||
/**
|
||||
* Sets all properties at once.
|
||||
*/
|
||||
setAllProperties(AllProperties properties) generates (Result retval);
|
||||
|
||||
/**
|
||||
* Gets all properties at once.
|
||||
*/
|
||||
getAllProperties() generates (Result retval, AllProperties properties);
|
||||
};
|
48
audio/effect/7.0/IBassBoostEffect.hal
Normal file
|
@ -0,0 +1,48 @@
|
|||
/*
|
||||
* Copyright (C) 2020 The Android Open Source Project
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package android.hardware.audio.effect@7.0;
|
||||
|
||||
import android.hardware.audio.common@7.0;
|
||||
import IEffect;
|
||||
|
||||
interface IBassBoostEffect extends IEffect {
|
||||
/**
|
||||
* Returns whether setting bass boost strength is supported.
|
||||
*/
|
||||
isStrengthSupported() generates (Result retval, bool strengthSupported);
|
||||
|
||||
enum StrengthRange : uint16_t {
|
||||
MIN = 0,
|
||||
MAX = 1000
|
||||
};
|
||||
|
||||
/**
|
||||
* Sets bass boost strength.
|
||||
*
|
||||
* @param strength strength of the effect. The valid range for
|
||||
* strength is [0, 1000], where 0 per mille designates the
|
||||
* mildest effect and 1000 per mille designates the
|
||||
* strongest.
|
||||
* @return retval operation completion status.
|
||||
*/
|
||||
setStrength(uint16_t strength) generates (Result retval);
|
||||
|
||||
/**
|
||||
* Gets bass boost strength.
|
||||
*/
|
||||
getStrength() generates (Result retval, uint16_t strength);
|
||||
};
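Since strength is expressed in per mille, a client typically maps a normalized 0..1 value onto the [0, 1000] range before calling setStrength. A hedged sketch of that mapping; the helper is illustrative and not part of the HAL.

// Illustrative only: map a normalized 0..1 value onto the per-mille strength
// range [0, 1000] expected by setStrength(), clamping out-of-range input.
#include <algorithm>
#include <cstdint>

uint16_t toStrengthPerMille(float normalized) {
    const float clamped = std::min(std::max(normalized, 0.0f), 1.0f);
    return static_cast<uint16_t>(clamped * 1000.0f + 0.5f);
}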
|
37
audio/effect/7.0/IDownmixEffect.hal
Normal file
|
@ -0,0 +1,37 @@
|
|||
/*
|
||||
* Copyright (C) 2020 The Android Open Source Project
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package android.hardware.audio.effect@7.0;
|
||||
|
||||
import android.hardware.audio.common@7.0;
|
||||
import IEffect;
|
||||
|
||||
interface IDownmixEffect extends IEffect {
|
||||
enum Type : int32_t {
|
||||
STRIP, // throw away the extra channels
|
||||
FOLD // mix the extra channels with FL/FR
|
||||
};
|
||||
|
||||
/**
|
||||
* Sets the current downmix preset.
|
||||
*/
|
||||
setType(Type preset) generates (Result retval);
|
||||
|
||||
/**
|
||||
* Gets the current downmix preset.
|
||||
*/
|
||||
getType() generates (Result retval, Type preset);
|
||||
};
|
421
audio/effect/7.0/IEffect.hal
Normal file
|
@ -0,0 +1,421 @@
|
|||
/*
|
||||
* Copyright (C) 2020 The Android Open Source Project
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package android.hardware.audio.effect@7.0;
|
||||
|
||||
import android.hardware.audio.common@7.0;
|
||||
import IEffectBufferProviderCallback;
|
||||
|
||||
interface IEffect {
|
||||
/**
|
||||
* Initialize effect engine--all configurations return to default.
|
||||
*
|
||||
* @return retval operation completion status.
|
||||
*/
|
||||
@entry
|
||||
init() generates (Result retval);
|
||||
|
||||
/**
|
||||
* Apply new audio parameters configurations for input and output buffers.
|
||||
* The provider callbacks may be empty, but in this case the buffer
|
||||
* must be provided in the EffectConfig structure.
|
||||
*
|
||||
* @param config configuration descriptor.
|
||||
* @param inputBufferProvider optional buffer provider reference.
|
||||
* @param outputBufferProvider optional buffer provider reference.
|
||||
* @return retval operation completion status.
|
||||
*/
|
||||
setConfig(EffectConfig config,
|
||||
IEffectBufferProviderCallback inputBufferProvider,
|
||||
IEffectBufferProviderCallback outputBufferProvider)
|
||||
generates (Result retval);
|
||||
|
||||
/**
|
||||
* Reset the effect engine. Keeps configuration but resets state and buffer
|
||||
* content.
|
||||
*
|
||||
* @return retval operation completion status.
|
||||
*/
|
||||
reset() generates (Result retval);
|
||||
|
||||
/**
|
||||
* Enable processing.
|
||||
*
|
||||
* @return retval operation completion status.
|
||||
*/
|
||||
@callflow(next={"prepareForProcessing"})
|
||||
enable() generates (Result retval);
|
||||
|
||||
/**
|
||||
* Disable processing.
|
||||
*
|
||||
* @return retval operation completion status.
|
||||
*/
|
||||
@callflow(next={"close"})
|
||||
disable() generates (Result retval);
|
||||
|
||||
/**
|
||||
* Set the rendering device the audio output path is connected to. The
|
||||
* effect implementation must set EFFECT_FLAG_DEVICE_IND flag in its
|
||||
* descriptor to receive this command when the device changes.
|
||||
*
|
||||
* Note: this method is only supported for effects inserted into
|
||||
* the output chain.
|
||||
*
|
||||
* @param device output device specification.
|
||||
* @return retval operation completion status.
|
||||
*/
|
||||
setDevice(bitfield<AudioDevice> device) generates (Result retval);
|
||||
|
||||
/**
|
||||
* Set and get volume. Used by audio framework to delegate volume control to
|
||||
* effect engine. The effect implementation must set EFFECT_FLAG_VOLUME_CTRL
|
||||
* flag in its descriptor to receive this command. The effect engine must
|
||||
* return the volume that should be applied before the effect is
|
||||
* processed. The overall volume (the volume actually applied by the effect
|
||||
* engine multiplied by the returned value) should match the value indicated
|
||||
* in the command.
|
||||
*
|
||||
* @param volumes vector containing volume for each channel defined in
|
||||
* EffectConfig for output buffer expressed in 8.24 fixed
|
||||
* point format.
|
||||
* @return result updated volume values.
|
||||
* @return retval operation completion status.
|
||||
*/
|
||||
setAndGetVolume(vec<uint32_t> volumes)
|
||||
generates (Result retval, vec<uint32_t> result);
|
||||
|
||||
/**
|
||||
* Notify the effect of the volume change. The effect implementation must
|
||||
* set EFFECT_FLAG_VOLUME_IND flag in its descriptor to receive this
|
||||
* command.
|
||||
*
|
||||
* @param volumes vector containing volume for each channel defined in
|
||||
* EffectConfig for output buffer expressed in 8.24 fixed
|
||||
* point format.
|
||||
* @return retval operation completion status.
|
||||
*/
|
||||
volumeChangeNotification(vec<uint32_t> volumes)
|
||||
generates (Result retval);
|
||||
|
||||
/**
|
||||
* Set the audio mode. The effect implementation must set
|
||||
* EFFECT_FLAG_AUDIO_MODE_IND flag in its descriptor to receive this command
|
||||
* when the audio mode changes.
|
||||
*
|
||||
* @param mode desired audio mode.
|
||||
* @return retval operation completion status.
|
||||
*/
|
||||
setAudioMode(AudioMode mode) generates (Result retval);
|
||||
|
||||
/**
|
||||
* Apply new audio parameters configurations for input and output buffers of
|
||||
* reverse stream. An example of reverse stream is the echo reference
|
||||
* supplied to an Acoustic Echo Canceler.
|
||||
*
|
||||
* @param config configuration descriptor.
|
||||
* @param inputBufferProvider optional buffer provider reference.
|
||||
* @param outputBufferProvider optional buffer provider reference.
|
||||
* @return retval operation completion status.
|
||||
*/
|
||||
setConfigReverse(EffectConfig config,
|
||||
IEffectBufferProviderCallback inputBufferProvider,
|
||||
IEffectBufferProviderCallback outputBufferProvider)
|
||||
generates (Result retval);
|
||||
|
||||
/**
|
||||
* Set the capture device the audio input path is connected to. The effect
|
||||
* implementation must set EFFECT_FLAG_DEVICE_IND flag in its descriptor to
|
||||
* receive this command when the device changes.
|
||||
*
|
||||
* Note: this method is only supported for effects inserted into
|
||||
* the input chain.
|
||||
*
|
||||
* @param device input device specification.
|
||||
* @return retval operation completion status.
|
||||
*/
|
||||
setInputDevice(bitfield<AudioDevice> device) generates (Result retval);
|
||||
|
||||
/**
|
||||
* Read audio parameters configurations for input and output buffers.
|
||||
*
|
||||
* @return retval operation completion status.
|
||||
* @return config configuration descriptor.
|
||||
*/
|
||||
getConfig() generates (Result retval, EffectConfig config);
|
||||
|
||||
/**
|
||||
* Read audio parameters configurations for input and output buffers of
|
||||
* reverse stream.
|
||||
*
|
||||
* @return retval operation completion status.
|
||||
* @return config configuration descriptor.
|
||||
*/
|
||||
getConfigReverse() generates (Result retval, EffectConfig config);
|
||||
|
||||
/**
|
||||
* Queries for supported combinations of main and auxiliary channels
|
||||
* (e.g. for a multi-microphone noise suppressor).
|
||||
*
|
||||
* @param maxConfigs maximum number of the combinations to return.
|
||||
* @return retval absence of the feature support is indicated using
|
||||
* NOT_SUPPORTED code. RESULT_TOO_BIG is returned if
|
||||
* the number of supported combinations exceeds 'maxConfigs'.
|
||||
* @return result list of configuration descriptors.
|
||||
*/
|
||||
getSupportedAuxChannelsConfigs(uint32_t maxConfigs)
|
||||
generates (Result retval, vec<EffectAuxChannelsConfig> result);
|
||||
|
||||
/**
|
||||
* Retrieves the current configuration of main and auxiliary channels.
|
||||
*
|
||||
* @return retval absence of the feature support is indicated using
|
||||
* NOT_SUPPORTED code.
|
||||
* @return result configuration descriptor.
|
||||
*/
|
||||
getAuxChannelsConfig()
|
||||
generates (Result retval, EffectAuxChannelsConfig result);
|
||||
|
||||
/**
|
||||
* Sets the current configuration of main and auxiliary channels.
|
||||
*
|
||||
* @return retval operation completion status; absence of the feature
|
||||
* support is indicated using NOT_SUPPORTED code.
|
||||
*/
|
||||
setAuxChannelsConfig(EffectAuxChannelsConfig config)
|
||||
generates (Result retval);
|
||||
|
||||
/**
|
||||
* Set the audio source the capture path is configured for (Camcorder, voice
|
||||
* recognition...).
|
||||
*
|
||||
* Note: this method is only supported for effects inserted into
|
||||
* the input chain.
|
||||
*
|
||||
* @param source source descriptor.
|
||||
* @return retval operation completion status.
|
||||
*/
|
||||
setAudioSource(AudioSource source) generates (Result retval);
|
||||
|
||||
/**
|
||||
* This command indicates if the playback thread the effect is attached to
|
||||
* is offloaded or not, and updates the I/O handle of the playback thread
|
||||
* the effect is attached to.
|
||||
*
|
||||
* @param param effect offload descriptor.
|
||||
* @return retval operation completion status.
|
||||
*/
|
||||
offload(EffectOffloadParameter param) generates (Result retval);
|
||||
|
||||
/**
|
||||
* Returns the effect descriptor.
|
||||
*
|
||||
* @return retval operation completion status.
|
||||
* @return descriptor effect descriptor.
|
||||
*/
|
||||
getDescriptor() generates (Result retval, EffectDescriptor descriptor);
|
||||
|
||||
/**
|
||||
* Set up required transports for passing audio buffers to the effect.
|
||||
*
|
||||
* The transport consists of shared memory and a message queue for reporting
|
||||
* effect processing operation status. The shared memory is set up
|
||||
* separately using 'setProcessBuffers' method.
|
||||
*
|
||||
* Processing is requested by setting 'REQUEST_PROCESS' or
|
||||
* 'REQUEST_PROCESS_REVERSE' EventFlags associated with the status message
|
||||
* queue. The result of processing may be one of the following:
|
||||
* OK if there were no errors during processing;
|
||||
* INVALID_ARGUMENTS if audio buffers are invalid;
|
||||
* INVALID_STATE if the engine has finished the disable phase;
|
||||
* NOT_INITIALIZED if the audio buffers were not set;
|
||||
* NOT_SUPPORTED if the requested processing type is not supported by
|
||||
* the effect.
|
||||
*
|
||||
* @return retval OK if both message queues were created successfully.
|
||||
* INVALID_STATE if the method was already called.
|
||||
* INVALID_ARGUMENTS if there was a problem setting up
|
||||
* the queue.
|
||||
* @return statusMQ a message queue used for passing status from the effect.
|
||||
*/
|
||||
@callflow(next={"setProcessBuffers"})
|
||||
prepareForProcessing() generates (Result retval, fmq_sync<Result> statusMQ);
|
||||
|
||||
/**
|
||||
* Set up input and output buffers for processing audio data. The effect
|
||||
* may modify both the input and the output buffer during the operation.
|
||||
* Buffers may be set multiple times during effect lifetime.
|
||||
*
|
||||
* The input and the output buffer may be reused between different effects,
|
||||
* and the input buffer may be used as an output buffer. Buffers are
|
||||
* distinguished using 'AudioBuffer.id' field.
|
||||
*
|
||||
* @param inBuffer input audio buffer.
|
||||
* @param outBuffer output audio buffer.
|
||||
* @return retval OK if both buffers were mapped successfully.
|
||||
* INVALID_ARGUMENTS if there was a problem with mapping
|
||||
* any of the buffers.
|
||||
*/
|
||||
setProcessBuffers(AudioBuffer inBuffer, AudioBuffer outBuffer)
|
||||
generates (Result retval);
|
||||
|
||||
/**
|
||||
* Execute a vendor-specific command on the effect. The command code
|
||||
* and data, as well as result data are not interpreted by Android
|
||||
* Framework and are passed as-is between the application and the effect.
|
||||
*
|
||||
* The effect must use standard POSIX.1-2001 error codes for the operation
|
||||
* completion status.
|
||||
*
|
||||
* Use this method only if the effect is provided by a third party, and
|
||||
* there is no interface defined for it. This method only works for effects
|
||||
* implemented in software.
|
||||
*
|
||||
* @param commandId the ID of the command.
|
||||
* @param data command data.
|
||||
* @param resultMaxSize maximum size in bytes of the result; can be 0.
|
||||
* @return status command completion status.
|
||||
* @return result result data.
|
||||
*/
|
||||
command(uint32_t commandId, vec<uint8_t> data, uint32_t resultMaxSize)
|
||||
generates (int32_t status, vec<uint8_t> result);
|
||||
|
||||
/**
|
||||
* Set a vendor-specific parameter and apply it immediately. The parameter
|
||||
* code and data are not interpreted by Android Framework and are passed
|
||||
* as-is between the application and the effect.
|
||||
*
|
||||
* The effect must use INVALID_ARGUMENTS return code if the parameter ID is
|
||||
* unknown or if provided parameter data is invalid. If the effect does not
|
||||
* support setting vendor-specific parameters, it must return NOT_SUPPORTED.
|
||||
*
|
||||
* Use this method only if the effect is provided by a third party, and
|
||||
* there is no interface defined for it. This method only works for effects
|
||||
* implemented in software.
|
||||
*
|
||||
* @param parameter identifying data of the parameter.
|
||||
* @param value the value of the parameter.
|
||||
* @return retval operation completion status.
|
||||
*/
|
||||
setParameter(vec<uint8_t> parameter, vec<uint8_t> value)
|
||||
generates (Result retval);
|
||||
|
||||
/**
|
||||
* Get a vendor-specific parameter value. The parameter code and returned
|
||||
* data are not interpreted by Android Framework and are passed as-is
|
||||
* between the application and the effect.
|
||||
*
|
||||
* The effect must use INVALID_ARGUMENTS return code if the parameter ID is
|
||||
* unknown. If the effect does not support setting vendor-specific
|
||||
* parameters, it must return NOT_SUPPORTED.
|
||||
*
|
||||
* Use this method only if the effect is provided by a third party, and
|
||||
* there is no interface defined for it. This method only works for effects
|
||||
* implemented in software.
|
||||
*
|
||||
* @param parameter identifying data of the parameter.
|
||||
* @param valueMaxSize maximum size in bytes of the value.
|
||||
* @return retval operation completion status.
|
||||
* @return result the value of the parameter.
|
||||
*/
|
||||
getParameter(vec<uint8_t> parameter, uint32_t valueMaxSize)
|
||||
generates (Result retval, vec<uint8_t> value);
|
||||
|
||||
/**
|
||||
* Get supported configs for a vendor-specific feature. The configs returned
|
||||
* are not interpreted by Android Framework and are passed as-is between the
|
||||
* application and the effect.
|
||||
*
|
||||
* The effect must use INVALID_ARGUMENTS return code if the feature ID is
|
||||
* unknown. If the effect does not support getting vendor-specific feature
|
||||
* configs, it must return NOT_SUPPORTED. If the feature is supported but
|
||||
* the total number of supported configurations exceeds the maximum number
|
||||
* indicated by the caller, the method must return RESULT_TOO_BIG.
|
||||
*
|
||||
* Use this method only if the effect is provided by a third party, and
|
||||
* there is no interface defined for it. This method only works for effects
|
||||
* implemented in software.
|
||||
*
|
||||
* @param featureId feature identifier.
|
||||
* @param maxConfigs maximum number of configs to return.
|
||||
* @param configSize size of each config in bytes.
|
||||
* @return retval operation completion status.
|
||||
* @return configsCount number of configs returned.
|
||||
* @return configsData data for all the configs returned.
|
||||
*/
|
||||
getSupportedConfigsForFeature(
|
||||
uint32_t featureId,
|
||||
uint32_t maxConfigs,
|
||||
uint32_t configSize) generates (
|
||||
Result retval,
|
||||
uint32_t configsCount,
|
||||
vec<uint8_t> configsData);
|
||||
|
||||
/**
|
||||
* Get the current config for a vendor-specific feature. The config returned
|
||||
* is not interpreted by Android Framework and is passed as-is between the
|
||||
* application and the effect.
|
||||
*
|
||||
* The effect must use INVALID_ARGUMENTS return code if the feature ID is
|
||||
* unknown. If the effect does not support getting vendor-specific
|
||||
* feature configs, it must return NOT_SUPPORTED.
|
||||
*
|
||||
* Use this method only if the effect is provided by a third party, and
|
||||
* there is no interface defined for it. This method only works for effects
|
||||
* implemented in software.
|
||||
*
|
||||
* @param featureId feature identifier.
|
||||
* @param configSize size of the config in bytes.
|
||||
* @return retval operation completion status.
|
||||
* @return configData config data.
|
||||
*/
|
||||
getCurrentConfigForFeature(uint32_t featureId, uint32_t configSize)
|
||||
generates (Result retval, vec<uint8_t> configData);
|
||||
|
||||
/**
|
||||
* Set the current config for a vendor-specific feature. The config data
|
||||
* is not interpreted by Android Framework and is passed as-is between the
|
||||
* application and the effect.
|
||||
*
|
||||
* The effect must use INVALID_ARGUMENTS return code if the feature ID is
|
||||
* unknown. If the effect does not support getting vendor-specific
|
||||
* feature configs, it must return NOT_SUPPORTED.
|
||||
*
|
||||
* Use this method only if the effect is provided by a third party, and
|
||||
* there is no interface defined for it. This method only works for effects
|
||||
* implemented in software.
|
||||
*
|
||||
* @param featureId feature identifier.
|
||||
* @param configData config data.
|
||||
* @return retval operation completion status.
|
||||
*/
|
||||
setCurrentConfigForFeature(uint32_t featureId, vec<uint8_t> configData)
|
||||
generates (Result retval);
|
||||
|
||||
/**
|
||||
* Called by the framework to deinitialize the effect and free up
|
||||
* all currently allocated resources. It is recommended to close
|
||||
* the effect on the client side as soon as it becomes unused.
|
||||
*
|
||||
* The client must ensure that this function is not called while
|
||||
* audio data is being transferred through the effect's message queues.
|
||||
*
|
||||
* @return retval OK in case of success.
|
||||
* INVALID_STATE if the effect was already closed.
|
||||
*/
|
||||
@exit
|
||||
close() generates (Result retval);
|
||||
};
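Illustrative usage sketch (not part of the files in this change): a minimal C++ client driver for the processing transport described above, assuming the standard proxy and FMQ/EventFlag classes generated for this package. The helper name runOneProcessCycle and the pre-filled AudioBuffer arguments are hypothetical, and hidl_memory allocation via IAllocator is elided.

// Rough sketch only; error handling is minimal for brevity.
#include <memory>
#include <android/hardware/audio/effect/7.0/IEffect.h>
#include <fmq/EventFlag.h>
#include <fmq/MessageQueue.h>

using namespace android::hardware::audio::effect::V7_0;
using android::hardware::EventFlag;
using android::hardware::MessageQueue;
using android::hardware::kSynchronizedReadWrite;

void runOneProcessCycle(const android::sp<IEffect>& effect,
                        const AudioBuffer& in, const AudioBuffer& out) {
    // 1. Obtain the status message queue from the effect.
    std::unique_ptr<MessageQueue<Result, kSynchronizedReadWrite>> statusMQ;
    effect->prepareForProcessing([&](Result r, const auto& mqDesc) {
        if (r == Result::OK) {
            statusMQ = std::make_unique<MessageQueue<Result, kSynchronizedReadWrite>>(mqDesc);
        }
    });
    // 2. Hand the input/output buffers to the effect.
    Result sr = effect->setProcessBuffers(in, out);
    if (!statusMQ || sr != Result::OK) return;

    // 3. Request processing via the event flag, then read back the status.
    EventFlag* efGroup = nullptr;
    EventFlag::createEventFlag(statusMQ->getEventFlagWord(), &efGroup);
    efGroup->wake(static_cast<uint32_t>(MessageQueueFlagBits::REQUEST_PROCESS));
    uint32_t efState = 0;
    efGroup->wait(static_cast<uint32_t>(MessageQueueFlagBits::DONE_PROCESSING), &efState);
    Result status = Result::NOT_INITIALIZED;
    statusMQ->read(&status);  // OK, INVALID_ARGUMENTS, INVALID_STATE, ...
    EventFlag::deleteEventFlag(&efGroup);
}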
|
38
audio/effect/7.0/IEffectBufferProviderCallback.hal
Normal file
|
@ -0,0 +1,38 @@
|
|||
/*
|
||||
* Copyright (C) 2020 The Android Open Source Project
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package android.hardware.audio.effect@7.0;
|
||||
|
||||
/**
|
||||
* This callback interface contains functions that can be used by the effect
|
||||
* engine 'process' function to exchange input and output audio buffers.
|
||||
*/
|
||||
interface IEffectBufferProviderCallback {
|
||||
/**
|
||||
* Called to retrieve a buffer that the 'process' function should read
* input data from.
|
||||
*
|
||||
* @return buffer audio buffer for processing
|
||||
*/
|
||||
getBuffer() generates (AudioBuffer buffer);
|
||||
|
||||
/**
|
||||
* Called to provide a buffer with the data written by the 'process' function.
|
||||
*
|
||||
* @param buffer audio buffer for processing
|
||||
*/
|
||||
putBuffer(AudioBuffer buffer);
|
||||
};
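Illustrative sketch (not part of this change): a trivial C++ implementation of this callback, assuming the stub class generated by hidl-gen; the single cached AudioBuffer is a hypothetical placeholder for a real buffer pool.

#include <android/hardware/audio/effect/7.0/IEffectBufferProviderCallback.h>

using namespace android::hardware::audio::effect::V7_0;
using android::hardware::Return;
using android::hardware::Void;

struct StaticBufferProvider : public IEffectBufferProviderCallback {
    AudioBuffer mBuffer;  // pre-allocated buffer handed out to 'process'

    Return<void> getBuffer(getBuffer_cb _hidl_cb) override {
        _hidl_cb(mBuffer);   // let 'process' read from this buffer
        return Void();
    }
    Return<void> putBuffer(const AudioBuffer& buffer) override {
        mBuffer = buffer;    // accept the buffer written by 'process'
        return Void();
    }
};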
|
62
audio/effect/7.0/IEffectsFactory.hal
Normal file
|
@ -0,0 +1,62 @@
|
|||
/*
|
||||
* Copyright (C) 2020 The Android Open Source Project
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package android.hardware.audio.effect@7.0;
|
||||
|
||||
import android.hardware.audio.common@7.0;
|
||||
import IEffect;
|
||||
|
||||
interface IEffectsFactory {
|
||||
/**
|
||||
* Returns descriptors of different effects in all loaded libraries.
|
||||
*
|
||||
* @return retval operation completion status.
|
||||
* @return result list of effect descriptors.
|
||||
*/
|
||||
getAllDescriptors() generates(Result retval, vec<EffectDescriptor> result);
|
||||
|
||||
/**
|
||||
* Returns a descriptor of a particular effect.
|
||||
*
|
||||
* @return retval operation completion status.
|
||||
* @return result effect descriptor.
|
||||
*/
|
||||
getDescriptor(Uuid uid) generates(Result retval, EffectDescriptor result);
|
||||
|
||||
/**
|
||||
* Creates an effect engine of the specified type. To release the effect
|
||||
* engine, it is necessary to release references to the returned effect
|
||||
* object.
|
||||
*
|
||||
* @param uid effect uuid.
|
||||
* @param session audio session to which this effect instance will be
|
||||
* attached. All effects created with the same session ID
|
||||
* are connected in series and process the same signal
|
||||
* stream.
|
||||
* @param ioHandle identifies the output or input stream this effect is
|
||||
* directed to in audio HAL.
|
||||
* @param device identifies the sink or source device this effect is directed to in the
|
||||
* audio HAL. Must be specified if session is AudioSessionConsts.DEVICE.
|
||||
* "device" is the AudioPortHandle used for the device when the audio
|
||||
* patch is created at the audio HAL.
|
||||
* @return retval operation completion status.
|
||||
* @return result the interface for the created effect.
|
||||
* @return effectId the unique ID of the effect to be used with
|
||||
* IStream::addEffect and IStream::removeEffect methods.
|
||||
*/
|
||||
createEffect(Uuid uid, AudioSession session, AudioIoHandle ioHandle, AudioPortHandle device)
|
||||
generates (Result retval, IEffect result, uint64_t effectId);
|
||||
};
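Illustrative sketch (not part of this change): enumerating descriptors and creating the first effect through this factory, assuming the standard HIDL C++ proxy conventions; the session, ioHandle and device values are hypothetical placeholders.

#include <android/hardware/audio/effect/7.0/IEffectsFactory.h>

using android::sp;
using namespace android::hardware::audio::effect::V7_0;

sp<IEffect> createFirstEffect(const sp<IEffectsFactory>& factory) {
    sp<IEffect> effect;
    factory->getAllDescriptors([&](Result r, const auto& descriptors) {
        if (r != Result::OK || descriptors.size() == 0) return;
        // Create an instance of the first enumerated effect on session 0,
        // directed to a hypothetical output stream handle.
        factory->createEffect(descriptors[0].uuid, 0 /* session */,
                              1 /* ioHandle */, 0 /* device */,
                              [&](Result cr, const sp<IEffect>& e, uint64_t /* effectId */) {
                                  if (cr == Result::OK) effect = e;
                              });
    });
    return effect;
}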
|
178
audio/effect/7.0/IEnvironmentalReverbEffect.hal
Normal file
|
@ -0,0 +1,178 @@
|
|||
/*
|
||||
* Copyright (C) 2020 The Android Open Source Project
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package android.hardware.audio.effect@7.0;
|
||||
|
||||
import android.hardware.audio.common@7.0;
|
||||
import IEffect;
|
||||
|
||||
interface IEnvironmentalReverbEffect extends IEffect {
|
||||
/**
|
||||
* Sets whether the effect should be bypassed.
|
||||
*/
|
||||
setBypass(bool bypass) generates (Result retval);
|
||||
|
||||
/**
|
||||
* Gets whether the effect should be bypassed.
|
||||
*/
|
||||
getBypass() generates (Result retval, bool bypass);
|
||||
|
||||
enum ParamRange : int16_t {
|
||||
ROOM_LEVEL_MIN = -6000,
|
||||
ROOM_LEVEL_MAX = 0,
|
||||
ROOM_HF_LEVEL_MIN = -4000,
|
||||
ROOM_HF_LEVEL_MAX = 0,
|
||||
DECAY_TIME_MIN = 100,
|
||||
DECAY_TIME_MAX = 20000,
|
||||
DECAY_HF_RATIO_MIN = 100,
|
||||
DECAY_HF_RATIO_MAX = 1000,
|
||||
REFLECTIONS_LEVEL_MIN = -6000,
|
||||
REFLECTIONS_LEVEL_MAX = 0,
|
||||
REFLECTIONS_DELAY_MIN = 0,
|
||||
REFLECTIONS_DELAY_MAX = 65,
|
||||
REVERB_LEVEL_MIN = -6000,
|
||||
REVERB_LEVEL_MAX = 0,
|
||||
REVERB_DELAY_MIN = 0,
|
||||
REVERB_DELAY_MAX = 65,
|
||||
DIFFUSION_MIN = 0,
|
||||
DIFFUSION_MAX = 1000,
|
||||
DENSITY_MIN = 0,
|
||||
DENSITY_MAX = 1000
|
||||
};
|
||||
|
||||
/**
|
||||
* Sets the room level.
|
||||
*/
|
||||
setRoomLevel(int16_t roomLevel) generates (Result retval);
|
||||
|
||||
/**
|
||||
* Gets the room level.
|
||||
*/
|
||||
getRoomLevel() generates (Result retval, int16_t roomLevel);
|
||||
|
||||
/**
|
||||
* Sets the room high frequencies level.
|
||||
*/
|
||||
setRoomHfLevel(int16_t roomHfLevel) generates (Result retval);
|
||||
|
||||
/**
|
||||
* Gets the room high frequencies level.
|
||||
*/
|
||||
getRoomHfLevel() generates (Result retval, int16_t roomHfLevel);
|
||||
|
||||
/**
|
||||
* Sets the room decay time.
|
||||
*/
|
||||
setDecayTime(uint32_t decayTime) generates (Result retval);
|
||||
|
||||
/**
|
||||
* Gets the room decay time.
|
||||
*/
|
||||
getDecayTime() generates (Result retval, uint32_t decayTime);
|
||||
|
||||
/**
|
||||
* Sets the ratio of high frequencies decay.
|
||||
*/
|
||||
setDecayHfRatio(int16_t decayHfRatio) generates (Result retval);
|
||||
|
||||
/**
|
||||
* Gets the ratio of high frequencies decay.
|
||||
*/
|
||||
getDecayHfRatio() generates (Result retval, int16_t decayHfRatio);
|
||||
|
||||
/**
|
||||
* Sets the level of reflections in the room.
|
||||
*/
|
||||
setReflectionsLevel(int16_t reflectionsLevel) generates (Result retval);
|
||||
|
||||
/**
|
||||
* Gets the level of reflections in the room.
|
||||
*/
|
||||
getReflectionsLevel() generates (Result retval, int16_t reflectionsLevel);
|
||||
|
||||
/**
|
||||
* Sets the reflections delay in the room.
|
||||
*/
|
||||
setReflectionsDelay(uint32_t reflectionsDelay) generates (Result retval);
|
||||
|
||||
/**
|
||||
* Gets the reflections delay in the room.
|
||||
*/
|
||||
getReflectionsDelay() generates (Result retval, uint32_t reflectionsDelay);
|
||||
|
||||
/**
|
||||
* Sets the reverb level of the room.
|
||||
*/
|
||||
setReverbLevel(int16_t reverbLevel) generates (Result retval);
|
||||
|
||||
/**
|
||||
* Gets the reverb level of the room.
|
||||
*/
|
||||
getReverbLevel() generates (Result retval, int16_t reverbLevel);
|
||||
|
||||
/**
|
||||
* Sets the reverb delay of the room.
|
||||
*/
|
||||
setReverbDelay(uint32_t reverbDelay) generates (Result retval);
|
||||
|
||||
/**
|
||||
* Gets the reverb delay of the room.
|
||||
*/
|
||||
getReverbDelay() generates (Result retval, uint32_t reverbDelay);
|
||||
|
||||
/**
|
||||
* Sets room diffusion.
|
||||
*/
|
||||
setDiffusion(int16_t diffusion) generates (Result retval);
|
||||
|
||||
/**
|
||||
* Gets room diffusion.
|
||||
*/
|
||||
getDiffusion() generates (Result retval, int16_t diffusion);
|
||||
|
||||
/**
|
||||
* Sets room wall density.
|
||||
*/
|
||||
setDensity(int16_t density) generates (Result retval);
|
||||
|
||||
/**
|
||||
* Gets room wall density.
|
||||
*/
|
||||
getDensity() generates (Result retval, int16_t density);
|
||||
|
||||
struct AllProperties {
|
||||
int16_t roomLevel; // in millibels, range -6000 to 0
|
||||
int16_t roomHfLevel; // in millibels, range -4000 to 0
|
||||
uint32_t decayTime; // in milliseconds, range 100 to 20000
|
||||
int16_t decayHfRatio; // in permilles, range 100 to 1000
|
||||
int16_t reflectionsLevel; // in millibels, range -6000 to 0
|
||||
uint32_t reflectionsDelay; // in milliseconds, range 0 to 65
|
||||
int16_t reverbLevel; // in millibels, range -6000 to 0
|
||||
uint32_t reverbDelay; // in milliseconds, range 0 to 65
|
||||
int16_t diffusion; // in permilles, range 0 to 1000
|
||||
int16_t density; // in permilles, range 0 to 1000
|
||||
};
|
||||
|
||||
/**
|
||||
* Sets all properties at once.
|
||||
*/
|
||||
setAllProperties(AllProperties properties) generates (Result retval);
|
||||
|
||||
/**
|
||||
* Gets all properties at once.
|
||||
*/
|
||||
getAllProperties() generates (Result retval, AllProperties properties);
|
||||
};
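Illustrative sketch (not part of this change): setting all environmental reverb properties at once, staying within the ranges documented by ParamRange above; the concrete values are arbitrary examples.

#include <android/hardware/audio/effect/7.0/IEnvironmentalReverbEffect.h>

using namespace android::hardware::audio::effect::V7_0;

Result configureReverb(const android::sp<IEnvironmentalReverbEffect>& reverb) {
    IEnvironmentalReverbEffect::AllProperties props = {};
    props.roomLevel = -1000;        // millibels, within [-6000, 0]
    props.roomHfLevel = -500;       // millibels, within [-4000, 0]
    props.decayTime = 1500;         // milliseconds, within [100, 20000]
    props.decayHfRatio = 800;       // permilles, within [100, 1000]
    props.reflectionsLevel = -2000; // millibels, within [-6000, 0]
    props.reflectionsDelay = 20;    // milliseconds, within [0, 65]
    props.reverbLevel = -1500;      // millibels, within [-6000, 0]
    props.reverbDelay = 40;         // milliseconds, within [0, 65]
    props.diffusion = 1000;         // permilles, within [0, 1000]
    props.density = 500;            // permilles, within [0, 1000]
    return reverb->setAllProperties(props);
}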
|
93
audio/effect/7.0/IEqualizerEffect.hal
Normal file
|
@ -0,0 +1,93 @@
|
|||
/*
|
||||
* Copyright (C) 2020 The Android Open Source Project
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package android.hardware.audio.effect@7.0;
|
||||
|
||||
import android.hardware.audio.common@7.0;
|
||||
import IEffect;
|
||||
|
||||
interface IEqualizerEffect extends IEffect {
|
||||
/**
|
||||
* Gets the number of frequency bands that the equalizer supports.
|
||||
*/
|
||||
getNumBands() generates (Result retval, uint16_t numBands);
|
||||
|
||||
/**
|
||||
* Returns the minimum and maximum band levels supported.
|
||||
*/
|
||||
getLevelRange()
|
||||
generates (Result retval, int16_t minLevel, int16_t maxLevel);
|
||||
|
||||
/**
|
||||
* Sets the gain for the given equalizer band.
|
||||
*/
|
||||
setBandLevel(uint16_t band, int16_t level) generates (Result retval);
|
||||
|
||||
/**
|
||||
* Gets the gain for the given equalizer band.
|
||||
*/
|
||||
getBandLevel(uint16_t band) generates (Result retval, int16_t level);
|
||||
|
||||
/**
|
||||
* Gets the center frequency of the given band, in milliHertz.
|
||||
*/
|
||||
getBandCenterFrequency(uint16_t band)
|
||||
generates (Result retval, uint32_t centerFreqmHz);
|
||||
|
||||
/**
|
||||
* Gets the frequency range of the given frequency band, in milliHertz.
|
||||
*/
|
||||
getBandFrequencyRange(uint16_t band)
|
||||
generates (Result retval, uint32_t minFreqmHz, uint32_t maxFreqmHz);
|
||||
|
||||
/**
|
||||
* Gets the band that has the most effect on the given frequency
|
||||
* in milliHertz.
|
||||
*/
|
||||
getBandForFrequency(uint32_t freqmHz)
|
||||
generates (Result retval, uint16_t band);
|
||||
|
||||
/**
|
||||
* Gets the names of all presets the equalizer supports.
|
||||
*/
|
||||
getPresetNames() generates (Result retval, vec<string> names);
|
||||
|
||||
/**
|
||||
* Sets the current preset using the index of the preset in the names
|
||||
* vector returned via 'getPresetNames'.
|
||||
*/
|
||||
setCurrentPreset(uint16_t preset) generates (Result retval);
|
||||
|
||||
/**
|
||||
* Gets the current preset.
|
||||
*/
|
||||
getCurrentPreset() generates (Result retval, uint16_t preset);
|
||||
|
||||
struct AllProperties {
|
||||
uint16_t curPreset;
|
||||
vec<int16_t> bandLevels;
|
||||
};
|
||||
|
||||
/**
|
||||
* Sets all properties at once.
|
||||
*/
|
||||
setAllProperties(AllProperties properties) generates (Result retval);
|
||||
|
||||
/**
|
||||
* Gets all properties at once.
|
||||
*/
|
||||
getAllProperties() generates (Result retval, AllProperties properties);
|
||||
};
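Illustrative sketch (not part of this change): walking the equalizer bands and applying a flat gain, assuming the generated C++ proxy; the 0 mB level is an arbitrary example.

#include <android/hardware/audio/effect/7.0/IEqualizerEffect.h>

using namespace android::hardware::audio::effect::V7_0;

void flattenEqualizer(const android::sp<IEqualizerEffect>& eq) {
    uint16_t numBands = 0;
    eq->getNumBands([&](Result r, uint16_t n) {
        if (r == Result::OK) numBands = n;
    });
    for (uint16_t band = 0; band < numBands; ++band) {
        eq->setBandLevel(band, 0 /* level in millibels */);
    }
}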
|
32
audio/effect/7.0/ILoudnessEnhancerEffect.hal
Normal file
|
@ -0,0 +1,32 @@
|
|||
/*
|
||||
* Copyright (C) 2020 The Android Open Source Project
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package android.hardware.audio.effect@7.0;
|
||||
|
||||
import android.hardware.audio.common@7.0;
|
||||
import IEffect;
|
||||
|
||||
interface ILoudnessEnhancerEffect extends IEffect {
|
||||
/**
|
||||
* Sets target gain expressed in millibels.
|
||||
*/
|
||||
setTargetGain(int32_t targetGainMb) generates (Result retval);
|
||||
|
||||
/**
|
||||
* Gets target gain expressed in millibels.
|
||||
*/
|
||||
getTargetGain() generates (Result retval, int32_t targetGainMb);
|
||||
};
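Illustrative sketch (not part of this change): the target gain is expressed in millibels, so a +6 dB boost is passed as 600 mB.

#include <android/hardware/audio/effect/7.0/ILoudnessEnhancerEffect.h>

using namespace android::hardware::audio::effect::V7_0;

Result boostBy6dB(const android::sp<ILoudnessEnhancerEffect>& le) {
    return le->setTargetGain(600 /* millibels == +6 dB */);
}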
|
68
audio/effect/7.0/INoiseSuppressionEffect.hal
Normal file
|
@ -0,0 +1,68 @@
|
|||
/*
|
||||
* Copyright (C) 2020 The Android Open Source Project
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package android.hardware.audio.effect@7.0;
|
||||
|
||||
import android.hardware.audio.common@7.0;
|
||||
import IEffect;
|
||||
|
||||
interface INoiseSuppressionEffect extends IEffect {
|
||||
enum Level : int32_t {
|
||||
LOW,
|
||||
MEDIUM,
|
||||
HIGH
|
||||
};
|
||||
|
||||
/**
|
||||
* Sets suppression level.
|
||||
*/
|
||||
setSuppressionLevel(Level level) generates (Result retval);
|
||||
|
||||
/**
|
||||
* Gets suppression level.
|
||||
*/
|
||||
getSuppressionLevel() generates (Result retval, Level level);
|
||||
|
||||
enum Type : int32_t {
|
||||
SINGLE_CHANNEL,
|
||||
MULTI_CHANNEL
|
||||
};
|
||||
|
||||
/**
|
||||
* Sets suppression type.
|
||||
*/
|
||||
setSuppressionType(Type type) generates (Result retval);
|
||||
|
||||
/**
|
||||
* Gets suppression type.
|
||||
*/
|
||||
getSuppressionType() generates (Result retval, Type type);
|
||||
|
||||
struct AllProperties {
|
||||
Level level;
|
||||
Type type;
|
||||
};
|
||||
|
||||
/**
|
||||
* Sets all properties at once.
|
||||
*/
|
||||
setAllProperties(AllProperties properties) generates (Result retval);
|
||||
|
||||
/**
|
||||
* Gets all properties at once.
|
||||
*/
|
||||
getAllProperties() generates (Result retval, AllProperties properties);
|
||||
};
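Illustrative sketch (not part of this change): configuring suppression level and type in one call via the AllProperties struct defined above; the chosen values are examples.

#include <android/hardware/audio/effect/7.0/INoiseSuppressionEffect.h>

using namespace android::hardware::audio::effect::V7_0;

Result configureNs(const android::sp<INoiseSuppressionEffect>& ns) {
    INoiseSuppressionEffect::AllProperties props = {};
    props.level = INoiseSuppressionEffect::Level::HIGH;
    props.type = INoiseSuppressionEffect::Type::MULTI_CHANNEL;
    return ns->setAllProperties(props);
}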
|
43
audio/effect/7.0/IPresetReverbEffect.hal
Normal file
|
@ -0,0 +1,43 @@
|
|||
/*
|
||||
* Copyright (C) 2020 The Android Open Source Project
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package android.hardware.audio.effect@7.0;
|
||||
|
||||
import android.hardware.audio.common@7.0;
|
||||
import IEffect;
|
||||
|
||||
interface IPresetReverbEffect extends IEffect {
|
||||
enum Preset : int32_t {
|
||||
NONE, // no reverb or reflections
|
||||
SMALLROOM, // a small room less than five meters in length
|
||||
MEDIUMROOM, // a medium room with a length of ten meters or less
|
||||
LARGEROOM, // a large-sized room suitable for live performances
|
||||
MEDIUMHALL, // a medium-sized hall
|
||||
LARGEHALL, // a large-sized hall suitable for a full orchestra
|
||||
PLATE, // synthesis of the traditional plate reverb
|
||||
LAST = PLATE
|
||||
};
|
||||
|
||||
/**
|
||||
* Sets the current preset.
|
||||
*/
|
||||
setPreset(Preset preset) generates (Result retval);
|
||||
|
||||
/**
|
||||
* Gets the current preset.
|
||||
*/
|
||||
getPreset() generates (Result retval, Preset preset);
|
||||
};
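Illustrative sketch (not part of this change): selecting one of the built-in reverb presets listed above.

#include <android/hardware/audio/effect/7.0/IPresetReverbEffect.h>

using namespace android::hardware::audio::effect::V7_0;

Result useMediumHall(const android::sp<IPresetReverbEffect>& reverb) {
    return reverb->setPreset(IPresetReverbEffect::Preset::MEDIUMHALL);
}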
|
77
audio/effect/7.0/IVirtualizerEffect.hal
Normal file
|
@ -0,0 +1,77 @@
|
|||
/*
|
||||
* Copyright (C) 2020 The Android Open Source Project
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package android.hardware.audio.effect@7.0;
|
||||
|
||||
import android.hardware.audio.common@7.0;
|
||||
import IEffect;
|
||||
|
||||
interface IVirtualizerEffect extends IEffect {
|
||||
/**
|
||||
* Returns whether setting virtualization strength is supported.
|
||||
*/
|
||||
isStrengthSupported() generates (bool strengthSupported);
|
||||
|
||||
enum StrengthRange : uint16_t {
|
||||
MIN = 0,
|
||||
MAX = 1000
|
||||
};
|
||||
|
||||
/**
|
||||
* Sets virtualization strength.
|
||||
*
|
||||
* @param strength strength of the effect. The valid range for strength
|
||||
* strength is [0, 1000], where 0 per mille designates the
|
||||
* mildest effect and 1000 per mille designates the
|
||||
* strongest.
|
||||
* @return retval operation completion status.
|
||||
*/
|
||||
setStrength(uint16_t strength) generates (Result retval);
|
||||
|
||||
/**
|
||||
* Gets virtualization strength.
|
||||
*/
|
||||
getStrength() generates (Result retval, uint16_t strength);
|
||||
|
||||
struct SpeakerAngle {
|
||||
/** Speaker channel mask */
|
||||
bitfield<AudioChannelMask> mask;
|
||||
// all angles are expressed in degrees and
|
||||
// are relative to the listener.
|
||||
int16_t azimuth; // 0 is the direction the listener faces
|
||||
// 180 is behind the listener
|
||||
// -90 is to their left
|
||||
int16_t elevation; // 0 is the horizontal plane
|
||||
// +90 is above the listener, -90 is below
|
||||
};
|
||||
/**
|
||||
* Retrieves virtual speaker angles for the given channel mask on the
|
||||
* specified device.
|
||||
*/
|
||||
getVirtualSpeakerAngles(bitfield<AudioChannelMask> mask, AudioDevice device)
|
||||
generates (Result retval, vec<SpeakerAngle> speakerAngles);
|
||||
|
||||
/**
|
||||
* Forces the virtualizer effect for the given output device.
|
||||
*/
|
||||
forceVirtualizationMode(AudioDevice device) generates (Result retval);
|
||||
|
||||
/**
|
||||
* Returns audio device reflecting the current virtualization mode,
|
||||
* AUDIO_DEVICE_NONE when not virtualizing.
|
||||
*/
|
||||
getVirtualizationMode() generates (Result retval, AudioDevice device);
|
||||
};
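Illustrative sketch (not part of this change): probing strength support before applying a mid-range value from the documented [0, 1000] per-mille range.

#include <android/hardware/audio/effect/7.0/IVirtualizerEffect.h>

using namespace android::hardware::audio::effect::V7_0;

void applyVirtualizer(const android::sp<IVirtualizerEffect>& v) {
    bool supported = v->isStrengthSupported();
    if (supported) {
        v->setStrength(500);  // per mille, halfway between MIN and MAX
    }
}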
|
110
audio/effect/7.0/IVisualizerEffect.hal
Normal file
|
@ -0,0 +1,110 @@
|
|||
/*
|
||||
* Copyright (C) 2020 The Android Open Source Project
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package android.hardware.audio.effect@7.0;
|
||||
|
||||
import android.hardware.audio.common@7.0;
|
||||
import IEffect;
|
||||
|
||||
interface IVisualizerEffect extends IEffect {
|
||||
enum CaptureSizeRange : int32_t {
|
||||
MAX = 1024, // maximum capture size in samples
|
||||
MIN = 128 // minimum capture size in samples
|
||||
};
|
||||
|
||||
/**
|
||||
* Sets the number of PCM samples in the capture.
|
||||
*/
|
||||
setCaptureSize(uint16_t captureSize) generates (Result retval);
|
||||
|
||||
/**
|
||||
* Gets the number of PCM samples in the capture.
|
||||
*/
|
||||
getCaptureSize() generates (Result retval, uint16_t captureSize);
|
||||
|
||||
enum ScalingMode : int32_t {
|
||||
// Keep in sync with SCALING_MODE_... in
|
||||
// frameworks/base/media/java/android/media/audiofx/Visualizer.java
|
||||
NORMALIZED = 0,
|
||||
AS_PLAYED = 1
|
||||
};
|
||||
|
||||
/**
|
||||
* Specifies the way the captured data is scaled.
|
||||
*/
|
||||
setScalingMode(ScalingMode scalingMode) generates (Result retval);
|
||||
|
||||
/**
|
||||
* Retrieves the way the captured data is scaled.
|
||||
*/
|
||||
getScalingMode() generates (Result retval, ScalingMode scalingMode);
|
||||
|
||||
/**
|
||||
* Informs the visualizer about the downstream latency.
|
||||
*/
|
||||
setLatency(uint32_t latencyMs) generates (Result retval);
|
||||
|
||||
/**
|
||||
* Gets the downstream latency.
|
||||
*/
|
||||
getLatency() generates (Result retval, uint32_t latencyMs);
|
||||
|
||||
enum MeasurementMode : int32_t {
|
||||
// Keep in sync with MEASUREMENT_MODE_... in
|
||||
// frameworks/base/media/java/android/media/audiofx/Visualizer.java
|
||||
NONE = 0x0,
|
||||
PEAK_RMS = 0x1
|
||||
};
|
||||
|
||||
/**
|
||||
* Specifies which measurements are to be made.
|
||||
*/
|
||||
setMeasurementMode(MeasurementMode measurementMode)
|
||||
generates (Result retval);
|
||||
|
||||
/**
|
||||
* Retrieves which measurements are to be made.
|
||||
*/
|
||||
getMeasurementMode() generates (
|
||||
Result retval, MeasurementMode measurementMode);
|
||||
|
||||
/**
|
||||
* Retrieves the latest PCM snapshot captured by the visualizer engine. The
|
||||
* number of samples to capture is specified by 'setCaptureSize' parameter.
|
||||
*
|
||||
* @return retval operation completion status.
|
||||
* @return samples samples in 8 bit unsigned format (a zero signal corresponds to 0x80)
|
||||
*/
|
||||
capture() generates (Result retval, vec<uint8_t> samples);
|
||||
|
||||
struct Measurement {
|
||||
MeasurementMode mode; // discriminator
|
||||
union Values {
|
||||
struct PeakAndRms {
|
||||
int32_t peakMb; // millibels
|
||||
int32_t rmsMb; // millibels
|
||||
} peakAndRms;
|
||||
} value;
|
||||
};
|
||||
/**
|
||||
* Retrieves the latest measurements. The measurements to be made
|
||||
* are specified by 'setMeasurementMode' parameter.
|
||||
*
|
||||
* @return retval operation completion status.
|
||||
* @return result measurement.
|
||||
*/
|
||||
measure() generates (Result retval, Measurement result);
|
||||
};
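Illustrative sketch (not part of this change): configuring the capture size within CaptureSizeRange and pulling one PCM snapshot; error handling is minimal for brevity.

#include <android/hardware/audio/effect/7.0/IVisualizerEffect.h>

using namespace android::hardware::audio::effect::V7_0;
using android::hardware::hidl_vec;

hidl_vec<uint8_t> captureOnce(const android::sp<IVisualizerEffect>& vis) {
    hidl_vec<uint8_t> snapshot;
    vis->setCaptureSize(512);  // samples, within [MIN = 128, MAX = 1024]
    vis->capture([&](Result r, const hidl_vec<uint8_t>& samples) {
        if (r == Result::OK) snapshot = samples;
    });
    return snapshot;
}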
|
301
audio/effect/7.0/types.hal
Normal file
|
@ -0,0 +1,301 @@
|
|||
/*
|
||||
* Copyright (C) 2020 The Android Open Source Project
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package android.hardware.audio.effect@7.0;
|
||||
|
||||
import android.hardware.audio.common@7.0;
|
||||
|
||||
enum Result : int32_t {
|
||||
OK,
|
||||
NOT_INITIALIZED,
|
||||
INVALID_ARGUMENTS,
|
||||
INVALID_STATE,
|
||||
NOT_SUPPORTED,
|
||||
RESULT_TOO_BIG
|
||||
};
|
||||
|
||||
/**
|
||||
* Effect engine capabilities/requirements flags.
|
||||
*
|
||||
* Definitions for flags field of effect descriptor.
|
||||
*
|
||||
* +----------------+--------+--------------------------------------------------
|
||||
* | description | bits | values
|
||||
* +----------------+--------+--------------------------------------------------
|
||||
* | connection | 0..2 | 0 insert: after track process
|
||||
* | mode | | 1 auxiliary: connect to track auxiliary
|
||||
* | | | output and use send level
|
||||
* | | | 2 replace: replaces track process function;
|
||||
* | | | must implement SRC, volume and mono to stereo.
|
||||
* | | | 3 pre processing: applied below audio HAL on in
|
||||
* | | | 4 post processing: applied below audio HAL on out
|
||||
* | | | 5 - 7 reserved
|
||||
* +----------------+--------+--------------------------------------------------
|
||||
* | insertion | 3..5 | 0 none
|
||||
* | preference | | 1 first of the chain
|
||||
* | | | 2 last of the chain
|
||||
* | | | 3 exclusive (only effect in the insert chain)
|
||||
* | | | 4..7 reserved
|
||||
* +----------------+--------+--------------------------------------------------
|
||||
* | Volume | 6..8 | 0 none
|
||||
* | management | | 1 implements volume control
|
||||
* | | | 2 requires volume indication
|
||||
* | | | 3 monitors requested volume
|
||||
* | | | 4 reserved
|
||||
* +----------------+--------+--------------------------------------------------
|
||||
* | Device | 9..11 | 0 none
|
||||
* | indication | | 1 requires device updates
|
||||
* | | | 2, 4 reserved
|
||||
* +----------------+--------+--------------------------------------------------
|
||||
* | Sample input | 12..13 | 1 direct: process() function or
|
||||
* | mode | | EFFECT_CMD_SET_CONFIG command must specify
|
||||
* | | | a buffer descriptor
|
||||
* | | | 2 provider: process() function uses the
|
||||
* | | | bufferProvider indicated by the
|
||||
* | | | EFFECT_CMD_SET_CONFIG command to request input.
|
||||
* | | | buffers.
|
||||
* | | | 3 both: both input modes are supported
|
||||
* +----------------+--------+--------------------------------------------------
|
||||
* | Sample output | 14..15 | 1 direct: process() function or
|
||||
* | mode | | EFFECT_CMD_SET_CONFIG command must specify
|
||||
* | | | a buffer descriptor
|
||||
* | | | 2 provider: process() function uses the
|
||||
* | | | bufferProvider indicated by the
|
||||
* | | | EFFECT_CMD_SET_CONFIG command to request output
|
||||
* | | | buffers.
|
||||
* | | | 3 both: both output modes are supported
|
||||
* +----------------+--------+--------------------------------------------------
|
||||
* | Hardware | 16..17 | 0 No hardware acceleration
|
||||
* | acceleration | | 1 non tunneled hw acceleration: the process()
|
||||
* | | | function reads the samples, send them to HW
|
||||
* | | | accelerated effect processor, reads back
|
||||
* | | | the processed samples and returns them
|
||||
* | | | to the output buffer.
|
||||
* | | | 2 tunneled hw acceleration: the process()
|
||||
* | | | function is transparent. The effect interface
|
||||
* | | | is only used to control the effect engine.
|
||||
* | | | This mode is relevant for global effects
|
||||
* | | | actually applied by the audio hardware on
|
||||
* | | | the output stream.
|
||||
* +----------------+--------+--------------------------------------------------
|
||||
* | Audio Mode | 18..19 | 0 none
|
||||
* | indication | | 1 requires audio mode updates
|
||||
* | | | 2..3 reserved
|
||||
* +----------------+--------+--------------------------------------------------
|
||||
* | Audio source | 20..21 | 0 none
|
||||
* | indication | | 1 requires audio source updates
|
||||
* | | | 2..3 reserved
|
||||
* +----------------+--------+--------------------------------------------------
|
||||
* | Effect offload | 22 | 0 The effect cannot be offloaded to an audio DSP
|
||||
* | supported | | 1 The effect can be offloaded to an audio DSP
|
||||
* +----------------+--------+--------------------------------------------------
|
||||
* | Process | 23 | 0 The effect implements a process function.
|
||||
* | function | | 1 The effect does not implement a process
|
||||
* | not | | function: enabling the effect has no impact
|
||||
* | implemented | | on latency or CPU load.
|
||||
* | | | Effect implementations setting this flag do not
|
||||
* | | | have to implement a process function.
|
||||
* +----------------+--------+--------------------------------------------------
|
||||
*/
|
||||
@export(name="", value_prefix="EFFECT_FLAG_")
|
||||
enum EffectFlags : int32_t {
|
||||
// Insert mode
|
||||
TYPE_SHIFT = 0,
|
||||
TYPE_SIZE = 3,
|
||||
TYPE_MASK = ((1 << TYPE_SIZE) -1) << TYPE_SHIFT,
|
||||
TYPE_INSERT = 0 << TYPE_SHIFT,
|
||||
TYPE_AUXILIARY = 1 << TYPE_SHIFT,
|
||||
TYPE_REPLACE = 2 << TYPE_SHIFT,
|
||||
TYPE_PRE_PROC = 3 << TYPE_SHIFT,
|
||||
TYPE_POST_PROC = 4 << TYPE_SHIFT,
|
||||
|
||||
// Insert preference
|
||||
INSERT_SHIFT = TYPE_SHIFT + TYPE_SIZE,
|
||||
INSERT_SIZE = 3,
|
||||
INSERT_MASK = ((1 << INSERT_SIZE) -1) << INSERT_SHIFT,
|
||||
INSERT_ANY = 0 << INSERT_SHIFT,
|
||||
INSERT_FIRST = 1 << INSERT_SHIFT,
|
||||
INSERT_LAST = 2 << INSERT_SHIFT,
|
||||
INSERT_EXCLUSIVE = 3 << INSERT_SHIFT,
|
||||
|
||||
// Volume control
|
||||
VOLUME_SHIFT = INSERT_SHIFT + INSERT_SIZE,
|
||||
VOLUME_SIZE = 3,
|
||||
VOLUME_MASK = ((1 << VOLUME_SIZE) -1) << VOLUME_SHIFT,
|
||||
VOLUME_CTRL = 1 << VOLUME_SHIFT,
|
||||
VOLUME_IND = 2 << VOLUME_SHIFT,
|
||||
VOLUME_MONITOR = 3 << VOLUME_SHIFT,
|
||||
VOLUME_NONE = 0 << VOLUME_SHIFT,
|
||||
|
||||
// Device indication
|
||||
DEVICE_SHIFT = VOLUME_SHIFT + VOLUME_SIZE,
|
||||
DEVICE_SIZE = 3,
|
||||
DEVICE_MASK = ((1 << DEVICE_SIZE) -1) << DEVICE_SHIFT,
|
||||
DEVICE_IND = 1 << DEVICE_SHIFT,
|
||||
DEVICE_NONE = 0 << DEVICE_SHIFT,
|
||||
|
||||
// Sample input modes
|
||||
INPUT_SHIFT = DEVICE_SHIFT + DEVICE_SIZE,
|
||||
INPUT_SIZE = 2,
|
||||
INPUT_MASK = ((1 << INPUT_SIZE) -1) << INPUT_SHIFT,
|
||||
INPUT_DIRECT = 1 << INPUT_SHIFT,
|
||||
INPUT_PROVIDER = 2 << INPUT_SHIFT,
|
||||
INPUT_BOTH = 3 << INPUT_SHIFT,
|
||||
|
||||
// Sample output modes
|
||||
OUTPUT_SHIFT = INPUT_SHIFT + INPUT_SIZE,
|
||||
OUTPUT_SIZE = 2,
|
||||
OUTPUT_MASK = ((1 << OUTPUT_SIZE) -1) << OUTPUT_SHIFT,
|
||||
OUTPUT_DIRECT = 1 << OUTPUT_SHIFT,
|
||||
OUTPUT_PROVIDER = 2 << OUTPUT_SHIFT,
|
||||
OUTPUT_BOTH = 3 << OUTPUT_SHIFT,
|
||||
|
||||
// Hardware acceleration mode
|
||||
HW_ACC_SHIFT = OUTPUT_SHIFT + OUTPUT_SIZE,
|
||||
HW_ACC_SIZE = 2,
|
||||
HW_ACC_MASK = ((1 << HW_ACC_SIZE) -1) << HW_ACC_SHIFT,
|
||||
HW_ACC_SIMPLE = 1 << HW_ACC_SHIFT,
|
||||
HW_ACC_TUNNEL = 2 << HW_ACC_SHIFT,
|
||||
|
||||
// Audio mode indication
|
||||
AUDIO_MODE_SHIFT = HW_ACC_SHIFT + HW_ACC_SIZE,
|
||||
AUDIO_MODE_SIZE = 2,
|
||||
AUDIO_MODE_MASK = ((1 << AUDIO_MODE_SIZE) -1) << AUDIO_MODE_SHIFT,
|
||||
AUDIO_MODE_IND = 1 << AUDIO_MODE_SHIFT,
|
||||
AUDIO_MODE_NONE = 0 << AUDIO_MODE_SHIFT,
|
||||
|
||||
// Audio source indication
|
||||
AUDIO_SOURCE_SHIFT = AUDIO_MODE_SHIFT + AUDIO_MODE_SIZE,
|
||||
AUDIO_SOURCE_SIZE = 2,
|
||||
AUDIO_SOURCE_MASK = ((1 << AUDIO_SOURCE_SIZE) -1) << AUDIO_SOURCE_SHIFT,
|
||||
AUDIO_SOURCE_IND = 1 << AUDIO_SOURCE_SHIFT,
|
||||
AUDIO_SOURCE_NONE = 0 << AUDIO_SOURCE_SHIFT,
|
||||
|
||||
// Effect offload indication
|
||||
OFFLOAD_SHIFT = AUDIO_SOURCE_SHIFT + AUDIO_SOURCE_SIZE,
|
||||
OFFLOAD_SIZE = 1,
|
||||
OFFLOAD_MASK = ((1 << OFFLOAD_SIZE) -1) << OFFLOAD_SHIFT,
|
||||
OFFLOAD_SUPPORTED = 1 << OFFLOAD_SHIFT,
|
||||
|
||||
// Effect has no process indication
|
||||
NO_PROCESS_SHIFT = OFFLOAD_SHIFT + OFFLOAD_SIZE,
|
||||
NO_PROCESS_SIZE = 1,
|
||||
NO_PROCESS_MASK = ((1 << NO_PROCESS_SIZE) -1) << NO_PROCESS_SHIFT,
|
||||
NO_PROCESS = 1 << NO_PROCESS_SHIFT
|
||||
};
|
||||
|
||||
/**
|
||||
* The effect descriptor contains necessary information to facilitate the
|
||||
* enumeration of the effect engines present in a library.
|
||||
*/
|
||||
struct EffectDescriptor {
|
||||
Uuid type; // UUID of the OpenSL ES interface implemented
|
||||
// by this effect
|
||||
Uuid uuid; // UUID for this particular implementation
|
||||
bitfield<EffectFlags> flags; // effect engine capabilities/requirements flags
|
||||
uint16_t cpuLoad; // CPU load indication expressed in 0.1 MIPS units
|
||||
// as estimated on an ARM9E core (ARMv5TE) with 0 WS
|
||||
uint16_t memoryUsage; // data memory usage expressed in KB and includes
|
||||
// only dynamically allocated memory
|
||||
uint8_t[64] name; // human readable effect name
|
||||
uint8_t[64] implementor; // human readable effect implementor name
|
||||
};
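Illustrative sketch (not part of this change): decoding two of the bit fields documented in the flags table above from an EffectDescriptor's 'flags' value; the helper names are hypothetical.

#include <android/hardware/audio/effect/7.0/types.h>

using namespace android::hardware::audio::effect::V7_0;

bool isAuxiliary(const EffectDescriptor& desc) {
    // Connection mode lives in bits 0..2 (TYPE_MASK / TYPE_SHIFT).
    uint32_t flags = static_cast<uint32_t>(desc.flags);
    return (flags & static_cast<uint32_t>(EffectFlags::TYPE_MASK)) ==
           static_cast<uint32_t>(EffectFlags::TYPE_AUXILIARY);
}

bool isOffloadable(const EffectDescriptor& desc) {
    // Bit 22 indicates the effect can be offloaded to an audio DSP.
    return (static_cast<uint32_t>(desc.flags) &
            static_cast<uint32_t>(EffectFlags::OFFLOAD_SUPPORTED)) != 0;
}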
|
||||
|
||||
/**
|
||||
* A buffer is a chunk of audio data for processing. Multi-channel audio is
|
||||
* always interleaved. The channel order is from LSB to MSB with regard to the
|
||||
* channel mask definition in audio.h, audio_channel_mask_t, e.g.:
|
||||
* Stereo: L, R; 5.1: FL, FR, FC, LFE, BL, BR.
|
||||
*
|
||||
* The buffer size is expressed in frame count, a frame being composed of
|
||||
* samples for all channels at a given time. Frame size for unspecified format
|
||||
* (AUDIO_FORMAT_OTHER) is 8 bit by definition.
|
||||
*/
|
||||
struct AudioBuffer {
|
||||
uint64_t id;
|
||||
uint32_t frameCount;
|
||||
memory data;
|
||||
};
|
||||
|
||||
@export(name="effect_buffer_access_e", value_prefix="EFFECT_BUFFER_")
|
||||
enum EffectBufferAccess : int32_t {
|
||||
ACCESS_WRITE,
|
||||
ACCESS_READ,
|
||||
ACCESS_ACCUMULATE
|
||||
};
|
||||
|
||||
/**
|
||||
* Determines what fields of EffectBufferConfig need to be considered.
|
||||
*/
|
||||
@export(name="", value_prefix="EFFECT_CONFIG_")
|
||||
enum EffectConfigParameters : int32_t {
|
||||
BUFFER = 0x0001, // buffer field
|
||||
SMP_RATE = 0x0002, // samplingRate
|
||||
CHANNELS = 0x0004, // channels
|
||||
FORMAT = 0x0008, // format
|
||||
ACC_MODE = 0x0010, // accessMode
|
||||
// Note that the 2.0 ALL constant has been moved to a helper function
|
||||
};
|
||||
|
||||
/**
|
||||
* The buffer config structure specifies the input or output audio format
|
||||
* to be used by the effect engine.
|
||||
*/
|
||||
struct EffectBufferConfig {
|
||||
AudioBuffer buffer;
|
||||
uint32_t samplingRateHz;
|
||||
bitfield<AudioChannelMask> channels;
|
||||
AudioFormat format;
|
||||
EffectBufferAccess accessMode;
|
||||
bitfield<EffectConfigParameters> mask;
|
||||
};
|
||||
|
||||
struct EffectConfig {
|
||||
EffectBufferConfig inputCfg;
|
||||
EffectBufferConfig outputCfg;
|
||||
};
|
||||
|
||||
@export(name="effect_feature_e", value_prefix="EFFECT_FEATURE_")
|
||||
enum EffectFeature : int32_t {
|
||||
AUX_CHANNELS, // supports auxiliary channels
|
||||
// (e.g. dual mic noise suppressor)
|
||||
CNT
|
||||
};
|
||||
|
||||
struct EffectAuxChannelsConfig {
|
||||
bitfield<AudioChannelMask> mainChannels; // channel mask for main channels
|
||||
bitfield<AudioChannelMask> auxChannels; // channel mask for auxiliary channels
|
||||
};
|
||||
|
||||
struct EffectOffloadParameter {
|
||||
bool isOffload; // true if the playback thread the effect
|
||||
// is attached to is offloaded
|
||||
AudioIoHandle ioHandle; // io handle of the playback thread
|
||||
// the effect is attached to
|
||||
};
|
||||
|
||||
/**
|
||||
* The message queue flags used to synchronize reads and writes from
|
||||
* the status message queue used by effects.
|
||||
*/
|
||||
enum MessageQueueFlagBits : uint32_t {
|
||||
DONE_PROCESSING = 1 << 0,
|
||||
REQUEST_PROCESS = 1 << 1,
|
||||
REQUEST_PROCESS_REVERSE = 1 << 2,
|
||||
REQUEST_QUIT = 1 << 3,
|
||||
REQUEST_PROCESS_ALL =
|
||||
REQUEST_PROCESS | REQUEST_PROCESS_REVERSE | REQUEST_QUIT
|
||||
};
|
5
audio/effect/7.0/xml/Android.bp
Normal file
|
@ -0,0 +1,5 @@
|
|||
xsd_config {
|
||||
name: "audio_effects_conf_V7_0",
|
||||
srcs: ["audio_effects_conf.xsd"],
|
||||
package_name: "audio.effects.V7_0",
|
||||
}
|
208
audio/effect/7.0/xml/api/current.txt
Normal file
|
@ -0,0 +1,208 @@
|
|||
// Signature format: 2.0
|
||||
package audio.effects.V7_0 {
|
||||
|
||||
public class AudioEffectsConf {
|
||||
ctor public AudioEffectsConf();
|
||||
method public audio.effects.V7_0.AudioEffectsConf.DeviceEffects getDeviceEffects();
|
||||
method public audio.effects.V7_0.EffectsType getEffects();
|
||||
method public audio.effects.V7_0.LibrariesType getLibraries();
|
||||
method public audio.effects.V7_0.AudioEffectsConf.Postprocess getPostprocess();
|
||||
method public audio.effects.V7_0.AudioEffectsConf.Preprocess getPreprocess();
|
||||
method public audio.effects.V7_0.VersionType getVersion();
|
||||
method public void setDeviceEffects(audio.effects.V7_0.AudioEffectsConf.DeviceEffects);
|
||||
method public void setEffects(audio.effects.V7_0.EffectsType);
|
||||
method public void setLibraries(audio.effects.V7_0.LibrariesType);
|
||||
method public void setPostprocess(audio.effects.V7_0.AudioEffectsConf.Postprocess);
|
||||
method public void setPreprocess(audio.effects.V7_0.AudioEffectsConf.Preprocess);
|
||||
method public void setVersion(audio.effects.V7_0.VersionType);
|
||||
}
|
||||
|
||||
public static class AudioEffectsConf.DeviceEffects {
|
||||
ctor public AudioEffectsConf.DeviceEffects();
|
||||
method public java.util.List<audio.effects.V7_0.DeviceProcessType> getDevicePort();
|
||||
}
|
||||
|
||||
public static class AudioEffectsConf.Postprocess {
|
||||
ctor public AudioEffectsConf.Postprocess();
|
||||
method public java.util.List<audio.effects.V7_0.StreamPostprocessType> getStream();
|
||||
}
|
||||
|
||||
public static class AudioEffectsConf.Preprocess {
|
||||
ctor public AudioEffectsConf.Preprocess();
|
||||
method public java.util.List<audio.effects.V7_0.StreamPreprocessType> getStream();
|
||||
}
|
||||
|
||||
public class DeviceProcessType extends audio.effects.V7_0.StreamProcessingType {
|
||||
ctor public DeviceProcessType();
|
||||
method public String getAddress();
|
||||
method public audio.effects.V7_0.DeviceType getType();
|
||||
method public void setAddress(String);
|
||||
method public void setType(audio.effects.V7_0.DeviceType);
|
||||
}
|
||||
|
||||
public enum DeviceType {
|
||||
method public String getRawName();
|
||||
enum_constant public static final audio.effects.V7_0.DeviceType AUDIO_DEVICE_IN_AUX_DIGITAL;
|
||||
enum_constant public static final audio.effects.V7_0.DeviceType AUDIO_DEVICE_IN_BACK_MIC;
|
||||
enum_constant public static final audio.effects.V7_0.DeviceType AUDIO_DEVICE_IN_BLUETOOTH_A2DP;
|
||||
enum_constant public static final audio.effects.V7_0.DeviceType AUDIO_DEVICE_IN_BLUETOOTH_BLE;
|
||||
enum_constant public static final audio.effects.V7_0.DeviceType AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET;
|
||||
enum_constant public static final audio.effects.V7_0.DeviceType AUDIO_DEVICE_IN_BUILTIN_MIC;
|
||||
enum_constant public static final audio.effects.V7_0.DeviceType AUDIO_DEVICE_IN_BUS;
|
||||
enum_constant public static final audio.effects.V7_0.DeviceType AUDIO_DEVICE_IN_COMMUNICATION;
|
||||
enum_constant public static final audio.effects.V7_0.DeviceType AUDIO_DEVICE_IN_DGTL_DOCK_HEADSET;
|
||||
enum_constant public static final audio.effects.V7_0.DeviceType AUDIO_DEVICE_IN_ECHO_REFERENCE;
|
||||
enum_constant public static final audio.effects.V7_0.DeviceType AUDIO_DEVICE_IN_FM_TUNER;
|
||||
enum_constant public static final audio.effects.V7_0.DeviceType AUDIO_DEVICE_IN_HDMI;
|
||||
enum_constant public static final audio.effects.V7_0.DeviceType AUDIO_DEVICE_IN_HDMI_ARC;
|
||||
enum_constant public static final audio.effects.V7_0.DeviceType AUDIO_DEVICE_IN_IP;
|
||||
enum_constant public static final audio.effects.V7_0.DeviceType AUDIO_DEVICE_IN_LINE;
|
||||
enum_constant public static final audio.effects.V7_0.DeviceType AUDIO_DEVICE_IN_LOOPBACK;
|
||||
enum_constant public static final audio.effects.V7_0.DeviceType AUDIO_DEVICE_IN_PROXY;
|
||||
enum_constant public static final audio.effects.V7_0.DeviceType AUDIO_DEVICE_IN_REMOTE_SUBMIX;
|
||||
enum_constant public static final audio.effects.V7_0.DeviceType AUDIO_DEVICE_IN_SPDIF;
|
||||
enum_constant public static final audio.effects.V7_0.DeviceType AUDIO_DEVICE_IN_TELEPHONY_RX;
|
||||
enum_constant public static final audio.effects.V7_0.DeviceType AUDIO_DEVICE_IN_TV_TUNER;
|
||||
enum_constant public static final audio.effects.V7_0.DeviceType AUDIO_DEVICE_IN_USB_ACCESSORY;
|
||||
enum_constant public static final audio.effects.V7_0.DeviceType AUDIO_DEVICE_IN_USB_DEVICE;
|
||||
enum_constant public static final audio.effects.V7_0.DeviceType AUDIO_DEVICE_IN_USB_HEADSET;
|
||||
enum_constant public static final audio.effects.V7_0.DeviceType AUDIO_DEVICE_IN_VOICE_CALL;
|
||||
enum_constant public static final audio.effects.V7_0.DeviceType AUDIO_DEVICE_IN_WIRED_HEADSET;
|
||||
enum_constant public static final audio.effects.V7_0.DeviceType AUDIO_DEVICE_OUT_AUX_DIGITAL;
|
||||
enum_constant public static final audio.effects.V7_0.DeviceType AUDIO_DEVICE_OUT_AUX_LINE;
|
||||
enum_constant public static final audio.effects.V7_0.DeviceType AUDIO_DEVICE_OUT_BLUETOOTH_A2DP;
|
||||
enum_constant public static final audio.effects.V7_0.DeviceType AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES;
|
||||
enum_constant public static final audio.effects.V7_0.DeviceType AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_SPEAKER;
|
||||
enum_constant public static final audio.effects.V7_0.DeviceType AUDIO_DEVICE_OUT_BLUETOOTH_SCO;
|
||||
enum_constant public static final audio.effects.V7_0.DeviceType AUDIO_DEVICE_OUT_BLUETOOTH_SCO_CARKIT;
|
||||
enum_constant public static final audio.effects.V7_0.DeviceType AUDIO_DEVICE_OUT_BLUETOOTH_SCO_HEADSET;
|
||||
enum_constant public static final audio.effects.V7_0.DeviceType AUDIO_DEVICE_OUT_BUS;
|
||||
enum_constant public static final audio.effects.V7_0.DeviceType AUDIO_DEVICE_OUT_EARPIECE;
|
||||
enum_constant public static final audio.effects.V7_0.DeviceType AUDIO_DEVICE_OUT_ECHO_CANCELLER;
|
||||
enum_constant public static final audio.effects.V7_0.DeviceType AUDIO_DEVICE_OUT_FM;
|
||||
enum_constant public static final audio.effects.V7_0.DeviceType AUDIO_DEVICE_OUT_HDMI;
|
||||
enum_constant public static final audio.effects.V7_0.DeviceType AUDIO_DEVICE_OUT_HDMI_ARC;
|
||||
enum_constant public static final audio.effects.V7_0.DeviceType AUDIO_DEVICE_OUT_HEARING_AID;
|
||||
enum_constant public static final audio.effects.V7_0.DeviceType AUDIO_DEVICE_OUT_IP;
|
||||
enum_constant public static final audio.effects.V7_0.DeviceType AUDIO_DEVICE_OUT_LINE;
|
||||
enum_constant public static final audio.effects.V7_0.DeviceType AUDIO_DEVICE_OUT_PROXY;
|
||||
enum_constant public static final audio.effects.V7_0.DeviceType AUDIO_DEVICE_OUT_REMOTE_SUBMIX;
|
||||
enum_constant public static final audio.effects.V7_0.DeviceType AUDIO_DEVICE_OUT_SPDIF;
|
||||
enum_constant public static final audio.effects.V7_0.DeviceType AUDIO_DEVICE_OUT_SPEAKER;
|
||||
enum_constant public static final audio.effects.V7_0.DeviceType AUDIO_DEVICE_OUT_SPEAKER_SAFE;
|
||||
enum_constant public static final audio.effects.V7_0.DeviceType AUDIO_DEVICE_OUT_TELEPHONY_TX;
|
||||
enum_constant public static final audio.effects.V7_0.DeviceType AUDIO_DEVICE_OUT_USB_ACCESSORY;
|
||||
enum_constant public static final audio.effects.V7_0.DeviceType AUDIO_DEVICE_OUT_USB_DEVICE;
|
||||
enum_constant public static final audio.effects.V7_0.DeviceType AUDIO_DEVICE_OUT_USB_HEADSET;
|
||||
enum_constant public static final audio.effects.V7_0.DeviceType AUDIO_DEVICE_OUT_WIRED_HEADPHONE;
|
||||
enum_constant public static final audio.effects.V7_0.DeviceType AUDIO_DEVICE_OUT_WIRED_HEADSET;
|
||||
}
|
||||
|
||||
public class EffectImplType {
|
||||
ctor public EffectImplType();
|
||||
method public String getLibrary();
|
||||
method public String getUuid();
|
||||
method public void setLibrary(String);
|
||||
method public void setUuid(String);
|
||||
}
|
||||
|
||||
public class EffectProxyType extends audio.effects.V7_0.EffectType {
|
||||
ctor public EffectProxyType();
|
||||
method public audio.effects.V7_0.EffectImplType getLibhw();
|
||||
method public audio.effects.V7_0.EffectImplType getLibsw();
|
||||
method public void setLibhw(audio.effects.V7_0.EffectImplType);
|
||||
method public void setLibsw(audio.effects.V7_0.EffectImplType);
|
||||
}
|
||||
|
||||
public class EffectType extends audio.effects.V7_0.EffectImplType {
|
||||
ctor public EffectType();
|
||||
    method public String getName();
    method public void setName(String);
  }

  public class EffectsType {
    ctor public EffectsType();
    method public java.util.List<audio.effects.V7_0.EffectProxyType> getEffectProxy_optional();
    method public java.util.List<audio.effects.V7_0.EffectType> getEffect_optional();
  }

  public class LibrariesType {
    ctor public LibrariesType();
    method public java.util.List<audio.effects.V7_0.LibrariesType.Library> getLibrary();
  }

  public static class LibrariesType.Library {
    ctor public LibrariesType.Library();
    method public String getName();
    method public String getPath();
    method public void setName(String);
    method public void setPath(String);
  }

  public enum StreamInputType {
    method public String getRawName();
    enum_constant public static final audio.effects.V7_0.StreamInputType camcorder;
    enum_constant public static final audio.effects.V7_0.StreamInputType echo_reference;
    enum_constant public static final audio.effects.V7_0.StreamInputType fm_tuner;
    enum_constant public static final audio.effects.V7_0.StreamInputType mic;
    enum_constant public static final audio.effects.V7_0.StreamInputType unprocessed;
    enum_constant public static final audio.effects.V7_0.StreamInputType voice_call;
    enum_constant public static final audio.effects.V7_0.StreamInputType voice_communication;
    enum_constant public static final audio.effects.V7_0.StreamInputType voice_downlink;
    enum_constant public static final audio.effects.V7_0.StreamInputType voice_performance;
    enum_constant public static final audio.effects.V7_0.StreamInputType voice_recognition;
    enum_constant public static final audio.effects.V7_0.StreamInputType voice_uplink;
  }

  public enum StreamOutputType {
    method public String getRawName();
    enum_constant public static final audio.effects.V7_0.StreamOutputType alarm;
    enum_constant public static final audio.effects.V7_0.StreamOutputType assistant;
    enum_constant public static final audio.effects.V7_0.StreamOutputType bluetooth_sco;
    enum_constant public static final audio.effects.V7_0.StreamOutputType dtmf;
    enum_constant public static final audio.effects.V7_0.StreamOutputType enforced_audible;
    enum_constant public static final audio.effects.V7_0.StreamOutputType music;
    enum_constant public static final audio.effects.V7_0.StreamOutputType notification;
    enum_constant public static final audio.effects.V7_0.StreamOutputType ring;
    enum_constant public static final audio.effects.V7_0.StreamOutputType system;
    enum_constant public static final audio.effects.V7_0.StreamOutputType tts;
    enum_constant public static final audio.effects.V7_0.StreamOutputType voice_call;
  }

  public class StreamPostprocessType extends audio.effects.V7_0.StreamProcessingType {
    ctor public StreamPostprocessType();
    method public audio.effects.V7_0.StreamOutputType getType();
    method public void setType(audio.effects.V7_0.StreamOutputType);
  }

  public class StreamPreprocessType extends audio.effects.V7_0.StreamProcessingType {
    ctor public StreamPreprocessType();
    method public audio.effects.V7_0.StreamInputType getType();
    method public void setType(audio.effects.V7_0.StreamInputType);
  }

  public class StreamProcessingType {
    ctor public StreamProcessingType();
    method public java.util.List<audio.effects.V7_0.StreamProcessingType.Apply> getApply();
  }

  public static class StreamProcessingType.Apply {
    ctor public StreamProcessingType.Apply();
    method public String getEffect();
    method public void setEffect(String);
  }

  public enum VersionType {
    method public String getRawName();
    enum_constant public static final audio.effects.V7_0.VersionType _2_0;
  }

  public class XmlParser {
    ctor public XmlParser();
    method public static audio.effects.V7_0.AudioEffectsConf read(java.io.InputStream) throws javax.xml.datatype.DatatypeConfigurationException, java.io.IOException, org.xmlpull.v1.XmlPullParserException;
    method public static String readText(org.xmlpull.v1.XmlPullParser) throws java.io.IOException, org.xmlpull.v1.XmlPullParserException;
    method public static void skip(org.xmlpull.v1.XmlPullParser) throws java.io.IOException, org.xmlpull.v1.XmlPullParserException;
  }

}
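As a usage illustration only: a minimal Java sketch of driving the generated parser above. XmlParser.read(), LibrariesType.getLibrary(), and Library.getName()/getPath() come from the signatures listed above; the config file path, the getLibraries() accessor on AudioEffectsConf, and the EffectsConfDump class name are assumptions made for this sketch and are not part of this change.

import java.io.FileInputStream;
import java.io.InputStream;

import audio.effects.V7_0.AudioEffectsConf;
import audio.effects.V7_0.LibrariesType;
import audio.effects.V7_0.XmlParser;

public class EffectsConfDump {
    public static void main(String[] args) throws Exception {
        // Path is an assumption; pass the actual effects config location on the device.
        try (InputStream in = new FileInputStream("/vendor/etc/audio_effects.xml")) {
            // XmlParser.read() is part of the generated API listed above.
            AudioEffectsConf conf = XmlParser.read(in);
            // getLibraries() is assumed to be the root type's accessor for the
            // <libraries> element (its signature is not shown in this excerpt).
            LibrariesType libraries = conf.getLibraries();
            if (libraries != null) {
                for (LibrariesType.Library lib : libraries.getLibrary()) {
                    // getName() and getPath() are listed in the API above.
                    System.out.println(lib.getName() + " -> " + lib.getPath());
                }
            }
        }
    }
}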
0
audio/effect/7.0/xml/api/last_current.txt
Normal file
0
audio/effect/7.0/xml/api/last_removed.txt
Normal file
1
audio/effect/7.0/xml/api/removed.txt
Normal file
@@ -0,0 +1 @@
// Signature format: 2.0
323
audio/effect/7.0/xml/audio_effects_conf.xsd
Normal file
@@ -0,0 +1,323 @@
<?xml version="1.0" encoding="utf-8"?>
<!-- Copyright (C) 2020 The Android Open Source Project
     Licensed under the Apache License, Version 2.0 (the "License");
     you may not use this file except in compliance with the License.
     You may obtain a copy of the License at
     http://www.apache.org/licenses/LICENSE-2.0
     Unless required by applicable law or agreed to in writing, software
     distributed under the License is distributed on an "AS IS" BASIS,
     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
     See the License for the specific language governing permissions and
     limitations under the License.
-->
<xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema"
           targetNamespace="http://schemas.android.com/audio/audio_effects_conf/v2_0"
           xmlns:aec="http://schemas.android.com/audio/audio_effects_conf/v2_0"
           elementFormDefault="qualified">
  <!-- Simple types -->
  <xs:simpleType name="versionType">
    <xs:restriction base="xs:decimal">
      <xs:enumeration value="2.0"/>
    </xs:restriction>
  </xs:simpleType>
  <xs:simpleType name="uuidType">
    <xs:restriction base="xs:string">
      <xs:pattern value="[0-9A-Fa-f]{8}-[0-9A-Fa-f]{4}-[0-9A-Fa-f]{4}-[0-9A-Fa-f]{4}-[0-9A-Fa-f]{12}"/>
    </xs:restriction>
  </xs:simpleType>
  <xs:simpleType name="streamInputType">
    <xs:restriction base="xs:string">
      <xs:enumeration value="mic"/>
      <xs:enumeration value="voice_uplink"/>
      <xs:enumeration value="voice_downlink"/>
      <xs:enumeration value="voice_call"/>
      <xs:enumeration value="camcorder"/>
      <xs:enumeration value="voice_recognition"/>
      <xs:enumeration value="voice_communication"/>
      <xs:enumeration value="unprocessed"/>
      <xs:enumeration value="voice_performance"/>
      <xs:enumeration value="echo_reference"/>
      <xs:enumeration value="fm_tuner"/>
    </xs:restriction>
  </xs:simpleType>
  <xs:simpleType name="streamOutputType">
    <xs:restriction base="xs:string">
      <xs:enumeration value="voice_call"/>
      <xs:enumeration value="system"/>
      <xs:enumeration value="ring"/>
      <xs:enumeration value="music"/>
      <xs:enumeration value="alarm"/>
      <xs:enumeration value="notification"/>
      <xs:enumeration value="bluetooth_sco"/>
      <xs:enumeration value="enforced_audible"/>
      <xs:enumeration value="dtmf"/>
      <xs:enumeration value="tts"/>
      <xs:enumeration value="assistant"/>
    </xs:restriction>
  </xs:simpleType>
  <xs:simpleType name="relativePathType">
    <xs:restriction base="xs:string">
      <xs:pattern value="[^/].*"/>
    </xs:restriction>
  </xs:simpleType>
  <xs:simpleType name="deviceType">
    <xs:restriction base="xs:string">
      <xs:enumeration value="AUDIO_DEVICE_OUT_EARPIECE"/>
      <xs:enumeration value="AUDIO_DEVICE_OUT_SPEAKER"/>
      <xs:enumeration value="AUDIO_DEVICE_OUT_WIRED_HEADSET"/>
      <xs:enumeration value="AUDIO_DEVICE_OUT_WIRED_HEADPHONE"/>
      <xs:enumeration value="AUDIO_DEVICE_OUT_BLUETOOTH_SCO"/>
      <xs:enumeration value="AUDIO_DEVICE_OUT_BLUETOOTH_SCO_HEADSET"/>
      <xs:enumeration value="AUDIO_DEVICE_OUT_BLUETOOTH_SCO_CARKIT"/>
      <xs:enumeration value="AUDIO_DEVICE_OUT_BLUETOOTH_A2DP"/>
      <xs:enumeration value="AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES"/>
      <xs:enumeration value="AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_SPEAKER"/>
      <xs:enumeration value="AUDIO_DEVICE_OUT_AUX_DIGITAL"/>
      <xs:enumeration value="AUDIO_DEVICE_OUT_HDMI"/>
      <xs:enumeration value="AUDIO_DEVICE_OUT_USB_ACCESSORY"/>
      <xs:enumeration value="AUDIO_DEVICE_OUT_USB_DEVICE"/>
      <xs:enumeration value="AUDIO_DEVICE_OUT_REMOTE_SUBMIX"/>
      <xs:enumeration value="AUDIO_DEVICE_OUT_TELEPHONY_TX"/>
      <xs:enumeration value="AUDIO_DEVICE_OUT_LINE"/>
      <xs:enumeration value="AUDIO_DEVICE_OUT_HDMI_ARC"/>
      <xs:enumeration value="AUDIO_DEVICE_OUT_SPDIF"/>
      <xs:enumeration value="AUDIO_DEVICE_OUT_FM"/>
      <xs:enumeration value="AUDIO_DEVICE_OUT_AUX_LINE"/>
      <xs:enumeration value="AUDIO_DEVICE_OUT_SPEAKER_SAFE"/>
      <xs:enumeration value="AUDIO_DEVICE_OUT_IP"/>
      <xs:enumeration value="AUDIO_DEVICE_OUT_BUS"/>
      <xs:enumeration value="AUDIO_DEVICE_OUT_PROXY"/>
      <xs:enumeration value="AUDIO_DEVICE_OUT_USB_HEADSET"/>
      <xs:enumeration value="AUDIO_DEVICE_OUT_HEARING_AID"/>
      <xs:enumeration value="AUDIO_DEVICE_OUT_ECHO_CANCELLER"/>
      <!-- Due to the xml format, IN types can not be a separated from OUT types -->
      <xs:enumeration value="AUDIO_DEVICE_IN_COMMUNICATION"/>
      <xs:enumeration value="AUDIO_DEVICE_IN_BUILTIN_MIC"/>
      <xs:enumeration value="AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET"/>
      <xs:enumeration value="AUDIO_DEVICE_IN_WIRED_HEADSET"/>
      <xs:enumeration value="AUDIO_DEVICE_IN_AUX_DIGITAL"/>
      <xs:enumeration value="AUDIO_DEVICE_IN_HDMI"/>
      <xs:enumeration value="AUDIO_DEVICE_IN_VOICE_CALL"/>
      <xs:enumeration value="AUDIO_DEVICE_IN_TELEPHONY_RX"/>
      <xs:enumeration value="AUDIO_DEVICE_IN_BACK_MIC"/>
      <xs:enumeration value="AUDIO_DEVICE_IN_REMOTE_SUBMIX"/>
      <xs:enumeration value="AUDIO_DEVICE_IN_DGTL_DOCK_HEADSET"/>
      <xs:enumeration value="AUDIO_DEVICE_IN_USB_ACCESSORY"/>
      <xs:enumeration value="AUDIO_DEVICE_IN_USB_DEVICE"/>
      <xs:enumeration value="AUDIO_DEVICE_IN_FM_TUNER"/>
      <xs:enumeration value="AUDIO_DEVICE_IN_TV_TUNER"/>
      <xs:enumeration value="AUDIO_DEVICE_IN_LINE"/>
      <xs:enumeration value="AUDIO_DEVICE_IN_SPDIF"/>
      <xs:enumeration value="AUDIO_DEVICE_IN_BLUETOOTH_A2DP"/>
      <xs:enumeration value="AUDIO_DEVICE_IN_LOOPBACK"/>
      <xs:enumeration value="AUDIO_DEVICE_IN_IP"/>
      <xs:enumeration value="AUDIO_DEVICE_IN_BUS"/>
      <xs:enumeration value="AUDIO_DEVICE_IN_PROXY"/>
      <xs:enumeration value="AUDIO_DEVICE_IN_USB_HEADSET"/>
      <xs:enumeration value="AUDIO_DEVICE_IN_BLUETOOTH_BLE"/>
      <xs:enumeration value="AUDIO_DEVICE_IN_HDMI_ARC"/>
      <xs:enumeration value="AUDIO_DEVICE_IN_ECHO_REFERENCE"/>
    </xs:restriction>
  </xs:simpleType>
  <!-- Complex types -->
  <xs:complexType name="librariesType">
    <xs:annotation>
      <xs:documentation xml:lang="en">
        List of effect libraries to load. Each library element must have "name" and
        "path" attributes. The latter is giving the path of the library .so file
        relative to the standard effect folders: /(vendor|odm|system)/lib(64)?/soundfx/
        Example for a library in "/vendor/lib/soundfx/lib.so":
        <library name="name" path="lib.so"/>
      </xs:documentation>
    </xs:annotation>
    <xs:sequence>
      <xs:element name="library" minOccurs="0" maxOccurs="unbounded">
        <xs:complexType>
          <xs:attribute name="name" type="xs:string" use="required"/>
          <xs:attribute name="path" type="aec:relativePathType" use="required"/>
        </xs:complexType>
      </xs:element>
    </xs:sequence>
  </xs:complexType>
  <xs:complexType name="effectImplType">
    <xs:attribute name="library" type="xs:string" use="required"/>
    <xs:attribute name="uuid" type="aec:uuidType" use="required"/>
  </xs:complexType>
  <xs:complexType name="effectType">
    <xs:complexContent>
      <xs:extension base="aec:effectImplType">
        <xs:attribute name="name" type="xs:string" use="required"/>
      </xs:extension>
    </xs:complexContent>
  </xs:complexType>
  <xs:complexType name="effectProxyType">
    <xs:complexContent>
      <xs:extension base="aec:effectType">
        <xs:sequence>
          <xs:element name="libsw" type="aec:effectImplType"/>
          <xs:element name="libhw" type="aec:effectImplType"/>
        </xs:sequence>
      </xs:extension>
    </xs:complexContent>
  </xs:complexType>
  <xs:complexType name="effectsType">
    <xs:annotation>
      <xs:documentation xml:lang="en">
        List of effects to load. Each effect element must contain "name",
        "library", and "uuid" attrs. The value of the "library" attr must
        correspond to the name of a "library" element. The name of the effect
        element is indicative, only the value of the "uuid" element designates
        the effect for the audio framework. The uuid is the implementation
        specific UUID as specified by the effect vendor. This is not the generic
        effect type UUID.
        For effect proxy implementations, SW and HW implementations of the effect
        can be specified.
        Example:
        <effect name="name" library="lib" uuid="uuuu"/>
        <effectProxy name="proxied" library="proxy" uuid="xxxx">
          <libsw library="sw_bundle" uuid="yyyy"/>
          <libhw library="offload_bundle" uuid="zzzz"/>
        </effectProxy>
      </xs:documentation>
    </xs:annotation>
    <xs:choice maxOccurs="unbounded">
      <xs:element name="effect" type="aec:effectType" minOccurs="0" maxOccurs="unbounded"/>
      <xs:element name="effectProxy" type="aec:effectProxyType" minOccurs="0" maxOccurs="unbounded"/>
    </xs:choice>
  </xs:complexType>
  <xs:complexType name="streamProcessingType">
    <xs:sequence>
      <xs:element name="apply" minOccurs="0" maxOccurs="unbounded">
        <xs:complexType>
          <xs:attribute name="effect" type="xs:string" use="required"/>
        </xs:complexType>
      </xs:element>
    </xs:sequence>
  </xs:complexType>
  <xs:complexType name="streamPreprocessType">
    <xs:annotation>
      <xs:documentation xml:lang="en">
        Audio preprocessing configuration. The processing configuration consists
        of a list of elements each describing processing settings for a given
        input stream. Valid input stream types are listed in "streamInputType".
        Each stream element contains a list of "apply" elements. The value of the
        "effect" attr must correspond to the name of an "effect" element.
        Example:
        <stream type="voice_communication">
          <apply effect="effect1"/>
          <apply effect="effect2"/>
        </stream>
      </xs:documentation>
    </xs:annotation>
    <xs:complexContent>
      <xs:extension base="aec:streamProcessingType">
        <xs:attribute name="type" type="aec:streamInputType" use="required"/>
      </xs:extension>
    </xs:complexContent>
  </xs:complexType>
  <xs:complexType name="streamPostprocessType">
    <xs:annotation>
      <xs:documentation xml:lang="en">
        Audio postprocessing configuration. The processing configuration consists
        of a list of elements each describing processing settings for a given
        output stream. Valid output stream types are listed in "streamOutputType".
        Each stream element contains a list of "apply" elements. The value of the
        "effect" attr must correspond to the name of an "effect" element.
        Example:
        <stream type="music">
          <apply effect="effect1"/>
        </stream>
      </xs:documentation>
    </xs:annotation>
    <xs:complexContent>
      <xs:extension base="aec:streamProcessingType">
        <xs:attribute name="type" type="aec:streamOutputType" use="required"/>
      </xs:extension>
    </xs:complexContent>
  </xs:complexType>
  <xs:complexType name="deviceProcessType">
    <xs:annotation>
      <xs:documentation xml:lang="en">
        Audio Device Effects configuration. The processing configuration consists
        of a list of effects to be automatically added on a device Port when involved in an audio
        patch.
        Valid device type are listed in "deviceType" and shall be aligned.
        Each stream element contains a list of "apply" elements. The value of the
        "effect" attr must correspond to the name of an "effect" element.
        Note that if the device is involved in a hardware patch, the effect must be hardware
        accelerated.
        Example:
        <devicePort address="BUS00_USAGE_MAIN" type="AUDIO_DEVICE_OUT_BUS">
          <apply effect="equalizer"/>
          <apply effect="effect2"/>
        </devicePort>
      </xs:documentation>
    </xs:annotation>
    <xs:complexContent>
      <xs:extension base="aec:streamProcessingType">
        <xs:attribute name="address" type="xs:string" use="required"/>
        <xs:attribute name="type" type="aec:deviceType" use="required"/>
      </xs:extension>
    </xs:complexContent>
  </xs:complexType>
  <!-- Root element -->
  <xs:element name="audio_effects_conf">
    <xs:complexType>
      <xs:sequence>
        <xs:element name="libraries" type="aec:librariesType"/>
        <xs:element name="effects" type="aec:effectsType"/>
        <xs:element name="postprocess" minOccurs="0" maxOccurs="1">
          <xs:complexType>
            <xs:sequence>
              <xs:element name="stream" type="aec:streamPostprocessType" minOccurs="0" maxOccurs="unbounded"/>
            </xs:sequence>
          </xs:complexType>
        </xs:element>
        <xs:element name="preprocess" minOccurs="0" maxOccurs="1">
          <xs:complexType>
            <xs:sequence>
              <xs:element name="stream" type="aec:streamPreprocessType" minOccurs="0" maxOccurs="unbounded"/>
            </xs:sequence>
          </xs:complexType>
        </xs:element>
        <xs:element name="deviceEffects" minOccurs="0" maxOccurs="1">
          <xs:complexType>
            <xs:sequence>
              <xs:element name="devicePort" type="aec:deviceProcessType" minOccurs="0" maxOccurs="unbounded"/>
            </xs:sequence>
          </xs:complexType>
        </xs:element>
      </xs:sequence>
      <xs:attribute name="version" type="aec:versionType" use="required"/>
    </xs:complexType>
    <!-- Keys and references -->
    <xs:key name="libraryName">
      <xs:selector xpath="aec:libraries/aec:library"/>
      <xs:field xpath="@name"/>
    </xs:key>
    <xs:keyref name="libraryNameRef1" refer="aec:libraryName">
      <xs:selector xpath="aec:effects/aec:effect"/>
      <xs:field xpath="@library"/>
    </xs:keyref>
    <xs:keyref name="libraryNameRef2" refer="aec:libraryName">
      <xs:selector xpath="aec:effects/aec:effect/aec:libsw"/>
      <xs:field xpath="@library"/>
    </xs:keyref>
    <xs:keyref name="libraryNameRef3" refer="aec:libraryName">
      <xs:selector xpath="aec:effects/aec:effect/aec:libhw"/>
      <xs:field xpath="@library"/>
    </xs:keyref>
    <xs:key name="effectName">
      <xs:selector xpath="aec:effects/aec:effect|aec:effects/aec:effectProxy"/>
      <xs:field xpath="@name"/>
    </xs:key>
    <xs:keyref name="effectNamePreRef" refer="aec:effectName">
      <xs:selector xpath="aec:preprocess/aec:stream/aec:apply"/>
      <xs:field xpath="@effect"/>
    </xs:keyref>
    <xs:keyref name="effectNamePostRef" refer="aec:effectName">
      <xs:selector xpath="aec:postprocess/aec:stream/aec:apply"/>
      <xs:field xpath="@effect"/>
    </xs:keyref>
  </xs:element>
</xs:schema>
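For illustration, a minimal configuration document that the schema above should accept, assembled from the example fragments in its xs:documentation blocks. The library name, .so file name, and effect UUID below are placeholders chosen for the sketch, not values taken from this change.

<?xml version="1.0" encoding="utf-8"?>
<audio_effects_conf version="2.0"
        xmlns="http://schemas.android.com/audio/audio_effects_conf/v2_0">
    <libraries>
        <!-- "path" is resolved relative to /(vendor|odm|system)/lib(64)?/soundfx/ -->
        <library name="bundle" path="libbundlewrapper.so"/>
    </libraries>
    <effects>
        <!-- "uuid" must be the implementation-specific UUID (placeholder here) -->
        <effect name="equalizer" library="bundle"
                uuid="ce772f20-847d-11df-bb17-0002a5d5c51b"/>
    </effects>
    <postprocess>
        <stream type="music">
            <apply effect="equalizer"/>
        </stream>
    </postprocess>
</audio_effects_conf>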
@@ -103,3 +103,18 @@ cc_library_shared {
        "-include common/all-versions/VersionMacro.h",
    ]
}

cc_library_shared {
    name: "android.hardware.audio.effect@7.0-impl",
    defaults: ["android.hardware.audio.effect-impl_default"],
    shared_libs: [
        "android.hardware.audio.common@7.0",
        "android.hardware.audio.common@7.0-util",
        "android.hardware.audio.effect@7.0",
    ],
    cflags: [
        "-DMAJOR_VERSION=7",
        "-DMINOR_VERSION=0",
        "-include common/all-versions/VersionMacro.h",
    ]
}

@@ -113,3 +113,23 @@ cc_test {
        "-include common/all-versions/VersionMacro.h",
    ]
}

cc_test {
    name: "VtsHalAudioEffectV7_0TargetTest",
    defaults: ["VtsHalAudioEffectTargetTest_default"],
    // Use test_config for vts suite.
    // TODO(b/146104851): Add auto-gen rules and remove it.
    test_config: "VtsHalAudioEffectV7_0TargetTest.xml",
    static_libs: [
        "android.hardware.audio.common@7.0",
        "android.hardware.audio.effect@7.0",
    ],
    data: [
        ":audio_effects_conf_V7_0",
    ],
    cflags: [
        "-DMAJOR_VERSION=7",
        "-DMINOR_VERSION=0",
        "-include common/all-versions/VersionMacro.h",
    ]
}
@@ -0,0 +1,38 @@
<?xml version="1.0" encoding="utf-8"?>
<!-- Copyright (C) 2019 The Android Open Source Project

     Licensed under the Apache License, Version 2.0 (the "License");
     you may not use this file except in compliance with the License.
     You may obtain a copy of the License at

     http://www.apache.org/licenses/LICENSE-2.0

     Unless required by applicable law or agreed to in writing, software
     distributed under the License is distributed on an "AS IS" BASIS,
     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
     See the License for the specific language governing permissions and
     limitations under the License.
-->
<configuration description="Runs VtsHalAudioEffectV7_0TargetTest.">
    <option name="test-suite-tag" value="apct" />
    <option name="test-suite-tag" value="apct-native" />

    <target_preparer class="com.android.tradefed.targetprep.RootTargetPreparer"/>
    <target_preparer class="com.android.tradefed.targetprep.StopServicesSetup"/>

    <target_preparer class="com.android.tradefed.targetprep.RunCommandTargetPreparer">
        <option name="run-command" value="setprop vts.native_server.on 1"/>
        <option name="teardown-command" value="setprop vts.native_server.on 0"/>
    </target_preparer>

    <target_preparer class="com.android.tradefed.targetprep.PushFilePreparer">
        <option name="cleanup" value="true" />
        <option name="push" value="VtsHalAudioEffectV7_0TargetTest->/data/local/tmp/VtsHalAudioEffectV7_0TargetTest" />
        <option name="push" value="audio_effects_conf_V7_0.xsd->/data/local/tmp/audio_effects_conf_V7_0.xsd" />
    </target_preparer>

    <test class="com.android.tradefed.testtype.GTest" >
        <option name="native-test-device-path" value="/data/local/tmp" />
        <option name="module-name" value="VtsHalAudioEffectV7_0TargetTest" />
    </test>
</configuration>