/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ANDROID_AUDIO_HAL_INTERFACE_H
#define ANDROID_AUDIO_HAL_INTERFACE_H

#include <stdint.h>
#include <strings.h>
#include <sys/cdefs.h>
#include <sys/types.h>
#include <time.h>

#include <cutils/bitops.h>

#include <hardware/hardware.h>
#include <system/audio.h>
#include <hardware/audio_effect.h>

__BEGIN_DECLS

/**
 * The id of this module
 */
#define AUDIO_HARDWARE_MODULE_ID "audio"

/**
 * Name of the audio devices to open
 */
#define AUDIO_HARDWARE_INTERFACE "audio_hw_if"

/* Use version 0.1 to be compatible with first generation of audio hw module with version_major
 * hardcoded to 1. No audio module API change.
 */
#define AUDIO_MODULE_API_VERSION_0_1 HARDWARE_MODULE_API_VERSION(0, 1)
#define AUDIO_MODULE_API_VERSION_CURRENT AUDIO_MODULE_API_VERSION_0_1

/* First generation of audio devices had version hardcoded to 0. All devices with versions < 1.0
 * will be considered part of the first generation API.
 */
#define AUDIO_DEVICE_API_VERSION_0_0 HARDWARE_DEVICE_API_VERSION(0, 0)
#define AUDIO_DEVICE_API_VERSION_1_0 HARDWARE_DEVICE_API_VERSION(1, 0)
#define AUDIO_DEVICE_API_VERSION_2_0 HARDWARE_DEVICE_API_VERSION(2, 0)
#define AUDIO_DEVICE_API_VERSION_3_0 HARDWARE_DEVICE_API_VERSION(3, 0)
#define AUDIO_DEVICE_API_VERSION_3_1 HARDWARE_DEVICE_API_VERSION(3, 1)
#define AUDIO_DEVICE_API_VERSION_3_2 HARDWARE_DEVICE_API_VERSION(3, 2)
#define AUDIO_DEVICE_API_VERSION_CURRENT AUDIO_DEVICE_API_VERSION_3_2
/* Minimal audio HAL version supported by the audio framework */
#define AUDIO_DEVICE_API_VERSION_MIN AUDIO_DEVICE_API_VERSION_2_0

/**************************************/

/**
 * standard audio parameters that the HAL may need to handle
 */

/**
 * audio device parameters
 */

/* TTY mode selection */
#define AUDIO_PARAMETER_KEY_TTY_MODE "tty_mode"
#define AUDIO_PARAMETER_VALUE_TTY_OFF "tty_off"
#define AUDIO_PARAMETER_VALUE_TTY_VCO "tty_vco"
#define AUDIO_PARAMETER_VALUE_TTY_HCO "tty_hco"
#define AUDIO_PARAMETER_VALUE_TTY_FULL "tty_full"

/* Hearing Aid Compatibility - Telecoil (HAC-T) mode on/off */
#define AUDIO_PARAMETER_KEY_HAC "HACSetting"
#define AUDIO_PARAMETER_VALUE_HAC_ON "ON"
#define AUDIO_PARAMETER_VALUE_HAC_OFF "OFF"

/* A2DP sink address set by framework */
#define AUDIO_PARAMETER_A2DP_SINK_ADDRESS "a2dp_sink_address"

/* A2DP source address set by framework */
#define AUDIO_PARAMETER_A2DP_SOURCE_ADDRESS "a2dp_source_address"

/* Bluetooth SCO wideband */
#define AUDIO_PARAMETER_KEY_BT_SCO_WB "bt_wbs"

/* BT SCO headset name for debug */
#define AUDIO_PARAMETER_KEY_BT_SCO_HEADSET_NAME "bt_headset_name"

/* BT SCO HFP control */
#define AUDIO_PARAMETER_KEY_HFP_ENABLE            "hfp_enable"
#define AUDIO_PARAMETER_KEY_HFP_SET_SAMPLING_RATE "hfp_set_sampling_rate"
#define AUDIO_PARAMETER_KEY_HFP_VOLUME            "hfp_volume"

/* Set screen orientation */
#define AUDIO_PARAMETER_KEY_ROTATION "rotation"

/**
 * audio stream parameters
 */

/* Enable AANC */
#define AUDIO_PARAMETER_KEY_AANC "aanc_enabled"

/**************************************/
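/*
 * Illustrative sketch (not part of the HAL API): the keys above are passed to the
 * set_parameters() entry points declared below as a single string of semicolon-separated
 * key=value pairs. The values shown ("on", "270") and the variable names are assumptions
 * made for the example; the accepted values for each key are defined by the framework
 * and the HAL implementation.
 *
 *   char kv_pairs[64];
 *   snprintf(kv_pairs, sizeof(kv_pairs), "%s=on;%s=270",
 *            AUDIO_PARAMETER_KEY_BT_SCO_WB, AUDIO_PARAMETER_KEY_ROTATION);
 *   // kv_pairs now holds "bt_wbs=on;rotation=270" and can be handed to
 *   // audio_hw_device.set_parameters() or audio_stream.set_parameters().
 */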
/* common audio stream parameters and operations */
struct audio_stream {

    /**
     * Return the sampling rate in Hz - eg. 44100.
     */
    uint32_t (*get_sample_rate)(const struct audio_stream *stream);

    /* currently unused - use set_parameters with key
     *    AUDIO_PARAMETER_STREAM_SAMPLING_RATE
     */
    int (*set_sample_rate)(struct audio_stream *stream, uint32_t rate);

    /**
     * Return size of input/output buffer in bytes for this stream - eg. 4800.
     * It should be a multiple of the frame size.  See also get_input_buffer_size.
     */
    size_t (*get_buffer_size)(const struct audio_stream *stream);

    /**
     * Return the channel mask -
     *  e.g. AUDIO_CHANNEL_OUT_STEREO or AUDIO_CHANNEL_IN_STEREO
     */
    audio_channel_mask_t (*get_channels)(const struct audio_stream *stream);

    /**
     * Return the audio format - e.g. AUDIO_FORMAT_PCM_16_BIT
     */
    audio_format_t (*get_format)(const struct audio_stream *stream);

    /* currently unused - use set_parameters with key
     *    AUDIO_PARAMETER_STREAM_FORMAT
     */
    int (*set_format)(struct audio_stream *stream, audio_format_t format);

    /**
     * Put the audio hardware input/output into standby mode.
     * Driver should exit from standby mode at the next I/O operation.
     * Returns 0 on success and <0 on failure.
     */
    int (*standby)(struct audio_stream *stream);

    /** dump the state of the audio input/output device */
    int (*dump)(const struct audio_stream *stream, int fd);

    /** Return the set of device(s) which this stream is connected to */
    audio_devices_t (*get_device)(const struct audio_stream *stream);

    /**
     * Currently unused - set_device() corresponds to set_parameters() with key
     * AUDIO_PARAMETER_STREAM_ROUTING for both input and output.
     * AUDIO_PARAMETER_STREAM_INPUT_SOURCE is an additional information used by
     * input streams only.
     */
    int (*set_device)(struct audio_stream *stream, audio_devices_t device);

    /**
     * set/get audio stream parameters. The function accepts a list of
     * parameter key value pairs in the form: key1=value1;key2=value2;...
     *
     * Some keys are reserved for standard parameters (See AudioParameter class)
     *
     * If the implementation does not accept a parameter change while
     * the output is active but the parameter is acceptable otherwise, it must
     * return -ENOSYS.
     *
     * The audio flinger will put the stream in standby and then change the
     * parameter value.
     */
    int (*set_parameters)(struct audio_stream *stream, const char *kv_pairs);

    /*
     * Returns a pointer to a heap allocated string. The caller is responsible
     * for freeing the memory for it using free().
     */
    char * (*get_parameters)(const struct audio_stream *stream,
                             const char *keys);

    int (*add_audio_effect)(const struct audio_stream *stream,
                            effect_handle_t effect);

    int (*remove_audio_effect)(const struct audio_stream *stream,
                               effect_handle_t effect);
};
typedef struct audio_stream audio_stream_t;

/* type of asynchronous write callback events. Mutually exclusive */
typedef enum {
    STREAM_CBK_EVENT_WRITE_READY, /* non blocking write completed */
    STREAM_CBK_EVENT_DRAIN_READY, /* drain completed */
    STREAM_CBK_EVENT_ERROR,       /* stream hit some error, let AF take action */
} stream_callback_event_t;

typedef enum {
    STREAM_EVENT_CBK_TYPE_CODEC_FORMAT_CHANGED, /* codec format of the stream changed */
} stream_event_callback_type_t;

typedef int (*stream_callback_t)(stream_callback_event_t event, void *param, void *cookie);

typedef int (*stream_event_callback_t)(stream_event_callback_type_t event,
                                       void *param, void *cookie);
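/*
 * Illustrative sketch (not part of the HAL API): a client-side stream_callback_t that
 * records completion events so a playback thread can wait on them. The struct name,
 * the function name and the pthread-based signalling are assumptions made for the
 * example; any synchronization scheme works.
 *
 *   struct offload_sync {
 *       pthread_mutex_t lock;
 *       pthread_cond_t cond;
 *       bool drain_done;
 *   };
 *
 *   static int example_stream_callback(stream_callback_event_t event,
 *                                      void *param, void *cookie) {
 *       struct offload_sync *sync = (struct offload_sync *)cookie;
 *       (void)param;                          // unused for these events
 *       pthread_mutex_lock(&sync->lock);
 *       if (event == STREAM_CBK_EVENT_DRAIN_READY)
 *           sync->drain_done = true;          // drain() finished
 *       // STREAM_CBK_EVENT_WRITE_READY: room is available again, retry write()
 *       // STREAM_CBK_EVENT_ERROR: tear the stream down and reopen it
 *       pthread_cond_signal(&sync->cond);
 *       pthread_mutex_unlock(&sync->lock);
 *       return 0;
 *   }
 */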
/* type of drain requested to audio_stream_out->drain(). Mutually exclusive */
typedef enum {
    AUDIO_DRAIN_ALL,            /* drain() returns when all data has been played */
    AUDIO_DRAIN_EARLY_NOTIFY    /* drain() returns a short time before all data
                                   from the current track has been played to
                                   give time for gapless track switch */
} audio_drain_type_t;

typedef struct source_metadata {
    size_t track_count;
    /** Array of metadata of each track connected to this source. */
    struct playback_track_metadata* tracks;
} source_metadata_t;

typedef struct sink_metadata {
    size_t track_count;
    /** Array of metadata of each track connected to this sink. */
    struct record_track_metadata* tracks;
} sink_metadata_t;

/* HAL version 3.2 and higher only. */
typedef struct source_metadata_v7 {
    size_t track_count;
    /** Array of metadata of each track connected to this source. */
    struct playback_track_metadata_v7* tracks;
} source_metadata_v7_t;

/* HAL version 3.2 and higher only. */
typedef struct sink_metadata_v7 {
    size_t track_count;
    /** Array of metadata of each track connected to this sink. */
    struct record_track_metadata_v7* tracks;
} sink_metadata_v7_t;

/**
 * audio_stream_out is the abstraction interface for the audio output hardware.
 *
 * It provides information about various properties of the audio output
 * hardware driver.
 */
struct audio_stream_out {
    /**
     * Common methods of the audio stream out.  This *must* be the first member of
     * audio_stream_out as users of this structure will cast an audio_stream to an
     * audio_stream_out pointer in contexts where it's known the audio_stream
     * references an audio_stream_out.
     */
    struct audio_stream common;

    /**
     * Return the audio hardware driver estimated latency in milliseconds.
     */
    uint32_t (*get_latency)(const struct audio_stream_out *stream);

    /**
     * Use this method in situations where audio mixing is done in the
     * hardware. This method serves as a direct interface with hardware,
     * allowing you to set the volume directly, as opposed to going through
     * the framework. This method might be used with multiple PCM outputs or
     * hardware-accelerated codecs, such as MP3 or AAC.
     */
    int (*set_volume)(struct audio_stream_out *stream, float left, float right);

    /**
     * Write audio buffer to driver. Returns number of bytes written, or a
     * negative status_t. If at least one frame was written successfully prior to the error,
     * it is suggested that the driver return that successful (short) byte count
     * and then return an error in the subsequent call.
     *
     * If set_callback() has previously been called to enable non-blocking mode
     * the write() is not allowed to block. It must write only the number of
     * bytes that currently fit in the driver/hardware buffer and then return
     * this byte count. If this is less than the requested write size the
     * callback function must be called when more space is available in the
     * driver/hardware buffer.
     */
    ssize_t (*write)(struct audio_stream_out *stream, const void* buffer,
                     size_t bytes);

    /* return the number of audio frames written by the audio dsp to DAC since
     * the output has exited standby
     */
    int (*get_render_position)(const struct audio_stream_out *stream,
                               uint32_t *dsp_frames);

    /**
     * get the local time at which the next write to the audio driver will be presented.
     * The units are microseconds, where the epoch is decided by the local audio HAL.
     */
    int (*get_next_write_timestamp)(const struct audio_stream_out *stream,
                                    int64_t *timestamp);
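    /*
     * Illustrative sketch (not part of the HAL API): a blocking-mode caller driving the
     * write() contract described above. write() may consume fewer bytes than requested,
     * so clients typically loop. 'out', 'buffer' and 'bytes' stand for an open stream
     * and a frame-aligned buffer and are assumptions of the example.
     *
     *   size_t written = 0;
     *   while (written < bytes) {
     *       ssize_t ret = out->write(out, (const uint8_t *)buffer + written,
     *                                bytes - written);
     *       if (ret < 0) {
     *           // error: bytes accepted by earlier iterations are still queued
     *           break;
     *       }
     *       written += (size_t)ret;
     *   }
     */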
    /**
     * set the callback function for notifying completion of non-blocking
     * write and drain.
     * Calling this function implies that all future write() and drain()
     * must be non-blocking and use the callback to signal completion.
     */
    int (*set_callback)(struct audio_stream_out *stream,
                        stream_callback_t callback, void *cookie);

    /**
     * Notifies the audio driver to stop playback; however, the queued buffers are
     * retained by the hardware. Useful for implementing pause/resume. May be left as
     * an empty implementation if not supported; however, it should be implemented
     * for hardware with non-trivial latency. In the pause state, audio hardware
     * could still be using power. The user may consider calling suspend after a timeout.
     *
     * Implementation of this function is mandatory for offloaded playback.
     */
    int (*pause)(struct audio_stream_out* stream);

    /**
     * Notifies the audio driver to resume playback following a pause.
     * Returns an error if called without a matching pause.
     *
     * Implementation of this function is mandatory for offloaded playback.
     */
    int (*resume)(struct audio_stream_out* stream);

    /**
     * Requests notification when data buffered by the driver/hardware has
     * been played. If set_callback() has previously been called to enable
     * non-blocking mode, the drain() must not block, instead it should return
     * quickly and completion of the drain is notified through the callback.
     * If set_callback() has not been called, the drain() must block until
     * completion.
     * If type==AUDIO_DRAIN_ALL, the drain completes when all previously written
     * data has been played.
     * If type==AUDIO_DRAIN_EARLY_NOTIFY, the drain completes shortly before all
     * data for the current track has played to allow time for the framework
     * to perform a gapless track switch.
     *
     * Drain must return immediately on stop() and flush() calls.
     *
     * Implementation of this function is mandatory for offloaded playback.
     */
    int (*drain)(struct audio_stream_out* stream, audio_drain_type_t type);

    /**
     * Notifies the audio driver to flush the queued data. Stream must already
     * be paused before calling flush().
     *
     * Implementation of this function is mandatory for offloaded playback.
     */
    int (*flush)(struct audio_stream_out* stream);

    /**
     * Return a recent count of the number of audio frames presented to an external observer.
     * This excludes frames which have been written but are still in the pipeline.
     * The count is not reset to zero when output enters standby.
     * Also returns the value of CLOCK_MONOTONIC as of this presentation count.
     * The returned count is expected to be 'recent',
     * but does not need to be the most recent possible value.
     * However, the associated time should correspond to whatever count is returned.
     * Example: assume that N+M frames have been presented, where M is a 'small' number.
     * Then it is permissible to return N instead of N+M,
     * and the timestamp should correspond to N rather than N+M.
     * The terms 'recent' and 'small' are not defined.
     * They reflect the quality of the implementation.
     *
     * 3.0 and higher only.
     */
    int (*get_presentation_position)(const struct audio_stream_out *stream,
                                     uint64_t *frames, struct timespec *timestamp);
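    /*
     * Illustrative sketch (not part of the HAL API): how a client typically consumes
     * get_presentation_position(). 'out' and the bookkeeping variables are assumptions
     * of the example.
     *
     *   uint64_t presented = 0;
     *   struct timespec when;
     *   if (out->get_presentation_position(out, &presented, &when) == 0) {
     *       // 'presented' frames had reached the observer at CLOCK_MONOTONIC time
     *       // 'when'; comparing it with the number of frames written so far gives
     *       // an estimate of how much audio is still in the pipeline.
     *   }
     */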
    /**
     * Called by the framework to start a stream operating in mmap mode.
     * create_mmap_buffer must be called before calling start()
     *
     * \note Function only implemented by streams operating in mmap mode.
     *
     * \param[in] stream the stream object.
     * \return 0 in case of success.
     *         -ENOSYS if called out of sequence or on non mmap stream
     */
    int (*start)(const struct audio_stream_out* stream);

    /**
     * Called by the framework to stop a stream operating in mmap mode.
     * Must be called after start()
     *
     * \note Function only implemented by streams operating in mmap mode.
     *
     * \param[in] stream the stream object.
     * \return 0 in case of success.
     *         -ENOSYS if called out of sequence or on non mmap stream
     */
    int (*stop)(const struct audio_stream_out* stream);

    /**
     * Called by the framework to retrieve information on the mmap buffer used for audio
     * samples transfer.
     *
     * \note Function only implemented by streams operating in mmap mode.
     *
     * \param[in] stream the stream object.
     * \param[in] min_size_frames minimum buffer size requested. The actual buffer
     *        size returned in struct audio_mmap_buffer_info can be larger.
     * \param[out] info address at which the mmap buffer information should be returned.
     *
     * \return 0 if the buffer was allocated.
     *         -ENODEV in case of initialization error
     *         -EINVAL if the requested buffer size is too large
     *         -ENOSYS if called out of sequence (e.g. buffer already allocated)
     */
    int (*create_mmap_buffer)(const struct audio_stream_out *stream,
                              int32_t min_size_frames,
                              struct audio_mmap_buffer_info *info);

    /**
     * Called by the framework to read current read/write position in the mmap buffer
     * with associated time stamp.
     *
     * \note Function only implemented by streams operating in mmap mode.
     *
     * \param[in] stream the stream object.
     * \param[out] position address at which the mmap read/write position should be returned.
     *
     * \return 0 if the position is successfully returned.
     *         -ENODATA if the position cannot be retrieved
     *         -ENOSYS if called before create_mmap_buffer()
     */
    int (*get_mmap_position)(const struct audio_stream_out *stream,
                             struct audio_mmap_position *position);

    /**
     * Called when the metadata of the stream's source has been changed.
     * @param source_metadata Description of the audio that is played by the clients.
     */
    void (*update_source_metadata)(struct audio_stream_out *stream,
                                   const struct source_metadata* source_metadata);

    /**
     * Set the callback function for notifying events for an output stream.
     */
    int (*set_event_callback)(struct audio_stream_out *stream,
                              stream_event_callback_t callback,
                              void *cookie);

    /**
     * Called when the metadata of the stream's source has been changed.
     * HAL version 3.2 and higher only.
     * @param source_metadata Description of the audio that is played by the clients.
     */
    void (*update_source_metadata_v7)(struct audio_stream_out *stream,
                                      const struct source_metadata_v7* source_metadata);
};
typedef struct audio_stream_out audio_stream_out_t;
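/*
 * Illustrative sketch (not part of the HAL API): the expected call sequence for an
 * output stream operating in mmap mode. The requested minimum of 480 frames and the
 * variable names are arbitrary example values.
 *
 *   struct audio_mmap_buffer_info info;
 *   struct audio_mmap_position pos;
 *   int ret = out->create_mmap_buffer(out, 480, &info);    // must precede start()
 *   if (ret == 0) ret = out->start(out);
 *   if (ret == 0) ret = out->get_mmap_position(out, &pos); // position + timestamp
 *   // ... the client writes directly into the shared buffer described by 'info' ...
 *   if (ret == 0) out->stop(out);
 */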
struct audio_stream_in {
    /**
     * Common methods of the audio stream in.  This *must* be the first member of
     * audio_stream_in as users of this structure will cast an audio_stream to an
     * audio_stream_in pointer in contexts where it's known the audio_stream
     * references an audio_stream_in.
     */
    struct audio_stream common;

    /** set the input gain for the audio driver. This method is for
     *  future use */
    int (*set_gain)(struct audio_stream_in *stream, float gain);

    /** Read audio buffer in from audio driver. Returns number of bytes read, or a
     *  negative status_t. If at least one frame was read prior to the error,
     *  read should return that byte count and then return an error in the subsequent call.
     */
    ssize_t (*read)(struct audio_stream_in *stream, void* buffer,
                    size_t bytes);

    /**
     * Return the amount of input frames lost in the audio driver since the
     * last call of this function.
     * Audio driver is expected to reset the value to 0 and restart counting
     * upon returning the current value by this function call.
     * Such loss typically occurs when the user space process is blocked
     * longer than the capacity of audio driver buffers.
     *
     * Unit: the number of input audio frames
     */
    uint32_t (*get_input_frames_lost)(struct audio_stream_in *stream);

    /**
     * Return a recent count of the number of audio frames received and
     * the clock time associated with that frame count.
     *
     * frames is the total frame count received. This should be as early in
     * the capture pipeline as possible. In general,
     * frames should be non-negative and should not go "backwards".
     *
     * time is the clock MONOTONIC time when frames was measured. In general,
     * time should be a positive quantity and should not go "backwards".
     *
     * The status returned is 0 on success, -ENOSYS if the device is not
     * ready/available, or -EINVAL if the arguments are null or otherwise invalid.
     */
    int (*get_capture_position)(const struct audio_stream_in *stream,
                                int64_t *frames, int64_t *time);

    /**
     * Called by the framework to start a stream operating in mmap mode.
     * create_mmap_buffer must be called before calling start()
     *
     * \note Function only implemented by streams operating in mmap mode.
     *
     * \param[in] stream the stream object.
     * \return 0 in case of success.
     *         -ENOSYS if called out of sequence or on non mmap stream
     */
    int (*start)(const struct audio_stream_in* stream);

    /**
     * Called by the framework to stop a stream operating in mmap mode.
     *
     * \note Function only implemented by streams operating in mmap mode.
     *
     * \param[in] stream the stream object.
     * \return 0 in case of success.
     *         -ENOSYS if called out of sequence or on non mmap stream
     */
    int (*stop)(const struct audio_stream_in* stream);

    /**
     * Called by the framework to retrieve information on the mmap buffer used for audio
     * samples transfer.
     *
     * \note Function only implemented by streams operating in mmap mode.
     *
     * \param[in] stream the stream object.
     * \param[in] min_size_frames minimum buffer size requested. The actual buffer
     *        size returned in struct audio_mmap_buffer_info can be larger.
     * \param[out] info address at which the mmap buffer information should be returned.
     *
     * \return 0 if the buffer was allocated.
     *         -ENODEV in case of initialization error
     *         -EINVAL if the requested buffer size is too large
     *         -ENOSYS if called out of sequence (e.g. buffer already allocated)
     */
    int (*create_mmap_buffer)(const struct audio_stream_in *stream,
                              int32_t min_size_frames,
                              struct audio_mmap_buffer_info *info);

    /**
     * Called by the framework to read current read/write position in the mmap buffer
     * with associated time stamp.
     *
     * \note Function only implemented by streams operating in mmap mode.
     *
     * \param[in] stream the stream object.
     * \param[out] position address at which the mmap read/write position should be returned.
     *
     * \return 0 if the position is successfully returned.
     *         -ENODATA if the position cannot be retrieved
     *         -ENOSYS if called before create_mmap_buffer()
     */
    int (*get_mmap_position)(const struct audio_stream_in *stream,
                             struct audio_mmap_position *position);
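    /*
     * Illustrative sketch (not part of the HAL API): a conventional (non-mmap) capture
     * loop combining read(), get_capture_position() and get_input_frames_lost().
     * 'in', 'buf' and 'buf_bytes' are assumptions of the example.
     *
     *   ssize_t n = in->read(in, buf, buf_bytes);
     *   if (n > 0) {
     *       int64_t frames, time_ns;
     *       if (in->get_capture_position(in, &frames, &time_ns) == 0) {
     *           // 'frames' captured as of CLOCK_MONOTONIC time 'time_ns'; together
     *           // with the bytes read so far this yields the capture latency.
     *       }
     *   }
     *   uint32_t lost = in->get_input_frames_lost(in);  // non-zero if the client fell behind
     */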
    /**
     * Called by the framework to read active microphones
     *
     * \param[in] stream the stream object.
     * \param[out] mic_array Pointer to the first element of an array with microphone info
     * \param[out] mic_count When called, this holds the value of the max number of elements
     *                       allowed in the mic_array. The actual number of elements written
     *                       is returned here.
     *                       If mic_count is passed as zero, mic_array will not be populated,
     *                       and mic_count will return the actual number of active microphones.
     *
     * \return 0 if the microphone array is successfully filled.
     *         -ENOSYS if there is an error filling the data
     */
    int (*get_active_microphones)(const struct audio_stream_in *stream,
                                  struct audio_microphone_characteristic_t *mic_array,
                                  size_t *mic_count);

    /**
     * Called by the framework to instruct the HAL to optimize the capture stream in the
     * specified direction.
     *
     * \param[in] stream    the stream object.
     * \param[in] direction The direction constant (from audio-base.h)
     *   MIC_DIRECTION_UNSPECIFIED  Don't do any directionality processing of the
     *      activated microphone(s).
     *   MIC_DIRECTION_FRONT        Optimize capture for audio coming from the screen-side
     *      of the device.
     *   MIC_DIRECTION_BACK         Optimize capture for audio coming from the side of the
     *      device opposite the screen.
     *   MIC_DIRECTION_EXTERNAL     Optimize capture for audio coming from an off-device
     *      microphone.
     * \return OK if the call is successful, an error code otherwise.
     */
    int (*set_microphone_direction)(const struct audio_stream_in *stream,
                                    audio_microphone_direction_t direction);

    /**
     * Called by the framework to specify to the HAL the desired zoom factor for the selected
     * microphone(s).
     *
     * \param[in] stream the stream object.
     * \param[in] zoom   the zoom factor.
     * \return OK if the call is successful, an error code otherwise.
     */
    int (*set_microphone_field_dimension)(const struct audio_stream_in *stream,
                                          float zoom);

    /**
     * Called when the metadata of the stream's sink has been changed.
     * @param sink_metadata Description of the audio that is recorded by the clients.
     */
    void (*update_sink_metadata)(struct audio_stream_in *stream,
                                 const struct sink_metadata* sink_metadata);

    /**
     * Called when the metadata of the stream's sink has been changed.
     * HAL version 3.2 and higher only.
     * @param sink_metadata Description of the audio that is recorded by the clients.
     */
    void (*update_sink_metadata_v7)(struct audio_stream_in *stream,
                                    const struct sink_metadata_v7* sink_metadata);
};
typedef struct audio_stream_in audio_stream_in_t;
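/*
 * Illustrative sketch (not part of the HAL API): querying the microphones that feed an
 * open input stream. AUDIO_MICROPHONE_MAX_COUNT (from system/audio.h) is used here as
 * the array capacity; this constant and the variable names are assumptions of the
 * example, and any sufficiently large capacity works.
 *
 *   struct audio_microphone_characteristic_t mics[AUDIO_MICROPHONE_MAX_COUNT];
 *   size_t mic_count = AUDIO_MICROPHONE_MAX_COUNT;   // capacity in, actual count out
 *   if (in->get_active_microphones(in, mics, &mic_count) == 0) {
 *       // mics[0 .. mic_count-1] describe the active microphones
 *   }
 */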
/**
 * return the frame size (number of bytes per frame).
 *
 * Deprecated: use audio_stream_out_frame_size() or audio_stream_in_frame_size() instead.
 */
__attribute__((__deprecated__))
static inline size_t audio_stream_frame_size(const struct audio_stream *s)
{
    size_t chan_samp_sz;
    audio_format_t format = s->get_format(s);

    if (audio_has_proportional_frames(format)) {
        chan_samp_sz = audio_bytes_per_sample(format);
        return popcount(s->get_channels(s)) * chan_samp_sz;
    }

    return sizeof(int8_t);
}

/**
 * return the frame size (number of bytes per frame) of an output stream.
 */
static inline size_t audio_stream_out_frame_size(const struct audio_stream_out *s)
{
    size_t chan_samp_sz;
    audio_format_t format = s->common.get_format(&s->common);

    if (audio_has_proportional_frames(format)) {
        chan_samp_sz = audio_bytes_per_sample(format);
        return audio_channel_count_from_out_mask(s->common.get_channels(&s->common)) *
                chan_samp_sz;
    }

    return sizeof(int8_t);
}

/**
 * return the frame size (number of bytes per frame) of an input stream.
 */
static inline size_t audio_stream_in_frame_size(const struct audio_stream_in *s)
{
    size_t chan_samp_sz;
    audio_format_t format = s->common.get_format(&s->common);

    if (audio_has_proportional_frames(format)) {
        chan_samp_sz = audio_bytes_per_sample(format);
        return audio_channel_count_from_in_mask(s->common.get_channels(&s->common)) *
                chan_samp_sz;
    }

    return sizeof(int8_t);
}

/**********************************************************************/

/**
 * Every hardware module must have a data structure named HAL_MODULE_INFO_SYM
 * and the fields of this data structure must begin with hw_module_t
 * followed by module specific information.
 */
struct audio_module {
    struct hw_module_t common;
};

struct audio_hw_device {
    /**
     * Common methods of the audio device.  This *must* be the first member of audio_hw_device
     * as users of this structure will cast a hw_device_t to audio_hw_device pointer in contexts
     * where it's known the hw_device_t references an audio_hw_device.
     */
    struct hw_device_t common;

    /**
     * used by audio flinger to enumerate what devices are supported by
     * each audio_hw_device implementation.
     *
     * Return value is a bitmask of 1 or more values of audio_devices_t
     *
     * NOTE: audio HAL implementations starting with
     * AUDIO_DEVICE_API_VERSION_2_0 do not implement this function.
     * All supported devices should be listed in the audio_policy.conf
     * file and the audio policy manager must choose the appropriate
     * audio module based on information in this file.
     */
    uint32_t (*get_supported_devices)(const struct audio_hw_device *dev);

    /**
     * check to see if the audio hardware interface has been initialized.
     * returns 0 on success, -ENODEV on failure.
     */
    int (*init_check)(const struct audio_hw_device *dev);

    /** set the audio volume of a voice call. Range is between 0.0 and 1.0 */
    int (*set_voice_volume)(struct audio_hw_device *dev, float volume);

    /**
     * set the audio volume for all audio activities other than voice call.
     * Range between 0.0 and 1.0. If any value other than 0 is returned,
     * the software mixer will emulate this capability.
     */
    int (*set_master_volume)(struct audio_hw_device *dev, float volume);

    /**
     * Get the current master volume value for the HAL, if the HAL supports
     * master volume control.  AudioFlinger will query this value from the
     * primary audio HAL when the service starts and use the value for setting
     * the initial master volume across all HALs.  HALs which do not support
     * this method may leave it set to NULL.
     */
    int (*get_master_volume)(struct audio_hw_device *dev, float *volume);

    /**
     * set_mode is called when the audio mode changes. AUDIO_MODE_NORMAL mode
     * is for standard audio playback, AUDIO_MODE_RINGTONE when a ringtone is
     * playing, and AUDIO_MODE_IN_CALL when a call is in progress.
     */
    int (*set_mode)(struct audio_hw_device *dev, audio_mode_t mode);

    /* mic mute */
    int (*set_mic_mute)(struct audio_hw_device *dev, bool state);
    int (*get_mic_mute)(const struct audio_hw_device *dev, bool *state);

    /* set/get global audio parameters */
    int (*set_parameters)(struct audio_hw_device *dev, const char *kv_pairs);

    /*
     * Returns a pointer to a heap allocated string. The caller is responsible
     * for freeing the memory for it using free().
     */
    char * (*get_parameters)(const struct audio_hw_device *dev,
                             const char *keys);
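    /*
     * Illustrative sketch (not part of the HAL API): how a client typically obtains and
     * initializes this device before using the stream factory methods that follow.
     * hw_get_module_by_class() comes from hardware/hardware.h, audio_hw_device_open() is
     * the convenience helper declared at the end of this header, and the "primary"
     * instance name is an example.
     *
     *   const struct hw_module_t *module;
     *   struct audio_hw_device *dev;
     *   if (hw_get_module_by_class(AUDIO_HARDWARE_MODULE_ID, "primary", &module) == 0 &&
     *       audio_hw_device_open(module, &dev) == 0 &&
     *       dev->init_check(dev) == 0) {
     *       // device is ready; open_output_stream()/open_input_stream() below can be used
     *   }
     */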
    /* Returns audio input buffer size according to parameters passed or
     * 0 if one of the parameters is not supported.
     * See also get_buffer_size which is for a particular stream.
     */
    size_t (*get_input_buffer_size)(const struct audio_hw_device *dev,
                                    const struct audio_config *config);

    /** This method creates and opens the audio hardware output stream.
     * The "address" parameter qualifies the "devices" audio device type if needed.
     * The format of the address depends on the device type:
     * - Bluetooth devices use the MAC address of the device in the form "00:11:22:AA:BB:CC"
     * - USB devices use the ALSA card and device numbers in the form "card=X;device=Y"
     * - Other devices may use a number or any other string.
     */
    int (*open_output_stream)(struct audio_hw_device *dev,
                              audio_io_handle_t handle,
                              audio_devices_t devices,
                              audio_output_flags_t flags,
                              struct audio_config *config,
                              struct audio_stream_out **stream_out,
                              const char *address);

    void (*close_output_stream)(struct audio_hw_device *dev,
                                struct audio_stream_out* stream_out);

    /** This method creates and opens the audio hardware input stream */
    int (*open_input_stream)(struct audio_hw_device *dev,
                             audio_io_handle_t handle,
                             audio_devices_t devices,
                             struct audio_config *config,
                             struct audio_stream_in **stream_in,
                             audio_input_flags_t flags,
                             const char *address,
                             audio_source_t source);

    void (*close_input_stream)(struct audio_hw_device *dev,
                               struct audio_stream_in *stream_in);

    /**
     * Called by the framework to read available microphone characteristics.
     *
     * \param[in] dev the hw_device object.
     * \param[out] mic_array Pointer to the first element of an array with microphone info
     * \param[out] mic_count When called, this holds the value of the max number of elements
     *                       allowed in the mic_array. The actual number of elements written
     *                       is returned here.
     *                       If mic_count is passed as zero, mic_array will not be populated,
     *                       and mic_count will return the actual number of microphones in the
     *                       system.
     *
     * \return 0 if the microphone array is successfully filled.
     *         -ENOSYS if there is an error filling the data
     */
    int (*get_microphones)(const struct audio_hw_device *dev,
                           struct audio_microphone_characteristic_t *mic_array,
                           size_t *mic_count);

    /** This method dumps the state of the audio hardware */
    int (*dump)(const struct audio_hw_device *dev, int fd);

    /**
     * set the audio mute status for all audio activities.  If any value other
     * than 0 is returned, the software mixer will emulate this capability.
     */
    int (*set_master_mute)(struct audio_hw_device *dev, bool mute);

    /**
     * Get the current master mute status for the HAL, if the HAL supports
     * master mute control.  AudioFlinger will query this value from the primary
     * audio HAL when the service starts and use the value for setting the
     * initial master mute across all HALs.  HALs which do not support this
     * method may leave it set to NULL.
     */
    int (*get_master_mute)(struct audio_hw_device *dev, bool *mute);

    /**
     * Routing control
     */

    /* Creates an audio patch between several source and sink ports.
     * The handle is allocated by the HAL and should be unique for this
     * audio HAL module.
     */
    int (*create_audio_patch)(struct audio_hw_device *dev,
                              unsigned int num_sources,
                              const struct audio_port_config *sources,
                              unsigned int num_sinks,
                              const struct audio_port_config *sinks,
                              audio_patch_handle_t *handle);

    /* Release an audio patch */
    int (*release_audio_patch)(struct audio_hw_device *dev,
                               audio_patch_handle_t handle);
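    /*
     * Illustrative sketch (not part of the HAL API): routing an output mix to the
     * speaker with create_audio_patch()/release_audio_patch(). The audio_port_config
     * field names follow system/audio.h; treat the exact fields set here as an
     * assumption of the example - a real configuration also fills in the mix io
     * handle, sample rate, format and channel mask.
     *
     *   struct audio_port_config source, sink;
     *   audio_patch_handle_t patch = AUDIO_PATCH_HANDLE_NONE;
     *   memset(&source, 0, sizeof(source));
     *   memset(&sink, 0, sizeof(sink));
     *   source.role = AUDIO_PORT_ROLE_SOURCE;
     *   source.type = AUDIO_PORT_TYPE_MIX;
     *   sink.role = AUDIO_PORT_ROLE_SINK;
     *   sink.type = AUDIO_PORT_TYPE_DEVICE;
     *   sink.ext.device.type = AUDIO_DEVICE_OUT_SPEAKER;
     *   if (dev->create_audio_patch(dev, 1, &source, 1, &sink, &patch) == 0) {
     *       // ... patch is active ...
     *       dev->release_audio_patch(dev, patch);
     *   }
     */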
    /* Fills the list of supported attributes for a given audio port.
     * As input, "port" contains the information (type, role, address etc...)
     * needed by the HAL to identify the port.
     * As output, "port" contains possible attributes (sampling rates, formats,
     * channel masks, gain controllers...) for this port.
     */
    int (*get_audio_port)(struct audio_hw_device *dev,
                          struct audio_port *port);

    /* Set audio port configuration */
    int (*set_audio_port_config)(struct audio_hw_device *dev,
                                 const struct audio_port_config *config);

    /**
     * Applies an audio effect to an audio device.
     *
     * @param dev the audio HAL device context.
     * @param device identifies the sink or source device the effect must be applied to.
     *               "device" is the audio_port_handle_t indicated for the device when
     *               the audio patch connecting that device was created.
     * @param effect effect interface handle corresponding to the effect being added.
     * @return retval operation completion status.
     */
    int (*add_device_effect)(struct audio_hw_device *dev,
                             audio_port_handle_t device, effect_handle_t effect);

    /**
     * Stops applying an audio effect to an audio device.
     *
     * @param dev the audio HAL device context.
     * @param device identifies the sink or source device this effect was applied to.
     *               "device" is the audio_port_handle_t indicated for the device when
     *               the audio patch is created.
     * @param effect effect interface handle corresponding to the effect being removed.
     * @return retval operation completion status.
     */
    int (*remove_device_effect)(struct audio_hw_device *dev,
                                audio_port_handle_t device, effect_handle_t effect);

    /**
     * Fills the list of supported attributes for a given audio port.
     * As input, "port" contains the information (type, role, address etc...)
     * needed by the HAL to identify the port.
     * As output, "port" contains possible attributes (sampling rates, formats,
     * channel masks, gain controllers...) for this port. The possible attributes
     * are saved as audio profiles, which contain the audio format and the supported
     * sampling rates and channel masks.
     */
    int (*get_audio_port_v7)(struct audio_hw_device *dev,
                             struct audio_port_v7 *port);
};
typedef struct audio_hw_device audio_hw_device_t;

/** convenience API for opening and closing a supported device */

static inline int audio_hw_device_open(const struct hw_module_t* module,
                                       struct audio_hw_device** device)
{
    return module->methods->open(module, AUDIO_HARDWARE_INTERFACE,
                                 TO_HW_DEVICE_T_OPEN(device));
}

static inline int audio_hw_device_close(struct audio_hw_device* device)
{
    return device->common.close(&device->common);
}

__END_DECLS

#endif  // ANDROID_AUDIO_HAL_INTERFACE_H