Merge commit 'c9940a2bfe94dbe2ef3bfe5e8692bf4e3cea5ba0' into HEAD

The Android Open Source Project 2013-11-22 10:35:26 -08:00
commit bae5cf92cf
48 changed files with 5273 additions and 182 deletions

View file

@@ -153,6 +153,10 @@ int hw_get_module_by_class(const char *class_id, const char *inst,
HAL_LIBRARY_PATH1, name, prop);
if (access(path, R_OK) == 0) break;
} else {
snprintf(path, sizeof(path), "%s/%s.default.so",
HAL_LIBRARY_PATH2, name);
if (access(path, R_OK) == 0) break;
snprintf(path, sizeof(path), "%s/%s.default.so",
HAL_LIBRARY_PATH1, name);
if (access(path, R_OK) == 0) break;

View file

@@ -67,6 +67,7 @@ __BEGIN_DECLS
#define AUDIO_HARDWARE_MODULE_ID_A2DP "a2dp"
#define AUDIO_HARDWARE_MODULE_ID_USB "usb"
#define AUDIO_HARDWARE_MODULE_ID_REMOTE_SUBMIX "r_submix"
#define AUDIO_HARDWARE_MODULE_ID_CODEC_OFFLOAD "codec_offload"
/**************************************/
@@ -117,16 +118,35 @@ __BEGIN_DECLS
* "sup_sampling_rates=44100|48000" */
#define AUDIO_PARAMETER_STREAM_SUP_SAMPLING_RATES "sup_sampling_rates"
/**
* audio codec parameters
*/
#define AUDIO_OFFLOAD_CODEC_PARAMS "music_offload_codec_param"
#define AUDIO_OFFLOAD_CODEC_BIT_PER_SAMPLE "music_offload_bit_per_sample"
#define AUDIO_OFFLOAD_CODEC_BIT_RATE "music_offload_bit_rate"
#define AUDIO_OFFLOAD_CODEC_AVG_BIT_RATE "music_offload_avg_bit_rate"
#define AUDIO_OFFLOAD_CODEC_ID "music_offload_codec_id"
#define AUDIO_OFFLOAD_CODEC_BLOCK_ALIGN "music_offload_block_align"
#define AUDIO_OFFLOAD_CODEC_SAMPLE_RATE "music_offload_sample_rate"
#define AUDIO_OFFLOAD_CODEC_ENCODE_OPTION "music_offload_encode_option"
#define AUDIO_OFFLOAD_CODEC_NUM_CHANNEL "music_offload_num_channels"
#define AUDIO_OFFLOAD_CODEC_DOWN_SAMPLING "music_offload_down_sampling"
#define AUDIO_OFFLOAD_CODEC_DELAY_SAMPLES "delay_samples"
#define AUDIO_OFFLOAD_CODEC_PADDING_SAMPLES "padding_samples"
/**************************************/
/* common audio stream configuration parameters
* You should memset() the entire structure to zero before use to
* ensure forward compatibility
*/
struct audio_config {
uint32_t sample_rate;
audio_channel_mask_t channel_mask;
audio_format_t format;
audio_offload_info_t offload_info;
};
typedef struct audio_config audio_config_t;
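The comment above asks callers to zero the structure before use. A minimal sketch of preparing a config for compressed (offloaded) playback, assuming the audio_offload_info_t fields declared in system/audio.h and purely illustrative codec values:

#include <string.h>
#include <hardware/audio.h>

static void init_offload_config(audio_config_t *config)
{
    memset(config, 0, sizeof(*config));              /* forward compatibility */
    config->sample_rate = 44100;                     /* illustrative values */
    config->channel_mask = AUDIO_CHANNEL_OUT_STEREO;
    config->format = AUDIO_FORMAT_MP3;
    config->offload_info.sample_rate = 44100;        /* codec details for offload */
    config->offload_info.bit_rate = 320000;
}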
/* common audio stream parameters and operations */
@@ -213,6 +233,22 @@ struct audio_stream {
};
typedef struct audio_stream audio_stream_t;
/* type of asynchronous write callback events. Mutually exclusive */
typedef enum {
STREAM_CBK_EVENT_WRITE_READY, /* non blocking write completed */
STREAM_CBK_EVENT_DRAIN_READY /* drain completed */
} stream_callback_event_t;
typedef int (*stream_callback_t)(stream_callback_event_t event, void *param, void *cookie);
/* type of drain requested to audio_stream_out->drain(). Mutually exclusive */
typedef enum {
AUDIO_DRAIN_ALL, /* drain() returns when all data has been played */
AUDIO_DRAIN_EARLY_NOTIFY /* drain() returns a short time before all data
from the current track has been played to
give time for gapless track switch */
} audio_drain_type_t;
/**
* audio_stream_out is the abstraction interface for the audio output hardware.
*
@@ -242,6 +278,13 @@ struct audio_stream_out {
* negative status_t. If at least one frame was written successfully prior to the error,
* it is suggested that the driver return that successful (short) byte count
* and then return an error in the subsequent call.
*
* If set_callback() has previously been called to enable non-blocking mode,
* the write() is not allowed to block. It must write only the number of
* bytes that currently fit in the driver/hardware buffer and then return
* this byte count. If this is less than the requested write size the
* callback function must be called when more space is available in the
* driver/hardware buffer.
*/
ssize_t (*write)(struct audio_stream_out *stream, const void* buffer,
size_t bytes);
@@ -259,6 +302,80 @@ struct audio_stream_out {
int (*get_next_write_timestamp)(const struct audio_stream_out *stream,
int64_t *timestamp);
/**
* set the callback function for notifying completion of non-blocking
* write and drain.
* Calling this function implies that all future write() and drain()
* must be non-blocking and use the callback to signal completion.
*/
int (*set_callback)(struct audio_stream_out *stream,
stream_callback_t callback, void *cookie);
/**
* Notifies the audio driver to stop playback; the queued buffers are
* retained by the hardware. Useful for implementing pause/resume. Provide an
* empty implementation if not supported; however, it should be implemented
* for hardware with non-trivial latency. In the pause state the audio
* hardware could still be using power. The user may consider calling
* suspend after a timeout.
*
* Implementation of this function is mandatory for offloaded playback.
*/
int (*pause)(struct audio_stream_out* stream);
/**
* Notifies the audio driver to resume playback following a pause.
* Returns error if called without matching pause.
*
* Implementation of this function is mandatory for offloaded playback.
*/
int (*resume)(struct audio_stream_out* stream);
/**
* Requests notification when data buffered by the driver/hardware has
* been played. If set_callback() has previously been called to enable
* non-blocking mode, the drain() must not block, instead it should return
* quickly and completion of the drain is notified through the callback.
* If set_callback() has not been called, the drain() must block until
* completion.
* If type==AUDIO_DRAIN_ALL, the drain completes when all previously written
* data has been played.
* If type==AUDIO_DRAIN_EARLY_NOTIFY, the drain completes shortly before all
* data for the current track has played to allow time for the framework
* to perform a gapless track switch.
*
* Drain must return immediately when stop() or flush() is called.
*
* Implementation of this function is mandatory for offloaded playback.
*/
int (*drain)(struct audio_stream_out* stream, audio_drain_type_t type);
/**
* Notifies the audio driver to flush the queued data. The stream must
* already be paused before calling flush().
*
* Implementation of this function is mandatory for offloaded playback.
*/
int (*flush)(struct audio_stream_out* stream);
/**
* Return a recent count of the number of audio frames presented to an external observer.
* This excludes frames which have been written but are still in the pipeline.
* The count is not reset to zero when output enters standby.
* Also returns the value of CLOCK_MONOTONIC as of this presentation count.
* The returned count is expected to be 'recent',
* but does not need to be the most recent possible value.
* However, the associated time should correspond to whatever count is returned.
* Example: assume that N+M frames have been presented, where M is a 'small' number.
* Then it is permissible to return N instead of N+M,
* and the timestamp should correspond to N rather than N+M.
* The terms 'recent' and 'small' are not defined.
* They reflect the quality of the implementation.
*
* 3.0 and higher only.
*/
int (*get_presentation_position)(const struct audio_stream_out *stream,
uint64_t *frames, struct timespec *timestamp);
};
typedef struct audio_stream_out audio_stream_out_t;
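A minimal sketch (not part of the patch) of driving the non-blocking write/drain protocol described above. "out" is assumed to be an already-opened offload stream, and the signal_*/wait_* helpers are hypothetical synchronization shims:

#include <stdint.h>
#include <hardware/audio.h>

extern void signal_write_ready(void *cookie);   /* hypothetical helpers */
extern void signal_drain_done(void *cookie);
extern void wait_for_write_ready(void *cookie);

static int on_stream_event(stream_callback_event_t event, void *param, void *cookie)
{
    (void)param;
    switch (event) {
    case STREAM_CBK_EVENT_WRITE_READY:
        signal_write_ready(cookie);   /* room freed up in the driver buffer */
        break;
    case STREAM_CBK_EVENT_DRAIN_READY:
        signal_drain_done(cookie);    /* drain completed */
        break;
    }
    return 0;
}

static void play_buffer(audio_stream_out_t *out, const uint8_t *data,
                        size_t size, void *cookie)
{
    out->set_callback(out, on_stream_event, cookie); /* enables non-blocking mode */
    size_t off = 0;
    while (off < size) {
        ssize_t n = out->write(out, data + off, size - off); /* may return short */
        if (n < 0)
            break;                         /* error handling elided */
        off += (size_t)n;
        if (off < size)
            wait_for_write_ready(cookie);  /* block until WRITE_READY callback */
    }
    out->drain(out, AUDIO_DRAIN_ALL);      /* completion signaled via callback */
}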
@@ -296,18 +413,14 @@ typedef struct audio_stream_in audio_stream_in_t;
static inline size_t audio_stream_frame_size(const struct audio_stream *s)
{
size_t chan_samp_sz;
audio_format_t format = s->get_format(s);

if (audio_is_linear_pcm(format)) {
chan_samp_sz = audio_bytes_per_sample(format);
return popcount(s->get_channels(s)) * chan_samp_sz;
}

return sizeof(int8_t);
}

View file

@@ -147,6 +147,9 @@ typedef struct effect_descriptor_s {
// |                           |           | 1 requires audio source updates
// |                           |           | 2..3 reserved
// +---------------------------+-----------+-----------------------------------
// | Effect offload supported  | 22        | 0 The effect cannot be offloaded to an audio DSP
// |                           |           | 1 The effect can be offloaded to an audio DSP
// +---------------------------+-----------+-----------------------------------
// Insert mode
#define EFFECT_FLAG_TYPE_SHIFT 0
@@ -229,6 +232,14 @@ typedef struct effect_descriptor_s {
#define EFFECT_FLAG_AUDIO_SOURCE_IND (1 << EFFECT_FLAG_AUDIO_SOURCE_SHIFT)
#define EFFECT_FLAG_AUDIO_SOURCE_NONE (0 << EFFECT_FLAG_AUDIO_SOURCE_SHIFT)
// Effect offload indication
#define EFFECT_FLAG_OFFLOAD_SHIFT (EFFECT_FLAG_AUDIO_SOURCE_SHIFT + \
EFFECT_FLAG_AUDIO_SOURCE_SIZE)
#define EFFECT_FLAG_OFFLOAD_SIZE 1
#define EFFECT_FLAG_OFFLOAD_MASK (((1 << EFFECT_FLAG_OFFLOAD_SIZE) -1) \
<< EFFECT_FLAG_OFFLOAD_SHIFT)
#define EFFECT_FLAG_OFFLOAD_SUPPORTED (1 << EFFECT_FLAG_OFFLOAD_SHIFT)
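A small sketch of how a client might test the new offload bit in an effect descriptor's flags field:

#include <stdbool.h>
#include <hardware/audio_effect.h>

static bool effect_supports_offload(const effect_descriptor_t *desc)
{
    /* bit 22 of flags, per the descriptor table above */
    return (desc->flags & EFFECT_FLAG_OFFLOAD_MASK) == EFFECT_FLAG_OFFLOAD_SUPPORTED;
}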
#define EFFECT_MAKE_API_VERSION(M, m) (((M)<<16) | ((m) & 0xFFFF))
#define EFFECT_API_VERSION_MAJOR(v) ((v)>>16)
#define EFFECT_API_VERSION_MINOR(v) ((v) & 0xFFFF)
@@ -426,6 +437,8 @@ enum effect_command_e {
EFFECT_CMD_GET_FEATURE_CONFIG, // get current feature configuration
EFFECT_CMD_SET_FEATURE_CONFIG, // set current feature configuration
EFFECT_CMD_SET_AUDIO_SOURCE, // set the audio source (see audio.h, audio_source_t)
EFFECT_CMD_OFFLOAD, // set if effect thread is an offload one,
// send the ioHandle of the effect thread
EFFECT_CMD_FIRST_PROPRIETARY = 0x10000 // first proprietary command code
};
@@ -732,6 +745,20 @@ enum effect_command_e {
// size: 0
// data: N/A
//==================================================================================================
// command: EFFECT_CMD_OFFLOAD
//--------------------------------------------------------------------------------------------------
// description:
//  1. Indicate whether the playback thread the effect is attached to is offloaded
//  2. Update the io handle of the playback thread the effect is attached to
//--------------------------------------------------------------------------------------------------
// command format:
// size: sizeof(effect_offload_param_t)
// data: effect_offload_param_t
//--------------------------------------------------------------------------------------------------
// reply format:
// size: sizeof(uint32_t)
// data: uint32_t
//--------------------------------------------------------------------------------------------------
// command: EFFECT_CMD_FIRST_PROPRIETARY
//--------------------------------------------------------------------------------------------------
// description:
@@ -868,6 +895,11 @@ typedef struct effect_param_s {
char data[]; // Start of Parameter + Value data
} effect_param_t;
// structure used by EFFECT_CMD_OFFLOAD command
typedef struct effect_offload_param_s {
bool isOffload; // true if the playback thread the effect is attached to is offloaded
int ioHandle; // io handle of the playback thread the effect is attached to
} effect_offload_param_t;
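A sketch of issuing EFFECT_CMD_OFFLOAD through the standard effect command() entry point, following the command/reply format documented above; "effect" is assumed to be an opened effect_handle_t and the io handle comes from the caller:

#include <hardware/audio_effect.h>

static int set_effect_offload(effect_handle_t effect, bool offloaded, int io_handle)
{
    effect_offload_param_t param = { .isOffload = offloaded, .ioHandle = io_handle };
    uint32_t reply = 0;
    uint32_t reply_size = sizeof(reply);
    int status = (*effect)->command(effect, EFFECT_CMD_OFFLOAD,
                                    sizeof(param), &param,
                                    &reply_size, &reply);
    return status != 0 ? status : (int)reply;  /* reply carries the command status */
}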
/////////////////////////////////////////////////

View file

@@ -133,7 +133,8 @@ struct audio_policy {
uint32_t samplingRate,
audio_format_t format,
audio_channel_mask_t channelMask,
audio_output_flags_t flags,
const audio_offload_info_t *offloadInfo);
/* indicates to the audio policy manager that the output starts being used
* by corresponding stream. */
@@ -241,6 +242,10 @@ struct audio_policy {
/* dump state */
int (*dump)(const struct audio_policy *pol, int fd);
/* check if offload is possible for given sample rate, bitrate, duration, ... */
bool (*is_offload_supported)(const struct audio_policy *pol,
const audio_offload_info_t *info);
};
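A sketch of how a policy client might gate offloaded output on the new entry point, treating a missing implementation as "not supported":

static bool can_offload(const struct audio_policy *pol,
                        const audio_offload_info_t *info)
{
    return pol->is_offload_supported != NULL
        && pol->is_offload_supported(pol, info);
}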
/* audio hw module handle used by load_hw_module(), open_output_on_module()
@@ -390,7 +395,8 @@ struct audio_policy_service_ops {
audio_format_t *pFormat,
audio_channel_mask_t *pChannelMask,
uint32_t *pLatencyMs,
audio_output_flags_t flags,
const audio_offload_info_t *offloadInfo);
/* Opens an audio input on a particular HW module.
*

include/hardware/bluetooth.h Executable file → Normal file
View file

@@ -451,6 +451,8 @@ typedef struct {
/* opcode MUST be one of: LE_Receiver_Test, LE_Transmitter_Test, LE_Test_End */
int (*le_test_mode)(uint16_t opcode, uint8_t *buf, uint8_t len);
/* enable or disable bluetooth HCI snoop log */
int (*config_hci_snoop_log)(uint8_t enable);
} bt_interface_t;
/** TODO: Need to add APIs for Service Discovery, Service authorization and

View file

@@ -40,8 +40,8 @@ typedef struct
typedef struct
{
btgatt_srvc_id_t srvc_id;
btgatt_gatt_id_t char_id;
btgatt_gatt_id_t descr_id;
btgatt_unformatted_value_t value;
uint16_t value_type;
uint8_t status;
@@ -51,8 +51,8 @@ typedef struct
typedef struct
{
btgatt_srvc_id_t srvc_id;
btgatt_gatt_id_t char_id;
btgatt_gatt_id_t descr_id;
uint8_t status;
} btgatt_write_params_t;
@@ -62,7 +62,7 @@ typedef struct
uint8_t value[BTGATT_MAX_ATTR_LEN];
bt_bdaddr_t bda;
btgatt_srvc_id_t srvc_id;
btgatt_gatt_id_t char_id;
uint16_t len;
uint8_t is_notify;
} btgatt_notify_params_t;
@@ -105,13 +105,13 @@ typedef void (*search_result_callback)( int conn_id, btgatt_srvc_id_t *srvc_id);
/** GATT characteristic enumeration result callback */
typedef void (*get_characteristic_callback)(int conn_id, int status,
btgatt_srvc_id_t *srvc_id, btgatt_gatt_id_t *char_id,
int char_prop);
/** GATT descriptor enumeration result callback */
typedef void (*get_descriptor_callback)(int conn_id, int status,
btgatt_srvc_id_t *srvc_id, btgatt_gatt_id_t *char_id,
btgatt_gatt_id_t *descr_id);
/** GATT included service enumeration result callback */
typedef void (*get_included_service_callback)(int conn_id, int status,
@@ -120,7 +120,7 @@ typedef void (*get_included_service_callback)(int conn_id, int status,
/** Callback invoked in response to [de]register_for_notification */
typedef void (*register_for_notification_callback)(int conn_id,
int registered, int status, btgatt_srvc_id_t *srvc_id,
btgatt_gatt_id_t *char_id);
/**
* Remote device notification callback, invoked when a remote device sends
@@ -151,6 +151,11 @@ typedef void (*write_descriptor_callback)(int conn_id, int status,
typedef void (*read_remote_rssi_callback)(int client_if, bt_bdaddr_t* bda,
int rssi, int status);
/**
* Callback indicating the status of a listen() operation
*/
typedef void (*listen_callback)(int status, int server_if);
typedef struct {
register_client_callback register_client_cb;
scan_result_callback scan_result_cb;
@@ -169,6 +174,7 @@ typedef struct {
write_descriptor_callback write_descriptor_cb;
execute_write_callback execute_write_cb;
read_remote_rssi_callback read_remote_rssi_cb;
listen_callback listen_cb;
} btgatt_client_callbacks_t;
/** Represents the standard BT-GATT client interface. */
@@ -191,6 +197,9 @@ typedef struct {
bt_status_t (*disconnect)( int client_if, const bt_bdaddr_t *bd_addr,
int conn_id);
/** Start or stop advertisements to listen for incoming connections */
bt_status_t (*listen)(int client_if, bool start);
/** Clear the attribute cache for a given device */
bt_status_t (*refresh)( int client_if, const bt_bdaddr_t *bd_addr );
@@ -212,36 +221,36 @@ typedef struct {
* Set start_char_id to NULL to get the first characteristic.
*/
bt_status_t (*get_characteristic)( int conn_id,
btgatt_srvc_id_t *srvc_id, btgatt_gatt_id_t *start_char_id);
/**
* Enumerate descriptors for a given characteristic.
* Set start_descr_id to NULL to get the first descriptor.
*/
bt_status_t (*get_descriptor)( int conn_id,
btgatt_srvc_id_t *srvc_id, btgatt_gatt_id_t *char_id,
btgatt_gatt_id_t *start_descr_id);
/** Read a characteristic on a remote device */
bt_status_t (*read_characteristic)( int conn_id,
btgatt_srvc_id_t *srvc_id, btgatt_gatt_id_t *char_id,
int auth_req );
/** Write a remote characteristic */
bt_status_t (*write_characteristic)(int conn_id,
btgatt_srvc_id_t *srvc_id, btgatt_gatt_id_t *char_id,
int write_type, int len, int auth_req,
char* p_value);
/** Read the descriptor for a given characteristic */
bt_status_t (*read_descriptor)(int conn_id,
btgatt_srvc_id_t *srvc_id, btgatt_gatt_id_t *char_id,
btgatt_gatt_id_t *descr_id, int auth_req);
/** Write a remote descriptor for a given characteristic */
bt_status_t (*write_descriptor)( int conn_id,
btgatt_srvc_id_t *srvc_id, btgatt_gatt_id_t *char_id,
btgatt_gatt_id_t *descr_id, int write_type, int len,
int auth_req, char* p_value);
/** Execute a prepared write operation */
@@ -253,12 +262,12 @@ typedef struct {
*/
bt_status_t (*register_for_notification)( int client_if,
const bt_bdaddr_t *bd_addr, btgatt_srvc_id_t *srvc_id,
btgatt_gatt_id_t *char_id);
/** Deregister a previous request for notifications/indications */
bt_status_t (*deregister_for_notification)( int client_if,
const bt_bdaddr_t *bd_addr, btgatt_srvc_id_t *srvc_id,
btgatt_gatt_id_t *char_id);
/** Request RSSI for a given remote device */
bt_status_t (*read_remote_rssi)( int client_if, const bt_bdaddr_t *bd_addr);
@@ -266,6 +275,11 @@ typedef struct {
/** Determine the type of the remote device (LE, BR/EDR, Dual-mode) */
int (*get_device_type)( const bt_bdaddr_t *bd_addr );
/** Set the advertising data or scan response data */
bt_status_t (*set_adv_data)(int server_if, bool set_scan_rsp, bool include_name,
bool include_txpower, int min_interval, int max_interval, int appearance,
uint16_t manufacturer_len, char* manufacturer_data);
/** Test mode interface */
bt_status_t (*test_command)( int command, btgatt_test_params_t* params);
} btgatt_client_interface_t;

View file

@@ -29,17 +29,17 @@ __BEGIN_DECLS
#define BTGATT_SERVICE_TYPE_PRIMARY 0
#define BTGATT_SERVICE_TYPE_SECONDARY 1
/** GATT ID adding instance id tracking to the UUID */
typedef struct
{
bt_uuid_t uuid;
uint8_t inst_id;
} btgatt_gatt_id_t;
/** GATT Service ID also identifies the service type (primary/secondary) */
typedef struct
{
btgatt_gatt_id_t id;
uint8_t is_primary;
} btgatt_srvc_id_t;

include/hardware/bt_rc.h Executable file → Normal file
View file

@@ -29,6 +29,13 @@ __BEGIN_DECLS
typedef uint8_t btrc_uid_t[BTRC_UID_SIZE];
typedef enum {
BTRC_FEAT_NONE = 0x00, /* AVRCP 1.0 */
BTRC_FEAT_METADATA = 0x01, /* AVRCP 1.3 */
BTRC_FEAT_ABSOLUTE_VOLUME = 0x02, /* Supports TG role and volume sync */
BTRC_FEAT_BROWSE = 0x04, /* AVRCP 1.4 and up, with Browsing support */
} btrc_remote_features_t;
typedef enum {
BTRC_PLAYSTATE_STOPPED = 0x00, /* Stopped */
BTRC_PLAYSTATE_PLAYING = 0x01, /* Playing */
@@ -114,6 +121,10 @@ typedef struct {
uint8_t text[BTRC_MAX_ATTR_STR_LEN];
} btrc_element_attr_val_t;
/** Callback for the controller's supported features */
typedef void (* btrc_remote_features_callback)(bt_bdaddr_t *bd_addr,
btrc_remote_features_t features);
/** Callback for play status request */
typedef void (* btrc_get_play_status_callback)();
@@ -151,10 +162,20 @@ typedef void (* btrc_get_element_attr_callback) (uint8_t num_attr, btrc_media_at
*/
typedef void (* btrc_register_notification_callback) (btrc_event_id_t event_id, uint32_t param);
/* AVRCP 1.4 Enhancements */
/** Callback for volume change on CT
** volume: Current volume setting on the CT (0-127)
*/
typedef void (* btrc_volume_change_callback) (uint8_t volume, uint8_t ctype);
/** Callback for passthrough commands */
typedef void (* btrc_passthrough_cmd_callback) (int id, int key_state);
/** BT-RC callback structure. */
typedef struct {
/** set to sizeof(BtRcCallbacks) */
size_t size;
btrc_remote_features_callback remote_features_cb;
btrc_get_play_status_callback get_play_status_cb;
btrc_list_player_app_attr_callback list_player_app_attr_cb;
btrc_list_player_app_values_callback list_player_app_values_cb;
@@ -164,6 +185,8 @@ typedef struct {
btrc_set_player_app_value_callback set_player_app_value_cb;
btrc_get_element_attr_callback get_element_attr_cb;
btrc_register_notification_callback register_notification_cb;
btrc_volume_change_callback volume_change_cb;
btrc_passthrough_cmd_callback passthrough_cmd_cb;
} btrc_callbacks_t;
/** Represents the standard BT-RC interface. */
@@ -225,6 +248,15 @@ typedef struct {
btrc_notification_type_t type,
btrc_register_notification_t *p_param);
/* AVRCP 1.4 enhancements */
/** Send the current volume setting to the remote side. Support is limited to
** SetAbsoluteVolume; this can be enhanced to support Relative Volume (AVRCP 1.0),
** where VOLUME_UP/VOLUME_DOWN would be sent as opposed to an absolute volume level.
** volume: Should be in the range 0-127. Bit 7 is reserved and cannot be set.
*/
bt_status_t (*set_volume)(uint8_t volume);
/** Closes the interface. */
void (*cleanup)( void );
} btrc_interface_t;
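A sketch of the absolute-volume flow added here: the stack reports the CT's volume through volume_change_cb, and the TG pushes a new setting with set_volume(); callback wiring uses designated initializers and elides unrelated callbacks:

#include <hardware/bt_rc.h>

static void on_volume_changed(uint8_t volume, uint8_t ctype)
{
    /* volume is 0-127 per the header; a real implementation would apply it
     * to the local audio path */
    (void)volume;
    (void)ctype;
}

static btrc_callbacks_t g_btrc_callbacks = {
    .size = sizeof(btrc_callbacks_t),
    .volume_change_cb = on_volume_changed,
    /* remaining callbacks omitted for brevity */
};

static bt_status_t push_volume(const btrc_interface_t *avrcp, uint8_t volume)
{
    return avrcp->set_volume(volume & 0x7F); /* bit 7 is reserved, must be 0 */
}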

View file

@@ -21,17 +21,17 @@
#include "camera_common.h"
/**
* Camera device HAL 3.1 [ CAMERA_DEVICE_API_VERSION_3_1 ]
*
* EXPERIMENTAL.
*
* Supports the android.hardware.Camera API.
*
* Camera devices that support this version of the HAL must return
* CAMERA_DEVICE_API_VERSION_3_1 in camera_device_t.common.version and in
* camera_info_t.device_version (from camera_module_t.get_camera_info).
*
* Camera modules that may contain version 3.1 devices must implement at least
* version 2.0 of the camera module interface (as defined by
* camera_module_t.common.module_api_version).
*
@@ -82,6 +82,12 @@
* management. Bidirectional streams replace STREAM_FROM_STREAM construct.
*
* - Limited mode semantics for older/limited hardware devices.
*
* 3.1: Minor revision of expanded-capability HAL:
*
* - configure_streams passes consumer usage flags to the HAL.
*
* - flush call to drop all in-flight requests/buffers as fast as possible.
*/
/**
@@ -392,6 +398,10 @@
* well focused. The lens is not moving. The HAL may spontaneously leave
* this state.
*
* AF_STATE_PASSIVE_UNFOCUSED: A continuous focus algorithm believes it is
* not well focused. The lens is not moving. The HAL may spontaneously
* leave this state.
*
* AF_STATE_ACTIVE_SCAN: A scan triggered by the user is underway.
*
* AF_STATE_FOCUSED_LOCKED: The AF algorithm believes it is focused. The
@@ -565,10 +575,16 @@
*
* S4.5. AF state machines
*
* when enabling AF or changing AF mode
*| state              | trans. cause  | new state          | notes            |
*+--------------------+---------------+--------------------+------------------+
*| Any                | AF mode change| INACTIVE           |                  |
*+--------------------+---------------+--------------------+------------------+
*
* mode = AF_MODE_OFF or AF_MODE_EDOF
*| state              | trans. cause  | new state          | notes            |
*+--------------------+---------------+--------------------+------------------+
*| INACTIVE           |               | INACTIVE           | Never changes    |
*+--------------------+---------------+--------------------+------------------+
*
* mode = AF_MODE_AUTO or AF_MODE_MACRO
@@ -611,6 +627,9 @@
*| PASSIVE_SCAN       | HAL completes | PASSIVE_FOCUSED    | End AF scan      |
*|                    | current scan  |                    | Lens now locked  |
*+--------------------+---------------+--------------------+------------------+
*| PASSIVE_SCAN       | HAL fails     | PASSIVE_UNFOCUSED  | End AF scan      |
*|                    | current scan  |                    | Lens now locked  |
*+--------------------+---------------+--------------------+------------------+
*| PASSIVE_SCAN       | AF_TRIGGER    | FOCUSED_LOCKED     | Immediate trans. |
*|                    |               |                    | if focus is good |
*|                    |               |                    | Lens now locked  |
@@ -626,12 +645,13 @@
*| PASSIVE_FOCUSED    | HAL initiates | PASSIVE_SCAN       | Start AF scan    |
*|                    | new scan      |                    | Lens now moving  |
*+--------------------+---------------+--------------------+------------------+
*| PASSIVE_UNFOCUSED  | HAL initiates | PASSIVE_SCAN       | Start AF scan    |
*|                    | new scan      |                    | Lens now moving  |
*+--------------------+---------------+--------------------+------------------+
*| PASSIVE_FOCUSED    | AF_TRIGGER    | FOCUSED_LOCKED     | Immediate trans. |
*|                    |               |                    | Lens now locked  |
*+--------------------+---------------+--------------------+------------------+
*| PASSIVE_UNFOCUSED  | AF_TRIGGER    | NOT_FOCUSED_LOCKED | Immediate trans. |
*|                    |               |                    | Lens now locked  |
*+--------------------+---------------+--------------------+------------------+
*| FOCUSED_LOCKED     | AF_TRIGGER    | FOCUSED_LOCKED     | No effect        |
@@ -655,6 +675,9 @@
*| PASSIVE_SCAN       | HAL completes | PASSIVE_FOCUSED    | End AF scan      |
*|                    | current scan  |                    | Lens now locked  |
*+--------------------+---------------+--------------------+------------------+
*| PASSIVE_SCAN       | HAL fails     | PASSIVE_UNFOCUSED  | End AF scan      |
*|                    | current scan  |                    | Lens now locked  |
*+--------------------+---------------+--------------------+------------------+
*| PASSIVE_SCAN       | AF_TRIGGER    | FOCUSED_LOCKED     | Eventual trans.  |
*|                    |               |                    | once focus good  |
*|                    |               |                    | Lens now locked  |
@@ -670,12 +693,13 @@
*| PASSIVE_FOCUSED    | HAL initiates | PASSIVE_SCAN       | Start AF scan    |
*|                    | new scan      |                    | Lens now moving  |
*+--------------------+---------------+--------------------+------------------+
*| PASSIVE_UNFOCUSED  | HAL initiates | PASSIVE_SCAN       | Start AF scan    |
*|                    | new scan      |                    | Lens now moving  |
*+--------------------+---------------+--------------------+------------------+
*| PASSIVE_FOCUSED    | AF_TRIGGER    | FOCUSED_LOCKED     | Immediate trans. |
*|                    |               |                    | Lens now locked  |
*+--------------------+---------------+--------------------+------------------+
*| PASSIVE_UNFOCUSED  | AF_TRIGGER    | NOT_FOCUSED_LOCKED | Immediate trans. |
*|                    |               |                    | Lens now locked  |
*+--------------------+---------------+--------------------+------------------+
*| FOCUSED_LOCKED     | AF_TRIGGER    | FOCUSED_LOCKED     | No effect        |
@@ -693,10 +717,16 @@
* FLASH_REQUIRED and PRECAPTURE states. So rows below that refer to those two
* states should be ignored for the AWB state machine.
*
* when enabling AE/AWB or changing AE/AWB mode
*| state              | trans. cause  | new state          | notes            |
*+--------------------+---------------+--------------------+------------------+
*| Any                | mode change   | INACTIVE           |                  |
*+--------------------+---------------+--------------------+------------------+
*
* mode = AE_MODE_OFF / AWB mode not AUTO
*| state              | trans. cause  | new state          | notes            |
*+--------------------+---------------+--------------------+------------------+
*| INACTIVE           |               | INACTIVE           | AE/AWB disabled  |
*+--------------------+---------------+--------------------+------------------+
*
* mode = AE_MODE_ON_* / AWB_MODE_AUTO
@@ -1041,6 +1071,9 @@ typedef enum camera3_stream_type {
* remain valid as if configure_streams() had not been called.
*
* The endpoint of the stream is not visible to the camera HAL device.
* In DEVICE_API_VERSION_3_1, this was changed to share consumer usage flags
* on streams where the camera is a producer (OUTPUT and BIDIRECTIONAL stream
* types); see the usage field below.
*/
typedef struct camera3_stream {
@@ -1092,6 +1125,25 @@ typedef struct camera3_stream {
* the producer and the consumer will be combined together and then passed
* to the platform gralloc HAL module for allocating the gralloc buffers for
* each stream.
*
* Version information:
*
* == CAMERA_DEVICE_API_VERSION_3_0:
*
* No initial value guaranteed when passed via configure_streams().
* HAL may not use this field as input, and must write over this field
* with its usage flags.
*
* >= CAMERA_DEVICE_API_VERSION_3_1:
*
* For stream_type OUTPUT and BIDIRECTIONAL, when passed via
* configure_streams(), the initial value of this is the consumer's
* usage flags. The HAL may use these consumer flags to decide stream
* configuration.
* For stream_type INPUT, when passed via configure_streams(), the initial
* value of this is 0.
* For all streams passed via configure_streams(), the HAL must write
* over this field with its usage flags.
*/
uint32_t usage;
@@ -2035,6 +2087,49 @@ typedef struct camera3_device_ops {
*/
void (*dump)(const struct camera3_device *, int fd);
/**
* flush:
*
* Flush all currently in-process captures and all buffers in the pipeline
* on the given device. The framework will use this to dump all state as
* quickly as possible in order to prepare for a configure_streams() call.
*
* No buffers are required to be successfully returned, so every buffer
* held at the time of flush() (whether successfully filled or not) may be
* returned with CAMERA3_BUFFER_STATUS_ERROR. Note the HAL is still allowed
* to return valid (STATUS_OK) buffers during this call, provided they are
* successfully filled.
*
* All requests currently in the HAL are expected to be returned as soon as
* possible. Not-in-process requests should return errors immediately. Any
* interruptible hardware blocks should be stopped, and any uninterruptible
* blocks should be waited on.
*
* flush() should only return when there are no more outstanding buffers or
* requests left in the HAL. The framework may call configure_streams (as
* the HAL state is now quiesced) or may issue new requests.
*
* A flush() call should only take 100ms or less. The maximum time it can
* take is 1 second.
*
* Version information:
*
* only available if device version >= CAMERA_DEVICE_API_VERSION_3_1.
*
* Return values:
*
* 0: On a successful flush of the camera HAL.
*
* -EINVAL: If the input is malformed (the device is not valid).
*
* -ENODEV: If the camera device has encountered a serious error. After this
* error is returned, only the close() method can be successfully
* called by the framework.
*/
int (*flush)(const struct camera3_device *);
/* reserved for future use */
void *reserved[8];
} camera3_device_ops_t;
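A sketch of framework-side use of the new flush() entry point ahead of a stream reconfiguration, assuming "dev" is an open camera3_device_t with device version >= CAMERA_DEVICE_API_VERSION_3_1:

#include <hardware/camera3.h>

static int drop_inflight_then_reconfigure(camera3_device_t *dev,
                                          camera3_stream_configuration_t *config)
{
    int res = dev->ops->flush(dev);  /* returns once the HAL is quiesced */
    if (res != 0)
        return res;                  /* -EINVAL or -ENODEV per the docs above */
    return dev->ops->configure_streams(dev, config);
}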
/**********************************************************************

View file

@@ -67,6 +67,13 @@ __BEGIN_DECLS
* framework from the camera HAL module, which is used to notify the framework
* about changes to the camera module state. Modules that provide a valid
* set_callbacks() method must report at least this version number.
*
*******************************************************************************
* Version: 2.2 [CAMERA_MODULE_API_VERSION_2_2]
*
* This camera module version adds vendor tag support from the module, and
* deprecates the old vendor_tag_query_ops that were previously only
* accessible with a device open.
*/
/**
@@ -80,8 +87,9 @@ __BEGIN_DECLS
#define CAMERA_MODULE_API_VERSION_1_0 HARDWARE_MODULE_API_VERSION(1, 0)
#define CAMERA_MODULE_API_VERSION_2_0 HARDWARE_MODULE_API_VERSION(2, 0)
#define CAMERA_MODULE_API_VERSION_2_1 HARDWARE_MODULE_API_VERSION(2, 1)
#define CAMERA_MODULE_API_VERSION_2_2 HARDWARE_MODULE_API_VERSION(2, 2)
#define CAMERA_MODULE_API_VERSION_CURRENT CAMERA_MODULE_API_VERSION_2_2
/**
* All device versions <= HARDWARE_DEVICE_API_VERSION(1, 0xFF) must be treated
@@ -91,6 +99,7 @@ __BEGIN_DECLS
#define CAMERA_DEVICE_API_VERSION_2_0 HARDWARE_DEVICE_API_VERSION(2, 0)
#define CAMERA_DEVICE_API_VERSION_2_1 HARDWARE_DEVICE_API_VERSION(2, 1)
#define CAMERA_DEVICE_API_VERSION_3_0 HARDWARE_DEVICE_API_VERSION(3, 0)
#define CAMERA_DEVICE_API_VERSION_3_1 HARDWARE_DEVICE_API_VERSION(3, 1)
// Device version 2.x is outdated; device version 3.0 is experimental
#define CAMERA_DEVICE_API_VERSION_CURRENT CAMERA_DEVICE_API_VERSION_1_0
@@ -242,6 +251,65 @@ typedef struct camera_module_callbacks {
} camera_module_callbacks_t;
/**
* Set up vendor-specific tag query methods. These are needed to properly query
* entries with vendor-specified tags, potentially returned by get_camera_info.
*
* This should be used in place of vendor_tag_query_ops, which are deprecated.
*/
typedef struct vendor_tag_ops vendor_tag_ops_t;
struct vendor_tag_ops {
/**
* Get the number of vendor tags supported on this platform. Used to
* calculate the size of buffer needed for holding the array of all tags
* returned by get_all_tags().
*/
int (*get_tag_count)(const vendor_tag_ops_t *v);
/**
* Fill an array with all the supported vendor tags on this platform.
* get_tag_count() returns the number of tags supported, and
* tag_array will be allocated with enough space to hold all of the tags.
*/
void (*get_all_tags)(const vendor_tag_ops_t *v, uint32_t *tag_array);
/**
* Get vendor section name for a vendor-specified entry tag. Only called for
* vendor-defined tags. The section name must start with the name of the
* vendor in the Java package style. For example, CameraZoom Inc. must
* prefix their sections with "com.camerazoom." Must return NULL if the tag
* is outside the bounds of vendor-defined sections.
*
* There may be different vendor-defined tag sections, for example the
* phone maker, the chipset maker, and the camera module maker may each
* have their own "com.vendor."-prefixed section.
*
* The memory pointed to by the return value must remain valid for the
* lifetime that the module is loaded, and is owned by the module.
*/
const char *(*get_section_name)(const vendor_tag_ops_t *v, uint32_t tag);
/**
* Get tag name for a vendor-specified entry tag. Only called for
* vendor-defined tags. Must return NULL if it is not a vendor-defined
* tag.
*
* The memory pointed to by the return value must remain valid for the
* lifetime that the module is loaded, and is owned by the module.
*/
const char *(*get_tag_name)(const vendor_tag_ops_t *v, uint32_t tag);
/**
* Get tag type for a vendor-specified entry tag. Only called for tags >=
* 0x80000000. Must return -1 if the tag is outside the bounds of
* vendor-defined sections.
*/
int (*get_tag_type)(const vendor_tag_ops_t *v, uint32_t tag);
/* reserved for future use */
void* reserved[8];
};
typedef struct camera_module {
hw_module_t common;
@@ -290,6 +358,25 @@ typedef struct camera_module {
*/
int (*set_callbacks)(const camera_module_callbacks_t *callbacks);
/**
* get_vendor_tag_ops:
*
* Get methods to query for vendor extension metadata tag information. The
* HAL should fill in all the vendor tag operation methods, or leave ops
* unchanged if no vendor tags are defined.
*
* Version information (based on camera_module_t.common.module_api_version):
*
* CAMERA_MODULE_API_VERSION_1_x/2_0/2_1:
* Not provided by HAL module. Framework may not call this function.
*
* CAMERA_MODULE_API_VERSION_2_2:
* Valid to be called by the framework.
*/
void (*get_vendor_tag_ops)(vendor_tag_ops_t* ops);
/* reserved for future use */
void* reserved[8];
} camera_module_t;
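A sketch of how the framework side might enumerate vendor tags through the new module-level vendor_tag_ops, assuming a module reporting CAMERA_MODULE_API_VERSION_2_2:

#include <stdio.h>
#include <stdlib.h>
#include <hardware/camera_common.h>

static void dump_vendor_tags(const camera_module_t *module)
{
    vendor_tag_ops_t ops = { 0 };
    module->get_vendor_tag_ops(&ops);
    if (ops.get_tag_count == NULL)
        return;                              /* HAL defines no vendor tags */
    int count = ops.get_tag_count(&ops);
    if (count <= 0)
        return;
    uint32_t *tags = calloc((size_t)count, sizeof(*tags));
    if (tags == NULL)
        return;
    ops.get_all_tags(&ops, tags);
    for (int i = 0; i < count; i++)
        printf("%s%s (type %d)\n",
               ops.get_section_name(&ops, tags[i]),  /* "com.vendor."-style prefix */
               ops.get_tag_name(&ops, tags[i]),
               ops.get_tag_type(&ops, tags[i]));
    free(tags);
}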
__END_DECLS

View file

@@ -0,0 +1,80 @@
/*
* Copyright (C) 2013 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef ANDROID_INCLUDE_HARDWARE_CONSUMERIR_H
#define ANDROID_INCLUDE_HARDWARE_CONSUMERIR_H
#include <stdint.h>
#include <sys/cdefs.h>
#include <hardware/hardware.h>
#include <hardware/hwcomposer_defs.h>
#define CONSUMERIR_MODULE_API_VERSION_1_0 HARDWARE_MODULE_API_VERSION(1, 0)
#define CONSUMERIR_HARDWARE_MODULE_ID "consumerir"
#define CONSUMERIR_TRANSMITTER "transmitter"
typedef struct consumerir_freq_range {
int min;
int max;
} consumerir_freq_range_t;
typedef struct consumerir_module {
struct hw_module_t common;
} consumerir_module_t;
typedef struct consumerir_device {
struct hw_device_t common;
/*
* (*transmit)() is called by the ConsumerIrService to send an IR pattern
* at a given carrier_freq.
*
* The pattern is an alternating series of carrier on and off periods measured in
* microseconds. The carrier should be turned off at the end of a transmit
* even if there is an odd number of entries in the pattern array.
*
* This call should return when the transmit is complete or encounters an error.
*
* returns: 0 on success. A negative error code on error.
*/
int (*transmit)(struct consumerir_device *dev, int carrier_freq,
const int pattern[], int pattern_len);
/*
* (*get_num_carrier_freqs)() is called by the ConsumerIrService to get the
* number of carrier freqs to allocate space for, which is then filled by
* a subsequent call to (*get_carrier_freqs)().
*
* returns: the number of ranges on success. A negative error code on error.
*/
int (*get_num_carrier_freqs)(struct consumerir_device *dev);
/*
* (*get_carrier_freqs)() is called by the ConsumerIrService to enumerate
* which frequencies the IR transmitter supports. The HAL implementation
* should fill an array of consumerir_freq_range structs with the
* appropriate values for the transmitter, up to len elements.
*
* returns: the number of ranges on success. A negative error code on error.
*/
int (*get_carrier_freqs)(struct consumerir_device *dev,
size_t len, consumerir_freq_range_t *ranges);
/* Reserved for future use. Must be NULL. */
void* reserved[8 - 3];
} consumerir_device_t;
#endif /* ANDROID_INCLUDE_HARDWARE_CONSUMERIR_H */
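A sketch of a caller sending a simple on/off pattern through this interface; the carrier frequency and mark/space durations are illustrative only:

static int send_power_toggle(consumerir_device_t *dev)
{
    /* alternating carrier-on / carrier-off periods, in microseconds */
    static const int pattern[] = { 9000, 4500, 560, 560, 560, 1690 };
    return dev->transmit(dev, 38000 /* Hz, illustrative */,
                         pattern, sizeof(pattern) / sizeof(pattern[0]));
}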

View file

@@ -0,0 +1,734 @@
/*
* Copyright (C) 2013 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef ANDROID_INCLUDE_HARDWARE_FUSED_LOCATION_H
#define ANDROID_INCLUDE_HARDWARE_FUSED_LOCATION_H
#include <hardware/hardware.h>
/**
* This header file defines the interface of the Fused Location Provider.
* Fused Location Provider is designed to fuse data from various sources
* like GPS, Wi-Fi, Cell, Sensors, Bluetooth, etc., to provide a fused location to the
* upper layers. The advantage of doing fusion in hardware is power savings.
* The goal is to do this without waking up the AP to get additional data.
* The software implementation of FLP will decide when to use
* the hardware fused location. Other location features like geofencing will
* also be implemented using fusion in hardware.
*/
__BEGIN_DECLS
#define FLP_HEADER_VERSION 1
#define FLP_MODULE_API_VERSION_0_1 HARDWARE_MODULE_API_VERSION(0, 1)
#define FLP_DEVICE_API_VERSION_0_1 HARDWARE_DEVICE_API_VERSION_2(0, 1, FLP_HEADER_VERSION)
/**
* The id of this module
*/
#define FUSED_LOCATION_HARDWARE_MODULE_ID "flp"
/**
* Name for the FLP location interface
*/
#define FLP_LOCATION_INTERFACE "flp_location"
/**
* Name for the FLP diagnostic interface.
*/
#define FLP_DIAGNOSTIC_INTERFACE "flp_diagnostic"
/**
* Name for the FLP_Geofencing interface.
*/
#define FLP_GEOFENCING_INTERFACE "flp_geofencing"
/**
* Name for the FLP_device context interface.
*/
#define FLP_DEVICE_CONTEXT_INTERFACE "flp_device_context"
/**
* Constants to indicate the various subsystems
* that will be used.
*/
#define FLP_TECH_MASK_GNSS (1U<<0)
#define FLP_TECH_MASK_WIFI (1U<<1)
#define FLP_TECH_MASK_SENSORS (1U<<2)
#define FLP_TECH_MASK_CELL (1U<<3)
#define FLP_TECH_MASK_BLUETOOTH (1U<<4)
/**
* This constant is used with the batched locations
* APIs. Batching is mandatory when an FLP implementation
* is supported. If the flag is set, the hardware implementation
* will wake up the application processor when the FIFO is full.
* If the flag is not set, the hardware implementation will drop
* the oldest data when the FIFO is full.
*/
#define FLP_BATCH_WAKEUP_ON_FIFO_FULL 0x0000001
/**
* While batching, the implementation should not call the
* flp_location_callback on every location fix. However,
* sometimes in high power mode, the system might need
* a location callback every single time the location
* fix has been obtained. This flag controls that option.
* It's the responsibility of the upper layers (caller) to switch
* it off, if it knows that the AP might go to sleep.
* When this bit is on amidst a batching session, batching should
* continue while location fixes are reported in real time.
*/
#define FLP_BATCH_CALLBACK_ON_LOCATION_FIX 0x0000002
/** Flags to indicate which values are valid in a FlpLocation. */
typedef uint16_t FlpLocationFlags;
// IMPORTANT: Note that the following values must match
// constants in the corresponding java file.
/** FlpLocation has valid latitude and longitude. */
#define FLP_LOCATION_HAS_LAT_LONG (1U<<0)
/** FlpLocation has valid altitude. */
#define FLP_LOCATION_HAS_ALTITUDE (1U<<1)
/** FlpLocation has valid speed. */
#define FLP_LOCATION_HAS_SPEED (1U<<2)
/** FlpLocation has valid bearing. */
#define FLP_LOCATION_HAS_BEARING (1U<<4)
/** FlpLocation has valid accuracy. */
#define FLP_LOCATION_HAS_ACCURACY (1U<<8)
typedef int64_t FlpUtcTime;
/** Represents a location. */
typedef struct {
/** set to sizeof(FlpLocation) */
size_t size;
/** Flags associated with the location object. */
FlpLocationFlags flags;
/** Represents latitude in degrees. */
double latitude;
/** Represents longitude in degrees. */
double longitude;
/**
* Represents altitude in meters above the WGS 84 reference
* ellipsoid. */
double altitude;
/** Represents speed in meters per second. */
float speed;
/** Represents heading in degrees. */
float bearing;
/** Represents expected accuracy in meters. */
float accuracy;
/** Timestamp for the location fix. */
FlpUtcTime timestamp;
/** Sources used, will be Bitwise OR of the FLP_TECH_MASK bits. */
uint32_t sources_used;
} FlpLocation;
typedef enum {
ASSOCIATE_JVM,
DISASSOCIATE_JVM,
} ThreadEvent;
/**
* Callback with location information.
* Can only be called from a thread associated with the JVM using set_thread_event_cb.
* Parameters:
* num_locations is the number of batched locations available.
* location is the pointer to an array of pointers to location objects.
*/
typedef void (*flp_location_callback)(int32_t num_locations, FlpLocation** location);
/**
* Callback utility for acquiring a wakelock.
* This can be used to prevent the CPU from suspending while handling FLP events.
*/
typedef void (*flp_acquire_wakelock)();
/**
* Callback utility for releasing the FLP wakelock.
*/
typedef void (*flp_release_wakelock)();
/**
* Callback for associating a thread that can call into the Java framework code.
* This must be used to initialize any threads that report events up to the framework.
* Return value:
* FLP_RESULT_SUCCESS on success.
* FLP_RESULT_ERROR if the association failed in the current thread.
*/
typedef int (*flp_set_thread_event)(ThreadEvent event);
/** FLP callback structure. */
typedef struct {
/** set to sizeof(FlpCallbacks) */
size_t size;
flp_location_callback location_cb;
flp_acquire_wakelock acquire_wakelock_cb;
flp_release_wakelock release_wakelock_cb;
flp_set_thread_event set_thread_event_cb;
} FlpCallbacks;
/** Options with the batching FLP APIs */
typedef struct {
/**
* Maximum power in mW that the underlying implementation
* can use for this batching call.
* If max_power_allocation_mW is 0, only fixes that are generated
* at no additional cost of power shall be reported.
*/
double max_power_allocation_mW;
/** Bitwise OR of the FLP_TECH_MASKS to use */
uint32_t sources_to_use;
/**
* FLP_BATCH_WAKEUP_ON_FIFO_FULL - If set the hardware
* will wake up the AP when the buffer is full. If not set, the
* hardware will drop the oldest location object.
*
* FLP_BATCH_CALLBACK_ON_LOCATION_FIX - If set the location
* callback will be called every time there is a location fix.
 * It's the responsibility of the upper layers (caller) to switch
* it off, if it knows that the AP might go to sleep. When this
* bit is on amidst a batching session, batching should continue
* while location fixes are reported in real time.
*
 * Other flags may be bitwise ORed in the future.
*/
uint32_t flags;
/**
 * Frequency with which location needs to be batched, in
 * nanoseconds.
*/
int64_t period_ns;
} FlpBatchOptions;
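/*
 * Illustrative sketch (not part of this header): filling FlpBatchOptions for
 * one fix per second from GNSS, waking the AP only when the FIFO fills. The
 * 20 mW budget is an arbitrary example value.
 */
static void example_fill_options(FlpBatchOptions *options)
{
    options->max_power_allocation_mW = 20.0;
    options->sources_to_use = FLP_TECH_MASK_GNSS;
    options->flags = FLP_BATCH_WAKEUP_ON_FIFO_FULL;
    options->period_ns = 1000000000LL; /* one fix per second */
}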
#define FLP_RESULT_SUCCESS 0
#define FLP_RESULT_ERROR -1
#define FLP_RESULT_INSUFFICIENT_MEMORY -2
#define FLP_RESULT_TOO_MANY_GEOFENCES -3
#define FLP_RESULT_ID_EXISTS -4
#define FLP_RESULT_ID_UNKNOWN -5
#define FLP_RESULT_INVALID_GEOFENCE_TRANSITION -6
/**
* Represents the standard FLP interface.
*/
typedef struct {
/**
* set to sizeof(FlpLocationInterface)
*/
size_t size;
/**
 * Opens the interface and provides the callback routines
 * to the implementation of this interface.
 */
int (*init)(FlpCallbacks* callbacks);
/**
 * Return the batch size (in number of FlpLocation objects)
 * available in the hardware. Note that different HW implementations
 * may have different sample sizes. This shall return the number
 * of samples defined in the format of FlpLocation.
 * This will be used by the upper layer to decide on the batching
 * interval and whether the AP should be woken up or not.
*/
int (*get_batch_size)();
/**
* Start batching locations. This API is primarily used when the AP is
* asleep and the device can batch locations in the hardware.
* flp_location_callback is used to return the locations. When the buffer
* is full and FLP_BATCH_WAKEUP_ON_FIFO_FULL is used, the AP is woken up.
* When the buffer is full and FLP_BATCH_WAKEUP_ON_FIFO_FULL is not set,
* the oldest location object is dropped. In this case the AP will not be
* woken up. The upper layer will use get_batched_location
* API to explicitly ask for the location.
* If FLP_BATCH_CALLBACK_ON_LOCATION_FIX is set, the implementation
* will call the flp_location_callback every single time there is a location
* fix. This overrides FLP_BATCH_WAKEUP_ON_FIFO_FULL flag setting.
* It's the responsibility of the upper layers (caller) to switch
* it off, if it knows that the AP might go to sleep. This is useful
 * for navigational applications when the system is in high power mode.
* Parameters:
* id - Id for the request.
* options - See FlpBatchOptions struct definition.
* Return value:
* FLP_RESULT_SUCCESS on success, FLP_RESULT_INSUFFICIENT_MEMORY,
* FLP_RESULT_ID_EXISTS, FLP_RESULT_ERROR on failure.
*/
int (*start_batching)(int id, FlpBatchOptions* options);
/**
* Update FlpBatchOptions associated with a batching request.
* When a batching operation is in progress and a batching option
* such as FLP_BATCH_WAKEUP_ON_FIFO_FULL needs to be updated, this API
* will be used. For instance, this can happen when the AP is awake and
* the maps application is being used.
* Parameters:
* id - Id of an existing batch request.
* new_options - Updated FlpBatchOptions
* Return value:
* FLP_RESULT_SUCCESS on success, FLP_RESULT_ID_UNKNOWN,
* FLP_RESULT_ERROR on error.
*/
int (*update_batching_options)(int id, FlpBatchOptions* new_options);
/**
* Stop batching.
* Parameters:
* id - Id for the request.
* Return Value:
* FLP_RESULT_SUCCESS on success, FLP_RESULT_ID_UNKNOWN or
* FLP_RESULT_ERROR on failure.
*/
int (*stop_batching)(int id);
/**
* Closes the interface. If any batch operations are in progress,
* they should be stopped.
*/
void (*cleanup)();
/**
* Get the fused location that was batched.
* flp_location_callback is used to return the location. The location object
* is dropped from the buffer only when the buffer is full. Do not remove it
* from the buffer just because it has been returned using the callback.
* In other words, when there is no new location object, two calls to
* get_batched_location(1) should return the same location object.
* Parameters:
* last_n_locations - Number of locations to get. This can be one or many.
* If the last_n_locations is 1, you get the latest location known to the
* hardware.
*/
void (*get_batched_location)(int last_n_locations);
/**
 * Injects current location from another location provider.
 * Latitude and longitude are measured in degrees,
 * expected accuracy is measured in meters.
* Parameters:
* location - The location object being injected.
* Return value: FLP_RESULT_SUCCESS or FLP_RESULT_ERROR.
*/
int (*inject_location)(FlpLocation* location);
/**
* Get a pointer to extension information.
*/
const void* (*get_extension)(const char* name);
} FlpLocationInterface;
struct flp_device_t {
struct hw_device_t common;
/**
* Get a handle to the FLP Interface.
*/
const FlpLocationInterface* (*get_flp_interface)(struct flp_device_t* dev);
};
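/*
 * Usage sketch (not part of this header): opening the FLP device and starting
 * a batching session, using the example_callbacks/example_fill_options
 * sketches above. Error handling is abbreviated and the request id 1 is
 * arbitrary; the flp_device_t itself would be obtained via hw_get_module()
 * with FLP_HARDWARE_MODULE_ID, defined earlier in this header.
 */
static int example_start_flp(struct flp_device_t *dev)
{
    const FlpLocationInterface *flp = dev->get_flp_interface(dev);
    if (flp == NULL || flp->init(&example_callbacks) != FLP_RESULT_SUCCESS)
        return FLP_RESULT_ERROR;

    FlpBatchOptions options;
    example_fill_options(&options);

    /* The FIFO depth can inform the choice of period vs. wakeups. */
    int batch_size = flp->get_batch_size();
    (void)batch_size;

    return flp->start_batching(1 /* request id */, &options);
}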
/**
 * Callback for reporting diagnostic data into the Java framework code.
*/
typedef void (*report_data)(char* data, int length);
/**
* FLP diagnostic callback structure.
 * Currently not used, but reserved for future extension.
*/
typedef struct {
/** set to sizeof(FlpDiagnosticCallbacks) */
size_t size;
flp_set_thread_event set_thread_event_cb;
/** reports diagnostic data into the Java framework code */
report_data data_cb;
} FlpDiagnosticCallbacks;
/** Extended interface for diagnostic support. */
typedef struct {
/** set to sizeof(FlpDiagnosticInterface) */
size_t size;
/**
 * Opens the diagnostic interface and provides the callback routines
 * to the implementation of this interface.
 */
void (*init)(FlpDiagnosticCallbacks* callbacks);
/**
 * Injects diagnostic data into the FLP subsystem.
 * Return 0 on success, -1 on error.
 **/
int (*inject_data)(char* data, int length);
} FlpDiagnosticInterface;
/**
* Context setting information.
* All these settings shall be injected to FLP HAL at FLP init time.
 * Following that, only the changed settings need to be re-injected
* upon changes.
*/
#define FLP_DEVICE_CONTEXT_GPS_ENABLED (1U<<0)
#define FLP_DEVICE_CONTEXT_AGPS_ENABLED (1U<<1)
#define FLP_DEVICE_CONTEXT_NETWORK_POSITIONING_ENABLED (1U<<2)
#define FLP_DEVICE_CONTEXT_WIFI_CONNECTIVITY_ENABLED (1U<<3)
#define FLP_DEVICE_CONTEXT_WIFI_POSITIONING_ENABLED (1U<<4)
#define FLP_DEVICE_CONTEXT_HW_NETWORK_POSITIONING_ENABLED (1U<<5)
#define FLP_DEVICE_CONTEXT_AIRPLANE_MODE_ON (1U<<6)
#define FLP_DEVICE_CONTEXT_DATA_ENABLED (1U<<7)
#define FLP_DEVICE_CONTEXT_ROAMING_ENABLED (1U<<8)
#define FLP_DEVICE_CONTEXT_CURRENTLY_ROAMING (1U<<9)
#define FLP_DEVICE_CONTEXT_SENSOR_ENABLED (1U<<10)
#define FLP_DEVICE_CONTEXT_BLUETOOTH_ENABLED (1U<<11)
#define FLP_DEVICE_CONTEXT_CHARGER_ON (1U<<12)
/** Extended interface for device context support. */
typedef struct {
/** set to sizeof(FlpDeviceContextInterface) */
size_t size;
/**
 * Injects device context information into the FLP subsystem.
* Return 0 on success, -1 on error.
**/
int (*inject_device_context)(uint32_t enabledMask);
} FlpDeviceContextInterface;
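/*
 * Usage sketch (not part of this header): injecting device context after
 * looking up the extension via get_extension(). The extension-name string
 * below is a hypothetical placeholder, not a value defined in this excerpt.
 */
static void example_inject_context(const FlpLocationInterface *flp)
{
    const FlpDeviceContextInterface *ctx =
        (const FlpDeviceContextInterface *)
            flp->get_extension("device_context" /* hypothetical name */);
    if (ctx == NULL)
        return;
    /* GPS and Wi-Fi positioning enabled; all other context bits off. */
    ctx->inject_device_context(FLP_DEVICE_CONTEXT_GPS_ENABLED |
                               FLP_DEVICE_CONTEXT_WIFI_POSITIONING_ENABLED);
}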
/**
* There are 3 states associated with a Geofence: Inside, Outside, Unknown.
* There are 3 transitions: ENTERED, EXITED, UNCERTAIN.
*
* An example state diagram with confidence level: 95% and Unknown time limit
* set as 30 secs is shown below. (confidence level and Unknown time limit are
 * explained later.)
 *             ____________________________
 *            |     Unknown (30 secs)      |
 *             """"""""""""""""""""""""""""
 *               ^ |                  | ^
 *      UNCERTAIN| |ENTERED    EXITED| |UNCERTAIN
 *               | v                  v |
 *             ________   EXITED    _________
 *            | Inside | ---------> | Outside |
 *            |        | <--------- |         |
 *             """"""""   ENTERED   """""""""
*
* Inside state: We are 95% confident that the user is inside the geofence.
 * Outside state: We are 95% confident that the user is outside the geofence.
* Unknown state: Rest of the time.
*
* The Unknown state is better explained with an example:
*
 *    __________
 *   |         c|
 *   |   ___    |    _______
 *   |  | a |   |   |   b   |
 *   |   """    |    """""""
 *   |          |
 *    """"""""""
* In the diagram above, "a" and "b" are 2 geofences and "c" is the accuracy
* circle reported by the FLP subsystem. Now with regard to "b", the system is
 * confident that the user is outside. But with regard to "a", it is not confident
* whether it is inside or outside the geofence. If the accuracy remains the
* same for a sufficient period of time, the UNCERTAIN transition would be
* triggered with the state set to Unknown. If the accuracy improves later, an
* appropriate transition should be triggered. This "sufficient period of time"
* is defined by the parameter in the add_geofence_area API.
* In other words, Unknown state can be interpreted as a state in which the
* FLP subsystem isn't confident enough that the user is either inside or
* outside the Geofence. It moves to Unknown state only after the expiry of the
* timeout.
*
* The geofence callback needs to be triggered for the ENTERED and EXITED
* transitions, when the FLP system is confident that the user has entered
* (Inside state) or exited (Outside state) the Geofence. An implementation
* which uses a value of 95% as the confidence is recommended. The callback
* should be triggered only for the transitions requested by the
* add_geofence_area call.
*
 * Even though the diagram and explanation talk about states and transitions,
 * the callee is only interested in the transitions. The states are mentioned
* here for illustrative purposes.
*
* Startup Scenario: When the device boots up, if an application adds geofences,
* and then we get an accurate FLP location fix, it needs to trigger the
* appropriate (ENTERED or EXITED) transition for every Geofence it knows about.
* By default, all the Geofences will be in the Unknown state.
*
* When the FLP system is unavailable, flp_geofence_status_callback should be
* called to inform the upper layers of the same. Similarly, when it becomes
* available the callback should be called. This is a global state while the
* UNKNOWN transition described above is per geofence.
*
*/
#define FLP_GEOFENCE_TRANSITION_ENTERED (1L<<0)
#define FLP_GEOFENCE_TRANSITION_EXITED (1L<<1)
#define FLP_GEOFENCE_TRANSITION_UNCERTAIN (1L<<2)
#define FLP_GEOFENCE_MONITOR_STATUS_UNAVAILABLE (1L<<0)
#define FLP_GEOFENCE_MONITOR_STATUS_AVAILABLE (1L<<1)
/**
* The callback associated with the geofence.
* Parameters:
* geofence_id - The id associated with the add_geofence_area.
* location - The current location as determined by the FLP subsystem.
* transition - Can be one of FLP_GEOFENCE_TRANSITION_ENTERED, FLP_GEOFENCE_TRANSITION_EXITED,
* FLP_GEOFENCE_TRANSITION_UNCERTAIN.
* timestamp - Timestamp when the transition was detected; -1 if not available.
* sources_used - Bitwise OR of FLP_TECH_MASK flags indicating which
* subsystems were used.
*
* The callback should only be called when the caller is interested in that
* particular transition. For instance, if the caller is interested only in
* ENTERED transition, then the callback should NOT be called with the EXITED
* transition.
*
* IMPORTANT: If a transition is triggered resulting in this callback, the
 * subsystem will wake up the application processor, if it's in a suspend state.
*/
typedef void (*flp_geofence_transition_callback) (int32_t geofence_id, FlpLocation* location,
int32_t transition, FlpUtcTime timestamp, uint32_t sources_used);
/**
 * The callback associated with the availability of one of the sources used for geofence
 * monitoring by the FLP sub-system. For example, if the GPS system determines that it cannot
* monitor geofences because of lack of reliability or unavailability of the GPS signals,
* it will call this callback with FLP_GEOFENCE_MONITOR_STATUS_UNAVAILABLE parameter and the
* source set to FLP_TECH_MASK_GNSS.
*
* Parameters:
* status - FLP_GEOFENCE_MONITOR_STATUS_UNAVAILABLE or FLP_GEOFENCE_MONITOR_STATUS_AVAILABLE.
* source - One of the FLP_TECH_MASKS
* last_location - Last known location.
*/
typedef void (*flp_geofence_monitor_status_callback) (int32_t status, uint32_t source,
FlpLocation* last_location);
/**
* The callback associated with the add_geofence call.
*
* Parameter:
* geofence_id - Id of the geofence.
* result - FLP_RESULT_SUCCESS
 FLP_RESULT_TOO_MANY_GEOFENCES - geofence limit has been reached.
* FLP_RESULT_ID_EXISTS - geofence with id already exists
* FLP_RESULT_INVALID_GEOFENCE_TRANSITION - the monitorTransition contains an
* invalid transition
* FLP_RESULT_ERROR - for other errors.
*/
typedef void (*flp_geofence_add_callback) (int32_t geofence_id, int32_t result);
/**
* The callback associated with the remove_geofence call.
*
* Parameter:
* geofence_id - Id of the geofence.
* result - FLP_RESULT_SUCCESS
* FLP_RESULT_ID_UNKNOWN - for invalid id
* FLP_RESULT_ERROR for others.
*/
typedef void (*flp_geofence_remove_callback) (int32_t geofence_id, int32_t result);
/**
* The callback associated with the pause_geofence call.
*
* Parameter:
* geofence_id - Id of the geofence.
* result - FLP_RESULT_SUCCESS
 FLP_RESULT_ID_UNKNOWN - for invalid id
 FLP_RESULT_INVALID_GEOFENCE_TRANSITION -
 when monitor_transitions is invalid
* FLP_RESULT_ERROR for others.
*/
typedef void (*flp_geofence_pause_callback) (int32_t geofence_id, int32_t result);
/**
* The callback associated with the resume_geofence call.
*
* Parameter:
* geofence_id - Id of the geofence.
* result - FLP_RESULT_SUCCESS
* FLP_RESULT_ID_UNKNOWN - for invalid id
* FLP_RESULT_ERROR for others.
*/
typedef void (*flp_geofence_resume_callback) (int32_t geofence_id, int32_t result);
typedef struct {
/** set to sizeof(FlpGeofenceCallbacks) */
size_t size;
flp_geofence_transition_callback geofence_transition_callback;
flp_geofence_monitor_status_callback geofence_status_callback;
flp_geofence_add_callback geofence_add_callback;
flp_geofence_remove_callback geofence_remove_callback;
flp_geofence_pause_callback geofence_pause_callback;
flp_geofence_resume_callback geofence_resume_callback;
flp_set_thread_event set_thread_event_cb;
} FlpGeofenceCallbacks;
/** Type of geofence */
typedef enum {
TYPE_CIRCLE = 0,
} GeofenceType;
/** Circular geofence is represented by lat / long / radius */
typedef struct {
double latitude;
double longitude;
double radius_m;
} GeofenceCircle;
/** Represents the type of geofence and data */
typedef struct {
GeofenceType type;
union {
GeofenceCircle circle;
} geofence;
} GeofenceData;
/** Geofence Options */
typedef struct {
/**
* The current state of the geofence. For example, if
* the system already knows that the user is inside the geofence,
* this will be set to FLP_GEOFENCE_TRANSITION_ENTERED. In most cases, it
* will be FLP_GEOFENCE_TRANSITION_UNCERTAIN. */
int last_transition;
/**
* Transitions to monitor. Bitwise OR of
* FLP_GEOFENCE_TRANSITION_ENTERED, FLP_GEOFENCE_TRANSITION_EXITED and
* FLP_GEOFENCE_TRANSITION_UNCERTAIN.
*/
int monitor_transitions;
/**
* Defines the best-effort description
* of how soon should the callback be called when the transition
* associated with the Geofence is triggered. For instance, if set
 * to 1000 milliseconds with FLP_GEOFENCE_TRANSITION_ENTERED, the callback
 * should be called within 1000 milliseconds of entering the geofence.
* This parameter is defined in milliseconds.
* NOTE: This is not to be confused with the rate that the GPS is
* polled at. It is acceptable to dynamically vary the rate of
* sampling the GPS for power-saving reasons; thus the rate of
* sampling may be faster or slower than this.
*/
int notification_responsivenes_ms;
/**
* The time limit after which the UNCERTAIN transition
 * should be triggered. This parameter is defined in milliseconds.
*/
int unknown_timer_ms;
/**
 * The sources to use for monitoring geofences. It's a bitwise OR
* of FLP_TECH_MASK flags.
*/
uint32_t sources_to_use;
} GeofenceOptions;
/** Geofence struct */
typedef struct {
int32_t geofence_id;
GeofenceData* data;
GeofenceOptions* options;
} Geofence;
/** Extended interface for FLP_Geofencing support */
typedef struct {
/** set to sizeof(FlpGeofencingInterface) */
size_t size;
/**
 * Opens the geofence interface and provides the callback routines
 * to the implementation of this interface.
 */
void (*init)(FlpGeofenceCallbacks* callbacks);
/**
* Add a list of geofences.
* Parameters:
 * number_of_geofences - The number of geofences that need to be added.
* geofences - Pointer to array of pointers to Geofence structure.
*/
void (*add_geofences) (int32_t number_of_geofences, Geofence** geofences);
/**
* Pause monitoring a particular geofence.
* Parameters:
* geofence_id - The id for the geofence.
*/
void (*pause_geofence) (int32_t geofence_id);
/**
* Resume monitoring a particular geofence.
* Parameters:
* geofence_id - The id for the geofence.
* monitor_transitions - Which transitions to monitor. Bitwise OR of
* FLP_GEOFENCE_TRANSITION_ENTERED, FLP_GEOFENCE_TRANSITION_EXITED and
* FLP_GEOFENCE_TRANSITION_UNCERTAIN.
 * This supersedes the value provided in the
 * add_geofence_area call.
*/
void (*resume_geofence) (int32_t geofence_id, int monitor_transitions);
/**
* Modify a particular geofence option.
* Parameters:
* geofence_id - The id for the geofence.
* options - Various options associated with the geofence. See
* GeofenceOptions structure for details.
*/
void (*modify_geofence_option) (int32_t geofence_id, GeofenceOptions* options);
/**
* Remove a list of geofences. After the function returns, no notifications
* should be sent.
* Parameter:
 * number_of_geofences - The number of geofences to be removed.
* geofence_id - Pointer to array of geofence_ids to be removed.
*/
void (*remove_geofences) (int32_t number_of_geofences, int32_t* geofence_id);
} FlpGeofencingInterface;
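/*
 * Usage sketch (not part of this header): registering a single 100 m circular
 * geofence, monitoring all three transitions. All numeric values are
 * arbitrary examples; `gf_iface` is assumed to have been obtained through
 * get_extension() and already init()'ed with FlpGeofenceCallbacks.
 */
static void example_add_geofence(const FlpGeofencingInterface *gf_iface)
{
    GeofenceData data;
    data.type = TYPE_CIRCLE;
    data.geofence.circle.latitude  = 37.422;
    data.geofence.circle.longitude = -122.084;
    data.geofence.circle.radius_m  = 100.0;

    GeofenceOptions options;
    options.last_transition = FLP_GEOFENCE_TRANSITION_UNCERTAIN;
    options.monitor_transitions = FLP_GEOFENCE_TRANSITION_ENTERED |
                                  FLP_GEOFENCE_TRANSITION_EXITED |
                                  FLP_GEOFENCE_TRANSITION_UNCERTAIN;
    options.notification_responsivenes_ms = 5000;
    options.unknown_timer_ms = 30000;
    options.sources_to_use = FLP_TECH_MASK_GNSS;

    Geofence fence = { 1 /* geofence_id */, &data, &options };
    Geofence *list[] = { &fence };
    gf_iface->add_geofences(1, list);
}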
__END_DECLS
#endif /* ANDROID_INCLUDE_HARDWARE_FLP_H */

View file

@@ -104,6 +104,9 @@ enum {
    /* mask for the software usage bit-mask */
    GRALLOC_USAGE_HW_MASK = 0x00071F00,

    /* buffer will be used as a RenderScript Allocation */
    GRALLOC_USAGE_RENDERSCRIPT = 0x00100000,

    /* buffer should be displayed full-screen on an external display when
     * possible
     */
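/*
 * Usage sketch (not part of this diff): allocating a buffer intended to back
 * a RenderScript Allocation through the standard gralloc alloc device. Error
 * handling is abbreviated; the 640x480 RGBA size and the usage combination
 * are arbitrary examples, and the caller keeps alloc_dev open for the
 * lifetime of the buffer.
 */
#include <hardware/gralloc.h>

static int alloc_rs_buffer(alloc_device_t *alloc_dev,
                           buffer_handle_t *handle, int *stride)
{
    /* Request CPU-readable memory that may be used as a RenderScript
     * Allocation. */
    return alloc_dev->alloc(alloc_dev, 640, 480,
                            HAL_PIXEL_FORMAT_RGBA_8888,
                            GRALLOC_USAGE_SW_READ_OFTEN |
                            GRALLOC_USAGE_RENDERSCRIPT,
                            handle, stride);
}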

View file

@@ -54,6 +54,13 @@ typedef struct hwc_rect {
    int bottom;
} hwc_rect_t;

typedef struct hwc_frect {
    float left;
    float top;
    float right;
    float bottom;
} hwc_frect_t;

typedef struct hwc_region {
    size_t numRects;
    hwc_rect_t const* rects;

@@ -149,8 +156,17 @@ typedef struct hwc_layer_1 {
    int32_t blending;

    /* area of the source to consider, the origin is the top-left corner of
     * the buffer. As of HWC_DEVICE_API_VERSION_1_3, sourceRect uses floats.
     * If the h/w can't support a non-integer source crop rectangle, it should
     * punt to OpenGL ES composition.
     */
    union {
        // crop rectangle in integer (pre HWC_DEVICE_API_VERSION_1_3)
        hwc_rect_t sourceCropi;
        hwc_rect_t sourceCrop; // just for source compatibility
        // crop rectangle in floats (as of HWC_DEVICE_API_VERSION_1_3)
        hwc_frect_t sourceCropf;
    };

    /* where to composite the sourceCrop onto the display. The sourceCrop
     * is scaled using linear filtering to the displayFrame. The origin is the
@@ -433,12 +449,12 @@ typedef struct hwc_composer_device_1 {
     * For HWC 1.0, numDisplays will always be one, and displays[0] will be
     * non-NULL.
     *
     * For HWC 1.1, numDisplays will always be HWC_NUM_PHYSICAL_DISPLAY_TYPES.
     * Entries for unsupported or disabled/disconnected display types will be
     * NULL.
     *
     * In HWC 1.3, numDisplays may be up to HWC_NUM_DISPLAY_TYPES. The extra
     * entries correspond to enabled virtual displays, and will be non-NULL.
     *
     * returns: 0 on success. A negative error code on error. If an error is
     * returned, SurfaceFlinger will assume that none of the layers will be

@@ -466,12 +482,12 @@ typedef struct hwc_composer_device_1 {
     * For HWC 1.0, numDisplays will always be one, and displays[0] will be
     * non-NULL.
     *
     * For HWC 1.1, numDisplays will always be HWC_NUM_PHYSICAL_DISPLAY_TYPES.
     * Entries for unsupported or disabled/disconnected display types will be
     * NULL.
     *
     * In HWC 1.3, numDisplays may be up to HWC_NUM_DISPLAY_TYPES. The extra
     * entries correspond to enabled virtual displays, and will be non-NULL.
     *
     * IMPORTANT NOTE: There is an implicit layer containing opaque black
     * pixels behind all the layers in the list. It is the responsibility of
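/*
 * Illustrative sketch (not part of this diff): a HAL-side helper that reads a
 * layer's source crop for both old and new HWC versions, mirroring the union
 * semantics documented above. The helper name and the version-dispatch idea
 * are assumptions for illustration.
 */
static void get_source_crop(uint32_t device_version,
                            const struct hwc_layer_1 *layer,
                            float *left, float *top,
                            float *right, float *bottom)
{
    if (device_version >= HWC_DEVICE_API_VERSION_1_3) {
        /* As of 1.3, the crop is expressed in floats. */
        *left   = layer->sourceCropf.left;
        *top    = layer->sourceCropf.top;
        *right  = layer->sourceCropf.right;
        *bottom = layer->sourceCropf.bottom;
    } else {
        /* Pre-1.3 devices use the integer rectangle. */
        *left   = (float)layer->sourceCropi.left;
        *top    = (float)layer->sourceCropi.top;
        *right  = (float)layer->sourceCropi.right;
        *bottom = (float)layer->sourceCropi.bottom;
    }
}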

View file

@@ -35,6 +35,7 @@ __BEGIN_DECLS
#define HWC_DEVICE_API_VERSION_1_0 HARDWARE_DEVICE_API_VERSION_2(1, 0, HWC_HEADER_VERSION)
#define HWC_DEVICE_API_VERSION_1_1 HARDWARE_DEVICE_API_VERSION_2(1, 1, HWC_HEADER_VERSION)
#define HWC_DEVICE_API_VERSION_1_2 HARDWARE_DEVICE_API_VERSION_2(1, 2, HWC_HEADER_VERSION)
#define HWC_DEVICE_API_VERSION_1_3 HARDWARE_DEVICE_API_VERSION_2(1, 3, HWC_HEADER_VERSION)

enum {
    /* hwc_composer_device_t::set failed in EGL */

@@ -181,12 +182,16 @@ enum {
enum {
    HWC_DISPLAY_PRIMARY     = 0,
    HWC_DISPLAY_EXTERNAL    = 1,    // HDMI, DP, etc.
    HWC_DISPLAY_VIRTUAL     = 2,

    HWC_NUM_PHYSICAL_DISPLAY_TYPES = 2,
    HWC_NUM_DISPLAY_TYPES          = 3,
};

enum {
    HWC_DISPLAY_PRIMARY_BIT    = 1 << HWC_DISPLAY_PRIMARY,
    HWC_DISPLAY_EXTERNAL_BIT   = 1 << HWC_DISPLAY_EXTERNAL,
    HWC_DISPLAY_VIRTUAL_BIT    = 1 << HWC_DISPLAY_VIRTUAL,
};

/*****************************************************************************/

include/hardware/memtrack.h (new file, 160 lines)
View file

@@ -0,0 +1,160 @@
/*
* Copyright (C) 2013 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef ANDROID_INCLUDE_HARDWARE_MEMTRACK_H
#define ANDROID_INCLUDE_HARDWARE_MEMTRACK_H
#include <stdint.h>
#include <sys/cdefs.h>
#include <sys/types.h>
#include <hardware/hardware.h>
__BEGIN_DECLS
#define MEMTRACK_MODULE_API_VERSION_0_1 HARDWARE_MODULE_API_VERSION(0, 1)
/**
* The id of this module
*/
#define MEMTRACK_HARDWARE_MODULE_ID "memtrack"
/*
* The Memory Tracker HAL is designed to return information about device-specific
* memory usage. The primary goal is to be able to track memory that is not
* trackable in any other way, for example texture memory that is allocated by
* a process, but not mapped in to that process' address space.
* A secondary goal is to be able to categorize memory used by a process into
* GL, graphics, etc. All memory sizes should be in real memory usage,
* accounting for stride, bit depth, rounding up to page size, etc.
*
* A process collecting memory statistics will call getMemory for each
* combination of pid and memory type. For each memory type that it recognizes
* the HAL should fill out an array of memtrack_record structures breaking
* down the statistics of that memory type as much as possible. For example,
* getMemory(<pid>, MEMTRACK_TYPE_GL) might return:
* { { 4096, ACCOUNTED | PRIVATE | SYSTEM },
* { 40960, UNACCOUNTED | PRIVATE | SYSTEM },
* { 8192, ACCOUNTED | PRIVATE | DEDICATED },
* { 8192, UNACCOUNTED | PRIVATE | DEDICATED } }
* If the HAL could not differentiate between SYSTEM and DEDICATED memory, it
* could return:
* { { 12288, ACCOUNTED | PRIVATE },
* { 49152, UNACCOUNTED | PRIVATE } }
*
* Memory should not overlap between types. For example, a graphics buffer
* that has been mapped into the GPU as a surface should show up when
* MEMTRACK_TYPE_GRAPHICS is requested, and not when MEMTRACK_TYPE_GL
* is requested.
*/
enum memtrack_type {
MEMTRACK_TYPE_OTHER = 0,
MEMTRACK_TYPE_GL = 1,
MEMTRACK_TYPE_GRAPHICS = 2,
MEMTRACK_TYPE_MULTIMEDIA = 3,
MEMTRACK_TYPE_CAMERA = 4,
MEMTRACK_NUM_TYPES,
};
struct memtrack_record {
size_t size_in_bytes;
unsigned int flags;
};
/**
* Flags to differentiate memory that can already be accounted for in
* /proc/<pid>/smaps,
* (Shared_Clean + Shared_Dirty + Private_Clean + Private_Dirty = Size).
* In general, memory mapped in to a userspace process is accounted unless
* it was mapped with remap_pfn_range.
* Exactly one of these should be set.
*/
#define MEMTRACK_FLAG_SMAPS_ACCOUNTED (1 << 1)
#define MEMTRACK_FLAG_SMAPS_UNACCOUNTED (1 << 2)
/**
* Flags to differentiate memory shared across multiple processes vs. memory
* used by a single process. Only zero or one of these may be set in a record.
* If none are set, record is assumed to count shared + private memory.
*/
#define MEMTRACK_FLAG_SHARED (1 << 3)
#define MEMTRACK_FLAG_SHARED_PSS (1 << 4) /* shared / num_processes */
#define MEMTRACK_FLAG_PRIVATE (1 << 5)
/**
* Flags to differentiate memory taken from the kernel's allocation pool vs.
* memory that is dedicated to non-kernel allocations, for example a carveout
* or separate video memory. Only zero or one of these may be set in a record.
* If none are set, record is assumed to count system + dedicated memory.
*/
#define MEMTRACK_FLAG_SYSTEM (1 << 6)
#define MEMTRACK_FLAG_DEDICATED (1 << 7)
/**
* Flags to differentiate memory accessible by the CPU in non-secure mode vs.
* memory that is protected. Only zero or one of these may be set in a record.
* If none are set, record is assumed to count secure + nonsecure memory.
*/
#define MEMTRACK_FLAG_NONSECURE (1 << 8)
#define MEMTRACK_FLAG_SECURE (1 << 9)
/**
* Every hardware module must have a data structure named HAL_MODULE_INFO_SYM
* and the fields of this data structure must begin with hw_module_t
* followed by module specific information.
*/
typedef struct memtrack_module {
struct hw_module_t common;
/**
* (*init)() performs memtrack management setup actions and is called
* once before any calls to getMemory().
* Returns 0 on success, -errno on error.
*/
int (*init)(const struct memtrack_module *module);
/**
* (*getMemory)() expects an array of record objects and populates up to
* *num_records structures with the sizes of memory plus associated flags for
* that memory. It also updates *num_records with the total number of
* records it could return if *num_records was large enough when passed in.
* Returning records with size 0 is expected; the number of records should
* not vary between calls to getMemory for the same memory type, even
* for different pids.
*
* The caller will often call getMemory for a type and pid with
* *num_records == 0 to determine how many records to allocate room for;
* this case should be a fast path in the HAL, returning a constant and
* not querying any kernel files. If *num_records passed in is 0,
* then records may be NULL.
*
* This function must be thread-safe; it may get called from multiple
* threads at the same time.
*
* Returns 0 on success, -ENODEV if the type is not supported, -errno
* on other errors.
*/
int (*getMemory)(const struct memtrack_module *module,
pid_t pid,
int type,
struct memtrack_record *records,
size_t *num_records);
} memtrack_module_t;
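/*
 * Usage sketch (not part of this header): the two-pass getMemory() pattern
 * described above, summing memory that smaps cannot account for. Assumes
 * `module` was loaded with hw_get_module(MEMTRACK_HARDWARE_MODULE_ID, ...)
 * and init() has already succeeded; error handling is abbreviated.
 */
static size_t example_unaccounted_gl_bytes(const memtrack_module_t *module,
                                           pid_t pid)
{
    size_t num_records = 0;
    /* Pass 1: with *num_records == 0, records may be NULL; the HAL just
     * reports how many records exist for this (pid, type). */
    if (module->getMemory(module, pid, MEMTRACK_TYPE_GL, NULL, &num_records))
        return 0;
    if (num_records == 0)
        return 0;

    struct memtrack_record records[num_records]; /* C99 VLA for brevity */
    /* Pass 2: fetch the records themselves. */
    if (module->getMemory(module, pid, MEMTRACK_TYPE_GL, records,
                          &num_records))
        return 0;

    size_t total = 0;
    for (size_t i = 0; i < num_records; i++) {
        /* Sum only memory invisible to /proc/<pid>/smaps. */
        if (records[i].flags & MEMTRACK_FLAG_SMAPS_UNACCOUNTED)
            total += records[i].size_in_bytes;
    }
    return total;
}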
__END_DECLS
#endif // ANDROID_INCLUDE_HARDWARE_MEMTRACK_H

View file

@@ -32,6 +32,7 @@ __BEGIN_DECLS
#define SENSORS_MODULE_API_VERSION_0_1 HARDWARE_MODULE_API_VERSION(0, 1)
#define SENSORS_DEVICE_API_VERSION_0_1 HARDWARE_DEVICE_API_VERSION_2(0, 1, SENSORS_HEADER_VERSION)
#define SENSORS_DEVICE_API_VERSION_1_0 HARDWARE_DEVICE_API_VERSION_2(1, 0, SENSORS_HEADER_VERSION)
#define SENSORS_DEVICE_API_VERSION_1_1 HARDWARE_DEVICE_API_VERSION_2(1, 1, SENSORS_HEADER_VERSION)

/**
 * The id of this module
@@ -64,6 +65,15 @@ enum {
    SENSORS_BATCH_WAKE_UPON_FIFO_FULL = 0x00000002
};

/*
 * what field for meta_data_event_t
 */
enum {
    /* a previous flush operation has completed */
    META_DATA_FLUSH_COMPLETE = 1,
    META_DATA_VERSION   /* always last, leave auto-assigned */
};

/**
 * Definition of the axis used by the sensor HAL API
 *
@@ -132,8 +142,19 @@ enum {
 *
 * Each sensor has a type which defines what this sensor measures and how
 * measures are reported. All types are defined below.
 *
 * Device manufacturers (OEMs) can define their own sensor types, for
 * their private use by applications or services provided by them. Such
 * sensor types are specific to an OEM and can't be exposed in the SDK.
 * These types must start at SENSOR_TYPE_DEVICE_PRIVATE_BASE.
 */

/*
 * Base for device manufacturers private sensor types.
 * These sensor types can't be exposed in the SDK.
 */
#define SENSOR_TYPE_DEVICE_PRIVATE_BASE     0x10000
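/*
 * Illustrative sketch (not part of this diff): an OEM-private sensor type
 * must be defined relative to SENSOR_TYPE_DEVICE_PRIVATE_BASE. The name below
 * is made up for the example.
 */
#define SENSOR_TYPE_OEM_EXAMPLE_WIDGET (SENSOR_TYPE_DEVICE_PRIVATE_BASE + 1)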
/*
 * Sensor fusion and virtual sensors
 *
@@ -174,6 +195,40 @@
 *
 */

/*
 * SENSOR_TYPE_META_DATA
 * trigger-mode: n/a
 * wake-up sensor: n/a
 *
 * NO SENSOR OF THAT TYPE MUST BE RETURNED BY (*get_sensors_list)()
 *
 * SENSOR_TYPE_META_DATA is a special token used to populate the
 * sensors_meta_data_event structure. It doesn't correspond to a physical
 * sensor. sensors_meta_data_event are special; they exist only inside
 * the HAL and are generated spontaneously, as opposed to being related to
 * a physical sensor.
 *
 * sensors_meta_data_event_t.version must be META_DATA_VERSION
 * sensors_meta_data_event_t.sensor must be 0
 * sensors_meta_data_event_t.type must be SENSOR_TYPE_META_DATA
 * sensors_meta_data_event_t.reserved must be 0
 * sensors_meta_data_event_t.timestamp must be 0
 *
 * The payload is a meta_data_event_t, where:
 * meta_data_event_t.what can take the following values:
 *
 * META_DATA_FLUSH_COMPLETE
 *   This event indicates that a previous (*flush)() call has completed for the sensor
 *   handle specified in meta_data_event_t.sensor.
 *   see (*flush)() for more details
 *
 * All other values for meta_data_event_t.what are reserved and
 * must not be used.
 *
 */
#define SENSOR_TYPE_META_DATA   (0)

/*
 * SENSOR_TYPE_ACCELEROMETER
 * trigger-mode: continuous
@@ -451,6 +506,9 @@
 * SENSOR_TYPE_MAGNETIC_FIELD must be present and both must return the
 * same sensor_t::name and sensor_t::vendor.
 *
 * Minimum filtering should be applied to this sensor. In particular, low pass
 * filters should be avoided.
 *
 * See SENSOR_TYPE_MAGNETIC_FIELD for more information
 */
#define SENSOR_TYPE_MAGNETIC_FIELD_UNCALIBRATED (14)
@@ -608,7 +666,7 @@
 *
 * A sensor of this type returns the number of steps taken by the user since
 * the last reboot while activated. The value is returned as a uint64_t and is
 * reset to zero only on a system / android reboot.
 *
 * The timestamp of the event is set to the time when the first step
 * for that event was taken.
@@ -662,7 +720,9 @@
 * of using a gyroscope.
 *
 * This sensor must be based on a magnetometer. It cannot be implemented using
 * a gyroscope, and gyroscope input cannot be used by this sensor, as the
 * goal of this sensor is to be low power.
 * The accelerometer can be (and usually is) used.
 *
 * Just like SENSOR_TYPE_ROTATION_VECTOR, this sensor reports an estimated
 * heading accuracy:
@@ -740,6 +800,11 @@ typedef struct {
    };
} uncalibrated_event_t;

typedef struct meta_data_event {
    int32_t what;
    int32_t sensor;
} meta_data_event_t;

/**
 * Union of the various types of sensor data
 * that can be returned.
@@ -761,48 +826,63 @@ typedef struct sensors_event_t {
    int64_t timestamp;

    union {
        union {
            float           data[16];

            /* acceleration values are in meter per second per second (m/s^2) */
            sensors_vec_t   acceleration;

            /* magnetic vector values are in micro-Tesla (uT) */
            sensors_vec_t   magnetic;

            /* orientation values are in degrees */
            sensors_vec_t   orientation;

            /* gyroscope values are in rad/s */
            sensors_vec_t   gyro;

            /* temperature is in degrees centigrade (Celsius) */
            float           temperature;

            /* distance in centimeters */
            float           distance;

            /* light in SI lux units */
            float           light;

            /* pressure in hectopascal (hPa) */
            float           pressure;

            /* relative humidity in percent */
            float           relative_humidity;

            /* uncalibrated gyroscope values are in rad/s */
            uncalibrated_event_t uncalibrated_gyro;

            /* uncalibrated magnetometer values are in micro-Teslas */
            uncalibrated_event_t uncalibrated_magnetic;

            /* this is a special event. see SENSOR_TYPE_META_DATA above.
             * sensors_meta_data_event_t events are all reported with a type of
             * SENSOR_TYPE_META_DATA. The handle is ignored and must be zero.
             */
            meta_data_event_t meta_data;
        };

        union {
            uint64_t        data[8];

            /* step-counter */
            uint64_t        step_counter;
        } u64;
    };

    uint32_t reserved1[4];
} sensors_event_t;

/* see SENSOR_TYPE_META_DATA */
typedef sensors_event_t sensors_meta_data_event_t;
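/*
 * Usage sketch (not part of this diff): draining poll() and recognizing
 * flush-completion markers. Assumes `device` is an opened
 * sensors_poll_device_1_t; the buffer size of 16 is arbitrary.
 */
static void example_drain_events(struct sensors_poll_device_1 *device)
{
    sensors_event_t buffer[16];
    int n = device->poll((struct sensors_poll_device_t *)device, buffer, 16);
    for (int i = 0; i < n; i++) {
        if (buffer[i].type == SENSOR_TYPE_META_DATA) {
            /* No sensor sample here; the payload is meta_data. */
            if (buffer[i].meta_data.what == META_DATA_FLUSH_COMPLETE) {
                /* flush completed for buffer[i].meta_data.sensor */
            }
            continue;
        }
        /* Handle a regular sample in buffer[i] here. */
    }
}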
struct sensor_t;
@@ -865,8 +945,21 @@ struct sensor_t {
     */
    int32_t         minDelay;

    /* number of events reserved for this sensor in the batch mode FIFO.
     * If there is a dedicated FIFO for this sensor, then this is the
     * size of this FIFO. If the FIFO is shared with other sensors,
     * this is the size reserved for that sensor and it can be zero.
     */
    uint32_t        fifoReservedEventCount;

    /* maximum number of events of this sensor that could be batched.
     * This is especially relevant when the FIFO is shared between
     * several sensors; this value is then set to the size of that FIFO.
     */
    uint32_t        fifoMaxEventCount;

    /* reserved fields, must be zero */
    void*           reserved[6];
};
@@ -903,6 +996,10 @@ typedef struct sensors_poll_device_1 {
     * handle is the handle of the sensor to change.
     * enabled set to 1 to enable, or 0 to disable the sensor.
     *
     * if enabled is set to 1, the sensor is activated even if
     * setDelay() wasn't called before. In this case, a default rate
     * should be used.
     *
     * unless otherwise noted in the sensor types definitions, an
     * activated sensor never prevents the SoC from going into suspend
     * mode; that is, the HAL shall not hold a partial wake-lock on

@@ -912,10 +1009,10 @@
     * receiving an event and they must still accept being deactivated
     * through a call to activate(..., ..., 0).
     *
     * if "enabled" is 1 and the sensor is already activated, this
     * function is a no-op and succeeds.
     *
     * if "enabled" is 0 and the sensor is already de-activated,
     * this function is a no-op and succeeds.
     *
     * return 0 on success, negative errno code otherwise
@@ -939,6 +1036,9 @@
     * sensor_t::minDelay unless sensor_t::minDelay is 0, in which
     * case it is clamped to >= 1ms.
     *
     * setDelay will not be called when the sensor is in batching mode.
     * In this case, batch() will be called with the new period.
     *
     * @return 0 if successful, < 0 on error
     */
    int (*setDelay)(struct sensors_poll_device_t *dev,
@@ -1068,19 +1168,30 @@
     * if a batch call with SENSORS_BATCH_DRY_RUN is successful,
     * the same call without SENSORS_BATCH_DRY_RUN must succeed as well).
     *
     * When timeout is not 0:
     *   If successful, 0 is returned.
     *   If the specified sensor doesn't support batch mode, return -EINVAL.
     *   If the specified sensor's trigger-mode is one-shot, return -EINVAL.
     *   If WAKE_UPON_FIFO_FULL is specified and the specified sensor's internal
     *   FIFO is too small to store at least 10 seconds worth of data at the
     *   given rate, -EINVAL is returned. Note that as stated above, this has to
     *   be determined at compile time, and not based on the state of the
     *   system.
     *   If some other constraints above cannot be satisfied, return -EINVAL.
     *
     * Note: the timeout parameter, when > 0, has no impact on whether this
     * function succeeds or fails.
     *
     * When timeout is 0:
     *   The caller will never set the wake_upon_fifo_full flag.
     *   The function must succeed, and batch mode must be deactivated.
     *
     * Independently of whether DRY_RUN is specified, when the call to batch()
     * fails, no state should be changed. In particular, a failed call to
     * batch() should not change the rate of the sensor. Example:
     *   setDelay(..., 10ms)
     *   batch(..., 20ms, ...) fails
     *   rate should stay 10ms.
     *
     *
     * IMPLEMENTATION NOTES:
@@ -1150,6 +1261,35 @@
    int (*batch)(struct sensors_poll_device_1* dev,
            int handle, int flags, int64_t period_ns, int64_t timeout);

    /*
     * Flush adds a META_DATA_FLUSH_COMPLETE event (sensors_meta_data_event_t)
     * to the end of the "batch mode" FIFO for the specified sensor and flushes
     * the FIFO; those events are delivered as usual (i.e.: as if the batch
     * timeout had expired) and removed from the FIFO.
     *
     * See the META_DATA_FLUSH_COMPLETE section for details about the
     * META_DATA_FLUSH_COMPLETE event.
     *
     * The flush happens asynchronously (i.e.: this function must return
     * immediately).
     *
     * If the implementation uses a single FIFO for several sensors, that
     * FIFO is flushed and the META_DATA_FLUSH_COMPLETE event is added only
     * for the specified sensor.
     *
     * If the specified sensor wasn't in batch mode, flush succeeds and
     * promptly sends a META_DATA_FLUSH_COMPLETE event for that sensor.
     *
     * If the FIFO was empty at the time of the call, flush returns
     * 0 (success) and promptly sends a META_DATA_FLUSH_COMPLETE event
     * for that sensor.
     *
     * If the specified sensor wasn't enabled, flush returns -EINVAL.
     *
     * return 0 on success, negative errno code otherwise.
     */
    int (*flush)(struct sensors_poll_device_1* dev, int handle);

    void (*reserved_procs[8])(void);

} sensors_poll_device_1_t;
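/*
 * Usage sketch (not part of this diff): enabling batching on a sensor and
 * later flushing its FIFO. Assumes `dev` is an opened sensors_poll_device_1_t
 * and `handle` names an enabled sensor; the 20 ms period and 10 s timeout are
 * arbitrary example values.
 */
static int example_batch_then_flush(struct sensors_poll_device_1 *dev,
                                    int handle)
{
    /* Batch at 20 ms, letting events accumulate for up to 10 seconds. */
    int err = dev->batch(dev, handle, 0 /* flags */,
                         20000000LL /* period_ns */,
                         10000000000LL /* timeout */);
    if (err != 0)
        return err; /* on failure, the previous rate must be preserved */

    /* Later: drain whatever is currently in the FIFO. A
     * META_DATA_FLUSH_COMPLETE event marks the end of the flush. */
    return dev->flush(dev, handle);
}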

View file

@@ -1,2 +1,3 @@
hardware_modules := gralloc hwcomposer audio nfc nfc-nci local_time \
    power usbaudio audio_remote_submix camera consumerir
include $(call all-named-subdir-makefiles,$(hardware_modules))

View file

@@ -99,7 +99,8 @@ static audio_io_handle_t ap_get_output(struct audio_policy *pol,
                                       uint32_t sampling_rate,
                                       audio_format_t format,
                                       audio_channel_mask_t channelMask,
                                       audio_output_flags_t flags,
                                       const audio_offload_info_t *info)
{
    return 0;
}

@@ -229,6 +230,12 @@ static int ap_dump(const struct audio_policy *pol, int fd)
    return -ENOSYS;
}

static bool ap_is_offload_supported(const struct audio_policy *pol,
                                    const audio_offload_info_t *info)
{
    return false;
}

static int create_default_ap(const struct audio_policy_device *device,
                             struct audio_policy_service_ops *aps_ops,
                             void *service,

@@ -278,6 +285,8 @@ static int create_default_ap(const struct audio_policy_device *device,
    dap->policy.is_stream_active = ap_is_stream_active;
    dap->policy.dump = ap_dump;

    dap->policy.is_offload_supported = ap_is_offload_supported;

    dap->service = service;
    dap->aps_ops = aps_ops;

modules/audio_remote_submix/audio_hw.cpp (8 lines changed; file mode changed from executable to normal)
View file

@@ -271,7 +271,7 @@ static ssize_t out_write(struct audio_stream_out *stream, const void* buffer,
        return 0;
    } else {
        // write() returned UNDERRUN or WOULD_BLOCK, retry
        ALOGE("out_write() write to pipe returned unexpected %d", written_frames);
        written_frames = sink->write(buffer, frames);
    }
}

@@ -281,7 +281,7 @@
    pthread_mutex_unlock(&out->dev->lock);

    if (written_frames < 0) {
        ALOGE("out_write() failed writing to pipe with %d", written_frames);
        return 0;
    } else {
        ALOGV("out_write() wrote %lu bytes)", written_frames * frame_size);

@@ -549,7 +549,7 @@ static int adev_open_output_stream(struct audio_hw_device *dev,
    config->channel_mask = AUDIO_CHANNEL_OUT_STEREO;
    rsxadev->config.channel_mask = config->channel_mask;

    if ((config->sample_rate != 48000) && (config->sample_rate != 44100)) {
        config->sample_rate = DEFAULT_RATE_HZ;
    }
    rsxadev->config.rate = config->sample_rate;

@@ -708,7 +708,7 @@ static int adev_open_input_stream(struct audio_hw_device *dev,
    config->channel_mask = AUDIO_CHANNEL_IN_STEREO;
    rsxadev->config.channel_mask = config->channel_mask;

    if ((config->sample_rate != 48000) && (config->sample_rate != 44100)) {
        config->sample_rate = DEFAULT_RATE_HZ;
    }
    rsxadev->config.rate = config->sample_rate;

View file

@@ -26,11 +26,14 @@ LOCAL_C_INCLUDES += \
LOCAL_SRC_FILES := \
    CameraHAL.cpp \
    Camera.cpp \
    Metadata.cpp \
    Stream.cpp \

LOCAL_SHARED_LIBRARIES := \
    libcamera_metadata \
    libcutils \
    liblog \
    libsync \

LOCAL_CFLAGS += -Wall -Wextra -fvisibility=hidden

View file

@@ -17,7 +17,12 @@
#include <cstdlib>
#include <pthread.h>
#include <hardware/camera3.h>
#include <sync/sync.h>
#include <system/camera_metadata.h>
#include <system/graphics.h>
#include "CameraHAL.h"
#include "Metadata.h"
#include "Stream.h"

//#define LOG_NDEBUG 0
#define LOG_TAG "Camera"

@@ -25,9 +30,14 @@
#define ATRACE_TAG (ATRACE_TAG_CAMERA | ATRACE_TAG_HAL)
#include <cutils/trace.h>
#include "ScopedTrace.h"

#include "Camera.h"

#define CAMERA_SYNC_TIMEOUT 5000 // in msecs
#define ARRAY_SIZE(a) (sizeof(a) / sizeof(a[0]))

namespace default_camera_hal {

extern "C" {
@@ -42,14 +52,19 @@ static int close_device(hw_device_t* dev)
Camera::Camera(int id)
  : mId(id),
    mStaticInfo(NULL),
    mBusy(false),
    mCallbackOps(NULL),
    mStreams(NULL),
    mNumStreams(0),
    mSettings(NULL)
{
    pthread_mutex_init(&mMutex, NULL);
    pthread_mutex_init(&mStaticInfoMutex, NULL);

    memset(&mDevice, 0, sizeof(mDevice));
    mDevice.common.tag     = HARDWARE_DEVICE_TAG;
    mDevice.common.version = CAMERA_DEVICE_API_VERSION_3_0;
    mDevice.common.close   = close_device;
    mDevice.ops            = const_cast<camera3_device_ops_t*>(&sOps);
    mDevice.priv           = this;
@@ -57,16 +72,17 @@ Camera::Camera(int id)

Camera::~Camera()
{
    pthread_mutex_destroy(&mMutex);
    pthread_mutex_destroy(&mStaticInfoMutex);
}

int Camera::open(const hw_module_t *module, hw_device_t **device)
{
    ALOGI("%s:%d: Opening camera device", __func__, mId);
    CAMTRACE_CALL();
    pthread_mutex_lock(&mMutex);
    if (mBusy) {
        pthread_mutex_unlock(&mMutex);
        ALOGE("%s:%d: Error! Camera device already opened", __func__, mId);
        return -EBUSY;
    }

@@ -77,18 +93,33 @@ int Camera::open(const hw_module_t *module, hw_device_t **device)
    *device = &mDevice.common;
    pthread_mutex_unlock(&mMutex);
    return 0;
}

int Camera::getInfo(struct camera_info *info)
{
    info->facing = CAMERA_FACING_FRONT;
    info->orientation = 0;
    info->device_version = mDevice.common.version;

    pthread_mutex_lock(&mStaticInfoMutex);
    if (mStaticInfo == NULL) {
        mStaticInfo = initStaticInfo();
    }
    pthread_mutex_unlock(&mStaticInfoMutex);

    info->static_camera_characteristics = mStaticInfo;
    return 0;
}

int Camera::close()
{
    ALOGI("%s:%d: Closing camera device", __func__, mId);
    CAMTRACE_CALL();
    pthread_mutex_lock(&mMutex);
    if (!mBusy) {
        pthread_mutex_unlock(&mMutex);
        ALOGE("%s:%d: Error! Camera device not open", __func__, mId);
        return -EINVAL;
    }

@@ -97,7 +128,6 @@ int Camera::close()
    mBusy = false;
    pthread_mutex_unlock(&mMutex);
    return 0;
}
@@ -105,44 +135,500 @@ int Camera::initialize(const camera3_callback_ops_t *callback_ops)
{
    ALOGV("%s:%d: callback_ops=%p", __func__, mId, callback_ops);
    mCallbackOps = callback_ops;
    // Create standard settings templates
    // 0 is invalid as template
    mTemplates[0] = NULL;
    // CAMERA3_TEMPLATE_PREVIEW = 1
    mTemplates[1] = new Metadata(ANDROID_CONTROL_MODE_OFF,
            ANDROID_CONTROL_CAPTURE_INTENT_PREVIEW);
    // CAMERA3_TEMPLATE_STILL_CAPTURE = 2
    mTemplates[2] = new Metadata(ANDROID_CONTROL_MODE_OFF,
            ANDROID_CONTROL_CAPTURE_INTENT_STILL_CAPTURE);
    // CAMERA3_TEMPLATE_VIDEO_RECORD = 3
    mTemplates[3] = new Metadata(ANDROID_CONTROL_MODE_OFF,
            ANDROID_CONTROL_CAPTURE_INTENT_VIDEO_RECORD);
    // CAMERA3_TEMPLATE_VIDEO_SNAPSHOT = 4
    mTemplates[4] = new Metadata(ANDROID_CONTROL_MODE_OFF,
            ANDROID_CONTROL_CAPTURE_INTENT_VIDEO_SNAPSHOT);
    // CAMERA3_TEMPLATE_STILL_ZERO_SHUTTER_LAG = 5
    mTemplates[5] = new Metadata(ANDROID_CONTROL_MODE_OFF,
            ANDROID_CONTROL_CAPTURE_INTENT_ZERO_SHUTTER_LAG);
    // Pre-generate metadata structures
    for (int i = 1; i < CAMERA3_TEMPLATE_COUNT; i++) {
        mTemplates[i]->generate();
    }
    // TODO: create vendor templates
    return 0;
}

camera_metadata_t *Camera::initStaticInfo()
{
    /*
     * Setup static camera info. This will have to be customized per camera
     * device.
     */
Metadata m;
/* android.control */
int32_t android_control_ae_available_target_fps_ranges[] = {30, 30};
m.addInt32(ANDROID_CONTROL_AE_AVAILABLE_TARGET_FPS_RANGES,
ARRAY_SIZE(android_control_ae_available_target_fps_ranges),
android_control_ae_available_target_fps_ranges);
int32_t android_control_ae_compensation_range[] = {-4, 4};
m.addInt32(ANDROID_CONTROL_AE_COMPENSATION_RANGE,
ARRAY_SIZE(android_control_ae_compensation_range),
android_control_ae_compensation_range);
camera_metadata_rational_t android_control_ae_compensation_step[] = {{2,1}};
m.addRational(ANDROID_CONTROL_AE_COMPENSATION_STEP,
ARRAY_SIZE(android_control_ae_compensation_step),
android_control_ae_compensation_step);
int32_t android_control_max_regions[] = {1};
m.addInt32(ANDROID_CONTROL_MAX_REGIONS,
ARRAY_SIZE(android_control_max_regions),
android_control_max_regions);
/* android.jpeg */
int32_t android_jpeg_available_thumbnail_sizes[] = {0, 0, 128, 96};
m.addInt32(ANDROID_JPEG_AVAILABLE_THUMBNAIL_SIZES,
ARRAY_SIZE(android_jpeg_available_thumbnail_sizes),
android_jpeg_available_thumbnail_sizes);
/* android.lens */
float android_lens_info_available_focal_lengths[] = {1.0};
m.addFloat(ANDROID_LENS_INFO_AVAILABLE_FOCAL_LENGTHS,
ARRAY_SIZE(android_lens_info_available_focal_lengths),
android_lens_info_available_focal_lengths);
/* android.request */
int32_t android_request_max_num_output_streams[] = {0, 3, 1};
m.addInt32(ANDROID_REQUEST_MAX_NUM_OUTPUT_STREAMS,
ARRAY_SIZE(android_request_max_num_output_streams),
android_request_max_num_output_streams);
/* android.scaler */
int32_t android_scaler_available_formats[] = {
HAL_PIXEL_FORMAT_RAW_SENSOR,
HAL_PIXEL_FORMAT_BLOB,
HAL_PIXEL_FORMAT_RGBA_8888,
HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED,
// These are handled by YCbCr_420_888
// HAL_PIXEL_FORMAT_YV12,
// HAL_PIXEL_FORMAT_YCrCb_420_SP,
HAL_PIXEL_FORMAT_YCbCr_420_888};
m.addInt32(ANDROID_SCALER_AVAILABLE_FORMATS,
ARRAY_SIZE(android_scaler_available_formats),
android_scaler_available_formats);
int64_t android_scaler_available_jpeg_min_durations[] = {1};
m.addInt64(ANDROID_SCALER_AVAILABLE_JPEG_MIN_DURATIONS,
ARRAY_SIZE(android_scaler_available_jpeg_min_durations),
android_scaler_available_jpeg_min_durations);
int32_t android_scaler_available_jpeg_sizes[] = {640, 480};
m.addInt32(ANDROID_SCALER_AVAILABLE_JPEG_SIZES,
ARRAY_SIZE(android_scaler_available_jpeg_sizes),
android_scaler_available_jpeg_sizes);
float android_scaler_available_max_digital_zoom[] = {1};
m.addFloat(ANDROID_SCALER_AVAILABLE_MAX_DIGITAL_ZOOM,
ARRAY_SIZE(android_scaler_available_max_digital_zoom),
android_scaler_available_max_digital_zoom);
int64_t android_scaler_available_processed_min_durations[] = {1};
m.addInt64(ANDROID_SCALER_AVAILABLE_PROCESSED_MIN_DURATIONS,
ARRAY_SIZE(android_scaler_available_processed_min_durations),
android_scaler_available_processed_min_durations);
int32_t android_scaler_available_processed_sizes[] = {640, 480};
m.addInt32(ANDROID_SCALER_AVAILABLE_PROCESSED_SIZES,
ARRAY_SIZE(android_scaler_available_processed_sizes),
android_scaler_available_processed_sizes);
int64_t android_scaler_available_raw_min_durations[] = {1};
m.addInt64(ANDROID_SCALER_AVAILABLE_RAW_MIN_DURATIONS,
ARRAY_SIZE(android_scaler_available_raw_min_durations),
android_scaler_available_raw_min_durations);
int32_t android_scaler_available_raw_sizes[] = {640, 480};
m.addInt32(ANDROID_SCALER_AVAILABLE_RAW_SIZES,
ARRAY_SIZE(android_scaler_available_raw_sizes),
android_scaler_available_raw_sizes);
/* android.sensor */
int32_t android_sensor_info_active_array_size[] = {0, 0, 640, 480};
m.addInt32(ANDROID_SENSOR_INFO_ACTIVE_ARRAY_SIZE,
ARRAY_SIZE(android_sensor_info_active_array_size),
android_sensor_info_active_array_size);
int32_t android_sensor_info_sensitivity_range[] =
{100, 1600};
m.addInt32(ANDROID_SENSOR_INFO_SENSITIVITY_RANGE,
ARRAY_SIZE(android_sensor_info_sensitivity_range),
android_sensor_info_sensitivity_range);
int64_t android_sensor_info_max_frame_duration[] = {30000000000};
m.addInt64(ANDROID_SENSOR_INFO_MAX_FRAME_DURATION,
ARRAY_SIZE(android_sensor_info_max_frame_duration),
android_sensor_info_max_frame_duration);
float android_sensor_info_physical_size[] = {3.2, 2.4};
m.addFloat(ANDROID_SENSOR_INFO_PHYSICAL_SIZE,
ARRAY_SIZE(android_sensor_info_physical_size),
android_sensor_info_physical_size);
int32_t android_sensor_info_pixel_array_size[] = {640, 480};
m.addInt32(ANDROID_SENSOR_INFO_PIXEL_ARRAY_SIZE,
ARRAY_SIZE(android_sensor_info_pixel_array_size),
android_sensor_info_pixel_array_size);
int32_t android_sensor_orientation[] = {0};
m.addInt32(ANDROID_SENSOR_ORIENTATION,
ARRAY_SIZE(android_sensor_orientation),
android_sensor_orientation);
/* End of static camera characteristics */
return clone_camera_metadata(m.generate());
}
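/*
 * Illustrative sketch (not part of this change): a caller holding the
 * camera_metadata_t returned above can read entries back with the
 * libcamera_metadata lookup API. "staticInfo" here names the pointer this
 * function returns; the tag is one of the ones populated above.
 *
 * camera_metadata_ro_entry_t entry;
 * if (find_camera_metadata_ro_entry(staticInfo,
 *         ANDROID_SENSOR_INFO_PIXEL_ARRAY_SIZE, &entry) == 0 &&
 *         entry.count == 2) {
 *     ALOGV("Pixel array size: %dx%d", entry.data.i32[0], entry.data.i32[1]);
 * }
 */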
int Camera::configureStreams(camera3_stream_configuration_t *stream_config)
{
camera3_stream_t *astream;
Stream **newStreams = NULL;
CAMTRACE_CALL();
ALOGV("%s:%d: stream_config=%p", __func__, mId, stream_config);
if (stream_config == NULL) {
ALOGE("%s:%d: NULL stream configuration array", __func__, mId);
return -EINVAL;
}
if (stream_config->num_streams == 0) {
ALOGE("%s:%d: Empty stream configuration array", __func__, mId);
return -EINVAL;
}
// Create new stream array
newStreams = new Stream*[stream_config->num_streams];
ALOGV("%s:%d: Number of Streams: %d", __func__, mId,
stream_config->num_streams);
pthread_mutex_lock(&mMutex);
// Mark all current streams unused for now
for (int i = 0; i < mNumStreams; i++)
mStreams[i]->mReuse = false;
// Fill new stream array with reused streams and new streams
for (unsigned int i = 0; i < stream_config->num_streams; i++) {
astream = stream_config->streams[i];
if (astream->max_buffers > 0) {
ALOGV("%s:%d: Reusing stream %d", __func__, mId, i);
newStreams[i] = reuseStream(astream);
} else {
ALOGV("%s:%d: Creating new stream %d", __func__, mId, i);
newStreams[i] = new Stream(mId, astream);
}
if (newStreams[i] == NULL) {
ALOGE("%s:%d: Error processing stream %d", __func__, mId, i);
goto err_out;
}
astream->priv = newStreams[i];
}
// Verify the set of streams in aggregate
if (!isValidStreamSet(newStreams, stream_config->num_streams)) {
ALOGE("%s:%d: Invalid stream set", __func__, mId);
goto err_out;
}
// Set up all streams (calculate usage/max_buffers for each)
setupStreams(newStreams, stream_config->num_streams);
// Destroy all old streams and replace stream array with new one
destroyStreams(mStreams, mNumStreams);
mStreams = newStreams;
mNumStreams = stream_config->num_streams;
// Clear out last seen settings metadata
setSettings(NULL);
pthread_mutex_unlock(&mMutex);
return 0;
err_out:
// Clean up temporary streams, preserve existing mStreams/mNumStreams
destroyStreams(newStreams, stream_config->num_streams);
pthread_mutex_unlock(&mMutex);
return -EINVAL;
}
void Camera::destroyStreams(Stream **streams, int count)
{
if (streams == NULL)
return;
for (int i = 0; i < count; i++) {
// Only destroy streams that weren't reused
if (streams[i] != NULL && !streams[i]->mReuse)
delete streams[i];
}
delete [] streams;
}
Stream *Camera::reuseStream(camera3_stream_t *astream)
{
Stream *priv = reinterpret_cast<Stream*>(astream->priv);
// Verify the re-used stream's parameters match
if (!priv->isValidReuseStream(mId, astream)) {
ALOGE("%s:%d: Mismatched parameter in reused stream", __func__, mId);
return NULL;
}
// Mark stream to be reused
priv->mReuse = true;
return priv;
}
bool Camera::isValidStreamSet(Stream **streams, int count)
{
int inputs = 0;
int outputs = 0;
if (streams == NULL) {
ALOGE("%s:%d: NULL stream configuration streams", __func__, mId);
return false;
}
if (count == 0) {
ALOGE("%s:%d: Zero count stream configuration streams", __func__, mId);
return false;
}
// Validate there is at most one input stream and at least one output stream
for (int i = 0; i < count; i++) {
// A stream may be both input and output (bidirectional)
if (streams[i]->isInputType())
inputs++;
if (streams[i]->isOutputType())
outputs++;
}
ALOGV("%s:%d: Configuring %d output streams and %d input streams",
__func__, mId, outputs, inputs);
if (outputs < 1) {
ALOGE("%s:%d: Stream config must have >= 1 output", __func__, mId);
return false;
}
if (inputs > 1) {
ALOGE("%s:%d: Stream config must have <= 1 input", __func__, mId);
return false;
}
// TODO: check for correct number of Bayer/YUV/JPEG/Encoder streams
return true;
}
void Camera::setupStreams(Stream **streams, int count)
{
/*
* This is where the HAL has to decide internally how to handle all of the
* streams, and then produce usage and max_buffer values for each stream.
* Note, the stream array has been checked before this point for ALL invalid
* conditions, so it must find a successful configuration for this stream
* array. The HAL may not return an error from this point.
*
* In this demo HAL, we just set all streams to be the same dummy values;
* real implementations will want to avoid USAGE_SW_{READ|WRITE}_OFTEN.
*/
for (int i = 0; i < count; i++) {
uint32_t usage = 0;
if (streams[i]->isOutputType())
usage |= GRALLOC_USAGE_SW_WRITE_OFTEN |
GRALLOC_USAGE_HW_CAMERA_WRITE;
if (streams[i]->isInputType())
usage |= GRALLOC_USAGE_SW_READ_OFTEN |
GRALLOC_USAGE_HW_CAMERA_READ;
streams[i]->setUsage(usage);
streams[i]->setMaxBuffers(1);
}
}
int Camera::registerStreamBuffers(const camera3_stream_buffer_set_t *buf_set)
{
ALOGV("%s:%d: buffer_set=%p", __func__, mId, buf_set);
if (buf_set == NULL) {
ALOGE("%s:%d: NULL buffer set", __func__, mId);
return -EINVAL;
}
if (buf_set->stream == NULL) {
ALOGE("%s:%d: NULL stream handle", __func__, mId);
return -EINVAL;
}
Stream *stream = reinterpret_cast<Stream*>(buf_set->stream->priv);
return stream->registerBuffers(buf_set);
}
const camera_metadata_t* Camera::constructDefaultRequestSettings(int type)
{
ALOGV("%s:%d: type=%d", __func__, mId, type);
if (type < 1 || type >= CAMERA3_TEMPLATE_COUNT) {
ALOGE("%s:%d: Invalid template request type: %d", __func__, mId, type);
return NULL;
}
return mTemplates[type]->generate();
}
int Camera::processCaptureRequest(camera3_capture_request_t *request)
{
camera3_capture_result result;
ALOGV("%s:%d: request=%p", __func__, mId, request);
CAMTRACE_CALL();
if (request == NULL) {
ALOGE("%s:%d: NULL request received", __func__, mId);
return -EINVAL;
}
ALOGV("%s:%d: Request Frame:%d Settings:%p", __func__, mId,
request->frame_number, request->settings);
// NULL indicates use last settings
if (request->settings == NULL) {
if (mSettings == NULL) {
ALOGE("%s:%d: NULL settings without previous set Frame:%d Req:%p",
__func__, mId, request->frame_number, request);
return -EINVAL;
}
} else {
setSettings(request->settings);
}
if (request->input_buffer != NULL) {
ALOGV("%s:%d: Reprocessing input buffer %p", __func__, mId,
request->input_buffer);
if (!isValidReprocessSettings(request->settings)) {
ALOGE("%s:%d: Invalid settings for reprocess request: %p",
__func__, mId, request->settings);
return -EINVAL;
}
} else {
ALOGV("%s:%d: Capturing new frame.", __func__, mId);
if (!isValidCaptureSettings(request->settings)) {
ALOGE("%s:%d: Invalid settings for capture request: %p",
__func__, mId, request->settings);
return -EINVAL;
}
}
if (request->num_output_buffers <= 0) {
ALOGE("%s:%d: Invalid number of output buffers: %d", __func__, mId,
request->num_output_buffers);
return -EINVAL;
}
result.num_output_buffers = request->num_output_buffers;
result.output_buffers = new camera3_stream_buffer_t[result.num_output_buffers];
for (unsigned int i = 0; i < request->num_output_buffers; i++) {
int res = processCaptureBuffer(&request->output_buffers[i],
const_cast<camera3_stream_buffer_t*>(&result.output_buffers[i]));
if (res)
goto err_out;
}
result.frame_number = request->frame_number;
// TODO: return actual captured/reprocessed settings
result.result = request->settings;
// TODO: asynchronously return results
notifyShutter(request->frame_number, 0);
mCallbackOps->process_capture_result(mCallbackOps, &result);
return 0; return 0;
err_out:
delete [] result.output_buffers;
// TODO: this should probably be a total device failure; transient for now
return -EINVAL;
}
void Camera::setSettings(const camera_metadata_t *new_settings)
{
if (mSettings != NULL) {
free_camera_metadata(mSettings);
mSettings = NULL;
}
if (new_settings != NULL)
mSettings = clone_camera_metadata(new_settings);
}
bool Camera::isValidCaptureSettings(const camera_metadata_t* /*settings*/)
{
// TODO: reject settings that cannot be captured
return true;
}
bool Camera::isValidReprocessSettings(const camera_metadata_t* /*settings*/)
{
// TODO: reject settings that cannot be reprocessed
// input buffers unimplemented, use this to reject reprocessing requests
ALOGE("%s:%d: Input buffer reprocessing not implemented", __func__, mId);
return false;
}
int Camera::processCaptureBuffer(const camera3_stream_buffer_t *in,
camera3_stream_buffer_t *out)
{
if (in->acquire_fence != -1) {
int res = sync_wait(in->acquire_fence, CAMERA_SYNC_TIMEOUT);
if (res == -ETIME) {
ALOGE("%s:%d: Timeout waiting on buffer acquire fence",
__func__, mId);
return res;
} else if (res) {
ALOGE("%s:%d: Error waiting on buffer acquire fence: %s(%d)",
__func__, mId, strerror(-res), res);
return res;
}
}
out->stream = in->stream;
out->buffer = in->buffer;
out->status = CAMERA3_BUFFER_STATUS_OK;
// TODO: use driver-backed release fences
out->acquire_fence = -1;
out->release_fence = -1;
// TODO: lock and software-paint buffer
return 0;
}
void Camera::notifyShutter(uint32_t frame_number, uint64_t timestamp)
{
int res;
struct timespec ts;
// If timestamp is 0, get timestamp from right now instead
if (timestamp == 0) {
ALOGW("%s:%d: No timestamp provided, using CLOCK_BOOTTIME",
__func__, mId);
res = clock_gettime(CLOCK_BOOTTIME, &ts);
if (res == 0) {
timestamp = ts.tv_sec * 1000000000ULL + ts.tv_nsec;
} else {
ALOGE("%s:%d: No timestamp and failed to get CLOCK_BOOTTIME %s(%d)",
__func__, mId, strerror(errno), errno);
}
}
camera3_notify_msg_t m;
memset(&m, 0, sizeof(m));
m.type = CAMERA3_MSG_SHUTTER;
m.message.shutter.frame_number = frame_number;
m.message.shutter.timestamp = timestamp;
mCallbackOps->notify(mCallbackOps, &m);
}
void Camera::getMetadataVendorTagOps(vendor_tag_query_ops_t *ops)
@ -153,7 +639,7 @@ void Camera::getMetadataVendorTagOps(vendor_tag_query_ops_t *ops)
void Camera::dump(int fd)
{
ALOGV("%s:%d: Dumping to fd %d", __func__, mId, fd);
// TODO: dprintf all relevant state to fd
}
@ -20,6 +20,8 @@
#include <pthread.h>
#include <hardware/hardware.h>
#include <hardware/camera3.h>
#include "Metadata.h"
#include "Stream.h"
namespace default_camera_hal {
// Camera represents a physical camera on a device.
@ -35,6 +37,7 @@ class Camera {
// Common Camera Device Operations (see <hardware/camera_common.h>)
int open(const hw_module_t *module, hw_device_t **device);
int getInfo(struct camera_info *info);
int close();
// Camera v3 Device Operations (see <hardware/camera3.h>)
@ -50,8 +53,34 @@ class Camera {
camera3_device_t mDevice;
private:
// Separate initialization method for static metadata
camera_metadata_t *initStaticInfo();
// Reuse a stream already created by this device
Stream *reuseStream(camera3_stream_t *astream);
// Destroy all streams in a stream array, and the array itself
void destroyStreams(Stream **array, int count);
// Verify a set of streams is valid in aggregate
bool isValidStreamSet(Stream **array, int count);
// Calculate usage and max_bufs of each stream
void setupStreams(Stream **array, int count);
// Copy new settings for re-use and clean up old settings.
void setSettings(const camera_metadata_t *new_settings);
// Verify settings are valid for a capture
bool isValidCaptureSettings(const camera_metadata_t *settings);
// Verify settings are valid for reprocessing an input buffer
bool isValidReprocessSettings(const camera_metadata_t *settings);
// Process an output buffer
int processCaptureBuffer(const camera3_stream_buffer_t *in,
camera3_stream_buffer_t *out);
// Send a shutter notify message with start of exposure time
void notifyShutter(uint32_t frame_number, uint64_t timestamp);
// Identifier used by framework to distinguish cameras
const int mId;
// Metadata containing persistent camera characteristics
Metadata mMetadata;
// camera_metadata structure containing static characteristics
camera_metadata_t *mStaticInfo;
// Busy flag indicates camera is in use
bool mBusy;
// Camera device operations handle shared by all devices
@ -60,6 +89,17 @@ class Camera {
const camera3_callback_ops_t *mCallbackOps;
// Lock protecting the Camera object for modifications
pthread_mutex_t mMutex;
// Lock protecting only static camera characteristics, which may
// be accessed without the camera device open
pthread_mutex_t mStaticInfoMutex;
// Array of handles to streams currently in use by the device
Stream **mStreams;
// Number of streams in mStreams
int mNumStreams;
// Static array of standard camera settings templates
Metadata *mTemplates[CAMERA3_TEMPLATE_COUNT];
// Most recent request settings seen, memoized to be reused
camera_metadata_t *mSettings;
};
} // namespace default_camera_hal
@ -76,7 +76,7 @@ int CameraHAL::getCameraInfo(int id, struct camera_info* info)
return -ENODEV;
}
// TODO: return device-specific static metadata
-return 0;
+return mCameras[id]->getInfo(info);
}
int CameraHAL::setCallbacks(const camera_module_callbacks_t *callbacks)
@ -90,11 +90,14 @@ int CameraHAL::open(const hw_module_t* mod, const char* name, hw_device_t** dev)
{
int id;
char *nameEnd;
-Camera *cam;
ALOGV("%s: module=%p, name=%s, device=%p", __func__, mod, name, dev);
+if (*name == '\0') {
+ALOGE("%s: Invalid camera id name is NULL", __func__);
+return -EINVAL;
+}
id = strtol(name, &nameEnd, 10);
-if (nameEnd != NULL) {
+if (*nameEnd != '\0') {
ALOGE("%s: Invalid camera id name %s", __func__, name);
return -EINVAL;
} else if (id < 0 || id >= mNumberOfCameras) {
modules/camera/Metadata.cpp (new file, 246 lines)
@ -0,0 +1,246 @@
/*
* Copyright (C) 2013 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <pthread.h>
#include <system/camera_metadata.h>
//#define LOG_NDEBUG 0
#define LOG_TAG "Metadata"
#include <cutils/log.h>
#define ATRACE_TAG (ATRACE_TAG_CAMERA | ATRACE_TAG_HAL)
#include <cutils/trace.h>
#include "ScopedTrace.h"
#include "Metadata.h"
namespace default_camera_hal {
Metadata::Metadata()
: mHead(NULL),
mTail(NULL),
mEntryCount(0),
mDataCount(0),
mGenerated(NULL),
mDirty(true)
{
// NULL (default) pthread mutex attributes
pthread_mutex_init(&mMutex, NULL);
}
Metadata::~Metadata()
{
Entry *current = mHead;
while (current != NULL) {
Entry *tmp = current;
current = current->mNext;
delete tmp;
}
if (mGenerated != NULL)
free_camera_metadata(mGenerated);
pthread_mutex_destroy(&mMutex);
}
Metadata::Metadata(uint8_t mode, uint8_t intent)
: mHead(NULL),
mTail(NULL),
mEntryCount(0),
mDataCount(0),
mGenerated(NULL),
mDirty(true)
{
pthread_mutex_init(&mMutex, NULL);
if (validate(ANDROID_CONTROL_MODE, TYPE_BYTE, 1)) {
int res = add(ANDROID_CONTROL_MODE, 1, &mode);
if (res != 0) {
ALOGE("%s: Unable to add mode to template!", __func__);
}
} else {
ALOGE("%s: Invalid mode constructing template!", __func__);
}
if (validate(ANDROID_CONTROL_CAPTURE_INTENT, TYPE_BYTE, 1)) {
int res = add(ANDROID_CONTROL_CAPTURE_INTENT, 1, &intent);
if (res != 0) {
ALOGE("%s: Unable to add capture intent to template!", __func__);
}
} else {
ALOGE("%s: Invalid capture intent constructing template!", __func__);
}
}
int Metadata::addUInt8(uint32_t tag, int count, uint8_t *data)
{
if (!validate(tag, TYPE_BYTE, count)) return -EINVAL;
return add(tag, count, data);
}
int Metadata::addInt32(uint32_t tag, int count, int32_t *data)
{
if (!validate(tag, TYPE_INT32, count)) return -EINVAL;
return add(tag, count, data);
}
int Metadata::addFloat(uint32_t tag, int count, float *data)
{
if (!validate(tag, TYPE_FLOAT, count)) return -EINVAL;
return add(tag, count, data);
}
int Metadata::addInt64(uint32_t tag, int count, int64_t *data)
{
if (!validate(tag, TYPE_INT64, count)) return -EINVAL;
return add(tag, count, data);
}
int Metadata::addDouble(uint32_t tag, int count, double *data)
{
if (!validate(tag, TYPE_DOUBLE, count)) return -EINVAL;
return add(tag, count, data);
}
int Metadata::addRational(uint32_t tag, int count,
camera_metadata_rational_t *data)
{
if (!validate(tag, TYPE_RATIONAL, count)) return -EINVAL;
return add(tag, count, data);
}
bool Metadata::validate(uint32_t tag, int tag_type, int count)
{
if (get_camera_metadata_tag_type(tag) < 0) {
ALOGE("%s: Invalid metadata entry tag: %d", __func__, tag);
return false;
}
if (tag_type < 0 || tag_type >= NUM_TYPES) {
ALOGE("%s: Invalid metadata entry tag type: %d", __func__, tag_type);
return false;
}
if (tag_type != get_camera_metadata_tag_type(tag)) {
ALOGE("%s: Tag %d called with incorrect type: %s(%d)", __func__, tag,
camera_metadata_type_names[tag_type], tag_type);
return false;
}
if (count < 1) {
ALOGE("%s: Invalid metadata entry count: %d", __func__, count);
return false;
}
return true;
}
int Metadata::add(uint32_t tag, int count, void *tag_data)
{
int tag_type = get_camera_metadata_tag_type(tag);
size_t type_sz = camera_metadata_type_size[tag_type];
// Allocate array to hold new metadata
void *data = malloc(count * type_sz);
if (data == NULL)
return -ENOMEM;
memcpy(data, tag_data, count * type_sz);
pthread_mutex_lock(&mMutex);
mEntryCount++;
mDataCount += calculate_camera_metadata_entry_data_size(tag_type, count);
push(new Entry(tag, data, count));
mDirty = true;
pthread_mutex_unlock(&mMutex);
return 0;
}
camera_metadata_t* Metadata::generate()
{
pthread_mutex_lock(&mMutex);
// Reuse if old generated metadata still valid
if (!mDirty && mGenerated != NULL) {
ALOGV("%s: Reusing generated metadata at %p", __func__, mGenerated);
goto out;
}
// Destroy old metadata
if (mGenerated != NULL) {
ALOGV("%s: Freeing generated metadata at %p", __func__, mGenerated);
free_camera_metadata(mGenerated);
mGenerated = NULL;
}
// Generate new metadata structure
ALOGV("%s: Generating new camera metadata structure, Entries:%d Data:%d",
__func__, mEntryCount, mDataCount);
mGenerated = allocate_camera_metadata(mEntryCount, mDataCount);
if (mGenerated == NULL) {
ALOGE("%s: Failed to allocate metadata (%d entries %d data)",
__func__, mEntryCount, mDataCount);
goto out;
}
// Walk list of entries adding each one to newly allocated metadata
for (Entry *current = mHead; current != NULL; current = current->mNext) {
int res = add_camera_metadata_entry(mGenerated, current->mTag,
current->mData, current->mCount);
if (res != 0) {
ALOGE("%s: Failed to add camera metadata: %d", __func__, res);
free_camera_metadata(mGenerated);
mGenerated = NULL;
goto out;
}
}
out:
pthread_mutex_unlock(&mMutex);
return mGenerated;
}
Metadata::Entry::Entry(uint32_t tag, void *data, int count)
: mNext(NULL),
mPrev(NULL),
mTag(tag),
mData(data),
mCount(count)
{
}
void Metadata::push(Entry *e)
{
if (mHead == NULL) {
mHead = mTail = e;
} else {
mTail->insertAfter(e);
mTail = e;
}
}
Metadata::Entry::~Entry()
{
if (mNext != NULL)
mNext->mPrev = mPrev;
if (mPrev != NULL)
mPrev->mNext = mNext;
}
void Metadata::Entry::insertAfter(Entry *e)
{
if (e == NULL)
return;
if (mNext != NULL)
mNext->mPrev = e;
e->mNext = mNext;
e->mPrev = this;
mNext = e;
}
} // namespace default_camera_hal
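/*
 * Illustrative sketch (not part of this file): the intended usage pattern,
 * matching how Camera::initStaticInfo() drives this class. generate()
 * returns a pointer owned by the Metadata object (freed on the next dirty
 * generate() or in the destructor), so callers that need to keep the result
 * clone it:
 *
 * Metadata m;
 * int32_t orientation[] = {0};
 * m.addInt32(ANDROID_SENSOR_ORIENTATION, 1, orientation);
 * camera_metadata_t *owned = m.generate();
 * camera_metadata_t *copy = clone_camera_metadata(owned); // caller-owned
 */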
modules/camera/Metadata.h (new file, 79 lines)
@ -0,0 +1,79 @@
/*
* Copyright (C) 2013 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef METADATA_H_
#define METADATA_H_
#include <hardware/camera3.h>
#include <hardware/gralloc.h>
#include <system/camera_metadata.h>
#include <system/graphics.h>
namespace default_camera_hal {
// Metadata is a convenience class for dealing with libcamera_metadata
class Metadata {
public:
Metadata();
~Metadata();
// Constructor used for request metadata templates
Metadata(uint8_t mode, uint8_t intent);
// Parse and add an entry
int addUInt8(uint32_t tag, int count, uint8_t *data);
int addInt32(uint32_t tag, int count, int32_t *data);
int addFloat(uint32_t tag, int count, float *data);
int addInt64(uint32_t tag, int count, int64_t *data);
int addDouble(uint32_t tag, int count, double *data);
int addRational(uint32_t tag, int count,
camera_metadata_rational_t *data);
// Generate a camera_metadata structure and fill it with internal data
camera_metadata_t *generate();
private:
// Validate the tag, type and count for a metadata entry
bool validate(uint32_t tag, int tag_type, int count);
// Add a verified tag with data to this Metadata structure
int add(uint32_t tag, int count, void *tag_data);
class Entry {
public:
Entry(uint32_t tag, void *data, int count);
~Entry();
Entry *mNext;
Entry *mPrev;
const uint32_t mTag;
const void *mData;
const int mCount;
void insertAfter(Entry *e);
};
// List ends
Entry *mHead;
Entry *mTail;
// Append entry to list
void push(Entry *e);
// Total of entries and entry data size
int mEntryCount;
int mDataCount;
// Save generated metadata, invalidated on update
camera_metadata_t *mGenerated;
// Flag to force metadata regeneration
bool mDirty;
// Lock protecting the Metadata object for modifications
pthread_mutex_t mMutex;
};
} // namespace default_camera_hal
#endif // METADATA_H_
modules/camera/ScopedTrace.h (new file)
@ -0,0 +1,51 @@
/*
* Copyright (C) 2013 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef CAMERA_SCOPED_TRACE_H
#define CAMERA_SCOPED_TRACE_H
#include <stdint.h>
#include <cutils/trace.h>
// See <cutils/trace.h> for more tracing macros.
// CAMTRACE_NAME traces the beginning and end of the current scope. To trace
// the correct start and end times this macro should be declared first in the
// scope body.
#define CAMTRACE_NAME(name) ScopedTrace ___tracer(ATRACE_TAG, name)
// CAMTRACE_CALL is a CAMTRACE_NAME that uses the current function name.
#define CAMTRACE_CALL() CAMTRACE_NAME(__FUNCTION__)
namespace default_camera_hal {
class ScopedTrace {
public:
inline ScopedTrace(uint64_t tag, const char* name)
: mTag(tag) {
atrace_begin(mTag, name);
}
inline ~ScopedTrace() {
atrace_end(mTag);
}
private:
uint64_t mTag;
};
} // namespace default_camera_hal
#endif // CAMERA_SCOPED_TRACE_H
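// Illustrative usage (not part of this file): ATRACE_TAG must be defined
// before including this header, as Camera.cpp and Stream.cpp do. The tracer
// opens a systrace slice on construction and closes it when the enclosing
// scope exits; doSomething() below is a hypothetical method.
//
// void Camera::doSomething()
// {
//     CAMTRACE_CALL(); // slice named after the function
//     {
//         CAMTRACE_NAME("inner-work"); // nested slice, ends at inner brace
//     }
// }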
modules/camera/Stream.cpp (new file, 177 lines)
@ -0,0 +1,177 @@
/*
* Copyright (C) 2013 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <pthread.h>
#include <hardware/camera3.h>
#include <hardware/gralloc.h>
#include <system/graphics.h>
//#define LOG_NDEBUG 0
#define LOG_TAG "Stream"
#include <cutils/log.h>
#define ATRACE_TAG (ATRACE_TAG_CAMERA | ATRACE_TAG_HAL)
#include <cutils/trace.h>
#include "ScopedTrace.h"
#include "Stream.h"
namespace default_camera_hal {
Stream::Stream(int id, camera3_stream_t *s)
: mReuse(false),
mId(id),
mStream(s),
mType(s->stream_type),
mWidth(s->width),
mHeight(s->height),
mFormat(s->format),
mUsage(0),
mMaxBuffers(0),
mRegistered(false),
mBuffers(0),
mNumBuffers(0)
{
// NULL (default) pthread mutex attributes
pthread_mutex_init(&mMutex, NULL);
}
Stream::~Stream()
{
pthread_mutex_lock(&mMutex);
unregisterBuffers_L();
pthread_mutex_unlock(&mMutex);
}
void Stream::setUsage(uint32_t usage)
{
pthread_mutex_lock(&mMutex);
if (usage != mUsage) {
mUsage = usage;
mStream->usage = usage;
unregisterBuffers_L();
}
pthread_mutex_unlock(&mMutex);
}
void Stream::setMaxBuffers(uint32_t max_buffers)
{
pthread_mutex_lock(&mMutex);
if (max_buffers != mMaxBuffers) {
mMaxBuffers = max_buffers;
mStream->max_buffers = max_buffers;
unregisterBuffers_L();
}
pthread_mutex_unlock(&mMutex);
}
int Stream::getType()
{
return mType;
}
bool Stream::isInputType()
{
return mType == CAMERA3_STREAM_INPUT ||
mType == CAMERA3_STREAM_BIDIRECTIONAL;
}
bool Stream::isOutputType()
{
return mType == CAMERA3_STREAM_OUTPUT ||
mType == CAMERA3_STREAM_BIDIRECTIONAL;
}
bool Stream::isRegistered()
{
return mRegistered;
}
bool Stream::isValidReuseStream(int id, camera3_stream_t *s)
{
if (id != mId) {
ALOGE("%s:%d: Invalid camera id for reuse. Got %d expect %d",
__func__, mId, id, mId);
return false;
}
if (s != mStream) {
ALOGE("%s:%d: Invalid stream handle for reuse. Got %p expect %p",
__func__, mId, s, mStream);
return false;
}
if (s->stream_type != mType) {
// TODO: prettyprint type string
ALOGE("%s:%d: Mismatched type in reused stream. Got %d expect %d",
__func__, mId, s->stream_type, mType);
return false;
}
if (s->format != mFormat) {
// TODO: prettyprint format string
ALOGE("%s:%d: Mismatched format in reused stream. Got %d expect %d",
__func__, mId, s->format, mFormat);
return false;
}
if (s->width != mWidth) {
ALOGE("%s:%d: Mismatched width in reused stream. Got %d expect %d",
__func__, mId, s->width, mWidth);
return false;
}
if (s->height != mHeight) {
ALOGE("%s:%d: Mismatched height in reused stream. Got %d expect %d",
__func__, mId, s->height, mHeight);
return false;
}
return true;
}
int Stream::registerBuffers(const camera3_stream_buffer_set_t *buf_set)
{
CAMTRACE_CALL();
if (buf_set->stream != mStream) {
ALOGE("%s:%d: Buffer set for invalid stream. Got %p expect %p",
__func__, mId, buf_set->stream, mStream);
return -EINVAL;
}
pthread_mutex_lock(&mMutex);
mNumBuffers = buf_set->num_buffers;
mBuffers = new buffer_handle_t*[mNumBuffers];
for (unsigned int i = 0; i < mNumBuffers; i++) {
ALOGV("%s:%d: Registering buffer %p", __func__, mId,
buf_set->buffers[i]);
mBuffers[i] = buf_set->buffers[i];
// TODO: register buffers with hw, handle error cases
}
mRegistered = true;
pthread_mutex_unlock(&mMutex);
return 0;
}
// This must only be called with mMutex held
void Stream::unregisterBuffers_L()
{
mRegistered = false;
mNumBuffers = 0;
delete [] mBuffers;
// TODO: unregister buffers from hw
}
} // namespace default_camera_hal
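/*
 * Illustrative sketch (not part of this file): how Camera.cpp uses the priv
 * cookie to recover a Stream on a second configure_streams pass. halStream
 * here names a hypothetical camera3_stream_t handed in by the framework.
 *
 * Stream *s = new Stream(id, halStream); // first configuration
 * halStream->priv = s;
 * ...
 * Stream *prev = reinterpret_cast<Stream*>(halStream->priv);
 * if (prev->isValidReuseStream(id, halStream))
 *     prev->mReuse = true; // parameters match; keep the existing stream
 */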
modules/camera/Stream.h (new file, 79 lines)
@ -0,0 +1,79 @@
/*
* Copyright (C) 2013 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef STREAM_H_
#define STREAM_H_
#include <hardware/camera3.h>
#include <hardware/gralloc.h>
#include <system/graphics.h>
namespace default_camera_hal {
// Stream represents a single input or output stream for a camera device.
class Stream {
public:
Stream(int id, camera3_stream_t *s);
~Stream();
// validate that astream's parameters match this stream's parameters
bool isValidReuseStream(int id, camera3_stream_t *s);
// Register buffers with hardware
int registerBuffers(const camera3_stream_buffer_set_t *buf_set);
void setUsage(uint32_t usage);
void setMaxBuffers(uint32_t max_buffers);
int getType();
bool isInputType();
bool isOutputType();
bool isRegistered();
// This stream is being reused. Used in stream configuration passes
bool mReuse;
private:
// Clean up buffer state. must be called with mMutex held.
void unregisterBuffers_L();
// The camera device id this stream belongs to
const int mId;
// Handle to framework's stream, used as a cookie for buffers
camera3_stream_t *mStream;
// Stream type: CAMERA3_STREAM_* (see <hardware/camera3.h>)
const int mType;
// Width in pixels of the buffers in this stream
const uint32_t mWidth;
// Height in pixels of the buffers in this stream
const uint32_t mHeight;
// Gralloc format: HAL_PIXEL_FORMAT_* (see <system/graphics.h>)
const int mFormat;
// Gralloc usage mask : GRALLOC_USAGE_* (see <hardware/gralloc.h>)
uint32_t mUsage;
// Max simultaneous in-flight buffers for this stream
uint32_t mMaxBuffers;
// Buffers have been registered for this stream and are ready
bool mRegistered;
// Array of handles to buffers currently in use by the stream
buffer_handle_t **mBuffers;
// Number of buffers in mBuffers
unsigned int mNumBuffers;
// Lock protecting the Stream object for modifications
pthread_mutex_t mMutex;
};
} // namespace default_camera_hal
#endif // STREAM_H_
Android.mk for the consumerir module (new file)
@ -0,0 +1,25 @@
# Copyright (C) 2013 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
LOCAL_PATH := $(call my-dir)
include $(CLEAR_VARS)
LOCAL_MODULE := consumerir.default
LOCAL_MODULE_PATH := $(TARGET_OUT_SHARED_LIBRARIES)/hw
LOCAL_SRC_FILES := consumerir.c
LOCAL_SHARED_LIBRARIES := liblog libcutils
LOCAL_MODULE_TAGS := optional
include $(BUILD_SHARED_LIBRARY)
consumerir.c (new file, 116 lines)
@ -0,0 +1,116 @@
/*
* Copyright (C) 2013 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#define LOG_TAG "ConsumerIrHal"
#include <errno.h>
#include <stdlib.h> /* malloc/free */
#include <string.h>
#include <unistd.h> /* usleep */
#include <cutils/log.h>
#include <hardware/hardware.h>
#include <hardware/consumerir.h>
#define ARRAY_SIZE(a) (sizeof(a) / sizeof(a[0]))
static const consumerir_freq_range_t consumerir_freqs[] = {
{.min = 30000, .max = 30000},
{.min = 33000, .max = 33000},
{.min = 36000, .max = 36000},
{.min = 38000, .max = 38000},
{.min = 40000, .max = 40000},
{.min = 56000, .max = 56000},
};
static int consumerir_transmit(struct consumerir_device *dev,
int carrier_freq, int pattern[], int pattern_len)
{
int total_time = 0;
long i;
for (i = 0; i < pattern_len; i++)
total_time += pattern[i];
/* simulate the time spent transmitting by sleeping */
ALOGD("transmit for %d uS at %d Hz", total_time, carrier_freq);
usleep(total_time);
return 0;
}
static int consumerir_get_num_carrier_freqs(struct consumerir_device *dev)
{
return ARRAY_SIZE(consumerir_freqs);
}
static int consumerir_get_carrier_freqs(struct consumerir_device *dev,
size_t len, consumerir_freq_range_t *ranges)
{
size_t to_copy = ARRAY_SIZE(consumerir_freqs);
to_copy = len < to_copy ? len : to_copy;
memcpy(ranges, consumerir_freqs, to_copy * sizeof(consumerir_freq_range_t));
return to_copy;
}
static int consumerir_close(hw_device_t *dev)
{
free(dev);
return 0;
}
/*
* Generic device handling
*/
static int consumerir_open(const hw_module_t* module, const char* name,
hw_device_t** device)
{
if (strcmp(name, CONSUMERIR_TRANSMITTER) != 0) {
return -EINVAL;
}
if (device == NULL) {
ALOGE("NULL device on open");
return -EINVAL;
}
consumerir_device_t *dev = malloc(sizeof(consumerir_device_t));
if (dev == NULL)
return -ENOMEM;
memset(dev, 0, sizeof(consumerir_device_t));
dev->common.tag = HARDWARE_DEVICE_TAG;
dev->common.version = 0;
dev->common.module = (struct hw_module_t*) module;
dev->common.close = consumerir_close;
dev->transmit = consumerir_transmit;
dev->get_num_carrier_freqs = consumerir_get_num_carrier_freqs;
dev->get_carrier_freqs = consumerir_get_carrier_freqs;
*device = (hw_device_t*) dev;
return 0;
}
static struct hw_module_methods_t consumerir_module_methods = {
.open = consumerir_open,
};
consumerir_module_t HAL_MODULE_INFO_SYM = {
.common = {
.tag = HARDWARE_MODULE_TAG,
.module_api_version = CONSUMERIR_MODULE_API_VERSION_1_0,
.hal_api_version = HARDWARE_HAL_API_VERSION,
.id = CONSUMERIR_HARDWARE_MODULE_ID,
.name = "Demo IR HAL",
.author = "The Android Open Source Project",
.methods = &consumerir_module_methods,
},
};
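/*
 * Illustrative sketch (not part of this file): opening this module through
 * the HAL loader and transmitting a pattern. The mark/space values are made
 * up and error handling is elided.
 *
 * const hw_module_t *module;
 * consumerir_device_t *dev;
 * int pattern[] = {1900, 500, 950, 500}; // on/off times in microseconds
 * hw_get_module(CONSUMERIR_HARDWARE_MODULE_ID, &module);
 * module->methods->open(module, CONSUMERIR_TRANSMITTER, (hw_device_t **)&dev);
 * dev->transmit(dev, 38000, pattern, ARRAY_SIZE(pattern)); // 38 kHz carrier
 * dev->common.close((hw_device_t *)dev);
 */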
@ -217,8 +217,6 @@ static int gralloc_alloc(alloc_device_t* dev,
bpp = 3;
break;
case HAL_PIXEL_FORMAT_RGB_565:
-case HAL_PIXEL_FORMAT_RGBA_5551:
-case HAL_PIXEL_FORMAT_RGBA_4444:
case HAL_PIXEL_FORMAT_RAW_SENSOR:
bpp = 2;
break;
@ -10,6 +10,7 @@ LOCAL_SRC_FILES:= \
CameraStreamTests.cpp \
CameraFrameTests.cpp \
CameraBurstTests.cpp \
+CameraMultiStreamTests.cpp \
ForkedTests.cpp \
TestForkerEventListener.cpp \
TestSettings.cpp \
@ -19,13 +19,14 @@
#define LOG_TAG "CameraBurstTest" #define LOG_TAG "CameraBurstTest"
//#define LOG_NDEBUG 0 //#define LOG_NDEBUG 0
#include <utils/Log.h> #include <utils/Log.h>
#include <utils/Timers.h>
#include <cmath> #include <cmath>
#include "CameraStreamFixture.h" #include "CameraStreamFixture.h"
#include "TestExtensions.h" #include "TestExtensions.h"
#define CAMERA_FRAME_TIMEOUT 1000000000 //nsecs (1 secs) #define CAMERA_FRAME_TIMEOUT 1000000000LL //nsecs (1 secs)
#define CAMERA_HEAP_COUNT 2 //HALBUG: 1 means registerBuffers fails #define CAMERA_HEAP_COUNT 2 //HALBUG: 1 means registerBuffers fails
#define CAMERA_BURST_DEBUGGING 0 #define CAMERA_BURST_DEBUGGING 0
#define CAMERA_FRAME_BURST_COUNT 10 #define CAMERA_FRAME_BURST_COUNT 10
@ -37,12 +38,21 @@
#define CAMERA_EXPOSURE_FORMAT CAMERA_STREAM_AUTO_CPU_FORMAT #define CAMERA_EXPOSURE_FORMAT CAMERA_STREAM_AUTO_CPU_FORMAT
#define CAMERA_EXPOSURE_STARTING 100000 // 1/10ms, up to 51.2ms with 10 steps #define CAMERA_EXPOSURE_STARTING 100000 // 1/10ms, up to 51.2ms with 10 steps
#define USEC 1000LL // in ns
#define MSEC 1000000LL // in ns
#define SEC 1000000000LL // in ns
#if CAMERA_BURST_DEBUGGING #if CAMERA_BURST_DEBUGGING
#define dout std::cout #define dout std::cout
#else #else
#define dout if (0) std::cout #define dout if (0) std::cout
#endif #endif
#define WARN_UNLESS(condition) (!(condition) ? (std::cerr) : (std::ostream(NULL)) << "Warning: ")
#define WARN_LE(exp, act) WARN_UNLESS((exp) <= (act))
#define WARN_LT(exp, act) WARN_UNLESS((exp) < (act))
#define WARN_GT(exp, act) WARN_UNLESS((exp) > (act))
using namespace android; using namespace android;
using namespace android::camera2; using namespace android::camera2;
@ -122,6 +132,23 @@ public:
return acc; return acc;
} }
// Parses a comma-separated string list into a Vector
template<typename T>
void ParseList(const char *src, Vector<T> &list) {
std::istringstream s(src);
while (!s.eof()) {
char c = s.peek();
if (c == ',' || c == ' ') {
s.ignore(1, EOF);
continue;
}
T val;
s >> val;
list.push_back(val);
}
}
}; };
TEST_F(CameraBurstTest, ManualExposureControl) { TEST_F(CameraBurstTest, ManualExposureControl) {
@ -161,7 +188,7 @@ TEST_F(CameraBurstTest, ManualExposureControl) {
ASSERT_EQ(OK, mDevice->createDefaultRequest(CAMERA2_TEMPLATE_PREVIEW, ASSERT_EQ(OK, mDevice->createDefaultRequest(CAMERA2_TEMPLATE_PREVIEW,
&previewRequest)); &previewRequest));
{ {
Vector<uint8_t> outputStreamIds; Vector<int32_t> outputStreamIds;
outputStreamIds.push(mStreamId); outputStreamIds.push(mStreamId);
ASSERT_EQ(OK, previewRequest.update(ANDROID_REQUEST_OUTPUT_STREAMS, ASSERT_EQ(OK, previewRequest.update(ANDROID_REQUEST_OUTPUT_STREAMS,
outputStreamIds)); outputStreamIds));
@ -251,10 +278,447 @@ TEST_F(CameraBurstTest, ManualExposureControl) {
dout << "max doubling count: " << max_doubling_count << std::endl; dout << "max doubling count: " << max_doubling_count << std::endl;
EXPECT_LE(CAMERA_EXPOSURE_DOUBLING_COUNT, max_doubling_count) /**
<< "average brightness should double at least " * Make this check warning only, since the brightness calculation is not reliable
<< CAMERA_EXPOSURE_DOUBLING_COUNT * and we have separate test to cover this case. Plus it is pretty subtle to make
<< " times over each consecutive frame as the exposure is doubled"; * it right without complicating the test too much.
*/
WARN_LE(CAMERA_EXPOSURE_DOUBLING_COUNT, max_doubling_count)
<< "average brightness should double at least "
<< CAMERA_EXPOSURE_DOUBLING_COUNT
<< " times over each consecutive frame as the exposure is doubled"
<< std::endl;
}
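/*
 * Illustrative note (not part of this change): WARN_UNLESS streams its
 * message to std::cerr only when the condition fails; otherwise the
 * operands are swallowed by a buffer-less std::ostream, so nothing prints.
 * ParseList accepts comma- or space-separated values, e.g.:
 *
 * Vector<int64_t> exps;
 * ParseList("10000000,20000000, 40000000", exps); // yields 3 entries
 */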
/**
* This test varies exposure time, frame duration, and sensitivity for a
* burst of captures. It picks values by default, but the selection can be
* overridden with the environment variables
* CAMERA2_TEST_VARIABLE_BURST_EXPOSURE_TIMES
* CAMERA2_TEST_VARIABLE_BURST_FRAME_DURATIONS
* CAMERA2_TEST_VARIABLE_BURST_SENSITIVITIES
* which must all be a list of comma-separated values, and each list must be
* the same length. In addition, if the environment variable
* CAMERA2_TEST_VARIABLE_BURST_DUMP_FRAMES
* is set to 1, then the YUV buffers are dumped into files named
* "camera2_test_variable_burst_frame_NNN.yuv"
*
* For example:
* $ setenv CAMERA2_TEST_VARIABLE_BURST_EXPOSURE_TIMES 10000000,20000000
* $ setenv CAMERA2_TEST_VARIABLE_BURST_FRAME_DURATIONS 40000000,40000000
* $ setenv CAMERA2_TEST_VARIABLE_BURST_SENSITIVITIES 200,100
* $ setenv CAMERA2_TEST_VARIABLE_BURST_DUMP_FRAMES 1
* $ /data/nativetest/camera2_test/camera2_test --gtest_filter="*VariableBurst"
*/
TEST_F(CameraBurstTest, VariableBurst) {
TEST_EXTENSION_FORKING_INIT;
// Bounds for checking frame duration is within range
const nsecs_t DURATION_UPPER_BOUND = 10 * MSEC;
const nsecs_t DURATION_LOWER_BOUND = 20 * MSEC;
// Threshold for considering two captures to have equivalent exposure value,
// as a ratio of the smaller EV to the larger EV.
const float EV_MATCH_BOUND = 0.95;
// Bound for two captures with equivalent exp values to have the same
// measured brightness, in 0-255 luminance.
const float BRIGHTNESS_MATCH_BOUND = 5;
// Environment variables to look for to override test settings
const char *expEnv = "CAMERA2_TEST_VARIABLE_BURST_EXPOSURE_TIMES";
const char *durationEnv = "CAMERA2_TEST_VARIABLE_BURST_FRAME_DURATIONS";
const char *sensitivityEnv = "CAMERA2_TEST_VARIABLE_BURST_SENSITIVITIES";
const char *dumpFrameEnv = "CAMERA2_TEST_VARIABLE_BURST_DUMP_FRAMES";
// Range of valid exposure times, in nanoseconds
int64_t minExp = 0, maxExp = 0;
// List of valid sensor sensitivities
Vector<int32_t> sensitivities;
// Range of valid frame durations, in nanoseconds
int64_t minDuration = 0, maxDuration = 0;
{
camera_metadata_ro_entry exposureTimeRange =
GetStaticEntry(ANDROID_SENSOR_INFO_EXPOSURE_TIME_RANGE);
EXPECT_EQ(2u, exposureTimeRange.count) << "Bad exposure time range tag. "
"Using default values";
if (exposureTimeRange.count == 2) {
minExp = exposureTimeRange.data.i64[0];
maxExp = exposureTimeRange.data.i64[1];
}
EXPECT_LT(0, minExp) << "Minimum exposure time is 0";
EXPECT_LT(0, maxExp) << "Maximum exposure time is 0";
EXPECT_LE(minExp, maxExp) << "Minimum exposure is greater than maximum";
if (minExp == 0) {
minExp = 1 * MSEC; // Fallback minimum exposure time
}
if (maxExp == 0) {
maxExp = 10 * SEC; // Fallback maximum exposure time
}
}
camera_metadata_ro_entry hardwareLevel =
GetStaticEntry(ANDROID_INFO_SUPPORTED_HARDWARE_LEVEL);
ASSERT_EQ(1u, hardwareLevel.count);
uint8_t level = hardwareLevel.data.u8[0];
ASSERT_GE(level, ANDROID_INFO_SUPPORTED_HARDWARE_LEVEL_LIMITED);
ASSERT_LE(level, ANDROID_INFO_SUPPORTED_HARDWARE_LEVEL_FULL);
if (level == ANDROID_INFO_SUPPORTED_HARDWARE_LEVEL_LIMITED) {
const ::testing::TestInfo* const test_info =
::testing::UnitTest::GetInstance()->current_test_info();
std::cerr << "Skipping test "
<< test_info->test_case_name() << "."
<< test_info->name()
<< " because HAL hardware supported level is limited "
<< std::endl;
return;
}
dout << "Stream size is " << mWidth << " x " << mHeight << std::endl;
dout << "Valid exposure range is: " <<
minExp << " - " << maxExp << " ns " << std::endl;
{
camera_metadata_ro_entry sensitivityRange =
GetStaticEntry(ANDROID_SENSOR_INFO_SENSITIVITY_RANGE);
EXPECT_EQ(2u, sensitivityRange.count) << "No sensitivity range listed. "
"Falling back to default set.";
int32_t minSensitivity = 100;
int32_t maxSensitivity = 800;
if (sensitivityRange.count == 2) {
ASSERT_GT(sensitivityRange.data.i32[0], 0);
ASSERT_GT(sensitivityRange.data.i32[1], 0);
minSensitivity = sensitivityRange.data.i32[0];
maxSensitivity = sensitivityRange.data.i32[1];
}
int32_t count = (maxSensitivity - minSensitivity + 99) / 100;
sensitivities.push_back(minSensitivity);
for (int i = 1; i < count; i++) {
sensitivities.push_back(minSensitivity + i * 100);
}
sensitivities.push_back(maxSensitivity);
}
dout << "Available sensitivities: ";
for (size_t i = 0; i < sensitivities.size(); i++) {
dout << sensitivities[i] << " ";
}
dout << std::endl;
{
camera_metadata_ro_entry availableProcessedSizes =
GetStaticEntry(ANDROID_SCALER_AVAILABLE_PROCESSED_SIZES);
camera_metadata_ro_entry availableProcessedMinFrameDurations =
GetStaticEntry(ANDROID_SCALER_AVAILABLE_PROCESSED_MIN_DURATIONS);
EXPECT_EQ(availableProcessedSizes.count,
availableProcessedMinFrameDurations.count * 2) <<
"The number of minimum frame durations doesn't match the number of "
"available sizes. Using fallback values";
if (availableProcessedSizes.count ==
availableProcessedMinFrameDurations.count * 2) {
bool gotSize = false;
for (size_t i = 0; i < availableProcessedSizes.count; i += 2) {
if (availableProcessedSizes.data.i32[i] == mWidth &&
availableProcessedSizes.data.i32[i+1] == mHeight) {
gotSize = true;
minDuration = availableProcessedMinFrameDurations.data.i64[i/2];
}
}
EXPECT_TRUE(gotSize) << "Can't find stream size in list of "
"available sizes: " << mWidth << ", " << mHeight;
}
if (minDuration == 0) {
minDuration = 1 * SEC / 30; // Fall back to 30 fps as minimum duration
}
ASSERT_LT(0, minDuration);
camera_metadata_ro_entry maxFrameDuration =
GetStaticEntry(ANDROID_SENSOR_INFO_MAX_FRAME_DURATION);
EXPECT_EQ(1u, maxFrameDuration.count) << "No valid maximum frame duration";
if (maxFrameDuration.count == 1) {
maxDuration = maxFrameDuration.data.i64[0];
}
EXPECT_GT(maxDuration, 0) << "Max duration is 0 or not given, using fallback";
if (maxDuration == 0) {
maxDuration = 10 * SEC; // Fall back to 10 seconds as max duration
}
}
dout << "Available frame duration range for configured stream size: "
<< minDuration << " - " << maxDuration << " ns" << std::endl;
// Get environment variables if set
const char *expVal = getenv(expEnv);
const char *durationVal = getenv(durationEnv);
const char *sensitivityVal = getenv(sensitivityEnv);
bool gotExp = (expVal != NULL);
bool gotDuration = (durationVal != NULL);
bool gotSensitivity = (sensitivityVal != NULL);
// All or none must be provided if using override envs
ASSERT_TRUE( (gotDuration && gotExp && gotSensitivity) ||
(!gotDuration && !gotExp && !gotSensitivity) ) <<
"Incomplete set of environment variable overrides provided";
Vector<int64_t> expList, durationList;
Vector<int32_t> sensitivityList;
if (gotExp) {
ParseList(expVal, expList);
ParseList(durationVal, durationList);
ParseList(sensitivityVal, sensitivityList);
ASSERT_TRUE(
(expList.size() == durationList.size()) &&
(durationList.size() == sensitivityList.size())) <<
"Mismatched sizes in env lists, or parse error";
dout << "Using burst list from environment with " << expList.size() <<
" captures" << std::endl;
} else {
// Create a default set of controls based on the available ranges
int64_t e;
int64_t d;
int32_t s;
// Exposure ramp
e = minExp;
d = minDuration;
s = sensitivities[0];
while (e < maxExp) {
expList.push_back(e);
durationList.push_back(d);
sensitivityList.push_back(s);
e = e * 2;
}
e = maxExp;
expList.push_back(e);
durationList.push_back(d);
sensitivityList.push_back(s);
// Duration ramp
e = 30 * MSEC;
d = minDuration;
s = sensitivities[0];
while (d < maxDuration) {
// make sure exposure <= frame duration
expList.push_back(e > d ? d : e);
durationList.push_back(d);
sensitivityList.push_back(s);
d = d * 2;
}
// Sensitivity ramp
e = 30 * MSEC;
d = 30 * MSEC;
d = d > minDuration ? d : minDuration;
for (size_t i = 0; i < sensitivities.size(); i++) {
expList.push_back(e);
durationList.push_back(d);
sensitivityList.push_back(sensitivities[i]);
}
// Constant-EV ramp, duration == exposure
e = 30 * MSEC; // at ISO 100
for (size_t i = 0; i < sensitivities.size(); i++) {
int64_t e_adj = e * 100 / sensitivities[i];
expList.push_back(e_adj);
durationList.push_back(e_adj > minDuration ? e_adj : minDuration);
sensitivityList.push_back(sensitivities[i]);
}
dout << "Default burst sequence created with " << expList.size() <<
" entries" << std::endl;
}
// Validate the list, but warn only
for (size_t i = 0; i < expList.size(); i++) {
EXPECT_GE(maxExp, expList[i])
<< "Capture " << i << " exposure too long: " << expList[i];
EXPECT_LE(minExp, expList[i])
<< "Capture " << i << " exposure too short: " << expList[i];
EXPECT_GE(maxDuration, durationList[i])
<< "Capture " << i << " duration too long: " << durationList[i];
EXPECT_LE(minDuration, durationList[i])
<< "Capture " << i << " duration too short: " << durationList[i];
bool validSensitivity = false;
for (size_t j = 0; j < sensitivities.size(); j++) {
if (sensitivityList[i] == sensitivities[j]) {
validSensitivity = true;
break;
}
}
EXPECT_TRUE(validSensitivity)
<< "Capture " << i << " sensitivity not in list: " << sensitivityList[i];
}
// Check if debug yuv dumps are requested
bool dumpFrames = false;
{
const char *frameDumpVal = getenv(dumpFrameEnv);
if (frameDumpVal != NULL) {
if (frameDumpVal[0] == '1') dumpFrames = true;
}
}
dout << "Dumping YUV frames " <<
(dumpFrames ? "enabled, not checking timing" : "disabled") << std::endl;
// Create a base preview request, turning off all 3A
CameraMetadata previewRequest;
ASSERT_EQ(OK, mDevice->createDefaultRequest(CAMERA2_TEMPLATE_PREVIEW,
&previewRequest));
{
Vector<int32_t> outputStreamIds;
outputStreamIds.push(mStreamId);
ASSERT_EQ(OK, previewRequest.update(ANDROID_REQUEST_OUTPUT_STREAMS,
outputStreamIds));
// Disable all 3A routines
uint8_t cmOff = static_cast<uint8_t>(ANDROID_CONTROL_MODE_OFF);
ASSERT_EQ(OK, previewRequest.update(ANDROID_CONTROL_MODE,
&cmOff, 1));
int requestId = 1;
ASSERT_EQ(OK, previewRequest.update(ANDROID_REQUEST_ID,
&requestId, 1));
}
// Submit capture requests
for (size_t i = 0; i < expList.size(); ++i) {
CameraMetadata tmpRequest = previewRequest;
ASSERT_EQ(OK, tmpRequest.update(ANDROID_SENSOR_EXPOSURE_TIME,
&expList[i], 1));
ASSERT_EQ(OK, tmpRequest.update(ANDROID_SENSOR_FRAME_DURATION,
&durationList[i], 1));
ASSERT_EQ(OK, tmpRequest.update(ANDROID_SENSOR_SENSITIVITY,
&sensitivityList[i], 1));
ALOGV("Submitting capture %d with exposure %lld, frame duration %lld, sensitivity %d",
i, expList[i], durationList[i], sensitivityList[i]);
dout << "Capture request " << i <<
": exposure is " << (expList[i]/1e6f) << " ms" <<
", frame duration is " << (durationList[i]/1e6f) << " ms" <<
", sensitivity is " << sensitivityList[i] <<
std::endl;
ASSERT_EQ(OK, mDevice->capture(tmpRequest));
}
Vector<float> brightnesses;
Vector<nsecs_t> captureTimes;
brightnesses.setCapacity(expList.size());
captureTimes.setCapacity(expList.size());
// Get each frame (metadata) and then the buffer. Calculate brightness.
for (size_t i = 0; i < expList.size(); ++i) {
ALOGV("Reading request %d", i);
dout << "Waiting for capture " << i << ": " <<
" exposure " << (expList[i]/1e6f) << " ms," <<
" frame duration " << (durationList[i]/1e6f) << " ms," <<
" sensitivity " << sensitivityList[i] <<
std::endl;
// Set wait limit based on expected frame duration, or minimum timeout
int64_t waitLimit = CAMERA_FRAME_TIMEOUT;
if (expList[i] * 2 > waitLimit) waitLimit = expList[i] * 2;
if (durationList[i] * 2 > waitLimit) waitLimit = durationList[i] * 2;
ASSERT_EQ(OK, mDevice->waitForNextFrame(waitLimit));
ALOGV("Reading capture request-1 %d", i);
CameraMetadata frameMetadata;
ASSERT_EQ(OK, mDevice->getNextFrame(&frameMetadata));
ALOGV("Reading capture request-2 %d", i);
ASSERT_EQ(OK, mFrameListener->waitForFrame(CAMERA_FRAME_TIMEOUT));
ALOGV("We got the frame now");
captureTimes.push_back(systemTime());
CpuConsumer::LockedBuffer imgBuffer;
ASSERT_EQ(OK, mCpuConsumer->lockNextBuffer(&imgBuffer));
int underexposed, overexposed;
float avgBrightness = 0;
long long brightness = TotalBrightness(imgBuffer, &underexposed,
&overexposed);
int numValidPixels = mWidth * mHeight - (underexposed + overexposed);
if (numValidPixels != 0) {
avgBrightness = brightness * 1.0f / numValidPixels;
} else if (underexposed < overexposed) {
avgBrightness = 255;
}
ALOGV("Total brightness for frame %d was %lld (underexposed %d, "
"overexposed %d), avg %f", i, brightness, underexposed,
overexposed, avgBrightness);
dout << "Average brightness (frame " << i << ") was " << avgBrightness
<< " (underexposed " << underexposed << ", overexposed "
<< overexposed << ")" << std::endl;
brightnesses.push_back(avgBrightness);
if (i != 0) {
float prevEv = static_cast<float>(expList[i - 1]) * sensitivityList[i - 1];
float currentEv = static_cast<float>(expList[i]) * sensitivityList[i];
float evRatio = (prevEv > currentEv) ? (currentEv / prevEv) :
(prevEv / currentEv);
if ( evRatio > EV_MATCH_BOUND ) {
WARN_LT(fabs(brightnesses[i] - brightnesses[i - 1]),
BRIGHTNESS_MATCH_BOUND) <<
"Capture brightness different from previous, even though "
"they have the same EV value. Ev now: " << currentEv <<
", previous: " << prevEv << ". Brightness now: " <<
brightnesses[i] << ", previous: " << brightnesses[i-1] <<
std::endl;
}
// Only check timing if not saving to disk, since that slows things
// down substantially
if (!dumpFrames) {
nsecs_t timeDelta = captureTimes[i] - captureTimes[i-1];
nsecs_t expectedDelta = expList[i] > durationList[i] ?
expList[i] : durationList[i];
WARN_LT(timeDelta, expectedDelta + DURATION_UPPER_BOUND) <<
"Capture took " << timeDelta << " ns to receive, but expected"
" frame duration was " << expectedDelta << " ns." <<
std::endl;
WARN_GT(timeDelta, expectedDelta - DURATION_LOWER_BOUND) <<
"Capture took " << timeDelta << " ns to receive, but expected"
" frame duration was " << expectedDelta << " ns." <<
std::endl;
dout << "Time delta from previous frame: " << timeDelta / 1e6 <<
" ms. Expected " << expectedDelta / 1e6 << " ms" << std::endl;
}
}
if (dumpFrames) {
String8 dumpName =
String8::format("/data/local/tmp/camera2_test_variable_burst_frame_%03d.yuv", i);
dout << " Writing YUV dump to " << dumpName << std::endl;
DumpYuvToFile(dumpName, imgBuffer);
}
ASSERT_EQ(OK, mCpuConsumer->unlockBuffer(imgBuffer));
}
}
}
@ -23,9 +23,8 @@
#include "hardware/hardware.h" #include "hardware/hardware.h"
#include "hardware/camera2.h" #include "hardware/camera2.h"
#include "CameraDeviceBase.h" #include <common/CameraDeviceBase.h>
#include "utils/StrongPointer.h" #include <utils/StrongPointer.h>
#include <gui/CpuConsumer.h> #include <gui/CpuConsumer.h>
#include <gui/Surface.h> #include <gui/Surface.h>
@ -91,7 +90,7 @@ TEST_P(CameraFrameTest, GetFrame) {
ASSERT_EQ(OK, mDevice->createDefaultRequest(CAMERA2_TEMPLATE_PREVIEW, ASSERT_EQ(OK, mDevice->createDefaultRequest(CAMERA2_TEMPLATE_PREVIEW,
&previewRequest)); &previewRequest));
{ {
Vector<uint8_t> outputStreamIds; Vector<int32_t> outputStreamIds;
outputStreamIds.push(mStreamId); outputStreamIds.push(mStreamId);
ASSERT_EQ(OK, previewRequest.update(ANDROID_REQUEST_OUTPUT_STREAMS, ASSERT_EQ(OK, previewRequest.update(ANDROID_REQUEST_OUTPUT_STREAMS,
outputStreamIds)); outputStreamIds));
@ -25,7 +25,7 @@
#include "hardware/hardware.h" #include "hardware/hardware.h"
#include "hardware/camera2.h" #include "hardware/camera2.h"
#include "CameraDeviceBase.h" #include "common/CameraDeviceBase.h"
#include "utils/StrongPointer.h" #include "utils/StrongPointer.h"
#include <gui/CpuConsumer.h> #include <gui/CpuConsumer.h>
@ -134,10 +134,6 @@ TEST_F(CameraMetadataTest, types) {
TEST_F(CameraMetadataTest, RequiredFormats) { TEST_F(CameraMetadataTest, RequiredFormats) {
TEST_EXTENSION_FORKING_INIT; TEST_EXTENSION_FORKING_INIT;
EXPECT_TRUE(
HasElementInArrayFromStaticTag(ANDROID_SCALER_AVAILABLE_FORMATS,
HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED));
EXPECT_TRUE( EXPECT_TRUE(
HasElementInArrayFromStaticTag(ANDROID_SCALER_AVAILABLE_FORMATS, HasElementInArrayFromStaticTag(ANDROID_SCALER_AVAILABLE_FORMATS,
HAL_PIXEL_FORMAT_BLOB)); // JPEG HAL_PIXEL_FORMAT_BLOB)); // JPEG
@ -169,9 +165,11 @@ TEST_F(CameraMetadataTest, SaneResolutions) {
// Iff there are listed raw resolutions, the format should be available // Iff there are listed raw resolutions, the format should be available
int rawResolutionsCount = int rawResolutionsCount =
GetEntryCountFromStaticTag(ANDROID_SCALER_AVAILABLE_RAW_SIZES); GetEntryCountFromStaticTag(ANDROID_SCALER_AVAILABLE_RAW_SIZES);
EXPECT_EQ(rawResolutionsCount > 0, if (rawResolutionsCount > 0) {
HasElementInArrayFromStaticTag(ANDROID_SCALER_AVAILABLE_FORMATS, EXPECT_TRUE(
HAL_PIXEL_FORMAT_RAW_SENSOR)); HasElementInArrayFromStaticTag(ANDROID_SCALER_AVAILABLE_FORMATS,
HAL_PIXEL_FORMAT_RAW_SENSOR));
}
// Required processed sizes. // Required processed sizes.
int processedSizeCount = int processedSizeCount =
@@ -22,8 +22,9 @@
 #include "hardware/hardware.h"
 #include "hardware/camera2.h"
-#include "Camera2Device.h"
-#include "Camera3Device.h"
+#include <device2/Camera2Device.h>
+#include <device3/Camera3Device.h>
 #include "camera2_utils.h"
 #include "TestExtensions.h"


@@ -19,12 +19,12 @@
 #define LOG_TAG "CameraModuleTest"
 #define LOG_NDEBUG 0
 #include <utils/Log.h>
+#include <utils/StrongPointer.h>
+#include <common/CameraDeviceBase.h>
 #include "hardware/hardware.h"
 #include "hardware/camera2.h"
-#include "CameraDeviceBase.h"
-#include "utils/StrongPointer.h"
 #include "CameraModuleFixture.h"
 namespace android {
@@ -78,17 +78,19 @@ TEST_F(CameraModuleTest, LoadModuleBadIndices) {
     TEST_EXTENSION_FORKING_INIT;
     int idx[] = { -1, mNumberOfCameras, mNumberOfCameras + 1 };
+    hw_device_t *device = NULL;
     for (unsigned i = 0; i < sizeof(idx)/sizeof(idx[0]); ++i) {
-        // Since the initialization should fail at device open(), it doesn't
-        // matter which version of CameraNDevice is used here
-        mDevice = new Camera2Device(idx[i]);
-        status_t deviceInitializeCode = initializeDevice(idx[i]);
-        EXPECT_NE(OK, deviceInitializeCode);
-        EXPECT_EQ(-ENODEV, deviceInitializeCode)
-            << "Incorrect error code when trying to initialize invalid index "
-            << idx[i];
-        mDevice.clear();
+        String8 deviceName = String8::format("%d", idx[i]);
+        status_t res =
+            mModule->common.methods->open(
+                &mModule->common,
+                deviceName,
+                &device);
+        EXPECT_NE(OK, res);
+        EXPECT_EQ(-ENODEV, res)
+            << "Incorrect error code when trying to open camera with invalid id "
+            << deviceName;
     }
 }


@@ -0,0 +1,683 @@
/*
* Copyright (C) 2013 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#define LOG_TAG "CameraMultiStreamTest"
//#define LOG_NDEBUG 0
#include "CameraStreamFixture.h"
#include "TestExtensions.h"
#include <gtest/gtest.h>
#include <utils/Log.h>
#include <utils/StrongPointer.h>
#include <common/CameraDeviceBase.h>
#include <hardware/hardware.h>
#include <hardware/camera2.h>
#include <gui/SurfaceComposerClient.h>
#include <gui/Surface.h>
#define DEFAULT_FRAME_DURATION 33000000LL // 33ms
#define CAMERA_HEAP_COUNT 1
#define CAMERA_EXPOSURE_FORMAT CAMERA_STREAM_AUTO_CPU_FORMAT
#define CAMERA_DISPLAY_FORMAT HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED
#define CAMERA_MULTI_STREAM_DEBUGGING 0
#define CAMERA_FRAME_TIMEOUT 1000000000LL // nsecs (1 secs)
#define PREVIEW_RENDERING_TIME_INTERVAL 200000 // in unit of us, 200ms
#define TOLERANCE_MARGIN 0.01 // 1% tolerance margin for exposure sanity check.
/* constants for display */
#define DISPLAY_BUFFER_HEIGHT 1024
#define DISPLAY_BUFFER_WIDTH 1024
#define DISPLAY_BUFFER_FORMAT PIXEL_FORMAT_RGB_888
// This test intends to test large preview size but less than 1080p.
#define PREVIEW_WIDTH_CAP 1920
#define PREVIEW_HEIGHT_CAP 1080
// This test intends to test small metering burst size that is less than 640x480
#define METERING_WIDTH_CAP 640
#define METERING_HEIGHT_CAP 480
#define EXP_WAIT_MULTIPLIER 2
namespace android {
namespace camera2 {
namespace tests {
static const CameraStreamParams DEFAULT_STREAM_PARAMETERS = {
/*mFormat*/ CAMERA_EXPOSURE_FORMAT,
/*mHeapCount*/ CAMERA_HEAP_COUNT
};
static const CameraStreamParams DISPLAY_STREAM_PARAMETERS = {
/*mFormat*/ CAMERA_DISPLAY_FORMAT,
/*mHeapCount*/ CAMERA_HEAP_COUNT
};
class CameraMultiStreamTest
: public ::testing::Test,
public CameraStreamFixture {
public:
CameraMultiStreamTest() : CameraStreamFixture(DEFAULT_STREAM_PARAMETERS) {
TEST_EXTENSION_FORKING_CONSTRUCTOR;
if (HasFatalFailure()) {
return;
}
/**
* Don't create default stream, each test is in charge of creating
* its own streams.
*/
}
~CameraMultiStreamTest() {
TEST_EXTENSION_FORKING_DESTRUCTOR;
}
sp<SurfaceComposerClient> mComposerClient;
sp<SurfaceControl> mSurfaceControl;
void CreateOnScreenSurface(sp<ANativeWindow>& surface) {
mComposerClient = new SurfaceComposerClient;
ASSERT_EQ(NO_ERROR, mComposerClient->initCheck());
mSurfaceControl = mComposerClient->createSurface(
String8("CameraMultiStreamTest StreamingImage Surface"),
DISPLAY_BUFFER_HEIGHT, DISPLAY_BUFFER_WIDTH,
DISPLAY_BUFFER_FORMAT, 0);
ASSERT_NE((void*)NULL, mSurfaceControl.get());
ASSERT_TRUE(mSurfaceControl->isValid());
SurfaceComposerClient::openGlobalTransaction();
ASSERT_EQ(NO_ERROR, mSurfaceControl->setLayer(0x7FFFFFFF));
ASSERT_EQ(NO_ERROR, mSurfaceControl->show());
SurfaceComposerClient::closeGlobalTransaction();
surface = mSurfaceControl->getSurface();
ASSERT_NE((void*)NULL, surface.get());
}
struct Size {
int32_t width;
int32_t height;
};
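// Size tags in the static metadata are flattened (width, height) pairs,
// so the arrays below are scanned two entries at a time.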
// Select minimal size by number of pixels.
void GetMinSize(const int32_t* data, size_t count,
Size* min, int32_t* idx) {
ASSERT_NE((int32_t*)NULL, data);
int32_t minIdx = 0;
int32_t minSize = INT_MAX, tempSize;
for (size_t i = 0; i < count; i+=2) {
tempSize = data[i] * data[i+1];
if (minSize > tempSize) {
minSize = tempSize;
minIdx = i;
}
}
min->width = data[minIdx];
min->height = data[minIdx + 1];
*idx = minIdx;
}
// Select maximal size by number of pixels.
void GetMaxSize(const int32_t* data, size_t count,
Size* max, int32_t* idx) {
ASSERT_NE((int32_t*)NULL, data);
int32_t maxIdx = 0;
int32_t maxSize = INT_MIN, tempSize;
for (size_t i = 0; i < count; i+=2) {
tempSize = data[i] * data[i+1];
if (maxSize < tempSize) {
maxSize = tempSize;
maxIdx = i;
}
}
max->width = data[maxIdx];
max->height = data[maxIdx + 1];
*idx = maxIdx;
}
// Cap size by number of pixels.
Size CapSize(Size cap, Size input) {
if (input.width * input.height > cap.width * cap.height) {
return cap;
}
return input;
}
struct CameraStream : public RefBase {
public:
/**
* Only initialize the member variables here; the ASSERT checks are
* done in the SetUp function. SetUp must be called before the
* stream is used.
*/
CameraStream(
int width,
int height,
const sp<CameraDeviceBase>& device,
CameraStreamParams param, sp<ANativeWindow> surface,
bool useCpuConsumer)
: mDevice(device),
mWidth(width),
mHeight(height) {
mFormat = param.mFormat;
if (useCpuConsumer) {
sp<BufferQueue> bq = new BufferQueue();
mCpuConsumer = new CpuConsumer(bq, param.mHeapCount);
mCpuConsumer->setName(String8(
"CameraMultiStreamTest::mCpuConsumer"));
mNativeWindow = new Surface(bq);
} else {
// Render the stream to screen.
mCpuConsumer = NULL;
mNativeWindow = surface;
}
mFrameListener = new FrameListener();
if (mCpuConsumer != 0) {
mCpuConsumer->setFrameAvailableListener(mFrameListener);
}
}
/**
* Actually create the camera stream here, and do the ASSERT checks,
* since we cannot do them in the constructor.
*/
void SetUp() {
ASSERT_EQ(OK,
mDevice->createStream(mNativeWindow,
mWidth, mHeight, mFormat, /*size (for jpegs)*/0,
&mStreamId));
ASSERT_NE(-1, mStreamId);
}
int GetStreamId() { return mStreamId; }
sp<CpuConsumer> GetConsumer() { return mCpuConsumer; }
sp<FrameListener> GetFrameListener() { return mFrameListener; }
protected:
~CameraStream() {
if (mDevice.get()) {
mDevice->waitUntilDrained();
mDevice->deleteStream(mStreamId);
}
// Clear producer before consumer.
mNativeWindow.clear();
mCpuConsumer.clear();
}
private:
sp<FrameListener> mFrameListener;
sp<CpuConsumer> mCpuConsumer;
sp<ANativeWindow> mNativeWindow;
sp<CameraDeviceBase> mDevice;
int mStreamId;
int mWidth;
int mHeight;
int mFormat;
};
int64_t GetExposureValue(const CameraMetadata& metaData) {
camera_metadata_ro_entry_t entry =
metaData.find(ANDROID_SENSOR_EXPOSURE_TIME);
EXPECT_EQ(1u, entry.count);
if (entry.count == 1) {
return entry.data.i64[0];
}
return -1;
}
int32_t GetSensitivity(const CameraMetadata& metaData) {
camera_metadata_ro_entry_t entry =
metaData.find(ANDROID_SENSOR_SENSITIVITY);
EXPECT_EQ(1u, entry.count);
if (entry.count == 1) {
return entry.data.i32[0];
}
return -1;
}
int64_t GetFrameDuration(const CameraMetadata& metaData) {
camera_metadata_ro_entry_t entry =
metaData.find(ANDROID_SENSOR_FRAME_DURATION);
EXPECT_EQ(1u, entry.count);
if (entry.count == 1) {
return entry.data.i64[0];
}
return -1;
}
void CreateRequests(CameraMetadata& previewRequest,
CameraMetadata& meteringRequest,
CameraMetadata& captureRequest,
int previewStreamId,
int meteringStreamId,
int captureStreamId) {
int32_t requestId = 0;
Vector<int32_t> previewStreamIds;
previewStreamIds.push(previewStreamId);
ASSERT_EQ(OK, mDevice->createDefaultRequest(CAMERA2_TEMPLATE_PREVIEW,
&previewRequest));
ASSERT_EQ(OK, previewRequest.update(ANDROID_REQUEST_OUTPUT_STREAMS,
previewStreamIds));
ASSERT_EQ(OK, previewRequest.update(ANDROID_REQUEST_ID,
&requestId, 1));
// Create metering request, manual settings
// Manual control: Disable 3A, noise reduction, edge sharping
uint8_t cmOff = static_cast<uint8_t>(ANDROID_CONTROL_MODE_OFF);
uint8_t nrOff = static_cast<uint8_t>(ANDROID_NOISE_REDUCTION_MODE_OFF);
uint8_t sharpOff = static_cast<uint8_t>(ANDROID_EDGE_MODE_OFF);
Vector<int32_t> meteringStreamIds;
meteringStreamIds.push(meteringStreamId);
ASSERT_EQ(OK, mDevice->createDefaultRequest(
CAMERA2_TEMPLATE_PREVIEW,
&meteringRequest));
ASSERT_EQ(OK, meteringRequest.update(
ANDROID_REQUEST_OUTPUT_STREAMS,
meteringStreamIds));
ASSERT_EQ(OK, meteringRequest.update(
ANDROID_CONTROL_MODE,
&cmOff, 1));
ASSERT_EQ(OK, meteringRequest.update(
ANDROID_NOISE_REDUCTION_MODE,
&nrOff, 1));
ASSERT_EQ(OK, meteringRequest.update(
ANDROID_EDGE_MODE,
&sharpOff, 1));
// Create capture request, manual settings
Vector<int32_t> captureStreamIds;
captureStreamIds.push(captureStreamId);
ASSERT_EQ(OK, mDevice->createDefaultRequest(
CAMERA2_TEMPLATE_PREVIEW,
&captureRequest));
ASSERT_EQ(OK, captureRequest.update(
ANDROID_REQUEST_OUTPUT_STREAMS,
captureStreamIds));
ASSERT_EQ(OK, captureRequest.update(
ANDROID_CONTROL_MODE,
&cmOff, 1));
ASSERT_EQ(OK, captureRequest.update(
ANDROID_NOISE_REDUCTION_MODE,
&nrOff, 1));
ASSERT_EQ(OK, captureRequest.update(
ANDROID_EDGE_MODE,
&sharpOff, 1));
}
sp<CameraStream> CreateStream(
int width,
int height,
const sp<CameraDeviceBase>& device,
CameraStreamParams param = DEFAULT_STREAM_PARAMETERS,
sp<ANativeWindow> surface = NULL,
bool useCpuConsumer = true) {
param.mFormat = MapAutoFormat(param.mFormat);
return new CameraStream(width, height, device,
param, surface, useCpuConsumer);
}
void CaptureBurst(CameraMetadata& request, size_t requestCount,
const Vector<int64_t>& exposures,
const Vector<int32_t>& sensitivities,
const sp<CameraStream>& stream,
int64_t minFrameDuration,
int32_t* requestIdStart) {
ASSERT_EQ(OK, request.update(ANDROID_SENSOR_FRAME_DURATION,
&minFrameDuration, 1));
// Submit a series of requests with the specified exposure/gain values.
int32_t targetRequestId = *requestIdStart;
for (size_t i = 0; i < requestCount; i++) {
ASSERT_EQ(OK, request.update(ANDROID_REQUEST_ID, requestIdStart, 1));
ASSERT_EQ(OK, request.update(ANDROID_SENSOR_EXPOSURE_TIME, &exposures[i], 1));
ASSERT_EQ(OK, request.update(ANDROID_SENSOR_SENSITIVITY, &sensitivities[i], 1));
ASSERT_EQ(OK, mDevice->capture(request));
ALOGV("Submitting request with: id %d with exposure %lld, sensitivity %d",
*requestIdStart, exposures[i], sensitivities[i]);
if (CAMERA_MULTI_STREAM_DEBUGGING) {
request.dump(STDOUT_FILENO);
}
(*requestIdStart)++;
}
// Get capture burst results.
Vector<nsecs_t> captureBurstTimes;
sp<CpuConsumer> consumer = stream->GetConsumer();
sp<FrameListener> listener = stream->GetFrameListener();
// Set wait limit based on expected frame duration.
int64_t waitLimit = CAMERA_FRAME_TIMEOUT;
for (size_t i = 0; i < requestCount; i++) {
ALOGV("Reading request result %d", i);
/**
* Raise the timeout to be at least twice as long as the exposure
* time, to avoid a false positive when the timeout is too short.
*/
if ((exposures[i] * EXP_WAIT_MULTIPLIER) > waitLimit) {
waitLimit = exposures[i] * EXP_WAIT_MULTIPLIER;
}
CameraMetadata frameMetadata;
int32_t resultRequestId;
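// Results from the still-active streaming preview request can be
// interleaved with the burst results, so skip frames until the
// returned request ID matches the submitted burst request ID.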
do {
ASSERT_EQ(OK, mDevice->waitForNextFrame(waitLimit));
ASSERT_EQ(OK, mDevice->getNextFrame(&frameMetadata));
camera_metadata_entry_t resultEntry = frameMetadata.find(ANDROID_REQUEST_ID);
ASSERT_EQ(1u, resultEntry.count);
resultRequestId = resultEntry.data.i32[0];
if (CAMERA_MULTI_STREAM_DEBUGGING) {
std::cout << "capture result req id: " << resultRequestId << std::endl;
}
} while (resultRequestId != targetRequestId);
targetRequestId++;
ALOGV("Got capture burst result for request %d", i);
// Validate capture result
if (CAMERA_MULTI_STREAM_DEBUGGING) {
frameMetadata.dump(STDOUT_FILENO);
}
// TODO: Revisit this to figure out an accurate margin.
int64_t resultExposure = GetExposureValue(frameMetadata);
int32_t resultSensitivity = GetSensitivity(frameMetadata);
EXPECT_LE(sensitivities[i] * (1.0 - TOLERANCE_MARGIN), resultSensitivity);
EXPECT_GE(sensitivities[i] * (1.0 + TOLERANCE_MARGIN), resultSensitivity);
EXPECT_LE(exposures[i] * (1.0 - TOLERANCE_MARGIN), resultExposure);
EXPECT_GE(exposures[i] * (1.0 + TOLERANCE_MARGIN), resultExposure);
ASSERT_EQ(OK, listener->waitForFrame(waitLimit));
captureBurstTimes.push_back(systemTime());
CpuConsumer::LockedBuffer imgBuffer;
ASSERT_EQ(OK, consumer->lockNextBuffer(&imgBuffer));
ALOGV("Got capture buffer for request %d", i);
/**
* TODO: Validate the capture buffer. The current brightness
* calculation is too slow, and it doesn't account for saturation
* effects, which are quite common since we are going over a
* significant range of EVs. We need to figure out a reliable way
* to validate buffer data.
*/
ASSERT_EQ(OK, consumer->unlockBuffer(imgBuffer));
if (i > 0) {
nsecs_t timeDelta =
captureBurstTimes[i] - captureBurstTimes[i-1];
EXPECT_GE(timeDelta, exposures[i]);
}
}
}
/**
* Intentionally shadow default CreateStream function from base class,
* because we don't want any test in this class to use the default
* stream creation function.
*/
void CreateStream() {
}
};
/**
* This test adds a multiple-stream use case; it basically tests 3
* streams:
*
* 1. Preview stream, with a large size that is no bigger than 1080p.
* We render this stream to the display and vary the exposure time for
* a certain amount of time, for visual inspection purposes.
*
* 2. Metering stream, with a small size that is no bigger than VGA.
* A burst is issued with different exposure times and analog gains
* (or sensitivities implemented by analog gain), and we then check
* whether the capture result metadata matches the request.
*
* 3. Capture stream, which is basically similar to the metering stream,
* but with a large size: the largest supported JPEG capture size.
*
* This multiple-stream test checks that the HAL supports:
*
* 1. Multiple streams like the above. The HAL should support at least
* 3 streams concurrently: one preview stream and two other YUV streams.
*
* 2. Manual control (gain/exposure) of a multiple-burst capture.
*/
TEST_F(CameraMultiStreamTest, MultiBurst) {
TEST_EXTENSION_FORKING_INIT;
camera_metadata_ro_entry availableProcessedSizes =
GetStaticEntry(ANDROID_SCALER_AVAILABLE_PROCESSED_SIZES);
ASSERT_EQ(0u, availableProcessedSizes.count % 2);
ASSERT_GE(availableProcessedSizes.count, 2u);
camera_metadata_ro_entry availableProcessedMinFrameDurations =
GetStaticEntry(ANDROID_SCALER_AVAILABLE_PROCESSED_MIN_DURATIONS);
EXPECT_EQ(availableProcessedSizes.count,
availableProcessedMinFrameDurations.count * 2);
camera_metadata_ro_entry availableJpegSizes =
GetStaticEntry(ANDROID_SCALER_AVAILABLE_JPEG_SIZES);
ASSERT_EQ(0u, availableJpegSizes.count % 2);
ASSERT_GE(availableJpegSizes.count, 2u);
camera_metadata_ro_entry hardwareLevel =
GetStaticEntry(ANDROID_INFO_SUPPORTED_HARDWARE_LEVEL);
ASSERT_EQ(1u, hardwareLevel.count);
uint8_t level = hardwareLevel.data.u8[0];
ASSERT_GE(level, ANDROID_INFO_SUPPORTED_HARDWARE_LEVEL_LIMITED);
ASSERT_LE(level, ANDROID_INFO_SUPPORTED_HARDWARE_LEVEL_FULL);
if (level == ANDROID_INFO_SUPPORTED_HARDWARE_LEVEL_LIMITED) {
const ::testing::TestInfo* const test_info =
::testing::UnitTest::GetInstance()->current_test_info();
std::cerr << "Skipping test "
<< test_info->test_case_name() << "."
<< test_info->name()
<< " because HAL hardware supported level is limited "
<< std::endl;
return;
}
// Find the right sizes for the preview, metering, and capture streams;
// assumes at least 2 entries in availableProcessedSizes.
int64_t minFrameDuration = DEFAULT_FRAME_DURATION;
Size processedMinSize, processedMaxSize, jpegMaxSize;
const int32_t* data = availableProcessedSizes.data.i32;
size_t count = availableProcessedSizes.count;
int32_t minIdx, maxIdx;
GetMinSize(data, count, &processedMinSize, &minIdx);
GetMaxSize(data, count, &processedMaxSize, &maxIdx);
ALOGV("Found processed max size: %dx%d, min size = %dx%d",
processedMaxSize.width, processedMaxSize.height,
processedMinSize.width, processedMinSize.height);
if (availableProcessedSizes.count ==
availableProcessedMinFrameDurations.count * 2) {
minFrameDuration =
availableProcessedMinFrameDurations.data.i64[maxIdx / 2];
}
EXPECT_GT(minFrameDuration, 0);
if (minFrameDuration <= 0) {
minFrameDuration = DEFAULT_FRAME_DURATION;
}
ALOGV("targeted minimal frame duration is: %lldns", minFrameDuration);
data = &(availableJpegSizes.data.i32[0]);
count = availableJpegSizes.count;
GetMaxSize(data, count, &jpegMaxSize, &maxIdx);
ALOGV("Found Jpeg size max idx = %d", maxIdx);
// Max Jpeg size should be available in processed sizes. Use it for
// YUV capture anyway.
EXPECT_EQ(processedMaxSize.width, jpegMaxSize.width);
EXPECT_EQ(processedMaxSize.height, jpegMaxSize.height);
// Cap preview size.
Size previewLimit = { PREVIEW_WIDTH_CAP, PREVIEW_HEIGHT_CAP };
// FIXME: need to make sure previewLimit is supported by the HAL.
Size previewSize = CapSize(previewLimit, processedMaxSize);
// Cap Metering size.
Size meteringLimit = { METERING_WIDTH_CAP, METERING_HEIGHT_CAP };
// Cap metering size to VGA (VGA is mandated by the CDD)
Size meteringSize = CapSize(meteringLimit, processedMinSize);
// Capture stream should be the max size of jpeg sizes.
ALOGV("preview size: %dx%d, metering size: %dx%d, capture size: %dx%d",
previewSize.width, previewSize.height,
meteringSize.width, meteringSize.height,
jpegMaxSize.width, jpegMaxSize.height);
// Create streams
// Preview stream: small resolution, render on the screen.
sp<CameraStream> previewStream;
{
sp<ANativeWindow> surface;
ASSERT_NO_FATAL_FAILURE(CreateOnScreenSurface(/*out*/surface));
previewStream = CreateStream(
previewSize.width,
previewSize.height,
mDevice,
DISPLAY_STREAM_PARAMETERS,
surface,
false);
ASSERT_NE((void*)NULL, previewStream.get());
ASSERT_NO_FATAL_FAILURE(previewStream->SetUp());
}
// Metering burst stream: small resolution yuv stream
sp<CameraStream> meteringStream =
CreateStream(
meteringSize.width,
meteringSize.height,
mDevice);
ASSERT_NE((void*)NULL, meteringStream.get());
ASSERT_NO_FATAL_FAILURE(meteringStream->SetUp());
// Capture burst stream: full resolution yuv stream
sp<CameraStream> captureStream =
CreateStream(
jpegMaxSize.width,
jpegMaxSize.height,
mDevice);
ASSERT_NE((void*)NULL, captureStream.get());
ASSERT_NO_FATAL_FAILURE(captureStream->SetUp());
// Create Preview request.
CameraMetadata previewRequest, meteringRequest, captureRequest;
ASSERT_NO_FATAL_FAILURE(CreateRequests(previewRequest, meteringRequest,
captureRequest, previewStream->GetStreamId(),
meteringStream->GetStreamId(), captureStream->GetStreamId()));
// Start preview
if (CAMERA_MULTI_STREAM_DEBUGGING) {
previewRequest.dump(STDOUT_FILENO);
}
// Generate exposure and sensitivity lists
camera_metadata_ro_entry exposureTimeRange =
GetStaticEntry(ANDROID_SENSOR_INFO_EXPOSURE_TIME_RANGE);
ASSERT_EQ(exposureTimeRange.count, 2u);
int64_t minExp = exposureTimeRange.data.i64[0];
int64_t maxExp = exposureTimeRange.data.i64[1];
ASSERT_GT(maxExp, minExp);
camera_metadata_ro_entry sensivityRange =
GetStaticEntry(ANDROID_SENSOR_INFO_SENSITIVITY_RANGE);
ASSERT_EQ(2u, sensivityRange.count);
int32_t minSensitivity = sensivityRange.data.i32[0];
int32_t maxSensitivity = sensivityRange.data.i32[1];
camera_metadata_ro_entry maxAnalogSenEntry =
GetStaticEntry(ANDROID_SENSOR_MAX_ANALOG_SENSITIVITY);
EXPECT_EQ(1u, maxAnalogSenEntry.count);
int32_t maxAnalogSensitivity = maxAnalogSenEntry.data.i32[0];
EXPECT_LE(maxAnalogSensitivity, maxSensitivity);
// Only test sensitivities implemented by analog gain.
if (maxAnalogSensitivity > maxSensitivity) {
// Fall back to maxSensitivity
maxAnalogSensitivity = maxSensitivity;
}
// Build the sensitivity list; only include sensitivities that are
// implemented purely by analog gain, if possible.
Vector<int32_t> sensitivities;
Vector<int64_t> exposures;
count = (maxAnalogSensitivity - minSensitivity + 99) / 100;
sensitivities.push_back(minSensitivity);
for (size_t i = 1; i < count; i++) {
sensitivities.push_back(minSensitivity + i * 100);
}
sensitivities.push_back(maxAnalogSensitivity);
ALOGV("Sensitivity Range: min=%d, max=%d", minSensitivity,
maxAnalogSensitivity);
int64_t exp = minExp;
while (exp < maxExp) {
exposures.push_back(exp);
exp *= 2;
}
// Sweep the exposure value for preview, just for visual inspection purpose.
uint8_t cmOff = static_cast<uint8_t>(ANDROID_CONTROL_MODE_OFF);
for (size_t i = 0; i < exposures.size(); i++) {
ASSERT_EQ(OK, previewRequest.update(
ANDROID_CONTROL_MODE,
&cmOff, 1));
ASSERT_EQ(OK, previewRequest.update(
ANDROID_SENSOR_EXPOSURE_TIME,
&exposures[i], 1));
ALOGV("Submitting preview request %d with exposure %lld",
i, exposures[i]);
ASSERT_EQ(OK, mDevice->setStreamingRequest(previewRequest));
// Let preview run 200ms on screen for each exposure time.
usleep(PREVIEW_RENDERING_TIME_INTERVAL);
}
size_t requestCount = sensitivities.size();
if (requestCount > exposures.size()) {
requestCount = exposures.size();
}
// To keep request IDs unique (the preview request ID is 0), start the
// burst capture request IDs at 1 here.
int32_t requestIdStart = 1;
/**
* Submit the metering request. Set the default frame duration to the
* minimal possible value, since we want the capture to run as fast as
* possible. The HAL should adjust the frame duration to the minimum
* necessary to support the requested exposure value whenever the
* exposure is longer than the frame duration.
*/
CaptureBurst(meteringRequest, requestCount, exposures, sensitivities,
meteringStream, minFrameDuration, &requestIdStart);
/**
* Submit the capture request. Set the default frame duration to the
* minimal possible value, since we want the capture to run as fast as
* possible. The HAL should adjust the frame duration to the minimum
* necessary to support the requested exposure value whenever the
* exposure is longer than the frame duration.
*/
CaptureBurst(captureRequest, requestCount, exposures, sensitivities,
captureStream, minFrameDuration, &requestIdStart);
ASSERT_EQ(OK, mDevice->clearStreamingRequest());
}
}
}
}


@@ -19,6 +19,7 @@
 #include <gtest/gtest.h>
 #include <iostream>
+#include <fstream>
 #include <gui/CpuConsumer.h>
 #include <gui/Surface.h>
@@ -29,6 +30,8 @@
 #include "CameraModuleFixture.h"
 #include "TestExtensions.h"
+#define ALIGN(x, mask) ( ((x) + (mask) - 1) & ~((mask) - 1) )
 namespace android {
 namespace camera2 {
 namespace tests {
@@ -90,7 +93,6 @@ private:
     CameraModuleFixture::SetUp();
-    CameraStreamParams p = mParam;
     sp<CameraDeviceBase> device = mDevice;
     /* use an arbitrary w,h */
@@ -159,11 +161,11 @@ protected:
     sp<CameraDeviceBase> device = mDevice;
     CameraStreamParams p = mParam;
-    mCpuConsumer = new CpuConsumer(p.mHeapCount);
+    sp<BufferQueue> bq = new BufferQueue();
+    mCpuConsumer = new CpuConsumer(bq, p.mHeapCount);
     mCpuConsumer->setName(String8("CameraStreamTest::mCpuConsumer"));
-    mNativeWindow = new Surface(
-        mCpuConsumer->getProducerInterface());
+    mNativeWindow = new Surface(bq);
     int format = MapAutoFormat(p.mFormat);
@@ -194,6 +196,80 @@
     return format;
 }
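// Writes the locked buffer out as planar YUV 4:2:0: the full-resolution
// Y plane followed by the subsampled Cb plane, then Cr, regardless of
// whether the source chroma layout was planar or semi-planar.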
void DumpYuvToFile(const String8 &fileName, const CpuConsumer::LockedBuffer &img) {
uint8_t *dataCb, *dataCr;
uint32_t stride;
uint32_t chromaStride;
uint32_t chromaStep;
switch (img.format) {
case HAL_PIXEL_FORMAT_YCbCr_420_888:
stride = img.stride;
chromaStride = img.chromaStride;
chromaStep = img.chromaStep;
dataCb = img.dataCb;
dataCr = img.dataCr;
break;
case HAL_PIXEL_FORMAT_YCrCb_420_SP:
stride = img.width;
chromaStride = img.width;
chromaStep = 2;
dataCr = img.data + img.width * img.height;
dataCb = dataCr + 1;
break;
case HAL_PIXEL_FORMAT_YV12:
stride = img.stride;
chromaStride = ALIGN(img.width / 2, 16);
chromaStep = 1;
dataCr = img.data + img.stride * img.height;
dataCb = dataCr + chromaStride * img.height/2;
break;
default:
ALOGE("Unknown format %d, not dumping", img.format);
return;
}
// Write Y
FILE *yuvFile = fopen(fileName.string(), "w");
if (yuvFile == NULL) {
ALOGE("Unable to open output file %s", fileName.string());
return;
}
size_t bytes;
for (size_t y = 0; y < img.height; ++y) {
bytes = fwrite(
reinterpret_cast<const char*>(img.data + stride * y),
1, img.width, yuvFile);
if (bytes != img.width) {
ALOGE("Unable to write to file %s", fileName.string());
fclose(yuvFile);
return;
}
}
// Write Cb/Cr
uint8_t *src = dataCb;
for (int c = 0; c < 2; ++c) {
for (size_t y = 0; y < img.height / 2; ++y) {
uint8_t *px = src + y * chromaStride;
if (chromaStep != 1) {
for (size_t x = 0; x < img.width / 2; ++x) {
fputc(*px, yuvFile);
px += chromaStep;
}
} else {
bytes = fwrite(reinterpret_cast<const char*>(px),
1, img.width / 2, yuvFile);
if (bytes != img.width / 2) {
ALOGE("Unable to write to file %s", fileName.string());
fclose(yuvFile);
return;
}
}
}
src = dataCr;
}
fclose(yuvFile);
}
int mWidth;
int mHeight;


@@ -25,12 +25,12 @@
 #include "hardware/hardware.h"
 #include "hardware/camera2.h"
-#include "Camera2Device.h"
-#include "utils/StrongPointer.h"
+#include <utils/StrongPointer.h>
 #include <gui/CpuConsumer.h>
 #include <gui/Surface.h>
+#include <device2/Camera2Device.h>
 #include "CameraStreamFixture.h"
 #include "TestExtensions.h"


@@ -201,7 +201,8 @@ class Camera2Test: public testing::Test {
     if (mDevice != NULL) {
         closeCameraDevice(&mDevice);
     }
-    mDevice = openCameraDevice(id);
+    mId = id;
+    mDevice = openCameraDevice(mId);
     ASSERT_TRUE(NULL != mDevice) << "Failed to open camera device";
     camera_info info;
@@ -334,6 +335,7 @@ class Camera2Test: public testing::Test {
         TearDownModule();
     }
+    int mId;
     camera2_device *mDevice;
     const camera_metadata_t *mStaticInfo;
@@ -386,7 +388,8 @@ TEST_F(Camera2Test, Capture1Raw) {
     ASSERT_NO_FATAL_FAILURE(setUpCamera(id));
-    sp<CpuConsumer> rawConsumer = new CpuConsumer(1);
+    sp<BufferQueue> bq = new BufferQueue();
+    sp<CpuConsumer> rawConsumer = new CpuConsumer(bq, 1);
     sp<FrameWaiter> rawWaiter = new FrameWaiter();
     rawConsumer->setFrameAvailableListener(rawWaiter);
@@ -417,8 +420,7 @@ TEST_F(Camera2Test, Capture1Raw) {
     int streamId;
     ASSERT_NO_FATAL_FAILURE(
-        setUpStream(rawConsumer->getProducerInterface(),
-            width, height, format, &streamId) );
+        setUpStream(bq, width, height, format, &streamId) );
     camera_metadata_t *request;
     request = allocate_camera_metadata(20, 2000);
@@ -520,7 +522,8 @@ TEST_F(Camera2Test, CaptureBurstRaw) {
     ASSERT_NO_FATAL_FAILURE(setUpCamera(id));
-    sp<CpuConsumer> rawConsumer = new CpuConsumer(1);
+    sp<BufferQueue> bq = new BufferQueue();
+    sp<CpuConsumer> rawConsumer = new CpuConsumer(bq, 1);
     sp<FrameWaiter> rawWaiter = new FrameWaiter();
     rawConsumer->setFrameAvailableListener(rawWaiter);
@@ -551,8 +554,7 @@ TEST_F(Camera2Test, CaptureBurstRaw) {
     int streamId;
     ASSERT_NO_FATAL_FAILURE(
-        setUpStream(rawConsumer->getProducerInterface(),
-            width, height, format, &streamId) );
+        setUpStream(bq, width, height, format, &streamId) );
     camera_metadata_t *request;
     request = allocate_camera_metadata(20, 2000);
@@ -701,7 +703,8 @@ TEST_F(Camera2Test, Capture1Jpeg) {
     ASSERT_NO_FATAL_FAILURE(setUpCamera(id));
-    sp<CpuConsumer> jpegConsumer = new CpuConsumer(1);
+    sp<BufferQueue> bq = new BufferQueue();
+    sp<CpuConsumer> jpegConsumer = new CpuConsumer(bq, 1);
     sp<FrameWaiter> jpegWaiter = new FrameWaiter();
     jpegConsumer->setFrameAvailableListener(jpegWaiter);
@@ -720,8 +723,7 @@ TEST_F(Camera2Test, Capture1Jpeg) {
     int streamId;
     ASSERT_NO_FATAL_FAILURE(
-        setUpStream(jpegConsumer->getProducerInterface(),
-            width, height, format, &streamId) );
+        setUpStream(bq, width, height, format, &streamId) );
     camera_metadata_t *request;
     request = allocate_camera_metadata(20, 2000);


@@ -210,7 +210,6 @@ int MetadataQueue::consumer_dequeue(const camera2_request_queue_src_ops_t *q,
 int MetadataQueue::consumer_free(const camera2_request_queue_src_ops_t *q,
                                  camera_metadata_t *old_buffer) {
-    MetadataQueue *queue = getInstance(q);
     free_camera_metadata(old_buffer);
     return OK;
 }

tests/hwc/Android.mk (new file, 14 lines)

@@ -0,0 +1,14 @@
LOCAL_PATH:= $(call my-dir)
include $(CLEAR_VARS)
LOCAL_MODULE := libcnativewindow
LOCAL_SRC_FILES := cnativewindow.c util.c
include $(BUILD_STATIC_LIBRARY)
include $(CLEAR_VARS)
LOCAL_MODULE := hwc-test-arrows
LOCAL_SRC_FILES := test-arrows.c
LOCAL_STATIC_LIBRARIES := libcnativewindow
LOCAL_SHARED_LIBRARIES := libEGL libGLESv2 libdl libhardware
LOCAL_CFLAGS := -DGL_GLEXT_PROTOTYPES
include $(BUILD_EXECUTABLE)

tests/hwc/cnativewindow.c (new file, 578 lines)

@@ -0,0 +1,578 @@
/*
* Copyright (C) 2013 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdio.h>
#include <stdlib.h>
#include <stdarg.h>
#include <string.h>
#include <errno.h>
#include <pthread.h>
#include <hardware/hardware.h>
#include <hardware/gralloc.h>
#include <hardware/hwcomposer.h>
#include <system/window.h>
#include <cutils/native_handle.h>
// normalize and shorten type names
typedef struct android_native_base_t aBase;
typedef struct ANativeWindowBuffer aBuffer;
typedef struct ANativeWindow aWindow;
static int trace_level = 1;
#define _TRACE(n,fmt...) \
do { if (trace_level >= n) fprintf(stderr, "CNW: " fmt); } while (0)
#define ERROR(fmt...) _TRACE(0, fmt)
#define INFO(fmt...) _TRACE(1, fmt)
#define LOG(fmt...) _TRACE(2, fmt)
#define TRACE(fmt...) _TRACE(3, fmt)
#define QCT_WORKAROUND 1
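// When enabled, an extra HWC_FRAMEBUFFER_TARGET layer is submitted with
// every frame (see hwc_post); some hwcomposer implementations reject a
// display-contents list that does not carry one.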
typedef struct CNativeBuffer {
aBuffer base;
struct CNativeBuffer *next;
struct CNativeBuffer *prev;
int ffd;
} CNativeBuffer;
typedef struct CNativeWindow {
aWindow base;
hwc_composer_device_1_t *hwc;
framebuffer_device_t *fb;
alloc_device_t *gr;
pthread_mutex_t lock;
pthread_cond_t cvar;
aBuffer *front;
aBuffer *spare;
CNativeBuffer free_buffer_queue;
unsigned width;
unsigned height;
unsigned xdpi;
unsigned ydpi;
unsigned format;
hwc_display_contents_1_t *dclist[HWC_NUM_PHYSICAL_DISPLAY_TYPES];
hwc_display_contents_1_t dc;
hwc_layer_1_t layer[4];
} CNativeWindow;
static inline CNativeBuffer *from_abuffer(aBuffer *buf) {
return (CNativeBuffer*) buf;
}
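// The free buffer queue is a circular doubly-linked list; the list head
// embedded in CNativeWindow acts as a sentinel, so an empty queue simply
// points at itself.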
static CNativeBuffer *get_front(struct CNativeBuffer *queue) {
CNativeBuffer *buf = queue->next;
if (buf == queue)
return 0;
buf->next->prev = queue;
queue->next = buf->next;
buf->next = buf->prev = 0;
return buf;
}
static void put_front(struct CNativeBuffer *queue, aBuffer *_buf) {
struct CNativeBuffer *buf = (struct CNativeBuffer *) _buf;
buf->prev = queue;
buf->next = queue->next;
queue->next->prev = buf;
queue->next = buf;
}
static void put_back(struct CNativeBuffer *queue, aBuffer *_buf) {
struct CNativeBuffer *buf = (struct CNativeBuffer *) _buf;
buf->next = queue;
buf->prev = queue->prev;
queue->prev->next = buf;
queue->prev = buf;
}
static void cnw_inc_ref(aBase *base) { TRACE("buf %p ref++\n",base); }
static void cnw_dec_ref(aBase *base) { TRACE("buf %p ref--\n",base); }
static inline CNativeWindow *from_base(aWindow *base) {
return (CNativeWindow *) base;
}
static inline CNativeWindow *from_base_const(const aWindow *base) {
return (CNativeWindow *) base;
}
static int cnw_set_swap_interval(aWindow *base, int interval) {
CNativeWindow *win = from_base(base);
if (win->fb && win->fb->setSwapInterval)
return win->fb->setSwapInterval(win->fb, interval);
return 0;
}
static int cnw_dequeue_buffer1(aWindow *base, aBuffer **buf, int *ffd) {
CNativeWindow *win = from_base(base);
CNativeBuffer *cnb;
pthread_mutex_lock(&win->lock);
while ((cnb = get_front(&win->free_buffer_queue)) == 0) {
pthread_cond_wait(&win->cvar, &win->lock);
}
*ffd = cnb->ffd;
*buf = &cnb->base;
cnb->ffd = -1;
LOG("<< dequeue buffer %p %d\n", *buf, *ffd);
pthread_mutex_unlock(&win->lock);
return 0;
}
static int cnw_lock_buffer0(aWindow *base, aBuffer *buffer) {
return 0;
}
static void set_layer(hwc_layer_1_t *dl, aBuffer *buf, int ffd) {
int right = buf->width;
int bottom = buf->height;
dl->compositionType = HWC_FRAMEBUFFER;
dl->hints = 0;
dl->flags = 0;
dl->handle = buf->handle;
dl->transform = 0;
dl->blending = HWC_BLENDING_NONE;
dl->sourceCrop.left = 0;
dl->sourceCrop.top = 0;
dl->sourceCrop.right = right;
dl->sourceCrop.bottom = bottom;
dl->displayFrame.left = 0;
dl->displayFrame.top = 0;
dl->displayFrame.right = right;
dl->displayFrame.bottom = bottom;
dl->visibleRegionScreen.numRects = 1;
dl->visibleRegionScreen.rects = &dl->displayFrame;
dl->acquireFenceFd = ffd;
dl->releaseFenceFd = -1;
}
static void hwc_post(CNativeWindow *win, aBuffer *buf, int ffd) {
hwc_composer_device_1_t *hwc = win->hwc;
hwc_display_contents_1_t *dc = &(win->dc);
hwc_layer_1_t *dl = win->dc.hwLayers;
int r, i;
dc->retireFenceFd = -1;
dc->outbufAcquireFenceFd = -1;
dc->flags = HWC_GEOMETRY_CHANGED;
dc->numHwLayers = 1;
// some hwcomposers fail if these are NULL
dc->dpy = (void*) 0xdeadbeef;
dc->sur = (void*) 0xdeadbeef;
set_layer(&dl[0], buf, ffd);
if (QCT_WORKAROUND) {
set_layer(&dl[1], win->spare, -1);
dl[1].compositionType = HWC_FRAMEBUFFER_TARGET;
dc->numHwLayers++;
}
r = hwc->prepare(hwc, HWC_NUM_PHYSICAL_DISPLAY_TYPES, win->dclist);
if (r) {
ERROR("hwc->prepare failed r=%d\n",r);
return;
}
// for (i = 0; i < dc->numHwLayers; i++)
// LOG("dl[%d] ctype=0x%08x hints=0x%08x flags=0x%08x\n", i,
// dl[i].compositionType, dl[0].hints, dl[0].flags);
r = hwc->set(hwc, HWC_NUM_PHYSICAL_DISPLAY_TYPES, win->dclist);
if (r) {
ERROR("hwc->set failed, r=%d\n", r);
return;
}
if (dc->retireFenceFd != -1)
close(dc->retireFenceFd);
if (dl->releaseFenceFd != -1) {
CNativeBuffer *cnb = from_abuffer(buf);
cnb->ffd = dl->releaseFenceFd;
}
if (QCT_WORKAROUND)
if (dl[1].releaseFenceFd != -1)
close(dl[1].releaseFenceFd);
}
static int cnw_queue_buffer1(aWindow *base, aBuffer *buffer, int ffd) {
CNativeWindow *win = from_base(base);
int res;
LOG(">> queue buffer %p %d\n", buffer, ffd);
if (win->fb) {
res = win->fb->post(win->fb, buffer->handle);
if (ffd != -1)
close(ffd);
} else {
hwc_post(win, buffer, ffd);
res = 0;
}
pthread_mutex_lock(&win->lock);
if (win->front)
put_back(&win->free_buffer_queue, win->front);
win->front = buffer;
pthread_cond_signal(&win->cvar);
pthread_mutex_unlock(&win->lock);
return res;
}
static int cnw_cancel_buffer1(aWindow *base, aBuffer *buf, int ffd) {
CNativeWindow *win = from_base(base);
CNativeBuffer *cnb = from_abuffer(buf);
LOG("<< cancel buffer %p %d\n", buf, ffd);
cnb->ffd = ffd;
pthread_mutex_lock(&win->lock);
put_front(&win->free_buffer_queue, buf);
pthread_mutex_unlock(&win->lock);
return 0;
}
static int cnw_dequeue_buffer0(aWindow *base, aBuffer **buf) {
int ffd = -1;
int r;
r = cnw_dequeue_buffer1(base, buf, &ffd);
if (ffd != -1)
close(ffd);
return r;
}
static int cnw_queue_buffer0(aWindow *base, aBuffer *buf) {
return cnw_queue_buffer1(base, buf, -1);
}
static int cnw_cancel_buffer0(aWindow *base, aBuffer *buf) {
return cnw_cancel_buffer1(base, buf, -1);
}
static int cnw_query(const aWindow *base, int what, int *value) {
CNativeWindow *win = from_base_const(base);
switch (what) {
case NATIVE_WINDOW_WIDTH:
case NATIVE_WINDOW_DEFAULT_WIDTH:
*value = win->width;
TRACE("query window width: %d\n", *value);
return 0;
case NATIVE_WINDOW_HEIGHT:
case NATIVE_WINDOW_DEFAULT_HEIGHT:
*value = win->height;
TRACE("query window height: %d\n", *value);
return 0;
case NATIVE_WINDOW_FORMAT:
*value = win->format;
TRACE("query window format: %d\n", *value);
return 0;
case NATIVE_WINDOW_TRANSFORM_HINT:
TRACE("query transform hint: 0\n");
*value = 0;
return 0;
case NATIVE_WINDOW_MIN_UNDEQUEUED_BUFFERS:
TRACE("query min undequeued buffers: 1\n");
*value = 1;
return 0;
default:
*value = 0;
ERROR("query %d unknown!\n", what);
return -EINVAL;
}
}
static int cnw_perform(aWindow *base, int op, ...) {
CNativeWindow *win = from_base(base);
va_list ap;
va_start(ap, op);
switch (op) {
case NATIVE_WINDOW_SET_USAGE:
TRACE("set usage %d\n", va_arg(ap,int));
return 0;
case NATIVE_WINDOW_CONNECT:
case NATIVE_WINDOW_DISCONNECT:
case NATIVE_WINDOW_API_CONNECT:
case NATIVE_WINDOW_API_DISCONNECT:
return 0;
case NATIVE_WINDOW_SET_BUFFERS_FORMAT:
TRACE("set buffers format %d\n", va_arg(ap,int));
return 0;
case NATIVE_WINDOW_SET_BUFFERS_TRANSFORM:
TRACE("set buffers transform %d\n", va_arg(ap,int));
return 0;
case NATIVE_WINDOW_SET_BUFFERS_TIMESTAMP:
TRACE("set buffers timestamp %lld\n", va_arg(ap,long long));
return 0;
case NATIVE_WINDOW_SET_SCALING_MODE:
TRACE("set scaling mode %d\n", va_arg(ap,int));
return 0;
case NATIVE_WINDOW_SET_BUFFERS_DIMENSIONS: {
int w = va_arg(ap,int);
int h = va_arg(ap,int);
if ((w == win->width) && (h == win->height)) {
TRACE("set buffers dimensions %d x %d\n", w, h);
return 0;
}
ERROR("cannot resize buffers to %d x %d\n", w, h);
return -1;
}
default:
ERROR("perform %d unknown!\n", op);
return -ENODEV;
}
}
static void hwc_invalidate(const struct hwc_procs *procs) {}
static void hwc_vsync(const struct hwc_procs *procs, int disp, int64_t ts) {}
static void hwc_hotplug(const struct hwc_procs *procs, int disp, int conn) {}
struct hwc_procs hprocs = {
.invalidate = hwc_invalidate,
.vsync = hwc_vsync,
.hotplug = hwc_hotplug,
};
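// No-op display event callbacks: the hwcomposer requires a procs table
// to be registered, but this test ignores invalidate/vsync/hotplug.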
uint32_t attrs[] = {
HWC_DISPLAY_WIDTH,
HWC_DISPLAY_HEIGHT,
HWC_DISPLAY_VSYNC_PERIOD,
HWC_DISPLAY_DPI_X,
HWC_DISPLAY_DPI_Y,
HWC_DISPLAY_NO_ATTRIBUTE,
};
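// getDisplayAttributes fills values[] in the same order as attrs[]:
// width, height, vsync period, DPI x, DPI y.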
static int hwc_init(CNativeWindow *win) {
hw_module_t const* module;
hwc_composer_device_1_t *hwc;
unsigned i;
int r;
uint32_t configs[32];
uint32_t numconfigs = 32;
int32_t values[8];
if (hw_get_module(HWC_HARDWARE_MODULE_ID, &module) != 0) {
ERROR("cannot open hw composer module\n");
return -ENODEV;
}
if (hwc_open_1(module, &hwc)) {
ERROR("cannot open hwc device\n");
return -ENODEV;
}
win->hwc = hwc;
LOG("hwc version 0x%08x\n", hwc->common.version);
if ((hwc->common.version & 0xFFFF0000) < 0x01010000) {
ERROR("hwc version less than 1.1\n");
hwc_close_1(hwc);
return -ENODEV;
}
hwc->registerProcs(hwc, &hprocs);
if (hwc->getDisplayConfigs(hwc, 0, configs, &numconfigs)) {
ERROR("cannot get configs\n");
return -ENODEV;
}
for (i = 0; i < numconfigs; i++)
LOG("cfg[%d] = 0x%08x\n", i, configs[i]);
if ((r = hwc->getDisplayAttributes(hwc, 0, configs[0], attrs, values))) {
ERROR("cannot get attributes %d\n", r);
return -ENODEV;
}
win->width = values[0];
win->height = values[1];
win->xdpi = values[3];
win->ydpi = values[4];
win->format = HAL_PIXEL_FORMAT_RGBA_8888;
hwc->blank(hwc, 0, 0);
win->dclist[0] = &(win->dc);
return 0;
}
static aBuffer *cnw_alloc(CNativeWindow *win, unsigned format, unsigned usage) {
CNativeBuffer *cnb;
aBuffer *buf;
int err;
if (!(cnb = malloc(sizeof(CNativeBuffer))))
return 0;
buf = &cnb->base;
cnb->ffd = -1;
buf->common.magic = ANDROID_NATIVE_BUFFER_MAGIC;
buf->common.version = sizeof(aBuffer);
buf->common.incRef = cnw_inc_ref;
buf->common.decRef = cnw_dec_ref;
buf->width = win->width;
buf->height = win->height;
buf->format = format;
buf->usage = usage;
err = win->gr->alloc(win->gr, win->width, win->height,
format, usage, &buf->handle, &buf->stride);
if (err) {
ERROR("gralloc of %d x %d failed: err=%d\n",
win->width, win->height, err);
free(buf);
return 0;
}
INFO("alloc buffer %p %d x %d\n", buf, win->width, win->height);
return buf;
}
static int cnw_init(CNativeWindow *win) {
hw_module_t const* module;
framebuffer_device_t *fb = NULL;
alloc_device_t *gr;
int err, i, n;
unsigned usage, format;
memset(win, 0, sizeof(CNativeWindow));
win->free_buffer_queue.next = &(win->free_buffer_queue);
win->free_buffer_queue.prev = &(win->free_buffer_queue);
if (hw_get_module(GRALLOC_HARDWARE_MODULE_ID, &module) != 0) {
ERROR("cannot open gralloc module\n");
return -ENODEV;
}
if (hwc_init(win)) {
ERROR("cannot open hwcomposer, trying legacy fb HAL\n");
err = framebuffer_open(module, &fb);
if (err) {
ERROR("cannot open fb HAL (%s)", strerror(-err));
return -ENODEV;
}
win->width = fb->width;
win->height = fb->height;
win->format = fb->format;
win->xdpi = fb->xdpi;
win->ydpi = fb->ydpi;
win->fb = fb;
}
INFO("display %d x %d fmt=%d\n",
win->width, win->height, win->format);
err = gralloc_open(module, &gr);
if (err) {
ERROR("couldn't open gralloc HAL (%s)", strerror(-err));
return -ENODEV;
}
win->gr = gr;
usage = GRALLOC_USAGE_HW_FB |
GRALLOC_USAGE_HW_COMPOSER |
GRALLOC_USAGE_HW_RENDER;
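// Allocate two buffers up front so the window can double-buffer between
// the frame being displayed and the frame being rendered.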
for (i = 0; i < 2; i++) {
aBuffer *buf = cnw_alloc(win, win->format, usage);
if (!buf)
return -ENOMEM;
put_back(&win->free_buffer_queue, buf);
}
if (!win->fb && QCT_WORKAROUND) {
win->spare = cnw_alloc(win, win->format, usage);
if (!win->spare)
return -ENOMEM;
}
// Disgusting, but we need to init these "const" fields
// and unlike C++ we can't use const_cast<>
*((float*) &win->base.xdpi) = win->xdpi;
*((float*) &win->base.ydpi) = win->ydpi;
*((int*) &win->base.minSwapInterval) = 1;
*((int*) &win->base.maxSwapInterval) = 1;
win->base.common.magic = ANDROID_NATIVE_WINDOW_MAGIC;
win->base.common.version = sizeof(aWindow);
win->base.common.incRef = cnw_inc_ref;
win->base.common.decRef = cnw_dec_ref;
win->base.setSwapInterval = cnw_set_swap_interval;
win->base.dequeueBuffer_DEPRECATED = cnw_dequeue_buffer0;
win->base.lockBuffer_DEPRECATED = cnw_lock_buffer0;
win->base.queueBuffer_DEPRECATED = cnw_queue_buffer0;
win->base.query = cnw_query;
win->base.perform = cnw_perform;
win->base.cancelBuffer_DEPRECATED = cnw_cancel_buffer0;
win->base.dequeueBuffer = cnw_dequeue_buffer1;
win->base.queueBuffer = cnw_queue_buffer1;
win->base.cancelBuffer = cnw_cancel_buffer1;
pthread_mutex_init(&win->lock, NULL);
pthread_cond_init(&win->cvar, NULL);
return 0;
}
void cnw_destroy(CNativeWindow *win) {
if (win->fb)
framebuffer_close(win->fb);
if (win->hwc)
hwc_close_1(win->hwc);
if (win->gr)
gralloc_close(win->gr);
free(win);
}
CNativeWindow *cnw_create(void) {
CNativeWindow *win;
char *x;
if ((x = getenv("CNWDEBUG")))
trace_level = atoi(x);
if (!(win = malloc(sizeof(CNativeWindow))))
return NULL;
if (cnw_init(win)) {
cnw_destroy(win);
return NULL;
}
return win;
}
void cnw_info(CNativeWindow *win, unsigned *w, unsigned *h, unsigned *fmt) {
*w = win->width;
*h = win->height;
*fmt = win->format;
}

tests/hwc/test-arrows.c (new file, 164 lines)

@@ -0,0 +1,164 @@
/*
* Copyright (C) 2013 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdlib.h>
#include <stdio.h>
#include <EGL/egl.h>
#include <GLES2/gl2.h>
#include "util.h"
static const char gVertexShader[] =
"attribute vec4 aPosition;\n"
"uniform mat4 uTransform;\n"
"varying vec4 vTexCoord;\n"
"void main() {\n"
" gl_Position = aPosition * uTransform;\n"
" vTexCoord = aPosition * vec4(1.0/16.0,-1.0/16.0,0.0,0.0);\n"
"}\n";
static const char gFragmentShader[] =
"precision mediump float;\n"
"uniform sampler2D uTexture;\n"
"uniform float uAnim;\n"
"varying vec4 vTexCoord;\n"
"void main() {\n"
" vec2 tc = vec2(vTexCoord.x, uAnim + vTexCoord.y);\n"
" gl_FragColor = texture2D(uTexture, tc);\n"
"}\n";
static GLuint pgm;
static GLint aPosition, uTransform, uTexture, uAnim;
static GLfloat vtx[2 * 3 * 2];
static GLfloat mtx[16];
//#define R (0xFF0000FF)
#define R (0xFF000000)
#define G (0xFF00FF00)
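// 16x16 RGBA texel array: a green arrow pattern on an opaque black
// background. The fragment shader scrolls it vertically via uAnim.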
uint32_t t32[] = {
R, R, R, R, R, R, R, G, G, R, R, R, R, R, R, R,
R, R, R, R, R, R, G, G, G, G, R, R, R, R, R, R,
R, R, R, R, R, G, G, G, G, G, G, R, R, R, R, R,
R, R, R, R, G, G, G, G, G, G, G, G, R, R, R, R,
R, R, R, G, G, G, G, G, G, G, G, G, G, R, R, R,
R, R, G, G, G, G, G, G, G, G, G, G, G, G, R, R,
R, R, G, G, G, G, G, G, G, G, G, G, G, G, R, R,
R, R, R, R, R, R, G, G, G, G, R, R, R, R, R, R,
R, R, R, R, R, R, G, G, G, G, R, R, R, R, R, R,
R, R, R, R, R, R, G, G, G, G, R, R, R, R, R, R,
R, R, R, R, R, R, G, G, G, G, R, R, R, R, R, R,
R, R, R, R, R, R, G, G, G, G, R, R, R, R, R, R,
R, R, R, R, R, R, G, G, G, G, R, R, R, R, R, R,
R, R, R, R, R, R, R, R, R, R, R, R, R, R, R, R,
R, R, R, R, R, R, R, R, R, R, R, R, R, R, R, R,
R, R, R, R, R, R, R, R, R, R, R, R, R, R, R, R,
};
#undef R
#undef G
int prepare(int w, int h) {
GLuint texid;
int left = w / 4;
int top = h / 4;
int right = (w / 4) * 3;
int bottom = (h / 4) * 3;
vtx[0] = left;
vtx[1] = top;
vtx[2] = left;
vtx[3] = bottom;
vtx[4] = right;
vtx[5] = bottom;
vtx[6] = right;
vtx[7] = bottom;
vtx[8] = right;
vtx[9] = top;
vtx[10] = left;
vtx[11] = top;
matrix_init_ortho(mtx, w, h);
pgm = load_program(gVertexShader, gFragmentShader);
if (!pgm)
return -1;
aPosition = glGetAttribLocation(pgm, "aPosition");
uTexture = glGetUniformLocation(pgm, "uTexture");
uTransform = glGetUniformLocation(pgm, "uTransform");
uAnim = glGetUniformLocation(pgm, "uAnim");
glViewport(0, 0, w, h);
glGenTextures(1, &texid);
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, texid);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
glEnable(GL_TEXTURE_2D);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, 16, 16, 0,
GL_RGBA, GL_UNSIGNED_BYTE, t32);
return 0;
}
static float anim = 0.0;
void render() {
anim += 0.1;
if (anim >= 16.0) anim = 0.0;
glClearColor(0.0f, 0.0f, 0.0f, 1.0f);
glClear(GL_DEPTH_BUFFER_BIT | GL_COLOR_BUFFER_BIT);
glUseProgram(pgm);
glUniform1i(uTexture, 0);
glUniform1f(uAnim, anim);
glUniformMatrix4fv(uTransform, 1, 0, mtx);
glVertexAttribPointer(aPosition, 2, GL_FLOAT, GL_FALSE, 0, vtx);
glEnableVertexAttribArray(aPosition);
glDrawArrays(GL_TRIANGLES, 0, 6);
}
int main(int argc, char **argv) {
EGLDisplay display;
EGLSurface surface;
int w, h, count = 0; // count == 0 means render until killed
if (argc > 1)
count = atoi(argv[1]);
if (egl_create(&display, &surface, &w, &h))
return -1;
if (prepare(w, h))
return -1;
for (;;) {
render();
eglSwapBuffers(display, surface);
if (count > 0)
if (--count == 0)
break;
}
egl_destroy(display, surface);
return 0;
}

tests/hwc/util.c (new file, 236 lines)

@@ -0,0 +1,236 @@
/*
* Copyright (C) 2013 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <EGL/egl.h>
#include <GLES2/gl2.h>
#include <system/graphics.h>
#include "util.h"
void matrix_init_ortho(GLfloat *m, float w, float h) {
m[0] = 2.0 / w;
m[1] = 0.0;
m[2] = 0.0;
m[3] = -1.0;
m[4] = 0.0;
m[5] = 2.0 / h;
m[6] = 0.0;
m[7] = -1.0;
m[8] = 0.0;
m[9] = 0.0;
m[10] = -1.0;
m[11] = 0.0;
m[12] = 0.0;
m[13] = 0.0;
m[14] = 0.0;
m[15] = 1.0;
}
static GLuint load_shader(GLenum shaderType, const char *src) {
GLint status = 0, len = 0;
GLuint shader;
if (!(shader = glCreateShader(shaderType)))
return 0;
glShaderSource(shader, 1, &src, NULL);
glCompileShader(shader);
glGetShaderiv(shader, GL_COMPILE_STATUS, &status);
if (status)
return shader;
glGetShaderiv(shader, GL_INFO_LOG_LENGTH, &len);
if (len) {
char *msg = malloc(len);
if (msg) {
glGetShaderInfoLog(shader, len, NULL, msg);
msg[len-1] = 0;
fprintf(stderr, "error compiling shader:\n%s\n", msg);
free(msg);
}
}
glDeleteShader(shader);
return 0;
}
GLuint load_program(const char *vert_src, const char *frag_src) {
GLuint vert, frag, prog;
GLint status = 0, len = 0;
if (!(vert = load_shader(GL_VERTEX_SHADER, vert_src)))
return 0;
if (!(frag = load_shader(GL_FRAGMENT_SHADER, frag_src)))
goto fail_frag;
if (!(prog = glCreateProgram()))
goto fail_prog;
glAttachShader(prog, vert);
glAttachShader(prog, frag);
glLinkProgram(prog);
glGetProgramiv(prog, GL_LINK_STATUS, &status);
if (status)
return prog;
glGetProgramiv(prog, GL_INFO_LOG_LENGTH, &len);
if (len) {
char *buf = (char*) malloc(len);
if (buf) {
glGetProgramInfoLog(prog, len, NULL, buf);
buf[len-1] = 0;
fprintf(stderr, "error linking program:\n%s\n", buf);
free(buf);
}
}
glDeleteProgram(prog);
fail_prog:
glDeleteShader(frag);
fail_frag:
glDeleteShader(vert);
return 0;
}
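// Pick the EGL config whose R/G/B/A channel sizes exactly match the
// native window's gralloc pixel format.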
int select_config_for_window(EGLDisplay dpy, EGLint *attr,
unsigned format, EGLConfig *config) {
EGLint R,G,B,A;
EGLint i, n, max;
EGLConfig *cfg;
switch (format) {
case HAL_PIXEL_FORMAT_RGBA_8888:
case HAL_PIXEL_FORMAT_BGRA_8888:
R = G = B = A = 8;
break;
case HAL_PIXEL_FORMAT_RGB_565:
R = 5; G = 6; B = 5; A = 0;
break;
default:
fprintf(stderr, "unknown fb pixel format %d\n", format);
return -1;
}
if (eglGetConfigs(dpy, NULL, 0, &max) == EGL_FALSE) {
fprintf(stderr, "no EGL configurations available?!\n");
return -1;
}
cfg = (EGLConfig*) malloc(sizeof(EGLConfig) * max);
if (!cfg)
return -1;
if (eglChooseConfig(dpy, attr, cfg, max, &n) == EGL_FALSE) {
fprintf(stderr, "eglChooseConfig failed\n");
free(cfg);
return -1;
}
for (i = 0; i < n; i++) {
EGLint r,g,b,a;
eglGetConfigAttrib(dpy, cfg[i], EGL_RED_SIZE, &r);
eglGetConfigAttrib(dpy, cfg[i], EGL_GREEN_SIZE, &g);
eglGetConfigAttrib(dpy, cfg[i], EGL_BLUE_SIZE, &b);
eglGetConfigAttrib(dpy, cfg[i], EGL_ALPHA_SIZE, &a);
if (r == R && g == G && b == B && a == A) {
*config = cfg[i];
free(cfg);
return 0;
}
}
fprintf(stderr, "cannot find matching config\n");
free(cfg);
return -1;
}
static struct CNativeWindow *_cnw = 0;
int egl_create(EGLDisplay *_display, EGLSurface *_surface, int *_w, int *_h) {
EGLBoolean res;
EGLConfig config = { 0 };
EGLint context_attrs[] = { EGL_CONTEXT_CLIENT_VERSION, 2, EGL_NONE };
EGLint config_attrs[] = {
EGL_SURFACE_TYPE, EGL_WINDOW_BIT,
EGL_RENDERABLE_TYPE, EGL_OPENGL_ES2_BIT,
EGL_NONE };
EGLint major, minor;
EGLContext context;
EGLSurface surface;
EGLint w, h;
EGLDisplay display;
EGLNativeWindowType window;
unsigned width, height, format;
struct CNativeWindow *cnw;
display = eglGetDisplay(EGL_DEFAULT_DISPLAY);
if (display == EGL_NO_DISPLAY)
return -1;
if (!(res = eglInitialize(display, &major, &minor)))
return -1;
fprintf(stderr, "egl version: %d.%d\n", major, minor);
if ((cnw = cnw_create()) == 0)
return -1;
cnw_info(cnw, &width, &height, &format);
window = (EGLNativeWindowType) cnw;
if ((res = select_config_for_window(display, config_attrs, format, &config)))
goto fail;
surface = eglCreateWindowSurface(display, config, window, NULL);
if (surface == EGL_NO_SURFACE)
goto fail;
context = eglCreateContext(display, config, EGL_NO_CONTEXT, context_attrs);
if (context == EGL_NO_CONTEXT)
goto fail;
if (!(res = eglMakeCurrent(display, surface, surface, context)))
goto fail;
eglQuerySurface(display, surface, EGL_WIDTH, &w);
eglQuerySurface(display, surface, EGL_HEIGHT, &h);
fprintf(stderr, "window: %d x %d\n", w, h);
*_display = display;
*_surface = surface;
*_w = w;
*_h = h;
_cnw = cnw;
return 0;
fail:
cnw_destroy(cnw);
return -1;
}
void egl_destroy(EGLDisplay display, EGLSurface surface) {
if (_cnw) {
eglDestroySurface(display, surface);
eglTerminate(display);
cnw_destroy(_cnw);
_cnw = 0;
}
}

tests/hwc/util.h (new file, 38 lines)

@@ -0,0 +1,38 @@
/*
* Copyright (C) 2013 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef _GL_UTIL_H_
#define _GL_UTIL_H_
/* convenience */
GLuint load_program(const char *vert_src, const char *frag_src);
void matrix_init_ortho(GLfloat *m, float w, float h);
/* context setup / teardown */
int egl_create(EGLDisplay *_display, EGLSurface *_surface, int *_w, int *_h);
void egl_destroy(EGLDisplay display, EGLSurface surface);
/* internals needed by util.c */
struct CNativeWindow;
struct CNativeWindow *cnw_create(void);
void cnw_destroy(struct CNativeWindow *win);
void cnw_info(struct CNativeWindow *win,
unsigned *w, unsigned *h, unsigned *fmt);
#endif