Merge "Merge Android Pie into master"

Xin Li 2018-08-07 16:51:26 +00:00 committed by Gerrit Code Review
commit 65d52dd3c9
56 changed files with 4041 additions and 4898 deletions

OWNERS

@@ -4,3 +4,4 @@ jpawlowski@google.com
malchev@google.com
smoreland@google.com
swillden@google.com
per-file include/hardware/camera*=etalvala@google.com


@@ -91,6 +91,17 @@ __BEGIN_DECLS
/* Bluetooth SCO wideband */
#define AUDIO_PARAMETER_KEY_BT_SCO_WB "bt_wbs"
/* BT SCO headset name for debug */
#define AUDIO_PARAMETER_KEY_BT_SCO_HEADSET_NAME "bt_headset_name"
/* BT SCO HFP control */
#define AUDIO_PARAMETER_KEY_HFP_ENABLE "hfp_enable"
#define AUDIO_PARAMETER_KEY_HFP_SET_SAMPLING_RATE "hfp_set_sampling_rate"
#define AUDIO_PARAMETER_KEY_HFP_VOLUME "hfp_volume"
/* Set screen orientation */
#define AUDIO_PARAMETER_KEY_ROTATION "rotation"
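These keys are plain key/value strings routed through the HAL's generic
set_parameters entry points. A minimal illustrative sketch (the adev handle
and the values shown are assumptions, not part of this change):

/* Hypothetical usage: enable HFP at 16 kHz, then report a screen rotation.
 * `adev` is assumed to be an opened audio_hw_device_t. */
adev->set_parameters(adev, "hfp_enable=true;hfp_set_sampling_rate=16000");
adev->set_parameters(adev, "rotation=90");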
/**
* audio stream parameters
*/
@@ -201,13 +212,24 @@ typedef enum {
give time for gapless track switch */
} audio_drain_type_t;
typedef struct source_metadata {
size_t track_count;
/** Array of metadata of each track connected to this source. */
struct playback_track_metadata* tracks;
} source_metadata_t;
typedef struct sink_metadata {
size_t track_count;
/** Array of metadata of each track connected to this sink. */
struct record_track_metadata* tracks;
} sink_metadata_t;
/**
* audio_stream_out is the abstraction interface for the audio output hardware.
*
* It provides information about various properties of the audio output
* hardware driver.
*/
struct audio_stream_out {
/**
* Common methods of the audio stream out. This *must* be the first member of audio_stream_out
@@ -392,6 +414,13 @@ struct audio_stream_out {
*/
int (*get_mmap_position)(const struct audio_stream_out *stream,
struct audio_mmap_position *position);
/**
* Called when the metadata of the stream's source has been changed.
* @param source_metadata Description of the audio that is played by the clients.
*/
void (*update_source_metadata)(struct audio_stream_out *stream,
const struct source_metadata* source_metadata);
};
typedef struct audio_stream_out audio_stream_out_t;
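For illustration, a sketch of how a caller might report one active media
track through the new update_source_metadata hook; the field names assume
the playback_track_metadata definition in system/audio.h:

/* Sketch only: describe a single music track to the HAL. */
static void report_one_music_track(struct audio_stream_out *stream) {
    struct playback_track_metadata track = {
        .usage = AUDIO_USAGE_MEDIA,
        .content_type = AUDIO_CONTENT_TYPE_MUSIC,
        .gain = 1.0f, /* linear gain applied to this track */
    };
    struct source_metadata metadata = { .track_count = 1, .tracks = &track };
    if (stream->update_source_metadata != NULL) /* optional entry point */
        stream->update_source_metadata(stream, &metadata);
}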
@@ -501,6 +530,31 @@ struct audio_stream_in {
*/
int (*get_mmap_position)(const struct audio_stream_in *stream,
struct audio_mmap_position *position);
/**
* Called by the framework to read active microphones
*
* \param[in] stream the stream object.
* \param[out] mic_array Pointer to the first element of an array of microphone info.
* \param[out] mic_count When called, this holds the maximum number of elements
allowed in the mic_array. The actual number of elements written
is returned here.
If mic_count is passed as zero, mic_array will not be populated,
and mic_count will return the actual number of active microphones.
*
* \return 0 if the microphone array is successfully filled.
* -ENOSYS if there is an error filling the data
*/
int (*get_active_microphones)(const struct audio_stream_in *stream,
struct audio_microphone_characteristic_t *mic_array,
size_t *mic_count);
/**
* Called when the metadata of the stream's sink has been changed.
* @param sink_metadata Description of the audio that is recorded by the clients.
*/
void (*update_sink_metadata)(struct audio_stream_in *stream,
const struct sink_metadata* sink_metadata);
};
typedef struct audio_stream_in audio_stream_in_t;
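A sketch of the documented two-call pattern for get_active_microphones:
query the count with mic_count == 0, then fill the array. It assumes
<stdlib.h> and that a NULL array is acceptable for the count-only query:

/* Sketch only: enumerate the active microphones of an input stream. */
static void log_active_mics(struct audio_stream_in *stream) {
    size_t mic_count = 0;
    if (stream->get_active_microphones == NULL ||
        stream->get_active_microphones(stream, NULL, &mic_count) != 0)
        return;
    struct audio_microphone_characteristic_t *mics =
            calloc(mic_count, sizeof(*mics));
    if (mics != NULL &&
        stream->get_active_microphones(stream, mics, &mic_count) == 0) {
        /* mic_count now holds the number of entries actually written */
    }
    free(mics);
}

The same pattern applies to the device-level get_microphones hook below.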
@@ -673,6 +727,25 @@ struct audio_hw_device {
void (*close_input_stream)(struct audio_hw_device *dev,
struct audio_stream_in *stream_in);
/**
* Called by the framework to read available microphones characteristics.
*
* \param[in] dev the hw_device object.
* \param[out] mic_array Pointer to the first element of an array of microphone info.
* \param[out] mic_count When called, this holds the maximum number of elements
allowed in the mic_array. The actual number of elements written
is returned here.
If mic_count is passed as zero, mic_array will not be populated,
* and mic_count will return the actual number of microphones in the
* system.
*
* \return 0 if the microphone array is successfully filled.
* -ENOSYS if there is an error filling the data
*/
int (*get_microphones)(const struct audio_hw_device *dev,
struct audio_microphone_characteristic_t *mic_array,
size_t *mic_count);
/** This method dumps the state of the audio hardware */
int (*dump)(const struct audio_hw_device *dev, int fd);


@@ -1,5 +1,5 @@
/*
* Copyright (C) 2013 The Android Open Source Project
* Copyright (C) 2013-2018 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -21,7 +21,7 @@
#include "camera_common.h"
/**
* Camera device HAL 3.4 [ CAMERA_DEVICE_API_VERSION_3_4 ]
* Camera device HAL 3.5 [ CAMERA_DEVICE_API_VERSION_3_5 ]
*
* This is the current recommended version of the camera device HAL.
*
@@ -29,7 +29,7 @@
* android.hardware.camera2 API as LIMITED or above hardware level.
*
* Camera devices that support this version of the HAL must return
* CAMERA_DEVICE_API_VERSION_3_4 in camera_device_t.common.version and in
* CAMERA_DEVICE_API_VERSION_3_5 in camera_device_t.common.version and in
* camera_info_t.device_version (from camera_module_t.get_camera_info).
*
* CAMERA_DEVICE_API_VERSION_3_3 and above:
@@ -157,6 +157,32 @@
* - ANDROID_SENSOR_DYNAMIC_WHITE_LEVEL
* - ANDROID_SENSOR_OPAQUE_RAW_SIZE
* - ANDROID_SENSOR_OPTICAL_BLACK_REGIONS
*
* 3.5: Minor revisions to support session parameters and logical multi camera:
*
* - Add ANDROID_REQUEST_AVAILABLE_SESSION_KEYS static metadata, which is
* optional for implementations that want to support session parameters. If support is
needed, then the HAL should populate the list with all available capture request keys
that can cause severe processing delays when modified by the client. Typical examples
* include parameters that require time-consuming HW re-configuration or internal camera
* pipeline update.
*
* - Add a session parameter field to camera3_stream_configuration which can be populated
* by clients with initial values for the keys found in ANDROID_REQUEST_AVAILABLE_SESSION_KEYS.
*
* - Metadata additions for logical multi camera capability:
* - ANDROID_REQUEST_AVAILABLE_CAPABILITIES_LOGICAL_MULTI_CAMERA
* - ANDROID_LOGICAL_MULTI_CAMERA_PHYSICAL_IDS
* - ANDROID_LOGICAL_MULTI_CAMERA_SYNC_TYPE
*
* - Add physical camera id field in camera3_stream, so that for a logical
* multi camera, the application has the option to specify which physical camera
* a particular stream is configured on.
*
* - Add physical camera id and settings field in camera3_capture_request, so that
* for a logical multi camera, the application has the option to specify individual
* settings for a particular physical device.
*
*/
/**
@@ -1687,8 +1713,31 @@ typedef struct camera3_stream {
*/
int rotation;
/**
* The physical camera id this stream belongs to.
*
* <= CAMERA_DEVICE_API_VERSION_3_4:
*
* Not defined and must not be accessed.
*
* >= CAMERA_DEVICE_API_VERSION_3_5:
*
* Always set by camera service. If the camera device is not a logical
* multi camera, or if the camera is a logical multi camera but the stream
* is not a physical output stream, this field will point to a 0-length
* string.
*
* A logical multi camera is a camera device backed by multiple physical
* cameras that are also exposed to the application. For a logical
* multi camera, a physical output stream is an output stream specifically
* requested on an underlying physical camera.
*
* For an input stream, this field is guaranteed to be a 0-length string.
*/
const char* physical_camera_id;
/* reserved for future use */
void *reserved[7];
void *reserved[6];
} camera3_stream_t;
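An illustrative helper (not part of the header) showing how a HAL might use
the new field to classify streams per the rules above; assumes <stdbool.h>:

/* Sketch only: true for an output stream bound to one physical camera. */
static bool is_physical_output_stream(const camera3_stream_t *stream) {
    return stream->stream_type == CAMERA3_STREAM_OUTPUT &&
           stream->physical_camera_id != NULL &&
           stream->physical_camera_id[0] != '\0';
}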
@@ -1734,6 +1783,18 @@ typedef struct camera3_stream_configuration {
*
*/
uint32_t operation_mode;
/**
* >= CAMERA_DEVICE_API_VERSION_3_5:
*
* The session metadata buffer contains the initial values of
* ANDROID_REQUEST_AVAILABLE_SESSION_KEYS. This field is optional
* and camera clients can choose to ignore it, in which case it will
be set to NULL. If parameters are present, then the HAL should examine
* the parameter values and configure its internal camera pipeline
* accordingly.
*/
const camera_metadata_t *session_parameters;
} camera3_stream_configuration_t;
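For illustration, a sketch of a HAL inspecting the optional session
parameters during stream configuration; it assumes the camera_metadata C
API from system/camera_metadata.h, and the tag shown is just an example:

/* Sketch only: pre-configure the pipeline from a session parameter. */
static void apply_session_params(const camera3_stream_configuration_t *cfg) {
    if (cfg->session_parameters == NULL)
        return; /* client chose not to provide initial values */
    camera_metadata_ro_entry_t entry;
    if (find_camera_metadata_ro_entry(cfg->session_parameters,
            ANDROID_CONTROL_AE_TARGET_FPS_RANGE, &entry) == 0 &&
        entry.count == 2) {
        /* reconfigure for the [entry.data.i32[0], entry.data.i32[1]] range */
    }
}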
/**
@@ -2217,6 +2278,44 @@ typedef struct camera3_capture_request {
*/
const camera3_stream_buffer_t *output_buffers;
/**
* <= CAMERA_DEVICE_API_VERSION_3_4:
*
* Not defined and must not be accessed.
*
* >= CAMERA_DEVICE_API_VERSION_3_5:
* The number of physical camera settings to be applied. If 'num_physcam_settings'
equals 0 or a physical device is not included, then the HAL must decide the
* specific physical device settings based on the default 'settings'.
*/
uint32_t num_physcam_settings;
/**
* <= CAMERA_DEVICE_API_VERSION_3_4:
*
* Not defined and must not be accessed.
*
* >= CAMERA_DEVICE_API_VERSION_3_5:
* The physical camera ids. The array will contain 'num_physcam_settings'
* camera id strings for all physical devices that have specific settings.
* In case some id is invalid, the process capture request must fail and return
* -EINVAL.
*/
const char **physcam_id;
/**
* <= CAMERA_DEVICE_API_VERSION_3_4:
*
* Not defined and must not be accessed.
*
* >= CAMERA_DEVICE_API_VERSION_3_5:
* The capture settings for the physical cameras. The array will contain
'num_physcam_settings' settings for individual physical devices. In
* case the settings at some particular index are empty, the process capture
* request must fail and return -EINVAL.
*/
const camera_metadata_t **physcam_settings;
} camera3_capture_request_t;
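An illustrative lookup (not part of the header) that resolves the settings
for one physical camera inside process_capture_request, falling back to the
logical 'settings' as required above; assumes <string.h>:

/* Sketch only: pick the metadata to apply to physical camera `id`. */
static const camera_metadata_t *settings_for_physcam(
        const camera3_capture_request_t *req, const char *id) {
    for (uint32_t i = 0; i < req->num_physcam_settings; i++) {
        if (strcmp(req->physcam_id[i], id) == 0)
            return req->physcam_settings[i];
    }
    return req->settings; /* HAL decides based on the default settings */
}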
/**
@@ -2394,6 +2493,37 @@ typedef struct camera3_capture_result {
*/
uint32_t partial_result;
/**
* >= CAMERA_DEVICE_API_VERSION_3_5:
*
* Specifies the number of physical camera metadata entries this capture result
* contains. It must be equal to the number of physical cameras being
* requested from.
*
* If the current camera device is not a logical multi-camera, or the
* corresponding capture_request doesn't request on any physical camera,
* this field must be 0.
*/
uint32_t num_physcam_metadata;
/**
* >= CAMERA_DEVICE_API_VERSION_3_5:
*
* An array of strings containing the physical camera ids for the returned
* physical camera metadata. The length of the array is
* num_physcam_metadata.
*/
const char **physcam_ids;
/**
* >= CAMERA_DEVICE_API_VERSION_3_5:
*
* The array of physical camera metadata for the physical cameras being
* requested upon. This array should have a 1-to-1 mapping with the
* physcam_ids. The length of the array is num_physcam_metadata.
*/
const camera_metadata_t **physcam_metadata;
} camera3_capture_result_t;
/**********************************************************************
@@ -2926,7 +3056,8 @@ typedef struct camera3_device_ops {
* 0: On a successful start to processing the capture request
*
* -EINVAL: If the input is malformed (the settings are NULL when not
* allowed, there are 0 output buffers, etc) and capture processing
* allowed, invalid physical camera settings,
* there are 0 output buffers, etc) and capture processing
* cannot start. Failures during request processing should be
* handled by calling camera3_callback_ops_t.notify(). In case of
* this error, the framework will retain responsibility for the


@@ -148,10 +148,11 @@ __BEGIN_DECLS
#define CAMERA_DEVICE_API_VERSION_3_2 HARDWARE_DEVICE_API_VERSION(3, 2)
#define CAMERA_DEVICE_API_VERSION_3_3 HARDWARE_DEVICE_API_VERSION(3, 3)
#define CAMERA_DEVICE_API_VERSION_3_4 HARDWARE_DEVICE_API_VERSION(3, 4)
#define CAMERA_DEVICE_API_VERSION_3_5 HARDWARE_DEVICE_API_VERSION(3, 5)
// Device version 3.4 is current, older HAL camera device versions are not
// Device version 3.5 is current, older HAL camera device versions are not
// recommended for new devices.
#define CAMERA_DEVICE_API_VERSION_CURRENT CAMERA_DEVICE_API_VERSION_3_4
#define CAMERA_DEVICE_API_VERSION_CURRENT CAMERA_DEVICE_API_VERSION_3_5
/**
* Defined in /system/media/camera/include/system/camera_metadata.h


@@ -155,7 +155,10 @@ typedef enum {
GRALLOC1_FUNCTION_UNLOCK = 20,
GRALLOC1_FUNCTION_SET_LAYER_COUNT = 21,
GRALLOC1_FUNCTION_GET_LAYER_COUNT = 22,
GRALLOC1_LAST_FUNCTION = 22,
GRALLOC1_FUNCTION_VALIDATE_BUFFER_SIZE = 23,
GRALLOC1_FUNCTION_GET_TRANSPORT_SIZE = 24,
GRALLOC1_FUNCTION_IMPORT_BUFFER = 25,
GRALLOC1_LAST_FUNCTION = 25,
} gralloc1_function_descriptor_t;
typedef enum {
@@ -670,6 +673,65 @@ typedef int32_t /*gralloc1_error_t*/ (*GRALLOC1_PFN_GET_PRODUCER_USAGE)(
typedef int32_t /*gralloc1_error_t*/ (*GRALLOC1_PFN_GET_STRIDE)(
gralloc1_device_t* device, buffer_handle_t buffer, uint32_t* outStride);
/* getTransportSize(..., outNumFds, outNumInts)
* Function descriptor: GRALLOC1_FUNCTION_GET_TRANSPORT_SIZE
* This function is optional for all gralloc1 devices.
*
* Get the transport size of a buffer. An imported buffer handle is a raw
* buffer handle with the process-local runtime data appended. This
* function, for example, allows a caller to omit the process-local
* runtime data at the tail when serializing the imported buffer handle.
*
* Note that a client might or might not omit the process-local runtime
* data when sending an imported buffer handle. The mapper must support
* both cases on the receiving end.
*
* Parameters:
* outNumFds - the number of file descriptors needed for transport
* outNumInts - the number of integers needed for transport
*
* Returns GRALLOC1_ERROR_NONE or one of the following errors:
* GRALLOC1_ERROR_BAD_HANDLE - the buffer handle is invalid
* GRALLOC1_ERROR_UNSUPPORTED - the device is unable to retrieve the numFds
* and numInts; see note [1] in this section's header for more information
*/
typedef int32_t /*gralloc1_error_t*/ (*GRALLOC1_PFN_GET_TRANSPORT_SIZE)(
gralloc1_device_t* device, buffer_handle_t buffer, uint32_t *outNumFds,
uint32_t *outNumInts);
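Since gralloc1 hooks are resolved through the device's getFunction, an
optional function like this one must be NULL-checked before use. A sketch,
where `device` and `buffer` are assumed to be valid:

/* Sketch only: query how much of the handle must be sent over IPC. */
GRALLOC1_PFN_GET_TRANSPORT_SIZE getTransportSize =
        (GRALLOC1_PFN_GET_TRANSPORT_SIZE)device->getFunction(
                device, GRALLOC1_FUNCTION_GET_TRANSPORT_SIZE);
if (getTransportSize != NULL) {
    uint32_t numFds = 0, numInts = 0;
    if (getTransportSize(device, buffer, &numFds, &numInts) ==
            GRALLOC1_ERROR_NONE) {
        /* serialize only numFds fds and numInts ints of the handle */
    }
}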
typedef struct gralloc1_buffer_descriptor_info {
uint32_t width;
uint32_t height;
uint32_t layerCount;
int32_t /*android_pixel_format_t*/ format;
uint64_t producerUsage;
uint64_t consumerUsage;
} gralloc1_buffer_descriptor_info_t;
/* validateBufferSize(..., descriptorInfo, stride)
* Function descriptor: GRALLOC1_FUNCTION_VALIDATE_BUFFER_SIZE
* This function is optional for all gralloc1 devices.
*
* Validate that the buffer can be safely accessed by a caller who assumes
* the specified descriptorInfo and stride. This must at least validate
* that the buffer size is large enough. Validating the buffer against
* individual buffer attributes is optional.
*
* Parameters:
* descriptorInfo - specifies the attributes of the buffer
* stride - the buffer stride returned by IAllocator::allocate
*
* Returns GRALLOC1_ERROR_NONE or one of the following errors:
* GRALLOC1_ERROR_BAD_HANDLE - the buffer handle is invalid
* GRALLOC1_ERROR_BAD_VALUE - the buffer cannot be safely accessed
* GRALLOC1_ERROR_UNSUPPORTED - the device is unable to validate the buffer
* size; see note [1] in this section's header for more information
*/
typedef int32_t /*gralloc1_error_t*/ (*GRALLOC1_PFN_VALIDATE_BUFFER_SIZE)(
gralloc1_device_t* device, buffer_handle_t buffer,
const gralloc1_buffer_descriptor_info_t* descriptorInfo,
uint32_t stride);
/*
* Buffer management functions
*/
@@ -723,6 +785,37 @@ typedef int32_t /*gralloc1_error_t*/ (*GRALLOC1_PFN_ALLOCATE)(
const gralloc1_buffer_descriptor_t* descriptors,
buffer_handle_t* outBuffers);
/* importBuffer(..., rawHandle, outBuffer);
* Function descriptor: GRALLOC1_FUNCTION_IMPORT_BUFFER
* This function is optional for all gralloc1 devices.
* When supported, GRALLOC1_CAPABILITY_RELEASE_IMPLY_DELETE must also be
* supported.
*
* Explicitly imports a buffer into a process.
*
* This function can be called in place of retain when a raw buffer handle is
* received by a remote process. Importing produces an import handle that can
* be used to access the underlying graphic buffer. The new import handle has a
* ref count of 1.
*
* This function must at least validate the raw handle before creating the
* imported handle. It must also support importing the same raw handle
* multiple times to create multiple imported handles. The imported handle
* must be considered valid everywhere in the process.
*
* Parameters:
* rawHandle - the raw buffer handle to import
* outBuffer - a handle to the newly imported buffer
*
* Returns GRALLOC1_ERROR_NONE or one of the following errors:
* GRALLOC1_ERROR_BAD_HANDLE - the buffer handle is invalid
* GRALLOC1_ERROR_NO_RESOURCES - it is not possible to add an import to this
* buffer at this time
*/
typedef int32_t /*gralloc1_error_t*/ (*GRALLOC1_PFN_IMPORT_BUFFER)(
gralloc1_device_t* device, const buffer_handle_t rawHandle,
buffer_handle_t* outBuffer);
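A sketch of the import path, assuming `device` is a gralloc1_device_t and
`rawHandle` was just received over IPC; each imported handle must later be
balanced by a release:

/* Sketch only: turn a raw handle into a usable imported handle. */
GRALLOC1_PFN_IMPORT_BUFFER importBuffer =
        (GRALLOC1_PFN_IMPORT_BUFFER)device->getFunction(
                device, GRALLOC1_FUNCTION_IMPORT_BUFFER);
buffer_handle_t imported = NULL;
if (importBuffer != NULL &&
    importBuffer(device, rawHandle, &imported) == GRALLOC1_ERROR_NONE) {
    /* access the underlying buffer through `imported` */
}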
/* retain(..., buffer)
* Function descriptor: GRALLOC1_FUNCTION_RETAIN
* Must be provided by all gralloc1 devices


@@ -114,7 +114,7 @@ typedef enum {
* presentDisplay should fail as fast as possible in the case a
* validateDisplay step is needed.
*/
HWC2_CAPABILITY_SKIP_VALIDATE= 4,
HWC2_CAPABILITY_SKIP_VALIDATE = 4,
} hwc2_capability_t;
/* Possible composition types for a given layer */
@@ -262,6 +262,15 @@ typedef enum {
HWC2_FUNCTION_SET_POWER_MODE,
HWC2_FUNCTION_SET_VSYNC_ENABLED,
HWC2_FUNCTION_VALIDATE_DISPLAY,
HWC2_FUNCTION_SET_LAYER_FLOAT_COLOR,
HWC2_FUNCTION_SET_LAYER_PER_FRAME_METADATA,
HWC2_FUNCTION_GET_PER_FRAME_METADATA_KEYS,
HWC2_FUNCTION_SET_READBACK_BUFFER,
HWC2_FUNCTION_GET_READBACK_BUFFER_ATTRIBUTES,
HWC2_FUNCTION_GET_READBACK_BUFFER_FENCE,
HWC2_FUNCTION_GET_RENDER_INTENTS,
HWC2_FUNCTION_SET_COLOR_MODE_WITH_RENDER_INTENT,
HWC2_FUNCTION_GET_DATASPACE_SATURATION_MATRIX
} hwc2_function_descriptor_t;
/* Layer requests returned from getDisplayRequests */
@@ -311,6 +320,33 @@ typedef enum {
HWC2_VSYNC_DISABLE = 2,
} hwc2_vsync_t;
/* MUST match HIDL's V2_2::IComposerClient::PerFrameMetadataKey */
typedef enum {
/* SMPTE ST 2084:2014.
* Coordinates defined in CIE 1931 xy chromaticity space
*/
HWC2_DISPLAY_RED_PRIMARY_X = 0,
HWC2_DISPLAY_RED_PRIMARY_Y = 1,
HWC2_DISPLAY_GREEN_PRIMARY_X = 2,
HWC2_DISPLAY_GREEN_PRIMARY_Y = 3,
HWC2_DISPLAY_BLUE_PRIMARY_X = 4,
HWC2_DISPLAY_BLUE_PRIMARY_Y = 5,
HWC2_WHITE_POINT_X = 6,
HWC2_WHITE_POINT_Y = 7,
/* SMPTE ST 2084:2014.
* Units: nits
* max as defined by ST 2084: 10,000 nits
*/
HWC2_MAX_LUMINANCE = 8,
HWC2_MIN_LUMINANCE = 9,
/* CTA 861.3
* Units: nits
*/
HWC2_MAX_CONTENT_LIGHT_LEVEL = 10,
HWC2_MAX_FRAME_AVERAGE_LIGHT_LEVEL = 11,
} hwc2_per_frame_metadata_key_t;
/*
* Stringification Functions
*/
@@ -479,6 +515,15 @@ static inline const char* getFunctionDescriptorName(
case HWC2_FUNCTION_SET_POWER_MODE: return "SetPowerMode";
case HWC2_FUNCTION_SET_VSYNC_ENABLED: return "SetVsyncEnabled";
case HWC2_FUNCTION_VALIDATE_DISPLAY: return "ValidateDisplay";
case HWC2_FUNCTION_SET_LAYER_FLOAT_COLOR: return "SetLayerFloatColor";
case HWC2_FUNCTION_SET_LAYER_PER_FRAME_METADATA: return "SetLayerPerFrameMetadata";
case HWC2_FUNCTION_GET_PER_FRAME_METADATA_KEYS: return "GetPerFrameMetadataKeys";
case HWC2_FUNCTION_SET_READBACK_BUFFER: return "SetReadbackBuffer";
case HWC2_FUNCTION_GET_READBACK_BUFFER_ATTRIBUTES: return "GetReadbackBufferAttributes";
case HWC2_FUNCTION_GET_READBACK_BUFFER_FENCE: return "GetReadbackBufferFence";
case HWC2_FUNCTION_GET_RENDER_INTENTS: return "GetRenderIntents";
case HWC2_FUNCTION_SET_COLOR_MODE_WITH_RENDER_INTENT: return "SetColorModeWithRenderIntent";
case HWC2_FUNCTION_GET_DATASPACE_SATURATION_MATRIX: return "GetDataspaceSaturationMatrix";
default: return "Unknown";
}
}
@@ -668,6 +713,15 @@ enum class FunctionDescriptor : int32_t {
SetPowerMode = HWC2_FUNCTION_SET_POWER_MODE,
SetVsyncEnabled = HWC2_FUNCTION_SET_VSYNC_ENABLED,
ValidateDisplay = HWC2_FUNCTION_VALIDATE_DISPLAY,
SetLayerFloatColor = HWC2_FUNCTION_SET_LAYER_FLOAT_COLOR,
SetLayerPerFrameMetadata = HWC2_FUNCTION_SET_LAYER_PER_FRAME_METADATA,
GetPerFrameMetadataKeys = HWC2_FUNCTION_GET_PER_FRAME_METADATA_KEYS,
SetReadbackBuffer = HWC2_FUNCTION_SET_READBACK_BUFFER,
GetReadbackBufferAttributes = HWC2_FUNCTION_GET_READBACK_BUFFER_ATTRIBUTES,
GetReadbackBufferFence = HWC2_FUNCTION_GET_READBACK_BUFFER_FENCE,
GetRenderIntents = HWC2_FUNCTION_GET_RENDER_INTENTS,
SetColorModeWithRenderIntent = HWC2_FUNCTION_SET_COLOR_MODE_WITH_RENDER_INTENT,
GetDataspaceSaturationMatrix = HWC2_FUNCTION_GET_DATASPACE_SATURATION_MATRIX,
};
TO_STRING(hwc2_function_descriptor_t, FunctionDescriptor,
getFunctionDescriptorName)
@@ -969,6 +1023,27 @@ typedef int32_t /*hwc2_error_t*/ (*HWC2_PFN_REGISTER_CALLBACK)(
int32_t /*hwc2_callback_descriptor_t*/ descriptor,
hwc2_callback_data_t callbackData, hwc2_function_pointer_t pointer);
/* getDataspaceSaturationMatrix(..., dataspace, outMatrix)
* Descriptor: HWC2_FUNCTION_GET_DATASPACE_SATURATION_MATRIX
* Provided by HWC2 devices that do not return a null pointer for this function.
*
* Get the saturation matrix of the specified dataspace. The saturation matrix
* can be used to approximate the dataspace saturation operation performed by
* the HWC2 device when non-colorimetric mapping is allowed. It is to be
* applied on linear pixel values.
*
* Parameters:
* dataspace - the dataspace to query for
* outMatrix - a column-major 4x4 matrix (16 floats). It must be an identity
* matrix unless dataspace is HAL_DATASPACE_SRGB_LINEAR.
*
* Returns HWC2_ERROR_NONE or one of the following errors:
* HWC2_ERROR_BAD_PARAMETER - dataspace was invalid
*/
typedef int32_t /*hwc2_error_t*/ (*HWC2_PFN_GET_DATASPACE_SATURATION_MATRIX)(
hwc2_device_t* device, int32_t /*android_dataspace_t*/ dataspace,
float* outMatrix);
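A sketch of querying the matrix, assuming a valid hwc2_device_t; like all
HWC2 hooks it is resolved via getFunction and may be absent:

/* Sketch only: fetch the saturation matrix for linear sRGB content. */
HWC2_PFN_GET_DATASPACE_SATURATION_MATRIX getMatrix =
        (HWC2_PFN_GET_DATASPACE_SATURATION_MATRIX)device->getFunction(
                device, HWC2_FUNCTION_GET_DATASPACE_SATURATION_MATRIX);
if (getMatrix != NULL) {
    float matrix[16]; /* column-major 4x4 */
    getMatrix(device, HAL_DATASPACE_SRGB_LINEAR, matrix);
}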
/*
* Display Functions
*
@@ -1146,6 +1221,35 @@ typedef int32_t /*hwc2_error_t*/ (*HWC2_PFN_GET_COLOR_MODES)(
hwc2_device_t* device, hwc2_display_t display, uint32_t* outNumModes,
int32_t* /*android_color_mode_t*/ outModes);
/* getRenderIntents(..., mode, outNumIntents, outIntents)
* Descriptor: HWC2_FUNCTION_GET_RENDER_INTENTS
* Provided by HWC2 devices that do not return a null pointer for this function.
*
* Returns the render intents supported on this display.
*
* The valid render intents can be found in android_render_intent_v1_1_t in
* <system/graphics.h>. All HWC2 devices must support at least
* HAL_RENDER_INTENT_COLORIMETRIC.
*
* outIntents may be NULL to retrieve the number of intents which will be
* returned.
*
* Parameters:
* mode - the color mode to query the render intents for
* outNumIntents - if outIntents was NULL, the number of intents which would
* have been returned; if outIntents was not NULL, the number of intents
* returned, which must not exceed the value stored in outNumIntents
* prior to the call; pointer will be non-NULL
* outIntents - an array of render intents
*
* Returns HWC2_ERROR_NONE or one of the following errors:
* HWC2_ERROR_BAD_DISPLAY - an invalid display handle was passed in
*/
typedef int32_t /*hwc2_error_t*/ (*HWC2_PFN_GET_RENDER_INTENTS)(
hwc2_device_t* device, hwc2_display_t display, int32_t mode,
uint32_t* outNumIntents,
int32_t* /*android_render_intent_v1_1_t*/ outIntents);
/* getDisplayAttribute(..., config, attribute, outValue)
* Descriptor: HWC2_FUNCTION_GET_DISPLAY_ATTRIBUTE
* Must be provided by all HWC2 devices
@@ -1470,8 +1574,8 @@ typedef int32_t /*hwc2_error_t*/ (*HWC2_PFN_SET_CLIENT_TARGET)(
*
* Sets the color mode of the given display.
*
* Upon returning from this function, the color mode change must have fully
* taken effect.
* This must be called outside of validateDisplay/presentDisplay, and it takes
* effect on the next presentDisplay.
*
* The valid color modes can be found in android_color_mode_t in
* <system/graphics.h>. All HWC2 devices must support at least
@@ -1490,6 +1594,34 @@ typedef int32_t /*hwc2_error_t*/ (*HWC2_PFN_SET_COLOR_MODE)(
hwc2_device_t* device, hwc2_display_t display,
int32_t /*android_color_mode_t*/ mode);
/* setColorModeWithIntent(..., mode, intent)
* Descriptor: HWC2_FUNCTION_SET_COLOR_MODE_WITH_RENDER_INTENT
* Provided by HWC2 devices that do not return a null pointer for this function.
*
* This must be called outside of validateDisplay/presentDisplay, and it takes
* effect on the next presentDisplay.
*
* The valid color modes and render intents can be found in
* android_color_mode_t and android_render_intent_v1_1_t in
* <system/graphics.h>. All HWC2 devices must support at least
* HAL_COLOR_MODE_NATIVE and HAL_RENDER_INTENT_COLORIMETRIC, and displays are
* assumed to be in this mode and intent upon hotplug.
*
* Parameters:
* mode - the mode to set
* intent - the intent to set
*
* Returns HWC2_ERROR_NONE or one of the following errors:
* HWC2_ERROR_BAD_DISPLAY - an invalid display handle was passed in
* HWC2_ERROR_BAD_PARAMETER - mode/intent is not a valid color mode or
* render intent
* HWC2_ERROR_UNSUPPORTED - mode or intent is not supported on this display
*/
typedef int32_t /*hwc2_error_t*/ (*HWC2_PFN_SET_COLOR_MODE_WITH_RENDER_INTENT)(
hwc2_device_t* device, hwc2_display_t display,
int32_t /*android_color_mode_t*/ mode,
int32_t /*android_render_intent_v1_1_t */ intent);
/* setColorTransform(..., matrix, hint)
* Descriptor: HWC2_FUNCTION_SET_COLOR_TRANSFORM
* Must be provided by all HWC2 devices
@@ -1537,6 +1669,34 @@ typedef int32_t /*hwc2_error_t*/ (*HWC2_PFN_SET_COLOR_TRANSFORM)(
hwc2_device_t* device, hwc2_display_t display, const float* matrix,
int32_t /*android_color_transform_t*/ hint);
/* getPerFrameMetadataKeys(..., outKeys)
* Descriptor: HWC2_FUNCTION_GET_PER_FRAME_METADATA_KEYS
* Optional for HWC2 devices
*
* If supported (getFunction(HWC2_FUNCTION_GET_PER_FRAME_METADATA_KEYS) is non-null),
* getPerFrameMetadataKeys returns the list of supported PerFrameMetadataKeys
* which are invariant with regard to the active configuration.
*
* Devices that are not HDR-capable must return null when getFunction is called
* with HWC2_FUNCTION_GET_PER_FRAME_METADATA_KEYS.
*
* If outKeys is NULL, the required number of PerFrameMetadataKey keys
* must be returned in outNumKeys.
*
* Parameters:
* outNumKeys - if outKeys is NULL, the number of keys which would have
* been returned; if outKeys is not NULL, the number of keys stored in
* outKeys, which must not exceed the value stored in outNumKeys prior
* to the call; pointer will be non-NULL
* outKeys - an array of hwc2_per_frame_metadata_key_t keys
*
* Returns HWC2_ERROR_NONE or one of the following errors:
* HWC2_ERROR_BAD_DISPLAY - an invalid display handle was passed in
*/
typedef int32_t /*hwc2_error_t*/ (*HWC2_PFN_GET_PER_FRAME_METADATA_KEYS)(
hwc2_device_t* device, hwc2_display_t display, uint32_t* outNumKeys,
int32_t* /*hwc2_per_frame_metadata_key_t*/ outKeys);
/* setOutputBuffer(..., buffer, releaseFence)
* Descriptor: HWC2_FUNCTION_SET_OUTPUT_BUFFER
* Must be provided by all HWC2 devices
@@ -1591,6 +1751,132 @@ typedef int32_t /*hwc2_error_t*/ (*HWC2_PFN_SET_POWER_MODE)(
hwc2_device_t* device, hwc2_display_t display,
int32_t /*hwc2_power_mode_t*/ mode);
/* getReadbackBufferAttributes(..., outFormat, outDataspace)
* Optional for HWC2 devices
*
* Returns the format which should be used when allocating a buffer for use by
* device readback as well as the dataspace in which its contents should be
* interpreted.
*
* If readback is not supported by this HWC implementation, this call may also
* return HWC2_ERROR_UNSUPPORTED so the client can fall back to another method.
* Returning NULL to a getFunction request for this function will also indicate
* that readback is not supported.
*
* The width and height of this buffer will be those of the currently-active
* display configuration, and the usage flags will consist of the following:
* BufferUsage::CPU_READ | BufferUsage::GPU_TEXTURE |
* BufferUsage::COMPOSER_OUTPUT
*
* The format and dataspace provided must be sufficient such that if a
* correctly-configured buffer is passed into setReadbackBuffer, filled by
* the device, and then displayed by the client as a full-screen buffer, the
* output of the display remains the same (subject to the note about protected
* content in the description of setReadbackBuffer).
*
* If the active configuration or color mode of this display has changed since
* the previous call to this function, it will be called again prior to setting
* a readback buffer such that the returned format and dataspace can be updated
* accordingly.
*
* Parameters:
* outFormat - the format the client should use when allocating a device
* readback buffer; pointer will be non-NULL
* outDataspace - the dataspace the client will use when interpreting the
* contents of a device readback buffer; pointer will be non-NULL
*
* Returns HWC2_ERROR_NONE or one of the following errors:
* HWC2_ERROR_BAD_DISPLAY - an invalid display handle was passed in
*
* See also:
* setReadbackBuffer
* getReadbackBufferFence
*/
typedef int32_t /*hwc2_error_t*/ (*HWC2_PFN_GET_READBACK_BUFFER_ATTRIBUTES)(
hwc2_device_t* device, hwc2_display_t display,
int32_t* /*android_pixel_format_t*/ outFormat,
int32_t* /*android_dataspace_t*/ outDataspace);
/* getReadbackBufferFence(..., outFence)
* Optional for HWC2 devices
*
* Returns an acquire sync fence file descriptor which will signal when the
* buffer provided to setReadbackBuffer has been filled by the device and is
* safe for the client to read.
*
* If it is already safe to read from this buffer, -1 may be returned instead.
* The client takes ownership of this file descriptor and is responsible for
* closing it when it is no longer needed.
*
* This function will be called immediately after the composition cycle that is
* captured into the readback buffer. The complete ordering of a readback buffer
* capture is as follows:
*
* getReadbackBufferAttributes
* // Readback buffer is allocated
* // Many frames may pass
*
* setReadbackBuffer
* validateDisplay
* presentDisplay
* getReadbackBufferFence
* // Implicitly wait on the acquire fence before accessing the buffer
*
* Parameters:
* outFence - a sync fence file descriptor as described above; pointer
* will be non-NULL
*
* Returns HWC2_ERROR_NONE or one of the following errors:
* HWC2_ERROR_BAD_DISPLAY - an invalid display handle was passed in
* HWC2_ERROR_NO_RESOURCES - the readback operation was successful, but
* resulted in a different validate result than would have occurred
* without readback
* HWC2_ERROR_UNSUPPORTED - the readback operation was unsuccessful because
* of resource constraints, the presence of protected content, or other
* reasons; -1 must be returned in outFence
*/
typedef int32_t /*hwc2_error_t*/ (*HWC2_PFN_GET_READBACK_BUFFER_FENCE)(
hwc2_device_t* device, hwc2_display_t display,
int32_t* outFence);
/* setReadbackBuffer(..., buffer, releaseFence)
* Optional for HWC2 devices
*
* Sets the readback buffer to be filled with the contents of the next
* composition performed for this display (i.e., the contents present at the
* time of the next validateDisplay/presentDisplay cycle).
*
* This buffer will have been allocated as described in
* getReadbackBufferAttributes and will be interpreted as being in the dataspace
* provided by the same.
*
* If there is hardware protected content on the display at the time of the next
* composition, the area of the readback buffer covered by such content must be
* completely black. Any areas of the buffer not covered by such content may
* optionally be black as well.
*
* The release fence file descriptor provided works identically to the one
* described for setOutputBuffer.
*
* This function will not be called between any call to validateDisplay and a
* subsequent call to presentDisplay.
*
* Parameters:
* buffer - the new readback buffer
* releaseFence - a sync fence file descriptor as described in setOutputBuffer
*
* Returns HWC2_ERROR_NONE or one of the following errors:
* HWC2_ERROR_BAD_DISPLAY - an invalid display handle was passed in
* HWC2_ERROR_BAD_PARAMETER - the new readback buffer handle was invalid
*
* See also:
* getReadbackBufferAttributes
* getReadbackBufferFence
*/
typedef int32_t /*hwc2_error_t*/ (*HWC2_PFN_SET_READBACK_BUFFER)(
hwc2_device_t* device, hwc2_display_t display,
buffer_handle_t buffer, int32_t releaseFence);
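Putting the three readback hooks together, a sketch of one capture cycle in
the documented order; it assumes the function pointers were already resolved
via getFunction, that `readbackBuffer` was allocated per
getReadbackBufferAttributes, and that sync_wait() from libsync is available:

/* Sketch only: capture the next composed frame into readbackBuffer. */
uint32_t numTypes = 0, numRequests = 0;
int32_t presentFence = -1, acquireFence = -1;
setReadbackBuffer(device, display, readbackBuffer, -1 /* no release fence */);
validateDisplay(device, display, &numTypes, &numRequests);
presentDisplay(device, display, &presentFence);
if (getReadbackBufferFence(device, display, &acquireFence) == HWC2_ERROR_NONE) {
    if (acquireFence >= 0) {
        sync_wait(acquireFence, -1); /* block until the device fills it */
        close(acquireFence);
    }
    /* readbackBuffer now holds the composed frame */
}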
/* setVsyncEnabled(..., enabled)
* Descriptor: HWC2_FUNCTION_SET_VSYNC_ENABLED
* Must be provided by all HWC2 devices
@@ -1758,6 +2044,35 @@ typedef int32_t /*hwc2_error_t*/ (*HWC2_PFN_SET_LAYER_SURFACE_DAMAGE)(
hwc2_device_t* device, hwc2_display_t display, hwc2_layer_t layer,
hwc_region_t damage);
/* setLayerPerFrameMetadata(..., numElements, keys, metadata)
* Descriptor: HWC2_FUNCTION_SET_LAYER_PER_FRAME_METADATA
* Optional for HWC2 devices
*
* If supported (getFunction(HWC2_FUNCTION_SET_LAYER_PER_FRAME_METADATA) is
* non-null), sets the metadata for the given display for all following
* frames.
*
* Upon returning from this function, the metadata change must have
* fully taken effect.
*
* This function will only be called if getPerFrameMetadataKeys is non-NULL
* and returns at least one key.
*
* Parameters:
* numElements - the number of elements in each of the keys and metadata arrays
* keys - a pointer to the array of keys
* metadata - a pointer to the corresponding array of metadata values
*
* Returns HWC2_ERROR_NONE or one of the following errors:
* HWC2_ERROR_BAD_DISPLAY - an invalid display handle was passed in
* HWC2_ERROR_BAD_PARAMETER - metadata is not valid
* HWC2_ERROR_UNSUPPORTED - metadata is not supported on this display
*/
typedef int32_t /*hwc2_error_t*/ (*HWC2_PFN_SET_LAYER_PER_FRAME_METADATA)(
hwc2_device_t* device, hwc2_display_t display, hwc2_layer_t layer,
uint32_t numElements, const int32_t* /*hwc2_per_frame_metadata_key_t*/ keys,
const float* metadata);
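For illustration, a sketch with assumed HDR10-style values; keys[i] pairs
with metadata[i], and setLayerPerFrameMetadata is the resolved function
pointer:

/* Sketch only: attach static HDR metadata to a layer. */
const int32_t keys[] = { HWC2_MAX_LUMINANCE, HWC2_MAX_CONTENT_LIGHT_LEVEL };
const float values[] = { 1000.0f /* nits */, 400.0f /* nits */ };
setLayerPerFrameMetadata(device, display, layer, 2, keys, values);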
/*
* Layer State Functions
*
@@ -1805,6 +2120,25 @@ typedef int32_t /*hwc2_error_t*/ (*HWC2_PFN_SET_LAYER_COLOR)(
hwc2_device_t* device, hwc2_display_t display, hwc2_layer_t layer,
hwc_color_t color);
/* setLayerFloatColor(..., color)
* Descriptor: HWC2_FUNCTION_SET_LAYER_FLOAT_COLOR
* Provided by HWC2 devices that do not return a null pointer for this function.
*
* Sets the color of the given layer. If the composition type of the layer is
* not HWC2_COMPOSITION_SOLID_COLOR, this call must return HWC2_ERROR_NONE and
* have no other effect.
*
* Parameters:
* color - the new color as floats in the range [0.0, 1.0]; the colorspace is
* defined by the dataspace that gets set by calling setLayerDataspace.
*
* Returns HWC2_ERROR_NONE or one of the following errors:
* HWC2_ERROR_BAD_LAYER - an invalid layer handle was passed in
*/
typedef int32_t /*hwc2_error_t*/ (*HWC2_PFN_SET_LAYER_FLOAT_COLOR)(
hwc2_device_t* device, hwc2_display_t display, hwc2_layer_t layer,
hwc_float_color_t color);
/* setLayerCompositionType(..., type)
* Descriptor: HWC2_FUNCTION_SET_LAYER_COMPOSITION_TYPE
* Must be provided by all HWC2 devices


@@ -58,6 +58,13 @@ typedef struct hwc_color {
uint8_t a;
} hwc_color_t;
typedef struct hwc_float_color {
float r;
float g;
float b;
float a;
} hwc_float_color_t;
typedef struct hwc_frect {
float left;
float top;


@@ -112,6 +112,8 @@ typedef enum {
KM_TAG_ALLOW_WHILE_ON_BODY = KM_BOOL | 506, /* Allow key to be used after authentication timeout
* if device is still on-body (requires secure
* on-body sensor). */
KM_TAG_UNLOCKED_DEVICE_REQUIRED = KM_BOOL | 508, /* Require the device screen to be unlocked if the
* key is used. */
/* Application access control */
KM_TAG_ALL_APPLICATIONS = KM_BOOL | 600, /* Specified to indicate key is usable by all
@@ -182,6 +184,7 @@ typedef enum {
/* Block ciphers algorithms */
KM_ALGORITHM_AES = 32,
KM_ALGORITHM_TRIPLE_DES = 33,
/* MAC algorithms */
KM_ALGORITHM_HMAC = 128,
@@ -296,6 +299,8 @@ typedef enum {
KM_PURPOSE_SIGN = 2, /* Usable with RSA, EC and HMAC keys. */
KM_PURPOSE_VERIFY = 3, /* Usable with RSA, EC and HMAC keys. */
KM_PURPOSE_DERIVE_KEY = 4, /* Usable with EC keys. */
KM_PURPOSE_WRAP = 5, /* Usable with wrapped keys. */
} keymaster_purpose_t;
typedef struct {
@@ -449,6 +454,7 @@ typedef enum {
KM_ERROR_KEYMASTER_NOT_CONFIGURED = -64,
KM_ERROR_ATTESTATION_APPLICATION_ID_MISSING = -65,
KM_ERROR_CANNOT_ATTEST_IDS = -66,
KM_ERROR_DEVICE_LOCKED = -72,
KM_ERROR_UNIMPLEMENTED = -100,
KM_ERROR_VERSION_MISMATCH = -101,
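A sketch of requesting the new tag at key-generation time, using the inline
helpers defined in this header; the algorithm and key size shown are just
examples:

/* Sketch only: a key that may only be used while the screen is unlocked. */
keymaster_key_param_t params[] = {
    keymaster_param_enum(KM_TAG_ALGORITHM, KM_ALGORITHM_AES),
    keymaster_param_int(KM_TAG_KEY_SIZE, 256),
    keymaster_param_bool(KM_TAG_UNLOCKED_DEVICE_REQUIRED),
};
keymaster_key_param_set_t param_set = {
    params, sizeof(params) / sizeof(params[0])
};
/* Using such a key while the device is locked later fails with
 * KM_ERROR_DEVICE_LOCKED. */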

File diff suppressed because it is too large.


@@ -32,6 +32,15 @@
#include <hardware/hardware.h>
#include <system/audio.h>
#define STUB_DEFAULT_SAMPLE_RATE 48000
#define STUB_DEFAULT_AUDIO_FORMAT AUDIO_FORMAT_PCM_16_BIT
#define STUB_INPUT_BUFFER_MILLISECONDS 20
#define STUB_INPUT_DEFAULT_CHANNEL_MASK AUDIO_CHANNEL_IN_STEREO
#define STUB_OUTPUT_BUFFER_MILLISECONDS 10
#define STUB_OUTPUT_DEFAULT_CHANNEL_MASK AUDIO_CHANNEL_OUT_STEREO
struct stub_audio_device {
struct audio_hw_device device;
};
@@ -39,46 +48,71 @@ struct stub_audio_device {
struct stub_stream_out {
struct audio_stream_out stream;
int64_t last_write_time_us;
uint32_t sample_rate;
audio_channel_mask_t channel_mask;
audio_format_t format;
size_t frame_count;
};
struct stub_stream_in {
struct audio_stream_in stream;
int64_t last_read_time_us;
uint32_t sample_rate;
audio_channel_mask_t channel_mask;
audio_format_t format;
size_t frame_count;
};
static uint32_t out_get_sample_rate(const struct audio_stream *stream)
{
return 44100;
const struct stub_stream_out *out = (const struct stub_stream_out *)stream;
ALOGV("out_get_sample_rate: %u", out->sample_rate);
return out->sample_rate;
}
static int out_set_sample_rate(struct audio_stream *stream, uint32_t rate)
{
ALOGV("out_set_sample_rate: %d", 0);
return -ENOSYS;
struct stub_stream_out *out = (struct stub_stream_out *)stream;
ALOGV("out_set_sample_rate: %d", rate);
out->sample_rate = rate;
return 0;
}
static size_t out_get_buffer_size(const struct audio_stream *stream)
{
ALOGV("out_get_buffer_size: %d", 4096);
return 4096;
const struct stub_stream_out *out = (const struct stub_stream_out *)stream;
size_t buffer_size = out->frame_count *
audio_stream_out_frame_size(&out->stream);
ALOGV("out_get_buffer_size: %zu", buffer_size);
return buffer_size;
}
static audio_channel_mask_t out_get_channels(const struct audio_stream *stream)
{
ALOGV("out_get_channels");
return AUDIO_CHANNEL_OUT_STEREO;
const struct stub_stream_out *out = (const struct stub_stream_out *)stream;
ALOGV("out_get_channels: %x", out->channel_mask);
return out->channel_mask;
}
static audio_format_t out_get_format(const struct audio_stream *stream)
{
ALOGV("out_get_format");
return AUDIO_FORMAT_PCM_16_BIT;
const struct stub_stream_out *out = (const struct stub_stream_out *)stream;
ALOGV("out_get_format: %d", out->format);
return out->format;
}
static int out_set_format(struct audio_stream *stream, audio_format_t format)
{
ALOGV("out_set_format: %d",format);
return -ENOSYS;
struct stub_stream_out *out = (struct stub_stream_out *)stream;
ALOGV("out_set_format: %d", format);
out->format = format;
return 0;
}
static int out_standby(struct audio_stream *stream)
@@ -109,7 +143,7 @@ static char * out_get_parameters(const struct audio_stream *stream, const char *
static uint32_t out_get_latency(const struct audio_stream_out *stream)
{
ALOGV("out_get_latency");
return 0;
return STUB_OUTPUT_BUFFER_MILLISECONDS;
}
static int out_set_volume(struct audio_stream_out *stream, float left,
@@ -182,36 +216,54 @@ static int out_get_next_write_timestamp(const struct audio_stream_out *stream,
/** audio_stream_in implementation **/
static uint32_t in_get_sample_rate(const struct audio_stream *stream)
{
ALOGV("in_get_sample_rate");
return 8000;
const struct stub_stream_in *in = (const struct stub_stream_in *)stream;
ALOGV("in_get_sample_rate: %u", in->sample_rate);
return in->sample_rate;
}
static int in_set_sample_rate(struct audio_stream *stream, uint32_t rate)
{
ALOGV("in_set_sample_rate: %d", rate);
return -ENOSYS;
struct stub_stream_in *in = (struct stub_stream_in *)stream;
ALOGV("in_set_sample_rate: %u", rate);
in->sample_rate = rate;
return 0;
}
static size_t in_get_buffer_size(const struct audio_stream *stream)
{
ALOGV("in_get_buffer_size: %d", 320);
return 320;
const struct stub_stream_in *in = (const struct stub_stream_in *)stream;
size_t buffer_size = in->frame_count *
audio_stream_in_frame_size(&in->stream);
ALOGV("in_get_buffer_size: %zu", buffer_size);
return buffer_size;
}
static audio_channel_mask_t in_get_channels(const struct audio_stream *stream)
{
ALOGV("in_get_channels: %d", AUDIO_CHANNEL_IN_MONO);
return AUDIO_CHANNEL_IN_MONO;
const struct stub_stream_in *in = (const struct stub_stream_in *)stream;
ALOGV("in_get_channels: %x", in->channel_mask);
return in->channel_mask;
}
static audio_format_t in_get_format(const struct audio_stream *stream)
{
return AUDIO_FORMAT_PCM_16_BIT;
const struct stub_stream_in *in = (const struct stub_stream_in *)stream;
ALOGV("in_get_format: %d", in->format);
return in->format;
}
static int in_set_format(struct audio_stream *stream, audio_format_t format)
{
return -ENOSYS;
struct stub_stream_in *in = (struct stub_stream_in *)stream;
ALOGV("in_set_format: %d", format);
in->format = format;
return 0;
}
static int in_standby(struct audio_stream *stream)
@@ -290,6 +342,13 @@ static int in_remove_audio_effect(const struct audio_stream *stream, effect_hand
return 0;
}
static size_t samples_per_milliseconds(size_t milliseconds,
uint32_t sample_rate,
size_t channel_count)
{
return milliseconds * sample_rate * channel_count / 1000;
}
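With the defaults above, the output stream's frame count works out to
samples_per_milliseconds(10, 48000, 1) = 10 * 48000 * 1 / 1000 = 480 frames,
and the 20 ms stereo input buffer at 48 kHz comes to
20 * 48000 * 2 / 1000 = 1920 samples before being scaled by the per-sample
byte size.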
static int adev_open_output_stream(struct audio_hw_device *dev,
audio_io_handle_t handle,
audio_devices_t devices,
@@ -323,7 +382,22 @@ static int adev_open_output_stream(struct audio_hw_device *dev,
out->stream.write = out_write;
out->stream.get_render_position = out_get_render_position;
out->stream.get_next_write_timestamp = out_get_next_write_timestamp;
out->sample_rate = config->sample_rate;
if (out->sample_rate == 0)
out->sample_rate = STUB_DEFAULT_SAMPLE_RATE;
out->channel_mask = config->channel_mask;
if (out->channel_mask == AUDIO_CHANNEL_NONE)
out->channel_mask = STUB_OUTPUT_DEFAULT_CHANNEL_MASK;
out->format = config->format;
if (out->format == AUDIO_FORMAT_DEFAULT)
out->format = STUB_DEFAULT_AUDIO_FORMAT;
out->frame_count = samples_per_milliseconds(
STUB_OUTPUT_BUFFER_MILLISECONDS,
out->sample_rate, 1);
ALOGV("adev_open_output_stream: sample_rate: %u, channels: %x, format: %d,"
" frames: %zu", out->sample_rate, out->channel_mask, out->format,
out->frame_count);
*stream_out = &out->stream;
return 0;
}
@@ -405,8 +479,21 @@ static int adev_get_mic_mute(const struct audio_hw_device *dev, bool *state)
static size_t adev_get_input_buffer_size(const struct audio_hw_device *dev,
const struct audio_config *config)
{
ALOGV("adev_get_input_buffer_size: %d", 320);
return 320;
size_t buffer_size = samples_per_milliseconds(
STUB_INPUT_BUFFER_MILLISECONDS,
config->sample_rate,
audio_channel_count_from_in_mask(
config->channel_mask));
if (!audio_has_proportional_frames(config->format)) {
// Since the audio data is not proportional, choose an arbitrary size for
// the buffer.
buffer_size *= 4;
} else {
buffer_size *= audio_bytes_per_sample(config->format);
}
ALOGV("adev_get_input_buffer_size: %zu", buffer_size);
return buffer_size;
}
static int adev_open_input_stream(struct audio_hw_device *dev,
@@ -440,7 +527,21 @@ static int adev_open_input_stream(struct audio_hw_device *dev,
in->stream.set_gain = in_set_gain;
in->stream.read = in_read;
in->stream.get_input_frames_lost = in_get_input_frames_lost;
in->sample_rate = config->sample_rate;
if (in->sample_rate == 0)
in->sample_rate = STUB_DEFAULT_SAMPLE_RATE;
in->channel_mask = config->channel_mask;
if (in->channel_mask == AUDIO_CHANNEL_NONE)
in->channel_mask = STUB_INPUT_DEFAULT_CHANNEL_MASK;
in->format = config->format;
if (in->format == AUDIO_FORMAT_DEFAULT)
in->format = STUB_DEFAULT_AUDIO_FORMAT;
in->frame_count = samples_per_milliseconds(
STUB_INPUT_BUFFER_MILLISECONDS, in->sample_rate, 1);
ALOGV("adev_open_input_stream: sample_rate: %u, channels: %x, format: %d,"
"frames: %zu", in->sample_rate, in->channel_mask, in->format,
in->frame_count);
*stream_in = &in->stream;
return 0;
}


@@ -52,9 +52,9 @@ extern "C" {
namespace android {
// Set to 1 to enable extremely verbose logging in this module.
#define SUBMIX_VERBOSE_LOGGING 0
#if SUBMIX_VERBOSE_LOGGING
// Uncomment to enable extremely verbose logging in this module.
// #define SUBMIX_VERBOSE_LOGGING
#if defined(SUBMIX_VERBOSE_LOGGING)
#define SUBMIX_ALOGV(...) ALOGV(__VA_ARGS__)
#define SUBMIX_ALOGE(...) ALOGE(__VA_ARGS__)
#else
@@ -205,7 +205,7 @@ struct submix_stream_in {
int log_fd;
#endif // LOG_STREAMS_TO_FILES
volatile int16_t read_error_count;
volatile uint16_t read_error_count;
};
// Determine whether the specified sample rate is supported by the submix module.
@@ -467,11 +467,9 @@ static void submix_audio_device_release_pipe_l(struct submix_audio_device * const
rsxadev->routes[route_idx].address);
if (rsxadev->routes[route_idx].rsxSink != 0) {
rsxadev->routes[route_idx].rsxSink.clear();
rsxadev->routes[route_idx].rsxSink = 0;
}
if (rsxadev->routes[route_idx].rsxSource != 0) {
rsxadev->routes[route_idx].rsxSource.clear();
rsxadev->routes[route_idx].rsxSource = 0;
}
memset(rsxadev->routes[route_idx].address, 0, AUDIO_DEVICE_MAX_ADDRESS_LEN);
#ifdef ENABLE_RESAMPLING
@@ -816,8 +814,8 @@ static ssize_t out_write(struct audio_stream_out *stream, const void* buffer,
static uint8_t flush_buffer[64];
const size_t flushBufferSizeFrames = sizeof(flush_buffer) / frame_size;
size_t frames_to_flush_from_source = frames - availableToWrite;
SUBMIX_ALOGV("out_write(): flushing %d frames from the pipe to avoid blocking",
frames_to_flush_from_source);
SUBMIX_ALOGV("out_write(): flushing %llu frames from the pipe to avoid blocking",
(unsigned long long)frames_to_flush_from_source);
while (frames_to_flush_from_source) {
const size_t flush_size = min(frames_to_flush_from_source, flushBufferSizeFrames);
frames_to_flush_from_source -= flush_size;
@@ -898,7 +896,8 @@ static int out_get_presentation_position(const struct audio_stream_out *stream,
}
SUBMIX_ALOGV("out_get_presentation_position() got frames=%llu timestamp sec=%llu",
frames ? *frames : -1, timestamp ? timestamp->tv_sec : -1);
frames ? (unsigned long long)*frames : -1ULL,
timestamp ? (unsigned long long)timestamp->tv_sec : -1ULL);
return ret;
}
@@ -1541,7 +1540,7 @@ static size_t adev_get_input_buffer_size(const struct audio_hw_device *dev,
audio_bytes_per_sample(config->format);
const size_t buffer_size = max_buffer_period_size_frames * frame_size_in_bytes;
SUBMIX_ALOGV("adev_get_input_buffer_size() returns %zu bytes, %zu frames",
buffer_size, buffer_period_size_frames);
buffer_size, max_buffer_period_size_frames);
return buffer_size;
}
return 0;
@@ -1692,10 +1691,10 @@ static int adev_dump(const audio_hw_device_t *device, int fd)
reinterpret_cast<const uint8_t *>(device) -
offsetof(struct submix_audio_device, device));
char msg[100];
int n = sprintf(msg, "\nReroute submix audio module:\n");
int n = snprintf(msg, sizeof(msg), "\nReroute submix audio module:\n");
write(fd, &msg, n);
for (int i=0 ; i < MAX_ROUTES ; i++) {
n = sprintf(msg, " route[%d] rate in=%d out=%d, addr=[%s]\n", i,
n = snprintf(msg, sizeof(msg), " route[%d] rate in=%d out=%d, addr=[%s]\n", i,
rsxadev->routes[i].config.input_sample_rate,
rsxadev->routes[i].config.output_sample_rate,
rsxadev->routes[i].address);


@@ -1,4 +1,4 @@
// Copyright (C) 2012 The Android Open Source Project
// Copyright (C) 2018 The Android Open Source Project
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -12,23 +12,18 @@
// See the License for the specific language governing permissions and
// limitations under the License.
cc_library_shared {
name: "vehicle.default",
cc_test {
name: "r_submix_tests",
srcs: ["remote_submix_tests.cpp"],
relative_install_path: "hw",
vendor: true,
srcs: [
"vehicle.c",
"timeUtil.cpp",
],
cflags: [
"-Wall",
"-Werror",
],
header_libs: ["libhardware_headers"],
shared_libs: [
"libhardware",
"liblog",
"libcutils",
"libutils",
],
cflags: ["-Wall", "-Werror", "-O0", "-g",],
header_libs: ["libaudiohal_headers"],
}


@@ -0,0 +1,332 @@
/*
* Copyright (C) 2018 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// To run this test (as root):
// 1) Build it
// 2) adb push to /vendor/bin
// 3) adb shell /vendor/bin/r_submix_tests
#define LOG_TAG "RemoteSubmixTest"
#include <memory>
#include <gtest/gtest.h>
#include <hardware/audio.h>
#include <utils/Errors.h>
#include <utils/Log.h>
using namespace android;
static status_t load_audio_interface(const char* if_name, audio_hw_device_t **dev)
{
const hw_module_t *mod;
int rc;
rc = hw_get_module_by_class(AUDIO_HARDWARE_MODULE_ID, if_name, &mod);
if (rc) {
ALOGE("%s couldn't load audio hw module %s.%s (%s)", __func__,
AUDIO_HARDWARE_MODULE_ID, if_name, strerror(-rc));
goto out;
}
rc = audio_hw_device_open(mod, dev);
if (rc) {
ALOGE("%s couldn't open audio hw device in %s.%s (%s)", __func__,
AUDIO_HARDWARE_MODULE_ID, if_name, strerror(-rc));
goto out;
}
if ((*dev)->common.version < AUDIO_DEVICE_API_VERSION_MIN) {
ALOGE("%s wrong audio hw device version %04x", __func__, (*dev)->common.version);
rc = BAD_VALUE;
audio_hw_device_close(*dev);
goto out;
}
return OK;
out:
*dev = NULL;
return rc;
}
class RemoteSubmixTest : public testing::Test {
protected:
void SetUp() override;
void TearDown() override;
void GenerateData(char* buffer, size_t bufferSize);
void OpenInputStream(
const char* address, bool mono, uint32_t sampleRate, audio_stream_in_t** streamIn);
void OpenOutputStream(
const char* address, bool mono, uint32_t sampleRate, audio_stream_out_t** streamOut);
void ReadFromStream(audio_stream_in_t* streamIn, char* buffer, size_t bufferSize);
void VerifyBufferAllZeroes(char* buffer, size_t bufferSize);
void VerifyBufferNotZeroes(char* buffer, size_t bufferSize);
void VerifyOutputInput(
audio_stream_out_t* streamOut, size_t outBufferSize,
audio_stream_in_t* streamIn, size_t inBufferSize, size_t repeats);
void WriteIntoStream(audio_stream_out_t* streamOut, const char* buffer, size_t bufferSize);
void WriteSomethingIntoStream(audio_stream_out_t* streamOut, size_t bufferSize, size_t repeats);
audio_hw_device_t* mDev;
};
void RemoteSubmixTest::SetUp() {
mDev = nullptr;
ASSERT_EQ(OK, load_audio_interface(AUDIO_HARDWARE_MODULE_ID_REMOTE_SUBMIX, &mDev));
ASSERT_NE(nullptr, mDev);
}
void RemoteSubmixTest::TearDown() {
if (mDev != nullptr) {
int status = audio_hw_device_close(mDev);
ALOGE_IF(status, "Error closing audio hw device %p: %s", mDev, strerror(-status));
mDev = nullptr;
ASSERT_EQ(0, status);
}
}
void RemoteSubmixTest::GenerateData(char* buffer, size_t bufferSize) {
for (size_t i = 0; i < bufferSize; ++i) {
buffer[i] = static_cast<char>(i & 0x7f);
}
}
void RemoteSubmixTest::OpenInputStream(
const char* address, bool mono, uint32_t sampleRate, audio_stream_in_t** streamIn) {
*streamIn = nullptr;
struct audio_config configIn = {};
configIn.channel_mask = mono ? AUDIO_CHANNEL_IN_MONO : AUDIO_CHANNEL_IN_STEREO;
configIn.sample_rate = sampleRate;
status_t result = mDev->open_input_stream(mDev,
AUDIO_IO_HANDLE_NONE, AUDIO_DEVICE_NONE, &configIn,
streamIn, AUDIO_INPUT_FLAG_NONE, address, AUDIO_SOURCE_DEFAULT);
ASSERT_EQ(OK, result);
ASSERT_NE(nullptr, *streamIn);
}
void RemoteSubmixTest::OpenOutputStream(
const char* address, bool mono, uint32_t sampleRate, audio_stream_out_t** streamOut) {
*streamOut = nullptr;
struct audio_config configOut = {};
configOut.channel_mask = mono ? AUDIO_CHANNEL_OUT_MONO : AUDIO_CHANNEL_OUT_STEREO;
configOut.sample_rate = sampleRate;
status_t result = mDev->open_output_stream(mDev,
AUDIO_IO_HANDLE_NONE, AUDIO_DEVICE_NONE, AUDIO_OUTPUT_FLAG_NONE,
&configOut, streamOut, address);
ASSERT_EQ(OK, result);
ASSERT_NE(nullptr, *streamOut);
}
void RemoteSubmixTest::ReadFromStream(
audio_stream_in_t* streamIn, char* buffer, size_t bufferSize) {
ssize_t result = streamIn->read(streamIn, buffer, bufferSize);
EXPECT_EQ(bufferSize, static_cast<size_t>(result));
}
void RemoteSubmixTest::VerifyBufferAllZeroes(char* buffer, size_t bufferSize) {
for (size_t i = 0; i < bufferSize; ++i) {
if (buffer[i]) {
ADD_FAILURE();
return;
}
}
}
void RemoteSubmixTest::VerifyBufferNotZeroes(char* buffer, size_t bufferSize) {
for (size_t i = 0; i < bufferSize; ++i) {
if (buffer[i]) return;
}
ADD_FAILURE();
}
void RemoteSubmixTest::VerifyOutputInput(
audio_stream_out_t* streamOut, size_t outBufferSize,
audio_stream_in_t* streamIn, size_t inBufferSize,
size_t repeats) {
std::unique_ptr<char[]> outBuffer(new char[outBufferSize]), inBuffer(new char[inBufferSize]);
GenerateData(outBuffer.get(), outBufferSize);
for (size_t i = 0; i < repeats; ++i) {
WriteIntoStream(streamOut, outBuffer.get(), outBufferSize);
memset(inBuffer.get(), 0, inBufferSize);
ReadFromStream(streamIn, inBuffer.get(), inBufferSize);
if (inBufferSize == outBufferSize) {
ASSERT_EQ(0, memcmp(outBuffer.get(), inBuffer.get(), inBufferSize));
} else {
VerifyBufferNotZeroes(inBuffer.get(), inBufferSize);
}
}
}
void RemoteSubmixTest::WriteIntoStream(
audio_stream_out_t* streamOut, const char* buffer, size_t bufferSize) {
ssize_t result = streamOut->write(streamOut, buffer, bufferSize);
EXPECT_EQ(bufferSize, static_cast<size_t>(result));
}
void RemoteSubmixTest::WriteSomethingIntoStream(
audio_stream_out_t* streamOut, size_t bufferSize, size_t repeats) {
std::unique_ptr<char[]> buffer(new char[bufferSize]);
GenerateData(buffer.get(), bufferSize);
for (size_t i = 0; i < repeats; ++i) {
WriteIntoStream(streamOut, buffer.get(), bufferSize);
}
}
TEST_F(RemoteSubmixTest, InitSuccess) {
// SetUp must finish with no assertions.
}
// Verifies that when no input was opened, writing into an output stream does not block.
TEST_F(RemoteSubmixTest, OutputDoesNotBlockWhenNoInput) {
const char* address = "1";
audio_stream_out_t* streamOut;
OpenOutputStream(address, true /*mono*/, 48000, &streamOut);
WriteSomethingIntoStream(streamOut, 1024, 16);
mDev->close_output_stream(mDev, streamOut);
}
// Verifies that when input is opened but not reading, writing into an output stream does not block.
// !!! Currently does not finish because it requires setting a parameter from another thread !!!
// TEST_F(RemoteSubmixTest, OutputDoesNotBlockWhenInputStuck) {
// const char* address = "1";
// audio_stream_out_t* streamOut;
// OpenOutputStream(address, true /*mono*/, 48000, &streamOut);
// audio_stream_in_t* streamIn;
// OpenInputStream(address, true /*mono*/, 48000, &streamIn);
// WriteSomethingIntoStream(streamOut, 1024, 16);
// mDev->close_input_stream(mDev, streamIn);
// mDev->close_output_stream(mDev, streamOut);
// }
TEST_F(RemoteSubmixTest, OutputAndInput) {
const char* address = "1";
audio_stream_out_t* streamOut;
OpenOutputStream(address, true /*mono*/, 48000, &streamOut);
audio_stream_in_t* streamIn;
OpenInputStream(address, true /*mono*/, 48000, &streamIn);
const size_t bufferSize = 1024;
VerifyOutputInput(streamOut, bufferSize, streamIn, bufferSize, 16);
mDev->close_input_stream(mDev, streamIn);
mDev->close_output_stream(mDev, streamOut);
}
// Verifies that reading and writing into a closed stream fails gracefully.
TEST_F(RemoteSubmixTest, OutputAndInputAfterClose) {
const char* address = "1";
audio_stream_out_t* streamOut;
OpenOutputStream(address, true /*mono*/, 48000, &streamOut);
audio_stream_in_t* streamIn;
OpenInputStream(address, true /*mono*/, 48000, &streamIn);
mDev->close_input_stream(mDev, streamIn);
mDev->close_output_stream(mDev, streamOut);
const size_t bufferSize = 1024;
std::unique_ptr<char[]> buffer(new char[bufferSize]);
memset(buffer.get(), 0, bufferSize);
ASSERT_EQ(0, streamOut->write(streamOut, buffer.get(), bufferSize));
ASSERT_EQ(static_cast<ssize_t>(bufferSize), streamIn->read(streamIn, buffer.get(), bufferSize));
VerifyBufferAllZeroes(buffer.get(), bufferSize);
}
TEST_F(RemoteSubmixTest, PresentationPosition) {
const char* address = "1";
audio_stream_out_t* streamOut;
OpenOutputStream(address, true /*mono*/, 48000, &streamOut);
uint64_t frames;
struct timespec timestamp;
EXPECT_EQ(0, streamOut->get_presentation_position(streamOut, &frames, &timestamp));
EXPECT_EQ(uint64_t{0}, frames);
uint64_t prevFrames = frames;
for (size_t i = 0; i < 16; ++i) {
WriteSomethingIntoStream(streamOut, 1024, 1);
EXPECT_EQ(0, streamOut->get_presentation_position(streamOut, &frames, &timestamp));
EXPECT_LE(prevFrames, frames);
prevFrames = frames;
}
mDev->close_output_stream(mDev, streamOut);
}
TEST_F(RemoteSubmixTest, RenderPosition) {
const char* address = "1";
audio_stream_out_t* streamOut;
OpenOutputStream(address, true /*mono*/, 48000, &streamOut);
uint32_t frames;
EXPECT_EQ(0, streamOut->get_render_position(streamOut, &frames));
EXPECT_EQ(0U, frames);
uint32_t prevFrames = frames;
for (size_t i = 0; i < 16; ++i) {
WriteSomethingIntoStream(streamOut, 1024, 1);
EXPECT_EQ(0, streamOut->get_render_position(streamOut, &frames));
EXPECT_LE(prevFrames, frames);
prevFrames = frames;
}
mDev->close_output_stream(mDev, streamOut);
}
// This requires ENABLE_CHANNEL_CONVERSION to be set in the HAL module
TEST_F(RemoteSubmixTest, MonoToStereoConversion) {
const char* address = "1";
audio_stream_out_t* streamOut;
OpenOutputStream(address, true /*mono*/, 48000, &streamOut);
audio_stream_in_t* streamIn;
OpenInputStream(address, false /*mono*/, 48000, &streamIn);
const size_t bufferSize = 1024;
VerifyOutputInput(streamOut, bufferSize, streamIn, bufferSize * 2, 16);
mDev->close_input_stream(mDev, streamIn);
mDev->close_output_stream(mDev, streamOut);
}
// This requires ENABLE_CHANNEL_CONVERSION to be set in the HAL module
TEST_F(RemoteSubmixTest, StereoToMonoConversion) {
const char* address = "1";
audio_stream_out_t* streamOut;
OpenOutputStream(address, false /*mono*/, 48000, &streamOut);
audio_stream_in_t* streamIn;
OpenInputStream(address, true /*mono*/, 48000, &streamIn);
const size_t bufferSize = 1024;
VerifyOutputInput(streamOut, bufferSize * 2, streamIn, bufferSize, 16);
mDev->close_input_stream(mDev, streamIn);
mDev->close_output_stream(mDev, streamOut);
}
// This requires ENABLE_RESAMPLING to be set in the HAL module
TEST_F(RemoteSubmixTest, OutputAndInputResampling) {
const char* address = "1";
audio_stream_out_t* streamOut;
OpenOutputStream(address, true /*mono*/, 48000, &streamOut);
audio_stream_in_t* streamIn;
OpenInputStream(address, true /*mono*/, 24000, &streamIn);
const size_t bufferSize = 1024;
VerifyOutputInput(streamOut, bufferSize * 2, streamIn, bufferSize, 16);
mDev->close_input_stream(mDev, streamIn);
mDev->close_output_stream(mDev, streamOut);
}
// This requires ENABLE_LEGACY_INPUT_OPEN to be set in the HAL module
TEST_F(RemoteSubmixTest, OpenInputMultipleTimes) {
const char* address = "1";
audio_stream_out_t* streamOut;
OpenOutputStream(address, true /*mono*/, 48000, &streamOut);
const size_t streamInCount = 3;
audio_stream_in_t* streamIn[streamInCount];
for (size_t i = 0; i < streamInCount; ++i) {
OpenInputStream(address, true /*mono*/, 48000, &streamIn[i]);
}
const size_t bufferSize = 1024;
for (size_t i = 0; i < streamInCount; ++i) {
VerifyOutputInput(streamOut, bufferSize, streamIn[i], bufferSize, 16);
mDev->close_input_stream(mDev, streamIn[i]);
}
mDev->close_output_stream(mDev, streamOut);
}


@ -22,21 +22,31 @@ ifeq ($(USE_CAMERA_V4L2_HAL), true)
v4l2_shared_libs := \
libbase \
libchrome \
libcamera_client \
libcamera_metadata \
libcutils \
libexif \
libhardware \
liblog \
libsync \
libutils \
v4l2_static_libs := \
libyuv_static \
libjpeg_static_ndk \
v4l2_cflags := -fno-short-enums -Wall -Wno-error -Wextra -fvisibility=hidden -DHAVE_JPEG
v4l2_c_includes := $(call include-path-for, camera) \
external/libyuv/files/include \
v4l2_src_files := \
arc/cached_frame.cpp \
arc/exif_utils.cpp \
arc/frame_buffer.cpp \
arc/image_processor.cpp \
arc/jpeg_compressor.cpp \
camera.cpp \
capture_request.cpp \
format_metadata_factory.cpp \
@ -49,7 +59,6 @@ v4l2_src_files := \
stream_format.cpp \
v4l2_camera.cpp \
v4l2_camera_hal.cpp \
v4l2_gralloc.cpp \
v4l2_metadata_factory.cpp \
v4l2_wrapper.cpp \


@ -146,6 +146,3 @@ is underfeatured compared to the ideal/what is possible.
* A variety of features are unimplemented: High speed capture,
flash torch mode, hotplugging/unplugging.
* The HAL uses BGR for RGBA. Again, the HAL was designed for the Raspberry Pi
camera, which doesn't support RGB, but RGB is a common default format for
graphics stacks.


@ -0,0 +1,218 @@
/* Copyright 2017 The Chromium OS Authors. All rights reserved.
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*/
#include "arc/cached_frame.h"
#include <errno.h>
#include <libyuv.h>
#include "arc/common.h"
#include "arc/common_types.h"
namespace arc {
using android::CameraMetadata;
CachedFrame::CachedFrame()
: source_frame_(nullptr),
cropped_buffer_capacity_(0),
yu12_frame_(new AllocatedFrameBuffer(0)),
// Allocate |scaled_frame_| up front as well; Convert() dereferences it when
// checking whether the scale buffer needs to grow.
scaled_frame_(new AllocatedFrameBuffer(0)) {}
CachedFrame::~CachedFrame() { UnsetSource(); }
int CachedFrame::SetSource(const FrameBuffer* frame, int rotate_degree) {
source_frame_ = frame;
int res = ConvertToYU12();
if (res != 0) {
return res;
}
if (rotate_degree > 0) {
res = CropRotateScale(rotate_degree);
}
return res;
}
void CachedFrame::UnsetSource() { source_frame_ = nullptr; }
uint8_t* CachedFrame::GetSourceBuffer() const {
return source_frame_->GetData();
}
size_t CachedFrame::GetSourceDataSize() const {
return source_frame_->GetDataSize();
}
uint32_t CachedFrame::GetSourceFourCC() const {
return source_frame_->GetFourcc();
}
uint8_t* CachedFrame::GetCachedBuffer() const { return yu12_frame_->GetData(); }
uint32_t CachedFrame::GetCachedFourCC() const {
return yu12_frame_->GetFourcc();
}
uint32_t CachedFrame::GetWidth() const { return yu12_frame_->GetWidth(); }
uint32_t CachedFrame::GetHeight() const { return yu12_frame_->GetHeight(); }
size_t CachedFrame::GetConvertedSize(int fourcc) const {
return ImageProcessor::GetConvertedSize(fourcc, yu12_frame_->GetWidth(),
yu12_frame_->GetHeight());
}
int CachedFrame::Convert(const CameraMetadata& metadata, FrameBuffer* out_frame,
bool video_hack) {
if (video_hack && out_frame->GetFourcc() == V4L2_PIX_FMT_YVU420) {
out_frame->SetFourcc(V4L2_PIX_FMT_YUV420);
}
FrameBuffer* source_frame = yu12_frame_.get();
if (GetWidth() != out_frame->GetWidth() ||
GetHeight() != out_frame->GetHeight()) {
size_t cache_size = ImageProcessor::GetConvertedSize(
yu12_frame_->GetFourcc(), out_frame->GetWidth(),
out_frame->GetHeight());
if (cache_size == 0) {
return -EINVAL;
} else if (cache_size > scaled_frame_->GetBufferSize()) {
scaled_frame_.reset(new AllocatedFrameBuffer(cache_size));
}
scaled_frame_->SetWidth(out_frame->GetWidth());
scaled_frame_->SetHeight(out_frame->GetHeight());
ImageProcessor::Scale(*yu12_frame_.get(), scaled_frame_.get());
source_frame = scaled_frame_.get();
}
return ImageProcessor::ConvertFormat(metadata, *source_frame, out_frame);
}
int CachedFrame::ConvertToYU12() {
size_t cache_size = ImageProcessor::GetConvertedSize(
V4L2_PIX_FMT_YUV420, source_frame_->GetWidth(),
source_frame_->GetHeight());
if (cache_size == 0) {
return -EINVAL;
}
yu12_frame_->SetDataSize(cache_size);
yu12_frame_->SetFourcc(V4L2_PIX_FMT_YUV420);
yu12_frame_->SetWidth(source_frame_->GetWidth());
yu12_frame_->SetHeight(source_frame_->GetHeight());
int res = ImageProcessor::ConvertFormat(CameraMetadata(), *source_frame_,
yu12_frame_.get());
if (res) {
LOGF(ERROR) << "Convert from " << FormatToString(source_frame_->GetFourcc())
<< " to YU12 fails.";
return res;
}
return 0;
}
int CachedFrame::CropRotateScale(int rotate_degree) {
// TODO(henryhsu): Move libyuv part to ImageProcessor.
if (yu12_frame_->GetHeight() % 2 != 0 || yu12_frame_->GetWidth() % 2 != 0) {
LOGF(ERROR) << "yu12_frame_ has odd dimension: " << yu12_frame_->GetWidth()
<< "x" << yu12_frame_->GetHeight();
return -EINVAL;
}
if (yu12_frame_->GetHeight() > yu12_frame_->GetWidth()) {
LOGF(ERROR) << "yu12_frame_ is tall frame already: "
<< yu12_frame_->GetWidth() << "x" << yu12_frame_->GetHeight();
return -EINVAL;
}
// Step 1: Crop and rotate
//
// Original frame Cropped frame Rotated frame
// -------------------- --------
// | | | | | | ---------------
// | | | | | | | |
// | | | | =======>> | | =======>> | |
// | | | | | | ---------------
// | | | | | |
// -------------------- --------
//
int cropped_width = yu12_frame_->GetHeight() * yu12_frame_->GetHeight() /
yu12_frame_->GetWidth();
if (cropped_width % 2 == 1) {
// Round cropped_width up to the nearest even number.
cropped_width++;
}
int cropped_height = yu12_frame_->GetHeight();
int margin = (yu12_frame_->GetWidth() - cropped_width) / 2;
int rotated_height = cropped_width;
int rotated_width = cropped_height;
int rotated_y_stride = rotated_width;
int rotated_uv_stride = rotated_width / 2;
size_t rotated_size =
rotated_y_stride * rotated_height + rotated_uv_stride * rotated_height;
if (rotated_size > cropped_buffer_capacity_) {
cropped_buffer_.reset(new uint8_t[rotated_size]);
cropped_buffer_capacity_ = rotated_size;
}
uint8_t* rotated_y_plane = cropped_buffer_.get();
uint8_t* rotated_u_plane =
rotated_y_plane + rotated_y_stride * rotated_height;
uint8_t* rotated_v_plane =
rotated_u_plane + rotated_uv_stride * rotated_height / 2;
libyuv::RotationMode rotation_mode = libyuv::RotationMode::kRotate90;
switch (rotate_degree) {
case 90:
rotation_mode = libyuv::RotationMode::kRotate90;
break;
case 270:
rotation_mode = libyuv::RotationMode::kRotate270;
break;
default:
LOGF(ERROR) << "Invalid rotation degree: " << rotate_degree;
return -EINVAL;
}
// This libyuv method first crops the frame and then rotates it clockwise by
// |rotation_mode| (90 or 270 degrees).
int res = libyuv::ConvertToI420(
yu12_frame_->GetData(), yu12_frame_->GetDataSize(), rotated_y_plane,
rotated_y_stride, rotated_u_plane, rotated_uv_stride, rotated_v_plane,
rotated_uv_stride, margin, 0, yu12_frame_->GetWidth(),
yu12_frame_->GetHeight(), cropped_width, cropped_height, rotation_mode,
libyuv::FourCC::FOURCC_I420);
if (res) {
LOGF(ERROR) << "ConvertToI420 failed: " << res;
return res;
}
// Step 2: Scale
//
// Final frame
// Rotated frame ---------------------
// -------------- | |
// | | =====>> | |
// | | | |
// -------------- | |
// | |
// ---------------------
//
//
res = libyuv::I420Scale(
rotated_y_plane, rotated_y_stride, rotated_u_plane, rotated_uv_stride,
rotated_v_plane, rotated_uv_stride, rotated_width, rotated_height,
yu12_frame_->GetData(), yu12_frame_->GetWidth(),
yu12_frame_->GetData() +
yu12_frame_->GetWidth() * yu12_frame_->GetHeight(),
yu12_frame_->GetWidth() / 2,
yu12_frame_->GetData() +
yu12_frame_->GetWidth() * yu12_frame_->GetHeight() * 5 / 4,
yu12_frame_->GetWidth() / 2, yu12_frame_->GetWidth(),
yu12_frame_->GetHeight(), libyuv::FilterMode::kFilterNone);
LOGF_IF(ERROR, res) << "I420Scale failed: " << res;
return res;
}
} // namespace arc

View file

@ -0,0 +1,86 @@
/* Copyright 2017 The Chromium OS Authors. All rights reserved.
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*/
#ifndef HAL_USB_CACHED_FRAME_H_
#define HAL_USB_CACHED_FRAME_H_
#include <memory>
#include <camera/CameraMetadata.h>
#include "arc/image_processor.h"
namespace arc {
// CachedFrame contains a source FrameBuffer and a cached, converted
// FrameBuffer. Incoming frames are converted to YU12, the default format of
// libyuv, to allow convenient processing.
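// Illustrative use of this class (a sketch; |v4l2_frame| and |metadata| are
// assumed to exist and are not from the original sources):
//   CachedFrame cached;
//   if (cached.SetSource(&v4l2_frame, -1 /* no rotation */) == 0) {
//     AllocatedFrameBuffer out(cached.GetConvertedSize(V4L2_PIX_FMT_NV21));
//     out.SetWidth(cached.GetWidth());
//     out.SetHeight(cached.GetHeight());
//     out.SetFourcc(V4L2_PIX_FMT_NV21);
//     cached.Convert(metadata, &out);
//     cached.UnsetSource();
//   }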
class CachedFrame {
public:
CachedFrame();
~CachedFrame();
// SetSource() doesn't take ownership of |frame|. The caller can only release
// |frame| after calling UnsetSource(). SetSource() immediately converts the
// incoming frame into YU12. Returns non-zero if it encounters errors.
// If |rotate_degree| is 90 or 270, |frame| will be cropped, rotated by the
// specified amount, and scaled.
// If |rotate_degree| is -1, |frame| will not be cropped, rotated, or scaled.
// This function returns an error if |rotate_degree| is not -1, 90, or 270.
int SetSource(const FrameBuffer* frame, int rotate_degree);
void UnsetSource();
uint8_t* GetSourceBuffer() const;
size_t GetSourceDataSize() const;
uint32_t GetSourceFourCC() const;
uint8_t* GetCachedBuffer() const;
uint32_t GetCachedFourCC() const;
uint32_t GetWidth() const;
uint32_t GetHeight() const;
// Calculate the output buffer size when converting to the specified pixel
// format. |fourcc| is defined as V4L2_PIX_FMT_* in linux/videodev2.h. Return
// 0 on error.
size_t GetConvertedSize(int fourcc) const;
// Caller should fill everything except |data_size| and |fd| of |out_frame|.
// The function will do format conversion and scaling to fit the |out_frame|
// requirements.
// If |video_hack| is true, it outputs YU12 when |hal_pixel_format| is YV12
// (swapping U/V planes). Caller should fill |fourcc|, |data|, and the frame
// dimensions of |out_frame| before calling.
// Returns a non-zero error code on failure; returns 0 on success.
int Convert(const android::CameraMetadata& metadata, FrameBuffer* out_frame,
bool video_hack = false);
private:
int ConvertToYU12();
// When the camera is landscape-mounted and the current camera activity is
// portrait, the frames shown in the activity would be stretched. Therefore,
// we want to simulate a native portrait camera by cropping, rotating the
// frame clockwise by |rotate_degree|, and scaling it. The HAL does not change
// CameraInfo.orientation. Instead, the framework fakes
// CameraInfo.orientation and then tells the HAL how much the frame needs to
// rotate clockwise via |rotate_degree|.
int CropRotateScale(int rotate_degree);
const FrameBuffer* source_frame_;
// Temporary buffer for cropped and rotated results.
std::unique_ptr<uint8_t[]> cropped_buffer_;
size_t cropped_buffer_capacity_;
// Cache YU12 decoded results.
std::unique_ptr<AllocatedFrameBuffer> yu12_frame_;
// Temporary buffer for scaled results.
std::unique_ptr<AllocatedFrameBuffer> scaled_frame_;
};
} // namespace arc
#endif // HAL_USB_CACHED_FRAME_H_


@ -0,0 +1,28 @@
/* Copyright 2016 The Chromium OS Authors. All rights reserved.
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*/
#ifndef INCLUDE_ARC_COMMON_H_
#define INCLUDE_ARC_COMMON_H_
#include <string>
#include <base/logging.h>
#define LOGF(level) LOG(level) << __FUNCTION__ << "(): "
#define LOGFID(level, id) LOG(level) << __FUNCTION__ << "(): id: " << id << ": "
#define LOGF_IF(level, res) LOG_IF(level, res) << __FUNCTION__ << "(): "
#define VLOGF(level) VLOG(level) << __FUNCTION__ << "(): "
#define VLOGFID(level, id) \
VLOG(level) << __FUNCTION__ << "(): id: " << id << ": "
#define VLOGF_ENTER() VLOGF(1) << "enter"
#define VLOGF_EXIT() VLOGF(1) << "exit"
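// Usage example (illustrative): inside a function Foo(),
// LOGF(ERROR) << "open failed"; emits a log line prefixed with "Foo(): ".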
inline std::string FormatToString(int32_t format) {
return std::string(reinterpret_cast<char*>(&format), 4);
}
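// e.g. on a little-endian machine FormatToString(V4L2_PIX_FMT_YUYV) returns
// "YUYV" (illustrative).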
#endif // INCLUDE_ARC_COMMON_H_


@ -0,0 +1,55 @@
/*
* Copyright 2016 The Chromium OS Authors. All rights reserved.
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*/
#ifndef HAL_USB_COMMON_TYPES_H_
#define HAL_USB_COMMON_TYPES_H_
#include <string>
#include <vector>
namespace arc {
struct DeviceInfo {
// ex: /dev/video0
std::string device_path;
// USB vendor id
std::string usb_vid;
// USB product id
std::string usb_pid;
// Some cameras need to wait several frames to output correct images.
uint32_t frames_to_skip_after_streamon;
// Member definitions can be found in https://developer.android.com/
// reference/android/hardware/camera2/CameraCharacteristics.html
uint32_t lens_facing;
int32_t sensor_orientation;
float horizontal_view_angle_16_9;
float horizontal_view_angle_4_3;
std::vector<float> lens_info_available_focal_lengths;
float lens_info_minimum_focus_distance;
float lens_info_optimal_focus_distance;
float vertical_view_angle_16_9;
float vertical_view_angle_4_3;
};
typedef std::vector<DeviceInfo> DeviceInfos;
struct SupportedFormat {
uint32_t width;
uint32_t height;
uint32_t fourcc;
// All the supported frame rates in fps for the given width, height, and
// pixel format. This is not sorted. For example, suppose width, height, and
// fourcc describe 640x480 YUYV. If frameRates are 15.0 and 30.0, the camera
// supports outputting 640x480 YUYV at 15fps or 30fps.
std::vector<float> frameRates;
};
typedef std::vector<SupportedFormat> SupportedFormats;
} // namespace arc
#endif // HAL_USB_COMMON_TYPES_H_


@ -0,0 +1,513 @@
/*
* Copyright 2017 The Chromium OS Authors. All rights reserved.
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*/
#include "arc/exif_utils.h"
#include <cstdlib>
#include <ctime>
#include <libyuv.h>
#include "arc/common.h"
namespace std {
template <>
struct default_delete<ExifEntry> {
inline void operator()(ExifEntry* entry) const { exif_entry_unref(entry); }
};
} // namespace std
namespace arc {
// This comes from the Exif Version 2.3 standard table 9.
const uint8_t gExifAsciiPrefix[] = {0x41, 0x53, 0x43, 0x49,
0x49, 0x0, 0x0, 0x0};
static void SetLatitudeOrLongitudeData(unsigned char* data, double num) {
// Take the integer part of |num|.
ExifLong degrees = static_cast<ExifLong>(num);
ExifLong minutes = static_cast<ExifLong>(60 * (num - degrees));
ExifLong microseconds =
static_cast<ExifLong>(3600000000u * (num - degrees - minutes / 60.0));
exif_set_rational(data, EXIF_BYTE_ORDER_INTEL, {degrees, 1});
exif_set_rational(data + sizeof(ExifRational), EXIF_BYTE_ORDER_INTEL,
{minutes, 1});
exif_set_rational(data + 2 * sizeof(ExifRational), EXIF_BYTE_ORDER_INTEL,
{microseconds, 1000000});
}
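// Worked example (illustrative): num = 37.4225 yields degrees = 37,
// minutes = 25, and {21000000, 1000000} = 21.0 seconds, since
// 37 + 25/60 + 21/3600 = 37.4225.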
ExifUtils::ExifUtils()
: yu12_buffer_(nullptr),
yu12_width_(0),
yu12_height_(0),
thumbnail_width_(0),
thumbnail_height_(0),
exif_data_(nullptr),
app1_buffer_(nullptr),
app1_length_(0) {}
ExifUtils::~ExifUtils() { Reset(); }
bool ExifUtils::Initialize(const uint8_t* buffer, uint16_t width,
uint16_t height, int quality) {
Reset();
if (width % 2 != 0 || height % 2 != 0) {
LOGF(ERROR) << "invalid image size " << width << "x" << height;
return false;
}
if (quality < 1 || quality > 100) {
LOGF(ERROR) << "invalid jpeg quality " << quality;
return false;
}
thumbnail_jpeg_quality_ = quality;
yu12_buffer_ = buffer;
yu12_width_ = width;
yu12_height_ = height;
exif_data_ = exif_data_new();
if (exif_data_ == nullptr) {
LOGF(ERROR) << "allocate memory for exif_data_ failed";
return false;
}
// Set the image options.
exif_data_set_option(exif_data_, EXIF_DATA_OPTION_FOLLOW_SPECIFICATION);
exif_data_set_data_type(exif_data_, EXIF_DATA_TYPE_COMPRESSED);
exif_data_set_byte_order(exif_data_, EXIF_BYTE_ORDER_INTEL);
// Set image width and length.
SetImageWidth(width);
SetImageLength(height);
return true;
}
bool ExifUtils::SetMaker(const std::string& maker) {
size_t entrySize = maker.length() + 1;
std::unique_ptr<ExifEntry> entry = AddVariableLengthEntry(
EXIF_IFD_0, EXIF_TAG_MAKE, EXIF_FORMAT_ASCII, entrySize, entrySize);
if (!entry) {
LOGF(ERROR) << "Adding Make exif entry failed";
return false;
}
memcpy(entry->data, maker.c_str(), entrySize);
return true;
}
bool ExifUtils::SetModel(const std::string& model) {
size_t entrySize = model.length() + 1;
std::unique_ptr<ExifEntry> entry = AddVariableLengthEntry(
EXIF_IFD_0, EXIF_TAG_MODEL, EXIF_FORMAT_ASCII, entrySize, entrySize);
if (!entry) {
LOGF(ERROR) << "Adding Model exif entry failed";
return false;
}
memcpy(entry->data, model.c_str(), entrySize);
return true;
}
bool ExifUtils::SetDateTime(const struct tm& t) {
// The length is 20 bytes including the NULL terminator, per the Exif standard.
char str[20];
int result = snprintf(str, sizeof(str), "%04i:%02i:%02i %02i:%02i:%02i",
t.tm_year + 1900, t.tm_mon + 1, t.tm_mday, t.tm_hour,
t.tm_min, t.tm_sec);
if (result != sizeof(str) - 1) {
LOGF(WARNING) << "Input time is invalid";
return false;
}
std::unique_ptr<ExifEntry> entry =
AddVariableLengthEntry(EXIF_IFD_0, EXIF_TAG_DATE_TIME, EXIF_FORMAT_ASCII,
sizeof(str), sizeof(str));
if (!entry) {
LOGF(ERROR) << "Adding DateTime exif entry failed";
return false;
}
memcpy(entry->data, str, sizeof(str));
return true;
}
bool ExifUtils::SetFocalLength(uint32_t numerator, uint32_t denominator) {
std::unique_ptr<ExifEntry> entry =
AddEntry(EXIF_IFD_EXIF, EXIF_TAG_FOCAL_LENGTH);
if (!entry) {
LOGF(ERROR) << "Adding FocalLength exif entry failed";
return false;
}
exif_set_rational(entry->data, EXIF_BYTE_ORDER_INTEL,
{numerator, denominator});
return true;
}
bool ExifUtils::SetGpsLatitude(double latitude) {
const ExifTag refTag = static_cast<ExifTag>(EXIF_TAG_GPS_LATITUDE_REF);
std::unique_ptr<ExifEntry> refEntry =
AddVariableLengthEntry(EXIF_IFD_GPS, refTag, EXIF_FORMAT_ASCII, 2, 2);
if (!refEntry) {
LOGF(ERROR) << "Adding GPSLatitudeRef exif entry failed";
return false;
}
if (latitude >= 0) {
memcpy(refEntry->data, "N", sizeof("N"));
} else {
memcpy(refEntry->data, "S", sizeof("S"));
latitude *= -1;
}
const ExifTag tag = static_cast<ExifTag>(EXIF_TAG_GPS_LATITUDE);
std::unique_ptr<ExifEntry> entry = AddVariableLengthEntry(
EXIF_IFD_GPS, tag, EXIF_FORMAT_RATIONAL, 3, 3 * sizeof(ExifRational));
if (!entry) {
exif_content_remove_entry(exif_data_->ifd[EXIF_IFD_GPS], refEntry.get());
LOGF(ERROR) << "Adding GPSLatitude exif entry failed";
return false;
}
SetLatitudeOrLongitudeData(entry->data, latitude);
return true;
}
bool ExifUtils::SetGpsLongitude(double longitude) {
ExifTag refTag = static_cast<ExifTag>(EXIF_TAG_GPS_LONGITUDE_REF);
std::unique_ptr<ExifEntry> refEntry =
AddVariableLengthEntry(EXIF_IFD_GPS, refTag, EXIF_FORMAT_ASCII, 2, 2);
if (!refEntry) {
LOGF(ERROR) << "Adding GPSLongitudeRef exif entry failed";
return false;
}
if (longitude >= 0) {
memcpy(refEntry->data, "E", sizeof("E"));
} else {
memcpy(refEntry->data, "W", sizeof("W"));
longitude *= -1;
}
ExifTag tag = static_cast<ExifTag>(EXIF_TAG_GPS_LONGITUDE);
std::unique_ptr<ExifEntry> entry = AddVariableLengthEntry(
EXIF_IFD_GPS, tag, EXIF_FORMAT_RATIONAL, 3, 3 * sizeof(ExifRational));
if (!entry) {
exif_content_remove_entry(exif_data_->ifd[EXIF_IFD_GPS], refEntry.get());
LOGF(ERROR) << "Adding GPSLongitude exif entry failed";
return false;
}
SetLatitudeOrLongitudeData(entry->data, longitude);
return true;
}
bool ExifUtils::SetGpsAltitude(double altitude) {
ExifTag refTag = static_cast<ExifTag>(EXIF_TAG_GPS_ALTITUDE_REF);
std::unique_ptr<ExifEntry> refEntry =
AddVariableLengthEntry(EXIF_IFD_GPS, refTag, EXIF_FORMAT_BYTE, 1, 1);
if (!refEntry) {
LOGF(ERROR) << "Adding GPSAltitudeRef exif entry failed";
return false;
}
if (altitude >= 0) {
*refEntry->data = 0;
} else {
*refEntry->data = 1;
altitude *= -1;
}
ExifTag tag = static_cast<ExifTag>(EXIF_TAG_GPS_ALTITUDE);
std::unique_ptr<ExifEntry> entry = AddVariableLengthEntry(
EXIF_IFD_GPS, tag, EXIF_FORMAT_RATIONAL, 1, sizeof(ExifRational));
if (!entry) {
exif_content_remove_entry(exif_data_->ifd[EXIF_IFD_GPS], refEntry.get());
LOGF(ERROR) << "Adding GPSAltitude exif entry failed";
return false;
}
exif_set_rational(entry->data, EXIF_BYTE_ORDER_INTEL,
{static_cast<ExifLong>(altitude * 1000), 1000});
return true;
}
bool ExifUtils::SetGpsTimestamp(const struct tm& t) {
const ExifTag dateTag = static_cast<ExifTag>(EXIF_TAG_GPS_DATE_STAMP);
const size_t kGpsDateStampSize = 11;
std::unique_ptr<ExifEntry> entry =
AddVariableLengthEntry(EXIF_IFD_GPS, dateTag, EXIF_FORMAT_ASCII,
kGpsDateStampSize, kGpsDateStampSize);
if (!entry) {
LOGF(ERROR) << "Adding GPSDateStamp exif entry failed";
return false;
}
int result =
snprintf(reinterpret_cast<char*>(entry->data), kGpsDateStampSize,
"%04i:%02i:%02i", t.tm_year + 1900, t.tm_mon + 1, t.tm_mday);
if (result != kGpsDateStampSize - 1) {
LOGF(WARNING) << "Input time is invalid";
return false;
}
const ExifTag timeTag = static_cast<ExifTag>(EXIF_TAG_GPS_TIME_STAMP);
entry = AddVariableLengthEntry(EXIF_IFD_GPS, timeTag, EXIF_FORMAT_RATIONAL, 3,
3 * sizeof(ExifRational));
if (!entry) {
LOGF(ERROR) << "Adding GPSTimeStamp exif entry failed";
return false;
}
exif_set_rational(entry->data, EXIF_BYTE_ORDER_INTEL,
{static_cast<ExifLong>(t.tm_hour), 1});
exif_set_rational(entry->data + sizeof(ExifRational), EXIF_BYTE_ORDER_INTEL,
{static_cast<ExifLong>(t.tm_min), 1});
exif_set_rational(entry->data + 2 * sizeof(ExifRational),
EXIF_BYTE_ORDER_INTEL,
{static_cast<ExifLong>(t.tm_sec), 1});
return true;
}
bool ExifUtils::SetGpsProcessingMethod(const std::string& method) {
ExifTag tag = static_cast<ExifTag>(EXIF_TAG_GPS_PROCESSING_METHOD);
size_t size = sizeof(gExifAsciiPrefix) + method.length();
std::unique_ptr<ExifEntry> entry = AddVariableLengthEntry(
EXIF_IFD_GPS, tag, EXIF_FORMAT_UNDEFINED, size, size);
if (!entry) {
LOGF(ERROR) << "Adding GPSProcessingMethod exif entry failed";
return false;
}
memcpy(entry->data, gExifAsciiPrefix, sizeof(gExifAsciiPrefix));
// Since the entry format is EXIF_FORMAT_UNDEFINED, NULL termination is not
// necessary.
memcpy(entry->data + sizeof(gExifAsciiPrefix), method.c_str(),
method.length());
return true;
}
bool ExifUtils::SetThumbnailSize(uint16_t width, uint16_t height) {
if (width % 2 != 0 || height % 2 != 0) {
LOGF(ERROR) << "Invalid thumbnail size " << width << "x" << height;
return false;
}
thumbnail_width_ = width;
thumbnail_height_ = height;
return true;
}
bool ExifUtils::SetOrientation(uint16_t orientation) {
std::unique_ptr<ExifEntry> entry = AddEntry(EXIF_IFD_0, EXIF_TAG_ORIENTATION);
if (!entry) {
LOGF(ERROR) << "Adding Orientation exif entry failed";
return false;
}
/*
* Orientation value:
* 1 2 3 4 5 6 7 8
*
* 888888 888888 88 88 8888888888 88 88 8888888888
* 88 88 88 88 88 88 88 88 88 88 88 88
* 8888 8888 8888 8888 88 8888888888 8888888888 88
* 88 88 88 88
* 88 88 888888 888888
*/
int value = 1;
switch (orientation) {
case 90:
value = 6;
break;
case 180:
value = 3;
break;
case 270:
value = 8;
break;
default:
break;
}
exif_set_short(entry->data, EXIF_BYTE_ORDER_INTEL, value);
return true;
}
bool ExifUtils::GenerateApp1() {
DestroyApp1();
if (thumbnail_width_ > 0 && thumbnail_height_ > 0) {
if (!GenerateThumbnail()) {
LOGF(ERROR) << "Generate thumbnail image failed";
return false;
}
exif_data_->data = const_cast<uint8_t*>(
static_cast<const uint8_t*>(compressor_.GetCompressedImagePtr()));
exif_data_->size = compressor_.GetCompressedImageSize();
}
// Save the result into |app1_buffer_|.
exif_data_save_data(exif_data_, &app1_buffer_, &app1_length_);
if (!app1_length_) {
LOGF(ERROR) << "Allocate memory for app1_buffer_ failed";
return false;
}
/*
* The JPEG segment size field is 16 bits in the spec, so the APP1 payload
* must not exceed 65533 bytes (65535 minus the two bytes of the size field
* itself).
*/
if (app1_length_ > 65533) {
DestroyApp1();
LOGF(ERROR) << "The size of APP1 segment is too large";
return false;
}
return true;
}
const uint8_t* ExifUtils::GetApp1Buffer() { return app1_buffer_; }
unsigned int ExifUtils::GetApp1Length() { return app1_length_; }
void ExifUtils::Reset() {
yu12_buffer_ = nullptr;
yu12_width_ = 0;
yu12_height_ = 0;
thumbnail_width_ = 0;
thumbnail_height_ = 0;
DestroyApp1();
if (exif_data_) {
/*
* Since we decided to ignore the original APP1, we are sure that there is
* no thumbnail allocated by libexif. |exif_data_->data| is actually
* allocated by JpegCompressor. Set |exif_data_->data| to nullptr to
* prevent exif_data_unref() from destroying it incorrectly.
*/
exif_data_->data = nullptr;
exif_data_->size = 0;
exif_data_unref(exif_data_);
exif_data_ = nullptr;
}
}
std::unique_ptr<ExifEntry> ExifUtils::AddVariableLengthEntry(
ExifIfd ifd, ExifTag tag, ExifFormat format, uint64_t components,
unsigned int size) {
// Remove old entry if exists.
exif_content_remove_entry(exif_data_->ifd[ifd],
exif_content_get_entry(exif_data_->ifd[ifd], tag));
ExifMem* mem = exif_mem_new_default();
if (!mem) {
LOGF(ERROR) << "Allocate memory for exif entry failed";
return nullptr;
}
std::unique_ptr<ExifEntry> entry(exif_entry_new_mem(mem));
if (!entry) {
LOGF(ERROR) << "Allocate memory for exif entry failed";
exif_mem_unref(mem);
return nullptr;
}
void* tmpBuffer = exif_mem_alloc(mem, size);
if (!tmpBuffer) {
LOGF(ERROR) << "Allocate memory for exif entry failed";
exif_mem_unref(mem);
return nullptr;
}
entry->data = static_cast<unsigned char*>(tmpBuffer);
entry->tag = tag;
entry->format = format;
entry->components = components;
entry->size = size;
exif_content_add_entry(exif_data_->ifd[ifd], entry.get());
exif_mem_unref(mem);
return entry;
}
std::unique_ptr<ExifEntry> ExifUtils::AddEntry(ExifIfd ifd, ExifTag tag) {
std::unique_ptr<ExifEntry> entry(
exif_content_get_entry(exif_data_->ifd[ifd], tag));
if (entry) {
// exif_content_get_entry() won't ref the entry, so we ref here.
exif_entry_ref(entry.get());
return entry;
}
entry.reset(exif_entry_new());
if (!entry) {
LOGF(ERROR) << "Allocate memory for exif entry failed";
return nullptr;
}
entry->tag = tag;
exif_content_add_entry(exif_data_->ifd[ifd], entry.get());
exif_entry_initialize(entry.get(), tag);
return entry;
}
bool ExifUtils::SetImageWidth(uint16_t width) {
std::unique_ptr<ExifEntry> entry = AddEntry(EXIF_IFD_0, EXIF_TAG_IMAGE_WIDTH);
if (!entry) {
LOGF(ERROR) << "Adding ImageWidth exif entry failed";
return false;
}
exif_set_short(entry->data, EXIF_BYTE_ORDER_INTEL, width);
return true;
}
bool ExifUtils::SetImageLength(uint16_t length) {
std::unique_ptr<ExifEntry> entry =
AddEntry(EXIF_IFD_0, EXIF_TAG_IMAGE_LENGTH);
if (!entry) {
LOGF(ERROR) << "Adding ImageLength exif entry failed";
return false;
}
exif_set_short(entry->data, EXIF_BYTE_ORDER_INTEL, length);
return true;
}
bool ExifUtils::GenerateThumbnail() {
// Resize yuv image to |thumbnail_width_| x |thumbnail_height_|.
std::vector<uint8_t> scaled_buffer;
if (!GenerateYuvThumbnail(&scaled_buffer)) {
LOGF(ERROR) << "Generate YUV thumbnail failed";
return false;
}
// Compress thumbnail to JPEG.
if (!compressor_.CompressImage(scaled_buffer.data(), thumbnail_width_,
thumbnail_height_, thumbnail_jpeg_quality_,
NULL, 0)) {
LOGF(ERROR) << "Compress thumbnail failed";
return false;
}
return true;
}
bool ExifUtils::GenerateYuvThumbnail(std::vector<uint8_t>* scaled_buffer) {
size_t y_plane_size = yu12_width_ * yu12_height_;
const uint8_t* y_plane = yu12_buffer_;
const uint8_t* u_plane = y_plane + y_plane_size;
const uint8_t* v_plane = u_plane + y_plane_size / 4;
size_t scaled_y_plane_size = thumbnail_width_ * thumbnail_height_;
scaled_buffer->resize(scaled_y_plane_size * 3 / 2);
uint8_t* scaled_y_plane = scaled_buffer->data();
uint8_t* scaled_u_plane = scaled_y_plane + scaled_y_plane_size;
uint8_t* scaled_v_plane = scaled_u_plane + scaled_y_plane_size / 4;
int result = libyuv::I420Scale(
y_plane, yu12_width_, u_plane, yu12_width_ / 2, v_plane, yu12_width_ / 2,
yu12_width_, yu12_height_, scaled_y_plane, thumbnail_width_,
scaled_u_plane, thumbnail_width_ / 2, scaled_v_plane,
thumbnail_width_ / 2, thumbnail_width_, thumbnail_height_,
libyuv::kFilterNone);
if (result != 0) {
LOGF(ERROR) << "Scale I420 image failed";
return false;
}
return true;
}
void ExifUtils::DestroyApp1() {
/*
* Since there is no API to access ExifMem in ExifData->priv, we use free
* here, which is the default free function in libexif. See
* exif_data_save_data() for detail.
*/
free(app1_buffer_);
app1_buffer_ = nullptr;
app1_length_ = 0;
}
} // namespace arc


@ -0,0 +1,178 @@
/*
* Copyright 2017 The Chromium OS Authors. All rights reserved.
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*/
#ifndef INCLUDE_ARC_EXIF_UTILS_H_
#define INCLUDE_ARC_EXIF_UTILS_H_
#include <cstddef>
#include <memory>
#include <string>
#include <utility>
#include <vector>
extern "C" {
#include <libexif/exif-data.h>
}
#include "arc/jpeg_compressor.h"
namespace arc {
// ExifUtils can generate an APP1 segment with the tags the caller sets.
// ExifUtils can also add a thumbnail in the APP1 segment if a thumbnail size
// is specified. ExifUtils can be reused with different images by calling
// Initialize().
//
// Example of using this class:
// ExifUtils utils;
// utils.Initialize(inputYU12Buffer, inputWidth, inputHeight,
// outputJpegQuality);
// ...
// // Call ExifUtils functions to set Exif tags.
// ...
// utils.GenerateApp1();
// unsigned int app1Length = utils.GetApp1Length();
// uint8_t* app1Buffer = new uint8_t[app1Length];
// memcpy(app1Buffer, utils.GetApp1Buffer(), app1Length);
class ExifUtils {
public:
ExifUtils();
~ExifUtils();
// Sets input YU12 image |buffer| with |width| x |height|. |quality| is the
// compressed JPEG image quality. The caller should not release |buffer| until
// GenerateApp1() or the destructor is called. Initialize() can be called
// multiple times; the settings of Exif tags will be cleared.
bool Initialize(const uint8_t* buffer, uint16_t width, uint16_t height,
int quality);
// Sets the manufacturer of camera.
// Returns false if memory allocation fails.
bool SetMaker(const std::string& maker);
// Sets the model number of camera.
// Returns false if memory allocation fails.
bool SetModel(const std::string& model);
// Sets the date and time of image last modified. It takes local time. The
// name of the tag is DateTime in IFD0.
// Returns false if memory allocation fails.
bool SetDateTime(const struct tm& t);
// Sets the focal length of lens used to take the image in millimeters.
// Returns false if memory allocation fails.
bool SetFocalLength(uint32_t numerator, uint32_t denominator);
// Sets the latitude with degrees minutes seconds format.
// Returns false if memory allocation fails.
bool SetGpsLatitude(double latitude);
// Sets the longitude with degrees minutes seconds format.
// Returns false if memory allocation fails.
bool SetGpsLongitude(double longitude);
// Sets the altitude in meters.
// Returns false if memory allocation fails.
bool SetGpsAltitude(double altitude);
// Sets GPS date stamp and time stamp (atomic clock). It takes UTC time.
// Returns false if memory allocation fails.
bool SetGpsTimestamp(const struct tm& t);
// Sets GPS processing method.
// Returns false if memory allocation fails.
bool SetGpsProcessingMethod(const std::string& method);
// Since the size of the APP1 segment is limited, it is recommended that the
// thumbnail resolution be 640x480 or smaller. If the thumbnail is too big,
// GenerateApp1() will return false.
// Returns false if |width| or |height| is not even.
bool SetThumbnailSize(uint16_t width, uint16_t height);
// Sets image orientation.
// Returns false if memory allocation fails.
bool SetOrientation(uint16_t orientation);
// Generates APP1 segment.
// Returns false if generating APP1 segment fails.
bool GenerateApp1();
// Gets the buffer of the APP1 segment. This method must be called only after
// calling GenerateApp1().
const uint8_t* GetApp1Buffer();
// Gets the length of the APP1 segment. This method must be called only after
// calling GenerateApp1().
unsigned int GetApp1Length();
private:
// Resets the pointers and memories.
void Reset();
// Adds a variable length tag to |exif_data_|. It will remove the original one
// if the tag exists.
// Returns the entry of the tag. The reference count of returned ExifEntry is
// two.
std::unique_ptr<ExifEntry> AddVariableLengthEntry(ExifIfd ifd, ExifTag tag,
ExifFormat format,
uint64_t components,
unsigned int size);
// Adds an entry of |tag| in |exif_data_|. It won't remove the original one if
// the tag exists.
// Returns the entry of the tag. It adds one reference count to returned
// ExifEntry.
std::unique_ptr<ExifEntry> AddEntry(ExifIfd ifd, ExifTag tag);
// Sets the width (number of columns) of the main image.
// Returns false if memory allocation fails.
bool SetImageWidth(uint16_t width);
// Sets the length (number of rows) of main image.
// Returns false if memory allocation fails.
bool SetImageLength(uint16_t length);
// Generates a thumbnail. Calls compressor_.GetCompressedImagePtr() to get the
// result image.
// Returns false if failed.
bool GenerateThumbnail();
// Resizes the thumbnail yuv image to |thumbnail_width_| x |thumbnail_height_|
// and stores in |scaled_buffer|.
// Returns false if scaling the image fails.
bool GenerateYuvThumbnail(std::vector<uint8_t>* scaled_buffer);
// Destroys the buffer of APP1 segment if exists.
void DestroyApp1();
// The buffer pointer of yuv image (YU12). Not owned by this class.
const uint8_t* yu12_buffer_;
// The size of yuv image.
uint16_t yu12_width_;
uint16_t yu12_height_;
// The size of thumbnail.
uint16_t thumbnail_width_;
uint16_t thumbnail_height_;
// The Exif data (APP1). Owned by this class.
ExifData* exif_data_;
// The raw data of APP1 segment. It's allocated by ExifMem in |exif_data_| but
// owned by this class.
uint8_t* app1_buffer_;
// The length of |app1_buffer_|.
unsigned int app1_length_;
// The quality of compressed thumbnail image. The size of EXIF thumbnail has
// to be smaller than 64KB. If quality is 100, the size may be bigger than
// 64KB.
int thumbnail_jpeg_quality_;
// The YU12 to Jpeg compressor.
JpegCompressor compressor_;
};
} // namespace arc
#endif // INCLUDE_ARC_EXIF_UTILS_H_


@ -0,0 +1,188 @@
/* Copyright 2017 The Chromium OS Authors. All rights reserved.
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*/
#include "arc/frame_buffer.h"
#include <sys/mman.h>
#include <utility>
#include "arc/common.h"
#include "arc/image_processor.h"
namespace arc {
FrameBuffer::FrameBuffer()
: data_(nullptr),
data_size_(0),
buffer_size_(0),
width_(0),
height_(0),
fourcc_(0) {}
FrameBuffer::~FrameBuffer() {}
int FrameBuffer::SetDataSize(size_t data_size) {
if (data_size > buffer_size_) {
LOGF(ERROR) << "Buffer overflow: Buffer only has " << buffer_size_
<< ", but data needs " << data_size;
return -EINVAL;
}
data_size_ = data_size;
return 0;
}
AllocatedFrameBuffer::AllocatedFrameBuffer(int buffer_size) {
buffer_.reset(new uint8_t[buffer_size]);
buffer_size_ = buffer_size;
data_ = buffer_.get();
}
AllocatedFrameBuffer::AllocatedFrameBuffer(uint8_t* buffer, int buffer_size) {
buffer_.reset(buffer);
buffer_size_ = buffer_size;
data_ = buffer_.get();
}
AllocatedFrameBuffer::~AllocatedFrameBuffer() {}
int AllocatedFrameBuffer::SetDataSize(size_t size) {
if (size > buffer_size_) {
buffer_.reset(new uint8_t[size]);
buffer_size_ = size;
data_ = buffer_.get();
}
data_size_ = size;
return 0;
}
void AllocatedFrameBuffer::Reset() { memset(data_, 0, buffer_size_); }
V4L2FrameBuffer::V4L2FrameBuffer(base::ScopedFD fd, int buffer_size,
uint32_t width, uint32_t height,
uint32_t fourcc)
: fd_(std::move(fd)), is_mapped_(false) {
buffer_size_ = buffer_size;
width_ = width;
height_ = height;
fourcc_ = fourcc;
}
V4L2FrameBuffer::~V4L2FrameBuffer() {
if (Unmap()) {
LOGF(ERROR) << "Unmap failed";
}
}
int V4L2FrameBuffer::Map() {
base::AutoLock l(lock_);
if (is_mapped_) {
LOGF(ERROR) << "The buffer is already mapped";
return -EINVAL;
}
void* addr = mmap(NULL, buffer_size_, PROT_READ, MAP_SHARED, fd_.get(), 0);
if (addr == MAP_FAILED) {
LOGF(ERROR) << "mmap() failed: " << strerror(errno);
return -EINVAL;
}
data_ = static_cast<uint8_t*>(addr);
is_mapped_ = true;
return 0;
}
int V4L2FrameBuffer::Unmap() {
base::AutoLock l(lock_);
if (is_mapped_ && munmap(data_, buffer_size_)) {
LOGF(ERROR) << "mummap() failed: " << strerror(errno);
return -EINVAL;
}
is_mapped_ = false;
return 0;
}
GrallocFrameBuffer::GrallocFrameBuffer(buffer_handle_t buffer, uint32_t width,
uint32_t height, uint32_t fourcc,
uint32_t device_buffer_length,
uint32_t stream_usage)
: buffer_(buffer),
is_mapped_(false),
device_buffer_length_(device_buffer_length),
stream_usage_(stream_usage) {
const hw_module_t* module = nullptr;
int ret = hw_get_module(GRALLOC_HARDWARE_MODULE_ID, &module);
if (ret || !module) {
LOGF(ERROR) << "Failed to get gralloc module.";
return;
}
gralloc_module_ = reinterpret_cast<const gralloc_module_t*>(module);
width_ = width;
height_ = height;
fourcc_ = fourcc;
}
GrallocFrameBuffer::~GrallocFrameBuffer() {
if (Unmap()) {
LOGF(ERROR) << "Unmap failed";
}
}
int GrallocFrameBuffer::Map() {
base::AutoLock l(lock_);
if (is_mapped_) {
LOGF(ERROR) << "The buffer is already mapped";
return -EINVAL;
}
void* addr;
int ret = 0;
switch (fourcc_) {
case V4L2_PIX_FMT_YUV420:
case V4L2_PIX_FMT_YVU420:
case V4L2_PIX_FMT_YUYV:
android_ycbcr yuv_data;
ret = gralloc_module_->lock_ycbcr(gralloc_module_, buffer_, stream_usage_,
0, 0, width_, height_, &yuv_data);
addr = yuv_data.y;
break;
case V4L2_PIX_FMT_JPEG:
ret = gralloc_module_->lock(gralloc_module_, buffer_, stream_usage_, 0, 0,
device_buffer_length_, 1, &addr);
break;
case V4L2_PIX_FMT_BGR32:
case V4L2_PIX_FMT_RGB32:
ret = gralloc_module_->lock(gralloc_module_, buffer_, stream_usage_, 0, 0,
width_, height_, &addr);
break;
default:
return -EINVAL;
}
if (ret) {
LOGF(ERROR) << "Failed to gralloc lock buffer: " << ret;
return ret;
}
data_ = static_cast<uint8_t*>(addr);
if (fourcc_ == V4L2_PIX_FMT_YVU420 || fourcc_ == V4L2_PIX_FMT_YUV420 ||
fourcc_ == V4L2_PIX_FMT_NV21 || fourcc_ == V4L2_PIX_FMT_RGB32 ||
fourcc_ == V4L2_PIX_FMT_BGR32) {
buffer_size_ = ImageProcessor::GetConvertedSize(fourcc_, width_, height_);
}
is_mapped_ = true;
return 0;
}
int GrallocFrameBuffer::Unmap() {
base::AutoLock l(lock_);
if (is_mapped_ && gralloc_module_->unlock(gralloc_module_, buffer_)) {
LOGF(ERROR) << "Failed to unmap buffer: ";
return -EINVAL;
}
is_mapped_ = false;
return 0;
}
} // namespace arc


@ -0,0 +1,135 @@
/* Copyright 2017 The Chromium OS Authors. All rights reserved.
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*/
#ifndef HAL_USB_FRAME_BUFFER_H_
#define HAL_USB_FRAME_BUFFER_H_
#include <stdint.h>
#include <memory>
#include <string>
#include <base/files/scoped_file.h>
#include <base/synchronization/lock.h>
#include <hardware/gralloc.h>
namespace arc {
class FrameBuffer {
public:
FrameBuffer();
virtual ~FrameBuffer();
// If mapped successfully, the address is assigned to |data_| and 0 is
// returned. Otherwise, returns -EINVAL.
virtual int Map() = 0;
// Unmaps the mapped address. Returns 0 for success.
virtual int Unmap() = 0;
uint8_t* GetData() const { return data_; }
size_t GetDataSize() const { return data_size_; }
size_t GetBufferSize() const { return buffer_size_; }
uint32_t GetWidth() const { return width_; }
uint32_t GetHeight() const { return height_; }
uint32_t GetFourcc() const { return fourcc_; }
void SetFourcc(uint32_t fourcc) { fourcc_ = fourcc; }
virtual int SetDataSize(size_t data_size);
protected:
uint8_t* data_;
// The number of bytes used in the buffer.
size_t data_size_;
// The number of bytes allocated in the buffer.
size_t buffer_size_;
// Frame resolution.
uint32_t width_;
uint32_t height_;
// This is V4L2_PIX_FMT_* in linux/videodev2.h.
uint32_t fourcc_;
};
// AllocatedFrameBuffer is used for buffers that the HAL allocates itself. The
// user is responsible for managing the memory.
class AllocatedFrameBuffer : public FrameBuffer {
public:
explicit AllocatedFrameBuffer(int buffer_size);
explicit AllocatedFrameBuffer(uint8_t* buffer, int buffer_size);
~AllocatedFrameBuffer() override;
// Map() and Unmap() are no-ops.
int Map() override { return 0; }
int Unmap() override { return 0; }
void SetWidth(uint32_t width) { width_ = width; }
void SetHeight(uint32_t height) { height_ = height; }
int SetDataSize(size_t data_size) override;
void Reset();
private:
std::unique_ptr<uint8_t[]> buffer_;
};
// V4L2FrameBuffer is used for the buffer from V4L2CameraDevice. Maps the fd
// in constructor. Unmaps and closes the fd in destructor.
class V4L2FrameBuffer : public FrameBuffer {
public:
V4L2FrameBuffer(base::ScopedFD fd, int buffer_size, uint32_t width,
uint32_t height, uint32_t fourcc);
// Unmaps |data_| and closes |fd_|.
~V4L2FrameBuffer();
int Map() override;
int Unmap() override;
int GetFd() const { return fd_.get(); }
private:
// File descriptor of V4L2 frame buffer.
base::ScopedFD fd_;
bool is_mapped_;
// Lock to guard |is_mapped_|.
base::Lock lock_;
};
// GrallocFrameBuffer is used for buffers from the Android framework. Uses the
// gralloc module to lock and unlock the buffer.
class GrallocFrameBuffer : public FrameBuffer {
public:
GrallocFrameBuffer(buffer_handle_t buffer, uint32_t width, uint32_t height,
uint32_t fourcc, uint32_t device_buffer_length,
uint32_t stream_usage);
~GrallocFrameBuffer();
int Map() override;
int Unmap() override;
private:
// The buffer currently used for gralloc lock/unlock operations.
buffer_handle_t buffer_;
// Used to import gralloc buffer.
const gralloc_module_t* gralloc_module_;
bool is_mapped_;
// Lock to guard |is_mapped_|.
base::Lock lock_;
// Camera stream and device buffer context.
uint32_t device_buffer_length_;
uint32_t stream_usage_;
};
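// Illustrative use (a sketch; |handle| and |usage| are assumptions, not from
// the original sources):
//   GrallocFrameBuffer fb(handle, width, height, V4L2_PIX_FMT_YUV420,
//                         0 /* device_buffer_length, used only for JPEG */,
//                         usage);
//   if (fb.Map() == 0) {
//     // fb.GetData() is valid until Unmap().
//     fb.Unmap();
//   }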
} // namespace arc
#endif // HAL_USB_FRAME_BUFFER_H_


@ -0,0 +1,491 @@
/* Copyright 2017 The Chromium OS Authors. All rights reserved.
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*/
#include "arc/image_processor.h"
#include <errno.h>
#include <libyuv.h>
#include <time.h>
#include "arc/common.h"
#include "arc/common_types.h"
#include "arc/exif_utils.h"
#include "arc/jpeg_compressor.h"
namespace arc {
using android::CameraMetadata;
/*
* Formats have different names in different header files. Here is the mapping
* table:
*
* android_pixel_format_t videodev2.h FOURCC in libyuv
* -----------------------------------------------------------------------------
* HAL_PIXEL_FORMAT_YV12 = V4L2_PIX_FMT_YVU420 = FOURCC_YV12
* HAL_PIXEL_FORMAT_YCrCb_420_SP = V4L2_PIX_FMT_NV21 = FOURCC_NV21
* HAL_PIXEL_FORMAT_RGBA_8888 = V4L2_PIX_FMT_RGB32 = FOURCC_BGR4
* HAL_PIXEL_FORMAT_YCbCr_422_I = V4L2_PIX_FMT_YUYV = FOURCC_YUYV
* = FOURCC_YUY2
* V4L2_PIX_FMT_YUV420 = FOURCC_I420
* = FOURCC_YU12
* V4L2_PIX_FMT_MJPEG = FOURCC_MJPG
*
* Camera device generates FOURCC_YUYV and FOURCC_MJPG.
* Preview needs FOURCC_ARGB format.
* Software video encoder needs FOURCC_YU12.
* CTS requires FOURCC_YV12 and FOURCC_NV21 for applications.
*
* Android stride requirement:
* YV12 horizontal stride should be a multiple of 16 pixels. See
* android.graphics.ImageFormat.YV12.
* The strides of ARGB, YU12, and NV21 are always equal to the width.
*
* Conversion Path:
* MJPG/YUYV (from camera) -> YU12 -> ARGB (preview)
* -> NV21 (apps)
* -> YV12 (apps)
* -> YU12 (video encoder)
*/
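// An illustrative two-step conversion along the path above (a sketch;
// |metadata|, |mjpg_frame|, |width|, and |height| are assumptions, not from
// the original sources):
//   AllocatedFrameBuffer yu12(0), nv21(0);
//   yu12.SetWidth(width); yu12.SetHeight(height);
//   yu12.SetFourcc(V4L2_PIX_FMT_YUV420);
//   ImageProcessor::ConvertFormat(metadata, mjpg_frame, &yu12);  // MJPG->YU12
//   nv21.SetWidth(width); nv21.SetHeight(height);
//   nv21.SetFourcc(V4L2_PIX_FMT_NV21);
//   ImageProcessor::ConvertFormat(metadata, yu12, &nv21);        // YU12->NV21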
// YV12 horizontal stride should be a multiple of 16 pixels for each plane.
// |dst_stride_uv| is the pixel stride of u or v plane.
static int YU12ToYV12(const void* yv12, void* yu12, int width, int height,
int dst_stride_y, int dst_stride_uv);
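// e.g. for width 328, the aligned strides are Align16(328) == 336 for Y and
// Align16(328 / 2) == 176 for each of U and V (illustrative).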
static int YU12ToNV21(const void* yv12, void* nv21, int width, int height);
static bool ConvertToJpeg(const CameraMetadata& metadata,
const FrameBuffer& in_frame, FrameBuffer* out_frame);
static bool SetExifTags(const CameraMetadata& metadata, ExifUtils* utils);
// Precision of the float-to-rational conversion for EXIF tags.
static const int kRationalPrecision = 10000;
// Default JPEG quality settings.
static const int DEFAULT_JPEG_QUALITY = 80;
inline static size_t Align16(size_t value) { return (value + 15) & ~15; }
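// e.g. Align16(320) == 320 and Align16(321) == 336 (illustrative).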
size_t ImageProcessor::GetConvertedSize(int fourcc, uint32_t width,
uint32_t height) {
if ((width % 2) || (height % 2)) {
LOGF(ERROR) << "Width or height is not even (" << width << " x " << height
<< ")";
return 0;
}
switch (fourcc) {
case V4L2_PIX_FMT_YVU420: // YV12
return Align16(width) * height + Align16(width / 2) * height;
case V4L2_PIX_FMT_YUV420: // YU12
// Fall-through.
case V4L2_PIX_FMT_NV21: // NV21
return width * height * 3 / 2;
case V4L2_PIX_FMT_BGR32:
case V4L2_PIX_FMT_RGB32:
return width * height * 4;
default:
LOGF(ERROR) << "Pixel format " << FormatToString(fourcc)
<< " is unsupported.";
return 0;
}
}
bool ImageProcessor::SupportsConversion(uint32_t from_fourcc,
uint32_t to_fourcc) {
switch (from_fourcc) {
case V4L2_PIX_FMT_YUYV:
return (to_fourcc == V4L2_PIX_FMT_YUV420);
case V4L2_PIX_FMT_YUV420:
return (
to_fourcc == V4L2_PIX_FMT_YUV420 ||
to_fourcc == V4L2_PIX_FMT_YVU420 || to_fourcc == V4L2_PIX_FMT_NV21 ||
to_fourcc == V4L2_PIX_FMT_RGB32 || to_fourcc == V4L2_PIX_FMT_BGR32 ||
to_fourcc == V4L2_PIX_FMT_JPEG);
case V4L2_PIX_FMT_MJPEG:
return (to_fourcc == V4L2_PIX_FMT_YUV420);
default:
return false;
}
}
int ImageProcessor::ConvertFormat(const CameraMetadata& metadata,
const FrameBuffer& in_frame,
FrameBuffer* out_frame) {
if ((in_frame.GetWidth() % 2) || (in_frame.GetHeight() % 2)) {
LOGF(ERROR) << "Width or height is not even (" << in_frame.GetWidth()
<< " x " << in_frame.GetHeight() << ")";
return -EINVAL;
}
size_t data_size = GetConvertedSize(
out_frame->GetFourcc(), in_frame.GetWidth(), in_frame.GetHeight());
if (out_frame->SetDataSize(data_size)) {
LOGF(ERROR) << "Set data size failed";
return -EINVAL;
}
if (in_frame.GetFourcc() == V4L2_PIX_FMT_YUYV) {
switch (out_frame->GetFourcc()) {
case V4L2_PIX_FMT_YUV420: // YU12
{
int res = libyuv::YUY2ToI420(
in_frame.GetData(), /* src_yuy2 */
in_frame.GetWidth() * 2, /* src_stride_yuy2 */
out_frame->GetData(), /* dst_y */
out_frame->GetWidth(), /* dst_stride_y */
out_frame->GetData() +
out_frame->GetWidth() * out_frame->GetHeight(), /* dst_u */
out_frame->GetWidth() / 2, /* dst_stride_u */
out_frame->GetData() + out_frame->GetWidth() *
out_frame->GetHeight() * 5 /
4, /* dst_v */
out_frame->GetWidth() / 2, /* dst_stride_v */
in_frame.GetWidth(), in_frame.GetHeight());
LOGF_IF(ERROR, res) << "YUY2ToI420() for YU12 returns " << res;
return res ? -EINVAL : 0;
}
default:
LOGF(ERROR) << "Destination pixel format "
<< FormatToString(out_frame->GetFourcc())
<< " is unsupported for YUYV source format.";
return -EINVAL;
}
} else if (in_frame.GetFourcc() == V4L2_PIX_FMT_YUV420) {
// V4L2_PIX_FMT_YVU420 is YV12. I420 usually refers to YU12
// (V4L2_PIX_FMT_YUV420), and YV12 is similar to YU12 except that the U/V
// planes are swapped.
switch (out_frame->GetFourcc()) {
case V4L2_PIX_FMT_YVU420: // YV12
{
int ystride = Align16(in_frame.GetWidth());
int uvstride = Align16(in_frame.GetWidth() / 2);
int res = YU12ToYV12(in_frame.GetData(), out_frame->GetData(),
in_frame.GetWidth(), in_frame.GetHeight(), ystride,
uvstride);
LOGF_IF(ERROR, res) << "YU12ToYV12() returns " << res;
return res ? -EINVAL : 0;
}
case V4L2_PIX_FMT_YUV420: // YU12
{
memcpy(out_frame->GetData(), in_frame.GetData(),
in_frame.GetDataSize());
return 0;
}
case V4L2_PIX_FMT_NV21: // NV21
{
// TODO(henryhsu): Use libyuv::I420ToNV21.
int res = YU12ToNV21(in_frame.GetData(), out_frame->GetData(),
in_frame.GetWidth(), in_frame.GetHeight());
LOGF_IF(ERROR, res) << "YU12ToNV21() returns " << res;
return res ? -EINVAL : 0;
}
case V4L2_PIX_FMT_BGR32: {
int res = libyuv::I420ToABGR(
in_frame.GetData(), /* src_y */
in_frame.GetWidth(), /* src_stride_y */
in_frame.GetData() +
in_frame.GetWidth() * in_frame.GetHeight(), /* src_u */
in_frame.GetWidth() / 2, /* src_stride_u */
in_frame.GetData() +
in_frame.GetWidth() * in_frame.GetHeight() * 5 / 4, /* src_v */
in_frame.GetWidth() / 2, /* src_stride_v */
out_frame->GetData(), /* dst_abgr */
out_frame->GetWidth() * 4, /* dst_stride_abgr */
in_frame.GetWidth(), in_frame.GetHeight());
LOGF_IF(ERROR, res) << "I420ToABGR() returns " << res;
return res ? -EINVAL : 0;
}
case V4L2_PIX_FMT_RGB32: {
int res = libyuv::I420ToARGB(
in_frame.GetData(), /* src_y */
in_frame.GetWidth(), /* src_stride_y */
in_frame.GetData() +
in_frame.GetWidth() * in_frame.GetHeight(), /* src_u */
in_frame.GetWidth() / 2, /* src_stride_u */
in_frame.GetData() +
in_frame.GetWidth() * in_frame.GetHeight() * 5 / 4, /* src_v */
in_frame.GetWidth() / 2, /* src_stride_v */
out_frame->GetData(), /* dst_argb */
out_frame->GetWidth() * 4, /* dst_stride_argb */
in_frame.GetWidth(), in_frame.GetHeight());
LOGF_IF(ERROR, res) << "I420ToARGB() returns " << res;
return res ? -EINVAL : 0;
}
case V4L2_PIX_FMT_JPEG: {
bool res = ConvertToJpeg(metadata, in_frame, out_frame);
LOGF_IF(ERROR, !res) << "ConvertToJpeg() returns " << res;
return res ? -EINVAL : 0;
}
default:
LOGF(ERROR) << "Destination pixel format "
<< FormatToString(out_frame->GetFourcc())
<< " is unsupported for YU12 source format.";
return -EINVAL;
}
} else if (in_frame.GetFourcc() == V4L2_PIX_FMT_MJPEG) {
switch (out_frame->GetFourcc()) {
case V4L2_PIX_FMT_YUV420: // YU12
{
int res = libyuv::MJPGToI420(
in_frame.GetData(), /* sample */
in_frame.GetDataSize(), /* sample_size */
out_frame->GetData(), /* dst_y */
out_frame->GetWidth(), /* dst_stride_y */
out_frame->GetData() +
out_frame->GetWidth() * out_frame->GetHeight(), /* dst_u */
out_frame->GetWidth() / 2, /* dst_stride_u */
out_frame->GetData() + out_frame->GetWidth() *
out_frame->GetHeight() * 5 /
4, /* dst_v */
out_frame->GetWidth() / 2, /* dst_stride_v */
in_frame.GetWidth(), in_frame.GetHeight(), out_frame->GetWidth(),
out_frame->GetHeight());
LOGF_IF(ERROR, res) << "MJPEGToI420() returns " << res;
return res ? -EINVAL : 0;
}
default:
LOGF(ERROR) << "Destination pixel format "
<< FormatToString(out_frame->GetFourcc())
<< " is unsupported for MJPEG source format.";
return -EINVAL;
}
} else {
LOGF(ERROR) << "Convert format doesn't support source format "
<< FormatToString(in_frame.GetFourcc());
return -EINVAL;
}
}
int ImageProcessor::Scale(const FrameBuffer& in_frame, FrameBuffer* out_frame) {
if (in_frame.GetFourcc() != V4L2_PIX_FMT_YUV420) {
LOGF(ERROR) << "Pixel format " << FormatToString(in_frame.GetFourcc())
<< " is unsupported.";
return -EINVAL;
}
size_t data_size = GetConvertedSize(
in_frame.GetFourcc(), out_frame->GetWidth(), out_frame->GetHeight());
if (out_frame->SetDataSize(data_size)) {
LOGF(ERROR) << "Set data size failed";
return -EINVAL;
}
out_frame->SetFourcc(in_frame.GetFourcc());
VLOGF(1) << "Scale image from " << in_frame.GetWidth() << "x"
<< in_frame.GetHeight() << " to " << out_frame->GetWidth() << "x"
<< out_frame->GetHeight();
int ret = libyuv::I420Scale(
in_frame.GetData(), in_frame.GetWidth(),
in_frame.GetData() + in_frame.GetWidth() * in_frame.GetHeight(),
in_frame.GetWidth() / 2,
in_frame.GetData() + in_frame.GetWidth() * in_frame.GetHeight() * 5 / 4,
in_frame.GetWidth() / 2, in_frame.GetWidth(), in_frame.GetHeight(),
out_frame->GetData(), out_frame->GetWidth(),
out_frame->GetData() + out_frame->GetWidth() * out_frame->GetHeight(),
out_frame->GetWidth() / 2,
out_frame->GetData() +
out_frame->GetWidth() * out_frame->GetHeight() * 5 / 4,
out_frame->GetWidth() / 2, out_frame->GetWidth(), out_frame->GetHeight(),
libyuv::FilterMode::kFilterNone);
LOGF_IF(ERROR, ret) << "I420Scale failed: " << ret;
return ret;
}
static int YU12ToYV12(const void* yu12, void* yv12, int width, int height,
int dst_stride_y, int dst_stride_uv) {
if ((width % 2) || (height % 2)) {
LOGF(ERROR) << "Width or height is not even (" << width << " x " << height
<< ")";
return -EINVAL;
}
if (dst_stride_y < width || dst_stride_uv < width / 2) {
LOGF(ERROR) << "Y plane stride (" << dst_stride_y
<< ") or U/V plane stride (" << dst_stride_uv
<< ") is invalid for width " << width;
return -EINVAL;
}
const uint8_t* src = reinterpret_cast<const uint8_t*>(yu12);
uint8_t* dst = reinterpret_cast<uint8_t*>(yv12);
const uint8_t* u_src = src + width * height;
uint8_t* u_dst = dst + dst_stride_y * height + dst_stride_uv * height / 2;
const uint8_t* v_src = src + width * height * 5 / 4;
uint8_t* v_dst = dst + dst_stride_y * height;
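// Note the swap relative to YU12: in YV12 the V plane comes first, so |v_dst|
// points right after the Y plane and |u_dst| after the V plane.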
return libyuv::I420Copy(src, width, u_src, width / 2, v_src, width / 2, dst,
dst_stride_y, u_dst, dst_stride_uv, v_dst,
dst_stride_uv, width, height);
}
static int YU12ToNV21(const void* yu12, void* nv21, int width, int height) {
if ((width % 2) || (height % 2)) {
LOGF(ERROR) << "Width or height is not even (" << width << " x " << height
<< ")";
return -EINVAL;
}
const uint8_t* src = reinterpret_cast<const uint8_t*>(yu12);
uint8_t* dst = reinterpret_cast<uint8_t*>(nv21);
const uint8_t* u_src = src + width * height;
const uint8_t* v_src = src + width * height * 5 / 4;
uint8_t* vu_dst = dst + width * height;
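// NV21 keeps the full-size Y plane followed by a single interleaved VU plane,
// so copy Y wholesale and interleave V/U byte pairs below.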
memcpy(dst, src, width * height);
for (int i = 0; i < height / 2; i++) {
for (int j = 0; j < width / 2; j++) {
*vu_dst++ = *v_src++;
*vu_dst++ = *u_src++;
}
}
return 0;
}
static bool ConvertToJpeg(const CameraMetadata& metadata,
const FrameBuffer& in_frame, FrameBuffer* out_frame) {
ExifUtils utils;
int jpeg_quality, thumbnail_jpeg_quality;
camera_metadata_ro_entry entry;
if (metadata.exists(ANDROID_JPEG_QUALITY)) {
entry = metadata.find(ANDROID_JPEG_QUALITY);
jpeg_quality = entry.data.u8[0];
} else {
LOGF(ERROR) << "Could not find jpeg quality in metadata, defaulting to "
<< DEFAULT_JPEG_QUALITY;
jpeg_quality = DEFAULT_JPEG_QUALITY;
}
if (metadata.exists(ANDROID_JPEG_THUMBNAIL_QUALITY)) {
entry = metadata.find(ANDROID_JPEG_THUMBNAIL_QUALITY);
thumbnail_jpeg_quality = entry.data.u8[0];
} else {
thumbnail_jpeg_quality = jpeg_quality;
}
if (!utils.Initialize(in_frame.GetData(), in_frame.GetWidth(),
in_frame.GetHeight(), thumbnail_jpeg_quality)) {
LOGF(ERROR) << "ExifUtils initialization failed.";
return false;
}
if (!SetExifTags(metadata, &utils)) {
LOGF(ERROR) << "Setting Exif tags failed.";
return false;
}
if (!utils.GenerateApp1()) {
LOGF(ERROR) << "Generating APP1 segment failed.";
return false;
}
JpegCompressor compressor;
if (!compressor.CompressImage(in_frame.GetData(), in_frame.GetWidth(),
in_frame.GetHeight(), jpeg_quality,
utils.GetApp1Buffer(), utils.GetApp1Length())) {
LOGF(ERROR) << "JPEG image compression failed";
return false;
}
size_t buffer_length = compressor.GetCompressedImageSize();
memcpy(out_frame->GetData(), compressor.GetCompressedImagePtr(),
buffer_length);
return true;
}
static bool SetExifTags(const CameraMetadata& metadata, ExifUtils* utils) {
time_t raw_time = 0;
struct tm time_info;
bool time_available = time(&raw_time) != -1;
localtime_r(&raw_time, &time_info);
if (!utils->SetDateTime(time_info)) {
LOGF(ERROR) << "Setting data time failed.";
return false;
}
float focal_length;
camera_metadata_ro_entry entry = metadata.find(ANDROID_LENS_FOCAL_LENGTH);
if (entry.count) {
focal_length = entry.data.f[0];
} else {
LOGF(ERROR) << "Cannot find focal length in metadata.";
return false;
}
if (!utils->SetFocalLength(
static_cast<uint32_t>(focal_length * kRationalPrecision),
kRationalPrecision)) {
LOGF(ERROR) << "Setting focal length failed.";
return false;
}
if (metadata.exists(ANDROID_JPEG_GPS_COORDINATES)) {
entry = metadata.find(ANDROID_JPEG_GPS_COORDINATES);
if (entry.count < 3) {
LOGF(ERROR) << "Gps coordinates in metadata is not complete.";
return false;
}
if (!utils->SetGpsLatitude(entry.data.d[0])) {
LOGF(ERROR) << "Setting gps latitude failed.";
return false;
}
if (!utils->SetGpsLongitude(entry.data.d[1])) {
LOGF(ERROR) << "Setting gps longitude failed.";
return false;
}
if (!utils->SetGpsAltitude(entry.data.d[2])) {
LOGF(ERROR) << "Setting gps altitude failed.";
return false;
}
}
if (metadata.exists(ANDROID_JPEG_GPS_PROCESSING_METHOD)) {
entry = metadata.find(ANDROID_JPEG_GPS_PROCESSING_METHOD);
std::string method_str(reinterpret_cast<const char*>(entry.data.u8));
if (!utils->SetGpsProcessingMethod(method_str)) {
LOGF(ERROR) << "Setting gps processing method failed.";
return false;
}
}
if (time_available && metadata.exists(ANDROID_JPEG_GPS_TIMESTAMP)) {
entry = metadata.find(ANDROID_JPEG_GPS_TIMESTAMP);
time_t timestamp = static_cast<time_t>(entry.data.i64[0]);
if (gmtime_r(&timestamp, &time_info)) {
if (!utils->SetGpsTimestamp(time_info)) {
LOGF(ERROR) << "Setting gps timestamp failed.";
return false;
}
} else {
LOGF(ERROR) << "Time tranformation failed.";
return false;
}
}
if (metadata.exists(ANDROID_JPEG_ORIENTATION)) {
entry = metadata.find(ANDROID_JPEG_ORIENTATION);
if (!utils->SetOrientation(entry.data.i32[0])) {
LOGF(ERROR) << "Setting orientation failed.";
return false;
}
}
if (metadata.exists(ANDROID_JPEG_THUMBNAIL_SIZE)) {
entry = metadata.find(ANDROID_JPEG_THUMBNAIL_SIZE);
if (entry.count < 2) {
LOGF(ERROR) << "Thumbnail size in metadata is not complete.";
return false;
}
int thumbnail_width = entry.data.i32[0];
int thumbnail_height = entry.data.i32[1];
if (thumbnail_width > 0 && thumbnail_height > 0) {
if (!utils->SetThumbnailSize(static_cast<uint16_t>(thumbnail_width),
static_cast<uint16_t>(thumbnail_height))) {
LOGF(ERROR) << "Setting thumbnail size failed.";
return false;
}
}
}
return true;
}
} // namespace arc
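
The converters above all rely on the same packed YU12 plane layout. A minimal sketch of that arithmetic, assuming a contiguous buffer with no row padding (the helper name is illustrative, not part of the HAL):

#include <cstddef>
// Plane offsets for a packed YU12 (I420) buffer of |width| x |height|:
// a full-size Y plane, then quarter-size U and V planes.
struct Yu12Layout {
  size_t u_offset;    // width * height
  size_t v_offset;    // width * height * 5 / 4
  size_t total_size;  // width * height * 3 / 2
};
inline Yu12Layout GetYu12Layout(size_t width, size_t height) {
  return {width * height, width * height * 5 / 4, width * height * 3 / 2};
}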

View file

@ -0,0 +1,48 @@
/* Copyright 2017 The Chromium OS Authors. All rights reserved.
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*/
#ifndef HAL_USB_IMAGE_PROCESSOR_H_
#define HAL_USB_IMAGE_PROCESSOR_H_
#include <string>
// FourCC pixel formats (defined as V4L2_PIX_FMT_*).
#include <linux/videodev2.h>
// Declarations of HAL_PIXEL_FORMAT_XXX.
#include <system/graphics.h>
#include <camera/CameraMetadata.h>
#include "frame_buffer.h"
namespace arc {
// V4L2_PIX_FMT_YVU420 (YV12) in ImageProcessor has an alignment requirement:
// the stride of the Y, U, and V planes should be a multiple of 16 pixels.
struct ImageProcessor {
// Calculate the output buffer size when converting to the specified pixel
// format. |fourcc| is defined as V4L2_PIX_FMT_* in linux/videodev2.h.
// Return 0 on error.
static size_t GetConvertedSize(int fourcc, uint32_t width, uint32_t height);
// Return whether this class supports the provided conversion.
static bool SupportsConversion(uint32_t from_fourcc, uint32_t to_fourcc);
// Convert format from |in_frame.fourcc| to |out_frame->fourcc|. Caller should
// fill |data|, |buffer_size|, |width|, and |height| of |out_frame|. The
// function will fill |out_frame->data_size|. Return non-zero error code on
// failure; return 0 on success.
static int ConvertFormat(const android::CameraMetadata& metadata,
const FrameBuffer& in_frame, FrameBuffer* out_frame);
// Scale image size according to |in_frame| and |out_frame|. Only support
// V4L2_PIX_FMT_YUV420 format. Caller should fill |data|, |width|, |height|,
// and |buffer_size| of |out_frame|. The function will fill |data_size| and
// |fourcc| of |out_frame|.
static int Scale(const FrameBuffer& in_frame, FrameBuffer* out_frame);
};
} // namespace arc
#endif // HAL_USB_IMAGE_PROCESSOR_H_
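
A hedged usage sketch of the interface above; the error handling and the requirement that the caller pre-fill |out_frame| follow the comments, while the wrapping function itself is illustrative:

// Sketch: convert a YU12 frame to NV21 via ImageProcessor (illustrative).
int ConvertToNv21(const android::CameraMetadata& metadata,
                  const arc::FrameBuffer& in_frame,
                  arc::FrameBuffer* out_frame) {
  if (!arc::ImageProcessor::SupportsConversion(V4L2_PIX_FMT_YUV420,
                                               V4L2_PIX_FMT_NV21)) {
    return -EINVAL;
  }
  // GetConvertedSize() returns 0 on error; the caller is expected to have
  // filled |data|, |buffer_size|, |width|, and |height| of |out_frame|.
  if (arc::ImageProcessor::GetConvertedSize(V4L2_PIX_FMT_NV21,
                                            in_frame.GetWidth(),
                                            in_frame.GetHeight()) == 0) {
    return -EINVAL;
  }
  return arc::ImageProcessor::ConvertFormat(metadata, in_frame, out_frame);
}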

View file

@ -0,0 +1,190 @@
/*
* Copyright 2017 The Chromium OS Authors. All rights reserved.
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*/
#include "arc/jpeg_compressor.h"
#include <memory>
#include <errno.h>
#include "arc/common.h"
namespace arc {
// The destination manager that can access |result_buffer_| in JpegCompressor.
struct destination_mgr {
public:
struct jpeg_destination_mgr mgr;
JpegCompressor* compressor;
};
JpegCompressor::JpegCompressor() {}
JpegCompressor::~JpegCompressor() {}
bool JpegCompressor::CompressImage(const void* image, int width, int height,
int quality, const void* app1Buffer,
unsigned int app1Size) {
if (width % 8 != 0 || height % 2 != 0) {
LOGF(ERROR) << "Image size can not be handled: " << width << "x" << height;
return false;
}
result_buffer_.clear();
if (!Encode(image, width, height, quality, app1Buffer, app1Size)) {
return false;
}
LOGF(INFO) << "Compressed JPEG: " << (width * height * 12) / 8 << "[" << width
<< "x" << height << "] -> " << result_buffer_.size() << " bytes";
return true;
}
const void* JpegCompressor::GetCompressedImagePtr() {
return result_buffer_.data();
}
size_t JpegCompressor::GetCompressedImageSize() {
return result_buffer_.size();
}
void JpegCompressor::InitDestination(j_compress_ptr cinfo) {
destination_mgr* dest = reinterpret_cast<destination_mgr*>(cinfo->dest);
std::vector<JOCTET>& buffer = dest->compressor->result_buffer_;
buffer.resize(kBlockSize);
dest->mgr.next_output_byte = &buffer[0];
dest->mgr.free_in_buffer = buffer.size();
}
boolean JpegCompressor::EmptyOutputBuffer(j_compress_ptr cinfo) {
destination_mgr* dest = reinterpret_cast<destination_mgr*>(cinfo->dest);
std::vector<JOCTET>& buffer = dest->compressor->result_buffer_;
size_t oldsize = buffer.size();
buffer.resize(oldsize + kBlockSize);
dest->mgr.next_output_byte = &buffer[oldsize];
dest->mgr.free_in_buffer = kBlockSize;
return true;
}
void JpegCompressor::TerminateDestination(j_compress_ptr cinfo) {
destination_mgr* dest = reinterpret_cast<destination_mgr*>(cinfo->dest);
std::vector<JOCTET>& buffer = dest->compressor->result_buffer_;
buffer.resize(buffer.size() - dest->mgr.free_in_buffer);
}
void JpegCompressor::OutputErrorMessage(j_common_ptr cinfo) {
char buffer[JMSG_LENGTH_MAX];
/* Create the message */
(*cinfo->err->format_message)(cinfo, buffer);
LOGF(ERROR) << buffer;
}
bool JpegCompressor::Encode(const void* inYuv, int width, int height,
int jpegQuality, const void* app1Buffer,
unsigned int app1Size) {
jpeg_compress_struct cinfo;
jpeg_error_mgr jerr;
cinfo.err = jpeg_std_error(&jerr);
// Override output_message() to print error log with ALOGE().
cinfo.err->output_message = &OutputErrorMessage;
jpeg_create_compress(&cinfo);
SetJpegDestination(&cinfo);
SetJpegCompressStruct(width, height, jpegQuality, &cinfo);
jpeg_start_compress(&cinfo, TRUE);
if (app1Buffer != nullptr && app1Size > 0) {
jpeg_write_marker(&cinfo, JPEG_APP0 + 1,
static_cast<const JOCTET*>(app1Buffer), app1Size);
}
if (!Compress(&cinfo, static_cast<const uint8_t*>(inYuv))) {
return false;
}
jpeg_finish_compress(&cinfo);
return true;
}
void JpegCompressor::SetJpegDestination(jpeg_compress_struct* cinfo) {
destination_mgr* dest =
static_cast<struct destination_mgr*>((*cinfo->mem->alloc_small)(
(j_common_ptr)cinfo, JPOOL_PERMANENT, sizeof(destination_mgr)));
dest->compressor = this;
dest->mgr.init_destination = &InitDestination;
dest->mgr.empty_output_buffer = &EmptyOutputBuffer;
dest->mgr.term_destination = &TerminateDestination;
cinfo->dest = reinterpret_cast<struct jpeg_destination_mgr*>(dest);
}
void JpegCompressor::SetJpegCompressStruct(int width, int height, int quality,
jpeg_compress_struct* cinfo) {
cinfo->image_width = width;
cinfo->image_height = height;
cinfo->input_components = 3;
cinfo->in_color_space = JCS_YCbCr;
jpeg_set_defaults(cinfo);
jpeg_set_quality(cinfo, quality, TRUE);
jpeg_set_colorspace(cinfo, JCS_YCbCr);
cinfo->raw_data_in = TRUE;
cinfo->dct_method = JDCT_IFAST;
// Configure sampling factors. The sampling factors correspond to JPEG
// 4:2:0 chroma subsampling, matching the YUV420 source format.
cinfo->comp_info[0].h_samp_factor = 2;
cinfo->comp_info[0].v_samp_factor = 2;
cinfo->comp_info[1].h_samp_factor = 1;
cinfo->comp_info[1].v_samp_factor = 1;
cinfo->comp_info[2].h_samp_factor = 1;
cinfo->comp_info[2].v_samp_factor = 1;
}
bool JpegCompressor::Compress(jpeg_compress_struct* cinfo, const uint8_t* yuv) {
JSAMPROW y[kCompressBatchSize];
JSAMPROW cb[kCompressBatchSize / 2];
JSAMPROW cr[kCompressBatchSize / 2];
JSAMPARRAY planes[3]{y, cb, cr};
size_t y_plane_size = cinfo->image_width * cinfo->image_height;
size_t uv_plane_size = y_plane_size / 4;
uint8_t* y_plane = const_cast<uint8_t*>(yuv);
uint8_t* u_plane = const_cast<uint8_t*>(yuv + y_plane_size);
uint8_t* v_plane = const_cast<uint8_t*>(yuv + y_plane_size + uv_plane_size);
std::unique_ptr<uint8_t[]> empty(new uint8_t[cinfo->image_width]);
memset(empty.get(), 0, cinfo->image_width);
while (cinfo->next_scanline < cinfo->image_height) {
for (int i = 0; i < kCompressBatchSize; ++i) {
size_t scanline = cinfo->next_scanline + i;
if (scanline < cinfo->image_height) {
y[i] = y_plane + scanline * cinfo->image_width;
} else {
y[i] = empty.get();
}
}
// cb and cr only have half as many scanlines as y.
for (int i = 0; i < kCompressBatchSize / 2; ++i) {
size_t scanline = cinfo->next_scanline / 2 + i;
if (scanline < cinfo->image_height / 2) {
int offset = scanline * (cinfo->image_width / 2);
cb[i] = u_plane + offset;
cr[i] = v_plane + offset;
} else {
cb[i] = cr[i] = empty.get();
}
}
int processed = jpeg_write_raw_data(cinfo, planes, kCompressBatchSize);
if (processed != kCompressBatchSize) {
LOGF(ERROR) << "Number of processed lines does not equal input lines.";
return false;
}
}
return true;
}
} // namespace arc
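
The three callbacks above implement libjpeg's standard destination-manager contract. A self-contained sketch of the same pattern outside the HAL (the names and the global sink are illustrative only):

#include <cstdio>
#include <vector>
extern "C" {
#include <jpeglib.h>
}
namespace {
constexpr size_t kChunk = 16384;
std::vector<JOCTET> g_out;  // Illustrative output sink.
void InitDest(j_compress_ptr cinfo) {
  g_out.resize(kChunk);
  cinfo->dest->next_output_byte = &g_out[0];
  cinfo->dest->free_in_buffer = g_out.size();
}
boolean EmptyBuffer(j_compress_ptr cinfo) {
  // Called when the buffer is full: grow it and hand libjpeg the new space.
  size_t old_size = g_out.size();
  g_out.resize(old_size + kChunk);
  cinfo->dest->next_output_byte = &g_out[old_size];
  cinfo->dest->free_in_buffer = kChunk;
  return TRUE;
}
void TermDest(j_compress_ptr cinfo) {
  // Trim the unused tail so g_out.size() equals the compressed size.
  g_out.resize(g_out.size() - cinfo->dest->free_in_buffer);
}
}  // namespace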

View file

@ -0,0 +1,74 @@
/*
* Copyright 2017 The Chromium OS Authors. All rights reserved.
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*/
#ifndef INCLUDE_ARC_JPEG_COMPRESSOR_H_
#define INCLUDE_ARC_JPEG_COMPRESSOR_H_
// We must include cstdio before jpeglib.h. It is a requirement of libjpeg.
#include <cstdio>
#include <string>
#include <vector>
extern "C" {
#include <jerror.h>
#include <jpeglib.h>
}
namespace arc {
// Encapsulates a converter from YU12 to JPEG format. This class is not
// thread-safe.
class JpegCompressor {
public:
JpegCompressor();
~JpegCompressor();
// Compresses YU12 image to JPEG format. After calling this method, call
// GetCompressedImagePtr() to get the image. |quality| is the resulting JPEG
// image quality. It ranges from 1 (poorest quality) to 100 (highest quality).
// |app1Buffer| is the buffer of APP1 segment (exif) which will be added to
// the compressed image. Returns false if errors occur during compression.
bool CompressImage(const void* image, int width, int height, int quality,
const void* app1Buffer, unsigned int app1Size);
// Returns the compressed JPEG buffer pointer. This method must be called only
// after calling CompressImage().
const void* GetCompressedImagePtr();
// Returns the compressed JPEG buffer size. This method must be called only
// after calling CompressImage().
size_t GetCompressedImageSize();
private:
// InitDestination(), EmptyOutputBuffer() and TerminateDestination() are
// callback functions to be passed into jpeg library.
static void InitDestination(j_compress_ptr cinfo);
static boolean EmptyOutputBuffer(j_compress_ptr cinfo);
static void TerminateDestination(j_compress_ptr cinfo);
static void OutputErrorMessage(j_common_ptr cinfo);
// Returns false if errors occur.
bool Encode(const void* inYuv, int width, int height, int jpegQuality,
const void* app1Buffer, unsigned int app1Size);
void SetJpegDestination(jpeg_compress_struct* cinfo);
void SetJpegCompressStruct(int width, int height, int quality,
jpeg_compress_struct* cinfo);
// Returns false if errors occur.
bool Compress(jpeg_compress_struct* cinfo, const uint8_t* yuv);
// The block size for encoded jpeg image buffer.
static const int kBlockSize = 16384;
// Process 16 lines of Y and 8 lines each of U and V per batch.
// We must pass at least 16 scanlines according to libjpeg documentation.
static const int kCompressBatchSize = 16;
// The buffer that holds the compressed result.
std::vector<JOCTET> result_buffer_;
};
} // namespace arc
#endif // INCLUDE_ARC_JPEG_COMPRESSOR_H_
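
A brief usage sketch of the class above, assuming a packed 640x480 YU12 input buffer (width must be a multiple of 8 and height even, per CompressImage()):

arc::JpegCompressor compressor;
std::vector<uint8_t> yu12(640 * 480 * 3 / 2);  // Y plane + U + V planes.
if (compressor.CompressImage(yu12.data(), 640, 480, /*quality=*/90,
                             /*app1Buffer=*/nullptr, /*app1Size=*/0)) {
  // Valid only after a successful CompressImage() call.
  const void* jpeg = compressor.GetCompressedImagePtr();
  size_t jpeg_size = compressor.GetCompressedImageSize();
}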

View file

@ -455,6 +455,7 @@ int Camera::preprocessCaptureBuffer(camera3_stream_buffer_t *buffer)
__func__, mId, strerror(-res), res);
return res;
}
::close(buffer->acquire_fence);
}
// Acquire fence has been waited upon.

View file

@ -16,6 +16,7 @@
#include "format_metadata_factory.h"
#include "arc/image_processor.h"
#include "metadata/array_vector.h"
#include "metadata/partial_metadata_factory.h"
#include "metadata/property.h"
@ -35,6 +36,7 @@ static int GetHalFormats(const std::shared_ptr<V4L2Wrapper>& device,
HAL_LOGE("Failed to get device formats.");
return res;
}
for (auto v4l2_format : v4l2_formats) {
int32_t hal_format = StreamFormat::V4L2ToHalPixelFormat(v4l2_format);
if (hal_format < 0) {
@ -44,21 +46,17 @@ static int GetHalFormats(const std::shared_ptr<V4L2Wrapper>& device,
result_formats->insert(hal_format);
}
// In addition to well-defined formats, there may be an
// "Implementation Defined" format chosen by the HAL (in this
// case what that means is managed by the StreamFormat class).
// Get the V4L2 format for IMPLEMENTATION_DEFINED.
int v4l2_format = StreamFormat::HalToV4L2PixelFormat(
HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED);
// If it's available, add IMPLEMENTATION_DEFINED to the result set.
if (v4l2_format && v4l2_formats.count(v4l2_format) > 0) {
result_formats->insert(HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED);
}
return 0;
}
static int FpsRangesCompare(std::array<int32_t, 2> a,
std::array<int32_t, 2> b) {
if (a[1] == b[1]) {
return a[0] > b[0];
}
return a[1] > b[1];
}
int AddFormatComponents(
std::shared_ptr<V4L2Wrapper> device,
std::insert_iterator<PartialMetadataSet> insertion_point) {
@ -71,19 +69,39 @@ int AddFormatComponents(
return res;
}
// Requirements check: need to support YCbCr_420_888, JPEG,
// and "Implementation Defined".
std::set<int32_t> unsupported_hal_formats;
if (hal_formats.find(HAL_PIXEL_FORMAT_YCbCr_420_888) == hal_formats.end()) {
HAL_LOGE("YCbCr_420_888 not supported by device.");
return -ENODEV;
} else if (hal_formats.find(HAL_PIXEL_FORMAT_BLOB) == hal_formats.end()) {
HAL_LOGE("JPEG not supported by device.");
return -ENODEV;
} else if (hal_formats.find(HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED) ==
hal_formats.end()) {
HAL_LOGE("HAL implementation defined format not supported by device.");
return -ENODEV;
HAL_LOGW("YCbCr_420_888 (0x%x) not directly supported by device.",
HAL_PIXEL_FORMAT_YCbCr_420_888);
hal_formats.insert(HAL_PIXEL_FORMAT_YCbCr_420_888);
unsupported_hal_formats.insert(HAL_PIXEL_FORMAT_YCbCr_420_888);
}
if (hal_formats.find(HAL_PIXEL_FORMAT_BLOB) == hal_formats.end()) {
HAL_LOGW("JPEG (0x%x) not directly supported by device.",
HAL_PIXEL_FORMAT_BLOB);
hal_formats.insert(HAL_PIXEL_FORMAT_BLOB);
unsupported_hal_formats.insert(HAL_PIXEL_FORMAT_BLOB);
}
// As hal_formats is populated by reading and converting V4L2 formats to the
// matching HAL formats, we will never see an implementation defined format in
// the list. We populate it ourselves and map it to a qualified format. If no
// qualified formats exist, this will be the first available format.
hal_formats.insert(HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED);
unsupported_hal_formats.insert(HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED);
// Qualified formats are the set of formats supported by this camera that the
// image processor can translate into the YU12 format. We additionally check
// that the conversion from YU12 to the desired hal format is supported.
std::vector<uint32_t> qualified_formats;
res = device->GetQualifiedFormats(&qualified_formats);
if (res && unsupported_hal_formats.size() > 1) {
HAL_LOGE(
"Failed to retrieve qualified formats, cannot perform conversions.");
return res;
}
HAL_LOGI("Supports %d qualified formats.", qualified_formats.size());
// Find sizes and frame/stall durations for all formats.
// We also want to find the smallest max frame duration amongst all formats,
@ -96,7 +114,7 @@ int AddFormatComponents(
// Stall durations are {format, width, height, duration} (duration in ns).
ArrayVector<int64_t, 4> stall_durations;
int64_t min_max_frame_duration = std::numeric_limits<int64_t>::max();
int64_t max_min_frame_duration_yuv = std::numeric_limits<int64_t>::min();
std::vector<std::array<int32_t, 2>> fps_ranges;
for (auto hal_format : hal_formats) {
// Get the corresponding V4L2 format.
uint32_t v4l2_format = StreamFormat::HalToV4L2PixelFormat(hal_format);
@ -105,6 +123,42 @@ int AddFormatComponents(
// came from translating a bunch of V4L2 formats above.
HAL_LOGE("Couldn't find V4L2 format for HAL format %d", hal_format);
return -ENODEV;
} else if (unsupported_hal_formats.find(hal_format) !=
unsupported_hal_formats.end()) {
if (hal_format == HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED) {
if (qualified_formats.size() != 0) {
v4l2_format = qualified_formats[0];
} else if (unsupported_hal_formats.size() == 1) {
v4l2_format = StreamFormat::HalToV4L2PixelFormat(
HAL_PIXEL_FORMAT_YCbCr_420_888);
} else {
// No-op. If there are no qualified formats, and implementation
// defined is not the only unsupported format, then other unsupported
// formats will throw an error.
}
HAL_LOGW(
"Implementation-defined format is set to V4L2 pixel format 0x%x",
v4l2_format);
} else if (qualified_formats.size() == 0) {
HAL_LOGE(
"Camera does not support required format: 0x%x, and there are no "
"qualified"
"formats to transform from.",
hal_format);
return -ENODEV;
} else if (!arc::ImageProcessor::SupportsConversion(V4L2_PIX_FMT_YUV420,
v4l2_format)) {
HAL_LOGE(
"The image processor does not support conversion to required "
"format: 0x%x",
hal_format);
return -ENODEV;
} else {
v4l2_format = qualified_formats[0];
HAL_LOGW(
"Hal format 0x%x will be converted from V4L2 pixel format 0x%x",
hal_format, v4l2_format);
}
}
// Get the available sizes for this format.
@ -160,39 +214,25 @@ int AddFormatComponents(
if (size_max_frame_duration < min_max_frame_duration) {
min_max_frame_duration = size_max_frame_duration;
}
// We only care about the largest min frame duration
// (smallest max frame rate) for YUV sizes.
if (hal_format == HAL_PIXEL_FORMAT_YCbCr_420_888 &&
size_min_frame_duration > max_min_frame_duration_yuv) {
max_min_frame_duration_yuv = size_min_frame_duration;
// ANDROID_CONTROL_AE_AVAILABLE_TARGET_FPS_RANGES will contain the fps
// ranges for YUV_420_888 only, since YUV_420_888 is the default camera
// format on Android.
if (hal_format == HAL_PIXEL_FORMAT_YCbCr_420_888) {
// Convert from frame durations measured in ns.
// Min, max fps supported by all YUV formats.
const int32_t min_fps = 1000000000 / size_max_frame_duration;
const int32_t max_fps = 1000000000 / size_min_frame_duration;
if (std::find(fps_ranges.begin(), fps_ranges.end(),
std::array<int32_t, 2>{min_fps, max_fps}) ==
fps_ranges.end()) {
fps_ranges.push_back({min_fps, max_fps});
}
}
}
}
// Convert from frame durations measured in ns.
// Min fps supported by all formats.
int32_t min_fps = 1000000000 / min_max_frame_duration;
if (min_fps > 15) {
HAL_LOGE("Minimum FPS %d is larger than HAL max allowable value of 15",
min_fps);
return -ENODEV;
}
// Max fps supported by all YUV formats.
int32_t max_yuv_fps = 1000000000 / max_min_frame_duration_yuv;
// ANDROID_CONTROL_AE_AVAILABLE_TARGET_FPS_RANGES should be at minimum
// {mi, ma}, {ma, ma} where mi and ma are min and max frame rates for
// YUV_420_888. Min should be at most 15.
std::vector<std::array<int32_t, 2>> fps_ranges;
fps_ranges.push_back({{min_fps, max_yuv_fps}});
std::array<int32_t, 2> video_fps_range;
int32_t video_fps = 30;
if (video_fps >= max_yuv_fps) {
video_fps_range = {{max_yuv_fps, max_yuv_fps}};
} else {
video_fps_range = {{video_fps, video_fps}};
}
fps_ranges.push_back(video_fps_range);
// Sort fps ranges in descending order.
std::sort(fps_ranges.begin(), fps_ranges.end(), FpsRangesCompare);
// Construct the metadata components.
insertion_point = std::make_unique<Property<ArrayVector<int32_t, 4>>>(
@ -208,10 +248,9 @@ int AddFormatComponents(
// TODO(b/31019725): This should probably not be a NoEffect control.
insertion_point = NoEffectMenuControl<std::array<int32_t, 2>>(
ANDROID_CONTROL_AE_TARGET_FPS_RANGE,
ANDROID_CONTROL_AE_AVAILABLE_TARGET_FPS_RANGES,
fps_ranges,
{{CAMERA3_TEMPLATE_VIDEO_RECORD, video_fps_range},
{OTHER_TEMPLATES, fps_ranges[0]}});
ANDROID_CONTROL_AE_AVAILABLE_TARGET_FPS_RANGES, fps_ranges,
{{CAMERA3_TEMPLATE_VIDEO_RECORD, fps_ranges.front()},
{OTHER_TEMPLATES, fps_ranges.back()}});
return 0;
}
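
For concreteness, the duration-to-fps arithmetic above works out as follows (the durations are made-up values; the comments restate what the code computes):

// Frame durations are in ns, so fps = 1000000000 / duration (integer math).
// A YUV size advertising min duration 33333333 ns and max 100000000 ns gives:
const int32_t max_fps = 1000000000 / 33333333;   // 30
const int32_t min_fps = 1000000000 / 100000000;  // 10
// -> fps_ranges gains {10, 30}. FpsRangesCompare then orders the ranges by
// max fps (then min fps), and the metadata maps CAMERA3_TEMPLATE_VIDEO_RECORD
// to fps_ranges.front() and the other templates to fps_ranges.back().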

View file

@ -17,6 +17,7 @@
#ifndef V4L2_CAMERA_HAL_FORMAT_METADATA_FACTORY_H_
#define V4L2_CAMERA_HAL_FORMAT_METADATA_FACTORY_H_
#include <algorithm>
#include <iterator>
#include <memory>
#include <set>

View file

@ -46,10 +46,12 @@ class FormatMetadataFactoryTest : public Test {
};
TEST_F(FormatMetadataFactoryTest, GetFormatMetadata) {
std::set<uint32_t> formats{V4L2_PIX_FMT_JPEG, V4L2_PIX_FMT_YUV420};
std::set<uint32_t> formats{V4L2_PIX_FMT_JPEG, V4L2_PIX_FMT_YUV420,
V4L2_PIX_FMT_YUYV};
std::map<uint32_t, std::set<std::array<int32_t, 2>>> sizes{
{V4L2_PIX_FMT_JPEG, {{{10, 20}}, {{30, 60}}, {{120, 240}}}},
{V4L2_PIX_FMT_YUV420, {{{1, 2}}, {{3, 6}}, {{12, 24}}}}};
{V4L2_PIX_FMT_YUV420, {{{1, 2}}, {{3, 6}}, {{12, 24}}}},
{V4L2_PIX_FMT_YUYV, {{{20, 40}}, {{80, 160}}, {{320, 640}}}}};
// These need to be of the correct order of magnitude,
// as there is a check for min fps > 15.
std::map<uint32_t, std::map<std::array<int32_t, 2>, std::array<int64_t, 2>>>
@ -60,19 +62,24 @@ TEST_F(FormatMetadataFactoryTest, GetFormatMetadata) {
{V4L2_PIX_FMT_YUV420,
{{{{1, 2}}, {{10000000000, 20000000000}}},
{{{3, 6}}, {{11000000000, 21000000000}}},
{{{12, 24}}, {{10500000000, 19000000000}}}}}};
{{{12, 24}}, {{10500000000, 19000000000}}}}},
{V4L2_PIX_FMT_YUYV,
{{{{20, 40}}, {{11000000000, 22000000000}}},
{{{80, 160}}, {{13000000000, 25000000000}}},
{{{320, 640}}, {{10100000000, 19000000000}}}}}};
// The camera must report at least one qualified format.
std::vector<uint32_t> qualified_formats = {V4L2_PIX_FMT_YUYV};
// Device must support IMPLEMENTATION_DEFINED (as well as JPEG & YUV).
// Just duplicate the values from another format.
uint32_t imp_defined_format = StreamFormat::HalToV4L2PixelFormat(
HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED);
formats.insert(imp_defined_format);
sizes[imp_defined_format] = sizes[V4L2_PIX_FMT_YUV420];
durations[imp_defined_format] = durations[V4L2_PIX_FMT_YUV420];
// For USB cameras, we assume that this format will not be present, and it
// will default to a qualified format or one of the other required formats.
EXPECT_CALL(*mock_device_, GetFormats(_))
.WillOnce(DoAll(SetArgPointee<0>(formats), Return(0)));
EXPECT_CALL(*mock_device_, GetQualifiedFormats(_))
.WillOnce(DoAll(SetArgPointee<0>(qualified_formats), Return(0)));
for (auto format : formats) {
std::set<std::array<int32_t, 2>> format_sizes = sizes[format];
EXPECT_CALL(*mock_device_, GetFormatFrameSizes(format, _))
@ -94,7 +101,7 @@ TEST_F(FormatMetadataFactoryTest, GetFormatMetadata) {
for (auto& component : components) {
android::CameraMetadata metadata;
component->PopulateStaticFields(&metadata);
ASSERT_EQ(metadata.entryCount(), 1);
ASSERT_EQ(metadata.entryCount(), 1u);
int32_t tag = component->StaticTags()[0];
switch (tag) {
case ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS: // Fall through.
@ -119,39 +126,71 @@ TEST_F(FormatMetadataFactoryTest, GetFormatMetadata) {
}
}
TEST_F(FormatMetadataFactoryTest, GetFormatMetadataMissingJpeg) {
uint32_t imp_defined_format = StreamFormat::HalToV4L2PixelFormat(
HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED);
std::set<uint32_t> formats{V4L2_PIX_FMT_YUV420, imp_defined_format};
EXPECT_CALL(*mock_device_, GetFormats(_))
.WillOnce(DoAll(SetArgPointee<0>(formats), Return(0)));
PartialMetadataSet components;
ASSERT_EQ(AddFormatComponents(mock_device_,
std::inserter(components, components.end())),
-ENODEV);
}
TEST_F(FormatMetadataFactoryTest, GetFormatMetadataMissingRequired) {
std::set<uint32_t> formats{V4L2_PIX_FMT_YUYV};
std::map<uint32_t, std::set<std::array<int32_t, 2>>> sizes{
{V4L2_PIX_FMT_YUYV, {{{640, 480}}, {{320, 240}}}}};
std::map<uint32_t, std::map<std::array<int32_t, 2>, std::array<int64_t, 2>>>
durations{{V4L2_PIX_FMT_YUYV,
{{{{640, 480}}, {{100000000, 200000000}}},
{{{320, 240}}, {{100000000, 200000000}}}}}};
TEST_F(FormatMetadataFactoryTest, GetFormatMetadataMissingYuv) {
uint32_t imp_defined_format = StreamFormat::HalToV4L2PixelFormat(
HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED);
std::set<uint32_t> formats{V4L2_PIX_FMT_JPEG, imp_defined_format};
EXPECT_CALL(*mock_device_, GetFormats(_))
.WillOnce(DoAll(SetArgPointee<0>(formats), Return(0)));
PartialMetadataSet components;
ASSERT_EQ(AddFormatComponents(mock_device_,
std::inserter(components, components.end())),
-ENODEV);
}
// If a qualified format is present, we expect that required fields are
// populated as if they are supported.
std::vector<uint32_t> qualified_formats = {V4L2_PIX_FMT_YUYV};
TEST_F(FormatMetadataFactoryTest,
GetFormatMetadataMissingImplementationDefined) {
std::set<uint32_t> formats{V4L2_PIX_FMT_JPEG, V4L2_PIX_FMT_YUV420};
EXPECT_CALL(*mock_device_, GetFormats(_))
.WillOnce(DoAll(SetArgPointee<0>(formats), Return(0)));
EXPECT_CALL(*mock_device_, GetQualifiedFormats(_))
.WillOnce(DoAll(SetArgPointee<0>(qualified_formats), Return(0)));
for (auto format : formats) {
std::set<std::array<int32_t, 2>> format_sizes = sizes[format];
EXPECT_CALL(*mock_device_, GetFormatFrameSizes(format, _))
.Times(AtLeast(1))
.WillRepeatedly(DoAll(SetArgPointee<1>(format_sizes), Return(0)));
for (auto size : format_sizes) {
EXPECT_CALL(*mock_device_, GetFormatFrameDurationRange(format, size, _))
.Times(AtLeast(1))
.WillRepeatedly(
DoAll(SetArgPointee<2>(durations[format][size]), Return(0)));
}
}
// Check that all required formats are present.
PartialMetadataSet components;
ASSERT_EQ(AddFormatComponents(mock_device_,
std::inserter(components, components.end())),
-ENODEV);
0);
std::vector<std::array<int32_t, 2>> target_fps_ranges{{{5, 10}}, {{10, 10}}};
for (auto& component : components) {
android::CameraMetadata metadata;
component->PopulateStaticFields(&metadata);
ASSERT_EQ(metadata.entryCount(), 1u);
int32_t tag = component->StaticTags()[0];
switch (tag) {
case ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS: // Fall through.
case ANDROID_SCALER_AVAILABLE_MIN_FRAME_DURATIONS: // Fall through.
case ANDROID_SCALER_AVAILABLE_STALL_DURATIONS: // Fall through.
// Two sizes per format, four elements per config.
// # formats + 3 for YUV420, JPEG, IMPLEMENTATION_DEFINED.
ExpectMetadataTagCount(metadata, tag, (formats.size() + 3) * 2 * 4);
break;
case ANDROID_SENSOR_INFO_MAX_FRAME_DURATION:
// The lowest max duration from above.
ExpectMetadataEq(metadata, tag, 200000000);
break;
case ANDROID_CONTROL_AE_AVAILABLE_TARGET_FPS_RANGES:
// 2 ranges ({min, max} and {max, max}), each with a min and max.
ExpectMetadataTagCount(metadata, tag, 4);
ExpectMetadataEq(metadata, tag, target_fps_ranges);
break;
default:
FAIL() << "Unexpected component created.";
break;
}
}
}
} // namespace v4l2_camera_hal

View file

@ -64,15 +64,15 @@ class ControlTest : public Test {
virtual void ExpectTags() {
if (use_options_ && report_options_) {
ASSERT_EQ(control_->StaticTags().size(), 1);
ASSERT_EQ(control_->StaticTags().size(), 1u);
EXPECT_EQ(control_->StaticTags()[0], options_tag_);
} else {
EXPECT_TRUE(control_->StaticTags().empty());
}
// Controls use the same delegate, and thus tag, for getting and setting.
ASSERT_EQ(control_->ControlTags().size(), 1);
ASSERT_EQ(control_->ControlTags().size(), 1u);
EXPECT_EQ(control_->ControlTags()[0], delegate_tag_);
ASSERT_EQ(control_->DynamicTags().size(), 1);
ASSERT_EQ(control_->DynamicTags().size(), 1u);
EXPECT_EQ(control_->DynamicTags()[0], delegate_tag_);
}
@ -81,10 +81,10 @@ class ControlTest : public Test {
android::CameraMetadata metadata;
ASSERT_EQ(control_->PopulateStaticFields(&metadata), 0);
if (use_options_ && report_options_) {
EXPECT_EQ(metadata.entryCount(), 1);
EXPECT_EQ(metadata.entryCount(), 1u);
ExpectMetadataEq(metadata, options_tag_, options);
} else {
EXPECT_EQ(metadata.entryCount(), 0);
EXPECT_EQ(metadata.entryCount(), 0u);
// Shouldn't be expecting any options.
EXPECT_TRUE(options.empty());
}
@ -93,7 +93,7 @@ class ControlTest : public Test {
virtual void ExpectValue(uint8_t value) {
android::CameraMetadata metadata;
ASSERT_EQ(control_->PopulateDynamicFields(&metadata), 0);
EXPECT_EQ(metadata.entryCount(), 1);
EXPECT_EQ(metadata.entryCount(), 1u);
ExpectMetadataEq(metadata, delegate_tag_, value);
}

View file

@ -44,11 +44,11 @@ class PartialMetadataFactoryTest : public Test {
}
virtual void ExpectControlTags() {
ASSERT_EQ(control_->StaticTags().size(), 1);
ASSERT_EQ(control_->StaticTags().size(), 1u);
EXPECT_EQ(control_->StaticTags()[0], options_tag_);
ASSERT_EQ(control_->ControlTags().size(), 1);
ASSERT_EQ(control_->ControlTags().size(), 1u);
EXPECT_EQ(control_->ControlTags()[0], delegate_tag_);
ASSERT_EQ(control_->DynamicTags().size(), 1);
ASSERT_EQ(control_->DynamicTags().size(), 1u);
EXPECT_EQ(control_->DynamicTags()[0], delegate_tag_);
}
@ -56,14 +56,14 @@ class PartialMetadataFactoryTest : public Test {
// Options should be available.
android::CameraMetadata metadata;
ASSERT_EQ(control_->PopulateStaticFields(&metadata), 0);
EXPECT_EQ(metadata.entryCount(), 1);
EXPECT_EQ(metadata.entryCount(), 1u);
ExpectMetadataEq(metadata, options_tag_, options);
}
virtual void ExpectControlValue(uint8_t value) {
android::CameraMetadata metadata;
ASSERT_EQ(control_->PopulateDynamicFields(&metadata), 0);
EXPECT_EQ(metadata.entryCount(), 1);
EXPECT_EQ(metadata.entryCount(), 1u);
ExpectMetadataEq(metadata, delegate_tag_, value);
}
@ -84,14 +84,14 @@ TEST_F(PartialMetadataFactoryTest, FixedState) {
uint8_t value = 13;
std::unique_ptr<State<uint8_t>> state = FixedState(delegate_tag_, value);
ASSERT_EQ(state->StaticTags().size(), 0);
ASSERT_EQ(state->ControlTags().size(), 0);
ASSERT_EQ(state->DynamicTags().size(), 1);
ASSERT_EQ(state->StaticTags().size(), 0u);
ASSERT_EQ(state->ControlTags().size(), 0u);
ASSERT_EQ(state->DynamicTags().size(), 1u);
EXPECT_EQ(state->DynamicTags()[0], delegate_tag_);
android::CameraMetadata metadata;
ASSERT_EQ(state->PopulateDynamicFields(&metadata), 0);
EXPECT_EQ(metadata.entryCount(), 1);
EXPECT_EQ(metadata.entryCount(), 1u);
ExpectMetadataEq(metadata, delegate_tag_, value);
}

View file

@ -49,9 +49,9 @@ TEST_F(PropertyTest, Tags) {
Property<int32_t> property(int_tag_, 1);
// Should have only the single tag it was constructed with.
EXPECT_EQ(property.ControlTags().size(), 0);
EXPECT_EQ(property.DynamicTags().size(), 0);
ASSERT_EQ(property.StaticTags().size(), 1);
EXPECT_EQ(property.ControlTags().size(), 0u);
EXPECT_EQ(property.DynamicTags().size(), 0u);
ASSERT_EQ(property.StaticTags().size(), 1u);
// The macro doesn't like the int_tag_ variable being passed in directly.
int32_t expected_tag = int_tag_;
EXPECT_EQ(property.StaticTags()[0], expected_tag);
@ -68,7 +68,7 @@ TEST_F(PropertyTest, PopulateStaticSingleNumber) {
// Check the results.
// Should only have added 1 entry.
EXPECT_EQ(metadata.entryCount(), 1);
EXPECT_EQ(metadata.entryCount(), 1u);
// Should have added the right entry.
ExpectMetadataEq(metadata, int_tag_, data);
}
@ -86,7 +86,7 @@ TEST_F(PropertyTest, PopulateStaticVector) {
// Check the results.
// Should only have added 1 entry.
EXPECT_EQ(metadata.entryCount(), 1);
EXPECT_EQ(metadata.entryCount(), 1u);
// Should have added the right entry.
ExpectMetadataEq(metadata, float_tag_, data);
}
@ -102,7 +102,7 @@ TEST_F(PropertyTest, PopulateStaticArray) {
// Check the results.
// Should only have added 1 entry.
EXPECT_EQ(metadata.entryCount(), 1);
EXPECT_EQ(metadata.entryCount(), 1u);
// Should have added the right entry.
ExpectMetadataEq(metadata, float_tag_, data);
}
@ -120,7 +120,7 @@ TEST_F(PropertyTest, PopulateStaticArrayVector) {
// Check the results.
// Should only have added 1 entry.
EXPECT_EQ(metadata.entryCount(), 1);
EXPECT_EQ(metadata.entryCount(), 1u);
// Should have added the right entry.
ExpectMetadataEq(metadata, byte_tag_, data);
}

View file

@ -59,7 +59,7 @@ TEST_F(StateTest, Tags) {
PrepareState();
EXPECT_TRUE(state_->StaticTags().empty());
EXPECT_TRUE(state_->ControlTags().empty());
ASSERT_EQ(state_->DynamicTags().size(), 1);
ASSERT_EQ(state_->DynamicTags().size(), 1u);
EXPECT_EQ(state_->DynamicTags()[0], tag_);
}
@ -79,7 +79,7 @@ TEST_F(StateTest, PopulateDynamic) {
android::CameraMetadata metadata;
ASSERT_EQ(state_->PopulateDynamicFields(&metadata), 0);
EXPECT_EQ(metadata.entryCount(), 1);
EXPECT_EQ(metadata.entryCount(), 1u);
ExpectMetadataEq(metadata, tag_, expected);
}

View file

@ -80,9 +80,9 @@ static void ExpectMetadataEq(const android::CameraMetadata& metadata,
// Vector of arrays.
template <typename T, size_t N>
static int ExpectMetadataEq(const android::CameraMetadata& metadata,
int32_t tag,
const std::vector<std::array<T, N>>& expected) {
static void ExpectMetadataEq(const android::CameraMetadata& metadata,
int32_t tag,
const std::vector<std::array<T, N>>& expected) {
// Convert to array vector so we know all the elements are contiguous.
ArrayVector<T, N> array_vector;
for (const auto& array : expected) {

View file

@ -20,10 +20,21 @@
#include <system/graphics.h>
#include "arc/image_processor.h"
#include "common.h"
namespace v4l2_camera_hal {
using arc::SupportedFormat;
using arc::SupportedFormats;
static const std::vector<uint32_t> GetSupportedFourCCs() {
// The preference of supported fourccs in the list is from high to low.
static const std::vector<uint32_t> kSupportedFourCCs = {V4L2_PIX_FMT_YUYV,
V4L2_PIX_FMT_MJPEG};
return kSupportedFourCCs;
}
StreamFormat::StreamFormat(int format, uint32_t width, uint32_t height)
// TODO(b/30000211): multiplanar support.
: type_(V4L2_BUF_TYPE_VIDEO_CAPTURE),
@ -42,6 +53,14 @@ StreamFormat::StreamFormat(const v4l2_format& format)
bytes_per_line_(format.fmt.pix.bytesperline),
min_buffer_size_(format.fmt.pix.sizeimage) {}
StreamFormat::StreamFormat(const arc::SupportedFormat& format)
: type_(V4L2_BUF_TYPE_VIDEO_CAPTURE),
v4l2_pixel_format_(format.fourcc),
width_(format.width),
height_(format.height),
bytes_per_line_(0),
min_buffer_size_(0) {}
void StreamFormat::FillFormatRequest(v4l2_format* format) const {
memset(format, 0, sizeof(*format));
format->type = type_;
@ -79,47 +98,127 @@ bool StreamFormat::operator!=(const StreamFormat& other) const {
int StreamFormat::V4L2ToHalPixelFormat(uint32_t v4l2_pixel_format) {
// Translate V4L2 format to HAL format.
int hal_pixel_format = -1;
switch (v4l2_pixel_format) {
case V4L2_PIX_FMT_JPEG:
hal_pixel_format = HAL_PIXEL_FORMAT_BLOB;
break;
case V4L2_PIX_FMT_YUV420:
hal_pixel_format = HAL_PIXEL_FORMAT_YCbCr_420_888;
break;
case V4L2_PIX_FMT_BGR32:
hal_pixel_format = HAL_PIXEL_FORMAT_RGBA_8888;
break;
return HAL_PIXEL_FORMAT_RGBA_8888;
case V4L2_PIX_FMT_JPEG:
return HAL_PIXEL_FORMAT_BLOB;
case V4L2_PIX_FMT_NV21:
return HAL_PIXEL_FORMAT_YCrCb_420_SP;
case V4L2_PIX_FMT_YUV420:
return HAL_PIXEL_FORMAT_YCbCr_420_888;
case V4L2_PIX_FMT_YUYV:
return HAL_PIXEL_FORMAT_YCbCr_422_I;
case V4L2_PIX_FMT_YVU420:
return HAL_PIXEL_FORMAT_YV12;
default:
// Unrecognized format.
HAL_LOGV("Unrecognized v4l2 pixel format %u", v4l2_pixel_format);
break;
}
return hal_pixel_format;
return -1;
}
uint32_t StreamFormat::HalToV4L2PixelFormat(int hal_pixel_format) {
// Translate HAL format to V4L2 format.
uint32_t v4l2_pixel_format = 0;
switch (hal_pixel_format) {
case HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED: // fall-through.
case HAL_PIXEL_FORMAT_RGBA_8888:
// Should be RGB32, but RPi doesn't support that.
// For now we accept that the colors will be off.
v4l2_pixel_format = V4L2_PIX_FMT_BGR32;
break;
case HAL_PIXEL_FORMAT_YCbCr_420_888:
v4l2_pixel_format = V4L2_PIX_FMT_YUV420;
break;
case HAL_PIXEL_FORMAT_BLOB:
v4l2_pixel_format = V4L2_PIX_FMT_JPEG;
break;
return V4L2_PIX_FMT_JPEG;
case HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED: // Fall-through
case HAL_PIXEL_FORMAT_RGBA_8888:
return V4L2_PIX_FMT_BGR32;
case HAL_PIXEL_FORMAT_YCbCr_420_888:
// This is a flexible YUV format whose layout depends on the platform;
// different platforms may use different formats, e.g. YVU420 or NV12.
// For now we return YUV420 first.
// TODO(): call drm_drv.get_fourcc() to get correct format.
return V4L2_PIX_FMT_YUV420;
case HAL_PIXEL_FORMAT_YCbCr_422_I:
return V4L2_PIX_FMT_YUYV;
case HAL_PIXEL_FORMAT_YCrCb_420_SP:
return V4L2_PIX_FMT_NV21;
case HAL_PIXEL_FORMAT_YV12:
return V4L2_PIX_FMT_YVU420;
default:
// Unrecognized format.
HAL_LOGV("Unrecognized HAL pixel format %d", hal_pixel_format);
HAL_LOGV("Pixel format 0x%x is unsupported.", hal_pixel_format);
break;
}
return v4l2_pixel_format;
return -1;
}
// Copies the best-fitting format into |out_format| and returns true if a
// suitable format exists in the given format lists.
bool StreamFormat::FindBestFitFormat(const SupportedFormats& supported_formats,
const SupportedFormats& qualified_formats,
uint32_t fourcc, uint32_t width,
uint32_t height,
SupportedFormat* out_format) {
// Match exact format and resolution if possible.
for (const auto& format : supported_formats) {
if (format.fourcc == fourcc && format.width == width &&
format.height == height) {
if (out_format != NULL) {
*out_format = format;
}
return true;
}
}
// All conversions will be done through CachedFrame for now, which will
// immediately convert the qualified format into YU12 (YUV420). We check
// here that the conversion between YU12 and |fourcc| is supported.
if (!arc::ImageProcessor::SupportsConversion(V4L2_PIX_FMT_YUV420, fourcc)) {
HAL_LOGE("Conversion between YU12 and 0x%x not supported.", fourcc);
return false;
}
// Choose the qualified format with a matching resolution.
for (const auto& format : qualified_formats) {
if (format.width == width && format.height == height) {
if (out_format != NULL) {
*out_format = format;
}
return true;
}
}
return false;
}
// Copies the format in |formats| matching resolution |width|x|height| into
// |out_format| and returns true if one is found.
bool StreamFormat::FindFormatByResolution(const SupportedFormats& formats,
uint32_t width, uint32_t height,
SupportedFormat* out_format) {
for (const auto& format : formats) {
if (format.width == width && format.height == height) {
if (out_format != NULL) {
*out_format = format;
}
return true;
}
}
return false;
}
SupportedFormats StreamFormat::GetQualifiedFormats(
const SupportedFormats& supported_formats) {
// The preference of supported fourccs in the list is from high to low.
const std::vector<uint32_t> supported_fourccs = GetSupportedFourCCs();
SupportedFormats qualified_formats;
for (const auto& supported_fourcc : supported_fourccs) {
for (const auto& supported_format : supported_formats) {
if (supported_format.fourcc != supported_fourcc) {
continue;
}
// Skip if |qualified_formats| already contains this resolution with a
// more-preferred fourcc.
if (FindFormatByResolution(qualified_formats, supported_format.width,
supported_format.height, NULL)) {
continue;
}
qualified_formats.push_back(supported_format);
}
}
return qualified_formats;
}
} // namespace v4l2_camera_hal
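
A sketch tying the helpers above together, assuming a device whose enumerated formats include YUYV and MJPEG (|device_formats| is a stand-in for the wrapper's enumeration output):

// Keep only fourccs the HAL can convert from (YUYV preferred over MJPEG),
// at most one entry per resolution.
arc::SupportedFormats qualified =
    v4l2_camera_hal::StreamFormat::GetQualifiedFormats(device_formats);
// Pick a capture format for a 1280x720 YU12 stream: an exact match wins,
// otherwise any qualified format at that resolution is chosen to convert from.
arc::SupportedFormat chosen;
bool found = v4l2_camera_hal::StreamFormat::FindBestFitFormat(
    device_formats, qualified, V4L2_PIX_FMT_YUV420, 1280, 720, &chosen);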

View file

@ -21,6 +21,7 @@
#include <linux/videodev2.h>
#include "arc/common_types.h"
#include "common.h"
namespace v4l2_camera_hal {
@ -36,6 +37,7 @@ class StreamFormat {
public:
StreamFormat(int format, uint32_t width, uint32_t height);
StreamFormat(const v4l2_format& format);
StreamFormat(const arc::SupportedFormat& format);
virtual ~StreamFormat() = default;
// Only uint32_t members, use default generated copy and assign.
@ -44,6 +46,9 @@ class StreamFormat {
// Accessors.
inline uint32_t type() const { return type_; };
inline uint32_t width() const { return width_; };
inline uint32_t height() const { return height_; };
inline uint32_t v4l2_pixel_format() const { return v4l2_pixel_format_; }
inline uint32_t bytes_per_line() const { return bytes_per_line_; };
bool operator==(const StreamFormat& other) const;
@ -55,6 +60,18 @@ class StreamFormat {
// Returns -1 for unrecognized.
static int V4L2ToHalPixelFormat(uint32_t v4l2_pixel_format);
// ARC++ SupportedFormat Helpers
static bool FindBestFitFormat(const arc::SupportedFormats& supported_formats,
const arc::SupportedFormats& qualified_formats,
uint32_t fourcc, uint32_t width,
uint32_t height,
arc::SupportedFormat* out_format);
static bool FindFormatByResolution(const arc::SupportedFormats& formats,
uint32_t width, uint32_t height,
arc::SupportedFormat* out_format);
static arc::SupportedFormats GetQualifiedFormats(
const arc::SupportedFormats& supported_formats);
private:
uint32_t type_;
uint32_t v4l2_pixel_format_;

View file

@ -123,16 +123,7 @@ void V4L2Camera::disconnect() {
int V4L2Camera::flushBuffers() {
HAL_LOG_ENTER();
int res = device_->StreamOff();
// This is not strictly necessary, but prevents a buildup of aborted
// requests in the in_flight_ map. These should be cleared
// whenever the stream is turned off.
std::lock_guard<std::mutex> guard(in_flight_lock_);
in_flight_.clear();
return res;
return device_->StreamOff();
}
int V4L2Camera::initStaticInfo(android::CameraMetadata* out) {
@ -262,65 +253,55 @@ bool V4L2Camera::enqueueRequestBuffers() {
}
// Actually enqueue the buffer for capture.
{
std::lock_guard<std::mutex> guard(in_flight_lock_);
uint32_t index;
res = device_->EnqueueBuffer(&request->output_buffers[0], &index);
if (res) {
HAL_LOGE("Device failed to enqueue buffer.");
completeRequest(request, res);
return true;
}
// Make sure the stream is on (no effect if already on).
res = device_->StreamOn();
if (res) {
HAL_LOGE("Device failed to turn on stream.");
// Don't really want to send an error for only the request here,
// since this is a full device error.
// TODO: Should trigger full flush.
return true;
}
// Note: the request should be dequeued/flushed from the device
// before removal from in_flight_.
in_flight_.emplace(index, request);
buffers_in_flight_.notify_one();
res = device_->EnqueueRequest(request);
if (res) {
HAL_LOGE("Device failed to enqueue buffer.");
completeRequest(request, res);
return true;
}
// Make sure the stream is on (no effect if already on).
res = device_->StreamOn();
if (res) {
HAL_LOGE("Device failed to turn on stream.");
// Don't really want to send an error for only the request here,
// since this is a full device error.
// TODO: Should trigger full flush.
return true;
}
std::unique_lock<std::mutex> lock(in_flight_lock_);
in_flight_buffer_count_++;
buffers_in_flight_.notify_one();
return true;
}
bool V4L2Camera::dequeueRequestBuffers() {
// Dequeue a buffer.
uint32_t result_index;
int res = device_->DequeueBuffer(&result_index);
if (res) {
if (res == -EAGAIN) {
// EAGAIN just means nothing to dequeue right now.
// Wait until something is available before looping again.
std::unique_lock<std::mutex> lock(in_flight_lock_);
while (in_flight_.empty()) {
buffers_in_flight_.wait(lock);
std::shared_ptr<default_camera_hal::CaptureRequest> request;
int res;
{
std::unique_lock<std::mutex> lock(in_flight_lock_);
res = device_->DequeueRequest(&request);
if (!res) {
if (request) {
completeRequest(request, res);
in_flight_buffer_count_--;
}
} else {
HAL_LOGW("Device failed to dequeue buffer: %d", res);
return true;
}
return true;
}
// Find the associated request and complete it.
std::lock_guard<std::mutex> guard(in_flight_lock_);
auto index_request = in_flight_.find(result_index);
if (index_request != in_flight_.end()) {
completeRequest(index_request->second, 0);
in_flight_.erase(index_request);
if (res == -EAGAIN) {
// EAGAIN just means nothing to dequeue right now.
// Wait until something is available before looping again.
std::unique_lock<std::mutex> lock(in_flight_lock_);
while (in_flight_buffer_count_ == 0) {
buffers_in_flight_.wait(lock);
}
} else {
HAL_LOGW(
"Dequeued non in-flight buffer index %d. "
"This buffer may have been flushed from the HAL but not the device.",
index_request->first);
HAL_LOGW("Device failed to dequeue buffer: %d", res);
}
return true;
}
@ -346,10 +327,11 @@ int V4L2Camera::setupStreams(camera3_stream_configuration_t* stream_config) {
std::lock_guard<std::mutex> guard(in_flight_lock_);
// The framework should be enforcing this, but doesn't hurt to be safe.
if (!in_flight_.empty()) {
if (device_->GetInFlightBufferCount() != 0) {
HAL_LOGE("Can't set device format while frames are in flight.");
return -EINVAL;
}
in_flight_buffer_count_ = 0;
// stream_config should have been validated; assume at least 1 stream.
camera3_stream_t* stream = stream_config->streams[0];
@ -409,6 +391,11 @@ int V4L2Camera::setupStreams(camera3_stream_configuration_t* stream_config) {
for (uint32_t i = 0; i < stream_config->num_streams; ++i) {
stream = stream_config->streams[i];
// Override HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED format.
if (stream->format == HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED) {
stream->format = HAL_PIXEL_FORMAT_RGBA_8888;
}
// Max buffers as reported by the device.
stream->max_buffers = max_buffers;

View file

@ -99,9 +99,7 @@ class V4L2Camera : public default_camera_hal::Camera {
std::queue<std::shared_ptr<default_camera_hal::CaptureRequest>>
request_queue_;
std::mutex in_flight_lock_;
// Maps buffer index : request.
std::map<uint32_t, std::shared_ptr<default_camera_hal::CaptureRequest>>
in_flight_;
uint32_t in_flight_buffer_count_;
// Threads require holding an Android strong pointer.
android::sp<android::Thread> buffer_enqueuer_;
android::sp<android::Thread> buffer_dequeuer_;

View file

@ -1,335 +0,0 @@
/*
* Copyright (C) 2016 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "v4l2_gralloc.h"
#include <linux/videodev2.h>
#include <cerrno>
#include <cstdlib>
#include <hardware/camera3.h>
#include <hardware/gralloc.h>
#include <system/graphics.h>
#include "common.h"
#include "stream_format.h"
namespace v4l2_camera_hal {
// Copy |height| lines from |src| to |dest|,
// where |src| and |dest| may have different line lengths.
void copyWithPadding(uint8_t* dest,
const uint8_t* src,
size_t dest_stride,
size_t src_stride,
size_t height) {
size_t copy_stride = dest_stride;
if (copy_stride > src_stride) {
// Adding padding, not reducing. 0 out the extra memory.
memset(dest, 0, src_stride * height);
copy_stride = src_stride;
}
uint8_t* dest_line_start = dest;
const uint8_t* src_line_start = src;
for (size_t row = 0; row < height;
++row, dest_line_start += dest_stride, src_line_start += src_stride) {
memcpy(dest_line_start, src_line_start, copy_stride);
}
}
V4L2Gralloc* V4L2Gralloc::NewV4L2Gralloc() {
// Initialize and check the gralloc module.
const hw_module_t* module = nullptr;
int res = hw_get_module(GRALLOC_HARDWARE_MODULE_ID, &module);
if (res || !module) {
HAL_LOGE("Couldn't get gralloc module.");
return nullptr;
}
const gralloc_module_t* gralloc =
reinterpret_cast<const gralloc_module_t*>(module);
// This class only supports Gralloc v0, not Gralloc V1.
if (gralloc->common.module_api_version > GRALLOC_MODULE_API_VERSION_0_3) {
HAL_LOGE(
"Invalid gralloc version %x. Only 0.3 (%x) "
"and below are supported by this HAL.",
gralloc->common.module_api_version,
GRALLOC_MODULE_API_VERSION_0_3);
return nullptr;
}
return new V4L2Gralloc(gralloc);
}
// Private. As checked by above factory, module will be non-null
// and a supported version.
V4L2Gralloc::V4L2Gralloc(const gralloc_module_t* module) : mModule(module) {}
V4L2Gralloc::~V4L2Gralloc() {
// Unlock buffers that are still locked.
unlockAllBuffers();
}
int V4L2Gralloc::lock(const camera3_stream_buffer_t* camera_buffer,
uint32_t bytes_per_line,
v4l2_buffer* device_buffer) {
// Lock the camera buffer (varies depending on if the buffer is YUV or not).
std::unique_ptr<BufferData> buffer_data(
new BufferData{camera_buffer, nullptr, bytes_per_line});
buffer_handle_t buffer = *camera_buffer->buffer;
void* data;
camera3_stream_t* stream = camera_buffer->stream;
int ret = 0;
switch (StreamFormat::HalToV4L2PixelFormat(stream->format)) {
// TODO(b/30119452): support more YCbCr formats.
case V4L2_PIX_FMT_YUV420:
android_ycbcr yuv_data;
ret = mModule->lock_ycbcr(mModule,
buffer,
stream->usage,
0,
0,
stream->width,
stream->height,
&yuv_data);
if (ret) {
HAL_LOGE("Failed to lock ycbcr buffer: %d", ret);
return ret;
}
// Check if gralloc format matches v4l2 format
// (same padding, not interleaved, contiguous).
if (yuv_data.ystride == bytes_per_line &&
yuv_data.cstride == bytes_per_line / 2 && yuv_data.chroma_step == 1 &&
(reinterpret_cast<uint8_t*>(yuv_data.cb) ==
reinterpret_cast<uint8_t*>(yuv_data.y) +
(stream->height * yuv_data.ystride)) &&
(reinterpret_cast<uint8_t*>(yuv_data.cr) ==
reinterpret_cast<uint8_t*>(yuv_data.cb) +
(stream->height / 2 * yuv_data.cstride))) {
// If so, great, point to the beginning.
data = yuv_data.y;
} else {
// If not, allocate a contiguous buffer of appropriate size
// (to be transformed back upon unlock).
data = new uint8_t[device_buffer->length];
// Make a dynamically-allocated copy of yuv_data,
// since it will be needed at transform time.
buffer_data->transform_dest.reset(new android_ycbcr(yuv_data));
}
break;
case V4L2_PIX_FMT_JPEG:
// Jpeg buffers are just contiguous blobs; lock length * 1.
ret = mModule->lock(mModule,
buffer,
stream->usage,
0,
0,
device_buffer->length,
1,
&data);
if (ret) {
HAL_LOGE("Failed to lock jpeg buffer: %d", ret);
return ret;
}
break;
case V4L2_PIX_FMT_BGR32: // Fall-through.
case V4L2_PIX_FMT_RGB32:
// RGB formats have nice agreed upon representation. Unless using android
// flex formats.
ret = mModule->lock(mModule,
buffer,
stream->usage,
0,
0,
stream->width,
stream->height,
&data);
if (ret) {
HAL_LOGE("Failed to lock RGB buffer: %d", ret);
return ret;
}
break;
default:
return -EINVAL;
}
if (!data) {
ALOGE("Gralloc lock returned null ptr");
return -ENODEV;
}
// Set up the device buffer.
static_assert(sizeof(unsigned long) >= sizeof(void*),
"void* must be able to fit in the v4l2_buffer m.userptr "
"field (unsigned long) for this code to work");
device_buffer->m.userptr = reinterpret_cast<unsigned long>(data);
// Note the mapping of data:buffer info for when unlock is called.
mBufferMap.emplace(data, buffer_data.release());
return 0;
}
int V4L2Gralloc::unlock(const v4l2_buffer* device_buffer) {
// TODO(b/30000211): support multi-planar data (video_capture_mplane).
if (device_buffer->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) {
return -EINVAL;
}
void* data = reinterpret_cast<void*>(device_buffer->m.userptr);
// Find and pop the matching entry in the map.
auto map_entry = mBufferMap.find(data);
if (map_entry == mBufferMap.end()) {
HAL_LOGE("No matching buffer for data at %p", data);
return -EINVAL;
}
std::unique_ptr<const BufferData> buffer_data(map_entry->second);
mBufferMap.erase(map_entry);
const camera3_stream_buffer_t* camera_buffer = buffer_data->camera_buffer;
const buffer_handle_t buffer = *camera_buffer->buffer;
// Check for transform.
if (buffer_data->transform_dest) {
HAL_LOGV("Transforming V4L2 YUV to gralloc YUV.");
// In this case data was allocated by this class, put it in a unique_ptr
// to ensure it gets cleaned up no matter which way this function exits.
std::unique_ptr<uint8_t[]> data_cleanup(reinterpret_cast<uint8_t*>(data));
uint32_t bytes_per_line = buffer_data->v4l2_bytes_per_line;
android_ycbcr* yuv_data = buffer_data->transform_dest.get();
// Should only occur in error situations.
if (device_buffer->bytesused == 0) {
return -EINVAL;
}
// Transform V4L2 to Gralloc, copying each plane to the correct place,
// adjusting padding, and interleaving if necessary.
uint32_t height = camera_buffer->stream->height;
// Y data first.
size_t y_len = bytes_per_line * height;
if (yuv_data->ystride == bytes_per_line) {
// Data should match exactly.
memcpy(yuv_data->y, data, y_len);
} else {
HAL_LOGV("Changing padding on Y plane from %u to %u.",
bytes_per_line,
yuv_data->ystride);
// Wrong padding from V4L2.
copyWithPadding(reinterpret_cast<uint8_t*>(yuv_data->y),
reinterpret_cast<uint8_t*>(data),
yuv_data->ystride,
bytes_per_line,
height);
}
// C data.
// TODO(b/30119452): These calculations assume YCbCr_420_888.
size_t c_len = y_len / 4;
uint32_t c_bytes_per_line = bytes_per_line / 2;
// V4L2 is packed, meaning the data is stored as contiguous {y, cb, cr}.
uint8_t* cb_device = reinterpret_cast<uint8_t*>(data) + y_len;
uint8_t* cr_device = cb_device + c_len;
size_t step = yuv_data->chroma_step;
if (step == 1) {
// Still planar.
if (yuv_data->cstride == c_bytes_per_line) {
// Data should match exactly.
memcpy(yuv_data->cb, cb_device, c_len);
memcpy(yuv_data->cr, cr_device, c_len);
} else {
HAL_LOGV("Changing padding on C plane from %u to %u.",
c_bytes_per_line,
yuv_data->cstride);
// Wrong padding from V4L2.
copyWithPadding(reinterpret_cast<uint8_t*>(yuv_data->cb),
cb_device,
yuv_data->cstride,
c_bytes_per_line,
height / 2);
copyWithPadding(reinterpret_cast<uint8_t*>(yuv_data->cr),
cr_device,
yuv_data->cstride,
c_bytes_per_line,
height / 2);
}
} else {
// Desire semiplanar (cb and cr interleaved).
HAL_LOGV("Interleaving cb and cr. Padding going from %u to %u.",
c_bytes_per_line,
yuv_data->cstride);
uint32_t c_height = height / 2;
uint32_t c_width = camera_buffer->stream->width / 2;
// Zero out destination
uint8_t* cb_gralloc = reinterpret_cast<uint8_t*>(yuv_data->cb);
uint8_t* cr_gralloc = reinterpret_cast<uint8_t*>(yuv_data->cr);
memset(cb_gralloc, 0, c_width * c_height * step);
// Interleaving means we need to copy the cb and cr bytes one by one.
for (size_t line = 0; line < c_height; ++line,
cb_gralloc += yuv_data->cstride,
cr_gralloc += yuv_data->cstride,
cb_device += c_bytes_per_line,
cr_device += c_bytes_per_line) {
for (size_t i = 0; i < c_width; ++i) {
*(cb_gralloc + (i * step)) = *(cb_device + i);
*(cr_gralloc + (i * step)) = *(cr_device + i);
}
}
}
}
// Unlock.
int res = mModule->unlock(mModule, buffer);
if (res) {
HAL_LOGE("Failed to unlock buffer at %p", buffer);
return -ENODEV;
}
return 0;
}
int V4L2Gralloc::unlockAllBuffers() {
HAL_LOG_ENTER();
bool failed = false;
for (auto const& entry : mBufferMap) {
int res = mModule->unlock(mModule, *entry.second->camera_buffer->buffer);
if (res) {
failed = true;
}
// When there is a transform to be made, the buffer returned by lock()
// is dynamically allocated (to hold the pre-transform data).
if (entry.second->transform_dest) {
delete[] reinterpret_cast<uint8_t*>(entry.first);
}
// The BufferData entry is always dynamically allocated in lock().
delete entry.second;
}
mBufferMap.clear();
// If any unlock failed, return error.
if (failed) {
return -ENODEV;
}
return 0;
}
} // namespace default_camera_hal

View file

@ -1,71 +0,0 @@
/*
* Copyright (C) 2016 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef V4L2_CAMERA_HAL_V4L2_GRALLOC_H_
#define V4L2_CAMERA_HAL_V4L2_GRALLOC_H_
#include <linux/videodev2.h>
#include <unordered_map>
#include <hardware/camera3.h>
#include <hardware/gralloc.h>
#include <system/graphics.h>
namespace v4l2_camera_hal {
// Generously allow up to 6MB (the largest JPEG on the RPi camera is about 5MB).
static constexpr size_t V4L2_MAX_JPEG_SIZE = 6000000;
// V4L2Gralloc is a wrapper around relevant parts of a gralloc module,
// with some assistive transformations.
class V4L2Gralloc {
public:
// Use this method to create V4L2Gralloc objects. Functionally equivalent
// to "new V4L2Gralloc", except that it may return nullptr in case of failure.
static V4L2Gralloc* NewV4L2Gralloc();
virtual ~V4L2Gralloc();
// Lock a camera buffer. Uses device buffer length, sets user pointer.
int lock(const camera3_stream_buffer_t* camera_buffer,
uint32_t bytes_per_line,
v4l2_buffer* device_buffer);
// Unlock a buffer that was locked by this helper (equality determined
// based on buffer user pointer, not the specific object).
int unlock(const v4l2_buffer* device_buffer);
// Release all held locks.
int unlockAllBuffers();
private:
// Constructor is private to allow failing on bad input.
// Use NewV4L2Gralloc instead.
V4L2Gralloc(const gralloc_module_t* module);
const gralloc_module_t* mModule;
struct BufferData {
const camera3_stream_buffer_t* camera_buffer;
// Below fields only used when a ycbcr format transform is necessary.
std::unique_ptr<android_ycbcr> transform_dest; // nullptr if no transform.
uint32_t v4l2_bytes_per_line;
};
// Map data pointer : BufferData about that buffer.
std::unordered_map<void*, const BufferData*> mBufferMap;
};
}  // namespace v4l2_camera_hal
#endif // V4L2_CAMERA_HAL_V4L2_GRALLOC_H_
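Since this change deletes V4L2Gralloc, for the record, a minimal standalone sketch of the map-keyed-by-user-pointer pattern its lock()/unlock() pair relied on (simplified stand-in types, not the HAL's):
#include <cassert>
#include <unordered_map>
struct FakeBufferData { int id; };
int main() {
  std::unordered_map<void*, const FakeBufferData*> buffer_map;
  int backing = 0;            // stands in for the locked gralloc data
  void* userptr = &backing;   // what lock() stores in v4l2_buffer.m.userptr
  buffer_map[userptr] = new FakeBufferData{7};
  // unlock() only gets the device buffer back, so it looks up by userptr.
  auto it = buffer_map.find(userptr);
  assert(it != buffer_map.end() && it->second->id == 7);
  delete it->second;
  buffer_map.erase(it);
  return 0;
}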

View file

@ -27,7 +27,6 @@
#include "metadata/partial_metadata_factory.h"
#include "metadata/property.h"
#include "metadata/scaling_converter.h"
#include "v4l2_gralloc.h"
namespace v4l2_camera_hal {
@ -37,6 +36,8 @@ const camera_metadata_rational_t kAeCompensationUnit = {1, 1000};
const int64_t kV4L2ExposureTimeStepNs = 100000;
// According to spec, each unit of V4L2_CID_ISO_SENSITIVITY is ISO/1000.
const int32_t kV4L2SensitivityDenominator = 1000;
// Generously allow up to 6MB (the largest size on the RPi Camera is about 5MB).
const size_t kV4L2MaxJpegSize = 6000000;
int GetV4L2Metadata(std::shared_ptr<V4L2Wrapper> device,
std::unique_ptr<Metadata>* result) {
@ -433,10 +434,9 @@ int GetV4L2Metadata(std::shared_ptr<V4L2Wrapper> device,
ANDROID_JPEG_THUMBNAIL_SIZE,
ANDROID_JPEG_AVAILABLE_THUMBNAIL_SIZES,
{{{0, 0}}}));
// TODO(b/31022752): Get this from the device,
// not constant (from V4L2Gralloc.h).
// TODO(b/31022752): Get this from the device, not constant.
components.insert(std::unique_ptr<PartialMetadataInterface>(
new Property<int32_t>(ANDROID_JPEG_MAX_SIZE, V4L2_MAX_JPEG_SIZE)));
new Property<int32_t>(ANDROID_JPEG_MAX_SIZE, kV4L2MaxJpegSize)));
// TODO(b/31021672): Other JPEG controls (GPS, quality, orientation).
// TODO(b/29939583): V4L2 can only support 1 stream at a time.
// For now, just reporting minimum allowable for LIMITED devices.
@ -476,7 +476,7 @@ int GetV4L2Metadata(std::shared_ptr<V4L2Wrapper> device,
new Property<uint8_t>(ANDROID_SCALER_CROPPING_TYPE,
ANDROID_SCALER_CROPPING_TYPE_FREEFORM)));
// Spoof pixel array size for now, eventually get from CROPCAP.
std::array<int32_t, 2> pixel_array_size = {{640, 480}};
std::array<int32_t, 2> pixel_array_size = {{3280, 2464}};
components.insert(std::unique_ptr<PartialMetadataInterface>(
new Property<std::array<int32_t, 2>>(ANDROID_SENSOR_INFO_PIXEL_ARRAY_SIZE,
pixel_array_size)));
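The Property pattern above pins one metadata tag to one fixed value. A minimal standalone sketch of the idea (simplified stand-ins with a placeholder tag constant; the real PartialMetadataInterface and Property live in metadata/):
#include <cstdint>
#include <cstdio>
#include <memory>
#include <set>
struct PartialMetadata {
  virtual ~PartialMetadata() = default;
  virtual void Fill() const = 0;
};
// Fixed-value property: one tag pinned to one value.
struct FixedInt32Property : PartialMetadata {
  FixedInt32Property(uint32_t tag, int32_t value) : tag_(tag), value_(value) {}
  void Fill() const override { printf("tag 0x%x -> %d\n", tag_, value_); }
  uint32_t tag_;
  int32_t value_;
};
int main() {
  const uint32_t kFakeJpegMaxSizeTag = 0x1;  // placeholder tag value
  std::set<std::unique_ptr<PartialMetadata>> components;
  components.insert(
      std::make_unique<FixedInt32Property>(kFakeJpegMaxSizeTag, 6000000));
  for (const auto& c : components) c->Fill();
  return 0;
}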

View file

@ -29,29 +29,35 @@
#include <android-base/unique_fd.h>
#include "common.h"
#include "stream_format.h"
#include "v4l2_gralloc.h"
#include "arc/cached_frame.h"
namespace v4l2_camera_hal {
const int32_t kStandardSizes[][2] = {{640, 480}, {320, 240}};
using arc::AllocatedFrameBuffer;
using arc::SupportedFormat;
using arc::SupportedFormats;
using default_camera_hal::CaptureRequest;
const int32_t kStandardSizes[][2] = {
{4096, 2160}, // 4KDCI (for USB camera)
{3840, 2160}, // 4KUHD (for USB camera)
{3280, 2464}, // 8MP
{2560, 1440}, // QHD
{1920, 1080}, // HD1080
{1640, 1232}, // 2MP
{1280, 720}, // HD
{1024, 768}, // XGA
{ 640, 480}, // VGA
{ 320, 240}, // QVGA
{ 176, 144} // QCIF
};
V4L2Wrapper* V4L2Wrapper::NewV4L2Wrapper(const std::string device_path) {
std::unique_ptr<V4L2Gralloc> gralloc(V4L2Gralloc::NewV4L2Gralloc());
if (!gralloc) {
HAL_LOGE("Failed to initialize gralloc helper.");
return nullptr;
}
return new V4L2Wrapper(device_path, std::move(gralloc));
return new V4L2Wrapper(device_path);
}
V4L2Wrapper::V4L2Wrapper(const std::string device_path,
std::unique_ptr<V4L2Gralloc> gralloc)
: device_path_(std::move(device_path)),
gralloc_(std::move(gralloc)),
connection_count_(0) {}
V4L2Wrapper::V4L2Wrapper(const std::string device_path)
: device_path_(std::move(device_path)), connection_count_(0) {}
V4L2Wrapper::~V4L2Wrapper() {}
@ -85,6 +91,10 @@ int V4L2Wrapper::Connect() {
// (Alternatively, better hotplugging support may make this unnecessary
// by disabling cameras that get disconnected and checking newly connected
// cameras, so Connect() is never called on an unsupported camera)
supported_formats_ = GetSupportedFormats();
qualified_formats_ = StreamFormat::GetQualifiedFormats(supported_formats_);
return 0;
}
@ -102,15 +112,16 @@ void V4L2Wrapper::Disconnect() {
--connection_count_;
if (connection_count_ > 0) {
HAL_LOGV("Disconnected from camera device %s. %d connections remain.",
device_path_.c_str());
device_path_.c_str(), connection_count_);
return;
}
device_fd_.reset(-1); // Includes close().
format_.reset();
buffers_.clear();
// Closing the device releases all queued buffers back to the user.
gralloc_->unlockAllBuffers();
{
std::lock_guard<std::mutex> buffer_lock(buffer_queue_lock_);
buffers_.clear();
}
}
// Helper function. Should be used instead of ioctl throughout this class.
@ -134,7 +145,7 @@ int V4L2Wrapper::StreamOn() {
int32_t type = format_->type();
if (IoctlLocked(VIDIOC_STREAMON, &type) < 0) {
HAL_LOGE("STREAMON fails: %s", strerror(errno));
HAL_LOGE("STREAMON fails (%d): %s", errno, strerror(errno));
return -ENODEV;
}
@ -152,20 +163,16 @@ int V4L2Wrapper::StreamOff() {
int32_t type = format_->type();
int res = IoctlLocked(VIDIOC_STREAMOFF, &type);
// Calling STREAMOFF releases all queued buffers back to the user.
int gralloc_res = gralloc_->unlockAllBuffers();
// No buffers in flight.
for (size_t i = 0; i < buffers_.size(); ++i) {
buffers_[i] = false;
}
if (res < 0) {
HAL_LOGE("STREAMOFF fails: %s", strerror(errno));
return -ENODEV;
}
if (gralloc_res < 0) {
HAL_LOGE("Failed to unlock all buffers after turning stream off.");
return gralloc_res;
std::lock_guard<std::mutex> lock(buffer_queue_lock_);
for (auto& buffer : buffers_) {
buffer.active = false;
buffer.request.reset();
}
HAL_LOGV("Stream turned off.");
return 0;
}
@ -304,6 +311,36 @@ int V4L2Wrapper::SetControl(uint32_t control_id,
return 0;
}
const SupportedFormats V4L2Wrapper::GetSupportedFormats() {
SupportedFormats formats;
std::set<uint32_t> pixel_formats;
int res = GetFormats(&pixel_formats);
if (res) {
HAL_LOGE("Failed to get device formats.");
return formats;
}
arc::SupportedFormat supported_format;
std::set<std::array<int32_t, 2>> frame_sizes;
for (auto pixel_format : pixel_formats) {
supported_format.fourcc = pixel_format;
frame_sizes.clear();
res = GetFormatFrameSizes(pixel_format, &frame_sizes);
if (res) {
HAL_LOGE("Failed to get frame sizes for format: 0x%x", pixel_format);
continue;
}
for (auto frame_size : frame_sizes) {
supported_format.width = frame_size[0];
supported_format.height = frame_size[1];
formats.push_back(supported_format);
}
}
return formats;
}
int V4L2Wrapper::GetFormats(std::set<uint32_t>* v4l2_formats) {
HAL_LOG_ENTER();
@ -324,6 +361,22 @@ int V4L2Wrapper::GetFormats(std::set<uint32_t>* v4l2_formats) {
return 0;
}
int V4L2Wrapper::GetQualifiedFormats(std::vector<uint32_t>* v4l2_formats) {
HAL_LOG_ENTER();
if (!connected()) {
HAL_LOGE(
"Device is not connected, qualified formats may not have been set.");
return -EINVAL;
}
v4l2_formats->clear();
std::set<uint32_t> unique_fourccs;
for (auto& format : qualified_formats_) {
unique_fourccs.insert(format.fourcc);
}
v4l2_formats->assign(unique_fourccs.begin(), unique_fourccs.end());
return 0;
}
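GetQualifiedFormats() above collapses the per-resolution format list down to unique fourccs. A standalone sketch of that de-duplication step (stand-in Format type, not the HAL's):
#include <cstdint>
#include <cstdio>
#include <set>
#include <vector>
struct Format { uint32_t fourcc; int width; int height; };
int main() {
  std::vector<Format> qualified = {
      {0x56595559 /* 'YUYV' */, 640, 480},
      {0x56595559 /* 'YUYV' */, 320, 240},
      {0x47504A4D /* 'MJPG' */, 640, 480},
  };
  std::set<uint32_t> unique_fourccs;
  for (const auto& f : qualified) unique_fourccs.insert(f.fourcc);
  std::vector<uint32_t> out(unique_fourccs.begin(), unique_fourccs.end());
  printf("%zu unique fourccs\n", out.size());  // prints: 2 unique fourccs
  return 0;
}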
int V4L2Wrapper::GetFormatFrameSizes(uint32_t v4l2_format,
std::set<std::array<int32_t, 2>>* sizes) {
v4l2_frmsizeenum size_query;
@ -364,7 +417,7 @@ int V4L2Wrapper::GetFormatFrameSizes(uint32_t v4l2_format,
desired_height,
v4l2_format);
continue;
} else if (desired_width > size_query.stepwise.max_width &&
} else if (desired_width > size_query.stepwise.max_width ||
desired_height > size_query.stepwise.max_height) {
HAL_LOGV("Standard size %u x %u is too big for format %d",
desired_width,
@ -447,8 +500,6 @@ int V4L2Wrapper::SetFormat(const StreamFormat& desired_format,
return 0;
}
// Not in the correct format, set the new one.
if (format_) {
// If we had an old format, first request 0 buffers to inform the device
// we're no longer using any previously "allocated" buffers from the old
@ -461,19 +512,34 @@ int V4L2Wrapper::SetFormat(const StreamFormat& desired_format,
}
}
// Select the matching format, or if not available, select a qualified format
// we can convert from.
SupportedFormat format;
if (!StreamFormat::FindBestFitFormat(supported_formats_, qualified_formats_,
desired_format.v4l2_pixel_format(),
desired_format.width(),
desired_format.height(), &format)) {
HAL_LOGE(
"Unable to find supported resolution in list, "
"width: %d, height: %d",
desired_format.width(), desired_format.height());
return -EINVAL;
}
// Set the camera to the new format.
v4l2_format new_format;
desired_format.FillFormatRequest(&new_format);
const StreamFormat resolved_format(format);
resolved_format.FillFormatRequest(&new_format);
// TODO(b/29334616): When async, this will need to check if the stream
// is on, and if so, lock it off while setting format.
if (IoctlLocked(VIDIOC_S_FMT, &new_format) < 0) {
HAL_LOGE("S_FMT failed: %s", strerror(errno));
return -ENODEV;
}
// Check that the driver actually set to the requested values.
if (desired_format != new_format) {
if (resolved_format != new_format) {
HAL_LOGE("Device doesn't support desired stream configuration.");
return -EINVAL;
}
@ -500,28 +566,22 @@ int V4L2Wrapper::RequestBuffers(uint32_t num_requested) {
int res = IoctlLocked(VIDIOC_REQBUFS, &req_buffers);
// Calling REQBUFS releases all queued buffers back to the user.
int gralloc_res = gralloc_->unlockAllBuffers();
if (res < 0) {
HAL_LOGE("REQBUFS failed: %s", strerror(errno));
return -ENODEV;
}
if (gralloc_res < 0) {
HAL_LOGE("Failed to unlock all buffers when setting up new buffers.");
return gralloc_res;
}
// V4L2 will set req_buffers.count to a number of buffers it can handle.
if (num_requested > 0 && req_buffers.count < 1) {
HAL_LOGE("REQBUFS claims it can't handle any buffers.");
return -ENODEV;
}
buffers_.resize(req_buffers.count, false);
buffers_.resize(req_buffers.count);
return 0;
}
int V4L2Wrapper::EnqueueBuffer(const camera3_stream_buffer_t* camera_buffer,
uint32_t* enqueued_index) {
int V4L2Wrapper::EnqueueRequest(
std::shared_ptr<default_camera_hal::CaptureRequest> request) {
if (!format_) {
HAL_LOGE("Stream format must be set before enqueuing buffers.");
return -ENODEV;
@ -533,8 +593,8 @@ int V4L2Wrapper::EnqueueBuffer(const camera3_stream_buffer_t* camera_buffer,
int index = -1;
{
std::lock_guard<std::mutex> guard(buffer_queue_lock_);
for (int i = 0; i < buffers_.size(); ++i) {
if (!buffers_[i]) {
for (size_t i = 0; i < buffers_.size(); ++i) {
if (!buffers_[i].active) {
index = i;
break;
}
@ -557,33 +617,42 @@ int V4L2Wrapper::EnqueueBuffer(const camera3_stream_buffer_t* camera_buffer,
// and fill out remaining fields.
if (IoctlLocked(VIDIOC_QUERYBUF, &device_buffer) < 0) {
HAL_LOGE("QUERYBUF fails: %s", strerror(errno));
// Release the claimed buffer slot.
std::lock_guard<std::mutex> guard(buffer_queue_lock_);
buffers_[index].active = false;
return -ENODEV;
}
// Lock the buffer for writing (fills in the user pointer field).
int res =
gralloc_->lock(camera_buffer, format_->bytes_per_line(), &device_buffer);
if (res) {
HAL_LOGE("Gralloc failed to lock buffer.");
return res;
// Set up our request context and fill in the user pointer field.
RequestContext* request_context;
void* data;
{
std::lock_guard<std::mutex> guard(buffer_queue_lock_);
request_context = &buffers_[index];
request_context->camera_buffer->SetDataSize(device_buffer.length);
request_context->camera_buffer->Reset();
request_context->camera_buffer->SetFourcc(format_->v4l2_pixel_format());
request_context->camera_buffer->SetWidth(format_->width());
request_context->camera_buffer->SetHeight(format_->height());
request_context->request = request;
data = request_context->camera_buffer->GetData();
}
device_buffer.m.userptr = reinterpret_cast<unsigned long>(data);
// Pass the buffer to the camera.
if (IoctlLocked(VIDIOC_QBUF, &device_buffer) < 0) {
HAL_LOGE("QBUF fails: %s", strerror(errno));
gralloc_->unlock(&device_buffer);
return -ENODEV;
}
// Mark the buffer as in flight.
std::lock_guard<std::mutex> guard(buffer_queue_lock_);
buffers_[index] = true;
request_context->active = true;
if (enqueued_index) {
*enqueued_index = index;
}
return 0;
}
int V4L2Wrapper::DequeueBuffer(uint32_t* dequeued_index) {
int V4L2Wrapper::DequeueRequest(std::shared_ptr<CaptureRequest>* request) {
if (!format_) {
HAL_LOGV(
"Format not set, so stream can't be on, "
@ -607,23 +676,63 @@ int V4L2Wrapper::DequeueBuffer(uint32_t* dequeued_index) {
}
}
// Mark the buffer as no longer in flight.
{
std::lock_guard<std::mutex> guard(buffer_queue_lock_);
buffers_[buffer.index] = false;
std::lock_guard<std::mutex> guard(buffer_queue_lock_);
RequestContext* request_context = &buffers_[buffer.index];
// Lock the camera stream buffer for painting.
const camera3_stream_buffer_t* stream_buffer =
&request_context->request->output_buffers[0];
uint32_t fourcc =
StreamFormat::HalToV4L2PixelFormat(stream_buffer->stream->format);
if (request) {
*request = request_context->request;
}
// Now that we're done painting the buffer, we can unlock it.
res = gralloc_->unlock(&buffer);
// Note that the device buffer length is passed to the output frame. If the
// GrallocFrameBuffer does not support the transformation to |fourcc|, it
// assumes the amount of data to lock is |buffer.length|; otherwise it uses
// ImageProcessor::ConvertedSize.
arc::GrallocFrameBuffer output_frame(
*stream_buffer->buffer, stream_buffer->stream->width,
stream_buffer->stream->height, fourcc, buffer.length,
stream_buffer->stream->usage);
res = output_frame.Map();
if (res) {
HAL_LOGE("Gralloc failed to unlock buffer after dequeueing.");
return res;
HAL_LOGE("Failed to map output frame.");
request_context->request.reset();
return -EINVAL;
}
if (request_context->camera_buffer->GetFourcc() == fourcc &&
request_context->camera_buffer->GetWidth() ==
stream_buffer->stream->width &&
request_context->camera_buffer->GetHeight() ==
stream_buffer->stream->height) {
// If no format conversion needs to be applied, directly copy the data over.
memcpy(output_frame.GetData(), request_context->camera_buffer->GetData(),
request_context->camera_buffer->GetDataSize());
} else {
// Perform the format conversion.
arc::CachedFrame cached_frame;
cached_frame.SetSource(request_context->camera_buffer.get(), 0);
cached_frame.Convert(request_context->request->settings, &output_frame);
}
if (dequeued_index) {
*dequeued_index = buffer.index;
}
request_context->request.reset();
// Mark the buffer as not in flight.
request_context->active = false;
return 0;
}
int V4L2Wrapper::GetInFlightBufferCount() {
int count = 0;
std::lock_guard<std::mutex> guard(buffer_queue_lock_);
for (auto& buffer : buffers_) {
if (buffer.active) {
count++;
}
}
return count;
}
} // namespace v4l2_camera_hal
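Taken together, EnqueueRequest(), DequeueRequest() and GetInFlightBufferCount() manage a fixed pool of request slots. A standalone sketch of the slot accounting that replaces the old std::vector<bool> (simplified stand-in types, not the HAL's):
#include <cstdio>
#include <memory>
#include <vector>
struct FakeRequest { int frame_number; };
struct RequestContext {
  bool active = false;
  std::shared_ptr<FakeRequest> request;
};
int main() {
  std::vector<RequestContext> buffers(4);  // as if REQBUFS granted 4 slots
  // Enqueue: claim the first inactive slot.
  for (auto& slot : buffers) {
    if (!slot.active) {
      slot.request = std::make_shared<FakeRequest>(FakeRequest{1});
      slot.active = true;
      break;
    }
  }
  int in_flight = 0;
  for (const auto& slot : buffers) in_flight += slot.active ? 1 : 0;
  printf("in flight: %d\n", in_flight);    // prints: in flight: 1
  return 0;
}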

View file

@ -26,11 +26,14 @@
#include <android-base/unique_fd.h>
#include "arc/common_types.h"
#include "arc/frame_buffer.h"
#include "capture_request.h"
#include "common.h"
#include "stream_format.h"
#include "v4l2_gralloc.h"
namespace v4l2_camera_hal {
class V4L2Wrapper {
public:
// Use this method to create V4L2Wrapper objects. Functionally equivalent
@ -67,8 +70,10 @@ class V4L2Wrapper {
int32_t* result = nullptr);
// Manage format.
virtual int GetFormats(std::set<uint32_t>* v4l2_formats);
virtual int GetQualifiedFormats(std::vector<uint32_t>* v4l2_formats);
virtual int GetFormatFrameSizes(uint32_t v4l2_format,
std::set<std::array<int32_t, 2>>* sizes);
// Durations are returned in ns.
virtual int GetFormatFrameDurationRange(
uint32_t v4l2_format,
@ -77,15 +82,16 @@ class V4L2Wrapper {
virtual int SetFormat(const StreamFormat& desired_format,
uint32_t* result_max_buffers);
// Manage buffers.
virtual int EnqueueBuffer(const camera3_stream_buffer_t* camera_buffer,
uint32_t* enqueued_index = nullptr);
virtual int DequeueBuffer(uint32_t* dequeued_index = nullptr);
virtual int EnqueueRequest(
std::shared_ptr<default_camera_hal::CaptureRequest> request);
virtual int DequeueRequest(
std::shared_ptr<default_camera_hal::CaptureRequest>* request);
virtual int GetInFlightBufferCount();
private:
// Constructor is private to allow failing on bad input.
// Use NewV4L2Wrapper instead.
V4L2Wrapper(const std::string device_path,
std::unique_ptr<V4L2Gralloc> gralloc);
V4L2Wrapper(const std::string device_path);
// Connect or disconnect to the device. Access by creating/destroying
// a V4L2Wrapper::Connection object.
@ -99,20 +105,19 @@ class V4L2Wrapper {
inline bool connected() { return device_fd_.get() >= 0; }
// Format management.
const arc::SupportedFormats GetSupportedFormats();
// The camera device path. For example, /dev/video0.
const std::string device_path_;
// The opened device fd.
android::base::unique_fd device_fd_;
// The underlying gralloc module.
std::unique_ptr<V4L2Gralloc> gralloc_;
// std::unique_ptr<V4L2Gralloc> gralloc_;
// Whether or not the device supports the extended control query.
bool extended_query_supported_;
// The format this device is set up for.
std::unique_ptr<StreamFormat> format_;
// Map indices to buffer status. True if the index is in-flight.
// |buffers_.size()| will always be the maximum number of buffers this device
// can handle in its current format.
std::vector<bool> buffers_;
// Lock protecting use of the buffer tracker.
std::mutex buffer_queue_lock_;
// Lock protecting use of the device.
@ -121,6 +126,28 @@ class V4L2Wrapper {
std::mutex connection_lock_;
// Reference count connections.
int connection_count_;
// Supported formats.
arc::SupportedFormats supported_formats_;
// Qualified formats.
arc::SupportedFormats qualified_formats_;
class RequestContext {
public:
RequestContext()
: active(false),
camera_buffer(std::make_shared<arc::AllocatedFrameBuffer>(0)){};
~RequestContext(){};
// Indicates whether this request context is in use.
bool active;
// Buffer handles of the context.
std::shared_ptr<arc::AllocatedFrameBuffer> camera_buffer;
std::shared_ptr<default_camera_hal::CaptureRequest> request;
};
// Map of in flight requests.
// |buffers_.size()| will always be the maximum number of buffers this device
// can handle in its current format.
std::vector<RequestContext> buffers_;
friend class Connection;
friend class V4L2WrapperMock;

View file

@ -27,7 +27,7 @@ namespace v4l2_camera_hal {
class V4L2WrapperMock : public V4L2Wrapper {
public:
V4L2WrapperMock() : V4L2Wrapper("", nullptr){};
V4L2WrapperMock() : V4L2Wrapper(""){};
MOCK_METHOD0(StreamOn, int());
MOCK_METHOD0(StreamOff, int());
MOCK_METHOD2(QueryControl,
@ -36,17 +36,15 @@ class V4L2WrapperMock : public V4L2Wrapper {
MOCK_METHOD3(SetControl,
int(uint32_t control_id, int32_t desired, int32_t* result));
MOCK_METHOD1(GetFormats, int(std::set<uint32_t>*));
MOCK_METHOD1(GetQualifiedFormats, int(std::vector<uint32_t>*));
MOCK_METHOD2(GetFormatFrameSizes,
int(uint32_t, std::set<std::array<int32_t, 2>>*));
MOCK_METHOD3(GetFormatFrameDurationRange,
int(uint32_t,
const std::array<int32_t, 2>&,
std::array<int64_t, 2>*));
MOCK_METHOD4(SetFormat,
int(int format,
uint32_t width,
uint32_t height,
uint32_t* result_max_buffers));
MOCK_METHOD2(SetFormat, int(const StreamFormat& desired_format,
uint32_t* result_max_buffers));
MOCK_METHOD2(EnqueueBuffer,
int(const camera3_stream_buffer_t* camera_buffer,
uint32_t* enqueued_index));

View file

@ -332,6 +332,8 @@ int fb_device_open(hw_module_t const* module, const char* name,
const_cast<int&>(dev->device.minSwapInterval) = 1;
const_cast<int&>(dev->device.maxSwapInterval) = 1;
*device = &dev->device.common;
} else {
free(dev);
}
}
return status;

View file

@ -1,2 +1,2 @@
pengxu@google.com
ashutoshj@google.com
arthuri@google.com
bduddie@google.com

View file

@ -44,8 +44,6 @@
#include "alsa_device_proxy.h"
#include "alsa_logging.h"
#define DEFAULT_INPUT_BUFFER_SIZE_MS 20
/* Lock play & record samples rates at or above this threshold */
#define RATELOCK_THRESHOLD 96000
@ -69,6 +67,8 @@ struct audio_device {
bool mic_muted;
bool standby;
int32_t inputs_open; /* number of input streams currently open. */
};
struct stream_lock {
@ -85,7 +85,12 @@ struct stream_out {
struct audio_device *adev; /* hardware information - only using this for the lock */
alsa_device_profile * profile; /* Points to the alsa_device_profile in the audio_device */
const alsa_device_profile *profile; /* Points to the alsa_device_profile in the audio_device.
* Const, so modifications go through adev->out_profile;
* callers must hold the hardware lock and ensure the
* stream is not active and no other output streams are open.
*/
alsa_device_proxy proxy; /* state of the stream */
unsigned hal_channel_count; /* channel count exposed to AudioFlinger.
@ -116,7 +121,12 @@ struct stream_in {
struct audio_device *adev; /* hardware information - only using this for the lock */
alsa_device_profile * profile; /* Points to the alsa_device_profile in the audio_device */
const alsa_device_profile *profile; /* Points to the alsa_device_profile in the audio_device.
* Const, so modifications go through adev->in_profile;
* callers must hold the hardware lock and ensure the
* stream is not active and no other input streams are open.
*/
alsa_device_proxy proxy; /* state of the stream */
unsigned hal_channel_count; /* channel count exposed to AudioFlinger.
@ -232,7 +242,7 @@ static bool parse_card_device_params(const char *kvpairs, int *card, int *device
return *card >= 0 && *device >= 0;
}
static char * device_get_parameters(alsa_device_profile * profile, const char * keys)
static char *device_get_parameters(const alsa_device_profile *profile, const char * keys)
{
if (profile->card < 0 || profile->device < 0) {
return strdup("");
@ -383,12 +393,12 @@ static int out_set_parameters(struct audio_stream *stream, const char *kvpairs)
else {
int saved_card = out->profile->card;
int saved_device = out->profile->device;
out->profile->card = card;
out->profile->device = device;
ret_value = profile_read_device_info(out->profile) ? 0 : -EINVAL;
out->adev->out_profile.card = card;
out->adev->out_profile.device = device;
ret_value = profile_read_device_info(&out->adev->out_profile) ? 0 : -EINVAL;
if (ret_value != 0) {
out->profile->card = saved_card;
out->profile->device = saved_device;
out->adev->out_profile.card = saved_card;
out->adev->out_profile.device = saved_device;
}
}
}
@ -571,9 +581,9 @@ static int adev_open_output_stream(struct audio_hw_device *hw_dev,
memset(&proxy_config, 0, sizeof(proxy_config));
/* Pull out the card/device pair */
parse_card_device_params(address, &(out->profile->card), &(out->profile->device));
parse_card_device_params(address, &out->adev->out_profile.card, &out->adev->out_profile.device);
profile_read_device_info(out->profile);
profile_read_device_info(&out->adev->out_profile);
int ret = 0;
@ -780,18 +790,18 @@ static int in_set_parameters(struct audio_stream *stream, const char *kvpairs)
device_lock(in->adev);
if (card >= 0 && device >= 0 && !profile_is_cached_for(in->profile, card, device)) {
/* cannot read pcm device info if playback is active */
if (!in->standby)
/* cannot read pcm device info if capture is active, or more than one stream is open */
if (!in->standby || in->adev->inputs_open > 1)
ret_value = -ENOSYS;
else {
int saved_card = in->profile->card;
int saved_device = in->profile->device;
in->profile->card = card;
in->profile->device = device;
ret_value = profile_read_device_info(in->profile) ? 0 : -EINVAL;
in->adev->in_profile.card = card;
in->adev->in_profile.device = device;
ret_value = profile_read_device_info(&in->adev->in_profile) ? 0 : -EINVAL;
if (ret_value != 0) {
in->profile->card = saved_card;
in->profile->device = saved_device;
in->adev->in_profile.card = saved_card;
in->adev->in_profile.device = saved_device;
}
}
}
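Both the output and input parameter paths above use the same save/modify/rollback idiom on the shared profile. A standalone sketch of that idiom (illustrative types; profile_read_device_info is faked to show the rollback branch):
#include <cstdio>
struct Profile { int card = -1; int device = -1; };
// Stand-in for profile_read_device_info(): pretend only card 1 exists.
static bool read_device_info(const Profile& p) { return p.card == 1; }
static int set_profile(Profile& shared, int card, int device) {
  const int saved_card = shared.card;
  const int saved_device = shared.device;
  shared.card = card;
  shared.device = device;
  if (!read_device_info(shared)) {  // read failed: roll back
    shared.card = saved_card;
    shared.device = saved_device;
    return -1;
  }
  return 0;
}
int main() {
  Profile shared{1, 0};
  printf("set(2,0) -> %d, card now %d\n",
         set_profile(shared, 2, 0), shared.card);  // -1, card stays 1
  return 0;
}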
@ -931,10 +941,17 @@ static int adev_open_input_stream(struct audio_hw_device *hw_dev,
ALOGV("adev_open_input_stream() rate:%" PRIu32 ", chanMask:0x%" PRIX32 ", fmt:%" PRIu8,
config->sample_rate, config->channel_mask, config->format);
struct stream_in *in = (struct stream_in *)calloc(1, sizeof(struct stream_in));
int ret = 0;
/* Pull out the card/device pair */
int32_t card, device;
if (!parse_card_device_params(address, &card, &device)) {
ALOGW("%s fail - invalid address %s", __func__, address);
*stream_in = NULL;
return -EINVAL;
}
struct stream_in * const in = (struct stream_in *)calloc(1, sizeof(struct stream_in));
if (in == NULL) {
*stream_in = NULL;
return -ENOMEM;
}
@ -966,10 +983,29 @@ static int adev_open_input_stream(struct audio_hw_device *hw_dev,
struct pcm_config proxy_config;
memset(&proxy_config, 0, sizeof(proxy_config));
/* Pull out the card/device pair */
parse_card_device_params(address, &(in->profile->card), &(in->profile->device));
profile_read_device_info(in->profile);
int ret = 0;
/* Check if an input stream is already open */
if (in->adev->inputs_open > 0) {
if (!profile_is_cached_for(in->profile, card, device)) {
ALOGW("%s fail - address card:%d device:%d doesn't match existing profile",
__func__, card, device);
ret = -EINVAL;
}
} else {
/* Read input profile only if necessary */
in->adev->in_profile.card = card;
in->adev->in_profile.device = device;
if (!profile_read_device_info(&in->adev->in_profile)) {
ALOGW("%s fail - cannot read profile", __func__);
ret = -EINVAL;
}
}
if (ret != 0) {
device_unlock(in->adev);
free(in);
*stream_in = NULL;
return ret;
}
/* Rate */
if (config->sample_rate == 0) {
@ -1074,6 +1110,10 @@ static int adev_open_input_stream(struct audio_hw_device *hw_dev,
free(in);
}
device_lock(in->adev);
++in->adev->inputs_open;
device_unlock(in->adev);
return ret;
}
@ -1085,6 +1125,12 @@ static void adev_close_input_stream(struct audio_hw_device *hw_dev,
adev_remove_stream_from_list(in->adev, &in->list_node);
device_lock(in->adev);
--in->adev->inputs_open;
LOG_ALWAYS_FATAL_IF(in->adev->inputs_open < 0,
"invalid inputs_open: %d", in->adev->inputs_open);
device_unlock(in->adev);
/* Close the pcm device */
in_standby(&stream->common);
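The inputs_open counter added above is a plain reference count guarded by the device lock: open bumps it, close decrements it and sanity-checks the result. A standalone sketch of the pattern (C++ stand-ins for the adev lock and stream lifecycle):
#include <cassert>
#include <cstdio>
#include <mutex>
struct FakeDevice {
  std::mutex lock;
  int inputs_open = 0;
};
static void open_input(FakeDevice& dev) {
  std::lock_guard<std::mutex> guard(dev.lock);
  ++dev.inputs_open;
}
static void close_input(FakeDevice& dev) {
  std::lock_guard<std::mutex> guard(dev.lock);
  --dev.inputs_open;
  assert(dev.inputs_open >= 0 && "invalid inputs_open");
}
int main() {
  FakeDevice dev;
  open_input(dev);
  open_input(dev);  // second stream: profile must match the cached one
  close_input(dev);
  close_input(dev);
  printf("inputs_open: %d\n", dev.inputs_open);  // 0
  return 0;
}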

View file

@ -1,24 +0,0 @@
/*
* Copyright (C) 2015 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdint.h>
#include <utils/SystemClock.h>
extern "C" {
int64_t elapsedRealtimeNano() {
return android::elapsedRealtimeNano();
}
}

View file

@ -1,579 +0,0 @@
/*
* Copyright (C) 2015 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#define LOG_TAG "vehicle_hw_default"
#define LOG_NDEBUG 1
#define RADIO_PRESET_NUM 6
#define UNUSED __attribute__((__unused__))
#include <errno.h>
#include <inttypes.h>
#include <malloc.h>
#include <pthread.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <sys/prctl.h>
#include <sys/time.h>
#include <time.h>
#include <log/log.h>
#include <system/radio.h>
#include <hardware/hardware.h>
#include <hardware/vehicle.h>
extern int64_t elapsedRealtimeNano();
static char VEHICLE_MAKE[] = "android_car";
typedef struct vehicle_device_impl {
vehicle_hw_device_t vehicle_device;
uint32_t initialized_;
vehicle_event_callback_fn event_fn_;
vehicle_error_callback_fn error_fn_;
} vehicle_device_impl_t ;
static pthread_mutex_t lock_;
typedef struct subscription {
// Each subscription has its own thread.
pthread_t thread_id;
int32_t prop;
float sample_rate;
pthread_mutex_t lock;
// This field should be protected by the above mutex.
// TODO: change this to something better, as a flag alone can take a long time to take effect.
uint32_t stop_thread;
vehicle_device_impl_t* impl;
pthread_t thread;
pthread_cond_t cond;
char name[100];
} subscription_t;
static vehicle_prop_config_t CONFIGS[] = {
{
.prop = VEHICLE_PROPERTY_INFO_MAKE,
.access = VEHICLE_PROP_ACCESS_READ,
.change_mode = VEHICLE_PROP_CHANGE_MODE_STATIC,
.value_type = VEHICLE_VALUE_TYPE_STRING,
.min_sample_rate = 0,
.max_sample_rate = 0,
.hal_data = NULL,
},
{
.prop = VEHICLE_PROPERTY_GEAR_SELECTION,
.access = VEHICLE_PROP_ACCESS_READ,
.change_mode = VEHICLE_PROP_CHANGE_MODE_ON_CHANGE,
.value_type = VEHICLE_VALUE_TYPE_INT32,
.min_sample_rate = 0,
.max_sample_rate = 0,
.hal_data = NULL,
},
{
.prop = VEHICLE_PROPERTY_DRIVING_STATUS,
.access = VEHICLE_PROP_ACCESS_READ,
.change_mode = VEHICLE_PROP_CHANGE_MODE_ON_CHANGE,
.value_type = VEHICLE_VALUE_TYPE_INT32,
.min_sample_rate = 0,
.max_sample_rate = 0,
.hal_data = NULL,
},
{
.prop = VEHICLE_PROPERTY_PARKING_BRAKE_ON,
.access = VEHICLE_PROP_ACCESS_READ,
.change_mode = VEHICLE_PROP_CHANGE_MODE_ON_CHANGE,
.value_type = VEHICLE_VALUE_TYPE_BOOLEAN,
.min_sample_rate = 0,
.max_sample_rate = 0,
.hal_data = NULL,
},
{
.prop = VEHICLE_PROPERTY_PERF_VEHICLE_SPEED,
.access = VEHICLE_PROP_ACCESS_READ,
.change_mode = VEHICLE_PROP_CHANGE_MODE_CONTINUOUS,
.value_type = VEHICLE_VALUE_TYPE_FLOAT,
.min_sample_rate = 0.1,
.max_sample_rate = 10.0,
.hal_data = NULL,
},
{
.prop = VEHICLE_PROPERTY_RADIO_PRESET,
.access = VEHICLE_PROP_ACCESS_READ_WRITE,
.change_mode = VEHICLE_PROP_CHANGE_MODE_ON_CHANGE,
.value_type = VEHICLE_VALUE_TYPE_INT32_VEC4,
.vehicle_radio_num_presets = RADIO_PRESET_NUM,
.min_sample_rate = 0,
.max_sample_rate = 0,
.hal_data = NULL,
},
};
vehicle_prop_config_t* find_config(int prop) {
unsigned int i;
for (i = 0; i < sizeof(CONFIGS) / sizeof(vehicle_prop_config_t); i++) {
if (CONFIGS[i].prop == prop) {
return &CONFIGS[i];
}
}
return NULL;
}
static int alloc_vehicle_str_from_cstr(const char* string, vehicle_str_t* vehicle_str) {
int len = strlen(string);
vehicle_str->data = (uint8_t*) malloc(len);
if (vehicle_str->data == NULL) {
return -ENOMEM;
}
memcpy(vehicle_str->data, string, len);
vehicle_str->len = len;
return 0;
}
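Note that vehicle_str_t carries an explicit length and alloc_vehicle_str_from_cstr() deliberately omits the NUL terminator, so consumers must re-terminate before treating the data as a C string (as vdev_set() does below). A standalone sketch (illustrative stand-in struct, not the HAL's):
#include <cstdint>
#include <cstdio>
#include <cstdlib>
#include <cstring>
struct VehicleStr { uint8_t* data; int32_t len; };
int main() {
  const char* src = "android_car";
  VehicleStr s;
  s.len = (int32_t)strlen(src);
  s.data = (uint8_t*)malloc(s.len);      // no room reserved for '\0'
  memcpy(s.data, src, s.len);
  char* out = (char*)malloc(s.len + 1);  // re-terminate before printing
  memcpy(out, s.data, s.len);
  out[s.len] = '\0';
  printf("%s\n", out);                   // android_car
  free(out);
  free(s.data);
  return 0;
}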
static vehicle_prop_config_t const * vdev_list_properties(vehicle_hw_device_t* device UNUSED,
int* num_properties) {
ALOGD("vdev_list_properties.");
*num_properties = sizeof(CONFIGS) / sizeof(vehicle_prop_config_t);
return CONFIGS;
}
static int vdev_init(vehicle_hw_device_t* device,
vehicle_event_callback_fn event_callback_fn,
vehicle_error_callback_fn error_callback_fn) {
ALOGD("vdev_init.");
vehicle_device_impl_t* impl = (vehicle_device_impl_t*)device;
pthread_mutex_lock(&lock_);
if (impl->initialized_) {
ALOGE("vdev_init: Callback and Error functions are already existing.");
pthread_mutex_unlock(&lock_);
return -EEXIST;
}
impl->initialized_ = 1;
impl->event_fn_ = event_callback_fn;
impl->error_fn_ = error_callback_fn;
pthread_mutex_unlock(&lock_);
return 0;
}
static int vdev_release(vehicle_hw_device_t* device) {
vehicle_device_impl_t* impl = (vehicle_device_impl_t*)device;
pthread_mutex_lock(&lock_);
if (!impl->initialized_) {
ALOGD("vdev_release: Already released before, returning early.");
} else {
// unsubscribe_all()
impl->initialized_ = 0;
}
pthread_mutex_unlock(&lock_);
return 0;
}
static int vdev_get(vehicle_hw_device_t* device UNUSED, vehicle_prop_value_t* data) {
ALOGD("vdev_get.");
//TODO all data supporting read should support get
if (!data) {
ALOGE("vdev_get: Data cannot be null.");
return -EINVAL;
}
vehicle_prop_config_t* config = find_config(data->prop);
if (config == NULL) {
ALOGE("vdev_get: cannot find config 0x%x", data->prop);
return -EINVAL;
}
data->value_type = config->value_type;
// for STATIC type, time can be just 0 instead
data->timestamp = elapsedRealtimeNano();
int r;
switch (data->prop) {
case VEHICLE_PROPERTY_INFO_MAKE:
r = alloc_vehicle_str_from_cstr(VEHICLE_MAKE, &(data->value.str_value));
if (r != 0) {
ALOGE("vdev_get: alloc failed");
return r;
}
break;
case VEHICLE_PROPERTY_RADIO_PRESET: {
int radio_preset = data->value.int32_array[0];
if (radio_preset < VEHICLE_RADIO_PRESET_MIN_VALUE ||
radio_preset >= RADIO_PRESET_NUM) {
ALOGE("%s Invalid radio preset: %d\n", __func__, radio_preset);
return -1;
}
ALOGD("%s Radio Preset number: %d", __func__, radio_preset);
int32_t selector = radio_preset % 2 == 0;
// Populate the channel and subchannel to be some variation of the
// preset number for mocking.
// Restore the preset number.
data->value.int32_array[0] = radio_preset;
// Channel type values taken from
// system/core/include/system/radio.h
data->value.int32_array[1] = selector ? RADIO_BAND_FM : RADIO_BAND_AM;
// For FM set a value in Mhz and for AM set a value in Khz range
// (channel).
data->value.int32_array[2] = selector ? 99000000 : 100000;
// For FM we have a sub-channel and we care about it, for AM pass
// a dummy value.
data->value.int32_array[3] = selector ? radio_preset : -1;
break;
}
default:
// actual implementation will be much complex than this. It should track proper last
// state. Here just fill with zero.
memset(&(data->value), 0, sizeof(data->value));
break;
}
ALOGI("vdev_get, type 0x%x, time %" PRId64 ", value_type %d", data->prop, data->timestamp,
data->value_type);
return 0;
}
static void vdev_release_memory_from_get(struct vehicle_hw_device* device UNUSED,
vehicle_prop_value_t *data) {
switch (data->value_type) {
case VEHICLE_VALUE_TYPE_STRING:
case VEHICLE_VALUE_TYPE_BYTES:
free(data->value.str_value.data);
data->value.str_value.data = NULL;
break;
default:
ALOGW("release_memory_from_get for property 0x%x which is not string or bytes type 0x%x"
, data->prop, data->value_type);
break;
}
}
static int vdev_set(vehicle_hw_device_t* device UNUSED, const vehicle_prop_value_t* data) {
ALOGD("vdev_set.");
// Just print what data will be setting here.
ALOGD("Setting property %d with value type %d\n", data->prop, data->value_type);
vehicle_prop_config_t* config = find_config(data->prop);
if (config == NULL) {
ALOGE("vdev_set: cannot find config 0x%x", data->prop);
return -EINVAL;
}
if (config->value_type != data->value_type) {
ALOGE("vdev_set: type mismatch, passed 0x%x expecting 0x%x", data->value_type,
config->value_type);
return -EINVAL;
}
switch (data->value_type) {
case VEHICLE_VALUE_TYPE_FLOAT:
ALOGD("Value type: FLOAT\nValue: %f\n", data->value.float_value);
break;
case VEHICLE_VALUE_TYPE_INT32:
ALOGD("Value type: INT32\nValue: %" PRId32 "\n", data->value.int32_value);
break;
case VEHICLE_VALUE_TYPE_INT64:
ALOGD("Value type: INT64\nValue: %" PRId64 "\n", data->value.int64_value);
break;
case VEHICLE_VALUE_TYPE_BOOLEAN:
ALOGD("Value type: BOOLEAN\nValue: %d\n", data->value.boolean_value);
break;
case VEHICLE_VALUE_TYPE_STRING:
ALOGD("Value type: STRING\n Size: %d\n", data->value.str_value.len);
// NOTE: We only handle ASCII strings here.
// Print the UTF-8 string.
char *ascii_out = (char *) malloc ((data->value.str_value.len + 1) * sizeof (char));
memcpy(ascii_out, data->value.str_value.data, data->value.str_value.len);
ascii_out[data->value.str_value.len] = '\0';
ALOGD("Value: %s\n", ascii_out);
free(ascii_out); /* avoid leaking the temporary copy */
break;
case VEHICLE_VALUE_TYPE_INT32_VEC4:
ALOGD("Value type: INT32_VEC4\nValue[0]: %d Value[1] %d Value[2] %d Value[3] %d",
data->value.int32_array[0], data->value.int32_array[1],
data->value.int32_array[2], data->value.int32_array[3]);
break;
default:
ALOGD("Value type not yet handled: %d.\n", data->value_type);
}
return 0;
}
void print_subscribe_info(vehicle_device_impl_t* impl UNUSED) {
unsigned int i;
for (i = 0; i < sizeof(CONFIGS) / sizeof(vehicle_prop_config_t); i++) {
subscription_t* sub = (subscription_t*)CONFIGS[i].hal_data;
if (sub != NULL) {
ALOGD("prop: %d rate: %f", sub->prop, sub->sample_rate);
}
}
}
// This should be run in a separate thread always.
void fake_event_thread(struct subscription *sub) {
if (!sub) {
ALOGE("oops! subscription object cannot be NULL.");
exit(-1);
}
prctl(PR_SET_NAME, (unsigned long)sub->name, 0, 0, 0);
// Emit values in a loop, every 2 seconds.
while (1) {
// Create a random value depending on the property type.
vehicle_prop_value_t event;
event.prop = sub->prop;
event.timestamp = elapsedRealtimeNano();
switch (sub->prop) {
case VEHICLE_PROPERTY_GEAR_SELECTION:
event.value_type = VEHICLE_VALUE_TYPE_INT32;
switch ((event.timestamp & 0x30000000)>>28) {
case 0:
event.value.gear_selection = VEHICLE_GEAR_PARK;
break;
case 1:
event.value.gear_selection = VEHICLE_GEAR_NEUTRAL;
break;
case 2:
event.value.gear_selection = VEHICLE_GEAR_DRIVE;
break;
case 3:
event.value.gear_selection = VEHICLE_GEAR_REVERSE;
break;
}
break;
case VEHICLE_PROPERTY_PARKING_BRAKE_ON:
event.value_type = VEHICLE_VALUE_TYPE_BOOLEAN;
if (event.timestamp & 0x20000000) {
event.value.parking_brake = VEHICLE_FALSE;
} else {
event.value.parking_brake = VEHICLE_TRUE;
}
break;
case VEHICLE_PROPERTY_PERF_VEHICLE_SPEED:
event.value_type = VEHICLE_VALUE_TYPE_FLOAT;
event.value.vehicle_speed = (float) ((event.timestamp & 0xff000000)>>24);
break;
case VEHICLE_PROPERTY_RADIO_PRESET:
event.value_type = VEHICLE_VALUE_TYPE_INT32_VEC4;
int presetInfo1[4] = {1 /* preset number */, 0 /* AM Band */, 1000, 0};
int presetInfo2[4] = {2 /* preset number */, 1 /* FM Band */, 1000, 0};
if (event.timestamp & 0x20000000) {
memcpy(event.value.int32_array, presetInfo1, sizeof(presetInfo1));
} else {
memcpy(event.value.int32_array, presetInfo2, sizeof(presetInfo2));
}
break;
default: // unsupported
if (sub->impl == NULL) {
ALOGE("subscription impl NULL");
return;
}
if (sub->impl->error_fn_ != NULL) {
sub->impl->error_fn_(-EINVAL, VEHICLE_PROPERTY_INVALID,
VEHICLE_OPERATION_GENERIC);
} else {
ALOGE("Error function is null");
}
ALOGE("Unsupported prop 0x%x, quit", sub->prop);
return;
}
if (sub->impl->event_fn_ != NULL) {
sub->impl->event_fn_(&event);
} else {
ALOGE("Event function is null");
return;
}
pthread_mutex_lock(&sub->lock);
if (sub->stop_thread) {
ALOGD("exiting subscription request here.");
// Do any cleanup here.
pthread_mutex_unlock(&sub->lock);
return;
}
struct timespec now;
clock_gettime(CLOCK_REALTIME, &now);
now.tv_sec += 1; // sleep for one sec
pthread_cond_timedwait(&sub->cond, &sub->lock, &now);
pthread_mutex_unlock(&sub->lock);
}
}
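fake_event_thread() above combines a stop flag with pthread_cond_timedwait so that unsubscribing can interrupt the one-second sleep immediately instead of waiting for the deadline. A standalone sketch of the same pattern using C++ primitives (std::condition_variable in place of the raw pthread calls):
#include <chrono>
#include <condition_variable>
#include <cstdio>
#include <mutex>
#include <thread>
int main() {
  std::mutex m;
  std::condition_variable cv;
  bool stop = false;
  std::thread worker([&] {
    std::unique_lock<std::mutex> lock(m);
    while (!stop) {
      printf("emit fake event\n");
      // Equivalent of pthread_cond_timedwait with a 1 s deadline.
      cv.wait_for(lock, std::chrono::seconds(1), [&] { return stop; });
    }
  });
  std::this_thread::sleep_for(std::chrono::milliseconds(2500));
  {
    std::lock_guard<std::mutex> guard(m);
    stop = true;       // equivalent of sub->stop_thread = 1
  }
  cv.notify_one();     // equivalent of pthread_cond_signal
  worker.join();
  return 0;
}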
static int vdev_subscribe(vehicle_hw_device_t* device, int32_t prop, float sample_rate,
int32_t zones UNUSED) {
ALOGD("vdev_subscribe 0x%x, %f", prop, sample_rate);
vehicle_device_impl_t* impl = (vehicle_device_impl_t*)device;
// Check that the device is initialized.
pthread_mutex_lock(&lock_);
if (!impl->initialized_) {
pthread_mutex_unlock(&lock_);
ALOGE("vdev_subscribe: have you called init()?");
return -EINVAL;
}
vehicle_prop_config_t* config = find_config(prop);
if (config == NULL) {
pthread_mutex_unlock(&lock_);
ALOGE("vdev_subscribe not supported property 0x%x", prop);
return -EINVAL;
}
if ((config->access != VEHICLE_PROP_ACCESS_READ) &&
(config->access != VEHICLE_PROP_ACCESS_READ_WRITE)) {
pthread_mutex_unlock(&lock_);
ALOGE("vdev_subscribe read not supported on the property 0x%x", prop);
return -EINVAL;
}
if (config->change_mode == VEHICLE_PROP_CHANGE_MODE_STATIC) {
pthread_mutex_unlock(&lock_);
ALOGE("vdev_subscribe cannot subscribe static property 0x%x", prop);
return -EINVAL;
}
if ((config->change_mode == VEHICLE_PROP_CHANGE_MODE_ON_CHANGE) && (sample_rate != 0)) {
pthread_mutex_unlock(&lock_);
ALOGE("vdev_subscribe on change type should have 0 sample rate, property 0x%x, sample rate %f",
prop, sample_rate);
return -EINVAL;
}
if ((config->max_sample_rate < sample_rate) || (config->min_sample_rate > sample_rate)) {
ALOGE("vdev_subscribe property 0x%x, invalid sample rate %f, min:%f, max:%f",
prop, sample_rate, config->min_sample_rate, config->max_sample_rate);
pthread_mutex_unlock(&lock_);
return -EINVAL;
}
subscription_t* sub = (subscription_t*)config->hal_data;
if (sub == NULL) {
sub = calloc(1, sizeof(subscription_t));
sub->prop = prop;
sub->sample_rate = sample_rate;
sub->stop_thread = 0;
sub->impl = impl;
pthread_mutex_init(&sub->lock, NULL);
pthread_cond_init(&sub->cond, NULL);
config->hal_data = sub;
sprintf(sub->name, "vhal0x%x", prop);
} else if (sub->sample_rate != sample_rate){ // sample rate changed
//TODO notify this to fake sensor thread
sub->sample_rate = sample_rate;
pthread_mutex_unlock(&lock_);
return 0;
}
int ret_code = pthread_create(
&sub->thread, NULL, (void *(*)(void*))fake_event_thread, sub);
if (ret_code != 0) {
pthread_mutex_unlock(&lock_); /* don't return with lock_ held */
return -ret_code;
}
print_subscribe_info(impl);
pthread_mutex_unlock(&lock_);
return 0;
}
static int vdev_unsubscribe(vehicle_hw_device_t* device, int32_t prop) {
ALOGD("vdev_unsubscribe 0x%x", prop);
vehicle_device_impl_t* impl = (vehicle_device_impl_t*)device;
pthread_mutex_lock(&lock_);
vehicle_prop_config_t* config = find_config(prop);
if (config == NULL) {
pthread_mutex_unlock(&lock_);
return -EINVAL;
}
subscription_t* sub = (subscription_t*)config->hal_data;
if (sub == NULL) {
pthread_mutex_unlock(&lock_);
return -EINVAL;
}
config->hal_data = NULL;
pthread_mutex_unlock(&lock_);
pthread_mutex_lock(&sub->lock);
sub->stop_thread = 1;
pthread_cond_signal(&sub->cond);
pthread_mutex_unlock(&sub->lock);
pthread_join(sub->thread, NULL);
pthread_cond_destroy(&sub->cond);
pthread_mutex_destroy(&sub->lock);
free(sub);
pthread_mutex_lock(&lock_);
print_subscribe_info(impl);
pthread_mutex_unlock(&lock_);
return 0;
}
static int vdev_close(hw_device_t* device) {
vehicle_device_impl_t* impl = (vehicle_device_impl_t*)device;
if (impl) {
free(impl);
return 0;
} else {
return -1;
}
}
static int vdev_dump(struct vehicle_hw_device* device UNUSED, int fd UNUSED) {
//TODO
return 0;
}
/*
* The open function is provided as an interface in hardware.h; it fills in
* all the information about the specific implementation and version-specific
* information in the hw_device_t structure. After calling open(), the client
* should use the hw_device_t to execute any Vehicle HAL device-specific functions.
*/
static int vdev_open(const hw_module_t* module, const char* name UNUSED,
hw_device_t** device) {
ALOGD("vdev_open");
vehicle_device_impl_t* vdev = calloc(1, sizeof(vehicle_device_impl_t));
if (vdev == NULL) {
// Oops, out of memory!
return -ENOMEM;
}
// Common functions provided by hardware.h to access module and device(s).
vdev->vehicle_device.common.tag = HARDWARE_DEVICE_TAG;
vdev->vehicle_device.common.version = VEHICLE_DEVICE_API_VERSION_1_0;
vdev->vehicle_device.common.module = (hw_module_t *) module;
vdev->vehicle_device.common.close = vdev_close;
// Define the Vehicle HAL device specific functions.
vdev->vehicle_device.list_properties = vdev_list_properties;
vdev->vehicle_device.init = vdev_init;
vdev->vehicle_device.release = vdev_release;
vdev->vehicle_device.get = vdev_get;
vdev->vehicle_device.release_memory_from_get = vdev_release_memory_from_get;
vdev->vehicle_device.set = vdev_set;
vdev->vehicle_device.subscribe = vdev_subscribe;
vdev->vehicle_device.unsubscribe = vdev_unsubscribe;
vdev->vehicle_device.dump = vdev_dump;
*device = (hw_device_t *) vdev;
return 0;
}
static struct hw_module_methods_t hal_module_methods = {
.open = vdev_open,
};
/*
* Each HAL implementation must provide this structure. It exposes the open
* method (see hw_module_methods_t above), which opens a device. The vehicle HAL
* is meant to be used as a single-device HAL, hence all the functions should be
* implemented inside the vehicle_hw_device_t struct (see vehicle.h in the
* include/ folder).
*/
vehicle_module_t HAL_MODULE_INFO_SYM = {
.common = {
.tag = HARDWARE_MODULE_TAG,
.module_api_version = VEHICLE_MODULE_API_VERSION_1_0,
.hal_api_version = HARDWARE_HAL_API_VERSION,
.id = VEHICLE_HARDWARE_MODULE_ID,
.name = "Default vehicle HW HAL",
.author = "",
.methods = &hal_module_methods,
},
};
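For completeness, a hypothetical client-side sketch of how a tool would locate and open this module against the (now-removed) vehicle.h API. hw_get_module() and the hw_device_t close hook are the standard libhardware entry points; error handling is abbreviated, and this reference implementation ignores the name argument to open():
#include <cstdio>
#include <hardware/hardware.h>
#include <hardware/vehicle.h>
int main() {
  const hw_module_t* module = nullptr;
  if (hw_get_module(VEHICLE_HARDWARE_MODULE_ID, &module) != 0 || !module) {
    printf("vehicle module not found\n");
    return 1;
  }
  hw_device_t* device = nullptr;
  // The name argument is unused by this reference implementation.
  if (module->methods->open(module, VEHICLE_HARDWARE_MODULE_ID, &device) != 0) {
    printf("open failed\n");
    return 1;
  }
  vehicle_hw_device_t* vehicle = reinterpret_cast<vehicle_hw_device_t*>(device);
  int num_configs = 0;
  vehicle->list_properties(vehicle, &num_configs);
  printf("%d properties\n", num_configs);
  device->close(device);
  return 0;
}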

View file

@ -1,48 +0,0 @@
//
// Copyright (C) 2015 The Android Open Source Project
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Build native tests.
cc_test {
name: "vehicle_tests",
srcs: ["vehicle_tests.cpp"],
shared_libs: [
"liblog",
"libhardware",
],
cflags: [
"-Wall",
"-Wextra",
"-Werror",
],
}
// Build HAL command line utility.
cc_binary {
name: "vehicle-hal-tool",
srcs: ["vehicle-hal-tool.c"],
cflags: [
"-Wall",
"-Wno-unused-parameter",
"-Werror",
],
shared_libs: [
"libcutils",
"libhardware",
"liblog",
],
}

View file

@ -1,73 +0,0 @@
What does this document cover?
This document details how to use the vehicle service if you are implementing
the HAL. It lists the various places to look for code and how to build and test the
code on your own dev device.
This code also provides a simple command line utility for the target to test the
vehicle HAL.
What is the code?
The code is split into the following logical components:
a) hardware/libhardware/include/hardware/vehicle.h - this is the main HAL
interface that OEMs will be required to implement. It includes all
documentation necessary to understand what vehicle subsystems are exposed,
various units, capabilities and any other relevant details about the HAL design
itself.
b) hardware/libhardware/modules/vehicle/vehicle.c
This is a reference implementation that gives OEMs a barebones structure to
get started from. There are implementations of each of the critical HAL
functions, such as get(), set() and subscribe().
c) hardware/libhardware/tests/vehicle/vehicle_test.cpp & vehicle_test_fixtures.h
These are native tests that can be run on the target to validate basic
features of the HAL implementation. Things such as the HAL loading and its
basic functions being implemented (by checking that the returned function
pointers are not NULL) can be asserted. It also checks that the subscribe
function is doing its job by emitting data at continuous intervals and
printing it on stdout.
d) hardware/libhardware/tests/vehicle/vehicle-hal-tool.c
This tool provides a simple utility which can send commands to the
HAL, such as:
i) Getting a property (and printing its value).
ii) Setting a property (and the HAL will take some setting action).
iii) Subscribe to a property (and the HAL should send you values at certain
intervals).
See the usage() function in vehicle-hal-tool.c for details on how to use the
binary.
How to build and run?
You can build everything by issuing the following from the top of the source tree. It
is assumed that you have done a first run of make from the top level so that
intermediates are generated.
$ croot
$ mmm hardware/libhardware
This will generate the following binaries that we care about:
i) out/target/product/XXX/vendor/lib/hw/vehicle.default.so
ii) out/target/product/XXX/data/nativetest/vehicle_tests
iii) out/target/product/XXX/system/bin/vehicle-hal-tool
Push the first shared library to its location on the device:
$ adb push out/target/product/XXX/vendor/lib/hw/vehicle.default.so
/vendor/lib/hw
You can also use 'adb sync' if you like, although this is the easiest,
least-hassle way of putting it in place.
The second binary is a native test, which is simply an executable for the
target device. You can load it anywhere in your /data directory and run it as:
$ adb push out/target/product/XXX/data/nativetest/vehicle_tests
/data/tmp/vehicle_tests
$ adb shell
$ ./data/tmp/vehicle_tests
<... output should show passing tests, at least for the reference
implementation ...>
The last binary is the command line tool, to push the binary on your target do:
$ adb push out/target/product/XXX/system/bin/vehicle-hal-tool
/data/tmp/vehicle-hal-tool

View file

@ -1,537 +0,0 @@
/*
* Copyright (C) 2015 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#define LOG_TAG "vehicle-hal-tool"
#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <log/log.h>
#include <hardware/hardware.h>
#include <hardware/vehicle.h>
void usage() {
printf("Usage: "
"./vehicle-hal-tool [-l] [-m -p -t [-v]]\n"
"-l - List properties\n"
"-m - Mode (cannot be used with -l). Accepted strings: get, set or sub.\n"
"-p - Property (only used with -m)\n"
"-t - Type (only used with -m)\n"
"-w - Wait time in seconds (only used with -m set to sub)\n"
"-v - Value to which vehicle_prop_value is set\n"
"Depending on the type pass the value:\n"
"Int: pass a quoted integer\n"
"Float: pass a quoted float\n"
"Int array: pass a quoted space delimited int array, eg: \"1 2 3 4\" for\n:"
"setting int32_array's all 4 elements (see VEHICLE_VALUE_TYPE_INT32_VEC4\n"
"String: pass a normal string\n\n"
"The configurations to use the tool are as follows:\n"
"List Properties\n"
"---------------\n"
"./vehicle-hal-tool -l \n"
"Lists the various properties defined in HAL implementation. Use this to check if "
"the HAL implementation is correctly set up and exposing the capabilities correctly.\n"
"Get Properties\n"
"---------------\n"
"./vehicle-hal-tool -m get -p <prop> -t <type> [-v <vehicle_prop_value>]\n"
"Example: ./vehicle-hal-tool -m get -p 1028 -t 3 # VEHICLE_PROPERTY_DRIVING_STATUS\n"
"./vehicle-hal-tool -m get -p 257 -t 1 # VEHICLE_PROPERTY_INFO_MAKE\n"
"./vehicle-hal-tool -m get -p 2049 -t 19 -v \"3 0 0 0\"\n"
" # VEHICLE_PROPERTY_RADIO_PRESET\n"
"with preset value set to 3.\n\n"
"Set properties\n"
"--------------\n"
"./vehicle-hal-tool -m set -p 10 -t 1 -v random_property\n"
"Set properties may not be applicable to most properties\n\n"
"Subscribe properties\n"
"--------------------\n"
"Subscribes to be notified about a property change (depending on whether\n"
"it is a on change property or a continuous property) for seconds provided\n"
"as -w paramter.\n"
"./vehicle-hal-tool -m sub -p 1028 -w 10\n"
);
}
void list_all_properties(vehicle_hw_device_t *device) {
int num_configs = -1;
const vehicle_prop_config_t *configs = device->list_properties(device, &num_configs);
if (num_configs < 0) {
printf("List configs error. %d", num_configs);
exit(1);
}
printf("Listing configs\n--------------------\n");
int i = 0;
for (i = 0; i < num_configs; i++) {
const vehicle_prop_config_t *config_temp = configs + i;
printf("Property ID: %d\n"
"Property config_flags: %d\n"
"Property change mode: %d\n"
"Property min sample rate: %f\n"
"Property max sample rate: %f\n",
config_temp->prop, config_temp->config_flags, config_temp->change_mode,
config_temp->min_sample_rate, config_temp->max_sample_rate);
}
}
static void print_property(const vehicle_prop_value_t *data) {
switch (data->value_type) {
case VEHICLE_VALUE_TYPE_STRING:
printf("Value type: STRING\n Size: %d\n", data->value.str_value.len);
// This implementation only supports ASCII.
char *ascii_out = (char *) malloc((data->value.str_value.len + 1) * sizeof(char));
memcpy(ascii_out, data->value.str_value.data, data->value.str_value.len);
ascii_out[data->value.str_value.len] = '\0';
printf("Value Type: STRING %s\n", ascii_out);
free(ascii_out);
break;
case VEHICLE_VALUE_TYPE_BYTES:
printf("Value type: BYTES\n Size: %d", data->value.bytes_value.len);
for (int i = 0; i < data->value.bytes_value.len; i++) {
if ((i % 16) == 0) {
printf("\n %04X: ", i);
}
printf("%02X ", data->value.bytes_value.data[i]);
}
printf("\n");
break;
case VEHICLE_VALUE_TYPE_BOOLEAN:
printf("Value type: BOOLEAN\nValue: %d\n", data->value.boolean_value);
break;
case VEHICLE_VALUE_TYPE_ZONED_BOOLEAN:
printf("Value type: ZONED_BOOLEAN\nZone: %d\n", data->zone);
printf("Value: %d\n", data->value.boolean_value);
break;
case VEHICLE_VALUE_TYPE_INT64:
printf("Value type: INT64\nValue: %" PRId64 "\n", data->value.int64_value);
break;
case VEHICLE_VALUE_TYPE_FLOAT:
printf("Value type: FLOAT\nValue: %f\n", data->value.float_value);
break;
case VEHICLE_VALUE_TYPE_FLOAT_VEC2:
printf("Value type: FLOAT_VEC2\nValue[0]: %f ", data->value.float_array[0]);
printf("Value[1]: %f\n", data->value.float_array[1]);
break;
case VEHICLE_VALUE_TYPE_FLOAT_VEC3:
printf("Value type: FLOAT_VEC3\nValue[0]: %f ", data->value.float_array[0]);
printf("Value[1]: %f ", data->value.float_array[1]);
printf("Value[2]: %f\n", data->value.float_array[2]);
break;
case VEHICLE_VALUE_TYPE_FLOAT_VEC4:
printf("Value type: FLOAT_VEC4\nValue[0]: %f ", data->value.float_array[0]);
printf("Value[1]: %f ", data->value.float_array[1]);
printf("Value[2]: %f ", data->value.float_array[2]);
printf("Value[3]: %f\n", data->value.float_array[3]);
break;
case VEHICLE_VALUE_TYPE_INT32:
printf("Value type: INT32\nValue: %d\n", data->value.int32_value);
break;
case VEHICLE_VALUE_TYPE_INT32_VEC2:
printf("Value type: INT32_VEC2\nValue[0]: %d ", data->value.int32_array[0]);
printf("Value[1]: %d\n", data->value.int32_array[1]);
break;
case VEHICLE_VALUE_TYPE_INT32_VEC3:
printf("Value type: INT32_VEC3\nValue[0]: %d ", data->value.int32_array[0]);
printf("Value[1]: %d ", data->value.int32_array[1]);
printf("Value[2]: %d\n", data->value.int32_array[2]);
break;
case VEHICLE_VALUE_TYPE_INT32_VEC4:
printf("Value type: INT32_VEC4\nValue[0]: %d ", data->value.int32_array[0]);
printf("Value[1]: %d ", data->value.int32_array[1]);
printf("Value[2]: %d ", data->value.int32_array[2]);
printf("Value[3]: %d\n", data->value.int32_array[3]);
break;
case VEHICLE_VALUE_TYPE_ZONED_FLOAT:
printf("Value type: ZONED_FLOAT\nZone: %d ", data->zone);
printf("Value: %f\n", data->value.float_value);
break;
case VEHICLE_VALUE_TYPE_ZONED_FLOAT_VEC2:
printf("Value type: ZONED_FLOAT_VEC2\nZone: %d ", data->zone);
printf("Value[0]: %f", data->value.float_array[0]);
printf("Value[1]: %f\n", data->value.float_array[1]);
break;
case VEHICLE_VALUE_TYPE_ZONED_FLOAT_VEC3:
printf("Value type: ZONED_FLOAT_VEC3\nZone: %d ", data->zone);
printf("Value[0]: %f ", data->value.float_array[0]);
printf("Value[1]: %f ", data->value.float_array[1]);
printf("Value[2]: %f\n", data->value.float_array[2]);
break;
case VEHICLE_VALUE_TYPE_ZONED_FLOAT_VEC4:
printf("Value type: ZONED_FLOAT_VEC4\nZone: %d ", data->zone);
printf("Value[0]: %f ", data->value.float_array[0]);
printf("Value[1]: %f ", data->value.float_array[1]);
printf("Value[2]: %f ", data->value.float_array[2]);
printf("Value[3]: %f\n", data->value.float_array[3]);
break;
case VEHICLE_VALUE_TYPE_ZONED_INT32:
printf("Value type: ZONED_INT32\nZone: %d ", data->zone);
printf("Value: %d\n", data->value.int32_value);
break;
case VEHICLE_VALUE_TYPE_ZONED_INT32_VEC2:
printf("Value type: ZONED_INT32_VEC2\nZone: %d ", data->zone);
printf("Value[0]: %d ", data->value.int32_array[0]);
printf("Value[1]: %d\n", data->value.int32_array[1]);
break;
case VEHICLE_VALUE_TYPE_ZONED_INT32_VEC3:
printf("Value type: ZONED_INT32_VEC3\nZone: %d ", data->zone);
printf("Value[0]: %d ", data->value.int32_array[0]);
printf("Value[1]: %d ", data->value.int32_array[1]);
printf("Value[2]: %d\n", data->value.int32_array[2]);
break;
case VEHICLE_VALUE_TYPE_ZONED_INT32_VEC4:
printf("Value type: ZONED_INT32_VEC4\nZone: %d ", data->zone);
printf("Value[0]: %d ", data->value.int32_array[0]);
printf("Value[1]: %d ", data->value.int32_array[1]);
printf("Value[2]: %d ", data->value.int32_array[2]);
printf("Value[3]: %d\n", data->value.int32_array[3]);
break;
default:
printf("Value type not yet handled: %d.\n", data->value_type);
}
}
void get_property(
vehicle_hw_device_t *device, int32_t property, int32_t type, char *value_string) {
vehicle_prop_value_t *data = (vehicle_prop_value_t *) malloc (sizeof(vehicle_prop_value_t));
// Parse the string according to type.
if (value_string != NULL && strlen(value_string) > 0) {
switch (type) {
case VEHICLE_VALUE_TYPE_INT32:
sscanf(value_string, "%d", &(data->value.int32_value));
break;
case VEHICLE_VALUE_TYPE_INT32_VEC4:
{
int32_t vec[4];
sscanf(value_string, "%d %d %d %d", &vec[0], &vec[1], &vec[2], &vec[3]);
memcpy(data->value.int32_array, vec, sizeof(vec));
break;
}
default:
printf("%s Setting value type not supported: %d\n", __func__, type);
exit(1);
}
}
data->prop = property;
// Seed the value type when the caller supplied one; the HAL may overwrite it.
if (type != -1) {
data->value_type = type;
}
int ret_code = device->get(device, data);
if (ret_code != 0) {
printf("Cannot get property: %d\n", ret_code);
exit(1);
}
// Print the data using the value type reported back by the get() call.
printf("Get output\n------------\n");
print_property(data);
free(data);
}
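/*
 * The get flow above is the minimal one: zero-allocate a
 * vehicle_prop_value_t, set .prop (optionally seeding .value from -v for
 * the types the tool knows how to parse), call device->get(), and print
 * whatever value type the HAL filled in.
 */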
void set_property(vehicle_hw_device_t *device,
int32_t property,
int32_t type,
char *data) {
vehicle_prop_value_t vehicle_data;
vehicle_data.prop = property;
vehicle_data.value_type = type;
int32_t zone = 0;
float value = 0.0;
switch (type) {
case VEHICLE_VALUE_TYPE_STRING:
// TODO: Handle UTF-8 strings generically.
vehicle_data.value.str_value.len = strlen(data);
// Allocate one extra byte so the terminating NUL copied below fits.
vehicle_data.value.str_value.data =
(uint8_t *) malloc(strlen(data) + 1);
memcpy(vehicle_data.value.str_value.data, data, strlen(data) + 1);
break;
case VEHICLE_VALUE_TYPE_BYTES: {
// The value is expected as whitespace-separated hex byte pairs,
// e.g. "0A 1B 2C".
int len = strlen(data);
int numBytes = (len + 1) / 3;
uint8_t *buf = calloc(numBytes, sizeof(uint8_t));
char *byte = strtok(data, " ");
for (int i = 0; byte != NULL && i < numBytes; i++) {
// Parse the current token rather than the start of the whole string.
buf[i] = strtol(byte, NULL, 16);
byte = strtok(NULL, " ");
}
vehicle_data.value.bytes_value.len = numBytes;
vehicle_data.value.bytes_value.data = buf;
}
break;
case VEHICLE_VALUE_TYPE_BOOLEAN:
vehicle_data.value.boolean_value = atoi(data);
break;
case VEHICLE_VALUE_TYPE_ZONED_BOOLEAN:
sscanf(data, "%d %d", &vehicle_data.zone,
&vehicle_data.value.boolean_value);
break;
case VEHICLE_VALUE_TYPE_INT64:
// atoll() preserves values outside the 32-bit range.
vehicle_data.value.int64_value = atoll(data);
break;
case VEHICLE_VALUE_TYPE_FLOAT:
vehicle_data.value.float_value = atof(data);
break;
case VEHICLE_VALUE_TYPE_FLOAT_VEC2:
sscanf(data, "%f %f", &vehicle_data.value.float_array[0],
&vehicle_data.value.float_array[1]);
break;
case VEHICLE_VALUE_TYPE_FLOAT_VEC3:
sscanf(data, "%f %f %f", &vehicle_data.value.float_array[0],
&vehicle_data.value.float_array[1],
&vehicle_data.value.float_array[2]);
break;
case VEHICLE_VALUE_TYPE_FLOAT_VEC4:
sscanf(data, "%f %f %f %f", &vehicle_data.value.float_array[0],
&vehicle_data.value.float_array[1],
&vehicle_data.value.float_array[2],
&vehicle_data.value.float_array[3]);
break;
case VEHICLE_VALUE_TYPE_INT32:
vehicle_data.value.int32_value = atoi(data);
break;
case VEHICLE_VALUE_TYPE_INT32_VEC2:
sscanf(data, "%d %d", &vehicle_data.value.int32_array[0],
&vehicle_data.value.int32_array[1]);
break;
case VEHICLE_VALUE_TYPE_INT32_VEC3:
sscanf(data, "%d %d %d", &vehicle_data.value.int32_array[0],
&vehicle_data.value.int32_array[1],
&vehicle_data.value.int32_array[2]);
break;
case VEHICLE_VALUE_TYPE_INT32_VEC4:
sscanf(data, "%d %d %d %d", &vehicle_data.value.int32_array[0],
&vehicle_data.value.int32_array[1],
&vehicle_data.value.int32_array[2],
&vehicle_data.value.int32_array[3]);
break;
case VEHICLE_VALUE_TYPE_ZONED_FLOAT:
sscanf(data, "%d %f", &zone, &value);
vehicle_data.zone = zone;
vehicle_data.value.float_value = value;
break;
case VEHICLE_VALUE_TYPE_ZONED_FLOAT_VEC2:
sscanf(data, "%d %f %f", &vehicle_data.zone,
&vehicle_data.value.float_array[0],
&vehicle_data.value.float_array[1]);
break;
case VEHICLE_VALUE_TYPE_ZONED_FLOAT_VEC3:
sscanf(data, "%d %f %f %f", &vehicle_data.zone,
&vehicle_data.value.float_array[0],
&vehicle_data.value.float_array[1],
&vehicle_data.value.float_array[2]);
break;
case VEHICLE_VALUE_TYPE_ZONED_FLOAT_VEC4:
sscanf(data, "%d %f %f %f %f", &vehicle_data.zone,
&vehicle_data.value.float_array[0],
&vehicle_data.value.float_array[1],
&vehicle_data.value.float_array[2],
&vehicle_data.value.float_array[3]);
break;
case VEHICLE_VALUE_TYPE_ZONED_INT32:
sscanf(data, "%d %d", &vehicle_data.zone,
&vehicle_data.value.int32_value);
break;
case VEHICLE_VALUE_TYPE_ZONED_INT32_VEC2:
sscanf(data, "%d %d %d", &vehicle_data.zone,
&vehicle_data.value.int32_array[0],
&vehicle_data.value.int32_array[1]);
break;
case VEHICLE_VALUE_TYPE_ZONED_INT32_VEC3:
sscanf(data, "%d %d %d %d", &vehicle_data.zone,
&vehicle_data.value.int32_array[0],
&vehicle_data.value.int32_array[1],
&vehicle_data.value.int32_array[2]);
break;
case VEHICLE_VALUE_TYPE_ZONED_INT32_VEC4:
sscanf(data, "%d %d %d %d %d", &vehicle_data.zone,
&vehicle_data.value.int32_array[0],
&vehicle_data.value.int32_array[1],
&vehicle_data.value.int32_array[2],
&vehicle_data.value.int32_array[3]);
break;
default:
printf("set_property: Value type not yet handled: %d\n", type);
exit(1);
}
printf("Setting Property id: %d\n", vehicle_data.prop);
print_property(&vehicle_data);
int ret_code = device->set(device, &vehicle_data);
if (ret_code != 0) {
printf("Cannot set property: %d\n", ret_code);
exit(1);
}
}
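/*
 * Summary of the -v string formats parsed above (whitespace separated):
 *   BOOLEAN / INT32 / INT64 / FLOAT:  "<value>"
 *   *_VEC2 / *_VEC3 / *_VEC4:         "v0 v1 [v2 [v3]]"
 *   ZONED_* variants:                 "<zone>" followed by the value(s)
 *   STRING:                           the raw string
 *   BYTES:                            hex byte pairs, e.g. "0A 1B 2C"
 */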
int vehicle_event_callback(const vehicle_prop_value_t *event_data) {
// Print what we got.
printf("Got some value from callback property: %d\n", event_data->prop);
printf("Timestamp: %" PRId64 "\n", event_data->timestamp);
print_property(event_data);
return 0;
}
int vehicle_error_callback(int32_t error_code, int32_t property, int32_t operation) {
// Print what we got.
printf("Error code obtained: %d\n", error_code);
return 0;
}
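/*
 * Both callbacks above are registered once via device->init(); the HAL then
 * reports property updates through vehicle_event_callback and operation
 * failures (the operation argument identifies the failed call) through
 * vehicle_error_callback.
 */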
void subscribe_to_property(
vehicle_hw_device_t *device,
int32_t prop,
float sample_rate,
uint32_t wait_in_seconds) {
// Subscribe at the requested sample rate (0 requests on-change delivery).
int ret_code = device->subscribe(device, prop, sample_rate, 0);
if (ret_code != 0) {
printf("Could not subscribe: %d\n", ret_code);
exit(1);
}
// Callbacks will happen on one of the threads created by the HAL, so we
// can simply sleep here and watch the output.
sleep(wait_in_seconds);
// Unsubscribe; the caller is responsible for releasing the HAL.
ret_code = device->unsubscribe(device, prop);
if (ret_code != 0) {
printf("Error unsubscribing from the HAL, continuing to release it anyway...\n");
}
}
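/*
 * Subscription semantics as used here: events arrive on HAL-owned threads
 * for the whole wait window; a sample_rate of 0 requests on-change
 * delivery, while a positive rate (within the property's min/max sample
 * rate reported by list_properties()) requests periodic updates.
 */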
int main(int argc, char* argv[]) {
// Open the vehicle module and just ask for the list of properties.
const hw_module_t *hw_module = NULL;
int ret_code = hw_get_module(VEHICLE_HARDWARE_MODULE_ID, &hw_module);
if (ret_code != 0) {
printf("Cannot open the hw module. Does the HAL exist? %d\n", ret_code);
return -1;
}
vehicle_module_t *vehicle_module = (vehicle_module_t *)(hw_module);
hw_device_t *device = NULL;
ret_code = vehicle_module->common.methods->open(hw_module, NULL, &device);
if (ret_code != 0 || device == NULL) {
printf("Cannot open the hw device: %d\n", ret_code);
return -1;
}
vehicle_hw_device_t *vehicle_device = (vehicle_hw_device_t *) (device);
printf("HAL Loaded!\n");
vehicle_device->init(vehicle_device, vehicle_event_callback, vehicle_error_callback);
// Whether this is a list-properties command (-l).
int list_properties = 0;
// Property ID (see the #defines in vehicle.h).
int property = -1;
// Type of the property's value (see enum vehicle_value_type).
int type = -1;
// Whether the mode is "get" or "set".
char mode[100] = "";
// Actual value as a string representation (supports only PODs for now).
// TODO: Support structures and complex types in the tool.
char value[100] = "";
// Wait time for the subscribe type of calls.
// We keep a default in case the user does not specify one.
int wait_time_in_sec = 10;
// Sample rate for subscribe type of calls.
// The default is 0, which is appropriate for on-change properties.
int sample_rate = 0;
int opt;
while ((opt = getopt(argc, argv, "lm:p:t:v:w:s:")) != -1) {
switch (opt) {
case 'l':
list_properties = 1;
break;
case 'm':
snprintf(mode, sizeof(mode), "%s", optarg);
break;
case 'p':
property = atoi(optarg);
break;
case 't':
type = atoi(optarg);
break;
case 'v':
snprintf(value, sizeof(value), "%s", optarg);
break;
case 'w':
wait_time_in_sec = atoi(optarg);
break;
case 's':
sample_rate = atoi(optarg);
break;
}
}
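// Typical invocations (binary name and IDs are illustrative; take real
// property/type values from the -l output and vehicle.h):
//   vehicle-hal-tool -l
//   vehicle-hal-tool -m get -p <prop> -t <type>
//   vehicle-hal-tool -m set -p <prop> -t <type> -v "<value>"
//   vehicle-hal-tool -m sub -p <prop> -s <sample_rate> -w <seconds>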
// We need at least one of: list properties (-l), or a mode of get, set, or sub.
if (!list_properties &&
!(!strcmp(mode, "get") || !strcmp(mode, "set") || !strcmp(mode, "sub"))) {
usage();
exit(1);
}
if (list_properties) {
printf("Listing properties...\n");
list_all_properties(vehicle_device);
} else if (!strcmp(mode, "get")) {
printf("Getting property ...\n");
if (property == -1) {
printf("Use -p to pass a valid Property.\n");
usage();
exit(1);
}
get_property(vehicle_device, property, type, value);
} else if (!strcmp(mode, "set")) {
printf("Setting property ...\n");
if (property == -1 || type == -1) {
printf("Use -p to pass a valid Property and -t to pass a valid Type.\n");
usage();
exit(1);
}
set_property(vehicle_device, property, type, value);
} else if (!strcmp(mode, "sub")) {
printf("Subscribing property ...\n");
if (property == -1 || wait_time_in_sec <= 0) {
printf("Use -p to pass a valid property and -w to pass a valid wait time(s)\n");
usage();
exit(1);
}
subscribe_to_property(vehicle_device, property, sample_rate, wait_time_in_sec);
}
ret_code = vehicle_device->release(vehicle_device);
if (ret_code != 0) {
printf("Error uniniting HAL, exiting anyways.");
}
return 0;
}

View file

@@ -1,97 +0,0 @@
/*
* Copyright (C) 2015 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef __ANDROID_HAL_VEHICLE_TEST_
#define __ANDROID_HAL_VEHICLE_TEST_
#include <gtest/gtest.h>
#include <hardware/hardware.h>
#include <hardware/vehicle.h>
namespace tests {
static const uint64_t kVersion = HARDWARE_DEVICE_API_VERSION_2(1, 0, 1);
class VehicleModule : public testing::Test {
public:
VehicleModule() :
vehicle_module_(NULL) {}
~VehicleModule() {}
protected:
virtual void SetUp() {
const hw_module_t *hw_module = NULL;
ASSERT_EQ(0, hw_get_module(VEHICLE_HARDWARE_MODULE_ID, &hw_module))
<< "Can't get vehicle module";
ASSERT_TRUE(NULL != hw_module)
<< "hw_get_module didn't return a valid hardware module";
vehicle_module_ = reinterpret_cast<const vehicle_module_t*>(hw_module);
}
const vehicle_module_t* vehicle_module() { return vehicle_module_; }
private:
const vehicle_module_t* vehicle_module_;
};
int VehicleEventCallback(const vehicle_prop_value_t* event_data) {
// Print what we got.
std::cout << "got some value from callback: "
<< event_data->prop
<< " uint32 value: "
<< event_data->value.int32_value << "\n";
return 0;
}
int VehicleErrorCallback(int32_t /*error_code*/, int32_t /*property*/, int32_t /*operation*/) {
// Do nothing.
return 0;
}
class VehicleDevice : public VehicleModule {
public:
VehicleDevice() :
vehicle_device_(NULL) {}
~VehicleDevice() {}
protected:
virtual void SetUp() {
VehicleModule::SetUp();
hw_device_t *device = NULL;
ASSERT_TRUE(NULL != vehicle_module()->common.methods->open)
<< "Vehicle open() is unimplemented";
ASSERT_EQ(0, vehicle_module()->common.methods->open(
(const hw_module_t*)vehicle_module(), NULL, &device))
<< "Can't open vehicle device";
ASSERT_TRUE(NULL != device)
<< "Vehicle open() returned a NULL device";
ASSERT_EQ(kVersion, device->version)
<< "Unsupported version";
vehicle_device_ = reinterpret_cast<vehicle_hw_device_t*>(device);
}
vehicle_hw_device_t* vehicle_device() { return vehicle_device_; }
vehicle_event_callback_fn callback_fn() {
return VehicleEventCallback;
}
vehicle_error_callback_fn error_fn() {
return VehicleErrorCallback;
}
private:
vehicle_hw_device_t* vehicle_device_;
};
} // namespace tests
#endif // __ANDROID_HAL_VEHICLE_TEST_

View file

@@ -1,129 +0,0 @@
/*
* Copyright (C) 2015 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include "vehicle_test_fixtures.h"
#include "hardware/vehicle.h"
namespace tests {
// Check if list_properties command exists.
TEST_F(VehicleDevice, isThereListProperties) {
ASSERT_TRUE(NULL != vehicle_device()->list_properties)
<< "list_properties() function is not implemented";
std::cout << "Test succeeds.\n";
}
// The HAL should provide at least one property. The output of this command
// should be used to verify the validity of the function.
TEST_F(VehicleDevice, listPropertiesMoreThanOne) {
vehicle_prop_config_t const* config;
int num_configs = -1;
config = vehicle_device()->list_properties(vehicle_device(), &num_configs);
ASSERT_TRUE(num_configs > -1) << "list_properties() call failed.";
ASSERT_TRUE(num_configs > 0) << "list_properties() returned zero items.";
std::cout << "Number of properties reported: " << num_configs << "\n";
for (int i = 0; i < num_configs; i++) {
// Print each of the properties.
const vehicle_prop_config_t& config_temp = config[i];
std::cout << "Property ID: " << config_temp.prop << "\n";
std::cout << "Property flags: " << config_temp.config_flags << "\n";
std::cout << "Property change mode: " << config_temp.change_mode << "\n";
std::cout << "Property min sample rate: " << config_temp.min_sample_rate << "\n";
std::cout << "Property max sample rate: " << config_temp.max_sample_rate << "\n\n";
}
}
// Test get() command.
// The fields are hardcoded in the dummy implementation and here.
TEST_F(VehicleDevice, getDriveState) {
vehicle_prop_value_t data;
data.prop = VEHICLE_PROPERTY_DRIVING_STATUS;
// Set the value_type and driving_status fields to -EINVAL so that we can
// check that they are valid when they come back.
data.value_type = -EINVAL;
data.value.driving_status = -EINVAL;
vehicle_device()->get(vehicle_device(), &data);
// Check that the returned values are not invalid.
ASSERT_NE(data.value_type, -EINVAL) << "Drive state value type should be integer.";
ASSERT_NE(data.value.driving_status, -EINVAL) << "Driving status should be positive.";
std::cout << "Driving status value type: " << data.value_type << "\n"
<< "Driving status: " << data.value.driving_status << "\n";
}
// Test the workflows for subscribe and init/release.
// Subscribe will return error before init() is called or after release() is
// called.
TEST_F(VehicleDevice, initTest) {
// Test that init on a new device works. When getting an instance, we are
// already calling 'open' on the device.
int ret_code =
vehicle_device()->init(vehicle_device(), callback_fn(), error_fn());
ASSERT_EQ(ret_code, 0) << "ret code: " << ret_code;
// Trying to init again should return an error.
ret_code = vehicle_device()->init(vehicle_device(), callback_fn(), error_fn());
ASSERT_EQ(ret_code, -EEXIST) << "ret code: " << ret_code;
// Uninit should always return 0.
ret_code = vehicle_device()->release(vehicle_device());
ASSERT_EQ(ret_code, 0) << "ret code: " << ret_code;
// We should be able to init again.
ret_code = vehicle_device()->init(vehicle_device(), callback_fn(), error_fn());
ASSERT_EQ(ret_code, 0) << "ret code: " << ret_code;
// Finally release.
ret_code = vehicle_device()->release(vehicle_device());
ASSERT_EQ(ret_code, 0) << "ret_code: " << ret_code;
}
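// Lifecycle contract exercised above: after open(), the first init() returns
// 0, a second init() returns -EEXIST, and release() always returns 0; the
// device can then be initialized and released again.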
// Test that subscribe works.
// We wait for 20 seconds, during which vehicle.c can post messages from
// within its own thread.
TEST_F(VehicleDevice, subscribeTest) {
// If the device has not been initialized, subscribe should fail right away.
int ret_code = vehicle_device()->subscribe(vehicle_device(), VEHICLE_PROPERTY_DRIVING_STATUS,
0, 0);
ASSERT_EQ(ret_code, -EINVAL) << "Return code is: " << ret_code;
// Let's init the device.
ret_code = vehicle_device()->init(vehicle_device(), callback_fn(), error_fn());
ASSERT_EQ(ret_code, 0) << "Return code is: " << ret_code;
// Subscribe should now go through.
ret_code = vehicle_device()->subscribe(vehicle_device(), VEHICLE_PROPERTY_DRIVING_STATUS, 0, 0);
ASSERT_EQ(ret_code, 0) << "Return code is: " << ret_code;
// We should start getting messages from the callback. Let's wait for
// 20 seconds before unsubscribing.
std::cout << "Sleeping for 20 seconds.\n";
sleep(20);
std::cout << "Waking from sleep.\n";
// This property does not exist, so we should get -EINVAL.
ret_code = vehicle_device()->unsubscribe(vehicle_device(), VEHICLE_PROPERTY_INFO_VIN);
ASSERT_EQ(ret_code, -EINVAL) << "Return code is: " << ret_code;
// This property exists, so we should get a success return code - also this
// will be a blocking call.
ret_code = vehicle_device()->unsubscribe(vehicle_device(), VEHICLE_PROPERTY_DRIVING_STATUS);
ASSERT_EQ(ret_code, 0) << "Return code is: " << ret_code;
}
} // namespace tests