Non ABI changes to neuralnetworks HAL
NNAPI HAL comment strings were reformatted due to template changes for the new versioning scheme.

Test: Image built
Bug: 177298018
Change-Id: I67b3a38c087edf6ba3c295f5ac43ca02b8359f6a
parent bd8ae92147
commit 87ff8d20ae

4 changed files with 24 additions and 14 deletions
@@ -779,8 +779,9 @@ bda492ec4021d13869de72bd6f8c15c5837b78d6136b8d538efec5320573a5ec android.hardwar
 6017b4f2481feb0fffceae81c62bc372c898998b2d8fe69fbd39859d3a315e5e android.hardware.keymaster@4.0::IKeymasterDevice
 dabe23dde7c9e3ad65c61def7392f186d7efe7f4216f9b6f9cf0863745b1a9f4 android.hardware.keymaster@4.1::IKeymasterDevice
 cd84ab19c590e0e73dd2307b591a3093ee18147ef95e6d5418644463a6620076 android.hardware.neuralnetworks@1.2::IDevice
-9625e85f56515ad2cf87b6a1847906db669f746ea4ab02cd3d4ca25abc9b0109 android.hardware.neuralnetworks@1.2::types
-9e758e208d14f7256e0885d6d8ad0b61121b21d8c313864f981727ae55bffd16 android.hardware.neuralnetworks@1.3::types
+f729ee6a5f136b25d79ea6895d24700fce413df555baaecf2c39e4440d15d043 android.hardware.neuralnetworks@1.0::types
+c6ae443608502339aec4256feef48e7b2d36f7477ca5361cc95cd27a8ed9c612 android.hardware.neuralnetworks@1.2::types
+9fe5a4093043c2b5da4e9491aed1646c388a5d3059b8fd77d5b6a9807e6d3a3e android.hardware.neuralnetworks@1.3::types
 e8c86c69c438da8d1549856c1bb3e2d1b8da52722f8235ff49a30f2cce91742c android.hardware.soundtrigger@2.1::ISoundTriggerHwCallback
 b9fbb6e2e061ed0960939d48b785e9700210add1f13ed32ecd688d0f1ca20ef7 android.hardware.renderscript@1.0::types
 0f53d70e1eadf8d987766db4bf6ae2048004682168f4cab118da576787def3fa android.hardware.radio@1.0::types
@@ -308,8 +308,9 @@ enum OperationType : int32_t {
 * Outputs:
 * * 0: The output 4-D tensor, of shape
 *      [batches, out_height, out_width, depth_out].
-*      For output tensor of {@link OperandType::TENSOR_QUANT8_ASYMM},
-*      the following condition must be satisfied: output_scale > input_scale * filter_scale
+*      For output tensor of
+*      {@link OperandType::TENSOR_QUANT8_ASYMM}, the following condition must
+*      be satisfied: output_scale > input_scale * filter_scale
 */
 CONV_2D = 3,
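The constraint restated by this hunk, output_scale > input_scale * filter_scale for a TENSOR_QUANT8_ASYMM CONV_2D output, is a plain arithmetic check on the operands' quantization parameters. A minimal C++ sketch of such a check follows; the struct and function names are illustrative only, not NNAPI or HAL symbols.

#include <cstdint>
#include <iostream>

struct QuantParams {
    float scale;
    int32_t zeroPoint;
};

// True when the documented requirement for a quantized CONV_2D output holds.
bool convOutputScaleIsValid(const QuantParams& input,
                            const QuantParams& filter,
                            const QuantParams& output) {
    return output.scale > input.scale * filter.scale;
}

int main() {
    const QuantParams input{0.5f, 128}, filter{0.25f, 0}, output{0.2f, 100};
    std::cout << std::boolalpha
              << convOutputScaleIsValid(input, filter, output) << '\n';  // prints: true
}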
@@ -314,7 +314,8 @@ enum OperationType : int32_t {
 * tensors. The output shape is [D0, D1, ..., sum(Daxis(i)), ..., Dm].
 * Since HAL version 1.2, for a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
 * the scale and zeroPoint values can be different from
-* input tensors. Before HAL version 1.2 they have to be the same as for the input tensors.
+* input tensors. Before HAL version 1.2 they have to be the same as for the
+* input tensors.
 */
 CONCATENATION = @1.1::OperationType:CONCATENATION,
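Allowing the CONCATENATION output's scale/zeroPoint to differ from the inputs (permitted since HAL version 1.2, per the hunk above) means an implementation has to requantize each input value into the output's quantization space. A rough sketch of that arithmetic, assuming the standard affine scheme real = scale * (quantized - zeroPoint); the helper name is made up for illustration.

#include <algorithm>
#include <cmath>
#include <cstdint>
#include <iostream>

// Requantize one value from the input's (scale, zeroPoint) to the output's.
uint8_t requantize(uint8_t q, float inScale, int32_t inZero,
                   float outScale, int32_t outZero) {
    const float real = (static_cast<int32_t>(q) - inZero) * inScale;           // dequantize
    const int32_t out = static_cast<int32_t>(std::lround(real / outScale)) + outZero;
    return static_cast<uint8_t>(std::clamp(out, 0, 255));                      // clamp to uint8 range
}

int main() {
    // Input quantized with scale 0.5 / zeroPoint 10; output with scale 0.25 / zeroPoint 0.
    std::cout << static_cast<int>(requantize(30, 0.5f, 10, 0.25f, 0)) << '\n';  // prints: 40
}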
@@ -460,8 +461,9 @@ enum OperationType : int32_t {
 * Outputs:
 * * 0: The output 4-D tensor, of shape
 *      [batches, out_height, out_width, depth_out].
-*      Before HAL version 1.2, for output tensor of {@link OperandType::TENSOR_QUANT8_ASYMM},
-*      the following condition must be satisfied: output_scale > input_scale * filter_scale
+*      Before HAL version 1.2, for output tensor of
+*      {@link OperandType::TENSOR_QUANT8_ASYMM}, the following condition must
+*      be satisfied: output_scale > input_scale * filter_scale
 */
 CONV_2D = @1.1::OperationType:CONV_2D,
@@ -263,7 +263,8 @@ enum OperationType : int32_t {
 * tensors. The output shape is [D0, D1, ..., sum(Daxis(i)), ..., Dm].
 * Since HAL version 1.2, for a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
 * the scale and zeroPoint values can be different from
-* input tensors. Before HAL version 1.2 they have to be the same as for the input tensors.
+* input tensors. Before HAL version 1.2 they have to be the same as for the
+* input tensors.
 * For a {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor,
 * the scale and zeroPoint values can be different from input tensors.
 */
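The 1.3 wording above also refers to {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED}, the signed counterpart of TENSOR_QUANT8_ASYMM. Both follow real = scale * (quantized - zeroPoint), but the signed type stores int8 values, so an unsigned value and zeroPoint map onto the signed representation by subtracting 128 from each. A small illustrative sketch; the helper is hypothetical, not HAL API.

#include <cstdint>
#include <iostream>

struct SignedQuant {
    int8_t value;
    int32_t zeroPoint;
};

// Map an unsigned asymmetric (value, zeroPoint) pair to the signed representation.
SignedQuant toSignedAsymm(uint8_t value, int32_t zeroPoint) {
    return {static_cast<int8_t>(static_cast<int32_t>(value) - 128), zeroPoint - 128};
}

int main() {
    const SignedQuant s = toSignedAsymm(200, 128);
    // Same real value either way: scale * (200 - 128) == scale * (72 - 0).
    std::cout << static_cast<int>(s.value) << " " << s.zeroPoint << '\n';  // prints: 72 0
}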
@@ -312,7 +313,8 @@ enum OperationType : int32_t {
 * * * {@link OperandType::TENSOR_INT32} for bias (with scale set to
 * * * input.scale * filter.scale).
 *
-* * Quantized signed with filter symmetric per channel quantization (since HAL version 1.3):
+* * Quantized signed with filter symmetric per channel quantization
+* * (since HAL version 1.3):
 * * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} for input, and output.
 * * * {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL} for filter.
 * * * {@link OperandType::TENSOR_INT32} for bias (scale set to 0.0,
@@ -425,8 +427,9 @@ enum OperationType : int32_t {
 * Outputs:
 * * 0: The output 4-D tensor, of shape
 *      [batches, out_height, out_width, depth_out].
-*      Before HAL version 1.2, for output tensor of {@link OperandType::TENSOR_QUANT8_ASYMM},
-*      the following condition must be satisfied: output_scale > input_scale * filter_scale
+*      Before HAL version 1.2, for output tensor of
+*      {@link OperandType::TENSOR_QUANT8_ASYMM}, the following condition must
+*      be satisfied: output_scale > input_scale * filter_scale
 */
 CONV_2D = @1.2::OperationType:CONV_2D,
@@ -477,7 +480,8 @@ enum OperationType : int32_t {
 * * * {@link OperandType::TENSOR_INT32} for bias (with scale set to
 * * * input.scale * filter.scale).
 *
-* * Quantized signed with filter symmetric per channel quantization (since HAL version 1.3):
+* * Quantized signed with filter symmetric per channel quantization
+* * (since HAL version 1.3):
 * * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} for input, and output.
 * * * {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL} for filter.
 * * * {@link OperandType::TENSOR_INT32} for bias (scale set to 0.0,
@@ -3354,7 +3358,8 @@ enum OperationType : int32_t {
 * * * {@link OperandType::TENSOR_INT32} for bias (scale set to 0.0,
 * * * each value scaling is separate and equal to input.scale * filter.scales[channel]).
 *
-* * Quantized signed with filter symmetric per channel quantization (since HAL version 1.3):
+* * Quantized signed with filter symmetric per channel quantization
+* * (since HAL version 1.3):
 * * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} for input, and output.
 * * * {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL} for filter.
 * * * {@link OperandType::TENSOR_INT32} for bias (scale set to 0.0,
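The per-channel bias rule quoted in the hunk above (bias is TENSOR_INT32 with its own scale field set to 0.0, while the value for channel c is effectively quantized with input.scale * filter.scales[c]) can be sketched as below; the function and variable names are invented for illustration, not NNAPI symbols.

#include <cmath>
#include <cstddef>
#include <cstdint>
#include <iostream>
#include <vector>

// Quantize float biases to int32, using one effective scale per output channel.
std::vector<int32_t> quantizeBiasPerChannel(const std::vector<float>& biasFloat,
                                            float inputScale,
                                            const std::vector<float>& filterScales) {
    std::vector<int32_t> biasInt32(biasFloat.size());
    for (std::size_t c = 0; c < biasFloat.size(); ++c) {
        const float biasScale = inputScale * filterScales[c];  // input.scale * filter.scales[c]
        biasInt32[c] = static_cast<int32_t>(std::lround(biasFloat[c] / biasScale));
    }
    return biasInt32;
}

int main() {
    const auto bias = quantizeBiasPerChannel({0.5f, -1.0f}, 0.5f, {0.1f, 0.2f});
    std::cout << bias[0] << " " << bias[1] << '\n';  // prints: 10 -10
}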
@@ -4615,7 +4620,8 @@ enum OperationType : int32_t {
 * * * {@link OperandType::TENSOR_INT32} for bias (with scale set to
 * * * input.scale * filter.scale).
 *
-* * Quantized signed with filter symmetric per channel quantization (since HAL version 1.3):
+* * Quantized signed with filter symmetric per channel quantization
+* * (since HAL version 1.3):
 * * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} for input, and output.
 * * * {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL} for filter.
 * * * {@link OperandType::TENSOR_INT32} for bias (scale set to 0.0,