Add TENSOR_QUANT8_ASYMM_SIGNED support to ADD, MUL and SUB.

am: 53c79d179a

Change-Id: Ic86f1abb032a65f07b0fe1e64377ae0bc631074a
Lev Proleev 2019-11-28 08:11:27 -08:00 committed by android-build-merger
commit 761a0807cd
2 changed files with 12 additions and 5 deletions

@@ -597,7 +597,7 @@ adb0efdf1462e9b2e742c0dcadd598666aac551f178be06e755bfcdf5797abd0 android.hardwar
9e59fffceed0dd72a9799e04505db5f777bbbea1af0695ba4107ef6d967c6fda android.hardware.neuralnetworks@1.3::IDevice
4a6c3b3556da951b4def21ba579a227c022980fe4465df6cdfbe20628fa75f5a android.hardware.neuralnetworks@1.3::IPreparedModel
94e803236398bed1febb11cc21051bc42ec003700139b099d6c479e02a7ca3c3 android.hardware.neuralnetworks@1.3::IPreparedModelCallback
554baa3b317e077b850afcbaac99daeef56861b1786540e56275a4fcad1f43e3 android.hardware.neuralnetworks@1.3::types
103cb87c5ed46851badac097f8d190da60f39b5ab32d60e2e93f64d3014ea75c android.hardware.neuralnetworks@1.3::types
274fb1254a6d1a97824ec5c880eeefc0e410dc6d3a2a4c34052201169d2b7de0 android.hardware.radio@1.5::types
c8e81d912827a5d49b2ddcdc4eb4556c5d231a899a1dca879309e04210daa4a0 android.hardware.radio@1.5::IRadio
a62a93faf173b14a6175b683ebf61ffa568dc61f81e369d2dce7b1265e86cf2f android.hardware.radio@1.5::IRadioIndication

@@ -109,6 +109,7 @@ enum OperationType : int32_t {
* * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
* * {@link OperandType::TENSOR_FLOAT32}
* * {@link OperandType::TENSOR_QUANT8_ASYMM}
* * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
*
* Supported tensor rank: up to 4
*
@@ -116,7 +117,8 @@ enum OperationType : int32_t {
* * 0: A tensor.
* * 1: A tensor of the same {@link OperandType}, and compatible dimensions
* as input0.
* For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
* For a {@link OperandType::TENSOR_QUANT8_ASYMM} and
* {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor,
* the scales and zeroPoint can be different from input0 scale and zeroPoint.
* * 2: An {@link OperandType::INT32} scalar, and has to be one of the
* {@link FusedActivationFunc} values. Specifies the activation to
@@ -124,7 +126,8 @@ enum OperationType : int32_t {
*
* Outputs:
* * 0: The sum, a tensor of the same {@link OperandType} as input0.
* For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
* For a {@link OperandType::TENSOR_QUANT8_ASYMM} and
* {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor,
* the scale and zeroPoint can be different from inputs' scale and zeroPoint.
*/
ADD = @1.2::OperationType:ADD,
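
For reference, below is a minimal sketch of how an element-wise ADD over TENSOR_QUANT8_ASYMM_SIGNED data can honor per-operand quantization parameters, which is why the documentation above allows the output's scale and zeroPoint to differ from the inputs'. Everything in it (QuantParams, addQuant8Signed) is illustrative and is not part of the HAL or of any driver implementation; the fused activation input is omitted for brevity.

// Sketch only: dequantize each input with its own scale/zeroPoint, add in
// float, then requantize with the output's possibly different parameters.
#include <algorithm>
#include <cmath>
#include <cstddef>
#include <cstdint>
#include <vector>

struct QuantParams {
    float scale;
    int32_t zeroPoint;  // TENSOR_QUANT8_ASYMM_SIGNED: zeroPoint in [-128, 127]
};

std::vector<int8_t> addQuant8Signed(const std::vector<int8_t>& a, QuantParams qa,
                                    const std::vector<int8_t>& b, QuantParams qb,
                                    QuantParams qOut) {
    std::vector<int8_t> out(a.size());
    for (std::size_t i = 0; i < a.size(); ++i) {
        // Each operand is dequantized with its own parameters.
        float sum = qa.scale * (a[i] - qa.zeroPoint) + qb.scale * (b[i] - qb.zeroPoint);
        // Requantize to the output's quantization and clamp to the int8 range.
        long q = std::lround(sum / qOut.scale) + qOut.zeroPoint;
        out[i] = static_cast<int8_t>(std::clamp<long>(q, -128, 127));
    }
    return out;
}
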
@@ -1345,6 +1348,7 @@ enum OperationType : int32_t {
* * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
* * {@link OperandType::TENSOR_FLOAT32}
* * {@link OperandType::TENSOR_QUANT8_ASYMM}
* * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
*
* Supported tensor rank: up to 4
*
@@ -1358,7 +1362,8 @@ enum OperationType : int32_t {
*
* Outputs:
* * 0: The product, a tensor of the same {@link OperandType} as input0.
* For output tensor of {@link OperandType::TENSOR_QUANT8_ASYMM},
* For output tensor of {@link OperandType::TENSOR_QUANT8_ASYMM}
* and {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED},
* the following condition must be satisfied:
* output_scale > input1_scale * input2_scale.
*/
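
The MUL constraint quoted above, output_scale > input1_scale * input2_scale, keeps the effective requantization multiplier input1_scale * input2_scale / output_scale below 1, which common fixed-point requantization schemes rely on. A trivial sketch of the check follows; the function name is illustrative, not the actual validation code.

// Illustrative check of the documented MUL scale constraint for quantized types.
#include <cstdio>

bool mulOutputScaleValid(float input1Scale, float input2Scale, float outputScale) {
    return outputScale > input1Scale * input2Scale;
}

int main() {
    // Two inputs with scale 0.5 need an output scale strictly greater than 0.25.
    std::printf("%d\n", mulOutputScaleValid(0.5f, 0.5f, 0.3f));   // prints 1
    std::printf("%d\n", mulOutputScaleValid(0.5f, 0.5f, 0.25f));  // prints 0
}
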
@@ -2083,6 +2088,7 @@ enum OperationType : int32_t {
* * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
* * {@link OperandType::TENSOR_FLOAT32}
* * {@link OperandType::TENSOR_QUANT8_ASYMM} (since HAL version 1.2)
* * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
*
* Supported tensor rank: up to 4
*
@@ -2096,7 +2102,8 @@ enum OperationType : int32_t {
*
* Outputs:
* * 0: A tensor of the same {@link OperandType} as input0.
* For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
* For a {@link OperandType::TENSOR_QUANT8_ASYMM} and
* {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor,
* the scale and zeroPoint can be different from inputs' scale and zeroPoint.
*/
SUB = @1.2::OperationType:SUB,
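
The TENSOR_QUANT8_ASYMM_SIGNED operand type added for ADD, MUL and SUB uses the same affine mapping as TENSOR_QUANT8_ASYMM, realValue = scale * (quantizedValue - zeroPoint), but stores int8 values, with both the stored values and the zeroPoint in [-128, 127] instead of [0, 255]. A small sketch of a value-preserving conversion between the two encodings; the struct names are illustrative and not HAL definitions.

// Shifting both the stored value and the zeroPoint by -128 leaves
// scale * (value - zeroPoint), i.e. the represented real value, unchanged.
#include <cassert>
#include <cstdint>

struct Quant8Asymm       { uint8_t value; float scale; int32_t zeroPoint; };  // [0, 255]
struct Quant8AsymmSigned { int8_t  value; float scale; int32_t zeroPoint; };  // [-128, 127]

Quant8AsymmSigned toSigned(Quant8Asymm u) {
    return {static_cast<int8_t>(u.value - 128), u.scale, u.zeroPoint - 128};
}

int main() {
    Quant8Asymm u{200, 0.5f, 128};
    Quant8AsymmSigned s = toSigned(u);
    // Both encodings represent the same real value (here 0.5 * 72 = 36.0).
    assert(u.scale * (u.value - u.zeroPoint) == s.scale * (s.value - s.zeroPoint));
    return 0;
}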