Add TENSOR_QUANT8_ASYMM_SIGNED support for more ops am: 90fc2cc193 am: 6655e46b2a
Change-Id: I46282bba2d453e715eec3dccea812bc33824cba5
commit f22e69b499
4 changed files with 101 additions and 29 deletions
current.txt (10 changes)
@@ -578,7 +578,7 @@ f1109cbb10297b7429a11fab42afa912710b303c9bf20bd5cdb8bd57b9c84186 android.hardwar
  9d8ee57c490ffeaa28f702eaea8d198cb510e4bbfb99e6cb5f63e73341057c7c android.hardware.neuralnetworks@1.1::types
  fb382e986c10b8fbb797a8546e8f9ea6d1107bfe6f3fb7e57f6bbbf1f807a906 android.hardware.neuralnetworks@1.2::IDevice
  40e71cd693de5b832325c5d8f081f2ff20a7ba2b89d401cee5b4b3eb0e241681 android.hardware.neuralnetworks@1.2::IPreparedModel
- 72de91c3feba4b19c159cd1c413cbea596b78240caa43e31194e20e6f5b05c49 android.hardware.neuralnetworks@1.2::types
+ b40eb9dc491e3b203be2edca330ccd0417e9ca77b1b1b0f4c628a5fd269764a2 android.hardware.neuralnetworks@1.2::types
  a785a57447a81e9c130eef6904c3a5c256076c6a04588c40620ebd6fa2660d77 android.hardware.radio@1.2::types
  1a6e2bd289f22931c526b21916910f1d4c436b7acb9556e4243de4ce8e6cc2e4 android.hardware.soundtrigger@2.0::ISoundTriggerHwCallback
  fd65298e1e09e0e3c781ab18305920d757dbe55a3b459ce17814ec5cf6dfee99 android.hardware.wifi@1.0::IWifiP2pIface
@@ -597,7 +597,13 @@ adb0efdf1462e9b2e742c0dcadd598666aac551f178be06e755bfcdf5797abd0 android.hardwar
  9e59fffceed0dd72a9799e04505db5f777bbbea1af0695ba4107ef6d967c6fda android.hardware.neuralnetworks@1.3::IDevice
  4a6c3b3556da951b4def21ba579a227c022980fe4465df6cdfbe20628fa75f5a android.hardware.neuralnetworks@1.3::IPreparedModel
  94e803236398bed1febb11cc21051bc42ec003700139b099d6c479e02a7ca3c3 android.hardware.neuralnetworks@1.3::IPreparedModelCallback
- 2d16429145dc1158bf3e45c7de86a39e461dec3ec00512c11a7e5249535a2e96 android.hardware.neuralnetworks@1.3::types
+ 8900b3a4ef6c0be540821fad0ad8b58b3654b70cfa38df662c640b88ea486d9f android.hardware.neuralnetworks@1.3::types
+ 3e01d4446cd69fd1c48f8572efd97487bc179564b32bd795800b97bbe10be37b android.hardware.wifi@1.4::IWifi
+ a64467bae843569f0d465c5be7f0c7a5b987985b55a3ef4794dd5afc68538650 android.hardware.wifi.supplicant@1.3::ISupplicant
+ 44445b8a03d7b9e68b2fbd954672c18a8fce9e32851b0692f4f4ab3407f86ecb android.hardware.wifi.supplicant@1.3::ISupplicantStaIface
+ 619fc9839ec6e369cfa9b28e3e9412e6885720ff8f9b5750c1b6ffb905120391 android.hardware.wifi.supplicant@1.3::ISupplicantStaIfaceCallback
+ c9273429fcf98d797d3bb07fdba6f1be95bf960f9255cde169fd1ca4db85f856 android.hardware.wifi.supplicant@1.3::ISupplicantStaNetwork
+ 9b0a3ab6f4f74b971ed094426d8a443e29b512ff03e1ab50c07156396cdb2483 android.hardware.wifi.supplicant@1.3::types
  274fb1254a6d1a97824ec5c880eeefc0e410dc6d3a2a4c34052201169d2b7de0 android.hardware.radio@1.5::types
  c8e81d912827a5d49b2ddcdc4eb4556c5d231a899a1dca879309e04210daa4a0 android.hardware.radio@1.5::IRadio
  a62a93faf173b14a6175b683ebf61ffa568dc61f81e369d2dce7b1265e86cf2f android.hardware.radio@1.5::IRadioIndication

@@ -3141,8 +3141,8 @@ enum OperationType : int32_t {
  * {@link SymmPerChannelQuantParams}) must be set to 0.
  * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
  * tensor of type {@link OperandType::TENSOR_FLOAT32} or
- * {@link OperandType::TENSOR_FLOAT16}, the bias must be of the same
- * type. For filter tensor of {@link OperandType::TENSOR_QUANT8_ASYMM},
+ * {@link OperandType::TENSOR_FLOAT16}, the bias must be of the same type.
+ * For filter tensor of {@link OperandType::TENSOR_QUANT8_ASYMM},
  * the bias should be of {@link OperandType::TENSOR_INT32}, with zeroPoint
  * of 0 and bias_scale == input_scale * filter_scale. For filter tensor
  * of {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL}, the bias
@@ -3181,7 +3181,8 @@ enum OperationType : int32_t {
  * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
  * tensor of type {@link OperandType::TENSOR_FLOAT32} or
  * {@link OperandType::TENSOR_FLOAT16}, the bias must be of the same
- * type. For filter tensor of {@link OperandType::TENSOR_QUANT8_ASYMM},
+ * {@link OperandType::TENSOR_FLOAT16}, the bias must be of the same type.
+ * For filter tensor of {@link OperandType::TENSOR_QUANT8_ASYMM},
  * the bias should be of {@link OperandType::TENSOR_INT32}, with zeroPoint
  * of 0 and bias_scale == input_scale * filter_scale. For filter tensor
  * of {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL}, the bias
@@ -3668,14 +3669,17 @@ enum OperationType : int32_t {
  /**
  * Quantizes the input tensor.
  *
- * The formula is:
+ * The formula for {@link OperandType::TENSOR_QUANT8_ASYMM} output tensor is:
  *
  * output = max(0, min(255, round(input / scale) + zeroPoint)
  *
- * Supported tensor {@link OperandType}:
+ * Supported input tensor {@link OperandType}:
  * * {@link OperandType::TENSOR_FLOAT16}
  * * {@link OperandType::TENSOR_FLOAT32}
  *
+ * Supported output tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
  * Supported tensor rank: from 1
  *
  * Inputs:

@@ -563,6 +563,7 @@ enum OperationType : int32_t {
  * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
  * * {@link OperandType::TENSOR_FLOAT32}
  * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
  *
  * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
  * With the default data layout NHWC, the data is stored in the order of:
@@ -583,7 +584,8 @@ enum OperationType : int32_t {
  * Outputs:
  * * 0: The output 4-D tensor, of shape [batch, height*block_size,
  * width*block_size, depth/(block_size*block_size)].
- * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} and
+ * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor,
  * the scale and zeroPoint must be the same as input0.
  */
  DEPTH_TO_SPACE = @1.2::OperationType:DEPTH_TO_SPACE,
@@ -1499,6 +1501,7 @@ enum OperationType : int32_t {
  * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
  * * {@link OperandType::TENSOR_FLOAT32}
  * * {@link OperandType::TENSOR_QUANT8_ASYMM} (since HAL version 1.2)
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
  *
  * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
  * With the default data layout NHWC, the data is stored in the order of:
@@ -1541,7 +1544,8 @@ enum OperationType : int32_t {
  * Outputs:
  * * 0: The output 4-D tensor, of shape
  * [batches, new_height, new_width, depth].
- * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} and
+ * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor,
  * the scale and zeroPoint must be the same as input0.
  */
  RESIZE_BILINEAR = @1.2::OperationType:RESIZE_BILINEAR,
@@ -1660,6 +1664,7 @@ enum OperationType : int32_t {
  * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
  * * {@link OperandType::TENSOR_FLOAT32}
  * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
  *
  * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
  * With the default data layout NHWC, the data is stored in the order of:
@@ -1680,7 +1685,8 @@ enum OperationType : int32_t {
  * Outputs:
  * * 0: The output 4-D tensor, of shape [batches, height/block_size,
  * width/block_size, depth_in*block_size*block_size].
- * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} and
+ * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor,
  * the scale and zeroPoint must be the same as input0.
  */
  SPACE_TO_DEPTH = @1.2::OperationType:SPACE_TO_DEPTH,
@@ -1804,6 +1810,7 @@ enum OperationType : int32_t {
  * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
  * * {@link OperandType::TENSOR_FLOAT32}
  * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
  *
  * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
  * With the default data layout NHWC, the data is stored in the order of:
@@ -1822,7 +1829,8 @@ enum OperationType : int32_t {
  *
  * Outputs:
  * * 0: A tensor of the same {@link OperandType} as input0.
- * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} and
+ * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor,
  * the scale and zeroPoint must be the same as input0.
  */
  BATCH_TO_SPACE_ND = @1.2::OperationType:BATCH_TO_SPACE_ND,
@@ -1915,6 +1923,7 @@ enum OperationType : int32_t {
  * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
  * * {@link OperandType::TENSOR_FLOAT32}
  * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
  * (full support since HAL version 1.2, see the output section)
  *
  * Supported tensor rank: up to 4
@@ -1937,7 +1946,8 @@ enum OperationType : int32_t {
  * of the padding:
  * output0.dimension[i] =
  * padding[i, 0] + input0.dimension[i] + padding[i, 1]
- * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} and
+ * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor,
  * the scale and zeroPoint must be the same as input0.
  *
  * NOTE: Before HAL version 1.2, the pad value for
@@ -1961,6 +1971,7 @@ enum OperationType : int32_t {
  * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
  * * {@link OperandType::TENSOR_FLOAT32}
  * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
  * (full support since HAL version 1.2, see the output section)
  *
  * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
@@ -1988,7 +1999,8 @@ enum OperationType : int32_t {
  *
  * Outputs:
  * * 0: A tensor of the same {@link OperandType} as input0.
- * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} and
+ * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor,
  * the scale and zeroPoint must be the same as input0.
  *
  * NOTE: Before HAL version 1.2, the pad value for
@@ -2137,6 +2149,7 @@ enum OperationType : int32_t {
  * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
  * * {@link OperandType::TENSOR_FLOAT32}
  * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
  *
  * Supported tensor rank: up to 4
  *
@@ -2148,7 +2161,8 @@ enum OperationType : int32_t {
  *
  * Outputs:
  * * 0: A tensor of the same {@link OperandType} as input0.
- * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} and
+ * {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
  * the scale and zeroPoint must be the same as input0.
  */
  TRANSPOSE = @1.2::OperationType:TRANSPOSE,
@@ -2694,6 +2708,7 @@ enum OperationType : int32_t {
  * * {@link OperandType::TENSOR_FLOAT16}
  * * {@link OperandType::TENSOR_FLOAT32}
  * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
  *
  * Supported tensor rank: up to 4
  *
@@ -2708,7 +2723,8 @@ enum OperationType : int32_t {
  *
  * Outputs:
  * * 0: A tensor of the same {@link OperandType} and same shape as input0.
- * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} and
+ * {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
  * the scale and zeroPoint must be the same as input0.
  */
  CHANNEL_SHUFFLE = @1.2::OperationType:CHANNEL_SHUFFLE,
@@ -3067,12 +3083,23 @@ enum OperationType : int32_t {
  * * * {@link OperandType::TENSOR_INT32} for bias (with scale set to
  * * * input.scale * filter.scale).
  *
+ * * Quantized signed (since HAL version 1.3):
+ * * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} for input, filter, and output.
+ * * * {@link OperandType::TENSOR_INT32} for bias (with scale set to
+ * * * input.scale * filter.scale).
+ *
  * * Quantized with symmetric per channel quantization for the filter:
  * * * {@link OperandType::TENSOR_QUANT8_ASYMM} for input, and output.
  * * * {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL} for filter.
  * * * {@link OperandType::TENSOR_INT32} for bias (scale set to 0.0,
  * * * each value scaling is separate and equal to input.scale * filter.scales[channel]).
  *
+ * * Quantized signed with filter symmetric per channel quantization (since HAL version 1.3):
+ * * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} for input, and output.
+ * * * {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL} for filter.
+ * * * {@link OperandType::TENSOR_INT32} for bias (scale set to 0.0,
+ * * * each value scaling is separate and equal to input.scale * filter.scales[channel]).
+ *
  * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
  * With the default data layout NHWC, the data is stored in the order of:
  * [batch, height, width, channels]. Alternatively, the data layout could
@@ -3091,8 +3118,9 @@ enum OperationType : int32_t {
  * {@link SymmPerChannelQuantParams}) must be set to 0.
  * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
  * tensor of type {@link OperandType::TENSOR_FLOAT32} or
- * {@link OperandType::TENSOR_FLOAT16}, the bias must be of the same
- * type. For filter tensor of {@link OperandType::TENSOR_QUANT8_ASYMM},
+ * {@link OperandType::TENSOR_FLOAT16}, the bias must be of the same type.
+ * For filter tensor of {@link OperandType::TENSOR_QUANT8_ASYMM} and
+ * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED}
  * the bias should be of {@link OperandType::TENSOR_INT32}, with zeroPoint
  * of 0 and bias_scale == input_scale * filter_scale. For filter tensor
  * of {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL}, the bias
@@ -3131,7 +3159,9 @@ enum OperationType : int32_t {
  * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
  * tensor of type {@link OperandType::TENSOR_FLOAT32} or
  * {@link OperandType::TENSOR_FLOAT16}, the bias must be of the same
- * type. For filter tensor of {@link OperandType::TENSOR_QUANT8_ASYMM},
+ * {@link OperandType::TENSOR_FLOAT16}, the bias must be of the same type.
+ * For filter tensor of {@link OperandType::TENSOR_QUANT8_ASYMM} and
+ * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED}
  * the bias should be of {@link OperandType::TENSOR_INT32}, with zeroPoint
  * of 0 and bias_scale == input_scale * filter_scale. For filter tensor
  * of {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL}, the bias
@@ -3156,7 +3186,8 @@ enum OperationType : int32_t {
  * Outputs:
  * * 0: The output 4-D tensor, of shape
  * [batches, out_height, out_width, depth_out].
- * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} and
+ * {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
  * the scale and zeroPoint can be different from inputs' scale and zeroPoint.
  */
  GROUPED_CONV_2D = @1.2::OperationType:GROUPED_CONV_2D,
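
The bias requirements in the GROUPED_CONV_2D documentation above reduce to a simple rule: with a per-tensor quantized filter (TENSOR_QUANT8_ASYMM or TENSOR_QUANT8_ASYMM_SIGNED) the TENSOR_INT32 bias has zeroPoint 0 and scale input_scale * filter_scale, while a TENSOR_QUANT8_SYMM_PER_CHANNEL filter gives each output channel its own bias scale of input_scale * filter_scales[channel]. A minimal C++ sketch of that rule follows; the function names are illustrative and not part of the change.

#include <vector>

// Per-tensor quantized filter: a single bias scale of input_scale * filter_scale
// (the bias zeroPoint is 0).
float expectedBiasScale(float inputScale, float filterScale) {
    return inputScale * filterScale;
}

// Per-channel quantized filter (TENSOR_QUANT8_SYMM_PER_CHANNEL): one bias scale
// per output channel, input_scale * filter_scales[channel].
std::vector<float> expectedPerChannelBiasScales(float inputScale,
                                                const std::vector<float>& filterScales) {
    std::vector<float> biasScales;
    biasScales.reserve(filterScales.size());
    for (float channelScale : filterScales) {
        biasScales.push_back(inputScale * channelScale);
    }
    return biasScales;
}
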
@@ -3512,6 +3543,7 @@ enum OperationType : int32_t {
  * * {@link OperandType::TENSOR_FLOAT16}
  * * {@link OperandType::TENSOR_FLOAT32}
  * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
  *
  * Supported tensor rank: up to 4
  *
@@ -3529,7 +3561,8 @@ enum OperationType : int32_t {
  * pad value must be of {@link OperandType::FLOAT16}.
  * For input tensor of {@link OperandType::TENSOR_FLOAT32}, the
  * pad value must be of {@link OperandType::FLOAT32}.
- * For input tensor of {@link OperandType::TENSOR_QUANT8_ASYMM},
+ * For input tensor of {@link OperandType::TENSOR_QUANT8_ASYMM} and
+ * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED},
  * the pad value must be of {@link OperandType::INT32}. The
  * scale and zeroPoint are assumed to be the same as in input0.
  *
@@ -3541,7 +3574,8 @@ enum OperationType : int32_t {
  * of the padding:
  * output0.dimension[i] =
  * padding[i, 0] + input0.dimension[i] + padding[i, 1]
- * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} and
+ * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor,
  * the scale and zeroPoint must be the same as input0.
  */
  PAD_V2 = @1.2::OperationType:PAD_V2,
@@ -3618,14 +3652,23 @@ enum OperationType : int32_t {
  /**
  * Quantizes the input tensor.
  *
- * The formula is:
+ * The formula for {@link OperandType::TENSOR_QUANT8_ASYMM} output tensor is:
  *
  * output = max(0, min(255, round(input / scale) + zeroPoint)
  *
- * Supported tensor {@link OperandType}:
+ * The formula for {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} output
+ * tensor is:
+ *
+ * output = max(-128, min(127, round(input / scale) + zeroPoint)
+ *
+ * Supported input tensor {@link OperandType}:
  * * {@link OperandType::TENSOR_FLOAT16}
  * * {@link OperandType::TENSOR_FLOAT32}
  *
+ * Supported output tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
+ *
  * Supported tensor rank: from 1
  *
  * Inputs:
@@ -3633,7 +3676,8 @@ enum OperationType : int32_t {
  *
  * Outputs:
  * * 0: The output tensor of same shape as input0, but with
- * {@link OperandType::TENSOR_QUANT8_ASYMM}.
+ * {@link OperandType::TENSOR_QUANT8_ASYMM} or.
+ * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED}.
  */
  QUANTIZE = @1.2::OperationType:QUANTIZE,
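
The two formulas in the QUANTIZE documentation above differ only in the clamping range: [0, 255] for a TENSOR_QUANT8_ASYMM output and [-128, 127] for a TENSOR_QUANT8_ASYMM_SIGNED output. A minimal C++ sketch of both follows; the helper names are illustrative.

#include <algorithm>
#include <cmath>
#include <cstdint>

// TENSOR_QUANT8_ASYMM: round, add zeroPoint, clamp to the unsigned 8-bit range.
uint8_t quantizeAsymm(float input, float scale, int32_t zeroPoint) {
    int32_t q = static_cast<int32_t>(std::round(input / scale)) + zeroPoint;
    return static_cast<uint8_t>(std::max(0, std::min(255, q)));
}

// TENSOR_QUANT8_ASYMM_SIGNED: same computation, clamped to the signed 8-bit range.
int8_t quantizeAsymmSigned(float input, float scale, int32_t zeroPoint) {
    int32_t q = static_cast<int32_t>(std::round(input / scale)) + zeroPoint;
    return static_cast<int8_t>(std::max(-128, std::min(127, q)));
}
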
@@ -4140,6 +4184,7 @@ enum OperationType : int32_t {
  * * {@link OperandType::TENSOR_FLOAT32}
  * * {@link OperandType::TENSOR_INT32}
  * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
  *
  * Supported tensor rank: from 1
  *
@@ -4152,7 +4197,8 @@ enum OperationType : int32_t {
  *
  * Outputs:
  * * 0 ~ (num_splits - 1): Resulting subtensors.
- * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} and
+ * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor,
  * the scale and zeroPoint must be the same as input0.
  */
  SPLIT = @1.2::OperationType:SPLIT,
@@ -4188,6 +4234,7 @@ enum OperationType : int32_t {
  * * {@link OperandType::TENSOR_FLOAT32}
  * * {@link OperandType::TENSOR_INT32}
  * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
  *
  * Supported tensor rank: from 1
  *
@@ -4198,7 +4245,8 @@ enum OperationType : int32_t {
  *
  * Outputs:
  * * 0: A tiled tensor of the same {@link OperandType} and rank as `input`.
- * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} and
+ * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor,
  * the scale and zeroPoint must be the same as input0.
  */
  TILE = @1.2::OperationType:TILE,
@@ -4214,6 +4262,7 @@ enum OperationType : int32_t {
  * * {@link OperandType::TENSOR_FLOAT32}
  * * {@link OperandType::TENSOR_INT32}
  * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
  *
  * Supported tensor rank: from 1
  *
@@ -4225,7 +4274,8 @@ enum OperationType : int32_t {
  * Outputs:
  * * 0: An n-D tensor of the same type as the input, containing the k
  * largest elements along each last dimensional slice.
- * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} and
+ * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor,
  * the scale and zeroPoint must be the same as input0.
  * * 1: An n-D tensor of type {@link OperandType::TENSOR_INT32}
  * containing the indices of values within the last dimension of input.
@@ -4521,6 +4571,7 @@ enum OperationType : int32_t {
  * * {@link OperandType::TENSOR_FLOAT16}
  * * {@link OperandType::TENSOR_FLOAT32}
  * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
  *
  * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
  * With the default data layout NHWC, the data is stored in the order of:
@@ -4560,7 +4611,8 @@ enum OperationType : int32_t {
  * Outputs:
  * * 0: The output 4-D tensor, of shape
  * [batches, new_height, new_width, depth].
- * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} and
+ * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor,
  * the scale and zeroPoint must be the same as input0.
  */
  RESIZE_NEAREST_NEIGHBOR = @1.2::OperationType:RESIZE_NEAREST_NEIGHBOR,

@@ -344,7 +344,17 @@ static bool mutateOperationOperandTypeSkip(size_t operand, OperandType type, con
          return true;
      }
  } break;
- case OperationType::QUANTIZE:
+ case OperationType::QUANTIZE: {
+     if (operand == operation.inputs[0] &&
+         (type == OperandType::TENSOR_FLOAT16 || type == OperandType::TENSOR_FLOAT32)) {
+         return true;
+     }
+     if (operand == operation.outputs[0] &&
+         (type == OperandType::TENSOR_QUANT8_ASYMM ||
+          type == OperandType::TENSOR_QUANT8_ASYMM_SIGNED)) {
+         return true;
+     }
+ } break;
  case OperationType::RANDOM_MULTINOMIAL: {
      if (operand == operation.inputs[0] &&
          (type == OperandType::TENSOR_FLOAT16 || type == OperandType::TENSOR_FLOAT32)) {