Merge "Fix HAL and AIDL files to be consistent with API generation tool." am: 650dc957ee am: df56bdcee9

Original change: https://android-review.googlesource.com/c/platform/hardware/interfaces/+/1818131

Change-Id: I151f1e600fb39edb651bd0df9f699c3631c6bf90
Ian Hua 2021-09-09 13:17:19 +00:00 committed by Automerger Merge Worker
commit 159bc14bad
5 changed files with 149 additions and 44 deletions


@@ -901,5 +901,7 @@ c8a57364f6ad20842be14f4db284df5304f7521ca8eac6bcc1fa6c5b466fb8a6 android.hardwar
# ABI preserving changes to HALs during Android T
62ace52d9c3ff1f60f94118557a2aaf0b953513e59dcd34d5f94ae28d4c7e780 android.hardware.fastboot@1.0::IFastboot
+ca62a2a95d173ed323309e5e00f653ad3cceec82a6e5e4976a249cb5aafe2515 android.hardware.neuralnetworks@1.2::types
+fa76bced6b1b71c40fc706c508a9011284c57f57831cd0cf5f45653ed4ea463e android.hardware.neuralnetworks@1.3::types
# There should be no more HIDL HALs - please use AIDL instead.


@@ -80,7 +80,7 @@ enum OperandType : @1.0::OperandType {
* - scales: an array of positive 32 bit floating point values.
*   The size of the scales array must be equal to dimensions[channelDim].
*
-*{@link SymmPerChannelQuantParams} must hold the parameters for an Operand of this type.
+* {@link SymmPerChannelQuantParams} must hold the parameters for an Operand of this type.
* The channel dimension of this tensor must not be unknown (dimensions[channelDim] != 0).
*
* The formula is:


@@ -41,7 +41,6 @@ enum OperandType : @1.2::OperandType {
* real_value = (integer_value - zeroPoint) * scale.
*/
TENSOR_QUANT8_ASYMM_SIGNED = 14,
/**
* A reference to a subgraph.
*
@@ -5230,7 +5229,7 @@ enum OperationType : int32_t {
* The output is calculated using the following formula:
*
* h-swish(x) = x * max(0, min(6, (x + 3))) / 6
*
* Supported tensor {@link OperandType}:
* * {@link OperandType::TENSOR_FLOAT16}
* * {@link OperandType::TENSOR_FLOAT32}
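The h-swish formula quoted in the comment above can be sanity-checked with a short standalone snippet; this is plain Python, not HAL code, and the function name is illustrative only.

```python
# Illustrative only: evaluates the h-swish formula from the comment above,
# h-swish(x) = x * max(0, min(6, (x + 3))) / 6.
def hard_swish(x: float) -> float:
    return x * max(0.0, min(6.0, x + 3.0)) / 6.0

assert hard_swish(-4.0) == 0.0             # inner clamp hits 0, so the product is 0
assert hard_swish(4.0) == 4.0              # inner clamp hits 6, so x * 6 / 6 == x
assert abs(hard_swish(1.0) - 1.0 * 4.0 / 6.0) < 1e-9
```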


@@ -24,35 +24,30 @@ package android.hardware.neuralnetworks;
* Types prefaced with TENSOR_* must be used for tensor data (i.e., tensors
* with at least one dimension). Types not prefaced by TENSOR_* represent
* scalar values and must have no dimensions.
+*
+* Although we define many types, most operators accept just a few
+* types. Most used are {@link OperandType::TENSOR_FLOAT32},
+* {@link OperandType::TENSOR_QUANT8_ASYMM},
+* and {@link OperandType::INT32}.
*/
@VintfStability
@Backing(type="int")
enum OperandType {
-/**
-* A 32 bit floating point scalar value.
-*/
+/** A 32 bit floating point scalar value. */
FLOAT32 = 0,
-/**
-* A signed 32 bit integer scalar value.
-*/
+/** A signed 32 bit integer scalar value. */
INT32 = 1,
-/**
-* An unsigned 32 bit integer scalar value.
-*/
+/** An unsigned 32 bit integer scalar value. */
UINT32 = 2,
-/**
-* A tensor of 32 bit floating point values.
-*/
+/** A tensor of 32 bit floating point values. */
TENSOR_FLOAT32 = 3,
-/**
-* A tensor of 32 bit integer values.
-*/
+/** A tensor of 32 bit integer values. */
TENSOR_INT32 = 4,
/**
* A tensor of 8 bit unsigned integers that represent real numbers.
*
-* Attached to this tensor are two numbers that can be used to convert the 8 bit integer to the
-* real value and vice versa. These two numbers are:
+* Attached to this tensor are two numbers that can be used to convert the
+* 8 bit integer to the real value and vice versa. These two numbers are:
* - scale: a 32 bit floating point value greater than zero.
* - zeroPoint: a 32 bit integer, in range [0, 255].
*
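The scale/zeroPoint mapping described above (real_value = (integer_value - zeroPoint) * scale) can be illustrated with a minimal sketch; the function names below are made up for the example and are not part of the HAL or NNAPI.

```python
# Illustrative only: the TENSOR_QUANT8_ASYMM mapping described above.
# real_value = (integer_value - zeroPoint) * scale, zeroPoint in [0, 255].
def quantize_u8(real_value: float, scale: float, zero_point: int) -> int:
    q = round(real_value / scale) + zero_point
    return max(0, min(255, q))                 # clamp to the uint8 range

def dequantize_u8(integer_value: int, scale: float, zero_point: int) -> float:
    return (integer_value - zero_point) * scale

scale, zero_point = 0.5, 128
q = quantize_u8(-1.0, scale, zero_point)       # -> 126
assert dequantize_u8(q, scale, zero_point) == -1.0
```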
@@ -63,15 +58,15 @@ enum OperandType {
/**
* An 8 bit boolean scalar value.
*
-* Values of this operand type are either true or false. A zero value represents false; any
-* other value represents true.
+* Values of this operand type are either true or false. A zero value
+* represents false; any other value represents true.
*/
BOOL = 6,
/**
* A tensor of 16 bit signed integers that represent real numbers.
*
-* Attached to this tensor is a number representing real value scale that is used to convert the
-* 16 bit number to a real value in the following way:
+* Attached to this tensor is a number representing real value scale that is
+* used to convert the 16 bit number to a real value in the following way:
* realValue = integerValue * scale.
*
* scale is a 32 bit floating point with value greater than zero.
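The symmetric 16 bit mapping above has no zero point, only a scale; a one-line sketch (plain Python, illustrative name) makes the difference from the asymmetric case explicit.

```python
# Illustrative only: realValue = integerValue * scale for symmetric 16 bit data.
def dequantize_symm16(integer_value: int, scale: float) -> float:
    assert -32768 <= integer_value <= 32767   # int16 range
    return integer_value * scale

assert dequantize_symm16(4, 0.5) == 2.0
```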
@@ -84,8 +79,8 @@ enum OperandType {
/**
* A tensor of 8 bit boolean values.
*
-* Values of this operand type are either true or false. A zero value represents false; any
-* other value represents true.
+* Values of this operand type are either true or false. A zero value
+* represents false; any other value represents true.
*/
TENSOR_BOOL8 = 9,
/**
@@ -95,8 +90,9 @@ enum OperandType {
/**
* A tensor of 8 bit signed integers that represent real numbers.
*
-* This tensor is associated with additional fields that can be used to convert the 8 bit signed
-* integer to the real value and vice versa. These fields are:
+* This tensor is associated with additional fields that can
+* be used to convert the 8 bit signed integer to the real value and vice versa.
+* These fields are:
* - channelDim: a 32 bit unsigned integer indicating channel dimension.
* - scales: an array of positive 32 bit floating point values.
* The size of the scales array must be equal to dimensions[channelDim].
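Per-channel quantization, as described above, applies a separate scale to each slice along channelDim. A minimal sketch under the assumption channelDim == 0 (plain Python, illustrative names, not HAL code):

```python
# Illustrative only: per-channel dequantization with one scale per channel
# along channel_dim; the zero point is implicitly 0 for this operand type.
def dequantize_per_channel(values, scales, channel_dim=0):
    # `values` is a nested list [channels][elements] when channel_dim == 0.
    assert channel_dim == 0 and len(values) == len(scales)
    return [[v * s for v in channel] for channel, s in zip(values, scales)]

weights_q = [[10, -20], [4, 8]]   # 2 output channels, 2 values each
scales = [0.1, 0.25]              # len(scales) == dimensions[channelDim]
print(dequantize_per_channel(weights_q, scales))  # [[1.0, -2.0], [1.0, 2.0]]
```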
@@ -113,8 +109,8 @@ enum OperandType {
/**
* A tensor of 16 bit unsigned integers that represent real numbers.
*
-* Attached to this tensor are two numbers that can be used to convert the 16 bit integer to the
-* real value and vice versa. These two numbers are:
+* Attached to this tensor are two numbers that can be used to convert the
+* 16 bit integer to the real value and vice versa. These two numbers are:
* - scale: a 32 bit floating point value greater than zero.
* - zeroPoint: a 32 bit integer, in range [0, 65535].
*
@@ -125,8 +121,8 @@ enum OperandType {
/**
* A tensor of 8 bit signed integers that represent real numbers.
*
-* Attached to this tensor is a number representing real value scale that is used to convert the
-* 8 bit number to a real value in the following way:
+* Attached to this tensor is a number representing real value scale that is
+* used to convert the 8 bit number to a real value in the following way:
* realValue = integerValue * scale.
*
* scale is a 32 bit floating point with value greater than zero.
@@ -135,8 +131,8 @@ enum OperandType {
/**
* A tensor of 8 bit signed integers that represent real numbers.
*
-* Attached to this tensor are two numbers that can be used to convert the 8 bit integer to the
-* real value and vice versa. These two numbers are:
+* Attached to this tensor are two numbers that can be used to convert the
+* 8 bit integer to the real value and vice versa. These two numbers are:
* - scale: a 32 bit floating point value greater than zero.
* - zeroPoint: a 32 bit integer, in range [-128, 127].
*


@@ -78,6 +78,7 @@ enum OperationType {
* the scale and zeroPoint can be different from inputs' scale and zeroPoint.
*/
ADD = 0,
/**
* Performs a 2-D average pooling operation.
*
@@ -162,6 +163,7 @@ enum OperationType {
* the scale and zeroPoint must be the same as input0.
*/
AVERAGE_POOL_2D = 1,
/**
* Concatenates the input tensors along the given dimension.
*
@@ -195,11 +197,13 @@ enum OperationType {
* tensors. The output shape is [D0, D1, ..., sum(Daxis(i)), ..., Dm].
* Since HAL version 1.2, for a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
* the scale and zeroPoint values can be different from
-* input tensors. Before HAL version 1.2 they have to be the same as for the input tensors.
+* input tensors. Before HAL version 1.2 they have to be the same as for the
+* input tensors.
* For a {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor,
* the scale and zeroPoint values can be different from input tensors.
*/
CONCATENATION = 2,
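The output-shape rule quoted above, [D0, D1, ..., sum(Daxis(i)), ..., Dm], can be demonstrated with NumPy; this is only an illustration of the shape arithmetic, not the HAL interface.

```python
# Illustrative only (NumPy): concatenation sums the sizes along the chosen
# axis and leaves every other dimension unchanged.
import numpy as np

a = np.zeros((2, 3, 4))
b = np.zeros((2, 5, 4))
out = np.concatenate([a, b], axis=1)
assert out.shape == (2, 3 + 5, 4)
```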
/**
* Performs a 2-D convolution operation.
*
@@ -243,7 +247,8 @@ enum OperationType {
* * * {@link OperandType::TENSOR_INT32} for bias (with scale set to
* * * input.scale * filter.scale).
*
-* * Quantized signed with filter symmetric per channel quantization (since HAL version 1.3):
+* * Quantized signed with filter symmetric per channel quantization
+* (since HAL version 1.3):
* * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} for input, and output.
* * * {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL} for filter.
* * * {@link OperandType::TENSOR_INT32} for bias (scale set to 0.0,
@@ -356,10 +361,12 @@ enum OperationType {
* Outputs:
* * 0: The output 4-D tensor, of shape
* [batches, out_height, out_width, depth_out].
-* Before HAL version 1.2, for output tensor of {@link OperandType::TENSOR_QUANT8_ASYMM},
-* the following condition must be satisfied: output_scale > input_scale * filter_scale
+* Before HAL version 1.2, for output tensor of
+* {@link OperandType::TENSOR_QUANT8_ASYMM}, the following condition must
+* be satisfied: output_scale > input_scale * filter_scale
*/
CONV_2D = 3,
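The pre-HAL-1.2 requirement quoted above (output_scale > input_scale * filter_scale) is a simple inequality over the operand parameters; a tiny check in plain Python, with an illustrative function name, shows what a validator would test.

```python
# Illustrative only: the pre-HAL-1.2 scale condition for quantized CONV_2D.
def output_scale_is_valid(input_scale: float, filter_scale: float,
                          output_scale: float) -> bool:
    return output_scale > input_scale * filter_scale

assert output_scale_is_valid(0.5, 0.25, 0.2)       # 0.2 > 0.125
assert not output_scale_is_valid(0.5, 0.25, 0.1)   # 0.1 <= 0.125
```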
/**
* Performs a depthwise 2-D convolution operation.
*
@@ -407,7 +414,8 @@ enum OperationType {
* * * {@link OperandType::TENSOR_INT32} for bias (with scale set to
* * * input.scale * filter.scale).
*
-* * Quantized signed with filter symmetric per channel quantization (since HAL version 1.3):
+* * Quantized signed with filter symmetric per channel quantization
+* (since HAL version 1.3):
* * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} for input, and output.
* * * {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL} for filter.
* * * {@link OperandType::TENSOR_INT32} for bias (scale set to 0.0,
@@ -521,6 +529,7 @@ enum OperationType {
* output_scale > input_scale * filter_scale
*/
DEPTHWISE_CONV_2D = 4,
/**
* Rearranges data from depth into blocks of spatial data.
*
@@ -566,6 +575,7 @@ enum OperationType {
* the scale and zeroPoint must be the same as input0.
*/
DEPTH_TO_SPACE = 5,
/**
* Dequantizes the input tensor.
*
@@ -593,6 +603,7 @@ enum OperationType {
* * 0: A tensor with the same shape as input0.
*/
DEQUANTIZE = 6,
/**
* Looks up sub-tensors in the input tensor.
*
@@ -637,6 +648,7 @@ enum OperationType {
* the scale and zeroPoint must be the same as input1.
*/
EMBEDDING_LOOKUP = 7,
/**
* Computes element-wise floor() on the input tensor.
*
@@ -654,6 +666,7 @@ enum OperationType {
* the input tensor.
*/
FLOOR = 8,
/**
* Denotes a fully (densely) connected layer, which connects all elements
* in the input tensor with each element in the output tensor.
@@ -699,6 +712,7 @@ enum OperationType {
* condition must be satisfied: output_scale > input_scale * filter_scale.
*/
FULLY_CONNECTED = 9,
/**
* Looks up sub-tensors in the input tensor using a key-value map.
*
@@ -755,6 +769,7 @@ enum OperationType {
* A non-zero byte represents True, a hit. A zero indicates otherwise.
*/
HASHTABLE_LOOKUP = 10,
/**
* Applies L2 normalization along the axis dimension.
*
@@ -795,6 +810,7 @@ enum OperationType {
* are all zeros, the result is logical zero.
*/
L2_NORMALIZATION = 11,
/**
* Performs an 2-D L2 pooling operation.
*
@@ -873,6 +889,7 @@ enum OperationType {
* [batches, out_height, out_width, depth].
*/
L2_POOL_2D = 12,
/**
* Applies Local Response Normalization along the depth dimension.
*
@@ -927,6 +944,7 @@ enum OperationType {
* * 0: The output tensor of same shape as input0.
*/
LOCAL_RESPONSE_NORMALIZATION = 13,
/**
* Computes sigmoid activation on the input tensor element-wise.
*
@@ -954,6 +972,7 @@ enum OperationType {
* the scale must be 1.f / 256 and the zeroPoint must be -128.
*/
LOGISTIC = 14,
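The fixed output parameters quoted above (scale = 1/256, zeroPoint = -128 for the signed quantized case) pin down exactly how a sigmoid value in [0, 1] lands in the int8 output; a small sketch in plain Python, with an illustrative function name, shows the mapping.

```python
# Illustrative only: mapping a real-valued sigmoid output onto the required
# quantized signed output parameters (scale = 1/256, zeroPoint = -128).
import math

def quantized_logistic_s8(x: float) -> int:
    y = 1.0 / (1.0 + math.exp(-x))          # real-valued sigmoid in [0, 1]
    q = round(y / (1.0 / 256)) + (-128)     # quantize with the fixed parameters
    return max(-128, min(127, q))

assert quantized_logistic_s8(0.0) == 0      # sigmoid(0) = 0.5 -> 128 - 128
assert quantized_logistic_s8(-50.0) == -128 # saturates at the lower bound
```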
/**
* Projects an input to a bit vector via locality senstive hashing.
*
@@ -967,8 +986,8 @@ enum OperationType {
*
* Inputs:
* * 0: Hash functions. Dim.size == 2, DataType: Float.
-* Tensor[0].Dim[0]: 15 of hash functions.
-* Tensor[0].Dim[1]: 16 of projected output bits generated by each
+* Tensor[0].Dim[0]: Number of hash functions.
+* Tensor[0].Dim[1]: Number of projected output bits generated by each
* hash function.
* If the projection type is Sparse:
* Tensor[0].Dim[1] + ceil(log2(Tensor[0].Dim[0])) <= 32
@@ -1009,6 +1028,7 @@ enum OperationType {
* The offset value for sparse projections was added in HAL version 1.2.
*/
LSH_PROJECTION = 15,
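The sparse-projection constraint quoted above, Tensor[0].Dim[1] + ceil(log2(Tensor[0].Dim[0])) <= 32, is easy to get wrong off by one; a small check in plain Python (illustrative names only) makes it concrete.

```python
# Illustrative only: the sparse LSH_PROJECTION size constraint quoted above.
import math

def sparse_projection_fits(num_hash_functions: int, bits_per_function: int) -> bool:
    return bits_per_function + math.ceil(math.log2(num_hash_functions)) <= 32

assert sparse_projection_fits(num_hash_functions=8, bits_per_function=29)      # 29 + 3 = 32
assert not sparse_projection_fits(num_hash_functions=8, bits_per_function=30)  # 30 + 3 = 33
```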
/**
* Performs a single time step in a Long Short-Term Memory (LSTM) layer
*
@@ -1226,6 +1246,7 @@ enum OperationType {
* the same as the current “output state (out)” value.
*/
LSTM = 16,
/**
* Performs an 2-D max pooling operation.
*
@@ -1310,6 +1331,7 @@ enum OperationType {
* the scale and zeroPoint must be the same as input0.
*/
MAX_POOL_2D = 17,
/**
* Multiplies two tensors, element-wise.
*
@@ -1356,6 +1378,7 @@ enum OperationType {
* output_scale > input1_scale * input2_scale.
*/
MUL = 18,
/**
* Computes rectified linear activation on the input tensor element-wise.
*
@@ -1382,6 +1405,7 @@ enum OperationType {
* the scale and zeroPoint must be the same as input0.
*/
RELU = 19,
/**
* Computes rectified linear 1 activation on the input tensor element-wise.
*
@@ -1408,6 +1432,7 @@ enum OperationType {
* the scale and zeroPoint must be the same as input0.
*/
RELU1 = 20,
/**
* Computes rectified linear 6 activation on the input tensor element-wise.
*
@@ -1434,6 +1459,7 @@ enum OperationType {
* the scale and zeroPoint must be the same as input0.
*/
RELU6 = 21,
/**
* Reshapes a tensor.
*
@@ -1466,6 +1492,7 @@ enum OperationType {
* the scale and zeroPoint must be the same as input0.
*/
RESHAPE = 22,
/**
* Resizes images to given size using the bilinear interpretation.
*
@@ -1547,6 +1574,7 @@ enum OperationType {
* the scale and zeroPoint must be the same as input0.
*/
RESIZE_BILINEAR = 23,
/**
* A basic recurrent neural network layer.
*
@@ -1598,6 +1626,7 @@ enum OperationType {
* the same as the current state value.
*/
RNN = 24,
/**
* Computes the softmax activation on the input tensor element-wise, per
* batch, by normalizing the input vector so the maximum coefficient is
@@ -1645,6 +1674,7 @@ enum OperationType {
* the scale must be 1.f / 256 and the zeroPoint must be -128.
*/
SOFTMAX = 25,
/**
* Rearranges blocks of spatial data, into depth.
*
@@ -1689,6 +1719,7 @@ enum OperationType {
* the scale and zeroPoint must be the same as input0.
*/
SPACE_TO_DEPTH = 26,
/**
* SVDF op is a kind of stateful layer derived from the notion that a
* densely connected layer that's processing a sequence of input frames can
@@ -1765,6 +1796,7 @@ enum OperationType {
* [batch_size, num_units].
*/
SVDF = 27,
/**
* Computes hyperbolic tangent of input tensor element-wise.
*
@@ -1792,6 +1824,7 @@ enum OperationType {
* the scale must be 1.f / 128 and the zeroPoint must be 0.
*/
TANH = 28,
/**
* BatchToSpace for N-dimensional tensors.
*
@@ -1830,6 +1863,7 @@ enum OperationType {
* the scale and zeroPoint must be the same as input0.
*/
BATCH_TO_SPACE_ND = 29,
/**
* Element-wise division of two tensors.
*
@@ -1880,6 +1914,7 @@ enum OperationType {
* * 0: A tensor of the same {@link OperandType} as input0.
*/
DIV = 30,
/**
* Computes the mean of elements across dimensions of a tensor.
*
@@ -1919,6 +1954,7 @@ enum OperationType {
* shape is [1].
*/
MEAN = 31,
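Reducing across a chosen set of dimensions, as MEAN does, is easiest to see on a concrete shape; the NumPy sketch below only illustrates the shape behaviour and is not the HAL interface.

```python
# Illustrative only (NumPy): reducing across one dimension, with and without
# retaining it as size 1.
import numpy as np

x = np.arange(12, dtype=np.float32).reshape(2, 3, 2)
assert np.mean(x, axis=1).shape == (2, 2)                    # dimension removed
assert np.mean(x, axis=1, keepdims=True).shape == (2, 1, 2)  # retained as size 1
```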
/**
* Pads a tensor.
*
@@ -1960,6 +1996,7 @@ enum OperationType {
* Since HAL version 1.2, the pad value is always the logical zero.
*/
PAD = 32,
/**
* SpaceToBatch for N-Dimensional tensors.
*
@@ -2012,6 +2049,7 @@ enum OperationType {
* Since HAL version 1.2, the pad value is always the logical zero.
*/
SPACE_TO_BATCH_ND = 33,
/**
* Removes dimensions of size 1 from the shape of a tensor.
*
@@ -2047,6 +2085,7 @@ enum OperationType {
* output shape is [1].
*/
SQUEEZE = 34,
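Removing size-1 dimensions, as SQUEEZE does, is again clearest on a concrete shape. The NumPy sketch below is only an illustration; note that NumPy yields a 0-D array when every dimension is squeezed away, whereas the text above specifies an output shape of [1] for that case.

```python
# Illustrative only (NumPy): dropping size-1 dimensions, either all of them or
# only the one named by an explicit axis.
import numpy as np

x = np.zeros((1, 3, 1, 4))
assert np.squeeze(x).shape == (3, 4)
assert np.squeeze(x, axis=2).shape == (1, 3, 4)
```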
/**
* Extracts a strided slice of a tensor.
*
@@ -2097,6 +2136,7 @@ enum OperationType {
* shape is [1].
*/
STRIDED_SLICE = 35,
/**
* Element-wise subtraction of two tensors.
*
@@ -2147,6 +2187,7 @@ enum OperationType {
* the scale and zeroPoint can be different from inputs' scale and zeroPoint.
*/
SUB = 36,
/**
* Transposes the input tensor, permuting the dimensions according to the
* perm tensor.
@@ -2177,6 +2218,7 @@ enum OperationType {
* the scale and zeroPoint must be the same as input0.
*/
TRANSPOSE = 37,
/**
* Computes the absolute value of a tensor, element-wise.
*
@@ -2194,6 +2236,7 @@ enum OperationType {
* * 0: The output tensor of same shape as input0.
*/
ABS = 38,
/**
* Returns the index of the largest element along an axis.
*
@@ -2216,7 +2259,10 @@ enum OperationType {
* * 0: An (n - 1)-D {@link OperandType::TENSOR_INT32} tensor.
* If input is 1-dimensional, the output shape is [1].
*/
+// There is no underscore in ARG_MAX to avoid name conflict with
+// the macro defined in libc/kernel/uapi/linux/limits.h.
ARGMAX = 39,
/**
* Returns the index of the smallest element along an axis.
*
@@ -2239,7 +2285,8 @@ enum OperationType {
* * 0: An (n - 1)-D {@link OperandType::TENSOR_INT32} tensor.
* If input is 1-dimensional, the output shape is [1].
*/
-ARGMIN = 40,
+ARGMIN = 40, // See ARGMAX for naming discussion.
/**
* Transform axis-aligned bounding box proposals using bounding box deltas.
*
@@ -2286,6 +2333,7 @@ enum OperationType {
* scale must be 0.125 and the zero point must be 0.
*/
AXIS_ALIGNED_BBOX_TRANSFORM = 41,
/**
* A recurrent neural network layer that applies an LSTM cell to a
* sequence of inputs in forward and backward directions.
@@ -2560,6 +2608,7 @@ enum OperationType {
* Available since HAL version 1.3.
*/
BIDIRECTIONAL_SEQUENCE_LSTM = 42,
/**
* A recurrent neural network layer that applies a basic RNN cell to a
* sequence of inputs in forward and backward directions.
@@ -2711,6 +2760,7 @@ enum OperationType {
* Available since HAL version 1.3.
*/
BIDIRECTIONAL_SEQUENCE_RNN = 43,
/**
* Greedily selects a subset of bounding boxes in descending order of score.
*
@@ -2794,6 +2844,7 @@ enum OperationType {
* with the same batch index are grouped together.
*/
BOX_WITH_NMS_LIMIT = 44,
/**
* Casts a tensor to a type.
*
@@ -2824,6 +2875,7 @@ enum OperationType {
* * 0: A tensor with the same shape as input0.
*/
CAST = 45,
/**
* Shuffle the channels of the input tensor.
*
@@ -2863,6 +2915,7 @@ enum OperationType {
* the scale and zeroPoint must be the same as input0.
*/
CHANNEL_SHUFFLE = 46,
/**
* Apply postprocessing steps to bounding box detections.
*
@@ -2942,6 +2995,7 @@ enum OperationType {
* specifying the number of valid output detections for each batch.
*/
DETECTION_POSTPROCESSING = 47,
/**
* For input tensors x and y, computes x == y elementwise.
*
@@ -2966,6 +3020,7 @@ enum OperationType {
* * 0: A tensor of {@link OperandType::TENSOR_BOOL8}.
*/
EQUAL = 48,
/**
* Computes exponential of x element-wise.
*
@@ -2982,6 +3037,7 @@ enum OperationType {
* * 0: The output tensor of same shape as input0.
*/
EXP = 49,
/**
* Inserts a dimension of 1 into a tensor's shape.
*
@@ -3012,6 +3068,7 @@ enum OperationType {
* the scale and zeroPoint must be the same as input0.
*/
EXPAND_DIMS = 50,
/**
* Gathers values along an axis.
*
@@ -3051,6 +3108,7 @@ enum OperationType {
* the scale and zeroPoint must be the same as input0.
*/
GATHER = 51,
/**
* Generate aixs-aligned bounding box proposals.
*
@@ -3132,6 +3190,7 @@ enum OperationType {
* with the same batch index are grouped together.
*/
GENERATE_PROPOSALS = 52,
/**
* For input tensors x and y, computes x > y elementwise.
*
@@ -3180,6 +3239,7 @@ enum OperationType {
* * 0: A tensor of {@link OperandType::TENSOR_BOOL8}.
*/
GREATER_EQUAL = 54,
/**
* Performs a grouped 2-D convolution operation.
*
@@ -3232,7 +3292,8 @@ enum OperationType {
* * * {@link OperandType::TENSOR_INT32} for bias (scale set to 0.0,
* * * each value scaling is separate and equal to input.scale * filter.scales[channel]).
*
-* * Quantized signed with filter symmetric per channel quantization (since HAL version 1.3):
+* * Quantized signed with filter symmetric per channel quantization
+* (since HAL version 1.3):
* * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} for input, and output.
* * * {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL} for filter.
* * * {@link OperandType::TENSOR_INT32} for bias (scale set to 0.0,
@@ -3329,6 +3390,7 @@ enum OperationType {
* the scale and zeroPoint can be different from inputs' scale and zeroPoint.
*/
GROUPED_CONV_2D = 55,
/**
* Localize the maximum keypoints from heatmaps.
*
@@ -3382,6 +3444,7 @@ enum OperationType {
* scale must be 0.125 and the zero point must be 0.
*/
HEATMAP_MAX_KEYPOINT = 56,
/**
* Applies instance normalization to the input tensor.
*
@@ -3432,6 +3495,7 @@ enum OperationType {
* * 0: A tensor of the same {@link OperandType} and same shape as input0.
*/
INSTANCE_NORMALIZATION = 57,
/**
* For input tensors x and y, computes x < y elementwise.
*
@@ -3456,6 +3520,7 @@ enum OperationType {
* * 0: A tensor of {@link OperandType::TENSOR_BOOL8}.
*/
LESS = 58,
/**
* For input tensors x and y, computes x <= y elementwise.
*
@@ -3480,6 +3545,7 @@ enum OperationType {
* * 0: A tensor of {@link OperandType::TENSOR_BOOL8}.
*/
LESS_EQUAL = 59,
/**
* Computes natural logarithm of x element-wise.
*
@@ -3496,6 +3562,7 @@ enum OperationType {
* * 0: The output tensor of same shape as input0.
*/
LOG = 60,
/**
* Returns the truth value of x AND y element-wise.
*
@@ -3515,6 +3582,7 @@ enum OperationType {
* * 0: A tensor of {@link OperandType::TENSOR_BOOL8}.
*/
LOGICAL_AND = 61,
/**
* Computes the truth value of NOT x element-wise.
*
@@ -3530,6 +3598,7 @@ enum OperationType {
* * 0: The output tensor of same shape as input0.
*/
LOGICAL_NOT = 62,
/**
* Returns the truth value of x OR y element-wise.
*
@@ -3549,6 +3618,7 @@ enum OperationType {
* * 0: A tensor of {@link OperandType::TENSOR_BOOL8}.
*/
LOGICAL_OR = 63,
/**
* Computes the log softmax activations given logits.
*
@@ -3579,6 +3649,7 @@ enum OperationType {
* input0.
*/
LOG_SOFTMAX = 64,
/**
* Returns the element-wise maximum of two tensors.
*
@@ -3605,6 +3676,7 @@ enum OperationType {
* the scale and zeroPoint can be different from inputs' scale and zeroPoint.
*/
MAXIMUM = 65,
/**
* Returns the element-wise minimum of two tensors.
*
@@ -3631,6 +3703,7 @@ enum OperationType {
* the scale and zeroPoint can be different from inputs' scale and zeroPoint.
*/
MINIMUM = 66,
/**
* Computes numerical negative value element-wise.
*
@@ -3648,6 +3721,7 @@ enum OperationType {
* * 0: The output tensor of same shape as input0.
*/
NEG = 67,
/**
* For input tensors x and y, computes x != y elementwise.
*
@@ -3672,6 +3746,7 @@ enum OperationType {
* * 0: A tensor of {@link OperandType::TENSOR_BOOL8}.
*/
NOT_EQUAL = 68,
/**
* Pads a tensor with the given constant value according to the specified
* paddings.
@@ -3716,6 +3791,7 @@ enum OperationType {
* the scale and zeroPoint must be the same as input0.
*/
PAD_V2 = 69,
/**
* Computes the power of one value to another.
*
@@ -3745,6 +3821,7 @@ enum OperationType {
* * 0: An output tensor.
*/
POW = 70,
/**
* Parametric Rectified Linear Unit.
*
@@ -3785,6 +3862,7 @@ enum OperationType {
* the scales and zeroPoint can be different from input0 scale and zeroPoint.
*/
PRELU = 71,
/**
* Quantizes the input tensor.
*
@@ -3816,6 +3894,7 @@ enum OperationType {
* {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED}.
*/
QUANTIZE = 72,
/**
* A version of quantized LSTM, using 16 bit quantization for internal
* state.
@@ -3920,6 +3999,7 @@ enum OperationType {
* (scale = 1/128, zeroPoint = 128).
*/
QUANTIZED_16BIT_LSTM = 73,
/**
* Draws samples from a multinomial distribution.
*
@@ -3940,6 +4020,7 @@ enum OperationType {
* [batches, samples], containing the drawn samples.
*/
RANDOM_MULTINOMIAL = 74,
/**
* Reduces a tensor by computing the "logical and" of elements along given
* dimensions.
@@ -3966,6 +4047,7 @@ enum OperationType {
* shape is [1].
*/
REDUCE_ALL = 75,
/**
* Reduces a tensor by computing the "logical or" of elements along given
* dimensions.
@@ -3992,6 +4074,7 @@ enum OperationType {
* shape is [1].
*/
REDUCE_ANY = 76,
/**
* Reduces a tensor by computing the maximum of elements along given
* dimensions.
@@ -4024,6 +4107,7 @@ enum OperationType {
* the scale and zeroPoint must be the same as input0.
*/
REDUCE_MAX = 77,
/**
* Reduces a tensor by computing the minimum of elements along given
* dimensions.
@@ -4056,6 +4140,7 @@ enum OperationType {
* the scale and zeroPoint must be the same as input0.
*/
REDUCE_MIN = 78,
/**
* Reduces a tensor by multiplying elements along given dimensions.
*
@@ -4082,6 +4167,7 @@ enum OperationType {
* shape is [1].
*/
REDUCE_PROD = 79,
/**
* Reduces a tensor by summing elements along given dimensions.
*
@@ -4108,6 +4194,7 @@ enum OperationType {
* shape is [1].
*/
REDUCE_SUM = 80,
/**
* Select and scale the feature map of each region of interest to a unified
* output size by average pooling sampling points from bilinear interpolation.
@@ -4169,6 +4256,7 @@ enum OperationType {
* the scale and zeroPoint can be different from the input0 scale and zeroPoint.
*/
ROI_ALIGN = 81,
/**
* Select and scale the feature map of each region of interest to a unified
* output size by max-pooling.
@@ -4222,6 +4310,7 @@ enum OperationType {
* the scale and zeroPoint must be the same as input0.
*/
ROI_POOLING = 82,
/**
* Computes reciprocal of square root of x element-wise.
*
@@ -4238,6 +4327,7 @@ enum OperationType {
* * 0: The output tensor of same shape as input0.
*/
RSQRT = 83,
/**
* Using a tensor of booleans c and input tensors x and y select values
* elementwise from both input tensors:
@@ -4270,6 +4360,7 @@ enum OperationType {
* the scale and zeroPoint can be different from inputs' scale and zeroPoint.
*/
SELECT = 84,
/**
* Computes sin of x element-wise.
*
@@ -4286,6 +4377,7 @@ enum OperationType {
* * 0: The output tensor of same shape as input0.
*/
SIN = 85,
/**
* Extracts a slice of specified size from the input tensor starting at a
* specified location.
@@ -4321,6 +4413,7 @@ enum OperationType {
* its scale and zeroPoint has to be same as the input0 scale and zeroPoint.
*/
SLICE = 86,
/**
* Splits a tensor along a given axis into num_splits subtensors.
*
@@ -4347,6 +4440,7 @@ enum OperationType {
* the scale and zeroPoint must be the same as input0.
*/
SPLIT = 87,
/**
* Computes square root of x element-wise.
*
@@ -4363,6 +4457,7 @@ enum OperationType {
* * 0: The output tensor of same shape as input0.
*/
SQRT = 88,
/**
* Constructs a tensor by tiling a given tensor.
*
@@ -4393,6 +4488,7 @@ enum OperationType {
* the scale and zeroPoint must be the same as input0.
*/
TILE = 89,
/**
* Finds values and indices of the k largest entries for the last dimension.
*
@@ -4423,6 +4519,7 @@ enum OperationType {
* containing the indices of values within the last dimension of input.
*/
TOPK_V2 = 90,
/**
* Performs the transpose of 2-D convolution operation.
*
@@ -4457,7 +4554,8 @@ enum OperationType {
* * * {@link OperandType::TENSOR_INT32} for bias (with scale set to
* * * input.scale * filter.scale).
*
-* * Quantized signed with filter symmetric per channel quantization (since HAL version 1.3):
+* * Quantized signed with filter symmetric per channel quantization
+* (since HAL version 1.3):
* * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} for input, and output.
* * * {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL} for filter.
* * * {@link OperandType::TENSOR_INT32} for bias (scale set to 0.0,
@@ -4551,6 +4649,7 @@ enum OperationType {
* the scale and zeroPoint can be different from inputs' scale and zeroPoint.
*/
TRANSPOSE_CONV_2D = 91,
/**
* A recurrent neural network specified by an LSTM cell.
*
@@ -4668,6 +4767,7 @@ enum OperationType {
* Available since HAL version 1.3.
*/
UNIDIRECTIONAL_SEQUENCE_LSTM = 92,
/**
* A recurrent neural network layer that applies a basic RNN cell to a
* sequence of inputs.
@@ -4726,6 +4826,7 @@ enum OperationType {
* Available since HAL version 1.3.
*/
UNIDIRECTIONAL_SEQUENCE_RNN = 93,
/**
* Resizes images to given size using the nearest neighbor interpretation.
*
@@ -4804,6 +4905,7 @@ enum OperationType {
* the scale and zeroPoint must be the same as input0.
*/
RESIZE_NEAREST_NEIGHBOR = 94,
/**
* Quantized version of {@link OperationType::LSTM}.
*
@@ -4932,6 +5034,7 @@ enum OperationType {
* Shape: [batchSize, outputSize]
*/
QUANTIZED_LSTM = 95,
/**
* Executes one of the two referenced subgraphs as determined by a boolean
* value.
@@ -4958,6 +5061,7 @@ enum OperationType {
* * 0 ~ (m - 1): Outputs produced by the selected subgraph.
*/
IF = 96,
/**
* Executes the body subgraph until the condition subgraph outputs false.
*
@@ -5024,6 +5128,7 @@ enum OperationType {
* * 0 ~ (m - 1): Outputs produced by the loop.
*/
WHILE = 97,
/**
* Computes exponential linear activation on the input tensor element-wise.
*
@@ -5049,6 +5154,7 @@ enum OperationType {
* * 0: The output tensor of same shape and type as input0.
*/
ELU = 98,
/**
* Computes hard-swish activation on the input tensor element-wise.
*
@@ -5076,6 +5182,7 @@ enum OperationType {
* tensor's parameters.
*/
HARD_SWISH = 99,
/**
* Creates a tensor filled with a scalar value.
*
@@ -5100,6 +5207,7 @@ enum OperationType {
* * 0: The output tensor.
*/
FILL = 100,
/**
* Returns the rank of a tensor.
*