Fix operations docs
am: fdf3c0363a
Change-Id: Ibb6c8aab16e379f295298b0f2ebb1e83cfdd8fc6
commit 0775d27fe6
2 changed files with 61 additions and 31 deletions
@@ -449,7 +449,7 @@ dd1ec219f5d2e2b33c6c0bcb92e63bbedb36f7c716413462848f6b6ae74fc864 android.hardwar
92714960d1a53fc2ec557302b41c7cc93d2636d8364a44bd0f85be0c92927ff8 android.hardware.neuralnetworks@1.2::IExecutionCallback
83885d366f22ada42c00d8854f0b7e7ba4cf73ddf80bb0d8e168ce132cec57ea android.hardware.neuralnetworks@1.2::IPreparedModel
e1c734d1545e1a4ae749ff1dd9704a8e594c59aea7c8363159dc258e93e0df3b android.hardware.neuralnetworks@1.2::IPreparedModelCallback
769f8650631eef7a3ceedc8cf130f4b99eb52fe698a11609d55de32985a3dddf android.hardware.neuralnetworks@1.2::types
c752cff336d86762c26dc82e7e037f4962b815b1a068d2319d40a3d068e26f68 android.hardware.neuralnetworks@1.2::types
cf7a4ba516a638f9b82a249c91fb603042c2d9ca43fd5aad9cf6c0401ed2a5d7 android.hardware.nfc@1.2::INfc
abf98c2ae08bf765db54edc8068e36d52eb558cff6706b6fd7c18c65a1f3fc18 android.hardware.nfc@1.2::types
4cb252dc6372a874aef666b92a6e9529915aa187521a700f0789065c3c702ead android.hardware.power.stats@1.0::IPowerStats
@@ -218,6 +218,7 @@ enum OperationType : int32_t {
* ) / sum(1)
*
* Supported tensor {@link OperandType}:
* * {@link OperandType::TENSOR_FLOAT16} (since API level 29)
* * {@link OperandType::TENSOR_FLOAT32}
* * {@link OperandType::TENSOR_QUANT8_ASYMM}
*
@@ -333,7 +334,7 @@ enum OperationType : int32_t {
* ) + bias[channel]
*
* Supported tensor {@link OperandType} configurations:
* * 32 bit Floating point :
* * 32 bit floating point:
* * * {@link OperandType::TENSOR_FLOAT32} for input, filter, output, and bias.
*
* * Quantized:
@@ -342,15 +343,15 @@ enum OperationType : int32_t {
* * * input.scale * filter.scale).
*
* Available since API level 29:
* * 16 bit floating point:
* * * {@link OperandType::TENSOR_FLOAT16} for input, filter, output, and bias.
*
* * Quantized with symmetric per channel quantization for the filter:
* * * {@link OperandType::TENSOR_QUANT8_ASYMM} for input, and output.
* * * {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL} for filter.
* * * {@link OperandType::TENSOR_INT32} for bias (scale set to 0.0,
* * * each value scaling is separate and equal to input.scale * filter.scales[channel]).
*
* * 16 bit Floating point:
* * {@link OperandType::TENSOR_FLOAT16} for input, filter, output, and bias.
*
* Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
* With the default data layout NHWC, the data is stored in the order of:
* [batch, height, width, channels]. Alternatively, the data layout could
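For the quantized configurations in the hunk above, the bias scale is fully determined by the input and filter scales: input.scale * filter.scale for a per-tensor quantized filter, and input.scale * filter.scales[channel] per output channel for a TENSOR_QUANT8_SYMM_PER_CHANNEL filter. A minimal NumPy sketch of that relationship (the helper name is hypothetical and not part of the HAL):

    import numpy as np

    def expected_bias_scales(input_scale, filter_scales):
        # Bias (TENSOR_INT32) scale implied by the documentation above:
        # per-tensor filter  -> input.scale * filter.scale
        # per-channel filter -> input.scale * filter.scales[channel]
        return input_scale * np.atleast_1d(np.asarray(filter_scales, dtype=np.float64))

    print(expected_bias_scales(0.5, 0.02))               # per-tensor  -> [0.01]
    print(expected_bias_scales(0.5, [0.02, 0.04, 0.1]))  # per-channel -> [0.01 0.02 0.05]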
@@ -482,7 +483,7 @@ enum OperationType : int32_t {
* ) + bias[k * channel_multiplier + q]
*
* Supported tensor {@link OperandType} configurations:
* * 32 bit Floating point :
* * 32 bit floating point:
* * * {@link OperandType::TENSOR_FLOAT32} for input, filter, output, and bias.
*
* * Quantized:
@@ -491,6 +492,9 @@ enum OperationType : int32_t {
* * * input.scale * filter.scale).
*
* Available since API level 29:
* * 16 bit floating point:
* * * {@link OperandType::TENSOR_FLOAT16} for input, filter, output, and bias.
*
* * Quantized with symmetric per channel quantization for the filter:
* * * {@link OperandType::TENSOR_QUANT8_ASYMM} for input, and output.
* * * {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL} for filter.
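The index expression in the depthwise formula above, bias[k * channel_multiplier + q], maps input channel k and depth-multiplier index q to a single output channel. A tiny sketch of that mapping (illustrative only, names are hypothetical):

    # Depthwise output-channel indexing from the formula above:
    # output channel = k * channel_multiplier + q
    def depthwise_output_channel(k, q, channel_multiplier):
        return k * channel_multiplier + q

    channel_multiplier = 2
    for k in range(3):                       # input channels 0..2
        for q in range(channel_multiplier):  # multiplier index 0..1
            print((k, q), "->", depthwise_output_channel(k, q, channel_multiplier))
    # Enumerates output channels 0..5 for 3 input channels and multiplier 2.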
@@ -1010,6 +1014,7 @@ enum OperationType : int32_t {
* output = 1 / (1 + exp(-input))
*
* Supported tensor {@link OperandType}:
* * {@link OperandType::TENSOR_FLOAT16} (since API level 29)
* * {@link OperandType::TENSOR_FLOAT32}
* * {@link OperandType::TENSOR_QUANT8_ASYMM}
*
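The formula above is the elementwise sigmoid. A short NumPy sketch (illustrative, not the HAL reference implementation):

    import numpy as np

    def logistic(x):
        # output = 1 / (1 + exp(-input)), applied elementwise
        x = np.asarray(x, dtype=np.float32)
        return 1.0 / (1.0 + np.exp(-x))

    print(logistic([-2.0, 0.0, 2.0]))   # approx [0.119, 0.5, 0.881]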
@@ -1315,6 +1320,7 @@ enum OperationType : int32_t {
* )
*
* Supported tensor {@link OperandType}:
* * {@link OperandType::TENSOR_FLOAT16} (since API level 29)
* * {@link OperandType::TENSOR_FLOAT32}
* * {@link OperandType::TENSOR_QUANT8_ASYMM}
*
@@ -1623,6 +1629,7 @@ enum OperationType : int32_t {
* independently on each 1-D slice along specified dimension.
*
* Supported tensor {@link OperandType}:
* * {@link OperandType::TENSOR_FLOAT16} (since API level 29)
* * {@link OperandType::TENSOR_FLOAT32}
* * {@link OperandType::TENSOR_QUANT8_ASYMM}
*
@@ -1631,8 +1638,12 @@ enum OperationType : int32_t {
*
* Inputs:
* * 0: A 2-D or 4-D tensor, specifying the tensor to be reshaped.
* * 1: An {@link OperandType::FLOAT32} scalar, specifying the positive
* scaling factor for the exponent, beta.
* * 1: A scalar, specifying the positive scaling factor for the exponent,
* beta. If input0 is of {@link OperandType::TENSOR_FLOAT32} or
* {@link OperandType::TENSOR_QUANT8_ASYMM}, the scalar must be of
* {@link OperandType::FLOAT32}. If input0 is of {@link
* OperandType::TENSOR_FLOAT16}, then the scalar must be of {@link
* OperandType::FLOAT16}.
* * 2: An optional {@link OperandType::INT32} scalar, default to -1,
* specifying the dimension the activation would be performed on.
* Negative index is used to specify axis from the end (e.g. -1 for
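Inputs 1 and 2 in the hunk above are the positive exponent scale beta and the optional axis (default -1) selecting the 1-D slices. A NumPy sketch of softmax with those two parameters (illustrative only):

    import numpy as np

    def softmax(x, beta=1.0, axis=-1):
        # Softmax over 1-D slices along `axis`, with beta scaling the exponent.
        z = beta * np.asarray(x, dtype=np.float32)
        z -= z.max(axis=axis, keepdims=True)     # subtract the max for numerical stability
        e = np.exp(z)
        return e / e.sum(axis=axis, keepdims=True)

    print(softmax([[1.0, 2.0, 3.0]]))            # each row sums to 1
    print(softmax([[1.0, 2.0, 3.0]], beta=0.5))  # smaller beta gives a flatter distribution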
@@ -2706,11 +2717,17 @@ enum OperationType : int32_t {
* * 10: An {@link OperandType::INT32} scalar, only used when input7 is
* set to true, specifying the maximum number of detections when
* applying NMS algorithm for each single class.
* * 11: An {@link OperandType::FLOAT32} scalar, score_threshold. Boxes
* with scores lower than the threshold are filtered before sending
* to the NMS algorithm.
* * 12: An {@link OperandType::FLOAT32} scalar, specifying the IoU
* threshold for hard NMS.
* * 11: A scalar, score_threshold. Boxes with scores lower than the
* threshold are filtered before sending to the NMS algorithm. The
* scalar must be of {@link OperandType::FLOAT16} if input0 is of
* {@link OperandType::TENSOR_FLOAT16} and of {@link
* OperandType::FLOAT32} if input0 is of {@link
* OperandType::TENSOR_FLOAT32}.
* * 12: A scalar, specifying the IoU threshold for hard NMS. The scalar
* must be of {@link OperandType::FLOAT16} if input0 is of {@link
* OperandType::TENSOR_FLOAT16} and of {@link
* OperandType::FLOAT32} if input0 is of {@link
* OperandType::TENSOR_FLOAT32}.
* * 13: An {@link OperandType::BOOL} scalar, set to true to include
* background class in the list of label map for the output, set
* to false to not include the background. When the background
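Inputs 11 and 12 above are the score threshold (applied before NMS) and the IoU threshold used by hard NMS. The sketch below shows how those two scalars are typically used; it illustrates the idea only, not the HAL's exact algorithm, and all names are hypothetical:

    def iou(a, b):
        # Intersection-over-union of two boxes given as [x1, y1, x2, y2].
        x1, y1 = max(a[0], b[0]), max(a[1], b[1])
        x2, y2 = min(a[2], b[2]), min(a[3], b[3])
        inter = max(0.0, x2 - x1) * max(0.0, y2 - y1)
        area_a = (a[2] - a[0]) * (a[3] - a[1])
        area_b = (b[2] - b[0]) * (b[3] - b[1])
        return inter / (area_a + area_b - inter) if inter > 0 else 0.0

    def hard_nms(boxes, scores, score_threshold, iou_threshold):
        # Boxes below score_threshold are dropped before NMS (input 11);
        # remaining boxes overlapping an already-kept box by more than
        # iou_threshold (input 12) are suppressed.
        candidates = [i for i, s in enumerate(scores) if s >= score_threshold]
        candidates.sort(key=lambda i: scores[i], reverse=True)
        kept = []
        for i in candidates:
            if all(iou(boxes[i], boxes[j]) <= iou_threshold for j in kept):
                kept.append(i)
        return kept

    boxes = [[0, 0, 10, 10], [1, 1, 11, 11], [20, 20, 30, 30]]
    scores = [0.9, 0.8, 0.3]
    print(hard_nms(boxes, scores, score_threshold=0.5, iou_threshold=0.5))  # -> [0]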
@@ -3007,11 +3024,11 @@ enum OperationType : int32_t {
* where channel_multiplier = depth_out / num_groups
*
* Supported tensor {@link OperandType} configurations:
* * 32 bit Floating point :
* * * {@link OperandType::TENSOR_FLOAT32} for input, filter, output, and bias.
* * 16 bit floating point:
* * * {@link OperandType::TENSOR_FLOAT16} for input, filter, output, and bias.
*
* * 16 bit Floating point:
* * {@link OperandType::TENSOR_FLOAT16} for input, filter, output, and bias.
* * 32 bit floating point:
* * * {@link OperandType::TENSOR_FLOAT32} for input, filter, output, and bias.
*
* * Quantized:
* * * {@link OperandType::TENSOR_QUANT8_ASYMM} for input, filter, and output.
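The relation quoted above, channel_multiplier = depth_out / num_groups, implies that depth_out must be divisible by num_groups. A trivial check (illustrative only):

    def channel_multiplier(depth_out, num_groups):
        # channel_multiplier = depth_out / num_groups (must divide evenly)
        assert depth_out % num_groups == 0
        return depth_out // num_groups

    print(channel_multiplier(depth_out=12, num_groups=3))   # -> 4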
@@ -3188,12 +3205,21 @@ enum OperationType : int32_t {
*
* Inputs:
* * 0: An n-D tensor, specifying the tensor to be normalized.
* * 1: An {@link OperandType::FLOAT32} scalar, specifying gamma, the
* scale applied to the normalized tensor.
* * 2: An {@link OperandType::FLOAT32} scalar, specifying beta, the
* offset applied to the normalized tensor.
* * 3: An {@link OperandType::FLOAT32} scalar, specifying epsilon, the
* small value added to variance to avoid dividing by zero.
* * 1: A scalar, specifying gamma, the scale applied to the normalized
* tensor. The scalar must be of {@link OperandType::FLOAT16} if
* input0 is of {@link OperandType::TENSOR_FLOAT16} and of {@link
* OperandType::FLOAT32} if input0 is of {@link
* OperandType::TENSOR_FLOAT32}.
* * 2: A scalar, specifying beta, the offset applied to the normalized
* tensor. The scalar must be of {@link OperandType::FLOAT16} if
* input0 is of {@link OperandType::TENSOR_FLOAT16} and of {@link
* OperandType::FLOAT32} if input0 is of {@link
* OperandType::TENSOR_FLOAT32}.
* * 3: A scalar, specifying epsilon, the small value added to variance to
* avoid dividing by zero. The scalar must be of {@link OperandType::FLOAT16} if
* input0 is of {@link OperandType::TENSOR_FLOAT16} and of {@link
* OperandType::FLOAT32} if input0 is of {@link
* OperandType::TENSOR_FLOAT32}.
* * 4: An {@link OperandType::BOOL} scalar, set to true to specify
* NCHW data layout for input0 and output0. Set to false for NHWC.
*
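Inputs 1 through 3 above are gamma, beta, and epsilon. Assuming the usual instance-normalization definition (per batch item and per channel, normalizing over the spatial dimensions), an NHWC sketch looks like this (illustrative only):

    import numpy as np

    def instance_norm_nhwc(x, gamma, beta, epsilon):
        # Normalize each (batch, channel) slice over H and W, then apply
        # the scale gamma and offset beta from inputs 1 and 2.
        x = np.asarray(x, dtype=np.float32)
        mean = x.mean(axis=(1, 2), keepdims=True)
        var = x.var(axis=(1, 2), keepdims=True)
        return gamma * (x - mean) / np.sqrt(var + epsilon) + beta

    x = np.random.rand(2, 4, 4, 3).astype(np.float32)     # NHWC
    print(instance_norm_nhwc(x, 1.0, 0.0, 1e-5).shape)    # (2, 4, 4, 3)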
@@ -3475,10 +3501,12 @@ enum OperationType : int32_t {
* padding[i, 1] specifies the number of elements to be padded after
* the end of dimension i.
* * 2: An scalar specifying the value to use for padding input0.
* For input tensor of {@link OperandType::TENSOR_FLOAT16}, the
* pad value must be of {@link OperandType::FLOAT16}.
* For input tensor of {@link OperandType::TENSOR_FLOAT32}, the
* pad value should be of {@link OperandType::FLOAT32}.
* pad value must be of {@link OperandType::FLOAT32}.
* For input tensor of {@link OperandType::TENSOR_QUANT8_ASYMM},
* the pad value should be of {@link OperandType::INT32}. The
* the pad value must be of {@link OperandType::INT32}. The
* scale and zeroPoint are assumed to be the same as in input0.
*
* Outputs:
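Input 1 of this operation is a [rank, 2] paddings tensor (elements added before and after each dimension), and input 2 is the scalar pad value whose type must match the input tensor type as corrected above. A NumPy equivalent (illustrative only):

    import numpy as np

    x = np.arange(6, dtype=np.float32).reshape(2, 3)
    paddings = [[1, 1], [0, 2]]   # [before, after] per dimension
    pad_value = -1.0              # FLOAT32 pad value for a TENSOR_FLOAT32 input

    out = np.pad(x, paddings, mode="constant", constant_values=pad_value)
    print(out.shape)              # (4, 5)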
@@ -3627,25 +3655,25 @@ enum OperationType : int32_t {
* weights.
* * 5: The recurrent-to-input weights.
* A 2-D tensor of type {@link OperandType::TENSOR_QUANT8_ASYMM}
* and shape [outputSize, inputSize] specifying recurrent-to-input part
* and shape [outputSize, outputSize] specifying recurrent-to-input part
* of weights for fully-connected layer inside the LSTM cell.
* Quantization zero point and scale must be the same across all the
* weights.
* * 6: The recurrent-to-forget weights.
* A 2-D tensor of type {@link OperandType::TENSOR_QUANT8_ASYMM}
* and shape [outputSize, inputSize] specifying recurrent-to-forget
* and shape [outputSize, outputSize] specifying recurrent-to-forget
* part of weights for fully-connected layer inside the LSTM cell.
* Quantization zero point and scale must be the same across all the
* weights.
* * 7: The recurrent-to-cell weights.
* A 2-D tensor of type {@link OperandType::TENSOR_QUANT8_ASYMM}
* and shape [outputSize, inputSize] specifying recurrent-to-cell part
* and shape [outputSize, outputSize] specifying recurrent-to-cell part
* of weights for fully-connected layer inside the LSTM cell.
* Quantization zero point and scale must be the same across all the
* weights.
* * 8: The recurrent-to-output weights.
* A 2-D tensor of type {@link OperandType::TENSOR_QUANT8_ASYMM}
* and shape [outputSize, inputSize] specifying recurrent-to-output
* and shape [outputSize, outputSize] specifying recurrent-to-output
* part of weights for fully-connected layer inside the LSTM cell.
* Quantization zero point and scale must be the same across all the
* weights.
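The shape correction above follows from what the recurrent weights multiply: the recurrent-to-* matrices act on the previous output (length outputSize), so they are [outputSize, outputSize], while the input-to-* matrices stay [outputSize, inputSize]. A shape sketch with arbitrary example sizes (illustrative only):

    import numpy as np

    # Why recurrent-to-* weights are [outputSize, outputSize]: the recurrent
    # path of the fully-connected layer multiplies the previous output.
    input_size, output_size = 8, 4
    x_t = np.random.rand(input_size).astype(np.float32)       # current input
    h_prev = np.random.rand(output_size).astype(np.float32)   # previous output

    w_input = np.random.rand(output_size, input_size).astype(np.float32)
    w_recurrent = np.random.rand(output_size, output_size).astype(np.float32)

    gate_pre_activation = w_input @ x_t + w_recurrent @ h_prev
    print(gate_pre_activation.shape)   # (4,)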
@@ -4205,7 +4233,10 @@ enum OperationType : int32_t {
* padding.
*
* Supported tensor {@link OperandCode} configurations:
* * 32 bit Floating point :
* * 16 bit floating point:
* * * {@link OperandType::TENSOR_FLOAT16} for input, filter, output, and bias.
*
* * 32 bit floating point:
* * * {@link OperandType::TENSOR_FLOAT32} for input, filter, output, and bias.
*
* * Quantized:
@@ -4213,7 +4244,6 @@ enum OperationType : int32_t {
* * * {@link OperandType::TENSOR_INT32} for bias (with scale set to
* * * input.scale * filter.scale).
*
* Available since API level 29:
* * Quantized with symmetric per channel quantization for the filter:
* * * {@link OperandType::TENSOR_QUANT8_ASYMM} for input, and output.
* * * {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL} for filter.