Add new OperandType TENSOR_QUANT16_ASYMM.

Test: NeuralNetworksTest_static
Test: VtsHalNeuralnetworksV1_2TargetTest
Change-Id: I8fcd6b30c32f8fbc181d2b43f9ac0b94fdc57e2f
This commit is contained in:
parent
a316581b21
commit
d49f665c42
3 changed files with 22 additions and 7 deletions
|
@ -45,13 +45,10 @@ using ::test_helper::bool8;
|
|||
using ::test_helper::compare;
|
||||
using ::test_helper::expectMultinomialDistributionWithinTolerance;
|
||||
using ::test_helper::filter;
|
||||
using ::test_helper::Float32Operands;
|
||||
using ::test_helper::for_all;
|
||||
using ::test_helper::for_each;
|
||||
using ::test_helper::Int32Operands;
|
||||
using ::test_helper::MixedTyped;
|
||||
using ::test_helper::MixedTypedExample;
|
||||
using ::test_helper::Quant8Operands;
|
||||
using ::test_helper::resize_accordingly;
|
||||
|
||||
template <typename T>
|
||||
|
@ -67,12 +64,13 @@ void copy_back_(std::map<int, std::vector<T>>* dst, const std::vector<RequestArg
|
|||
void copy_back(MixedTyped* dst, const std::vector<RequestArgument>& ra, char* src) {
|
||||
copy_back_(&dst->float32Operands, ra, src);
|
||||
copy_back_(&dst->int32Operands, ra, src);
|
||||
copy_back_(&dst->quant8Operands, ra, src);
|
||||
copy_back_(&dst->quant16Operands, ra, src);
|
||||
copy_back_(&dst->quant8AsymmOperands, ra, src);
|
||||
copy_back_(&dst->quant16SymmOperands, ra, src);
|
||||
copy_back_(&dst->float16Operands, ra, src);
|
||||
copy_back_(&dst->bool8Operands, ra, src);
|
||||
copy_back_(&dst->quant8ChannelOperands, ra, src);
|
||||
static_assert(7 == MixedTyped::kNumTypes,
|
||||
copy_back_(&dst->quant16AsymmOperands, ra, src);
|
||||
static_assert(8 == MixedTyped::kNumTypes,
|
||||
"Number of types in MixedTyped changed, but copy_back function wasn't updated");
|
||||
}
|
||||
|
||||
|
|
|
@ -76,6 +76,18 @@ enum OperandType : @1.0::OperandType {
|
|||
* where C is an index in the Channel dimension.
|
||||
*/
|
||||
TENSOR_QUANT8_SYMM_PER_CHANNEL = 11,
|
||||
/**
|
||||
* A tensor of 16 bit unsigned integers that represent real numbers.
|
||||
*
|
||||
* Attached to this tensor are two numbers that can be used to convert the
|
||||
* 16 bit integer to the real value and vice versa. These two numbers are:
|
||||
* - scale: a 32 bit floating point value greater than zero.
|
||||
* - zeroPoint: a 32 bit integer, in range [0, 65535].
|
||||
*
|
||||
* The formula is:
|
||||
* real_value = (integer_value - zeroPoint) * scale.
|
||||
*/
|
||||
TENSOR_QUANT16_ASYMM = 12,
|
||||
/* ADDING A NEW FUNDAMENTAL TYPE REQUIRES UPDATING THE VALUE OF
|
||||
* OperandTypeRange::OPERAND_FUNDAMENTAL_MAX.
|
||||
*/
|
||||
|
@ -89,7 +101,7 @@ enum OperandType : @1.0::OperandType {
|
|||
*/
|
||||
enum OperandTypeRange : uint32_t {
|
||||
OPERAND_FUNDAMENTAL_MIN = 0,
|
||||
OPERAND_FUNDAMENTAL_MAX = 11,
|
||||
OPERAND_FUNDAMENTAL_MAX = 12,
|
||||
OPERAND_OEM_MIN = 10000,
|
||||
OPERAND_OEM_MAX = 10001,
|
||||
};
|
||||
|
|
|
@ -161,6 +161,7 @@ static uint32_t getInvalidRank(OperandType type) {
|
|||
case OperandType::TENSOR_FLOAT32:
|
||||
case OperandType::TENSOR_INT32:
|
||||
case OperandType::TENSOR_QUANT8_ASYMM:
|
||||
case OperandType::TENSOR_QUANT16_ASYMM:
|
||||
case OperandType::TENSOR_QUANT16_SYMM:
|
||||
case OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL:
|
||||
return 0;
|
||||
|
@ -199,6 +200,7 @@ static float getInvalidScale(OperandType type) {
|
|||
case OperandType::TENSOR_INT32:
|
||||
return -1.0f;
|
||||
case OperandType::TENSOR_QUANT8_ASYMM:
|
||||
case OperandType::TENSOR_QUANT16_ASYMM:
|
||||
case OperandType::TENSOR_QUANT16_SYMM:
|
||||
return 0.0f;
|
||||
default:
|
||||
|
@ -233,6 +235,8 @@ static std::vector<int32_t> getInvalidZeroPoints(OperandType type) {
|
|||
return {1};
|
||||
case OperandType::TENSOR_QUANT8_ASYMM:
|
||||
return {-1, 256};
|
||||
case OperandType::TENSOR_QUANT16_ASYMM:
|
||||
return {-1, 65536};
|
||||
case OperandType::TENSOR_QUANT16_SYMM:
|
||||
return {-32769, -1, 1, 32768};
|
||||
default:
|
||||
|
@ -288,6 +292,7 @@ static void mutateOperand(Operand* operand, OperandType type) {
|
|||
newOperand.zeroPoint = 0;
|
||||
break;
|
||||
case OperandType::TENSOR_QUANT8_ASYMM:
|
||||
case OperandType::TENSOR_QUANT16_ASYMM:
|
||||
case OperandType::TENSOR_QUANT16_SYMM:
|
||||
newOperand.dimensions =
|
||||
operand->dimensions.size() > 0 ? operand->dimensions : hidl_vec<uint32_t>({1});
|
||||
|
|
Loading…
Reference in a new issue