Add NNAPI control flow

Bug: 136735929
Bug: 139181916
Test: m
Change-Id: I7a75175f00fc98df626c40ea669021ccd40130e0
Slava Shklyaev 2019-12-13 12:24:35 +00:00
parent 8a179f3499
commit a785a3faac
7 changed files with 373 additions and 123 deletions

View file

@@ -651,10 +651,10 @@ ac429fca0da4ce91218768ec31b64ded88251f8a26d8c4f27c06abdc5b1926d9 android.hardwar
df9c79c4fdde2821550c6d5c3d07f5ec0adfb1b702561ce543c906ddef698703 android.hardware.media.c2@1.1::IComponent
a3eddd9bbdc87e8c22764070037dd1154f1cf006e6fba93364c4f85d4c134a19 android.hardware.media.c2@1.1::IComponentStore
4b5c8546533db9412fec6d32c0ef42b22e5e68dbf390c775ec3c22bb2d501102 android.hardware.neuralnetworks@1.3::IBuffer
234cc547d63d2f24a447aee0a9a76cab68b31c080adadc5a960598b827a69fa2 android.hardware.neuralnetworks@1.3::IDevice
5a6b75f13f0e010a4268defa4f627b862ab2899fb04f9d985194a25bd8f9fe0d android.hardware.neuralnetworks@1.3::IDevice
058b48f0e2e725bb2b3fa2b7917b0f0a696383d03a4c57afe26f0eadb6a7af28 android.hardware.neuralnetworks@1.3::IPreparedModel
94e803236398bed1febb11cc21051bc42ec003700139b099d6c479e02a7ca3c3 android.hardware.neuralnetworks@1.3::IPreparedModelCallback
1435cf1724f9f89ff5f97d4aa6fe2a031b0ef43034cb5801b16229dc2ecfea82 android.hardware.neuralnetworks@1.3::types
12c51f9d04a52324510419aeee3e37bb3607e6900556cdde79774d80ed989855 android.hardware.neuralnetworks@1.3::types
3e01d4446cd69fd1c48f8572efd97487bc179564b32bd795800b97bbe10be37b android.hardware.wifi@1.4::IWifi
c67aaf26a7a40d14ea61e70e20afacbd0bb906df1704d585ac8599fbb69dd44b android.hardware.wifi.hostapd@1.2::IHostapd
11f6448d15336361180391c8ebcdfd2d7cf77b3782d577e594d583aadc9c2877 android.hardware.wifi.hostapd@1.2::types

View file

@@ -48,9 +48,14 @@ interface IDevice extends @1.2::IDevice {
/**
* Gets the supported operations in a model.
*
* getSupportedOperations indicates which operations of a model are fully
* supported by the vendor driver. If an operation may not be supported for
* any reason, getSupportedOperations must return false for that operation.
* getSupportedOperations indicates which operations of the top-level
* subgraph are fully supported by the vendor driver. If an operation may
* not be supported for any reason, getSupportedOperations must return
* false for that operation.
*
* The {@link OperationType::IF} and {@link OperationType::WHILE}
* operations may only be fully supported if the vendor driver fully
* supports all operations in the referenced subgraphs.
*
* @param model A model whose operations--and their corresponding operands--
* are to be verified by the driver.
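
A driver-side sketch of the rule above, under stated assumptions: supportsOperation() is a hypothetical backend predicate, not part of the HAL, and SUBGRAPH operands index Model::referenced through location.offset as specified later in this change. The HAL fixes only the reporting contract, not this logic.

#include <android/hardware/neuralnetworks/1.3/types.h>

using ::android::hardware::neuralnetworks::V1_3::Model;
using ::android::hardware::neuralnetworks::V1_3::Operand;
using ::android::hardware::neuralnetworks::V1_3::OperandLifeTime;
using ::android::hardware::neuralnetworks::V1_3::Operation;
using ::android::hardware::neuralnetworks::V1_3::Subgraph;

// Hypothetical per-operation backend check; driver-specific.
bool supportsOperation(const Operation& operation);

// An IF/WHILE operation may only be reported as supported when every
// operation in each referenced subgraph is supported, transitively.
bool supportsSubgraph(const Model& model, const Subgraph& subgraph) {
    for (const Operation& operation : subgraph.operations) {
        if (!supportsOperation(operation)) {
            return false;
        }
    }
    for (const Operand& operand : subgraph.operands) {
        // A SUBGRAPH operand names an entry of Model::referenced.
        if (operand.lifetime == OperandLifeTime::SUBGRAPH &&
            !supportsSubgraph(model, model.referenced[operand.location.offset])) {
            return false;
        }
    }
    return true;
}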

View file

@@ -17,7 +17,6 @@
package android.hardware.neuralnetworks@1.3;
import @1.0::DataLocation;
import @1.0::OperandLifeTime;
import @1.0::PerformanceInfo;
import @1.0::RequestArgument;
import @1.2::Model.ExtensionNameAndPrefix;
@@ -42,6 +41,13 @@ enum OperandType : @1.2::OperandType {
*/
TENSOR_QUANT8_ASYMM_SIGNED = 14,
/**
* A reference to a subgraph.
*
* Must have the lifetime {@link OperandLifeTime::SUBGRAPH}.
*/
SUBGRAPH = 15,
/*
* DEPRECATED. Since HAL version 1.2, extensions are the preferred
* alternative to OEM operation and data types.
@@ -70,7 +76,7 @@ enum OperandType : @1.2::OperandType {
enum OperandTypeRange : uint32_t {
BASE_MIN = 0,
FUNDAMENTAL_MIN = 0,
FUNDAMENTAL_MAX = 14,
FUNDAMENTAL_MAX = 15,
OEM_MIN = 10000,
OEM_MAX = 10001,
BASE_MAX = 0xFFFF,
@@ -4878,6 +4884,92 @@ enum OperationType : int32_t {
*/
QUANTIZED_LSTM = 95,
/**
* Executes one of the two referenced subgraphs as determined by a boolean
* value.
*
* The inputs and outputs of the two referenced subgraphs must agree with the
* signature of this operation. That is, if the operation has (3 + n) inputs
* and m outputs, both subgraphs must have n inputs and m outputs with the same
* types as the corresponding operation inputs and outputs.
*
* Inputs:
* * 0: A value of type {@link OperandType::TENSOR_BOOL8} and shape [1]
* that determines which of the two referenced subgraphs to execute.
* * 1: A {@link OperandType::SUBGRAPH} reference to the subgraph to be
* executed if the condition is true.
* * 2: A {@link OperandType::SUBGRAPH} reference to the subgraph to be
* executed if the condition is false.
* * 3 ~ (n + 2): Inputs to be passed to the subgraph selected for execution.
*
* Outputs:
* * 0 ~ (m - 1): Outputs produced by the selected subgraph.
*/
IF = 96,
/**
* Executes the body subgraph until the condition subgraph outputs false.
*
* The inputs to this operation are the condition subgraph, the body subgraph,
* and operand values for the first iteration of the loop. The values are
* implicitly split into three groups of input-output, state-only, and
* input-only values, as described below.
*
* The outputs of this operation are the final values of input-output
* operands.
*
* Both the condition and body subgraphs receive (m + k + n) inputs.
* * The first m (m >= 1) inputs are input-output operands. For the first
* iteration, these are initialized from the corresponding inputs of the
* WHILE operation. In subsequent iterations, their values come from the
* corresponding outputs of the body subgraph produced during the previous
* iteration.
* * The next k (k >= 0) inputs are state-only operands. They are similar to
* the input-output operands, except that their values are no longer
* available after the loop terminates.
* * The last n (n >= 0) inputs are input-only operands. Their values come
* from the corresponding inputs of the WHILE operation.
*
* The body subgraph produces (m + k) outputs.
* * The first m outputs are input-output operands. They become the outputs
* of the WHILE operation when a termination condition is reached.
* * The last k outputs are state-only operands. Their values are no longer
* available after the loop terminates.
*
* The numbers m, k, and n are inferred by the driver as follows:
* m = (WHILE operation output count)
* k = (body subgraph output count) - m
* n = (body subgraph input count) - m - k
*
* The pseudo-code below illustrates the flow of a WHILE operation with
* inputs condition, body, initial_input_output, initial_state, input_only
* (m = 1, k = 1, n = 1):
*
* input_output = initial_input_output
* state = initial_state
* while condition(input_output, state, input_only):
* input_output, state = body(input_output, state, input_only)
* return input_output
*
* Inputs:
* * 0: A {@link OperandType::SUBGRAPH} reference to the condition
* subgraph. The subgraph must have (m + k + n) inputs with
* the same types as the corresponding inputs of the WHILE operation
* and exactly one output of {@link OperandType::TENSOR_BOOL8}
* and shape [1].
* * 1: A {@link OperandType::SUBGRAPH} reference to the body subgraph.
* The subgraph must have (m + k + n) inputs and (m + k) outputs with
* the same types as the corresponding inputs and outputs of the WHILE
* operation.
* * (m inputs): Initial values for input-output operands.
* * (k inputs): Initial values for state-only operands.
* * (n inputs): Values for input-only operands.
*
* Outputs:
* * 0 ~ (m - 1): Outputs produced by the loop.
*/
WHILE = 97,
/**
* DEPRECATED. Since NNAPI 1.2, extensions are the preferred alternative to
* OEM operation and data types.
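
For reference, the m/k/n inference described in the WHILE documentation above reduces to three subtractions over the operation and body subgraph signatures. A minimal sketch, assuming whileOp and body are the already-parsed WHILE Operation and body Subgraph:

struct WhileSplit { uint32_t m, k, n; };

WhileSplit inferWhileSplit(const Operation& whileOp, const Subgraph& body) {
    const uint32_t m = whileOp.outputs.size();            // input-output operands
    const uint32_t k = body.outputIndexes.size() - m;     // state-only operands
    const uint32_t n = body.inputIndexes.size() - m - k;  // input-only operands
    return {m, k, n};
}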
@@ -4900,13 +4992,12 @@ enum OperationType : int32_t {
enum OperationTypeRange : uint32_t {
BASE_MIN = 0,
FUNDAMENTAL_MIN = 0,
FUNDAMENTAL_MAX = 95,
FUNDAMENTAL_MAX = 97,
OEM_MIN = 10000,
OEM_MAX = 10000,
BASE_MAX = 0xFFFF,
};
/**
* The capabilities of a driver.
*
@@ -4967,6 +5058,59 @@ struct Operation {
vec<uint32_t> outputs;
};
/**
* How an operand is used.
*/
enum OperandLifeTime : int32_t {
/**
* The operand is internal to the model. It's created by an operation and
* consumed by other operations. It must be an output operand of
* exactly one operation.
*/
TEMPORARY_VARIABLE,
/**
* The operand is an input of a subgraph. It must not be an output
* operand of any operation.
*
* An operand can't be both input and output of a subgraph.
*/
SUBGRAPH_INPUT,
/**
* The operand is an output of a subgraph. It must be an output
* operand of exactly one operation.
*
* An operand can't be both input and output of a subgraph.
*/
SUBGRAPH_OUTPUT,
/**
* The operand is a constant found in Model.operandValues. It must
* not be an output operand of any operation.
*/
CONSTANT_COPY,
/**
* The operand is a constant that was specified via a Memory
* object. It must not be an output operand of any operation.
*/
CONSTANT_REFERENCE,
/**
* The operand does not have a value. This is valid only for optional
* arguments of operations.
*/
NO_VALUE,
/**
* The operand is a reference to a subgraph. It must be an input to one
* or more {@link OperationType::IF} or {@link OperationType::WHILE}
* operations.
*/
SUBGRAPH,
};
/**
* Describes one operand of the model's graph.
*/
@@ -5003,7 +5147,7 @@ struct Operand {
* . The operand has lifetime CONSTANT_COPY or
* CONSTANT_REFERENCE.
*
* . The operand has lifetime MODEL_INPUT. Fully
* . The operand has lifetime SUBGRAPH_INPUT. Fully
* specified dimensions must either be present in the
* Operand or they must be provided in the corresponding
* RequestArgument.
@@ -5051,8 +5195,8 @@ struct Operand {
/**
* Where to find the data for this operand.
* If the lifetime is TEMPORARY_VARIABLE, MODEL_INPUT, MODEL_OUTPUT, or
* NO_VALUE:
* If the lifetime is TEMPORARY_VARIABLE, SUBGRAPH_INPUT, SUBGRAPH_OUTPUT,
* or NO_VALUE:
* - All the fields must be 0.
* If the lifetime is CONSTANT_COPY:
* - location.poolIndex is 0.
@@ -5062,6 +5206,11 @@ struct Operand {
* - location.poolIndex is set.
* - location.offset is the offset in bytes into the specified pool.
* - location.length is set.
* If the lifetime is SUBGRAPH:
* - location.poolIndex is 0.
* - location.offset is the index of the referenced subgraph in
* {@link Model::referenced}.
* - location.length is 0.
*/
DataLocation location;
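
For illustration, a SUBGRAPH operand referring to the subgraph at index 1 of Model::referenced would be encoded as below; every field other than type, lifetime, and location is arbitrary here.

Operand subgraphRef = {
        .type = OperandType::SUBGRAPH,
        .dimensions = {},
        .numberOfConsumers = 1,  // consumed by one IF or WHILE operation
        .scale = 0.0f,
        .zeroPoint = 0,
        .lifetime = OperandLifeTime::SUBGRAPH,
        // poolIndex and length must be 0; offset indexes Model::referenced.
        .location = {.poolIndex = 0, .offset = 1, .length = 0},
};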
@@ -5100,32 +5249,19 @@ struct Operand {
*/
struct Model {
/**
* All operands included in the model.
* The top-level subgraph.
*/
vec<Operand> operands;
Subgraph main;
/**
* All operations included in the model.
* Referenced subgraphs.
*
* The operations are sorted into execution order. Every operand
* with lifetime MODEL_OUTPUT or TEMPORARY_VARIABLE must be
* written before it is read.
*/
vec<Operation> operations;
/**
* Input indexes of the model. There must be at least one.
* Each subgraph is referenced by the main subgraph or at least one other
* referenced subgraph.
*
* Each value corresponds to the index of the operand in "operands".
* There must be no reference cycles.
*/
vec<uint32_t> inputIndexes;
/**
* Output indexes of the model. There must be at least one.
*
* Each value corresponds to the index of the operand in "operands".
*/
vec<uint32_t> outputIndexes;
vec<Subgraph> referenced;
/**
* A byte buffer containing operand data that were copied into the model.
@@ -5177,6 +5313,39 @@ struct Model {
vec<@1.2::Model.ExtensionNameAndPrefix> extensionNameToPrefix;
};
/**
* An excerpt of the execution graph.
*/
struct Subgraph {
/**
* All operands included in the subgraph.
*/
vec<Operand> operands;
/**
* All operations included in the subgraph.
*
* The operations are sorted into execution order. Every operand
* with lifetime SUBGRAPH_OUTPUT or TEMPORARY_VARIABLE must be
* written before it is read.
*/
vec<Operation> operations;
/**
* Input indexes of the subgraph. There must be at least one.
*
* Each value corresponds to the index of the operand in "operands".
*/
vec<uint32_t> inputIndexes;
/**
* Output indexes of the subgraph. There must be at least one.
*
* Each value corresponds to the index of the operand in "operands".
*/
vec<uint32_t> outputIndexes;
};
/**
* A buffer descriptor. Describes the properties of a buffer.
*/
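
Putting the new Model/Subgraph split together: a hedged sketch of assembling a control flow model. The Subgraph arguments are assumed to be fully built elsewhere, and the helper itself is hypothetical, not part of this change.

// `main` is expected to contain an IF (or WHILE) operation whose SUBGRAPH
// operands carry location.offset 0 and 1, matching the order of `referenced`.
Model makeControlFlowModel(Subgraph main, Subgraph thenBranch, Subgraph elseBranch) {
    return {.main = main,
            .referenced = {thenBranch, elseBranch},
            .operandValues = {},
            .pools = {},
            .relaxComputationFloat32toFloat16 = false};
}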

View file

@@ -19,7 +19,6 @@
package android.hardware.neuralnetworks@1.3;
import @1.0::DataLocation;
import @1.0::OperandLifeTime;
import @1.0::PerformanceInfo;
import @1.0::RequestArgument;
import @1.2::Model.ExtensionNameAndPrefix;
@@ -90,7 +89,6 @@ enum OperationTypeRange : uint32_t {
BASE_MAX = 0xFFFF,
};
/**
* The capabilities of a driver.
*
@@ -151,6 +149,59 @@ struct Operation {
vec<uint32_t> outputs;
};
/**
* How an operand is used.
*/
enum OperandLifeTime : int32_t {
/**
* The operand is internal to the model. It's created by an operation and
* consumed by other operations. It must be an output operand of
* exactly one operation.
*/
TEMPORARY_VARIABLE,
/**
* The operand is an input of a subgraph. It must not be an output
* operand of any operation.
*
* An operand can't be both input and output of a subgraph.
*/
SUBGRAPH_INPUT,
/**
* The operand is an output of a subgraph. It must be an output
* operand of exactly one operation.
*
* An operand can't be both input and output of a subgraph.
*/
SUBGRAPH_OUTPUT,
/**
* The operand is a constant found in Model.operandValues. It must
* not be an output operand of any operation.
*/
CONSTANT_COPY,
/**
* The operand is a constant that was specified via a Memory
* object. It must not be an output operand of any operation.
*/
CONSTANT_REFERENCE,
/**
* The operand does not have a value. This is valid only for optional
* arguments of operations.
*/
NO_VALUE,
/**
* The operand is a reference to a subgraph. It must be an input to one
* or more {@link OperationType::IF} or {@link OperationType::WHILE}
* operations.
*/
SUBGRAPH,
};
/**
* Describes one operand of the model's graph.
*/
@@ -187,7 +238,7 @@ struct Operand {
* . The operand has lifetime CONSTANT_COPY or
* CONSTANT_REFERENCE.
*
* . The operand has lifetime MODEL_INPUT. Fully
* . The operand has lifetime SUBGRAPH_INPUT. Fully
* specified dimensions must either be present in the
* Operand or they must be provided in the corresponding
* RequestArgument.
@@ -235,8 +286,8 @@ struct Operand {
/**
* Where to find the data for this operand.
* If the lifetime is TEMPORARY_VARIABLE, MODEL_INPUT, MODEL_OUTPUT, or
* NO_VALUE:
* If the lifetime is TEMPORARY_VARIABLE, SUBGRAPH_INPUT, SUBGRAPH_OUTPUT,
* or NO_VALUE:
* - All the fields must be 0.
* If the lifetime is CONSTANT_COPY:
* - location.poolIndex is 0.
@@ -246,6 +297,11 @@ struct Operand {
* - location.poolIndex is set.
* - location.offset is the offset in bytes into the specified pool.
* - location.length is set.
* If the lifetime is SUBGRAPH:
* - location.poolIndex is 0.
* - location.offset is the index of the referenced subgraph in
* {@link Model::referenced}.
* - location.length is 0.
*/
DataLocation location;
@@ -284,32 +340,19 @@ struct Operand {
*/
struct Model {
/**
* All operands included in the model.
* The top-level subgraph.
*/
vec<Operand> operands;
Subgraph main;
/**
* All operations included in the model.
* Referenced subgraphs.
*
* The operations are sorted into execution order. Every operand
* with lifetime MODEL_OUTPUT or TEMPORARY_VARIABLE must be
* written before it is read.
*/
vec<Operation> operations;
/**
* Input indexes of the model. There must be at least one.
* Each subgraph is referenced by the main subgraph or at least one other
* referenced subgraph.
*
* Each value corresponds to the index of the operand in "operands".
* There must be no reference cycles.
*/
vec<uint32_t> inputIndexes;
/**
* Output indexes of the model. There must be at least one.
*
* Each value corresponds to the index of the operand in "operands".
*/
vec<uint32_t> outputIndexes;
vec<Subgraph> referenced;
/**
* A byte buffer containing operand data that were copied into the model.
@@ -361,6 +404,39 @@ struct Model {
vec<@1.2::Model.ExtensionNameAndPrefix> extensionNameToPrefix;
};
/**
* An excerpt of the execution graph.
*/
struct Subgraph {
/**
* All operands included in the subgraph.
*/
vec<Operand> operands;
/**
* All operations included in the subgraph.
*
* The operations are sorted into execution order. Every operand
* with lifetime SUBGRAPH_OUTPUT or TEMPORARY_VARIABLE must be
* written before it is read.
*/
vec<Operation> operations;
/**
* Input indexes of the subgraph. There must be at least one.
*
* Each value corresponds to the index of the operand in "operands".
*/
vec<uint32_t> inputIndexes;
/**
* Output indexes of the subgraph. There must be at least one.
*
* Each value corresponds to the index of the operand in "operands".
*/
vec<uint32_t> outputIndexes;
};
/**
* A buffer descriptor. Describes the properties of a buffer.
*/

View file

@@ -308,7 +308,7 @@ class CompilationCachingTestBase : public testing::Test {
model,
[&fullySupportsModel, &model](ErrorStatus status, const hidl_vec<bool>& supported) {
ASSERT_EQ(ErrorStatus::NONE, status);
ASSERT_EQ(supported.size(), model.operations.size());
ASSERT_EQ(supported.size(), model.main.operations.size());
fullySupportsModel = std::all_of(supported.begin(), supported.end(),
[](bool valid) { return valid; });
});

View file

@@ -59,7 +59,6 @@ using hidl::memory::V1_0::IMemory;
using implementation::PreparedModelCallback;
using V1_0::DataLocation;
using V1_0::ErrorStatus;
using V1_0::OperandLifeTime;
using V1_0::RequestArgument;
using V1_1::ExecutionPreference;
using V1_2::Constant;
@@ -269,10 +268,10 @@ Model createModel(const TestModel& testModel) {
}
}
return {.operands = std::move(operands),
.operations = std::move(operations),
.inputIndexes = testModel.inputIndexes,
.outputIndexes = testModel.outputIndexes,
return {.main = {.operands = std::move(operands),
.operations = std::move(operations),
.inputIndexes = testModel.inputIndexes,
.outputIndexes = testModel.outputIndexes},
.operandValues = std::move(operandValues),
.pools = std::move(pools),
.relaxComputationFloat32toFloat16 = testModel.isRelaxed};
@@ -290,8 +289,8 @@ static void makeOutputInsufficientSize(uint32_t outputIndex, Request* request) {
}
static void makeOutputDimensionsUnspecified(Model* model) {
for (auto i : model->outputIndexes) {
auto& dims = model->operands[i].dimensions;
for (auto i : model->main.outputIndexes) {
auto& dims = model->main.operands[i].dimensions;
std::fill(dims.begin(), dims.end(), 0);
}
}

View file

@@ -25,7 +25,6 @@ namespace android::hardware::neuralnetworks::V1_3::vts::functional {
using implementation::PreparedModelCallback;
using V1_0::ErrorStatus;
using V1_0::OperandLifeTime;
using V1_1::ExecutionPreference;
using V1_2::SymmPerChannelQuantParams;
using HidlToken =
@@ -83,22 +82,22 @@ static void validate(const sp<IDevice>& device, const std::string& message, Mode
}
static uint32_t addOperand(Model* model) {
return hidl_vec_push_back(&model->operands,
return hidl_vec_push_back(&model->main.operands,
{
.type = OperandType::INT32,
.dimensions = {},
.numberOfConsumers = 0,
.scale = 0.0f,
.zeroPoint = 0,
.lifetime = OperandLifeTime::MODEL_INPUT,
.lifetime = OperandLifeTime::SUBGRAPH_INPUT,
.location = {.poolIndex = 0, .offset = 0, .length = 0},
});
}
static uint32_t addOperand(Model* model, OperandLifeTime lifetime) {
uint32_t index = addOperand(model);
model->operands[index].numberOfConsumers = 1;
model->operands[index].lifetime = lifetime;
model->main.operands[index].numberOfConsumers = 1;
model->main.operands[index].lifetime = lifetime;
return index;
}
@@ -112,13 +111,13 @@ static const uint32_t invalidOperandTypes[] = {
};
static void mutateOperandTypeTest(const sp<IDevice>& device, const Model& model) {
for (size_t operand = 0; operand < model.operands.size(); ++operand) {
for (size_t operand = 0; operand < model.main.operands.size(); ++operand) {
for (uint32_t invalidOperandType : invalidOperandTypes) {
const std::string message = "mutateOperandTypeTest: operand " +
std::to_string(operand) + " set to value " +
std::to_string(invalidOperandType);
validate(device, message, model, [operand, invalidOperandType](Model* model) {
model->operands[operand].type = static_cast<OperandType>(invalidOperandType);
model->main.operands[operand].type = static_cast<OperandType>(invalidOperandType);
});
}
}
@@ -150,15 +149,15 @@ static uint32_t getInvalidRank(OperandType type) {
}
static void mutateOperandRankTest(const sp<IDevice>& device, const Model& model) {
for (size_t operand = 0; operand < model.operands.size(); ++operand) {
const uint32_t invalidRank = getInvalidRank(model.operands[operand].type);
for (size_t operand = 0; operand < model.main.operands.size(); ++operand) {
const uint32_t invalidRank = getInvalidRank(model.main.operands[operand].type);
if (invalidRank == 0) {
continue;
}
const std::string message = "mutateOperandRankTest: operand " + std::to_string(operand) +
" has rank of " + std::to_string(invalidRank);
validate(device, message, model, [operand, invalidRank](Model* model) {
model->operands[operand].dimensions = std::vector<uint32_t>(invalidRank, 0);
model->main.operands[operand].dimensions = std::vector<uint32_t>(invalidRank, 0);
});
}
}
@@ -190,12 +189,12 @@ static float getInvalidScale(OperandType type) {
}
static void mutateOperandScaleTest(const sp<IDevice>& device, const Model& model) {
for (size_t operand = 0; operand < model.operands.size(); ++operand) {
const float invalidScale = getInvalidScale(model.operands[operand].type);
for (size_t operand = 0; operand < model.main.operands.size(); ++operand) {
const float invalidScale = getInvalidScale(model.main.operands[operand].type);
const std::string message = "mutateOperandScaleTest: operand " + std::to_string(operand) +
" has scale of " + std::to_string(invalidScale);
validate(device, message, model, [operand, invalidScale](Model* model) {
model->operands[operand].scale = invalidScale;
model->main.operands[operand].scale = invalidScale;
});
}
}
@@ -229,15 +228,15 @@ static std::vector<int32_t> getInvalidZeroPoints(OperandType type) {
}
static void mutateOperandZeroPointTest(const sp<IDevice>& device, const Model& model) {
for (size_t operand = 0; operand < model.operands.size(); ++operand) {
for (size_t operand = 0; operand < model.main.operands.size(); ++operand) {
const std::vector<int32_t> invalidZeroPoints =
getInvalidZeroPoints(model.operands[operand].type);
getInvalidZeroPoints(model.main.operands[operand].type);
for (int32_t invalidZeroPoint : invalidZeroPoints) {
const std::string message = "mutateOperandZeroPointTest: operand " +
std::to_string(operand) + " has zero point of " +
std::to_string(invalidZeroPoint);
validate(device, message, model, [operand, invalidZeroPoint](Model* model) {
model->operands[operand].zeroPoint = invalidZeroPoint;
model->main.operands[operand].zeroPoint = invalidZeroPoint;
});
}
}
@@ -310,11 +309,11 @@ static void mutateOperand(Operand* operand, OperandType type) {
static bool mutateOperationOperandTypeSkip(size_t operand, OperandType type, const Model& model) {
// Do not test OEM types
if (type == model.operands[operand].type || type == OperandType::OEM ||
if (type == model.main.operands[operand].type || type == OperandType::OEM ||
type == OperandType::TENSOR_OEM_BYTE) {
return true;
}
for (const Operation& operation : model.operations) {
for (const Operation& operation : model.main.operations) {
// Skip mutateOperationOperandTypeTest for the following operations.
// - LSH_PROJECTION's second argument is allowed to have any type.
// - ARGMIN and ARGMAX's first argument can be any of
@@ -401,7 +400,7 @@ static bool mutateOperationOperandTypeSkip(size_t operand, OperandType type, con
}
static void mutateOperationOperandTypeTest(const sp<IDevice>& device, const Model& model) {
for (size_t operand = 0; operand < model.operands.size(); ++operand) {
for (size_t operand = 0; operand < model.main.operands.size(); ++operand) {
for (OperandType invalidOperandType : hidl_enum_range<OperandType>{}) {
if (mutateOperationOperandTypeSkip(operand, invalidOperandType, model)) {
continue;
@@ -410,7 +409,7 @@ static void mutateOperationOperandTypeTest(const sp<IDevice>& device, const Mode
std::to_string(operand) + " set to type " +
toString(invalidOperandType);
validate(device, message, model, [operand, invalidOperandType](Model* model) {
mutateOperand(&model->operands[operand], invalidOperandType);
mutateOperand(&model->main.operands[operand], invalidOperandType);
});
}
}
@@ -425,13 +424,13 @@ static const uint32_t invalidOperationTypes[] = {
};
static void mutateOperationTypeTest(const sp<IDevice>& device, const Model& model) {
for (size_t operation = 0; operation < model.operations.size(); ++operation) {
for (size_t operation = 0; operation < model.main.operations.size(); ++operation) {
for (uint32_t invalidOperationType : invalidOperationTypes) {
const std::string message = "mutateOperationTypeTest: operation " +
std::to_string(operation) + " set to value " +
std::to_string(invalidOperationType);
validate(device, message, model, [operation, invalidOperationType](Model* model) {
model->operations[operation].type =
model->main.operations[operation].type =
static_cast<OperationType>(invalidOperationType);
});
}
@@ -441,14 +440,14 @@ static void mutateOperationTypeTest(const sp<IDevice>& device, const Model& mode
///////////////////////// VALIDATE MODEL OPERATION INPUT OPERAND INDEX /////////////////////////
static void mutateOperationInputOperandIndexTest(const sp<IDevice>& device, const Model& model) {
for (size_t operation = 0; operation < model.operations.size(); ++operation) {
const uint32_t invalidOperand = model.operands.size();
for (size_t input = 0; input < model.operations[operation].inputs.size(); ++input) {
for (size_t operation = 0; operation < model.main.operations.size(); ++operation) {
const uint32_t invalidOperand = model.main.operands.size();
for (size_t input = 0; input < model.main.operations[operation].inputs.size(); ++input) {
const std::string message = "mutateOperationInputOperandIndexTest: operation " +
std::to_string(operation) + " input " +
std::to_string(input);
validate(device, message, model, [operation, input, invalidOperand](Model* model) {
model->operations[operation].inputs[input] = invalidOperand;
model->main.operations[operation].inputs[input] = invalidOperand;
});
}
}
@@ -457,14 +456,15 @@ static void mutateOperationInputOperandIndexTest(const sp<IDevice>& device, cons
///////////////////////// VALIDATE MODEL OPERATION OUTPUT OPERAND INDEX /////////////////////////
static void mutateOperationOutputOperandIndexTest(const sp<IDevice>& device, const Model& model) {
for (size_t operation = 0; operation < model.operations.size(); ++operation) {
const uint32_t invalidOperand = model.operands.size();
for (size_t output = 0; output < model.operations[operation].outputs.size(); ++output) {
for (size_t operation = 0; operation < model.main.operations.size(); ++operation) {
const uint32_t invalidOperand = model.main.operands.size();
for (size_t output = 0; output < model.main.operations[operation].outputs.size();
++output) {
const std::string message = "mutateOperationOutputOperandIndexTest: operation " +
std::to_string(operation) + " output " +
std::to_string(output);
validate(device, message, model, [operation, output, invalidOperand](Model* model) {
model->operations[operation].outputs[output] = invalidOperand;
model->main.operations[operation].outputs[output] = invalidOperand;
});
}
}
@@ -485,17 +485,17 @@ static void removeValueAndDecrementGreaterValues(hidl_vec<uint32_t>* vec, uint32
}
static void removeOperand(Model* model, uint32_t index) {
hidl_vec_removeAt(&model->operands, index);
for (Operation& operation : model->operations) {
hidl_vec_removeAt(&model->main.operands, index);
for (Operation& operation : model->main.operations) {
removeValueAndDecrementGreaterValues(&operation.inputs, index);
removeValueAndDecrementGreaterValues(&operation.outputs, index);
}
removeValueAndDecrementGreaterValues(&model->inputIndexes, index);
removeValueAndDecrementGreaterValues(&model->outputIndexes, index);
removeValueAndDecrementGreaterValues(&model->main.inputIndexes, index);
removeValueAndDecrementGreaterValues(&model->main.outputIndexes, index);
}
static bool removeOperandSkip(size_t operand, const Model& model) {
for (const Operation& operation : model.operations) {
for (const Operation& operation : model.main.operations) {
// Skip removeOperandTest for the following operations.
// - SPLIT's outputs are not checked during prepareModel.
if (operation.type == OperationType::SPLIT) {
@@ -520,7 +520,7 @@ static bool removeOperandSkip(size_t operand, const Model& model) {
}
static void removeOperandTest(const sp<IDevice>& device, const Model& model) {
for (size_t operand = 0; operand < model.operands.size(); ++operand) {
for (size_t operand = 0; operand < model.main.operands.size(); ++operand) {
if (removeOperandSkip(operand, model)) {
continue;
}
@@ -533,14 +533,14 @@ static void removeOperandTest(const sp<IDevice>& device, const Model& model) {
///////////////////////// REMOVE OPERATION /////////////////////////
static void removeOperation(Model* model, uint32_t index) {
for (uint32_t operand : model->operations[index].inputs) {
model->operands[operand].numberOfConsumers--;
for (uint32_t operand : model->main.operations[index].inputs) {
model->main.operands[operand].numberOfConsumers--;
}
hidl_vec_removeAt(&model->operations, index);
hidl_vec_removeAt(&model->main.operations, index);
}
static void removeOperationTest(const sp<IDevice>& device, const Model& model) {
for (size_t operation = 0; operation < model.operations.size(); ++operation) {
for (size_t operation = 0; operation < model.main.operations.size(); ++operation) {
const std::string message = "removeOperationTest: operation " + std::to_string(operation);
validate(device, message, model,
[operation](Model* model) { removeOperation(model, operation); });
@@ -615,9 +615,9 @@ static bool removeOperationInputSkip(const Operation& op, size_t input) {
}
static void removeOperationInputTest(const sp<IDevice>& device, const Model& model) {
for (size_t operation = 0; operation < model.operations.size(); ++operation) {
for (size_t input = 0; input < model.operations[operation].inputs.size(); ++input) {
const Operation& op = model.operations[operation];
for (size_t operation = 0; operation < model.main.operations.size(); ++operation) {
for (size_t input = 0; input < model.main.operations[operation].inputs.size(); ++input) {
const Operation& op = model.main.operations[operation];
if (removeOperationInputSkip(op, input)) {
continue;
}
@@ -625,9 +625,9 @@ static void removeOperationInputTest(const sp<IDevice>& device, const Model& mod
std::to_string(operation) + ", input " +
std::to_string(input);
validate(device, message, model, [operation, input](Model* model) {
uint32_t operand = model->operations[operation].inputs[input];
model->operands[operand].numberOfConsumers--;
hidl_vec_removeAt(&model->operations[operation].inputs, input);
uint32_t operand = model->main.operations[operation].inputs[input];
model->main.operands[operand].numberOfConsumers--;
hidl_vec_removeAt(&model->main.operations[operation].inputs, input);
});
}
}
@@ -636,13 +636,14 @@ static void removeOperationInputTest(const sp<IDevice>& device, const Model& mod
///////////////////////// REMOVE OPERATION OUTPUT /////////////////////////
static void removeOperationOutputTest(const sp<IDevice>& device, const Model& model) {
for (size_t operation = 0; operation < model.operations.size(); ++operation) {
for (size_t output = 0; output < model.operations[operation].outputs.size(); ++output) {
for (size_t operation = 0; operation < model.main.operations.size(); ++operation) {
for (size_t output = 0; output < model.main.operations[operation].outputs.size();
++output) {
const std::string message = "removeOperationOutputTest: operation " +
std::to_string(operation) + ", output " +
std::to_string(output);
validate(device, message, model, [operation, output](Model* model) {
hidl_vec_removeAt(&model->operations[operation].outputs, output);
hidl_vec_removeAt(&model->main.operations[operation].outputs, output);
});
}
}
@@ -669,15 +670,15 @@ static bool addOperationInputSkip(const Operation& op) {
}
static void addOperationInputTest(const sp<IDevice>& device, const Model& model) {
for (size_t operation = 0; operation < model.operations.size(); ++operation) {
if (addOperationInputSkip(model.operations[operation])) {
for (size_t operation = 0; operation < model.main.operations.size(); ++operation) {
if (addOperationInputSkip(model.main.operations[operation])) {
continue;
}
const std::string message = "addOperationInputTest: operation " + std::to_string(operation);
validate(device, message, model, [operation](Model* model) {
uint32_t index = addOperand(model, OperandLifeTime::MODEL_INPUT);
hidl_vec_push_back(&model->operations[operation].inputs, index);
hidl_vec_push_back(&model->inputIndexes, index);
uint32_t index = addOperand(model, OperandLifeTime::SUBGRAPH_INPUT);
hidl_vec_push_back(&model->main.operations[operation].inputs, index);
hidl_vec_push_back(&model->main.inputIndexes, index);
});
}
}
@@ -685,13 +686,13 @@ static void addOperationInputTest(const sp<IDevice>& device, const Model& model)
///////////////////////// ADD OPERATION OUTPUT /////////////////////////
static void addOperationOutputTest(const sp<IDevice>& device, const Model& model) {
for (size_t operation = 0; operation < model.operations.size(); ++operation) {
for (size_t operation = 0; operation < model.main.operations.size(); ++operation) {
const std::string message =
"addOperationOutputTest: operation " + std::to_string(operation);
validate(device, message, model, [operation](Model* model) {
uint32_t index = addOperand(model, OperandLifeTime::MODEL_OUTPUT);
hidl_vec_push_back(&model->operations[operation].outputs, index);
hidl_vec_push_back(&model->outputIndexes, index);
uint32_t index = addOperand(model, OperandLifeTime::SUBGRAPH_OUTPUT);
hidl_vec_push_back(&model->main.operations[operation].outputs, index);
hidl_vec_push_back(&model->main.outputIndexes, index);
});
}
}
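
All of the mutations above now reach only model->main. A possible follow-up once control flow models are validated as well (sketched here on the same types; not part of this change) is a helper that applies a mutation to the main subgraph and to every referenced subgraph:

#include <functional>

static void forEachSubgraph(Model* model, const std::function<void(Subgraph*)>& mutate) {
    mutate(&model->main);
    for (Subgraph& subgraph : model->referenced) {
        mutate(&subgraph);
    }
}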