Create NeuralNetworks HAL v1.1 for new OperationTypes

Test: mm
Change-Id: I08efaba79ec28a2f89e94a84ab88b0fa701b7d98

parent 075d928e0b
commit 5c6ee9ecef

3 changed files with 463 additions and 0 deletions

neuralnetworks/1.1/Android.bp (new file, 24 lines)
@@ -0,0 +1,24 @@
// This file is autogenerated by hidl-gen -Landroidbp.

hidl_interface {
    name: "android.hardware.neuralnetworks@1.1",
    root: "android.hardware",
    vndk: {
        enabled: true,
    },
    srcs: [
        "types.hal",
        "IDevice.hal",
    ],
    interfaces: [
        "android.hardware.neuralnetworks@1.0",
        "android.hidl.base@1.0",
    ],
    types: [
        "Model",
        "Operation",
        "OperationType",
    ],
    gen_java: false,
}

neuralnetworks/1.1/IDevice.hal (new file, 106 lines)
@@ -0,0 +1,106 @@
/*
 * Copyright (C) 2018 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package android.hardware.neuralnetworks@1.1;

import @1.0::ErrorStatus;
import @1.0::IDevice;
import @1.0::IPreparedModelCallback;

/**
 * This interface represents a device driver.
 */
interface IDevice extends @1.0::IDevice {
    /**
     * Gets the supported operations in a model.
     *
     * getSupportedOperations_1_1 indicates which operations of a model are fully
     * supported by the vendor driver. If an operation may not be supported for
     * any reason, getSupportedOperations_1_1 must return false for that operation.
     *
     * @param model A model whose operations--and their corresponding
     *              operands--are to be verified by the driver.
     * @return status Error status of the call, must be:
     *                - NONE if successful
     *                - DEVICE_UNAVAILABLE if driver is offline or busy
     *                - GENERAL_FAILURE if there is an unspecified error
     *                - INVALID_ARGUMENT if provided model is invalid
     * @return supportedOperations A list of supported operations, where true
     *                             indicates the operation is supported and
     *                             false indicates the operation is not
     *                             supported. The index of "supported"
     *                             corresponds with the index of the operation
     *                             it is describing.
     */
    getSupportedOperations_1_1(Model model)
        generates (ErrorStatus status, vec<bool> supportedOperations);

    /**
     * Creates a prepared model for execution.
     *
     * prepareModel is used to make any necessary transformations or alternative
     * representations to a model for execution, possibly including
     * transformations on the constant data, optimization on the model's graph,
     * or compilation into the device's native binary format. The model itself
     * is not changed.
     *
     * The model is prepared asynchronously with respect to the caller. The
     * prepareModel function must verify the inputs to the prepareModel function
     * are correct. If there is an error, prepareModel must immediately invoke
     * the callback with the appropriate ErrorStatus value and nullptr for the
     * IPreparedModel, then return with the same ErrorStatus. If the inputs to
     * the prepareModel function are valid and there is no error, prepareModel
     * must launch an asynchronous task to prepare the model in the background,
     * and immediately return from prepareModel with ErrorStatus::NONE. If the
     * asynchronous task fails to launch, prepareModel must immediately invoke
     * the callback with ErrorStatus::GENERAL_FAILURE and nullptr for the
     * IPreparedModel, then return with ErrorStatus::GENERAL_FAILURE.
     *
     * When the asynchronous task has finished preparing the model, it must
     * immediately invoke the callback function provided as an input to
     * prepareModel. If the model was prepared successfully, the callback object
     * must be invoked with an error status of ErrorStatus::NONE and the
     * produced IPreparedModel object. If an error occurred preparing the model,
     * the callback object must be invoked with the appropriate ErrorStatus
     * value and nullptr for the IPreparedModel.
     *
     * The only information that may be unknown to the model at this stage is
     * the shape of the tensors, which may only be known at execution time. As
     * such, some driver services may return partially prepared models, where
     * the prepared model can only be finished when it is paired with a set of
     * inputs to the model. Note that the same prepared model object can be
     * used with different shapes of inputs on different (possibly concurrent)
     * executions.
     *
     * Multiple threads can call prepareModel on the same model concurrently.
     *
     * @param model The model to be prepared for execution.
     * @param callback A callback object used to return the error status of
     *                 preparing the model for execution and the prepared model
     *                 if successful, nullptr otherwise. The callback object's
     *                 notify function must be called exactly once, even if the
     *                 model could not be prepared.
     * @return status Error status of launching a task which prepares the model
     *                in the background; must be:
     *                - NONE if preparation task is successfully launched
     *                - DEVICE_UNAVAILABLE if driver is offline or busy
     *                - GENERAL_FAILURE if there is an unspecified error
     *                - INVALID_ARGUMENT if one of the input arguments is
     *                  invalid
     */
    prepareModel_1_1(Model model, IPreparedModelCallback callback)
        generates (ErrorStatus status);
};
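
For orientation (not part of this change), here is a minimal sketch of how a vendor driver might service getSupportedOperations_1_1 through the hidl-gen'd C++ bindings. The helper isOperationTypeSupported() and the capability set it encodes are hypothetical, and the generated callback typedef is assumed to follow the usual hidl-gen naming.

// Reviewer sketch: report one bool per operation, in the same order as
// model.operations, and invoke the callback exactly once.
#include <android/hardware/neuralnetworks/1.1/IDevice.h>

using ::android::hardware::Return;
using ::android::hardware::Void;
using ::android::hardware::hidl_vec;
using ::android::hardware::neuralnetworks::V1_0::ErrorStatus;
using ::android::hardware::neuralnetworks::V1_1::IDevice;
using ::android::hardware::neuralnetworks::V1_1::Model;
using ::android::hardware::neuralnetworks::V1_1::OperationType;

// Hypothetical capability check for a driver that only accelerates a few ops.
static bool isOperationTypeSupported(OperationType type) {
    switch (type) {
        case OperationType::DIV:
        case OperationType::SUB:
        case OperationType::TRANSPOSE:
            return true;
        default:
            return false;
    }
}

// Body a driver's IDevice::getSupportedOperations_1_1 override might use.
Return<void> serviceGetSupportedOperations_1_1(
        const Model& model, IDevice::getSupportedOperations_1_1_cb cb) {
    hidl_vec<bool> supported;
    supported.resize(model.operations.size());
    for (size_t i = 0; i < model.operations.size(); ++i) {
        // A real driver would also inspect the operation's operands.
        supported[i] = isOperationTypeSupported(model.operations[i].type);
    }
    cb(ErrorStatus::NONE, supported);
    return Void();
}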
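
Likewise, a sketch of the prepareModel_1_1 control flow described above: validate first, report validation errors both through the callback and the return value, then launch the real work asynchronously and return NONE immediately. validateModel() and compileModel() are hypothetical helpers a real driver would supply.

// Reviewer sketch of the required asynchronous preparation flow.
#include <android/hardware/neuralnetworks/1.0/IPreparedModelCallback.h>
#include <android/hardware/neuralnetworks/1.1/IDevice.h>

#include <thread>

using ::android::sp;
using ::android::hardware::Return;
using ::android::hardware::neuralnetworks::V1_0::ErrorStatus;
using ::android::hardware::neuralnetworks::V1_0::IPreparedModel;
using ::android::hardware::neuralnetworks::V1_0::IPreparedModelCallback;
using ::android::hardware::neuralnetworks::V1_1::Model;

// Hypothetical helpers a real driver would provide.
bool validateModel(const Model& model);
sp<IPreparedModel> compileModel(const Model& model);

// Body a driver's IDevice::prepareModel_1_1 override might use.
Return<ErrorStatus> servicePrepareModel_1_1(
        const Model& model, const sp<IPreparedModelCallback>& callback) {
    if (callback == nullptr) {
        return ErrorStatus::INVALID_ARGUMENT;
    }
    if (!validateModel(model)) {
        // Invalid input: notify the callback and return the same status.
        callback->notify(ErrorStatus::INVALID_ARGUMENT, nullptr);
        return ErrorStatus::INVALID_ARGUMENT;
    }
    // Valid input: prepare in the background and notify exactly once when done.
    std::thread([model, callback] {
        sp<IPreparedModel> prepared = compileModel(model);
        if (prepared == nullptr) {
            callback->notify(ErrorStatus::GENERAL_FAILURE, nullptr);
        } else {
            callback->notify(ErrorStatus::NONE, prepared);
        }
    }).detach();
    return ErrorStatus::NONE;
}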

neuralnetworks/1.1/types.hal (new file, 333 lines)
@@ -0,0 +1,333 @@
/*
 * Copyright (C) 2018 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package android.hardware.neuralnetworks@1.1;

import @1.0::Operand;
import @1.0::OperationType;

/**
 * Operation types.
 *
 * The type of an operation in a model.
 */
enum OperationType : @1.0::OperationType {
    /**
     * BatchToSpace for N-D tensors.
     *
     * This operation reshapes the "batch" dimension 0 into M + 1 dimensions of shape
     * block_shape + [batch], interleaves these blocks back into the grid defined by the
     * spatial dimensions [1, ..., M], to obtain a result with the same rank as the input.
     * The spatial dimensions of this intermediate result are then optionally cropped
     * according to the amount to crop to produce the output.
     * This is the reverse of SpaceToBatch.
     *
     * Supported tensor types: {@link OperandType::TENSOR_FLOAT32}
     *                         {@link OperandType::TENSOR_QUANT8_ASYMM}
     * Supported tensor rank: up to 4
     *
     * Inputs:
     * 0: An n-D tensor, specifying the input.
     * 1: A 1-D Tensor of type TENSOR_INT32, the block sizes for each spatial dimension of the
     *    input tensor. All values must be >= 1.
     * 2: A 1-D Tensor of type TENSOR_INT32, the amount to crop for each spatial dimension of the
     *    input tensor. All values must be >= 0.
     *
     * Outputs:
     * 0: A tensor of the same type as input0.
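     *
     * Example (illustrative; block size 2 along both spatial dimensions, no cropping):
     *     input0.dimension = {4, 1, 1, 1}
     *     input1 (block)   = [2, 2]
     *     output.dimension = {1, 2, 2, 1}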
     */
    BATCH_TO_SPACE_ND = 29,

    /**
     * Divides the first tensor by the second tensor, element-wise.
     *
     * Takes two input tensors of identical OperandType and compatible dimensions. The output
     * is the result of dividing the first input tensor by the second, optionally
     * modified by an activation function.
     *
     * Two dimensions are compatible when:
     * 1. they are equal, or
     * 2. one of them is 1
     *
     * The size of the output is the maximum size along each dimension of the input operands.
     * It starts with the trailing dimensions, and works its way forward.
     *
     * Example:
     *     input1.dimension = {4, 1, 2}
     *     input2.dimension = {5, 4, 3, 1}
     *     output.dimension = {5, 4, 3, 2}
     *
     * Supported tensor types: {@link OperandType::TENSOR_FLOAT32}
     * Supported tensor rank: up to 4
     *
     * Inputs:
     * 0: An n-D tensor, specifying the first input.
     * 1: A tensor of the same type, and compatible dimensions as input0.
     * 2: An INT32 value, and has to be one of the {@link FusedActivationFunc} values.
     *    Specifies the activation to invoke on the result of each division.
     *
     * Outputs:
     * 0: A tensor of the same type as input0.
     */
    DIV = 30,

    /**
     * Computes the mean of elements across dimensions of a tensor.
     *
     * Reduces input tensor along the dimensions given in axis. Unless keep_dims is true,
     * the rank of the tensor is reduced by 1 for each entry in axis. If keep_dims is
     * true, the reduced dimensions are retained with length 1.
     *
     * If axis has no entries, all dimensions are reduced, and a tensor with a single
     * element is returned.
     *
     * Supported tensor types: {@link OperandType::TENSOR_FLOAT32}
     *                         {@link OperandType::TENSOR_QUANT8_ASYMM}
     * Supported tensor rank: up to 4
     *
     * Inputs:
     * 0: A tensor, specifying the input.
     * 1: A 1-D Tensor of type TENSOR_INT32. The dimensions to reduce. If None (the default),
     *    reduces all dimensions. Must be in the range [-rank(input_tensor), rank(input_tensor)).
     * 2: An INT32 value, keep_dims. If positive, retains reduced dimensions with length 1.
     *
     * Outputs:
     * 0: A tensor of the same type as input0.
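     *
     * Example (illustrative; reducing axis 1 with keep_dims = 0):
     *     input0.dimension = {4, 2}
     *     input1 (axis)    = [1]
     *     output.dimension = {4}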
     */
    MEAN = 31,

    /**
     * Pads a tensor.
     *
     * This operation pads a tensor according to the specified paddings.
     *
     * Supported tensor types: {@link OperandType::TENSOR_FLOAT32}
     *                         {@link OperandType::TENSOR_QUANT8_ASYMM}
     * Supported tensor rank: up to 4
     *
     * Inputs:
     * 0: An n-D tensor, specifying the input.
     * 1: A 2-D Tensor of type TENSOR_INT32. The paddings, before and after, for each spatial
     *    dimension of the input tensor.
     *
     * Outputs:
     * 0: A tensor of the same type as input0.
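     *
     * Example (illustrative; padding one element on each side of dimension 1 only):
     *     input0.dimension  = {1, 2, 2, 1}
     *     input1 (paddings) = {{0, 0}, {1, 1}, {0, 0}, {0, 0}}
     *     output.dimension  = {1, 4, 2, 1}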
     */
    PAD = 32,

    /**
     * SpaceToBatch for N-D tensors.
     *
     * This operation divides "spatial" dimensions [1, ..., M] of the input into a grid of blocks
     * of shape block_shape, and interleaves these blocks with the "batch" dimension (0) such that
     * in the output, the spatial dimensions [1, ..., M] correspond to the position within the grid,
     * and the batch dimension combines both the position within a spatial block and the original
     * batch position. Prior to division into blocks, the spatial dimensions of the input are
     * optionally zero padded according to paddings.
     *
     * Supported tensor types: {@link OperandType::TENSOR_FLOAT32}
     *                         {@link OperandType::TENSOR_QUANT8_ASYMM}
     * Supported tensor rank: up to 4
     *
     * Inputs:
     * 0: An n-D tensor, specifying the input.
     * 1: A 1-D Tensor of type TENSOR_INT32, the block sizes for each spatial dimension of the
     *    input tensor. All values must be >= 1.
     * 2: A 2-D Tensor of type TENSOR_INT32, the paddings for each spatial dimension of the
     *    input tensor. All values must be >= 0.
     *
     * Outputs:
     * 0: A tensor of the same type as input0.
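     *
     * Example (illustrative; block size 2 along both spatial dimensions, no padding):
     *     input0.dimension = {1, 2, 2, 1}
     *     input1 (block)   = [2, 2]
     *     output.dimension = {4, 1, 1, 1}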
     */
    SPACE_TO_BATCH_ND = 33,

    /**
     * Removes dimensions of size 1 from the shape of a tensor.
     *
     * Given a tensor input, this operation returns a tensor of the same type with all
     * dimensions of size 1 removed. If you don't want to remove all size 1 dimensions,
     * you can remove specific size 1 dimensions by specifying axis.
     *
     * Supported tensor types: {@link OperandType::TENSOR_FLOAT32}
     *                         {@link OperandType::TENSOR_QUANT8_ASYMM}
     * Supported tensor rank: up to 4
     *
     * Inputs:
     * 0: An n-D tensor, specifying the input.
     * 1: A 1-D Tensor of type TENSOR_INT32. The dimensions to squeeze. If None (the default),
     *    squeezes all dimensions. If specified, only squeezes the dimensions listed. The dimension
     *    index starts at 0. It is an error to squeeze a dimension that is not 1.
     *
     * Outputs:
     * 0: A tensor of the same type as input0. Contains the same data as input, but has one or more
     *    dimensions of size 1 removed.
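     *
     * Example (illustrative; squeezing only axis 2):
     *     input0.dimension = {4, 1, 1, 2}
     *     input1 (axis)    = [2]
     *     output.dimension = {4, 1, 2}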
     */
    SQUEEZE = 34,

    /**
     * Extracts a strided slice of a tensor.
     *
     * This op extracts a slice of size (end-begin)/stride from the given input tensor.
     * Starting at the location specified by begin, the slice continues by adding
     * stride to the index until all dimensions are not less than end. Note that a stride can
     * be negative, which causes a reverse slice.
     *
     * Supported tensor types: {@link OperandType::TENSOR_FLOAT32}
     *                         {@link OperandType::TENSOR_QUANT8_ASYMM}
     * Supported tensor rank: up to 4
     *
     * Inputs:
     * 0: An n-D tensor, specifying the input.
     * 1: A 1-D Tensor of type TENSOR_INT32, the starts of the dimensions of the input
     *    tensor to be sliced.
     * 2: A 1-D Tensor of type TENSOR_INT32, the ends of the dimensions of the input
     *    tensor to be sliced.
     * 3: A 1-D Tensor of type TENSOR_INT32, the strides of the dimensions of the input
     *    tensor to be sliced.
     *
     * Outputs:
     * 0: A tensor of the same type as input0.
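     *
     * Example (illustrative; 1-D input):
     *     input0 = [1, 2, 3, 4]
     *     begin  = [1], end = [4], strides = [2]
     *     output = [2, 4]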
     */
    STRIDED_SLICE = 35,

    /**
     * Subtracts the second tensor from the first tensor, element-wise.
     *
     * Takes two input tensors of identical type and compatible dimensions. The output
     * is the result of subtracting the second input tensor from the first one, optionally
     * modified by an activation function.
     *
     * Two dimensions are compatible when:
     * 1. they are equal, or
     * 2. one of them is 1
     *
     * The size of the output is the maximum size along each dimension of the input operands.
     * It starts with the trailing dimensions, and works its way forward.
     *
     * Example:
     *     input1.dimension = {4, 1, 2}
     *     input2.dimension = {5, 4, 3, 1}
     *     output.dimension = {5, 4, 3, 2}
     *
     * Supported tensor types: {@link OperandType::TENSOR_FLOAT32}
     * Supported tensor rank: up to 4
     *
     * Inputs:
     * 0: An n-D tensor, specifying the first input.
     * 1: A tensor of the same type, and compatible dimensions as input0.
     * 2: An INT32 value, and has to be one of the {@link FusedActivationFunc} values.
     *    Specifies the activation to invoke on the result of each subtraction.
     *
     * Outputs:
     * 0: A tensor of the same type as input0.
     */
    SUB = 36,

    /**
     * Transposes the input tensor, permuting the dimensions according to the perm tensor.
     *
     * The returned tensor's dimension i must correspond to the input dimension perm[i].
     * If perm is not given, it is set to (n-1...0), where n is the rank of the input tensor.
     * Hence by default, this operation performs a regular matrix transpose on 2-D input Tensors.
     *
     * Supported tensor types: {@link OperandType::TENSOR_FLOAT32}
     *                         {@link OperandType::TENSOR_QUANT8_ASYMM}
     * Supported tensor rank: up to 4
     *
     * Inputs:
     * 0: An n-D tensor, specifying the input.
     * 1: A 1-D Tensor of type TENSOR_INT32, the permutation of the dimensions of the input
     *    tensor.
     *
     * Outputs:
     * 0: A tensor of the same type as input0.
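     *
     * Example (illustrative; 2-D matrix transpose):
     *     input0.dimension = {2, 3}
     *     input1 (perm)    = [1, 0]
     *     output.dimension = {3, 2}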
     */
    TRANSPOSE = 37,
};

/**
 * Describes one operation of the model's graph.
 */
struct Operation {
    /**
     * The operation type.
     */
    OperationType type;

    /**
     * Describes the table that contains the indexes of the inputs of the
     * operation. The offset is the index in the operandIndexes table.
     */
    vec<uint32_t> inputs;

    /**
     * Describes the table that contains the indexes of the outputs of the
     * operation. The offset is the index in the operandIndexes table.
     */
    vec<uint32_t> outputs;
};

/**
 * A Neural Network Model.
 *
 * This includes not only the execution graph, but also constant data such as
 * weights or scalars added at construction time. The only information that
 * may not be known is the shape of the input tensors.
 */
struct Model {
    /**
     * All operands included in the model.
     */
    vec<Operand> operands;

    /**
     * All operations included in the model.
     *
     * The operations are sorted into execution order.
     */
    vec<Operation> operations;

    /**
     * Input indexes of the model.
     *
     * Each value corresponds to the index of the operand in "operands".
     */
    vec<uint32_t> inputIndexes;

    /**
     * Output indexes of the model.
     *
     * Each value corresponds to the index of the operand in "operands".
     */
    vec<uint32_t> outputIndexes;

    /**
     * A byte buffer containing operand data that were copied into the model.
     *
     * An operand's value must be located here if and only if Operand::lifetime
     * equals OperandLifeTime::CONSTANT_COPY.
     */
    vec<uint8_t> operandValues;

    /**
     * A collection of shared memory pools containing operand data that were
     * registered by the model.
     *
     * An operand's value must be located here if and only if Operand::lifetime
     * equals OperandLifeTime::CONSTANT_REFERENCE.
     */
    vec<memory> pools;
};
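
For readers of the two structs above, a small sketch (not part of this change) of how the index tables are conventionally resolved in C++ driver code, assuming the usual NNAPI convention that Operation::inputs, Operation::outputs, Model::inputIndexes, and Model::outputIndexes are all offsets into Model::operands; the helper names are hypothetical.

// Reviewer sketch of index-table resolution against the generated types.
#include <android/hardware/neuralnetworks/1.0/types.h>
#include <android/hardware/neuralnetworks/1.1/types.h>

using ::android::hardware::neuralnetworks::V1_0::Operand;
using ::android::hardware::neuralnetworks::V1_1::Model;
using ::android::hardware::neuralnetworks::V1_1::Operation;

// Resolve input position i of an operation to the operand that backs it.
const Operand& inputOperand(const Model& model, const Operation& op, size_t i) {
    return model.operands[op.inputs[i]];
}

// Resolve the model's j-th top-level input to its operand.
const Operand& modelInputOperand(const Model& model, size_t j) {
    return model.operands[model.inputIndexes[j]];
}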