Merge changes from topic 'nnapi_hal_move' into oc-mr1-dev

* changes:
  Initial VTS tests for Neural Networks HAL.
  Move neuralnetworks HAL to hardware/interfaces
This commit is contained in:
Michael Butler 2017-08-01 23:28:18 +00:00 committed by Android (Google) Code Review
commit 9154eae534
8 changed files with 669 additions and 0 deletions

View file

@ -0,0 +1,70 @@
// This file is autogenerated by hidl-gen. Do not edit manually.
// The .hal source files defining the 1.0 neuralnetworks interface.
// Consumed by the hidl-gen genrules below.
filegroup {
name: "android.hardware.neuralnetworks@1.0_hal",
srcs: [
"types.hal",
"IDevice.hal",
"IPreparedModel.hal",
],
}
// Runs hidl-gen over the .hal filegroup to generate the C++ source files
// (types + proxy/stub implementations) for the 1.0 interface.
genrule {
name: "android.hardware.neuralnetworks@1.0_genc++",
tools: ["hidl-gen"],
cmd: "$(location hidl-gen) -o $(genDir) -Lc++-sources -randroid.hardware:hardware/interfaces -randroid.hidl:system/libhidl/transport android.hardware.neuralnetworks@1.0",
srcs: [
":android.hardware.neuralnetworks@1.0_hal",
],
out: [
"android/hardware/neuralnetworks/1.0/types.cpp",
"android/hardware/neuralnetworks/1.0/DeviceAll.cpp",
"android/hardware/neuralnetworks/1.0/PreparedModelAll.cpp",
],
}
// Runs hidl-gen over the same .hal filegroup to generate the matching C++
// headers (interface, Bn/Bp/Bs bindings, and hw types).
genrule {
name: "android.hardware.neuralnetworks@1.0_genc++_headers",
tools: ["hidl-gen"],
cmd: "$(location hidl-gen) -o $(genDir) -Lc++-headers -randroid.hardware:hardware/interfaces -randroid.hidl:system/libhidl/transport android.hardware.neuralnetworks@1.0",
srcs: [
":android.hardware.neuralnetworks@1.0_hal",
],
out: [
"android/hardware/neuralnetworks/1.0/types.h",
"android/hardware/neuralnetworks/1.0/hwtypes.h",
"android/hardware/neuralnetworks/1.0/IDevice.h",
"android/hardware/neuralnetworks/1.0/IHwDevice.h",
"android/hardware/neuralnetworks/1.0/BnHwDevice.h",
"android/hardware/neuralnetworks/1.0/BpHwDevice.h",
"android/hardware/neuralnetworks/1.0/BsDevice.h",
"android/hardware/neuralnetworks/1.0/IPreparedModel.h",
"android/hardware/neuralnetworks/1.0/IHwPreparedModel.h",
"android/hardware/neuralnetworks/1.0/BnHwPreparedModel.h",
"android/hardware/neuralnetworks/1.0/BpHwPreparedModel.h",
"android/hardware/neuralnetworks/1.0/BsPreparedModel.h",
],
}
// The shared library built from the generated sources/headers above.
// Both HAL clients and implementations link against this; the generated
// headers are re-exported so dependents can include the interface types.
// vendor_available allows vendor-partition modules to link it as well.
cc_library_shared {
name: "android.hardware.neuralnetworks@1.0",
defaults: ["hidl-module-defaults"],
generated_sources: ["android.hardware.neuralnetworks@1.0_genc++"],
generated_headers: ["android.hardware.neuralnetworks@1.0_genc++_headers"],
export_generated_headers: ["android.hardware.neuralnetworks@1.0_genc++_headers"],
vendor_available: true,
shared_libs: [
"libhidlbase",
"libhidltransport",
"libhwbinder",
"liblog",
"libutils",
"libcutils",
],
export_shared_lib_headers: [
"libhidlbase",
"libhidltransport",
"libhwbinder",
"libutils",
],
}

View file

@ -0,0 +1,31 @@
/*
* Copyright (C) 2017 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/* This HAL is a work in progress */
package android.hardware.neuralnetworks@1.0;
import IPreparedModel;
// Entry point for a neural-networks driver: reports capabilities and
// status, and compiles models into executable IPreparedModel objects.
interface IDevice {
// Returns the driver's capabilities: its supported operation types and
// performance characteristics. See the Capabilities struct in types.hal.
initialize() generates(Capabilities capabilities);
// Returns one bool per element of the model, reporting what the driver
// supports. NOTE(review): presumably one entry per operation in
// model.operations (the VTS test's commented-out expectation compares
// against model.operations.size()) -- confirm.
getSupportedSubgraph(Model model) generates(vec<bool> supported);
// Compiles the given model, returning a handle that can be used to
// execute it via IPreparedModel::execute.
prepareModel(Model model) generates(IPreparedModel preparedModel);
// Returns the current availability status of the device.
getStatus() generates(DeviceStatus status);
};

View file

@ -0,0 +1,25 @@
/*
* Copyright (C) 2017 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/* This HAL is a work in progress */
package android.hardware.neuralnetworks@1.0;
// A model that has been compiled by IDevice::prepareModel and is ready
// to be executed.
interface IPreparedModel {
// TODO: The execution is synchronous. Change that to have a callback on completion.
// Multiple threads can call this execute function concurrently.
// Runs the model on the inputs described by the request, writing results
// to the request's output locations. Returns true on success.
execute(Request request) generates(bool success);
};

View file

@ -0,0 +1,174 @@
/*
* Copyright (C) 2017 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/* This HAL is a work in progress */
package android.hardware.neuralnetworks@1.0;
// The types an operand can have.
// These values are the same as found in the NeuralNetworks.h file.
// When modifying, be sure to update HAL_NUM_OPERAND_TYPES in HalInterfaces.h.
enum OperandType : uint32_t {
FLOAT16 = 0,
FLOAT32 = 1,
INT8 = 2,
UINT8 = 3,
INT16 = 4,
UINT16 = 5,
INT32 = 6,
UINT32 = 7,
TENSOR_FLOAT16 = 8,
TENSOR_FLOAT32 = 9,
// NOTE(review): presumably an 8-bit symmetrically quantized tensor,
// interpreted via Operand.scale/zeroPoint -- confirm.
TENSOR_SYMMETRICAL_QUANT8 = 10,
};
// The type of operations. Unlike the operation types found in
// NeuralNetworks.h file, these specify the data type they operate on.
// This is done to simplify the work of drivers.
// TODO: Currently they are the same. Add a conversion when finalizing the model.
// When modifying, be sure to update HAL_NUM_OPERATION_TYPES in HalInterfaces.h.
enum OperationType : uint32_t {
AVERAGE_POOL_FLOAT32 = 0,
CONCATENATION_FLOAT32 = 1,
CONV_FLOAT32 = 2,
DEPTHWISE_CONV_FLOAT32 = 3,
MAX_POOL_FLOAT32 = 4,
L2_POOL_FLOAT32 = 5,
DEPTH_TO_SPACE_FLOAT32 = 6,
SPACE_TO_DEPTH_FLOAT32 = 7,
LOCAL_RESPONSE_NORMALIZATION_FLOAT32 = 8,
SOFTMAX_FLOAT32 = 9,
RESHAPE_FLOAT32 = 10,
SPLIT_FLOAT32 = 11,
FAKE_QUANT_FLOAT32 = 12,
ADD_FLOAT32 = 13,
FULLY_CONNECTED_FLOAT32 = 14,
CAST_FLOAT32 = 15,
MUL_FLOAT32 = 16,
L2_NORMALIZATION_FLOAT32 = 17,
LOGISTIC_FLOAT32 = 18,
RELU_FLOAT32 = 19,
RELU6_FLOAT32 = 20,
RELU1_FLOAT32 = 21,
TANH_FLOAT32 = 22,
DEQUANTIZE_FLOAT32 = 23,
FLOOR_FLOAT32 = 24,
GATHER_FLOAT32 = 25,
RESIZE_BILINEAR_FLOAT32 = 26,
LSH_PROJECTION_FLOAT32 = 27,
LSTM_FLOAT32 = 28,
SVDF_FLOAT32 = 29,
RNN_FLOAT32 = 30,
N_GRAM_FLOAT32 = 31,
LOOKUP_FLOAT32 = 32,
};
// Two special values that can be used instead of a regular poolIndex.
enum LocationValues : uint32_t {
// The location will be specified at runtime. It's either a temporary
// variable, an input, or an output.
LOCATION_AT_RUN_TIME = 0xFFFFFFFF,
// The operand's value is stored inline with the model rather than in a
// memory pool -- presumably in Model.operandValues, with the location's
// offset/length indexing into that byte vector (this is how the VTS test
// uses it). NOTE(review): the original comment was truncated here
// ("stored in the") -- confirm against the driver contract.
// TODO: Only for old
LOCATION_SAME_BLOCK = 0xFFFFFFFE
};
// Status of a device, as reported by IDevice::getStatus().
enum DeviceStatus : uint32_t {
AVAILABLE,
BUSY,
OFFLINE,
UNKNOWN // Do we need this?
};
// For the reference workload
// Used by a driver to report its performance characteristics.
// NOTE(review): presumably these figures are measured on the reference
// workload named above -- confirm.
// TODO revisit the data types and scales.
struct PerformanceInfo {
float execTime; // in nanoseconds
float powerUsage; // in picoJoules
};
// The capabilities of a driver, reported by IDevice::initialize().
struct Capabilities {
// The operation types this driver can execute.
vec<OperationType> supportedOperationTypes;
// TODO Do the same for baseline model IDs
// Whether the driver caches prepared models.
// NOTE(review): inferred from the name -- confirm.
bool cachesCompilation;
// TODO revisit the data types and scales.
float bootupTime; // in nanoseconds
// Expected performance when operating on the corresponding data type.
PerformanceInfo float16Performance;
PerformanceInfo float32Performance;
PerformanceInfo quantized8Performance;
};
// Describes the location of a data object.
// Used by Operand.location and InputOutputInfo.location.
struct DataLocation {
// The index of the memory pool where this location is found.
// Two special values can also be used. See the LOCATION_* constants above.
uint32_t poolIndex;
// Offset in bytes from the start of the pool.
uint32_t offset;
// The length of the data, in bytes.
uint32_t length;
};
// Describes one operand (a data object) of a model's graph.
struct Operand {
// The data type of the operand; see OperandType.
OperandType type;
// The dimensions of the operand.
vec<uint32_t> dimensions;
// The number of operations that uses this operand as input.
// TODO It would be nice to track the actual consumers, e.g. vec<uint32_t> consumers;
uint32_t numberOfConsumers;
// Quantization parameters. NOTE(review): presumably only meaningful for
// quantized types such as TENSOR_SYMMETRICAL_QUANT8 -- confirm.
float scale;
int32_t zeroPoint;
// Where to find the data for this operand.
DataLocation location;
};
// Describes one operation of the graph.
struct Operation {
// The type of operation.
OperationType type;
// Indexes of the operands consumed as inputs by this operation.
// NOTE(review): Model declares no "operandIndexes" table as the original
// comment claimed; the VTS test uses these as indexes into
// Model.operands -- confirm and reconcile.
vec<uint32_t> inputs;
// Indexes of the operands produced as outputs by this operation.
// NOTE(review): same caveat as for inputs above.
vec<uint32_t> outputs;
};
// Describes one input or output argument of an execution Request.
struct InputOutputInfo {
// Where to read (input) or write (output) the data; poolIndex refers to
// Request.pools.
DataLocation location;
// If dimensions.size() > 0, we have updated dimensions.
vec<uint32_t> dimensions;
};
// A complete model: a graph of operations over a set of operands.
struct Model {
// All operands of the graph; other fields refer to operands by their
// index into this vector.
vec<Operand> operands;
// The operations making up the graph.
vec<Operation> operations;
// Indexes (into operands) of the model-level inputs and outputs.
vec<uint32_t> inputIndexes;
vec<uint32_t> outputIndexes;
// Backing bytes for constant operand data; the VTS test stores data for
// operands whose location uses LOCATION_SAME_BLOCK here. NOTE(review):
// confirm against the driver contract.
vec<uint8_t> operandValues;
// Shared memory regions that operand DataLocation.poolIndex may refer to.
vec<memory> pools;
};
// Arguments for a single execution of a prepared model
// (IPreparedModel::execute).
struct Request {
// Per-argument locations; each DataLocation.poolIndex refers to the
// pools vector below.
vec<InputOutputInfo> inputs;
vec<InputOutputInfo> outputs;
// Shared memory regions holding the input and output data.
vec<memory> pools;
};

View file

@ -0,0 +1,37 @@
//
// Copyright (C) 2017 The Android Open Source Project
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// VTS target-side test binary for the neuralnetworks 1.0 HAL.
cc_test {
name: "VtsHalNeuralnetworksV1_0TargetTest",
srcs: ["VtsHalNeuralnetworksV1_0TargetTest.cpp"],
defaults: ["hidl_defaults"],
shared_libs: [
"libbase",
"libhidlbase",
"libhidlmemory",
"libhidltransport",
"liblog",
"libutils",
"android.hardware.neuralnetworks@1.0",
"android.hidl.allocator@1.0",
"android.hidl.memory@1.0",
],
static_libs: ["VtsHalHidlTargetTestBase"],
// Unoptimized build with debug symbols, for easier debugging of test
// failures on target.
cflags: [
"-O0",
"-g",
],
}

View file

@ -0,0 +1,245 @@
/*
* Copyright (C) 2017 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#define LOG_TAG "neuralnetworks_hidl_hal_test"
#include "VtsHalNeuralnetworksV1_0TargetTest.h"
#include <android-base/logging.h>
#include <android/hidl/memory/1.0/IMemory.h>
#include <hidlmemory/mapping.h>
#include <string>
namespace android {
namespace hardware {
namespace neuralnetworks {
namespace V1_0 {
namespace vts {
namespace functional {
// A class for test environment setup
// Private default constructor: instances are created only via getInstance().
NeuralnetworksHidlEnvironment::NeuralnetworksHidlEnvironment() {}
// Returns the process-wide singleton test environment.
NeuralnetworksHidlEnvironment* NeuralnetworksHidlEnvironment::getInstance() {
// This has to return a "new" object because it is freed inside
// ::testing::AddGlobalTestEnvironment when the gtest is being torn down
static NeuralnetworksHidlEnvironment* instance = new NeuralnetworksHidlEnvironment();
return instance;
}
// Registers the IDevice HAL service under test so the VTS framework can
// enumerate its instances and supply them to the tests.
void NeuralnetworksHidlEnvironment::registerTestServices() {
registerTestService("android.hardware.neuralnetworks", "1.0", "IDevice");
}
// The main test class for NEURALNETWORKS HIDL HAL.
// Per-test setup: obtains the IDevice instance selected by the test
// environment and aborts the test if the service cannot be reached.
void NeuralnetworksHidlTest::SetUp() {
std::string instance =
NeuralnetworksHidlEnvironment::getInstance()->getServiceName(IDevice::descriptor);
LOG(INFO) << "running vts test with instance: " << instance;
device = ::testing::VtsHalHidlTargetTestBase::getService<IDevice>(instance);
ASSERT_NE(nullptr, device.get());
}
// Per-test teardown: nothing to release explicitly; the device strong
// pointer is dropped when the fixture is destroyed.
void NeuralnetworksHidlTest::TearDown() {}
// create device test
// Empty body: the test passes iff SetUp() successfully obtained an IDevice.
TEST_F(NeuralnetworksHidlTest, CreateDevice) {}
// status test
// Verifies that a freshly obtained device reports itself as AVAILABLE.
TEST_F(NeuralnetworksHidlTest, StatusTest) {
    // BUG FIX: getStatus() returns Return<DeviceStatus>; the original
    // assigned it straight to DeviceStatus, which silently ignores binder
    // transport failures. Check isOk() first (consistent with
    // InitializeTest below).
    Return<DeviceStatus> ret = device->getStatus();
    ASSERT_TRUE(ret.isOk());
    EXPECT_EQ(DeviceStatus::AVAILABLE, static_cast<DeviceStatus>(ret));
}
// initialization
// Verifies that initialize() succeeds and reports sane capabilities:
// a non-empty supported-operation list, a boolean-valued cachesCompilation,
// and strictly positive timing/power figures.
TEST_F(NeuralnetworksHidlTest, InitializeTest) {
Return<void> ret = device->initialize([](const Capabilities& capabilities) {
EXPECT_NE(nullptr, capabilities.supportedOperationTypes.data());
EXPECT_NE(0ull, capabilities.supportedOperationTypes.size());
// cachesCompilation must be exactly 0 or 1 when viewed as an integer.
EXPECT_EQ(0u, static_cast<uint32_t>(capabilities.cachesCompilation) & ~0x1);
EXPECT_LT(0.0f, capabilities.bootupTime);
EXPECT_LT(0.0f, capabilities.float16Performance.execTime);
EXPECT_LT(0.0f, capabilities.float16Performance.powerUsage);
EXPECT_LT(0.0f, capabilities.float32Performance.execTime);
EXPECT_LT(0.0f, capabilities.float32Performance.powerUsage);
EXPECT_LT(0.0f, capabilities.quantized8Performance.execTime);
EXPECT_LT(0.0f, capabilities.quantized8Performance.powerUsage);
});
EXPECT_TRUE(ret.isOk());
}
namespace {
// create the model
// Builds a minimal test model containing a single ADD_FLOAT32 operation:
//   operand1 (runtime input) + operand2 (constant {5,6,7,8} stored in
//   operandValues) -> operand3 (runtime output)
// All three operands are declared with dimensions {1, 2, 2, 1}.
// NOTE(review): operands use OperandType::FLOAT32 (a scalar type) together
// with 4-D dimensions -- confirm whether TENSOR_FLOAT32 was intended.
Model createTestModel() {
// Constant data for operand2, copied byte-wise into operandValues below.
const std::vector<float> operand2Data = {5.0f, 6.0f, 7.0f, 8.0f};
const uint32_t size = operand2Data.size() * sizeof(float);
// Operand indexes into the operands vector.
const uint32_t operand1 = 0;
const uint32_t operand2 = 1;
const uint32_t operand3 = 2;
const std::vector<Operand> operands = {
// operand1: model input, supplied at run time via the Request.
{
.type = OperandType::FLOAT32,
.dimensions = {1, 2, 2, 1},
.numberOfConsumers = 1,
.scale = 0.0f,
.zeroPoint = 0,
.location = {.poolIndex = static_cast<uint32_t>(LocationValues::LOCATION_AT_RUN_TIME),
.offset = 0,
.length = 0},
},
// operand2: constant, stored in operandValues (LOCATION_SAME_BLOCK).
{
.type = OperandType::FLOAT32,
.dimensions = {1, 2, 2, 1},
.numberOfConsumers = 1,
.scale = 0.0f,
.zeroPoint = 0,
.location = {.poolIndex = static_cast<uint32_t>(LocationValues::LOCATION_SAME_BLOCK),
.offset = 0,
.length = size},
},
// operand3: model output, written at run time; no consumers.
{
.type = OperandType::FLOAT32,
.dimensions = {1, 2, 2, 1},
.numberOfConsumers = 0,
.scale = 0.0f,
.zeroPoint = 0,
.location = {.poolIndex = static_cast<uint32_t>(LocationValues::LOCATION_AT_RUN_TIME),
.offset = 0,
.length = 0},
},
};
// The single operation: operand3 = operand1 + operand2.
const std::vector<Operation> operations = {{
.type = OperationType::ADD_FLOAT32, .inputs = {operand1, operand2}, .outputs = {operand3},
}};
const std::vector<uint32_t> inputIndexes = {operand1};
const std::vector<uint32_t> outputIndexes = {operand3};
// Reinterpret operand2's floats as raw bytes for the operandValues blob.
const std::vector<uint8_t> operandValues(reinterpret_cast<const uint8_t*>(operand2Data.data()),
reinterpret_cast<const uint8_t*>(operand2Data.data()) +
operand2Data.size() * sizeof(float));
// No shared memory pools: all data is inline or supplied at run time.
const std::vector<hidl_memory> pools = {};
return {
.operands = operands,
.operations = operations,
.inputIndexes = inputIndexes,
.outputIndexes = outputIndexes,
.operandValues = operandValues,
.pools = pools,
};
}
// Helper that requests a shared-memory region of |size| bytes from the
// named IAllocator service ("ashmem" by default). Returns an empty
// hidl_memory if the allocator service is unavailable, the allocation
// fails, or the transaction does not complete.
hidl_memory allocateSharedMemory(int64_t size, const std::string& type = "ashmem") {
    sp<IAllocator> allocator = IAllocator::getService(type);
    if (allocator.get() == nullptr) {
        return {};
    }
    hidl_memory result;
    Return<void> transaction =
        allocator->allocate(size, [&result](bool ok, const hidl_memory& mem) {
            ASSERT_TRUE(ok);
            result = mem;
        });
    if (!transaction.isOk()) {
        return {};
    }
    return result;
}
} // anonymous namespace
// supported subgraph test
// Calls getSupportedSubgraph on the reference model and checks the call
// completes. NOTE(review): the real expectation (model.operations.size())
// is commented out and the size is hard-coded to 0 -- presumably a
// placeholder until drivers implement this method; confirm and restore.
TEST_F(NeuralnetworksHidlTest, SupportedSubgraphTest) {
Model model = createTestModel();
std::vector<bool> supported;
Return<void> ret = device->getSupportedSubgraph(
model, [&](const hidl_vec<bool>& hidl_supported) { supported = hidl_supported; });
ASSERT_TRUE(ret.isOk());
EXPECT_EQ(/*model.operations.size()*/ 0ull, supported.size());
}
// execute simple graph
// End-to-end test: prepares the single-ADD model from createTestModel(),
// feeds {1,2,3,4} through shared memory, executes, and expects the
// element-wise sums with the constant operand {5,6,7,8}.
TEST_F(NeuralnetworksHidlTest, SimpleExecuteGraphTest) {
    std::vector<float> inputData = {1.0f, 2.0f, 3.0f, 4.0f};
    std::vector<float> outputData = {-1.0f, -1.0f, -1.0f, -1.0f};
    std::vector<float> expectedData = {6.0f, 8.0f, 10.0f, 12.0f};
    // Pool indexes for the input and output shared-memory regions.
    const uint32_t INPUT = 0;
    const uint32_t OUTPUT = 1;
    // prepare request
    Model model = createTestModel();
    sp<IPreparedModel> preparedModel = device->prepareModel(model);
    ASSERT_NE(nullptr, preparedModel.get());
    // prepare inputs
    uint32_t inputSize = static_cast<uint32_t>(inputData.size() * sizeof(float));
    uint32_t outputSize = static_cast<uint32_t>(outputData.size() * sizeof(float));
    std::vector<InputOutputInfo> inputs = {{
        .location = {.poolIndex = INPUT, .offset = 0, .length = inputSize}, .dimensions = {},
    }};
    std::vector<InputOutputInfo> outputs = {{
        .location = {.poolIndex = OUTPUT, .offset = 0, .length = outputSize}, .dimensions = {},
    }};
    std::vector<hidl_memory> pools = {allocateSharedMemory(inputSize),
                                      allocateSharedMemory(outputSize)};
    ASSERT_NE(0ull, pools[INPUT].size());
    ASSERT_NE(0ull, pools[OUTPUT].size());
    // load data into the mapped shared-memory regions
    sp<IMemory> inputMemory = mapMemory(pools[INPUT]);
    sp<IMemory> outputMemory = mapMemory(pools[OUTPUT]);
    ASSERT_NE(nullptr, inputMemory.get());
    ASSERT_NE(nullptr, outputMemory.get());
    float* inputPtr = reinterpret_cast<float*>(static_cast<void*>(inputMemory->getPointer()));
    float* outputPtr = reinterpret_cast<float*>(static_cast<void*>(outputMemory->getPointer()));
    ASSERT_NE(nullptr, inputPtr);
    ASSERT_NE(nullptr, outputPtr);
    std::copy(inputData.begin(), inputData.end(), inputPtr);
    std::copy(outputData.begin(), outputData.end(), outputPtr);
    inputMemory->commit();
    outputMemory->commit();
    // execute request
    // BUG FIX: execute() returns Return<bool>; the original assigned it
    // straight to bool, silently ignoring binder transport failures.
    // Check isOk() explicitly before inspecting the result.
    Return<bool> executeStatus =
        preparedModel->execute({.inputs = inputs, .outputs = outputs, .pools = pools});
    ASSERT_TRUE(executeStatus.isOk());
    EXPECT_TRUE(static_cast<bool>(executeStatus));
    // validate results { 1+5, 2+6, 3+7, 4+8 }
    outputMemory->update();
    std::copy(outputPtr, outputPtr + outputData.size(), outputData.begin());
    EXPECT_EQ(expectedData, outputData);
}
} // namespace functional
} // namespace vts
} // namespace V1_0
} // namespace neuralnetworks
} // namespace hardware
} // namespace android
using android::hardware::neuralnetworks::V1_0::vts::functional::NeuralnetworksHidlEnvironment;
int main(int argc, char** argv) {
::testing::AddGlobalTestEnvironment(NeuralnetworksHidlEnvironment::getInstance());
::testing::InitGoogleTest(&argc, argv);
NeuralnetworksHidlEnvironment::getInstance()->init(&argc, argv);
int status = RUN_ALL_TESTS();
return status;
}

View file

@ -0,0 +1,82 @@
/*
* Copyright (C) 2017 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef VTS_HAL_NEURALNETWORKS_V1_0_TARGET_TESTS_H
#define VTS_HAL_NEURALNETWORKS_V1_0_TARGET_TESTS_H
#include <android/hardware/neuralnetworks/1.0/IDevice.h>
#include <android/hardware/neuralnetworks/1.0/IPreparedModel.h>
#include <android/hardware/neuralnetworks/1.0/types.h>
#include <android/hidl/allocator/1.0/IAllocator.h>
#include <VtsHalHidlTargetTestBase.h>
#include <VtsHalHidlTargetTestEnvBase.h>
#include <gtest/gtest.h>
#include <string>
using ::android::hardware::neuralnetworks::V1_0::IDevice;
using ::android::hardware::neuralnetworks::V1_0::IPreparedModel;
using ::android::hardware::neuralnetworks::V1_0::Capabilities;
using ::android::hardware::neuralnetworks::V1_0::DeviceStatus;
using ::android::hardware::neuralnetworks::V1_0::Model;
using ::android::hardware::neuralnetworks::V1_0::OperationType;
using ::android::hardware::neuralnetworks::V1_0::PerformanceInfo;
using ::android::hardware::Return;
using ::android::hardware::Void;
using ::android::hardware::hidl_memory;
using ::android::hardware::hidl_string;
using ::android::hardware::hidl_vec;
using ::android::hidl::allocator::V1_0::IAllocator;
using ::android::hidl::memory::V1_0::IMemory;
using ::android::sp;
namespace android {
namespace hardware {
namespace neuralnetworks {
namespace V1_0 {
namespace vts {
namespace functional {
// A class for test environment setup
// Singleton gtest environment that registers the IDevice service under
// test. Obtain it only through getInstance(); copy and move are deleted
// to enforce the singleton.
class NeuralnetworksHidlEnvironment : public ::testing::VtsHalHidlTargetTestEnvBase {
NeuralnetworksHidlEnvironment();
NeuralnetworksHidlEnvironment(const NeuralnetworksHidlEnvironment&) = delete;
NeuralnetworksHidlEnvironment(NeuralnetworksHidlEnvironment&&) = delete;
NeuralnetworksHidlEnvironment& operator=(const NeuralnetworksHidlEnvironment&) = delete;
NeuralnetworksHidlEnvironment& operator=(NeuralnetworksHidlEnvironment&&) = delete;
public:
// Returns the process-wide singleton instance.
static NeuralnetworksHidlEnvironment* getInstance();
// Registers the IDevice HAL service with the VTS framework.
virtual void registerTestServices() override;
};
// The main test class for NEURALNETWORKS HIDL HAL.
// SetUp() obtains the IDevice instance under test and stores it in
// |device| for use by the test bodies.
class NeuralnetworksHidlTest : public ::testing::VtsHalHidlTargetTestBase {
public:
virtual void SetUp() override;
virtual void TearDown() override;
// Handle to the HAL service under test; valid after a successful SetUp().
sp<IDevice> device;
};
} // namespace functional
} // namespace vts
} // namespace V1_0
} // namespace neuralnetworks
} // namespace hardware
} // namespace android
#endif // VTS_HAL_NEURALNETWORKS_V1_0_TARGET_TESTS_H

View file

@ -0,0 +1,5 @@
// This is an autogenerated file, do not edit.
// Builds the 1.0 interface library and its VTS tests.
subdirs = [
"1.0",
"1.0/vts/functional",
]