/*
 * Copyright (C) 2019 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "1.0/Utils.h"

#include "MemoryUtils.h"
#include "TestHarness.h"

#include <android-base/logging.h>
#include <android/hardware/neuralnetworks/1.0/types.h>
#include <android/hardware_buffer.h>
#include <android/hidl/allocator/1.0/IAllocator.h>
#include <android/hidl/memory/1.0/IMemory.h>
#include <hidlmemory/mapping.h>
#include <vndk/hardware_buffer.h>

#include <gtest/gtest.h>
#include <algorithm>
#include <cstring>
#include <functional>
#include <iostream>
#include <map>
#include <numeric>
#include <vector>

namespace android::hardware::neuralnetworks {

using namespace test_helper;
using hidl::memory::V1_0::IMemory;
using V1_0::DataLocation;
using V1_0::Request;
using V1_0::RequestArgument;

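// TestAshmem wraps an ashmem-backed hidl_memory region that is also mapped into the test
// process, so tests can both hand the memory to a driver and read/write it directly via getPointer().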
std::unique_ptr<TestAshmem> TestAshmem::create(uint32_t size) {
    auto ashmem = std::make_unique<TestAshmem>(size);
    return ashmem->mIsValid ? std::move(ashmem) : nullptr;
}

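// Allocates a shared memory region of the requested size and maps it into this process.
// On any failure, mIsValid stays false and create() returns nullptr.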
void TestAshmem::initialize(uint32_t size) {
    mIsValid = false;
    ASSERT_GT(size, 0);
    mHidlMemory = nn::allocateSharedMemory(size);
    ASSERT_TRUE(mHidlMemory.valid());
    mMappedMemory = mapMemory(mHidlMemory);
    ASSERT_NE(mMappedMemory, nullptr);
    mPtr = static_cast<uint8_t*>(static_cast<void*>(mMappedMemory->getPointer()));
    ASSERT_NE(mPtr, nullptr);
    mIsValid = true;
}

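// TestBlobAHWB wraps a BLOB-format AHardwareBuffer that stays locked for CPU access for the
// lifetime of the object and is exposed to drivers as a "hardware_buffer_blob" hidl_memory.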
std::unique_ptr<TestBlobAHWB> TestBlobAHWB::create(uint32_t size) {
    auto ahwb = std::make_unique<TestBlobAHWB>(size);
    return ahwb->mIsValid ? std::move(ahwb) : nullptr;
}

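// Allocates a BLOB AHardwareBuffer of the requested size (width = size, height = layers = 1),
// locks it for CPU read/write, and wraps its native handle in a hidl_memory. On any failure,
// mIsValid stays false and create() returns nullptr.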
void TestBlobAHWB::initialize(uint32_t size) {
    mIsValid = false;
    ASSERT_GT(size, 0);
    const auto usage =
            AHARDWAREBUFFER_USAGE_CPU_READ_OFTEN | AHARDWAREBUFFER_USAGE_CPU_WRITE_OFTEN;
    const AHardwareBuffer_Desc desc = {
            .width = size,
            .height = 1,
            .layers = 1,
            .format = AHARDWAREBUFFER_FORMAT_BLOB,
            .usage = usage,
            .stride = size,
    };
    ASSERT_EQ(AHardwareBuffer_allocate(&desc, &mAhwb), 0);
    ASSERT_NE(mAhwb, nullptr);

    void* buffer = nullptr;
    ASSERT_EQ(AHardwareBuffer_lock(mAhwb, usage, -1, nullptr, &buffer), 0);
    ASSERT_NE(buffer, nullptr);
    mPtr = static_cast<uint8_t*>(buffer);

    const native_handle_t* handle = AHardwareBuffer_getNativeHandle(mAhwb);
    ASSERT_NE(handle, nullptr);
    mHidlMemory = hidl_memory("hardware_buffer_blob", handle, desc.width);
    mIsValid = true;
}

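// Unlocks and releases the AHardwareBuffer, if one was allocated.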
TestBlobAHWB::~TestBlobAHWB() {
    if (mAhwb) {
        AHardwareBuffer_unlock(mAhwb, nullptr);
        AHardwareBuffer_release(mAhwb);
    }
}

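// Builds a V1_0::Request for the given test model: input operand data is packed into one
// memory pool and output buffers into another, with each request argument described by a
// DataLocation into its pool. A typical (illustrative) test flow is:
//   ExecutionContext context;
//   const Request request = context.createRequest(testModel, MemoryType::ASHMEM);
//   ... execute the request against the driver under test ...
//   const std::vector<TestBuffer> outputs = context.getOutputBuffers(request);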
Request ExecutionContext::createRequest(const TestModel& testModel, MemoryType memoryType) {
    CHECK(memoryType == MemoryType::ASHMEM || memoryType == MemoryType::BLOB_AHWB);

    // Model inputs.
    hidl_vec<RequestArgument> inputs(testModel.main.inputIndexes.size());
    size_t inputSize = 0;
    for (uint32_t i = 0; i < testModel.main.inputIndexes.size(); i++) {
        const auto& op = testModel.main.operands[testModel.main.inputIndexes[i]];
        if (op.data.size() == 0) {
            // Omitted input.
            inputs[i] = {.hasNoValue = true};
        } else {
            DataLocation loc = {.poolIndex = kInputPoolIndex,
                                .offset = static_cast<uint32_t>(inputSize),
                                .length = static_cast<uint32_t>(op.data.size())};
            inputSize += op.data.alignedSize();
            inputs[i] = {.hasNoValue = false, .location = loc, .dimensions = {}};
        }
    }

    // Model outputs.
    hidl_vec<RequestArgument> outputs(testModel.main.outputIndexes.size());
    size_t outputSize = 0;
    for (uint32_t i = 0; i < testModel.main.outputIndexes.size(); i++) {
        const auto& op = testModel.main.operands[testModel.main.outputIndexes[i]];

        // In the case of zero-sized output, we should at least provide a one-byte buffer.
        // This is because zero-sized tensors are only supported internally to the driver, or
        // reported in output shapes. It is illegal for the client to pre-specify a zero-sized
        // tensor as model output. Otherwise, we will have two semantic conflicts:
        // - "Zero dimension" conflicts with "unspecified dimension".
        // - "Omitted operand buffer" conflicts with "zero-sized operand buffer".
        size_t bufferSize = std::max<size_t>(op.data.size(), 1);

        DataLocation loc = {.poolIndex = kOutputPoolIndex,
                            .offset = static_cast<uint32_t>(outputSize),
                            .length = static_cast<uint32_t>(bufferSize)};
        outputSize += op.data.size() == 0 ? TestBuffer::kAlignment : op.data.alignedSize();
        outputs[i] = {.hasNoValue = false, .location = loc, .dimensions = {}};
    }

    // Allocate memory pools.
    if (memoryType == MemoryType::ASHMEM) {
        mInputMemory = TestAshmem::create(inputSize);
        mOutputMemory = TestAshmem::create(outputSize);
    } else {
        mInputMemory = TestBlobAHWB::create(inputSize);
        mOutputMemory = TestBlobAHWB::create(outputSize);
    }
    EXPECT_NE(mInputMemory, nullptr);
    EXPECT_NE(mOutputMemory, nullptr);
    hidl_vec<hidl_memory> pools = {mInputMemory->getHidlMemory(), mOutputMemory->getHidlMemory()};

    // Copy input data to the memory pool.
    uint8_t* inputPtr = mInputMemory->getPointer();
    for (uint32_t i = 0; i < testModel.main.inputIndexes.size(); i++) {
        const auto& op = testModel.main.operands[testModel.main.inputIndexes[i]];
        if (op.data.size() > 0) {
            const uint8_t* begin = op.data.get<uint8_t>();
            const uint8_t* end = begin + op.data.size();
            std::copy(begin, end, inputPtr + inputs[i].location.offset);
        }
    }

    return {.inputs = std::move(inputs), .outputs = std::move(outputs), .pools = std::move(pools)};
}

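// Copies each output region out of the output memory pool into a standalone TestBuffer,
// so the results can be compared against the expected outputs of the test model.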
std::vector<TestBuffer> ExecutionContext::getOutputBuffers(const Request& request) const {
    // Copy out output results.
    uint8_t* outputPtr = mOutputMemory->getPointer();
    std::vector<TestBuffer> outputBuffers;
    for (const auto& output : request.outputs) {
        outputBuffers.emplace_back(output.location.length, outputPtr + output.location.offset);
    }
    return outputBuffers;
}

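// Returns the size in bytes of a single element of the given operand type.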
uint32_t sizeOfData(V1_0::OperandType type) {
    switch (type) {
        case V1_0::OperandType::FLOAT32:
        case V1_0::OperandType::INT32:
        case V1_0::OperandType::UINT32:
        case V1_0::OperandType::TENSOR_FLOAT32:
        case V1_0::OperandType::TENSOR_INT32:
            return 4;
        case V1_0::OperandType::TENSOR_QUANT8_ASYMM:
            return 1;
        default:
            CHECK(false) << "Invalid OperandType " << static_cast<uint32_t>(type);
            return 0;
    }
}

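// Returns whether the given operand type is a tensor type (as opposed to a scalar type).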
static bool isTensor(V1_0::OperandType type) {
    switch (type) {
        case V1_0::OperandType::FLOAT32:
        case V1_0::OperandType::INT32:
        case V1_0::OperandType::UINT32:
            return false;
        case V1_0::OperandType::TENSOR_FLOAT32:
        case V1_0::OperandType::TENSOR_INT32:
        case V1_0::OperandType::TENSOR_QUANT8_ASYMM:
            return true;
        default:
            CHECK(false) << "Invalid OperandType " << static_cast<uint32_t>(type);
            return false;
    }
}

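// Returns the total size in bytes of the operand's data: the element size multiplied by every
// dimension. A tensor operand with an empty dimensions vector (unspecified shape) has size 0.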
uint32_t sizeOfData(const V1_0::Operand& operand) {
    const uint32_t dataSize = sizeOfData(operand.type);
    if (isTensor(operand.type) && operand.dimensions.size() == 0) return 0;
    return std::accumulate(operand.dimensions.begin(), operand.dimensions.end(), dataSize,
                           std::multiplies<>{});
}

std::string gtestCompliantName(std::string name) {
    // gtest test names must only contain alphanumeric characters
    std::replace_if(
            name.begin(), name.end(), [](char c) { return !std::isalnum(c); }, '_');
    return name;
}

}  // namespace android::hardware::neuralnetworks

namespace android::hardware::neuralnetworks::V1_0 {

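// Stream operators so that gtest failure messages print enum names rather than raw values.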
::std::ostream& operator<<(::std::ostream& os, ErrorStatus errorStatus) {
    return os << toString(errorStatus);
}

::std::ostream& operator<<(::std::ostream& os, DeviceStatus deviceStatus) {
    return os << toString(deviceStatus);
}

}  // namespace android::hardware::neuralnetworks::V1_0