/*
 * Copyright (C) 2017 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define LOG_TAG "Utils"

#include "Utils.h"

#include "NeuralNetworks.h"
#include "NeuralNetworksOEM.h"
#include "OperationResolver.h"
#include "ValidateHal.h"

#include <android-base/logging.h>
#include <android-base/properties.h>
#include <android-base/strings.h>
#include <sys/system_properties.h>
#include <algorithm>
#include <unordered_map>

using ::android::hidl::allocator::V1_0::IAllocator;

namespace android {
namespace nn {

const char kVLogPropKey[] = "debug.nn.vlog";
int vLogMask = ~0;
// Split the space-separated list of tags from the verbose log setting and
// build the logging mask from it. Note that '1' and 'all' are special cases
// that enable all verbose logging.
//
// The NN API verbose logging setting comes from the system property
// debug.nn.vlog. Examples:
//     setprop debug.nn.vlog 1 : enable all logging tags.
//     setprop debug.nn.vlog "model compilation" : only enable logging for the
//     MODEL and COMPILATION tags.
void initVLogMask() {
    vLogMask = 0;
    const std::string vLogSetting = android::base::GetProperty(kVLogPropKey, "");
    if (vLogSetting.empty()) {
        return;
    }

    std::unordered_map<std::string, int> vLogFlags = {
            {"1", -1},
            {"all", -1},
            {"model", MODEL},
            {"compilation", COMPILATION},
            {"execution", EXECUTION},
            {"cpuexe", CPUEXE},
            {"manager", MANAGER},
            {"driver", DRIVER}};

    std::vector<std::string> elements = android::base::Split(vLogSetting, " ,:");
    for (const auto& elem : elements) {
        const auto& flag = vLogFlags.find(elem);
        if (flag == vLogFlags.end()) {
            LOG(ERROR) << "Unknown trace flag: " << elem;
            continue;
        }

        if (flag->second == -1) {
            // -1 is used for the special values "1" and "all" that enable all
            // tracing.
            vLogMask = ~0;
            return;
        } else {
            vLogMask |= 1 << flag->second;
        }
    }
}
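
// How the mask is consumed (a sketch, not code from this file; VLOG_IS_ON is
// a hypothetical name for illustration, the real VLOG macro is defined in
// Utils.h and may differ in detail):
//
//     #define VLOG_IS_ON(TAG) ((vLogMask & (1 << (TAG))) != 0)
//     if (VLOG_IS_ON(MODEL)) LOG(INFO) << "model details...";
//
// With "setprop debug.nn.vlog model", only the MODEL bit is set, so only
// MODEL-tagged verbose logs are emitted.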
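// Operand and operation type codes are partitioned into ranges: core types,
// OEM types starting at kOEMCodeBase, and extension types above BASE_MAX.
// An extension type code packs the extension prefix in its high bits and the
// type within the extension in its low 16 bits (per the NNAPI extension
// encoding), so a single unsigned comparison against BASE_MAX suffices here.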
static bool isExtensionOperandType(int32_t type) {
    return static_cast<uint32_t>(type) > static_cast<uint32_t>(OperandTypeRange::BASE_MAX);
}

static bool isExtensionOperationType(ANeuralNetworksOperationType type) {
    return static_cast<uint32_t>(type) > static_cast<uint32_t>(OperationTypeRange::BASE_MAX);
}

bool isExtensionOperandType(OperandType type) {
    return isExtensionOperandType(static_cast<int32_t>(type));
}

bool isExtensionOperationType(OperationType type) {
    return isExtensionOperationType(static_cast<int32_t>(type));
}

namespace {

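// Looks up |code| in one of two parallel tables: plain codes index |table|
// directly, while OEM codes, offset by kOEMCodeBase, index |tableOEM|. For
// example, code kOEMCodeBase + 1 returns tableOEM[1].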
template <typename EntryType, uint32_t entryCount, uint32_t entryCountOEM>
EntryType tableLookup(const EntryType (&table)[entryCount],
                      const EntryType (&tableOEM)[entryCountOEM], uint32_t code) {
    if (code < entryCount) {
        return table[code];
    } else if (code >= kOEMCodeBase && (code - kOEMCodeBase) < entryCountOEM) {
        return tableOEM[code - kOEMCodeBase];
    } else {
        nnAssert(!"tableLookup: bad code");
        return EntryType();
    }
}

class OperationValidationContext : public IOperationValidationContext {
    DISALLOW_IMPLICIT_CONSTRUCTORS(OperationValidationContext);

  public:
    OperationValidationContext(uint32_t inputCount, const uint32_t* inputIndexes,
                               uint32_t outputCount, const uint32_t* outputIndexes,
                               const Operand* operands, HalVersion halVersion)
        : inputCount(inputCount),
          inputIndexes(inputIndexes),
          outputCount(outputCount),
          outputIndexes(outputIndexes),
          operands(operands),
          halVersion(halVersion) {}

    HalVersion getHalVersion() const override;

    uint32_t getNumInputs() const override;
    OperandType getInputType(uint32_t index) const override;
    Shape getInputShape(uint32_t index) const override;
    const Operand::ExtraParams getInputExtraParams(uint32_t index) const override;

    uint32_t getNumOutputs() const override;
    OperandType getOutputType(uint32_t index) const override;
    Shape getOutputShape(uint32_t index) const override;

  private:
    const Operand* getInputOperand(uint32_t index) const;
    const Operand* getOutputOperand(uint32_t index) const;

    uint32_t inputCount;
    const uint32_t* inputIndexes;
    uint32_t outputCount;
    const uint32_t* outputIndexes;
    const Operand* operands;
    HalVersion halVersion;
};

HalVersion OperationValidationContext::getHalVersion() const {
    return halVersion;
}

const Operand* OperationValidationContext::getInputOperand(uint32_t index) const {
    CHECK(index < static_cast<uint32_t>(inputCount));
    return &operands[inputIndexes[index]];
}

const Operand* OperationValidationContext::getOutputOperand(uint32_t index) const {
    CHECK(index < static_cast<uint32_t>(outputCount));
    return &operands[outputIndexes[index]];
}

uint32_t OperationValidationContext::getNumInputs() const {
    return inputCount;
}

uint32_t OperationValidationContext::getNumOutputs() const {
    return outputCount;
}

OperandType OperationValidationContext::getInputType(uint32_t index) const {
    return getInputOperand(index)->type;
}

Shape OperationValidationContext::getInputShape(uint32_t index) const {
    const Operand* operand = getInputOperand(index);
    return {operand->type, operand->dimensions, operand->scale, operand->zeroPoint,
            operand->extraParams};
}

const Operand::ExtraParams OperationValidationContext::getInputExtraParams(uint32_t index) const {
    return getInputOperand(index)->extraParams;
}

OperandType OperationValidationContext::getOutputType(uint32_t index) const {
    return getOutputOperand(index)->type;
}

Shape OperationValidationContext::getOutputShape(uint32_t index) const {
    const Operand* operand = getOutputOperand(index);
    return {operand->type, operand->dimensions, operand->scale, operand->zeroPoint,
            operand->extraParams};
}

}  // anonymous namespace

#define COUNT(X) (sizeof(X) / sizeof(X[0]))

std::string getOperandTypeName(OperandType type) {
    return toString(type);
}

static std::string getOperationName(uint32_t code) {
    return getOperationName(static_cast<OperationType>(code));
}

std::string getOperationName(OperationType type) {
    return toString(type);
}

const uint32_t kSizeOfDataType[]{
        4,  // ANEURALNETWORKS_FLOAT32
        4,  // ANEURALNETWORKS_INT32
        4,  // ANEURALNETWORKS_UINT32
        4,  // ANEURALNETWORKS_TENSOR_FLOAT32
        4,  // ANEURALNETWORKS_TENSOR_INT32
        1,  // ANEURALNETWORKS_TENSOR_QUANT8_ASYMM
        1,  // ANEURALNETWORKS_BOOL
        2,  // ANEURALNETWORKS_TENSOR_QUANT16_SYMM
        2,  // ANEURALNETWORKS_TENSOR_FLOAT16
        1,  // ANEURALNETWORKS_TENSOR_BOOL8
        2,  // ANEURALNETWORKS_FLOAT16
        1,  // ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL
        2,  // ANEURALNETWORKS_TENSOR_QUANT16_ASYMM
        1,  // ANEURALNETWORKS_TENSOR_QUANT8_SYMM
};

static_assert(COUNT(kSizeOfDataType) == kNumberOfDataTypes, "kSizeOfDataType is incorrect");

const bool kScalarDataType[]{
        true,   // ANEURALNETWORKS_FLOAT32
        true,   // ANEURALNETWORKS_INT32
        true,   // ANEURALNETWORKS_UINT32
        false,  // ANEURALNETWORKS_TENSOR_FLOAT32
        false,  // ANEURALNETWORKS_TENSOR_INT32
        false,  // ANEURALNETWORKS_TENSOR_QUANT8_ASYMM
        true,   // ANEURALNETWORKS_BOOL
        false,  // ANEURALNETWORKS_TENSOR_QUANT16_SYMM
        false,  // ANEURALNETWORKS_TENSOR_FLOAT16
        false,  // ANEURALNETWORKS_TENSOR_BOOL8
        true,   // ANEURALNETWORKS_FLOAT16
        false,  // ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL
        false,  // ANEURALNETWORKS_TENSOR_QUANT16_ASYMM
        false,  // ANEURALNETWORKS_TENSOR_QUANT8_SYMM
};

static_assert(COUNT(kScalarDataType) == kNumberOfDataTypes, "kScalarDataType is incorrect");

const uint32_t kSizeOfDataTypeOEM[]{
        0,  // ANEURALNETWORKS_OEM
        1,  // ANEURALNETWORKS_TENSOR_OEM_BYTE
};

static_assert(COUNT(kSizeOfDataTypeOEM) == kNumberOfDataTypesOEM,
              "kSizeOfDataTypeOEM is incorrect");

const bool kScalarDataTypeOEM[]{
        true,   // ANEURALNETWORKS_OEM
        false,  // ANEURALNETWORKS_TENSOR_OEM_BYTE
};

static_assert(COUNT(kScalarDataTypeOEM) == kNumberOfDataTypesOEM,
              "kScalarDataTypeOEM is incorrect");

bool nonExtensionOperandTypeIsScalar(int type) {
    CHECK(!isExtensionOperandType(type)) << "Extension operand types are not supported";
    return tableLookup(kScalarDataType, kScalarDataTypeOEM, type);
}

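// Example: a TENSOR_FLOAT32 operand with dimensions {2, 3} occupies
// 4 * 2 * 3 = 24 bytes, a scalar INT32 occupies 4 bytes, and a tensor whose
// dimensions are not yet specified reports 0.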
uint32_t nonExtensionOperandSizeOfData(OperandType type, const std::vector<uint32_t>& dimensions) {
    CHECK(!isExtensionOperandType(type)) << "Size of extension operand data is unknown";
    int n = static_cast<int>(type);

    uint32_t size = tableLookup(kSizeOfDataType, kSizeOfDataTypeOEM, n);

    if (tableLookup(kScalarDataType, kScalarDataTypeOEM, n)) {
        return size;
    }

    if (dimensions.empty()) {
        return 0;
    }

    for (auto d : dimensions) {
        size *= d;
    }
    return size;
}

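// A tensor has unspecified dimensions if its rank is unknown (dimCount == 0)
// or if any dimension is 0; e.g., dimensions {2, 0, 3} leave the middle
// dimension to be determined at execution time.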
bool tensorHasUnspecifiedDimensions(int type, const uint32_t* dim, uint32_t dimCount) {
    if (!isExtensionOperandType(type)) {
        CHECK(!nonExtensionOperandTypeIsScalar(type))
                << "A scalar type can never have unspecified dimensions";
    }
    return dimCount == 0 || std::find(dim, dim + dimCount, 0) != (dim + dimCount);
}

bool tensorHasUnspecifiedDimensions(const ANeuralNetworksOperandType* type) {
    return tensorHasUnspecifiedDimensions(type->type, type->dimensions, type->dimensionCount);
}

bool tensorHasUnspecifiedDimensions(const Operand& operand) {
    return tensorHasUnspecifiedDimensions(static_cast<int>(operand.type), operand.dimensions.data(),
                                          operand.dimensions.size());
}

hidl_memory allocateSharedMemory(int64_t size) {
    static const std::string type = "ashmem";
    static sp<IAllocator> allocator = IAllocator::getService(type);

    hidl_memory memory;

    // TODO: should we align memory size to nearest page? doesn't seem necessary...
    allocator->allocate(size, [&](bool success, const hidl_memory& mem) {
        if (!success) {
            LOG(ERROR) << "unable to allocate " << size << " bytes of " << type;
        } else {
            memory = mem;
        }
    });

    return memory;
}
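
// Caller-side sketch (illustrative, not code from this file): the result
// should be checked for validity, since allocation can fail.
//
//     hidl_memory mem = allocateSharedMemory(4096);
//     if (!mem.valid()) { /* handle allocation failure */ }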

uint32_t alignBytesNeeded(uint32_t index, size_t length) {
    uint32_t pattern;
    if (length < 2) {
        pattern = 0;  // No alignment necessary
    } else if (length < 4) {
        pattern = 1;  // Align on 2-byte boundary
    } else {
        pattern = 3;  // Align on 4-byte boundary
    }
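    // (~(index - 1)) & pattern equals (-index) mod (pattern + 1): the number
    // of extra bytes needed to round index up to the next multiple of
    // pattern + 1. For example, index = 6 with length = 8 gives pattern = 3
    // and extra = (~5) & 3 = 2, aligning 6 up to 8.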
    uint32_t extra = (~(index - 1)) & pattern;
    return extra;
}

void logModelToInfo(const V1_0::Model& model) {
    LOG(INFO) << "V1_0::Model start";
    LOG(INFO) << "operands" << toString(model.operands);
    LOG(INFO) << "operations" << toString(model.operations);
    LOG(INFO) << "inputIndexes" << toString(model.inputIndexes);
    LOG(INFO) << "outputIndexes" << toString(model.outputIndexes);
    LOG(INFO) << "operandValues size" << model.operandValues.size();
    LOG(INFO) << "pools" << SHOW_IF_DEBUG(toString(model.pools));
}

void logModelToInfo(const V1_1::Model& model) {
    LOG(INFO) << "V1_1::Model start";
    LOG(INFO) << "operands" << toString(model.operands);
    LOG(INFO) << "operations" << toString(model.operations);
    LOG(INFO) << "inputIndexes" << toString(model.inputIndexes);
    LOG(INFO) << "outputIndexes" << toString(model.outputIndexes);
    LOG(INFO) << "operandValues size" << model.operandValues.size();
    LOG(INFO) << "pools" << SHOW_IF_DEBUG(toString(model.pools));
}

bool validateOperandSymmPerChannelQuantParams(
        const Operand& halOperand, const ANeuralNetworksSymmPerChannelQuantParams& channelQuant,
        const char* tag) {
    if (halOperand.type != OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL) {
        return false;
    }

    NN_RET_CHECK_LT(channelQuant.channelDim, halOperand.dimensions.size()) << tag;
    NN_RET_CHECK(channelQuant.scales != nullptr) << tag;
    NN_RET_CHECK_EQ(channelQuant.scaleCount, halOperand.dimensions[channelQuant.channelDim]) << tag;
    NN_RET_CHECK_NE(halOperand.dimensions[channelQuant.channelDim], 0u)
            << tag << " channel dimension " << channelQuant.channelDim << " is underspecified";
    for (uint32_t i = 0; i < halOperand.dimensions[channelQuant.channelDim]; i++) {
        NN_RET_CHECK_GT(channelQuant.scales[i], 0.0f) << tag << " invalid scaleArray[" << i << "]";
    }
    return true;
}

static bool validateScalarDimensions(const ANeuralNetworksOperandType& type, const char* tag) {
    NN_RET_CHECK_EQ(type.dimensionCount, 0u) << tag << " invalid dimensions for scalar type";
    NN_RET_CHECK(type.dimensions == nullptr) << tag << " invalid dimensions for scalar type";
    return true;
}

static bool validateQuant8AsymmParams(const ANeuralNetworksOperandType& type, const char* tag) {
    NN_RET_CHECK(0 <= type.zeroPoint && type.zeroPoint <= 255)
            << tag << " invalid zeroPoint: " << type.zeroPoint;
    NN_RET_CHECK_GT(type.scale, 0.f) << tag << " invalid scale";
    return true;
}

static bool validateQuant8SymmParams(const ANeuralNetworksOperandType& type, const char* tag) {
    NN_RET_CHECK_EQ(type.zeroPoint, 0) << tag << " invalid zeroPoint: " << type.zeroPoint;
    NN_RET_CHECK_GT(type.scale, 0.f) << tag << " invalid scale";
    return true;
}

static bool validateQuant16AsymmParams(const ANeuralNetworksOperandType& type, const char* tag) {
    NN_RET_CHECK(0 <= type.zeroPoint && type.zeroPoint <= 65535)
            << tag << " invalid zeroPoint: " << type.zeroPoint;
    NN_RET_CHECK_GT(type.scale, 0.f) << tag << " invalid scale";
    return true;
}

static bool validateQuantSymmParams(const ANeuralNetworksOperandType& type, const char* tag) {
    NN_RET_CHECK_EQ(type.zeroPoint, 0) << tag << " zeroPoint is not zero";
    NN_RET_CHECK_GT(type.scale, 0.f) << tag << " invalid scale";
    return true;
}

static bool validateNoQuantParams(const ANeuralNetworksOperandType& type, const char* tag) {
    NN_RET_CHECK_EQ(type.zeroPoint, 0) << tag << " zeroPoint is not zero";
    NN_RET_CHECK_EQ(type.scale, 0.f) << tag << " scale is not zero";
    return true;
}

static bool validateTensorDimensions(const ANeuralNetworksOperandType& type, const char* tag,
                                     bool allowPartial) {
    if (allowPartial) {
        return true;
    }
    NN_RET_CHECK_GT(type.dimensionCount, 0u) << tag << " invalid operand dimensions";
    for (uint32_t i = 0; i < type.dimensionCount; i++) {
        NN_RET_CHECK_NE(type.dimensions[i], 0u) << tag << " invalid operand dimensions";
    }
    return true;
}

static bool validateOperandTypeHelper(
        const ANeuralNetworksOperandType& type,
        const Extension::OperandTypeInformation* const extensionOperandTypeInfo, const char* tag,
        bool allowPartial) {
    NN_RET_CHECK_EQ(type.dimensionCount == 0, type.dimensions == nullptr);
    if (isExtensionOperandType(type.type)) {
        NN_RET_CHECK(extensionOperandTypeInfo != nullptr);
        if (extensionOperandTypeInfo->isTensor) {
            NN_RET_CHECK(validateTensorDimensions(type, tag, allowPartial));
        } else {
            NN_RET_CHECK(validateScalarDimensions(type, tag));
        }
        return validateNoQuantParams(type, tag);
    }

    NN_RET_CHECK(extensionOperandTypeInfo == nullptr);
    NN_RET_CHECK(validCode(kNumberOfDataTypes, kNumberOfDataTypesOEM, type.type))
            << tag << " invalid OperandType: " << type.type;

    bool isScalar = tableLookup(kScalarDataType, kScalarDataTypeOEM, type.type);
    if (isScalar) {
        NN_RET_CHECK(validateScalarDimensions(type, tag));
        if (type.type != ANEURALNETWORKS_OEM_SCALAR) {
            // Historically, we have allowed OEM types to use quantization parameters.
            NN_RET_CHECK(validateNoQuantParams(type, tag));
        }
    } else {
        NN_RET_CHECK(validateTensorDimensions(type, tag, allowPartial));
        if (type.type == ANEURALNETWORKS_TENSOR_QUANT8_ASYMM) {
            NN_RET_CHECK(validateQuant8AsymmParams(type, tag));
        } else if (type.type == ANEURALNETWORKS_TENSOR_QUANT8_SYMM) {
            NN_RET_CHECK(validateQuant8SymmParams(type, tag));
        } else if (type.type == ANEURALNETWORKS_TENSOR_QUANT16_ASYMM) {
            NN_RET_CHECK(validateQuant16AsymmParams(type, tag));
        } else if (type.type == ANEURALNETWORKS_TENSOR_QUANT16_SYMM) {
            NN_RET_CHECK(validateQuantSymmParams(type, tag));
        } else if (type.type == ANEURALNETWORKS_TENSOR_INT32) {
            // TODO(b/119869082): TENSOR_INT32 should not use quantization parameters.
        } else if (type.type == ANEURALNETWORKS_TENSOR_OEM_BYTE) {
            // Historically, we have allowed OEM types to use quantization parameters.
        } else {
            NN_RET_CHECK(validateNoQuantParams(type, tag));
        }
    }

    return true;
}

int validateOperandType(const ANeuralNetworksOperandType& type,
                        const Extension::OperandTypeInformation* const extensionOperandTypeInfo,
                        const char* tag, bool allowPartial) {
    return validateOperandTypeHelper(type, extensionOperandTypeInfo, tag, allowPartial)
                   ? ANEURALNETWORKS_NO_ERROR
                   : ANEURALNETWORKS_BAD_DATA;
}

int validateOperandList(uint32_t count, const uint32_t* list, uint32_t operandCount,
                        const char* tag) {
    for (uint32_t i = 0; i < count; i++) {
        if (list[i] >= operandCount) {
            LOG(ERROR) << tag << " invalid operand index at " << i << " = " << list[i]
                       << ", operandCount " << operandCount;
            return ANEURALNETWORKS_BAD_DATA;
        }
    }
    return ANEURALNETWORKS_NO_ERROR;
}

int validateOperationOperandTypes(const std::vector<Operand>& operands, uint32_t inOperandCount,
                                  const uint32_t* inOperandIndexes,
                                  const std::vector<OperandType>& inExpectedTypes,
                                  uint32_t outOperandCount, const uint32_t* outOperandIndexes,
                                  const std::vector<OperandType>& outExpectedInTypes) {
    if (inOperandCount != static_cast<uint32_t>(inExpectedTypes.size()) ||
        outOperandCount != static_cast<uint32_t>(outExpectedInTypes.size())) {
        LOG(ERROR) << "Wrong operand count: expected " << inExpectedTypes.size() << " inputs and "
                   << outExpectedInTypes.size() << " outputs, got " << inOperandCount
                   << " inputs and " << outOperandCount << " outputs";
        return ANEURALNETWORKS_BAD_DATA;
    }
    for (uint32_t i = 0; i < inOperandCount; i++) {
        if (operands[inOperandIndexes[i]].type != inExpectedTypes[i]) {
            LOG(ERROR) << "Invalid input tensor type "
                       << toString(operands[inOperandIndexes[i]].type) << " for input " << i
                       << ", expected " << toString(inExpectedTypes[i]);
            return ANEURALNETWORKS_BAD_DATA;
        }
    }
    for (uint32_t i = 0; i < outOperandCount; i++) {
        if (operands[outOperandIndexes[i]].type != outExpectedInTypes[i]) {
            LOG(ERROR) << "Invalid output tensor type "
                       << toString(operands[outOperandIndexes[i]].type) << " for output " << i
                       << ", expected " << toString(outExpectedInTypes[i]);
            return ANEURALNETWORKS_BAD_DATA;
        }
    }

    return ANEURALNETWORKS_NO_ERROR;
}


static int validateHalVersion(ANeuralNetworksOperationType opType, HalVersion halVersion,
                              HalVersion minSupportedHalVersion) {
    if (halVersion < minSupportedHalVersion) {
        LOG(ERROR) << "The given inputs and outputs for operation " << getOperationName(opType)
                   << " are only supported in " << toString(minSupportedHalVersion)
                   << " and later (validating using " << toString(halVersion) << ")";
        return ANEURALNETWORKS_BAD_DATA;
    }
    return ANEURALNETWORKS_NO_ERROR;
}

int validateOperation(ANeuralNetworksOperationType opType, uint32_t inputCount,
                      const uint32_t* inputIndexes, uint32_t outputCount,
                      const uint32_t* outputIndexes, const std::vector<Operand>& operands,
                      HalVersion halVersion) {
    NN_RETURN_IF_ERROR(validateOperandList(inputCount, inputIndexes,
                                           static_cast<uint32_t>(operands.size()),
                                           "ANeuralNetworksModel_addOperation inputs"));
    NN_RETURN_IF_ERROR(validateOperandList(outputCount, outputIndexes,
                                           static_cast<uint32_t>(operands.size()),
                                           "ANeuralNetworksModel_addOperation outputs"));

    if (isExtensionOperationType(opType)) {
        if (halVersion < HalVersion::V1_2) {
            LOG(ERROR)
                    << "Extension operations are supported since HAL version 1.2, validating using "
                    << toString(halVersion);
            return ANEURALNETWORKS_BAD_DATA;
        }
        // There is no other validation we can do for an extension operation.
        return ANEURALNETWORKS_NO_ERROR;
    }

    auto logInvalidInOutNumber = [opType, inputCount, outputCount](int expIn, int expOut) {
        LOG(ERROR) << "Invalid number of input operands (" << inputCount << ", expected " << expIn
                   << ") or output operands (" << outputCount << ", expected " << expOut
                   << ") for operation " << getOperationName(opType);
    };

    switch (opType) {
        case ANEURALNETWORKS_OEM_OPERATION: {
            return ANEURALNETWORKS_NO_ERROR;
        }
        case ANEURALNETWORKS_FLOOR: {
            if (inputCount != 1 || outputCount != 1) {
                logInvalidInOutNumber(1, 1);
                return ANEURALNETWORKS_BAD_DATA;
            }
            auto inputType = operands[inputIndexes[0]].type;
            std::vector<OperandType> inExpectedTypes;
            std::vector<OperandType> outExpectedTypes;
            if (inputType == OperandType::TENSOR_FLOAT32) {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0));
                inExpectedTypes = {OperandType::TENSOR_FLOAT32};
                outExpectedTypes = {OperandType::TENSOR_FLOAT32};
            } else if (inputType == OperandType::TENSOR_FLOAT16) {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
                inExpectedTypes = {OperandType::TENSOR_FLOAT16};
                outExpectedTypes = {OperandType::TENSOR_FLOAT16};
            } else {
                LOG(ERROR) << "Unsupported input tensor type for operation "
                           << getOperationName(opType);
                return ANEURALNETWORKS_BAD_DATA;
            }
            return validateOperationOperandTypes(operands, inputCount, inputIndexes,
                                                 inExpectedTypes, outputCount, outputIndexes,
                                                 outExpectedTypes);
        }
        case ANEURALNETWORKS_DEPTHWISE_CONV_2D: {
            if ((inputCount != 14 && inputCount != 12 && inputCount != 11 && inputCount != 9 &&
                 inputCount != 8) ||
                outputCount != 1) {
                LOG(ERROR) << "Invalid number of input operands (" << inputCount
                           << ", expected 14, 12, 11, 9 or 8) or output operands (" << outputCount
                           << ", expected 1) for operation " << getOperationName(opType);
                return ANEURALNETWORKS_BAD_DATA;
            }
            auto inputType = operands[inputIndexes[0]].type;
            auto filterType = operands[inputIndexes[1]].type;
            std::vector<OperandType> inExpectedTypes;
            std::vector<OperandType> outExpectedTypes;
            if (inputType == OperandType::TENSOR_FLOAT32) {
                inExpectedTypes = {
                        OperandType::TENSOR_FLOAT32, OperandType::TENSOR_FLOAT32,
                        OperandType::TENSOR_FLOAT32, OperandType::INT32,
                        OperandType::INT32,          OperandType::INT32,
                        OperandType::INT32,          OperandType::INT32,
                };
                outExpectedTypes = {OperandType::TENSOR_FLOAT32};
            } else if (inputType == OperandType::TENSOR_FLOAT16) {
                inExpectedTypes = {
                        OperandType::TENSOR_FLOAT16, OperandType::TENSOR_FLOAT16,
                        OperandType::TENSOR_FLOAT16, OperandType::INT32,
                        OperandType::INT32,          OperandType::INT32,
                        OperandType::INT32,          OperandType::INT32,
                };
                outExpectedTypes = {OperandType::TENSOR_FLOAT16};
            } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM) {
                if (filterType != OperandType::TENSOR_QUANT8_ASYMM &&
                    filterType != OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL) {
                    LOG(ERROR) << "Unsupported filter tensor type for operation "
                               << getOperationName(opType);
                    return ANEURALNETWORKS_BAD_DATA;
                }

                inExpectedTypes = {
                        OperandType::TENSOR_QUANT8_ASYMM,
                        filterType,
                        OperandType::TENSOR_INT32,
                        OperandType::INT32,
                        OperandType::INT32,
                        OperandType::INT32,
                        OperandType::INT32,
                        OperandType::INT32,
                };
                outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM};
            } else {
                LOG(ERROR) << "Unsupported input tensor type for operation "
                           << getOperationName(opType);
                return ANEURALNETWORKS_BAD_DATA;
            }

            // NeuralNetworks.h specifies that ANEURALNETWORKS_DEPTHWISE_CONV_2D's output must
            // meet "outputScale > inputScale * filterScale" for the operand type
            // ANEURALNETWORKS_TENSOR_QUANT8_ASYMM before API level 29. For other
            // operand types (e.g., ANEURALNETWORKS_TENSOR_FLOAT32), this constraint
            // does not apply, so by default the constraint is met.
            bool meetsQuantizedScaleConstraintBeforeV1_2 = true;
            if (inputType == OperandType::TENSOR_QUANT8_ASYMM) {
                const float inputScale = operands[inputIndexes[0]].scale;
                const float filterScale = operands[inputIndexes[1]].scale;
                const float outputScale = operands[outputIndexes[0]].scale;
                meetsQuantizedScaleConstraintBeforeV1_2 = (outputScale > inputScale * filterScale);
            }

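            // Valid input counts: 8 (implicit padding) or 11 (explicit
            // padding), optionally extended by a data-layout scalar (9 or 12)
            // and then by two dilation factors (11 or 14). The checks below
            // grow inExpectedTypes to match whichever form was provided.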
            bool withExplicitPadding = false;
            bool withLayout = false;
            bool withDilation = false;
            if (inputCount >= 9) {
                if (operands[inputIndexes[8]].type == OperandType::INT32 && inputCount >= 11) {
                    std::vector<OperandType> explicitScalarTypes(3, OperandType::INT32);
                    inExpectedTypes.insert(inExpectedTypes.end(), explicitScalarTypes.begin(),
                                           explicitScalarTypes.end());
                    withExplicitPadding = true;
                }
                int inputOffset = withExplicitPadding ? 3 : 0;
                if (inputCount >= 9 + inputOffset) {
                    inExpectedTypes.push_back(OperandType::BOOL);
                    withLayout = true;
                }
                if (inputCount == 10 + inputOffset) {
                    LOG(ERROR) << "Provided only one dilation factor value, two values are "
                                  "required for operation "
                               << getOperationName(opType);
                    return ANEURALNETWORKS_BAD_DATA;
                }
                if (inputCount == 11 + inputOffset) {
                    inExpectedTypes.push_back(OperandType::INT32);
                    inExpectedTypes.push_back(OperandType::INT32);
                    withDilation = true;
                }
            }

            if (inputType == OperandType::TENSOR_FLOAT16 ||
                filterType == OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL || withLayout ||
                withDilation || !meetsQuantizedScaleConstraintBeforeV1_2) {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
            } else {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0));
            }
            return validateOperationOperandTypes(operands, inputCount, inputIndexes,
                                                 inExpectedTypes, outputCount, outputIndexes,
                                                 outExpectedTypes);
        }
        case ANEURALNETWORKS_LOCAL_RESPONSE_NORMALIZATION: {
            if ((inputCount != 6 && inputCount != 5) || outputCount != 1) {
                LOG(ERROR) << "Invalid number of input operands (" << inputCount
                           << ", expected 6 or 5) or output operands (" << outputCount
                           << ", expected 1) for operation " << getOperationName(opType);
                return ANEURALNETWORKS_BAD_DATA;
            }
            auto inputType = operands[inputIndexes[0]].type;
            std::vector<OperandType> inExpectedTypes;
            std::vector<OperandType> outExpectedTypes;
            if (inputType == OperandType::TENSOR_FLOAT32) {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0));
                inExpectedTypes = {
                        OperandType::TENSOR_FLOAT32, OperandType::INT32, OperandType::FLOAT32,
                        OperandType::FLOAT32,        OperandType::FLOAT32,
                };
                outExpectedTypes = {OperandType::TENSOR_FLOAT32};
            } else if (inputType == OperandType::TENSOR_FLOAT16) {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
                inExpectedTypes = {
                        OperandType::TENSOR_FLOAT16, OperandType::INT32, OperandType::FLOAT16,
                        OperandType::FLOAT16,        OperandType::FLOAT16,
                };
                outExpectedTypes = {OperandType::TENSOR_FLOAT16};
            } else {
                LOG(ERROR) << "Unsupported input tensor type for operation "
                           << getOperationName(opType);
                return ANEURALNETWORKS_BAD_DATA;
            }
            if (inputCount == 6) {
                inExpectedTypes.push_back(OperandType::INT32);
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
            } else if (operands[inputIndexes[0]].dimensions.size() != 4) {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
            }
            return validateOperationOperandTypes(operands, inputCount, inputIndexes,
                                                 inExpectedTypes, outputCount, outputIndexes,
                                                 outExpectedTypes);
        }
        case ANEURALNETWORKS_RESHAPE: {
            if (inputCount != 2 || outputCount != 1) {
                logInvalidInOutNumber(2, 1);
                return ANEURALNETWORKS_BAD_DATA;
            }
            auto inputType = operands[inputIndexes[0]].type;
            std::vector<OperandType> inExpectedTypes;
            std::vector<OperandType> outExpectedTypes;
            if (inputType == OperandType::TENSOR_FLOAT32) {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0));
                inExpectedTypes = {OperandType::TENSOR_FLOAT32, OperandType::TENSOR_INT32};
                outExpectedTypes = {OperandType::TENSOR_FLOAT32};
            } else if (inputType == OperandType::TENSOR_FLOAT16) {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
                inExpectedTypes = {OperandType::TENSOR_FLOAT16, OperandType::TENSOR_INT32};
                outExpectedTypes = {OperandType::TENSOR_FLOAT16};
            } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM) {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0));
                inExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM, OperandType::TENSOR_INT32};
                outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM};
            } else {
                LOG(ERROR) << "Unsupported input tensor type for operation "
                           << getOperationName(opType);
                return ANEURALNETWORKS_BAD_DATA;
            }
            return validateOperationOperandTypes(operands, inputCount, inputIndexes,
                                                 inExpectedTypes, outputCount, outputIndexes,
                                                 outExpectedTypes);
        }
        case ANEURALNETWORKS_DEPTH_TO_SPACE: {
            if ((inputCount != 3 && inputCount != 2) || outputCount != 1) {
                LOG(ERROR) << "Invalid number of input operands (" << inputCount
                           << ", expected 3 or 2) or output operands (" << outputCount
                           << ", expected 1) for operation " << getOperationName(opType);
                return ANEURALNETWORKS_BAD_DATA;
            }
            auto inputType = operands[inputIndexes[0]].type;
            std::vector<OperandType> inExpectedTypes;
            std::vector<OperandType> outExpectedTypes;
            if (inputType == OperandType::TENSOR_FLOAT32) {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0));
                inExpectedTypes = {OperandType::TENSOR_FLOAT32, OperandType::INT32};
                outExpectedTypes = {OperandType::TENSOR_FLOAT32};
            } else if (inputType == OperandType::TENSOR_FLOAT16) {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
                inExpectedTypes = {OperandType::TENSOR_FLOAT16, OperandType::INT32};
                outExpectedTypes = {OperandType::TENSOR_FLOAT16};
            } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM) {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0));
                inExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM, OperandType::INT32};
                outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM};
            } else {
                LOG(ERROR) << "Unsupported input tensor type for operation "
                           << getOperationName(opType);
                return ANEURALNETWORKS_BAD_DATA;
            }
            if (inputCount == 3) {
                inExpectedTypes.push_back(OperandType::BOOL);
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
            } else {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0));
            }
            return validateOperationOperandTypes(operands, inputCount, inputIndexes,
                                                 inExpectedTypes, outputCount, outputIndexes,
                                                 outExpectedTypes);
        }
        case ANEURALNETWORKS_SPACE_TO_DEPTH: {
            if ((inputCount != 3 && inputCount != 2) || outputCount != 1) {
                LOG(ERROR) << "Invalid number of input operands (" << inputCount
                           << ", expected 3 or 2) or output operands (" << outputCount
                           << ", expected 1) for operation " << getOperationName(opType);
                return ANEURALNETWORKS_BAD_DATA;
            }
            auto inputType = operands[inputIndexes[0]].type;
            std::vector<OperandType> inExpectedTypes;
            std::vector<OperandType> outExpectedTypes;
            if (inputType == OperandType::TENSOR_FLOAT32) {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0));
                inExpectedTypes = {OperandType::TENSOR_FLOAT32, OperandType::INT32};
                outExpectedTypes = {OperandType::TENSOR_FLOAT32};
            } else if (inputType == OperandType::TENSOR_FLOAT16) {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
                inExpectedTypes = {OperandType::TENSOR_FLOAT16, OperandType::INT32};
                outExpectedTypes = {OperandType::TENSOR_FLOAT16};
            } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM) {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0));
                inExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM, OperandType::INT32};
                outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM};
            } else {
                LOG(ERROR) << "Unsupported input tensor type for operation "
                           << getOperationName(opType);
                return ANEURALNETWORKS_BAD_DATA;
            }
            if (inputCount == 3) {
                inExpectedTypes.push_back(OperandType::BOOL);
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
            } else {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0));
            }
            return validateOperationOperandTypes(operands, inputCount, inputIndexes,
                                                 inExpectedTypes, outputCount, outputIndexes,
                                                 outExpectedTypes);
        }
        case ANEURALNETWORKS_EMBEDDING_LOOKUP: {
            if (inputCount != 2 || outputCount != 1) {
                logInvalidInOutNumber(2, 1);
                return ANEURALNETWORKS_BAD_DATA;
            }
            auto inputType = operands[inputIndexes[1]].type;
            if (inputType != OperandType::TENSOR_FLOAT32 &&
                inputType != OperandType::TENSOR_INT32 &&
                inputType != OperandType::TENSOR_QUANT8_ASYMM) {
                LOG(ERROR) << "Unsupported input tensor type for operation "
                           << getOperationName(opType);
                return ANEURALNETWORKS_BAD_DATA;
            }
            std::vector<OperandType> inExpectedTypes = {OperandType::TENSOR_INT32, inputType};
            std::vector<OperandType> outExpectedTypes = {inputType};
            NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0));
            return validateOperationOperandTypes(operands, inputCount, inputIndexes,
                                                 inExpectedTypes, outputCount, outputIndexes,
                                                 outExpectedTypes);
        }
        case ANEURALNETWORKS_HASHTABLE_LOOKUP: {
            if (inputCount != 3 || outputCount != 2) {
                logInvalidInOutNumber(3, 2);
                return ANEURALNETWORKS_BAD_DATA;
            }
            auto inputType = operands[inputIndexes[2]].type;
            if (inputType != OperandType::TENSOR_FLOAT32 &&
                inputType != OperandType::TENSOR_INT32 &&
                inputType != OperandType::TENSOR_QUANT8_ASYMM) {
                LOG(ERROR) << "Unsupported input tensor type for operation "
                           << getOperationName(opType);
                return ANEURALNETWORKS_BAD_DATA;
            }
            std::vector<OperandType> inExpectedTypes = {OperandType::TENSOR_INT32,
                                                        OperandType::TENSOR_INT32, inputType};
            std::vector<OperandType> outExpectedTypes = {inputType,
                                                         OperandType::TENSOR_QUANT8_ASYMM};
            NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0));
            return validateOperationOperandTypes(operands, inputCount, inputIndexes,
                                                 inExpectedTypes, outputCount, outputIndexes,
                                                 outExpectedTypes);
        }
        case ANEURALNETWORKS_LSH_PROJECTION: {
            if (inputCount != 4 || outputCount != 1) {
                logInvalidInOutNumber(4, 1);
                return ANEURALNETWORKS_BAD_DATA;
            }
            auto inputType = operands[inputIndexes[1]].type;
            if (inputType != OperandType::TENSOR_FLOAT16 &&
                inputType != OperandType::TENSOR_FLOAT32 &&
                inputType != OperandType::TENSOR_INT32 &&
                inputType != OperandType::TENSOR_QUANT8_ASYMM) {
                LOG(ERROR) << "Unsupported input tensor type for operation "
                           << getOperationName(opType);
                return ANEURALNETWORKS_BAD_DATA;
            }
            auto hashType = operands[inputIndexes[0]].type;
            std::vector<OperandType> inExpectedTypes;
            if (hashType == OperandType::TENSOR_FLOAT16) {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
                inExpectedTypes = {
                        OperandType::TENSOR_FLOAT16,
                        inputType,
                        OperandType::TENSOR_FLOAT16,
                        OperandType::INT32,
                };
            } else if (hashType == OperandType::TENSOR_FLOAT32) {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0));
                inExpectedTypes = {
                        OperandType::TENSOR_FLOAT32,
                        inputType,
                        OperandType::TENSOR_FLOAT32,
                        OperandType::INT32,
                };
            } else {
                LOG(ERROR) << "Unsupported hash tensor type for operation "
                           << getOperationName(opType);
                return ANEURALNETWORKS_BAD_DATA;
            }
            std::vector<OperandType> outExpectedTypes = {OperandType::TENSOR_INT32};
            return validateOperationOperandTypes(operands, inputCount, inputIndexes,
                                                 inExpectedTypes, outputCount, outputIndexes,
                                                 outExpectedTypes);
        }
        case ANEURALNETWORKS_BIDIRECTIONAL_SEQUENCE_LSTM: {
            std::vector<OperandType> inExpectedTypes;
            auto inputType = operands[inputIndexes[0]].type;
            std::vector<OperandType> outExpectedTypes{inputType, inputType};
            std::vector<OperandType> outExpectedTypesMerged{inputType};
            if (inputType != OperandType::TENSOR_FLOAT32 &&
                inputType != OperandType::TENSOR_FLOAT16) {
                LOG(ERROR) << "Unsupported input tensor type for operation "
                           << getOperationName(opType);
                return ANEURALNETWORKS_BAD_DATA;
            }
            NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));

            inExpectedTypes = {};
            for (int i = 0; i < 48; ++i) {
                inExpectedTypes.push_back(inputType);
            }
            inExpectedTypes.push_back(OperandType::INT32);
            inExpectedTypes.push_back(inputType == OperandType::TENSOR_FLOAT32
                                              ? OperandType::FLOAT32
                                              : OperandType::FLOAT16);
            inExpectedTypes.push_back(inputType == OperandType::TENSOR_FLOAT32
                                              ? OperandType::FLOAT32
                                              : OperandType::FLOAT16);
            inExpectedTypes.push_back(OperandType::BOOL);
            inExpectedTypes.push_back(OperandType::BOOL);
            for (int i = 0; i < 8; ++i) {
                inExpectedTypes.push_back(inputType);
            }

            if (inputCount != 61 || (outputCount != 1 && outputCount != 2)) {
                LOG(ERROR) << "Invalid number of input operands (" << inputCount
                           << ", expected 61) or output operands (" << outputCount
                           << ", expected 1 or 2) for operation " << getOperationName(opType);
                return ANEURALNETWORKS_BAD_DATA;
            }

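            // The operation produces either separate forward/backward outputs
            // (two tensors) or a single merged output, so accept whichever
            // layout matches by validating against both output type lists.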
            auto status = validateOperationOperandTypes(operands, inputCount, inputIndexes,
                                                        inExpectedTypes, outputCount, outputIndexes,
                                                        outExpectedTypes);
            if (status != ANEURALNETWORKS_NO_ERROR) {
                status = validateOperationOperandTypes(operands, inputCount, inputIndexes,
                                                       inExpectedTypes, outputCount, outputIndexes,
                                                       outExpectedTypesMerged);
            }
            return status;
        }
        case ANEURALNETWORKS_LSTM: {
            std::vector<OperandType> inExpectedTypes;
            std::vector<OperandType> outExpectedTypes;
            auto inputType = operands[inputIndexes[0]].type;
            if (inputType != OperandType::TENSOR_FLOAT32 &&
                inputType != OperandType::TENSOR_FLOAT16) {
                LOG(ERROR) << "Unsupported input tensor type for operation "
                           << getOperationName(opType);
                return ANEURALNETWORKS_BAD_DATA;
            }

            inExpectedTypes = {inputType, inputType, inputType, inputType, inputType,
                               inputType, inputType, inputType, inputType, inputType,
                               inputType, inputType, inputType, inputType, inputType,
                               inputType, inputType, inputType, inputType, inputType,
                               OperandType::INT32};
            if (inputType == OperandType::TENSOR_FLOAT32) {
                inExpectedTypes.push_back(OperandType::FLOAT32);
                inExpectedTypes.push_back(OperandType::FLOAT32);
            } else {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
                inExpectedTypes.push_back(OperandType::FLOAT16);
                inExpectedTypes.push_back(OperandType::FLOAT16);
            }

            outExpectedTypes = {inputType, inputType, inputType, inputType};
            if (inputCount == 23 && outputCount == 4) {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0));
            } else if (inputCount == 27 && outputCount == 4) {
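                // The four extra inputs are the layer normalization weight
                // tensors introduced in HAL V1_2.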
                for (int i = 0; i < 4; ++i) {
                    inExpectedTypes.push_back(inputType);
                }
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
            } else {
                LOG(ERROR) << "Invalid number of input operands (" << inputCount
                           << ", expected 23 or 27) or output operands (" << outputCount
                           << ", expected 4) for operation " << getOperationName(opType);
                return ANEURALNETWORKS_BAD_DATA;
            }
            return validateOperationOperandTypes(operands, inputCount, inputIndexes,
                                                 inExpectedTypes, outputCount, outputIndexes,
                                                 outExpectedTypes);
        }
        case ANEURALNETWORKS_QUANTIZED_16BIT_LSTM: {
            if (inputCount != 15 || outputCount != 2) {
                logInvalidInOutNumber(15, 2);
                return ANEURALNETWORKS_BAD_DATA;
            }
            NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
            std::vector<OperandType> inExpectedTypes = {
                    OperandType::TENSOR_QUANT8_ASYMM, OperandType::TENSOR_QUANT8_ASYMM,
                    OperandType::TENSOR_QUANT8_ASYMM, OperandType::TENSOR_QUANT8_ASYMM,
                    OperandType::TENSOR_QUANT8_ASYMM, OperandType::TENSOR_QUANT8_ASYMM,
                    OperandType::TENSOR_QUANT8_ASYMM, OperandType::TENSOR_QUANT8_ASYMM,
                    OperandType::TENSOR_QUANT8_ASYMM, OperandType::TENSOR_INT32,
                    OperandType::TENSOR_INT32,        OperandType::TENSOR_INT32,
                    OperandType::TENSOR_INT32,        OperandType::TENSOR_QUANT16_SYMM,
                    OperandType::TENSOR_QUANT8_ASYMM};
            std::vector<OperandType> outExpectedTypes = {OperandType::TENSOR_QUANT16_SYMM,
                                                         OperandType::TENSOR_QUANT8_ASYMM};
            return validateOperationOperandTypes(operands, inputCount, inputIndexes,
                                                 inExpectedTypes, outputCount, outputIndexes,
                                                 outExpectedTypes);
        }
        case ANEURALNETWORKS_RANDOM_MULTINOMIAL: {
            if (inputCount != 3 || outputCount != 1) {
                logInvalidInOutNumber(3, 1);
                return ANEURALNETWORKS_BAD_DATA;
            }
            OperandType inputType = operands[inputIndexes[0]].type;
            std::vector<OperandType> inExpectedTypes;
            if (inputType == OperandType::TENSOR_FLOAT32 ||
                inputType == OperandType::TENSOR_FLOAT16) {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
                inExpectedTypes = {
                        inputType,
                        OperandType::INT32,
                        OperandType::TENSOR_INT32,
                };
            } else {
                LOG(ERROR) << "Unsupported input tensor type for operation "
                           << getOperationName(opType);
                return ANEURALNETWORKS_BAD_DATA;
            }
            std::vector<OperandType> outExpectedTypes = {OperandType::TENSOR_INT32};
            return validateOperationOperandTypes(operands, inputCount, inputIndexes,
                                                 inExpectedTypes, outputCount, outputIndexes,
                                                 outExpectedTypes);
        }
        case ANEURALNETWORKS_RNN: {
            if (inputCount != 6 || outputCount != 2) {
                logInvalidInOutNumber(6, 2);
                return ANEURALNETWORKS_BAD_DATA;
            }
            OperandType inputType = operands[inputIndexes[0]].type;
            std::vector<OperandType> inExpectedTypes;
            std::vector<OperandType> outExpectedTypes;
            if (inputType == OperandType::TENSOR_FLOAT32) {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0));
                inExpectedTypes = {
                        OperandType::TENSOR_FLOAT32, OperandType::TENSOR_FLOAT32,
                        OperandType::TENSOR_FLOAT32, OperandType::TENSOR_FLOAT32,
                        OperandType::TENSOR_FLOAT32, OperandType::INT32,
                };
                outExpectedTypes = {
                        OperandType::TENSOR_FLOAT32,
                        OperandType::TENSOR_FLOAT32,
                };
            } else if (inputType == OperandType::TENSOR_FLOAT16) {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
                inExpectedTypes = {
                        OperandType::TENSOR_FLOAT16, OperandType::TENSOR_FLOAT16,
                        OperandType::TENSOR_FLOAT16, OperandType::TENSOR_FLOAT16,
                        OperandType::TENSOR_FLOAT16, OperandType::INT32,
                };
                outExpectedTypes = {
                        OperandType::TENSOR_FLOAT16,
                        OperandType::TENSOR_FLOAT16,
                };
            } else {
                LOG(ERROR) << "Unsupported input tensor type for operation "
                           << getOperationName(opType);
                return ANEURALNETWORKS_BAD_DATA;
            }
            return validateOperationOperandTypes(operands, inputCount, inputIndexes,
                                                 inExpectedTypes, outputCount, outputIndexes,
                                                 outExpectedTypes);
        }
        case ANEURALNETWORKS_SVDF: {
            if (inputCount != 7 || outputCount != 2) {
                logInvalidInOutNumber(7, 2);
                return ANEURALNETWORKS_BAD_DATA;
            }
            OperandType inputType = operands[inputIndexes[0]].type;
            if (inputType == OperandType::TENSOR_FLOAT32) {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0));
            } else if (inputType == OperandType::TENSOR_FLOAT16) {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
            } else {
                LOG(ERROR) << "Unsupported input tensor type for operation "
                           << getOperationName(opType);
                return ANEURALNETWORKS_BAD_DATA;
            }
            std::vector<OperandType> inExpectedTypes = {
                    inputType, inputType,          inputType,          inputType,
                    inputType, OperandType::INT32, OperandType::INT32,
            };
            std::vector<OperandType> outExpectedTypes = {inputType, inputType};
            return validateOperationOperandTypes(operands, inputCount, inputIndexes,
                                                 inExpectedTypes, outputCount, outputIndexes,
                                                 outExpectedTypes);
        }
        case ANEURALNETWORKS_BATCH_TO_SPACE_ND: {
            if ((inputCount != 3 && inputCount != 2) || outputCount != 1) {
                LOG(ERROR) << "Invalid number of input operands (" << inputCount
                           << ", expected 3 or 2) or output operands (" << outputCount
                           << ", expected 1) for operation " << getOperationName(opType);
                return ANEURALNETWORKS_BAD_DATA;
            }
            auto inputType = operands[inputIndexes[0]].type;
            std::vector<OperandType> inExpectedTypes;
            std::vector<OperandType> outExpectedTypes;
            if (inputType == OperandType::TENSOR_FLOAT32) {
                inExpectedTypes = {
                        OperandType::TENSOR_FLOAT32,
                        OperandType::TENSOR_INT32,
                };
                outExpectedTypes = {OperandType::TENSOR_FLOAT32};
            } else if (inputType == OperandType::TENSOR_FLOAT16) {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
                inExpectedTypes = {
                        OperandType::TENSOR_FLOAT16,
                        OperandType::TENSOR_INT32,
                };
                outExpectedTypes = {OperandType::TENSOR_FLOAT16};
            } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM) {
                inExpectedTypes = {
                        OperandType::TENSOR_QUANT8_ASYMM,
                        OperandType::TENSOR_INT32,
                };
                outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM};
            } else {
                LOG(ERROR) << "Unsupported input tensor type for operation "
                           << getOperationName(opType);
                return ANEURALNETWORKS_BAD_DATA;
            }
            if (inputCount == 3) {
                inExpectedTypes.push_back(OperandType::BOOL);
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
            } else {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_1));
            }
            return validateOperationOperandTypes(operands, inputCount, inputIndexes,
                                                 inExpectedTypes, outputCount, outputIndexes,
                                                 outExpectedTypes);
        }
        case ANEURALNETWORKS_SPACE_TO_BATCH_ND: {
            if ((inputCount != 4 && inputCount != 3) || outputCount != 1) {
                LOG(ERROR) << "Invalid number of input operands (" << inputCount
                           << ", expected 4 or 3) or output operands (" << outputCount
                           << ", expected 1) for operation " << getOperationName(opType);
                return ANEURALNETWORKS_BAD_DATA;
            }
            auto inputType = operands[inputIndexes[0]].type;
            std::vector<OperandType> inExpectedTypes;
            std::vector<OperandType> outExpectedTypes;
            if (inputType == OperandType::TENSOR_FLOAT32) {
                inExpectedTypes = {
                        OperandType::TENSOR_FLOAT32,
                        OperandType::TENSOR_INT32,
                        OperandType::TENSOR_INT32,
                };
                outExpectedTypes = {OperandType::TENSOR_FLOAT32};
            } else if (inputType == OperandType::TENSOR_FLOAT16) {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
                inExpectedTypes = {
                        OperandType::TENSOR_FLOAT16,
                        OperandType::TENSOR_INT32,
                        OperandType::TENSOR_INT32,
                };
                outExpectedTypes = {OperandType::TENSOR_FLOAT16};
            } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM) {
                if (operands[inputIndexes[0]].zeroPoint != 0) {
                    NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
                }
                inExpectedTypes = {
                        OperandType::TENSOR_QUANT8_ASYMM,
                        OperandType::TENSOR_INT32,
                        OperandType::TENSOR_INT32,
                };
                outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM};
            } else {
                LOG(ERROR) << "Unsupported input tensor type for operation "
                           << getOperationName(opType);
                return ANEURALNETWORKS_BAD_DATA;
            }
            if (inputCount == 4) {
                inExpectedTypes.push_back(OperandType::BOOL);
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
            } else {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_1));
            }
            return validateOperationOperandTypes(operands, inputCount, inputIndexes,
                                                 inExpectedTypes, outputCount, outputIndexes,
                                                 outExpectedTypes);
        }
        case ANEURALNETWORKS_PAD: {
            if (inputCount != 2 || outputCount != 1) {
                logInvalidInOutNumber(2, 1);
                return ANEURALNETWORKS_BAD_DATA;
            }
            auto inputType = operands[inputIndexes[0]].type;
            std::vector<OperandType> inExpectedTypes;
            std::vector<OperandType> outExpectedTypes;
            if (inputType == OperandType::TENSOR_FLOAT32) {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_1));
                inExpectedTypes = {
                        OperandType::TENSOR_FLOAT32,
                        OperandType::TENSOR_INT32,
                };
                outExpectedTypes = {OperandType::TENSOR_FLOAT32};
            } else if (inputType == OperandType::TENSOR_FLOAT16) {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
                inExpectedTypes = {
                        OperandType::TENSOR_FLOAT16,
                        OperandType::TENSOR_INT32,
                };
                outExpectedTypes = {OperandType::TENSOR_FLOAT16};
            } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM) {
                if (operands[inputIndexes[0]].zeroPoint == 0) {
                    NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_1));
                } else {
                    NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
                }
                inExpectedTypes = {
                        OperandType::TENSOR_QUANT8_ASYMM,
                        OperandType::TENSOR_INT32,
                };
                outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM};
            } else {
                LOG(ERROR) << "Unsupported input tensor type for operation "
                           << getOperationName(opType);
                return ANEURALNETWORKS_BAD_DATA;
            }
            return validateOperationOperandTypes(operands, inputCount, inputIndexes,
                                                 inExpectedTypes, outputCount, outputIndexes,
                                                 outExpectedTypes);
        }
        case ANEURALNETWORKS_PAD_V2: {
            if (inputCount != 3 || outputCount != 1) {
                logInvalidInOutNumber(3, 1);
                return ANEURALNETWORKS_BAD_DATA;
            }
            auto inputType = operands[inputIndexes[0]].type;
            std::vector<OperandType> inExpectedTypes;
            std::vector<OperandType> outExpectedTypes;
            if (inputType == OperandType::TENSOR_FLOAT32) {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
                inExpectedTypes = {
                        OperandType::TENSOR_FLOAT32,
                        OperandType::TENSOR_INT32,
                        OperandType::FLOAT32,
                };
                outExpectedTypes = {OperandType::TENSOR_FLOAT32};
            } else if (inputType == OperandType::TENSOR_FLOAT16) {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
                inExpectedTypes = {
                        OperandType::TENSOR_FLOAT16,
                        OperandType::TENSOR_INT32,
                        OperandType::FLOAT16,
                };
                outExpectedTypes = {OperandType::TENSOR_FLOAT16};
            } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM) {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
                inExpectedTypes = {
                        OperandType::TENSOR_QUANT8_ASYMM,
                        OperandType::TENSOR_INT32,
                        OperandType::INT32,
                };  // TODO(b/116699425): Make it UINT8.
                outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM};
            } else {
                LOG(ERROR) << "Unsupported input tensor type for operation "
                           << getOperationName(opType);
                return ANEURALNETWORKS_BAD_DATA;
            }
            return validateOperationOperandTypes(operands, inputCount, inputIndexes,
                                                 inExpectedTypes, outputCount, outputIndexes,
                                                 outExpectedTypes);
        }
        case ANEURALNETWORKS_CAST: {
            if (inputCount != 1 || outputCount != 1) {
                logInvalidInOutNumber(1, 1);
                return ANEURALNETWORKS_BAD_DATA;
            }
            auto inputType = operands[inputIndexes[0]].type;
            auto outputType = operands[outputIndexes[0]].type;
            std::vector<OperandType> inExpectedTypes;
            if (inputType == OperandType::TENSOR_FLOAT16 ||
                inputType == OperandType::TENSOR_FLOAT32 ||
                inputType == OperandType::TENSOR_INT32 ||
                inputType == OperandType::TENSOR_QUANT8_ASYMM) {
                inExpectedTypes = {inputType};
            } else {
                LOG(ERROR) << "Unsupported input tensor type for operation "
                           << getOperationName(opType);
                return ANEURALNETWORKS_BAD_DATA;
            }
            std::vector<OperandType> outExpectedTypes;
            if (outputType == OperandType::TENSOR_FLOAT16 ||
                outputType == OperandType::TENSOR_FLOAT32 ||
                outputType == OperandType::TENSOR_INT32 ||
                outputType == OperandType::TENSOR_QUANT8_ASYMM) {
                outExpectedTypes = {outputType};
            } else {
                LOG(ERROR) << "Unsupported output tensor type for operation "
                           << getOperationName(opType);
                return ANEURALNETWORKS_BAD_DATA;
            }
            NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
            return validateOperationOperandTypes(operands, inputCount, inputIndexes,
                                                 inExpectedTypes, outputCount, outputIndexes,
                                                 outExpectedTypes);
        }
        case ANEURALNETWORKS_SQUEEZE: {
            if (inputCount != 2 || outputCount != 1) {
                logInvalidInOutNumber(2, 1);
                return ANEURALNETWORKS_BAD_DATA;
            }
            auto inputType = operands[inputIndexes[0]].type;
            std::vector<OperandType> inExpectedTypes;
            std::vector<OperandType> outExpectedTypes;
            if (inputType == OperandType::TENSOR_FLOAT32) {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_1));
                inExpectedTypes = {OperandType::TENSOR_FLOAT32, OperandType::TENSOR_INT32};
                outExpectedTypes = {OperandType::TENSOR_FLOAT32};
            } else if (inputType == OperandType::TENSOR_FLOAT16) {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
                inExpectedTypes = {OperandType::TENSOR_FLOAT16, OperandType::TENSOR_INT32};
                outExpectedTypes = {OperandType::TENSOR_FLOAT16};
            } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM) {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_1));
                inExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM, OperandType::TENSOR_INT32};
                outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM};
            } else {
                LOG(ERROR) << "Unsupported input tensor type for operation "
                           << getOperationName(opType);
                return ANEURALNETWORKS_BAD_DATA;
            }
            return validateOperationOperandTypes(operands, inputCount, inputIndexes,
                                                 inExpectedTypes, outputCount, outputIndexes,
                                                 outExpectedTypes);
        }
        case ANEURALNETWORKS_STRIDED_SLICE: {
            if (inputCount != 7 || outputCount != 1) {
                logInvalidInOutNumber(7, 1);
                return ANEURALNETWORKS_BAD_DATA;
            }
            auto inputType = operands[inputIndexes[0]].type;
            std::vector<OperandType> inExpectedTypes;
            std::vector<OperandType> outExpectedTypes;
            if (inputType == OperandType::TENSOR_FLOAT32) {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_1));
                inExpectedTypes = {
                        OperandType::TENSOR_FLOAT32, OperandType::TENSOR_INT32,
                        OperandType::TENSOR_INT32,   OperandType::TENSOR_INT32,
                        OperandType::INT32,          OperandType::INT32,
                        OperandType::INT32,
                };
                outExpectedTypes = {OperandType::TENSOR_FLOAT32};
            } else if (inputType == OperandType::TENSOR_FLOAT16) {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
1422 inExpectedTypes = {
1423 OperandType::TENSOR_FLOAT16, OperandType::TENSOR_INT32,
1424 OperandType::TENSOR_INT32, OperandType::TENSOR_INT32,
1425 OperandType::INT32, OperandType::INT32,
1426 OperandType::INT32,
1427 };
1428 outExpectedTypes = {OperandType::TENSOR_FLOAT16};
1429 } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM) {
1430 NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_1));
1431 inExpectedTypes = {
1432 OperandType::TENSOR_QUANT8_ASYMM,
1433 OperandType::TENSOR_INT32,
1434 OperandType::TENSOR_INT32,
1435 OperandType::TENSOR_INT32,
1436 OperandType::INT32,
1437 OperandType::INT32,
1438 OperandType::INT32,
1439 };
1440 outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM};
1441 } else {
1442 LOG(ERROR) << "Unsupported input tensor type for operation "
1443 << getOperationName(opType);
1444 return ANEURALNETWORKS_BAD_DATA;
1445 }
1446 return validateOperationOperandTypes(operands, inputCount, inputIndexes,
1447 inExpectedTypes, outputCount, outputIndexes,
1448 outExpectedTypes);
1449 }
1450 case ANEURALNETWORKS_MEAN: {
1451 if (inputCount != 3 || outputCount != 1) {
1452 logInvalidInOutNumber(3, 1);
1453 return ANEURALNETWORKS_BAD_DATA;
1454 }
1455 auto inputType = operands[inputIndexes[0]].type;
1456 std::vector<OperandType> inExpectedTypes;
1457 std::vector<OperandType> outExpectedTypes;
1458 if (inputType == OperandType::TENSOR_FLOAT32) {
1459 NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_1));
1460 inExpectedTypes = {OperandType::TENSOR_FLOAT32,
1461 OperandType::TENSOR_INT32,
1462 OperandType::INT32};
1463 outExpectedTypes = {OperandType::TENSOR_FLOAT32};
1464 } else if (inputType == OperandType::TENSOR_FLOAT16) {
1465 NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
1466 inExpectedTypes = {OperandType::TENSOR_FLOAT16, OperandType::TENSOR_INT32,
1467 OperandType::INT32};
1468 outExpectedTypes = {OperandType::TENSOR_FLOAT16};
1469 } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM) {
1470 NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_1));
1471 inExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM,
1472 OperandType::TENSOR_INT32,
1473 OperandType::INT32};
1474 outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM};
1475 } else {
1476 LOG(ERROR) << "Unsupported input tensor type for operation "
1477 << getOperationName(opType);
1478 return ANEURALNETWORKS_BAD_DATA;
1479 }
1480 return validateOperationOperandTypes(operands,
1481 inputCount, inputIndexes,
1482 inExpectedTypes,
1483 outputCount, outputIndexes,
1484 outExpectedTypes);
1485 }
1486 case ANEURALNETWORKS_ARGMAX:
1487 case ANEURALNETWORKS_ARGMIN: {
1488 if (inputCount != 2 || outputCount != 1) {
1489 logInvalidInOutNumber(2, 1);
1490 return ANEURALNETWORKS_BAD_DATA;
1491 }
1492 auto inputType = operands[inputIndexes[0]].type;
1493 std::vector<OperandType> inExpectedTypes;
1494 std::vector<OperandType> outExpectedTypes;
1495 if (inputType == OperandType::TENSOR_FLOAT16 ||
1496 inputType == OperandType::TENSOR_FLOAT32 ||
1497 inputType == OperandType::TENSOR_INT32 ||
1498 inputType == OperandType::TENSOR_QUANT8_ASYMM) {
1499 inExpectedTypes = {inputType, OperandType::INT32};
1500 outExpectedTypes = {OperandType::TENSOR_INT32};
1501 } else {
1502 LOG(ERROR) << "Unsupported input tensor type for operation "
1503 << getOperationName(opType);
1504 return ANEURALNETWORKS_BAD_DATA;
1505 }
1506 NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
1507 return validateOperationOperandTypes(operands, inputCount, inputIndexes,
1508 inExpectedTypes, outputCount, outputIndexes,
1509 outExpectedTypes);
1510 }
1511 case ANEURALNETWORKS_EXPAND_DIMS: {
1512 if (inputCount != 2 || outputCount != 1) {
1513 logInvalidInOutNumber(2, 1);
1514 return ANEURALNETWORKS_BAD_DATA;
1515 }
1516 auto inputType = operands[inputIndexes[0]].type;
1517 std::vector<OperandType> inExpectedTypes;
1518 std::vector<OperandType> outExpectedTypes;
1519 if (inputType == OperandType::TENSOR_FLOAT16 ||
1520 inputType == OperandType::TENSOR_FLOAT32 ||
1521 inputType == OperandType::TENSOR_INT32 ||
1522 inputType == OperandType::TENSOR_QUANT8_ASYMM) {
1523 inExpectedTypes = {inputType, OperandType::INT32};
1524 outExpectedTypes = {inputType};
1525 } else {
1526 LOG(ERROR) << "Unsupported input tensor type for operation "
1527 << getOperationName(opType);
1528 return ANEURALNETWORKS_BAD_DATA;
1529 }
1530 NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
1531 return validateOperationOperandTypes(operands, inputCount, inputIndexes,
1532 inExpectedTypes, outputCount, outputIndexes,
1533 outExpectedTypes);
1534 }
1535 case ANEURALNETWORKS_SPLIT: {
1536 if (inputCount != 3) {
1537 LOG(ERROR) << "Invalid number of input operands (" << inputCount << ", expected 3)"
1538 << getOperationName(opType);
1539 return ANEURALNETWORKS_BAD_DATA;
1540 }
1541 auto inputType = operands[inputIndexes[0]].type;
1542 if (inputType != OperandType::TENSOR_FLOAT16 &&
1543 inputType != OperandType::TENSOR_FLOAT32 &&
1544 inputType != OperandType::TENSOR_INT32 &&
1545 inputType != OperandType::TENSOR_QUANT8_ASYMM) {
1546 LOG(ERROR) << "Unsupported input tensor type for operation "
1547 << getOperationName(opType);
1548 return ANEURALNETWORKS_BAD_DATA;
1549 }
1550 std::vector<OperandType> inExpectedTypes = {inputType, OperandType::INT32,
1551 OperandType::INT32};
1552 std::vector<OperandType> outExpectedTypes(outputCount, inputType);
1553 NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
1554 return validateOperationOperandTypes(operands, inputCount, inputIndexes,
1555 inExpectedTypes, outputCount, outputIndexes,
1556 outExpectedTypes);
1557 }
1558 case ANEURALNETWORKS_MAXIMUM:
1559 case ANEURALNETWORKS_MINIMUM: {
1560 if (inputCount != 2 || outputCount != 1) {
1561 logInvalidInOutNumber(2, 1);
1562 return ANEURALNETWORKS_BAD_DATA;
1563 }
1564 std::vector<OperandType> inExpectedTypes;
1565 std::vector<OperandType> outExpectedTypes;
1566 OperandType inputType = operands[inputIndexes[0]].type;
1567 if (inputType == OperandType::TENSOR_FLOAT16 ||
1568 inputType == OperandType::TENSOR_FLOAT32 ||
1569 inputType == OperandType::TENSOR_INT32 ||
1570 inputType == OperandType::TENSOR_QUANT8_ASYMM) {
1571 inExpectedTypes = {inputType, inputType};
1572 outExpectedTypes = {inputType};
1573 } else {
1574 LOG(ERROR) << "Unsupported input tensor type for operation "
1575 << getOperationName(opType);
1576 return ANEURALNETWORKS_BAD_DATA;
1577 }
1578 NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
1579 return validateOperationOperandTypes(operands, inputCount, inputIndexes,
1580 inExpectedTypes, outputCount, outputIndexes,
1581 outExpectedTypes);
1582 }
1583 case ANEURALNETWORKS_GROUPED_CONV_2D: {
1584 if ((inputCount != 12 && inputCount != 9) || outputCount != 1) {
1585 LOG(ERROR) << "Invalid number of input operands (" << inputCount
1586 << ", expected 12 or 9) or output operands (" << outputCount
1587 << ", expected 1) for operation " << getOperationName(opType);
1588 return ANEURALNETWORKS_BAD_DATA;
1589 }
1590 auto inputType = operands[inputIndexes[0]].type;
1591 auto filterType = operands[inputIndexes[1]].type;
1592 std::vector<OperandType> inExpectedTypes;
1593 std::vector<OperandType> outExpectedTypes;
1594 if (inputType == OperandType::TENSOR_FLOAT32) {
1595 inExpectedTypes = {OperandType::TENSOR_FLOAT32, OperandType::TENSOR_FLOAT32,
1596 OperandType::TENSOR_FLOAT32, OperandType::INT32,
1597 OperandType::INT32, OperandType::INT32,
1598 OperandType::INT32, OperandType::INT32};
1599 outExpectedTypes = {OperandType::TENSOR_FLOAT32};
1600 } else if (inputType == OperandType::TENSOR_FLOAT16) {
1601 inExpectedTypes = {OperandType::TENSOR_FLOAT16, OperandType::TENSOR_FLOAT16,
1602 OperandType::TENSOR_FLOAT16, OperandType::INT32,
1603 OperandType::INT32, OperandType::INT32,
1604 OperandType::INT32, OperandType::INT32};
1605 outExpectedTypes = {OperandType::TENSOR_FLOAT16};
1606 } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM) {
1607 if (filterType != OperandType::TENSOR_QUANT8_ASYMM &&
1608 filterType != OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL) {
1609 LOG(ERROR) << "Unsupported filter tensor type for operation "
1610 << getOperationName(opType);
1611 return ANEURALNETWORKS_BAD_DATA;
1612 }
1613
1614 if (filterType == OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL &&
1615 operands[inputIndexes[1]].extraParams.channelQuant().channelDim != 0) {
1616 LOG(ERROR) << "Unsupported filter tensor channel dimension for operation "
1617 << getOperationName(opType);
1618 return ANEURALNETWORKS_BAD_DATA;
1619 }
1620
1621 inExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM,
1622 filterType,
1623 OperandType::TENSOR_INT32,
1624 OperandType::INT32,
1625 OperandType::INT32,
1626 OperandType::INT32,
1627 OperandType::INT32,
1628 OperandType::INT32};
1629 outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM};
1630 } else {
1631 LOG(ERROR) << "Unsupported input tensor type for operation "
1632 << getOperationName(opType);
1633 return ANEURALNETWORKS_BAD_DATA;
1634 }
1635
1636 if (inputCount == 12) {
1637 std::vector<OperandType> explicitScalarTypes(3, OperandType::INT32);
1638 inExpectedTypes.insert(inExpectedTypes.end(), explicitScalarTypes.begin(),
1639 explicitScalarTypes.end());
1640 }
1641 inExpectedTypes.push_back(OperandType::BOOL);
1642 NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
1643 return validateOperationOperandTypes(operands, inputCount, inputIndexes,
1644 inExpectedTypes, outputCount, outputIndexes,
1645 outExpectedTypes);
1646 }
1647 case ANEURALNETWORKS_TILE: {
1648 if (inputCount != 2 || outputCount != 1) {
1649 logInvalidInOutNumber(2, 1);
1650 return ANEURALNETWORKS_BAD_DATA;
1651 }
1652 auto inputType = operands[inputIndexes[0]].type;
1653 std::vector<OperandType> inExpectedTypes;
1654 std::vector<OperandType> outExpectedTypes;
1655 if (inputType == OperandType::TENSOR_FLOAT16 ||
1656 inputType == OperandType::TENSOR_FLOAT32 ||
1657 inputType == OperandType::TENSOR_INT32 ||
1658 inputType == OperandType::TENSOR_QUANT8_ASYMM) {
1659 inExpectedTypes = {inputType, OperandType::TENSOR_INT32};
1660 outExpectedTypes = {inputType};
1661 } else {
1662 LOG(ERROR) << "Unsupported input tensor type for operation "
1663 << getOperationName(opType);
1664 return ANEURALNETWORKS_BAD_DATA;
1665 }
1666 NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
1667 return validateOperationOperandTypes(operands, inputCount, inputIndexes,
1668 inExpectedTypes, outputCount, outputIndexes,
1669 outExpectedTypes);
1670 }
1671 case ANEURALNETWORKS_POW: {
1672 if (inputCount != 2 || outputCount != 1) {
1673 logInvalidInOutNumber(2, 1);
1674 return ANEURALNETWORKS_BAD_DATA;
1675 }
1676 auto inputType = operands[inputIndexes[0]].type;
1677 std::vector<OperandType> inExpectedTypes;
1678 std::vector<OperandType> outExpectedTypes;
1679 if (inputType == OperandType::TENSOR_FLOAT16 ||
1680 inputType == OperandType::TENSOR_FLOAT32) {
1681 inExpectedTypes = {inputType, inputType};
1682 outExpectedTypes = {inputType};
1683 } else {
1684 LOG(ERROR) << "Unsupported input tensor type for operation "
1685 << getOperationName(opType);
1686 return ANEURALNETWORKS_BAD_DATA;
1687 }
1688 NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
1689 return validateOperationOperandTypes(operands, inputCount, inputIndexes,
1690 inExpectedTypes, outputCount, outputIndexes,
1691 outExpectedTypes);
1692 }
1693 case ANEURALNETWORKS_TOPK_V2: {
1694 if (inputCount != 2 || outputCount != 2) {
1695 logInvalidInOutNumber(2, 2);
1696 return ANEURALNETWORKS_BAD_DATA;
1697 }
1698 std::vector<OperandType> inExpectedTypes;
1699 std::vector<OperandType> outExpectedTypes;
1700 OperandType inputType = operands[inputIndexes[0]].type;
1701 if (inputType == OperandType::TENSOR_FLOAT16 ||
1702 inputType == OperandType::TENSOR_FLOAT32 ||
1703 inputType == OperandType::TENSOR_INT32 ||
1704 inputType == OperandType::TENSOR_QUANT8_ASYMM) {
1705 inExpectedTypes = {inputType, OperandType::INT32};
1706 outExpectedTypes = {inputType, OperandType::TENSOR_INT32};
1707 } else {
1708 LOG(ERROR) << "Unsupported input tensor type for operation "
1709 << getOperationName(opType);
1710 return ANEURALNETWORKS_BAD_DATA;
1711 }
1712 NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
1713 return validateOperationOperandTypes(operands, inputCount, inputIndexes,
1714 inExpectedTypes, outputCount, outputIndexes,
1715 outExpectedTypes);
1716 }
1717 default: {
1718 const OperationRegistration* operationRegistration =
1719 BuiltinOperationResolver::get()->findOperation(
1720 static_cast<OperationType>(opType));
1721 if (operationRegistration == nullptr) {
1722 if (0 <= opType && opType < kNumberOfOperationTypes) {
1723 LOG(ERROR) << getOperationName(opType) << " not registered";
1724 } else {
1725 LOG(ERROR) << "Operation type " << opType << " out of the range [0, "
1726 << kNumberOfOperationTypes << ")";
1727 }
1728 return ANEURALNETWORKS_UNEXPECTED_NULL;
1729 }
1730 if (operationRegistration->validate == nullptr) {
1731 LOG(ERROR) << "Incomplete operation registration: " << getOperationName(opType);
1732 return ANEURALNETWORKS_UNEXPECTED_NULL;
1733 }
1734 OperationValidationContext context(inputCount, inputIndexes, outputCount, outputIndexes,
1735 operands.data(), halVersion);
1736 if (!operationRegistration->validate(&context)) {
1737 LOG(ERROR) << "Validation failed for operation " << getOperationName(opType);
1738 return ANEURALNETWORKS_BAD_DATA;
1739 }
1740 return ANEURALNETWORKS_NO_ERROR;
1741 }
1742 }
1743 }
1744
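// A minimal call-site sketch for validateOperation() (the operand table and
// index values are invented for illustration): reject an ADD-shaped
// operation that a 1.0 driver could not handle.
//
//   const uint32_t ins[] = {0, 1, 2};  // two input tensors + fused activation
//   const uint32_t outs[] = {3};
//   int err = validateOperation(ANEURALNETWORKS_ADD, 3, ins, 1, outs,
//                               model.operands, HalVersion::V1_0);
//   if (err != ANEURALNETWORKS_NO_ERROR) { /* fall back, e.g. to CPU */ }
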
1745 ErrorStatus convertResultCodeToErrorStatus(int resultCode) {
1746 switch (resultCode) {
1747 case ANEURALNETWORKS_NO_ERROR:
1748 return ErrorStatus::NONE;
1749
1750 case ANEURALNETWORKS_BAD_DATA:
1751 case ANEURALNETWORKS_UNEXPECTED_NULL:
1752 return ErrorStatus::INVALID_ARGUMENT;
1753
1754 case ANEURALNETWORKS_OUTPUT_INSUFFICIENT_SIZE:
1755 return ErrorStatus::OUTPUT_INSUFFICIENT_SIZE;
1756
1757 case ANEURALNETWORKS_UNAVAILABLE_DEVICE:
1758 return ErrorStatus::DEVICE_UNAVAILABLE;
1759
1760 case ANEURALNETWORKS_BAD_STATE:
1761 case ANEURALNETWORKS_INCOMPLETE:
1762 case ANEURALNETWORKS_OP_FAILED:
1763 case ANEURALNETWORKS_OUT_OF_MEMORY:
1764 case ANEURALNETWORKS_UNMAPPABLE:
1765 return ErrorStatus::GENERAL_FAILURE;
1766 default:
1767 LOG(ERROR) << "Unknown result code " << resultCode
1768 << " mapped to ErrorStatus::GENERAL_FAILURE";
1769 return ErrorStatus::GENERAL_FAILURE;
1770 }
1771 }
1772
1773 int convertErrorStatusToResultCode(ErrorStatus status) {
1774 switch (status) {
1775 case ErrorStatus::NONE:
1776 return ANEURALNETWORKS_NO_ERROR;
1777
1778 case ErrorStatus::INVALID_ARGUMENT:
1779 return ANEURALNETWORKS_BAD_DATA;
1780
1781 case ErrorStatus::OUTPUT_INSUFFICIENT_SIZE:
1782 return ANEURALNETWORKS_OUTPUT_INSUFFICIENT_SIZE;
1783
1784 case ErrorStatus::DEVICE_UNAVAILABLE:
1785 return ANEURALNETWORKS_UNAVAILABLE_DEVICE;
1786
1787 case ErrorStatus::GENERAL_FAILURE:
1788 return ANEURALNETWORKS_OP_FAILED;
1789 default:
1790 LOG(ERROR) << "Unknown ErrorStatus " << toString(status)
1791 << " mapped to ANEURALNETWORKS_OP_FAILED";
1792 return ANEURALNETWORKS_OP_FAILED;
1793 }
1794 }
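
// A quick sanity sketch of how the two mappings above compose (illustrative
// only): convertErrorStatusToResultCode(convertResultCodeToErrorStatus(c))
// returns c for ANEURALNETWORKS_BAD_DATA, but codes that collapse to
// GENERAL_FAILURE (e.g. ANEURALNETWORKS_UNMAPPABLE) come back as
// ANEURALNETWORKS_OP_FAILED -- the composition is deliberately not the
// identity.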
1795
1796 // V1_2::Capabilities::operandPerformance utilities.
1797 // The field V1_2::Capabilities::operandPerformance is a vector sorted by the
1798 // field V1_2::Capabilities::OperandPerformance::type.
1799
1800 hidl_vec<Capabilities::OperandPerformance> nonExtensionOperandPerformance(PerformanceInfo perf) {
1801 using OpPerf = Capabilities::OperandPerformance;
1802
1803 // Note: range presents enumerators in declaration order, not in numerical order.
1804 static constexpr ::android::hardware::hidl_enum_range<OperandType> kOperandTypeRange;
1805
1806 hidl_vec<OpPerf> ret(kOperandTypeRange.end() - kOperandTypeRange.begin());
1807
1808 std::transform(kOperandTypeRange.begin(), kOperandTypeRange.end(), ret.begin(),
1809 [perf](OperandType type) {
1810 return Capabilities::OperandPerformance{type, perf};
1811 });
1812 std::sort(ret.begin(), ret.end(),
1813 [](const OpPerf& a, const OpPerf& b) { return a.type < b.type; });
1814
1815 return ret;
1816 }
1817
1818 void update(hidl_vec<Capabilities::OperandPerformance>* operandPerformance, OperandType type,
1819 PerformanceInfo perf) {
1820 CHECK(operandPerformance != nullptr);
1821 const auto it = std::lower_bound(operandPerformance->begin(), operandPerformance->end(), type,
1822 [](const Capabilities::OperandPerformance& perf,
1823 OperandType type) { return perf.type < type; });
1824 CHECK(it != operandPerformance->end())
1825 << toString(type) << " not in " << toString(*operandPerformance);
1826 it->info = perf;
1827 }
1828
1829 PerformanceInfo lookup(const hidl_vec<Capabilities::OperandPerformance>& operandPerformance,
1830 OperandType type) {
1831 const auto it = std::lower_bound(operandPerformance.begin(), operandPerformance.end(), type,
1832 [](const Capabilities::OperandPerformance& perf,
1833 OperandType type) { return perf.type < type; });
1834 if (it == operandPerformance.end()) {
1835 LOG(WARNING) << "No PerformanceInfo for " << toString(type);
1836 return {.execTime = FLT_MAX, .powerUsage = FLT_MAX};
1837 } else {
1838 return it->info;
1839 }
1840 }
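
// Illustrative sketch of how the three helpers above compose (values
// invented):
//
//   auto perf = nonExtensionOperandPerformance({.execTime = 0.5f, .powerUsage = 0.5f});
//   update(&perf, OperandType::TENSOR_QUANT8_ASYMM, {.execTime = 0.1f, .powerUsage = 0.1f});
//   PerformanceInfo p = lookup(perf, OperandType::TENSOR_QUANT8_ASYMM);  // p.execTime == 0.1f
//
// The binary searches in update() and lookup() rely on the vector staying
// sorted by OperandType, which nonExtensionOperandPerformance() guarantees.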
1841
1842 // Versioning
1843
1844 // In Android P, most data types are treated as having the same performance as TENSOR_QUANT8_ASYMM.
1845 // This array must be in sorted order.
1846 static const OperandType kQuantized8PerformanceConsistentWithP[] = {
1847 OperandType::INT32, OperandType::UINT32, OperandType::TENSOR_INT32, OperandType::OEM,
1848 OperandType::TENSOR_OEM_BYTE};
1849
1850 static bool isQuantized8PerformanceConsistentWithP(const V1_2::Capabilities& capabilities) {
1851 const PerformanceInfo quantized8Performance =
1852 lookup(capabilities.operandPerformance, OperandType::TENSOR_QUANT8_ASYMM);
1853 return std::all_of(std::begin(kQuantized8PerformanceConsistentWithP),
1854 std::end(kQuantized8PerformanceConsistentWithP),
1855 [quantized8Performance, &capabilities](OperandType type) {
1856 return quantized8Performance ==
1857 lookup(capabilities.operandPerformance, type);
1858 });
1859 }
1860
1861 static hidl_vec<V1_2::Capabilities::OperandPerformance> makeQuantized8PerformanceConsistentWithP(
1862 PerformanceInfo quantized8Performance) {
1863 hidl_vec<V1_2::Capabilities::OperandPerformance> ret(
1864 sizeof(kQuantized8PerformanceConsistentWithP) /
1865 sizeof(kQuantized8PerformanceConsistentWithP[0]));
1866 std::transform(
1867 std::begin(kQuantized8PerformanceConsistentWithP),
1868 std::end(kQuantized8PerformanceConsistentWithP), ret.begin(),
1869 [quantized8Performance](OperandType type) -> V1_2::Capabilities::OperandPerformance {
1870 return {type, quantized8Performance};
1871 });
1872 return ret;
1873 }
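
// The two helpers above are duals: makeQuantized8PerformanceConsistentWithP(q8)
// emits one {type, q8} entry per element of kQuantized8PerformanceConsistentWithP,
// which is exactly the pattern isQuantized8PerformanceConsistentWithP() checks
// for. E.g. (values invented):
//   auto table = makeQuantized8PerformanceConsistentWithP({.execTime = 1.0f, .powerUsage = 2.0f});
//   // every entry of 'table' now reports {1.0f, 2.0f}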
1874
1875 bool compliantWithV1_0(const V1_0::Capabilities&) {
1876 return true;
1877 }
1878
1879 bool compliantWithV1_0(const V1_1::Capabilities& capabilities) {
1880 return capabilities.relaxedFloat32toFloat16Performance == capabilities.float32Performance;
1881 }
1882
1883 bool compliantWithV1_0(const V1_2::Capabilities& capabilities) {
1884 const PerformanceInfo perfTensorFloat32 =
1885 lookup(capabilities.operandPerformance, OperandType::TENSOR_FLOAT32);
1886 const PerformanceInfo perfFloat32 =
1887 lookup(capabilities.operandPerformance, OperandType::FLOAT32);
1888 if (perfTensorFloat32 != perfFloat32 ||
1889 perfTensorFloat32 != capabilities.relaxedFloat32toFloat16PerformanceTensor ||
1890 perfFloat32 != capabilities.relaxedFloat32toFloat16PerformanceScalar) {
1891 return false;
1892 }
1893
1894 return isQuantized8PerformanceConsistentWithP(capabilities);
1895 }
1896
1897 bool compliantWithV1_1(const V1_0::Capabilities&) {
1898 return true;
1899 }
1900
1901 bool compliantWithV1_1(const V1_1::Capabilities&) {
1902 return true;
1903 }
1904
1905 bool compliantWithV1_1(const V1_2::Capabilities& capabilities) {
1906 if ((capabilities.relaxedFloat32toFloat16PerformanceTensor !=
1907 capabilities.relaxedFloat32toFloat16PerformanceScalar) ||
1908 (lookup(capabilities.operandPerformance, OperandType::TENSOR_FLOAT32) !=
1909 lookup(capabilities.operandPerformance, OperandType::FLOAT32))) {
1910 return false;
1911 }
1912
1913 return isQuantized8PerformanceConsistentWithP(capabilities);
1914 }
1915
1916 bool compliantWithV1_2(const V1_0::Capabilities&) {
1917 return true;
1918 }
1919
1920 bool compliantWithV1_2(const V1_1::Capabilities&) {
1921 return true;
1922 }
1923
1924 bool compliantWithV1_2(const V1_0::Model&) {
1925 return true;
1926 }
1927
1928 bool compliantWithV1_0(const V1_1::Model& model) {
1929 // In addition to new enumeration values being introduced in V1_1::Model, a
1930 // new flag was introduced to indicate whether or not float32 data can be
1931 // calculated using float16 units. This 'relaxComputationFloat32toFloat16'
1932 // flag has no bearing on whether a V1_1::Model is compliant with a
1933 // V1_0::Model, because all 1.0 drivers require strict calculation by default
1934 // in the P NN runtime. Even if fp16 calculations are allowed, they can
1935 // still be computed by a strict fp32 driver.
1936 return std::all_of(
1937 model.operations.begin(), model.operations.end(), [&model](const V1_1::Operation& op) {
1938 int error = validateOperation(static_cast<int32_t>(op.type), op.inputs.size(),
1939 op.inputs.size() > 0 ? op.inputs.data() : nullptr,
1940 op.outputs.size(),
1941 op.outputs.size() > 0 ? op.outputs.data() : nullptr,
1942 convertToV1_2(model.operands), HalVersion::V1_0);
1943 return error == ANEURALNETWORKS_NO_ERROR;
1944 });
1945 }
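
// Illustrative consequence of the comment above: a V1_1::Model whose only
// operation validates at HalVersion::V1_0 is V1_0-compliant even when
// relaxComputationFloat32toFloat16 == true; the flag is simply dropped by
// convertToV1_0() below.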
1946
1947 bool compliantWithV1_1(const V1_0::Model&) {
1948 return true;
1949 }
1950
1951 bool compliantWithV1_1(const V1_1::Model&) {
1952 return true;
1953 }
1954
1955 static V1_0::OperationType uncheckedConvertToV1_0(V1_1::OperationType type) {
1956 return static_cast<V1_0::OperationType>(type);
1957 }
1958
1959 static V1_1::OperationType convertToV1_1(V1_0::OperationType type) {
1960 return static_cast<V1_1::OperationType>(type);
1961 }
1962
1963 V1_0::Capabilities convertToV1_0(const V1_0::Capabilities& capabilities) {
1964 return capabilities;
1965 }
1966
1967 V1_0::Capabilities convertToV1_0(const V1_1::Capabilities& capabilities) {
1968 if (!compliantWithV1_0(capabilities)) {
1969 LOG(ERROR) << "Upcasting non-compliant capabilities " << toString(capabilities)
1970 << " from V1_1::Capabilities to V1_0::Capabilities";
1971 }
1972 return { .float32Performance = capabilities.float32Performance,
1973 .quantized8Performance = capabilities.quantized8Performance };
1974 }
1975
1976 V1_0::Capabilities convertToV1_0(const V1_2::Capabilities& capabilities) {
1977 if (!compliantWithV1_0(capabilities)) {
1978 LOG(ERROR) << "Upcasting non-compliant capabilities " << toString(capabilities)
1979 << " from V1_2::Capabilities to V1_0::Capabilities";
1980 }
1981 return {.float32Performance =
1982 lookup(capabilities.operandPerformance, OperandType::TENSOR_FLOAT32),
1983 .quantized8Performance =
1984 lookup(capabilities.operandPerformance, OperandType::TENSOR_QUANT8_ASYMM)};
1985 }
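
// Note the asymmetry: this direction is inherently lossy. Only the
// TENSOR_FLOAT32 and TENSOR_QUANT8_ASYMM rows of operandPerformance survive
// the conversion; rows with no V1_0 counterpart (e.g. TENSOR_FLOAT16) are
// dropped.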
1986
1987 V1_1::Capabilities convertToV1_1(const V1_0::Capabilities& capabilities) {
1988 return { .float32Performance = capabilities.float32Performance,
1989 .quantized8Performance = capabilities.quantized8Performance,
1990 .relaxedFloat32toFloat16Performance = capabilities.float32Performance };
1991 }
1992
1993 V1_1::Capabilities convertToV1_1(const V1_1::Capabilities& capabilities) {
1994 return capabilities;
1995 }
1996
1997 V1_1::Capabilities convertToV1_1(const V1_2::Capabilities& capabilities) {
1998 if (!compliantWithV1_1(capabilities)) {
1999 LOG(ERROR) << "Upcasting non-compliant capabilities " << toString(capabilities)
2000 << " from V1_2::Capabilities to V1_1::Capabilities";
2001 }
2002 return {.float32Performance =
2003 lookup(capabilities.operandPerformance, OperandType::TENSOR_FLOAT32),
2004 .quantized8Performance =
2005 lookup(capabilities.operandPerformance, OperandType::TENSOR_QUANT8_ASYMM),
2006 .relaxedFloat32toFloat16Performance =
2007 capabilities.relaxedFloat32toFloat16PerformanceTensor};
2008 }
2009
2010 V1_2::Capabilities convertToV1_2(const V1_0::Capabilities& capabilities) {
2011 V1_2::Capabilities ret = {
2012 .relaxedFloat32toFloat16PerformanceScalar = capabilities.float32Performance,
2013 .relaxedFloat32toFloat16PerformanceTensor = capabilities.float32Performance,
2014 .operandPerformance =
2015 makeQuantized8PerformanceConsistentWithP(capabilities.quantized8Performance)};
2016 auto& opPerf = ret.operandPerformance;
2017 opPerf.resize(opPerf.size() + 2);
2018 opPerf[opPerf.size() - 2] = {OperandType::TENSOR_FLOAT32, capabilities.float32Performance};
2019 opPerf[opPerf.size() - 1] = {OperandType::FLOAT32, capabilities.float32Performance};
2020 using OperandPerformance = V1_2::Capabilities::OperandPerformance;
2021 std::sort(opPerf.begin(), opPerf.end(),
2022 [](const OperandPerformance& a, const OperandPerformance& b) {
2023 return a.type < b.type;
2024 });
2025 return ret;
2026 }
2027
2028 V1_2::Capabilities convertToV1_2(const V1_1::Capabilities& capabilities) {
2029 V1_2::Capabilities ret = {.relaxedFloat32toFloat16PerformanceScalar =
2030 capabilities.relaxedFloat32toFloat16Performance,
2031 .relaxedFloat32toFloat16PerformanceTensor =
2032 capabilities.relaxedFloat32toFloat16Performance,
2033 .operandPerformance = makeQuantized8PerformanceConsistentWithP(
2034 capabilities.quantized8Performance)};
2035 auto& opPerf = ret.operandPerformance;
2036 opPerf.resize(opPerf.size() + 2);
2037 opPerf[opPerf.size() - 2] = {OperandType::TENSOR_FLOAT32, capabilities.float32Performance};
2038 opPerf[opPerf.size() - 1] = {OperandType::FLOAT32, capabilities.float32Performance};
2039 using OperandPerformance = V1_2::Capabilities::OperandPerformance;
2040 std::sort(opPerf.begin(), opPerf.end(),
2041 [](const OperandPerformance& a, const OperandPerformance& b) {
2042 return a.type < b.type;
2043 });
2044 return ret;
2045 }
2046
2047 V1_2::Capabilities convertToV1_2(const V1_2::Capabilities& capabilities) {
2048 return capabilities;
2049 }
2050
2051 static V1_0::Operation uncheckedConvertToV1_0(const V1_1::Operation& operation) {
2052 return {.type = uncheckedConvertToV1_0(operation.type),
2053 .inputs = operation.inputs,
2054 .outputs = operation.outputs};
2055 }
2056
2057 static V1_1::Operation convertToV1_1(const V1_0::Operation& operation) {
2058 return {.type = convertToV1_1(operation.type),
2059 .inputs = operation.inputs,
2060 .outputs = operation.outputs};
2061 }
2062
2063 static hidl_vec<V1_0::Operation> uncheckedConvertToV1_0(
2064 const hidl_vec<V1_1::Operation>& operations) {
2065 hidl_vec<V1_0::Operation> result(operations.size());
2066 std::transform(
2067 operations.begin(), operations.end(), result.begin(),
2068 [](const V1_1::Operation& operation) { return uncheckedConvertToV1_0(operation); });
2069 return result;
2070 }
2071
2072 static hidl_vec<V1_1::Operation> convertToV1_1(const hidl_vec<V1_0::Operation>& operations) {
2073 hidl_vec<V1_1::Operation> result(operations.size());
2074 std::transform(operations.begin(), operations.end(), result.begin(),
2075 [](const V1_0::Operation& operation) { return convertToV1_1(operation); });
2076 return result;
2077 }
2078
2079 bool compliantWithV1_0(const V1_2::Operand& operand) {
2080 return validOperandType(static_cast<V1_0::OperandType>(operand.type)) &&
2081 (nonExtensionOperandTypeIsScalar(static_cast<int>(operand.type)) ||
2082 operand.dimensions.size() != 0);
2083 }
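
// Two examples of operands rejected here: a TENSOR_FLOAT16 operand (the type
// does not exist in V1_0) and a TENSOR_FLOAT32 operand of unspecified rank
// (dimensions.size() == 0), since V1_0 drivers predate support for rank-less
// tensors.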
2084
2085 V1_0::Model convertToV1_0(const V1_0::Model& model) {
2086 return model;
2087 }
2088
2089 V1_0::Model convertToV1_0(const V1_1::Model& model) {
2090 if (!compliantWithV1_0(model)) {
2091 LOG(ERROR) << "Upcasting non-compliant model " << SHOW_IF_DEBUG(toString(model))
2092 << " from V1_1::Model to V1_0::Model";
2093 }
2094 return {.operands = model.operands,
2095 .operations = uncheckedConvertToV1_0(model.operations),
2096 .inputIndexes = model.inputIndexes,
2097 .outputIndexes = model.outputIndexes,
2098 .operandValues = model.operandValues,
2099 .pools = model.pools};
2100 }
2101
2102 V1_1::Model convertToV1_1(const V1_0::Model& model) {
2103 return {.operands = model.operands,
2104 .operations = convertToV1_1(model.operations),
2105 .inputIndexes = model.inputIndexes,
2106 .outputIndexes = model.outputIndexes,
2107 .operandValues = model.operandValues,
2108 .pools = model.pools,
2109 .relaxComputationFloat32toFloat16 = false};
2110 }
2111
2112 V1_1::Model convertToV1_1(const V1_1::Model& model) {
2113 return model;
2114 }
2115
2116 void logModelToInfo(const V1_2::Model& model) {
2117 LOG(INFO) << "V1_2::Model start";
2118 LOG(INFO) << "operands" << toString(model.operands);
2119 LOG(INFO) << "operations" << toString(model.operations);
2120 LOG(INFO) << "inputIndexes" << toString(model.inputIndexes);
2121 LOG(INFO) << "outputIndexes" << toString(model.outputIndexes);
2122 LOG(INFO) << "operandValues size" << model.operandValues.size();
2123 LOG(INFO) << "pools" << SHOW_IF_DEBUG(toString(model.pools));
2124 }
2125
2126 static bool compliantWith(HalVersion version, const V1_2::Model& model,
2127 std::set<uint32_t>* noncompliantOperations) {
2128 if (version >= HalVersion::V1_2) return true;
2129
2130 // A boolean vector indicating whether each pool is compliant with the target HAL version.
2131 std::vector<bool> isPoolCompliant(model.pools.size(), false);
2132 std::transform(model.pools.begin(), model.pools.end(), isPoolCompliant.begin(),
2133 [version](const hidl_memory& pool) { return validatePool(pool, version); });
2134
2135 // A boolean vector indicating whether each operand is compliant with the target HAL version.
2136 std::vector<bool> isOperandCompliant(model.operands.size(), false);
2137 std::transform(model.operands.begin(), model.operands.end(), isOperandCompliant.begin(),
2138 [&isPoolCompliant](const V1_2::Operand& op) {
2139 // There is no V1_1::Operand -- both V1_0::Model and V1_1::Model use
2140 // V1_0::Operand.
2141 return compliantWithV1_0(op) &&
2142 !(op.lifetime == OperandLifeTime::CONSTANT_REFERENCE &&
2143 !isPoolCompliant[op.location.poolIndex]);
2144 });
2145
2146 auto allOperandsCompliant = [&isOperandCompliant](const hidl_vec<uint32_t>& indices) {
2147 return std::all_of(
2148 indices.begin(), indices.end(),
2149 [&isOperandCompliant](const uint32_t ind) { return isOperandCompliant[ind]; });
2150 };
2151
2152 auto localValidateOperation = [&model, version,
2153 &allOperandsCompliant](const V1_2::Operation& op) {
2154 if (!allOperandsCompliant(op.inputs) || !allOperandsCompliant(op.outputs)) return false;
2155 int error = validateOperation(
2156 static_cast<int32_t>(op.type), op.inputs.size(),
2157 op.inputs.size() > 0 ? op.inputs.data() : nullptr, op.outputs.size(),
2158 op.outputs.size() > 0 ? op.outputs.data() : nullptr, model.operands, version);
2159 return error == ANEURALNETWORKS_NO_ERROR;
2160 };
2161
2162 if (noncompliantOperations) {
2163 CHECK(noncompliantOperations->empty());
2164 for (uint32_t idx = 0; idx < model.operations.size(); ++idx) {
2165 if (!localValidateOperation(model.operations[idx])) {
2166 noncompliantOperations->insert(idx);
2167 }
2168 }
2169 return noncompliantOperations->empty();
2170 } else {
2171 return std::all_of(model.operations.begin(), model.operations.end(),
2172 localValidateOperation);
2173 }
2174 }
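
// Hypothetical caller sketch (names invented): a partitioner probing which
// operations must stay off a 1.0 driver.
//
//   std::set<uint32_t> noncompliant;
//   if (!compliantWithV1_0(model, &noncompliant)) {
//       for (uint32_t idx : noncompliant) {
//           // route model.operations[idx] to a more capable device instead
//       }
//   }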
2175
2176 bool compliantWithV1_0(const V1_2::Model& model, std::set<uint32_t>* noncompliantOperations) {
2177 return compliantWith(HalVersion::V1_0, model, noncompliantOperations);
2178 }
2179
2180 bool compliantWithV1_1(const V1_2::Model& model, std::set<uint32_t>* noncompliantOperations) {
2181 return compliantWith(HalVersion::V1_1, model, noncompliantOperations);
2182 }
2183
2184 V1_0::OperationType uncheckedConvertToV1_0(V1_2::OperationType type) {
2185 return static_cast<V1_0::OperationType>(type);
2186 }
2187
2188 V1_1::OperationType uncheckedConvertToV1_1(V1_2::OperationType type) {
2189 return static_cast<V1_1::OperationType>(type);
2190 }
2191
2192 static V1_2::OperationType convertToV1_2(V1_0::OperationType type) {
2193 return static_cast<V1_2::OperationType>(type);
2194 }
2195
2196 static V1_2::OperationType convertToV1_2(V1_1::OperationType type) {
2197 return static_cast<V1_2::OperationType>(type);
2198 }
2199
2200 static V1_0::Operation uncheckedConvertToV1_0(const V1_2::Operation& operation) {
2201 return {.type = uncheckedConvertToV1_0(operation.type),
2202 .inputs = operation.inputs,
2203 .outputs = operation.outputs};
2204 }
2205
2206 static V1_1::Operation uncheckedConvertToV1_1(const V1_2::Operation& operation) {
2207 return {.type = uncheckedConvertToV1_1(operation.type),
2208 .inputs = operation.inputs,
2209 .outputs = operation.outputs};
2210 }
2211
2212 static V1_2::Operation convertToV1_2(const V1_0::Operation& operation) {
2213 return {.type = convertToV1_2(operation.type),
2214 .inputs = operation.inputs,
2215 .outputs = operation.outputs};
2216 }
2217
2218 static V1_2::Operation convertToV1_2(const V1_1::Operation& operation) {
2219 return {.type = convertToV1_2(operation.type),
2220 .inputs = operation.inputs,
2221 .outputs = operation.outputs};
2222 }
2223
2224 static hidl_vec<V1_0::Operation> uncheckedConvertToV1_0(
2225 const hidl_vec<V1_2::Operation>& operations) {
2226 hidl_vec<V1_0::Operation> result(operations.size());
2227 std::transform(
2228 operations.begin(), operations.end(), result.begin(),
2229 [](const V1_2::Operation& operation) { return uncheckedConvertToV1_0(operation); });
2230 return result;
2231 }
2232
2233 static hidl_vec<V1_1::Operation> uncheckedConvertToV1_1(
2234 const hidl_vec<V1_2::Operation>& operations) {
2235 hidl_vec<V1_1::Operation> result(operations.size());
2236 std::transform(
2237 operations.begin(), operations.end(), result.begin(),
2238 [](const V1_2::Operation& operation) { return uncheckedConvertToV1_1(operation); });
2239 return result;
2240 }
2241
2242 static hidl_vec<V1_2::Operation> convertToV1_2(const hidl_vec<V1_0::Operation>& operations) {
2243 hidl_vec<V1_2::Operation> result(operations.size());
2244 std::transform(operations.begin(), operations.end(), result.begin(),
2245 [](const V1_0::Operation& operation) { return convertToV1_2(operation); });
2246 return result;
2247 }
2248
2249 static hidl_vec<V1_2::Operation> convertToV1_2(const hidl_vec<V1_1::Operation>& operations) {
2250 hidl_vec<V1_2::Operation> result(operations.size());
2251 std::transform(operations.begin(), operations.end(), result.begin(),
2252 [](const V1_1::Operation& operation) { return convertToV1_2(operation); });
2253 return result;
2254 }
2255
2256 // We only need to convert from 1.0 and back, since there were no changes to
2257 // Operand in 1.1.
2258 V1_2::OperandType convertToV1_2(const V1_0::OperandType& operandType) {
2259 return static_cast<V1_2::OperandType>(operandType);
2260 }
2261
2262 static bool compliantWithV1_0(const V1_2::OperandType& operandType) {
2263 return validOperandType(static_cast<V1_0::OperandType>(operandType));
2264 }
2265
2266 V1_0::OperandType convertToV1_0(const V1_2::OperandType& operandType) {
2267 if (!compliantWithV1_0(operandType)) {
2268 LOG(ERROR) << "Upcasting non-compliant operand type " << toString(operandType)
2269 << " from V1_2::Operand to V1_0::Operand";
2270 }
2271 return static_cast<V1_0::OperandType>(operandType);
2272 }
2273
2274 // We only need to convert from 1.0 and back, since there were no changes to
2275 // Operand in 1.1.
2276 V1_2::Operand convertToV1_2(const V1_0::Operand& operand) {
2277 return {.type = convertToV1_2(operand.type),
2278 .dimensions = operand.dimensions,
2279 .numberOfConsumers = operand.numberOfConsumers,
2280 .scale = operand.scale,
2281 .zeroPoint = operand.zeroPoint,
2282 .lifetime = operand.lifetime,
2283 .location = operand.location};
2284 }
2285
2286 V1_2::Operand convertToV1_2(const V1_2::Operand& operand) {
2287 return operand;
2288 }
2289
2290 V1_0::Operand convertToV1_0(const V1_2::Operand& operand) {
2291 return {.type = convertToV1_0(operand.type),
2292 .dimensions = operand.dimensions,
2293 .numberOfConsumers = operand.numberOfConsumers,
2294 .scale = operand.scale,
2295 .zeroPoint = operand.zeroPoint,
2296 .lifetime = operand.lifetime,
2297 .location = operand.location};
2298 }
2299
2300 // We only need to convert from 1.0 and back, since there were no changes to
2301 // Operand in 1.1.
2302 hidl_vec<V1_2::Operand> convertToV1_2(const hidl_vec<V1_0::Operand>& operands) {
2303 hidl_vec<V1_2::Operand> result(operands.size());
2304 std::transform(operands.begin(), operands.end(), result.begin(),
2305 [](const V1_0::Operand& operand) { return convertToV1_2(operand); });
2306 return result;
2307 }
2308
2309 hidl_vec<V1_2::Operand> convertToV1_2(const hidl_vec<V1_2::Operand>& operands) {
2310 return operands;
2311 }
2312
2313 hidl_vec<V1_0::Operand> convertToV1_0(const hidl_vec<V1_2::Operand>& operands) {
2314 hidl_vec<V1_0::Operand> result(operands.size());
2315 std::transform(operands.begin(), operands.end(), result.begin(),
2316 [](const V1_2::Operand& operand) { return convertToV1_0(operand); });
2317 return result;
2318 }
2319
2320 V1_0::Model convertToV1_0(const V1_2::Model& model) {
2321 if (!compliantWithV1_0(model)) {
2322 LOG(ERROR) << "Upcasting non-compliant model " << SHOW_IF_DEBUG(toString(model))
2323 << " from V1_2::Model to V1_0::Model";
2324 }
2325 return {.operands = convertToV1_0(model.operands),
2326 .operations = uncheckedConvertToV1_0(model.operations),
2327 .inputIndexes = model.inputIndexes,
2328 .outputIndexes = model.outputIndexes,
2329 .operandValues = model.operandValues,
2330 .pools = model.pools};
2331 }
2332
2333 V1_1::Model convertToV1_1(const V1_2::Model& model) {
2334 if (!compliantWithV1_1(model)) {
2335 LOG(ERROR) << "Upcasting non-compliant model " << SHOW_IF_DEBUG(toString(model))
2336 << " from V1_2::Model to V1_1::Model";
2337 }
2338 return {.operands = convertToV1_0(model.operands), // Operands in 1.1 and 1.0 are identical.
2339 .operations = uncheckedConvertToV1_1(model.operations),
2340 .inputIndexes = model.inputIndexes,
2341 .outputIndexes = model.outputIndexes,
2342 .operandValues = model.operandValues,
2343 .pools = model.pools,
2344 .relaxComputationFloat32toFloat16 = model.relaxComputationFloat32toFloat16};
2345 }
2346
2347 V1_2::Model convertToV1_2(const V1_0::Model& model) {
2348 return {.operands = convertToV1_2(model.operands),
2349 .operations = convertToV1_2(model.operations),
2350 .inputIndexes = model.inputIndexes,
2351 .outputIndexes = model.outputIndexes,
2352 .operandValues = model.operandValues,
2353 .pools = model.pools,
2354 .relaxComputationFloat32toFloat16 = false};
2355 }
2356
2357 V1_2::Model convertToV1_2(const V1_1::Model& model) {
2358 return {.operands = convertToV1_2(model.operands),
2359 .operations = convertToV1_2(model.operations),
2360 .inputIndexes = model.inputIndexes,
2361 .outputIndexes = model.outputIndexes,
2362 .operandValues = model.operandValues,
2363 .pools = model.pools,
2364 .relaxComputationFloat32toFloat16 = model.relaxComputationFloat32toFloat16};
2365 }
2366
2367 V1_2::Model convertToV1_2(const V1_2::Model& model) {
2368 return model;
2369 }
2370
2371 #ifdef NN_DEBUGGABLE
2372 uint32_t getProp(const char* str, uint32_t defaultValue) {
2373 const std::string propStr = android::base::GetProperty(str, "");
2374 if (propStr.size() > 0) {
2375 return std::stoi(propStr);
2376 } else {
2377 return defaultValue;
2378 }
2379 }
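
// Example (debuggable builds only; the property name follows the runtime's
// debug.nn.* convention and is shown for illustration):
//   uint32_t partitioning = getProp("debug.nn.partition", 0);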
2380 #endif // NN_DEBUGGABLE
2381
2382 } // namespace nn
2383 } // namespace android
2384