/*
 * Copyright (C) 2017 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define LOG_TAG "Utils"

#include "LegacyUtils.h"

#include <android-base/logging.h>
#include <android-base/properties.h>
#include <android-base/strings.h>
#include <errno.h>
#include <nnapi/TypeUtils.h>
#include <poll.h>

#include <algorithm>
#include <functional>
#include <limits>
#include <numeric>
#include <string>
#include <tuple>
#include <unordered_map>
#include <utility>
#include <vector>

#include "ControlFlow.h"
#include "NeuralNetworks.h"
#include "NeuralNetworksOEM.h"
#include "OperationResolver.h"

namespace android {
namespace nn {

const char kVLogPropKey[] = "debug.nn.vlog";
int vLogMask = ~0;
// Split the space-separated list of tags from the verbose log setting and
// build the logging mask from it. Note that '1' and 'all' are special cases
// that enable all verbose logging.
//
// The NN API verbose logging setting comes from the system property
// debug.nn.vlog. Example:
// setprop debug.nn.vlog 1 : enable all logging tags.
// setprop debug.nn.vlog "model compilation" : only enable logging for MODEL and
//                                             COMPILATION tags.
void initVLogMask() {
    vLogMask = 0;
    const std::string vLogSetting = android::base::GetProperty(kVLogPropKey, "");
    if (vLogSetting.empty()) {
        return;
    }

    std::unordered_map<std::string, int> vLogFlags = {{"1", -1},
                                                      {"all", -1},
                                                      {"model", MODEL},
                                                      {"compilation", COMPILATION},
                                                      {"execution", EXECUTION},
                                                      {"cpuexe", CPUEXE},
                                                      {"manager", MANAGER},
                                                      {"driver", DRIVER},
                                                      {"memory", MEMORY}};

    std::vector<std::string> elements = android::base::Split(vLogSetting, " ,:");
    for (const auto& elem : elements) {
        const auto& flag = vLogFlags.find(elem);
        if (flag == vLogFlags.end()) {
            LOG(ERROR) << "Unknown trace flag: " << elem;
            continue;
        }

        if (flag->second == -1) {
            // -1 is used for the special values "1" and "all" that enable all
            // tracing.
            vLogMask = ~0;
            return;
        } else {
            vLogMask |= 1 << flag->second;
        }
    }
}
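
// Illustrative example (values derived from the flag table above): after
//   setprop debug.nn.vlog "model compilation"
// initVLogMask() leaves vLogMask == (1 << MODEL) | (1 << COMPILATION), so
// only verbose statements guarded by those two tags are enabled.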

Duration makeTimeoutDuration(uint64_t nanoseconds) {
    constexpr auto kMaxCount = Duration::max().count();
    using CommonType = std::common_type_t<Duration::rep, uint64_t>;
    const auto count = std::min<CommonType>(kMaxCount, nanoseconds);
    return Duration{static_cast<Duration::rep>(count)};
}

OptionalDuration makeTimeoutDuration(int64_t nanoseconds) {
    CHECK_GE(nanoseconds, -1);
    if (nanoseconds == -1) {
        return OptionalDuration{};
    }
    return makeTimeoutDuration(static_cast<uint64_t>(nanoseconds));
}

TimePoint makeDeadline(Duration duration) {
    constexpr auto kMaxTime = TimePoint::max();
    const auto currentTime = Clock::now();

    // If there would be an overflow, use the max value.
    if (duration > kMaxTime - currentTime) {
        return kMaxTime;
    }
    return currentTime + duration;
}

bool hasDeadlinePassed(const OptionalTimePoint& deadline) {
    if (!deadline.has_value()) {
        return false;
    }
    return Clock::now() >= *deadline;
}

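// A type code denotes a vendor extension type exactly when it has bits set
// above the low kExtensionTypeBits bits; all non-extension (core and OEM)
// type codes fit within those low bits.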
static bool isExtensionOperandType(int32_t type) {
    return (static_cast<uint32_t>(type) >> kExtensionTypeBits) != 0;
}

static bool isExtensionOperationType(ANeuralNetworksOperationType type) {
    return (static_cast<uint32_t>(type) >> kExtensionTypeBits) != 0;
}

bool isExtensionOperandType(OperandType type) {
    return isExtensionOperandType(static_cast<int32_t>(type));
}

bool isExtensionOperationType(OperationType type) {
    return isExtensionOperationType(static_cast<int32_t>(type));
}

namespace {

template <typename EntryType, uint32_t entryCount, uint32_t entryCountOEM>
EntryType tableLookup(const EntryType (&table)[entryCount],
                      const EntryType (&tableOEM)[entryCountOEM], uint32_t code) {
    if (code < entryCount) {
        return table[code];
    } else if (code >= kOEMCodeBase && (code - kOEMCodeBase) < entryCountOEM) {
        return tableOEM[code - kOEMCodeBase];
    } else {
        nnAssert(!"tableLookup: bad code");
        return EntryType();
    }
}
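
// For example (illustrative): code == kOEMCodeBase + 1 falls through to the
// OEM table and returns tableOEM[1]; a code that is neither below entryCount
// nor a valid OEM offset trips the assertion above.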

static Version convert(HalVersion halVersion) {
    switch (halVersion) {
        case HalVersion::UNKNOWN:
            break;
        case HalVersion::V1_0:
            return Version::ANDROID_OC_MR1;
        case HalVersion::V1_1:
            return Version::ANDROID_P;
        case HalVersion::V1_2:
            return Version::ANDROID_Q;
        case HalVersion::V1_3:
            return Version::ANDROID_R;
        case HalVersion::AIDL_UNSTABLE:
            return Version::ANDROID_S;
    }
    LOG(FATAL) << "Cannot convert " << halVersion;
    return {};
}

class OperationValidationContext : public IOperationValidationContext {
    DISALLOW_IMPLICIT_CONSTRUCTORS(OperationValidationContext);

   public:
    OperationValidationContext(const char* operationName, uint32_t inputCount,
                               const uint32_t* inputIndexes, uint32_t outputCount,
                               const uint32_t* outputIndexes, const Operand* operands)
        : operationName(operationName),
          inputCount(inputCount),
          inputIndexes(inputIndexes),
          outputCount(outputCount),
          outputIndexes(outputIndexes),
          operands(operands) {}

    const char* getOperationName() const override;

    uint32_t getNumInputs() const override;
    OperandType getInputType(uint32_t index) const override;
    Shape getInputShape(uint32_t index) const override;
    const Operand::ExtraParams& getInputExtraParams(uint32_t index) const override;

    uint32_t getNumOutputs() const override;
    OperandType getOutputType(uint32_t index) const override;
    Shape getOutputShape(uint32_t index) const override;

   private:
    const Operand* getInputOperand(uint32_t index) const;
    const Operand* getOutputOperand(uint32_t index) const;

    const char* operationName;
    uint32_t inputCount;
    const uint32_t* inputIndexes;
    uint32_t outputCount;
    const uint32_t* outputIndexes;
    const Operand* operands;
};

const char* OperationValidationContext::getOperationName() const {
    return operationName;
}

const Operand* OperationValidationContext::getInputOperand(uint32_t index) const {
    CHECK(index < static_cast<uint32_t>(inputCount));
    return &operands[inputIndexes[index]];
}

const Operand* OperationValidationContext::getOutputOperand(uint32_t index) const {
    CHECK(index < static_cast<uint32_t>(outputCount));
    return &operands[outputIndexes[index]];
}

uint32_t OperationValidationContext::getNumInputs() const {
    return inputCount;
}

uint32_t OperationValidationContext::getNumOutputs() const {
    return outputCount;
}

OperandType OperationValidationContext::getInputType(uint32_t index) const {
    return getInputOperand(index)->type;
}

Shape OperationValidationContext::getInputShape(uint32_t index) const {
    const Operand* operand = getInputOperand(index);
    return {operand->type, operand->dimensions, operand->scale, operand->zeroPoint,
            operand->extraParams};
}

const Operand::ExtraParams& OperationValidationContext::getInputExtraParams(uint32_t index) const {
    return getInputOperand(index)->extraParams;
}

OperandType OperationValidationContext::getOutputType(uint32_t index) const {
    return getOutputOperand(index)->type;
}

Shape OperationValidationContext::getOutputShape(uint32_t index) const {
    const Operand* operand = getOutputOperand(index);
    return {operand->type, operand->dimensions, operand->scale, operand->zeroPoint,
            operand->extraParams};
}

}  // anonymous namespace

#define COUNT(X) (sizeof(X) / sizeof(X[0]))

const uint32_t kSizeOfDataType[]{
        4,  // ANEURALNETWORKS_FLOAT32
        4,  // ANEURALNETWORKS_INT32
        4,  // ANEURALNETWORKS_UINT32
        4,  // ANEURALNETWORKS_TENSOR_FLOAT32
        4,  // ANEURALNETWORKS_TENSOR_INT32
        1,  // ANEURALNETWORKS_TENSOR_QUANT8_ASYMM
        1,  // ANEURALNETWORKS_BOOL
        2,  // ANEURALNETWORKS_TENSOR_QUANT16_SYMM
        2,  // ANEURALNETWORKS_TENSOR_FLOAT16
        1,  // ANEURALNETWORKS_TENSOR_BOOL8
        2,  // ANEURALNETWORKS_FLOAT16
        1,  // ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL
        2,  // ANEURALNETWORKS_TENSOR_QUANT16_ASYMM
        1,  // ANEURALNETWORKS_TENSOR_QUANT8_SYMM
        1,  // ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED
        0,  // ANEURALNETWORKS_MODEL
};

static_assert(COUNT(kSizeOfDataType) == kNumberOfDataTypes, "kSizeOfDataType is incorrect");

const bool kScalarDataType[]{
        true,   // ANEURALNETWORKS_FLOAT32
        true,   // ANEURALNETWORKS_INT32
        true,   // ANEURALNETWORKS_UINT32
        false,  // ANEURALNETWORKS_TENSOR_FLOAT32
        false,  // ANEURALNETWORKS_TENSOR_INT32
        false,  // ANEURALNETWORKS_TENSOR_QUANT8_ASYMM
        true,   // ANEURALNETWORKS_BOOL
        false,  // ANEURALNETWORKS_TENSOR_QUANT16_SYMM
        false,  // ANEURALNETWORKS_TENSOR_FLOAT16
        false,  // ANEURALNETWORKS_TENSOR_BOOL8
        true,   // ANEURALNETWORKS_FLOAT16
        false,  // ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL
        false,  // ANEURALNETWORKS_TENSOR_QUANT16_ASYMM
        false,  // ANEURALNETWORKS_TENSOR_QUANT8_SYMM
        false,  // ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED
        true,   // ANEURALNETWORKS_MODEL
};

static_assert(COUNT(kScalarDataType) == kNumberOfDataTypes, "kScalarDataType is incorrect");

const uint32_t kSizeOfDataTypeOEM[]{
        0,  // ANEURALNETWORKS_OEM
        1,  // ANEURALNETWORKS_TENSOR_OEM_BYTE
};

static_assert(COUNT(kSizeOfDataTypeOEM) == kNumberOfDataTypesOEM,
              "kSizeOfDataTypeOEM is incorrect");

const bool kScalarDataTypeOEM[]{
        true,   // ANEURALNETWORKS_OEM
        false,  // ANEURALNETWORKS_TENSOR_OEM_BYTE
};

static_assert(COUNT(kScalarDataTypeOEM) == kNumberOfDataTypesOEM,
              "kScalarDataTypeOEM is incorrect");

bool nonExtensionOperandTypeIsScalar(int type) {
    CHECK(!isExtensionOperandType(type)) << "Extension operand types are not supported";
    return tableLookup(kScalarDataType, kScalarDataTypeOEM, type);
}

uint32_t nonExtensionOperandSizeOfData(OperandType type, const std::vector<uint32_t>& dimensions) {
    const size_t size = getNonExtensionSize(type, dimensions).value();
    CHECK_LE(size, std::numeric_limits<uint32_t>::max());
    return size;
}

// Returns a pair of {false, size} on success, {true, 0} if size overflows uint32_t.
static std::pair<bool, uint32_t> sizeOfTensorDataHelper(uint32_t sizeOfElement,
                                                        const std::vector<uint32_t>& dimensions) {
    if (dimensions.empty()) {
        return {false, 0};
    }
    uint64_t size = static_cast<uint64_t>(sizeOfElement);
    constexpr uint64_t kMaxSize = static_cast<uint64_t>(std::numeric_limits<uint32_t>::max());
    for (uint32_t d : dimensions) {
        size *= d;
        if (size > kMaxSize) return {true, 0};
    }
    return {false, static_cast<uint32_t>(size)};
}
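
// Worked example (illustrative values): sizeOfElement == 4 with dimensions
// {2, 3} yields {false, 24}; dimensions {65536, 65536} push the running
// product past UINT32_MAX, so the helper returns {true, 0} instead.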

uint32_t sizeOfTensorData(uint32_t sizeOfElement, const std::vector<uint32_t>& dimensions) {
    const auto [overflow, size] = sizeOfTensorDataHelper(sizeOfElement, dimensions);
    CHECK(!overflow);
    return size;
}

bool nonExtensionOperandSizeOfDataOverflowsUInt32(OperandType type,
                                                  const std::vector<uint32_t>& dimensions) {
    CHECK(!isExtension(type)) << "Size of extension operand data is unknown";
    int n = static_cast<int>(type);
    uint32_t sizeOfElement = tableLookup(kSizeOfDataType, kSizeOfDataTypeOEM, n);
    return tableLookup(kScalarDataType, kScalarDataTypeOEM, n)
                   ? false
                   : sizeOfTensorDataOverflowsUInt32(sizeOfElement, dimensions);
}

bool sizeOfTensorDataOverflowsUInt32(uint32_t sizeOfElement,
                                     const std::vector<uint32_t>& dimensions) {
    return sizeOfTensorDataHelper(sizeOfElement, dimensions).first;
}

bool tensorHasUnspecifiedDimensions(int type, const uint32_t* dim, uint32_t dimCount) {
    if (!isExtensionOperandType(type)) {
        CHECK(!nonExtensionOperandTypeIsScalar(type))
                << "A scalar type can never have unspecified dimensions";
    }
    return dimCount == 0 || std::find(dim, dim + dimCount, 0) != (dim + dimCount);
}

bool tensorHasUnspecifiedDimensions(OperandType type, const std::vector<uint32_t>& dimensions) {
    return tensorHasUnspecifiedDimensions(static_cast<int>(type), dimensions.data(),
                                          dimensions.size());
}

bool tensorHasUnspecifiedDimensions(const ANeuralNetworksOperandType* type) {
    return tensorHasUnspecifiedDimensions(type->type, type->dimensions, type->dimensionCount);
}

bool tensorHasUnspecifiedDimensions(const Operand& operand) {
    return tensorHasUnspecifiedDimensions(operand.type, operand.dimensions);
}

uint32_t alignBytesNeeded(uint32_t index, size_t length) {
    uint32_t alignment = getAlignmentForLength(length);
    uint32_t pattern = alignment - 1;
    uint32_t extra = (~(index - 1)) & pattern;
    return extra;
}
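
// Note on the bit trick above: (~(index - 1)) equals -index in two's
// complement, so extra == (-index) & (alignment - 1), i.e. the number of
// padding bytes needed to round index up to the next multiple of alignment
// (a power of two). For example (illustrative), index == 13 with
// alignment == 4 gives extra == 3, and 13 + 3 == 16 is aligned.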

void logModelToInfo(const Model& model) {
    LOG(INFO) << model;
}

static bool validateScalarDimensions(const ANeuralNetworksOperandType& type, const char* tag) {
    NN_RET_CHECK_EQ(type.dimensionCount, 0u) << tag << " invalid dimensions for scalar type";
    NN_RET_CHECK(type.dimensions == nullptr) << tag << " invalid dimensions for scalar type";
    return true;
}

static bool validateQuant8AsymmParams(const ANeuralNetworksOperandType& type, const char* tag) {
    NN_RET_CHECK(0 <= type.zeroPoint && type.zeroPoint <= 255)
            << tag << " invalid zeroPoint: " << type.zeroPoint;
    NN_RET_CHECK_GT(type.scale, 0.f) << tag << " invalid scale";
    return true;
}

static bool validateQuant8AsymmSignedParams(const ANeuralNetworksOperandType& type,
                                            const char* tag) {
    NN_RET_CHECK(-128 <= type.zeroPoint && type.zeroPoint <= 127)
            << tag << " invalid zeroPoint: " << type.zeroPoint;
    NN_RET_CHECK_GT(type.scale, 0.f) << tag << " invalid scale";
    return true;
}

static bool validateQuant8SymmParams(const ANeuralNetworksOperandType& type, const char* tag) {
    NN_RET_CHECK_EQ(type.zeroPoint, 0) << tag << " invalid zeroPoint: " << type.zeroPoint;
    NN_RET_CHECK_GT(type.scale, 0.f) << tag << " invalid scale";
    return true;
}

static bool validateQuant16AsymmParams(const ANeuralNetworksOperandType& type, const char* tag) {
    NN_RET_CHECK(0 <= type.zeroPoint && type.zeroPoint <= 65535)
            << tag << " invalid zeroPoint: " << type.zeroPoint;
    NN_RET_CHECK_GT(type.scale, 0.f) << tag << " invalid scale";
    return true;
}

static bool validateQuantSymmParams(const ANeuralNetworksOperandType& type, const char* tag) {
    NN_RET_CHECK_EQ(type.zeroPoint, 0) << tag << " zeroPoint is not zero";
    NN_RET_CHECK_GT(type.scale, 0.f) << tag << " invalid scale";
    return true;
}

static bool validateNoQuantParams(const ANeuralNetworksOperandType& type, const char* tag) {
    NN_RET_CHECK_EQ(type.zeroPoint, 0) << tag << " zeroPoint is not zero";
    NN_RET_CHECK_EQ(type.scale, 0.f) << tag << " scale is not zero";
    return true;
}

static bool validateTensorDimensions(
        const ANeuralNetworksOperandType& type,
        const Extension::OperandTypeInformation* const extensionOperandTypeInfo, const char* tag,
        bool allowPartial) {
    if (!allowPartial) {
        NN_RET_CHECK_GT(type.dimensionCount, 0u) << tag << " invalid operand dimensions";
    }
    uint64_t size =
            isExtensionOperandType(type.type)
                    ? extensionOperandTypeInfo->byteSize
                    : tableLookup(kSizeOfDataType, kSizeOfDataTypeOEM, static_cast<int>(type.type));
    constexpr uint64_t kMaxSize = std::numeric_limits<uint32_t>::max();
    for (uint32_t i = 0; i < type.dimensionCount; i++) {
        if (!allowPartial) {
            NN_RET_CHECK_NE(type.dimensions[i], 0u) << tag << " invalid operand dimensions";
        }
        if (type.dimensions[i] != 0) {
            size *= type.dimensions[i];
            NN_RET_CHECK_LE(size, kMaxSize) << tag << " operand byte size exceeds " << kMaxSize;
        }
    }
    return true;
}

static bool validateOperandTypeHelper(
        const ANeuralNetworksOperandType& type,
        const Extension::OperandTypeInformation* const extensionOperandTypeInfo, const char* tag,
        bool allowPartial) {
    NN_RET_CHECK_EQ(type.dimensionCount == 0, type.dimensions == nullptr);
    if (isExtensionOperandType(type.type)) {
        NN_RET_CHECK(extensionOperandTypeInfo != nullptr);
        if (extensionOperandTypeInfo->isTensor) {
            NN_RET_CHECK(
                    validateTensorDimensions(type, extensionOperandTypeInfo, tag, allowPartial));
        } else {
            NN_RET_CHECK(validateScalarDimensions(type, tag));
        }
        return validateNoQuantParams(type, tag);
    }

    NN_RET_CHECK(extensionOperandTypeInfo == nullptr);
    NN_RET_CHECK(validCode(kNumberOfDataTypes, kNumberOfDataTypesOEM, type.type))
            << tag << " invalid OperandType: " << type.type;

    bool isScalar = tableLookup(kScalarDataType, kScalarDataTypeOEM, type.type);
    if (isScalar) {
        NN_RET_CHECK(validateScalarDimensions(type, tag));
        if (type.type != ANEURALNETWORKS_OEM_SCALAR) {  // Historically, we have allowed OEM types
                                                        // to use quantization parameters.
            NN_RET_CHECK(validateNoQuantParams(type, tag));
        }
    } else {
        NN_RET_CHECK(validateTensorDimensions(type, extensionOperandTypeInfo, tag, allowPartial));
        if (type.type == ANEURALNETWORKS_TENSOR_QUANT8_ASYMM) {
            NN_RET_CHECK(validateQuant8AsymmParams(type, tag));
        } else if (type.type == ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED) {
            NN_RET_CHECK(validateQuant8AsymmSignedParams(type, tag));
        } else if (type.type == ANEURALNETWORKS_TENSOR_QUANT8_SYMM) {
            NN_RET_CHECK(validateQuant8SymmParams(type, tag));
        } else if (type.type == ANEURALNETWORKS_TENSOR_QUANT16_ASYMM) {
            NN_RET_CHECK(validateQuant16AsymmParams(type, tag));
        } else if (type.type == ANEURALNETWORKS_TENSOR_QUANT16_SYMM) {
            NN_RET_CHECK(validateQuantSymmParams(type, tag));
        } else if (type.type == ANEURALNETWORKS_TENSOR_INT32) {
            // TODO(b/119869082): TENSOR_INT32 should not use quantization parameters.
        } else if (type.type == ANEURALNETWORKS_TENSOR_OEM_BYTE) {
            // Historically, we have allowed OEM types to use quantization parameters.
        } else {
            NN_RET_CHECK(validateNoQuantParams(type, tag));
        }
    }

    return true;
}

int validateOperandType(const ANeuralNetworksOperandType& type,
                        const Extension::OperandTypeInformation* const extensionOperandTypeInfo,
                        const char* tag, bool allowPartial) {
    return validateOperandTypeHelper(type, extensionOperandTypeInfo, tag, allowPartial)
                   ? ANEURALNETWORKS_NO_ERROR
                   : ANEURALNETWORKS_BAD_DATA;
}
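
// Illustrative usage (hypothetical values): a fully specified quant8 tensor
// passes all of the checks above.
//   uint32_t dims[] = {2, 3};
//   ANeuralNetworksOperandType t = {
//           .type = ANEURALNETWORKS_TENSOR_QUANT8_ASYMM,
//           .dimensionCount = 2,
//           .dimensions = dims,
//           .scale = 0.5f,
//           .zeroPoint = 128,
//   };
//   // Returns ANEURALNETWORKS_NO_ERROR.
//   validateOperandType(t, nullptr, "example", /*allowPartial=*/false);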

int validateOperandList(uint32_t count, const uint32_t* list, uint32_t operandCount,
                        const char* tag) {
    for (uint32_t i = 0; i < count; i++) {
        if (list[i] >= operandCount) {
            LOG(ERROR) << tag << " invalid operand index at " << i << " = " << list[i]
                       << ", operandCount " << operandCount;
            return ANEURALNETWORKS_BAD_DATA;
        }
    }
    return ANEURALNETWORKS_NO_ERROR;
}
int validateOperationOperandTypes(const std::vector<Operand>& operands, uint32_t inOperandCount,
                                  const uint32_t* inOperandIndexes,
                                  const std::vector<OperandType>& inExpectedTypes,
                                  uint32_t outOperandCount, const uint32_t* outOperandIndexes,
                                  const std::vector<OperandType>& outExpectedInTypes) {
    if (inOperandCount != static_cast<uint32_t>(inExpectedTypes.size()) ||
        outOperandCount != static_cast<uint32_t>(outExpectedInTypes.size())) {
        LOG(ERROR) << "Wrong operand count: expected " << inExpectedTypes.size() << " inputs and "
                   << outExpectedInTypes.size() << " outputs, got " << inOperandCount
                   << " inputs and " << outOperandCount << " outputs";
        return ANEURALNETWORKS_BAD_DATA;
    }
    for (uint32_t i = 0; i < inOperandCount; i++) {
        if (operands[inOperandIndexes[i]].type != inExpectedTypes[i]) {
            LOG(ERROR) << "Invalid input tensor type " << operands[inOperandIndexes[i]].type
                       << " for input " << i << ", expected " << inExpectedTypes[i];
            return ANEURALNETWORKS_BAD_DATA;
        }
    }
    for (uint32_t i = 0; i < outOperandCount; i++) {
        if (operands[outOperandIndexes[i]].type != outExpectedInTypes[i]) {
            LOG(ERROR) << "Invalid output tensor type " << operands[outOperandIndexes[i]].type
                       << " for output " << i << ", expected " << outExpectedInTypes[i];
            return ANEURALNETWORKS_BAD_DATA;
        }
    }

    return ANEURALNETWORKS_NO_ERROR;
}

static int validateHalVersion(ANeuralNetworksOperationType opType, HalVersion halVersion,
                              HalVersion minSupportedHalVersion) {
    if (halVersion < minSupportedHalVersion) {
        LOG(ERROR) << "The given inputs and outputs for operation " << opType
                   << " are only supported in " << minSupportedHalVersion
                   << " and later (validating using " << halVersion << ")";
        return ANEURALNETWORKS_BAD_DATA;
    }
    return ANEURALNETWORKS_NO_ERROR;
}

// Checks if two operands have the same types, ranks (if specified), dimensions
// (if specified), scales, zeroPoints, and extraParams.
static bool compatible(const Operand& a, const Operand& b) {
    NN_RET_CHECK(a.type == b.type) << a.type << " != " << b.type;
    if (a.dimensions.size() != 0 && b.dimensions.size() != 0) {
        NN_RET_CHECK_EQ(a.dimensions.size(), b.dimensions.size()) << "Incompatible dimensions";
        for (uint32_t i = 0, n = a.dimensions.size(); i < n; ++i) {
            if (a.dimensions[i] != 0 && b.dimensions[i] != 0) {
                NN_RET_CHECK_EQ(a.dimensions[i], b.dimensions[i]) << "Incompatible dimensions";
            }
        }
    }
    NN_RET_CHECK_EQ(a.scale, b.scale);
    NN_RET_CHECK_EQ(a.zeroPoint, b.zeroPoint);
    NN_RET_CHECK(a.extraParams == b.extraParams) << a.extraParams << " != " << b.extraParams;
    return true;
}
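
// For example (illustrative, assuming types and quantization parameters
// already match): dimensions {2, 3} are compatible with {2, 0} because a 0
// entry means "unspecified", and an empty dimensions vector (unspecified
// rank) is compatible with any rank; {2, 3} vs. {3, 3} fails.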

static bool validateConditionOperand(const Operand& operand) {
    NN_RET_CHECK(operand.type == OperandType::TENSOR_BOOL8)
            << "Unexpected condition operand type: " << operand.type;
    NN_RET_CHECK_EQ(operand.dimensions.size(), 1u) << "Condition operand must be a singleton";
    NN_RET_CHECK_EQ(operand.dimensions[0], 1u) << "Condition operand must be a singleton";
    return true;
}

static void checkSubgraphValidationHelper(const SubgraphValidationHelper& helper) {
    CHECK(helper.isValidSubgraphReference != nullptr);
    CHECK(helper.getSubgraphInputCount != nullptr);
    CHECK(helper.getSubgraphOutputCount != nullptr);
    CHECK(helper.getSubgraphInputOperand != nullptr);
    CHECK(helper.getSubgraphOutputOperand != nullptr);
}

static bool validateIfOperation(uint32_t inputCount, const uint32_t* inputs, uint32_t outputCount,
                                const uint32_t* outputs, const std::vector<Operand>& operands,
                                const SubgraphValidationHelper& helper) {
    namespace op = operation_if;
    checkSubgraphValidationHelper(helper);
    NN_RET_CHECK_GE(inputCount, 3u) << "ANEURALNETWORKS_IF must have at least 3 inputs";
    NN_RET_CHECK_GE(outputCount, 1u) << "ANEURALNETWORKS_IF must have at least 1 output";
    auto validateBranchOperand = [&](const Operand& branchModelOperand) -> bool {
        NN_RET_CHECK(helper.isValidSubgraphReference(branchModelOperand))
                << "Operand is not a valid subgraph reference";
        const uint32_t branchModelInputCount = helper.getSubgraphInputCount(branchModelOperand);
        const uint32_t branchModelOutputCount = helper.getSubgraphOutputCount(branchModelOperand);
        NN_RET_CHECK_EQ(inputCount, op::kFirstInput + branchModelInputCount);
        NN_RET_CHECK_EQ(outputCount, branchModelOutputCount);
        for (uint32_t i = 0; i < branchModelInputCount; ++i) {
            const Operand& innerOperand = *helper.getSubgraphInputOperand(branchModelOperand, i);
            const Operand& outerOperand = operands[inputs[op::kFirstInput + i]];
            NN_RET_CHECK(compatible(innerOperand, outerOperand));
        }
        for (uint32_t i = 0; i < branchModelOutputCount; ++i) {
            const Operand& innerOperand = *helper.getSubgraphOutputOperand(branchModelOperand, i);
            const Operand& outerOperand = operands[outputs[i]];
            NN_RET_CHECK(compatible(innerOperand, outerOperand));
        }
        return true;
    };
    NN_RET_CHECK(validateConditionOperand(operands[inputs[op::kCondBoolOperand]]))
            << "Validation failed for IF condition operand";
    NN_RET_CHECK(validateBranchOperand(operands[inputs[op::kThenModelOperand]]))
            << "Validation failed for IF then model";
    NN_RET_CHECK(validateBranchOperand(operands[inputs[op::kElseModelOperand]]))
            << "Validation failed for IF else model";
    return true;
}

static bool validateControlFlowOperandUnknownSize(const SubgraphValidationHelper& helper,
                                                  const Operand& operand) {
    if (!helper.allowControlFlowOperationWithOperandOfUnknownSize && !isExtension(operand.type)) {
        NN_RET_CHECK_NE(nonExtensionOperandSizeOfData(operand.type, operand.dimensions), 0u);
    }
    return true;
}

static bool validateWhileOperation(uint32_t inputCount, const uint32_t* inputs,
                                   uint32_t outputCount, const uint32_t* outputs,
                                   const std::vector<Operand>& operands,
                                   const SubgraphValidationHelper& helper) {
    // Let the loop have
    // - m >= 1 input-output operands,
    // - k >= 0 state-only operands, and
    // - n >= 0 input-only operands.
    // Then
    // - the WHILE loop operation has (2 + m + k + n) inputs and m outputs.
    // - the condition model has (m + k + n) inputs and 1 output.
    // - the body model has (m + k + n) inputs and (m + k) outputs.
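    //
    // Illustrative example: with m = 1, k = 1, and n = 1, the WHILE operation
    // has 2 + 3 = 5 inputs (the condition and body model operands plus three
    // data operands) and 1 output; the condition model has 3 inputs and 1
    // output; and the body model has 3 inputs and 2 outputs (the input-output
    // operand plus the state-only operand).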
    namespace op = operation_while;
    checkSubgraphValidationHelper(helper);
    NN_RET_CHECK_GE(inputCount, 3u) << "ANEURALNETWORKS_WHILE must have at least 3 inputs";
    NN_RET_CHECK_GE(outputCount, 1u) << "ANEURALNETWORKS_WHILE must have at least 1 output";
    auto validateCondOperand = [&](const Operand& condModelOperand) -> bool {
        NN_RET_CHECK(helper.isValidSubgraphReference(condModelOperand))
                << "Operand is not a valid subgraph reference";
        const uint32_t condModelInputCount = helper.getSubgraphInputCount(condModelOperand);
        const uint32_t condModelOutputCount = helper.getSubgraphOutputCount(condModelOperand);
        NN_RET_CHECK_EQ(inputCount, op::kFirstInput + condModelInputCount);
        NN_RET_CHECK_EQ(condModelOutputCount, 1u);
        for (uint32_t i = 0; i < condModelInputCount; ++i) {
            const Operand& innerOperand = *helper.getSubgraphInputOperand(condModelOperand, i);
            const Operand& outerOperand = operands[inputs[op::kFirstInput + i]];
            NN_RET_CHECK(compatible(innerOperand, outerOperand));
            NN_RET_CHECK(validateControlFlowOperandUnknownSize(helper, innerOperand));
            NN_RET_CHECK(validateControlFlowOperandUnknownSize(helper, outerOperand));
        }
        NN_RET_CHECK(
                validateConditionOperand(*helper.getSubgraphOutputOperand(condModelOperand, 0)));
        return true;
    };
    auto validateBodyOperand = [&](const Operand& bodyModelOperand) -> bool {
        NN_RET_CHECK(helper.isValidSubgraphReference(bodyModelOperand))
                << "Operand is not a valid subgraph reference";
        const uint32_t bodyModelInputCount = helper.getSubgraphInputCount(bodyModelOperand);
        const uint32_t bodyModelOutputCount = helper.getSubgraphOutputCount(bodyModelOperand);
        NN_RET_CHECK_EQ(inputCount, op::kFirstInput + bodyModelInputCount);
        NN_RET_CHECK_GE(bodyModelOutputCount, outputCount);
        NN_RET_CHECK_GE(bodyModelInputCount, bodyModelOutputCount);
        const uint32_t inputOutputCount = outputCount;
        const uint32_t stateOnlyCount = bodyModelOutputCount - inputOutputCount;
        const uint32_t inputOnlyCount = bodyModelInputCount - bodyModelOutputCount;
        for (uint32_t i = 0, n = inputOutputCount + stateOnlyCount + inputOnlyCount; i < n; ++i) {
            const Operand& innerOperand = *helper.getSubgraphInputOperand(bodyModelOperand, i);
            const Operand& outerOperand = operands[inputs[op::kFirstInput + i]];
            NN_RET_CHECK(compatible(innerOperand, outerOperand));
            NN_RET_CHECK(validateControlFlowOperandUnknownSize(helper, innerOperand));
            NN_RET_CHECK(validateControlFlowOperandUnknownSize(helper, outerOperand));
        }
        for (uint32_t i = 0; i < inputOutputCount; ++i) {
            const Operand& innerOperand = *helper.getSubgraphOutputOperand(bodyModelOperand, i);
            const Operand& outerOperand = operands[outputs[i]];
            NN_RET_CHECK(compatible(innerOperand, outerOperand));
            NN_RET_CHECK(validateControlFlowOperandUnknownSize(helper, outerOperand));
        }
        for (uint32_t i = 0, n = inputOutputCount + stateOnlyCount; i < n; ++i) {
            const Operand& inputOperand = *helper.getSubgraphInputOperand(bodyModelOperand, i);
            const Operand& outputOperand = *helper.getSubgraphOutputOperand(bodyModelOperand, i);
            NN_RET_CHECK(compatible(inputOperand, outputOperand));
            NN_RET_CHECK(validateControlFlowOperandUnknownSize(helper, outputOperand));
        }
        return true;
    };
    NN_RET_CHECK(validateCondOperand(operands[inputs[op::kCondModelOperand]]))
            << "Validation failed for WHILE condition model";
    NN_RET_CHECK(validateBodyOperand(operands[inputs[op::kBodyModelOperand]]))
            << "Validation failed for WHILE body model";
    return true;
}

int validateOperation(ANeuralNetworksOperationType opType, uint32_t inputCount,
                      const uint32_t* inputIndexes, uint32_t outputCount,
                      const uint32_t* outputIndexes, const std::vector<Operand>& operands,
                      HalVersion halVersion, const SubgraphValidationHelper& helper) {
    NN_RETURN_IF_ERROR(validateOperandList(inputCount, inputIndexes,
                                           static_cast<uint32_t>(operands.size()),
                                           "ANeuralNetworksModel_addOperation inputs"));
    NN_RETURN_IF_ERROR(validateOperandList(outputCount, outputIndexes,
                                           static_cast<uint32_t>(operands.size()),
                                           "ANeuralNetworksModel_addOperation outputs"));

    if (isExtensionOperationType(opType)) {
        if (halVersion < HalVersion::V1_2) {
            LOG(ERROR)
                    << "Extension operations are supported since HAL version 1.2, validating using "
                    << halVersion;
            return ANEURALNETWORKS_BAD_DATA;
        }
        // There is no other validation we can do for an extension operation.
        return ANEURALNETWORKS_NO_ERROR;
    }

    auto logInvalidInOutNumber = [opType, inputCount, outputCount](int expIn, int expOut) {
        LOG(ERROR) << "Invalid number of input operands (" << inputCount << ", expected " << expIn
                   << ") or output operands (" << outputCount << ", expected " << expOut
                   << ") for operation " << opType;
    };

    switch (opType) {
        case ANEURALNETWORKS_OEM_OPERATION: {
            return ANEURALNETWORKS_NO_ERROR;
        }
        case ANEURALNETWORKS_RESHAPE: {
            if (inputCount != 2 || outputCount != 1) {
                logInvalidInOutNumber(2, 1);
                return ANEURALNETWORKS_BAD_DATA;
            }
            auto inputType = operands[inputIndexes[0]].type;
            std::vector<OperandType> inExpectedTypes;
            std::vector<OperandType> outExpectedTypes;
            if (inputType == OperandType::TENSOR_FLOAT32) {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0));
                inExpectedTypes = {OperandType::TENSOR_FLOAT32, OperandType::TENSOR_INT32};
                outExpectedTypes = {OperandType::TENSOR_FLOAT32};
            } else if (inputType == OperandType::TENSOR_FLOAT16) {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
                inExpectedTypes = {OperandType::TENSOR_FLOAT16, OperandType::TENSOR_INT32};
                outExpectedTypes = {OperandType::TENSOR_FLOAT16};
            } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM) {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0));
                inExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM, OperandType::TENSOR_INT32};
                outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM};
            } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3));
                inExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM_SIGNED,
                                   OperandType::TENSOR_INT32};
                outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM_SIGNED};
            } else {
                LOG(ERROR) << "Unsupported input tensor type for operation " << opType;
                return ANEURALNETWORKS_BAD_DATA;
            }
            const auto inputRank = operands[inputIndexes[0]].dimensions.size();
            if (inputRank > 4) {
                LOG(ERROR) << "Unsupported input tensor rank for operation " << opType;
                return ANEURALNETWORKS_BAD_DATA;
            }
            return validateOperationOperandTypes(operands, inputCount, inputIndexes,
                                                 inExpectedTypes, outputCount, outputIndexes,
                                                 outExpectedTypes);
        }
        case ANEURALNETWORKS_DEPTH_TO_SPACE: {
            if ((inputCount != 3 && inputCount != 2) || outputCount != 1) {
                LOG(ERROR) << "Invalid number of input operands (" << inputCount
                           << ", expected 3 or 2) or output operands (" << outputCount
                           << ", expected 1) for operation " << opType;
                return ANEURALNETWORKS_BAD_DATA;
            }
            auto inputType = operands[inputIndexes[0]].type;
            std::vector<OperandType> inExpectedTypes;
            std::vector<OperandType> outExpectedTypes;
            if (inputType == OperandType::TENSOR_FLOAT32) {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0));
                inExpectedTypes = {OperandType::TENSOR_FLOAT32, OperandType::INT32};
                outExpectedTypes = {OperandType::TENSOR_FLOAT32};
            } else if (inputType == OperandType::TENSOR_FLOAT16) {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
                inExpectedTypes = {OperandType::TENSOR_FLOAT16, OperandType::INT32};
                outExpectedTypes = {OperandType::TENSOR_FLOAT16};
            } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM) {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0));
                inExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM, OperandType::INT32};
                outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM};
            } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3));
                inExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM_SIGNED, OperandType::INT32};
                outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM_SIGNED};
            } else {
                LOG(ERROR) << "Unsupported input tensor type for operation " << opType;
                return ANEURALNETWORKS_BAD_DATA;
            }
            if (inputCount == 3) {
                inExpectedTypes.push_back(OperandType::BOOL);
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
            } else {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0));
            }
            return validateOperationOperandTypes(operands, inputCount, inputIndexes,
                                                 inExpectedTypes, outputCount, outputIndexes,
                                                 outExpectedTypes);
        }
        case ANEURALNETWORKS_SPACE_TO_DEPTH: {
            if ((inputCount != 3 && inputCount != 2) || outputCount != 1) {
                LOG(ERROR) << "Invalid number of input operands (" << inputCount
                           << ", expected 3 or 2) or output operands (" << outputCount
                           << ", expected 1) for operation " << opType;
                return ANEURALNETWORKS_BAD_DATA;
            }
            auto inputType = operands[inputIndexes[0]].type;
            std::vector<OperandType> inExpectedTypes;
            std::vector<OperandType> outExpectedTypes;
            if (inputType == OperandType::TENSOR_FLOAT32) {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0));
                inExpectedTypes = {OperandType::TENSOR_FLOAT32, OperandType::INT32};
                outExpectedTypes = {OperandType::TENSOR_FLOAT32};
            } else if (inputType == OperandType::TENSOR_FLOAT16) {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
                inExpectedTypes = {OperandType::TENSOR_FLOAT16, OperandType::INT32};
                outExpectedTypes = {OperandType::TENSOR_FLOAT16};
            } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM) {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0));
                inExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM, OperandType::INT32};
                outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM};
            } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3));
                inExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM_SIGNED, OperandType::INT32};
                outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM_SIGNED};
            } else {
                LOG(ERROR) << "Unsupported input tensor type for operation " << opType;
                return ANEURALNETWORKS_BAD_DATA;
            }
            if (inputCount == 3) {
                inExpectedTypes.push_back(OperandType::BOOL);
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
            } else {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0));
            }
            return validateOperationOperandTypes(operands, inputCount, inputIndexes,
                                                 inExpectedTypes, outputCount, outputIndexes,
                                                 outExpectedTypes);
        }
        case ANEURALNETWORKS_EMBEDDING_LOOKUP: {
            if (inputCount != 2 || outputCount != 1) {
                logInvalidInOutNumber(2, 1);
                return ANEURALNETWORKS_BAD_DATA;
            }
            auto inputType = operands[inputIndexes[1]].type;
            if (inputType != OperandType::TENSOR_FLOAT16 &&
                inputType != OperandType::TENSOR_FLOAT32 &&
                inputType != OperandType::TENSOR_INT32 &&
                inputType != OperandType::TENSOR_QUANT8_ASYMM &&
                inputType != OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
                LOG(ERROR) << "Unsupported input tensor type for operation " << opType;
                return ANEURALNETWORKS_BAD_DATA;
            }
            std::vector<OperandType> inExpectedTypes = {OperandType::TENSOR_INT32, inputType};
            std::vector<OperandType> outExpectedTypes = {inputType};
            if (inputType == OperandType::TENSOR_FLOAT16 ||
                inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3));
            } else if (inputType == OperandType::TENSOR_INT32 ||
                       inputType == OperandType::TENSOR_QUANT8_ASYMM) {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
            } else {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0));
            }
            return validateOperationOperandTypes(operands, inputCount, inputIndexes,
                                                 inExpectedTypes, outputCount, outputIndexes,
                                                 outExpectedTypes);
        }
        case ANEURALNETWORKS_HASHTABLE_LOOKUP: {
            if (inputCount != 3 || outputCount != 2) {
                logInvalidInOutNumber(3, 2);
                return ANEURALNETWORKS_BAD_DATA;
            }
            auto inputType = operands[inputIndexes[2]].type;
            if (inputType != OperandType::TENSOR_FLOAT32 &&
                inputType != OperandType::TENSOR_INT32 &&
                inputType != OperandType::TENSOR_QUANT8_ASYMM) {
                LOG(ERROR) << "Unsupported input tensor type for operation " << opType;
                return ANEURALNETWORKS_BAD_DATA;
            }
            std::vector<OperandType> inExpectedTypes = {OperandType::TENSOR_INT32,
                                                        OperandType::TENSOR_INT32, inputType};
            std::vector<OperandType> outExpectedTypes = {inputType,
                                                         OperandType::TENSOR_QUANT8_ASYMM};
            NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0));
            return validateOperationOperandTypes(operands, inputCount, inputIndexes,
                                                 inExpectedTypes, outputCount, outputIndexes,
                                                 outExpectedTypes);
        }
        case ANEURALNETWORKS_LSH_PROJECTION: {
            if (inputCount != 4 || outputCount != 1) {
                logInvalidInOutNumber(4, 1);
                return ANEURALNETWORKS_BAD_DATA;
            }
            auto inputType = operands[inputIndexes[1]].type;
            if (inputType != OperandType::TENSOR_FLOAT16 &&
                inputType != OperandType::TENSOR_FLOAT32 &&
                inputType != OperandType::TENSOR_INT32 &&
                inputType != OperandType::TENSOR_QUANT8_ASYMM) {
                LOG(ERROR) << "Unsupported input tensor type for operation " << opType;
                return ANEURALNETWORKS_BAD_DATA;
            }
            auto hashType = operands[inputIndexes[0]].type;
            std::vector<OperandType> inExpectedTypes;
            if (hashType == OperandType::TENSOR_FLOAT16) {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
                inExpectedTypes = {
                        OperandType::TENSOR_FLOAT16,
                        inputType,
                        OperandType::TENSOR_FLOAT16,
                        OperandType::INT32,
                };
            } else if (hashType == OperandType::TENSOR_FLOAT32) {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0));
                inExpectedTypes = {
                        OperandType::TENSOR_FLOAT32,
                        inputType,
                        OperandType::TENSOR_FLOAT32,
                        OperandType::INT32,
                };
            } else {
                LOG(ERROR) << "Unsupported hash tensor type for operation " << opType;
                return ANEURALNETWORKS_BAD_DATA;
            }
            std::vector<OperandType> outExpectedTypes = {OperandType::TENSOR_INT32};
            return validateOperationOperandTypes(operands, inputCount, inputIndexes,
                                                 inExpectedTypes, outputCount, outputIndexes,
                                                 outExpectedTypes);
        }
        case ANEURALNETWORKS_BIDIRECTIONAL_SEQUENCE_LSTM: {
            const uint32_t kNumOutputs = 2;
            const uint32_t kNumOutputsMerged = 1;
            const uint32_t kNumOutputsWithState = 6;
            const uint32_t kNumOutputsMergedWithState = 5;
            if (inputCount != 61 ||
                (outputCount != kNumOutputs && outputCount != kNumOutputsMerged &&
                 outputCount != kNumOutputsWithState &&
                 outputCount != kNumOutputsMergedWithState)) {
                LOG(ERROR) << "Invalid number of input operands (" << inputCount
                           << ", expected 61) or output operands (" << outputCount
                           << ", expected 1, 2, 5 or 6) for operation " << opType;
                return ANEURALNETWORKS_BAD_DATA;
            }

            std::vector<OperandType> inExpectedTypes;
            auto inputType = operands[inputIndexes[0]].type;
            if (inputType != OperandType::TENSOR_FLOAT32 &&
                inputType != OperandType::TENSOR_FLOAT16) {
                LOG(ERROR) << "Unsupported input tensor type for operation " << opType;
                return ANEURALNETWORKS_BAD_DATA;
            }

            inExpectedTypes = {};
            for (int i = 0; i < 48; ++i) {
                inExpectedTypes.push_back(inputType);
            }
            inExpectedTypes.push_back(OperandType::INT32);
            inExpectedTypes.push_back(inputType == OperandType::TENSOR_FLOAT32
                                              ? OperandType::FLOAT32
                                              : OperandType::FLOAT16);
            inExpectedTypes.push_back(inputType == OperandType::TENSOR_FLOAT32
                                              ? OperandType::FLOAT32
                                              : OperandType::FLOAT16);
            inExpectedTypes.push_back(OperandType::BOOL);
            inExpectedTypes.push_back(OperandType::BOOL);
            for (int i = 0; i < 8; ++i) {
                inExpectedTypes.push_back(inputType);
            }
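
            // Running total (derived from the pushes above): 48 tensors +
            // 1 INT32 + 2 floating-point scalars + 2 BOOLs + 8 tensors == 61
            // expected inputs, matching the inputCount check above.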

            HalVersion minSupportedHalVersion = HalVersion::V1_2;
            if (outputCount == kNumOutputsWithState || outputCount == kNumOutputsMergedWithState) {
                minSupportedHalVersion = HalVersion::V1_3;
            }
            NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, minSupportedHalVersion));
            std::vector<OperandType> outExpectedTypes(outputCount, inputType);
            auto status = validateOperationOperandTypes(operands, inputCount, inputIndexes,
                                                        inExpectedTypes, outputCount,
                                                        outputIndexes, outExpectedTypes);
            return status;
        }
        case ANEURALNETWORKS_LSTM: {
            if ((inputCount != 23 && inputCount != 27) || outputCount != 4) {
                LOG(ERROR) << "Invalid number of input operands (" << inputCount
                           << ", expected 23 or 27) or output operands (" << outputCount
                           << ", expected 4) for operation " << opType;
                return ANEURALNETWORKS_BAD_DATA;
            }
            std::vector<OperandType> inExpectedTypes;
            std::vector<OperandType> outExpectedTypes;
            auto inputType = operands[inputIndexes[0]].type;
            if (inputType != OperandType::TENSOR_FLOAT32 &&
                inputType != OperandType::TENSOR_FLOAT16) {
                LOG(ERROR) << "Unsupported input tensor type for operation " << opType;
                return ANEURALNETWORKS_BAD_DATA;
            }

            inExpectedTypes = {inputType, inputType, inputType, inputType, inputType,
                               inputType, inputType, inputType, inputType, inputType,
                               inputType, inputType, inputType, inputType, inputType,
                               inputType, inputType, inputType, inputType, inputType,
                               OperandType::INT32};
            if (inputType == OperandType::TENSOR_FLOAT32) {
                inExpectedTypes.push_back(OperandType::FLOAT32);
                inExpectedTypes.push_back(OperandType::FLOAT32);
            } else {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
                inExpectedTypes.push_back(OperandType::FLOAT16);
                inExpectedTypes.push_back(OperandType::FLOAT16);
            }

            outExpectedTypes = {inputType, inputType, inputType, inputType};
            if (inputCount == 23) {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0));
            } else {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
                for (int i = 0; i < 4; ++i) {
                    inExpectedTypes.push_back(inputType);
                }
            }
            return validateOperationOperandTypes(operands, inputCount, inputIndexes,
                                                 inExpectedTypes, outputCount, outputIndexes,
                                                 outExpectedTypes);
        }
        case ANEURALNETWORKS_QUANTIZED_16BIT_LSTM: {
            if (inputCount != 15 || outputCount != 2) {
                logInvalidInOutNumber(15, 2);
                return ANEURALNETWORKS_BAD_DATA;
            }
            NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
            std::vector<OperandType> inExpectedTypes = {
                    OperandType::TENSOR_QUANT8_ASYMM, OperandType::TENSOR_QUANT8_ASYMM,
                    OperandType::TENSOR_QUANT8_ASYMM, OperandType::TENSOR_QUANT8_ASYMM,
                    OperandType::TENSOR_QUANT8_ASYMM, OperandType::TENSOR_QUANT8_ASYMM,
                    OperandType::TENSOR_QUANT8_ASYMM, OperandType::TENSOR_QUANT8_ASYMM,
                    OperandType::TENSOR_QUANT8_ASYMM, OperandType::TENSOR_INT32,
                    OperandType::TENSOR_INT32,        OperandType::TENSOR_INT32,
                    OperandType::TENSOR_INT32,        OperandType::TENSOR_QUANT16_SYMM,
                    OperandType::TENSOR_QUANT8_ASYMM};
            std::vector<OperandType> outExpectedTypes = {OperandType::TENSOR_QUANT16_SYMM,
                                                         OperandType::TENSOR_QUANT8_ASYMM};
            return validateOperationOperandTypes(operands, inputCount, inputIndexes,
                                                 inExpectedTypes, outputCount, outputIndexes,
                                                 outExpectedTypes);
        }
        case ANEURALNETWORKS_RANDOM_MULTINOMIAL: {
            if (inputCount != 3 || outputCount != 1) {
                logInvalidInOutNumber(3, 1);
                return ANEURALNETWORKS_BAD_DATA;
            }
            OperandType inputType = operands[inputIndexes[0]].type;
            std::vector<OperandType> inExpectedTypes;
            if (inputType == OperandType::TENSOR_FLOAT32 ||
                inputType == OperandType::TENSOR_FLOAT16) {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
                inExpectedTypes = {
                        inputType,
                        OperandType::INT32,
                        OperandType::TENSOR_INT32,
                };
            } else {
                LOG(ERROR) << "Unsupported input tensor type for operation " << opType;
                return ANEURALNETWORKS_BAD_DATA;
            }
            std::vector<OperandType> outExpectedTypes = {OperandType::TENSOR_INT32};
            return validateOperationOperandTypes(operands, inputCount, inputIndexes,
                                                 inExpectedTypes, outputCount, outputIndexes,
                                                 outExpectedTypes);
        }
        case ANEURALNETWORKS_RNN: {
            if (inputCount != 6 || outputCount != 2) {
                logInvalidInOutNumber(6, 2);
                return ANEURALNETWORKS_BAD_DATA;
            }
            OperandType inputType = operands[inputIndexes[0]].type;
            std::vector<OperandType> inExpectedTypes;
            std::vector<OperandType> outExpectedTypes;
            if (inputType == OperandType::TENSOR_FLOAT32) {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0));
                inExpectedTypes = {
                        OperandType::TENSOR_FLOAT32, OperandType::TENSOR_FLOAT32,
                        OperandType::TENSOR_FLOAT32, OperandType::TENSOR_FLOAT32,
                        OperandType::TENSOR_FLOAT32, OperandType::INT32,
                };
                outExpectedTypes = {
                        OperandType::TENSOR_FLOAT32,
                        OperandType::TENSOR_FLOAT32,
                };
            } else if (inputType == OperandType::TENSOR_FLOAT16) {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
                inExpectedTypes = {
                        OperandType::TENSOR_FLOAT16, OperandType::TENSOR_FLOAT16,
                        OperandType::TENSOR_FLOAT16, OperandType::TENSOR_FLOAT16,
                        OperandType::TENSOR_FLOAT16, OperandType::INT32,
                };
                outExpectedTypes = {
                        OperandType::TENSOR_FLOAT16,
                        OperandType::TENSOR_FLOAT16,
                };
            } else {
                LOG(ERROR) << "Unsupported input tensor type for operation " << opType;
                return ANEURALNETWORKS_BAD_DATA;
            }
            return validateOperationOperandTypes(operands, inputCount, inputIndexes,
                                                 inExpectedTypes, outputCount, outputIndexes,
                                                 outExpectedTypes);
        }
        case ANEURALNETWORKS_SVDF: {
            if (inputCount != 7 || outputCount != 2) {
                logInvalidInOutNumber(7, 2);
                return ANEURALNETWORKS_BAD_DATA;
            }
            OperandType inputType = operands[inputIndexes[0]].type;
            if (inputType == OperandType::TENSOR_FLOAT32) {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0));
            } else if (inputType == OperandType::TENSOR_FLOAT16) {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
            } else {
                LOG(ERROR) << "Unsupported input tensor type for operation " << opType;
                return ANEURALNETWORKS_BAD_DATA;
            }
            std::vector<OperandType> inExpectedTypes = {
                    inputType, inputType, inputType, inputType,
                    inputType, OperandType::INT32, OperandType::INT32,
            };
            std::vector<OperandType> outExpectedTypes = {inputType, inputType};
            return validateOperationOperandTypes(operands, inputCount, inputIndexes,
                                                 inExpectedTypes, outputCount, outputIndexes,
                                                 outExpectedTypes);
        }
1174 case ANEURALNETWORKS_BATCH_TO_SPACE_ND: {
1175 if ((inputCount != 3 && inputCount != 2) || outputCount != 1) {
1176 LOG(ERROR) << "Invalid number of input operands (" << inputCount
1177 << ", expected 3 or 2) or output operands (" << outputCount
1178 << ", expected 1) for operation " << opType;
1179 return ANEURALNETWORKS_BAD_DATA;
1180 }
1181 auto inputType = operands[inputIndexes[0]].type;
1182 std::vector<OperandType> inExpectedTypes;
1183 std::vector<OperandType> outExpectedTypes;
1184 if (inputType == OperandType::TENSOR_FLOAT32) {
1185 inExpectedTypes = {
1186 OperandType::TENSOR_FLOAT32,
1187 OperandType::TENSOR_INT32,
1188 };
1189 outExpectedTypes = {OperandType::TENSOR_FLOAT32};
1190 } else if (inputType == OperandType::TENSOR_FLOAT16) {
1191 NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
1192 inExpectedTypes = {
1193 OperandType::TENSOR_FLOAT16,
1194 OperandType::TENSOR_INT32,
1195 };
1196 outExpectedTypes = {OperandType::TENSOR_FLOAT16};
1197 } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM) {
1198 inExpectedTypes = {
1199 OperandType::TENSOR_QUANT8_ASYMM,
1200 OperandType::TENSOR_INT32,
1201 };
1202 outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM};
1203 } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
1204 NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3));
1205 inExpectedTypes = {
1206 OperandType::TENSOR_QUANT8_ASYMM_SIGNED,
1207 OperandType::TENSOR_INT32,
1208 };
1209 outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM_SIGNED};
1210 } else {
1211 LOG(ERROR) << "Unsupported input tensor type for operation " << opType;
1212 return ANEURALNETWORKS_BAD_DATA;
1213 }
1214 if (inputCount == 3) {
1215 inExpectedTypes.push_back(OperandType::BOOL);
1216 NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
1217 } else {
1218 NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_1));
1219 }
1220 return validateOperationOperandTypes(operands, inputCount, inputIndexes,
1221 inExpectedTypes, outputCount, outputIndexes,
1222 outExpectedTypes);
1223 }
        case ANEURALNETWORKS_SPACE_TO_BATCH_ND: {
            if ((inputCount != 4 && inputCount != 3) || outputCount != 1) {
                LOG(ERROR) << "Invalid number of input operands (" << inputCount
                           << ", expected 4 or 3) or output operands (" << outputCount
                           << ", expected 1) for operation " << opType;
                return ANEURALNETWORKS_BAD_DATA;
            }
            auto inputType = operands[inputIndexes[0]].type;
            std::vector<OperandType> inExpectedTypes;
            std::vector<OperandType> outExpectedTypes;
            if (inputType == OperandType::TENSOR_FLOAT32) {
                inExpectedTypes = {
                        OperandType::TENSOR_FLOAT32,
                        OperandType::TENSOR_INT32,
                        OperandType::TENSOR_INT32,
                };
                outExpectedTypes = {OperandType::TENSOR_FLOAT32};
            } else if (inputType == OperandType::TENSOR_FLOAT16) {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
                inExpectedTypes = {
                        OperandType::TENSOR_FLOAT16,
                        OperandType::TENSOR_INT32,
                        OperandType::TENSOR_INT32,
                };
                outExpectedTypes = {OperandType::TENSOR_FLOAT16};
            } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM) {
                if (operands[inputIndexes[0]].zeroPoint != 0) {
                    NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
                }
                inExpectedTypes = {
                        OperandType::TENSOR_QUANT8_ASYMM,
                        OperandType::TENSOR_INT32,
                        OperandType::TENSOR_INT32,
                };
                outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM};
            } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3));
                inExpectedTypes = {
                        OperandType::TENSOR_QUANT8_ASYMM_SIGNED,
                        OperandType::TENSOR_INT32,
                        OperandType::TENSOR_INT32,
                };
                outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM_SIGNED};
            } else {
                LOG(ERROR) << "Unsupported input tensor type for operation " << opType;
                return ANEURALNETWORKS_BAD_DATA;
            }
            if (inputCount == 4) {
                inExpectedTypes.push_back(OperandType::BOOL);
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
            } else {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_1));
            }
            return validateOperationOperandTypes(operands, inputCount, inputIndexes,
                                                 inExpectedTypes, outputCount, outputIndexes,
                                                 outExpectedTypes);
        }
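        // PAD: a data tensor of rank at most 4 plus an INT32 paddings tensor. For
        // unsigned quant8 the minimum HAL version depends on whether the zero point
        // is 0; signed quant8 requires V1_3.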
        case ANEURALNETWORKS_PAD: {
            if (inputCount != 2 || outputCount != 1) {
                logInvalidInOutNumber(2, 1);
                return ANEURALNETWORKS_BAD_DATA;
            }
            auto inputType = operands[inputIndexes[0]].type;
            std::vector<OperandType> inExpectedTypes;
            std::vector<OperandType> outExpectedTypes;
            if (inputType == OperandType::TENSOR_FLOAT32) {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_1));
                inExpectedTypes = {
                        OperandType::TENSOR_FLOAT32,
                        OperandType::TENSOR_INT32,
                };
                outExpectedTypes = {OperandType::TENSOR_FLOAT32};
            } else if (inputType == OperandType::TENSOR_FLOAT16) {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
                inExpectedTypes = {
                        OperandType::TENSOR_FLOAT16,
                        OperandType::TENSOR_INT32,
                };
                outExpectedTypes = {OperandType::TENSOR_FLOAT16};
            } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM ||
                       inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
                if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
                    NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3));
                } else {
                    if (operands[inputIndexes[0]].zeroPoint == 0) {
                        NN_RETURN_IF_ERROR(
                                validateHalVersion(opType, halVersion, HalVersion::V1_1));
                    } else {
                        NN_RETURN_IF_ERROR(
                                validateHalVersion(opType, halVersion, HalVersion::V1_2));
                    }
                }
                inExpectedTypes = {
                        inputType,
                        OperandType::TENSOR_INT32,
                };
                outExpectedTypes = {inputType};
            } else {
                LOG(ERROR) << "Unsupported input tensor type for operation " << opType;
                return ANEURALNETWORKS_BAD_DATA;
            }
            const auto inputRank = operands[inputIndexes[0]].dimensions.size();
            if (inputRank > 4) {
                LOG(ERROR) << "Unsupported input tensor rank for operation " << opType;
                return ANEURALNETWORKS_BAD_DATA;
            }
            return validateOperationOperandTypes(operands, inputCount, inputIndexes,
                                                 inExpectedTypes, outputCount, outputIndexes,
                                                 outExpectedTypes);
        }
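        // PAD_V2 additionally takes an explicit pad-value scalar whose type tracks
        // the tensor's element type: FLOAT32/FLOAT16 for float tensors and INT32
        // for quantized ones (see the TODO below about switching to UINT8).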
        case ANEURALNETWORKS_PAD_V2: {
            if (inputCount != 3 || outputCount != 1) {
                logInvalidInOutNumber(3, 1);
                return ANEURALNETWORKS_BAD_DATA;
            }
            auto inputType = operands[inputIndexes[0]].type;
            std::vector<OperandType> inExpectedTypes;
            std::vector<OperandType> outExpectedTypes;
            if (inputType == OperandType::TENSOR_FLOAT32) {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
                inExpectedTypes = {
                        OperandType::TENSOR_FLOAT32,
                        OperandType::TENSOR_INT32,
                        OperandType::FLOAT32,
                };
                outExpectedTypes = {OperandType::TENSOR_FLOAT32};
            } else if (inputType == OperandType::TENSOR_FLOAT16) {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
                inExpectedTypes = {
                        OperandType::TENSOR_FLOAT16,
                        OperandType::TENSOR_INT32,
                        OperandType::FLOAT16,
                };
                outExpectedTypes = {OperandType::TENSOR_FLOAT16};
            } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM ||
                       inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
                if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
                    NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3));
                } else {
                    NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
                }
                inExpectedTypes = {
                        inputType,
                        OperandType::TENSOR_INT32,
                        OperandType::INT32,
                };  // TODO(b/116699425): Make it UINT8.
                outExpectedTypes = {inputType};
            } else {
                LOG(ERROR) << "Unsupported input tensor type for operation " << opType;
                return ANEURALNETWORKS_BAD_DATA;
            }
            const auto inputRank = operands[inputIndexes[0]].dimensions.size();
            if (inputRank > 4) {
                LOG(ERROR) << "Unsupported input tensor rank for operation " << opType;
                return ANEURALNETWORKS_BAD_DATA;
            }
            return validateOperationOperandTypes(operands, inputCount, inputIndexes,
                                                 inExpectedTypes, outputCount, outputIndexes,
                                                 outExpectedTypes);
        }
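        // CAST between the four V1_2 tensor types is unrestricted; the V1_3 types
        // below only support identity casts. When both shapes are fully specified,
        // they must match.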
        case ANEURALNETWORKS_CAST: {
            if (inputCount != 1 || outputCount != 1) {
                logInvalidInOutNumber(1, 1);
                return ANEURALNETWORKS_BAD_DATA;
            }
            auto inputOperand = operands[inputIndexes[0]];
            auto outputOperand = operands[outputIndexes[0]];
            auto inputType = inputOperand.type;
            auto outputType = outputOperand.type;
            std::vector<OperandType> inExpectedTypes;
            std::vector<OperandType> outExpectedTypes;
            if ((inputType == OperandType::TENSOR_FLOAT16 ||
                 inputType == OperandType::TENSOR_FLOAT32 ||
                 inputType == OperandType::TENSOR_INT32 ||
                 inputType == OperandType::TENSOR_QUANT8_ASYMM) &&
                (outputType == OperandType::TENSOR_FLOAT16 ||
                 outputType == OperandType::TENSOR_FLOAT32 ||
                 outputType == OperandType::TENSOR_INT32 ||
                 outputType == OperandType::TENSOR_QUANT8_ASYMM)) {
                inExpectedTypes = {inputType};
                outExpectedTypes = {outputType};
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
            } else if (inputType == OperandType::TENSOR_BOOL8 ||
                       inputType == OperandType::TENSOR_QUANT16_ASYMM ||
                       inputType == OperandType::TENSOR_QUANT16_SYMM ||
                       inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED ||
                       inputType == OperandType::TENSOR_QUANT8_SYMM) {
                inExpectedTypes = {inputType};
                outExpectedTypes = {inputType};  // Only identity CAST is supported.
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3));
            } else {
                LOG(ERROR) << "Unsupported data type for operation " << opType;
                return ANEURALNETWORKS_BAD_DATA;
            }
            // Validate that output shape is equal to input shape if dimensions
            // are already known.
            auto getNumberOfElements = [](const std::vector<uint32_t>& dims) {
                if (dims.size() == 0) {
                    return 0;
                }
                return std::accumulate(dims.begin(), dims.end(), 1, std::multiplies<>());
            };
            if (inputOperand.dimensions.size() != 0 && outputOperand.dimensions.size() != 0 &&
                getNumberOfElements(outputOperand.dimensions) != 0 &&
                inputOperand.dimensions != outputOperand.dimensions) {
                return ANEURALNETWORKS_BAD_DATA;
            }
            return validateOperationOperandTypes(operands, inputCount, inputIndexes,
                                                 inExpectedTypes, outputCount, outputIndexes,
                                                 outExpectedTypes);
        }
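        // MEAN: a data tensor of rank at most 4, an INT32 axis tensor, and an INT32
        // scalar (the keep-dims flag).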
        case ANEURALNETWORKS_MEAN: {
            if (inputCount != 3 || outputCount != 1) {
                logInvalidInOutNumber(3, 1);
                return ANEURALNETWORKS_BAD_DATA;
            }
            const auto inputRank = operands[inputIndexes[0]].dimensions.size();
            if (inputRank > 4) {
                LOG(ERROR) << "Unsupported input tensor rank for operation " << opType;
                return ANEURALNETWORKS_BAD_DATA;
            }
            auto inputType = operands[inputIndexes[0]].type;
            if (inputType == OperandType::TENSOR_FLOAT32) {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_1));
            } else if (inputType == OperandType::TENSOR_FLOAT16) {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
            } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM) {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_1));
            } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3));
            } else {
                LOG(ERROR) << "Unsupported input tensor type for operation " << opType;
                return ANEURALNETWORKS_BAD_DATA;
            }
            std::vector<OperandType> inExpectedTypes = {inputType, OperandType::TENSOR_INT32,
                                                        OperandType::INT32};
            std::vector<OperandType> outExpectedTypes = {inputType};
            return validateOperationOperandTypes(operands, inputCount, inputIndexes,
                                                 inExpectedTypes, outputCount, outputIndexes,
                                                 outExpectedTypes);
        }
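        // ARGMAX/ARGMIN reduce along an INT32 axis scalar and always produce a
        // TENSOR_INT32 output, whatever the input element type (both V1_2).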
        case ANEURALNETWORKS_ARGMAX:
        case ANEURALNETWORKS_ARGMIN: {
            if (inputCount != 2 || outputCount != 1) {
                logInvalidInOutNumber(2, 1);
                return ANEURALNETWORKS_BAD_DATA;
            }
            auto inputType = operands[inputIndexes[0]].type;
            std::vector<OperandType> inExpectedTypes;
            std::vector<OperandType> outExpectedTypes;
            if (inputType == OperandType::TENSOR_FLOAT16 ||
                inputType == OperandType::TENSOR_FLOAT32 ||
                inputType == OperandType::TENSOR_INT32 ||
                inputType == OperandType::TENSOR_QUANT8_ASYMM ||
                inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
                inExpectedTypes = {inputType, OperandType::INT32};
                outExpectedTypes = {OperandType::TENSOR_INT32};
            } else {
                LOG(ERROR) << "Unsupported input tensor type for operation " << opType;
                return ANEURALNETWORKS_BAD_DATA;
            }
            NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
            return validateOperationOperandTypes(operands, inputCount, inputIndexes,
                                                 inExpectedTypes, outputCount, outputIndexes,
                                                 outExpectedTypes);
        }
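        // EXPAND_DIMS inserts a dimension of size 1 at an INT32 axis scalar; the
        // output type matches the input, with signed quant8 gated on V1_3.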
        case ANEURALNETWORKS_EXPAND_DIMS: {
            if (inputCount != 2 || outputCount != 1) {
                logInvalidInOutNumber(2, 1);
                return ANEURALNETWORKS_BAD_DATA;
            }
            auto inputType = operands[inputIndexes[0]].type;
            std::vector<OperandType> inExpectedTypes;
            std::vector<OperandType> outExpectedTypes;
            if (inputType == OperandType::TENSOR_FLOAT16 ||
                inputType == OperandType::TENSOR_FLOAT32 ||
                inputType == OperandType::TENSOR_INT32 ||
                inputType == OperandType::TENSOR_QUANT8_ASYMM ||
                inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
                inExpectedTypes = {inputType, OperandType::INT32};
                outExpectedTypes = {inputType};
            } else {
                LOG(ERROR) << "Unsupported input tensor type for operation " << opType;
                return ANEURALNETWORKS_BAD_DATA;
            }
            if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3));
            } else {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
            }
            return validateOperationOperandTypes(operands, inputCount, inputIndexes,
                                                 inExpectedTypes, outputCount, outputIndexes,
                                                 outExpectedTypes);
        }
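        // SPLIT: a data tensor plus INT32 axis and INT32 split-count scalars. Every
        // output must share the input's type, so outExpectedTypes is sized to
        // outputCount below.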
        case ANEURALNETWORKS_SPLIT: {
            if (inputCount != 3) {
                LOG(ERROR) << "Invalid number of input operands (" << inputCount
                           << ", expected 3) for operation " << opType;
                return ANEURALNETWORKS_BAD_DATA;
            }
            auto inputType = operands[inputIndexes[0]].type;
            if (inputType != OperandType::TENSOR_FLOAT16 &&
                inputType != OperandType::TENSOR_FLOAT32 &&
                inputType != OperandType::TENSOR_INT32 &&
                inputType != OperandType::TENSOR_QUANT8_ASYMM &&
                inputType != OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
                LOG(ERROR) << "Unsupported input tensor type for operation " << opType;
                return ANEURALNETWORKS_BAD_DATA;
            }
            if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3));
            } else {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
            }
            std::vector<OperandType> inExpectedTypes = {inputType, OperandType::INT32,
                                                        OperandType::INT32};
            std::vector<OperandType> outExpectedTypes(outputCount, inputType);
            return validateOperationOperandTypes(operands, inputCount, inputIndexes,
                                                 inExpectedTypes, outputCount, outputIndexes,
                                                 outExpectedTypes);
        }
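        // MAXIMUM/MINIMUM are elementwise binary ops: both inputs and the output
        // share a single tensor type.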
        case ANEURALNETWORKS_MAXIMUM:
        case ANEURALNETWORKS_MINIMUM: {
            if (inputCount != 2 || outputCount != 1) {
                logInvalidInOutNumber(2, 1);
                return ANEURALNETWORKS_BAD_DATA;
            }
            std::vector<OperandType> inExpectedTypes;
            std::vector<OperandType> outExpectedTypes;
            OperandType inputType = operands[inputIndexes[0]].type;
            if (inputType == OperandType::TENSOR_FLOAT16 ||
                inputType == OperandType::TENSOR_FLOAT32 ||
                inputType == OperandType::TENSOR_INT32 ||
                inputType == OperandType::TENSOR_QUANT8_ASYMM ||
                inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
                inExpectedTypes = {inputType, inputType};
                outExpectedTypes = {inputType};
            } else {
                LOG(ERROR) << "Unsupported input tensor type for operation " << opType;
                return ANEURALNETWORKS_BAD_DATA;
            }
            if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3));
            } else {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
            }
            return validateOperationOperandTypes(operands, inputCount, inputIndexes,
                                                 inExpectedTypes, outputCount, outputIndexes,
                                                 outExpectedTypes);
        }
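        // GROUPED_CONV_2D: 9 inputs with implicit padding or 12 with explicit
        // padding (three extra INT32 scalars), always followed by a BOOL layout
        // scalar. Quantized filters may alternatively be
        // TENSOR_QUANT8_SYMM_PER_CHANNEL with channelDim 0.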
        case ANEURALNETWORKS_GROUPED_CONV_2D: {
            if ((inputCount != 12 && inputCount != 9) || outputCount != 1) {
                LOG(ERROR) << "Invalid number of input operands (" << inputCount
                           << ", expected 12 or 9) or output operands (" << outputCount
                           << ", expected 1) for operation " << opType;
                return ANEURALNETWORKS_BAD_DATA;
            }
            auto inputType = operands[inputIndexes[0]].type;
            auto filterType = operands[inputIndexes[1]].type;
            std::vector<OperandType> inExpectedTypes;
            std::vector<OperandType> outExpectedTypes;
            if (inputType == OperandType::TENSOR_FLOAT32) {
                inExpectedTypes = {OperandType::TENSOR_FLOAT32, OperandType::TENSOR_FLOAT32,
                                   OperandType::TENSOR_FLOAT32, OperandType::INT32,
                                   OperandType::INT32,          OperandType::INT32,
                                   OperandType::INT32,          OperandType::INT32};
                outExpectedTypes = {OperandType::TENSOR_FLOAT32};
            } else if (inputType == OperandType::TENSOR_FLOAT16) {
                inExpectedTypes = {OperandType::TENSOR_FLOAT16, OperandType::TENSOR_FLOAT16,
                                   OperandType::TENSOR_FLOAT16, OperandType::INT32,
                                   OperandType::INT32,          OperandType::INT32,
                                   OperandType::INT32,          OperandType::INT32};
                outExpectedTypes = {OperandType::TENSOR_FLOAT16};
            } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM ||
                       inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
                if (filterType != inputType &&
                    filterType != OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL) {
                    LOG(ERROR) << "Unsupported filter tensor type for operation " << opType;
                    return ANEURALNETWORKS_BAD_DATA;
                }

                if (filterType == OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL &&
                    std::get<Operand::SymmPerChannelQuantParams>(
                            operands[inputIndexes[1]].extraParams)
                                    .channelDim != 0) {
                    LOG(ERROR) << "Unsupported filter tensor channel dimension for operation "
                               << opType;
                    return ANEURALNETWORKS_BAD_DATA;
                }

                inExpectedTypes = {
                        inputType,          filterType,         OperandType::TENSOR_INT32,
                        OperandType::INT32, OperandType::INT32, OperandType::INT32,
                        OperandType::INT32, OperandType::INT32};
                outExpectedTypes = {inputType};
            } else {
                LOG(ERROR) << "Unsupported input tensor type for operation " << opType;
                return ANEURALNETWORKS_BAD_DATA;
            }

            if (inputCount == 12) {
                std::vector<OperandType> explicitScalarTypes(3, OperandType::INT32);
                inExpectedTypes.insert(inExpectedTypes.end(), explicitScalarTypes.begin(),
                                       explicitScalarTypes.end());
            }
            inExpectedTypes.push_back(OperandType::BOOL);
            if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3));
            } else {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
            }
            return validateOperationOperandTypes(operands, inputCount, inputIndexes,
                                                 inExpectedTypes, outputCount, outputIndexes,
                                                 outExpectedTypes);
        }
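        // TILE: a data tensor plus an INT32 multiples tensor giving the repeat
        // count per dimension; the output type matches the input.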
        case ANEURALNETWORKS_TILE: {
            if (inputCount != 2 || outputCount != 1) {
                logInvalidInOutNumber(2, 1);
                return ANEURALNETWORKS_BAD_DATA;
            }
            auto inputType = operands[inputIndexes[0]].type;
            std::vector<OperandType> inExpectedTypes;
            std::vector<OperandType> outExpectedTypes;
            if (inputType == OperandType::TENSOR_FLOAT16 ||
                inputType == OperandType::TENSOR_FLOAT32 ||
                inputType == OperandType::TENSOR_INT32 ||
                inputType == OperandType::TENSOR_QUANT8_ASYMM ||
                inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
                inExpectedTypes = {inputType, OperandType::TENSOR_INT32};
                outExpectedTypes = {inputType};
            } else {
                LOG(ERROR) << "Unsupported input tensor type for operation " << opType;
                return ANEURALNETWORKS_BAD_DATA;
            }
            if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3));
            } else {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
            }
            return validateOperationOperandTypes(operands, inputCount, inputIndexes,
                                                 inExpectedTypes, outputCount, outputIndexes,
                                                 outExpectedTypes);
        }
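        // POW is elementwise and float-only (TENSOR_FLOAT16/TENSOR_FLOAT32),
        // introduced in V1_2.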
        case ANEURALNETWORKS_POW: {
            if (inputCount != 2 || outputCount != 1) {
                logInvalidInOutNumber(2, 1);
                return ANEURALNETWORKS_BAD_DATA;
            }
            auto inputType = operands[inputIndexes[0]].type;
            std::vector<OperandType> inExpectedTypes;
            std::vector<OperandType> outExpectedTypes;
            if (inputType == OperandType::TENSOR_FLOAT16 ||
                inputType == OperandType::TENSOR_FLOAT32) {
                inExpectedTypes = {inputType, inputType};
                outExpectedTypes = {inputType};
            } else {
                LOG(ERROR) << "Unsupported input tensor type for operation " << opType;
                return ANEURALNETWORKS_BAD_DATA;
            }
            // Only float types reach this point, so the TENSOR_QUANT8_ASYMM_SIGNED
            // version check used by similar cases would be unreachable here; V1_2
            // is always the minimum version.
            NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
            return validateOperationOperandTypes(operands, inputCount, inputIndexes,
                                                 inExpectedTypes, outputCount, outputIndexes,
                                                 outExpectedTypes);
        }
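        // IF and WHILE (both V1_3) reference other subgraphs, so their structural
        // checks are delegated to the dedicated control-flow validators.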
        case ANEURALNETWORKS_IF: {
            NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3));
            return validateIfOperation(inputCount, inputIndexes, outputCount, outputIndexes,
                                       operands, helper)
                           ? ANEURALNETWORKS_NO_ERROR
                           : ANEURALNETWORKS_BAD_DATA;
        }
        case ANEURALNETWORKS_WHILE: {
            NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3));
            return validateWhileOperation(inputCount, inputIndexes, outputCount, outputIndexes,
                                          operands, helper)
                           ? ANEURALNETWORKS_NO_ERROR
                           : ANEURALNETWORKS_BAD_DATA;
        }
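        // Any operation without a special case above is validated through the
        // operation registry: look up its registration, run its validate() hook to
        // obtain the minimum required version, and check that against the
        // requested HAL version.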
        default: {
            const OperationRegistration* operationRegistration =
                    BuiltinOperationResolver::get()->findOperation(
                            static_cast<OperationType>(opType));
            if (operationRegistration == nullptr) {
                if (0 <= opType && opType < kNumberOfOperationTypes) {
                    LOG(ERROR) << opType << " not registered";
                } else {
                    LOG(ERROR) << "Operation type " << opType << " out of the range [0, "
                               << kNumberOfOperationTypes << ")";
                }
                return ANEURALNETWORKS_UNEXPECTED_NULL;
            }
            if (operationRegistration->validate == nullptr) {
                LOG(ERROR) << "Incomplete operation registration: " << opType;
                return ANEURALNETWORKS_UNEXPECTED_NULL;
            }
            OperationValidationContext context(operationRegistration->name, inputCount,
                                               inputIndexes, outputCount, outputIndexes,
                                               operands.data());
            const auto maybeVersion = operationRegistration->validate(&context);
            if (!maybeVersion.has_value()) {
                LOG(ERROR) << "Validation failed for operation " << opType << ": "
                           << maybeVersion.error();
                return ANEURALNETWORKS_BAD_DATA;
            }
            if (!validateVersion(&context, convert(halVersion), maybeVersion.value())) {
                LOG(ERROR) << "Validation failed for operation " << opType;
                return ANEURALNETWORKS_BAD_DATA;
            }
            return ANEURALNETWORKS_NO_ERROR;
        }
    }
}
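
// Maps NeuralNetworks.h result codes onto the canonical ErrorStatus enum. A
// minimal usage sketch (hypothetical call site, not part of this file):
//
//   const ErrorStatus status = convertResultCodeToErrorStatus(ANEURALNETWORKS_BAD_DATA);
//   // status == ErrorStatus::INVALID_ARGUMENT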
ErrorStatus convertResultCodeToErrorStatus(int resultCode) {
    switch (resultCode) {
        case ANEURALNETWORKS_NO_ERROR:
            return ErrorStatus::NONE;

        case ANEURALNETWORKS_BAD_DATA:
        case ANEURALNETWORKS_UNEXPECTED_NULL:
            return ErrorStatus::INVALID_ARGUMENT;

        case ANEURALNETWORKS_OUTPUT_INSUFFICIENT_SIZE:
            return ErrorStatus::OUTPUT_INSUFFICIENT_SIZE;

        case ANEURALNETWORKS_UNAVAILABLE_DEVICE:
            return ErrorStatus::DEVICE_UNAVAILABLE;

        case ANEURALNETWORKS_BAD_STATE:
        case ANEURALNETWORKS_INCOMPLETE:
        case ANEURALNETWORKS_OP_FAILED:
        case ANEURALNETWORKS_OUT_OF_MEMORY:
        case ANEURALNETWORKS_UNMAPPABLE:
        case ANEURALNETWORKS_DEAD_OBJECT:
            return ErrorStatus::GENERAL_FAILURE;

        case ANEURALNETWORKS_MISSED_DEADLINE_TRANSIENT:
            return ErrorStatus::MISSED_DEADLINE_TRANSIENT;
        case ANEURALNETWORKS_MISSED_DEADLINE_PERSISTENT:
            return ErrorStatus::MISSED_DEADLINE_PERSISTENT;
        case ANEURALNETWORKS_RESOURCE_EXHAUSTED_TRANSIENT:
            return ErrorStatus::RESOURCE_EXHAUSTED_TRANSIENT;
        case ANEURALNETWORKS_RESOURCE_EXHAUSTED_PERSISTENT:
            return ErrorStatus::RESOURCE_EXHAUSTED_PERSISTENT;
    }
    LOG(ERROR) << "Unknown result code " << resultCode
               << " mapped to ErrorStatus::GENERAL_FAILURE";
    return ErrorStatus::GENERAL_FAILURE;
}
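
// The inverse of convertResultCodeToErrorStatus. Note the mapping is lossy:
// several result codes collapse to GENERAL_FAILURE above, so round-tripping a
// result code through ErrorStatus need not return the original value.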
int convertErrorStatusToResultCode(ErrorStatus status) {
    switch (status) {
        case ErrorStatus::NONE:
            return ANEURALNETWORKS_NO_ERROR;
        case ErrorStatus::DEVICE_UNAVAILABLE:
            return ANEURALNETWORKS_UNAVAILABLE_DEVICE;
        case ErrorStatus::GENERAL_FAILURE:
            return ANEURALNETWORKS_OP_FAILED;
        case ErrorStatus::OUTPUT_INSUFFICIENT_SIZE:
            return ANEURALNETWORKS_OUTPUT_INSUFFICIENT_SIZE;
        case ErrorStatus::INVALID_ARGUMENT:
            return ANEURALNETWORKS_BAD_DATA;
        case ErrorStatus::MISSED_DEADLINE_TRANSIENT:
            return ANEURALNETWORKS_MISSED_DEADLINE_TRANSIENT;
        case ErrorStatus::MISSED_DEADLINE_PERSISTENT:
            return ANEURALNETWORKS_MISSED_DEADLINE_PERSISTENT;
        case ErrorStatus::RESOURCE_EXHAUSTED_TRANSIENT:
            return ANEURALNETWORKS_RESOURCE_EXHAUSTED_TRANSIENT;
        case ErrorStatus::RESOURCE_EXHAUSTED_PERSISTENT:
            return ANEURALNETWORKS_RESOURCE_EXHAUSTED_PERSISTENT;
        case ErrorStatus::DEAD_OBJECT:
            return ANEURALNETWORKS_DEAD_OBJECT;
    }
    LOG(ERROR) << "Unknown ErrorStatus " << status << " mapped to ANEURALNETWORKS_OP_FAILED";
    return ANEURALNETWORKS_OP_FAILED;
}
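
// Sanitizes a driver's execution result: output shapes are only meaningful on
// NONE or OUTPUT_INSUFFICIENT_SIZE, and timing only on NONE, so anything else
// the driver returned is logged and dropped.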
std::tuple<int, std::vector<OutputShape>, Timing> getExecutionResult(
        ErrorStatus status, std::vector<OutputShape> outputShapes, Timing timing) {
    constexpr Timing kNoTiming = {};
    const int n = convertErrorStatusToResultCode(status);
    if (status != ErrorStatus::NONE && status != ErrorStatus::OUTPUT_INSUFFICIENT_SIZE &&
        !outputShapes.empty()) {
        LOG(ERROR) << "The driver returned OutputShapes when it shouldn't.";
        outputShapes.clear();
    }
    if (status != ErrorStatus::NONE && timing != kNoTiming) {
        LOG(ERROR) << "The driver returned Timing when it shouldn't.";
        timing = kNoTiming;
    }
    return {n, std::move(outputShapes), timing};
}
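
// Waits on a sync fence file descriptor and reports the fence's final state.
// Per standard poll() semantics, a negative timeout blocks indefinitely.
// Hypothetical usage sketch (fenceFd is illustrative, not part of this file):
//
//   if (syncWait(fenceFd, /*timeout=*/-1) != FenceState::SIGNALED) {
//       // handle an errored, still-active, or unknown fence
//   }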
FenceState syncWait(int fd, int timeout) {
    // This implementation is directly based on the ::sync_wait() implementation.

    struct pollfd fds;
    int ret;

    if (fd < 0) {
        errno = EINVAL;
        return FenceState::UNKNOWN;
    }

    fds.fd = fd;
    fds.events = POLLIN;

    do {
        ret = poll(&fds, 1, timeout);
        if (ret > 0) {
            if (fds.revents & POLLNVAL) {
                errno = EINVAL;
                return FenceState::UNKNOWN;
            }
            if (fds.revents & POLLERR) {
                errno = EINVAL;
                return FenceState::ERROR;
            }
            return FenceState::SIGNALED;
        } else if (ret == 0) {
            errno = ETIME;
            return FenceState::ACTIVE;
        }
    } while (ret == -1 && (errno == EINTR || errno == EAGAIN));

    return FenceState::UNKNOWN;
}

#ifdef NN_DEBUGGABLE
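// Reads a numeric system property, falling back to defaultValue when the
// property is unset. Hypothetical usage (property name for illustration only):
//
//   const uint32_t n = getProp("debug.nn.some-knob", /*defaultValue=*/0);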
uint32_t getProp(const char* str, uint32_t defaultValue) {
    const std::string propStr = android::base::GetProperty(str, "");
    if (propStr.size() > 0) {
        return std::stoi(propStr);
    } else {
        return defaultValue;
    }
}
#endif  // NN_DEBUGGABLE

}  // namespace nn
}  // namespace android