/*
 * Copyright (C) 2017 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define LOG_TAG "Utils"

#include "Utils.h"
#include "NeuralNetworks.h"

#include <android-base/logging.h>
#include <android-base/properties.h>
#include <android-base/strings.h>
#include <sys/system_properties.h>
#include <unordered_map>

using ::android::hidl::allocator::V1_0::IAllocator;

namespace android {
namespace nn {

const char kVLogPropKey[] = "debug.nn.vlog";
int vLogMask = ~0;

// Split the space-separated list of tags from the verbose log setting and
// build the logging mask from it. Note that '1' and 'all' are special values
// that enable all verbose logging.
//
// The NN API verbose logging setting comes from the system property
// debug.nn.vlog. Examples:
//   setprop debug.nn.vlog 1                   : enable all logging tags.
//   setprop debug.nn.vlog "model compilation" : only enable logging for the
//                                               MODEL and COMPILATION tags.
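//
// A minimal usage sketch (this assumes the VLOG(TAG) macro declared in
// Utils.h, which is a no-op unless the corresponding bit is set in vLogMask):
//
//   initVLogMask();                       // typically called once at startup
//   VLOG(MODEL) << "constructing model";  // emitted only if "model" (or
//                                         // "1"/"all") was set in the property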
void initVLogMask() {
    vLogMask = 0;
    const std::string vLogSetting = android::base::GetProperty(kVLogPropKey, "");
    if (vLogSetting.empty()) {
        return;
    }

    std::unordered_map<std::string, int> vLogFlags = {
        {"1", -1},
        {"all", -1},
        {"model", MODEL},
        {"compilation", COMPILATION},
        {"execution", EXECUTION},
        {"cpuexe", CPUEXE},
        {"manager", MANAGER},
        {"driver", DRIVER}};

    std::vector<std::string> elements = android::base::Split(vLogSetting, " ");
    for (const auto& elem : elements) {
        const auto& flag = vLogFlags.find(elem);
        if (flag == vLogFlags.end()) {
            LOG(ERROR) << "Unknown trace flag: " << elem;
            continue;
        }

        if (flag->second == -1) {
            // -1 is used for the special values "1" and "all" that enable all
            // tracing.
            vLogMask = ~0;
            return;
        } else {
            vLogMask |= 1 << flag->second;
        }
    }
}

#define COUNT(X) (sizeof(X) / sizeof(X[0]))

const char* kTypeNames[kNumberOfDataTypes] = {
        "FLOAT32",        "INT32",        "UINT32",
        "TENSOR_FLOAT32", "TENSOR_INT32", "TENSOR_QUANT8_ASYMM",
};

static_assert(COUNT(kTypeNames) == kNumberOfDataTypes, "kTypeNames is incorrect");

const char* kTypeNamesOEM[kNumberOfDataTypesOEM] = {
        "OEM",            "TENSOR_OEM_BYTE",
};

static_assert(COUNT(kTypeNamesOEM) == kNumberOfDataTypesOEM, "kTypeNamesOEM is incorrect");

// TODO: Check if this is still useful.
const char* kErrorNames[] = {
        "NO_ERROR", "OUT_OF_MEMORY", "INCOMPLETE", "NULL", "BAD_DATA",
};

namespace {

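// Looks up an entry for a NN API type or operation code in one of the tables
// below. Regular codes index directly into `table`; OEM codes, which start at
// kOEMCodeBase, index into `tableOEM` after subtracting that base. Any other
// code is a programming error and triggers nnAssert.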
template <typename EntryType, uint32_t entryCount, uint32_t entryCountOEM>
EntryType tableLookup(const EntryType (&table)[entryCount],
                      const EntryType (&tableOEM)[entryCountOEM],
                      uint32_t code) {
    if (code < entryCount) {
        return table[code];
    } else if (code >= kOEMCodeBase && (code - kOEMCodeBase) < entryCountOEM) {
        return tableOEM[code - kOEMCodeBase];
    } else {
        nnAssert(!"tableLookup: bad code");
        return EntryType();
    }
}

}  // anonymous namespace

const char* kOperationNames[kNumberOfOperationTypes] = {
        "ADD",
        "AVERAGE_POOL",
        "CONCATENATION",
        "CONV",
        "DEPTHWISE_CONV",
        "DEPTH_TO_SPACE",
        "DEQUANTIZE",
        "EMBEDDING_LOOKUP",
        "FLOOR",
        "FULLY_CONNECTED",
        "HASHTABLE_LOOKUP",
        "L2_NORMALIZATION",
        "L2_POOL",
        "LOCAL_RESPONSE_NORMALIZATION",
        "LOGISTIC",
        "LSH_PROJECTION",
        "LSTM",
        "MAX_POOL",
        "MUL",
        "RELU",
        "RELU1",
        "RELU6",
        "RESHAPE",
        "RESIZE_BILINEAR",
        "RNN",
        "SOFTMAX",
        "SPACE_TO_DEPTH",
        "SVDF",
        "TANH",
};

static_assert(COUNT(kOperationNames) == kNumberOfOperationTypes, "kOperationNames is incorrect");

const char* kOperationNamesOEM[kNumberOfOperationTypesOEM] = {
        "OEM_OPERATION",
};

static_assert(COUNT(kOperationNamesOEM) == kNumberOfOperationTypesOEM,
              "kOperationNamesOEM is incorrect");

const char* getOperationName(OperationType type) {
    uint32_t n = static_cast<uint32_t>(type);
    return tableLookup(kOperationNames, kOperationNamesOEM, n);
}

const uint32_t kSizeOfDataType[]{
        4, // ANEURALNETWORKS_FLOAT32
        4, // ANEURALNETWORKS_INT32
        4, // ANEURALNETWORKS_UINT32
        4, // ANEURALNETWORKS_TENSOR_FLOAT32
        4, // ANEURALNETWORKS_TENSOR_INT32
        1  // ANEURALNETWORKS_TENSOR_QUANT8_ASYMM
};

static_assert(COUNT(kSizeOfDataType) == kNumberOfDataTypes, "kSizeOfDataType is incorrect");

const bool kScalarDataType[]{
        true,  // ANEURALNETWORKS_FLOAT32
        true,  // ANEURALNETWORKS_INT32
        true,  // ANEURALNETWORKS_UINT32
        false, // ANEURALNETWORKS_TENSOR_FLOAT32
        false, // ANEURALNETWORKS_TENSOR_INT32
        false, // ANEURALNETWORKS_TENSOR_QUANT8_ASYMM
};

static_assert(COUNT(kScalarDataType) == kNumberOfDataTypes, "kScalarDataType is incorrect");

const uint32_t kSizeOfDataTypeOEM[]{
        0, // ANEURALNETWORKS_OEM
        1, // ANEURALNETWORKS_TENSOR_OEM_BYTE
};

static_assert(COUNT(kSizeOfDataTypeOEM) == kNumberOfDataTypesOEM,
              "kSizeOfDataTypeOEM is incorrect");

const bool kScalarDataTypeOEM[]{
        true,  // ANEURALNETWORKS_OEM
        false, // ANEURALNETWORKS_TENSOR_OEM_BYTE
};

static_assert(COUNT(kScalarDataTypeOEM) == kNumberOfDataTypesOEM,
              "kScalarDataTypeOEM is incorrect");

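// Returns the amount of memory, in bytes, needed to store a value of the given
// operand type and dimensions. For scalar types the dimensions are ignored.
//
// A quick worked example (values taken from the tables above):
//   TENSOR_FLOAT32 with dimensions {2, 3}   -> 4 * 2 * 3 = 24 bytes
//   TENSOR_QUANT8_ASYMM with dimensions {5} -> 1 * 5     = 5 bytes
//   INT32 (scalar)                          -> 4 bytes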
uint32_t sizeOfData(OperandType type, const std::vector<uint32_t>& dimensions) {
    int n = static_cast<int>(type);

    uint32_t size = tableLookup(kSizeOfDataType, kSizeOfDataTypeOEM, n);

    if (tableLookup(kScalarDataType, kScalarDataTypeOEM, n)) {
        return size;
    }

    for (auto d : dimensions) {
        size *= d;
    }
    return size;
}

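// Allocates `size` bytes of ashmem-backed shared memory through the IAllocator
// HAL service. On failure an error is logged and the returned hidl_memory is
// left default-constructed (zero-sized), so callers should check its validity
// before use.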
hidl_memory allocateSharedMemory(int64_t size) {
    hidl_memory memory;

    // TODO: should we align memory size to nearest page? doesn't seem necessary...
    const std::string type = "ashmem";
    sp<IAllocator> allocator = IAllocator::getService(type);
    allocator->allocate(size, [&](bool success, const hidl_memory& mem) {
        if (!success) {
            LOG(ERROR) << "unable to allocate " << size << " bytes of " << type;
        } else {
            memory = mem;
        }
    });

    return memory;
}

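// Returns how many padding bytes are needed at `index` so that a value of
// `length` bytes starts on its natural alignment boundary: values shorter than
// 2 bytes need no alignment, values shorter than 4 bytes are aligned to 2
// bytes, and everything else to 4 bytes.
//
// Worked example: index = 5, length = 4 -> pattern = 3,
//   extra = (~(5 - 1)) & 3 = 3, so 3 padding bytes move the index to 8,
//   which is 4-byte aligned.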
uint32_t alignBytesNeeded(uint32_t index, size_t length) {
    uint32_t pattern;
    if (length < 2) {
        pattern = 0; // No alignment necessary
    } else if (length < 4) {
        pattern = 1; // Align on 2-byte boundary
    } else {
        pattern = 3; // Align on 4-byte boundary
    }
    uint32_t extra = (~(index - 1)) & pattern;
    return extra;
}

void logModelToInfo(const Model& model) {
    LOG(INFO) << "Model start";
    LOG(INFO) << "operands" << toString(model.operands);
    LOG(INFO) << "operations" << toString(model.operations);
    LOG(INFO) << "inputIndexes" << toString(model.inputIndexes);
    LOG(INFO) << "outputIndexes" << toString(model.outputIndexes);
    LOG(INFO) << "operandValues size" << model.operandValues.size();
    LOG(INFO) << "pools" << toString(model.pools);
}

// Validates an operand type. Unless allowPartial is true, every dimension must
// be fully specified (non-zero).
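//
// For example, the following quantized tensor type passes validation because
// its dimensions are non-zero, its scale is non-negative, and its zeroPoint
// fits in [0, 255] (illustrative sketch only; the struct fields come from
// ANeuralNetworksOperandType in NeuralNetworks.h):
//
//   uint32_t dims[2] = {2, 3};
//   ANeuralNetworksOperandType quant8 = {
//           .type = ANEURALNETWORKS_TENSOR_QUANT8_ASYMM,
//           .dimensionCount = 2,
//           .dimensions = dims,
//           .scale = 0.5f,
//           .zeroPoint = 128,
//   };
//   validateOperandType(quant8, "example", /*allowPartial=*/false);  // NO_ERROR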
int validateOperandType(const ANeuralNetworksOperandType& type, const char* tag,
                        bool allowPartial) {
    if (!allowPartial) {
        for (uint32_t i = 0; i < type.dimensionCount; i++) {
            if (type.dimensions[i] == 0) {
                LOG(ERROR) << tag << " OperandType invalid dimensions[" << i
                           << "] = " << type.dimensions[i];
                return ANEURALNETWORKS_BAD_DATA;
            }
        }
    }
    if (!validCode(kNumberOfDataTypes, kNumberOfDataTypesOEM, type.type)) {
        LOG(ERROR) << tag << " OperandType invalid type " << type.type;
        return ANEURALNETWORKS_BAD_DATA;
    }
    if (type.type == ANEURALNETWORKS_TENSOR_QUANT8_ASYMM) {
        if (type.zeroPoint < 0 || type.zeroPoint > 255) {
            LOG(ERROR) << tag << " OperandType invalid zeroPoint " << type.zeroPoint;
            return ANEURALNETWORKS_BAD_DATA;
        }
        if (type.scale < 0.f) {
            LOG(ERROR) << tag << " OperandType invalid scale " << type.scale;
            return ANEURALNETWORKS_BAD_DATA;
        }
    }
    return ANEURALNETWORKS_NO_ERROR;
}

int validateOperandList(uint32_t count, const uint32_t* list, uint32_t operandCount,
                        const char* tag) {
    for (uint32_t i = 0; i < count; i++) {
        if (list[i] >= operandCount) {
            LOG(ERROR) << tag << " invalid operand index at " << i << " = " << list[i]
                       << ", operandCount " << operandCount;
            return ANEURALNETWORKS_BAD_DATA;
        }
    }
    return ANEURALNETWORKS_NO_ERROR;
}

static bool validOperandIndexes(const hidl_vec<uint32_t>& indexes, size_t operandCount) {
    for (uint32_t i : indexes) {
        if (i >= operandCount) {
            LOG(ERROR) << "Index out of range " << i << "/" << operandCount;
            return false;
        }
    }
    return true;
}

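// Checks that every operand has a known type code and a value location that is
// consistent with its lifetime: constants copied into operandValues must fit
// inside that buffer, constants referenced from a pool must name a valid pool,
// and runtime operands (temporaries, model inputs/outputs, and no-value
// operands) must have a zero offset and length.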
static bool validOperands(const hidl_vec<Operand>& operands, const hidl_vec<uint8_t>& operandValues,
                          size_t poolCount) {
    for (auto& operand : operands) {
        if (!validCode(kNumberOfDataTypes, kNumberOfDataTypesOEM,
                       static_cast<uint32_t>(operand.type))) {
            LOG(ERROR) << "Invalid operand type " << toString(operand.type);
            return false;
        }
        /* TODO validate dim with type
        if (!validOperandIndexes(operand.dimensions, mDimensions)) {
            return false;
        }
        */
        switch (operand.lifetime) {
            case OperandLifeTime::CONSTANT_COPY:
                if (operand.location.offset + operand.location.length > operandValues.size()) {
                    LOG(ERROR) << "OperandValue location out of range.  Starts at "
                               << operand.location.offset << ", length " << operand.location.length
                               << ", max " << operandValues.size();
                    return false;
                }
                break;
            case OperandLifeTime::TEMPORARY_VARIABLE:
            case OperandLifeTime::MODEL_INPUT:
            case OperandLifeTime::MODEL_OUTPUT:
            case OperandLifeTime::NO_VALUE:
                if (operand.location.offset != 0 || operand.location.length != 0) {
                    LOG(ERROR) << "Unexpected offset " << operand.location.offset << " or length "
                               << operand.location.length << " for runtime location.";
                    return false;
                }
                break;
            case OperandLifeTime::CONSTANT_REFERENCE:
                // TODO: Validate that we are within the pool.
                if (operand.location.poolIndex >= poolCount) {
                    LOG(ERROR) << "Invalid poolIndex " << operand.location.poolIndex << "/"
                               << poolCount;
                    return false;
                }
                break;
            default:
                LOG(ERROR) << "Invalid lifetime";
                return false;
        }
    }
    return true;
}

static bool validOperations(const hidl_vec<Operation>& operations, size_t operandCount) {
    for (auto& op : operations) {
        if (!validCode(kNumberOfOperationTypes, kNumberOfOperationTypesOEM,
                       static_cast<uint32_t>(op.type))) {
            LOG(ERROR) << "Invalid operation type " << toString(op.type);
            return false;
        }
        if (!validOperandIndexes(op.inputs, operandCount) ||
            !validOperandIndexes(op.outputs, operandCount)) {
            return false;
        }
    }
    return true;
}

// TODO: Double-check this validation.
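// Performs basic structural validation of a model: the operands, operations,
// and input/output indexes must all be internally consistent, as checked by
// the helpers above. It does not verify per-operation operand counts or
// shapes.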
bool validateModel(const Model& model) {
    const size_t operandCount = model.operands.size();
    return (validOperands(model.operands, model.operandValues, model.pools.size()) &&
            validOperations(model.operations, operandCount) &&
            validOperandIndexes(model.inputIndexes, operandCount) &&
            validOperandIndexes(model.outputIndexes, operandCount));
}

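// Checks one side of a request (its inputs or its outputs, as named by `type`)
// against the corresponding model operands: the argument count must match the
// model, no-value arguments must not carry location or dimension details, pool
// indexes must be in range, and any dimensions supplied by the request must be
// non-zero and agree with the model wherever the model specifies them.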
bool validRequestArguments(const hidl_vec<RequestArgument>& arguments,
                           const hidl_vec<uint32_t>& operandIndexes,
                           const hidl_vec<Operand>& operands, size_t poolCount,
                           const char* type) {
    const size_t argumentCount = arguments.size();
    if (argumentCount != operandIndexes.size()) {
        LOG(ERROR) << "Request specifies " << argumentCount << " " << type << "s but the model has "
                   << operandIndexes.size();
        return false;
    }
    for (size_t argumentIndex = 0; argumentIndex < argumentCount; argumentIndex++) {
        const RequestArgument& argument = arguments[argumentIndex];
        const uint32_t operandIndex = operandIndexes[argumentIndex];
        const Operand& operand = operands[operandIndex];
        if (argument.hasNoValue) {
            if (argument.location.poolIndex != 0 ||
                argument.location.offset != 0 ||
                argument.location.length != 0 ||
                argument.dimensions.size() != 0) {
                LOG(ERROR) << "Request " << type << " " << argumentIndex
                           << " has no value yet has details.";
                return false;
            }
        }
        if (argument.location.poolIndex >= poolCount) {
            LOG(ERROR) << "Request " << type << " " << argumentIndex << " has an invalid poolIndex "
                       << argument.location.poolIndex << "/" << poolCount;
            return false;
        }
        // TODO: Validate that we are within the pool.
        uint32_t rank = argument.dimensions.size();
        if (rank > 0) {
            if (rank != operand.dimensions.size()) {
                LOG(ERROR) << "Request " << type << " " << argumentIndex
                           << " has number of dimensions (" << rank
                           << ") different than the model's (" << operand.dimensions.size() << ")";
                return false;
            }
            for (size_t i = 0; i < rank; i++) {
                if (argument.dimensions[i] != operand.dimensions[i] &&
                    operand.dimensions[i] != 0) {
                    LOG(ERROR) << "Request " << type << " " << argumentIndex
                               << " has dimension " << i << " of " << argument.dimensions[i]
                               << " different than the model's " << operand.dimensions[i];
                    return false;
                }
                if (argument.dimensions[i] == 0) {
                    LOG(ERROR) << "Request " << type << " " << argumentIndex
                               << " has dimension " << i << " of zero";
                    return false;
                }
            }
        }
    }
    return true;
}

// TODO: Double-check this validation.
bool validateRequest(const Request& request, const Model& model) {
    const size_t poolCount = request.pools.size();
    return (validRequestArguments(request.inputs, model.inputIndexes, model.operands, poolCount,
                                  "input") &&
            validRequestArguments(request.outputs, model.outputIndexes, model.operands, poolCount,
                                  "output"));
}

#ifdef NN_DEBUGGABLE
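// Debug-only helper: returns the value of the system property `str` parsed as
// an integer, or defaultValue if the property is not set. Note that std::stoi
// will throw if the property is set to a non-numeric string.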
uint32_t getProp(const char* str, uint32_t defaultValue) {
    const std::string propStr = android::base::GetProperty(str, "");
    if (propStr.size() > 0) {
        return std::stoi(propStr);
    } else {
        return defaultValue;
    }
}
#endif  // NN_DEBUGGABLE

} // namespace nn
} // namespace android