• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright (C) 2019 The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *      http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 // This header file defines an unified structure for a model under test, and provides helper
18 // functions checking test results. Multiple instances of the test model structure will be
19 // generated from the model specification files under nn/runtime/test/specs directory.
20 // Both CTS and VTS will consume this test structure and convert into their own model and
21 // request format.
22 
23 #ifndef ANDROID_PACKAGES_MODULES_NEURALNETWORKS_TOOLS_TEST_GENERATOR_TEST_HARNESS_TEST_HARNESS_H
24 #define ANDROID_PACKAGES_MODULES_NEURALNETWORKS_TOOLS_TEST_GENERATOR_TEST_HARNESS_TEST_HARNESS_H
25 
#include <algorithm>
#include <cstdint>
#include <cstdlib>
#include <cstring>
#include <functional>
#include <iostream>
#include <limits>
#include <map>
#include <memory>
#include <optional>
#include <random>
#include <string>
#include <utility>
#include <vector>
38 
39 namespace test_helper {
40 
// This class is a workaround for two issues our code relies on:
// 1. sizeof(bool) is implementation defined.
// 2. vector<bool> does not allow direct pointer access via the data() method.
class bool8 {
   public:
    // Default-constructed bool8 is false (mStorage zero-initialized via the member initializer).
    bool8() = default;
    // Implicit conversion from bool is intentional so bool8 can be used as a drop-in bool.
    /* implicit */ bool8(bool value) : mStorage(value ? 1 : 0) {}  // NOLINT(google-explicit-constructor)
    inline operator bool() const { return mStorage != 0; }         // NOLINT(google-explicit-constructor)

   private:
    uint8_t mStorage = 0;
};

static_assert(sizeof(bool8) == 1, "size of bool8 must be 8 bits");
55 
56 // We need the following enum classes since the test harness can neither depend on NDK nor HIDL
57 // definitions.
58 
// Data type of a test operand. Defined locally because the test harness can depend on neither
// NDK nor HIDL definitions; the explicit numeric values keep generated test files stable.
enum class TestOperandType {
    FLOAT32 = 0,
    INT32 = 1,
    UINT32 = 2,
    TENSOR_FLOAT32 = 3,
    TENSOR_INT32 = 4,
    TENSOR_QUANT8_ASYMM = 5,
    BOOL = 6,
    TENSOR_QUANT16_SYMM = 7,
    TENSOR_FLOAT16 = 8,
    TENSOR_BOOL8 = 9,
    FLOAT16 = 10,
    TENSOR_QUANT8_SYMM_PER_CHANNEL = 11,
    TENSOR_QUANT16_ASYMM = 12,
    TENSOR_QUANT8_SYMM = 13,
    TENSOR_QUANT8_ASYMM_SIGNED = 14,
    // The operand's value is a reference to another subgraph (see TestModel::referenced).
    SUBGRAPH = 15,
};
77 
// Describes where a test operand's value comes from and how it participates in the subgraph
// (temporary, graph input/output, constant, absent, or subgraph reference).
enum class TestOperandLifeTime {
    TEMPORARY_VARIABLE = 0,
    SUBGRAPH_INPUT = 1,
    SUBGRAPH_OUTPUT = 2,
    CONSTANT_COPY = 3,
    CONSTANT_REFERENCE = 4,
    NO_VALUE = 5,
    SUBGRAPH = 6,
    // DEPRECATED. Use SUBGRAPH_INPUT.
    // This value is used in pre-1.3 VTS tests.
    MODEL_INPUT = SUBGRAPH_INPUT,
    // DEPRECATED. Use SUBGRAPH_OUTPUT.
    // This value is used in pre-1.3 VTS tests.
    MODEL_OUTPUT = SUBGRAPH_OUTPUT,
};
93 
// Operation types available in test models. Defined locally because the test harness can depend
// on neither NDK nor HIDL definitions. The explicit numeric values must not be changed or reused,
// since generated test files and the CTS/VTS converters rely on them.
enum class TestOperationType {
    ADD = 0,
    AVERAGE_POOL_2D = 1,
    CONCATENATION = 2,
    CONV_2D = 3,
    DEPTHWISE_CONV_2D = 4,
    DEPTH_TO_SPACE = 5,
    DEQUANTIZE = 6,
    EMBEDDING_LOOKUP = 7,
    FLOOR = 8,
    FULLY_CONNECTED = 9,
    HASHTABLE_LOOKUP = 10,
    L2_NORMALIZATION = 11,
    L2_POOL_2D = 12,
    LOCAL_RESPONSE_NORMALIZATION = 13,
    LOGISTIC = 14,
    LSH_PROJECTION = 15,
    LSTM = 16,
    MAX_POOL_2D = 17,
    MUL = 18,
    RELU = 19,
    RELU1 = 20,
    RELU6 = 21,
    RESHAPE = 22,
    RESIZE_BILINEAR = 23,
    RNN = 24,
    SOFTMAX = 25,
    SPACE_TO_DEPTH = 26,
    SVDF = 27,
    TANH = 28,
    BATCH_TO_SPACE_ND = 29,
    DIV = 30,
    MEAN = 31,
    PAD = 32,
    SPACE_TO_BATCH_ND = 33,
    SQUEEZE = 34,
    STRIDED_SLICE = 35,
    SUB = 36,
    TRANSPOSE = 37,
    ABS = 38,
    ARGMAX = 39,
    ARGMIN = 40,
    AXIS_ALIGNED_BBOX_TRANSFORM = 41,
    BIDIRECTIONAL_SEQUENCE_LSTM = 42,
    BIDIRECTIONAL_SEQUENCE_RNN = 43,
    BOX_WITH_NMS_LIMIT = 44,
    CAST = 45,
    CHANNEL_SHUFFLE = 46,
    DETECTION_POSTPROCESSING = 47,
    EQUAL = 48,
    EXP = 49,
    EXPAND_DIMS = 50,
    GATHER = 51,
    GENERATE_PROPOSALS = 52,
    GREATER = 53,
    GREATER_EQUAL = 54,
    GROUPED_CONV_2D = 55,
    HEATMAP_MAX_KEYPOINT = 56,
    INSTANCE_NORMALIZATION = 57,
    LESS = 58,
    LESS_EQUAL = 59,
    LOG = 60,
    LOGICAL_AND = 61,
    LOGICAL_NOT = 62,
    LOGICAL_OR = 63,
    LOG_SOFTMAX = 64,
    MAXIMUM = 65,
    MINIMUM = 66,
    NEG = 67,
    NOT_EQUAL = 68,
    PAD_V2 = 69,
    POW = 70,
    PRELU = 71,
    QUANTIZE = 72,
    QUANTIZED_16BIT_LSTM = 73,
    RANDOM_MULTINOMIAL = 74,
    REDUCE_ALL = 75,
    REDUCE_ANY = 76,
    REDUCE_MAX = 77,
    REDUCE_MIN = 78,
    REDUCE_PROD = 79,
    REDUCE_SUM = 80,
    ROI_ALIGN = 81,
    ROI_POOLING = 82,
    RSQRT = 83,
    SELECT = 84,
    SIN = 85,
    SLICE = 86,
    SPLIT = 87,
    SQRT = 88,
    TILE = 89,
    TOPK_V2 = 90,
    TRANSPOSE_CONV_2D = 91,
    UNIDIRECTIONAL_SEQUENCE_LSTM = 92,
    UNIDIRECTIONAL_SEQUENCE_RNN = 93,
    RESIZE_NEAREST_NEIGHBOR = 94,
    QUANTIZED_LSTM = 95,
    // Control flow operations, which execute the subgraphs in TestModel::referenced.
    IF = 96,
    WHILE = 97,
    ELU = 98,
    HARD_SWISH = 99,
    FILL = 100,
    RANK = 101,
    BATCH_MATMUL = 102,
    PACK = 103,
    MIRROR_PAD = 104,
    REVERSE = 105,
#ifdef NN_EXPERIMENTAL_FEATURE
    // Experimental operation; only compiled in when NN_EXPERIMENTAL_FEATURE is defined. The
    // large value keeps it well clear of the stable operation range.
    DENSIFY = 20000,
#endif  // NN_EXPERIMENTAL_FEATURE
};
205 
// TODO(b/209797313): Deduplicate this enum class.
// The interface version a test model requires, ordered from oldest (HIDL 1.0) to newest AIDL
// version. Used as TestModel::minSupportedVersion; see TestModel::getAidlVersionInt().
enum class TestHalVersion { UNKNOWN, V1_0, V1_1, V1_2, V1_3, AIDL_V1, AIDL_V2, AIDL_V3 };
208 
// Manages the data buffer for a test operand.
class TestBuffer {
   public:
    // The buffer must be aligned on a boundary of a byte size that is a multiple of the element
    // type byte size. In NNAPI, 4-byte boundary should be sufficient for all current data types.
    static constexpr size_t kAlignment = 4;

    // Creates an empty buffer: size() == 0 and no storage allocated.
    TestBuffer() = default;

    // Create the buffer of a given size and initialize from data.
    // If data is nullptr, the allocated memory stays uninitialized.
    explicit TestBuffer(size_t size, const void* data = nullptr) : mSize(size) {
        if (size > 0) {
            // The size for aligned_alloc must be an integral multiple of alignment.
            mBuffer.reset(aligned_alloc(kAlignment, alignedSize()), free);
            // Guard against allocation failure: aligned_alloc may return nullptr, and copying
            // into a null buffer would be undefined behavior.
            if (data != nullptr && mBuffer != nullptr) {
                memcpy(mBuffer.get(), data, size);
            }
        }
    }

    // Explicitly create a deep copy.
    TestBuffer copy() const { return TestBuffer(mSize, mBuffer.get()); }

    // Factory method creating the buffer from a typed vector.
    template <typename T>
    static TestBuffer createFromVector(const std::vector<T>& vec) {
        return TestBuffer(vec.size() * sizeof(T), vec.data());
    }

    // Factory method for creating a randomized buffer with "size" number of bytes.
    static TestBuffer createRandom(size_t size, std::default_random_engine* gen) {
        static_assert(kAlignment % sizeof(uint32_t) == 0);
        TestBuffer testBuffer(size);
        std::uniform_int_distribution<uint32_t> dist{};
        // Fill whole uint32_t words; any tail past "size" still lies within the aligned
        // allocation because the buffer is allocated with alignedSize() bytes.
        const size_t count = testBuffer.alignedSize() / sizeof(uint32_t);
        std::generate_n(testBuffer.getMutable<uint32_t>(), count, [&] { return dist(*gen); });
        return testBuffer;
    }

    // Typed read-only pointer to the underlying storage (nullptr for an empty buffer).
    template <typename T>
    const T* get() const {
        return reinterpret_cast<const T*>(mBuffer.get());
    }

    // Typed mutable pointer to the underlying storage (nullptr for an empty buffer).
    template <typename T>
    T* getMutable() {
        return reinterpret_cast<T*>(mBuffer.get());
    }

    // Returns the byte size of the buffer.
    size_t size() const { return mSize; }

    // Returns the byte size that is rounded up to a multiple of kAlignment.
    size_t alignedSize() const { return ((mSize + kAlignment - 1) / kAlignment) * kAlignment; }

    // Null comparisons report whether any storage is currently allocated.
    bool operator==(std::nullptr_t) const { return mBuffer == nullptr; }
    bool operator!=(std::nullptr_t) const { return mBuffer != nullptr; }

   private:
    std::shared_ptr<void> mBuffer;  // owns the aligned allocation; released with free()
    size_t mSize = 0;               // requested (unaligned) size in bytes
};
271 
// Per-channel symmetric quantization parameters (see TestOperand::channelQuant, used with the
// per-channel quantized operand types).
struct TestSymmPerChannelQuantParams {
    std::vector<float> scales;  // one scale per channel along the dimension "channelDim"
    uint32_t channelDim = 0;    // index of the dimension that holds the channels
};
276 
// A single operand (tensor or scalar) of a test model graph.
struct TestOperand {
    TestOperandType type;
    std::vector<uint32_t> dimensions;
    uint32_t numberOfConsumers;  // how many operations consume this operand
    float scale = 0.0f;          // quantization scale; meaningful for quantized types only
    int32_t zeroPoint = 0;       // quantization zero point; meaningful for quantized types only
    TestOperandLifeTime lifetime;
    TestSymmPerChannelQuantParams channelQuant;  // only used for per-channel quantized operands

    // For SUBGRAPH_OUTPUT only. Set to true to skip the accuracy check on this operand.
    bool isIgnored = false;

    // For CONSTANT_COPY/REFERENCE and SUBGRAPH_INPUT, this is the data set in model and request.
    // For SUBGRAPH_OUTPUT,
    // - If isIgnored == false, this is the expected results.
    // - If isIgnored == true, this is populated but ignored
    // For TEMPORARY_VARIABLE and NO_VALUE, this is nullptr.
    TestBuffer data;
};
296 
// A single operation of a test graph. "inputs" and "outputs" are indexes into the enclosing
// TestSubgraph::operands vector.
struct TestOperation {
    TestOperationType type;
    std::vector<uint32_t> inputs;
    std::vector<uint32_t> outputs;
};
302 
// A dataflow graph: the operands plus the operations connecting them. "inputIndexes" and
// "outputIndexes" identify which operands are the subgraph's inputs and outputs.
struct TestSubgraph {
    std::vector<TestOperand> operands;
    std::vector<TestOperation> operations;
    std::vector<uint32_t> inputIndexes;
    std::vector<uint32_t> outputIndexes;
};
309 
310 struct TestModel {
311     TestSubgraph main;
312     std::vector<TestSubgraph> referenced;
313     bool isRelaxed = false;
314 
315     // Additional testing information and flags associated with the TestModel.
316 
317     // Specifies the RANDOM_MULTINOMIAL distribution tolerance.
318     // If set to greater than zero, the input is compared as log-probabilities
319     // to the output and must be within this tolerance to pass.
320     float expectedMultinomialDistributionTolerance = 0.0f;
321 
322     // If set to true, the TestModel specifies a validation test that is expected to fail during
323     // compilation or execution.
324     bool expectFailure = false;
325 
326     // The minimum supported HAL version.
327     TestHalVersion minSupportedVersion = TestHalVersion::UNKNOWN;
328 
329     // Returns an int AIDL version number. HIDL versions are treated as AIDL version 0.
getAidlVersionIntTestModel330     int32_t getAidlVersionInt() const {
331         switch (minSupportedVersion) {
332             case TestHalVersion::AIDL_V1:
333                 return 1;
334             case TestHalVersion::AIDL_V2:
335                 return 2;
336             case TestHalVersion::AIDL_V3:
337                 return 3;
338             default:
339                 // HIDL versions are treated as AIDL version 0 so that all AIDL services are newer.
340                 return 0;
341         }
342     }
343 
forEachSubgraphTestModel344     void forEachSubgraph(const std::function<void(const TestSubgraph&)>& handler) const {
345         handler(main);
346         for (const TestSubgraph& subgraph : referenced) {
347             handler(subgraph);
348         }
349     }
350 
forEachSubgraphTestModel351     void forEachSubgraph(const std::function<void(TestSubgraph&)>& handler) {
352         handler(main);
353         for (TestSubgraph& subgraph : referenced) {
354             handler(subgraph);
355         }
356     }
357 
358     // Explicitly create a deep copy.
copyTestModel359     TestModel copy() const {
360         TestModel newTestModel(*this);
361         newTestModel.forEachSubgraph([](TestSubgraph& subgraph) {
362             for (TestOperand& operand : subgraph.operands) {
363                 operand.data = operand.data.copy();
364             }
365         });
366         return newTestModel;
367     }
368 
hasQuant8CoupledOperandsTestModel369     bool hasQuant8CoupledOperands() const {
370         bool result = false;
371         forEachSubgraph([&result](const TestSubgraph& subgraph) {
372             if (result) {
373                 return;
374             }
375             for (const TestOperation& operation : subgraph.operations) {
376                 /*
377                  *  There are several ops that are exceptions to the general quant8
378                  *  types coupling:
379                  *  HASHTABLE_LOOKUP -- due to legacy reasons uses
380                  *    TENSOR_QUANT8_ASYMM tensor as if it was TENSOR_BOOL. It
381                  *    doesn't make sense to have coupling in this case.
382                  *  LSH_PROJECTION -- hashes an input tensor treating it as raw
383                  *    bytes. We can't expect same results for coupled inputs.
384                  *  PAD_V2 -- pad_value is set using int32 scalar, so coupling
385                  *    produces a wrong result.
386                  *  CAST -- converts tensors without taking into account input's
387                  *    scale and zero point. Coupled models shouldn't produce same
388                  *    results.
389                  *  QUANTIZED_16BIT_LSTM -- the op is made for a specific use case,
390                  *    supporting signed quantization is not worth the compications.
391                  */
392                 if (operation.type == TestOperationType::HASHTABLE_LOOKUP ||
393                     operation.type == TestOperationType::LSH_PROJECTION ||
394                     operation.type == TestOperationType::PAD_V2 ||
395                     operation.type == TestOperationType::CAST ||
396                     operation.type == TestOperationType::QUANTIZED_16BIT_LSTM) {
397                     continue;
398                 }
399                 for (const auto operandIndex : operation.inputs) {
400                     if (subgraph.operands[operandIndex].type ==
401                         TestOperandType::TENSOR_QUANT8_ASYMM) {
402                         result = true;
403                         return;
404                     }
405                 }
406                 for (const auto operandIndex : operation.outputs) {
407                     if (subgraph.operands[operandIndex].type ==
408                         TestOperandType::TENSOR_QUANT8_ASYMM) {
409                         result = true;
410                         return;
411                     }
412                 }
413             }
414         });
415         return result;
416     }
417 
hasScalarOutputsTestModel418     bool hasScalarOutputs() const {
419         bool result = false;
420         forEachSubgraph([&result](const TestSubgraph& subgraph) {
421             if (result) {
422                 return;
423             }
424             for (const TestOperation& operation : subgraph.operations) {
425                 // RANK op returns a scalar and therefore shouldn't be tested
426                 // for dynamic output shape support.
427                 if (operation.type == TestOperationType::RANK) {
428                     result = true;
429                     return;
430                 }
431                 // Control flow operations do not support referenced model
432                 // outputs with dynamic shapes.
433                 if (operation.type == TestOperationType::IF ||
434                     operation.type == TestOperationType::WHILE) {
435                     result = true;
436                     return;
437                 }
438             }
439         });
440         return result;
441     }
442 
isInfiniteLoopTimeoutTestTestModel443     bool isInfiniteLoopTimeoutTest() const {
444         // This should only match the TestModel generated from while_infinite_loop.mod.py.
445         return expectFailure && main.operations[0].type == TestOperationType::WHILE;
446     }
447 };
448 
449 // Manages all generated test models.
450 class TestModelManager {
451    public:
452     // Returns the singleton manager.
get()453     static TestModelManager& get() {
454         static TestModelManager instance;
455         return instance;
456     }
457 
458     // Registers a TestModel to the manager. Returns a placeholder integer for global variable
459     // initialization.
add(std::string name,const TestModel & testModel)460     int add(std::string name, const TestModel& testModel) {
461         mTestModels.emplace(std::move(name), &testModel);
462         return 0;
463     }
464 
465     // Returns a vector of selected TestModels for which the given "filter" returns true.
466     using TestParam = std::pair<std::string, const TestModel*>;
getTestModels(const std::function<bool (const TestModel &)> & filter)467     std::vector<TestParam> getTestModels(const std::function<bool(const TestModel&)>& filter) {
468         std::vector<TestParam> testModels;
469         testModels.reserve(mTestModels.size());
470         std::copy_if(mTestModels.begin(), mTestModels.end(), std::back_inserter(testModels),
471                      [&filter](const auto& nameTestPair) { return filter(*nameTestPair.second); });
472         return testModels;
473     }
474 
475     // Returns a vector of selected TestModels for which the given "filter" returns true.
getTestModels(const std::function<bool (const std::string &)> & filter)476     std::vector<TestParam> getTestModels(const std::function<bool(const std::string&)>& filter) {
477         std::vector<TestParam> testModels;
478         testModels.reserve(mTestModels.size());
479         std::copy_if(mTestModels.begin(), mTestModels.end(), std::back_inserter(testModels),
480                      [&filter](const auto& nameTestPair) { return filter(nameTestPair.first); });
481         return testModels;
482     }
483 
484    private:
485     TestModelManager() = default;
486     TestModelManager(const TestModelManager&) = delete;
487     TestModelManager& operator=(const TestModelManager&) = delete;
488 
489     // Contains all TestModels generated from nn/runtime/test/specs directory.
490     // The TestModels are sorted by name to ensure a predictable order.
491     std::map<std::string, const TestModel*> mTestModels;
492 };
493 
// Accuracy thresholds applied when comparing actual driver results against the expected values
// of one operand data type. The default thresholds accept everything.
struct AccuracyCriterion {
    // We expect the driver results to be unbiased.
    // Formula: abs(sum_{i}(diff) / sum(1)) <= bias, where
    // * fixed point: diff = actual - expected
    // * floating point: diff = (actual - expected) / max(1, abs(expected))
    float bias = std::numeric_limits<float>::max();

    // Set the threshold on Mean Square Error (MSE).
    // Formula: sum_{i}(diff ^ 2) / sum(1) <= mse
    float mse = std::numeric_limits<float>::max();

    // We also set accuracy thresholds on each element to detect any particular edge cases that may
    // be shadowed in bias or MSE. We use the similar approach as our CTS unit tests, but with much
    // relaxed criterion.
    // Formula: abs(actual - expected) <= atol + rtol * abs(expected)
    //   where atol stands for Absolute TOLerance and rtol for Relative TOLerance.
    float atol = 0.0f;
    float rtol = 0.0f;
};
513 
// A bundle of AccuracyCriterion, one per operand data type, consumed by checkResults.
struct AccuracyCriteria {
    AccuracyCriterion float32;
    AccuracyCriterion float16;
    AccuracyCriterion int32;
    AccuracyCriterion quant8Asymm;
    AccuracyCriterion quant8AsymmSigned;
    AccuracyCriterion quant8Symm;
    AccuracyCriterion quant16Asymm;
    AccuracyCriterion quant16Symm;
    // Allowed fraction of mismatching bool8 output elements — presumably; confirm against the
    // checkResults implementation.
    float bool8AllowedErrorRatio = 0.1f;
    // NOTE(review): looks like this tolerates NaN/Inf in floating point outputs when true —
    // verify in the checkResults implementation.
    bool allowInvalidFpValues = true;
};
526 
527 // Check the output results against the expected values in test model by calling
528 // GTEST_ASSERT/EXPECT. The index of the results corresponds to the index in
529 // model.main.outputIndexes. E.g., results[i] corresponds to model.main.outputIndexes[i].
530 void checkResults(const TestModel& model, const std::vector<TestBuffer>& results);
531 void checkResults(const TestModel& model, const std::vector<TestBuffer>& results,
532                   const AccuracyCriteria& criteria);
533 
534 bool isQuantizedType(TestOperandType type);
535 
536 TestModel convertQuant8AsymmOperandsToSigned(const TestModel& testModel);
537 
538 std::ostream& operator<<(std::ostream& os, const TestOperandType& type);
539 std::ostream& operator<<(std::ostream& os, const TestOperationType& type);
540 
// Dump a test model in the format of a spec file for debugging and visualization purpose.
class SpecDumper {
   public:
    // Stores references to both arguments; the caller must keep "testModel" and "os" alive for
    // the lifetime of this dumper.
    SpecDumper(const TestModel& testModel, std::ostream& os) : kTestModel(testModel), mOs(os) {}
    // Dumps the whole test model to the output stream in spec-file format.
    void dumpTestModel();
    // Dumps the given result buffers, labeled with "name", to the output stream.
    void dumpResults(const std::string& name, const std::vector<TestBuffer>& results);

   private:
    // Dump a test model operand.
    // e.g. op0 = Input("op0", "TENSOR_FLOAT32", "{1, 2, 6, 1}")
    // e.g. op1 = Parameter("op1", "INT32", "{}", [2])
    void dumpTestOperand(const TestOperand& operand, uint32_t index);

    // Dump a test model operation.
    // e.g. model = model.Operation("CONV_2D", op0, op1, op2, op3, op4, op5, op6).To(op7)
    void dumpTestOperation(const TestOperation& operation);

    // Dump a test buffer as a python 1D list.
    // e.g. [1, 2, 3, 4, 5]
    //
    // If useHexFloat is set to true and the operand type is float, the buffer values will be
    // dumped in hex representation.
    void dumpTestBuffer(TestOperandType type, const TestBuffer& buffer, bool useHexFloat);

    const TestModel& kTestModel;  // model being dumped (not owned)
    std::ostream& mOs;            // destination stream (not owned)
};
568 
569 // Convert the test model to an equivalent float32 model. It will return std::nullopt if the
570 // conversion is not supported, or if there is no equivalent float32 model.
571 std::optional<TestModel> convertToFloat32Model(const TestModel& testModel);
572 
573 // Used together with convertToFloat32Model. Convert the results computed from the float model to
574 // the actual data type in the original model.
575 void setExpectedOutputsFromFloat32Results(const std::vector<TestBuffer>& results, TestModel* model);
576 
577 }  // namespace test_helper
578 
579 #endif  // ANDROID_PACKAGES_MODULES_NEURALNETWORKS_TOOLS_TEST_GENERATOR_TEST_HARNESS_TEST_HARNESS_H
580