1 /* 2 * Copyright (C) 2019 The Android Open Source Project 3 * 4 * Licensed under the Apache License, Version 2.0 (the "License"); 5 * you may not use this file except in compliance with the License. 6 * You may obtain a copy of the License at 7 * 8 * http://www.apache.org/licenses/LICENSE-2.0 9 * 10 * Unless required by applicable law or agreed to in writing, software 11 * distributed under the License is distributed on an "AS IS" BASIS, 12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 * See the License for the specific language governing permissions and 14 * limitations under the License. 15 */ 16 17 #ifndef ANDROID_PACKAGES_MODULES_NEURALNETWORKS_RUNTIME_TEST_FUZZING_OPERATION_SIGNATURES_OPERATION_SIGNATURE_UTILS_H 18 #define ANDROID_PACKAGES_MODULES_NEURALNETWORKS_RUNTIME_TEST_FUZZING_OPERATION_SIGNATURES_OPERATION_SIGNATURE_UTILS_H 19 20 #include <algorithm> 21 #include <functional> 22 #include <memory> 23 #include <string> 24 #include <vector> 25 26 #include "TestHarness.h" 27 #include "TestNeuralNetworksWrapper.h" 28 #include "fuzzing/OperationManager.h" 29 #include "fuzzing/RandomGraphGenerator.h" 30 #include "fuzzing/RandomGraphGeneratorUtils.h" 31 32 namespace android { 33 namespace nn { 34 namespace fuzzing_test { 35 36 namespace { 37 38 using namespace test_helper; 39 40 // From TestOperandType to cpp type. 
// CppType<T>::type is the element type used to store one value of operand
// type T in a randomly generated buffer. The primary template is left
// undefined so that using an unsupported TestOperandType fails to compile.
template <TestOperandType type>
struct CppType;
template <>
struct CppType<TestOperandType::TENSOR_FLOAT32> {
    using type = float;
};
template <>
struct CppType<TestOperandType::FLOAT32> {
    using type = float;
};
template <>
struct CppType<TestOperandType::TENSOR_INT32> {
    using type = int32_t;
};
template <>
struct CppType<TestOperandType::INT32> {
    using type = int32_t;
};
template <>
struct CppType<TestOperandType::TENSOR_QUANT8_ASYMM> {
    using type = uint8_t;
};
template <>
struct CppType<TestOperandType::TENSOR_QUANT8_SYMM> {
    using type = int8_t;
};
template <>
struct CppType<TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED> {
    using type = int8_t;
};
template <>
struct CppType<TestOperandType::TENSOR_QUANT16_ASYMM> {
    using type = uint16_t;
};
template <>
struct CppType<TestOperandType::TENSOR_QUANT16_SYMM> {
    using type = int16_t;
};
// bool8 is the one-byte boolean type from TestHarness.h, used for both the
// tensor and the scalar boolean operand types.
template <>
struct CppType<TestOperandType::TENSOR_BOOL8> {
    using type = bool8;
};
template <>
struct CppType<TestOperandType::BOOL> {
    using type = bool8;
};
template <>
struct CppType<TestOperandType::TENSOR_FLOAT16> {
    using type = _Float16;
};
template <>
struct CppType<TestOperandType::FLOAT16> {
    using type = _Float16;
};

// The buffer value X is chosen uniformly in the range [kMinFloat32, kMaxFloat32]. kMinFloat32 and
// kMaxFloat32 are selected by setting:
// * E[X] = 0, so that the sum will less likely to overflow or underflow;
// * E[abs(X)] = 1, so that the production will less likely to overflow or underflow.
100 constexpr float kMaxFloat32 = 2.0f; 101 constexpr float kMinFloat32 = -kMaxFloat32; 102 103 template <typename T> 104 inline T getUniformValue(int valueProperties, T low, T up, T zeroPoint) { 105 if (valueProperties & RandomOperand::NON_NEGATIVE) { 106 NN_FUZZER_CHECK(up >= zeroPoint); 107 low = std::max(low, zeroPoint); 108 } 109 if (valueProperties & RandomOperand::NON_ZERO) { 110 return getUniformNonZero(low, up, zeroPoint); 111 } else { 112 return getUniform(low, up); 113 } 114 } 115 template <> 116 inline bool8 getUniformValue(int, bool8, bool8, bool8) { 117 return getBernoulli(0.5f); 118 } 119 120 template <typename T> 121 inline void uniform(T low, T up, T zeroPoint, RandomOperand* op) { 122 T* data = reinterpret_cast<T*>(op->buffer.data()); 123 uint32_t len = op->getNumberOfElements(); 124 for (uint32_t i = 0; i < len; i++) { 125 data[i] = getUniformValue<T>(op->valueProperties, low, up, zeroPoint); 126 } 127 } 128 129 // Generate random buffer values with uniform distribution. 130 // Dispatch to different generators by operand dataType. 
131 inline void uniformFinalizer(RandomOperand* op) { 132 switch (op->dataType) { 133 case TestOperandType::TENSOR_FLOAT32: 134 case TestOperandType::FLOAT32: 135 uniform<float>(kMinFloat32, kMaxFloat32, 0.0f, op); 136 break; 137 case TestOperandType::TENSOR_INT32: 138 case TestOperandType::INT32: 139 uniform<int32_t>(0, 255, op->zeroPoint, op); 140 break; 141 case TestOperandType::TENSOR_QUANT8_ASYMM: 142 uniform<uint8_t>(0, 255, op->zeroPoint, op); 143 break; 144 case TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED: 145 uniform<int8_t>(-128, 127, op->zeroPoint, op); 146 break; 147 case TestOperandType::TENSOR_QUANT8_SYMM: 148 uniform<int8_t>(-128, 127, op->zeroPoint, op); 149 break; 150 case TestOperandType::TENSOR_QUANT16_ASYMM: 151 uniform<uint16_t>(0, 65535, op->zeroPoint, op); 152 break; 153 case TestOperandType::TENSOR_QUANT16_SYMM: 154 uniform<int16_t>(-32768, 32767, op->zeroPoint, op); 155 break; 156 case TestOperandType::TENSOR_BOOL8: 157 uniform<bool8>(true, false, false, op); 158 break; 159 case TestOperandType::TENSOR_FLOAT16: 160 case TestOperandType::FLOAT16: 161 uniform<_Float16>(kMinFloat32, kMaxFloat32, 0.0f, op); 162 break; 163 default: 164 NN_FUZZER_CHECK(false) << "Unsupported data type."; 165 } 166 } 167 168 // Get a random value between [-rank, rank) for the "axis" parameter of NNAPI operations. 169 inline int32_t getRandomAxis(int32_t rank) { 170 return getUniform(-rank, rank - 1); 171 } 172 173 // Convert a potentially negative axis index to the equivalent positive axis index. 174 inline int32_t toPositiveAxis(int32_t axis, int32_t rank) { 175 return axis >= 0 ? axis : axis + rank; 176 } 177 178 // A helper struct for DEFINE_OPERATION_SIGNATURE macro. 
// Registers an OperationSignature with the global OperationManager as a side
// effect of initializing a namespace-scope int; used only through the
// DEFINE_OPERATION_SIGNATURE macro below.
struct OperationSignatureHelper {
    std::string name;
    OperationSignatureHelper(const std::string& name) : name(name) {}
    // operator+ is abused as the registration hook so that the macro can be
    // followed directly by an aggregate initializer for OperationSignature.
    int operator+(const OperationSignature& op) {
        OperationManager::get()->addSignature(name, op);
        return 0;
    }
};

}  // namespace

// Derives the output spatial dimension of a convolution-style op under an
// implicit (SAME/VALID) padding scheme. All arithmetic is on RandomVariable,
// so these expressions build symbolic constraints rather than compute values.
inline void implicitPadding(const RandomVariable& input, const RandomVariable& filter,
                            const RandomVariable& stride, const RandomVariable& dilation,
                            int32_t paddingScheme, RandomVariable* output) {
    switch (paddingScheme) {
        case ANEURALNETWORKS_PADDING_SAME:
            // ceil(input / stride), written with integer arithmetic.
            *output = (input + (stride - 1)) / stride;
            break;
        case ANEURALNETWORKS_PADDING_VALID:
            // ceil((input - (filter - 1) * dilation) / stride), expanded.
            *output = (input - filter * dilation + (dilation + stride - 1)) / stride;
            break;
        default:
            NN_FUZZER_CHECK(false) << "Unknown padding scheme";
    }
}

// Derives the output spatial dimension under explicit head/tail padding:
// floor((input + paddingHead + paddingTail - effectiveFilter) / stride) + 1.
inline void explicitPadding(const RandomVariable& input, const RandomVariable& filter,
                            const RandomVariable& stride, const RandomVariable& dilation,
                            const RandomVariable& paddingHead, const RandomVariable& paddingTail,
                            RandomVariable* output) {
    auto effectiveFilter = (filter - 1) * dilation + 1;
    *output = (input - effectiveFilter + (stride + paddingHead + paddingTail)) / stride;
    // TFLite will crash if the filter size is less than or equal to the paddings.
    effectiveFilter.setGreaterThan(paddingHead);
    effectiveFilter.setGreaterThan(paddingTail);
}

// Output dimension for TRANSPOSE_CONV-style ops under implicit padding; the
// inverse of the forward-convolution formulas above.
inline void implicitPaddingTranspose(const RandomVariable& input, const RandomVariable& filter,
                                     const RandomVariable& stride, int32_t paddingScheme,
                                     RandomVariable* output) {
    switch (paddingScheme) {
        case ANEURALNETWORKS_PADDING_SAME:
            *output = input * stride;
            break;
        case ANEURALNETWORKS_PADDING_VALID:
            *output = (input - 1) * stride + filter;
            break;
        default:
            NN_FUZZER_CHECK(false) << "Unknown padding scheme";
    }
}

// Output dimension for TRANSPOSE_CONV-style ops under explicit padding:
// (input - 1) * stride + filter - paddingHead - paddingTail, regrouped.
inline void explicitPaddingTranspose(const RandomVariable& input, const RandomVariable& filter,
                                     const RandomVariable& stride,
                                     const RandomVariable& paddingHead,
                                     const RandomVariable& paddingTail, RandomVariable* output) {
    *output = stride * input + filter - (stride + paddingHead + paddingTail);
}

// Copies the quantization parameters (scale, zeroPoint) of |from| onto |to|.
// Both operands must already have the same data type.
inline void setSameQuantization(const std::shared_ptr<RandomOperand>& to,
                                const std::shared_ptr<RandomOperand>& from) {
    NN_FUZZER_CHECK(to->dataType == from->dataType);
    to->scale = from->scale;
    to->zeroPoint = from->zeroPoint;
}

// Gives |op| the given rank with every dimension left as a FREE random
// variable, to be constrained later by the operation constructor.
inline void setFreeDimensions(const std::shared_ptr<RandomOperand>& op, uint32_t rank) {
    op->dimensions.resize(rank);
    for (uint32_t i = 0; i < rank; i++) op->dimensions[i] = RandomVariableType::FREE;
}

// For quantized convolutions/fully-connected ops: the bias (inputs[2]) scale
// must equal input_scale * filter_scale. When |applyOutputScaleBound| is set,
// the output scale is chosen strictly above the bias scale — NNAPI validation
// rejects quantized conv/FC whose output scale is <= input_scale * filter_scale.
inline void setConvFCScale(bool applyOutputScaleBound, RandomOperation* op) {
    if (isQuantizedType(op->inputs[0]->dataType)) {
        float biasScale = op->inputs[0]->scale * op->inputs[1]->scale;
        op->inputs[2]->scale = biasScale;
        if (applyOutputScaleBound) {
            op->outputs[0]->scale = getUniform(biasScale, biasScale * 5);
        }
    }
}

// For ops with input0 and output0 of the same dimension.
261 inline void sameDimensionOpConstructor(TestOperandType, uint32_t rank, RandomOperation* op) { 262 setFreeDimensions(op->inputs[0], rank); 263 op->outputs[0]->dimensions = op->inputs[0]->dimensions; 264 } 265 266 // For ops with input0 and output0 of the same shape including scale and zeroPoint. 267 inline void sameShapeOpConstructor(TestOperandType dataType, uint32_t rank, RandomOperation* op) { 268 sameDimensionOpConstructor(dataType, rank, op); 269 setSameQuantization(op->outputs[0], op->inputs[0]); 270 } 271 272 inline void defaultOperandConstructor(TestOperandType dataType, uint32_t, RandomOperand* op) { 273 op->dataType = dataType; 274 if (dataType == TestOperandType::TENSOR_QUANT8_ASYMM) { 275 op->scale = getUniform<float>(0.1, 2.0); 276 op->zeroPoint = getUniform<int32_t>(0, 255); 277 } else if (dataType == TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED) { 278 op->scale = getUniform<float>(0.1, 2.0); 279 op->zeroPoint = getUniform<int32_t>(-128, 127); 280 } else if (dataType == TestOperandType::TENSOR_QUANT8_SYMM) { 281 op->scale = getUniform<float>(0.1, 2.0); 282 op->zeroPoint = 0; 283 } else if (dataType == TestOperandType::TENSOR_QUANT16_ASYMM) { 284 op->scale = getUniform<float>(0.1, 2.0); 285 op->zeroPoint = getUniform<int32_t>(0, 65535); 286 } else if (dataType == TestOperandType::TENSOR_QUANT16_SYMM) { 287 op->scale = getUniform<float>(0.1, 2.0); 288 op->zeroPoint = 0; 289 } else { 290 op->scale = 0.0f; 291 op->zeroPoint = 0; 292 } 293 } 294 295 inline void defaultScalarOperandConstructor(TestOperandType dataType, uint32_t, RandomOperand* op) { 296 switch (dataType) { 297 case TestOperandType::TENSOR_FLOAT32: 298 op->dataType = TestOperandType::FLOAT32; 299 op->scale = 0.0f; 300 op->zeroPoint = 0; 301 break; 302 case TestOperandType::TENSOR_FLOAT16: 303 op->dataType = TestOperandType::FLOAT16; 304 op->scale = 0.0f; 305 op->zeroPoint = 0; 306 break; 307 case TestOperandType::TENSOR_INT32: 308 op->dataType = TestOperandType::INT32; 309 op->scale = 
0.0f; 310 op->zeroPoint = 0; 311 break; 312 default: 313 NN_FUZZER_CHECK(false) << "Data type " << dataType 314 << " is not supported in defaultScalarOperandConstructor."; 315 } 316 } 317 318 // An INPUT operand with uniformly distributed buffer values. The operand's data type is set the 319 // same as the operation's primary data type. In the case of quantized data type, the quantization 320 // parameters are chosen randomly and uniformly. 321 #define INPUT_DEFAULT \ 322 { \ 323 .type = RandomOperandType::INPUT, .constructor = defaultOperandConstructor, \ 324 .finalizer = uniformFinalizer \ 325 } 326 327 // A scalar operand with an uniformly distributed value. The operand's data type is set to the 328 // corresponding scalar type of the operation's primary data type (which is always a tensor type). 329 #define INPUT_SCALAR \ 330 { \ 331 .type = RandomOperandType::INPUT, .constructor = defaultScalarOperandConstructor, \ 332 .finalizer = uniformFinalizer \ 333 } 334 335 // An INPUT operand with a specified data type and uniformly distributed buffer values. In the case 336 // of quantized data type, the quantization parameters are chosen randomly and uniformly. 337 #define INPUT_TYPED(opType) \ 338 { \ 339 .type = RandomOperandType::INPUT, \ 340 .constructor = [](TestOperandType, uint32_t rank, \ 341 RandomOperand* op) { defaultOperandConstructor((opType), rank, op); }, \ 342 .finalizer = uniformFinalizer \ 343 } 344 345 // For the bias tensor in convolutions and fully connected operator. 346 // An INPUT operand with uniformly distributed buffer values. The operand's data type is set to 347 // TENSOR_INT32 if the operation's primary data type is TENSOR_QUANT8_ASYMM. Otherwise, it is the 348 // same as INPUT_DEFAULT. 
#define INPUT_BIAS                                                                     \
    {                                                                                  \
        .type = RandomOperandType::INPUT,                                              \
        .constructor =                                                                 \
                [](TestOperandType dataType, uint32_t rank, RandomOperand* op) {       \
                    if (dataType == TestOperandType::TENSOR_QUANT8_ASYMM ||            \
                        dataType == TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED) {     \
                        dataType = TestOperandType::TENSOR_INT32;                      \
                    }                                                                  \
                    defaultOperandConstructor(dataType, rank, op);                     \
                },                                                                     \
        .finalizer = uniformFinalizer                                                  \
    }

// A helper macro for common code block filling operand buffer with random method.
// NOTE: this expands to multiple unbraced statements that declare `length` and
// `data`, so it must be used as (the whole of) a constructor lambda body where
// a RandomOperand* named `op` is in scope — see the macros below. For scalar
// types (per kScalarDataType) the length must be 1 and no dimension is set;
// for tensor types the operand becomes a 1-D tensor of the given length.
#define PARAMETER_FILL_BUFFER_HELPER(opType, len, method, ...) \
    op->dataType = opType;                                     \
    int length = (len);                                        \
    if (kScalarDataType[static_cast<int>(opType)]) {           \
        NN_FUZZER_CHECK(length == 1);                          \
    } else {                                                   \
        op->dimensions = {length};                             \
    }                                                          \
    op->resizeBuffer<CppType<opType>::type>(length);           \
    auto data = reinterpret_cast<CppType<opType>::type*>(op->buffer.data()); \
    for (int i = 0; i < length; i++) {                         \
        data[i] = method<CppType<opType>::type>(__VA_ARGS__);  \
    }

// A 1-D vector of CONST parameters of length len, each uniformly selected within range [low, up].
#define PARAMETER_VEC_RANGE(opType, len, low, up)                            \
    {                                                                        \
        .type = RandomOperandType::CONST,                                    \
        .constructor = [](TestOperandType, uint32_t, RandomOperand* op) {    \
            PARAMETER_FILL_BUFFER_HELPER(opType, len, getUniform, low, up);  \
        }                                                                    \
    }

// A CONST scalar uniformly selected within range [low, up].
#define PARAMETER_RANGE(opType, low, up) PARAMETER_VEC_RANGE(opType, 1, low, up)

// A CONST floating point scalar uniformly selected within range [low, up]. The operand's data type
// is set to FLOAT16 if the operation's primary data type is TENSOR_FLOAT16. Otherwise, the data
// type is set to FLOAT32.
#define PARAMETER_FLOAT_RANGE(low, up)                                                          \
    {                                                                                           \
        .type = RandomOperandType::CONST,                                                       \
        .constructor = [](TestOperandType dataType, uint32_t, RandomOperand* op) {              \
            if (dataType == TestOperandType::TENSOR_FLOAT16) {                                  \
                PARAMETER_FILL_BUFFER_HELPER(TestOperandType::FLOAT16, 1, getUniform, low, up); \
            } else {                                                                            \
                PARAMETER_FILL_BUFFER_HELPER(TestOperandType::FLOAT32, 1, getUniform, low, up); \
            }                                                                                   \
        }                                                                                       \
    }

// A CONST scalar uniformly selected from the provided choices (variadic args).
#define PARAMETER_CHOICE(opType, ...)                                         \
    {                                                                         \
        .type = RandomOperandType::CONST,                                     \
        .constructor = [](TestOperandType, uint32_t, RandomOperand* op) {     \
            const std::vector<CppType<opType>::type> choices = {__VA_ARGS__}; \
            PARAMETER_FILL_BUFFER_HELPER(opType, 1, getRandomChoice, choices);\
        }                                                                     \
    }

// A CONST scalar with uninitialized buffer value. The buffer values are expected to be filled in
// the operation constructor or finalizer.
#define PARAMETER_NONE(opType)                                                                    \
    {                                                                                             \
        .type = RandomOperandType::CONST,                                                         \
        .constructor = [](TestOperandType, uint32_t, RandomOperand* op) { op->dataType = opType; } \
    }

// A CONST omitted (optional, no-value) operand.
#define PARAMETER_NO_VALUE(opType)                                                                \
    {                                                                                             \
        .type = RandomOperandType::NO_VALUE,                                                      \
        .constructor = [](TestOperandType, uint32_t, RandomOperand* op) { op->dataType = opType; } \
    }

// A CONST integer scalar with value set as a FREE RandomVariable within default range,
// left for the constraint solver to pick.
#define RANDOM_INT_FREE                                                   \
    {                                                                     \
        .type = RandomOperandType::CONST,                                 \
        .constructor = [](TestOperandType, uint32_t, RandomOperand* op) { \
            op->dataType = TestOperandType::INT32;                        \
            op->randomBuffer = {RandomVariableType::FREE};                \
        }                                                                 \
    }

// A CONST integer scalar with value set as a FREE RandomVariable within range [low, up].
#define RANDOM_INT_RANGE(low, up)                                         \
    {                                                                     \
        .type = RandomOperandType::CONST,                                 \
        .constructor = [](TestOperandType, uint32_t, RandomOperand* op) { \
            op->dataType = TestOperandType::INT32;                        \
            op->randomBuffer = {RandomVariable((low), (up))};             \
        }                                                                 \
    }

// An OUTPUT operand with data type set the same as the operation primary data type. In the case of
// quantized data type, the quantization parameters are chosen randomly and uniformly.
#define OUTPUT_DEFAULT \
    { .type = RandomOperandType::OUTPUT, .constructor = defaultOperandConstructor }

// An OUTPUT operand with a specified data type. In the case of quantized data type, the
// quantization parameters are chosen randomly and uniformly.
#define OUTPUT_TYPED(opType)                                                   \
    {                                                                          \
        .type = RandomOperandType::OUTPUT,                                     \
        .constructor = [](TestOperandType, uint32_t rank, RandomOperand* op) { \
            defaultOperandConstructor((opType), rank, op);                     \
        }                                                                      \
    }

// An OUTPUT operand with data type set the same as the operation primary data type. In the case of
// quantized data type, the quantization parameters are overridden with the specified fixed values.
#define OUTPUT_QUANT(fixedScale, fixedZeroPoint)                                        \
    {                                                                                   \
        .type = RandomOperandType::OUTPUT,                                              \
        .constructor = [](TestOperandType dataType, uint32_t rank, RandomOperand* op) { \
            defaultOperandConstructor(dataType, rank, op);                              \
            if (isQuantizedType(op->dataType)) {                                        \
                op->scale = (fixedScale);                                               \
                op->zeroPoint = (fixedZeroPoint);                                       \
            }                                                                           \
        }                                                                               \
    }

// DEFINE_OPERATION_SIGNATURE creates an OperationSignature by aggregate initialization and adds it
// to the global OperationManager singleton.
481 // 482 // Usage: 483 // DEFINE_OPERATION_SIGNATURE(name) { aggregate_initialization }; 484 // 485 // Example: 486 // DEFINE_OPERATION_SIGNATURE(RELU_V1_0) { 487 // .opType = TestOperationType::RELU, 488 // .supportedDataTypes = {TestOperandType::TENSOR_FLOAT32, 489 // TestOperandType::TENSOR_QUANT8_ASYMM}, .supportedRanks = {1, 2, 3, 4}, .version = 490 // TestHalVersion::V1_0, .inputs = {INPUT_DEFAULT}, .outputs = {OUTPUT_DEFAULT}, .constructor 491 // = sameShapeOpConstructor}; 492 // 493 #define DEFINE_OPERATION_SIGNATURE(name) \ 494 const int dummy_##name = OperationSignatureHelper(#name) + OperationSignature 495 496 } // namespace fuzzing_test 497 } // namespace nn 498 } // namespace android 499 500 #endif // ANDROID_PACKAGES_MODULES_NEURALNETWORKS_RUNTIME_TEST_FUZZING_OPERATION_SIGNATURES_OPERATION_SIGNATURE_UTILS_H 501