/packages/modules/NeuralNetworks/common/ |
D | LegacyUtils.cpp |
    135  bool isExtensionOperandType(OperandType type) {  in isExtensionOperandType()
    194  OperandType getInputType(uint32_t index) const override;
    199  OperandType getOutputType(uint32_t index) const override;
    236  OperandType OperationValidationContext::getInputType(uint32_t index) const {  in getInputType()
    250  OperandType OperationValidationContext::getOutputType(uint32_t index) const {  in getOutputType()
    327  uint32_t nonExtensionOperandSizeOfData(OperandType type, const std::vector<uint32_t>& dimensions) {  in nonExtensionOperandSizeOfData()
    354  bool nonExtensionOperandSizeOfDataOverflowsUInt32(OperandType type,  in nonExtensionOperandSizeOfDataOverflowsUInt32()
    377  bool tensorHasUnspecifiedDimensions(OperandType type, const std::vector<uint32_t>& dimensions) {  in tensorHasUnspecifiedDimensions()
    544  const std::vector<OperandType>& inExpectedTypes,  in validateOperationOperandTypes()
    546  const std::vector<OperandType>& outExpectedInTypes) {  in validateOperationOperandTypes()
    [all …]
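The LegacyUtils matches above are the size and shape helpers: they turn an OperandType plus a dimension vector into a byte count, and separately guard that multiplication against uint32_t overflow (nonExtensionOperandSizeOfDataOverflowsUInt32). A minimal sketch of the calculation, using a stand-in enum and an assumed element-size table rather than the real android::nn definitions:

    #include <cstdint>
    #include <vector>

    // Stand-in for android::nn::OperandType; the real enum has many more values.
    enum class OperandType { FLOAT32, INT32, UINT32, BOOL, TENSOR_FLOAT32, TENSOR_INT32, TENSOR_QUANT8_ASYMM };

    // Hypothetical per-element sizes; the real table lives in the NNAPI runtime.
    static uint32_t sizeOfElement(OperandType type) {
        switch (type) {
            case OperandType::TENSOR_QUANT8_ASYMM:
            case OperandType::BOOL:
                return 1;
            default:
                return 4;  // FLOAT32, INT32, UINT32, TENSOR_FLOAT32, TENSOR_INT32
        }
    }

    // Size in bytes = element size times the product of all dimensions; scalar
    // operands have an empty dimension vector and occupy exactly one element.
    uint32_t sizeOfData(OperandType type, const std::vector<uint32_t>& dimensions) {
        uint32_t size = sizeOfElement(type);
        for (uint32_t d : dimensions) size *= d;  // the real code checks each step for overflow
        return size;
    }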
|
D | Validation.cpp |
    174  Result<Version> validateOperandType(const OperandType& operandType) {  in validateOperandType()
    176  case OperandType::FLOAT32:  in validateOperandType()
    177  case OperandType::INT32:  in validateOperandType()
    178  case OperandType::UINT32:  in validateOperandType()
    179  case OperandType::TENSOR_FLOAT32:  in validateOperandType()
    180  case OperandType::TENSOR_INT32:  in validateOperandType()
    181  case OperandType::TENSOR_QUANT8_ASYMM:  in validateOperandType()
    182  case OperandType::OEM:  in validateOperandType()
    183  case OperandType::TENSOR_OEM_BYTE:  in validateOperandType()
    185  case OperandType::BOOL:  in validateOperandType()
    [all …]
|
D | ValidateHal.cpp |
    101  case V1_3::OperandType::FLOAT32:  in validateOperandExtraParams()
    102  case V1_3::OperandType::INT32:  in validateOperandExtraParams()
    103  case V1_3::OperandType::UINT32:  in validateOperandExtraParams()
    104  case V1_3::OperandType::BOOL:  in validateOperandExtraParams()
    105  case V1_3::OperandType::SUBGRAPH:  in validateOperandExtraParams()
    106  case V1_3::OperandType::TENSOR_FLOAT32:  in validateOperandExtraParams()
    107  case V1_3::OperandType::TENSOR_FLOAT16:  in validateOperandExtraParams()
    108  case V1_3::OperandType::TENSOR_INT32:  in validateOperandExtraParams()
    109  case V1_3::OperandType::TENSOR_QUANT8_ASYMM:  in validateOperandExtraParams()
    110  case V1_3::OperandType::TENSOR_QUANT8_ASYMM_SIGNED:  in validateOperandExtraParams()
    [all …]
|
D | TypeUtils.cpp |
    64  bool isExtension(OperandType type) {  in isExtension()
    72  bool isNonExtensionScalar(OperandType operandType) {  in isNonExtensionScalar()
    75  case OperandType::FLOAT32:  in isNonExtensionScalar()
    76  case OperandType::INT32:  in isNonExtensionScalar()
    77  case OperandType::UINT32:  in isNonExtensionScalar()
    78  case OperandType::BOOL:  in isNonExtensionScalar()
    79  case OperandType::FLOAT16:  in isNonExtensionScalar()
    80  case OperandType::SUBGRAPH:  in isNonExtensionScalar()
    81  case OperandType::OEM:  in isNonExtensionScalar()
    83  case OperandType::TENSOR_FLOAT32:  in isNonExtensionScalar()
    [all …]
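The TypeUtils matches show two classification predicates: isExtension() and isNonExtensionScalar(), the latter an explicit switch in which every scalar type is listed and every tensor type falls through. A compact sketch of that switch shape, with a stand-in enum instead of the real android::nn::OperandType:

    // Stand-in enum; the real type is android::nn::OperandType.
    enum class OperandType { FLOAT16, FLOAT32, INT32, UINT32, BOOL, SUBGRAPH, OEM,
                             TENSOR_FLOAT32, TENSOR_INT32, TENSOR_QUANT8_ASYMM };

    // Scalars are listed exhaustively; tensor (and extension) types are not scalars.
    bool isNonExtensionScalar(OperandType operandType) {
        switch (operandType) {
            case OperandType::FLOAT16:
            case OperandType::FLOAT32:
            case OperandType::INT32:
            case OperandType::UINT32:
            case OperandType::BOOL:
            case OperandType::SUBGRAPH:
            case OperandType::OEM:
                return true;
            default:
                return false;
        }
    }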
|
/packages/modules/NeuralNetworks/common/operations/ |
D | Comparisons.cpp |
    56  if (aShape.type == OperandType::TENSOR_QUANT8_ASYMM ||  in compute()
    57  aShape.type == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {  in compute()
    129  OperandType inputType = context->getInputType(kInputTensor1);  in validate()
    131  inputType == OperandType::TENSOR_BOOL8 || inputType == OperandType::TENSOR_FLOAT16 ||  in validate()
    132  inputType == OperandType::TENSOR_FLOAT32 || inputType == OperandType::TENSOR_INT32 ||  in validate()
    133  inputType == OperandType::TENSOR_QUANT8_ASYMM ||  in validate()
    134  inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED)  in validate()
    137  NN_RET_CHECK(validateOutputTypes(context, {OperandType::TENSOR_BOOL8}));  in validate()
    138  if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {  in validate()
    155  case OperandType::TENSOR_FLOAT16:  in executeLess()
    [all …]
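The Comparisons matches illustrate the validate() pattern that recurs across this operations directory: read the input operand type from the validation context, check it against an allow-list, and pin the output type (here TENSOR_BOOL8). A hedged sketch of that flow, with a hypothetical Context interface standing in for the real validation context and NN_RET_CHECK machinery:

    #include <cstdint>

    enum class OperandType { TENSOR_BOOL8, TENSOR_FLOAT16, TENSOR_FLOAT32, TENSOR_INT32,
                             TENSOR_QUANT8_ASYMM, TENSOR_QUANT8_ASYMM_SIGNED };

    // Hypothetical stand-in for the per-operation validation context.
    struct Context {
        virtual OperandType getInputType(uint32_t index) const = 0;
        virtual OperandType getOutputType(uint32_t index) const = 0;
        virtual ~Context() = default;
    };

    constexpr uint32_t kInputTensor1 = 0;
    constexpr uint32_t kOutputTensor = 0;

    // Accept the input types listed in the matches and require a TENSOR_BOOL8 output.
    bool validateComparison(const Context* context) {
        const OperandType inputType = context->getInputType(kInputTensor1);
        const bool inputOk = inputType == OperandType::TENSOR_BOOL8 ||
                             inputType == OperandType::TENSOR_FLOAT16 ||
                             inputType == OperandType::TENSOR_FLOAT32 ||
                             inputType == OperandType::TENSOR_INT32 ||
                             inputType == OperandType::TENSOR_QUANT8_ASYMM ||
                             inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED;
        return inputOk && context->getOutputType(kOutputTensor) == OperandType::TENSOR_BOOL8;
    }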
|
D | UnidirectionalSequenceLSTM.cpp |
    124  const OperandType inputType = context->getInputType(kInputTensor);  in validate()
    125  std::vector<OperandType> inExpectedTypes;  in validate()
    126  std::vector<OperandType> outExpectedTypes;  in validate()
    127  if (inputType == OperandType::TENSOR_FLOAT32) {  in validate()
    128  inExpectedTypes = {OperandType::TENSOR_FLOAT32, OperandType::TENSOR_FLOAT32,  in validate()
    129  OperandType::TENSOR_FLOAT32, OperandType::TENSOR_FLOAT32,  in validate()
    130  OperandType::TENSOR_FLOAT32, OperandType::TENSOR_FLOAT32,  in validate()
    131  OperandType::TENSOR_FLOAT32, OperandType::TENSOR_FLOAT32,  in validate()
    132  OperandType::TENSOR_FLOAT32, OperandType::TENSOR_FLOAT32,  in validate()
    133  OperandType::TENSOR_FLOAT32, OperandType::TENSOR_FLOAT32,  in validate()
    [all …]
|
D | GenerateProposals.cpp |
    208  std::vector<OperandType> inExpectedTypes;  in validate()
    211  if (inputType == OperandType::TENSOR_FLOAT32 || inputType == OperandType::TENSOR_FLOAT16) {  in validate()
    212  inExpectedTypes = {inputType, inputType, OperandType::TENSOR_INT32, inputType};  in validate()
    213  } else if (inputType == OperandType::TENSOR_QUANT16_ASYMM) {  in validate()
    214  if (deltaInputType == OperandType::TENSOR_QUANT8_ASYMM ||  in validate()
    215  deltaInputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {  in validate()
    216  inExpectedTypes = {OperandType::TENSOR_QUANT16_ASYMM, deltaInputType,  in validate()
    217  OperandType::TENSOR_INT32, OperandType::TENSOR_QUANT16_ASYMM};  in validate()
    255  if (roiShape.type == OperandType::TENSOR_QUANT16_ASYMM) {  in prepare()
    266  if (roiShape.type == OperandType::TENSOR_QUANT16_ASYMM) {  in prepare()
    [all …]
|
D | Reduce.cpp |
    76  OperandType inputType = context->getInputType(kInputTensor);  in validateProdSum()
    77  NN_RET_CHECK(inputType == OperandType::TENSOR_FLOAT16 ||  in validateProdSum()
    78  inputType == OperandType::TENSOR_FLOAT32)  in validateProdSum()
    81  validateInputTypes(context, {inputType, OperandType::TENSOR_INT32, OperandType::BOOL}));  in validateProdSum()
    93  OperandType inputType = context->getInputType(kInputTensor);  in validateMaxMin()
    94  NN_RET_CHECK(inputType == OperandType::TENSOR_FLOAT16 ||  in validateMaxMin()
    95  inputType == OperandType::TENSOR_FLOAT32 ||  in validateMaxMin()
    96  inputType == OperandType::TENSOR_QUANT8_ASYMM ||  in validateMaxMin()
    97  inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED)  in validateMaxMin()
    100  validateInputTypes(context, {inputType, OperandType::TENSOR_INT32, OperandType::BOOL}));  in validateMaxMin()
    [all …]
|
D | Dequantize.cpp |
    82  const OperandType inputType = context->getInputType(kInputTensor);  in validate()
    83  const OperandType outputType = context->getOutputType(kOutputTensor);  in validate()
    90  if (inputType == OperandType::TENSOR_QUANT8_ASYMM &&  in validate()
    91  outputType == OperandType::TENSOR_FLOAT32) {  in validate()
    95  NN_RET_CHECK(inputType == OperandType::TENSOR_QUANT8_ASYMM ||  in validate()
    96  inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED ||  in validate()
    97  inputType == OperandType::TENSOR_QUANT8_SYMM ||  in validate()
    98  inputType == OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL)  in validate()
    100  NN_RET_CHECK(outputType == OperandType::TENSOR_FLOAT16 ||  in validate()
    101  outputType == OperandType::TENSOR_FLOAT32)  in validate()
    [all …]
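DEQUANTIZE's validate() differs slightly from the pattern above: both the input and the output type matter, and the allowed combination is any 8-bit quantized input paired with a float16 or float32 output. A minimal sketch of just that pairing check, under the same stand-in-enum assumption:

    enum class OperandType { TENSOR_FLOAT16, TENSOR_FLOAT32, TENSOR_QUANT8_ASYMM,
                             TENSOR_QUANT8_ASYMM_SIGNED, TENSOR_QUANT8_SYMM,
                             TENSOR_QUANT8_SYMM_PER_CHANNEL };

    // A quantized 8-bit input may be dequantized to either float16 or float32.
    bool validateDequantizeTypes(OperandType inputType, OperandType outputType) {
        const bool inputOk = inputType == OperandType::TENSOR_QUANT8_ASYMM ||
                             inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED ||
                             inputType == OperandType::TENSOR_QUANT8_SYMM ||
                             inputType == OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL;
        const bool outputOk = outputType == OperandType::TENSOR_FLOAT16 ||
                              outputType == OperandType::TENSOR_FLOAT32;
        return inputOk && outputOk;
    }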
|
D | RoiPooling.cpp |
    195  std::vector<OperandType> inExpectedTypes;  in validate()
    197  if (inputType == OperandType::TENSOR_FLOAT32) {  in validate()
    198  inExpectedTypes = {OperandType::TENSOR_FLOAT32, OperandType::TENSOR_FLOAT32,  in validate()
    199  OperandType::TENSOR_INT32, OperandType::INT32,  in validate()
    200  OperandType::INT32, OperandType::FLOAT32,  in validate()
    201  OperandType::FLOAT32, OperandType::BOOL};  in validate()
    202  } else if (inputType == OperandType::TENSOR_FLOAT16) {  in validate()
    203  inExpectedTypes = {OperandType::TENSOR_FLOAT16, OperandType::TENSOR_FLOAT16,  in validate()
    204  OperandType::TENSOR_INT32, OperandType::INT32,  in validate()
    205  OperandType::INT32, OperandType::FLOAT16,  in validate()
    [all …]
|
D | Pooling.cpp |
    116  if (output.type == OperandType::TENSOR_QUANT8_ASYMM) {  in toTfliteParam()
    123  } else if (output.type == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {  in toTfliteParam()
    301  std::vector<OperandType> inExpectedTypes;  in validate()
    303  if (inputType == OperandType::TENSOR_FLOAT32) {  in validate()
    306  inputType, OperandType::INT32, OperandType::INT32, OperandType::INT32,  in validate()
    307  OperandType::INT32, OperandType::INT32, OperandType::INT32,  in validate()
    309  } else if (inputType == OperandType::TENSOR_FLOAT16) {  in validate()
    312  OperandType::TENSOR_FLOAT16, OperandType::INT32, OperandType::INT32,  in validate()
    313  OperandType::INT32, OperandType::INT32, OperandType::INT32,  in validate()
    314  OperandType::INT32,  in validate()
    [all …]
|
D | DepthwiseConv2D.cpp |
    62  if ((inCount >= 9 && context->getInputType(8) == OperandType::BOOL) || inCount == 8) {  in initialize()
    76  } else if (inCount >= 11 && context->getInputType(8) == OperandType::INT32) {  in initialize()
    428  std::vector<OperandType> inExpectedTypes;  in validate()
    429  if (inputType == OperandType::TENSOR_FLOAT32) {  in validate()
    431  OperandType::TENSOR_FLOAT32, OperandType::TENSOR_FLOAT32,  in validate()
    432  OperandType::TENSOR_FLOAT32, OperandType::INT32,  in validate()
    433  OperandType::INT32, OperandType::INT32,  in validate()
    434  OperandType::INT32, OperandType::INT32,  in validate()
    436  } else if (inputType == OperandType::TENSOR_FLOAT16) {  in validate()
    438  OperandType::TENSOR_FLOAT16, OperandType::TENSOR_FLOAT16,  in validate()
    [all …]
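The two initialize() matches for DepthwiseConv2D (and the analogous ones for Conv2D below) show how a convolution tells its implicit- and explicit-padding signatures apart: by the input count and by whether the operand at the optional position is a BOOL layout flag or an INT32 padding value. A rough sketch of that dispatch with hypothetical names; the real indices and counts are operation-specific, and the real code fails validation rather than returning a sentinel:

    #include <cstdint>

    enum class OperandType { BOOL, INT32 /* ... */ };

    // Hypothetical stand-in for the operation execution context.
    struct Context {
        virtual uint32_t getNumInputs() const = 0;
        virtual OperandType getInputType(uint32_t index) const = 0;
        virtual ~Context() = default;
    };

    enum class Signature { kImplicitPadding, kExplicitPadding, kInvalid };

    // A BOOL at the optional-input position (or the short input list) selects the
    // implicit-padding signature; an INT32 there selects explicit padding.
    Signature detectSignature(const Context* context, uint32_t optionalIndex,
                              uint32_t implicitCount, uint32_t explicitCount) {
        const uint32_t inCount = context->getNumInputs();
        if (inCount == implicitCount ||
            (inCount > implicitCount &&
             context->getInputType(optionalIndex) == OperandType::BOOL)) {
            return Signature::kImplicitPadding;
        }
        if (inCount >= explicitCount &&
            context->getInputType(optionalIndex) == OperandType::INT32) {
            return Signature::kExplicitPadding;
        }
        return Signature::kInvalid;
    }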
|
D | Fill.cpp |
    46  bool getValueType(OperandType outputType, OperandType* valueType) {  in getValueType()
    48  case OperandType::TENSOR_FLOAT16:  in getValueType()
    49  *valueType = OperandType::FLOAT16;  in getValueType()
    51  case OperandType::TENSOR_FLOAT32:  in getValueType()
    52  *valueType = OperandType::FLOAT32;  in getValueType()
    54  case OperandType::TENSOR_INT32:  in getValueType()
    55  *valueType = OperandType::INT32;  in getValueType()
    69  OperandType outputType = context->getOutputType(kOutputTensor);  in validate()
    70  NN_RET_CHECK(outputType == OperandType::TENSOR_FLOAT16 ||  in validate()
    71  outputType == OperandType::TENSOR_FLOAT32 ||  in validate()
    [all …]
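FILL's getValueType() maps the requested output tensor type to the scalar type its fill-value input must have, which is exactly what the three case pairs in the matches spell out. A compact sketch of that mapping, again with a stand-in enum:

    enum class OperandType { FLOAT16, FLOAT32, INT32,
                             TENSOR_FLOAT16, TENSOR_FLOAT32, TENSOR_INT32 };

    // The fill value is a scalar of the element type of the output tensor.
    bool getValueType(OperandType outputType, OperandType* valueType) {
        switch (outputType) {
            case OperandType::TENSOR_FLOAT16: *valueType = OperandType::FLOAT16; return true;
            case OperandType::TENSOR_FLOAT32: *valueType = OperandType::FLOAT32; return true;
            case OperandType::TENSOR_INT32:   *valueType = OperandType::INT32;   return true;
            default: return false;  // unsupported output type
        }
    }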
|
D | FullyConnected.cpp |
    190  if (input.type == OperandType::TENSOR_QUANT8_ASYMM ||  in validateShapes()
    191  input.type == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {  in validateShapes()
    192  NN_RET_CHECK(bias.type == OperandType::TENSOR_INT32);  in validateShapes()
    229  std::vector<OperandType> inExpectedTypes;  in validate()
    230  std::vector<OperandType> outExpectedTypes;  in validate()
    232  if (inputType == OperandType::TENSOR_FLOAT32) {  in validate()
    235  OperandType::TENSOR_FLOAT32,  in validate()
    236  OperandType::TENSOR_FLOAT32,  in validate()
    237  OperandType::TENSOR_FLOAT32,  in validate()
    238  OperandType::INT32,  in validate()
    [all …]
|
D | RoiAlign.cpp |
    348  std::vector<OperandType> inExpectedTypes;  in validate()
    350  if (inputType == OperandType::TENSOR_FLOAT32) {  in validate()
    351  inExpectedTypes = {OperandType::TENSOR_FLOAT32, OperandType::TENSOR_FLOAT32,  in validate()
    352  OperandType::TENSOR_INT32, OperandType::INT32,  in validate()
    353  OperandType::INT32, OperandType::FLOAT32,  in validate()
    354  OperandType::FLOAT32, OperandType::INT32,  in validate()
    355  OperandType::INT32, OperandType::BOOL};  in validate()
    356  } else if (inputType == OperandType::TENSOR_FLOAT16) {  in validate()
    357  inExpectedTypes = {OperandType::TENSOR_FLOAT16, OperandType::TENSOR_FLOAT16,  in validate()
    358  OperandType::TENSOR_INT32, OperandType::INT32,  in validate()
    [all …]
|
D | Conv2D.cpp |
    75  if ((inCount >= 8 && context->getInputType(7) == OperandType::BOOL) || inCount == 7) {  in initialize()
    88  } else if (inCount >= 10 && context->getInputType(7) == OperandType::INT32) {  in initialize()
    550  std::vector<OperandType> inExpectedTypes;  in validate()
    551  if (inputType == OperandType::TENSOR_FLOAT32) {  in validate()
    552  inExpectedTypes = {OperandType::TENSOR_FLOAT32, OperandType::TENSOR_FLOAT32,  in validate()
    553  OperandType::TENSOR_FLOAT32, OperandType::INT32,  in validate()
    554  OperandType::INT32, OperandType::INT32,  in validate()
    555  OperandType::INT32};  in validate()
    556  } else if (inputType == OperandType::TENSOR_FLOAT16) {  in validate()
    557  inExpectedTypes = {OperandType::TENSOR_FLOAT16, OperandType::TENSOR_FLOAT16,  in validate()
    [all …]
|
D | StridedSlice.cpp |
    107  OperandType inputType = context->getInputType(kInputTensor);  in validate()
    108  NN_RET_CHECK(inputType == OperandType::TENSOR_FLOAT16 ||  in validate()
    109  inputType == OperandType::TENSOR_FLOAT32 ||  in validate()
    110  inputType == OperandType::TENSOR_QUANT8_ASYMM ||  in validate()
    111  inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED)  in validate()
    115  if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {  in validate()
    117  } else if (inputType == OperandType::TENSOR_FLOAT16) {  in validate()
    125  OperandType::TENSOR_INT32,  in validate()
    126  OperandType::TENSOR_INT32,  in validate()
    127  OperandType::TENSOR_INT32,  in validate()
    [all …]
|
D | LocalResponseNormalization.cpp |
    144  const OperandType inputType = context->getInputType(kInputTensor);  in validate()
    145  std::vector<OperandType> inExpectedTypes;  in validate()
    146  std::vector<OperandType> outExpectedTypes;  in validate()
    148  if (inputType == OperandType::TENSOR_FLOAT32) {  in validate()
    151  OperandType::TENSOR_FLOAT32, OperandType::INT32, OperandType::FLOAT32,  in validate()
    152  OperandType::FLOAT32, OperandType::FLOAT32,  in validate()
    154  outExpectedTypes = {OperandType::TENSOR_FLOAT32};  in validate()
    155  } else if (inputType == OperandType::TENSOR_FLOAT16) {  in validate()
    158  OperandType::TENSOR_FLOAT16, OperandType::INT32, OperandType::FLOAT16,  in validate()
    159  OperandType::FLOAT16, OperandType::FLOAT16,  in validate()
    [all …]
|
/packages/modules/NeuralNetworks/tools/test_generator/tests/P_vts_variation/ |
D | stdout.txt.expect |
    17  .type = OperandType::TENSOR_FLOAT32,
    26  .type = OperandType::TENSOR_FLOAT32,
    35  .type = OperandType::TENSOR_FLOAT32,
    44  .type = OperandType::INT32,
    53  .type = OperandType::INT32,
    62  .type = OperandType::INT32,
    71  .type = OperandType::INT32,
    80  .type = OperandType::INT32,
    89  .type = OperandType::TENSOR_FLOAT32,
    164  .type = OperandType::TENSOR_FLOAT32,
    [all …]
|
/packages/modules/NeuralNetworks/tools/test_generator/tests/P_vts_naming/ |
D | stdout.txt.expect |
    17  .type = OperandType::TENSOR_FLOAT32,
    26  .type = OperandType::TENSOR_FLOAT32,
    35  .type = OperandType::TENSOR_FLOAT32,
    44  .type = OperandType::INT32,
    53  .type = OperandType::INT32,
    62  .type = OperandType::INT32,
    71  .type = OperandType::INT32,
    80  .type = OperandType::INT32,
    89  .type = OperandType::TENSOR_FLOAT32,
    164  .type = OperandType::TENSOR_FLOAT32,
    [all …]
|
/packages/modules/NeuralNetworks/tools/test_generator/tests/P_vts_implicit_variation/ |
D | stdout.txt.expect |
    17  .type = OperandType::TENSOR_FLOAT32,
    26  .type = OperandType::TENSOR_FLOAT32,
    35  .type = OperandType::TENSOR_FLOAT32,
    44  .type = OperandType::INT32,
    53  .type = OperandType::INT32,
    62  .type = OperandType::INT32,
    71  .type = OperandType::INT32,
    80  .type = OperandType::INT32,
    89  .type = OperandType::TENSOR_FLOAT32,
    164  .type = OperandType::TENSOR_FLOAT32,
    [all …]
|
/packages/modules/NeuralNetworks/tools/test_generator/tests/P_variation/ |
D | stdout.txt.expect |
    17  OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 2});
    18  OperandType type1(Type::TENSOR_FLOAT32, {2, 2, 2, 2});
    19  OperandType type2(Type::TENSOR_FLOAT32, {1});
    20  OperandType type3(Type::INT32, {});
    90  OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 2});
    91  OperandType type1(Type::TENSOR_FLOAT32, {2, 2, 2, 2});
    92  OperandType type2(Type::TENSOR_FLOAT32, {1});
    93  OperandType type3(Type::INT32, {});
    165  OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 2});
    166  OperandType type1(Type::TENSOR_FLOAT32, {2, 2, 2, 2});
    [all …]
|
/packages/modules/NeuralNetworks/tools/test_generator/tests/P_naming/ |
D | stdout.txt.expect |
    17  OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 2});
    18  OperandType type1(Type::TENSOR_FLOAT32, {2, 2, 2, 2});
    19  OperandType type2(Type::TENSOR_FLOAT32, {1});
    20  OperandType type3(Type::INT32, {});
    90  OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 2});
    91  OperandType type1(Type::TENSOR_FLOAT32, {2, 2, 2, 2});
    92  OperandType type2(Type::TENSOR_FLOAT32, {1});
    93  OperandType type3(Type::INT32, {});
    165  OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 2});
    166  OperandType type1(Type::TENSOR_FLOAT32, {2, 2, 2, 2});
    [all …]
|
/packages/modules/NeuralNetworks/tools/test_generator/tests/P_implicit_variation/ |
D | stdout.txt.expect |
    17  OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 2});
    18  OperandType type1(Type::TENSOR_FLOAT32, {2, 2, 2, 2});
    19  OperandType type2(Type::TENSOR_FLOAT32, {1});
    20  OperandType type3(Type::INT32, {});
    90  OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 2});
    91  OperandType type1(Type::TENSOR_FLOAT32, {2, 2, 2, 2});
    92  OperandType type2(Type::TENSOR_FLOAT32, {1});
    93  OperandType type3(Type::INT32, {});
    165  OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 2});
    166  OperandType type1(Type::TENSOR_FLOAT32, {2, 2, 2, 2});
    [all …]
|
/packages/modules/NeuralNetworks/tools/test_generator/tests/P_vts_backward_compatibility_float/ |
D | stdout.txt.expect |
    17  .type = OperandType::TENSOR_FLOAT32,
    26  .type = OperandType::TENSOR_FLOAT32,
    35  .type = OperandType::TENSOR_FLOAT32,
    44  .type = OperandType::TENSOR_FLOAT32,
    53  .type = OperandType::TENSOR_FLOAT32,
    62  .type = OperandType::TENSOR_FLOAT32,
    71  .type = OperandType::TENSOR_FLOAT32,
    80  .type = OperandType::TENSOR_FLOAT32,
    89  .type = OperandType::TENSOR_FLOAT32,
    98  .type = OperandType::TENSOR_FLOAT32,
    [all …]
|