/packages/modules/NeuralNetworks/common/types/operations/src/
GenerateProposals.cpp (in validate()):
    31:  std::vector<OperandType> inExpectedTypes;
    34:  if (inputType == OperandType::TENSOR_FLOAT32 || inputType == OperandType::TENSOR_FLOAT16) {
    35:  inExpectedTypes = {inputType, inputType, OperandType::TENSOR_INT32, inputType};
    36:  } else if (inputType == OperandType::TENSOR_QUANT16_ASYMM) {
    37:  if (deltaInputType == OperandType::TENSOR_QUANT8_ASYMM ||
    38:  deltaInputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
    39:  inExpectedTypes = {OperandType::TENSOR_QUANT16_ASYMM, deltaInputType,
    40:  OperandType::TENSOR_INT32, OperandType::TENSOR_QUANT16_ASYMM};
    59:  std::vector<OperandType> inExpectedTypes;
    60:  std::vector<OperandType> outExpectedTypes;
    [all …]

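Taken together, the validate() excerpts in this directory follow one pattern: inspect the input tensor's OperandType, pick matching inExpectedTypes/outExpectedTypes lists, and reject anything else. Below is a minimal, self-contained sketch of that pattern; the enum, the Op struct, and the type lists are simplified stand-ins, not the real NNAPI definitions.

#include <cstdio>
#include <vector>

// Simplified stand-in for android::nn::OperandType.
enum class OperandType { TENSOR_FLOAT16, TENSOR_FLOAT32, TENSOR_INT32, TENSOR_QUANT8_ASYMM, INT32, BOOL };

// Hypothetical validation context: just the actual input/output types of one operation.
struct Op {
    std::vector<OperandType> inputs;
    std::vector<OperandType> outputs;
};

// Pick the expected signature from the first input's type, then compare element-wise,
// mirroring the inExpectedTypes/outExpectedTypes pattern in the excerpts above.
bool validate(const Op& op) {
    if (op.inputs.empty()) return false;
    const OperandType inputType = op.inputs[0];
    std::vector<OperandType> inExpectedTypes;
    std::vector<OperandType> outExpectedTypes;
    if (inputType == OperandType::TENSOR_FLOAT32 || inputType == OperandType::TENSOR_FLOAT16) {
        inExpectedTypes = {inputType, inputType, OperandType::TENSOR_INT32};
        outExpectedTypes = {inputType};
    } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM) {
        inExpectedTypes = {inputType, inputType, OperandType::TENSOR_INT32};
        outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM};
    } else {
        return false;  // unsupported input tensor type
    }
    return op.inputs == inExpectedTypes && op.outputs == outExpectedTypes;
}

int main() {
    Op ok{{OperandType::TENSOR_FLOAT32, OperandType::TENSOR_FLOAT32, OperandType::TENSOR_INT32},
          {OperandType::TENSOR_FLOAT32}};
    std::printf("valid: %d\n", validate(ok));  // prints: valid: 1
}
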
Reshape.cpp (in validateDepthToSpace()):
    35:  std::vector<OperandType> inExpectedTypes;
    36:  std::vector<OperandType> outExpectedTypes;
    37:  if (inputType == OperandType::TENSOR_FLOAT32) {
    39:  inExpectedTypes = {OperandType::TENSOR_FLOAT32, OperandType::INT32};
    40:  outExpectedTypes = {OperandType::TENSOR_FLOAT32};
    41:  } else if (inputType == OperandType::TENSOR_FLOAT16) {
    43:  inExpectedTypes = {OperandType::TENSOR_FLOAT16, OperandType::INT32};
    44:  outExpectedTypes = {OperandType::TENSOR_FLOAT16};
    45:  } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM) {
    47:  inExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM, OperandType::INT32};
    [all …]

UnidirectionalSequenceLSTM.cpp (in validate()):
    30:  const OperandType inputType = context->getInputType(kInputTensor);
    31:  std::vector<OperandType> inExpectedTypes;
    32:  std::vector<OperandType> outExpectedTypes;
    33:  if (inputType == OperandType::TENSOR_FLOAT32) {
    34:  inExpectedTypes = {OperandType::TENSOR_FLOAT32, OperandType::TENSOR_FLOAT32,
    35:  OperandType::TENSOR_FLOAT32, OperandType::TENSOR_FLOAT32,
    36:  OperandType::TENSOR_FLOAT32, OperandType::TENSOR_FLOAT32,
    37:  OperandType::TENSOR_FLOAT32, OperandType::TENSOR_FLOAT32,
    38:  OperandType::TENSOR_FLOAT32, OperandType::TENSOR_FLOAT32,
    39:  OperandType::TENSOR_FLOAT32, OperandType::TENSOR_FLOAT32,
    [all …]

RoiAlign.cpp (in validate()):
    29:  std::vector<OperandType> inExpectedTypes;
    31:  if (inputType == OperandType::TENSOR_FLOAT32) {
    32:  inExpectedTypes = {OperandType::TENSOR_FLOAT32, OperandType::TENSOR_FLOAT32,
    33:  OperandType::TENSOR_INT32, OperandType::INT32,
    34:  OperandType::INT32, OperandType::FLOAT32,
    35:  OperandType::FLOAT32, OperandType::INT32,
    36:  OperandType::INT32, OperandType::BOOL};
    37:  } else if (inputType == OperandType::TENSOR_FLOAT16) {
    38:  inExpectedTypes = {OperandType::TENSOR_FLOAT16, OperandType::TENSOR_FLOAT16,
    39:  OperandType::TENSOR_INT32, OperandType::INT32,
    [all …]

GroupedConv2D.cpp (in validate()):
    34:  std::vector<OperandType> inExpectedTypes;
    35:  std::vector<OperandType> outExpectedTypes;
    36:  if (inputType == OperandType::TENSOR_FLOAT32) {
    37:  inExpectedTypes = {OperandType::TENSOR_FLOAT32, OperandType::TENSOR_FLOAT32,
    38:  OperandType::TENSOR_FLOAT32, OperandType::INT32,
    39:  OperandType::INT32, OperandType::INT32,
    40:  OperandType::INT32, OperandType::INT32};
    41:  outExpectedTypes = {OperandType::TENSOR_FLOAT32};
    42:  } else if (inputType == OperandType::TENSOR_FLOAT16) {
    43:  inExpectedTypes = {OperandType::TENSOR_FLOAT16, OperandType::TENSOR_FLOAT16,
    [all …]

DepthwiseConv2D.cpp (in validate()):
    33:  std::vector<OperandType> inExpectedTypes;
    34:  if (inputType == OperandType::TENSOR_FLOAT32) {
    36:  OperandType::TENSOR_FLOAT32, OperandType::TENSOR_FLOAT32,
    37:  OperandType::TENSOR_FLOAT32, OperandType::INT32,
    38:  OperandType::INT32, OperandType::INT32,
    39:  OperandType::INT32, OperandType::INT32,
    41:  } else if (inputType == OperandType::TENSOR_FLOAT16) {
    43:  OperandType::TENSOR_FLOAT16, OperandType::TENSOR_FLOAT16,
    44:  OperandType::TENSOR_FLOAT16, OperandType::INT32,
    45:  OperandType::INT32, OperandType::INT32,
    [all …]

Pooling.cpp (in validate()):
    32:  std::vector<OperandType> inExpectedTypes;
    34:  if (inputType == OperandType::TENSOR_FLOAT32) {
    37:  inputType, OperandType::INT32, OperandType::INT32, OperandType::INT32,
    38:  OperandType::INT32, OperandType::INT32, OperandType::INT32,
    40:  } else if (inputType == OperandType::TENSOR_FLOAT16) {
    43:  OperandType::TENSOR_FLOAT16, OperandType::INT32, OperandType::INT32,
    44:  OperandType::INT32, OperandType::INT32, OperandType::INT32,
    45:  OperandType::INT32,
    48:  inputType == OperandType::TENSOR_QUANT8_ASYMM) {
    51:  OperandType::TENSOR_QUANT8_ASYMM,
    [all …]

Conv2D.cpp (in validate()):
    42:  std::vector<OperandType> inExpectedTypes;
    43:  if (inputType == OperandType::TENSOR_FLOAT32) {
    44:  inExpectedTypes = {OperandType::TENSOR_FLOAT32, OperandType::TENSOR_FLOAT32,
    45:  OperandType::TENSOR_FLOAT32, OperandType::INT32,
    46:  OperandType::INT32, OperandType::INT32,
    47:  OperandType::INT32};
    48:  } else if (inputType == OperandType::TENSOR_FLOAT16) {
    49:  inExpectedTypes = {OperandType::TENSOR_FLOAT16, OperandType::TENSOR_FLOAT16,
    50:  OperandType::TENSOR_FLOAT16, OperandType::INT32,
    51:  OperandType::INT32, OperandType::INT32,
    [all …]

RoiPooling.cpp (in validate()):
    29:  std::vector<OperandType> inExpectedTypes;
    31:  if (inputType == OperandType::TENSOR_FLOAT32) {
    32:  inExpectedTypes = {OperandType::TENSOR_FLOAT32, OperandType::TENSOR_FLOAT32,
    33:  OperandType::TENSOR_INT32, OperandType::INT32,
    34:  OperandType::INT32, OperandType::FLOAT32,
    35:  OperandType::FLOAT32, OperandType::BOOL};
    36:  } else if (inputType == OperandType::TENSOR_FLOAT16) {
    37:  inExpectedTypes = {OperandType::TENSOR_FLOAT16, OperandType::TENSOR_FLOAT16,
    38:  OperandType::TENSOR_INT32, OperandType::INT32,
    39:  OperandType::INT32, OperandType::FLOAT16,
    [all …]

TransposeConv2D.cpp (in validate()):
    32:  std::vector<OperandType> inExpectedTypes;
    34:  if (inputType == OperandType::TENSOR_FLOAT32 || inputType == OperandType::TENSOR_FLOAT16) {
    36:  } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM ||
    37:  inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
    38:  NN_RET_CHECK(filterType == OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL ||
    41:  if (filterType == OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL) {
    49:  inExpectedTypes = {inputType, filterType, OperandType::TENSOR_INT32};
    50:  if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
    57:  std::vector<OperandType> argExpectedTypes;
    59:  argExpectedTypes = {OperandType::INT32, OperandType::INT32, OperandType::INT32,
    [all …]

RNN.cpp (in validate()):
    29:  OperandType inputType = context->getInputType(0);
    31:  std::vector<OperandType> inExpectedTypes;
    32:  std::vector<OperandType> outExpectedTypes;
    33:  if (inputType == OperandType::TENSOR_FLOAT32) {
    36:  OperandType::TENSOR_FLOAT32, OperandType::TENSOR_FLOAT32,
    37:  OperandType::TENSOR_FLOAT32, OperandType::TENSOR_FLOAT32,
    38:  OperandType::TENSOR_FLOAT32, OperandType::INT32,
    41:  OperandType::TENSOR_FLOAT32,
    42:  OperandType::TENSOR_FLOAT32,
    44:  } else if (inputType == OperandType::TENSOR_FLOAT16) {
    [all …]

QuantizedLSTM.cpp (in validate()):
    29:  std::vector<OperandType> inExpectedTypes = {
    30:  OperandType::TENSOR_QUANT8_ASYMM, OperandType::TENSOR_QUANT8_ASYMM,
    31:  OperandType::TENSOR_QUANT8_ASYMM, OperandType::TENSOR_QUANT8_ASYMM,
    32:  OperandType::TENSOR_QUANT8_ASYMM, OperandType::TENSOR_QUANT8_ASYMM,
    33:  OperandType::TENSOR_QUANT8_ASYMM, OperandType::TENSOR_QUANT8_ASYMM,
    34:  OperandType::TENSOR_QUANT8_ASYMM, OperandType::TENSOR_INT32,
    35:  OperandType::TENSOR_INT32, OperandType::TENSOR_INT32,
    36:  OperandType::TENSOR_INT32, OperandType::TENSOR_QUANT16_SYMM,
    37:  OperandType::TENSOR_QUANT8_ASYMM};
    38:  std::vector<OperandType> outExpectedTypes = {OperandType::TENSOR_QUANT16_SYMM,
    [all …]

FullyConnected.cpp:
  in validateShapes():
    30:  if (input.type == OperandType::TENSOR_QUANT8_ASYMM ||
    31:  input.type == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
    32:  NN_RET_CHECK(bias.type == OperandType::TENSOR_INT32);
  in validate():
    68:  std::vector<OperandType> inExpectedTypes;
    69:  std::vector<OperandType> outExpectedTypes;
    71:  if (inputType == OperandType::TENSOR_FLOAT32) {
    74:  OperandType::TENSOR_FLOAT32,
    75:  OperandType::TENSOR_FLOAT32,
    76:  OperandType::TENSOR_FLOAT32,
    77:  OperandType::INT32,
    [all …]

LocalResponseNormalization.cpp (in validate()):
    31:  const OperandType inputType = context->getInputType(kInputTensor);
    32:  std::vector<OperandType> inExpectedTypes;
    33:  std::vector<OperandType> outExpectedTypes;
    35:  if (inputType == OperandType::TENSOR_FLOAT32) {
    38:  OperandType::TENSOR_FLOAT32, OperandType::INT32, OperandType::FLOAT32,
    39:  OperandType::FLOAT32, OperandType::FLOAT32,
    41:  outExpectedTypes = {OperandType::TENSOR_FLOAT32};
    42:  } else if (inputType == OperandType::TENSOR_FLOAT16) {
    45:  OperandType::TENSOR_FLOAT16, OperandType::INT32, OperandType::FLOAT16,
    46:  OperandType::FLOAT16, OperandType::FLOAT16,
    [all …]

HeatmapMaxKeypoint.cpp (in validate()):
    29:  std::vector<OperandType> inExpectedTypes;
    30:  std::vector<OperandType> outExpectedTypes;
    33:  if (inputType == OperandType::TENSOR_FLOAT32 || inputType == OperandType::TENSOR_FLOAT16) {
    34:  inExpectedTypes = {inputType, inputType, OperandType::BOOL};
    36:  } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM) {
    37:  inExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM, OperandType::TENSOR_QUANT16_ASYMM,
    38:  OperandType::BOOL};
    39:  outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM, OperandType::TENSOR_QUANT16_ASYMM};
    40:  } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
    41:  inExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM_SIGNED,
    [all …]

QLSTM.cpp (in validate()):
    30:  std::vector<OperandType> inExpectedTypes;
    32:  inExpectedTypes.push_back(OperandType::TENSOR_QUANT8_ASYMM_SIGNED);
    35:  inExpectedTypes.push_back(OperandType::TENSOR_QUANT8_SYMM);
    39:  inExpectedTypes.push_back(OperandType::TENSOR_QUANT16_SYMM);
    43:  inExpectedTypes.push_back(OperandType::TENSOR_INT32);
    46:  inExpectedTypes.push_back(OperandType::TENSOR_QUANT8_SYMM);
    47:  inExpectedTypes.push_back(OperandType::TENSOR_INT32);
    49:  inExpectedTypes.push_back(OperandType::TENSOR_QUANT8_ASYMM_SIGNED);
    51:  inExpectedTypes.push_back(OperandType::TENSOR_QUANT16_SYMM);
    54:  inExpectedTypes.push_back(OperandType::TENSOR_QUANT16_SYMM);
    [all …]

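QLSTM.cpp builds its expected-type list incrementally with push_back rather than a single braced initializer, which suits an operation whose long run of inputs falls into a few type groups. A rough sketch of that style follows; the block sizes and comments are illustrative, not the real QLSTM operand layout.

#include <vector>

enum class OperandType { TENSOR_QUANT8_ASYMM_SIGNED, TENSOR_QUANT8_SYMM, TENSOR_QUANT16_SYMM, TENSOR_INT32 };

// Build the expected input-type list in blocks, as QLSTM.cpp does with repeated push_back calls.
// The block sizes below are placeholders, not the actual QLSTM operand counts.
std::vector<OperandType> makeExpectedQlstmInputTypes() {
    std::vector<OperandType> inExpectedTypes;
    inExpectedTypes.push_back(OperandType::TENSOR_QUANT8_ASYMM_SIGNED);  // input activations
    for (int i = 0; i < 8; ++i) {                                        // weight tensors
        inExpectedTypes.push_back(OperandType::TENSOR_QUANT8_SYMM);
    }
    for (int i = 0; i < 4; ++i) {                                        // gate biases
        inExpectedTypes.push_back(OperandType::TENSOR_INT32);
    }
    inExpectedTypes.push_back(OperandType::TENSOR_QUANT16_SYMM);         // cell state
    return inExpectedTypes;
}
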
Elementwise.cpp:
  in validate():
    27:  OperandType inputType = context->getInputType(kInputTensor);
    28:  NN_RET_CHECK(inputType == OperandType::TENSOR_FLOAT16 ||
    29:  inputType == OperandType::TENSOR_FLOAT32)
  in validateAbs():
    39:  OperandType inputType = context->getInputType(kInputTensor);
    40:  NN_RET_CHECK(inputType == OperandType::TENSOR_FLOAT16 ||
    41:  inputType == OperandType::TENSOR_FLOAT32 || inputType == OperandType::TENSOR_INT32)
    45:  return inputType == OperandType::TENSOR_INT32 ? kVersionFeatureLevel4 : kVersionFeatureLevel3;
  in validateFloor():
    52:  OperandType inputType = context->getInputType(kInputTensor);
    53:  NN_RET_CHECK(inputType == OperandType::TENSOR_FLOAT16 ||
    54:  inputType == OperandType::TENSOR_FLOAT32)
    [all …]

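Elementwise.cpp also ties the validated type to a minimum feature level: in validateAbs(), a TENSOR_INT32 input requires feature level 4 while the float types need only level 3. A simplified sketch of that gating, with stand-in Version values and std::optional in place of the real result type:

#include <optional>

enum class OperandType { TENSOR_FLOAT16, TENSOR_FLOAT32, TENSOR_INT32 };
enum class Version { FEATURE_LEVEL_3, FEATURE_LEVEL_4 };

// Returns the minimum feature level required for ABS with this input type,
// or std::nullopt if the type is not supported at all.
std::optional<Version> validateAbs(OperandType inputType) {
    if (inputType != OperandType::TENSOR_FLOAT16 && inputType != OperandType::TENSOR_FLOAT32 &&
        inputType != OperandType::TENSOR_INT32) {
        return std::nullopt;  // unsupported tensor type for ABS
    }
    // Integer ABS arrived later than the float variants.
    return inputType == OperandType::TENSOR_INT32 ? Version::FEATURE_LEVEL_4
                                                  : Version::FEATURE_LEVEL_3;
}
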
/packages/modules/NeuralNetworks/common/
LegacyUtils.cpp:
    108:  bool isExtensionOperandType(OperandType type) {
    171:  OperandType getInputType(uint32_t index) const override;
    176:  OperandType getOutputType(uint32_t index) const override;
    213:  OperandType OperationValidationContext::getInputType(uint32_t index) const {
    227:  OperandType OperationValidationContext::getOutputType(uint32_t index) const {
    304:  uint32_t nonExtensionOperandSizeOfData(OperandType type, const std::vector<uint32_t>& dimensions) {
    331:  bool nonExtensionOperandSizeOfDataOverflowsUInt32(OperandType type,
  in validateOperationOperandTypes():
    512:  const std::vector<OperandType>& inExpectedTypes,
    514:  const std::vector<OperandType>& outExpectedInTypes) {
  in validateConditionOperand():
    570:  NN_RET_CHECK(operand.type == OperandType::TENSOR_BOOL8)
    [all …]

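LegacyUtils.cpp is where the per-operation expected-type lists are finally enforced: validateOperationOperandTypes() compares each operand's declared type against the expected input and output lists. A simplified, self-contained sketch of that comparison, with a stand-in Operand struct:

#include <cstddef>
#include <cstdint>
#include <vector>

enum class OperandType { TENSOR_FLOAT32, TENSOR_INT32, INT32 };

// Stand-in for the model operand; the real one also carries dimensions, scale, etc.
struct Operand {
    OperandType type;
};

// Check that each indexed operand has the type the operation expects,
// mirroring validateOperationOperandTypes() in LegacyUtils.cpp.
bool typesMatch(const std::vector<Operand>& operands, const std::vector<uint32_t>& indexes,
                const std::vector<OperandType>& expectedTypes) {
    if (indexes.size() != expectedTypes.size()) return false;
    for (std::size_t i = 0; i < indexes.size(); ++i) {
        if (operands[indexes[i]].type != expectedTypes[i]) return false;
    }
    return true;
}
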
ValidateHal.cpp (in validateOperandExtraParams()):
    101:  case V1_3::OperandType::FLOAT32:
    102:  case V1_3::OperandType::INT32:
    103:  case V1_3::OperandType::UINT32:
    104:  case V1_3::OperandType::BOOL:
    105:  case V1_3::OperandType::SUBGRAPH:
    106:  case V1_3::OperandType::TENSOR_FLOAT32:
    107:  case V1_3::OperandType::TENSOR_FLOAT16:
    108:  case V1_3::OperandType::TENSOR_INT32:
    109:  case V1_3::OperandType::TENSOR_QUANT8_ASYMM:
    110:  case V1_3::OperandType::TENSOR_QUANT8_ASYMM_SIGNED:
    [all …]

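ValidateHal.cpp's validateOperandExtraParams() runs a switch over every V1_3 operand type; the run of case labels above is a group of types that fall through to one shared rule. A rough sketch of that switch shape follows; the ExtraParams model and the rule attached to each group are simplified assumptions, not the exact HAL checks.

enum class OperandType {
    FLOAT32, INT32, BOOL, TENSOR_FLOAT32, TENSOR_QUANT8_ASYMM, TENSOR_QUANT8_SYMM_PER_CHANNEL
};
enum class ExtraParams { NONE, CHANNEL_QUANT };

// Group case labels by the rule they share: most types carry no extra params,
// while per-channel quantized tensors must carry channel quantization info.
bool validateOperandExtraParams(OperandType type, ExtraParams extraParams) {
    switch (type) {
        case OperandType::FLOAT32:
        case OperandType::INT32:
        case OperandType::BOOL:
        case OperandType::TENSOR_FLOAT32:
        case OperandType::TENSOR_QUANT8_ASYMM:
            return extraParams == ExtraParams::NONE;
        case OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL:
            return extraParams == ExtraParams::CHANNEL_QUANT;
    }
    return false;  // unknown operand type
}
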
/packages/modules/NeuralNetworks/common/cpu_operations/
Comparisons.cpp:
  in compute():
    50:  if (aShape.type == OperandType::TENSOR_QUANT8_ASYMM ||
    51:  aShape.type == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
  in executeLess():
    130:  case OperandType::TENSOR_FLOAT16:
    132:  case OperandType::TENSOR_FLOAT32:
    134:  case OperandType::TENSOR_INT32:
    136:  case OperandType::TENSOR_QUANT8_ASYMM:
    138:  case OperandType::TENSOR_QUANT8_ASYMM_SIGNED:
    140:  case OperandType::TENSOR_BOOL8:
  in executeLessEqual():
    149:  case OperandType::TENSOR_FLOAT16:
    151:  case OperandType::TENSOR_FLOAT32:
    [all …]

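On the execution side, Comparisons.cpp switches on the stored OperandType to pick the element type for the comparison kernel. A simplified sketch of that dispatch, with a trivial stand-in kernel:

#include <cstddef>
#include <cstdint>
#include <vector>

enum class OperandType { TENSOR_FLOAT32, TENSOR_INT32, TENSOR_BOOL8 };

// Trivial stand-in for the templated comparison kernel in Comparisons.cpp.
template <typename T>
std::vector<uint8_t> computeLess(const T* a, const T* b, std::size_t n) {
    std::vector<uint8_t> out(n);
    for (std::size_t i = 0; i < n; ++i) out[i] = a[i] < b[i];
    return out;
}

// Dispatch on the operand type, the way executeLess() switches over OperandType.
std::vector<uint8_t> executeLess(OperandType type, const void* a, const void* b, std::size_t n) {
    switch (type) {
        case OperandType::TENSOR_FLOAT32:
            return computeLess(static_cast<const float*>(a), static_cast<const float*>(b), n);
        case OperandType::TENSOR_INT32:
            return computeLess(static_cast<const int32_t*>(a), static_cast<const int32_t*>(b), n);
        case OperandType::TENSOR_BOOL8:
            return computeLess(static_cast<const uint8_t*>(a), static_cast<const uint8_t*>(b), n);
    }
    return {};  // unsupported type
}
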
/packages/modules/NeuralNetworks/common/types/src/
TypeUtils.cpp (in makeOperandPerformance()):
    69:  static constexpr OperandType kOperandTypes[] = {
    70:  OperandType::FLOAT32,
    71:  OperandType::INT32,
    72:  OperandType::UINT32,
    73:  OperandType::TENSOR_FLOAT32,
    74:  OperandType::TENSOR_INT32,
    75:  OperandType::TENSOR_QUANT8_ASYMM,
    76:  OperandType::BOOL,
    77:  OperandType::TENSOR_QUANT16_SYMM,
    78:  OperandType::TENSOR_FLOAT16,
    [all …]

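TypeUtils.cpp keeps a fixed kOperandTypes table and builds one {type, performance} entry per element in makeOperandPerformance(). A simplified sketch of that construction; the Performance struct and the shortened type table are stand-ins for the real capabilities types:

#include <iterator>
#include <vector>

enum class OperandType { FLOAT32, INT32, TENSOR_FLOAT32, TENSOR_INT32, TENSOR_QUANT8_ASYMM };

// Stand-in for the driver performance info attached to each operand type.
struct Performance {
    float execTime;
    float powerUsage;
};

struct OperandPerformance {
    OperandType type;
    Performance info;
};

// Build one entry per supported operand type with the same performance numbers,
// mirroring makeOperandPerformance() iterating over its kOperandTypes table.
std::vector<OperandPerformance> makeOperandPerformance(Performance perf) {
    static constexpr OperandType kOperandTypes[] = {
            OperandType::FLOAT32,      OperandType::INT32,
            OperandType::TENSOR_FLOAT32, OperandType::TENSOR_INT32,
            OperandType::TENSOR_QUANT8_ASYMM,
    };
    std::vector<OperandPerformance> table;
    table.reserve(std::size(kOperandTypes));
    for (OperandType type : kOperandTypes) {
        table.push_back({type, perf});
    }
    return table;
}
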
/packages/modules/NeuralNetworks/tools/test_generator/tests/P_vts_variation/
stdout.txt.expect:
    17:  .type = OperandType::TENSOR_FLOAT32,
    26:  .type = OperandType::TENSOR_FLOAT32,
    35:  .type = OperandType::TENSOR_FLOAT32,
    44:  .type = OperandType::INT32,
    53:  .type = OperandType::INT32,
    62:  .type = OperandType::INT32,
    71:  .type = OperandType::INT32,
    80:  .type = OperandType::INT32,
    89:  .type = OperandType::TENSOR_FLOAT32,
    164:  .type = OperandType::TENSOR_FLOAT32,
    [all …]

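The VTS expect files in these test_generator directories are generated model definitions; each `.type = OperandType::...` line is one field of an aggregate-initialized operand in the generated model. A rough sketch of what such an operand table looks like; the struct fields and lifetime values here are simplified stand-ins, not copied from the expect files.

#include <cstdint>
#include <vector>

// Simplified stand-ins for the generated VTS model types.
enum class OperandType { TENSOR_FLOAT32, INT32 };
enum class OperandLifeTime { SUBGRAPH_INPUT, CONSTANT_COPY };

struct Operand {
    OperandType type;
    std::vector<uint32_t> dimensions;
    float scale;
    int32_t zeroPoint;
    OperandLifeTime lifetime;
};

// One designated-initializer entry per `.type = ...` line in the expect files.
const std::vector<Operand> operands = {
        {
                .type = OperandType::TENSOR_FLOAT32,
                .dimensions = {1, 2, 2, 2},
                .scale = 0.0f,
                .zeroPoint = 0,
                .lifetime = OperandLifeTime::SUBGRAPH_INPUT,
        },
        {
                .type = OperandType::INT32,
                .dimensions = {},
                .scale = 0.0f,
                .zeroPoint = 0,
                .lifetime = OperandLifeTime::CONSTANT_COPY,
        },
};
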
/packages/modules/NeuralNetworks/tools/test_generator/tests/P_vts_naming/
stdout.txt.expect:
    17:  .type = OperandType::TENSOR_FLOAT32,
    26:  .type = OperandType::TENSOR_FLOAT32,
    35:  .type = OperandType::TENSOR_FLOAT32,
    44:  .type = OperandType::INT32,
    53:  .type = OperandType::INT32,
    62:  .type = OperandType::INT32,
    71:  .type = OperandType::INT32,
    80:  .type = OperandType::INT32,
    89:  .type = OperandType::TENSOR_FLOAT32,
    164:  .type = OperandType::TENSOR_FLOAT32,
    [all …]

/packages/modules/NeuralNetworks/tools/test_generator/tests/P_vts_implicit_variation/
stdout.txt.expect:
    17:  .type = OperandType::TENSOR_FLOAT32,
    26:  .type = OperandType::TENSOR_FLOAT32,
    35:  .type = OperandType::TENSOR_FLOAT32,
    44:  .type = OperandType::INT32,
    53:  .type = OperandType::INT32,
    62:  .type = OperandType::INT32,
    71:  .type = OperandType::INT32,
    80:  .type = OperandType::INT32,
    89:  .type = OperandType::TENSOR_FLOAT32,
    164:  .type = OperandType::TENSOR_FLOAT32,
    [all …]

/packages/modules/NeuralNetworks/tools/test_generator/tests/P_variation/
stdout.txt.expect:
    17:  OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 2});
    18:  OperandType type1(Type::TENSOR_FLOAT32, {2, 2, 2, 2});
    19:  OperandType type2(Type::TENSOR_FLOAT32, {1});
    20:  OperandType type3(Type::INT32, {});
    90:  OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 2});
    91:  OperandType type1(Type::TENSOR_FLOAT32, {2, 2, 2, 2});
    92:  OperandType type2(Type::TENSOR_FLOAT32, {1});
    93:  OperandType type3(Type::INT32, {});
    165:  OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 2});
    166:  OperandType type1(Type::TENSOR_FLOAT32, {2, 2, 2, 2});
    [all …]

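The non-VTS variant instead declares wrapper OperandType objects (element type plus dimensions) and registers operands against them in a generated CreateModel function. A minimal sketch of that shape; the Model and OperandType classes below are stand-ins that only mirror the addOperand call, not the real test wrapper.

#include <cstdint>
#include <utility>
#include <vector>

// Stand-ins for the test wrapper types used by the generated code.
enum class Type { TENSOR_FLOAT32, INT32 };

struct OperandType {
    Type type;
    std::vector<uint32_t> dimensions;
    OperandType(Type t, std::vector<uint32_t> dims) : type(t), dimensions(std::move(dims)) {}
};

struct Model {
    std::vector<OperandType> operands;
    uint32_t addOperand(const OperandType* type) {
        operands.push_back(*type);
        return static_cast<uint32_t>(operands.size() - 1);
    }
};

// The generated CreateModel functions declare one OperandType per distinct
// (element type, shape) pair and then register operands against them.
void CreateModel(Model* model) {
    OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 2});
    OperandType type1(Type::TENSOR_FLOAT32, {2, 2, 2, 2});
    OperandType type2(Type::TENSOR_FLOAT32, {1});
    OperandType type3(Type::INT32, {});  // scalar operand: empty dimension list
    uint32_t op1 = model->addOperand(&type0);
    uint32_t op2 = model->addOperand(&type1);
    uint32_t op3 = model->addOperand(&type2);
    uint32_t act = model->addOperand(&type3);
    (void)op1; (void)op2; (void)op3; (void)act;  // wired into operations in the real generated code
}
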