/frameworks/ml/nn/common/ |
D | Utils.cpp |
      580  auto inputType = operands[inputIndexes[0]].type;  in validateOperation() local
      583  if (inputType == OperandType::TENSOR_FLOAT32) {  in validateOperation()
      587  } else if (inputType == OperandType::TENSOR_FLOAT16) {  in validateOperation()
      611  auto inputType = operands[inputIndexes[0]].type;  in validateOperation() local
      615  if (inputType == OperandType::TENSOR_FLOAT32) {  in validateOperation()
      623  } else if (inputType == OperandType::TENSOR_FLOAT16) {  in validateOperation()
      631  } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM) {  in validateOperation()
      662  if (inputType == OperandType::TENSOR_QUANT8_ASYMM) {  in validateOperation()
      697  if (inputType == OperandType::TENSOR_FLOAT16 ||  in validateOperation()
      717  auto inputType = operands[inputIndexes[0]].type;  in validateOperation() local
      [all …]
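
The Utils.cpp hits above come from the framework's centralized validateOperation(), which reads the type of an operation's first input operand through inputIndexes[0] and branches on that type to decide which operand signature is acceptable. The stand-alone C++ sketch below illustrates only that shape; Operand, OperandType, and the typesMatch() helper are simplified stand-ins for the real frameworks/ml/nn declarations, not copies of them.

// Hedged sketch of the centralized validateOperation() shape shown above.
// Operand, OperandType, and typesMatch() are stand-ins, reduced so the example
// compiles and runs on its own.
#include <cstdint>
#include <iostream>
#include <vector>

enum class OperandType { TENSOR_FLOAT16, TENSOR_FLOAT32, TENSOR_QUANT8_ASYMM };

struct Operand {
    OperandType type;
};

// Simplified stand-in for the framework's operand-type check helper: compare
// each indexed operand against the expected type list.
bool typesMatch(const std::vector<Operand>& operands, const std::vector<uint32_t>& indexes,
                const std::vector<OperandType>& expected) {
    if (indexes.size() != expected.size()) return false;
    for (size_t i = 0; i < indexes.size(); ++i) {
        if (operands[indexes[i]].type != expected[i]) return false;
    }
    return true;
}

// Example for a RELU-like operation with one tensor input and one tensor output.
bool validateOperation(const std::vector<uint32_t>& inputIndexes,
                       const std::vector<uint32_t>& outputIndexes,
                       const std::vector<Operand>& operands) {
    auto inputType = operands[inputIndexes[0]].type;
    std::vector<OperandType> inExpectedTypes;
    std::vector<OperandType> outExpectedTypes;
    if (inputType == OperandType::TENSOR_FLOAT32) {
        inExpectedTypes = {OperandType::TENSOR_FLOAT32};
        outExpectedTypes = {OperandType::TENSOR_FLOAT32};
    } else if (inputType == OperandType::TENSOR_FLOAT16) {
        inExpectedTypes = {OperandType::TENSOR_FLOAT16};
        outExpectedTypes = {OperandType::TENSOR_FLOAT16};
    } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM) {
        inExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM};
        outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM};
    } else {
        std::cerr << "Unsupported input tensor type" << std::endl;
        return false;
    }
    return typesMatch(operands, inputIndexes, inExpectedTypes) &&
           typesMatch(operands, outputIndexes, outExpectedTypes);
}

int main() {
    std::vector<Operand> operands = {{OperandType::TENSOR_FLOAT32}, {OperandType::TENSOR_FLOAT32}};
    std::cout << validateOperation({0}, {1}, operands) << std::endl;  // prints 1
}

The real function also checks operand counts and the minimum HAL version each type combination requires, which this sketch omits.
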
|
/frameworks/ml/nn/runtime/test/fibonacci_extension/ |
D | FibonacciExtensionTest.cpp |
      121  void addNopOperation(ExtensionModel* model, ExtensionOperandType inputType, uint32_t input,  in addNopOperation() argument
      124  ASSERT_EQ(inputType.operandType.type, ANEURALNETWORKS_TENSOR_FLOAT32);  in addNopOperation()
      125  ASSERT_EQ(inputType.dimensions.size(), 1u);  in addNopOperation()
      127  uint32_t inputZeros = model->addOperand(&inputType);  in addNopOperation()
      128  uint32_t inputSize = inputType.dimensions[0];  in addNopOperation()
      142  void createModel(ExtensionModel* model, ExtensionOperandType inputType,  in createModel() argument
      144  uint32_t fibonacciInput = model->addOperand(&inputType);  in createModel()
      147  uint32_t modelInput = addNopOperations ? model->addOperand(&inputType) : fibonacciInput;  in createModel()
      151  addNopOperation(model, inputType, modelInput, fibonacciInput);  in createModel()
      170  ExtensionOperandType inputType(  in TEST_F() local
      [all …]
|
/frameworks/ml/nn/common/operations/ |
D | Dequantize.cpp |
      81   const OperandType inputType = context->getInputType(kInputTensor);  in validate() local
      84   if (inputType == OperandType::TENSOR_QUANT8_ASYMM &&  in validate()
      89   NN_RET_CHECK(inputType == OperandType::TENSOR_QUANT8_ASYMM ||  in validate()
      90   inputType == OperandType::TENSOR_QUANT8_SYMM ||  in validate()
      91   inputType == OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL)  in validate()
      92   << "Unsupported input operand type for DEQUANTIZE op: " << toString(inputType);  in validate()
      110  const OperandType inputType = context->getInputType(kInputTensor);  in execute() local
      114  if (inputType == OperandType::TENSOR_QUANT8_ASYMM) {  in execute()
      122  } else if (inputType == OperandType::TENSOR_QUANT8_SYMM) {  in execute()
      130  } else if (inputType == OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL) {  in execute()
      [all …]
|
D | Quantize.cpp |
      69   const OperandType inputType = context->getInputType(kInputTensor);  in validate() local
      72   NN_RET_CHECK(inputType == OperandType::TENSOR_FLOAT16 ||  in validate()
      73   inputType == OperandType::TENSOR_FLOAT32)  in validate()
      74   << "Unsupported input operand type for QUANTIZE op: " << toString(inputType);  in validate()
      91   const OperandType inputType = context->getInputType(kInputTensor);  in execute() local
      92   if (inputType == OperandType::TENSOR_FLOAT32) {  in execute()
      96   } else if (inputType == OperandType::TENSOR_FLOAT16) {  in execute()
      102  << toString(inputType)  in execute()
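
Quantize.cpp and Dequantize.cpp also show the execute() side of the pattern: the runtime OperandType of the input tensor selects which typed kernel runs. The following minimal sketch of that dispatch uses a stand-in OperandType enum and a toy quantization kernel; it is not the real NNAPI implementation, only the same control-flow shape.

// Hedged sketch of an execute()-style dispatch on the input operand type.
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <iostream>
#include <vector>

enum class OperandType { TENSOR_FLOAT16, TENSOR_FLOAT32, TENSOR_QUANT8_ASYMM };

// Typed kernel: quantize float-like data to 8-bit asymmetric with the given
// scale/zeroPoint, clamping to [0, 255].
template <typename T>
bool quantizeToQuant8(const std::vector<T>& input, float scale, int32_t zeroPoint,
                      std::vector<uint8_t>* output) {
    output->resize(input.size());
    for (size_t i = 0; i < input.size(); ++i) {
        int32_t q = zeroPoint + static_cast<int32_t>(std::round(static_cast<float>(input[i]) / scale));
        (*output)[i] = static_cast<uint8_t>(std::min<int32_t>(255, std::max<int32_t>(0, q)));
    }
    return true;
}

// execute()-style dispatch: the runtime type of the input operand picks the kernel.
bool execute(OperandType inputType, const std::vector<float>& input, float scale,
             int32_t zeroPoint, std::vector<uint8_t>* output) {
    if (inputType == OperandType::TENSOR_FLOAT32) {
        return quantizeToQuant8(input, scale, zeroPoint, output);
    } else if (inputType == OperandType::TENSOR_FLOAT16) {
        // The real op reads _Float16 data on this branch; the sketch reuses float storage.
        return quantizeToQuant8(input, scale, zeroPoint, output);
    }
    std::cerr << "Unsupported tensor type for QUANTIZE" << std::endl;
    return false;
}

int main() {
    std::vector<uint8_t> out;
    execute(OperandType::TENSOR_FLOAT32, {0.0f, 0.5f, 1.0f}, /*scale=*/1.0f / 255.0f,
            /*zeroPoint=*/0, &out);
    for (uint8_t v : out) std::cout << static_cast<int>(v) << ' ';  // prints: 0 128 255
    std::cout << std::endl;
}

In the real operations the branches call templated per-element helpers for each tensor element type, and the trailing else logs the unsupported type, which is what the << toString(inputType) hit at line 102 corresponds to.
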
|
D | Reduce.cpp |
      69   OperandType inputType = context->getInputType(kInputTensor);  in validateProdSum() local
      70   NN_RET_CHECK(inputType == OperandType::TENSOR_FLOAT16 ||  in validateProdSum()
      71   inputType == OperandType::TENSOR_FLOAT32)  in validateProdSum()
      74   validateInputTypes(context, {inputType, OperandType::TENSOR_INT32, OperandType::BOOL}));  in validateProdSum()
      75   NN_RET_CHECK(validateOutputTypes(context, {inputType}));  in validateProdSum()
      82   OperandType inputType = context->getInputType(kInputTensor);  in validateMaxMin() local
      83   NN_RET_CHECK(inputType == OperandType::TENSOR_FLOAT16 ||  in validateMaxMin()
      84   inputType == OperandType::TENSOR_FLOAT32 ||  in validateMaxMin()
      85   inputType == OperandType::TENSOR_QUANT8_ASYMM)  in validateMaxMin()
      88   validateInputTypes(context, {inputType, OperandType::TENSOR_INT32, OperandType::BOOL}));  in validateMaxMin()
      [all …]
|
D | Select.cpp |
      72   OperandType inputType = context->getInputType(kInputTensor1);  in validate() local
      74   inputType == OperandType::TENSOR_FLOAT16 || inputType == OperandType::TENSOR_FLOAT32 ||  in validate()
      75   inputType == OperandType::TENSOR_INT32 || inputType == OperandType::TENSOR_QUANT8_ASYMM)  in validate()
      76   << "Unsupported input operand type for select op: " << toString(inputType);  in validate()
      77   NN_RET_CHECK(validateInputTypes(context, {OperandType::TENSOR_BOOL8, inputType, inputType}));  in validate()
      78   NN_RET_CHECK(validateOutputTypes(context, {inputType}));  in validate()
|
D | Neg.cpp |
      54   OperandType inputType = context->getInputType(kInputTensor);  in validate() local
      55   NN_RET_CHECK(inputType == OperandType::TENSOR_FLOAT16 ||  in validate()
      56   inputType == OperandType::TENSOR_FLOAT32 || inputType == OperandType::TENSOR_INT32)  in validate()
      58   NN_RET_CHECK(validateInputTypes(context, {inputType}));  in validate()
      59   NN_RET_CHECK(validateOutputTypes(context, {inputType}));  in validate()
|
D | BidirectionalSequenceRNN.cpp |
      254  OperandType inputType = context->getInputType(kInputTensor);  in validate() local
      255  if (inputType != OperandType::TENSOR_FLOAT16 && inputType != OperandType::TENSOR_FLOAT32) {  in validate()
      257  << toString(inputType);  in validate()
      261  context, {inputType, inputType, inputType, inputType, inputType, inputType, inputType,  in validate()
      262  inputType, inputType, inputType, inputType, inputType, OperandType::INT32,  in validate()
      265  NN_RET_CHECK(validateOutputTypes(context, {inputType}));  in validate()
      267  NN_RET_CHECK(validateOutputTypes(context, {inputType, inputType}));  in validate()
|
D | UnidirectionalSequenceRNN.cpp |
      119  OperandType inputType = context->getInputType(kInputTensor);  in validate() local
      120  if (inputType != OperandType::TENSOR_FLOAT16 && inputType != OperandType::TENSOR_FLOAT32) {  in validate()
      122  << toString(inputType);  in validate()
      125  NN_RET_CHECK(validateInputTypes(context, {inputType, inputType, inputType, inputType, inputType,  in validate()
      127  NN_RET_CHECK(validateOutputTypes(context, {inputType}));  in validate()
|
D | ResizeImageOps.cpp |
      113  auto inputType = context->getInputType(kInputTensor);  in validate() local
      115  std::vector<OperandType> inExpectedTypes = {inputType, scalarType, scalarType};  in validate()
      116  NN_RET_CHECK(inputType == OperandType::TENSOR_FLOAT16 ||  in validate()
      117  inputType == OperandType::TENSOR_FLOAT32 ||  in validate()
      118  inputType == OperandType::TENSOR_QUANT8_ASYMM)  in validate()
      120  if (inputType == OperandType::TENSOR_FLOAT16 || inputType == OperandType::TENSOR_QUANT8_ASYMM) {  in validate()
      125  if (inputType == OperandType::TENSOR_FLOAT32) {  in validate()
      127  } else if (inputType == OperandType::TENSOR_FLOAT16) {  in validate()
      129  } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM) {  in validate()
      140  validateOutputTypes(context, {inputType});  in validate()
|
D | Gather.cpp |
      66   OperandType inputType = context->getInputType(kInputTensor);  in validate() local
      68   inputType == OperandType::TENSOR_FLOAT16 || inputType == OperandType::TENSOR_FLOAT32 ||  in validate()
      69   inputType == OperandType::TENSOR_INT32 || inputType == OperandType::TENSOR_QUANT8_ASYMM)  in validate()
      72   {inputType, OperandType::INT32, OperandType::TENSOR_INT32}));  in validate()
      73   NN_RET_CHECK(validateOutputTypes(context, {inputType}));  in validate()
|
D | ChannelShuffle.cpp |
      64   auto inputType = context->getInputType(kInputTensor);  in validate() local
      65   NN_RET_CHECK(inputType == OperandType::TENSOR_FLOAT16 ||  in validate()
      66   inputType == OperandType::TENSOR_FLOAT32 ||  in validate()
      67   inputType == OperandType::TENSOR_QUANT8_ASYMM)  in validate()
      69   NN_RET_CHECK(validateInputTypes(context, {inputType, OperandType::INT32, OperandType::INT32}));  in validate()
      70   NN_RET_CHECK(validateOutputTypes(context, {inputType}));  in validate()
|
D | LogicalNot.cpp |
      47   OperandType inputType = context->getInputType(kInputTensor);  in validate() local
      48   NN_RET_CHECK(inputType == OperandType::TENSOR_BOOL8)  in validate()
      50   NN_RET_CHECK(validateInputTypes(context, {inputType}));  in validate()
      51   NN_RET_CHECK(validateOutputTypes(context, {inputType}));  in validate()
|
D | PRelu.cpp |
      98   auto inputType = context->getInputType(kInputTensor);  in validate() local
      99   NN_RET_CHECK(inputType == OperandType::TENSOR_FLOAT16 ||  in validate()
      100  inputType == OperandType::TENSOR_FLOAT32 ||  in validate()
      101  inputType == OperandType::TENSOR_QUANT8_ASYMM)  in validate()
      103  NN_RET_CHECK(validateInputTypes(context, {inputType, inputType}));  in validate()
      104  NN_RET_CHECK(validateOutputTypes(context, {inputType}));  in validate()
|
D | LogicalAndOr.cpp |
      64   OperandType inputType = context->getInputType(kInputTensor1);  in validate() local
      65   NN_RET_CHECK(inputType == OperandType::TENSOR_BOOL8)  in validate()
      67   NN_RET_CHECK(validateInputTypes(context, {inputType, inputType}));  in validate()
      68   NN_RET_CHECK(validateOutputTypes(context, {inputType}));  in validate()
|
D | Elementwise.cpp |
      67   OperandType inputType = context->getInputType(kInputTensor);  in validate() local
      68   NN_RET_CHECK(inputType == OperandType::TENSOR_FLOAT16 ||  in validate()
      69   inputType == OperandType::TENSOR_FLOAT32)  in validate()
      71   NN_RET_CHECK(validateInputTypes(context, {inputType}));  in validate()
      72   NN_RET_CHECK(validateOutputTypes(context, {inputType}));  in validate()
|
D | Transpose.cpp |
      73   const OperandType inputType = context->getInputType(kInputTensor);  in validate() local
      74   if (inputType == OperandType::TENSOR_FLOAT32 || inputType == OperandType::TENSOR_QUANT8_ASYMM) {  in validate()
      76   } else if (inputType == OperandType::TENSOR_FLOAT16) {  in validate()
      81   return validateInputTypes(context, {inputType, OperandType::TENSOR_INT32}) &&  in validate()
      82   validateOutputTypes(context, {inputType});  in validate()
|
D | Concatenation.cpp |
      104  const OperandType inputType = context->getInputType(0);  in validate() local
      105  if (inputType == OperandType::TENSOR_FLOAT32 || inputType == OperandType::TENSOR_QUANT8_ASYMM) {  in validate()
      107  } else if (inputType == OperandType::TENSOR_FLOAT16) {  in validate()
      112  std::vector<OperandType> inExpectedTypes(inputCount - 1, inputType);  in validate()
      115  inputType == OperandType::TENSOR_QUANT8_ASYMM) {  in validate()
      124  validateOutputTypes(context, {inputType});  in validate()
|
D | Slice.cpp |
      82   const OperandType inputType = context->getInputType(kInputTensor);  in validate() local
      84   inputType == OperandType::TENSOR_FLOAT16 || inputType == OperandType::TENSOR_FLOAT32 ||  in validate()
      85   inputType == OperandType::TENSOR_INT32 || inputType == OperandType::TENSOR_QUANT8_ASYMM)  in validate()
      89   {inputType, OperandType::TENSOR_INT32, OperandType::TENSOR_INT32}) &&  in validate()
      90   validateOutputTypes(context, {inputType});  in validate()
|
D | Comparisons.cpp |
      126  OperandType inputType = context->getInputType(kInputTensor1);  in validate() local
      128  inputType == OperandType::TENSOR_BOOL8 || inputType == OperandType::TENSOR_FLOAT16 ||  in validate()
      129  inputType == OperandType::TENSOR_FLOAT32 || inputType == OperandType::TENSOR_INT32 ||  in validate()
      130  inputType == OperandType::TENSOR_QUANT8_ASYMM)  in validate()
      131  << "Unsupported input operand type for comparison op: " << toString(inputType);  in validate()
      132  NN_RET_CHECK(validateInputTypes(context, {inputType, inputType}));  in validate()
|
D | HeatmapMaxKeypoint.cpp |
      207  auto inputType = context->getInputType(kHeatmapTensor);  in validate() local
      208  if (inputType == OperandType::TENSOR_FLOAT32 || inputType == OperandType::TENSOR_FLOAT16) {  in validate()
      209  inExpectedTypes = {inputType, inputType, OperandType::BOOL};  in validate()
      210  outExpectedTypes = {inputType, inputType};  in validate()
      211  } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM) {  in validate()
|
D | L2Normalization.cpp |
      150  const OperandType inputType = context->getInputType(kInputTensor);  in validate() local
      151  std::vector<OperandType> inExpectedTypes = {inputType};  in validate()
      152  if (inputType == OperandType::TENSOR_FLOAT16 || inputType == OperandType::TENSOR_QUANT8_ASYMM) {  in validate()
      154  } else if (inputType == OperandType::TENSOR_FLOAT32) {  in validate()
      166  validateOutputTypes(context, {inputType});  in validate()
|
D | Softmax.cpp |
      230  auto inputType = context->getInputType(kInputTensor);  in validate() local
      232  if (inputType == OperandType::TENSOR_FLOAT32 || inputType == OperandType::TENSOR_QUANT8_ASYMM) {  in validate()
      234  inExpectedTypes = {inputType, OperandType::FLOAT32};  in validate()
      235  } else if (inputType == OperandType::TENSOR_FLOAT16) {  in validate()
      237  inExpectedTypes = {inputType, OperandType::FLOAT16};  in validate()
      251  validateOutputTypes(context, {inputType});  in validate()
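
The per-operation files in this directory (Reduce.cpp through Softmax.cpp) share one validate() shape: read the input operand type with context->getInputType(kInputTensor), NN_RET_CHECK that it is one of the types the op supports, then check the full input and output signatures with validateInputTypes()/validateOutputTypes(). The sketch below reproduces only that shape, modeled loosely on the CHANNEL_SHUFFLE hits; OperandType, ValidationContext, NN_RET_CHECK, toString, and the validate*Types helpers are simplified stand-ins, not the real NNAPI declarations.

// Hedged, self-contained sketch of the per-operation validate() pattern above.
#include <cstdint>
#include <iostream>
#include <vector>

enum class OperandType { TENSOR_FLOAT16, TENSOR_FLOAT32, TENSOR_QUANT8_ASYMM, TENSOR_INT32, INT32 };

const char* toString(OperandType type) {
    switch (type) {
        case OperandType::TENSOR_FLOAT16: return "TENSOR_FLOAT16";
        case OperandType::TENSOR_FLOAT32: return "TENSOR_FLOAT32";
        case OperandType::TENSOR_QUANT8_ASYMM: return "TENSOR_QUANT8_ASYMM";
        case OperandType::TENSOR_INT32: return "TENSOR_INT32";
        default: return "INT32";
    }
}

// Stand-in for NN_RET_CHECK: a failed check streams a diagnostic and makes the
// enclosing bool function return false (the stream object converts to false).
struct FalseyErrorStream {
    template <typename T>
    FalseyErrorStream& operator<<(const T& value) { std::cerr << value; return *this; }
    ~FalseyErrorStream() { std::cerr << std::endl; }
    operator bool() const { return false; }
};
#define NN_RET_CHECK(condition) \
    while (!(condition)) return FalseyErrorStream() << "Check failed (" #condition "): "

// Stand-in for the validation context: just the operand type lists.
struct ValidationContext {
    std::vector<OperandType> inputTypes;
    std::vector<OperandType> outputTypes;
    OperandType getInputType(uint32_t index) const { return inputTypes[index]; }
};

bool validateInputTypes(const ValidationContext* context, const std::vector<OperandType>& expected) {
    return context->inputTypes == expected;
}
bool validateOutputTypes(const ValidationContext* context, const std::vector<OperandType>& expected) {
    return context->outputTypes == expected;
}

constexpr uint32_t kInputTensor = 0;

// Shaped after the CHANNEL_SHUFFLE hits: one data tensor plus two INT32 scalars,
// with the output type required to match the input type.
bool validate(const ValidationContext* context) {
    OperandType inputType = context->getInputType(kInputTensor);
    NN_RET_CHECK(inputType == OperandType::TENSOR_FLOAT16 ||
                 inputType == OperandType::TENSOR_FLOAT32 ||
                 inputType == OperandType::TENSOR_QUANT8_ASYMM)
            << "Unsupported input operand type: " << toString(inputType);
    NN_RET_CHECK(validateInputTypes(context, {inputType, OperandType::INT32, OperandType::INT32}));
    NN_RET_CHECK(validateOutputTypes(context, {inputType}));
    return true;
}

int main() {
    ValidationContext good{{OperandType::TENSOR_FLOAT32, OperandType::INT32, OperandType::INT32},
                           {OperandType::TENSOR_FLOAT32}};
    ValidationContext bad{{OperandType::TENSOR_INT32, OperandType::INT32, OperandType::INT32},
                          {OperandType::TENSOR_INT32}};
    std::cout << "good validates: " << validate(&good) << std::endl;  // 1
    std::cout << "bad validates:  " << validate(&bad) << std::endl;   // 0, after logging the failure
}

The FalseyErrorStream trick is one way to let a single statement both stream a diagnostic and return false from validate(), which is the behavior the NN_RET_CHECK(...) << "..." hits above rely on.
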
|
/frameworks/base/core/java/android/view/inputmethod/ |
D | EditorInfo.java |
      97   public int inputType = TYPE_NULL;  field in EditorInfo
      511  switch (inputType&(TYPE_MASK_CLASS|TYPE_MASK_VARIATION)) {  in makeCompatible()
      513  inputType = TYPE_CLASS_TEXT|TYPE_TEXT_VARIATION_EMAIL_ADDRESS  in makeCompatible()
      514  | (inputType&TYPE_MASK_FLAGS);  in makeCompatible()
      517  inputType = TYPE_CLASS_TEXT|TYPE_TEXT_VARIATION_PASSWORD  in makeCompatible()
      518  | (inputType&TYPE_MASK_FLAGS);  in makeCompatible()
      522  inputType = TYPE_CLASS_NUMBER  in makeCompatible()
      523  | (inputType&TYPE_MASK_FLAGS);  in makeCompatible()
      533  pw.println(prefix + "inputType=0x" + Integer.toHexString(inputType)  in dump()
      562  dest.writeInt(inputType);  in writeToParcel()
      [all …]
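
The EditorInfo.java hits treat inputType as a packed bit field: makeCompatible() masks out the class and variation bits, switches on them, and rebuilds the value with a downgraded class/variation while preserving the flag bits via inputType & TYPE_MASK_FLAGS. The small C++ sketch below shows only that bit arithmetic; the mask and constant values are illustrative stand-ins written from memory of android.text.InputType (class in the low 4 bits, variation in the next 8, flags above) and should be verified against the platform sources.

// Illustrative only: stand-in constants, not authoritative Android values.
#include <cstdint>
#include <cstdio>

constexpr uint32_t TYPE_MASK_CLASS = 0x0000000f;
constexpr uint32_t TYPE_MASK_VARIATION = 0x00000ff0;
constexpr uint32_t TYPE_MASK_FLAGS = 0x00fff000;
constexpr uint32_t TYPE_CLASS_TEXT = 0x00000001;
constexpr uint32_t TYPE_TEXT_VARIATION_PASSWORD = 0x00000080;
constexpr uint32_t TYPE_TEXT_VARIATION_WEB_PASSWORD = 0x000000e0;
constexpr uint32_t TYPE_TEXT_FLAG_NO_SUGGESTIONS = 0x00080000;

// makeCompatible()-style rewrite: keep the flag bits, replace class + variation.
uint32_t downgradeToPassword(uint32_t inputType) {
    return TYPE_CLASS_TEXT | TYPE_TEXT_VARIATION_PASSWORD | (inputType & TYPE_MASK_FLAGS);
}

int main() {
    uint32_t original = TYPE_CLASS_TEXT | TYPE_TEXT_VARIATION_WEB_PASSWORD | TYPE_TEXT_FLAG_NO_SUGGESTIONS;
    uint32_t compatible = downgradeToPassword(original);
    std::printf("original=0x%08x class/variation=0x%03x flagsKept=0x%08x compatible=0x%08x\n",
                static_cast<unsigned>(original),
                static_cast<unsigned>(original & (TYPE_MASK_CLASS | TYPE_MASK_VARIATION)),
                static_cast<unsigned>(compatible & TYPE_MASK_FLAGS),
                static_cast<unsigned>(compatible));
}

The point of the pattern is that flag bits (capitalization, suggestions, multi-line, and so on) survive the compatibility rewrite even though the class and variation are replaced.
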
|
/frameworks/opt/gamesdk/third_party/protobuf-3.0.0/csharp/src/Google.Protobuf/Reflection/ |
D | MethodDescriptor.cs |
      42   private MessageDescriptor inputType;  field in Google.Protobuf.Reflection.MethodDescriptor
      53   public MessageDescriptor InputType { get { return inputType; } }
      93   inputType = (MessageDescriptor) lookup;  in CrossLink()
|