/external/tensorflow/tensorflow/lite/kernels/ |
D | comparisons.cc |
    36   const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1);  in ComparisonPrepare() local
    42   input1->type != kTfLiteString || input1->type != kTfLiteBool);  in ComparisonPrepare()
    44   TF_LITE_ENSURE_TYPES_EQ(context, input1->type, input2->type);  in ComparisonPrepare()
    47   bool requires_broadcast = !HaveSameShapes(input1, input2);  in ComparisonPrepare()
    52   context, input1, input2, &output_size));  in ComparisonPrepare()
    54   output_size = TfLiteIntArrayCopy(input1->dims);  in ComparisonPrepare()
    64   const TfLiteTensor* input1, \
    67   if (input1->type == kTfLiteUInt8 || input1->type == kTfLiteInt8) { \
    68   auto input1_offset = -input1->params.zero_point; \
    74   QuantizeMultiplierSmallerThanOneExp(input1->params.scale, \
    [all …]
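The ComparisonPrepare() hits above show the pattern shared by most binary kernels in this directory: require matching input types, set requires_broadcast when the two shapes differ, and size the output either via CalculateShapeForBroadcast or by copying input1->dims. Below is a minimal standalone sketch of the trailing-dimension broadcast rule such a shape calculation follows (hypothetical BroadcastShape helper, not the TFLite implementation):

#include <algorithm>
#include <cstdio>
#include <stdexcept>
#include <vector>

// Broadcast two shapes, walking dimensions from the trailing end; a missing
// or size-1 dimension stretches to match the other input.
std::vector<int> BroadcastShape(const std::vector<int>& a, const std::vector<int>& b) {
  const size_t rank = std::max(a.size(), b.size());
  std::vector<int> out(rank);
  for (size_t i = 0; i < rank; ++i) {
    const int da = i < a.size() ? a[a.size() - 1 - i] : 1;
    const int db = i < b.size() ? b[b.size() - 1 - i] : 1;
    if (da != db && da != 1 && db != 1) throw std::runtime_error("incompatible shapes");
    out[rank - 1 - i] = std::max(da, db);
  }
  return out;
}

int main() {
  for (int d : BroadcastShape({2, 1, 5}, {3, 1})) std::printf("%d ", d);  // prints: 2 3 5
  std::printf("\n");
}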
|
D | floor_mod.cc |
    49   T FloorMod(T input1, T input2) {  in FloorMod() argument
    54   T trunc_mod = mod_func(input1, input2);  in FloorMod()
    55   return (input1 < T(0)) == (input2 < T(0))  in FloorMod()
    77   const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1);  in Prepare() local
    81   TF_LITE_ENSURE_TYPES_EQ(context, input1->type, input2->type);  in Prepare()
    83   const TfLiteType type = input1->type;  in Prepare()
    91   data->requires_broadcast = !HaveSameShapes(input1, input2);  in Prepare()
    96   context, input1, input2, &output_size));  in Prepare()
    98   output_size = TfLiteIntArrayCopy(input1->dims);  in Prepare()
    106  const TfLiteTensor* input1, const TfLiteTensor* input2,  in EvalImpl() argument
    [all …]
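Lines 49-55 compute a floored remainder: the result takes the sign of the divisor, starting from the truncating remainder and correcting it when the operand signs differ. A standalone integer sketch of that idea (not the kernel's templated code, which also covers floating-point inputs):

#include <cstdint>
#include <cstdio>

// Floored modulo: start from the truncating % and shift the result by the
// divisor when the operands have opposite signs and the remainder is nonzero.
int64_t FloorModSketch(int64_t input1, int64_t input2) {
  const int64_t trunc_mod = input1 % input2;
  const bool same_sign = (input1 < 0) == (input2 < 0);
  return (trunc_mod != 0 && !same_sign) ? trunc_mod + input2 : trunc_mod;
}

int main() {
  std::printf("%lld\n", (long long)FloorModSketch(10, -3));  // -2 (10 % -3 alone gives 1)
  std::printf("%lld\n", (long long)FloorModSketch(-10, 3));  //  2
  std::printf("%lld\n", (long long)FloorModSketch(10, 3));   //  1
}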
|
D | mul.cc |
    70   const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1);  in Prepare() local
    74   TF_LITE_ENSURE_EQ(context, input1->type, input2->type);  in Prepare()
    76   data->requires_broadcast = !HaveSameShapes(input1, input2);  in Prepare()
    81   context, input1, input2, &output_size));  in Prepare()
    83   output_size = TfLiteIntArrayCopy(input1->dims);  in Prepare()
    100  input1->params.scale * input2->params.scale / output->params.scale;  in Prepare()
    110  const OpData* data, const TfLiteTensor* input1,  in EvalMul() argument
    119  type::opname(op_params, GetTensorShape(input1), \  in EvalMul()
    120  GetTensorData<data_type>(input1), GetTensorShape(input2), \  in EvalMul()
    159  const TfLiteTensor* input1,  in EvalQuantized() argument
    [all …]
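Line 100 is the standard quantized-multiply rescale: with real values r = (q - zero_point) * scale, the integer product (q1 - z1)(q2 - z2) must be scaled by scale1 * scale2 / output_scale before the output zero point is added. A small numeric check of that identity, using made-up quantization parameters:

#include <cmath>
#include <cstdio>

int main() {
  // Hypothetical quantization parameters for the two inputs and the output.
  const float s1 = 0.02f, s2 = 0.03f, s_out = 0.05f;
  const int z1 = 10, z2 = 5, z_out = 7;
  const int q1 = 60, q2 = 45;

  // Integer path: scale the raw product by s1 * s2 / s_out.
  const float real_multiplier = s1 * s2 / s_out;
  const int q_out = (int)std::lround((q1 - z1) * (q2 - z2) * real_multiplier) + z_out;

  // Real-number path: dequantize, multiply, requantize.
  const float real = ((q1 - z1) * s1) * ((q2 - z2) * s2);
  const int q_ref = (int)std::lround(real / s_out) + z_out;

  std::printf("%d %d\n", q_out, q_ref);  // both print 31
}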
|
D | floor_div.cc |
    38   T FloorDiv(T input1, T input2) {  in FloorDiv() argument
    39   return std::floor(std::divides<double>()(static_cast<double>(input1),  in FloorDiv()
    60   const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1);  in Prepare() local
    64   TF_LITE_ENSURE_EQ(context, input1->type, input2->type);  in Prepare()
    66   const TfLiteType type = input1->type;  in Prepare()
    73   data->requires_broadcast = !HaveSameShapes(input1, input2);  in Prepare()
    78   context, input1, input2, &output_size));  in Prepare()
    80   output_size = TfLiteIntArrayCopy(input1->dims);  in Prepare()
    88   const TfLiteTensor* input1, const TfLiteTensor* input2,  in EvalImpl() argument
    101  GetTensorShape(input1), GetTensorData<T>(input1),  in EvalImpl()
    [all …]
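FloorDiv (lines 38-39) rounds the quotient toward negative infinity instead of toward zero. A standalone sketch of the same rounding rule:

#include <cmath>
#include <cstdio>

// Divide in double precision and floor, so -7 / 2 yields -4 rather than the
// truncated -3. (For very large 64-bit operands the double round-trip loses
// precision; this only illustrates the rounding behaviour.)
int FloorDivSketch(int input1, int input2) {
  return (int)std::floor((double)input1 / (double)input2);
}

int main() {
  std::printf("%d %d %d\n", FloorDivSketch(-7, 2), FloorDivSketch(7, 2),
              FloorDivSketch(-6, 3));  // -4 3 -2
}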
|
D | squared_difference.cc |
    38   T SquaredDifference(T input1, T input2) {  in SquaredDifference() argument
    39   const T difference = input1 - input2;  in SquaredDifference()
    59   const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1);  in Prepare() local
    63   TF_LITE_ENSURE_EQ(context, input1->type, input2->type);  in Prepare()
    66   data->requires_broadcast = !HaveSameShapes(input1, input2);  in Prepare()
    71   context, input1, input2, &output_size));  in Prepare()
    73   output_size = TfLiteIntArrayCopy(input1->dims);  in Prepare()
    81   const OpData* data, const TfLiteTensor* input1,  in EvalSquaredDifference() argument
    85   GetTensorShape(input1), GetTensorData<T>(input1),  in EvalSquaredDifference()
    90   GetTensorShape(input1), GetTensorData<T>(input1),  in EvalSquaredDifference()
    [all …]
|
D | comparisons_test.cc |
    38   ComparisonOpModel(const TensorData& input1, const TensorData& input2,  in ComparisonOpModel() argument
    40   input1_ = AddInput(input1);  in ComparisonOpModel()
    47   int input1() { return input1_; }  in input1() function in tflite::__anon92c241bf0111::ComparisonOpModel
    98   model.PopulateTensor<bool>(model.input1(), {true, false, true, false});  in TEST()
    109  model.PopulateTensor<float>(model.input1(), {0.1, 0.9, 0.7, 0.3});  in TEST()
    120  model.PopulateTensor<int>(model.input1(), {-1, 9, 7, 3});  in TEST()
    131  model.PopulateTensor<int>(model.input1(), {-1, 9, 7, 3});  in TEST()
    142  model.PopulateTensor<int>(model.input1(), {-1, 9, 7, 3, 2, 4, 2, 8});  in TEST()
    154  model.PopulateTensor<bool>(model.input1(), {true, false, true, false});  in TEST()
    165  model.PopulateTensor<float>(model.input1(), {0.1, 0.9, 0.7, 0.3});  in TEST()
    [all …]
|
D | pow.cc |
    53   const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1);  in Prepare() local
    57   TF_LITE_ENSURE_EQ(context, input1->type, input2->type);  in Prepare()
    59   const TfLiteType type = input1->type;  in Prepare()
    66   data->requires_broadcast = !HaveSameShapes(input1, input2);  in Prepare()
    71   context, input1, input2, &output_size));  in Prepare()
    73   output_size = TfLiteIntArrayCopy(input1->dims);  in Prepare()
    80   void PowImpl(const TfLiteTensor* input1, const TfLiteTensor* input2,  in PowImpl() argument
    84   GetTensorShape(input1), GetTensorData<T>(input1),  in PowImpl()
    88   reference_ops::Pow(GetTensorShape(input1), GetTensorData<T>(input1),  in PowImpl()
    110  const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1);  in Eval() local
    [all …]
|
D | add.cc |
    79   const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1);  in Prepare() local
    83   TF_LITE_ENSURE_EQ(context, input1->type, input2->type);  in Prepare()
    86   data->requires_broadcast = !HaveSameShapes(input1, input2);  in Prepare()
    91   context, input1, input2, &output_size));  in Prepare()
    93   output_size = TfLiteIntArrayCopy(input1->dims);  in Prepare()
    98   data->input1_offset = -input1->params.zero_point;  in Prepare()
    103  2 * std::max(input1->params.scale, input2->params.scale);  in Prepare()
    105  input1->params.scale / twice_max_input_scale;  in Prepare()
    139  TF_LITE_ENSURE_EQ(context, input1->params.zero_point, 0);  in Prepare()
    145  CheckedLog2(input1->params.scale, &input1_scale_log2_rounded);  in Prepare()
    [all …]
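Lines 98-105 set up the uint8/int8 path: both inputs are rescaled to a common scale (twice the larger input scale) so their quantized values can be added in the integer domain. A float reference of what that path computes, with made-up parameters (the real kernel uses fixed-point multipliers rather than float math):

#include <algorithm>
#include <cmath>
#include <cstdint>
#include <cstdio>

// Dequantize, add, requantize -- the arithmetic the quantized Add kernel
// approximates with integer-only operations.
uint8_t QuantizedAddRef(uint8_t q1, float scale1, int zp1,
                        uint8_t q2, float scale2, int zp2,
                        float out_scale, int out_zp) {
  const float real = (q1 - zp1) * scale1 + (q2 - zp2) * scale2;
  const int q = (int)std::lround(real / out_scale) + out_zp;
  return (uint8_t)std::min(255, std::max(0, q));
}

int main() {
  // 0.5 + 0.25 with scale 1/128 and zero point 128 everywhere.
  std::printf("%u\n", (unsigned)QuantizedAddRef(192, 1.0f / 128, 128,
                                                160, 1.0f / 128, 128,
                                                1.0f / 128, 128));  // 224
}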
|
D | logical.cc |
    54   const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1);  in Prepare() local
    58   TF_LITE_ENSURE_EQ(context, input1->type, input2->type);  in Prepare()
    60   const TfLiteType type = input1->type;  in Prepare()
    67   data->requires_broadcast = !HaveSameShapes(input1, input2);  in Prepare()
    72   context, input1, input2, &output_size));  in Prepare()
    74   output_size = TfLiteIntArrayCopy(input1->dims);  in Prepare()
    84   const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1);  in LogicalImpl() local
    90   GetTensorShape(input1), GetTensorData<bool>(input1),  in LogicalImpl()
    94   reference_ops::Logical(GetTensorShape(input1), GetTensorData<bool>(input1),  in LogicalImpl()
|
D | floor_mod_test.cc |
    29   FloorModModel(const TensorData& input1, const TensorData& input2,  in FloorModModel() argument
    31   input1_ = AddInput(input1);  in FloorModModel()
    39   int input1() { return input1_; }  in input1() function in tflite::__anon60c92a720111::FloorModModel
    55   model.PopulateTensor<int32_t>(model.input1(), {10, 9, 11, 3});  in TEST()
    66   model.PopulateTensor<int32_t>(model.input1(), {10, -9, -11, 7});  in TEST()
    76   model.PopulateTensor<int32_t>(model.input1(), {10, -9, -11, 7});  in TEST()
    86   model.PopulateTensor<int64_t>(model.input1(), {10, -9, -11, (1LL << 34) + 9});  in TEST()
    98   model.PopulateTensor<float>(model.input1(), {10, 9, 11, 3});  in TEST()
    109  model.PopulateTensor<float>(model.input1(), {10, -9, -11, 7});  in TEST()
    120  model.PopulateTensor<float>(model.input1(), {10, -9, -11, 7});  in TEST()
|
D | sub.cc |
    145  const TfLiteTensor* input1,  in PrepareInt16SubOp() argument
    156  TF_LITE_ENSURE_EQ(context, input1->params.zero_point, 0);  in PrepareInt16SubOp()
    162  CheckedLog2(input1->params.scale, &input1_scale_log2_rounded);  in PrepareInt16SubOp()
    197  const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1);  in Prepare() local
    201  TF_LITE_ENSURE_EQ(context, input1->type, input2->type);  in Prepare()
    204  data->requires_broadcast = !HaveSameShapes(input1, input2);  in Prepare()
    209  context, input1, input2, &output_size));  in Prepare()
    211  output_size = TfLiteIntArrayCopy(input1->dims);  in Prepare()
    215  TF_LITE_ENSURE_OK(context, Prepare8BitSubOp(context, input1, input2, output,  in Prepare()
    218  TF_LITE_ENSURE_OK(context, PrepareInt16SubOp(context, input1, input2,  in Prepare()
    [all …]
|
D | div_test.cc |
    28   BaseDivOpModel(const TensorData& input1, const TensorData& input2,  in BaseDivOpModel() argument
    31   input1_ = AddInput(input1);  in BaseDivOpModel()
    39   int input1() { return input1_; }  in input1() function in tflite::__anon86027ab40111::BaseDivOpModel
    66   m.PopulateTensor<float>(m.input1(), {-0.2, 0.2, -1.2, 0.8});  in TEST()
    77   m.PopulateTensor<float>(m.input1(), {-0.2, 0.2, -1.2, 0.8});  in TEST()
    91   m.PopulateTensor<float>(m.input1(), {-2.0, 0.2, 0.3, 0.8, 1.1, -2.0});  in TEST()
    108  m.PopulateTensor<float>(m.input1(), {-0.2, 0.2, 0.07, 0.08, 0.11, -0.123});  in TEST()
    122  m.PopulateTensor<int32_t>(m.input1(), {-2, 2, -15, 8});  in TEST()
    132  m.PopulateTensor<int32_t>(m.input1(), {-2, 2, -12, 8});  in TEST()
    145  m.PopulateTensor<int32_t>(m.input1(), {-20, 2, 3, 8, 11, -20});  in TEST()
    [all …]
|
D | mul_test.cc |
    28   BaseMulOpModel(const TensorData& input1, const TensorData& input2,  in BaseMulOpModel() argument
    31   input1_ = AddInput(input1);  in BaseMulOpModel()
    39   int input1() { return input1_; }  in input1() function in tflite::__anond9e73d7f0111::BaseMulOpModel
    92   m.PopulateTensor<float>(m.input1(), {-2.0, 0.2, 0.7, 0.8});  in TEST()
    103  m.PopulateTensor<float>(m.input1(), {-2.0, 0.2, 0.7, 0.8});  in TEST()
    117  m.PopulateTensor<float>(m.input1(), {-2.0, 0.2, 0.7, 0.8, 1.1, 2.0});  in TEST()
    134  m.PopulateTensor<float>(m.input1(), {-2.0, 0.2, 0.7, 0.8, 1.1, 2.0});  in TEST()
    148  m.PopulateTensor<int32_t>(m.input1(), {-20, 2, 7, 8});  in TEST()
    158  m.PopulateTensor<int32_t>(m.input1(), {-20, 2, 7, 8});  in TEST()
    171  m.PopulateTensor<int32_t>(m.input1(), {-20, 2, 7, 8, 11, 20});  in TEST()
    [all …]
|
D | add_test.cc |
    28   BaseAddOpModel(const TensorData& input1, const TensorData& input2,  in BaseAddOpModel() argument
    31   input1_ = AddInput(input1);  in BaseAddOpModel()
    39   int input1() { return input1_; }  in input1() function in tflite::__anonb0f7933a0111::BaseAddOpModel
    93   m.PopulateTensor<float>(m.input1(), {-2.0, 0.2, 0.7, 0.8});  in TEST()
    103  m.PopulateTensor<float>(m.input1(), {-2.0, 0.2, 0.7, 0.8});  in TEST()
    116  m.PopulateTensor<float>(m.input1(), {-2.0, 0.2, 0.7, 0.8, 1.1, 2.0});  in TEST()
    132  m.PopulateTensor<float>(m.input1(), {-2.0, 0.2, 0.7, 0.8, 1.1, 2.0});  in TEST()
    146  m.PopulateTensor<int32_t>(m.input1(), {-20, 2, 7, 8});  in TEST()
    156  m.PopulateTensor<int32_t>(m.input1(), {-20, 2, 7, 8});  in TEST()
    169  m.PopulateTensor<int32_t>(m.input1(), {-20, 2, 7, 8, 11, 20});  in TEST()
    [all …]
|
D | maximum_minimum.cc |
    40   input1 = GetInput(context, node, kInputTensor1);  in OpContext()
    44   const TfLiteTensor* input1;  member
    54   TF_LITE_ENSURE_EQ(context, op_context.input1->type, op_context.input2->type);  in Prepare()
    55   op_context.output->type = op_context.input1->type;  in Prepare()
    58   !HaveSameShapes(op_context.input1, op_context.input2);  in Prepare()
    63   context, CalculateShapeForBroadcast(context, op_context.input1,  in Prepare()
    66   output_size = TfLiteIntArrayCopy(op_context.input1->dims);  in Prepare()
    90   GetTensorShape(op_context.input1),  in TFLiteOperation()
    91   GetTensorData<data_type>(op_context.input1),  in TFLiteOperation()
|
D | div.cc |
    60   const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1);  in Prepare() local
    64   TF_LITE_ENSURE_EQ(context, input1->type, input2->type);  in Prepare()
    67   data->requires_broadcast = !HaveSameShapes(input1, input2);  in Prepare()
    72   context, input1, input2, &output_size));  in Prepare()
    74   output_size = TfLiteIntArrayCopy(input1->dims);  in Prepare()
    82   const OpData* data, const TfLiteTensor* input1,  in EvalDiv() argument
    91   type::opname(op_params, GetTensorShape(input1), \  in EvalDiv()
    92   GetTensorData<data_type>(input1), GetTensorShape(input2), \  in EvalDiv()
    132  const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1);  in Eval() local
    137  EvalDiv<kernel_type>(context, node, params, data, input1, input2, output);  in Eval()
|
D | sub_test.cc |
    28   BaseSubOpModel(const TensorData& input1, const TensorData& input2,  in BaseSubOpModel() argument
    31   input1_ = AddInput(input1);  in BaseSubOpModel()
    39   int input1() { return input1_; }  in input1() function in tflite::__anon6d3712fb0111::BaseSubOpModel
    93   m.PopulateTensor<float>(m.input1(), {-2.0, 0.2, 1.7, 0.5});  in TEST()
    104  m.PopulateTensor<float>(m.input1(), {-2.0, 0.2, 1.7, 0.5});  in TEST()
    118  m.PopulateTensor<float>(m.input1(), {-2.0, 0.2, 1.7, 0.5, -1.1, 2.0});  in TEST()
    135  m.PopulateTensor<float>(m.input1(), {-2.0, 0.2, 1.7, 0.5, -1.1, 2.0});  in TEST()
    149  m.PopulateTensor<int32_t>(m.input1(), {-20, 2, 7, 8});  in TEST()
    159  m.PopulateTensor<int32_t>(m.input1(), {-20, 2, 7, 8});  in TEST()
    172  m.PopulateTensor<int32_t>(m.input1(), {-20, 2, 7, 8, 11, 20});  in TEST()
    [all …]
|
D | pow_test.cc |
    30   PowOpModel(const TensorData& input1, const TensorData& input2,  in PowOpModel() argument
    32   input1_ = AddInput(input1);  in PowOpModel()
    40   int input1() { return input1_; }  in input1() function in tflite::__anon5316d1870111::PowOpModel
    56   model.PopulateTensor<int32_t>(model.input1(), {12, 2, 7, 8});  in TEST()
    67   model.PopulateTensor<int32_t>(model.input1(), {0, 2, -7, 8});  in TEST()
    78   model.PopulateTensor<float>(model.input1(), {0.3, 0.4, 0.7, 5.8});  in TEST()
    91   model.PopulateTensor<float>(model.input1(), {0.3, 0.4, 0.7, 5.8});  in TEST()
    103  model.PopulateTensor<int32_t>(model.input1(), {12, 2, 7, 8});  in TEST()
|
D | squared_difference_test.cc |
    28   BaseSquaredDifferenceOpModel(const TensorData& input1,  in BaseSquaredDifferenceOpModel() argument
    31   input1_ = AddInput(input1);  in BaseSquaredDifferenceOpModel()
    40   int input1() { return input1_; }  in input1() function in tflite::__anonbd4518900111::BaseSquaredDifferenceOpModel
    67   m.PopulateTensor<float>(m.input1(), {-0.2, 0.2, -1.2, 0.8});  in TEST()
    81   m.PopulateTensor<float>(m.input1(), {-2.0, 0.2, 0.3, 0.8, 1.1, -2.0});  in TEST()
    99   m.PopulateTensor<float>(m.input1(), {-0.2, 0.2, 0.5, 0.8, 0.11, 1.1});  in TEST()
    113  m.PopulateTensor<int32_t>(m.input1(), {-2, 2, -15, 8});  in TEST()
    126  m.PopulateTensor<int32_t>(m.input1(), {-20, 2, 3, 8, 11, -20});  in TEST()
    142  m.PopulateTensor<int32_t>(m.input1(), {-20, 10, 7, 3, 1, 13});  in TEST()
|
D | add_n.cc |
    34   const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1);  in Prepare() local
    36   output->type = input1->type;  in Prepare()
    41   TF_LITE_ENSURE(context, HaveSameShapes(input1, input));  in Prepare()
    42   TF_LITE_ENSURE_EQ(context, input1->type, input->type);  in Prepare()
    47   TfLiteIntArray* input1_dims = input1->dims;  in Prepare()
    58   const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1);  in EvalAddN() local
    59   reference_ops::AddN<T>(GetTensorShape(input1), num_inputs, all_inputs.data(),  in EvalAddN()
|
/external/deqp-deps/glslang/Test/baseResults/ |
D | hlsl.max.frag.out |
    7    0:2 'input1' ( in 4-component vector of float)
    12   0:3 'input1' ( in 4-component vector of float)
    18   0:? 'input1' ( temp 4-component vector of float)
    19   0:? 'input1' (layout( location=0) in 4-component vector of float)
    26   0:? 'input1' ( temp 4-component vector of float)
    30   0:? 'input1' (layout( location=0) in 4-component vector of float)
    42   0:2 'input1' ( in 4-component vector of float)
    47   0:3 'input1' ( in 4-component vector of float)
    53   0:? 'input1' ( temp 4-component vector of float)
    54   0:? 'input1' (layout( location=0) in 4-component vector of float)
    [all …]
|
/external/tensorflow/tensorflow/contrib/quantize/python/ |
D | quantize_test.py |
    75   input1 = array_ops.zeros((batch_size, height, width, depth))
    77   conv = conv2d(input1, 32, [5, 5], stride=2, padding='SAME',
    114  input1 = array_ops.zeros((batch_size, height, width, depth))
    116  conv = separable_conv2d(input1, None, [5, 5], stride=2,
    151  input1 = array_ops.zeros((batch_size, height, width, depth))
    154  input1,
    206  input1 = array_ops.zeros((batch_size, height, width, depth))
    208  input1,
    233  input1 = array_ops.zeros((batch_size, height, width, depth))
    235  input1,
    [all …]
|
/external/v8/src/compiler/ |
D | node-matchers.cc |
    40   Node* input1 = merge->InputAt(1);  in DiamondMatcher() local
    41   if (input1->InputCount() != 1) return;  in DiamondMatcher()
    43   if (branch != input1->InputAt(0)) return;  in DiamondMatcher()
    46   input1->opcode() == IrOpcode::kIfFalse) {  in DiamondMatcher()
    49   if_false_ = input1;  in DiamondMatcher()
    51   input1->opcode() == IrOpcode::kIfTrue) {  in DiamondMatcher()
    53   if_true_ = input1;  in DiamondMatcher()
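DiamondMatcher here checks that a two-input Merge is fed by the IfTrue and IfFalse projections of a single Branch, recording which input is which. A toy standalone version of that shape check (simplified node struct, not V8's Node/IrOpcode API):

#include <cstdio>
#include <vector>

enum class Op { kBranch, kIfTrue, kIfFalse, kMerge, kOther };
struct Node {
  Op op;
  std::vector<Node*> inputs;
};

// A "diamond" is Branch -> {IfTrue, IfFalse} -> Merge, with both control
// projections coming from the same Branch node.
bool IsDiamond(const Node* merge) {
  if (merge->op != Op::kMerge || merge->inputs.size() != 2) return false;
  const Node* input0 = merge->inputs[0];
  const Node* input1 = merge->inputs[1];
  if (input0->inputs.size() != 1 || input1->inputs.size() != 1) return false;
  if (input0->inputs[0] != input1->inputs[0]) return false;  // different branches
  if (input0->inputs[0]->op != Op::kBranch) return false;
  return (input0->op == Op::kIfTrue && input1->op == Op::kIfFalse) ||
         (input0->op == Op::kIfFalse && input1->op == Op::kIfTrue);
}

int main() {
  Node branch{Op::kBranch, {}};
  Node if_true{Op::kIfTrue, {&branch}};
  Node if_false{Op::kIfFalse, {&branch}};
  Node merge{Op::kMerge, {&if_true, &if_false}};
  std::printf("%d\n", IsDiamond(&merge));  // 1
}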
|
/external/deqp/external/vulkancts/modules/vulkan/shaderexecutor/ |
D | vktAtomicOperationTests.cpp |
    338  const T input1 = *reinterpret_cast<const T*>(&original.input[elementNdx + NUM_ELEMENTS / 2]);  in checkOperation() local
    347  …exp.push_back(Expected<T>(originalInout + input0 + input1, originalInout, originalInout + input0));  in checkOperation()
    348  …exp.push_back(Expected<T>(originalInout + input0 + input1, originalInout + input1, originalInout));  in checkOperation()
    354  …exp.push_back(Expected<T>(originalInout & input0 & input1, originalInout, originalInout & input0));  in checkOperation()
    355  …exp.push_back(Expected<T>(originalInout & input0 & input1, originalInout & input1, originalInout));  in checkOperation()
    361  …exp.push_back(Expected<T>(originalInout | input0 | input1, originalInout, originalInout | input0));  in checkOperation()
    362  …exp.push_back(Expected<T>(originalInout | input0 | input1, originalInout | input1, originalInout));  in checkOperation()
    368  …exp.push_back(Expected<T>(originalInout ^ input0 ^ input1, originalInout, originalInout ^ input0));  in checkOperation()
    369  …exp.push_back(Expected<T>(originalInout ^ input0 ^ input1, originalInout ^ input1, originalInout));  in checkOperation()
    375  …exp.push_back(Expected<T>(de::min(de::min(originalInout, input0), input1), originalInout, de::min(…  in checkOperation()
    [all …]
|
/external/eigen/unsupported/test/ |
D | cxx11_tensor_chipping.cpp |
    186  Tensor<float, 5, DataLayout> input1(2,3,5,7,11);  in test_chip_in_expr() local
    187  input1.setRandom();  in test_chip_in_expr()
    191  Tensor<float, 4, DataLayout> result = input1.template chip<0>(0) + input2;  in test_chip_in_expr()
    196  float expected = input1(0,i,j,k,l) + input2(i,j,k,l);  in test_chip_in_expr()
    205  Tensor<float, 3, DataLayout> result2 = input1.template chip<0>(0).template chip<1>(2) + input3;  in test_chip_in_expr()
    209  float expected = input1(0,i,2,j,k) + input3(i,j,k);  in test_chip_in_expr()
    219  Tensor<float, 5, DataLayout> input1(2,3,5,7,11);  in test_chip_as_lvalue() local
    220  input1.setRandom();  in test_chip_as_lvalue()
    224  Tensor<float, 5, DataLayout> tensor = input1;  in test_chip_as_lvalue()
    232  VERIFY_IS_EQUAL(tensor(i,j,k,l,m), input1(i,j,k,l,m));  in test_chip_as_lvalue()
    [all …]
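chip<Dim>(i), used throughout these tests, fixes one index and drops that dimension, so chipping a rank-5 tensor on dimension 0 yields a rank-4 expression that can appear on either side of an assignment. A small sketch assuming Eigen's unsupported Tensor module is available:

#include <unsupported/Eigen/CXX11/Tensor>
#include <iostream>

int main() {
  Eigen::Tensor<float, 3> input1(2, 3, 4);
  input1.setRandom();

  // Fix the first index at 1: the result has shape (3, 4) and holds the
  // values input1(1, j, k).
  Eigen::Tensor<float, 2> slice = input1.chip<0>(1);
  std::cout << (slice(2, 3) == input1(1, 2, 3)) << "\n";  // prints 1

  // As an lvalue, chip() assigns into the selected slice of the full tensor.
  Eigen::Tensor<float, 2> replacement(3, 4);
  replacement.setConstant(42.0f);
  input1.chip<0>(0) = replacement;
  std::cout << input1(0, 2, 3) << "\n";  // 42
}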
|