
Searched refs:input0 (Results 1 – 25 of 244) sorted by relevance


/external/armnn/src/backends/backendsCommon/test/layerTests/
SubtractionTestImpl.cpp
27 std::vector<uint8_t> input0 = { 10, 12, 14, 16 }; in SubtractionUint8Test() local
35 input0, in SubtractionUint8Test()
57 std::vector<uint8_t> input0 = { 10, 12, 14, 16 }; in SubtractionBroadcast1ElementUint8Test() local
67 input0, in SubtractionBroadcast1ElementUint8Test()
89 std::vector<uint8_t> input0 = { 10, 12, 14, 16 }; in SubtractionBroadcastUint8Test() local
99 input0, in SubtractionBroadcastUint8Test()
115 std::vector<float> input0 = { 1, 2, 3, 4 }; in SubtractionTest() local
123 input0, in SubtractionTest()
139 std::vector<float> input0 = { 1, 2, 3, 4 }; in SubtractionBroadcast1ElementTest() local
149 input0, in SubtractionBroadcast1ElementTest()
[all …]
DivisionTestImpl.cpp
32 std::vector<float> input0 = in DivisionByZeroTest() local
54 input0, in DivisionByZeroTest()
74 std::vector<float> input0 = in DivisionTest() local
96 input0, in DivisionTest()
112 std::vector<float> input0({ 2, 4, 6, 8, 10, 12, 14, 16}); in DivisionBroadcast1ElementTest() local
122 input0, in DivisionBroadcast1ElementTest()
138 std::vector<float> input0 = in DivisionBroadcast1DVectorTest() local
158 input0, in DivisionBroadcast1DVectorTest()
180 std::vector<armnn::Half> input0 = in DivisionFloat16Test() local
202 input0, in DivisionFloat16Test()
[all …]
MaximumTestImpl.cpp
32 std::vector<float> input0 = in MaximumSimpleTest() local
54 input0, in MaximumSimpleTest()
70 std::vector<float> input0 = { 1.f, 2.f, 3.f, 4.f, 5.f, 6.f, 7.f, 8.f }; in MaximumBroadcast1ElementTest() local
80 input0, in MaximumBroadcast1ElementTest()
96 std::vector<float> input0 = in MaximumBroadcast1DVectorTest() local
114 input0, in MaximumBroadcast1DVectorTest()
136 std::vector<armnn::Half> input0 = in MaximumFloat16Test() local
158 input0, in MaximumFloat16Test()
176 std::vector<armnn::Half> input0 = { 1._h, 2._h, 3._h, 4._h, 5._h, 6._h, 7._h, 8._h }; in MaximumBroadcast1ElementFloat16Test() local
186 input0, in MaximumBroadcast1ElementFloat16Test()
[all …]
MinimumTestImpl.cpp
28 std::vector<float> input0 = { 1.f, 2.f, 3.f, 4.f, 5.f, 6.f, 7.f, 8.f }; in MinimumBroadcast1ElementTest1() local
38 input0, in MinimumBroadcast1ElementTest1()
54 std::vector<float> input0 = { 1.f, 6.f, 3.f, 2.f, 8.f, 9.f, 1.f, 10.f }; in MinimumBroadcast1ElementTest2() local
64 input0, in MinimumBroadcast1ElementTest2()
80 std::vector<uint8_t> input0 = in MinimumBroadcast1DVectorUint8Test() local
98 input0, in MinimumBroadcast1DVectorUint8Test()
115 std::vector<armnn::Half> input0 = in MinimumFloat16Test() local
137 input0, in MinimumFloat16Test()
155 std::vector<armnn::Half> input0 = in MinimumBroadcast1ElementFloat16Test() local
173 input0, in MinimumBroadcast1ElementFloat16Test()
[all …]
MultiplicationTestImpl.cpp
31 std::vector<float> input0 = in MultiplicationTest() local
53 input0, in MultiplicationTest()
73 std::vector<float> input0 = in Multiplication5dTest() local
125 input0, in Multiplication5dTest()
141 std::vector<float> input0 = { 1, 2, 3, 4, 5, 6, 7, 8}; in MultiplicationBroadcast1ElementTest() local
151 input0, in MultiplicationBroadcast1ElementTest()
167 std::vector<float> input0 = in MultiplicationBroadcast1DVectorTest() local
187 input0, in MultiplicationBroadcast1DVectorTest()
208 std::vector<uint8_t> input0 = in MultiplicationUint8Test() local
233 input0, in MultiplicationUint8Test()
[all …]
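The layer tests above all share one pattern: fill input0 (and an input1 that may be a single broadcast element), run the elementwise workload, and compare against hand-computed expected values. Below is a minimal standalone sketch of that pattern; the helper name and the plain std::vector interface are illustrative, not ArmNN's ElementwiseTestHelper API.

```cpp
#include <cassert>
#include <cstddef>
#include <functional>
#include <iostream>
#include <vector>

// Illustrative stand-in for the test pattern above: apply a binary op to
// input0 and input1, broadcasting input1 when it holds a single element.
std::vector<float> ElementwiseWithBroadcast(const std::vector<float>& input0,
                                            const std::vector<float>& input1,
                                            const std::function<float(float, float)>& op)
{
    assert(input1.size() == input0.size() || input1.size() == 1);
    std::vector<float> output(input0.size());
    for (std::size_t i = 0; i < input0.size(); ++i)
    {
        const float rhs = (input1.size() == 1) ? input1[0] : input1[i];
        output[i] = op(input0[i], rhs);
    }
    return output;
}

int main()
{
    // Mirrors the Broadcast1Element tests: a one-element second input.
    std::vector<float> input0   = { 1, 2, 3, 4 };
    std::vector<float> input1   = { 10 };
    std::vector<float> expected = { -9, -8, -7, -6 };

    const auto result = ElementwiseWithBroadcast(input0, input1,
                                                 [](float a, float b) { return a - b; });
    assert(result == expected);
    std::cout << "broadcast subtraction matches expected output\n";
    return 0;
}
```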
/external/tensorflow/tensorflow/lite/kernels/
pack.cc
41 const TfLiteTensor* input0; in Prepare() local
42 TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input0)); in Prepare()
43 const int dimension_size = NumDimensions(input0) + 1; in Prepare()
47 TF_LITE_ENSURE(context, NumDimensions(input0) >= data->axis); in Prepare()
50 if (input0->type != kTfLiteInt32 && input0->type != kTfLiteFloat32 && in Prepare()
51 input0->type != kTfLiteUInt8 && input0->type != kTfLiteInt8 && in Prepare()
52 input0->type != kTfLiteInt16 && input0->type != kTfLiteInt64) { in Prepare()
54 TfLiteTypeGetName(input0->type)); in Prepare()
61 TF_LITE_ENSURE(context, HaveSameShapes(input0, input)); in Prepare()
62 TF_LITE_ENSURE_TYPES_EQ(context, input0->type, input->type); in Prepare()
[all …]
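Prepare() here checks that every input shares the first input's type and shape and that the pack axis is in range, then gives the output one extra dimension for the new axis. The sketch below is a simplified model of that behaviour for flat tensors packed along a new leading axis; it is not the TFLite kernel, and the function name is invented for illustration.

```cpp
#include <cassert>
#include <iostream>
#include <stdexcept>
#include <vector>

// Simplified model of the pack operator for flat inputs: all inputs must have
// identical length (the "same shape" check in Prepare), and the result gains a
// new leading dimension equal to the number of inputs.
std::vector<float> PackAlongNewLeadingAxis(const std::vector<std::vector<float>>& inputs)
{
    if (inputs.empty())
    {
        throw std::invalid_argument("pack requires at least one input");
    }
    const std::size_t elementCount = inputs[0].size();
    std::vector<float> output;
    output.reserve(inputs.size() * elementCount);
    for (const auto& input : inputs)
    {
        // Equivalent to the HaveSameShapes() check in Prepare().
        if (input.size() != elementCount)
        {
            throw std::invalid_argument("all pack inputs must have the same shape");
        }
        output.insert(output.end(), input.begin(), input.end());
    }
    return output; // logical shape: [inputs.size(), elementCount]
}

int main()
{
    std::vector<float> input0 = { 1, 2, 3 };
    std::vector<float> input1 = { 4, 5, 6 };
    const auto packed = PackAlongNewLeadingAxis({ input0, input1 });
    assert((packed == std::vector<float>{ 1, 2, 3, 4, 5, 6 }));
    std::cout << "packed shape: [2, 3]\n";
    return 0;
}
```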
/external/tensorflow/tensorflow/compiler/mlir/tensorflow/tests/graphdef2mlir/
add.pbtxt
1 …f-to-mlir -tf-enable-shape-inference-on-import=false %s -tf-input-arrays=input0,input1 -tf-input-d…
2 …f-to-mlir -tf-enable-shape-inference-on-import=false %s -tf-input-arrays=input0,input1 -tf-input-s…
3 …f-to-mlir -tf-enable-shape-inference-on-import=false %s -tf-input-arrays=input0,input1 -tf-input-s…
4 …f-to-mlir -tf-enable-shape-inference-on-import=false %s -tf-input-arrays=input0,input1 -tf-input-s…
5 …f-to-mlir -tf-enable-shape-inference-on-import=false %s -tf-input-arrays=input0,input1 -tf-input-s…
10 input: "input0"
20 name: "input0"
46 # CHECK-SAME: inputs = "input0,input1"
54 # SOME-SAME: inputs = "input0,input1"
62 # NONE-SAME: inputs = "input0,input1"
[all …]
/external/tensorflow/tensorflow/compiler/xla/mlir_hlo/tosa/lib/Transforms/
legalize_mhlo.pdll
48 replace op<mhlo.add>(input0 : Value<_: Tosa_Tensor>,
50 with op<tosa.add>(input0, input1);
52 replace op<mhlo.maximum>(input0 : Value<_: Tosa_Tensor>,
54 with op<tosa.maximum>(input0, input1);
56 replace op<mhlo.multiply>(input0 : Value<_: Tosa_Tensor>,
58 with op<tosa.mul>(input0, input1) {shift = attr<"0 : i32">};
60 replace op<mhlo.subtract>(input0 : Value<_: Tosa_Tensor>,
62 with op<tosa.sub>(input0, input1);
66 replace op<mhlo.select>(input0 : Value<_: Tosa_Tensor>,
69 with op<tosa.select>(input0, input1, input2);
/external/tensorflow/tensorflow/python/kernel_tests/strings_ops/
string_join_op_test.py
25 input0 = ["a", "b"]
30 output = string_ops.string_join([input0, input1])
33 output = string_ops.string_join([input0, input1], separator="--")
36 output = string_ops.string_join([input0, input1, input0], separator="--")
46 string_ops.string_join([input0, input2]).eval()
/external/armnn/src/armnn/layers/
ElementwiseBaseLayer.cpp
26 TensorShape input0 = inputShapes[0]; in InferOutputShapes() local
32 input0 = inputShapes[1]; in InferOutputShapes()
35 unsigned int numDims = input0.GetNumDimensions(); in InferOutputShapes()
36 unsigned int shiftedDims = input0.GetNumDimensions() - input1.GetNumDimensions(); in InferOutputShapes()
42 unsigned int dim0 = input0[i]; in InferOutputShapes()
55 dims[i] = input0[i]; in InferOutputShapes()
ComparisonLayer.cpp
39 TensorShape input0 = inputShapes[0]; in InferOutputShapes() local
45 input0 = inputShapes[1]; in InferOutputShapes()
47 unsigned int numDims = input0.GetNumDimensions(); in InferOutputShapes()
48 unsigned int shiftedDims = input0.GetNumDimensions() - input1.GetNumDimensions(); in InferOutputShapes()
54 unsigned int dim0 = input0[i]; in InferOutputShapes()
67 dims[i] = input0[i]; in InferOutputShapes()
ElementwiseBinaryLayer.cpp
34 TensorShape input0 = inputShapes[0]; in InferOutputShapes() local
40 input0 = inputShapes[1]; in InferOutputShapes()
43 unsigned int numDims = input0.GetNumDimensions(); in InferOutputShapes()
44 unsigned int shiftedDims = input0.GetNumDimensions() - input1.GetNumDimensions(); in InferOutputShapes()
50 unsigned int dim0 = input0[i]; in InferOutputShapes()
63 dims[i] = input0[i]; in InferOutputShapes()
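All three InferOutputShapes() hits implement the same broadcast rule: make the higher-rank shape input0 (swapping if needed), copy its leading shiftedDims dimensions straight through, and for each aligned pair of trailing dimensions take the larger one, where the smaller must be 1 or the two must be equal. A minimal standalone version of that rule, using plain std::vector shapes rather than armnn::TensorShape:

```cpp
#include <algorithm>
#include <cassert>
#include <iostream>
#include <stdexcept>
#include <utility>
#include <vector>

// Numpy-style broadcast shape inference, mirroring the InferOutputShapes()
// logic above but on plain std::vector<unsigned int> shapes.
std::vector<unsigned int> InferBroadcastShape(std::vector<unsigned int> input0,
                                              std::vector<unsigned int> input1)
{
    // Make input0 the higher-rank shape, as the armnn implementations do.
    if (input1.size() > input0.size())
    {
        std::swap(input0, input1);
    }
    const std::size_t numDims     = input0.size();
    const std::size_t shiftedDims = input0.size() - input1.size();

    std::vector<unsigned int> dims(numDims);
    // Leading dimensions come straight from the higher-rank input.
    for (std::size_t i = 0; i < shiftedDims; ++i)
    {
        dims[i] = input0[i];
    }
    // Aligned dimensions must be equal, or one of them must be 1.
    for (std::size_t i = shiftedDims; i < numDims; ++i)
    {
        const unsigned int dim0 = input0[i];
        const unsigned int dim1 = input1[i - shiftedDims];
        if (dim0 != dim1 && dim0 != 1 && dim1 != 1)
        {
            throw std::invalid_argument("shapes are not broadcast-compatible");
        }
        dims[i] = std::max(dim0, dim1);
    }
    return dims;
}

int main()
{
    // {1, 4} broadcast against {2, 3, 4} gives {2, 3, 4}.
    const auto out = InferBroadcastShape({ 1, 4 }, { 2, 3, 4 });
    assert((out == std::vector<unsigned int>{ 2, 3, 4 }));
    std::cout << "broadcast output rank: " << out.size() << "\n";
    return 0;
}
```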
/external/armnn/src/backends/reference/workloads/
RefElementwiseBinaryWorkload.cpp
37 std::unique_ptr<Decoder<DataType>> input0 = MakeDecoder<DataType>(inputInfo0, inputs[0]->Map()); in ExecuteFunction() local
52 AddFunction(inShape0, inShape1, outShape, *input0, *input1, *output); in ExecuteFunction()
57 DivFunction(inShape0, inShape1, outShape, *input0, *input1, *output); in ExecuteFunction()
62 MaximumFunction(inShape0, inShape1, outShape, *input0, *input1, *output); in ExecuteFunction()
67 MinimumFunction(inShape0, inShape1, outShape, *input0, *input1, *output); in ExecuteFunction()
72 MulFunction(inShape0, inShape1, outShape, *input0, *input1, *output); in ExecuteFunction()
77 SubFunction(inShape0, inShape1, outShape, *input0, *input1, *output); in ExecuteFunction()
/external/armnn/delegate/python/test/
test_external_delegate.py
181 input0 = np.array([1, 2, 3, 4], dtype=np.float32).reshape(tensor_shape)
183 inputs = [input0, input0, input1]
206 input0 = np.array([1, 2, 3, 4], dtype=np.float32).reshape(tensor_shape)
208 inputs = [input0, input0, input1]
230 input0 = np.array([1, 2, 3, 4], dtype=np.uint8).reshape(tensor_shape)
232 inputs = [input0, input0, input1]
/external/deqp/external/vulkancts/modules/vulkan/shaderexecutor/
vktAtomicOperationTests.cpp
534 const T input0 = *reinterpret_cast<const T*>(&original.input[elementNdx]); in checkOperation() local
544 …exp.push_back(Expected<T>(originalInout + input0 + input1, originalInout, originalInout + input0)); in checkOperation()
545 …exp.push_back(Expected<T>(originalInout + input0 + input1, originalInout + input1, originalInout)); in checkOperation()
551 …exp.push_back(Expected<T>(originalInout & input0 & input1, originalInout, originalInout & input0)); in checkOperation()
552 …exp.push_back(Expected<T>(originalInout & input0 & input1, originalInout & input1, originalInout)); in checkOperation()
558 …exp.push_back(Expected<T>(originalInout | input0 | input1, originalInout, originalInout | input0)); in checkOperation()
559 …exp.push_back(Expected<T>(originalInout | input0 | input1, originalInout | input1, originalInout)); in checkOperation()
565 …exp.push_back(Expected<T>(originalInout ^ input0 ^ input1, originalInout, originalInout ^ input0)); in checkOperation()
566 …exp.push_back(Expected<T>(originalInout ^ input0 ^ input1, originalInout ^ input1, originalInout)); in checkOperation()
572 …k(Expected<T>(de::min(de::min(originalInout, input0), input1), originalInout, de::min(originalInou… in checkOperation()
[all …]
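checkOperation() cannot know which of the two invocations performed its atomic operation first, so for each operation it pushes both legal interleavings: the final inout value is the same either way, but the old value returned to each invocation depends on the order. A reduced sketch of that idea for atomic add, with all names invented for illustration:

```cpp
#include <cassert>
#include <cstdint>
#include <iostream>
#include <vector>

// One acceptable outcome of two racing atomic adds on the same location:
// the final value, plus the "old value" each invocation observed.
struct Expected
{
    std::uint32_t finalValue;
    std::uint32_t returned0; // value read back by the invocation using input0
    std::uint32_t returned1; // value read back by the invocation using input1

    bool operator==(const Expected& other) const
    {
        return finalValue == other.finalValue &&
               returned0 == other.returned0 &&
               returned1 == other.returned1;
    }
};

// Enumerate both interleavings of atomicAdd(inout, input0) and
// atomicAdd(inout, input1), mirroring the paired exp.push_back calls above.
std::vector<Expected> ExpectedAtomicAddResults(std::uint32_t originalInout,
                                               std::uint32_t input0,
                                               std::uint32_t input1)
{
    return {
        // Invocation 0 runs first: it sees the original value, and
        // invocation 1 sees original + input0.
        { originalInout + input0 + input1, originalInout, originalInout + input0 },
        // Invocation 1 runs first: the roles are reversed.
        { originalInout + input0 + input1, originalInout + input1, originalInout },
    };
}

int main()
{
    const auto expected = ExpectedAtomicAddResults(/*originalInout=*/5,
                                                   /*input0=*/2, /*input1=*/3);
    // An observed result is valid if it matches either interleaving.
    const Expected observed{ 10, 8, 5 }; // invocation 1 went first
    const bool valid = (observed == expected[0]) || (observed == expected[1]);
    assert(valid);
    std::cout << "observed result matches a legal interleaving\n";
    return 0;
}
```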
/external/armnn/src/armnn/test/optimizations/
AddBroadcastReshapeLayerTests.cpp
28 auto input0 = graph.AddLayer<InputLayer>(0, "input0"); in AddBroadcastReshapeLayerOptimizerTest() local
32 input0->GetOutputSlot().SetTensorInfo(info0); in AddBroadcastReshapeLayerOptimizerTest()
36 input0->GetOutputSlot().Connect(add->GetInputSlot(0)); in AddBroadcastReshapeLayerOptimizerTest()
122 auto input0 = graph.AddLayer<InputLayer>(0, "input0"); variable
126 input0->GetOutputSlot().SetTensorInfo(info0);
130 input0->GetOutputSlot().Connect(sub->GetInputSlot(0));
167 auto input0 = graph.AddLayer<InputLayer>(0, "input0"); variable
171 input0->GetOutputSlot().SetTensorInfo(info0);
175 input0->GetOutputSlot().Connect(div->GetInputSlot(0));
212 auto input0 = graph.AddLayer<InputLayer>(0, "input0"); variable
[all …]
/external/armnn/src/backends/cl/workloads/
ClMultiplicationWorkload.cpp
18 arm_compute::Status ClMultiplicationWorkloadValidate(const TensorInfo& input0, in ClMultiplicationWorkloadValidate() argument
23 …const arm_compute::TensorInfo aclInput1 = armcomputetensorutils::BuildArmComputeTensorInfo(input0); in ClMultiplicationWorkloadValidate()
27 …auto convertPolicy = (IsQuantizedType(input0.GetDataType()) || IsQuantizedType(input1.GetDataType(… in ClMultiplicationWorkloadValidate()
54 arm_compute::ICLTensor& input0 = static_cast<IClTensorHandle*>(m_Data.m_Inputs[0])->GetTensor(); in ClMultiplicationWorkload() local
69 &input0, in ClMultiplicationWorkload()
ClMaximumWorkload.cpp
24 arm_compute::Status ClMaximumWorkloadValidate(const TensorInfo& input0, in ClMaximumWorkloadValidate() argument
28 const arm_compute::TensorInfo aclInput0Info = BuildArmComputeTensorInfo(input0); in ClMaximumWorkloadValidate()
46 arm_compute::ICLTensor& input0 = static_cast<IClTensorHandle*>(m_Data.m_Inputs[0])->GetTensor(); in ClMaximumWorkload() local
52 m_MaximumLayer.configure(clCompileContext, &input0, &input1, &output); in ClMaximumWorkload()
/external/ComputeLibrary/src/graph/backends/CL/
CLFunctionsFactory.cpp
135 CLTargetInfo::TensorType *input0 = get_backing_tensor<CLTargetInfo>(node.input(0)); in create_detection_output_layer() local
141 ARM_COMPUTE_ERROR_ON(input0 == nullptr); in create_detection_output_layer()
148 func->configure(input0, input1, input2, output, detect_info); in create_detection_output_layer()
155 << " Data Type: " << input0->info()->data_type() in create_detection_output_layer()
156 << " Input0 shape: " << input0->info()->tensor_shape() in create_detection_output_layer()
166 wrap_function->register_tensor(input0); in create_detection_output_layer()
179 … CLTargetInfo::TensorType *input0 = get_backing_tensor<CLTargetInfo>(node.input(0)); in create_detection_post_process_layer() local
188 ARM_COMPUTE_ERROR_ON(input0 == nullptr); in create_detection_post_process_layer()
198 func->configure(input0, input1, input2, output0, output1, output2, output3, detect_info); in create_detection_post_process_layer()
205 << " Data Type: " << input0->info()->data_type() in create_detection_post_process_layer()
[all …]
/external/tensorflow/tensorflow/compiler/mlir/tfjs/tests/e2e/
add.pbtxt
1 # RUN: tf_tfjs_translate %s -tf-input-arrays=input0,input1 -tf-input-data-types=DT_INT32,DT_INT32 -…
7 input: "input0"
17 name: "input0"
52 # CHECK: "name": "input0"
61 # CHECK-NEXT: "input0"
/external/armnn/src/backends/neon/workloads/
NeonMinimumWorkload.cpp
15 arm_compute::Status NeonMinimumWorkloadValidate(const TensorInfo& input0, in NeonMinimumWorkloadValidate() argument
19 …const arm_compute::TensorInfo aclInput0 = armcomputetensorutils::BuildArmComputeTensorInfo(input0); in NeonMinimumWorkloadValidate()
34 …arm_compute::ITensor& input0 = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTens… in NeonMinimumWorkload() local
38 m_MinLayer.configure(&input0, &input1, &output); in NeonMinimumWorkload()
NeonMaximumWorkload.cpp
14 arm_compute::Status NeonMaximumWorkloadValidate(const TensorInfo& input0, in NeonMaximumWorkloadValidate() argument
18 …const arm_compute::TensorInfo aclInput0 = armcomputetensorutils::BuildArmComputeTensorInfo(input0); in NeonMaximumWorkloadValidate()
33 …arm_compute::ITensor& input0 = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTens… in NeonMaximumWorkload() local
37 m_MaxLayer.configure(&input0, &input1, &output); in NeonMaximumWorkload()
NeonLogicalOrWorkload.cpp
18 arm_compute::Status NeonLogicalOrWorkloadValidate(const TensorInfo& input0, in NeonLogicalOrWorkloadValidate() argument
22 const arm_compute::TensorInfo aclInputInfo0 = BuildArmComputeTensorInfo(input0); in NeonLogicalOrWorkloadValidate()
44 …arm_compute::ITensor& input0 = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTens… in NeonLogicalOrWorkload() local
48 m_LogicalOrLayer.configure(&input0, &input1, &output); in NeonLogicalOrWorkload()
NeonLogicalAndWorkload.cpp
18 arm_compute::Status NeonLogicalAndWorkloadValidate(const TensorInfo& input0, in NeonLogicalAndWorkloadValidate() argument
22 const arm_compute::TensorInfo aclInputInfo0 = BuildArmComputeTensorInfo(input0); in NeonLogicalAndWorkloadValidate()
44 …arm_compute::ITensor& input0 = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTens… in NeonLogicalAndWorkload() local
48 m_LogicalAndLayer.configure(&input0, &input1, &output); in NeonLogicalAndWorkload()
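The CL and Neon workload hits above all follow a common shape: a static *WorkloadValidate() function converts the armnn TensorInfo arguments into arm_compute descriptors and asks the kernel whether the configuration is supported, and the workload constructor downcasts the tensor handles and calls configure() once so that execution can run repeatedly. The sketch below mirrors only that validate-then-configure shape with invented stand-in types; it is not the arm_compute or armnn API.

```cpp
#include <algorithm>
#include <cassert>
#include <iostream>
#include <string>

// Hypothetical stand-ins for the TensorInfo/handle types used by the
// workloads above; only the shape of the pattern is preserved.
struct TensorInfo    { unsigned int numElements; };
struct Status        { bool ok; std::string reason; };
struct BackendTensor { float* data; unsigned int numElements; };

// Analogue of the kernel's static validate: a cheap, allocation-free check
// that the requested configuration is supported before anything is built.
Status MaximumKernelValidate(const TensorInfo& input0,
                             const TensorInfo& input1,
                             const TensorInfo& output)
{
    if (input0.numElements != input1.numElements ||
        input0.numElements != output.numElements)
    {
        return { false, "mismatched tensor sizes" };
    }
    return { true, "" };
}

// Analogue of the workload: validated up front, configured once against the
// backing tensors, then executed as many times as needed.
class MaximumWorkload
{
public:
    MaximumWorkload(BackendTensor& input0, BackendTensor& input1, BackendTensor& output)
        : m_Input0(input0), m_Input1(input1), m_Output(output) {}

    void Execute() const
    {
        for (unsigned int i = 0; i < m_Output.numElements; ++i)
        {
            m_Output.data[i] = std::max(m_Input0.data[i], m_Input1.data[i]);
        }
    }

private:
    BackendTensor& m_Input0;
    BackendTensor& m_Input1;
    BackendTensor& m_Output;
};

int main()
{
    const TensorInfo info{ 4 };
    assert(MaximumKernelValidate(info, info, info).ok); // validate before configuring

    float a[] = { 1, 5, 3, 7 }, b[] = { 4, 2, 6, 0 }, out[4] = {};
    BackendTensor input0{ a, 4 }, input1{ b, 4 }, output{ out, 4 };

    MaximumWorkload workload(input0, input1, output);
    workload.Execute();
    assert(out[0] == 4 && out[1] == 5 && out[2] == 6 && out[3] == 7);
    std::cout << "maximum workload executed\n";
    return 0;
}
```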
/external/armnn/include/armnn/
LayerSupport.hpp
28 const TensorInfo& input0,
118 const TensorInfo& input0,
126 const TensorInfo& input0,
158 const TensorInfo& input0,
188 const TensorInfo& input0,
211 const TensorInfo& input0,
220 const TensorInfo& input0,
228 const TensorInfo& input0,
377 const TensorInfo& input0,
385 const TensorInfo& input0,