
Searched for refs:output_tensor (Results 1 – 25 of 225), sorted by relevance


/external/tensorflow/tensorflow/core/kernels/
debug_ops.h
164 Tensor* output_tensor; in ApplyGrpcGating() local
166 if (!context->allocate_output(0, shape, &output_tensor).ok()) { in ApplyGrpcGating()
235 Tensor* output_tensor; in Compute() local
255 OP_REQUIRES_OK(context, context->allocate_output(0, shape, &output_tensor)); in Compute()
256 output_tensor->vec<int64>()(0) = nan_count; in Compute()
257 OP_REQUIRES_OK(context, PublishTensor(*output_tensor)); in Compute()
278 Tensor* output_tensor; in Compute() local
358 OP_REQUIRES_OK(context, context->allocate_output(0, shape, &output_tensor)); in Compute()
359 output_tensor->vec<double>()(0) = static_cast<double>(is_initialized); in Compute()
360 output_tensor->vec<double>()(1) = static_cast<double>(element_count); in Compute()
[all …]
broadcast_to_op.h
50 void ReshapeAndBCast(const Device &device, Tensor &output_tensor, in ReshapeAndBCast()
53 output_tensor.NumElements() < kint32max && in ReshapeAndBCast()
57 device, output_tensor.template shaped<T, NDIMS>(bcast.result_shape()), in ReshapeAndBCast()
62 device, output_tensor.template shaped<T, NDIMS>(bcast.result_shape()), in ReshapeAndBCast()
72 Tensor &output_tensor, const TensorShape &output_shape, in operator()
78 ReshapeAndBCast<1>(device, output_tensor, input_tensor, bcast); in operator()
81 ReshapeAndBCast<2>(device, output_tensor, input_tensor, bcast); in operator()
84 ReshapeAndBCast<3>(device, output_tensor, input_tensor, bcast); in operator()
87 ReshapeAndBCast<4>(device, output_tensor, input_tensor, bcast); in operator()
90 ReshapeAndBCast<5>(device, output_tensor, input_tensor, bcast); in operator()
base64_ops.cc
35 Tensor* output_tensor = nullptr; in Compute() local
37 &output_tensor)); in Compute()
40 auto output = output_tensor->flat<tstring>(); in Compute()
60 Tensor* output_tensor = nullptr; in Compute() local
62 &output_tensor)); in Compute()
65 auto output = output_tensor->flat<tstring>(); in Compute()
nth_element_op.cc
71 Tensor* output_tensor = nullptr; in Compute() local
73 context->allocate_output(0, out_shape, &output_tensor)); in Compute()
76 nthElementFunc(context, input_in, *output_tensor, n, reverse_); in Compute()
88 Tensor& output_tensor, int n, bool reverse) { in operator ()()
90 T* output = output_tensor.flat<T>().data(); in operator ()()
94 const int num_rows = output_tensor.NumElements(); in operator ()()
regex_full_match_op.cc
51 Tensor* output_tensor = nullptr; in Compute() local
53 &output_tensor)); in Compute()
54 auto output_flat = output_tensor->flat<bool>(); in Compute()
104 Tensor* output_tensor = nullptr; in Compute() local
106 &output_tensor)); in Compute()
107 auto output_flat = output_tensor->flat<bool>(); in Compute()
regex_replace_op.cc
38 Tensor* output_tensor; in InternalCompute() local
44 output_tensor = maybe_forwarded.get(); in InternalCompute()
45 TF_RETURN_IF_ERROR(ctx->set_output("output", *output_tensor)); in InternalCompute()
48 ctx->allocate_output("output", input_tensor->shape(), &output_tensor)); in InternalCompute()
49 output_tensor->flat<tstring>() = input_tensor->flat<tstring>(); in InternalCompute()
51 auto output_flat = output_tensor->flat<tstring>(); in InternalCompute()
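
The kernels above all follow the same OpKernel output pattern: declare a Tensor* local, ask OpKernelContext::allocate_output for output 0 with the desired shape, then write through a typed view such as flat<T>() or vec<T>(). A minimal sketch of that pattern, assuming a hypothetical string pass-through op (the op name and kernel class are illustrative, not any of the files above):

    #include "tensorflow/core/framework/op_kernel.h"
    #include "tensorflow/core/framework/tensor.h"

    namespace tensorflow {

    // Hypothetical kernel: copies a string input to a freshly allocated output.
    class PassThroughStringOp : public OpKernel {
     public:
      explicit PassThroughStringOp(OpKernelConstruction* ctx) : OpKernel(ctx) {}

      void Compute(OpKernelContext* context) override {
        const Tensor& input_tensor = context->input(0);

        // allocate_output fills output_tensor on success, exactly as the
        // base64/regex/nth_element hits above do.
        Tensor* output_tensor = nullptr;
        OP_REQUIRES_OK(context,
                       context->allocate_output(0, input_tensor.shape(),
                                                &output_tensor));

        // Write through a flat (1-D) typed view.
        auto input = input_tensor.flat<tstring>();
        auto output = output_tensor->flat<tstring>();
        for (int64 i = 0; i < input.size(); ++i) {
          output(i) = input(i);
        }
      }
    };

    // "PassThroughString" is a placeholder op name, not a registered TF op.
    REGISTER_KERNEL_BUILDER(Name("PassThroughString").Device(DEVICE_CPU),
                            PassThroughStringOp);

    }  // namespace tensorflow

The debug_ops.h hits differ only in writing scalar statistics through vec<int64>() and vec<double>() views instead of a flat string view.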
/external/tensorflow/tensorflow/lite/kernels/hashtable/
hashtable_find.cc
53 TfLiteTensor* output_tensor; in PrepareHashtableFind() local
55 context, GetOutputSafe(context, node, kOutputTensor, &output_tensor)); in PrepareHashtableFind()
56 TF_LITE_ENSURE_EQ(context, default_value_tensor->type, output_tensor->type); in PrepareHashtableFind()
58 output_tensor->type == kTfLiteString) || in PrepareHashtableFind()
60 output_tensor->type == kTfLiteInt64)); in PrepareHashtableFind()
61 return context->ResizeTensor(context, output_tensor, in PrepareHashtableFind()
77 TfLiteTensor* output_tensor; in EvalHashtableFind() local
78 TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output_tensor)); in EvalHashtableFind()
85 lookup->CheckKeyAndValueTypes(context, key_tensor, output_tensor)); in EvalHashtableFind()
87 lookup->Lookup(context, key_tensor, output_tensor, default_value_tensor); in EvalHashtableFind()
hashtable_size.cc
43 TfLiteTensor* output_tensor; in PrepareHashtableSize() local
45 context, GetOutputSafe(context, node, kOutputTensor, &output_tensor)); in PrepareHashtableSize()
46 TF_LITE_ENSURE_EQ(context, output_tensor->type, kTfLiteInt64); in PrepareHashtableSize()
49 return context->ResizeTensor(context, output_tensor, outputSize); in PrepareHashtableSize()
58 TfLiteTensor* output_tensor; in EvalHashtableSize() local
60 context, GetOutputSafe(context, node, kOutputTensor, &output_tensor)); in EvalHashtableSize()
61 auto* output_data = GetTensorData<std::int64_t>(output_tensor); in EvalHashtableSize()
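
Both hashtable kernels resolve the output with GetOutputSafe, validate its type, and size it through context->ResizeTensor during Prepare. A rough sketch of that Prepare-time sequence for a single-element int64 output, assuming only the standard kernel_util.h helpers (the kOutputTensor index is a placeholder, and this is not the hashtable code itself):

    #include "tensorflow/lite/c/common.h"
    #include "tensorflow/lite/kernels/kernel_util.h"

    namespace {

    constexpr int kOutputTensor = 0;  // placeholder output index

    // Prepare-time sketch: fetch the output safely, check its type, then
    // resize it to a one-element int64 tensor (hashtable_size.cc style).
    TfLiteStatus PrepareScalarInt64Output(TfLiteContext* context,
                                          TfLiteNode* node) {
      TfLiteTensor* output_tensor;
      TF_LITE_ENSURE_OK(context, tflite::GetOutputSafe(context, node,
                                                       kOutputTensor,
                                                       &output_tensor));
      TF_LITE_ENSURE_EQ(context, output_tensor->type, kTfLiteInt64);

      TfLiteIntArray* output_size = TfLiteIntArrayCreate(1);
      output_size->data[0] = 1;
      // ResizeTensor takes ownership of output_size.
      return context->ResizeTensor(context, output_tensor, output_size);
    }

    }  // namespace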
/external/tflite-support/tensorflow_lite_support/cc/task/vision/
image_segmenter.cc
216 const TfLiteTensor* output_tensor = TfLiteEngine::GetOutput(interpreter, 0); in CheckAndSetOutputs() local
219 if (output_tensor->dims->size != 4) { in CheckAndSetOutputs()
224 output_tensor->dims->size), in CheckAndSetOutputs()
227 if (output_tensor->dims->data[0] != 1) { in CheckAndSetOutputs()
231 output_tensor->dims->data[0]), in CheckAndSetOutputs()
234 output_height_ = output_tensor->dims->data[1]; in CheckAndSetOutputs()
235 output_width_ = output_tensor->dims->data[2]; in CheckAndSetOutputs()
236 output_depth_ = output_tensor->dims->data[3]; in CheckAndSetOutputs()
246 if (output_tensor->type != kTfLiteFloat32 && in CheckAndSetOutputs()
247 output_tensor->type != kTfLiteUInt8) { in CheckAndSetOutputs()
[all …]
image_classifier.cc
187 const tflite::TensorMetadata* output_tensor = in CheckAndSetOutputs() local
192 BuildClassificationHead(*metadata_extractor, *output_tensor, in CheckAndSetOutputs()
220 const TfLiteTensor* output_tensor = in CheckAndSetOutputs() local
222 const int num_dimensions = output_tensor->dims->size; in CheckAndSetOutputs()
224 if (output_tensor->dims->data[1] != 1 || in CheckAndSetOutputs()
225 output_tensor->dims->data[2] != 1) { in CheckAndSetOutputs()
230 i, output_tensor->dims->data[2], in CheckAndSetOutputs()
231 output_tensor->dims->data[1]), in CheckAndSetOutputs()
244 if (output_tensor->dims->data[0] != 1) { in CheckAndSetOutputs()
249 output_tensor->dims->data[0], i), in CheckAndSetOutputs()
[all …]
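
image_segmenter.cc and image_classifier.cc both walk output_tensor->dims and output_tensor->type to validate the model's output head before running inference. A hedged sketch of that kind of check on a raw TfLiteTensor, keeping only the rank, batch-size, and type constraints visible in the segmenter hits above (the function name is illustrative):

    #include "tensorflow/lite/c/common.h"

    // Returns true if output_tensor looks like a [1, H, W, C] float32 or
    // uint8 tensor, the shape/type constraints shown in image_segmenter.cc.
    bool LooksLikeSegmentationOutput(const TfLiteTensor* output_tensor) {
      if (output_tensor == nullptr || output_tensor->dims == nullptr) {
        return false;
      }
      if (output_tensor->dims->size != 4) return false;     // rank 4 only
      if (output_tensor->dims->data[0] != 1) return false;  // batch size 1
      return output_tensor->type == kTfLiteFloat32 ||
             output_tensor->type == kTfLiteUInt8;
    }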
/external/tensorflow/tensorflow/python/keras/layers/preprocessing/
reduction_test.py
62 output_tensor = reduction.Reduction(reduction=reduction_str)(input_tensor)
63 model = keras.Model(input_tensor, output_tensor)
104 output_tensor = reduction.Reduction(reduction=reduction_str)(
106 model = keras.Model([input_tensor, weight_input_tensor], output_tensor)
119 output_tensor = reduction.Reduction(reduction="mean")(
121 model = keras.Model([input_tensor, weight_input_tensor], output_tensor)
154 output_tensor = reduction.Reduction(reduction=reduction_str)(input_tensor)
155 model = keras.Model(input_tensor, output_tensor)
196 output_tensor = reduction.Reduction(reduction=reduction_str)(
198 model = keras.Model([input_tensor, weight_input_tensor], output_tensor)
[all …]
/external/tensorflow/tensorflow/lite/c/
c_test.c
110 const TfLiteTensor* output_tensor = in TestSmokeTest() local
112 ASSERT_NE(output_tensor, NULL); in TestSmokeTest()
113 ASSERT_EQ(TfLiteTensorType(output_tensor), kTfLiteFloat32); in TestSmokeTest()
114 ASSERT_EQ(TfLiteTensorNumDims(output_tensor), 1); in TestSmokeTest()
115 ASSERT_EQ(TfLiteTensorDim(output_tensor, 0), 2); in TestSmokeTest()
116 ASSERT_EQ(TfLiteTensorByteSize(output_tensor), sizeof(float) * 2); in TestSmokeTest()
117 ASSERT_NE(TfLiteTensorData(output_tensor), NULL); in TestSmokeTest()
118 ASSERT_STREQ(TfLiteTensorName(output_tensor), "output"); in TestSmokeTest()
121 TfLiteTensorQuantizationParams(output_tensor); in TestSmokeTest()
126 ASSERT_EQ(TfLiteTensorCopyToBuffer(output_tensor, output, in TestSmokeTest()
c_api_test.cc
80 const TfLiteTensor* output_tensor = in TEST() local
82 ASSERT_NE(output_tensor, nullptr); in TEST()
83 EXPECT_EQ(TfLiteTensorType(output_tensor), kTfLiteFloat32); in TEST()
84 EXPECT_EQ(TfLiteTensorNumDims(output_tensor), 1); in TEST()
85 EXPECT_EQ(TfLiteTensorDim(output_tensor, 0), 2); in TEST()
86 EXPECT_EQ(TfLiteTensorByteSize(output_tensor), sizeof(float) * 2); in TEST()
87 EXPECT_NE(TfLiteTensorData(output_tensor), nullptr); in TEST()
88 EXPECT_STREQ(TfLiteTensorName(output_tensor), "output"); in TEST()
91 TfLiteTensorQuantizationParams(output_tensor); in TEST()
96 ASSERT_EQ(TfLiteTensorCopyToBuffer(output_tensor, output.data(), in TEST()
[all …]
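
Both C-API tests fetch the output tensor after invocation, sanity-check its type and size, and copy the data out with TfLiteTensorCopyToBuffer. A minimal sketch of that read path, assuming the interpreter has already been invoked and its first output is a float vector as in the tests above (the function name is illustrative):

    #include <vector>

    #include "tensorflow/lite/c/c_api.h"

    // Copies the first output of an already-invoked interpreter into out.
    // Returns false if the tensor is missing or is not float32.
    bool ReadFirstOutput(const TfLiteInterpreter* interpreter,
                         std::vector<float>* out) {
      const TfLiteTensor* output_tensor =
          TfLiteInterpreterGetOutputTensor(interpreter, 0);
      if (output_tensor == nullptr) return false;
      if (TfLiteTensorType(output_tensor) != kTfLiteFloat32) return false;

      out->resize(TfLiteTensorByteSize(output_tensor) / sizeof(float));
      return TfLiteTensorCopyToBuffer(output_tensor, out->data(),
                                      out->size() * sizeof(float)) == kTfLiteOk;
    }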
/external/tensorflow/tensorflow/lite/delegates/xnnpack/
xnnpack_delegate.cc
1053 const TfLiteTensor& output_tensor = tensors[node->outputs->data[0]]; in VisitAbsNode() local
1055 logging_context, output_tensor, node->outputs->data[0], node_index)); in VisitAbsNode()
1057 logging_context, output_tensor, node->outputs->data[0], node_index)); in VisitAbsNode()
1093 const TfLiteTensor& output_tensor = tensors[node->outputs->data[0]]; in VisitAddNode() local
1095 logging_context, output_tensor, node->outputs->data[0], node_index)); in VisitAddNode()
1097 logging_context, output_tensor, node->outputs->data[0], node_index)); in VisitAddNode()
1137 const TfLiteTensor& output_tensor = tensors[node->outputs->data[0]]; in VisitAveragePool2DNode() local
1139 logging_context, output_tensor, node->outputs->data[0], node_index)); in VisitAveragePool2DNode()
1141 logging_context, output_tensor, node->outputs->data[0], node_index)); in VisitAveragePool2DNode()
1202 const TfLiteTensor& output_tensor = tensors[node->outputs->data[0]]; in VisitCeilNode() local
[all …]
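
Every Visit*Node hit above resolves its output the same way: index the graph-wide tensor table with node->outputs->data[0], then run the result through the delegate's type and allocation checks. A stripped-down sketch of just that lookup (the checks themselves are XNNPACK-internal and omitted here):

    #include "tensorflow/lite/c/common.h"

    // Fetches the first output tensor of a node from the TfLiteContext tensor
    // table, as the xnnpack_delegate.cc visitors do before validating it.
    const TfLiteTensor& FirstOutputTensor(const TfLiteContext* context,
                                          const TfLiteNode* node) {
      const int output_tensor_index = node->outputs->data[0];
      return context->tensors[output_tensor_index];
    }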
/external/tensorflow/tensorflow/lite/micro/kernels/
reshape_test.cc
53 TfLiteTensor* output_tensor = &tensors[outputs_array->data[0]]; in ValidateReshapeGoldens() local
54 const T* output_data = GetTensorData<T>(output_tensor); in ValidateReshapeGoldens()
59 static_cast<size_t>(output_tensor->dims->size)); in ValidateReshapeGoldens()
61 TF_LITE_MICRO_EXPECT_EQ(expected_dims[i], output_tensor->dims->data[i]); in ValidateReshapeGoldens()
67 TfLiteTensor* output_tensor, const T* expected_output, in TestReshapeWithShape() argument
77 tensors[2] = *output_tensor; in TestReshapeWithShape()
92 TfLiteTensor* output_tensor, in TestReshapeWithoutShape() argument
103 tensors[1] = *output_tensor; in TestReshapeWithoutShape()
126 TfLiteTensor output_tensor = CreateTensor(output_data, output_dims); in TestReshape() local
128 TestReshapeWithShape(&input_tensor, &shape_tensor, &output_tensor, in TestReshape()
[all …]
/external/tensorflow/tensorflow/python/kernel_tests/
fractional_max_pool_op_test.py
104 output_tensor = np.zeros(shape=output_shape, dtype=input_tensor.dtype)
109 output_tensor[batch, :, :, channel] = self._MaxPoolAlongCols(
112 return output_tensor
377 output_tensor = nn_ops.max_pool(input_tensor, window_size,
379 output_data = self.evaluate(output_tensor)
382 input_tensor, output_tensor, output_backprop, window_size,
389 output_tensor,
412 output_tensor = nn_ops.max_pool(input_tensor, window_size,
414 output_data = self.evaluate(output_tensor)
417 input_tensor, output_tensor, output_backprop, window_size,
[all …]
fractional_avg_pool_op_test.py
104 output_tensor = np.zeros(shape=output_shape, dtype=input_tensor.dtype)
109 output_tensor[batch, :, :, channel] = self._AvgPoolAlongCols(
112 return output_tensor
367 output_tensor = nn_ops.avg_pool(input_tensor, window_size,
369 output_data = self.evaluate(output_tensor)
406 output_tensor = nn_ops.avg_pool(input_tensor, window_size,
408 output_data = self.evaluate(output_tensor)
442 output_tensor, unused_a, unused_b = nn_ops.fractional_avg_pool_v2(
448 output_data = self.evaluate(output_tensor)
455 output_tensor,
[all …]
ackermann_op.cc
35 Tensor* output_tensor = nullptr; in Compute() local
37 context->allocate_output(0, TensorShape(), &output_tensor)); in Compute()
38 auto output = output_tensor->scalar<tstring>(); in Compute()
/external/tensorflow/tensorflow/compiler/mlir/lite/tests/flatbuffer2mlir/
importer_test_min_max.cc
128 auto& output_tensor = sub_graph->tensors[op->outputs[0]]; in InjectStatsToFullyConnected() local
129 auto shape = output_tensor->shape; in InjectStatsToFullyConnected()
130 output_tensor->quantization->scale.clear(); in InjectStatsToFullyConnected()
131 output_tensor->quantization->zero_point.clear(); in InjectStatsToFullyConnected()
133 output_tensor->quantization->min.push_back(-1.0 * i); in InjectStatsToFullyConnected()
134 output_tensor->quantization->max.push_back(1.0 * i); in InjectStatsToFullyConnected()
136 output_tensor->quantization->quantized_dimension = shape.size() - 1; in InjectStatsToFullyConnected()
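
importer_test_min_max.cc rewrites the quantization block of the op's output tensor through the mutable flatbuffer object API: clear scale and zero_point, push raw min/max statistics, and point quantized_dimension at the last axis. A hedged sketch of the same idea on a tflite::TensorT (field names follow the generated schema; the helper and its min/max arguments are illustrative, not the test code itself):

    #include <memory>

    #include "tensorflow/lite/schema/schema_generated.h"

    // Replaces quantized scale/zero_point on output_tensor with raw min/max
    // statistics, roughly what InjectStatsToFullyConnected does above.
    void InjectMinMax(tflite::TensorT* output_tensor, float min, float max) {
      if (output_tensor->quantization == nullptr) {
        output_tensor->quantization =
            std::make_unique<tflite::QuantizationParametersT>();
      }
      output_tensor->quantization->scale.clear();
      output_tensor->quantization->zero_point.clear();
      output_tensor->quantization->min.push_back(min);
      output_tensor->quantization->max.push_back(max);
      output_tensor->quantization->quantized_dimension =
          static_cast<int>(output_tensor->shape.size()) - 1;
    }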
/external/tensorflow/tensorflow/lite/kernels/
mirror_pad.cc
177 TfLiteTensor* output_tensor; in Eval() local
178 TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output_tensor)); in Eval()
179 if (IsDynamicTensor(output_tensor)) { in Eval()
185 context->ResizeTensor(context, output_tensor, output_size.release())); in Eval()
192 output_dims_num_elements[i + 1] * output_tensor->dims->data[i + 1]; in Eval()
205 const int output_size = NumElements(output_tensor); in Eval()
215 eval_data.output_data = GetTensorData<type>(output_tensor); \ in Eval()
228 switch (output_tensor->type) { in Eval()
268 TfLiteTensor* output_tensor; in Prepare() local
269 TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output_tensor)); in Prepare()
[all …]
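
mirror_pad.cc only sizes its output at Eval time when the tensor is dynamic: it checks IsDynamicTensor and, if true, calls ResizeTensor with a shape computed from the paddings. A hedged sketch of that Eval-time branch; ComputeOutputSize here is a hypothetical helper standing in for the padding arithmetic, not a real TFLite function:

    #include "tensorflow/lite/c/common.h"
    #include "tensorflow/lite/kernels/kernel_util.h"

    // Hypothetical helper: builds the padded output shape for the node.
    TfLiteIntArray* ComputeOutputSize(TfLiteContext* context, TfLiteNode* node);

    TfLiteStatus ResizeOutputIfDynamic(TfLiteContext* context, TfLiteNode* node) {
      TfLiteTensor* output_tensor;
      TF_LITE_ENSURE_OK(context,
                        tflite::GetOutputSafe(context, node, 0, &output_tensor));
      // Statically shaped outputs were already sized in Prepare; only dynamic
      // outputs get resized during Eval, as the hit at line 179 shows.
      if (tflite::IsDynamicTensor(output_tensor)) {
        TF_LITE_ENSURE_OK(
            context, context->ResizeTensor(context, output_tensor,
                                           ComputeOutputSize(context, node)));
      }
      return kTfLiteOk;
    }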
/external/tensorflow/tensorflow/lite/micro/
memory_helpers_test.cc
204 TfLiteTensor output_tensor = tflite::testing::CreateTensor<int32_t>( in TF_LITE_MICRO_TEST() local
213 &context, &input_tensor1, &input_tensor2, &output_tensor)); in TF_LITE_MICRO_TEST()
215 TF_LITE_MICRO_EXPECT_EQ(output_tensor.bytes, input_tensor2.bytes); in TF_LITE_MICRO_TEST()
218 output_tensor.dims->data[i]); in TF_LITE_MICRO_TEST()
220 output_tensor.dims->data[i] = 0; in TF_LITE_MICRO_TEST()
223 output_tensor.dims->size = 0; in TF_LITE_MICRO_TEST()
226 &context, &input_tensor2, &input_tensor1, &output_tensor)); in TF_LITE_MICRO_TEST()
229 output_tensor.dims->data[i]); in TF_LITE_MICRO_TEST()
231 TF_LITE_MICRO_EXPECT_EQ(output_tensor.bytes, input_tensor2.bytes); in TF_LITE_MICRO_TEST()
/external/tensorflow/tensorflow/core/kernels/mkl/
mkl_tfconv_op.h
101 Tensor* output_tensor = nullptr; in ConvertMklToTf() local
103 input_number, output_shape, &output_tensor)); in ConvertMklToTf()
104 DCHECK(output_tensor); in ConvertMklToTf()
111 input.CheckReorderToOpMem(output_tf_md, output_tensor, context), in ConvertMklToTf()
116 output_tensor->CopyFrom(input_tensor, output_shape), in ConvertMklToTf()
/external/tensorflow/tensorflow/c/experimental/saved_model/core/
saved_variable_loading_test.cc
136 AbstractTensorPtr output_tensor(output_handle->Resolve(&status)); in TEST_P() local
140 EXPECT_EQ(output_tensor->Type(), expected_tensor->Type()); in TEST_P()
141 EXPECT_EQ(output_tensor->NumElements(), expected_tensor->NumElements()); in TEST_P()
143 output_tensor->Type(), output_tensor->NumElements(), in TEST_P()
144 output_tensor->Data(), expected_tensor->Data()); in TEST_P()
/external/tensorflow/tensorflow/core/common_runtime/
single_threaded_cpu_device.cc
72 void CopyTensorInSameDevice(const Tensor* input_tensor, Tensor* output_tensor, in CopyTensorInSameDevice() argument
75 if (input_tensor->NumElements() != output_tensor->NumElements()) { in CopyTensorInSameDevice()
78 input_tensor->shape(), ", output=", output_tensor->shape())); in CopyTensorInSameDevice()
81 tensor::DeepCopy(*input_tensor, output_tensor); in CopyTensorInSameDevice()
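
single_threaded_cpu_device.cc implements its same-device copy as an element-count check followed by tensor::DeepCopy. A minimal sketch of that check-then-copy sequence as a free function, assuming only tensor_util.h (the function name is illustrative and error handling is reduced to a Status return):

    #include "tensorflow/core/framework/tensor.h"
    #include "tensorflow/core/framework/tensor_util.h"
    #include "tensorflow/core/platform/errors.h"

    namespace tensorflow {

    // Copies input_tensor into output_tensor on the same (CPU) device,
    // rejecting mismatched element counts as the hit at line 75 does.
    Status CopyOnSameCpuDevice(const Tensor* input_tensor,
                               Tensor* output_tensor) {
      if (input_tensor->NumElements() != output_tensor->NumElements()) {
        return errors::Internal(
            "CPU->CPU copy shape mismatch: input=",
            input_tensor->shape().DebugString(),
            ", output=", output_tensor->shape().DebugString());
      }
      tensor::DeepCopy(*input_tensor, output_tensor);
      return Status::OK();
    }

    }  // namespace tensorflow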
/external/tensorflow/tensorflow/compiler/tf2tensorrt/utils/
trt_engine_utils.cc
238 Tensor* output_tensor = nullptr; in SetTrtEngineOutputs() local
240 TF_RETURN_IF_ERROR(ctx->allocate_output(i, output_shape, &output_tensor)); in SetTrtEngineOutputs()
246 output_tensor = &(outputs->at(i).tensor); in SetTrtEngineOutputs()
247 bool status = output_tensor->CopyFrom(*output_tensor, output_shape); in SetTrtEngineOutputs()
259 const_cast<float*>(output_tensor->flat<float>().data()); in SetTrtEngineOutputs()
263 const_cast<Eigen::half*>(output_tensor->flat<Eigen::half>().data()); in SetTrtEngineOutputs()
269 const_cast<int32*>(output_tensor->flat<int32>().data()); in SetTrtEngineOutputs()
