Searched refs:output_tensor (Results 1 – 25 of 148) sorted by relevance

/external/tensorflow/tensorflow/core/kernels/
debug_ops.h
165 Tensor* output_tensor; in ApplyGrpcGating() local
167 if (!context->allocate_output(0, shape, &output_tensor).ok()) { in ApplyGrpcGating()
236 Tensor* output_tensor; in Compute() local
256 OP_REQUIRES_OK(context, context->allocate_output(0, shape, &output_tensor)); in Compute()
257 output_tensor->vec<int64>()(0) = nan_count; in Compute()
258 OP_REQUIRES_OK(context, PublishTensor(*output_tensor)); in Compute()
279 Tensor* output_tensor; in Compute() local
359 OP_REQUIRES_OK(context, context->allocate_output(0, shape, &output_tensor)); in Compute()
360 output_tensor->vec<double>()(0) = static_cast<double>(is_initialized); in Compute()
361 output_tensor->vec<double>()(1) = static_cast<double>(element_count); in Compute()
[all …]
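
The debug_ops.h hits above show the two allocation idioms used throughout tensorflow/core/kernels: a soft-failure check on allocate_output(...).ok() when the kernel can continue without the output, and OP_REQUIRES_OK when allocation failure should abort Compute(), followed by a write through a typed accessor such as vec<int64>(). A minimal sketch of the second, more common idiom, with a hypothetical kernel name and value (not taken from the file above):

#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor_shape.h"

// Hypothetical kernel illustrating the allocate-then-write pattern.
// Assumes the op is registered with a single int64 output.
class EmitCountOp : public tensorflow::OpKernel {
 public:
  explicit EmitCountOp(tensorflow::OpKernelConstruction* context)
      : OpKernel(context) {}

  void Compute(tensorflow::OpKernelContext* context) override {
    // Ask the context to allocate output slot 0 as a length-1 int64 vector.
    tensorflow::Tensor* output_tensor = nullptr;
    tensorflow::TensorShape shape({1});
    OP_REQUIRES_OK(context,
                   context->allocate_output(0, shape, &output_tensor));
    // Write through a typed view, as the NaN-counting kernel above does.
    output_tensor->vec<tensorflow::int64>()(0) = 42;  // placeholder value
  }
};
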
broadcast_to_op.h
50 void ReshapeAndBCast(const Device &device, Tensor &output_tensor, in ReshapeAndBCast()
53 output_tensor.NumElements() < kint32max && in ReshapeAndBCast()
57 device, output_tensor.template shaped<T, NDIMS>(bcast.result_shape()), in ReshapeAndBCast()
62 device, output_tensor.template shaped<T, NDIMS>(bcast.result_shape()), in ReshapeAndBCast()
72 Tensor &output_tensor, const TensorShape &output_shape, in operator()
78 ReshapeAndBCast<1>(device, output_tensor, input_tensor, bcast); in operator()
81 ReshapeAndBCast<2>(device, output_tensor, input_tensor, bcast); in operator()
84 ReshapeAndBCast<3>(device, output_tensor, input_tensor, bcast); in operator()
87 ReshapeAndBCast<4>(device, output_tensor, input_tensor, bcast); in operator()
90 ReshapeAndBCast<5>(device, output_tensor, input_tensor, bcast); in operator()
base64_ops.cc
35 Tensor* output_tensor = nullptr; in Compute() local
37 &output_tensor)); in Compute()
40 auto output = output_tensor->flat<string>(); in Compute()
60 Tensor* output_tensor = nullptr; in Compute() local
62 &output_tensor)); in Compute()
65 auto output = output_tensor->flat<string>(); in Compute()
regex_full_match_op.cc
47 Tensor* output_tensor = nullptr; in Compute() local
49 &output_tensor)); in Compute()
50 auto output_flat = output_tensor->flat<bool>(); in Compute()
76 Tensor* output_tensor = nullptr; in Compute() local
78 &output_tensor)); in Compute()
79 auto output_flat = output_tensor->flat<bool>(); in Compute()
string_to_hash_bucket_op.h
41 Tensor* output_tensor = nullptr; in Compute() local
44 &output_tensor)); in Compute()
45 auto output_flat = output_tensor->flat<int64>(); in Compute()
83 Tensor* output_tensor = nullptr; in Compute() local
86 &output_tensor)); in Compute()
87 auto output_flat = output_tensor->flat<int64>(); in Compute()
nth_element_op.cc
71 Tensor* output_tensor = nullptr; in Compute() local
73 context->allocate_output(0, out_shape, &output_tensor)); in Compute()
76 nthElementFunc(context, input_in, *output_tensor, n, reverse_); in Compute()
88 Tensor& output_tensor, int n, bool reverse) { in operator ()()
90 T* output = output_tensor.flat<T>().data(); in operator ()()
94 const int num_rows = output_tensor.NumElements(); in operator ()()
regex_replace_op.cc
36 Tensor* output_tensor; in InternalCompute() local
42 output_tensor = maybe_forwarded.get(); in InternalCompute()
43 TF_RETURN_IF_ERROR(ctx->set_output("output", *output_tensor)); in InternalCompute()
46 ctx->allocate_output("output", input_tensor->shape(), &output_tensor)); in InternalCompute()
47 output_tensor->flat<string>() = input_tensor->flat<string>(); in InternalCompute()
49 auto output_flat = output_tensor->flat<string>(); in InternalCompute()
mkl_tfconv_op.h
99 Tensor* output_tensor = NULL; in ConvertMklToTf() local
101 input_number, output_shape, &output_tensor)); in ConvertMklToTf()
102 CHECK_NOTNULL(output_tensor); in ConvertMklToTf()
107 CHECK_EQ(input.CheckReorderToOpMem(output_tf_pd, output_tensor), true); in ConvertMklToTf()
110 CHECK(output_tensor->CopyFrom(input_tensor, output_shape)); in ConvertMklToTf()
decode_raw_op.cc
55 Tensor* output_tensor = nullptr; in Compute() local
57 &output_tensor)); in Compute()
67 Tensor* output_tensor = nullptr; in Compute() local
69 context, context->allocate_output("output", out_shape, &output_tensor)); in Compute()
70 auto out = output_tensor->flat_inner_dims<T>(); in Compute()
unravel_index_op.cc
81 Tensor* output_tensor = nullptr; in Compute() local
85 &output_tensor)); in Compute()
87 auto output = output_tensor->vec<Tidx>(); in Compute()
96 &output_tensor)); in Compute()
98 auto output = output_tensor->matrix<Tidx>(); in Compute()
/external/tensorflow/tensorflow/lite/experimental/c/
c_api_test.cc
73 const TFL_Tensor* output_tensor = in TEST() local
75 ASSERT_NE(output_tensor, nullptr); in TEST()
76 EXPECT_EQ(TFL_TensorType(output_tensor), kTfLiteFloat32); in TEST()
77 EXPECT_EQ(TFL_TensorNumDims(output_tensor), 1); in TEST()
78 EXPECT_EQ(TFL_TensorDim(output_tensor, 0), 2); in TEST()
79 EXPECT_EQ(TFL_TensorByteSize(output_tensor), sizeof(float) * 2); in TEST()
80 EXPECT_NE(TFL_TensorData(output_tensor), nullptr); in TEST()
81 EXPECT_STREQ(TFL_TensorName(output_tensor), "output"); in TEST()
84 TFL_TensorQuantizationParams(output_tensor); in TEST()
89 ASSERT_EQ(TFL_TensorCopyToBuffer(output_tensor, output.data(), in TEST()
[all …]
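
The c_api_test.cc hit exercises the experimental TF Lite C API accessors (TFL_TensorType, TFL_TensorDim, TFL_TensorCopyToBuffer, ...). A hedged sketch of pulling a two-element float output out of an interpreter with that API follows; the model path is a placeholder, and the interpreter helpers (TFL_NewModelFromFile, TFL_InterpreterGetOutputTensor, and friends) are assumed from the same experimental header rather than shown in the result above:

#include <array>
#include "tensorflow/lite/experimental/c/c_api.h"

// Assumed helpers: TFL_NewModelFromFile, TFL_NewInterpreter,
// TFL_InterpreterAllocateTensors, TFL_InterpreterInvoke,
// TFL_InterpreterGetOutputTensor, TFL_DeleteInterpreter, TFL_DeleteModel.
std::array<float, 2> ReadFirstOutput(const char* model_path) {
  TFL_Model* model = TFL_NewModelFromFile(model_path);
  TFL_Interpreter* interpreter =
      TFL_NewInterpreter(model, /*optional_options=*/nullptr);
  TFL_InterpreterAllocateTensors(interpreter);
  // ... copy input data in with TFL_TensorCopyFromBuffer ...
  TFL_InterpreterInvoke(interpreter);

  const TFL_Tensor* output_tensor =
      TFL_InterpreterGetOutputTensor(interpreter, 0);
  std::array<float, 2> output;  // the test above expects a 1-D, 2-element float32 tensor
  TFL_TensorCopyToBuffer(output_tensor, output.data(),
                         output.size() * sizeof(float));

  TFL_DeleteInterpreter(interpreter);
  TFL_DeleteModel(model);
  return output;
}
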
/external/tensorflow/tensorflow/python/kernel_tests/
fractional_max_pool_op_test.py
104 output_tensor = np.zeros(shape=output_shape, dtype=input_tensor.dtype)
109 output_tensor[batch, :, :, channel] = self._MaxPoolAlongCols(
112 return output_tensor
377 output_tensor = nn_ops.max_pool(input_tensor, window_size,
379 output_data = self.evaluate(output_tensor)
382 input_tensor, output_tensor, output_backprop, window_size,
389 output_tensor,
412 output_tensor = nn_ops.max_pool(input_tensor, window_size,
414 output_data = self.evaluate(output_tensor)
417 input_tensor, output_tensor, output_backprop, window_size,
[all …]
fractional_avg_pool_op_test.py
104 output_tensor = np.zeros(shape=output_shape, dtype=input_tensor.dtype)
109 output_tensor[batch, :, :, channel] = self._AvgPoolAlongCols(
112 return output_tensor
367 output_tensor = nn_ops.avg_pool(input_tensor, window_size,
369 output_data = self.evaluate(output_tensor)
406 output_tensor = nn_ops.avg_pool(input_tensor, window_size,
408 output_data = self.evaluate(output_tensor)
442 output_tensor, unused_a, unused_b = nn_ops.fractional_avg_pool_v2(
448 output_data = self.evaluate(output_tensor)
455 output_tensor,
[all …]
bias_op_test.py
148 output_tensor = nn_ops.bias_add(
151 input_tensor, np_input.shape, output_tensor, np_input.shape)
153 bias_tensor, bias.shape, output_tensor, np_input.shape)
157 nn_ops.l2_loss(output_tensor), bias_tensor)[0]
159 output_tensor, np_input.shape, bias_add_grad, bias.shape)
171 output_tensor = nn_ops.bias_add(
175 output_tensor,
179 output_tensor,
183 nn_ops.l2_loss(output_tensor), bias_tensor)[0]
184 _, grad_jacob_n = gradient_checker.compute_gradient(output_tensor,
ackermann_op.cc
35 Tensor* output_tensor = nullptr; in Compute() local
37 context->allocate_output(0, TensorShape(), &output_tensor)); in Compute()
38 auto output = output_tensor->scalar<string>(); in Compute()
/external/tensorflow/tensorflow/contrib/periodic_resample/python/kernel_tests/
periodic_resample_op_test.py
39 output_tensor = input_tensor.reshape((6, 2))
44 self.assertAllEqual(result, output_tensor)
50 output_tensor = input_tensor.reshape((6, 2))[:-1]
55 self.assertAllEqual(result, output_tensor)
61 output_tensor = numpy.array([[[0], [2], [4], [6]], [[1], [3], [5], [7]],
73 self.assertAllEqual(result, output_tensor)
79 output_tensor = numpy.array(
94 self.assertAllEqual(result, output_tensor)
/external/tensorflow/tensorflow/compiler/jit/
xla_launch_util.cc
305 Tensor* output_tensor; in PopulateOutputs() local
316 ctx->allocate_output(i, const_tensor.shape(), &output_tensor)); in PopulateOutputs()
323 &const_tensor, device, output_tensor, in PopulateOutputs()
339 output_tensor = ctx->mutable_output(i); in PopulateOutputs()
341 if (XlaTensor* xla_tensor = XlaTensor::FromTensor(output_tensor)) { in PopulateOutputs()
356 Tensor* output_tensor; in PopulateOutputs() local
357 TF_RETURN_IF_ERROR(ctx->allocate_output(i, shape, &output_tensor)); in PopulateOutputs()
358 XlaTensor* xla_tensor = XlaTensor::FromTensor(output_tensor); in PopulateOutputs()
367 CHECK_EQ(output_tensor->TotalBytes(), 0); in PopulateOutputs()
370 Tensor output_tensor = XlaTensorBuffer::MakeTensor( in PopulateOutputs() local
[all …]
/external/tensorflow/tensorflow/contrib/mpi_collectives/kernels/
mpi_ops.cc
429 Tensor* output_tensor; in PerformCollectiveOp() local
450 output_tensor = record.out_t; in PerformCollectiveOp()
472 context, input_tensor, sizes_vec, output_tensor) in PerformCollectiveOp()
474 context, input_tensor, sizes_vec, output_tensor); in PerformCollectiveOp()
477 context, input_tensor, sizes_vec, output_tensor) in PerformCollectiveOp()
479 sizes_vec, output_tensor); in PerformCollectiveOp()
482 context, input_tensor, sizes_vec, output_tensor) in PerformCollectiveOp()
484 context, input_tensor, sizes_vec, output_tensor); in PerformCollectiveOp()
491 context, input_tensor, &temp_tensor, output_tensor) in PerformCollectiveOp()
493 context, input_tensor, &temp_tensor, output_tensor); in PerformCollectiveOp()
[all …]
/external/tensorflow/tensorflow/lite/kernels/
mirror_pad.cc
167 TfLiteTensor* output_tensor = GetOutput(context, node, 0); in Eval() local
168 if (IsDynamicTensor(output_tensor)) { in Eval()
174 context->ResizeTensor(context, output_tensor, output_size.release())); in Eval()
201 eval_data.output_data = GetTensorData<type>(output_tensor); \ in Eval()
202 eval_data.output_size = NumElements(output_tensor); \ in Eval()
206 switch (output_tensor->type) { in Eval()
242 TfLiteTensor* output_tensor = GetOutput(context, node, 0); in Prepare() local
253 SetTensorToDynamic(output_tensor); in Prepare()
262 return context->ResizeTensor(context, output_tensor, output_size.release()); in Prepare()
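
mirror_pad.cc shows the TFLite-kernel counterpart of allocate_output: when the padded shape depends on runtime data, Prepare() marks the output dynamic with SetTensorToDynamic() and Eval() resizes it through context->ResizeTensor() before writing. A trimmed sketch of that split, with a placeholder shape computation; the kernel_util.h helpers are the ones visible in the hit above:

#include "tensorflow/lite/c/c_api_internal.h"   // TfLiteContext, TfLiteIntArray (header name in this tree)
#include "tensorflow/lite/kernels/kernel_util.h"

namespace {

TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
  TfLiteTensor* output_tensor = tflite::GetOutput(context, node, 0);
  // Output shape is only known at Eval time, so defer allocation.
  tflite::SetTensorToDynamic(output_tensor);
  return kTfLiteOk;
}

TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
  TfLiteTensor* output_tensor = tflite::GetOutput(context, node, 0);
  if (tflite::IsDynamicTensor(output_tensor)) {
    TfLiteIntArray* output_size = TfLiteIntArrayCreate(1);
    output_size->data[0] = 4;  // placeholder for the runtime-computed length
    TF_LITE_ENSURE_OK(
        context, context->ResizeTensor(context, output_tensor, output_size));
  }
  // ... fill the resized buffer, e.g. via GetTensorData<float>(output_tensor) ...
  return kTfLiteOk;
}

}  // namespace
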
/external/tensorflow/tensorflow/contrib/mpi_collectives/
mpi_ops.cc
430 Tensor* output_tensor; in PerformCollectiveOp() local
451 output_tensor = record.out_t; in PerformCollectiveOp()
473 context, input_tensor, sizes_vec, output_tensor) in PerformCollectiveOp()
475 context, input_tensor, sizes_vec, output_tensor); in PerformCollectiveOp()
478 context, input_tensor, sizes_vec, output_tensor) in PerformCollectiveOp()
480 sizes_vec, output_tensor); in PerformCollectiveOp()
483 context, input_tensor, sizes_vec, output_tensor) in PerformCollectiveOp()
485 context, input_tensor, sizes_vec, output_tensor); in PerformCollectiveOp()
492 context, input_tensor, &temp_tensor, output_tensor) in PerformCollectiveOp()
494 context, input_tensor, &temp_tensor, output_tensor); in PerformCollectiveOp()
[all …]
/external/tensorflow/tensorflow/contrib/boosted_trees/lib/models/
multiple_additive_trees_test.cc
58 auto output_tensor = AsTensor<float>({9.0f, 23.0f}, {2, 1}); in TEST_F() local
59 auto output_matrix = output_tensor.matrix<float>(); in TEST_F()
94 auto output_tensor = AsTensor<float>({0.0f, 0.0f}, {2, 1}); in TEST_F() local
95 auto output_matrix = output_tensor.matrix<float>(); in TEST_F()
191 auto output_tensor = AsTensor<float>({0.0f, 0.0f, 0.0f, 0.0f}, {2, 2}); in TEST_F() local
192 auto output_matrix = output_tensor.matrix<float>(); in TEST_F()
277 auto output_tensor = in TEST_F() local
279 auto output_matrix = output_tensor.matrix<float>(); in TEST_F()
/external/tensorflow/tensorflow/lite/tools/optimize/
subgraph_quantizer.cc
275 auto output_tensor = subgraph_->tensors[op->outputs[0]].get(); in PropagateMinMaxForAvgAndMaxPool() local
276 if (output_tensor->type != TensorType_FLOAT32) { in PropagateMinMaxForAvgAndMaxPool()
305 output_tensor->quantization = std::move(quant_params); in PropagateMinMaxForAvgAndMaxPool()
306 output_tensor->type = TensorType_INT8; in PropagateMinMaxForAvgAndMaxPool()
319 auto output_tensor = subgraph_->tensors[op->outputs[0]].get(); in AsymmetricQuantizeSoftmax() local
320 if (output_tensor->type != TensorType_FLOAT32) { in AsymmetricQuantizeSoftmax()
325 output_tensor->type = TensorType_INT8; in AsymmetricQuantizeSoftmax()
326 output_tensor->quantization->scale = {1.0f / 256.0f}; in AsymmetricQuantizeSoftmax()
327 output_tensor->quantization->zero_point = {-128}; in AsymmetricQuantizeSoftmax()
345 auto output_tensor = subgraph_->tensors[op->outputs[output_idx]].get(); in AsymmetricQuantizeInputsAndOutputs() local
[all …]
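
subgraph_quantizer.cc manipulates the flatbuffer object API directly: it looks the output tensor up through the subgraph's tensor table, then rewrites its type and quantization parameters in place. A small sketch of the softmax case shown above, assuming the tensor already carries an allocated quantization field as it does at the original call site:

#include "tensorflow/lite/schema/schema_generated.h"

// Stamp the fixed int8 softmax output parameters onto a tensor, mirroring
// the AsymmetricQuantizeSoftmax lines above. Assumes output_tensor->quantization
// is already allocated, as it is at that call site.
void MarkSoftmaxOutputInt8(tflite::SubGraphT* subgraph,
                           const tflite::OperatorT* op) {
  tflite::TensorT* output_tensor = subgraph->tensors[op->outputs[0]].get();
  output_tensor->type = tflite::TensorType_INT8;
  output_tensor->quantization->scale = {1.0f / 256.0f};
  output_tensor->quantization->zero_point = {-128};
}
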
/external/tensorflow/tensorflow/compiler/tf2tensorrt/convert/
convert_nodes.cc
1156 const nvinfer1::ITensor** output_tensor) { in TransposeTensor() argument
1189 *output_tensor = layer->getOutput(0); in TransposeTensor()
1749 const nvinfer1::ITensor* output_tensor = layer->getOutput(0); in BinaryTensorOpWeight() local
1753 const_cast<nvinfer1::ITensor*>(output_tensor), permutation, in BinaryTensorOpWeight()
1754 &output_tensor)); in BinaryTensorOpWeight()
1759 TRT_TensorOrWeights(const_cast<nvinfer1::ITensor*>(output_tensor))); in BinaryTensorOpWeight()
1922 const nvinfer1::ITensor* output_tensor = conv_layer->getOutput(0); in ConvertConv2DHelper() local
1927 const_cast<nvinfer1::ITensor*>(output_tensor), {0, 2, 3, 1}, in ConvertConv2DHelper()
1928 &output_tensor)); in ConvertConv2DHelper()
1931 TRT_TensorOrWeights(const_cast<nvinfer1::ITensor*>(output_tensor))); in ConvertConv2DHelper()
[all …]
/external/tensorflow/tensorflow/python/tpu/
tensor_tracer.py
165 for output_tensor in outputs:
167 tensor_tracepoint(output_tensor, '%s_%d' % (checkpoint_name, idx))
616 for output_tensor in op.outputs:
617 if output_tensor.name not in tensorname_idx_map:
618 tensor_list.append(output_tensor)
619 tensorname_idx_map[output_tensor.name] = len(tensor_list)-1
885 output_tensor = control_flow_ops.cond(mask,
889 output_tensor = constant_op.constant(0.0)
891 output_tensor = array_ops.reshape(output_tensor, [1])
892 return output_tensor
[all …]
/external/tensorflow/tensorflow/core/user_ops/
fact.cc
33 tensorflow::Tensor* output_tensor = nullptr; in Compute() local
35 0, tensorflow::TensorShape(), &output_tensor)); in Compute()
37 auto output = output_tensor->template scalar<string>(); in Compute()
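
fact.cc is the stock user-op example: a kernel with no inputs that allocates a scalar string output and writes a message into it. A self-contained sketch of an op of that shape, with a hypothetical name; the REGISTER_OP / REGISTER_KERNEL_BUILDER lines are the standard registration macros, which the snippet above does not show:

#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/op_kernel.h"

// Hypothetical op name; shape function omitted for brevity.
REGISTER_OP("HelloWorld").Output("greeting: string");

class HelloWorldOp : public tensorflow::OpKernel {
 public:
  explicit HelloWorldOp(tensorflow::OpKernelConstruction* context)
      : OpKernel(context) {}

  void Compute(tensorflow::OpKernelContext* context) override {
    // Allocate a scalar string output, exactly as fact.cc does above.
    tensorflow::Tensor* output_tensor = nullptr;
    OP_REQUIRES_OK(context,
                   context->allocate_output(0, tensorflow::TensorShape(),
                                            &output_tensor));
    output_tensor->scalar<tensorflow::string>()() = "hello from a user op";
  }
};

REGISTER_KERNEL_BUILDER(
    Name("HelloWorld").Device(tensorflow::DEVICE_CPU), HelloWorldOp);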
