/external/tensorflow/tensorflow/core/kernels/mkl/

mkl_cwise_ops_common.cc
    48:   auto out = context->mutable_output(0);   in Compute()
    54:   out = context->mutable_output(0);   in Compute()

mkl_input_conversion_op.cc
    280:  << context->mutable_output(kInputIndex_0)->shape().DebugString()   in Compute()
    282:  << context->mutable_output(kInputIndex_1)->shape().DebugString();   in Compute()

/external/tensorflow/tensorflow/c/kernels/

bitcast_op_test.cc
    71:   ASSERT_EQ(expected_shape, ctx.mutable_output(0)->shape())   in TestBitcastOp()
    72:   << ctx.mutable_output(0)->shape().DebugString();   in TestBitcastOp()

summary_op_test.cc
    89:   &summary, ctx.mutable_output(0)->scalar<tstring>()()));   in TestScalarSummaryOp()

/external/tensorflow/tensorflow/core/kernels/

gpu_utils.cc
    143:  *instr.mutable_output() = output_desc.ToProto(element_type);   in LogConvAutotuneResults()
    186:  *instr.mutable_output() = output_desc.ToProto(element_type);   in LogFusedConvForwardAutotuneResults()

random_shuffle_op.cc
    86:   auto vec = context->mutable_output(0)->vec<T>();   in Compute()

save_restore_tensor.cc
    416:  if (dtypes[i] != context->mutable_output(i)->dtype()) {   in RestoreTensorsV2()
    420:  DataTypeString(context->mutable_output(i)->dtype()));   in RestoreTensorsV2()

parse_tensor_test.cc
    67:   *parse_output = *ctx.mutable_output(0);   in ParseSerializedWithNodeDef()

collective_ops.cc
    175:  if (c->mutable_output(0) == nullptr) {   in ComputeAsyncImpl()
    280:  if (c->mutable_output(0) == nullptr) {   in ComputeAsyncImpl()
    355:  if (c->mutable_output(0) == nullptr) {   in ComputeAsyncImpl()
    432:  if (c->mutable_output(0) == nullptr) {   in ComputeAsyncImpl()

nn_ops_test.cc
    806:  state.SetItemsProcessed(context->mutable_output(0)->NumElements() *   in BM_LRNFloat()
    886:  state.SetItemsProcessed(avgpool_context->mutable_output(0)->NumElements() *   in BM_AvgPool()
    989:  state.SetItemsProcessed(avgpool_context->mutable_output(0)->NumElements() *   in BM_AvgPoolBk()
    1080: state.SetItemsProcessed(maxpool_context->mutable_output(0)->NumElements() *   in BM_MaxPool()
    1254: state.SetItemsProcessed(relu_context->mutable_output(0)->NumElements() *   in BM_ReluFloat()
    1326: state.SetItemsProcessed(softplus_context->mutable_output(0)->NumElements() *   in BM_SoftplusFloat()

ops_testutil.cc
    198:  Tensor* output = context_->mutable_output(output_index);   in GetOutput()

collective_nccl_test.cc
    323:  CHECK_EQ(output_tensor_ptr, ctx.mutable_output(0));   in RunReduce()
    342:  CHECK(output_.CopyFrom(*ctx.mutable_output(0), input_.shape()));   in RunReduce()

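The collective_ops.cc and random_shuffle_op.cc hits above show the two most common kernel-side idioms for this accessor: probing mutable_output(0) for nullptr to find out whether the output slot has already been produced (for example, forwarded from an input), and writing results through the mutable output's Eigen view. Below is a minimal sketch combining both, assuming a 1-D float input; the FillSequenceOp kernel itself is hypothetical and exists only to illustrate the calls.

```cpp
// Hypothetical kernel, not part of TensorFlow; it only demonstrates the
// mutable_output(int) idioms referenced above.
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"

namespace tensorflow {

class FillSequenceOp : public OpKernel {
 public:
  explicit FillSequenceOp(OpKernelConstruction* ctx) : OpKernel(ctx) {}

  void Compute(OpKernelContext* context) override {
    const Tensor& input = context->input(0);  // assumed to be a 1-D float tensor

    // As in collective_ops.cc: mutable_output(0) stays nullptr until the
    // output has been set, so allocate it only if nothing produced it yet.
    if (context->mutable_output(0) == nullptr) {
      Tensor* unused = nullptr;
      OP_REQUIRES_OK(context,
                     context->allocate_output(0, input.shape(), &unused));
    }

    // As in random_shuffle_op.cc: write through the output's Eigen vector view.
    auto vec = context->mutable_output(0)->vec<float>();
    for (int64_t i = 0; i < vec.size(); ++i) {
      vec(i) = static_cast<float>(i);
    }
  }
};

}  // namespace tensorflow
```
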
/external/tensorflow/tensorflow/security/advisory/

tfsa-2020-026.md
    19:   outputs->push_back(Tensor(*context.mutable_output(i)));

/external/tensorflow/tensorflow/core/common_runtime/

ring_gatherer_test.cc
    484:  CHECK_EQ(output_tensor_ptr, ctx.mutable_output(0));   in DoGather()
    499:  CHECK(output_tensor_.CopyFrom(*ctx.mutable_output(0),   in DoGather()
    500:  ctx.mutable_output(0)->shape()));   in DoGather()

ring_reducer_test.cc
    516:  CHECK_EQ(output_tensor_ptr, ctx.mutable_output(0));   in DoReduce()
    532:  CHECK(tensor_.CopyFrom(*ctx.mutable_output(0), tensor_.shape()));   in DoReduce()

base_collective_executor.cc
    299:  Tensor* output = ctx->mutable_output(0);   in ExecuteAsync()

/external/tensorflow/tensorflow/lite/delegates/coreml/builders/

op_builder.cc
    219:  *layer_->mutable_output()->Add() = tensor_id.ToString();   in AddOutput()

/external/tensorflow/tensorflow/compiler/tf2tensorrt/kernels/

trt_engine_resource_ops_test.cc
    122:  context_->mutable_output(0)->scalar<ResourceHandle>()();   in TEST_F()

/external/tensorflow/tensorflow/core/kernels/mlir_generated/

base_unary_ops_test.h
    67:   void* result_ptr_on_device = context_->mutable_output(0)->data();   in RunAndExpectResult()

/external/tensorflow/tensorflow/core/framework/

op_kernel.h
    1057: Status mutable_output(StringPiece name, Tensor** tensor);
    1211: Tensor* mutable_output(int index);
    1622: inline Tensor* OpKernelContext::mutable_output(int index) {   in mutable_output() function
    1695: return ctx_->mutable_output(start_ + i);

op_kernel.cc
    748:  if (mutable_output(index) != nullptr) {   in allocate_output()
    751:  " mutable_output(index) = ", mutable_output(index),   in allocate_output()
    1009: Status OpKernelContext::mutable_output(StringPiece name, Tensor** tensor) {   in mutable_output() function in tensorflow::OpKernelContext
    1012: *tensor = mutable_output(index);   in mutable_output()

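op_kernel.h declares the two overloads visible above: an index-based accessor that returns a Tensor* (nullptr while the slot is unset, which is exactly what allocate_output checks at line 748), and a name-based accessor that resolves an output name to an index and reports failures through a Status (lines 1009-1012). The sketch below shows a kernel calling both, assuming its op registration names the single output "out"; the kernel itself is hypothetical.

```cpp
// Hypothetical kernel; it exercises only the two OpKernelContext::mutable_output
// overloads declared in op_kernel.h above.
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/platform/logging.h"

namespace tensorflow {

class TouchOutputOp : public OpKernel {
 public:
  explicit TouchOutputOp(OpKernelConstruction* ctx) : OpKernel(ctx) {}

  void Compute(OpKernelContext* context) override {
    // Allocate output 0 with the same shape as input 0.
    Tensor* out = nullptr;
    OP_REQUIRES_OK(context, context->allocate_output(
                                0, context->input(0).shape(), &out));

    // Index-based overload: returns whatever allocate_output/set_output stored
    // for that slot, or nullptr if the slot is still empty.
    Tensor* by_index = context->mutable_output(0);
    CHECK_EQ(by_index, out);

    // Name-based overload: looks up the output named in the op registration
    // ("out" is an assumption here) and returns a Status instead of a pointer.
    Tensor* by_name = nullptr;
    OP_REQUIRES_OK(context, context->mutable_output("out", &by_name));
    CHECK_EQ(by_name, out);
  }
};

}  // namespace tensorflow
```

The name-based form is the one op_kernel.cc implements at line 1009: it resolves the name to an index and then delegates to the index-based form at line 1012.
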
/external/tensorflow/tensorflow/core/kernels/data/

map_defun_op_test.cc
    259:  TF_EXPECT_OK(ExpectEqual(*context->mutable_output(i),   in TEST_P()

/external/tensorflow/tensorflow/core/common_runtime/device/

device_event_mgr_test.cc
    419:  const int64 return_bytes = ctx->mutable_output(0)->TotalBytes();   in DoAddChain()
    420:  se::DeviceMemoryBase gpu_src_ptr(DMAHelper::base(ctx->mutable_output(0)),   in DoAddChain()

/external/tensorflow/tensorflow/compiler/xla/service/

service.cc
    709:  *response.mutable_output() = output;   in ExecuteGraphParallel()
    880:  *result->mutable_output(),   in Execute()
    908:  *result->mutable_output() = execution->result();   in WaitForExecution()

/external/tensorflow/tensorflow/core/common_runtime/eager/

kernel_and_device.cc
    310:  const auto* output_tensor = context.mutable_output(i);   in Run()

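The kernel_and_device.cc hit, like the tfsa-2020-026.md and map_defun_op_test.cc ones earlier, reads outputs back out of the context after the kernel has run. A sketch of that harness-side pattern follows; CollectOutputs is a hypothetical helper, not a TensorFlow API.

```cpp
// Hypothetical helper; assumes `context` has already run a kernel's Compute().
#include <vector>

#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"

namespace tensorflow {

// Collects the kernel's outputs after execution, the way kernel_and_device.cc
// and tfsa-2020-026.md above read them back through mutable_output(i).
std::vector<Tensor> CollectOutputs(OpKernelContext* context) {
  std::vector<Tensor> outputs;
  outputs.reserve(context->num_outputs());
  for (int i = 0; i < context->num_outputs(); ++i) {
    const Tensor* t = context->mutable_output(i);
    // A slot can still be empty if the kernel never produced that output.
    if (t != nullptr) {
      outputs.push_back(*t);  // Tensor copy; shares the underlying buffer.
    }
  }
  return outputs;
}

}  // namespace tensorflow
```

Copying the Tensor rather than holding the raw pointer matters here: the pointers returned by mutable_output(i) are owned by the OpKernelContext and do not outlive it, while a Tensor copy keeps the buffer alive through reference counting.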