
Searched refs:dst_tensor (Results 1 – 10 of 10) sorted by relevance

/external/tensorflow/tensorflow/core/kernels/
mkl_aggregate_ops.cc
81 Tensor* dst_tensor = nullptr; in Compute() local
84 AllocateOutputSetMklShape(ctx, output_idx, &dst_tensor, in Compute()
88 dst_tensor->scalar<T>()() = std::plus<float>{}(user_i1, user_i2); in Compute()
95 Tensor* dst_tensor = nullptr; in Compute() local
98 AllocateOutputSetMklShape(ctx, output_idx, &dst_tensor, in Compute()
213 Tensor* dst_tensor = nullptr; in Compute() local
236 AllocateOutputSetMklShape(ctx, output_idx, &dst_tensor, output_tf_shape, in Compute()
238 dst.SetUsrMemDataHandle(dst_tensor); in Compute()
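
The mkl_aggregate_ops.cc hits show the MKL AddN kernel allocating dst_tensor and writing the sum of two scalar inputs via std::plus. A minimal standalone sketch of that pattern, with a plain std::vector standing in for the tensorflow::Tensor that AllocateOutputSetMklShape would provide (all names below are illustrative, not TensorFlow API):

#include <functional>
#include <iostream>
#include <vector>

int main() {
  float user_i1 = 1.5f, user_i2 = 2.25f;  // the two scalar inputs
  std::vector<float> dst(1);              // stands in for dst_tensor's buffer
  // The kernel writes dst_tensor->scalar<T>()() = std::plus<float>{}(user_i1, user_i2).
  dst[0] = std::plus<float>{}(user_i1, user_i2);
  std::cout << dst[0] << "\n";            // prints 3.75
  return 0;
}
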
mkl_relu_op.cc
521 Tensor* dst_tensor = nullptr; in Compute() local
525 tf_shape_dst, &dst_tensor)); in Compute()
528 T* dst_data = dst_tensor->flat<T>().data(); in Compute()
734 Tensor* dst_tensor = nullptr; in Compute_Scalar() local
739 AllocateOutputSetMklShape(context, dst_index, &dst_tensor, in Compute_Scalar()
741 void* out_o = static_cast<void*>(dst_tensor->flat<T>().data()); in Compute_Scalar()
797 Tensor* dst_tensor = nullptr; in Compute_Scalar() local
802 AllocateOutputSetMklShape(context, dst_index, &dst_tensor, in Compute_Scalar()
804 void* out_o = static_cast<void*>(dst_tensor->flat<T>().data()); in Compute_Scalar()
869 Tensor* dst_tensor = nullptr; in Compute_Scalar() local
[all …]
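
The mkl_relu_op.cc Compute_Scalar hits allocate dst_tensor and write the activation through a raw pointer obtained from dst_tensor->flat<T>().data(). A hedged sketch of that element-wise ReLU with plain buffers in place of the tensor types (ReluScalar is an illustrative name, not a TensorFlow function):

#include <algorithm>
#include <cstddef>
#include <vector>

// dst[i] = max(src[i], 0); the real kernel hands the freshly allocated
// output buffer to the MKL-DNN eltwise primitive instead of looping here.
void ReluScalar(const std::vector<float>& src, std::vector<float>* dst) {
  dst->resize(src.size());
  for (std::size_t i = 0; i < src.size(); ++i) {
    (*dst)[i] = std::max(src[i], 0.0f);
  }
}
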
mkl_fused_batch_norm_op.cc
566 Tensor* dst_tensor = nullptr; in Compute() local
569 &dst_tensor); in Compute()
669 AllocateOutputSetMklShape(context, kDstIndex, &dst_tensor, tf_shape_dst, in Compute()
675 T* dst_data = dst_tensor->flat<T>().data(); in Compute()
737 TensorShape tf_shape_scale, Tensor** dst_tensor) { in HandleEmptyInput() argument
738 CHECK_NOTNULL(dst_tensor); in HandleEmptyInput()
743 AllocateOutputSetMklShape(context, kDstIndex, dst_tensor, tf_shape_src, in HandleEmptyInput()
745 CHECK_NOTNULL(*dst_tensor); in HandleEmptyInput()
746 memset(const_cast<char*>((*dst_tensor)->tensor_data().data()), 0, in HandleEmptyInput()
747 (*dst_tensor)->tensor_data().size()); in HandleEmptyInput()
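
The mkl_fused_batch_norm_op.cc HandleEmptyInput() hits null-check the output slot, allocate it, and then memset the whole buffer to zero. A minimal sketch of that empty-input path, assuming a std::vector as the stand-in for the allocated output tensor (the helper name is illustrative):

#include <cassert>
#include <cstring>
#include <vector>

// Mirrors the hit: CHECK_NOTNULL(dst_tensor), allocate the output, then
// zero the bytes behind (*dst_tensor)->tensor_data().
void HandleEmptyInputSketch(std::vector<float>* dst, std::size_t num_elements) {
  assert(dst != nullptr);
  dst->resize(num_elements);
  std::memset(dst->data(), 0, dst->size() * sizeof(float));
}
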
mkl_concat_op.cc
396 Tensor* dst_tensor = nullptr; in Compute() local
409 AllocateOutputSetMklShape(context, 0, &dst_tensor, tf_shape_dst, in Compute()
411 CHECK_NOTNULL(dst_tensor); in Compute()
415 dst.SetUsrMem(dst_md, dst_tensor); in Compute()
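
The mkl_concat_op.cc hits allocate one dst_tensor large enough for all inputs and point the destination memory at it (dst.SetUsrMem(dst_md, dst_tensor)). A simplified sketch of concatenation along the outermost axis with plain buffers, which is the effect the primitive produces (ConcatOuter is an illustrative name):

#include <cstddef>
#include <cstring>
#include <vector>

// Size the destination to hold every input, then copy each source block
// into the next free region of the destination buffer.
std::vector<float> ConcatOuter(const std::vector<std::vector<float>>& srcs) {
  std::size_t total = 0;
  for (const auto& s : srcs) total += s.size();
  std::vector<float> dst(total);
  std::size_t offset = 0;
  for (const auto& s : srcs) {
    std::memcpy(dst.data() + offset, s.data(), s.size() * sizeof(float));
    offset += s.size();
  }
  return dst;
}
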
mkl_conv_ops.cc
975 Tensor* dst_tensor = nullptr; in Compute() local
979 AllocateOutputSetMklShape(context, kOutputIndex_Dst, &dst_tensor, in Compute()
1086 &dst_tensor); in Compute()
1096 reinterpret_cast<Ttemp_output*>(dst_tensor->flat<Toutput>().data()); in Compute()
/external/tensorflow/tensorflow/lite/kernels/
while.cc
45 TfLiteTensor* dst_tensor = dst_subgraph->tensor(dst_tensor_indices[i]); in CopyTensorsShapeAndType() local
46 dst_tensor->type = src_tensor->type; in CopyTensorsShapeAndType()
63 TfLiteTensor* dst_tensor = dst_subgraph->tensor(dst_tensor_indices[i]); in CopyTensorsData() local
64 TF_LITE_ENSURE_EQ(context, src_tensor->bytes, dst_tensor->bytes); in CopyTensorsData()
65 memcpy(dst_tensor->data.raw, src_tensor->data.raw, src_tensor->bytes); in CopyTensorsData()
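
The while.cc hits copy a tensor between the condition/body subgraphs: CopyTensorsData first checks that the source and destination byte counts match, then memcpy's the raw data. A standalone sketch of that check-then-copy step, with byte vectors standing in for TfLiteTensor buffers (the function name is illustrative):

#include <cstring>
#include <vector>

// Returns false on a size mismatch (the real code reports it through
// TF_LITE_ENSURE_EQ); otherwise copies src's bytes into dst.
bool CopyTensorBytes(const std::vector<char>& src, std::vector<char>* dst) {
  if (src.size() != dst->size()) return false;
  std::memcpy(dst->data(), src.data(), src.size());
  return true;
}
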
/external/tensorflow/tensorflow/core/common_runtime/sycl/
sycl_util.h
34 Tensor const& src_tensor, Tensor* dst_tensor) { in SYCLmemcpy() argument
36 void* dst_ptr = GetBase(dst_tensor); in SYCLmemcpy()
/external/tensorflow/tensorflow/core/common_runtime/
hierarchical_tree_broadcaster.h
76 void DispatchRecv(int subdiv, int src_rank, int dst_rank, Tensor* dst_tensor,
hierarchical_tree_broadcaster.cc
419 int dst_rank, Tensor* dst_tensor, in DispatchRecv() argument
434 col_ctx_->op_ctx->output_alloc_attr(0), dst_tensor, in DispatchRecv()
ring_alg.cc
406 Tensor* dst_tensor = (!rf->second_pass && (col_params_->merge_op != nullptr)) in DispatchRecv() local
414 col_ctx_->op_ctx->output_alloc_attr(0), dst_tensor, in DispatchRecv()
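
The ring_alg.cc hit picks the destination for an incoming chunk: judging from the truncated line, the receive goes into a temporary buffer when this is the first pass and a merge op is present (so the chunk can still be combined with local data), and straight into the output otherwise. A hedged sketch of that selection logic with illustrative names (RingField here is a stand-in struct, not the real TensorFlow type):

#include <vector>

struct RingField {        // stand-in carrying only the flag used in the hit
  bool second_pass = false;
};

// Choose where DispatchRecv should place the incoming data: the scratch
// chunk when it still has to be merged, the output chunk otherwise.
std::vector<float>* ChooseRecvDestination(const RingField& rf, bool has_merge_op,
                                          std::vector<float>* scratch_chunk,
                                          std::vector<float>* output_chunk) {
  return (!rf.second_pass && has_merge_op) ? scratch_chunk : output_chunk;
}
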