/external/tensorflow/tensorflow/core/kernels/
D | mkl_aggregate_ops.cc
      81   Tensor* dst_tensor = nullptr;                                        in Compute() local
      84   AllocateOutputSetMklShape(ctx, output_idx, &dst_tensor,              in Compute()
      88   dst_tensor->scalar<T>()() = std::plus<float>{}(user_i1, user_i2);    in Compute()
      95   Tensor* dst_tensor = nullptr;                                        in Compute() local
      98   AllocateOutputSetMklShape(ctx, output_idx, &dst_tensor,              in Compute()
     213   Tensor* dst_tensor = nullptr;                                        in Compute() local
     236   AllocateOutputSetMklShape(ctx, output_idx, &dst_tensor, output_tf_shape,  in Compute()
     238   dst.SetUsrMemDataHandle(dst_tensor);                                 in Compute()
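Read together, these hits trace one allocation idiom: a null dst_tensor, AllocateOutputSetMklShape() to create output slot output_idx along with its MKL layout metadata, then a direct write into the buffer. A minimal sketch of the scalar-sum case on line 88, assuming an MKL build of TensorFlow (AllocateOutputSetMklShape and MklDnnShape come from tensorflow/core/util/mkl_util.h); the shape setup here is illustrative, not the kernel's exact code:

    // Sketch only. Needs tensorflow/core/util/mkl_util.h and <functional>.
    // Allocates output `output_idx` as a plain (non-MKL-layout) scalar and
    // writes the sum of two scalar inputs, as on line 88 above.
    template <typename T>
    static void SumTwoScalars(OpKernelContext* ctx, int output_idx,
                              const T& user_i1, const T& user_i2) {
      Tensor* dst_tensor = nullptr;
      MklDnnShape mkl_shape_dst;
      mkl_shape_dst.SetMklTensor(false);  // output keeps the plain TF layout
      AllocateOutputSetMklShape(ctx, output_idx, &dst_tensor, TensorShape({}),
                                mkl_shape_dst);
      // std::plus<T> generalizes the float-only functor the kernel uses.
      dst_tensor->scalar<T>()() = std::plus<T>{}(user_i1, user_i2);
    }
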
D | mkl_relu_op.cc
     521   Tensor* dst_tensor = nullptr;                                        in Compute() local
     525   tf_shape_dst, &dst_tensor));                                         in Compute()
     528   T* dst_data = dst_tensor->flat<T>().data();                          in Compute()
     734   Tensor* dst_tensor = nullptr;                                        in Compute_Scalar() local
     739   AllocateOutputSetMklShape(context, dst_index, &dst_tensor,           in Compute_Scalar()
     741   void* out_o = static_cast<void*>(dst_tensor->flat<T>().data());      in Compute_Scalar()
     797   Tensor* dst_tensor = nullptr;                                        in Compute_Scalar() local
     802   AllocateOutputSetMklShape(context, dst_index, &dst_tensor,           in Compute_Scalar()
     804   void* out_o = static_cast<void*>(dst_tensor->flat<T>().data());      in Compute_Scalar()
     869   Tensor* dst_tensor = nullptr;                                        in Compute_Scalar() local
     [all …]
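The Compute_Scalar() hits repeat a single idiom three times: allocate dst_tensor with AllocateOutputSetMklShape(), then take its raw buffer through flat<T>().data(). A sketch of that idiom with a hand-written scalar ReLU body filled in; the real kernel hands the buffer to an MKL-DNN eltwise primitive instead, so max(x, 0) is a stand-in:

    // Sketch only. Needs tensorflow/core/util/mkl_util.h and <algorithm>.
    template <typename T>
    static void ReluScalarSketch(OpKernelContext* context, int dst_index,
                                 const Tensor& src_tensor) {
      Tensor* dst_tensor = nullptr;
      MklDnnShape dnn_shape_dst;
      dnn_shape_dst.SetMklTensor(false);
      AllocateOutputSetMklShape(context, dst_index, &dst_tensor,
                                src_tensor.shape(), dnn_shape_dst);
      // Lines 741/804 above: the output buffer as an untyped pointer.
      void* out_o = static_cast<void*>(dst_tensor->flat<T>().data());
      const T x = src_tensor.flat<T>().data()[0];
      static_cast<T*>(out_o)[0] = std::max(x, static_cast<T>(0));
    }
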
D | mkl_fused_batch_norm_op.cc
     566   Tensor* dst_tensor = nullptr;                                        in Compute() local
     569   &dst_tensor);                                                        in Compute()
     669   AllocateOutputSetMklShape(context, kDstIndex, &dst_tensor, tf_shape_dst,  in Compute()
     675   T* dst_data = dst_tensor->flat<T>().data();                          in Compute()
     737   TensorShape tf_shape_scale, Tensor** dst_tensor) {                   in HandleEmptyInput() argument
     738   CHECK_NOTNULL(dst_tensor);                                           in HandleEmptyInput()
     743   AllocateOutputSetMklShape(context, kDstIndex, dst_tensor, tf_shape_src,  in HandleEmptyInput()
     745   CHECK_NOTNULL(*dst_tensor);                                          in HandleEmptyInput()
     746   memset(const_cast<char*>((*dst_tensor)->tensor_data().data()), 0,    in HandleEmptyInput()
     747   (*dst_tensor)->tensor_data().size());                                in HandleEmptyInput()
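Lines 737-747 outline HandleEmptyInput(): when the batch-norm input has no elements, the kernel still allocates the output tensor and zero-fills it byte-wise. A reconstruction assembled from the matches above (the argument list is abbreviated, and the MklDnnShape setup is assumed rather than quoted):

    // Reassembled sketch of HandleEmptyInput(); error handling elided.
    // Needs tensorflow/core/util/mkl_util.h and <cstring>.
    static void HandleEmptyInputSketch(OpKernelContext* context,
                                       const TensorShape& tf_shape_src,
                                       int kDstIndex, Tensor** dst_tensor) {
      CHECK_NOTNULL(dst_tensor);
      MklDnnShape dnn_shape_dst;
      dnn_shape_dst.SetMklTensor(false);
      AllocateOutputSetMklShape(context, kDstIndex, dst_tensor, tf_shape_src,
                                dnn_shape_dst);
      CHECK_NOTNULL(*dst_tensor);
      // tensor_data() returns a const view; the const_cast on line 746 lets
      // memset zero the freshly allocated buffer in place.
      memset(const_cast<char*>((*dst_tensor)->tensor_data().data()), 0,
             (*dst_tensor)->tensor_data().size());
    }
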
D | mkl_concat_op.cc
     396   Tensor* dst_tensor = nullptr;                                        in Compute() local
     409   AllocateOutputSetMklShape(context, 0, &dst_tensor, tf_shape_dst,     in Compute()
     411   CHECK_NOTNULL(dst_tensor);                                           in Compute()
     415   dst.SetUsrMem(dst_md, dst_tensor);                                   in Compute()
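Here the concat kernel allocates its single output (index 0), then registers the tensor's buffer with MKL-DNN as user memory so the concat primitive writes into the TF tensor directly. A sketch, assuming dst is an MklDnnData<T> wrapper and dst_md the mkldnn::memory::desc for the concatenated result, both of which the real Compute() builds earlier:

    // Sketch: allocate concat output 0 and hand its buffer to MKL-DNN.
    // Needs tensorflow/core/util/mkl_util.h.
    template <typename T>
    static void BindConcatOutput(OpKernelContext* context,
                                 const TensorShape& tf_shape_dst,
                                 const MklDnnShape& dnn_shape_dst,
                                 const mkldnn::memory::desc& dst_md,
                                 MklDnnData<T>* dst, Tensor** dst_tensor) {
      AllocateOutputSetMklShape(context, 0, dst_tensor, tf_shape_dst,
                                dnn_shape_dst);
      CHECK_NOTNULL(*dst_tensor);
      // Line 415 above: the primitive now writes straight into the TF buffer.
      dst->SetUsrMem(dst_md, *dst_tensor);
    }
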
D | mkl_conv_ops.cc
     975   Tensor* dst_tensor = nullptr;                                        in Compute() local
     979   AllocateOutputSetMklShape(context, kOutputIndex_Dst, &dst_tensor,    in Compute()
    1086   &dst_tensor);                                                        in Compute()
    1096   reinterpret_cast<Ttemp_output*>(dst_tensor->flat<Toutput>().data());  in Compute()
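Line 1096 is the interesting match: the quantized conv path allocates dst_tensor with the op's registered output type Toutput, then reinterprets the buffer as Ttemp_output, the type the convolution primitive actually accumulates in. A sketch of that aliasing (the function name is hypothetical), which is only sound when the two types share a byte width, e.g. qint32 over int32:

    // Sketch: view a Toutput-typed buffer as Ttemp_output for the primitive.
    template <typename Toutput, typename Ttemp_output>
    static Ttemp_output* AliasConvOutput(Tensor* dst_tensor) {
      static_assert(sizeof(Toutput) == sizeof(Ttemp_output),
                    "aliasing is only safe for same-width types");
      return reinterpret_cast<Ttemp_output*>(
          dst_tensor->flat<Toutput>().data());
    }
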
/external/tensorflow/tensorflow/lite/kernels/
D | while.cc
      45   TfLiteTensor* dst_tensor = dst_subgraph->tensor(dst_tensor_indices[i]);  in CopyTensorsShapeAndType() local
      46   dst_tensor->type = src_tensor->type;                                 in CopyTensorsShapeAndType()
      63   TfLiteTensor* dst_tensor = dst_subgraph->tensor(dst_tensor_indices[i]);  in CopyTensorsData() local
      64   TF_LITE_ENSURE_EQ(context, src_tensor->bytes, dst_tensor->bytes);    in CopyTensorsData()
      65   memcpy(dst_tensor->data.raw, src_tensor->data.raw, src_tensor->bytes);  in CopyTensorsData()
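Lines 63-65 are the heart of CopyTensorsData(): the TFLite while op moves values between its cond/body subgraphs with a byte-size check followed by a raw memcpy. A reconstruction of that loop from the matches, assuming the tflite::Subgraph API (src_tensor is fetched the same way dst_tensor is on line 63); it is not the file verbatim:

    // Reconstruction of the CopyTensorsData() loop; needs <cstring>,
    // <vector>, and tensorflow/lite/core/subgraph.h.
    TfLiteStatus CopyTensorsDataSketch(
        TfLiteContext* context, Subgraph* src_subgraph,
        const std::vector<int>& src_tensor_indices, Subgraph* dst_subgraph,
        const std::vector<int>& dst_tensor_indices) {
      for (size_t i = 0; i < src_tensor_indices.size(); ++i) {
        const TfLiteTensor* src_tensor =
            src_subgraph->tensor(src_tensor_indices[i]);
        TfLiteTensor* dst_tensor = dst_subgraph->tensor(dst_tensor_indices[i]);
        // Guard before the raw copy: both buffers must be the same size.
        TF_LITE_ENSURE_EQ(context, src_tensor->bytes, dst_tensor->bytes);
        memcpy(dst_tensor->data.raw, src_tensor->data.raw, src_tensor->bytes);
      }
      return kTfLiteOk;
    }
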
/external/tensorflow/tensorflow/core/common_runtime/sycl/
D | sycl_util.h
      34   Tensor const& src_tensor, Tensor* dst_tensor) {                      in SYCLmemcpy() argument
      36   void* dst_ptr = GetBase(dst_tensor);                                 in SYCLmemcpy()
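The two matches bracket SYCLmemcpy(): both tensors are reduced to raw base pointers with GetBase() before the device copy. A sketch of the surrounding function, assuming Eigen::SyclDevice exposes memcpy(dst, src, bytes); the real header may dispatch per dtype instead of copying raw bytes:

    // Sketch of SYCLmemcpy(), not the header verbatim.
    inline void SYCLmemcpySketch(const Eigen::SyclDevice& device,
                                 Tensor const& src_tensor,
                                 Tensor* dst_tensor) {
      const size_t size = src_tensor.TotalBytes();
      void* dst_ptr = GetBase(dst_tensor);  // line 36 above
      const void* src_ptr = GetBase(&src_tensor);
      device.memcpy(dst_ptr, src_ptr, size);
    }
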
/external/tensorflow/tensorflow/core/common_runtime/
D | hierarchical_tree_broadcaster.h
      76   void DispatchRecv(int subdiv, int src_rank, int dst_rank, Tensor* dst_tensor,
D | hierarchical_tree_broadcaster.cc
     419   int dst_rank, Tensor* dst_tensor,                                    in DispatchRecv() argument
     434   col_ctx_->op_ctx->output_alloc_attr(0), dst_tensor,                  in DispatchRecv()
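Between the declaration (header line 76) and the definition (lines 419 and 434), DispatchRecv's role is visible: dst_tensor is the buffer the collective executor fills when the peer at src_rank sends its subdivision, and the output allocator attributes travel with it so the receive lands in memory with the right properties. A schematic only; the executor call is compressed into a comment because RecvFromPeer's real argument list (peer device, task, key, and more) is longer than the matches show:

    // Schematic only: the receive call's argument list is abbreviated.
    void DispatchRecvSketch(int subdiv, int src_rank, int dst_rank,
                            Tensor* dst_tensor, const StatusCallback& done) {
      // Line 434: alloc attributes plus dst_tensor go to the executor, which
      // writes the incoming subdivision into dst_tensor and then calls done.
      // col_ctx_->col_exec->RecvFromPeer(
      //     /*peer device, task, key, ...,*/
      //     col_ctx_->op_ctx->output_alloc_attr(0), dst_tensor, /*...,*/ done);
    }
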
D | ring_alg.cc
     406   Tensor* dst_tensor = (!rf->second_pass && (col_params_->merge_op != nullptr))  in DispatchRecv() local
     414   col_ctx_->op_ctx->output_alloc_attr(0), dst_tensor,                  in DispatchRecv()
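Line 406 packs the ring algorithm's buffer choice into one ternary: during the first pass of a ring reduce that has a merge op, the incoming chunk must land in scratch space so it can be merged with the local data; otherwise (gather, or the second pass) it is received in place. A sketch of the choice, assuming rf points to the per-chunk RingField with chunk/tmp_chunk tensor members:

    // Sketch: pick the receive buffer for this ring step.
    Tensor* dst_tensor =
        (!rf->second_pass && (col_params_->merge_op != nullptr))
            ? &rf->tmp_chunk  // reduce pass: receive to scratch, then merge
            : &rf->chunk;     // gather / second pass: receive in place
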