/external/tensorflow/tensorflow/stream_executor/cuda/
D | cuda_blas.cc |
    2604  int64 stride_c, int batch_count) {  in DoBlasInternalImpl() argument
    2622  stride_b, &beta, GpuMemoryMutable(c), CUDA_R_16F, ldc, stride_c,  in DoBlasInternalImpl()
    2638  reinterpret_cast<__half *>(GpuMemoryMutable(c) + batch * stride_c);  in DoBlasInternalImpl()
    2657  float beta, DeviceMemory<float> *c, int ldc, int64 stride_c,  in DoBlasInternalImpl() argument
    2668  GpuMemory(b), ldb, stride_b, &beta, GpuMemoryMutable(c), ldc, stride_c,  in DoBlasInternalImpl()
    2676  double beta, DeviceMemory<double> *c, int ldc, int64 stride_c,  in DoBlasInternalImpl() argument
    2682  GpuMemoryMutable(c), ldc, stride_c, batch_count);  in DoBlasInternalImpl()
    2691  int64 stride_c, int batch_count) {  in DoBlasInternalImpl() argument
    2699  GpuComplex(GpuMemoryMutable(c)), ldc, stride_c, batch_count);  in DoBlasInternalImpl()
    2708  int64 stride_c, int batch_count) {  in DoBlasInternalImpl() argument
    [all …]
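The cuda_blas.cc matches above cover both a strided-batched cuBLAS call that forwards stride_c directly (line 2622, note the CUDA_R_16F data type) and a per-batch fallback that advances the output pointer by batch * stride_c (line 2638). As a reminder of what that operation computes, here is a minimal reference sketch; ReferenceGemmStridedBatched is an illustrative name, not part of StreamExecutor, and it assumes column-major, non-transposed operands.

    #include <cstdint>

    // Illustrative reference only (not the StreamExecutor API): computes
    // C_i = alpha * A_i * B_i + beta * C_i for i in [0, batch_count), where
    // A_i = a + i * stride_a, B_i = b + i * stride_b, C_i = c + i * stride_c.
    // Matrices are column-major with leading dimensions lda, ldb, ldc.
    void ReferenceGemmStridedBatched(int m, int n, int k, float alpha,
                                     const float* a, int lda, int64_t stride_a,
                                     const float* b, int ldb, int64_t stride_b,
                                     float beta, float* c, int ldc,
                                     int64_t stride_c, int batch_count) {
      for (int batch = 0; batch < batch_count; ++batch) {
        const float* a_i = a + batch * stride_a;
        const float* b_i = b + batch * stride_b;
        float* c_i = c + batch * stride_c;
        for (int col = 0; col < n; ++col) {
          for (int row = 0; row < m; ++row) {
            float acc = 0.0f;
            for (int p = 0; p < k; ++p) {
              acc += a_i[row + p * lda] * b_i[p + col * ldb];
            }
            c_i[row + col * ldc] = alpha * acc + beta * c_i[row + col * ldc];
          }
        }
      }
    }

The fallback loop in the listed code follows the same batch-offset arithmetic; the fast path simply hands stride_a/stride_b/stride_c to the library so all batches are launched at once.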
|
/external/tensorflow/tensorflow/core/kernels/ |
D | conv_ops_using_gemm.cc |
    447  const int64 stride_c = GetTensorDim(strides_, data_format_, 'C');  in Conv2DUsingGemmOp() local
    449  context, stride_n == 1 && stride_c == 1,  in Conv2DUsingGemmOp()
|
D | depthwise_conv_op.cc |
    285  const int64 stride_c = GetTensorDim(strides_, data_format_, 'C');  in DepthwiseConv2dNativeOp() local
    292  context, (stride_n == 1 && stride_c == 1),  in DepthwiseConv2dNativeOp()
|
D | depthwise_conv_grad_op.cc |
    566  const int64 stride_c = GetTensorDim(strides_, data_format_, 'C');  in DepthwiseConv2dNativeBackpropInputOp() local
    573  context, (stride_n == 1 && stride_c == 1),  in DepthwiseConv2dNativeBackpropInputOp()
    1069  const int64 stride_c = GetTensorDim(strides_, data_format_, 'C');  in DepthwiseConv2dNativeBackpropFilterOp() local
    1076  context, (stride_n == 1 && stride_c == 1),  in DepthwiseConv2dNativeBackpropFilterOp()
|
D | conv_ops_fused_image_transform.cc |
    638  const int64 stride_c = GetTensorDim(strides_, FORMAT_NHWC, 'C');  in FusedResizeConv2DUsingGemmOp() local
    640  context, stride_n == 1 && stride_c == 1,  in FusedResizeConv2DUsingGemmOp()
|
D | conv_grad_input_ops.h |
    367  int stride_c = GetTensorDim(strides_, data_format_, 'C');
    371  context, (stride_n == 1 && stride_c == 1),
|
D | pooling_ops_3d.cc |
    673  const int32 stride_c = GetTensorDim(stride_, data_format_, 'C');  in MaxPooling3dGradGradOp() local
    674  OP_REQUIRES(context, ksize_c == 1 && stride_c == 1,  in MaxPooling3dGradGradOp()
|
D | conv_grad_filter_ops.cc |
    271  int stride_c = GetTensorDim(strides_, data_format_, 'C');  in Conv2DBackpropFilterOp() local
    275  context, (stride_n == 1 && stride_c == 1),  in Conv2DBackpropFilterOp()
|
D | conv_ops.cc |
    372  const int64 stride_c = GetTensorDim(strides, data_format, 'C');  in InitConv2DParameters() local
    376  stride_n == 1 && stride_c == 1,  in InitConv2DParameters()
|
/external/tensorflow/tensorflow/core/kernels/mkl/ |
D | mkl_conv_ops.h |
    570  int stride_c = GetTensorDim(strides_, data_format_, 'C');  in MklConvBackpropCommonOp() local
    572  context, (stride_n == 1 && stride_c == 1),  in MklConvBackpropCommonOp()
|
D | mkl_conv_ops.cc |
    450  const int64 stride_c = GetTensorDim(strides_, data_format_, 'C');  in MklConvOp() local
    452  context, stride_n == 1 && stride_c == 1,  in MklConvOp()
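The convolution kernel entries above all share one pattern: read the configured stride for the batch ('N') and channel ('C') dimensions of the data format and reject anything other than 1, since only spatial strides are supported (the 3-D pooling grad-grad entry additionally requires the channel window size ksize_c to be 1). A simplified sketch of that pattern follows; GetStrideFor and ValidateConvStrides are illustrative stand-ins, not TensorFlow's GetTensorDim / OP_REQUIRES machinery.

    #include <stdexcept>
    #include <string>
    #include <vector>

    // Strides are stored per dimension of the data format string (e.g. "NHWC"),
    // so the stride for a given dimension is looked up by that dimension's
    // position in the format. Assumes `dim` is present in `data_format`.
    int64_t GetStrideFor(const std::vector<int64_t>& strides,
                         const std::string& data_format, char dim) {
      return strides[data_format.find(dim)];
    }

    // Mirrors the shape of the checks in the kernels listed above: strides in
    // the batch and channel dimensions are not supported, so both must be 1.
    void ValidateConvStrides(const std::vector<int64_t>& strides,
                             const std::string& data_format) {
      const int64_t stride_n = GetStrideFor(strides, data_format, 'N');
      const int64_t stride_c = GetStrideFor(strides, data_format, 'C');
      if (!(stride_n == 1 && stride_c == 1)) {
        throw std::invalid_argument(
            "Strides in the batch and depth dimensions are not supported.");
      }
    }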
|
/external/tensorflow/tensorflow/stream_executor/ |
D | blas.h |
    231  int64 stride_c = 0;  member
    1200  int64 stride_c, int batch_count) = 0;
    1205  float beta, DeviceMemory<float> *c, int ldc, int64 stride_c,
    1211  double beta, DeviceMemory<double> *c, int ldc, int64 stride_c,
    1219  int64 stride_c, int batch_count) = 0;
    1226  int64 stride_c, int batch_count) = 0;
    2169  DeviceMemory<Eigen::half> *c, int ldc, int64 stride_c, int batch_count); \
    2175  int64 stride_c, int batch_count); \
    2181  DeviceMemory<double> *c, int ldc, int64 stride_c, int batch_count); \
    2188  int64 stride_c, int batch_count); \
    [all …]
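The blas.h entry above collects the pure-virtual strided-batched GEMM declarations for each element type (the backslash-terminated lines belong to a macro), plus a stride_c struct member that defaults to 0. For densely packed batches the strides passed to these overloads are simply the per-matrix element counts; the helper below is an illustrative sketch of that common choice, not anything defined in blas.h.

    #include <cstdint>

    // Typical stride choice when the batches of A, B, and C are densely packed
    // in one buffer each: the stride is the element count of a single matrix,
    // so matrix i starts exactly where matrix i-1 ends. Some implementations
    // also accept a zero stride on an input operand to broadcast one matrix
    // across the whole batch.
    struct GemmBatchStrides {
      int64_t stride_a;
      int64_t stride_b;
      int64_t stride_c;
    };

    GemmBatchStrides PackedStrides(int64_t m, int64_t n, int64_t k) {
      return GemmBatchStrides{/*stride_a=*/m * k,   // A_i is m x k
                              /*stride_b=*/k * n,   // B_i is k x n
                              /*stride_c=*/m * n};  // C_i is m x n
    }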
|
D | stream.cc |
    4220  float beta, DeviceMemory<Eigen::half> *c, int ldc, int64 stride_c,  in ThenBlasGemmStridedBatched() argument
    4225  PARAM(stride_c), PARAM(batch_count));  in ThenBlasGemmStridedBatched()
    4234  c, ldc, stride_c, batch_count);  in ThenBlasGemmStridedBatched()
    4241  float beta, DeviceMemory<float> *c, int ldc, int64 stride_c,  in ThenBlasGemmStridedBatched() argument
    4246  PARAM(stride_c), PARAM(batch_count));  in ThenBlasGemmStridedBatched()
    4255  c, ldc, stride_c, batch_count);  in ThenBlasGemmStridedBatched()
    4262  double beta, DeviceMemory<double> *c, int ldc, int64 stride_c,  in ThenBlasGemmStridedBatched() argument
    4267  PARAM(stride_c), PARAM(batch_count));  in ThenBlasGemmStridedBatched()
    4276  c, ldc, stride_c, batch_count);  in ThenBlasGemmStridedBatched()
    4285  int64 stride_c, int batch_count) {  in ThenBlasGemmStridedBatched() argument
    [all …]
|
D | stream.h |
    1478  int64 stride_c, int batch_count);
    1483  float beta, DeviceMemory<float> *c, int ldc, int64 stride_c,
    1489  double beta, DeviceMemory<double> *c, int ldc, int64 stride_c,
    1497  int64 stride_c, int batch_count);
    1504  int64 stride_c, int batch_count);
|
/external/tensorflow/tensorflow/stream_executor/rocm/ |
D | rocm_blas.cc |
    2566  int64 stride_c, int batch_count) {  in DoBlasGemmStridedBatched() argument
    2579  reinterpret_cast<rocblas_half *>(GpuMemoryMutable(c)), ldc, stride_c,  in DoBlasGemmStridedBatched()
    2587  float beta, DeviceMemory<float> *c, int ldc, int64 stride_c,  in DoBlasGemmStridedBatched() argument
    2600  stride_c, batch_count);  in DoBlasGemmStridedBatched()
    2606  double beta, DeviceMemory<double> *c, int ldc, int64 stride_c,  in DoBlasGemmStridedBatched() argument
    2619  stride_c, batch_count);  in DoBlasGemmStridedBatched()
    2627  int64 stride_c, int batch_count) {  in DoBlasGemmStridedBatched() argument
    2633  complex_cast(beta), complex_cast(c), ldc, stride_c,  in DoBlasGemmStridedBatched()
    2642  int64 stride_c, int batch_count) {  in DoBlasGemmStridedBatched() argument
    2648  complex_cast(beta), complex_cast(c), ldc, stride_c,  in DoBlasGemmStridedBatched()
|
/external/tensorflow/tensorflow/lite/kernels/internal/optimized/ |
D | legacy_optimized_ops.h |
    2489  int stride_c = n;  in Conv() local
    2492  stride_a, b, stride_b, 0.0f, c, stride_c);  in Conv()
|
D | optimized_ops.h |
    1301  int stride_c = n;  in Conv() local
    1304  stride_a, b, stride_b, 0.0f, c, stride_c);  in Conv()
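In the two TFLite Conv() entries above, stride_c is not a batch stride: it is the leading dimension (row stride) of the 2-D output matrix in a GEMM-style call over im2col data, which is why a densely packed output gives stride_c = n. The toy row-major GEMM below is illustrative only, not the optimized BLAS call these kernels actually make; the operand layout is simplified (the real kernel's filter matrix is typically stored transposed), and only the role of stride_c as C's row stride carries over directly.

    // Toy illustration of a row-major GEMM where stride_c is the distance in
    // elements between consecutive rows of C. With a densely packed m x n
    // output, stride_c == n. Here A is m x k with row stride stride_a and
    // B is k x n with row stride stride_b (a simplification of the real call).
    void GemmRowMajor(int m, int n, int k, float alpha, const float* a,
                      int stride_a, const float* b, int stride_b, float beta,
                      float* c, int stride_c) {
      for (int row = 0; row < m; ++row) {
        for (int col = 0; col < n; ++col) {
          float acc = 0.0f;
          for (int p = 0; p < k; ++p) {
            acc += a[row * stride_a + p] * b[p * stride_b + col];
          }
          c[row * stride_c + col] =
              alpha * acc + beta * c[row * stride_c + col];
        }
      }
    }

Roughly, in the im2col formulation m corresponds to the flattened output positions, k to the filter volume, and n to the output depth; the listed calls pass beta = 0.0f so the output is overwritten rather than accumulated into.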
|