/external/tensorflow/tensorflow/compiler/xla/service/cpu/ |
D | runtime_fft_impl.h |
       40   int64_t input_batch, int64_t fft_length0, int64_t fft_length1,   in EigenFftC2C() argument
       50   dims[0] = input_batch;   in EigenFftC2C()
       67   int64_t input_batch, int64_t fft_length0, int64_t fft_length1,   in EigenFftR2C() argument
       73   in_dims[0] = input_batch;   in EigenFftR2C()
       75   out_dims[0] = input_batch;   in EigenFftR2C()
      106   int64_t input_batch, int64_t fft_length0, int64_t fft_length1,   in EigenFftC2R() argument
      112   in_dims[0] = input_batch;   in EigenFftC2R()
      114   out_dims[0] = input_batch;   in EigenFftC2R()
      178   int64_t input_batch, int64_t fft_length0,   in EigenFftWithRank() argument
      185   static_cast<complex128*>(operand), input_batch, fft_length0,   in EigenFftWithRank()
    [all …]
|
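Throughout these FFT helpers, dimension 0 of the Eigen tensor is the batch dimension (dims[0] = input_batch, in_dims[0] = input_batch), so each of the input_batch rows is transformed independently. The sketch below only illustrates that batched layout with a naive 1-D DFT loop instead of Eigen's FFT machinery; the function and parameter names are assumptions, not part of the runtime.

    #include <algorithm>
    #include <cmath>
    #include <complex>
    #include <cstdint>
    #include <vector>

    // Naive batched 1-D DFT over a buffer laid out as [input_batch, fft_length],
    // mirroring the "dimension 0 is the batch" convention above. Illustrative only.
    void BatchedDft(std::vector<std::complex<double>>& data,
                    int64_t input_batch, int64_t fft_length) {
      const double kPi = std::acos(-1.0);
      std::vector<std::complex<double>> out(fft_length);
      for (int64_t b = 0; b < input_batch; ++b) {
        std::complex<double>* row = data.data() + b * fft_length;
        for (int64_t k = 0; k < fft_length; ++k) {
          std::complex<double> acc(0.0, 0.0);
          for (int64_t n = 0; n < fft_length; ++n) {
            const double angle =
                -2.0 * kPi * static_cast<double>(k * n) / fft_length;
            acc += row[n] * std::complex<double>(std::cos(angle), std::sin(angle));
          }
          out[k] = acc;
        }
        std::copy(out.begin(), out.end(), row);  // write this batch's spectrum back
      }
    }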
D | runtime_conv_impl.h |
       32   ScalarType* rhs, Eigen::Index input_batch, Eigen::Index input_x,   in EigenConv2DImpl() argument
       43   input(lhs, input_batch, input_x, input_y, input_channels);   in EigenConv2DImpl()
       51   output(out, input_batch, output_x, output_y, kernel_filters);   in EigenConv2DImpl()
       57   input_reshaped_dims[0] = input_batch;   in EigenConv2DImpl()
       64   output_reshaped_dims[0] = input_batch;   in EigenConv2DImpl()
       75   pre_contract_dims[0] = output_y * output_x * input_batch;   in EigenConv2DImpl()
       80   post_contract_dims[0] = input_batch;   in EigenConv2DImpl()
      109   ScalarType* rhs, Eigen::Index input_batch, Eigen::Index input_x,   in EigenConv3DImpl() argument
      125   const ConstTType input(lhs, input_batch, input_x, input_y, input_z,   in EigenConv3DImpl()
      133   output(out, input_batch, output_x, output_y, output_z, kernel_filters);   in EigenConv3DImpl()
    [all …]
|
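In EigenConv2DImpl the batch count stays the outermost dimension through every reshape: it is folded into the im2col-style contraction (pre_contract_dims[0] = output_y * output_x * input_batch) and restored afterwards (post_contract_dims[0] = input_batch). As a point of reference only, here is a hypothetical direct-loop 2-D convolution with the batch dimension leading; the row-major [batch, y, x, channel] layout and all names are assumptions of this sketch, not the Eigen tensor layout used by the real code.

    #include <cstdint>

    // Direct NHWC convolution, VALID padding, stride 1. lhs is
    // [input_batch, input_y, input_x, input_channels], rhs is
    // [kernel_y, kernel_x, input_channels, kernel_filters], out is
    // [input_batch, output_y, output_x, kernel_filters]. Illustrative only.
    void NaiveConv2D(float* out, const float* lhs, const float* rhs,
                     int64_t input_batch, int64_t input_x, int64_t input_y,
                     int64_t input_channels, int64_t kernel_x, int64_t kernel_y,
                     int64_t kernel_filters) {
      const int64_t output_x = input_x - kernel_x + 1;
      const int64_t output_y = input_y - kernel_y + 1;
      for (int64_t b = 0; b < input_batch; ++b) {        // batch is the outer loop
        for (int64_t oy = 0; oy < output_y; ++oy) {
          for (int64_t ox = 0; ox < output_x; ++ox) {
            for (int64_t f = 0; f < kernel_filters; ++f) {
              float acc = 0.f;
              for (int64_t ky = 0; ky < kernel_y; ++ky) {
                for (int64_t kx = 0; kx < kernel_x; ++kx) {
                  for (int64_t c = 0; c < input_channels; ++c) {
                    const int64_t in_idx =
                        ((b * input_y + (oy + ky)) * input_x + (ox + kx)) *
                            input_channels + c;
                    const int64_t k_idx =
                        ((ky * kernel_x + kx) * input_channels + c) *
                            kernel_filters + f;
                    acc += lhs[in_idx] * rhs[k_idx];
                  }
                }
              }
              out[((b * output_y + oy) * output_x + ox) * kernel_filters + f] = acc;
            }
          }
        }
      }
    }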
D | runtime_conv2d_acl.cc |
       31   int64_t input_batch, int64_t input_rows, int64_t input_cols,   in ACLDepthwiseConvImpl() argument
       65   input_batch),   in ACLDepthwiseConvImpl()
       72   input_batch),   in ACLDepthwiseConvImpl()
      128   float* rhs, int64_t input_batch, int64_t input_rows,   in ACLGemmConvImpl() argument
      166   input_batch),   in ACLGemmConvImpl()
      174   input_batch),   in ACLGemmConvImpl()
      235   int64_t input_batch, int64_t input_rows, int64_t input_cols,   in __xla_cpu_runtime_ACLConv2DF32() argument
      251   run_options_ptr, out, lhs, rhs, input_batch, input_rows, input_cols,   in __xla_cpu_runtime_ACLConv2DF32()
      259   if (ACLGemmConvImpl(run_options_ptr, out, lhs, rhs, input_batch, input_rows,   in __xla_cpu_runtime_ACLConv2DF32()
      272   run_options_ptr, out, lhs, rhs, input_batch, input_rows, input_cols,   in __xla_cpu_runtime_ACLConv2DF32()
|
D | runtime_single_threaded_conv2d.cc |
       24   Eigen::half* rhs, int64_t input_batch, int64_t input_rows,   in __xla_cpu_runtime_EigenSingleThreadedConv2DF16() argument
       33   Eigen::DefaultDevice(), out, lhs, rhs, input_batch, input_rows,   in __xla_cpu_runtime_EigenSingleThreadedConv2DF16()
       44   int64_t input_batch, int64_t input_rows, int64_t input_cols,   in __xla_cpu_runtime_EigenSingleThreadedConv2DF32() argument
       53   Eigen::DefaultDevice(), out, lhs, rhs, input_batch, input_rows,   in __xla_cpu_runtime_EigenSingleThreadedConv2DF32()
|
D | runtime_conv2d_mkl.cc |
       53   ScalarType* rhs, int64_t input_batch, int64_t input_rows,   in MKLConvImpl() argument
       70   memory::dims conv1_src_dim = {ToInt(input_batch), ToInt(input_channels),   in MKLConvImpl()
       75   memory::dims conv1_dst_dim = {ToInt(input_batch), ToInt(kernel_filters),   in MKLConvImpl()
      156   int64_t input_batch, int64_t input_rows, int64_t input_cols,   in __xla_cpu_runtime_MKLConv2DF32() argument
      168   run_options_ptr, out, lhs, rhs, input_batch, input_rows, input_cols,   in __xla_cpu_runtime_MKLConv2DF32()
      174   MKLConvImpl(nullptr, out, lhs, rhs, input_batch, input_rows, input_cols,   in __xla_cpu_runtime_MKLConv2DF32()
|
D | runtime_conv2d.cc |
       27   int64_t input_batch, int64_t input_rows, int64_t input_cols,   in __xla_cpu_runtime_EigenConv2DF32() argument
       39   *run_options->intra_op_thread_pool(), out, lhs, rhs, input_batch,   in __xla_cpu_runtime_EigenConv2DF32()
       49   Eigen::half* rhs, int64_t input_batch, int64_t input_rows,   in __xla_cpu_runtime_EigenConv2DF16() argument
       61   *run_options->intra_op_thread_pool(), out, lhs, rhs, input_batch,   in __xla_cpu_runtime_EigenConv2DF16()
|
D | runtime_single_threaded_conv3d.cc |
       24   int64_t input_batch, int64_t input_x, int64_t input_y, int64_t input_z,   in __xla_cpu_runtime_EigenSingleThreadedConv3DF32() argument
       35   Eigen::DefaultDevice(), out, lhs, rhs, input_batch, input_x, input_y,   in __xla_cpu_runtime_EigenSingleThreadedConv3DF32()
       47   Eigen::half* rhs, int64_t input_batch, int64_t input_x, int64_t input_y,   in __xla_cpu_runtime_EigenSingleThreadedConv3DF16() argument
       58   Eigen::DefaultDevice(), out, lhs, rhs, input_batch, input_x, input_y,   in __xla_cpu_runtime_EigenSingleThreadedConv3DF16()
|
D | runtime_conv3d.cc |
       27   int64_t input_batch, int64_t input_x, int64_t input_y, int64_t input_z,   in __xla_cpu_runtime_EigenConv3DF32() argument
       41   *run_options->intra_op_thread_pool(), out, lhs, rhs, input_batch, input_x,   in __xla_cpu_runtime_EigenConv3DF32()
       52   Eigen::half* rhs, int64_t input_batch, int64_t input_x, int64_t input_y,   in __xla_cpu_runtime_EigenConv3DF16() argument
       66   *run_options->intra_op_thread_pool(), out, lhs, rhs, input_batch, input_x,   in __xla_cpu_runtime_EigenConv3DF16()
|
D | runtime_single_threaded_fft.cc |
       23   int32_t double_precision, int32_t fft_rank, int64_t input_batch,   in __xla_cpu_runtime_EigenSingleThreadedFft() argument
       27   static_cast<bool>(double_precision), fft_rank, input_batch,   in __xla_cpu_runtime_EigenSingleThreadedFft()
|
D | runtime_fft.cc |
       27   int32_t double_precision, int32_t fft_rank, int64_t input_batch,   in __xla_cpu_runtime_EigenFft() argument
       34   static_cast<bool>(double_precision), fft_rank, input_batch,   in __xla_cpu_runtime_EigenFft()
|
D | runtime_conv2d.h |
       27   float* lhs, float* rhs, int64_t input_batch, int64_t input_rows,
       38   Eigen::half* out, Eigen::half* lhs, Eigen::half* rhs, int64_t input_batch,
|
D | runtime_single_threaded_conv2d.h |
       27   Eigen::half* out, Eigen::half* lhs, Eigen::half* rhs, int64_t input_batch,
       39   float* lhs, float* rhs, int64_t input_batch, int64_t input_rows,
|
D | runtime_conv3d.h |
       27   Eigen::half* out, Eigen::half* lhs, Eigen::half* rhs, int64_t input_batch,
       40   float* lhs, float* rhs, int64_t input_batch, int64_t input_x,
|
D | runtime_single_threaded_conv3d.h |
       27   Eigen::half* out, Eigen::half* lhs, Eigen::half* rhs, int64_t input_batch,
       40   float* lhs, float* rhs, int64_t input_batch, int64_t input_x,
|
D | runtime_conv2d_acl.h |
       66   float* lhs, float* rhs, int64_t input_batch, int64_t input_rows,
       79   float* lhs, float* rhs, int64_t input_batch, int64_t input_rows,   in __xla_cpu_runtime_ACLConv2DF32() argument
|
D | runtime_single_threaded_fft.h | 26 int64_t input_batch, int64_t fft_length0, int64_t fft_length1,
|
D | runtime_fft.h | 26 int64_t input_batch, int64_t fft_length0, int64_t fft_length1,
|
D | runtime_conv2d_mkl.h | 25 float* lhs, float* rhs, int64_t input_batch, int64_t input_rows,
|
/external/tensorflow/tensorflow/lite/kernels/ |
D | depthwise_conv_test.cc |
      475   const int input_batch = 2;   in BatchPaddingValidTest() local
      485   {input_batch, input_height, input_width, input_depth}},   in BatchPaddingValidTest()
      536   const int input_batch = 4;   in BatchPaddingSameTest() local
      546   {input_batch, input_height, input_width, input_depth}},   in BatchPaddingSameTest()
     1069   const int input_batch = 1;   in TEST_P() local
     1080   {input_batch, input_height, input_width, input_depth},   in TEST_P()
     1142   const int input_batch = 1;   in TEST_P() local
     1153   {input_batch, input_height, input_width, input_depth},   in TEST_P()
     1217   const int input_batch = 2;   in TEST_P() local
     1228   {input_batch, input_height, input_width, input_depth},   in TEST_P()
    [all …]
|
/external/tensorflow/tensorflow/core/kernels/ |
D | quantized_batch_norm_op_test.cc |
       62   const int input_batch = 1;   in TEST_F() local
       67   {input_batch, input_height, input_width, input_depth});   in TEST_F()
      121   TensorShape({input_batch, input_height, input_width, input_depth}));   in TEST_F()
      159   const int input_batch = 1;   in TEST_F() local
      164   {input_batch, input_height, input_width, input_depth});   in TEST_F()
      218   TensorShape({input_batch, input_height, input_width, input_depth}));   in TEST_F()
|
/external/tensorflow/tensorflow/lite/kernels/internal/reference/ |
D | space_to_depth.h |
       41   const int input_batch = input_shape.Dims(0);   in SpaceToDepth() local
       53   TFLITE_DCHECK_EQ(input_batch, output_batch);   in SpaceToDepth()
       55   for (int in_b = 0; in_b < input_batch; ++in_b) {   in SpaceToDepth()
|
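SpaceToDepth reads input_batch from Dims(0), asserts that it matches the output batch, and then processes each batch independently while moving block_size x block_size spatial patches into the channel dimension. A simplified sketch of that index remapping on a plain NHWC float buffer; no TFLite shape types are used and all names are illustrative.

    // SpaceToDepth on an NHWC buffer: each block_size x block_size spatial patch
    // becomes block_size * block_size * input_depth consecutive output channels.
    // input is [input_batch, input_height, input_width, input_depth]; output is
    // [input_batch, input_height / block_size, input_width / block_size,
    //  input_depth * block_size * block_size]. Illustrative only.
    void SpaceToDepthRef(const float* input, float* output, int input_batch,
                         int input_height, int input_width, int input_depth,
                         int block_size) {
      const int output_height = input_height / block_size;
      const int output_width = input_width / block_size;
      const int output_depth = input_depth * block_size * block_size;
      for (int b = 0; b < input_batch; ++b) {
        for (int h = 0; h < input_height; ++h) {
          for (int w = 0; w < input_width; ++w) {
            for (int d = 0; d < input_depth; ++d) {
              const int out_h = h / block_size;
              const int out_w = w / block_size;
              // Position inside the block becomes the high-order channel index.
              const int out_d =
                  ((h % block_size) * block_size + (w % block_size)) * input_depth + d;
              const int in_idx =
                  ((b * input_height + h) * input_width + w) * input_depth + d;
              const int out_idx =
                  ((b * output_height + out_h) * output_width + out_w) * output_depth +
                  out_d;
              output[out_idx] = input[in_idx];
            }
          }
        }
      }
    }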
D | depth_to_space.h |
       39   const int input_batch = input_shape.Dims(0);   in DepthToSpace() local
       51   TFLITE_DCHECK_EQ(input_batch, output_batch);   in DepthToSpace()
|
D | space_to_batch_nd.h |
       80   int input_batch = out_b % input_batch_size;   in SpaceToBatchND() local
       96   Offset(input1_shape, input_batch,   in SpaceToBatchND()
|
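The interesting part in SpaceToBatchND is that the output batch index carries two pieces of information: out_b % input_batch_size recovers the source batch, while out_b / input_batch_size encodes the spatial shift within the block. A small hypothetical helper making that decomposition explicit for the 2-D case; the struct and parameter names are assumptions, not TFLite API.

    // Decompose a SpaceToBatchND output batch index into the source batch and
    // the block offsets it represents. Mirrors the out_b % input_batch_size
    // pattern above; names are illustrative.
    struct BatchDecomposition {
      int input_batch;   // which input batch this output batch reads from
      int shift_height;  // row offset inside the block
      int shift_width;   // column offset inside the block
    };

    BatchDecomposition DecomposeOutputBatch(int out_b, int input_batch_size,
                                            int block_shape_height,
                                            int block_shape_width) {
      BatchDecomposition d;
      d.input_batch = out_b % input_batch_size;
      const int block_index = out_b / input_batch_size;
      d.shift_width = block_index % block_shape_width;
      d.shift_height = block_index / block_shape_width;
      (void)block_shape_height;  // bounds the shift_height range; kept for clarity
      return d;
    }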
/external/tensorflow/tensorflow/compiler/xla/tests/ |
D | convolution_dimension_numbers_test.cc |
       35   int64_t input_batch, int64_t input_feature, int64_t input_first_spatial,   in CreateConvDimensionNumbers() argument
       41   dimension_numbers.set_input_batch_dimension(input_batch);   in CreateConvDimensionNumbers()
|
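CreateConvDimensionNumbers forwards its input_batch argument to ConvolutionDimensionNumbers::set_input_batch_dimension, i.e. the test picks which operand dimension XLA should treat as the batch axis instead of assuming a fixed layout. Below is a hedged sketch of building such dimension numbers for an NHWC input / HWIO kernel / NHWC output; the concrete indices and the include path are assumptions for illustration, not the test's code.

    #include "tensorflow/compiler/xla/xla_data.pb.h"

    // Build ConvolutionDimensionNumbers for an NHWC input, HWIO kernel and NHWC
    // output. The chosen indices (batch = 0, feature = 3, spatial = 1, 2) are
    // only one possible convention; XLA accepts any permutation.
    xla::ConvolutionDimensionNumbers MakeNhwcDimensionNumbers() {
      xla::ConvolutionDimensionNumbers dnums;
      dnums.set_input_batch_dimension(0);
      dnums.add_input_spatial_dimensions(1);
      dnums.add_input_spatial_dimensions(2);
      dnums.set_input_feature_dimension(3);
      dnums.add_kernel_spatial_dimensions(0);
      dnums.add_kernel_spatial_dimensions(1);
      dnums.set_kernel_input_feature_dimension(2);
      dnums.set_kernel_output_feature_dimension(3);
      dnums.set_output_batch_dimension(0);
      dnums.add_output_spatial_dimensions(1);
      dnums.add_output_spatial_dimensions(2);
      dnums.set_output_feature_dimension(3);
      return dnums;
    }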
/external/ComputeLibrary/src/core/NEON/kernels/arm_conv/depthwise/ |
D | depthwise_planar.hpp |
      366   auto input_batch = reinterpret_cast<const TInput *>(input);   in execute_internal() local
      389   auto inptr_row = input_batch + input_i*ld_input_row;   in execute_internal()
      405   input_batch += ld_input_batch;   in execute_internal()
|
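In this Compute Library kernel, input_batch is not a count but a moving base pointer: rows are addressed as input_batch + input_i * ld_input_row, and the pointer advances by ld_input_batch once a batch is finished. A minimal hypothetical sketch of that strided-pointer traversal; the function, Visitor callback, and parameter names are assumptions.

    #include <cstddef>

    // Walk a batched, row-strided buffer with a moving base pointer, the way the
    // planar depthwise kernel steps through batches. Strides are in elements.
    template <typename TInput, typename Visitor>
    void VisitRows(const TInput* input, std::size_t n_batches, std::size_t n_rows,
                   std::size_t ld_input_row, std::size_t ld_input_batch,
                   Visitor visit) {
      const TInput* input_batch = input;            // base pointer for batch 0
      for (std::size_t b = 0; b < n_batches; ++b) {
        for (std::size_t i = 0; i < n_rows; ++i) {
          const TInput* inptr_row = input_batch + i * ld_input_row;
          visit(b, i, inptr_row);                   // hand one row to the caller
        }
        input_batch += ld_input_batch;              // jump to the next image
      }
    }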