/external/tensorflow/tensorflow/compiler/xla/client/lib/
D | pooling_test.cc |
      188  XlaOp out_backprop = ConstantR4FromArray4D<float>(&builder, {{{{1.}}}});  in XLA_TEST_F() local
      192  AvgPoolGrad(out_backprop, {1, 1, 3, 3}, kernel_size, stride,  in XLA_TEST_F()
      205  XlaOp out_backprop =  in XLA_TEST_F() local
      210  AvgPoolGrad(out_backprop, {1, 1, 3, 3}, kernel_size, stride,  in XLA_TEST_F()
      223  XlaOp out_backprop =  in XLA_TEST_F() local
      228  AvgPoolGrad(out_backprop, {1, 1, 3, 3}, kernel_size, stride, {{1, 1}, {1, 1}},  in XLA_TEST_F()
      240  XlaOp out_backprop =  in XLA_TEST_F() local
      245  AvgPoolGrad(out_backprop, {1, 1, 3, 3}, kernel_size, stride, {{1, 1}, {1, 1}},  in XLA_TEST_F()
      255  XlaOp out_backprop =  in XLA_TEST_F() local
      263  AvgPoolGrad(out_backprop, {1, 1, 3, 3}, kernel_size, stride, {{1, 1}, {1, 1}},  in XLA_TEST_F()
      [all …]
|
D | pooling.cc |
      192  XlaOp AvgPoolGrad(XlaOp out_backprop, absl::Span<const int64> gradients_size,  in AvgPoolGrad() argument
      198  XlaBuilder* b = out_backprop.builder();  in AvgPoolGrad()
      208  b->GetShape(out_backprop));  in AvgPoolGrad()
      234  out_backprop, gradients_size, kernel_size, stride, spatial_padding,  in AvgPoolGrad()
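The two entries above show the XLA client-library AvgPoolGrad helper (declared in pooling.cc) and its use from pooling_test.cc. Below is a minimal usage sketch, not taken from either file: the NCHW TensorFormat constructor, the concrete shapes, and the trailing counts_include_padding flag are assumptions made for illustration.

    // Hypothetical usage sketch of xla::AvgPoolGrad; shapes, layout, and the
    // trailing arguments are assumed for illustration, not read from the
    // files listed in this section.
    #include <utility>
    #include <vector>

    #include "tensorflow/compiler/xla/client/lib/pooling.h"
    #include "tensorflow/compiler/xla/client/xla_builder.h"

    xla::XlaOp BuildAvgPoolGrad(xla::XlaOp out_backprop) {
      // Gradient of a 3x3 average pool over a 1x1x3x3 (NCHW) input, stride 1,
      // with one row/column of padding on each side.
      const std::vector<xla::int64> gradients_size = {1, 1, 3, 3};
      const std::vector<xla::int64> kernel_size = {1, 1, 3, 3};
      const std::vector<xla::int64> stride = {1, 1, 1, 1};
      const std::vector<std::pair<xla::int64, xla::int64>> spatial_padding = {
          {1, 1}, {1, 1}};
      // Assumed NCHW layout: batch dim 0, feature dim 1, spatial dims {2, 3}.
      const xla::TensorFormat data_format(/*batch_dimension=*/0,
                                          /*feature_dimension=*/1,
                                          /*spatial_dimensions=*/{2, 3});
      return xla::AvgPoolGrad(out_backprop, gradients_size, kernel_size, stride,
                              spatial_padding, data_format,
                              /*counts_include_padding=*/true);
    }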
|
/external/tensorflow/tensorflow/core/kernels/ |
D | dilation_ops.cc |
      222  const Tensor& out_backprop = context->input(2);  in Compute() local
      238  batch == out_backprop.dim_size(0) &&  in Compute()
      239  out_rows == out_backprop.dim_size(1) &&  in Compute()
      240  out_cols == out_backprop.dim_size(2) &&  in Compute()
      241  depth == out_backprop.dim_size(3),  in Compute()
      257  filter.tensor<T, 3>(), out_backprop.tensor<T, 4>(), stride_rows,  in Compute()
      273  typename TTypes<T, 4>::ConstTensor out_backprop,  in operator ()()
      285  const int output_rows = out_backprop.dimension(1);  in operator ()()
      286  const int output_cols = out_backprop.dimension(2);  in operator ()()
      322  out_backprop(b, h_out, w_out, d);  in operator ()()
      [all …]
|
D | avgpooling_op.cc |
      244  const Tensor& out_backprop = context->input(1);  in Compute() local
      252  OP_REQUIRES(context, out_backprop.dims() == 4,  in Compute()
      254  const int64 out_backprop_batch = out_backprop.dim_size(0);  in Compute()
      255  const int64 out_backprop_rows = out_backprop.dim_size(1);  in Compute()
      256  const int64 out_backprop_cols = out_backprop.dim_size(2);  in Compute()
      257  const int64 out_backprop_depth = out_backprop.dim_size(3);  in Compute()
      294  const T* out_backprop_ptr = out_backprop.flat<T>().data();  in Compute()
      403  const Tensor& out_backprop = context->input(1);  in Compute() local
      411  OP_REQUIRES(context, out_backprop.dims() == 4,  in Compute()
      422  nullptr, nullptr, out_backprop, output_shape,  in Compute()
      [all …]
|
D | depthwise_conv_grad_op.cc |
      55   const Tensor& out_backprop = context->input(2); \
      63   context, out_backprop.dims() == 4, \
      67   context, batch == out_backprop.dim_size(0), \
      85   GetTensorDim(out_backprop.shape(), data_format_, 'H'); \
      92   GetTensorDim(out_backprop.shape(), data_format_, 'W'); \
      104  GetTensorDim(out_backprop.shape(), data_format_, 'C'); \
      183  const T* out_backprop, T* buffer) {  in CopyOutputBackpropRegion() argument
      224  out_backprop + (out_r * args.out_cols + out_c) * args.out_depth;  in CopyOutputBackpropRegion()
      375  const T* out_backprop, const T* depthwise_filter,  in operator ()()
      405  auto shard = [&ctx, &args, &out_backprop, &filter_data, &in_backprop](  in operator ()()
      [all …]
|
D | depthwise_conv_op_gpu.h |
      817   const T* out_backprop,
      870   sum += ldg(out_backprop + out_backprop_offset +
      887   const T* out_backprop,
      948   sum += ldg(out_backprop + out_backprop_offset) *
      964   const T* out_backprop,
      988   args, out_backprop, filter, in_backprop, num_in_backprop));
      995   const T* out_backprop,
      1002  ctx, args, out_backprop, filter, in_backprop, data_format);
      1007  ctx, args, out_backprop, filter, in_backprop, data_format);
      1011  ctx, args, out_backprop, filter, in_backprop, data_format);
      [all …]
|
D | pooling_ops_3d.cc |
      209  const Tensor& tensor_out, const Tensor& out_backprop,  in launch()
      216  for (int64 p = 0; p < out_backprop.dim_size(3); ++p) {  in launch()
      232  for (int64 r = 0; r < out_backprop.dim_size(2); ++r) {  in launch()
      237  for (int64 c = 0; c < out_backprop.dim_size(1); ++c) {  in launch()
      252  src.FillIndicesAndSizes<5>(out_backprop.shape(), &src_indices,  in launch()
      281  out_backprop.tensor<T, 5>().slice(src_indices, src_sizes);  in launch()
      342  const Tensor& out_backprop = context->input(2);  in Compute() local
      347  OP_REQUIRES(context, out_backprop.dims() == 5,  in Compute()
      369  context, tensor_in, tensor_out, out_backprop, window, stride, out,  in Compute()
      384  const Tensor& out_backprop,  in launch()
      [all …]
|
D | dilation_ops_gpu.cu.cc |
      212  typename TTypes<T, 4>::ConstTensor out_backprop,  in operator ()()
      224  const int output_rows = out_backprop.dimension(1);  in operator ()()
      225  const int output_cols = out_backprop.dimension(2);  in operator ()()
      243  input.data(), filter.data(), out_backprop.data(), batch, input_rows,  in operator ()()
      254  typename TTypes<T, 4>::ConstTensor out_backprop,  in operator ()()
      266  const int output_rows = out_backprop.dimension(1);  in operator ()()
      267  const int output_cols = out_backprop.dimension(2);  in operator ()()
      285  out_backprop.data(), batch, input_rows, input_cols, depth, filter_rows,  in operator ()()
|
D | conv_grad_ops_3d.cc |
      220  const Tensor& out_backprop = context->input(2);  in Compute() local
      221  const TensorShape& out_backprop_shape = out_backprop.shape();  in Compute()
      246  out_backprop.tensor<T, 5>(),  // output_backward  in Compute()
      326  const Tensor& out_backprop = context->input(2);  in Compute() local
      327  const TensorShape& out_backprop_shape = out_backprop.shape();  in Compute()
      436  out_backprop.tensor<T, 5>(),  // output_backward  in Compute()
      461  const T* out_backprop_data = out_backprop.template flat<T>().data();  in Compute()
      676  const Tensor& out_backprop = context->input(2);  in Compute() local
      677  const TensorShape& out_backprop_shape = out_backprop.shape();  in Compute()
      708  out_backprop.tensor<T, 5>(),  // output_backward  in Compute()
      [all …]
|
D | bias_op_test.cc |
      28  Tensor out_backprop(DT_FLOAT, TensorShape({d0, d1, d2, d3}));  in BiasAddGrad() local
      29  out_backprop.flat<float>().setRandom();  in BiasAddGrad()
      30  test::graph::Unary(g, "BiasAddGrad", test::graph::Constant(g, out_backprop));  in BiasAddGrad()
|
D | batch_norm_op.h |
      79   typename TTypes<T, 4>::ConstTensor out_backprop,  in operator()
      114  db.device(d) = out_backprop.reshape(rest_by_depth).sum(reduction_axis);  in operator()
      120  scratch2.device(d) = (out_backprop.reshape(rest_by_depth) *  in operator()
      127  out_backprop.reshape(rest_by_depth) * ((scratch1 * gamma)  in operator()
      135  out_backprop.reshape(rest_by_depth) *  in operator()
|
D | conv_grad_filter_ops.cc |
      104  const Tensor& out_backprop, const Tensor& input,  in operator ()()
      112  out_backprop.tensor<T, 4>(), row_stride, col_stride,  in operator ()()
      238  const Tensor& out_backprop = context->input(2);  in Compute() local
      253  input.shape(), filter_shape, out_backprop.shape(),  in Compute()
      284  filter_backprop->tensor<T, 4>(), out_backprop.tensor<T, 4>(),  in Compute()
      343  const T* out_backprop_data = out_backprop.template flat<T>().data();  in Compute()
      492  const Tensor& out_backprop = context->input(2);  in Compute() local
      524  launcher_(context, use_cudnn_, cudnn_use_autotune_, out_backprop, input,  in Compute()
      545  const Tensor& out_backprop, const Tensor& input, int row_dilation,  in operator ()() argument
      566  input.shape(), filter_shape, out_backprop.shape(), dilations,  in Compute()
      [all …]
|
D | conv_grad_input_ops.cc |
      110  const Tensor& out_backprop, const Tensor& filter,  in operator ()()
      118  out_backprop.tensor<T, 4>(), row_stride, col_stride,  in operator ()()
      316  const Tensor& out_backprop = context->input(2);  in Compute() local
      330  input_shape, filter.shape(), out_backprop.shape(),  in Compute()
      365  out_backprop.tensor<T, 4>(), dims.spatial_dims[0].input_size,  in Compute()
      457  const T* out_backprop_data = out_backprop.template flat<T>().data();  in Compute()
      635  const Tensor& out_backprop = context->input(2);  in Compute() local
      661  launcher_(context, use_cudnn_, cudnn_use_autotune_, out_backprop, filter,  in Compute()
      682  const Tensor& out_backprop, const Tensor& filter, int row_dilation,  in operator ()() argument
      705  filter_shape, out_backprop.shape(), dilations, strides, padding,  in operator ()()
      [all …]
|
D | fractional_avg_pool_op.cc |
      240  const Tensor& out_backprop = context->input(1);  in Compute() local
      244  const int64 out_batch = out_backprop.dim_size(0);  in Compute()
      245  const int64 out_rows = out_backprop.dim_size(1);  in Compute()
      246  const int64 out_cols = out_backprop.dim_size(2);  in Compute()
      247  const int64 out_depth = out_backprop.dim_size(3);  in Compute()
      275  ConstEigenMatrixMap out_backprop_mat(out_backprop.flat<T>().data(),  in Compute()
|
D | mkl_conv_ops_test.cc |
      179  Node* out_backprop =  in DefaultConv2DBwdInput() local
      187  .Input(out_backprop)  in DefaultConv2DBwdInput()
      207  Node* out_backprop =  in MklConv2DBwdInput() local
      217  .Input(out_backprop)  in MklConv2DBwdInput()
      242  Node* out_backprop =  in DefaultConv2DBwdFilter() local
      250  .Input(out_backprop)  in DefaultConv2DBwdFilter()
      271  Node* out_backprop =  in MklConv2DBwdFilter() local
      281  .Input(out_backprop)  in MklConv2DBwdFilter()
|
D | batch_norm_op.cc |
      102  const Tensor& out_backprop = context->input(4);  in Compute() local
      116  OP_REQUIRES(context, out_backprop.dims() == 4,  in Compute()
      118  out_backprop.shape().DebugString()));  in Compute()
      156  var.vec<T>(), gamma.vec<T>(), out_backprop.tensor<T, 4>(),  in Compute()
      243  typename TTypes<T, 4>::ConstTensor out_backprop, T variance_epsilon, \
|
D | conv_grad_ops.h |
      177  const Tensor& out_backprop, const Tensor& filter,
      187  const Tensor& out_backprop, const Tensor& input,
      208  const Tensor& out_backprop, const Tensor& input,
|
D | depthwise_conv_op.h |
      72   const T* out_backprop, const T* filter, T* in_backprop,
      79   const T* out_backprop, const T* input, T* filter_backprop,
      94   const T* out_backprop, const T* filter, T* in_backprop,
      101  const T* out_backprop, const T* input, T* filter_backprop,
|
D | dilation_ops.h |
      43  typename TTypes<T, 4>::ConstTensor out_backprop,
      57  typename TTypes<T, 4>::ConstTensor out_backprop,
|
D | cudnn_pooling_gpu.cc |
      123  const Tensor& out_backprop, const TensorShape& tensor_in_shape,  in Compute() argument
      148  ShapeFromFormat(FORMAT_NCHW, out_backprop.shape(), data_format);  in Compute()
      171  transformed_output_backprop = out_backprop;  in Compute()
      185  context->eigen_device<GPUDevice>(), out_backprop.tensor<T, 5>(),  in Compute()
|
D | mkl_avgpooling_op.cc |
      246  const Tensor& out_backprop = MklGetInput(context, 1);  in Compute() local
      262  mkl_context.params.in_dim = out_backprop.dims();  in Compute()
      289  out_backprop.flat<T>().data())),  in Compute()
      296  static_cast<void*>(const_cast<T*>(out_backprop.flat<T>().data())),  in Compute()
      302  static_cast<void*>(const_cast<T*>(out_backprop.flat<T>().data()));  in Compute()
      354  const Tensor& out_backprop = MklGetInput(context, 1);  in MklCreateLayoutsAndPrimitives() local
      368  context, out_backprop.dims() == 4,  in MklCreateLayoutsAndPrimitives()
|
D | pooling_ops_common.cc |
      289  const Tensor* tensor_out, const Tensor& out_backprop,  in Compute() argument
      326  ShapeFromFormat(FORMAT_NCHW, out_backprop.shape(), data_format);  in Compute()
      349  transformed_output_backprop = out_backprop;  in Compute()
      371  context->eigen_device<Device>(), out_backprop.tensor<T, 4>(),  in Compute()
|
/external/tensorflow/tensorflow/core/api_def/base_api/ |
D | api_def_BiasAddGrad.pbtxt |
      4   name: "out_backprop"
      12  1-D with size the feature dimension of `out_backprop`.
      29  It accumulates all the values from out_backprop into the feature dimension.
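Per the api_def excerpt above, BiasAddGrad accumulates every value of out_backprop into the feature dimension. A small illustrative Eigen sketch of that reduction follows; it is not the TensorFlow kernel, and the NHWC layout and the helper name BiasGradNHWC are assumptions made for this example.

    // Illustrative only: for an NHWC out_backprop, the bias gradient is the
    // sum over every dimension except the last (feature) one, giving a 1-D
    // result whose size is the feature dimension.
    #include <unsupported/Eigen/CXX11/Tensor>

    Eigen::Tensor<float, 1> BiasGradNHWC(
        const Eigen::Tensor<float, 4>& out_backprop) {
      const Eigen::array<int, 3> reduce_dims{{0, 1, 2}};  // batch, height, width
      return out_backprop.sum(reduce_dims);               // shape: [feature]
    }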
|
D | api_def_FractionalAvgPoolGrad.pbtxt |
      11  name: "out_backprop"
      55  out_backprop to those indices that form the same pooling cell. Therefore, we
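The api_def excerpt above describes FractionalAvgPoolGrad as routing each out_backprop value back to the input indices that formed the same pooling cell. A rough, self-contained sketch of that scatter for one 2-D plane follows; the function name and the row_seq/col_seq cell-boundary vectors are hypothetical stand-ins for illustration, not the kernel's actual helpers.

    // Illustrative only: distribute each out_backprop value evenly over the
    // input positions of its pooling cell. row_seq/col_seq hold cell
    // boundaries, so cell (r, c) covers rows [row_seq[r], row_seq[r+1]) and
    // cols [col_seq[c], col_seq[c+1]).
    #include <cstdint>
    #include <vector>

    void ScatterFractionalAvgPoolGrad(const std::vector<int64_t>& row_seq,
                                      const std::vector<int64_t>& col_seq,
                                      const float* out_backprop,  // [out_rows, out_cols]
                                      int64_t out_cols,
                                      float* in_backprop,         // [in_rows, in_cols]
                                      int64_t in_cols) {
      for (size_t r = 0; r + 1 < row_seq.size(); ++r) {
        for (size_t c = 0; c + 1 < col_seq.size(); ++c) {
          const int64_t cell_rows = row_seq[r + 1] - row_seq[r];
          const int64_t cell_cols = col_seq[c + 1] - col_seq[c];
          // Each input element of the cell receives an equal share of the
          // incoming gradient for that output element.
          const float grad = out_backprop[r * out_cols + c] /
                             static_cast<float>(cell_rows * cell_cols);
          for (int64_t in_r = row_seq[r]; in_r < row_seq[r + 1]; ++in_r) {
            for (int64_t in_c = col_seq[c]; in_c < col_seq[c + 1]; ++in_c) {
              in_backprop[in_r * in_cols + in_c] += grad;
            }
          }
        }
      }
    }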
|
/external/tensorflow/tensorflow/compiler/tf2xla/kernels/ |
D | pooling_ops.cc |
      327  auto out_backprop = ctx->Input(2);  in Compile() local
      339  out_backprop, init_value, scatter);  in Compile()
      419  auto out_backprop = ctx->Input(1);  in Compile() local
      428  xla::ConvertElementType(out_backprop, xla_reduction_type);  in Compile()
      556  auto out_backprop = ctx->Input(2);  in Compile() local
      566  auto bp_int = xla::BitcastConvertType(out_backprop, xla::U32);  in Compile()
|