Searched refs:bottom_diff (Results 1 – 8 of 8) sorted by relevance
/external/tensorflow/tensorflow/core/kernels/
D | maxpooling_op_gpu.cu.cc
    184  const dtype* top_diff, dtype* bottom_diff) {  in MaxPoolBackwardNoMaskNHWC() argument
    214  CudaAtomicAdd(bottom_diff + n * height * width * channels + maxidx,  in MaxPoolBackwardNoMaskNHWC()
    242  const int bottom_offset, dtype* bottom_diff,  in MaxPoolBackward() argument
    247  CudaAtomicAdd(bottom_diff + offset + mask[index], top_diff[index]);  in MaxPoolBackward()
    271  const dtype* top_diff, dtype* bottom_diff) {  in MaxPoolGradBackwardNoMaskNCHW() argument
    300  bottom_diff[index] = top_diff[n * channels * height * width + maxidx];  in MaxPoolGradBackwardNoMaskNCHW()
    311  const dtype* top_diff, dtype* bottom_diff) {  in MaxPoolGradBackwardNoMaskNHWC() argument
    341  bottom_diff[index] = top_diff[n * height * width * channels + maxidx];  in MaxPoolGradBackwardNoMaskNHWC()
    369  const int bottom_offset, dtype* bottom_diff,  in MaxPoolGradBackward() argument
    374  bottom_diff[index] = top_diff[offset + mask[index]];  in MaxPoolGradBackward()
    [all …]
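The two CudaAtomicAdd matches above are the backward (gradient-scatter) pattern: every pooled output adds its incoming gradient to the input position recorded in the argmax mask. Below is a minimal, hypothetical sketch of that pattern, not TensorFlow's kernel; the parameter names are mine, and a plain grid-stride loop with raw atomicAdd stands in for TensorFlow's helpers.

#include <cstdint>

// Minimal sketch (not TensorFlow's kernel): scatter max-pool gradients back
// to the input through a precomputed argmax mask, one thread per pooled
// output element.  Parameter names are illustrative only.
__global__ void MaxPoolBackwardSketch(const int nthreads,
                                      const float* top_diff,    // gradients w.r.t. the pooled output
                                      const int64_t* mask,      // argmax offset within one batch item
                                      const int top_offset,     // pooled elements per batch item
                                      const int bottom_offset,  // input elements per batch item
                                      float* bottom_diff) {     // gradients w.r.t. the input, pre-zeroed
  for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads;
       index += blockDim.x * gridDim.x) {
    const int n = index / top_offset;  // batch item this pooled element belongs to
    // Overlapping pooling windows can select the same input element, so the
    // scatter has to be atomic.
    atomicAdd(bottom_diff + n * bottom_offset + mask[index], top_diff[index]);
  }
}

Since every thread only adds into bottom_diff, the buffer has to be zero-initialized before the kernel is launched.
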
D | avgpooling_op_gpu.cu.cc
    50   const int pad_l, dtype* const bottom_diff) {  in DEFINE_GPU_KERNELS()
    79   bottom_diff[index] = gradient;  in DEFINE_GPU_KERNELS()
    90   const int pad_l, T* const bottom_diff,  in RunAvePoolBackwardNHWC() argument
    98   stride_w, pad_t, pad_t, bottom_diff));  in RunAvePoolBackwardNHWC()
    108  double* const bottom_diff, const GPUDevice& d);
    114  float* const bottom_diff, const GPUDevice& d);
    120  Eigen::half* const bottom_diff, const GPUDevice& d);
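The average-pooling matches show the opposite iteration order: the match at line 79 writes bottom_diff[index] = gradient;, i.e. one thread per input element sums the contributions of every pooling window that covers it. A rough sketch of that pattern, under simplifying assumptions that are mine (NHWC, no padding, gradient divided by the full kernel area rather than a border-clipped window size):

// Hypothetical sketch of the average-pooling backward pattern: one thread per
// *input* (bottom) element sums the contribution of every pooling window that
// covers it.  NHWC layout, zero padding assumed.
__global__ void AvePoolBackwardNHWCSketch(
    const int nthreads, const float* top_diff,
    const int height, const int width, const int channels,
    const int pooled_height, const int pooled_width,
    const int kernel_h, const int kernel_w,
    const int stride_h, const int stride_w,
    float* bottom_diff) {
  for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads;
       index += blockDim.x * gridDim.x) {
    // Decompose the flat NHWC index of this bottom element.
    const int c = index % channels;
    const int w = (index / channels) % width;
    const int h = (index / channels / width) % height;
    const int n = index / channels / width / height;
    // Range of pooled outputs whose window covers (h, w).
    const int ph_start = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;
    const int ph_end = min(h / stride_h + 1, pooled_height);
    const int pw_start = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;
    const int pw_end = min(w / stride_w + 1, pooled_width);
    float gradient = 0.f;
    const float* top_diff_n =
        top_diff + n * pooled_height * pooled_width * channels;
    for (int ph = ph_start; ph < ph_end; ++ph) {
      for (int pw = pw_start; pw < pw_end; ++pw) {
        // Each pooled output spreads its gradient evenly over its window.
        gradient += top_diff_n[(ph * pooled_width + pw) * channels + c] /
                    static_cast<float>(kernel_h * kernel_w);
      }
    }
    bottom_diff[index] = gradient;
  }
}

Because each thread owns exactly one bottom_diff element, no atomics are needed here, unlike the max-pool scatter above; a full implementation would also account for padding and for windows clipped at the borders.
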
D | pooling_ops_3d_gpu.cu.cc
    37   dtype* bottom_diff) {  in MaxPoolGradBackwardNoMaskNCDHW() argument
    73   bottom_diff[index] =  in MaxPoolGradBackwardNoMaskNCDHW()
    87   dtype* bottom_diff) {  in MaxPoolGradBackwardNoMaskNDHWC() argument
    124  bottom_diff[index] =  in MaxPoolGradBackwardNoMaskNDHWC()
    142  const T* top_diff, T* bottom_diff, const Eigen::GpuDevice& d) {  in operator ()() argument
    152  bottom_diff);  in operator ()()
    159  bottom_diff);  in operator ()()
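The MaxPoolGradBackward* matches here and in maxpooling_op_gpu.cu.cc are the second-order (grad-of-grad) direction: data flow is inverted, and each pooled output gathers the incoming value at its own argmax instead of scattering to it. A hypothetical sketch of the masked variant, with parameter names of my own:

#include <cstdint>

// Hypothetical sketch of the gather pattern in the MaxPoolGradBackward
// snippets: every pooled output element reads the incoming value at its own
// argmax location, so no atomics are needed.
__global__ void MaxPoolGradBackwardSketch(
    const int nthreads,
    const float* top_diff,             // grad-of-grad, shaped like the pooling *input*
    const int64_t* mask,               // per pooled output: argmax offset within one batch item
    const int output_elems_per_batch,  // pooled elements per batch item
    const int input_elems_per_batch,   // input elements per batch item
    float* bottom_diff) {              // result, shaped like the pooling *output*
  for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads;
       index += blockDim.x * gridDim.x) {
    const int n = index / output_elems_per_batch;  // batch item of this pooled element
    bottom_diff[index] = top_diff[n * input_elems_per_batch + mask[index]];
  }
}

Each output element writes only its own slot, so no atomic operations are required; the NoMask variants in the listing recompute the argmax from the original input instead of reading it from a mask tensor.
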
D | maxpooling_op_gpu.h
    59   const int bottom_offset, T* bottom_diff,
    70   T* bottom_diff, const Eigen::GpuDevice& d);
    77   const int bottom_offset, T* bottom_diff,
    89   const T* top_diff, T* bottom_diff, const Eigen::GpuDevice& d);
D | resize_nearest_neighbor_op_gpu.cu.cc
    100  T* bottom_diff) {  in ResizeNearestNeighborBackwardNHWC() argument
    110  T* bottom_diff_n = bottom_diff + n * channels * out_height * out_width;  in ResizeNearestNeighborBackwardNHWC()
    131  T* bottom_diff) {  in LegacyResizeNearestNeighborBackwardNHWC() argument
    141  T* bottom_diff_n = bottom_diff + n * channels * out_height * out_width;  in LegacyResizeNearestNeighborBackwardNHWC()
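Both resize matches index bottom_diff by n * channels * out_height * out_width, i.e. the gradient of the source image; each thread handles one element of the resized image's gradient and pushes it back to the pixel it was sampled from. A simplified, hypothetical sketch follows (plain floor rounding and names of my own; the regular and Legacy variants in the listing differ precisely in how they map coordinates):

// Hypothetical sketch of nearest-neighbor resize backward, NHWC: each thread
// takes one element of the resized image's gradient, recomputes the source
// pixel the forward pass sampled, and accumulates there.
__global__ void ResizeNearestNeighborBackwardSketch(
    const int nthreads, const float* top_diff,  // gradient of the resized image
    const int in_height, const int in_width,    // resized (forward-output) size
    const int channels,
    const int out_height, const int out_width,  // source (forward-input) size
    const float height_scale, const float width_scale,
    float* bottom_diff) {                       // gradient of the source image, pre-zeroed
  for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads;
       index += blockDim.x * gridDim.x) {
    // Decompose the flat NHWC index of this resized-gradient element.
    int n = index;
    const int c = n % channels;   n /= channels;
    const int x = n % in_width;   n /= in_width;
    const int y = n % in_height;  n /= in_height;  // n is now the batch index
    // Nearest source coordinates, clamped to the source image.
    const int src_y = min(static_cast<int>(floorf(y * height_scale)), out_height - 1);
    const int src_x = min(static_cast<int>(floorf(x * width_scale)), out_width - 1);
    float* bottom_diff_n = bottom_diff + n * channels * out_height * out_width;
    atomicAdd(bottom_diff_n + (src_y * out_width + src_x) * channels + c,
              top_diff[index]);
  }
}

Upscaling maps many resized pixels onto one source pixel, which is why the accumulation is atomic and bottom_diff must be zeroed before the launch.
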
D | pooling_ops_3d_gpu.h
    41   const int pad_l, const T* top_diff, T* bottom_diff,
D | avgpooling_op.h
    74   const int pad_l, T* const bottom_diff,
D | maxpooling_op.cc
    538  void SpatialMaxPoolGradGrad(OpKernelContext* context, Tensor* bottom_diff,  in SpatialMaxPoolGradGrad() argument
    558  bottom_diff->flat<T>().data(), params.depth,  in SpatialMaxPoolGradGrad()
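The last match is in the op implementation file, where SpatialMaxPoolGradGrad receives the output tensor bottom_diff and passes its flat buffer on to the computation. For reference, a self-contained host-side sketch of what max-pool grad-grad computes; the layout, parameter names, and no-padding assumption are mine, not TensorFlow's:

#include <algorithm>

// Reference sketch: for every pooled output element, locate the argmax of the
// original input inside its window and copy the incoming grad-of-grad from
// that position.  NHWC layout, no padding.
void SpatialMaxPoolGradGradReference(
    const float* input,     // original pooling input, N x H x W x C
    const float* top_diff,  // grad-of-grad w.r.t. the input, same shape
    float* bottom_diff,     // result, N x PH x PW x C (pooled shape)
    int n_batch, int height, int width, int channels,
    int pooled_h, int pooled_w, int ksize_h, int ksize_w,
    int stride_h, int stride_w) {
  for (int n = 0; n < n_batch; ++n) {
    for (int ph = 0; ph < pooled_h; ++ph) {
      for (int pw = 0; pw < pooled_w; ++pw) {
        for (int c = 0; c < channels; ++c) {
          const int h_start = ph * stride_h;
          const int w_start = pw * stride_w;
          const int h_end = std::min(h_start + ksize_h, height);
          const int w_end = std::min(w_start + ksize_w, width);
          // Find the argmax of the original input inside this window.
          int max_idx = ((n * height + h_start) * width + w_start) * channels + c;
          for (int h = h_start; h < h_end; ++h) {
            for (int w = w_start; w < w_end; ++w) {
              const int idx = ((n * height + h) * width + w) * channels + c;
              if (input[idx] > input[max_idx]) max_idx = idx;
            }
          }
          // Gather the incoming grad-of-grad at the argmax position.
          const int out_idx = ((n * pooled_h + ph) * pooled_w + pw) * channels + c;
          bottom_diff[out_idx] = top_diff[max_idx];
        }
      }
    }
  }
}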