/external/tensorflow/tensorflow/core/kernels/
D  maxpooling_op_gpu.cu.cc
     184  const dtype* top_diff, dtype* bottom_diff) {  in MaxPoolBackwardNoMaskNHWC() (argument)
     215  top_diff[index]);  in MaxPoolBackwardNoMaskNHWC()
     240  __global__ void MaxPoolBackward(const int nthreads, const dtype* top_diff,  in MaxPoolBackward() (argument)
     247  CudaAtomicAdd(bottom_diff + offset + mask[index], top_diff[index]);  in MaxPoolBackward()
     271  const dtype* top_diff, dtype* bottom_diff) {  in MaxPoolGradBackwardNoMaskNCHW() (argument)
     300  bottom_diff[index] = top_diff[n * channels * height * width + maxidx];  in MaxPoolGradBackwardNoMaskNCHW()
     311  const dtype* top_diff, dtype* bottom_diff) {  in MaxPoolGradBackwardNoMaskNHWC() (argument)
     341  bottom_diff[index] = top_diff[n * height * width * channels + maxidx];  in MaxPoolGradBackwardNoMaskNHWC()
     367  __global__ void MaxPoolGradBackward(const int nthreads, const dtype* top_diff,  in MaxPoolGradBackward() (argument)
     374  bottom_diff[index] = top_diff[offset + mask[index]];  in MaxPoolGradBackward()
     [all …]
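Two gradient directions show up in these matches. The first-order backward pass (lines 240 and 247) scatters each pooled-output gradient back to the input position recorded in the argmax mask, using an atomic add because overlapping pooling windows can select the same input element; the grad-of-grad kernels (lines 300, 341, 374) go the other way and gather from top_diff at the max position. A minimal sketch of the mask-based scatter, assuming plain float data, with the raw atomicAdd intrinsic standing in for TensorFlow's CudaAtomicAdd wrapper and a hand-rolled grid-stride loop for its kernel-loop macro:

    #include <cstdint>
    #include <cuda_runtime.h>

    // Sketch of the mask-based max-pool backward scatter (cf. lines
    // 240-247 above). `mask` holds, for every pooled output element, the
    // flat offset of the winning input element within its batch image.
    __global__ void MaxPoolBackwardSketch(int nthreads, const float* top_diff,
                                          const int64_t* mask, int top_offset,
                                          int bottom_offset,
                                          float* bottom_diff) {
      // Grid-stride loop over all pooled-output gradient elements.
      for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads;
           index += blockDim.x * gridDim.x) {
        int n = index / top_offset;      // which batch image this belongs to
        int offset = n * bottom_offset;  // start of that image in bottom_diff
        // Atomic because overlapping pooling windows may share a max element.
        atomicAdd(bottom_diff + offset + mask[index], top_diff[index]);
      }
    }

Note that bottom_diff must be zero-filled before this scatter runs; a host-side launch sketch at the end of this listing shows that step.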
D  avgpooling_op_gpu.cu.cc
     44   const dtype* const top_diff, const int num,  in DEFINE_GPU_KERNELS()
     64   top_diff + n * pooled_height * pooled_width * channels + c;  in DEFINE_GPU_KERNELS()
     84   bool RunAvePoolBackwardNHWC(const T* const top_diff, const int num,  in RunAvePoolBackwardNHWC() (argument)
     96   d.stream(), config.virtual_thread_count, top_diff, num, height, width,  in RunAvePoolBackwardNHWC()
     104  const double* const top_diff, const int num, const int height,
     110  const float* const top_diff, const int num, const int height,
     116  const Eigen::half* const top_diff, const int num, const int height,
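Unlike the masked max-pool scatter, average-pool backward is a pure gather: each thread owns one bottom_diff element and sums the contribution of every pooling window that covers it, dividing each term by that window's clipped area, so no atomics are needed. A simplified float-only reconstruction of the NHWC kernel behind these matches; the parameter list mirrors the listing, but the body is a sketch, not the file's exact code:

    // Sketch of the NHWC average-pool backward gather (cf. lines 44-64).
    __global__ void AvePoolBackwardNHWCSketch(
        int nthreads, const float* top_diff, int num, int height, int width,
        int channels, int pooled_height, int pooled_width, int kernel_h,
        int kernel_w, int stride_h, int stride_w, int pad_t, int pad_l,
        float* bottom_diff) {
      for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads;
           index += blockDim.x * gridDim.x) {
        // Decompose the flat NHWC index of this input element (in padded
        // coordinates, so the window arithmetic below stays uniform).
        const int c = index % channels;
        const int w = (index / channels) % width + pad_l;
        const int h = (index / channels / width) % height + pad_t;
        const int n = index / channels / width / height;
        // Range of output cells whose pooling windows cover (h, w).
        const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;
        const int phend = min(h / stride_h + 1, pooled_height);
        const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;
        const int pwend = min(w / stride_w + 1, pooled_width);
        float gradient = 0.f;
        const float* top_diff_slice =
            top_diff + n * pooled_height * pooled_width * channels + c;
        for (int ph = phstart; ph < phend; ++ph) {
          for (int pw = pwstart; pw < pwend; ++pw) {
            // Area of this window after clipping to the valid image region;
            // the forward op divided by the same count.
            int hstart = max(ph * stride_h - pad_t, 0);
            int wstart = max(pw * stride_w - pad_l, 0);
            int hend = min(ph * stride_h - pad_t + kernel_h, height);
            int wend = min(pw * stride_w - pad_l + kernel_w, width);
            int pool_size = (hend - hstart) * (wend - wstart);
            gradient +=
                top_diff_slice[(ph * pooled_width + pw) * channels] / pool_size;
          }
        }
        bottom_diff[index] = gradient;
      }
    }

Line 96 shows how RunAvePoolBackwardNHWC feeds the real kernel: it launches on the device stream with config.virtual_thread_count threads, and lines 104-116 are the explicit instantiations for double, float, and Eigen::half.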
D  pooling_ops_3d_gpu.cu.cc
     36   const int pad_t, const int pad_l, const dtype* top_diff,  in MaxPoolGradBackwardNoMaskNCDHW() (argument)
     74   top_diff[n * channels * plane * height * width + maxidx];  in MaxPoolGradBackwardNoMaskNCDHW()
     86   const int pad_t, const int pad_l, const dtype* top_diff,  in MaxPoolGradBackwardNoMaskNDHWC() (argument)
     125  top_diff[n * plane * height * width * channels + maxidx];  in MaxPoolGradBackwardNoMaskNDHWC()
     142  const T* top_diff, T* bottom_diff, const Eigen::GpuDevice& d) {  in operator ()() (argument)
     151  kernel_w, stride_p, stride_h, stride_w, pad_p, pad_t, pad_l, top_diff,  in operator ()()
     158  kernel_w, stride_p, stride_h, stride_w, pad_p, pad_t, pad_l, top_diff,  in operator ()()
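As the NoMask suffix says, these 3-D second-order kernels have no stored argmax: each thread re-runs the forward max search over its pooling window in the original input and then gathers top_diff at the winning position (lines 74 and 125; the functor's operator() at 142-158 just dispatches to the NCDHW or NDHWC variant by data format). A condensed float-only NDHWC sketch of that recompute-then-gather pattern, under the same assumptions as the earlier sketches:

    #include <cfloat>

    // Sketch of the 3-D maskless grad-of-grad gather (cf. lines 86-125).
    // `bottom_data` is the forward input; `top_diff` is shaped like it and
    // carries the second-order gradient; `bottom_diff` is shaped like the
    // pooled output.
    __global__ void MaxPoolGradBackwardNoMaskNDHWCSketch(
        int nthreads, const float* bottom_data, int plane, int height,
        int width, int channels, int pooled_plane, int pooled_height,
        int pooled_width, int kernel_p, int kernel_h, int kernel_w,
        int stride_p, int stride_h, int stride_w, int pad_p, int pad_t,
        int pad_l, const float* top_diff, float* bottom_diff) {
      for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads;
           index += blockDim.x * gridDim.x) {
        // Decompose the flat NDHWC index of this pooled-output cell.
        int c = index % channels;
        int pw = (index / channels) % pooled_width;
        int ph = (index / channels / pooled_width) % pooled_height;
        int pp =
            (index / channels / pooled_width / pooled_height) % pooled_plane;
        int n = index / channels / pooled_width / pooled_height / pooled_plane;
        // Clip the pooling window to the valid input volume.
        int pstart = max(pp * stride_p - pad_p, 0);
        int hstart = max(ph * stride_h - pad_t, 0);
        int wstart = max(pw * stride_w - pad_l, 0);
        int pend = min(pp * stride_p - pad_p + kernel_p, plane);
        int hend = min(ph * stride_h - pad_t + kernel_h, height);
        int wend = min(pw * stride_w - pad_l + kernel_w, width);
        // Re-run the forward argmax, since no mask was saved.
        const float* slice =
            bottom_data + n * plane * height * width * channels;
        float maxval = -FLT_MAX;
        int maxidx = -1;
        for (int p = pstart; p < pend; ++p)
          for (int h = hstart; h < hend; ++h)
            for (int w = wstart; w < wend; ++w) {
              int idx = ((p * height + h) * width + w) * channels + c;
              if (slice[idx] > maxval) {
                maxval = slice[idx];
                maxidx = idx;
              }
            }
        // Gather the second-order gradient from the max position (line 125).
        if (maxidx != -1)
          bottom_diff[index] =
              top_diff[n * plane * height * width * channels + maxidx];
      }
    }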
D  maxpooling_op_gpu.h
     58   const T* top_diff, const int64* mask, const int top_offset,
     69   const int pad_t, const int pad_l, const T* top_diff,
     76   const T* top_diff, const int64* mask, const int top_offset,
     89   const T* top_diff, T* bottom_diff, const Eigen::GpuDevice& d);
D  resize_nearest_neighbor_op_gpu.cu.cc
     97   const int nthreads, const T* top_diff, const int in_height,  in ResizeNearestNeighborBackwardNHWC() (argument)
     122  CudaAtomicAdd(bottom_diff_n + idx, ldg(top_diff + index));  in ResizeNearestNeighborBackwardNHWC()
     128  const int nthreads, const T* top_diff, const int in_height,  in LegacyResizeNearestNeighborBackwardNHWC() (argument)
     151  CudaAtomicAdd(bottom_diff_n + idx, ldg(top_diff + index));  in LegacyResizeNearestNeighborBackwardNHWC()
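Same scatter idea as the masked max-pool backward, but with the target index computed on the fly: every element of top_diff is accumulated into the source pixel it was sampled from, and the add must be atomic because an upscale maps many resized pixels onto one source pixel. One naming quirk worth noting: the in_/out_ prefixes describe the backward kernel's own input and output, so in_height/in_width here are the forward op's output size. A float-only sketch, with a plain load standing in for the read-only-cache ldg() hint on line 122:

    #include <cmath>

    // Sketch of the nearest-neighbor resize backward scatter (cf. lines
    // 97-122). top_diff: [batch, in_height, in_width, channels], the
    // gradient of the resized image; bottom_diff: [batch, out_height,
    // out_width, channels], the gradient of the original image.
    __global__ void ResizeNearestNeighborBackwardNHWCSketch(
        int nthreads, const float* top_diff, int in_height, int in_width,
        int channels, int out_height, int out_width, float height_scale,
        float width_scale, float* bottom_diff) {
      for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads;
           index += blockDim.x * gridDim.x) {
        int c = index % channels;
        int x = (index / channels) % in_width;
        int y = (index / channels / in_width) % in_height;
        int n = index / channels / in_width / in_height;
        // The source pixel this resized pixel was copied from in the
        // forward pass; clamp to stay inside the image.
        int out_y = min(static_cast<int>(floorf(y * height_scale)),
                        out_height - 1);
        int out_x = min(static_cast<int>(floorf(x * width_scale)),
                        out_width - 1);
        float* bottom_diff_n =
            bottom_diff + n * out_height * out_width * channels;
        int idx = (out_y * out_width + out_x) * channels + c;
        // Atomic: many resized pixels can map to the same source pixel.
        atomicAdd(bottom_diff_n + idx, top_diff[index]);
      }
    }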
D  pooling_ops_3d_gpu.h
     41   const int pad_l, const T* top_diff, T* bottom_diff,
D  avgpooling_op.h
     68   bool RunAvePoolBackwardNHWC(const T* const top_diff, const int num,
D  maxpooling_op.cc
     540  const Tensor& top_diff,  in SpatialMaxPoolGradGrad() (argument)
     555  top_diff.flat<T>().data(), params.depth,  in SpatialMaxPoolGradGrad()
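The host side is visible in lines 96 and 555: the op extracts each tensor's contiguous buffer via Tensor::flat<T>().data() and hands the raw pointers to a kernel launched on the device stream. A hedged launcher for the MaxPoolBackwardSketch kernel above, using plain CUDA launch syntax where the real files go through TensorFlow's GetGpuLaunchConfig plumbing; kThreadsPerBlock is an illustrative choice, not the library's:

    // Host-side launch sketch; assumes MaxPoolBackwardSketch from the
    // maxpooling_op_gpu.cu.cc entry above is in scope.
    void LaunchMaxPoolBackwardSketch(int nthreads, const float* top_diff,
                                     const int64_t* mask, int top_offset,
                                     int bottom_offset, int bottom_size,
                                     float* bottom_diff, cudaStream_t stream) {
      // The scatter accumulates with atomicAdd, so bottom_diff must be
      // zero-filled first (the real op also zeroes its output before the
      // scatter).
      cudaMemsetAsync(bottom_diff, 0, bottom_size * sizeof(float), stream);
      const int kThreadsPerBlock = 256;  // illustrative block size
      const int blocks = (nthreads + kThreadsPerBlock - 1) / kThreadsPerBlock;
      MaxPoolBackwardSketch<<<blocks, kThreadsPerBlock, 0, stream>>>(
          nthreads, top_diff, mask, top_offset, bottom_offset, bottom_diff);
    }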