/external/tensorflow/tensorflow/core/kernels/
D | avgpooling_op_gpu.cu.cc
      44  const int nthreads, const dtype* const __restrict__ top_diff, const int num,  in DEFINE_GPU_KERNELS()
      62  top_diff + n * pooled_height * pooled_width * channels + c;  in DEFINE_GPU_KERNELS()
      82  bool RunAvePoolBackwardNHWC(const T* const top_diff, const int num,  in RunAvePoolBackwardNHWC() argument
      94  d.stream(), config.virtual_thread_count, top_diff, num, height, width,  in RunAvePoolBackwardNHWC()
     102  const double* const top_diff, const int num, const int height,
     108  const float* const top_diff, const int num, const int height,
     114  const Eigen::half* const top_diff, const int num, const int height,
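The two DEFINE_GPU_KERNELS() hits above come from the average-pooling backward kernel: each thread owns one bottom_diff element and sums the evenly split shares of every top_diff window that covers it. A minimal sketch of that Caffe-style grid-stride pattern follows; the `Sketch` name is hypothetical, and as an assumption the batch size is folded into nthreads rather than passed separately as `num`.

// Hedged sketch, not the TensorFlow source: one thread per bottom_diff
// element in NHWC layout, accumulating shares from every covering window.
template <typename dtype>
__global__ void AvePoolBackwardNHWCSketch(
    const int nthreads, const dtype* const __restrict__ top_diff,
    const int height, const int width, const int channels,
    const int pooled_height, const int pooled_width, const int kernel_h,
    const int kernel_w, const int stride_h, const int stride_w,
    const int pad_t, const int pad_l, dtype* const __restrict__ bottom_diff) {
  for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads;
       index += blockDim.x * gridDim.x) {
    // Decompose the flat NHWC index; h and w are in padded coordinates.
    const int c = index % channels;
    const int w = index / channels % width + pad_l;
    const int h = index / channels / width % height + pad_t;
    const int n = index / channels / width / height;
    // Range of pooled outputs whose windows cover (h, w).
    const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;
    const int phend = min(h / stride_h + 1, pooled_height);
    const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;
    const int pwend = min(w / stride_w + 1, pooled_width);
    dtype gradient(0);
    // Matches the offset at line 62 of the listing above.
    const dtype* const top =
        top_diff + n * pooled_height * pooled_width * channels + c;
    for (int ph = phstart; ph < phend; ++ph) {
      for (int pw = pwstart; pw < pwend; ++pw) {
        // Each output spread its gradient evenly over its window, so
        // divide by that window's (padding-clipped) size.
        const int hstart = ph * stride_h - pad_t;
        const int wstart = pw * stride_w - pad_l;
        const int hend = min(hstart + kernel_h, height + pad_t);
        const int wend = min(wstart + kernel_w, width + pad_l);
        const int pool_size = (hend - hstart) * (wend - wstart);
        gradient += top[(ph * pooled_width + pw) * channels] /
                    static_cast<dtype>(pool_size);
      }
    }
    bottom_diff[index] = gradient;
  }
}

Because every thread writes a distinct bottom_diff[index], no atomics are needed here, unlike the max-pooling scatter below.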
D | maxpooling_op_gpu.cu.cc
     204  const dtype* __restrict__ top_diff,  in MaxPoolBackward() argument
     212  GpuAtomicAdd(bottom_diff + offset + mask[index], top_diff[index]);  in MaxPoolBackward()
     236  const dtype* __restrict__ top_diff, dtype* __restrict__ bottom_diff) {  in MaxPoolGradBackwardNoMaskNCHW() argument
     265  bottom_diff[index] = top_diff[n * channels * height * width + maxidx];  in MaxPoolGradBackwardNoMaskNCHW()
     277  const dtype* __restrict__ top_diff, dtype* __restrict__ bottom_diff) {  in MaxPoolGradBackwardNoMaskNHWC() argument
     307  bottom_diff[index] = top_diff[n * height * width * channels + maxidx];  in MaxPoolGradBackwardNoMaskNHWC()
     333  const dtype* __restrict__ top_diff,  in MaxPoolGradBackward() argument
     342  bottom_diff[index] = top_diff[offset + mask[index]];  in MaxPoolGradBackward()
     406  const int output_size, const int input_size, const T* top_diff,  in operator ()() argument
     418  0, d.stream(), output_size, top_diff, mask, top_offset, bottom_offset,  in operator ()()
     [all …]
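These matches split into two families. MaxPoolBackward (lines 204/212) scatters each top_diff element through the saved argmax mask with GpuAtomicAdd; the MaxPoolGradBackward* kernels (lines 236-342) compute the second-order gradient as a gather, either through the mask or by recomputing the argmax when none was saved. A minimal float-only sketch of the scatter half, assuming the usual mask layout of flat within-image indices; the `Sketch` name is hypothetical.

#include <cstdint>

// Hedged sketch of the masked backward scatter. Overlapping windows can
// share an argmax, so accumulation must be atomic (TF wraps this in
// GpuAtomicAdd to cover more dtypes than plain atomicAdd).
__global__ void MaxPoolBackwardSketch(
    const int nthreads, const float* __restrict__ top_diff,
    const int64_t* __restrict__ mask, const int top_offset,
    const int bottom_offset, float* __restrict__ bottom_diff) {
  for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads;
       index += blockDim.x * gridDim.x) {
    const int image_id = index / top_offset;      // which batch element
    const int offset = image_id * bottom_offset;  // start of its input slice
    // mask[index] is the flat within-image index of the forward-pass max.
    atomicAdd(bottom_diff + offset + mask[index], top_diff[index]);
  }
}

The gather half at line 342 is the mirror image, bottom_diff[index] = top_diff[offset + mask[index]], and needs no atomics because each output index is written exactly once.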
D | pooling_ops_3d_gpu.cu.cc
      37  const int pad_l, const dtype* __restrict__ top_diff,  in MaxPoolGradBackwardNoMaskNCDHW() argument
      75  top_diff[n * channels * plane * height * width + maxidx];  in MaxPoolGradBackwardNoMaskNCDHW()
      88  const int pad_l, const dtype* __restrict__ top_diff,  in MaxPoolGradBackwardNoMaskNDHWC() argument
     127  top_diff[n * plane * height * width * channels + maxidx];  in MaxPoolGradBackwardNoMaskNDHWC()
     144  const T* top_diff, T* bottom_diff, const Eigen::GpuDevice& d) {  in operator ()() argument
     154  stride_w, pad_p, pad_t, pad_l, top_diff, bottom_diff));  in operator ()()
     161  stride_w, pad_p, pad_t, pad_l, top_diff, bottom_diff));  in operator ()()
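pooling_ops_3d_gpu.cu.cc extends the no-mask grad-of-grad pattern to 3D volumes: since no argmax mask was saved, each thread re-runs the forward argmax over its window, then gathers from top_diff at that position. A float-only sketch of the NDHWC variant, assuming the window-clipping conventions visible in the 2D kernels; the `Sketch` name is hypothetical.

#include <cfloat>

// Hedged sketch: one thread per pooled output element, recompute the
// argmax, then gather the second-order gradient (cf. line 127 above).
__global__ void MaxPoolGradBackwardNoMaskNDHWCSketch(
    const int nthreads, const float* __restrict__ bottom_data,
    const int plane, const int height, const int width, const int channels,
    const int pooled_plane, const int pooled_height, const int pooled_width,
    const int kernel_p, const int kernel_h, const int kernel_w,
    const int stride_p, const int stride_h, const int stride_w,
    const int pad_p, const int pad_t, const int pad_l,
    const float* __restrict__ top_diff, float* __restrict__ bottom_diff) {
  for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads;
       index += blockDim.x * gridDim.x) {
    // Decompose the flat NDHWC index of one pooled output element.
    int n = index;
    const int c = n % channels;
    n /= channels;
    const int pw = n % pooled_width;
    n /= pooled_width;
    const int ph = n % pooled_height;
    n /= pooled_height;
    const int pp = n % pooled_plane;
    n /= pooled_plane;  // n is now the batch index
    // Clip the 3D pooling window to the valid (unpadded) input region.
    int pstart = pp * stride_p - pad_p;
    int hstart = ph * stride_h - pad_t;
    int wstart = pw * stride_w - pad_l;
    const int pend = min(pstart + kernel_p, plane);
    const int hend = min(hstart + kernel_h, height);
    const int wend = min(wstart + kernel_w, width);
    pstart = max(pstart, 0);
    hstart = max(hstart, 0);
    wstart = max(wstart, 0);
    // No mask was saved, so re-run the forward argmax search.
    int maxidx = -1;
    float maxval = -FLT_MAX;
    const float* slice = bottom_data + n * plane * height * width * channels;
    for (int p = pstart; p < pend; ++p)
      for (int h = hstart; h < hend; ++h)
        for (int w = wstart; w < wend; ++w) {
          const int idx = ((p * height + h) * width + w) * channels + c;
          if (slice[idx] > maxval) {
            maxval = slice[idx];
            maxidx = idx;
          }
        }
    // Gather the second-order gradient from the argmax position.
    if (maxidx != -1)
      bottom_diff[index] =
          top_diff[n * plane * height * width * channels + maxidx];
  }
}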
D | maxpooling_op_gpu.h
      58  const T* top_diff, const int64* mask, const int top_offset,
      66  const T* top_diff, const int64* mask, const int top_offset,
      79  const T* top_diff, T* bottom_diff, const Eigen::GpuDevice& d);
D | pooling_ops_3d_gpu.h
      41  const int pad_l, const T* top_diff, T* bottom_diff,
D | avgpooling_op.h
      68  bool RunAvePoolBackwardNHWC(const T* const top_diff, const int num,
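avgpooling_op.h only declares the host-side launcher. A hedged sketch of what such a launcher typically does, matching the call pattern visible at lines 82-94 of the avgpooling_op_gpu.cu.cc entry above; GetGpuLaunchConfig and GpuLaunchKernel are TF's launch helpers, but the exact argument order here is an assumption, and it launches the hypothetical kernel sketched earlier.

#include "tensorflow/core/util/gpu_kernel_helper.h"

// Hedged host-side launcher sketch: size the grid for one virtual thread
// per bottom_diff element, then enqueue on the device stream.
template <typename T>
bool RunAvePoolBackwardNHWCSketch(
    const T* const top_diff, const int num, const int height, const int width,
    const int channels, const int pooled_height, const int pooled_width,
    const int kernel_h, const int kernel_w, const int stride_h,
    const int stride_w, const int pad_t, const int pad_l,
    T* const bottom_diff, const Eigen::GpuDevice& d) {
  const int count = num * height * width * channels;  // bottom elements
  if (count == 0) return true;
  GpuLaunchConfig config = GetGpuLaunchConfig(count, d);
  // config.virtual_thread_count becomes the kernel's nthreads bound;
  // the grid-stride loop inside the kernel covers the rest.
  TF_CHECK_OK(GpuLaunchKernel(
      AvePoolBackwardNHWCSketch<T>, config.block_count,
      config.thread_per_block, 0, d.stream(), config.virtual_thread_count,
      top_diff, height, width, channels, pooled_height, pooled_width,
      kernel_h, kernel_w, stride_h, stride_w, pad_t, pad_l, bottom_diff));
  return d.ok();
}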
D | maxpooling_op.cc
     552  const Tensor& top_diff,  in SpatialMaxPoolGradGrad() argument
     567  top_diff.flat<T>().data(), params.depth,  in SpatialMaxPoolGradGrad()
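maxpooling_op.cc carries the CPU path; SpatialMaxPoolGradGrad at line 552 implements the same gather-at-argmax rule as the GPU kernels. A scalar per-window sketch with a hypothetical helper name, not the TF source:

#include <cstddef>
#include <vector>

// Grad-of-grad of max pooling for a single window: because the first
// backward pass routes all gradient through the argmax, its own gradient
// is a pure gather at that same argmax.
float MaxPoolGradGradOneWindow(const std::vector<float>& window_inputs,
                               const std::vector<float>& window_top_diff) {
  std::size_t argmax = 0;
  for (std::size_t i = 1; i < window_inputs.size(); ++i) {
    if (window_inputs[i] > window_inputs[argmax]) argmax = i;
  }
  // The mirror image of the scatter in MaxPoolBackward above.
  return window_top_diff[argmax];
}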
/external/tensorflow/tensorflow/core/kernels/image/
D | resize_nearest_neighbor_op_gpu.cu.cc
      96  const int nthreads, const T* __restrict__ top_diff, const int in_height,  in ResizeNearestNeighborBackwardNHWC() argument
     121  GpuAtomicAdd(bottom_diff_n + idx, ldg(top_diff + index));  in ResizeNearestNeighborBackwardNHWC()
     127  const int nthreads, const T* __restrict__ top_diff, const int in_height,  in LegacyResizeNearestNeighborBackwardNHWC() argument
     150  GpuAtomicAdd(bottom_diff_n + idx, ldg(top_diff + index));  in LegacyResizeNearestNeighborBackwardNHWC()
     229  void (*kernel)(const int nthreads, const T* __restrict__ top_diff,  in operator ()()
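Both resize-backward hits (lines 121 and 150) are the same scatter-accumulate: each gradient element is routed back to the nearest-neighbor source pixel chosen in the forward pass. A float-only sketch follows, assuming plain floor rounding (the legacy and non-legacy kernels differ in how they round, and the `Sketch` name is hypothetical).

// Hedged sketch: one thread per incoming gradient element in NHWC layout.
__global__ void ResizeNearestNeighborBackwardNHWCSketch(
    const int nthreads, const float* __restrict__ top_diff,
    const int in_height, const int in_width, const int channels,
    const int out_height, const int out_width, const float height_scale,
    const float width_scale, float* __restrict__ bottom_diff) {
  for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads;
       index += blockDim.x * gridDim.x) {
    // Decompose the flat NHWC index of one top_diff element.
    int n = index;
    const int c = n % channels;
    n /= channels;
    const int x = n % in_width;
    n /= in_width;
    const int y = n % in_height;
    n /= in_height;  // n is now the batch index
    // Nearest source pixel this element was copied from in the forward
    // pass (floor scaling here is an assumption; TF's paths round
    // differently depending on half_pixel_centers / align_corners).
    const int out_y =
        min(static_cast<int>(floorf(y * height_scale)), out_height - 1);
    const int out_x =
        min(static_cast<int>(floorf(x * width_scale)), out_width - 1);
    float* bottom_diff_n = bottom_diff + n * out_height * out_width * channels;
    const int idx = (out_y * out_width + out_x) * channels + c;
    // When upscaling, many outputs map to one source pixel, so the
    // accumulation must be atomic (TF uses GpuAtomicAdd + ldg, line 121).
    atomicAdd(bottom_diff_n + idx, top_diff[index]);
  }
}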