Home
last modified time | relevance | path

Searched refs:gpuAtomicAddNoReturn (Results 1 – 17 of 17) sorted by relevance

/external/pytorch/aten/src/ATen/cuda/
DAtomic.cuh324 inline __device__ void gpuAtomicAddNoReturn(c10::complex<T> *address, c10::complex<T> val) { gpuAto… in gpuAtomicAddNoReturn() function
325 inline __device__ void gpuAtomicAddNoReturn(uint8_t *address, uint8_t val) { gpuAtomicAdd(address, … in gpuAtomicAddNoReturn() function
326 inline __device__ void gpuAtomicAddNoReturn(int8_t *address, int8_t val) { gpuAtomicAdd(address, va… in gpuAtomicAddNoReturn() function
327 inline __device__ void gpuAtomicAddNoReturn(int16_t *address, int16_t val) { gpuAtomicAdd(address, … in gpuAtomicAddNoReturn() function
328 inline __device__ void gpuAtomicAddNoReturn(int32_t *address, int32_t val) { gpuAtomicAdd(address, … in gpuAtomicAddNoReturn() function
329 inline __device__ void gpuAtomicAddNoReturn(int64_t *address, int64_t val) { gpuAtomicAdd(address, … in gpuAtomicAddNoReturn() function
330 inline __device__ void gpuAtomicAddNoReturn(bool *address, bool val) { gpuAtomicAdd(address, val); } in gpuAtomicAddNoReturn() function
331 inline __device__ void gpuAtomicAddNoReturn(at::Half *address, at::Half val) { gpuAtomicAdd(address… in gpuAtomicAddNoReturn() function
332 inline __device__ void gpuAtomicAddNoReturn(at::BFloat16 *address, at::BFloat16 val) { gpuAtomicAdd… in gpuAtomicAddNoReturn() function
333 inline __device__ void gpuAtomicAddNoReturn(double *address, double val) { gpuAtomicAdd(address, va… in gpuAtomicAddNoReturn() function
[all …]
/external/pytorch/aten/src/ATen/native/cuda/
DKernelUtils.cuh53 gpuAtomicAddNoReturn( in fastSpecializedAtomicAdd()
93 gpuAtomicAddNoReturn( in fastSpecializedAtomicAdd()
131 gpuAtomicAddNoReturn(tensor + index, value); in fastSpecializedAtomicAdd()
144 gpuAtomicAddNoReturn(tensor + index, value); in fastAtomicAdd()
DUpSampleLinear1d.cu111 gpuAtomicAddNoReturn(&idata[n][c][w1], static_cast<scalar_t>(w0lambda * d2val)); in upsample_linear1d_out_frame_backward()
112 gpuAtomicAddNoReturn( in upsample_linear1d_out_frame_backward()
DSummaryOps.cu95 gpuAtomicAddNoReturn(&smem[bin], getOp(linearIndex)); in C10_LAUNCH_BOUNDS_1()
105 gpuAtomicAddNoReturn(&a.data[aOffset], smem[i]); in C10_LAUNCH_BOUNDS_1()
123 gpuAtomicAddNoReturn(&a.data[aOffset], getOp(linearIndex)); in C10_LAUNCH_BOUNDS_1()
DReflectionPad.cu119 gpuAtomicAddNoReturn( in reflection_pad1d_backward_out_kernel()
160 gpuAtomicAddNoReturn(&grad_input[index_pair.first], grad_output[index_pair.second]); in reflection_pad2d_backward_out_kernel()
267 gpuAtomicAddNoReturn(target, value_to_add); in reflection_pad3d_backward_out_kernel()
DReplicationPadding.cu85 gpuAtomicAddNoReturn(&gradInput[batch][plane][inputPointX], valueToCopy); in replication_pad_backward_kernel()
143 gpuAtomicAddNoReturn(&gradInput[batch][plane][inputPointY][inputPointX], valueToCopy); in replication_pad_backward_kernel()
224 gpuAtomicAddNoReturn(&gradInput[batch][plane][inputPointZ][inputPointY][inputPointX], in replication_pad_backward_kernel()
DFractionalMaxPool2d.cu125 gpuAtomicAddNoReturn( in fractional_max_pool2d_backward_out_cuda_frame()
DUpSample.cuh214 gpuAtomicAddNoReturn( in upsample_increment_value_bounded()
DSortingRadixSelect.cuh234 gpuAtomicAddNoReturn(&smem[i], counts[i]); in countRadixUsingMask()
DSorting.cu135 gpuAtomicAddNoReturn(&num_nan, nan_count); in gatherMedian()
DDilatedMaxPool3d.cu228 gpuAtomicAddNoReturn(&gradInputData[(int64_t) slice * itime * iheight * iwidth + maxIndex], in max_pool3d_with_indices_backward_single_out_frame()
231 gpuAtomicAddNoReturn(&gradInputData[((int64_t) batch * itime * iheight * iwidth + maxIndex) * feat… in max_pool3d_with_indices_backward_single_out_frame()
DFractionalMaxPool3d.cu148 gpuAtomicAddNoReturn( in fractional_max_pool3d_backward_out_frame()
DAdaptiveMaxPooling2d.cu196 gpuAtomicAddNoReturn(&(gradInput[argmax]), z); in atomicadaptivemaxgradinput()
DAdaptiveMaxPooling3d.cu273 gpuAtomicAddNoReturn(&(gradInput_d[argmax]), grad_delta); in atomicadaptivemaxgradinput()
DAdaptiveAveragePooling3d.cu303 gpuAtomicAddNoReturn(&(ptr_gradInput[ih*isizeW + iw]), grad_delta); in atomicadaptiveaveragegradinput()
DAdaptiveAveragePooling.cu214 gpuAtomicAddNoReturn(&(ptr_gradInput[iw]), grad_delta); in atomic_adaptive_average_gradinput()
DLossCTC.cu490 gpuAtomicAddNoReturn(&gradient_data[gr_batch_offset + t * gr_input_stride + gr_char_stride * targe… in ctc_loss_backward_collect_nonblank_gpu_kernel()