/external/pytorch/aten/src/ATen/native/cuda/ |
D | ReduceOps.h |
      3  struct TensorIterator;
     12  void norm_launch_kernel(TensorIterator &iter, double val);
     13  void min_launch_kernel(TensorIterator &iter);
     14  void max_launch_kernel(TensorIterator &iter);
     15  void aminmax_launch_kernel(TensorIterator &iter);
     16  void min_all_launch_kernel(TensorIterator &iter);
     17  void max_all_launch_kernel(TensorIterator &iter);
     18  void aminmax_allreduce_launch_kernel(TensorIterator &iter);
|
D | ReduceSumProdKernel.cu |
     15  void operator()(TensorIterator& iter) {   in operator ()()
     30  void operator()(TensorIterator& iter) {   in operator ()()
     41  void operator()(TensorIterator& iter) {
     54  void operator()(TensorIterator& iter) {   in operator ()()
     64  void operator()(TensorIterator& iter) {   in operator ()()
     74  void operator()(TensorIterator& iter) {
     88  void operator()(TensorIterator& iter) {   in operator ()()
     98  void operator()(TensorIterator& iter) {
    110  void operator()(TensorIterator& iter) {   in operator ()()
    124  void operator()(TensorIterator& iter) {   in operator ()()
    [all …]
|
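Each functor above follows the same shape: operator()(TensorIterator&) dispatches on the iterator's dtype and forwards to the generic CUDA reduction driver in ATen/native/cuda/Reduce.cuh. A minimal sketch of that pattern for a plain sum (hedged: the real file also routes through accumulate types and special-cases complex and reduced-precision dtypes):

    #include <ATen/Dispatch.h>
    #include <ATen/native/cuda/Reduce.cuh>

    // Sketch of a sum functor in the style of ReduceSumProdKernel.cu.
    struct sum_functor_sketch {
      void operator()(at::TensorIterator& iter) {
        AT_DISPATCH_ALL_TYPES(iter.dtype(), "sum_cuda_sketch", [&] {
          // func_wrapper lifts a plain binary combine function into the ops
          // object gpu_reduce_kernel expects; the default identity is 0.
          at::native::gpu_reduce_kernel<scalar_t, scalar_t>(
              iter,
              at::native::func_wrapper<scalar_t>(
                  [] GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t { return a + b; }));
        });
      }
    };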
D | ReduceMaxValuesKernel.cu |
     28  void max_values_kernel_cuda_impl(TensorIterator& iter) {   in max_values_kernel_cuda_impl()
     35  void max_values_kernel_cuda(TensorIterator& iter) {   in max_values_kernel_cuda()
     42  void max_launch_kernel(TensorIterator& iter) {   in max_launch_kernel()
     53  void max_all_launch_kernel(TensorIterator &iter) {   in max_all_launch_kernel()
|
D | ReduceMinValuesKernel.cu |
     29  void min_values_kernel_cuda_impl(TensorIterator& iter) {   in min_values_kernel_cuda_impl()
     35  void min_values_kernel_cuda(TensorIterator& iter) {   in min_values_kernel_cuda()
     41  void min_launch_kernel(TensorIterator &iter) {   in min_launch_kernel()
     50  void min_all_launch_kernel(TensorIterator &iter) {   in min_all_launch_kernel()
|
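min_launch_kernel, max_launch_kernel, and the *_all variants differ only in the combine function and identity they hand to the same driver. A hedged sketch for the all-reduce min case (the NaN propagation and half/bfloat16 handling of the real kernels is omitted); aminmax_allreduce_launch_kernel in ReduceAMinMaxKernel.cu below is the same recipe with a pair-valued ops object that tracks both extrema in one pass:

    #include <ATen/Dispatch.h>
    #include <ATen/NumericLimits.cuh>
    #include <ATen/native/cuda/Reduce.cuh>

    // Sketch only: reduce everything to a single minimum. The identity is the
    // largest representable value, so it never beats real data.
    void min_all_launch_kernel_sketch(at::TensorIterator& iter) {
      AT_DISPATCH_ALL_TYPES(iter.input_dtype(), "min_all_cuda_sketch", [&] {
        at::native::gpu_reduce_kernel<scalar_t, scalar_t>(
            iter,
            at::native::func_wrapper<scalar_t>(
                [] GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
                  return a < b ? a : b;
                }),
            at::numeric_limits<scalar_t>::upper_bound());
      });
    }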
D | ReduceMomentKernel.cu |
     14  void std_var_kernel_impl(TensorIterator& iter, double correction, bool take_sqrt) {   in std_var_kernel_impl()
     23  static void std_var_kernel_cuda(TensorIterator& iter, double correction, bool take_sqrt) {   in std_var_kernel_cuda()
     40  void mean_kernel_impl(TensorIterator& iter) {   in mean_kernel_impl()
     47  static void mean_kernel_cuda(TensorIterator& iter) {   in mean_kernel_cuda()
|
D | ReduceAMinMaxKernel.cu |
     21  void _min_max_values_kernel_cuda_impl(TensorIterator& iter) {   in _min_max_values_kernel_cuda_impl()
     30  void aminmax_allreduce_launch_kernel(TensorIterator& iter) {   in aminmax_allreduce_launch_kernel()
     37  void aminmax_launch_kernel(TensorIterator& iter) {   in aminmax_launch_kernel()
|
D | IndexKernel.cu |
    110  TensorIterator& iter,   in index_fill_kernel_impl()
    148  TensorIterator& iter,   in index_copy_kernel_impl()
    190  void index_put_kernel_impl(TensorIterator& iter, const IntArrayRef index_size, const IntArrayRef in…   in index_put_kernel_impl()
    204  TensorIterator& iter,   in index_fill_kernel()
    220  TensorIterator& iter,   in index_copy_kernel()
    236  static void index_put_kernel(TensorIterator& iter, const IntArrayRef index_size, const IntArrayRef …   in index_put_kernel()
    253  void index_put_kernel_quantized_cuda(TensorIterator& iter, const IntArrayRef index_size, const IntA…   in index_put_kernel_quantized_cuda()
    277  TensorIterator& iter,   in cuda_take_put_kernel()
    323  void put_kernel(TensorIterator& iter, const TensorBase& output, const bool accumulate) {   in put_kernel()
    347  TensorIterator& iter,   in take_kernel()
    [all …]
|
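Whatever the variant (fill, copy, put, take), the addressing step is shared: an element's extra byte offset is the sum of its gathered index values times the indexed dimensions' strides, with negative indices wrapped. A standalone sketch of that computation (the helper name and layout are illustrative, not the upstream code):

    #include <cstdint>

    // Sketch: compose one element's byte offset from gathered index values,
    // as both the CUDA and CPU index kernels do before touching the pointer.
    inline int64_t compose_index_offset(
        const int64_t* index_values,   // one gathered value per indexed dim
        const int64_t* index_sizes,    // sizes of the indexed dims
        const int64_t* index_strides,  // byte strides of the indexed dims
        int num_indexed_dims) {
      int64_t offset = 0;
      for (int d = 0; d < num_indexed_dims; d++) {
        int64_t idx = index_values[d];
        if (idx < 0) {
          idx += index_sizes[d];  // wrap x[-1] to x[size - 1]
        }
        offset += idx * index_strides[d];
      }
      return offset;
    }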
D | Distributions.cpp |
     31  at::TensorIterator iter = at::TensorIteratorConfig()   in _s_binomial_cuda()
     52  at::TensorIterator iter = at::TensorIteratorConfig()   in _s_dirichlet_cuda()
     63  TensorIterator iter = at::TensorIteratorConfig()   in _standard_gamma_grad_cuda()
     74  TensorIterator iter = at::TensorIteratorConfig()   in _dirichlet_grad_cuda()
|
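All four entry points open by building the iterator explicitly through TensorIteratorConfig rather than a factory shortcut, which is the way to get arities and options the factories don't cover. A minimal sketch of that construction (the option calls are the standard TensorIteratorConfig API; which extra options the distribution kernels set is not visible in the listing):

    #include <ATen/TensorIterator.h>

    // Sketch: one output, two read-only inputs, ready for a gpu_kernel launch.
    at::TensorIterator make_grad_iter_sketch(const at::Tensor& out,
                                             const at::Tensor& x,
                                             const at::Tensor& grad) {
      return at::TensorIteratorConfig()
          .add_output(out)   // written by the kernel
          .add_input(x)
          .add_input(grad)
          .build();          // resolves broadcast shape, dtype, and layout
    }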
/external/pytorch/aten/src/ATen/native/ |
D | IndexKernel.h |
      8  struct TensorIterator;
     19  using index_fill_fn = void(*)(TensorIterator & iter, int64_t dim, int64_t self_dim_size, int64_t se…
     20  using index_copy_fn = void(*)(TensorIterator & iter, int64_t dim, int64_t self_dim_size, int64_t se…
     21  using index_put_fn = void(*)(TensorIterator &, IntArrayRef indexed_sizes, IntArrayRef indexed_strid…
     22  using put_fn = void(*)(TensorIterator & iter, const TensorBase& self, const bool accumulate);
     23  using take_fn = void(*)(TensorIterator & iter, const TensorBase& input);
     24  using flip_fn = void(*)(TensorIterator &, const bool);
     25  using masked_fill_fn = void(*)(TensorIterator &, const Scalar& scalar);
     26  using masked_select_fn = void(*)(TensorIterator &, int64_t orig_stride);
     27  using masked_scatter_fn = void(*)(TensorIterator &, const TensorBase &);
|
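These aliases are the signatures behind ATen's DispatchStub mechanism: a header declares one stub per operation, each backend file registers its kernel against the stub, and device-generic code calls through it. A sketch of the three pieces using a hypothetical my_fill stub (the macros are the real DispatchStub.h API; the stub and kernel names are invented):

    #include <ATen/TensorIterator.h>
    #include <ATen/native/DispatchStub.h>

    namespace at::native {

    // 1) Header: declare the stub with the function-pointer signature.
    using my_fill_fn = void (*)(at::TensorIterator&, const c10::Scalar&);
    DECLARE_DISPATCH(my_fill_fn, my_fill_stub);

    // 2) One translation unit: define the stub's storage.
    DEFINE_DISPATCH(my_fill_stub);

    // 3) Backend kernel file: implement and register.
    static void my_fill_kernel(at::TensorIterator& iter, const c10::Scalar& value) {
      // a real kernel would AT_DISPATCH over iter.dtype() here
    }
    REGISTER_DISPATCH(my_fill_stub, &my_fill_kernel);

    // Call sites then route by device: my_fill_stub(iter.device_type(), iter, value);

    } // namespace at::native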
D | ReduceOpsUtils.h |
    112  using DimMask = TensorIterator::DimMask;
    185  inline TensorIterator make_reduction(   in make_reduction()
    205  return TensorIterator::reduce_op(viewed_result, self);   in make_reduction()
    207  return TensorIterator::reduce_op(viewed_result, self.to(in_dtype));   in make_reduction()
    210  inline C10_UNUSED TensorIterator make_reduction(   in make_reduction()
    225  inline TensorIterator make_reduction(   in make_reduction()
    257  return TensorIterator::reduce_op(viewed_result1, viewed_result2, self);   in make_reduction()
    259  return TensorIterator::reduce_op(viewed_result1, viewed_result2, self.to(dtype1));   in make_reduction()
    262  inline C10_UNUSED TensorIterator make_reduction(   in make_reduction()
    399  inline TensorIterator make_reduction(   in make_reduction()
    [all …]
|
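make_reduction is the glue between a user-facing reduction and the reduce stubs: it views the result with size-1 kept dimensions so it lines up against self, then hands both to TensorIterator::reduce_op. A sketch of the typical call pattern (hedged: the exact overload and its dtype plumbing vary across the four overloads listed above):

    #include <ATen/ATen.h>
    #include <ATen/native/ReduceOpsUtils.h>

    // Sketch: how a sum-like op would obtain its reduction iterator.
    at::Tensor sum_like_sketch(const at::Tensor& self, at::IntArrayRef dim, bool keepdim) {
      at::Tensor result;  // allocated/resized inside make_reduction
      auto iter = at::native::make_reduction(
          "sum_sketch", result, self, dim, keepdim, self.scalar_type());
      // sum_stub(iter.device_type(), iter);  // then run the registered kernel
      return result;
    }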
D | Activation.h |
     12  struct TensorIterator;
     47  using activation_fn = void (*)(TensorIterator&);
     48  using activation_backward_fn = void (*)(TensorIterator&);
     52  using hardtanh_backward_fn = void (*)(TensorIterator&, const c10::Scalar&, const c10::Scalar&);
     55  using hardswish_fn = void(*)(TensorIterator&);
     56  using hardswish_backward_fn = void(*)(TensorIterator&);
|
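Since activation_fn and activation_backward_fn receive nothing but the iterator, an activation kernel is one elementwise lambda. A sketch matching the hardswish_fn slot, whose math is x * clamp(x + 3, 0, 6) / 6 (float-only CPU flavor; real kernels dispatch over dtypes and add a vectorized lambda):

    #include <ATen/TensorIterator.h>
    #include <ATen/native/cpu/Loops.h>
    #include <algorithm>

    // Sketch: elementwise hardswish over an already-built unary iterator.
    static void hardswish_kernel_sketch(at::TensorIterator& iter) {
      at::native::cpu_kernel(iter, [](float x) -> float {
        const float relu6 = std::min(std::max(x + 3.0f, 0.0f), 6.0f);
        return x * relu6 / 6.0f;
      });
    }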
D | RangeFactories.h |
      5  struct TensorIterator;
      9  DECLARE_DISPATCH(void(*)(TensorIterator&, const Scalar&, const Scalar&, const Scalar&), arange_stub…
     10  DECLARE_DISPATCH(void(*)(TensorIterator&, const Scalar&, const Scalar&, int64_t), linspace_stub);
|
D | ReduceOps.h |
     12  struct TensorIterator;
     18  using reduce_fn = void(*)(TensorIterator &);
     32  void (*)(TensorIterator&, double correction, bool take_sqrt);
     39  using reduce_fn_flag = void(*)(TensorIterator &, const c10::Scalar&);
|
D | PointwiseOps.h |
     12  struct TensorIterator;
     17  using pointwise_fn = void (*)(TensorIterator&, const Scalar& scalar);
     19  using pointwise_fn_double = void (*)(TensorIterator&, const Scalar&, double);
|
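pointwise_fn is the shape used by addcmul/addcdiv-style kernels: three tensor operands threaded through the iterator plus one scalar. Assuming an iterator built with one output and three inputs, a sketch of the addcmul recurrence out = self + value * t1 * t2 (float-only for brevity):

    #include <ATen/TensorIterator.h>
    #include <ATen/native/cpu/Loops.h>

    // Sketch: the addcmul update as a 3-input elementwise kernel.
    static void addcmul_kernel_sketch(at::TensorIterator& iter, const c10::Scalar& value) {
      const float v = value.to<float>();
      at::native::cpu_kernel(iter, [v](float self, float t1, float t2) -> float {
        return self + v * t1 * t2;
      });
    }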
D | TensorIteratorReduce.cpp |
     54  auto first_reduce = TensorIterator::reduce_op(buffer_0, iter.input(0));   in two_pass_reduction()
     70  auto final_reduce = TensorIterator::reduce_op(unsqueezed, buffer);   in two_pass_reduction()
    121  auto sub_iter = TensorIterator(iter);   in parallel_dim_reduction()
    150  TensorIterator reduced = *this;   in foreach_reduced_elt()
    164  TensorIterator sub_iter(*this);   in foreach_reduced_elt()
|
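two_pass_reduction is the multi-threaded CPU strategy visible in the snippets: pass one has each thread reduce its slice into a private row of a buffer (buffer_0), pass two reduces the buffer into the real output (unsqueezed). A standalone sketch of the idea in plain C++ (the upstream code does the same with a buffer tensor and TensorIterator::reduce_op rather than raw loops):

    #include <cstdint>
    #include <vector>
    #include <ATen/Parallel.h>

    // Sketch: two-pass parallel sum.
    float two_pass_sum_sketch(const float* data, int64_t n, int64_t num_chunks) {
      std::vector<float> partials(num_chunks, 0.0f);
      at::parallel_for(0, num_chunks, /*grain_size=*/1, [&](int64_t begin, int64_t end) {
        for (int64_t c = begin; c < end; c++) {
          const int64_t lo = c * n / num_chunks;
          const int64_t hi = (c + 1) * n / num_chunks;
          float acc = 0.0f;
          for (int64_t i = lo; i < hi; i++) acc += data[i];  // pass 1: private partial
          partials[c] = acc;
        }
      });
      float total = 0.0f;
      for (float p : partials) total += p;  // pass 2: fold the partials
      return total;
    }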
D | FunctionOfAMatrixUtils.h |
      7  struct TensorIterator;
     12  TensorIterator& iter,
|
D | LinearAlgebra.h |
     10  struct TensorIterator;
     15  using addr_fn = void (*)(TensorIterator &, const Scalar& beta, const Scalar& alpha);
|
/external/pytorch/aten/src/ATen/native/quantized/ |
D | FakeQuantAffine.h |
      9  struct TensorIterator;
     33  TensorIterator& iter,
     46  TensorIterator &iter,
     51  TensorIterator &iter,
     52  TensorIterator &iter_mask,
     59  TensorIterator &iter,
|
/external/pytorch/aten/src/ATen/ |
D | TensorIterator.h |
    246  struct TensorIterator;
    378  std::unique_ptr<TensorIterator> split(int dim);
    736  struct TORCH_API TensorIterator final : public TensorIteratorBase {
    737  TensorIterator() : TensorIteratorBase() {}   in TensorIterator() function
    739  TensorIterator(const TensorIteratorBase& iter) : TensorIteratorBase(iter) {}   in TensorIterator() function
    744  static TensorIterator binary_float_op(
    748  static TensorIterator binary_op(
    752  static TensorIterator borrowing_binary_op(
    757  static TensorIterator comparison_op(
    761  static TensorIterator unary_op(TensorBase& out, const TensorBase& a);
    [all …]
|
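The static factories at the bottom of TensorIterator.h are the usual entry point: each builds a fully-configured iterator for a fixed arity, which then feeds a loop helper such as cpu_kernel or gpu_kernel. An end-to-end sketch on CPU (dtype handling trimmed to float):

    #include <ATen/ATen.h>
    #include <ATen/TensorIterator.h>
    #include <ATen/native/cpu/Loops.h>

    // Sketch: build a binary iterator, then run an elementwise kernel over it.
    void scaled_add_sketch(at::Tensor& out, const at::Tensor& a, const at::Tensor& b) {
      auto iter = at::TensorIterator::binary_op(out, a, b);
      at::native::cpu_kernel(iter, [](float x, float y) -> float {
        return x + 2.0f * y;  // any binary elementwise op fits here
      });
    }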
D | TensorIterator.cpp |
   1062  TensorIterator TensorIterator::binary_op(TensorBase& out, const TensorBase& a, const TensorBase& b)…   in binary_op()
   1063  TensorIterator iter;   in binary_op()
   1068  TensorIterator TensorIterator::borrowing_binary_op(   in borrowing_binary_op()
   1070  TensorIterator iter;   in borrowing_binary_op()
   1075  TensorIterator TensorIterator::binary_float_op(TensorBase& out, const TensorBase& a, const TensorBa…   in binary_float_op()
   1076  TensorIterator iter;   in binary_float_op()
   1081  TensorIterator TensorIterator::comparison_op(TensorBase& out, const TensorBase& a,   in comparison_op()
   1083  TensorIterator iter;   in comparison_op()
   1088  TensorIterator TensorIterator::unary_op(TensorBase& out, const TensorBase& a) {   in unary_op()
   1089  TensorIterator iter;   in unary_op()
   [all …]
|
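Each truncated factory body above pairs a default-constructed TensorIterator with a matching build_* call on TensorIteratorBase. build_binary_op is declared in TensorIterator.h; the body below is an assumption reconstructing what the listing elides, not a verbatim copy:

    // Sketch of the factory shape suggested by the listing (body assumed).
    at::TensorIterator binary_op_sketch(at::TensorBase& out,
                                        const at::TensorBase& a,
                                        const at::TensorBase& b) {
      at::TensorIterator iter;
      iter.build_binary_op(out, a, b);  // configure common dtype/shape, then build
      return iter;
    }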
/external/pytorch/aten/src/ATen/native/cpu/ |
D | RangeFactoriesKernel.cpp |
     21  static void arange_kernel(TensorIterator& iter, const Scalar& scalar_start, const Scalar& scalar_st…   in arange_kernel()
     29  TensorIterator it(iter);   in arange_kernel()
     45  static void linspace_kernel(TensorIterator& iter, const Scalar& scalar_start, const Scalar& scalar_…   in linspace_kernel()
     56  TensorIterator it(iter);   in linspace_kernel()
|
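Both kernels fill the output purely from its linear index; the per-thread TensorIterator it(iter) copies in the snippets let each parallel chunk walk its own sub-range. The value rule itself is simple; a plain-C++ sketch of the linspace math the kernel implements:

    #include <cstdint>
    #include <vector>

    // Sketch: element i gets start + i * step, with
    // step = (end - start) / (steps - 1); one element degenerates to start.
    std::vector<double> linspace_sketch(double start, double end, int64_t steps) {
      std::vector<double> out(steps);
      const double step = steps > 1 ? (end - start) / (steps - 1) : 0.0;
      for (int64_t i = 0; i < steps; i++) {
        out[i] = start + i * step;
      }
      return out;
    }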
D | IndexKernel.cpp |
     56  TensorIterator& iter,   in cpu_take_put_kernel()
    107  TensorIterator& iter,   in put_kernel()
    145  TensorIterator& iter,   in take_kernel()
    156  void index_put_kernel(TensorIterator& iter, IntArrayRef index_size, IntArrayRef index_stride, bool …   in index_put_kernel()
    195  TensorIterator& iter,   in index_fill_kernel()
    255  TensorIterator& iter,   in index_copy_kernel()
    318  void cpu_masked_fill_kernel(TensorIterator& iter, scalar_t value) {   in cpu_masked_fill_kernel()
    333  void masked_fill_kernel(TensorIterator& iter, const Scalar& value) {   in masked_fill_kernel()
    345  void cpu_masked_scatter_kernel(TensorIterator& iter, const TensorBase& source) {   in cpu_masked_scatter_kernel()
    368  void masked_scatter_kernel(TensorIterator& iter, const TensorBase& source) {   in masked_scatter_kernel()
    [all …]
|
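masked_fill_kernel is representative of the masked_* group: the iterator pairs each destination element with its mask element, and the loop writes the fill value only where the mask is set. A sketch assuming an iterator whose output aliases the destination, with the destination and a bool mask as inputs (float-only; the real kernel dispatches over dtypes):

    #include <ATen/TensorIterator.h>
    #include <ATen/native/cpu/Loops.h>

    // Sketch: out = mask ? value : self, elementwise.
    static void masked_fill_kernel_sketch(at::TensorIterator& iter, float value) {
      at::native::cpu_kernel(iter, [value](float self, bool mask) -> float {
        return mask ? value : self;
      });
    }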
D | ReduceOpsKernel.cpp |
    138  static void std_var_kernel_impl(TensorIterator& iter, double correction, bool take_sqrt) {   in std_var_kernel_impl()
    151  static void prod_kernel_impl(TensorIterator& iter) {   in prod_kernel_impl()
    194  void norm_kernel_cpu_impl(TensorIterator& iter, const double& val) {   in norm_kernel_cpu_impl()
    211  TensorIterator& iter,   in norm_kernel_tensor_iterator_impl()
    285  static void and_kernel_impl(TensorIterator& iter) {   in and_kernel_impl()
    323  static void or_kernel_impl(TensorIterator& iter) {   in or_kernel_impl()
    357  static void min_values_kernel_impl(TensorIterator& iter) {   in min_values_kernel_impl()
    378  static void max_values_kernel_impl(TensorIterator& iter) {   in max_values_kernel_impl()
    388  static void argmax_kernel_impl(TensorIterator &iter) {   in argmax_kernel_impl()
    412  static void argmin_kernel_impl(TensorIterator &iter) {   in argmin_kernel_impl()
|
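The CPU reductions mirror their CUDA counterparts but add an explicit SIMD path: binary_kernel_reduce_vec takes a scalar combine, a Vectorized combine, and the identity. A sketch matching min_values_kernel_impl (dtype dispatch trimmed to float):

    #include <ATen/TensorIterator.h>
    #include <ATen/cpu/vec/vec.h>
    #include <ATen/native/cpu/Reduce.h>
    #include <algorithm>
    #include <limits>

    // Sketch: CPU min-values reduction with scalar and lane-wise combines.
    static void min_values_kernel_sketch(at::TensorIterator& iter) {
      at::native::binary_kernel_reduce_vec(
          iter,
          [](float a, float b) -> float { return std::min(a, b); },
          [](at::vec::Vectorized<float> a, at::vec::Vectorized<float> b) {
            return at::vec::minimum(a, b);  // per-lane min
          },
          std::numeric_limits<float>::infinity());  // identity for min
    }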
/external/pytorch/aten/src/ATen/native/quantized/cuda/ |
D | FakeQuantizeCore.cu |
     93  TensorIterator& iter,   in _fake_quantize_grad_learnable_tensor_kernel_cuda()
    127  TensorIterator & iter,   in _fake_quant_per_channel_cachemask_cuda_helper()
    128  TensorIterator & iter_mask,   in _fake_quant_per_channel_cachemask_cuda_helper()
    184  TensorIterator &iter, TensorIterator &iter_mask, int64_t quant_min, int64_t quant_max) {   in fake_quant_per_channel_cachemask_cuda()
    190  void _fake_quantize_grad_learnable_channel_kernel_cuda(TensorIterator &iter, int64_t quant_min, int…   in _fake_quantize_grad_learnable_channel_kernel_cuda()
|
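Every kernel in this file implements the same round-trip: quantize to the integer grid, clamp to [quant_min, quant_max], then dequantize, so the output stays float but carries the quantization error; the cachemask variants also record per element whether it was clamped. A sketch of the forward math as an elementwise CUDA lambda (parameters captured by value; the per-channel variants look up scale/zero_point per channel instead):

    #include <ATen/TensorIterator.h>
    #include <ATen/native/cuda/Loops.cuh>

    // Sketch: out = (clamp(round(x / scale) + zp, qmin, qmax) - zp) * scale.
    void fake_quantize_kernel_sketch(at::TensorIterator& iter, float scale,
                                     int64_t zero_point, int64_t qmin, int64_t qmax) {
      const float inv_scale = 1.0f / scale;
      at::native::gpu_kernel(iter, [=] GPU_LAMBDA(float x) -> float {
        int64_t q = static_cast<int64_t>(std::nearbyint(x * inv_scale)) + zero_point;
        q = q < qmin ? qmin : (q > qmax ? qmax : q);  // clamp to the quant grid
        return (q - zero_point) * scale;
      });
    }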
/external/pytorch/aten/src/ATen/native/sparse/ |
D | SparseFactories.h |
      5  struct TensorIterator;
     11  void (*)(TensorIterator&, const TensorBase&, TensorBase&, TensorBase&);
|