
Searched refs:TensorIteratorBase (Results 1 – 25 of 155) sorted by relevance

/external/pytorch/aten/src/ATen/native/
Activation.h
13 struct TensorIteratorBase;
44 using structured_activation_fn = void (*)(TensorIteratorBase&);
45 using structured_activation_backward_fn = void (*)(TensorIteratorBase&);
49 using softplus_fn = void (*)(TensorIteratorBase&, const c10::Scalar&, const c10::Scalar&);
50 using softplus_backward_fn = void (*)(TensorIteratorBase&, const c10::Scalar&, const c10::Scalar&);
51 using threshold_fn = void (*)(TensorIteratorBase&, const c10::Scalar&, const c10::Scalar&);
53 using hardsigmoid_fn = void(*)(TensorIteratorBase&);
54 using hardsigmoid_backward_fn = void(*)(TensorIteratorBase&);
57 using shrink_fn = void (*)(TensorIteratorBase&, const c10::Scalar&);
58 using softshrink_fn = void (*)(TensorIteratorBase&, const c10::Scalar&);
[all …]
UnaryOps.h
11 struct TensorIteratorBase;
16 using unary_fn = void(*)(TensorIteratorBase&);
17 using unary_fn_with_scalar = void(*)(TensorIteratorBase&, const Scalar& a);
20 void conj_kernel(TensorIteratorBase &iter);
21 void neg_kernel(TensorIteratorBase &iter);
22 void reciprocal_kernel(TensorIteratorBase &iter);
23 void rsqrt_kernel(TensorIteratorBase& iter);
24 void sqrt_kernel(TensorIteratorBase& iter);
98 DECLARE_DISPATCH(void(*)(TensorIteratorBase&, const double, const double, std::optional<Generator>)…
99 DECLARE_DISPATCH(void(*)(TensorIteratorBase&, const double, std::optional<Generator>), exponential_…
[all …]
TensorIteratorReduce.cpp
18 using loop2d_t = TensorIteratorBase::loop2d_t;
20 static bool use_two_pass_reduction(TensorIteratorBase& iter);
21 static void two_pass_reduction(TensorIteratorBase& iter, loop2d_t loop);
22 static void parallel_dim_reduction(TensorIteratorBase& iter, loop2d_t loop);
24 void TensorIteratorBase::parallel_reduce(loop2d_t loop) { in parallel_reduce()
37 static bool use_two_pass_reduction(TensorIteratorBase& iter) { in use_two_pass_reduction()
41 static void two_pass_reduction(TensorIteratorBase& iter, loop2d_t loop) { in two_pass_reduction()
76 static int find_split_dim(TensorIteratorBase& iter) { in find_split_dim()
95 round_columns(TensorIteratorBase& iter, int dim, int multiple, int64_t begin, int64_t end) { in round_columns()
104 static void parallel_dim_reduction(TensorIteratorBase& iter, loop2d_t loop) { in parallel_dim_reduction()
[all …]
TensorCompare.h
12 struct TensorIteratorBase;
28 using is_infinity_op_fn = void (*)(TensorIteratorBase &);
35 using clamp_tensor_fn = void (*)(TensorIteratorBase &);
42 DECLARE_DISPATCH(void (*)(TensorIteratorBase &, const c10::Scalar&, const c10::Scalar&), clamp_scal…
43 DECLARE_DISPATCH(void (*)(TensorIteratorBase &, c10::Scalar), clamp_min_scalar_stub);
44 DECLARE_DISPATCH(void (*)(TensorIteratorBase &, c10::Scalar), clamp_max_scalar_stub);
BinaryOps.h
11 struct TensorIteratorBase;
45 using structured_binary_fn_alpha = void(*)(TensorIteratorBase&, const Scalar& alpha);
46 using structured_binary_fn_double = void(*)(TensorIteratorBase&, double);
47 using structured_binary_fn = void(*)(TensorIteratorBase&);
49 using binary_fn_alpha = void(*)(TensorIteratorBase&, const Scalar& alpha);
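
The headers in this directory group mostly declare function-pointer aliases of the shape void (*)(TensorIteratorBase&, ...): a kernel receives an already-built iterator and loops over its operands, and each alias is wired to a DispatchStub so every backend registers its own implementation. As a rough sketch only (example_unary_stub and my_unary_kernel_cpu are hypothetical names, not part of the headers listed above, and the registration assumes a file under native/cpu/ compiled once per CPU capability), a CPU kernel matching the unary_fn shape could look like this:

```cpp
#include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/cpu/Loops.h>

namespace at::native {

// Hypothetical stub with the same shape as the unary_fn alias above.
using example_unary_fn = void (*)(TensorIteratorBase&);
DECLARE_DISPATCH(example_unary_fn, example_unary_stub);
DEFINE_DISPATCH(example_unary_stub);

// CPU implementation: dispatch on the iterator's dtype, then run an
// element-wise loop over its operands via cpu_kernel().
static void my_unary_kernel_cpu(TensorIteratorBase& iter) {
  AT_DISPATCH_FLOATING_TYPES(iter.common_dtype(), "my_unary_cpu", [&] {
    cpu_kernel(iter, [](scalar_t x) -> scalar_t { return x + scalar_t(1); });
  });
}

// In-tree kernels register from a per-capability translation unit; shown
// here for completeness.
REGISTER_DISPATCH(example_unary_stub, &my_unary_kernel_cpu);

} // namespace at::native
```

The binary and activation aliases follow the same pattern; only the lambda arity and the extra Scalar/double parameters change.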
/external/pytorch/aten/src/ATen/native/cpu/
UnaryOpsKernel.cpp
36 static void sigmoid_kernel(TensorIteratorBase& iter) { in sigmoid_kernel()
97 void LogitMKLKernel(T eps, TensorIteratorBase* it) { in LogitMKLKernel()
134 void LogitMKLKernel(T eps, TensorIteratorBase* it) { in LogitMKLKernel()
140 static void logit_kernel(TensorIteratorBase& iter, const Scalar& eps_scalar) { in logit_kernel()
193 static void abs_kernel(TensorIteratorBase& iter) { in abs_kernel()
209 static void angle_kernel(TensorIteratorBase& iter) { in angle_kernel()
219 void conj_kernel(TensorIteratorBase& iter) { in conj_kernel()
234 static void bitwise_not_kernel(TensorIteratorBase& iter) { in bitwise_not_kernel()
257 static void frac_kernel(TensorIteratorBase& iter) { in frac_kernel()
266 static void logical_not_kernel(TensorIteratorBase& iter) { in logical_not_kernel()
[all …]
BinaryOpsKernel.cpp
69 void atan2_kernel(TensorIteratorBase& iter) { in atan2_kernel()
118 void mul_kernel(TensorIteratorBase& iter) { in mul_kernel()
160 void div_true_kernel(TensorIteratorBase& iter) { in div_true_kernel()
196 void div_trunc_kernel(TensorIteratorBase& iter) { in div_trunc_kernel()
269 void div_floor_kernel(TensorIteratorBase& iter) { in div_floor_kernel()
322 void remainder_kernel(TensorIteratorBase& iter) { in remainder_kernel()
381 void bitwise_and_kernel(TensorIteratorBase& iter) { in bitwise_and_kernel()
394 void bitwise_or_kernel(TensorIteratorBase& iter) { in bitwise_or_kernel()
407 void bitwise_xor_kernel(TensorIteratorBase& iter) { in bitwise_xor_kernel()
422 void lshift_kernel(TensorIteratorBase& iter) { in lshift_kernel()
[all …]
DistributionKernels.cpp
29 static void cauchy_kernel(TensorIteratorBase& iter, double median, double sigma, std::optional<Gene… in cauchy_kernel()
102 static void exponential_kernel_default(TensorIteratorBase& iter, double lambda, std::optional<Gener… in exponential_kernel_default()
108 void exponential_kernel(TensorIteratorBase& iter, double lambda, std::optional<Generator> gen) { in exponential_kernel()
112 void exponential_kernel(TensorIteratorBase &iter, double lambda, std::optional<Generator> gen) { in exponential_kernel()
198 static void geometric_kernel(TensorIteratorBase& iter, double p, std::optional<Generator> gen) { in geometric_kernel()
203 static void log_normal_kernel(TensorIteratorBase& iter, double mean, double std, std::optional<Gene… in log_normal_kernel()
208 void uniform_kernel(TensorIteratorBase& iter, double from, double to, std::optional<Generator> gen)… in uniform_kernel()
218 static void random_from_to_kernel(TensorIteratorBase& iter, uint64_t range, int64_t base, std::opti… in random_from_to_kernel()
223 static void random_kernel(TensorIteratorBase& iter, std::optional<Generator> gen) { in random_kernel()
231 static void random_full_64_bits_range_kernel(TensorIteratorBase& iter, std::optional<Generator> gen… in random_full_64_bits_range_kernel()
DistributionTemplates.h
26 void random_from_to_kernel(TensorIteratorBase& iter, uint64_t range, int64_t base, RNG generator) { in random_from_to_kernel()
40 void random_full_64_bits_range_kernel(TensorIteratorBase& iter, RNG generator) { in random_full_64_bits_range_kernel()
59 …void operator()(TensorIteratorBase& iter, uint64_t range, int64_t base, std::optional<Generator> g… in operator()
62 void operator()(TensorIteratorBase& iter, std::optional<Generator> gen) { in operator()
68 void random_kernel(TensorIteratorBase& iter, RNG generator) { in random_kernel()
80 void operator()(TensorIteratorBase& iter, std::optional<Generator> gen) { in operator()
267 void uniform_kernel(TensorIteratorBase& iter, double from_, double to_, RNG generator) { in uniform_kernel()
281 void operator()(TensorIteratorBase& iter, double from, double to, std::optional<Generator> gen) { in operator()
289 void cauchy_kernel(TensorIteratorBase& iter, double median, double sigma, RNG generator) { in cauchy_kernel()
301 …void operator()(TensorIteratorBase& iter, double median, double sigma, std::optional<Generator> ge… in operator()
[all …]
/external/pytorch/aten/src/ATen/
TensorIterator.cpp
33 using DimMask = TensorIteratorBase::DimMask;
34 using PtrVector = TensorIteratorBase::PtrVector;
35 using loop2d_t = TensorIteratorBase::loop2d_t;
36 using StrideVector = TensorIteratorBase::StrideVector;
231 void TensorIteratorBase::reorder_dimensions() { in reorder_dimensions()
311 ScalarType TensorIteratorBase::compute_common_dtype() { in compute_common_dtype()
346 void TensorIteratorBase::compute_types(const TensorIteratorConfig& config) { in compute_types()
551 StrideVector TensorIteratorBase::compatible_stride(int64_t element_size) const { in compatible_stride()
561 DimVector TensorIteratorBase::invert_perm(IntArrayRef input) const { in invert_perm()
573 void TensorIteratorBase::allocate_or_resize_outputs() { in allocate_or_resize_outputs()
[all …]
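
TensorIterator.cpp is where the iterator itself is assembled: compute_types() resolves a common dtype, reorder_dimensions() and compatible_stride() pick a memory-friendly traversal order, and allocate_or_resize_outputs() materializes any missing outputs. A minimal sketch of how calling code typically builds and walks the iterator that the kernels above receive, assuming float operands throughout (iterate_example is a hypothetical name, not a function from this file):

```cpp
#include <ATen/ATen.h>
#include <ATen/TensorIterator.h>

// Build an iterator over two inputs and one output, then walk it with
// for_each(). The loop body here is a plain float add, standing in for the
// vectorized loops the real kernels install.
static void iterate_example(at::Tensor& out, const at::Tensor& a, const at::Tensor& b) {
  auto iter = at::TensorIteratorConfig()
                  .add_output(out)
                  .add_input(a)
                  .add_input(b)
                  .build();  // runs compute_types(), reorder_dimensions(), ...

  iter.for_each([](char** data, const int64_t* strides, int64_t n) {
    // data[0] is the output pointer, data[1]/data[2] the inputs; strides are
    // per-operand byte strides for this 1-D slice of length n.
    for (int64_t i = 0; i < n; ++i) {
      *reinterpret_cast<float*>(data[0] + i * strides[0]) =
          *reinterpret_cast<const float*>(data[1] + i * strides[1]) +
          *reinterpret_cast<const float*>(data[2] + i * strides[2]);
    }
  });
}
```

Kernels like those in the listings never construct the iterator themselves; they receive an already-built iterator and only supply the loop.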
/external/pytorch/aten/src/ATen/native/cuda/
Activation.h
6 struct TensorIteratorBase;
12 void launch_glu_backward_kernel(const TensorIteratorBase& iter,
15 void launch_log_sigmoid_forward_kernel(TensorIteratorBase& iter);
17 void GeluCUDAKernelImpl(TensorIteratorBase& it, GeluType approximate);
18 void GeluBackwardCUDAKernelImpl(TensorIteratorBase& it, GeluType approximate);
Distributions.h
5 struct TensorIteratorBase;
17 TensorIteratorBase &iter, CUDAGeneratorImpl *gen);
19 void launch_dirichlet_kernel(TensorIteratorBase &iter);
21 void launch_standard_gamma_grad_kernel(TensorIteratorBase &iter);
23 void launch_dirichlet_grad_kernel(TensorIteratorBase &iter);
CompareKernels.cu
47 void compare_scalar_kernel(TensorIteratorBase &iter, OpType op, scalar_t rhs) { in compare_scalar_kernel()
55 void compare_kernel_impl(TensorIteratorBase &iter, OpType op) { in compare_kernel_impl()
75 C10_NOINLINE void compare_kernel_with_scalars(TensorIteratorBase &iter, OpType op) { in compare_kernel_with_scalars()
82 void ge_kernel_cuda(TensorIteratorBase& iter) { in ge_kernel_cuda()
86 void gt_kernel_cuda(TensorIteratorBase& iter) { in gt_kernel_cuda()
90 void le_kernel_cuda(TensorIteratorBase& iter) { in le_kernel_cuda()
94 void lt_kernel_cuda(TensorIteratorBase& iter) { in lt_kernel_cuda()
PowKernel.cu
15 void rsqrt_kernel_cuda(TensorIteratorBase& iter);
16 void sqrt_kernel_cuda(TensorIteratorBase& iter);
17 void reciprocal_kernel_cuda(TensorIteratorBase& iter);
21 void pow_tensor_scalar_kernel(TensorIteratorBase& iter, const Scalar& exp_scalar);
24 void pow_scalar_tensor_impl(TensorIteratorBase& iter, scalar_t base) { in pow_scalar_tensor_impl()
31 void pow_scalar_tensor_impl(TensorIteratorBase& iter, c10::complex<value_t> base) { in pow_scalar_tensor_impl()
43 void pow_scalar_tensor_impl(TensorIteratorBase& iter, c10::complex<at::Half> base) { in pow_scalar_tensor_impl()
79 void pow_chalf_tensor_scalar_impl(TensorIteratorBase& iter, const Scalar& exp_scalar) { in pow_chalf_tensor_scalar_impl()
99 void pow_tensor_tensor_kernel(TensorIteratorBase& iter) { in pow_tensor_tensor_kernel()
146 void pow_tensor_scalar_kernel_impl(TensorIteratorBase& iter, in pow_tensor_scalar_kernel_impl()
[all …]
UnarySpecialOpsKernel.cu
23 void exp2_kernel_cuda(TensorIteratorBase& iter) { in exp2_kernel_cuda()
45 void i0_kernel_cuda(TensorIteratorBase& iter) { in i0_kernel_cuda()
67 void i0e_kernel_cuda(TensorIteratorBase& iter) { in i0e_kernel_cuda()
88 void i1_kernel_cuda(TensorIteratorBase& iter) { in i1_kernel_cuda()
106 void i1e_kernel_cuda(TensorIteratorBase& iter) { in i1e_kernel_cuda()
124 void sigmoid_kernel_cuda(TensorIteratorBase& iter) { in sigmoid_kernel_cuda()
163 void sinc_kernel_cuda(TensorIteratorBase& iter) { in sinc_kernel_cuda()
193 void logit_kernel_cuda(TensorIteratorBase& iter, const Scalar& eps_scalar) { in logit_kernel_cuda()
221 void ndtri_kernel_cuda(TensorIteratorBase& iter) { in ndtri_kernel_cuda()
238 void log_ndtr_kernel_cuda(TensorIteratorBase& iter) { in log_ndtr_kernel_cuda()
[all …]
UnaryOpsKernel.cu
23 void bitwise_not_kernel_cuda(TensorIteratorBase& iter) { in bitwise_not_kernel_cuda()
38 void exp_kernel_cuda(TensorIteratorBase& iter) { in exp_kernel_cuda()
71 void expm1_kernel_cuda(TensorIteratorBase& iter) { in expm1_kernel_cuda()
96 void rsqrt_kernel_cuda(TensorIteratorBase& iter) { in rsqrt_kernel_cuda()
135 void sqrt_kernel_cuda(TensorIteratorBase& iter) { in sqrt_kernel_cuda()
168 void clamp_kernel_cuda(TensorIteratorBase& iter, const Scalar& min_value, const Scalar& max_value) { in clamp_kernel_cuda()
183 void clamp_min_kernel_cuda(TensorIteratorBase& iter, const Scalar& min_value) { in clamp_min_kernel_cuda()
197 void clamp_max_kernel_cuda(TensorIteratorBase& iter, const Scalar& max_value) { in clamp_max_kernel_cuda()
223 TensorIteratorBase& iter, in nan_to_num_kernel_cuda()
264 void frexp_kernel_cuda(TensorIteratorBase& iter) { in frexp_kernel_cuda()
TensorCompare.cu
24 void isposinf_kernel_impl(TensorIteratorBase &iter) { in isposinf_kernel_impl()
33 void isneginf_kernel_impl(TensorIteratorBase &iter) { in isneginf_kernel_impl()
42 void clamp_kernel_impl(TensorIteratorBase& iter) { in clamp_kernel_impl()
59 void inline launch_clamp_scalar(TensorIteratorBase& iter, Scalar lim0, Scalar lim1, at::native::det… in launch_clamp_scalar()
81 void clamp_scalar_kernel_impl(TensorIteratorBase& iter, const Scalar& min, const Scalar& max) { in clamp_scalar_kernel_impl()
85 void clamp_min_scalar_kernel_impl(TensorIteratorBase& iter, Scalar min) { in clamp_min_scalar_kernel_impl()
89 void clamp_max_scalar_kernel_impl(TensorIteratorBase& iter, Scalar max) { in clamp_max_scalar_kernel_impl()
DistributionTemplates.h
115 void distribution_nullary_kernel(at::TensorIteratorBase& iter, in distribution_nullary_kernel()
235 void distribution_binary_kernel(TensorIteratorBase &iter, PhiloxCudaState philox_args, const func_t… in distribution_binary_kernel()
287 void random_from_to_kernel(TensorIteratorBase& iter, uint64_t range, int64_t base, RNG gen) { in random_from_to_kernel()
327 void random_full_64_bits_range_kernel(TensorIteratorBase& iter, RNG gen) { in random_full_64_bits_range_kernel()
354 …void operator()(TensorIteratorBase& iter, uint64_t range, int64_t base, std::optional<Generator> g… in operator()
357 void operator()(TensorIteratorBase& iter, std::optional<Generator> gen) { in operator()
363 void random_kernel(TensorIteratorBase& iter, RNG gen) { in random_kernel()
394 void operator()(TensorIteratorBase& iter, RNG gen) { in operator()
402 void uniform_and_transform(TensorIteratorBase& iter, RNG gen, transform_t transform) { in uniform_and_transform()
417 void normal_and_transform(TensorIteratorBase& iter, RNG gen, transform_t transform) { in normal_and_transform()
[all …]
UnarySignKernels.cu
17 void logical_not_kernel_cuda(TensorIteratorBase& iter) { in logical_not_kernel_cuda()
29 void neg_kernel_cuda(TensorIteratorBase& iter) { in neg_kernel_cuda()
62 void sign_kernel_cuda(TensorIteratorBase& iter){ in sign_kernel_cuda()
76 void signbit_kernel_cuda(TensorIteratorBase& iter){ in signbit_kernel_cuda()
100 void sgn_kernel_cuda(TensorIteratorBase& iter){ in sgn_kernel_cuda()
UnaryFractionKernels.cu
24 void ceil_kernel_cuda(TensorIteratorBase& iter) { in ceil_kernel_cuda()
35 void frac_kernel_cuda(TensorIteratorBase& iter) { in frac_kernel_cuda()
57 void floor_kernel_cuda(TensorIteratorBase& iter) { in floor_kernel_cuda()
99 void reciprocal_kernel_cuda(TensorIteratorBase& iter) { in reciprocal_kernel_cuda()
131 void round_kernel_cuda(TensorIteratorBase& iter) { in round_kernel_cuda()
143 void round_decimals_kernel_cuda(TensorIteratorBase& iter, int64_t decimals) { in round_decimals_kernel_cuda()
180 void trunc_kernel_cuda(TensorIteratorBase& iter) { in trunc_kernel_cuda()
BinaryMiscOpsKernels.cu
15 void smooth_l1_kernel_cuda(TensorIteratorBase& iter, double beta) { in smooth_l1_kernel_cuda()
35 void mse_kernel_cuda(TensorIteratorBase& iter) { in mse_kernel_cuda()
44 void xlogy_kernel_cuda(TensorIteratorBase& iter) { in xlogy_kernel_cuda()
58 void xlog1py_kernel_cuda(TensorIteratorBase& iter) { in xlog1py_kernel_cuda()
UnaryGammaKernels.cu
19 void digamma_kernel_cuda(TensorIteratorBase& iter) { in digamma_kernel_cuda()
44 void trigamma_kernel_cuda(TensorIteratorBase& iter) { in trigamma_kernel_cuda()
68 void polygamma_kernel_cuda(TensorIteratorBase& iter, int64_t n) { in polygamma_kernel_cuda()
105 void lgamma_kernel_cuda(TensorIteratorBase& iter) { in lgamma_kernel_cuda()
Loops.cuh
18 static OffsetCalculator<N> make_input_offset_calculator(const TensorIteratorBase& iter) { in make_input_offset_calculator()
32 static OffsetCalculator<num_outputs> make_output_offset_calculator(const TensorIteratorBase& iter) { in make_output_offset_calculator()
76 void gpu_kernel_nocast(TensorIteratorBase& iter, const func_t& f) { in gpu_kernel_nocast()
99 void gpu_kernel(TensorIteratorBase& iter, const func_t& f) { in gpu_kernel()
167 void opmath_gpu_kernel_with_scalars(TensorIteratorBase& iter, const func_t& f) { in opmath_gpu_kernel_with_scalars()
197 void opmath_symmetric_gpu_kernel_with_scalars(TensorIteratorBase& iter, const func_t& f) { in opmath_symmetric_gpu_kernel_with_scalars()
239 void gpu_kernel_with_scalars(TensorIteratorBase& iter, const func_t& f) { in gpu_kernel_with_scalars()
274 void gpu_kernel_multiple_outputs_impl(TensorIteratorBase& iter, const func_t& f) { in gpu_kernel_multiple_outputs_impl()
305 void gpu_kernel_multiple_outputs(TensorIteratorBase& iter, const func_t& f) { in gpu_kernel_multiple_outputs()
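
Loops.cuh supplies the generic launchers (gpu_kernel, gpu_kernel_with_scalars, gpu_kernel_multiple_outputs) that the CUDA kernels in this directory funnel into. A hedged sketch of the usual call pattern, assuming a .cu translation unit and the GPU_LAMBDA host/device annotation from Loops.cuh (my_square_kernel_cuda is a hypothetical name):

```cpp
#include <ATen/Dispatch.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/cuda/Loops.cuh>

namespace at::native {

// Dispatch on the iterator's dtype, then hand gpu_kernel() a device lambda
// that is applied element-wise; the launcher takes care of casting and of
// the grid/block configuration.
void my_square_kernel_cuda(TensorIteratorBase& iter) {
  AT_DISPATCH_FLOATING_TYPES(iter.common_dtype(), "my_square_cuda", [&] {
    gpu_kernel(iter, [] GPU_LAMBDA(scalar_t x) -> scalar_t { return x * x; });
  });
}

} // namespace at::native
```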
CompareEQKernel.cu
32 C10_NOINLINE void compare_eq_ne_kernel(TensorIteratorBase &iter, EqOpType op) { in compare_eq_ne_kernel()
39 void eq_kernel_cuda(TensorIteratorBase& iter) { in eq_kernel_cuda()
43 void ne_kernel_cuda(TensorIteratorBase& iter) { in ne_kernel_cuda()
/external/pytorch/aten/src/ATen/native/sparse/
ValidateCompressedIndicesKernel.cpp
14 static void launch(TensorIteratorBase& iter, const func_t& f) { in launch()
21 static void launch(TensorIteratorBase& iter, const func_t& f) { in launch()
27 static void launch(TensorIteratorBase& iter, const func_t& f, const vec_func_t& vec_f) { in launch()
