#define TORCH_ASSERT_NO_OPERATORS

#include <ATen/Dispatch.h>
#include <ATen/native/cuda/JitLoops.cuh>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/BinaryOps.h>
#include <ATen/native/Math.h>
#include <ATen/native/cuda/Math.cuh>
#include <ATen/native/cuda/jit_utils.h>

namespace at::native {
        namespace {
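            // Kernel name for the jiterator path; it must match the device function
            // defined inside chebyshev_polynomial_t_string.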
            CONSTEXPR_EXCEPT_WIN_CUDA char chebyshev_polynomial_t_name[] = "chebyshev_polynomial_t_forward";

            void chebyshev_polynomial_t_kernel_cuda(TensorIteratorBase& iterator) {
#if AT_USE_JITERATOR()
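                // Jiterator path: the kernel is compiled at runtime from the
                // chebyshev_polynomial_t_string CUDA source, avoiding ahead-of-time
                // compilation of this kernel for every dtype.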
                AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "chebyshev_polynomial_t_cuda", [&]() {
                    opmath_jitted_gpu_kernel_with_scalars<chebyshev_polynomial_t_name, scalar_t, scalar_t>(iterator, chebyshev_polynomial_t_string);
                });
#else
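                // Fallback path (jiterator disabled): a precompiled GPU lambda that
                // calls the shared chebyshev_polynomial_t_forward implementation.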
                AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "chebyshev_polynomial_t_cuda", [&]() {
                    gpu_kernel_with_scalars(iterator, []GPU_LAMBDA(scalar_t x, scalar_t n) -> scalar_t {
                        return chebyshev_polynomial_t_forward<scalar_t, true>(x, n);
                    });
                });
#endif
            } // chebyshev_polynomial_t_kernel_cuda
        } // namespace (anonymous)

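        // Register this kernel as the CUDA implementation behind chebyshev_polynomial_t_stub.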
        REGISTER_DISPATCH(chebyshev_polynomial_t_stub, &chebyshev_polynomial_t_kernel_cuda);
} // namespace at::native