#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#include <ATen/core/Tensor.h>
#include <ATen/Dispatch.h>
#include <ATen/TensorIterator.h>
#include <ATen/native/cuda/Loops.cuh>

#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Functions.h>
#include <ATen/NativeFunctions.h>
#else
#include <ATen/ops/empty.h>
#include <ATen/ops/int_repr_native.h>
#endif

namespace at {
namespace native {

// Returns a tensor holding the raw integer values that back a quantized CUDA
// tensor (e.g. uint8 for quint8, int8 for qint8, int32 for qint32).
Tensor int_repr_quantized_cuda(const Tensor& self) {
  Tensor dst;
  AT_DISPATCH_QINT_TYPES(self.scalar_type(), "int_repr_quantized_cuda", [&]() {
    // Allocate the output with the underlying integer dtype and the same
    // memory format as the input.
    dst = at::empty(
        self.sizes(),
        self.options().dtype(UNDERLYING_TYPE),
        self.suggest_memory_format());
    auto iter = TensorIteratorConfig()
      .check_all_same_dtype(false)
      .add_output(dst)
      .add_input(self)
      .build();
    // Elementwise copy of the stored integer out of each quantized element.
    gpu_kernel(iter, [] GPU_LAMBDA(scalar_t value) -> underlying_t {
      return value.val_;
    });
  });
  return dst;
}

} // namespace native
} // namespace at
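
// Illustrative usage (not part of this file): a minimal sketch, assuming the
// public ATen API, of how the kernel above is reached from user code. Calling
// int_repr() on a quantized CUDA tensor dispatches to int_repr_quantized_cuda
// and yields the raw stored integers.
//
//   at::Tensor x = at::rand({4, 4}, at::device(at::kCUDA).dtype(at::kFloat));
//   at::Tensor q = at::quantize_per_tensor(x, /*scale=*/0.1, /*zero_point=*/10,
//                                          at::kQUInt8);
//   at::Tensor raw = q.int_repr();  // uint8 (kByte) tensor of stored values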