#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#include <ATen/core/Tensor.h>
#include <ATen/Context.h>
#include <ATen/EmptyTensor.h>
#include <ATen/MemoryOverlap.h>
#include <c10/core/Storage.h>

#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Functions.h>
#include <ATen/NativeFunctions.h>
#include <ATen/CPUFunctions.h>
#else
#include <ATen/ops/_debug_has_internal_overlap_native.h>
#include <ATen/ops/_pin_memory.h>
#include <ATen/ops/_pin_memory_native.h>
#include <ATen/ops/empty_cpu_dispatch.h>
#include <ATen/ops/is_pinned_native.h>
#include <ATen/ops/pin_memory_native.h>
#endif

namespace at::native {

// Exposes at::has_internal_overlap as an operator for testing purposes
int64_t _debug_has_internal_overlap(const Tensor& self) {
  return static_cast<int64_t>(at::has_internal_overlap(self));
}

bool is_pinned(const Tensor& self, std::optional<Device> device) {
  std::optional<DeviceType> opt_device_type;
  if (device.has_value()) {
    TORCH_WARN_DEPRECATION(
        "The argument 'device' of Tensor.is_pinned() ",
        "is deprecated. Please do not pass this argument.")
    opt_device_type = device.value().type();
  }
  // Only CPU tensors can be pinned
  if (!self.is_cpu()) {
    return false;
  }
  // Use getAcceleratorHooksInterface to make is_pinned device-agnostic
  return at::globalContext().isPinnedPtr(self.storage().data(), opt_device_type);
}

Tensor pin_memory(const Tensor& self, std::optional<Device> device) {
  if (device.has_value()) {
    TORCH_WARN_DEPRECATION(
        "The argument 'device' of Tensor.pin_memory() ",
        "is deprecated. Please do not pass this argument.")
  }
  // Kind of mad that I have to do two dynamic dispatches here, pretty
  // annoying
  if (self.is_pinned(device)) {
    return self;
  }
  return at::_pin_memory(self, device);
}

Tensor _pin_memory(const Tensor& self, std::optional<Device> device) {
  TORCH_CHECK(
      self.device().is_cpu(),
      "cannot pin '",
      self.toString(),
      "' only dense CPU tensors can be pinned");
  // Use getAcceleratorHooksInterface to make pin_memory device-agnostic
  auto* allocator = device.has_value()
      ? at::globalContext().getPinnedMemoryAllocator(device.value().type())
      : at::globalContext().getPinnedMemoryAllocator();
  // Allocate a pinned (page-locked) storage large enough for self's
  // sizes/strides, then wrap it in a CPU tensor and copy the data over.
  auto storage = Storage(
      Storage::use_byte_size_t(),
      detail::computeStorageNbytes(
          self.sizes(), self.strides(), self.dtype().itemsize()),
      allocator,
      /*resizable=*/false);
  auto tensor = at::cpu::empty({0}, self.options())
                    .set_(storage, 0, self.sizes(), self.strides());
  tensor.copy_(self);
  return tensor;
}

} // namespace at::native