/external/pytorch/test/inductor/ |
D | test_torchinductor_codegen_dynamic_shapes.py |
    95   ("cpu", "cuda", "xpu"), is_skip=True
    98   ("cpu", "cuda", "xpu"), is_skip=True
    100  "test_to_device_dynamic_shapes": TestFailure(("cpu", "cuda", "xpu"), is_skip=True),
    136  "test_complex_fallback_dynamic_shapes": TestFailure(("cpu", "cuda", "xpu")),
    137  "test_adaptive_avg_pool2d2_dynamic_shapes": TestFailure(("cpu", "cuda", "xpu")),
    138  "test_adaptive_max_pool2d2_dynamic_shapes": TestFailure(("cpu", "cuda", "xpu")),
    139  "test_fractional_max_pool2d2_dynamic_shapes": TestFailure(("cpu", "cuda", "xpu")),
    140  "test_argmax_to_float_dynamic_shapes": TestFailure(("cpu", "cuda", "xpu")),
    141  "test_avg_pool2d7_dynamic_shapes": TestFailure(("cpu", "cuda", "xpu")),
    142  "test_avg_pool2d_backward4_dynamic_shapes": TestFailure(("cpu", "cuda", "xpu")),
    [all …]
|
/external/pytorch/test/ |
D | test_xpu.py |
    9   import torch.xpu._gpu_trace as gpu_trace
    28  print("XPU not available, skipping tests", file=sys.stderr)
    31  TEST_MULTIXPU = torch.xpu.device_count() > 1
    34  xpu_device = torch.device("xpu")
    72  current_device = torch.xpu.current_device()
    73  torch.xpu.set_device(current_device)
    74  self.assertEqual(current_device, torch.xpu.current_device())
    78  current_device = torch.xpu.current_device()
    79  target_device = (current_device + 1) % torch.xpu.device_count()
    81  with torch.xpu.device(target_device):
    [all …]
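The test above exercises the user-facing device-management API. A minimal standalone sketch of the same round-trip, guarded on availability the way the test is (all torch.xpu names appear in the snippet itself):

    import sys
    import torch

    if not torch.xpu.is_available():
        print("XPU not available, skipping", file=sys.stderr)
        sys.exit(0)

    # Setting the current device to itself must be a no-op.
    cur = torch.xpu.current_device()
    torch.xpu.set_device(cur)
    assert cur == torch.xpu.current_device()

    # With more than one device, a device context switches and restores.
    if torch.xpu.device_count() > 1:
        target = (cur + 1) % torch.xpu.device_count()
        with torch.xpu.device(target):
            assert torch.xpu.current_device() == target
        assert torch.xpu.current_device() == cur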
|
/external/pytorch/aten/src/ATen/test/ |
D | xpu_device_test.cpp |
    3   #include <ATen/xpu/XPUContext.h>
    4   #include <ATen/xpu/XPUDevice.h>
    8   EXPECT_EQ(at::xpu::is_available(), torch::xpu::is_available());
    9   if (!at::xpu::is_available()) {
    13  c10::xpu::DeviceProp* cur_device_prop = at::xpu::getCurrentDeviceProperties();
    14  c10::xpu::DeviceProp* device_prop = at::xpu::getDeviceProperties(0);
    22  if (!at::xpu::is_available()) {
    26  sycl::device& raw_device = at::xpu::get_raw_device(0);
    27  void* ptr = sycl::malloc_device(8, raw_device, at::xpu::get_device_context());
    29  at::Device device = at::xpu::getDeviceFromPtr(ptr);
    [all …]
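The same property queries are reachable from Python via torch.xpu.get_device_properties. A sketch; the attribute names `name` and `total_memory` are assumptions about the bound property struct, not taken from the snippet:

    import torch

    if torch.xpu.is_available():
        props = torch.xpu.get_device_properties(0)
        # `name` and `total_memory` are assumed fields of the property struct.
        print(props.name, props.total_memory)
        # The current device's properties come from the same per-device cache.
        assert torch.xpu.get_device_properties(torch.xpu.current_device()) is not None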
|
D | xpu_caching_host_allocator_test.cpp |
    5   #include <ATen/xpu/CachingHostAllocator.h>
    6   #include <ATen/xpu/XPUContext.h>
    7   #include <ATen/xpu/XPUEvent.h>
    9   #include <c10/xpu/XPUStream.h>
    14  if (!at::xpu::is_available()) {
    21  // TODO: Uncomment this line when op `pin_memory` is supported on XPU.
    23  ASSERT_TRUE(at::xpu::CachingHostAllocator_recordEvent(
    26      at::xpu::getCurrentXPUStream()));
    38  ASSERT_TRUE(at::xpu::CachingHostAllocator_recordEvent(
    41      at::xpu::getCurrentXPUStream()));
    [all …]
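At the Python level, the caching host allocator is driven through pinned tensors and non-blocking copies. A sketch, assuming `pin_memory` is supported on XPU in your build; the snippet's TODO notes it was not yet wired up when the test was written:

    import torch

    if torch.xpu.is_available():
        # Pinned host memory comes from the caching host allocator
        # (assumes XPU pin_memory support; see the TODO above).
        host = torch.ones(1024, pin_memory=True)
        # A non-blocking H2D copy records an event on the current XPU stream,
        # so the host block is only reused once the copy has completed.
        dev = host.to("xpu", non_blocking=True)
        torch.xpu.synchronize()
        assert torch.equal(dev.cpu(), host)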
|
D | xpu_event_test.cpp |
    3   #include <ATen/xpu/XPUEvent.h>
    5   #include <c10/xpu/test/impl/XPUTest.h>
    8   if (!at::xpu::is_available()) {
    11  auto stream = c10::xpu::getStreamFromPool();
    12  at::xpu::XPUEvent event;
    20  auto wait_stream0 = c10::xpu::getStreamFromPool();
    21  auto wait_stream1 = c10::xpu::getStreamFromPool();
    31  if (at::xpu::device_count() <= 1) {
    35  const auto stream0 = at::xpu::getStreamFromPool();
    36  at::xpu::XPUEvent event0;
    [all …]
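A Python sketch of the event pattern this test exercises: record on one stream, make another stream wait, and time the interval. `enable_timing`, `record`, `wait_event`, and `elapsed_time` follow the torch.xpu Stream/Event wrappers listed under streams.py below:

    import torch

    if torch.xpu.is_available():
        s0, s1 = torch.xpu.Stream(), torch.xpu.Stream()
        start = torch.xpu.Event(enable_timing=True)
        end = torch.xpu.Event(enable_timing=True)

        with torch.xpu.stream(s0):
            start.record()  # records on the current stream, s0
            x = torch.randn(2048, 2048, device="xpu") @ torch.randn(2048, 2048, device="xpu")
            end.record()

        s1.wait_event(end)   # s1 will not run past this point before `end` fires
        end.synchronize()
        print(f"matmul took {start.elapsed_time(end):.3f} ms")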
|
D | xpu_generator_test.cpp |
    4   #include <ATen/xpu/XPUContext.h>
    5   #include <ATen/xpu/XPUGeneratorImpl.h>
    12  if (!at::xpu::is_available()) {
    15  auto foo = at::xpu::detail::createXPUGenerator();
    21  if (!at::xpu::is_available()) {
    24  auto foo = at::xpu::detail::getDefaultXPUGenerator();
    25  auto bar = at::xpu::detail::getDefaultXPUGenerator();
    32  if (c10::xpu::device_count() >= 2) {
    33  foo = at::xpu::detail::getDefaultXPUGenerator(0);
    34  bar = at::xpu::detail::getDefaultXPUGenerator(0);
    [all …]
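The default per-device generator that getDefaultXPUGenerator returns is what makes seeded RNG reproducible from Python. A minimal sketch:

    import torch

    if torch.xpu.is_available():
        torch.xpu.manual_seed(42)
        a = torch.randn(4, device="xpu")
        torch.xpu.manual_seed(42)
        b = torch.randn(4, device="xpu")
        assert torch.equal(a, b)  # same default generator, same seed, same draws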
|
/external/pytorch/c10/xpu/test/impl/ |
D | XPUStreamTest.cpp |
    4   #include <c10/xpu/XPUStream.h>
    5   #include <c10/xpu/test/impl/XPUTest.h>
    12  return c10::xpu::device_count() > 0;
    22  c10::xpu::XPUStream copyStream = c10::xpu::getStreamFromPool();
    24  auto s = c10::xpu::getStreamFromPool();
    38  c10::xpu::XPUStream moveStream = c10::xpu::getStreamFromPool();
    40  auto s = c10::xpu::getStreamFromPool();
    59  c10::xpu::XPUStream stream = c10::xpu::getStreamFromPool();
    61  c10::xpu::setCurrentXPUStream(stream);
    62  c10::xpu::XPUStream cur_stream = c10::xpu::getCurrentXPUStream();
    [all …]
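The setCurrentXPUStream/getCurrentXPUStream round-trip at lines 61-62 looks like this from Python, where torch.xpu.stream() swaps the current stream and restores it on exit:

    import torch

    if torch.xpu.is_available():
        default = torch.xpu.current_stream()
        s = torch.xpu.Stream()
        assert s != default

        # Mirrors setCurrentXPUStream/getCurrentXPUStream in the C++ test.
        with torch.xpu.stream(s):
            assert torch.xpu.current_stream() == s
        assert torch.xpu.current_stream() == default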
|
D | XPUDeviceTest.cpp |
    3   #include <c10/xpu/XPUFunctions.h>
    6   return c10::xpu::device_count() > 0;
    14  c10::xpu::set_device(0);
    15  EXPECT_EQ(c10::xpu::current_device(), 0);
    17  if (c10::xpu::device_count() <= 1) {
    21  c10::xpu::set_device(1);
    22  EXPECT_EQ(c10::xpu::current_device(), 1);
    23  EXPECT_EQ(c10::xpu::exchange_device(0), 1);
    24  EXPECT_EQ(c10::xpu::current_device(), 0);
    32  c10::xpu::DeviceProp device_prop{};
    [all …]
|
D | XPUCachingAllocatorTest.cpp |
    4   #include <c10/xpu/XPUCachingAllocator.h>
    7   return c10::xpu::device_count() > 0;
    11  auto* allocator = c10::xpu::XPUCachingAllocator::get();
    22  c10::xpu::XPUCachingAllocator::emptyCache();
    23  auto* allocator = c10::xpu::XPUCachingAllocator::get();
    34  512, _10mb, c10::xpu::get_raw_device(0), c10::xpu::get_device_context());
    35  void* ptr1 = c10::xpu::XPUCachingAllocator::raw_alloc(_10mb);
    44  c10::xpu::XPUCachingAllocator::raw_delete(ptr1);
    45  sycl::free(tmp, c10::xpu::get_device_context());
    46  c10::xpu::XPUCachingAllocator::emptyCache();
    [all …]
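From Python, the caching device allocator is observable through the memory introspection helpers. A sketch of an allocate/free/emptyCache round-trip; exact stat values are allocator-internal, so only monotonicity is asserted:

    import torch

    if torch.xpu.is_available():
        before = torch.xpu.memory_allocated()
        buf = torch.empty(10 * 1024 * 1024, dtype=torch.uint8, device="xpu")  # ~10 MB block
        assert torch.xpu.memory_allocated() > before
        del buf
        # Freed blocks stay cached for reuse until emptyCache returns them to SYCL.
        torch.xpu.empty_cache()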
|
D | XPUGuardTest.cpp |
    5   #include <c10/xpu/XPUStream.h>
    6   #include <c10/xpu/test/impl/XPUTest.h>
    9   return c10::xpu::device_count() > 0;
    20  EXPECT_EQ(c10::xpu::current_device(), 0);
    23  std::vector<c10::xpu::XPUStream> streams0 = {
    24      c10::xpu::getStreamFromPool(), c10::xpu::getStreamFromPool(true)};
    27  c10::xpu::setCurrentXPUStream(streams0[0]);
    28  EXPECT_EQ(c10::xpu::getCurrentXPUStream(), streams0[0]);
    30  if (c10::xpu::device_count() <= 1) {
    34  // Test DeviceGuard for XPU.
    [all …]
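DeviceGuard's set-then-restore semantics map onto nested torch.xpu.device contexts in Python. A sketch for a multi-device box:

    import torch

    if torch.xpu.is_available() and torch.xpu.device_count() > 1:
        outer = torch.xpu.current_device()
        with torch.xpu.device(1):          # guard sets device 1 ...
            assert torch.xpu.current_device() == 1
            with torch.xpu.device(0):      # ... guards nest ...
                assert torch.xpu.current_device() == 0
            assert torch.xpu.current_device() == 1
        assert torch.xpu.current_device() == outer  # ... and restore on exit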
|
/external/pytorch/aten/src/ATen/xpu/detail/ |
D | XPUHooks.cpp |
    1   #include <ATen/xpu/PinnedMemoryAllocator.h>
    2   #include <ATen/xpu/XPUContext.h>
    3   #include <ATen/xpu/XPUDevice.h>
    4   #include <ATen/xpu/XPUGeneratorImpl.h>
    5   #include <ATen/xpu/detail/XPUHooks.h>
    8   #include <c10/xpu/XPUCachingAllocator.h>
    10  namespace at::xpu::detail {
    13  C10_LOG_API_USAGE_ONCE("aten.init.xpu");
    14  const auto device_count = c10::xpu::device_count_ensure_non_zero();
    15  c10::xpu::XPUCachingAllocator::init(device_count);
    [all …]
|
/external/pytorch/torch/xpu/ |
D | __init__.py |
    3   This package introduces support for the XPU backend, specifically tailored for
    7   :func:`is_available()` to determine if your system supports XPU.
    36  r"""Return true if compiled with XPU support."""
    45  # Define dummy if PyTorch was compiled without XPU
    49  raise NotImplementedError("PyTorch was compiled without XPU support")
    52  raise NotImplementedError("PyTorch was compiled without XPU support")
    57  r"""Return the number of XPU devices available."""
    64  r"""Return a bool indicating if XPU is currently available."""
    70  r"""Return a bool indicating if the current XPU device supports dtype bfloat16."""
    75  r"""Return whether PyTorch's XPU state has been initialized."""
    [all …]
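The module distinguishes "compiled with XPU support" from "XPU usable right now". A sketch of the standard guard; `_is_compiled` is private but appears in the snippets here and under device_interface.py:

    import torch

    # _is_compiled(): the binary was built with XPU support.
    # is_available(): support is compiled in AND at least one device is present.
    if torch.xpu._is_compiled() and not torch.xpu.is_available():
        print("XPU build, but no XPU device visible")
    elif torch.xpu.is_available():
        print(f"{torch.xpu.device_count()} XPU device(s) available")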
|
D | random.py |
    10  def get_rng_state(device: Union[int, str, torch.device] = "xpu") -> Tensor:
    15      Default: ``'xpu'`` (i.e., ``torch.device('xpu')``, the current XPU device).
    18      This function eagerly initializes XPU.
    24  device = torch.device("xpu", device)
    28  default_generator = torch.xpu.default_generators[idx]
    41  new_state: Tensor, device: Union[int, str, torch.device] = "xpu"
    48      Default: ``'xpu'`` (i.e., ``torch.device('xpu')``, the current XPU device).
    55  device = torch.device("xpu", device)
    61  default_generator = torch.xpu.default_generators[idx]
    80  It's safe to call this function if XPU is not available; in that case, it is silently ignored.
    [all …]
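A sketch of the save/restore round-trip these functions enable, on the current XPU device:

    import torch

    if torch.xpu.is_available():
        state = torch.xpu.get_rng_state()   # snapshot the default generator
        a = torch.randn(4, device="xpu")
        torch.xpu.set_rng_state(state)      # rewind to the snapshot
        b = torch.randn(4, device="xpu")
        assert torch.equal(a, b)            # identical draws after restore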
|
D | memory.py |
    15  allocator so that those can be used in other XPU applications.
    18  :func:`~torch.xpu.empty_cache` doesn't increase the amount of XPU
    20  of XPU memory in certain cases.
    27  r"""Reset the "peak" stats tracked by the XPU memory allocator.
    29  See :func:`~torch.xpu.memory_stats` for details. Peak stats correspond to the
    34  statistic for the current device, given by :func:`~torch.xpu.current_device`,
    42  r"""Reset the "accumulated" (historical) stats tracked by the XPU memory allocator.
    44  See :func:`~torch.xpu.memory_stats` for details. Accumulated stats correspond to
    49  statistic for the current device, given by :func:`~torch.xpu.current_device`,
    57  r"""Return the result of :func:`~torch.xpu.memory_stats` as a nested dictionary."""
    [all …]
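A sketch of how the peak-stat reset interacts with the peak counter; `max_memory_allocated` is assumed here as the usual thin wrapper over `memory_stats`, by analogy with the CUDA module (only `reset_peak_memory_stats` and `memory_stats` appear in the snippet):

    import torch

    if torch.xpu.is_available():
        x = torch.empty(8 * 1024 * 1024, dtype=torch.uint8, device="xpu")
        del x
        peak = torch.xpu.max_memory_allocated()   # still reflects the freed buffer
        torch.xpu.reset_peak_memory_stats()
        # Peak is re-baselined to current usage, so it cannot exceed the old peak.
        assert torch.xpu.max_memory_allocated() <= peak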
|
D | streams.py |
    17  r"""Wrapper around an XPU stream.
    19  An XPU stream is a linear sequence of execution that belongs to a specific
    36  with torch.xpu.device(device):
    43      event (torch.xpu.Event): an event to wait for.
    62      event (torch.xpu.Event, optional): event to record. If not given, a new one
    98  return f"torch.xpu.Stream(device={self.device} sycl_queue={self.sycl_queue:#x})"
    102 r"""Wrapper around an XPU event.
    104 XPU events are synchronization markers that can be used to monitor the
    105 device's progress, and to synchronize XPU streams.
    107 The underlying XPU events are lazily initialized when the event is first
    [all …]
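A sketch of the cross-stream ordering these wrappers provide: a consumer stream waits on an event recorded by a producer stream before reading its output:

    import torch

    if torch.xpu.is_available():
        producer, consumer = torch.xpu.Stream(), torch.xpu.Stream()
        done = torch.xpu.Event()

        with torch.xpu.stream(producer):
            y = torch.randn(1024, device="xpu") * 2
            done.record()                 # marks the point at which y is ready

        with torch.xpu.stream(consumer):
            consumer.wait_event(done)     # order consumer after the producer's work
            z = y + 1                     # safe: runs only after `done` fires
        torch.xpu.synchronize()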
|
D | _utils.py |
    16  is an XPU device. Note that for an XPU device without a specified index,
    17  i.e., ``torch.device('xpu')``, this will return the current default XPU
    23  If :attr:`device` is ``None``, this will return the current default XPU
    32  if device.type not in ["xpu", "cpu"]:
    33      raise ValueError(f"Expected an xpu or cpu device, but got: {device}")
    34  elif device.type != "xpu":
    35      raise ValueError(f"Expected an xpu device, but got: {device}")
    37  if isinstance(device, torch.xpu.device):
|
D | _gpu_trace.py |
    7   "XPU event creation"
    10  "XPU event deletion"
    13  "XPU event record"
    16  "XPU event wait"
    19  "XPU memory allocation"
    22  "XPU memory deallocation"
    25  "XPU stream creation"
    28  "XPU device synchronization"
    31  "XPU stream synchronization"
    34  "XPU event synchronization"
|
/external/pytorch/.github/workflows/ |
D | xpu.yml |
    1   name: xpu
    6   - ciflow/xpu/*
    24  linux-jammy-xpu-py3_9-build:
    25    name: linux-jammy-xpu-py3.9
    30    build-environment: linux-jammy-xpu-py3.9
    31    docker-image-name: pytorch-linux-jammy-xpu-2024.0-py3
    35    { config: "default", shard: 1, num_shards: 4, runner: "linux.idc.xpu" },
    36    { config: "default", shard: 2, num_shards: 4, runner: "linux.idc.xpu" },
    37    { config: "default", shard: 3, num_shards: 4, runner: "linux.idc.xpu" },
    38    { config: "default", shard: 4, num_shards: 4, runner: "linux.idc.xpu" },
    [all …]
|
/external/pytorch/torch/_dynamo/ |
D | device_interface.py |
    222  if torch.xpu._is_compiled():
    229      device = torch.xpu.device
    230      Event = torch.xpu.Event
    231      Stream = torch.xpu.Stream
    236  caching_worker_current_devices["xpu"] = device
    240  if "xpu" in caching_worker_current_devices:
    241      return caching_worker_current_devices["xpu"]
    242  return torch.xpu.current_device()
    249  assert device.type == "xpu"
    255  if "xpu" not in caching_worker_device_properties:
    [all …]
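Dynamo resolves these per-backend interfaces by device type. A sketch, assuming the `get_interface_for_device` helper from the same module (it is not shown in the snippet):

    import torch
    from torch._dynamo.device_interface import get_interface_for_device

    if torch.xpu._is_compiled():
        # Returns the XPU DeviceInterface registered by the snippet above,
        # which proxies torch.xpu.device / Event / Stream for the compiler.
        xpu_iface = get_interface_for_device("xpu")
        print(xpu_iface.Event is torch.xpu.Event)  # True if the mapping holds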
|
/external/pytorch/torch/csrc/xpu/ |
D | Module.cpp |
    2   #include <ATen/xpu/XPUContext.h>
    3   #include <ATen/xpu/XPUGeneratorImpl.h>
    5   #include <c10/xpu/XPUCachingAllocator.h>
    6   #include <c10/xpu/XPUFunctions.h>
    20  static bool in_bad_fork = false; // True for children forked after xpu init
    23  // Called in the forked child if xpu has already been initialized
    30  // Should be called before the first xpu call. It is mainly called in lazy_init.
    31  // Note: This is distinct from initExtension because a stub xpu implementation
    40  // XPU management methods
    53  c10::xpu::set_device(device_index);
    [all …]
|
/external/pytorch/c10/xpu/ |
D | XPUStream.h |
    5   #include <c10/xpu/XPUFunctions.h>
    7   namespace c10::xpu {
    34  * a SYCL queue, which allows asynchronous execution of XPU tasks.
    41  /// will raise an error if the Stream is not, in fact, an XPU stream.
    43  TORCH_CHECK(stream_.device_type() == DeviceType::XPU);
    63  /// XPU stream).
    68  /// Get the XPU device type that this stream is associated with.
    70  return DeviceType::XPU;
    73  /// Get the XPU device index that this stream is associated with.
    79  /// guaranteed to be an XPU device.
    [all …]
|
/external/pytorch/aten/src/ATen/xpu/ |
D | XPUContext.h |
    4   #include <c10/xpu/XPUFunctions.h>
    5   #include <c10/xpu/XPUStream.h>
    7   namespace at::xpu {
    9   // XPU is available if we compiled with XPU.
    11  return c10::xpu::device_count() > 0;
    20  } // namespace at::xpu
|
D | XPUContext.cpp |
    1   #include <ATen/xpu/XPUContext.h>
    8   namespace at::xpu {
    27  num_gpus = c10::xpu::device_count();
    35  c10::xpu::get_device_properties(&device_properties[device], device);
    39  sycl::device& raw_device = c10::xpu::get_raw_device(device);
    47  it != devices.end(), "Can't find the global index of XPU device.");
    65  auto device = c10::xpu::current_device();
    72  device = c10::xpu::current_device();
    79  // index of an XPU device in the framework.
    87  } // namespace at::xpu
|
/external/pytorch/aten/src/ATen/detail/ |
D | XPUHooksInterface.h |
    20  "Cannot initialize XPU without ATen_xpu library.");
    30  "Cannot query detailed XPU version without ATen_xpu library.");
    34  TORCH_CHECK(false, "Cannot get XPU global device index without ATen_xpu library.");
    38  TORCH_CHECK(false, "Cannot get XPU generator without ATen_xpu library.");
    42  TORCH_CHECK(false, "Cannot get default XPU generator without ATen_xpu library.");
    50  TORCH_CHECK(false, "Cannot get current device on XPU without ATen_xpu library.");
    54  TORCH_CHECK(false, "Cannot get device of pointer on XPU without ATen_xpu library.");
    58  TORCH_CHECK(false, "Cannot synchronize XPU device without ATen_xpu library.");
    62  TORCH_CHECK(false, "Cannot get XPU pinned memory allocator without ATen_xpu library.");
|
/external/pytorch/aten/src/ATen/native/mkldnn/xpu/detail/ |
D | oneDNNContext.h |
    6   #include <c10/xpu/XPUFunctions.h>
    7   #include <c10/xpu/XPUStream.h>
    29  TORCH_INTERNAL_ASSERT(device.index() < c10::xpu::device_count());
    38  int device_count = (int)c10::xpu::device_count();
    43  c10::xpu::get_raw_device(i), c10::xpu::get_device_context()
    58  c10::DeviceIndex device_index = c10::xpu::current_device();
    59  TORCH_INTERNAL_ASSERT(device_index < c10::xpu::device_count());
    62  c10::xpu::getCurrentXPUStream(device_index).queue());
|