/*
 * Copyright © 2023 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include "xe/anv_device.h"
#include "anv_private.h"

#include "drm-uapi/gpu_scheduler.h"
#include "drm-uapi/xe_drm.h"

#include "common/xe/intel_device_query.h"

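/* Tear down the device's Xe VM: finish the bind timeline and destroy the VM
 * created by anv_xe_device_setup_vm(). Returns true if the destroy ioctl
 * succeeded.
 */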
bool anv_xe_device_destroy_vm(struct anv_device *device)
{
   struct drm_xe_vm_destroy destroy = {
      .vm_id = device->vm_id,
   };

   intel_bind_timeline_finish(&device->bind_timeline, device->fd);

   return intel_ioctl(device->fd, DRM_IOCTL_XE_VM_DESTROY, &destroy) == 0;
}

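/* Create the Xe VM backing this logical device and initialize the bind
 * timeline used to synchronize VM bind operations. On failure the partially
 * created VM is destroyed again.
 */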
VkResult anv_xe_device_setup_vm(struct anv_device *device)
{
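   /* With DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE the kernel backs unmapped GPU
    * virtual addresses with a scratch page instead of faulting on access.
    */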
   struct drm_xe_vm_create create = {
      .flags = DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE,
   };
   if (intel_ioctl(device->fd, DRM_IOCTL_XE_VM_CREATE, &create) != 0)
      return vk_errorf(device, VK_ERROR_INITIALIZATION_FAILED,
                       "vm creation failed");

   device->vm_id = create.vm_id;

   if (!intel_bind_timeline_init(&device->bind_timeline, device->fd)) {
      anv_xe_device_destroy_vm(device);
      return vk_errorf(device, VK_ERROR_INITIALIZATION_FAILED,
                       "intel_bind_timeline_init failed");
   }

   return VK_SUCCESS;
}

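/* Map a DRM scheduler priority level to the corresponding
 * VkQueueGlobalPriorityKHR value.
 */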
static VkQueueGlobalPriorityKHR
drm_sched_priority_to_vk_priority(enum drm_sched_priority drm_sched_priority)
{
   switch (drm_sched_priority) {
   case DRM_SCHED_PRIORITY_MIN:
      return VK_QUEUE_GLOBAL_PRIORITY_LOW_KHR;
   case DRM_SCHED_PRIORITY_NORMAL:
      return VK_QUEUE_GLOBAL_PRIORITY_MEDIUM_KHR;
   case DRM_SCHED_PRIORITY_HIGH:
      return VK_QUEUE_GLOBAL_PRIORITY_HIGH_KHR;
   default:
      unreachable("Invalid drm_sched_priority");
      return VK_QUEUE_GLOBAL_PRIORITY_LOW_KHR;
   }
}

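/* Query the Xe device config and derive physical-device parameters from it:
 * Xe always supports timeline syncobjs at execution time, and the maximum
 * exec queue priority reported by the kernel is translated into the maximum
 * Vulkan global queue priority.
 */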
VkResult
anv_xe_physical_device_get_parameters(struct anv_physical_device *device)
{
   struct drm_xe_query_config *config;

   config = xe_device_query_alloc_fetch(device->local_fd, DRM_XE_DEVICE_QUERY_CONFIG, NULL);
   if (!config)
      return vk_errorf(device, VK_ERROR_INITIALIZATION_FAILED,
                       "unable to query device config");

   device->has_exec_timeline = true;
   device->max_context_priority =
      drm_sched_priority_to_vk_priority(config->info[DRM_XE_QUERY_CONFIG_MAX_EXEC_QUEUE_PRIORITY]);

   free(config);
   return VK_SUCCESS;
}

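/* Populate the physical device's Vulkan memory types. Devices with VRAM
 * expose device-local, host-cached system and host-visible device-local
 * types; integrated devices expose system-memory types whose caching flags
 * depend on whether the GPU shares an LLC with the CPU.
 */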
VkResult
anv_xe_physical_device_init_memory_types(struct anv_physical_device *device)
{
   if (anv_physical_device_has_vram(device)) {
      device->memory.type_count = 3;
      device->memory.types[0] = (struct anv_memory_type) {
         .propertyFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT,
         .heapIndex = 0,
      };
      device->memory.types[1] = (struct anv_memory_type) {
         .propertyFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
                          VK_MEMORY_PROPERTY_HOST_COHERENT_BIT |
                          VK_MEMORY_PROPERTY_HOST_CACHED_BIT,
         .heapIndex = 1,
      };
      device->memory.types[2] = (struct anv_memory_type) {
         .propertyFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT |
                          VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
                          VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,
         /* This memory type either comes from heaps[0] if there is only a
          * mappable vram region, or from heaps[2] if there are both mappable
          * and non-mappable vram regions.
          */
         .heapIndex = device->vram_non_mappable.size > 0 ? 2 : 0,
      };
   } else if (device->info.has_llc) {
      /* Big core GPUs share LLC with the CPU and thus one memory type can be
       * both cached and coherent at the same time.
       *
       * But some game engines can't handle a single memory type well:
       * https://gitlab.freedesktop.org/mesa/mesa/-/issues/7360#note_1719438
       *
       * TODO: with the current Xe uAPI we can't change the mmap mode, so
       * only two memory types are supported here.
       */
      device->memory.type_count = 2;
      device->memory.types[0] = (struct anv_memory_type) {
         .propertyFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT,
         .heapIndex = 0,
      };
      device->memory.types[1] = (struct anv_memory_type) {
         .propertyFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT |
                          VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
                          VK_MEMORY_PROPERTY_HOST_COHERENT_BIT |
                          VK_MEMORY_PROPERTY_HOST_CACHED_BIT,
         .heapIndex = 0,
      };
   } else {
      device->memory.types[device->memory.type_count++] = (struct anv_memory_type) {
         .propertyFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT |
                          VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
                          VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,
         .heapIndex = 0,
      };
      device->memory.types[device->memory.type_count++] = (struct anv_memory_type) {
         .propertyFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT |
                          VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
                          VK_MEMORY_PROPERTY_HOST_COHERENT_BIT |
                          VK_MEMORY_PROPERTY_HOST_CACHED_BIT,
         .heapIndex = 0,
      };
   }
   return VK_SUCCESS;
}

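/* Check whether a single exec queue has been banned by the kernel. A banned
 * queue (or a failing property query) marks the whole logical device as
 * lost.
 */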
static VkResult
anv_xe_get_device_status(struct anv_device *device, uint32_t exec_queue_id)
{
   VkResult result = VK_SUCCESS;
   struct drm_xe_exec_queue_get_property exec_queue_get_property = {
      .exec_queue_id = exec_queue_id,
      .property = DRM_XE_EXEC_QUEUE_GET_PROPERTY_BAN,
   };
   int ret = intel_ioctl(device->fd, DRM_IOCTL_XE_EXEC_QUEUE_GET_PROPERTY,
                         &exec_queue_get_property);

   if (ret || exec_queue_get_property.value)
      result = vk_device_set_lost(&device->vk, "One or more queues banned");

   return result;
}

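/* Walk every queue's exec queue (and its companion RCS exec queue, if any)
 * and report the device as lost if any of them has been banned.
 */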
VkResult
anv_xe_device_check_status(struct vk_device *vk_device)
{
   struct anv_device *device = container_of(vk_device, struct anv_device, vk);
   VkResult result = VK_SUCCESS;

   for (uint32_t i = 0; i < device->queue_count; i++) {
      result = anv_xe_get_device_status(device, device->queues[i].exec_queue_id);
      if (result != VK_SUCCESS)
         return result;

      if (device->queues[i].companion_rcs_id != 0) {
         uint32_t exec_queue_id = device->queues[i].companion_rcs_id;
         result = anv_xe_get_device_status(device, exec_queue_id);
         if (result != VK_SUCCESS)
            return result;
      }
   }

   return result;
}