/*
 * Copyright © 2023 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include "xe/anv_queue.h"

#include "anv_private.h"

#include "common/xe/intel_engine.h"
#include "common/intel_gem.h"

#include "xe/anv_device.h"

#include "drm-uapi/xe_drm.h"
#include "drm-uapi/gpu_scheduler.h"

35 static enum drm_sched_priority
anv_vk_priority_to_drm_sched_priority(VkQueueGlobalPriorityKHR vk_priority)36 anv_vk_priority_to_drm_sched_priority(VkQueueGlobalPriorityKHR vk_priority)
37 {
38 switch (vk_priority) {
39 case VK_QUEUE_GLOBAL_PRIORITY_LOW_KHR:
40 return DRM_SCHED_PRIORITY_MIN;
41 case VK_QUEUE_GLOBAL_PRIORITY_MEDIUM_KHR:
42 return DRM_SCHED_PRIORITY_NORMAL;
43 case VK_QUEUE_GLOBAL_PRIORITY_HIGH_KHR:
44 return DRM_SCHED_PRIORITY_HIGH;
45 default:
46 unreachable("Invalid priority");
47 return DRM_SCHED_PRIORITY_MIN;
48 }
49 }
50
51 static VkResult
create_engine(struct anv_device * device,struct anv_queue * queue,const VkDeviceQueueCreateInfo * pCreateInfo,bool create_companion_rcs_engine)52 create_engine(struct anv_device *device,
53 struct anv_queue *queue,
54 const VkDeviceQueueCreateInfo *pCreateInfo,
55 bool create_companion_rcs_engine)
56 {
57 struct anv_physical_device *physical = device->physical;
58 uint32_t queue_family_index =
59 create_companion_rcs_engine ?
60 anv_get_first_render_queue_index(physical) :
61 pCreateInfo->queueFamilyIndex;
62 struct anv_queue_family *queue_family =
63 &physical->queue.families[queue_family_index];
64 const struct intel_query_engine_info *engines = physical->engine_info;
65 struct drm_xe_engine_class_instance *instances;
66 const VkDeviceQueueGlobalPriorityCreateInfoKHR *queue_priority =
67 vk_find_struct_const(pCreateInfo->pNext,
68 DEVICE_QUEUE_GLOBAL_PRIORITY_CREATE_INFO_KHR);
69 const VkQueueGlobalPriorityKHR priority = queue_priority ?
70 queue_priority->globalPriority :
71 VK_QUEUE_GLOBAL_PRIORITY_MEDIUM_KHR;
72
73 /* As per spec, the driver implementation may deny requests to acquire
74 * a priority above the default priority (MEDIUM) if the caller does not
75 * have sufficient privileges. In this scenario VK_ERROR_NOT_PERMITTED_KHR
76 * is returned.
77 */
78 if (physical->max_context_priority >= VK_QUEUE_GLOBAL_PRIORITY_MEDIUM_KHR) {
79 if (priority > physical->max_context_priority)
80 return vk_error(device, VK_ERROR_NOT_PERMITTED_KHR);
81 }
82
83 instances = vk_alloc(&device->vk.alloc,
84 sizeof(*instances) * queue_family->queueCount, 8,
85 VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
86 if (!instances)
87 return VK_ERROR_OUT_OF_HOST_MEMORY;
88
89 /* Build a list of all compatible HW engines */
90 uint32_t count = 0;
91 for (uint32_t i = 0; i < engines->num_engines; i++) {
92 const struct intel_engine_class_instance engine = engines->engines[i];
93 if (engine.engine_class != queue_family->engine_class)
94 continue;
95
96 instances[count].engine_class = intel_engine_class_to_xe(engine.engine_class);
97 instances[count].engine_instance = engine.engine_instance;
98 instances[count++].gt_id = engine.gt_id;
99 }
100
101 assert(device->vm_id != 0);
102 struct drm_xe_ext_set_property ext = {
103 .base.name = DRM_XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY,
104 .property = DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY,
105 .value = anv_vk_priority_to_drm_sched_priority(priority),
106 };
107 struct drm_xe_exec_queue_create create = {
108 /* Allows KMD to pick one of those engines for the submission queue */
109 .instances = (uintptr_t)instances,
110 .vm_id = device->vm_id,
111 .width = 1,
112 .num_placements = count,
113 .extensions = (uintptr_t)&ext,
114 };
115 int ret = intel_ioctl(device->fd, DRM_IOCTL_XE_EXEC_QUEUE_CREATE, &create);
116 vk_free(&device->vk.alloc, instances);
117 if (ret)
118 return vk_errorf(device, VK_ERROR_UNKNOWN, "Unable to create exec queue");
119
120 if (create_companion_rcs_engine)
121 queue->companion_rcs_id = create.exec_queue_id;
122 else
123 queue->exec_queue_id = create.exec_queue_id;
124
125 return VK_SUCCESS;
126 }
127
128 VkResult
anv_xe_create_engine(struct anv_device * device,struct anv_queue * queue,const VkDeviceQueueCreateInfo * pCreateInfo)129 anv_xe_create_engine(struct anv_device *device,
130 struct anv_queue *queue,
131 const VkDeviceQueueCreateInfo *pCreateInfo)
132 {
133 VkResult result = create_engine(device, queue, pCreateInfo,
134 false /* create_companion_rcs_engine */);
135
136 if (result != VK_SUCCESS)
137 return result;
138
139 if (queue->family->engine_class == INTEL_ENGINE_CLASS_COPY ||
140 queue->family->engine_class == INTEL_ENGINE_CLASS_COMPUTE) {
141 result = create_engine(device, queue, pCreateInfo,
142 true /* create_companion_rcs_engine */);
143 }
144
145 return result;
146 }
147
148 static void
destroy_engine(struct anv_device * device,uint32_t exec_queue_id)149 destroy_engine(struct anv_device *device, uint32_t exec_queue_id)
150 {
151 struct drm_xe_exec_queue_destroy destroy = {
152 .exec_queue_id = exec_queue_id,
153 };
154 intel_ioctl(device->fd, DRM_IOCTL_XE_EXEC_QUEUE_DESTROY, &destroy);
155 }
156
157 void
anv_xe_destroy_engine(struct anv_device * device,struct anv_queue * queue)158 anv_xe_destroy_engine(struct anv_device *device, struct anv_queue *queue)
159 {
160 destroy_engine(device, queue->exec_queue_id);
161
162 if (queue->companion_rcs_id != 0)
163 destroy_engine(device, queue->companion_rcs_id);
164 }
165