/*
 * Copyright © 2023 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include "xe/iris_batch.h"

#include "iris_batch.h"
#include "iris_context.h"
#include "iris_screen.h"

#include "common/intel_gem.h"
#include "common/intel_engine.h"
#include "common/xe/intel_device_query.h"
#include "common/xe/intel_engine.h"
#include "common/xe/intel_queue.h"

#include "drm-uapi/xe_drm.h"
#include "drm-uapi/gpu_scheduler.h"

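/* Map an iris context priority onto the DRM scheduler priority expected by
 * the Xe KMD.  Unknown values fall back to the normal priority.
 */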
static enum drm_sched_priority
iris_context_priority_to_drm_sched_priority(enum iris_context_priority priority)
{
   switch (priority) {
   case IRIS_CONTEXT_HIGH_PRIORITY:
      return DRM_SCHED_PRIORITY_HIGH;
   case IRIS_CONTEXT_LOW_PRIORITY:
      return DRM_SCHED_PRIORITY_MIN;
   case IRIS_CONTEXT_MEDIUM_PRIORITY:
      FALLTHROUGH;
   default:
      return DRM_SCHED_PRIORITY_NORMAL;
   }
}

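/* Create a Xe exec queue for the given engine class, clamping the requested
 * priority to the maximum the KMD allows, and return the new queue's id in
 * *exec_queue_id.  Returns false on allocation or ioctl failure.
 */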
static bool
iris_xe_init_batch(struct iris_bufmgr *bufmgr,
                   struct intel_query_engine_info *engines_info,
                   enum intel_engine_class engine_class,
                   enum iris_context_priority priority, uint32_t *exec_queue_id)
{
   struct drm_xe_engine_class_instance *instances;

   instances = malloc(sizeof(*instances) *
                      intel_engines_count(engines_info, engine_class));
   if (!instances)
      return false;

   enum drm_sched_priority requested_priority =
      iris_context_priority_to_drm_sched_priority(priority);
   enum drm_sched_priority allowed_priority = DRM_SCHED_PRIORITY_MIN;
   if (requested_priority > DRM_SCHED_PRIORITY_MIN) {
      struct drm_xe_query_config *config;

      /* Ask the KMD for the highest exec queue priority this client may use. */
      config = xe_device_query_alloc_fetch(iris_bufmgr_get_fd(bufmgr),
                                           DRM_XE_DEVICE_QUERY_CONFIG, NULL);
      if (config)
         allowed_priority = config->info[DRM_XE_QUERY_CONFIG_MAX_EXEC_QUEUE_PRIORITY];
      free(config);
   }
   if (requested_priority < allowed_priority)
      allowed_priority = requested_priority;

   /* Gather every engine instance of the requested class as a placement. */
   uint32_t count = 0;
   for (uint32_t i = 0; i < engines_info->num_engines; i++) {
      const struct intel_engine_class_instance engine = engines_info->engines[i];
      if (engine.engine_class != engine_class)
         continue;

      instances[count].engine_class = intel_engine_class_to_xe(engine.engine_class);
      instances[count].engine_instance = engine.engine_instance;
      instances[count++].gt_id = engine.gt_id;
   }
   struct drm_xe_ext_set_property ext = {
      .base.name = DRM_XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY,
      .property = DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY,
      .value = allowed_priority,
   };
   struct drm_xe_exec_queue_create create = {
         .instances = (uintptr_t)instances,
         .vm_id = iris_bufmgr_get_global_vm_id(bufmgr),
         .width = 1,
         .num_placements = count,
         .extensions = (uintptr_t)&ext,
   };
   int ret = intel_ioctl(iris_bufmgr_get_fd(bufmgr),
                         DRM_IOCTL_XE_EXEC_QUEUE_CREATE, &create);
   free(instances);
   if (ret)
      goto error_create_exec_queue;

   /* TODO: handle "protected" context/exec_queue */
   *exec_queue_id = create.exec_queue_id;
error_create_exec_queue:
   return ret == 0;
}

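/* Pick an engine class for each iris batch: render and blitter map directly,
 * while compute falls back to the render engine when no dedicated compute
 * engine is available.
 */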
static void
iris_xe_map_intel_engine_class(struct iris_bufmgr *bufmgr,
                               const struct intel_query_engine_info *engines_info,
                               enum intel_engine_class *engine_classes)
{
   engine_classes[IRIS_BATCH_RENDER] = INTEL_ENGINE_CLASS_RENDER;
   engine_classes[IRIS_BATCH_COMPUTE] = INTEL_ENGINE_CLASS_RENDER;
   engine_classes[IRIS_BATCH_BLITTER] = INTEL_ENGINE_CLASS_COPY;
   STATIC_ASSERT(IRIS_BATCH_COUNT == 3);

   if (iris_bufmgr_compute_engine_supported(bufmgr))
      engine_classes[IRIS_BATCH_COMPUTE] = INTEL_ENGINE_CLASS_COMPUTE;
}

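/* Create one exec queue per iris batch, using the context's priority. */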
void iris_xe_init_batches(struct iris_context *ice)
{
   struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
   struct iris_bufmgr *bufmgr = screen->bufmgr;
   const int fd = iris_bufmgr_get_fd(screen->bufmgr);
   enum intel_engine_class engine_classes[IRIS_BATCH_COUNT];
   struct intel_query_engine_info *engines_info;

   engines_info = intel_engine_get_info(fd, INTEL_KMD_TYPE_XE);
   assert(engines_info);
   if (!engines_info)
      return;
   iris_xe_map_intel_engine_class(bufmgr, engines_info, engine_classes);

   iris_foreach_batch(ice, batch) {
      const enum iris_batch_name name = batch - &ice->batches[0];
      ASSERTED bool ret;

      ret = iris_xe_init_batch(bufmgr, engines_info, engine_classes[name],
                               ice->priority, &batch->xe.exec_queue_id);
      assert(ret);
   }

   free(engines_info);
}

/*
 * Wait for all previous DRM_IOCTL_XE_EXEC calls over the
 * drm_xe_exec_queue in this iris_batch to complete.
 */
static void
iris_xe_wait_exec_queue_idle(struct iris_batch *batch)
{
   struct iris_bufmgr *bufmgr = batch->screen->bufmgr;
   int fd = iris_bufmgr_get_fd(bufmgr);
   uint32_t syncobj;
   int ret = xe_queue_get_syncobj_for_idle(fd, batch->xe.exec_queue_id,
                                           &syncobj);

   if (ret) {
      assert(iris_batch_is_banned(bufmgr, ret) == true);
      return;
   }

   struct drm_syncobj_wait wait = {
      .handles = (uintptr_t)&syncobj,
      .count_handles = 1,
      .timeout_nsec = INT64_MAX,
   };
   ret = intel_ioctl(fd, DRM_IOCTL_SYNCOBJ_WAIT, &wait);
   assert(ret == 0);

   struct drm_syncobj_destroy syncobj_destroy = {
      .handle = syncobj,
   };
   intel_ioctl(fd, DRM_IOCTL_SYNCOBJ_DESTROY, &syncobj_destroy);
}

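/* Destroy the exec queue backing this batch on the KMD side. */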
static void
iris_xe_destroy_exec_queue(struct iris_batch *batch)
{
   struct iris_screen *screen = batch->screen;
   struct iris_bufmgr *bufmgr = screen->bufmgr;
   struct drm_xe_exec_queue_destroy destroy = {
      .exec_queue_id = batch->xe.exec_queue_id,
   };
   ASSERTED int ret;

   ret = intel_ioctl(iris_bufmgr_get_fd(bufmgr), DRM_IOCTL_XE_EXEC_QUEUE_DESTROY,
                     &destroy);
   assert(ret == 0);
}

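/* Tear down the exec queue of a batch, draining pending execution first. */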
void iris_xe_destroy_batch(struct iris_batch *batch)
{
   /* The Xe KMD doesn't refcount anything, so resources could be freed while
    * they are still in use if we don't wait for the exec_queue to be idle.
    */
   iris_xe_wait_exec_queue_idle(batch);
   iris_xe_destroy_exec_queue(batch);
}

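/* Create a fresh exec queue for this batch; on success, destroy the old one,
 * switch the batch to the new queue, and mark the context state as lost so it
 * gets re-emitted.  Returns true on success.
 */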
bool iris_xe_replace_batch(struct iris_batch *batch)
{
   enum intel_engine_class engine_classes[IRIS_BATCH_COUNT];
   struct iris_screen *screen = batch->screen;
   struct iris_bufmgr *bufmgr = screen->bufmgr;
   struct iris_context *ice = batch->ice;
   struct intel_query_engine_info *engines_info;
   uint32_t new_exec_queue_id;
   bool ret;

   engines_info = intel_engine_get_info(iris_bufmgr_get_fd(bufmgr),
                                        INTEL_KMD_TYPE_XE);
   if (!engines_info)
      return false;
   iris_xe_map_intel_engine_class(bufmgr, engines_info, engine_classes);

   ret = iris_xe_init_batch(bufmgr, engines_info, engine_classes[batch->name],
                            ice->priority, &new_exec_queue_id);
   if (ret) {
      iris_xe_destroy_exec_queue(batch);
      batch->xe.exec_queue_id = new_exec_queue_id;
      iris_lost_context_state(batch);
   }

   free(engines_info);
   return ret;
}