/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <sys/ioctl.h>
#include <sys/types.h>
#include <sys/mman.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <fcntl.h>

#include "anv_private.h"
#include "common/intel_defines.h"
#include "common/intel_gem.h"
#include "drm-uapi/sync_file.h"

/**
 * Wrapper around DRM_IOCTL_I915_GEM_CREATE.
 *
 * Return gem handle, or 0 on failure. Gem handles are never 0.
 */
uint32_t
anv_gem_create(struct anv_device *device, uint64_t size)
{
   struct drm_i915_gem_create gem_create = {
      .size = size,
   };

   int ret = intel_ioctl(device->fd, DRM_IOCTL_I915_GEM_CREATE, &gem_create);
   if (ret != 0) {
      /* FIXME: What do we do if this fails? */
      return 0;
   }

   return gem_create.handle;
}

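/** Wrapper around DRM_IOCTL_GEM_CLOSE. Releases the given GEM handle. */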
void
anv_gem_close(struct anv_device *device, uint32_t gem_handle)
{
   struct drm_gem_close close = {
      .handle = gem_handle,
   };

   intel_ioctl(device->fd, DRM_IOCTL_GEM_CLOSE, &close);
}

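/**
 * Wrapper around DRM_IOCTL_I915_GEM_CREATE_EXT with a memory-regions
 * extension, used on platforms that expose multiple memory regions.
 *
 * Return gem handle, or 0 on failure.
 */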
uint32_t
anv_gem_create_regions(struct anv_device *device, uint64_t anv_bo_size,
                       uint32_t num_regions,
                       struct drm_i915_gem_memory_class_instance *regions)
{
   struct drm_i915_gem_create_ext_memory_regions ext_regions = {
      .base = { .name = I915_GEM_CREATE_EXT_MEMORY_REGIONS },
      .num_regions = num_regions,
      .regions = (uintptr_t)regions,
   };

   struct drm_i915_gem_create_ext gem_create = {
      .size = anv_bo_size,
      .extensions = (uintptr_t) &ext_regions,
   };

   int ret = intel_ioctl(device->fd, DRM_IOCTL_I915_GEM_CREATE_EXT,
                         &gem_create);
   if (ret != 0) {
      return 0;
   }

   return gem_create.handle;
}

/**
 * Wrapper around DRM_IOCTL_I915_GEM_MMAP_OFFSET. Returns MAP_FAILED on error.
 */
static void*
anv_gem_mmap_offset(struct anv_device *device, uint32_t gem_handle,
                    uint64_t offset, uint64_t size, uint32_t flags)
{
   struct drm_i915_gem_mmap_offset gem_mmap = {
      .handle = gem_handle,
      .flags = device->info.has_local_mem ? I915_MMAP_OFFSET_FIXED :
               (flags & I915_MMAP_WC) ? I915_MMAP_OFFSET_WC : I915_MMAP_OFFSET_WB,
   };
   assert(offset == 0);

   /* Get the fake offset back */
   int ret = intel_ioctl(device->fd, DRM_IOCTL_I915_GEM_MMAP_OFFSET, &gem_mmap);
   if (ret != 0)
      return MAP_FAILED;

   /* And map it */
   void *map = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
                    device->fd, gem_mmap.offset);
   return map;
}

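/**
 * Wrapper around the legacy DRM_IOCTL_I915_GEM_MMAP. Returns MAP_FAILED on
 * error.
 */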
static void*
anv_gem_mmap_legacy(struct anv_device *device, uint32_t gem_handle,
                    uint64_t offset, uint64_t size, uint32_t flags)
{
   assert(!device->info.has_local_mem);

   struct drm_i915_gem_mmap gem_mmap = {
      .handle = gem_handle,
      .offset = offset,
      .size = size,
      .flags = flags,
   };

   int ret = intel_ioctl(device->fd, DRM_IOCTL_I915_GEM_MMAP, &gem_mmap);
   if (ret != 0)
      return MAP_FAILED;

   return (void *)(uintptr_t) gem_mmap.addr_ptr;
}

/**
 * Maps a GEM BO using either the legacy MMAP path or the MMAP_OFFSET path,
 * depending on kernel support. Returns MAP_FAILED on error.
 */
void*
anv_gem_mmap(struct anv_device *device, uint32_t gem_handle,
             uint64_t offset, uint64_t size, uint32_t flags)
{
   void *map;
   if (device->physical->has_mmap_offset)
      map = anv_gem_mmap_offset(device, gem_handle, offset, size, flags);
   else
      map = anv_gem_mmap_legacy(device, gem_handle, offset, size, flags);

   if (map != MAP_FAILED)
      VG(VALGRIND_MALLOCLIKE_BLOCK(map, size, 0, 1));

   return map;
}

/* This is just a wrapper around munmap, but it also notifies valgrind that
 * this map is no longer valid. Pair this with anv_gem_mmap().
 */
void
anv_gem_munmap(struct anv_device *device, void *p, uint64_t size)
{
   VG(VALGRIND_FREELIKE_BLOCK(p, 0));
   munmap(p, size);
}

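/**
 * Wrapper around DRM_IOCTL_I915_GEM_USERPTR.
 *
 * Return gem handle, or 0 on failure.
 */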
uint32_t
anv_gem_userptr(struct anv_device *device, void *mem, size_t size)
{
   struct drm_i915_gem_userptr userptr = {
      .user_ptr = (__u64)((unsigned long) mem),
      .user_size = size,
      .flags = 0,
   };

   if (device->physical->has_userptr_probe)
      userptr.flags |= I915_USERPTR_PROBE;

   int ret = intel_ioctl(device->fd, DRM_IOCTL_I915_GEM_USERPTR, &userptr);
   if (ret == -1)
      return 0;

   return userptr.handle;
}

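/** Wrapper around DRM_IOCTL_I915_GEM_SET_CACHING. */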
int
anv_gem_set_caching(struct anv_device *device,
                    uint32_t gem_handle, uint32_t caching)
{
   struct drm_i915_gem_caching gem_caching = {
      .handle = gem_handle,
      .caching = caching,
   };

   return intel_ioctl(device->fd, DRM_IOCTL_I915_GEM_SET_CACHING, &gem_caching);
}

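/** Wrapper around DRM_IOCTL_I915_GEM_SET_DOMAIN. */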
int
anv_gem_set_domain(struct anv_device *device, uint32_t gem_handle,
                   uint32_t read_domains, uint32_t write_domain)
{
   struct drm_i915_gem_set_domain gem_set_domain = {
      .handle = gem_handle,
      .read_domains = read_domains,
      .write_domain = write_domain,
   };

   return intel_ioctl(device->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &gem_set_domain);
}

/**
 * Returns 1 if the BO is busy, 0 if it is idle, or negative on error.
 */
int
anv_gem_busy(struct anv_device *device, uint32_t gem_handle)
{
   struct drm_i915_gem_busy busy = {
      .handle = gem_handle,
   };

   int ret = intel_ioctl(device->fd, DRM_IOCTL_I915_GEM_BUSY, &busy);
   if (ret < 0)
      return ret;

   return busy.busy != 0;
}

/**
 * Wrapper around DRM_IOCTL_I915_GEM_WAIT.
 *
 * On return, \a timeout_ns holds the remaining time.
 */
int
anv_gem_wait(struct anv_device *device, uint32_t gem_handle, int64_t *timeout_ns)
{
   struct drm_i915_gem_wait wait = {
      .bo_handle = gem_handle,
      .timeout_ns = *timeout_ns,
      .flags = 0,
   };

   int ret = intel_ioctl(device->fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
   *timeout_ns = wait.timeout_ns;

   return ret;
}

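/**
 * Wrapper around DRM_IOCTL_I915_GEM_EXECBUFFER2(_WR). The _WR variant is
 * used when I915_EXEC_FENCE_OUT asks the kernel to return an output fence.
 */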
int
anv_gem_execbuffer(struct anv_device *device,
                   struct drm_i915_gem_execbuffer2 *execbuf)
{
   if (execbuf->flags & I915_EXEC_FENCE_OUT)
      return intel_ioctl(device->fd, DRM_IOCTL_I915_GEM_EXECBUFFER2_WR, execbuf);
   else
      return intel_ioctl(device->fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, execbuf);
}

/** Returns the tiling mode, or -1 on error. */
int
anv_gem_get_tiling(struct anv_device *device, uint32_t gem_handle)
{
   struct drm_i915_gem_get_tiling get_tiling = {
      .handle = gem_handle,
   };

   /* FIXME: On discrete platforms we don't have DRM_IOCTL_I915_GEM_GET_TILING
    * anymore, so we will need another way to get the tiling. Apparently this
    * is only used in Android code, so we may need some other way to
    * communicate the tiling mode.
    */
   if (intel_ioctl(device->fd, DRM_IOCTL_I915_GEM_GET_TILING, &get_tiling)) {
      assert(!"Failed to get BO tiling");
      return -1;
   }

   return get_tiling.tiling_mode;
}

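/**
 * Wrapper around DRM_IOCTL_I915_GEM_SET_TILING. Returns 0 on success (or
 * when the platform has no tiling uAPI), -1 with errno set on failure.
 */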
int
anv_gem_set_tiling(struct anv_device *device,
                   uint32_t gem_handle, uint32_t stride, uint32_t tiling)
{
   int ret;

   /* On discrete platforms we don't have DRM_IOCTL_I915_GEM_SET_TILING. So
    * nothing needs to be done.
    */
   if (!device->info.has_tiling_uapi)
      return 0;

   /* set_tiling overwrites the input on the error path, so we have to open
    * code intel_ioctl.
    */
   do {
      struct drm_i915_gem_set_tiling set_tiling = {
         .handle = gem_handle,
         .tiling_mode = tiling,
         .stride = stride,
      };

      ret = ioctl(device->fd, DRM_IOCTL_I915_GEM_SET_TILING, &set_tiling);
   } while (ret == -1 && (errno == EINTR || errno == EAGAIN));

   return ret;
}

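/**
 * Wrapper around DRM_IOCTL_I915_GETPARAM. Returns the parameter value, or 0
 * if the query fails.
 */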
int
anv_gem_get_param(int fd, uint32_t param)
{
   int tmp;

   drm_i915_getparam_t gp = {
      .param = param,
      .value = &tmp,
   };

   int ret = intel_ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp);
   if (ret == 0)
      return tmp;

   return 0;
}

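/**
 * Wrapper around DRM_IOCTL_GET_CAP. Returns the capability value, or 0 if
 * the query fails.
 */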
uint64_t
anv_gem_get_drm_cap(int fd, uint32_t capability)
{
   struct drm_get_cap cap = {
      .capability = capability,
   };

   intel_ioctl(fd, DRM_IOCTL_GET_CAP, &cap);
   return cap.value;
}

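/**
 * Detects whether bit-6 swizzling is in effect for the given tiling mode by
 * creating a small scratch BO, setting its tiling, and reading back the
 * swizzle mode reported by the kernel.
 */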
bool
anv_gem_get_bit6_swizzle(int fd, uint32_t tiling)
{
   struct drm_gem_close close;
   int ret;

   struct drm_i915_gem_create gem_create = {
      .size = 4096,
   };

   if (intel_ioctl(fd, DRM_IOCTL_I915_GEM_CREATE, &gem_create)) {
      assert(!"Failed to create GEM BO");
      return false;
   }

   bool swizzled = false;

   /* set_tiling overwrites the input on the error path, so we have to open
    * code intel_ioctl.
    */
   do {
      struct drm_i915_gem_set_tiling set_tiling = {
         .handle = gem_create.handle,
         .tiling_mode = tiling,
         .stride = tiling == I915_TILING_X ? 512 : 128,
      };

      ret = ioctl(fd, DRM_IOCTL_I915_GEM_SET_TILING, &set_tiling);
   } while (ret == -1 && (errno == EINTR || errno == EAGAIN));

   if (ret != 0) {
      assert(!"Failed to set BO tiling");
      goto close_and_return;
   }

   struct drm_i915_gem_get_tiling get_tiling = {
      .handle = gem_create.handle,
   };

   if (intel_ioctl(fd, DRM_IOCTL_I915_GEM_GET_TILING, &get_tiling)) {
      assert(!"Failed to get BO tiling");
      goto close_and_return;
   }

   swizzled = get_tiling.swizzle_mode != I915_BIT_6_SWIZZLE_NONE;

 close_and_return:

   memset(&close, 0, sizeof(close));
   close.handle = gem_create.handle;
   intel_ioctl(fd, DRM_IOCTL_GEM_CLOSE, &close);

   return swizzled;
}

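/**
 * Returns true if the kernel lets us set the context priority, probed by
 * setting I915_CONTEXT_PARAM_PRIORITY on the default context.
 */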
bool
anv_gem_has_context_priority(int fd)
{
   return !anv_gem_set_context_param(fd, 0, I915_CONTEXT_PARAM_PRIORITY,
                                     INTEL_CONTEXT_MEDIUM_PRIORITY);
}

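/**
 * Wrapper around DRM_IOCTL_I915_GEM_CONTEXT_CREATE.
 *
 * Returns the new context id, or -1 on failure.
 */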
int
anv_gem_create_context(struct anv_device *device)
{
   struct drm_i915_gem_context_create create = { 0 };

   int ret = intel_ioctl(device->fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create);
   if (ret == -1)
      return -1;

   return create.ctx_id;
}

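/**
 * Creates a context with an explicit engine map (I915_CONTEXT_PARAM_ENGINES),
 * picking one engine instance per requested class from the kernel's engine
 * list. Returns the new context id, or -1 on failure.
 */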
int
anv_gem_create_context_engines(struct anv_device *device,
                               const struct drm_i915_query_engine_info *info,
                               int num_engines, uint16_t *engine_classes)
{
   const size_t engine_inst_sz = 2 * sizeof(__u16); /* 1 class, 1 instance */
   const size_t engines_param_size =
      sizeof(__u64) /* extensions */ + num_engines * engine_inst_sz;

   void *engines_param = malloc(engines_param_size);
   assert(engines_param);
   *(__u64*)engines_param = 0;
   __u16 *class_inst_ptr = (__u16*)(((__u64*)engines_param) + 1);

   /* For each type of drm_i915_gem_engine_class of interest, we keep track of
    * the previous engine instance used.
    */
   int last_engine_idx[] = {
      [I915_ENGINE_CLASS_RENDER] = -1,
   };

   int i915_engine_counts[] = {
      [I915_ENGINE_CLASS_RENDER] =
         anv_gem_count_engines(info, I915_ENGINE_CLASS_RENDER),
   };

   /* For each queue, we look for the next instance that matches the class we
    * need.
    */
   for (int i = 0; i < num_engines; i++) {
      uint16_t engine_class = engine_classes[i];
      if (i915_engine_counts[engine_class] <= 0) {
         free(engines_param);
         return -1;
      }

      /* Run through the engines reported by the kernel looking for the next
       * matching instance. We loop in case we want to create multiple
       * contexts on an engine instance.
       */
      int engine_instance = -1;
      for (int j = 0; j < info->num_engines; j++) {
         int *idx = &last_engine_idx[engine_class];
         if (++(*idx) >= info->num_engines)
            *idx = 0;
         if (info->engines[*idx].engine.engine_class == engine_class) {
            engine_instance = info->engines[*idx].engine.engine_instance;
            break;
         }
      }
      if (engine_instance < 0) {
         free(engines_param);
         return -1;
      }

      *class_inst_ptr++ = engine_class;
      *class_inst_ptr++ = engine_instance;
   }

   assert((uintptr_t)engines_param + engines_param_size ==
          (uintptr_t)class_inst_ptr);

   struct drm_i915_gem_context_create_ext_setparam set_engines = {
      .base = {
         .name = I915_CONTEXT_CREATE_EXT_SETPARAM,
      },
      .param = {
         .param = I915_CONTEXT_PARAM_ENGINES,
         .value = (uintptr_t)engines_param,
         .size = engines_param_size,
      }
   };
   struct drm_i915_gem_context_create_ext create = {
      .flags = I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS,
      .extensions = (uintptr_t)&set_engines,
   };
   int ret = intel_ioctl(device->fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE_EXT, &create);
   free(engines_param);
   if (ret == -1)
      return -1;

   return create.ctx_id;
}

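/** Wrapper around DRM_IOCTL_I915_GEM_CONTEXT_DESTROY. */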
int
anv_gem_destroy_context(struct anv_device *device, int context)
{
   struct drm_i915_gem_context_destroy destroy = {
      .ctx_id = context,
   };

   return intel_ioctl(device->fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY, &destroy);
}

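/**
 * Wrapper around DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM.
 *
 * Returns 0 on success, -errno on failure.
 */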
int
anv_gem_set_context_param(int fd, int context, uint32_t param, uint64_t value)
{
   struct drm_i915_gem_context_param p = {
      .ctx_id = context,
      .param = param,
      .value = value,
   };
   int err = 0;

   if (intel_ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p))
      err = -errno;
   return err;
}

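/**
 * Wrapper around DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM.
 *
 * Returns 0 on success and stores the result in \a value, or -1 on failure.
 */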
int
anv_gem_get_context_param(int fd, int context, uint32_t param, uint64_t *value)
{
   struct drm_i915_gem_context_param gp = {
      .ctx_id = context,
      .param = param,
   };

   int ret = intel_ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM, &gp);
   if (ret == -1)
      return -1;

   *value = gp.value;
   return 0;
}

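/**
 * Wrapper around DRM_IOCTL_I915_GET_RESET_STATS. On success, \a active and
 * \a pending receive the context's batch_active and batch_pending counts.
 */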
int
anv_gem_context_get_reset_stats(int fd, int context,
                                uint32_t *active, uint32_t *pending)
{
   struct drm_i915_reset_stats stats = {
      .ctx_id = context,
   };

   int ret = intel_ioctl(fd, DRM_IOCTL_I915_GET_RESET_STATS, &stats);
   if (ret == 0) {
      *active = stats.batch_active;
      *pending = stats.batch_pending;
   }

   return ret;
}

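/**
 * Exports a GEM handle as a dma-buf file descriptor via
 * DRM_IOCTL_PRIME_HANDLE_TO_FD. Returns the fd, or -1 on failure.
 */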
int
anv_gem_handle_to_fd(struct anv_device *device, uint32_t gem_handle)
{
   struct drm_prime_handle args = {
      .handle = gem_handle,
      .flags = DRM_CLOEXEC | DRM_RDWR,
   };

   int ret = intel_ioctl(device->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &args);
   if (ret == -1)
      return -1;

   return args.fd;
}

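/**
 * Imports a dma-buf file descriptor as a GEM handle via
 * DRM_IOCTL_PRIME_FD_TO_HANDLE. Returns the handle, or 0 on failure.
 */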
uint32_t
anv_gem_fd_to_handle(struct anv_device *device, int fd)
{
   struct drm_prime_handle args = {
      .fd = fd,
   };

   int ret = intel_ioctl(device->fd, DRM_IOCTL_PRIME_FD_TO_HANDLE, &args);
   if (ret == -1)
      return 0;

   return args.handle;
}

int
anv_gem_reg_read(int fd, uint32_t offset, uint64_t *result)
{
   struct drm_i915_reg_read args = {
      .offset = offset
   };

   int ret = intel_ioctl(fd, DRM_IOCTL_I915_REG_READ, &args);

   *result = args.val;
   return ret;
}

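/**
 * Merges two sync-file fds with SYNC_IOC_MERGE. Returns the merged fence fd,
 * or -1 on failure.
 */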
int
anv_gem_sync_file_merge(struct anv_device *device, int fd1, int fd2)
{
   struct sync_merge_data args = {
      .name = "anv merge fence",
      .fd2 = fd2,
      .fence = -1,
   };

   int ret = intel_ioctl(fd1, SYNC_IOC_MERGE, &args);
   if (ret == -1)
      return -1;

   return args.fence;
}

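/**
 * Wrapper around DRM_IOCTL_SYNCOBJ_CREATE. Returns the syncobj handle, or 0
 * on failure.
 */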
uint32_t
anv_gem_syncobj_create(struct anv_device *device, uint32_t flags)
{
   struct drm_syncobj_create args = {
      .flags = flags,
   };

   int ret = intel_ioctl(device->fd, DRM_IOCTL_SYNCOBJ_CREATE, &args);
   if (ret)
      return 0;

   return args.handle;
}

void
anv_gem_syncobj_destroy(struct anv_device *device, uint32_t handle)
{
   struct drm_syncobj_destroy args = {
      .handle = handle,
   };

   intel_ioctl(device->fd, DRM_IOCTL_SYNCOBJ_DESTROY, &args);
}

int
anv_gem_syncobj_handle_to_fd(struct anv_device *device, uint32_t handle)
{
   struct drm_syncobj_handle args = {
      .handle = handle,
   };

   int ret = intel_ioctl(device->fd, DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD, &args);
   if (ret)
      return -1;

   return args.fd;
}

uint32_t
anv_gem_syncobj_fd_to_handle(struct anv_device *device, int fd)
{
   struct drm_syncobj_handle args = {
      .fd = fd,
   };

   int ret = intel_ioctl(device->fd, DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE, &args);
   if (ret)
      return 0;

   return args.handle;
}

int
anv_gem_syncobj_export_sync_file(struct anv_device *device, uint32_t handle)
{
   struct drm_syncobj_handle args = {
      .handle = handle,
      .flags = DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE,
   };

   int ret = intel_ioctl(device->fd, DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD, &args);
   if (ret)
      return -1;

   return args.fd;
}

int
anv_gem_syncobj_import_sync_file(struct anv_device *device,
                                 uint32_t handle, int fd)
{
   struct drm_syncobj_handle args = {
      .handle = handle,
      .fd = fd,
      .flags = DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE,
   };

   return intel_ioctl(device->fd, DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE, &args);
}

void
anv_gem_syncobj_reset(struct anv_device *device, uint32_t handle)
{
   struct drm_syncobj_array args = {
      .handles = (uint64_t)(uintptr_t)&handle,
      .count_handles = 1,
   };

   intel_ioctl(device->fd, DRM_IOCTL_SYNCOBJ_RESET, &args);
}

bool
anv_gem_supports_syncobj_wait(int fd)
{
   return intel_gem_supports_syncobj_wait(fd);
}

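/**
 * Wrapper around DRM_IOCTL_SYNCOBJ_WAIT. Waits on a set of binary syncobjs
 * until the absolute timeout, optionally requiring all of them to signal.
 */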
int
anv_gem_syncobj_wait(struct anv_device *device,
                     const uint32_t *handles, uint32_t num_handles,
                     int64_t abs_timeout_ns, bool wait_all)
{
   struct drm_syncobj_wait args = {
      .handles = (uint64_t)(uintptr_t)handles,
      .count_handles = num_handles,
      .timeout_nsec = abs_timeout_ns,
      .flags = DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT,
   };

   if (wait_all)
      args.flags |= DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL;

   return intel_ioctl(device->fd, DRM_IOCTL_SYNCOBJ_WAIT, &args);
}

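/**
 * Wrapper around DRM_IOCTL_SYNCOBJ_TIMELINE_WAIT. Waits on timeline syncobj
 * points, optionally requiring all of them and/or only waiting for the
 * fences to materialize.
 */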
int
anv_gem_syncobj_timeline_wait(struct anv_device *device,
                              const uint32_t *handles, const uint64_t *points,
                              uint32_t num_items, int64_t abs_timeout_ns,
                              bool wait_all, bool wait_materialize)
{
   assert(device->physical->has_syncobj_wait_available);

   struct drm_syncobj_timeline_wait args = {
      .handles = (uint64_t)(uintptr_t)handles,
      .points = (uint64_t)(uintptr_t)points,
      .count_handles = num_items,
      .timeout_nsec = abs_timeout_ns,
      .flags = DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT,
   };

   if (wait_all)
      args.flags |= DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL;
   if (wait_materialize)
      args.flags |= DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE;

   return intel_ioctl(device->fd, DRM_IOCTL_SYNCOBJ_TIMELINE_WAIT, &args);
}

int
anv_gem_syncobj_timeline_signal(struct anv_device *device,
                                const uint32_t *handles, const uint64_t *points,
                                uint32_t num_items)
{
   assert(device->physical->has_syncobj_wait_available);

   struct drm_syncobj_timeline_array args = {
      .handles = (uint64_t)(uintptr_t)handles,
      .points = (uint64_t)(uintptr_t)points,
      .count_handles = num_items,
   };

   return intel_ioctl(device->fd, DRM_IOCTL_SYNCOBJ_TIMELINE_SIGNAL, &args);
}

int
anv_gem_syncobj_timeline_query(struct anv_device *device,
                               const uint32_t *handles, uint64_t *points,
                               uint32_t num_items)
{
   assert(device->physical->has_syncobj_wait_available);

   struct drm_syncobj_timeline_array args = {
      .handles = (uint64_t)(uintptr_t)handles,
      .points = (uint64_t)(uintptr_t)points,
      .count_handles = num_items,
   };

   return intel_ioctl(device->fd, DRM_IOCTL_SYNCOBJ_QUERY, &args);
}

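/**
 * Queries the kernel's engine list (DRM_I915_QUERY_ENGINE_INFO) via
 * intel_i915_query_alloc(); returns NULL on failure.
 */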
struct drm_i915_query_engine_info *
anv_gem_get_engine_info(int fd)
{
   return intel_i915_query_alloc(fd, DRM_I915_QUERY_ENGINE_INFO);
}

int
anv_gem_count_engines(const struct drm_i915_query_engine_info *info,
                      uint16_t engine_class)
{
   int count = 0;
   for (int i = 0; i < info->num_engines; i++) {
      if (info->engines[i].engine.engine_class == engine_class)
         count++;
   }
   return count;
}