/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <sys/ioctl.h>
#include <sys/types.h>
#include <sys/mman.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <fcntl.h>

#include "anv_private.h"
#include "common/gen_defines.h"
#include "common/gen_gem.h"
#include "drm-uapi/sync_file.h"

/**
 * Wrapper around DRM_IOCTL_I915_GEM_CREATE.
 *
 * Return gem handle, or 0 on failure. Gem handles are never 0.
 */
uint32_t
anv_gem_create(struct anv_device *device, uint64_t size)
{
   struct drm_i915_gem_create gem_create = {
      .size = size,
   };

   int ret = gen_ioctl(device->fd, DRM_IOCTL_I915_GEM_CREATE, &gem_create);
   if (ret != 0) {
      /* FIXME: What do we do if this fails? */
      return 0;
   }

   return gem_create.handle;
}

void
anv_gem_close(struct anv_device *device, uint32_t gem_handle)
{
   struct drm_gem_close close = {
      .handle = gem_handle,
   };

   gen_ioctl(device->fd, DRM_IOCTL_GEM_CLOSE, &close);
}
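
/* Usage sketch (illustrative only, not part of the driver): allocate a BO
 * and release it, checking for the 0 handle that signals failure. vk_error()
 * stands in for whatever error path the caller actually uses:
 *
 *    uint32_t handle = anv_gem_create(device, 4096);
 *    if (handle == 0)
 *       return vk_error(VK_ERROR_OUT_OF_DEVICE_MEMORY);
 *    ...
 *    anv_gem_close(device, handle);
 */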

/**
 * Wrapper around DRM_IOCTL_I915_GEM_MMAP_OFFSET. Returns MAP_FAILED on error.
 */
static void*
anv_gem_mmap_offset(struct anv_device *device, uint32_t gem_handle,
                    uint64_t offset, uint64_t size, uint32_t flags)
{
   struct drm_i915_gem_mmap_offset gem_mmap = {
      .handle = gem_handle,
      .flags = (flags & I915_MMAP_WC) ?
               I915_MMAP_OFFSET_WC : I915_MMAP_OFFSET_WB,
   };
   assert(offset == 0);

   /* Get the fake offset back */
   int ret = gen_ioctl(device->fd, DRM_IOCTL_I915_GEM_MMAP_OFFSET, &gem_mmap);
   if (ret != 0)
      return MAP_FAILED;

   /* And map it */
   void *map = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
                    device->fd, gem_mmap.offset);
   return map;
}

static void*
anv_gem_mmap_legacy(struct anv_device *device, uint32_t gem_handle,
                    uint64_t offset, uint64_t size, uint32_t flags)
{
   struct drm_i915_gem_mmap gem_mmap = {
      .handle = gem_handle,
      .offset = offset,
      .size = size,
      .flags = flags,
   };

   int ret = gen_ioctl(device->fd, DRM_IOCTL_I915_GEM_MMAP, &gem_mmap);
   if (ret != 0)
      return MAP_FAILED;

   return (void *)(uintptr_t) gem_mmap.addr_ptr;
}

/**
 * Wrapper around DRM_IOCTL_I915_GEM_MMAP, or DRM_IOCTL_I915_GEM_MMAP_OFFSET
 * where the kernel supports it. Returns MAP_FAILED on error.
 */
void*
anv_gem_mmap(struct anv_device *device, uint32_t gem_handle,
             uint64_t offset, uint64_t size, uint32_t flags)
{
   void *map;
   if (device->physical->has_mmap_offset)
      map = anv_gem_mmap_offset(device, gem_handle, offset, size, flags);
   else
      map = anv_gem_mmap_legacy(device, gem_handle, offset, size, flags);

   if (map != MAP_FAILED)
      VG(VALGRIND_MALLOCLIKE_BLOCK(map, size, 0, 1));

   return map;
}

/* This is just a wrapper around munmap, but it also notifies valgrind that
 * this map is no longer valid. Pair this with anv_gem_mmap().
 */
void
anv_gem_munmap(struct anv_device *device, void *p, uint64_t size)
{
   VG(VALGRIND_FREELIKE_BLOCK(p, 0));
   munmap(p, size);
}
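
/* Usage sketch (illustrative only): map a BO and unmap it when done. Note
 * that the mmap_offset path honors only the I915_MMAP_WC flag, and the size
 * passed to anv_gem_munmap() should match the mapped size:
 *
 *    void *map = anv_gem_mmap(device, handle, 0, size, I915_MMAP_WC);
 *    if (map == MAP_FAILED)
 *       return vk_error(VK_ERROR_MEMORY_MAP_FAILED);
 *    memcpy(map, data, size);
 *    anv_gem_munmap(device, map, size);
 */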

uint32_t
anv_gem_userptr(struct anv_device *device, void *mem, size_t size)
{
   struct drm_i915_gem_userptr userptr = {
      .user_ptr = (__u64)((unsigned long) mem),
      .user_size = size,
      .flags = 0,
   };

   int ret = gen_ioctl(device->fd, DRM_IOCTL_I915_GEM_USERPTR, &userptr);
   if (ret == -1)
      return 0;

   return userptr.handle;
}

int
anv_gem_set_caching(struct anv_device *device,
                    uint32_t gem_handle, uint32_t caching)
{
   struct drm_i915_gem_caching gem_caching = {
      .handle = gem_handle,
      .caching = caching,
   };

   return gen_ioctl(device->fd, DRM_IOCTL_I915_GEM_SET_CACHING, &gem_caching);
}

int
anv_gem_set_domain(struct anv_device *device, uint32_t gem_handle,
                   uint32_t read_domains, uint32_t write_domain)
{
   struct drm_i915_gem_set_domain gem_set_domain = {
      .handle = gem_handle,
      .read_domains = read_domains,
      .write_domain = write_domain,
   };

   return gen_ioctl(device->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &gem_set_domain);
}

/**
 * Returns 1 if the BO is busy, 0 if it is idle, or a negative value on
 * error.
 */
int
anv_gem_busy(struct anv_device *device, uint32_t gem_handle)
{
   struct drm_i915_gem_busy busy = {
      .handle = gem_handle,
   };

   int ret = gen_ioctl(device->fd, DRM_IOCTL_I915_GEM_BUSY, &busy);
   if (ret < 0)
      return ret;

   return busy.busy != 0;
}

/**
 * On error, \a timeout_ns holds the remaining time.
 */
int
anv_gem_wait(struct anv_device *device, uint32_t gem_handle, int64_t *timeout_ns)
{
   struct drm_i915_gem_wait wait = {
      .bo_handle = gem_handle,
      .timeout_ns = *timeout_ns,
      .flags = 0,
   };

   int ret = gen_ioctl(device->fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
   *timeout_ns = wait.timeout_ns;

   return ret;
}
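
/* Usage sketch (illustrative only): wait on a BO with a relative timeout.
 * Because the kernel writes the remaining time back, the same variable can
 * be reused if the wait has to be restarted. ETIME on a -1 return indicates
 * the timeout expired before the BO went idle:
 *
 *    int64_t timeout_ns = 1000 * 1000 * 1000;   // 1 second
 *    if (anv_gem_wait(device, handle, &timeout_ns) == -1 && errno == ETIME)
 *       ...timed out; timeout_ns holds what was left...
 */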

int
anv_gem_execbuffer(struct anv_device *device,
                   struct drm_i915_gem_execbuffer2 *execbuf)
{
   if (execbuf->flags & I915_EXEC_FENCE_OUT)
      return gen_ioctl(device->fd, DRM_IOCTL_I915_GEM_EXECBUFFER2_WR, execbuf);
   else
      return gen_ioctl(device->fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, execbuf);
}
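
/* DRM_IOCTL_I915_GEM_EXECBUFFER2_WR is the read/write variant of the
 * execbuf ioctl; it is required with I915_EXEC_FENCE_OUT because the kernel
 * writes the returned fence fd back through the execbuf struct (in the
 * upper 32 bits of rsvd2). The plain ioctl is write-only from userspace's
 * point of view.
 */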

/** Return -1 on error. */
int
anv_gem_get_tiling(struct anv_device *device, uint32_t gem_handle)
{
   struct drm_i915_gem_get_tiling get_tiling = {
      .handle = gem_handle,
   };

   /* FIXME: On discrete platforms we don't have DRM_IOCTL_I915_GEM_GET_TILING
    * anymore, so we will need another way to get the tiling. Apparently this
    * is only used in Android code, so we may need some other way to
    * communicate the tiling mode.
    */
   if (gen_ioctl(device->fd, DRM_IOCTL_I915_GEM_GET_TILING, &get_tiling)) {
      assert(!"Failed to get BO tiling");
      return -1;
   }

   return get_tiling.tiling_mode;
}

int
anv_gem_set_tiling(struct anv_device *device,
                   uint32_t gem_handle, uint32_t stride, uint32_t tiling)
{
   int ret;

   /* On discrete platforms we don't have DRM_IOCTL_I915_GEM_SET_TILING. So
    * nothing needs to be done.
    */
   if (!device->info.has_tiling_uapi)
      return 0;

   /* set_tiling overwrites the input on the error path, so we have to open
    * code gen_ioctl.
    */
   do {
      struct drm_i915_gem_set_tiling set_tiling = {
         .handle = gem_handle,
         .tiling_mode = tiling,
         .stride = stride,
      };

      ret = ioctl(device->fd, DRM_IOCTL_I915_GEM_SET_TILING, &set_tiling);
   } while (ret == -1 && (errno == EINTR || errno == EAGAIN));

   return ret;
}

int
anv_gem_get_param(int fd, uint32_t param)
{
   int tmp;

   drm_i915_getparam_t gp = {
      .param = param,
      .value = &tmp,
   };

   int ret = gen_ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp);
   if (ret == 0)
      return tmp;

   return 0;
}
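
/* Usage sketch (illustrative only): query an i915 parameter. Note that 0 is
 * returned both on failure and when the parameter's value really is 0, so
 * this is only suitable for parameters where 0 means "absent":
 *
 *    if (anv_gem_get_param(fd, I915_PARAM_HAS_EXEC_ASYNC))
 *       ...kernel supports EXEC_ASYNC...
 */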

uint64_t
anv_gem_get_drm_cap(int fd, uint32_t capability)
{
   struct drm_get_cap cap = {
      .capability = capability,
   };

   gen_ioctl(fd, DRM_IOCTL_GET_CAP, &cap);
   return cap.value;
}

bool
anv_gem_get_bit6_swizzle(int fd, uint32_t tiling)
{
   struct drm_gem_close close;
   int ret;

   struct drm_i915_gem_create gem_create = {
      .size = 4096,
   };

   if (gen_ioctl(fd, DRM_IOCTL_I915_GEM_CREATE, &gem_create)) {
      assert(!"Failed to create GEM BO");
      return false;
   }

   bool swizzled = false;

   /* set_tiling overwrites the input on the error path, so we have to open
    * code gen_ioctl.
    */
   do {
      struct drm_i915_gem_set_tiling set_tiling = {
         .handle = gem_create.handle,
         .tiling_mode = tiling,
         .stride = tiling == I915_TILING_X ? 512 : 128,
      };

      ret = ioctl(fd, DRM_IOCTL_I915_GEM_SET_TILING, &set_tiling);
   } while (ret == -1 && (errno == EINTR || errno == EAGAIN));

   if (ret != 0) {
      assert(!"Failed to set BO tiling");
      goto close_and_return;
   }

   struct drm_i915_gem_get_tiling get_tiling = {
      .handle = gem_create.handle,
   };

   if (gen_ioctl(fd, DRM_IOCTL_I915_GEM_GET_TILING, &get_tiling)) {
      assert(!"Failed to get BO tiling");
      goto close_and_return;
   }

   swizzled = get_tiling.swizzle_mode != I915_BIT_6_SWIZZLE_NONE;

 close_and_return:

   memset(&close, 0, sizeof(close));
   close.handle = gem_create.handle;
   gen_ioctl(fd, DRM_IOCTL_GEM_CLOSE, &close);

   return swizzled;
}

bool
anv_gem_has_context_priority(int fd)
{
   return !anv_gem_set_context_param(fd, 0, I915_CONTEXT_PARAM_PRIORITY,
                                     GEN_CONTEXT_MEDIUM_PRIORITY);
}

int
anv_gem_create_context(struct anv_device *device)
{
   struct drm_i915_gem_context_create create = { 0 };

   int ret = gen_ioctl(device->fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create);
   if (ret == -1)
      return -1;

   return create.ctx_id;
}

int
anv_gem_destroy_context(struct anv_device *device, int context)
{
   struct drm_i915_gem_context_destroy destroy = {
      .ctx_id = context,
   };

   return gen_ioctl(device->fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY, &destroy);
}

int
anv_gem_set_context_param(int fd, int context, uint32_t param, uint64_t value)
{
   struct drm_i915_gem_context_param p = {
      .ctx_id = context,
      .param = param,
      .value = value,
   };
   int err = 0;

   if (gen_ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p))
      err = -errno;
   return err;
}
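
/* Unlike most wrappers in this file, anv_gem_set_context_param() returns a
 * negated errno value rather than the raw ioctl result, so callers can tell
 * failure reasons apart. anv_gem_has_context_priority() above uses it as a
 * simple probe: a ctx_id of 0 targets the fd's default context, and setting
 * the default (medium) priority succeeds exactly when the kernel supports
 * I915_CONTEXT_PARAM_PRIORITY.
 */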

int
anv_gem_get_context_param(int fd, int context, uint32_t param, uint64_t *value)
{
   struct drm_i915_gem_context_param gp = {
      .ctx_id = context,
      .param = param,
   };

   int ret = gen_ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM, &gp);
   if (ret == -1)
      return -1;

   *value = gp.value;
   return 0;
}

int
anv_gem_gpu_get_reset_stats(struct anv_device *device,
                            uint32_t *active, uint32_t *pending)
{
   struct drm_i915_reset_stats stats = {
      .ctx_id = device->context_id,
   };

   int ret = gen_ioctl(device->fd, DRM_IOCTL_I915_GET_RESET_STATS, &stats);
   if (ret == 0) {
      *active = stats.batch_active;
      *pending = stats.batch_pending;
   }

   return ret;
}

int
anv_gem_handle_to_fd(struct anv_device *device, uint32_t gem_handle)
{
   struct drm_prime_handle args = {
      .handle = gem_handle,
      .flags = DRM_CLOEXEC | DRM_RDWR,
   };

   int ret = gen_ioctl(device->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &args);
   if (ret == -1)
      return -1;

   return args.fd;
}

uint32_t
anv_gem_fd_to_handle(struct anv_device *device, int fd)
{
   struct drm_prime_handle args = {
      .fd = fd,
   };

   int ret = gen_ioctl(device->fd, DRM_IOCTL_PRIME_FD_TO_HANDLE, &args);
   if (ret == -1)
      return 0;

   return args.handle;
}

int
anv_gem_reg_read(int fd, uint32_t offset, uint64_t *result)
{
   struct drm_i915_reg_read args = {
      .offset = offset
   };

   int ret = gen_ioctl(fd, DRM_IOCTL_I915_REG_READ, &args);

   *result = args.val;
   return ret;
}

int
anv_gem_sync_file_merge(struct anv_device *device, int fd1, int fd2)
{
   struct sync_merge_data args = {
      .name = "anv merge fence",
      .fd2 = fd2,
      .fence = -1,
   };

   int ret = gen_ioctl(fd1, SYNC_IOC_MERGE, &args);
   if (ret == -1)
      return -1;

   return args.fence;
}
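
/* Usage sketch (illustrative only): combine two sync files into one fence
 * that signals when both do. The merged fd is a new file descriptor; the
 * inputs remain valid and still need to be closed by the caller. vk_error()
 * again stands in for the caller's error path:
 *
 *    int merged = anv_gem_sync_file_merge(device, fd1, fd2);
 *    if (merged == -1)
 *       return vk_error(VK_ERROR_DEVICE_LOST);
 *    close(fd1);
 *    close(fd2);
 */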

uint32_t
anv_gem_syncobj_create(struct anv_device *device, uint32_t flags)
{
   struct drm_syncobj_create args = {
      .flags = flags,
   };

   int ret = gen_ioctl(device->fd, DRM_IOCTL_SYNCOBJ_CREATE, &args);
   if (ret)
      return 0;

   return args.handle;
}

void
anv_gem_syncobj_destroy(struct anv_device *device, uint32_t handle)
{
   struct drm_syncobj_destroy args = {
      .handle = handle,
   };

   gen_ioctl(device->fd, DRM_IOCTL_SYNCOBJ_DESTROY, &args);
}

int
anv_gem_syncobj_handle_to_fd(struct anv_device *device, uint32_t handle)
{
   struct drm_syncobj_handle args = {
      .handle = handle,
   };

   int ret = gen_ioctl(device->fd, DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD, &args);
   if (ret)
      return -1;

   return args.fd;
}

uint32_t
anv_gem_syncobj_fd_to_handle(struct anv_device *device, int fd)
{
   struct drm_syncobj_handle args = {
      .fd = fd,
   };

   int ret = gen_ioctl(device->fd, DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE, &args);
   if (ret)
      return 0;

   return args.handle;
}

int
anv_gem_syncobj_export_sync_file(struct anv_device *device, uint32_t handle)
{
   struct drm_syncobj_handle args = {
      .handle = handle,
      .flags = DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE,
   };

   int ret = gen_ioctl(device->fd, DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD, &args);
   if (ret)
      return -1;

   return args.fd;
}

int
anv_gem_syncobj_import_sync_file(struct anv_device *device,
                                 uint32_t handle, int fd)
{
   struct drm_syncobj_handle args = {
      .handle = handle,
      .fd = fd,
      .flags = DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE,
   };

   return gen_ioctl(device->fd, DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE, &args);
}

void
anv_gem_syncobj_reset(struct anv_device *device, uint32_t handle)
{
   struct drm_syncobj_array args = {
      .handles = (uint64_t)(uintptr_t)&handle,
      .count_handles = 1,
   };

   gen_ioctl(device->fd, DRM_IOCTL_SYNCOBJ_RESET, &args);
}

bool
anv_gem_supports_syncobj_wait(int fd)
{
   return gen_gem_supports_syncobj_wait(fd);
}

int
anv_gem_syncobj_wait(struct anv_device *device,
                     const uint32_t *handles, uint32_t num_handles,
                     int64_t abs_timeout_ns, bool wait_all)
{
   struct drm_syncobj_wait args = {
      .handles = (uint64_t)(uintptr_t)handles,
      .count_handles = num_handles,
      .timeout_nsec = abs_timeout_ns,
      .flags = DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT,
   };

   if (wait_all)
      args.flags |= DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL;

   return gen_ioctl(device->fd, DRM_IOCTL_SYNCOBJ_WAIT, &args);
}
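
/* Usage sketch (illustrative only): wait for either of two syncobjs to
 * signal. Unlike anv_gem_wait(), the timeout here is an absolute
 * CLOCK_MONOTONIC deadline in nanoseconds, and WAIT_FOR_SUBMIT means the
 * wait also covers syncobjs that have no fence attached yet:
 *
 *    uint32_t handles[2] = { syncobj_a, syncobj_b };
 *    int ret = anv_gem_syncobj_wait(device, handles, 2, deadline_ns, false);
 *    if (ret == -1 && errno == ETIME)
 *       ...deadline passed...
 */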

int
anv_gem_syncobj_timeline_wait(struct anv_device *device,
                              const uint32_t *handles, const uint64_t *points,
                              uint32_t num_items, int64_t abs_timeout_ns,
                              bool wait_all, bool wait_materialize)
{
   assert(device->physical->has_syncobj_wait_available);

   struct drm_syncobj_timeline_wait args = {
      .handles = (uint64_t)(uintptr_t)handles,
      .points = (uint64_t)(uintptr_t)points,
      .count_handles = num_items,
      .timeout_nsec = abs_timeout_ns,
      .flags = DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT,
   };

   if (wait_all)
      args.flags |= DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL;
   if (wait_materialize)
      args.flags |= DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE;

   return gen_ioctl(device->fd, DRM_IOCTL_SYNCOBJ_TIMELINE_WAIT, &args);
}

int
anv_gem_syncobj_timeline_signal(struct anv_device *device,
                                const uint32_t *handles, const uint64_t *points,
                                uint32_t num_items)
{
   assert(device->physical->has_syncobj_wait_available);

   struct drm_syncobj_timeline_array args = {
      .handles = (uint64_t)(uintptr_t)handles,
      .points = (uint64_t)(uintptr_t)points,
      .count_handles = num_items,
   };

   return gen_ioctl(device->fd, DRM_IOCTL_SYNCOBJ_TIMELINE_SIGNAL, &args);
}

int
anv_gem_syncobj_timeline_query(struct anv_device *device,
                               const uint32_t *handles, uint64_t *points,
                               uint32_t num_items)
{
   assert(device->physical->has_syncobj_wait_available);

   struct drm_syncobj_timeline_array args = {
      .handles = (uint64_t)(uintptr_t)handles,
      .points = (uint64_t)(uintptr_t)points,
      .count_handles = num_items,
   };

   return gen_ioctl(device->fd, DRM_IOCTL_SYNCOBJ_QUERY, &args);
}

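/* Usage sketch (illustrative only): read the current payload of a timeline
 * syncobj and signal the next point on it. A point value of 0 makes a
 * timeline syncobj behave like a binary one:
 *
 *    uint64_t point;
 *    if (anv_gem_syncobj_timeline_query(device, &timeline, &point, 1) == 0) {
 *       uint64_t next = point + 1;
 *       anv_gem_syncobj_timeline_signal(device, &timeline, &next, 1);
 *    }
 */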