1 /*
2 * Copyright © 2007, 2011, 2013, 2014 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eric Anholt <eric@anholt.net>
25 * Daniel Vetter <daniel.vetter@ffwll.ch>
26 *
27 */
28
29 #ifdef HAVE_LIBGEN_H
30 #include <libgen.h>
31 #endif
32 #include <stdio.h>
33 #include <fcntl.h>
34 #include <sys/stat.h>
35 #include <sys/ioctl.h>
36 #include <string.h>
37 #include <sys/mman.h>
38 #include <signal.h>
39 #include <pciaccess.h>
40 #include <getopt.h>
41 #include <stdlib.h>
42 #include <unistd.h>
43 #include <sys/wait.h>
44 #include <sys/types.h>
45 #include <sys/syscall.h>
46 #include <sys/utsname.h>
47 #include <termios.h>
48 #include <errno.h>
49
50 #include "drmtest.h"
51 #include "i915_drm.h"
52 #include "intel_batchbuffer.h"
53 #include "intel_chipset.h"
54 #include "intel_io.h"
55 #include "igt_debugfs.h"
56 #include "igt_sysfs.h"
57 #include "config.h"
58
59 #ifdef HAVE_VALGRIND
60 #include <valgrind/valgrind.h>
61 #include <valgrind/memcheck.h>
62
63 #define VG(x) x
64 #else
65 #define VG(x) do {} while (0)
66 #endif
67
68 #include "ioctl_wrappers.h"
69
70 /**
71 * SECTION:ioctl_wrappers
72 * @short_description: ioctl wrappers and related functions
73 * @title: ioctl wrappers
74 * @include: igt.h
75 *
 * This helper library contains simple functions to wrap the raw drm/i915 kernel
 * ioctls. The normal versions never pass any error codes to the caller and use
 * igt_assert() to check for error conditions instead. For some ioctls, raw
 * wrappers which do pass on error codes are available. These raw wrappers have
 * a __ prefix.
 *
 * For wrappers which check for feature bits there can also be two versions: the
 * normal one simply returns a boolean to the caller. But when skipping the
 * testcase entirely is the right action, it's better to use igt_skip()
 * directly in the wrapper. Such functions have _require_ in their name to
 * distinguish them.
87 */
88
89 int (*igt_ioctl)(int fd, unsigned long request, void *arg) = drmIoctl;
90
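/*
 * Usage sketch (illustrative only, not called anywhere in this library):
 * contrast of the two wrapper flavours described above. gem_create() asserts
 * on failure, while __gem_create() hands the -errno value back to the caller.
 * Assumes @fd is an already-open i915 drm fd; the example_* name is made up.
 */
static void __attribute__((unused)) example_wrapper_flavours(int fd)
{
	uint32_t handle;
	int err;

	/* "Normal" wrapper: any failure trips an igt_assert() internally. */
	handle = gem_create(fd, 4096);
	gem_close(fd, handle);

	/* Raw "__" wrapper: the caller inspects the error code itself. */
	err = __gem_create(fd, 4096, &handle);
	if (err == 0)
		gem_close(fd, handle);
	else
		igt_info("GEM_CREATE failed with %d\n", err);
}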
91
92 /**
93 * gem_handle_to_libdrm_bo:
94 * @bufmgr: libdrm buffer manager instance
95 * @fd: open i915 drm file descriptor
96 * @name: buffer name in libdrm
97 * @handle: gem buffer object handle
98 *
99 * This helper function imports a raw gem buffer handle into the libdrm buffer
100 * manager.
101 *
102 * Returns: The imported libdrm buffer manager object.
103 */
104 drm_intel_bo *
gem_handle_to_libdrm_bo(drm_intel_bufmgr *bufmgr, int fd, const char *name, uint32_t handle)
106 {
107 struct drm_gem_flink flink;
108 int ret;
109 drm_intel_bo *bo;
110
memset(&flink, 0, sizeof(flink));
112 flink.handle = handle;
113 ret = ioctl(fd, DRM_IOCTL_GEM_FLINK, &flink);
114 igt_assert(ret == 0);
115 errno = 0;
116
117 bo = drm_intel_bo_gem_create_from_name(bufmgr, name, flink.name);
118 igt_assert(bo);
119
120 return bo;
121 }
122
123 static int
__gem_get_tiling(int fd, struct drm_i915_gem_get_tiling *arg)
125 {
126 int err;
127
128 err = 0;
129 if (igt_ioctl(fd, DRM_IOCTL_I915_GEM_GET_TILING, arg))
130 err = -errno;
131 errno = 0;
132
133 return err;
134 }
135
136 /**
137 * gem_get_tiling:
138 * @fd: open i915 drm file descriptor
139 * @handle: gem buffer object handle
140 * @tiling: (out) tiling mode of the gem buffer
141 * @swizzle: (out) bit 6 swizzle mode
142 *
143 * This wraps the GET_TILING ioctl.
144 *
 * Returns: Whether the actual physical tiling matches the reported tiling.
146 */
147 bool
gem_get_tiling(int fd, uint32_t handle, uint32_t *tiling, uint32_t *swizzle)
149 {
150 struct drm_i915_gem_get_tiling get_tiling;
151
152 memset(&get_tiling, 0, sizeof(get_tiling));
153 get_tiling.handle = handle;
154
155 igt_assert_eq(__gem_get_tiling(fd, &get_tiling), 0);
156
157 *tiling = get_tiling.tiling_mode;
158 *swizzle = get_tiling.swizzle_mode;
159
160 return get_tiling.phys_swizzle_mode == get_tiling.swizzle_mode;
161 }
162
int __gem_set_tiling(int fd, uint32_t handle, uint32_t tiling, uint32_t stride)
164 {
165 struct drm_i915_gem_set_tiling st;
166 int ret;
167
168 /* The kernel doesn't know about these tiling modes, expects NONE */
169 if (tiling == I915_TILING_Yf || tiling == I915_TILING_Ys)
170 tiling = I915_TILING_NONE;
171
172 memset(&st, 0, sizeof(st));
173 do {
174 st.handle = handle;
175 st.tiling_mode = tiling;
176 st.stride = tiling ? stride : 0;
177
178 ret = ioctl(fd, DRM_IOCTL_I915_GEM_SET_TILING, &st);
179 } while (ret == -1 && (errno == EINTR || errno == EAGAIN));
180 if (ret != 0)
181 return -errno;
182
183 errno = 0;
184 igt_assert(st.tiling_mode == tiling);
185 return 0;
186 }
187
188 /**
189 * gem_set_tiling:
190 * @fd: open i915 drm file descriptor
191 * @handle: gem buffer object handle
192 * @tiling: tiling mode bits
193 * @stride: stride of the buffer when using a tiled mode, otherwise must be 0
194 *
195 * This wraps the SET_TILING ioctl.
196 */
void gem_set_tiling(int fd, uint32_t handle, uint32_t tiling, uint32_t stride)
198 {
199 igt_assert(__gem_set_tiling(fd, handle, tiling, stride) == 0);
200 }
201
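/*
 * Usage sketch (illustrative only): declare a buffer as X-tiled and read the
 * tiling state back. The 512KiB size and 512-byte stride are arbitrary example
 * values; @fd is assumed to be an open i915 drm fd.
 */
static void __attribute__((unused)) example_set_and_query_tiling(int fd)
{
	uint32_t handle = gem_create(fd, 512*1024);
	uint32_t tiling, swizzle;

	/* Skip rather than fail if tiling control is not supported. */
	igt_require(__gem_set_tiling(fd, handle, I915_TILING_X, 512) == 0);

	gem_get_tiling(fd, handle, &tiling, &swizzle);
	igt_assert_eq(tiling, I915_TILING_X);

	gem_close(fd, handle);
}
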
int __gem_set_caching(int fd, uint32_t handle, uint32_t caching)
203 {
204 struct drm_i915_gem_caching arg;
205 int err;
206
207 memset(&arg, 0, sizeof(arg));
208 arg.handle = handle;
209 arg.caching = caching;
210
211 err = 0;
212 if (igt_ioctl(fd, DRM_IOCTL_I915_GEM_SET_CACHING, &arg))
213 err = -errno;
214
215 errno = 0;
216 return err;
217 }
218
219 /**
220 * gem_set_caching:
221 * @fd: open i915 drm file descriptor
222 * @handle: gem buffer object handle
223 * @caching: caching mode bits
224 *
 * This wraps the SET_CACHING ioctl. Note that this function internally calls
 * igt_require() when SET_CACHING isn't available and hence automatically skips
 * the test. Therefore always extract test logic which uses this into its own
 * subtest.
229 */
void gem_set_caching(int fd, uint32_t handle, uint32_t caching)
231 {
232 igt_require(__gem_set_caching(fd, handle, caching) == 0);
233 }
234
235 /**
236 * gem_get_caching:
237 * @fd: open i915 drm file descriptor
238 * @handle: gem buffer object handle
239 *
240 * This wraps the GET_CACHING ioctl.
241 *
242 * Returns: The current caching mode bits.
243 */
uint32_t gem_get_caching(int fd, uint32_t handle)
245 {
246 struct drm_i915_gem_caching arg;
247 int ret;
248
249 memset(&arg, 0, sizeof(arg));
250 arg.handle = handle;
251 ret = ioctl(fd, DRM_IOCTL_I915_GEM_GET_CACHING, &arg);
252 igt_assert(ret == 0);
253 errno = 0;
254
255 return arg.caching;
256 }
257
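/*
 * Usage sketch (illustrative only): request LLC caching for a buffer and read
 * the mode back. As noted above, gem_set_caching() skips the test internally
 * when SET_CACHING is unavailable. @fd is assumed to be an open i915 drm fd.
 */
static void __attribute__((unused)) example_caching_roundtrip(int fd)
{
	uint32_t handle = gem_create(fd, 4096);

	gem_set_caching(fd, handle, I915_CACHING_CACHED);
	igt_assert_eq(gem_get_caching(fd, handle), I915_CACHING_CACHED);

	gem_close(fd, handle);
}
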
258 /**
259 * gem_open:
260 * @fd: open i915 drm file descriptor
261 * @name: flink buffer name
262 *
263 * This wraps the GEM_OPEN ioctl, which is used to import an flink name.
264 *
265 * Returns: gem file-private buffer handle of the open object.
266 */
uint32_t gem_open(int fd, uint32_t name)
268 {
269 struct drm_gem_open open_struct;
270 int ret;
271
272 memset(&open_struct, 0, sizeof(open_struct));
273 open_struct.name = name;
274 ret = ioctl(fd, DRM_IOCTL_GEM_OPEN, &open_struct);
275 igt_assert(ret == 0);
276 igt_assert(open_struct.handle != 0);
277 errno = 0;
278
279 return open_struct.handle;
280 }
281
282 /**
283 * gem_flink:
284 * @fd: open i915 drm file descriptor
285 * @handle: file-private gem buffer object handle
286 *
287 * This wraps the GEM_FLINK ioctl, which is used to export a gem buffer object
288 * into the device-global flink namespace. See gem_open() for opening such a
289 * buffer name on a different i915 drm file descriptor.
290 *
291 * Returns: The created flink buffer name.
292 */
uint32_t gem_flink(int fd, uint32_t handle)
294 {
295 struct drm_gem_flink flink;
296 int ret;
297
298 memset(&flink, 0, sizeof(flink));
299 flink.handle = handle;
300 ret = ioctl(fd, DRM_IOCTL_GEM_FLINK, &flink);
301 igt_assert(ret == 0);
302 errno = 0;
303
304 return flink.name;
305 }
306
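/*
 * Usage sketch (illustrative only): share a buffer between two drm file
 * descriptors through the flink namespace. Both @fd and @other_fd are assumed
 * to be open fds of the same i915 device; the names here are hypothetical.
 */
static void __attribute__((unused)) example_share_via_flink(int fd, int other_fd)
{
	uint32_t handle = gem_create(fd, 4096);
	uint32_t name = gem_flink(fd, handle);
	uint32_t imported = gem_open(other_fd, name);

	/* Both handles now refer to the same underlying object. */
	gem_close(other_fd, imported);
	gem_close(fd, handle);
}
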
307 /**
308 * gem_close:
309 * @fd: open i915 drm file descriptor
310 * @handle: gem buffer object handle
311 *
 * This wraps the GEM_CLOSE ioctl, which is used to release a file-private gem
 * buffer handle.
314 */
void gem_close(int fd, uint32_t handle)
316 {
317 struct drm_gem_close close_bo;
318
319 igt_assert_neq(handle, 0);
320
321 memset(&close_bo, 0, sizeof(close_bo));
322 close_bo.handle = handle;
323 do_ioctl(fd, DRM_IOCTL_GEM_CLOSE, &close_bo);
324 }
325
int __gem_write(int fd, uint32_t handle, uint64_t offset, const void *buf, uint64_t length)
327 {
328 struct drm_i915_gem_pwrite gem_pwrite;
329 int err;
330
331 memset(&gem_pwrite, 0, sizeof(gem_pwrite));
332 gem_pwrite.handle = handle;
333 gem_pwrite.offset = offset;
334 gem_pwrite.size = length;
335 gem_pwrite.data_ptr = to_user_pointer(buf);
336
337 err = 0;
338 if (drmIoctl(fd, DRM_IOCTL_I915_GEM_PWRITE, &gem_pwrite))
339 err = -errno;
340 return err;
341 }
342
343 /**
344 * gem_write:
345 * @fd: open i915 drm file descriptor
346 * @handle: gem buffer object handle
347 * @offset: offset within the buffer of the subrange
348 * @buf: pointer to the data to write into the buffer
349 * @length: size of the subrange
350 *
 * This wraps the PWRITE ioctl, which is used to upload linear data into a
 * subrange of a gem buffer object.
353 */
void gem_write(int fd, uint32_t handle, uint64_t offset, const void *buf, uint64_t length)
355 {
356 igt_assert_eq(__gem_write(fd, handle, offset, buf, length), 0);
357 }
358
static int __gem_read(int fd, uint32_t handle, uint64_t offset, void *buf, uint64_t length)
360 {
361 struct drm_i915_gem_pread gem_pread;
362 int err;
363
364 memset(&gem_pread, 0, sizeof(gem_pread));
365 gem_pread.handle = handle;
366 gem_pread.offset = offset;
367 gem_pread.size = length;
368 gem_pread.data_ptr = to_user_pointer(buf);
369
370 err = 0;
371 if (drmIoctl(fd, DRM_IOCTL_I915_GEM_PREAD, &gem_pread))
372 err = -errno;
373 return err;
374 }
375 /**
376 * gem_read:
377 * @fd: open i915 drm file descriptor
378 * @handle: gem buffer object handle
379 * @offset: offset within the buffer of the subrange
380 * @buf: pointer to the data to read into
381 * @length: size of the subrange
382 *
 * This wraps the PREAD ioctl, which is used to read back linear data from a
 * subrange of a gem buffer object.
385 */
void gem_read(int fd, uint32_t handle, uint64_t offset, void *buf, uint64_t length)
387 {
388 igt_assert_eq(__gem_read(fd, handle, offset, buf, length), 0);
389 }
390
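/*
 * Usage sketch (illustrative only): upload a small pattern with gem_write()
 * and read it back with gem_read(). @fd is assumed to be an open i915 drm fd.
 */
static void __attribute__((unused)) example_pwrite_pread_roundtrip(int fd)
{
	uint32_t handle = gem_create(fd, 4096);
	uint32_t in = 0xdeadbeef, out = 0;

	gem_write(fd, handle, 0, &in, sizeof(in));
	gem_read(fd, handle, 0, &out, sizeof(out));
	igt_assert_eq_u32(out, in);

	gem_close(fd, handle);
}
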
int __gem_set_domain(int fd, uint32_t handle, uint32_t read, uint32_t write)
392 {
393 struct drm_i915_gem_set_domain set_domain;
394 int err;
395
396 memset(&set_domain, 0, sizeof(set_domain));
397 set_domain.handle = handle;
398 set_domain.read_domains = read;
399 set_domain.write_domain = write;
400
401 err = 0;
402 if (igt_ioctl(fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &set_domain))
403 err = -errno;
404
405 return err;
406 }
407
408 /**
409 * gem_set_domain:
410 * @fd: open i915 drm file descriptor
411 * @handle: gem buffer object handle
412 * @read: gem domain bits for read access
413 * @write: gem domain bit for write access
414 *
415 * This wraps the SET_DOMAIN ioctl, which is used to control the coherency of
 * the gem buffer object between the cpu and gtt mappings. It is also used to
417 * synchronize with outstanding rendering in general, but for that use-case
418 * please have a look at gem_sync().
419 */
void gem_set_domain(int fd, uint32_t handle, uint32_t read, uint32_t write)
421 {
422 igt_assert_eq(__gem_set_domain(fd, handle, read, write), 0);
423 }
424
425 /**
 * gem_wait:
427 * @fd: open i915 drm file descriptor
428 * @handle: gem buffer object handle
429 * @timeout_ns: [in] time to wait, [out] remaining time (in nanoseconds)
430 *
 * This function waits for outstanding rendering to complete, up to
 * @timeout_ns. If no @timeout_ns is provided, the wait is indefinite and
433 * only returns upon an error or when the rendering is complete.
434 */
int gem_wait(int fd, uint32_t handle, int64_t *timeout_ns)
436 {
437 struct drm_i915_gem_wait wait;
438 int ret;
439
440 memset(&wait, 0, sizeof(wait));
441 wait.bo_handle = handle;
442 wait.timeout_ns = timeout_ns ? *timeout_ns : -1;
443 wait.flags = 0;
444
445 ret = 0;
446 if (igt_ioctl(fd, DRM_IOCTL_I915_GEM_WAIT, &wait))
447 ret = -errno;
448
449 if (timeout_ns)
450 *timeout_ns = wait.timeout_ns;
451
452 return ret;
453 }
454
455 /**
456 * gem_sync:
457 * @fd: open i915 drm file descriptor
458 * @handle: gem buffer object handle
459 *
 * This function waits for outstanding rendering to complete.
461 */
void gem_sync(int fd, uint32_t handle)
463 {
464 if (gem_wait(fd, handle, NULL))
465 gem_set_domain(fd, handle,
466 I915_GEM_DOMAIN_GTT,
467 I915_GEM_DOMAIN_GTT);
468 errno = 0;
469 }
470
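/*
 * Usage sketch (illustrative only): a bounded wait followed by an unbounded
 * one. @fd and @handle are assumed to come from an earlier submission, e.g.
 * via gem_execbuf(); the timeout value is arbitrary.
 */
static void __attribute__((unused)) example_wait_then_sync(int fd, uint32_t handle)
{
	int64_t timeout_ns = 50 * 1000 * 1000; /* 50ms budget */

	/* Bounded wait: -ETIME is returned if the gpu is still busy. */
	if (gem_wait(fd, handle, &timeout_ns) == -ETIME)
		igt_info("still busy after 50ms, waiting for idle\n");

	/* Unbounded wait until all outstanding rendering has completed. */
	gem_sync(fd, handle);
}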
471
bool gem_create__has_stolen_support(int fd)
473 {
474 static int has_stolen_support = -1;
475 struct drm_i915_getparam gp;
476 int val = -1;
477
478 if (has_stolen_support < 0) {
479 memset(&gp, 0, sizeof(gp));
480 gp.param = 38; /* CREATE_VERSION */
481 gp.value = &val;
482
483 /* Do we have the extended gem_create_ioctl? */
484 ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp);
485 has_stolen_support = val >= 2;
486 }
487
488 return has_stolen_support;
489 }
490
491 struct local_i915_gem_create_v2 {
492 uint64_t size;
493 uint32_t handle;
494 uint32_t pad;
495 #define I915_CREATE_PLACEMENT_STOLEN (1<<0)
496 uint32_t flags;
497 };
498
499 #define LOCAL_IOCTL_I915_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_CREATE, struct local_i915_gem_create_v2)
uint32_t __gem_create_stolen(int fd, uint64_t size)
501 {
502 struct local_i915_gem_create_v2 create;
503 int ret;
504
505 memset(&create, 0, sizeof(create));
506 create.handle = 0;
507 create.size = size;
508 create.flags = I915_CREATE_PLACEMENT_STOLEN;
509 ret = igt_ioctl(fd, LOCAL_IOCTL_I915_GEM_CREATE, &create);
510
511 if (ret < 0)
512 return 0;
513
514 errno = 0;
515 return create.handle;
516 }
517
518 /**
519 * gem_create_stolen:
520 * @fd: open i915 drm file descriptor
521 * @size: desired size of the buffer
522 *
 * This wraps the new GEM_CREATE ioctl, which allocates a new gem buffer
 * object of @size, placed in the stolen memory region.
525 *
526 * Returns: The file-private handle of the created buffer object
527 */
528
uint32_t gem_create_stolen(int fd, uint64_t size)
530 {
531 struct local_i915_gem_create_v2 create;
532
533 memset(&create, 0, sizeof(create));
534 create.handle = 0;
535 create.size = size;
536 create.flags = I915_CREATE_PLACEMENT_STOLEN;
537 do_ioctl(fd, LOCAL_IOCTL_I915_GEM_CREATE, &create);
538 igt_assert(create.handle);
539
540 return create.handle;
541 }
542
int __gem_create(int fd, uint64_t size, uint32_t *handle)
544 {
545 struct drm_i915_gem_create create = {
546 .size = size,
547 };
548 int err = 0;
549
550 if (igt_ioctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create) == 0) {
551 *handle = create.handle;
552 } else {
553 err = -errno;
554 igt_assume(err != 0);
555 }
556
557 errno = 0;
558 return err;
559 }
560
561 /**
562 * gem_create:
563 * @fd: open i915 drm file descriptor
564 * @size: desired size of the buffer
565 *
566 * This wraps the GEM_CREATE ioctl, which allocates a new gem buffer object of
567 * @size.
568 *
569 * Returns: The file-private handle of the created buffer object
570 */
uint32_t gem_create(int fd, uint64_t size)
572 {
573 uint32_t handle;
574
575 igt_assert_eq(__gem_create(fd, size, &handle), 0);
576
577 return handle;
578 }
579
580 /**
581 * __gem_execbuf:
582 * @fd: open i915 drm file descriptor
583 * @execbuf: execbuffer data structure
584 *
585 * This wraps the EXECBUFFER2 ioctl, which submits a batchbuffer for the gpu to
586 * run. This is allowed to fail, with -errno returned.
587 */
int __gem_execbuf(int fd, struct drm_i915_gem_execbuffer2 *execbuf)
589 {
590 int err = 0;
591 if (igt_ioctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, execbuf)) {
592 err = -errno;
593 igt_assume(err != 0);
594 }
595 errno = 0;
596 return err;
597 }
598
599 /**
600 * gem_execbuf:
601 * @fd: open i915 drm file descriptor
602 * @execbuf: execbuffer data structure
603 *
604 * This wraps the EXECBUFFER2 ioctl, which submits a batchbuffer for the gpu to
605 * run.
606 */
void gem_execbuf(int fd, struct drm_i915_gem_execbuffer2 *execbuf)
608 {
609 igt_assert_eq(__gem_execbuf(fd, execbuf), 0);
610 }
611
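/*
 * Usage sketch (illustrative only): submit a minimal batch containing just
 * MI_BATCH_BUFFER_END to the default engine and wait for it to complete.
 * @fd is assumed to be an open i915 drm fd; the example_* name is made up.
 */
static void __attribute__((unused)) example_submit_noop_batch(int fd)
{
	const uint32_t bbe = 0xa << 23; /* MI_BATCH_BUFFER_END */
	struct drm_i915_gem_exec_object2 obj;
	struct drm_i915_gem_execbuffer2 execbuf;

	memset(&obj, 0, sizeof(obj));
	obj.handle = gem_create(fd, 4096);
	gem_write(fd, obj.handle, 0, &bbe, sizeof(bbe));

	memset(&execbuf, 0, sizeof(execbuf));
	execbuf.buffers_ptr = to_user_pointer(&obj);
	execbuf.buffer_count = 1;

	gem_execbuf(fd, &execbuf);
	gem_sync(fd, obj.handle);
	gem_close(fd, obj.handle);
}
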
612 /**
613 * __gem_execbuf_wr:
614 * @fd: open i915 drm file descriptor
615 * @execbuf: execbuffer data structure
616 *
617 * This wraps the EXECBUFFER2_WR ioctl, which submits a batchbuffer for the gpu to
618 * run. This is allowed to fail, with -errno returned.
619 */
int __gem_execbuf_wr(int fd, struct drm_i915_gem_execbuffer2 *execbuf)
621 {
622 int err = 0;
623 if (igt_ioctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2_WR, execbuf)) {
624 err = -errno;
625 igt_assume(err != 0);
626 }
627 errno = 0;
628 return err;
629 }
630
631 /**
632 * gem_execbuf_wr:
633 * @fd: open i915 drm file descriptor
634 * @execbuf: execbuffer data structure
635 *
636 * This wraps the EXECBUFFER2_WR ioctl, which submits a batchbuffer for the gpu to
637 * run.
638 */
void gem_execbuf_wr(int fd, struct drm_i915_gem_execbuffer2 *execbuf)
640 {
641 igt_assert_eq(__gem_execbuf_wr(fd, execbuf), 0);
642 }
643
644 /**
645 * gem_madvise:
646 * @fd: open i915 drm file descriptor
647 * @handle: gem buffer object handle
648 * @state: desired madvise state
649 *
650 * This wraps the MADVISE ioctl, which is used in libdrm to implement
651 * opportunistic buffer object caching. Objects in the cache are set to DONTNEED
652 * (internally in the kernel tracked as purgeable objects). When such a cached
653 * object is in need again it must be set back to WILLNEED before first use.
654 *
655 * Returns: When setting the madvise state to WILLNEED this returns whether the
656 * backing storage was still available or not.
657 */
int gem_madvise(int fd, uint32_t handle, int state)
659 {
660 struct drm_i915_gem_madvise madv;
661
662 memset(&madv, 0, sizeof(madv));
663 madv.handle = handle;
664 madv.madv = state;
665 madv.retained = 1;
666 do_ioctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
667
668 return madv.retained;
669 }
670
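/*
 * Usage sketch (illustrative only): hand a buffer back to the kernel as
 * purgeable and later reclaim it, checking whether the pages survived.
 * @fd is assumed to be an open i915 drm fd.
 */
static void __attribute__((unused)) example_madvise_cycle(int fd)
{
	uint32_t handle = gem_create(fd, 4096);

	gem_madvise(fd, handle, I915_MADV_DONTNEED);

	/* Before reuse, the object must be marked WILLNEED again. */
	if (!gem_madvise(fd, handle, I915_MADV_WILLNEED))
		igt_info("backing storage was purged, contents lost\n");

	gem_close(fd, handle);
}
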
int __gem_userptr(int fd, void *ptr, uint64_t size, int read_only, uint32_t flags, uint32_t *handle)
672 {
673 struct drm_i915_gem_userptr userptr;
674
675 memset(&userptr, 0, sizeof(userptr));
676 userptr.user_ptr = to_user_pointer(ptr);
677 userptr.user_size = size;
678 userptr.flags = flags;
679 if (read_only)
680 userptr.flags |= I915_USERPTR_READ_ONLY;
681
682 if (igt_ioctl(fd, DRM_IOCTL_I915_GEM_USERPTR, &userptr))
683 return -errno;
684
685 *handle = userptr.handle;
686 return 0;
687 }
688
689 /**
690 * gem_userptr:
691 * @fd: open i915 drm file descriptor
692 * @ptr: userptr pointer to be passed
693 * @size: desired size of the buffer
694 * @read_only: specify whether userptr is opened read only
695 * @flags: other userptr flags
696 * @handle: returned handle for the object
697 *
 * The handle of the new userptr GEM object is returned in @handle.
699 */
void gem_userptr(int fd, void *ptr, uint64_t size, int read_only, uint32_t flags, uint32_t *handle)
701 {
702 igt_assert_eq(__gem_userptr(fd, ptr, size, read_only, flags, handle), 0);
703 }
704
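/*
 * Usage sketch (illustrative only): wrap an ordinary page-aligned allocation
 * in a GEM object via the userptr interface. @fd is assumed to be an open
 * i915 drm fd; userptr support itself is not checked here.
 */
static void __attribute__((unused)) example_userptr(int fd)
{
	size_t size = 4096;
	uint32_t handle;
	void *ptr;

	igt_assert_eq(posix_memalign(&ptr, 4096, size), 0);

	gem_userptr(fd, ptr, size, false, 0, &handle);
	/* ... use the handle like any other GEM object ... */
	gem_close(fd, handle);

	free(ptr);
}
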
705 /**
706 * gem_sw_finish:
707 * @fd: open i915 drm file descriptor
708 * @handle: gem buffer object handle
709 *
710 * This wraps the SW_FINISH ioctl, which is used to flush out frontbuffer
711 * rendering done through the direct cpu memory mappings. Shipping userspace
712 * does _not_ call this after frontbuffer rendering through gtt memory mappings.
713 */
void gem_sw_finish(int fd, uint32_t handle)
715 {
716 struct drm_i915_gem_sw_finish finish;
717
718 memset(&finish, 0, sizeof(finish));
719 finish.handle = handle;
720
721 do_ioctl(fd, DRM_IOCTL_I915_GEM_SW_FINISH, &finish);
722 }
723
724 /**
725 * gem_bo_busy:
726 * @fd: open i915 drm file descriptor
727 * @handle: gem buffer object handle
728 *
729 * This wraps the BUSY ioctl, which tells whether a buffer object is still
730 * actively used by the gpu in a execbuffer.
731 *
732 * Returns: The busy state of the buffer object.
733 */
bool gem_bo_busy(int fd, uint32_t handle)
735 {
736 struct drm_i915_gem_busy busy;
737
738 memset(&busy, 0, sizeof(busy));
739 busy.handle = handle;
740
741 do_ioctl(fd, DRM_IOCTL_I915_GEM_BUSY, &busy);
742
743 return !!busy.busy;
744 }
745
746
747 /* feature test helpers */
748
749 /**
750 * gem_gtt_type:
751 * @fd: open i915 drm file descriptor
752 *
753 * Feature test macro to check what type of gtt is being used by the kernel:
754 * 0 - global gtt
755 * 1 - aliasing ppgtt
756 * 2 - full ppgtt
757 *
758 * Returns: Type of gtt being used.
759 */
static int gem_gtt_type(int fd)
761 {
762 struct drm_i915_getparam gp;
763 int val = 0;
764
765 memset(&gp, 0, sizeof(gp));
766 gp.param = I915_PARAM_HAS_ALIASING_PPGTT;
767 gp.value = &val;
768
769 if (ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp, sizeof(gp)))
770 return 0;
771
772 errno = 0;
773 return val;
774 }
775
776 /**
777 * gem_uses_ppgtt:
778 * @fd: open i915 drm file descriptor
779 *
780 * Feature test macro to check whether the kernel internally uses ppgtt to
781 * execute batches. Note that this is also true when we're using full ppgtt.
782 *
783 * Returns: Whether batches are run through ppgtt.
784 */
bool gem_uses_ppgtt(int fd)
786 {
787 return gem_gtt_type(fd) > 0;
788 }
789
790 /**
791 * gem_uses_full_ppgtt:
792 * @fd: open i915 drm file descriptor
793 *
794 * Feature test macro to check whether the kernel internally uses full
795 * per-process gtt to execute batches. Note that this is also true when we're
796 * using full 64b ppgtt.
797 *
798 * Returns: Whether batches are run through full ppgtt.
799 */
bool gem_uses_full_ppgtt(int fd)
801 {
802 return gem_gtt_type(fd) > 1;
803 }
804
805 /**
806 * gem_gpu_reset_type:
807 * @fd: open i915 drm file descriptor
808 *
809 * Query whether reset-engine (2), global-reset (1) or reset-disable (0)
810 * is available.
811 *
812 * Returns: GPU reset type available
813 */
int gem_gpu_reset_type(int fd)
815 {
816 struct drm_i915_getparam gp;
817 int gpu_reset_type = -1;
818
819 memset(&gp, 0, sizeof(gp));
820 gp.param = I915_PARAM_HAS_GPU_RESET;
821 gp.value = &gpu_reset_type;
822 drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp);
823
824 return gpu_reset_type;
825 }
826
827 /**
828 * gem_gpu_reset_enabled:
829 * @fd: open i915 drm file descriptor
830 *
831 * Feature test macro to check whether the kernel internally uses hangchecks
832 * and can reset the GPU upon hang detection. Note that this is also true when
833 * reset-engine (the lightweight, single engine reset) is available.
834 *
835 * Returns: Whether the driver will detect hangs and perform a reset.
836 */
bool gem_gpu_reset_enabled(int fd)
838 {
839 return gem_gpu_reset_type(fd) > 0;
840 }
841
842 /**
843 * gem_engine_reset_enabled:
844 * @fd: open i915 drm file descriptor
845 *
846 * Feature test macro to check whether the kernel internally uses hangchecks
847 * and can reset individual engines upon hang detection.
848 *
849 * Returns: Whether the driver will detect hangs and perform an engine reset.
850 */
bool gem_engine_reset_enabled(int fd)
852 {
853 return gem_gpu_reset_type(fd) > 1;
854 }
855
856 /**
857 * gem_available_fences:
858 * @fd: open i915 drm file descriptor
859 *
860 * Feature test macro to query the kernel for the number of available fences
861 * usable in a batchbuffer. Only relevant for pre-gen4.
862 *
863 * Returns: The number of available fences.
864 */
int gem_available_fences(int fd)
866 {
867 static int num_fences = -1;
868
869 if (num_fences < 0) {
870 struct drm_i915_getparam gp;
871
872 memset(&gp, 0, sizeof(gp));
873 gp.param = I915_PARAM_NUM_FENCES_AVAIL;
874 gp.value = &num_fences;
875
876 num_fences = 0;
877 ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp, sizeof(gp));
878 errno = 0;
879 }
880
881 return num_fences;
882 }
883
bool gem_has_llc(int fd)
885 {
886 static int has_llc = -1;
887
888 if (has_llc < 0) {
889 struct drm_i915_getparam gp;
890
891 memset(&gp, 0, sizeof(gp));
892 gp.param = I915_PARAM_HAS_LLC;
893 gp.value = &has_llc;
894
895 has_llc = 0;
896 ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp, sizeof(gp));
897 errno = 0;
898 }
899
900 return has_llc;
901 }
902
static bool has_param(int fd, int param)
904 {
905 drm_i915_getparam_t gp;
906 int tmp = 0;
907
908 memset(&gp, 0, sizeof(gp));
909 gp.value = &tmp;
910 gp.param = param;
911
912 if (igt_ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
913 return false;
914
915 errno = 0;
916 return tmp > 0;
917 }
918
919 /**
920 * gem_has_bsd:
921 * @fd: open i915 drm file descriptor
922 *
923 * Feature test macro to query whether the BSD ring is available.
924 *
925 * Note that recent Bspec calls this the VCS ring for Video Command Submission.
926 *
927 * Returns: Whether the BSD ring is available or not.
928 */
bool gem_has_bsd(int fd)
930 {
931 static int has_bsd = -1;
932 if (has_bsd < 0)
933 has_bsd = has_param(fd, I915_PARAM_HAS_BSD);
934 return has_bsd;
935 }
936
937 /**
938 * gem_has_blt:
939 * @fd: open i915 drm file descriptor
940 *
941 * Feature test macro to query whether the blitter ring is available.
942 *
943 * Note that recent Bspec calls this the BCS ring for Blitter Command Submission.
944 *
945 * Returns: Whether the blitter ring is available or not.
946 */
bool gem_has_blt(int fd)
948 {
949 static int has_blt = -1;
950 if (has_blt < 0)
951 has_blt = has_param(fd, I915_PARAM_HAS_BLT);
952 return has_blt;
953 }
954
955 /**
956 * gem_has_vebox:
957 * @fd: open i915 drm file descriptor
958 *
959 * Feature test macro to query whether the vebox ring is available.
960 *
961 * Note that recent Bspec calls this the VECS ring for Video Enhancement Command
962 * Submission.
963 *
964 * Returns: Whether the vebox ring is available or not.
965 */
bool gem_has_vebox(int fd)
967 {
968 static int has_vebox = -1;
969 if (has_vebox < 0)
970 has_vebox = has_param(fd, I915_PARAM_HAS_VEBOX);
971 return has_vebox;
972 }
973
974 #define I915_PARAM_HAS_BSD2 31
975 /**
976 * gem_has_bsd2:
977 * @fd: open i915 drm file descriptor
978 *
979 * Feature test macro to query whether the BSD2 ring is available.
980 *
981 * Note that recent Bspec calls this the VCS ring for Video Command Submission.
982 *
 * Returns: Whether the BSD2 ring is available or not.
984 */
bool gem_has_bsd2(int fd)
986 {
987 static int has_bsd2 = -1;
988 if (has_bsd2 < 0)
989 has_bsd2 = has_param(fd, I915_PARAM_HAS_BSD2);
990 return has_bsd2;
991 }
992
993 struct local_i915_gem_get_aperture {
994 __u64 aper_size;
995 __u64 aper_available_size;
996 __u64 version;
997 __u64 map_total_size;
998 __u64 stolen_total_size;
999 };
1000 #define DRM_I915_GEM_GET_APERTURE 0x23
1001 #define LOCAL_IOCTL_I915_GEM_GET_APERTURE DRM_IOR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_APERTURE, struct local_i915_gem_get_aperture)
1002 /**
1003 * gem_total_mappable_size:
1004 * @fd: open i915 drm file descriptor
1005 *
1006 * Feature test macro to query the kernel for the total mappable size.
1007 *
1008 * Returns: Total mappable address space size.
1009 */
uint64_t gem_total_mappable_size(int fd)
1011 {
1012 struct local_i915_gem_get_aperture aperture;
1013
1014 memset(&aperture, 0, sizeof(aperture));
1015 do_ioctl(fd, LOCAL_IOCTL_I915_GEM_GET_APERTURE, &aperture);
1016
1017 return aperture.map_total_size;
1018 }
1019
1020 /**
1021 * gem_total_stolen_size:
1022 * @fd: open i915 drm file descriptor
1023 *
1024 * Feature test macro to query the kernel for the total stolen size.
1025 *
1026 * Returns: Total stolen memory.
1027 */
uint64_t gem_total_stolen_size(int fd)
1029 {
1030 struct local_i915_gem_get_aperture aperture;
1031
1032 memset(&aperture, 0, sizeof(aperture));
1033 do_ioctl(fd, LOCAL_IOCTL_I915_GEM_GET_APERTURE, &aperture);
1034
1035 return aperture.stolen_total_size;
1036 }
1037
1038 /**
1039 * gem_available_aperture_size:
1040 * @fd: open i915 drm file descriptor
1041 *
1042 * Feature test macro to query the kernel for the available gpu aperture size
1043 * usable in a batchbuffer.
1044 *
1045 * Returns: The available gtt address space size.
1046 */
uint64_t gem_available_aperture_size(int fd)
1048 {
1049 struct drm_i915_gem_get_aperture aperture;
1050
1051 memset(&aperture, 0, sizeof(aperture));
1052 aperture.aper_size = 256*1024*1024;
1053 do_ioctl(fd, DRM_IOCTL_I915_GEM_GET_APERTURE, &aperture);
1054
1055 return aperture.aper_available_size;
1056 }
1057
1058 /**
1059 * gem_aperture_size:
1060 * @fd: open i915 drm file descriptor
1061 *
1062 * Feature test macro to query the kernel for the total gpu aperture size.
1063 *
1064 * Returns: The total gtt address space size.
1065 */
uint64_t gem_aperture_size(int fd)
1067 {
1068 static uint64_t aperture_size = 0;
1069
1070 if (aperture_size == 0) {
1071 struct drm_i915_gem_context_param p;
1072
1073 memset(&p, 0, sizeof(p));
p.param = 0x3; /* I915_CONTEXT_PARAM_GTT_SIZE */
1075 if (__gem_context_get_param(fd, &p) == 0) {
1076 aperture_size = p.value;
1077 } else {
1078 struct drm_i915_gem_get_aperture aperture;
1079
1080 memset(&aperture, 0, sizeof(aperture));
1081 aperture.aper_size = 256*1024*1024;
1082
1083 do_ioctl(fd, DRM_IOCTL_I915_GEM_GET_APERTURE, &aperture);
1084 aperture_size = aperture.aper_size;
1085 }
1086 }
1087
1088 return aperture_size;
1089 }
1090
1091 /**
1092 * gem_mappable_aperture_size:
1093 *
1094 * Feature test macro to query the kernel for the mappable gpu aperture size.
1095 * This is the area available for GTT memory mappings.
1096 *
1097 * Returns: The mappable gtt address space size.
1098 */
uint64_t gem_mappable_aperture_size(void)
1100 {
1101 #if defined(USE_INTEL)
1102 struct pci_device *pci_dev = intel_get_pci_device();
1103 int bar;
1104
1105 if (intel_gen(pci_dev->device_id) < 3)
1106 bar = 0;
1107 else
1108 bar = 2;
1109
1110 return pci_dev->regions[bar].size;
1111 #else
1112 return 0;
1113 #endif
1114 }
1115
1116 /**
1117 * gem_global_aperture_size:
1118 * @fd: open i915 drm file descriptor
1119 *
1120 * Feature test macro to query the kernel for the global gpu aperture size.
1121 * This is the area available for the kernel to perform address translations.
1122 *
 * Returns: The global gtt address space size.
1124 */
uint64_t gem_global_aperture_size(int fd)
1126 {
1127 struct drm_i915_gem_get_aperture aperture;
1128
1129 memset(&aperture, 0, sizeof(aperture));
1130 aperture.aper_size = 256*1024*1024;
1131 do_ioctl(fd, DRM_IOCTL_I915_GEM_GET_APERTURE, &aperture);
1132
1133 return aperture.aper_size;
1134 }
1135
1136 /**
1137 * gem_has_softpin:
1138 * @fd: open i915 drm file descriptor
1139 *
1140 * Feature test macro to query whether the softpinning functionality is
1141 * supported.
1142 *
1143 * Returns: Whether softpin support is available
1144 */
bool gem_has_softpin(int fd)
1146 {
1147 static int has_softpin = -1;
1148
1149 if (has_softpin < 0) {
1150 struct drm_i915_getparam gp;
1151
1152 memset(&gp, 0, sizeof(gp));
1153 gp.param = I915_PARAM_HAS_EXEC_SOFTPIN;
1154 gp.value = &has_softpin;
1155
1156 has_softpin = 0;
1157 ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp, sizeof(gp));
1158 errno = 0;
1159 }
1160
1161 return has_softpin;
1162 }
1163
1164 /**
1165 * gem_has_exec_fence:
1166 * @fd: open i915 drm file descriptor
1167 *
1168 * Feature test macro to query whether in/out fence support in execbuffer is
1169 * available.
1170 *
1171 * Returns: Whether fence support is available
1172 */
bool gem_has_exec_fence(int fd)
1174 {
1175 static int has_exec_fence = -1;
1176
1177 if (has_exec_fence < 0) {
1178 struct drm_i915_getparam gp;
1179
1180 memset(&gp, 0, sizeof(gp));
1181 gp.param = I915_PARAM_HAS_EXEC_FENCE;
1182 gp.value = &has_exec_fence;
1183
1184 has_exec_fence = 0;
1185 ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp, sizeof(gp));
1186 errno = 0;
1187 }
1188
1189 return has_exec_fence;
1190 }
1191
1192 /**
1193 * gem_require_caching:
1194 * @fd: open i915 drm file descriptor
1195 *
1196 * Feature test macro to query whether buffer object caching control is
1197 * available. Automatically skips through igt_require() if not.
1198 */
void gem_require_caching(int fd)
1200 {
1201 uint32_t handle;
1202
1203 handle = gem_create(fd, 4096);
1204 gem_set_caching(fd, handle, 0);
1205 gem_close(fd, handle);
1206
1207 errno = 0;
1208 }
1209
static void reset_device(int fd)
1211 {
1212 int dir;
1213
1214 dir = igt_debugfs_dir(fd);
1215 igt_require(dir >= 0);
1216
1217 if (ioctl(fd, DRM_IOCTL_I915_GEM_THROTTLE)) {
1218 igt_info("Found wedged device, trying to reset and continue\n");
1219 igt_sysfs_set(dir, "i915_wedged", "-1");
1220 }
1221 igt_sysfs_set(dir, "i915_next_seqno", "1");
1222
1223 close(dir);
1224 }
1225
void igt_require_gem(int fd)
1227 {
1228 char path[256];
1229 int err;
1230
1231 igt_require_intel(fd);
1232
1233 /*
1234 * We only want to use the throttle-ioctl for its -EIO reporting
1235 * of a wedged device, not for actually waiting on outstanding
1236 * requests! So create a new drm_file for the device that is clean.
1237 */
1238 snprintf(path, sizeof(path), "/proc/self/fd/%d", fd);
1239 fd = open(path, O_RDWR);
1240 igt_assert_lte(0, fd);
1241
1242 /*
1243 * Reset the global seqno at the start of each test. This ensures that
1244 * the test will not wrap unless it explicitly sets up seqno wrapping
1245 * itself, which avoids accidentally hanging when setting up long
1246 * sequences of batches.
1247 */
1248 reset_device(fd);
1249
1250 err = 0;
1251 if (ioctl(fd, DRM_IOCTL_I915_GEM_THROTTLE))
1252 err = -errno;
1253
1254 close(fd);
1255
1256 igt_require_f(err == 0, "Unresponsive i915/GEM device\n");
1257 }
1258
1259 /**
1260 * gem_require_ring:
1261 * @fd: open i915 drm file descriptor
1262 * @ring: ring flag bit as used in gem_execbuf()
1263 *
1264 * Feature test macro to query whether a specific ring is available.
1265 * This automagically skips if the ring isn't available by
1266 * calling igt_require().
1267 */
void gem_require_ring(int fd, unsigned ring)
1269 {
1270 igt_require(gem_has_ring(fd, ring));
1271 }
1272
1273 /**
1274 * gem_has_mocs_registers:
1275 * @fd: open i915 drm file descriptor
1276 *
1277 * Feature test macro to query whether the device has MOCS registers.
1278 * These exist gen 9+.
1279 */
bool gem_has_mocs_registers(int fd)
1281 {
1282 return intel_gen(intel_get_drm_devid(fd)) >= 9;
1283 }
1284
1285 /**
1286 * gem_require_mocs_registers:
1287 * @fd: open i915 drm file descriptor
1288 *
1289 * Feature test macro to query whether the device has MOCS registers.
1290 * These exist gen 9+.
1291 */
void gem_require_mocs_registers(int fd)
1293 {
1294 igt_require(gem_has_mocs_registers(fd));
1295 }
1296
1297 /* prime */
1298
1299 /**
1300 * prime_handle_to_fd:
1301 * @fd: open i915 drm file descriptor
1302 * @handle: file-private gem buffer object handle
1303 *
1304 * This wraps the PRIME_HANDLE_TO_FD ioctl, which is used to export a gem buffer
1305 * object into a global (i.e. potentially cross-device) dma-buf file-descriptor
1306 * handle.
1307 *
1308 * Returns: The created dma-buf fd handle.
1309 */
int prime_handle_to_fd(int fd, uint32_t handle)
1311 {
1312 struct drm_prime_handle args;
1313
1314 memset(&args, 0, sizeof(args));
1315 args.handle = handle;
1316 args.flags = DRM_CLOEXEC;
1317 args.fd = -1;
1318
1319 do_ioctl(fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &args);
1320
1321 return args.fd;
1322 }
1323
1324 /**
1325 * prime_handle_to_fd_for_mmap:
1326 * @fd: open i915 drm file descriptor
1327 * @handle: file-private gem buffer object handle
1328 *
1329 * Same as prime_handle_to_fd above but with DRM_RDWR capabilities, which can
1330 * be useful for writing into the mmap'ed dma-buf file-descriptor.
1331 *
1332 * Returns: The created dma-buf fd handle or -1 if the ioctl fails.
1333 */
int prime_handle_to_fd_for_mmap(int fd, uint32_t handle)
1335 {
1336 struct drm_prime_handle args;
1337
1338 memset(&args, 0, sizeof(args));
1339 args.handle = handle;
1340 args.flags = DRM_CLOEXEC | DRM_RDWR;
1341 args.fd = -1;
1342
1343 if (igt_ioctl(fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &args) != 0)
1344 return -1;
1345
1346 return args.fd;
1347 }
1348
1349 /**
1350 * prime_fd_to_handle:
1351 * @fd: open i915 drm file descriptor
1352 * @dma_buf_fd: dma-buf fd handle
1353 *
1354 * This wraps the PRIME_FD_TO_HANDLE ioctl, which is used to import a dma-buf
1355 * file-descriptor into a gem buffer object.
1356 *
1357 * Returns: The created gem buffer object handle.
1358 */
uint32_t prime_fd_to_handle(int fd, int dma_buf_fd)
1360 {
1361 struct drm_prime_handle args;
1362
1363 memset(&args, 0, sizeof(args));
1364 args.fd = dma_buf_fd;
1365 args.flags = 0;
1366 args.handle = 0;
1367
1368 do_ioctl(fd, DRM_IOCTL_PRIME_FD_TO_HANDLE, &args);
1369
1370 return args.handle;
1371 }
1372
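/*
 * Usage sketch (illustrative only): pass a buffer between two drm file
 * descriptors through a dma-buf fd. @exporter and @importer are assumed to be
 * open drm fds (the exporter an i915 one); the names here are hypothetical.
 */
static void __attribute__((unused)) example_prime_roundtrip(int exporter, int importer)
{
	uint32_t handle = gem_create(exporter, 4096);
	int dma_buf_fd = prime_handle_to_fd(exporter, handle);
	uint32_t imported = prime_fd_to_handle(importer, dma_buf_fd);

	igt_assert(prime_get_size(dma_buf_fd) == 4096);

	gem_close(importer, imported);
	close(dma_buf_fd);
	gem_close(exporter, handle);
}
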
1373 /**
1374 * prime_get_size:
1375 * @dma_buf_fd: dma-buf fd handle
1376 *
1377 * This wraps the lseek() protocol used to query the invariant size of a
 * dma-buf. Not all kernels support this, which is checked with igt_require() and
1379 * so will result in automagic test skipping.
1380 *
1381 * Returns: The lifetime-invariant size of the dma-buf object.
1382 */
off_t prime_get_size(int dma_buf_fd)
1384 {
1385 off_t ret;
1386
1387 ret = lseek(dma_buf_fd, 0, SEEK_END);
1388 igt_assert(ret >= 0 || errno == ESPIPE);
1389 igt_require(ret >= 0);
1390 errno = 0;
1391
1392 return ret;
1393 }
1394
1395 /**
1396 * prime_sync_start
1397 * @dma_buf_fd: dma-buf fd handle
1398 * @write: read/write or read-only access
1399 *
1400 * Must be called before starting CPU mmap access to a dma-buf.
1401 */
void prime_sync_start(int dma_buf_fd, bool write)
1403 {
1404 struct local_dma_buf_sync sync_start;
1405
1406 memset(&sync_start, 0, sizeof(sync_start));
1407 sync_start.flags = LOCAL_DMA_BUF_SYNC_START;
1408 sync_start.flags |= LOCAL_DMA_BUF_SYNC_READ;
1409 if (write)
1410 sync_start.flags |= LOCAL_DMA_BUF_SYNC_WRITE;
1411 do_ioctl(dma_buf_fd, LOCAL_DMA_BUF_IOCTL_SYNC, &sync_start);
1412 }
1413
1414 /**
1415 * prime_sync_end
1416 * @dma_buf_fd: dma-buf fd handle
1417 * @write: read/write or read-only access
1418 *
1419 * Must be called after finishing CPU mmap access to a dma-buf.
1420 */
void prime_sync_end(int dma_buf_fd, bool write)
1422 {
1423 struct local_dma_buf_sync sync_end;
1424
1425 memset(&sync_end, 0, sizeof(sync_end));
1426 sync_end.flags = LOCAL_DMA_BUF_SYNC_END;
1427 sync_end.flags |= LOCAL_DMA_BUF_SYNC_READ;
1428 if (write)
1429 sync_end.flags |= LOCAL_DMA_BUF_SYNC_WRITE;
1430 do_ioctl(dma_buf_fd, LOCAL_DMA_BUF_IOCTL_SYNC, &sync_end);
1431 }
1432
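/*
 * Usage sketch (illustrative only): CPU access to a dma-buf through mmap(),
 * bracketed by the sync ioctls as required above. @dma_buf_fd is assumed to
 * come from prime_handle_to_fd_for_mmap() so that a writable mapping is
 * permitted.
 */
static void __attribute__((unused)) example_prime_cpu_access(int dma_buf_fd)
{
	off_t size = prime_get_size(dma_buf_fd);
	void *ptr;

	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, dma_buf_fd, 0);
	igt_assert(ptr != MAP_FAILED);

	prime_sync_start(dma_buf_fd, true);
	memset(ptr, 0xab, size);
	prime_sync_end(dma_buf_fd, true);

	munmap(ptr, size);
}
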
bool igt_has_fb_modifiers(int fd)
1434 {
1435 static bool has_modifiers, cap_modifiers_tested;
1436
1437 if (!cap_modifiers_tested) {
1438 uint64_t cap_modifiers;
1439 int ret;
1440
1441 ret = drmGetCap(fd, DRM_CAP_ADDFB2_MODIFIERS, &cap_modifiers);
1442 igt_assert(ret == 0 || errno == EINVAL || errno == EOPNOTSUPP);
1443 has_modifiers = ret == 0 && cap_modifiers == 1;
1444 cap_modifiers_tested = true;
1445 }
1446
1447 return has_modifiers;
1448 }
1449
1450 /**
1451 * igt_require_fb_modifiers:
1452 * @fd: Open DRM file descriptor.
1453 *
1454 * Requires presence of DRM_CAP_ADDFB2_MODIFIERS.
1455 */
void igt_require_fb_modifiers(int fd)
1457 {
1458 igt_require(igt_has_fb_modifiers(fd));
1459 }
1460
int __kms_addfb(int fd, uint32_t handle,
		uint32_t width, uint32_t height,
		uint32_t pixel_format, uint64_t modifier,
		uint32_t strides[4], uint32_t offsets[4],
		int num_planes, uint32_t flags, uint32_t *buf_id)
1466 {
1467 struct drm_mode_fb_cmd2 f;
1468 int ret, i;
1469
1470 if (flags & DRM_MODE_FB_MODIFIERS)
1471 igt_require_fb_modifiers(fd);
1472
1473 memset(&f, 0, sizeof(f));
1474
1475 f.width = width;
1476 f.height = height;
1477 f.pixel_format = pixel_format;
1478 f.flags = flags;
1479
1480 for (i = 0; i < num_planes; i++) {
1481 f.handles[i] = handle;
1482 f.modifier[i] = modifier;
1483 f.pitches[i] = strides[i];
1484 f.offsets[i] = offsets[i];
1485 }
1486
1487 ret = igt_ioctl(fd, DRM_IOCTL_MODE_ADDFB2, &f);
1488
1489 *buf_id = f.fb_id;
1490
1491 return ret < 0 ? -errno : ret;
1492 }
1493