1 /*
2 * Copyright © 2009,2012,2013 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eric Anholt <eric@anholt.net>
25 * Chris Wilson <chris@chris-wilson.co.uk>
26 * Daniel Vetter <daniel.vetter@ffwll.ch>
27 *
28 */
29
30 /** @file gem_concurrent.c
31 *
32 * This is a test of pread/pwrite/mmap behavior when writing to active
33 * buffers.
34 *
35 * Based on gem_gtt_concurrent_blt.
36 */
37
38 #include "igt.h"
39 #include "igt_vgem.h"
40 #include <stdlib.h>
41 #include <stdio.h>
42 #include <string.h>
43 #include <fcntl.h>
44 #include <inttypes.h>
45 #include <errno.h>
46 #include <sys/resource.h>
47 #include <sys/stat.h>
48 #include <sys/time.h>
49 #include <sys/wait.h>
50
51 #include <drm.h>
52
53 #include "intel_bufmgr.h"
54
55 IGT_TEST_DESCRIPTION("Test of pread/pwrite/mmap behavior when writing to active"
56 " buffers.");
57
58 int fd, devid, gen;
59 int vgem_drv = -1;
60 int all;
61 int pass;
62
63 struct create {
64 const char *name;
65 void (*require)(const struct create *, unsigned);
66 drm_intel_bo *(*create)(drm_intel_bufmgr *, uint64_t size);
67 };
68
69 struct size {
70 const char *name;
71 int width, height;
72 };
73
74 struct buffers {
75 const char *name;
76 const struct create *create;
77 const struct access_mode *mode;
78 const struct size *size;
79 drm_intel_bufmgr *bufmgr;
80 struct intel_batchbuffer *batch;
81 drm_intel_bo **src, **dst;
82 drm_intel_bo *snoop, *spare;
83 uint32_t *tmp;
84 int width, height, npixels, page_size;
85 int count, num_buffers;
86 };
87
88 #define MIN_BUFFERS 3
89
90 static void blt_copy_bo(struct buffers *b, drm_intel_bo *dst, drm_intel_bo *src);
91
92 static void
93 nop_release_bo(drm_intel_bo *bo)
94 {
95 drm_intel_bo_unreference(bo);
96 }
97
98 static void
99 prw_set_bo(struct buffers *b, drm_intel_bo *bo, uint32_t val)
100 {
101 for (int i = 0; i < b->npixels; i++)
102 b->tmp[i] = val;
103 drm_intel_bo_subdata(bo, 0, 4*b->npixels, b->tmp);
104 }
105
106 static void
107 prw_cmp_bo(struct buffers *b, drm_intel_bo *bo, uint32_t val)
108 {
109 uint32_t *vaddr;
110
111 vaddr = b->tmp;
112 do_or_die(drm_intel_bo_get_subdata(bo, 0, 4*b->npixels, vaddr));
113 for (int i = 0; i < b->npixels; i++)
114 igt_assert_eq_u32(vaddr[i], val);
115 }
116
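/* Pick one pixel per row, stepping diagonally through the buffer; the
 * per-pass offset shifts the column so repeated passes touch different
 * locations.
 */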
117 #define pixel(y, width) ((y)*(width) + (((y) + pass)%(width)))
118
119 static void
120 partial_set_bo(struct buffers *b, drm_intel_bo *bo, uint32_t val)
121 {
122 for (int y = 0; y < b->height; y++)
123 do_or_die(drm_intel_bo_subdata(bo, 4*pixel(y, b->width), 4, &val));
124 }
125
126 static void
127 partial_cmp_bo(struct buffers *b, drm_intel_bo *bo, uint32_t val)
128 {
129 for (int y = 0; y < b->height; y++) {
130 uint32_t buf;
131 do_or_die(drm_intel_bo_get_subdata(bo, 4*pixel(y, b->width), 4, &buf));
132 igt_assert_eq_u32(buf, val);
133 }
134 }
135
136 static drm_intel_bo *
137 create_normal_bo(drm_intel_bufmgr *bufmgr, uint64_t size)
138 {
139 drm_intel_bo *bo;
140
141 bo = drm_intel_bo_alloc(bufmgr, "bo", size, 0);
142 igt_assert(bo);
143
144 return bo;
145 }
146
147 static void can_create_normal(const struct create *create, unsigned count)
148 {
149 }
150
151 #if HAVE_CREATE_PRIVATE
152 static drm_intel_bo *
153 create_private_bo(drm_intel_bufmgr *bufmgr, uint64_t size)
154 {
155 drm_intel_bo *bo;
156 uint32_t handle;
157
158 /* XXX gem_create_with_flags(fd, size, I915_CREATE_PRIVATE); */
159
160 handle = gem_create(fd, size);
161 bo = gem_handle_to_libdrm_bo(bufmgr, fd, "stolen", handle);
162 gem_close(fd, handle);
163
164 return bo;
165 }
166
167 static void can_create_private(const struct create *create, unsigned count)
168 {
169 igt_require(0);
170 }
171 #endif
172
173 #if HAVE_CREATE_STOLEN
174 static drm_intel_bo *
175 create_stolen_bo(drm_intel_bufmgr *bufmgr, uint64_t size)
176 {
177 drm_intel_bo *bo;
178 uint32_t handle;
179
180 /* XXX gem_create_with_flags(fd, size, I915_CREATE_STOLEN); */
181
182 handle = gem_create(fd, size);
183 bo = gem_handle_to_libdrm_bo(bufmgr, fd, "stolen", handle);
184 gem_close(fd, handle);
185
186 return bo;
187 }
188
189 static void can_create_stolen(const struct create *create, unsigned count)
190 {
191 /* XXX check num_buffers against available stolen */
192 igt_require(0);
193 }
194 #endif
195
196 static void create_cpu_require(const struct create *create, unsigned count)
197 {
198 #if HAVE_CREATE_STOLEN
199 igt_require(create->create != create_stolen_bo);
200 #endif
201 }
202
203 static drm_intel_bo *
204 unmapped_create_bo(const struct buffers *b)
205 {
206 return b->create->create(b->bufmgr, 4*b->npixels);
207 }
208
209 static void create_snoop_require(const struct create *create, unsigned count)
210 {
211 create_cpu_require(create, count);
212 igt_require(!gem_has_llc(fd));
213 }
214
215 static drm_intel_bo *
216 snoop_create_bo(const struct buffers *b)
217 {
218 drm_intel_bo *bo;
219
220 bo = unmapped_create_bo(b);
221 gem_set_caching(fd, bo->handle, I915_CACHING_CACHED);
222 drm_intel_bo_disable_reuse(bo);
223
224 return bo;
225 }
226
227 static void create_userptr_require(const struct create *create, unsigned count)
228 {
229 static int has_userptr = -1;
230 if (has_userptr < 0) {
231 struct drm_i915_gem_userptr arg;
232
233 has_userptr = 0;
234
235 memset(&arg, 0, sizeof(arg));
236 arg.user_ptr = -4096ULL;
237 arg.user_size = 8192;
238 errno = 0;
239 drmIoctl(fd, LOCAL_IOCTL_I915_GEM_USERPTR, &arg);
240 if (errno == EFAULT) {
241 igt_assert(posix_memalign((void **)&arg.user_ptr,
242 4096, arg.user_size) == 0);
243 has_userptr = drmIoctl(fd,
244 LOCAL_IOCTL_I915_GEM_USERPTR,
245 &arg) == 0;
246 free(from_user_pointer(arg.user_ptr));
247 }
248
249 }
250 igt_require(has_userptr);
251 }
252
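/* Back the object with an anonymous mmap turned into a userptr bo, and keep
 * the CPU pointer in bo->virtual so the set/cmp helpers can write through it
 * directly.
 */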
253 static drm_intel_bo *
254 userptr_create_bo(const struct buffers *b)
255 {
256 struct local_i915_gem_userptr userptr;
257 drm_intel_bo *bo;
258 void *ptr;
259
260 memset(&userptr, 0, sizeof(userptr));
261 userptr.user_size = b->page_size;
262
263 ptr = mmap(NULL, userptr.user_size,
264 PROT_READ | PROT_WRITE, MAP_ANON | MAP_SHARED, -1, 0);
265 igt_assert(ptr != (void *)-1);
266 userptr.user_ptr = to_user_pointer(ptr);
267
268 #if 0
269 do_or_die(drmIoctl(fd, LOCAL_IOCTL_I915_GEM_USERPTR, &userptr));
270 bo = gem_handle_to_libdrm_bo(b->bufmgr, fd, "userptr", userptr.handle);
271 gem_close(fd, userptr.handle);
272 #else
273 bo = drm_intel_bo_alloc_userptr(b->bufmgr, "name",
274 ptr, I915_TILING_NONE, 0,
275 userptr.user_size, 0);
276 igt_assert(bo);
277 #endif
278 bo->virtual = from_user_pointer(userptr.user_ptr);
279
280 return bo;
281 }
282
283 static void
284 userptr_set_bo(struct buffers *b, drm_intel_bo *bo, uint32_t val)
285 {
286 int size = b->npixels;
287 uint32_t *vaddr = bo->virtual;
288
289 gem_set_domain(fd, bo->handle,
290 I915_GEM_DOMAIN_CPU, I915_GEM_DOMAIN_CPU);
291 while (size--)
292 *vaddr++ = val;
293 }
294
295 static void
296 userptr_cmp_bo(struct buffers *b, drm_intel_bo *bo, uint32_t val)
297 {
298 int size = b->npixels;
299 uint32_t *vaddr = bo->virtual;
300
301 gem_set_domain(fd, bo->handle,
302 I915_GEM_DOMAIN_CPU, 0);
303 while (size--)
304 igt_assert_eq_u32(*vaddr++, val);
305 }
306
307 static void
308 userptr_release_bo(drm_intel_bo *bo)
309 {
310 igt_assert(bo->virtual);
311
312 munmap(bo->virtual, bo->size);
313 bo->virtual = NULL;
314
315 drm_intel_bo_unreference(bo);
316 }
317
318 static void create_dmabuf_require(const struct create *create, unsigned count)
319 {
320 static int has_dmabuf = -1;
321 if (has_dmabuf < 0) {
322 struct drm_prime_handle args;
323 void *ptr;
324
325 memset(&args, 0, sizeof(args));
326 args.handle = gem_create(fd, 4096);
327 args.flags = DRM_RDWR;
328 args.fd = -1;
329
330 drmIoctl(fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &args);
331 gem_close(fd, args.handle);
332
333 has_dmabuf = 0;
334 ptr = mmap(NULL, 4096, PROT_READ, MAP_SHARED, args.fd, 0);
335 if (ptr != MAP_FAILED) {
336 has_dmabuf = 1;
337 munmap(ptr, 4096);
338 }
339
340 close(args.fd);
341 }
342 igt_require(has_dmabuf);
343 intel_require_files(2*count);
344 }
345
346 struct dmabuf {
347 int fd;
348 void *map;
349 };
350
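/* Export the GEM object as a dma-buf, mmap the fd and stash both in
 * bo->virtual; CPU access then goes through that mapping, bracketed by
 * prime_sync_start/end in the set/cmp helpers.
 */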
351 static drm_intel_bo *
352 dmabuf_create_bo(const struct buffers *b)
353 {
354 struct drm_prime_handle args;
355 drm_intel_bo *bo;
356 struct dmabuf *dmabuf;
357 int size;
358
359 size = b->page_size;
360
361 memset(&args, 0, sizeof(args));
362 args.handle = gem_create(fd, size);
363 args.flags = DRM_RDWR;
364 args.fd = -1;
365
366 do_ioctl(fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &args);
367 gem_close(fd, args.handle);
368
369 bo = drm_intel_bo_gem_create_from_prime(b->bufmgr, args.fd, size);
370 igt_assert(bo);
371
372 dmabuf = malloc(sizeof(*dmabuf));
373 igt_assert(dmabuf);
374
375 dmabuf->fd = args.fd;
376 dmabuf->map = mmap(NULL, size,
377 PROT_READ | PROT_WRITE, MAP_SHARED,
378 dmabuf->fd, 0);
379 igt_assert(dmabuf->map != (void *)-1);
380
381 bo->virtual = dmabuf;
382
383 return bo;
384 }
385
386 static void
387 dmabuf_set_bo(struct buffers *b, drm_intel_bo *bo, uint32_t val)
388 {
389 struct dmabuf *dmabuf = bo->virtual;
390 uint32_t *v = dmabuf->map;
391 int y;
392
393 prime_sync_start(dmabuf->fd, true);
394 for (y = 0; y < b->height; y++)
395 v[pixel(y, b->width)] = val;
396 prime_sync_end(dmabuf->fd, true);
397 }
398
399 static void
400 dmabuf_cmp_bo(struct buffers *b, drm_intel_bo *bo, uint32_t val)
401 {
402 struct dmabuf *dmabuf = bo->virtual;
403 uint32_t *v = dmabuf->map;
404 int y;
405
406 prime_sync_start(dmabuf->fd, false);
407 for (y = 0; y < b->height; y++)
408 igt_assert_eq_u32(v[pixel(y, b->width)], val);
409 prime_sync_end(dmabuf->fd, false);
410 }
411
412 static void
413 dmabuf_release_bo(drm_intel_bo *bo)
414 {
415 struct dmabuf *dmabuf = bo->virtual;
416 igt_assert(dmabuf);
417
418 munmap(dmabuf->map, bo->size);
419 close(dmabuf->fd);
420 free(dmabuf);
421
422 bo->virtual = NULL;
423 drm_intel_bo_unreference(bo);
424 }
425
426 static bool has_prime_export(int _fd)
427 {
428 uint64_t value;
429
430 if (drmGetCap(_fd, DRM_CAP_PRIME, &value))
431 return false;
432
433 return value & DRM_PRIME_CAP_EXPORT;
434 }
435
436 static void create_vgem_require(const struct create *create, unsigned count)
437 {
438 igt_require(vgem_drv != -1);
439 igt_require(has_prime_export(vgem_drv));
440 create_dmabuf_require(create, count);
441 }
442
443 static drm_intel_bo *
444 vgem_create_bo(const struct buffers *b)
445 {
446 struct drm_prime_handle args;
447 drm_intel_bo *bo;
448 struct vgem_bo vgem;
449 struct dmabuf *dmabuf;
450
451 igt_assert(vgem_drv != -1);
452
453 vgem.width = b->width;
454 vgem.height = b->height;
455 vgem.bpp = 32;
456 vgem_create(vgem_drv, &vgem);
457
458 memset(&args, 0, sizeof(args));
459 args.handle = vgem.handle;
460 args.flags = DRM_RDWR;
461 args.fd = -1;
462
463 do_ioctl(vgem_drv, DRM_IOCTL_PRIME_HANDLE_TO_FD, &args);
464 gem_close(vgem_drv, args.handle);
465 igt_assert(args.fd != -1);
466
467 bo = drm_intel_bo_gem_create_from_prime(b->bufmgr, args.fd, vgem.size);
468 igt_assert(bo);
469
470 dmabuf = malloc(sizeof(*dmabuf));
471 igt_assert(dmabuf);
472
473 dmabuf->fd = args.fd;
474 dmabuf->map = mmap(NULL, vgem.size,
475 PROT_READ | PROT_WRITE, MAP_SHARED,
476 dmabuf->fd, 0);
477 igt_assert(dmabuf->map != (void *)-1);
478
479 bo->virtual = dmabuf;
480
481 return bo;
482 }
483
484 static void
485 gtt_set_bo(struct buffers *b, drm_intel_bo *bo, uint32_t val)
486 {
487 uint32_t *vaddr = bo->virtual;
488
489 drm_intel_gem_bo_start_gtt_access(bo, true);
490 for (int y = 0; y < b->height; y++)
491 vaddr[pixel(y, b->width)] = val;
492 }
493
494 static void
495 gtt_cmp_bo(struct buffers *b, drm_intel_bo *bo, uint32_t val)
496 {
497 uint32_t *vaddr = bo->virtual;
498
499 /* GTT access is slow. So we just compare a few points */
500 drm_intel_gem_bo_start_gtt_access(bo, false);
501 for (int y = 0; y < b->height; y++)
502 igt_assert_eq_u32(vaddr[pixel(y, b->width)], val);
503 }
504
505 static drm_intel_bo *
506 map_bo(drm_intel_bo *bo)
507 {
508 /* gtt map doesn't have a write parameter, so just keep the mapping
509 * around (to avoid the set_domain with the gtt write domain set) and
510 * manually tell the kernel when we start accessing the gtt. */
511 do_or_die(drm_intel_gem_bo_map_gtt(bo));
512
513 return bo;
514 }
515
516 static drm_intel_bo *
517 tile_bo(drm_intel_bo *bo, int width)
518 {
519 uint32_t tiling = I915_TILING_X;
520 uint32_t stride = width * 4;
521
522 do_or_die(drm_intel_bo_set_tiling(bo, &tiling, stride));
523
524 return bo;
525 }
526
527 static drm_intel_bo *
528 gtt_create_bo(const struct buffers *b)
529 {
530 return map_bo(unmapped_create_bo(b));
531 }
532
533 static drm_intel_bo *
534 gttX_create_bo(const struct buffers *b)
535 {
536 return tile_bo(gtt_create_bo(b), b->width);
537 }
538
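/* Skip on machines where bit-17 swizzling makes CPU access to tiled buffers
 * incoherent (the physical swizzle differs from the reported one).
 */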
539 static void bit17_require(void)
540 {
541 static struct drm_i915_gem_get_tiling2 {
542 uint32_t handle;
543 uint32_t tiling_mode;
544 uint32_t swizzle_mode;
545 uint32_t phys_swizzle_mode;
546 } arg;
547 #define DRM_IOCTL_I915_GEM_GET_TILING2 DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_TILING, struct drm_i915_gem_get_tiling2)
548
549 if (arg.handle == 0) {
550 arg.handle = gem_create(fd, 4096);
551 gem_set_tiling(fd, arg.handle, I915_TILING_X, 512);
552
553 do_ioctl(fd, DRM_IOCTL_I915_GEM_GET_TILING2, &arg);
554 gem_close(fd, arg.handle);
555 }
556 igt_require(arg.phys_swizzle_mode == arg.swizzle_mode);
557 }
558
559 static void wc_require(void)
560 {
561 bit17_require();
562 gem_require_mmap_wc(fd);
563 }
564
565 static void
566 wc_create_require(const struct create *create, unsigned count)
567 {
568 wc_require();
569 }
570
571 static drm_intel_bo *
572 wc_create_bo(const struct buffers *b)
573 {
574 drm_intel_bo *bo;
575
576 bo = unmapped_create_bo(b);
577 bo->virtual = gem_mmap__wc(fd, bo->handle, 0, bo->size, PROT_READ | PROT_WRITE);
578 return bo;
579 }
580
581 static void
582 wc_release_bo(drm_intel_bo *bo)
583 {
584 igt_assert(bo->virtual);
585
586 munmap(bo->virtual, bo->size);
587 bo->virtual = NULL;
588
589 nop_release_bo(bo);
590 }
591
592 static drm_intel_bo *
593 gpu_create_bo(const struct buffers *b)
594 {
595 return unmapped_create_bo(b);
596 }
597
598 static drm_intel_bo *
599 gpuX_create_bo(const struct buffers *b)
600 {
601 return tile_bo(gpu_create_bo(b), b->width);
602 }
603
604 static void
605 cpu_set_bo(struct buffers *b, drm_intel_bo *bo, uint32_t val)
606 {
607 int size = b->npixels;
608 uint32_t *vaddr;
609
610 do_or_die(drm_intel_bo_map(bo, true));
611 vaddr = bo->virtual;
612 while (size--)
613 *vaddr++ = val;
614 drm_intel_bo_unmap(bo);
615 }
616
617 static void
618 cpu_cmp_bo(struct buffers *b, drm_intel_bo *bo, uint32_t val)
619 {
620 int size = b->npixels;
621 uint32_t *vaddr;
622
623 do_or_die(drm_intel_bo_map(bo, false));
624 vaddr = bo->virtual;
625 while (size--)
626 igt_assert_eq_u32(*vaddr++, val);
627 drm_intel_bo_unmap(bo);
628 }
629
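/* Fill the target with val entirely on the GPU: hand-roll an XY_COLOR_BLT
 * batch and submit it with execbuf, so no CPU write domain is involved.
 */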
630 static void
631 gpu_set_bo(struct buffers *buffers, drm_intel_bo *bo, uint32_t val)
632 {
633 struct drm_i915_gem_relocation_entry reloc[1];
634 struct drm_i915_gem_exec_object2 gem_exec[2];
635 struct drm_i915_gem_execbuffer2 execbuf;
636 uint32_t buf[10], *b;
637 uint32_t tiling, swizzle;
638
639 drm_intel_bo_get_tiling(bo, &tiling, &swizzle);
640
641 memset(reloc, 0, sizeof(reloc));
642 memset(gem_exec, 0, sizeof(gem_exec));
643 memset(&execbuf, 0, sizeof(execbuf));
644
645 b = buf;
646 *b++ = XY_COLOR_BLT_CMD_NOLEN |
647 ((gen >= 8) ? 5 : 4) |
648 COLOR_BLT_WRITE_ALPHA | XY_COLOR_BLT_WRITE_RGB;
649 if (gen >= 4 && tiling) {
650 b[-1] |= XY_COLOR_BLT_TILED;
651 *b = buffers->width;
652 } else
653 *b = buffers->width << 2;
654 *b++ |= 0xf0 << 16 | 1 << 25 | 1 << 24;
655 *b++ = 0;
656 *b++ = buffers->height << 16 | buffers->width;
657 reloc[0].offset = (b - buf) * sizeof(uint32_t);
658 reloc[0].target_handle = bo->handle;
659 reloc[0].read_domains = I915_GEM_DOMAIN_RENDER;
660 reloc[0].write_domain = I915_GEM_DOMAIN_RENDER;
661 *b++ = 0;
662 if (gen >= 8)
663 *b++ = 0;
664 *b++ = val;
665 *b++ = MI_BATCH_BUFFER_END;
666 if ((b - buf) & 1)
667 *b++ = 0;
668
669 gem_exec[0].handle = bo->handle;
670 gem_exec[0].flags = EXEC_OBJECT_NEEDS_FENCE;
671
672 gem_exec[1].handle = gem_create(fd, 4096);
673 gem_exec[1].relocation_count = 1;
674 gem_exec[1].relocs_ptr = to_user_pointer(reloc);
675
676 execbuf.buffers_ptr = to_user_pointer(gem_exec);
677 execbuf.buffer_count = 2;
678 execbuf.batch_len = (b - buf) * sizeof(buf[0]);
679 if (gen >= 6)
680 execbuf.flags = I915_EXEC_BLT;
681
682 gem_write(fd, gem_exec[1].handle, 0, buf, execbuf.batch_len);
683 gem_execbuf(fd, &execbuf);
684
685 gem_close(fd, gem_exec[1].handle);
686 }
687
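/* Read back via the GPU as well: blit into the snooped bo and compare that
 * copy with the CPU.
 */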
688 static void
689 gpu_cmp_bo(struct buffers *b, drm_intel_bo *bo, uint32_t val)
690 {
691 blt_copy_bo(b, b->snoop, bo);
692 cpu_cmp_bo(b, b->snoop, val);
693 }
694
695 struct access_mode {
696 const char *name;
697 void (*require)(const struct create *, unsigned);
698 drm_intel_bo *(*create_bo)(const struct buffers *b);
699 void (*set_bo)(struct buffers *b, drm_intel_bo *bo, uint32_t val);
700 void (*cmp_bo)(struct buffers *b, drm_intel_bo *bo, uint32_t val);
701 void (*release_bo)(drm_intel_bo *bo);
702 };
703 igt_render_copyfunc_t rendercopy;
704
705 static int read_sysctl(const char *path)
706 {
707 FILE *file = fopen(path, "r");
708 int max = 0;
709 if (file) {
710 if (fscanf(file, "%d", &max) != 1)
711 max = 0; /* silence! */
712 fclose(file);
713 }
714 return max;
715 }
716
717 static int write_sysctl(const char *path, int value)
718 {
719 FILE *file = fopen(path, "w");
720 if (file) {
721 fprintf(file, "%d", value);
722 fclose(file);
723 }
724 return read_sysctl(path);
725 }
726
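/* Several access modes keep a long-lived mmap per buffer, so raise
 * vm.max_map_count (with some headroom) before creating thousands of objects.
 */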
727 static bool set_max_map_count(int num_buffers)
728 {
729 int max = read_sysctl("/proc/sys/vm/max_map_count");
730 if (max < num_buffers + 1024)
731 max = write_sysctl("/proc/sys/vm/max_map_count",
732 num_buffers + 1024);
733 return max > num_buffers;
734 }
735
736 static void buffers_init(struct buffers *b,
737 const char *name,
738 const struct create *create,
739 const struct access_mode *mode,
740 const struct size *size,
741 int num_buffers,
742 int _fd, int enable_reuse)
743 {
744 memset(b, 0, sizeof(*b));
745 b->name = name;
746 b->create = create;
747 b->mode = mode;
748 b->size = size;
749 b->num_buffers = num_buffers;
750 b->count = 0;
751
752 b->width = size->width;
753 b->height = size->height;
754 b->npixels = size->width * size->height;
755 b->page_size = 4*b->npixels;
756 b->page_size = (b->page_size + 4095) & -4096;
757 b->tmp = malloc(b->page_size);
758 igt_assert(b->tmp);
759
760 b->bufmgr = drm_intel_bufmgr_gem_init(_fd, 4096);
761 igt_assert(b->bufmgr);
762
763 b->src = malloc(2*sizeof(drm_intel_bo *)*num_buffers);
764 igt_assert(b->src);
765 b->dst = b->src + num_buffers;
766
767 if (enable_reuse)
768 drm_intel_bufmgr_gem_enable_reuse(b->bufmgr);
769 b->batch = intel_batchbuffer_alloc(b->bufmgr, devid);
770 igt_assert(b->batch);
771 }
772
773 static void buffers_destroy(struct buffers *b)
774 {
775 int count = b->count;
776 if (count == 0)
777 return;
778
779 /* Be safe so that we can clean up a partial creation */
780 b->count = 0;
781 for (int i = 0; i < count; i++) {
782 if (b->src[i]) {
783 b->mode->release_bo(b->src[i]);
784 b->src[i] = NULL;
785 } else
786 break;
787
788 if (b->dst[i]) {
789 b->mode->release_bo(b->dst[i]);
790 b->dst[i] = NULL;
791 }
792 }
793 if (b->snoop) {
794 nop_release_bo(b->snoop);
795 b->snoop = NULL;
796 }
797 if (b->spare) {
798 b->mode->release_bo(b->spare);
799 b->spare = NULL;
800 }
801 }
802
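/* (Re)allocate the working set: count src/dst pairs plus a spare, and a
 * snooped bo used by gpu_cmp_bo for CPU-coherent readback.
 */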
803 static void buffers_create(struct buffers *b)
804 {
805 int count = b->num_buffers;
806 igt_assert(b->bufmgr);
807
808 buffers_destroy(b);
809 igt_assert(b->count == 0);
810 b->count = count;
811
812 for (int i = 0; i < count; i++) {
813 b->src[i] = b->mode->create_bo(b);
814 b->dst[i] = b->mode->create_bo(b);
815 }
816 b->spare = b->mode->create_bo(b);
817 b->snoop = snoop_create_bo(b);
818 }
819
820 static void buffers_reset(struct buffers *b, bool enable_reuse)
821 {
822 b->bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);
823 igt_assert(b->bufmgr);
824
825 if (enable_reuse)
826 drm_intel_bufmgr_gem_enable_reuse(b->bufmgr);
827 b->batch = intel_batchbuffer_alloc(b->bufmgr, devid);
828 igt_assert(b->batch);
829 }
830
831 static void buffers_fini(struct buffers *b)
832 {
833 if (b->bufmgr == NULL)
834 return;
835
836 buffers_destroy(b);
837
838 free(b->tmp);
839 free(b->src);
840
841 intel_batchbuffer_free(b->batch);
842 drm_intel_bufmgr_destroy(b->bufmgr);
843
844 memset(b, 0, sizeof(*b));
845 }
846
847 typedef void (*do_copy)(struct buffers *b, drm_intel_bo *dst, drm_intel_bo *src);
848 typedef igt_hang_t (*do_hang)(void);
849
850 static void render_copy_bo(struct buffers *b, drm_intel_bo *dst, drm_intel_bo *src)
851 {
852 struct igt_buf d = {
853 .bo = dst,
854 .size = b->npixels * 4,
855 .num_tiles = b->npixels * 4,
856 .stride = b->width * 4,
857 .bpp = 32,
858 }, s = {
859 .bo = src,
860 .size = b->npixels * 4,
861 .num_tiles = b->npixels * 4,
862 .stride = b->width * 4,
863 .bpp = 32,
864 };
865 uint32_t swizzle;
866
867 drm_intel_bo_get_tiling(dst, &d.tiling, &swizzle);
868 drm_intel_bo_get_tiling(src, &s.tiling, &swizzle);
869
870 rendercopy(b->batch, NULL,
871 &s, 0, 0,
872 b->width, b->height,
873 &d, 0, 0);
874 }
875
876 static void blt_copy_bo(struct buffers *b, drm_intel_bo *dst, drm_intel_bo *src)
877 {
878 intel_blt_copy(b->batch,
879 src, 0, 0, 4*b->width,
880 dst, 0, 0, 4*b->width,
881 b->width, b->height, 32);
882 }
883
884 static void cpu_copy_bo(struct buffers *b, drm_intel_bo *dst, drm_intel_bo *src)
885 {
886 const int size = b->page_size;
887 void *d, *s;
888
889 gem_set_domain(fd, src->handle, I915_GEM_DOMAIN_CPU, 0);
890 gem_set_domain(fd, dst->handle, I915_GEM_DOMAIN_CPU, I915_GEM_DOMAIN_CPU);
891 s = gem_mmap__cpu(fd, src->handle, 0, size, PROT_READ);
892 d = gem_mmap__cpu(fd, dst->handle, 0, size, PROT_WRITE);
893
894 memcpy(d, s, size);
895
896 munmap(d, size);
897 munmap(s, size);
898 }
899
900 static void gtt_copy_bo(struct buffers *b, drm_intel_bo *dst, drm_intel_bo *src)
901 {
902 const int size = b->page_size;
903 void *d, *s;
904
905 gem_set_domain(fd, src->handle, I915_GEM_DOMAIN_GTT, 0);
906 gem_set_domain(fd, dst->handle, I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
907
908 s = gem_mmap__gtt(fd, src->handle, size, PROT_READ);
909 d = gem_mmap__gtt(fd, dst->handle, size, PROT_WRITE);
910
911 memcpy(d, s, size);
912
913 munmap(d, size);
914 munmap(s, size);
915 }
916
917 static void wc_copy_bo(struct buffers *b, drm_intel_bo *dst, drm_intel_bo *src)
918 {
919 const int size = b->page_size;
920 void *d, *s;
921
922 gem_set_domain(fd, src->handle, I915_GEM_DOMAIN_WC, 0);
923 gem_set_domain(fd, dst->handle, I915_GEM_DOMAIN_WC, I915_GEM_DOMAIN_WC);
924
925 s = gem_mmap__wc(fd, src->handle, 0, size, PROT_READ);
926 d = gem_mmap__wc(fd, dst->handle, 0, size, PROT_WRITE);
927
928 memcpy(d, s, size);
929
930 munmap(d, size);
931 munmap(s, size);
932 }
933
934 static igt_hang_t no_hang(void)
935 {
936 return (igt_hang_t){0, 0};
937 }
938
939 static igt_hang_t bcs_hang(void)
940 {
941 return igt_hang_ring(fd, I915_EXEC_BLT);
942 }
943
944 static igt_hang_t rcs_hang(void)
945 {
946 return igt_hang_ring(fd, I915_EXEC_RENDER);
947 }
948
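/* Reuse the render-ring hang batch and resubmit it on every other physical
 * engine so the whole GPU wedges at once.
 */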
949 static igt_hang_t all_hang(void)
950 {
951 igt_hang_t hang = igt_hang_ring(fd, I915_EXEC_RENDER);
952 unsigned engine;
953
954 for_each_physical_engine(fd, engine) {
955 struct drm_i915_gem_execbuffer2 eb = hang.spin->execbuf;
956
957 if (engine == I915_EXEC_RENDER)
958 continue;
959
960 eb.flags = engine;
961 __gem_execbuf(fd, &eb);
962 }
963
964 return hang;
965 }
966
967 static void do_basic0(struct buffers *buffers,
968 do_copy do_copy_func,
969 do_hang do_hang_func)
970 {
971 buffers->mode->set_bo(buffers, buffers->src[0], 0xdeadbeef);
972 for (int i = 0; i < buffers->count; i++) {
973 igt_hang_t hang = do_hang_func();
974
975 do_copy_func(buffers, buffers->dst[i], buffers->src[0]);
976 buffers->mode->cmp_bo(buffers, buffers->dst[i], 0xdeadbeef);
977
978 igt_post_hang_ring(fd, hang);
979 }
980 }
981
982 static void do_basic1(struct buffers *buffers,
983 do_copy do_copy_func,
984 do_hang do_hang_func)
985 {
986 for (int i = 0; i < buffers->count; i++) {
987 igt_hang_t hang = do_hang_func();
988
989 buffers->mode->set_bo(buffers, buffers->src[i], i);
990 buffers->mode->set_bo(buffers, buffers->dst[i], ~i);
991
992 do_copy_func(buffers, buffers->dst[i], buffers->src[i]);
993 usleep(0); /* let someone else claim the mutex */
994 buffers->mode->cmp_bo(buffers, buffers->dst[i], i);
995
996 igt_post_hang_ring(fd, hang);
997 }
998 }
999
1000 static void do_basicN(struct buffers *buffers,
1001 do_copy do_copy_func,
1002 do_hang do_hang_func)
1003 {
1004 igt_hang_t hang;
1005
1006 for (int i = 0; i < buffers->count; i++) {
1007 buffers->mode->set_bo(buffers, buffers->src[i], i);
1008 buffers->mode->set_bo(buffers, buffers->dst[i], ~i);
1009 }
1010
1011 hang = do_hang_func();
1012
1013 for (int i = 0; i < buffers->count; i++) {
1014 do_copy_func(buffers, buffers->dst[i], buffers->src[i]);
1015 usleep(0); /* let someone else claim the mutex */
1016 }
1017
1018 for (int i = 0; i < buffers->count; i++)
1019 buffers->mode->cmp_bo(buffers, buffers->dst[i], i);
1020
1021 igt_post_hang_ring(fd, hang);
1022 }
1023
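/* Queue all copies, optionally hang the GPU, then rewrite the sources; the
 * destinations must still contain the values the copies sampled.
 */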
1024 static void do_overwrite_source(struct buffers *buffers,
1025 do_copy do_copy_func,
1026 do_hang do_hang_func)
1027 {
1028 igt_hang_t hang;
1029 int i;
1030
1031 for (i = 0; i < buffers->count; i++) {
1032 buffers->mode->set_bo(buffers, buffers->src[i], i);
1033 buffers->mode->set_bo(buffers, buffers->dst[i], ~i);
1034 }
1035 for (i = 0; i < buffers->count; i++)
1036 do_copy_func(buffers, buffers->dst[i], buffers->src[i]);
1037 hang = do_hang_func();
1038 for (i = buffers->count; i--; )
1039 buffers->mode->set_bo(buffers, buffers->src[i], 0xdeadbeef);
1040 for (i = 0; i < buffers->count; i++)
1041 buffers->mode->cmp_bo(buffers, buffers->dst[i], i);
1042 igt_post_hang_ring(fd, hang);
1043 }
1044
1045 static void do_overwrite_source_read(struct buffers *buffers,
1046 do_copy do_copy_func,
1047 do_hang do_hang_func,
1048 int do_rcs)
1049 {
1050 const int half = buffers->count/2;
1051 igt_hang_t hang;
1052 int i;
1053
1054 for (i = 0; i < half; i++) {
1055 buffers->mode->set_bo(buffers, buffers->src[i], i);
1056 buffers->mode->set_bo(buffers, buffers->dst[i], ~i);
1057 buffers->mode->set_bo(buffers, buffers->dst[i+half], ~i);
1058 }
1059 for (i = 0; i < half; i++) {
1060 do_copy_func(buffers, buffers->dst[i], buffers->src[i]);
1061 if (do_rcs)
1062 render_copy_bo(buffers, buffers->dst[i+half], buffers->src[i]);
1063 else
1064 blt_copy_bo(buffers, buffers->dst[i+half], buffers->src[i]);
1065 }
1066 hang = do_hang_func();
1067 for (i = half; i--; )
1068 buffers->mode->set_bo(buffers, buffers->src[i], 0xdeadbeef);
1069 for (i = 0; i < half; i++) {
1070 buffers->mode->cmp_bo(buffers, buffers->dst[i], i);
1071 buffers->mode->cmp_bo(buffers, buffers->dst[i+half], i);
1072 }
1073 igt_post_hang_ring(fd, hang);
1074 }
1075
1076 static void do_overwrite_source_read_bcs(struct buffers *buffers,
1077 do_copy do_copy_func,
1078 do_hang do_hang_func)
1079 {
1080 do_overwrite_source_read(buffers, do_copy_func, do_hang_func, 0);
1081 }
1082
1083 static void do_overwrite_source_read_rcs(struct buffers *buffers,
1084 do_copy do_copy_func,
1085 do_hang do_hang_func)
1086 {
1087 do_overwrite_source_read(buffers, do_copy_func, do_hang_func, 1);
1088 }
1089
1090 static void do_overwrite_source__rev(struct buffers *buffers,
1091 do_copy do_copy_func,
1092 do_hang do_hang_func)
1093 {
1094 igt_hang_t hang;
1095 int i;
1096
1097 for (i = 0; i < buffers->count; i++) {
1098 buffers->mode->set_bo(buffers, buffers->src[i], i);
1099 buffers->mode->set_bo(buffers, buffers->dst[i], ~i);
1100 }
1101 for (i = 0; i < buffers->count; i++)
1102 do_copy_func(buffers, buffers->dst[i], buffers->src[i]);
1103 hang = do_hang_func();
1104 for (i = 0; i < buffers->count; i++)
1105 buffers->mode->set_bo(buffers, buffers->src[i], 0xdeadbeef);
1106 for (i = buffers->count; i--; )
1107 buffers->mode->cmp_bo(buffers, buffers->dst[i], i);
1108 igt_post_hang_ring(fd, hang);
1109 }
1110
1111 static void do_overwrite_source__one(struct buffers *buffers,
1112 do_copy do_copy_func,
1113 do_hang do_hang_func)
1114 {
1115 igt_hang_t hang;
1116
1117 buffers->mode->set_bo(buffers, buffers->src[0], 0);
1118 buffers->mode->set_bo(buffers, buffers->dst[0], ~0);
1119 do_copy_func(buffers, buffers->dst[0], buffers->src[0]);
1120 hang = do_hang_func();
1121 buffers->mode->set_bo(buffers, buffers->src[0], 0xdeadbeef);
1122 buffers->mode->cmp_bo(buffers, buffers->dst[0], 0);
1123 igt_post_hang_ring(fd, hang);
1124 }
1125
1126 static void do_intermix(struct buffers *buffers,
1127 do_copy do_copy_func,
1128 do_hang do_hang_func,
1129 int do_rcs)
1130 {
1131 const int half = buffers->count/2;
1132 igt_hang_t hang;
1133 int i;
1134
1135 for (i = 0; i < buffers->count; i++) {
1136 buffers->mode->set_bo(buffers, buffers->src[i], 0xdeadbeef^~i);
1137 buffers->mode->set_bo(buffers, buffers->dst[i], i);
1138 }
1139 for (i = 0; i < half; i++) {
1140 if (do_rcs == 1 || (do_rcs == -1 && i & 1))
1141 render_copy_bo(buffers, buffers->dst[i], buffers->src[i]);
1142 else
1143 blt_copy_bo(buffers, buffers->dst[i], buffers->src[i]);
1144
1145 do_copy_func(buffers, buffers->dst[i+half], buffers->src[i]);
1146
1147 if (do_rcs == 1 || (do_rcs == -1 && (i & 1) == 0))
1148 render_copy_bo(buffers, buffers->dst[i], buffers->dst[i+half]);
1149 else
1150 blt_copy_bo(buffers, buffers->dst[i], buffers->dst[i+half]);
1151
1152 do_copy_func(buffers, buffers->dst[i+half], buffers->src[i+half]);
1153 }
1154 hang = do_hang_func();
1155 for (i = 0; i < 2*half; i++)
1156 buffers->mode->cmp_bo(buffers, buffers->dst[i], 0xdeadbeef^~i);
1157 igt_post_hang_ring(fd, hang);
1158 }
1159
1160 static void do_intermix_rcs(struct buffers *buffers,
1161 do_copy do_copy_func,
1162 do_hang do_hang_func)
1163 {
1164 do_intermix(buffers, do_copy_func, do_hang_func, 1);
1165 }
1166
1167 static void do_intermix_bcs(struct buffers *buffers,
1168 do_copy do_copy_func,
1169 do_hang do_hang_func)
1170 {
1171 do_intermix(buffers, do_copy_func, do_hang_func, 0);
1172 }
1173
1174 static void do_intermix_both(struct buffers *buffers,
1175 do_copy do_copy_func,
1176 do_hang do_hang_func)
1177 {
1178 do_intermix(buffers, do_copy_func, do_hang_func, -1);
1179 }
1180
1181 static void do_early_read(struct buffers *buffers,
1182 do_copy do_copy_func,
1183 do_hang do_hang_func)
1184 {
1185 igt_hang_t hang;
1186 int i;
1187
1188 for (i = buffers->count; i--; )
1189 buffers->mode->set_bo(buffers, buffers->src[i], 0xdeadbeef);
1190 for (i = 0; i < buffers->count; i++)
1191 do_copy_func(buffers, buffers->dst[i], buffers->src[i]);
1192 hang = do_hang_func();
1193 for (i = buffers->count; i--; )
1194 buffers->mode->cmp_bo(buffers, buffers->dst[i], 0xdeadbeef);
1195 igt_post_hang_ring(fd, hang);
1196 }
1197
1198 static void do_read_read_bcs(struct buffers *buffers,
1199 do_copy do_copy_func,
1200 do_hang do_hang_func)
1201 {
1202 igt_hang_t hang;
1203 int i;
1204
1205 for (i = buffers->count; i--; )
1206 buffers->mode->set_bo(buffers, buffers->src[i], 0xdeadbeef ^ i);
1207 for (i = 0; i < buffers->count; i++) {
1208 do_copy_func(buffers, buffers->dst[i], buffers->src[i]);
1209 blt_copy_bo(buffers, buffers->spare, buffers->src[i]);
1210 }
1211 buffers->mode->cmp_bo(buffers, buffers->spare, 0xdeadbeef^(buffers->count-1));
1212 hang = do_hang_func();
1213 for (i = buffers->count; i--; )
1214 buffers->mode->cmp_bo(buffers, buffers->dst[i], 0xdeadbeef ^ i);
1215 igt_post_hang_ring(fd, hang);
1216 }
1217
1218 static void do_write_read_bcs(struct buffers *buffers,
1219 do_copy do_copy_func,
1220 do_hang do_hang_func)
1221 {
1222 igt_hang_t hang;
1223 int i;
1224
1225 for (i = buffers->count; i--; )
1226 buffers->mode->set_bo(buffers, buffers->src[i], 0xdeadbeef ^ i);
1227 for (i = 0; i < buffers->count; i++) {
1228 blt_copy_bo(buffers, buffers->spare, buffers->src[i]);
1229 do_copy_func(buffers, buffers->dst[i], buffers->spare);
1230 }
1231 hang = do_hang_func();
1232 for (i = buffers->count; i--; )
1233 buffers->mode->cmp_bo(buffers, buffers->dst[i], 0xdeadbeef ^ i);
1234 igt_post_hang_ring(fd, hang);
1235 }
1236
1237 static void do_read_read_rcs(struct buffers *buffers,
1238 do_copy do_copy_func,
1239 do_hang do_hang_func)
1240 {
1241 igt_hang_t hang;
1242 int i;
1243
1244 for (i = buffers->count; i--; )
1245 buffers->mode->set_bo(buffers, buffers->src[i], 0xdeadbeef ^ i);
1246 for (i = 0; i < buffers->count; i++) {
1247 do_copy_func(buffers, buffers->dst[i], buffers->src[i]);
1248 render_copy_bo(buffers, buffers->spare, buffers->src[i]);
1249 }
1250 buffers->mode->cmp_bo(buffers, buffers->spare, 0xdeadbeef^(buffers->count-1));
1251 hang = do_hang_func();
1252 for (i = buffers->count; i--; )
1253 buffers->mode->cmp_bo(buffers, buffers->dst[i], 0xdeadbeef ^ i);
1254 igt_post_hang_ring(fd, hang);
1255 }
1256
1257 static void do_write_read_rcs(struct buffers *buffers,
1258 do_copy do_copy_func,
1259 do_hang do_hang_func)
1260 {
1261 igt_hang_t hang;
1262 int i;
1263
1264 for (i = buffers->count; i--; )
1265 buffers->mode->set_bo(buffers, buffers->src[i], 0xdeadbeef ^ i);
1266 for (i = 0; i < buffers->count; i++) {
1267 render_copy_bo(buffers, buffers->spare, buffers->src[i]);
1268 do_copy_func(buffers, buffers->dst[i], buffers->spare);
1269 }
1270 hang = do_hang_func();
1271 for (i = buffers->count; i--; )
1272 buffers->mode->cmp_bo(buffers, buffers->dst[i], 0xdeadbeef ^ i);
1273 igt_post_hang_ring(fd, hang);
1274 }
1275
1276 static void do_gpu_read_after_write(struct buffers *buffers,
1277 do_copy do_copy_func,
1278 do_hang do_hang_func)
1279 {
1280 igt_hang_t hang;
1281 int i;
1282
1283 for (i = buffers->count; i--; )
1284 buffers->mode->set_bo(buffers, buffers->src[i], 0xabcdabcd);
1285 for (i = 0; i < buffers->count; i++)
1286 do_copy_func(buffers, buffers->dst[i], buffers->src[i]);
1287 for (i = buffers->count; i--; )
1288 do_copy_func(buffers, buffers->spare, buffers->dst[i]);
1289 hang = do_hang_func();
1290 for (i = buffers->count; i--; )
1291 buffers->mode->cmp_bo(buffers, buffers->dst[i], 0xabcdabcd);
1292 igt_post_hang_ring(fd, hang);
1293 }
1294
1295 typedef void (*do_test)(struct buffers *buffers,
1296 do_copy do_copy_func,
1297 do_hang do_hang_func);
1298
1299 typedef void (*run_wrap)(struct buffers *buffers,
1300 do_test do_test_func,
1301 do_copy do_copy_func,
1302 do_hang do_hang_func);
1303
1304 static void run_single(struct buffers *buffers,
1305 do_test do_test_func,
1306 do_copy do_copy_func,
1307 do_hang do_hang_func)
1308 {
1309 pass = 0;
1310 do_test_func(buffers, do_copy_func, do_hang_func);
1311 igt_assert_eq(intel_detect_and_clear_missed_interrupts(fd), 0);
1312 }
1313
1314 static void run_interruptible(struct buffers *buffers,
1315 do_test do_test_func,
1316 do_copy do_copy_func,
1317 do_hang do_hang_func)
1318 {
1319 pass = 0;
1320 igt_while_interruptible(true)
1321 do_test_func(buffers, do_copy_func, do_hang_func);
1322 igt_assert_eq(intel_detect_and_clear_missed_interrupts(fd), 0);
1323 }
1324
1325 static void run_child(struct buffers *buffers,
1326 do_test do_test_func,
1327 do_copy do_copy_func,
1328 do_hang do_hang_func)
1329
1330 {
1331 /* We inherit the buffers from the parent, but the bufmgr/batch
1332 * needs to be local as the cache of reusable objects will be COWed,
1333 * leading to the child closing an object without the parent knowing.
1334 */
1335 pass = 0;
1336 igt_fork(child, 1)
1337 do_test_func(buffers, do_copy_func, do_hang_func);
1338 igt_waitchildren();
1339 igt_assert_eq(intel_detect_and_clear_missed_interrupts(fd), 0);
1340 }
1341
1342 static void __run_forked(struct buffers *buffers,
1343 int num_children, int loops, bool interrupt,
1344 do_test do_test_func,
1345 do_copy do_copy_func,
1346 do_hang do_hang_func)
1347
1348 {
1349 /* purge the libdrm caches before cloning the process */
1350 buffers_destroy(buffers);
1351 intel_batchbuffer_free(buffers->batch);
1352 drm_intel_bufmgr_destroy(buffers->bufmgr);
1353
1354 igt_fork(child, num_children) {
1355 int num_buffers;
1356
1357 /* recreate process local variables */
1358 fd = drm_open_driver(DRIVER_INTEL);
1359
1360 num_buffers = buffers->num_buffers / num_children;
1361 num_buffers += MIN_BUFFERS;
1362 if (num_buffers < buffers->num_buffers)
1363 buffers->num_buffers = num_buffers;
1364
1365 buffers_reset(buffers, true);
1366 buffers_create(buffers);
1367
1368 igt_while_interruptible(interrupt) {
1369 for (pass = 0; pass < loops; pass++)
1370 do_test_func(buffers,
1371 do_copy_func,
1372 do_hang_func);
1373 }
1374 }
1375 igt_waitchildren();
1376 igt_assert_eq(intel_detect_and_clear_missed_interrupts(fd), 0);
1377
1378 buffers_reset(buffers, true);
1379 }
1380
1381 static void run_forked(struct buffers *buffers,
1382 do_test do_test_func,
1383 do_copy do_copy_func,
1384 do_hang do_hang_func)
1385 {
1386 const int ncpus = sysconf(_SC_NPROCESSORS_ONLN);
1387 __run_forked(buffers, ncpus, ncpus, false,
1388 do_test_func, do_copy_func, do_hang_func);
1389 }
1390
1391 static void run_bomb(struct buffers *buffers,
1392 do_test do_test_func,
1393 do_copy do_copy_func,
1394 do_hang do_hang_func)
1395 {
1396 const int ncpus = sysconf(_SC_NPROCESSORS_ONLN);
1397 __run_forked(buffers, 8*ncpus, 2, true,
1398 do_test_func, do_copy_func, do_hang_func);
1399 }
1400
1401 static void cpu_require(void)
1402 {
1403 bit17_require();
1404 }
1405
1406 static void gtt_require(void)
1407 {
1408 }
1409
1410 static void bcs_require(void)
1411 {
1412 }
1413
1414 static void rcs_require(void)
1415 {
1416 igt_require(rendercopy);
1417 }
1418
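/* pskip starts at the "blt" entry, so only the blt and render pipelines run
 * unless the binary is invoked as the "all" variant (see main fixture).
 */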
1419 static void
1420 run_mode(const char *prefix,
1421 const struct create *create,
1422 const struct access_mode *mode,
1423 const struct size *size,
1424 const int num_buffers,
1425 const char *suffix,
1426 run_wrap run_wrap_func)
1427 {
1428 const struct {
1429 const char *prefix;
1430 do_copy copy;
1431 void (*require)(void);
1432 } pipelines[] = {
1433 { "cpu", cpu_copy_bo, cpu_require },
1434 { "gtt", gtt_copy_bo, gtt_require },
1435 { "wc", wc_copy_bo, wc_require },
1436 { "blt", blt_copy_bo, bcs_require },
1437 { "render", render_copy_bo, rcs_require },
1438 { NULL, NULL }
1439 }, *pskip = pipelines + 3, *p;
1440 const struct {
1441 const char *suffix;
1442 do_hang hang;
1443 } hangs[] = {
1444 { "", no_hang },
1445 { "-hang-blt", bcs_hang },
1446 { "-hang-render", rcs_hang },
1447 { "-hang-all", all_hang },
1448 { NULL, NULL },
1449 }, *h;
1450 struct buffers buffers;
1451
1452 igt_fixture
1453 buffers_init(&buffers, prefix, create, mode,
1454 size, num_buffers,
1455 fd, run_wrap_func != run_child);
1456
1457 for (h = hangs; h->suffix; h++) {
1458 if (!all && *h->suffix)
1459 continue;
1460
1461 if (!*h->suffix)
1462 igt_fixture
1463 igt_fork_hang_detector(fd);
1464
1465 for (p = all ? pipelines : pskip; p->prefix; p++) {
1466 igt_subtest_group {
1467 igt_fixture p->require();
1468
1469 igt_subtest_f("%s-%s-%s-sanitycheck0%s%s", prefix, mode->name, p->prefix, suffix, h->suffix) {
1470 buffers_create(&buffers);
1471 run_wrap_func(&buffers, do_basic0,
1472 p->copy, h->hang);
1473 }
1474
1475 igt_subtest_f("%s-%s-%s-sanitycheck1%s%s", prefix, mode->name, p->prefix, suffix, h->suffix) {
1476 buffers_create(&buffers);
1477 run_wrap_func(&buffers, do_basic1,
1478 p->copy, h->hang);
1479 }
1480
1481 igt_subtest_f("%s-%s-%s-sanitycheckN%s%s", prefix, mode->name, p->prefix, suffix, h->suffix) {
1482 buffers_create(&buffers);
1483 run_wrap_func(&buffers, do_basicN,
1484 p->copy, h->hang);
1485 }
1486
1487 /* try to overwrite the source values */
1488 igt_subtest_f("%s-%s-%s-overwrite-source-one%s%s", prefix, mode->name, p->prefix, suffix, h->suffix) {
1489 buffers_create(&buffers);
1490 run_wrap_func(&buffers,
1491 do_overwrite_source__one,
1492 p->copy, h->hang);
1493 }
1494
1495 igt_subtest_f("%s-%s-%s-overwrite-source%s%s", prefix, mode->name, p->prefix, suffix, h->suffix) {
1496 buffers_create(&buffers);
1497 run_wrap_func(&buffers,
1498 do_overwrite_source,
1499 p->copy, h->hang);
1500 }
1501
1502 igt_subtest_f("%s-%s-%s-overwrite-source-read-bcs%s%s", prefix, mode->name, p->prefix, suffix, h->suffix) {
1503 buffers_create(&buffers);
1504 run_wrap_func(&buffers,
1505 do_overwrite_source_read_bcs,
1506 p->copy, h->hang);
1507 }
1508
1509 igt_subtest_f("%s-%s-%s-overwrite-source-read-rcs%s%s", prefix, mode->name, p->prefix, suffix, h->suffix) {
1510 igt_require(rendercopy);
1511 buffers_create(&buffers);
1512 run_wrap_func(&buffers,
1513 do_overwrite_source_read_rcs,
1514 p->copy, h->hang);
1515 }
1516
1517 igt_subtest_f("%s-%s-%s-overwrite-source-rev%s%s", prefix, mode->name, p->prefix, suffix, h->suffix) {
1518 buffers_create(&buffers);
1519 run_wrap_func(&buffers,
1520 do_overwrite_source__rev,
1521 p->copy, h->hang);
1522 }
1523
1524 /* try to intermix copies with GPU copies */
1525 igt_subtest_f("%s-%s-%s-intermix-rcs%s%s", prefix, mode->name, p->prefix, suffix, h->suffix) {
1526 igt_require(rendercopy);
1527 buffers_create(&buffers);
1528 run_wrap_func(&buffers,
1529 do_intermix_rcs,
1530 p->copy, h->hang);
1531 }
1532 igt_subtest_f("%s-%s-%s-intermix-bcs%s%s", prefix, mode->name, p->prefix, suffix, h->suffix) {
1533 igt_require(rendercopy);
1534 buffers_create(&buffers);
1535 run_wrap_func(&buffers,
1536 do_intermix_bcs,
1537 p->copy, h->hang);
1538 }
1539 igt_subtest_f("%s-%s-%s-intermix-both%s%s", prefix, mode->name, p->prefix, suffix, h->suffix) {
1540 igt_require(rendercopy);
1541 buffers_create(&buffers);
1542 run_wrap_func(&buffers,
1543 do_intermix_both,
1544 p->copy, h->hang);
1545 }
1546
1547 /* try to read the results before the copy completes */
1548 igt_subtest_f("%s-%s-%s-early-read%s%s", prefix, mode->name, p->prefix, suffix, h->suffix) {
1549 buffers_create(&buffers);
1550 run_wrap_func(&buffers,
1551 do_early_read,
1552 p->copy, h->hang);
1553 }
1554
1555 /* concurrent reads */
1556 igt_subtest_f("%s-%s-%s-read-read-bcs%s%s", prefix, mode->name, p->prefix, suffix, h->suffix) {
1557 buffers_create(&buffers);
1558 run_wrap_func(&buffers,
1559 do_read_read_bcs,
1560 p->copy, h->hang);
1561 }
1562 igt_subtest_f("%s-%s-%s-read-read-rcs%s%s", prefix, mode->name, p->prefix, suffix, h->suffix) {
1563 igt_require(rendercopy);
1564 buffers_create(&buffers);
1565 run_wrap_func(&buffers,
1566 do_read_read_rcs,
1567 p->copy, h->hang);
1568 }
1569
1570 /* split copying between rings */
1571 igt_subtest_f("%s-%s-%s-write-read-bcs%s%s", prefix, mode->name, p->prefix, suffix, h->suffix) {
1572 buffers_create(&buffers);
1573 run_wrap_func(&buffers,
1574 do_write_read_bcs,
1575 p->copy, h->hang);
1576 }
1577 igt_subtest_f("%s-%s-%s-write-read-rcs%s%s", prefix, mode->name, p->prefix, suffix, h->suffix) {
1578 igt_require(rendercopy);
1579 buffers_create(&buffers);
1580 run_wrap_func(&buffers,
1581 do_write_read_rcs,
1582 p->copy, h->hang);
1583 }
1584
1585 /* and finally try to trick the kernel into losing the pending write */
1586 igt_subtest_f("%s-%s-%s-gpu-read-after-write%s%s", prefix, mode->name, p->prefix, suffix, h->suffix) {
1587 buffers_create(&buffers);
1588 run_wrap_func(&buffers,
1589 do_gpu_read_after_write,
1590 p->copy, h->hang);
1591 }
1592 }
1593 }
1594
1595 if (!*h->suffix)
1596 igt_fixture
1597 igt_stop_hang_detector();
1598 }
1599
1600 igt_fixture
1601 buffers_fini(&buffers);
1602 }
1603
1604 static void
1605 run_modes(const char *style,
1606 const struct create *create,
1607 const struct access_mode *mode,
1608 const struct size *size,
1609 const int num)
1610 {
1611 const struct wrap {
1612 const char *suffix;
1613 run_wrap func;
1614 } wrappers[] = {
1615 { "", run_single },
1616 { "-child", run_child },
1617 { "-forked", run_forked },
1618 { "-interruptible", run_interruptible },
1619 { "-bomb", run_bomb },
1620 { NULL },
1621 };
1622
1623 while (mode->name) {
1624 igt_subtest_group {
1625 igt_fixture {
1626 if (mode->require)
1627 mode->require(create, num);
1628 }
1629
1630 for (const struct wrap *w = wrappers; w->suffix; w++) {
1631 run_mode(style, create, mode, size, num,
1632 w->suffix, w->func);
1633 }
1634 }
1635
1636 mode++;
1637 }
1638 }
1639
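/* Convert a memory budget into a buffer count: half the budget for sources,
 * half for destinations, plus a minimum floor; then make sure the map-count
 * limit and available RAM/swap can actually back that many objects.
 */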
1640 static unsigned
1641 num_buffers(uint64_t max,
1642 const struct size *s,
1643 const struct create *c,
1644 unsigned allow_mem)
1645 {
1646 unsigned size = 4*s->width*s->height;
1647 uint64_t n;
1648
1649 igt_assert(size);
1650 n = max / (2*size);
1651 n += MIN_BUFFERS;
1652
1653 igt_require(n < INT32_MAX);
1654 igt_require(set_max_map_count(2*n));
1655
1656 if (c->require)
1657 c->require(c, n);
1658
1659 intel_require_memory(2*n, size, allow_mem);
1660
1661 return n;
1662 }
1663
1664 igt_main
1665 {
1666 const struct access_mode modes[] = {
1667 {
1668 .name = "prw",
1669 .create_bo = unmapped_create_bo,
1670 .set_bo = prw_set_bo,
1671 .cmp_bo = prw_cmp_bo,
1672 .release_bo = nop_release_bo,
1673 },
1674 {
1675 .name = "partial",
1676 .create_bo = unmapped_create_bo,
1677 .set_bo = partial_set_bo,
1678 .cmp_bo = partial_cmp_bo,
1679 .release_bo = nop_release_bo,
1680 },
1681 {
1682 .name = "cpu",
1683 .create_bo = unmapped_create_bo,
1684 .require = create_cpu_require,
1685 .set_bo = cpu_set_bo,
1686 .cmp_bo = cpu_cmp_bo,
1687 .release_bo = nop_release_bo,
1688 },
1689 {
1690 .name = "snoop",
1691 .create_bo = snoop_create_bo,
1692 .require = create_snoop_require,
1693 .set_bo = cpu_set_bo,
1694 .cmp_bo = cpu_cmp_bo,
1695 .release_bo = nop_release_bo,
1696 },
1697 {
1698 .name = "userptr",
1699 .create_bo = userptr_create_bo,
1700 .require = create_userptr_require,
1701 .set_bo = userptr_set_bo,
1702 .cmp_bo = userptr_cmp_bo,
1703 .release_bo = userptr_release_bo,
1704 },
1705 {
1706 .name = "dmabuf",
1707 .create_bo = dmabuf_create_bo,
1708 .require = create_dmabuf_require,
1709 .set_bo = dmabuf_set_bo,
1710 .cmp_bo = dmabuf_cmp_bo,
1711 .release_bo = dmabuf_release_bo,
1712 },
1713 {
1714 .name = "vgem",
1715 .create_bo = vgem_create_bo,
1716 .require = create_vgem_require,
1717 .set_bo = dmabuf_set_bo,
1718 .cmp_bo = dmabuf_cmp_bo,
1719 .release_bo = dmabuf_release_bo,
1720 },
1721 {
1722 .name = "gtt",
1723 .create_bo = gtt_create_bo,
1724 .set_bo = gtt_set_bo,
1725 .cmp_bo = gtt_cmp_bo,
1726 .release_bo = nop_release_bo,
1727 },
1728 {
1729 .name = "gttX",
1730 .create_bo = gttX_create_bo,
1731 .set_bo = gtt_set_bo,
1732 .cmp_bo = gtt_cmp_bo,
1733 .release_bo = nop_release_bo,
1734 },
1735 {
1736 .name = "wc",
1737 .require = wc_create_require,
1738 .create_bo = wc_create_bo,
1739 .set_bo = gtt_set_bo,
1740 .cmp_bo = gtt_cmp_bo,
1741 .release_bo = wc_release_bo,
1742 },
1743 {
1744 .name = "gpu",
1745 .create_bo = gpu_create_bo,
1746 .set_bo = gpu_set_bo,
1747 .cmp_bo = gpu_cmp_bo,
1748 .release_bo = nop_release_bo,
1749 },
1750 {
1751 .name = "gpuX",
1752 .create_bo = gpuX_create_bo,
1753 .set_bo = gpu_set_bo,
1754 .cmp_bo = gpu_cmp_bo,
1755 .release_bo = nop_release_bo,
1756 },
1757 { NULL },
1758 };
1759 const struct create create[] = {
1760 { "", can_create_normal, create_normal_bo},
1761 #if HAVE_CREATE_PRIVATE
1762 { "private-", can_create_private, create_private_bo},
1763 #endif
1764 #if HAVE_CREATE_STOLEN
1765 { "stolen-", can_create_stolen, create_stolen_bo},
1766 #endif
1767 { NULL, NULL }
1768 };
1769 const struct size sizes[] = {
1770 { "4KiB", 128, 8 },
1771 { "256KiB", 128, 128 },
1772 { "1MiB", 512, 512 },
1773 { "16MiB", 2048, 2048 },
1774 { NULL}
1775 };
1776 uint64_t pin_sz = 0;
1777 void *pinned = NULL;
1778 char name[80];
1779 int count = 0;
1780
1781 igt_skip_on_simulation();
1782
1783 if (strstr(igt_test_name(), "all"))
1784 all = true;
1785
1786 igt_fixture {
1787 igt_allow_unlimited_files();
1788
1789 fd = drm_open_driver(DRIVER_INTEL);
1790 igt_require_gem(fd);
1791 intel_detect_and_clear_missed_interrupts(fd);
1792 devid = intel_get_drm_devid(fd);
1793 gen = intel_gen(devid);
1794 rendercopy = igt_get_render_copyfunc(devid);
1795
1796 vgem_drv = __drm_open_driver(DRIVER_VGEM);
1797 }
1798
1799 for (const struct create *c = create; c->name; c++) {
1800 for (const struct size *s = sizes; s->name; s++) {
1801 /* Minimum test set */
1802 snprintf(name, sizeof(name), "%s%s-%s",
1803 c->name, s->name, "tiny");
1804 igt_subtest_group {
1805 igt_fixture {
1806 count = num_buffers(0, s, c, CHECK_RAM);
1807 }
1808 run_modes(name, c, modes, s, count);
1809 }
1810
1811 /* "Average" test set */
1812 snprintf(name, sizeof(name), "%s%s-%s",
1813 c->name, s->name, "small");
1814 igt_subtest_group {
1815 igt_fixture {
1816 count = num_buffers(gem_mappable_aperture_size()/4,
1817 s, c, CHECK_RAM);
1818 }
1819 run_modes(name, c, modes, s, count);
1820 }
1821
1822 /* Use the entire mappable aperture */
1823 snprintf(name, sizeof(name), "%s%s-%s",
1824 c->name, s->name, "thrash");
1825 igt_subtest_group {
1826 igt_fixture {
1827 count = num_buffers(gem_mappable_aperture_size(),
1828 s, c, CHECK_RAM);
1829 }
1830 run_modes(name, c, modes, s, count);
1831 }
1832
1833 /* Use the entire global GTT */
1834 snprintf(name, sizeof(name), "%s%s-%s",
1835 c->name, s->name, "global");
1836 igt_subtest_group {
1837 igt_fixture {
1838 count = num_buffers(gem_global_aperture_size(fd),
1839 s, c, CHECK_RAM);
1840 }
1841 run_modes(name, c, modes, s, count);
1842 }
1843
1844 /* Use the entire per-process GTT */
1845 snprintf(name, sizeof(name), "%s%s-%s",
1846 c->name, s->name, "full");
1847 igt_subtest_group {
1848 igt_fixture {
1849 count = num_buffers(gem_aperture_size(fd),
1850 s, c, CHECK_RAM);
1851 }
1852 run_modes(name, c, modes, s, count);
1853 }
1854
1855 snprintf(name, sizeof(name), "%s%s-%s",
1856 c->name, s->name, "shrink");
1857 igt_subtest_group {
1858 igt_fixture {
1859 count = num_buffers(gem_mappable_aperture_size(),
1860 s, c, CHECK_RAM);
1861
1862 igt_fork_shrink_helper(fd);
1863 }
1864 run_modes(name, c, modes, s, count);
1865
1866 igt_fixture
1867 igt_stop_shrink_helper();
1868 }
1869
1870 /* Use the entire mappable aperture, force swapping */
1871 snprintf(name, sizeof(name), "%s%s-%s",
1872 c->name, s->name, "swap");
1873 igt_subtest_group {
1874 igt_fixture {
1875 if (intel_get_avail_ram_mb() > gem_mappable_aperture_size()/(1024*1024)) {
1876 pin_sz = intel_get_avail_ram_mb() - gem_mappable_aperture_size()/(1024*1024);
1877
1878 igt_debug("Pinning %lld MiB\n", (long long)pin_sz);
1879 pin_sz *= 1024 * 1024;
1880
1881 if (posix_memalign(&pinned, 4096, pin_sz) ||
1882 mlock(pinned, pin_sz) ||
1883 madvise(pinned, pin_sz, MADV_DONTFORK)) {
1884 free(pinned);
1885 pinned = NULL;
1886 }
1887 igt_require(pinned);
1888 }
1889
1890 count = num_buffers(gem_mappable_aperture_size(),
1891 s, c, CHECK_RAM | CHECK_SWAP);
1892 }
1893 run_modes(name, c, modes, s, count);
1894
1895 igt_fixture {
1896 if (pinned) {
1897 munlock(pinned, pin_sz);
1898 free(pinned);
1899 pinned = NULL;
1900 }
1901 }
1902 }
1903 }
1904 }
1905 }
1906