/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "igt.h"
#include "igt_dummyload.h"

IGT_TEST_DESCRIPTION("Basic sanity check of execbuf-ioctl relocations.");

#define LOCAL_I915_EXEC_BSD_SHIFT      (13)
#define LOCAL_I915_EXEC_BSD_MASK       (3 << LOCAL_I915_EXEC_BSD_SHIFT)

#define LOCAL_I915_EXEC_NO_RELOC (1<<11)
#define LOCAL_I915_EXEC_HANDLE_LUT (1<<12)

#define ENGINE_MASK  (I915_EXEC_RING_MASK | LOCAL_I915_EXEC_BSD_MASK)

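/*
 * Classic fls(): returns the 1-based index of the most significant set
 * bit, or 0 for x == 0. The subtests below use it to recover log2 of a
 * power-of-two size for naming, e.g. find_last_set(4096) - 1 == 12.
 */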
static uint32_t find_last_set(uint64_t x)
{
	uint32_t i = 0;
	while (x) {
		x >>= 1;
		i++;
	}
	return i;
}

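/*
 * Write a single dword into a target bo using the GPU: a tiny
 * MI_STORE_DWORD_IMM batch stores 'value' at 'target_offset', with the
 * destination address patched in by the kernel via the relocation entry
 * built below. The command layout is generation dependent: gen8+ takes
 * a 64-bit address (two dwords), gen4-7 a zero dword followed by a
 * 32-bit address, and older parts a one-dword-shorter encoding (hence
 * the length-field decrement) with bit 22 selecting the global GTT.
 * The batch is submitted as a privileged (I915_EXEC_SECURE) buffer.
 */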
static void write_dword(int fd,
			uint32_t target_handle,
			uint64_t target_offset,
			uint32_t value)
{
	int gen = intel_gen(intel_get_drm_devid(fd));
	struct drm_i915_gem_execbuffer2 execbuf;
	struct drm_i915_gem_exec_object2 obj[2];
	struct drm_i915_gem_relocation_entry reloc;
	uint32_t buf[16];
	int i;

	memset(obj, 0, sizeof(obj));
	obj[0].handle = target_handle;
	obj[1].handle = gem_create(fd, 4096);

	i = 0;
	buf[i++] = MI_STORE_DWORD_IMM | (gen < 6 ? 1<<22 : 0);
	if (gen >= 8) {
		buf[i++] = target_offset;
		buf[i++] = target_offset >> 32;
	} else if (gen >= 4) {
		buf[i++] = 0;
		buf[i++] = target_offset;
	} else {
		buf[i-1]--;
		buf[i++] = target_offset;
	}
	buf[i++] = value;
	buf[i++] = MI_BATCH_BUFFER_END;
	gem_write(fd, obj[1].handle, 0, buf, sizeof(buf));

	/* Tell the kernel where the address sits inside the batch. */
	memset(&reloc, 0, sizeof(reloc));
	if (gen >= 8 || gen < 4)
		reloc.offset = sizeof(uint32_t);
	else
		reloc.offset = 2*sizeof(uint32_t);
	reloc.target_handle = target_handle;
	reloc.delta = target_offset;
	reloc.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
	reloc.write_domain = I915_GEM_DOMAIN_INSTRUCTION;

	obj[1].relocation_count = 1;
	obj[1].relocs_ptr = to_user_pointer(&reloc);

	memset(&execbuf, 0, sizeof(execbuf));
	execbuf.buffers_ptr = to_user_pointer(obj);
	execbuf.buffer_count = 2;
	execbuf.flags = I915_EXEC_SECURE;
	gem_execbuf(fd, &execbuf);
	gem_close(fd, obj[1].handle);
}

enum mode { MEM, CPU, WC, GTT };
#define RO 0x100
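/*
 * Feed execbuf a relocation array that lives in various kinds of
 * memory: plain anonymous pages (MEM), or a GEM buffer mapped through
 * the CPU, WC or GTT paths. With the RO bit the array is additionally
 * mprotected read-only, so the kernel must cope with being unable to
 * write back the presumed offsets. Every entry patches the same dword
 * with a different delta; only the last write (delta == max - 1)
 * survives, which is what the final assertion checks.
 */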
static void from_mmap(int fd, uint64_t size, enum mode mode)
{
	uint32_t bbe = MI_BATCH_BUFFER_END;
	struct drm_i915_gem_execbuffer2 execbuf;
	struct drm_i915_gem_exec_object2 obj;
	struct drm_i915_gem_relocation_entry *relocs;
	uint32_t reloc_handle;
	uint64_t value;
	uint64_t max, i;
	int retry = 2;

	/* Worst case is that the kernel has to copy the entire incoming
	 * reloc[], so double the memory requirements.
	 */
	intel_require_memory(2, size, CHECK_RAM);

	memset(&obj, 0, sizeof(obj));
	obj.handle = gem_create(fd, 4096);
	gem_write(fd, obj.handle, 0, &bbe, sizeof(bbe));

	max = size / sizeof(*relocs);
	switch (mode & ~RO) {
	case MEM:
		relocs = mmap(0, size,
			      PROT_WRITE, MAP_PRIVATE | MAP_ANON,
			      -1, 0);
		igt_assert(relocs != (void *)-1);
		break;
	case GTT:
		reloc_handle = gem_create(fd, size);
		relocs = gem_mmap__gtt(fd, reloc_handle, size, PROT_WRITE);
		gem_set_domain(fd, reloc_handle,
			       I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
		gem_close(fd, reloc_handle);
		break;
	case CPU:
		reloc_handle = gem_create(fd, size);
		relocs = gem_mmap__cpu(fd, reloc_handle, 0, size, PROT_WRITE);
		gem_set_domain(fd, reloc_handle,
			       I915_GEM_DOMAIN_CPU, I915_GEM_DOMAIN_CPU);
		gem_close(fd, reloc_handle);
		break;
	case WC:
		reloc_handle = gem_create(fd, size);
		relocs = gem_mmap__wc(fd, reloc_handle, 0, size, PROT_WRITE);
		gem_set_domain(fd, reloc_handle,
			       I915_GEM_DOMAIN_WC, I915_GEM_DOMAIN_WC);
		gem_close(fd, reloc_handle);
		break;
	}

	for (i = 0; i < max; i++) {
		relocs[i].target_handle = obj.handle;
		relocs[i].presumed_offset = ~0ull;
		relocs[i].offset = 1024;
		relocs[i].delta = i;
		relocs[i].read_domains = I915_GEM_DOMAIN_INSTRUCTION;
		relocs[i].write_domain = 0;
	}
	obj.relocation_count = max;
	obj.relocs_ptr = to_user_pointer(relocs);

	if (mode & RO)
		mprotect(relocs, size, PROT_READ);

	memset(&execbuf, 0, sizeof(execbuf));
	execbuf.buffers_ptr = to_user_pointer(&obj);
	execbuf.buffer_count = 1;
	while (relocs[0].presumed_offset == ~0ull && retry--)
		gem_execbuf(fd, &execbuf);
	gem_read(fd, obj.handle, 1024, &value, sizeof(value));
	gem_close(fd, obj.handle);

	igt_assert_eq_u64(value, obj.offset + max - 1);
	if (relocs[0].presumed_offset != ~0ull) {
		for (i = 0; i < max; i++)
			igt_assert_eq_u64(relocs[i].presumed_offset,
					  obj.offset);
	}
	munmap(relocs, size);
}

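/*
 * Build the relocation entry itself on the GPU: the target_handle,
 * offset and read_domains fields of the struct are filled in by
 * MI_STORE_DWORD_IMM batches (via write_dword() above) before the
 * entry is handed to execbuf, checking that the kernel reads the
 * reloc array coherently even when it was last written by the GPU.
 */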
static void from_gpu(int fd)
{
	uint32_t bbe = MI_BATCH_BUFFER_END;
	struct drm_i915_gem_execbuffer2 execbuf;
	struct drm_i915_gem_exec_object2 obj;
	struct drm_i915_gem_relocation_entry *relocs;
	uint32_t reloc_handle;
	uint64_t value;

	igt_require(gem_can_store_dword(fd, 0));

	memset(&obj, 0, sizeof(obj));
	obj.handle = gem_create(fd, 4096);
	gem_write(fd, obj.handle, 0, &bbe, sizeof(bbe));

	reloc_handle = gem_create(fd, 4096);
	write_dword(fd,
		    reloc_handle,
		    offsetof(struct drm_i915_gem_relocation_entry,
			     target_handle),
		    obj.handle);
	write_dword(fd,
		    reloc_handle,
		    offsetof(struct drm_i915_gem_relocation_entry,
			     offset),
		    1024);
	write_dword(fd,
		    reloc_handle,
		    offsetof(struct drm_i915_gem_relocation_entry,
			     read_domains),
		    I915_GEM_DOMAIN_INSTRUCTION);

	relocs = gem_mmap__cpu(fd, reloc_handle, 0, 4096, PROT_READ);
	gem_set_domain(fd, reloc_handle,
		       I915_GEM_DOMAIN_CPU, I915_GEM_DOMAIN_CPU);
	gem_close(fd, reloc_handle);

	obj.relocation_count = 1;
	obj.relocs_ptr = to_user_pointer(relocs);

	memset(&execbuf, 0, sizeof(execbuf));
	execbuf.buffers_ptr = to_user_pointer(&obj);
	execbuf.buffer_count = 1;
	gem_execbuf(fd, &execbuf);
	gem_read(fd, obj.handle, 1024, &value, sizeof(value));
	gem_close(fd, obj.handle);

	igt_assert_eq_u64(value, obj.offset);
	igt_assert_eq_u64(relocs->presumed_offset, obj.offset);
	munmap(relocs, 4096);
}

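/*
 * Read back the scratch object through a CPU mmap and confirm that all
 * 1024 stores issued by active() landed: dword slot i must contain i.
 */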
static void check_bo(int fd, uint32_t handle)
{
	uint32_t *map;
	int i;

	igt_debug("Verifying result\n");
	map = gem_mmap__cpu(fd, handle, 0, 4096, PROT_READ);
	gem_set_domain(fd, handle, I915_GEM_DOMAIN_CPU, 0);
	for (i = 0; i < 1024; i++)
		igt_assert_eq(map[i], i);
	munmap(map, 4096);
}

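/*
 * Submit 1024 batches back to back, each patching a different dword of
 * the same scratch object, hopping randomly between engines. The target
 * is still busy from the previous submission, so the kernel has to
 * process each relocation against an active object; the final contents
 * are verified with check_bo().
 */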
static void active(int fd, unsigned engine)
{
	const int gen = intel_gen(intel_get_drm_devid(fd));
	struct drm_i915_gem_relocation_entry reloc;
	struct drm_i915_gem_exec_object2 obj[2];
	struct drm_i915_gem_execbuffer2 execbuf;
	unsigned engines[16];
	unsigned nengine;
	int pass;

	nengine = 0;
	if (engine == ALL_ENGINES) {
		for_each_physical_engine(fd, engine) {
			if (gem_can_store_dword(fd, engine))
				engines[nengine++] = engine;
		}
	} else {
		igt_require(gem_has_ring(fd, engine));
		igt_require(gem_can_store_dword(fd, engine));
		engines[nengine++] = engine;
	}
	igt_require(nengine);

	memset(obj, 0, sizeof(obj));
	obj[0].handle = gem_create(fd, 4096);
	obj[1].handle = gem_create(fd, 64*1024);
	obj[1].relocs_ptr = to_user_pointer(&reloc);
	obj[1].relocation_count = 1;

	memset(&reloc, 0, sizeof(reloc));
	reloc.offset = sizeof(uint32_t);
	reloc.target_handle = obj[0].handle;
	if (gen < 8 && gen >= 4)
		reloc.offset += sizeof(uint32_t);
	reloc.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
	reloc.write_domain = I915_GEM_DOMAIN_INSTRUCTION;

	memset(&execbuf, 0, sizeof(execbuf));
	execbuf.buffers_ptr = to_user_pointer(obj);
	execbuf.buffer_count = 2;
	if (gen < 6)
		execbuf.flags |= I915_EXEC_SECURE;

	for (pass = 0; pass < 1024; pass++) {
		uint32_t batch[16];
		int i = 0;
		batch[i] = MI_STORE_DWORD_IMM | (gen < 6 ? 1 << 22 : 0);
		if (gen >= 8) {
			batch[++i] = 0; /* address placeholders, filled */
			batch[++i] = 0; /* in by the relocation */
		} else if (gen >= 4) {
			batch[++i] = 0;
			batch[++i] = 0;
		} else {
			batch[i]--;
			batch[++i] = 0;
		}
		batch[++i] = pass;
		batch[++i] = MI_BATCH_BUFFER_END;
		gem_write(fd, obj[1].handle, pass*sizeof(batch),
			  batch, sizeof(batch));
	}

	for (pass = 0; pass < 1024; pass++) {
		reloc.delta = 4*pass;
		reloc.presumed_offset = -1;
		execbuf.flags &= ~ENGINE_MASK;
		execbuf.flags |= engines[rand() % nengine];
		gem_execbuf(fd, &execbuf);
		execbuf.batch_start_offset += 64;
		reloc.offset += 64;
	}
	gem_close(fd, obj[1].handle);

	check_bo(fd, obj[0].handle);
	gem_close(fd, obj[0].handle);
}

static bool has_64b_reloc(int fd)
{
	return intel_gen(intel_get_drm_devid(fd)) >= 8;
}

#define NORELOC 1
#define ACTIVE 2
#define HANG 4
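/*
 * Core relocation sanity check. The relocated dword is deliberately
 * walked across a page boundary (reloc_offset runs from 4096-8 to
 * 4096+8) to catch per-page handling bugs. 'before' and 'after' select
 * which mmap domain is used to scribble over the address before
 * execbuf and to read it back afterwards; ACTIVE keeps the object busy
 * under a spinner (optionally left to HANG), and NORELOC re-runs with
 * the presumed offset already correct, in which case the kernel should
 * be able to skip the relocation entirely.
 */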
static void basic_reloc(int fd, unsigned before, unsigned after, unsigned flags)
{
#define OBJSZ 8192
	struct drm_i915_gem_relocation_entry reloc;
	struct drm_i915_gem_exec_object2 obj;
	struct drm_i915_gem_execbuffer2 execbuf;
	uint64_t address_mask = has_64b_reloc(fd) ? ~(uint64_t)0 : ~(uint32_t)0;
	const uint32_t bbe = MI_BATCH_BUFFER_END;
	unsigned int reloc_offset;

	memset(&obj, 0, sizeof(obj));
	obj.handle = gem_create(fd, OBJSZ);
	obj.relocs_ptr = to_user_pointer(&reloc);
	obj.relocation_count = 1;
	gem_write(fd, obj.handle, 0, &bbe, sizeof(bbe));

	memset(&execbuf, 0, sizeof(execbuf));
	execbuf.buffers_ptr = to_user_pointer(&obj);
	execbuf.buffer_count = 1;
	if (flags & NORELOC)
		execbuf.flags |= LOCAL_I915_EXEC_NO_RELOC;

	for (reloc_offset = 4096 - 8; reloc_offset <= 4096 + 8; reloc_offset += 4) {
		igt_spin_t *spin = NULL;
		uint32_t trash = 0;
		uint64_t offset;

		obj.offset = -1;

		memset(&reloc, 0, sizeof(reloc));
		reloc.offset = reloc_offset;
		reloc.target_handle = obj.handle;
		reloc.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
		reloc.presumed_offset = -1;

		if (before) {
			char *wc;

			if (before == I915_GEM_DOMAIN_CPU)
				wc = gem_mmap__cpu(fd, obj.handle, 0, OBJSZ, PROT_WRITE);
			else if (before == I915_GEM_DOMAIN_GTT)
				wc = gem_mmap__gtt(fd, obj.handle, OBJSZ, PROT_WRITE);
			else if (before == I915_GEM_DOMAIN_WC)
				wc = gem_mmap__wc(fd, obj.handle, 0, OBJSZ, PROT_WRITE);
			else
				igt_assert(0);
			gem_set_domain(fd, obj.handle, before, before);
			offset = -1;
			memcpy(wc + reloc_offset, &offset, sizeof(offset));
			munmap(wc, OBJSZ);
		} else {
			offset = -1;
			gem_write(fd, obj.handle, reloc_offset, &offset, sizeof(offset));
		}

		if (flags & ACTIVE) {
			spin = igt_spin_new(fd,
					    .engine = I915_EXEC_DEFAULT,
					    .dependency = obj.handle);
			if (!(flags & HANG))
				igt_spin_set_timeout(spin, NSEC_PER_SEC/100);
			igt_assert(gem_bo_busy(fd, obj.handle));
		}

		gem_execbuf(fd, &execbuf);

		if (after) {
			char *wc;

			if (after == I915_GEM_DOMAIN_CPU)
				wc = gem_mmap__cpu(fd, obj.handle, 0, OBJSZ, PROT_READ);
			else if (after == I915_GEM_DOMAIN_GTT)
				wc = gem_mmap__gtt(fd, obj.handle, OBJSZ, PROT_READ);
			else if (after == I915_GEM_DOMAIN_WC)
				wc = gem_mmap__wc(fd, obj.handle, 0, OBJSZ, PROT_READ);
			else
				igt_assert(0);
			gem_set_domain(fd, obj.handle, after, 0);
			offset = ~reloc.presumed_offset & address_mask;
			memcpy(&offset, wc + reloc_offset, has_64b_reloc(fd) ? 8 : 4);
			munmap(wc, OBJSZ);
		} else {
			offset = ~reloc.presumed_offset & address_mask;
			gem_read(fd, obj.handle, reloc_offset, &offset, has_64b_reloc(fd) ? 8 : 4);
		}

		if (reloc.presumed_offset == -1)
			igt_warn("reloc.presumed_offset == -1\n");
		else
			igt_assert_eq_u64(reloc.presumed_offset, offset);
		igt_assert_eq_u64(obj.offset, offset);

		igt_spin_free(fd, spin);

		/* Simulate relocation */
		if (flags & NORELOC) {
			obj.offset += OBJSZ;
			reloc.presumed_offset += OBJSZ;
		} else {
			trash = obj.handle;
			obj.handle = gem_create(fd, OBJSZ);
			gem_write(fd, obj.handle, 0, &bbe, sizeof(bbe));
			reloc.target_handle = obj.handle;
		}

		if (before) {
			char *wc;

			if (before == I915_GEM_DOMAIN_CPU)
				wc = gem_mmap__cpu(fd, obj.handle, 0, OBJSZ, PROT_WRITE);
			else if (before == I915_GEM_DOMAIN_GTT)
				wc = gem_mmap__gtt(fd, obj.handle, OBJSZ, PROT_WRITE);
			else if (before == I915_GEM_DOMAIN_WC)
				wc = gem_mmap__wc(fd, obj.handle, 0, OBJSZ, PROT_WRITE);
			else
				igt_assert(0);
			gem_set_domain(fd, obj.handle, before, before);
			memcpy(wc + reloc_offset, &reloc.presumed_offset, sizeof(reloc.presumed_offset));
			munmap(wc, OBJSZ);
		} else {
			gem_write(fd, obj.handle, reloc_offset, &reloc.presumed_offset, sizeof(reloc.presumed_offset));
		}

		if (flags & ACTIVE) {
			spin = igt_spin_new(fd,
					    .engine = I915_EXEC_DEFAULT,
					    .dependency = obj.handle);
			if (!(flags & HANG))
				igt_spin_set_timeout(spin, NSEC_PER_SEC/100);
			igt_assert(gem_bo_busy(fd, obj.handle));
		}

		gem_execbuf(fd, &execbuf);

		if (after) {
			char *wc;

			if (after == I915_GEM_DOMAIN_CPU)
				wc = gem_mmap__cpu(fd, obj.handle, 0, OBJSZ, PROT_READ);
			else if (after == I915_GEM_DOMAIN_GTT)
				wc = gem_mmap__gtt(fd, obj.handle, OBJSZ, PROT_READ);
			else if (after == I915_GEM_DOMAIN_WC)
				wc = gem_mmap__wc(fd, obj.handle, 0, OBJSZ, PROT_READ);
			else
				igt_assert(0);
			gem_set_domain(fd, obj.handle, after, 0);
			offset = ~reloc.presumed_offset & address_mask;
			memcpy(&offset, wc + reloc_offset, has_64b_reloc(fd) ? 8 : 4);
			munmap(wc, OBJSZ);
		} else {
			offset = ~reloc.presumed_offset & address_mask;
			gem_read(fd, obj.handle, reloc_offset, &offset, has_64b_reloc(fd) ? 8 : 4);
		}

		if (reloc.presumed_offset == -1)
			igt_warn("reloc.presumed_offset == -1\n");
		else
			igt_assert_eq_u64(reloc.presumed_offset, offset);
		igt_assert_eq_u64(obj.offset, offset);

		igt_spin_free(fd, spin);
		if (trash)
			gem_close(fd, trash);
	}

	gem_close(fd, obj.handle);
}

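/*
 * On gen8+ the upper bits of a 48-bit GPU address must be a sign
 * extension of bit 47 ("canonical form", much like x86-64 virtual
 * addresses), so offsets pinned in the upper half of the address space
 * have to be canonicalised before being handed to execbuf.
 */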
static inline uint64_t sign_extend(uint64_t x, int index)
{
	int shift = 63 - index;
	return (int64_t)(x << shift) >> shift;
}

static uint64_t gen8_canonical_address(uint64_t address)
{
	return sign_extend(address, 47);
}

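/*
 * Scatter softpinned targets across the whole GTT: one object ending
 * just below and one starting exactly at each power-of-two address
 * boundary, up to the aperture size, then relocate a single batch
 * against all of them and verify every patched address. Boundaries the
 * kernel refuses to pin are silently skipped.
 */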
static void basic_range(int fd, unsigned flags)
{
	struct drm_i915_gem_relocation_entry reloc[128];
	struct drm_i915_gem_exec_object2 obj[128];
	struct drm_i915_gem_execbuffer2 execbuf;
	uint64_t address_mask = has_64b_reloc(fd) ? ~(uint64_t)0 : ~(uint32_t)0;
	uint64_t gtt_size = gem_aperture_size(fd);
	const uint32_t bbe = MI_BATCH_BUFFER_END;
	igt_spin_t *spin = NULL;
	int count, n;

	igt_require(gem_has_softpin(fd));

	for (count = 12; gtt_size >> (count + 1); count++)
		;

	count -= 12;

	memset(obj, 0, sizeof(obj));
	memset(reloc, 0, sizeof(reloc));
	memset(&execbuf, 0, sizeof(execbuf));

	n = 0;
	for (int i = 0; i <= count; i++) {
		obj[n].handle = gem_create(fd, 4096);
		obj[n].offset = (1ull << (i + 12)) - 4096;
		obj[n].offset = gen8_canonical_address(obj[n].offset);
		obj[n].flags = EXEC_OBJECT_PINNED | EXEC_OBJECT_SUPPORTS_48B_ADDRESS;
		gem_write(fd, obj[n].handle, 0, &bbe, sizeof(bbe));
		execbuf.buffers_ptr = to_user_pointer(&obj[n]);
		execbuf.buffer_count = 1;
		if (__gem_execbuf(fd, &execbuf))
			continue;

		igt_debug("obj[%d] handle=%d, address=%llx\n",
			  n, obj[n].handle, (long long)obj[n].offset);

		reloc[n].offset = 8 * (n + 1);
		reloc[n].target_handle = obj[n].handle;
		reloc[n].read_domains = I915_GEM_DOMAIN_INSTRUCTION;
		reloc[n].presumed_offset = -1;
		n++;
	}
	for (int i = 1; i < count; i++) {
		obj[n].handle = gem_create(fd, 4096);
		obj[n].offset = 1ull << (i + 12);
		obj[n].offset = gen8_canonical_address(obj[n].offset);
		obj[n].flags = EXEC_OBJECT_PINNED | EXEC_OBJECT_SUPPORTS_48B_ADDRESS;
		gem_write(fd, obj[n].handle, 0, &bbe, sizeof(bbe));
		execbuf.buffers_ptr = to_user_pointer(&obj[n]);
		execbuf.buffer_count = 1;
		if (__gem_execbuf(fd, &execbuf))
			continue;

		igt_debug("obj[%d] handle=%d, address=%llx\n",
			  n, obj[n].handle, (long long)obj[n].offset);

		reloc[n].offset = 8 * (n + 1);
		reloc[n].target_handle = obj[n].handle;
		reloc[n].read_domains = I915_GEM_DOMAIN_INSTRUCTION;
		reloc[n].presumed_offset = -1;
		n++;
	}
	igt_require(n);

	obj[n].handle = gem_create(fd, 4096);
	obj[n].relocs_ptr = to_user_pointer(reloc);
	obj[n].relocation_count = n;
	gem_write(fd, obj[n].handle, 0, &bbe, sizeof(bbe));

	execbuf.buffers_ptr = to_user_pointer(obj);
	execbuf.buffer_count = n + 1;

	if (flags & ACTIVE) {
		spin = igt_spin_new(fd, .dependency = obj[n].handle);
		if (!(flags & HANG))
			igt_spin_set_timeout(spin, NSEC_PER_SEC/100);
		igt_assert(gem_bo_busy(fd, obj[n].handle));
	}

	gem_execbuf(fd, &execbuf);
	igt_spin_free(fd, spin);

	for (int i = 0; i < n; i++) {
		uint64_t offset;

		offset = ~reloc[i].presumed_offset & address_mask;
		gem_read(fd, obj[n].handle, reloc[i].offset,
			 &offset, has_64b_reloc(fd) ? 8 : 4);

		igt_debug("obj[%d] handle=%d, offset=%llx, found=%llx, presumed=%llx\n",
			  i, obj[i].handle,
			  (long long)obj[i].offset,
			  (long long)offset,
			  (long long)reloc[i].presumed_offset);

		igt_assert_eq_u64(obj[i].offset, offset);
		if (reloc[i].presumed_offset == -1)
			igt_warn("reloc.presumed_offset == -1\n");
		else
			igt_assert_eq_u64(reloc[i].presumed_offset, offset);
	}

	for (int i = 0; i <= n; i++)
		gem_close(fd, obj[i].handle);
}

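/*
 * Minimal softpin check: let the kernel place one object, then pin a
 * second object at that same address with EXEC_OBJECT_PINNED, forcing
 * the first to move. The pinned object must end up exactly where we
 * asked.
 */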
static void basic_softpin(int fd)
{
	struct drm_i915_gem_exec_object2 obj[2];
	struct drm_i915_gem_execbuffer2 execbuf;
	uint64_t offset;
	uint32_t bbe = MI_BATCH_BUFFER_END;

	igt_require(gem_has_softpin(fd));

	memset(obj, 0, sizeof(obj));
	obj[1].handle = gem_create(fd, 4096);
	gem_write(fd, obj[1].handle, 0, &bbe, sizeof(bbe));

	memset(&execbuf, 0, sizeof(execbuf));
	execbuf.buffers_ptr = to_user_pointer(&obj[1]);
	execbuf.buffer_count = 1;
	gem_execbuf(fd, &execbuf);

	offset = obj[1].offset;

	obj[0].handle = gem_create(fd, 4096);
	obj[0].offset = obj[1].offset;
	obj[0].flags = EXEC_OBJECT_PINNED;

	execbuf.buffers_ptr = to_user_pointer(&obj[0]);
	execbuf.buffer_count = 2;

	gem_execbuf(fd, &execbuf);
	igt_assert_eq_u64(obj[0].offset, offset);

	gem_close(fd, obj[0].handle);
	gem_close(fd, obj[1].handle);
}

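/*
 * Subtest enumeration: every before/after mmap-domain combination is
 * crossed with the idle/noreloc/active/hang variants, followed by the
 * mmap/readonly/cpu/wc/gtt reloc-array sources for sizes from 4KiB to
 * 4GiB, the GPU-written reloc test, and the per-engine active tests.
 */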
igt_main
{
	const struct mode {
		const char *name;
		unsigned before, after;
	} modes[] = {
		{ "cpu", I915_GEM_DOMAIN_CPU, I915_GEM_DOMAIN_CPU },
		{ "gtt", I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT },
		{ "wc", I915_GEM_DOMAIN_WC, I915_GEM_DOMAIN_WC },
		{ "cpu-gtt", I915_GEM_DOMAIN_CPU, I915_GEM_DOMAIN_GTT },
		{ "gtt-cpu", I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_CPU },
		{ "cpu-wc", I915_GEM_DOMAIN_CPU, I915_GEM_DOMAIN_WC },
		{ "wc-cpu", I915_GEM_DOMAIN_WC, I915_GEM_DOMAIN_CPU },
		{ "gtt-wc", I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_WC },
		{ "wc-gtt", I915_GEM_DOMAIN_WC, I915_GEM_DOMAIN_GTT },
		{ "cpu-read", I915_GEM_DOMAIN_CPU, 0 },
		{ "gtt-read", I915_GEM_DOMAIN_GTT, 0 },
		{ "wc-read", I915_GEM_DOMAIN_WC, 0 },
		{ "write-cpu", 0, I915_GEM_DOMAIN_CPU },
		{ "write-gtt", 0, I915_GEM_DOMAIN_GTT },
		{ "write-wc", 0, I915_GEM_DOMAIN_WC },
		{ "write-read", 0, 0 },
		{ },
	}, *m;
	const struct flags {
		const char *name;
		unsigned flags;
		bool basic;
	} flags[] = {
		{ "", 0, true },
		{ "-noreloc", NORELOC, true },
		{ "-active", ACTIVE, true },
		{ "-hang", ACTIVE | HANG },
		{ },
	}, *f;
	uint64_t size;
	int fd = -1;

	igt_fixture {
		fd = drm_open_driver_master(DRIVER_INTEL);
		igt_require_gem(fd);
	}

	for (f = flags; f->name; f++) {
		igt_hang_t hang;

		igt_subtest_group {
			igt_fixture {
				if (f->flags & HANG)
					hang = igt_allow_hang(fd, 0, 0);
			}

			for (m = modes; m->name; m++) {
				igt_subtest_f("%s%s%s",
					      f->basic ? "basic-" : "",
					      m->name,
					      f->name) {
					if ((m->before | m->after) & I915_GEM_DOMAIN_WC)
						igt_require(gem_mmap__has_wc(fd));
					basic_reloc(fd, m->before, m->after, f->flags);
				}
			}

			if (!(f->flags & NORELOC)) {
				igt_subtest_f("%srange%s",
					      f->basic ? "basic-" : "", f->name)
					basic_range(fd, f->flags);
			}

			igt_fixture {
				if (f->flags & HANG)
					igt_disallow_hang(fd, hang);
			}
		}
	}

	igt_subtest("basic-softpin")
		basic_softpin(fd);

	for (size = 4096; size <= 4ull*1024*1024*1024; size <<= 1) {
		igt_subtest_f("mmap-%u", find_last_set(size) - 1)
			from_mmap(fd, size, MEM);
		igt_subtest_f("readonly-%u", find_last_set(size) - 1)
			from_mmap(fd, size, MEM | RO);
		igt_subtest_f("cpu-%u", find_last_set(size) - 1)
			from_mmap(fd, size, CPU);
		igt_subtest_f("wc-%u", find_last_set(size) - 1) {
			igt_require(gem_mmap__has_wc(fd));
			from_mmap(fd, size, WC);
		}
		igt_subtest_f("gtt-%u", find_last_set(size) - 1)
			from_mmap(fd, size, GTT);
	}

	igt_subtest("gpu")
		from_gpu(fd);

	igt_subtest("active")
		active(fd, ALL_ENGINES);
	for (const struct intel_execution_engine *e = intel_execution_engines;
	     e->name; e++) {
		igt_subtest_f("active-%s", e->name)
			active(fd, e->exec_id | e->flags);
	}
	igt_fixture
		close(fd);
}