1 /*
2  * Copyright © 2016 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  */
24 
25 #include <linux/list_sort.h>
26 #include <linux/prime_numbers.h>
27 
28 #include "gem/i915_gem_context.h"
29 #include "gem/selftests/mock_context.h"
30 #include "gt/intel_context.h"
31 
32 #include "i915_random.h"
33 #include "i915_selftest.h"
34 
35 #include "mock_drm.h"
36 #include "mock_gem_device.h"
37 #include "mock_gtt.h"
38 #include "igt_flush_test.h"
39 
40 static void cleanup_freed_objects(struct drm_i915_private *i915)
41 {
42 	i915_gem_drain_freed_objects(i915);
43 }
44 
45 static void fake_free_pages(struct drm_i915_gem_object *obj,
46 			    struct sg_table *pages)
47 {
48 	sg_free_table(pages);
49 	kfree(pages);
50 }
51 
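/*
 * Fabricate backing storage for a GEM object without allocating real
 * memory: each scatterlist segment points at a fixed bias pfn and its
 * dma address is filled in directly, so only the GTT plumbing is
 * exercised.
 */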
52 static int fake_get_pages(struct drm_i915_gem_object *obj)
53 {
54 #define GFP (GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY)
55 #define PFN_BIAS 0x1000
56 	struct sg_table *pages;
57 	struct scatterlist *sg;
58 	unsigned int sg_page_sizes;
59 	typeof(obj->base.size) rem;
60 
61 	pages = kmalloc(sizeof(*pages), GFP);
62 	if (!pages)
63 		return -ENOMEM;
64 
65 	rem = round_up(obj->base.size, BIT(31)) >> 31;
66 	if (sg_alloc_table(pages, rem, GFP)) {
67 		kfree(pages);
68 		return -ENOMEM;
69 	}
70 
71 	sg_page_sizes = 0;
72 	rem = obj->base.size;
73 	for (sg = pages->sgl; sg; sg = sg_next(sg)) {
74 		unsigned long len = min_t(typeof(rem), rem, BIT(31));
75 
76 		GEM_BUG_ON(!len);
77 		sg_set_page(sg, pfn_to_page(PFN_BIAS), len, 0);
78 		sg_dma_address(sg) = page_to_phys(sg_page(sg));
79 		sg_dma_len(sg) = len;
80 		sg_page_sizes |= len;
81 
82 		rem -= len;
83 	}
84 	GEM_BUG_ON(rem);
85 
86 	__i915_gem_object_set_pages(obj, pages, sg_page_sizes);
87 
88 	return 0;
89 #undef GFP
90 }
91 
92 static void fake_put_pages(struct drm_i915_gem_object *obj,
93 			   struct sg_table *pages)
94 {
95 	fake_free_pages(obj, pages);
96 	obj->mm.dirty = false;
97 }
98 
99 static const struct drm_i915_gem_object_ops fake_ops = {
100 	.name = "fake-gem",
101 	.flags = I915_GEM_OBJECT_IS_SHRINKABLE,
102 	.get_pages = fake_get_pages,
103 	.put_pages = fake_put_pages,
104 };
105 
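/*
 * Create a GEM object backed by the fake page provider above, so the
 * tests can bind objects covering huge address ranges without
 * consuming a matching amount of system memory.
 */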
106 static struct drm_i915_gem_object *
107 fake_dma_object(struct drm_i915_private *i915, u64 size)
108 {
109 	static struct lock_class_key lock_class;
110 	struct drm_i915_gem_object *obj;
111 
112 	GEM_BUG_ON(!size);
113 	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
114 
115 	if (overflows_type(size, obj->base.size))
116 		return ERR_PTR(-E2BIG);
117 
118 	obj = i915_gem_object_alloc();
119 	if (!obj)
120 		goto err;
121 
122 	drm_gem_private_object_init(&i915->drm, &obj->base, size);
123 	i915_gem_object_init(obj, &fake_ops, &lock_class);
124 
125 	i915_gem_object_set_volatile(obj);
126 
127 	obj->write_domain = I915_GEM_DOMAIN_CPU;
128 	obj->read_domains = I915_GEM_DOMAIN_CPU;
129 	obj->cache_level = I915_CACHE_NONE;
130 
131 	/* Preallocate the "backing storage" */
132 	if (i915_gem_object_pin_pages(obj))
133 		goto err_obj;
134 
135 	i915_gem_object_unpin_pages(obj);
136 	return obj;
137 
138 err_obj:
139 	i915_gem_object_put(obj);
140 err:
141 	return ERR_PTR(-ENOMEM);
142 }
143 
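/*
 * Allocate the page directories/tables for ever larger ranges of a
 * fresh ppgtt, first in one go and then incrementally, checking we can
 * cover the address space (capped by available physical memory).
 */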
144 static int igt_ppgtt_alloc(void *arg)
145 {
146 	struct drm_i915_private *dev_priv = arg;
147 	struct i915_ppgtt *ppgtt;
148 	u64 size, last, limit;
149 	int err = 0;
150 
151 	/* Allocate a ppgtt and try to fill the entire range */
152 
153 	if (!HAS_PPGTT(dev_priv))
154 		return 0;
155 
156 	ppgtt = i915_ppgtt_create(&dev_priv->gt);
157 	if (IS_ERR(ppgtt))
158 		return PTR_ERR(ppgtt);
159 
160 	if (!ppgtt->vm.allocate_va_range)
161 		goto err_ppgtt_cleanup;
162 
163 	/*
164 	 * While we only allocate the page tables here and so we could
165 	 * address a much larger GTT than we could actually fit into
166 	 * RAM, a practical limit is the number of physical pages in the system.
167 	 * This should ensure that we do not run into the oomkiller during
168 	 * the test and take down the machine wilfully.
169 	 */
170 	limit = totalram_pages() << PAGE_SHIFT;
171 	limit = min(ppgtt->vm.total, limit);
172 
173 	/* Check we can allocate the entire range */
174 	for (size = 4096; size <= limit; size <<= 2) {
175 		struct i915_vm_pt_stash stash = {};
176 
177 		err = i915_vm_alloc_pt_stash(&ppgtt->vm, &stash, size);
178 		if (err)
179 			goto err_ppgtt_cleanup;
180 
181 		err = i915_vm_pin_pt_stash(&ppgtt->vm, &stash);
182 		if (err) {
183 			i915_vm_free_pt_stash(&ppgtt->vm, &stash);
184 			goto err_ppgtt_cleanup;
185 		}
186 
187 		ppgtt->vm.allocate_va_range(&ppgtt->vm, &stash, 0, size);
188 		cond_resched();
189 
190 		ppgtt->vm.clear_range(&ppgtt->vm, 0, size);
191 
192 		i915_vm_free_pt_stash(&ppgtt->vm, &stash);
193 	}
194 
195 	/* Check we can incrementally allocate the entire range */
196 	for (last = 0, size = 4096; size <= limit; last = size, size <<= 2) {
197 		struct i915_vm_pt_stash stash = {};
198 
199 		err = i915_vm_alloc_pt_stash(&ppgtt->vm, &stash, size - last);
200 		if (err)
201 			goto err_ppgtt_cleanup;
202 
203 		err = i915_vm_pin_pt_stash(&ppgtt->vm, &stash);
204 		if (err) {
205 			i915_vm_free_pt_stash(&ppgtt->vm, &stash);
206 			goto err_ppgtt_cleanup;
207 		}
208 
209 		ppgtt->vm.allocate_va_range(&ppgtt->vm, &stash,
210 					    last, size - last);
211 		cond_resched();
212 
213 		i915_vm_free_pt_stash(&ppgtt->vm, &stash);
214 	}
215 
216 err_ppgtt_cleanup:
217 	i915_vm_put(&ppgtt->vm);
218 	return err;
219 }
220 
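/*
 * Bypass the vma/drm_mm layers and drive the vm hooks directly
 * (allocate_va_range, insert_entries, clear_range), placing fake
 * objects of increasing size at randomised offsets within the hole.
 */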
221 static int lowlevel_hole(struct i915_address_space *vm,
222 			 u64 hole_start, u64 hole_end,
223 			 unsigned long end_time)
224 {
225 	I915_RND_STATE(seed_prng);
226 	struct i915_vma *mock_vma;
227 	unsigned int size;
228 
229 	mock_vma = kzalloc(sizeof(*mock_vma), GFP_KERNEL);
230 	if (!mock_vma)
231 		return -ENOMEM;
232 
233 	/* Keep creating larger objects until one cannot fit into the hole */
234 	for (size = 12; (hole_end - hole_start) >> size; size++) {
235 		I915_RND_SUBSTATE(prng, seed_prng);
236 		struct drm_i915_gem_object *obj;
237 		unsigned int *order, count, n;
238 		u64 hole_size;
239 
240 		hole_size = (hole_end - hole_start) >> size;
241 		if (hole_size > KMALLOC_MAX_SIZE / sizeof(u32))
242 			hole_size = KMALLOC_MAX_SIZE / sizeof(u32);
243 		count = hole_size >> 1;
244 		if (!count) {
245 			pr_debug("%s: hole is too small [%llx - %llx] >> %d: %lld\n",
246 				 __func__, hole_start, hole_end, size, hole_size);
247 			break;
248 		}
249 
250 		do {
251 			order = i915_random_order(count, &prng);
252 			if (order)
253 				break;
254 		} while (count >>= 1);
255 		if (!count) {
256 			kfree(mock_vma);
257 			return -ENOMEM;
258 		}
259 		GEM_BUG_ON(!order);
260 
261 		GEM_BUG_ON(count * BIT_ULL(size) > vm->total);
262 		GEM_BUG_ON(hole_start + count * BIT_ULL(size) > hole_end);
263 
264 		/* Ignore allocation failures (i.e. don't report them as
265 		 * a test failure) as we are purposefully allocating very
266 		 * large objects without checking that we have sufficient
267 		 * memory. We expect to hit -ENOMEM.
268 		 */
269 
270 		obj = fake_dma_object(vm->i915, BIT_ULL(size));
271 		if (IS_ERR(obj)) {
272 			kfree(order);
273 			break;
274 		}
275 
276 		GEM_BUG_ON(obj->base.size != BIT_ULL(size));
277 
278 		if (i915_gem_object_pin_pages(obj)) {
279 			i915_gem_object_put(obj);
280 			kfree(order);
281 			break;
282 		}
283 
284 		for (n = 0; n < count; n++) {
285 			u64 addr = hole_start + order[n] * BIT_ULL(size);
286 			intel_wakeref_t wakeref;
287 
288 			GEM_BUG_ON(addr + BIT_ULL(size) > vm->total);
289 
290 			if (igt_timeout(end_time,
291 					"%s timed out before %d/%d\n",
292 					__func__, n, count)) {
293 				hole_end = hole_start; /* quit */
294 				break;
295 			}
296 
297 			if (vm->allocate_va_range) {
298 				struct i915_vm_pt_stash stash = {};
299 
300 				if (i915_vm_alloc_pt_stash(vm, &stash,
301 							   BIT_ULL(size)))
302 					break;
303 
304 				if (i915_vm_pin_pt_stash(vm, &stash)) {
305 					i915_vm_free_pt_stash(vm, &stash);
306 					break;
307 				}
308 
309 				vm->allocate_va_range(vm, &stash,
310 						      addr, BIT_ULL(size));
311 
312 				i915_vm_free_pt_stash(vm, &stash);
313 			}
314 
315 			mock_vma->pages = obj->mm.pages;
316 			mock_vma->node.size = BIT_ULL(size);
317 			mock_vma->node.start = addr;
318 
319 			with_intel_runtime_pm(vm->gt->uncore->rpm, wakeref)
320 				vm->insert_entries(vm, mock_vma,
321 						   I915_CACHE_NONE, 0);
322 		}
323 		count = n;
324 
325 		i915_random_reorder(order, count, &prng);
326 		for (n = 0; n < count; n++) {
327 			u64 addr = hole_start + order[n] * BIT_ULL(size);
328 			intel_wakeref_t wakeref;
329 
330 			GEM_BUG_ON(addr + BIT_ULL(size) > vm->total);
331 			with_intel_runtime_pm(vm->gt->uncore->rpm, wakeref)
332 				vm->clear_range(vm, addr, BIT_ULL(size));
333 		}
334 
335 		i915_gem_object_unpin_pages(obj);
336 		i915_gem_object_put(obj);
337 
338 		kfree(order);
339 
340 		cleanup_freed_objects(vm->i915);
341 	}
342 
343 	kfree(mock_vma);
344 	return 0;
345 }
346 
347 static void close_object_list(struct list_head *objects,
348 			      struct i915_address_space *vm)
349 {
350 	struct drm_i915_gem_object *obj, *on;
351 	int ignored;
352 
353 	list_for_each_entry_safe(obj, on, objects, st_link) {
354 		struct i915_vma *vma;
355 
356 		vma = i915_vma_instance(obj, vm, NULL);
357 		if (!IS_ERR(vma))
358 			ignored = i915_vma_unbind(vma);
359 
360 		list_del(&obj->st_link);
361 		i915_gem_object_put(obj);
362 	}
363 }
364 
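/*
 * Bind many vma of varying (prime-stepped) sizes, packing them inwards
 * from both ends of the hole and verifying each node lands at the
 * requested offset, before unbinding them all and repeating from the
 * other end.
 */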
365 static int fill_hole(struct i915_address_space *vm,
366 		     u64 hole_start, u64 hole_end,
367 		     unsigned long end_time)
368 {
369 	const u64 hole_size = hole_end - hole_start;
370 	struct drm_i915_gem_object *obj;
371 	const unsigned long max_pages =
372 		min_t(u64, ULONG_MAX - 1, hole_size/2 >> PAGE_SHIFT);
373 	const unsigned long max_step = max(int_sqrt(max_pages), 2UL);
374 	unsigned long npages, prime, flags;
375 	struct i915_vma *vma;
376 	LIST_HEAD(objects);
377 	int err;
378 
379 	/* Try binding many VMAs, working inwards from either edge */
380 
381 	flags = PIN_OFFSET_FIXED | PIN_USER;
382 	if (i915_is_ggtt(vm))
383 		flags |= PIN_GLOBAL;
384 
385 	for_each_prime_number_from(prime, 2, max_step) {
386 		for (npages = 1; npages <= max_pages; npages *= prime) {
387 			const u64 full_size = npages << PAGE_SHIFT;
388 			const struct {
389 				const char *name;
390 				u64 offset;
391 				int step;
392 			} phases[] = {
393 				{ "top-down", hole_end, -1, },
394 				{ "bottom-up", hole_start, 1, },
395 				{ }
396 			}, *p;
397 
398 			obj = fake_dma_object(vm->i915, full_size);
399 			if (IS_ERR(obj))
400 				break;
401 
402 			list_add(&obj->st_link, &objects);
403 
404 			/* Align differing sized objects against the edges, and
405 			 * check we don't walk off into the void when binding
406 			 * them into the GTT.
407 			 */
408 			for (p = phases; p->name; p++) {
409 				u64 offset;
410 
411 				offset = p->offset;
412 				list_for_each_entry(obj, &objects, st_link) {
413 					vma = i915_vma_instance(obj, vm, NULL);
414 					if (IS_ERR(vma))
415 						continue;
416 
417 					if (p->step < 0) {
418 						if (offset < hole_start + obj->base.size)
419 							break;
420 						offset -= obj->base.size;
421 					}
422 
423 					err = i915_vma_pin(vma, 0, 0, offset | flags);
424 					if (err) {
425 						pr_err("%s(%s) pin (forward) failed with err=%d on size=%lu pages (prime=%lu), offset=%llx\n",
426 						       __func__, p->name, err, npages, prime, offset);
427 						goto err;
428 					}
429 
430 					if (!drm_mm_node_allocated(&vma->node) ||
431 					    i915_vma_misplaced(vma, 0, 0, offset | flags)) {
432 						pr_err("%s(%s) (forward) insert failed: vma.node=%llx + %llx [allocated? %d], expected offset %llx\n",
433 						       __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),
434 						       offset);
435 						err = -EINVAL;
436 						goto err;
437 					}
438 
439 					i915_vma_unpin(vma);
440 
441 					if (p->step > 0) {
442 						if (offset + obj->base.size > hole_end)
443 							break;
444 						offset += obj->base.size;
445 					}
446 				}
447 
448 				offset = p->offset;
449 				list_for_each_entry(obj, &objects, st_link) {
450 					vma = i915_vma_instance(obj, vm, NULL);
451 					if (IS_ERR(vma))
452 						continue;
453 
454 					if (p->step < 0) {
455 						if (offset < hole_start + obj->base.size)
456 							break;
457 						offset -= obj->base.size;
458 					}
459 
460 					if (!drm_mm_node_allocated(&vma->node) ||
461 					    i915_vma_misplaced(vma, 0, 0, offset | flags)) {
462 						pr_err("%s(%s) (forward) moved vma.node=%llx + %llx, expected offset %llx\n",
463 						       __func__, p->name, vma->node.start, vma->node.size,
464 						       offset);
465 						err = -EINVAL;
466 						goto err;
467 					}
468 
469 					err = i915_vma_unbind(vma);
470 					if (err) {
471 						pr_err("%s(%s) (forward) unbind of vma.node=%llx + %llx failed with err=%d\n",
472 						       __func__, p->name, vma->node.start, vma->node.size,
473 						       err);
474 						goto err;
475 					}
476 
477 					if (p->step > 0) {
478 						if (offset + obj->base.size > hole_end)
479 							break;
480 						offset += obj->base.size;
481 					}
482 				}
483 
484 				offset = p->offset;
485 				list_for_each_entry_reverse(obj, &objects, st_link) {
486 					vma = i915_vma_instance(obj, vm, NULL);
487 					if (IS_ERR(vma))
488 						continue;
489 
490 					if (p->step < 0) {
491 						if (offset < hole_start + obj->base.size)
492 							break;
493 						offset -= obj->base.size;
494 					}
495 
496 					err = i915_vma_pin(vma, 0, 0, offset | flags);
497 					if (err) {
498 						pr_err("%s(%s) pin (backward) failed with err=%d on size=%lu pages (prime=%lu), offset=%llx\n",
499 						       __func__, p->name, err, npages, prime, offset);
500 						goto err;
501 					}
502 
503 					if (!drm_mm_node_allocated(&vma->node) ||
504 					    i915_vma_misplaced(vma, 0, 0, offset | flags)) {
505 						pr_err("%s(%s) (backward) insert failed: vma.node=%llx + %llx [allocated? %d], expected offset %llx\n",
506 						       __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),
507 						       offset);
508 						err = -EINVAL;
509 						goto err;
510 					}
511 
512 					i915_vma_unpin(vma);
513 
514 					if (p->step > 0) {
515 						if (offset + obj->base.size > hole_end)
516 							break;
517 						offset += obj->base.size;
518 					}
519 				}
520 
521 				offset = p->offset;
522 				list_for_each_entry_reverse(obj, &objects, st_link) {
523 					vma = i915_vma_instance(obj, vm, NULL);
524 					if (IS_ERR(vma))
525 						continue;
526 
527 					if (p->step < 0) {
528 						if (offset < hole_start + obj->base.size)
529 							break;
530 						offset -= obj->base.size;
531 					}
532 
533 					if (!drm_mm_node_allocated(&vma->node) ||
534 					    i915_vma_misplaced(vma, 0, 0, offset | flags)) {
535 						pr_err("%s(%s) (backward) moved vma.node=%llx + %llx [allocated? %d], expected offset %llx\n",
536 						       __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),
537 						       offset);
538 						err = -EINVAL;
539 						goto err;
540 					}
541 
542 					err = i915_vma_unbind(vma);
543 					if (err) {
544 						pr_err("%s(%s) (backward) unbind of vma.node=%llx + %llx failed with err=%d\n",
545 						       __func__, p->name, vma->node.start, vma->node.size,
546 						       err);
547 						goto err;
548 					}
549 
550 					if (p->step > 0) {
551 						if (offset + obj->base.size > hole_end)
552 							break;
553 						offset += obj->base.size;
554 					}
555 				}
556 			}
557 
558 			if (igt_timeout(end_time, "%s timed out (npages=%lu, prime=%lu)\n",
559 					__func__, npages, prime)) {
560 				err = -EINTR;
561 				goto err;
562 			}
563 		}
564 
565 		close_object_list(&objects, vm);
566 		cleanup_freed_objects(vm->i915);
567 	}
568 
569 	return 0;
570 
571 err:
572 	close_object_list(&objects, vm);
573 	return err;
574 }
575 
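/*
 * Bind a single vma at successive offsets throughout the hole,
 * checking its placement after each pin and that unbinding releases
 * the node again.
 */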
576 static int walk_hole(struct i915_address_space *vm,
577 		     u64 hole_start, u64 hole_end,
578 		     unsigned long end_time)
579 {
580 	const u64 hole_size = hole_end - hole_start;
581 	const unsigned long max_pages =
582 		min_t(u64, ULONG_MAX - 1, hole_size >> PAGE_SHIFT);
583 	unsigned long flags;
584 	u64 size;
585 
586 	/* Try binding a single VMA in different positions within the hole */
587 
588 	flags = PIN_OFFSET_FIXED | PIN_USER;
589 	if (i915_is_ggtt(vm))
590 		flags |= PIN_GLOBAL;
591 
592 	for_each_prime_number_from(size, 1, max_pages) {
593 		struct drm_i915_gem_object *obj;
594 		struct i915_vma *vma;
595 		u64 addr;
596 		int err = 0;
597 
598 		obj = fake_dma_object(vm->i915, size << PAGE_SHIFT);
599 		if (IS_ERR(obj))
600 			break;
601 
602 		vma = i915_vma_instance(obj, vm, NULL);
603 		if (IS_ERR(vma)) {
604 			err = PTR_ERR(vma);
605 			goto err_put;
606 		}
607 
608 		for (addr = hole_start;
609 		     addr + obj->base.size < hole_end;
610 		     addr += obj->base.size) {
611 			err = i915_vma_pin(vma, 0, 0, addr | flags);
612 			if (err) {
613 				pr_err("%s bind failed at %llx + %llx [hole %llx- %llx] with err=%d\n",
614 				       __func__, addr, vma->size,
615 				       hole_start, hole_end, err);
616 				goto err_put;
617 			}
618 			i915_vma_unpin(vma);
619 
620 			if (!drm_mm_node_allocated(&vma->node) ||
621 			    i915_vma_misplaced(vma, 0, 0, addr | flags)) {
622 				pr_err("%s incorrect at %llx + %llx\n",
623 				       __func__, addr, vma->size);
624 				err = -EINVAL;
625 				goto err_put;
626 			}
627 
628 			err = i915_vma_unbind(vma);
629 			if (err) {
630 				pr_err("%s unbind failed at %llx + %llx  with err=%d\n",
631 				       __func__, addr, vma->size, err);
632 				goto err_put;
633 			}
634 
635 			GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
636 
637 			if (igt_timeout(end_time,
638 					"%s timed out at %llx\n",
639 					__func__, addr)) {
640 				err = -EINTR;
641 				goto err_put;
642 			}
643 		}
644 
645 err_put:
646 		i915_gem_object_put(obj);
647 		if (err)
648 			return err;
649 
650 		cleanup_freed_objects(vm->i915);
651 	}
652 
653 	return 0;
654 }
655 
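/*
 * Insert a two-page vma straddling every power-of-two boundary within
 * the hole, exercising placement at each level of the page-table
 * hierarchy.
 */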
656 static int pot_hole(struct i915_address_space *vm,
657 		    u64 hole_start, u64 hole_end,
658 		    unsigned long end_time)
659 {
660 	struct drm_i915_gem_object *obj;
661 	struct i915_vma *vma;
662 	unsigned long flags;
663 	unsigned int pot;
664 	int err = 0;
665 
666 	flags = PIN_OFFSET_FIXED | PIN_USER;
667 	if (i915_is_ggtt(vm))
668 		flags |= PIN_GLOBAL;
669 
670 	obj = i915_gem_object_create_internal(vm->i915, 2 * I915_GTT_PAGE_SIZE);
671 	if (IS_ERR(obj))
672 		return PTR_ERR(obj);
673 
674 	vma = i915_vma_instance(obj, vm, NULL);
675 	if (IS_ERR(vma)) {
676 		err = PTR_ERR(vma);
677 		goto err_obj;
678 	}
679 
680 	/* Insert a pair of pages across every pot boundary within the hole */
681 	for (pot = fls64(hole_end - 1) - 1;
682 	     pot > ilog2(2 * I915_GTT_PAGE_SIZE);
683 	     pot--) {
684 		u64 step = BIT_ULL(pot);
685 		u64 addr;
686 
687 		for (addr = round_up(hole_start + I915_GTT_PAGE_SIZE, step) - I915_GTT_PAGE_SIZE;
688 		     addr <= round_down(hole_end - 2*I915_GTT_PAGE_SIZE, step) - I915_GTT_PAGE_SIZE;
689 		     addr += step) {
690 			err = i915_vma_pin(vma, 0, 0, addr | flags);
691 			if (err) {
692 				pr_err("%s failed to pin object at %llx in hole [%llx - %llx], with err=%d\n",
693 				       __func__,
694 				       addr,
695 				       hole_start, hole_end,
696 				       err);
697 				goto err_obj;
698 			}
699 
700 			if (!drm_mm_node_allocated(&vma->node) ||
701 			    i915_vma_misplaced(vma, 0, 0, addr | flags)) {
702 				pr_err("%s incorrect at %llx + %llx\n",
703 				       __func__, addr, vma->size);
704 				i915_vma_unpin(vma);
705 				err = i915_vma_unbind(vma);
706 				err = -EINVAL;
707 				goto err_obj;
708 			}
709 
710 			i915_vma_unpin(vma);
711 			err = i915_vma_unbind(vma);
712 			GEM_BUG_ON(err);
713 		}
714 
715 		if (igt_timeout(end_time,
716 				"%s timed out after %d/%d\n",
717 				__func__, pot, fls64(hole_end - 1) - 1)) {
718 			err = -EINTR;
719 			goto err_obj;
720 		}
721 	}
722 
723 err_obj:
724 	i915_gem_object_put(obj);
725 	return err;
726 }
727 
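/*
 * Like lowlevel_hole, but go through the full vma pin/unbind path,
 * binding ever larger objects at randomised offsets until one no
 * longer fits within the hole.
 */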
728 static int drunk_hole(struct i915_address_space *vm,
729 		      u64 hole_start, u64 hole_end,
730 		      unsigned long end_time)
731 {
732 	I915_RND_STATE(prng);
733 	unsigned int size;
734 	unsigned long flags;
735 
736 	flags = PIN_OFFSET_FIXED | PIN_USER;
737 	if (i915_is_ggtt(vm))
738 		flags |= PIN_GLOBAL;
739 
740 	/* Keep creating larger objects until one cannot fit into the hole */
741 	for (size = 12; (hole_end - hole_start) >> size; size++) {
742 		struct drm_i915_gem_object *obj;
743 		unsigned int *order, count, n;
744 		struct i915_vma *vma;
745 		u64 hole_size;
746 		int err = -ENODEV;
747 
748 		hole_size = (hole_end - hole_start) >> size;
749 		if (hole_size > KMALLOC_MAX_SIZE / sizeof(u32))
750 			hole_size = KMALLOC_MAX_SIZE / sizeof(u32);
751 		count = hole_size >> 1;
752 		if (!count) {
753 			pr_debug("%s: hole is too small [%llx - %llx] >> %d: %lld\n",
754 				 __func__, hole_start, hole_end, size, hole_size);
755 			break;
756 		}
757 
758 		do {
759 			order = i915_random_order(count, &prng);
760 			if (order)
761 				break;
762 		} while (count >>= 1);
763 		if (!count)
764 			return -ENOMEM;
765 		GEM_BUG_ON(!order);
766 
767 		/* Ignore allocation failures (i.e. don't report them as
768 		 * a test failure) as we are purposefully allocating very
769 		 * large objects without checking that we have sufficient
770 		 * memory. We expect to hit -ENOMEM.
771 		 */
772 
773 		obj = fake_dma_object(vm->i915, BIT_ULL(size));
774 		if (IS_ERR(obj)) {
775 			kfree(order);
776 			break;
777 		}
778 
779 		vma = i915_vma_instance(obj, vm, NULL);
780 		if (IS_ERR(vma)) {
781 			err = PTR_ERR(vma);
782 			goto err_obj;
783 		}
784 
785 		GEM_BUG_ON(vma->size != BIT_ULL(size));
786 
787 		for (n = 0; n < count; n++) {
788 			u64 addr = hole_start + order[n] * BIT_ULL(size);
789 
790 			err = i915_vma_pin(vma, 0, 0, addr | flags);
791 			if (err) {
792 				pr_err("%s failed to pin object at %llx + %llx in hole [%llx - %llx], with err=%d\n",
793 				       __func__,
794 				       addr, BIT_ULL(size),
795 				       hole_start, hole_end,
796 				       err);
797 				goto err_obj;
798 			}
799 
800 			if (!drm_mm_node_allocated(&vma->node) ||
801 			    i915_vma_misplaced(vma, 0, 0, addr | flags)) {
802 				pr_err("%s incorrect at %llx + %llx\n",
803 				       __func__, addr, BIT_ULL(size));
804 				i915_vma_unpin(vma);
805 				err = i915_vma_unbind(vma);
806 				err = -EINVAL;
807 				goto err_obj;
808 			}
809 
810 			i915_vma_unpin(vma);
811 			err = i915_vma_unbind(vma);
812 			GEM_BUG_ON(err);
813 
814 			if (igt_timeout(end_time,
815 					"%s timed out after %d/%d\n",
816 					__func__, n, count)) {
817 				err = -EINTR;
818 				goto err_obj;
819 			}
820 		}
821 
822 err_obj:
823 		i915_gem_object_put(obj);
824 		kfree(order);
825 		if (err)
826 			return err;
827 
828 		cleanup_freed_objects(vm->i915);
829 	}
830 
831 	return 0;
832 }
833 
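/*
 * Fill the hole with progressively larger objects while the caller
 * injects allocation failures, checking that shrinking the page tables
 * under memory pressure does not corrupt the bindings.
 */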
834 static int __shrink_hole(struct i915_address_space *vm,
835 			 u64 hole_start, u64 hole_end,
836 			 unsigned long end_time)
837 {
838 	struct drm_i915_gem_object *obj;
839 	unsigned long flags = PIN_OFFSET_FIXED | PIN_USER;
840 	unsigned int order = 12;
841 	LIST_HEAD(objects);
842 	int err = 0;
843 	u64 addr;
844 
845 	/* Keep creating larger objects until one cannot fit into the hole */
846 	for (addr = hole_start; addr < hole_end; ) {
847 		struct i915_vma *vma;
848 		u64 size = BIT_ULL(order++);
849 
850 		size = min(size, hole_end - addr);
851 		obj = fake_dma_object(vm->i915, size);
852 		if (IS_ERR(obj)) {
853 			err = PTR_ERR(obj);
854 			break;
855 		}
856 
857 		list_add(&obj->st_link, &objects);
858 
859 		vma = i915_vma_instance(obj, vm, NULL);
860 		if (IS_ERR(vma)) {
861 			err = PTR_ERR(vma);
862 			break;
863 		}
864 
865 		GEM_BUG_ON(vma->size != size);
866 
867 		err = i915_vma_pin(vma, 0, 0, addr | flags);
868 		if (err) {
869 			pr_err("%s failed to pin object at %llx + %llx in hole [%llx - %llx], with err=%d\n",
870 			       __func__, addr, size, hole_start, hole_end, err);
871 			break;
872 		}
873 
874 		if (!drm_mm_node_allocated(&vma->node) ||
875 		    i915_vma_misplaced(vma, 0, 0, addr | flags)) {
876 			pr_err("%s incorrect at %llx + %llx\n",
877 			       __func__, addr, size);
878 			i915_vma_unpin(vma);
879 			err = i915_vma_unbind(vma);
880 			err = -EINVAL;
881 			break;
882 		}
883 
884 		i915_vma_unpin(vma);
885 		addr += size;
886 
887 		/*
888 		 * Since we are injecting allocation faults at random intervals,
889 		 * wait for this allocation to complete before we change the
890 	 * fault injection.
891 		 */
892 		err = i915_vma_sync(vma);
893 		if (err)
894 			break;
895 
896 		if (igt_timeout(end_time,
897 				"%s timed out at offset %llx [%llx - %llx]\n",
898 				__func__, addr, hole_start, hole_end)) {
899 			err = -EINTR;
900 			break;
901 		}
902 	}
903 
904 	close_object_list(&objects, vm);
905 	cleanup_freed_objects(vm->i915);
906 	return err;
907 }
908 
909 static int shrink_hole(struct i915_address_space *vm,
910 		       u64 hole_start, u64 hole_end,
911 		       unsigned long end_time)
912 {
913 	unsigned long prime;
914 	int err;
915 
916 	vm->fault_attr.probability = 999;
917 	atomic_set(&vm->fault_attr.times, -1);
918 
919 	for_each_prime_number_from(prime, 0, ULONG_MAX - 1) {
920 		vm->fault_attr.interval = prime;
921 		err = __shrink_hole(vm, hole_start, hole_end, end_time);
922 		if (err)
923 			break;
924 	}
925 
926 	memset(&vm->fault_attr, 0, sizeof(vm->fault_attr));
927 
928 	return err;
929 }
930 
931 static int shrink_boom(struct i915_address_space *vm,
932 		       u64 hole_start, u64 hole_end,
933 		       unsigned long end_time)
934 {
935 	unsigned int sizes[] = { SZ_2M, SZ_1G };
936 	struct drm_i915_gem_object *purge;
937 	struct drm_i915_gem_object *explode;
938 	int err;
939 	int i;
940 
941 	/*
942 	 * Catch the case which shrink_hole seems to miss. The setup here
943 	 * requires invoking the shrinker as we do the alloc_pt/alloc_pd, while
944 	 * ensuring that all vma associated with the respective pd/pdp are
945 	 * unpinned at the time.
946 	 */
947 
948 	for (i = 0; i < ARRAY_SIZE(sizes); ++i) {
949 		unsigned int flags = PIN_USER | PIN_OFFSET_FIXED;
950 		unsigned int size = sizes[i];
951 		struct i915_vma *vma;
952 
953 		purge = fake_dma_object(vm->i915, size);
954 		if (IS_ERR(purge))
955 			return PTR_ERR(purge);
956 
957 		vma = i915_vma_instance(purge, vm, NULL);
958 		if (IS_ERR(vma)) {
959 			err = PTR_ERR(vma);
960 			goto err_purge;
961 		}
962 
963 		err = i915_vma_pin(vma, 0, 0, flags);
964 		if (err)
965 			goto err_purge;
966 
967 		/* Should now be ripe for purging */
968 		i915_vma_unpin(vma);
969 
970 		explode = fake_dma_object(vm->i915, size);
971 		if (IS_ERR(explode)) {
972 			err = PTR_ERR(explode);
973 			goto err_purge;
974 		}
975 
976 		vm->fault_attr.probability = 100;
977 		vm->fault_attr.interval = 1;
978 		atomic_set(&vm->fault_attr.times, -1);
979 
980 		vma = i915_vma_instance(explode, vm, NULL);
981 		if (IS_ERR(vma)) {
982 			err = PTR_ERR(vma);
983 			goto err_explode;
984 		}
985 
986 		err = i915_vma_pin(vma, 0, 0, flags | size);
987 		if (err)
988 			goto err_explode;
989 
990 		i915_vma_unpin(vma);
991 
992 		i915_gem_object_put(purge);
993 		i915_gem_object_put(explode);
994 
995 		memset(&vm->fault_attr, 0, sizeof(vm->fault_attr));
996 		cleanup_freed_objects(vm->i915);
997 	}
998 
999 	return 0;
1000 
1001 err_explode:
1002 	i915_gem_object_put(explode);
1003 err_purge:
1004 	i915_gem_object_put(purge);
1005 	memset(&vm->fault_attr, 0, sizeof(vm->fault_attr));
1006 	return err;
1007 }
1008 
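/*
 * Run one of the hole tests over the entire range of a freshly created
 * full ppgtt.
 */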
1009 static int exercise_ppgtt(struct drm_i915_private *dev_priv,
1010 			  int (*func)(struct i915_address_space *vm,
1011 				      u64 hole_start, u64 hole_end,
1012 				      unsigned long end_time))
1013 {
1014 	struct i915_ppgtt *ppgtt;
1015 	IGT_TIMEOUT(end_time);
1016 	struct file *file;
1017 	int err;
1018 
1019 	if (!HAS_FULL_PPGTT(dev_priv))
1020 		return 0;
1021 
1022 	file = mock_file(dev_priv);
1023 	if (IS_ERR(file))
1024 		return PTR_ERR(file);
1025 
1026 	ppgtt = i915_ppgtt_create(&dev_priv->gt);
1027 	if (IS_ERR(ppgtt)) {
1028 		err = PTR_ERR(ppgtt);
1029 		goto out_free;
1030 	}
1031 	GEM_BUG_ON(offset_in_page(ppgtt->vm.total));
1032 	GEM_BUG_ON(!atomic_read(&ppgtt->vm.open));
1033 
1034 	err = func(&ppgtt->vm, 0, ppgtt->vm.total, end_time);
1035 
1036 	i915_vm_put(&ppgtt->vm);
1037 
1038 out_free:
1039 	fput(file);
1040 	return err;
1041 }
1042 
1043 static int igt_ppgtt_fill(void *arg)
1044 {
1045 	return exercise_ppgtt(arg, fill_hole);
1046 }
1047 
1048 static int igt_ppgtt_walk(void *arg)
1049 {
1050 	return exercise_ppgtt(arg, walk_hole);
1051 }
1052 
1053 static int igt_ppgtt_pot(void *arg)
1054 {
1055 	return exercise_ppgtt(arg, pot_hole);
1056 }
1057 
1058 static int igt_ppgtt_drunk(void *arg)
1059 {
1060 	return exercise_ppgtt(arg, drunk_hole);
1061 }
1062 
1063 static int igt_ppgtt_lowlevel(void *arg)
1064 {
1065 	return exercise_ppgtt(arg, lowlevel_hole);
1066 }
1067 
1068 static int igt_ppgtt_shrink(void *arg)
1069 {
1070 	return exercise_ppgtt(arg, shrink_hole);
1071 }
1072 
1073 static int igt_ppgtt_shrink_boom(void *arg)
1074 {
1075 	return exercise_ppgtt(arg, shrink_boom);
1076 }
1077 
1078 static int sort_holes(void *priv, const struct list_head *A,
1079 		      const struct list_head *B)
1080 {
1081 	struct drm_mm_node *a = list_entry(A, typeof(*a), hole_stack);
1082 	struct drm_mm_node *b = list_entry(B, typeof(*b), hole_stack);
1083 
1084 	if (a->start < b->start)
1085 		return -1;
1086 	else
1087 		return 1;
1088 }
1089 
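/*
 * Walk the holes of the live GGTT in address order and run the given
 * test over each one, restarting the walk after every hole since the
 * test itself reshapes the drm_mm.
 */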
1090 static int exercise_ggtt(struct drm_i915_private *i915,
1091 			 int (*func)(struct i915_address_space *vm,
1092 				     u64 hole_start, u64 hole_end,
1093 				     unsigned long end_time))
1094 {
1095 	struct i915_ggtt *ggtt = &i915->ggtt;
1096 	u64 hole_start, hole_end, last = 0;
1097 	struct drm_mm_node *node;
1098 	IGT_TIMEOUT(end_time);
1099 	int err = 0;
1100 
1101 restart:
1102 	list_sort(NULL, &ggtt->vm.mm.hole_stack, sort_holes);
1103 	drm_mm_for_each_hole(node, &ggtt->vm.mm, hole_start, hole_end) {
1104 		if (hole_start < last)
1105 			continue;
1106 
1107 		if (ggtt->vm.mm.color_adjust)
1108 			ggtt->vm.mm.color_adjust(node, 0,
1109 						 &hole_start, &hole_end);
1110 		if (hole_start >= hole_end)
1111 			continue;
1112 
1113 		err = func(&ggtt->vm, hole_start, hole_end, end_time);
1114 		if (err)
1115 			break;
1116 
1117 		/* As we have manipulated the drm_mm, the list may be corrupt */
1118 		last = hole_end;
1119 		goto restart;
1120 	}
1121 
1122 	return err;
1123 }
1124 
1125 static int igt_ggtt_fill(void *arg)
1126 {
1127 	return exercise_ggtt(arg, fill_hole);
1128 }
1129 
1130 static int igt_ggtt_walk(void *arg)
1131 {
1132 	return exercise_ggtt(arg, walk_hole);
1133 }
1134 
1135 static int igt_ggtt_pot(void *arg)
1136 {
1137 	return exercise_ggtt(arg, pot_hole);
1138 }
1139 
1140 static int igt_ggtt_drunk(void *arg)
1141 {
1142 	return exercise_ggtt(arg, drunk_hole);
1143 }
1144 
1145 static int igt_ggtt_lowlevel(void *arg)
1146 {
1147 	return exercise_ggtt(arg, lowlevel_hole);
1148 }
1149 
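/*
 * Insert the same physical page at many GGTT offsets with
 * insert_page(), write a distinct dword through each mapping in random
 * order and read the values back through the aliases to verify the
 * PTE programming.
 */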
1150 static int igt_ggtt_page(void *arg)
1151 {
1152 	const unsigned int count = PAGE_SIZE/sizeof(u32);
1153 	I915_RND_STATE(prng);
1154 	struct drm_i915_private *i915 = arg;
1155 	struct i915_ggtt *ggtt = &i915->ggtt;
1156 	struct drm_i915_gem_object *obj;
1157 	intel_wakeref_t wakeref;
1158 	struct drm_mm_node tmp;
1159 	unsigned int *order, n;
1160 	int err;
1161 
1162 	if (!i915_ggtt_has_aperture(ggtt))
1163 		return 0;
1164 
1165 	obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
1166 	if (IS_ERR(obj))
1167 		return PTR_ERR(obj);
1168 
1169 	err = i915_gem_object_pin_pages(obj);
1170 	if (err)
1171 		goto out_free;
1172 
1173 	memset(&tmp, 0, sizeof(tmp));
1174 	mutex_lock(&ggtt->vm.mutex);
1175 	err = drm_mm_insert_node_in_range(&ggtt->vm.mm, &tmp,
1176 					  count * PAGE_SIZE, 0,
1177 					  I915_COLOR_UNEVICTABLE,
1178 					  0, ggtt->mappable_end,
1179 					  DRM_MM_INSERT_LOW);
1180 	mutex_unlock(&ggtt->vm.mutex);
1181 	if (err)
1182 		goto out_unpin;
1183 
1184 	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
1185 
1186 	for (n = 0; n < count; n++) {
1187 		u64 offset = tmp.start + n * PAGE_SIZE;
1188 
1189 		ggtt->vm.insert_page(&ggtt->vm,
1190 				     i915_gem_object_get_dma_address(obj, 0),
1191 				     offset, I915_CACHE_NONE, 0);
1192 	}
1193 
1194 	order = i915_random_order(count, &prng);
1195 	if (!order) {
1196 		err = -ENOMEM;
1197 		goto out_remove;
1198 	}
1199 
1200 	for (n = 0; n < count; n++) {
1201 		u64 offset = tmp.start + order[n] * PAGE_SIZE;
1202 		u32 __iomem *vaddr;
1203 
1204 		vaddr = io_mapping_map_atomic_wc(&ggtt->iomap, offset);
1205 		iowrite32(n, vaddr + n);
1206 		io_mapping_unmap_atomic(vaddr);
1207 	}
1208 	intel_gt_flush_ggtt_writes(ggtt->vm.gt);
1209 
1210 	i915_random_reorder(order, count, &prng);
1211 	for (n = 0; n < count; n++) {
1212 		u64 offset = tmp.start + order[n] * PAGE_SIZE;
1213 		u32 __iomem *vaddr;
1214 		u32 val;
1215 
1216 		vaddr = io_mapping_map_atomic_wc(&ggtt->iomap, offset);
1217 		val = ioread32(vaddr + n);
1218 		io_mapping_unmap_atomic(vaddr);
1219 
1220 		if (val != n) {
1221 			pr_err("insert page failed: found %d, expected %d\n",
1222 			       val, n);
1223 			err = -EINVAL;
1224 			break;
1225 		}
1226 	}
1227 
1228 	kfree(order);
1229 out_remove:
1230 	ggtt->vm.clear_range(&ggtt->vm, tmp.start, tmp.size);
1231 	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
1232 	mutex_lock(&ggtt->vm.mutex);
1233 	drm_mm_remove_node(&tmp);
1234 	mutex_unlock(&ggtt->vm.mutex);
1235 out_unpin:
1236 	i915_gem_object_unpin_pages(obj);
1237 out_free:
1238 	i915_gem_object_put(obj);
1239 	return err;
1240 }
1241 
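/*
 * Fake the bookkeeping normally performed by vma binding so that nodes
 * placed directly via i915_gem_gtt_reserve/insert look like ordinary
 * bound vma (pages pinned, vma on the vm's bound_list).
 */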
1242 static void track_vma_bind(struct i915_vma *vma)
1243 {
1244 	struct drm_i915_gem_object *obj = vma->obj;
1245 
1246 	__i915_gem_object_pin_pages(obj);
1247 
1248 	GEM_BUG_ON(vma->pages);
1249 	atomic_set(&vma->pages_count, I915_VMA_PAGES_ACTIVE);
1250 	__i915_gem_object_pin_pages(obj);
1251 	vma->pages = obj->mm.pages;
1252 
1253 	mutex_lock(&vma->vm->mutex);
1254 	list_add_tail(&vma->vm_link, &vma->vm->bound_list);
1255 	mutex_unlock(&vma->vm->mutex);
1256 }
1257 
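/*
 * Run a hole test over the address space of a mock context, capped to
 * the amount of physical memory available.
 */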
1258 static int exercise_mock(struct drm_i915_private *i915,
1259 			 int (*func)(struct i915_address_space *vm,
1260 				     u64 hole_start, u64 hole_end,
1261 				     unsigned long end_time))
1262 {
1263 	const u64 limit = totalram_pages() << PAGE_SHIFT;
1264 	struct i915_address_space *vm;
1265 	struct i915_gem_context *ctx;
1266 	IGT_TIMEOUT(end_time);
1267 	int err;
1268 
1269 	ctx = mock_context(i915, "mock");
1270 	if (!ctx)
1271 		return -ENOMEM;
1272 
1273 	vm = i915_gem_context_get_vm_rcu(ctx);
1274 	err = func(vm, 0, min(vm->total, limit), end_time);
1275 	i915_vm_put(vm);
1276 
1277 	mock_context_close(ctx);
1278 	return err;
1279 }
1280 
1281 static int igt_mock_fill(void *arg)
1282 {
1283 	struct i915_ggtt *ggtt = arg;
1284 
1285 	return exercise_mock(ggtt->vm.i915, fill_hole);
1286 }
1287 
1288 static int igt_mock_walk(void *arg)
1289 {
1290 	struct i915_ggtt *ggtt = arg;
1291 
1292 	return exercise_mock(ggtt->vm.i915, walk_hole);
1293 }
1294 
1295 static int igt_mock_pot(void *arg)
1296 {
1297 	struct i915_ggtt *ggtt = arg;
1298 
1299 	return exercise_mock(ggtt->vm.i915, pot_hole);
1300 }
1301 
1302 static int igt_mock_drunk(void *arg)
1303 {
1304 	struct i915_ggtt *ggtt = arg;
1305 
1306 	return exercise_mock(ggtt->vm.i915, drunk_hole);
1307 }
1308 
1309 static int igt_gtt_reserve(void *arg)
1310 {
1311 	struct i915_ggtt *ggtt = arg;
1312 	struct drm_i915_gem_object *obj, *on;
1313 	I915_RND_STATE(prng);
1314 	LIST_HEAD(objects);
1315 	u64 total;
1316 	int err = -ENODEV;
1317 
1318 	/* i915_gem_gtt_reserve() tries to reserve the precise range
1319 	 * for the node, and evicts if it has to. So our test checks that
1320 	 * it can give us the requsted space and prevent overlaps.
1321 	 * it can give us the requested space and prevent overlaps.
1322 
1323 	/* Start by filling the GGTT */
1324 	for (total = 0;
1325 	     total + 2 * I915_GTT_PAGE_SIZE <= ggtt->vm.total;
1326 	     total += 2 * I915_GTT_PAGE_SIZE) {
1327 		struct i915_vma *vma;
1328 
1329 		obj = i915_gem_object_create_internal(ggtt->vm.i915,
1330 						      2 * PAGE_SIZE);
1331 		if (IS_ERR(obj)) {
1332 			err = PTR_ERR(obj);
1333 			goto out;
1334 		}
1335 
1336 		err = i915_gem_object_pin_pages(obj);
1337 		if (err) {
1338 			i915_gem_object_put(obj);
1339 			goto out;
1340 		}
1341 
1342 		list_add(&obj->st_link, &objects);
1343 
1344 		vma = i915_vma_instance(obj, &ggtt->vm, NULL);
1345 		if (IS_ERR(vma)) {
1346 			err = PTR_ERR(vma);
1347 			goto out;
1348 		}
1349 
1350 		mutex_lock(&ggtt->vm.mutex);
1351 		err = i915_gem_gtt_reserve(&ggtt->vm, &vma->node,
1352 					   obj->base.size,
1353 					   total,
1354 					   obj->cache_level,
1355 					   0);
1356 		mutex_unlock(&ggtt->vm.mutex);
1357 		if (err) {
1358 			pr_err("i915_gem_gtt_reserve (pass 1) failed at %llu/%llu with err=%d\n",
1359 			       total, ggtt->vm.total, err);
1360 			goto out;
1361 		}
1362 		track_vma_bind(vma);
1363 
1364 		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1365 		if (vma->node.start != total ||
1366 		    vma->node.size != 2*I915_GTT_PAGE_SIZE) {
1367 			pr_err("i915_gem_gtt_reserve (pass 1) placement failed, found (%llx + %llx), expected (%llx + %llx)\n",
1368 			       vma->node.start, vma->node.size,
1369 			       total, 2*I915_GTT_PAGE_SIZE);
1370 			err = -EINVAL;
1371 			goto out;
1372 		}
1373 	}
1374 
1375 	/* Now we start forcing evictions */
1376 	for (total = I915_GTT_PAGE_SIZE;
1377 	     total + 2 * I915_GTT_PAGE_SIZE <= ggtt->vm.total;
1378 	     total += 2 * I915_GTT_PAGE_SIZE) {
1379 		struct i915_vma *vma;
1380 
1381 		obj = i915_gem_object_create_internal(ggtt->vm.i915,
1382 						      2 * PAGE_SIZE);
1383 		if (IS_ERR(obj)) {
1384 			err = PTR_ERR(obj);
1385 			goto out;
1386 		}
1387 
1388 		err = i915_gem_object_pin_pages(obj);
1389 		if (err) {
1390 			i915_gem_object_put(obj);
1391 			goto out;
1392 		}
1393 
1394 		list_add(&obj->st_link, &objects);
1395 
1396 		vma = i915_vma_instance(obj, &ggtt->vm, NULL);
1397 		if (IS_ERR(vma)) {
1398 			err = PTR_ERR(vma);
1399 			goto out;
1400 		}
1401 
1402 		mutex_lock(&ggtt->vm.mutex);
1403 		err = i915_gem_gtt_reserve(&ggtt->vm, &vma->node,
1404 					   obj->base.size,
1405 					   total,
1406 					   obj->cache_level,
1407 					   0);
1408 		mutex_unlock(&ggtt->vm.mutex);
1409 		if (err) {
1410 			pr_err("i915_gem_gtt_reserve (pass 2) failed at %llu/%llu with err=%d\n",
1411 			       total, ggtt->vm.total, err);
1412 			goto out;
1413 		}
1414 		track_vma_bind(vma);
1415 
1416 		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1417 		if (vma->node.start != total ||
1418 		    vma->node.size != 2*I915_GTT_PAGE_SIZE) {
1419 			pr_err("i915_gem_gtt_reserve (pass 2) placement failed, found (%llx + %llx), expected (%llx + %llx)\n",
1420 			       vma->node.start, vma->node.size,
1421 			       total, 2*I915_GTT_PAGE_SIZE);
1422 			err = -EINVAL;
1423 			goto out;
1424 		}
1425 	}
1426 
1427 	/* And then try at random */
1428 	list_for_each_entry_safe(obj, on, &objects, st_link) {
1429 		struct i915_vma *vma;
1430 		u64 offset;
1431 
1432 		vma = i915_vma_instance(obj, &ggtt->vm, NULL);
1433 		if (IS_ERR(vma)) {
1434 			err = PTR_ERR(vma);
1435 			goto out;
1436 		}
1437 
1438 		err = i915_vma_unbind(vma);
1439 		if (err) {
1440 			pr_err("i915_vma_unbind failed with err=%d!\n", err);
1441 			goto out;
1442 		}
1443 
1444 		offset = igt_random_offset(&prng,
1445 					   0, ggtt->vm.total,
1446 					   2 * I915_GTT_PAGE_SIZE,
1447 					   I915_GTT_MIN_ALIGNMENT);
1448 
1449 		mutex_lock(&ggtt->vm.mutex);
1450 		err = i915_gem_gtt_reserve(&ggtt->vm, &vma->node,
1451 					   obj->base.size,
1452 					   offset,
1453 					   obj->cache_level,
1454 					   0);
1455 		mutex_unlock(&ggtt->vm.mutex);
1456 		if (err) {
1457 			pr_err("i915_gem_gtt_reserve (pass 3) failed at %llu/%llu with err=%d\n",
1458 			       total, ggtt->vm.total, err);
1459 			goto out;
1460 		}
1461 		track_vma_bind(vma);
1462 
1463 		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1464 		if (vma->node.start != offset ||
1465 		    vma->node.size != 2*I915_GTT_PAGE_SIZE) {
1466 			pr_err("i915_gem_gtt_reserve (pass 3) placement failed, found (%llx + %llx), expected (%llx + %llx)\n",
1467 			       vma->node.start, vma->node.size,
1468 			       offset, 2*I915_GTT_PAGE_SIZE);
1469 			err = -EINVAL;
1470 			goto out;
1471 		}
1472 	}
1473 
1474 out:
1475 	list_for_each_entry_safe(obj, on, &objects, st_link) {
1476 		i915_gem_object_unpin_pages(obj);
1477 		i915_gem_object_put(obj);
1478 	}
1479 	return err;
1480 }
1481 
1482 static int igt_gtt_insert(void *arg)
1483 {
1484 	struct i915_ggtt *ggtt = arg;
1485 	struct drm_i915_gem_object *obj, *on;
1486 	struct drm_mm_node tmp = {};
1487 	const struct invalid_insert {
1488 		u64 size;
1489 		u64 alignment;
1490 		u64 start, end;
1491 	} invalid_insert[] = {
1492 		{
1493 			ggtt->vm.total + I915_GTT_PAGE_SIZE, 0,
1494 			0, ggtt->vm.total,
1495 		},
1496 		{
1497 			2*I915_GTT_PAGE_SIZE, 0,
1498 			0, I915_GTT_PAGE_SIZE,
1499 		},
1500 		{
1501 			-(u64)I915_GTT_PAGE_SIZE, 0,
1502 			0, 4*I915_GTT_PAGE_SIZE,
1503 		},
1504 		{
1505 			-(u64)2*I915_GTT_PAGE_SIZE, 2*I915_GTT_PAGE_SIZE,
1506 			0, 4*I915_GTT_PAGE_SIZE,
1507 		},
1508 		{
1509 			I915_GTT_PAGE_SIZE, I915_GTT_MIN_ALIGNMENT << 1,
1510 			I915_GTT_MIN_ALIGNMENT, I915_GTT_MIN_ALIGNMENT << 1,
1511 		},
1512 		{}
1513 	}, *ii;
1514 	LIST_HEAD(objects);
1515 	u64 total;
1516 	int err = -ENODEV;
1517 
1518 	/* i915_gem_gtt_insert() tries to allocate some free space in the GTT
1519 	 * for the node, evicting if required.
1520 	 */
1521 
1522 	/* Check a couple of obviously invalid requests */
1523 	for (ii = invalid_insert; ii->size; ii++) {
1524 		mutex_lock(&ggtt->vm.mutex);
1525 		err = i915_gem_gtt_insert(&ggtt->vm, &tmp,
1526 					  ii->size, ii->alignment,
1527 					  I915_COLOR_UNEVICTABLE,
1528 					  ii->start, ii->end,
1529 					  0);
1530 		mutex_unlock(&ggtt->vm.mutex);
1531 		if (err != -ENOSPC) {
1532 			pr_err("Invalid i915_gem_gtt_insert(.size=%llx, .alignment=%llx, .start=%llx, .end=%llx) succeeded (err=%d)\n",
1533 			       ii->size, ii->alignment, ii->start, ii->end,
1534 			       err);
1535 			return -EINVAL;
1536 		}
1537 	}
1538 
1539 	/* Start by filling the GGTT */
1540 	for (total = 0;
1541 	     total + I915_GTT_PAGE_SIZE <= ggtt->vm.total;
1542 	     total += I915_GTT_PAGE_SIZE) {
1543 		struct i915_vma *vma;
1544 
1545 		obj = i915_gem_object_create_internal(ggtt->vm.i915,
1546 						      I915_GTT_PAGE_SIZE);
1547 		if (IS_ERR(obj)) {
1548 			err = PTR_ERR(obj);
1549 			goto out;
1550 		}
1551 
1552 		err = i915_gem_object_pin_pages(obj);
1553 		if (err) {
1554 			i915_gem_object_put(obj);
1555 			goto out;
1556 		}
1557 
1558 		list_add(&obj->st_link, &objects);
1559 
1560 		vma = i915_vma_instance(obj, &ggtt->vm, NULL);
1561 		if (IS_ERR(vma)) {
1562 			err = PTR_ERR(vma);
1563 			goto out;
1564 		}
1565 
1566 		mutex_lock(&ggtt->vm.mutex);
1567 		err = i915_gem_gtt_insert(&ggtt->vm, &vma->node,
1568 					  obj->base.size, 0, obj->cache_level,
1569 					  0, ggtt->vm.total,
1570 					  0);
1571 		mutex_unlock(&ggtt->vm.mutex);
1572 		if (err == -ENOSPC) {
1573 			/* maxed out the GGTT space */
1574 			i915_gem_object_put(obj);
1575 			break;
1576 		}
1577 		if (err) {
1578 			pr_err("i915_gem_gtt_insert (pass 1) failed at %llu/%llu with err=%d\n",
1579 			       total, ggtt->vm.total, err);
1580 			goto out;
1581 		}
1582 		track_vma_bind(vma);
1583 		__i915_vma_pin(vma);
1584 
1585 		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1586 	}
1587 
1588 	list_for_each_entry(obj, &objects, st_link) {
1589 		struct i915_vma *vma;
1590 
1591 		vma = i915_vma_instance(obj, &ggtt->vm, NULL);
1592 		if (IS_ERR(vma)) {
1593 			err = PTR_ERR(vma);
1594 			goto out;
1595 		}
1596 
1597 		if (!drm_mm_node_allocated(&vma->node)) {
1598 			pr_err("VMA was unexpectedly evicted!\n");
1599 			err = -EINVAL;
1600 			goto out;
1601 		}
1602 
1603 		__i915_vma_unpin(vma);
1604 	}
1605 
1606 	/* If we then reinsert, we should find the same hole */
1607 	list_for_each_entry_safe(obj, on, &objects, st_link) {
1608 		struct i915_vma *vma;
1609 		u64 offset;
1610 
1611 		vma = i915_vma_instance(obj, &ggtt->vm, NULL);
1612 		if (IS_ERR(vma)) {
1613 			err = PTR_ERR(vma);
1614 			goto out;
1615 		}
1616 
1617 		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1618 		offset = vma->node.start;
1619 
1620 		err = i915_vma_unbind(vma);
1621 		if (err) {
1622 			pr_err("i915_vma_unbind failed with err=%d!\n", err);
1623 			goto out;
1624 		}
1625 
1626 		mutex_lock(&ggtt->vm.mutex);
1627 		err = i915_gem_gtt_insert(&ggtt->vm, &vma->node,
1628 					  obj->base.size, 0, obj->cache_level,
1629 					  0, ggtt->vm.total,
1630 					  0);
1631 		mutex_unlock(&ggtt->vm.mutex);
1632 		if (err) {
1633 			pr_err("i915_gem_gtt_insert (pass 2) failed at %llu/%llu with err=%d\n",
1634 			       total, ggtt->vm.total, err);
1635 			goto out;
1636 		}
1637 		track_vma_bind(vma);
1638 
1639 		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1640 		if (vma->node.start != offset) {
1641 			pr_err("i915_gem_gtt_insert did not return node to its previous location (the only hole), expected address %llx, found %llx\n",
1642 			       offset, vma->node.start);
1643 			err = -EINVAL;
1644 			goto out;
1645 		}
1646 	}
1647 
1648 	/* And then force evictions */
1649 	for (total = 0;
1650 	     total + 2 * I915_GTT_PAGE_SIZE <= ggtt->vm.total;
1651 	     total += 2 * I915_GTT_PAGE_SIZE) {
1652 		struct i915_vma *vma;
1653 
1654 		obj = i915_gem_object_create_internal(ggtt->vm.i915,
1655 						      2 * I915_GTT_PAGE_SIZE);
1656 		if (IS_ERR(obj)) {
1657 			err = PTR_ERR(obj);
1658 			goto out;
1659 		}
1660 
1661 		err = i915_gem_object_pin_pages(obj);
1662 		if (err) {
1663 			i915_gem_object_put(obj);
1664 			goto out;
1665 		}
1666 
1667 		list_add(&obj->st_link, &objects);
1668 
1669 		vma = i915_vma_instance(obj, &ggtt->vm, NULL);
1670 		if (IS_ERR(vma)) {
1671 			err = PTR_ERR(vma);
1672 			goto out;
1673 		}
1674 
1675 		mutex_lock(&ggtt->vm.mutex);
1676 		err = i915_gem_gtt_insert(&ggtt->vm, &vma->node,
1677 					  obj->base.size, 0, obj->cache_level,
1678 					  0, ggtt->vm.total,
1679 					  0);
1680 		mutex_unlock(&ggtt->vm.mutex);
1681 		if (err) {
1682 			pr_err("i915_gem_gtt_insert (pass 3) failed at %llu/%llu with err=%d\n",
1683 			       total, ggtt->vm.total, err);
1684 			goto out;
1685 		}
1686 		track_vma_bind(vma);
1687 
1688 		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1689 	}
1690 
1691 out:
1692 	list_for_each_entry_safe(obj, on, &objects, st_link) {
1693 		i915_gem_object_unpin_pages(obj);
1694 		i915_gem_object_put(obj);
1695 	}
1696 	return err;
1697 }
1698 
1699 int i915_gem_gtt_mock_selftests(void)
1700 {
1701 	static const struct i915_subtest tests[] = {
1702 		SUBTEST(igt_mock_drunk),
1703 		SUBTEST(igt_mock_walk),
1704 		SUBTEST(igt_mock_pot),
1705 		SUBTEST(igt_mock_fill),
1706 		SUBTEST(igt_gtt_reserve),
1707 		SUBTEST(igt_gtt_insert),
1708 	};
1709 	struct drm_i915_private *i915;
1710 	struct i915_ggtt *ggtt;
1711 	int err;
1712 
1713 	i915 = mock_gem_device();
1714 	if (!i915)
1715 		return -ENOMEM;
1716 
1717 	ggtt = kmalloc(sizeof(*ggtt), GFP_KERNEL);
1718 	if (!ggtt) {
1719 		err = -ENOMEM;
1720 		goto out_put;
1721 	}
1722 	mock_init_ggtt(i915, ggtt);
1723 
1724 	err = i915_subtests(tests, ggtt);
1725 
1726 	mock_device_flush(i915);
1727 	i915_gem_drain_freed_objects(i915);
1728 	mock_fini_ggtt(ggtt);
1729 	kfree(ggtt);
1730 out_put:
1731 	mock_destroy_device(i915);
1732 	return err;
1733 }
1734 
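/*
 * Submit an empty request on the context and wait briefly for it to
 * complete, returning -EIO if it does not.
 */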
1735 static int context_sync(struct intel_context *ce)
1736 {
1737 	struct i915_request *rq;
1738 	long timeout;
1739 
1740 	rq = intel_context_create_request(ce);
1741 	if (IS_ERR(rq))
1742 		return PTR_ERR(rq);
1743 
1744 	i915_request_get(rq);
1745 	i915_request_add(rq);
1746 
1747 	timeout = i915_request_wait(rq, 0, HZ / 5);
1748 	i915_request_put(rq);
1749 
1750 	return timeout < 0 ? -EIO : 0;
1751 }
1752 
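/*
 * Emit a request that jumps to the batch at the given address,
 * preceded by an initial breadcrumb (where available) so that a hang
 * inside the batch can be detected.
 */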
1753 static struct i915_request *
1754 submit_batch(struct intel_context *ce, u64 addr)
1755 {
1756 	struct i915_request *rq;
1757 	int err;
1758 
1759 	rq = intel_context_create_request(ce);
1760 	if (IS_ERR(rq))
1761 		return rq;
1762 
1763 	err = 0;
1764 	if (rq->engine->emit_init_breadcrumb) /* detect a hang */
1765 		err = rq->engine->emit_init_breadcrumb(rq);
1766 	if (err == 0)
1767 		err = rq->engine->emit_bb_start(rq, addr, 0, 0);
1768 
1769 	if (err == 0)
1770 		i915_request_get(rq);
1771 	i915_request_add(rq);
1772 
1773 	return err ? ERR_PTR(err) : rq;
1774 }
1775 
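/*
 * In igt_cs_tlb each 64-byte slot of the "act" batch stores its index
 * and then loops back to itself via MI_BATCH_BUFFER_START; spinner()
 * locates the dword just before that jump and end_spin() rewrites it
 * to MI_BATCH_BUFFER_END to let the batch retire.
 */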
1776 static u32 *spinner(u32 *batch, int i)
1777 {
1778 	return batch + i * 64 / sizeof(*batch) + 4;
1779 }
1780 
1781 static void end_spin(u32 *batch, int i)
1782 {
1783 	*spinner(batch, i) = MI_BATCH_BUFFER_END;
1784 	wmb();
1785 }
1786 
1787 static int igt_cs_tlb(void *arg)
1788 {
1789 	const unsigned int count = PAGE_SIZE / 64;
1790 	const unsigned int chunk_size = count * PAGE_SIZE;
1791 	struct drm_i915_private *i915 = arg;
1792 	struct drm_i915_gem_object *bbe, *act, *out;
1793 	struct i915_gem_engines_iter it;
1794 	struct i915_address_space *vm;
1795 	struct i915_gem_context *ctx;
1796 	struct intel_context *ce;
1797 	struct i915_vma *vma;
1798 	I915_RND_STATE(prng);
1799 	struct file *file;
1800 	unsigned int i;
1801 	u32 *result;
1802 	u32 *batch;
1803 	int err = 0;
1804 
1805 	/*
1806 	 * Our mission here is to fool the hardware into executing something
1807 	 * from scratch, as it has not seen the batch move (due to a missing
1808 	 * TLB invalidate).
1809 	 */
1810 
1811 	file = mock_file(i915);
1812 	if (IS_ERR(file))
1813 		return PTR_ERR(file);
1814 
1815 	ctx = live_context(i915, file);
1816 	if (IS_ERR(ctx)) {
1817 		err = PTR_ERR(ctx);
1818 		goto out_unlock;
1819 	}
1820 
1821 	vm = i915_gem_context_get_vm_rcu(ctx);
1822 	if (i915_is_ggtt(vm))
1823 		goto out_vm;
1824 
1825 	/* Create two pages; a dummy with which we prefill the TLB, and the intended target */
1826 	bbe = i915_gem_object_create_internal(i915, PAGE_SIZE);
1827 	if (IS_ERR(bbe)) {
1828 		err = PTR_ERR(bbe);
1829 		goto out_vm;
1830 	}
1831 
1832 	batch = i915_gem_object_pin_map(bbe, I915_MAP_WC);
1833 	if (IS_ERR(batch)) {
1834 		err = PTR_ERR(batch);
1835 		goto out_put_bbe;
1836 	}
1837 	memset32(batch, MI_BATCH_BUFFER_END, PAGE_SIZE / sizeof(u32));
1838 	i915_gem_object_flush_map(bbe);
1839 	i915_gem_object_unpin_map(bbe);
1840 
1841 	act = i915_gem_object_create_internal(i915, PAGE_SIZE);
1842 	if (IS_ERR(act)) {
1843 		err = PTR_ERR(act);
1844 		goto out_put_bbe;
1845 	}
1846 
1847 	/* Track the execution of each request by writing into a different slot */
1848 	batch = i915_gem_object_pin_map(act, I915_MAP_WC);
1849 	if (IS_ERR(batch)) {
1850 		err = PTR_ERR(batch);
1851 		goto out_put_act;
1852 	}
1853 	for (i = 0; i < count; i++) {
1854 		u32 *cs = batch + i * 64 / sizeof(*cs);
1855 		u64 addr = (vm->total - PAGE_SIZE) + i * sizeof(u32);
1856 
1857 		GEM_BUG_ON(INTEL_GEN(i915) < 6);
1858 		cs[0] = MI_STORE_DWORD_IMM_GEN4;
1859 		if (INTEL_GEN(i915) >= 8) {
1860 			cs[1] = lower_32_bits(addr);
1861 			cs[2] = upper_32_bits(addr);
1862 			cs[3] = i;
1863 			cs[4] = MI_NOOP;
1864 			cs[5] = MI_BATCH_BUFFER_START_GEN8;
1865 		} else {
1866 			cs[1] = 0;
1867 			cs[2] = lower_32_bits(addr);
1868 			cs[3] = i;
1869 			cs[4] = MI_NOOP;
1870 			cs[5] = MI_BATCH_BUFFER_START;
1871 		}
1872 	}
1873 
1874 	out = i915_gem_object_create_internal(i915, PAGE_SIZE);
1875 	if (IS_ERR(out)) {
1876 		err = PTR_ERR(out);
1877 		goto out_put_batch;
1878 	}
1879 	i915_gem_object_set_cache_coherency(out, I915_CACHING_CACHED);
1880 
1881 	vma = i915_vma_instance(out, vm, NULL);
1882 	if (IS_ERR(vma)) {
1883 		err = PTR_ERR(vma);
1884 		goto out_put_out;
1885 	}
1886 
1887 	err = i915_vma_pin(vma, 0, 0,
1888 			   PIN_USER |
1889 			   PIN_OFFSET_FIXED |
1890 			   (vm->total - PAGE_SIZE));
1891 	if (err)
1892 		goto out_put_out;
1893 	GEM_BUG_ON(vma->node.start != vm->total - PAGE_SIZE);
1894 
1895 	result = i915_gem_object_pin_map(out, I915_MAP_WB);
1896 	if (IS_ERR(result)) {
1897 		err = PTR_ERR(result);
1898 		goto out_put_out;
1899 	}
1900 
1901 	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
1902 		IGT_TIMEOUT(end_time);
1903 		unsigned long pass = 0;
1904 
1905 		if (!intel_engine_can_store_dword(ce->engine))
1906 			continue;
1907 
1908 		while (!__igt_timeout(end_time, NULL)) {
1909 			struct i915_vm_pt_stash stash = {};
1910 			struct i915_request *rq;
1911 			u64 offset;
1912 
1913 			offset = igt_random_offset(&prng,
1914 						   0, vm->total - PAGE_SIZE,
1915 						   chunk_size, PAGE_SIZE);
1916 
1917 			memset32(result, STACK_MAGIC, PAGE_SIZE / sizeof(u32));
1918 
1919 			vma = i915_vma_instance(bbe, vm, NULL);
1920 			if (IS_ERR(vma)) {
1921 				err = PTR_ERR(vma);
1922 				goto end;
1923 			}
1924 
1925 			err = vma->ops->set_pages(vma);
1926 			if (err)
1927 				goto end;
1928 
1929 			err = i915_vm_alloc_pt_stash(vm, &stash, chunk_size);
1930 			if (err)
1931 				goto end;
1932 
1933 			err = i915_vm_pin_pt_stash(vm, &stash);
1934 			if (err) {
1935 				i915_vm_free_pt_stash(vm, &stash);
1936 				goto end;
1937 			}
1938 
1939 			vm->allocate_va_range(vm, &stash, offset, chunk_size);
1940 
1941 			i915_vm_free_pt_stash(vm, &stash);
1942 
1943 			/* Prime the TLB with the dummy pages */
1944 			for (i = 0; i < count; i++) {
1945 				vma->node.start = offset + i * PAGE_SIZE;
1946 				vm->insert_entries(vm, vma, I915_CACHE_NONE, 0);
1947 
1948 				rq = submit_batch(ce, vma->node.start);
1949 				if (IS_ERR(rq)) {
1950 					err = PTR_ERR(rq);
1951 					goto end;
1952 				}
1953 				i915_request_put(rq);
1954 			}
1955 
1956 			vma->ops->clear_pages(vma);
1957 
1958 			err = context_sync(ce);
1959 			if (err) {
1960 				pr_err("%s: dummy setup timed out\n",
1961 				       ce->engine->name);
1962 				goto end;
1963 			}
1964 
1965 			vma = i915_vma_instance(act, vm, NULL);
1966 			if (IS_ERR(vma)) {
1967 				err = PTR_ERR(vma);
1968 				goto end;
1969 			}
1970 
1971 			err = vma->ops->set_pages(vma);
1972 			if (err)
1973 				goto end;
1974 
1975 			/* Replace the TLB with target batches */
1976 			for (i = 0; i < count; i++) {
1977 				struct i915_request *rq;
1978 				u32 *cs = batch + i * 64 / sizeof(*cs);
1979 				u64 addr;
1980 
1981 				vma->node.start = offset + i * PAGE_SIZE;
1982 				vm->insert_entries(vm, vma, I915_CACHE_NONE, 0);
1983 
1984 				addr = vma->node.start + i * 64;
1985 				cs[4] = MI_NOOP;
1986 				cs[6] = lower_32_bits(addr);
1987 				cs[7] = upper_32_bits(addr);
1988 				wmb();
1989 
1990 				rq = submit_batch(ce, addr);
1991 				if (IS_ERR(rq)) {
1992 					err = PTR_ERR(rq);
1993 					goto end;
1994 				}
1995 
1996 				/* Wait until the context chain has started */
1997 				if (i == 0) {
1998 					while (READ_ONCE(result[i]) &&
1999 					       !i915_request_completed(rq))
2000 						cond_resched();
2001 				} else {
2002 					end_spin(batch, i - 1);
2003 				}
2004 
2005 				i915_request_put(rq);
2006 			}
2007 			end_spin(batch, count - 1);
2008 
2009 			vma->ops->clear_pages(vma);
2010 
2011 			err = context_sync(ce);
2012 			if (err) {
2013 				pr_err("%s: writes timed out\n",
2014 				       ce->engine->name);
2015 				goto end;
2016 			}
2017 
2018 			for (i = 0; i < count; i++) {
2019 				if (result[i] != i) {
2020 					pr_err("%s: Write lost on pass %lu, at offset %llx, index %d, found %x, expected %x\n",
2021 					       ce->engine->name, pass,
2022 					       offset, i, result[i], i);
2023 					err = -EINVAL;
2024 					goto end;
2025 				}
2026 			}
2027 
2028 			vm->clear_range(vm, offset, chunk_size);
2029 			pass++;
2030 		}
2031 	}
2032 end:
2033 	if (igt_flush_test(i915))
2034 		err = -EIO;
2035 	i915_gem_context_unlock_engines(ctx);
2036 	i915_gem_object_unpin_map(out);
2037 out_put_out:
2038 	i915_gem_object_put(out);
2039 out_put_batch:
2040 	i915_gem_object_unpin_map(act);
2041 out_put_act:
2042 	i915_gem_object_put(act);
2043 out_put_bbe:
2044 	i915_gem_object_put(bbe);
2045 out_vm:
2046 	i915_vm_put(vm);
2047 out_unlock:
2048 	fput(file);
2049 	return err;
2050 }
2051 
2052 int i915_gem_gtt_live_selftests(struct drm_i915_private *i915)
2053 {
2054 	static const struct i915_subtest tests[] = {
2055 		SUBTEST(igt_ppgtt_alloc),
2056 		SUBTEST(igt_ppgtt_lowlevel),
2057 		SUBTEST(igt_ppgtt_drunk),
2058 		SUBTEST(igt_ppgtt_walk),
2059 		SUBTEST(igt_ppgtt_pot),
2060 		SUBTEST(igt_ppgtt_fill),
2061 		SUBTEST(igt_ppgtt_shrink),
2062 		SUBTEST(igt_ppgtt_shrink_boom),
2063 		SUBTEST(igt_ggtt_lowlevel),
2064 		SUBTEST(igt_ggtt_drunk),
2065 		SUBTEST(igt_ggtt_walk),
2066 		SUBTEST(igt_ggtt_pot),
2067 		SUBTEST(igt_ggtt_fill),
2068 		SUBTEST(igt_ggtt_page),
2069 		SUBTEST(igt_cs_tlb),
2070 	};
2071 
2072 	GEM_BUG_ON(offset_in_page(i915->ggtt.vm.total));
2073 
2074 	return i915_subtests(tests, i915);
2075 }
2076