/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2017 Intel Corporation
 */

#include <linux/prime_numbers.h>

#include "gem/i915_gem_pm.h"
#include "gt/intel_engine_pm.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_requests.h"
#include "gt/intel_reset.h"
#include "i915_selftest.h"

#include "gem/selftests/igt_gem_utils.h"
#include "selftests/i915_random.h"
#include "selftests/igt_flush_test.h"
#include "selftests/igt_live_test.h"
#include "selftests/igt_reset.h"
#include "selftests/igt_spinner.h"
#include "selftests/mock_drm.h"
#include "selftests/mock_gem_device.h"

#include "huge_gem_object.h"
#include "igt_gem_utils.h"

#define DW_PER_PAGE (PAGE_SIZE / sizeof(u32))

static inline struct i915_address_space *ctx_vm(struct i915_gem_context *ctx)
{
	/* single threaded, private ctx */
	return rcu_dereference_protected(ctx->vm, true);
}

static int live_nop_switch(void *arg)
{
	const unsigned int nctx = 1024;
	struct drm_i915_private *i915 = arg;
	struct intel_engine_cs *engine;
	struct i915_gem_context **ctx;
	struct igt_live_test t;
	struct file *file;
	unsigned long n;
	int err = -ENODEV;

	/*
	 * Create as many contexts as we can feasibly get away with
	 * and check we can switch between them rapidly.
	 *
	 * Serves as very simple stress test for submission and HW switching
	 * between contexts.
	 */

	if (!DRIVER_CAPS(i915)->has_logical_contexts)
		return 0;

	file = mock_file(i915);
	if (IS_ERR(file))
		return PTR_ERR(file);

	ctx = kcalloc(nctx, sizeof(*ctx), GFP_KERNEL);
	if (!ctx) {
		err = -ENOMEM;
		goto out_file;
	}

	for (n = 0; n < nctx; n++) {
		ctx[n] = live_context(i915, file);
		if (IS_ERR(ctx[n])) {
			err = PTR_ERR(ctx[n]);
			goto out_file;
		}
	}

	for_each_uabi_engine(engine, i915) {
		struct i915_request *rq = NULL;
		unsigned long end_time, prime;
		ktime_t times[2] = {};

		times[0] = ktime_get_raw();
		for (n = 0; n < nctx; n++) {
			struct i915_request *this;

			this = igt_request_alloc(ctx[n], engine);
			if (IS_ERR(this)) {
				err = PTR_ERR(this);
				goto out_file;
			}
			if (rq) {
				i915_request_await_dma_fence(this, &rq->fence);
				i915_request_put(rq);
			}
			rq = i915_request_get(this);
			i915_request_add(this);
		}
		if (i915_request_wait(rq, 0, HZ / 5) < 0) {
			pr_err("Failed to populate %d contexts\n", nctx);
			intel_gt_set_wedged(&i915->gt);
			i915_request_put(rq);
			err = -EIO;
			goto out_file;
		}
		i915_request_put(rq);

		times[1] = ktime_get_raw();

		pr_info("Populated %d contexts on %s in %lluns\n",
			nctx, engine->name, ktime_to_ns(times[1] - times[0]));

		err = igt_live_test_begin(&t, i915, __func__, engine->name);
		if (err)
			goto out_file;

		end_time = jiffies + i915_selftest.timeout_jiffies;
		for_each_prime_number_from(prime, 2, 8192) {
			times[1] = ktime_get_raw();

			rq = NULL;
			for (n = 0; n < prime; n++) {
				struct i915_request *this;

				this = igt_request_alloc(ctx[n % nctx], engine);
				if (IS_ERR(this)) {
					err = PTR_ERR(this);
					goto out_file;
				}

				if (rq) { /* Force submission order */
					i915_request_await_dma_fence(this, &rq->fence);
					i915_request_put(rq);
				}

				/*
				 * This space is left intentionally blank.
				 *
				 * We do not actually want to perform any
				 * action with this request, we just want
				 * to measure the latency in allocation
				 * and submission of our breadcrumbs -
				 * ensuring that the bare request is sufficient
				 * for the system to work (i.e. proper HEAD
				 * tracking of the rings, interrupt handling,
				 * etc). It also gives us the lowest bounds
				 * for latency.
				 */

				rq = i915_request_get(this);
				i915_request_add(this);
			}
			GEM_BUG_ON(!rq);
			if (i915_request_wait(rq, 0, HZ / 5) < 0) {
				pr_err("Switching between %ld contexts timed out\n",
				       prime);
				intel_gt_set_wedged(&i915->gt);
				i915_request_put(rq);
				break;
			}
			i915_request_put(rq);

			times[1] = ktime_sub(ktime_get_raw(), times[1]);
			if (prime == 2)
				times[0] = times[1];

			if (__igt_timeout(end_time, NULL))
				break;
		}

		err = igt_live_test_end(&t);
		if (err)
			goto out_file;

		pr_info("Switch latencies on %s: 1 = %lluns, %lu = %lluns\n",
			engine->name,
			ktime_to_ns(times[0]),
			prime - 1, div64_u64(ktime_to_ns(times[1]), prime - 1));
	}

out_file:
	fput(file);
	return err;
}

struct parallel_switch {
	struct kthread_worker *worker;
	struct kthread_work work;
	struct intel_context *ce[2];
	int result;
};

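/*
 * Submit one request on each context in turn, chained together with
 * fences, then wait for the pair to complete before looping again -
 * measuring fully synchronous context-switch rates.
 */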
static void __live_parallel_switch1(struct kthread_work *work)
{
	struct parallel_switch *arg =
		container_of(work, typeof(*arg), work);
	IGT_TIMEOUT(end_time);
	unsigned long count;

	count = 0;
	arg->result = 0;
	do {
		struct i915_request *rq = NULL;
		int n;

		for (n = 0; !arg->result && n < ARRAY_SIZE(arg->ce); n++) {
			struct i915_request *prev = rq;

			rq = i915_request_create(arg->ce[n]);
			if (IS_ERR(rq)) {
				i915_request_put(prev);
				arg->result = PTR_ERR(rq);
				break;
			}

			i915_request_get(rq);
			if (prev) {
				arg->result =
					i915_request_await_dma_fence(rq,
								     &prev->fence);
				i915_request_put(prev);
			}

			i915_request_add(rq);
		}

		if (IS_ERR_OR_NULL(rq))
			break;

		if (i915_request_wait(rq, 0, HZ) < 0)
			arg->result = -ETIME;

		i915_request_put(rq);

		count++;
	} while (!arg->result && !__igt_timeout(end_time, NULL));

	pr_info("%s: %lu switches (sync) <%d>\n",
		arg->ce[0]->engine->name, count, arg->result);
}

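/*
 * As above, but pipeline the switches: keep submitting chained requests
 * without ever waiting, only reaping the final request once the timeout
 * expires - measuring peak submission throughput.
 */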
static void __live_parallel_switchN(struct kthread_work *work)
{
	struct parallel_switch *arg =
		container_of(work, typeof(*arg), work);
	struct i915_request *rq = NULL;
	IGT_TIMEOUT(end_time);
	unsigned long count;
	int n;

	count = 0;
	arg->result = 0;
	do {
		for (n = 0; !arg->result && n < ARRAY_SIZE(arg->ce); n++) {
			struct i915_request *prev = rq;

			rq = i915_request_create(arg->ce[n]);
			if (IS_ERR(rq)) {
				i915_request_put(prev);
				arg->result = PTR_ERR(rq);
				break;
			}

			i915_request_get(rq);
			if (prev) {
				arg->result =
					i915_request_await_dma_fence(rq,
								     &prev->fence);
				i915_request_put(prev);
			}

			i915_request_add(rq);
		}

		count++;
	} while (!arg->result && !__igt_timeout(end_time, NULL));

	if (!IS_ERR_OR_NULL(rq))
		i915_request_put(rq);

	pr_info("%s: %lu switches (many) <%d>\n",
		arg->ce[0]->engine->name, count, arg->result);
}

static int live_parallel_switch(void *arg)
{
	struct drm_i915_private *i915 = arg;
	static void (* const func[])(struct kthread_work *) = {
		__live_parallel_switch1,
		__live_parallel_switchN,
		NULL,
	};
	struct parallel_switch *data = NULL;
	struct i915_gem_engines *engines;
	struct i915_gem_engines_iter it;
	void (* const *fn)(struct kthread_work *);
	struct i915_gem_context *ctx;
	struct intel_context *ce;
	struct file *file;
	int n, m, count;
	int err = 0;

	/*
	 * Check we can process switches on all engines simultaneously.
	 */

	if (!DRIVER_CAPS(i915)->has_logical_contexts)
		return 0;

	file = mock_file(i915);
	if (IS_ERR(file))
		return PTR_ERR(file);

	ctx = live_context(i915, file);
	if (IS_ERR(ctx)) {
		err = PTR_ERR(ctx);
		goto out_file;
	}

	engines = i915_gem_context_lock_engines(ctx);
	count = engines->num_engines;

	data = kcalloc(count, sizeof(*data), GFP_KERNEL);
	if (!data) {
		i915_gem_context_unlock_engines(ctx);
		err = -ENOMEM;
		goto out_file;
	}

	m = 0; /* Use the first context as our template for the engines */
	for_each_gem_engine(ce, engines, it) {
		err = intel_context_pin(ce);
		if (err) {
			i915_gem_context_unlock_engines(ctx);
			goto out;
		}
		data[m++].ce[0] = intel_context_get(ce);
	}
	i915_gem_context_unlock_engines(ctx);

	/* Clone the same set of engines into the other contexts */
	for (n = 1; n < ARRAY_SIZE(data->ce); n++) {
		ctx = live_context(i915, file);
		if (IS_ERR(ctx)) {
			err = PTR_ERR(ctx);
			goto out;
		}

		for (m = 0; m < count; m++) {
			if (!data[m].ce[0])
				continue;

			ce = intel_context_create(data[m].ce[0]->engine);
			if (IS_ERR(ce)) {
				err = PTR_ERR(ce);
				goto out;
			}

			err = intel_context_pin(ce);
			if (err) {
				intel_context_put(ce);
				goto out;
			}

			data[m].ce[n] = ce;
		}
	}

	for (n = 0; n < count; n++) {
		struct kthread_worker *worker;

		if (!data[n].ce[0])
			continue;

		worker = kthread_create_worker(0, "igt/parallel:%s",
					       data[n].ce[0]->engine->name);
		if (IS_ERR(worker)) {
			err = PTR_ERR(worker);
			goto out;
		}

		data[n].worker = worker;
	}

	for (fn = func; !err && *fn; fn++) {
		struct igt_live_test t;

		err = igt_live_test_begin(&t, i915, __func__, "");
		if (err)
			break;

		for (n = 0; n < count; n++) {
			if (!data[n].ce[0])
				continue;

			data[n].result = 0;
			kthread_init_work(&data[n].work, *fn);
			kthread_queue_work(data[n].worker, &data[n].work);
		}

		for (n = 0; n < count; n++) {
			if (data[n].ce[0]) {
				kthread_flush_work(&data[n].work);
				if (data[n].result && !err)
					err = data[n].result;
			}
		}

		if (igt_live_test_end(&t)) {
			err = err ?: -EIO;
			break;
		}
	}

out:
	for (n = 0; n < count; n++) {
		for (m = 0; m < ARRAY_SIZE(data->ce); m++) {
			if (!data[n].ce[m])
				continue;

			intel_context_unpin(data[n].ce[m]);
			intel_context_put(data[n].ce[m]);
		}

		if (data[n].worker)
			kthread_destroy_worker(data[n].worker);
	}
	kfree(data);
out_file:
	fput(file);
	return err;
}

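/*
 * A huge_gem_object is larger in the GTT (its dma/fake size) than the
 * physical pages actually allocated for it (its phys/real size); the
 * GTT mapping repeatedly wraps onto the same real pages.
 */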
static unsigned long real_page_count(struct drm_i915_gem_object *obj)
{
	return huge_gem_object_phys_size(obj) >> PAGE_SHIFT;
}

static unsigned long fake_page_count(struct drm_i915_gem_object *obj)
{
	return huge_gem_object_dma_size(obj) >> PAGE_SHIFT;
}

static int gpu_fill(struct intel_context *ce,
		    struct drm_i915_gem_object *obj,
		    unsigned int dw)
{
	struct i915_vma *vma;
	int err;

	GEM_BUG_ON(obj->base.size > ce->vm->total);
	GEM_BUG_ON(!intel_engine_can_store_dword(ce->engine));

	vma = i915_vma_instance(obj, ce->vm, NULL);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	err = i915_vma_pin(vma, 0, 0, PIN_HIGH | PIN_USER);
	if (err)
		return err;

	/*
	 * Within the GTT the huge object maps every page onto
	 * its 1024 real pages (using phys_pfn = dma_pfn % 1024).
	 * We set the nth dword within the page using the nth
	 * mapping via the GTT - this should exercise the GTT mapping
	 * whilst checking that each context provides a unique view
	 * into the object.
	 */
	err = igt_gpu_fill_dw(ce, vma,
			      (dw * real_page_count(obj)) << PAGE_SHIFT |
			      (dw * sizeof(u32)),
			      real_page_count(obj),
			      dw);
	i915_vma_unpin(vma);

	return err;
}

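/*
 * Fill every dword of every real page with the same value using the
 * CPU, flushing the writes out of the cache where the platform lacks
 * LLC coherency.
 */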
static int cpu_fill(struct drm_i915_gem_object *obj, u32 value)
{
	const bool has_llc = HAS_LLC(to_i915(obj->base.dev));
	unsigned int n, m, need_flush;
	int err;

	i915_gem_object_lock(obj, NULL);
	err = i915_gem_object_prepare_write(obj, &need_flush);
	if (err)
		goto out;

	for (n = 0; n < real_page_count(obj); n++) {
		u32 *map;

		map = kmap_atomic(i915_gem_object_get_page(obj, n));
		for (m = 0; m < DW_PER_PAGE; m++)
			map[m] = value;
		if (!has_llc)
			drm_clflush_virt_range(map, PAGE_SIZE);
		kunmap_atomic(map);
	}

	i915_gem_object_finish_access(obj);
	obj->read_domains = I915_GEM_DOMAIN_GTT | I915_GEM_DOMAIN_CPU;
	obj->write_domain = 0;
out:
	i915_gem_object_unlock(obj);
	return err;
}

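/*
 * Verify via the CPU that the first max dwords of each real page hold
 * the ascending values written by the GPU, and that the remainder of
 * each page still holds the STACK_MAGIC fill from cpu_fill().
 */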
static noinline int cpu_check(struct drm_i915_gem_object *obj,
			      unsigned int idx, unsigned int max)
{
	unsigned int n, m, needs_flush;
	int err;

	i915_gem_object_lock(obj, NULL);
	err = i915_gem_object_prepare_read(obj, &needs_flush);
	if (err)
		goto out_unlock;

	for (n = 0; n < real_page_count(obj); n++) {
		u32 *map;

		map = kmap_atomic(i915_gem_object_get_page(obj, n));
		if (needs_flush & CLFLUSH_BEFORE)
			drm_clflush_virt_range(map, PAGE_SIZE);

		for (m = 0; m < max; m++) {
			if (map[m] != m) {
				pr_err("%pS: Invalid value at object %d page %d/%ld, offset %d/%d: found %x expected %x\n",
				       __builtin_return_address(0), idx,
				       n, real_page_count(obj), m, max,
				       map[m], m);
				err = -EINVAL;
				goto out_unmap;
			}
		}

		for (; m < DW_PER_PAGE; m++) {
			if (map[m] != STACK_MAGIC) {
				pr_err("%pS: Invalid value at object %d page %d, offset %d: found %x expected %x (uninitialised)\n",
				       __builtin_return_address(0), idx, n, m,
				       map[m], STACK_MAGIC);
				err = -EINVAL;
				goto out_unmap;
			}
		}

out_unmap:
		kunmap_atomic(map);
		if (err)
			break;
	}

	i915_gem_object_finish_access(obj);
out_unlock:
	i915_gem_object_unlock(obj);
	return err;
}

static int file_add_object(struct file *file, struct drm_i915_gem_object *obj)
{
	int err;

	GEM_BUG_ON(obj->base.handle_count);

	/* tie the object to the drm_file for easy reaping */
	err = idr_alloc(&to_drm_file(file)->object_idr,
			&obj->base, 1, 0, GFP_KERNEL);
	if (err < 0)
		return err;

	i915_gem_object_get(obj);
	obj->base.handle_count++;
	return 0;
}

static struct drm_i915_gem_object *
create_test_object(struct i915_address_space *vm,
		   struct file *file,
		   struct list_head *objects)
{
	struct drm_i915_gem_object *obj;
	u64 size;
	int err;

	/* Keep in GEM's good graces */
	intel_gt_retire_requests(vm->gt);

	size = min(vm->total / 2, 1024ull * DW_PER_PAGE * PAGE_SIZE);
	size = round_down(size, DW_PER_PAGE * PAGE_SIZE);

	obj = huge_gem_object(vm->i915, DW_PER_PAGE * PAGE_SIZE, size);
	if (IS_ERR(obj))
		return obj;

	err = file_add_object(file, obj);
	i915_gem_object_put(obj);
	if (err)
		return ERR_PTR(err);

	err = cpu_fill(obj, STACK_MAGIC);
	if (err) {
		pr_err("Failed to fill object with cpu, err=%d\n",
		       err);
		return ERR_PTR(err);
	}

	list_add_tail(&obj->st_link, objects);
	return obj;
}

static unsigned long max_dwords(struct drm_i915_gem_object *obj)
{
	unsigned long npages = fake_page_count(obj);

	GEM_BUG_ON(!IS_ALIGNED(npages, DW_PER_PAGE));
	return npages / DW_PER_PAGE;
}

static void throttle_release(struct i915_request **q, int count)
{
	int i;

	for (i = 0; i < count; i++) {
		if (IS_ERR_OR_NULL(q[i]))
			continue;

		i915_request_put(fetch_and_zero(&q[i]));
	}
}

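/*
 * Maintain a sliding window of in-flight requests: wait for the oldest
 * request in the queue to complete, shift the queue down and append a
 * fresh request, so submission never runs too far ahead of the GPU.
 */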
static int throttle(struct intel_context *ce,
		    struct i915_request **q, int count)
{
	int i;

	if (!IS_ERR_OR_NULL(q[0])) {
		if (i915_request_wait(q[0],
				      I915_WAIT_INTERRUPTIBLE,
				      MAX_SCHEDULE_TIMEOUT) < 0)
			return -EINTR;

		i915_request_put(q[0]);
	}

	for (i = 0; i < count - 1; i++)
		q[i] = q[i + 1];

	q[i] = intel_context_create_request(ce);
	if (IS_ERR(q[i]))
		return PTR_ERR(q[i]);

	i915_request_get(q[i]);
	i915_request_add(q[i]);

	return 0;
}

static int igt_ctx_exec(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_engine_cs *engine;
	int err = -ENODEV;

	/*
	 * Create a few different contexts (with different mm) and write
	 * through each ctx/mm using the GPU making sure those writes end
	 * up in the expected pages of our obj.
	 */

	if (!DRIVER_CAPS(i915)->has_logical_contexts)
		return 0;

	for_each_uabi_engine(engine, i915) {
		struct drm_i915_gem_object *obj = NULL;
		unsigned long ncontexts, ndwords, dw;
		struct i915_request *tq[5] = {};
		struct igt_live_test t;
		IGT_TIMEOUT(end_time);
		LIST_HEAD(objects);
		struct file *file;

		if (!intel_engine_can_store_dword(engine))
			continue;

		if (!engine->context_size)
			continue; /* No logical context support in HW */

		file = mock_file(i915);
		if (IS_ERR(file))
			return PTR_ERR(file);

		err = igt_live_test_begin(&t, i915, __func__, engine->name);
		if (err)
			goto out_file;

		ncontexts = 0;
		ndwords = 0;
		dw = 0;
		while (!time_after(jiffies, end_time)) {
			struct i915_gem_context *ctx;
			struct intel_context *ce;

			ctx = kernel_context(i915, NULL);
			if (IS_ERR(ctx)) {
				err = PTR_ERR(ctx);
				goto out_file;
			}

			ce = i915_gem_context_get_engine(ctx, engine->legacy_idx);
			GEM_BUG_ON(IS_ERR(ce));

			if (!obj) {
				obj = create_test_object(ce->vm, file, &objects);
				if (IS_ERR(obj)) {
					err = PTR_ERR(obj);
					intel_context_put(ce);
					kernel_context_close(ctx);
					goto out_file;
				}
			}

			err = gpu_fill(ce, obj, dw);
			if (err) {
				pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) [full-ppgtt? %s], err=%d\n",
				       ndwords, dw, max_dwords(obj),
				       engine->name,
				       yesno(!!rcu_access_pointer(ctx->vm)),
				       err);
				intel_context_put(ce);
				kernel_context_close(ctx);
				goto out_file;
			}

			err = throttle(ce, tq, ARRAY_SIZE(tq));
			if (err) {
				intel_context_put(ce);
				kernel_context_close(ctx);
				goto out_file;
			}

			if (++dw == max_dwords(obj)) {
				obj = NULL;
				dw = 0;
			}

			ndwords++;
			ncontexts++;

			intel_context_put(ce);
			kernel_context_close(ctx);
		}

		pr_info("Submitted %lu contexts to %s, filling %lu dwords\n",
			ncontexts, engine->name, ndwords);

		ncontexts = dw = 0;
		list_for_each_entry(obj, &objects, st_link) {
			unsigned int rem =
				min_t(unsigned int, ndwords - dw, max_dwords(obj));

			err = cpu_check(obj, ncontexts++, rem);
			if (err)
				break;

			dw += rem;
		}

out_file:
		throttle_release(tq, ARRAY_SIZE(tq));
		if (igt_live_test_end(&t))
			err = -EIO;

		fput(file);
		if (err)
			return err;

		i915_gem_drain_freed_objects(i915);
	}

	return 0;
}

static int igt_shared_ctx_exec(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct i915_request *tq[5] = {};
	struct i915_gem_context *parent;
	struct intel_engine_cs *engine;
	struct igt_live_test t;
	struct file *file;
	int err = 0;

	/*
	 * Create a few different contexts with the same mm and write
	 * through each ctx using the GPU making sure those writes end
	 * up in the expected pages of our obj.
	 */
	if (!DRIVER_CAPS(i915)->has_logical_contexts)
		return 0;

	file = mock_file(i915);
	if (IS_ERR(file))
		return PTR_ERR(file);

	parent = live_context(i915, file);
	if (IS_ERR(parent)) {
		err = PTR_ERR(parent);
		goto out_file;
	}

	if (!parent->vm) { /* not full-ppgtt; nothing to share */
		err = 0;
		goto out_file;
	}

	err = igt_live_test_begin(&t, i915, __func__, "");
	if (err)
		goto out_file;

	for_each_uabi_engine(engine, i915) {
		unsigned long ncontexts, ndwords, dw;
		struct drm_i915_gem_object *obj = NULL;
		IGT_TIMEOUT(end_time);
		LIST_HEAD(objects);

		if (!intel_engine_can_store_dword(engine))
			continue;

		dw = 0;
		ndwords = 0;
		ncontexts = 0;
		while (!time_after(jiffies, end_time)) {
			struct i915_gem_context *ctx;
			struct intel_context *ce;

			ctx = kernel_context(i915, ctx_vm(parent));
			if (IS_ERR(ctx)) {
				err = PTR_ERR(ctx);
				goto out_test;
			}

			ce = i915_gem_context_get_engine(ctx, engine->legacy_idx);
			GEM_BUG_ON(IS_ERR(ce));

			if (!obj) {
				obj = create_test_object(ctx_vm(parent),
							 file, &objects);
				if (IS_ERR(obj)) {
					err = PTR_ERR(obj);
					intel_context_put(ce);
					kernel_context_close(ctx);
					goto out_test;
				}
			}

			err = gpu_fill(ce, obj, dw);
			if (err) {
				pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) [full-ppgtt? %s], err=%d\n",
				       ndwords, dw, max_dwords(obj),
				       engine->name,
				       yesno(!!rcu_access_pointer(ctx->vm)),
				       err);
				intel_context_put(ce);
				kernel_context_close(ctx);
				goto out_test;
			}

			err = throttle(ce, tq, ARRAY_SIZE(tq));
			if (err) {
				intel_context_put(ce);
				kernel_context_close(ctx);
				goto out_test;
			}

			if (++dw == max_dwords(obj)) {
				obj = NULL;
				dw = 0;
			}

			ndwords++;
			ncontexts++;

			intel_context_put(ce);
			kernel_context_close(ctx);
		}
		pr_info("Submitted %lu contexts to %s, filling %lu dwords\n",
			ncontexts, engine->name, ndwords);

		ncontexts = dw = 0;
		list_for_each_entry(obj, &objects, st_link) {
			unsigned int rem =
				min_t(unsigned int, ndwords - dw, max_dwords(obj));

			err = cpu_check(obj, ncontexts++, rem);
			if (err)
				goto out_test;

			dw += rem;
		}

		i915_gem_drain_freed_objects(i915);
	}
out_test:
	throttle_release(tq, ARRAY_SIZE(tq));
	if (igt_live_test_end(&t))
		err = -EIO;
out_file:
	fput(file);
	return err;
}

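/*
 * Build a batch that reads back GEN8_R_PWR_CLK_STATE with
 * MI_STORE_REGISTER_MEM, writing the register value into the first
 * dword of the target vma.
 */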
static int rpcs_query_batch(struct drm_i915_gem_object *rpcs, struct i915_vma *vma)
{
	u32 *cmd;

	GEM_BUG_ON(GRAPHICS_VER(vma->vm->i915) < 8);

	cmd = i915_gem_object_pin_map(rpcs, I915_MAP_WB);
	if (IS_ERR(cmd))
		return PTR_ERR(cmd);

	*cmd++ = MI_STORE_REGISTER_MEM_GEN8;
	*cmd++ = i915_mmio_reg_offset(GEN8_R_PWR_CLK_STATE);
	*cmd++ = lower_32_bits(vma->node.start);
	*cmd++ = upper_32_bits(vma->node.start);
	*cmd = MI_BATCH_BUFFER_END;

	__i915_gem_object_flush_map(rpcs, 0, 64);
	i915_gem_object_unpin_map(rpcs);

	intel_gt_chipset_flush(vma->vm->gt);

	return 0;
}

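/*
 * Submit the RPCS query batch on the given context, binding both the
 * result object and the batch into the context's vm, and hand back the
 * request so the caller can wait for the result.
 */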
static int
emit_rpcs_query(struct drm_i915_gem_object *obj,
		struct intel_context *ce,
		struct i915_request **rq_out)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct i915_request *rq;
	struct i915_gem_ww_ctx ww;
	struct i915_vma *batch;
	struct i915_vma *vma;
	struct drm_i915_gem_object *rpcs;
	int err;

	GEM_BUG_ON(!intel_engine_can_store_dword(ce->engine));

	if (GRAPHICS_VER(i915) < 8)
		return -EINVAL;

	vma = i915_vma_instance(obj, ce->vm, NULL);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	rpcs = i915_gem_object_create_internal(i915, PAGE_SIZE);
	if (IS_ERR(rpcs))
		return PTR_ERR(rpcs);

	batch = i915_vma_instance(rpcs, ce->vm, NULL);
	if (IS_ERR(batch)) {
		err = PTR_ERR(batch);
		goto err_put;
	}

	i915_gem_ww_ctx_init(&ww, false);
retry:
	err = i915_gem_object_lock(obj, &ww);
	if (!err)
		err = i915_gem_object_lock(rpcs, &ww);
	if (!err)
		err = i915_gem_object_set_to_gtt_domain(obj, false);
	if (!err)
		err = i915_vma_pin_ww(vma, &ww, 0, 0, PIN_USER);
	if (err)
		goto err_put;

	err = i915_vma_pin_ww(batch, &ww, 0, 0, PIN_USER);
	if (err)
		goto err_vma;

	err = rpcs_query_batch(rpcs, vma);
	if (err)
		goto err_batch;

	rq = i915_request_create(ce);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto err_batch;
	}

	err = i915_request_await_object(rq, batch->obj, false);
	if (err == 0)
		err = i915_vma_move_to_active(batch, rq, 0);
	if (err)
		goto skip_request;

	err = i915_request_await_object(rq, vma->obj, true);
	if (err == 0)
		err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
	if (err)
		goto skip_request;

	if (rq->engine->emit_init_breadcrumb) {
		err = rq->engine->emit_init_breadcrumb(rq);
		if (err)
			goto skip_request;
	}

	err = rq->engine->emit_bb_start(rq,
					batch->node.start, batch->node.size,
					0);
	if (err)
		goto skip_request;

	*rq_out = i915_request_get(rq);

skip_request:
	if (err)
		i915_request_set_error_once(rq, err);
	i915_request_add(rq);
err_batch:
	i915_vma_unpin(batch);
err_vma:
	i915_vma_unpin(vma);
err_put:
	if (err == -EDEADLK) {
		err = i915_gem_ww_ctx_backoff(&ww);
		if (!err)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);
	i915_gem_object_put(rpcs);
	return err;
}

#define TEST_IDLE	BIT(0)
#define TEST_BUSY	BIT(1)
#define TEST_RESET	BIT(2)

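/*
 * If the test wants the engine busy (TEST_BUSY or TEST_RESET), start a
 * spinner on the context so the SSEU reconfiguration happens against a
 * running workload.
 */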
static int
__sseu_prepare(const char *name,
	       unsigned int flags,
	       struct intel_context *ce,
	       struct igt_spinner **spin)
{
	struct i915_request *rq;
	int ret;

	*spin = NULL;
	if (!(flags & (TEST_BUSY | TEST_RESET)))
		return 0;

	*spin = kzalloc(sizeof(**spin), GFP_KERNEL);
	if (!*spin)
		return -ENOMEM;

	ret = igt_spinner_init(*spin, ce->engine->gt);
	if (ret)
		goto err_free;

	rq = igt_spinner_create_request(*spin, ce, MI_NOOP);
	if (IS_ERR(rq)) {
		ret = PTR_ERR(rq);
		goto err_fini;
	}

	i915_request_add(rq);

	if (!igt_wait_for_spinner(*spin, rq)) {
		pr_err("%s: Spinner failed to start!\n", name);
		ret = -ETIMEDOUT;
		goto err_end;
	}

	return 0;

err_end:
	igt_spinner_end(*spin);
err_fini:
	igt_spinner_fini(*spin);
err_free:
	kfree(fetch_and_zero(spin));
	return ret;
}

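/*
 * Run the RPCS query on the context (ending any spinner first so the
 * query can execute) and decode the slice count field from the
 * returned register value; returns the count, or a negative errno.
 */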
static int
__read_slice_count(struct intel_context *ce,
		   struct drm_i915_gem_object *obj,
		   struct igt_spinner *spin,
		   u32 *rpcs)
{
	struct i915_request *rq = NULL;
	u32 s_mask, s_shift;
	unsigned int cnt;
	u32 *buf, val;
	long ret;

	ret = emit_rpcs_query(obj, ce, &rq);
	if (ret)
		return ret;

	if (spin)
		igt_spinner_end(spin);

	ret = i915_request_wait(rq, 0, MAX_SCHEDULE_TIMEOUT);
	i915_request_put(rq);
	if (ret < 0)
		return ret;

	buf = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WB);
	if (IS_ERR(buf)) {
		ret = PTR_ERR(buf);
		return ret;
	}

	if (GRAPHICS_VER(ce->engine->i915) >= 11) {
		s_mask = GEN11_RPCS_S_CNT_MASK;
		s_shift = GEN11_RPCS_S_CNT_SHIFT;
	} else {
		s_mask = GEN8_RPCS_S_CNT_MASK;
		s_shift = GEN8_RPCS_S_CNT_SHIFT;
	}

	val = *buf;
	cnt = (val & s_mask) >> s_shift;
	*rpcs = val;

	i915_gem_object_unpin_map(obj);

	return cnt;
}

static int
__check_rpcs(const char *name, u32 rpcs, int slices, unsigned int expected,
	     const char *prefix, const char *suffix)
{
	if (slices == expected)
		return 0;

	if (slices < 0) {
		pr_err("%s: %s read slice count failed with %d%s\n",
		       name, prefix, slices, suffix);
		return slices;
	}

	pr_err("%s: %s slice count %d is not %u%s\n",
	       name, prefix, slices, expected, suffix);

	pr_info("RPCS=0x%x; %u%sx%u%s\n",
		rpcs, slices,
		(rpcs & GEN8_RPCS_S_CNT_ENABLE) ? "*" : "",
		(rpcs & GEN8_RPCS_SS_CNT_MASK) >> GEN8_RPCS_SS_CNT_SHIFT,
		(rpcs & GEN8_RPCS_SS_CNT_ENABLE) ? "*" : "");

	return -EINVAL;
}

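/*
 * Check that the context reports the expected slice count (optionally
 * after an engine reset) while the kernel context still reports the
 * full slice configuration.
 */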
static int
__sseu_finish(const char *name,
	      unsigned int flags,
	      struct intel_context *ce,
	      struct drm_i915_gem_object *obj,
	      unsigned int expected,
	      struct igt_spinner *spin)
{
	unsigned int slices = hweight32(ce->engine->sseu.slice_mask);
	u32 rpcs = 0;
	int ret = 0;

	if (flags & TEST_RESET) {
		ret = intel_engine_reset(ce->engine, "sseu");
		if (ret)
			goto out;
	}

	ret = __read_slice_count(ce, obj,
				 flags & TEST_RESET ? NULL : spin, &rpcs);
	ret = __check_rpcs(name, rpcs, ret, expected, "Context", "!");
	if (ret)
		goto out;

	ret = __read_slice_count(ce->engine->kernel_context, obj, NULL, &rpcs);
	ret = __check_rpcs(name, rpcs, ret, slices, "Kernel context", "!");

out:
	if (spin)
		igt_spinner_end(spin);

	if ((flags & TEST_IDLE) && ret == 0) {
		ret = igt_flush_test(ce->engine->i915);
		if (ret)
			return ret;

		ret = __read_slice_count(ce, obj, NULL, &rpcs);
		ret = __check_rpcs(name, rpcs, ret, expected,
				   "Context", " after idle!");
	}

	return ret;
}

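/*
 * One reconfiguration cycle: prepare the engine (optionally spinning),
 * apply the requested SSEU configuration and verify the hardware
 * reflects it.
 */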
static int
__sseu_test(const char *name,
	    unsigned int flags,
	    struct intel_context *ce,
	    struct drm_i915_gem_object *obj,
	    struct intel_sseu sseu)
{
	struct igt_spinner *spin = NULL;
	int ret;

	intel_engine_pm_get(ce->engine);

	ret = __sseu_prepare(name, flags, ce, &spin);
	if (ret)
		goto out_pm;

	ret = intel_context_reconfigure_sseu(ce, sseu);
	if (ret)
		goto out_spin;

	ret = __sseu_finish(name, flags, ce, obj,
			    hweight32(sseu.slice_mask), spin);

out_spin:
	if (spin) {
		igt_spinner_end(spin);
		igt_spinner_fini(spin);
		kfree(spin);
	}
out_pm:
	intel_engine_pm_put(ce->engine);
	return ret;
}

static int
__igt_ctx_sseu(struct drm_i915_private *i915,
	       const char *name,
	       unsigned int flags)
{
	struct drm_i915_gem_object *obj;
	int inst = 0;
	int ret = 0;

	if (GRAPHICS_VER(i915) < 9)
		return 0;

	if (flags & TEST_RESET)
		igt_global_reset_lock(&i915->gt);

	obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
	if (IS_ERR(obj)) {
		ret = PTR_ERR(obj);
		goto out_unlock;
	}

	do {
		struct intel_engine_cs *engine;
		struct intel_context *ce;
		struct intel_sseu pg_sseu;

		engine = intel_engine_lookup_user(i915,
						  I915_ENGINE_CLASS_RENDER,
						  inst++);
		if (!engine)
			break;

		if (hweight32(engine->sseu.slice_mask) < 2)
			continue;

		if (!engine->gt->info.sseu.has_slice_pg)
			continue;

		/*
		 * Gen11 VME friendly power-gated configuration with
		 * half enabled sub-slices.
		 */
		pg_sseu = engine->sseu;
		pg_sseu.slice_mask = 1;
		pg_sseu.subslice_mask =
			~(~0 << (hweight32(engine->sseu.subslice_mask) / 2));

		pr_info("%s: SSEU subtest '%s', flags=%x, def_slices=%u, pg_slices=%u\n",
			engine->name, name, flags,
			hweight32(engine->sseu.slice_mask),
			hweight32(pg_sseu.slice_mask));

		ce = intel_context_create(engine);
		if (IS_ERR(ce)) {
			ret = PTR_ERR(ce);
			goto out_put;
		}

		ret = intel_context_pin(ce);
		if (ret)
			goto out_ce;

		/* First set the default mask. */
		ret = __sseu_test(name, flags, ce, obj, engine->sseu);
		if (ret)
			goto out_unpin;

		/* Then set a power-gated configuration. */
		ret = __sseu_test(name, flags, ce, obj, pg_sseu);
		if (ret)
			goto out_unpin;

		/* Back to defaults. */
		ret = __sseu_test(name, flags, ce, obj, engine->sseu);
		if (ret)
			goto out_unpin;

		/* One last power-gated configuration for the road. */
		ret = __sseu_test(name, flags, ce, obj, pg_sseu);
		if (ret)
			goto out_unpin;

out_unpin:
		intel_context_unpin(ce);
out_ce:
		intel_context_put(ce);
	} while (!ret);

	if (igt_flush_test(i915))
		ret = -EIO;

out_put:
	i915_gem_object_put(obj);

out_unlock:
	if (flags & TEST_RESET)
		igt_global_reset_unlock(&i915->gt);

	if (ret)
		pr_err("%s: Failed with %d!\n", name, ret);

	return ret;
}

static int igt_ctx_sseu(void *arg)
{
	struct {
		const char *name;
		unsigned int flags;
	} *phase, phases[] = {
		{ .name = "basic", .flags = 0 },
		{ .name = "idle", .flags = TEST_IDLE },
		{ .name = "busy", .flags = TEST_BUSY },
		{ .name = "busy-reset", .flags = TEST_BUSY | TEST_RESET },
		{ .name = "busy-idle", .flags = TEST_BUSY | TEST_IDLE },
		{ .name = "reset-idle", .flags = TEST_RESET | TEST_IDLE },
	};
	unsigned int i;
	int ret = 0;

	for (i = 0, phase = phases; ret == 0 && i < ARRAY_SIZE(phases);
	     i++, phase++)
		ret = __igt_ctx_sseu(arg, phase->name, phase->flags);

	return ret;
}

static int igt_ctx_readonly(void *arg)
{
	struct drm_i915_private *i915 = arg;
	unsigned long idx, ndwords, dw, num_engines;
	struct drm_i915_gem_object *obj = NULL;
	struct i915_request *tq[5] = {};
	struct i915_gem_engines_iter it;
	struct i915_address_space *vm;
	struct i915_gem_context *ctx;
	struct intel_context *ce;
	struct igt_live_test t;
	I915_RND_STATE(prng);
	IGT_TIMEOUT(end_time);
	LIST_HEAD(objects);
	struct file *file;
	int err = -ENODEV;

	/*
	 * Create a few read-only objects (with the occasional writable object)
	 * and try to write into these objects, checking that the GPU discards
	 * any write to a read-only object.
	 */

	file = mock_file(i915);
	if (IS_ERR(file))
		return PTR_ERR(file);

	err = igt_live_test_begin(&t, i915, __func__, "");
	if (err)
		goto out_file;

	ctx = live_context(i915, file);
	if (IS_ERR(ctx)) {
		err = PTR_ERR(ctx);
		goto out_file;
	}

	vm = ctx_vm(ctx) ?: &i915->ggtt.alias->vm;
	if (!vm || !vm->has_read_only) {
		err = 0;
		goto out_file;
	}

	num_engines = 0;
	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it)
		if (intel_engine_can_store_dword(ce->engine))
			num_engines++;
	i915_gem_context_unlock_engines(ctx);

	ndwords = 0;
	dw = 0;
	while (!time_after(jiffies, end_time)) {
		for_each_gem_engine(ce,
				    i915_gem_context_lock_engines(ctx), it) {
			if (!intel_engine_can_store_dword(ce->engine))
				continue;

			if (!obj) {
				obj = create_test_object(ce->vm, file, &objects);
				if (IS_ERR(obj)) {
					err = PTR_ERR(obj);
					i915_gem_context_unlock_engines(ctx);
					goto out_file;
				}

				if (prandom_u32_state(&prng) & 1)
					i915_gem_object_set_readonly(obj);
			}

			err = gpu_fill(ce, obj, dw);
			if (err) {
				pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) [full-ppgtt? %s], err=%d\n",
				       ndwords, dw, max_dwords(obj),
				       ce->engine->name,
				       yesno(!!ctx_vm(ctx)),
				       err);
				i915_gem_context_unlock_engines(ctx);
				goto out_file;
			}

			err = throttle(ce, tq, ARRAY_SIZE(tq));
			if (err) {
				i915_gem_context_unlock_engines(ctx);
				goto out_file;
			}

			if (++dw == max_dwords(obj)) {
				obj = NULL;
				dw = 0;
			}
			ndwords++;
		}
		i915_gem_context_unlock_engines(ctx);
	}
	pr_info("Submitted %lu dwords (across %lu engines)\n",
		ndwords, num_engines);

	dw = 0;
	idx = 0;
	list_for_each_entry(obj, &objects, st_link) {
		unsigned int rem =
			min_t(unsigned int, ndwords - dw, max_dwords(obj));
		unsigned int num_writes;

		num_writes = rem;
		if (i915_gem_object_is_readonly(obj))
			num_writes = 0;

		err = cpu_check(obj, idx++, num_writes);
		if (err)
			break;

		dw += rem;
	}

out_file:
	throttle_release(tq, ARRAY_SIZE(tq));
	if (igt_live_test_end(&t))
		err = -EIO;

	fput(file);
	return err;
}

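/*
 * Reject offsets that already have something bound in the vm - the
 * isolation test must target an address backed only by scratch.
 */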
static int check_scratch(struct i915_address_space *vm, u64 offset)
{
	struct drm_mm_node *node;

	mutex_lock(&vm->mutex);
	node = __drm_mm_interval_first(&vm->mm,
				       offset, offset + sizeof(u32) - 1);
	mutex_unlock(&vm->mutex);
	if (!node || node->start > offset)
		return 0;

	GEM_BUG_ON(offset >= node->start + node->size);

	pr_err("Target offset 0x%08x_%08x overlaps with a node in the mm!\n",
	       upper_32_bits(offset), lower_32_bits(offset));
	return -EINVAL;
}

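/*
 * Emit an MI_STORE_DWORD_IMM batch in the context's vm that writes
 * value to the unpopulated offset, so the store lands in whatever
 * page tables back that address.
 */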
static int write_to_scratch(struct i915_gem_context *ctx,
			    struct intel_engine_cs *engine,
			    u64 offset, u32 value)
{
	struct drm_i915_private *i915 = ctx->i915;
	struct drm_i915_gem_object *obj;
	struct i915_address_space *vm;
	struct i915_request *rq;
	struct i915_vma *vma;
	u32 *cmd;
	int err;

	GEM_BUG_ON(offset < I915_GTT_PAGE_SIZE);

	err = check_scratch(ctx_vm(ctx), offset);
	if (err)
		return err;

	obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	cmd = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WB);
	if (IS_ERR(cmd)) {
		err = PTR_ERR(cmd);
		goto out;
	}

	*cmd++ = MI_STORE_DWORD_IMM_GEN4;
	if (GRAPHICS_VER(i915) >= 8) {
		*cmd++ = lower_32_bits(offset);
		*cmd++ = upper_32_bits(offset);
	} else {
		*cmd++ = 0;
		*cmd++ = offset;
	}
	*cmd++ = value;
	*cmd = MI_BATCH_BUFFER_END;
	__i915_gem_object_flush_map(obj, 0, 64);
	i915_gem_object_unpin_map(obj);

	intel_gt_chipset_flush(engine->gt);

	vm = i915_gem_context_get_vm_rcu(ctx);
	vma = i915_vma_instance(obj, vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto out_vm;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_OFFSET_FIXED);
	if (err)
		goto out_vm;

	rq = igt_request_alloc(ctx, engine);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto err_unpin;
	}

	i915_vma_lock(vma);
	err = i915_request_await_object(rq, vma->obj, false);
	if (err == 0)
		err = i915_vma_move_to_active(vma, rq, 0);
	i915_vma_unlock(vma);
	if (err)
		goto skip_request;

	if (rq->engine->emit_init_breadcrumb) {
		err = rq->engine->emit_init_breadcrumb(rq);
		if (err)
			goto skip_request;
	}

	err = engine->emit_bb_start(rq, vma->node.start, vma->node.size, 0);
	if (err)
		goto skip_request;

	i915_vma_unpin(vma);

	i915_request_add(rq);

	goto out_vm;
skip_request:
	i915_request_set_error_once(rq, err);
	i915_request_add(rq);
err_unpin:
	i915_vma_unpin(vma);
out_vm:
	i915_vm_put(vm);
out:
	i915_gem_object_put(obj);
	return err;
}

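/*
 * Read back the dword at offset through the second context: on gen8+
 * bounce it through a per-engine GPR with MI_LOAD/STORE_REGISTER_MEM;
 * on older hardware use a privileged batch in the GGTT instead.
 */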
static int read_from_scratch(struct i915_gem_context *ctx,
			     struct intel_engine_cs *engine,
			     u64 offset, u32 *value)
{
	struct drm_i915_private *i915 = ctx->i915;
	struct drm_i915_gem_object *obj;
	struct i915_address_space *vm;
	const u32 result = 0x100;
	struct i915_request *rq;
	struct i915_vma *vma;
	unsigned int flags;
	u32 *cmd;
	int err;

	GEM_BUG_ON(offset < I915_GTT_PAGE_SIZE);

	err = check_scratch(ctx_vm(ctx), offset);
	if (err)
		return err;

	obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	if (GRAPHICS_VER(i915) >= 8) {
		const u32 GPR0 = engine->mmio_base + 0x600;

		vm = i915_gem_context_get_vm_rcu(ctx);
		vma = i915_vma_instance(obj, vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto out_vm;
		}

		err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_OFFSET_FIXED);
		if (err)
			goto out_vm;

		cmd = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WB);
		if (IS_ERR(cmd)) {
			err = PTR_ERR(cmd);
			goto out;
		}

		memset(cmd, POISON_INUSE, PAGE_SIZE);
		*cmd++ = MI_LOAD_REGISTER_MEM_GEN8;
		*cmd++ = GPR0;
		*cmd++ = lower_32_bits(offset);
		*cmd++ = upper_32_bits(offset);
		*cmd++ = MI_STORE_REGISTER_MEM_GEN8;
		*cmd++ = GPR0;
		*cmd++ = result;
		*cmd++ = 0;
		*cmd = MI_BATCH_BUFFER_END;

		i915_gem_object_flush_map(obj);
		i915_gem_object_unpin_map(obj);

		flags = 0;
	} else {
		const u32 reg = engine->mmio_base + 0x420;

		/* hsw: register access even to 3DPRIM! is protected */
		vm = i915_vm_get(&engine->gt->ggtt->vm);
		vma = i915_vma_instance(obj, vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto out_vm;
		}

		err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
		if (err)
			goto out_vm;

		cmd = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WB);
		if (IS_ERR(cmd)) {
			err = PTR_ERR(cmd);
			goto out;
		}

		memset(cmd, POISON_INUSE, PAGE_SIZE);
		*cmd++ = MI_LOAD_REGISTER_MEM;
		*cmd++ = reg;
		*cmd++ = offset;
		*cmd++ = MI_STORE_REGISTER_MEM | MI_USE_GGTT;
		*cmd++ = reg;
		*cmd++ = vma->node.start + result;
		*cmd = MI_BATCH_BUFFER_END;

		i915_gem_object_flush_map(obj);
		i915_gem_object_unpin_map(obj);

		flags = I915_DISPATCH_SECURE;
	}

	intel_gt_chipset_flush(engine->gt);

	rq = igt_request_alloc(ctx, engine);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto err_unpin;
	}

	i915_vma_lock(vma);
	err = i915_request_await_object(rq, vma->obj, true);
	if (err == 0)
		err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
	i915_vma_unlock(vma);
	if (err)
		goto skip_request;

	if (rq->engine->emit_init_breadcrumb) {
		err = rq->engine->emit_init_breadcrumb(rq);
		if (err)
			goto skip_request;
	}

	err = engine->emit_bb_start(rq, vma->node.start, vma->node.size, flags);
	if (err)
		goto skip_request;

	i915_vma_unpin(vma);

	i915_request_add(rq);

	i915_gem_object_lock(obj, NULL);
	err = i915_gem_object_set_to_cpu_domain(obj, false);
	i915_gem_object_unlock(obj);
	if (err)
		goto out_vm;

	cmd = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WB);
	if (IS_ERR(cmd)) {
		err = PTR_ERR(cmd);
		goto out_vm;
	}

	*value = cmd[result / sizeof(*cmd)];
	i915_gem_object_unpin_map(obj);

	goto out_vm;
skip_request:
	i915_request_set_error_once(rq, err);
	i915_request_add(rq);
err_unpin:
	i915_vma_unpin(vma);
out_vm:
	i915_vm_put(vm);
out:
	i915_gem_object_put(obj);
	return err;
}

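/*
 * Sample the vm's scratch page and check it is uniformly filled - this
 * gives the expected value for reads that hit only scratch.
 */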
static int check_scratch_page(struct i915_gem_context *ctx, u32 *out)
{
	struct i915_address_space *vm;
	u32 *vaddr;
	int err = 0;

	vm = ctx_vm(ctx);
	if (!vm)
		return -ENODEV;

	if (!vm->scratch[0]) {
		pr_err("No scratch page!\n");
		return -EINVAL;
	}

	vaddr = __px_vaddr(vm->scratch[0]);

	memcpy(out, vaddr, sizeof(*out));
	if (memchr_inv(vaddr, *out, PAGE_SIZE)) {
		pr_err("Inconsistent initial state of scratch page!\n");
		err = -EINVAL;
	}

	return err;
}

static int igt_vm_isolation(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct i915_gem_context *ctx_a, *ctx_b;
	unsigned long num_engines, count;
	struct intel_engine_cs *engine;
	struct igt_live_test t;
	I915_RND_STATE(prng);
	struct file *file;
	u64 vm_total;
	u32 expected;
	int err;

	if (GRAPHICS_VER(i915) < 7)
		return 0;

	/*
	 * The simple goal here is that a write into one context is not
	 * observed in a second (separate page tables and scratch).
	 */

	file = mock_file(i915);
	if (IS_ERR(file))
		return PTR_ERR(file);

	err = igt_live_test_begin(&t, i915, __func__, "");
	if (err)
		goto out_file;

	ctx_a = live_context(i915, file);
	if (IS_ERR(ctx_a)) {
		err = PTR_ERR(ctx_a);
		goto out_file;
	}

	ctx_b = live_context(i915, file);
	if (IS_ERR(ctx_b)) {
		err = PTR_ERR(ctx_b);
		goto out_file;
	}

	/* We can only test vm isolation if the vms are distinct */
	if (ctx_vm(ctx_a) == ctx_vm(ctx_b))
		goto out_file;

	/* Read the initial state of the scratch page */
	err = check_scratch_page(ctx_a, &expected);
	if (err)
		goto out_file;

	err = check_scratch_page(ctx_b, &expected);
	if (err)
		goto out_file;

	vm_total = ctx_vm(ctx_a)->total;
	GEM_BUG_ON(ctx_vm(ctx_b)->total != vm_total);

	count = 0;
	num_engines = 0;
	for_each_uabi_engine(engine, i915) {
		IGT_TIMEOUT(end_time);
		unsigned long this = 0;

		if (!intel_engine_can_store_dword(engine))
			continue;

		/* Not all engines have their own GPR! */
		if (GRAPHICS_VER(i915) < 8 && engine->class != RENDER_CLASS)
			continue;

		while (!__igt_timeout(end_time, NULL)) {
			u32 value = 0xc5c5c5c5;
			u64 offset;

			/* Leave enough space at offset 0 for the batch */
			offset = igt_random_offset(&prng,
						   I915_GTT_PAGE_SIZE, vm_total,
						   sizeof(u32), alignof_dword);

			err = write_to_scratch(ctx_a, engine,
					       offset, 0xdeadbeef);
			if (err == 0)
				err = read_from_scratch(ctx_b, engine,
							offset, &value);
			if (err)
				goto out_file;

			if (value != expected) {
				pr_err("%s: Read %08x from scratch (offset 0x%08x_%08x), after %lu reads!\n",
				       engine->name, value,
				       upper_32_bits(offset),
				       lower_32_bits(offset),
				       this);
				err = -EINVAL;
				goto out_file;
			}

			this++;
		}
		count += this;
		num_engines++;
	}
	pr_info("Checked %lu scratch offsets across %lu engines\n",
		count, num_engines);

out_file:
	if (igt_live_test_end(&t))
		err = -EIO;
	fput(file);
	return err;
}

int i915_gem_context_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(live_nop_switch),
		SUBTEST(live_parallel_switch),
		SUBTEST(igt_ctx_exec),
		SUBTEST(igt_ctx_readonly),
		SUBTEST(igt_ctx_sseu),
		SUBTEST(igt_shared_ctx_exec),
		SUBTEST(igt_vm_isolation),
	};

	if (intel_gt_is_wedged(&i915->gt))
		return 0;

	return i915_live_subtests(tests, i915);
}