1 /*
2  * SPDX-License-Identifier: MIT
3  *
4  * Copyright © 2018 Intel Corporation
5  */
6 
7 #include "gem/i915_gem_pm.h"
8 #include "gt/intel_engine_user.h"
9 #include "gt/intel_gt.h"
10 #include "i915_selftest.h"
11 #include "intel_reset.h"
12 
13 #include "selftests/igt_flush_test.h"
14 #include "selftests/igt_reset.h"
15 #include "selftests/igt_spinner.h"
16 #include "selftests/mock_drm.h"
17 
18 #include "gem/selftests/igt_gem_utils.h"
19 #include "gem/selftests/mock_context.h"
20 
21 static const struct wo_register {
22 	enum intel_platform platform;
23 	u32 reg;
24 } wo_registers[] = {
25 	{ INTEL_GEMINILAKE, 0x731c }
26 };
27 
28 struct wa_lists {
29 	struct i915_wa_list gt_wa_list;
30 	struct {
31 		struct i915_wa_list wa_list;
32 		struct i915_wa_list ctx_wa_list;
33 	} engine[I915_NUM_ENGINES];
34 };
35 
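/*
 * Submit a request and synchronously wait (up to 200ms) for it to complete,
 * turning a timeout into -EIO; any earlier error from the caller is passed
 * through otherwise.
 */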
36 static int request_add_sync(struct i915_request *rq, int err)
37 {
38 	i915_request_get(rq);
39 	i915_request_add(rq);
40 	if (i915_request_wait(rq, 0, HZ / 5) < 0)
41 		err = -EIO;
42 	i915_request_put(rq);
43 
44 	return err;
45 }
46 
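/*
 * Submit a spinner request and wait until the spinner is confirmed to be
 * running on the GPU, returning -ETIMEDOUT if it never starts.
 */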
47 static int request_add_spin(struct i915_request *rq, struct igt_spinner *spin)
48 {
49 	int err = 0;
50 
51 	i915_request_get(rq);
52 	i915_request_add(rq);
53 	if (spin && !igt_wait_for_spinner(spin, rq))
54 		err = -ETIMEDOUT;
55 	i915_request_put(rq);
56 
57 	return err;
58 }
59 
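/*
 * Capture reference copies of the global GT, per-engine and per-engine
 * context workaround lists so they can be re-verified against the hardware
 * after a reset.
 */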
60 static void
61 reference_lists_init(struct intel_gt *gt, struct wa_lists *lists)
62 {
63 	struct intel_engine_cs *engine;
64 	enum intel_engine_id id;
65 
66 	memset(lists, 0, sizeof(*lists));
67 
68 	wa_init_start(&lists->gt_wa_list, "GT_REF", "global");
69 	gt_init_workarounds(gt->i915, &lists->gt_wa_list);
70 	wa_init_finish(&lists->gt_wa_list);
71 
72 	for_each_engine(engine, gt, id) {
73 		struct i915_wa_list *wal = &lists->engine[id].wa_list;
74 
75 		wa_init_start(wal, "REF", engine->name);
76 		engine_init_workarounds(engine, wal);
77 		wa_init_finish(wal);
78 
79 		__intel_engine_init_ctx_wa(engine,
80 					   &lists->engine[id].ctx_wa_list,
81 					   "CTX_REF");
82 	}
83 }
84 
85 static void
86 reference_lists_fini(struct intel_gt *gt, struct wa_lists *lists)
87 {
88 	struct intel_engine_cs *engine;
89 	enum intel_engine_id id;
90 
91 	for_each_engine(engine, gt, id)
92 		intel_wa_list_free(&lists->engine[id].wa_list);
93 
94 	intel_wa_list_free(&lists->gt_wa_list);
95 }
96 
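/*
 * From within the supplied context, emit an SRM for each RING_FORCE_TO_NONPRIV
 * slot on the engine and return an object containing the values read back.
 */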
97 static struct drm_i915_gem_object *
98 read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
99 {
100 	const u32 base = engine->mmio_base;
101 	struct drm_i915_gem_object *result;
102 	struct i915_request *rq;
103 	struct i915_vma *vma;
104 	u32 srm, *cs;
105 	int err;
106 	int i;
107 
108 	result = i915_gem_object_create_internal(engine->i915, PAGE_SIZE);
109 	if (IS_ERR(result))
110 		return result;
111 
112 	i915_gem_object_set_cache_coherency(result, I915_CACHE_LLC);
113 
114 	cs = i915_gem_object_pin_map(result, I915_MAP_WB);
115 	if (IS_ERR(cs)) {
116 		err = PTR_ERR(cs);
117 		goto err_obj;
118 	}
119 	memset(cs, 0xc5, PAGE_SIZE);
120 	i915_gem_object_flush_map(result);
121 	i915_gem_object_unpin_map(result);
122 
123 	vma = i915_vma_instance(result, &engine->gt->ggtt->vm, NULL);
124 	if (IS_ERR(vma)) {
125 		err = PTR_ERR(vma);
126 		goto err_obj;
127 	}
128 
129 	err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
130 	if (err)
131 		goto err_obj;
132 
133 	rq = igt_request_alloc(ctx, engine);
134 	if (IS_ERR(rq)) {
135 		err = PTR_ERR(rq);
136 		goto err_pin;
137 	}
138 
139 	i915_vma_lock(vma);
140 	err = i915_request_await_object(rq, vma->obj, true);
141 	if (err == 0)
142 		err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
143 	i915_vma_unlock(vma);
144 	if (err)
145 		goto err_req;
146 
147 	srm = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
148 	if (INTEL_GEN(ctx->i915) >= 8)
149 		srm++;
150 
151 	cs = intel_ring_begin(rq, 4 * RING_MAX_NONPRIV_SLOTS);
152 	if (IS_ERR(cs)) {
153 		err = PTR_ERR(cs);
154 		goto err_req;
155 	}
156 
157 	for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) {
158 		*cs++ = srm;
159 		*cs++ = i915_mmio_reg_offset(RING_FORCE_TO_NONPRIV(base, i));
160 		*cs++ = i915_ggtt_offset(vma) + sizeof(u32) * i;
161 		*cs++ = 0;
162 	}
163 	intel_ring_advance(rq, cs);
164 
165 	i915_request_add(rq);
166 	i915_vma_unpin(vma);
167 
168 	return result;
169 
170 err_req:
171 	i915_request_add(rq);
172 err_pin:
173 	i915_vma_unpin(vma);
174 err_obj:
175 	i915_gem_object_put(result);
176 	return ERR_PTR(err);
177 }
178 
179 static u32
180 get_whitelist_reg(const struct intel_engine_cs *engine, unsigned int i)
181 {
182 	i915_reg_t reg = i < engine->whitelist.count ?
183 			 engine->whitelist.list[i].reg :
184 			 RING_NOPID(engine->mmio_base);
185 
186 	return i915_mmio_reg_offset(reg);
187 }
188 
189 static void
190 print_results(const struct intel_engine_cs *engine, const u32 *results)
191 {
192 	unsigned int i;
193 
194 	for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) {
195 		u32 expected = get_whitelist_reg(engine, i);
196 		u32 actual = results[i];
197 
198 		pr_info("RING_NONPRIV[%d]: expected 0x%08x, found 0x%08x\n",
199 			i, expected, actual);
200 	}
201 }
202 
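/*
 * Read back every RING_FORCE_TO_NONPRIV slot and check that it still holds
 * the expected whitelisted register offset (unused slots should point at
 * RING_NOPID).
 */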
203 static int check_whitelist(struct i915_gem_context *ctx,
204 			   struct intel_engine_cs *engine)
205 {
206 	struct drm_i915_gem_object *results;
207 	struct intel_wedge_me wedge;
208 	u32 *vaddr;
209 	int err;
210 	int i;
211 
212 	results = read_nonprivs(ctx, engine);
213 	if (IS_ERR(results))
214 		return PTR_ERR(results);
215 
216 	err = 0;
217 	i915_gem_object_lock(results, NULL);
218 	intel_wedge_on_timeout(&wedge, engine->gt, HZ / 5) /* safety net! */
219 		err = i915_gem_object_set_to_cpu_domain(results, false);
220 	i915_gem_object_unlock(results);
221 	if (intel_gt_is_wedged(engine->gt))
222 		err = -EIO;
223 	if (err)
224 		goto out_put;
225 
226 	vaddr = i915_gem_object_pin_map(results, I915_MAP_WB);
227 	if (IS_ERR(vaddr)) {
228 		err = PTR_ERR(vaddr);
229 		goto out_put;
230 	}
231 
232 	for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) {
233 		u32 expected = get_whitelist_reg(engine, i);
234 		u32 actual = vaddr[i];
235 
236 		if (expected != actual) {
237 			print_results(engine, vaddr);
238 			pr_err("Invalid RING_NONPRIV[%d], expected 0x%08x, found 0x%08x\n",
239 			       i, expected, actual);
240 
241 			err = -EINVAL;
242 			break;
243 		}
244 	}
245 
246 	i915_gem_object_unpin_map(results);
247 out_put:
248 	i915_gem_object_put(results);
249 	return err;
250 }
251 
252 static int do_device_reset(struct intel_engine_cs *engine)
253 {
254 	intel_gt_reset(engine->gt, engine->mask, "live_workarounds");
255 	return 0;
256 }
257 
258 static int do_engine_reset(struct intel_engine_cs *engine)
259 {
260 	return intel_engine_reset(engine, "live_workarounds");
261 }
262 
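/*
 * Start a spinning batch on a temporary context so that the upcoming reset
 * is performed while a context other than the one under test is active.
 */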
263 static int
264 switch_to_scratch_context(struct intel_engine_cs *engine,
265 			  struct igt_spinner *spin)
266 {
267 	struct intel_context *ce;
268 	struct i915_request *rq;
269 	int err = 0;
270 
271 	ce = intel_context_create(engine);
272 	if (IS_ERR(ce))
273 		return PTR_ERR(ce);
274 
275 	rq = igt_spinner_create_request(spin, ce, MI_NOOP);
276 	intel_context_put(ce);
277 
278 	if (IS_ERR(rq)) {
279 		spin = NULL;
280 		err = PTR_ERR(rq);
281 		goto err;
282 	}
283 
284 	err = request_add_spin(rq, spin);
285 err:
286 	if (err && spin)
287 		igt_spinner_end(spin);
288 
289 	return err;
290 }
291 
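/*
 * Verify the whitelist before a reset, perform the reset while a scratch
 * context is spinning, then verify the whitelist survives both in the
 * original context and in a freshly created one.
 */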
292 static int check_whitelist_across_reset(struct intel_engine_cs *engine,
293 					int (*reset)(struct intel_engine_cs *),
294 					const char *name)
295 {
296 	struct drm_i915_private *i915 = engine->i915;
297 	struct i915_gem_context *ctx, *tmp;
298 	struct igt_spinner spin;
299 	intel_wakeref_t wakeref;
300 	int err;
301 
302 	pr_info("Checking %d whitelisted registers on %s (RING_NONPRIV) [%s]\n",
303 		engine->whitelist.count, engine->name, name);
304 
305 	ctx = kernel_context(i915);
306 	if (IS_ERR(ctx))
307 		return PTR_ERR(ctx);
308 
309 	err = igt_spinner_init(&spin, engine->gt);
310 	if (err)
311 		goto out_ctx;
312 
313 	err = check_whitelist(ctx, engine);
314 	if (err) {
315 		pr_err("Invalid whitelist *before* %s reset!\n", name);
316 		goto out_spin;
317 	}
318 
319 	err = switch_to_scratch_context(engine, &spin);
320 	if (err)
321 		goto out_spin;
322 
323 	with_intel_runtime_pm(engine->uncore->rpm, wakeref)
324 		err = reset(engine);
325 
326 	igt_spinner_end(&spin);
327 
328 	if (err) {
329 		pr_err("%s reset failed\n", name);
330 		goto out_spin;
331 	}
332 
333 	err = check_whitelist(ctx, engine);
334 	if (err) {
335 		pr_err("Whitelist not preserved in context across %s reset!\n",
336 		       name);
337 		goto out_spin;
338 	}
339 
340 	tmp = kernel_context(i915);
341 	if (IS_ERR(tmp)) {
342 		err = PTR_ERR(tmp);
343 		goto out_spin;
344 	}
345 	kernel_context_close(ctx);
346 	ctx = tmp;
347 
348 	err = check_whitelist(ctx, engine);
349 	if (err) {
350 		pr_err("Invalid whitelist *after* %s reset in fresh context!\n",
351 		       name);
352 		goto out_spin;
353 	}
354 
355 out_spin:
356 	igt_spinner_fini(&spin);
357 out_ctx:
358 	kernel_context_close(ctx);
359 	return err;
360 }
361 
362 static struct i915_vma *create_batch(struct i915_address_space *vm)
363 {
364 	struct drm_i915_gem_object *obj;
365 	struct i915_vma *vma;
366 	int err;
367 
368 	obj = i915_gem_object_create_internal(vm->i915, 16 * PAGE_SIZE);
369 	if (IS_ERR(obj))
370 		return ERR_CAST(obj);
371 
372 	vma = i915_vma_instance(obj, vm, NULL);
373 	if (IS_ERR(vma)) {
374 		err = PTR_ERR(vma);
375 		goto err_obj;
376 	}
377 
378 	err = i915_vma_pin(vma, 0, 0, PIN_USER);
379 	if (err)
380 		goto err_obj;
381 
382 	return vma;
383 
384 err_obj:
385 	i915_gem_object_put(obj);
386 	return ERR_PTR(err);
387 }
388 
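/*
 * Predict the value of a register after a write: rsvd == 0x0000ffff denotes
 * a masked register, where the upper 16 bits of the value select which of
 * the lower 16 bits are updated; otherwise rsvd is the set of writable bits.
 */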
389 static u32 reg_write(u32 old, u32 new, u32 rsvd)
390 {
391 	if (rsvd == 0x0000ffff) {
392 		old &= ~(new >> 16);
393 		old |= new & (new >> 16);
394 	} else {
395 		old &= ~rsvd;
396 		old |= new & rsvd;
397 	}
398 
399 	return old;
400 }
401 
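/*
 * Identify registers we must not try to read back: anything marked as
 * write-only in its NONPRIV access field, plus the per-platform exceptions
 * listed in wo_registers[].
 */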
402 static bool wo_register(struct intel_engine_cs *engine, u32 reg)
403 {
404 	enum intel_platform platform = INTEL_INFO(engine->i915)->platform;
405 	int i;
406 
407 	if ((reg & RING_FORCE_TO_NONPRIV_ACCESS_MASK) ==
408 	     RING_FORCE_TO_NONPRIV_ACCESS_WR)
409 		return true;
410 
411 	for (i = 0; i < ARRAY_SIZE(wo_registers); i++) {
412 		if (wo_registers[i].platform == platform &&
413 		    wo_registers[i].reg == reg)
414 			return true;
415 	}
416 
417 	return false;
418 }
419 
420 static bool timestamp(const struct intel_engine_cs *engine, u32 reg)
421 {
422 	reg = (reg - engine->mmio_base) & ~RING_FORCE_TO_NONPRIV_ACCESS_MASK;
423 	switch (reg) {
424 	case 0x358:
425 	case 0x35c:
426 	case 0x3a8:
427 		return true;
428 
429 	default:
430 		return false;
431 	}
432 }
433 
434 static bool ro_register(u32 reg)
435 {
436 	if ((reg & RING_FORCE_TO_NONPRIV_ACCESS_MASK) ==
437 	     RING_FORCE_TO_NONPRIV_ACCESS_RD)
438 		return true;
439 
440 	return false;
441 }
442 
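/* Number of whitelisted registers, excluding the read-only entries. */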
443 static int whitelist_writable_count(struct intel_engine_cs *engine)
444 {
445 	int count = engine->whitelist.count;
446 	int i;
447 
448 	for (i = 0; i < engine->whitelist.count; i++) {
449 		u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);
450 
451 		if (ro_register(reg))
452 			count--;
453 	}
454 
455 	return count;
456 }
457 
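/*
 * For each whitelisted register that can be read back, build an unprivileged
 * batch that saves the original value, writes a series of garbage patterns
 * (values[] and their complements) with a readback after each, restores the
 * original value, and then compares the readbacks against the values
 * predicted by reg_write(); read-only registers are expected to be unchanged.
 */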
458 static int check_dirty_whitelist(struct intel_context *ce)
459 {
460 	const u32 values[] = {
461 		0x00000000,
462 		0x01010101,
463 		0x10100101,
464 		0x03030303,
465 		0x30300303,
466 		0x05050505,
467 		0x50500505,
468 		0x0f0f0f0f,
469 		0xf00ff00f,
470 		0x10101010,
471 		0xf0f01010,
472 		0x30303030,
473 		0xa0a03030,
474 		0x50505050,
475 		0xc0c05050,
476 		0xf0f0f0f0,
477 		0x11111111,
478 		0x33333333,
479 		0x55555555,
480 		0x0000ffff,
481 		0x00ff00ff,
482 		0xff0000ff,
483 		0xffff00ff,
484 		0xffffffff,
485 	};
486 	struct intel_engine_cs *engine = ce->engine;
487 	struct i915_vma *scratch;
488 	struct i915_vma *batch;
489 	int err = 0, i, v;
490 	u32 *cs, *results;
491 
492 	scratch = create_scratch(ce->vm, 2 * ARRAY_SIZE(values) + 1);
493 	if (IS_ERR(scratch))
494 		return PTR_ERR(scratch);
495 
496 	batch = create_batch(ce->vm);
497 	if (IS_ERR(batch)) {
498 		err = PTR_ERR(batch);
499 		goto out_scratch;
500 	}
501 
502 	for (i = 0; i < engine->whitelist.count; i++) {
503 		u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);
504 		u64 addr = scratch->node.start;
505 		struct i915_request *rq;
506 		u32 srm, lrm, rsvd;
507 		u32 expect;
508 		int idx;
509 		bool ro_reg;
510 
511 		if (wo_register(engine, reg))
512 			continue;
513 
514 		if (timestamp(engine, reg))
515 			continue; /* timestamps are expected to autoincrement */
516 
517 		ro_reg = ro_register(reg);
518 
519 		/* Clear non priv flags */
520 		reg &= RING_FORCE_TO_NONPRIV_ADDRESS_MASK;
521 
522 		srm = MI_STORE_REGISTER_MEM;
523 		lrm = MI_LOAD_REGISTER_MEM;
524 		if (INTEL_GEN(engine->i915) >= 8)
525 			lrm++, srm++;
526 
527 		pr_debug("%s: Writing garbage to %x\n",
528 			 engine->name, reg);
529 
530 		cs = i915_gem_object_pin_map(batch->obj, I915_MAP_WC);
531 		if (IS_ERR(cs)) {
532 			err = PTR_ERR(cs);
533 			goto out_batch;
534 		}
535 
536 		/* SRM original */
537 		*cs++ = srm;
538 		*cs++ = reg;
539 		*cs++ = lower_32_bits(addr);
540 		*cs++ = upper_32_bits(addr);
541 
542 		idx = 1;
543 		for (v = 0; v < ARRAY_SIZE(values); v++) {
544 			/* LRI garbage */
545 			*cs++ = MI_LOAD_REGISTER_IMM(1);
546 			*cs++ = reg;
547 			*cs++ = values[v];
548 
549 			/* SRM result */
550 			*cs++ = srm;
551 			*cs++ = reg;
552 			*cs++ = lower_32_bits(addr + sizeof(u32) * idx);
553 			*cs++ = upper_32_bits(addr + sizeof(u32) * idx);
554 			idx++;
555 		}
556 		for (v = 0; v < ARRAY_SIZE(values); v++) {
557 			/* LRI garbage */
558 			*cs++ = MI_LOAD_REGISTER_IMM(1);
559 			*cs++ = reg;
560 			*cs++ = ~values[v];
561 
562 			/* SRM result */
563 			*cs++ = srm;
564 			*cs++ = reg;
565 			*cs++ = lower_32_bits(addr + sizeof(u32) * idx);
566 			*cs++ = upper_32_bits(addr + sizeof(u32) * idx);
567 			idx++;
568 		}
569 		GEM_BUG_ON(idx * sizeof(u32) > scratch->size);
570 
571 		/* LRM original -- don't leave garbage in the context! */
572 		*cs++ = lrm;
573 		*cs++ = reg;
574 		*cs++ = lower_32_bits(addr);
575 		*cs++ = upper_32_bits(addr);
576 
577 		*cs++ = MI_BATCH_BUFFER_END;
578 
579 		i915_gem_object_flush_map(batch->obj);
580 		i915_gem_object_unpin_map(batch->obj);
581 		intel_gt_chipset_flush(engine->gt);
582 
583 		rq = intel_context_create_request(ce);
584 		if (IS_ERR(rq)) {
585 			err = PTR_ERR(rq);
586 			goto out_batch;
587 		}
588 
589 		if (engine->emit_init_breadcrumb) { /* Be nice if we hang */
590 			err = engine->emit_init_breadcrumb(rq);
591 			if (err)
592 				goto err_request;
593 		}
594 
595 		i915_vma_lock(batch);
596 		err = i915_request_await_object(rq, batch->obj, false);
597 		if (err == 0)
598 			err = i915_vma_move_to_active(batch, rq, 0);
599 		i915_vma_unlock(batch);
600 		if (err)
601 			goto err_request;
602 
603 		i915_vma_lock(scratch);
604 		err = i915_request_await_object(rq, scratch->obj, true);
605 		if (err == 0)
606 			err = i915_vma_move_to_active(scratch, rq,
607 						      EXEC_OBJECT_WRITE);
608 		i915_vma_unlock(scratch);
609 		if (err)
610 			goto err_request;
611 
612 		err = engine->emit_bb_start(rq,
613 					    batch->node.start, PAGE_SIZE,
614 					    0);
615 		if (err)
616 			goto err_request;
617 
618 err_request:
619 		err = request_add_sync(rq, err);
620 		if (err) {
621 			pr_err("%s: Futzing %x timed out; cancelling test\n",
622 			       engine->name, reg);
623 			intel_gt_set_wedged(engine->gt);
624 			goto out_batch;
625 		}
626 
627 		results = i915_gem_object_pin_map(scratch->obj, I915_MAP_WB);
628 		if (IS_ERR(results)) {
629 			err = PTR_ERR(results);
630 			goto out_batch;
631 		}
632 
633 		GEM_BUG_ON(values[ARRAY_SIZE(values) - 1] != 0xffffffff);
634 		if (!ro_reg) {
635 			/* detect write masking */
636 			rsvd = results[ARRAY_SIZE(values)];
637 			if (!rsvd) {
638 				pr_err("%s: Unable to write to whitelisted register %x\n",
639 				       engine->name, reg);
640 				err = -EINVAL;
641 				goto out_unpin;
642 			}
643 		} else {
644 			rsvd = 0;
645 		}
646 
647 		expect = results[0];
648 		idx = 1;
649 		for (v = 0; v < ARRAY_SIZE(values); v++) {
650 			if (ro_reg)
651 				expect = results[0];
652 			else
653 				expect = reg_write(expect, values[v], rsvd);
654 
655 			if (results[idx] != expect)
656 				err++;
657 			idx++;
658 		}
659 		for (v = 0; v < ARRAY_SIZE(values); v++) {
660 			if (ro_reg)
661 				expect = results[0];
662 			else
663 				expect = reg_write(expect, ~values[v], rsvd);
664 
665 			if (results[idx] != expect)
666 				err++;
667 			idx++;
668 		}
669 		if (err) {
670 			pr_err("%s: %d mismatches between values written to whitelisted register [%x] and values read back!\n",
671 			       engine->name, err, reg);
672 
673 			if (ro_reg)
674 				pr_info("%s: Whitelisted read-only register: %x, original value %08x\n",
675 					engine->name, reg, results[0]);
676 			else
677 				pr_info("%s: Whitelisted register: %x, original value %08x, rsvd %08x\n",
678 					engine->name, reg, results[0], rsvd);
679 
680 			expect = results[0];
681 			idx = 1;
682 			for (v = 0; v < ARRAY_SIZE(values); v++) {
683 				u32 w = values[v];
684 
685 				if (ro_reg)
686 					expect = results[0];
687 				else
688 					expect = reg_write(expect, w, rsvd);
689 				pr_info("Wrote %08x, read %08x, expect %08x\n",
690 					w, results[idx], expect);
691 				idx++;
692 			}
693 			for (v = 0; v < ARRAY_SIZE(values); v++) {
694 				u32 w = ~values[v];
695 
696 				if (ro_reg)
697 					expect = results[0];
698 				else
699 					expect = reg_write(expect, w, rsvd);
700 				pr_info("Wrote %08x, read %08x, expect %08x\n",
701 					w, results[idx], expect);
702 				idx++;
703 			}
704 
705 			err = -EINVAL;
706 		}
707 out_unpin:
708 		i915_gem_object_unpin_map(scratch->obj);
709 		if (err)
710 			break;
711 	}
712 
713 	if (igt_flush_test(engine->i915))
714 		err = -EIO;
715 out_batch:
716 	i915_vma_unpin_and_release(&batch, 0);
717 out_scratch:
718 	i915_vma_unpin_and_release(&scratch, 0);
719 	return err;
720 }
721 
722 static int live_dirty_whitelist(void *arg)
723 {
724 	struct intel_gt *gt = arg;
725 	struct intel_engine_cs *engine;
726 	enum intel_engine_id id;
727 
728 	/* Can the user write to the whitelisted registers? */
729 
730 	if (INTEL_GEN(gt->i915) < 7) /* minimum requirement for LRI, SRM, LRM */
731 		return 0;
732 
733 	for_each_engine(engine, gt, id) {
734 		struct intel_context *ce;
735 		int err;
736 
737 		if (engine->whitelist.count == 0)
738 			continue;
739 
740 		ce = intel_context_create(engine);
741 		if (IS_ERR(ce))
742 			return PTR_ERR(ce);
743 
744 		err = check_dirty_whitelist(ce);
745 		intel_context_put(ce);
746 		if (err)
747 			return err;
748 	}
749 
750 	return 0;
751 }
752 
753 static int live_reset_whitelist(void *arg)
754 {
755 	struct intel_gt *gt = arg;
756 	struct intel_engine_cs *engine;
757 	enum intel_engine_id id;
758 	int err = 0;
759 
760 	/* If we reset the gpu, we should not lose the RING_NONPRIV */
761 	igt_global_reset_lock(gt);
762 
763 	for_each_engine(engine, gt, id) {
764 		if (engine->whitelist.count == 0)
765 			continue;
766 
767 		if (intel_has_reset_engine(gt)) {
768 			err = check_whitelist_across_reset(engine,
769 							   do_engine_reset,
770 							   "engine");
771 			if (err)
772 				goto out;
773 		}
774 
775 		if (intel_has_gpu_reset(gt)) {
776 			err = check_whitelist_across_reset(engine,
777 							   do_device_reset,
778 							   "device");
779 			if (err)
780 				goto out;
781 		}
782 	}
783 
784 out:
785 	igt_global_reset_unlock(gt);
786 	return err;
787 }
788 
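/*
 * Use SRM commands issued from the given context to copy the current value
 * of every whitelisted register into the results buffer.
 */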
789 static int read_whitelisted_registers(struct i915_gem_context *ctx,
790 				      struct intel_engine_cs *engine,
791 				      struct i915_vma *results)
792 {
793 	struct i915_request *rq;
794 	int i, err = 0;
795 	u32 srm, *cs;
796 
797 	rq = igt_request_alloc(ctx, engine);
798 	if (IS_ERR(rq))
799 		return PTR_ERR(rq);
800 
801 	i915_vma_lock(results);
802 	err = i915_request_await_object(rq, results->obj, true);
803 	if (err == 0)
804 		err = i915_vma_move_to_active(results, rq, EXEC_OBJECT_WRITE);
805 	i915_vma_unlock(results);
806 	if (err)
807 		goto err_req;
808 
809 	srm = MI_STORE_REGISTER_MEM;
810 	if (INTEL_GEN(ctx->i915) >= 8)
811 		srm++;
812 
813 	cs = intel_ring_begin(rq, 4 * engine->whitelist.count);
814 	if (IS_ERR(cs)) {
815 		err = PTR_ERR(cs);
816 		goto err_req;
817 	}
818 
819 	for (i = 0; i < engine->whitelist.count; i++) {
820 		u64 offset = results->node.start + sizeof(u32) * i;
821 		u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);
822 
823 		/* Clear non priv flags */
824 		reg &= RING_FORCE_TO_NONPRIV_ADDRESS_MASK;
825 
826 		*cs++ = srm;
827 		*cs++ = reg;
828 		*cs++ = lower_32_bits(offset);
829 		*cs++ = upper_32_bits(offset);
830 	}
831 	intel_ring_advance(rq, cs);
832 
833 err_req:
834 	return request_add_sync(rq, err);
835 }
836 
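/*
 * From an unprivileged user batch, write 0xffffffff to every writable
 * whitelisted register in the given context.
 */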
837 static int scrub_whitelisted_registers(struct i915_gem_context *ctx,
838 				       struct intel_engine_cs *engine)
839 {
840 	struct i915_address_space *vm;
841 	struct i915_request *rq;
842 	struct i915_vma *batch;
843 	int i, err = 0;
844 	u32 *cs;
845 
846 	vm = i915_gem_context_get_vm_rcu(ctx);
847 	batch = create_batch(vm);
848 	i915_vm_put(vm);
849 	if (IS_ERR(batch))
850 		return PTR_ERR(batch);
851 
852 	cs = i915_gem_object_pin_map(batch->obj, I915_MAP_WC);
853 	if (IS_ERR(cs)) {
854 		err = PTR_ERR(cs);
855 		goto err_batch;
856 	}
857 
858 	*cs++ = MI_LOAD_REGISTER_IMM(whitelist_writable_count(engine));
859 	for (i = 0; i < engine->whitelist.count; i++) {
860 		u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);
861 
862 		if (ro_register(reg))
863 			continue;
864 
865 		/* Clear non priv flags */
866 		reg &= RING_FORCE_TO_NONPRIV_ADDRESS_MASK;
867 
868 		*cs++ = reg;
869 		*cs++ = 0xffffffff;
870 	}
871 	*cs++ = MI_BATCH_BUFFER_END;
872 
873 	i915_gem_object_flush_map(batch->obj);
874 	intel_gt_chipset_flush(engine->gt);
875 
876 	rq = igt_request_alloc(ctx, engine);
877 	if (IS_ERR(rq)) {
878 		err = PTR_ERR(rq);
879 		goto err_unpin;
880 	}
881 
882 	if (engine->emit_init_breadcrumb) { /* Be nice if we hang */
883 		err = engine->emit_init_breadcrumb(rq);
884 		if (err)
885 			goto err_request;
886 	}
887 
888 	i915_vma_lock(batch);
889 	err = i915_request_await_object(rq, batch->obj, false);
890 	if (err == 0)
891 		err = i915_vma_move_to_active(batch, rq, 0);
892 	i915_vma_unlock(batch);
893 	if (err)
894 		goto err_request;
895 
896 	/* Perform the writes from an unprivileged "user" batch */
897 	err = engine->emit_bb_start(rq, batch->node.start, 0, 0);
898 
899 err_request:
900 	err = request_add_sync(rq, err);
901 
902 err_unpin:
903 	i915_gem_object_unpin_map(batch->obj);
904 err_batch:
905 	i915_vma_unpin_and_release(&batch, 0);
906 	return err;
907 }
908 
909 struct regmask {
910 	i915_reg_t reg;
911 	unsigned long gen_mask;
912 };
913 
914 static bool find_reg(struct drm_i915_private *i915,
915 		     i915_reg_t reg,
916 		     const struct regmask *tbl,
917 		     unsigned long count)
918 {
919 	u32 offset = i915_mmio_reg_offset(reg);
920 
921 	while (count--) {
922 		if (INTEL_INFO(i915)->gen_mask & tbl->gen_mask &&
923 		    i915_mmio_reg_offset(tbl->reg) == offset)
924 			return true;
925 		tbl++;
926 	}
927 
928 	return false;
929 }
930 
931 static bool pardon_reg(struct drm_i915_private *i915, i915_reg_t reg)
932 {
933 	/* Alas, we must pardon some whitelists. Mistakes already made */
934 	static const struct regmask pardon[] = {
935 		{ GEN9_CTX_PREEMPT_REG, INTEL_GEN_MASK(9, 9) },
936 		{ GEN8_L3SQCREG4, INTEL_GEN_MASK(9, 9) },
937 	};
938 
939 	return find_reg(i915, reg, pardon, ARRAY_SIZE(pardon));
940 }
941 
942 static bool result_eq(struct intel_engine_cs *engine,
943 		      u32 a, u32 b, i915_reg_t reg)
944 {
945 	if (a != b && !pardon_reg(engine->i915, reg)) {
946 		pr_err("Whitelisted register 0x%4x not context saved: A=%08x, B=%08x\n",
947 		       i915_mmio_reg_offset(reg), a, b);
948 		return false;
949 	}
950 
951 	return true;
952 }
953 
954 static bool writeonly_reg(struct drm_i915_private *i915, i915_reg_t reg)
955 {
956 	/* Some registers do not seem to behave and our writes are unreadable */
957 	static const struct regmask wo[] = {
958 		{ GEN9_SLICE_COMMON_ECO_CHICKEN1, INTEL_GEN_MASK(9, 9) },
959 	};
960 
961 	return find_reg(i915, reg, wo, ARRAY_SIZE(wo));
962 }
963 
964 static bool result_neq(struct intel_engine_cs *engine,
965 		       u32 a, u32 b, i915_reg_t reg)
966 {
967 	if (a == b && !writeonly_reg(engine->i915, reg)) {
968 		pr_err("Whitelist register 0x%4x:%08x was unwritable\n",
969 		       i915_mmio_reg_offset(reg), a);
970 		return false;
971 	}
972 
973 	return true;
974 }
975 
976 static int
977 check_whitelisted_registers(struct intel_engine_cs *engine,
978 			    struct i915_vma *A,
979 			    struct i915_vma *B,
980 			    bool (*fn)(struct intel_engine_cs *engine,
981 				       u32 a, u32 b,
982 				       i915_reg_t reg))
983 {
984 	u32 *a, *b;
985 	int i, err;
986 
987 	a = i915_gem_object_pin_map(A->obj, I915_MAP_WB);
988 	if (IS_ERR(a))
989 		return PTR_ERR(a);
990 
991 	b = i915_gem_object_pin_map(B->obj, I915_MAP_WB);
992 	if (IS_ERR(b)) {
993 		err = PTR_ERR(b);
994 		goto err_a;
995 	}
996 
997 	err = 0;
998 	for (i = 0; i < engine->whitelist.count; i++) {
999 		const struct i915_wa *wa = &engine->whitelist.list[i];
1000 
1001 		if (i915_mmio_reg_offset(wa->reg) &
1002 		    RING_FORCE_TO_NONPRIV_ACCESS_RD)
1003 			continue;
1004 
1005 		if (!fn(engine, a[i], b[i], wa->reg))
1006 			err = -EINVAL;
1007 	}
1008 
1009 	i915_gem_object_unpin_map(B->obj);
1010 err_a:
1011 	i915_gem_object_unpin_map(A->obj);
1012 	return err;
1013 }
1014 
1015 static int live_isolated_whitelist(void *arg)
1016 {
1017 	struct intel_gt *gt = arg;
1018 	struct {
1019 		struct i915_gem_context *ctx;
1020 		struct i915_vma *scratch[2];
1021 	} client[2] = {};
1022 	struct intel_engine_cs *engine;
1023 	enum intel_engine_id id;
1024 	int i, err = 0;
1025 
1026 	/*
1027 	 * Check that a write into a whitelist register works, but
1028 	 * is invisible to a second context.
1029 	 */
1030 
1031 	if (!intel_engines_has_context_isolation(gt->i915))
1032 		return 0;
1033 
1034 	for (i = 0; i < ARRAY_SIZE(client); i++) {
1035 		struct i915_address_space *vm;
1036 		struct i915_gem_context *c;
1037 
1038 		c = kernel_context(gt->i915);
1039 		if (IS_ERR(c)) {
1040 			err = PTR_ERR(c);
1041 			goto err;
1042 		}
1043 
1044 		vm = i915_gem_context_get_vm_rcu(c);
1045 
1046 		client[i].scratch[0] = create_scratch(vm, 1024);
1047 		if (IS_ERR(client[i].scratch[0])) {
1048 			err = PTR_ERR(client[i].scratch[0]);
1049 			i915_vm_put(vm);
1050 			kernel_context_close(c);
1051 			goto err;
1052 		}
1053 
1054 		client[i].scratch[1] = create_scratch(vm, 1024);
1055 		if (IS_ERR(client[i].scratch[1])) {
1056 			err = PTR_ERR(client[i].scratch[1]);
1057 			i915_vma_unpin_and_release(&client[i].scratch[0], 0);
1058 			i915_vm_put(vm);
1059 			kernel_context_close(c);
1060 			goto err;
1061 		}
1062 
1063 		client[i].ctx = c;
1064 		i915_vm_put(vm);
1065 	}
1066 
1067 	for_each_engine(engine, gt, id) {
1068 		if (!engine->kernel_context->vm)
1069 			continue;
1070 
1071 		if (!whitelist_writable_count(engine))
1072 			continue;
1073 
1074 		/* Read default values */
1075 		err = read_whitelisted_registers(client[0].ctx, engine,
1076 						 client[0].scratch[0]);
1077 		if (err)
1078 			goto err;
1079 
1080 		/* Try to overwrite registers (should only affect ctx0) */
1081 		err = scrub_whitelisted_registers(client[0].ctx, engine);
1082 		if (err)
1083 			goto err;
1084 
1085 		/* Read values from ctx1, we expect these to be defaults */
1086 		err = read_whitelisted_registers(client[1].ctx, engine,
1087 						 client[1].scratch[0]);
1088 		if (err)
1089 			goto err;
1090 
1091 		/* Verify that both reads return the same default values */
1092 		err = check_whitelisted_registers(engine,
1093 						  client[0].scratch[0],
1094 						  client[1].scratch[0],
1095 						  result_eq);
1096 		if (err)
1097 			goto err;
1098 
1099 		/* Read back the updated values in ctx0 */
1100 		err = read_whitelisted_registers(client[0].ctx, engine,
1101 						 client[0].scratch[1]);
1102 		if (err)
1103 			goto err;
1104 
1105 		/* User should be granted privilege to overwrite regs */
1106 		err = check_whitelisted_registers(engine,
1107 						  client[0].scratch[0],
1108 						  client[0].scratch[1],
1109 						  result_neq);
1110 		if (err)
1111 			goto err;
1112 	}
1113 
1114 err:
1115 	for (i = 0; i < ARRAY_SIZE(client); i++) {
1116 		if (!client[i].ctx)
1117 			break;
1118 
1119 		i915_vma_unpin_and_release(&client[i].scratch[1], 0);
1120 		i915_vma_unpin_and_release(&client[i].scratch[0], 0);
1121 		kernel_context_close(client[i].ctx);
1122 	}
1123 
1124 	if (igt_flush_test(gt->i915))
1125 		err = -EIO;
1126 
1127 	return err;
1128 }
1129 
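/*
 * Check that the hardware still matches the reference GT, per-engine and
 * per-context workaround lists captured by reference_lists_init().
 */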
1130 static bool
1131 verify_wa_lists(struct i915_gem_context *ctx, struct wa_lists *lists,
1132 		const char *str)
1133 {
1134 	struct drm_i915_private *i915 = ctx->i915;
1135 	struct i915_gem_engines_iter it;
1136 	struct intel_context *ce;
1137 	bool ok = true;
1138 
1139 	ok &= wa_list_verify(&i915->uncore, &lists->gt_wa_list, str);
1140 
1141 	for_each_gem_engine(ce, i915_gem_context_engines(ctx), it) {
1142 		enum intel_engine_id id = ce->engine->id;
1143 
1144 		ok &= engine_wa_list_verify(ce,
1145 					    &lists->engine[id].wa_list,
1146 					    str) == 0;
1147 
1148 		ok &= engine_wa_list_verify(ce,
1149 					    &lists->engine[id].ctx_wa_list,
1150 					    str) == 0;
1151 	}
1152 
1153 	return ok;
1154 }
1155 
1156 static int
1157 live_gpu_reset_workarounds(void *arg)
1158 {
1159 	struct intel_gt *gt = arg;
1160 	struct i915_gem_context *ctx;
1161 	intel_wakeref_t wakeref;
1162 	struct wa_lists lists;
1163 	bool ok;
1164 
1165 	if (!intel_has_gpu_reset(gt))
1166 		return 0;
1167 
1168 	ctx = kernel_context(gt->i915);
1169 	if (IS_ERR(ctx))
1170 		return PTR_ERR(ctx);
1171 
1172 	i915_gem_context_lock_engines(ctx);
1173 
1174 	pr_info("Verifying after GPU reset...\n");
1175 
1176 	igt_global_reset_lock(gt);
1177 	wakeref = intel_runtime_pm_get(gt->uncore->rpm);
1178 
1179 	reference_lists_init(gt, &lists);
1180 
1181 	ok = verify_wa_lists(ctx, &lists, "before reset");
1182 	if (!ok)
1183 		goto out;
1184 
1185 	intel_gt_reset(gt, ALL_ENGINES, "live_workarounds");
1186 
1187 	ok = verify_wa_lists(ctx, &lists, "after reset");
1188 
1189 out:
1190 	i915_gem_context_unlock_engines(ctx);
1191 	kernel_context_close(ctx);
1192 	reference_lists_fini(gt, &lists);
1193 	intel_runtime_pm_put(gt->uncore->rpm, wakeref);
1194 	igt_global_reset_unlock(gt);
1195 
1196 	return ok ? 0 : -ESRCH;
1197 }
1198 
1199 static int
1200 live_engine_reset_workarounds(void *arg)
1201 {
1202 	struct intel_gt *gt = arg;
1203 	struct i915_gem_engines_iter it;
1204 	struct i915_gem_context *ctx;
1205 	struct intel_context *ce;
1206 	struct igt_spinner spin;
1207 	struct i915_request *rq;
1208 	intel_wakeref_t wakeref;
1209 	struct wa_lists lists;
1210 	int ret = 0;
1211 
1212 	if (!intel_has_reset_engine(gt))
1213 		return 0;
1214 
1215 	ctx = kernel_context(gt->i915);
1216 	if (IS_ERR(ctx))
1217 		return PTR_ERR(ctx);
1218 
1219 	igt_global_reset_lock(gt);
1220 	wakeref = intel_runtime_pm_get(gt->uncore->rpm);
1221 
1222 	reference_lists_init(gt, &lists);
1223 
1224 	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
1225 		struct intel_engine_cs *engine = ce->engine;
1226 		bool ok;
1227 
1228 		pr_info("Verifying after %s reset...\n", engine->name);
1229 
1230 		ok = verify_wa_lists(ctx, &lists, "before reset");
1231 		if (!ok) {
1232 			ret = -ESRCH;
1233 			goto err;
1234 		}
1235 
1236 		intel_engine_reset(engine, "live_workarounds");
1237 
1238 		ok = verify_wa_lists(ctx, &lists, "after idle reset");
1239 		if (!ok) {
1240 			ret = -ESRCH;
1241 			goto err;
1242 		}
1243 
1244 		ret = igt_spinner_init(&spin, engine->gt);
1245 		if (ret)
1246 			goto err;
1247 
1248 		rq = igt_spinner_create_request(&spin, ce, MI_NOOP);
1249 		if (IS_ERR(rq)) {
1250 			ret = PTR_ERR(rq);
1251 			igt_spinner_fini(&spin);
1252 			goto err;
1253 		}
1254 
1255 		ret = request_add_spin(rq, &spin);
1256 		if (ret) {
1257 			pr_err("Spinner failed to start\n");
1258 			igt_spinner_fini(&spin);
1259 			goto err;
1260 		}
1261 
1262 		intel_engine_reset(engine, "live_workarounds");
1263 
1264 		igt_spinner_end(&spin);
1265 		igt_spinner_fini(&spin);
1266 
1267 		ok = verify_wa_lists(ctx, &lists, "after busy reset");
1268 		if (!ok) {
1269 			ret = -ESRCH;
1270 			goto err;
1271 		}
1272 	}
1273 err:
1274 	i915_gem_context_unlock_engines(ctx);
1275 	reference_lists_fini(gt, &lists);
1276 	intel_runtime_pm_put(gt->uncore->rpm, wakeref);
1277 	igt_global_reset_unlock(gt);
1278 	kernel_context_close(ctx);
1279 
1280 	igt_flush_test(gt->i915);
1281 
1282 	return ret;
1283 }
1284 
1285 int intel_workarounds_live_selftests(struct drm_i915_private *i915)
1286 {
1287 	static const struct i915_subtest tests[] = {
1288 		SUBTEST(live_dirty_whitelist),
1289 		SUBTEST(live_reset_whitelist),
1290 		SUBTEST(live_isolated_whitelist),
1291 		SUBTEST(live_gpu_reset_workarounds),
1292 		SUBTEST(live_engine_reset_workarounds),
1293 	};
1294 
1295 	if (intel_gt_is_wedged(&i915->gt))
1296 		return 0;
1297 
1298 	return intel_gt_live_subtests(tests, &i915->gt);
1299 }
1300