1 /*
2  * Copyright © 2016 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  */
24 
25 #include <drm/drm_print.h>
26 
27 #include "gem/i915_gem_context.h"
28 
29 #include "i915_drv.h"
30 
31 #include "intel_breadcrumbs.h"
32 #include "intel_context.h"
33 #include "intel_engine.h"
34 #include "intel_engine_pm.h"
35 #include "intel_engine_user.h"
36 #include "intel_gt.h"
37 #include "intel_gt_requests.h"
38 #include "intel_gt_pm.h"
39 #include "intel_lrc.h"
40 #include "intel_reset.h"
41 #include "intel_ring.h"
42 
43 /* Haswell does have the CXT_SIZE register however it does not appear to be
44  * valid. Now, docs explain in dwords what is in the context object. The full
45  * size is 70720 bytes, however, the power context and execlist context will
46  * never be saved (power context is stored elsewhere, and execlists don't work
47  * on HSW) - so the final size, including the extra state required for the
48  * Resource Streamer, is 66944 bytes, which rounds to 17 pages.
49  */
50 #define HSW_CXT_TOTAL_SIZE		(17 * PAGE_SIZE)
51 
52 #define DEFAULT_LR_CONTEXT_RENDER_SIZE	(22 * PAGE_SIZE)
53 #define GEN8_LR_CONTEXT_RENDER_SIZE	(20 * PAGE_SIZE)
54 #define GEN9_LR_CONTEXT_RENDER_SIZE	(22 * PAGE_SIZE)
55 #define GEN10_LR_CONTEXT_RENDER_SIZE	(18 * PAGE_SIZE)
56 #define GEN11_LR_CONTEXT_RENDER_SIZE	(14 * PAGE_SIZE)
57 
58 #define GEN8_LR_CONTEXT_OTHER_SIZE	( 2 * PAGE_SIZE)
59 
60 #define MAX_MMIO_BASES 3
61 struct engine_info {
62 	unsigned int hw_id;
63 	u8 class;
64 	u8 instance;
65 	/* mmio bases table *must* be sorted in reverse gen order */
66 	struct engine_mmio_base {
67 		u32 gen : 8;
68 		u32 base : 24;
69 	} mmio_bases[MAX_MMIO_BASES];
70 };
71 
72 static const struct engine_info intel_engines[] = {
73 	[RCS0] = {
74 		.hw_id = RCS0_HW,
75 		.class = RENDER_CLASS,
76 		.instance = 0,
77 		.mmio_bases = {
78 			{ .gen = 1, .base = RENDER_RING_BASE }
79 		},
80 	},
81 	[BCS0] = {
82 		.hw_id = BCS0_HW,
83 		.class = COPY_ENGINE_CLASS,
84 		.instance = 0,
85 		.mmio_bases = {
86 			{ .gen = 6, .base = BLT_RING_BASE }
87 		},
88 	},
89 	[VCS0] = {
90 		.hw_id = VCS0_HW,
91 		.class = VIDEO_DECODE_CLASS,
92 		.instance = 0,
93 		.mmio_bases = {
94 			{ .gen = 11, .base = GEN11_BSD_RING_BASE },
95 			{ .gen = 6, .base = GEN6_BSD_RING_BASE },
96 			{ .gen = 4, .base = BSD_RING_BASE }
97 		},
98 	},
99 	[VCS1] = {
100 		.hw_id = VCS1_HW,
101 		.class = VIDEO_DECODE_CLASS,
102 		.instance = 1,
103 		.mmio_bases = {
104 			{ .gen = 11, .base = GEN11_BSD2_RING_BASE },
105 			{ .gen = 8, .base = GEN8_BSD2_RING_BASE }
106 		},
107 	},
108 	[VCS2] = {
109 		.hw_id = VCS2_HW,
110 		.class = VIDEO_DECODE_CLASS,
111 		.instance = 2,
112 		.mmio_bases = {
113 			{ .gen = 11, .base = GEN11_BSD3_RING_BASE }
114 		},
115 	},
116 	[VCS3] = {
117 		.hw_id = VCS3_HW,
118 		.class = VIDEO_DECODE_CLASS,
119 		.instance = 3,
120 		.mmio_bases = {
121 			{ .gen = 11, .base = GEN11_BSD4_RING_BASE }
122 		},
123 	},
124 	[VECS0] = {
125 		.hw_id = VECS0_HW,
126 		.class = VIDEO_ENHANCEMENT_CLASS,
127 		.instance = 0,
128 		.mmio_bases = {
129 			{ .gen = 11, .base = GEN11_VEBOX_RING_BASE },
130 			{ .gen = 7, .base = VEBOX_RING_BASE }
131 		},
132 	},
133 	[VECS1] = {
134 		.hw_id = VECS1_HW,
135 		.class = VIDEO_ENHANCEMENT_CLASS,
136 		.instance = 1,
137 		.mmio_bases = {
138 			{ .gen = 11, .base = GEN11_VEBOX2_RING_BASE }
139 		},
140 	},
141 };
142 
143 /**
144  * intel_engine_context_size() - return the size of the context for an engine
145  * @gt: the gt
146  * @class: engine class
147  *
148  * Each engine class may require a different amount of space for a context
149  * image.
150  *
151  * Return: size (in bytes) of an engine class specific context image
152  *
153  * Note: this size includes the HWSP, which is part of the context image
154  * in LRC mode, but does not include the "shared data page" used with
155  * GuC submission. The caller should account for this if using the GuC.
156  */
157 u32 intel_engine_context_size(struct intel_gt *gt, u8 class)
158 {
159 	struct intel_uncore *uncore = gt->uncore;
160 	u32 cxt_size;
161 
162 	BUILD_BUG_ON(I915_GTT_PAGE_SIZE != PAGE_SIZE);
163 
164 	switch (class) {
165 	case RENDER_CLASS:
166 		switch (INTEL_GEN(gt->i915)) {
167 		default:
168 			MISSING_CASE(INTEL_GEN(gt->i915));
169 			return DEFAULT_LR_CONTEXT_RENDER_SIZE;
170 		case 12:
171 		case 11:
172 			return GEN11_LR_CONTEXT_RENDER_SIZE;
173 		case 10:
174 			return GEN10_LR_CONTEXT_RENDER_SIZE;
175 		case 9:
176 			return GEN9_LR_CONTEXT_RENDER_SIZE;
177 		case 8:
178 			return GEN8_LR_CONTEXT_RENDER_SIZE;
179 		case 7:
180 			if (IS_HASWELL(gt->i915))
181 				return HSW_CXT_TOTAL_SIZE;
182 
183 			cxt_size = intel_uncore_read(uncore, GEN7_CXT_SIZE);
184 			return round_up(GEN7_CXT_TOTAL_SIZE(cxt_size) * 64,
185 					PAGE_SIZE);
186 		case 6:
187 			cxt_size = intel_uncore_read(uncore, CXT_SIZE);
188 			return round_up(GEN6_CXT_TOTAL_SIZE(cxt_size) * 64,
189 					PAGE_SIZE);
190 		case 5:
191 		case 4:
192 			/*
193 			 * There is a discrepancy here between the size reported
194 			 * by the register and the size of the context layout
195 			 * in the docs. Both are described as authoritative!
196 			 *
197 			 * The discrepancy is on the order of a few cachelines,
198 			 * but the total is under one page (4k), which is our
199 			 * minimum allocation anyway so it should all come
200 			 * out in the wash.
201 			 */
202 			cxt_size = intel_uncore_read(uncore, CXT_SIZE) + 1;
203 			drm_dbg(&gt->i915->drm,
204 				"gen%d CXT_SIZE = %d bytes [0x%08x]\n",
205 				INTEL_GEN(gt->i915), cxt_size * 64,
206 				cxt_size - 1);
207 			return round_up(cxt_size * 64, PAGE_SIZE);
208 		case 3:
209 		case 2:
210 		/* For the special day when i810 gets merged. */
211 		case 1:
212 			return 0;
213 		}
214 		break;
215 	default:
216 		MISSING_CASE(class);
217 		fallthrough;
218 	case VIDEO_DECODE_CLASS:
219 	case VIDEO_ENHANCEMENT_CLASS:
220 	case COPY_ENGINE_CLASS:
221 		if (INTEL_GEN(gt->i915) < 8)
222 			return 0;
223 		return GEN8_LR_CONTEXT_OTHER_SIZE;
224 	}
225 }
226 
227 static u32 __engine_mmio_base(struct drm_i915_private *i915,
228 			      const struct engine_mmio_base *bases)
229 {
230 	int i;
231 
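	/* mmio_bases[] is sorted newest-first; use the first base this gen supports */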
232 	for (i = 0; i < MAX_MMIO_BASES; i++)
233 		if (INTEL_GEN(i915) >= bases[i].gen)
234 			break;
235 
236 	GEM_BUG_ON(i == MAX_MMIO_BASES);
237 	GEM_BUG_ON(!bases[i].base);
238 
239 	return bases[i].base;
240 }
241 
242 static void __sprint_engine_name(struct intel_engine_cs *engine)
243 {
244 	/*
245 	 * Before we know what the uABI name for this engine will be,
246 	 * we still would like to keep track of this engine in the debug logs.
247 	 * We throw in a ' here as a reminder that this isn't its final name.
248 	 */
249 	GEM_WARN_ON(snprintf(engine->name, sizeof(engine->name), "%s'%u",
250 			     intel_engine_class_repr(engine->class),
251 			     engine->instance) >= sizeof(engine->name));
252 }
253 
254 void intel_engine_set_hwsp_writemask(struct intel_engine_cs *engine, u32 mask)
255 {
256 	/*
257 	 * Though they added more rings on g4x/ilk, they did not add
258 	 * per-engine HWSTAM until gen6.
259 	 */
260 	if (INTEL_GEN(engine->i915) < 6 && engine->class != RENDER_CLASS)
261 		return;
262 
263 	if (INTEL_GEN(engine->i915) >= 3)
264 		ENGINE_WRITE(engine, RING_HWSTAM, mask);
265 	else
266 		ENGINE_WRITE16(engine, RING_HWSTAM, mask);
267 }
268 
269 static void intel_engine_sanitize_mmio(struct intel_engine_cs *engine)
270 {
271 	/* Mask off all writes into the unknown HWSP */
272 	intel_engine_set_hwsp_writemask(engine, ~0u);
273 }
274 
275 static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id)
276 {
277 	const struct engine_info *info = &intel_engines[id];
278 	struct drm_i915_private *i915 = gt->i915;
279 	struct intel_engine_cs *engine;
280 
281 	BUILD_BUG_ON(MAX_ENGINE_CLASS >= BIT(GEN11_ENGINE_CLASS_WIDTH));
282 	BUILD_BUG_ON(MAX_ENGINE_INSTANCE >= BIT(GEN11_ENGINE_INSTANCE_WIDTH));
283 
284 	if (GEM_DEBUG_WARN_ON(id >= ARRAY_SIZE(gt->engine)))
285 		return -EINVAL;
286 
287 	if (GEM_DEBUG_WARN_ON(info->class > MAX_ENGINE_CLASS))
288 		return -EINVAL;
289 
290 	if (GEM_DEBUG_WARN_ON(info->instance > MAX_ENGINE_INSTANCE))
291 		return -EINVAL;
292 
293 	if (GEM_DEBUG_WARN_ON(gt->engine_class[info->class][info->instance]))
294 		return -EINVAL;
295 
296 	engine = kzalloc(sizeof(*engine), GFP_KERNEL);
297 	if (!engine)
298 		return -ENOMEM;
299 
300 	BUILD_BUG_ON(BITS_PER_TYPE(engine->mask) < I915_NUM_ENGINES);
301 
302 	engine->id = id;
303 	engine->legacy_idx = INVALID_ENGINE;
304 	engine->mask = BIT(id);
305 	engine->i915 = i915;
306 	engine->gt = gt;
307 	engine->uncore = gt->uncore;
308 	engine->hw_id = engine->guc_id = info->hw_id;
309 	engine->mmio_base = __engine_mmio_base(i915, info->mmio_bases);
310 
311 	engine->class = info->class;
312 	engine->instance = info->instance;
313 	__sprint_engine_name(engine);
314 
315 	engine->props.heartbeat_interval_ms =
316 		CONFIG_DRM_I915_HEARTBEAT_INTERVAL;
317 	engine->props.max_busywait_duration_ns =
318 		CONFIG_DRM_I915_MAX_REQUEST_BUSYWAIT;
319 	engine->props.preempt_timeout_ms =
320 		CONFIG_DRM_I915_PREEMPT_TIMEOUT;
321 	engine->props.stop_timeout_ms =
322 		CONFIG_DRM_I915_STOP_TIMEOUT;
323 	engine->props.timeslice_duration_ms =
324 		CONFIG_DRM_I915_TIMESLICE_DURATION;
325 
326 	/* Override to uninterruptible for OpenCL workloads. */
327 	if (INTEL_GEN(i915) == 12 && engine->class == RENDER_CLASS)
328 		engine->props.preempt_timeout_ms = 0;
329 
330 	engine->defaults = engine->props; /* never to change again */
331 
332 	engine->context_size = intel_engine_context_size(gt, engine->class);
333 	if (WARN_ON(engine->context_size > BIT(20)))
334 		engine->context_size = 0;
335 	if (engine->context_size)
336 		DRIVER_CAPS(i915)->has_logical_contexts = true;
337 
338 	/* Nothing to do here, execute in order of dependencies */
339 	engine->schedule = NULL;
340 
341 	ewma__engine_latency_init(&engine->latency);
342 	seqlock_init(&engine->stats.lock);
343 
344 	ATOMIC_INIT_NOTIFIER_HEAD(&engine->context_status_notifier);
345 
346 	/* Scrub mmio state on takeover */
347 	intel_engine_sanitize_mmio(engine);
348 
349 	gt->engine_class[info->class][info->instance] = engine;
350 	gt->engine[id] = engine;
351 
352 	return 0;
353 }
354 
355 static void __setup_engine_capabilities(struct intel_engine_cs *engine)
356 {
357 	struct drm_i915_private *i915 = engine->i915;
358 
359 	if (engine->class == VIDEO_DECODE_CLASS) {
360 		/*
361 		 * HEVC support is present on first engine instance
362 		 * before Gen11 and on all instances afterwards.
363 		 */
364 		if (INTEL_GEN(i915) >= 11 ||
365 		    (INTEL_GEN(i915) >= 9 && engine->instance == 0))
366 			engine->uabi_capabilities |=
367 				I915_VIDEO_CLASS_CAPABILITY_HEVC;
368 
369 		/*
370 		 * SFC block is present only on even logical engine
371 		 * instances.
372 		 */
373 		if ((INTEL_GEN(i915) >= 11 &&
374 		     (engine->gt->info.vdbox_sfc_access &
375 		      BIT(engine->instance))) ||
376 		    (INTEL_GEN(i915) >= 9 && engine->instance == 0))
377 			engine->uabi_capabilities |=
378 				I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC;
379 	} else if (engine->class == VIDEO_ENHANCEMENT_CLASS) {
380 		if (INTEL_GEN(i915) >= 9)
381 			engine->uabi_capabilities |=
382 				I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC;
383 	}
384 }
385 
386 static void intel_setup_engine_capabilities(struct intel_gt *gt)
387 {
388 	struct intel_engine_cs *engine;
389 	enum intel_engine_id id;
390 
391 	for_each_engine(engine, gt, id)
392 		__setup_engine_capabilities(engine);
393 }
394 
395 /**
396  * intel_engines_release() - free the resources allocated for Command Streamers
397  * @gt: pointer to struct intel_gt
398  */
399 void intel_engines_release(struct intel_gt *gt)
400 {
401 	struct intel_engine_cs *engine;
402 	enum intel_engine_id id;
403 
404 	/*
405 	 * Before we release the resources held by engine, we must be certain
406 	 * that the HW is no longer accessing them -- having the GPU scribble
407 	 * to or read from a page being used for something else causes no end
408 	 * of fun.
409 	 *
410 	 * The GPU should be reset by this point, but assume the worst just
411 	 * in case we aborted before completely initialising the engines.
412 	 */
413 	GEM_BUG_ON(intel_gt_pm_is_awake(gt));
414 	if (!INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
415 		__intel_gt_reset(gt, ALL_ENGINES);
416 
417 	/* Decouple the backend; but keep the layout for late GPU resets */
418 	for_each_engine(engine, gt, id) {
419 		if (!engine->release)
420 			continue;
421 
422 		intel_wakeref_wait_for_idle(&engine->wakeref);
423 		GEM_BUG_ON(intel_engine_pm_is_awake(engine));
424 
425 		engine->release(engine);
426 		engine->release = NULL;
427 
428 		memset(&engine->reset, 0, sizeof(engine->reset));
429 	}
430 }
431 
432 void intel_engine_free_request_pool(struct intel_engine_cs *engine)
433 {
434 	if (!engine->request_pool)
435 		return;
436 
437 	kmem_cache_free(i915_request_slab_cache(), engine->request_pool);
438 }
439 
440 void intel_engines_free(struct intel_gt *gt)
441 {
442 	struct intel_engine_cs *engine;
443 	enum intel_engine_id id;
444 
445 	/* Free the requests! dma-resv keeps fences around for an eternity */
446 	rcu_barrier();
447 
448 	for_each_engine(engine, gt, id) {
449 		intel_engine_free_request_pool(engine);
450 		kfree(engine);
451 		gt->engine[id] = NULL;
452 	}
453 }
454 
455 /*
456  * Determine which engines are fused off in our particular hardware.
457  * Note that we have a catch-22 situation where we need to be able to access
458  * the blitter forcewake domain to read the engine fuses, but at the same time
459  * we need to know which engines are available on the system to know which
460  * forcewake domains are present. We solve this by initializing the forcewake
461  * domains based on the full engine mask in the platform capabilities before
462  * calling this function and pruning the domains for fused-off engines
463  * afterwards.
464  */
465 static intel_engine_mask_t init_engine_mask(struct intel_gt *gt)
466 {
467 	struct drm_i915_private *i915 = gt->i915;
468 	struct intel_gt_info *info = &gt->info;
469 	struct intel_uncore *uncore = gt->uncore;
470 	unsigned int logical_vdbox = 0;
471 	unsigned int i;
472 	u32 media_fuse;
473 	u16 vdbox_mask;
474 	u16 vebox_mask;
475 
476 	info->engine_mask = INTEL_INFO(i915)->platform_engine_mask;
477 
478 	if (INTEL_GEN(i915) < 11)
479 		return info->engine_mask;
480 
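	/* The fuse register reports disabled engines, so invert it to get the enabled set */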
481 	media_fuse = ~intel_uncore_read(uncore, GEN11_GT_VEBOX_VDBOX_DISABLE);
482 
483 	vdbox_mask = media_fuse & GEN11_GT_VDBOX_DISABLE_MASK;
484 	vebox_mask = (media_fuse & GEN11_GT_VEBOX_DISABLE_MASK) >>
485 		      GEN11_GT_VEBOX_DISABLE_SHIFT;
486 
487 	for (i = 0; i < I915_MAX_VCS; i++) {
488 		if (!HAS_ENGINE(gt, _VCS(i))) {
489 			vdbox_mask &= ~BIT(i);
490 			continue;
491 		}
492 
493 		if (!(BIT(i) & vdbox_mask)) {
494 			info->engine_mask &= ~BIT(_VCS(i));
495 			drm_dbg(&i915->drm, "vcs%u fused off\n", i);
496 			continue;
497 		}
498 
499 		/*
500 		 * In Gen11, only even numbered logical VDBOXes are
501 		 * hooked up to an SFC (Scaler & Format Converter) unit.
502 		 * In TGL each VDBOX has access to an SFC.
503 		 */
504 		if (INTEL_GEN(i915) >= 12 || logical_vdbox++ % 2 == 0)
505 			gt->info.vdbox_sfc_access |= BIT(i);
506 	}
507 	drm_dbg(&i915->drm, "vdbox enable: %04x, instances: %04lx\n",
508 		vdbox_mask, VDBOX_MASK(gt));
509 	GEM_BUG_ON(vdbox_mask != VDBOX_MASK(gt));
510 
511 	for (i = 0; i < I915_MAX_VECS; i++) {
512 		if (!HAS_ENGINE(gt, _VECS(i))) {
513 			vebox_mask &= ~BIT(i);
514 			continue;
515 		}
516 
517 		if (!(BIT(i) & vebox_mask)) {
518 			info->engine_mask &= ~BIT(_VECS(i));
519 			drm_dbg(&i915->drm, "vecs%u fused off\n", i);
520 		}
521 	}
522 	drm_dbg(&i915->drm, "vebox enable: %04x, instances: %04lx\n",
523 		vebox_mask, VEBOX_MASK(gt));
524 	GEM_BUG_ON(vebox_mask != VEBOX_MASK(gt));
525 
526 	return info->engine_mask;
527 }
528 
529 /**
530  * intel_engines_init_mmio() - allocate and prepare the Engine Command Streamers
531  * @gt: pointer to struct intel_gt
532  *
533  * Return: non-zero if the initialization failed.
534  */
535 int intel_engines_init_mmio(struct intel_gt *gt)
536 {
537 	struct drm_i915_private *i915 = gt->i915;
538 	const unsigned int engine_mask = init_engine_mask(gt);
539 	unsigned int mask = 0;
540 	unsigned int i;
541 	int err;
542 
543 	drm_WARN_ON(&i915->drm, engine_mask == 0);
544 	drm_WARN_ON(&i915->drm, engine_mask &
545 		    GENMASK(BITS_PER_TYPE(mask) - 1, I915_NUM_ENGINES));
546 
547 	if (i915_inject_probe_failure(i915))
548 		return -ENODEV;
549 
550 	for (i = 0; i < ARRAY_SIZE(intel_engines); i++) {
551 		if (!HAS_ENGINE(gt, i))
552 			continue;
553 
554 		err = intel_engine_setup(gt, i);
555 		if (err)
556 			goto cleanup;
557 
558 		mask |= BIT(i);
559 	}
560 
561 	/*
562 	 * Catch failures to update intel_engines table when the new engines
563 	 * are added to the driver by a warning and disabling the forgotten
564 	 * engines.
565 	 */
566 	if (drm_WARN_ON(&i915->drm, mask != engine_mask))
567 		gt->info.engine_mask = mask;
568 
569 	gt->info.num_engines = hweight32(mask);
570 
571 	intel_gt_check_and_clear_faults(gt);
572 
573 	intel_setup_engine_capabilities(gt);
574 
575 	intel_uncore_prune_engine_fw_domains(gt->uncore, gt);
576 
577 	return 0;
578 
579 cleanup:
580 	intel_engines_free(gt);
581 	return err;
582 }
583 
584 void intel_engine_init_execlists(struct intel_engine_cs *engine)
585 {
586 	struct intel_engine_execlists * const execlists = &engine->execlists;
587 
588 	execlists->port_mask = 1;
589 	GEM_BUG_ON(!is_power_of_2(execlists_num_ports(execlists)));
590 	GEM_BUG_ON(execlists_num_ports(execlists) > EXECLIST_MAX_PORTS);
591 
592 	memset(execlists->pending, 0, sizeof(execlists->pending));
593 	execlists->active =
594 		memset(execlists->inflight, 0, sizeof(execlists->inflight));
595 
596 	execlists->queue_priority_hint = INT_MIN;
597 	execlists->queue = RB_ROOT_CACHED;
598 }
599 
600 static void cleanup_status_page(struct intel_engine_cs *engine)
601 {
602 	struct i915_vma *vma;
603 
604 	/* Prevent writes into HWSP after returning the page to the system */
605 	intel_engine_set_hwsp_writemask(engine, ~0u);
606 
607 	vma = fetch_and_zero(&engine->status_page.vma);
608 	if (!vma)
609 		return;
610 
611 	if (!HWS_NEEDS_PHYSICAL(engine->i915))
612 		i915_vma_unpin(vma);
613 
614 	i915_gem_object_unpin_map(vma->obj);
615 	i915_gem_object_put(vma->obj);
616 }
617 
618 static int pin_ggtt_status_page(struct intel_engine_cs *engine,
619 				struct i915_vma *vma)
620 {
621 	unsigned int flags;
622 
623 	if (!HAS_LLC(engine->i915) && i915_ggtt_has_aperture(engine->gt->ggtt))
624 		/*
625 		 * On g33, we cannot place HWS above 256MiB, so
626 		 * restrict its pinning to the low mappable arena.
627 		 * Though this restriction is not documented for
628 		 * gen4, gen5, or byt, they also behave similarly
629 		 * and hang if the HWS is placed at the top of the
630 		 * GTT. To generalise, it appears that all !llc
631 		 * platforms have issues with us placing the HWS
632 		 * above the mappable region (even though we never
633 		 * actually map it).
634 		 */
635 		flags = PIN_MAPPABLE;
636 	else
637 		flags = PIN_HIGH;
638 
639 	return i915_ggtt_pin(vma, NULL, 0, flags);
640 }
641 
642 static int init_status_page(struct intel_engine_cs *engine)
643 {
644 	struct drm_i915_gem_object *obj;
645 	struct i915_vma *vma;
646 	void *vaddr;
647 	int ret;
648 
649 	/*
650 	 * Though the HWS register does support 36bit addresses, historically
651 	 * we have had hangs and corruption reported due to wild writes if
652 	 * the HWS is placed above 4G. We only allow objects to be allocated
653 	 * in GFP_DMA32 for i965, and no earlier physical address users had
654 	 * access to more than 4G.
655 	 */
656 	obj = i915_gem_object_create_internal(engine->i915, PAGE_SIZE);
657 	if (IS_ERR(obj)) {
658 		drm_err(&engine->i915->drm,
659 			"Failed to allocate status page\n");
660 		return PTR_ERR(obj);
661 	}
662 
663 	i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);
664 
665 	vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL);
666 	if (IS_ERR(vma)) {
667 		ret = PTR_ERR(vma);
668 		goto err;
669 	}
670 
671 	vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
672 	if (IS_ERR(vaddr)) {
673 		ret = PTR_ERR(vaddr);
674 		goto err;
675 	}
676 
677 	engine->status_page.addr = memset(vaddr, 0, PAGE_SIZE);
678 	engine->status_page.vma = vma;
679 
680 	if (!HWS_NEEDS_PHYSICAL(engine->i915)) {
681 		ret = pin_ggtt_status_page(engine, vma);
682 		if (ret)
683 			goto err_unpin;
684 	}
685 
686 	return 0;
687 
688 err_unpin:
689 	i915_gem_object_unpin_map(obj);
690 err:
691 	i915_gem_object_put(obj);
692 	return ret;
693 }
694 
695 static int engine_setup_common(struct intel_engine_cs *engine)
696 {
697 	int err;
698 
699 	init_llist_head(&engine->barrier_tasks);
700 
701 	err = init_status_page(engine);
702 	if (err)
703 		return err;
704 
705 	engine->breadcrumbs = intel_breadcrumbs_create(engine);
706 	if (!engine->breadcrumbs) {
707 		err = -ENOMEM;
708 		goto err_status;
709 	}
710 
711 	err = intel_engine_init_cmd_parser(engine);
712 	if (err)
713 		goto err_cmd_parser;
714 
715 	intel_engine_init_active(engine, ENGINE_PHYSICAL);
716 	intel_engine_init_execlists(engine);
717 	intel_engine_init__pm(engine);
718 	intel_engine_init_retire(engine);
719 
720 	/* Use the whole device by default */
721 	engine->sseu =
722 		intel_sseu_from_device_info(&engine->gt->info.sseu);
723 
724 	intel_engine_init_workarounds(engine);
725 	intel_engine_init_whitelist(engine);
726 	intel_engine_init_ctx_wa(engine);
727 
728 	return 0;
729 
730 err_cmd_parser:
731 	intel_breadcrumbs_free(engine->breadcrumbs);
732 err_status:
733 	cleanup_status_page(engine);
734 	return err;
735 }
736 
737 struct measure_breadcrumb {
738 	struct i915_request rq;
739 	struct intel_ring ring;
740 	u32 cs[2048];
741 };
742 
743 static int measure_breadcrumb_dw(struct intel_context *ce)
744 {
745 	struct intel_engine_cs *engine = ce->engine;
746 	struct measure_breadcrumb *frame;
747 	int dw;
748 
749 	GEM_BUG_ON(!engine->gt->scratch);
750 
751 	frame = kzalloc(sizeof(*frame), GFP_KERNEL);
752 	if (!frame)
753 		return -ENOMEM;
754 
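	/* Mock up a request and ring so the fini breadcrumb can be emitted and its dwords counted */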
755 	frame->rq.engine = engine;
756 	frame->rq.context = ce;
757 	rcu_assign_pointer(frame->rq.timeline, ce->timeline);
758 
759 	frame->ring.vaddr = frame->cs;
760 	frame->ring.size = sizeof(frame->cs);
761 	frame->ring.wrap =
762 		BITS_PER_TYPE(frame->ring.size) - ilog2(frame->ring.size);
763 	frame->ring.effective_size = frame->ring.size;
764 	intel_ring_update_space(&frame->ring);
765 	frame->rq.ring = &frame->ring;
766 
767 	mutex_lock(&ce->timeline->mutex);
768 	spin_lock_irq(&engine->active.lock);
769 
770 	dw = engine->emit_fini_breadcrumb(&frame->rq, frame->cs) - frame->cs;
771 
772 	spin_unlock_irq(&engine->active.lock);
773 	mutex_unlock(&ce->timeline->mutex);
774 
775 	GEM_BUG_ON(dw & 1); /* RING_TAIL must be qword aligned */
776 
777 	kfree(frame);
778 	return dw;
779 }
780 
781 void
782 intel_engine_init_active(struct intel_engine_cs *engine, unsigned int subclass)
783 {
784 	INIT_LIST_HEAD(&engine->active.requests);
785 	INIT_LIST_HEAD(&engine->active.hold);
786 
787 	spin_lock_init(&engine->active.lock);
788 	lockdep_set_subclass(&engine->active.lock, subclass);
789 
790 	/*
791 	 * Due to an interesting quirk in lockdep's internal debug tracking,
792 	 * after setting a subclass we must ensure the lock is used. Otherwise,
793 	 * nr_unused_locks is incremented once too often.
794 	 */
795 #ifdef CONFIG_DEBUG_LOCK_ALLOC
796 	local_irq_disable();
797 	lock_map_acquire(&engine->active.lock.dep_map);
798 	lock_map_release(&engine->active.lock.dep_map);
799 	local_irq_enable();
800 #endif
801 }
802 
803 static struct intel_context *
804 create_pinned_context(struct intel_engine_cs *engine,
805 		      unsigned int hwsp,
806 		      struct lock_class_key *key,
807 		      const char *name)
808 {
809 	struct intel_context *ce;
810 	int err;
811 
812 	ce = intel_context_create(engine);
813 	if (IS_ERR(ce))
814 		return ce;
815 
816 	__set_bit(CONTEXT_BARRIER_BIT, &ce->flags);
817 	ce->timeline = page_pack_bits(NULL, hwsp);
818 
819 	err = intel_context_pin(ce); /* perma-pin so it is always available */
820 	if (err) {
821 		intel_context_put(ce);
822 		return ERR_PTR(err);
823 	}
824 
825 	/*
826 	 * Give our perma-pinned kernel timelines a separate lockdep class,
827 	 * so that we can use them from within the normal user timelines
828 	 * should we need to inject GPU operations during their request
829 	 * construction.
830 	 */
831 	lockdep_set_class_and_name(&ce->timeline->mutex, key, name);
832 
833 	return ce;
834 }
835 
836 static struct intel_context *
837 create_kernel_context(struct intel_engine_cs *engine)
838 {
839 	static struct lock_class_key kernel;
840 
841 	return create_pinned_context(engine, I915_GEM_HWS_SEQNO_ADDR,
842 				     &kernel, "kernel_context");
843 }
844 
845 /**
846  * intel_engines_init_common - initialize engine state which might require hw access
847  * @engine: Engine to initialize.
848  *
849  * Initializes @engine structure members shared between legacy and execlists
850  * submission modes which do require hardware access.
851  *
852  * Typically done at later stages of submission mode specific engine setup.
853  *
854  * Returns zero on success or an error code on failure.
855  */
856 static int engine_init_common(struct intel_engine_cs *engine)
857 {
858 	struct intel_context *ce;
859 	int ret;
860 
861 	engine->set_default_submission(engine);
862 
863 	/*
864 	 * We may need to do things with the shrinker which
865 	 * require us to immediately switch back to the default
866 	 * context. This can cause a problem as pinning the
867 	 * default context also requires GTT space which may not
868 	 * be available. To avoid this we always pin the default
869 	 * context.
870 	 */
871 	ce = create_kernel_context(engine);
872 	if (IS_ERR(ce))
873 		return PTR_ERR(ce);
874 
875 	ret = measure_breadcrumb_dw(ce);
876 	if (ret < 0)
877 		goto err_context;
878 
879 	engine->emit_fini_breadcrumb_dw = ret;
880 	engine->kernel_context = ce;
881 
882 	return 0;
883 
884 err_context:
885 	intel_context_put(ce);
886 	return ret;
887 }
888 
889 int intel_engines_init(struct intel_gt *gt)
890 {
891 	int (*setup)(struct intel_engine_cs *engine);
892 	struct intel_engine_cs *engine;
893 	enum intel_engine_id id;
894 	int err;
895 
896 	if (HAS_EXECLISTS(gt->i915))
897 		setup = intel_execlists_submission_setup;
898 	else
899 		setup = intel_ring_submission_setup;
900 
901 	for_each_engine(engine, gt, id) {
902 		err = engine_setup_common(engine);
903 		if (err)
904 			return err;
905 
906 		err = setup(engine);
907 		if (err)
908 			return err;
909 
910 		err = engine_init_common(engine);
911 		if (err)
912 			return err;
913 
914 		intel_engine_add_user(engine);
915 	}
916 
917 	return 0;
918 }
919 
920 /**
921  * intel_engine_cleanup_common - cleans up the engine state created by
922  *                               the common initializers.
923  * @engine: Engine to cleanup.
924  *
925  * This cleans up everything created by the common helpers.
926  */
927 void intel_engine_cleanup_common(struct intel_engine_cs *engine)
928 {
929 	GEM_BUG_ON(!list_empty(&engine->active.requests));
930 	tasklet_kill(&engine->execlists.tasklet); /* flush the callback */
931 
932 	cleanup_status_page(engine);
933 	intel_breadcrumbs_free(engine->breadcrumbs);
934 
935 	intel_engine_fini_retire(engine);
936 	intel_engine_cleanup_cmd_parser(engine);
937 
938 	if (engine->default_state)
939 		fput(engine->default_state);
940 
941 	if (engine->kernel_context) {
942 		intel_context_unpin(engine->kernel_context);
943 		intel_context_put(engine->kernel_context);
944 	}
945 	GEM_BUG_ON(!llist_empty(&engine->barrier_tasks));
946 
947 	intel_wa_list_free(&engine->ctx_wa_list);
948 	intel_wa_list_free(&engine->wa_list);
949 	intel_wa_list_free(&engine->whitelist);
950 }
951 
952 /**
953  * intel_engine_resume - re-initializes the HW state of the engine
954  * @engine: Engine to resume.
955  *
956  * Returns zero on success or an error code on failure.
957  */
958 int intel_engine_resume(struct intel_engine_cs *engine)
959 {
960 	intel_engine_apply_workarounds(engine);
961 	intel_engine_apply_whitelist(engine);
962 
963 	return engine->resume(engine);
964 }
965 
966 u64 intel_engine_get_active_head(const struct intel_engine_cs *engine)
967 {
968 	struct drm_i915_private *i915 = engine->i915;
969 
970 	u64 acthd;
971 
972 	if (INTEL_GEN(i915) >= 8)
973 		acthd = ENGINE_READ64(engine, RING_ACTHD, RING_ACTHD_UDW);
974 	else if (INTEL_GEN(i915) >= 4)
975 		acthd = ENGINE_READ(engine, RING_ACTHD);
976 	else
977 		acthd = ENGINE_READ(engine, ACTHD);
978 
979 	return acthd;
980 }
981 
982 u64 intel_engine_get_last_batch_head(const struct intel_engine_cs *engine)
983 {
984 	u64 bbaddr;
985 
986 	if (INTEL_GEN(engine->i915) >= 8)
987 		bbaddr = ENGINE_READ64(engine, RING_BBADDR, RING_BBADDR_UDW);
988 	else
989 		bbaddr = ENGINE_READ(engine, RING_BBADDR);
990 
991 	return bbaddr;
992 }
993 
994 static unsigned long stop_timeout(const struct intel_engine_cs *engine)
995 {
996 	if (in_atomic() || irqs_disabled()) /* inside atomic preempt-reset? */
997 		return 0;
998 
999 	/*
1000 	 * If we are doing a normal GPU reset, we can take our time and allow
1001 	 * the engine to quiesce. We've stopped submission to the engine, and
1002 	 * if we wait long enough an innocent context should complete and
1003 	 * leave the engine idle. So they should not be caught unaware by
1004 	 * the forthcoming GPU reset (which usually follows the stop_cs)!
1005 	 */
1006 	return READ_ONCE(engine->props.stop_timeout_ms);
1007 }
1008 
1009 int intel_engine_stop_cs(struct intel_engine_cs *engine)
1010 {
1011 	struct intel_uncore *uncore = engine->uncore;
1012 	const u32 base = engine->mmio_base;
1013 	const i915_reg_t mode = RING_MI_MODE(base);
1014 	int err;
1015 
1016 	if (INTEL_GEN(engine->i915) < 3)
1017 		return -ENODEV;
1018 
1019 	ENGINE_TRACE(engine, "\n");
1020 
1021 	intel_uncore_write_fw(uncore, mode, _MASKED_BIT_ENABLE(STOP_RING));
1022 
1023 	err = 0;
1024 	if (__intel_wait_for_register_fw(uncore,
1025 					 mode, MODE_IDLE, MODE_IDLE,
1026 					 1000, stop_timeout(engine),
1027 					 NULL)) {
1028 		ENGINE_TRACE(engine, "timed out on STOP_RING -> IDLE\n");
1029 		err = -ETIMEDOUT;
1030 	}
1031 
1032 	/* A final mmio read to hopefully flush GPU writes to memory */
1033 	intel_uncore_posting_read_fw(uncore, mode);
1034 
1035 	return err;
1036 }
1037 
1038 void intel_engine_cancel_stop_cs(struct intel_engine_cs *engine)
1039 {
1040 	ENGINE_TRACE(engine, "\n");
1041 
1042 	ENGINE_WRITE_FW(engine, RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING));
1043 }
1044 
1045 const char *i915_cache_level_str(struct drm_i915_private *i915, int type)
1046 {
1047 	switch (type) {
1048 	case I915_CACHE_NONE: return " uncached";
1049 	case I915_CACHE_LLC: return HAS_LLC(i915) ? " LLC" : " snooped";
1050 	case I915_CACHE_L3_LLC: return " L3+LLC";
1051 	case I915_CACHE_WT: return " WT";
1052 	default: return "";
1053 	}
1054 }
1055 
1056 static u32
1057 read_subslice_reg(const struct intel_engine_cs *engine,
1058 		  int slice, int subslice, i915_reg_t reg)
1059 {
1060 	struct drm_i915_private *i915 = engine->i915;
1061 	struct intel_uncore *uncore = engine->uncore;
1062 	u32 mcr_mask, mcr_ss, mcr, old_mcr, val;
1063 	enum forcewake_domains fw_domains;
1064 
1065 	if (INTEL_GEN(i915) >= 11) {
1066 		mcr_mask = GEN11_MCR_SLICE_MASK | GEN11_MCR_SUBSLICE_MASK;
1067 		mcr_ss = GEN11_MCR_SLICE(slice) | GEN11_MCR_SUBSLICE(subslice);
1068 	} else {
1069 		mcr_mask = GEN8_MCR_SLICE_MASK | GEN8_MCR_SUBSLICE_MASK;
1070 		mcr_ss = GEN8_MCR_SLICE(slice) | GEN8_MCR_SUBSLICE(subslice);
1071 	}
1072 
1073 	fw_domains = intel_uncore_forcewake_for_reg(uncore, reg,
1074 						    FW_REG_READ);
1075 	fw_domains |= intel_uncore_forcewake_for_reg(uncore,
1076 						     GEN8_MCR_SELECTOR,
1077 						     FW_REG_READ | FW_REG_WRITE);
1078 
1079 	spin_lock_irq(&uncore->lock);
1080 	intel_uncore_forcewake_get__locked(uncore, fw_domains);
1081 
1082 	old_mcr = mcr = intel_uncore_read_fw(uncore, GEN8_MCR_SELECTOR);
1083 
1084 	mcr &= ~mcr_mask;
1085 	mcr |= mcr_ss;
1086 	intel_uncore_write_fw(uncore, GEN8_MCR_SELECTOR, mcr);
1087 
1088 	val = intel_uncore_read_fw(uncore, reg);
1089 
1090 	mcr &= ~mcr_mask;
1091 	mcr |= old_mcr & mcr_mask;
1092 
1093 	intel_uncore_write_fw(uncore, GEN8_MCR_SELECTOR, mcr);
1094 
1095 	intel_uncore_forcewake_put__locked(uncore, fw_domains);
1096 	spin_unlock_irq(&uncore->lock);
1097 
1098 	return val;
1099 }
1100 
1101 /* NB: please notice the memset */
1102 void intel_engine_get_instdone(const struct intel_engine_cs *engine,
1103 			       struct intel_instdone *instdone)
1104 {
1105 	struct drm_i915_private *i915 = engine->i915;
1106 	const struct sseu_dev_info *sseu = &engine->gt->info.sseu;
1107 	struct intel_uncore *uncore = engine->uncore;
1108 	u32 mmio_base = engine->mmio_base;
1109 	int slice;
1110 	int subslice;
1111 
1112 	memset(instdone, 0, sizeof(*instdone));
1113 
1114 	switch (INTEL_GEN(i915)) {
1115 	default:
1116 		instdone->instdone =
1117 			intel_uncore_read(uncore, RING_INSTDONE(mmio_base));
1118 
1119 		if (engine->id != RCS0)
1120 			break;
1121 
1122 		instdone->slice_common =
1123 			intel_uncore_read(uncore, GEN7_SC_INSTDONE);
1124 		if (INTEL_GEN(i915) >= 12) {
1125 			instdone->slice_common_extra[0] =
1126 				intel_uncore_read(uncore, GEN12_SC_INSTDONE_EXTRA);
1127 			instdone->slice_common_extra[1] =
1128 				intel_uncore_read(uncore, GEN12_SC_INSTDONE_EXTRA2);
1129 		}
1130 		for_each_instdone_slice_subslice(i915, sseu, slice, subslice) {
1131 			instdone->sampler[slice][subslice] =
1132 				read_subslice_reg(engine, slice, subslice,
1133 						  GEN7_SAMPLER_INSTDONE);
1134 			instdone->row[slice][subslice] =
1135 				read_subslice_reg(engine, slice, subslice,
1136 						  GEN7_ROW_INSTDONE);
1137 		}
1138 		break;
1139 	case 7:
1140 		instdone->instdone =
1141 			intel_uncore_read(uncore, RING_INSTDONE(mmio_base));
1142 
1143 		if (engine->id != RCS0)
1144 			break;
1145 
1146 		instdone->slice_common =
1147 			intel_uncore_read(uncore, GEN7_SC_INSTDONE);
1148 		instdone->sampler[0][0] =
1149 			intel_uncore_read(uncore, GEN7_SAMPLER_INSTDONE);
1150 		instdone->row[0][0] =
1151 			intel_uncore_read(uncore, GEN7_ROW_INSTDONE);
1152 
1153 		break;
1154 	case 6:
1155 	case 5:
1156 	case 4:
1157 		instdone->instdone =
1158 			intel_uncore_read(uncore, RING_INSTDONE(mmio_base));
1159 		if (engine->id == RCS0)
1160 			/* HACK: Using the wrong struct member */
1161 			instdone->slice_common =
1162 				intel_uncore_read(uncore, GEN4_INSTDONE1);
1163 		break;
1164 	case 3:
1165 	case 2:
1166 		instdone->instdone = intel_uncore_read(uncore, GEN2_INSTDONE);
1167 		break;
1168 	}
1169 }
1170 
1171 static bool ring_is_idle(struct intel_engine_cs *engine)
1172 {
1173 	bool idle = true;
1174 
1175 	if (I915_SELFTEST_ONLY(!engine->mmio_base))
1176 		return true;
1177 
1178 	if (!intel_engine_pm_get_if_awake(engine))
1179 		return true;
1180 
1181 	/* First check that no commands are left in the ring */
1182 	if ((ENGINE_READ(engine, RING_HEAD) & HEAD_ADDR) !=
1183 	    (ENGINE_READ(engine, RING_TAIL) & TAIL_ADDR))
1184 		idle = false;
1185 
1186 	/* No bit for gen2, so assume the CS parser is idle */
1187 	if (INTEL_GEN(engine->i915) > 2 &&
1188 	    !(ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE))
1189 		idle = false;
1190 
1191 	intel_engine_pm_put(engine);
1192 
1193 	return idle;
1194 }
1195 
1196 void intel_engine_flush_submission(struct intel_engine_cs *engine)
1197 {
1198 	struct tasklet_struct *t = &engine->execlists.tasklet;
1199 
1200 	if (!t->func)
1201 		return;
1202 
1203 	/* Synchronise and wait for the tasklet on another CPU */
1204 	tasklet_kill(t);
1205 
1206 	/* Having cancelled the tasklet, ensure that it is run */
1207 	local_bh_disable();
1208 	if (tasklet_trylock(t)) {
1209 		/* Must wait for any GPU reset in progress. */
1210 		if (__tasklet_is_enabled(t))
1211 			t->func(t->data);
1212 		tasklet_unlock(t);
1213 	}
1214 	local_bh_enable();
1215 }
1216 
1217 /**
1218  * intel_engine_is_idle() - Report if the engine has finished processing all work
1219  * @engine: the intel_engine_cs
1220  *
1221  * Return true if there are no requests pending, nothing left to be submitted
1222  * to hardware, and that the engine is idle.
1223  */
1224 bool intel_engine_is_idle(struct intel_engine_cs *engine)
1225 {
1226 	/* More white lies, if wedged, hw state is inconsistent */
1227 	if (intel_gt_is_wedged(engine->gt))
1228 		return true;
1229 
1230 	if (!intel_engine_pm_is_awake(engine))
1231 		return true;
1232 
1233 	/* Waiting to drain ELSP? */
1234 	if (execlists_active(&engine->execlists)) {
1235 		synchronize_hardirq(engine->i915->drm.pdev->irq);
1236 
1237 		intel_engine_flush_submission(engine);
1238 
1239 		if (execlists_active(&engine->execlists))
1240 			return false;
1241 	}
1242 
1243 	/* ELSP is empty, but there are ready requests? E.g. after reset */
1244 	if (!RB_EMPTY_ROOT(&engine->execlists.queue.rb_root))
1245 		return false;
1246 
1247 	/* Ring stopped? */
1248 	return ring_is_idle(engine);
1249 }
1250 
1251 bool intel_engines_are_idle(struct intel_gt *gt)
1252 {
1253 	struct intel_engine_cs *engine;
1254 	enum intel_engine_id id;
1255 
1256 	/*
1257 	 * If the driver is wedged, HW state may be very inconsistent and
1258 	 * report that it is still busy, even though we have stopped using it.
1259 	 */
1260 	if (intel_gt_is_wedged(gt))
1261 		return true;
1262 
1263 	/* Already parked (and passed an idleness test); must still be idle */
1264 	if (!READ_ONCE(gt->awake))
1265 		return true;
1266 
1267 	for_each_engine(engine, gt, id) {
1268 		if (!intel_engine_is_idle(engine))
1269 			return false;
1270 	}
1271 
1272 	return true;
1273 }
1274 
1275 void intel_engines_reset_default_submission(struct intel_gt *gt)
1276 {
1277 	struct intel_engine_cs *engine;
1278 	enum intel_engine_id id;
1279 
1280 	for_each_engine(engine, gt, id)
1281 		engine->set_default_submission(engine);
1282 }
1283 
1284 bool intel_engine_can_store_dword(struct intel_engine_cs *engine)
1285 {
1286 	switch (INTEL_GEN(engine->i915)) {
1287 	case 2:
1288 		return false; /* uses physical not virtual addresses */
1289 	case 3:
1290 		/* maybe only uses physical not virtual addresses */
1291 		return !(IS_I915G(engine->i915) || IS_I915GM(engine->i915));
1292 	case 4:
1293 		return !IS_I965G(engine->i915); /* who knows! */
1294 	case 6:
1295 		return engine->class != VIDEO_DECODE_CLASS; /* b0rked */
1296 	default:
1297 		return true;
1298 	}
1299 }
1300 
1301 static int print_sched_attr(const struct i915_sched_attr *attr,
1302 			    char *buf, int x, int len)
1303 {
1304 	if (attr->priority == I915_PRIORITY_INVALID)
1305 		return x;
1306 
1307 	x += snprintf(buf + x, len - x,
1308 		      " prio=%d", attr->priority);
1309 
1310 	return x;
1311 }
1312 
1313 static void print_request(struct drm_printer *m,
1314 			  struct i915_request *rq,
1315 			  const char *prefix)
1316 {
1317 	const char *name = rq->fence.ops->get_timeline_name(&rq->fence);
1318 	char buf[80] = "";
1319 	int x = 0;
1320 
1321 	x = print_sched_attr(&rq->sched.attr, buf, x, sizeof(buf));
1322 
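	/* Markers: '!' completed, '*' started; '+' signaled, '-' signaling enabled */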
1323 	drm_printf(m, "%s %llx:%llx%s%s %s @ %dms: %s\n",
1324 		   prefix,
1325 		   rq->fence.context, rq->fence.seqno,
1326 		   i915_request_completed(rq) ? "!" :
1327 		   i915_request_started(rq) ? "*" :
1328 		   "",
1329 		   test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
1330 			    &rq->fence.flags) ? "+" :
1331 		   test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
1332 			    &rq->fence.flags) ? "-" :
1333 		   "",
1334 		   buf,
1335 		   jiffies_to_msecs(jiffies - rq->emitted_jiffies),
1336 		   name);
1337 }
1338 
1339 static struct intel_timeline *get_timeline(struct i915_request *rq)
1340 {
1341 	struct intel_timeline *tl;
1342 
1343 	/*
1344 	 * Even though we are holding the engine->active.lock here, there
1345 	 * is no control over the submission queue per-se and we are
1346 	 * inspecting the active state at a random point in time, with an
1347 	 * unknown queue. Play safe and make sure the timeline remains valid.
1348 	 * (Only being used for pretty printing, one extra kref shouldn't
1349 	 * cause a camel stampede!)
1350 	 */
1351 	rcu_read_lock();
1352 	tl = rcu_dereference(rq->timeline);
1353 	if (!kref_get_unless_zero(&tl->kref))
1354 		tl = NULL;
1355 	rcu_read_unlock();
1356 
1357 	return tl;
1358 }
1359 
1360 static int print_ring(char *buf, int sz, struct i915_request *rq)
1361 {
1362 	int len = 0;
1363 
1364 	if (!i915_request_signaled(rq)) {
1365 		struct intel_timeline *tl = get_timeline(rq);
1366 
1367 		len = scnprintf(buf, sz,
1368 				"ring:{start:%08x, hwsp:%08x, seqno:%08x, runtime:%llums}, ",
1369 				i915_ggtt_offset(rq->ring->vma),
1370 				tl ? tl->hwsp_offset : 0,
1371 				hwsp_seqno(rq),
1372 				DIV_ROUND_CLOSEST_ULL(intel_context_get_total_runtime_ns(rq->context),
1373 						      1000 * 1000));
1374 
1375 		if (tl)
1376 			intel_timeline_put(tl);
1377 	}
1378 
1379 	return len;
1380 }
1381 
1382 static void hexdump(struct drm_printer *m, const void *buf, size_t len)
1383 {
1384 	const size_t rowsize = 8 * sizeof(u32);
1385 	const void *prev = NULL;
1386 	bool skip = false;
1387 	size_t pos;
1388 
1389 	for (pos = 0; pos < len; pos += rowsize) {
1390 		char line[128];
1391 
1392 		if (prev && !memcmp(prev, buf + pos, rowsize)) {
1393 			if (!skip) {
1394 				drm_printf(m, "*\n");
1395 				skip = true;
1396 			}
1397 			continue;
1398 		}
1399 
1400 		WARN_ON_ONCE(hex_dump_to_buffer(buf + pos, len - pos,
1401 						rowsize, sizeof(u32),
1402 						line, sizeof(line),
1403 						false) >= sizeof(line));
1404 		drm_printf(m, "[%04zx] %s\n", pos, line);
1405 
1406 		prev = buf + pos;
1407 		skip = false;
1408 	}
1409 }
1410 
1411 static const char *repr_timer(const struct timer_list *t)
1412 {
1413 	if (!READ_ONCE(t->expires))
1414 		return "inactive";
1415 
1416 	if (timer_pending(t))
1417 		return "active";
1418 
1419 	return "expired";
1420 }
1421 
1422 static void intel_engine_print_registers(struct intel_engine_cs *engine,
1423 					 struct drm_printer *m)
1424 {
1425 	struct drm_i915_private *dev_priv = engine->i915;
1426 	struct intel_engine_execlists * const execlists = &engine->execlists;
1427 	u64 addr;
1428 
1429 	if (engine->id == RENDER_CLASS && IS_GEN_RANGE(dev_priv, 4, 7))
1430 		drm_printf(m, "\tCCID: 0x%08x\n", ENGINE_READ(engine, CCID));
1431 	if (HAS_EXECLISTS(dev_priv)) {
1432 		drm_printf(m, "\tEL_STAT_HI: 0x%08x\n",
1433 			   ENGINE_READ(engine, RING_EXECLIST_STATUS_HI));
1434 		drm_printf(m, "\tEL_STAT_LO: 0x%08x\n",
1435 			   ENGINE_READ(engine, RING_EXECLIST_STATUS_LO));
1436 	}
1437 	drm_printf(m, "\tRING_START: 0x%08x\n",
1438 		   ENGINE_READ(engine, RING_START));
1439 	drm_printf(m, "\tRING_HEAD:  0x%08x\n",
1440 		   ENGINE_READ(engine, RING_HEAD) & HEAD_ADDR);
1441 	drm_printf(m, "\tRING_TAIL:  0x%08x\n",
1442 		   ENGINE_READ(engine, RING_TAIL) & TAIL_ADDR);
1443 	drm_printf(m, "\tRING_CTL:   0x%08x%s\n",
1444 		   ENGINE_READ(engine, RING_CTL),
1445 		   ENGINE_READ(engine, RING_CTL) & (RING_WAIT | RING_WAIT_SEMAPHORE) ? " [waiting]" : "");
1446 	if (INTEL_GEN(engine->i915) > 2) {
1447 		drm_printf(m, "\tRING_MODE:  0x%08x%s\n",
1448 			   ENGINE_READ(engine, RING_MI_MODE),
1449 			   ENGINE_READ(engine, RING_MI_MODE) & (MODE_IDLE) ? " [idle]" : "");
1450 	}
1451 
1452 	if (INTEL_GEN(dev_priv) >= 6) {
1453 		drm_printf(m, "\tRING_IMR:   0x%08x\n",
1454 			   ENGINE_READ(engine, RING_IMR));
1455 		drm_printf(m, "\tRING_ESR:   0x%08x\n",
1456 			   ENGINE_READ(engine, RING_ESR));
1457 		drm_printf(m, "\tRING_EMR:   0x%08x\n",
1458 			   ENGINE_READ(engine, RING_EMR));
1459 		drm_printf(m, "\tRING_EIR:   0x%08x\n",
1460 			   ENGINE_READ(engine, RING_EIR));
1461 	}
1462 
1463 	addr = intel_engine_get_active_head(engine);
1464 	drm_printf(m, "\tACTHD:  0x%08x_%08x\n",
1465 		   upper_32_bits(addr), lower_32_bits(addr));
1466 	addr = intel_engine_get_last_batch_head(engine);
1467 	drm_printf(m, "\tBBADDR: 0x%08x_%08x\n",
1468 		   upper_32_bits(addr), lower_32_bits(addr));
1469 	if (INTEL_GEN(dev_priv) >= 8)
1470 		addr = ENGINE_READ64(engine, RING_DMA_FADD, RING_DMA_FADD_UDW);
1471 	else if (INTEL_GEN(dev_priv) >= 4)
1472 		addr = ENGINE_READ(engine, RING_DMA_FADD);
1473 	else
1474 		addr = ENGINE_READ(engine, DMA_FADD_I8XX);
1475 	drm_printf(m, "\tDMA_FADDR: 0x%08x_%08x\n",
1476 		   upper_32_bits(addr), lower_32_bits(addr));
1477 	if (INTEL_GEN(dev_priv) >= 4) {
1478 		drm_printf(m, "\tIPEIR: 0x%08x\n",
1479 			   ENGINE_READ(engine, RING_IPEIR));
1480 		drm_printf(m, "\tIPEHR: 0x%08x\n",
1481 			   ENGINE_READ(engine, RING_IPEHR));
1482 	} else {
1483 		drm_printf(m, "\tIPEIR: 0x%08x\n", ENGINE_READ(engine, IPEIR));
1484 		drm_printf(m, "\tIPEHR: 0x%08x\n", ENGINE_READ(engine, IPEHR));
1485 	}
1486 
1487 	if (HAS_EXECLISTS(dev_priv)) {
1488 		struct i915_request * const *port, *rq;
1489 		const u32 *hws =
1490 			&engine->status_page.addr[I915_HWS_CSB_BUF0_INDEX];
1491 		const u8 num_entries = execlists->csb_size;
1492 		unsigned int idx;
1493 		u8 read, write;
1494 
1495 		drm_printf(m, "\tExeclist tasklet queued? %s (%s), preempt? %s, timeslice? %s\n",
1496 			   yesno(test_bit(TASKLET_STATE_SCHED,
1497 					  &engine->execlists.tasklet.state)),
1498 			   enableddisabled(!atomic_read(&engine->execlists.tasklet.count)),
1499 			   repr_timer(&engine->execlists.preempt),
1500 			   repr_timer(&engine->execlists.timer));
1501 
1502 		read = execlists->csb_head;
1503 		write = READ_ONCE(*execlists->csb_write);
1504 
1505 		drm_printf(m, "\tExeclist status: 0x%08x %08x; CSB read:%d, write:%d, entries:%d\n",
1506 			   ENGINE_READ(engine, RING_EXECLIST_STATUS_LO),
1507 			   ENGINE_READ(engine, RING_EXECLIST_STATUS_HI),
1508 			   read, write, num_entries);
1509 
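		/* Unwrap the CSB ring indices so the loop below walks read..write in order */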
1510 		if (read >= num_entries)
1511 			read = 0;
1512 		if (write >= num_entries)
1513 			write = 0;
1514 		if (read > write)
1515 			write += num_entries;
1516 		while (read < write) {
1517 			idx = ++read % num_entries;
1518 			drm_printf(m, "\tExeclist CSB[%d]: 0x%08x, context: %d\n",
1519 				   idx, hws[idx * 2], hws[idx * 2 + 1]);
1520 		}
1521 
1522 		execlists_active_lock_bh(execlists);
1523 		rcu_read_lock();
1524 		for (port = execlists->active; (rq = *port); port++) {
1525 			char hdr[160];
1526 			int len;
1527 
1528 			len = scnprintf(hdr, sizeof(hdr),
1529 					"\t\tActive[%d]:  ccid:%08x%s%s, ",
1530 					(int)(port - execlists->active),
1531 					rq->context->lrc.ccid,
1532 					intel_context_is_closed(rq->context) ? "!" : "",
1533 					intel_context_is_banned(rq->context) ? "*" : "");
1534 			len += print_ring(hdr + len, sizeof(hdr) - len, rq);
1535 			scnprintf(hdr + len, sizeof(hdr) - len, "rq: ");
1536 			print_request(m, rq, hdr);
1537 		}
1538 		for (port = execlists->pending; (rq = *port); port++) {
1539 			char hdr[160];
1540 			int len;
1541 
1542 			len = scnprintf(hdr, sizeof(hdr),
1543 					"\t\tPending[%d]: ccid:%08x%s%s, ",
1544 					(int)(port - execlists->pending),
1545 					rq->context->lrc.ccid,
1546 					intel_context_is_closed(rq->context) ? "!" : "",
1547 					intel_context_is_banned(rq->context) ? "*" : "");
1548 			len += print_ring(hdr + len, sizeof(hdr) - len, rq);
1549 			scnprintf(hdr + len, sizeof(hdr) - len, "rq: ");
1550 			print_request(m, rq, hdr);
1551 		}
1552 		rcu_read_unlock();
1553 		execlists_active_unlock_bh(execlists);
1554 	} else if (INTEL_GEN(dev_priv) > 6) {
1555 		drm_printf(m, "\tPP_DIR_BASE: 0x%08x\n",
1556 			   ENGINE_READ(engine, RING_PP_DIR_BASE));
1557 		drm_printf(m, "\tPP_DIR_BASE_READ: 0x%08x\n",
1558 			   ENGINE_READ(engine, RING_PP_DIR_BASE_READ));
1559 		drm_printf(m, "\tPP_DIR_DCLV: 0x%08x\n",
1560 			   ENGINE_READ(engine, RING_PP_DIR_DCLV));
1561 	}
1562 }
1563 
1564 static void print_request_ring(struct drm_printer *m, struct i915_request *rq)
1565 {
1566 	void *ring;
1567 	int size;
1568 
1569 	drm_printf(m,
1570 		   "[head %04x, postfix %04x, tail %04x, batch 0x%08x_%08x]:\n",
1571 		   rq->head, rq->postfix, rq->tail,
1572 		   rq->batch ? upper_32_bits(rq->batch->node.start) : ~0u,
1573 		   rq->batch ? lower_32_bits(rq->batch->node.start) : ~0u);
1574 
1575 	size = rq->tail - rq->head;
1576 	if (rq->tail < rq->head)
1577 		size += rq->ring->size;
1578 
1579 	ring = kmalloc(size, GFP_ATOMIC);
1580 	if (ring) {
1581 		const void *vaddr = rq->ring->vaddr;
1582 		unsigned int head = rq->head;
1583 		unsigned int len = 0;
1584 
1585 		if (rq->tail < head) {
1586 			len = rq->ring->size - head;
1587 			memcpy(ring, vaddr + head, len);
1588 			head = 0;
1589 		}
1590 		memcpy(ring + len, vaddr + head, size - len);
1591 
1592 		hexdump(m, ring, size);
1593 		kfree(ring);
1594 	}
1595 }
1596 
1597 static unsigned long list_count(struct list_head *list)
1598 {
1599 	struct list_head *pos;
1600 	unsigned long count = 0;
1601 
1602 	list_for_each(pos, list)
1603 		count++;
1604 
1605 	return count;
1606 }
1607 
1608 void intel_engine_dump(struct intel_engine_cs *engine,
1609 		       struct drm_printer *m,
1610 		       const char *header, ...)
1611 {
1612 	struct i915_gpu_error * const error = &engine->i915->gpu_error;
1613 	struct i915_request *rq;
1614 	intel_wakeref_t wakeref;
1615 	unsigned long flags;
1616 	ktime_t dummy;
1617 
1618 	if (header) {
1619 		va_list ap;
1620 
1621 		va_start(ap, header);
1622 		drm_vprintf(m, header, &ap);
1623 		va_end(ap);
1624 	}
1625 
1626 	if (intel_gt_is_wedged(engine->gt))
1627 		drm_printf(m, "*** WEDGED ***\n");
1628 
1629 	drm_printf(m, "\tAwake? %d\n", atomic_read(&engine->wakeref.count));
1630 	drm_printf(m, "\tBarriers?: %s\n",
1631 		   yesno(!llist_empty(&engine->barrier_tasks)));
1632 	drm_printf(m, "\tLatency: %luus\n",
1633 		   ewma__engine_latency_read(&engine->latency));
1634 	if (intel_engine_supports_stats(engine))
1635 		drm_printf(m, "\tRuntime: %llums\n",
1636 			   ktime_to_ms(intel_engine_get_busy_time(engine,
1637 								  &dummy)));
1638 	drm_printf(m, "\tForcewake: %x domains, %d active\n",
1639 		   engine->fw_domain, atomic_read(&engine->fw_active));
1640 
1641 	rcu_read_lock();
1642 	rq = READ_ONCE(engine->heartbeat.systole);
1643 	if (rq)
1644 		drm_printf(m, "\tHeartbeat: %d ms ago\n",
1645 			   jiffies_to_msecs(jiffies - rq->emitted_jiffies));
1646 	rcu_read_unlock();
1647 	drm_printf(m, "\tReset count: %d (global %d)\n",
1648 		   i915_reset_engine_count(error, engine),
1649 		   i915_reset_count(error));
1650 
1651 	drm_printf(m, "\tRequests:\n");
1652 
1653 	spin_lock_irqsave(&engine->active.lock, flags);
1654 	rq = intel_engine_find_active_request(engine);
1655 	if (rq) {
1656 		struct intel_timeline *tl = get_timeline(rq);
1657 
1658 		print_request(m, rq, "\t\tactive ");
1659 
1660 		drm_printf(m, "\t\tring->start:  0x%08x\n",
1661 			   i915_ggtt_offset(rq->ring->vma));
1662 		drm_printf(m, "\t\tring->head:   0x%08x\n",
1663 			   rq->ring->head);
1664 		drm_printf(m, "\t\tring->tail:   0x%08x\n",
1665 			   rq->ring->tail);
1666 		drm_printf(m, "\t\tring->emit:   0x%08x\n",
1667 			   rq->ring->emit);
1668 		drm_printf(m, "\t\tring->space:  0x%08x\n",
1669 			   rq->ring->space);
1670 
1671 		if (tl) {
1672 			drm_printf(m, "\t\tring->hwsp:   0x%08x\n",
1673 				   tl->hwsp_offset);
1674 			intel_timeline_put(tl);
1675 		}
1676 
1677 		print_request_ring(m, rq);
1678 
1679 		if (rq->context->lrc_reg_state) {
1680 			drm_printf(m, "Logical Ring Context:\n");
1681 			hexdump(m, rq->context->lrc_reg_state, PAGE_SIZE);
1682 		}
1683 	}
1684 	drm_printf(m, "\tOn hold?: %lu\n", list_count(&engine->active.hold));
1685 	spin_unlock_irqrestore(&engine->active.lock, flags);
1686 
1687 	drm_printf(m, "\tMMIO base:  0x%08x\n", engine->mmio_base);
1688 	wakeref = intel_runtime_pm_get_if_in_use(engine->uncore->rpm);
1689 	if (wakeref) {
1690 		intel_engine_print_registers(engine, m);
1691 		intel_runtime_pm_put(engine->uncore->rpm, wakeref);
1692 	} else {
1693 		drm_printf(m, "\tDevice is asleep; skipping register dump\n");
1694 	}
1695 
1696 	intel_execlists_show_requests(engine, m, print_request, 8);
1697 
1698 	drm_printf(m, "HWSP:\n");
1699 	hexdump(m, engine->status_page.addr, PAGE_SIZE);
1700 
1701 	drm_printf(m, "Idle? %s\n", yesno(intel_engine_is_idle(engine)));
1702 
1703 	intel_engine_print_breadcrumbs(engine, m);
1704 }
1705 
1706 static ktime_t __intel_engine_get_busy_time(struct intel_engine_cs *engine,
1707 					    ktime_t *now)
1708 {
1709 	ktime_t total = engine->stats.total;
1710 
1711 	/*
1712 	 * If the engine is executing something at the moment
1713 	 * add it to the total.
1714 	 */
1715 	*now = ktime_get();
1716 	if (atomic_read(&engine->stats.active))
1717 		total = ktime_add(total, ktime_sub(*now, engine->stats.start));
1718 
1719 	return total;
1720 }
1721 
1722 /**
1723  * intel_engine_get_busy_time() - Return current accumulated engine busyness
1724  * @engine: engine to report on
1725  * @now: monotonic timestamp of sampling
1726  *
1727  * Returns accumulated time @engine was busy since engine stats were enabled.
1728  */
1729 ktime_t intel_engine_get_busy_time(struct intel_engine_cs *engine, ktime_t *now)
1730 {
1731 	unsigned int seq;
1732 	ktime_t total;
1733 
1734 	do {
1735 		seq = read_seqbegin(&engine->stats.lock);
1736 		total = __intel_engine_get_busy_time(engine, now);
1737 	} while (read_seqretry(&engine->stats.lock, seq));
1738 
1739 	return total;
1740 }
1741 
1742 static bool match_ring(struct i915_request *rq)
1743 {
1744 	u32 ring = ENGINE_READ(rq->engine, RING_START);
1745 
1746 	return ring == i915_ggtt_offset(rq->ring->vma);
1747 }
1748 
1749 struct i915_request *
1750 intel_engine_find_active_request(struct intel_engine_cs *engine)
1751 {
1752 	struct i915_request *request, *active = NULL;
1753 
1754 	/*
1755 	 * We are called by the error capture, reset and to dump engine
1756 	 * state at random points in time. In particular, note that neither is
1757 	 * crucially ordered with an interrupt. After a hang, the GPU is dead
1758 	 * and we assume that no more writes can happen (we waited long enough
1759 	 * for all writes that were in transaction to be flushed) - adding an
1760 	 * extra delay for a recent interrupt is pointless. Hence, we do
1761 	 * not need an engine->irq_seqno_barrier() before the seqno reads.
1762 	 * At all other times, we must assume the GPU is still running, but
1763 	 * we only care about the snapshot of this moment.
1764 	 */
1765 	lockdep_assert_held(&engine->active.lock);
1766 
1767 	rcu_read_lock();
1768 	request = execlists_active(&engine->execlists);
1769 	if (request) {
1770 		struct intel_timeline *tl = request->context->timeline;
1771 
1772 		list_for_each_entry_from_reverse(request, &tl->requests, link) {
1773 			if (i915_request_completed(request))
1774 				break;
1775 
1776 			active = request;
1777 		}
1778 	}
1779 	rcu_read_unlock();
1780 	if (active)
1781 		return active;
1782 
1783 	list_for_each_entry(request, &engine->active.requests, sched.link) {
1784 		if (i915_request_completed(request))
1785 			continue;
1786 
1787 		if (!i915_request_started(request))
1788 			continue;
1789 
1790 		/* More than one preemptible request may match! */
1791 		if (!match_ring(request))
1792 			continue;
1793 
1794 		active = request;
1795 		break;
1796 	}
1797 
1798 	return active;
1799 }
1800 
1801 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
1802 #include "mock_engine.c"
1803 #include "selftest_engine.c"
1804 #include "selftest_engine_cs.c"
1805 #endif
1806