// SPDX-License-Identifier: MIT
/*
 * Copyright © 2014 Intel Corporation
 */

#include <linux/circ_buf.h>

#include "gem/i915_gem_context.h"
#include "gt/intel_context.h"
#include "gt/intel_engine_pm.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
#include "gt/intel_lrc_reg.h"
#include "gt/intel_ring.h"

#include "intel_guc_submission.h"

#include "i915_drv.h"
#include "i915_trace.h"

/**
 * DOC: GuC-based command submission
 *
 * IMPORTANT NOTE: GuC submission is currently not supported in i915. The GuC
 * firmware is moving to an updated submission interface and we plan to
 * turn submission back on when that lands. The documentation below (and the
 * related code) matches the old submission model and will be updated as part
 * of the upgrade to the new flow.
 *
 * GuC stage descriptor:
 * During initialization, the driver allocates a static pool of 1024 such
 * descriptors and shares them with the GuC. Currently, we only use one
 * descriptor. This stage descriptor lets the GuC know about the workqueue and
 * process descriptor. Theoretically, it also lets the GuC know about our HW
 * contexts (context ID, etc...), but we actually employ a kind of submission
 * where the GuC uses the LRCA sent via the work item instead. This is called
 * a "proxy" submission.
 *
 * The Scratch registers:
 * There are 16 MMIO-based registers starting at 0xC180. The kernel driver
 * writes a value to the action register (SOFT_SCRATCH_0) along with any data.
 * It then triggers an interrupt on the GuC via another register write
 * (0xC4C8). The firmware writes a success/fail code back to the action
 * register after processing the request. The kernel driver polls waiting for
 * this update and then proceeds; see the sketch following this comment.
 *
 * Work Items:
 * There are several types of work items that the host may place into a
 * workqueue, each with its own requirements and limitations. Currently only
 * WQ_TYPE_INORDER is needed to support legacy submission via GuC, which
 * represents an in-order queue. The kernel driver packs the ring tail pointer
 * and an ELSP context descriptor dword into a Work Item.
 * See guc_add_request().
 */

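/*
 * Illustrative sketch (not part of the driver flow) of the scratch-register
 * handshake described in the DOC comment above. It assumes the SOFT_SCRATCH()
 * and GUC_SEND_INTERRUPT/GUC_SEND_TRIGGER definitions from intel_guc_reg.h;
 * the real send path, with locking, response timeouts and multi-dword
 * actions, is intel_guc_send_mmio():
 *
 *	intel_uncore_write(uncore, SOFT_SCRATCH(0), action);
 *	intel_uncore_write(uncore, GUC_SEND_INTERRUPT, GUC_SEND_TRIGGER);
 *	status = intel_uncore_read(uncore, SOFT_SCRATCH(0));
 *		(polled until the firmware posts its success/fail code)
 */
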
static inline struct i915_priolist *to_priolist(struct rb_node *rb)
{
	return rb_entry(rb, struct i915_priolist, node);
}

static struct guc_stage_desc *__get_stage_desc(struct intel_guc *guc, u32 id)
{
	struct guc_stage_desc *base = guc->stage_desc_pool_vaddr;

	return &base[id];
}

static int guc_workqueue_create(struct intel_guc *guc)
{
	return intel_guc_allocate_and_map_vma(guc, GUC_WQ_SIZE, &guc->workqueue,
					      &guc->workqueue_vaddr);
}

static void guc_workqueue_destroy(struct intel_guc *guc)
{
	i915_vma_unpin_and_release(&guc->workqueue, I915_VMA_RELEASE_MAP);
}

/*
 * Initialise the process descriptor shared with the GuC firmware.
 */
static int guc_proc_desc_create(struct intel_guc *guc)
{
	const u32 size = PAGE_ALIGN(sizeof(struct guc_process_desc));

	return intel_guc_allocate_and_map_vma(guc, size, &guc->proc_desc,
					      &guc->proc_desc_vaddr);
}

static void guc_proc_desc_destroy(struct intel_guc *guc)
{
	i915_vma_unpin_and_release(&guc->proc_desc, I915_VMA_RELEASE_MAP);
}

static void guc_proc_desc_init(struct intel_guc *guc)
{
	struct guc_process_desc *desc;

	desc = memset(guc->proc_desc_vaddr, 0, sizeof(*desc));

	/*
	 * XXX: pDoorbell and WQVBaseAddress are pointers in process address
	 * space for ring3 clients (set them as in mmap_ioctl) or kernel
	 * space for kernel clients (map on demand instead? May make debug
	 * easier to have it mapped).
	 */
	desc->wq_base_addr = 0;
	desc->db_base_addr = 0;

	desc->wq_size_bytes = GUC_WQ_SIZE;
	desc->wq_status = WQ_STATUS_ACTIVE;
	desc->priority = GUC_CLIENT_PRIORITY_KMD_NORMAL;
}

static void guc_proc_desc_fini(struct intel_guc *guc)
{
	memset(guc->proc_desc_vaddr, 0, sizeof(struct guc_process_desc));
}

static int guc_stage_desc_pool_create(struct intel_guc *guc)
{
	u32 size = PAGE_ALIGN(sizeof(struct guc_stage_desc) *
			      GUC_MAX_STAGE_DESCRIPTORS);

	return intel_guc_allocate_and_map_vma(guc, size, &guc->stage_desc_pool,
					      &guc->stage_desc_pool_vaddr);
}

static void guc_stage_desc_pool_destroy(struct intel_guc *guc)
{
	i915_vma_unpin_and_release(&guc->stage_desc_pool, I915_VMA_RELEASE_MAP);
}

/*
 * Initialise/clear the stage descriptor shared with the GuC firmware.
 *
 * This descriptor tells the GuC where (in GGTT space) to find the important
 * data structures related to work submission (process descriptor, write queue,
 * etc).
 */
static void guc_stage_desc_init(struct intel_guc *guc)
{
	struct guc_stage_desc *desc;

	/* we only use 1 stage desc, so hardcode it to 0 */
	desc = __get_stage_desc(guc, 0);
	memset(desc, 0, sizeof(*desc));

	desc->attribute = GUC_STAGE_DESC_ATTR_ACTIVE |
			  GUC_STAGE_DESC_ATTR_KERNEL;

	desc->stage_id = 0;
	desc->priority = GUC_CLIENT_PRIORITY_KMD_NORMAL;

	desc->process_desc = intel_guc_ggtt_offset(guc, guc->proc_desc);
	desc->wq_addr = intel_guc_ggtt_offset(guc, guc->workqueue);
	desc->wq_size = GUC_WQ_SIZE;
}

static void guc_stage_desc_fini(struct intel_guc *guc)
{
	struct guc_stage_desc *desc;

	desc = __get_stage_desc(guc, 0);
	memset(desc, 0, sizeof(*desc));
}

/* Construct a Work Item and append it to the GuC's Work Queue */
static void guc_wq_item_append(struct intel_guc *guc,
			       u32 target_engine, u32 context_desc,
			       u32 ring_tail, u32 fence_id)
{
	/* wqi_len is in DWords, and does not include the one-word header */
	const size_t wqi_size = sizeof(struct guc_wq_item);
	const u32 wqi_len = wqi_size / sizeof(u32) - 1;
	struct guc_process_desc *desc = guc->proc_desc_vaddr;
	struct guc_wq_item *wqi;
	u32 wq_off;

	lockdep_assert_held(&guc->wq_lock);

	/*
	 * For now, a workqueue item is 4 DWords and the workqueue buffer is
	 * 2 pages, so a wqi can neither straddle a page boundary nor wrap
	 * around to the beginning. This simplifies the implementation below.
	 *
	 * XXX: if that ever ceases to hold, we will need to stage the data
	 * in a temporary wqi and copy it into the workqueue buffer dword by
	 * dword.
	 */
	BUILD_BUG_ON(wqi_size != 16);

	/* We expect the WQ to be active if we're appending items to it */
	GEM_BUG_ON(desc->wq_status != WQ_STATUS_ACTIVE);

	/* Free space is guaranteed. */
	wq_off = READ_ONCE(desc->tail);
	GEM_BUG_ON(CIRC_SPACE(wq_off, READ_ONCE(desc->head),
			      GUC_WQ_SIZE) < wqi_size);
	GEM_BUG_ON(wq_off & (wqi_size - 1));

	wqi = guc->workqueue_vaddr + wq_off;

	/* Now fill in the 4-word work queue item */
	wqi->header = WQ_TYPE_INORDER |
		      (wqi_len << WQ_LEN_SHIFT) |
		      (target_engine << WQ_TARGET_SHIFT) |
		      WQ_NO_WCFLUSH_WAIT;
	wqi->context_desc = context_desc;
	wqi->submit_element_info = ring_tail << WQ_RING_TAIL_SHIFT;
	GEM_BUG_ON(ring_tail > WQ_RING_TAIL_MAX);
	wqi->fence_id = fence_id;

	/* Make the update visible to GuC */
	WRITE_ONCE(desc->tail, (wq_off + wqi_size) & (GUC_WQ_SIZE - 1));
}

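/*
 * For reference, the 4-DWord work item layout assembled above (field names
 * per struct guc_wq_item; the per-field notes reflect the values used here):
 *
 *	dw0: header              - WQ_TYPE_INORDER | wqi_len | target engine
 *	                           | WQ_NO_WCFLUSH_WAIT
 *	dw1: context_desc        - here, the context's lrc.ccid
 *	dw2: submit_element_info - the ring tail, in QWords
 *	dw3: fence_id            - here, the request's fence seqno
 */
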
static void guc_add_request(struct intel_guc *guc, struct i915_request *rq)
{
	struct intel_engine_cs *engine = rq->engine;
	u32 ctx_desc = rq->context->lrc.ccid;
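	/* The work item carries the ring tail in QWords, hence / sizeof(u64) */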
	u32 ring_tail = intel_ring_set_tail(rq->ring, rq->tail) / sizeof(u64);

	guc_wq_item_append(guc, engine->guc_id, ctx_desc,
			   ring_tail, rq->fence.seqno);
}

/*
 * When we're doing submissions using the regular execlists backend, writing
 * to the ELSP from the CPU side is enough to make sure that writes to
 * ringbuffer pages pinned in the mappable aperture portion of the GGTT are
 * visible to the command streamer. Writes done by the GuC on our behalf do
 * not guarantee such ordering; therefore, to ensure the flush, we issue a
 * POSTING READ.
 */
static void flush_ggtt_writes(struct i915_vma *vma)
{
	if (i915_vma_is_map_and_fenceable(vma))
		intel_uncore_posting_read_fw(vma->vm->gt->uncore,
					     GUC_STATUS);
}

static void guc_submit(struct intel_engine_cs *engine,
		       struct i915_request **out,
		       struct i915_request **end)
{
	struct intel_guc *guc = &engine->gt->uc.guc;

	spin_lock(&guc->wq_lock);

	do {
		struct i915_request *rq = *out++;

		flush_ggtt_writes(rq->ring->vma);
		guc_add_request(guc, rq);
	} while (out != end);

	spin_unlock(&guc->wq_lock);
}

static inline int rq_prio(const struct i915_request *rq)
{
	return rq->sched.attr.priority;
}

static struct i915_request *schedule_in(struct i915_request *rq, int idx)
{
	trace_i915_request_in(rq, idx);

	/*
	 * Currently we are not tracking the rq->context being inflight
	 * (ce->inflight = rq->engine). It is only used by the execlists
	 * backend at the moment, a similar counting strategy would be
	 * required if we generalise the inflight tracking.
	 */

	__intel_gt_pm_get(rq->engine->gt);
	return i915_request_get(rq);
}

static void schedule_out(struct i915_request *rq)
{
	trace_i915_request_out(rq);

	intel_gt_pm_put_async(rq->engine->gt);
	i915_request_put(rq);
}

static void __guc_dequeue(struct intel_engine_cs *engine)
{
	struct intel_engine_execlists * const execlists = &engine->execlists;
	struct i915_request **first = execlists->inflight;
	struct i915_request ** const last_port = first + execlists->port_mask;
	struct i915_request *last = first[0];
	struct i915_request **port;
	bool submit = false;
	struct rb_node *rb;

	lockdep_assert_held(&engine->active.lock);

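	/* If the first port is busy, fill the second; with both busy, bail */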
	if (last) {
		if (*++first)
			return;

		last = NULL;
	}

	/*
	 * We write directly into the execlists->inflight queue and don't use
	 * the execlists->pending queue, as we don't have a distinct switch
	 * event.
	 */
	port = first;
	while ((rb = rb_first_cached(&execlists->queue))) {
		struct i915_priolist *p = to_priolist(rb);
		struct i915_request *rq, *rn;
		int i;

		priolist_for_each_request_consume(rq, rn, p, i) {
			if (last && rq->context != last->context) {
				if (port == last_port)
					goto done;

				*port = schedule_in(last,
						    port - execlists->inflight);
				port++;
			}

			list_del_init(&rq->sched.link);
			__i915_request_submit(rq);
			submit = true;
			last = rq;
		}

		rb_erase_cached(&p->node, &execlists->queue);
		i915_priolist_free(p);
	}
done:
	execlists->queue_priority_hint =
		rb ? to_priolist(rb)->priority : INT_MIN;
	if (submit) {
		*port = schedule_in(last, port - execlists->inflight);
		*++port = NULL;
		guc_submit(engine, first, port);
	}
	execlists->active = execlists->inflight;
}

static void guc_submission_tasklet(unsigned long data)
{
	struct intel_engine_cs * const engine = (struct intel_engine_cs *)data;
	struct intel_engine_execlists * const execlists = &engine->execlists;
	struct i915_request **port, *rq;
	unsigned long flags;

	spin_lock_irqsave(&engine->active.lock, flags);

	for (port = execlists->inflight; (rq = *port); port++) {
		if (!i915_request_completed(rq))
			break;

		schedule_out(rq);
	}
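	/* Compact the completed requests out of the inflight array */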
	if (port != execlists->inflight) {
		int idx = port - execlists->inflight;
		int rem = ARRAY_SIZE(execlists->inflight) - idx;
		memmove(execlists->inflight, port, rem * sizeof(*port));
	}

	__guc_dequeue(engine);

	spin_unlock_irqrestore(&engine->active.lock, flags);
}

static void guc_reset_prepare(struct intel_engine_cs *engine)
{
	struct intel_engine_execlists * const execlists = &engine->execlists;

	ENGINE_TRACE(engine, "\n");

	/*
	 * Prevent request submission to the hardware until we have
	 * completed the reset in i915_gem_reset_finish(). If a request
	 * is completed by one engine, it may then queue a request
	 * to a second via its execlists->tasklet *just* as we are
	 * calling engine->init_hw() and also writing the ELSP.
	 * Turning off the execlists->tasklet until the reset is over
	 * prevents the race.
	 */
	__tasklet_disable_sync_once(&execlists->tasklet);
}

static void
cancel_port_requests(struct intel_engine_execlists * const execlists)
{
	struct i915_request * const *port, *rq;

	/* Note we are only using the inflight and not the pending queue */

	for (port = execlists->active; (rq = *port); port++)
		schedule_out(rq);
	execlists->active =
		memset(execlists->inflight, 0, sizeof(execlists->inflight));
}

static void guc_reset_rewind(struct intel_engine_cs *engine, bool stalled)
{
	struct intel_engine_execlists * const execlists = &engine->execlists;
	struct i915_request *rq;
	unsigned long flags;

	spin_lock_irqsave(&engine->active.lock, flags);

	cancel_port_requests(execlists);

	/* Push back any incomplete requests for replay after the reset. */
	rq = execlists_unwind_incomplete_requests(execlists);
	if (!rq)
		goto out_unlock;

	if (!i915_request_started(rq))
		stalled = false;

	__i915_request_reset(rq, stalled);
	intel_lr_context_reset(engine, rq->context, rq->head, stalled);

out_unlock:
	spin_unlock_irqrestore(&engine->active.lock, flags);
}

static void guc_reset_cancel(struct intel_engine_cs *engine)
{
	struct intel_engine_execlists * const execlists = &engine->execlists;
	struct i915_request *rq, *rn;
	struct rb_node *rb;
	unsigned long flags;

	ENGINE_TRACE(engine, "\n");

	/*
	 * Before we call engine->cancel_requests(), we should have exclusive
	 * access to the submission state. This is arranged for us by the
	 * caller disabling the interrupt generation, the tasklet and other
	 * threads that may then access the same state, giving us a free hand
	 * to reset state. However, we still need to let lockdep be aware that
	 * we know this state may be accessed in hardirq context, so we
	 * disable the irq around this manipulation and we want to keep
	 * the spinlock focused on its duties and not accidentally conflate
	 * coverage to the submission's irq state. (Similarly, although we
	 * shouldn't need to disable irq around the manipulation of the
	 * submission's irq state, we also wish to remind ourselves that
	 * it is irq state.)
	 */
	spin_lock_irqsave(&engine->active.lock, flags);

	/* Cancel the requests on the HW and clear the ELSP tracker. */
	cancel_port_requests(execlists);

	/* Mark all executing requests as skipped. */
	list_for_each_entry(rq, &engine->active.requests, sched.link) {
		i915_request_set_error_once(rq, -EIO);
		i915_request_mark_complete(rq);
	}

	/* Flush the queued requests to the timeline list (for retiring). */
	while ((rb = rb_first_cached(&execlists->queue))) {
		struct i915_priolist *p = to_priolist(rb);
		int i;

		priolist_for_each_request_consume(rq, rn, p, i) {
			list_del_init(&rq->sched.link);
			__i915_request_submit(rq);
			dma_fence_set_error(&rq->fence, -EIO);
			i915_request_mark_complete(rq);
		}

		rb_erase_cached(&p->node, &execlists->queue);
		i915_priolist_free(p);
	}

	/* Remaining _unready_ requests will be nop'ed when submitted */

	execlists->queue_priority_hint = INT_MIN;
	execlists->queue = RB_ROOT_CACHED;

	spin_unlock_irqrestore(&engine->active.lock, flags);
}

static void guc_reset_finish(struct intel_engine_cs *engine)
{
	struct intel_engine_execlists * const execlists = &engine->execlists;

	if (__tasklet_enable(&execlists->tasklet))
		/* And kick in case we missed a new request submission. */
		tasklet_hi_schedule(&execlists->tasklet);

	ENGINE_TRACE(engine, "depth->%d\n",
		     atomic_read(&execlists->tasklet.count));
}

/*
 * Everything below here is concerned with setup & teardown, and is
 * therefore not part of the somewhat time-critical batch-submission
 * path of guc_submit() above.
 */

/*
 * Set up the memory resources to be shared with the GuC (via the GGTT)
 * at firmware loading time.
 */
int intel_guc_submission_init(struct intel_guc *guc)
{
	int ret;

	if (guc->stage_desc_pool)
		return 0;

	ret = guc_stage_desc_pool_create(guc);
	if (ret)
		return ret;
	/*
	 * Keep static analysers happy, let them know that we allocated the
	 * vma after testing that it didn't exist earlier.
	 */
	GEM_BUG_ON(!guc->stage_desc_pool);

	ret = guc_workqueue_create(guc);
	if (ret)
		goto err_pool;

	ret = guc_proc_desc_create(guc);
	if (ret)
		goto err_workqueue;

	spin_lock_init(&guc->wq_lock);

	return 0;

err_workqueue:
	guc_workqueue_destroy(guc);
err_pool:
	guc_stage_desc_pool_destroy(guc);
	return ret;
}

void intel_guc_submission_fini(struct intel_guc *guc)
{
	if (guc->stage_desc_pool) {
		guc_proc_desc_destroy(guc);
		guc_workqueue_destroy(guc);
		guc_stage_desc_pool_destroy(guc);
	}
}

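/*
 * For orientation, the expected lifecycle of this backend, as driven by the
 * intel_uc setup code (a rough sketch, not a literal call chain):
 *
 *	intel_guc_submission_init_early()  - record the modparam selection
 *	intel_guc_submission_init()        - allocate the GGTT-shared objects
 *	intel_guc_submission_enable()      - init descriptors, take over ELSP
 *	...
 *	intel_guc_submission_disable()     - release interrupts and descriptors
 *	intel_guc_submission_fini()        - free the shared objects
 */
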
static void guc_interrupts_capture(struct intel_gt *gt)
{
	struct intel_uncore *uncore = gt->uncore;
	u32 irqs = GT_CONTEXT_SWITCH_INTERRUPT;
	u32 dmask = irqs << 16 | irqs;

	GEM_BUG_ON(INTEL_GEN(gt->i915) < 11);

	/* Don't handle the ctx switch interrupt in GuC submission mode */
	intel_uncore_rmw(uncore, GEN11_RENDER_COPY_INTR_ENABLE, dmask, 0);
	intel_uncore_rmw(uncore, GEN11_VCS_VECS_INTR_ENABLE, dmask, 0);
}

static void guc_interrupts_release(struct intel_gt *gt)
{
	struct intel_uncore *uncore = gt->uncore;
	u32 irqs = GT_CONTEXT_SWITCH_INTERRUPT;
	u32 dmask = irqs << 16 | irqs;

	GEM_BUG_ON(INTEL_GEN(gt->i915) < 11);

	/* Handle ctx switch interrupts again */
	intel_uncore_rmw(uncore, GEN11_RENDER_COPY_INTR_ENABLE, 0, dmask);
	intel_uncore_rmw(uncore, GEN11_VCS_VECS_INTR_ENABLE, 0, dmask);
}

static void guc_set_default_submission(struct intel_engine_cs *engine)
{
	/*
	 * We inherit a bunch of functions from execlists that we'd like
	 * to keep using:
	 *
	 *    engine->submit_request = execlists_submit_request;
	 *    engine->cancel_requests = execlists_cancel_requests;
	 *    engine->schedule = execlists_schedule;
	 *
	 * But we need to override the actual submission backend in order
	 * to talk to the GuC.
	 */
	intel_execlists_set_default_submission(engine);

	engine->execlists.tasklet.func = guc_submission_tasklet;

	/* do not use execlists park/unpark */
	engine->park = engine->unpark = NULL;

	engine->reset.prepare = guc_reset_prepare;
	engine->reset.rewind = guc_reset_rewind;
	engine->reset.cancel = guc_reset_cancel;
	engine->reset.finish = guc_reset_finish;

	engine->flags &= ~I915_ENGINE_SUPPORTS_STATS;
	engine->flags |= I915_ENGINE_NEEDS_BREADCRUMB_TASKLET;

	/*
	 * For the breadcrumb irq to work we need the interrupts to stay
	 * enabled. However, on all platforms on which we'll have support for
	 * GuC submission we don't allow disabling the interrupts at runtime, so
	 * we're always safe with the current flow.
	 */
	GEM_BUG_ON(engine->irq_enable || engine->irq_disable);
}

void intel_guc_submission_enable(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	/*
	 * We're using GuC work items for submitting work through GuC. Since
	 * we're coalescing multiple requests from a single context into a
	 * single work item prior to assigning it to execlist_port, we can
	 * never have more work items than the total number of ports (for all
	 * engines). The GuC firmware controls the HEAD of the work queue,
	 * and it is guaranteed that it will remove the work item from the
	 * queue before our request is completed.
	 */
	BUILD_BUG_ON(ARRAY_SIZE(engine->execlists.inflight) *
		     sizeof(struct guc_wq_item) *
		     I915_NUM_ENGINES > GUC_WQ_SIZE);

	guc_proc_desc_init(guc);
	guc_stage_desc_init(guc);

	/* Take over from manual control of ELSP (execlists) */
	guc_interrupts_capture(gt);

	for_each_engine(engine, gt, id) {
		engine->set_default_submission = guc_set_default_submission;
		engine->set_default_submission(engine);
	}
}

void intel_guc_submission_disable(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);

	GEM_BUG_ON(gt->awake); /* GT should be parked first */

	/* Note: By the time we're here, GuC may have already been reset */

	guc_interrupts_release(gt);

	guc_stage_desc_fini(guc);
	guc_proc_desc_fini(guc);
}

static bool __guc_submission_selected(struct intel_guc *guc)
{
	struct drm_i915_private *i915 = guc_to_gt(guc)->i915;

	if (!intel_guc_submission_is_supported(guc))
		return false;

	return i915->params.enable_guc & ENABLE_GUC_SUBMISSION;
}

void intel_guc_submission_init_early(struct intel_guc *guc)
{
	guc->submission_selected = __guc_submission_selected(guc);
}

bool intel_engine_in_guc_submission_mode(const struct intel_engine_cs *engine)
{
	return engine->set_default_submission == guc_set_default_submission;
}