1 /*
2  * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21  * SOFTWARE.
22  *
23  * Authors:
24  *    Zhi Wang <zhi.a.wang@intel.com>
25  *
26  * Contributors:
27  *    Ping Gao <ping.a.gao@intel.com>
28  *    Tina Zhang <tina.zhang@intel.com>
29  *    Changbin Du <changbin.du@intel.com>
30  *    Min He <min.he@intel.com>
31  *    Bing Niu <bing.niu@intel.com>
32  *    Zhenyu Wang <zhenyuw@linux.intel.com>
33  *
34  */
35 
36 #include <linux/kthread.h>
37 
38 #include "gem/i915_gem_pm.h"
39 #include "gt/intel_context.h"
40 #include "gt/intel_execlists_submission.h"
41 #include "gt/intel_lrc.h"
42 #include "gt/intel_ring.h"
43 
44 #include "i915_drv.h"
45 #include "i915_gem_gtt.h"
46 #include "gvt.h"
47 
48 #define RING_CTX_OFF(x) \
49 	offsetof(struct execlist_ring_context, x)
50 
51 static void set_context_pdp_root_pointer(
52 		struct execlist_ring_context *ring_context,
53 		u32 pdp[8])
54 {
55 	int i;
56 
57 	for (i = 0; i < 8; i++)
58 		ring_context->pdps[i].val = pdp[7 - i];
59 }
60 
61 static void update_shadow_pdps(struct intel_vgpu_workload *workload)
62 {
63 	struct execlist_ring_context *shadow_ring_context;
64 	struct intel_context *ctx = workload->req->context;
65 
66 	if (WARN_ON(!workload->shadow_mm))
67 		return;
68 
69 	if (WARN_ON(!atomic_read(&workload->shadow_mm->pincount)))
70 		return;
71 
72 	shadow_ring_context = (struct execlist_ring_context *)ctx->lrc_reg_state;
73 	set_context_pdp_root_pointer(shadow_ring_context,
74 			(void *)workload->shadow_mm->ppgtt_mm.shadow_pdps);
75 }
76 
77 /*
78  * When populating the shadow ctx from the guest, we should not override the
79  * OA related registers, so that they are not clobbered by guest OA configs.
80  * This makes it possible to capture OA data from the host for both host and guests.
81  */
82 static void sr_oa_regs(struct intel_vgpu_workload *workload,
83 		u32 *reg_state, bool save)
84 {
85 	struct drm_i915_private *dev_priv = workload->vgpu->gvt->gt->i915;
86 	u32 ctx_oactxctrl = dev_priv->perf.ctx_oactxctrl_offset;
87 	u32 ctx_flexeu0 = dev_priv->perf.ctx_flexeu0_offset;
88 	int i = 0;
89 	u32 flex_mmio[] = {
90 		i915_mmio_reg_offset(EU_PERF_CNTL0),
91 		i915_mmio_reg_offset(EU_PERF_CNTL1),
92 		i915_mmio_reg_offset(EU_PERF_CNTL2),
93 		i915_mmio_reg_offset(EU_PERF_CNTL3),
94 		i915_mmio_reg_offset(EU_PERF_CNTL4),
95 		i915_mmio_reg_offset(EU_PERF_CNTL5),
96 		i915_mmio_reg_offset(EU_PERF_CNTL6),
97 	};
98 
99 	if (workload->engine->id != RCS0)
100 		return;
101 
102 	if (save) {
103 		workload->oactxctrl = reg_state[ctx_oactxctrl + 1];
104 
105 		for (i = 0; i < ARRAY_SIZE(workload->flex_mmio); i++) {
106 			u32 state_offset = ctx_flexeu0 + i * 2;
107 
108 			workload->flex_mmio[i] = reg_state[state_offset + 1];
109 		}
110 	} else {
111 		reg_state[ctx_oactxctrl] =
112 			i915_mmio_reg_offset(GEN8_OACTXCONTROL);
113 		reg_state[ctx_oactxctrl + 1] = workload->oactxctrl;
114 
115 		for (i = 0; i < ARRAY_SIZE(workload->flex_mmio); i++) {
116 			u32 state_offset = ctx_flexeu0 + i * 2;
117 			u32 mmio = flex_mmio[i];
118 
119 			reg_state[state_offset] = mmio;
120 			reg_state[state_offset + 1] = workload->flex_mmio[i];
121 		}
122 	}
123 }
124 
125 static int populate_shadow_context(struct intel_vgpu_workload *workload)
126 {
127 	struct intel_vgpu *vgpu = workload->vgpu;
128 	struct intel_gvt *gvt = vgpu->gvt;
129 	struct intel_context *ctx = workload->req->context;
130 	struct execlist_ring_context *shadow_ring_context;
131 	void *dst;
132 	void *context_base;
133 	unsigned long context_gpa, context_page_num;
134 	unsigned long gpa_base; /* first gpa of consecutive GPAs */
135 	unsigned long gpa_size; /* size of consecutive GPAs */
136 	struct intel_vgpu_submission *s = &vgpu->submission;
137 	int i;
138 	bool skip = false;
139 	int ring_id = workload->engine->id;
140 	int ret;
141 
142 	GEM_BUG_ON(!intel_context_is_pinned(ctx));
143 
144 	context_base = (void *) ctx->lrc_reg_state -
145 				(LRC_STATE_PN << I915_GTT_PAGE_SHIFT);
146 
147 	shadow_ring_context = (void *) ctx->lrc_reg_state;
148 
149 	sr_oa_regs(workload, (u32 *)shadow_ring_context, true);
150 #define COPY_REG(name) \
151 	intel_gvt_hypervisor_read_gpa(vgpu, workload->ring_context_gpa \
152 		+ RING_CTX_OFF(name.val), &shadow_ring_context->name.val, 4)
153 #define COPY_REG_MASKED(name) {\
154 		intel_gvt_hypervisor_read_gpa(vgpu, workload->ring_context_gpa \
155 					      + RING_CTX_OFF(name.val),\
156 					      &shadow_ring_context->name.val, 4);\
157 		shadow_ring_context->name.val |= 0xffff << 16;\
158 	}
159 
160 	COPY_REG_MASKED(ctx_ctrl);
161 	COPY_REG(ctx_timestamp);
162 
163 	if (workload->engine->id == RCS0) {
164 		COPY_REG(bb_per_ctx_ptr);
165 		COPY_REG(rcs_indirect_ctx);
166 		COPY_REG(rcs_indirect_ctx_offset);
167 	} else if (workload->engine->id == BCS0)
168 		intel_gvt_hypervisor_read_gpa(vgpu,
169 				workload->ring_context_gpa +
170 				BCS_TILE_REGISTER_VAL_OFFSET,
171 				(void *)shadow_ring_context +
172 				BCS_TILE_REGISTER_VAL_OFFSET, 4);
173 #undef COPY_REG
174 #undef COPY_REG_MASKED
175 
176 	/* don't copy Ring Context (the first 0x50 dwords),
177 	 * only copy the Engine Context part from guest
178 	 */
179 	intel_gvt_hypervisor_read_gpa(vgpu,
180 			workload->ring_context_gpa +
181 			RING_CTX_SIZE,
182 			(void *)shadow_ring_context +
183 			RING_CTX_SIZE,
184 			I915_GTT_PAGE_SIZE - RING_CTX_SIZE);
185 
186 	sr_oa_regs(workload, (u32 *)shadow_ring_context, false);
187 
188 	gvt_dbg_sched("ring %s workload lrca %x, ctx_id %x, ctx gpa %llx",
189 			workload->engine->name, workload->ctx_desc.lrca,
190 			workload->ctx_desc.context_id,
191 			workload->ring_context_gpa);
192 
193 	/* only need to ensure this context is not pinned/unpinned during the
194 	 * period from the last submission to this submission.
195 	 * Upon reaching this function, the currently submitted context is not
196 	 * supposed to get unpinned. If a misbehaving guest driver ever does
197 	 * this, it would corrupt itself.
198 	 */
199 	if (s->last_ctx[ring_id].valid &&
200 			(s->last_ctx[ring_id].lrca ==
201 				workload->ctx_desc.lrca) &&
202 			(s->last_ctx[ring_id].ring_context_gpa ==
203 				workload->ring_context_gpa))
204 		skip = true;
205 
206 	s->last_ctx[ring_id].lrca = workload->ctx_desc.lrca;
207 	s->last_ctx[ring_id].ring_context_gpa = workload->ring_context_gpa;
208 
209 	if (IS_RESTORE_INHIBIT(shadow_ring_context->ctx_ctrl.val) || skip)
210 		return 0;
211 
212 	s->last_ctx[ring_id].valid = false;
213 	context_page_num = workload->engine->context_size;
214 	context_page_num = context_page_num >> PAGE_SHIFT;
215 
216 	if (IS_BROADWELL(gvt->gt->i915) && workload->engine->id == RCS0)
217 		context_page_num = 19;
218 
219 	/* Find consecutive GPAs from gma until the first non-consecutive GPA,
220 	 * then read from the consecutive GPAs into the dst virtual address.
221 	 */
222 	gpa_size = 0;
223 	for (i = 2; i < context_page_num; i++) {
224 		context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
225 				(u32)((workload->ctx_desc.lrca + i) <<
226 				I915_GTT_PAGE_SHIFT));
227 		if (context_gpa == INTEL_GVT_INVALID_ADDR) {
228 			gvt_vgpu_err("Invalid guest context descriptor\n");
229 			return -EFAULT;
230 		}
231 
232 		if (gpa_size == 0) {
233 			gpa_base = context_gpa;
234 			dst = context_base + (i << I915_GTT_PAGE_SHIFT);
235 		} else if (context_gpa != gpa_base + gpa_size)
236 			goto read;
237 
238 		gpa_size += I915_GTT_PAGE_SIZE;
239 
240 		if (i == context_page_num - 1)
241 			goto read;
242 
243 		continue;
244 
245 read:
246 		intel_gvt_hypervisor_read_gpa(vgpu, gpa_base, dst, gpa_size);
247 		gpa_base = context_gpa;
248 		gpa_size = I915_GTT_PAGE_SIZE;
249 		dst = context_base + (i << I915_GTT_PAGE_SHIFT);
250 	}
251 	ret = intel_gvt_scan_engine_context(workload);
252 	if (ret) {
253 		gvt_vgpu_err("invalid cmd found in guest context pages\n");
254 		return ret;
255 	}
256 	s->last_ctx[ring_id].valid = true;
257 	return 0;
258 }
259 
260 static inline bool is_gvt_request(struct i915_request *rq)
261 {
262 	return intel_context_force_single_submission(rq->context);
263 }
264 
265 static void save_ring_hw_state(struct intel_vgpu *vgpu,
266 			       const struct intel_engine_cs *engine)
267 {
268 	struct intel_uncore *uncore = engine->uncore;
269 	i915_reg_t reg;
270 
271 	reg = RING_INSTDONE(engine->mmio_base);
272 	vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) =
273 		intel_uncore_read(uncore, reg);
274 
275 	reg = RING_ACTHD(engine->mmio_base);
276 	vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) =
277 		intel_uncore_read(uncore, reg);
278 
279 	reg = RING_ACTHD_UDW(engine->mmio_base);
280 	vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) =
281 		intel_uncore_read(uncore, reg);
282 }
283 
284 static int shadow_context_status_change(struct notifier_block *nb,
285 		unsigned long action, void *data)
286 {
287 	struct i915_request *rq = data;
288 	struct intel_gvt *gvt = container_of(nb, struct intel_gvt,
289 				shadow_ctx_notifier_block[rq->engine->id]);
290 	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
291 	enum intel_engine_id ring_id = rq->engine->id;
292 	struct intel_vgpu_workload *workload;
293 	unsigned long flags;
294 
295 	if (!is_gvt_request(rq)) {
296 		spin_lock_irqsave(&scheduler->mmio_context_lock, flags);
297 		if (action == INTEL_CONTEXT_SCHEDULE_IN &&
298 		    scheduler->engine_owner[ring_id]) {
299 			/* Switch ring from vGPU to host. */
300 			intel_gvt_switch_mmio(scheduler->engine_owner[ring_id],
301 					      NULL, rq->engine);
302 			scheduler->engine_owner[ring_id] = NULL;
303 		}
304 		spin_unlock_irqrestore(&scheduler->mmio_context_lock, flags);
305 
306 		return NOTIFY_OK;
307 	}
308 
309 	workload = scheduler->current_workload[ring_id];
310 	if (unlikely(!workload))
311 		return NOTIFY_OK;
312 
313 	switch (action) {
314 	case INTEL_CONTEXT_SCHEDULE_IN:
315 		spin_lock_irqsave(&scheduler->mmio_context_lock, flags);
316 		if (workload->vgpu != scheduler->engine_owner[ring_id]) {
317 			/* Switch ring from host to vGPU or vGPU to vGPU. */
318 			intel_gvt_switch_mmio(scheduler->engine_owner[ring_id],
319 					      workload->vgpu, rq->engine);
320 			scheduler->engine_owner[ring_id] = workload->vgpu;
321 		} else
322 			gvt_dbg_sched("skip ring %d mmio switch for vgpu%d\n",
323 				      ring_id, workload->vgpu->id);
324 		spin_unlock_irqrestore(&scheduler->mmio_context_lock, flags);
325 		atomic_set(&workload->shadow_ctx_active, 1);
326 		break;
327 	case INTEL_CONTEXT_SCHEDULE_OUT:
328 		save_ring_hw_state(workload->vgpu, rq->engine);
329 		atomic_set(&workload->shadow_ctx_active, 0);
330 		break;
331 	case INTEL_CONTEXT_SCHEDULE_PREEMPTED:
332 		save_ring_hw_state(workload->vgpu, rq->engine);
333 		break;
334 	default:
335 		WARN_ON(1);
336 		return NOTIFY_OK;
337 	}
338 	wake_up(&workload->shadow_ctx_status_wq);
339 	return NOTIFY_OK;
340 }
341 
342 static void
343 shadow_context_descriptor_update(struct intel_context *ce,
344 				 struct intel_vgpu_workload *workload)
345 {
346 	u64 desc = ce->lrc.desc;
347 
348 	/*
349 	 * Update bits 0-11 of the context descriptor which includes flags
350 	 * like GEN8_CTX_* cached in desc_template
351 	 */
352 	desc &= ~(0x3ull << GEN8_CTX_ADDRESSING_MODE_SHIFT);
353 	desc |= (u64)workload->ctx_desc.addressing_mode <<
354 		GEN8_CTX_ADDRESSING_MODE_SHIFT;
355 
356 	ce->lrc.desc = desc;
357 }
358 
359 static int copy_workload_to_ring_buffer(struct intel_vgpu_workload *workload)
360 {
361 	struct intel_vgpu *vgpu = workload->vgpu;
362 	struct i915_request *req = workload->req;
363 	void *shadow_ring_buffer_va;
364 	u32 *cs;
365 	int err;
366 
367 	if (GRAPHICS_VER(req->engine->i915) == 9 && is_inhibit_context(req->context))
368 		intel_vgpu_restore_inhibit_context(vgpu, req);
369 
370 	/*
371 	 * To track whether a request has started on HW, we can emit a
372 	 * breadcrumb at the beginning of the request and check its
373 	 * timeline's HWSP to see if the breadcrumb has advanced past the
374 	 * start of this request. The request must have the init_breadcrumb
375 	 * if its timeline sets has_init_breadcrumb, or the scheduler might
376 	 * get a wrong state of it during reset. Since the requests from
377 	 * GVT always set the has_init_breadcrumb flag, we need to emit the
378 	 * init breadcrumb for all of the requests here.
379 	 */
380 	if (req->engine->emit_init_breadcrumb) {
381 		err = req->engine->emit_init_breadcrumb(req);
382 		if (err) {
383 			gvt_vgpu_err("fail to emit init breadcrumb\n");
384 			return err;
385 		}
386 	}
387 
388 	/* allocate shadow ring buffer */
389 	cs = intel_ring_begin(workload->req, workload->rb_len / sizeof(u32));
390 	if (IS_ERR(cs)) {
391 		gvt_vgpu_err("fail to alloc size=%ld shadow ring buffer\n",
392 			workload->rb_len);
393 		return PTR_ERR(cs);
394 	}
395 
396 	shadow_ring_buffer_va = workload->shadow_ring_buffer_va;
397 
398 	/* get shadow ring buffer va */
399 	workload->shadow_ring_buffer_va = cs;
400 
401 	memcpy(cs, shadow_ring_buffer_va,
402 			workload->rb_len);
403 
404 	cs += workload->rb_len / sizeof(u32);
405 	intel_ring_advance(workload->req, cs);
406 
407 	return 0;
408 }
409 
410 static void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
411 {
412 	if (!wa_ctx->indirect_ctx.obj)
413 		return;
414 
415 	i915_gem_object_lock(wa_ctx->indirect_ctx.obj, NULL);
416 	i915_gem_object_unpin_map(wa_ctx->indirect_ctx.obj);
417 	i915_gem_object_unlock(wa_ctx->indirect_ctx.obj);
418 	i915_gem_object_put(wa_ctx->indirect_ctx.obj);
419 
420 	wa_ctx->indirect_ctx.obj = NULL;
421 	wa_ctx->indirect_ctx.shadow_va = NULL;
422 }
423 
424 static void set_dma_address(struct i915_page_directory *pd, dma_addr_t addr)
425 {
426 	struct scatterlist *sg = pd->pt.base->mm.pages->sgl;
427 
428 	/* This is not a good idea */
429 	sg->dma_address = addr;
430 }
431 
432 static void set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload,
433 					  struct intel_context *ce)
434 {
435 	struct intel_vgpu_mm *mm = workload->shadow_mm;
436 	struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(ce->vm);
437 	int i = 0;
438 
439 	if (mm->ppgtt_mm.root_entry_type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY) {
440 		set_dma_address(ppgtt->pd, mm->ppgtt_mm.shadow_pdps[0]);
441 	} else {
442 		for (i = 0; i < GVT_RING_CTX_NR_PDPS; i++) {
443 			struct i915_page_directory * const pd =
444 				i915_pd_entry(ppgtt->pd, i);
445 			/* Skip for now: the current i915 ppgtt alloc won't
446 			 * allocate a top-level pdp for a non-4-level table,
447 			 * so this won't impact the shadow ppgtt. */
448 			if (!pd)
449 				break;
450 
451 			set_dma_address(pd, mm->ppgtt_mm.shadow_pdps[i]);
452 		}
453 	}
454 }
455 
456 static int
457 intel_gvt_workload_req_alloc(struct intel_vgpu_workload *workload)
458 {
459 	struct intel_vgpu *vgpu = workload->vgpu;
460 	struct intel_vgpu_submission *s = &vgpu->submission;
461 	struct i915_request *rq;
462 
463 	if (workload->req)
464 		return 0;
465 
466 	rq = i915_request_create(s->shadow[workload->engine->id]);
467 	if (IS_ERR(rq)) {
468 		gvt_vgpu_err("fail to allocate gem request\n");
469 		return PTR_ERR(rq);
470 	}
471 
472 	workload->req = i915_request_get(rq);
473 	return 0;
474 }
475 
476 /**
477  * intel_gvt_scan_and_shadow_workload - audit a workload by scanning and
478  * shadowing it as well, including the ring buffer, wa_ctx and ctx.
479  * @workload: an abstract entity for each execlist submission.
480  *
481  * This function is called before the workload is submitted to i915, to make
482  * sure the content of the workload is valid.
483  */
484 int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
485 {
486 	struct intel_vgpu *vgpu = workload->vgpu;
487 	struct intel_vgpu_submission *s = &vgpu->submission;
488 	int ret;
489 
490 	lockdep_assert_held(&vgpu->vgpu_lock);
491 
492 	if (workload->shadow)
493 		return 0;
494 
495 	if (!test_and_set_bit(workload->engine->id, s->shadow_ctx_desc_updated))
496 		shadow_context_descriptor_update(s->shadow[workload->engine->id],
497 						 workload);
498 
499 	ret = intel_gvt_scan_and_shadow_ringbuffer(workload);
500 	if (ret)
501 		return ret;
502 
503 	if (workload->engine->id == RCS0 &&
504 	    workload->wa_ctx.indirect_ctx.size) {
505 		ret = intel_gvt_scan_and_shadow_wa_ctx(&workload->wa_ctx);
506 		if (ret)
507 			goto err_shadow;
508 	}
509 
510 	workload->shadow = true;
511 	return 0;
512 
513 err_shadow:
514 	release_shadow_wa_ctx(&workload->wa_ctx);
515 	return ret;
516 }
517 
518 static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload);
519 
520 static int prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
521 {
522 	struct intel_gvt *gvt = workload->vgpu->gvt;
523 	const int gmadr_bytes = gvt->device_info.gmadr_bytes_in_cmd;
524 	struct intel_vgpu_shadow_bb *bb;
525 	struct i915_gem_ww_ctx ww;
526 	int ret;
527 
528 	list_for_each_entry(bb, &workload->shadow_bb, list) {
529 		/* For a privileged batch buffer that is not a wa_ctx, the
530 		 * bb_start_cmd_va is only updated into ring_scan_buffer, not
531 		 * the real ring address allocated later in
532 		 * copy_workload_to_ring_buffer. Note that shadow_ring_buffer_va
533 		 * points to the real ring buffer va in copy_workload_to_ring_buffer.
534 		 */
535 
536 		if (bb->bb_offset)
537 			bb->bb_start_cmd_va = workload->shadow_ring_buffer_va
538 				+ bb->bb_offset;
539 
540 		/*
541 		 * For a non-privileged bb, scan & shadow is only for
542 		 * debugging purposes, so the content of the shadow bb
543 		 * is the same as the original bb. Therefore, rather
544 		 * than switching to the shadow bb's gma address, we
545 		 * directly use the original batch buffer's gma
546 		 * address and send the original bb to hardware
547 		 * directly.
548 		 */
549 		if (!bb->ppgtt) {
550 			i915_gem_ww_ctx_init(&ww, false);
551 retry:
552 			i915_gem_object_lock(bb->obj, &ww);
553 
554 			bb->vma = i915_gem_object_ggtt_pin_ww(bb->obj, &ww,
555 							      NULL, 0, 0, 0);
556 			if (IS_ERR(bb->vma)) {
557 				ret = PTR_ERR(bb->vma);
558 				if (ret == -EDEADLK) {
559 					ret = i915_gem_ww_ctx_backoff(&ww);
560 					if (!ret)
561 						goto retry;
562 				}
563 				goto err;
564 			}
565 
566 			/* relocate shadow batch buffer */
567 			bb->bb_start_cmd_va[1] = i915_ggtt_offset(bb->vma);
568 			if (gmadr_bytes == 8)
569 				bb->bb_start_cmd_va[2] = 0;
570 
571 			ret = i915_vma_move_to_active(bb->vma,
572 						      workload->req,
573 						      0);
574 			if (ret)
575 				goto err;
576 
577 			/* No one is going to touch shadow bb from now on. */
578 			i915_gem_object_flush_map(bb->obj);
579 			i915_gem_ww_ctx_fini(&ww);
580 		}
581 	}
582 	return 0;
583 err:
584 	i915_gem_ww_ctx_fini(&ww);
585 	release_shadow_batch_buffer(workload);
586 	return ret;
587 }
588 
589 static void update_wa_ctx_2_shadow_ctx(struct intel_shadow_wa_ctx *wa_ctx)
590 {
591 	struct intel_vgpu_workload *workload =
592 		container_of(wa_ctx, struct intel_vgpu_workload, wa_ctx);
593 	struct i915_request *rq = workload->req;
594 	struct execlist_ring_context *shadow_ring_context =
595 		(struct execlist_ring_context *)rq->context->lrc_reg_state;
596 
597 	shadow_ring_context->bb_per_ctx_ptr.val =
598 		(shadow_ring_context->bb_per_ctx_ptr.val &
599 		(~PER_CTX_ADDR_MASK)) | wa_ctx->per_ctx.shadow_gma;
600 	shadow_ring_context->rcs_indirect_ctx.val =
601 		(shadow_ring_context->rcs_indirect_ctx.val &
602 		(~INDIRECT_CTX_ADDR_MASK)) | wa_ctx->indirect_ctx.shadow_gma;
603 }
604 
605 static int prepare_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
606 {
607 	struct i915_vma *vma;
608 	unsigned char *per_ctx_va =
609 		(unsigned char *)wa_ctx->indirect_ctx.shadow_va +
610 		wa_ctx->indirect_ctx.size;
611 	struct i915_gem_ww_ctx ww;
612 	int ret;
613 
614 	if (wa_ctx->indirect_ctx.size == 0)
615 		return 0;
616 
617 	i915_gem_ww_ctx_init(&ww, false);
618 retry:
619 	i915_gem_object_lock(wa_ctx->indirect_ctx.obj, &ww);
620 
621 	vma = i915_gem_object_ggtt_pin_ww(wa_ctx->indirect_ctx.obj, &ww, NULL,
622 					  0, CACHELINE_BYTES, 0);
623 	if (IS_ERR(vma)) {
624 		ret = PTR_ERR(vma);
625 		if (ret == -EDEADLK) {
626 			ret = i915_gem_ww_ctx_backoff(&ww);
627 			if (!ret)
628 				goto retry;
629 		}
630 		return ret;
631 	}
632 
633 	i915_gem_ww_ctx_fini(&ww);
634 
635 	/* FIXME: we are not tracking our pinned VMA, leaving it
636 	 * up to the core to fix up the stray pin_count upon
637 	 * free.
638 	 */
639 
640 	wa_ctx->indirect_ctx.shadow_gma = i915_ggtt_offset(vma);
641 
642 	wa_ctx->per_ctx.shadow_gma = *((unsigned int *)per_ctx_va + 1);
643 	memset(per_ctx_va, 0, CACHELINE_BYTES);
644 
645 	update_wa_ctx_2_shadow_ctx(wa_ctx);
646 	return 0;
647 }
648 
649 static void update_vreg_in_ctx(struct intel_vgpu_workload *workload)
650 {
651 	vgpu_vreg_t(workload->vgpu, RING_START(workload->engine->mmio_base)) =
652 		workload->rb_start;
653 }
654 
655 static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload)
656 {
657 	struct intel_vgpu_shadow_bb *bb, *pos;
658 
659 	if (list_empty(&workload->shadow_bb))
660 		return;
661 
662 	bb = list_first_entry(&workload->shadow_bb,
663 			struct intel_vgpu_shadow_bb, list);
664 
665 	list_for_each_entry_safe(bb, pos, &workload->shadow_bb, list) {
666 		if (bb->obj) {
667 			i915_gem_object_lock(bb->obj, NULL);
668 			if (bb->va && !IS_ERR(bb->va))
669 				i915_gem_object_unpin_map(bb->obj);
670 
671 			if (bb->vma && !IS_ERR(bb->vma))
672 				i915_vma_unpin(bb->vma);
673 
674 			i915_gem_object_unlock(bb->obj);
675 			i915_gem_object_put(bb->obj);
676 		}
677 		list_del(&bb->list);
678 		kfree(bb);
679 	}
680 }
681 
682 static int
683 intel_vgpu_shadow_mm_pin(struct intel_vgpu_workload *workload)
684 {
685 	struct intel_vgpu *vgpu = workload->vgpu;
686 	struct intel_vgpu_mm *m;
687 	int ret = 0;
688 
689 	ret = intel_vgpu_pin_mm(workload->shadow_mm);
690 	if (ret) {
691 		gvt_vgpu_err("fail to vgpu pin mm\n");
692 		return ret;
693 	}
694 
695 	if (workload->shadow_mm->type != INTEL_GVT_MM_PPGTT ||
696 	    !workload->shadow_mm->ppgtt_mm.shadowed) {
697 		intel_vgpu_unpin_mm(workload->shadow_mm);
698 		gvt_vgpu_err("workload shadow ppgtt isn't ready\n");
699 		return -EINVAL;
700 	}
701 
702 	if (!list_empty(&workload->lri_shadow_mm)) {
703 		list_for_each_entry(m, &workload->lri_shadow_mm,
704 				    ppgtt_mm.link) {
705 			ret = intel_vgpu_pin_mm(m);
706 			if (ret) {
707 				list_for_each_entry_from_reverse(m,
708 								 &workload->lri_shadow_mm,
709 								 ppgtt_mm.link)
710 					intel_vgpu_unpin_mm(m);
711 				gvt_vgpu_err("LRI shadow ppgtt fail to pin\n");
712 				break;
713 			}
714 		}
715 	}
716 
717 	if (ret)
718 		intel_vgpu_unpin_mm(workload->shadow_mm);
719 
720 	return ret;
721 }
722 
723 static void
724 intel_vgpu_shadow_mm_unpin(struct intel_vgpu_workload *workload)
725 {
726 	struct intel_vgpu_mm *m;
727 
728 	if (!list_empty(&workload->lri_shadow_mm)) {
729 		list_for_each_entry(m, &workload->lri_shadow_mm,
730 				    ppgtt_mm.link)
731 			intel_vgpu_unpin_mm(m);
732 	}
733 	intel_vgpu_unpin_mm(workload->shadow_mm);
734 }
735 
736 static int prepare_workload(struct intel_vgpu_workload *workload)
737 {
738 	struct intel_vgpu *vgpu = workload->vgpu;
739 	struct intel_vgpu_submission *s = &vgpu->submission;
740 	int ret = 0;
741 
742 	ret = intel_vgpu_shadow_mm_pin(workload);
743 	if (ret) {
744 		gvt_vgpu_err("fail to pin shadow mm\n");
745 		return ret;
746 	}
747 
748 	update_shadow_pdps(workload);
749 
750 	set_context_ppgtt_from_shadow(workload, s->shadow[workload->engine->id]);
751 
752 	ret = intel_vgpu_sync_oos_pages(workload->vgpu);
753 	if (ret) {
754 		gvt_vgpu_err("fail to vgpu sync oos pages\n");
755 		goto err_unpin_mm;
756 	}
757 
758 	ret = intel_vgpu_flush_post_shadow(workload->vgpu);
759 	if (ret) {
760 		gvt_vgpu_err("fail to flush post shadow\n");
761 		goto err_unpin_mm;
762 	}
763 
764 	ret = copy_workload_to_ring_buffer(workload);
765 	if (ret) {
766 		gvt_vgpu_err("fail to generate request\n");
767 		goto err_unpin_mm;
768 	}
769 
770 	ret = prepare_shadow_batch_buffer(workload);
771 	if (ret) {
772 		gvt_vgpu_err("fail to prepare_shadow_batch_buffer\n");
773 		goto err_unpin_mm;
774 	}
775 
776 	ret = prepare_shadow_wa_ctx(&workload->wa_ctx);
777 	if (ret) {
778 		gvt_vgpu_err("fail to prepare_shadow_wa_ctx\n");
779 		goto err_shadow_batch;
780 	}
781 
782 	if (workload->prepare) {
783 		ret = workload->prepare(workload);
784 		if (ret)
785 			goto err_shadow_wa_ctx;
786 	}
787 
788 	return 0;
789 err_shadow_wa_ctx:
790 	release_shadow_wa_ctx(&workload->wa_ctx);
791 err_shadow_batch:
792 	release_shadow_batch_buffer(workload);
793 err_unpin_mm:
794 	intel_vgpu_shadow_mm_unpin(workload);
795 	return ret;
796 }
797 
798 static int dispatch_workload(struct intel_vgpu_workload *workload)
799 {
800 	struct intel_vgpu *vgpu = workload->vgpu;
801 	struct i915_request *rq;
802 	int ret;
803 
804 	gvt_dbg_sched("ring id %s prepare to dispatch workload %p\n",
805 		      workload->engine->name, workload);
806 
807 	mutex_lock(&vgpu->vgpu_lock);
808 
809 	ret = intel_gvt_workload_req_alloc(workload);
810 	if (ret)
811 		goto err_req;
812 
813 	ret = intel_gvt_scan_and_shadow_workload(workload);
814 	if (ret)
815 		goto out;
816 
817 	ret = populate_shadow_context(workload);
818 	if (ret) {
819 		release_shadow_wa_ctx(&workload->wa_ctx);
820 		goto out;
821 	}
822 
823 	ret = prepare_workload(workload);
824 out:
825 	if (ret) {
826 		/* We might still need to add a request with a
827 		 * clean ctx to retire it properly.
828 		 */
829 		rq = fetch_and_zero(&workload->req);
830 		i915_request_put(rq);
831 	}
832 
833 	if (!IS_ERR_OR_NULL(workload->req)) {
834 		gvt_dbg_sched("ring id %s submit workload to i915 %p\n",
835 			      workload->engine->name, workload->req);
836 		i915_request_add(workload->req);
837 		workload->dispatched = true;
838 	}
839 err_req:
840 	if (ret)
841 		workload->status = ret;
842 	mutex_unlock(&vgpu->vgpu_lock);
843 	return ret;
844 }
845 
846 static struct intel_vgpu_workload *
847 pick_next_workload(struct intel_gvt *gvt, struct intel_engine_cs *engine)
848 {
849 	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
850 	struct intel_vgpu_workload *workload = NULL;
851 
852 	mutex_lock(&gvt->sched_lock);
853 
854 	/*
855 	 * no current vgpu / will be scheduled out / no workload
856 	 * bail out
857 	 */
858 	if (!scheduler->current_vgpu) {
859 		gvt_dbg_sched("ring %s stop - no current vgpu\n", engine->name);
860 		goto out;
861 	}
862 
863 	if (scheduler->need_reschedule) {
864 		gvt_dbg_sched("ring %s stop - will reschedule\n", engine->name);
865 		goto out;
866 	}
867 
868 	if (!scheduler->current_vgpu->active ||
869 	    list_empty(workload_q_head(scheduler->current_vgpu, engine)))
870 		goto out;
871 
872 	/*
873 	 * still have a current workload, maybe the workload dispatcher
874 	 * failed to submit it for some reason; resubmit it.
875 	 */
876 	if (scheduler->current_workload[engine->id]) {
877 		workload = scheduler->current_workload[engine->id];
878 		gvt_dbg_sched("ring %s still have current workload %p\n",
879 			      engine->name, workload);
880 		goto out;
881 	}
882 
883 	/*
884 	 * Pick a workload as the current workload.
885 	 * Once the current workload is set, the schedule policy routines
886 	 * will wait until the current workload is finished when trying to
887 	 * schedule out a vgpu.
888 	 */
889 	scheduler->current_workload[engine->id] =
890 		list_first_entry(workload_q_head(scheduler->current_vgpu,
891 						 engine),
892 				 struct intel_vgpu_workload, list);
893 
894 	workload = scheduler->current_workload[engine->id];
895 
896 	gvt_dbg_sched("ring %s pick new workload %p\n", engine->name, workload);
897 
898 	atomic_inc(&workload->vgpu->submission.running_workload_num);
899 out:
900 	mutex_unlock(&gvt->sched_lock);
901 	return workload;
902 }
903 
904 static void update_guest_pdps(struct intel_vgpu *vgpu,
905 			      u64 ring_context_gpa, u32 pdp[8])
906 {
907 	u64 gpa;
908 	int i;
909 
910 	gpa = ring_context_gpa + RING_CTX_OFF(pdps[0].val);
911 
912 	for (i = 0; i < 8; i++)
913 		intel_gvt_hypervisor_write_gpa(vgpu,
914 				gpa + i * 8, &pdp[7 - i], 4);
915 }
916 
917 static __maybe_unused bool
918 check_shadow_context_ppgtt(struct execlist_ring_context *c, struct intel_vgpu_mm *m)
919 {
920 	if (m->ppgtt_mm.root_entry_type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY) {
921 		u64 shadow_pdp = c->pdps[7].val | (u64) c->pdps[6].val << 32;
922 
923 		if (shadow_pdp != m->ppgtt_mm.shadow_pdps[0]) {
924 			gvt_dbg_mm("4-level context ppgtt not match LRI command\n");
925 			return false;
926 		}
927 		return true;
928 	} else {
929 		/* see comment in LRI handler in cmd_parser.c */
930 		gvt_dbg_mm("invalid shadow mm type\n");
931 		return false;
932 	}
933 }
934 
935 static void update_guest_context(struct intel_vgpu_workload *workload)
936 {
937 	struct i915_request *rq = workload->req;
938 	struct intel_vgpu *vgpu = workload->vgpu;
939 	struct execlist_ring_context *shadow_ring_context;
940 	struct intel_context *ctx = workload->req->context;
941 	void *context_base;
942 	void *src;
943 	unsigned long context_gpa, context_page_num;
944 	unsigned long gpa_base; /* first gpa of consecutive GPAs */
945 	unsigned long gpa_size; /* size of consecutive GPAs*/
946 	int i;
947 	u32 ring_base;
948 	u32 head, tail;
949 	u16 wrap_count;
950 
951 	gvt_dbg_sched("ring id %d workload lrca %x\n", rq->engine->id,
952 		      workload->ctx_desc.lrca);
953 
954 	GEM_BUG_ON(!intel_context_is_pinned(ctx));
955 
956 	head = workload->rb_head;
957 	tail = workload->rb_tail;
958 	wrap_count = workload->guest_rb_head >> RB_HEAD_WRAP_CNT_OFF;
959 
960 	if (tail < head) {
961 		if (wrap_count == RB_HEAD_WRAP_CNT_MAX)
962 			wrap_count = 0;
963 		else
964 			wrap_count += 1;
965 	}
966 
967 	head = (wrap_count << RB_HEAD_WRAP_CNT_OFF) | tail;
968 
969 	ring_base = rq->engine->mmio_base;
970 	vgpu_vreg_t(vgpu, RING_TAIL(ring_base)) = tail;
971 	vgpu_vreg_t(vgpu, RING_HEAD(ring_base)) = head;
972 
973 	context_page_num = rq->engine->context_size;
974 	context_page_num = context_page_num >> PAGE_SHIFT;
975 
976 	if (IS_BROADWELL(rq->engine->i915) && rq->engine->id == RCS0)
977 		context_page_num = 19;
978 
979 	context_base = (void *) ctx->lrc_reg_state -
980 			(LRC_STATE_PN << I915_GTT_PAGE_SHIFT);
981 
982 	/* Find consecutive GPAs from gma until the first non-consecutive GPA,
983 	 * then write to the consecutive GPAs from the src virtual address.
984 	 */
985 	gpa_size = 0;
986 	for (i = 2; i < context_page_num; i++) {
987 		context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
988 				(u32)((workload->ctx_desc.lrca + i) <<
989 					I915_GTT_PAGE_SHIFT));
990 		if (context_gpa == INTEL_GVT_INVALID_ADDR) {
991 			gvt_vgpu_err("invalid guest context descriptor\n");
992 			return;
993 		}
994 
995 		if (gpa_size == 0) {
996 			gpa_base = context_gpa;
997 			src = context_base + (i << I915_GTT_PAGE_SHIFT);
998 		} else if (context_gpa != gpa_base + gpa_size)
999 			goto write;
1000 
1001 		gpa_size += I915_GTT_PAGE_SIZE;
1002 
1003 		if (i == context_page_num - 1)
1004 			goto write;
1005 
1006 		continue;
1007 
1008 write:
1009 		intel_gvt_hypervisor_write_gpa(vgpu, gpa_base, src, gpa_size);
1010 		gpa_base = context_gpa;
1011 		gpa_size = I915_GTT_PAGE_SIZE;
1012 		src = context_base + (i << I915_GTT_PAGE_SHIFT);
1013 	}
1014 
1015 	intel_gvt_hypervisor_write_gpa(vgpu, workload->ring_context_gpa +
1016 		RING_CTX_OFF(ring_header.val), &workload->rb_tail, 4);
1017 
1018 	shadow_ring_context = (void *) ctx->lrc_reg_state;
1019 
1020 	if (!list_empty(&workload->lri_shadow_mm)) {
1021 		struct intel_vgpu_mm *m = list_last_entry(&workload->lri_shadow_mm,
1022 							  struct intel_vgpu_mm,
1023 							  ppgtt_mm.link);
1024 		GEM_BUG_ON(!check_shadow_context_ppgtt(shadow_ring_context, m));
1025 		update_guest_pdps(vgpu, workload->ring_context_gpa,
1026 				  (void *)m->ppgtt_mm.guest_pdps);
1027 	}
1028 
1029 #define COPY_REG(name) \
1030 	intel_gvt_hypervisor_write_gpa(vgpu, workload->ring_context_gpa + \
1031 		RING_CTX_OFF(name.val), &shadow_ring_context->name.val, 4)
1032 
1033 	COPY_REG(ctx_ctrl);
1034 	COPY_REG(ctx_timestamp);
1035 
1036 #undef COPY_REG
1037 
1038 	intel_gvt_hypervisor_write_gpa(vgpu,
1039 			workload->ring_context_gpa +
1040 			sizeof(*shadow_ring_context),
1041 			(void *)shadow_ring_context +
1042 			sizeof(*shadow_ring_context),
1043 			I915_GTT_PAGE_SIZE - sizeof(*shadow_ring_context));
1044 }
1045 
1046 void intel_vgpu_clean_workloads(struct intel_vgpu *vgpu,
1047 				intel_engine_mask_t engine_mask)
1048 {
1049 	struct intel_vgpu_submission *s = &vgpu->submission;
1050 	struct intel_engine_cs *engine;
1051 	struct intel_vgpu_workload *pos, *n;
1052 	intel_engine_mask_t tmp;
1053 
1054 	/* free the unsubmitted workloads in the queues. */
1055 	for_each_engine_masked(engine, vgpu->gvt->gt, engine_mask, tmp) {
1056 		list_for_each_entry_safe(pos, n,
1057 			&s->workload_q_head[engine->id], list) {
1058 			list_del_init(&pos->list);
1059 			intel_vgpu_destroy_workload(pos);
1060 		}
1061 		clear_bit(engine->id, s->shadow_ctx_desc_updated);
1062 	}
1063 }
1064 
1065 static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
1066 {
1067 	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
1068 	struct intel_vgpu_workload *workload =
1069 		scheduler->current_workload[ring_id];
1070 	struct intel_vgpu *vgpu = workload->vgpu;
1071 	struct intel_vgpu_submission *s = &vgpu->submission;
1072 	struct i915_request *rq = workload->req;
1073 	int event;
1074 
1075 	mutex_lock(&vgpu->vgpu_lock);
1076 	mutex_lock(&gvt->sched_lock);
1077 
1078 	/* For a workload with a request, we need to wait for the context
1079 	 * switch to make sure the request is completed.
1080 	 * For a workload without a request, directly complete the workload.
1081 	 */
1082 	if (rq) {
1083 		wait_event(workload->shadow_ctx_status_wq,
1084 			   !atomic_read(&workload->shadow_ctx_active));
1085 
1086 		/* If this request caused a GPU hang, req->fence.error will
1087 		 * be set to -EIO. Use -EIO to set the workload status so
1088 		 * that, when this request caused a GPU hang, we don't
1089 		 * trigger a context switch interrupt to the guest.
1090 		 */
1091 		if (likely(workload->status == -EINPROGRESS)) {
1092 			if (workload->req->fence.error == -EIO)
1093 				workload->status = -EIO;
1094 			else
1095 				workload->status = 0;
1096 		}
1097 
1098 		if (!workload->status &&
1099 		    !(vgpu->resetting_eng & BIT(ring_id))) {
1100 			update_guest_context(workload);
1101 
1102 			for_each_set_bit(event, workload->pending_events,
1103 					 INTEL_GVT_EVENT_MAX)
1104 				intel_vgpu_trigger_virtual_event(vgpu, event);
1105 		}
1106 
1107 		i915_request_put(fetch_and_zero(&workload->req));
1108 	}
1109 
1110 	gvt_dbg_sched("ring id %d complete workload %p status %d\n",
1111 			ring_id, workload, workload->status);
1112 
1113 	scheduler->current_workload[ring_id] = NULL;
1114 
1115 	list_del_init(&workload->list);
1116 
1117 	if (workload->status || vgpu->resetting_eng & BIT(ring_id)) {
1118 		/* If workload->status is not successful, it means the HW GPU
1119 		 * hit a GPU hang or something went wrong with i915/GVT,
1120 		 * and GVT won't inject a context switch interrupt to the
1121 		 * guest. So this error is actually a vGPU hang to the
1122 		 * guest, and accordingly we should emulate a vGPU hang. If
1123 		 * there are pending workloads which were already submitted
1124 		 * from the guest, we should clean them up like the HW GPU does.
1125 		 *
1126 		 * If we are in the middle of an engine reset, the pending
1127 		 * workloads won't be submitted to the HW GPU and will be
1128 		 * cleaned up later during the reset process, so doing the
1129 		 * workload clean up here doesn't have any impact.
1130 		 */
1131 		intel_vgpu_clean_workloads(vgpu, BIT(ring_id));
1132 	}
1133 
1134 	workload->complete(workload);
1135 
1136 	intel_vgpu_shadow_mm_unpin(workload);
1137 	intel_vgpu_destroy_workload(workload);
1138 
1139 	atomic_dec(&s->running_workload_num);
1140 	wake_up(&scheduler->workload_complete_wq);
1141 
1142 	if (gvt->scheduler.need_reschedule)
1143 		intel_gvt_request_service(gvt, INTEL_GVT_REQUEST_EVENT_SCHED);
1144 
1145 	mutex_unlock(&gvt->sched_lock);
1146 	mutex_unlock(&vgpu->vgpu_lock);
1147 }
1148 
1149 static int workload_thread(void *arg)
1150 {
1151 	struct intel_engine_cs *engine = arg;
1152 	const bool need_force_wake = GRAPHICS_VER(engine->i915) >= 9;
1153 	struct intel_gvt *gvt = engine->i915->gvt;
1154 	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
1155 	struct intel_vgpu_workload *workload = NULL;
1156 	struct intel_vgpu *vgpu = NULL;
1157 	int ret;
1158 	DEFINE_WAIT_FUNC(wait, woken_wake_function);
1159 
1160 	gvt_dbg_core("workload thread for ring %s started\n", engine->name);
1161 
1162 	while (!kthread_should_stop()) {
1163 		intel_wakeref_t wakeref;
1164 
1165 		add_wait_queue(&scheduler->waitq[engine->id], &wait);
1166 		do {
1167 			workload = pick_next_workload(gvt, engine);
1168 			if (workload)
1169 				break;
1170 			wait_woken(&wait, TASK_INTERRUPTIBLE,
1171 				   MAX_SCHEDULE_TIMEOUT);
1172 		} while (!kthread_should_stop());
1173 		remove_wait_queue(&scheduler->waitq[engine->id], &wait);
1174 
1175 		if (!workload)
1176 			break;
1177 
1178 		gvt_dbg_sched("ring %s next workload %p vgpu %d\n",
1179 			      engine->name, workload,
1180 			      workload->vgpu->id);
1181 
1182 		wakeref = intel_runtime_pm_get(engine->uncore->rpm);
1183 
1184 		gvt_dbg_sched("ring %s will dispatch workload %p\n",
1185 			      engine->name, workload);
1186 
1187 		if (need_force_wake)
1188 			intel_uncore_forcewake_get(engine->uncore,
1189 						   FORCEWAKE_ALL);
1190 		/*
1191 		 * Update the vReg of the vGPU which submitted this
1192 		 * workload. The vGPU may use these registers for checking
1193 		 * the context state. The value comes from GPU commands
1194 		 * in this workload.
1195 		 */
1196 		update_vreg_in_ctx(workload);
1197 
1198 		ret = dispatch_workload(workload);
1199 
1200 		if (ret) {
1201 			vgpu = workload->vgpu;
1202 			gvt_vgpu_err("fail to dispatch workload, skip\n");
1203 			goto complete;
1204 		}
1205 
1206 		gvt_dbg_sched("ring %s wait workload %p\n",
1207 			      engine->name, workload);
1208 		i915_request_wait(workload->req, 0, MAX_SCHEDULE_TIMEOUT);
1209 
1210 complete:
1211 		gvt_dbg_sched("will complete workload %p, status: %d\n",
1212 			      workload, workload->status);
1213 
1214 		complete_current_workload(gvt, engine->id);
1215 
1216 		if (need_force_wake)
1217 			intel_uncore_forcewake_put(engine->uncore,
1218 						   FORCEWAKE_ALL);
1219 
1220 		intel_runtime_pm_put(engine->uncore->rpm, wakeref);
1221 		if (ret && (vgpu_is_vm_unhealthy(ret)))
1222 			enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR);
1223 	}
1224 	return 0;
1225 }
1226 
1227 void intel_gvt_wait_vgpu_idle(struct intel_vgpu *vgpu)
1228 {
1229 	struct intel_vgpu_submission *s = &vgpu->submission;
1230 	struct intel_gvt *gvt = vgpu->gvt;
1231 	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
1232 
1233 	if (atomic_read(&s->running_workload_num)) {
1234 		gvt_dbg_sched("wait vgpu idle\n");
1235 
1236 		wait_event(scheduler->workload_complete_wq,
1237 				!atomic_read(&s->running_workload_num));
1238 	}
1239 }
1240 
1241 void intel_gvt_clean_workload_scheduler(struct intel_gvt *gvt)
1242 {
1243 	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
1244 	struct intel_engine_cs *engine;
1245 	enum intel_engine_id i;
1246 
1247 	gvt_dbg_core("clean workload scheduler\n");
1248 
1249 	for_each_engine(engine, gvt->gt, i) {
1250 		atomic_notifier_chain_unregister(
1251 					&engine->context_status_notifier,
1252 					&gvt->shadow_ctx_notifier_block[i]);
1253 		kthread_stop(scheduler->thread[i]);
1254 	}
1255 }
1256 
1257 int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt)
1258 {
1259 	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
1260 	struct intel_engine_cs *engine;
1261 	enum intel_engine_id i;
1262 	int ret;
1263 
1264 	gvt_dbg_core("init workload scheduler\n");
1265 
1266 	init_waitqueue_head(&scheduler->workload_complete_wq);
1267 
1268 	for_each_engine(engine, gvt->gt, i) {
1269 		init_waitqueue_head(&scheduler->waitq[i]);
1270 
1271 		scheduler->thread[i] = kthread_run(workload_thread, engine,
1272 						   "gvt:%s", engine->name);
1273 		if (IS_ERR(scheduler->thread[i])) {
1274 			gvt_err("fail to create workload thread\n");
1275 			ret = PTR_ERR(scheduler->thread[i]);
1276 			goto err;
1277 		}
1278 
1279 		gvt->shadow_ctx_notifier_block[i].notifier_call =
1280 					shadow_context_status_change;
1281 		atomic_notifier_chain_register(&engine->context_status_notifier,
1282 					&gvt->shadow_ctx_notifier_block[i]);
1283 	}
1284 
1285 	return 0;
1286 
1287 err:
1288 	intel_gvt_clean_workload_scheduler(gvt);
1289 	return ret;
1290 }
1291 
1292 static void
1293 i915_context_ppgtt_root_restore(struct intel_vgpu_submission *s,
1294 				struct i915_ppgtt *ppgtt)
1295 {
1296 	int i;
1297 
1298 	if (i915_vm_is_4lvl(&ppgtt->vm)) {
1299 		set_dma_address(ppgtt->pd, s->i915_context_pml4);
1300 	} else {
1301 		for (i = 0; i < GEN8_3LVL_PDPES; i++) {
1302 			struct i915_page_directory * const pd =
1303 				i915_pd_entry(ppgtt->pd, i);
1304 
1305 			set_dma_address(pd, s->i915_context_pdps[i]);
1306 		}
1307 	}
1308 }
1309 
1310 /**
1311  * intel_vgpu_clean_submission - free submission-related resource for vGPU
1312  * @vgpu: a vGPU
1313  *
1314  * This function is called when a vGPU is being destroyed.
1315  *
1316  */
1317 void intel_vgpu_clean_submission(struct intel_vgpu *vgpu)
1318 {
1319 	struct intel_vgpu_submission *s = &vgpu->submission;
1320 	struct intel_engine_cs *engine;
1321 	enum intel_engine_id id;
1322 
1323 	intel_vgpu_select_submission_ops(vgpu, ALL_ENGINES, 0);
1324 
1325 	i915_context_ppgtt_root_restore(s, i915_vm_to_ppgtt(s->shadow[0]->vm));
1326 	for_each_engine(engine, vgpu->gvt->gt, id)
1327 		intel_context_put(s->shadow[id]);
1328 
1329 	kmem_cache_destroy(s->workloads);
1330 }
1331 
1332 
1333 /**
1334  * intel_vgpu_reset_submission - reset submission-related resource for vGPU
1335  * @vgpu: a vGPU
1336  * @engine_mask: engines expected to be reset
1337  *
1338  * This function is called when a vGPU is being reset.
1339  *
1340  */
1341 void intel_vgpu_reset_submission(struct intel_vgpu *vgpu,
1342 				 intel_engine_mask_t engine_mask)
1343 {
1344 	struct intel_vgpu_submission *s = &vgpu->submission;
1345 
1346 	if (!s->active)
1347 		return;
1348 
1349 	intel_vgpu_clean_workloads(vgpu, engine_mask);
1350 	s->ops->reset(vgpu, engine_mask);
1351 }
1352 
1353 static void
1354 i915_context_ppgtt_root_save(struct intel_vgpu_submission *s,
1355 			     struct i915_ppgtt *ppgtt)
1356 {
1357 	int i;
1358 
1359 	if (i915_vm_is_4lvl(&ppgtt->vm)) {
1360 		s->i915_context_pml4 = px_dma(ppgtt->pd);
1361 	} else {
1362 		for (i = 0; i < GEN8_3LVL_PDPES; i++) {
1363 			struct i915_page_directory * const pd =
1364 				i915_pd_entry(ppgtt->pd, i);
1365 
1366 			s->i915_context_pdps[i] = px_dma(pd);
1367 		}
1368 	}
1369 }
1370 
1371 /**
1372  * intel_vgpu_setup_submission - setup submission-related resource for vGPU
1373  * @vgpu: a vGPU
1374  *
1375  * This function is called when a vGPU is being created.
1376  *
1377  * Returns:
1378  * Zero on success, negative error code if failed.
1379  *
1380  */
1381 int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
1382 {
1383 	struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
1384 	struct intel_vgpu_submission *s = &vgpu->submission;
1385 	struct intel_engine_cs *engine;
1386 	struct i915_ppgtt *ppgtt;
1387 	enum intel_engine_id i;
1388 	int ret;
1389 
1390 	ppgtt = i915_ppgtt_create(&i915->gt);
1391 	if (IS_ERR(ppgtt))
1392 		return PTR_ERR(ppgtt);
1393 
1394 	i915_context_ppgtt_root_save(s, ppgtt);
1395 
1396 	for_each_engine(engine, vgpu->gvt->gt, i) {
1397 		struct intel_context *ce;
1398 
1399 		INIT_LIST_HEAD(&s->workload_q_head[i]);
1400 		s->shadow[i] = ERR_PTR(-EINVAL);
1401 
1402 		ce = intel_context_create(engine);
1403 		if (IS_ERR(ce)) {
1404 			ret = PTR_ERR(ce);
1405 			goto out_shadow_ctx;
1406 		}
1407 
1408 		i915_vm_put(ce->vm);
1409 		ce->vm = i915_vm_get(&ppgtt->vm);
1410 		intel_context_set_single_submission(ce);
1411 
1412 		/* Max ring buffer size */
1413 		if (!intel_uc_wants_guc_submission(&engine->gt->uc))
1414 			ce->ring_size = SZ_2M;
1415 
1416 		s->shadow[i] = ce;
1417 	}
1418 
1419 	bitmap_zero(s->shadow_ctx_desc_updated, I915_NUM_ENGINES);
1420 
1421 	s->workloads = kmem_cache_create_usercopy("gvt-g_vgpu_workload",
1422 						  sizeof(struct intel_vgpu_workload), 0,
1423 						  SLAB_HWCACHE_ALIGN,
1424 						  offsetof(struct intel_vgpu_workload, rb_tail),
1425 						  sizeof_field(struct intel_vgpu_workload, rb_tail),
1426 						  NULL);
1427 
1428 	if (!s->workloads) {
1429 		ret = -ENOMEM;
1430 		goto out_shadow_ctx;
1431 	}
1432 
1433 	atomic_set(&s->running_workload_num, 0);
1434 	bitmap_zero(s->tlb_handle_pending, I915_NUM_ENGINES);
1435 
1436 	memset(s->last_ctx, 0, sizeof(s->last_ctx));
1437 
1438 	i915_vm_put(&ppgtt->vm);
1439 	return 0;
1440 
1441 out_shadow_ctx:
1442 	i915_context_ppgtt_root_restore(s, ppgtt);
1443 	for_each_engine(engine, vgpu->gvt->gt, i) {
1444 		if (IS_ERR(s->shadow[i]))
1445 			break;
1446 
1447 		intel_context_put(s->shadow[i]);
1448 	}
1449 	i915_vm_put(&ppgtt->vm);
1450 	return ret;
1451 }
1452 
1453 /**
1454  * intel_vgpu_select_submission_ops - select virtual submission interface
1455  * @vgpu: a vGPU
1456  * @engine_mask: either ALL_ENGINES or target engine mask
1457  * @interface: expected vGPU virtual submission interface
1458  *
1459  * This function is called when guest configures submission interface.
1460  *
1461  * Returns:
1462  * Zero on success, negative error code if failed.
1463  *
1464  */
1465 int intel_vgpu_select_submission_ops(struct intel_vgpu *vgpu,
1466 				     intel_engine_mask_t engine_mask,
1467 				     unsigned int interface)
1468 {
1469 	struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
1470 	struct intel_vgpu_submission *s = &vgpu->submission;
1471 	const struct intel_vgpu_submission_ops *ops[] = {
1472 		[INTEL_VGPU_EXECLIST_SUBMISSION] =
1473 			&intel_vgpu_execlist_submission_ops,
1474 	};
1475 	int ret;
1476 
1477 	if (drm_WARN_ON(&i915->drm, interface >= ARRAY_SIZE(ops)))
1478 		return -EINVAL;
1479 
1480 	if (drm_WARN_ON(&i915->drm,
1481 			interface == 0 && engine_mask != ALL_ENGINES))
1482 		return -EINVAL;
1483 
1484 	if (s->active)
1485 		s->ops->clean(vgpu, engine_mask);
1486 
1487 	if (interface == 0) {
1488 		s->ops = NULL;
1489 		s->virtual_submission_interface = 0;
1490 		s->active = false;
1491 		gvt_dbg_core("vgpu%d: remove submission ops\n", vgpu->id);
1492 		return 0;
1493 	}
1494 
1495 	ret = ops[interface]->init(vgpu, engine_mask);
1496 	if (ret)
1497 		return ret;
1498 
1499 	s->ops = ops[interface];
1500 	s->virtual_submission_interface = interface;
1501 	s->active = true;
1502 
1503 	gvt_dbg_core("vgpu%d: activate ops [ %s ]\n",
1504 			vgpu->id, s->ops->name);
1505 
1506 	return 0;
1507 }
1508 
1509 /**
1510  * intel_vgpu_destroy_workload - destroy a vGPU workload
1511  * @workload: workload to destroy
1512  *
1513  * This function is called when destroy a vGPU workload.
1514  *
1515  */
1516 void intel_vgpu_destroy_workload(struct intel_vgpu_workload *workload)
1517 {
1518 	struct intel_vgpu_submission *s = &workload->vgpu->submission;
1519 
1520 	intel_context_unpin(s->shadow[workload->engine->id]);
1521 	release_shadow_batch_buffer(workload);
1522 	release_shadow_wa_ctx(&workload->wa_ctx);
1523 
1524 	if (!list_empty(&workload->lri_shadow_mm)) {
1525 		struct intel_vgpu_mm *m, *mm;
1526 		list_for_each_entry_safe(m, mm, &workload->lri_shadow_mm,
1527 					 ppgtt_mm.link) {
1528 			list_del(&m->ppgtt_mm.link);
1529 			intel_vgpu_mm_put(m);
1530 		}
1531 	}
1532 
1533 	GEM_BUG_ON(!list_empty(&workload->lri_shadow_mm));
1534 	if (workload->shadow_mm)
1535 		intel_vgpu_mm_put(workload->shadow_mm);
1536 
1537 	kmem_cache_free(s->workloads, workload);
1538 }
1539 
1540 static struct intel_vgpu_workload *
1541 alloc_workload(struct intel_vgpu *vgpu)
1542 {
1543 	struct intel_vgpu_submission *s = &vgpu->submission;
1544 	struct intel_vgpu_workload *workload;
1545 
1546 	workload = kmem_cache_zalloc(s->workloads, GFP_KERNEL);
1547 	if (!workload)
1548 		return ERR_PTR(-ENOMEM);
1549 
1550 	INIT_LIST_HEAD(&workload->list);
1551 	INIT_LIST_HEAD(&workload->shadow_bb);
1552 	INIT_LIST_HEAD(&workload->lri_shadow_mm);
1553 
1554 	init_waitqueue_head(&workload->shadow_ctx_status_wq);
1555 	atomic_set(&workload->shadow_ctx_active, 0);
1556 
1557 	workload->status = -EINPROGRESS;
1558 	workload->vgpu = vgpu;
1559 
1560 	return workload;
1561 }
1562 
1563 #define RING_CTX_OFF(x) \
1564 	offsetof(struct execlist_ring_context, x)
1565 
1566 static void read_guest_pdps(struct intel_vgpu *vgpu,
1567 		u64 ring_context_gpa, u32 pdp[8])
1568 {
1569 	u64 gpa;
1570 	int i;
1571 
1572 	gpa = ring_context_gpa + RING_CTX_OFF(pdps[0].val);
1573 
1574 	for (i = 0; i < 8; i++)
1575 		intel_gvt_hypervisor_read_gpa(vgpu,
1576 				gpa + i * 8, &pdp[7 - i], 4);
1577 }
1578 
1579 static int prepare_mm(struct intel_vgpu_workload *workload)
1580 {
1581 	struct execlist_ctx_descriptor_format *desc = &workload->ctx_desc;
1582 	struct intel_vgpu_mm *mm;
1583 	struct intel_vgpu *vgpu = workload->vgpu;
1584 	enum intel_gvt_gtt_type root_entry_type;
1585 	u64 pdps[GVT_RING_CTX_NR_PDPS];
1586 
1587 	switch (desc->addressing_mode) {
1588 	case 1: /* legacy 32-bit */
1589 		root_entry_type = GTT_TYPE_PPGTT_ROOT_L3_ENTRY;
1590 		break;
1591 	case 3: /* legacy 64-bit */
1592 		root_entry_type = GTT_TYPE_PPGTT_ROOT_L4_ENTRY;
1593 		break;
1594 	default:
1595 		gvt_vgpu_err("Advanced Context mode(SVM) is not supported!\n");
1596 		return -EINVAL;
1597 	}
1598 
1599 	read_guest_pdps(workload->vgpu, workload->ring_context_gpa, (void *)pdps);
1600 
1601 	mm = intel_vgpu_get_ppgtt_mm(workload->vgpu, root_entry_type, pdps);
1602 	if (IS_ERR(mm))
1603 		return PTR_ERR(mm);
1604 
1605 	workload->shadow_mm = mm;
1606 	return 0;
1607 }
1608 
1609 #define same_context(a, b) (((a)->context_id == (b)->context_id) && \
1610 		((a)->lrca == (b)->lrca))
1611 
1612 /**
1613  * intel_vgpu_create_workload - create a vGPU workload
1614  * @vgpu: a vGPU
1615  * @engine: the engine
1616  * @desc: a guest context descriptor
1617  *
1618  * This function is called when creating a vGPU workload.
1619  *
1620  * Returns:
1621  * struct intel_vgpu_workload * on success, or an ERR_PTR-encoded
1622  * negative error code on failure.
1623  *
1624  */
1625 struct intel_vgpu_workload *
1626 intel_vgpu_create_workload(struct intel_vgpu *vgpu,
1627 			   const struct intel_engine_cs *engine,
1628 			   struct execlist_ctx_descriptor_format *desc)
1629 {
1630 	struct intel_vgpu_submission *s = &vgpu->submission;
1631 	struct list_head *q = workload_q_head(vgpu, engine);
1632 	struct intel_vgpu_workload *last_workload = NULL;
1633 	struct intel_vgpu_workload *workload = NULL;
1634 	u64 ring_context_gpa;
1635 	u32 head, tail, start, ctl, ctx_ctl, per_ctx, indirect_ctx;
1636 	u32 guest_head;
1637 	int ret;
1638 
1639 	ring_context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
1640 			(u32)((desc->lrca + 1) << I915_GTT_PAGE_SHIFT));
1641 	if (ring_context_gpa == INTEL_GVT_INVALID_ADDR) {
1642 		gvt_vgpu_err("invalid guest context LRCA: %x\n", desc->lrca);
1643 		return ERR_PTR(-EINVAL);
1644 	}
1645 
1646 	intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
1647 			RING_CTX_OFF(ring_header.val), &head, 4);
1648 
1649 	intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
1650 			RING_CTX_OFF(ring_tail.val), &tail, 4);
1651 
1652 	guest_head = head;
1653 
1654 	head &= RB_HEAD_OFF_MASK;
1655 	tail &= RB_TAIL_OFF_MASK;
1656 
1657 	list_for_each_entry_reverse(last_workload, q, list) {
1658 
1659 		if (same_context(&last_workload->ctx_desc, desc)) {
1660 			gvt_dbg_el("ring %s cur workload == last\n",
1661 				   engine->name);
1662 			gvt_dbg_el("ctx head %x real head %lx\n", head,
1663 				   last_workload->rb_tail);
1664 			/*
1665 			 * cannot use guest context head pointer here,
1666 			 * as it might not be updated at this time
1667 			 */
1668 			head = last_workload->rb_tail;
1669 			break;
1670 		}
1671 	}
1672 
1673 	gvt_dbg_el("ring %s begin a new workload\n", engine->name);
1674 
1675 	/* record some ring buffer register values for scan and shadow */
1676 	intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
1677 			RING_CTX_OFF(rb_start.val), &start, 4);
1678 	intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
1679 			RING_CTX_OFF(rb_ctrl.val), &ctl, 4);
1680 	intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
1681 			RING_CTX_OFF(ctx_ctrl.val), &ctx_ctl, 4);
1682 
1683 	if (!intel_gvt_ggtt_validate_range(vgpu, start,
1684 				_RING_CTL_BUF_SIZE(ctl))) {
1685 		gvt_vgpu_err("context contains invalid rb at: 0x%x\n", start);
1686 		return ERR_PTR(-EINVAL);
1687 	}
1688 
1689 	workload = alloc_workload(vgpu);
1690 	if (IS_ERR(workload))
1691 		return workload;
1692 
1693 	workload->engine = engine;
1694 	workload->ctx_desc = *desc;
1695 	workload->ring_context_gpa = ring_context_gpa;
1696 	workload->rb_head = head;
1697 	workload->guest_rb_head = guest_head;
1698 	workload->rb_tail = tail;
1699 	workload->rb_start = start;
1700 	workload->rb_ctl = ctl;
1701 
1702 	if (engine->id == RCS0) {
1703 		intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
1704 			RING_CTX_OFF(bb_per_ctx_ptr.val), &per_ctx, 4);
1705 		intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
1706 			RING_CTX_OFF(rcs_indirect_ctx.val), &indirect_ctx, 4);
1707 
1708 		workload->wa_ctx.indirect_ctx.guest_gma =
1709 			indirect_ctx & INDIRECT_CTX_ADDR_MASK;
1710 		workload->wa_ctx.indirect_ctx.size =
1711 			(indirect_ctx & INDIRECT_CTX_SIZE_MASK) *
1712 			CACHELINE_BYTES;
1713 
1714 		if (workload->wa_ctx.indirect_ctx.size != 0) {
1715 			if (!intel_gvt_ggtt_validate_range(vgpu,
1716 				workload->wa_ctx.indirect_ctx.guest_gma,
1717 				workload->wa_ctx.indirect_ctx.size)) {
1718 				gvt_vgpu_err("invalid wa_ctx at: 0x%lx\n",
1719 				    workload->wa_ctx.indirect_ctx.guest_gma);
1720 				kmem_cache_free(s->workloads, workload);
1721 				return ERR_PTR(-EINVAL);
1722 			}
1723 		}
1724 
1725 		workload->wa_ctx.per_ctx.guest_gma =
1726 			per_ctx & PER_CTX_ADDR_MASK;
1727 		workload->wa_ctx.per_ctx.valid = per_ctx & 1;
1728 		if (workload->wa_ctx.per_ctx.valid) {
1729 			if (!intel_gvt_ggtt_validate_range(vgpu,
1730 				workload->wa_ctx.per_ctx.guest_gma,
1731 				CACHELINE_BYTES)) {
1732 				gvt_vgpu_err("invalid per_ctx at: 0x%lx\n",
1733 					workload->wa_ctx.per_ctx.guest_gma);
1734 				kmem_cache_free(s->workloads, workload);
1735 				return ERR_PTR(-EINVAL);
1736 			}
1737 		}
1738 	}
1739 
1740 	gvt_dbg_el("workload %p ring %s head %x tail %x start %x ctl %x\n",
1741 		   workload, engine->name, head, tail, start, ctl);
1742 
1743 	ret = prepare_mm(workload);
1744 	if (ret) {
1745 		kmem_cache_free(s->workloads, workload);
1746 		return ERR_PTR(ret);
1747 	}
1748 
1749 	/* Only scan and shadow the first workload in the queue
1750 	 * as there is only one pre-allocated buf-obj for shadow.
1751 	 */
1752 	if (list_empty(q)) {
1753 		intel_wakeref_t wakeref;
1754 
1755 		with_intel_runtime_pm(engine->gt->uncore->rpm, wakeref)
1756 			ret = intel_gvt_scan_and_shadow_workload(workload);
1757 	}
1758 
1759 	if (ret) {
1760 		if (vgpu_is_vm_unhealthy(ret))
1761 			enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR);
1762 		intel_vgpu_destroy_workload(workload);
1763 		return ERR_PTR(ret);
1764 	}
1765 
1766 	ret = intel_context_pin(s->shadow[engine->id]);
1767 	if (ret) {
1768 		intel_vgpu_destroy_workload(workload);
1769 		return ERR_PTR(ret);
1770 	}
1771 
1772 	return workload;
1773 }
1774 
1775 /**
1776  * intel_vgpu_queue_workload - Queue a vGPU workload
1777  * @workload: the workload to queue in
1778  */
1779 void intel_vgpu_queue_workload(struct intel_vgpu_workload *workload)
1780 {
1781 	list_add_tail(&workload->list,
1782 		      workload_q_head(workload->vgpu, workload->engine));
1783 	intel_gvt_kick_schedule(workload->vgpu->gvt);
1784 	wake_up(&workload->vgpu->gvt->scheduler.waitq[workload->engine->id]);
1785 }
1786