1 /*
2  * Copyright © 2011-2012 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Ben Widawsky <ben@bwidawsk.net>
25  *
26  */
27 
28 /*
29  * This file implements HW context support. On gen5+ a HW context consists of an
30  * opaque GPU object which is referenced at times of context saves and restores.
31  * With RC6 enabled, the context is also referenced as the GPU enters and exits
32  * RC6 (the GPU has its own internal power context, except on gen5). Though
33  * something like a context does exist for the media ring, the code only
34  * supports contexts for the render ring.
35  *
36  * In software, there is a distinction between contexts created by the user,
37  * and the default HW context. The default HW context is used by GPU clients
38  * that do not request setup of their own hardware context. The default
39  * context's state is never restored to help prevent programming errors. This
40  * would happen if a client ran and piggy-backed off another client's GPU state.
41  * The default context only exists to give the GPU some offset to load as the
42  * current context, in order to invoke a save of the context we actually care
43  * about. In fact, the code could likely be constructed, albeit in a more
44  * complicated fashion, to never use the default context, though that limits
45  * the driver's ability to swap out and/or destroy other contexts.
46  *
47  * All other contexts are created as a request by the GPU client. These contexts
48  * store GPU state, and thus allow GPU clients to not re-emit state (and
49  * potentially query certain state) at any time. The kernel driver makes
50  * certain that the appropriate commands are inserted.
51  *
52  * The context life cycle is semi-complicated in that context BOs may live
53  * longer than the context itself because of the way the hardware and object
54  * tracking work. Below is a very crude representation of the state machine
55  * describing the context life.
56  *                                         refcount     pincount     active
57  * S0: initial state                          0            0           0
58  * S1: context created                        1            0           0
59  * S2: context is currently running           2            1           X
60  * S3: GPU referenced, but not current        2            0           1
61  * S4: context is current, but destroyed      1            1           0
62  * S5: like S3, but destroyed                 1            0           1
63  *
64  * The most common (but not all) transitions:
65  * S0->S1: client creates a context
66  * S1->S2: client submits execbuf with context
67  * S2->S3: another client submits an execbuf with a different context
68  * S3->S1: context object was retired
69  * S3->S2: client submits another execbuf with this context
70  * S2->S4: context destroy called with current context
71  * S3->S5->S0: destroy path
72  * S4->S5->S0: destroy path on current context
73  *
74  * There are two confusing terms used above:
75  *  The "current context" means the context which is currently running on the
76  *  GPU. The GPU has loaded its state already and has stored away the gtt
77  *  offset of the BO. The GPU is not actively referencing the data at this
78  *  offset, but it will on the next context switch. The only way to avoid this
79  *  is to do a GPU reset.
80  *
81  *  An "active context" is one which was previously the "current context" and is
82  *  on the active list waiting for the next context switch to occur. Until this
83  *  happens, the object must remain at the same gtt offset. It is therefore
84  *  possible to destroy a context while it is still active.
85  *
86  */
87 
88 #include <drm/drmP.h>
89 #include <drm/i915_drm.h>
90 #include "i915_drv.h"
91 #include "i915_trace.h"
92 
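/*
 * For reference, a minimal userspace sketch (not part of this file) of the
 * create/destroy ioctl round-trip served by the handlers at the bottom of
 * this file. This assumes an already-open i915 DRM fd and libdrm's
 * drmIoctl() wrapper; the execbuffer detail is only indicative.
 *
 *	struct drm_i915_gem_context_create create = {};
 *	struct drm_i915_gem_context_destroy destroy = {};
 *
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create) == 0) {
 *		// create.ctx_id names the new context; batches run in it by
 *		// placing it in drm_i915_gem_execbuffer2.rsvd1 (see
 *		// i915_execbuffer2_set_context_id()).
 *		destroy.ctx_id = create.ctx_id;
 *		drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY, &destroy);
 *	}
 */
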
93 /* This is a HW constraint. The value below is the largest known requirement
94  * I've seen in a spec to date, and that was a workaround for a non-shipping
95  * part. It should be safe to decrease this, but it's more future proof as is.
96  */
97 #define GEN6_CONTEXT_ALIGN (64<<10)
98 #define GEN7_CONTEXT_ALIGN 4096
99 
100 static size_t get_context_alignment(struct drm_device *dev)
101 {
102 	if (IS_GEN6(dev))
103 		return GEN6_CONTEXT_ALIGN;
104 
105 	return GEN7_CONTEXT_ALIGN;
106 }
107 
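/* Size in bytes of the saved hardware context image for this platform, taken
 * from the CXT_SIZE registers on gen6/7 (fixed values on HSW and gen8).
 */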
108 static int get_context_size(struct drm_device *dev)
109 {
110 	struct drm_i915_private *dev_priv = dev->dev_private;
111 	int ret;
112 	u32 reg;
113 
114 	switch (INTEL_INFO(dev)->gen) {
115 	case 6:
116 		reg = I915_READ(CXT_SIZE);
117 		ret = GEN6_CXT_TOTAL_SIZE(reg) * 64;
118 		break;
119 	case 7:
120 		reg = I915_READ(GEN7_CXT_SIZE);
121 		if (IS_HASWELL(dev))
122 			ret = HSW_CXT_TOTAL_SIZE;
123 		else
124 			ret = GEN7_CXT_TOTAL_SIZE(reg) * 64;
125 		break;
126 	case 8:
127 		ret = GEN8_CXT_TOTAL_SIZE;
128 		break;
129 	default:
130 		BUG();
131 	}
132 
133 	return ret;
134 }
135 
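/* Unbind any VMAs still hanging around in the context's PPGTT so the address
 * space can be torn down; see the caller's comment about imported objects.
 */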
136 static void i915_gem_context_clean(struct intel_context *ctx)
137 {
138 	struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
139 	struct i915_vma *vma, *next;
140 
141 	if (!ppgtt)
142 		return;
143 
144 	list_for_each_entry_safe(vma, next, &ppgtt->base.inactive_list,
145 				 mm_list) {
146 		if (WARN_ON(__i915_vma_unbind_no_wait(vma)))
147 			break;
148 	}
149 }
150 
151 void i915_gem_context_free(struct kref *ctx_ref)
152 {
153 	struct intel_context *ctx = container_of(ctx_ref, typeof(*ctx), ref);
154 
155 	trace_i915_context_free(ctx);
156 
157 	if (i915.enable_execlists)
158 		intel_lr_context_free(ctx);
159 
160 	kfree(ctx->jump_whitelist);
161 
162 	/*
163 	 * This context is going away and we need to remove all VMAs still
164 	 * around. This is to handle imported shared objects for which
165 	 * destructor did not run when their handles were closed.
166 	 */
167 	i915_gem_context_clean(ctx);
168 
169 	i915_ppgtt_put(ctx->ppgtt);
170 
171 	if (ctx->legacy_hw_ctx.rcs_state)
172 		drm_gem_object_unreference(&ctx->legacy_hw_ctx.rcs_state->base);
173 	list_del(&ctx->link);
174 	kfree(ctx);
175 }
176 
177 struct drm_i915_gem_object *
178 i915_gem_alloc_context_obj(struct drm_device *dev, size_t size)
179 {
180 	struct drm_i915_gem_object *obj;
181 	int ret;
182 
183 	obj = i915_gem_alloc_object(dev, size);
184 	if (obj == NULL)
185 		return ERR_PTR(-ENOMEM);
186 
187 	/*
188 	 * Try to make the context utilize L3 as well as LLC.
189 	 *
190 	 * On VLV we don't have L3 controls in the PTEs so we
191 	 * shouldn't touch the cache level, especially as that
192 	 * would make the object snooped which might have a
193 	 * negative performance impact.
194 	 */
195 	if (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev)) {
196 		ret = i915_gem_object_set_cache_level(obj, I915_CACHE_L3_LLC);
197 		/* Failure shouldn't ever happen this early */
198 		if (WARN_ON(ret)) {
199 			drm_gem_object_unreference(&obj->base);
200 			return ERR_PTR(ret);
201 		}
202 	}
203 
204 	return obj;
205 }
206 
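/* Allocate and initialise a new intel_context: take the initial reference,
 * link it into the device's context list, allocate the legacy RCS state
 * object where the platform needs one, and give user-created contexts an
 * idr handle.
 */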
207 static struct intel_context *
208 __create_hw_context(struct drm_device *dev,
209 		    struct drm_i915_file_private *file_priv)
210 {
211 	struct drm_i915_private *dev_priv = dev->dev_private;
212 	struct intel_context *ctx;
213 	int ret;
214 
215 	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
216 	if (ctx == NULL)
217 		return ERR_PTR(-ENOMEM);
218 
219 	kref_init(&ctx->ref);
220 	list_add_tail(&ctx->link, &dev_priv->context_list);
221 	ctx->i915 = dev_priv;
222 
223 	if (dev_priv->hw_context_size) {
224 		struct drm_i915_gem_object *obj =
225 				i915_gem_alloc_context_obj(dev, dev_priv->hw_context_size);
226 		if (IS_ERR(obj)) {
227 			ret = PTR_ERR(obj);
228 			goto err_out;
229 		}
230 		ctx->legacy_hw_ctx.rcs_state = obj;
231 	}
232 
233 	/* Default context will never have a file_priv */
234 	if (file_priv != NULL) {
235 		ret = idr_alloc(&file_priv->context_idr, ctx,
236 				DEFAULT_CONTEXT_HANDLE, 0, GFP_KERNEL);
237 		if (ret < 0)
238 			goto err_out;
239 	} else
240 		ret = DEFAULT_CONTEXT_HANDLE;
241 
242 	ctx->file_priv = file_priv;
243 	ctx->user_handle = ret;
244 	/* NB: Mark all slices as needing a remap so that when the context first
245 	 * loads it will restore whatever remap state already exists. If there
246 	 * is no remap info, it will be a NOP. */
247 	ctx->remap_slice = (1 << NUM_L3_SLICES(dev)) - 1;
248 
249 	ctx->hang_stats.ban_period_seconds = DRM_I915_CTX_BAN_PERIOD;
250 
251 	ctx->jump_whitelist = NULL;
252 	ctx->jump_whitelist_cmds = 0;
253 
254 	return ctx;
255 
256 err_out:
257 	i915_gem_context_unreference(ctx);
258 	return ERR_PTR(ret);
259 }
260 
261 /**
262  * The default context needs to exist per ring that uses contexts. It stores the
263  * context state of the GPU for applications that don't utilize HW contexts, as
264  * well as for the idle case.
265  */
266 static struct intel_context *
267 i915_gem_create_context(struct drm_device *dev,
268 			struct drm_i915_file_private *file_priv)
269 {
270 	const bool is_global_default_ctx = file_priv == NULL;
271 	struct intel_context *ctx;
272 	int ret = 0;
273 
274 	BUG_ON(!mutex_is_locked(&dev->struct_mutex));
275 
276 	ctx = __create_hw_context(dev, file_priv);
277 	if (IS_ERR(ctx))
278 		return ctx;
279 
280 	if (is_global_default_ctx && ctx->legacy_hw_ctx.rcs_state) {
281 		/* We may need to do things with the shrinker which
282 		 * require us to immediately switch back to the default
283 		 * context. This can cause a problem as pinning the
284 		 * default context also requires GTT space which may not
285 		 * be available. To avoid this we always pin the default
286 		 * context.
287 		 */
288 		ret = i915_gem_obj_ggtt_pin(ctx->legacy_hw_ctx.rcs_state,
289 					    get_context_alignment(dev), 0);
290 		if (ret) {
291 			DRM_DEBUG_DRIVER("Couldn't pin %d\n", ret);
292 			goto err_destroy;
293 		}
294 	}
295 
296 	if (USES_FULL_PPGTT(dev)) {
297 		struct i915_hw_ppgtt *ppgtt = i915_ppgtt_create(dev, file_priv);
298 
299 		if (IS_ERR_OR_NULL(ppgtt)) {
300 			DRM_DEBUG_DRIVER("PPGTT setup failed (%ld)\n",
301 					 PTR_ERR(ppgtt));
302 			ret = PTR_ERR(ppgtt);
303 			goto err_unpin;
304 		}
305 
306 		ctx->ppgtt = ppgtt;
307 	}
308 
309 	trace_i915_context_create(ctx);
310 
311 	return ctx;
312 
313 err_unpin:
314 	if (is_global_default_ctx && ctx->legacy_hw_ctx.rcs_state)
315 		i915_gem_object_ggtt_unpin(ctx->legacy_hw_ctx.rcs_state);
316 err_destroy:
317 	idr_remove(&file_priv->context_idr, ctx->user_handle);
318 	i915_gem_context_unreference(ctx);
319 	return ERR_PTR(ret);
320 }
321 
322 void i915_gem_context_reset(struct drm_device *dev)
323 {
324 	struct drm_i915_private *dev_priv = dev->dev_private;
325 	int i;
326 
327 	if (i915.enable_execlists) {
328 		struct intel_context *ctx;
329 
330 		list_for_each_entry(ctx, &dev_priv->context_list, link) {
331 			intel_lr_context_reset(dev, ctx);
332 		}
333 
334 		return;
335 	}
336 
337 	for (i = 0; i < I915_NUM_RINGS; i++) {
338 		struct intel_engine_cs *ring = &dev_priv->ring[i];
339 		struct intel_context *lctx = ring->last_context;
340 
341 		if (lctx) {
342 			if (lctx->legacy_hw_ctx.rcs_state && i == RCS)
343 				i915_gem_object_ggtt_unpin(lctx->legacy_hw_ctx.rcs_state);
344 
345 			i915_gem_context_unreference(lctx);
346 			ring->last_context = NULL;
347 		}
348 
349 		/* Force the GPU state to be reinitialised on enabling */
350 		if (ring->default_context)
351 			ring->default_context->legacy_hw_ctx.initialized = false;
352 	}
353 }
354 
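/* One-time setup at driver load: pick the context mode (execlists, legacy HW
 * contexts, or "fake" software-only contexts) and create the global default
 * context shared by all rings.
 */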
355 int i915_gem_context_init(struct drm_device *dev)
356 {
357 	struct drm_i915_private *dev_priv = dev->dev_private;
358 	struct intel_context *ctx;
359 	int i;
360 
361 	/* Init should only be called once per module load. Eventually the
362 	 * restriction on the context_disabled check can be loosened. */
363 	if (WARN_ON(dev_priv->ring[RCS].default_context))
364 		return 0;
365 
366 	if (intel_vgpu_active(dev) && HAS_LOGICAL_RING_CONTEXTS(dev)) {
367 		if (!i915.enable_execlists) {
368 			DRM_INFO("Only EXECLIST mode is supported in vgpu.\n");
369 			return -EINVAL;
370 		}
371 	}
372 
373 	if (i915.enable_execlists) {
374 		/* NB: intentionally left blank. We will allocate our own
375 		 * backing objects as we need them, thank you very much */
376 		dev_priv->hw_context_size = 0;
377 	} else if (HAS_HW_CONTEXTS(dev)) {
378 		dev_priv->hw_context_size = round_up(get_context_size(dev), 4096);
379 		if (dev_priv->hw_context_size > (1<<20)) {
380 			DRM_DEBUG_DRIVER("Disabling HW Contexts; invalid size %d\n",
381 					 dev_priv->hw_context_size);
382 			dev_priv->hw_context_size = 0;
383 		}
384 	}
385 
386 	ctx = i915_gem_create_context(dev, NULL);
387 	if (IS_ERR(ctx)) {
388 		DRM_ERROR("Failed to create default global context (error %ld)\n",
389 			  PTR_ERR(ctx));
390 		return PTR_ERR(ctx);
391 	}
392 
393 	for (i = 0; i < I915_NUM_RINGS; i++) {
394 		struct intel_engine_cs *ring = &dev_priv->ring[i];
395 
396 		/* NB: RCS will hold a ref for all rings */
397 		ring->default_context = ctx;
398 	}
399 
400 	DRM_DEBUG_DRIVER("%s context support initialized\n",
401 			i915.enable_execlists ? "LR" :
402 			dev_priv->hw_context_size ? "HW" : "fake");
403 	return 0;
404 }
405 
406 void i915_gem_context_fini(struct drm_device *dev)
407 {
408 	struct drm_i915_private *dev_priv = dev->dev_private;
409 	struct intel_context *dctx = dev_priv->ring[RCS].default_context;
410 	int i;
411 
412 	if (dctx->legacy_hw_ctx.rcs_state) {
413 		/* The only known way to stop the gpu from accessing the hw context is
414 		 * to reset it. Do this as the very last operation to avoid confusing
415 		 * other code, leading to spurious errors. */
416 		intel_gpu_reset(dev);
417 
418 		/* When default context is created and switched to, base object refcount
419 		 * will be 2 (+1 from object creation and +1 from do_switch()).
420 		 * i915_gem_context_fini() will be called after gpu_idle() has switched
421 		 * to default context. So we need to unreference the base object once
422 		 * to offset the do_switch part, so that i915_gem_context_unreference()
423 		 * can then free the base object correctly. */
424 		WARN_ON(!dev_priv->ring[RCS].last_context);
425 		if (dev_priv->ring[RCS].last_context == dctx) {
426 			/* Fake switch to NULL context */
427 			WARN_ON(dctx->legacy_hw_ctx.rcs_state->active);
428 			i915_gem_object_ggtt_unpin(dctx->legacy_hw_ctx.rcs_state);
429 			i915_gem_context_unreference(dctx);
430 			dev_priv->ring[RCS].last_context = NULL;
431 		}
432 
433 		i915_gem_object_ggtt_unpin(dctx->legacy_hw_ctx.rcs_state);
434 	}
435 
436 	for (i = 0; i < I915_NUM_RINGS; i++) {
437 		struct intel_engine_cs *ring = &dev_priv->ring[i];
438 
439 		if (ring->last_context)
440 			i915_gem_context_unreference(ring->last_context);
441 
442 		ring->default_context = NULL;
443 		ring->last_context = NULL;
444 	}
445 
446 	i915_gem_context_unreference(dctx);
447 }
448 
449 int i915_gem_context_enable(struct drm_i915_gem_request *req)
450 {
451 	struct intel_engine_cs *ring = req->ring;
452 	int ret;
453 
454 	if (i915.enable_execlists) {
455 		if (ring->init_context == NULL)
456 			return 0;
457 
458 		ret = ring->init_context(req);
459 	} else
460 		ret = i915_switch_context(req);
461 
462 	if (ret) {
463 		DRM_ERROR("ring init context: %d\n", ret);
464 		return ret;
465 	}
466 
467 	return 0;
468 }
469 
470 static int context_idr_cleanup(int id, void *p, void *data)
471 {
472 	struct intel_context *ctx = p;
473 
474 	i915_gem_context_unreference(ctx);
475 	return 0;
476 }
477 
478 int i915_gem_context_open(struct drm_device *dev, struct drm_file *file)
479 {
480 	struct drm_i915_file_private *file_priv = file->driver_priv;
481 	struct intel_context *ctx;
482 
483 	idr_init(&file_priv->context_idr);
484 
485 	mutex_lock(&dev->struct_mutex);
486 	ctx = i915_gem_create_context(dev, file_priv);
487 	mutex_unlock(&dev->struct_mutex);
488 
489 	if (IS_ERR(ctx)) {
490 		idr_destroy(&file_priv->context_idr);
491 		return PTR_ERR(ctx);
492 	}
493 
494 	return 0;
495 }
496 
497 void i915_gem_context_close(struct drm_device *dev, struct drm_file *file)
498 {
499 	struct drm_i915_file_private *file_priv = file->driver_priv;
500 
501 	idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL);
502 	idr_destroy(&file_priv->context_idr);
503 }
504 
505 struct intel_context *
506 i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id)
507 {
508 	struct intel_context *ctx;
509 
510 	ctx = (struct intel_context *)idr_find(&file_priv->context_idr, id);
511 	if (!ctx)
512 		return ERR_PTR(-ENOENT);
513 
514 	return ctx;
515 }
516 
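/* Emit the legacy MI_SET_CONTEXT command for @req, bracketed by the
 * MI_ARB_ON_OFF and PSMI sleep-message workarounds required on gen7+ when
 * other rings may signal semaphores during the switch.
 */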
517 static inline int
518 mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
519 {
520 	struct intel_engine_cs *ring = req->ring;
521 	u32 flags = hw_flags | MI_MM_SPACE_GTT;
522 	const int num_rings =
523 		/* Use an extended w/a on ivb+ if signalling from other rings */
524 		i915_semaphore_is_enabled(ring->dev) ?
525 		hweight32(INTEL_INFO(ring->dev)->ring_mask) - 1 :
526 		0;
527 	int len, i, ret;
528 
529 	/* w/a: If Flush TLB Invalidation Mode is enabled, driver must do a TLB
530 	 * invalidation prior to MI_SET_CONTEXT. On GEN6 we don't set the value
531 	 * explicitly, so we rely on the value at ring init, stored in
532 	 * itlb_before_ctx_switch.
533 	 */
534 	if (IS_GEN6(ring->dev)) {
535 		ret = ring->flush(req, I915_GEM_GPU_DOMAINS, 0);
536 		if (ret)
537 			return ret;
538 	}
539 
540 	/* These flags are for resource streamer on HSW+ */
541 	if (IS_HASWELL(ring->dev) || INTEL_INFO(ring->dev)->gen >= 8)
542 		flags |= (HSW_MI_RS_SAVE_STATE_EN | HSW_MI_RS_RESTORE_STATE_EN);
543 	else if (INTEL_INFO(ring->dev)->gen < 8)
544 		flags |= (MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN);
545 
546 
547 	len = 4;
548 	if (INTEL_INFO(ring->dev)->gen >= 7)
549 		len += 2 + (num_rings ? 4*num_rings + 2 : 0);
550 
551 	ret = intel_ring_begin(req, len);
552 	if (ret)
553 		return ret;
554 
555 	/* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */
556 	if (INTEL_INFO(ring->dev)->gen >= 7) {
557 		intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_DISABLE);
558 		if (num_rings) {
559 			struct intel_engine_cs *signaller;
560 
561 			intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(num_rings));
562 			for_each_ring(signaller, to_i915(ring->dev), i) {
563 				if (signaller == ring)
564 					continue;
565 
566 				intel_ring_emit(ring, RING_PSMI_CTL(signaller->mmio_base));
567 				intel_ring_emit(ring, _MASKED_BIT_ENABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
568 			}
569 		}
570 	}
571 
572 	intel_ring_emit(ring, MI_NOOP);
573 	intel_ring_emit(ring, MI_SET_CONTEXT);
574 	intel_ring_emit(ring, i915_gem_obj_ggtt_offset(req->ctx->legacy_hw_ctx.rcs_state) |
575 			flags);
576 	/*
577 	 * w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
578 	 * WaMiSetContext_Hang:snb,ivb,vlv
579 	 */
580 	intel_ring_emit(ring, MI_NOOP);
581 
582 	if (INTEL_INFO(ring->dev)->gen >= 7) {
583 		if (num_rings) {
584 			struct intel_engine_cs *signaller;
585 
586 			intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(num_rings));
587 			for_each_ring(signaller, to_i915(ring->dev), i) {
588 				if (signaller == ring)
589 					continue;
590 
591 				intel_ring_emit(ring, RING_PSMI_CTL(signaller->mmio_base));
592 				intel_ring_emit(ring, _MASKED_BIT_DISABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
593 			}
594 		}
595 		intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_ENABLE);
596 	}
597 
598 	intel_ring_advance(ring);
599 
600 	return ret;
601 }
602 
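/* A switch may only be skipped when the ring stays on the same full-PPGTT
 * context, no L3 remap is pending and this ring's page directories are not
 * marked dirty.
 */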
603 static inline bool should_skip_switch(struct intel_engine_cs *ring,
604 				      struct intel_context *from,
605 				      struct intel_context *to)
606 {
607 	if (to->remap_slice)
608 		return false;
609 
610 	if (to->ppgtt && from == to &&
611 	    !(intel_ring_flag(ring) & to->ppgtt->pd_dirty_rings))
612 		return true;
613 
614 	return false;
615 }
616 
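/* With a PPGTT, pre-gen8 hardware and the non-render rings want the page
 * directory loaded via LRI commands before the context/batch is submitted
 * (see the quoted requirement in do_switch()).
 */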
617 static bool
618 needs_pd_load_pre(struct intel_engine_cs *ring, struct intel_context *to)
619 {
620 	struct drm_i915_private *dev_priv = ring->dev->dev_private;
621 
622 	if (!to->ppgtt)
623 		return false;
624 
625 	if (INTEL_INFO(ring->dev)->gen < 8)
626 		return true;
627 
628 	if (ring != &dev_priv->ring[RCS])
629 		return true;
630 
631 	return false;
632 }
633 
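/* On gen8 the render ring reloads the page directories from the context
 * image itself, so an explicit reload after MI_SET_CONTEXT is only needed
 * when the restore was inhibited (new or default contexts).
 */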
634 static bool
635 needs_pd_load_post(struct intel_engine_cs *ring, struct intel_context *to,
636 		u32 hw_flags)
637 {
638 	struct drm_i915_private *dev_priv = ring->dev->dev_private;
639 
640 	if (!to->ppgtt)
641 		return false;
642 
643 	if (!IS_GEN8(ring->dev))
644 		return false;
645 
646 	if (ring != &dev_priv->ring[RCS])
647 		return false;
648 
649 	if (hw_flags & MI_RESTORE_INHIBIT)
650 		return true;
651 
652 	return false;
653 }
654 
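/* The heart of the legacy (ringbuffer) context switch: pin the target
 * context image, emit any page-directory load required before the switch,
 * emit MI_SET_CONTEXT, redo pending L3 remaps, and finally keep the old
 * context's image alive on the active list until the switch has actually
 * happened on the GPU.
 */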
655 static int do_switch(struct drm_i915_gem_request *req)
656 {
657 	struct intel_context *to = req->ctx;
658 	struct intel_engine_cs *ring = req->ring;
659 	struct drm_i915_private *dev_priv = ring->dev->dev_private;
660 	struct intel_context *from = ring->last_context;
661 	u32 hw_flags = 0;
662 	bool uninitialized = false;
663 	int ret, i;
664 
665 	if (from != NULL && ring == &dev_priv->ring[RCS]) {
666 		BUG_ON(from->legacy_hw_ctx.rcs_state == NULL);
667 		BUG_ON(!i915_gem_obj_is_pinned(from->legacy_hw_ctx.rcs_state));
668 	}
669 
670 	if (should_skip_switch(ring, from, to))
671 		return 0;
672 
673 	/* Trying to pin first makes error handling easier. */
674 	if (ring == &dev_priv->ring[RCS]) {
675 		ret = i915_gem_obj_ggtt_pin(to->legacy_hw_ctx.rcs_state,
676 					    get_context_alignment(ring->dev), 0);
677 		if (ret)
678 			return ret;
679 	}
680 
681 	/*
682 	 * Pin can switch back to the default context if we end up calling into
683 	 * evict_everything - as a last ditch gtt defrag effort that also
684 	 * switches to the default context. Hence we need to reload from here.
685 	 */
686 	from = ring->last_context;
687 
688 	if (needs_pd_load_pre(ring, to)) {
689 		/* Older GENs and non render rings still want the load first,
690 		 * "PP_DCLV followed by PP_DIR_BASE register through Load
691 		 * Register Immediate commands in Ring Buffer before submitting
692 		 * a context."*/
693 		trace_switch_mm(ring, to);
694 		ret = to->ppgtt->switch_mm(to->ppgtt, req);
695 		if (ret)
696 			goto unpin_out;
697 
698 		/* Doing a PD load always reloads the page dirs */
699 		to->ppgtt->pd_dirty_rings &= ~intel_ring_flag(ring);
700 	}
701 
702 	if (ring != &dev_priv->ring[RCS]) {
703 		if (from)
704 			i915_gem_context_unreference(from);
705 		goto done;
706 	}
707 
708 	/*
709 	 * Clear this page out of any CPU caches for coherent swap-in/out. Note
710 	 * that thanks to write = false in this call and us not setting any gpu
711 	 * write domains when putting a context object onto the active list
712 	 * (when switching away from it), this won't block.
713 	 *
714 	 * XXX: We need a real interface to do this instead of trickery.
715 	 */
716 	ret = i915_gem_object_set_to_gtt_domain(to->legacy_hw_ctx.rcs_state, false);
717 	if (ret)
718 		goto unpin_out;
719 
720 	if (!to->legacy_hw_ctx.initialized || i915_gem_context_is_default(to)) {
721 		hw_flags |= MI_RESTORE_INHIBIT;
722 		/* NB: If we inhibit the restore, the context is not allowed to
723 		 * die because future work may end up depending on valid address
724 		 * space. This means we must enforce that a page table load
725 		 * occur when this occurs. */
726 	} else if (to->ppgtt &&
727 		   (intel_ring_flag(ring) & to->ppgtt->pd_dirty_rings)) {
728 		hw_flags |= MI_FORCE_RESTORE;
729 		to->ppgtt->pd_dirty_rings &= ~intel_ring_flag(ring);
730 	}
731 
732 	/* We should never emit switch_mm more than once */
733 	WARN_ON(needs_pd_load_pre(ring, to) &&
734 		needs_pd_load_post(ring, to, hw_flags));
735 
736 	ret = mi_set_context(req, hw_flags);
737 	if (ret)
738 		goto unpin_out;
739 
740 	/* GEN8 does *not* require an explicit reload if the PDPs have been
741 	 * setup, and we do not wish to move them.
742 	 */
743 	if (needs_pd_load_post(ring, to, hw_flags)) {
744 		trace_switch_mm(ring, to);
745 		ret = to->ppgtt->switch_mm(to->ppgtt, req);
746 		/* The hardware context switch is emitted, but we haven't
747 		 * actually changed the state - so it's probably safe to bail
748 		 * here. Still, let the user know something dangerous has
749 		 * happened.
750 		 */
751 		if (ret) {
752 			DRM_ERROR("Failed to change address space on context switch\n");
753 			goto unpin_out;
754 		}
755 	}
756 
757 	for (i = 0; i < MAX_L3_SLICES; i++) {
758 		if (!(to->remap_slice & (1<<i)))
759 			continue;
760 
761 		ret = i915_gem_l3_remap(req, i);
762 		/* If it failed, try again next round */
763 		if (ret)
764 			DRM_DEBUG_DRIVER("L3 remapping failed\n");
765 		else
766 			to->remap_slice &= ~(1<<i);
767 	}
768 
769 	/* The backing object for the context is done after switching to the
770 	 * *next* context. Therefore we cannot retire the previous context until
771 	 * the next context has already started running. In fact, the below code
772 	 * is a bit suboptimal because the retiring can occur simply after the
773 	 * MI_SET_CONTEXT instead of when the next seqno has completed.
774 	 */
775 	if (from != NULL) {
776 		from->legacy_hw_ctx.rcs_state->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
777 		i915_vma_move_to_active(i915_gem_obj_to_ggtt(from->legacy_hw_ctx.rcs_state), req);
778 		/* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
779 		 * whole damn pipeline, we don't need to explicitly mark the
780 		 * object dirty. The only exception is that the context must be
781 		 * correct in case the object gets swapped out. Ideally we'd be
782 		 * able to defer doing this until we know the object would be
783 		 * swapped, but there is no way to do that yet.
784 		 */
785 		from->legacy_hw_ctx.rcs_state->dirty = 1;
786 
787 		/* obj is kept alive until the next request by its active ref */
788 		i915_gem_object_ggtt_unpin(from->legacy_hw_ctx.rcs_state);
789 		i915_gem_context_unreference(from);
790 	}
791 
792 	uninitialized = !to->legacy_hw_ctx.initialized;
793 	to->legacy_hw_ctx.initialized = true;
794 
795 done:
796 	i915_gem_context_reference(to);
797 	ring->last_context = to;
798 
799 	if (uninitialized) {
800 		if (ring->init_context) {
801 			ret = ring->init_context(req);
802 			if (ret)
803 				DRM_ERROR("ring init context: %d\n", ret);
804 		}
805 	}
806 
807 	return 0;
808 
809 unpin_out:
810 	if (ring->id == RCS)
811 		i915_gem_object_ggtt_unpin(to->legacy_hw_ctx.rcs_state);
812 	return ret;
813 }
814 
815 /**
816  * i915_switch_context() - perform a GPU context switch.
817  * @req: request for which we'll execute the context switch
818  *
819  * The context life cycle is simple. The context refcount is incremented and
820  * decremented by 1 on create and destroy. If the context is in use by the GPU,
821  * it will have a refcount > 1. This allows us to destroy the context abstract
822  * object while letting the normal object tracking destroy the backing BO.
823  *
824  * This function should not be used in execlists mode.  Instead the context is
825  * switched by writing to the ELSP and requests keep a reference to their
826  * context.
827  */
828 int i915_switch_context(struct drm_i915_gem_request *req)
829 {
830 	struct intel_engine_cs *ring = req->ring;
831 	struct drm_i915_private *dev_priv = ring->dev->dev_private;
832 
833 	WARN_ON(i915.enable_execlists);
834 	WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
835 
836 	if (req->ctx->legacy_hw_ctx.rcs_state == NULL) { /* We have the fake context */
837 		if (req->ctx != ring->last_context) {
838 			i915_gem_context_reference(req->ctx);
839 			if (ring->last_context)
840 				i915_gem_context_unreference(ring->last_context);
841 			ring->last_context = req->ctx;
842 		}
843 		return 0;
844 	}
845 
846 	return do_switch(req);
847 }
848 
849 static bool contexts_enabled(struct drm_device *dev)
850 {
851 	return i915.enable_execlists || to_i915(dev)->hw_context_size;
852 }
853 
854 int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
855 				  struct drm_file *file)
856 {
857 	struct drm_i915_gem_context_create *args = data;
858 	struct drm_i915_file_private *file_priv = file->driver_priv;
859 	struct intel_context *ctx;
860 	int ret;
861 
862 	if (!contexts_enabled(dev))
863 		return -ENODEV;
864 
865 	ret = i915_mutex_lock_interruptible(dev);
866 	if (ret)
867 		return ret;
868 
869 	ctx = i915_gem_create_context(dev, file_priv);
870 	mutex_unlock(&dev->struct_mutex);
871 	if (IS_ERR(ctx))
872 		return PTR_ERR(ctx);
873 
874 	args->ctx_id = ctx->user_handle;
875 	DRM_DEBUG_DRIVER("HW context %d created\n", args->ctx_id);
876 
877 	return 0;
878 }
879 
880 int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
881 				   struct drm_file *file)
882 {
883 	struct drm_i915_gem_context_destroy *args = data;
884 	struct drm_i915_file_private *file_priv = file->driver_priv;
885 	struct intel_context *ctx;
886 	int ret;
887 
888 	if (args->ctx_id == DEFAULT_CONTEXT_HANDLE)
889 		return -ENOENT;
890 
891 	ret = i915_mutex_lock_interruptible(dev);
892 	if (ret)
893 		return ret;
894 
895 	ctx = i915_gem_context_get(file_priv, args->ctx_id);
896 	if (IS_ERR(ctx)) {
897 		mutex_unlock(&dev->struct_mutex);
898 		return PTR_ERR(ctx);
899 	}
900 
901 	idr_remove(&ctx->file_priv->context_idr, ctx->user_handle);
902 	i915_gem_context_unreference(ctx);
903 	mutex_unlock(&dev->struct_mutex);
904 
905 	DRM_DEBUG_DRIVER("HW context %d destroyed\n", args->ctx_id);
906 	return 0;
907 }
908 
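/*
 * The two handlers below expose per-context tunables. As a hedged userspace
 * sketch (again assuming an open i915 fd and libdrm's drmIoctl() wrapper),
 * doubling a context's ban period might look like the following; note that
 * lowering it below the current value requires CAP_SYS_ADMIN:
 *
 *	struct drm_i915_gem_context_param p = {
 *		.ctx_id = ctx_id,
 *		.param = I915_CONTEXT_PARAM_BAN_PERIOD,
 *	};
 *
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM, &p);
 *	p.value *= 2;
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p);
 */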
909 int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
910 				    struct drm_file *file)
911 {
912 	struct drm_i915_file_private *file_priv = file->driver_priv;
913 	struct drm_i915_gem_context_param *args = data;
914 	struct intel_context *ctx;
915 	int ret;
916 
917 	ret = i915_mutex_lock_interruptible(dev);
918 	if (ret)
919 		return ret;
920 
921 	ctx = i915_gem_context_get(file_priv, args->ctx_id);
922 	if (IS_ERR(ctx)) {
923 		mutex_unlock(&dev->struct_mutex);
924 		return PTR_ERR(ctx);
925 	}
926 
927 	args->size = 0;
928 	switch (args->param) {
929 	case I915_CONTEXT_PARAM_BAN_PERIOD:
930 		args->value = ctx->hang_stats.ban_period_seconds;
931 		break;
932 	case I915_CONTEXT_PARAM_NO_ZEROMAP:
933 		args->value = ctx->flags & CONTEXT_NO_ZEROMAP;
934 		break;
935 	default:
936 		ret = -EINVAL;
937 		break;
938 	}
939 	mutex_unlock(&dev->struct_mutex);
940 
941 	return ret;
942 }
943 
944 int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
945 				    struct drm_file *file)
946 {
947 	struct drm_i915_file_private *file_priv = file->driver_priv;
948 	struct drm_i915_gem_context_param *args = data;
949 	struct intel_context *ctx;
950 	int ret;
951 
952 	ret = i915_mutex_lock_interruptible(dev);
953 	if (ret)
954 		return ret;
955 
956 	ctx = i915_gem_context_get(file_priv, args->ctx_id);
957 	if (IS_ERR(ctx)) {
958 		mutex_unlock(&dev->struct_mutex);
959 		return PTR_ERR(ctx);
960 	}
961 
962 	switch (args->param) {
963 	case I915_CONTEXT_PARAM_BAN_PERIOD:
964 		if (args->size)
965 			ret = -EINVAL;
966 		else if (args->value < ctx->hang_stats.ban_period_seconds &&
967 			 !capable(CAP_SYS_ADMIN))
968 			ret = -EPERM;
969 		else
970 			ctx->hang_stats.ban_period_seconds = args->value;
971 		break;
972 	case I915_CONTEXT_PARAM_NO_ZEROMAP:
973 		if (args->size) {
974 			ret = -EINVAL;
975 		} else {
976 			ctx->flags &= ~CONTEXT_NO_ZEROMAP;
977 			ctx->flags |= args->value ? CONTEXT_NO_ZEROMAP : 0;
978 		}
979 		break;
980 	default:
981 		ret = -EINVAL;
982 		break;
983 	}
984 	mutex_unlock(&dev->struct_mutex);
985 
986 	return ret;
987 }
988