1 /*
2  * SPDX-License-Identifier: MIT
3  *
4  * Copyright © 2011-2012 Intel Corporation
5  */
6 
7 /*
8  * This file implements HW context support. On gen5+ a HW context consists of an
9  * opaque GPU object which is referenced at times of context saves and restores.
10  * With RC6 enabled, the context is also referenced as the GPU enters and exits
11  * RC6 (the GPU has its own internal power context, except on gen5). Though
12  * something like a context does exist for the media ring, the code only
13  * supports contexts for the render ring.
14  *
15  * In software, there is a distinction between contexts created by the user,
16  * and the default HW context. The default HW context is used by GPU clients
17  * that do not request setup of their own hardware context. The default
18  * context's state is never restored to help prevent programming errors. This
19  * would happen if a client ran and piggy-backed off another client's GPU state.
20  * The default context only exists to give the GPU some offset to load as the
21  * current context, and so invoke a save of the one we actually care about. In fact, the
22  * code could likely be constructed, albeit in a more complicated fashion, to
23  * never use the default context, though that limits the driver's ability to
24  * swap out, and/or destroy other contexts.
25  *
26  * All other contexts are created as a request by the GPU client. These contexts
27  * store GPU state, and thus allow GPU clients to not re-emit state (and
28  * potentially query certain state) at any time. The kernel driver makes
29  * certain that the appropriate commands are inserted.
30  *
31  * The context life cycle is semi-complicated in that context BOs may live
32  * longer than the context itself because of the way the hardware, and object
33  * tracking works. Below is a very crude representation of the state machine
34  * describing the context life.
35  *                                         refcount     pincount     active
36  * S0: initial state                          0            0           0
37  * S1: context created                        1            0           0
38  * S2: context is currently running           2            1           X
39  * S3: GPU referenced, but not current        2            0           1
40  * S4: context is current, but destroyed      1            1           0
41  * S5: like S3, but destroyed                 1            0           1
42  *
43  * The most common (but not all) transitions:
44  * S0->S1: client creates a context
45  * S1->S2: client submits execbuf with context
46  * S2->S3: another client submits an execbuf with its own context
47  * S3->S1: context object was retired
48  * S3->S2: client submits another execbuf
49  * S2->S4: context destroy called with current context
50  * S3->S5->S0: destroy path
51  * S4->S5->S0: destroy path on current context
52  *
53  * There are two confusing terms used above:
54  *  The "current context" means the context which is currently running on the
55  *  GPU. The GPU has loaded its state already and has stored away the gtt
56  *  offset of the BO. The GPU is not actively referencing the data at this
57  *  offset, but it will on the next context switch. The only way to avoid this
58  *  is to do a GPU reset.
59  *
60  *  An "active context" is one which was previously the "current context" and is
61  *  on the active list waiting for the next context switch to occur. Until this
62  *  happens, the object must remain at the same gtt offset. It is therefore
63  *  possible to destroy a context, but it is still active.
64  *
65  */
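/*
 * A hedged, illustrative sketch (not driver code): a minimal userspace flow
 * that walks the state machine above via the context create/destroy ioctls
 * this file backs. The uAPI structs and ioctl numbers are those from
 * include/uapi/drm/i915_drm.h; error handling is omitted.
 *
 *	struct drm_i915_gem_context_create create = {};
 *	ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create);    // S0->S1
 *
 *	// execbufs referencing create.ctx_id move the context to S2/S3
 *
 *	struct drm_i915_gem_context_destroy destroy = {
 *		.ctx_id = create.ctx_id,
 *	};
 *	ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY, &destroy);  // towards S5->S0
 */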
66 
67 #include <linux/log2.h>
68 #include <linux/nospec.h>
69 
70 #include "gt/gen6_ppgtt.h"
71 #include "gt/intel_context.h"
72 #include "gt/intel_context_param.h"
73 #include "gt/intel_engine_heartbeat.h"
74 #include "gt/intel_engine_user.h"
75 #include "gt/intel_ring.h"
76 
77 #include "i915_gem_context.h"
78 #include "i915_globals.h"
79 #include "i915_trace.h"
80 #include "i915_user_extensions.h"
81 
82 #define ALL_L3_SLICES(dev) ((1 << NUM_L3_SLICES(dev)) - 1)
83 
84 static struct i915_global_gem_context {
85 	struct i915_global base;
86 	struct kmem_cache *slab_luts;
87 } global;
88 
89 struct i915_lut_handle *i915_lut_handle_alloc(void)
90 {
91 	return kmem_cache_alloc(global.slab_luts, GFP_KERNEL);
92 }
93 
94 void i915_lut_handle_free(struct i915_lut_handle *lut)
95 {
96 	return kmem_cache_free(global.slab_luts, lut);
97 }
98 
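/*
 * lut_close() tears down the context's handle->vma lookup table: each vma
 * cached in ctx->handles_vma is unlinked from its object's lut_list, the
 * radix tree slot is deleted, the vma is closed and the object reference
 * held by the lookup entry is dropped.
 */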
99 static void lut_close(struct i915_gem_context *ctx)
100 {
101 	struct radix_tree_iter iter;
102 	void __rcu **slot;
103 
104 	mutex_lock(&ctx->lut_mutex);
105 	rcu_read_lock();
106 	radix_tree_for_each_slot(slot, &ctx->handles_vma, &iter, 0) {
107 		struct i915_vma *vma = rcu_dereference_raw(*slot);
108 		struct drm_i915_gem_object *obj = vma->obj;
109 		struct i915_lut_handle *lut;
110 
111 		if (!kref_get_unless_zero(&obj->base.refcount))
112 			continue;
113 
114 		spin_lock(&obj->lut_lock);
115 		list_for_each_entry(lut, &obj->lut_list, obj_link) {
116 			if (lut->ctx != ctx)
117 				continue;
118 
119 			if (lut->handle != iter.index)
120 				continue;
121 
122 			list_del(&lut->obj_link);
123 			break;
124 		}
125 		spin_unlock(&obj->lut_lock);
126 
127 		if (&lut->obj_link != &obj->lut_list) {
128 			i915_lut_handle_free(lut);
129 			radix_tree_iter_delete(&ctx->handles_vma, &iter, slot);
130 			i915_vma_close(vma);
131 			i915_gem_object_put(obj);
132 		}
133 
134 		i915_gem_object_put(obj);
135 	}
136 	rcu_read_unlock();
137 	mutex_unlock(&ctx->lut_mutex);
138 }
139 
140 static struct intel_context *
141 lookup_user_engine(struct i915_gem_context *ctx,
142 		   unsigned long flags,
143 		   const struct i915_engine_class_instance *ci)
144 #define LOOKUP_USER_INDEX BIT(0)
145 {
146 	int idx;
147 
148 	if (!!(flags & LOOKUP_USER_INDEX) != i915_gem_context_user_engines(ctx))
149 		return ERR_PTR(-EINVAL);
150 
151 	if (!i915_gem_context_user_engines(ctx)) {
152 		struct intel_engine_cs *engine;
153 
154 		engine = intel_engine_lookup_user(ctx->i915,
155 						  ci->engine_class,
156 						  ci->engine_instance);
157 		if (!engine)
158 			return ERR_PTR(-EINVAL);
159 
160 		idx = engine->legacy_idx;
161 	} else {
162 		idx = ci->engine_instance;
163 	}
164 
165 	return i915_gem_context_get_engine(ctx, idx);
166 }
167 
168 static struct i915_address_space *
169 context_get_vm_rcu(struct i915_gem_context *ctx)
170 {
171 	GEM_BUG_ON(!rcu_access_pointer(ctx->vm));
172 
173 	do {
174 		struct i915_address_space *vm;
175 
176 		/*
177 		 * We do not allow downgrading from full-ppgtt [to a shared
178 		 * global gtt], so ctx->vm cannot become NULL.
179 		 */
180 		vm = rcu_dereference(ctx->vm);
181 		if (!kref_get_unless_zero(&vm->ref))
182 			continue;
183 
184 		/*
185 		 * This ppgtt may have been reallocated between
186 		 * the read and the kref, and reassigned to a third
187 		 * context. In order to avoid inadvertent sharing
188 		 * of this ppgtt with that third context (and not
189 		 * src), we have to confirm that we have the same
190 		 * ppgtt after passing through the strong memory
191 		 * barrier implied by a successful
192 		 * kref_get_unless_zero().
193 		 *
194 		 * Once we have acquired the current ppgtt of ctx,
195 		 * we no longer care if it is released from ctx, as
196 		 * it cannot be reallocated elsewhere.
197 		 */
198 
199 		if (vm == rcu_access_pointer(ctx->vm))
200 			return rcu_pointer_handoff(vm);
201 
202 		i915_vm_put(vm);
203 	} while (1);
204 }
205 
206 static void intel_context_set_gem(struct intel_context *ce,
207 				  struct i915_gem_context *ctx)
208 {
209 	GEM_BUG_ON(rcu_access_pointer(ce->gem_context));
210 	RCU_INIT_POINTER(ce->gem_context, ctx);
211 
212 	if (!test_bit(CONTEXT_ALLOC_BIT, &ce->flags))
213 		ce->ring = __intel_context_ring_size(SZ_16K);
214 
215 	if (rcu_access_pointer(ctx->vm)) {
216 		struct i915_address_space *vm;
217 
218 		rcu_read_lock();
219 		vm = context_get_vm_rcu(ctx); /* hmm */
220 		rcu_read_unlock();
221 
222 		i915_vm_put(ce->vm);
223 		ce->vm = vm;
224 	}
225 
226 	GEM_BUG_ON(ce->timeline);
227 	if (ctx->timeline)
228 		ce->timeline = intel_timeline_get(ctx->timeline);
229 
230 	if (ctx->sched.priority >= I915_PRIORITY_NORMAL &&
231 	    intel_engine_has_timeslices(ce->engine))
232 		__set_bit(CONTEXT_USE_SEMAPHORES, &ce->flags);
233 }
234 
235 static void __free_engines(struct i915_gem_engines *e, unsigned int count)
236 {
237 	while (count--) {
238 		if (!e->engines[count])
239 			continue;
240 
241 		intel_context_put(e->engines[count]);
242 	}
243 	kfree(e);
244 }
245 
246 static void free_engines(struct i915_gem_engines *e)
247 {
248 	__free_engines(e, e->num_engines);
249 }
250 
251 static void free_engines_rcu(struct rcu_head *rcu)
252 {
253 	struct i915_gem_engines *engines =
254 		container_of(rcu, struct i915_gem_engines, rcu);
255 
256 	i915_sw_fence_fini(&engines->fence);
257 	free_engines(engines);
258 }
259 
260 static int __i915_sw_fence_call
261 engines_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
262 {
263 	struct i915_gem_engines *engines =
264 		container_of(fence, typeof(*engines), fence);
265 
266 	switch (state) {
267 	case FENCE_COMPLETE:
268 		if (!list_empty(&engines->link)) {
269 			struct i915_gem_context *ctx = engines->ctx;
270 			unsigned long flags;
271 
272 			spin_lock_irqsave(&ctx->stale.lock, flags);
273 			list_del(&engines->link);
274 			spin_unlock_irqrestore(&ctx->stale.lock, flags);
275 		}
276 		i915_gem_context_put(engines->ctx);
277 		break;
278 
279 	case FENCE_FREE:
280 		init_rcu_head(&engines->rcu);
281 		call_rcu(&engines->rcu, free_engines_rcu);
282 		break;
283 	}
284 
285 	return NOTIFY_DONE;
286 }
287 
288 static struct i915_gem_engines *alloc_engines(unsigned int count)
289 {
290 	struct i915_gem_engines *e;
291 
292 	e = kzalloc(struct_size(e, engines, count), GFP_KERNEL);
293 	if (!e)
294 		return NULL;
295 
296 	i915_sw_fence_init(&e->fence, engines_notify);
297 	return e;
298 }
299 
300 static struct i915_gem_engines *default_engines(struct i915_gem_context *ctx)
301 {
302 	const struct intel_gt *gt = &ctx->i915->gt;
303 	struct intel_engine_cs *engine;
304 	struct i915_gem_engines *e;
305 	enum intel_engine_id id;
306 
307 	e = alloc_engines(I915_NUM_ENGINES);
308 	if (!e)
309 		return ERR_PTR(-ENOMEM);
310 
311 	for_each_engine(engine, gt, id) {
312 		struct intel_context *ce;
313 
314 		if (engine->legacy_idx == INVALID_ENGINE)
315 			continue;
316 
317 		GEM_BUG_ON(engine->legacy_idx >= I915_NUM_ENGINES);
318 		GEM_BUG_ON(e->engines[engine->legacy_idx]);
319 
320 		ce = intel_context_create(engine);
321 		if (IS_ERR(ce)) {
322 			__free_engines(e, e->num_engines + 1);
323 			return ERR_CAST(ce);
324 		}
325 
326 		intel_context_set_gem(ce, ctx);
327 
328 		e->engines[engine->legacy_idx] = ce;
329 		e->num_engines = max(e->num_engines, engine->legacy_idx);
330 	}
331 	e->num_engines++;
332 
333 	return e;
334 }
335 
336 static void i915_gem_context_free(struct i915_gem_context *ctx)
337 {
338 	GEM_BUG_ON(!i915_gem_context_is_closed(ctx));
339 
340 	spin_lock(&ctx->i915->gem.contexts.lock);
341 	list_del(&ctx->link);
342 	spin_unlock(&ctx->i915->gem.contexts.lock);
343 
344 	mutex_destroy(&ctx->engines_mutex);
345 	mutex_destroy(&ctx->lut_mutex);
346 
347 	if (ctx->timeline)
348 		intel_timeline_put(ctx->timeline);
349 
350 	put_pid(ctx->pid);
351 	mutex_destroy(&ctx->mutex);
352 
353 	kfree_rcu(ctx, rcu);
354 }
355 
356 static void contexts_free_all(struct llist_node *list)
357 {
358 	struct i915_gem_context *ctx, *cn;
359 
360 	llist_for_each_entry_safe(ctx, cn, list, free_link)
361 		i915_gem_context_free(ctx);
362 }
363 
364 static void contexts_flush_free(struct i915_gem_contexts *gc)
365 {
366 	contexts_free_all(llist_del_all(&gc->free_list));
367 }
368 
369 static void contexts_free_worker(struct work_struct *work)
370 {
371 	struct i915_gem_contexts *gc =
372 		container_of(work, typeof(*gc), free_work);
373 
374 	contexts_flush_free(gc);
375 }
376 
377 void i915_gem_context_release(struct kref *ref)
378 {
379 	struct i915_gem_context *ctx = container_of(ref, typeof(*ctx), ref);
380 	struct i915_gem_contexts *gc = &ctx->i915->gem.contexts;
381 
382 	trace_i915_context_free(ctx);
383 	if (llist_add(&ctx->free_link, &gc->free_list))
384 		schedule_work(&gc->free_work);
385 }
386 
387 static inline struct i915_gem_engines *
388 __context_engines_static(const struct i915_gem_context *ctx)
389 {
390 	return rcu_dereference_protected(ctx->engines, true);
391 }
392 
393 static void __reset_context(struct i915_gem_context *ctx,
394 			    struct intel_engine_cs *engine)
395 {
396 	intel_gt_handle_error(engine->gt, engine->mask, 0,
397 			      "context closure in %s", ctx->name);
398 }
399 
400 static bool __cancel_engine(struct intel_engine_cs *engine)
401 {
402 	/*
403 	 * Send a "high priority pulse" down the engine to cause the
404 	 * current request to be momentarily preempted. (If it fails to
405 	 * be preempted, it will be reset). As we have marked our context
406 	 * as banned, any incomplete request, including any running, will
407 	 * be skipped following the preemption.
408 	 *
409 	 * If there is no hangchecking (one of the reasons why we try to
410 	 * cancel the context) and no forced preemption, there may be no
411 	 * means by which we reset the GPU and evict the persistent hog.
412 	 * Ergo if we are unable to inject a preemptive pulse that can
413 	 * kill the banned context, we fallback to doing a local reset
414 	 * instead.
415 	 */
416 	return intel_engine_pulse(engine) == 0;
417 }
418 
419 static bool
420 __active_engine(struct i915_request *rq, struct intel_engine_cs **active)
421 {
422 	struct intel_engine_cs *engine, *locked;
423 	bool ret = false;
424 
425 	/*
426 	 * Serialise with __i915_request_submit() so that it sees
427 	 * is-banned?, or we know the request is already inflight.
428 	 *
429 	 * Note that rq->engine is unstable, and so we double
430 	 * check that we have acquired the lock on the final engine.
431 	 */
432 	locked = READ_ONCE(rq->engine);
433 	spin_lock_irq(&locked->active.lock);
434 	while (unlikely(locked != (engine = READ_ONCE(rq->engine)))) {
435 		spin_unlock(&locked->active.lock);
436 		locked = engine;
437 		spin_lock(&locked->active.lock);
438 	}
439 
440 	if (i915_request_is_active(rq)) {
441 		if (!i915_request_completed(rq))
442 			*active = locked;
443 		ret = true;
444 	}
445 
446 	spin_unlock_irq(&locked->active.lock);
447 
448 	return ret;
449 }
450 
451 static struct intel_engine_cs *active_engine(struct intel_context *ce)
452 {
453 	struct intel_engine_cs *engine = NULL;
454 	struct i915_request *rq;
455 
456 	if (!ce->timeline)
457 		return NULL;
458 
459 	/*
460 	 * rq->link is only SLAB_TYPESAFE_BY_RCU, we need to hold a reference
461 	 * to the request to prevent it being transferred to a new timeline
462 	 * (and onto a new timeline->requests list).
463 	 */
464 	rcu_read_lock();
465 	list_for_each_entry_reverse(rq, &ce->timeline->requests, link) {
466 		bool found;
467 
468 		/* timeline is already completed up to this point? */
469 		if (!i915_request_get_rcu(rq))
470 			break;
471 
472 		/* Check with the backend if the request is inflight */
473 		found = true;
474 		if (likely(rcu_access_pointer(rq->timeline) == ce->timeline))
475 			found = __active_engine(rq, &engine);
476 
477 		i915_request_put(rq);
478 		if (found)
479 			break;
480 	}
481 	rcu_read_unlock();
482 
483 	return engine;
484 }
485 
486 static void kill_engines(struct i915_gem_engines *engines, bool ban)
487 {
488 	struct i915_gem_engines_iter it;
489 	struct intel_context *ce;
490 
491 	/*
492 	 * Map the user's engine back to the actual engines; one virtual
493 	 * engine will be mapped to multiple engines, and using ctx->engine[]
494 	 * the same engine may have multiple instances in the user's map.
495 	 * However, we only care about pending requests, so only include
496 	 * engines on which there are incomplete requests.
497 	 */
498 	for_each_gem_engine(ce, engines, it) {
499 		struct intel_engine_cs *engine;
500 
501 		if (ban && intel_context_set_banned(ce))
502 			continue;
503 
504 		/*
505 		 * Check the current active state of this context; if we
506 		 * are currently executing on the GPU we need to evict
507 		 * ourselves. On the other hand, if we haven't yet been
508 		 * submitted to the GPU or if everything is complete,
509 		 * we have nothing to do.
510 		 */
511 		engine = active_engine(ce);
512 
513 		/* First attempt to gracefully cancel the context */
514 		if (engine && !__cancel_engine(engine) && ban)
515 			/*
516 			 * If we are unable to send a preemptive pulse to bump
517 			 * the context from the GPU, we have to resort to a full
518 			 * reset. We hope the collateral damage is worth it.
519 			 */
520 			__reset_context(engines->ctx, engine);
521 	}
522 }
523 
524 static void kill_context(struct i915_gem_context *ctx)
525 {
526 	bool ban = (!i915_gem_context_is_persistent(ctx) ||
527 		    !ctx->i915->params.enable_hangcheck);
528 	struct i915_gem_engines *pos, *next;
529 
530 	spin_lock_irq(&ctx->stale.lock);
531 	GEM_BUG_ON(!i915_gem_context_is_closed(ctx));
532 	list_for_each_entry_safe(pos, next, &ctx->stale.engines, link) {
533 		if (!i915_sw_fence_await(&pos->fence)) {
534 			list_del_init(&pos->link);
535 			continue;
536 		}
537 
538 		spin_unlock_irq(&ctx->stale.lock);
539 
540 		kill_engines(pos, ban);
541 
542 		spin_lock_irq(&ctx->stale.lock);
543 		GEM_BUG_ON(i915_sw_fence_signaled(&pos->fence));
544 		list_safe_reset_next(pos, next, link);
545 		list_del_init(&pos->link); /* decouple from FENCE_COMPLETE */
546 
547 		i915_sw_fence_complete(&pos->fence);
548 	}
549 	spin_unlock_irq(&ctx->stale.lock);
550 }
551 
552 static void engines_idle_release(struct i915_gem_context *ctx,
553 				 struct i915_gem_engines *engines)
554 {
555 	struct i915_gem_engines_iter it;
556 	struct intel_context *ce;
557 
558 	INIT_LIST_HEAD(&engines->link);
559 
560 	engines->ctx = i915_gem_context_get(ctx);
561 
562 	for_each_gem_engine(ce, engines, it) {
563 		int err;
564 
565 		/* serialises with execbuf */
566 		set_bit(CONTEXT_CLOSED_BIT, &ce->flags);
567 		if (!intel_context_pin_if_active(ce))
568 			continue;
569 
570 		/* Wait until context is finally scheduled out and retired */
571 		err = i915_sw_fence_await_active(&engines->fence,
572 						 &ce->active,
573 						 I915_ACTIVE_AWAIT_BARRIER);
574 		intel_context_unpin(ce);
575 		if (err)
576 			goto kill;
577 	}
578 
579 	spin_lock_irq(&ctx->stale.lock);
580 	if (!i915_gem_context_is_closed(ctx))
581 		list_add_tail(&engines->link, &ctx->stale.engines);
582 	spin_unlock_irq(&ctx->stale.lock);
583 
584 kill:
585 	if (list_empty(&engines->link)) /* raced, already closed */
586 		kill_engines(engines, true);
587 
588 	i915_sw_fence_commit(&engines->fence);
589 }
590 
591 static void set_closed_name(struct i915_gem_context *ctx)
592 {
593 	char *s;
594 
595 	/* Replace '[]' with '<>' to indicate closed in debug prints */
596 
597 	s = strrchr(ctx->name, '[');
598 	if (!s)
599 		return;
600 
601 	*s = '<';
602 
603 	s = strchr(s + 1, ']');
604 	if (s)
605 		*s = '>';
606 }
607 
608 static void context_close(struct i915_gem_context *ctx)
609 {
610 	struct i915_address_space *vm;
611 
612 	/* Flush any concurrent set_engines() */
613 	mutex_lock(&ctx->engines_mutex);
614 	engines_idle_release(ctx, rcu_replace_pointer(ctx->engines, NULL, 1));
615 	i915_gem_context_set_closed(ctx);
616 	mutex_unlock(&ctx->engines_mutex);
617 
618 	mutex_lock(&ctx->mutex);
619 
620 	set_closed_name(ctx);
621 
622 	vm = i915_gem_context_vm(ctx);
623 	if (vm)
624 		i915_vm_close(vm);
625 
626 	ctx->file_priv = ERR_PTR(-EBADF);
627 
628 	/*
629 	 * The LUT uses the VMA as a backpointer to unref the object,
630 	 * so we need to clear the LUT before we close all the VMA (inside
631 	 * the ppgtt).
632 	 */
633 	lut_close(ctx);
634 
635 	mutex_unlock(&ctx->mutex);
636 
637 	/*
638 	 * If the user has disabled hangchecking, we can not be sure that
639 	 * the batches will ever complete after the context is closed,
640 	 * keeping the context and all resources pinned forever. So in this
641 	 * case we opt to forcibly kill off all remaining requests on
642 	 * context close.
643 	 */
644 	kill_context(ctx);
645 
646 	i915_gem_context_put(ctx);
647 }
648 
649 static int __context_set_persistence(struct i915_gem_context *ctx, bool state)
650 {
651 	if (i915_gem_context_is_persistent(ctx) == state)
652 		return 0;
653 
654 	if (state) {
655 		/*
656 		 * Only contexts that are short-lived [that will expire or be
657 		 * reset] are allowed to survive past termination. We require
658 		 * hangcheck to ensure that the persistent requests are healthy.
659 		 */
660 		if (!ctx->i915->params.enable_hangcheck)
661 			return -EINVAL;
662 
663 		i915_gem_context_set_persistence(ctx);
664 	} else {
665 		/* To cancel a context we use "preempt-to-idle" */
666 		if (!(ctx->i915->caps.scheduler & I915_SCHEDULER_CAP_PREEMPTION))
667 			return -ENODEV;
668 
669 		/*
670 		 * If the cancel fails, we then need to reset, cleanly!
671 		 *
672 		 * If the per-engine reset fails, all hope is lost! We resort
673 		 * to a full GPU reset in that unlikely case, but realistically
674 		 * if the engine could not reset, the full reset does not fare
675 		 * much better. The damage has been done.
676 		 *
677 		 * However, if we cannot reset an engine by itself, we cannot
678 		 * cleanup a hanging persistent context without causing
679 		 * collateral damage, and we should not pretend we can by
680 		 * exposing the interface.
681 		 */
682 		if (!intel_has_reset_engine(&ctx->i915->gt))
683 			return -ENODEV;
684 
685 		i915_gem_context_clear_persistence(ctx);
686 	}
687 
688 	return 0;
689 }
690 
691 static struct i915_gem_context *
692 __create_context(struct drm_i915_private *i915)
693 {
694 	struct i915_gem_context *ctx;
695 	struct i915_gem_engines *e;
696 	int err;
697 	int i;
698 
699 	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
700 	if (!ctx)
701 		return ERR_PTR(-ENOMEM);
702 
703 	kref_init(&ctx->ref);
704 	ctx->i915 = i915;
705 	ctx->sched.priority = I915_USER_PRIORITY(I915_PRIORITY_NORMAL);
706 	mutex_init(&ctx->mutex);
707 	INIT_LIST_HEAD(&ctx->link);
708 
709 	spin_lock_init(&ctx->stale.lock);
710 	INIT_LIST_HEAD(&ctx->stale.engines);
711 
712 	mutex_init(&ctx->engines_mutex);
713 	e = default_engines(ctx);
714 	if (IS_ERR(e)) {
715 		err = PTR_ERR(e);
716 		goto err_free;
717 	}
718 	RCU_INIT_POINTER(ctx->engines, e);
719 
720 	INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL);
721 	mutex_init(&ctx->lut_mutex);
722 
723 	/* NB: Mark all slices as needing a remap so that when the context first
724 	 * loads it will restore whatever remap state already exists. If there
725 	 * is no remap info, it will be a NOP. */
726 	ctx->remap_slice = ALL_L3_SLICES(i915);
727 
728 	i915_gem_context_set_bannable(ctx);
729 	i915_gem_context_set_recoverable(ctx);
730 	__context_set_persistence(ctx, true /* cgroup hook? */);
731 
732 	for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp); i++)
733 		ctx->hang_timestamp[i] = jiffies - CONTEXT_FAST_HANG_JIFFIES;
734 
735 	return ctx;
736 
737 err_free:
738 	kfree(ctx);
739 	return ERR_PTR(err);
740 }
741 
742 static inline struct i915_gem_engines *
743 __context_engines_await(const struct i915_gem_context *ctx)
744 {
745 	struct i915_gem_engines *engines;
746 
747 	rcu_read_lock();
748 	do {
749 		engines = rcu_dereference(ctx->engines);
750 		GEM_BUG_ON(!engines);
751 
752 		if (unlikely(!i915_sw_fence_await(&engines->fence)))
753 			continue;
754 
755 		if (likely(engines == rcu_access_pointer(ctx->engines)))
756 			break;
757 
758 		i915_sw_fence_complete(&engines->fence);
759 	} while (1);
760 	rcu_read_unlock();
761 
762 	return engines;
763 }
764 
765 static int
766 context_apply_all(struct i915_gem_context *ctx,
767 		  int (*fn)(struct intel_context *ce, void *data),
768 		  void *data)
769 {
770 	struct i915_gem_engines_iter it;
771 	struct i915_gem_engines *e;
772 	struct intel_context *ce;
773 	int err = 0;
774 
775 	e = __context_engines_await(ctx);
776 	for_each_gem_engine(ce, e, it) {
777 		err = fn(ce, data);
778 		if (err)
779 			break;
780 	}
781 	i915_sw_fence_complete(&e->fence);
782 
783 	return err;
784 }
785 
786 static int __apply_ppgtt(struct intel_context *ce, void *vm)
787 {
788 	i915_vm_put(ce->vm);
789 	ce->vm = i915_vm_get(vm);
790 	return 0;
791 }
792 
793 static struct i915_address_space *
794 __set_ppgtt(struct i915_gem_context *ctx, struct i915_address_space *vm)
795 {
796 	struct i915_address_space *old;
797 
798 	old = rcu_replace_pointer(ctx->vm,
799 				  i915_vm_open(vm),
800 				  lockdep_is_held(&ctx->mutex));
801 	GEM_BUG_ON(old && i915_vm_is_4lvl(vm) != i915_vm_is_4lvl(old));
802 
803 	context_apply_all(ctx, __apply_ppgtt, vm);
804 
805 	return old;
806 }
807 
808 static void __assign_ppgtt(struct i915_gem_context *ctx,
809 			   struct i915_address_space *vm)
810 {
811 	if (vm == rcu_access_pointer(ctx->vm))
812 		return;
813 
814 	vm = __set_ppgtt(ctx, vm);
815 	if (vm)
816 		i915_vm_close(vm);
817 }
818 
819 static void __set_timeline(struct intel_timeline **dst,
820 			   struct intel_timeline *src)
821 {
822 	struct intel_timeline *old = *dst;
823 
824 	*dst = src ? intel_timeline_get(src) : NULL;
825 
826 	if (old)
827 		intel_timeline_put(old);
828 }
829 
830 static int __apply_timeline(struct intel_context *ce, void *timeline)
831 {
832 	__set_timeline(&ce->timeline, timeline);
833 	return 0;
834 }
835 
836 static void __assign_timeline(struct i915_gem_context *ctx,
837 			      struct intel_timeline *timeline)
838 {
839 	__set_timeline(&ctx->timeline, timeline);
840 	context_apply_all(ctx, __apply_timeline, timeline);
841 }
842 
843 static struct i915_gem_context *
844 i915_gem_create_context(struct drm_i915_private *i915, unsigned int flags)
845 {
846 	struct i915_gem_context *ctx;
847 
848 	if (flags & I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE &&
849 	    !HAS_EXECLISTS(i915))
850 		return ERR_PTR(-EINVAL);
851 
852 	/* Reap the stale contexts */
853 	contexts_flush_free(&i915->gem.contexts);
854 
855 	ctx = __create_context(i915);
856 	if (IS_ERR(ctx))
857 		return ctx;
858 
859 	if (HAS_FULL_PPGTT(i915)) {
860 		struct i915_ppgtt *ppgtt;
861 
862 		ppgtt = i915_ppgtt_create(&i915->gt);
863 		if (IS_ERR(ppgtt)) {
864 			drm_dbg(&i915->drm, "PPGTT setup failed (%ld)\n",
865 				PTR_ERR(ppgtt));
866 			context_close(ctx);
867 			return ERR_CAST(ppgtt);
868 		}
869 
870 		mutex_lock(&ctx->mutex);
871 		__assign_ppgtt(ctx, &ppgtt->vm);
872 		mutex_unlock(&ctx->mutex);
873 
874 		i915_vm_put(&ppgtt->vm);
875 	}
876 
877 	if (flags & I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE) {
878 		struct intel_timeline *timeline;
879 
880 		timeline = intel_timeline_create(&i915->gt);
881 		if (IS_ERR(timeline)) {
882 			context_close(ctx);
883 			return ERR_CAST(timeline);
884 		}
885 
886 		__assign_timeline(ctx, timeline);
887 		intel_timeline_put(timeline);
888 	}
889 
890 	trace_i915_context_create(ctx);
891 
892 	return ctx;
893 }
894 
895 static void init_contexts(struct i915_gem_contexts *gc)
896 {
897 	spin_lock_init(&gc->lock);
898 	INIT_LIST_HEAD(&gc->list);
899 
900 	INIT_WORK(&gc->free_work, contexts_free_worker);
901 	init_llist_head(&gc->free_list);
902 }
903 
904 void i915_gem_init__contexts(struct drm_i915_private *i915)
905 {
906 	init_contexts(&i915->gem.contexts);
907 	drm_dbg(&i915->drm, "%s context support initialized\n",
908 		DRIVER_CAPS(i915)->has_logical_contexts ?
909 		"logical" : "fake");
910 }
911 
912 void i915_gem_driver_release__contexts(struct drm_i915_private *i915)
913 {
914 	flush_work(&i915->gem.contexts.free_work);
915 	rcu_barrier(); /* and flush the left over RCU frees */
916 }
917 
918 static int gem_context_register(struct i915_gem_context *ctx,
919 				struct drm_i915_file_private *fpriv,
920 				u32 *id)
921 {
922 	struct drm_i915_private *i915 = ctx->i915;
923 	struct i915_address_space *vm;
924 	int ret;
925 
926 	ctx->file_priv = fpriv;
927 
928 	mutex_lock(&ctx->mutex);
929 	vm = i915_gem_context_vm(ctx);
930 	if (vm)
931 		WRITE_ONCE(vm->file, fpriv); /* XXX */
932 	mutex_unlock(&ctx->mutex);
933 
934 	ctx->pid = get_task_pid(current, PIDTYPE_PID);
935 	snprintf(ctx->name, sizeof(ctx->name), "%s[%d]",
936 		 current->comm, pid_nr(ctx->pid));
937 
938 	/* And finally expose ourselves to userspace via the idr */
939 	ret = xa_alloc(&fpriv->context_xa, id, ctx, xa_limit_32b, GFP_KERNEL);
940 	if (ret)
941 		goto err_pid;
942 
943 	spin_lock(&i915->gem.contexts.lock);
944 	list_add_tail(&ctx->link, &i915->gem.contexts.list);
945 	spin_unlock(&i915->gem.contexts.lock);
946 
947 	return 0;
948 
949 err_pid:
950 	put_pid(fetch_and_zero(&ctx->pid));
951 	return ret;
952 }
953 
954 int i915_gem_context_open(struct drm_i915_private *i915,
955 			  struct drm_file *file)
956 {
957 	struct drm_i915_file_private *file_priv = file->driver_priv;
958 	struct i915_gem_context *ctx;
959 	int err;
960 	u32 id;
961 
962 	xa_init_flags(&file_priv->context_xa, XA_FLAGS_ALLOC);
963 
964 	/* 0 reserved for invalid/unassigned ppgtt */
965 	xa_init_flags(&file_priv->vm_xa, XA_FLAGS_ALLOC1);
966 
967 	ctx = i915_gem_create_context(i915, 0);
968 	if (IS_ERR(ctx)) {
969 		err = PTR_ERR(ctx);
970 		goto err;
971 	}
972 
973 	err = gem_context_register(ctx, file_priv, &id);
974 	if (err < 0)
975 		goto err_ctx;
976 
977 	GEM_BUG_ON(id);
978 	return 0;
979 
980 err_ctx:
981 	context_close(ctx);
982 err:
983 	xa_destroy(&file_priv->vm_xa);
984 	xa_destroy(&file_priv->context_xa);
985 	return err;
986 }
987 
988 void i915_gem_context_close(struct drm_file *file)
989 {
990 	struct drm_i915_file_private *file_priv = file->driver_priv;
991 	struct drm_i915_private *i915 = file_priv->dev_priv;
992 	struct i915_address_space *vm;
993 	struct i915_gem_context *ctx;
994 	unsigned long idx;
995 
996 	xa_for_each(&file_priv->context_xa, idx, ctx)
997 		context_close(ctx);
998 	xa_destroy(&file_priv->context_xa);
999 
1000 	xa_for_each(&file_priv->vm_xa, idx, vm)
1001 		i915_vm_put(vm);
1002 	xa_destroy(&file_priv->vm_xa);
1003 
1004 	contexts_flush_free(&i915->gem.contexts);
1005 }
1006 
1007 int i915_gem_vm_create_ioctl(struct drm_device *dev, void *data,
1008 			     struct drm_file *file)
1009 {
1010 	struct drm_i915_private *i915 = to_i915(dev);
1011 	struct drm_i915_gem_vm_control *args = data;
1012 	struct drm_i915_file_private *file_priv = file->driver_priv;
1013 	struct i915_ppgtt *ppgtt;
1014 	u32 id;
1015 	int err;
1016 
1017 	if (!HAS_FULL_PPGTT(i915))
1018 		return -ENODEV;
1019 
1020 	if (args->flags)
1021 		return -EINVAL;
1022 
1023 	ppgtt = i915_ppgtt_create(&i915->gt);
1024 	if (IS_ERR(ppgtt))
1025 		return PTR_ERR(ppgtt);
1026 
1027 	ppgtt->vm.file = file_priv;
1028 
1029 	if (args->extensions) {
1030 		err = i915_user_extensions(u64_to_user_ptr(args->extensions),
1031 					   NULL, 0,
1032 					   ppgtt);
1033 		if (err)
1034 			goto err_put;
1035 	}
1036 
1037 	err = xa_alloc(&file_priv->vm_xa, &id, &ppgtt->vm,
1038 		       xa_limit_32b, GFP_KERNEL);
1039 	if (err)
1040 		goto err_put;
1041 
1042 	GEM_BUG_ON(id == 0); /* reserved for invalid/unassigned ppgtt */
1043 	args->vm_id = id;
1044 	return 0;
1045 
1046 err_put:
1047 	i915_vm_put(&ppgtt->vm);
1048 	return err;
1049 }
1050 
1051 int i915_gem_vm_destroy_ioctl(struct drm_device *dev, void *data,
1052 			      struct drm_file *file)
1053 {
1054 	struct drm_i915_file_private *file_priv = file->driver_priv;
1055 	struct drm_i915_gem_vm_control *args = data;
1056 	struct i915_address_space *vm;
1057 
1058 	if (args->flags)
1059 		return -EINVAL;
1060 
1061 	if (args->extensions)
1062 		return -EINVAL;
1063 
1064 	vm = xa_erase(&file_priv->vm_xa, args->vm_id);
1065 	if (!vm)
1066 		return -ENOENT;
1067 
1068 	i915_vm_put(vm);
1069 	return 0;
1070 }
1071 
1072 struct context_barrier_task {
1073 	struct i915_active base;
1074 	void (*task)(void *data);
1075 	void *data;
1076 };
1077 
1078 __i915_active_call
1079 static void cb_retire(struct i915_active *base)
1080 {
1081 	struct context_barrier_task *cb = container_of(base, typeof(*cb), base);
1082 
1083 	if (cb->task)
1084 		cb->task(cb->data);
1085 
1086 	i915_active_fini(&cb->base);
1087 	kfree(cb);
1088 }
1089 
1090 I915_SELFTEST_DECLARE(static intel_engine_mask_t context_barrier_inject_fault);
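/*
 * context_barrier_task(): emit a request on each of the context's engines
 * (filtered by @engines and the optional @skip callback) and arrange for
 * @task to be invoked once all of those requests have retired, i.e. once
 * every engine has switched away from the state being replaced.
 */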
1091 static int context_barrier_task(struct i915_gem_context *ctx,
1092 				intel_engine_mask_t engines,
1093 				bool (*skip)(struct intel_context *ce, void *data),
1094 				int (*pin)(struct intel_context *ce, struct i915_gem_ww_ctx *ww, void *data),
1095 				int (*emit)(struct i915_request *rq, void *data),
1096 				void (*task)(void *data),
1097 				void *data)
1098 {
1099 	struct context_barrier_task *cb;
1100 	struct i915_gem_engines_iter it;
1101 	struct i915_gem_engines *e;
1102 	struct i915_gem_ww_ctx ww;
1103 	struct intel_context *ce;
1104 	int err = 0;
1105 
1106 	GEM_BUG_ON(!task);
1107 
1108 	cb = kmalloc(sizeof(*cb), GFP_KERNEL);
1109 	if (!cb)
1110 		return -ENOMEM;
1111 
1112 	i915_active_init(&cb->base, NULL, cb_retire);
1113 	err = i915_active_acquire(&cb->base);
1114 	if (err) {
1115 		kfree(cb);
1116 		return err;
1117 	}
1118 
1119 	e = __context_engines_await(ctx);
1120 	if (!e) {
1121 		i915_active_release(&cb->base);
1122 		return -ENOENT;
1123 	}
1124 
1125 	for_each_gem_engine(ce, e, it) {
1126 		struct i915_request *rq;
1127 
1128 		if (I915_SELFTEST_ONLY(context_barrier_inject_fault &
1129 				       ce->engine->mask)) {
1130 			err = -ENXIO;
1131 			break;
1132 		}
1133 
1134 		if (!(ce->engine->mask & engines))
1135 			continue;
1136 
1137 		if (skip && skip(ce, data))
1138 			continue;
1139 
1140 		i915_gem_ww_ctx_init(&ww, true);
1141 retry:
1142 		err = intel_context_pin_ww(ce, &ww);
1143 		if (err)
1144 			goto err;
1145 
1146 		if (pin)
1147 			err = pin(ce, &ww, data);
1148 		if (err)
1149 			goto err_unpin;
1150 
1151 		rq = i915_request_create(ce);
1152 		if (IS_ERR(rq)) {
1153 			err = PTR_ERR(rq);
1154 			goto err_unpin;
1155 		}
1156 
1157 		err = 0;
1158 		if (emit)
1159 			err = emit(rq, data);
1160 		if (err == 0)
1161 			err = i915_active_add_request(&cb->base, rq);
1162 
1163 		i915_request_add(rq);
1164 err_unpin:
1165 		intel_context_unpin(ce);
1166 err:
1167 		if (err == -EDEADLK) {
1168 			err = i915_gem_ww_ctx_backoff(&ww);
1169 			if (!err)
1170 				goto retry;
1171 		}
1172 		i915_gem_ww_ctx_fini(&ww);
1173 
1174 		if (err)
1175 			break;
1176 	}
1177 	i915_sw_fence_complete(&e->fence);
1178 
1179 	cb->task = err ? NULL : task; /* caller needs to unwind instead */
1180 	cb->data = data;
1181 
1182 	i915_active_release(&cb->base);
1183 
1184 	return err;
1185 }
1186 
1187 static int get_ppgtt(struct drm_i915_file_private *file_priv,
1188 		     struct i915_gem_context *ctx,
1189 		     struct drm_i915_gem_context_param *args)
1190 {
1191 	struct i915_address_space *vm;
1192 	int err;
1193 	u32 id;
1194 
1195 	if (!rcu_access_pointer(ctx->vm))
1196 		return -ENODEV;
1197 
1198 	rcu_read_lock();
1199 	vm = context_get_vm_rcu(ctx);
1200 	rcu_read_unlock();
1201 	if (!vm)
1202 		return -ENODEV;
1203 
1204 	err = xa_alloc(&file_priv->vm_xa, &id, vm, xa_limit_32b, GFP_KERNEL);
1205 	if (err)
1206 		goto err_put;
1207 
1208 	i915_vm_open(vm);
1209 
1210 	GEM_BUG_ON(id == 0); /* reserved for invalid/unassigned ppgtt */
1211 	args->value = id;
1212 	args->size = 0;
1213 
1214 err_put:
1215 	i915_vm_put(vm);
1216 	return err;
1217 }
1218 
1219 static void set_ppgtt_barrier(void *data)
1220 {
1221 	struct i915_address_space *old = data;
1222 
1223 	if (INTEL_GEN(old->i915) < 8)
1224 		gen6_ppgtt_unpin_all(i915_vm_to_ppgtt(old));
1225 
1226 	i915_vm_close(old);
1227 }
1228 
1229 static int pin_ppgtt_update(struct intel_context *ce, struct i915_gem_ww_ctx *ww, void *data)
1230 {
1231 	struct i915_address_space *vm = ce->vm;
1232 
1233 	if (!HAS_LOGICAL_RING_CONTEXTS(vm->i915))
1234 		/* ppGTT is not part of the legacy context image */
1235 		return gen6_ppgtt_pin(i915_vm_to_ppgtt(vm), ww);
1236 
1237 	return 0;
1238 }
1239 
1240 static int emit_ppgtt_update(struct i915_request *rq, void *data)
1241 {
1242 	struct i915_address_space *vm = rq->context->vm;
1243 	struct intel_engine_cs *engine = rq->engine;
1244 	u32 base = engine->mmio_base;
1245 	u32 *cs;
1246 	int i;
1247 
1248 	if (i915_vm_is_4lvl(vm)) {
1249 		struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
1250 		const dma_addr_t pd_daddr = px_dma(ppgtt->pd);
1251 
1252 		cs = intel_ring_begin(rq, 6);
1253 		if (IS_ERR(cs))
1254 			return PTR_ERR(cs);
1255 
1256 		*cs++ = MI_LOAD_REGISTER_IMM(2);
1257 
1258 		*cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, 0));
1259 		*cs++ = upper_32_bits(pd_daddr);
1260 		*cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, 0));
1261 		*cs++ = lower_32_bits(pd_daddr);
1262 
1263 		*cs++ = MI_NOOP;
1264 		intel_ring_advance(rq, cs);
1265 	} else if (HAS_LOGICAL_RING_CONTEXTS(engine->i915)) {
1266 		struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
1267 		int err;
1268 
1269 		/* Magic required to prevent forcewake errors! */
1270 		err = engine->emit_flush(rq, EMIT_INVALIDATE);
1271 		if (err)
1272 			return err;
1273 
1274 		cs = intel_ring_begin(rq, 4 * GEN8_3LVL_PDPES + 2);
1275 		if (IS_ERR(cs))
1276 			return PTR_ERR(cs);
1277 
1278 		*cs++ = MI_LOAD_REGISTER_IMM(2 * GEN8_3LVL_PDPES) | MI_LRI_FORCE_POSTED;
1279 		for (i = GEN8_3LVL_PDPES; i--; ) {
1280 			const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
1281 
1282 			*cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, i));
1283 			*cs++ = upper_32_bits(pd_daddr);
1284 			*cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, i));
1285 			*cs++ = lower_32_bits(pd_daddr);
1286 		}
1287 		*cs++ = MI_NOOP;
1288 		intel_ring_advance(rq, cs);
1289 	}
1290 
1291 	return 0;
1292 }
1293 
1294 static bool skip_ppgtt_update(struct intel_context *ce, void *data)
1295 {
1296 	if (HAS_LOGICAL_RING_CONTEXTS(ce->engine->i915))
1297 		return !ce->state;
1298 	else
1299 		return !atomic_read(&ce->pin_count);
1300 }
1301 
1302 static int set_ppgtt(struct drm_i915_file_private *file_priv,
1303 		     struct i915_gem_context *ctx,
1304 		     struct drm_i915_gem_context_param *args)
1305 {
1306 	struct i915_address_space *vm, *old;
1307 	int err;
1308 
1309 	if (args->size)
1310 		return -EINVAL;
1311 
1312 	if (!rcu_access_pointer(ctx->vm))
1313 		return -ENODEV;
1314 
1315 	if (upper_32_bits(args->value))
1316 		return -ENOENT;
1317 
1318 	rcu_read_lock();
1319 	vm = xa_load(&file_priv->vm_xa, args->value);
1320 	if (vm && !kref_get_unless_zero(&vm->ref))
1321 		vm = NULL;
1322 	rcu_read_unlock();
1323 	if (!vm)
1324 		return -ENOENT;
1325 
1326 	err = mutex_lock_interruptible(&ctx->mutex);
1327 	if (err)
1328 		goto out;
1329 
1330 	if (i915_gem_context_is_closed(ctx)) {
1331 		err = -ENOENT;
1332 		goto unlock;
1333 	}
1334 
1335 	if (vm == rcu_access_pointer(ctx->vm))
1336 		goto unlock;
1337 
1338 	old = __set_ppgtt(ctx, vm);
1339 
1340 	/* Teardown the existing obj:vma cache, it will have to be rebuilt. */
1341 	lut_close(ctx);
1342 
1343 	/*
1344 	 * We need to flush any requests using the current ppgtt before
1345 	 * we release it as the requests do not hold a reference themselves,
1346 	 * only indirectly through the context.
1347 	 */
1348 	err = context_barrier_task(ctx, ALL_ENGINES,
1349 				   skip_ppgtt_update,
1350 				   pin_ppgtt_update,
1351 				   emit_ppgtt_update,
1352 				   set_ppgtt_barrier,
1353 				   old);
1354 	if (err) {
1355 		i915_vm_close(__set_ppgtt(ctx, old));
1356 		i915_vm_close(old);
1357 		lut_close(ctx); /* force a rebuild of the old obj:vma cache */
1358 	}
1359 
1360 unlock:
1361 	mutex_unlock(&ctx->mutex);
1362 out:
1363 	i915_vm_put(vm);
1364 	return err;
1365 }
1366 
1367 static int __apply_ringsize(struct intel_context *ce, void *sz)
1368 {
1369 	return intel_context_set_ring_size(ce, (unsigned long)sz);
1370 }
1371 
1372 static int set_ringsize(struct i915_gem_context *ctx,
1373 			struct drm_i915_gem_context_param *args)
1374 {
1375 	if (!HAS_LOGICAL_RING_CONTEXTS(ctx->i915))
1376 		return -ENODEV;
1377 
1378 	if (args->size)
1379 		return -EINVAL;
1380 
1381 	if (!IS_ALIGNED(args->value, I915_GTT_PAGE_SIZE))
1382 		return -EINVAL;
1383 
1384 	if (args->value < I915_GTT_PAGE_SIZE)
1385 		return -EINVAL;
1386 
1387 	if (args->value > 128 * I915_GTT_PAGE_SIZE)
1388 		return -EINVAL;
1389 
1390 	return context_apply_all(ctx,
1391 				 __apply_ringsize,
1392 				 __intel_context_ring_size(args->value));
1393 }
1394 
1395 static int __get_ringsize(struct intel_context *ce, void *arg)
1396 {
1397 	long sz;
1398 
1399 	sz = intel_context_get_ring_size(ce);
1400 	GEM_BUG_ON(sz > INT_MAX);
1401 
1402 	return sz; /* stop on first engine */
1403 }
1404 
1405 static int get_ringsize(struct i915_gem_context *ctx,
1406 			struct drm_i915_gem_context_param *args)
1407 {
1408 	int sz;
1409 
1410 	if (!HAS_LOGICAL_RING_CONTEXTS(ctx->i915))
1411 		return -ENODEV;
1412 
1413 	if (args->size)
1414 		return -EINVAL;
1415 
1416 	sz = context_apply_all(ctx, __get_ringsize, NULL);
1417 	if (sz < 0)
1418 		return sz;
1419 
1420 	args->value = sz;
1421 	return 0;
1422 }
1423 
1424 int
1425 i915_gem_user_to_context_sseu(struct intel_gt *gt,
1426 			      const struct drm_i915_gem_context_param_sseu *user,
1427 			      struct intel_sseu *context)
1428 {
1429 	const struct sseu_dev_info *device = &gt->info.sseu;
1430 	struct drm_i915_private *i915 = gt->i915;
1431 
1432 	/* No zeros in any field. */
1433 	if (!user->slice_mask || !user->subslice_mask ||
1434 	    !user->min_eus_per_subslice || !user->max_eus_per_subslice)
1435 		return -EINVAL;
1436 
1437 	/* Max > min. */
1438 	if (user->max_eus_per_subslice < user->min_eus_per_subslice)
1439 		return -EINVAL;
1440 
1441 	/*
1442 	 * Some future proofing on the types since the uAPI is wider than the
1443 	 * current internal implementation.
1444 	 */
1445 	if (overflows_type(user->slice_mask, context->slice_mask) ||
1446 	    overflows_type(user->subslice_mask, context->subslice_mask) ||
1447 	    overflows_type(user->min_eus_per_subslice,
1448 			   context->min_eus_per_subslice) ||
1449 	    overflows_type(user->max_eus_per_subslice,
1450 			   context->max_eus_per_subslice))
1451 		return -EINVAL;
1452 
1453 	/* Check validity against hardware. */
1454 	if (user->slice_mask & ~device->slice_mask)
1455 		return -EINVAL;
1456 
1457 	if (user->subslice_mask & ~device->subslice_mask[0])
1458 		return -EINVAL;
1459 
1460 	if (user->max_eus_per_subslice > device->max_eus_per_subslice)
1461 		return -EINVAL;
1462 
1463 	context->slice_mask = user->slice_mask;
1464 	context->subslice_mask = user->subslice_mask;
1465 	context->min_eus_per_subslice = user->min_eus_per_subslice;
1466 	context->max_eus_per_subslice = user->max_eus_per_subslice;
1467 
1468 	/* Part specific restrictions. */
1469 	if (IS_GEN(i915, 11)) {
1470 		unsigned int hw_s = hweight8(device->slice_mask);
1471 		unsigned int hw_ss_per_s = hweight8(device->subslice_mask[0]);
1472 		unsigned int req_s = hweight8(context->slice_mask);
1473 		unsigned int req_ss = hweight8(context->subslice_mask);
1474 
1475 		/*
1476 		 * Only full subslice enablement is possible if more than one
1477 		 * slice is turned on.
1478 		 */
1479 		if (req_s > 1 && req_ss != hw_ss_per_s)
1480 			return -EINVAL;
1481 
1482 		/*
1483 		 * If more than four (SScount bitfield limit) subslices are
1484 		 * requested then the number has to be even.
1485 		 */
1486 		if (req_ss > 4 && (req_ss & 1))
1487 			return -EINVAL;
1488 
1489 		/*
1490 		 * If only one slice is enabled and subslice count is below the
1491 		 * device full enablement, it must be at most half of the all
1492 		 * available subslices.
1493 		 */
1494 		if (req_s == 1 && req_ss < hw_ss_per_s &&
1495 		    req_ss > (hw_ss_per_s / 2))
1496 			return -EINVAL;
1497 
1498 		/* ABI restriction - VME use case only. */
1499 
1500 		/* All slices or one slice only. */
1501 		if (req_s != 1 && req_s != hw_s)
1502 			return -EINVAL;
1503 
1504 		/*
1505 		 * Half subslices or full enablement only when one slice is
1506 		 * enabled.
1507 		 */
1508 		if (req_s == 1 &&
1509 		    (req_ss != hw_ss_per_s && req_ss != (hw_ss_per_s / 2)))
1510 			return -EINVAL;
1511 
1512 		/* No EU configuration changes. */
1513 		if ((user->min_eus_per_subslice !=
1514 		     device->max_eus_per_subslice) ||
1515 		    (user->max_eus_per_subslice !=
1516 		     device->max_eus_per_subslice))
1517 			return -EINVAL;
1518 	}
1519 
1520 	return 0;
1521 }
1522 
1523 static int set_sseu(struct i915_gem_context *ctx,
1524 		    struct drm_i915_gem_context_param *args)
1525 {
1526 	struct drm_i915_private *i915 = ctx->i915;
1527 	struct drm_i915_gem_context_param_sseu user_sseu;
1528 	struct intel_context *ce;
1529 	struct intel_sseu sseu;
1530 	unsigned long lookup;
1531 	int ret;
1532 
1533 	if (args->size < sizeof(user_sseu))
1534 		return -EINVAL;
1535 
1536 	if (!IS_GEN(i915, 11))
1537 		return -ENODEV;
1538 
1539 	if (copy_from_user(&user_sseu, u64_to_user_ptr(args->value),
1540 			   sizeof(user_sseu)))
1541 		return -EFAULT;
1542 
1543 	if (user_sseu.rsvd)
1544 		return -EINVAL;
1545 
1546 	if (user_sseu.flags & ~(I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX))
1547 		return -EINVAL;
1548 
1549 	lookup = 0;
1550 	if (user_sseu.flags & I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX)
1551 		lookup |= LOOKUP_USER_INDEX;
1552 
1553 	ce = lookup_user_engine(ctx, lookup, &user_sseu.engine);
1554 	if (IS_ERR(ce))
1555 		return PTR_ERR(ce);
1556 
1557 	/* Only render engine supports RPCS configuration. */
1558 	if (ce->engine->class != RENDER_CLASS) {
1559 		ret = -ENODEV;
1560 		goto out_ce;
1561 	}
1562 
1563 	ret = i915_gem_user_to_context_sseu(ce->engine->gt, &user_sseu, &sseu);
1564 	if (ret)
1565 		goto out_ce;
1566 
1567 	ret = intel_context_reconfigure_sseu(ce, sseu);
1568 	if (ret)
1569 		goto out_ce;
1570 
1571 	args->size = sizeof(user_sseu);
1572 
1573 out_ce:
1574 	intel_context_put(ce);
1575 	return ret;
1576 }
1577 
1578 struct set_engines {
1579 	struct i915_gem_context *ctx;
1580 	struct i915_gem_engines *engines;
1581 };
1582 
1583 static int
1584 set_engines__load_balance(struct i915_user_extension __user *base, void *data)
1585 {
1586 	struct i915_context_engines_load_balance __user *ext =
1587 		container_of_user(base, typeof(*ext), base);
1588 	const struct set_engines *set = data;
1589 	struct drm_i915_private *i915 = set->ctx->i915;
1590 	struct intel_engine_cs *stack[16];
1591 	struct intel_engine_cs **siblings;
1592 	struct intel_context *ce;
1593 	u16 num_siblings, idx;
1594 	unsigned int n;
1595 	int err;
1596 
1597 	if (!HAS_EXECLISTS(i915))
1598 		return -ENODEV;
1599 
1600 	if (intel_uc_uses_guc_submission(&i915->gt.uc))
1601 		return -ENODEV; /* not implemented yet */
1602 
1603 	if (get_user(idx, &ext->engine_index))
1604 		return -EFAULT;
1605 
1606 	if (idx >= set->engines->num_engines) {
1607 		drm_dbg(&i915->drm, "Invalid placement value, %d >= %d\n",
1608 			idx, set->engines->num_engines);
1609 		return -EINVAL;
1610 	}
1611 
1612 	idx = array_index_nospec(idx, set->engines->num_engines);
1613 	if (set->engines->engines[idx]) {
1614 		drm_dbg(&i915->drm,
1615 			"Invalid placement[%d], already occupied\n", idx);
1616 		return -EEXIST;
1617 	}
1618 
1619 	if (get_user(num_siblings, &ext->num_siblings))
1620 		return -EFAULT;
1621 
1622 	err = check_user_mbz(&ext->flags);
1623 	if (err)
1624 		return err;
1625 
1626 	err = check_user_mbz(&ext->mbz64);
1627 	if (err)
1628 		return err;
1629 
1630 	siblings = stack;
1631 	if (num_siblings > ARRAY_SIZE(stack)) {
1632 		siblings = kmalloc_array(num_siblings,
1633 					 sizeof(*siblings),
1634 					 GFP_KERNEL);
1635 		if (!siblings)
1636 			return -ENOMEM;
1637 	}
1638 
1639 	for (n = 0; n < num_siblings; n++) {
1640 		struct i915_engine_class_instance ci;
1641 
1642 		if (copy_from_user(&ci, &ext->engines[n], sizeof(ci))) {
1643 			err = -EFAULT;
1644 			goto out_siblings;
1645 		}
1646 
1647 		siblings[n] = intel_engine_lookup_user(i915,
1648 						       ci.engine_class,
1649 						       ci.engine_instance);
1650 		if (!siblings[n]) {
1651 			drm_dbg(&i915->drm,
1652 				"Invalid sibling[%d]: { class:%d, inst:%d }\n",
1653 				n, ci.engine_class, ci.engine_instance);
1654 			err = -EINVAL;
1655 			goto out_siblings;
1656 		}
1657 	}
1658 
1659 	ce = intel_execlists_create_virtual(siblings, n);
1660 	if (IS_ERR(ce)) {
1661 		err = PTR_ERR(ce);
1662 		goto out_siblings;
1663 	}
1664 
1665 	intel_context_set_gem(ce, set->ctx);
1666 
1667 	if (cmpxchg(&set->engines->engines[idx], NULL, ce)) {
1668 		intel_context_put(ce);
1669 		err = -EEXIST;
1670 		goto out_siblings;
1671 	}
1672 
1673 out_siblings:
1674 	if (siblings != stack)
1675 		kfree(siblings);
1676 
1677 	return err;
1678 }
1679 
1680 static int
1681 set_engines__bond(struct i915_user_extension __user *base, void *data)
1682 {
1683 	struct i915_context_engines_bond __user *ext =
1684 		container_of_user(base, typeof(*ext), base);
1685 	const struct set_engines *set = data;
1686 	struct drm_i915_private *i915 = set->ctx->i915;
1687 	struct i915_engine_class_instance ci;
1688 	struct intel_engine_cs *virtual;
1689 	struct intel_engine_cs *master;
1690 	u16 idx, num_bonds;
1691 	int err, n;
1692 
1693 	if (get_user(idx, &ext->virtual_index))
1694 		return -EFAULT;
1695 
1696 	if (idx >= set->engines->num_engines) {
1697 		drm_dbg(&i915->drm,
1698 			"Invalid index for virtual engine: %d >= %d\n",
1699 			idx, set->engines->num_engines);
1700 		return -EINVAL;
1701 	}
1702 
1703 	idx = array_index_nospec(idx, set->engines->num_engines);
1704 	if (!set->engines->engines[idx]) {
1705 		drm_dbg(&i915->drm, "Invalid engine at %d\n", idx);
1706 		return -EINVAL;
1707 	}
1708 	virtual = set->engines->engines[idx]->engine;
1709 
1710 	err = check_user_mbz(&ext->flags);
1711 	if (err)
1712 		return err;
1713 
1714 	for (n = 0; n < ARRAY_SIZE(ext->mbz64); n++) {
1715 		err = check_user_mbz(&ext->mbz64[n]);
1716 		if (err)
1717 			return err;
1718 	}
1719 
1720 	if (copy_from_user(&ci, &ext->master, sizeof(ci)))
1721 		return -EFAULT;
1722 
1723 	master = intel_engine_lookup_user(i915,
1724 					  ci.engine_class, ci.engine_instance);
1725 	if (!master) {
1726 		drm_dbg(&i915->drm,
1727 			"Unrecognised master engine: { class:%u, instance:%u }\n",
1728 			ci.engine_class, ci.engine_instance);
1729 		return -EINVAL;
1730 	}
1731 
1732 	if (get_user(num_bonds, &ext->num_bonds))
1733 		return -EFAULT;
1734 
1735 	for (n = 0; n < num_bonds; n++) {
1736 		struct intel_engine_cs *bond;
1737 
1738 		if (copy_from_user(&ci, &ext->engines[n], sizeof(ci)))
1739 			return -EFAULT;
1740 
1741 		bond = intel_engine_lookup_user(i915,
1742 						ci.engine_class,
1743 						ci.engine_instance);
1744 		if (!bond) {
1745 			drm_dbg(&i915->drm,
1746 				"Unrecognised engine[%d] for bonding: { class:%d, instance: %d }\n",
1747 				n, ci.engine_class, ci.engine_instance);
1748 			return -EINVAL;
1749 		}
1750 
1751 		/*
1752 		 * A non-virtual engine has no siblings to choose between; and
1753 		 * a submit fence will always be directed to the one engine.
1754 		 */
1755 		if (intel_engine_is_virtual(virtual)) {
1756 			err = intel_virtual_engine_attach_bond(virtual,
1757 							       master,
1758 							       bond);
1759 			if (err)
1760 				return err;
1761 		}
1762 	}
1763 
1764 	return 0;
1765 }
1766 
1767 static const i915_user_extension_fn set_engines__extensions[] = {
1768 	[I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE] = set_engines__load_balance,
1769 	[I915_CONTEXT_ENGINES_EXT_BOND] = set_engines__bond,
1770 };
1771 
1772 static int
1773 set_engines(struct i915_gem_context *ctx,
1774 	    const struct drm_i915_gem_context_param *args)
1775 {
1776 	struct drm_i915_private *i915 = ctx->i915;
1777 	struct i915_context_param_engines __user *user =
1778 		u64_to_user_ptr(args->value);
1779 	struct set_engines set = { .ctx = ctx };
1780 	unsigned int num_engines, n;
1781 	u64 extensions;
1782 	int err;
1783 
1784 	if (!args->size) { /* switch back to legacy user_ring_map */
1785 		if (!i915_gem_context_user_engines(ctx))
1786 			return 0;
1787 
1788 		set.engines = default_engines(ctx);
1789 		if (IS_ERR(set.engines))
1790 			return PTR_ERR(set.engines);
1791 
1792 		goto replace;
1793 	}
1794 
1795 	BUILD_BUG_ON(!IS_ALIGNED(sizeof(*user), sizeof(*user->engines)));
1796 	if (args->size < sizeof(*user) ||
1797 	    !IS_ALIGNED(args->size, sizeof(*user->engines))) {
1798 		drm_dbg(&i915->drm, "Invalid size for engine array: %d\n",
1799 			args->size);
1800 		return -EINVAL;
1801 	}
1802 
1803 	/*
1804 	 * Note that I915_EXEC_RING_MASK limits execbuf to only using the
1805 	 * first 64 engines defined here.
1806 	 */
1807 	num_engines = (args->size - sizeof(*user)) / sizeof(*user->engines);
1808 	set.engines = alloc_engines(num_engines);
1809 	if (!set.engines)
1810 		return -ENOMEM;
1811 
1812 	for (n = 0; n < num_engines; n++) {
1813 		struct i915_engine_class_instance ci;
1814 		struct intel_engine_cs *engine;
1815 		struct intel_context *ce;
1816 
1817 		if (copy_from_user(&ci, &user->engines[n], sizeof(ci))) {
1818 			__free_engines(set.engines, n);
1819 			return -EFAULT;
1820 		}
1821 
1822 		if (ci.engine_class == (u16)I915_ENGINE_CLASS_INVALID &&
1823 		    ci.engine_instance == (u16)I915_ENGINE_CLASS_INVALID_NONE) {
1824 			set.engines->engines[n] = NULL;
1825 			continue;
1826 		}
1827 
1828 		engine = intel_engine_lookup_user(ctx->i915,
1829 						  ci.engine_class,
1830 						  ci.engine_instance);
1831 		if (!engine) {
1832 			drm_dbg(&i915->drm,
1833 				"Invalid engine[%d]: { class:%d, instance:%d }\n",
1834 				n, ci.engine_class, ci.engine_instance);
1835 			__free_engines(set.engines, n);
1836 			return -ENOENT;
1837 		}
1838 
1839 		ce = intel_context_create(engine);
1840 		if (IS_ERR(ce)) {
1841 			__free_engines(set.engines, n);
1842 			return PTR_ERR(ce);
1843 		}
1844 
1845 		intel_context_set_gem(ce, ctx);
1846 
1847 		set.engines->engines[n] = ce;
1848 	}
1849 	set.engines->num_engines = num_engines;
1850 
1851 	err = -EFAULT;
1852 	if (!get_user(extensions, &user->extensions))
1853 		err = i915_user_extensions(u64_to_user_ptr(extensions),
1854 					   set_engines__extensions,
1855 					   ARRAY_SIZE(set_engines__extensions),
1856 					   &set);
1857 	if (err) {
1858 		free_engines(set.engines);
1859 		return err;
1860 	}
1861 
1862 replace:
1863 	mutex_lock(&ctx->engines_mutex);
1864 	if (i915_gem_context_is_closed(ctx)) {
1865 		mutex_unlock(&ctx->engines_mutex);
1866 		free_engines(set.engines);
1867 		return -ENOENT;
1868 	}
1869 	if (args->size)
1870 		i915_gem_context_set_user_engines(ctx);
1871 	else
1872 		i915_gem_context_clear_user_engines(ctx);
1873 	set.engines = rcu_replace_pointer(ctx->engines, set.engines, 1);
1874 	mutex_unlock(&ctx->engines_mutex);
1875 
1876 	/* Keep track of old engine sets for kill_context() */
1877 	engines_idle_release(ctx, set.engines);
1878 
1879 	return 0;
1880 }
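
/*
 * Illustrative sketch, not part of the driver: a minimal userspace engine
 * map matching the layout validated in set_engines() above. The size must
 * cover the struct i915_context_param_engines header plus a whole number of
 * class/instance pairs, and a size of zero reverts to the legacy ring map.
 * The two-engine layout and the ctx_id/fd variables are assumptions of this
 * example; the ioctl is the standard DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM.
 *
 *	size_t sz = sizeof(struct i915_context_param_engines) +
 *		    2 * sizeof(struct i915_engine_class_instance);
 *	struct i915_context_param_engines *map = calloc(1, sz);
 *	struct drm_i915_gem_context_param arg = {
 *		.ctx_id = ctx_id,
 *		.param = I915_CONTEXT_PARAM_ENGINES,
 *		.size = sz,
 *		.value = (uintptr_t)map,
 *	};
 *
 *	map->engines[0].engine_class = I915_ENGINE_CLASS_RENDER;
 *	map->engines[0].engine_instance = 0;
 *	map->engines[1].engine_class = I915_ENGINE_CLASS_COPY;
 *	map->engines[1].engine_instance = 0;
 *	ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &arg);
 */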
1881 
1882 static struct i915_gem_engines *
1883 __copy_engines(struct i915_gem_engines *e)
1884 {
1885 	struct i915_gem_engines *copy;
1886 	unsigned int n;
1887 
1888 	copy = alloc_engines(e->num_engines);
1889 	if (!copy)
1890 		return ERR_PTR(-ENOMEM);
1891 
1892 	for (n = 0; n < e->num_engines; n++) {
1893 		if (e->engines[n])
1894 			copy->engines[n] = intel_context_get(e->engines[n]);
1895 		else
1896 			copy->engines[n] = NULL;
1897 	}
1898 	copy->num_engines = n;
1899 
1900 	return copy;
1901 }
1902 
1903 static int
1904 get_engines(struct i915_gem_context *ctx,
1905 	    struct drm_i915_gem_context_param *args)
1906 {
1907 	struct i915_context_param_engines __user *user;
1908 	struct i915_gem_engines *e;
1909 	size_t n, count, size;
1910 	int err = 0;
1911 
1912 	err = mutex_lock_interruptible(&ctx->engines_mutex);
1913 	if (err)
1914 		return err;
1915 
1916 	e = NULL;
1917 	if (i915_gem_context_user_engines(ctx))
1918 		e = __copy_engines(i915_gem_context_engines(ctx));
1919 	mutex_unlock(&ctx->engines_mutex);
1920 	if (IS_ERR_OR_NULL(e)) {
1921 		args->size = 0;
1922 		return PTR_ERR_OR_ZERO(e);
1923 	}
1924 
1925 	count = e->num_engines;
1926 
1927 	/* Be paranoid in case we have an impedance mismatch */
1928 	if (!check_struct_size(user, engines, count, &size)) {
1929 		err = -EINVAL;
1930 		goto err_free;
1931 	}
1932 	if (overflows_type(size, args->size)) {
1933 		err = -EINVAL;
1934 		goto err_free;
1935 	}
1936 
1937 	if (!args->size) {
1938 		args->size = size;
1939 		goto err_free;
1940 	}
1941 
1942 	if (args->size < size) {
1943 		err = -EINVAL;
1944 		goto err_free;
1945 	}
1946 
1947 	user = u64_to_user_ptr(args->value);
1948 	if (put_user(0, &user->extensions)) {
1949 		err = -EFAULT;
1950 		goto err_free;
1951 	}
1952 
1953 	for (n = 0; n < count; n++) {
1954 		struct i915_engine_class_instance ci = {
1955 			.engine_class = I915_ENGINE_CLASS_INVALID,
1956 			.engine_instance = I915_ENGINE_CLASS_INVALID_NONE,
1957 		};
1958 
1959 		if (e->engines[n]) {
1960 			ci.engine_class = e->engines[n]->engine->uabi_class;
1961 			ci.engine_instance = e->engines[n]->engine->uabi_instance;
1962 		}
1963 
1964 		if (copy_to_user(&user->engines[n], &ci, sizeof(ci))) {
1965 			err = -EFAULT;
1966 			goto err_free;
1967 		}
1968 	}
1969 
1970 	args->size = size;
1971 
1972 err_free:
1973 	free_engines(e);
1974 	return err;
1975 }
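
/*
 * Illustrative sketch, not part of the driver: the size-probe pattern that
 * get_engines() above supports. A first call with size == 0 reports the
 * number of bytes required; a second call with a buffer of at least that
 * size receives the class/instance pairs, with empty slots reported as
 * I915_ENGINE_CLASS_INVALID / I915_ENGINE_CLASS_INVALID_NONE.
 *
 *	struct drm_i915_gem_context_param arg = {
 *		.ctx_id = ctx_id,
 *		.param = I915_CONTEXT_PARAM_ENGINES,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM, &arg);	// arg.size := bytes needed
 *	arg.value = (uintptr_t)calloc(1, arg.size);
 *	ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM, &arg);	// fills the array
 */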
1976 
1977 static int
1978 set_persistence(struct i915_gem_context *ctx,
1979 		const struct drm_i915_gem_context_param *args)
1980 {
1981 	if (args->size)
1982 		return -EINVAL;
1983 
1984 	return __context_set_persistence(ctx, args->value);
1985 }
1986 
1987 static int __apply_priority(struct intel_context *ce, void *arg)
1988 {
1989 	struct i915_gem_context *ctx = arg;
1990 
1991 	if (!intel_engine_has_timeslices(ce->engine))
1992 		return 0;
1993 
1994 	if (ctx->sched.priority >= I915_PRIORITY_NORMAL)
1995 		intel_context_set_use_semaphores(ce);
1996 	else
1997 		intel_context_clear_use_semaphores(ce);
1998 
1999 	return 0;
2000 }
2001 
2002 static int set_priority(struct i915_gem_context *ctx,
2003 			const struct drm_i915_gem_context_param *args)
2004 {
2005 	s64 priority = args->value;
2006 
2007 	if (args->size)
2008 		return -EINVAL;
2009 
2010 	if (!(ctx->i915->caps.scheduler & I915_SCHEDULER_CAP_PRIORITY))
2011 		return -ENODEV;
2012 
2013 	if (priority > I915_CONTEXT_MAX_USER_PRIORITY ||
2014 	    priority < I915_CONTEXT_MIN_USER_PRIORITY)
2015 		return -EINVAL;
2016 
2017 	if (priority > I915_CONTEXT_DEFAULT_PRIORITY &&
2018 	    !capable(CAP_SYS_NICE))
2019 		return -EPERM;
2020 
2021 	ctx->sched.priority = I915_USER_PRIORITY(priority);
2022 	context_apply_all(ctx, __apply_priority, ctx);
2023 
2024 	return 0;
2025 }
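
/*
 * Illustrative sketch, not part of the driver: bumping a context's priority
 * through the checks in set_priority() above. Values outside
 * [I915_CONTEXT_MIN_USER_PRIORITY, I915_CONTEXT_MAX_USER_PRIORITY] are
 * rejected and anything above I915_CONTEXT_DEFAULT_PRIORITY requires
 * CAP_SYS_NICE. The ctx_id/fd variables are assumptions of this example.
 *
 *	struct drm_i915_gem_context_param arg = {
 *		.ctx_id = ctx_id,
 *		.param = I915_CONTEXT_PARAM_PRIORITY,
 *		.value = I915_CONTEXT_MAX_USER_PRIORITY,	// needs CAP_SYS_NICE
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &arg);
 */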
2026 
2027 static int ctx_setparam(struct drm_i915_file_private *fpriv,
2028 			struct i915_gem_context *ctx,
2029 			struct drm_i915_gem_context_param *args)
2030 {
2031 	int ret = 0;
2032 
2033 	switch (args->param) {
2034 	case I915_CONTEXT_PARAM_NO_ZEROMAP:
2035 		if (args->size)
2036 			ret = -EINVAL;
2037 		else if (args->value)
2038 			set_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags);
2039 		else
2040 			clear_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags);
2041 		break;
2042 
2043 	case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
2044 		if (args->size)
2045 			ret = -EINVAL;
2046 		else if (args->value)
2047 			i915_gem_context_set_no_error_capture(ctx);
2048 		else
2049 			i915_gem_context_clear_no_error_capture(ctx);
2050 		break;
2051 
2052 	case I915_CONTEXT_PARAM_BANNABLE:
2053 		if (args->size)
2054 			ret = -EINVAL;
2055 		else if (!capable(CAP_SYS_ADMIN) && !args->value)
2056 			ret = -EPERM;
2057 		else if (args->value)
2058 			i915_gem_context_set_bannable(ctx);
2059 		else
2060 			i915_gem_context_clear_bannable(ctx);
2061 		break;
2062 
2063 	case I915_CONTEXT_PARAM_RECOVERABLE:
2064 		if (args->size)
2065 			ret = -EINVAL;
2066 		else if (args->value)
2067 			i915_gem_context_set_recoverable(ctx);
2068 		else
2069 			i915_gem_context_clear_recoverable(ctx);
2070 		break;
2071 
2072 	case I915_CONTEXT_PARAM_PRIORITY:
2073 		ret = set_priority(ctx, args);
2074 		break;
2075 
2076 	case I915_CONTEXT_PARAM_SSEU:
2077 		ret = set_sseu(ctx, args);
2078 		break;
2079 
2080 	case I915_CONTEXT_PARAM_VM:
2081 		ret = set_ppgtt(fpriv, ctx, args);
2082 		break;
2083 
2084 	case I915_CONTEXT_PARAM_ENGINES:
2085 		ret = set_engines(ctx, args);
2086 		break;
2087 
2088 	case I915_CONTEXT_PARAM_PERSISTENCE:
2089 		ret = set_persistence(ctx, args);
2090 		break;
2091 
2092 	case I915_CONTEXT_PARAM_RINGSIZE:
2093 		ret = set_ringsize(ctx, args);
2094 		break;
2095 
2096 	case I915_CONTEXT_PARAM_BAN_PERIOD:
2097 	default:
2098 		ret = -EINVAL;
2099 		break;
2100 	}
2101 
2102 	return ret;
2103 }
2104 
2105 struct create_ext {
2106 	struct i915_gem_context *ctx;
2107 	struct drm_i915_file_private *fpriv;
2108 };
2109 
2110 static int create_setparam(struct i915_user_extension __user *ext, void *data)
2111 {
2112 	struct drm_i915_gem_context_create_ext_setparam local;
2113 	const struct create_ext *arg = data;
2114 
2115 	if (copy_from_user(&local, ext, sizeof(local)))
2116 		return -EFAULT;
2117 
2118 	if (local.param.ctx_id)
2119 		return -EINVAL;
2120 
2121 	return ctx_setparam(arg->fpriv, arg->ctx, &local.param);
2122 }
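
/*
 * Illustrative sketch, not part of the driver: a setparam applied at create
 * time via the extension handled by create_setparam() above. The embedded
 * param must leave ctx_id zero, as enforced here; the parameter chosen is
 * just an example.
 *
 *	struct drm_i915_gem_context_create_ext_setparam p = {
 *		.base.name = I915_CONTEXT_CREATE_EXT_SETPARAM,
 *		.param = {
 *			.param = I915_CONTEXT_PARAM_RECOVERABLE,
 *			.value = 0,
 *		},
 *	};
 */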
2123 
2124 static int copy_ring_size(struct intel_context *dst,
2125 			  struct intel_context *src)
2126 {
2127 	long sz;
2128 
2129 	sz = intel_context_get_ring_size(src);
2130 	if (sz < 0)
2131 		return sz;
2132 
2133 	return intel_context_set_ring_size(dst, sz);
2134 }
2135 
2136 static int clone_engines(struct i915_gem_context *dst,
2137 			 struct i915_gem_context *src)
2138 {
2139 	struct i915_gem_engines *e = i915_gem_context_lock_engines(src);
2140 	struct i915_gem_engines *clone;
2141 	bool user_engines;
2142 	unsigned long n;
2143 
2144 	clone = alloc_engines(e->num_engines);
2145 	if (!clone)
2146 		goto err_unlock;
2147 
2148 	for (n = 0; n < e->num_engines; n++) {
2149 		struct intel_engine_cs *engine;
2150 
2151 		if (!e->engines[n]) {
2152 			clone->engines[n] = NULL;
2153 			continue;
2154 		}
2155 		engine = e->engines[n]->engine;
2156 
2157 		/*
2158 		 * Virtual engines are singletons; they can only exist
2159 		 * inside a single context, because they embed their
2160 		 * HW context... As each virtual context implies a single
2161 		 * timeline (each engine can only dequeue a single request
2162 		 * at any time), it would be surprising for two contexts
2163 		 * to use the same engine. So let's create a copy of
2164 		 * the virtual engine instead.
2165 		 */
2166 		if (intel_engine_is_virtual(engine))
2167 			clone->engines[n] =
2168 				intel_execlists_clone_virtual(engine);
2169 		else
2170 			clone->engines[n] = intel_context_create(engine);
2171 		if (IS_ERR_OR_NULL(clone->engines[n])) {
2172 			__free_engines(clone, n);
2173 			goto err_unlock;
2174 		}
2175 
2176 		intel_context_set_gem(clone->engines[n], dst);
2177 
2178 		/* Copy across the preferred ringsize */
2179 		if (copy_ring_size(clone->engines[n], e->engines[n])) {
2180 			__free_engines(clone, n + 1);
2181 			goto err_unlock;
2182 		}
2183 	}
2184 	clone->num_engines = n;
2185 
2186 	user_engines = i915_gem_context_user_engines(src);
2187 	i915_gem_context_unlock_engines(src);
2188 
2189 	/* Serialised by constructor */
2190 	engines_idle_release(dst, rcu_replace_pointer(dst->engines, clone, 1));
2191 	if (user_engines)
2192 		i915_gem_context_set_user_engines(dst);
2193 	else
2194 		i915_gem_context_clear_user_engines(dst);
2195 	return 0;
2196 
2197 err_unlock:
2198 	i915_gem_context_unlock_engines(src);
2199 	return -ENOMEM;
2200 }
2201 
2202 static int clone_flags(struct i915_gem_context *dst,
2203 		       struct i915_gem_context *src)
2204 {
2205 	dst->user_flags = src->user_flags;
2206 	return 0;
2207 }
2208 
2209 static int clone_schedattr(struct i915_gem_context *dst,
2210 			   struct i915_gem_context *src)
2211 {
2212 	dst->sched = src->sched;
2213 	return 0;
2214 }
2215 
2216 static int clone_sseu(struct i915_gem_context *dst,
2217 		      struct i915_gem_context *src)
2218 {
2219 	struct i915_gem_engines *e = i915_gem_context_lock_engines(src);
2220 	struct i915_gem_engines *clone;
2221 	unsigned long n;
2222 	int err;
2223 
2224 	/* no locking required; sole access under constructor */
2225 	clone = __context_engines_static(dst);
2226 	if (e->num_engines != clone->num_engines) {
2227 		err = -EINVAL;
2228 		goto unlock;
2229 	}
2230 
2231 	for (n = 0; n < e->num_engines; n++) {
2232 		struct intel_context *ce = e->engines[n];
2233 
2234 		if (clone->engines[n]->engine->class != ce->engine->class) {
2235 			/* Must have compatible engine maps! */
2236 			err = -EINVAL;
2237 			goto unlock;
2238 		}
2239 
2240 		/* serialises with set_sseu */
2241 		err = intel_context_lock_pinned(ce);
2242 		if (err)
2243 			goto unlock;
2244 
2245 		clone->engines[n]->sseu = ce->sseu;
2246 		intel_context_unlock_pinned(ce);
2247 	}
2248 
2249 	err = 0;
2250 unlock:
2251 	i915_gem_context_unlock_engines(src);
2252 	return err;
2253 }
2254 
2255 static int clone_timeline(struct i915_gem_context *dst,
2256 			  struct i915_gem_context *src)
2257 {
2258 	if (src->timeline)
2259 		__assign_timeline(dst, src->timeline);
2260 
2261 	return 0;
2262 }
2263 
2264 static int clone_vm(struct i915_gem_context *dst,
2265 		    struct i915_gem_context *src)
2266 {
2267 	struct i915_address_space *vm;
2268 	int err = 0;
2269 
2270 	if (!rcu_access_pointer(src->vm))
2271 		return 0;
2272 
2273 	rcu_read_lock();
2274 	vm = context_get_vm_rcu(src);
2275 	rcu_read_unlock();
2276 
2277 	if (!mutex_lock_interruptible(&dst->mutex)) {
2278 		__assign_ppgtt(dst, vm);
2279 		mutex_unlock(&dst->mutex);
2280 	} else {
2281 		err = -EINTR;
2282 	}
2283 
2284 	i915_vm_put(vm);
2285 	return err;
2286 }
2287 
2288 static int create_clone(struct i915_user_extension __user *ext, void *data)
2289 {
2290 	static int (* const fn[])(struct i915_gem_context *dst,
2291 				  struct i915_gem_context *src) = {
2292 #define MAP(x, y) [ilog2(I915_CONTEXT_CLONE_##x)] = y
2293 		MAP(ENGINES, clone_engines),
2294 		MAP(FLAGS, clone_flags),
2295 		MAP(SCHEDATTR, clone_schedattr),
2296 		MAP(SSEU, clone_sseu),
2297 		MAP(TIMELINE, clone_timeline),
2298 		MAP(VM, clone_vm),
2299 #undef MAP
2300 	};
2301 	struct drm_i915_gem_context_create_ext_clone local;
2302 	const struct create_ext *arg = data;
2303 	struct i915_gem_context *dst = arg->ctx;
2304 	struct i915_gem_context *src;
2305 	int err, bit;
2306 
2307 	if (copy_from_user(&local, ext, sizeof(local)))
2308 		return -EFAULT;
2309 
2310 	BUILD_BUG_ON(GENMASK(BITS_PER_TYPE(local.flags) - 1, ARRAY_SIZE(fn)) !=
2311 		     I915_CONTEXT_CLONE_UNKNOWN);
2312 
2313 	if (local.flags & I915_CONTEXT_CLONE_UNKNOWN)
2314 		return -EINVAL;
2315 
2316 	if (local.rsvd)
2317 		return -EINVAL;
2318 
2319 	rcu_read_lock();
2320 	src = __i915_gem_context_lookup_rcu(arg->fpriv, local.clone_id);
2321 	rcu_read_unlock();
2322 	if (!src)
2323 		return -ENOENT;
2324 
2325 	GEM_BUG_ON(src == dst);
2326 
2327 	for (bit = 0; bit < ARRAY_SIZE(fn); bit++) {
2328 		if (!(local.flags & BIT(bit)))
2329 			continue;
2330 
2331 		err = fn[bit](dst, src);
2332 		if (err)
2333 			return err;
2334 	}
2335 
2336 	return 0;
2337 }
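
/*
 * Illustrative sketch, not part of the driver: cloning selected state from
 * an existing context, matching the flag-to-helper table in create_clone()
 * above. Unknown flag bits and a non-zero rsvd are rejected; the particular
 * flags chosen and parent_ctx_id are assumptions of this example.
 *
 *	struct drm_i915_gem_context_create_ext_clone clone = {
 *		.base.name = I915_CONTEXT_CREATE_EXT_CLONE,
 *		.clone_id = parent_ctx_id,
 *		.flags = I915_CONTEXT_CLONE_ENGINES | I915_CONTEXT_CLONE_VM,
 *	};
 */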
2338 
2339 static const i915_user_extension_fn create_extensions[] = {
2340 	[I915_CONTEXT_CREATE_EXT_SETPARAM] = create_setparam,
2341 	[I915_CONTEXT_CREATE_EXT_CLONE] = create_clone,
2342 };
2343 
2344 static bool client_is_banned(struct drm_i915_file_private *file_priv)
2345 {
2346 	return atomic_read(&file_priv->ban_score) >= I915_CLIENT_SCORE_BANNED;
2347 }
2348 
2349 int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
2350 				  struct drm_file *file)
2351 {
2352 	struct drm_i915_private *i915 = to_i915(dev);
2353 	struct drm_i915_gem_context_create_ext *args = data;
2354 	struct create_ext ext_data;
2355 	int ret;
2356 	u32 id;
2357 
2358 	if (!DRIVER_CAPS(i915)->has_logical_contexts)
2359 		return -ENODEV;
2360 
2361 	if (args->flags & I915_CONTEXT_CREATE_FLAGS_UNKNOWN)
2362 		return -EINVAL;
2363 
2364 	ret = intel_gt_terminally_wedged(&i915->gt);
2365 	if (ret)
2366 		return ret;
2367 
2368 	ext_data.fpriv = file->driver_priv;
2369 	if (client_is_banned(ext_data.fpriv)) {
2370 		drm_dbg(&i915->drm,
2371 			"client %s[%d] banned from creating ctx\n",
2372 			current->comm, task_pid_nr(current));
2373 		return -EIO;
2374 	}
2375 
2376 	ext_data.ctx = i915_gem_create_context(i915, args->flags);
2377 	if (IS_ERR(ext_data.ctx))
2378 		return PTR_ERR(ext_data.ctx);
2379 
2380 	if (args->flags & I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS) {
2381 		ret = i915_user_extensions(u64_to_user_ptr(args->extensions),
2382 					   create_extensions,
2383 					   ARRAY_SIZE(create_extensions),
2384 					   &ext_data);
2385 		if (ret)
2386 			goto err_ctx;
2387 	}
2388 
2389 	ret = gem_context_register(ext_data.ctx, ext_data.fpriv, &id);
2390 	if (ret < 0)
2391 		goto err_ctx;
2392 
2393 	args->ctx_id = id;
2394 	drm_dbg(&i915->drm, "HW context %d created\n", args->ctx_id);
2395 
2396 	return 0;
2397 
2398 err_ctx:
2399 	context_close(ext_data.ctx);
2400 	return ret;
2401 }
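
/*
 * Illustrative sketch, not part of the driver: chaining the two create-time
 * extensions from the sketches above and invoking the ioctl handled by
 * i915_gem_context_create_ioctl(). Extensions are linked through
 * base.next_extension and walked by i915_user_extensions(); the new handle
 * comes back in ctx_id. The chain order is an assumption of this example.
 *
 *	clone.base.next_extension = (uintptr_t)&p;	// 'p' and 'clone' as above
 *	struct drm_i915_gem_context_create_ext create = {
 *		.flags = I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS,
 *		.extensions = (uintptr_t)&clone,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE_EXT, &create);
 *	// create.ctx_id now refers to the new context
 */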
2402 
2403 int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
2404 				   struct drm_file *file)
2405 {
2406 	struct drm_i915_gem_context_destroy *args = data;
2407 	struct drm_i915_file_private *file_priv = file->driver_priv;
2408 	struct i915_gem_context *ctx;
2409 
2410 	if (args->pad != 0)
2411 		return -EINVAL;
2412 
2413 	if (!args->ctx_id)
2414 		return -ENOENT;
2415 
2416 	ctx = xa_erase(&file_priv->context_xa, args->ctx_id);
2417 	if (!ctx)
2418 		return -ENOENT;
2419 
2420 	context_close(ctx);
2421 	return 0;
2422 }
2423 
2424 static int get_sseu(struct i915_gem_context *ctx,
2425 		    struct drm_i915_gem_context_param *args)
2426 {
2427 	struct drm_i915_gem_context_param_sseu user_sseu;
2428 	struct intel_context *ce;
2429 	unsigned long lookup;
2430 	int err;
2431 
2432 	if (args->size == 0)
2433 		goto out;
2434 	else if (args->size < sizeof(user_sseu))
2435 		return -EINVAL;
2436 
2437 	if (copy_from_user(&user_sseu, u64_to_user_ptr(args->value),
2438 			   sizeof(user_sseu)))
2439 		return -EFAULT;
2440 
2441 	if (user_sseu.rsvd)
2442 		return -EINVAL;
2443 
2444 	if (user_sseu.flags & ~(I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX))
2445 		return -EINVAL;
2446 
2447 	lookup = 0;
2448 	if (user_sseu.flags & I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX)
2449 		lookup |= LOOKUP_USER_INDEX;
2450 
2451 	ce = lookup_user_engine(ctx, lookup, &user_sseu.engine);
2452 	if (IS_ERR(ce))
2453 		return PTR_ERR(ce);
2454 
2455 	err = intel_context_lock_pinned(ce); /* serialises with set_sseu */
2456 	if (err) {
2457 		intel_context_put(ce);
2458 		return err;
2459 	}
2460 
2461 	user_sseu.slice_mask = ce->sseu.slice_mask;
2462 	user_sseu.subslice_mask = ce->sseu.subslice_mask;
2463 	user_sseu.min_eus_per_subslice = ce->sseu.min_eus_per_subslice;
2464 	user_sseu.max_eus_per_subslice = ce->sseu.max_eus_per_subslice;
2465 
2466 	intel_context_unlock_pinned(ce);
2467 	intel_context_put(ce);
2468 
2469 	if (copy_to_user(u64_to_user_ptr(args->value), &user_sseu,
2470 			 sizeof(user_sseu)))
2471 		return -EFAULT;
2472 
2473 out:
2474 	args->size = sizeof(user_sseu);
2475 
2476 	return 0;
2477 }
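
/*
 * Illustrative sketch, not part of the driver: querying the SSEU
 * configuration of a context's render engine via get_sseu() above. The
 * struct fields mirror those copied back to userspace; the ctx_id/fd
 * variables are assumptions of this example.
 *
 *	struct drm_i915_gem_context_param_sseu sseu = {
 *		.engine = { I915_ENGINE_CLASS_RENDER, 0 },
 *	};
 *	struct drm_i915_gem_context_param arg = {
 *		.ctx_id = ctx_id,
 *		.param = I915_CONTEXT_PARAM_SSEU,
 *		.size = sizeof(sseu),
 *		.value = (uintptr_t)&sseu,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM, &arg);
 *	// sseu.slice_mask, sseu.subslice_mask and the EU limits are now filled in
 */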
2478 
2479 int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
2480 				    struct drm_file *file)
2481 {
2482 	struct drm_i915_file_private *file_priv = file->driver_priv;
2483 	struct drm_i915_gem_context_param *args = data;
2484 	struct i915_gem_context *ctx;
2485 	int ret = 0;
2486 
2487 	ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
2488 	if (!ctx)
2489 		return -ENOENT;
2490 
2491 	switch (args->param) {
2492 	case I915_CONTEXT_PARAM_NO_ZEROMAP:
2493 		args->size = 0;
2494 		args->value = test_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags);
2495 		break;
2496 
2497 	case I915_CONTEXT_PARAM_GTT_SIZE:
2498 		args->size = 0;
2499 		rcu_read_lock();
2500 		if (rcu_access_pointer(ctx->vm))
2501 			args->value = rcu_dereference(ctx->vm)->total;
2502 		else
2503 			args->value = to_i915(dev)->ggtt.vm.total;
2504 		rcu_read_unlock();
2505 		break;
2506 
2507 	case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
2508 		args->size = 0;
2509 		args->value = i915_gem_context_no_error_capture(ctx);
2510 		break;
2511 
2512 	case I915_CONTEXT_PARAM_BANNABLE:
2513 		args->size = 0;
2514 		args->value = i915_gem_context_is_bannable(ctx);
2515 		break;
2516 
2517 	case I915_CONTEXT_PARAM_RECOVERABLE:
2518 		args->size = 0;
2519 		args->value = i915_gem_context_is_recoverable(ctx);
2520 		break;
2521 
2522 	case I915_CONTEXT_PARAM_PRIORITY:
2523 		args->size = 0;
2524 		args->value = ctx->sched.priority >> I915_USER_PRIORITY_SHIFT;
2525 		break;
2526 
2527 	case I915_CONTEXT_PARAM_SSEU:
2528 		ret = get_sseu(ctx, args);
2529 		break;
2530 
2531 	case I915_CONTEXT_PARAM_VM:
2532 		ret = get_ppgtt(file_priv, ctx, args);
2533 		break;
2534 
2535 	case I915_CONTEXT_PARAM_ENGINES:
2536 		ret = get_engines(ctx, args);
2537 		break;
2538 
2539 	case I915_CONTEXT_PARAM_PERSISTENCE:
2540 		args->size = 0;
2541 		args->value = i915_gem_context_is_persistent(ctx);
2542 		break;
2543 
2544 	case I915_CONTEXT_PARAM_RINGSIZE:
2545 		ret = get_ringsize(ctx, args);
2546 		break;
2547 
2548 	case I915_CONTEXT_PARAM_BAN_PERIOD:
2549 	default:
2550 		ret = -EINVAL;
2551 		break;
2552 	}
2553 
2554 	i915_gem_context_put(ctx);
2555 	return ret;
2556 }
2557 
2558 int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
2559 				    struct drm_file *file)
2560 {
2561 	struct drm_i915_file_private *file_priv = file->driver_priv;
2562 	struct drm_i915_gem_context_param *args = data;
2563 	struct i915_gem_context *ctx;
2564 	int ret;
2565 
2566 	ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
2567 	if (!ctx)
2568 		return -ENOENT;
2569 
2570 	ret = ctx_setparam(file_priv, ctx, args);
2571 
2572 	i915_gem_context_put(ctx);
2573 	return ret;
2574 }
2575 
2576 int i915_gem_context_reset_stats_ioctl(struct drm_device *dev,
2577 				       void *data, struct drm_file *file)
2578 {
2579 	struct drm_i915_private *i915 = to_i915(dev);
2580 	struct drm_i915_reset_stats *args = data;
2581 	struct i915_gem_context *ctx;
2582 	int ret;
2583 
2584 	if (args->flags || args->pad)
2585 		return -EINVAL;
2586 
2587 	ret = -ENOENT;
2588 	rcu_read_lock();
2589 	ctx = __i915_gem_context_lookup_rcu(file->driver_priv, args->ctx_id);
2590 	if (!ctx)
2591 		goto out;
2592 
2593 	/*
2594 	 * We opt for unserialised reads here. This may result in tearing
2595 	 * in the extremely unlikely event of a GPU hang on this context
2596 	 * as we are querying them. If we need that extra layer of protection,
2597 	 * we should wrap the hangstats with a seqlock.
2598 	 */
2599 
2600 	if (capable(CAP_SYS_ADMIN))
2601 		args->reset_count = i915_reset_count(&i915->gpu_error);
2602 	else
2603 		args->reset_count = 0;
2604 
2605 	args->batch_active = atomic_read(&ctx->guilty_count);
2606 	args->batch_pending = atomic_read(&ctx->active_count);
2607 
2608 	ret = 0;
2609 out:
2610 	rcu_read_unlock();
2611 	return ret;
2612 }
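
/*
 * Illustrative sketch, not part of the driver: reading the per-context reset
 * statistics returned by i915_gem_context_reset_stats_ioctl() above. flags
 * and pad must be zero, and the global reset_count is only reported to
 * CAP_SYS_ADMIN.
 *
 *	struct drm_i915_reset_stats stats = { .ctx_id = ctx_id };
 *
 *	ioctl(fd, DRM_IOCTL_I915_GET_RESET_STATS, &stats);
 *	// stats.batch_active: hangs where this context was found guilty
 *	// stats.batch_pending: resets where it merely had work outstanding
 */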
2613 
2614 /* GEM context-engines iterator: for_each_gem_engine() */
2615 struct intel_context *
2616 i915_gem_engines_iter_next(struct i915_gem_engines_iter *it)
2617 {
2618 	const struct i915_gem_engines *e = it->engines;
2619 	struct intel_context *ctx;
2620 
2621 	if (unlikely(!e))
2622 		return NULL;
2623 
2624 	do {
2625 		if (it->idx >= e->num_engines)
2626 			return NULL;
2627 
2628 		ctx = e->engines[it->idx++];
2629 	} while (!ctx);
2630 
2631 	return ctx;
2632 }
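
/*
 * Illustrative sketch, not part of the driver: in-kernel iteration using the
 * for_each_gem_engine() helper named in the comment above, which calls
 * i915_gem_engines_iter_next() and so silently skips empty slots. The exact
 * macro arguments are as declared in i915_gem_context.h; treat this as a
 * usage sketch rather than a definitive reference.
 *
 *	struct i915_gem_engines_iter it;
 *	struct intel_context *ce;
 *
 *	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
 *		// ... operate on ce ...
 *	}
 *	i915_gem_context_unlock_engines(ctx);
 */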
2633 
2634 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
2635 #include "selftests/mock_context.c"
2636 #include "selftests/i915_gem_context.c"
2637 #endif
2638 
2639 static void i915_global_gem_context_shrink(void)
2640 {
2641 	kmem_cache_shrink(global.slab_luts);
2642 }
2643 
2644 static void i915_global_gem_context_exit(void)
2645 {
2646 	kmem_cache_destroy(global.slab_luts);
2647 }
2648 
2649 static struct i915_global_gem_context global = { {
2650 	.shrink = i915_global_gem_context_shrink,
2651 	.exit = i915_global_gem_context_exit,
2652 } };
2653 
2654 int __init i915_global_gem_context_init(void)
2655 {
2656 	global.slab_luts = KMEM_CACHE(i915_lut_handle, 0);
2657 	if (!global.slab_luts)
2658 		return -ENOMEM;
2659 
2660 	i915_global_register(&global.base);
2661 	return 0;
2662 }
2663