// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include "gem/i915_gem_context.h"
#include "gem/i915_gem_pm.h"

#include "i915_drv.h"
#include "i915_trace.h"

#include "intel_context.h"
#include "intel_engine.h"
#include "intel_engine_pm.h"
#include "intel_ring.h"

static struct kmem_cache *slab_ce;

static struct intel_context *intel_context_alloc(void)
{
        return kmem_cache_zalloc(slab_ce, GFP_KERNEL);
}

static void rcu_context_free(struct rcu_head *rcu)
{
        struct intel_context *ce = container_of(rcu, typeof(*ce), rcu);

        trace_intel_context_free(ce);
        kmem_cache_free(slab_ce, ce);
}

void intel_context_free(struct intel_context *ce)
{
        call_rcu(&ce->rcu, rcu_context_free);
}

struct intel_context *
intel_context_create(struct intel_engine_cs *engine)
{
        struct intel_context *ce;

        ce = intel_context_alloc();
        if (!ce)
                return ERR_PTR(-ENOMEM);

        intel_context_init(ce, engine);
        trace_intel_context_create(ce);
        return ce;
}

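/*
 * Lazily allocate the HW state for this context: the first pin (or an
 * explicit caller) invokes the backend ->alloc() hook, and CONTEXT_ALLOC_BIT
 * records completion so the allocation happens only once, serialised under
 * ce->pin_mutex.
 */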
int intel_context_alloc_state(struct intel_context *ce)
{
        int err = 0;

        if (mutex_lock_interruptible(&ce->pin_mutex))
                return -EINTR;

        if (!test_bit(CONTEXT_ALLOC_BIT, &ce->flags)) {
                if (intel_context_is_banned(ce)) {
                        err = -EIO;
                        goto unlock;
                }

                err = ce->ops->alloc(ce);
                if (unlikely(err))
                        goto unlock;

                set_bit(CONTEXT_ALLOC_BIT, &ce->flags);
        }

unlock:
        mutex_unlock(&ce->pin_mutex);
        return err;
}

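/*
 * Mark the context as active and preallocate the idle-barrier nodes for its
 * engine. Barrier contexts and engines using GuC submission skip the
 * preallocation.
 */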
static int intel_context_active_acquire(struct intel_context *ce)
{
        int err;

        __i915_active_acquire(&ce->active);

        if (intel_context_is_barrier(ce) || intel_engine_uses_guc(ce->engine))
                return 0;

        /* Preallocate tracking nodes */
        err = i915_active_acquire_preallocate_barrier(&ce->active,
                                                      ce->engine);
        if (err)
                i915_active_release(&ce->active);

        return err;
}

static void intel_context_active_release(struct intel_context *ce)
{
        /* Nodes preallocated in intel_context_active_acquire() */
        i915_active_acquire_barrier(&ce->active);
        i915_active_release(&ce->active);
}

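/*
 * Pin the context image into the GGTT above the backend's pin bias and keep
 * its vma active, hiding the object from the shrinker until the context is
 * unpinned again.
 */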
static int __context_pin_state(struct i915_vma *vma, struct i915_gem_ww_ctx *ww)
{
        unsigned int bias = i915_ggtt_pin_bias(vma) | PIN_OFFSET_BIAS;
        int err;

        err = i915_ggtt_pin(vma, ww, 0, bias | PIN_HIGH);
        if (err)
                return err;

        err = i915_active_acquire(&vma->active);
        if (err)
                goto err_unpin;

        /*
         * And mark it as a globally pinned object to let the shrinker know
         * it cannot reclaim the object until we release it.
         */
        i915_vma_make_unshrinkable(vma);
        vma->obj->mm.dirty = true;

        return 0;

err_unpin:
        i915_vma_unpin(vma);
        return err;
}

static void __context_unpin_state(struct i915_vma *vma)
{
        i915_vma_make_shrinkable(vma);
        i915_active_release(&vma->active);
        __i915_vma_unpin(vma);
}

static int __ring_active(struct intel_ring *ring,
                         struct i915_gem_ww_ctx *ww)
{
        int err;

        err = intel_ring_pin(ring, ww);
        if (err)
                return err;

        err = i915_active_acquire(&ring->vma->active);
        if (err)
                goto err_pin;

        return 0;

err_pin:
        intel_ring_unpin(ring);
        return err;
}

static void __ring_retire(struct intel_ring *ring)
{
        i915_active_release(&ring->vma->active);
        intel_ring_unpin(ring);
}

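/*
 * Pin the ring, the timeline and (if present) the context state before
 * ce->pin_mutex is taken, unwinding in reverse order on failure.
 */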
static int intel_context_pre_pin(struct intel_context *ce,
                                 struct i915_gem_ww_ctx *ww)
{
        int err;

        CE_TRACE(ce, "active\n");

        err = __ring_active(ce->ring, ww);
        if (err)
                return err;

        err = intel_timeline_pin(ce->timeline, ww);
        if (err)
                goto err_ring;

        if (!ce->state)
                return 0;

        err = __context_pin_state(ce->state, ww);
        if (err)
                goto err_timeline;

        return 0;

err_timeline:
        intel_timeline_unpin(ce->timeline);
err_ring:
        __ring_retire(ce->ring);
        return err;
}

static void intel_context_post_unpin(struct intel_context *ce)
{
        if (ce->state)
                __context_unpin_state(ce->state);

        intel_timeline_unpin(ce->timeline);
        __ring_retire(ce->ring);
}

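/*
 * Pin the context for use on the HW under an existing ww acquire context:
 * lock the backing objects, pre-pin the ring/timeline/state so that
 * __intel_context_active() never needs dma_resv_lock() under ce->pin_mutex,
 * and perform the first ce->ops->pin() if the context is not already pinned.
 */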
int __intel_context_do_pin_ww(struct intel_context *ce,
                              struct i915_gem_ww_ctx *ww)
{
        bool handoff = false;
        void *vaddr;
        int err = 0;

        if (unlikely(!test_bit(CONTEXT_ALLOC_BIT, &ce->flags))) {
                err = intel_context_alloc_state(ce);
                if (err)
                        return err;
        }

        /*
         * We always pin the context/ring/timeline here, to ensure a pin
         * refcount for __intel_context_active(), which prevents a lock
         * inversion of ce->pin_mutex vs dma_resv_lock().
         */

        err = i915_gem_object_lock(ce->timeline->hwsp_ggtt->obj, ww);
        if (!err && ce->ring->vma->obj)
                err = i915_gem_object_lock(ce->ring->vma->obj, ww);
        if (!err && ce->state)
                err = i915_gem_object_lock(ce->state->obj, ww);
        if (!err)
                err = intel_context_pre_pin(ce, ww);
        if (err)
                return err;

        err = i915_active_acquire(&ce->active);
        if (err)
                goto err_ctx_unpin;

        err = ce->ops->pre_pin(ce, ww, &vaddr);
        if (err)
                goto err_release;

        err = mutex_lock_interruptible(&ce->pin_mutex);
        if (err)
                goto err_post_unpin;

        if (unlikely(intel_context_is_closed(ce))) {
                err = -ENOENT;
                goto err_unlock;
        }

        if (likely(!atomic_add_unless(&ce->pin_count, 1, 0))) {
                err = intel_context_active_acquire(ce);
                if (unlikely(err))
                        goto err_unlock;

                err = ce->ops->pin(ce, vaddr);
                if (err) {
                        intel_context_active_release(ce);
                        goto err_unlock;
                }

                CE_TRACE(ce, "pin ring:{start:%08x, head:%04x, tail:%04x}\n",
                         i915_ggtt_offset(ce->ring->vma),
                         ce->ring->head, ce->ring->tail);

                handoff = true;
                smp_mb__before_atomic(); /* flush pin before it is visible */
                atomic_inc(&ce->pin_count);
        }

        GEM_BUG_ON(!intel_context_is_pinned(ce)); /* no overflow! */

        trace_intel_context_do_pin(ce);

err_unlock:
        mutex_unlock(&ce->pin_mutex);
err_post_unpin:
        if (!handoff)
                ce->ops->post_unpin(ce);
err_release:
        i915_active_release(&ce->active);
err_ctx_unpin:
        intel_context_post_unpin(ce);

        /*
         * Unlock the hwsp_ggtt object since it's shared.
         * In principle we can unlock all the global state locked above
         * since it's pinned and doesn't need fencing, and will
         * thus remain resident until it is explicitly unpinned.
         */
        i915_gem_ww_unlock_single(ce->timeline->hwsp_ggtt->obj);

        return err;
}

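/*
 * As __intel_context_do_pin_ww(), but with a local ww acquire context and
 * the usual -EDEADLK backoff-and-retry loop.
 */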
int __intel_context_do_pin(struct intel_context *ce)
{
        struct i915_gem_ww_ctx ww;
        int err;

        i915_gem_ww_ctx_init(&ww, true);
retry:
        err = __intel_context_do_pin_ww(ce, &ww);
        if (err == -EDEADLK) {
                err = i915_gem_ww_ctx_backoff(&ww);
                if (!err)
                        goto retry;
        }
        i915_gem_ww_ctx_fini(&ww);
        return err;
}

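/*
 * Drop @sub pin references; the final unpin releases the HW state via
 * ce->ops->unpin() and hands the context back to the active tracker for
 * retirement.
 */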
void __intel_context_do_unpin(struct intel_context *ce, int sub)
{
        if (!atomic_sub_and_test(sub, &ce->pin_count))
                return;

        CE_TRACE(ce, "unpin\n");
        ce->ops->unpin(ce);
        ce->ops->post_unpin(ce);

        /*
         * Once released, we may asynchronously drop the active reference.
         * As that may be the only reference keeping the context alive,
         * take an extra reference now so that it is not freed before we
         * finish dereferencing it.
         */
        intel_context_get(ce);
        intel_context_active_release(ce);
        trace_intel_context_do_unpin(ce);
        intel_context_put(ce);
}

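/* i915_active retire callback: runs once ce->active becomes idle. */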
static void __intel_context_retire(struct i915_active *active)
{
        struct intel_context *ce = container_of(active, typeof(*ce), active);

        CE_TRACE(ce, "retire runtime: { total:%lluns, avg:%lluns }\n",
                 intel_context_get_total_runtime_ns(ce),
                 intel_context_get_avg_runtime_ns(ce));

        set_bit(CONTEXT_VALID_BIT, &ce->flags);
        intel_context_post_unpin(ce);
        intel_context_put(ce);
}

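/*
 * i915_active activation callback: take extra pins on the ring, timeline
 * and state that were already pinned by intel_context_pre_pin(), and hold
 * a reference on the context until retirement.
 */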
static int __intel_context_active(struct i915_active *active)
{
        struct intel_context *ce = container_of(active, typeof(*ce), active);

        intel_context_get(ce);

        /* everything should already be activated by intel_context_pre_pin() */
        GEM_WARN_ON(!i915_active_acquire_if_busy(&ce->ring->vma->active));
        __intel_ring_pin(ce->ring);

        __intel_timeline_pin(ce->timeline);

        if (ce->state) {
                GEM_WARN_ON(!i915_active_acquire_if_busy(&ce->state->active));
                __i915_vma_pin(ce->state);
                i915_vma_make_unshrinkable(ce->state);
        }

        return 0;
}

static int __i915_sw_fence_call
sw_fence_dummy_notify(struct i915_sw_fence *sf,
                      enum i915_sw_fence_notify state)
{
        return NOTIFY_DONE;
}

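/*
 * One-time construction of a context for @engine: set up refcounting,
 * locks, GuC bookkeeping and the active tracker. The ring and timeline
 * are allocated later by the submission backend.
 */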
void
intel_context_init(struct intel_context *ce, struct intel_engine_cs *engine)
{
        GEM_BUG_ON(!engine->cops);
        GEM_BUG_ON(!engine->gt->vm);

        kref_init(&ce->ref);

        ce->engine = engine;
        ce->ops = engine->cops;
        ce->sseu = engine->sseu;
        ce->ring = NULL;
        ce->ring_size = SZ_4K;

        ewma_runtime_init(&ce->runtime.avg);

        ce->vm = i915_vm_get(engine->gt->vm);

        /* NB ce->signal_link/lock is used under RCU */
        spin_lock_init(&ce->signal_lock);
        INIT_LIST_HEAD(&ce->signals);

        mutex_init(&ce->pin_mutex);

        spin_lock_init(&ce->guc_state.lock);
        INIT_LIST_HEAD(&ce->guc_state.fences);

        spin_lock_init(&ce->guc_active.lock);
        INIT_LIST_HEAD(&ce->guc_active.requests);

        ce->guc_id = GUC_INVALID_LRC_ID;
        INIT_LIST_HEAD(&ce->guc_id_link);

        /*
         * Initialize the fence as already signalled; it is only expected to
         * be pending while a schedule-disable request is outstanding.
         */
        i915_sw_fence_init(&ce->guc_blocked, sw_fence_dummy_notify);
        i915_sw_fence_commit(&ce->guc_blocked);

        i915_active_init(&ce->active,
                         __intel_context_active, __intel_context_retire, 0);
}

void intel_context_fini(struct intel_context *ce)
{
        if (ce->timeline)
                intel_timeline_put(ce->timeline);
        i915_vm_put(ce->vm);

        mutex_destroy(&ce->pin_mutex);
        i915_active_fini(&ce->active);
        i915_sw_fence_fini(&ce->guc_blocked);
}

void i915_context_module_exit(void)
{
        kmem_cache_destroy(slab_ce);
}

int __init i915_context_module_init(void)
{
        slab_ce = KMEM_CACHE(intel_context, SLAB_HWCACHE_ALIGN);
        if (!slab_ce)
                return -ENOMEM;

        return 0;
}

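/*
 * Context enter/exit: hold an engine-pm wakeref and keep the timeline
 * marked active for as long as the context has requests in flight.
 */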
void intel_context_enter_engine(struct intel_context *ce)
{
        intel_engine_pm_get(ce->engine);
        intel_timeline_enter(ce->timeline);
}

void intel_context_exit_engine(struct intel_context *ce)
{
        intel_timeline_exit(ce->timeline);
        intel_engine_pm_put(ce->engine);
}

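/*
 * Prepare @rq (built on another context) to modify @ce: order it after
 * @ce's last request if the timelines differ, and track it in ce->active
 * so the context image stays pinned until the modification is retired.
 */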
int intel_context_prepare_remote_request(struct intel_context *ce,
                                         struct i915_request *rq)
{
        struct intel_timeline *tl = ce->timeline;
        int err;

        /* Only suitable for use in remotely modifying this context */
        GEM_BUG_ON(rq->context == ce);

        if (rcu_access_pointer(rq->timeline) != tl) { /* timeline sharing! */
                /* Queue this switch after current activity by this context. */
                err = i915_active_fence_set(&tl->last_request, rq);
                if (err)
                        return err;
        }

        /*
         * Guarantee that the context image and the timeline remain pinned
         * until the modifying request is retired, by adding it to the ce
         * activity tracker.
         *
         * Only one pin is needed for this: in other words, transfer the
         * pinned ce to the request being tracked as active.
         */
        GEM_BUG_ON(i915_active_is_idle(&ce->active));
        return i915_active_add_request(&ce->active, rq);
}

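/*
 * Create a request on @ce, pinning the context with a ww retry loop for the
 * duration of request construction. The request is returned with the
 * timeline mutex held, as usual for i915_request_create().
 */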
struct i915_request *intel_context_create_request(struct intel_context *ce)
{
        struct i915_gem_ww_ctx ww;
        struct i915_request *rq;
        int err;

        i915_gem_ww_ctx_init(&ww, true);
retry:
        err = intel_context_pin_ww(ce, &ww);
        if (!err) {
                rq = i915_request_create(ce);
                intel_context_unpin(ce);
        } else if (err == -EDEADLK) {
                err = i915_gem_ww_ctx_backoff(&ww);
                if (!err)
                        goto retry;
                rq = ERR_PTR(err);
        } else {
                rq = ERR_PTR(err);
        }

        i915_gem_ww_ctx_fini(&ww);

        if (IS_ERR(rq))
                return rq;

        /*
         * timeline->mutex should be the inner lock, but is used as the outer
         * lock here. Hack around this to shut up lockdep in selftests.
         */
        lockdep_unpin_lock(&ce->timeline->mutex, rq->cookie);
        mutex_release(&ce->timeline->mutex.dep_map, _RET_IP_);
        mutex_acquire(&ce->timeline->mutex.dep_map, SINGLE_DEPTH_NESTING, 0, _RET_IP_);
        rq->cookie = lockdep_pin_lock(&ce->timeline->mutex);

        return rq;
}

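/*
 * Find the currently active request on @ce: the oldest request that has not
 * yet completed. Only valid for contexts managed by the GuC, whose request
 * list is tracked under ce->guc_active.lock.
 */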
struct i915_request *intel_context_find_active_request(struct intel_context *ce)
{
        struct i915_request *rq, *active = NULL;
        unsigned long flags;

        GEM_BUG_ON(!intel_engine_uses_guc(ce->engine));

        spin_lock_irqsave(&ce->guc_active.lock, flags);
        list_for_each_entry_reverse(rq, &ce->guc_active.requests,
                                    sched.link) {
                if (i915_request_completed(rq))
                        break;

                active = rq;
        }
        spin_unlock_irqrestore(&ce->guc_active.lock, flags);

        return active;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_context.c"
#endif