/*
 * Excerpts from drivers/gpu/drm/i915/gt/intel_context.c: the lines that
 * reference "ce" (the struct intel_context being operated on), grouped
 * by function. Elided code is marked "...".
 */

static void rcu_context_free(struct rcu_head *rcu)
{
	struct intel_context *ce = container_of(rcu, typeof(*ce), rcu);

	trace_intel_context_free(ce);
	kmem_cache_free(slab_ce, ce);
}

void intel_context_free(struct intel_context *ce)
{
	call_rcu(&ce->rcu, rcu_context_free);
}
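
/*
 * Freeing is deferred through call_rcu(): lockless readers that still
 * hold an RCU-protected pointer to the context keep seeing valid memory
 * until a grace period elapses, after which rcu_context_free() returns
 * the object to the slab_ce kmem cache.
 */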

struct intel_context *intel_context_create(struct intel_engine_cs *engine)
{
	struct intel_context *ce;

	ce = intel_context_alloc();
	if (!ce)
		return ERR_PTR(-ENOMEM);

	intel_context_init(ce, engine);
	trace_intel_context_create(ce);
	return ce;
}
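
/*
 * The context returned here is reference counted (kref_init() in
 * intel_context_init() below); callers drop it with intel_context_put().
 */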

int intel_context_alloc_state(struct intel_context *ce)
{
	int err = 0;

	if (mutex_lock_interruptible(&ce->pin_mutex))
		return -EINTR;

	if (!test_bit(CONTEXT_ALLOC_BIT, &ce->flags)) {
		if (intel_context_is_banned(ce)) {
			/* ... fails without allocating ... */
		}
		err = ce->ops->alloc(ce);
		/* ... */
		set_bit(CONTEXT_ALLOC_BIT, &ce->flags);
	}
	/* ... */
	mutex_unlock(&ce->pin_mutex);
	return err;
}
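
/*
 * Lazy one-shot allocation: the backend's ce->ops->alloc() hook runs at
 * most once per context, serialized by pin_mutex and latched with
 * CONTEXT_ALLOC_BIT; contexts that have been banned are refused before
 * any state is allocated.
 */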

static int intel_context_active_acquire(struct intel_context *ce)
{
	int err;

	__i915_active_acquire(&ce->active);

	if (intel_context_is_barrier(ce) || intel_engine_uses_guc(ce->engine))
		return 0;

	/* Preallocate tracking nodes */
	err = i915_active_acquire_preallocate_barrier(&ce->active,
						      ce->engine);
	if (err)
		i915_active_release(&ce->active);
	return err;
}

static void intel_context_active_release(struct intel_context *ce)
{
	i915_active_acquire_barrier(&ce->active);
	i915_active_release(&ce->active);
}
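
/*
 * Acquire/release pair for ce->active. On execlists engines the acquire
 * preallocates barrier nodes so that the matching release, which flushes
 * the barriers with i915_active_acquire_barrier(), does not have to
 * allocate at a point where failure could no longer be handled; barrier
 * contexts and GuC submission skip the preallocation.
 */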

static int intel_context_pre_pin(struct intel_context *ce,
				 struct i915_gem_ww_ctx *ww)
{
	CE_TRACE(ce, "active\n");

	err = __ring_active(ce->ring, ww);
	/* ... */
	err = intel_timeline_pin(ce->timeline, ww);
	/* ... */
	if (!ce->state)
		return 0;

	err = __context_pin_state(ce->state, ww);
	/* ... on error, unwind in reverse: */
	intel_timeline_unpin(ce->timeline);
	/* ... */
	__ring_retire(ce->ring);
	/* ... */
}

static void intel_context_post_unpin(struct intel_context *ce)
{
	if (ce->state)
		__context_unpin_state(ce->state);

	intel_timeline_unpin(ce->timeline);
	__ring_retire(ce->ring);
}
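
/*
 * intel_context_pre_pin() and intel_context_post_unpin() are symmetric:
 * the ring, the timeline and (if the backend allocated one) the context
 * state object are acquired in order and released in reverse order.
 */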

int __intel_context_do_pin_ww(struct intel_context *ce,
			      struct i915_gem_ww_ctx *ww)
{
	if (unlikely(!test_bit(CONTEXT_ALLOC_BIT, &ce->flags))) {
		err = intel_context_alloc_state(ce);
		if (err)
			return err;
	}

	/* Lock every object the pin may touch under the ww context. */
	err = i915_gem_object_lock(ce->timeline->hwsp_ggtt->obj, ww);
	if (!err && ce->ring->vma->obj)
		err = i915_gem_object_lock(ce->ring->vma->obj, ww);
	if (!err && ce->state)
		err = i915_gem_object_lock(ce->state->obj, ww);
	if (!err)
		err = intel_context_pre_pin(ce, ww);
	if (err)
		return err;

	err = i915_active_acquire(&ce->active);
	/* ... */
	err = ce->ops->pre_pin(ce, ww, &vaddr);
	/* ... */
	err = mutex_lock_interruptible(&ce->pin_mutex);
	/* ... */
	if (unlikely(intel_context_is_closed(ce))) {
		err = -ENOENT;
		goto err_unlock;
	}

	/* Only the first pin does real work; later pins bump the count. */
	if (likely(!atomic_add_unless(&ce->pin_count, 1, 0))) {
		err = intel_context_active_acquire(ce);
		/* ... */
		err = ce->ops->pin(ce, vaddr);
		if (err) {
			intel_context_active_release(ce);
			goto err_unlock;
		}

		CE_TRACE(ce, "pin ring:{start:%08x, head:%04x, tail:%04x}\n",
			 i915_ggtt_offset(ce->ring->vma),
			 ce->ring->head, ce->ring->tail);
		/* ... */
		atomic_inc(&ce->pin_count);
	}

	GEM_BUG_ON(!intel_context_is_pinned(ce)); /* no overflow! */

	trace_intel_context_do_pin(ce);

err_unlock:
	mutex_unlock(&ce->pin_mutex);
	/* ... skipped once the backend owns the mapping: */
	ce->ops->post_unpin(ce);
	/* ... */
	i915_active_release(&ce->active);
	/* ... */
	intel_context_post_unpin(ce);
	/* The hwsp object is shared, so unlock it eagerly: */
	i915_gem_ww_unlock_single(ce->timeline->hwsp_ggtt->obj);
	/* ... */
}
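
/*
 * Pin order: allocate backend state if needed, ww-lock every object
 * involved, take temporary active/backing references, then under
 * pin_mutex either bump pin_count (already pinned) or perform the first
 * pin via ce->ops->pin(). The tail of the function drops the temporary
 * references on both the success and the error paths.
 */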

int __intel_context_do_pin(struct intel_context *ce)
{
	/* ... */
	err = __intel_context_do_pin_ww(ce, &ww);
	/* ... */
}
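
/*
 * The elided body is i915's usual ww-mutex retry loop; a sketch of that
 * pattern (not a verbatim copy of the elided lines):
 *
 *	struct i915_gem_ww_ctx ww;
 *
 *	i915_gem_ww_ctx_init(&ww, true);
 * retry:
 *	err = __intel_context_do_pin_ww(ce, &ww);
 *	if (err == -EDEADLK) {
 *		err = i915_gem_ww_ctx_backoff(&ww);
 *		if (!err)
 *			goto retry;
 *	}
 *	i915_gem_ww_ctx_fini(&ww);
 */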

void __intel_context_do_unpin(struct intel_context *ce, int sub)
{
	if (!atomic_sub_and_test(sub, &ce->pin_count))
		return;

	CE_TRACE(ce, "unpin\n");
	ce->ops->unpin(ce);
	ce->ops->post_unpin(ce);

	/* ... */
	intel_context_get(ce);
	intel_context_active_release(ce);
	trace_intel_context_do_unpin(ce);
	intel_context_put(ce);
}
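
/*
 * The get/put pair around intel_context_active_release() is deliberate:
 * dropping the active reference may be the last thing keeping the
 * context alive, so an extra reference is held until the release has
 * finished with the context.
 */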

static void __intel_context_retire(struct i915_active *active)
{
	struct intel_context *ce = container_of(active, typeof(*ce), active);

	CE_TRACE(ce, "retire runtime: { total:%lluns, avg:%lluns }\n",
		 intel_context_get_total_runtime_ns(ce),
		 intel_context_get_avg_runtime_ns(ce));

	set_bit(CONTEXT_VALID_BIT, &ce->flags);
	intel_context_post_unpin(ce);
	intel_context_put(ce);
}
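
/*
 * Retire callback for ce->active: once the context has executed, its
 * saved register state is valid, so CONTEXT_VALID_BIT is set before the
 * backing objects are unpinned and the activation reference (taken in
 * __intel_context_active()) is dropped.
 */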

static int __intel_context_active(struct i915_active *active)
{
	struct intel_context *ce = container_of(active, typeof(*ce), active);

	intel_context_get(ce);

	/* everything should already be activated by intel_context_pre_pin() */
	GEM_WARN_ON(!i915_active_acquire_if_busy(&ce->ring->vma->active));
	__intel_ring_pin(ce->ring);

	__intel_timeline_pin(ce->timeline);

	if (ce->state) {
		GEM_WARN_ON(!i915_active_acquire_if_busy(&ce->state->active));
		__i915_vma_pin(ce->state);
		i915_vma_make_unshrinkable(ce->state);
	}
	/* ... */
}
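
/*
 * Activation callback for ce->active: intel_context_pre_pin() has
 * already pinned the ring, timeline and state, so the __-prefixed
 * helpers here only take extra references that cannot fail, and
 * i915_vma_make_unshrinkable() keeps the context image resident while
 * the context is active.
 */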

void
intel_context_init(struct intel_context *ce, struct intel_engine_cs *engine)
{
	/* ... */
	kref_init(&ce->ref);

	ce->engine = engine;
	ce->ops = engine->cops;
	ce->sseu = engine->sseu;
	ce->ring = NULL;
	ce->ring_size = SZ_4K;

	ewma_runtime_init(&ce->runtime.avg);

	ce->vm = i915_vm_get(engine->gt->vm);

	/* ... */
	spin_lock_init(&ce->signal_lock);
	INIT_LIST_HEAD(&ce->signals);

	mutex_init(&ce->pin_mutex);

	spin_lock_init(&ce->guc_state.lock);
	INIT_LIST_HEAD(&ce->guc_state.fences);

	spin_lock_init(&ce->guc_active.lock);
	INIT_LIST_HEAD(&ce->guc_active.requests);

	ce->guc_id = GUC_INVALID_LRC_ID;
	INIT_LIST_HEAD(&ce->guc_id_link);

	/* ... */
	i915_sw_fence_init(&ce->guc_blocked, sw_fence_dummy_notify);
	i915_sw_fence_commit(&ce->guc_blocked);

	i915_active_init(&ce->active,
			 __intel_context_active, __intel_context_retire, 0);
}
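
/*
 * Note the GuC-submission fields set up here (guc_state, guc_active,
 * guc_id, guc_blocked): guc_id starts as GUC_INVALID_LRC_ID until the
 * context is registered with the GuC, and the guc_blocked fence is
 * committed immediately so that it begins in the signaled state.
 */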

void intel_context_fini(struct intel_context *ce)
{
	if (ce->timeline)
		intel_timeline_put(ce->timeline);
	i915_vm_put(ce->vm);

	mutex_destroy(&ce->pin_mutex);
	i915_active_fini(&ce->active);
	i915_sw_fence_fini(&ce->guc_blocked);
}
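
/*
 * Typical lifecycle, as a sketch (error handling elided; "engine" is a
 * caller-provided struct intel_engine_cs *):
 *
 *	struct intel_context *ce;
 *
 *	ce = intel_context_create(engine);	// kref == 1
 *	err = intel_context_pin(ce);		// -> __intel_context_do_pin()
 *	// ... submit work with i915_request_create(ce) ...
 *	intel_context_unpin(ce);		// -> __intel_context_do_unpin()
 *	intel_context_put(ce);			// final put frees via call_rcu()
 */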

void intel_context_enter_engine(struct intel_context *ce)
{
	intel_engine_pm_get(ce->engine);
	intel_timeline_enter(ce->timeline);
}

void intel_context_exit_engine(struct intel_context *ce)
{
	intel_timeline_exit(ce->timeline);
	intel_engine_pm_put(ce->engine);
}
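
/*
 * enter/exit mirror each other in reverse order: an engine power
 * management reference is held for as long as the context's timeline is
 * marked active, keeping the engine awake while requests may execute.
 */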

int intel_context_prepare_remote_request(struct intel_context *ce,
					 struct i915_request *rq)
{
	struct intel_timeline *tl = ce->timeline;

	/* Only suitable for use in remotely modifying this context */
	GEM_BUG_ON(rq->context == ce);
	/* ... order rq against tl before touching the context ... */
	GEM_BUG_ON(i915_active_is_idle(&ce->active));
	return i915_active_add_request(&ce->active, rq);
}
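
/*
 * Used when one context's request rewrites another context's state
 * (hence the GEM_BUG_ON(rq->context == ce)): the request is ordered
 * against this context's timeline and then tracked in ce->active so the
 * modification completes before the context is retired or unpinned.
 */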

struct i915_request *intel_context_create_request(struct intel_context *ce)
{
	/* ... */
	err = intel_context_pin_ww(ce, &ww);
	if (!err) {
		rq = i915_request_create(ce);
		intel_context_unpin(ce);
	}
	/* ... ww backoff/retry and error unwinding ... */

	/* timeline->mutex is used as the outer lock; re-record it for lockdep: */
	lockdep_unpin_lock(&ce->timeline->mutex, rq->cookie);
	mutex_release(&ce->timeline->mutex.dep_map, _RET_IP_);
	mutex_acquire(&ce->timeline->mutex.dep_map, SINGLE_DEPTH_NESTING, 0, _RET_IP_);
	rq->cookie = lockdep_pin_lock(&ce->timeline->mutex);

	return rq;
}
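
/*
 * i915_request_create() returns with ce->timeline->mutex held. The
 * lockdep release/re-acquire above does not change which lock is held;
 * it only re-annotates the mutex with SINGLE_DEPTH_NESTING so that the
 * caller may nest a second timeline lock without a false positive.
 */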

struct i915_request *intel_context_find_active_request(struct intel_context *ce)
{
	/* ... */
	GEM_BUG_ON(!intel_engine_uses_guc(ce->engine));

	spin_lock_irqsave(&ce->guc_active.lock, flags);
	list_for_each_entry_reverse(rq, &ce->guc_active.requests,
				    sched.link) {
		/* ... remember the oldest request not yet completed ... */
	}
	spin_unlock_irqrestore(&ce->guc_active.lock, flags);
	/* ... */
}
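
/*
 * With GuC submission the engine no longer tracks its own request list,
 * so the per-context guc_active.requests list is walked instead: the
 * reverse walk under guc_active.lock ends on the oldest request that
 * has not completed, i.e. the one most likely executing on the hardware.
 */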