Lines Matching refs:rq (references to struct i915_request *rq in drivers/gpu/drm/i915/i915_request.c)

111 struct i915_request *rq = to_request(fence); in i915_fence_release() local
113 GEM_BUG_ON(rq->guc_prio != GUC_PRIO_INIT && in i915_fence_release()
114 rq->guc_prio != GUC_PRIO_FINI); in i915_fence_release()
123 i915_sw_fence_fini(&rq->submit); in i915_fence_release()
124 i915_sw_fence_fini(&rq->semaphore); in i915_fence_release()
131 if (!intel_engine_is_virtual(rq->engine) && in i915_fence_release()
132 !cmpxchg(&rq->engine->request_pool, NULL, rq)) { in i915_fence_release()
133 intel_context_put(rq->context); in i915_fence_release()
137 intel_context_put(rq->context); in i915_fence_release()
139 kmem_cache_free(slab_requests, rq); in i915_fence_release()
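
The release path above recycles a freed request into a one-deep per-engine pool (lines 131-133): a cmpxchg() parks the request in engine->request_pool if the slot is empty, and only an occupied slot falls through to kmem_cache_free() (line 139). A minimal userspace sketch of that single-slot recycling pattern, with C11 atomics standing in for cmpxchg() and malloc()/free() for the slab (all names here are illustrative):

    #include <stdatomic.h>
    #include <stdlib.h>

    struct request { int id; };

    static _Atomic(struct request *) request_pool;  /* one-deep cache */

    static void request_release(struct request *rq)
    {
            struct request *empty = NULL;

            /* cmpxchg(&pool, NULL, rq): park rq only if the slot is free */
            if (!atomic_compare_exchange_strong(&request_pool, &empty, rq))
                    free(rq);               /* slot occupied: really free */
    }

    int main(void)
    {
            struct request *a = malloc(sizeof(*a));
            struct request *b = malloc(sizeof(*b));

            request_release(a);             /* parked in the pool slot */
            request_release(b);             /* slot taken, freed now */
            free(atomic_exchange(&request_pool, NULL));  /* drain a */
            return 0;
    }
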
160 __notify_execute_cb(struct i915_request *rq, bool (*fn)(struct irq_work *wrk)) in __notify_execute_cb() argument
164 if (llist_empty(&rq->execute_cb)) in __notify_execute_cb()
168 llist_del_all(&rq->execute_cb), in __notify_execute_cb()
173 static void __notify_execute_cb_irq(struct i915_request *rq) in __notify_execute_cb_irq() argument
175 __notify_execute_cb(rq, irq_work_queue); in __notify_execute_cb_irq()
184 void i915_request_notify_execute_cb_imm(struct i915_request *rq) in i915_request_notify_execute_cb_imm() argument
186 __notify_execute_cb(rq, irq_work_imm); in i915_request_notify_execute_cb_imm()
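
Both notify variants funnel into __notify_execute_cb(), which atomically detaches the entire execute_cb list with llist_del_all() (line 168) and hands the nodes to the supplied worker, queued as irq_work or run immediately. A hedged userspace analogue of the detach-then-walk idiom, with a CAS push standing in for llist_add() (names invented):

    #include <stdatomic.h>
    #include <stdio.h>

    struct cb {
            struct cb *next;
            int id;
    };

    static _Atomic(struct cb *) execute_cb;     /* lock-free list head */

    static void cb_add(struct cb *node)         /* llist_add() analogue */
    {
            node->next = atomic_load(&execute_cb);
            while (!atomic_compare_exchange_weak(&execute_cb,
                                                 &node->next, node))
                    ;                           /* retry with fresh head */
    }

    static void notify_all(void (*run)(struct cb *))
    {
            /* llist_del_all() analogue: take the whole chain atomically,
             * then walk it without further contention */
            struct cb *node = atomic_exchange(&execute_cb, NULL);

            while (node) {
                    struct cb *next = node->next;

                    run(node);                  /* queued or immediate */
                    node = next;
            }
    }

    static void print_cb(struct cb *node) { printf("cb %d\n", node->id); }

    int main(void)
    {
            struct cb a = { .id = 1 }, b = { .id = 2 };

            cb_add(&a);
            cb_add(&b);
            notify_all(print_cb);               /* LIFO: prints 2 then 1 */
            return 0;
    }
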
202 static void __i915_request_fill(struct i915_request *rq, u8 val) in __i915_request_fill() argument
204 void *vaddr = rq->ring->vaddr; in __i915_request_fill()
207 head = rq->infix; in __i915_request_fill()
208 if (rq->postfix < head) { in __i915_request_fill()
209 memset(vaddr + head, val, rq->ring->size - head); in __i915_request_fill()
212 memset(vaddr + head, val, rq->postfix - head); in __i915_request_fill()
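
__i915_request_fill() poisons the span of the ring a request occupied, and that span may wrap past the end of the buffer (rq->postfix < head, line 208), so the fill splits into at most two memset() calls. A standalone sketch with an invented 64-byte ring:

    #include <stdio.h>
    #include <string.h>

    #define RING_SIZE 64

    static void ring_fill(char *vaddr, unsigned int head,
                          unsigned int postfix, char val)
    {
            if (postfix < head) {       /* span wraps around the end */
                    memset(vaddr + head, val, RING_SIZE - head);
                    head = 0;
            }
            memset(vaddr + head, val, postfix - head);
    }

    int main(void)
    {
            char ring[RING_SIZE] = { 0 };

            ring_fill(ring, 56, 8, '*');    /* 8 bytes at the tail, 8 at the head */
            printf("%c %c\n", ring[60], ring[4]);   /* both '*' */
            return 0;
    }
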
226 i915_request_active_engine(struct i915_request *rq, in i915_request_active_engine() argument
239 locked = READ_ONCE(rq->engine); in i915_request_active_engine()
241 while (unlikely(locked != (engine = READ_ONCE(rq->engine)))) { in i915_request_active_engine()
247 if (i915_request_is_active(rq)) { in i915_request_active_engine()
248 if (!__i915_request_is_complete(rq)) in i915_request_active_engine()
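
i915_request_active_engine() must lock rq->engine while that pointer can be rewritten underneath it (a virtual request migrates between physical engines), hence the re-read-and-retry loop on lines 239-241. A userspace analogue of the lock-a-moving-target idiom, with pthread mutexes standing in for the engine lock:

    #include <pthread.h>
    #include <stdatomic.h>

    struct engine {
            pthread_mutex_t lock;
    };

    struct request {
            _Atomic(struct engine *) engine;    /* may be retargeted */
    };

    /* Lock whichever engine the request currently points at: the pointer
     * can change between the read and the lock, so re-read it under the
     * lock and retry until it is stable. */
    static struct engine *lock_request_engine(struct request *rq)
    {
            struct engine *locked = atomic_load(&rq->engine);
            struct engine *engine;

            pthread_mutex_lock(&locked->lock);
            while ((engine = atomic_load(&rq->engine)) != locked) {
                    pthread_mutex_unlock(&locked->lock);
                    locked = engine;
                    pthread_mutex_lock(&locked->lock);
            }
            return locked;                      /* caller must unlock */
    }

    int main(void)
    {
            struct engine e = { PTHREAD_MUTEX_INITIALIZER };
            struct request rq = { &e };

            pthread_mutex_unlock(&lock_request_engine(&rq)->lock);
            return 0;
    }
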
258 static void __rq_init_watchdog(struct i915_request *rq) in __rq_init_watchdog() argument
260 rq->watchdog.timer.function = NULL; in __rq_init_watchdog()
265 struct i915_request *rq = in __rq_watchdog_expired() local
267 struct intel_gt *gt = rq->engine->gt; in __rq_watchdog_expired()
269 if (!i915_request_completed(rq)) { in __rq_watchdog_expired()
270 if (llist_add(&rq->watchdog.link, &gt->watchdog.list)) in __rq_watchdog_expired()
273 i915_request_put(rq); in __rq_watchdog_expired()
279 static void __rq_arm_watchdog(struct i915_request *rq) in __rq_arm_watchdog() argument
281 struct i915_request_watchdog *wdg = &rq->watchdog; in __rq_arm_watchdog()
282 struct intel_context *ce = rq->context; in __rq_arm_watchdog()
287 i915_request_get(rq); in __rq_arm_watchdog()
298 static void __rq_cancel_watchdog(struct i915_request *rq) in __rq_cancel_watchdog() argument
300 struct i915_request_watchdog *wdg = &rq->watchdog; in __rq_cancel_watchdog()
303 i915_request_put(rq); in __rq_cancel_watchdog()
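
The watchdog helpers pair an hrtimer with request refcounting: arming takes a reference (line 287) so the expiry callback may safely dereference rq; on expiry an incomplete request is deferred to gt->watchdog.list for a worker to cancel, otherwise the reference is dropped (lines 269-273); cancelling drops it only if the timer had not fired (line 303). A hedged kernel-style sketch of that contract; the timeout plumbing and exact hrtimer flags are assumptions, not the driver's verbatim code:

    /* Arm: the timer owns one reference on the request. */
    static void rq_arm_watchdog_sketch(struct i915_request *rq, u64 timeout_ns)
    {
            struct i915_request_watchdog *wdg = &rq->watchdog;

            i915_request_get(rq);
            hrtimer_init(&wdg->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
            wdg->timer.function = __rq_watchdog_expired;
            hrtimer_start(&wdg->timer, ns_to_ktime(timeout_ns),
                          HRTIMER_MODE_REL);
    }

    /* Cancel: timer.function doubles as the "armed" flag (line 260);
     * drop the timer's reference only if it had not already fired. */
    static void rq_cancel_watchdog_sketch(struct i915_request *rq)
    {
            struct i915_request_watchdog *wdg = &rq->watchdog;

            if (wdg->timer.function && hrtimer_try_to_cancel(&wdg->timer) > 0)
                    i915_request_put(rq);
    }
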
306 bool i915_request_retire(struct i915_request *rq) in i915_request_retire() argument
308 if (!__i915_request_is_complete(rq)) in i915_request_retire()
311 RQ_TRACE(rq, "\n"); in i915_request_retire()
313 GEM_BUG_ON(!i915_sw_fence_signaled(&rq->submit)); in i915_request_retire()
314 trace_i915_request_retire(rq); in i915_request_retire()
315 i915_request_mark_complete(rq); in i915_request_retire()
317 __rq_cancel_watchdog(rq); in i915_request_retire()
328 GEM_BUG_ON(!list_is_first(&rq->link, in i915_request_retire()
329 &i915_request_timeline(rq)->requests)); in i915_request_retire()
332 __i915_request_fill(rq, POISON_FREE); in i915_request_retire()
333 rq->ring->head = rq->postfix; in i915_request_retire()
335 if (!i915_request_signaled(rq)) { in i915_request_retire()
336 spin_lock_irq(&rq->lock); in i915_request_retire()
337 dma_fence_signal_locked(&rq->fence); in i915_request_retire()
338 spin_unlock_irq(&rq->lock); in i915_request_retire()
341 if (test_and_set_bit(I915_FENCE_FLAG_BOOST, &rq->fence.flags)) in i915_request_retire()
342 atomic_dec(&rq->engine->gt->rps.num_waiters); in i915_request_retire()
354 rq->engine->remove_active_request(rq); in i915_request_retire()
355 GEM_BUG_ON(!llist_empty(&rq->execute_cb)); in i915_request_retire()
357 __list_del_entry(&rq->link); /* poison neither prev/next (RCU walks) */ in i915_request_retire()
359 intel_context_exit(rq->context); in i915_request_retire()
360 intel_context_unpin(rq->context); in i915_request_retire()
362 free_capture_list(rq); in i915_request_retire()
363 i915_sched_node_fini(&rq->sched); in i915_request_retire()
364 i915_request_put(rq); in i915_request_retire()
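
Within i915_request_retire(), the fence is signalled at most once: lines 335-338 guard dma_fence_signal_locked() with a signalled check under rq->lock, which is the very lock handed to dma_fence_init() on line 910, as the _locked dma-fence API requires. Annotated excerpt:

    if (!i915_request_signaled(rq)) {   /* not yet signalled? */
            spin_lock_irq(&rq->lock);   /* rq->lock == rq->fence.lock,
                                         * see dma_fence_init(), line 910 */
            dma_fence_signal_locked(&rq->fence);
            spin_unlock_irq(&rq->lock);
    }
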
369 void i915_request_retire_upto(struct i915_request *rq) in i915_request_retire_upto() argument
371 struct intel_timeline * const tl = i915_request_timeline(rq); in i915_request_retire_upto()
374 RQ_TRACE(rq, "\n"); in i915_request_retire_upto()
375 GEM_BUG_ON(!__i915_request_is_complete(rq)); in i915_request_retire_upto()
380 } while (i915_request_retire(tmp) && tmp != rq); in i915_request_retire_upto()
391 struct i915_request * const *port, *rq; in __request_in_flight() local
438 (rq = READ_ONCE(*port)); /* may race with promotion of pending[] */ in __request_in_flight()
440 if (rq->context == signal->context) { in __request_in_flight()
441 inflight = i915_seqno_passed(rq->fence.seqno, in __request_in_flight()
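
The in-flight test on line 441 leans on i915_seqno_passed(), a wraparound-safe comparison of 32-bit hardware seqnos: subtract in unsigned arithmetic, then test the sign. Its core trick as a standalone program:

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    /* True if seqno a is at or after seqno b, even across overflow. */
    static bool seqno_passed(uint32_t a, uint32_t b)
    {
            return (int32_t)(a - b) >= 0;
    }

    int main(void)
    {
            assert(seqno_passed(5, 3));
            assert(!seqno_passed(3, 5));
            assert(seqno_passed(2, UINT32_MAX - 2));    /* across the wrap */
            return 0;
    }
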
452 __await_execution(struct i915_request *rq, in __await_execution() argument
465 cb->fence = &rq->submit; in __await_execution()
503 void __i915_request_skip(struct i915_request *rq) in __i915_request_skip() argument
505 GEM_BUG_ON(!fatal_error(rq->fence.error)); in __i915_request_skip()
507 if (rq->infix == rq->postfix) in __i915_request_skip()
510 RQ_TRACE(rq, "error: %d\n", rq->fence.error); in __i915_request_skip()
517 __i915_request_fill(rq, 0); in __i915_request_skip()
518 rq->infix = rq->postfix; in __i915_request_skip()
521 bool i915_request_set_error_once(struct i915_request *rq, int error) in i915_request_set_error_once() argument
527 if (i915_request_signaled(rq)) in i915_request_set_error_once()
530 old = READ_ONCE(rq->fence.error); in i915_request_set_error_once()
534 } while (!try_cmpxchg(&rq->fence.error, &old, error)); in i915_request_set_error_once()
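
i915_request_set_error_once() records only the first error set on the fence; later callers observe a non-zero value and bail, with no lock, via the read/compare-exchange loop on lines 530-534. A minimal userspace rendition:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static bool set_error_once(_Atomic int *error, int new_error)
    {
            int old = atomic_load(error);

            do {
                    if (old)            /* an error is already recorded */
                            return false;
            } while (!atomic_compare_exchange_weak(error, &old, new_error));

            return true;
    }

    int main(void)
    {
            _Atomic int fence_error = 0;

            printf("%d\n", set_error_once(&fence_error, -5));   /* 1: wins */
            printf("%d\n", set_error_once(&fence_error, -125)); /* 0: kept */
            return 0;
    }
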
539 struct i915_request *i915_request_mark_eio(struct i915_request *rq) in i915_request_mark_eio() argument
541 if (__i915_request_is_complete(rq)) in i915_request_mark_eio()
544 GEM_BUG_ON(i915_request_signaled(rq)); in i915_request_mark_eio()
547 rq = i915_request_get(rq); in i915_request_mark_eio()
549 i915_request_set_error_once(rq, -EIO); in i915_request_mark_eio()
550 i915_request_mark_complete(rq); in i915_request_mark_eio()
552 return rq; in i915_request_mark_eio()
712 void i915_request_cancel(struct i915_request *rq, int error) in i915_request_cancel() argument
714 if (!i915_request_set_error_once(rq, error)) in i915_request_cancel()
717 set_bit(I915_FENCE_FLAG_SENTINEL, &rq->fence.flags); in i915_request_cancel()
719 intel_context_cancel_request(rq->context, rq); in i915_request_cancel()
761 struct i915_request *rq = container_of(fence, typeof(*rq), semaphore); in semaphore_notify() local
768 i915_request_put(rq); in semaphore_notify()
777 struct i915_request *rq, *rn; in retire_requests() local
779 list_for_each_entry_safe(rq, rn, &tl->requests, link) in retire_requests()
780 if (!i915_request_retire(rq)) in retire_requests()
789 struct i915_request *rq; in request_alloc_slow() local
793 rq = xchg(rsvd, NULL); in request_alloc_slow()
794 if (!rq) /* Use the normal failure path for one final WARN */ in request_alloc_slow()
797 return rq; in request_alloc_slow()
804 rq = list_first_entry(&tl->requests, typeof(*rq), link); in request_alloc_slow()
805 i915_request_retire(rq); in request_alloc_slow()
807 rq = kmem_cache_alloc(slab_requests, in request_alloc_slow()
809 if (rq) in request_alloc_slow()
810 return rq; in request_alloc_slow()
813 rq = list_last_entry(&tl->requests, typeof(*rq), link); in request_alloc_slow()
814 cond_synchronize_rcu(rq->rcustate); in request_alloc_slow()
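
request_alloc_slow() escalates in stages: reuse the reserved pool slot (line 793), retire the oldest request to recycle its memory (lines 804-805), retry the slab (line 807), and as a last resort wait until an RCU grace period has elapsed since the newest request recorded its cookie (lines 813-814; the cookie comes from get_state_synchronize_rcu(), line 917). A hedged sketch of only that final stage; the function shape is an assumption:

    static struct i915_request *
    request_alloc_last_resort(unsigned long rcustate, gfp_t gfp)
    {
            /* No-op if a grace period already elapsed since the cookie
             * was taken; otherwise block until one completes, so that
             * RCU-deferred frees can be reused by the slab. */
            cond_synchronize_rcu(rcustate);

            return kmem_cache_alloc(slab_requests, gfp);
    }
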
825 struct i915_request *rq = arg; in __i915_request_ctor() local
827 spin_lock_init(&rq->lock); in __i915_request_ctor()
828 i915_sched_node_init(&rq->sched); in __i915_request_ctor()
829 i915_sw_fence_init(&rq->submit, submit_notify); in __i915_request_ctor()
830 i915_sw_fence_init(&rq->semaphore, semaphore_notify); in __i915_request_ctor()
832 rq->capture_list = NULL; in __i915_request_ctor()
834 init_llist_head(&rq->execute_cb); in __i915_request_ctor()
841 struct i915_request *rq; in __i915_request_create() local
879 rq = kmem_cache_alloc(slab_requests, in __i915_request_create()
881 if (unlikely(!rq)) { in __i915_request_create()
882 rq = request_alloc_slow(tl, &ce->engine->request_pool, gfp); in __i915_request_create()
883 if (!rq) { in __i915_request_create()
901 rq->context = intel_context_get(ce); in __i915_request_create()
902 rq->engine = ce->engine; in __i915_request_create()
903 rq->ring = ce->ring; in __i915_request_create()
904 rq->execution_mask = ce->engine->mask; in __i915_request_create()
906 ret = intel_timeline_get_seqno(tl, rq, &seqno); in __i915_request_create()
910 dma_fence_init(&rq->fence, &i915_fence_ops, &rq->lock, in __i915_request_create()
913 RCU_INIT_POINTER(rq->timeline, tl); in __i915_request_create()
914 rq->hwsp_seqno = tl->hwsp_seqno; in __i915_request_create()
915 GEM_BUG_ON(__i915_request_is_complete(rq)); in __i915_request_create()
917 rq->rcustate = get_state_synchronize_rcu(); /* acts as smp_mb() */ in __i915_request_create()
919 rq->guc_prio = GUC_PRIO_INIT; in __i915_request_create()
922 i915_sw_fence_reinit(&i915_request_get(rq)->submit); in __i915_request_create()
923 i915_sw_fence_reinit(&i915_request_get(rq)->semaphore); in __i915_request_create()
925 i915_sched_node_reinit(&rq->sched); in __i915_request_create()
928 rq->batch = NULL; in __i915_request_create()
929 __rq_init_watchdog(rq); in __i915_request_create()
930 GEM_BUG_ON(rq->capture_list); in __i915_request_create()
931 GEM_BUG_ON(!llist_empty(&rq->execute_cb)); in __i915_request_create()
945 rq->reserved_space = in __i915_request_create()
946 2 * rq->engine->emit_fini_breadcrumb_dw * sizeof(u32); in __i915_request_create()
954 rq->head = rq->ring->emit; in __i915_request_create()
956 ret = rq->engine->request_alloc(rq); in __i915_request_create()
960 rq->infix = rq->ring->emit; /* end of header; start of user payload */ in __i915_request_create()
963 list_add_tail_rcu(&rq->link, &tl->requests); in __i915_request_create()
965 return rq; in __i915_request_create()
968 ce->ring->emit = rq->head; in __i915_request_create()
971 GEM_BUG_ON(!list_empty(&rq->sched.signalers_list)); in __i915_request_create()
972 GEM_BUG_ON(!list_empty(&rq->sched.waiters_list)); in __i915_request_create()
976 kmem_cache_free(slab_requests, rq); in __i915_request_create()
985 struct i915_request *rq; in i915_request_create() local
993 rq = list_first_entry(&tl->requests, typeof(*rq), link); in i915_request_create()
994 if (!list_is_last(&rq->link, &tl->requests)) in i915_request_create()
995 i915_request_retire(rq); in i915_request_create()
998 rq = __i915_request_create(ce, GFP_KERNEL); in i915_request_create()
1000 if (IS_ERR(rq)) in i915_request_create()
1004 rq->cookie = lockdep_pin_lock(&tl->mutex); in i915_request_create()
1006 return rq; in i915_request_create()
1010 return rq; in i915_request_create()
1014 i915_request_await_start(struct i915_request *rq, struct i915_request *signal) in i915_request_await_start() argument
1019 if (i915_request_timeline(rq) == rcu_access_pointer(signal->timeline)) in i915_request_await_start()
1070 if (!intel_timeline_sync_is_later(i915_request_timeline(rq), fence)) in i915_request_await_start()
1071 err = i915_sw_fence_await_dma_fence(&rq->submit, in i915_request_await_start()
1080 already_busywaiting(struct i915_request *rq) in already_busywaiting() argument
1094 return rq->sched.semaphores | READ_ONCE(rq->engine->saturated); in already_busywaiting()
1279 static void mark_external(struct i915_request *rq) in mark_external() argument
1289 rq->sched.flags |= I915_SCHED_HAS_EXTERNAL_CHAIN; in mark_external()
1293 __i915_request_await_external(struct i915_request *rq, struct dma_fence *fence) in __i915_request_await_external() argument
1295 mark_external(rq); in __i915_request_await_external()
1296 return i915_sw_fence_await_dma_fence(&rq->submit, fence, in __i915_request_await_external()
1297 i915_fence_context_timeout(rq->engine->i915, in __i915_request_await_external()
1303 i915_request_await_external(struct i915_request *rq, struct dma_fence *fence) in i915_request_await_external() argument
1309 return __i915_request_await_external(rq, fence); in i915_request_await_external()
1315 err = __i915_request_await_external(rq, iter); in i915_request_await_external()
1319 err = i915_request_await_dma_fence(rq, chain->fence); in i915_request_await_external()
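
i915_request_await_external() treats a plain fence and a dma_fence_chain differently (lines 1309-1319): the chain is walked link by link so native fences wrapped inside it can still take the cheaper await path. A hedged reconstruction of the loop around lines 1315-1319; dma_fence_chain_for_each() holds a reference on the cursor, hence the final put:

    dma_fence_chain_for_each(iter, fence) {
            struct dma_fence_chain *chain = to_dma_fence_chain(iter);

            if (!chain) {               /* terminating, non-chain fence */
                    err = __i915_request_await_external(rq, iter);
                    break;
            }

            err = i915_request_await_dma_fence(rq, chain->fence);
            if (err < 0)
                    break;
    }
    dma_fence_put(iter);
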
1329 i915_request_await_execution(struct i915_request *rq, in i915_request_await_execution() argument
1351 if (fence->context == rq->fence.context) in i915_request_await_execution()
1360 ret = __i915_request_await_execution(rq, in i915_request_await_execution()
1363 ret = i915_request_await_external(rq, fence); in i915_request_await_execution()
1423 i915_request_await_dma_fence(struct i915_request *rq, struct dma_fence *fence) in i915_request_await_dma_fence() argument
1455 if (fence->context == rq->fence.context) in i915_request_await_dma_fence()
1460 intel_timeline_sync_is_later(i915_request_timeline(rq), in i915_request_await_dma_fence()
1465 ret = i915_request_await_request(rq, to_request(fence)); in i915_request_await_dma_fence()
1467 ret = i915_request_await_external(rq, fence); in i915_request_await_dma_fence()
1473 intel_timeline_sync_set(i915_request_timeline(rq), in i915_request_await_dma_fence()
1543 __i915_request_add_to_timeline(struct i915_request *rq) in __i915_request_add_to_timeline() argument
1545 struct intel_timeline *timeline = i915_request_timeline(rq); in __i915_request_add_to_timeline()
1569 &rq->fence)); in __i915_request_add_to_timeline()
1571 bool uses_guc = intel_engine_uses_guc(rq->engine); in __i915_request_add_to_timeline()
1579 GEM_BUG_ON(prev->context == rq->context && in __i915_request_add_to_timeline()
1581 rq->fence.seqno)); in __i915_request_add_to_timeline()
1584 is_power_of_2(READ_ONCE(prev->engine)->mask | rq->engine->mask)) || in __i915_request_add_to_timeline()
1585 (uses_guc && prev->context == rq->context)) in __i915_request_add_to_timeline()
1586 i915_sw_fence_await_sw_fence(&rq->submit, in __i915_request_add_to_timeline()
1588 &rq->submitq); in __i915_request_add_to_timeline()
1590 __i915_sw_fence_await_dma_fence(&rq->submit, in __i915_request_add_to_timeline()
1592 &rq->dmaq); in __i915_request_add_to_timeline()
1593 if (rq->engine->sched_engine->schedule) in __i915_request_add_to_timeline()
1594 __i915_sched_node_add_dependency(&rq->sched, in __i915_request_add_to_timeline()
1596 &rq->dep, in __i915_request_add_to_timeline()
1607 GEM_BUG_ON(timeline->seqno != rq->fence.seqno); in __i915_request_add_to_timeline()
1617 struct i915_request *__i915_request_commit(struct i915_request *rq) in __i915_request_commit() argument
1619 struct intel_engine_cs *engine = rq->engine; in __i915_request_commit()
1620 struct intel_ring *ring = rq->ring; in __i915_request_commit()
1623 RQ_TRACE(rq, "\n"); in __i915_request_commit()
1630 GEM_BUG_ON(rq->reserved_space > ring->space); in __i915_request_commit()
1631 rq->reserved_space = 0; in __i915_request_commit()
1632 rq->emitted_jiffies = jiffies; in __i915_request_commit()
1640 cs = intel_ring_begin(rq, engine->emit_fini_breadcrumb_dw); in __i915_request_commit()
1642 rq->postfix = intel_ring_offset(rq, cs); in __i915_request_commit()
1644 return __i915_request_add_to_timeline(rq); in __i915_request_commit()
1647 void __i915_request_queue_bh(struct i915_request *rq) in __i915_request_queue_bh() argument
1649 i915_sw_fence_commit(&rq->semaphore); in __i915_request_queue_bh()
1650 i915_sw_fence_commit(&rq->submit); in __i915_request_queue_bh()
1653 void __i915_request_queue(struct i915_request *rq, in __i915_request_queue() argument
1667 if (attr && rq->engine->sched_engine->schedule) in __i915_request_queue()
1668 rq->engine->sched_engine->schedule(rq, attr); in __i915_request_queue()
1671 __i915_request_queue_bh(rq); in __i915_request_queue()
1675 void i915_request_add(struct i915_request *rq) in i915_request_add() argument
1677 struct intel_timeline * const tl = i915_request_timeline(rq); in i915_request_add()
1682 lockdep_unpin_lock(&tl->mutex, rq->cookie); in i915_request_add()
1684 trace_i915_request_add(rq); in i915_request_add()
1685 __i915_request_commit(rq); in i915_request_add()
1689 ctx = rcu_dereference(rq->context->gem_context); in i915_request_add()
1694 __i915_request_queue(rq, &attr); in i915_request_add()
1732 static bool __i915_spin_request(struct i915_request * const rq, int state) in __i915_spin_request() argument
1748 if (!i915_request_is_running(rq)) in __i915_spin_request()
1762 timeout_ns = READ_ONCE(rq->engine->props.max_busywait_duration_ns); in __i915_spin_request()
1765 if (dma_fence_is_signaled(&rq->fence)) in __i915_spin_request()
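
__i915_spin_request() busywaits optimistically before arming an interrupt-driven wait, but only while the request is actually running (line 1748) and only within the engine's max_busywait_duration_ns budget (line 1762). A userspace sketch of the bounded spin-then-give-up shape (names invented):

    #include <stdbool.h>
    #include <stdio.h>
    #include <time.h>

    /* Spin for at most budget_ns hoping the condition flips without a
     * sleep/wake round trip; return false to fall back to sleeping. */
    static bool spin_until(bool (*done)(void *), void *arg, long budget_ns)
    {
            struct timespec start, now;

            clock_gettime(CLOCK_MONOTONIC, &start);
            do {
                    if (done(arg))
                            return true;
                    clock_gettime(CLOCK_MONOTONIC, &now);
            } while ((now.tv_sec - start.tv_sec) * 1000000000L +
                     (now.tv_nsec - start.tv_nsec) < budget_ns);

            return false;
    }

    static bool after_ten_calls(void *arg)
    {
            return --*(int *)arg <= 0;
    }

    int main(void)
    {
            int countdown = 10;

            printf("%d\n", spin_until(after_ten_calls, &countdown, 1000000));
            return 0;
    }
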
1807 long i915_request_wait(struct i915_request *rq, in i915_request_wait() argument
1818 if (dma_fence_is_signaled(&rq->fence)) in i915_request_wait()
1824 trace_i915_request_wait_begin(rq, flags); in i915_request_wait()
1832 mutex_acquire(&rq->engine->gt->reset.mutex.dep_map, 0, 0, _THIS_IP_); in i915_request_wait()
1858 __i915_spin_request(rq, state)) in i915_request_wait()
1873 if (flags & I915_WAIT_PRIORITY && !i915_request_started(rq)) in i915_request_wait()
1874 intel_rps_boost(rq); in i915_request_wait()
1877 if (dma_fence_add_callback(&rq->fence, &wait.cb, request_wait_wake)) in i915_request_wait()
1895 if (i915_request_is_ready(rq)) in i915_request_wait()
1896 __intel_engine_flush_submission(rq->engine, false); in i915_request_wait()
1901 if (dma_fence_is_signaled(&rq->fence)) in i915_request_wait()
1919 dma_fence_remove_callback(&rq->fence, &wait.cb); in i915_request_wait()
1923 mutex_release(&rq->engine->gt->reset.mutex.dep_map, _THIS_IP_); in i915_request_wait()
1924 trace_i915_request_wait_end(rq); in i915_request_wait()
1940 static char queue_status(const struct i915_request *rq) in queue_status() argument
1942 if (i915_request_is_active(rq)) in queue_status()
1945 if (i915_request_is_ready(rq)) in queue_status()
1946 return intel_engine_is_virtual(rq->engine) ? 'V' : 'R'; in queue_status()
1951 static const char *run_status(const struct i915_request *rq) in run_status() argument
1953 if (__i915_request_is_complete(rq)) in run_status()
1956 if (__i915_request_has_started(rq)) in run_status()
1959 if (!i915_sw_fence_signaled(&rq->semaphore)) in run_status()
1965 static const char *fence_status(const struct i915_request *rq) in fence_status() argument
1967 if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags)) in fence_status()
1970 if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &rq->fence.flags)) in fence_status()
1977 const struct i915_request *rq, in i915_request_show() argument
1981 const char *name = rq->fence.ops->get_timeline_name((struct dma_fence *)&rq->fence); in i915_request_show()
2015 x = print_sched_attr(&rq->sched.attr, buf, x, sizeof(buf)); in i915_request_show()
2019 queue_status(rq), in i915_request_show()
2020 rq->fence.context, rq->fence.seqno, in i915_request_show()
2021 run_status(rq), in i915_request_show()
2022 fence_status(rq), in i915_request_show()
2024 jiffies_to_msecs(jiffies - rq->emitted_jiffies), in i915_request_show()
2028 static bool engine_match_ring(struct intel_engine_cs *engine, struct i915_request *rq) in engine_match_ring() argument
2032 return ring == i915_ggtt_offset(rq->ring->vma); in engine_match_ring()
2035 static bool match_ring(struct i915_request *rq) in match_ring() argument
2041 if (!intel_engine_is_virtual(rq->engine)) in match_ring()
2042 return engine_match_ring(rq->engine, rq); in match_ring()
2046 while ((engine = intel_engine_get_sibling(rq->engine, i++))) { in match_ring()
2047 found = engine_match_ring(engine, rq); in match_ring()
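
For a virtual request, match_ring() cannot ask a single engine, so it probes every physical sibling until one of them is currently executing from the request's ring (engine_match_ring(), line 2032). A hedged sketch of the iteration shape implied by lines 2041-2047; the bookkeeping around found is an assumption:

    static bool match_ring_sketch(struct i915_request *rq)
    {
            struct intel_engine_cs *engine;
            unsigned int i = 0;
            bool found = false;

            if (!intel_engine_is_virtual(rq->engine))
                    return engine_match_ring(rq->engine, rq);

            /* walk the siblings until one matches or the list ends */
            while ((engine = intel_engine_get_sibling(rq->engine, i++))) {
                    found = engine_match_ring(engine, rq);
                    if (found)
                            break;
            }
            return found;
    }
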
2055 enum i915_request_state i915_test_request_state(struct i915_request *rq) in i915_test_request_state() argument
2057 if (i915_request_completed(rq)) in i915_test_request_state()
2060 if (!i915_request_started(rq)) in i915_test_request_state()
2063 if (match_ring(rq)) in i915_test_request_state()