Lines Matching full:request
42 * may be freed when the request is no longer in use by the GPU. in i915_fence_get_timeline_name()
77 * The request is put onto a RCU freelist (i.e. the address in i915_fence_release()
98 i915_request_remove_from_client(struct i915_request *request) in i915_request_remove_from_client() argument
102 file_priv = request->file_priv; in i915_request_remove_from_client()
107 if (request->file_priv) { in i915_request_remove_from_client()
108 list_del(&request->client_link); in i915_request_remove_from_client()
109 request->file_priv = NULL; in i915_request_remove_from_client()
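The matched lines from i915_request_remove_from_client() show a defensive unlink: the client back-pointer is re-checked under the lock, the request is deleted from the per-client list, and the pointer is cleared so a repeat call is a no-op. Below is a minimal user-space sketch of that pattern only; the toy_client/toy_request types and the pthread mutex are invented stand-ins, not the driver's structures.

#include <pthread.h>
#include <stdio.h>

struct toy_client { pthread_mutex_t lock; struct toy_request *head; };
struct toy_request { struct toy_client *client; struct toy_request *next; };

/* Unlink rq from its client; safe to call twice (the second call is a no-op). */
static void toy_remove_from_client(struct toy_request *rq)
{
	struct toy_client *c = rq->client;

	if (!c)
		return;

	pthread_mutex_lock(&c->lock);
	if (rq->client) {                     /* re-check under the lock */
		struct toy_request **pp = &c->head;

		while (*pp && *pp != rq)
			pp = &(*pp)->next;
		if (*pp)
			*pp = rq->next;
		rq->client = NULL;
	}
	pthread_mutex_unlock(&c->lock);
}

int main(void)
{
	struct toy_client c = { .lock = PTHREAD_MUTEX_INITIALIZER };
	struct toy_request rq = { .client = &c };

	c.head = &rq;
	toy_remove_from_client(&rq);
	toy_remove_from_client(&rq);          /* idempotent */
	printf("head=%p client=%p\n", (void *)c.head, (void *)rq.client);
	return 0;
}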
268 * By incrementing the serial for every request, we know that no in reserve_gt()
271 * of every request from all engines onto just one. in reserve_gt()
295 struct i915_request *request) in i915_gem_retire_noop() argument
300 static void advance_ring(struct i915_request *request) in advance_ring() argument
302 struct intel_ring *ring = request->ring; in advance_ring()
306 * We know the GPU must have read the request to have in advance_ring()
308 * of tail of the request to update the last known position in advance_ring()
311 * Note this requires that we are always called in request in advance_ring()
314 GEM_BUG_ON(!list_is_first(&request->ring_link, &ring->request_list)); in advance_ring()
315 if (list_is_last(&request->ring_link, &ring->request_list)) { in advance_ring()
317 * We may race here with execlists resubmitting this request in advance_ring()
319 * forwards (to request->wa_tail). We either read the in advance_ring()
325 tail = READ_ONCE(request->tail); in advance_ring()
328 tail = request->postfix; in advance_ring()
330 list_del_init(&request->ring_link); in advance_ring()
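The advance_ring() lines above describe reclaiming ring space on retirement: requests retire in order, so the oldest request's recorded tail (or its postfix, if execlists may still rewrite the tail) becomes the new consumer position. A toy circular-buffer sketch of that bookkeeping, using invented names and assuming a power-of-two size; it illustrates the idea, not the driver's ring code.

#include <stdio.h>

/* Retiring the oldest request releases the space up to the position that
 * was recorded when the request was emitted. */
struct toy_ring { unsigned int head, tail, size, space; };

static void toy_ring_update_space(struct toy_ring *r)
{
	/* free bytes between tail (producer) and head (consumer) */
	r->space = (r->head - r->tail - 1) & (r->size - 1);
}

static void toy_retire_oldest(struct toy_ring *r, unsigned int req_tail)
{
	r->head = req_tail;        /* the GPU has consumed everything before this */
	toy_ring_update_space(r);
}

int main(void)
{
	struct toy_ring r = { .head = 0, .tail = 256, .size = 4096 };

	toy_ring_update_space(&r);
	printf("space before retire: %u\n", r.space);
	toy_retire_oldest(&r, 256); /* the oldest request ended at offset 256 */
	printf("space after retire:  %u\n", r.space);
	return 0;
}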
335 static void free_capture_list(struct i915_request *request) in free_capture_list() argument
339 capture = request->capture_list; in free_capture_list()
385 * the subsequent request. in __retire_engine_request()
409 static void i915_request_retire(struct i915_request *request) in i915_request_retire() argument
414 request->engine->name, in i915_request_retire()
415 request->fence.context, request->fence.seqno, in i915_request_retire()
416 request->global_seqno, in i915_request_retire()
417 intel_engine_get_seqno(request->engine)); in i915_request_retire()
419 lockdep_assert_held(&request->i915->drm.struct_mutex); in i915_request_retire()
420 GEM_BUG_ON(!i915_sw_fence_signaled(&request->submit)); in i915_request_retire()
421 GEM_BUG_ON(!i915_request_completed(request)); in i915_request_retire()
423 trace_i915_request_retire(request); in i915_request_retire()
425 advance_ring(request); in i915_request_retire()
426 free_capture_list(request); in i915_request_retire()
431 * when their *last* active request is completed (updating state in i915_request_retire()
438 list_for_each_entry_safe(active, next, &request->active_list, link) { in i915_request_retire()
452 RCU_INIT_POINTER(active->request, NULL); in i915_request_retire()
454 active->retire(active, request); in i915_request_retire()
457 i915_request_remove_from_client(request); in i915_request_retire()
460 atomic_dec_if_positive(&request->gem_context->ban_score); in i915_request_retire()
461 intel_context_unpin(request->hw_context); in i915_request_retire()
463 __retire_engine_upto(request->engine, request); in i915_request_retire()
465 unreserve_gt(request->i915); in i915_request_retire()
467 i915_sched_node_fini(request->i915, &request->sched); in i915_request_retire()
468 i915_request_put(request); in i915_request_retire()
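Taken together, the i915_request_retire() lines walk the request's active trackers, clear each tracker's request pointer before running its retire() hook, drop the client link and the context pin, and finally put the request. The sketch below models only the tracker walk, with invented toy types; it illustrates the ordering (clear the pointer, then call the hook so it may re-arm the tracker), not the driver code.

#include <stdio.h>
#include <stddef.h>

struct toy_request;

struct toy_active {
	struct toy_request *request;
	void (*retire)(struct toy_active *, struct toy_request *);
	struct toy_active *next;
};

struct toy_request {
	struct toy_active *active_list;
	int refcount;
};

static void toy_report(struct toy_active *active, struct toy_request *rq)
{
	printf("retired tracker %p for request %p\n", (void *)active, (void *)rq);
}

static void toy_request_retire(struct toy_request *rq)
{
	struct toy_active *active, *next;

	for (active = rq->active_list; active; active = next) {
		next = active->next;
		active->request = NULL;   /* the tracker no longer watches us */
		active->retire(active, rq);
	}
	rq->active_list = NULL;
	rq->refcount--;                   /* drop the list's reference */
}

int main(void)
{
	struct toy_request rq = { .refcount = 1 };
	struct toy_active a = { .request = &rq, .retire = toy_report };

	rq.active_list = &a;
	toy_request_retire(&rq);
	return rq.refcount;               /* 0: the request could now be freed */
}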
501 static void move_to_timeline(struct i915_request *request, in move_to_timeline() argument
504 GEM_BUG_ON(request->timeline == &request->engine->timeline); in move_to_timeline()
505 lockdep_assert_held(&request->engine->timeline.lock); in move_to_timeline()
507 spin_lock(&request->timeline->lock); in move_to_timeline()
508 list_move_tail(&request->link, &timeline->requests); in move_to_timeline()
509 spin_unlock(&request->timeline->lock); in move_to_timeline()
512 void __i915_request_submit(struct i915_request *request) in __i915_request_submit() argument
514 struct intel_engine_cs *engine = request->engine; in __i915_request_submit()
519 request->fence.context, request->fence.seqno, in __i915_request_submit()
526 GEM_BUG_ON(request->global_seqno); in __i915_request_submit()
533 spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING); in __i915_request_submit()
534 request->global_seqno = seqno; in __i915_request_submit()
535 if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags)) in __i915_request_submit()
536 intel_engine_enable_signaling(request, false); in __i915_request_submit()
537 spin_unlock(&request->lock); in __i915_request_submit()
539 engine->emit_breadcrumb(request, in __i915_request_submit()
540 request->ring->vaddr + request->postfix); in __i915_request_submit()
543 move_to_timeline(request, &engine->timeline); in __i915_request_submit()
545 trace_i915_request_execute(request); in __i915_request_submit()
547 wake_up_all(&request->execute); in __i915_request_submit()
550 void i915_request_submit(struct i915_request *request) in i915_request_submit() argument
552 struct intel_engine_cs *engine = request->engine; in i915_request_submit()
558 __i915_request_submit(request); in i915_request_submit()
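The submit lines show the common __foo()/foo() split: __i915_request_submit() assumes the engine timeline lock is already held while it assigns the global seqno, enables signaling and emits the breadcrumb, and i915_request_submit() is the wrapper that takes that lock. A small user-space sketch of the idiom, with a pthread mutex standing in for the spinlock and invented names throughout.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t engine_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int timeline_seqno;

struct toy_request { unsigned int global_seqno; };

static void __toy_submit(struct toy_request *rq)
{
	/* caller must hold engine_lock */
	rq->global_seqno = ++timeline_seqno;
}

static void toy_submit(struct toy_request *rq)
{
	pthread_mutex_lock(&engine_lock);
	__toy_submit(rq);
	pthread_mutex_unlock(&engine_lock);
}

int main(void)
{
	struct toy_request rq = { 0 };

	toy_submit(&rq);
	printf("global_seqno = %u\n", rq.global_seqno);
	return 0;
}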
563 void __i915_request_unsubmit(struct i915_request *request) in __i915_request_unsubmit() argument
565 struct intel_engine_cs *engine = request->engine; in __i915_request_unsubmit()
569 request->fence.context, request->fence.seqno, in __i915_request_unsubmit()
570 request->global_seqno, in __i915_request_unsubmit()
580 GEM_BUG_ON(!request->global_seqno); in __i915_request_unsubmit()
581 GEM_BUG_ON(request->global_seqno != engine->timeline.seqno); in __i915_request_unsubmit()
583 request->global_seqno)); in __i915_request_unsubmit()
587 spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING); in __i915_request_unsubmit()
588 request->global_seqno = 0; in __i915_request_unsubmit()
589 if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags)) in __i915_request_unsubmit()
590 intel_engine_cancel_signaling(request); in __i915_request_unsubmit()
591 spin_unlock(&request->lock); in __i915_request_unsubmit()
594 move_to_timeline(request, request->timeline); in __i915_request_unsubmit()
597 * We don't need to wake_up any waiters on request->execute, they in __i915_request_unsubmit()
598 * will get woken by any other event or us re-adding this request in __i915_request_unsubmit()
600  * should be quite adept at finding that the request now has a new in __i915_request_unsubmit()
605 void i915_request_unsubmit(struct i915_request *request) in i915_request_unsubmit() argument
607 struct intel_engine_cs *engine = request->engine; in i915_request_unsubmit()
613 __i915_request_unsubmit(request); in i915_request_unsubmit()
621 struct i915_request *request = in submit_notify() local
622 container_of(fence, typeof(*request), submit); in submit_notify()
626 trace_i915_request_submit(request); in submit_notify()
636 request->engine->submit_request(request); in submit_notify()
641 i915_request_put(request); in submit_notify()
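submit_notify() recovers the enclosing request from the embedded submit fence with container_of(). A self-contained sketch of that trick, using a local copy of the macro and toy types rather than the driver's:

#include <stddef.h>
#include <stdio.h>

/* Recover the enclosing structure from a pointer to one of its members. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct toy_fence { int signaled; };

struct toy_request {
	int seqno;
	struct toy_fence submit;   /* embedded, like request->submit */
};

static void toy_notify(struct toy_fence *fence)
{
	struct toy_request *rq = container_of(fence, struct toy_request, submit);

	printf("fence signaled for request %d\n", rq->seqno);
}

int main(void)
{
	struct toy_request rq = { .seqno = 42 };

	toy_notify(&rq.submit);
	return 0;
}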
649 * i915_request_alloc - allocate a request structure
651 * @engine: engine that we wish to issue the request on.
652 * @ctx: context that the request will be associated with.
654 * Returns a pointer to the allocated request if successful,
670 * request! in i915_request_alloc()
698 /* Move our oldest request to the slab-cache (if not in use!) */ in i915_request_alloc()
708 * race with the request being allocated from the slab freelist. in i915_request_alloc()
709  * That is, the request we are writing to here may be in the process in i915_request_alloc()
712  * the RCU lookup, we chase the request->engine pointer, in i915_request_alloc()
713 * read the request->global_seqno and increment the reference count. in i915_request_alloc()
716 * the lookup knows the request is unallocated and complete. Otherwise, in i915_request_alloc()
719 * check that the request we have a reference to and matches the active in i915_request_alloc()
720 * request. in i915_request_alloc()
722 * Before we increment the refcount, we chase the request->engine in i915_request_alloc()
725 * we see the request is completed (based on the value of the in i915_request_alloc()
727 * If we decide the request is not completed (new engine or seqno), in i915_request_alloc()
729 * active request - which it won't be and restart the lookup. in i915_request_alloc()
748 * recover as much memory from the request pool as is possible. in i915_request_alloc()
794 * eventually emit this request. This is to guarantee that the in i915_request_alloc()
796 * to be redone if the request is not actually submitted straight in i915_request_alloc()
803 * Record the position of the start of the request so that in i915_request_alloc()
805 * GPU processing the request, we never over-estimate the in i915_request_alloc()
824 /* Check that we didn't interrupt ourselves with a new request */ in i915_request_alloc()
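The comment block matched above explains why the lookup must be careful: requests are returned to a slab freelist under RCU, so an object a reader is inspecting may be recycled at any moment, and the reader may only keep it if it can take a non-zero reference and the identity it expected still matches, retrying otherwise. The single-threaded sketch below models only that acquire-then-revalidate rule with C11 atomics; it is not an RCU implementation, and the toy_request fields are invented.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct toy_request {
	atomic_uint refcount;
	unsigned int seqno;          /* identity we validate after the get */
};

static bool toy_get_unless_zero(struct toy_request *rq)
{
	unsigned int ref = atomic_load(&rq->refcount);

	while (ref) {
		if (atomic_compare_exchange_weak(&rq->refcount, &ref, ref + 1))
			return true;
	}
	return false;                /* already freed: the caller must retry */
}

static struct toy_request *toy_lookup(struct toy_request *slot, unsigned int seqno)
{
	if (!toy_get_unless_zero(slot))
		return NULL;
	if (slot->seqno != seqno) {  /* recycled for a new request: drop and retry */
		atomic_fetch_sub(&slot->refcount, 1);
		return NULL;
	}
	return slot;
}

int main(void)
{
	struct toy_request rq = { .seqno = 7 };

	atomic_init(&rq.refcount, 1);
	printf("match: %p\n", (void *)toy_lookup(&rq, 7));
	printf("stale: %p\n", (void *)toy_lookup(&rq, 8));
	return 0;
}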
957 * i915_request_await_object - set this request to (async) wait upon a bo
958 * @to: request we are wishing to use
967 * - If there is an outstanding write request to the object, the new
968 * request must wait for it to complete (either CPU or in hw, requests
971 * - If we are a write request (pending_write_domain is set), the new
972 * request must wait for outstanding read requests to complete.
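The i915_request_await_object() kerneldoc above states the implicit-sync rule: any new request waits for an outstanding write to the object, and a write request additionally waits for all outstanding reads. A toy model of just that decision, with invented types that hold plain seqnos instead of fences:

#include <stdbool.h>
#include <stdio.h>

struct toy_object {
	int last_write;          /* seqno of the last write, 0 if none */
	int last_reads[4];       /* seqnos of outstanding reads */
	int nreads;
};

static void toy_await_object(const struct toy_object *obj, bool write)
{
	int i;

	if (obj->last_write)
		printf("wait for write %d\n", obj->last_write);

	if (write)               /* a writer must also wait for all readers */
		for (i = 0; i < obj->nreads; i++)
			printf("wait for read %d\n", obj->last_reads[i]);
}

int main(void)
{
	struct toy_object obj = { .last_write = 3, .last_reads = { 5, 6 }, .nreads = 2 };

	toy_await_object(&obj, false);   /* reader: waits only on write 3 */
	toy_await_object(&obj, true);    /* writer: waits on 3, 5 and 6 */
	return 0;
}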
1027 * As this request likely depends on state from the lost in i915_request_skip()
1041 * request is not being tracked for completion but the work itself is
1044 void i915_request_add(struct i915_request *request) in i915_request_add() argument
1046 struct intel_engine_cs *engine = request->engine; in i915_request_add()
1047 struct i915_timeline *timeline = request->timeline; in i915_request_add()
1048 struct intel_ring *ring = request->ring; in i915_request_add()
1053 engine->name, request->fence.context, request->fence.seqno); in i915_request_add()
1055 lockdep_assert_held(&request->i915->drm.struct_mutex); in i915_request_add()
1056 trace_i915_request_add(request); in i915_request_add()
1059 * Make sure that no request gazumped us - if it was allocated after in i915_request_add()
1063 GEM_BUG_ON(timeline->seqno != request->fence.seqno); in i915_request_add()
1070 request->reserved_space = 0; in i915_request_add()
1071 engine->emit_flush(request, EMIT_FLUSH); in i915_request_add()
1076 * GPU processing the request, we never over-estimate the in i915_request_add()
1079 cs = intel_ring_begin(request, engine->emit_breadcrumb_sz); in i915_request_add()
1081 request->postfix = intel_ring_offset(request, cs); in i915_request_add()
1084 * Seal the request and mark it as pending execution. Note that in i915_request_add()
1091 &request->i915->drm.struct_mutex); in i915_request_add()
1093 i915_sw_fence_await_sw_fence(&request->submit, &prev->submit, in i915_request_add()
1094 &request->submitq); in i915_request_add()
1096 __i915_sched_node_add_dependency(&request->sched, in i915_request_add()
1098 &request->dep, in i915_request_add()
1103 list_add_tail(&request->link, &timeline->requests); in i915_request_add()
1106 GEM_BUG_ON(timeline->seqno != request->fence.seqno); in i915_request_add()
1107 i915_gem_active_set(&timeline->last_request, request); in i915_request_add()
1109 list_add_tail(&request->ring_link, &ring->request_list); in i915_request_add()
1110 if (list_is_first(&request->ring_link, &ring->request_list)) { in i915_request_add()
1112 list_add(&ring->active_link, &request->i915->gt.active_rings); in i915_request_add()
1114 request->emitted_jiffies = jiffies; in i915_request_add()
1117 * Let the backend know a new request has arrived that may need in i915_request_add()
1119 * request - i.e. we may want to preempt the current request in order in i915_request_add()
1121 * request. in i915_request_add()
1123 * This is called before the request is ready to run so that we can in i915_request_add()
1130 engine->schedule(request, &request->gem_context->sched); in i915_request_add()
1132 i915_sw_fence_commit(&request->submit); in i915_request_add()
1136 * In typical scenarios, we do not expect the previous request on in i915_request_add()
1138 * has been completed. If the completed request is still here, that in i915_request_add()
1139 * implies that request retirement is a long way behind submission, in i915_request_add()
1142 * retirement worker. So if the last request on this timeline was in i915_request_add()
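Several of the i915_request_add() lines revolve around the reserved-space trick: room for the closing breadcrumb is set aside when the request is created, so emitting it at add time cannot fail. The byte-counter sketch below illustrates only that invariant, under invented names; the real ring allocator is considerably more involved.

#include <stdio.h>

#define BREADCRUMB_BYTES 16

struct toy_ring { unsigned int space; };
struct toy_request { unsigned int reserved; };

static int toy_ring_begin(struct toy_ring *r, struct toy_request *rq, unsigned int bytes)
{
	/* every allocation must also leave the reserved bytes untouched */
	if (r->space < bytes + rq->reserved)
		return -1;
	r->space -= bytes;
	return 0;
}

static void toy_request_add(struct toy_ring *r, struct toy_request *rq)
{
	rq->reserved = 0;                              /* release the reservation... */
	toy_ring_begin(r, rq, BREADCRUMB_BYTES);       /* ...and spend it; cannot fail */
}

int main(void)
{
	struct toy_ring r = { .space = 64 };
	struct toy_request rq = { .reserved = BREADCRUMB_BYTES };

	while (!toy_ring_begin(&r, &rq, 16))           /* user payload fills the ring */
		;
	toy_request_add(&r, &rq);                      /* the breadcrumb still fits */
	printf("space left: %u\n", r.space);
	return 0;
}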
1198 * Only wait for the request if we know it is likely to complete. in __i915_spin_request()
1201 * request length, so we do not have a good indicator that this in __i915_spin_request()
1202 * request will complete within the timeout. What we do know is the in __i915_spin_request()
1204 * tell if the request has started. If the request hasn't started yet, in __i915_spin_request()
1215 * rate. By busywaiting on the request completion for a short while we in __i915_spin_request()
1217 * if it is a slow request, we want to sleep as quickly as possible. in __i915_spin_request()
1219 * takes to sleep on a request, on the order of a microsecond. in __i915_spin_request()
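The __i915_spin_request() comments justify busy-waiting briefly before sleeping: if the request is already executing it will often complete within a couple of microseconds, which is cheaper than arming an interrupt and going to sleep. A user-space sketch of the bounded spin-then-fall-back pattern (the sleep itself is only indicated by a printf):

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

/* Busy-poll for a short bounded window; return false if the condition did
 * not become true so the caller can fall back to a sleeping wait. */
static bool toy_spin_for(volatile int *done, long timeout_us)
{
	struct timespec start, now;

	clock_gettime(CLOCK_MONOTONIC, &start);
	do {
		if (*done)
			return true;                    /* completed while spinning */
		clock_gettime(CLOCK_MONOTONIC, &now);
	} while ((now.tv_sec - start.tv_sec) * 1000000L +
		 (now.tv_nsec - start.tv_nsec) / 1000L < timeout_us);

	return false;                                   /* give up and sleep */
}

int main(void)
{
	volatile int done = 0;

	if (!toy_spin_for(&done, 5 /* us */))
		printf("not done after spinning: would now sleep on an interrupt\n");
	return 0;
}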
1249 static bool __i915_wait_request_check_and_reset(struct i915_request *request) in __i915_wait_request_check_and_reset() argument
1251 struct i915_gpu_error *error = &request->i915->gpu_error; in __i915_wait_request_check_and_reset()
1257 i915_reset(request->i915, error->stalled_mask, error->reason); in __i915_wait_request_check_and_reset()
1262 * i915_request_wait - wait until execution of request has finished
1263 * @rq: the request to wait upon
1267 * i915_request_wait() waits for the request to be completed, for a
1275 * Returns the remaining time (in jiffies) if the request completed, which may
1276 * be zero or -ETIME if the request is unfinished after the timeout expires.
1278 * pending before the request completes.
1376 * Carefully check if the request is complete, giving time in i915_request_wait()
1399 /* Only spin if we know the GPU is processing this request */ in i915_request_wait()
1422 struct i915_request *request, *next; in ring_retire_requests() local
1424 list_for_each_entry_safe(request, next, in ring_retire_requests()
1426 if (!i915_request_completed(request)) in ring_retire_requests()
1429 i915_request_retire(request); in ring_retire_requests()
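ring_retire_requests() simply walks the ring's requests oldest-first and stops at the first one the GPU has not completed. In miniature, with an array in place of the list and invented names:

#include <stdbool.h>
#include <stdio.h>

struct toy_request { int seqno; bool completed; };

static int toy_retire_ring(struct toy_request *reqs, int count)
{
	int retired = 0;

	while (retired < count && reqs[retired].completed)
		retired++;                 /* i915_request_retire() would run here */

	return retired;
}

int main(void)
{
	struct toy_request reqs[] = {
		{ 1, true }, { 2, true }, { 3, false }, { 4, false },
	};

	printf("retired %d requests\n", toy_retire_ring(reqs, 4));
	return 0;
}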