Lines Matching refs:rq

27 static bool is_active(struct i915_request *rq)  in is_active()  argument
29 if (i915_request_is_active(rq)) in is_active()
32 if (i915_request_on_hold(rq)) in is_active()
35 if (i915_request_has_initial_breadcrumb(rq) && i915_request_started(rq)) in is_active()
42 struct i915_request *rq, in wait_for_submit() argument
52 if (i915_request_completed(rq)) /* that was quick! */ in wait_for_submit()
57 if (!READ_ONCE(engine->execlists.pending[0]) && is_active(rq)) in wait_for_submit()
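
A minimal sketch of how wait_for_submit() is used by the tests below: submit a request, then poll until the backend has actually handed it to the hardware (or it completed). Only helpers matched in this listing are used; context/engine setup and error unwinding are elided.

	rq = intel_context_create_request(ce);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	i915_request_get(rq);
	i915_request_add(rq);

	/* wait up to ~500ms for the request to reach the hardware */
	err = wait_for_submit(engine, rq, HZ / 2);

	i915_request_put(rq);
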
68 struct i915_request *rq, in wait_for_reset() argument
80 if (i915_request_completed(rq)) in wait_for_reset()
83 if (READ_ONCE(rq->fence.error)) in wait_for_reset()
89 if (rq->fence.error != -EIO) { in wait_for_reset()
92 rq->fence.context, in wait_for_reset()
93 rq->fence.seqno); in wait_for_reset()
98 if (i915_request_wait(rq, 0, in wait_for_reset()
102 rq->fence.context, in wait_for_reset()
103 rq->fence.seqno); in wait_for_reset()
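
wait_for_reset() complements wait_for_submit(): the __cancel_*() tests further down ban the spinning context and then call it to confirm the engine reset fired. A condensed sketch of that pattern, assuming the spinner request has already been started as in the matches below:

	intel_context_set_banned(rq->context);

	/* wait_for_reset() also verifies rq->fence.error == -EIO */
	err = wait_for_reset(engine, rq, HZ / 2);

	i915_request_put(rq);
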
126 struct i915_request *rq; in live_sanitycheck() local
134 rq = igt_spinner_create_request(&spin, ce, MI_NOOP); in live_sanitycheck()
135 if (IS_ERR(rq)) { in live_sanitycheck()
136 err = PTR_ERR(rq); in live_sanitycheck()
140 i915_request_add(rq); in live_sanitycheck()
141 if (!igt_wait_for_spinner(&spin, rq)) { in live_sanitycheck()
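
live_sanitycheck() shows the basic spinner lifecycle reused by most later tests: create a spinning request on a context, submit it, and wait until it is actually executing before poking at the scheduler. Sketch of that sequence, with spinner init/teardown and error paths elided:

	rq = igt_spinner_create_request(&spin, ce, MI_NOOP);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	i915_request_add(rq);
	if (!igt_wait_for_spinner(&spin, rq)) {
		/* the spinner never started executing on the engine */
		err = -ETIME;
	}
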
183 struct i915_request *rq[2]; in live_unlite_restore() local
229 rq[0] = igt_spinner_create_request(&spin, ce[0], MI_ARB_CHECK); in live_unlite_restore()
230 if (IS_ERR(rq[0])) { in live_unlite_restore()
231 err = PTR_ERR(rq[0]); in live_unlite_restore()
235 i915_request_get(rq[0]); in live_unlite_restore()
236 i915_request_add(rq[0]); in live_unlite_restore()
237 GEM_BUG_ON(rq[0]->postfix > ce[1]->ring->emit); in live_unlite_restore()
239 if (!igt_wait_for_spinner(&spin, rq[0])) { in live_unlite_restore()
240 i915_request_put(rq[0]); in live_unlite_restore()
244 rq[1] = i915_request_create(ce[1]); in live_unlite_restore()
245 if (IS_ERR(rq[1])) { in live_unlite_restore()
246 err = PTR_ERR(rq[1]); in live_unlite_restore()
247 i915_request_put(rq[0]); in live_unlite_restore()
262 i915_request_await_dma_fence(rq[1], &rq[0]->fence); in live_unlite_restore()
265 i915_request_get(rq[1]); in live_unlite_restore()
266 i915_request_add(rq[1]); in live_unlite_restore()
267 GEM_BUG_ON(rq[1]->postfix <= rq[0]->postfix); in live_unlite_restore()
268 i915_request_put(rq[0]); in live_unlite_restore()
276 engine->sched_engine->schedule(rq[1], &attr); in live_unlite_restore()
280 rq[0] = i915_request_create(ce[0]); in live_unlite_restore()
281 if (IS_ERR(rq[0])) { in live_unlite_restore()
282 err = PTR_ERR(rq[0]); in live_unlite_restore()
283 i915_request_put(rq[1]); in live_unlite_restore()
287 i915_request_await_dma_fence(rq[0], &rq[1]->fence); in live_unlite_restore()
288 i915_request_get(rq[0]); in live_unlite_restore()
289 i915_request_add(rq[0]); in live_unlite_restore()
290 GEM_BUG_ON(rq[0]->postfix > rq[1]->postfix); in live_unlite_restore()
291 i915_request_put(rq[1]); in live_unlite_restore()
292 i915_request_put(rq[0]); in live_unlite_restore()
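
The core of live_unlite_restore() is an explicit ordering plus a priority bump: rq[1] is made to wait on rq[0]'s fence and is then handed to the scheduler with an elevated priority, forcing the execlists backend to act on the pair. A condensed sketch, where 'attr' is assumed to be a struct i915_sched_attr carrying the boosted priority:

	err = i915_request_await_dma_fence(rq[1], &rq[0]->fence);

	i915_request_get(rq[1]);
	i915_request_add(rq[1]);

	/* ask the scheduler to (re)prioritise the dependent request */
	engine->sched_engine->schedule(rq[1], &attr);
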
345 struct i915_request *rq; in live_unlite_ring() local
384 rq = igt_spinner_create_request(&spin, ce[0], MI_ARB_CHECK); in live_unlite_ring()
385 if (IS_ERR(rq)) { in live_unlite_ring()
386 err = PTR_ERR(rq); in live_unlite_ring()
390 i915_request_get(rq); in live_unlite_ring()
391 rq->sched.attr.priority = I915_PRIORITY_BARRIER; in live_unlite_ring()
392 i915_request_add(rq); in live_unlite_ring()
394 if (!igt_wait_for_spinner(&spin, rq)) { in live_unlite_ring()
396 i915_request_put(rq); in live_unlite_ring()
404 rq->wa_tail, in live_unlite_ring()
411 i915_request_put(rq); in live_unlite_ring()
425 rq->tail); in live_unlite_ring()
427 rq->tail, in live_unlite_ring()
429 i915_request_put(rq); in live_unlite_ring()
432 rq = intel_context_create_request(ce[1]); in live_unlite_ring()
433 if (IS_ERR(rq)) { in live_unlite_ring()
434 err = PTR_ERR(rq); in live_unlite_ring()
438 rq->sched.attr.priority = I915_PRIORITY_BARRIER; in live_unlite_ring()
439 i915_request_get(rq); in live_unlite_ring()
440 i915_request_add(rq); in live_unlite_ring()
442 err = wait_for_submit(engine, rq, HZ / 2); in live_unlite_ring()
443 i915_request_put(rq); in live_unlite_ring()
494 struct i915_request *rq; in live_pin_rewind() local
534 rq = intel_context_create_request(ce); in live_pin_rewind()
537 if (IS_ERR(rq)) { in live_pin_rewind()
538 err = PTR_ERR(rq); in live_pin_rewind()
541 GEM_BUG_ON(!rq->head); in live_pin_rewind()
542 i915_request_add(rq); in live_pin_rewind()
602 struct i915_request *rq; in live_hold_reset() local
612 rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK); in live_hold_reset()
613 if (IS_ERR(rq)) { in live_hold_reset()
614 err = PTR_ERR(rq); in live_hold_reset()
617 i915_request_add(rq); in live_hold_reset()
619 if (!igt_wait_for_spinner(&spin, rq)) { in live_hold_reset()
632 GEM_BUG_ON(execlists_active(&engine->execlists) != rq); in live_hold_reset()
634 i915_request_get(rq); in live_hold_reset()
635 execlists_hold(engine, rq); in live_hold_reset()
636 GEM_BUG_ON(!i915_request_on_hold(rq)); in live_hold_reset()
639 GEM_BUG_ON(rq->fence.error != -EIO); in live_hold_reset()
644 if (!i915_request_wait(rq, 0, HZ / 5)) { in live_hold_reset()
647 i915_request_put(rq); in live_hold_reset()
651 GEM_BUG_ON(!i915_request_on_hold(rq)); in live_hold_reset()
654 execlists_unhold(engine, rq); in live_hold_reset()
655 if (i915_request_wait(rq, 0, HZ / 5) < 0) { in live_hold_reset()
661 i915_request_put(rq); in live_hold_reset()
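
live_hold_reset() exercises execlists_hold()/execlists_unhold(): the active request is parked on the hold list across an engine reset (so it is not retired), picks up -EIO on its fence, and only completes once released. Condensed sketch, with the reset injection itself elided:

	i915_request_get(rq);
	execlists_hold(engine, rq);
	GEM_BUG_ON(!i915_request_on_hold(rq));

	/* ... engine reset here; rq->fence.error is expected to be -EIO ... */

	execlists_unhold(engine, rq);
	if (i915_request_wait(rq, 0, HZ / 5) < 0)
		err = -ETIME;	/* request did not complete after release */

	i915_request_put(rq);
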
721 struct i915_request *rq; in live_error_interrupt() local
729 rq = intel_context_create_request(ce); in live_error_interrupt()
731 if (IS_ERR(rq)) { in live_error_interrupt()
732 err = PTR_ERR(rq); in live_error_interrupt()
736 if (rq->engine->emit_init_breadcrumb) { in live_error_interrupt()
737 err = rq->engine->emit_init_breadcrumb(rq); in live_error_interrupt()
739 i915_request_add(rq); in live_error_interrupt()
744 cs = intel_ring_begin(rq, 2); in live_error_interrupt()
746 i915_request_add(rq); in live_error_interrupt()
759 client[i] = i915_request_get(rq); in live_error_interrupt()
760 i915_request_add(rq); in live_error_interrupt()
821 emit_semaphore_chain(struct i915_request *rq, struct i915_vma *vma, int idx) in emit_semaphore_chain() argument
825 cs = intel_ring_begin(rq, 10); in emit_semaphore_chain()
853 intel_ring_advance(rq, cs); in emit_semaphore_chain()
861 struct i915_request *rq; in semaphore_queue() local
868 rq = intel_context_create_request(ce); in semaphore_queue()
869 if (IS_ERR(rq)) in semaphore_queue()
873 if (rq->engine->emit_init_breadcrumb) in semaphore_queue()
874 err = rq->engine->emit_init_breadcrumb(rq); in semaphore_queue()
876 err = emit_semaphore_chain(rq, vma, idx); in semaphore_queue()
878 i915_request_get(rq); in semaphore_queue()
879 i915_request_add(rq); in semaphore_queue()
881 rq = ERR_PTR(err); in semaphore_queue()
885 return rq; in semaphore_queue()
896 struct i915_request *rq; in release_queue() local
899 rq = intel_engine_create_kernel_request(engine); in release_queue()
900 if (IS_ERR(rq)) in release_queue()
901 return PTR_ERR(rq); in release_queue()
903 cs = intel_ring_begin(rq, 4); in release_queue()
905 i915_request_add(rq); in release_queue()
914 intel_ring_advance(rq, cs); in release_queue()
916 i915_request_get(rq); in release_queue()
917 i915_request_add(rq); in release_queue()
920 engine->sched_engine->schedule(rq, &attr); in release_queue()
923 i915_request_put(rq); in release_queue()
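
release_queue() and several helpers below emit raw commands into a request's ring via intel_ring_begin()/intel_ring_advance(). A minimal sketch of that emission pattern; MI_NOOPs stand in here for the real payload dwords, which are not part of the matches above:

	cs = intel_ring_begin(rq, 4);
	if (IS_ERR(cs)) {
		i915_request_add(rq);
		return PTR_ERR(cs);
	}

	*cs++ = MI_NOOP;
	*cs++ = MI_NOOP;
	*cs++ = MI_NOOP;
	*cs++ = MI_NOOP;

	intel_ring_advance(rq, cs);
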
947 struct i915_request *rq; in slice_semaphore_queue() local
949 rq = semaphore_queue(engine, vma, n++); in slice_semaphore_queue()
950 if (IS_ERR(rq)) { in slice_semaphore_queue()
951 err = PTR_ERR(rq); in slice_semaphore_queue()
955 i915_request_put(rq); in slice_semaphore_queue()
1057 struct i915_request *rq; in create_rewinder() local
1061 rq = intel_context_create_request(ce); in create_rewinder()
1062 if (IS_ERR(rq)) in create_rewinder()
1063 return rq; in create_rewinder()
1066 err = i915_request_await_dma_fence(rq, &wait->fence); in create_rewinder()
1071 cs = intel_ring_begin(rq, 14); in create_rewinder()
1089 *cs++ = i915_mmio_reg_offset(RING_TIMESTAMP(rq->engine->mmio_base)); in create_rewinder()
1098 intel_ring_advance(rq, cs); in create_rewinder()
1102 i915_request_get(rq); in create_rewinder()
1103 i915_request_add(rq); in create_rewinder()
1105 i915_request_put(rq); in create_rewinder()
1109 return rq; in create_rewinder()
1131 struct i915_request *rq[3] = {}; in live_timeslice_rewind() local
1162 rq[A1] = create_rewinder(ce, NULL, slot, X); in live_timeslice_rewind()
1163 if (IS_ERR(rq[A1])) { in live_timeslice_rewind()
1168 rq[A2] = create_rewinder(ce, NULL, slot, Y); in live_timeslice_rewind()
1170 if (IS_ERR(rq[A2])) in live_timeslice_rewind()
1173 err = wait_for_submit(engine, rq[A2], HZ / 2); in live_timeslice_rewind()
1186 rq[B1] = create_rewinder(ce, rq[A1], slot, Z); in live_timeslice_rewind()
1188 if (IS_ERR(rq[2])) in live_timeslice_rewind()
1191 err = wait_for_submit(engine, rq[B1], HZ / 2); in live_timeslice_rewind()
1200 while (i915_request_is_active(rq[A2])) { /* semaphore yield! */ in live_timeslice_rewind()
1207 GEM_BUG_ON(!i915_request_is_active(rq[A1])); in live_timeslice_rewind()
1208 GEM_BUG_ON(!i915_request_is_active(rq[B1])); in live_timeslice_rewind()
1209 GEM_BUG_ON(i915_request_is_active(rq[A2])); in live_timeslice_rewind()
1248 i915_request_put(rq[i]); in live_timeslice_rewind()
1260 struct i915_request *rq; in nop_request() local
1262 rq = intel_engine_create_kernel_request(engine); in nop_request()
1263 if (IS_ERR(rq)) in nop_request()
1264 return rq; in nop_request()
1266 i915_request_get(rq); in nop_request()
1267 i915_request_add(rq); in nop_request()
1269 return rq; in nop_request()
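
nop_request() is the simplest request pattern in the file: an empty kernel request used purely as a synchronisation point on an engine. Sketch reconstructed from the matches above; the function signature is an assumption, since only the rq-bearing lines appear in this listing:

	static struct i915_request *nop_request(struct intel_engine_cs *engine)
	{
		struct i915_request *rq;

		rq = intel_engine_create_kernel_request(engine);
		if (IS_ERR(rq))
			return rq;

		i915_request_get(rq);
		i915_request_add(rq);

		return rq;
	}
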
1331 struct i915_request *rq, *nop; in live_timeslice_queue() local
1340 rq = semaphore_queue(engine, vma, 0); in live_timeslice_queue()
1341 if (IS_ERR(rq)) { in live_timeslice_queue()
1342 err = PTR_ERR(rq); in live_timeslice_queue()
1345 engine->sched_engine->schedule(rq, &attr); in live_timeslice_queue()
1346 err = wait_for_submit(engine, rq, HZ / 2); in live_timeslice_queue()
1367 GEM_BUG_ON(i915_request_completed(rq)); in live_timeslice_queue()
1368 GEM_BUG_ON(execlists_active(&engine->execlists) != rq); in live_timeslice_queue()
1371 err = release_queue(engine, vma, 1, effective_prio(rq)); in live_timeslice_queue()
1382 if (i915_request_wait(rq, 0, slice_timeout(engine)) < 0) { in live_timeslice_queue()
1395 i915_request_put(rq); in live_timeslice_queue()
1431 struct i915_request *rq; in live_timeslice_nopreempt() local
1448 rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK); in live_timeslice_nopreempt()
1450 if (IS_ERR(rq)) { in live_timeslice_nopreempt()
1451 err = PTR_ERR(rq); in live_timeslice_nopreempt()
1455 i915_request_get(rq); in live_timeslice_nopreempt()
1456 i915_request_add(rq); in live_timeslice_nopreempt()
1458 if (!igt_wait_for_spinner(&spin, rq)) { in live_timeslice_nopreempt()
1459 i915_request_put(rq); in live_timeslice_nopreempt()
1464 set_bit(I915_FENCE_FLAG_NOPREEMPT, &rq->fence.flags); in live_timeslice_nopreempt()
1465 i915_request_put(rq); in live_timeslice_nopreempt()
1475 rq = intel_context_create_request(ce); in live_timeslice_nopreempt()
1477 if (IS_ERR(rq)) { in live_timeslice_nopreempt()
1478 err = PTR_ERR(rq); in live_timeslice_nopreempt()
1482 rq->sched.attr.priority = I915_PRIORITY_BARRIER; in live_timeslice_nopreempt()
1483 i915_request_get(rq); in live_timeslice_nopreempt()
1484 i915_request_add(rq); in live_timeslice_nopreempt()
1490 if (wait_for_submit(engine, rq, HZ / 2)) { in live_timeslice_nopreempt()
1491 i915_request_put(rq); in live_timeslice_nopreempt()
1501 if (i915_request_wait(rq, 0, slice_timeout(engine)) >= 0) { in live_timeslice_nopreempt()
1506 i915_request_put(rq); in live_timeslice_nopreempt()
1722 struct i915_request *rq; in spinner_create_request() local
1728 rq = igt_spinner_create_request(spin, ce, arb); in spinner_create_request()
1730 return rq; in spinner_create_request()
1760 struct i915_request *rq; in live_preempt() local
1770 rq = spinner_create_request(&spin_lo, ctx_lo, engine, in live_preempt()
1772 if (IS_ERR(rq)) { in live_preempt()
1773 err = PTR_ERR(rq); in live_preempt()
1777 i915_request_add(rq); in live_preempt()
1778 if (!igt_wait_for_spinner(&spin_lo, rq)) { in live_preempt()
1786 rq = spinner_create_request(&spin_hi, ctx_hi, engine, in live_preempt()
1788 if (IS_ERR(rq)) { in live_preempt()
1790 err = PTR_ERR(rq); in live_preempt()
1794 i915_request_add(rq); in live_preempt()
1795 if (!igt_wait_for_spinner(&spin_hi, rq)) { in live_preempt()
1853 struct i915_request *rq; in live_late_preempt() local
1863 rq = spinner_create_request(&spin_lo, ctx_lo, engine, in live_late_preempt()
1865 if (IS_ERR(rq)) { in live_late_preempt()
1866 err = PTR_ERR(rq); in live_late_preempt()
1870 i915_request_add(rq); in live_late_preempt()
1871 if (!igt_wait_for_spinner(&spin_lo, rq)) { in live_late_preempt()
1876 rq = spinner_create_request(&spin_hi, ctx_hi, engine, in live_late_preempt()
1878 if (IS_ERR(rq)) { in live_late_preempt()
1880 err = PTR_ERR(rq); in live_late_preempt()
1884 i915_request_add(rq); in live_late_preempt()
1885 if (igt_wait_for_spinner(&spin_hi, rq)) { in live_late_preempt()
1891 engine->sched_engine->schedule(rq, &attr); in live_late_preempt()
1893 if (!igt_wait_for_spinner(&spin_hi, rq)) { in live_late_preempt()
2059 struct i915_request *rq; in __cancel_active0() local
2069 rq = spinner_create_request(&arg->a.spin, in __cancel_active0()
2072 if (IS_ERR(rq)) in __cancel_active0()
2073 return PTR_ERR(rq); in __cancel_active0()
2075 clear_bit(CONTEXT_BANNED, &rq->context->flags); in __cancel_active0()
2076 i915_request_get(rq); in __cancel_active0()
2077 i915_request_add(rq); in __cancel_active0()
2078 if (!igt_wait_for_spinner(&arg->a.spin, rq)) { in __cancel_active0()
2083 intel_context_set_banned(rq->context); in __cancel_active0()
2088 err = wait_for_reset(arg->engine, rq, HZ / 2); in __cancel_active0()
2095 i915_request_put(rq); in __cancel_active0()
2103 struct i915_request *rq[2] = {}; in __cancel_active1() local
2113 rq[0] = spinner_create_request(&arg->a.spin, in __cancel_active1()
2116 if (IS_ERR(rq[0])) in __cancel_active1()
2117 return PTR_ERR(rq[0]); in __cancel_active1()
2119 clear_bit(CONTEXT_BANNED, &rq[0]->context->flags); in __cancel_active1()
2120 i915_request_get(rq[0]); in __cancel_active1()
2121 i915_request_add(rq[0]); in __cancel_active1()
2122 if (!igt_wait_for_spinner(&arg->a.spin, rq[0])) { in __cancel_active1()
2127 rq[1] = spinner_create_request(&arg->b.spin, in __cancel_active1()
2130 if (IS_ERR(rq[1])) { in __cancel_active1()
2131 err = PTR_ERR(rq[1]); in __cancel_active1()
2135 clear_bit(CONTEXT_BANNED, &rq[1]->context->flags); in __cancel_active1()
2136 i915_request_get(rq[1]); in __cancel_active1()
2137 err = i915_request_await_dma_fence(rq[1], &rq[0]->fence); in __cancel_active1()
2138 i915_request_add(rq[1]); in __cancel_active1()
2142 intel_context_set_banned(rq[1]->context); in __cancel_active1()
2148 err = wait_for_reset(arg->engine, rq[1], HZ / 2); in __cancel_active1()
2152 if (rq[0]->fence.error != 0) { in __cancel_active1()
2158 if (rq[1]->fence.error != -EIO) { in __cancel_active1()
2165 i915_request_put(rq[1]); in __cancel_active1()
2166 i915_request_put(rq[0]); in __cancel_active1()
2174 struct i915_request *rq[3] = {}; in __cancel_queued() local
2184 rq[0] = spinner_create_request(&arg->a.spin, in __cancel_queued()
2187 if (IS_ERR(rq[0])) in __cancel_queued()
2188 return PTR_ERR(rq[0]); in __cancel_queued()
2190 clear_bit(CONTEXT_BANNED, &rq[0]->context->flags); in __cancel_queued()
2191 i915_request_get(rq[0]); in __cancel_queued()
2192 i915_request_add(rq[0]); in __cancel_queued()
2193 if (!igt_wait_for_spinner(&arg->a.spin, rq[0])) { in __cancel_queued()
2198 rq[1] = igt_request_alloc(arg->b.ctx, arg->engine); in __cancel_queued()
2199 if (IS_ERR(rq[1])) { in __cancel_queued()
2200 err = PTR_ERR(rq[1]); in __cancel_queued()
2204 clear_bit(CONTEXT_BANNED, &rq[1]->context->flags); in __cancel_queued()
2205 i915_request_get(rq[1]); in __cancel_queued()
2206 err = i915_request_await_dma_fence(rq[1], &rq[0]->fence); in __cancel_queued()
2207 i915_request_add(rq[1]); in __cancel_queued()
2211 rq[2] = spinner_create_request(&arg->b.spin, in __cancel_queued()
2214 if (IS_ERR(rq[2])) { in __cancel_queued()
2215 err = PTR_ERR(rq[2]); in __cancel_queued()
2219 i915_request_get(rq[2]); in __cancel_queued()
2220 err = i915_request_await_dma_fence(rq[2], &rq[1]->fence); in __cancel_queued()
2221 i915_request_add(rq[2]); in __cancel_queued()
2225 intel_context_set_banned(rq[2]->context); in __cancel_queued()
2230 err = wait_for_reset(arg->engine, rq[2], HZ / 2); in __cancel_queued()
2234 if (rq[0]->fence.error != -EIO) { in __cancel_queued()
2240 if (rq[1]->fence.error != 0) { in __cancel_queued()
2246 if (rq[2]->fence.error != -EIO) { in __cancel_queued()
2253 i915_request_put(rq[2]); in __cancel_queued()
2254 i915_request_put(rq[1]); in __cancel_queued()
2255 i915_request_put(rq[0]); in __cancel_queued()
2263 struct i915_request *rq; in __cancel_hostile() local
2274 rq = spinner_create_request(&arg->a.spin, in __cancel_hostile()
2277 if (IS_ERR(rq)) in __cancel_hostile()
2278 return PTR_ERR(rq); in __cancel_hostile()
2280 clear_bit(CONTEXT_BANNED, &rq->context->flags); in __cancel_hostile()
2281 i915_request_get(rq); in __cancel_hostile()
2282 i915_request_add(rq); in __cancel_hostile()
2283 if (!igt_wait_for_spinner(&arg->a.spin, rq)) { in __cancel_hostile()
2288 intel_context_set_banned(rq->context); in __cancel_hostile()
2293 err = wait_for_reset(arg->engine, rq, HZ / 2); in __cancel_hostile()
2300 i915_request_put(rq); in __cancel_hostile()
2320 struct i915_request *rq; in __cancel_fail() local
2330 rq = spinner_create_request(&arg->a.spin, in __cancel_fail()
2333 if (IS_ERR(rq)) in __cancel_fail()
2334 return PTR_ERR(rq); in __cancel_fail()
2336 clear_bit(CONTEXT_BANNED, &rq->context->flags); in __cancel_fail()
2337 i915_request_get(rq); in __cancel_fail()
2338 i915_request_add(rq); in __cancel_fail()
2339 if (!igt_wait_for_spinner(&arg->a.spin, rq)) { in __cancel_fail()
2344 intel_context_set_banned(rq->context); in __cancel_fail()
2362 err = wait_for_reset(engine, rq, HZ / 2); in __cancel_fail()
2371 i915_request_put(rq); in __cancel_fail()
2571 struct i915_request *rq; in live_chain_preempt() local
2577 rq = spinner_create_request(&lo.spin, in live_chain_preempt()
2580 if (IS_ERR(rq)) in live_chain_preempt()
2583 i915_request_get(rq); in live_chain_preempt()
2584 i915_request_add(rq); in live_chain_preempt()
2586 ring_size = rq->wa_tail - rq->head; in live_chain_preempt()
2588 ring_size += rq->ring->size; in live_chain_preempt()
2589 ring_size = rq->ring->size / ring_size; in live_chain_preempt()
2594 if (i915_request_wait(rq, 0, HZ / 2) < 0) { in live_chain_preempt()
2596 i915_request_put(rq); in live_chain_preempt()
2599 i915_request_put(rq); in live_chain_preempt()
2607 rq = spinner_create_request(&hi.spin, in live_chain_preempt()
2610 if (IS_ERR(rq)) in live_chain_preempt()
2612 i915_request_add(rq); in live_chain_preempt()
2613 if (!igt_wait_for_spinner(&hi.spin, rq)) in live_chain_preempt()
2616 rq = spinner_create_request(&lo.spin, in live_chain_preempt()
2619 if (IS_ERR(rq)) in live_chain_preempt()
2621 i915_request_add(rq); in live_chain_preempt()
2624 rq = igt_request_alloc(lo.ctx, engine); in live_chain_preempt()
2625 if (IS_ERR(rq)) in live_chain_preempt()
2627 i915_request_add(rq); in live_chain_preempt()
2630 rq = igt_request_alloc(hi.ctx, engine); in live_chain_preempt()
2631 if (IS_ERR(rq)) in live_chain_preempt()
2634 i915_request_get(rq); in live_chain_preempt()
2635 i915_request_add(rq); in live_chain_preempt()
2636 engine->sched_engine->schedule(rq, &attr); in live_chain_preempt()
2639 if (i915_request_wait(rq, 0, HZ / 5) < 0) { in live_chain_preempt()
2647 i915_request_put(rq); in live_chain_preempt()
2651 i915_request_put(rq); in live_chain_preempt()
2653 rq = igt_request_alloc(lo.ctx, engine); in live_chain_preempt()
2654 if (IS_ERR(rq)) in live_chain_preempt()
2657 i915_request_get(rq); in live_chain_preempt()
2658 i915_request_add(rq); in live_chain_preempt()
2660 if (i915_request_wait(rq, 0, HZ / 5) < 0) { in live_chain_preempt()
2669 i915_request_put(rq); in live_chain_preempt()
2672 i915_request_put(rq); in live_chain_preempt()
2701 struct i915_request *rq; in create_gang() local
2756 rq = intel_context_create_request(ce); in create_gang()
2757 if (IS_ERR(rq)) { in create_gang()
2758 err = PTR_ERR(rq); in create_gang()
2762 rq->batch = i915_vma_get(vma); in create_gang()
2763 i915_request_get(rq); in create_gang()
2766 err = i915_request_await_object(rq, vma->obj, false); in create_gang()
2768 err = i915_vma_move_to_active(vma, rq, 0); in create_gang()
2770 err = rq->engine->emit_bb_start(rq, in create_gang()
2774 i915_request_add(rq); in create_gang()
2781 rq->mock.link.next = &(*prev)->mock.link; in create_gang()
2782 *prev = rq; in create_gang()
2786 i915_vma_put(rq->batch); in create_gang()
2787 i915_request_put(rq); in create_gang()
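
create_gang() (and create_gpr_client() further down) follow the usual batch-submission pattern: declare the vma active for the request, then point the hardware at the batch with emit_bb_start(). Condensed sketch; the batch address/length arguments are not part of the matches above and are shown as an assumption (vma->node.start / PAGE_SIZE), with locking and error unwinding elided:

	err = i915_request_await_object(rq, vma->obj, false);
	if (err == 0)
		err = i915_vma_move_to_active(vma, rq, 0);
	if (err == 0)
		err = rq->engine->emit_bb_start(rq,
						vma->node.start,
						PAGE_SIZE, 0);

	i915_request_add(rq);
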
2800 struct i915_request *rq; in __live_preempt_ring() local
2832 rq = igt_spinner_create_request(spin, ce[0], MI_ARB_CHECK); in __live_preempt_ring()
2833 if (IS_ERR(rq)) { in __live_preempt_ring()
2834 err = PTR_ERR(rq); in __live_preempt_ring()
2838 i915_request_get(rq); in __live_preempt_ring()
2839 rq->sched.attr.priority = I915_PRIORITY_BARRIER; in __live_preempt_ring()
2840 i915_request_add(rq); in __live_preempt_ring()
2842 if (!igt_wait_for_spinner(spin, rq)) { in __live_preempt_ring()
2844 i915_request_put(rq); in __live_preempt_ring()
2851 while (ce[0]->ring->tail - rq->wa_tail <= queue_sz) { in __live_preempt_ring()
2857 i915_request_put(rq); in __live_preempt_ring()
2871 rq->tail); in __live_preempt_ring()
2872 i915_request_put(rq); in __live_preempt_ring()
2875 rq = intel_context_create_request(ce[1]); in __live_preempt_ring()
2876 if (IS_ERR(rq)) { in __live_preempt_ring()
2877 err = PTR_ERR(rq); in __live_preempt_ring()
2881 rq->sched.attr.priority = I915_PRIORITY_BARRIER; in __live_preempt_ring()
2882 i915_request_get(rq); in __live_preempt_ring()
2883 i915_request_add(rq); in __live_preempt_ring()
2885 err = wait_for_submit(engine, rq, HZ / 2); in __live_preempt_ring()
2886 i915_request_put(rq); in __live_preempt_ring()
2974 struct i915_request *rq = NULL; in live_preempt_gang() local
2990 err = create_gang(engine, &rq); in live_preempt_gang()
2995 engine->sched_engine->schedule(rq, &attr); in live_preempt_gang()
3007 cs = i915_gem_object_pin_map_unlocked(rq->batch->obj, I915_MAP_WC); in live_preempt_gang()
3010 i915_gem_object_unpin_map(rq->batch->obj); in live_preempt_gang()
3016 while (rq) { /* wait for each rq from highest to lowest prio */ in live_preempt_gang()
3017 struct i915_request *n = list_next_entry(rq, mock.link); in live_preempt_gang()
3019 if (err == 0 && i915_request_wait(rq, 0, HZ / 5) < 0) { in live_preempt_gang()
3024 prio, rq_prio(rq)); in live_preempt_gang()
3031 i915_vma_put(rq->batch); in live_preempt_gang()
3032 i915_request_put(rq); in live_preempt_gang()
3033 rq = n; in live_preempt_gang()
3152 struct i915_request *rq; in create_gpr_client() local
3175 rq = intel_context_create_request(ce); in create_gpr_client()
3176 if (IS_ERR(rq)) { in create_gpr_client()
3177 err = PTR_ERR(rq); in create_gpr_client()
3182 err = i915_request_await_object(rq, vma->obj, false); in create_gpr_client()
3184 err = i915_vma_move_to_active(vma, rq, 0); in create_gpr_client()
3189 err = i915_request_await_object(rq, batch->obj, false); in create_gpr_client()
3191 err = i915_vma_move_to_active(batch, rq, 0); in create_gpr_client()
3193 err = rq->engine->emit_bb_start(rq, in create_gpr_client()
3200 i915_request_get(rq); in create_gpr_client()
3201 i915_request_add(rq); in create_gpr_client()
3209 return err ? ERR_PTR(err) : rq; in create_gpr_client()
3219 struct i915_request *rq; in preempt_user() local
3223 rq = intel_engine_create_kernel_request(engine); in preempt_user()
3224 if (IS_ERR(rq)) in preempt_user()
3225 return PTR_ERR(rq); in preempt_user()
3227 cs = intel_ring_begin(rq, 4); in preempt_user()
3229 i915_request_add(rq); in preempt_user()
3238 intel_ring_advance(rq, cs); in preempt_user()
3240 i915_request_get(rq); in preempt_user()
3241 i915_request_add(rq); in preempt_user()
3243 engine->sched_engine->schedule(rq, &attr); in preempt_user()
3245 if (i915_request_wait(rq, 0, HZ / 2) < 0) in preempt_user()
3247 i915_request_put(rq); in preempt_user()
3304 struct i915_request *rq; in live_preempt_user() local
3306 rq = create_gpr_client(engine, global, in live_preempt_user()
3308 if (IS_ERR(rq)) { in live_preempt_user()
3309 err = PTR_ERR(rq); in live_preempt_user()
3313 client[i] = rq; in live_preempt_user()
3403 struct i915_request *rq; in live_preempt_timeout() local
3408 rq = spinner_create_request(&spin_lo, ctx_lo, engine, in live_preempt_timeout()
3410 if (IS_ERR(rq)) { in live_preempt_timeout()
3411 err = PTR_ERR(rq); in live_preempt_timeout()
3415 i915_request_add(rq); in live_preempt_timeout()
3416 if (!igt_wait_for_spinner(&spin_lo, rq)) { in live_preempt_timeout()
3422 rq = igt_request_alloc(ctx_hi, engine); in live_preempt_timeout()
3423 if (IS_ERR(rq)) { in live_preempt_timeout()
3425 err = PTR_ERR(rq); in live_preempt_timeout()
3436 i915_request_get(rq); in live_preempt_timeout()
3437 i915_request_add(rq); in live_preempt_timeout()
3442 if (i915_request_wait(rq, 0, HZ / 10) < 0) { in live_preempt_timeout()
3444 i915_request_put(rq); in live_preempt_timeout()
3450 i915_request_put(rq); in live_preempt_timeout()
3495 struct i915_request *rq; in smoke_submit() local
3515 rq = igt_request_alloc(ctx, smoke->engine); in smoke_submit()
3516 if (IS_ERR(rq)) { in smoke_submit()
3517 err = PTR_ERR(rq); in smoke_submit()
3523 err = i915_request_await_object(rq, vma->obj, false); in smoke_submit()
3525 err = i915_vma_move_to_active(vma, rq, 0); in smoke_submit()
3527 err = rq->engine->emit_bb_start(rq, in smoke_submit()
3533 i915_request_add(rq); in smoke_submit()
3763 struct i915_request *rq; in nop_virtual_engine() local
3765 rq = i915_request_create(ve[nc]); in nop_virtual_engine()
3766 if (IS_ERR(rq)) { in nop_virtual_engine()
3767 err = PTR_ERR(rq); in nop_virtual_engine()
3773 request[nc] = i915_request_get(rq); in nop_virtual_engine()
3774 i915_request_add(rq); in nop_virtual_engine()
3780 struct i915_request *rq; in nop_virtual_engine() local
3782 rq = i915_request_create(ve[nc]); in nop_virtual_engine()
3783 if (IS_ERR(rq)) { in nop_virtual_engine()
3784 err = PTR_ERR(rq); in nop_virtual_engine()
3790 request[nc] = i915_request_get(rq); in nop_virtual_engine()
3791 i915_request_add(rq); in nop_virtual_engine()
4036 struct i915_request *rq; in slicein_virtual_engine() local
4055 rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK); in slicein_virtual_engine()
4057 if (IS_ERR(rq)) { in slicein_virtual_engine()
4058 err = PTR_ERR(rq); in slicein_virtual_engine()
4062 i915_request_add(rq); in slicein_virtual_engine()
4071 rq = intel_context_create_request(ce); in slicein_virtual_engine()
4073 if (IS_ERR(rq)) { in slicein_virtual_engine()
4074 err = PTR_ERR(rq); in slicein_virtual_engine()
4078 i915_request_get(rq); in slicein_virtual_engine()
4079 i915_request_add(rq); in slicein_virtual_engine()
4080 if (i915_request_wait(rq, 0, timeout) < 0) { in slicein_virtual_engine()
4082 __func__, rq->engine->name); in slicein_virtual_engine()
4087 i915_request_put(rq); in slicein_virtual_engine()
4103 struct i915_request *rq; in sliceout_virtual_engine() local
4123 rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK); in sliceout_virtual_engine()
4125 if (IS_ERR(rq)) { in sliceout_virtual_engine()
4126 err = PTR_ERR(rq); in sliceout_virtual_engine()
4130 i915_request_add(rq); in sliceout_virtual_engine()
4140 rq = intel_context_create_request(ce); in sliceout_virtual_engine()
4142 if (IS_ERR(rq)) { in sliceout_virtual_engine()
4143 err = PTR_ERR(rq); in sliceout_virtual_engine()
4147 i915_request_get(rq); in sliceout_virtual_engine()
4148 i915_request_add(rq); in sliceout_virtual_engine()
4149 if (i915_request_wait(rq, 0, timeout) < 0) { in sliceout_virtual_engine()
4156 i915_request_put(rq); in sliceout_virtual_engine()
4235 struct i915_request *rq; in preserved_virtual_engine() local
4237 rq = i915_request_create(ve); in preserved_virtual_engine()
4238 if (IS_ERR(rq)) { in preserved_virtual_engine()
4239 err = PTR_ERR(rq); in preserved_virtual_engine()
4244 last = i915_request_get(rq); in preserved_virtual_engine()
4246 cs = intel_ring_begin(rq, 8); in preserved_virtual_engine()
4248 i915_request_add(rq); in preserved_virtual_engine()
4263 intel_ring_advance(rq, cs); in preserved_virtual_engine()
4266 rq->execution_mask = engine->mask; in preserved_virtual_engine()
4267 i915_request_add(rq); in preserved_virtual_engine()
4346 struct i915_request *rq; in reset_virtual_engine() local
4368 rq = igt_spinner_create_request(&spin, ve, MI_ARB_CHECK); in reset_virtual_engine()
4369 if (IS_ERR(rq)) { in reset_virtual_engine()
4370 err = PTR_ERR(rq); in reset_virtual_engine()
4373 i915_request_add(rq); in reset_virtual_engine()
4375 if (!igt_wait_for_spinner(&spin, rq)) { in reset_virtual_engine()
4381 engine = rq->engine; in reset_virtual_engine()
4390 GEM_BUG_ON(execlists_active(&engine->execlists) != rq); in reset_virtual_engine()
4396 GEM_BUG_ON(rq->engine != engine); in reset_virtual_engine()
4399 execlists_hold(engine, rq); in reset_virtual_engine()
4400 GEM_BUG_ON(!i915_request_on_hold(rq)); in reset_virtual_engine()
4403 GEM_BUG_ON(rq->fence.error != -EIO); in reset_virtual_engine()
4409 i915_request_get(rq); in reset_virtual_engine()
4410 if (!i915_request_wait(rq, 0, HZ / 5)) { in reset_virtual_engine()
4417 GEM_BUG_ON(!i915_request_on_hold(rq)); in reset_virtual_engine()
4420 execlists_unhold(engine, rq); in reset_virtual_engine()
4421 if (i915_request_wait(rq, 0, HZ / 5) < 0) { in reset_virtual_engine()
4429 i915_request_put(rq); in reset_virtual_engine()