/drivers/gpu/drm/i915/ |
D | i915_request.c |
      111  struct i915_request *rq = to_request(fence);    in i915_fence_release() local
      113  GEM_BUG_ON(rq->guc_prio != GUC_PRIO_INIT &&    in i915_fence_release()
      114             rq->guc_prio != GUC_PRIO_FINI);    in i915_fence_release()
      123  i915_sw_fence_fini(&rq->submit);    in i915_fence_release()
      124  i915_sw_fence_fini(&rq->semaphore);    in i915_fence_release()
      131  if (!intel_engine_is_virtual(rq->engine) &&    in i915_fence_release()
      132      !cmpxchg(&rq->engine->request_pool, NULL, rq)) {    in i915_fence_release()
      133          intel_context_put(rq->context);    in i915_fence_release()
      137  intel_context_put(rq->context);    in i915_fence_release()
      139  kmem_cache_free(slab_requests, rq);    in i915_fence_release()
      [all …]
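Note: the i915_fence_release() lines above park a freed request in a one-slot per-engine pool with !cmpxchg(&rq->engine->request_pool, NULL, rq) before falling back to the slab. The following is a minimal user-space sketch of that one-slot lockless cache idea only; the obj/pool_slot names are made up for illustration and C11 atomics stand in for the kernel's cmpxchg().

    #include <stdatomic.h>
    #include <stdlib.h>

    struct obj { int payload; };

    /* One-slot cache: holds at most one freed object for quick reuse. */
    static _Atomic(struct obj *) pool_slot;

    static struct obj *obj_get(void)
    {
        /* Try to take the cached object; fall back to a fresh allocation. */
        struct obj *o = atomic_exchange(&pool_slot, NULL);
        return o ? o : malloc(sizeof(*o));
    }

    static void obj_put(struct obj *o)
    {
        /* Park the object in the empty slot; if the slot is already
         * occupied, the compare-exchange fails and we really free it. */
        struct obj *expected = NULL;
        if (!atomic_compare_exchange_strong(&pool_slot, &expected, o))
            free(o);
    }

The point of the design is that the common free/alloc pair on one engine reuses the same object without taking any lock.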
|
D | i915_request.h |
       56  #define RQ_TRACE(rq, fmt, ...) do { \    argument
       57          const struct i915_request *rq__ = (rq); \
      327  void __i915_request_skip(struct i915_request *rq);
      328  bool i915_request_set_error_once(struct i915_request *rq, int error);
      329  struct i915_request *i915_request_mark_eio(struct i915_request *rq);
      332  void __i915_request_queue(struct i915_request *rq,
      334  void __i915_request_queue_bh(struct i915_request *rq);
      336  bool i915_request_retire(struct i915_request *rq);
      337  void i915_request_retire_upto(struct i915_request *rq);
      349  i915_request_get(struct i915_request *rq)    in i915_request_get() argument
      [all …]
|
/drivers/scsi/fnic/ |
D | vnic_rq.c |
       27  static int vnic_rq_alloc_bufs(struct vnic_rq *rq)    in vnic_rq_alloc_bufs() argument
       30  unsigned int i, j, count = rq->ring.desc_count;    in vnic_rq_alloc_bufs()
       34  rq->bufs[i] = kzalloc(VNIC_RQ_BUF_BLK_SZ, GFP_ATOMIC);    in vnic_rq_alloc_bufs()
       35  if (!rq->bufs[i]) {    in vnic_rq_alloc_bufs()
       42  buf = rq->bufs[i];    in vnic_rq_alloc_bufs()
       45  buf->desc = (u8 *)rq->ring.descs +    in vnic_rq_alloc_bufs()
       46          rq->ring.desc_size * buf->index;    in vnic_rq_alloc_bufs()
       48  buf->next = rq->bufs[0];    in vnic_rq_alloc_bufs()
       51  buf->next = rq->bufs[i + 1];    in vnic_rq_alloc_bufs()
       59  rq->to_use = rq->to_clean = rq->bufs[0];    in vnic_rq_alloc_bufs()
      [all …]
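Note: vnic_rq_alloc_bufs() above allocates its buffer bookkeeping in fixed-size blocks and links every entry into one circular singly linked list, with the last entry's next pointing back at bufs[0] so to_use/to_clean can follow next forever. A stand-alone sketch of that wiring, under assumed names (ring_buf, BLK_ENTRIES) rather than the driver's types:

    #include <stdlib.h>

    #define BLK_ENTRIES 64          /* entries per allocation block (assumed size) */

    struct ring_buf {
        struct ring_buf *next;      /* next entry in the circular list */
        unsigned int index;         /* descriptor index this entry tracks */
    };

    /*
     * Allocate 'count' bookkeeping entries in fixed-size blocks and link
     * them into a single circular list. Returns the first entry (what the
     * driver would use for to_use/to_clean), or NULL on allocation failure.
     */
    static struct ring_buf *ring_bufs_create(struct ring_buf **blocks,
                                             unsigned int count)
    {
        unsigned int nblocks = (count + BLK_ENTRIES - 1) / BLK_ENTRIES;
        unsigned int i;

        for (i = 0; i < nblocks; i++) {
            blocks[i] = calloc(BLK_ENTRIES, sizeof(struct ring_buf));
            if (!blocks[i])
                return NULL;        /* caller frees the blocks already allocated */
        }

        for (i = 0; i < count; i++) {
            struct ring_buf *buf = &blocks[i / BLK_ENTRIES][i % BLK_ENTRIES];
            unsigned int next = (i + 1) % count;        /* wrap at the end */

            buf->index = i;
            buf->next = &blocks[next / BLK_ENTRIES][next % BLK_ENTRIES];
        }

        return &blocks[0][0];
    }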
|
D | vnic_rq.h |
      105  static inline unsigned int vnic_rq_desc_avail(struct vnic_rq *rq)    in vnic_rq_desc_avail() argument
      108  return rq->ring.desc_avail;    in vnic_rq_desc_avail()
      111  static inline unsigned int vnic_rq_desc_used(struct vnic_rq *rq)    in vnic_rq_desc_used() argument
      114  return rq->ring.desc_count - rq->ring.desc_avail - 1;    in vnic_rq_desc_used()
      117  static inline void *vnic_rq_next_desc(struct vnic_rq *rq)    in vnic_rq_next_desc() argument
      119  return rq->to_use->desc;    in vnic_rq_next_desc()
      122  static inline unsigned int vnic_rq_next_index(struct vnic_rq *rq)    in vnic_rq_next_index() argument
      124  return rq->to_use->index;    in vnic_rq_next_index()
      127  static inline unsigned int vnic_rq_next_buf_index(struct vnic_rq *rq)    in vnic_rq_next_buf_index() argument
      129  return rq->buf_index++;    in vnic_rq_next_buf_index()
      [all …]
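Note: vnic_rq_desc_used() above reports desc_count - desc_avail - 1: one descriptor slot is deliberately never handed out, which is the usual way a producer/consumer ring keeps "full" distinguishable from "empty". A tiny sketch of that accounting, using a hypothetical rx_ring rather than the driver's structures:

    struct rx_ring {
        unsigned int desc_count;    /* total descriptor slots in the ring   */
        unsigned int desc_avail;    /* slots the driver may still post into */
    };

    /* Slots currently owned by hardware (posted but not yet cleaned). */
    static inline unsigned int rx_ring_used(const struct rx_ring *r)
    {
        /* One slot stays reserved so a completely full ring never looks
         * identical to a completely empty one. */
        return r->desc_count - r->desc_avail - 1;
    }

    /* An empty ring therefore starts with desc_avail = desc_count - 1. */
    static inline void rx_ring_init(struct rx_ring *r, unsigned int count)
    {
        r->desc_count = count;
        r->desc_avail = count - 1;
    }

The same helpers appear again in the enic copy of vnic_rq.h below.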
|
/drivers/net/ethernet/cisco/enic/ |
D | vnic_rq.c |
       31  static int vnic_rq_alloc_bufs(struct vnic_rq *rq)    in vnic_rq_alloc_bufs() argument
       34  unsigned int i, j, count = rq->ring.desc_count;    in vnic_rq_alloc_bufs()
       38  rq->bufs[i] = kzalloc(VNIC_RQ_BUF_BLK_SZ(count), GFP_KERNEL);    in vnic_rq_alloc_bufs()
       39  if (!rq->bufs[i])    in vnic_rq_alloc_bufs()
       44  buf = rq->bufs[i];    in vnic_rq_alloc_bufs()
       47  buf->desc = (u8 *)rq->ring.descs +    in vnic_rq_alloc_bufs()
       48          rq->ring.desc_size * buf->index;    in vnic_rq_alloc_bufs()
       50  buf->next = rq->bufs[0];    in vnic_rq_alloc_bufs()
       53  buf->next = rq->bufs[i + 1];    in vnic_rq_alloc_bufs()
       61  rq->to_use = rq->to_clean = rq->bufs[0];    in vnic_rq_alloc_bufs()
      [all …]
|
D | vnic_rq.h |
       97  static inline unsigned int vnic_rq_desc_avail(struct vnic_rq *rq)    in vnic_rq_desc_avail() argument
      100  return rq->ring.desc_avail;    in vnic_rq_desc_avail()
      103  static inline unsigned int vnic_rq_desc_used(struct vnic_rq *rq)    in vnic_rq_desc_used() argument
      106  return rq->ring.desc_count - rq->ring.desc_avail - 1;    in vnic_rq_desc_used()
      109  static inline void *vnic_rq_next_desc(struct vnic_rq *rq)    in vnic_rq_next_desc() argument
      111  return rq->to_use->desc;    in vnic_rq_next_desc()
      114  static inline unsigned int vnic_rq_next_index(struct vnic_rq *rq)    in vnic_rq_next_index() argument
      116  return rq->to_use->index;    in vnic_rq_next_index()
      119  static inline void vnic_rq_post(struct vnic_rq *rq,    in vnic_rq_post() argument
      124  struct vnic_rq_buf *buf = rq->to_use;    in vnic_rq_post()
      [all …]
|
/drivers/gpu/drm/i915/gt/ |
D | gen8_engine_cs.c |
       12  int gen8_emit_flush_rcs(struct i915_request *rq, u32 mode)    in gen8_emit_flush_rcs() argument
       41  if (GRAPHICS_VER(rq->engine->i915) == 9)    in gen8_emit_flush_rcs()
       45  if (IS_KBL_GT_STEP(rq->engine->i915, 0, STEP_C0))    in gen8_emit_flush_rcs()
       57  cs = intel_ring_begin(rq, len);    in gen8_emit_flush_rcs()
       73  intel_ring_advance(rq, cs);    in gen8_emit_flush_rcs()
       78  int gen8_emit_flush_xcs(struct i915_request *rq, u32 mode)    in gen8_emit_flush_xcs() argument
       82  cs = intel_ring_begin(rq, 4);    in gen8_emit_flush_xcs()
       98  if (rq->engine->class == VIDEO_DECODE_CLASS)    in gen8_emit_flush_xcs()
      106  intel_ring_advance(rq, cs);    in gen8_emit_flush_xcs()
      111  int gen11_emit_flush_rcs(struct i915_request *rq, u32 mode)    in gen11_emit_flush_rcs() argument
      [all …]
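Note: every emitter excerpted in this file (and in gen6/gen2 below) follows the same shape: reserve a fixed number of dwords with intel_ring_begin(), write exactly that many, then close with intel_ring_advance(). A generic, stand-alone sketch of that reserve/emit/advance discipline follows; the cmdbuf_* names and fixed-size backing store are illustrative, not the i915 API.

    #include <assert.h>
    #include <stdint.h>

    struct cmdbuf {
        uint32_t dwords[1024];      /* backing store for the command stream */
        unsigned int head;          /* next free dword                      */
        unsigned int reserved_end;  /* end of the currently reserved window */
    };

    /* Reserve 'len' dwords; returns a cursor to write through, or NULL. */
    static uint32_t *cmdbuf_begin(struct cmdbuf *cb, unsigned int len)
    {
        if (cb->head + len > sizeof(cb->dwords) / sizeof(cb->dwords[0]))
            return NULL;
        cb->reserved_end = cb->head + len;
        return &cb->dwords[cb->head];
    }

    /* Close the reservation; the cursor must have advanced exactly 'len'. */
    static void cmdbuf_advance(struct cmdbuf *cb, uint32_t *cs)
    {
        unsigned int written = cs - cb->dwords;

        assert(written == cb->reserved_end);    /* under/overrun is a bug */
        cb->head = written;
    }

    /* Usage mirroring the emit functions above: */
    static int emit_two_dwords(struct cmdbuf *cb, uint32_t op, uint32_t arg)
    {
        uint32_t *cs = cmdbuf_begin(cb, 2);

        if (!cs)
            return -1;
        *cs++ = op;
        *cs++ = arg;
        cmdbuf_advance(cb, cs);
        return 0;
    }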
|
D | gen6_engine_cs.c |
       54  gen6_emit_post_sync_nonzero_flush(struct i915_request *rq)    in gen6_emit_post_sync_nonzero_flush() argument
       57  intel_gt_scratch_offset(rq->engine->gt,    in gen6_emit_post_sync_nonzero_flush()
       61  cs = intel_ring_begin(rq, 6);    in gen6_emit_post_sync_nonzero_flush()
       71  intel_ring_advance(rq, cs);    in gen6_emit_post_sync_nonzero_flush()
       73  cs = intel_ring_begin(rq, 6);    in gen6_emit_post_sync_nonzero_flush()
       83  intel_ring_advance(rq, cs);    in gen6_emit_post_sync_nonzero_flush()
       88  int gen6_emit_flush_rcs(struct i915_request *rq, u32 mode)    in gen6_emit_flush_rcs() argument
       91  intel_gt_scratch_offset(rq->engine->gt,    in gen6_emit_flush_rcs()
       97  ret = gen6_emit_post_sync_nonzero_flush(rq);    in gen6_emit_flush_rcs()
      129  cs = intel_ring_begin(rq, 4);    in gen6_emit_flush_rcs()
      [all …]
|
D | selftest_execlists.c |
       27  static bool is_active(struct i915_request *rq)    in is_active() argument
       29  if (i915_request_is_active(rq))    in is_active()
       32  if (i915_request_on_hold(rq))    in is_active()
       35  if (i915_request_has_initial_breadcrumb(rq) && i915_request_started(rq))    in is_active()
       42  struct i915_request *rq,    in wait_for_submit() argument
       52  if (i915_request_completed(rq)) /* that was quick! */    in wait_for_submit()
       57  if (!READ_ONCE(engine->execlists.pending[0]) && is_active(rq))    in wait_for_submit()
       68  struct i915_request *rq,    in wait_for_reset() argument
       80  if (i915_request_completed(rq))    in wait_for_reset()
       83  if (READ_ONCE(rq->fence.error))    in wait_for_reset()
      [all …]
|
D | intel_breadcrumbs.c |
      105  check_signal_order(struct intel_context *ce, struct i915_request *rq)    in check_signal_order() argument
      107  if (rq->context != ce)    in check_signal_order()
      110  if (!list_is_last(&rq->signal_link, &ce->signals) &&    in check_signal_order()
      111      i915_seqno_passed(rq->fence.seqno,    in check_signal_order()
      112                        list_next_entry(rq, signal_link)->fence.seqno))    in check_signal_order()
      115  if (!list_is_first(&rq->signal_link, &ce->signals) &&    in check_signal_order()
      116      i915_seqno_passed(list_prev_entry(rq, signal_link)->fence.seqno,    in check_signal_order()
      117                        rq->fence.seqno))    in check_signal_order()
      206  struct i915_request *rq;    in signal_irq_work() local
      208  list_for_each_entry_rcu(rq, &ce->signals, signal_link) {    in signal_irq_work()
      [all …]
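Note: check_signal_order() above relies on i915_seqno_passed() to verify that requests on a context's signal list stay in ascending fence-seqno order. Such comparisons have to tolerate 32-bit wraparound, which is conventionally done with a signed subtraction; a minimal sketch of that idiom follows (the i915 helper is along these lines, but treat the exact form here as illustrative):

    #include <stdbool.h>
    #include <stdint.h>

    /*
     * Returns true if seqno 'a' is at or after seqno 'b', treating the
     * 32-bit space as circular: the signed difference stays non-negative
     * as long as the two values are less than 2^31 apart, so the test
     * keeps working across the 0xffffffff -> 0 wrap.
     */
    static inline bool seqno_passed(uint32_t a, uint32_t b)
    {
        return (int32_t)(a - b) >= 0;
    }

    /* Example: seqno_passed(0x00000002, 0xfffffffe) is true, because
     * 0x00000002 comes "after" 0xfffffffe once the counter has wrapped,
     * even though it is numerically smaller. */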
|
D | intel_execlists_submission.c |
      208  struct i915_request *rq,    in __active_request() argument
      211  struct i915_request *active = rq;    in __active_request()
      213  list_for_each_entry_from_reverse(rq, &tl->requests, link) {    in __active_request()
      214  if (__i915_request_is_complete(rq))    in __active_request()
      218  i915_request_set_error_once(rq, error);    in __active_request()
      219  __i915_request_skip(rq);    in __active_request()
      221  active = rq;    in __active_request()
      228  active_request(const struct intel_timeline * const tl, struct i915_request *rq)    in active_request() argument
      230  return __active_request(tl, rq, 0);    in active_request()
      251  static int rq_prio(const struct i915_request *rq)    in rq_prio() argument
      [all …]
|
D | selftest_timeline.c |
      452  static int emit_ggtt_store_dw(struct i915_request *rq, u32 addr, u32 value)    in emit_ggtt_store_dw() argument
      456  cs = intel_ring_begin(rq, 4);    in emit_ggtt_store_dw()
      460  if (GRAPHICS_VER(rq->engine->i915) >= 8) {    in emit_ggtt_store_dw()
      465  } else if (GRAPHICS_VER(rq->engine->i915) >= 4) {    in emit_ggtt_store_dw()
      477  intel_ring_advance(rq, cs);    in emit_ggtt_store_dw()
      485  struct i915_request *rq;    in checked_tl_write() local
      490  rq = ERR_PTR(err);    in checked_tl_write()
      501  rq = intel_engine_create_kernel_request(engine);    in checked_tl_write()
      502  if (IS_ERR(rq))    in checked_tl_write()
      505  i915_request_get(rq);    in checked_tl_write()
      [all …]
|
D | gen2_engine_cs.c |
       14  int gen2_emit_flush(struct i915_request *rq, u32 mode)    in gen2_emit_flush() argument
       23  cs = intel_ring_begin(rq, 2 + 4 * num_store_dw);    in gen2_emit_flush()
       36  intel_ring_advance(rq, cs);    in gen2_emit_flush()
       41  int gen4_emit_flush_rcs(struct i915_request *rq, u32 mode)    in gen4_emit_flush_rcs() argument
       77  if (IS_G4X(rq->engine->i915) || GRAPHICS_VER(rq->engine->i915) == 5)    in gen4_emit_flush_rcs()
       85  cs = intel_ring_begin(rq, i);    in gen4_emit_flush_rcs()
      103  *cs++ = intel_gt_scratch_offset(rq->engine->gt,    in gen4_emit_flush_rcs()
      113  *cs++ = intel_gt_scratch_offset(rq->engine->gt,    in gen4_emit_flush_rcs()
      122  intel_ring_advance(rq, cs);    in gen4_emit_flush_rcs()
      127  int gen4_emit_flush_vcs(struct i915_request *rq, u32 mode)    in gen4_emit_flush_vcs() argument
      [all …]
|
D | selftest_hangcheck.c |
       95  const struct i915_request *rq)    in hws_address() argument
       97  return hws->node.start + offset_in_page(sizeof(u32)*rq->fence.context);    in hws_address()
      101  struct i915_request *rq,    in move_to_active() argument
      107  err = i915_request_await_object(rq, vma->obj,    in move_to_active()
      110  err = i915_vma_move_to_active(vma, rq, flags);    in move_to_active()
      122  struct i915_request *rq = NULL;    in hang_create_request() local
      170  rq = igt_request_alloc(h->ctx, engine);    in hang_create_request()
      171  if (IS_ERR(rq)) {    in hang_create_request()
      172  err = PTR_ERR(rq);    in hang_create_request()
      176  err = move_to_active(vma, rq, 0);    in hang_create_request()
      [all …]
|
D | intel_engine_heartbeat.c |
       42  struct i915_request *rq;    in heartbeat_create() local
       45  rq = __i915_request_create(ce, gfp);    in heartbeat_create()
       48  return rq;    in heartbeat_create()
       51  static void idle_pulse(struct intel_engine_cs *engine, struct i915_request *rq)    in idle_pulse() argument
       54  i915_request_add_active_barriers(rq);    in idle_pulse()
       56  engine->heartbeat.systole = i915_request_get(rq);    in idle_pulse()
       59  static void heartbeat_commit(struct i915_request *rq,    in heartbeat_commit() argument
       62  idle_pulse(rq->engine, rq);    in heartbeat_commit()
       64  __i915_request_commit(rq);    in heartbeat_commit()
       65  __i915_request_queue(rq, attr);    in heartbeat_commit()
      [all …]
|
D | selftest_lrc.c |
       33  static bool is_active(struct i915_request *rq)    in is_active() argument
       35  if (i915_request_is_active(rq))    in is_active()
       38  if (i915_request_on_hold(rq))    in is_active()
       41  if (i915_request_has_initial_breadcrumb(rq) && i915_request_started(rq))    in is_active()
       48  struct i915_request *rq,    in wait_for_submit() argument
       58  if (i915_request_completed(rq)) /* that was quick! */    in wait_for_submit()
       63  if (!READ_ONCE(engine->execlists.pending[0]) && is_active(rq))    in wait_for_submit()
       78  struct i915_request *rq;    in emit_semaphore_signal() local
       81  rq = intel_context_create_request(ce);    in emit_semaphore_signal()
       82  if (IS_ERR(rq))    in emit_semaphore_signal()
      [all …]
|
D | selftest_context.c |
       15  static int request_sync(struct i915_request *rq)    in request_sync() argument
       17  struct intel_timeline *tl = i915_request_timeline(rq);    in request_sync()
       22  i915_request_get(rq);    in request_sync()
       25  __i915_request_commit(rq);    in request_sync()
       26  rq->sched.attr.priority = I915_PRIORITY_BARRIER;    in request_sync()
       27  __i915_request_queue_bh(rq);    in request_sync()
       29  timeout = i915_request_wait(rq, 0, HZ / 10);    in request_sync()
       33  i915_request_retire_upto(rq);    in request_sync()
       35  lockdep_unpin_lock(&tl->mutex, rq->cookie);    in request_sync()
       38  i915_request_put(rq);    in request_sync()
      [all …]
|
/drivers/net/ethernet/mellanox/mlx5/core/ |
D | en_rx.c |
       58  mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
       61  mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
       63  static void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
       64  static void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
       84  static inline void mlx5e_read_title_slot(struct mlx5e_rq *rq,    in mlx5e_read_title_slot() argument
       88  struct mlx5e_cq_decomp *cqd = &rq->cqd;    in mlx5e_read_title_slot()
       94  rq->stats->cqe_compress_blks++;    in mlx5e_read_title_slot()
      129  static inline void mlx5e_decompress_cqe(struct mlx5e_rq *rq,    in mlx5e_decompress_cqe() argument
      133  struct mlx5e_cq_decomp *cqd = &rq->cqd;    in mlx5e_decompress_cqe()
      145  if (test_bit(MLX5E_RQ_STATE_MINI_CQE_HW_STRIDX, &rq->state)) {    in mlx5e_decompress_cqe()
      [all …]
|
/drivers/scsi/esas2r/ |
D | esas2r_disc.c |
       49  struct esas2r_request *rq);
       51  struct esas2r_request *rq);
       55  struct esas2r_request *rq);
       59  struct esas2r_request *rq);
       61  struct esas2r_request *rq);
       63  struct esas2r_request *rq);
       65  struct esas2r_request *rq);
       67  struct esas2r_request *rq);
       69  struct esas2r_request *rq);
       71  struct esas2r_request *rq);
      [all …]
|
D | esas2r_vda.c |
       59  static void clear_vda_request(struct esas2r_request *rq);
       62  struct esas2r_request *rq);
       67  struct esas2r_request *rq,    in esas2r_process_vda_ioctl() argument
       93  clear_vda_request(rq);    in esas2r_process_vda_ioctl()
       95  rq->vrq->scsi.function = vi->function;    in esas2r_process_vda_ioctl()
       96  rq->interrupt_cb = esas2r_complete_vda_ioctl;    in esas2r_process_vda_ioctl()
       97  rq->interrupt_cx = vi;    in esas2r_process_vda_ioctl()
      112  rq->vrq->flash.length = cpu_to_le32(datalen);    in esas2r_process_vda_ioctl()
      113  rq->vrq->flash.sub_func = vi->cmd.flash.sub_func;    in esas2r_process_vda_ioctl()
      115  memcpy(rq->vrq->flash.data.file.file_name,    in esas2r_process_vda_ioctl()
      [all …]
|
D | esas2r_int.c |
      173  struct esas2r_request *rq,    in esas2r_handle_outbound_rsp_err() argument
      181  if (unlikely(rq->req_stat != RS_SUCCESS)) {    in esas2r_handle_outbound_rsp_err()
      182  memcpy(&rq->func_rsp, &rsp->func_rsp, sizeof(rsp->func_rsp));    in esas2r_handle_outbound_rsp_err()
      184  if (rq->req_stat == RS_ABORTED) {    in esas2r_handle_outbound_rsp_err()
      185  if (rq->timeout > RQ_MAX_TIMEOUT)    in esas2r_handle_outbound_rsp_err()
      186  rq->req_stat = RS_TIMEOUT;    in esas2r_handle_outbound_rsp_err()
      187  } else if (rq->req_stat == RS_SCSI_ERROR) {    in esas2r_handle_outbound_rsp_err()
      188  u8 scsistatus = rq->func_rsp.scsi_rsp.scsi_stat;    in esas2r_handle_outbound_rsp_err()
      197  rq->req_stat = RS_SUCCESS;    in esas2r_handle_outbound_rsp_err()
      198  rq->func_rsp.scsi_rsp.scsi_stat =    in esas2r_handle_outbound_rsp_err()
      [all …]
|
D | esas2r_io.c |
       46  void esas2r_start_request(struct esas2r_adapter *a, struct esas2r_request *rq)    in esas2r_start_request() argument
       49  struct esas2r_request *startrq = rq;    in esas2r_start_request()
       54  if (rq->vrq->scsi.function == VDA_FUNC_SCSI)    in esas2r_start_request()
       55  rq->req_stat = RS_SEL2;    in esas2r_start_request()
       57  rq->req_stat = RS_DEGRADED;    in esas2r_start_request()
       58  } else if (likely(rq->vrq->scsi.function == VDA_FUNC_SCSI)) {    in esas2r_start_request()
       59  t = a->targetdb + rq->target_id;    in esas2r_start_request()
       63  rq->req_stat = RS_SEL;    in esas2r_start_request()
       66  rq->vrq->scsi.target_id = cpu_to_le16(t->virt_targ_id);    in esas2r_start_request()
       75  rq->req_stat = RS_SEL;    in esas2r_start_request()
      [all …]
|
/drivers/gpu/drm/i915/selftests/ |
D | i915_perf.c |
      155  static int write_timestamp(struct i915_request *rq, int slot)    in write_timestamp() argument
      160  cs = intel_ring_begin(rq, 6);    in write_timestamp()
      165  if (GRAPHICS_VER(rq->engine->i915) >= 8)    in write_timestamp()
      177  intel_ring_advance(rq, cs);    in write_timestamp()
      182  static ktime_t poll_status(struct i915_request *rq, int slot)    in poll_status() argument
      184  while (!intel_read_status_page(rq->engine, slot) &&    in poll_status()
      185         !i915_request_completed(rq))    in poll_status()
      195  struct i915_request *rq;    in live_noa_delay() local
      218  rq = intel_engine_create_kernel_request(stream->engine);    in live_noa_delay()
      219  if (IS_ERR(rq)) {    in live_noa_delay()
      [all …]
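Note: poll_status() above spins until either the value shows up in the status page or the request completes, so the wait has an escape condition and cannot hang on a dead request. A stand-alone sketch of the same "poll with a way out" pattern, using C11 atomics and a deadline in place of the i915 helpers (all names here are illustrative):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <time.h>

    /* Poll 'flag' until it becomes nonzero, giving up after 'timeout_ms'.
     * Returns true if the flag was observed set, false on timeout. */
    static bool poll_flag(const _Atomic int *flag, long timeout_ms)
    {
        struct timespec start, now;

        clock_gettime(CLOCK_MONOTONIC, &start);
        while (!atomic_load_explicit(flag, memory_order_acquire)) {
            clock_gettime(CLOCK_MONOTONIC, &now);
            long elapsed_ms = (now.tv_sec - start.tv_sec) * 1000 +
                              (now.tv_nsec - start.tv_nsec) / 1000000;
            if (elapsed_ms > timeout_ms)
                return false;   /* escape condition: do not spin forever */
        }
        return true;
    }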
|
D | igt_spinner.c |
      116  const struct i915_request *rq)    in hws_address() argument
      118  return hws->node.start + seqno_offset(rq->fence.context);    in hws_address()
      122  struct i915_request *rq,    in move_to_active() argument
      128  err = i915_request_await_object(rq, vma->obj,    in move_to_active()
      131  err = i915_vma_move_to_active(vma, rq, flags);    in move_to_active()
      143  struct i915_request *rq = NULL;    in igt_spinner_create_request() local
      163  rq = intel_context_create_request(ce);    in igt_spinner_create_request()
      164  if (IS_ERR(rq))    in igt_spinner_create_request()
      165  return ERR_CAST(rq);    in igt_spinner_create_request()
      167  err = move_to_active(vma, rq, 0);    in igt_spinner_create_request()
      [all …]
|
/drivers/net/ethernet/huawei/hinic/ |
D | hinic_hw_qp.c |
       62  #define RQ_MASKED_IDX(rq, idx) ((idx) & (rq)->wq->mask)    argument
      155  struct hinic_rq *rq, u16 global_qid)    in hinic_rq_prepare_ctxt() argument
      162  wq = rq->wq;    in hinic_rq_prepare_ctxt()
      181  HINIC_RQ_CTXT_PI_SET(rq->msix_entry, INTR);    in hinic_rq_prepare_ctxt()
      202  rq_ctxt->pi_paddr_hi = upper_32_bits(rq->pi_dma_addr);    in hinic_rq_prepare_ctxt()
      203  rq_ctxt->pi_paddr_lo = lower_32_bits(rq->pi_dma_addr);    in hinic_rq_prepare_ctxt()
      247  static int alloc_rq_skb_arr(struct hinic_rq *rq)    in alloc_rq_skb_arr() argument
      249  struct hinic_wq *wq = rq->wq;    in alloc_rq_skb_arr()
      252  skb_arr_size = wq->q_depth * sizeof(*rq->saved_skb);    in alloc_rq_skb_arr()
      253  rq->saved_skb = vzalloc(skb_arr_size);    in alloc_rq_skb_arr()
      [all …]
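Note: the RQ_MASKED_IDX() macro above wraps a queue index with idx & wq->mask, which only works because the queue depth is a power of two and mask is depth - 1. A small sketch of that wrap-around indexing follows; the work_queue names are illustrative, not HiNIC's structures.

    #include <assert.h>

    struct work_queue {
        unsigned int q_depth;       /* must be a power of two          */
        unsigned int mask;          /* q_depth - 1, used for wrapping  */
        unsigned int prod_idx;      /* monotonically increasing index  */
    };

    static inline void wq_init(struct work_queue *wq, unsigned int depth)
    {
        assert(depth && (depth & (depth - 1)) == 0);    /* power of two */
        wq->q_depth = depth;
        wq->mask = depth - 1;
        wq->prod_idx = 0;
    }

    /* Equivalent of RQ_MASKED_IDX(): fold an unbounded index into the ring. */
    static inline unsigned int wq_masked_idx(const struct work_queue *wq,
                                             unsigned int idx)
    {
        return idx & wq->mask;      /* same result as idx % q_depth, but cheaper */
    }

    /* Post one entry: the raw index keeps counting up, the mask wraps it. */
    static inline unsigned int wq_post(struct work_queue *wq)
    {
        return wq_masked_idx(wq, wq->prod_idx++);
    }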
|