/drivers/net/ethernet/mellanox/mlx5/core/
wq.h
    80  void *wqc, struct mlx5_wq_cyc *wq,
    82  void mlx5_wq_cyc_wqe_dump(struct mlx5_wq_cyc *wq, u16 ix, u8 nstrides);
    83  void mlx5_wq_cyc_reset(struct mlx5_wq_cyc *wq);
    86  void *qpc, struct mlx5_wq_qp *wq,
    90  void *cqc, struct mlx5_cqwq *wq,
    94  void *wqc, struct mlx5_wq_ll *wq,
    96  void mlx5_wq_ll_reset(struct mlx5_wq_ll *wq);
   100  static inline u32 mlx5_wq_cyc_get_size(struct mlx5_wq_cyc *wq)  [in mlx5_wq_cyc_get_size(), argument]
   102  return (u32)wq->fbc.sz_m1 + 1;  [in mlx5_wq_cyc_get_size()]
   105  static inline int mlx5_wq_cyc_is_full(struct mlx5_wq_cyc *wq)  [in mlx5_wq_cyc_is_full(), argument]
   [all …]
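Throughout these mlx5 helpers the queue depth is a power of two: fbc.sz_m1 stores depth - 1, so mlx5_wq_cyc_get_size() is just mask + 1, and a free-running counter maps to a slot with a single AND. A minimal userspace sketch of that arithmetic follows; the struct and function names are illustrative stand-ins, not the driver's.

    #include <assert.h>
    #include <stdint.h>

    /* Illustrative stand-in for the fragment-buffer control block:
     * sz_m1 holds (queue depth - 1), so depth must be a power of two. */
    struct fbc { uint32_t sz_m1; };
    struct wq_cyc { struct fbc fbc; };

    static uint32_t wq_cyc_get_size(const struct wq_cyc *wq)
    {
        return wq->fbc.sz_m1 + 1;           /* depth = mask + 1 */
    }

    static uint16_t wq_cyc_ctr2ix(const struct wq_cyc *wq, uint16_t ctr)
    {
        return ctr & wq->fbc.sz_m1;         /* free-running counter -> slot */
    }

    int main(void)
    {
        struct wq_cyc wq = { .fbc = { .sz_m1 = 255 } };  /* depth 256 */
        assert(wq_cyc_get_size(&wq) == 256);
        assert(wq_cyc_ctr2ix(&wq, 257) == 1);            /* wraps cheaply */
        return 0;
    }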
wq.c
    38  void *wqc, struct mlx5_wq_cyc *wq,  [in mlx5_wq_cyc_create(), argument]
    41  u8 log_wq_stride = MLX5_GET(wq, wqc, log_wq_stride);  [in mlx5_wq_cyc_create()]
    42  u8 log_wq_sz = MLX5_GET(wq, wqc, log_wq_sz);  [in mlx5_wq_cyc_create()]
    43  struct mlx5_frag_buf_ctrl *fbc = &wq->fbc;  [in mlx5_wq_cyc_create()]
    52  wq->db = wq_ctrl->db.db;  [in mlx5_wq_cyc_create()]
    62  wq->sz = mlx5_wq_cyc_get_size(wq);  [in mlx5_wq_cyc_create()]
    74  void mlx5_wq_cyc_wqe_dump(struct mlx5_wq_cyc *wq, u16 ix, u8 nstrides)  [in mlx5_wq_cyc_wqe_dump(), argument]
    84  len = nstrides << wq->fbc.log_stride;  [in mlx5_wq_cyc_wqe_dump()]
    85  wqe = mlx5_wq_cyc_get_wqe(wq, ix);  [in mlx5_wq_cyc_wqe_dump()]
    88  mlx5_wq_cyc_get_size(wq), wq->cur_sz, ix, len);  [in mlx5_wq_cyc_wqe_dump()]
   [all …]
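mlx5_wq_cyc_create() pulls log_wq_stride and log_wq_sz out of the firmware WQ context, so both the WQE stride and the queue depth are powers of two and every byte offset reduces to a shift (the dump helper's len = nstrides << log_stride above). A compilable sketch of that offset math; the parameter values are made up for illustration.

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical parameters, mirroring log_wq_stride/log_wq_sz above. */
    #define LOG_WQ_STRIDE 6   /* 64-byte WQE stride */
    #define LOG_WQ_SZ     8   /* 256 entries */

    /* Byte offset of WQE ix, and byte length of nstrides strides. */
    static uint32_t wqe_offset(uint16_t ix)   { return (uint32_t)ix << LOG_WQ_STRIDE; }
    static uint32_t wqe_len(uint8_t nstrides) { return (uint32_t)nstrides << LOG_WQ_STRIDE; }

    int main(void)
    {
        printf("wq bytes=%u, wqe 3 at offset %u, 2 strides = %u bytes\n",
               1u << (LOG_WQ_STRIDE + LOG_WQ_SZ), wqe_offset(3), wqe_len(2));
        return 0;
    }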
/drivers/scsi/fnic/
vnic_wq.c
    16  static int vnic_wq_get_ctrl(struct vnic_dev *vdev, struct vnic_wq *wq,  [in vnic_wq_get_ctrl(), argument]
    19  wq->ctrl = vnic_dev_get_res(vdev, res_type, index);  [in vnic_wq_get_ctrl()]
    21  if (!wq->ctrl)  [in vnic_wq_get_ctrl()]
    28  static int vnic_wq_alloc_ring(struct vnic_dev *vdev, struct vnic_wq *wq,  [in vnic_wq_alloc_ring(), argument]
    31  return vnic_dev_alloc_desc_ring(vdev, &wq->ring, desc_count, desc_size);  [in vnic_wq_alloc_ring()]
    35  static int vnic_wq_alloc_bufs(struct vnic_wq *wq)  [in vnic_wq_alloc_bufs(), argument]
    38  unsigned int i, j, count = wq->ring.desc_count;  [in vnic_wq_alloc_bufs()]
    42  wq->bufs[i] = kzalloc(VNIC_WQ_BUF_BLK_SZ, GFP_ATOMIC);  [in vnic_wq_alloc_bufs()]
    43  if (!wq->bufs[i]) {  [in vnic_wq_alloc_bufs()]
    50  buf = wq->bufs[i];  [in vnic_wq_alloc_bufs()]
   [all …]
vnic_wq_copy.h
    24  static inline unsigned int vnic_wq_copy_desc_avail(struct vnic_wq_copy *wq)  [in vnic_wq_copy_desc_avail(), argument]
    26  return wq->ring.desc_avail;  [in vnic_wq_copy_desc_avail()]
    29  static inline unsigned int vnic_wq_copy_desc_in_use(struct vnic_wq_copy *wq)  [in vnic_wq_copy_desc_in_use(), argument]
    31  return wq->ring.desc_count - 1 - wq->ring.desc_avail;  [in vnic_wq_copy_desc_in_use()]
    34  static inline void *vnic_wq_copy_next_desc(struct vnic_wq_copy *wq)  [in vnic_wq_copy_next_desc(), argument]
    36  struct fcpio_host_req *desc = wq->ring.descs;  [in vnic_wq_copy_next_desc()]
    37  return &desc[wq->to_use_index];  [in vnic_wq_copy_next_desc()]
    40  static inline void vnic_wq_copy_post(struct vnic_wq_copy *wq)  [in vnic_wq_copy_post(), argument]
    43  ((wq->to_use_index + 1) == wq->ring.desc_count) ?  [in vnic_wq_copy_post()]
    44  (wq->to_use_index = 0) : (wq->to_use_index++);  [in vnic_wq_copy_post()]
   [all …]
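Two details of this ring are worth spelling out: vnic_wq_copy_desc_in_use() computes desc_count - 1 - desc_avail because one slot is deliberately kept in reserve (so a full ring never looks identical to an empty one), and the post helper wraps to_use_index back to zero by comparison rather than masking. A compilable sketch of the invariant, with illustrative names:

    #include <assert.h>

    /* Illustrative ring bookkeeping, mirroring vnic_wq_copy above. */
    struct ring { unsigned desc_count, desc_avail, to_use_index; };

    static unsigned in_use(const struct ring *r)
    {
        /* One slot is sacrificed so that full != empty. */
        return r->desc_count - 1 - r->desc_avail;
    }

    static void post(struct ring *r)
    {
        r->to_use_index = (r->to_use_index + 1 == r->desc_count) ?
                          0 : r->to_use_index + 1;
        r->desc_avail--;
    }

    int main(void)
    {
        struct ring r = { .desc_count = 8, .desc_avail = 7, .to_use_index = 6 };
        post(&r);
        post(&r);                              /* second post wraps to slot 0 */
        assert(r.to_use_index == 0 && in_use(&r) == 2);
        return 0;
    }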
vnic_wq_copy.c
    13  void vnic_wq_copy_enable(struct vnic_wq_copy *wq)  [in vnic_wq_copy_enable(), argument]
    15  iowrite32(1, &wq->ctrl->enable);  [in vnic_wq_copy_enable()]
    18  int vnic_wq_copy_disable(struct vnic_wq_copy *wq)  [in vnic_wq_copy_disable(), argument]
    22  iowrite32(0, &wq->ctrl->enable);  [in vnic_wq_copy_disable()]
    26  if (!(ioread32(&wq->ctrl->running)))  [in vnic_wq_copy_disable()]
    33  wq->index, ioread32(&wq->ctrl->fetch_index),  [in vnic_wq_copy_disable()]
    34  ioread32(&wq->ctrl->posted_index));  [in vnic_wq_copy_disable()]
    39  void vnic_wq_copy_clean(struct vnic_wq_copy *wq,  [in vnic_wq_copy_clean(), argument]
    40  void (*q_clean)(struct vnic_wq_copy *wq,  [in vnic_wq_copy_clean(), argument]
    43  BUG_ON(ioread32(&wq->ctrl->enable));  [in vnic_wq_copy_clean()]
   [all …]
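The disable path is a small handshake: clear the enable register, then poll the running register until the hardware quiesces, reporting the fetch/posted indices if it never stops. A sketch of that shape, with a plain struct standing in for the memory-mapped control block (the driver uses ioread32()/iowrite32() and sleeps between polls):

    #include <stdio.h>

    /* Stand-in for the MMIO control block; illustrative only. */
    struct wq_ctrl { unsigned enable, running, fetch_index, posted_index; };

    static int wq_disable(struct wq_ctrl *ctrl)
    {
        ctrl->enable = 0;
        for (int tries = 0; tries < 100; tries++) {  /* bounded poll */
            if (!ctrl->running)
                return 0;
            /* the driver sleeps ~1ms per iteration here */
        }
        fprintf(stderr, "disable failed: fetch=%u posted=%u\n",
                ctrl->fetch_index, ctrl->posted_index);
        return -1;
    }

    int main(void)
    {
        /* Model a device that has already stopped fetching. */
        struct wq_ctrl ctrl = { .enable = 1, .running = 0 };
        return wq_disable(&ctrl);
    }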
vnic_wq.h
    86  static inline unsigned int vnic_wq_desc_avail(struct vnic_wq *wq)  [in vnic_wq_desc_avail(), argument]
    89  return wq->ring.desc_avail;  [in vnic_wq_desc_avail()]
    92  static inline unsigned int vnic_wq_desc_used(struct vnic_wq *wq)  [in vnic_wq_desc_used(), argument]
    95  return wq->ring.desc_count - wq->ring.desc_avail - 1;  [in vnic_wq_desc_used()]
    98  static inline void *vnic_wq_next_desc(struct vnic_wq *wq)  [in vnic_wq_next_desc(), argument]
   100  return wq->to_use->desc;  [in vnic_wq_next_desc()]
   103  static inline void vnic_wq_post(struct vnic_wq *wq,  [in vnic_wq_post(), argument]
   107  struct vnic_wq_buf *buf = wq->to_use;  [in vnic_wq_post()]
   122  iowrite32(buf->index, &wq->ctrl->posted_index);  [in vnic_wq_post()]
   124  wq->to_use = buf;  [in vnic_wq_post()]
   [all …]
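vnic_wq_post() fills the descriptor at to_use, then publishes the producer position by writing the successor buffer's index to the posted_index doorbell, so the device knows how far it may fetch. A minimal sketch of that sequence, assuming the circular buf list built by vnic_wq_alloc_bufs(); names are illustrative:

    #include <assert.h>

    /* Illustrative post path: hand the current slot to hardware, ring
     * the doorbell with the successor's index, advance the cursor. */
    struct buf { unsigned index; struct buf *next; };
    struct wq {
        struct buf *to_use;
        unsigned desc_avail;
        volatile unsigned *posted_index;   /* iowrite32() target in the driver */
    };

    static void wq_post(struct wq *wq)
    {
        struct buf *buf = wq->to_use->next;  /* slot after the one just filled */
        *wq->posted_index = buf->index;      /* doorbell: device may fetch now */
        wq->to_use = buf;
        wq->desc_avail--;
    }

    int main(void)
    {
        unsigned doorbell = 0;
        struct buf b[2] = { { 0, &b[1] }, { 1, &b[0] } };  /* tiny circle */
        struct wq wq = { .to_use = &b[0], .desc_avail = 1,
                         .posted_index = &doorbell };
        wq_post(&wq);
        assert(doorbell == 1 && wq.to_use == &b[1]);
        return 0;
    }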
/drivers/net/ethernet/cisco/enic/
vnic_wq.c
    18  static int vnic_wq_alloc_bufs(struct vnic_wq *wq)  [in vnic_wq_alloc_bufs(), argument]
    21  unsigned int i, j, count = wq->ring.desc_count;  [in vnic_wq_alloc_bufs()]
    25  wq->bufs[i] = kzalloc(VNIC_WQ_BUF_BLK_SZ(count), GFP_KERNEL);  [in vnic_wq_alloc_bufs()]
    26  if (!wq->bufs[i])  [in vnic_wq_alloc_bufs()]
    31  buf = wq->bufs[i];  [in vnic_wq_alloc_bufs()]
    34  buf->desc = (u8 *)wq->ring.descs +  [in vnic_wq_alloc_bufs()]
    35  wq->ring.desc_size * buf->index;  [in vnic_wq_alloc_bufs()]
    37  buf->next = wq->bufs[0];  [in vnic_wq_alloc_bufs()]
    41  buf->next = wq->bufs[i + 1];  [in vnic_wq_alloc_bufs()]
    51  wq->to_use = wq->to_clean = wq->bufs[0];  [in vnic_wq_alloc_bufs()]
   [all …]
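The allocator stitches per-descriptor bookkeeping entries into a circle: each buf points at its slot in the flat descriptor ring, and the last links back to the first, so to_use and to_clean can chase each other forever. The driver actually allocates the bufs in fixed-size blocks (VNIC_WQ_BUF_BLK_SZ) and links across block boundaries; the sketch below simplifies to one allocation with a modulo wrap.

    #include <assert.h>
    #include <stdlib.h>

    /* Illustrative: one bookkeeping entry per descriptor slot. */
    struct buf { unsigned index; void *desc; struct buf *next; };

    static struct buf *make_ring(void *descs, unsigned count, unsigned desc_size)
    {
        struct buf *bufs = calloc(count, sizeof(*bufs));
        if (!bufs)
            return NULL;
        for (unsigned i = 0; i < count; i++) {
            bufs[i].index = i;
            bufs[i].desc = (char *)descs + (size_t)desc_size * i;
            bufs[i].next = &bufs[(i + 1) % count];   /* last wraps to first */
        }
        return bufs;
    }

    int main(void)
    {
        char descs[16 * 64];                         /* 16 slots, 64 B each */
        struct buf *b = make_ring(descs, 16, 64);
        assert(b && b[15].next == &b[0]);
        free(b);
        return 0;
    }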
vnic_wq.h
    86  struct vnic_wq wq;  [member]
    90  static inline unsigned int vnic_wq_desc_avail(struct vnic_wq *wq)  [in vnic_wq_desc_avail(), argument]
    93  return wq->ring.desc_avail;  [in vnic_wq_desc_avail()]
    96  static inline unsigned int vnic_wq_desc_used(struct vnic_wq *wq)  [in vnic_wq_desc_used(), argument]
    99  return wq->ring.desc_count - wq->ring.desc_avail - 1;  [in vnic_wq_desc_used()]
   102  static inline void *vnic_wq_next_desc(struct vnic_wq *wq)  [in vnic_wq_next_desc(), argument]
   104  return wq->to_use->desc;  [in vnic_wq_next_desc()]
   107  static inline void vnic_wq_doorbell(struct vnic_wq *wq)  [in vnic_wq_doorbell(), argument]
   115  iowrite32(wq->to_use->index, &wq->ctrl->posted_index);  [in vnic_wq_doorbell()]
   118  static inline void vnic_wq_post(struct vnic_wq *wq,  [in vnic_wq_post(), argument]
   [all …]
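Unlike the fnic variant above, enic splits vnic_wq_doorbell() out of the post path, so a burst of descriptors costs one MMIO write instead of one per descriptor. A tiny sketch of that batching pattern; the names and the fill step are illustrative:

    #include <assert.h>

    struct wq { unsigned to_use_index; volatile unsigned *posted_index; };

    static void post_one(struct wq *wq) { wq->to_use_index++; /* fill desc */ }
    static void doorbell(struct wq *wq) { *wq->posted_index = wq->to_use_index; }

    int main(void)
    {
        unsigned reg = 0;
        struct wq wq = { .to_use_index = 0, .posted_index = &reg };
        for (int i = 0; i < 4; i++)
            post_one(&wq);
        doorbell(&wq);            /* one write publishes the whole batch */
        assert(reg == 4);
        return 0;
    }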
/drivers/scsi/snic/
vnic_wq.c
    12  static inline int vnic_wq_get_ctrl(struct vnic_dev *vdev, struct vnic_wq *wq,  [in vnic_wq_get_ctrl(), argument]
    15  wq->ctrl = svnic_dev_get_res(vdev, res_type, index);  [in vnic_wq_get_ctrl()]
    16  if (!wq->ctrl)  [in vnic_wq_get_ctrl()]
    22  static inline int vnic_wq_alloc_ring(struct vnic_dev *vdev, struct vnic_wq *wq,  [in vnic_wq_alloc_ring(), argument]
    25  return svnic_dev_alloc_desc_ring(vdev, &wq->ring, desc_count,  [in vnic_wq_alloc_ring()]
    29  static int vnic_wq_alloc_bufs(struct vnic_wq *wq)  [in vnic_wq_alloc_bufs(), argument]
    32  unsigned int i, j, count = wq->ring.desc_count;  [in vnic_wq_alloc_bufs()]
    36  wq->bufs[i] = kzalloc(VNIC_WQ_BUF_BLK_SZ, GFP_ATOMIC);  [in vnic_wq_alloc_bufs()]
    37  if (!wq->bufs[i]) {  [in vnic_wq_alloc_bufs()]
    45  buf = wq->bufs[i];  [in vnic_wq_alloc_bufs()]
   [all …]
vnic_wq.h
    71  static inline unsigned int svnic_wq_desc_avail(struct vnic_wq *wq)  [in svnic_wq_desc_avail(), argument]
    74  return wq->ring.desc_avail;  [in svnic_wq_desc_avail()]
    77  static inline unsigned int svnic_wq_desc_used(struct vnic_wq *wq)  [in svnic_wq_desc_used(), argument]
    80  return wq->ring.desc_count - wq->ring.desc_avail - 1;  [in svnic_wq_desc_used()]
    83  static inline void *svnic_wq_next_desc(struct vnic_wq *wq)  [in svnic_wq_next_desc(), argument]
    85  return wq->to_use->desc;  [in svnic_wq_next_desc()]
    88  static inline void svnic_wq_post(struct vnic_wq *wq,  [in svnic_wq_post(), argument]
    92  struct vnic_wq_buf *buf = wq->to_use;  [in svnic_wq_post()]
   107  iowrite32(buf->index, &wq->ctrl->posted_index);  [in svnic_wq_post()]
   109  wq->to_use = buf;  [in svnic_wq_post()]
   [all …]
/drivers/net/ethernet/huawei/hinic/
hinic_hw_wq.c
    34  #define WQ_SIZE(wq) ((wq)->q_depth * (wq)->wqebb_size)  [argument]
    44  #define WQ_BASE_VADDR(wqs, wq) \  [argument]
    45  ((void *)((wqs)->page_vaddr[(wq)->page_idx]) \
    46  + (wq)->block_idx * WQ_BLOCK_SIZE)
    48  #define WQ_BASE_PADDR(wqs, wq) \  [argument]
    49  ((wqs)->page_paddr[(wq)->page_idx] \
    50  + (wq)->block_idx * WQ_BLOCK_SIZE)
    52  #define WQ_BASE_ADDR(wqs, wq) \  [argument]
    53  ((void *)((wqs)->shadow_page_vaddr[(wq)->page_idx]) \
    54  + (wq)->block_idx * WQ_BLOCK_SIZE)
   [all …]
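The hinic layout is two-level: WQ_SIZE() multiplies queue depth by the WQ element (WQEBB) size, and the base-address macros pick a page from the WQ set, then step block_idx fixed-size blocks into it, in both virtual and physical address spaces. A compilable sketch of that address arithmetic; WQ_BLOCK_SIZE and the array sizes here are hypothetical, not the driver's values:

    #include <stdint.h>
    #include <stdio.h>

    #define WQ_BLOCK_SIZE 4096u   /* hypothetical block size */

    struct wq  { unsigned q_depth, wqebb_size, page_idx, block_idx; };
    struct wqs { void *page_vaddr[4]; uint64_t page_paddr[4]; };

    /* Mirrors WQ_SIZE() and WQ_BASE_VADDR()/WQ_BASE_PADDR() above. */
    static unsigned wq_size(const struct wq *wq)
    { return wq->q_depth * wq->wqebb_size; }

    static void *wq_base_vaddr(const struct wqs *wqs, const struct wq *wq)
    { return (char *)wqs->page_vaddr[wq->page_idx] + wq->block_idx * WQ_BLOCK_SIZE; }

    static uint64_t wq_base_paddr(const struct wqs *wqs, const struct wq *wq)
    { return wqs->page_paddr[wq->page_idx] + wq->block_idx * WQ_BLOCK_SIZE; }

    int main(void)
    {
        static char page[16 * WQ_BLOCK_SIZE];
        struct wqs wqs = { .page_vaddr = { page }, .page_paddr = { 0x100000 } };
        struct wq wq = { .q_depth = 256, .wqebb_size = 64,
                         .page_idx = 0, .block_idx = 2 };
        printf("size=%u vaddr=%p paddr=0x%llx\n", wq_size(&wq),
               wq_base_vaddr(&wqs, &wq),
               (unsigned long long)wq_base_paddr(&wqs, &wq));
        return 0;
    }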
hinic_hw_qp.c
    61  #define SQ_MASKED_IDX(sq, idx) ((idx) & (sq)->wq->mask)
    62  #define RQ_MASKED_IDX(rq, idx) ((idx) & (rq)->wq->mask)
    98  struct hinic_wq *wq;  [in hinic_sq_prepare_ctxt(), local]
   100  wq = sq->wq;  [in hinic_sq_prepare_ctxt()]
   101  ci_start = atomic_read(&wq->cons_idx);  [in hinic_sq_prepare_ctxt()]
   102  pi_start = atomic_read(&wq->prod_idx);  [in hinic_sq_prepare_ctxt()]
   105  wq_page_addr = be64_to_cpu(*wq->block_vaddr);  [in hinic_sq_prepare_ctxt()]
   112  if (wq->num_q_pages == 1)  [in hinic_sq_prepare_ctxt()]
   115  wq_block_pfn = HINIC_WQ_BLOCK_PFN(wq->block_paddr);  [in hinic_sq_prepare_ctxt()]
   160  struct hinic_wq *wq;  [in hinic_rq_prepare_ctxt(), local]
   [all …]
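SQ_MASKED_IDX()/RQ_MASKED_IDX() assume the depth is a power of two: prod_idx and cons_idx run free and are masked only when a slot is actually touched. A common companion of this scheme (not shown in the snippet, so treat it as an inference) is that occupancy falls out of plain unsigned subtraction even across counter wraparound:

    #include <assert.h>
    #include <stdint.h>

    /* Free-running producer/consumer counters, reduced on use with a
     * power-of-two mask, as in the SQ_MASKED_IDX() pattern above. */
    struct q { uint32_t prod_idx, cons_idx, mask; };   /* mask = depth - 1 */

    static uint32_t masked(const struct q *q, uint32_t idx) { return idx & q->mask; }

    static uint32_t occupancy(const struct q *q)
    {
        /* Unsigned subtraction stays correct when prod_idx wraps 2^32. */
        return (q->prod_idx - q->cons_idx) & q->mask;
    }

    int main(void)
    {
        struct q q = { .prod_idx = 2, .cons_idx = 0xFFFFFFFEu, .mask = 255 };
        assert(occupancy(&q) == 4);          /* survives the 32-bit wrap */
        assert(masked(&q, q.prod_idx) == 2);
        return 0;
    }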
/drivers/dma/idxd/
device.c
    19  static void idxd_wq_disable_cleanup(struct idxd_wq *wq);
    42  static void free_hw_descs(struct idxd_wq *wq)  [in free_hw_descs(), argument]
    46  for (i = 0; i < wq->num_descs; i++)  [in free_hw_descs()]
    47  kfree(wq->hw_descs[i]);  [in free_hw_descs()]
    49  kfree(wq->hw_descs);  [in free_hw_descs()]
    52  static int alloc_hw_descs(struct idxd_wq *wq, int num)  [in alloc_hw_descs(), argument]
    54  struct device *dev = &wq->idxd->pdev->dev;  [in alloc_hw_descs()]
    58  wq->hw_descs = kcalloc_node(num, sizeof(struct dsa_hw_desc *),  [in alloc_hw_descs()]
    60  if (!wq->hw_descs)  [in alloc_hw_descs()]
    64  wq->hw_descs[i] = kzalloc_node(sizeof(*wq->hw_descs[i]),  [in alloc_hw_descs()]
   [all …]
cdev.c
    34  struct idxd_wq *wq;  [member]
    45  struct idxd_wq *wq = idxd_cdev->wq;  [in idxd_cdev_dev_release(), local]
    47  cdev_ctx = &ictx[wq->idxd->data->type];  [in idxd_cdev_dev_release()]
    68  return idxd_cdev->wq;  [in inode_wq()]
    75  struct idxd_wq *wq;  [in idxd_cdev_open(), local]
    81  wq = inode_wq(inode);  [in idxd_cdev_open()]
    82  idxd = wq->idxd;  [in idxd_cdev_open()]
    85  dev_dbg(dev, "%s called: %d\n", __func__, idxd_wq_refcount(wq));  [in idxd_cdev_open()]
    91  mutex_lock(&wq->wq_lock);  [in idxd_cdev_open()]
    93  if (idxd_wq_refcount(wq) > 0 && wq_dedicated(wq)) {  [in idxd_cdev_open()]
   [all …]
dma.c
    20  return idxd_chan->wq;  [in to_idxd_wq()]
    27  struct idxd_device *idxd = desc->wq->idxd;  [in idxd_dma_complete_txd()]
    56  idxd_free_desc(desc->wq, desc);  [in idxd_dma_complete_txd()]
    72  static inline void idxd_prep_desc_common(struct idxd_wq *wq,  [in idxd_prep_desc_common(), argument]
    93  struct idxd_wq *wq = to_idxd_wq(c);  [in idxd_dma_prep_interrupt(), local]
    97  if (wq->state != IDXD_WQ_ENABLED)  [in idxd_dma_prep_interrupt()]
   101  desc = idxd_alloc_desc(wq, IDXD_OP_BLOCK);  [in idxd_dma_prep_interrupt()]
   105  idxd_prep_desc_common(wq, desc->hw, DSA_OPCODE_NOOP,  [in idxd_dma_prep_interrupt()]
   115  struct idxd_wq *wq = to_idxd_wq(c);  [in idxd_dma_submit_memcpy(), local]
   117  struct idxd_device *idxd = wq->idxd;  [in idxd_dma_submit_memcpy()]
   [all …]
idxd.h
   148  struct idxd_wq *wq;  [member]
   175  struct idxd_wq *wq;  [member]
   313  struct workqueue_struct *wq;  [member]
   338  struct idxd_wq *wq;  [member]
   350  #define wq_confdev(wq) &wq->idxd_dev.conf_dev  [argument]
   449  static inline bool is_idxd_wq_dmaengine(struct idxd_wq *wq)  [in is_idxd_wq_dmaengine(), argument]
   451  if (wq->type == IDXD_WQT_KERNEL && strcmp(wq->name, "dmaengine") == 0)  [in is_idxd_wq_dmaengine()]
   456  static inline bool is_idxd_wq_user(struct idxd_wq *wq)  [in is_idxd_wq_user(), argument]
   458  return wq->type == IDXD_WQT_USER;  [in is_idxd_wq_user()]
   461  static inline bool is_idxd_wq_kernel(struct idxd_wq *wq)  [in is_idxd_wq_kernel(), argument]
   [all …]
submit.c
    11  static struct idxd_desc *__get_desc(struct idxd_wq *wq, int idx, int cpu)  [in __get_desc(), argument]
    14  struct idxd_device *idxd = wq->idxd;  [in __get_desc()]
    16  desc = wq->descs[idx];  [in __get_desc()]
    27  struct idxd_desc *idxd_alloc_desc(struct idxd_wq *wq, enum idxd_op_type optype)  [in idxd_alloc_desc(), argument]
    30  struct idxd_device *idxd = wq->idxd;  [in idxd_alloc_desc()]
    38  sbq = &wq->sbq;  [in idxd_alloc_desc()]
    44  return __get_desc(wq, idx, cpu);  [in idxd_alloc_desc()]
    62  return __get_desc(wq, idx, cpu);  [in idxd_alloc_desc()]
    65  void idxd_free_desc(struct idxd_wq *wq, struct idxd_desc *desc)  [in idxd_free_desc(), argument]
    70  sbitmap_queue_clear(&wq->sbq, desc->id, cpu);  [in idxd_free_desc()]
   [all …]
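idxd_alloc_desc() draws a descriptor id from the per-WQ sbitmap_queue (wq->sbq) and uses it to index wq->descs[]; idxd_free_desc() hands the id back via sbitmap_queue_clear(). A userspace stand-in using a plain 64-bit bitmap shows the allocate/free contract; the real sbitmap is sharded and can sleep for a free bit when the caller allows blocking:

    #include <assert.h>
    #include <stdint.h>

    #define NUM_DESCS 64

    /* Bit i set <=> descriptor i is in flight. */
    struct desc_pool { uint64_t used; };

    static int alloc_id(struct desc_pool *p)
    {
        for (int i = 0; i < NUM_DESCS; i++)
            if (!(p->used & (1ull << i))) {
                p->used |= 1ull << i;
                return i;                 /* caller indexes descs[i] */
            }
        return -1;   /* exhausted; the driver may block here instead */
    }

    static void free_id(struct desc_pool *p, int id) { p->used &= ~(1ull << id); }

    int main(void)
    {
        struct desc_pool p = { 0 };
        int a = alloc_id(&p), b = alloc_id(&p);
        assert(a == 0 && b == 1);
        free_id(&p, a);
        assert(alloc_id(&p) == 0);        /* a freed id is reused */
        return 0;
    }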
sysfs.c
   342  struct idxd_wq *wq = idxd->wqs[i];  [in group_work_queues_show(), local]
   344  if (!wq->group)  [in group_work_queues_show()]
   347  if (wq->group->id == group->id)  [in group_work_queues_show()]
   348  rc += sysfs_emit_at(buf, rc, "wq%d.%d ", idxd->id, wq->id);  [in group_work_queues_show()]
   590  struct idxd_wq *wq = confdev_to_wq(dev);  [in wq_clients_show(), local]
   592  return sysfs_emit(buf, "%d\n", wq->client_count);  [in wq_clients_show()]
   601  struct idxd_wq *wq = confdev_to_wq(dev);  [in wq_state_show(), local]
   603  switch (wq->state) {  [in wq_state_show()]
   619  struct idxd_wq *wq = confdev_to_wq(dev);  [in wq_group_id_show(), local]
   621  if (wq->group)  [in wq_group_id_show()]
   [all …]
irq.c
    47  struct idxd_wq *wq = idxd->wqs[i];  [in idxd_device_reinit(), local]
    49  rc = idxd_wq_enable(wq);  [in idxd_device_reinit()]
    53  dev_name(wq_confdev(wq)));  [in idxd_device_reinit()]
    71  struct idxd_wq *wq = ie_to_wq(ie);  [in idxd_int_handle_revoke_drain(), local]
    72  struct idxd_device *idxd = wq->idxd;  [in idxd_int_handle_revoke_drain()]
    86  portal = idxd_wq_portal_addr(wq);  [in idxd_int_handle_revoke_drain()]
    93  if (wq_dedicated(wq)) {  [in idxd_int_handle_revoke_drain()]
    96  rc = idxd_enqcmds(wq, portal, &desc);  [in idxd_int_handle_revoke_drain()]
    99  dev_warn(dev, "Failed to submit drain desc on wq %d\n", wq->id);  [in idxd_int_handle_revoke_drain()]
   154  struct idxd_wq *wq = ie_to_wq(ie);  [in idxd_int_handle_revoke(), local]
   [all …]
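The wq_dedicated() branch above matters because the two WQ types submit differently: a dedicated WQ owns its portal and can use a plain posted write (MOVDIR64B), while a shared WQ uses ENQCMDS, which the device may reject when the queue is full, so idxd_enqcmds() retries. A loose model of retry-on-busy submission, with the instruction replaced by a counter-driven fake:

    #include <errno.h>
    #include <stdio.h>

    /* Fake "queue full" rejection: fails busy_left times, then accepts.
     * The real idxd_enqcmds() reads the ENQCMDS completion status from
     * the CPU instead. */
    static int fake_enqcmds(int *busy_left)
    {
        return (*busy_left)-- > 0 ? -EAGAIN : 0;
    }

    static int submit_with_retry(int busy_left, int max_retries)
    {
        for (int i = 0; i < max_retries; i++)
            if (fake_enqcmds(&busy_left) == 0)
                return 0;
        return -EBUSY;
    }

    int main(void)
    {
        printf("%d\n", submit_with_retry(3, 10));  /* 0: accepted on try 4 */
        return 0;
    }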
/drivers/infiniband/hw/cxgb4/
t4.h
   480  static inline int t4_rqes_posted(struct t4_wq *wq)  [in t4_rqes_posted(), argument]
   482  return wq->rq.in_use;  [in t4_rqes_posted()]
   485  static inline int t4_rq_empty(struct t4_wq *wq)  [in t4_rq_empty(), argument]
   487  return wq->rq.in_use == 0;  [in t4_rq_empty()]
   490  static inline u32 t4_rq_avail(struct t4_wq *wq)  [in t4_rq_avail(), argument]
   492  return wq->rq.size - 1 - wq->rq.in_use;  [in t4_rq_avail()]
   495  static inline void t4_rq_produce(struct t4_wq *wq, u8 len16)  [in t4_rq_produce(), argument]
   497  wq->rq.in_use++;  [in t4_rq_produce()]
   498  if (++wq->rq.pidx == wq->rq.size)  [in t4_rq_produce()]
   499  wq->rq.pidx = 0;  [in t4_rq_produce()]
   [all …]
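Note the wrap style: t4_rq_produce() resets pidx by comparison against size, which works for any queue size, whereas the mask-based wrap used by mlx5 and hinic above requires a power-of-two depth. t4_rq_avail() again keeps one slot in reserve. A compilable sketch with an intentionally non-power-of-two size:

    #include <assert.h>
    #include <stdint.h>

    /* Compare-and-reset wrap, as in t4_rq_produce() above. */
    struct rq { uint16_t pidx, size, in_use; };

    static uint32_t rq_avail(const struct rq *rq)
    {
        return rq->size - 1 - rq->in_use;   /* one slot kept in reserve */
    }

    static void rq_produce(struct rq *rq)
    {
        rq->in_use++;
        if (++rq->pidx == rq->size)
            rq->pidx = 0;
    }

    int main(void)
    {
        struct rq rq = { .pidx = 5, .size = 6, .in_use = 0 };  /* size 6 */
        rq_produce(&rq);
        assert(rq.pidx == 0 && rq_avail(&rq) == 4);
        return 0;
    }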
qp.c
   150  static int destroy_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,  [in destroy_qp(), argument]
   157  dealloc_sq(rdev, &wq->sq);  [in destroy_qp()]
   158  kfree(wq->sq.sw_sq);  [in destroy_qp()]
   159  c4iw_put_qpid(rdev, wq->sq.qid, uctx);  [in destroy_qp()]
   163  wq->rq.memsize, wq->rq.queue,  [in destroy_qp()]
   164  dma_unmap_addr(&wq->rq, mapping));  [in destroy_qp()]
   165  c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size);  [in destroy_qp()]
   166  kfree(wq->rq.sw_rq);  [in destroy_qp()]
   167  c4iw_put_qpid(rdev, wq->rq.qid, uctx);  [in destroy_qp()]
   199  static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,  [in create_qp(), argument]
   [all …]
cq.c
   184  static void insert_recv_cqe(struct t4_wq *wq, struct t4_cq *cq, u32 srqidx)  [in insert_recv_cqe(), argument]
   189  wq, cq, cq->sw_cidx, cq->sw_pidx);  [in insert_recv_cqe()]
   195  CQE_QPID_V(wq->sq.qid));  [in insert_recv_cqe()]
   203  int c4iw_flush_rq(struct t4_wq *wq, struct t4_cq *cq, int count)  [in c4iw_flush_rq(), argument]
   206  int in_use = wq->rq.in_use - count;  [in c4iw_flush_rq()]
   209  wq, cq, wq->rq.in_use, count);  [in c4iw_flush_rq()]
   211  insert_recv_cqe(wq, cq, 0);  [in c4iw_flush_rq()]
   217  static void insert_sq_cqe(struct t4_wq *wq, struct t4_cq *cq,  [in insert_sq_cqe(), argument]
   223  wq, cq, cq->sw_cidx, cq->sw_pidx);  [in insert_sq_cqe()]
   229  CQE_QPID_V(wq->sq.qid));  [in insert_sq_cqe()]
   [all …]
restrack.c
    39  static int fill_sq(struct sk_buff *msg, struct t4_wq *wq)  [in fill_sq(), argument]
    42  if (rdma_nl_put_driver_u32(msg, "sqid", wq->sq.qid))  [in fill_sq()]
    44  if (rdma_nl_put_driver_u32(msg, "flushed", wq->flushed))  [in fill_sq()]
    46  if (rdma_nl_put_driver_u32(msg, "memsize", wq->sq.memsize))  [in fill_sq()]
    48  if (rdma_nl_put_driver_u32(msg, "cidx", wq->sq.cidx))  [in fill_sq()]
    50  if (rdma_nl_put_driver_u32(msg, "pidx", wq->sq.pidx))  [in fill_sq()]
    52  if (rdma_nl_put_driver_u32(msg, "wq_pidx", wq->sq.wq_pidx))  [in fill_sq()]
    54  if (rdma_nl_put_driver_u32(msg, "flush_cidx", wq->sq.flush_cidx))  [in fill_sq()]
    56  if (rdma_nl_put_driver_u32(msg, "in_use", wq->sq.in_use))  [in fill_sq()]
    58  if (rdma_nl_put_driver_u32(msg, "size", wq->sq.size))  [in fill_sq()]
   [all …]
/drivers/net/ethernet/mellanox/mlx5/core/lib/
aso.c
    12  struct mlx5_cqwq wq;  [member]
    31  struct mlx5_wq_cyc wq;  [member]
    56  err = mlx5_cqwq_create(mdev, &param, cqc_data, &cq->wq, &cq->wq_ctrl);  [in mlx5_aso_alloc_cq()]
    64  for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) {  [in mlx5_aso_alloc_cq()]
    65  struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, i);  [in mlx5_aso_alloc_cq()]
   161  void *sqc_wq = MLX5_ADDR_OF(sqc, sqc_data, wq);  [in mlx5_aso_alloc_sq()]
   162  struct mlx5_wq_cyc *wq = &sq->wq;  [in mlx5_aso_alloc_sq(), local]
   170  err = mlx5_wq_cyc_create(mdev, &param, sqc_wq, wq, &sq->wq_ctrl);  [in mlx5_aso_alloc_sq()]
   173  wq->db = &wq->db[MLX5_SND_DBR];  [in mlx5_aso_alloc_sq()]
   181  void *in, *sqc, *wq;  [in create_aso_sq(), local]
   [all …]
/drivers/net/ethernet/mellanox/mlx5/core/en/
txrx.h
    88  mlx5e_wqc_has_room_for(struct mlx5_wq_cyc *wq, u16 cc, u16 pc, u16 n)  [in mlx5e_wqc_has_room_for(), argument]
    90  return (mlx5_wq_cyc_ctr2ix(wq, cc - pc) >= n) || (cc == pc);  [in mlx5e_wqc_has_room_for()]
    93  static inline void *mlx5e_fetch_wqe(struct mlx5_wq_cyc *wq, u16 pi, size_t wqe_size)  [in mlx5e_fetch_wqe(), argument]
    97  wqe = mlx5_wq_cyc_get_wqe(wq, pi);  [in mlx5e_fetch_wqe()]
   104  ((struct mlx5e_tx_wqe *)mlx5e_fetch_wqe(&(sq)->wq, pi, sizeof(struct mlx5e_tx_wqe)))
   107  mlx5e_post_nop(struct mlx5_wq_cyc *wq, u32 sqn, u16 *pc)  [in mlx5e_post_nop(), argument]
   109  u16 pi = mlx5_wq_cyc_ctr2ix(wq, *pc);  [in mlx5e_post_nop()]
   110  struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi);  [in mlx5e_post_nop()]
   124  mlx5e_post_nop_fence(struct mlx5_wq_cyc *wq, u32 sqn, u16 *pc)  [in mlx5e_post_nop_fence(), argument]
   126  u16 pi = mlx5_wq_cyc_ctr2ix(wq, *pc);  [in mlx5e_post_nop_fence()]
   [all …]
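The room check at line 90 is compact but subtle: with free-running consumer (cc) and producer (pc) counters, (cc - pc) masked to the queue size gives exactly the number of free slots, except that a completely empty ring also masks to 0, which is why cc == pc is special-cased to mean "everything is free". A compilable sketch of that check, assuming a power-of-two depth of 256; the constant and names are illustrative:

    #include <assert.h>
    #include <stdint.h>

    #define MASK 255u   /* depth 256, power of two */

    /* Mirrors mlx5e_wqc_has_room_for(): free = (cc - pc) & MASK, with
     * the empty ring (which also masks to 0) handled by cc == pc. */
    static int has_room_for(uint16_t cc, uint16_t pc, uint16_t n)
    {
        return (((uint16_t)(cc - pc)) & MASK) >= n || cc == pc;
    }

    int main(void)
    {
        assert(has_room_for(0, 250, 6));     /* 250 in flight, 6 free */
        assert(!has_room_for(0, 250, 7));
        assert(has_room_for(42, 42, 200));   /* empty: all 256 free */
        return 0;
    }

mlx5e_post_nop() uses the same counter-to-index mapping when it pads the ring with NOP WQEs, e.g. so a multi-slot WQE never straddles the wrap point.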