Searched refs:wq (Results 1 – 25 of 445) sorted by relevance

/drivers/net/ethernet/mellanox/mlx5/core/
wq.h
80 void *wqc, struct mlx5_wq_cyc *wq,
82 void mlx5_wq_cyc_wqe_dump(struct mlx5_wq_cyc *wq, u16 ix, u8 nstrides);
83 void mlx5_wq_cyc_reset(struct mlx5_wq_cyc *wq);
86 void *qpc, struct mlx5_wq_qp *wq,
90 void *cqc, struct mlx5_cqwq *wq,
94 void *wqc, struct mlx5_wq_ll *wq,
96 void mlx5_wq_ll_reset(struct mlx5_wq_ll *wq);
100 static inline u32 mlx5_wq_cyc_get_size(struct mlx5_wq_cyc *wq) in mlx5_wq_cyc_get_size() argument
102 return (u32)wq->fbc.sz_m1 + 1; in mlx5_wq_cyc_get_size()
105 static inline int mlx5_wq_cyc_is_full(struct mlx5_wq_cyc *wq) in mlx5_wq_cyc_is_full() argument
[all …]
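
These helpers rely on the queue size being a power of two: mlx5_wq_cyc keeps "size minus one" (fbc.sz_m1) as a mask, so the size is the mask plus one and a free-running counter is reduced to a slot index with a single AND. A minimal standalone sketch of that idiom follows; the struct and function names are illustrative, not the mlx5 API.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for mlx5_wq_cyc: a power-of-two cyclic queue
 * that stores its size as "size - 1" so it doubles as an index mask. */
struct cyc_wq {
        uint32_t sz_m1;        /* size - 1; size must be a power of two */
};

/* Mirrors mlx5_wq_cyc_get_size(): recover the size from the mask. */
static uint32_t cyc_wq_get_size(const struct cyc_wq *wq)
{
        return wq->sz_m1 + 1;
}

/* Reduce a free-running counter to a slot index (counter modulo size). */
static uint32_t cyc_wq_ctr2ix(const struct cyc_wq *wq, uint32_t ctr)
{
        return ctr & wq->sz_m1;
}

int main(void)
{
        struct cyc_wq wq = { .sz_m1 = 7 };      /* 8-entry queue */

        printf("size=%u ix(9)=%u\n",
               cyc_wq_get_size(&wq),            /* 8 */
               cyc_wq_ctr2ix(&wq, 9));          /* 9 & 7 = 1 */
        return 0;
}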
wq.c
43 void *wqc, struct mlx5_wq_cyc *wq, in mlx5_wq_cyc_create() argument
46 u8 log_wq_stride = MLX5_GET(wq, wqc, log_wq_stride); in mlx5_wq_cyc_create()
47 u8 log_wq_sz = MLX5_GET(wq, wqc, log_wq_sz); in mlx5_wq_cyc_create()
48 struct mlx5_frag_buf_ctrl *fbc = &wq->fbc; in mlx5_wq_cyc_create()
57 wq->db = wq_ctrl->db.db; in mlx5_wq_cyc_create()
67 wq->sz = mlx5_wq_cyc_get_size(wq); in mlx5_wq_cyc_create()
79 void mlx5_wq_cyc_wqe_dump(struct mlx5_wq_cyc *wq, u16 ix, u8 nstrides) in mlx5_wq_cyc_wqe_dump() argument
89 len = nstrides << wq->fbc.log_stride; in mlx5_wq_cyc_wqe_dump()
90 wqe = mlx5_wq_cyc_get_wqe(wq, ix); in mlx5_wq_cyc_wqe_dump()
93 mlx5_wq_cyc_get_size(wq), wq->cur_sz, ix, len); in mlx5_wq_cyc_wqe_dump()
[all …]
/drivers/scsi/fnic/
vnic_wq.c
28 static int vnic_wq_get_ctrl(struct vnic_dev *vdev, struct vnic_wq *wq, in vnic_wq_get_ctrl() argument
31 wq->ctrl = vnic_dev_get_res(vdev, res_type, index); in vnic_wq_get_ctrl()
33 if (!wq->ctrl) in vnic_wq_get_ctrl()
40 static int vnic_wq_alloc_ring(struct vnic_dev *vdev, struct vnic_wq *wq, in vnic_wq_alloc_ring() argument
43 return vnic_dev_alloc_desc_ring(vdev, &wq->ring, desc_count, desc_size); in vnic_wq_alloc_ring()
47 static int vnic_wq_alloc_bufs(struct vnic_wq *wq) in vnic_wq_alloc_bufs() argument
50 unsigned int i, j, count = wq->ring.desc_count; in vnic_wq_alloc_bufs()
54 wq->bufs[i] = kzalloc(VNIC_WQ_BUF_BLK_SZ, GFP_ATOMIC); in vnic_wq_alloc_bufs()
55 if (!wq->bufs[i]) { in vnic_wq_alloc_bufs()
62 buf = wq->bufs[i]; in vnic_wq_alloc_bufs()
[all …]
vnic_wq_copy.h
36 static inline unsigned int vnic_wq_copy_desc_avail(struct vnic_wq_copy *wq) in vnic_wq_copy_desc_avail() argument
38 return wq->ring.desc_avail; in vnic_wq_copy_desc_avail()
41 static inline unsigned int vnic_wq_copy_desc_in_use(struct vnic_wq_copy *wq) in vnic_wq_copy_desc_in_use() argument
43 return wq->ring.desc_count - 1 - wq->ring.desc_avail; in vnic_wq_copy_desc_in_use()
46 static inline void *vnic_wq_copy_next_desc(struct vnic_wq_copy *wq) in vnic_wq_copy_next_desc() argument
48 struct fcpio_host_req *desc = wq->ring.descs; in vnic_wq_copy_next_desc()
49 return &desc[wq->to_use_index]; in vnic_wq_copy_next_desc()
52 static inline void vnic_wq_copy_post(struct vnic_wq_copy *wq) in vnic_wq_copy_post() argument
55 ((wq->to_use_index + 1) == wq->ring.desc_count) ? in vnic_wq_copy_post()
56 (wq->to_use_index = 0) : (wq->to_use_index++); in vnic_wq_copy_post()
[all …]
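
One detail worth calling out in these helpers: the ring keeps one descriptor slot permanently unused so a full ring never looks the same as an empty one, which is where desc_count - 1 - desc_avail comes from, and vnic_wq_copy_post() wraps to_use_index with an explicit compare rather than a mask (the ring size need not be a power of two). A hedged userspace sketch of the same accounting, with illustrative names and a made-up 4-entry ring:

#include <assert.h>

struct ring {
        unsigned int desc_count;        /* total slots in the ring */
        unsigned int desc_avail;        /* slots still free to post into */
        unsigned int to_use_index;      /* next slot the driver will fill */
};

/* Same arithmetic as vnic_wq_copy_desc_in_use(): one slot is reserved. */
static unsigned int ring_in_use(const struct ring *r)
{
        return r->desc_count - 1 - r->desc_avail;
}

/* Same wraparound idiom as vnic_wq_copy_post(). */
static void ring_post(struct ring *r)
{
        r->to_use_index = (r->to_use_index + 1 == r->desc_count) ?
                          0 : r->to_use_index + 1;
        r->desc_avail--;                /* caller must check avail first */
}

int main(void)
{
        /* 4 slots; avail starts at desc_count - 1 = 3 */
        struct ring r = { .desc_count = 4, .desc_avail = 3, .to_use_index = 0 };

        ring_post(&r);
        ring_post(&r);
        ring_post(&r);
        assert(r.desc_avail == 0);      /* "full" with 3 of 4 slots used */
        assert(ring_in_use(&r) == 3);
        assert(r.to_use_index == 3);    /* next post would wrap to 0 */
        return 0;
}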
vnic_wq_copy.c
25 void vnic_wq_copy_enable(struct vnic_wq_copy *wq) in vnic_wq_copy_enable() argument
27 iowrite32(1, &wq->ctrl->enable); in vnic_wq_copy_enable()
30 int vnic_wq_copy_disable(struct vnic_wq_copy *wq) in vnic_wq_copy_disable() argument
34 iowrite32(0, &wq->ctrl->enable); in vnic_wq_copy_disable()
38 if (!(ioread32(&wq->ctrl->running))) in vnic_wq_copy_disable()
45 wq->index, ioread32(&wq->ctrl->fetch_index), in vnic_wq_copy_disable()
46 ioread32(&wq->ctrl->posted_index)); in vnic_wq_copy_disable()
51 void vnic_wq_copy_clean(struct vnic_wq_copy *wq, in vnic_wq_copy_clean() argument
52 void (*q_clean)(struct vnic_wq_copy *wq, in vnic_wq_copy_clean() argument
55 BUG_ON(ioread32(&wq->ctrl->enable)); in vnic_wq_copy_clean()
[all …]
vnic_wq.h
98 static inline unsigned int vnic_wq_desc_avail(struct vnic_wq *wq) in vnic_wq_desc_avail() argument
101 return wq->ring.desc_avail; in vnic_wq_desc_avail()
104 static inline unsigned int vnic_wq_desc_used(struct vnic_wq *wq) in vnic_wq_desc_used() argument
107 return wq->ring.desc_count - wq->ring.desc_avail - 1; in vnic_wq_desc_used()
110 static inline void *vnic_wq_next_desc(struct vnic_wq *wq) in vnic_wq_next_desc() argument
112 return wq->to_use->desc; in vnic_wq_next_desc()
115 static inline void vnic_wq_post(struct vnic_wq *wq, in vnic_wq_post() argument
119 struct vnic_wq_buf *buf = wq->to_use; in vnic_wq_post()
134 iowrite32(buf->index, &wq->ctrl->posted_index); in vnic_wq_post()
136 wq->to_use = buf; in vnic_wq_post()
[all …]
fnic_res.h
30 static inline void fnic_queue_wq_desc(struct vnic_wq *wq, in fnic_queue_wq_desc() argument
37 struct wq_enet_desc *desc = vnic_wq_next_desc(wq); in fnic_queue_wq_desc()
51 vnic_wq_post(wq, os_buf, dma_addr, len, sop, eop); in fnic_queue_wq_desc()
54 static inline void fnic_queue_wq_eth_desc(struct vnic_wq *wq, in fnic_queue_wq_eth_desc() argument
61 struct wq_enet_desc *desc = vnic_wq_next_desc(wq); in fnic_queue_wq_eth_desc()
76 vnic_wq_post(wq, os_buf, dma_addr, len, 1, 1); in fnic_queue_wq_eth_desc()
79 static inline void fnic_queue_wq_copy_desc_icmnd_16(struct vnic_wq_copy *wq, in fnic_queue_wq_copy_desc_icmnd_16() argument
91 struct fcpio_host_req *desc = vnic_wq_copy_next_desc(wq); in fnic_queue_wq_copy_desc_icmnd_16()
121 vnic_wq_copy_post(wq); in fnic_queue_wq_copy_desc_icmnd_16()
124 static inline void fnic_queue_wq_copy_desc_itmf(struct vnic_wq_copy *wq, in fnic_queue_wq_copy_desc_itmf() argument
[all …]
/drivers/net/ethernet/cisco/enic/
vnic_wq.c
31 static int vnic_wq_alloc_bufs(struct vnic_wq *wq) in vnic_wq_alloc_bufs() argument
34 unsigned int i, j, count = wq->ring.desc_count; in vnic_wq_alloc_bufs()
38 wq->bufs[i] = kzalloc(VNIC_WQ_BUF_BLK_SZ(count), GFP_KERNEL); in vnic_wq_alloc_bufs()
39 if (!wq->bufs[i]) in vnic_wq_alloc_bufs()
44 buf = wq->bufs[i]; in vnic_wq_alloc_bufs()
47 buf->desc = (u8 *)wq->ring.descs + in vnic_wq_alloc_bufs()
48 wq->ring.desc_size * buf->index; in vnic_wq_alloc_bufs()
50 buf->next = wq->bufs[0]; in vnic_wq_alloc_bufs()
54 buf->next = wq->bufs[i + 1]; in vnic_wq_alloc_bufs()
64 wq->to_use = wq->to_clean = wq->bufs[0]; in vnic_wq_alloc_bufs()
[all …]
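
Unlike the fnic copy queue above, which tracks a bare index, this allocator threads its per-descriptor vnic_wq_buf structs into a circular singly linked list: the last buffer's ->next points back at wq->bufs[0], so to_use and to_clean advance forever by following ->next. A small sketch of that shape, with hypothetical names and a flat array standing in for the block-allocated buffers:

#include <stdio.h>

struct wq_buf {
        unsigned int index;             /* descriptor slot this buf tracks */
        struct wq_buf *next;            /* circular: last links to first */
};

/* Link count bufs into a ring and return the head, like the tail of
 * vnic_wq_alloc_bufs() that sets buf->next = wq->bufs[0]. */
static struct wq_buf *make_ring(struct wq_buf *bufs, unsigned int count)
{
        for (unsigned int i = 0; i < count; i++) {
                bufs[i].index = i;
                bufs[i].next = &bufs[(i + 1) % count];
        }
        return &bufs[0];
}

int main(void)
{
        struct wq_buf bufs[4];
        struct wq_buf *to_use = make_ring(bufs, 4);

        for (int i = 0; i < 6; i++, to_use = to_use->next)
                printf("%u ", to_use->index);   /* 0 1 2 3 0 1 */
        printf("\n");
        return 0;
}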
vnic_wq.h
99 struct vnic_wq wq; member
103 static inline unsigned int vnic_wq_desc_avail(struct vnic_wq *wq) in vnic_wq_desc_avail() argument
106 return wq->ring.desc_avail; in vnic_wq_desc_avail()
109 static inline unsigned int vnic_wq_desc_used(struct vnic_wq *wq) in vnic_wq_desc_used() argument
112 return wq->ring.desc_count - wq->ring.desc_avail - 1; in vnic_wq_desc_used()
115 static inline void *vnic_wq_next_desc(struct vnic_wq *wq) in vnic_wq_next_desc() argument
117 return wq->to_use->desc; in vnic_wq_next_desc()
120 static inline void vnic_wq_doorbell(struct vnic_wq *wq) in vnic_wq_doorbell() argument
128 iowrite32(wq->to_use->index, &wq->ctrl->posted_index); in vnic_wq_doorbell()
131 static inline void vnic_wq_post(struct vnic_wq *wq, in vnic_wq_post() argument
[all …]
/drivers/scsi/snic/
vnic_wq.c
26 static inline int vnic_wq_get_ctrl(struct vnic_dev *vdev, struct vnic_wq *wq, in vnic_wq_get_ctrl() argument
29 wq->ctrl = svnic_dev_get_res(vdev, res_type, index); in vnic_wq_get_ctrl()
30 if (!wq->ctrl) in vnic_wq_get_ctrl()
36 static inline int vnic_wq_alloc_ring(struct vnic_dev *vdev, struct vnic_wq *wq, in vnic_wq_alloc_ring() argument
39 return svnic_dev_alloc_desc_ring(vdev, &wq->ring, desc_count, in vnic_wq_alloc_ring()
43 static int vnic_wq_alloc_bufs(struct vnic_wq *wq) in vnic_wq_alloc_bufs() argument
46 unsigned int i, j, count = wq->ring.desc_count; in vnic_wq_alloc_bufs()
50 wq->bufs[i] = kzalloc(VNIC_WQ_BUF_BLK_SZ, GFP_ATOMIC); in vnic_wq_alloc_bufs()
51 if (!wq->bufs[i]) { in vnic_wq_alloc_bufs()
59 buf = wq->bufs[i]; in vnic_wq_alloc_bufs()
[all …]
vnic_wq.h
85 static inline unsigned int svnic_wq_desc_avail(struct vnic_wq *wq) in svnic_wq_desc_avail() argument
88 return wq->ring.desc_avail; in svnic_wq_desc_avail()
91 static inline unsigned int svnic_wq_desc_used(struct vnic_wq *wq) in svnic_wq_desc_used() argument
94 return wq->ring.desc_count - wq->ring.desc_avail - 1; in svnic_wq_desc_used()
97 static inline void *svnic_wq_next_desc(struct vnic_wq *wq) in svnic_wq_next_desc() argument
99 return wq->to_use->desc; in svnic_wq_next_desc()
102 static inline void svnic_wq_post(struct vnic_wq *wq, in svnic_wq_post() argument
106 struct vnic_wq_buf *buf = wq->to_use; in svnic_wq_post()
121 iowrite32(buf->index, &wq->ctrl->posted_index); in svnic_wq_post()
123 wq->to_use = buf; in svnic_wq_post()
[all …]
/drivers/net/ethernet/huawei/hinic/
hinic_hw_wq.c
34 #define WQ_SIZE(wq) ((wq)->q_depth * (wq)->wqebb_size) argument
44 #define WQ_BASE_VADDR(wqs, wq) \ argument
45 ((void *)((wqs)->page_vaddr[(wq)->page_idx]) \
46 + (wq)->block_idx * WQ_BLOCK_SIZE)
48 #define WQ_BASE_PADDR(wqs, wq) \ argument
49 ((wqs)->page_paddr[(wq)->page_idx] \
50 + (wq)->block_idx * WQ_BLOCK_SIZE)
52 #define WQ_BASE_ADDR(wqs, wq) \ argument
53 ((void *)((wqs)->shadow_page_vaddr[(wq)->page_idx]) \
54 + (wq)->block_idx * WQ_BLOCK_SIZE)
[all …]
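
The macros here are plain arithmetic: a queue occupies q_depth * wqebb_size bytes, and its base address is the owning page's virtual address plus block_idx * WQ_BLOCK_SIZE. An illustrative standalone version follows; the block size, page count, and queue parameters are invented for the example.

#include <stdio.h>

#define WQ_BLOCK_SIZE 4096u             /* assumed block granularity */

struct wq {
        unsigned int q_depth;           /* number of WQEBBs in the queue */
        unsigned int wqebb_size;        /* bytes per WQ entry basic block */
        unsigned int page_idx;          /* which wqs page holds this queue */
        unsigned int block_idx;         /* which block within that page */
};

struct wqs {
        void *page_vaddr[4];            /* one virtual base per page */
};

/* WQ_SIZE(wq): total queue size in bytes. */
static size_t wq_size(const struct wq *wq)
{
        return (size_t)wq->q_depth * wq->wqebb_size;
}

/* WQ_BASE_VADDR(wqs, wq): page base plus block offset. */
static void *wq_base_vaddr(const struct wqs *wqs, const struct wq *wq)
{
        return (char *)wqs->page_vaddr[wq->page_idx] +
               (size_t)wq->block_idx * WQ_BLOCK_SIZE;
}

int main(void)
{
        static char page[16 * 4096];
        struct wqs wqs = { .page_vaddr = { page } };
        struct wq wq = { .q_depth = 256, .wqebb_size = 64,
                         .page_idx = 0, .block_idx = 2 };

        printf("size=%zu offset=%td\n",
               wq_size(&wq),                             /* 16384 */
               (char *)wq_base_vaddr(&wqs, &wq) - page); /* 8192 */
        return 0;
}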
hinic_hw_qp.c
61 #define SQ_MASKED_IDX(sq, idx) ((idx) & (sq)->wq->mask)
62 #define RQ_MASKED_IDX(rq, idx) ((idx) & (rq)->wq->mask)
98 struct hinic_wq *wq; in hinic_sq_prepare_ctxt() local
100 wq = sq->wq; in hinic_sq_prepare_ctxt()
101 ci_start = atomic_read(&wq->cons_idx); in hinic_sq_prepare_ctxt()
102 pi_start = atomic_read(&wq->prod_idx); in hinic_sq_prepare_ctxt()
105 wq_page_addr = be64_to_cpu(*wq->block_vaddr); in hinic_sq_prepare_ctxt()
112 if (wq->num_q_pages == 1) in hinic_sq_prepare_ctxt()
115 wq_block_pfn = HINIC_WQ_BLOCK_PFN(wq->block_paddr); in hinic_sq_prepare_ctxt()
160 struct hinic_wq *wq; in hinic_rq_prepare_ctxt() local
[all …]
/drivers/dma/idxd/
device.c
62 static void free_hw_descs(struct idxd_wq *wq) in free_hw_descs() argument
66 for (i = 0; i < wq->num_descs; i++) in free_hw_descs()
67 kfree(wq->hw_descs[i]); in free_hw_descs()
69 kfree(wq->hw_descs); in free_hw_descs()
72 static int alloc_hw_descs(struct idxd_wq *wq, int num) in alloc_hw_descs() argument
74 struct device *dev = &wq->idxd->pdev->dev; in alloc_hw_descs()
78 wq->hw_descs = kcalloc_node(num, sizeof(struct dsa_hw_desc *), in alloc_hw_descs()
80 if (!wq->hw_descs) in alloc_hw_descs()
84 wq->hw_descs[i] = kzalloc_node(sizeof(*wq->hw_descs[i]), in alloc_hw_descs()
86 if (!wq->hw_descs[i]) { in alloc_hw_descs()
[all …]
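
free_hw_descs()/alloc_hw_descs() follow the usual pattern for an array of individually allocated objects: allocate the pointer array, then each element, and unwind whatever was already allocated if an element allocation fails. A userspace sketch of that pattern, with calloc standing in for kcalloc_node/kzalloc_node:

#include <stdlib.h>

struct hw_desc { char payload[64]; };   /* placeholder descriptor */

/* Free the first n elements, then the pointer array itself. */
static void free_descs(struct hw_desc **descs, int n)
{
        for (int i = 0; i < n; i++)
                free(descs[i]);
        free(descs);
}

static struct hw_desc **alloc_descs(int n)
{
        struct hw_desc **descs = calloc(n, sizeof(*descs));

        if (!descs)
                return NULL;
        for (int i = 0; i < n; i++) {
                descs[i] = calloc(1, sizeof(*descs[i]));
                if (!descs[i]) {
                        free_descs(descs, i);   /* unwind the first i */
                        return NULL;
                }
        }
        return descs;
}

int main(void)
{
        struct hw_desc **d = alloc_descs(16);

        if (d)
                free_descs(d, 16);
        return 0;
}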
cdev.c
33 struct idxd_wq *wq; member
42 struct idxd_wq *wq = idxd_cdev->wq; in idxd_cdev_dev_release() local
44 cdev_ctx = &ictx[wq->idxd->type]; in idxd_cdev_dev_release()
65 return idxd_cdev->wq; in inode_wq()
72 struct idxd_wq *wq; in idxd_cdev_open() local
76 wq = inode_wq(inode); in idxd_cdev_open()
77 idxd = wq->idxd; in idxd_cdev_open()
80 dev_dbg(dev, "%s called: %d\n", __func__, idxd_wq_refcount(wq)); in idxd_cdev_open()
86 mutex_lock(&wq->wq_lock); in idxd_cdev_open()
88 if (idxd_wq_refcount(wq) > 0 && wq_dedicated(wq)) { in idxd_cdev_open()
[all …]
sysfs.c
59 static inline bool is_idxd_wq_dmaengine(struct idxd_wq *wq) in is_idxd_wq_dmaengine() argument
61 if (wq->type == IDXD_WQT_KERNEL && in is_idxd_wq_dmaengine()
62 strcmp(wq->name, "dmaengine") == 0) in is_idxd_wq_dmaengine()
67 static inline bool is_idxd_wq_cdev(struct idxd_wq *wq) in is_idxd_wq_cdev() argument
69 return wq->type == IDXD_WQT_USER; in is_idxd_wq_cdev()
84 struct idxd_wq *wq = confdev_to_wq(dev); in idxd_config_bus_match() local
85 struct idxd_device *idxd = wq->idxd; in idxd_config_bus_match()
90 if (wq->state != IDXD_WQ_DISABLED) { in idxd_config_bus_match()
149 struct idxd_wq *wq = confdev_to_wq(dev); in idxd_config_bus_probe() local
150 struct idxd_device *idxd = wq->idxd; in idxd_config_bus_probe()
[all …]
dma.c
20 return idxd_chan->wq; in to_idxd_wq()
62 static inline void idxd_prep_desc_common(struct idxd_wq *wq, in idxd_prep_desc_common() argument
67 struct idxd_device *idxd = wq->idxd; in idxd_prep_desc_common()
74 hw->priv = !!(wq->type == IDXD_WQT_KERNEL); in idxd_prep_desc_common()
81 wq->vec_ptr = (wq->vec_ptr % idxd->num_wq_irqs) + 1; in idxd_prep_desc_common()
82 hw->int_handle = wq->vec_ptr; in idxd_prep_desc_common()
88 struct idxd_wq *wq = to_idxd_wq(c); in idxd_dma_prep_interrupt() local
92 if (wq->state != IDXD_WQ_ENABLED) in idxd_dma_prep_interrupt()
96 desc = idxd_alloc_desc(wq, IDXD_OP_BLOCK); in idxd_dma_prep_interrupt()
100 idxd_prep_desc_common(wq, desc->hw, DSA_OPCODE_NOOP, in idxd_dma_prep_interrupt()
[all …]
idxd.h
74 struct idxd_wq *wq; member
96 struct idxd_wq *wq; member
209 struct workqueue_struct *wq; member
224 struct idxd_wq *wq; member
232 static inline bool wq_dedicated(struct idxd_wq *wq) in wq_dedicated() argument
234 return test_bit(WQ_FLAG_DEDICATED, &wq->flags); in wq_dedicated()
263 static inline void idxd_wq_get(struct idxd_wq *wq) in idxd_wq_get() argument
265 wq->client_count++; in idxd_wq_get()
268 static inline void idxd_wq_put(struct idxd_wq *wq) in idxd_wq_put() argument
270 wq->client_count--; in idxd_wq_put()
[all …]
submit.c
11 static struct idxd_desc *__get_desc(struct idxd_wq *wq, int idx, int cpu) in __get_desc() argument
15 desc = wq->descs[idx]; in __get_desc()
22 struct idxd_desc *idxd_alloc_desc(struct idxd_wq *wq, enum idxd_op_type optype) in idxd_alloc_desc() argument
25 struct idxd_device *idxd = wq->idxd; in idxd_alloc_desc()
33 sbq = &wq->sbq; in idxd_alloc_desc()
39 return __get_desc(wq, idx, cpu); in idxd_alloc_desc()
57 return __get_desc(wq, idx, cpu); in idxd_alloc_desc()
60 void idxd_free_desc(struct idxd_wq *wq, struct idxd_desc *desc) in idxd_free_desc() argument
65 sbitmap_queue_clear(&wq->sbq, desc->id, cpu); in idxd_free_desc()
68 int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc) in idxd_submit_desc() argument
[all …]
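
idxd_alloc_desc() takes a free slot index from an sbitmap_queue and idxd_free_desc() hands it back with sbitmap_queue_clear(). The sketch below is a deliberately simplified, single-threaded stand-in for that idea: one 32-bit mask plus the GCC/Clang __builtin_ctz intrinsic, whereas the real sbitmap is sharded, per-CPU hinted, and can block when the pool is empty.

#include <stdint.h>
#include <stdio.h>

#define NUM_DESCS 8

struct desc_pool {
        uint32_t free_mask;             /* bit i set => slot i is free */
};

/* Grab the lowest free slot, or -1 if the pool is exhausted. */
static int desc_alloc(struct desc_pool *p)
{
        if (!p->free_mask)
                return -1;
        int idx = __builtin_ctz(p->free_mask);
        p->free_mask &= ~(1u << idx);
        return idx;
}

/* Return a slot to the pool, like sbitmap_queue_clear(). */
static void desc_free(struct desc_pool *p, int idx)
{
        p->free_mask |= 1u << idx;
}

int main(void)
{
        struct desc_pool pool = { .free_mask = (1u << NUM_DESCS) - 1 };
        int a = desc_alloc(&pool);
        int b = desc_alloc(&pool);

        printf("a=%d b=%d\n", a, b);            /* a=0 b=1 */
        desc_free(&pool, a);
        printf("next=%d\n", desc_alloc(&pool)); /* reuses slot 0 */
        return 0;
}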
/drivers/infiniband/hw/cxgb4/
t4.h
480 static inline int t4_rqes_posted(struct t4_wq *wq) in t4_rqes_posted() argument
482 return wq->rq.in_use; in t4_rqes_posted()
485 static inline int t4_rq_empty(struct t4_wq *wq) in t4_rq_empty() argument
487 return wq->rq.in_use == 0; in t4_rq_empty()
490 static inline int t4_rq_full(struct t4_wq *wq) in t4_rq_full() argument
492 return wq->rq.in_use == (wq->rq.size - 1); in t4_rq_full()
495 static inline u32 t4_rq_avail(struct t4_wq *wq) in t4_rq_avail() argument
497 return wq->rq.size - 1 - wq->rq.in_use; in t4_rq_avail()
500 static inline void t4_rq_produce(struct t4_wq *wq, u8 len16) in t4_rq_produce() argument
502 wq->rq.in_use++; in t4_rq_produce()
[all …]
qp.c
150 static int destroy_qp(struct c4iw_rdev *rdev, struct t4_wq *wq, in destroy_qp() argument
157 dealloc_sq(rdev, &wq->sq); in destroy_qp()
158 kfree(wq->sq.sw_sq); in destroy_qp()
159 c4iw_put_qpid(rdev, wq->sq.qid, uctx); in destroy_qp()
163 wq->rq.memsize, wq->rq.queue, in destroy_qp()
164 dma_unmap_addr(&wq->rq, mapping)); in destroy_qp()
165 c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size); in destroy_qp()
166 kfree(wq->rq.sw_rq); in destroy_qp()
167 c4iw_put_qpid(rdev, wq->rq.qid, uctx); in destroy_qp()
199 static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq, in create_qp() argument
[all …]
cq.c
184 static void insert_recv_cqe(struct t4_wq *wq, struct t4_cq *cq, u32 srqidx) in insert_recv_cqe() argument
189 wq, cq, cq->sw_cidx, cq->sw_pidx); in insert_recv_cqe()
195 CQE_QPID_V(wq->sq.qid)); in insert_recv_cqe()
203 int c4iw_flush_rq(struct t4_wq *wq, struct t4_cq *cq, int count) in c4iw_flush_rq() argument
206 int in_use = wq->rq.in_use - count; in c4iw_flush_rq()
209 wq, cq, wq->rq.in_use, count); in c4iw_flush_rq()
211 insert_recv_cqe(wq, cq, 0); in c4iw_flush_rq()
217 static void insert_sq_cqe(struct t4_wq *wq, struct t4_cq *cq, in insert_sq_cqe() argument
223 wq, cq, cq->sw_cidx, cq->sw_pidx); in insert_sq_cqe()
229 CQE_QPID_V(wq->sq.qid)); in insert_sq_cqe()
[all …]
restrack.c
39 static int fill_sq(struct sk_buff *msg, struct t4_wq *wq) in fill_sq() argument
42 if (rdma_nl_put_driver_u32(msg, "sqid", wq->sq.qid)) in fill_sq()
44 if (rdma_nl_put_driver_u32(msg, "flushed", wq->flushed)) in fill_sq()
46 if (rdma_nl_put_driver_u32(msg, "memsize", wq->sq.memsize)) in fill_sq()
48 if (rdma_nl_put_driver_u32(msg, "cidx", wq->sq.cidx)) in fill_sq()
50 if (rdma_nl_put_driver_u32(msg, "pidx", wq->sq.pidx)) in fill_sq()
52 if (rdma_nl_put_driver_u32(msg, "wq_pidx", wq->sq.wq_pidx)) in fill_sq()
54 if (rdma_nl_put_driver_u32(msg, "flush_cidx", wq->sq.flush_cidx)) in fill_sq()
56 if (rdma_nl_put_driver_u32(msg, "in_use", wq->sq.in_use)) in fill_sq()
58 if (rdma_nl_put_driver_u32(msg, "size", wq->sq.size)) in fill_sq()
[all …]
/drivers/net/ethernet/mellanox/mlx5/core/en/
txrx.h
68 mlx5e_wqc_has_room_for(struct mlx5_wq_cyc *wq, u16 cc, u16 pc, u16 n) in mlx5e_wqc_has_room_for() argument
70 return (mlx5_wq_cyc_ctr2ix(wq, cc - pc) >= n) || (cc == pc); in mlx5e_wqc_has_room_for()
73 static inline void *mlx5e_fetch_wqe(struct mlx5_wq_cyc *wq, u16 pi, size_t wqe_size) in mlx5e_fetch_wqe() argument
77 wqe = mlx5_wq_cyc_get_wqe(wq, pi); in mlx5e_fetch_wqe()
84 ((struct mlx5e_tx_wqe *)mlx5e_fetch_wqe(&(sq)->wq, pi, sizeof(struct mlx5e_tx_wqe)))
87 mlx5e_post_nop(struct mlx5_wq_cyc *wq, u32 sqn, u16 *pc) in mlx5e_post_nop() argument
89 u16 pi = mlx5_wq_cyc_ctr2ix(wq, *pc); in mlx5e_post_nop()
90 struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi); in mlx5e_post_nop()
104 mlx5e_post_nop_fence(struct mlx5_wq_cyc *wq, u32 sqn, u16 *pc) in mlx5e_post_nop_fence() argument
106 u16 pi = mlx5_wq_cyc_ctr2ix(wq, *pc); in mlx5e_post_nop_fence()
[all …]
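
mlx5e_wqc_has_room_for() compares free-running 16-bit consumer (cc) and producer (pc) counters: (cc - pc) reduced modulo the queue size is the free space, except that cc == pc needs a special case because it means the queue is empty, not full. A self-contained sketch with an assumed 8-entry queue:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define WQ_SZ_M1 7u     /* 8-entry queue; size stored as a mask */

/* Like mlx5_wq_cyc_ctr2ix(): reduce a counter modulo the queue size. */
static uint16_t ctr2ix(uint16_t ctr)
{
        return ctr & WQ_SZ_M1;
}

/* Same shape as mlx5e_wqc_has_room_for() above. */
static bool wqc_has_room_for(uint16_t cc, uint16_t pc, uint16_t n)
{
        return (ctr2ix(cc - pc) >= n) || (cc == pc);
}

int main(void)
{
        /* empty queue: cc == pc, room for a full batch of 8 */
        printf("%d\n", wqc_has_room_for(100, 100, 8));  /* 1 */
        /* 6 entries outstanding: (100 - 106) & 7 = 2 slots left */
        printf("%d\n", wqc_has_room_for(100, 106, 2));  /* 1 */
        printf("%d\n", wqc_has_room_for(100, 106, 3));  /* 0 */
        return 0;
}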
/drivers/infiniband/hw/mlx5/
srq_cmd.c
27 static void set_wq(void *wq, struct mlx5_srq_attr *in) in set_wq() argument
29 MLX5_SET(wq, wq, wq_signature, !!(in->flags in set_wq()
31 MLX5_SET(wq, wq, log_wq_pg_sz, in->log_page_size); in set_wq()
32 MLX5_SET(wq, wq, log_wq_stride, in->wqe_shift + 4); in set_wq()
33 MLX5_SET(wq, wq, log_wq_sz, in->log_size); in set_wq()
34 MLX5_SET(wq, wq, page_offset, in->page_offset); in set_wq()
35 MLX5_SET(wq, wq, lwm, in->lwm); in set_wq()
36 MLX5_SET(wq, wq, pd, in->pd); in set_wq()
37 MLX5_SET64(wq, wq, dbr_addr, in->db_record); in set_wq()
55 static void get_wq(void *wq, struct mlx5_srq_attr *in) in get_wq() argument
[all …]
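
MLX5_SET() writes a named field at a bit offset derived from the device's command layout, handling byte order as it goes. The toy below hard-codes one hypothetical 5-bit field to show the mask-and-shift mechanics; it is not the real macro, which also converts to big endian and takes its offsets and widths from the PRM structure definitions.

#include <stdint.h>
#include <stdio.h>

/* Invented layout: "log_wq_sz" is 5 bits wide at bit 8 of one word. */
#define LOG_WQ_SZ_SHIFT 8
#define LOG_WQ_SZ_MASK  0x1fu

/* Clear the field, then or in the new value, as MLX5_SET does. */
static void set_log_wq_sz(uint32_t *wqc, uint32_t val)
{
        *wqc &= ~(LOG_WQ_SZ_MASK << LOG_WQ_SZ_SHIFT);
        *wqc |= (val & LOG_WQ_SZ_MASK) << LOG_WQ_SZ_SHIFT;
}

int main(void)
{
        uint32_t wqc = 0;

        set_log_wq_sz(&wqc, 10);        /* queue of 2^10 entries */
        printf("wqc=0x%08x\n", wqc);    /* 0x00000a00 */
        return 0;
}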
