
Searched refs:sq (Results 1 – 25 of 205) sorted by relevance


/drivers/net/ethernet/mellanox/mlx5/core/
en_tx.c 44 static void mlx5e_dma_unmap_wqe_err(struct mlx5e_txqsq *sq, u8 num_dma) in mlx5e_dma_unmap_wqe_err() argument
50 mlx5e_dma_get(sq, --sq->dma_fifo_pc); in mlx5e_dma_unmap_wqe_err()
52 mlx5e_tx_dma_unmap(sq->pdev, last_pushed_dma); in mlx5e_dma_unmap_wqe_err()
218 mlx5e_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb, in mlx5e_txwqe_build_eseg_csum() argument
222 if (unlikely(mlx5e_ipsec_txwqe_build_eseg_csum(sq, skb, eseg))) in mlx5e_txwqe_build_eseg_csum()
230 sq->stats->csum_partial_inner++; in mlx5e_txwqe_build_eseg_csum()
233 sq->stats->csum_partial++; in mlx5e_txwqe_build_eseg_csum()
238 sq->stats->csum_partial++; in mlx5e_txwqe_build_eseg_csum()
241 sq->stats->csum_none++; in mlx5e_txwqe_build_eseg_csum()
245 mlx5e_tx_get_gso_ihs(struct mlx5e_txqsq *sq, struct sk_buff *skb) in mlx5e_tx_get_gso_ihs() argument
[all …]
en_txrx.c 48 static void mlx5e_handle_tx_dim(struct mlx5e_txqsq *sq) in mlx5e_handle_tx_dim() argument
50 struct mlx5e_sq_stats *stats = sq->stats; in mlx5e_handle_tx_dim()
53 if (unlikely(!test_bit(MLX5E_SQ_STATE_AM, &sq->state))) in mlx5e_handle_tx_dim()
56 dim_update_sample(sq->cq.event_ctr, stats->packets, stats->bytes, &dim_sample); in mlx5e_handle_tx_dim()
57 net_dim(&sq->dim, dim_sample); in mlx5e_handle_tx_dim()
72 void mlx5e_trigger_irq(struct mlx5e_icosq *sq) in mlx5e_trigger_irq() argument
74 struct mlx5_wq_cyc *wq = &sq->wq; in mlx5e_trigger_irq()
76 u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc); in mlx5e_trigger_irq()
78 sq->db.wqe_info[pi] = (struct mlx5e_icosq_wqe_info) { in mlx5e_trigger_irq()
83 nopwqe = mlx5e_post_nop(wq, sq->sqn, &sq->pc); in mlx5e_trigger_irq()
[all …]
en_main.c 204 struct mlx5e_icosq *sq, in mlx5e_build_umr_wqe() argument
211 cseg->qpn_ds = cpu_to_be32((sq->sqn << MLX5_WQE_CTRL_QPN_SHIFT) | in mlx5e_build_umr_wqe()
918 static void mlx5e_free_xdpsq_db(struct mlx5e_xdpsq *sq) in mlx5e_free_xdpsq_db() argument
920 kvfree(sq->db.xdpi_fifo.xi); in mlx5e_free_xdpsq_db()
921 kvfree(sq->db.wqe_info); in mlx5e_free_xdpsq_db()
924 static int mlx5e_alloc_xdpsq_fifo(struct mlx5e_xdpsq *sq, int numa) in mlx5e_alloc_xdpsq_fifo() argument
926 struct mlx5e_xdp_info_fifo *xdpi_fifo = &sq->db.xdpi_fifo; in mlx5e_alloc_xdpsq_fifo()
927 int wq_sz = mlx5_wq_cyc_get_size(&sq->wq); in mlx5e_alloc_xdpsq_fifo()
935 xdpi_fifo->pc = &sq->xdpi_fifo_pc; in mlx5e_alloc_xdpsq_fifo()
936 xdpi_fifo->cc = &sq->xdpi_fifo_cc; in mlx5e_alloc_xdpsq_fifo()
[all …]
/drivers/net/ethernet/mellanox/mlx5/core/en/
xdp.c 59 mlx5e_xmit_xdp_buff(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq, in mlx5e_xmit_xdp_buff() argument
89 dma_addr = dma_map_single(sq->pdev, xdptxd.data, xdptxd.len, in mlx5e_xmit_xdp_buff()
91 if (dma_mapping_error(sq->pdev, dma_addr)) { in mlx5e_xmit_xdp_buff()
109 dma_sync_single_for_device(sq->pdev, dma_addr, xdptxd.len, in mlx5e_xmit_xdp_buff()
117 return INDIRECT_CALL_2(sq->xmit_xdp_frame, mlx5e_xmit_xdp_frame_mpwqe, in mlx5e_xmit_xdp_buff()
118 mlx5e_xmit_xdp_frame, sq, &xdptxd, &xdpi, 0); in mlx5e_xmit_xdp_buff()
166 static u16 mlx5e_xdpsq_get_next_pi(struct mlx5e_xdpsq *sq, u16 size) in mlx5e_xdpsq_get_next_pi() argument
168 struct mlx5_wq_cyc *wq = &sq->wq; in mlx5e_xdpsq_get_next_pi()
171 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc); in mlx5e_xdpsq_get_next_pi()
176 wi = &sq->db.wqe_info[pi]; in mlx5e_xdpsq_get_next_pi()
[all …]
reporter_tx.c 8 static int mlx5e_wait_for_sq_flush(struct mlx5e_txqsq *sq) in mlx5e_wait_for_sq_flush() argument
14 if (sq->cc == sq->pc) in mlx5e_wait_for_sq_flush()
20 netdev_err(sq->netdev, in mlx5e_wait_for_sq_flush()
22 sq->sqn, sq->cc, sq->pc); in mlx5e_wait_for_sq_flush()
27 static void mlx5e_reset_txqsq_cc_pc(struct mlx5e_txqsq *sq) in mlx5e_reset_txqsq_cc_pc() argument
29 WARN_ONCE(sq->cc != sq->pc, in mlx5e_reset_txqsq_cc_pc()
31 sq->sqn, sq->cc, sq->pc); in mlx5e_reset_txqsq_cc_pc()
32 sq->cc = 0; in mlx5e_reset_txqsq_cc_pc()
33 sq->dma_fifo_cc = 0; in mlx5e_reset_txqsq_cc_pc()
34 sq->pc = 0; in mlx5e_reset_txqsq_cc_pc()
[all …]
xdp.h 52 void mlx5e_xdp_mpwqe_complete(struct mlx5e_xdpsq *sq);
54 void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq);
55 void mlx5e_set_xmit_fp(struct mlx5e_xdpsq *sq, bool is_mpw);
60 INDIRECT_CALLABLE_DECLARE(bool mlx5e_xmit_xdp_frame_mpwqe(struct mlx5e_xdpsq *sq,
64 INDIRECT_CALLABLE_DECLARE(bool mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq,
68 INDIRECT_CALLABLE_DECLARE(int mlx5e_xmit_xdp_frame_check_mpwqe(struct mlx5e_xdpsq *sq));
69 INDIRECT_CALLABLE_DECLARE(int mlx5e_xmit_xdp_frame_check(struct mlx5e_xdpsq *sq));
99 static inline void mlx5e_xmit_xdp_doorbell(struct mlx5e_xdpsq *sq) in mlx5e_xmit_xdp_doorbell() argument
101 if (sq->doorbell_cseg) { in mlx5e_xmit_xdp_doorbell()
102 mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, sq->doorbell_cseg); in mlx5e_xmit_xdp_doorbell()
[all …]
txrx.h 52 void mlx5e_trigger_irq(struct mlx5e_icosq *sq);
74 void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq);
98 #define MLX5E_TX_FETCH_WQE(sq, pi) \ argument
99 ((struct mlx5e_tx_wqe *)mlx5e_fetch_wqe(&(sq)->wq, pi, sizeof(struct mlx5e_tx_wqe)))
147 static inline u16 mlx5e_txqsq_get_next_pi(struct mlx5e_txqsq *sq, u16 size) in mlx5e_txqsq_get_next_pi() argument
149 struct mlx5_wq_cyc *wq = &sq->wq; in mlx5e_txqsq_get_next_pi()
152 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc); in mlx5e_txqsq_get_next_pi()
157 wi = &sq->db.wqe_info[pi]; in mlx5e_txqsq_get_next_pi()
165 mlx5e_post_nop(wq, sq->sqn, &sq->pc); in mlx5e_txqsq_get_next_pi()
167 sq->stats->nop += contig_wqebbs; in mlx5e_txqsq_get_next_pi()
[all …]
/drivers/net/ethernet/mellanox/mlx5/core/en_accel/
ktls_tx.c 146 static void tx_fill_wi(struct mlx5e_txqsq *sq, in tx_fill_wi() argument
150 struct mlx5e_tx_wqe_info *wi = &sq->db.wqe_info[pi]; in tx_fill_wi()
170 post_static_params(struct mlx5e_txqsq *sq, in post_static_params() argument
178 pi = mlx5e_txqsq_get_next_pi(sq, num_wqebbs); in post_static_params()
179 wqe = MLX5E_TLS_FETCH_SET_STATIC_PARAMS_WQE(sq, pi); in post_static_params()
180 mlx5e_ktls_build_static_params(wqe, sq->pc, sq->sqn, &priv_tx->crypto_info, in post_static_params()
183 tx_fill_wi(sq, pi, num_wqebbs, 0, NULL); in post_static_params()
184 sq->pc += num_wqebbs; in post_static_params()
188 post_progress_params(struct mlx5e_txqsq *sq, in post_progress_params() argument
196 pi = mlx5e_txqsq_get_next_pi(sq, num_wqebbs); in post_progress_params()
[all …]
ktls_rx.c 129 static void icosq_fill_wi(struct mlx5e_icosq *sq, u16 pi, in icosq_fill_wi() argument
132 sq->db.wqe_info[pi] = *wi; in icosq_fill_wi()
136 post_static_params(struct mlx5e_icosq *sq, in post_static_params() argument
144 if (unlikely(!mlx5e_icosq_can_post_wqe(sq, num_wqebbs))) in post_static_params()
147 pi = mlx5e_icosq_get_next_pi(sq, num_wqebbs); in post_static_params()
148 wqe = MLX5E_TLS_FETCH_SET_STATIC_PARAMS_WQE(sq, pi); in post_static_params()
149 mlx5e_ktls_build_static_params(wqe, sq->pc, sq->sqn, &priv_rx->crypto_info, in post_static_params()
158 icosq_fill_wi(sq, pi, &wi); in post_static_params()
159 sq->pc += num_wqebbs; in post_static_params()
165 post_progress_params(struct mlx5e_icosq *sq, in post_progress_params() argument
[all …]
/drivers/net/ethernet/cavium/thunder/
nicvf_queues.c 19 static inline void nicvf_sq_add_gather_subdesc(struct snd_queue *sq, int qentry,
504 struct snd_queue *sq, int q_len, int qidx) in nicvf_init_snd_queue() argument
508 err = nicvf_alloc_q_desc_mem(nic, &sq->dmem, q_len, SND_QUEUE_DESC_SIZE, in nicvf_init_snd_queue()
513 sq->desc = sq->dmem.base; in nicvf_init_snd_queue()
514 sq->skbuff = kcalloc(q_len, sizeof(u64), GFP_KERNEL); in nicvf_init_snd_queue()
515 if (!sq->skbuff) in nicvf_init_snd_queue()
518 sq->head = 0; in nicvf_init_snd_queue()
519 sq->tail = 0; in nicvf_init_snd_queue()
520 sq->thresh = SND_QUEUE_THRESH; in nicvf_init_snd_queue()
527 sq->xdp_page = kcalloc(q_len, sizeof(u64), GFP_KERNEL); in nicvf_init_snd_queue()
[all …]
/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/
tx.c 52 static void mlx5e_xsk_tx_post_err(struct mlx5e_xdpsq *sq, in mlx5e_xsk_tx_post_err() argument
55 u16 pi = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->pc); in mlx5e_xsk_tx_post_err()
56 struct mlx5e_xdp_wqe_info *wi = &sq->db.wqe_info[pi]; in mlx5e_xsk_tx_post_err()
62 nopwqe = mlx5e_post_nop(&sq->wq, sq->sqn, &sq->pc); in mlx5e_xsk_tx_post_err()
63 mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo, xdpi); in mlx5e_xsk_tx_post_err()
64 sq->doorbell_cseg = &nopwqe->ctrl; in mlx5e_xsk_tx_post_err()
67 bool mlx5e_xsk_tx(struct mlx5e_xdpsq *sq, unsigned int budget) in mlx5e_xsk_tx() argument
69 struct xsk_buff_pool *pool = sq->xsk_pool; in mlx5e_xsk_tx()
78 int check_result = INDIRECT_CALL_2(sq->xmit_xdp_frame_check, in mlx5e_xsk_tx()
81 sq); in mlx5e_xsk_tx()
[all …]
tx.h 14 bool mlx5e_xsk_tx(struct mlx5e_xdpsq *sq, unsigned int budget);
16 static inline void mlx5e_xsk_update_tx_wakeup(struct mlx5e_xdpsq *sq) in mlx5e_xsk_update_tx_wakeup() argument
18 if (!xsk_uses_need_wakeup(sq->xsk_pool)) in mlx5e_xsk_update_tx_wakeup()
21 if (sq->pc != sq->cc) in mlx5e_xsk_update_tx_wakeup()
22 xsk_clear_tx_need_wakeup(sq->xsk_pool); in mlx5e_xsk_update_tx_wakeup()
24 xsk_set_tx_need_wakeup(sq->xsk_pool); in mlx5e_xsk_update_tx_wakeup()
/drivers/net/ethernet/intel/ice/
ice_controlq.c 8 (qinfo)->sq.head = prefix##_ATQH; \
9 (qinfo)->sq.tail = prefix##_ATQT; \
10 (qinfo)->sq.len = prefix##_ATQLEN; \
11 (qinfo)->sq.bah = prefix##_ATQBAH; \
12 (qinfo)->sq.bal = prefix##_ATQBAL; \
13 (qinfo)->sq.len_mask = prefix##_ATQLEN_ATQLEN_M; \
14 (qinfo)->sq.len_ena_mask = prefix##_ATQLEN_ATQENABLE_M; \
15 (qinfo)->sq.len_crit_mask = prefix##_ATQLEN_ATQCRIT_M; \
16 (qinfo)->sq.head_mask = prefix##_ATQH_ATQH_M; \
77 if (cq->sq.len && cq->sq.len_mask && cq->sq.len_ena_mask) in ice_check_sq_alive()
[all …]
/drivers/net/ethernet/marvell/octeontx2/nic/
otx2_txrx.c 103 struct otx2_snd_queue *sq, in otx2_snd_pkt_handler() argument
119 sg = &sq->sg[snd_comp->sqe_id]; in otx2_snd_pkt_handler()
125 timestamp = ((u64 *)sq->timestamps->base)[snd_comp->sqe_id]; in otx2_snd_pkt_handler()
418 otx2_snd_pkt_handler(pfvf, cq, &pfvf->qset.sq[cq->cint_idx], in otx2_tx_napi_handler()
489 void otx2_sqe_flush(void *dev, struct otx2_snd_queue *sq, in otx2_sqe_flush() argument
498 memcpy(sq->lmt_addr, sq->sqe_base, size); in otx2_sqe_flush()
499 status = otx2_lmt_flush(sq->io_addr); in otx2_sqe_flush()
502 sq->head++; in otx2_sqe_flush()
503 sq->head &= (sq->sqe_cnt - 1); in otx2_sqe_flush()
508 static bool otx2_sqe_add_sg(struct otx2_nic *pfvf, struct otx2_snd_queue *sq, in otx2_sqe_add_sg() argument
[all …]
otx2_common.c 87 struct otx2_snd_queue *sq = &pfvf->qset.sq[qidx]; in otx2_update_sq_stats() local
89 if (!pfvf->qset.sq) in otx2_update_sq_stats()
92 otx2_nix_sq_op_stats(&sq->stats, pfvf, qidx); in otx2_update_sq_stats()
795 struct otx2_snd_queue *sq; in otx2_sq_aq_init() local
798 sq = &pfvf->qset.sq[qidx]; in otx2_sq_aq_init()
799 sq->lmt_addr = (__force u64 *)(pfvf->reg_base + LMT_LF_LMTLINEX(qidx)); in otx2_sq_aq_init()
805 aq->sq.cq = pfvf->hw.rx_queues + qidx; in otx2_sq_aq_init()
806 aq->sq.max_sqe_size = NIX_MAXSQESZ_W16; /* 128 byte */ in otx2_sq_aq_init()
807 aq->sq.cq_ena = 1; in otx2_sq_aq_init()
808 aq->sq.ena = 1; in otx2_sq_aq_init()
[all …]
/drivers/soc/qcom/
qmi_interface.c 18 struct sockaddr_qrtr *sq);
167 struct sockaddr_qrtr sq; in qmi_send_new_lookup() local
177 sq.sq_family = qmi->sq.sq_family; in qmi_send_new_lookup()
178 sq.sq_node = qmi->sq.sq_node; in qmi_send_new_lookup()
179 sq.sq_port = QRTR_PORT_CTRL; in qmi_send_new_lookup()
181 msg.msg_name = &sq; in qmi_send_new_lookup()
182 msg.msg_namelen = sizeof(sq); in qmi_send_new_lookup()
230 struct sockaddr_qrtr sq; in qmi_send_new_server() local
239 pkt.server.node = cpu_to_le32(qmi->sq.sq_node); in qmi_send_new_server()
240 pkt.server.port = cpu_to_le32(qmi->sq.sq_port); in qmi_send_new_server()
[all …]
/drivers/net/ethernet/huawei/hinic/
hinic_hw_qp.c 59 #define SQ_DB_ADDR(sq, pi) ((u64 *)((sq)->db_base) + SQ_DB_PI_LOW(pi)) argument
61 #define SQ_MASKED_IDX(sq, idx) ((idx) & (sq)->wq->mask) argument
93 struct hinic_sq *sq, u16 global_qid) in hinic_sq_prepare_ctxt() argument
100 wq = sq->wq; in hinic_sq_prepare_ctxt()
219 static int alloc_sq_skb_arr(struct hinic_sq *sq) in alloc_sq_skb_arr() argument
221 struct hinic_wq *wq = sq->wq; in alloc_sq_skb_arr()
224 skb_arr_size = wq->q_depth * sizeof(*sq->saved_skb); in alloc_sq_skb_arr()
225 sq->saved_skb = vzalloc(skb_arr_size); in alloc_sq_skb_arr()
226 if (!sq->saved_skb) in alloc_sq_skb_arr()
236 static void free_sq_skb_arr(struct hinic_sq *sq) in free_sq_skb_arr() argument
[all …]
hinic_tx.c 46 #define HW_CONS_IDX(sq) be16_to_cpu(*(u16 *)((sq)->hw_ci_addr)) argument
502 qp = container_of(txq->sq, struct hinic_qp, sq); in hinic_lb_xmit_frame()
511 sq_wqe = hinic_sq_get_wqe(txq->sq, wqe_size, &prod_idx); in hinic_lb_xmit_frame()
515 sq_wqe = hinic_sq_get_wqe(txq->sq, wqe_size, &prod_idx); in hinic_lb_xmit_frame()
532 hinic_sq_prepare_wqe(txq->sq, prod_idx, sq_wqe, txq->sges, nr_sges); in hinic_lb_xmit_frame()
533 hinic_sq_write_wqe(txq->sq, prod_idx, sq_wqe, skb, wqe_size); in hinic_lb_xmit_frame()
538 hinic_sq_write_db(txq->sq, prod_idx, wqe_size, 0); in hinic_lb_xmit_frame()
563 qp = container_of(txq->sq, struct hinic_qp, sq); in hinic_xmit_frame()
592 sq_wqe = hinic_sq_get_wqe(txq->sq, wqe_size, &prod_idx); in hinic_xmit_frame()
599 sq_wqe = hinic_sq_get_wqe(txq->sq, wqe_size, &prod_idx); in hinic_xmit_frame()
[all …]
hinic_hw_qp.h 57 #define HINIC_MIN_TX_NUM_WQEBBS(sq) \ argument
58 (HINIC_MIN_TX_WQE_SIZE((sq)->wq) / (sq)->wq->wqebb_size)
122 struct hinic_sq sq; member
133 struct hinic_sq *sq, u16 global_qid);
138 int hinic_init_sq(struct hinic_sq *sq, struct hinic_hwif *hwif,
142 void hinic_clean_sq(struct hinic_sq *sq);
149 int hinic_get_sq_free_wqebbs(struct hinic_sq *sq);
178 void hinic_sq_prepare_wqe(struct hinic_sq *sq, u16 prod_idx,
182 void hinic_sq_write_db(struct hinic_sq *sq, u16 prod_idx, unsigned int wqe_size,
185 struct hinic_sq_wqe *hinic_sq_get_wqe(struct hinic_sq *sq,
[all …]
/drivers/infiniband/hw/cxgb4/
qp.c 95 static void dealloc_oc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq) in dealloc_oc_sq() argument
97 c4iw_ocqp_pool_free(rdev, sq->dma_addr, sq->memsize); in dealloc_oc_sq()
100 static void dealloc_host_sq(struct c4iw_rdev *rdev, struct t4_sq *sq) in dealloc_host_sq() argument
102 dma_free_coherent(&(rdev->lldi.pdev->dev), sq->memsize, sq->queue, in dealloc_host_sq()
103 dma_unmap_addr(sq, mapping)); in dealloc_host_sq()
106 static void dealloc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq) in dealloc_sq() argument
108 if (t4_sq_onchip(sq)) in dealloc_sq()
109 dealloc_oc_sq(rdev, sq); in dealloc_sq()
111 dealloc_host_sq(rdev, sq); in dealloc_sq()
114 static int alloc_oc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq) in alloc_oc_sq() argument
[all …]
cq.c 195 CQE_QPID_V(wq->sq.qid)); in insert_recv_cqe()
229 CQE_QPID_V(wq->sq.qid)); in insert_sq_cqe()
247 if (wq->sq.flush_cidx == -1) in c4iw_flush_sq()
248 wq->sq.flush_cidx = wq->sq.cidx; in c4iw_flush_sq()
249 idx = wq->sq.flush_cidx; in c4iw_flush_sq()
250 while (idx != wq->sq.pidx) { in c4iw_flush_sq()
251 swsqe = &wq->sq.sw_sq[idx]; in c4iw_flush_sq()
254 if (wq->sq.oldest_read == swsqe) { in c4iw_flush_sq()
258 if (++idx == wq->sq.size) in c4iw_flush_sq()
261 wq->sq.flush_cidx += flushed; in c4iw_flush_sq()
[all …]
/drivers/infiniband/hw/bnxt_re/
qplib_fp.c 61 qp->sq.condition = false; in bnxt_qplib_cancel_phantom_processing()
62 qp->sq.send_phantom = false; in bnxt_qplib_cancel_phantom_processing()
63 qp->sq.single = false; in bnxt_qplib_cancel_phantom_processing()
74 if (!qp->sq.flushed) { in __bnxt_qplib_add_flush_qp()
79 qp->sq.flushed = true; in __bnxt_qplib_add_flush_qp()
124 if (qp->sq.flushed) { in __bnxt_qplib_del_flush_qp()
125 qp->sq.flushed = false; in __bnxt_qplib_del_flush_qp()
142 qp->sq.hwq.prod = 0; in bnxt_qplib_clean_qp()
143 qp->sq.hwq.cons = 0; in bnxt_qplib_clean_qp()
177 struct bnxt_qplib_q *sq = &qp->sq; in bnxt_qplib_free_qp_hdr_buf() local
[all …]
/drivers/infiniband/hw/mlx5/
mem.c 118 spin_lock_irqsave(&qp->sq.lock, flags); in post_send_nop()
120 idx = qp->sq.cur_post & (qp->sq.wqe_cnt - 1); in post_send_nop()
121 ctrl = mlx5_frag_buf_get_wqe(&qp->sq.fbc, idx); in post_send_nop()
126 cpu_to_be32(((u32)(qp->sq.cur_post) << 8) | MLX5_OPCODE_NOP); in post_send_nop()
130 qp->sq.wrid[idx] = wr_id; in post_send_nop()
131 qp->sq.w_list[idx].opcode = MLX5_OPCODE_NOP; in post_send_nop()
132 qp->sq.wqe_head[idx] = qp->sq.head + 1; in post_send_nop()
133 qp->sq.cur_post += DIV_ROUND_UP(sizeof(struct mlx5_wqe_ctrl_seg), in post_send_nop()
135 qp->sq.w_list[idx].next = qp->sq.cur_post; in post_send_nop()
136 qp->sq.head++; in post_send_nop()
[all …]
/drivers/net/
virtio_net.c 185 struct send_queue *sq; member
368 struct napi_struct *napi = &vi->sq[vq2txq(vq)].napi; in skb_xmit_done()
520 struct send_queue *sq, in __virtnet_xdp_xmit_one() argument
536 sg_init_one(sq->sg, xdpf->data, xdpf->len); in __virtnet_xdp_xmit_one()
538 err = virtqueue_add_outbuf(sq->vq, sq->sg, 1, xdp_to_ptr(xdpf), in __virtnet_xdp_xmit_one()
570 v->sq + qp; \
577 txq = netdev_get_tx_queue(v->dev, (q) - v->sq); \
590 struct send_queue *sq; in virtnet_xdp_xmit() local
607 sq = virtnet_xdp_get_sq(vi); in virtnet_xdp_xmit()
615 while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) { in virtnet_xdp_xmit()
[all …]
/drivers/infiniband/hw/efa/
efa_com.c 119 struct efa_com_admin_sq *sq = &aq->sq; in efa_com_admin_init_sq() local
120 u16 size = aq->depth * sizeof(*sq->entries); in efa_com_admin_init_sq()
125 sq->entries = in efa_com_admin_init_sq()
126 dma_alloc_coherent(aq->dmadev, size, &sq->dma_addr, GFP_KERNEL); in efa_com_admin_init_sq()
127 if (!sq->entries) in efa_com_admin_init_sq()
130 spin_lock_init(&sq->lock); in efa_com_admin_init_sq()
132 sq->cc = 0; in efa_com_admin_init_sq()
133 sq->pc = 0; in efa_com_admin_init_sq()
134 sq->phase = 1; in efa_com_admin_init_sq()
136 sq->db_addr = (u32 __iomem *)(edev->reg_bar + EFA_REGS_AQ_PROD_DB_OFF); in efa_com_admin_init_sq()
[all …]
