/drivers/net/ethernet/mellanox/mlx5/core/en/

D | reporter_tx.c |
    6  static int mlx5e_wait_for_sq_flush(struct mlx5e_txqsq *sq)   in mlx5e_wait_for_sq_flush() argument
   11  if (sq->cc == sq->pc)   in mlx5e_wait_for_sq_flush()
   17  netdev_err(sq->channel->netdev,   in mlx5e_wait_for_sq_flush()
   19  sq->sqn, sq->cc, sq->pc);   in mlx5e_wait_for_sq_flush()
   24  static void mlx5e_reset_txqsq_cc_pc(struct mlx5e_txqsq *sq)   in mlx5e_reset_txqsq_cc_pc() argument
   26  WARN_ONCE(sq->cc != sq->pc,   in mlx5e_reset_txqsq_cc_pc()
   28  sq->sqn, sq->cc, sq->pc);   in mlx5e_reset_txqsq_cc_pc()
   29  sq->cc = 0;   in mlx5e_reset_txqsq_cc_pc()
   30  sq->dma_fifo_cc = 0;   in mlx5e_reset_txqsq_cc_pc()
   31  sq->pc = 0;   in mlx5e_reset_txqsq_cc_pc()
  [all …]
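The reporter_tx.c matches above all revolve around the send queue's paired counters: sq->pc advances as WQEs are posted and sq->cc advances as their completions are processed, so the flush helper simply waits for the two to meet and the reset helper zeroes both. Below is a minimal, driver-agnostic sketch of that convention; the toy_* names are illustrative assumptions, not the mlx5e definitions.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct toy_sq {
	uint16_t pc;	/* producer counter: bumped once per posted WQE */
	uint16_t cc;	/* consumer counter: bumped once per completed WQE */
};

/* The queue is fully drained when the consumer has caught up with the producer. */
static bool toy_sq_is_empty(const struct toy_sq *sq)
{
	return sq->cc == sq->pc;
}

/* Unsigned subtraction still yields the number of in-flight WQEs after the
 * 16-bit counters wrap around. */
static uint16_t toy_sq_outstanding(const struct toy_sq *sq)
{
	return (uint16_t)(sq->pc - sq->cc);
}

int main(void)
{
	struct toy_sq sq = { .pc = 3, .cc = 1 };

	printf("outstanding=%u empty=%d\n",
	       (unsigned int)toy_sq_outstanding(&sq), toy_sq_is_empty(&sq));
	return 0;
}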
D | xdp.c |
   59  mlx5e_xmit_xdp_buff(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq,   in mlx5e_xmit_xdp_buff() argument
   89  dma_addr = dma_map_single(sq->pdev, xdptxd.data, xdptxd.len,   in mlx5e_xmit_xdp_buff()
   91  if (dma_mapping_error(sq->pdev, dma_addr)) {   in mlx5e_xmit_xdp_buff()
  109  dma_sync_single_for_device(sq->pdev, dma_addr, xdptxd.len,   in mlx5e_xmit_xdp_buff()
  117  return sq->xmit_xdp_frame(sq, &xdptxd, &xdpi, 0);   in mlx5e_xmit_xdp_buff()
  181  static void mlx5e_xdp_mpwqe_session_start(struct mlx5e_xdpsq *sq)   in mlx5e_xdp_mpwqe_session_start() argument
  183  struct mlx5e_xdp_mpwqe *session = &sq->mpwqe;   in mlx5e_xdp_mpwqe_session_start()
  184  struct mlx5e_xdpsq_stats *stats = sq->stats;   in mlx5e_xdp_mpwqe_session_start()
  185  struct mlx5_wq_cyc *wq = &sq->wq;   in mlx5e_xdp_mpwqe_session_start()
  188  pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);   in mlx5e_xdp_mpwqe_session_start()
  [all …]
D | xdp.h |
   67  void mlx5e_xdp_mpwqe_complete(struct mlx5e_xdpsq *sq);
   69  void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq);
   70  void mlx5e_set_xmit_fp(struct mlx5e_xdpsq *sq, bool is_mpw);
  103  static inline void mlx5e_xmit_xdp_doorbell(struct mlx5e_xdpsq *sq)   in mlx5e_xmit_xdp_doorbell() argument
  105  if (sq->doorbell_cseg) {   in mlx5e_xmit_xdp_doorbell()
  106  mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, sq->doorbell_cseg);   in mlx5e_xmit_xdp_doorbell()
  107  sq->doorbell_cseg = NULL;   in mlx5e_xmit_xdp_doorbell()
  114  static inline void mlx5e_xdp_update_inline_state(struct mlx5e_xdpsq *sq)   in mlx5e_xdp_update_inline_state() argument
  116  u16 outstanding = sq->xdpi_fifo_pc - sq->xdpi_fifo_cc;   in mlx5e_xdp_update_inline_state()
  117  struct mlx5e_xdp_mpwqe *session = &sq->mpwqe;   in mlx5e_xdp_update_inline_state()
  [all …]
D | txrx.h |
   37  mlx5e_sq_fetch_wqe(struct mlx5e_txqsq *sq, size_t size, u16 *pi)   in mlx5e_sq_fetch_wqe() argument
   39  struct mlx5_wq_cyc *wq = &sq->wq;   in mlx5e_sq_fetch_wqe()
   42  *pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);   in mlx5e_sq_fetch_wqe()
   85  mlx5e_fill_sq_frag_edge(struct mlx5e_txqsq *sq, struct mlx5_wq_cyc *wq,   in mlx5e_fill_sq_frag_edge() argument
   88  struct mlx5e_tx_wqe_info *edge_wi, *wi = &sq->db.wqe_info[pi];   in mlx5e_fill_sq_frag_edge()
   96  mlx5e_post_nop(wq, sq->sqn, &sq->pc);   in mlx5e_fill_sq_frag_edge()
   98  sq->stats->nop += nnops;   in mlx5e_fill_sq_frag_edge()
  125  mlx5e_tx_wqe_inline_mode(struct mlx5e_txqsq *sq, struct mlx5_wqe_ctrl_seg *cseg,   in mlx5e_tx_wqe_inline_mode() argument
  133  mode = sq->min_inline_mode;   in mlx5e_tx_wqe_inline_mode()
  136  test_bit(MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE, &sq->state))   in mlx5e_tx_wqe_inline_mode()
  [all …]
/drivers/net/ethernet/mellanox/mlx5/core/

D | en_tx.c |
   44  static void mlx5e_dma_unmap_wqe_err(struct mlx5e_txqsq *sq, u8 num_dma)   in mlx5e_dma_unmap_wqe_err() argument
   50  mlx5e_dma_get(sq, --sq->dma_fifo_pc);   in mlx5e_dma_unmap_wqe_err()
   52  mlx5e_tx_dma_unmap(sq->pdev, last_pushed_dma);   in mlx5e_dma_unmap_wqe_err()
  150  mlx5e_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb, struct mlx5_wqe_eth_seg *e…   in mlx5e_txwqe_build_eseg_csum() argument
  157  sq->stats->csum_partial_inner++;   in mlx5e_txwqe_build_eseg_csum()
  160  sq->stats->csum_partial++;   in mlx5e_txwqe_build_eseg_csum()
  163  sq->stats->csum_none++;   in mlx5e_txwqe_build_eseg_csum()
  167  mlx5e_tx_get_gso_ihs(struct mlx5e_txqsq *sq, struct sk_buff *skb)   in mlx5e_tx_get_gso_ihs() argument
  169  struct mlx5e_sq_stats *stats = sq->stats;   in mlx5e_tx_get_gso_ihs()
  189  mlx5e_txwqe_build_dsegs(struct mlx5e_txqsq *sq, struct sk_buff *skb,   in mlx5e_txwqe_build_dsegs() argument
  [all …]
D | en_txrx.c |
   50  static void mlx5e_handle_tx_dim(struct mlx5e_txqsq *sq)   in mlx5e_handle_tx_dim() argument
   52  struct mlx5e_sq_stats *stats = sq->stats;   in mlx5e_handle_tx_dim()
   55  if (unlikely(!test_bit(MLX5E_SQ_STATE_AM, &sq->state)))   in mlx5e_handle_tx_dim()
   58  dim_update_sample(sq->cq.event_ctr, stats->packets, stats->bytes, &dim_sample);   in mlx5e_handle_tx_dim()
   59  net_dim(&sq->dim, dim_sample);   in mlx5e_handle_tx_dim()
   74  void mlx5e_trigger_irq(struct mlx5e_icosq *sq)   in mlx5e_trigger_irq() argument
   76  struct mlx5_wq_cyc *wq = &sq->wq;   in mlx5e_trigger_irq()
   78  u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);   in mlx5e_trigger_irq()
   80  sq->db.ico_wqe[pi].opcode = MLX5_OPCODE_NOP;   in mlx5e_trigger_irq()
   81  nopwqe = mlx5e_post_nop(wq, sq->sqn, &sq->pc);   in mlx5e_trigger_irq()
  [all …]
D | en_main.c |
  233  struct mlx5e_icosq *sq,   in mlx5e_build_umr_wqe() argument
  240  cseg->qpn_ds = cpu_to_be32((sq->sqn << MLX5_WQE_CTRL_QPN_SHIFT) |   in mlx5e_build_umr_wqe()
  925  static void mlx5e_free_xdpsq_db(struct mlx5e_xdpsq *sq)   in mlx5e_free_xdpsq_db() argument
  927  kvfree(sq->db.xdpi_fifo.xi);   in mlx5e_free_xdpsq_db()
  928  kvfree(sq->db.wqe_info);   in mlx5e_free_xdpsq_db()
  931  static int mlx5e_alloc_xdpsq_fifo(struct mlx5e_xdpsq *sq, int numa)   in mlx5e_alloc_xdpsq_fifo() argument
  933  struct mlx5e_xdp_info_fifo *xdpi_fifo = &sq->db.xdpi_fifo;   in mlx5e_alloc_xdpsq_fifo()
  934  int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);   in mlx5e_alloc_xdpsq_fifo()
  942  xdpi_fifo->pc = &sq->xdpi_fifo_pc;   in mlx5e_alloc_xdpsq_fifo()
  943  xdpi_fifo->cc = &sq->xdpi_fifo_cc;   in mlx5e_alloc_xdpsq_fifo()
  [all …]
/drivers/net/ethernet/mellanox/mlx5/core/en_accel/

D | ktls_tx.c |
  105  static void tx_fill_wi(struct mlx5e_txqsq *sq,   in tx_fill_wi() argument
  109  struct mlx5e_tx_wqe_info *wi = &sq->db.wqe_info[pi];   in tx_fill_wi()
  133  post_static_params(struct mlx5e_txqsq *sq,   in post_static_params() argument
  140  umr_wqe = mlx5e_sq_fetch_wqe(sq, MLX5E_KTLS_STATIC_UMR_WQE_SZ, &pi);   in post_static_params()
  141  build_static_params(umr_wqe, sq->pc, sq->sqn, priv_tx, fence);   in post_static_params()
  142  tx_fill_wi(sq, pi, MLX5E_KTLS_STATIC_WQEBBS, 0, NULL);   in post_static_params()
  143  sq->pc += MLX5E_KTLS_STATIC_WQEBBS;   in post_static_params()
  147  post_progress_params(struct mlx5e_txqsq *sq,   in post_progress_params() argument
  154  wqe = mlx5e_sq_fetch_wqe(sq, MLX5E_KTLS_PROGRESS_WQE_SZ, &pi);   in post_progress_params()
  155  build_progress_params(wqe, sq->pc, sq->sqn, priv_tx, fence);   in post_progress_params()
  [all …]
/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/

D | tx.c |
   49  static void mlx5e_xsk_tx_post_err(struct mlx5e_xdpsq *sq,   in mlx5e_xsk_tx_post_err() argument
   52  u16 pi = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->pc);   in mlx5e_xsk_tx_post_err()
   53  struct mlx5e_xdp_wqe_info *wi = &sq->db.wqe_info[pi];   in mlx5e_xsk_tx_post_err()
   59  nopwqe = mlx5e_post_nop(&sq->wq, sq->sqn, &sq->pc);   in mlx5e_xsk_tx_post_err()
   60  mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo, xdpi);   in mlx5e_xsk_tx_post_err()
   61  sq->doorbell_cseg = &nopwqe->ctrl;   in mlx5e_xsk_tx_post_err()
   64  bool mlx5e_xsk_tx(struct mlx5e_xdpsq *sq, unsigned int budget)   in mlx5e_xsk_tx() argument
   66  struct xdp_umem *umem = sq->umem;   in mlx5e_xsk_tx()
   75  int check_result = sq->xmit_xdp_frame_check(sq);   in mlx5e_xsk_tx()
   96  dma_sync_single_for_device(sq->pdev, xdptxd.dma_addr,   in mlx5e_xsk_tx()
  [all …]
D | tx.h |
   14  bool mlx5e_xsk_tx(struct mlx5e_xdpsq *sq, unsigned int budget);
   16  static inline void mlx5e_xsk_update_tx_wakeup(struct mlx5e_xdpsq *sq)   in mlx5e_xsk_update_tx_wakeup() argument
   18  if (!xsk_umem_uses_need_wakeup(sq->umem))   in mlx5e_xsk_update_tx_wakeup()
   21  if (sq->pc != sq->cc)   in mlx5e_xsk_update_tx_wakeup()
   22  xsk_clear_tx_need_wakeup(sq->umem);   in mlx5e_xsk_update_tx_wakeup()
   24  xsk_set_tx_need_wakeup(sq->umem);   in mlx5e_xsk_update_tx_wakeup()
/drivers/net/ethernet/cavium/thunder/

D | nicvf_queues.c |
   19  static inline void nicvf_sq_add_gather_subdesc(struct snd_queue *sq, int qentry,
  504  struct snd_queue *sq, int q_len, int qidx)   in nicvf_init_snd_queue() argument
  508  err = nicvf_alloc_q_desc_mem(nic, &sq->dmem, q_len, SND_QUEUE_DESC_SIZE,   in nicvf_init_snd_queue()
  513  sq->desc = sq->dmem.base;   in nicvf_init_snd_queue()
  514  sq->skbuff = kcalloc(q_len, sizeof(u64), GFP_KERNEL);   in nicvf_init_snd_queue()
  515  if (!sq->skbuff)   in nicvf_init_snd_queue()
  518  sq->head = 0;   in nicvf_init_snd_queue()
  519  sq->tail = 0;   in nicvf_init_snd_queue()
  520  sq->thresh = SND_QUEUE_THRESH;   in nicvf_init_snd_queue()
  527  sq->xdp_page = kcalloc(q_len, sizeof(u64), GFP_KERNEL);   in nicvf_init_snd_queue()
  [all …]
/drivers/net/ethernet/intel/ice/

D | ice_controlq.c |
    8  (qinfo)->sq.head = prefix##_ATQH; \
    9  (qinfo)->sq.tail = prefix##_ATQT; \
   10  (qinfo)->sq.len = prefix##_ATQLEN; \
   11  (qinfo)->sq.bah = prefix##_ATQBAH; \
   12  (qinfo)->sq.bal = prefix##_ATQBAL; \
   13  (qinfo)->sq.len_mask = prefix##_ATQLEN_ATQLEN_M; \
   14  (qinfo)->sq.len_ena_mask = prefix##_ATQLEN_ATQENABLE_M; \
   15  (qinfo)->sq.head_mask = prefix##_ATQH_ATQH_M; \
   62  if (cq->sq.len && cq->sq.len_mask && cq->sq.len_ena_mask)   in ice_check_sq_alive()
   63  return (rd32(hw, cq->sq.len) & (cq->sq.len_mask |   in ice_check_sq_alive()
  [all …]
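The ice_controlq.c matches are the body of a macro that fills in a control-queue register map by token-pasting a register-name prefix onto shared suffixes, so one macro body can serve several queue families. A small standalone sketch of that preprocessor pattern follows; the TOY_* names and offsets are made-up assumptions, not the real ice registers.

#include <stdio.h>

#define TOY_PF_ATQH 0x100	/* hypothetical register offsets */
#define TOY_PF_ATQT 0x104
#define TOY_VF_ATQH 0x200
#define TOY_VF_ATQT 0x204

struct toy_ctl_q_info {
	unsigned int head;
	unsigned int tail;
};

/* prefix##_ATQH pastes the macro argument onto the suffix, e.g. TOY_PF_ATQH. */
#define TOY_CQ_SQ_REGS(qinfo, prefix) do {	\
	(qinfo)->head = prefix##_ATQH;		\
	(qinfo)->tail = prefix##_ATQT;		\
} while (0)

int main(void)
{
	struct toy_ctl_q_info pf, vf;

	TOY_CQ_SQ_REGS(&pf, TOY_PF);	/* expands to TOY_PF_ATQH / TOY_PF_ATQT */
	TOY_CQ_SQ_REGS(&vf, TOY_VF);
	printf("pf head=0x%x, vf head=0x%x\n", pf.head, vf.head);
	return 0;
}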
/drivers/soc/qcom/

D | qmi_interface.c |
   18  struct sockaddr_qrtr *sq);
  167  struct sockaddr_qrtr sq;   in qmi_send_new_lookup() local
  177  sq.sq_family = qmi->sq.sq_family;   in qmi_send_new_lookup()
  178  sq.sq_node = qmi->sq.sq_node;   in qmi_send_new_lookup()
  179  sq.sq_port = QRTR_PORT_CTRL;   in qmi_send_new_lookup()
  181  msg.msg_name = &sq;   in qmi_send_new_lookup()
  182  msg.msg_namelen = sizeof(sq);   in qmi_send_new_lookup()
  230  struct sockaddr_qrtr sq;   in qmi_send_new_server() local
  239  pkt.server.node = cpu_to_le32(qmi->sq.sq_node);   in qmi_send_new_server()
  240  pkt.server.port = cpu_to_le32(qmi->sq.sq_port);   in qmi_send_new_server()
  [all …]
/drivers/net/ethernet/huawei/hinic/

D | hinic_hw_qp.c |
   59  #define SQ_DB_ADDR(sq, pi) ((u64 *)((sq)->db_base) + SQ_DB_PI_LOW(pi))   argument
   61  #define SQ_MASKED_IDX(sq, idx) ((idx) & (sq)->wq->mask)   argument
   93  struct hinic_sq *sq, u16 global_qid)   in hinic_sq_prepare_ctxt() argument
  100  wq = sq->wq;   in hinic_sq_prepare_ctxt()
  214  static int alloc_sq_skb_arr(struct hinic_sq *sq)   in alloc_sq_skb_arr() argument
  216  struct hinic_wq *wq = sq->wq;   in alloc_sq_skb_arr()
  219  skb_arr_size = wq->q_depth * sizeof(*sq->saved_skb);   in alloc_sq_skb_arr()
  220  sq->saved_skb = vzalloc(skb_arr_size);   in alloc_sq_skb_arr()
  221  if (!sq->saved_skb)   in alloc_sq_skb_arr()
  231  static void free_sq_skb_arr(struct hinic_sq *sq)   in free_sq_skb_arr() argument
  [all …]
D | hinic_hw_qp.h |
   54  #define HINIC_MIN_TX_NUM_WQEBBS(sq) \   argument
   55  (HINIC_MIN_TX_WQE_SIZE((sq)->wq) / (sq)->wq->wqebb_size)
  112  struct hinic_sq sq;   member
  123  struct hinic_sq *sq, u16 global_qid);
  128  int hinic_init_sq(struct hinic_sq *sq, struct hinic_hwif *hwif,
  132  void hinic_clean_sq(struct hinic_sq *sq);
  139  int hinic_get_sq_free_wqebbs(struct hinic_sq *sq);
  168  void hinic_sq_prepare_wqe(struct hinic_sq *sq, u16 prod_idx,
  172  void hinic_sq_write_db(struct hinic_sq *sq, u16 prod_idx, unsigned int wqe_size,
  175  struct hinic_sq_wqe *hinic_sq_get_wqe(struct hinic_sq *sq,
  [all …]
D | hinic_tx.c |
   46  #define HW_CONS_IDX(sq) be16_to_cpu(*(u16 *)((sq)->hw_ci_addr))   argument
  474  qp = container_of(txq->sq, struct hinic_qp, sq);   in hinic_xmit_frame()
  503  sq_wqe = hinic_sq_get_wqe(txq->sq, wqe_size, &prod_idx);   in hinic_xmit_frame()
  510  sq_wqe = hinic_sq_get_wqe(txq->sq, wqe_size, &prod_idx);   in hinic_xmit_frame()
  527  hinic_sq_prepare_wqe(txq->sq, prod_idx, sq_wqe, txq->sges, nr_sges);   in hinic_xmit_frame()
  533  hinic_sq_write_wqe(txq->sq, prod_idx, sq_wqe, skb, wqe_size);   in hinic_xmit_frame()
  538  hinic_sq_write_db(txq->sq, prod_idx, wqe_size, 0);   in hinic_xmit_frame()
  543  hinic_sq_return_wqe(txq->sq, wqe_size);   in hinic_xmit_frame()
  578  struct hinic_sq *sq = txq->sq;   in free_all_tx_skbs() local
  585  while ((sq_wqe = hinic_sq_read_wqebb(sq, &skb, &wqe_size, &ci))) {   in free_all_tx_skbs()
  [all …]
/drivers/infiniband/hw/bnxt_re/

D | qplib_fp.c |
   62  qp->sq.condition = false;   in bnxt_qplib_cancel_phantom_processing()
   63  qp->sq.send_phantom = false;   in bnxt_qplib_cancel_phantom_processing()
   64  qp->sq.single = false;   in bnxt_qplib_cancel_phantom_processing()
   75  if (!qp->sq.flushed) {   in __bnxt_qplib_add_flush_qp()
   80  qp->sq.flushed = true;   in __bnxt_qplib_add_flush_qp()
  125  if (qp->sq.flushed) {   in __bnxt_qplib_del_flush_qp()
  126  qp->sq.flushed = false;   in __bnxt_qplib_del_flush_qp()
  143  qp->sq.hwq.prod = 0;   in bnxt_qplib_clean_qp()
  144  qp->sq.hwq.cons = 0;   in bnxt_qplib_clean_qp()
  178  struct bnxt_qplib_q *sq = &qp->sq;   in bnxt_qplib_free_qp_hdr_buf() local
  [all …]
/drivers/infiniband/hw/cxgb4/

D | qp.c |
   95  static void dealloc_oc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)   in dealloc_oc_sq() argument
   97  c4iw_ocqp_pool_free(rdev, sq->dma_addr, sq->memsize);   in dealloc_oc_sq()
  100  static void dealloc_host_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)   in dealloc_host_sq() argument
  102  dma_free_coherent(&(rdev->lldi.pdev->dev), sq->memsize, sq->queue,   in dealloc_host_sq()
  103  dma_unmap_addr(sq, mapping));   in dealloc_host_sq()
  106  static void dealloc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)   in dealloc_sq() argument
  108  if (t4_sq_onchip(sq))   in dealloc_sq()
  109  dealloc_oc_sq(rdev, sq);   in dealloc_sq()
  111  dealloc_host_sq(rdev, sq);   in dealloc_sq()
  114  static int alloc_oc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)   in alloc_oc_sq() argument
  [all …]
D | cq.c |
  195  CQE_QPID_V(wq->sq.qid));   in insert_recv_cqe()
  229  CQE_QPID_V(wq->sq.qid));   in insert_sq_cqe()
  247  if (wq->sq.flush_cidx == -1)   in c4iw_flush_sq()
  248  wq->sq.flush_cidx = wq->sq.cidx;   in c4iw_flush_sq()
  249  idx = wq->sq.flush_cidx;   in c4iw_flush_sq()
  250  while (idx != wq->sq.pidx) {   in c4iw_flush_sq()
  251  swsqe = &wq->sq.sw_sq[idx];   in c4iw_flush_sq()
  254  if (wq->sq.oldest_read == swsqe) {   in c4iw_flush_sq()
  258  if (++idx == wq->sq.size)   in c4iw_flush_sq()
  261  wq->sq.flush_cidx += flushed;   in c4iw_flush_sq()
  [all …]
D | t4.h |
  383  struct t4_sq sq;   member
  527  static inline int t4_sq_onchip(struct t4_sq *sq)   in t4_sq_onchip() argument
  529  return sq->flags & T4_SQ_ONCHIP;   in t4_sq_onchip()
  534  return wq->sq.in_use == 0;   in t4_sq_empty()
  539  return wq->sq.in_use == (wq->sq.size - 1);   in t4_sq_full()
  544  return wq->sq.size - 1 - wq->sq.in_use;   in t4_sq_avail()
  549  wq->sq.in_use++;   in t4_sq_produce()
  550  if (++wq->sq.pidx == wq->sq.size)   in t4_sq_produce()
  551  wq->sq.pidx = 0;   in t4_sq_produce()
  552  wq->sq.wq_pidx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE);   in t4_sq_produce()
  [all …]
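The t4.h matches are the occupancy helpers for the cxgb4 send queue ring: an explicit in_use count paired with a wrapping producer index, with the ring treated as full one entry before in_use reaches size. A minimal standalone sketch of that bookkeeping follows; the toy_* names are assumptions, not the t4 structures.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct toy_ring {
	uint16_t size;		/* total number of entries in the ring */
	uint16_t in_use;	/* entries posted and not yet completed */
	uint16_t pidx;		/* next entry the producer will write */
};

static bool toy_ring_empty(const struct toy_ring *r)
{
	return r->in_use == 0;
}

/* Mirrors the "size - 1" convention above: the ring reports full one entry early. */
static bool toy_ring_full(const struct toy_ring *r)
{
	return r->in_use == r->size - 1;
}

static uint16_t toy_ring_avail(const struct toy_ring *r)
{
	return r->size - 1 - r->in_use;
}

static void toy_ring_produce(struct toy_ring *r)
{
	r->in_use++;
	if (++r->pidx == r->size)	/* wrap the producer index */
		r->pidx = 0;
}

int main(void)
{
	struct toy_ring r = { .size = 4 };

	while (!toy_ring_full(&r))
		toy_ring_produce(&r);
	printf("in_use=%u pidx=%u avail=%u empty=%d\n",
	       (unsigned int)r.in_use, (unsigned int)r.pidx,
	       (unsigned int)toy_ring_avail(&r), toy_ring_empty(&r));
	return 0;
}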
/drivers/nvme/target/

D | core.c |
  662  if (req->sq->size) {   in nvmet_update_sq_head()
  666  old_sqhd = req->sq->sqhd;   in nvmet_update_sq_head()
  667  new_sqhd = (old_sqhd + 1) % req->sq->size;   in nvmet_update_sq_head()
  668  } while (cmpxchg(&req->sq->sqhd, old_sqhd, new_sqhd) !=   in nvmet_update_sq_head()
  671  req->cqe->sq_head = cpu_to_le16(req->sq->sqhd & 0x0000FFFF);   in nvmet_update_sq_head()
  676  struct nvmet_ctrl *ctrl = req->sq->ctrl;   in nvmet_set_error()
  691  new_error_slot->sqid = cpu_to_le16(req->sq->qid);   in nvmet_set_error()
  705  if (!req->sq->sqhd_disabled)   in __nvmet_req_complete()
  707  req->cqe->sq_id = cpu_to_le16(req->sq->qid);   in __nvmet_req_complete()
  723  percpu_ref_put(&req->sq->ref);   in nvmet_req_complete()
  [all …]
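The core.c matches show nvmet advancing the submission-queue head with a compare-and-swap retry loop, since completions from several contexts may race to bump it. Below is a minimal sketch of the same pattern using C11 atomics in place of the kernel's cmpxchg(); the toy_* names are illustrative, not the nvmet structures.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

struct toy_sq {
	_Atomic uint32_t sqhd;	/* current submission queue head, wraps modulo size */
	uint32_t size;		/* number of entries in the queue */
};

static void toy_update_sq_head(struct toy_sq *sq)
{
	uint32_t old_sqhd, new_sqhd;

	old_sqhd = atomic_load(&sq->sqhd);
	do {
		new_sqhd = (old_sqhd + 1) % sq->size;
		/* On failure, old_sqhd is refreshed with the current value and
		 * new_sqhd is recomputed from it on the next pass. */
	} while (!atomic_compare_exchange_weak(&sq->sqhd, &old_sqhd, new_sqhd));
}

int main(void)
{
	struct toy_sq sq = { .size = 4 };
	int i;

	for (i = 0; i < 5; i++)
		toy_update_sq_head(&sq);
	printf("sqhd=%u\n", (unsigned int)atomic_load(&sq.sqhd));	/* 5 increments mod 4 -> 1 */
	return 0;
}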
/drivers/infiniband/hw/efa/

D | efa_com.c |
  138  struct efa_com_admin_sq *sq = &aq->sq;   in efa_com_admin_init_sq() local
  139  u16 size = aq->depth * sizeof(*sq->entries);   in efa_com_admin_init_sq()
  144  sq->entries =   in efa_com_admin_init_sq()
  145  dma_alloc_coherent(aq->dmadev, size, &sq->dma_addr, GFP_KERNEL);   in efa_com_admin_init_sq()
  146  if (!sq->entries)   in efa_com_admin_init_sq()
  149  spin_lock_init(&sq->lock);   in efa_com_admin_init_sq()
  151  sq->cc = 0;   in efa_com_admin_init_sq()
  152  sq->pc = 0;   in efa_com_admin_init_sq()
  153  sq->phase = 1;   in efa_com_admin_init_sq()
  155  sq->db_addr = (u32 __iomem *)(edev->reg_bar + EFA_REGS_AQ_PROD_DB_OFF);   in efa_com_admin_init_sq()
  [all …]
/drivers/infiniband/hw/hns/

D | hns_roce_qp.c |
  360  if (check_shl_overflow(1, ucmd->log_sq_bb_count, &hr_qp->sq.wqe_cnt) ||   in hns_roce_set_user_sq_size()
  361  hr_qp->sq.wqe_cnt > hr_dev->caps.max_wqes)   in hns_roce_set_user_sq_size()
  370  hr_qp->sq.wqe_shift = ucmd->log_sq_stride;   in hns_roce_set_user_sq_size()
  374  hr_qp->sq.max_gs = roundup_pow_of_two(max_cnt);   in hns_roce_set_user_sq_size()
  376  hr_qp->sq.max_gs = max_cnt;   in hns_roce_set_user_sq_size()
  378  if (hr_qp->sq.max_gs > 2)   in hns_roce_set_user_sq_size()
  379  hr_qp->sge.sge_cnt = roundup_pow_of_two(hr_qp->sq.wqe_cnt *   in hns_roce_set_user_sq_size()
  380  (hr_qp->sq.max_gs - 2));   in hns_roce_set_user_sq_size()
  382  if ((hr_qp->sq.max_gs > 2) && (hr_dev->pci_dev->revision == 0x20)) {   in hns_roce_set_user_sq_size()
  398  HNS_ROCE_ALOGN_UP((hr_qp->sq.wqe_cnt <<   in hns_roce_set_user_sq_size()
  [all …]
/drivers/net/

D | virtio_net.c |
  180  struct send_queue *sq;   member
  340  struct napi_struct *napi = &vi->sq[vq2txq(vq)].napi;   in skb_xmit_done()
  447  struct send_queue *sq,   in __virtnet_xdp_xmit_one() argument
  467  sg_init_one(sq->sg, xdpf->data, xdpf->len);   in __virtnet_xdp_xmit_one()
  469  err = virtqueue_add_outbuf(sq->vq, sq->sg, 1, xdp_to_ptr(xdpf),   in __virtnet_xdp_xmit_one()
  482  return &vi->sq[qp];   in virtnet_xdp_sq()
  491  struct send_queue *sq;   in virtnet_xdp_xmit() local
  508  sq = virtnet_xdp_sq(vi);   in virtnet_xdp_xmit()
  517  while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) {   in virtnet_xdp_xmit()
  535  err = __virtnet_xdp_xmit_one(vi, sq, xdpf);   in virtnet_xdp_xmit()
  [all …]
/drivers/infiniband/hw/mthca/

D | mthca_qp.c |
  221  (n << qp->sq.wqe_shift);   in get_send_wqe()
  224  (n << qp->sq.wqe_shift)) >>   in get_send_wqe()
  226  ((qp->send_wqe_offset + (n << qp->sq.wqe_shift)) &   in get_send_wqe()
  505  qp_attr->cap.max_send_wr = qp->sq.max;   in mthca_query_qp()
  507  qp_attr->cap.max_send_sge = qp->sq.max_gs;   in mthca_query_qp()
  619  if (qp->sq.max)   in __mthca_modify_qp()
  620  qp_context->sq_size_stride = ilog2(qp->sq.max) << 3;   in __mthca_modify_qp()
  621  qp_context->sq_size_stride |= qp->sq.wqe_shift - 4;   in __mthca_modify_qp()
  739  qp_context->snd_db_index = cpu_to_be32(qp->sq.db_index);   in __mthca_modify_qp()
  840  mthca_wq_reset(&qp->sq);   in __mthca_modify_qp()
  [all …]