
Searched refs:q_id (Results 1 – 25 of 26) sorted by relevance


/drivers/net/ethernet/huawei/hinic/
hinic_hw_eqs.c
32 HINIC_CSR_AEQ_CONS_IDX_ADDR((eq)->q_id) : \
33 HINIC_CSR_CEQ_CONS_IDX_ADDR((eq)->q_id))
36 HINIC_CSR_AEQ_PROD_IDX_ADDR((eq)->q_id) : \
37 HINIC_CSR_CEQ_PROD_IDX_ADDR((eq)->q_id))
40 HINIC_CSR_AEQ_HI_PHYS_ADDR_REG((eq)->q_id, pg_num) : \
41 HINIC_CSR_CEQ_HI_PHYS_ADDR_REG((eq)->q_id, pg_num))
44 HINIC_CSR_AEQ_LO_PHYS_ADDR_REG((eq)->q_id, pg_num) : \
45 HINIC_CSR_CEQ_LO_PHYS_ADDR_REG((eq)->q_id, pg_num))
79 container_of((eq) - (eq)->q_id, struct hinic_aeqs, aeq[0])
82 container_of((eq) - (eq)->q_id, struct hinic_ceqs, ceq[0])
[all …]
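
Note: the container_of() hits at lines 79 and 82 use a back-pointer idiom: each eq carries its own array index in q_id, so subtracting q_id from the element pointer steps back to aeq[0] (or ceq[0]), and container_of() then recovers the enclosing aeqs/ceqs structure. A minimal userspace sketch of the same idiom (struct names invented for illustration):

    #include <stddef.h>
    #include <stdio.h>

    /* userspace stand-in for the kernel's container_of() */
    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct eq {              /* hypothetical element type */
        int q_id;            /* element's own index within the array */
    };

    struct eqs {             /* hypothetical container */
        int other_state;
        struct eq eq[4];
    };

    /* Recover the container from any array element: stepping back q_id
     * elements lands on &eqs->eq[0], which container_of() maps to the
     * enclosing struct eqs. */
    static struct eqs *eq_to_eqs(struct eq *e)
    {
        return container_of(e - e->q_id, struct eqs, eq[0]);
    }

    int main(void)
    {
        struct eqs all = { .eq = { {0}, {1}, {2}, {3} } };

        printf("%d\n", eq_to_eqs(&all.eq[2]) == &all);  /* prints 1 */
        return 0;
    }
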
hinic_hw_csr.h
88 #define HINIC_CSR_AEQ_HI_PHYS_ADDR_REG(q_id, pg_num) \ argument
89 (HINIC_CSR_AEQ_MTT_OFF(q_id) + \
92 #define HINIC_CSR_CEQ_HI_PHYS_ADDR_REG(q_id, pg_num) \ argument
93 (HINIC_CSR_CEQ_MTT_OFF(q_id) + \
96 #define HINIC_CSR_AEQ_LO_PHYS_ADDR_REG(q_id, pg_num) \ argument
97 (HINIC_CSR_AEQ_MTT_OFF(q_id) + \
100 #define HINIC_CSR_CEQ_LO_PHYS_ADDR_REG(q_id, pg_num) \ argument
101 (HINIC_CSR_CEQ_MTT_OFF(q_id) + \
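
Note: these macros compose a per-queue register address from a queue-specific MTT offset plus a page-number term; the pg_num multiplier sits on the truncated continuation lines. A sketch of the general shape, with invented base and stride constants:

    #include <stdio.h>

    /* Hypothetical layout: each queue owns a block of registers at
     * BASE + q_id * QUEUE_STRIDE; each mapped page adds PAGE_STRIDE. */
    #define CSR_MTT_BASE     0x200u
    #define CSR_QUEUE_STRIDE 0x40u
    #define CSR_PAGE_STRIDE  0x8u

    #define CSR_MTT_OFF(q_id) (CSR_MTT_BASE + (q_id) * CSR_QUEUE_STRIDE)

    #define CSR_HI_PHYS_ADDR_REG(q_id, pg_num) \
        (CSR_MTT_OFF(q_id) + (pg_num) * CSR_PAGE_STRIDE)

    int main(void)
    {
        /* queue 3, page 2: 0x200 + 3*0x40 + 2*0x8 = 0x2d0 */
        printf("0x%x\n", CSR_HI_PHYS_ADDR_REG(3u, 2u));
        return 0;
    }
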
hinic_hw_io.c
29 #define CI_ADDR(base_addr, q_id) ((base_addr) + \ argument
30 (q_id) * CI_Q_ADDR_SIZE)
129 base_qpn + qp->q_id); in write_sq_ctxts()
173 base_qpn + qp->q_id); in write_rq_ctxts()
269 struct hinic_qp *qp, int q_id, in init_qp() argument
278 qp->q_id = q_id; in init_qp()
280 err = hinic_wq_allocate(&func_to_io->wqs, &func_to_io->sq_wq[q_id], in init_qp()
288 err = hinic_wq_allocate(&func_to_io->wqs, &func_to_io->rq_wq[q_id], in init_qp()
303 func_to_io->sq_db[q_id] = db_base; in init_qp()
305 err = hinic_init_sq(&qp->sq, hwif, &func_to_io->sq_wq[q_id], in init_qp()
[all …]
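
Note: the truncated init_qp() hits show the usual paired-allocation pattern: allocate the SQ work queue, then the RQ work queue, both indexed by q_id, and unwind the first allocation if the second fails. A toy userspace sketch of that shape (all names and sizes hypothetical):

    #include <stdlib.h>

    struct wq { void *buf; };   /* hypothetical work-queue handle */

    static int wq_allocate(struct wq *wq, size_t size)
    {
        wq->buf = malloc(size);
        return wq->buf ? 0 : -1;
    }

    static void wq_free(struct wq *wq)
    {
        free(wq->buf);
        wq->buf = NULL;
    }

    /* Allocate the SQ and RQ work queues for one queue pair; on RQ
     * failure, unwind the already-allocated SQ so the caller sees
     * all-or-nothing semantics. */
    static int init_qp(struct wq *sq_wq, struct wq *rq_wq, int q_id)
    {
        int err;

        err = wq_allocate(&sq_wq[q_id], 4096);
        if (err)
            return err;

        err = wq_allocate(&rq_wq[q_id], 4096);
        if (err) {
            wq_free(&sq_wq[q_id]);
            return err;
        }
        return 0;
    }

    int main(void)
    {
        struct wq sq[2], rq[2];

        if (init_qp(sq, rq, 0))
            return 1;
        wq_free(&sq[0]);
        wq_free(&rq[0]);
        return 0;
    }
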
hinic_tx.c
465 u16 prod_idx, q_id = skb->queue_mapping; in hinic_xmit_frame() local
473 txq = &nic_dev->txqs[q_id]; in hinic_xmit_frame()
505 netif_stop_subqueue(netdev, qp->q_id); in hinic_xmit_frame()
512 netif_wake_subqueue(nic_dev->netdev, qp->q_id); in hinic_xmit_frame()
536 netdev_txq = netdev_get_tx_queue(netdev, q_id); in hinic_xmit_frame()
652 if (__netif_subqueue_stopped(nic_dev->netdev, qp->q_id) && in free_tx_poll()
654 netdev_txq = netdev_get_tx_queue(txq->netdev, qp->q_id); in free_tx_poll()
658 netif_wake_subqueue(nic_dev->netdev, qp->q_id); in free_tx_poll()
783 irqname_len = snprintf(NULL, 0, "hinic_txq%d", qp->q_id) + 1; in hinic_init_txq()
790 sprintf(txq->irq_name, "hinic_txq%d", qp->q_id); in hinic_init_txq()
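
Note: lines 783 and 790 use the two-pass snprintf() sizing idiom: snprintf(NULL, 0, ...) writes nothing and returns the length the output would have, so the IRQ name buffer can be allocated exactly, plus one byte for the terminating NUL. Standalone version:

    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
        int q_id = 7;

        /* first pass: snprintf with a NULL buffer and size 0 writes
         * nothing and returns the length the output would have */
        int len = snprintf(NULL, 0, "hinic_txq%d", q_id) + 1;  /* +1: NUL */

        char *irq_name = malloc(len);
        if (!irq_name)
            return 1;

        /* second pass: format into the exactly-sized buffer */
        snprintf(irq_name, len, "hinic_txq%d", q_id);
        puts(irq_name);  /* hinic_txq7 */
        free(irq_name);
        return 0;
    }
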
hinic_hw_cmdq.h
110 u32 q_id; member
171 enum hinic_set_arm_qtype q_type, u32 q_id);
hinic_hw_qp.c
42 #define SQ_CTXT_OFFSET(max_sqs, max_rqs, q_id) \ argument
43 (((max_rqs) + (max_sqs)) * CTXT_RSVD + (q_id) * Q_CTXT_SIZE)
45 #define RQ_CTXT_OFFSET(max_sqs, max_rqs, q_id) \ argument
47 (max_sqs + (q_id)) * Q_CTXT_SIZE)
624 HINIC_SQ_DB_INFO_SET(qp->q_id, QID)); in sq_prepare_db()
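
Note: the two offset macros describe one flat context table: a reserved region of CTXT_RSVD bytes per queue, then all SQ contexts, then the RQ contexts, so RQ number q_id lands in slot max_sqs + q_id. A worked example with invented sizes:

    #include <stdio.h>

    #define CTXT_RSVD   8u   /* hypothetical reserved bytes per queue */
    #define Q_CTXT_SIZE 48u  /* hypothetical size of one queue context */

    /* SQ contexts occupy slots [0, max_sqs); RQ contexts follow at
     * slots [max_sqs, max_sqs + max_rqs). */
    #define SQ_CTXT_OFFSET(max_sqs, max_rqs, q_id) \
        (((max_rqs) + (max_sqs)) * CTXT_RSVD + (q_id) * Q_CTXT_SIZE)

    #define RQ_CTXT_OFFSET(max_sqs, max_rqs, q_id) \
        (((max_rqs) + (max_sqs)) * CTXT_RSVD + \
         ((max_sqs) + (q_id)) * Q_CTXT_SIZE)

    int main(void)
    {
        /* with 16 SQs and 16 RQs, RQ 0 starts right where SQ 15 ends */
        printf("end of sq15: %u, start of rq0: %u\n",
               SQ_CTXT_OFFSET(16, 16, 15) + Q_CTXT_SIZE,
               RQ_CTXT_OFFSET(16, 16, 0));   /* both print 1024 */
        return 0;
    }
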
hinic_rx.c
384 skb_record_rx_queue(skb, qp->q_id); in rxq_recv()
495 cpumask_set_cpu(qp->q_id % num_online_cpus(), &mask); in rx_request_irq()
530 "hinic_rxq%d", qp->q_id); in hinic_init_rxq()
hinic_hw_cmdq.c
511 enum hinic_set_arm_qtype q_type, u32 q_id) in hinic_set_arm_bit() argument
520 arm_bit.q_id = q_id; in hinic_set_arm_bit()
524 dev_err(&pdev->dev, "Failed to set arm for qid %d\n", q_id); in hinic_set_arm_bit()
hinic_hw_eqs.h
176 int q_id; member
hinic_hw_qp.h
115 u16 q_id; member
hinic_hw_dev.c
997 hw_ci.sq_id = qp->q_id; in hinic_hwdev_hw_ci_addr_set()
/drivers/crypto/hisilicon/sec/
sec_drv.c
688 int q_id; in sec_isr_handle() local
693 q_id = outorder_msg->data & SEC_OUT_BD_INFO_Q_ID_M; in sec_isr_handle()
694 msg = msg_ring->vaddr + q_id; in sec_isr_handle()
701 set_bit(q_id, queue->unprocessed); in sec_isr_handle()
702 if (q_id == queue->expected) in sec_isr_handle()
719 q_id = outorder_msg->data & SEC_OUT_BD_INFO_Q_ID_M; in sec_isr_handle()
720 msg = msg_ring->vaddr + q_id; in sec_isr_handle()
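
Note: the sec_isr_handle() hits sketch an in-order delivery scheme for out-of-order completions: each finished q_id is marked in an unprocessed bitmap, and entries are drained only while the expected index is marked. A userspace sketch of the drain logic (ring size and names invented):

    #include <stdbool.h>
    #include <stdio.h>

    #define RING_SIZE 8

    static bool unprocessed[RING_SIZE];  /* stand-in for the kernel bitmap */
    static int expected;                 /* next q_id to deliver in order */

    /* Record one (possibly out-of-order) completion, then deliver every
     * consecutive entry starting at 'expected'. */
    static void complete(int q_id)
    {
        unprocessed[q_id] = true;

        while (unprocessed[expected]) {
            printf("deliver %d\n", expected);
            unprocessed[expected] = false;
            expected = (expected + 1) % RING_SIZE;
        }
    }

    int main(void)
    {
        complete(1);  /* out of order: nothing delivered yet */
        complete(2);  /* still waiting for 0 */
        complete(0);  /* delivers 0, 1, 2 in order */
        return 0;
    }
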
/drivers/staging/qlge/
qlge.h
1238 u8 q_id; member
2231 u16 q_id);
2311 void ql_dump_hw_cb(struct ql_adapter *qdev, int size, u32 bit, u16 q_id);
2317 #define QL_DUMP_HW_CB(qdev, size, bit, q_id) \ argument
2318 ql_dump_hw_cb(qdev, size, bit, q_id)
2325 #define QL_DUMP_HW_CB(qdev, size, bit, q_id) argument
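
Note: the paired defines at lines 2317 and 2325 are the standard compile-time debug switch: with dumping enabled the macro forwards to ql_dump_hw_cb(), otherwise it expands to nothing and the call sites compile away. A minimal version of the pattern (the enabling flag here is hypothetical):

    #include <stdio.h>

    #ifdef ENABLE_DUMP  /* hypothetical flag; build with -DENABLE_DUMP */
    #define QL_DUMP_HW_CB(size, bit, q_id) dump_hw_cb(size, bit, q_id)
    #else
    #define QL_DUMP_HW_CB(size, bit, q_id)  /* expands to nothing */
    #endif

    void dump_hw_cb(int size, unsigned int bit, unsigned short q_id)
    {
        printf("cb: size=%d bit=%#x q_id=%hu\n", size, bit, q_id);
    }

    int main(void)
    {
        QL_DUMP_HW_CB(64, 0x10, 3);  /* real call or no-op, per build */
        return 0;
    }
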
qlge_dbg.c
1802 void ql_dump_hw_cb(struct ql_adapter *qdev, int size, u32 bit, u16 q_id) in ql_dump_hw_cb() argument
1812 if (ql_write_cfg(qdev, ptr, size, bit, q_id)) { in ql_dump_hw_cb()
/drivers/net/ethernet/intel/ice/
ice_lib.c
1367 int tx_rings_per_v, rx_rings_per_v, q_id, q_base; in ice_vsi_map_rings_to_vectors() local
1376 for (q_id = q_base; q_id < (q_base + tx_rings_per_v); q_id++) { in ice_vsi_map_rings_to_vectors()
1377 struct ice_ring *tx_ring = vsi->tx_rings[q_id]; in ice_vsi_map_rings_to_vectors()
1392 for (q_id = q_base; q_id < (q_base + rx_rings_per_v); q_id++) { in ice_vsi_map_rings_to_vectors()
1393 struct ice_ring *rx_ring = vsi->rx_rings[q_id]; in ice_vsi_map_rings_to_vectors()
2188 &txq_meta->q_id, &txq_meta->q_teid, rst_src, in ice_vsi_stop_tx_ring()
2232 txq_meta->q_id = ring->reg_idx; in ice_fill_txq_meta()
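
Note: ice_vsi_map_rings_to_vectors() hands out contiguous chunks: each interrupt vector takes tx_rings_per_v (or rx_rings_per_v) consecutive ring indices starting at q_base, after which q_base advances. A sketch of the distribution, assuming an even split (the real driver also spreads any remainder):

    #include <stdio.h>

    int main(void)
    {
        const int num_q_vectors = 3, num_txq = 9;
        const int tx_rings_per_v = num_txq / num_q_vectors;
        int q_base = 0;

        for (int v_id = 0; v_id < num_q_vectors; v_id++) {
            /* each vector owns the contiguous range
             * [q_base, q_base + tx_rings_per_v) */
            for (int q_id = q_base; q_id < q_base + tx_rings_per_v; q_id++)
                printf("vector %d <- txq %d\n", v_id, q_id);
            q_base += tx_rings_per_v;
        }
        return 0;
    }
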
ice_lib.h
13 u16 q_id; member
ice_switch.h
109 u16 q_id:11; member
ice_common.c
2894 sz += qg_list[i].num_qs * sizeof(qg_list[i].q_id); in ice_aq_dis_lan_txq()
2897 sz += sizeof(qg_list[i]) - sizeof(qg_list[i].q_id); in ice_aq_dis_lan_txq()
2915 le16_to_cpu(qg_list[0].q_id[0]), in ice_aq_dis_lan_txq()
3318 qg_list.q_id[0] = cpu_to_le16(q_ids[i]); in ice_dis_vsi_txq()
ice_switch.c
761 act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) & in ice_fill_sw_rule()
772 act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) & in ice_fill_sw_rule()
ice_adminq_cmd.h
1442 __le16 q_id[1]; member
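
Note: the size arithmetic in ice_aq_dis_lan_txq() pairs with the __le16 q_id[1] member above: the descriptor ends in a one-element array (the pre-C99 flexible-array idiom), so the real size is the struct size minus the placeholder slot plus num_qs actual entries. A userspace sketch of the same computation (struct layout simplified):

    #include <stdio.h>
    #include <stdlib.h>

    /* one placeholder element; the real count travels in num_qs
     * (simplified from the adminq descriptor) */
    struct qg_list {
        unsigned char num_qs;
        unsigned short q_id[1];
    };

    int main(void)
    {
        unsigned char num_qs = 4;

        /* size = fixed part (struct minus the placeholder slot)
         * plus num_qs real q_id entries */
        size_t sz = sizeof(struct qg_list)
                    - sizeof(((struct qg_list *)0)->q_id)
                    + num_qs * sizeof(unsigned short);

        struct qg_list *qg = calloc(1, sz);
        if (!qg)
            return 1;

        qg->num_qs = num_qs;
        for (unsigned int i = 0; i < num_qs; i++)
            qg->q_id[i] = i;  /* writes stay inside the calloc'd block */

        printf("descriptor size: %zu bytes\n", sz);
        free(qg);
        return 0;
    }
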
/drivers/net/hyperv/
hyperv_net.h
680 u16 q_id; member
691 u16 q_id; member
697 u16 q_id; member
742 u16 q_id; member
/drivers/net/ethernet/hisilicon/hns/
hns_dsaf_main.c
250 u32 q_id, q_num_per_port; in hns_dsaf_mix_def_qid_cfg() local
256 for (i = 0, q_id = 0; i < DSAF_SERVICE_NW_NUM; i++) { in hns_dsaf_mix_def_qid_cfg()
259 0xff, 0, q_id); in hns_dsaf_mix_def_qid_cfg()
260 q_id += q_num_per_port; in hns_dsaf_mix_def_qid_cfg()
267 u32 q_id, q_num_per_port; in hns_dsaf_inner_qid_cfg() local
276 for (mac_id = 0, q_id = 0; mac_id < DSAF_SERVICE_NW_NUM; mac_id++) { in hns_dsaf_inner_qid_cfg()
281 q_id); in hns_dsaf_inner_qid_cfg()
282 q_id += q_num_per_port; in hns_dsaf_inner_qid_cfg()
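
Note: both hns_dsaf loops carve a flat queue-ID space into per-port chunks: each port is assigned the base ID q_id, which then advances by q_num_per_port. Sketch of the assignment (counts invented):

    #include <stdio.h>

    int main(void)
    {
        const int num_ports = 4, q_num_per_port = 16;  /* invented counts */
        int q_id = 0;

        for (int port = 0; port < num_ports; port++) {
            /* this port owns queue IDs [q_id, q_id + q_num_per_port) */
            printf("port %d: default qid base %d\n", port, q_id);
            q_id += q_num_per_port;
        }
        return 0;
    }
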
/drivers/net/ethernet/hisilicon/hns3/hns3pf/
hclge_err.c
1705 u16 *q_id) in hclge_query_over_8bd_err_info() argument
1718 *q_id = le16_to_cpu(req->over_8bd_no_fe_qid); in hclge_query_over_8bd_err_info()
1735 u16 q_id; in hclge_handle_over_8bd_err() local
1738 ret = hclge_query_over_8bd_err_info(hdev, &vf_id, &q_id); in hclge_handle_over_8bd_err()
1746 vf_id, q_id); in hclge_handle_over_8bd_err()
hclge_tm.c
302 u16 q_id, u16 qs_id) in hclge_tm_q_to_qs_map_cfg() argument
311 map->nq_id = cpu_to_le16(q_id); in hclge_tm_q_to_qs_map_cfg()
/drivers/net/ethernet/intel/i40e/
i40e_virtchnl_pf.c
2291 u16 q_id; in i40e_ctrl_vf_tx_rings() local
2293 for_each_set_bit(q_id, &q_map, I40E_MAX_VF_QUEUES) { in i40e_ctrl_vf_tx_rings()
2295 vsi->base_queue + q_id, in i40e_ctrl_vf_tx_rings()
2314 u16 q_id; in i40e_ctrl_vf_rx_rings() local
2316 for_each_set_bit(q_id, &q_map, I40E_MAX_VF_QUEUES) { in i40e_ctrl_vf_rx_rings()
2317 ret = i40e_control_wait_rx_q(pf, vsi->base_queue + q_id, in i40e_ctrl_vf_rx_rings()
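
Note: the i40e helpers touch only the queues a VF actually owns: q_map is a bitmap and for_each_set_bit() visits the index of each set bit. A userspace equivalent of that walk:

    #include <stdio.h>

    #define MAX_VF_QUEUES 16

    int main(void)
    {
        unsigned long q_map = 0x0b;  /* queues 0, 1 and 3 enabled */

        /* equivalent of for_each_set_bit(q_id, &q_map, MAX_VF_QUEUES):
         * visit only the indices whose bit is set */
        for (unsigned int q_id = 0; q_id < MAX_VF_QUEUES; q_id++)
            if (q_map & (1UL << q_id))
                printf("enable ring %u\n", q_id);
        return 0;
    }
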
