/drivers/net/ethernet/fungible/funcore/

fun_queue.c
  175  for (i = 0; i < funq->rq_depth; i++) {  in fun_clean_rq()
  192  for (i = 0; i < funq->rq_depth; i++) {  in fun_fill_rq()
  209  funq->rq_tail = funq->rq_depth - 1;  in fun_fill_rq()
  222  if (++funq->rq_buf_idx == funq->rq_depth)  in fun_rq_update_pos()
  344  funq->rq_depth;  in fun_process_cq()
  375  funq->rqes = fun_alloc_ring_mem(funq->fdev->dev, funq->rq_depth,  in fun_alloc_rqes()
  396  fun_free_ring_mem(dev, funq->rq_depth, sizeof(*funq->rqes),  in fun_free_queue()
  419  if (req->rq_depth) {  in fun_alloc_queue()
  458  if (req->rq_depth) {  in fun_alloc_queue()
  460  funq->rq_depth = req->rq_depth;  in fun_alloc_queue()
  [all …]

fun_queue.h
  47   u32 rq_depth;  member
  124  u32 rq_depth;  member

fun_dev.c
  232  .rq_depth = areq->rq_depth,  in fun_enable_admin_queue()
  280  if (areq->rq_depth) {  in fun_enable_admin_queue()
  582  if (cq_count < 2 || sq_count < 2 + !!fdev->admin_q->rq_depth)  in fun_get_dev_limits()

fun_dev.h
  94   u16 rq_depth;  member
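The fun_queue.c hits read as standard receive-ring bookkeeping: the ring is filled up to rq_depth entries, the tail parks at rq_depth - 1, and the buffer index wraps when it reaches rq_depth. A minimal standalone sketch of that pattern follows; struct demo_rq and the demo_* helpers are hypothetical, and the wrap back to zero is an assumption, since the listing only shows the comparison on line 222.

/*
 * Minimal sketch of the ring-index bookkeeping suggested by the
 * fun_queue.c hits above.  Names other than rq_depth/rq_tail/rq_buf_idx
 * are hypothetical; the wrap-to-zero is an assumption.
 */
#include <stdio.h>

struct demo_rq {
	unsigned int rq_depth;   /* number of RQ descriptors */
	unsigned int rq_tail;    /* last posted descriptor */
	unsigned int rq_buf_idx; /* next buffer position */
	unsigned int posted;     /* buffers handed out so far */
};

static void demo_fill_rq(struct demo_rq *q)
{
	/* Post one buffer per descriptor (posting itself omitted), then
	 * park the tail at the last slot, as fun_fill_rq() does above. */
	for (unsigned int i = 0; i < q->rq_depth; i++)
		q->posted++;
	q->rq_tail = q->rq_depth - 1;
}

static void demo_rq_update_pos(struct demo_rq *q)
{
	/* Advance the buffer index and wrap at rq_depth (wrap assumed). */
	if (++q->rq_buf_idx == q->rq_depth)
		q->rq_buf_idx = 0;
}

int main(void)
{
	struct demo_rq q = { .rq_depth = 4 };

	demo_fill_rq(&q);
	for (int i = 0; i < 6; i++)
		demo_rq_update_pos(&q);
	printf("tail=%u buf_idx=%u posted=%u\n", q.rq_tail, q.rq_buf_idx, q.posted);
	return 0;
}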
/drivers/net/ethernet/fungible/funeth/

funeth.h
  76   unsigned int rq_depth;  member
  116  unsigned int rq_depth;  member

funeth_ethtool.c
  566  ring->rx_pending = fp->rq_depth;  in fun_get_ringparam()
  594  fp->rq_depth == ring->rx_pending)  in fun_set_ringparam()
  600  .rq_depth = ring->rx_pending,  in fun_set_ringparam()
  610  fp->rq_depth = ring->rx_pending;  in fun_set_ringparam()
  611  fp->cq_depth = 2 * fp->rq_depth;  in fun_set_ringparam()

funeth_main.c
  509  qset->rq_depth, qset->rxq_start, qset->state);  in fun_alloc_rings()
  842  .rq_depth = fp->rq_depth,  in funeth_open()
  1643 .rq_depth = fp->rq_depth,  in fun_change_num_queues()
  1783 fp->rq_depth = min_t(unsigned int, RQ_DEPTH, fdev->q_depth);  in fun_create_netdev()
  2003 .rq_depth = ADMIN_RQ_DEPTH,  in funeth_probe()
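The funeth_ethtool.c hits show the usual ethtool ringparam round trip: fun_get_ringparam() reports rq_depth as rx_pending, and fun_set_ringparam() skips the no-op case, copies rx_pending back into rq_depth, and derives cq_depth = 2 * rq_depth. A simplified standalone sketch of that flow follows; struct demo_priv and the plain function signatures are hypothetical stand-ins for the driver's real ethtool_ops callbacks, only the depth relations come from the listed lines.

/*
 * Simplified sketch of the ringparam handling visible in
 * funeth_ethtool.c above.  Types and signatures are hypothetical.
 */
#include <stdbool.h>
#include <stdio.h>

struct demo_ringparam {
	unsigned int rx_pending;
};

struct demo_priv {
	unsigned int rq_depth;
	unsigned int cq_depth;
};

static void demo_get_ringparam(const struct demo_priv *fp,
			       struct demo_ringparam *ring)
{
	ring->rx_pending = fp->rq_depth;	/* report the current RQ depth */
}

static bool demo_set_ringparam(struct demo_priv *fp,
			       const struct demo_ringparam *ring)
{
	if (fp->rq_depth == ring->rx_pending)
		return false;			/* nothing to change */

	fp->rq_depth = ring->rx_pending;
	fp->cq_depth = 2 * fp->rq_depth;	/* twice the RQ depth, as on line 611 */
	return true;
}

int main(void)
{
	struct demo_priv fp = { .rq_depth = 512, .cq_depth = 1024 };
	struct demo_ringparam ring = { .rx_pending = 1024 };

	demo_set_ringparam(&fp, &ring);
	demo_get_ringparam(&fp, &ring);
	printf("rq_depth=%u cq_depth=%u rx_pending=%u\n",
	       fp.rq_depth, fp.cq_depth, ring.rx_pending);
	return 0;
}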
/drivers/net/ethernet/huawei/hinic/

hinic_hw_dev.c
  271  unsigned int rq_depth)  in set_hw_ioctxt()  argument
  285  hw_ioctxt.rq_depth = ilog2(rq_depth);  in set_hw_ioctxt()
  438  int hinic_hwdev_ifup(struct hinic_hwdev *hwdev, u16 sq_depth, u16 rq_depth)  in hinic_hwdev_ifup()  argument
  462  func_to_io->rq_depth = rq_depth;  in hinic_hwdev_ifup()
  489  err = set_hw_ioctxt(hwdev, sq_depth, rq_depth);  in hinic_hwdev_ifup()

hinic_hw_io.h
  74   u16 rq_depth;  member

hinic_dev.h
  100  u16 rq_depth;  member

hinic_hw_dev.h
  286  u16 rq_depth;  member
  627  int hinic_hwdev_ifup(struct hinic_hwdev *hwdev, u16 sq_depth, u16 rq_depth);

hinic_ethtool.c
  557  ring->rx_pending = nic_dev->rq_depth;  in hinic_get_ringparam()
  600  new_rq_depth == nic_dev->rq_depth)  in hinic_set_ringparam()
  605  nic_dev->sq_depth, nic_dev->rq_depth,  in hinic_set_ringparam()
  609  nic_dev->rq_depth = new_rq_depth;  in hinic_set_ringparam()

hinic_hw_mbox.c
  1289 ((hw_ctxt)->rq_depth >= HINIC_QUEUE_MIN_DEPTH && \
  1290 (hw_ctxt)->rq_depth <= HINIC_QUEUE_MAX_DEPTH && \
  1300 if (!hw_ctxt->rq_depth && !hw_ctxt->sq_depth &&  in hw_ctxt_qps_param_valid()

hinic_port.c
  491  rq_num.rq_depth = ilog2(nic_dev->rq_depth);  in hinic_set_max_qnum()

hinic_main.c
  423  nic_dev->rq_depth);  in hinic_open()
  1213 nic_dev->rq_depth = HINIC_RQ_DEPTH;  in nic_dev_init()

hinic_hw_io.c
  293  func_to_io->rq_depth, HINIC_RQ_WQE_SIZE);  in init_qp()

hinic_port.h
  317  u32 rq_depth;  member
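In hinic, rq_depth is kept as an entry count in software but handed to the hardware/firmware context as its log2 (lines 285 and 491), while the mailbox side validates the received depths against HINIC_QUEUE_MIN_DEPTH/HINIC_QUEUE_MAX_DEPTH. The sketch below illustrates only the log2 encoding; demo_ilog2() and struct demo_hw_ioctxt stand in for the kernel's ilog2() and the real command structure, and the example depth of 4096 is a placeholder, not the driver's default.

/*
 * Sketch of the depth encoding visible in set_hw_ioctxt() and
 * hinic_set_max_qnum() above: the depth is assumed to be a power of
 * two, as a log2 encoding requires.
 */
#include <stdint.h>
#include <stdio.h>

struct demo_hw_ioctxt {
	uint16_t sq_depth;	/* log2 of the SQ entry count */
	uint16_t rq_depth;	/* log2 of the RQ entry count */
};

static unsigned int demo_ilog2(unsigned int v)
{
	unsigned int l = 0;

	while (v > 1) {		/* floor(log2(v)), stand-in for ilog2() */
		v >>= 1;
		l++;
	}
	return l;
}

static void demo_set_hw_ioctxt(struct demo_hw_ioctxt *ctxt,
			       unsigned int sq_depth, unsigned int rq_depth)
{
	ctxt->sq_depth = demo_ilog2(sq_depth);
	ctxt->rq_depth = demo_ilog2(rq_depth);	/* e.g. 4096 -> 12 */
}

int main(void)
{
	struct demo_hw_ioctxt ctxt;

	demo_set_hw_ioctxt(&ctxt, 4096, 4096);
	printf("encoded sq_depth=%u rq_depth=%u\n", ctxt.sq_depth, ctxt.rq_depth);
	return 0;
}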
/drivers/infiniband/hw/irdma/

user.h
  302  u32 *rq_depth, u8 *rq_shift);
  379  u32 rq_depth;  member

verbs.c
  637  ret = irdma_uk_calc_depth_shift_rq(ukinfo, &ukinfo->rq_depth,  in irdma_setup_umode_qp()
  645  (ukinfo->rq_depth - IRDMA_RQ_RSVD) >> ukinfo->rq_shift;  in irdma_setup_umode_qp()
  647  ukinfo->rq_size = ukinfo->rq_depth >> ukinfo->rq_shift;  in irdma_setup_umode_qp()
  677  status = irdma_uk_calc_depth_shift_rq(ukinfo, &ukinfo->rq_depth,  in irdma_setup_kmode_qp()
  688  kcalloc(ukinfo->rq_depth, sizeof(*iwqp->kqp.rq_wrid_mem), GFP_KERNEL);  in irdma_setup_kmode_qp()
  699  size = (ukinfo->sq_depth + ukinfo->rq_depth) * IRDMA_QP_WQE_MIN_SIZE;  in irdma_setup_kmode_qp()
  717  ukinfo->shadow_area = ukinfo->rq[ukinfo->rq_depth].elem;  in irdma_setup_kmode_qp()
  719  info->rq_pa + (ukinfo->rq_depth * IRDMA_QP_WQE_MIN_SIZE);  in irdma_setup_kmode_qp()
  721  ukinfo->rq_size = ukinfo->rq_depth >> ukinfo->rq_shift;  in irdma_setup_kmode_qp()
  724  iwqp->max_recv_wr = (ukinfo->rq_depth - IRDMA_RQ_RSVD) >> ukinfo->rq_shift;  in irdma_setup_kmode_qp()

uk.c
  1392 u32 *rq_depth, u8 *rq_shift)  in irdma_uk_calc_depth_shift_rq()  argument
  1405 *rq_shift, rq_depth);  in irdma_uk_calc_depth_shift_rq()
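In irdma, rq_depth appears to count minimum-size WQE quanta and rq_shift how many quanta one receive WQE occupies, so the verbs.c lines above derive rq_size = rq_depth >> rq_shift, max_recv_wr = (rq_depth - IRDMA_RQ_RSVD) >> rq_shift, and a ring of rq_depth * IRDMA_QP_WQE_MIN_SIZE bytes. The worked example below simply replays that arithmetic; the concrete values and the DEMO_* constants are illustrative placeholders, not the driver's actual limits.

/*
 * Worked example of the arithmetic on verbs.c lines 645/647/699/721/724
 * above.  Only the formulas come from the listing; the values are made up.
 */
#include <stdio.h>

#define DEMO_RQ_RSVD		1U	/* stand-in for IRDMA_RQ_RSVD */
#define DEMO_QP_WQE_MIN_SIZE	32U	/* stand-in for IRDMA_QP_WQE_MIN_SIZE */

int main(void)
{
	unsigned int rq_depth = 256;	/* quanta, as returned via irdma_uk_calc_depth_shift_rq() */
	unsigned char rq_shift = 1;	/* quanta per receive WQE, from the same helper */

	unsigned int rq_size = rq_depth >> rq_shift;
	unsigned int max_recv_wr = (rq_depth - DEMO_RQ_RSVD) >> rq_shift;
	unsigned int rq_bytes = rq_depth * DEMO_QP_WQE_MIN_SIZE;

	printf("rq_size=%u max_recv_wr=%u rq_bytes=%u\n",
	       rq_size, max_recv_wr, rq_bytes);
	return 0;
}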
/drivers/infiniband/hw/efa/

efa_com_cmd.h
  26   u32 rq_depth;  member

efa_com_cmd.c
  32   params->rq_depth;  in efa_com_create_qp()

efa_verbs.c
  705  create_qp_params.rq_depth = init_attr->cap.max_recv_wr;  in efa_create_qp()
/drivers/infiniband/hw/hns/

hns_roce_hw_v2.h
  1184 __le16 rq_depth;  member