
Searched refs:q_depth (Results 1 – 22 of 22) sorted by relevance

/drivers/net/ethernet/huawei/hinic/
hinic_hw_wq.c
34 #define WQ_SIZE(wq) ((wq)->q_depth * (wq)->wqebb_size)
506 u16 wqebb_size, u16 wq_page_size, u16 q_depth, in hinic_wq_allocate() argument
525 if (q_depth & (q_depth - 1)) { in hinic_wq_allocate()
549 wq->q_depth = q_depth; in hinic_wq_allocate()
566 atomic_set(&wq->delta, q_depth); in hinic_wq_allocate()
567 wq->mask = q_depth - 1; in hinic_wq_allocate()
604 u16 q_depth, u16 max_wqe_size) in hinic_wqs_cmdq_alloc() argument
622 if (q_depth & (q_depth - 1)) { in hinic_wqs_cmdq_alloc()
652 wq[i].q_depth = q_depth; in hinic_wqs_cmdq_alloc()
670 atomic_set(&wq[i].delta, q_depth); in hinic_wqs_cmdq_alloc()
[all …]
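
Note: hinic_wq_allocate() above insists that q_depth be a power of two so the ring can be indexed with "idx & (q_depth - 1)" via wq->mask, while wq->delta starts at q_depth free entries. A minimal sketch of that validation and mask setup, with illustrative names rather than the driver's:

#include <stdint.h>
#include <errno.h>

/* Sketch of the power-of-two check seen in hinic_wq_allocate(): a ring of
 * q_depth entries is indexed with "idx & (q_depth - 1)", which is only
 * valid when q_depth is a power of two.  Names here are illustrative.
 */
struct ring {
	uint16_t q_depth;  /* number of entries in the ring */
	uint16_t mask;     /* q_depth - 1, used instead of % */
};

static int ring_init(struct ring *r, uint16_t q_depth)
{
	if (q_depth == 0 || (q_depth & (q_depth - 1)))
		return -EINVAL;          /* not a power of two */

	r->q_depth = q_depth;
	r->mask = q_depth - 1;           /* e.g. 256 -> 0xff */
	return 0;
}
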
hinic_hw_wq.h
30 u16 q_depth; member
80 u16 q_depth, u16 max_wqe_size);
91 u16 wqebb_size, u16 wq_page_size, u16 q_depth,
hinic_hw_cmdq.c
368 if (next_prod_idx >= wq->q_depth) { in cmdq_sync_cmd_direct_resp()
370 next_prod_idx -= wq->q_depth; in cmdq_sync_cmd_direct_resp()
445 if (next_prod_idx >= wq->q_depth) { in cmdq_set_arm_bit()
447 next_prod_idx -= wq->q_depth; in cmdq_set_arm_bit()
747 cmdq->done = vzalloc(array_size(sizeof(*cmdq->done), wq->q_depth)); in init_cmdq()
752 wq->q_depth)); in init_cmdq()
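
Note: in hinic_hw_cmdq.c the producer index is advanced by the number of WQEBBs a command needs and wrapped by subtracting q_depth once it runs past the end of the ring; the done/errcode arrays are sized with one slot per queue entry. A hedged sketch of the wrap step (illustrative names):

#include <stdint.h>

/* Illustrative wrap of a producer index that may step past the end of a
 * q_depth-entry ring, mirroring the pattern in cmdq_sync_cmd_direct_resp().
 */
static uint16_t advance_prod_idx(uint16_t prod_idx, uint16_t num_wqebbs,
				 uint16_t q_depth)
{
	uint16_t next = prod_idx + num_wqebbs;

	if (next >= q_depth)     /* stepped past the last entry */
		next -= q_depth; /* wrap to the start of the ring */

	return next;
}
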
hinic_hw_qp.c
219 skb_arr_size = wq->q_depth * sizeof(*sq->saved_skb); in alloc_sq_skb_arr()
247 skb_arr_size = wq->q_depth * sizeof(*rq->saved_skb); in alloc_rq_skb_arr()
319 cqe_size = wq->q_depth * sizeof(*rq->cqe); in alloc_rq_cqe()
324 cqe_dma_size = wq->q_depth * sizeof(*rq->cqe_dma); in alloc_rq_cqe()
329 for (i = 0; i < wq->q_depth; i++) { in alloc_rq_cqe()
362 for (i = 0; i < wq->q_depth; i++) in free_rq_cqe()
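
Note: hinic_hw_qp.c sizes its per-descriptor bookkeeping (saved skb pointers, RQ CQEs and their DMA addresses) as q_depth entries, one shadow slot per ring descriptor. A minimal user-space sketch of that layout, with made-up names:

#include <stdlib.h>
#include <stdint.h>

/* One bookkeeping slot per ring entry; names are illustrative. */
struct rx_ring {
	uint16_t q_depth;
	void   **saved_buf;   /* q_depth pointers, one per descriptor */
};

static int rx_ring_alloc(struct rx_ring *rq, uint16_t q_depth)
{
	rq->saved_buf = calloc(q_depth, sizeof(*rq->saved_buf));
	if (!rq->saved_buf)
		return -1;
	rq->q_depth = q_depth;
	return 0;
}
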
/drivers/net/ethernet/amazon/ena/
ena_eth_com.h
106 return io_sq->q_depth - 1 - cnt; in ena_com_free_desc()
201 need_update = unreported_comp > (io_cq->q_depth / ENA_COMP_HEAD_THRESH); in ena_com_update_dev_comp_head()
238 if (unlikely((io_cq->head & (io_cq->q_depth - 1)) == 0)) in ena_com_cq_inc_head()
249 masked_head = io_cq->head & (io_cq->q_depth - 1); in ena_com_tx_comp_req_id_get()
267 if (unlikely(*req_id >= io_cq->q_depth)) { in ena_com_tx_comp_req_id_get()
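
Note: ena_eth_com.h computes free SQ descriptors as q_depth - 1 minus the in-flight count, effectively keeping one slot unused so a full ring is distinguishable from an empty one, and masks head/tail with q_depth - 1. A sketch of those two helpers under the same power-of-two assumption (names are illustrative):

#include <stdint.h>

/* Free entries in a ring where tail and next_to_comp are free-running
 * counters; one slot stays reserved (cf. ena_com_free_desc()).
 */
static uint16_t ring_free_desc(uint16_t tail, uint16_t next_to_comp,
			       uint16_t q_depth)
{
	uint16_t in_use = tail - next_to_comp;   /* wraps naturally in u16 */

	return q_depth - 1 - in_use;
}

/* Position of a free-running counter inside the ring (q_depth power of two). */
static uint16_t ring_masked(uint16_t counter, uint16_t q_depth)
{
	return counter & (q_depth - 1);
}
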
ena_com.c
112 u16 size = ADMIN_SQ_SIZE(queue->q_depth); in ena_com_admin_init_sq()
134 u16 size = ADMIN_CQ_SIZE(queue->q_depth); in ena_com_admin_init_cq()
157 dev->aenq.q_depth = ENA_ASYNC_QUEUE_DEPTH; in ena_com_admin_init_aenq()
167 aenq->head = aenq->q_depth; in ena_com_admin_init_aenq()
177 aenq_caps |= dev->aenq.q_depth & ENA_REGS_AENQ_CAPS_AENQ_DEPTH_MASK; in ena_com_admin_init_aenq()
203 if (unlikely(command_id >= queue->q_depth)) { in get_comp_ctxt()
205 command_id, queue->q_depth); in get_comp_ctxt()
233 queue_size_mask = admin_queue->q_depth - 1; in __ena_com_submit_admin_cmd()
239 if (cnt >= admin_queue->q_depth) { in __ena_com_submit_admin_cmd()
282 size_t size = queue->q_depth * sizeof(struct ena_comp_ctx); in ena_com_init_comp_ctxt()
[all …]
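
Note: in ena_com.c the admin queue derives queue_size_mask as q_depth - 1 and rejects completions whose command_id is not below q_depth, since command_id doubles as an index into the q_depth-sized completion-context array. A hedged sketch of that validation (illustrative names):

#include <stdint.h>
#include <stddef.h>

struct comp_ctx { int status; };

/* Look up the completion context for a command id, rejecting ids that fall
 * outside the q_depth-sized array (cf. get_comp_ctxt()).  Names are
 * illustrative.
 */
static struct comp_ctx *get_ctx(struct comp_ctx *ctx_arr, uint16_t q_depth,
				uint16_t command_id)
{
	if (command_id >= q_depth)
		return NULL;   /* invalid id reported by the device */

	return &ctx_arr[command_id];
}
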
ena_eth_com.c
42 head_masked = io_cq->head & (io_cq->q_depth - 1); in ena_com_get_next_rx_cdesc()
67 tail_masked = io_sq->tail & (io_sq->q_depth - 1); in get_sq_desc_regular_queue()
82 dst_tail_mask = io_sq->tail & (io_sq->q_depth - 1); in ena_com_write_bounce_buffer_to_dev()
108 if (unlikely((io_sq->tail & (io_sq->q_depth - 1)) == 0)) in ena_com_write_bounce_buffer_to_dev()
236 if (unlikely((io_sq->tail & (io_sq->q_depth - 1)) == 0)) in ena_com_sq_update_tail()
245 idx &= (io_cq->q_depth - 1); in ena_com_rx_cdesc_idx_to_ptr()
273 head_masked = io_cq->head & (io_cq->q_depth - 1); in ena_com_cdesc_rx_pkt_get()
ena_com.h
153 u16 q_depth; member
195 u16 q_depth; member
243 u16 q_depth; member
273 u16 q_depth; member
/drivers/nvme/host/
pci.c
31 #define SQ_SIZE(q) ((q)->q_depth << (q)->sqes)
32 #define CQ_SIZE(q) ((q)->q_depth * sizeof(struct nvme_completion))
102 int q_depth; member
174 u16 q_depth; member
471 if (next_tail == nvmeq->q_depth) in nvme_write_sq_db()
495 if (++nvmeq->sq_tail == nvmeq->q_depth) in nvme_submit_cmd()
958 if (unlikely(cqe->command_id >= nvmeq->q_depth)) { in nvme_handle_cqe()
987 if (++start == nvmeq->q_depth) in nvme_complete_cqes()
994 if (nvmeq->cq_head == nvmeq->q_depth - 1) { in nvme_update_cq_head()
1139 c.create_cq.qsize = cpu_to_le16(nvmeq->q_depth - 1); in adapter_alloc_cq()
[all …]
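
Note: in the NVMe PCIe driver q_depth is the entry count of a queue: SQ_SIZE shifts it by the submission-entry size, the tail/head indices wrap back to 0 on reaching q_depth, wrapping the CQ head flips the phase bit used to spot new completions, and the controller is told qsize = q_depth - 1 because NVMe encodes queue sizes as zero-based. A sketch of the head/phase update, roughly following nvme_update_cq_head() (field names are illustrative):

#include <stdint.h>
#include <stdbool.h>

struct cq_state {
	uint16_t head;     /* next completion entry to read */
	uint16_t q_depth;  /* entries in the completion queue */
	bool     phase;    /* expected phase tag of new entries */
};

/* Advance the CQ head; when it wraps, new entries will carry the opposite
 * phase bit, so flip the expected phase as well.
 */
static void cq_advance_head(struct cq_state *cq)
{
	if (cq->head == cq->q_depth - 1) {
		cq->head = 0;
		cq->phase = !cq->phase;
	} else {
		cq->head++;
	}
}
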
/drivers/net/ethernet/brocade/bna/
bnad.c
78 for (i = 0; i < ccb->q_depth; i++) { in bnad_cq_cleanup()
91 u32 q_depth, u32 index) in bnad_tx_buff_unmap() argument
114 BNA_QE_INDX_INC(index, q_depth); in bnad_tx_buff_unmap()
126 BNA_QE_INDX_INC(index, q_depth); in bnad_tx_buff_unmap()
143 for (i = 0; i < tcb->q_depth; i++) { in bnad_txq_cleanup()
147 bnad_tx_buff_unmap(bnad, unmap_q, tcb->q_depth, i); in bnad_txq_cleanup()
162 u32 wis, unmap_wis, hw_cons, cons, q_depth; in bnad_txcmpl_process() local
174 q_depth = tcb->q_depth; in bnad_txcmpl_process()
176 wis = BNA_Q_INDEX_CHANGE(cons, hw_cons, q_depth); in bnad_txcmpl_process()
177 BUG_ON(!(wis <= BNA_QE_IN_USE_CNT(tcb, tcb->q_depth))); in bnad_txcmpl_process()
[all …]
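
Note: the bna driver walks its unmap array with BNA_QE_INDX_INC, an index increment modulo q_depth, and BNA_Q_INDEX_CHANGE measures how far the hardware consumer index has moved. The macro bodies are not shown in this excerpt; the sketch below is an illustrative re-statement for a generic ring, assuming a power-of-two q_depth:

#include <stdint.h>

/* Ring-index helpers in the spirit of BNA_QE_INDX_INC / BNA_Q_INDEX_CHANGE;
 * these are illustrative, not the driver's macros.
 */
static uint32_t qe_indx_inc(uint32_t index, uint32_t q_depth)
{
	return (index + 1) & (q_depth - 1);
}

/* Entries consumed between an old and a new (possibly wrapped) index. */
static uint32_t q_index_change(uint32_t old_idx, uint32_t new_idx,
			       uint32_t q_depth)
{
	return (new_idx - old_idx) & (q_depth - 1);
}
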
bna_types.h
422 u32 q_depth; member
550 u32 q_depth; member
566 int q_depth; member
614 u32 q_depth; member
bfa_msgq.c
518 msgq_cfg->cmdq.q_depth = htons(msgq->cmdq.depth); in bfa_msgq_init()
520 msgq_cfg->rspq.q_depth = htons(msgq->rspq.depth); in bfa_msgq_init()
bfi.h
413 u16 q_depth; /* Total num of entries in the queue */ member
bna_tx_rx.c
2385 q0->rcb->q_depth = rx_cfg->q0_depth; in bna_rx_create()
2386 q0->q_depth = rx_cfg->q0_depth; in bna_rx_create()
2412 q1->rcb->q_depth = rx_cfg->q1_depth; in bna_rx_create()
2413 q1->q_depth = rx_cfg->q1_depth; in bna_rx_create()
2444 rxp->cq.ccb->q_depth = cq_depth; in bna_rx_create()
3466 txq->tcb->q_depth = tx_cfg->txq_depth; in bna_tx_create()
/drivers/block/rsxx/
cregs.c
133 card->creg_ctrl.q_depth--; in creg_kick_queue()
185 card->creg_ctrl.q_depth++; in creg_queue_cmd()
322 card->creg_ctrl.q_depth--; in creg_reset()
399 card->creg_ctrl.q_depth + 20000); in __issue_creg_rw()
706 card->creg_ctrl.q_depth++; in rsxx_eeh_save_issued_creg()
rsxx_priv.h
128 unsigned int q_depth; member
dma.c
193 u32 q_depth = 0; in dma_intr_coal_auto_tune() local
201 q_depth += atomic_read(&card->ctrl[i].stats.hw_q_depth); in dma_intr_coal_auto_tune()
204 q_depth / 2, in dma_intr_coal_auto_tune()
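
Note: in rsxx/dma.c the auto-tuning path sums the hardware queue depth of every DMA channel and then derives an interrupt-coalescing parameter from half of that total. A sketch of that accumulation, with the per-channel state reduced to an array of counters (illustrative names):

#include <stdint.h>

/* Sum per-channel hardware queue depths and pick a coalescing target of
 * half the total, loosely mirroring dma_intr_coal_auto_tune().
 * Names are illustrative.
 */
static uint32_t coal_count_from_depths(const uint32_t *hw_q_depth,
				       unsigned int n_channels)
{
	uint32_t q_depth = 0;
	unsigned int i;

	for (i = 0; i < n_channels; i++)
		q_depth += hw_q_depth[i];

	return q_depth / 2;
}
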
/drivers/scsi/bfa/
bfi.h
538 #define BFI_MSGQ_FULL(_q) (((_q->pi + 1) % _q->q_depth) == _q->ci)
540 #define BFI_MSGQ_UPDATE_CI(_q) (_q->ci = (_q->ci + 1) % _q->q_depth)
541 #define BFI_MSGQ_UPDATE_PI(_q) (_q->pi = (_q->pi + 1) % _q->q_depth)
544 #define BFI_MSGQ_FREE_CNT(_q) ((_q->ci - _q->pi - 1) & (_q->q_depth - 1))
585 u16 q_depth; /* Total num of entries in the queue */ member
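
Note: the bfa message-queue macros treat q_depth as the total entry count of a producer/consumer ring: the queue is full when advancing the producer index would land on the consumer index, and both indices are advanced modulo q_depth (the free-count macro additionally masks with q_depth - 1, so it assumes a power-of-two depth). A sketch of the full test and producer update (illustrative names):

#include <stdint.h>
#include <stdbool.h>

/* Producer/consumer ring of q_depth entries, in the spirit of the
 * BFI_MSGQ_* macros above; names are illustrative.
 */
struct msgq {
	uint16_t pi;       /* producer index */
	uint16_t ci;       /* consumer index */
	uint16_t q_depth;  /* total entries */
};

/* Full when the next producer slot is the consumer slot (one entry wasted). */
static bool msgq_full(const struct msgq *q)
{
	return ((q->pi + 1) % q->q_depth) == q->ci;
}

static void msgq_update_pi(struct msgq *q)
{
	q->pi = (q->pi + 1) % q->q_depth;
}
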
bfa_fcpim.h
119 u16 q_depth; member
bfa_defs_svc.h
958 u16 q_depth; /* SCSI Queue depth */ member
bfa_svc.c
3567 fcport->cfg.q_depth = in bfa_fcport_isr()
3568 cpu_to_be16(fcport->cfg.q_depth); in bfa_fcport_isr()
3988 attr->pport_cfg.q_depth = bfa_fcpim_qdepth_get(bfa); in bfa_fcport_get_attr()
bfa_fcpim.c
497 return fcpim->q_depth; in bfa_fcpim_qdepth_get()