
Searched refs: q_size (Results 1 – 16 of 16) sorted by relevance

/drivers/misc/bcm-vk/
bcm_vk_msg.c
102 return (qinfo->q_size - msgq_occupied(msgq, qinfo) - 1); in msgq_avail_space()
495 qinfo->q_size = msgq_size; in bcm_vk_sync_msgq()
497 qinfo->q_low = qinfo->q_size >> 1; in bcm_vk_sync_msgq()
498 qinfo->q_mask = qinfo->q_size - 1; in bcm_vk_sync_msgq()
560 avail, qinfo->q_size); in bcm_vk_append_ib_sgl()
644 if (wr_idx >= qinfo->q_size) { in bcm_to_v_msg_enqueue()
646 wr_idx, qinfo->q_size); in bcm_to_v_msg_enqueue()
806 max_msg_to_process = BCM_VK_MSG_PROC_MAX_LOOP * qinfo->q_size; in bcm_to_h_msg_dequeue()
826 if ((rd_idx >= qinfo->q_size) || in bcm_to_h_msg_dequeue()
827 (src_size > (qinfo->q_size - 1))) { in bcm_to_h_msg_dequeue()
[all …]
bcm_vk_msg.h
44 u32 q_size; member
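
The q_mask = q_size - 1 and q_low = q_size >> 1 assignments above suggest a power-of-two ring: masking with q_size - 1 replaces a modulo when the read/write indices wrap, and one slot is left unused so a full queue never looks empty. A minimal sketch of that pattern, assuming power-of-two sizing; the struct and helper names are illustrative, not the driver's:

#include <linux/types.h>

struct msgq_sketch {
        u32 q_size;     /* number of slots, assumed to be a power of two */
        u32 q_mask;     /* q_size - 1 */
        u32 q_low;      /* low-water mark, q_size >> 1 in the hits above */
        u32 rd_idx;
        u32 wr_idx;
};

static u32 msgq_occupied_sketch(const struct msgq_sketch *q)
{
        /* masking the index difference handles wrap-around cheaply */
        return (q->wr_idx - q->rd_idx) & q->q_mask;
}

static u32 msgq_avail_space_sketch(const struct msgq_sketch *q)
{
        /* one slot is kept empty so a full queue never looks empty */
        return q->q_size - msgq_occupied_sketch(q) - 1;
}

The wr_idx >= q_size and rd_idx >= q_size checks in bcm_to_v_msg_enqueue() and bcm_to_h_msg_dequeue() above guard against out-of-range indices read back from the shared queue.
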
/drivers/net/ethernet/cavium/liquidio/
request_manager.c
63 u32 q_size; in octeon_init_instr_queue() local
80 q_size = (u32)conf->instr_type * num_descs; in octeon_init_instr_queue()
86 iq->base_addr = lio_dma_alloc(oct, q_size, &iq->base_addr_dma); in octeon_init_instr_queue()
103 lio_dma_free(oct, q_size, iq->base_addr, iq->base_addr_dma); in octeon_init_instr_queue()
149 lio_dma_free(oct, q_size, iq->base_addr, iq->base_addr_dma); in octeon_init_instr_queue()
167 u64 desc_size = 0, q_size; in octeon_delete_instr_queue() local
186 q_size = iq->max_count * desc_size; in octeon_delete_instr_queue()
187 lio_dma_free(oct, (u32)q_size, iq->base_addr, in octeon_delete_instr_queue()
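
In the liquidio instruction queue, q_size is a plain byte count: the per-descriptor size (conf->instr_type) times the number of descriptors, handed to the DMA allocator and passed back unchanged on free. A rough sketch of that sizing, using dma_alloc_coherent()/dma_free_coherent() as stand-ins for the driver's lio_dma_alloc()/lio_dma_free() wrappers; the struct and function names below are illustrative:

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/types.h>

struct iq_sketch {
        void *base_addr;
        dma_addr_t base_addr_dma;
        u32 max_count;          /* number of descriptors in the ring */
};

static int iq_alloc_sketch(struct device *dev, struct iq_sketch *iq,
                           u32 desc_size, u32 num_descs)
{
        u32 q_size = desc_size * num_descs;     /* bytes for the whole ring */

        iq->base_addr = dma_alloc_coherent(dev, q_size, &iq->base_addr_dma,
                                           GFP_KERNEL);
        if (!iq->base_addr)
                return -ENOMEM;

        iq->max_count = num_descs;
        return 0;
}

static void iq_free_sketch(struct device *dev, struct iq_sketch *iq,
                           u32 desc_size)
{
        /* the free must pass the same byte count used at allocation time */
        dma_free_coherent(dev, (size_t)iq->max_count * desc_size,
                          iq->base_addr, iq->base_addr_dma);
}
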
/drivers/crypto/cavium/cpt/
cptvf_main.c
208 size_t q_size; in alloc_command_queues() local
218 q_size = qlen * cqinfo->cmd_size; in alloc_command_queues()
223 size_t rem_q_size = q_size; in alloc_command_queues()
/drivers/net/ethernet/pensando/ionic/
ionic_lif.c
384 dma_free_coherent(dev, qcq->q_size, qcq->q_base, qcq->q_base_pa); in ionic_qcq_free()
571 int q_size, cq_size; in ionic_qcq_alloc() local
574 q_size = ALIGN(num_descs * desc_size, PAGE_SIZE); in ionic_qcq_alloc()
577 new->q_size = PAGE_SIZE + q_size + cq_size; in ionic_qcq_alloc()
578 new->q_base = dma_alloc_coherent(dev, new->q_size, in ionic_qcq_alloc()
589 cq_base = PTR_ALIGN(q_base + q_size, PAGE_SIZE); in ionic_qcq_alloc()
590 cq_base_pa = ALIGN(new->q_base_pa + q_size, PAGE_SIZE); in ionic_qcq_alloc()
594 new->q_size = PAGE_SIZE + (num_descs * desc_size); in ionic_qcq_alloc()
595 new->q_base = dma_alloc_coherent(dev, new->q_size, &new->q_base_pa, in ionic_qcq_alloc()
644 dma_free_coherent(dev, new->q_size, new->q_base, new->q_base_pa); in ionic_qcq_alloc()
[all …]
ionic_debugfs.c
131 debugfs_create_x32("q_size", 0400, qcq_dentry, &qcq->q_size); in ionic_debugfs_add_qcq()
ionic_lif.h
76 u32 q_size; member
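
In ionic, the stored q_size is the size of one coherent buffer that holds both the descriptor ring and the completion ring: each ring size is rounded up to a page multiple, and an extra page of headroom lets the cq base be realigned inside the buffer (the debugfs hit simply exposes that total). A simplified sketch of that layout; qcq_sketch and its fields are stand-ins for the driver's ionic_qcq:

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/mm.h>

struct qcq_sketch {
        u32 q_size;             /* total bytes of the shared allocation */
        void *q_base;
        dma_addr_t q_base_pa;
        void *cq_base;
        dma_addr_t cq_base_pa;
};

static int qcq_alloc_sketch(struct device *dev, struct qcq_sketch *new,
                            unsigned int num_descs, unsigned int desc_size,
                            unsigned int cq_desc_size)
{
        size_t q_size = ALIGN(num_descs * desc_size, PAGE_SIZE);
        size_t cq_size = ALIGN(num_descs * cq_desc_size, PAGE_SIZE);

        /* one extra page so the cq ring can be realigned inside the buffer */
        new->q_size = PAGE_SIZE + q_size + cq_size;
        new->q_base = dma_alloc_coherent(dev, new->q_size, &new->q_base_pa,
                                         GFP_KERNEL);
        if (!new->q_base)
                return -ENOMEM;

        /* cq ring starts at the next page boundary after the q ring */
        new->cq_base = PTR_ALIGN((u8 *)new->q_base + q_size, PAGE_SIZE);
        new->cq_base_pa = ALIGN(new->q_base_pa + q_size, PAGE_SIZE);
        return 0;
}
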
/drivers/crypto/marvell/octeontx/
otx_cptvf_main.c
194 size_t q_size, c_size, rem_q_size; in alloc_command_queues() local
203 q_size = qlen * OTX_CPT_INST_SIZE; in alloc_command_queues()
210 rem_q_size = q_size; in alloc_command_queues()
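
The Cavium CPT and Marvell OcteonTX VF hits size their command queues the same way (qlen entries times a fixed per-command size) and then track rem_q_size while carving the total into smaller DMA chunks. A sketch of that chunking loop under an illustrative chunk size, with a flat array standing in for the chunk list the real drivers keep:

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/minmax.h>

#define CMD_CHUNK_BYTES_SKETCH  (32 * 1024)     /* illustrative chunk size */

struct cmd_chunk_sketch {
        void *va;
        dma_addr_t dma;
        size_t size;
};

/*
 * Carve qlen * cmd_size bytes of command queue into DMA chunks, filling
 * 'chunks' (at most max_chunks entries).  Returns the number of chunks
 * used, or a negative errno after unwinding.
 */
static int alloc_command_queue_sketch(struct device *dev, u32 qlen,
                                      u32 cmd_size,
                                      struct cmd_chunk_sketch *chunks,
                                      int max_chunks)
{
        size_t q_size = (size_t)qlen * cmd_size;        /* total bytes needed */
        size_t rem_q_size = q_size;                     /* bytes still to allocate */
        int n = 0;

        while (rem_q_size) {
                struct cmd_chunk_sketch *c;

                if (n == max_chunks)
                        goto unwind;

                c = &chunks[n];
                c->size = min_t(size_t, rem_q_size, CMD_CHUNK_BYTES_SKETCH);
                c->va = dma_alloc_coherent(dev, c->size, &c->dma, GFP_KERNEL);
                if (!c->va)
                        goto unwind;

                rem_q_size -= c->size;
                n++;
        }
        return n;

unwind:
        while (n--)
                dma_free_coherent(dev, chunks[n].size, chunks[n].va,
                                  chunks[n].dma);
        return -ENOMEM;
}
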
/drivers/media/platform/qcom/venus/
hfi_venus.c
62 u32 q_size; member
186 qsize = qhdr->q_size; in venus_write_queue()
256 qsize = qhdr->q_size; in venus_read_queue()
745 qhdr->q_size = IFACEQ_QUEUE_SIZE / 4; in venus_set_qhdr_defaults()
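
In the Venus HFI code, q_size sits in a queue header shared with the firmware, and the default written in venus_set_qhdr_defaults() is IFACEQ_QUEUE_SIZE / 4, suggesting the length is kept in 32-bit words rather than bytes. A trimmed sketch of that header and default; the fields are reduced to what the hits touch:

#include <linux/types.h>

/* Trimmed to the fields the hits above touch; the real header has more. */
struct hfi_queue_header_sketch {
        u32 q_size;             /* queue length in 32-bit words */
        u32 read_idx;
        u32 write_idx;
};

static void set_qhdr_defaults_sketch(struct hfi_queue_header_sketch *qhdr,
                                     u32 queue_bytes)
{
        qhdr->q_size = queue_bytes / 4; /* bytes -> dwords, as in the /4 above */
        qhdr->read_idx = 0;
        qhdr->write_idx = 0;
}
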
/drivers/infiniband/ulp/rtrs/
rtrs-srv.c
977 static int post_recv_io(struct rtrs_srv_con *con, size_t q_size) in post_recv_io() argument
981 for (i = 0; i < q_size; i++) { in post_recv_io()
994 size_t q_size; in post_recv_path() local
999 q_size = SERVICE_CON_QUEUE_DEPTH; in post_recv_path()
1001 q_size = srv->queue_depth; in post_recv_path()
1003 err = post_recv_io(to_srv_con(srv_path->s.con[cid]), q_size); in post_recv_path()
rtrs-clt.c
695 static int post_recv_io(struct rtrs_clt_con *con, size_t q_size) in post_recv_io() argument
700 for (i = 0; i < q_size; i++) { in post_recv_io()
717 size_t q_size = 0; in post_recv_path() local
722 q_size = SERVICE_CON_QUEUE_DEPTH; in post_recv_path()
724 q_size = clt_path->queue_depth; in post_recv_path()
730 q_size *= 2; in post_recv_path()
732 err = post_recv_io(to_clt_con(clt_path->s.con[cid]), q_size); in post_recv_path()
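
In rtrs, q_size is the number of receive work requests to pre-post on a connection: the service connection uses a fixed SERVICE_CON_QUEUE_DEPTH, the I/O connections use the negotiated queue depth, and the client path doubles it (presumably because two completions can arrive per request). A sketch of that selection and posting loop; the types, the constant's value, and the post_one_recv_sketch() helper are hypothetical stand-ins:

#include <linux/types.h>

/* Illustrative value only; the real constant lives in the rtrs sources. */
#define SERVICE_CON_QUEUE_DEPTH_SKETCH  512

struct rtrs_con_sketch {
        int unused;     /* the real connection carries an ib_qp, cq, ... */
};

/* Hypothetical stand-in for the driver's ib_post_recv()-based helper. */
static int post_one_recv_sketch(struct rtrs_con_sketch *con)
{
        return 0;
}

/* Pre-post q_size receive work requests on one connection. */
static int post_recv_io_sketch(struct rtrs_con_sketch *con, size_t q_size)
{
        size_t i;

        for (i = 0; i < q_size; i++) {
                int err = post_one_recv_sketch(con);

                if (err)
                        return err;
        }
        return 0;
}

/* Pick the q_size for a connection, mirroring the selection in the hits. */
static size_t recv_q_size_sketch(bool service_con, size_t queue_depth,
                                 bool two_completions_per_req)
{
        size_t q_size = service_con ? SERVICE_CON_QUEUE_DEPTH_SKETCH
                                    : queue_depth;

        if (two_completions_per_req)    /* the client path doubles it */
                q_size *= 2;
        return q_size;
}
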
/drivers/net/hyperv/
hyperv_net.h
729 u16 q_size; member
791 u16 q_size; member
/drivers/net/ethernet/brocade/bna/
bna_tx_rx.c
3318 u32 q_size; in bna_tx_res_req() local
3328 q_size = txq_depth * BFI_TXQ_WI_SIZE; in bna_tx_res_req()
3329 q_size = ALIGN(q_size, PAGE_SIZE); in bna_tx_res_req()
3330 page_count = q_size >> PAGE_SHIFT; in bna_tx_res_req()
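
In bna_tx_res_req(), q_size is the TX descriptor ring in bytes (queue depth times the work-item size), rounded up to a whole number of pages so the page count falls out of a shift. A small sketch of that arithmetic, assuming BFI_TXQ_WI_SIZE is passed in as wi_size:

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/types.h>

static u32 txq_page_count_sketch(u32 txq_depth, u32 wi_size)
{
        u32 q_size = txq_depth * wi_size;       /* bytes of work-item ring */

        q_size = ALIGN(q_size, PAGE_SIZE);      /* round up to whole pages */
        return q_size >> PAGE_SHIFT;            /* pages to reserve */
}
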
/drivers/media/platform/
rcar_jpu.c
1019 unsigned int q_size = q_data->format.plane_fmt[i].sizeimage; in jpu_queue_setup() local
1021 if (sizes[i] < q_size) in jpu_queue_setup()
/drivers/md/
dm-cache-policy-smq.c
287 static unsigned q_size(struct queue *q) in q_size() function
1146 return q_size(&mq->dirty) == 0u; in clean_target_met()
/drivers/scsi/ibmvscsi/
ibmvfc.c
2461 int wait, i, q_index, q_size; in ibmvfc_wait_for_ops() local
2469 q_size = vhost->scsi_scrqs.active_queues; in ibmvfc_wait_for_ops()
2472 q_size = 1; in ibmvfc_wait_for_ops()
2478 for (q_index = 0; q_index < q_size; q_index++) { in ibmvfc_wait_for_ops()
2499 for (q_index = 0; q_index < q_size; q_index++) { in ibmvfc_wait_for_ops()