
Searched refs: num_desc (Results 1 – 25 of 30), sorted by relevance


/drivers/staging/rdma/hfi1/
sdma.h
370 u16 num_desc; member
638 tx->num_desc = 0; in sdma_txinit_ahg()
722 struct sdma_desc *desc = &tx->descp[tx->num_desc]; in make_tx_sdma_desc()
724 if (!tx->num_desc) { in make_tx_sdma_desc()
750 tx->descp[tx->num_desc].qw[0] |= in _sdma_close_tx()
752 tx->descp[tx->num_desc].qw[1] |= in _sdma_close_tx()
755 tx->descp[tx->num_desc].qw[1] |= in _sdma_close_tx()
782 tx->num_desc++; in _sdma_txadd_daddr()
812 if ((unlikely(tx->num_desc == tx->desc_limit))) { in sdma_txadd_page()
859 if ((unlikely(tx->num_desc == tx->desc_limit))) { in sdma_txadd_daddr()
[all …]
sdma.c
1329 if (tx->num_desc) { in sdma_txclean()
1337 for (i = 1 + skip; i < tx->num_desc; i++) in sdma_txclean()
1339 tx->num_desc = 0; in sdma_txclean()
1438 u16 num_desc = 0; in sdma_desc_avail() local
1449 num_desc = stx->num_desc; in sdma_desc_avail()
1451 if (num_desc > avail) in sdma_desc_avail()
1453 avail -= num_desc; in sdma_desc_avail()
1997 for (i = 1; i < tx->num_desc; i++, descp++) { in submit_tx()
2021 sde->desc_avail -= tx->num_desc; in submit_tx()
2036 if (tx->num_desc <= sde->desc_avail) in sdma_check_progress()
[all …]
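The hfi1 matches share one pattern: a transmit request carries its descriptor count in tx->num_desc, and the engine only accepts it once that many ring slots are free (sdma_check_progress(), submit_tx()); sdma_desc_avail() applies the same test to queued requests. A minimal sketch of that accounting, using hypothetical stand-in structures rather than the real hfi1 types:

    #include <stdbool.h>
    #include <stdio.h>

    struct tx_req { unsigned short num_desc; };   /* stand-in for the sdma tx request */
    struct engine { unsigned short desc_avail; }; /* stand-in for the sdma engine */

    /* Accept the request only if enough descriptor slots are free. */
    static bool engine_submit(struct engine *sde, struct tx_req *tx)
    {
        if (tx->num_desc > sde->desc_avail)   /* same test as sdma_check_progress() */
            return false;                     /* caller queues the request and waits */
        sde->desc_avail -= tx->num_desc;      /* same bookkeeping as submit_tx() */
        return true;
    }

    int main(void)
    {
        struct engine sde = { .desc_avail = 4 };
        struct tx_req a = { .num_desc = 3 }, b = { .num_desc = 3 };

        printf("a: %s\n", engine_submit(&sde, &a) ? "submitted" : "deferred");
        printf("b: %s\n", engine_submit(&sde, &b) ? "submitted" : "deferred");
        return 0;
    }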
/drivers/gpu/drm/via/
via_dmablit.c
66 int num_desc = vsg->num_desc; in via_unmap_blit_from_device() local
67 unsigned cur_descriptor_page = num_desc / vsg->descriptors_per_page; in via_unmap_blit_from_device()
68 unsigned descriptor_this_page = num_desc % vsg->descriptors_per_page; in via_unmap_blit_from_device()
73 while (num_desc--) { in via_unmap_blit_from_device()
109 int num_desc = 0; in via_map_blit_for_device() local
148 num_desc++; in via_map_blit_for_device()
161 vsg->num_desc = num_desc; in via_map_blit_for_device()
272 vsg->num_desc_pages = (vsg->num_desc + vsg->descriptors_per_page - 1) / in via_alloc_desc_pages()
285 vsg->num_desc); in via_alloc_desc_pages()
via_dmablit.h
45 int num_desc; member
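In via_dmablit.c, vsg->num_desc is both a running count built up in via_map_blit_for_device() and an index that via_unmap_blit_from_device() splits into a descriptor-page number and an offset within that page, while via_alloc_desc_pages() derives the page count with a round-up division. A small sketch of that arithmetic, with a made-up descriptors_per_page value:

    #include <stdio.h>

    #define DESC_PER_PAGE 128   /* assumed value of vsg->descriptors_per_page */

    int main(void)
    {
        int num_desc = 300;

        /* page count, as in via_alloc_desc_pages() */
        int pages = (num_desc + DESC_PER_PAGE - 1) / DESC_PER_PAGE;
        /* page/slot split, as in via_unmap_blit_from_device() */
        int page = num_desc / DESC_PER_PAGE;
        int slot = num_desc % DESC_PER_PAGE;

        printf("%d descriptors need %d pages; index %d sits at page %d, slot %d\n",
               num_desc, pages, num_desc, page, slot);
        return 0;
    }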
/drivers/soc/ti/
knav_qmss_queue.c
683 for (i = 0; i < pool->num_desc; i++) { in kdesc_fill_pool()
717 WARN_ON(i != pool->num_desc); in kdesc_empty_pool()
748 int num_desc, int region_id) in knav_pool_create() argument
794 if (num_desc > (region->num_desc - region->used_desc)) { in knav_pool_create()
809 if ((pi->region_offset - last_offset) >= num_desc) { in knav_pool_create()
813 last_offset = pi->region_offset + pi->num_desc; in knav_pool_create()
819 pool->num_desc = num_desc; in knav_pool_create()
821 region->used_desc += num_desc; in knav_pool_create()
861 pool->region->used_desc -= pool->num_desc; in knav_pool_destroy()
979 if (!region->num_desc) { in knav_queue_setup_region()
[all …]
knav_qmss.h
205 unsigned num_desc; member
232 int num_desc; member
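The knav_qmss matches carve a pool of num_desc descriptors out of a region: knav_pool_create() fails unless region->num_desc - region->used_desc leaves enough room, then charges num_desc to used_desc, and knav_pool_destroy() gives it back. A simplified sketch of that bookkeeping (the real code also searches for a free gap between existing pools, which is omitted here):

    #include <stdbool.h>
    #include <stdio.h>

    struct region { unsigned int num_desc, used_desc; };
    struct pool   { int num_desc; unsigned int region_offset; };

    static bool pool_create(struct region *r, struct pool *p, int num_desc)
    {
        if (num_desc > (int)(r->num_desc - r->used_desc)) /* as in knav_pool_create() */
            return false;
        p->num_desc = num_desc;
        p->region_offset = r->used_desc;  /* simplification: append only, no gap search */
        r->used_desc += num_desc;
        return true;
    }

    static void pool_destroy(struct region *r, struct pool *p)
    {
        r->used_desc -= p->num_desc;      /* as in knav_pool_destroy() */
    }

    int main(void)
    {
        struct region r = { .num_desc = 1024, .used_desc = 0 };
        struct pool p;

        printf("create 768: %s (used %u)\n",
               pool_create(&r, &p, 768) ? "ok" : "no room", r.used_desc);
        pool_destroy(&r, &p);
        printf("after destroy: used %u\n", r.used_desc);
        return 0;
    }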
/drivers/net/ethernet/ti/
davinci_cpdma.c
89 int num_desc, used_desc; member
170 pool->num_desc = size / pool->desc_size; in cpdma_desc_pool_create()
172 bitmap_size = (pool->num_desc / BITS_PER_LONG) * sizeof(long); in cpdma_desc_pool_create()
223 cpdma_desc_alloc(struct cpdma_desc_pool *pool, int num_desc, bool is_rx) in cpdma_desc_alloc() argument
235 desc_end = pool->num_desc/2; in cpdma_desc_alloc()
237 desc_start = pool->num_desc/2; in cpdma_desc_alloc()
238 desc_end = pool->num_desc; in cpdma_desc_alloc()
242 desc_end, desc_start, num_desc, 0); in cpdma_desc_alloc()
244 bitmap_set(pool->bitmap, index, num_desc); in cpdma_desc_alloc()
254 struct cpdma_desc __iomem *desc, int num_desc) in cpdma_desc_free() argument
[all …]
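davinci_cpdma sizes its pool as size / desc_size and hands out descriptors from a bitmap, with RX allocations confined to the first half of the pool and TX allocations to the second (cpdma_desc_alloc()). The sketch below reproduces that split with a plain bool array standing in for the kernel bitmap helpers; the pool size and names are illustrative:

    #include <stdbool.h>
    #include <stdio.h>

    #define POOL_DESC 8
    static bool used[POOL_DESC];

    /* First index with num_desc consecutive free slots in the RX or TX half,
     * or -1; marks the slots used on success. */
    static int desc_alloc(int num_desc, bool is_rx)
    {
        int start = is_rx ? 0 : POOL_DESC / 2;
        int end   = is_rx ? POOL_DESC / 2 : POOL_DESC;

        for (int i = start; i + num_desc <= end; i++) {
            int j = 0;
            while (j < num_desc && !used[i + j])
                j++;
            if (j == num_desc) {
                for (j = 0; j < num_desc; j++)
                    used[i + j] = true;
                return i;
            }
        }
        return -1;
    }

    int main(void)
    {
        printf("rx alloc 2 -> %d\n", desc_alloc(2, true));   /* from the first half  */
        printf("tx alloc 2 -> %d\n", desc_alloc(2, false));  /* from the second half */
        printf("rx alloc 4 -> %d\n", desc_alloc(4, true));   /* -1: RX half has only 2 slots left */
        return 0;
    }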
/drivers/net/ethernet/qlogic/netxen/
netxen_nic_init.c
117 for (i = 0; i < rds_ring->num_desc; ++i) { in netxen_release_rx_buffers()
140 for (i = 0; i < tx_ring->num_desc; i++) { in netxen_release_tx_buffers()
212 tx_ring->num_desc = adapter->num_txd; in netxen_alloc_sw_resources()
234 rds_ring->num_desc = adapter->num_rxd; in netxen_alloc_sw_resources()
253 rds_ring->num_desc = adapter->num_jumbo_rxd; in netxen_alloc_sw_resources()
269 rds_ring->num_desc = adapter->num_lro_rxd; in netxen_alloc_sw_resources()
286 for (i = 0; i < rds_ring->num_desc; i++) { in netxen_alloc_sw_resources()
300 sds_ring->num_desc = adapter->num_rxd; in netxen_alloc_sw_resources()
1458 index = get_next_index(index, sds_ring->num_desc); in netxen_handle_fw_message()
1552 if (unlikely(index >= rds_ring->num_desc)) in netxen_process_rcv()
[all …]
netxen_nic_ethtool.c
426 u32 num_desc; in netxen_validate_ringparam() local
427 num_desc = max(val, min); in netxen_validate_ringparam()
428 num_desc = min(num_desc, max); in netxen_validate_ringparam()
429 num_desc = roundup_pow_of_two(num_desc); in netxen_validate_ringparam()
431 if (val != num_desc) { in netxen_validate_ringparam()
433 netxen_nic_driver_name, r_name, num_desc, val); in netxen_validate_ringparam()
436 return num_desc; in netxen_validate_ringparam()
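netxen_validate_ringparam() clamps the requested descriptor count to [min, max] and rounds it up to a power of two, warning when the result differs from what was asked for. A standalone sketch of the same validation, with roundup_pow2() standing in for the kernel's roundup_pow_of_two():

    #include <stdio.h>

    static unsigned int roundup_pow2(unsigned int v)
    {
        unsigned int p = 1;
        while (p < v)
            p <<= 1;
        return p;
    }

    static unsigned int validate_ringparam(unsigned int val, unsigned int min,
                                           unsigned int max, const char *r_name)
    {
        unsigned int num_desc = val < min ? min : val;

        if (num_desc > max)
            num_desc = max;
        num_desc = roundup_pow2(num_desc);
        if (num_desc != val)
            printf("%s ring: requested %u descriptors, using %u\n",
                   r_name, val, num_desc);
        return num_desc;
    }

    int main(void)
    {
        validate_ringparam(1000, 64, 8192, "rx");   /* reports that 1024 will be used */
        return 0;
    }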
netxen_nic.h
76 (sizeof(struct rcv_desc) * (rds_ring)->num_desc)
78 (sizeof(struct netxen_rx_buffer) * rds_ring->num_desc)
80 (sizeof(struct status_desc) * (sds_ring)->num_desc)
82 (sizeof(struct netxen_cmd_buffer) * tx_ring->num_desc)
84 (sizeof(struct cmd_desc_type0) * tx_ring->num_desc)
627 u32 num_desc; member
641 u32 num_desc; member
662 u32 num_desc; member
1878 tx_ring->sw_consumer, tx_ring->num_desc); in netxen_tx_avail()
netxen_nic_ctx.c
349 prq_rds[i].ring_size = cpu_to_le32(rds_ring->num_desc); in nx_fw_cmd_create_rx_ctx()
362 prq_sds[i].ring_size = cpu_to_le32(sds_ring->num_desc); in nx_fw_cmd_create_rx_ctx()
491 prq_cds->ring_size = cpu_to_le32(tx_ring->num_desc); in nx_fw_cmd_create_tx_ctx()
717 hwctx->cmd_ring_size = cpu_to_le32(tx_ring->num_desc); in netxen_init_old_ctx()
726 cpu_to_le32(rds_ring->num_desc); in netxen_init_old_ctx()
734 hwctx->sts_ring_size = cpu_to_le32(sds_ring->num_desc); in netxen_init_old_ctx()
737 hwctx->sts_rings[ring].size = cpu_to_le32(sds_ring->num_desc); in netxen_init_old_ctx()
/drivers/net/ethernet/toshiba/
spider_net.c
300 dma_free_coherent(&card->pdev->dev, chain->num_desc * sizeof(struct spider_net_hw_descr), in spider_net_free_chain()
324 alloc_size = chain->num_desc * sizeof(struct spider_net_hw_descr); in spider_net_init_chain()
331 memset(chain->ring, 0, chain->num_desc * sizeof(struct spider_net_descr)); in spider_net_init_chain()
337 for (i=0; i < chain->num_desc; i++, descr++, hwdescr++) { in spider_net_init_chain()
734 if (cnt < card->tx_chain.num_desc/4) in spider_net_set_low_watermark()
1006 chain->num_desc); in show_rx_chain()
1036 int from = (chain->num_desc + off - cnt) % chain->num_desc; in show_rx_chain()
1037 int to = (chain->num_desc + off - 1) % chain->num_desc; in show_rx_chain()
1105 for (i=0; i<chain->num_desc; i++) { in spider_net_resync_head_ptr()
1125 for (i=0; i<chain->num_desc; i++) { in spider_net_resync_tail_ptr()
[all …]
spider_net_ethtool.c
127 ering->tx_pending = card->tx_chain.num_desc; in spider_net_ethtool_get_ringparam()
129 ering->rx_pending = card->rx_chain.num_desc; in spider_net_ethtool_get_ringparam()
spider_net.h
410 int num_desc; member
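spider_net treats chain->num_desc as the ring length and indexes the ring with modular arithmetic; show_rx_chain() adds num_desc before subtracting so the expression cannot go negative. A tiny worked example of those two expressions with made-up values:

    #include <stdio.h>

    int main(void)
    {
        int num_desc = 256, off = 3, cnt = 10;

        int from = (num_desc + off - cnt) % num_desc;  /* 249: wrapped backwards past 0 */
        int to   = (num_desc + off - 1) % num_desc;    /* 2 */

        printf("window covers descriptors %d..%d (mod %d)\n", from, to, num_desc);
        return 0;
    }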
/drivers/net/ethernet/qlogic/qlcnic/
qlcnic_io.c
298 tx_ring->producer = get_next_index(producer, tx_ring->num_desc); in qlcnic_82xx_change_filter()
414 producer = get_next_index(producer, tx_ring->num_desc); in qlcnic_tx_encap_pkt()
535 producer = get_next_index(producer, tx_ring->num_desc); in qlcnic_tx_pkt()
548 producer = get_next_index(producer, tx_ring->num_desc); in qlcnic_tx_pkt()
674 num_txd = tx_ring->num_desc; in qlcnic_xmit_frame()
873 producer = get_next_index(producer, rds_ring->num_desc); in qlcnic_post_rx_buffers_nodb()
877 writel((producer - 1) & (rds_ring->num_desc - 1), in qlcnic_post_rx_buffers_nodb()
918 sw_consumer = get_next_index(sw_consumer, tx_ring->num_desc); in qlcnic_process_cmd_ring()
1093 index = get_next_index(index, sds_ring->num_desc); in qlcnic_handle_fw_message()
1209 if (unlikely(index >= rds_ring->num_desc)) in qlcnic_process_rcv()
[all …]
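The qlcnic_io.c matches advance the producer with get_next_index(producer, num_desc) and mask the doorbell value with (num_desc - 1), which relies on the ring size being a power of two (see the ethtool validation below). A sketch of that wrap; the body of get_next_index() is an assumption, since only its call sites appear in this listing:

    #include <stdio.h>

    static unsigned int get_next_index(unsigned int index, unsigned int num_desc)
    {
        return (index + 1) & (num_desc - 1);   /* assumed power-of-two wrap */
    }

    int main(void)
    {
        unsigned int num_desc = 1024, producer = 1023;

        producer = get_next_index(producer, num_desc);           /* wraps to 0 */
        unsigned int doorbell = (producer - 1) & (num_desc - 1);  /* as at line 877 above */

        printf("producer = %u, doorbell value = %u\n", producer, doorbell);
        return 0;
    }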
qlcnic_init.c
93 for (i = 0; i < rds_ring->num_desc; ++i) { in qlcnic_release_rx_buffers()
122 for (i = 0; i < rds_ring->num_desc; i++) { in qlcnic_reset_rx_buffers_list()
140 for (i = 0; i < tx_ring->num_desc; i++) { in qlcnic_release_tx_buffers()
206 rds_ring->num_desc = adapter->num_rxd; in qlcnic_alloc_sw_resources()
212 rds_ring->num_desc = adapter->num_jumbo_rxd; in qlcnic_alloc_sw_resources()
234 for (i = 0; i < rds_ring->num_desc; i++) { in qlcnic_alloc_sw_resources()
247 sds_ring->num_desc = adapter->num_rxd; in qlcnic_alloc_sw_resources()
qlcnic_ethtool.c
642 u32 num_desc; in qlcnic_validate_ringparam() local
643 num_desc = max(val, min); in qlcnic_validate_ringparam()
644 num_desc = min(num_desc, max); in qlcnic_validate_ringparam()
645 num_desc = roundup_pow_of_two(num_desc); in qlcnic_validate_ringparam()
647 if (val != num_desc) { in qlcnic_validate_ringparam()
649 qlcnic_driver_name, r_name, num_desc, val); in qlcnic_validate_ringparam()
652 return num_desc; in qlcnic_validate_ringparam()
qlcnic.h
66 (sizeof(struct rcv_desc) * (rds_ring)->num_desc)
68 (sizeof(struct qlcnic_rx_buffer) * rds_ring->num_desc)
70 (sizeof(struct status_desc) * (sds_ring)->num_desc)
72 (sizeof(struct qlcnic_cmd_buffer) * tx_ring->num_desc)
74 (sizeof(struct cmd_desc_type0) * tx_ring->num_desc)
580 u32 num_desc; member
592 u32 num_desc; member
625 u32 num_desc; member
1733 return tx_ring->sw_consumer + tx_ring->num_desc - in qlcnic_tx_avail()
qlcnic_ctx.c
316 prq_rds[i].ring_size = cpu_to_le32(rds_ring->num_desc); in qlcnic_82xx_fw_cmd_create_rx_ctx()
329 prq_sds[i].ring_size = cpu_to_le32(sds_ring->num_desc); in qlcnic_82xx_fw_cmd_create_rx_ctx()
480 prq_cds->ring_size = cpu_to_le32(tx_ring->num_desc); in qlcnic_82xx_fw_cmd_create_tx_ctx()
qlcnic_main.c
2409 tx_ring->num_desc = adapter->num_txd; in qlcnic_alloc_tx_rings()
2998 for (i = 0; i < tx_ring->num_desc; i++) { in dump_tx_ring_desc()
3026 rds_ring->producer, rds_ring->num_desc); in qlcnic_dump_rings()
3037 sds_ring->num_desc); in qlcnic_dump_rings()
3064 tx_ring->num_desc, qlcnic_tx_avail(tx_ring)); in qlcnic_dump_rings()
qlcnic_83xx_hw.c
1098 sds_mbx.sds_ring_size = sds->num_desc; in qlcnic_83xx_add_rings()
1221 sds_mbx.sds_ring_size = sds->num_desc; in qlcnic_83xx_create_rx_ctx()
1243 rds_mbx.reg_ring_len = rds->num_desc; in qlcnic_83xx_create_rx_ctx()
1250 rds_mbx.jmb_ring_len = rds->num_desc; in qlcnic_83xx_create_rx_ctx()
1341 mbx.size = tx->num_desc; in qlcnic_83xx_create_tx_ctx()
/drivers/dma/
qcom_bam_dma.c
69 u32 num_desc; member
624 async_desc->num_desc = num_alloc; in bam_prep_slave_sg()
773 async_desc->num_desc -= async_desc->xfer_len; in process_channel_irqs()
786 if (!async_desc->num_desc) in process_channel_irqs()
858 for (i = 0; i < bchan->curr_txd->num_desc; i++) in bam_tx_status()
925 if (async_desc->num_desc > MAX_DESCRIPTORS) in bam_start_dma()
928 async_desc->xfer_len = async_desc->num_desc; in bam_start_dma()
931 if (async_desc->num_desc == async_desc->xfer_len) in bam_start_dma()
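qcom_bam_dma records the total hardware descriptor count for a transfer in async_desc->num_desc, pushes at most MAX_DESCRIPTORS of them per pass as xfer_len, and subtracts the completed chunk in the IRQ path until the count reaches zero. A minimal sketch of that chunking loop; the limit value here is made up:

    #include <stdio.h>

    #define MAX_DESCRIPTORS 4096   /* illustrative stand-in for the driver's limit */

    int main(void)
    {
        unsigned int num_desc = 10000, xfer_len;

        while (num_desc) {
            xfer_len = num_desc > MAX_DESCRIPTORS ? MAX_DESCRIPTORS : num_desc;
            printf("program %u descriptors\n", xfer_len);
            num_desc -= xfer_len;   /* done from the completion IRQ in the driver */
        }
        return 0;
    }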
mic_x100_dma.c
160 int num_desc = len / max_xfer_size + 3; in mic_dma_prog_memcpy_desc() local
164 num_desc++; in mic_dma_prog_memcpy_desc()
166 ret = mic_dma_avail_desc_ring_space(ch, num_desc); in mic_dma_prog_memcpy_desc()
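mic_x100_dma estimates the descriptors for a memcpy as len / max_xfer_size plus a small fixed overhead, bumps the count once more (presumably for a partial final chunk; the condition is not visible in the snippet), and only then checks ring space. The arithmetic alone, with made-up sizes:

    #include <stdio.h>
    #include <stddef.h>

    int main(void)
    {
        size_t len = 1000000, max_xfer_size = 65536;

        int num_desc = (int)(len / max_xfer_size) + 3;
        if (len % max_xfer_size)   /* assumed reason for the extra num_desc++ */
            num_desc++;

        printf("memcpy of %zu bytes -> %d descriptors\n", len, num_desc);
        return 0;
    }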
/drivers/net/ethernet/intel/i40e/
i40e.h
529 u16 num_desc; member
/drivers/atm/
iphase.c
616 int num_desc; in ia_que_tx() local
618 num_desc = ia_avail_descs(iadev); in ia_que_tx()
620 while (num_desc && (skb = skb_dequeue(&iadev->tx_backlog))) { in ia_que_tx()
634 num_desc--; in ia_que_tx()
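iphase.c drains its TX backlog against the number of free descriptors: ia_avail_descs() seeds num_desc, and the loop dequeues one backlogged skb per remaining descriptor. A trivial model of that loop, with plain counters in place of the skb queue:

    #include <stdio.h>

    int main(void)
    {
        int num_desc = 3;   /* pretend ia_avail_descs() returned 3 */
        int backlog = 5;    /* pretend 5 packets are queued */

        while (num_desc && backlog) {
            printf("send one backlogged packet\n");
            backlog--;
            num_desc--;     /* one descriptor consumed per packet, as in ia_que_tx() */
        }
        printf("%d packets still backlogged\n", backlog);
        return 0;
    }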
