/drivers/net/wireless/broadcom/brcm80211/brcmfmac/

D | flowring.c
     142  if (flow->rings[i] == NULL)  in brcmf_flowring_create()
     160  flow->rings[i] = ring;  in brcmf_flowring_create()
     172  ring = flow->rings[flowid];  in brcmf_flowring_tid()
     192  ring = flow->rings[flowid];  in brcmf_flowring_block()
     201  if ((flow->rings[i]) && (i != flowid)) {  in brcmf_flowring_block()
     202  ring = flow->rings[i];  in brcmf_flowring_block()
     212  flow->rings[flowid]->blocked = blocked;  in brcmf_flowring_block()
     236  ring = flow->rings[flowid];  in brcmf_flowring_delete()
     247  flow->rings[flowid] = NULL;  in brcmf_flowring_delete()
     264  ring = flow->rings[flowid];  in brcmf_flowring_enqueue()
     [all …]

D | flowring.h
      41  struct brcmf_flowring_ring **rings;  member

D | pcie.c
    1094  struct brcmf_pcie_ringbuf *rings;  in brcmf_pcie_init_ringbuffers() local
    1222  rings = kcalloc(max_flowrings, sizeof(*ring), GFP_KERNEL);  in brcmf_pcie_init_ringbuffers()
    1223  if (!rings)  in brcmf_pcie_init_ringbuffers()
    1229  ring = &rings[i];  in brcmf_pcie_init_ringbuffers()
    1244  devinfo->shared.flowrings = rings;  in brcmf_pcie_init_ringbuffers()

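The flowring.c hits above trace a single idiom: a fixed-size table of ring
pointers in which create claims the first NULL slot (line 142) and delete
NULLs the slot out again (line 247). A minimal sketch of that slot-table
pattern, with hypothetical foo_* names standing in for the brcmfmac types:

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/types.h>

struct foo_ring {
        u16 flowid;
        bool blocked;
};

struct foo_flow {
        struct foo_ring **rings;        /* slot table; NULL marks a free slot */
        u16 nrings;
};

static int foo_flow_create(struct foo_flow *flow)
{
        struct foo_ring *ring;
        u16 i;

        for (i = 0; i < flow->nrings; i++)      /* scan for a free slot */
                if (flow->rings[i] == NULL)
                        break;
        if (i == flow->nrings)
                return -ENOSPC;

        ring = kzalloc(sizeof(*ring), GFP_KERNEL);
        if (!ring)
                return -ENOMEM;
        ring->flowid = i;
        flow->rings[i] = ring;
        return i;
}

static void foo_flow_delete(struct foo_flow *flow, u16 flowid)
{
        kfree(flow->rings[flowid]);
        flow->rings[flowid] = NULL;     /* slot becomes reusable again */
}
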
/drivers/i3c/master/mipi-i3c-hci/

D | dma.c
    167  struct hci_rings_data *rings = hci->io_data;  in hci_dma_cleanup() local
    171  if (!rings)  in hci_dma_cleanup()
    174  for (i = 0; i < rings->total; i++) {  in hci_dma_cleanup()
    175  rh = &rings->headers[i];  in hci_dma_cleanup()
    204  kfree(rings);  in hci_dma_cleanup()
    210  struct hci_rings_data *rings;  in hci_dma_init() local
    226  rings = kzalloc(sizeof(*rings) + nr_rings * sizeof(*rh), GFP_KERNEL);  in hci_dma_init()
    227  if (!rings)  in hci_dma_init()
    229  hci->io_data = rings;  in hci_dma_init()
    230  rings->total = nr_rings;  in hci_dma_init()
    [all …]

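The allocation at dma.c line 226 sizes one buffer for a header plus
nr_rings trailing ring-header entries. With a flexible array member the
same layout can be expressed through the kernel's overflow-checked
struct_size() helper; a sketch with made-up foo_* types (the real
hci_rings_data carries more state):

#include <linux/overflow.h>
#include <linux/slab.h>

struct foo_rh {
        void *xfer;                     /* per-ring bookkeeping, hypothetical */
};

struct foo_rings_data {
        unsigned int total;
        struct foo_rh headers[];        /* one trailing entry per ring */
};

static struct foo_rings_data *foo_rings_alloc(unsigned int nr_rings)
{
        struct foo_rings_data *rings;

        /* one allocation covers header + nr_rings entries, overflow-checked */
        rings = kzalloc(struct_size(rings, headers, nr_rings), GFP_KERNEL);
        if (rings)
                rings->total = nr_rings;
        return rings;
}
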
/drivers/soc/ti/

D | k3-ringacc.c
    219  struct k3_ring *rings;  member
    355  !(ringacc->rings[id].flags & K3_RING_FLAG_SHARED))  in k3_ringacc_request_ring()
    357  else if (ringacc->rings[id].flags & K3_RING_FLAG_SHARED)  in k3_ringacc_request_ring()
    369  ringacc->rings[id].proxy_id = proxy_id;  in k3_ringacc_request_ring()
    378  ringacc->rings[id].use_count++;  in k3_ringacc_request_ring()
    380  return &ringacc->rings[id];  in k3_ringacc_request_ring()
    408  *fwd_ring = &ringacc->rings[fwd_id];  in k3_dmaring_request_dual_ring()
    409  *compl_ring = &ringacc->rings[fwd_id + ringacc->num_rings];  in k3_dmaring_request_dual_ring()
    411  ringacc->rings[fwd_id].use_count++;  in k3_dmaring_request_dual_ring()
    740  reverse_ring = &ringacc->rings[ring->ring_id + ringacc->num_rings];  in k3_dmaring_cfg()
    [all …]

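Lines 408-409 show the dual-ring convention: the completion partner of
forward ring fwd_id sits num_rings slots further into the same array. A
sketch of that offset pairing plus the use_count bump, under assumed
foo_* names:

#include <linux/errno.h>
#include <linux/types.h>

struct foo_ring {
        u32 ring_id;
        u32 use_count;
};

struct foo_ringacc {
        struct foo_ring *rings;         /* 2 * num_rings entries in dual-ring mode */
        u32 num_rings;
};

static int foo_request_dual_ring(struct foo_ringacc *racc, u32 fwd_id,
                                 struct foo_ring **fwd_ring,
                                 struct foo_ring **compl_ring)
{
        if (fwd_id >= racc->num_rings)
                return -EINVAL;

        /* the completion partner lives num_rings slots past its forward ring */
        *fwd_ring = &racc->rings[fwd_id];
        *compl_ring = &racc->rings[fwd_id + racc->num_rings];
        racc->rings[fwd_id].use_count++;
        return 0;
}
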
/drivers/crypto/qat/qat_common/

D | adf_transport.c
    267  ring = &bank->rings[ring_num];  in adf_create_ring()
    338  adf_handle_response(&bank->rings[i]);  in adf_ring_response_handler()
    406  bank->rings = kzalloc_node(size, GFP_KERNEL,  in adf_init_bank()
    408  if (!bank->rings)  in adf_init_bank()
    425  ring = &bank->rings[i];  in adf_init_bank()
    439  tx_ring = &bank->rings[i - hw_data->tx_rx_gap];  in adf_init_bank()
    456  ring = &bank->rings[i];  in adf_init_bank()
    460  kfree(bank->rings);  in adf_init_bank()
    532  struct adf_etr_ring_data *ring = &bank->rings[i];  in cleanup_bank()
    540  kfree(bank->rings);  in cleanup_bank()
    [all …]

D | adf_transport_internal.h
     32  struct adf_etr_ring_data *rings;  member

D | adf_transport_debug.c
    155  struct adf_etr_ring_data *ring = &bank->rings[ring_id];  in adf_bank_show()

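adf_init_bank() allocates the bank's ring array node-locally (line 406)
and, at line 439, pairs each RX ring with the TX ring sitting tx_rx_gap
slots before it. A sketch of that layout with invented foo_* types (the
real adf_etr_ring_data carries far more state):

#include <linux/errno.h>
#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/types.h>

struct foo_ring_data {
        struct foo_ring_data *paired_tx;        /* set on RX rings only */
};

struct foo_bank {
        struct foo_ring_data *rings;
        u8 num_rings;
        u8 tx_rx_gap;           /* RX ring i pairs with TX ring i - tx_rx_gap */
};

static int foo_init_bank(struct foo_bank *bank, int numa_node)
{
        u8 i;

        /* node-local allocation, like adf_transport.c:406 */
        bank->rings = kzalloc_node(array_size(bank->num_rings,
                                              sizeof(*bank->rings)),
                                   GFP_KERNEL, numa_node);
        if (!bank->rings)
                return -ENOMEM;

        for (i = bank->tx_rx_gap; i < bank->num_rings; i++)
                bank->rings[i].paired_tx = &bank->rings[i - bank->tx_rx_gap];

        return 0;
}
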
/drivers/block/xen-blkback/

D | xenbus.c
     83  if (!blkif->rings || !blkif->rings[0].irq || !blkif->vbd.bdev)  in xen_update_blkif_status()
    109  ring = &blkif->rings[i];  in xen_update_blkif_status()
    123  ring = &blkif->rings[i];  in xen_update_blkif_status()
    133  blkif->rings = kcalloc(blkif->nr_rings, sizeof(struct xen_blkif_ring),  in xen_blkif_alloc_rings()
    135  if (!blkif->rings)  in xen_blkif_alloc_rings()
    139  struct xen_blkif_ring *ring = &blkif->rings[r];  in xen_blkif_alloc_rings()
    273  struct xen_blkif_ring *ring = &blkif->rings[r];  in xen_blkif_disconnect()
    337  kfree(blkif->rings);  in xen_blkif_disconnect()
    338  blkif->rings = NULL;  in xen_blkif_disconnect()
    388  if (!blkif->rings) \
    [all …]

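The lifecycle here is kcalloc at ring setup (line 133), then kfree plus
re-NULL at disconnect (lines 337-338) so guards like the one at line 83
stay valid across reconnects. A compressed sketch with hypothetical
foo_* names:

#include <linux/errno.h>
#include <linux/slab.h>

struct foo_ring {
        unsigned int irq;
};

struct foo_blkif {
        struct foo_ring *rings;
        unsigned int nr_rings;
};

static int foo_alloc_rings(struct foo_blkif *blkif)
{
        blkif->rings = kcalloc(blkif->nr_rings, sizeof(*blkif->rings),
                               GFP_KERNEL);
        return blkif->rings ? 0 : -ENOMEM;
}

/* freeing and re-NULLing together keeps checks like xenbus.c:83 safe */
static void foo_disconnect(struct foo_blkif *blkif)
{
        kfree(blkif->rings);
        blkif->rings = NULL;
}
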
/drivers/crypto/inside-secure/

D | safexcel.c
      51  for (i = 0; i < priv->config.rings; i++) {  in eip197_trc_cache_setupvirt()
     502  for (i = 0; i < priv->config.rings; i++) {  in safexcel_hw_setup_cdesc_rings()
     550  for (i = 0; i < priv->config.rings; i++) {  in safexcel_hw_setup_rdesc_rings()
     592  priv->config.pes, priv->config.rings);  in safexcel_hw_init()
     654  GENMASK(priv->config.rings - 1, 0),  in safexcel_hw_init()
     712  for (i = 0; i < priv->config.rings; i++) {  in safexcel_hw_init()
     738  for (i = 0; i < priv->config.rings; i++) {  in safexcel_hw_init()
     762  writel(EIP197_DxE_THR_CTRL_EN | GENMASK(priv->config.rings - 1, 0),  in safexcel_hw_init()
     766  writel(EIP197_DxE_THR_CTRL_EN | GENMASK(priv->config.rings - 1, 0),  in safexcel_hw_init()
    1336  priv->config.rings = min_t(u32, priv->hwconfig.hwnumrings, max_rings);  in safexcel_configure()
    [all …]

D | safexcel_ring.c
     74  return (atomic_inc_return(&priv->ring_used) % priv->config.rings);  in safexcel_select_ring()

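Two small idioms recur in these hits: GENMASK(rings - 1, 0) builds a mask
with one enable bit per configured ring (lines 654, 762, 766), and
safexcel_ring.c:74 picks a ring round-robin from an atomic counter. Both
in isolation, assuming 1 <= nrings <= 32:

#include <linux/atomic.h>
#include <linux/bits.h>
#include <linux/types.h>

static atomic_t foo_ring_used = ATOMIC_INIT(0);

/* round-robin choice; the cast keeps the modulo non-negative even after
 * the counter overflows */
static unsigned int foo_select_ring(unsigned int nrings)
{
        return (unsigned int)atomic_inc_return(&foo_ring_used) % nrings;
}

/* one enable bit per ring: nrings == 4 yields 0xf */
static u32 foo_all_rings_mask(unsigned int nrings)
{
        return GENMASK(nrings - 1, 0);
}
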
/drivers/gpu/drm/amd/amdgpu/

D | amdgpu_ring.c
    193  adev->rings[ring->idx] = ring;  in amdgpu_ring_init()
    287  if (!(ring->adev) || !(ring->adev->rings[ring->idx]))  in amdgpu_ring_fini()
    306  ring->adev->rings[ring->idx] = NULL;  in amdgpu_ring_fini()

D | amdgpu_fence.c
    550  struct amdgpu_ring *ring = adev->rings[i];  in amdgpu_fence_driver_hw_fini()
    578  struct amdgpu_ring *ring = adev->rings[i];  in amdgpu_fence_driver_sw_fini()
    617  struct amdgpu_ring *ring = adev->rings[i];  in amdgpu_fence_driver_hw_init()
    748  struct amdgpu_ring *ring = adev->rings[i];  in amdgpu_debugfs_fence_info_show()

D | amdgpu_debugfs.c
    1148  struct amdgpu_ring *ring = adev->rings[i];  in amdgpu_debugfs_test_ib_show()
    1164  struct amdgpu_ring *ring = adev->rings[i];  in amdgpu_debugfs_test_ib_show()
    1365  ring = adev->rings[val];  in amdgpu_debugfs_ib_preempt()
    1511  struct amdgpu_ring *ring = adev->rings[i];  in amdgpu_debugfs_init()

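adev->rings[] acts as a sparse registry: amdgpu_ring_init() publishes a
ring at its index (line 193), amdgpu_ring_fini() clears the slot (line
306), and the fence and debugfs consumers iterate the array skipping
NULL entries. The shape, reduced to hypothetical foo_* types:

#define FOO_MAX_RINGS 28        /* arbitrary cap for the sketch, not amdgpu's */

struct foo_ring {
        unsigned int idx;
};

struct foo_device {
        struct foo_ring *rings[FOO_MAX_RINGS];  /* sparse: NULL = unused slot */
};

static void foo_ring_init(struct foo_device *adev, struct foo_ring *ring)
{
        adev->rings[ring->idx] = ring;          /* publish, cf. amdgpu_ring.c:193 */
}

static void foo_ring_fini(struct foo_device *adev, struct foo_ring *ring)
{
        adev->rings[ring->idx] = NULL;          /* unpublish, cf. line 306 */
}

static void foo_for_each_ring(struct foo_device *adev,
                              void (*fn)(struct foo_ring *ring))
{
        unsigned int i;

        for (i = 0; i < FOO_MAX_RINGS; i++)
                if (adev->rings[i])             /* skip unused slots */
                        fn(adev->rings[i]);
}
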
/drivers/net/

D | tap.c
    1279  struct ptr_ring **rings;  in tap_queue_resize() local
    1283  rings = kmalloc_array(n, sizeof(*rings), GFP_KERNEL);  in tap_queue_resize()
    1284  if (!rings)  in tap_queue_resize()
    1288  rings[i++] = &q->ring;  in tap_queue_resize()
    1290  ret = ptr_ring_resize_multiple(rings, n,  in tap_queue_resize()
    1294  kfree(rings);  in tap_queue_resize()

D | tun.c
    3594  struct ptr_ring **rings;  in tun_queue_resize() local
    3598  rings = kmalloc_array(n, sizeof(*rings), GFP_KERNEL);  in tun_queue_resize()
    3599  if (!rings)  in tun_queue_resize()
    3604  rings[i] = &tfile->tx_ring;  in tun_queue_resize()
    3607  rings[i++] = &tfile->tx_ring;  in tun_queue_resize()
    3609  ret = ptr_ring_resize_multiple(rings, n,  in tun_queue_resize()
    3613  kfree(rings);  in tun_queue_resize()

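tap_queue_resize() and tun_queue_resize() share one shape: gather a
pointer to each queue's ptr_ring into a scratch array, then resize them
all in one call to ptr_ring_resize_multiple() from linux/ptr_ring.h. A
sketch of that shape; the foo_* names and the flat queues array are
invented (tap/tun walk their own per-queue structs instead):

#include <linux/errno.h>
#include <linux/ptr_ring.h>
#include <linux/slab.h>

struct foo_queue {
        struct ptr_ring ring;           /* one ptr_ring per queue */
};

static int foo_queues_resize(struct foo_queue *queues, unsigned int n,
                             unsigned int new_size, void (*destroy)(void *))
{
        struct ptr_ring **rings;
        unsigned int i;
        int ret;

        rings = kmalloc_array(n, sizeof(*rings), GFP_KERNEL);
        if (!rings)
                return -ENOMEM;

        for (i = 0; i < n; i++)         /* gather, cf. tun.c:3604 */
                rings[i] = &queues[i].ring;

        /* resize every ring in one call; destroy() frees evicted entries */
        ret = ptr_ring_resize_multiple(rings, n, new_size, GFP_KERNEL,
                                       destroy);
        kfree(rings);
        return ret;
}
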
/drivers/net/ethernet/broadcom/

D | bcm4908_enet.c
    236  struct bcm4908_enet_dma_ring *rings[] = { &enet->rx_ring, &enet->tx_ring };  in bcm4908_enet_dma_reset() local
    240  for (i = 0; i < ARRAY_SIZE(rings); i++)  in bcm4908_enet_dma_reset()
    241  enet_write(enet, rings[i]->cfg_block + ENET_DMA_CH_CFG, 0);  in bcm4908_enet_dma_reset()
    245  for (i = 0; i < ARRAY_SIZE(rings); i++) {  in bcm4908_enet_dma_reset()
    246  struct bcm4908_enet_dma_ring *ring = rings[i];  in bcm4908_enet_dma_reset()

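Line 236 is a small trick worth naming: an on-stack array of pointers to
the RX and TX rings lets one ARRAY_SIZE() loop treat both directions
uniformly. Reduced to hypothetical foo_* names:

#include <linux/kernel.h>       /* ARRAY_SIZE() */
#include <linux/types.h>

struct foo_dma_ring {
        u32 cfg_block;
};

struct foo_enet {
        struct foo_dma_ring rx_ring;
        struct foo_dma_ring tx_ring;
};

static void foo_ring_disable(struct foo_dma_ring *ring)
{
        ring->cfg_block = 0;    /* stand-in for the register write at line 241 */
}

static void foo_dma_reset(struct foo_enet *enet)
{
        /* on-stack pointer array so one loop covers both directions */
        struct foo_dma_ring *rings[] = { &enet->rx_ring, &enet->tx_ring };
        int i;

        for (i = 0; i < ARRAY_SIZE(rings); i++)
                foo_ring_disable(rings[i]);
}
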
/drivers/mailbox/

D | bcm-flexrm-mailbox.c
     295  struct flexrm_ring *rings;  member
     941  ring = &mbox->rings[i];  in flexrm_write_config_in_seqfile()
     969  ring = &mbox->rings[i];  in flexrm_write_stats_in_seqfile()
    1487  struct flexrm_ring *ring = &mbox->rings[desc->platform.msi_index];  in flexrm_mbox_msi_write()
    1547  mbox->rings = ring;  in flexrm_mbox_probe()
    1552  ring = &mbox->rings[index];  in flexrm_mbox_probe()
    1612  ring = &mbox->rings[desc->platform.msi_index];  in flexrm_mbox_probe()
    1647  mbox->controller.chans[index].con_priv = &mbox->rings[index];  in flexrm_mbox_probe()

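Line 1647 wires each mailbox channel's con_priv to its ring, and lines
1487/1612 reuse the MSI descriptor's msi_index as a ring index, so both
paths recover a ring in O(1). The con_priv wiring against the real
struct mbox_controller, with invented foo_* names around it:

#include <linux/mailbox_controller.h>

struct foo_ring {
        unsigned int num;
};

struct foo_mbox {
        struct foo_ring *rings;
        struct mbox_controller controller;
};

/* point each channel's con_priv at its ring, as probe does at line 1647,
 * so later callbacks can recover the ring without a search */
static void foo_wire_channels(struct foo_mbox *mbox, unsigned int num_rings)
{
        unsigned int index;

        for (index = 0; index < num_rings; index++)
                mbox->controller.chans[index].con_priv = &mbox->rings[index];
}
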
/drivers/net/ethernet/mellanox/mlx5/core/lib/

D | hv_vhca.h
     28  u16 rings;  member

/drivers/crypto/caam/

D | Kconfig
    123  number of job rings assigned to the kernel. The number of portals
    125  job rings.

/drivers/net/hippi/

D | Kconfig
     31  bool "Use large TX/RX rings"

/drivers/net/ethernet/marvell/

D | Kconfig
    139  transmit and receive rings.
    174  transmit and receive rings.

/drivers/net/ethernet/mellanox/mlx5/core/en/

D | hv_vhca_stats.c
    103  block->rings = priv->stats_nch;  in mlx5e_hv_vhca_stats_control()

/drivers/net/ethernet/neterion/

D | s2io.c
     699  struct ring_info *ring = &mac_control->rings[i];  in init_shared_mem()
     719  struct ring_info *ring = &mac_control->rings[i];  in init_shared_mem()
     791  struct ring_info *ring = &mac_control->rings[i];  in init_shared_mem()
     935  struct ring_info *ring = &mac_control->rings[i];  in free_shared_mem()
     956  struct ring_info *ring = &mac_control->rings[i];  in free_shared_mem()
    2220  struct ring_info *ring = &mac_control->rings[i];  in start_nic()
    2673  rxdp = mac_control->rings[ring_no].  in free_rxd_blk()
    2702  mac_control->rings[ring_no].rx_bufs_left -= 1;  in free_rxd_blk()
    2723  struct ring_info *ring = &mac_control->rings[i];  in free_rx_buffers()
    2803  struct ring_info *ring = &mac_control->rings[i];  in s2io_poll_inta()
    [all …]

/drivers/net/ethernet/intel/ice/

D | ice_lib.c
    1780  ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_ring **rings, u16 count)  in ice_vsi_cfg_txqs() argument
    1793  err = ice_vsi_cfg_txq(vsi, rings[q_idx], qg_buf);  in ice_vsi_cfg_txqs()
    2071  u16 rel_vmvf_num, struct ice_ring **rings, u16 count)  in ice_vsi_stop_tx_rings() argument
    2082  if (!rings || !rings[q_idx])  in ice_vsi_stop_tx_rings()
    2085  ice_fill_txq_meta(vsi, rings[q_idx], &txq_meta);  in ice_vsi_stop_tx_rings()
    2087  rings[q_idx], &txq_meta);  in ice_vsi_stop_tx_rings()

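These ice_lib.c helpers take whichever ring set the caller owns as a
struct ice_ring **rings plus a count, and guard each slot before use
(line 2082). The parameter style in miniature, with a hypothetical
foo_stop_one() standing in for the per-queue work:

#include <linux/errno.h>
#include <linux/types.h>

struct foo_ring {
        u16 q_idx;
};

static void foo_stop_one(struct foo_ring *ring)
{
        /* per-queue teardown would go here */
}

/* callers pass any ring-pointer array plus its length */
static int foo_stop_rings(struct foo_ring **rings, u16 count)
{
        u16 q_idx;

        if (!rings)
                return -EINVAL;

        for (q_idx = 0; q_idx < count; q_idx++) {
                if (!rings[q_idx])      /* mirrors the guard at line 2082 */
                        return -EINVAL;
                foo_stop_one(rings[q_idx]);
        }
        return 0;
}
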