/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/
D | pool.c |
    25  if (!xsk->pools) {  in mlx5e_xsk_get_pools()
    26  xsk->pools = kcalloc(MLX5E_MAX_NUM_CHANNELS,  in mlx5e_xsk_get_pools()
    27  sizeof(*xsk->pools), GFP_KERNEL);  in mlx5e_xsk_get_pools()
    28  if (unlikely(!xsk->pools))  in mlx5e_xsk_get_pools()
    41  kfree(xsk->pools);  in mlx5e_xsk_put_pools()
    42  xsk->pools = NULL;  in mlx5e_xsk_put_pools()
    54  xsk->pools[ix] = pool;  in mlx5e_xsk_add_pool()
    60  xsk->pools[ix] = NULL;  in mlx5e_xsk_remove_pool()

D | pool.h |
    12  if (!xsk || !xsk->pools)  in mlx5e_xsk_get_pool()
    18  return xsk->pools[ix];  in mlx5e_xsk_get_pool()

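Taken together, the pool.c and pool.h hits show one pattern: a per-channel pool array that is allocated lazily on first use, indexed by channel, and freed as a unit, with a NULL-safe getter. A minimal sketch of that pattern in plain C; calloc()/free() stand in for kcalloc()/kfree(), and MAX_CHANNELS, struct xsk_state, and the function names are made-up stand-ins, not the driver's API:

    #include <stdlib.h>

    #define MAX_CHANNELS 128        /* stand-in for MLX5E_MAX_NUM_CHANNELS */

    struct xsk_state {
        void **pools;       /* lazily allocated, one slot per channel */
    };

    /* Allocate the slot array on first use; 0 on success, -1 on OOM. */
    static int xsk_get_pools(struct xsk_state *xsk)
    {
        if (!xsk->pools) {
            xsk->pools = calloc(MAX_CHANNELS, sizeof(*xsk->pools));
            if (!xsk->pools)
                return -1;
        }
        return 0;
    }

    static void xsk_put_pools(struct xsk_state *xsk)
    {
        free(xsk->pools);
        xsk->pools = NULL;
    }

    /* Mirrors the pool.h getter: tolerate a never-allocated array. */
    static void *xsk_get_pool(struct xsk_state *xsk, unsigned int ix)
    {
        if (!xsk->pools || ix >= MAX_CHANNELS)
            return NULL;
        return xsk->pools[ix];
    }

Keeping the getter NULL-safe lets callers probe for a pool without first checking whether the array was ever allocated.
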
/drivers/dma-buf/heaps/ |
D | system_heap.c |
    64   struct dmabuf_page_pool *pools[NUM_ORDERS];  variable
    347  dmabuf_page_pool_free(pools[j], page);  in system_heap_dma_buf_release()
    377  page = dmabuf_page_pool_alloc(pools[i]);  in alloc_largest_available()
    499  pool = pools;  in system_get_pool_size()
    541  pools[i] = dmabuf_page_pool_create(order_flags[i], orders[i]);  in system_heap_create()
    543  if (IS_ERR(pools[i])) {  in system_heap_create()
    548  dmabuf_page_pool_destroy(pools[j]);  in system_heap_create()
    549  return PTR_ERR(pools[i]);  in system_heap_create()

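system_heap_create() creates one page pool per supported allocation order and, on failure, tears down only the pools created so far (lines 541-549). A hedged sketch of that create-or-unwind loop, using NULL returns and a toy pool type instead of the heap's ERR_PTR()-based API; the order list is illustrative:

    #include <stdlib.h>

    #define NUM_ORDERS 3

    struct page_pool {
        unsigned int order;
    };

    static struct page_pool *pools[NUM_ORDERS];
    static const unsigned int orders[NUM_ORDERS] = { 8, 4, 0 };

    static struct page_pool *pool_create(unsigned int order)
    {
        struct page_pool *p = malloc(sizeof(*p));

        if (p)
            p->order = order;
        return p;
    }

    static void pool_destroy(struct page_pool *p)
    {
        free(p);
    }

    static int heap_create(void)
    {
        int i, j;

        for (i = 0; i < NUM_ORDERS; i++) {
            pools[i] = pool_create(orders[i]);
            if (!pools[i]) {
                /* Unwind only what was created so far. */
                for (j = 0; j < i; j++)
                    pool_destroy(pools[j]);
                return -1;
            }
        }
        return 0;
    }
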
/drivers/net/ethernet/chelsio/libcxgb/ |
D | libcxgb_ppm.c |
    348  struct cxgbi_ppm_pool *pools;  in ppm_alloc_cpu_pool() local
    350  unsigned int max = (PCPU_MIN_UNIT_SIZE - sizeof(*pools)) << 3;  in ppm_alloc_cpu_pool()
    367  alloc_sz = sizeof(*pools) + sizeof(unsigned long) * bmap;  in ppm_alloc_cpu_pool()
    368  pools = __alloc_percpu(alloc_sz, __alignof__(struct cxgbi_ppm_pool));  in ppm_alloc_cpu_pool()
    370  if (!pools)  in ppm_alloc_cpu_pool()
    374  struct cxgbi_ppm_pool *ppool = per_cpu_ptr(pools, cpu);  in ppm_alloc_cpu_pool()
    384  return pools;  in ppm_alloc_cpu_pool()

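ppm_alloc_cpu_pool() sizes each per-CPU pool as a fixed header plus a variable-length id bitmap; line 350 derives the bitmap budget in bits from the remaining bytes (the << 3 is a multiply by 8), and line 367 sizes the allocation as header plus bitmap words. A sketch of the same sizing arithmetic with a C99 flexible array member; struct cpu_pool and cpu_pool_alloc() are hypothetical, and plain calloc() stands in for __alloc_percpu():

    #include <stdlib.h>
    #include <limits.h>

    #define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

    struct cpu_pool {
        unsigned int next;          /* next id to try */
        unsigned int avail;         /* free ids left in this pool */
        unsigned long bmap[];       /* bitmap sized at allocation time */
    };

    static struct cpu_pool *cpu_pool_alloc(unsigned int nr_ids)
    {
        /* Round the id count up to whole bitmap words, then size the
         * allocation as header + bitmap, as line 367 above does. */
        size_t words = (nr_ids + BITS_PER_LONG - 1) / BITS_PER_LONG;
        size_t alloc_sz = sizeof(struct cpu_pool)
                          + sizeof(unsigned long) * words;
        struct cpu_pool *pool = calloc(1, alloc_sz);

        if (pool)
            pool->avail = nr_ids;
        return pool;
    }
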
/drivers/atm/ |
D | zatm.c |
    1038  unsigned long pools;  in zatm_int() local
    1041  pools = zin(RQA);  in zatm_int()
    1042  EVENT("RQA (0x%08x)\n",pools,0);  in zatm_int()
    1043  for (i = 0; pools; i++) {  in zatm_int()
    1044  if (pools & 1) {  in zatm_int()
    1048  pools >>= 1;  in zatm_int()
    1052  unsigned long pools;  in zatm_int() local
    1054  pools = zin(RQU);  in zatm_int()
    1056  dev->number,pools);  in zatm_int()
    1058  for (i = 0; pools; i++) {  in zatm_int()
    [all …]

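Both zatm_int() loops walk a hardware status word one bit at a time, servicing pool i whenever bit i is set, and stopping as soon as no bits remain. A self-contained sketch of that set-bit iteration; refill_pool() is a hypothetical stand-in for the per-pool work:

    #include <stdio.h>

    static void refill_pool(int i)
    {
        printf("servicing pool %d\n", i);
    }

    static void service_pools(unsigned long pools)
    {
        int i;

        for (i = 0; pools; i++) {
            if (pools & 1)
                refill_pool(i);     /* bit i set: pool i needs service */
            pools >>= 1;
        }
    }

    int main(void)
    {
        service_pools(0x15UL);      /* pools 0, 2 and 4 */
        return 0;
    }
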
/drivers/soc/ti/ |
D | knav_qmss.h |
    203  struct list_head pools;  member
    304  struct list_head pools;  member
    363  list_for_each_entry(pool, &kdev->pools, list)

|
D | knav_qmss_queue.c |
    820   node = &region->pools;  in knav_pool_create()
    821   list_for_each_entry(pi, &region->pools, region_inst) {  in knav_pool_create()
    835   list_add_tail(&pool->list, &kdev->pools);  in knav_pool_create()
    1037  list_add(&pool->region_inst, &region->pools);  in knav_queue_setup_region()
    1121  INIT_LIST_HEAD(&region->pools);  in knav_queue_setup_regions()
    1359  list_for_each_entry_safe(pool, tmp, &region->pools, region_inst)  in knav_queue_free_regions()
    1783  INIT_LIST_HEAD(&kdev->pools);  in knav_queue_probe()

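The knav code keeps each pool on two lists at once: a device-wide list (kdev->pools, via pool->list) and its owning region's list (region->pools, via pool->region_inst), so teardown can walk either axis. A simplified sketch of that dual membership, using singly linked lists in place of the kernel's struct list_head and prepending where the driver uses list_add_tail(); all names here are illustrative:

    #include <stdlib.h>

    struct pool {
        const char *name;
        struct pool *dev_next;      /* link on the device-wide list */
        struct pool *region_next;   /* link on the owning region's list */
    };

    struct kdev   { struct pool *pools; };
    struct region { struct pool *pools; };

    static struct pool *pool_create(struct kdev *kdev, struct region *region,
                                    const char *name)
    {
        struct pool *pool = calloc(1, sizeof(*pool));

        if (!pool)
            return NULL;
        pool->name = name;

        /* Register on both lists, one link field per list head. */
        pool->dev_next = kdev->pools;
        kdev->pools = pool;
        pool->region_next = region->pools;
        region->pools = pool;
        return pool;
    }

Walking region->pools then visits exactly the pools carved from that region, which is how knav_queue_free_regions() tears them down.
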
/drivers/md/ |
D | dm.c |
    2839  struct dm_md_mempools *pools = kzalloc_node(sizeof(*pools), GFP_KERNEL, md->numa_node_id);  in dm_alloc_md_mempools() local
    2844  if (!pools)  in dm_alloc_md_mempools()
    2853  ret = bioset_init(&pools->io_bs, pool_size, io_front_pad, 0);  in dm_alloc_md_mempools()
    2856  if (integrity && bioset_integrity_create(&pools->io_bs, pool_size))  in dm_alloc_md_mempools()
    2868  ret = bioset_init(&pools->bs, pool_size, front_pad, 0);  in dm_alloc_md_mempools()
    2872  if (integrity && bioset_integrity_create(&pools->bs, pool_size))  in dm_alloc_md_mempools()
    2875  return pools;  in dm_alloc_md_mempools()
    2878  dm_free_md_mempools(pools);  in dm_alloc_md_mempools()
    2885  if (!pools)  in dm_free_md_mempools()
    2883  void dm_free_md_mempools(struct dm_md_mempools *pools)  in dm_free_md_mempools() argument
    [all …]

D | dm.h |
    230  void dm_free_md_mempools(struct dm_md_mempools *pools);

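dm_alloc_md_mempools() and dm_free_md_mempools() follow a common kernel contract: a zeroed container is allocated first, every mid-initialization failure funnels into the free routine (line 2878), and free is safe on NULL or on a partially built object (line 2885). A hedged sketch of that contract in plain C; struct buf_set and buf_init()/buf_exit() are hypothetical stand-ins for the bioset_init()/bioset_exit() pair:

    #include <stdlib.h>

    struct buf_set { void *mem; };

    static int buf_init(struct buf_set *bs, size_t size)
    {
        bs->mem = malloc(size);
        return bs->mem ? 0 : -1;
    }

    static void buf_exit(struct buf_set *bs)
    {
        free(bs->mem);
        bs->mem = NULL;
    }

    struct md_mempools {
        struct buf_set bs;
        struct buf_set io_bs;
    };

    void free_md_mempools(struct md_mempools *pools)
    {
        if (!pools)             /* free of NULL is a no-op, as in dm.c */
            return;
        buf_exit(&pools->bs);
        buf_exit(&pools->io_bs);
        free(pools);
    }

    struct md_mempools *alloc_md_mempools(size_t pool_size)
    {
        struct md_mempools *pools = calloc(1, sizeof(*pools));

        if (!pools)
            return NULL;
        if (buf_init(&pools->io_bs, pool_size))
            goto out;
        if (buf_init(&pools->bs, pool_size))
            goto out;
        return pools;
    out:
        free_md_mempools(pools);    /* safe: unset members are zeroed */
        return NULL;
    }
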
D | dm-thin.c |
    525  struct list_head pools;  member
    531  INIT_LIST_HEAD(&dm_thin_pool_table.pools);  in pool_table_init()
    542  list_add(&pool->list, &dm_thin_pool_table.pools);  in __pool_table_insert()
    557  list_for_each_entry(tmp, &dm_thin_pool_table.pools, list) {  in __pool_table_lookup()
    573  list_for_each_entry(tmp, &dm_thin_pool_table.pools, list) {  in __pool_table_lookup_metadata_dev()

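dm-thin keeps every active pool on one global table and finds an existing pool by its pool device or its metadata device, so targets sharing a device bind to the same pool. A minimal linear-lookup sketch, with an array in place of the kernel list and integer keys standing in for the struct block_device pointers the real lookups compare:

    #include <stddef.h>

    struct thin_pool {
        int pool_dev;       /* stand-in key: the pool block device */
        int md_dev;         /* stand-in key: the metadata block device */
    };

    /* Return the pool already bound to md_dev, if any, in the spirit
     * of __pool_table_lookup_metadata_dev(). */
    static struct thin_pool *table_lookup_md(struct thin_pool *table,
                                             size_t n, int md_dev)
    {
        size_t i;

        for (i = 0; i < n; i++)
            if (table[i].md_dev == md_dev)
                return &table[i];
        return NULL;
    }
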
/drivers/soc/fsl/qbman/ |
D | qman_priv.h |
    177  u32 pools;  member

D | qman_portal.c |
    248  pcfg->pools = qm_get_pools_sdqcr();  in qman_portal_probe()

D | qman.c |
    1760  void qman_p_static_dequeue_add(struct qman_portal *p, u32 pools)  in qman_p_static_dequeue_add() argument
    1765  pools &= p->config->pools;  in qman_p_static_dequeue_add()
    1766  p->sdqcr |= pools;  in qman_p_static_dequeue_add()

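qman_portal_probe() records which pool channels the portal owns (pcfg->pools), and qman_p_static_dequeue_add() masks every later request against that set before merging it into the SDQCR value. A minimal sketch of the mask-then-merge pattern; struct portal and the field names are illustrative:

    struct portal {
        unsigned int configured_pools;  /* set once at probe time */
        unsigned int sdqcr;             /* active static-dequeue sources */
    };

    static void static_dequeue_add(struct portal *p, unsigned int pools)
    {
        pools &= p->configured_pools;   /* drop pools this portal doesn't own */
        p->sdqcr |= pools;              /* merge into the active set */
    }

Masking first means a buggy caller can never enable dequeue from a pool channel the portal was not configured with.
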
/drivers/net/ethernet/freescale/dpaa2/ |
D | dpsw.c |
    1158  cmd_params->dpbp_id[i] = cpu_to_le32(cfg->pools[i].dpbp_id);  in dpsw_ctrl_if_set_pools()
    1160  cpu_to_le16(cfg->pools[i].buffer_size);  in dpsw_ctrl_if_set_pools()
    1162  DPSW_BACKUP_POOL(cfg->pools[i].backup_pool, i);  in dpsw_ctrl_if_set_pools()

D | dpni.c |
    177  cmd_params->dpbp_id[i] = cpu_to_le32(cfg->pools[i].dpbp_id);  in dpni_set_pools()
    179  cpu_to_le16(cfg->pools[i].buffer_size);  in dpni_set_pools()
    181  DPNI_BACKUP_POOL(cfg->pools[i].backup_pool, i);  in dpni_set_pools()

D | dpsw.h |
    210  } pools[DPSW_MAX_DPBP];  member

D | dpni.h |
    110  } pools[DPNI_MAX_DPBP];  member

D | dpaa2-switch.c |
    2665  dpsw_ctrl_if_pools_cfg.pools[0].dpbp_id = dpbp_attrs.id;  in dpaa2_switch_setup_dpbp()
    2666  dpsw_ctrl_if_pools_cfg.pools[0].buffer_size = DPAA2_SWITCH_RX_BUF_SIZE;  in dpaa2_switch_setup_dpbp()
    2667  dpsw_ctrl_if_pools_cfg.pools[0].backup_pool = 0;  in dpaa2_switch_setup_dpbp()

D | dpaa2-eth.c |
    3883  pools_params.pools[0].dpbp_id = priv->dpbp_dev->obj_desc.id;  in dpaa2_eth_bind_dpni()
    3884  pools_params.pools[0].backup_pool = 0;  in dpaa2_eth_bind_dpni()
    3885  pools_params.pools[0].buffer_size = priv->rx_buf_size;  in dpaa2_eth_bind_dpni()

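dpaa2-eth and dpaa2-switch both fill a host-order pools config (dpbp_id, buffer_size, backup_pool per pool) and hand it to dpni_set_pools()/dpsw_ctrl_if_set_pools(), which marshal each field little-endian into the firmware command, folding backup_pool into a per-index bit. A hedged sketch of both halves; the struct layouts and MAX_DPBP are illustrative rather than the real command format, and to_le32()/to_le16() are portable stand-ins for the kernel's cpu_to_le32()/cpu_to_le16():

    #include <stdint.h>
    #include <string.h>

    #define MAX_DPBP 8      /* illustrative bound, not the real constant */

    /* Host-order configuration, as built in dpaa2_eth_bind_dpni(). */
    struct pools_cfg {
        uint8_t num_dpbp;
        struct {
            uint32_t dpbp_id;
            uint16_t buffer_size;
            int backup_pool;
        } pools[MAX_DPBP];
    };

    /* Wire format: explicitly little-endian, one backup bit per pool. */
    struct cmd_set_pools {
        uint8_t num_dpbp;
        uint8_t backup_pool_mask;
        uint32_t dpbp_id[MAX_DPBP];
        uint16_t buffer_size[MAX_DPBP];
    };

    static uint32_t to_le32(uint32_t v)
    {
        uint8_t b[4] = { v & 0xff, (v >> 8) & 0xff,
                         (v >> 16) & 0xff, (v >> 24) & 0xff };
        uint32_t out;

        memcpy(&out, b, sizeof(out));   /* little-endian on any host */
        return out;
    }

    static uint16_t to_le16(uint16_t v)
    {
        uint8_t b[2] = { v & 0xff, (v >> 8) & 0xff };
        uint16_t out;

        memcpy(&out, b, sizeof(out));
        return out;
    }

    static void set_pools(struct cmd_set_pools *cmd,
                          const struct pools_cfg *cfg)
    {
        int i;

        cmd->num_dpbp = cfg->num_dpbp;
        cmd->backup_pool_mask = 0;
        for (i = 0; i < cfg->num_dpbp; i++) {
            cmd->dpbp_id[i] = to_le32(cfg->pools[i].dpbp_id);
            cmd->buffer_size[i] = to_le16(cfg->pools[i].buffer_size);
            if (cfg->pools[i].backup_pool)
                cmd->backup_pool_mask |= (uint8_t)(1u << i);
        }
    }
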
/drivers/net/ethernet/mellanox/mlx5/core/ |
D | en.h |
    797  struct xsk_buff_pool **pools;  member

/drivers/message/fusion/lsi/ |
D | mpi_history.txt |
    309  * Added generic defines for hot spare pools and RAID
