
Searched refs:pools (Results 1 – 23 of 23) sorted by relevance

/drivers/gpu/drm/ttm/
ttm_page_alloc_dma.c
107 struct list_head pools; /* The 'struct device->dma_pools link */ member
158 struct list_head pools; member
174 struct list_head pools; member
530 list_for_each_entry_reverse(p, &_manager->pools, pools) { in ttm_dma_free_pool()
537 list_del(&p->pools); in ttm_dma_free_pool()
542 list_for_each_entry_reverse(pool, &dev->dma_pools, pools) { in ttm_dma_free_pool()
553 list_del(&pool->pools); in ttm_dma_free_pool()
607 INIT_LIST_HEAD(&sec_pool->pools); in ttm_dma_pool_init()
613 INIT_LIST_HEAD(&pool->pools); in ttm_dma_pool_init()
636 list_add(&sec_pool->pools, &_manager->pools); in ttm_dma_pool_init()
[all …]
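
The ttm_page_alloc_dma.c hits above all revolve around one idea: each DMA pool is linked both into its device's dma_pools list and into a global manager list, and teardown walks a list in reverse, unlinks the pool, then frees it. Below is a minimal sketch of that list-of-pools pattern; the demo_* names are invented here and stand in for the driver's real types.

#include <linux/list.h>
#include <linux/slab.h>

/* Illustrative stand-ins for the driver's pool and manager structures. */
struct demo_pool {
        struct list_head pools;                 /* link in the manager's pool list */
};

struct demo_manager {
        struct list_head pools;                 /* head of all registered pools */
};

static struct demo_manager demo_mgr = {
        .pools = LIST_HEAD_INIT(demo_mgr.pools),
};

static struct demo_pool *demo_pool_create(void)
{
        struct demo_pool *pool = kzalloc(sizeof(*pool), GFP_KERNEL);

        if (!pool)
                return NULL;
        INIT_LIST_HEAD(&pool->pools);
        list_add(&pool->pools, &demo_mgr.pools);        /* register with the manager */
        return pool;
}

static void demo_pools_free_all(void)
{
        struct demo_pool *pool, *tmp;

        /*
         * Walk newest-to-oldest, unlink, then free -- mirroring the
         * list_for_each_entry_reverse()/list_del() pairing in the hits above.
         */
        list_for_each_entry_safe_reverse(pool, tmp, &demo_mgr.pools, pools) {
                list_del(&pool->pools);
                kfree(pool);
        }
}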
ttm_page_alloc.c
119 struct ttm_page_pool pools[NUM_POOLS]; member
275 return &_manager->pools[pool_index]; in ttm_get_pool()
416 pool = &_manager->pools[(i + pool_offset)%NUM_POOLS]; in ttm_pool_shrink_scan()
433 count += _manager->pools[i].npages; in ttm_pool_shrink_count()
863 ttm_page_pool_free(&_manager->pools[i], FREE_ALL_PAGES, true); in ttm_page_alloc_fini()
937 p = &_manager->pools[i]; in ttm_page_alloc_debugfs()
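
ttm_page_alloc.c takes the other common shape: a fixed array pools[NUM_POOLS] owned by one manager. The shrinker indexes the array starting at a rotating offset, (i + pool_offset) % NUM_POOLS, so repeated shrink passes do not always hit pool 0 first, and the count callback simply sums npages over the array. A rough, self-contained sketch of that indexing (all demo_* names are invented):

#define DEMO_NUM_POOLS 4

struct demo_page_pool {
        unsigned int npages;                    /* pages currently cached */
};

struct demo_pool_manager {
        struct demo_page_pool pools[DEMO_NUM_POOLS];
};

/* Shrinker "count" side: how many pages could be reclaimed in total. */
static unsigned long demo_shrink_count(const struct demo_pool_manager *m)
{
        unsigned long count = 0;
        unsigned int i;

        for (i = 0; i < DEMO_NUM_POOLS; i++)
                count += m->pools[i].npages;
        return count;
}

/* Shrinker "scan" side: visit every pool once, starting at pool_offset. */
static void demo_shrink_scan(struct demo_pool_manager *m, unsigned int pool_offset)
{
        unsigned int i;

        for (i = 0; i < DEMO_NUM_POOLS; i++) {
                struct demo_page_pool *pool =
                        &m->pools[(i + pool_offset) % DEMO_NUM_POOLS];

                /* free some of pool->npages here in the real driver */
                (void)pool;
        }
}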
/drivers/net/ethernet/chelsio/libcxgb/
libcxgb_ppm.c
344 struct cxgbi_ppm_pool *pools; in ppm_alloc_cpu_pool() local
346 unsigned int max = (PCPU_MIN_UNIT_SIZE - sizeof(*pools)) << 3; in ppm_alloc_cpu_pool()
360 alloc_sz = sizeof(*pools) + sizeof(unsigned long) * bmap; in ppm_alloc_cpu_pool()
361 pools = __alloc_percpu(alloc_sz, __alignof__(struct cxgbi_ppm_pool)); in ppm_alloc_cpu_pool()
363 if (!pools) in ppm_alloc_cpu_pool()
367 struct cxgbi_ppm_pool *ppool = per_cpu_ptr(pools, cpu); in ppm_alloc_cpu_pool()
377 return pools; in ppm_alloc_cpu_pool()
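
ppm_alloc_cpu_pool() sizes one object as the pool struct plus a trailing bitmap, allocates it once per CPU with __alloc_percpu(), and then touches each CPU's copy through per_cpu_ptr(). A hedged sketch of that allocation shape; the demo_* names and fields are illustrative, not the cxgbi layout.

#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/types.h>

struct demo_cpu_pool {
        unsigned int next;                      /* example per-CPU state */
        unsigned long bmap[];                   /* trailing per-CPU bitmap */
};

static struct demo_cpu_pool __percpu *demo_alloc_cpu_pool(unsigned int bmap_words)
{
        size_t alloc_sz = sizeof(struct demo_cpu_pool) +
                          sizeof(unsigned long) * bmap_words;
        struct demo_cpu_pool __percpu *pools;
        unsigned int cpu;

        pools = __alloc_percpu(alloc_sz, __alignof__(struct demo_cpu_pool));
        if (!pools)
                return NULL;

        for_each_possible_cpu(cpu) {
                struct demo_cpu_pool *ppool = per_cpu_ptr(pools, cpu);

                ppool->next = 0;                /* per-CPU initialisation */
        }
        return pools;
}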
/drivers/staging/android/ion/
ion_system_heap.c
288 static void ion_system_heap_destroy_pools(struct ion_page_pool **pools) in ion_system_heap_destroy_pools() argument
293 if (pools[i]) in ion_system_heap_destroy_pools()
294 ion_page_pool_destroy(pools[i]); in ion_system_heap_destroy_pools()
297 static int ion_system_heap_create_pools(struct ion_page_pool **pools, in ion_system_heap_create_pools() argument
312 pools[i] = pool; in ion_system_heap_create_pools()
317 ion_system_heap_destroy_pools(pools); in ion_system_heap_create_pools()
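
The ion snippets pair a create helper with a destroy helper over an array of page pools: entries are filled one index at a time, and a failure part-way through unwinds by calling the same destroy routine on the partially filled array. A small sketch of that unwind pattern; the demo_* helpers below are placeholders, not the ion API, and the array is assumed to be zero-initialised by the caller.

#include <linux/slab.h>
#include <linux/errno.h>

#define DEMO_NUM_ORDERS 3

struct demo_page_pool {
        int order;
};

static void demo_destroy_pools(struct demo_page_pool **pools)
{
        int i;

        for (i = 0; i < DEMO_NUM_ORDERS; i++)
                if (pools[i]) {
                        kfree(pools[i]);
                        pools[i] = NULL;
                }
}

static int demo_create_pools(struct demo_page_pool **pools)
{
        int i;

        for (i = 0; i < DEMO_NUM_ORDERS; i++) {
                struct demo_page_pool *pool = kzalloc(sizeof(*pool), GFP_KERNEL);

                if (!pool)
                        goto err_create_pool;   /* unwind what was created */
                pool->order = i;
                pools[i] = pool;
        }
        return 0;

err_create_pool:
        demo_destroy_pools(pools);              /* safe: untouched slots are NULL */
        return -ENOMEM;
}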
/drivers/staging/lustre/lustre/ptlrpc/
sec_bulk.c
281 static unsigned long enc_pools_cleanup(struct page ***pools, int npools) in enc_pools_cleanup() argument
287 if (pools[i]) { in enc_pools_cleanup()
289 if (pools[i][j]) { in enc_pools_cleanup()
290 __free_page(pools[i][j]); in enc_pools_cleanup()
294 kfree(pools[i]); in enc_pools_cleanup()
295 pools[i] = NULL; in enc_pools_cleanup()
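
enc_pools_cleanup() frees a two-level structure: an array of pools, each of which is itself an array of page pointers, so every page is released with __free_page() and each per-pool page array with kfree(). A condensed sketch of that double loop (sizes and names here are illustrative):

#include <linux/gfp.h>
#include <linux/slab.h>

/*
 * Free 'npools' pools, each holding up to 'pages_per_pool' page pointers.
 * NULL entries simply mean that slot was never populated.
 */
static void demo_pools_cleanup(struct page ***pools, int npools,
                               int pages_per_pool)
{
        int i, j;

        for (i = 0; i < npools; i++) {
                if (!pools[i])
                        continue;
                for (j = 0; j < pages_per_pool; j++)
                        if (pools[i][j])
                                __free_page(pools[i][j]);
                kfree(pools[i]);
                pools[i] = NULL;
        }
}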
/drivers/atm/
zatm.c
1037 unsigned long pools; in zatm_int() local
1040 pools = zin(RQA); in zatm_int()
1041 EVENT("RQA (0x%08x)\n",pools,0); in zatm_int()
1042 for (i = 0; pools; i++) { in zatm_int()
1043 if (pools & 1) { in zatm_int()
1047 pools >>= 1; in zatm_int()
1051 unsigned long pools; in zatm_int() local
1053 pools = zin(RQU); in zatm_int()
1055 dev->number,pools); in zatm_int()
1057 for (i = 0; pools; i++) { in zatm_int()
[all …]
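
In zatm_int() the value read from the RQA/RQU registers is treated as a bitmask of pools needing service: the loop handles pool i whenever bit i is set and shifts the mask right until it is empty. The same walk, extracted into a tiny standalone helper (demo_handle_pool() is a placeholder for the per-pool work):

/* Invoke a callback for every pool whose bit is set in 'pools'. */
static void demo_for_each_ready_pool(unsigned long pools,
                                     void (*demo_handle_pool)(int pool))
{
        int i;

        for (i = 0; pools; i++) {
                if (pools & 1)
                        demo_handle_pool(i);
                pools >>= 1;
        }
}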
/drivers/soc/ti/
knav_qmss.h
209 struct list_head pools; member
305 struct list_head pools; member
363 list_for_each_entry(pool, &kdev->pools, list)
knav_qmss_queue.c
799 node = &region->pools; in knav_pool_create()
800 list_for_each_entry(pi, &region->pools, region_inst) { in knav_pool_create()
814 list_add_tail(&pool->list, &kdev->pools); in knav_pool_create()
1015 list_add(&pool->region_inst, &region->pools); in knav_queue_setup_region()
1098 INIT_LIST_HEAD(&region->pools); in knav_queue_setup_regions()
1333 list_for_each_entry_safe(pool, tmp, &region->pools, region_inst) in knav_queue_free_regions()
1715 INIT_LIST_HEAD(&kdev->pools); in knav_queue_probe()
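
The knav_qmss code keeps each pool on two lists at once: pool->list links it into the device-wide kdev->pools list, while pool->region_inst links it into the owning region's pools list, with every head set up by INIT_LIST_HEAD() before use. A bare-bones sketch of one object living on two lists simultaneously (names invented):

#include <linux/list.h>

struct demo_region {
        struct list_head pools;                 /* pools carved out of this region */
};

struct demo_device {
        struct list_head pools;                 /* every pool on the device */
};

struct demo_pool {
        struct list_head list;                  /* link in demo_device.pools */
        struct list_head region_inst;           /* link in demo_region.pools */
};

static void demo_pool_register(struct demo_device *kdev,
                               struct demo_region *region,
                               struct demo_pool *pool)
{
        list_add_tail(&pool->list, &kdev->pools);
        list_add(&pool->region_inst, &region->pools);
}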
/drivers/md/
dm.c
2769 struct dm_md_mempools *pools = kzalloc_node(sizeof(*pools), GFP_KERNEL, md->numa_node_id); in dm_alloc_md_mempools() local
2773 if (!pools) in dm_alloc_md_mempools()
2782 pools->io_pool = mempool_create_slab_pool(pool_size, _io_cache); in dm_alloc_md_mempools()
2783 if (!pools->io_pool) in dm_alloc_md_mempools()
2796 pools->bs = bioset_create(pool_size, front_pad, BIOSET_NEED_RESCUER); in dm_alloc_md_mempools()
2797 if (!pools->bs) in dm_alloc_md_mempools()
2800 if (integrity && bioset_integrity_create(pools->bs, pool_size)) in dm_alloc_md_mempools()
2803 return pools; in dm_alloc_md_mempools()
2806 dm_free_md_mempools(pools); in dm_alloc_md_mempools()
2811 void dm_free_md_mempools(struct dm_md_mempools *pools) in dm_free_md_mempools() argument
[all …]
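
dm_alloc_md_mempools() allocates the container with kzalloc_node(), then builds its members (a slab-backed io mempool and a bio_set), and any failure falls through to dm_free_md_mempools() so partially constructed state is released in one place. A reduced sketch of that allocate-or-unwind shape, using only a plain slab-backed mempool and invented demo_* names (the bio_set and integrity parts are left out):

#include <linux/mempool.h>
#include <linux/slab.h>

struct demo_md_mempools {
        mempool_t *io_pool;
};

static struct kmem_cache *demo_io_cache;        /* assumed created at module init */

static void demo_free_md_mempools(struct demo_md_mempools *pools)
{
        if (!pools)
                return;
        if (pools->io_pool)
                mempool_destroy(pools->io_pool);
        kfree(pools);
}

static struct demo_md_mempools *demo_alloc_md_mempools(unsigned int pool_size,
                                                       int numa_node)
{
        struct demo_md_mempools *pools;

        pools = kzalloc_node(sizeof(*pools), GFP_KERNEL, numa_node);
        if (!pools)
                return NULL;

        pools->io_pool = mempool_create_slab_pool(pool_size, demo_io_cache);
        if (!pools->io_pool)
                goto out;

        return pools;
out:
        demo_free_md_mempools(pools);           /* single unwind path */
        return NULL;
}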
dm.h
209 void dm_free_md_mempools(struct dm_md_mempools *pools);
dm-thin.c
522 struct list_head pools; member
528 INIT_LIST_HEAD(&dm_thin_pool_table.pools); in pool_table_init()
534 list_add(&pool->list, &dm_thin_pool_table.pools); in __pool_table_insert()
549 list_for_each_entry(tmp, &dm_thin_pool_table.pools, list) { in __pool_table_lookup()
565 list_for_each_entry(tmp, &dm_thin_pool_table.pools, list) { in __pool_table_lookup_metadata_dev()
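
dm-thin.c registers every active pool on one global table and finds a pool again by walking that list and comparing a key such as the metadata device. A minimal lookup in the same style; the demo_* types and the key field are stand-ins, and no locking is shown.

#include <linux/list.h>

struct demo_pool {
        struct list_head list;
        void *metadata_dev;                     /* lookup key, stand-in type */
};

static struct {
        struct list_head pools;
} demo_pool_table = {
        .pools = LIST_HEAD_INIT(demo_pool_table.pools),
};

static struct demo_pool *demo_pool_table_lookup(void *metadata_dev)
{
        struct demo_pool *tmp;

        list_for_each_entry(tmp, &demo_pool_table.pools, list)
                if (tmp->metadata_dev == metadata_dev)
                        return tmp;
        return NULL;
}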
/drivers/staging/fsl-dpaa2/ethernet/
README
28 - buffer pools
71 DPBPs represent hardware buffer pools. Packet I/O is performed in the context
126 The role of hardware buffer pools is storage of ingress frame data. Each network
dpni.c
202 cmd_params->dpbp_id[i] = cpu_to_le32(cfg->pools[i].dpbp_id); in dpni_set_pools()
204 cpu_to_le16(cfg->pools[i].buffer_size); in dpni_set_pools()
206 DPNI_BACKUP_POOL(cfg->pools[i].backup_pool, i); in dpni_set_pools()
dpni.h
133 } pools[DPNI_MAX_DPBP]; member
dpaa2-eth.c
2081 pools_params.pools[0].dpbp_id = priv->dpbp_dev->obj_desc.id; in bind_dpni()
2082 pools_params.pools[0].backup_pool = 0; in bind_dpni()
2083 pools_params.pools[0].buffer_size = DPAA2_ETH_RX_BUF_SIZE; in bind_dpni()
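
On the DPAA2 side the driver first fills a CPU-side pools array (dpbp_id, buffer_size and backup_pool per entry, as in bind_dpni()), and dpni_set_pools() then copies each entry into the firmware command with explicit cpu_to_le32()/cpu_to_le16() conversions. A stripped-down sketch of that marshalling step; the structures below are illustrative only, not the real MC command layout.

#include <linux/types.h>
#include <asm/byteorder.h>

#define DEMO_MAX_DPBP 8

struct demo_pools_cfg {
        unsigned int num_dpbp;
        struct {
                int dpbp_id;
                unsigned short buffer_size;
        } pools[DEMO_MAX_DPBP];
};

/* Little-endian wire format expected by a hypothetical firmware command. */
struct demo_cmd_set_pools {
        __le32 dpbp_id[DEMO_MAX_DPBP];
        __le16 buffer_size[DEMO_MAX_DPBP];
};

static void demo_fill_set_pools_cmd(struct demo_cmd_set_pools *cmd,
                                    const struct demo_pools_cfg *cfg)
{
        unsigned int i;

        for (i = 0; i < cfg->num_dpbp; i++) {
                cmd->dpbp_id[i] = cpu_to_le32(cfg->pools[i].dpbp_id);
                cmd->buffer_size[i] = cpu_to_le16(cfg->pools[i].buffer_size);
        }
}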
/drivers/soc/fsl/qbman/
qman_priv.h
181 u32 pools; member
qman_portal.c
282 pcfg->pools = qm_get_pools_sdqcr(); in qman_portal_probe()
qman.c
1636 void qman_p_static_dequeue_add(struct qman_portal *p, u32 pools) in qman_p_static_dequeue_add() argument
1641 pools &= p->config->pools; in qman_p_static_dequeue_add()
1642 p->sdqcr |= pools; in qman_p_static_dequeue_add()
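
qman stores the pool channels a portal may use as a bitmask (pcfg->pools, obtained from qm_get_pools_sdqcr() at probe time), and qman_p_static_dequeue_add() masks the caller's request against that capability mask before OR-ing it into the portal's SDQCR value. The core of that masking in isolation (demo names, no locking or register write shown):

#include <linux/types.h>

struct demo_portal_config {
        u32 pools;                              /* pool channels this portal may use */
};

struct demo_portal {
        const struct demo_portal_config *config;
        u32 sdqcr;                              /* static dequeue command value */
};

static void demo_static_dequeue_add(struct demo_portal *p, u32 pools)
{
        pools &= p->config->pools;              /* never enable channels we don't own */
        p->sdqcr |= pools;
        /* the real driver also updates the hardware SDQCR register here */
}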
/drivers/staging/fsl-mc/
README.txt
33 The MC uses DPAA2 hardware resources such as queues, buffer pools, and
65 | -buffer pools -DPMCP |
339 -DPBPs for network buffer pools
/drivers/staging/fsl-mc/bus/dpio/
dpio-driver.txt
26 C) allow drivers to manage hardware buffer pools
/drivers/net/ethernet/intel/ixgbe/
ixgbe_lib.c
515 bool pools = (find_first_zero_bit(&adapter->fwd_bitmask, 32) > 1); in ixgbe_set_sriov_queues() local
528 if ((vmdq_i > 32) || (vmdq_i > 16 && pools)) { in ixgbe_set_sriov_queues()
ixgbe_main.c
8790 bool pools; in ixgbe_setup_tc() local
8799 pools = (find_first_zero_bit(&adapter->fwd_bitmask, 32) > 1); in ixgbe_setup_tc()
8800 if (tc && pools && adapter->num_rx_pools > IXGBE_MAX_DCBMACVLANS) in ixgbe_setup_tc()
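
In both ixgbe hits, 'pools' is a boolean derived from the forwarding bitmask: find_first_zero_bit(&adapter->fwd_bitmask, 32) returns the index of the first unused pool, and a result greater than 1 means at least one extra forwarding pool is in use beyond the default pool 0. A tiny sketch of that test (demo_* names invented):

#include <linux/bitops.h>
#include <linux/types.h>

struct demo_adapter {
        unsigned long fwd_bitmask;              /* bit i set => forwarding pool i in use */
};

static bool demo_extra_pools_in_use(struct demo_adapter *adapter)
{
        /*
         * Pool 0 is always the default; a first free index above 1 means
         * at least one additional pool has been handed out.
         */
        return find_first_zero_bit(&adapter->fwd_bitmask, 32) > 1;
}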
/drivers/message/fusion/lsi/
mpi_history.txt
309 * Added generic defines for hot spare pools and RAID