/drivers/staging/android/ion/heaps/
D | ion_system_heap.c |
     46  struct ion_page_pool *pools[NUM_ORDERS];  member
     53  struct ion_page_pool *pool = heap->pools[order_to_index(order)];  in alloc_buffer_page()
     70  pool = heap->pools[order_to_index(order)];  in free_buffer_page()
    191  pool = sys_heap->pools[i];  in ion_system_heap_shrink()
    211  static void ion_system_heap_destroy_pools(struct ion_page_pool **pools)  in ion_system_heap_destroy_pools() argument
    216  if (pools[i])  in ion_system_heap_destroy_pools()
    217  ion_page_pool_destroy(pools[i]);  in ion_system_heap_destroy_pools()
    220  static int ion_system_heap_create_pools(struct ion_page_pool **pools)  in ion_system_heap_create_pools() argument
    234  pools[i] = pool;  in ion_system_heap_create_pools()
    240  ion_system_heap_destroy_pools(pools);  in ion_system_heap_create_pools()
    [all …]

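The pattern here: the system heap keeps one page pool per supported allocation order, maps an order to its slot with order_to_index(), and on a partial creation failure tears down every pool it managed to create. A minimal userspace sketch of that create/unwind pattern; the pool type and the pool_create()/pool_destroy() helpers are stand-ins for ion_page_pool_create()/ion_page_pool_destroy(), not the driver's API, and the { 8, 4, 0 } order list mirrors ION's orders[] table:

#include <stdio.h>
#include <stdlib.h>

#define NUM_ORDERS 3

/* Orders served by the heap, highest first, as in ION's orders[]. */
static const unsigned int orders[NUM_ORDERS] = { 8, 4, 0 };

/* Stand-in for struct ion_page_pool; the real pool caches free pages. */
struct pool {
        unsigned int order;
};

/* Map an allocation order to its slot, like ION's order_to_index(). */
static int order_to_index(unsigned int order)
{
        int i;

        for (i = 0; i < NUM_ORDERS; i++)
                if (order == orders[i])
                        return i;
        return -1;
}

static struct pool *pool_create(unsigned int order)
{
        struct pool *p = malloc(sizeof(*p));

        if (p)
                p->order = order;
        return p;
}

static void pool_destroy(struct pool *p)
{
        free(p);
}

/* Destroy every pool that was successfully created. */
static void destroy_pools(struct pool **pools)
{
        int i;

        for (i = 0; i < NUM_ORDERS; i++)
                if (pools[i])
                        pool_destroy(pools[i]);
}

/* Create one pool per order; on failure, unwind the ones that exist.
 * The caller must pass a zero-initialized array for the unwind to be
 * safe, which is what ION's kzalloc'd heap provides. */
static int create_pools(struct pool **pools)
{
        int i;

        for (i = 0; i < NUM_ORDERS; i++) {
                pools[i] = pool_create(orders[i]);
                if (!pools[i]) {
                        destroy_pools(pools);
                        return -1;
                }
        }
        return 0;
}

int main(void)
{
        struct pool *pools[NUM_ORDERS] = { NULL };

        if (create_pools(pools))
                return 1;
        printf("order 4 lives in slot %d\n", order_to_index(4));
        destroy_pools(pools);
        return 0;
}

Zero-initializing the array is what lets a single destroy routine serve both the error path and normal teardown.
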
/drivers/gpu/drm/ttm/
D | ttm_page_alloc_dma.c |
     98  struct list_head pools; /* The 'struct device->dma_pools link */  member
    149  struct list_head pools;  member
    165  struct list_head pools;  member
    507  list_for_each_entry_reverse(p, &_manager->pools, pools) {  in ttm_dma_free_pool()
    514  list_del(&p->pools);  in ttm_dma_free_pool()
    519  list_for_each_entry_reverse(pool, &dev->dma_pools, pools) {  in ttm_dma_free_pool()
    530  list_del(&pool->pools);  in ttm_dma_free_pool()
    584  INIT_LIST_HEAD(&sec_pool->pools);  in ttm_dma_pool_init()
    589  INIT_LIST_HEAD(&pool->pools);  in ttm_dma_pool_init()
    619  list_add(&sec_pool->pools, &_manager->pools);  in ttm_dma_pool_init()
    [all …]

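ttm_page_alloc_dma.c threads each pool onto two lists at once: the owning device's dma_pools list and the global _manager->pools list, each through its own struct list_head node, so teardown can walk either side. A self-contained sketch of that double membership using a minimal reimplementation of the kernel's intrusive list; in the real driver the two memberships live in two separate structs (sec_pool and pool), so collapsing them into one struct with two nodes here is a simplification:

#include <stddef.h>
#include <stdio.h>

/* Minimal intrusive list, modeled on the kernel's struct list_head. */
struct list_head {
        struct list_head *next, *prev;
};

static void INIT_LIST_HEAD(struct list_head *h)
{
        h->next = h;
        h->prev = h;
}

static void list_add(struct list_head *n, struct list_head *h)
{
        n->next = h->next;
        n->prev = h;
        h->next->prev = n;
        h->next = n;
}

static void list_del(struct list_head *n)
{
        n->prev->next = n->next;
        n->next->prev = n->prev;
        n->next = n->prev = NULL;
}

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

/* One pool object that belongs to two lists through two nodes. */
struct dma_pool {
        struct list_head pools;     /* link on the device's dma_pools */
        struct list_head mgr_pools; /* link on the global manager's list */
        const char *name;
};

int main(void)
{
        struct list_head dev_pools, manager_pools;
        struct dma_pool p = { .name = "wc-cached" };
        struct list_head *it;

        INIT_LIST_HEAD(&dev_pools);
        INIT_LIST_HEAD(&manager_pools);

        /* Register the pool with both owners, one node each. */
        list_add(&p.pools, &dev_pools);
        list_add(&p.mgr_pools, &manager_pools);

        for (it = dev_pools.next; it != &dev_pools; it = it->next)
                printf("device pool: %s\n",
                       container_of(it, struct dma_pool, pools)->name);

        /* Dropping the device-side membership leaves the manager's
         * list untouched. */
        list_del(&p.pools);
        printf("manager list still populated: %s\n",
               manager_pools.next != &manager_pools ? "yes" : "no");
        return 0;
}
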
D | ttm_page_alloc.c |
    114  struct ttm_page_pool pools[NUM_POOLS];  member
    243  return &_manager->pools[pool_index];  in ttm_get_pool()
    399  pool = &_manager->pools[(i + pool_offset)%NUM_POOLS];  in ttm_pool_shrink_scan()
    422  pool = &_manager->pools[i];  in ttm_pool_shrink_count()
   1022  ttm_page_pool_free(&_manager->pools[i], FREE_ALL_PAGES, true);  in ttm_page_alloc_fini()
   1181  p = &_manager->pools[i];  in ttm_page_alloc_debugfs()

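ttm_pool_shrink_scan() starts each scan at a rotating pool_offset, so repeated shrinker invocations spread reclaim across all NUM_POOLS pools instead of always draining pools[0] first. A single-threaded sketch of that round-robin walk; the kernel serializes access to pool_offset against concurrent shrinkers, which this version omits:

#include <stdio.h>

#define NUM_POOLS 4

struct page_pool {
        int npages; /* pages currently cached in this pool */
};

static struct page_pool pools[NUM_POOLS] = {
        { 10 }, { 10 }, { 10 }, { 10 },
};

/* Free up to 'target' pages. The scan starts at a rotating offset so
 * successive calls spread reclaim across all pools rather than always
 * draining pools[0] first. */
static int shrink_scan(int target)
{
        static unsigned int pool_offset;
        int freed = 0;
        unsigned int i;

        pool_offset = (pool_offset + 1) % NUM_POOLS;
        for (i = 0; i < NUM_POOLS && freed < target; i++) {
                struct page_pool *pool =
                        &pools[(i + pool_offset) % NUM_POOLS];
                int n = pool->npages < target - freed
                        ? pool->npages : target - freed;

                pool->npages -= n;
                freed += n;
        }
        return freed;
}

int main(void)
{
        int pass;

        for (pass = 0; pass < 3; pass++)
                printf("pass %d: freed %d pages\n", pass, shrink_scan(6));
        return 0;
}

Each pass drains a different pool first, which is the fairness property the modular index buys.
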
/drivers/net/ethernet/chelsio/libcxgb/
D | libcxgb_ppm.c |
    349  struct cxgbi_ppm_pool *pools;  in ppm_alloc_cpu_pool() local
    351  unsigned int max = (PCPU_MIN_UNIT_SIZE - sizeof(*pools)) << 3;  in ppm_alloc_cpu_pool()
    368  alloc_sz = sizeof(*pools) + sizeof(unsigned long) * bmap;  in ppm_alloc_cpu_pool()
    369  pools = __alloc_percpu(alloc_sz, __alignof__(struct cxgbi_ppm_pool));  in ppm_alloc_cpu_pool()
    371  if (!pools)  in ppm_alloc_cpu_pool()
    375  struct cxgbi_ppm_pool *ppool = per_cpu_ptr(pools, cpu);  in ppm_alloc_cpu_pool()
    385  return pools;  in ppm_alloc_cpu_pool()

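ppm_alloc_cpu_pool() sizes each per-CPU pool as a fixed header plus a trailing id bitmap, and the `<< 3` converts the bytes left in one percpu unit into bits so the bitmap can never overflow PCPU_MIN_UNIT_SIZE. A userspace sketch of just the sizing arithmetic; the 32 KiB value for PCPU_MIN_UNIT_SIZE is an assumption, and a single calloc() stands in for __alloc_percpu(), which really allocates one copy per CPU:

#include <stdio.h>
#include <stdlib.h>
#include <limits.h>

/* Assumed to match the kernel's minimum per-CPU unit size. */
#define PCPU_MIN_UNIT_SIZE (32 * 1024)
#define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

/* Per-CPU pool header followed by an allocation bitmap, shaped
 * loosely like struct cxgbi_ppm_pool. */
struct cpu_pool {
        unsigned int next;    /* hint: next bit to scan from */
        unsigned int nbits;   /* usable bits in bmap[] */
        unsigned long bmap[]; /* flexible array, sized at alloc time */
};

int main(void)
{
        /* Spare bytes in one per-CPU unit, expressed as bits (<< 3). */
        unsigned int max_bits =
                (PCPU_MIN_UNIT_SIZE - sizeof(struct cpu_pool)) << 3;
        unsigned int want = 100000; /* ids we would like per CPU */
        unsigned int nbits = want < max_bits ? want : max_bits;
        unsigned int nlongs = (nbits + BITS_PER_LONG - 1) / BITS_PER_LONG;
        size_t alloc_sz = sizeof(struct cpu_pool)
                        + sizeof(unsigned long) * nlongs;
        struct cpu_pool *pool = calloc(1, alloc_sz);

        if (!pool)
                return 1;
        pool->nbits = nlongs * BITS_PER_LONG;
        printf("per-cpu pool: %zu bytes for %u id bits (cap %u)\n",
               alloc_sz, pool->nbits, max_bits);
        free(pool);
        return 0;
}
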
/drivers/atm/
D | zatm.c |
   1038  unsigned long pools;  in zatm_int() local
   1041  pools = zin(RQA);  in zatm_int()
   1042  EVENT("RQA (0x%08x)\n",pools,0);  in zatm_int()
   1043  for (i = 0; pools; i++) {  in zatm_int()
   1044  if (pools & 1) {  in zatm_int()
   1048  pools >>= 1;  in zatm_int()
   1052  unsigned long pools;  in zatm_int() local
   1054  pools = zin(RQU);  in zatm_int()
   1056  dev->number,pools);  in zatm_int()
   1058  for (i = 0; pools; i++) {  in zatm_int()
   [all …]

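zatm_int() services receive pools by reading a status register (RQA or RQU) and walking its set bits: test bit 0, act on that pool, shift right, repeat until the word is empty. A sketch of that bit-walk, with the per-pool service reduced to a printf:

#include <stdio.h>

/* Walk the set bits of a hardware status word, servicing each pool
 * whose bit is set, shifting until the word is exhausted. */
static void service_pools(unsigned long pools)
{
        int i;

        for (i = 0; pools; i++) {
                if (pools & 1)
                        printf("servicing receive pool %d\n", i);
                pools >>= 1;
        }
}

int main(void)
{
        service_pools(0x2aUL); /* bits 1, 3 and 5 set */
        return 0;
}

The loop terminates as soon as no higher bits remain, so a sparse status word costs only as many iterations as its highest set bit.
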
/drivers/soc/ti/
D | knav_qmss.h |
    203  struct list_head pools;  member
    304  struct list_head pools;  member
    363  list_for_each_entry(pool, &kdev->pools, list)

D | knav_qmss_queue.c |
    827  node = &region->pools;  in knav_pool_create()
    828  list_for_each_entry(pi, &region->pools, region_inst) {  in knav_pool_create()
    842  list_add_tail(&pool->list, &kdev->pools);  in knav_pool_create()
   1043  list_add(&pool->region_inst, &region->pools);  in knav_queue_setup_region()
   1126  INIT_LIST_HEAD(&region->pools);  in knav_queue_setup_regions()
   1365  list_for_each_entry_safe(pool, tmp, &region->pools, region_inst)  in knav_queue_free_regions()
   1787  INIT_LIST_HEAD(&kdev->pools);  in knav_queue_probe()

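knav_pool_create() walks region->pools to pick an insertion point, keeping the region's pool instances ordered by their offset within the region (the real code is also hunting for a large-enough free gap between neighbours, which this sketch omits). A rough analogue of the sorted insertion with a plain singly linked list instead of the kernel's intrusive one:

#include <stdio.h>
#include <stdlib.h>

struct pool {
        unsigned int offset; /* start of the pool within the region */
        struct pool *next;
};

/* Insert so the list stays sorted by offset, mirroring how
 * knav_pool_create() picks its insertion node in region->pools. */
static void insert_sorted(struct pool **head, struct pool *p)
{
        while (*head && (*head)->offset < p->offset)
                head = &(*head)->next;
        p->next = *head;
        *head = p;
}

int main(void)
{
        unsigned int offs[] = { 4096, 0, 8192 };
        struct pool *head = NULL, *it;
        size_t i;

        for (i = 0; i < sizeof(offs) / sizeof(offs[0]); i++) {
                struct pool *p = calloc(1, sizeof(*p));

                if (!p)
                        return 1;
                p->offset = offs[i];
                insert_sorted(&head, p);
        }
        for (it = head; it; it = it->next)
                printf("pool at offset %u\n", it->offset);
        return 0;
}
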
/drivers/md/
D | dm.c |
   3060  struct dm_md_mempools *pools = kzalloc_node(sizeof(*pools), GFP_KERNEL, md->numa_node_id);  in dm_alloc_md_mempools() local
   3065  if (!pools)  in dm_alloc_md_mempools()
   3075  ret = bioset_init(&pools->io_bs, pool_size, io_front_pad, 0);  in dm_alloc_md_mempools()
   3078  if (integrity && bioset_integrity_create(&pools->io_bs, pool_size))  in dm_alloc_md_mempools()
   3090  ret = bioset_init(&pools->bs, pool_size, front_pad, 0);  in dm_alloc_md_mempools()
   3094  if (integrity && bioset_integrity_create(&pools->bs, pool_size))  in dm_alloc_md_mempools()
   3097  return pools;  in dm_alloc_md_mempools()
   3100  dm_free_md_mempools(pools);  in dm_alloc_md_mempools()
   3105  void dm_free_md_mempools(struct dm_md_mempools *pools)  in dm_free_md_mempools() argument
   3107  if (!pools)  in dm_free_md_mempools()
   [all …]

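dm_alloc_md_mempools() uses the classic one-exit-path unwind: allocate the container zeroed, initialize each bio set in turn, and on any failure jump to a single label that calls the same dm_free_md_mempools() used for normal teardown, which tolerates partially constructed pools. A sketch with hypothetical stubs standing in for bioset_init()/bioset_exit():

#include <stdio.h>
#include <stdlib.h>

/* Stand-in for a dm bio set; assume an init step that can fail. */
struct bioset {
        void *mem;
};

static int bioset_init_stub(struct bioset *bs, size_t sz)
{
        bs->mem = malloc(sz);
        return bs->mem ? 0 : -1;
}

static void bioset_exit_stub(struct bioset *bs)
{
        free(bs->mem); /* free(NULL) is a no-op, so partial init is fine */
        bs->mem = NULL;
}

struct md_mempools {
        struct bioset bs;
        struct bioset io_bs;
};

/* Safe on partially constructed pools, like dm_free_md_mempools(). */
static void free_md_mempools(struct md_mempools *pools)
{
        if (!pools)
                return;
        bioset_exit_stub(&pools->bs);
        bioset_exit_stub(&pools->io_bs);
        free(pools);
}

static struct md_mempools *alloc_md_mempools(void)
{
        struct md_mempools *pools = calloc(1, sizeof(*pools));

        if (!pools)
                return NULL;
        if (bioset_init_stub(&pools->io_bs, 256))
                goto out;
        if (bioset_init_stub(&pools->bs, 256))
                goto out;
        return pools;
out:
        /* One exit path frees whatever was set up, as dm.c does. */
        free_md_mempools(pools);
        return NULL;
}

int main(void)
{
        struct md_mempools *pools = alloc_md_mempools();

        if (!pools)
                return 1;
        puts("mempools ready");
        free_md_mempools(pools);
        return 0;
}
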
D | dm.h |
    208  void dm_free_md_mempools(struct dm_md_mempools *pools);

D | dm-thin.c |
    524  struct list_head pools;  member
    530  INIT_LIST_HEAD(&dm_thin_pool_table.pools);  in pool_table_init()
    541  list_add(&pool->list, &dm_thin_pool_table.pools);  in __pool_table_insert()
    556  list_for_each_entry(tmp, &dm_thin_pool_table.pools, list) {  in __pool_table_lookup()
    572  list_for_each_entry(tmp, &dm_thin_pool_table.pools, list) {  in __pool_table_lookup_metadata_dev()

/drivers/soc/fsl/qbman/
D | qman_priv.h |
    177  u32 pools;  member

D | qman_portal.c |
    297  pcfg->pools = qm_get_pools_sdqcr();  in qman_portal_probe()

D | qman.c |
   1758  void qman_p_static_dequeue_add(struct qman_portal *p, u32 pools)  in qman_p_static_dequeue_add() argument
   1763  pools &= p->config->pools;  in qman_p_static_dequeue_add()
   1764  p->sdqcr |= pools;  in qman_p_static_dequeue_add()

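qman_p_static_dequeue_add() clamps the caller's pool-channel mask to the channels the portal was actually configured with (pcfg->pools above) before OR-ing it into the accumulated SDQCR value, so a caller can never enable dequeue from a channel the portal does not own. A sketch of that mask-then-accumulate step:

#include <stdio.h>
#include <stdint.h>

struct portal {
        uint32_t configured; /* pool channels this portal may dequeue from */
        uint32_t sdqcr;      /* accumulated static dequeue command */
};

/* Enable extra pool channels, silently dropping any bit the portal
 * was not configured with, as qman_p_static_dequeue_add() does. */
static void static_dequeue_add(struct portal *p, uint32_t pools)
{
        pools &= p->configured;
        p->sdqcr |= pools;
}

int main(void)
{
        struct portal p = { .configured = 0x0000000f, .sdqcr = 0 };

        static_dequeue_add(&p, 0x00000013); /* bit 4 is filtered out */
        printf("sdqcr = 0x%08x\n", p.sdqcr); /* prints 0x00000003 */
        return 0;
}
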
/drivers/net/ethernet/freescale/dpaa2/
D | dpni.c |
    174  cmd_params->dpbp_id[i] = cpu_to_le32(cfg->pools[i].dpbp_id);  in dpni_set_pools()
    176  cpu_to_le16(cfg->pools[i].buffer_size);  in dpni_set_pools()
    178  DPNI_BACKUP_POOL(cfg->pools[i].backup_pool, i);  in dpni_set_pools()

D | dpni.h |
    102  } pools[DPNI_MAX_DPBP];  member

D | dpaa2-eth.c |
   3097  pools_params.pools[0].dpbp_id = priv->dpbp_dev->obj_desc.id;  in bind_dpni()
   3098  pools_params.pools[0].backup_pool = 0;  in bind_dpni()
   3099  pools_params.pools[0].buffer_size = DPAA2_ETH_RX_BUF_SIZE;  in bind_dpni()

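bind_dpni() fills a pools configuration with one primary buffer pool: which DPBP object backs it, the Rx buffer size, and whether the pool is only a backup (spill-over) pool; dpni_set_pools() then serializes those fields little-endian for the firmware command. A sketch of filling such a config struct; the field layout loosely follows dpni.h, the DPNI_MAX_DPBP value of 8 is assumed, and the id and buffer size are hypothetical values, not DPAA2_ETH_RX_BUF_SIZE:

#include <stdio.h>
#include <stdint.h>

#define DPNI_MAX_DPBP 8 /* assumed bound on attachable buffer pools */

/* Shape of a dpni pools config: a count plus per-pool attributes. */
struct pools_cfg {
        uint8_t num_dpbp;
        struct {
                int      dpbp_id;     /* which buffer pool object */
                uint16_t buffer_size; /* Rx buffer size for that pool */
                int      backup_pool; /* nonzero = spill-over only */
        } pools[DPNI_MAX_DPBP];
};

int main(void)
{
        struct pools_cfg cfg = { 0 };

        /* One primary pool, as bind_dpni() sets up in dpaa2-eth.c. */
        cfg.num_dpbp = 1;
        cfg.pools[0].dpbp_id = 42;       /* hypothetical DPBP object id */
        cfg.pools[0].buffer_size = 2048; /* hypothetical Rx buffer size */
        cfg.pools[0].backup_pool = 0;

        printf("pool 0: dpbp %d, %u-byte buffers, %s\n",
               cfg.pools[0].dpbp_id,
               (unsigned int)cfg.pools[0].buffer_size,
               cfg.pools[0].backup_pool ? "backup" : "primary");
        return 0;
}
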
/drivers/message/fusion/lsi/
D | mpi_history.txt |
    309  * Added generic defines for hot spare pools and RAID