Searched refs:freed (Results 1 – 7 of 7) sorted by relevance
Directory: /mm/

vmscan.c
    315  unsigned long freed = 0;  [in do_shrink_slab(), local]
    401  freed += ret;  [in do_shrink_slab()]
    425  trace_mm_shrink_slab_end(shrinker, nid, freed, nr, new_nr, total_scan);  [in do_shrink_slab()]
    426  return freed;  [in do_shrink_slab()]
    463  unsigned long freed = 0;  [in shrink_slab(), local]
    478  freed = 1;  [in shrink_slab()]
    501  freed += do_shrink_slab(&sc, shrinker, nr_scanned, nr_eligible);  [in shrink_slab()]
    507  return freed;  [in shrink_slab()]
    512  unsigned long freed;  [in drop_slab_node(), local]
    517  freed = 0;  [in drop_slab_node()]
    [all …]

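The vmscan.c matches all follow one accumulation pattern: each call into a shrinker returns how many objects it reclaimed, and the caller sums those returns into a local freed total that it eventually traces (trace_mm_shrink_slab_end()) and returns; shrink_slab() then sums the per-shrinker totals the same way. A minimal user-space sketch of that pattern, where scan_batch() is a hypothetical stand-in for a shrinker callback and not a kernel function:

#include <stdio.h>

/* Hypothetical stand-in for a shrinker's scan callback: returns how many
 * objects it freed in one batch, or 0 when nothing is left to reclaim. */
static unsigned long scan_batch(unsigned long *remaining, unsigned long batch)
{
    unsigned long ret = (*remaining < batch) ? *remaining : batch;

    *remaining -= ret;
    return ret;
}

/* Mirrors the shape of do_shrink_slab(): scan in batches and accumulate
 * every per-batch result into a single 'freed' total. */
static unsigned long do_shrink(unsigned long *remaining)
{
    unsigned long freed = 0;            /* vmscan.c:315 pattern */
    unsigned long ret;

    while ((ret = scan_batch(remaining, 128)) != 0)
        freed += ret;                   /* vmscan.c:401 pattern */

    return freed;                       /* vmscan.c:426 pattern */
}

int main(void)
{
    unsigned long cache_objects = 1000;

    printf("freed %lu objects\n", do_shrink(&cache_objects));
    return 0;
}
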
oom_kill.c
    1003  unsigned long freed = 0;  [in out_of_memory(), local]
    1010  blocking_notifier_call_chain(&oom_notify_list, 0, &freed);  [in out_of_memory()]
    1011  if (freed > 0)  [in out_of_memory()]

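The oom_kill.c matches show out_of_memory() first giving the OOM notifier chain a chance to reclaim memory: the chain is called with &freed, and if any notifier reported progress (freed > 0) the kill is skipped. A simplified user-space model of that call-chain-then-check shape; the callbacks and amounts here are invented for illustration, only blocking_notifier_call_chain(&oom_notify_list, 0, &freed) appears in the source:

#include <stddef.h>
#include <stdio.h>

/* Simplified model of a notifier chain: each callback may reclaim some
 * memory and adds the amount to *freed. */
typedef void (*notify_fn)(unsigned long *freed);

static void drop_driver_cache(unsigned long *freed) { *freed += 32; }
static void drop_fw_buffers(unsigned long *freed)   { *freed += 0;  }

static notify_fn oom_notify_list[] = { drop_driver_cache, drop_fw_buffers };

static int out_of_memory(void)
{
    unsigned long freed = 0;                    /* oom_kill.c:1003 pattern */

    /* Walk the chain, letting every notifier report into &freed. */
    for (size_t i = 0; i < sizeof(oom_notify_list) / sizeof(oom_notify_list[0]); i++)
        oom_notify_list[i](&freed);

    if (freed > 0) {                            /* oom_kill.c:1011 pattern */
        printf("notifiers freed %lu pages, no victim needed\n", freed);
        return 1;
    }
    printf("nothing freed, would pick a victim\n");
    return 0;
}

int main(void)
{
    return !out_of_memory();
}
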
shmem.c
    236  long freed;  [in shmem_recalc_inode(), local]
    238  freed = info->alloced - info->swapped - inode->i_mapping->nrpages;  [in shmem_recalc_inode()]
    239  if (freed > 0) {  [in shmem_recalc_inode()]
    242  percpu_counter_add(&sbinfo->used_blocks, -freed);  [in shmem_recalc_inode()]
    243  info->alloced -= freed;  [in shmem_recalc_inode()]
    244  inode->i_blocks -= freed * BLOCKS_PER_PAGE;  [in shmem_recalc_inode()]
    245  shmem_unacct_blocks(info->flags, freed);  [in shmem_recalc_inode()]

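The shmem.c matches are the inode accounting in shmem_recalc_inode(): pages that were allocated to the inode but are now neither in the page cache nor in swap count as freed, and their blocks are returned to the superblock and inode counters. A small sketch of just that arithmetic, with the structs reduced to placeholders and an assumed BLOCKS_PER_PAGE value:

#include <stdio.h>

/* Assumed for illustration: 4 KiB page, i_blocks counted in 512-byte units. */
#define BLOCKS_PER_PAGE 8

/* Minimal stand-ins for the fields the matched lines touch. */
struct shmem_info { long alloced; long swapped; long nrpages; };
struct sb_info    { long used_blocks; };

/* Mirrors shmem_recalc_inode(): whatever was allocated but is no longer in
 * the page cache or in swap has been freed, so give those blocks back. */
static void recalc_inode(struct shmem_info *info, struct sb_info *sb, long *i_blocks)
{
    long freed = info->alloced - info->swapped - info->nrpages;  /* shmem.c:238 */

    if (freed > 0) {
        sb->used_blocks -= freed;          /* percpu_counter_add(..., -freed) */
        info->alloced   -= freed;
        *i_blocks       -= freed * BLOCKS_PER_PAGE;
    }
}

int main(void)
{
    struct shmem_info info = { .alloced = 100, .swapped = 20, .nrpages = 50 };
    struct sb_info sb = { .used_blocks = 500 };
    long i_blocks = 100 * BLOCKS_PER_PAGE;

    recalc_inode(&info, &sb, &i_blocks);
    printf("used_blocks=%ld alloced=%ld i_blocks=%ld\n",
           sb.used_blocks, info.alloced, i_blocks);
    return 0;
}
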
Kconfig.debug
    50  reduce the risk of information leaks from freed data. This does

vmalloc.c
    487  unsigned long freed = 0;  [in alloc_vmap_area(), local]
    488  blocking_notifier_call_chain(&vmap_notify_list, 0, &freed);  [in alloc_vmap_area()]
    489  if (freed > 0) {  [in alloc_vmap_area()]

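The vmalloc.c matches mirror the oom_kill.c pattern, applied to address space: when alloc_vmap_area() cannot find room it calls the vmap notifier chain with &freed and retries if the notifiers reported progress. A rough sketch of that fallback-and-retry shape, where purge_lazy_areas() is a hypothetical stand-in for whatever the notifiers actually do:

#include <stdio.h>

/* Hypothetical reclaim hook standing in for the vmap notifier chain:
 * reports how much space it managed to release into *freed. */
static void purge_lazy_areas(unsigned long *freed)
{
    *freed += 4096;
}

/* Mirrors the fallback the matches show: on failure, ask the notifiers to
 * free address space and retry only if they reported progress. */
static void *alloc_area(void)
{
    void *p = NULL;                     /* pretend the first attempt failed */
    unsigned long freed = 0;            /* vmalloc.c:487 pattern */

    purge_lazy_areas(&freed);           /* stand-in for the notifier call  */
    if (freed > 0)                      /* vmalloc.c:489 pattern */
        p = (void *)0x1;                /* the real code retries the search here */
    return p;
}

int main(void)
{
    printf("%s\n", alloc_area() ? "retried after reclaim" : "allocation failed");
    return 0;
}
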
hugetlb.c
    4247  long freed)  [in hugetlb_unreserve_pages(), argument]
    4267  inode->i_blocks -= (blocks_per_huge_page(h) * freed);  [in hugetlb_unreserve_pages()]
    4274  gbl_reserve = hugepage_subpool_put_pages(spool, (chg - freed));  [in hugetlb_unreserve_pages()]

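In the hugetlb.c matches, freed arrives as an argument to hugetlb_unreserve_pages(): the freed huge pages leave the inode's block count, and the difference between what was charged (chg) and what was actually freed is handed back to the subpool. A minimal sketch of just that accounting, assuming 2 MiB huge pages and i_blocks in 512-byte units:

#include <stdio.h>

/* Assumed for illustration: 2 MiB huge pages, 512-byte block units. */
#define BLOCKS_PER_HUGE_PAGE (2L * 1024 * 1024 / 512)

/* Mirrors the arithmetic in hugetlb_unreserve_pages(): 'freed' pages leave
 * the inode's block count, and the charged-but-not-freed remainder is
 * returned to the pool (the hugepage_subpool_put_pages() call). */
static long unreserve_pages(long *i_blocks, long chg, long freed)
{
    *i_blocks -= BLOCKS_PER_HUGE_PAGE * freed;   /* hugetlb.c:4267 pattern */
    return chg - freed;                          /* hugetlb.c:4274 pattern */
}

int main(void)
{
    long i_blocks = 10 * BLOCKS_PER_HUGE_PAGE;
    long give_back = unreserve_pages(&i_blocks, 10, 4);

    printf("i_blocks=%ld, returned %ld reserved pages\n", i_blocks, give_back);
    return 0;
}
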
slab.c
    4085  int freed;  [in cache_reap(), local]
    4087  freed = drain_freelist(searchp, n, (n->free_limit +  [in cache_reap()]
    4089  STATS_ADD_REAPED(searchp, freed);  [in cache_reap()]

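The slab.c matches are the periodic reaper: cache_reap() drains part of a cache's freelist and feeds the number of objects it released into the reap statistics. A toy model of that shape; drain_freelist() here is a stand-in, and the "drain about a fifth of the limit" heuristic is only an approximation of the truncated expression in the match:

#include <stdio.h>

/* Minimal model of a per-cache freelist and its reap statistics. */
struct cache {
    int  free_objects;   /* unused objects sitting on the freelist    */
    int  free_limit;     /* how many we are allowed to keep around    */
    long reaped;         /* stands in for the STATS_ADD_REAPED() stat */
};

/* Hypothetical drain helper: releases up to 'tofree' unused objects and
 * returns how many were actually freed. */
static int drain_freelist(struct cache *c, int tofree)
{
    int n = (c->free_objects < tofree) ? c->free_objects : tofree;

    c->free_objects -= n;
    return n;
}

/* Mirrors the cache_reap() shape from the matches: drain a slice of the
 * surplus and account the freed count in the statistics. */
static void cache_reap(struct cache *c)
{
    int freed;                                        /* slab.c:4085 pattern */

    freed = drain_freelist(c, (c->free_limit + 4) / 5);
    c->reaped += freed;                               /* STATS_ADD_REAPED()  */
}

int main(void)
{
    struct cache c = { .free_objects = 100, .free_limit = 60, .reaped = 0 };

    cache_reap(&c);
    printf("reaped %ld objects\n", c.reaped);
    return 0;
}
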