Searched refs:first (Results 1 – 6 of 6) sorted by relevance
/mm/
interval_tree.c
     94  unsigned long first, unsigned long last) in anon_vma_interval_tree_iter_first() argument
     96  return __anon_vma_interval_tree_iter_first(root, first, last); in anon_vma_interval_tree_iter_first()
    101  unsigned long first, unsigned long last) in anon_vma_interval_tree_iter_next() argument
    103  return __anon_vma_interval_tree_iter_next(node, first, last); in anon_vma_interval_tree_iter_next()
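These hits are the argument lists and return statements of the two anon_vma interval-tree helpers: iter_first() finds the lowest anon_vma_chain whose VMA overlaps the page-offset range [first, last], and iter_next() resumes the walk from a previous hit. A minimal sketch of the usual caller pattern follows; it assumes kernel context, and anon_vma, pgoff_start and pgoff_end stand in for whatever the caller is walking (rmap code wraps this same loop in the anon_vma_interval_tree_foreach() macro):

    struct anon_vma_chain *avc;

    /* Visit every anon_vma_chain whose VMA overlaps [pgoff_start, pgoff_end]. */
    for (avc = anon_vma_interval_tree_iter_first(&anon_vma->rb_root,
                                                 pgoff_start, pgoff_end);
         avc;
         avc = anon_vma_interval_tree_iter_next(avc, pgoff_start, pgoff_end)) {
            struct vm_area_struct *vma = avc->vma;
            /* ... operate on vma ... */
    }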
vmalloc.c
    353  struct vmap_area *first; in alloc_vmap_area() local
    395  first = rb_entry(free_vmap_cache, struct vmap_area, rb_node); in alloc_vmap_area()
    396  addr = ALIGN(first->va_end, align); in alloc_vmap_area()
    408  first = NULL; in alloc_vmap_area()
    414  first = tmp; in alloc_vmap_area()
    422  if (!first) in alloc_vmap_area()
    427  while (addr + size > first->va_start && addr + size <= vend) { in alloc_vmap_area()
    428  if (addr + cached_hole_size < first->va_start) in alloc_vmap_area()
    429  cached_hole_size = first->va_start - addr; in alloc_vmap_area()
    430  addr = ALIGN(first->va_end, align); in alloc_vmap_area()
    [all …]
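The hits above are the core of the free-hole search in alloc_vmap_area(): starting from the cached node, the candidate address is repeatedly ALIGN()ed past each busy vmap_area's va_end until [addr, addr + size) fits before the next va_start, with cached_hole_size recording holes that were too small. Below is a toy user-space model of that advance-past-busy-ranges loop; the names and the plain array are illustrative stand-ins for the kernel's rbtree of vmap_areas, not its actual data structures:

    #include <stdio.h>

    #define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

    struct range { unsigned long start, end; };   /* stand-in for vmap_area */

    /* Walk address-ordered busy ranges, aligning the candidate address past
     * each range's end until a hole of the requested size fits before the
     * next busy range (or the end of the search window). */
    static unsigned long find_hole(const struct range *busy, int n,
                                   unsigned long vstart, unsigned long vend,
                                   unsigned long size, unsigned long align)
    {
        unsigned long addr = ALIGN(vstart, align);

        for (int i = 0; i < n; i++) {
            if (addr + size <= busy[i].start)      /* hole before this range fits */
                return addr;
            addr = ALIGN(busy[i].end, align);      /* skip past the busy range */
        }
        return addr + size <= vend ? addr : 0;     /* 0: no hole large enough */
    }

    int main(void)
    {
        struct range busy[] = { { 0x1000, 0x3000 }, { 0x4000, 0x9000 } };

        printf("hole at %#lx\n",
               find_hole(busy, 2, 0x1000, 0x10000, 0x1000, 0x1000));  /* 0x3000 */
        return 0;
    }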
rmap.c
   1038  int first = atomic_inc_and_test(&page->_mapcount); in do_page_add_anon_rmap() local
   1039  if (first) { in do_page_add_anon_rmap()
   1057  if (first) in do_page_add_anon_rmap()
   1804  int first; in hugepage_add_anon_rmap() local
   1809  first = atomic_inc_and_test(&page->_mapcount); in hugepage_add_anon_rmap()
   1810  if (first) in hugepage_add_anon_rmap()
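In both functions, "first" records whether this is the first mapping of the page: page->_mapcount starts at -1, and atomic_inc_and_test() increments and returns true only when the result is zero, i.e. exactly on the -1 to 0 transition, which is when the anon rmap setup has to run. A small user-space model of that semantics using C11 atomics (names are illustrative):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static atomic_int mapcount = -1;    /* stand-in for page->_mapcount */

    /* Model of atomic_inc_and_test(): increment, true iff the result is 0. */
    static bool inc_and_test(atomic_int *v)
    {
        return atomic_fetch_add(v, 1) + 1 == 0;   /* true only on -1 -> 0 */
    }

    int main(void)
    {
        printf("first mapping? %d\n", inc_and_test(&mapcount));   /* 1 */
        printf("first mapping? %d\n", inc_and_test(&mapcount));   /* 0 */
        return 0;
    }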
mmu_notifier.c
     78  mn = hlist_entry(mm->mmu_notifier_mm->list.first, in __mmu_notifier_release()
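hlist_entry() is container_of() applied to an hlist_node, so this line converts the raw first pointer of the notifier list back into the struct mmu_notifier that embeds it. A minimal user-space sketch of the same pattern, with illustrative stand-in types rather than the kernel's:

    #include <stddef.h>
    #include <stdio.h>

    /* Recover the containing struct from a pointer to a member inside it. */
    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct node { struct node *next; };             /* stand-in for hlist_node */
    struct notifier { int id; struct node hlist; }; /* stand-in for mmu_notifier */

    int main(void)
    {
        struct notifier n = { .id = 42 };
        struct node *first = &n.hlist;              /* what list.first would hold */
        struct notifier *mn = container_of(first, struct notifier, hlist);

        printf("id = %d\n", mn->id);                /* 42 */
        return 0;
    }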
Kconfig
    273  two situations. The first is on NUMA systems to put pages nearer
    462  memory. So when the PFRA "evicts" a page, it first attempts to use
    467  filesystem wishes to access a page in a file on disk, it first
ksm.c
    629  if (stable_node->hlist.first) in remove_rmap_item_from_tree()