/mm/

mmap.c
     81  struct vm_area_struct *vma, struct vm_area_struct *prev,
    353  unsigned long prev = 0, pend = 0;  [in browse_rb(), local]
    358  if (vma->vm_start < prev) {  [in browse_rb()]
    360  vma->vm_start, prev);  [in browse_rb()]
    383  prev = vma->vm_start;  [in browse_rb()]
    717  struct vm_area_struct *prev, struct rb_node **rb_link,  [in __vma_link(), argument]
    720  __vma_link_list(mm, vma, prev);  [in __vma_link()]
    725  struct vm_area_struct *prev, struct rb_node **rb_link,  [in vma_link(), argument]
    735  __vma_link(mm, vma, prev, rb_link, rb_parent);  [in vma_link()]
    751  struct vm_area_struct *prev;  [in __insert_vm_struct(), local]
    [all …]

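The browse_rb() hits above are a debug walk that carries the previous vm_start through the traversal and flags any area that starts below it. A minimal userspace sketch of the same prev-cursor ordering check; the struct and field names here are illustrative, not the kernel's:

    #include <stdio.h>

    struct area {
        unsigned long start;
        struct area *next;
    };

    /* Count ordering violations, mirroring the "vm_start < prev" test in
     * browse_rb(): prev always holds the start of the last area seen. */
    static int check_sorted(const struct area *head)
    {
        unsigned long prev = 0;
        int bugs = 0;

        for (const struct area *a = head; a; a = a->next) {
            if (a->start < prev) {
                fprintf(stderr, "area %lu sorts before %lu\n", a->start, prev);
                bugs++;
            }
            prev = a->start;
        }
        return bugs;
    }
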
madvise.c
     70  struct vm_area_struct **prev,  [in madvise_behavior(), argument]
    134  *prev = vma;  [in madvise_behavior()]
    139  *prev = vma_merge(mm, *prev, start, end, new_flags, vma->anon_vma,  [in madvise_behavior()]
    142  if (*prev) {  [in madvise_behavior()]
    143  vma = *prev;  [in madvise_behavior()]
    147  *prev = vma;  [in madvise_behavior()]
    263  struct vm_area_struct **prev,  [in madvise_willneed(), argument]
    270  *prev = vma;  [in madvise_willneed()]
    299  *prev = NULL; /* tell sys_madvise we drop mmap_lock */  [in madvise_willneed()]
    525  struct vm_area_struct **prev,  [in madvise_cold(), argument]
    [all …]

slob.c
    240  slob_t *prev, *cur, *aligned = NULL;  [in slob_page_alloc(), local]
    244  for (prev = NULL, cur = sp->freelist; ; prev = cur, cur = slob_next(cur)) {  [in slob_page_alloc()]
    267  prev = cur;  [in slob_page_alloc()]
    274  if (prev)  [in slob_page_alloc()]
    275  set_slob(prev, slob_units(prev), next);  [in slob_page_alloc()]
    279  if (prev)  [in slob_page_alloc()]
    280  set_slob(prev, slob_units(prev), cur + units);  [in slob_page_alloc()]
    385  slob_t *prev, *next, *b = (slob_t *)block;  [in slob_free(), local]
    441  prev = sp->freelist;  [in slob_free()]
    442  next = slob_next(prev);  [in slob_free()]
    [all …]

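The slob_page_alloc() hits sketch a first-fit scan of a singly linked free list: prev trails cur so the matched block can be unlinked, or a remainder spliced back, once a fit is found. A self-contained sketch of that prev/cur pattern with plain pointers instead of the packed slob_t offsets; all type and function names here are illustrative:

    #include <stddef.h>

    struct fblock {
        size_t units;              /* free units in this block */
        struct fblock *next;
    };

    /* First fit: unlink and return the first block with at least "units"
     * free units; prev lets us repair the list around the match. */
    static struct fblock *freelist_alloc(struct fblock **freelist, size_t units)
    {
        struct fblock *prev = NULL, *cur;

        for (cur = *freelist; cur; prev = cur, cur = cur->next) {
            if (cur->units < units)
                continue;
            if (prev)
                prev->next = cur->next;   /* unlink from mid-list */
            else
                *freelist = cur->next;    /* unlink the list head */
            return cur;
        }
        return NULL;                      /* no block fits */
    }

The real allocator also handles alignment and splits oversized blocks, which the listing only hints at in the "cur + units" hit.
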
mlock.c
    534  static int mlock_fixup(struct vm_area_struct *vma, struct vm_area_struct **prev,  [in mlock_fixup(), argument]
    551  *prev = vma_merge(mm, *prev, start, end, newflags, vma->anon_vma,  [in mlock_fixup()]
    554  if (*prev) {  [in mlock_fixup()]
    555  vma = *prev;  [in mlock_fixup()]
    595  *prev = vma;  [in mlock_fixup()]
    603  struct vm_area_struct * vma, * prev;  [in apply_vma_lock_flags(), local]
    617  prev = vma->vm_prev;  [in apply_vma_lock_flags()]
    619  prev = vma;  [in apply_vma_lock_flags()]
    630  error = mlock_fixup(vma, &prev, nstart, tmp, newflags);  [in apply_vma_lock_flags()]
    634  if (nstart < prev->vm_end)  [in apply_vma_lock_flags()]
    [all …]

interval_tree.c
     29  struct vm_area_struct *prev,  [in vma_interval_tree_insert_after(), argument]
     36  VM_BUG_ON_VMA(vma_start_pgoff(node) != vma_start_pgoff(prev), node);  [in vma_interval_tree_insert_after()]
     38  if (!prev->shared.rb.rb_right) {  [in vma_interval_tree_insert_after()]
     39  parent = prev;  [in vma_interval_tree_insert_after()]
     40  link = &prev->shared.rb.rb_right;  [in vma_interval_tree_insert_after()]
     42  parent = rb_entry(prev->shared.rb.rb_right,  [in vma_interval_tree_insert_after()]

util.c
    279  struct vm_area_struct *prev)  [in __vma_link_list(), argument]
    283  vma->vm_prev = prev;  [in __vma_link_list()]
    284  if (prev) {  [in __vma_link_list()]
    285  next = prev->vm_next;  [in __vma_link_list()]
    286  prev->vm_next = vma;  [in __vma_link_list()]
    298  struct vm_area_struct *prev, *next;  [in __vma_unlink_list(), local]
    301  prev = vma->vm_prev;  [in __vma_unlink_list()]
    302  if (prev)  [in __vma_unlink_list()]
    303  prev->vm_next = next;  [in __vma_unlink_list()]
    307  next->vm_prev = prev;  [in __vma_unlink_list()]

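The util.c hits are the heart of __vma_link_list() and __vma_unlink_list(): splice a node in right after prev (or at the list head when prev is NULL) and patch both directions again on removal. A standalone sketch of that insert/unlink pair; the node and list types are illustrative, and the head handling mirrors what the mm->mmap pointer does in the kernel:

    struct node {
        struct node *prev, *next;
    };

    struct nlist {
        struct node *head;
    };

    /* Insert "n" immediately after "prev"; a NULL prev means "insert at
     * the head", which is how __vma_link_list() treats mm->mmap. */
    static void link_after(struct nlist *l, struct node *n, struct node *prev)
    {
        struct node *next;

        n->prev = prev;
        if (prev) {
            next = prev->next;
            prev->next = n;
        } else {
            next = l->head;
            l->head = n;
        }
        n->next = next;
        if (next)
            next->prev = n;
    }

    /* Undo the above: route prev and next around "n". */
    static void unlink_node(struct nlist *l, struct node *n)
    {
        struct node *prev = n->prev, *next = n->next;

        if (prev)
            prev->next = next;
        else
            l->head = next;
        if (next)
            next->prev = prev;
    }

The nommu.c hit at line 621 calls the same __vma_link_list() helper after locating prev through the rbtree.
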
mprotect.c
    518  struct vm_area_struct *vma, *prev;  [in do_mprotect_pkey(), local]
    558  prev = vma->vm_prev;  [in do_mprotect_pkey()]
    577  prev = vma;  [in do_mprotect_pkey()]
    621  error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);  [in do_mprotect_pkey()]
    626  if (nstart < prev->vm_end)  [in do_mprotect_pkey()]
    627  nstart = prev->vm_end;  [in do_mprotect_pkey()]
    631  vma = prev->vm_next;  [in do_mprotect_pkey()]

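do_mprotect_pkey() and apply_vma_lock_flags() (mlock.c above) share one loop shape: walk the VMAs covering [start, end), hand each to a fixup helper that may merge the current VMA into *prev, then resume from prev->vm_end and prev->vm_next so a freshly merged VMA is not visited twice. A schematic of that loop with the fixup left abstract; the types and the apply_to_range() name are illustrative, not kernel API:

    struct varea {
        unsigned long start, end;
        struct varea *next;
    };

    /* Schematic range walk: fixup() may merge cur into *prev, so the next
     * position and the next area are always taken from *prev afterwards. */
    static int apply_to_range(struct varea **prev, struct varea *cur,
                              unsigned long start, unsigned long end,
                              int (*fixup)(struct varea *cur, struct varea **prev,
                                           unsigned long start, unsigned long end))
    {
        unsigned long nstart = start, tmp;
        int error = 0;

        while (cur && nstart < end) {
            tmp = cur->end < end ? cur->end : end;
            error = fixup(cur, prev, nstart, tmp);
            if (error)
                break;
            nstart = tmp;
            if (nstart < (*prev)->end)
                nstart = (*prev)->end;   /* skip the part merged into *prev */
            cur = (*prev)->next;         /* resume after whatever *prev became */
        }
        return error;
    }

mbind_range() in mempolicy.c below drives its vma_merge() calls with the same prev bookkeeping inside a for loop.
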
mempolicy.c
     816  struct vm_area_struct *prev;  [in mbind_range(), local]
     826  prev = vma->vm_prev;  [in mbind_range()]
     828  prev = vma;  [in mbind_range()]
     830  for (; vma && vma->vm_start < end; prev = vma, vma = vma->vm_next) {  [in mbind_range()]
     839  prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags,  [in mbind_range()]
     843  if (prev) {  [in mbind_range()]
     844  vma = prev;  [in mbind_range()]
    2395  struct rb_node *prev = rb_prev(n);  [in sp_lookup(), local]
    2396  if (!prev)  [in sp_lookup()]
    2398  w = rb_entry(prev, struct sp_node, nd);  [in sp_lookup()]
    [all …]

swapfile.c
     761  unsigned long prev;  [in set_cluster_next(), local]
     768  prev = this_cpu_read(*si->cluster_next_cpu);  [in set_cluster_next()]
     774  if ((prev >> SWAP_ADDRESS_SPACE_SHIFT) !=  [in set_cluster_next()]
    1479  struct swap_info_struct *p, *prev;  [in swapcache_free_entries(), local]
    1485  prev = NULL;  [in swapcache_free_entries()]
    1496  p = swap_info_get_cont(entries[i], prev);  [in swapcache_free_entries()]
    1499  prev = p;  [in swapcache_free_entries()]
    2168  unsigned int prev, bool frontswap)  [in find_next_to_unuse(), argument]
    2179  for (i = prev + 1; i < si->max; i++) {  [in find_next_to_unuse()]

memcontrol.c
    1142  struct mem_cgroup *prev,  [in mem_cgroup_iter(), argument]
    1156  if (prev && !reclaim)  [in mem_cgroup_iter()]
    1157  pos = prev;  [in mem_cgroup_iter()]
    1160  if (prev)  [in mem_cgroup_iter()]
    1173  if (prev && reclaim->generation != iter->generation)  [in mem_cgroup_iter()]
    1204  if (!prev)  [in mem_cgroup_iter()]
    1238  else if (!prev)  [in mem_cgroup_iter()]
    1245  if (prev && prev != root)  [in mem_cgroup_iter()]
    1246  css_put(&prev->css);  [in mem_cgroup_iter()]
    1257  struct mem_cgroup *prev)  [in mem_cgroup_iter_break(), argument]
    [all …]

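mem_cgroup_iter() uses prev as a hand-off token: the caller passes back whatever the previous call returned, and the iterator pins the next cgroup before returning it while dropping the pin it was handed (the css_put(&prev->css) hit). A much-simplified sketch of that reference hand-off over a plain refcounted tree; the traversal helper and types are illustrative, and the kernel version additionally special-cases the root and per-reclaim generation state:

    #include <stdlib.h>

    struct cg {
        int refcount;
        struct cg *parent, *first_child, *next_sibling;
    };

    static void cg_get(struct cg *c) { c->refcount++; }
    static void cg_put(struct cg *c) { if (--c->refcount == 0) free(c); }

    /* Plain pre-order step: a NULL pos starts at root, a NULL result ends it. */
    static struct cg *cg_next(struct cg *root, struct cg *pos)
    {
        if (!pos)
            return root;
        if (pos->first_child)
            return pos->first_child;
        while (pos != root) {
            if (pos->next_sibling)
                return pos->next_sibling;
            pos = pos->parent;
        }
        return NULL;
    }

    /* Iterator with the prev hand-off: pin what we return, unpin what we
     * were handed, so a caller that loops to completion leaks nothing. */
    static struct cg *iter_next(struct cg *root, struct cg *prev)
    {
        struct cg *next = cg_next(root, prev);

        if (next)
            cg_get(next);
        if (prev)
            cg_put(prev);
        return next;
    }

mem_cgroup_iter_break() exists for callers that stop early and still need the last returned pin released.
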
nommu.c
    565  struct vm_area_struct *pvma, *prev;  [in add_vma_to_mm(), local]
    617  prev = NULL;  [in add_vma_to_mm()]
    619  prev = rb_entry(rb_prev, struct vm_area_struct, vm_rb);  [in add_vma_to_mm()]
    621  __vma_link_list(mm, vma, prev);  [in add_vma_to_mm()]

slab.c
    2202  p = n->slabs_free.prev;  [in drain_freelist()]
    3825  struct array_cache __percpu *cpu_cache, *prev;  [in do_tune_cpucache(), local]
    3832  prev = cachep->cpu_cache;  [in do_tune_cpucache()]
    3838  if (prev)  [in do_tune_cpucache()]
    3846  if (!prev)  [in do_tune_cpucache()]
    3853  struct array_cache *ac = per_cpu_ptr(prev, cpu);  [in do_tune_cpucache()]
    3862  free_percpu(prev);  [in do_tune_cpucache()]

internal.h
    352  struct vm_area_struct *prev);

zsmalloc.c
    1209  static bool can_merge(struct size_class *prev, int pages_per_zspage,  [in can_merge(), argument]
    1212  if (prev->pages_per_zspage == pages_per_zspage &&  [in can_merge()]
    1213  prev->objs_per_zspage == objs_per_zspage)  [in can_merge()]

vmscan.c
    168  if ((_page)->lru.prev != _base) { \
    169  struct page *prev; \
    171  prev = lru_to_page(&(_page->lru)); \
    172  prefetchw(&prev->_field); \

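These vmscan.c hits come from the prefetch helper macros used while walking an LRU list from the tail: before the current page is processed, the entry that will be visited next (reached through lru.prev) is prefetched for write, so the pointer chase overlaps with the current iteration's work. Roughly the same idea in userspace with the GCC/Clang builtin; the page type and field are stand-ins:

    struct page_like {
        struct page_like *lru_prev;   /* stand-in for page->lru.prev */
        unsigned long flags;
    };

    /* Walk backwards, prefetching the element we will touch next so its
     * cache miss overlaps with the work on the current one. */
    static unsigned long walk_backwards(struct page_like *newest)
    {
        unsigned long sum = 0;

        for (struct page_like *p = newest; p; p = p->lru_prev) {
            if (p->lru_prev)
                __builtin_prefetch(&p->lru_prev->flags, 1 /* for write */);
            sum += p->flags;          /* stand-in for real work */
        }
        return sum;
    }
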
vmalloc.c
    601  head = head->prev;  [in link_va()]
    798  if (next->prev != head) {  [in merge_or_add_vmap_area()]
    799  sibling = list_entry(next->prev, struct vmap_area, list);  [in merge_or_add_vmap_area()]

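In merge_or_add_vmap_area() the list's prev pointer reaches the free area sitting just below the insertion point; if that sibling ends exactly where the new range begins, the two are coalesced instead of inserting a new node. A compact sketch of that downward-merge check on an address-ordered list; the types are illustrative, and the kernel also merges upward and keeps an augmented rbtree in step, neither of which is shown here:

    struct fr_range {
        unsigned long start, end;       /* free range [start, end) */
        struct fr_range *prev, *next;   /* address-ordered list */
    };

    /* "next" is the first range above the new one; try to absorb
     * [start, end) into the range just below it via ->prev. */
    static struct fr_range *merge_below(struct fr_range *next,
                                        unsigned long start, unsigned long end)
    {
        struct fr_range *sibling = next->prev;

        if (sibling && sibling->end == start) {
            sibling->end = end;         /* grow the lower neighbour */
            return sibling;
        }
        return NULL;                    /* caller inserts a new node */
    }
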
hugetlb.c
    413  list_add(&nrg->link, rg->link.prev);  [in add_reservation_in_range()]
    431  list_add(&nrg->link, rg->link.prev);  [in add_reservation_in_range()]

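Both hugetlb.c hits lean on list_add()'s contract: the new entry is linked immediately after the given head, so passing rg->link.prev as the head inserts the new reservation region immediately before rg and keeps the list ordered. A minimal reimplementation of that contract outside the kernel; the node type is illustrative, and the list is assumed circular, as kernel lists are:

    struct list_node {
        struct list_node *prev, *next;
    };

    /* Same contract as the kernel's list_add(): link "entry" right after
     * "head". Passing head->prev therefore inserts right before head. */
    static void list_add_after(struct list_node *entry, struct list_node *head)
    {
        struct list_node *next = head->next;

        entry->prev = head;
        entry->next = next;
        head->next = entry;
        next->prev = entry;
    }
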
ksm.c
    229  #define STABLE_NODE_DUP_HEAD ((struct list_head *)&migrate_nodes.prev)

/mm/damon/

core.c
    759  struct damon_region *r, *prev = NULL, *next;  [in damon_merge_regions_of(), local]
    767  if (prev && prev->ar.end == r->ar.start &&  [in damon_merge_regions_of()]
    768  abs(prev->nr_accesses - r->nr_accesses) <= thres &&  [in damon_merge_regions_of()]
    769  sz_damon_region(prev) + sz_damon_region(r) <= sz_limit)  [in damon_merge_regions_of()]
    770  damon_merge_two_regions(t, prev, r);  [in damon_merge_regions_of()]
    772  prev = r;  [in damon_merge_regions_of()]

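The damon_merge_regions_of() hits show the complete merging rule: fold a region into prev only when the two are contiguous, their access counts differ by at most thres, and the combined size stays within sz_limit; otherwise prev simply advances. A standalone pass over an array of regions with the same condition; the types are illustrative, the kernel walks a per-target linked list, and how damon_merge_two_regions() combines the access counts is not shown in the listing, so it is left out here:

    #include <stdlib.h>

    struct region {
        unsigned long start, end;   /* monitored range [start, end) */
        unsigned int nr_accesses;
    };

    /* Merge adjacent, similarly-accessed regions in place and return the
     * new count; r[] is assumed sorted and non-overlapping. */
    static size_t merge_regions(struct region *r, size_t n,
                                unsigned int thres, unsigned long sz_limit)
    {
        size_t out = 0;

        for (size_t i = 0; i < n; i++) {
            struct region *prev = out ? &r[out - 1] : NULL;
            unsigned long merged_sz = prev ?
                (prev->end - prev->start) + (r[i].end - r[i].start) : 0;

            if (prev && prev->end == r[i].start &&
                abs((int)prev->nr_accesses - (int)r[i].nr_accesses) <= thres &&
                merged_sz <= sz_limit) {
                prev->end = r[i].end;   /* extend prev over r[i] */
                /* combining nr_accesses is left to the caller's policy */
            } else {
                r[out++] = r[i];
            }
        }
        return out;
    }

The dbgfs.c hits below use the complementary check, damon_prev_region(), to reject user-supplied init regions that overlap their predecessor.
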
dbgfs.c
    492  struct damon_region *r, *prev;  [in add_init_region(), local]
    509  prev = damon_prev_region(r);  [in add_init_region()]
    510  if (prev->ar.end > r->ar.start) {  [in add_init_region()]