Searched refs:prev (Results 1 – 18 of 18) sorted by relevance

/mm/
mmap.c
77 struct vm_area_struct *vma, struct vm_area_struct *prev,
333 unsigned long prev = 0, pend = 0; in browse_rb() local
338 if (vma->vm_start < prev) { in browse_rb()
340 vma->vm_start, prev); in browse_rb()
363 prev = vma->vm_start; in browse_rb()
635 struct vm_area_struct *prev, struct rb_node **rb_link, in __vma_link() argument
638 __vma_link_list(mm, vma, prev, rb_parent); in __vma_link()
643 struct vm_area_struct *prev, struct rb_node **rb_link, in vma_link() argument
653 __vma_link(mm, vma, prev, rb_link, rb_parent); in vma_link()
669 struct vm_area_struct *prev; in __insert_vm_struct() local
[all …]
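
The browse_rb() hits above carry the previous vm_start in a plain local while walking the VMA tree, flagging any entry that sorts below its predecessor. A minimal userspace sketch of that validation pattern, with a hypothetical struct range standing in for the VMA:

    #include <stdio.h>

    struct range { unsigned long start; struct range *next; };

    /* Walk the list carrying the last start seen; count misordered
     * entries, as browse_rb() does for the mm's rb-tree. */
    static int check_sorted(const struct range *head)
    {
        unsigned long prev = 0;
        int bugs = 0;

        for (const struct range *r = head; r; r = r->next) {
            if (r->start < prev) {
                fprintf(stderr, "out of order: %lu follows %lu\n",
                        r->start, prev);
                bugs++;
            }
            prev = r->start;
        }
        return bugs;
    }
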
madvise.c
66 struct vm_area_struct **prev, in madvise_behavior() argument
130 *prev = vma; in madvise_behavior()
135 *prev = vma_merge(mm, *prev, start, end, new_flags, vma->anon_vma, in madvise_behavior()
138 if (*prev) { in madvise_behavior()
139 vma = *prev; in madvise_behavior()
143 *prev = vma; in madvise_behavior()
254 struct vm_area_struct **prev, in madvise_willneed() argument
260 *prev = vma; in madvise_willneed()
289 *prev = NULL; /* tell sys_madvise we drop mmap_sem */ in madvise_willneed()
486 struct vm_area_struct **prev, in madvise_cold() argument
[all …]
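
madvise_behavior() treats *prev as both input and output: it is the merge candidate on entry, and on exit it names the region the caller should resume from (NULL here signals mmap_sem was dropped). A sketch of that in/out convention, with hypothetical region/try_merge names:

    #include <stddef.h>

    struct region { unsigned long start, end; struct region *next; };

    /* Hypothetical merge: absorb cur into prev when they touch. */
    static struct region *try_merge(struct region *prev, struct region *cur)
    {
        if (prev && prev->end == cur->start) {
            prev->end = cur->end;
            prev->next = cur->next;
            return prev;
        }
        return NULL;
    }

    /* *prev is the merge candidate going in and the caller's new
     * "previous" region coming out, as in madvise_behavior(). */
    static struct region *fixup(struct region *cur, struct region **prev)
    {
        struct region *merged = try_merge(*prev, cur);

        if (merged) {
            *prev = merged;
            return merged;
        }
        *prev = cur;
        return cur;
    }
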
slob.c
240 slob_t *prev, *cur, *aligned = NULL; in slob_page_alloc() local
244 for (prev = NULL, cur = sp->freelist; ; prev = cur, cur = slob_next(cur)) { in slob_page_alloc()
267 prev = cur; in slob_page_alloc()
274 if (prev) in slob_page_alloc()
275 set_slob(prev, slob_units(prev), next); in slob_page_alloc()
279 if (prev) in slob_page_alloc()
280 set_slob(prev, slob_units(prev), cur + units); in slob_page_alloc()
385 slob_t *prev, *next, *b = (slob_t *)block; in slob_free() local
441 prev = sp->freelist; in slob_free()
442 next = slob_next(prev); in slob_free()
[all …]
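
slob_page_alloc() is the textbook singly linked free-list walk: a trailing prev pointer follows cur so the chosen block can be unlinked without re-scanning. A simplified first-fit version of that walk (blk and the list layout are hypothetical stand-ins for slob_t):

    #include <stddef.h>

    struct blk { size_t units; struct blk *next; };

    /* First-fit scan with a trailing prev pointer, so the hit can be
     * unlinked in O(1) whether it is the head or mid-list. */
    static struct blk *take_first_fit(struct blk **freelist, size_t units)
    {
        struct blk *prev = NULL;

        for (struct blk *cur = *freelist; cur; prev = cur, cur = cur->next) {
            if (cur->units >= units) {
                if (prev)
                    prev->next = cur->next;   /* unlink mid-list */
                else
                    *freelist = cur->next;    /* unlink the head */
                return cur;
            }
        }
        return NULL;    /* no block large enough */
    }
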
mlock.c
519 static int mlock_fixup(struct vm_area_struct *vma, struct vm_area_struct **prev, in mlock_fixup() argument
536 *prev = vma_merge(mm, *prev, start, end, newflags, vma->anon_vma, in mlock_fixup()
539 if (*prev) { in mlock_fixup()
540 vma = *prev; in mlock_fixup()
579 *prev = vma; in mlock_fixup()
587 struct vm_area_struct * vma, * prev; in apply_vma_lock_flags() local
601 prev = vma->vm_prev; in apply_vma_lock_flags()
603 prev = vma; in apply_vma_lock_flags()
614 error = mlock_fixup(vma, &prev, nstart, tmp, newflags); in apply_vma_lock_flags()
618 if (nstart < prev->vm_end) in apply_vma_lock_flags()
[all …]
interval_tree.c
29 struct vm_area_struct *prev, in vma_interval_tree_insert_after() argument
36 VM_BUG_ON_VMA(vma_start_pgoff(node) != vma_start_pgoff(prev), node); in vma_interval_tree_insert_after()
38 if (!prev->shared.rb.rb_right) { in vma_interval_tree_insert_after()
39 parent = prev; in vma_interval_tree_insert_after()
40 link = &prev->shared.rb.rb_right; in vma_interval_tree_insert_after()
42 parent = rb_entry(prev->shared.rb.rb_right, in vma_interval_tree_insert_after()
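
vma_interval_tree_insert_after() places node immediately after prev in tree order: into prev's empty right slot if there is one, otherwise as the left child of prev's in-order successor. The same placement rule on a bare binary-tree node, with rebalancing and interval augmentation omitted:

    #include <stddef.h>

    struct tnode { struct tnode *left, *right; };

    /* Place node directly after prev in in-order sequence. */
    static void insert_after(struct tnode *prev, struct tnode *node)
    {
        node->left = node->right = NULL;

        if (!prev->right) {
            prev->right = node;         /* successor slot is free */
            return;
        }

        struct tnode *succ = prev->right;
        while (succ->left)              /* prev's in-order successor */
            succ = succ->left;
        succ->left = node;              /* node now sits right after prev */
    }
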
mprotect.c
456 struct vm_area_struct *vma, *prev; in do_mprotect_pkey() local
496 prev = vma->vm_prev; in do_mprotect_pkey()
515 prev = vma; in do_mprotect_pkey()
553 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags); in do_mprotect_pkey()
558 if (nstart < prev->vm_end) in do_mprotect_pkey()
559 nstart = prev->vm_end; in do_mprotect_pkey()
563 vma = prev->vm_next; in do_mprotect_pkey()
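
The do_mprotect_pkey() loop resumes from whatever mprotect_fixup() left in prev: the next span starts at prev->vm_end and the next region is prev->vm_next, so a merge performed inside the fixup is never revisited. The apply_vma_lock_flags() hits in mlock.c above follow the same shape. A schematic of the loop with hypothetical region/apply_fixup names:

    struct region { unsigned long start, end; struct region *next; };

    /* Hypothetical per-region fixup; a real one might split or merge. */
    static int apply_fixup(struct region *r, struct region **prev,
                           unsigned long s, unsigned long e)
    {
        (void)s; (void)e;
        *prev = r;          /* report the surviving region back */
        return 0;
    }

    static int walk_range(struct region *vma, struct region *prev,
                          unsigned long nstart, unsigned long end)
    {
        while (vma && vma->start < end) {
            unsigned long tmp = vma->end < end ? vma->end : end;
            int error = apply_fixup(vma, &prev, nstart, tmp);

            if (error)
                return error;
            if (nstart < prev->end)     /* fixup may have merged forward */
                nstart = prev->end;
            vma = prev->next;           /* resume after the survivor */
        }
        return 0;
    }
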
mempolicy.c
413 struct vm_area_struct *prev; member
637 if (qp->prev && qp->prev->vm_end < vma->vm_start) in queue_pages_test_walk()
641 qp->prev = vma; in queue_pages_test_walk()
688 .prev = NULL, in queue_pages_range()
735 struct vm_area_struct *prev; in mbind_range() local
746 prev = vma->vm_prev; in mbind_range()
748 prev = vma; in mbind_range()
750 for (; vma && vma->vm_start < end; prev = vma, vma = next) { in mbind_range()
760 prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags, in mbind_range()
764 if (prev) { in mbind_range()
[all …]
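
queue_pages_test_walk() stashes the previously visited VMA in qp->prev so an unmapped hole shows up as prev->vm_end < vma->vm_start on the next callback. Gap detection in miniature (types hypothetical):

    #include <stdbool.h>

    struct region { unsigned long start, end; };

    struct walk_state { const struct region *prev; };

    /* True when an unmapped gap separates the last region seen from
     * the current one; remembers cur for the next callback. */
    static bool hole_before(struct walk_state *st, const struct region *cur)
    {
        bool hole = st->prev && st->prev->end < cur->start;

        st->prev = cur;
        return hole;
    }
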
util.c
274 struct vm_area_struct *prev, struct rb_node *rb_parent) in __vma_link_list() argument
278 vma->vm_prev = prev; in __vma_link_list()
279 if (prev) { in __vma_link_list()
280 next = prev->vm_next; in __vma_link_list()
281 prev->vm_next = vma; in __vma_link_list()
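
__vma_link_list() is a plain doubly linked list splice: the new VMA goes in right after prev, or at the head when prev is NULL. The same splice on a generic node type:

    struct node { struct node *prev, *next; };

    /* Splice n in after prev, or at the front when prev is NULL. */
    static void link_after(struct node **head, struct node *n,
                           struct node *prev)
    {
        struct node *next;

        n->prev = prev;
        if (prev) {
            next = prev->next;
            prev->next = n;
        } else {
            next = *head;       /* inserting at the front */
            *head = n;
        }
        n->next = next;
        if (next)
            next->prev = n;
    }
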
swapfile.c
1398 struct swap_info_struct *p, *prev; in swapcache_free_entries() local
1404 prev = NULL; in swapcache_free_entries()
1415 p = swap_info_get_cont(entries[i], prev); in swapcache_free_entries()
1418 prev = p; in swapcache_free_entries()
2093 unsigned int prev, bool frontswap) in find_next_to_unuse() argument
2104 for (i = prev + 1; i < si->max; i++) { in find_next_to_unuse()
3678 page = list_entry(page->lru.prev, struct page, lru); in swap_count_continued()
3683 page = list_entry(page->lru.prev, struct page, lru); in swap_count_continued()
3703 page = list_entry(page->lru.prev, struct page, lru); in swap_count_continued()
3709 page = list_entry(page->lru.prev, struct page, lru); in swap_count_continued()
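
In swapcache_free_entries(), prev caches the swap_info_struct resolved for the previous entry so consecutive entries on the same device can skip the lookup (the lru.prev hits in swap_count_continued() are ordinary backwards list steps). The memoization shape, with a hypothetical resolve():

    #include <stddef.h>

    struct device { int id; };

    /* Hypothetical resolver: a matching hint short-circuits the search. */
    static struct device *resolve(struct device *tbl, int n, int id,
                                  struct device *hint)
    {
        if (hint && hint->id == id)
            return hint;            /* same device as the last entry */
        for (int i = 0; i < n; i++)
            if (tbl[i].id == id)
                return &tbl[i];
        return NULL;
    }

    static void free_entries(struct device *tbl, int n,
                             const int *ids, int cnt)
    {
        struct device *prev = NULL;

        for (int i = 0; i < cnt; i++) {
            struct device *d = resolve(tbl, n, ids[i], prev);

            if (d)
                prev = d;           /* hint for the next iteration */
            /* ... release ids[i] against d ... */
        }
    }
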
memcontrol.c
1026 struct mem_cgroup *prev, in mem_cgroup_iter() argument
1040 if (prev && !reclaim) in mem_cgroup_iter()
1041 pos = prev; in mem_cgroup_iter()
1044 if (prev) in mem_cgroup_iter()
1057 if (prev && reclaim->generation != iter->generation) in mem_cgroup_iter()
1088 if (!prev) in mem_cgroup_iter()
1122 else if (!prev) in mem_cgroup_iter()
1129 if (prev && prev != root) in mem_cgroup_iter()
1130 css_put(&prev->css); in mem_cgroup_iter()
1141 struct mem_cgroup *prev) in mem_cgroup_iter_break() argument
[all …]
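
mem_cgroup_iter() is a cursor-style iterator: callers pass back the value the previous call returned as prev, NULL (re)starts the walk, and the css_put() drops the reference the previous call pinned. The same calling convention over a plain array, hierarchy and reference counting omitted:

    #include <stddef.h>

    struct item { int v; };

    /* Return the element after prev, or the first when prev is NULL;
     * NULL means the walk is over. */
    static struct item *iter_next(struct item *items, size_t n,
                                  struct item *prev)
    {
        size_t pos = prev ? (size_t)(prev - items) + 1 : 0;

        return pos < n ? &items[pos] : NULL;
    }

    /* Typical use, like the kernel's for_each_mem_cgroup() loops:
     *   for (struct item *it = iter_next(a, n, NULL); it;
     *        it = iter_next(a, n, it))
     *           ...;
     */
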
vmscan.c
139 if ((_page)->lru.prev != _base) { \
140 struct page *prev; \
142 prev = lru_to_page(&(_page->lru)); \
143 prefetch(&prev->_field); \
153 if ((_page)->lru.prev != _base) { \
154 struct page *prev; \
156 prev = lru_to_page(&(_page->lru)); \
157 prefetchw(&prev->_field); \
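
These vmscan macros peek at page->lru.prev, the entry a reverse LRU scan will visit next, and prefetch one of its fields so the cache line is warm on arrival. A GCC/Clang userspace rendering using __builtin_prefetch (node/payload are hypothetical):

    struct node { struct node *prev, *next; int payload; };

    /* Walk back from the tail toward the list head (base), prefetching
     * the element the next iteration will touch. */
    static void scan_backwards(struct node *base, struct node *tail)
    {
        for (struct node *n = tail; n != base; n = n->prev) {
            if (n->prev != base)
                __builtin_prefetch(&n->prev->payload);
            /* ... process n->payload ... */
        }
    }
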
nommu.c
584 struct vm_area_struct *pvma, *prev; in add_vma_to_mm() local
636 prev = NULL; in add_vma_to_mm()
638 prev = rb_entry(rb_prev, struct vm_area_struct, vm_rb); in add_vma_to_mm()
640 __vma_link_list(mm, vma, prev, parent); in add_vma_to_mm()
slab.c
2192 p = n->slabs_free.prev; in drain_freelist()
3801 struct array_cache __percpu *cpu_cache, *prev; in __do_tune_cpucache() local
3808 prev = cachep->cpu_cache; in __do_tune_cpucache()
3814 if (prev) in __do_tune_cpucache()
3822 if (!prev) in __do_tune_cpucache()
3829 struct array_cache *ac = per_cpu_ptr(prev, cpu); in __do_tune_cpucache()
3838 free_percpu(prev); in __do_tune_cpucache()
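
__do_tune_cpucache() publishes the new per-cpu array first, keeps the old one reachable through prev, and only then drains and frees it. A single-threaded sketch of that swap-then-reclaim order (the kernel version adds locking and per-CPU draining):

    #include <stdlib.h>

    struct cache { int *slots; int n; };

    /* Hypothetical resize: publish the replacement before reclaiming
     * the old array, as done for cachep->cpu_cache. */
    static int resize(struct cache *c, int n)
    {
        int *fresh = calloc((size_t)n, sizeof(*fresh));

        if (!fresh)
            return -1;

        int *prev = c->slots;   /* keep the old array reachable */
        c->slots = fresh;
        c->n = n;

        /* ... drain anything still held in prev ... */
        free(prev);             /* reclaim only after the swap */
        return 0;
    }
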
internal.h
293 struct vm_area_struct *prev, struct rb_node *rb_parent);
zsmalloc.c
1255 static bool can_merge(struct size_class *prev, int pages_per_zspage, in can_merge() argument
1258 if (prev->pages_per_zspage == pages_per_zspage && in can_merge()
1259 prev->objs_per_zspage == objs_per_zspage) in can_merge()
vmalloc.c
510 head = head->prev; in link_va()
739 if (next->prev != head) { in merge_or_add_vmap_area()
740 sibling = list_entry(next->prev, struct vmap_area, list); in merge_or_add_vmap_area()
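
merge_or_add_vmap_area() reaches the candidate sibling through next->prev and absorbs the new area into it when they abut. Checking the predecessor for a merge, on a simplified free-extent list (types hypothetical):

    struct extent { unsigned long start, end; struct extent *prev, *next; };

    /* If the list predecessor ends exactly where ext begins, grow it
     * and drop ext; otherwise ext stays its own node. Returns the
     * surviving node. */
    static struct extent *merge_with_prev(struct extent *ext)
    {
        struct extent *sibling = ext->prev;

        if (sibling && sibling->end == ext->start) {
            sibling->end = ext->end;
            sibling->next = ext->next;
            if (ext->next)
                ext->next->prev = sibling;
            return sibling;
        }
        return ext;
    }
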
hugetlb.c
290 list_add(&nrg->link, rg->link.prev); in region_add()
302 list_for_each_entry_safe(rg, trg, rg->link.prev, link) { in region_add()
415 list_add(&nrg->link, rg->link.prev); in region_chg()
426 list_for_each_entry(rg, rg->link.prev, link) { in region_chg()
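
region_add() and region_chg() insert the new region before rg by handing rg->link.prev to list_add(): adding after a node's predecessor is how you add before it in a circular list. A minimal version of that semantics:

    struct list_head { struct list_head *prev, *next; };

    /* Same semantics as the kernel's list_add(): entry goes right
     * after head. */
    static void list_add_sketch(struct list_head *entry,
                                struct list_head *head)
    {
        entry->next = head->next;
        entry->prev = head;
        head->next->prev = entry;
        head->next = entry;
    }

    /* Insert nrg immediately before rg by adding after rg's
     * predecessor. */
    static void insert_before(struct list_head *nrg, struct list_head *rg)
    {
        list_add_sketch(nrg, rg->prev);
    }
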
ksm.c
229 #define STABLE_NODE_DUP_HEAD ((struct list_head *)&migrate_nodes.prev)
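
STABLE_NODE_DUP_HEAD reuses the address of migrate_nodes.prev as a sentinel value: a pointer that can never alias a real list head serves as an in-band tag and is compared, never dereferenced. The trick in isolation (tag_anchor is a hypothetical stand-in for migrate_nodes):

    #include <stdbool.h>

    struct list_head { struct list_head *prev, *next; };

    static struct list_head tag_anchor;
    #define SENTINEL ((struct list_head *)&tag_anchor.prev)

    /* Compares addresses only; SENTINEL is never dereferenced. */
    static bool is_tagged(struct list_head *p)
    {
        return p == SENTINEL;
    }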