
Searched refs:prev (Results 1 – 23 of 23) sorted by relevance

/mm/
madvise.c
46 struct vm_area_struct **prev, in madvise_behavior() argument
99 *prev = vma; in madvise_behavior()
104 *prev = vma_merge(mm, *prev, start, end, new_flags, vma->anon_vma, in madvise_behavior()
107 if (*prev) { in madvise_behavior()
108 vma = *prev; in madvise_behavior()
112 *prev = vma; in madvise_behavior()
220 struct vm_area_struct **prev, in madvise_willneed() argument
225 *prev = vma; in madvise_willneed()
274 struct vm_area_struct **prev, in madvise_dontneed() argument
277 *prev = vma; in madvise_dontneed()
[all …]
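
Note on the pattern above: each madvise handler takes a struct vm_area_struct **prev out-parameter, and when vma_merge() succeeds it reports the surviving VMA back through it so the caller's walk stays consistent. A minimal userspace sketch of that out-parameter convention, with simplified stand-in types rather than the kernel's:

    #include <stddef.h>

    struct region {                 /* simplified stand-in for vm_area_struct */
        unsigned long start, end;
        int flags;
        struct region *next;
    };

    /* If r can fold into its predecessor (adjacent, same flags), do so and
     * return the merged region; otherwise return NULL, as vma_merge() does. */
    static struct region *region_merge(struct region *prev, struct region *r)
    {
        if (prev && prev->end == r->start && prev->flags == r->flags) {
            prev->end = r->end;
            prev->next = r->next;
            return prev;
        }
        return NULL;
    }

    /* Like madvise_behavior(): update flags, try to merge, and always
     * report the region we ended up on through *prev. */
    static void change_flags(struct region **prev, struct region *r, int newflags)
    {
        struct region *merged;

        r->flags = newflags;
        merged = region_merge(*prev, r);
        if (merged)
            r = merged;             /* caller continues from the merged region */
        *prev = r;
    }
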
mmap.c
73 struct vm_area_struct *vma, struct vm_area_struct *prev,
410 unsigned long prev = 0, pend = 0; in browse_rb() local
415 if (vma->vm_start < prev) { in browse_rb()
417 vma->vm_start, prev); in browse_rb()
438 prev = vma->vm_start; in browse_rb()
692 struct vm_area_struct *prev, struct rb_node **rb_link, in __vma_link() argument
695 __vma_link_list(mm, vma, prev, rb_parent); in __vma_link()
700 struct vm_area_struct *prev, struct rb_node **rb_link, in vma_link() argument
710 __vma_link(mm, vma, prev, rb_link, rb_parent); in vma_link()
726 struct vm_area_struct *prev; in __insert_vm_struct() local
[all …]
slob.c
219 slob_t *prev, *cur, *aligned = NULL; in slob_page_alloc() local
222 for (prev = NULL, cur = sp->freelist; ; prev = cur, cur = slob_next(cur)) { in slob_page_alloc()
236 prev = cur; in slob_page_alloc()
243 if (prev) in slob_page_alloc()
244 set_slob(prev, slob_units(prev), next); in slob_page_alloc()
248 if (prev) in slob_page_alloc()
249 set_slob(prev, slob_units(prev), cur + units); in slob_page_alloc()
271 struct list_head *prev; in slob_alloc() local
299 prev = sp->lru.prev; in slob_alloc()
307 if (prev != slob_list->prev && in slob_alloc()
[all …]
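
The slob_page_alloc() walk above is the classic singly-linked-list idiom: keep prev trailing one node behind cur so the chosen block can be unlinked. A userspace sketch with stand-in types (slob's real freelist is encoded inside the free blocks themselves via set_slob()/slob_next()):

    #include <stddef.h>

    struct node {
        int units;
        struct node *next;
    };

    /* First fit with a trailing prev pointer, mirroring the prev/cur loop
     * in slob_page_alloc(): prev is what lets us unlink the match. */
    static struct node *take_first_fit(struct node **head, int need)
    {
        struct node *prev = NULL, *cur;

        for (cur = *head; cur; prev = cur, cur = cur->next) {
            if (cur->units >= need) {
                if (prev)
                    prev->next = cur->next;   /* unlink mid-list */
                else
                    *head = cur->next;        /* unlink the head */
                return cur;
            }
        }
        return NULL;
    }
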
mlock.c
555 static int mlock_fixup(struct vm_area_struct *vma, struct vm_area_struct **prev, in mlock_fixup() argument
569 *prev = vma_merge(mm, *prev, start, end, newflags, vma->anon_vma, in mlock_fixup()
572 if (*prev) { in mlock_fixup()
573 vma = *prev; in mlock_fixup()
610 *prev = vma; in mlock_fixup()
617 struct vm_area_struct * vma, * prev; in do_mlock() local
631 prev = vma->vm_prev; in do_mlock()
633 prev = vma; in do_mlock()
647 error = mlock_fixup(vma, &prev, nstart, tmp, newflags); in do_mlock()
651 if (nstart < prev->vm_end) in do_mlock()
[all …]
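
In do_mlock() above, prev is how the loop survives VMA merging and splitting: mlock_fixup() reports the VMA that now covers the range through *prev, and the caller advances with nstart = prev->vm_end instead of trusting the VMA it started from. A simplified sketch of that advance-by-prev loop (stand-in types; mprotect.c below follows the same shape):

    #include <stddef.h>

    struct region {
        unsigned long start, end;
        struct region *next;
    };

    /* Placeholder for mlock_fixup()/mprotect_fixup(): adjust [start, end)
     * within r and report the surviving region through *prev. */
    static int fixup(struct region *r, struct region **prev,
                     unsigned long start, unsigned long end)
    {
        *prev = r;
        return 0;
    }

    static int apply_range(struct region *first, unsigned long start,
                           unsigned long end)
    {
        struct region *r = first, *prev = NULL;
        unsigned long nstart;

        for (nstart = start; r && nstart < end; r = prev->next) {
            unsigned long tmp = r->end < end ? r->end : end;
            int error = fixup(r, &prev, nstart, tmp);

            if (error)
                return error;
            if (nstart < prev->end)
                nstart = prev->end;   /* skip what the fixup already covered */
        }
        return 0;
    }
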
interval_tree.c
30 struct vm_area_struct *prev, in vma_interval_tree_insert_after() argument
37 VM_BUG_ON_VMA(vma_start_pgoff(node) != vma_start_pgoff(prev), node); in vma_interval_tree_insert_after()
39 if (!prev->shared.linear.rb.rb_right) { in vma_interval_tree_insert_after()
40 parent = prev; in vma_interval_tree_insert_after()
41 link = &prev->shared.linear.rb.rb_right; in vma_interval_tree_insert_after()
43 parent = rb_entry(prev->shared.linear.rb.rb_right, in vma_interval_tree_insert_after()
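
vma_interval_tree_insert_after() places node immediately after prev in tree order: if prev has no right child, node becomes that right child; otherwise it goes at the leftmost empty left slot of prev's right subtree. The same successor logic in a plain binary tree, leaving out the kernel's rbtree rebalancing and interval augmentation:

    #include <stddef.h>

    struct tnode {
        struct tnode *left, *right;
    };

    /* Find the link where a node inserted immediately after prev
     * (in in-order terms) must hang, and report the future parent. */
    static struct tnode **slot_after(struct tnode *prev, struct tnode **parent)
    {
        struct tnode *p = prev;

        if (!p->right) {
            *parent = p;
            return &p->right;
        }
        p = p->right;
        while (p->left)
            p = p->left;
        *parent = p;
        return &p->left;
    }
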
mprotect.c
339 struct vm_area_struct *vma, *prev; in SYSCALL_DEFINE3() local
372 prev = vma->vm_prev; in SYSCALL_DEFINE3()
391 prev = vma; in SYSCALL_DEFINE3()
414 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags); in SYSCALL_DEFINE3()
419 if (nstart < prev->vm_end) in SYSCALL_DEFINE3()
420 nstart = prev->vm_end; in SYSCALL_DEFINE3()
424 vma = prev->vm_next; in SYSCALL_DEFINE3()
util.c
142 struct vm_area_struct *prev, struct rb_node *rb_parent) in __vma_link_list() argument
146 vma->vm_prev = prev; in __vma_link_list()
147 if (prev) { in __vma_link_list()
148 next = prev->vm_next; in __vma_link_list()
149 prev->vm_next = vma; in __vma_link_list()
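
__vma_link_list() is a textbook insert-after for a doubly linked list with an optional head case: when prev is NULL the new VMA becomes the first entry. A self-contained sketch of the same shape, with stand-in types:

    #include <stddef.h>

    struct region {
        struct region *prev, *next;
    };

    struct space {
        struct region *first;
    };

    /* Insert r after prev, or at the head when prev is NULL,
     * following the shape of __vma_link_list(). */
    static void link_after(struct space *s, struct region *r, struct region *prev)
    {
        struct region *next;

        r->prev = prev;
        if (prev) {
            next = prev->next;
            prev->next = r;
        } else {
            next = s->first;
            s->first = r;
        }
        r->next = next;
        if (next)
            next->prev = r;
    }
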
mempolicy.c
662 struct vm_area_struct *vma, *prev; in queue_pages_range() local
667 prev = NULL; in queue_pages_range()
679 if (prev && prev->vm_end < vma->vm_start) in queue_pages_range()
700 prev = vma; in queue_pages_range()
746 struct vm_area_struct *prev; in mbind_range() local
757 prev = vma->vm_prev; in mbind_range()
759 prev = vma; in mbind_range()
761 for (; vma && vma->vm_start < end; prev = vma, vma = next) { in mbind_range()
771 prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags, in mbind_range()
774 if (prev) { in mbind_range()
[all …]
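
queue_pages_range() above uses prev for hole detection: if a VMA starts past prev->vm_end, part of the requested range is unmapped. A minimal sketch of that coverage check (simplified types and return convention; the kernel path returns an ERR_PTR):

    #include <errno.h>
    #include <stddef.h>

    struct region {
        unsigned long start, end;
        struct region *next;
    };

    /* Fail if [start, end) is not fully covered by the sorted region
     * list, detecting gaps by comparing each region's start against the
     * previous region's end, as queue_pages_range() does. */
    static int check_covered(struct region *first, unsigned long start,
                             unsigned long end)
    {
        struct region *r, *prev = NULL;

        for (r = first; r && r->start < end; prev = r, r = r->next) {
            if (!prev && r->start > start)
                return -EFAULT;          /* hole before the first region */
            if (prev && prev->end < r->start)
                return -EFAULT;          /* hole between regions */
        }
        if (!prev || prev->end < end)
            return -EFAULT;              /* hole at the tail */
        return 0;
    }
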
vmalloc.c
331 struct vmap_area *prev; in __insert_vmap_area() local
332 prev = rb_entry(tmp, struct vmap_area, rb_node); in __insert_vmap_area()
333 list_add_rcu(&va->list, &prev->list); in __insert_vmap_area()
2367 struct vmap_area **vas, *prev, *next; in pcpu_get_vm_areas() local
2424 if (!pvm_find_next_prev(vmap_area_pcpu_hole, &next, &prev)) { in pcpu_get_vm_areas()
2428 base = pvm_determine_end(&next, &prev, align) - end; in pcpu_get_vm_areas()
2432 BUG_ON(prev && prev->va_end > base + end); in pcpu_get_vm_areas()
2453 base = pvm_determine_end(&next, &prev, align) - end; in pcpu_get_vm_areas()
2463 if (prev && prev->va_end > base + start) { in pcpu_get_vm_areas()
2464 next = prev; in pcpu_get_vm_areas()
[all …]
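
__insert_vmap_area() keeps two indexes in step: the rbtree for lookup and a sorted list for ordered traversal. Once the rbtree predecessor is known, the list insert is O(1); the kernel uses list_add_rcu() so lockless readers always observe a consistent, sorted list. A sketch of the insert-after-predecessor step, without the RCU machinery:

    #include <stddef.h>

    struct area {
        unsigned long start;
        struct area *prev, *next;   /* stand-in for the embedded list_head */
    };

    /* Splice new_area in right after its predecessor, keeping the list
     * sorted by start address, as the list_add_rcu() call does for
     * vmap areas. */
    static void insert_after(struct area *prev, struct area *new_area)
    {
        new_area->prev = prev;
        new_area->next = prev->next;
        if (prev->next)
            prev->next->prev = new_area;
        prev->next = new_area;
    }
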
swapfile.c
1325 unsigned int prev, bool frontswap) in find_next_to_unuse() argument
1328 unsigned int i = prev; in find_next_to_unuse()
1339 if (!prev) { in find_next_to_unuse()
1347 max = prev + 1; in find_next_to_unuse()
1348 prev = 0; in find_next_to_unuse()
1704 lh = sis->first_swap_extent.list.prev; /* Highest extent */ in add_swap_extent()
2926 page = list_entry(page->lru.prev, struct page, lru); in swap_count_continued()
2931 page = list_entry(page->lru.prev, struct page, lru); in swap_count_continued()
2951 page = list_entry(page->lru.prev, struct page, lru); in swap_count_continued()
2957 page = list_entry(page->lru.prev, struct page, lru); in swap_count_continued()
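
find_next_to_unuse() above resumes at prev + 1 and wraps: when the scan reaches the top of the swap map it restarts from the bottom, but only up to the original prev, so every slot is visited exactly once. The same wrap-around scan in miniature (slot 0 is treated as reserved, as in the swap map; the frontswap filtering is omitted):

    /* Return the next in-use slot after prev, wrapping once; 0 means
     * nothing found. map[i] != 0 stands in for "swap entry still used". */
    static unsigned int next_in_use(const unsigned char *map,
                                    unsigned int size, unsigned int prev)
    {
        unsigned int i = prev + 1;
        unsigned int max = size;

        for (;;) {
            for (; i < max; i++)
                if (map[i])
                    return i;
            if (!prev)
                break;          /* the whole table has been covered */
            max = prev + 1;     /* wrap: rescan [1, prev] once */
            prev = 0;
            i = 1;
        }
        return 0;
    }
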
vmscan.c
107 #define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru))
112 if ((_page)->lru.prev != _base) { \
113 struct page *prev; \
115 prev = lru_to_page(&(_page->lru)); \
116 prefetch(&prev->_field); \
126 if ((_page)->lru.prev != _base) { \
127 struct page *prev; \
129 prev = lru_to_page(&(_page->lru)); \
130 prefetchw(&prev->_field); \
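
lru_to_page() reads the list tail (->prev) because pages are queued at the head, so the tail is the coldest entry; the surrounding prefetch macros walk one entry ahead and touch a field of the next page early to hide the cache miss. A userspace rendering of the tail-as-oldest half of the idiom:

    #include <stddef.h>

    struct list_head { struct list_head *prev, *next; };

    struct page {
        int id;
        struct list_head lru;
    };

    /* container_of in miniature: recover the page from its embedded
     * list_head, then take head->prev, which is what lru_to_page() does.
     * The prefetch macros would follow up with __builtin_prefetch() on a
     * field of that page. */
    #define my_container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))
    #define my_lru_to_page(head) my_container_of((head)->prev, struct page, lru)
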
memcontrol.c
1199 struct mem_cgroup *prev, in mem_cgroup_iter() argument
1211 if (prev && !reclaim) in mem_cgroup_iter()
1212 last_visited = prev; in mem_cgroup_iter()
1215 if (prev) in mem_cgroup_iter()
1230 if (prev && reclaim->generation != iter->generation) { in mem_cgroup_iter()
1246 else if (!prev && memcg) in mem_cgroup_iter()
1250 if (prev && !memcg) in mem_cgroup_iter()
1256 if (prev && prev != root) in mem_cgroup_iter()
1257 css_put(&prev->css); in mem_cgroup_iter()
1268 struct mem_cgroup *prev) in mem_cgroup_iter_break() argument
[all …]
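
mem_cgroup_iter() is a reference-counted cursor: the caller hands the previous position back in, the iterator pins the next position before releasing the pin on prev (css_put), and mem_cgroup_iter_break() exists to drop that pin when a walk stops early. A sketch of the handoff, with a toy refcount standing in for css references:

    #include <stddef.h>

    struct node {
        int refs;
        struct node *next;
    };

    static void node_get(struct node *n) { n->refs++; }
    static void node_put(struct node *n) { n->refs--; }

    /* Pin the next position before unpinning prev, so the cursor never
     * points at memory that could be freed under us, in the style of
     * mem_cgroup_iter(). A caller that stops early must node_put() the
     * last value it was handed, as mem_cgroup_iter_break() does. */
    static struct node *iter_next(struct node *first, struct node *prev)
    {
        struct node *next = prev ? prev->next : first;

        if (next)
            node_get(next);
        if (prev)
            node_put(prev);
        return next;
    }
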
zsmalloc.c
1217 static bool can_merge(struct size_class *prev, int size, int pages_per_zspage) in can_merge() argument
1219 if (prev->pages_per_zspage != pages_per_zspage) in can_merge()
1222 if (get_maxobj_per_zspage(prev->size, prev->pages_per_zspage) in can_merge()
nommu.c
708 struct vm_area_struct *pvma, *prev; in add_vma_to_mm() local
764 prev = NULL; in add_vma_to_mm()
766 prev = rb_entry(rb_prev, struct vm_area_struct, vm_rb); in add_vma_to_mm()
768 __vma_link_list(mm, vma, prev, parent); in add_vma_to_mm()
swap.c
351 victim = list_entry(pages->prev, struct page, lru); in put_pages_list()
1013 list_head = page_tail->lru.prev; in lru_add_page_tail()
internal.h
222 struct vm_area_struct *prev, struct rb_node *rb_parent);
percpu.c
338 int prev = chunk->map[i - 1]; in pcpu_count_occupied_pages() local
340 if (!(prev & 1) && prev <= round_down(off, PAGE_SIZE)) in pcpu_count_occupied_pages()
slab.c
2368 p = n->slabs_free.prev; in drain_freelist()
3671 struct array_cache __percpu *cpu_cache, *prev; in __do_tune_cpucache() local
3678 prev = cachep->cpu_cache; in __do_tune_cpucache()
3687 if (!prev) in __do_tune_cpucache()
3694 struct array_cache *ac = per_cpu_ptr(prev, cpu); in __do_tune_cpucache()
3703 free_percpu(prev); in __do_tune_cpucache()
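
In __do_tune_cpucache() prev is not a list neighbor but the outgoing per-CPU cache: the new cache is installed first so fresh operations use it, and only then is prev drained and freed. The replace-then-drain shape in miniature (free_slot() is a hypothetical release hook standing in for returning cached objects to the slab):

    #include <stdlib.h>

    struct cache {
        void **slots;
        int count;
    };

    static void free_slot(void *p) { free(p); }   /* hypothetical hook */

    /* Install the replacement before draining the old cache held in
     * prev, mirroring the order in __do_tune_cpucache(). */
    static void retune(struct cache **live, struct cache *new_cache)
    {
        struct cache *prev = *live;

        *live = new_cache;          /* new requests now hit new_cache */
        if (!prev)
            return;
        while (prev->count > 0)
            free_slot(prev->slots[--prev->count]);
        free(prev->slots);
        free(prev);
    }
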
zbud.c
452 list_entry((ptr)->prev, type, member)
readahead.c
35 #define list_to_page(head) (list_entry((head)->prev, struct page, lru))
hugetlb.c
172 list_for_each_entry_safe(rg, trg, rg->link.prev, link) { in region_add()
223 list_add(&nrg->link, rg->link.prev); in region_chg()
234 list_for_each_entry(rg, rg->link.prev, link) { in region_chg()
282 list_for_each_entry_safe(rg, trg, rg->link.prev, link) { in region_truncate()
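
The region_* helpers above start their walks at rg->link.prev so the current entry is included, and they use the _safe iterator because overlapping reservation records get deleted as they are merged. What makes deletion-during-iteration legal is sampling the next pointer before freeing, which list_for_each_entry_safe() does internally; a stand-alone sketch of that coalescing walk:

    #include <stdlib.h>

    struct range {
        long from, to;
        struct range *next;
    };

    /* Absorb every successor that overlaps or abuts rg in a sorted list,
     * freeing it as we go; next is sampled before the free, the same
     * guarantee list_for_each_entry_safe() provides in region_add(). */
    static void coalesce(struct range *rg)
    {
        struct range *cur = rg->next;

        while (cur && cur->from <= rg->to) {
            struct range *next = cur->next;   /* sample before freeing */

            if (cur->to > rg->to)
                rg->to = cur->to;
            rg->next = next;
            free(cur);
            cur = next;
        }
    }
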
page_alloc.c
739 page = list_entry(list->prev, struct page, lru); in free_pcppages_bulk()
1614 page = list_entry(list->prev, struct page, lru); in buffered_rmqueue()
slub.c
3477 list_splice(slabs_by_inuse + i, n->partial.prev); in __kmem_cache_shrink()