Occurrences of the identifier 'next' under /mm/:
pagewalk.c
    31  unsigned long next;   in walk_pmd_range() local
    37  next = pmd_addr_end(addr, end);   in walk_pmd_range()
    40  err = walk->pte_hole(addr, next, walk);   in walk_pmd_range()
    50  err = walk->pmd_entry(pmd, addr, next, walk);   in walk_pmd_range()
    64  err = walk_pte_range(pmd, addr, next, walk);   in walk_pmd_range()
    67  } while (pmd++, addr = next, addr != end);   in walk_pmd_range()
    76  unsigned long next;   in walk_pud_range() local
    81  next = pud_addr_end(addr, end);   in walk_pud_range()
    84  err = walk->pte_hole(addr, next, walk);   in walk_pud_range()
    90  err = walk->pud_entry(pud, addr, next, walk);   in walk_pud_range()
  [all …]
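
Most of the hits above, and the matching ones in vmalloc.c, mprotect.c, gup.c and mempolicy.c further down, follow one idiom: next holds the end of the range covered by the current page-table entry (pmd_addr_end()/pud_addr_end()), and the do/while loop steps addr = next until the whole region is covered. As a rough illustration only, here is a stand-alone userspace model of that loop; the PMD_SHIFT/PMD_SIZE values are an assumption (typical x86-64 2 MiB PMDs), not taken from the listing.

#include <stdio.h>

#define PMD_SHIFT 21UL                  /* assumption: 2 MiB covered per PMD entry */
#define PMD_SIZE  (1UL << PMD_SHIFT)
#define PMD_MASK  (~(PMD_SIZE - 1))

/* Same contract as the kernel helper: end of the region covered by the
 * PMD entry mapping 'addr', clamped to 'end'.  The "- 1" comparison is
 * how the kernel copes with a boundary that wraps to 0 at the very top
 * of the address space. */
static unsigned long pmd_addr_end(unsigned long addr, unsigned long end)
{
        unsigned long boundary = (addr + PMD_SIZE) & PMD_MASK;

        return (boundary - 1 < end - 1) ? boundary : end;
}

int main(void)
{
        unsigned long addr = 0x1ff000, end = 0x600000, next;

        /* Mirrors the shape of walk_pmd_range()/vunmap_pmd_range():
         * process one PMD-sized slice of [addr, end) per iteration. */
        do {
                next = pmd_addr_end(addr, end);
                printf("process [%#lx, %#lx)\n", addr, next);
        } while (addr = next, addr != end);

        return 0;
}
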
mincore.c
   120  unsigned long next;   in mincore_pte_range() local
   129  next = addr + PAGE_SIZE;   in mincore_pte_range()
   131  mincore_unmapped_range(vma, addr, next, vec);   in mincore_pte_range()
   155  } while (ptep++, addr = next, addr != end);   in mincore_pte_range()
   163  unsigned long next;   in mincore_pmd_range() local
   168  next = pmd_addr_end(addr, end);   in mincore_pmd_range()
   170  if (mincore_huge_pmd(vma, pmd, addr, next, vec)) {   in mincore_pmd_range()
   171  vec += (next - addr) >> PAGE_SHIFT;   in mincore_pmd_range()
   177  mincore_unmapped_range(vma, addr, next, vec);   in mincore_pmd_range()
   179  mincore_pte_range(vma, pmd, addr, next, vec);   in mincore_pmd_range()
  [all …]
slob.c
   145  static void set_slob(slob_t *s, slobidx_t size, slob_t *next)   in set_slob() argument
   148  slobidx_t offset = next - base;   in set_slob()
   173  slobidx_t next;   in slob_next() local
   176  next = -s[0].units;   in slob_next()
   178  next = s[1].units;   in slob_next()
   179  return base+next;   in slob_next()
   230  slob_t *next;   in slob_page_alloc() local
   233  next = slob_next(cur);   in slob_page_alloc()
   234  set_slob(aligned, avail - delta, next);   in slob_page_alloc()
   241  next = slob_next(cur);   in slob_page_alloc()
  [all …]
mmap.c
   286  struct vm_area_struct *next = vma->vm_next;   in remove_vma() local
   295  return next;   in remove_vma()
   305  struct vm_area_struct *next;   in SYSCALL_DEFINE1() local
   350  next = find_vma(mm, oldbrk);   in SYSCALL_DEFINE1()
   351  if (next && newbrk + PAGE_SIZE > vm_start_gap(next))   in SYSCALL_DEFINE1()
   740  struct vm_area_struct *next;   in __vma_unlink() local
   743  prev->vm_next = next = vma->vm_next;   in __vma_unlink()
   744  if (next)   in __vma_unlink()
   745  next->vm_prev = prev;   in __vma_unlink()
   762  struct vm_area_struct *next = vma->vm_next;   in vma_adjust() local
  [all …]
memory.c
   188  if (batch->next) {   in tlb_next_batch()
   189  tlb->active = batch->next;   in tlb_next_batch()
   201  batch->next = NULL;   in tlb_next_batch()
   205  tlb->active->next = batch;   in tlb_next_batch()
   223  tlb->local.next = NULL;   in tlb_gather_mmu()
   252  for (batch = &tlb->local; batch; batch = batch->next) {   in tlb_flush_mmu_free()
   271  struct mmu_gather_batch *batch, *next;   in tlb_finish_mmu() local
   278  for (batch = tlb->local.next; batch; batch = next) {   in tlb_finish_mmu()
   279  next = batch->next;   in tlb_finish_mmu()
   282  tlb->local.next = NULL;   in tlb_finish_mmu()
  [all …]
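
The tlb_finish_mmu() lines show the usual way to tear down a singly linked chain: copy batch->next into a local before freeing the node, because the link disappears with it. A self-contained sketch of that loop follows; the struct batch type and its fields are simplified stand-ins, not the kernel's mmu_gather structures.

#include <stdio.h>
#include <stdlib.h>

struct batch {
        struct batch *next;
        int id;
};

/* Free every node reachable from 'head'.  'next' must be read before
 * free(), exactly as tlb_finish_mmu() does with batch->next. */
static void free_chain(struct batch *head)
{
        struct batch *batch, *next;

        for (batch = head; batch; batch = next) {
                next = batch->next;
                printf("freeing batch %d\n", batch->id);
                free(batch);
        }
}

int main(void)
{
        struct batch *head = NULL;

        /* Build a three-node chain, newest node first. */
        for (int i = 0; i < 3; i++) {
                struct batch *b = malloc(sizeof(*b));
                if (!b)
                        return 1;
                b->id = i;
                b->next = head;
                head = b;
        }
        free_chain(head);
        return 0;
}
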
vmalloc.c
    72  unsigned long next;   in vunmap_pmd_range() local
    76  next = pmd_addr_end(addr, end);   in vunmap_pmd_range()
    79  vunmap_pte_range(pmd, addr, next);   in vunmap_pmd_range()
    80  } while (pmd++, addr = next, addr != end);   in vunmap_pmd_range()
    86  unsigned long next;   in vunmap_pud_range() local
    90  next = pud_addr_end(addr, end);   in vunmap_pud_range()
    93  vunmap_pmd_range(pud, addr, next);   in vunmap_pud_range()
    94  } while (pud++, addr = next, addr != end);   in vunmap_pud_range()
   100  unsigned long next;   in vunmap_page_range() local
   105  next = pgd_addr_end(addr, end);   in vunmap_page_range()
  [all …]
mprotect.c
   139  unsigned long next;   in change_pmd_range() local
   148  next = pmd_addr_end(addr, end);   in change_pmd_range()
   159  if (next - addr != HPAGE_PMD_SIZE)   in change_pmd_range()
   177  this_pages = change_pte_range(vma, pmd, addr, next, newprot,   in change_pmd_range()
   180  } while (pmd++, addr = next, addr != end);   in change_pmd_range()
   195  unsigned long next;   in change_pud_range() local
   200  next = pud_addr_end(addr, end);   in change_pud_range()
   203  pages += change_pmd_range(vma, pud, addr, next, newprot,   in change_pud_range()
   205  } while (pud++, addr = next, addr != end);   in change_pud_range()
   216  unsigned long next;   in change_protection_range() local
  [all …]
dmapool.c
    72  char *next;   in show_pools() local
    76  next = buf;   in show_pools()
    79  temp = scnprintf(next, size, "poolinfo - 0.1\n");   in show_pools()
    81  next += temp;   in show_pools()
    96  temp = scnprintf(next, size, "%-16s %4u %4Zu %4Zu %2u\n",   in show_pools()
   101  next += temp;   in show_pools()
   212  unsigned int next = offset + pool->size;   in pool_initialise_page() local
   213  if (unlikely((next + pool->size) >= next_boundary)) {   in pool_initialise_page()
   214  next = next_boundary;   in pool_initialise_page()
   217  *(int *)(page->vaddr + offset) = next;   in pool_initialise_page()
  [all …]
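
pool_initialise_page() threads the free list through the page itself: the first bytes of each free block store the offset of the next free block. The sketch below models only that scheme, with made-up PAGE_BYTES/BLOCK_BYTES sizes, and drops the pool->boundary handling that the real code also performs.

#include <stdio.h>
#include <stdlib.h>

#define PAGE_BYTES  256         /* assumed pool page size for the example */
#define BLOCK_BYTES 32          /* assumed object size for the example */

/* Chain the free blocks of a page by storing, at the start of each
 * block, the offset of the block after it -- the idiom used by
 * pool_initialise_page(). */
static void init_page(char *vaddr)
{
        unsigned int offset = 0;

        do {
                unsigned int next = offset + BLOCK_BYTES;

                *(unsigned int *)(vaddr + offset) = next;
                offset = next;
        } while (offset < PAGE_BYTES);
}

int main(void)
{
        char *page = malloc(PAGE_BYTES);
        unsigned int offset = 0;

        if (!page)
                return 1;
        init_page(page);

        /* Walk the free list embedded in the page. */
        while (offset < PAGE_BYTES) {
                printf("free block at offset %u\n", offset);
                offset = *(unsigned int *)(page + offset);
        }
        free(page);
        return 0;
}
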
gup.c
   877  unsigned long next;   in gup_pmd_range() local
   884  next = pmd_addr_end(addr, end);   in gup_pmd_range()
   897  if (!gup_huge_pmd(pmd, pmdp, addr, next, write,   in gup_pmd_range()
   901  } else if (!gup_pte_range(pmd, addr, next, write, pages, nr))   in gup_pmd_range()
   903  } while (pmdp++, addr = next, addr != end);   in gup_pmd_range()
   911  unsigned long next;   in gup_pud_range() local
   918  next = pud_addr_end(addr, end);   in gup_pud_range()
   922  if (!gup_huge_pud(pud, pudp, addr, next, write,   in gup_pud_range()
   925  } else if (!gup_pmd_range(pud, addr, next, write, pages, nr))   in gup_pud_range()
   927  } while (pudp++, addr = next, addr != end);   in gup_pud_range()
  [all …]
mempolicy.c
   557  unsigned long next;   in queue_pages_pmd_range() local
   561  next = pmd_addr_end(addr, end);   in queue_pages_pmd_range()
   572  if (queue_pages_pte_range(vma, pmd, addr, next, nodes,   in queue_pages_pmd_range()
   575  } while (pmd++, addr = next, addr != end);   in queue_pages_pmd_range()
   585  unsigned long next;   in queue_pages_pud_range() local
   589  next = pud_addr_end(addr, end);   in queue_pages_pud_range()
   594  if (queue_pages_pmd_range(vma, pud, addr, next, nodes,   in queue_pages_pud_range()
   597  } while (pud++, addr = next, addr != end);   in queue_pages_pud_range()
   607  unsigned long next;   in queue_pages_pgd_range() local
   611  next = pgd_addr_end(addr, end);   in queue_pages_pgd_range()
  [all …]
util.c
   144  struct vm_area_struct *next;   in __vma_link_list() local
   148  next = prev->vm_next;   in __vma_link_list()
   153  next = rb_entry(rb_parent,   in __vma_link_list()
   156  next = NULL;   in __vma_link_list()
   158  vma->vm_next = next;   in __vma_link_list()
   159  if (next)   in __vma_link_list()
   160  next->vm_prev = vma;   in __vma_link_list()
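
__vma_link_list() appears here almost in full: it splices a VMA into the mm's doubly linked list, fixing vm_next/vm_prev on both sides and tolerating a missing prev or next. Below is a toy model of the same splice on a generic node type; the rb-tree fallback the kernel uses to find next when prev is NULL is replaced here by a plain head pointer.

#include <stdio.h>
#include <stddef.h>

struct node {
        struct node *prev, *next;
        int val;
};

/* Splice 'n' into the list right after 'prev' (or at the head when
 * prev is NULL), patching links in both directions -- the same
 * bookkeeping __vma_link_list() does for vm_next/vm_prev. */
static void link_after(struct node **head, struct node *n, struct node *prev)
{
        struct node *next;

        if (prev) {
                next = prev->next;
                prev->next = n;
        } else {
                next = *head;
                *head = n;
        }
        n->prev = prev;
        n->next = next;
        if (next)
                next->prev = n;
}

int main(void)
{
        struct node a = { .val = 1 }, b = { .val = 2 }, c = { .val = 3 };
        struct node *head = NULL;

        link_after(&head, &a, NULL);    /* list: 1 */
        link_after(&head, &c, &a);      /* list: 1 3 */
        link_after(&head, &b, &a);      /* list: 1 2 3 */

        for (struct node *p = head; p; p = p->next)
                printf("%d\n", p->val);
        return 0;
}
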
swapfile.c
   191  lh = se->list.next;   in discard_swap_cluster()
   449  cluster->next = cluster_next(&cluster->index) *   in scan_swap_map_try_ssd_cluster()
   469  tmp = cluster->next;   in scan_swap_map_try_ssd_cluster()
   482  cluster->next = tmp + 1;   in scan_swap_map_try_ssd_cluster()
   641  struct swap_info_struct *si, *next;   in get_swap_page() local
   651  plist_for_each_entry_safe(si, next, &swap_avail_head, avail_list) {   in get_swap_page()
   692  if (plist_node_empty(&next->avail_list))   in get_swap_page()
   906  page = list_entry(page->lru.next, struct page, lru);   in swp_swapcount()
  1230  unsigned long next;   in unuse_pmd_range() local
  1235  next = pmd_addr_end(addr, end);   in unuse_pmd_range()
  [all …]
mremap.c
   169  unsigned long extent, next, old_end;   in move_page_tables() local
   184  next = (old_addr + PMD_SIZE) & PMD_MASK;   in move_page_tables()
   186  extent = next - old_addr;   in move_page_tables()
   220  next = (new_addr + PMD_SIZE) & PMD_MASK;   in move_page_tables()
   221  if (extent > next - new_addr)   in move_page_tables()
   222  extent = next - new_addr;   in move_page_tables()
zsmalloc.c
   241  void *next;   member
   692  *head = (struct page *)list_entry((*head)->lru.next,   in remove_zspage()
   781  struct page *next;   in get_next_page() local
   784  next = NULL;   in get_next_page()
   786  next = (struct page *)page_private(page);   in get_next_page()
   788  next = list_entry(page->lru.next, struct page, lru);   in get_next_page()
   790  return next;   in get_next_page()
   932  link->next = location_to_obj(page, i++);   in init_zspage()
   942  link->next = location_to_obj(next_page, 0);   in init_zspage()
  1362  first_page->freelist = link->next;   in obj_malloc()
  [all …]
ksm.c
   501  if (rmap_item->hlist.next)   in remove_node_from_stable_tree()
   744  struct list_head *this, *next;   in remove_all_stable_nodes() local
   759  list_for_each_safe(this, next, &migrate_nodes) {   in remove_all_stable_nodes()
   776  ksm_scan.mm_slot = list_entry(ksm_mm_head.mm_list.next,   in unmerge_and_remove_all_rmap_items()
   798  ksm_scan.mm_slot = list_entry(mm_slot->mm_list.next,   in unmerge_and_remove_all_rmap_items()
  1401  if (rmap_item->hlist.next)   in stable_tree_append()
  1569  struct list_head *this, *next;   in scan_get_next_rmap_item() local
  1572  list_for_each_safe(this, next, &migrate_nodes) {   in scan_get_next_rmap_item()
  1586  slot = list_entry(slot->mm_list.next, struct mm_slot, mm_list);   in scan_get_next_rmap_item()
  1656  ksm_scan.mm_slot = list_entry(slot->mm_list.next,   in scan_get_next_rmap_item()
  [all …]
memblock.c
   435  struct memblock_region *next = &type->regions[i + 1];   in memblock_merge_regions() local
   437  if (this->base + this->size != next->base ||   in memblock_merge_regions()
   439  memblock_get_region_node(next) ||   in memblock_merge_regions()
   440  this->flags != next->flags) {   in memblock_merge_regions()
   441  BUG_ON(this->base + this->size > next->base);   in memblock_merge_regions()
   446  this->size += next->size;   in memblock_merge_regions()
   448  memmove(next, next + 1, (type->cnt - (i + 2)) * sizeof(*next));   in memblock_merge_regions()
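
memblock_merge_regions() collapses a region into its neighbour when this ends exactly where next begins and both share node and flags, then shifts the rest of the array down with memmove(). The reduced model below keeps only the base/size check and the memmove step; node ids and flags are left out for brevity.

#include <stdio.h>
#include <string.h>

struct region {
        unsigned long base, size;
};

/* Merge physically contiguous neighbours in place, in the spirit of
 * memblock_merge_regions(); returns the new region count. */
static int merge_regions(struct region *r, int cnt)
{
        int i = 0;

        while (i < cnt - 1) {
                struct region *this = &r[i];
                struct region *next = &r[i + 1];

                if (this->base + this->size != next->base) {
                        i++;            /* not adjacent: keep both */
                        continue;
                }
                this->size += next->size;
                /* shift the tail of the array down over 'next' */
                memmove(next, next + 1, (cnt - (i + 2)) * sizeof(*next));
                cnt--;
        }
        return cnt;
}

int main(void)
{
        struct region r[] = {
                { 0x1000, 0x1000 }, { 0x2000, 0x2000 }, { 0x6000, 0x1000 },
        };
        int cnt = merge_regions(r, 3);

        for (int i = 0; i < cnt; i++)
                printf("[%#lx, %#lx)\n", r[i].base, r[i].base + r[i].size);
        return 0;
}
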
zbud.c
   523  goto next;   in zbud_reclaim_page()
   528  goto next;   in zbud_reclaim_page()
   530  next:   in zbud_reclaim_page()
vmstat.c
  1045  .next = frag_next,
  1064  .next = frag_next,
  1157  .next = frag_next,
  1239  .next = vmstat_next,
  1524  .next = frag_next,
  1576  .next = frag_next,
huge_memory.c
  2661  mm_slot = list_entry(khugepaged_scan.mm_head.next,   in khugepaged_scan_mm_slot()
  2737  if (mm_slot->mm_node.next != &khugepaged_scan.mm_head) {   in khugepaged_scan_mm_slot()
  2739  mm_slot->mm_node.next,   in khugepaged_scan_mm_slot()
  2981  struct vm_area_struct *next = vma->vm_next;   in __vma_adjust_trans_huge() local
  2982  unsigned long nstart = next->vm_start;   in __vma_adjust_trans_huge()
  2985  (nstart & HPAGE_PMD_MASK) >= next->vm_start &&   in __vma_adjust_trans_huge()
  2986  (nstart & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= next->vm_end)   in __vma_adjust_trans_huge()
  2987  split_huge_page_address(next->vm_mm, nstart);   in __vma_adjust_trans_huge()
memcontrol.c
   976  unsigned long val, next;   in mem_cgroup_event_ratelimit() local
   979  next = __this_cpu_read(memcg->stat->targets[target]);   in mem_cgroup_event_ratelimit()
   981  if ((long)next - (long)val < 0) {   in mem_cgroup_event_ratelimit()
   984  next = val + THRESHOLDS_EVENTS_TARGET;   in mem_cgroup_event_ratelimit()
   987  next = val + SOFTLIMIT_EVENTS_TARGET;   in mem_cgroup_event_ratelimit()
   990  next = val + NUMAINFO_EVENTS_TARGET;   in mem_cgroup_event_ratelimit()
   995  __this_cpu_write(memcg->stat->targets[target], next);   in mem_cgroup_event_ratelimit()
  6491  struct list_head *next;   in uncharge_list() local
  6494  next = page_list->next;   in uncharge_list()
  6499  page = list_entry(next, struct page, lru);   in uncharge_list()
  [all …]
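
mem_cgroup_event_ratelimit() keeps a per-target next threshold and reports true once the running event counter has passed it; the (long)next - (long)val < 0 form keeps the test correct even after the unsigned counter wraps. Here is that pattern in isolation, with an arbitrary EVENTS_TARGET step chosen only for the demo.

#include <stdio.h>
#include <stdbool.h>

#define EVENTS_TARGET 16UL      /* arbitrary demo threshold step */

/* Return true each time 'val' has moved past the stored target, then
 * push the target forward -- the ratelimit pattern visible in
 * mem_cgroup_event_ratelimit().  The signed subtraction keeps working
 * across unsigned wrap-around. */
static bool event_ratelimit(unsigned long val, unsigned long *next)
{
        if ((long)*next - (long)val < 0) {
                *next = val + EVENTS_TARGET;
                return true;
        }
        return false;
}

int main(void)
{
        unsigned long next = 0;

        for (unsigned long val = 0; val < 64; val += 10)
                if (event_ratelimit(val, &next))
                        printf("fired at val=%lu, next target %lu\n", val, next);
        return 0;
}
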
percpu.c
   345  int next = chunk->map[i + 1];   in pcpu_count_occupied_pages() local
   348  if (!(next & 1) && nend >= round_up(end, PAGE_SIZE))   in pcpu_count_occupied_pages()
  1115  struct pcpu_chunk *chunk, *next;   in pcpu_balance_workfn() local
  1125  list_for_each_entry_safe(chunk, next, free_head, list) {   in pcpu_balance_workfn()
  1138  list_for_each_entry_safe(chunk, next, &to_free, list) {   in pcpu_balance_workfn()
slab.c
  2789  entry = n->slabs_partial.next;   in cache_alloc_refill()
  2792  entry = n->slabs_free.next;   in cache_alloc_refill()
  3093  entry = n->slabs_partial.next;   in ____cache_alloc_node()
  3096  entry = n->slabs_free.next;   in ____cache_alloc_node()
  3333  p = n->slabs_free.next;   in cache_flusharray()
  3341  p = p->next;   in cache_flusharray()
  3650  if (!cachep->list.next) {   in alloc_kmem_cache_node()
  3875  goto next;   in cache_reap()
  3890  next:   in cache_reap()
  4181  .next = slab_next,
shmem.c
    93  pgoff_t next; /* the next page offset to be fallocated */   member
   637  if (shmem_swaplist.next != &info->swaplist)   in shmem_unuse_inode()
   697  struct list_head *this, *next;   in shmem_unuse() local
   721  list_for_each_safe(this, next, &shmem_swaplist) {   in shmem_unuse()
   798  index < shmem_falloc->next)   in shmem_writepage()
  1309  vmf->pgoff < shmem_falloc->next) {   in shmem_fault()
  2081  shmem_falloc.next = (unmap_end + 1) >> PAGE_SHIFT;   in shmem_fallocate()
  2120  shmem_falloc.next = start;   in shmem_fallocate()
  2155  shmem_falloc.next++;   in shmem_fallocate()
compaction.c
    43  struct page *page, *next;   in release_freepages() local
    46  list_for_each_entry_safe(page, next, freelist, lru) {   in release_freepages()
   971  freepage = list_entry(cc->freepages.next, struct page, lru);   in compaction_alloc()
mlock.c
   534  goto next;   in munlock_vma_pages_range()
   541  next:   in munlock_vma_pages_range()