/mm/
Cross-reference of the identifier addr, one entry per file. Each hit is listed as
"line: code  [in function(), argument|local]"; "argument" and "local" mark where addr
is defined, and "[all …]" marks entries truncated in the source listing.

vmalloc.c
    58: static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)  [in vunmap_pte_range(), argument]
    62: pte = pte_offset_kernel(pmd, addr);  [in vunmap_pte_range()]
    64: pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);  [in vunmap_pte_range()]
    66: } while (pte++, addr += PAGE_SIZE, addr != end);  [in vunmap_pte_range()]
    69: static void vunmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end)  [in vunmap_pmd_range(), argument]
    74: pmd = pmd_offset(pud, addr);  [in vunmap_pmd_range()]
    76: next = pmd_addr_end(addr, end);  [in vunmap_pmd_range()]
    79: vunmap_pte_range(pmd, addr, next);  [in vunmap_pmd_range()]
    80: } while (pmd++, addr = next, addr != end);  [in vunmap_pmd_range()]
    83: static void vunmap_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end)  [in vunmap_pud_range(), argument]
    [all …]

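The vunmap_pte_range()/vunmap_pmd_range() hits above all follow the kernel's two-level range-walk idiom: the outer loop advances addr to the next block boundary clamped to end, the inner loop steps one PAGE_SIZE at a time. Below is a minimal userspace sketch of that loop shape, not the kernel code itself; the 2 MiB block size and the visit_page() callback are illustrative assumptions, and addr is assumed page-aligned and below end.

```c
#include <stdio.h>

#define PAGE_SIZE 4096UL
#define BLOCK_SIZE (512UL * PAGE_SIZE)     /* assumed 2 MiB blocks, like a PMD */
#define BLOCK_MASK (~(BLOCK_SIZE - 1))

/* analogue of pmd_addr_end(): next block boundary after addr, clamped to end */
static unsigned long block_addr_end(unsigned long addr, unsigned long end)
{
    unsigned long boundary = (addr + BLOCK_SIZE) & BLOCK_MASK;

    return boundary - 1 < end - 1 ? boundary : end;
}

static void visit_page(unsigned long addr)
{
    printf("page at 0x%lx\n", addr);
}

static void walk_range(unsigned long addr, unsigned long end)
{
    unsigned long next;

    do {
        next = block_addr_end(addr, end);
        /* inner loop mirrors "} while (pte++, addr += PAGE_SIZE, addr != end)" */
        unsigned long a = addr;
        do {
            visit_page(a);
        } while (a += PAGE_SIZE, a != next);
    } while (addr = next, addr != end);
}

int main(void)
{
    walk_range(0x40000000UL, 0x40000000UL + 5 * PAGE_SIZE);
    return 0;
}
```
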
pagewalk.c
    6: static int walk_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,  [in walk_pte_range(), argument]
    12: pte = pte_offset_map(pmd, addr);  [in walk_pte_range()]
    14: err = walk->pte_entry(pte, addr, addr + PAGE_SIZE, walk);  [in walk_pte_range()]
    17: addr += PAGE_SIZE;  [in walk_pte_range()]
    18: if (addr == end)  [in walk_pte_range()]
    27: static int walk_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,  [in walk_pmd_range(), argument]
    34: pmd = pmd_offset(pud, addr);  [in walk_pmd_range()]
    37: next = pmd_addr_end(addr, end);  [in walk_pmd_range()]
    40: err = walk->pte_hole(addr, next, walk);  [in walk_pmd_range()]
    50: err = walk->pmd_entry(pmd, addr, next, walk);  [in walk_pmd_range()]
    [all …]

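pagewalk.c wraps the same walk in a callback API: the caller supplies pte_entry, pmd_entry, and pte_hole hooks and the walker invokes them per present page or per gap. A rough userspace sketch of that callback structure, assuming a simplified mm_walk_sketch struct and an is_hole() predicate that are not the kernel's definitions:

```c
#include <stddef.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL

struct mm_walk_sketch {
    int (*pte_entry)(unsigned long addr, unsigned long end, void *private);
    int (*pte_hole)(unsigned long addr, unsigned long end, void *private);
    void *private;
};

/* assumption for the demo: pretend every other page is unmapped */
static int is_hole(unsigned long addr)
{
    return (addr / PAGE_SIZE) % 2;
}

static int walk_page_range_sketch(unsigned long addr, unsigned long end,
                                  struct mm_walk_sketch *walk)
{
    int err = 0;

    for (; addr != end && !err; addr += PAGE_SIZE) {
        if (is_hole(addr) && walk->pte_hole)
            err = walk->pte_hole(addr, addr + PAGE_SIZE, walk->private);
        else if (walk->pte_entry)
            err = walk->pte_entry(addr, addr + PAGE_SIZE, walk->private);
    }
    return err;
}

static int print_entry(unsigned long addr, unsigned long end, void *p)
{
    (void)p;
    printf("present: 0x%lx-0x%lx\n", addr, end);
    return 0;
}

static int print_hole(unsigned long addr, unsigned long end, void *p)
{
    (void)p;
    printf("hole:    0x%lx-0x%lx\n", addr, end);
    return 0;
}

int main(void)
{
    struct mm_walk_sketch walk = { print_entry, print_hole, NULL };

    return walk_page_range_sketch(0, 8 * PAGE_SIZE, &walk);
}
```
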
mincore.c
    23: unsigned long addr, unsigned long end,  [in mincore_hugetlb_page_range(), argument]
    38: addr & huge_page_mask(h));  [in mincore_hugetlb_page_range()]
    43: addr += PAGE_SIZE;  [in mincore_hugetlb_page_range()]
    44: if (addr == end)  [in mincore_hugetlb_page_range()]
    47: if (!(addr & ~huge_page_mask(h)))  [in mincore_hugetlb_page_range()]
    98: unsigned long addr, unsigned long end,  [in mincore_unmapped_range(), argument]
    101: unsigned long nr = (end - addr) >> PAGE_SHIFT;  [in mincore_unmapped_range()]
    107: pgoff = linear_page_index(vma, addr);  [in mincore_unmapped_range()]
    117: unsigned long addr, unsigned long end,  [in mincore_pte_range(), argument]
    124: ptep = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);  [in mincore_pte_range()]
    [all …]

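mincore_pte_range() and the neighbouring helpers are the kernel side of the mincore(2) syscall, which reports which pages of a mapping are resident. A small userspace demo of the syscall itself (a standard Linux API, not code from mm/mincore.c):

```c
#define _DEFAULT_SOURCE
#include <stdio.h>
#include <unistd.h>
#include <sys/mman.h>

int main(void)
{
    long page = sysconf(_SC_PAGESIZE);
    size_t len = 4 * (size_t)page;
    unsigned char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
                              MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    unsigned char vec[4];          /* one byte per page of the range */

    if (buf == MAP_FAILED)
        return 1;

    /* touch pages 0 and 2 so they become resident */
    buf[0] = 1;
    buf[2 * page] = 1;

    if (mincore(buf, len, vec) != 0)
        return 1;

    for (int i = 0; i < 4; i++)
        printf("page %d: %s\n", i, (vec[i] & 1) ? "resident" : "not resident");

    munmap(buf, len);
    return 0;
}
```
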
memory.c
    391: unsigned long addr)  [in free_pte_range(), argument]
    395: pte_free_tlb(tlb, token, addr);  [in free_pte_range()]
    400: unsigned long addr, unsigned long end,  [in free_pmd_range(), argument]
    407: start = addr;  [in free_pmd_range()]
    408: pmd = pmd_offset(pud, addr);  [in free_pmd_range()]
    410: next = pmd_addr_end(addr, end);  [in free_pmd_range()]
    413: free_pte_range(tlb, pmd, addr);  [in free_pmd_range()]
    414: } while (pmd++, addr = next, addr != end);  [in free_pmd_range()]
    433: unsigned long addr, unsigned long end,  [in free_pud_range(), argument]
    440: start = addr;  [in free_pud_range()]
    [all …]

mprotect.c
    40: unsigned long addr, int prot_numa, spinlock_t **ptl)  [in lock_pte_protection(), argument]
    47: return pte_offset_map_lock(vma->vm_mm, pmd, addr, ptl);  [in lock_pte_protection()]
    55: pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, ptl);  [in lock_pte_protection()]
    61: unsigned long addr, unsigned long end, pgprot_t newprot,  [in change_pte_range(), argument]
    69: pte = lock_pte_protection(vma, pmd, addr, prot_numa, &ptl);  [in change_pte_range()]
    81: ptent = ptep_modify_prot_start(mm, addr, pte);  [in change_pte_range()]
    93: ptep_modify_prot_commit(mm, addr, pte, ptent);  [in change_pte_range()]
    98: page = vm_normal_page(vma, addr, oldpte);  [in change_pte_range()]
    101: ptep_set_numa(mm, addr, pte);  [in change_pte_range()]
    121: set_pte_at(mm, addr, pte, newpte);  [in change_pte_range()]
    [all …]

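change_pte_range() is part of the path behind mprotect(2): it rewrites the PTEs of a range with the new protection bits. A minimal userspace illustration of the syscall (standard POSIX API, not kernel code):

```c
#define _DEFAULT_SOURCE
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>

int main(void)
{
    long page = sysconf(_SC_PAGESIZE);
    char *p = mmap(NULL, page, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

    if (p == MAP_FAILED)
        return 1;

    strcpy(p, "hello");                  /* writable for now */

    if (mprotect(p, page, PROT_READ))    /* kernel rewrites the PTEs here */
        return 1;

    printf("%s (now read-only; a store to p would fault)\n", p);
    munmap(p, page);
    return 0;
}
```
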
mmap.c
    53: #define arch_mmap_check(addr, len, flags) (0)  [argument]
    57: #define arch_rebalance_pgtables(addr, len) (addr)  [argument]
    298: static unsigned long do_brk(unsigned long addr, unsigned long len);
    582: static int find_vma_links(struct mm_struct *mm, unsigned long addr,  [in find_vma_links(), argument]
    597: if (vma_tmp->vm_end > addr) {  [in find_vma_links()]
    617: unsigned long addr, unsigned long end)  [in count_vma_pages_range(), argument]
    623: vma = find_vma_intersection(mm, addr, end);  [in count_vma_pages_range()]
    628: max(addr, vma->vm_start)) >> PAGE_SHIFT;  [in count_vma_pages_range()]
    1073: struct vm_area_struct *prev, unsigned long addr,  [in vma_merge(), argument]
    1079: pgoff_t pglen = (end - addr) >> PAGE_SHIFT;  [in vma_merge()]
    [all …]

nommu.c
    239: void vfree(const void *addr)  [in vfree(), argument]
    241: kfree(addr);  [in vfree()]
    275: struct page *vmalloc_to_page(const void *addr)  [in vmalloc_to_page(), argument]
    277: return virt_to_page(addr);  [in vmalloc_to_page()]
    281: unsigned long vmalloc_to_pfn(const void *addr)  [in vmalloc_to_pfn(), argument]
    283: return page_to_pfn(virt_to_page(addr));  [in vmalloc_to_pfn()]
    287: long vread(char *buf, char *addr, unsigned long count)  [in vread(), argument]
    293: memcpy(buf, addr, count);  [in vread()]
    297: long vwrite(char *buf, char *addr, unsigned long count)  [in vwrite(), argument]
    300: if ((unsigned long) addr + count < count)  [in vwrite()]
    [all …]

mremap.c
    31: static pmd_t *get_old_pmd(struct mm_struct *mm, unsigned long addr)  [in get_old_pmd(), argument]
    37: pgd = pgd_offset(mm, addr);  [in get_old_pmd()]
    41: pud = pud_offset(pgd, addr);  [in get_old_pmd()]
    45: pmd = pmd_offset(pud, addr);  [in get_old_pmd()]
    53: unsigned long addr)  [in alloc_new_pmd(), argument]
    59: pgd = pgd_offset(mm, addr);  [in alloc_new_pmd()]
    60: pud = pud_alloc(mm, pgd, addr);  [in alloc_new_pmd()]
    64: pmd = pmd_alloc(mm, pud, addr);  [in alloc_new_pmd()]
    336: static struct vm_area_struct *vma_to_resize(unsigned long addr,  [in vma_to_resize(), argument]
    340: struct vm_area_struct *vma = find_vma(mm, addr);  [in vma_to_resize()]
    [all …]

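get_old_pmd()/alloc_new_pmd() locate the source page tables and allocate destination ones when mremap(2) moves or grows a mapping. A minimal userspace demo of the syscall itself (standard Linux API; the sizes are arbitrary):

```c
#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>

int main(void)
{
    long page = sysconf(_SC_PAGESIZE);
    char *p = mmap(NULL, page, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

    if (p == MAP_FAILED)
        return 1;
    strcpy(p, "kept across the resize");

    /* grow to 4 pages; the kernel may move the mapping, hence MAYMOVE */
    char *q = mremap(p, page, 4 * page, MREMAP_MAYMOVE);
    if (q == MAP_FAILED)
        return 1;

    printf("%p -> %p: %s\n", (void *)p, (void *)q, q);
    munmap(q, 4 * page);
    return 0;
}
```
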
sparse-vmemmap.c
    101: pte_t * __meminit vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node)  [in vmemmap_pte_populate(), argument]
    103: pte_t *pte = pte_offset_kernel(pmd, addr);  [in vmemmap_pte_populate()]
    110: set_pte_at(&init_mm, addr, pte, entry);  [in vmemmap_pte_populate()]
    115: pmd_t * __meminit vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node)  [in vmemmap_pmd_populate(), argument]
    117: pmd_t *pmd = pmd_offset(pud, addr);  [in vmemmap_pmd_populate()]
    127: pud_t * __meminit vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node)  [in vmemmap_pud_populate(), argument]
    129: pud_t *pud = pud_offset(pgd, addr);  [in vmemmap_pud_populate()]
    139: pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node)  [in vmemmap_pgd_populate(), argument]
    141: pgd_t *pgd = pgd_offset_k(addr);  [in vmemmap_pgd_populate()]
    154: unsigned long addr = start;  [in vmemmap_populate_basepages(), local]
    [all …]

gup.c
    666: struct page *get_dump_page(unsigned long addr)  [in get_dump_page(), argument]
    671: if (__get_user_pages(current, current->mm, addr, 1,  [in get_dump_page()]
    675: flush_cache_page(vma, addr, page_to_pfn(page));  [in get_dump_page()]
    719: static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,  [in gup_pte_range(), argument]
    725: ptem = ptep = pte_offset_map(&pmd, addr);  [in gup_pte_range()]
    759: } while (ptep++, addr += PAGE_SIZE, addr != end);  [in gup_pte_range()]
    778: static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,  [in gup_pte_range(), argument]
    785: static int gup_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,  [in gup_huge_pmd(), argument]
    796: page = head + ((addr & ~PMD_MASK) >> PAGE_SHIFT);  [in gup_huge_pmd()]
    804: } while (addr += PAGE_SIZE, addr != end);  [in gup_huge_pmd()]
    [all …]

fremap.c
    32: unsigned long addr, pte_t *ptep)  [in zap_pte(), argument]
    39: flush_cache_page(vma, addr, pte_pfn(pte));  [in zap_pte()]
    40: pte = ptep_clear_flush(vma, addr, ptep);  [in zap_pte()]
    41: page = vm_normal_page(vma, addr, pte);  [in zap_pte()]
    64: pte_clear_not_present_full(mm, addr, ptep, 0);  [in zap_pte()]
    73: unsigned long addr, unsigned long pgoff, pgprot_t prot)  [in install_file_pte(), argument]
    79: pte = get_locked_pte(mm, addr, &ptl);  [in install_file_pte()]
    86: zap_pte(mm, vma, addr, pte);  [in install_file_pte()]
    88: set_pte_at(mm, addr, pte, pte_file_mksoft_dirty(ptfile));  [in install_file_pte()]
    102: int generic_file_remap_pages(struct vm_area_struct *vma, unsigned long addr,  [in generic_file_remap_pages(), argument]
    [all …]

ksm.c
    364: static int break_ksm(struct vm_area_struct *vma, unsigned long addr)  [in break_ksm(), argument]
    371: page = follow_page(vma, addr, FOLL_GET | FOLL_MIGRATION);  [in break_ksm()]
    375: ret = handle_mm_fault(vma->vm_mm, vma, addr,  [in break_ksm()]
    413: unsigned long addr)  [in find_mergeable_vma(), argument]
    418: vma = find_vma(mm, addr);  [in find_mergeable_vma()]
    419: if (!vma || vma->vm_start > addr)  [in find_mergeable_vma()]
    429: unsigned long addr = rmap_item->address;  [in break_cow(), local]
    439: vma = find_mergeable_vma(mm, addr);  [in break_cow()]
    441: break_ksm(vma, addr);  [in break_cow()]
    462: unsigned long addr = rmap_item->address;  [in get_mergeable_page(), local]
    [all …]

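break_ksm() and find_mergeable_vma() operate on ranges that userspace has opted in to kernel samepage merging with madvise(MADV_MERGEABLE). A small demo of that opt-in (standard Linux API; it only has an effect when CONFIG_KSM is enabled and ksmd is running):

```c
#define _DEFAULT_SOURCE
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>

int main(void)
{
    long page = sysconf(_SC_PAGESIZE);
    size_t len = 16 * (size_t)page;
    char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

    if (p == MAP_FAILED)
        return 1;

    memset(p, 0x5a, len);            /* identical content: merge candidates */

    /* ask ksmd to scan and merge duplicate pages in this range */
    if (madvise(p, len, MADV_MERGEABLE))
        perror("madvise(MADV_MERGEABLE)");

    munmap(p, len);
    return 0;
}
```
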
mempolicy.c
    485: unsigned long addr, unsigned long end,  [in queue_pages_pte_range(), argument]
    493: orig_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);  [in queue_pages_pte_range()]
    500: page = vm_normal_page(vma, addr, *pte);  [in queue_pages_pte_range()]
    517: } while (pte++, addr += PAGE_SIZE, addr != end);  [in queue_pages_pte_range()]
    519: return addr != end;  [in queue_pages_pte_range()]
    552: unsigned long addr, unsigned long end,  [in queue_pages_pmd_range(), argument]
    559: pmd = pmd_offset(pud, addr);  [in queue_pages_pmd_range()]
    561: next = pmd_addr_end(addr, end);  [in queue_pages_pmd_range()]
    569: split_huge_page_pmd(vma, addr, pmd);  [in queue_pages_pmd_range()]
    572: if (queue_pages_pte_range(vma, pmd, addr, next, nodes,  [in queue_pages_pmd_range()]
    [all …]

page_cgroup.c
    112: void *addr = NULL;  [in alloc_page_cgroup(), local]
    114: addr = alloc_pages_exact_nid(nid, size, flags);  [in alloc_page_cgroup()]
    115: if (addr) {  [in alloc_page_cgroup()]
    116: kmemleak_alloc(addr, size, 1, flags);  [in alloc_page_cgroup()]
    117: return addr;  [in alloc_page_cgroup()]
    121: addr = vzalloc_node(size, nid);  [in alloc_page_cgroup()]
    123: addr = vzalloc(size);  [in alloc_page_cgroup()]
    125: return addr;  [in alloc_page_cgroup()]
    164: static void free_page_cgroup(void *addr)  [in free_page_cgroup(), argument]
    166: if (is_vmalloc_addr(addr)) {  [in free_page_cgroup()]
    [all …]

nobootmem.c
    39: u64 addr;  [in __alloc_memory_core_early(), local]
    44: addr = memblock_find_in_range_node(size, align, goal, limit, nid);  [in __alloc_memory_core_early()]
    45: if (!addr)  [in __alloc_memory_core_early()]
    48: if (memblock_reserve(addr, size))  [in __alloc_memory_core_early()]
    51: ptr = phys_to_virt(addr);  [in __alloc_memory_core_early()]
    70: void __init free_bootmem_late(unsigned long addr, unsigned long size)  [in free_bootmem_late(), argument]
    74: kmemleak_free_part(__va(addr), size);  [in free_bootmem_late()]
    76: cursor = PFN_UP(addr);  [in free_bootmem_late()]
    77: end = PFN_DOWN(addr + size);  [in free_bootmem_late()]
    216: void __init free_bootmem(unsigned long addr, unsigned long size)  [in free_bootmem(), argument]
    [all …]

percpu.c
    87: #define __addr_to_pcpu_ptr(addr) \  [argument]
    88: (void __percpu *)((unsigned long)(addr) - \
    100: #define __addr_to_pcpu_ptr(addr) (void __percpu *)(addr)  [argument]
    193: static bool pcpu_addr_in_first_chunk(void *addr)  [in pcpu_addr_in_first_chunk(), argument]
    197: return addr >= first_start && addr < first_start + pcpu_unit_size;  [in pcpu_addr_in_first_chunk()]
    200: static bool pcpu_addr_in_reserved_chunk(void *addr)  [in pcpu_addr_in_reserved_chunk(), argument]
    204: return addr >= first_start &&  [in pcpu_addr_in_reserved_chunk()]
    205: addr < first_start + pcpu_reserved_chunk_limit;  [in pcpu_addr_in_reserved_chunk()]
    820: static struct page *pcpu_addr_to_page(void *addr);
    836: static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)  [in pcpu_chunk_addr_search(), argument]
    [all …]

early_ioremap.c
    159: void __init early_iounmap(void __iomem *addr, unsigned long size)  [in early_iounmap(), argument]
    169: if (prev_map[i] == addr) {  [in early_iounmap()]
    176: addr, size))  [in early_iounmap()]
    181: addr, size, slot, prev_size[slot]))  [in early_iounmap()]
    185: addr, size, slot);  [in early_iounmap()]
    187: virt_addr = (unsigned long)addr;  [in early_iounmap()]
    235: void __init early_iounmap(void __iomem *addr, unsigned long size)  [in early_iounmap(), argument]
    242: void __init early_memunmap(void *addr, unsigned long size)  [in early_memunmap(), argument]
    244: early_iounmap((__force void __iomem *)addr, size);  [in early_memunmap()]

debug-pagealloc.c
    26: void *addr = kmap_atomic(page);  [in poison_page(), local]
    29: memset(addr, PAGE_POISON, PAGE_SIZE);  [in poison_page()]
    30: kunmap_atomic(addr);  [in poison_page()]
    77: void *addr;  [in unpoison_page(), local]
    82: addr = kmap_atomic(page);  [in unpoison_page()]
    83: check_poison_mem(addr, PAGE_SIZE);  [in unpoison_page()]
    85: kunmap_atomic(addr);  [in unpoison_page()]

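poison_page()/unpoison_page() implement the debug-pagealloc pattern: fill a page with a poison byte when it is freed and verify the pattern before it is reused, so stray writes to freed pages are caught. A tiny userspace analogue of that idea; the poison value and sizes here are illustrative, not the kernel's constants.

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PAGE_SIZE   4096
#define POISON_BYTE 0xaa    /* assumed poison value, for illustration only */

/* "free" path: overwrite the whole page with the poison pattern */
static void poison(void *addr)
{
    memset(addr, POISON_BYTE, PAGE_SIZE);
}

/* "reuse" path: confirm nothing wrote to the page while it was free */
static int check_poison(const unsigned char *addr)
{
    for (size_t i = 0; i < PAGE_SIZE; i++)
        if (addr[i] != POISON_BYTE)
            return 0;   /* someone wrote here after the "free" */
    return 1;
}

int main(void)
{
    unsigned char *page = malloc(PAGE_SIZE);

    if (!page)
        return 1;
    poison(page);                    /* on free */
    page[100] = 0x42;                /* simulated use-after-free write */
    printf("poison intact: %s\n", check_poison(page) ? "yes" : "no");
    free(page);
    return 0;
}
```
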
hugetlb.c
    1358: struct vm_area_struct *vma, unsigned long addr)  [in vma_needs_reservation(), argument]
    1368: idx = vma_hugecache_offset(h, vma, addr);  [in vma_needs_reservation()]
    1377: struct vm_area_struct *vma, unsigned long addr)  [in vma_commit_reservation(), argument]
    1386: idx = vma_hugecache_offset(h, vma, addr);  [in vma_commit_reservation()]
    1391: unsigned long addr, int avoid_reserve)  [in alloc_huge_page(), argument]
    1409: chg = vma_needs_reservation(h, vma, addr);  [in alloc_huge_page()]
    1421: page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve, chg);  [in alloc_huge_page()]
    1437: vma_commit_reservation(h, vma, addr);  [in alloc_huge_page()]
    1454: unsigned long addr, int avoid_reserve)  [in alloc_huge_page_noerr(), argument]
    1456: struct page *page = alloc_huge_page(vma, addr, avoid_reserve);  [in alloc_huge_page_noerr()]
    [all …]

migrate.c
    107: unsigned long addr, void *old)  [in remove_migration_pte(), argument]
    116: ptep = huge_pte_offset(mm, addr);  [in remove_migration_pte()]
    121: pmd = mm_find_pmd(mm, addr);  [in remove_migration_pte()]
    125: ptep = pte_offset_map(pmd, addr);  [in remove_migration_pte()]
    162: set_pte_at(mm, addr, ptep, pte);  [in remove_migration_pte()]
    166: hugepage_add_anon_rmap(new, vma, addr);  [in remove_migration_pte()]
    170: page_add_anon_rmap(new, vma, addr);  [in remove_migration_pte()]
    175: update_mmu_cache(vma, addr, ptep);  [in remove_migration_pte()]
    201: unsigned long addr;  [in remove_linear_migration_ptes_from_nonlinear(), local]
    206: addr = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);  [in remove_linear_migration_ptes_from_nonlinear()]
    [all …]

iov_iter.c
    453: unsigned long addr;  [in get_pages_iovec(), local]
    462: addr = (unsigned long)iov->iov_base + offset;  [in get_pages_iovec()]
    463: len += *start = addr & (PAGE_SIZE - 1);  [in get_pages_iovec()]
    466: addr &= ~(PAGE_SIZE - 1);  [in get_pages_iovec()]
    468: res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, pages);  [in get_pages_iovec()]
    481: unsigned long addr;  [in get_pages_alloc_iovec(), local]
    491: addr = (unsigned long)iov->iov_base + offset;  [in get_pages_alloc_iovec()]
    492: len += *start = addr & (PAGE_SIZE - 1);  [in get_pages_alloc_iovec()]
    493: addr &= ~(PAGE_SIZE - 1);  [in get_pages_alloc_iovec()]
    502: res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, p);  [in get_pages_alloc_iovec()]
    [all …]

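The get_pages_iovec() hits do the standard "how many pages does this byte range touch" arithmetic: remember the offset into the first page, round the address down, and round the offset-adjusted length up to whole pages. A standalone worked example of that arithmetic (the address and length are arbitrary):

```c
#include <stdio.h>

#define PAGE_SIZE 4096UL

int main(void)
{
    unsigned long addr = 0x7f0000001f00UL;   /* arbitrary user address */
    unsigned long len  = 5000;               /* bytes requested */

    unsigned long start   = addr & (PAGE_SIZE - 1);   /* offset in first page */
    unsigned long aligned = addr & ~(PAGE_SIZE - 1);  /* page-aligned base */
    unsigned long npages  = (len + start + PAGE_SIZE - 1) / PAGE_SIZE;

    printf("aligned addr 0x%lx, offset %lu, pages needed %lu\n",
           aligned, start, npages);
    /* offset 0xf00 (3840) into the first page, so 5000 bytes span 3 pages */
    return 0;
}
```
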
memblock.c
    280: phys_addr_t *addr)  [in get_allocated_memblock_reserved_regions_info(), argument]
    285: *addr = __pa(memblock.reserved.regions);  [in get_allocated_memblock_reserved_regions_info()]
    292: phys_addr_t *addr)  [in get_allocated_memblock_memory_regions_info(), argument]
    297: *addr = __pa(memblock.memory.regions);  [in get_allocated_memblock_memory_regions_info()]
    326: phys_addr_t old_size, new_size, addr;  [in memblock_double_array(), local]
    365: addr = new_array ? __pa(new_array) : 0;  [in memblock_double_array()]
    371: addr = memblock_find_in_range(new_area_start + new_area_size,  [in memblock_double_array()]
    374: if (!addr && new_area_size)  [in memblock_double_array()]
    375: addr = memblock_find_in_range(0,  [in memblock_double_array()]
    379: new_array = addr ? __va(addr) : NULL;  [in memblock_double_array()]
    [all …]

util.c
    254: unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr,  [in vm_mmap_pgoff(), argument]
    265: ret = do_mmap_pgoff(file, addr, len, prot, flag, pgoff,  [in vm_mmap_pgoff()]
    274: unsigned long vm_mmap(struct file *file, unsigned long addr,  [in vm_mmap(), argument]
    283: return vm_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);  [in vm_mmap()]
    287: void kvfree(const void *addr)  [in kvfree(), argument]
    289: if (is_vmalloc_addr(addr))  [in kvfree()]
    290: vfree(addr);  [in kvfree()]
    292: kfree(addr);  [in kvfree()]

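kvfree() frees a buffer with whichever allocator produced it, checking is_vmalloc_addr() to pick vfree() or kfree(). Below is a userspace analogue of the same "small allocations via the fast path, large ones via another mechanism, free accordingly" idea; unlike the kernel, it carries an explicit flag instead of deciding from the address, and the 64 KiB cutoff is an arbitrary assumption.

```c
#define _DEFAULT_SOURCE
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>

#define FAST_PATH_MAX (64 * 1024)   /* assumed cutoff between the two paths */

struct kvbuf {
    void  *addr;
    size_t size;
    int    mmapped;
};

static int kvbuf_alloc(struct kvbuf *b, size_t size)
{
    b->size = size;
    b->mmapped = size > FAST_PATH_MAX;
    if (b->mmapped) {
        b->addr = mmap(NULL, size, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        return b->addr == MAP_FAILED ? -1 : 0;
    }
    b->addr = malloc(size);
    return b->addr ? 0 : -1;
}

/* free with the allocator that was actually used, like kvfree() does */
static void kvbuf_free(struct kvbuf *b)
{
    if (b->mmapped)
        munmap(b->addr, b->size);
    else
        free(b->addr);
}

int main(void)
{
    struct kvbuf small, big;

    if (kvbuf_alloc(&small, 4096) || kvbuf_alloc(&big, 1 << 20))
        return 1;
    memset(small.addr, 0, small.size);
    memset(big.addr, 0, big.size);
    kvbuf_free(&small);
    kvbuf_free(&big);
    return 0;
}
```
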
vmacache.c
    58: void vmacache_update(unsigned long addr, struct vm_area_struct *newvma)  [in vmacache_update(), argument]
    61: current->vmacache[VMACACHE_HASH(addr)] = newvma;  [in vmacache_update()]
    84: struct vm_area_struct *vmacache_find(struct mm_struct *mm, unsigned long addr)  [in vmacache_find(), argument]
    100: if (vma->vm_start <= addr && vma->vm_end > addr) {  [in vmacache_find()]

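vmacache_update()/vmacache_find() keep a tiny per-task, direct-mapped cache of recently used VMAs indexed by a hash of the address, and only trust a slot if the cached VMA still contains the address. A userspace sketch of that idea with a 4-slot cache of [start, end) ranges; the hash below (page number modulo slot count) mirrors the idea but is not claimed to be the kernel's VMACACHE_HASH().

```c
#include <stdio.h>

#define PAGE_SHIFT  12
#define CACHE_SLOTS 4

struct range {
    unsigned long start, end;   /* [start, end); 0/0 means an empty slot */
};

static struct range cache[CACHE_SLOTS];

static unsigned int slot_for(unsigned long addr)
{
    return (addr >> PAGE_SHIFT) & (CACHE_SLOTS - 1);
}

static void cache_update(unsigned long addr, struct range r)
{
    cache[slot_for(addr)] = r;
}

/* only trust the slot if the cached range really contains addr */
static struct range *cache_find(unsigned long addr)
{
    struct range *r = &cache[slot_for(addr)];

    if (r->start <= addr && addr < r->end)
        return r;
    return NULL;
}

int main(void)
{
    struct range r = { 0x400000, 0x600000 };

    cache_update(0x401000, r);
    printf("hit:  %p\n", (void *)cache_find(0x401234));
    printf("miss: %p\n", (void *)cache_find(0x700000));
    return 0;
}
```
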
percpu-vm.c
    133: static void __pcpu_unmap_pages(unsigned long addr, int nr_pages)  [in __pcpu_unmap_pages(), argument]
    135: unmap_kernel_range_noflush(addr, nr_pages << PAGE_SHIFT);  [in __pcpu_unmap_pages()]
    191: static int __pcpu_map_pages(unsigned long addr, struct page **pages,  [in __pcpu_map_pages(), argument]
    194: return map_kernel_range_noflush(addr, nr_pages << PAGE_SHIFT,  [in __pcpu_map_pages()]
    346: chunk->base_addr = vms[0]->addr - pcpu_group_offsets[0];  [in pcpu_create_chunk()]
    357: static struct page *pcpu_addr_to_page(void *addr)  [in pcpu_addr_to_page(), argument]
    359: return vmalloc_to_page(addr);  [in pcpu_addr_to_page()]