
Searched refs:addr (Results 1 – 21 of 21) sorted by relevance

/mm/
vmalloc.c
35 static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end) in vunmap_pte_range() argument
39 pte = pte_offset_kernel(pmd, addr); in vunmap_pte_range()
41 pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte); in vunmap_pte_range()
43 } while (pte++, addr += PAGE_SIZE, addr != end); in vunmap_pte_range()
46 static void vunmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end) in vunmap_pmd_range() argument
51 pmd = pmd_offset(pud, addr); in vunmap_pmd_range()
53 next = pmd_addr_end(addr, end); in vunmap_pmd_range()
56 vunmap_pte_range(pmd, addr, next); in vunmap_pmd_range()
57 } while (pmd++, addr = next, addr != end); in vunmap_pmd_range()
60 static void vunmap_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end) in vunmap_pud_range() argument
[all …]
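
The vunmap_*_range() hits above all share one loop shape: the table pointer and the virtual address advance in lockstep until addr reaches end. Below is a minimal userspace sketch of that idiom, with a flat array standing in for a PTE page; pte_page, clear_range and the constants are illustrative stand-ins, not kernel APIs.

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define NR_PTES   512

static unsigned long pte_page[NR_PTES];    /* fake PTE page: one word per page */

/* Clear every "pte" covering [addr, end), mirroring vunmap_pte_range(). */
static void clear_range(unsigned long *pte, unsigned long addr, unsigned long end)
{
    do {
        *pte = 0;    /* stands in for ptep_get_and_clear() */
    } while (pte++, addr += PAGE_SIZE, addr != end);
}

int main(void)
{
    for (int i = 0; i < NR_PTES; i++)
        pte_page[i] = 1;    /* pretend every page is mapped */

    /* Unmap 16 pages starting at the fifth entry. */
    clear_range(&pte_page[4], 4 * PAGE_SIZE, 20 * PAGE_SIZE);

    printf("pte[3]=%lu pte[4]=%lu pte[19]=%lu pte[20]=%lu\n",
           pte_page[3], pte_page[4], pte_page[19], pte_page[20]);
    return 0;
}
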
pagewalk.c
5 static int walk_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, in walk_pte_range() argument
11 pte = pte_offset_map(pmd, addr); in walk_pte_range()
13 err = walk->pte_entry(pte, addr, addr + PAGE_SIZE, walk); in walk_pte_range()
16 addr += PAGE_SIZE; in walk_pte_range()
17 if (addr == end) in walk_pte_range()
26 static int walk_pmd_range(pud_t *pud, unsigned long addr, unsigned long end, in walk_pmd_range() argument
33 pmd = pmd_offset(pud, addr); in walk_pmd_range()
35 next = pmd_addr_end(addr, end); in walk_pmd_range()
38 err = walk->pte_hole(addr, next, walk); in walk_pmd_range()
44 err = walk->pmd_entry(pmd, addr, next, walk); in walk_pmd_range()
[all …]
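
The pagewalk.c walker dispatches through caller-supplied callbacks: pte_entry for present entries, pte_hole for absent ranges, with a non-zero return aborting the walk. A hedged sketch of the same dispatch structure over a flat array follows; struct walker, walk_range and friends are hypothetical names, and the real walker also has a pmd_entry level and reports whole missing pmds per pte_hole call.

#include <stdio.h>

#define PAGE_SIZE 4096UL

struct walker {
    /* Called per present entry; non-zero return aborts the walk. */
    int (*entry)(unsigned long val, unsigned long addr, void *priv);
    /* Called for each absent entry (simplified from the kernel's pte_hole). */
    int (*hole)(unsigned long addr, unsigned long end, void *priv);
    void *priv;
};

static int walk_range(const unsigned long *tbl, unsigned long addr,
                      unsigned long end, struct walker *w)
{
    int err = 0;

    for (; addr != end && !err; tbl++, addr += PAGE_SIZE) {
        if (*tbl)
            err = w->entry(*tbl, addr, w->priv);
        else if (w->hole)
            err = w->hole(addr, addr + PAGE_SIZE, w->priv);
    }
    return err;
}

static int print_entry(unsigned long val, unsigned long addr, void *priv)
{
    printf("mapped %#lx -> %lu\n", addr, val);
    return 0;
}

static int print_hole(unsigned long addr, unsigned long end, void *priv)
{
    printf("hole   %#lx - %#lx\n", addr, end);
    return 0;
}

int main(void)
{
    unsigned long tbl[] = { 7, 0, 9, 0 };
    struct walker w = { .entry = print_entry, .hole = print_hole };

    return walk_range(tbl, 0, sizeof(tbl) / sizeof(tbl[0]) * PAGE_SIZE, &w);
}
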
memory.c
147 unsigned long addr, unsigned long end, in free_pmd_range() argument
154 start = addr; in free_pmd_range()
155 pmd = pmd_offset(pud, addr); in free_pmd_range()
157 next = pmd_addr_end(addr, end); in free_pmd_range()
161 } while (pmd++, addr = next, addr != end); in free_pmd_range()
180 unsigned long addr, unsigned long end, in free_pud_range() argument
187 start = addr; in free_pud_range()
188 pud = pud_offset(pgd, addr); in free_pud_range()
190 next = pud_addr_end(addr, end); in free_pud_range()
193 free_pmd_range(tlb, pud, addr, next, floor, ceiling); in free_pud_range()
[all …]
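
free_pmd_range() and free_pud_range() step with pmd_addr_end()/pud_addr_end(), which return the next table boundary clamped to end. That clamp is the whole trick; here is a standalone rendering of it, where BLOCK_SHIFT and addr_end are illustrative choices (the kernel macro additionally guards against wraparound at the very top of the address space).

#include <stdio.h>

#define BLOCK_SHIFT 21UL    /* 2 MiB regions, like a pmd on x86-64 */
#define BLOCK_SIZE  (1UL << BLOCK_SHIFT)
#define BLOCK_MASK  (~(BLOCK_SIZE - 1))

/* Next block boundary after addr, clamped to end, like pmd_addr_end(). */
static unsigned long addr_end(unsigned long addr, unsigned long end)
{
    unsigned long boundary = (addr + BLOCK_SIZE) & BLOCK_MASK;

    return boundary < end ? boundary : end;
}

int main(void)
{
    unsigned long addr = 0x1ff000, end = 0x600000, next;

    do {
        next = addr_end(addr, end);
        printf("step: %#lx - %#lx\n", addr, next);    /* first step is partial */
    } while (addr = next, addr != end);
    return 0;
}
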
mmap.c
39 #define arch_mmap_check(addr, len, flags) (0) argument
43 #define arch_rebalance_pgtables(addr, len) (addr) argument
357 find_vma_prepare(struct mm_struct *mm, unsigned long addr, in find_vma_prepare() argument
374 if (vma_tmp->vm_end > addr) { in find_vma_prepare()
376 if (vma_tmp->vm_start <= addr) in find_vma_prepare()
763 struct vm_area_struct *prev, unsigned long addr, in vma_merge() argument
768 pgoff_t pglen = (end - addr) >> PAGE_SHIFT; in vma_merge()
789 if (prev && prev->vm_end == addr && in vma_merge()
818 if (prev && addr < prev->vm_end) /* case 4 */ in vma_merge()
820 addr, prev->vm_pgoff, NULL); in vma_merge()
[all …]
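
The vma_merge() hit checks whether a new mapping can be absorbed by its predecessor: prev must end exactly at addr, and the file offsets must run on contiguously (note pglen = (end - addr) >> PAGE_SHIFT in the snippet). A reduced sketch of just that adjacency test; the real vma_merge() also compares flags, the backing file and anon_vma, and struct range and can_merge_after are made-up names.

#include <stdio.h>

#define PAGE_SHIFT 12UL

struct range { unsigned long start, end, pgoff; };

/* Can prev absorb a new [addr, end) mapping at file offset pgoff?
 * Mirrors the first vma_merge() test: byte-adjacent and offset-contiguous. */
static int can_merge_after(const struct range *prev, unsigned long addr,
                           unsigned long end, unsigned long pgoff)
{
    if (prev->end != addr)
        return 0;
    return prev->pgoff + ((prev->end - prev->start) >> PAGE_SHIFT) == pgoff;
}

int main(void)
{
    struct range prev = { 0x1000, 0x3000, 10 };    /* maps file pages 10, 11 */

    printf("%d\n", can_merge_after(&prev, 0x3000, 0x5000, 12));    /* 1 */
    printf("%d\n", can_merge_after(&prev, 0x3000, 0x5000, 20));    /* 0 */
    return 0;
}
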
mprotect.c
39 unsigned long addr, unsigned long end, pgprot_t newprot, in change_pte_range() argument
45 pte = pte_offset_map_lock(mm, pmd, addr, &ptl); in change_pte_range()
52 ptent = ptep_modify_prot_start(mm, addr, pte); in change_pte_range()
62 ptep_modify_prot_commit(mm, addr, pte, ptent); in change_pte_range()
72 set_pte_at(mm, addr, pte, in change_pte_range()
76 } while (pte++, addr += PAGE_SIZE, addr != end); in change_pte_range()
82 unsigned long addr, unsigned long end, pgprot_t newprot, in change_pmd_range() argument
88 pmd = pmd_offset(pud, addr); in change_pmd_range()
90 next = pmd_addr_end(addr, end); in change_pmd_range()
93 change_pte_range(mm, pmd, addr, next, newprot, dirty_accountable); in change_pmd_range()
[all …]
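
change_pte_range() is a read-modify-write walk: for each present PTE it starts a protection update (ptep_modify_prot_start), rewrites the bits, and commits (ptep_modify_prot_commit). A toy version over fake PTE words that skips non-present entries the same way; PTE_PRESENT, PTE_WRITE and change_prot_range are invented for the sketch.

#include <stdio.h>

#define PTE_PRESENT 0x1UL
#define PTE_WRITE   0x2UL

/* Rewrite protection bits over a range of fake PTEs, in the spirit of
 * change_pte_range(): only present entries are touched. */
static void change_prot_range(unsigned long *pte, unsigned long n,
                              unsigned long set, unsigned long clear)
{
    for (; n--; pte++) {
        if (!(*pte & PTE_PRESENT))
            continue;
        *pte = (*pte & ~clear) | set;
    }
}

int main(void)
{
    unsigned long ptes[] = { PTE_PRESENT | PTE_WRITE, 0, PTE_PRESENT };

    change_prot_range(ptes, 3, 0, PTE_WRITE);    /* mprotect(PROT_READ)-like */
    printf("%lx %lx %lx\n", ptes[0], ptes[1], ptes[2]);    /* 1 0 1 */
    return 0;
}
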
mremap.c
29 static pmd_t *get_old_pmd(struct mm_struct *mm, unsigned long addr) in get_old_pmd() argument
35 pgd = pgd_offset(mm, addr); in get_old_pmd()
39 pud = pud_offset(pgd, addr); in get_old_pmd()
43 pmd = pmd_offset(pud, addr); in get_old_pmd()
50 static pmd_t *alloc_new_pmd(struct mm_struct *mm, unsigned long addr) in alloc_new_pmd() argument
56 pgd = pgd_offset(mm, addr); in alloc_new_pmd()
57 pud = pud_alloc(mm, pgd, addr); in alloc_new_pmd()
61 pmd = pmd_alloc(mm, pud, addr); in alloc_new_pmd()
65 if (!pmd_present(*pmd) && __pte_alloc(mm, pmd, addr)) in alloc_new_pmd()
257 unsigned long do_mremap(unsigned long addr, in do_mremap() argument
[all …]
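
This mremap.c pair shows the two flavors of a table descent: get_old_pmd() is lookup-only and bails out as soon as a level is missing, while alloc_new_pmd() populates missing levels on the way down (pud_alloc, pmd_alloc). A two-level userspace analogue of that split, with hypothetical names (struct top, get_slot, alloc_slot):

#include <stdio.h>
#include <stdlib.h>

#define ENTRIES 16

struct top { unsigned long *slots[ENTRIES]; };

/* Lookup only: return NULL if the lower table was never allocated. */
static unsigned long *get_slot(struct top *t, unsigned int i, unsigned int j)
{
    if (!t->slots[i])
        return NULL;
    return &t->slots[i][j];
}

/* Lookup-or-allocate: populate the missing lower table on demand. */
static unsigned long *alloc_slot(struct top *t, unsigned int i, unsigned int j)
{
    if (!t->slots[i]) {
        t->slots[i] = calloc(ENTRIES, sizeof(unsigned long));
        if (!t->slots[i])
            return NULL;
    }
    return &t->slots[i][j];
}

int main(void)
{
    struct top t = { 0 };
    unsigned long *slot;

    printf("lookup before alloc: %p\n", (void *)get_slot(&t, 3, 5));
    slot = alloc_slot(&t, 3, 5);
    if (!slot)
        return 1;
    *slot = 42;
    printf("lookup after alloc:  %lu\n", *get_slot(&t, 3, 5));
    return 0;
}
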
sparse-vmemmap.c
72 pte_t * __meminit vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node) in vmemmap_pte_populate() argument
74 pte_t *pte = pte_offset_kernel(pmd, addr); in vmemmap_pte_populate()
81 set_pte_at(&init_mm, addr, pte, entry); in vmemmap_pte_populate()
86 pmd_t * __meminit vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node) in vmemmap_pmd_populate() argument
88 pmd_t *pmd = pmd_offset(pud, addr); in vmemmap_pmd_populate()
98 pud_t * __meminit vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node) in vmemmap_pud_populate() argument
100 pud_t *pud = pud_offset(pgd, addr); in vmemmap_pud_populate()
110 pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node) in vmemmap_pgd_populate() argument
112 pgd_t *pgd = pgd_offset_k(addr); in vmemmap_pgd_populate()
125 unsigned long addr = (unsigned long)start_page; in vmemmap_populate_basepages() local
[all …]
nommu.c
243 void vfree(const void *addr) in vfree() argument
245 kfree(addr); in vfree()
279 struct page *vmalloc_to_page(const void *addr) in vmalloc_to_page() argument
281 return virt_to_page(addr); in vmalloc_to_page()
285 unsigned long vmalloc_to_pfn(const void *addr) in vmalloc_to_pfn() argument
287 return page_to_pfn(virt_to_page(addr)); in vmalloc_to_pfn()
291 long vread(char *buf, char *addr, unsigned long count) in vread() argument
293 memcpy(buf, addr, count); in vread()
297 long vwrite(char *buf, char *addr, unsigned long count) in vwrite() argument
300 if ((unsigned long) addr + count < count) in vwrite()
[all …]
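
The (unsigned long) addr + count < count test in nommu.c's vwrite() is the classic unsigned wraparound check: if addr + count overflows, the modular sum ends up smaller than count. Demonstrated standalone:

#include <stdio.h>

/* Returns 1 if addr + count wraps around the top of the address space. */
static int range_wraps(unsigned long addr, unsigned long count)
{
    return addr + count < count;    /* same test as nommu.c's vwrite() */
}

int main(void)
{
    printf("%d\n", range_wraps(0x1000, 0x100));         /* 0: fits */
    printf("%d\n", range_wraps(~0UL - 0x10, 0x100));    /* 1: wraps */
    return 0;
}
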
fremap.c
27 unsigned long addr, pte_t *ptep) in zap_pte() argument
34 flush_cache_page(vma, addr, pte_pfn(pte)); in zap_pte()
35 pte = ptep_clear_flush(vma, addr, ptep); in zap_pte()
36 page = vm_normal_page(vma, addr, pte); in zap_pte()
48 pte_clear_not_present_full(mm, addr, ptep, 0); in zap_pte()
57 unsigned long addr, unsigned long pgoff, pgprot_t prot) in install_file_pte() argument
63 pte = get_locked_pte(mm, addr, &ptl); in install_file_pte()
68 zap_pte(mm, vma, addr, pte); in install_file_pte()
70 set_pte_at(mm, addr, pte, pgoff_to_pte(pgoff)); in install_file_pte()
85 unsigned long addr, unsigned long size, pgoff_t pgoff) in populate_range() argument
[all …]
mincore.c
56 static long do_mincore(unsigned long addr, unsigned char *vec, unsigned long pages) in do_mincore() argument
66 struct vm_area_struct *vma = find_vma(current->mm, addr); in do_mincore()
72 if (!vma || addr < vma->vm_start) in do_mincore()
79 nr = PTRS_PER_PTE - ((addr >> PAGE_SHIFT) & (PTRS_PER_PTE-1)); in do_mincore()
84 nr = min(nr, (vma->vm_end - addr) >> PAGE_SHIFT); in do_mincore()
91 pgd = pgd_offset(vma->vm_mm, addr); in do_mincore()
94 pud = pud_offset(pgd, addr); in do_mincore()
97 pmd = pmd_offset(pud, addr); in do_mincore()
101 ptep = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); in do_mincore()
102 for (i = 0; i < nr; i++, ptep++, addr += PAGE_SIZE) { in do_mincore()
[all …]
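
The nr = PTRS_PER_PTE - ((addr >> PAGE_SHIFT) & (PTRS_PER_PTE-1)) line in do_mincore() caps each pass at the end of the current PTE page, so the walk never holds a PTE mapping across a table boundary (the snippet then also caps nr at the vma end). The arithmetic, checked in isolation with constants for 4 KiB pages and 512-entry tables:

#include <stdio.h>

#define PAGE_SHIFT   12
#define PTRS_PER_PTE 512UL

/* Pages left before addr crosses into the next PTE page. */
static unsigned long ptes_to_table_end(unsigned long addr)
{
    return PTRS_PER_PTE - ((addr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));
}

int main(void)
{
    printf("%lu\n", ptes_to_table_end(0x0));         /* 512: at a table boundary */
    printf("%lu\n", ptes_to_table_end(0x1ff000));    /* 1: last slot of the table */
    printf("%lu\n", ptes_to_table_end(0x200000));    /* 512: next table */
    return 0;
}
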
mempolicy.c
374 unsigned long addr, unsigned long end, in check_pte_range() argument
382 orig_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); in check_pte_range()
389 page = vm_normal_page(vma, addr, *pte); in check_pte_range()
415 } while (pte++, addr += PAGE_SIZE, addr != end); in check_pte_range()
417 return addr != end; in check_pte_range()
421 unsigned long addr, unsigned long end, in check_pmd_range() argument
428 pmd = pmd_offset(pud, addr); in check_pmd_range()
430 next = pmd_addr_end(addr, end); in check_pmd_range()
433 if (check_pte_range(vma, pmd, addr, next, nodes, in check_pmd_range()
436 } while (pmd++, addr = next, addr != end); in check_pmd_range()
[all …]
swapfile.c
694 unsigned long addr, swp_entry_t entry, struct page *page) in unuse_pte() argument
706 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); in unuse_pte()
716 set_pte_at(vma->vm_mm, addr, pte, in unuse_pte()
718 page_add_anon_rmap(page, vma, addr); in unuse_pte()
733 unsigned long addr, unsigned long end, in unuse_pte_range() argument
749 pte = pte_offset_map(pmd, addr); in unuse_pte_range()
757 ret = unuse_pte(vma, pmd, addr, entry, page); in unuse_pte_range()
760 pte = pte_offset_map(pmd, addr); in unuse_pte_range()
762 } while (pte++, addr += PAGE_SIZE, addr != end); in unuse_pte_range()
769 unsigned long addr, unsigned long end, in unuse_pmd_range() argument
[all …]
migrate.c
89 unsigned long addr = page_address_in_vma(new, vma); in remove_migration_pte() local
91 if (addr == -EFAULT) in remove_migration_pte()
94 pgd = pgd_offset(mm, addr); in remove_migration_pte()
98 pud = pud_offset(pgd, addr); in remove_migration_pte()
102 pmd = pmd_offset(pud, addr); in remove_migration_pte()
106 ptep = pte_offset_map(pmd, addr); in remove_migration_pte()
128 flush_cache_page(vma, addr, pte_pfn(pte)); in remove_migration_pte()
129 set_pte_at(mm, addr, ptep, pte); in remove_migration_pte()
132 page_add_anon_rmap(new, vma, addr); in remove_migration_pte()
137 update_mmu_cache(vma, addr, pte); in remove_migration_pte()
[all …]
hugetlb.c
387 unsigned long addr, unsigned long sz) in clear_gigantic_page() argument
395 clear_user_highpage(p, addr + i * PAGE_SIZE); in clear_gigantic_page()
399 unsigned long addr, unsigned long sz) in clear_huge_page() argument
404 clear_gigantic_page(page, addr, sz); in clear_huge_page()
411 clear_user_highpage(page + i, addr + i * PAGE_SIZE); in clear_huge_page()
416 unsigned long addr, struct vm_area_struct *vma) in copy_gigantic_page() argument
425 copy_user_highpage(dst, src, addr + i*PAGE_SIZE, vma); in copy_gigantic_page()
433 unsigned long addr, struct vm_area_struct *vma) in copy_huge_page() argument
439 copy_gigantic_page(dst, src, addr, vma); in copy_huge_page()
446 copy_user_highpage(dst + i, src + i, addr + i*PAGE_SIZE, vma); in copy_huge_page()
[all …]
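
clear_huge_page() and copy_huge_page() operate one base page at a time (addr + i * PAGE_SIZE) rather than in a single pass, which lets the kernel reschedule between subpages of a large region. A plain userspace equivalent of the per-page loop; clear_huge_region is a made-up name.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PAGE_SIZE 4096UL

/* Clear a size-sz region one page at a time, as clear_huge_page() does;
 * the kernel works per page so it can cond_resched() between pages. */
static void clear_huge_region(unsigned char *base, unsigned long sz)
{
    for (unsigned long i = 0; i < sz / PAGE_SIZE; i++)
        memset(base + i * PAGE_SIZE, 0, PAGE_SIZE);
}

int main(void)
{
    unsigned long sz = 512 * PAGE_SIZE;    /* one 2 MiB "huge page" */
    unsigned char *buf = malloc(sz);

    if (!buf)
        return 1;
    memset(buf, 0xaa, sz);
    clear_huge_region(buf, sz);
    printf("first=%u last=%u\n", buf[0], buf[sz - 1]);    /* 0 0 */
    free(buf);
    return 0;
}
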
slub.c
186 unsigned long addr; /* Called from address */ member
289 static inline int slab_index(void *p, struct kmem_cache *s, void *addr) in slab_index() argument
291 return (p - addr) / s->size; in slab_index()
329 static void print_section(char *text, u8 *addr, unsigned int length) in print_section() argument
339 printk(KERN_ERR "%8s 0x%p: ", text, addr + i); in print_section()
342 printk(KERN_CONT " %02x", addr[i]); in print_section()
344 ascii[offset] = isgraph(addr[i]) ? addr[i] : '.'; in print_section()
375 enum track_item alloc, unsigned long addr) in set_track() argument
385 if (addr) { in set_track()
386 p->addr = addr; in set_track()
[all …]
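
slab_index() recovers an object's index from plain pointer arithmetic: the object's offset within the slab divided by the fixed object size. The same computation outside the kernel; obj_index is an invented name.

#include <stdio.h>

/* Object index from its address, as slub.c's slab_index() computes it. */
static unsigned int obj_index(const char *p, const char *slab_base,
                              unsigned int obj_size)
{
    return (unsigned int)((p - slab_base) / obj_size);
}

int main(void)
{
    char slab[8 * 64];    /* a fake slab: 8 objects of 64 bytes */

    printf("%u\n", obj_index(&slab[0], slab, 64));      /* 0 */
    printf("%u\n", obj_index(&slab[192], slab, 64));    /* 3 */
    return 0;
}
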
swap_state.c
271 struct vm_area_struct *vma, unsigned long addr) in read_swap_cache_async() argument
290 new_page = alloc_page_vma(gfp_mask, vma, addr); in read_swap_cache_async()
350 struct vm_area_struct *vma, unsigned long addr) in swapin_readahead() argument
368 gfp_mask, vma, addr); in swapin_readahead()
374 return read_swap_cache_async(entry, gfp_mask, vma, addr); in swapin_readahead()
slab.c
249 void *addr; member
1702 static void kmem_freepages(struct kmem_cache *cachep, void *addr) in kmem_freepages() argument
1705 struct page *page = virt_to_page(addr); in kmem_freepages()
1721 free_pages((unsigned long)addr, cachep->gfporder); in kmem_freepages()
1729 kmem_freepages(cachep, slab_rcu->addr); in kmem_rcu_free()
1737 static void store_stackinfo(struct kmem_cache *cachep, unsigned long *addr, in store_stackinfo() argument
1742 addr = (unsigned long *)&((char *)addr)[obj_offset(cachep)]; in store_stackinfo()
1747 *addr++ = 0x12345678; in store_stackinfo()
1748 *addr++ = caller; in store_stackinfo()
1749 *addr++ = smp_processor_id(); in store_stackinfo()
[all …]
bootmem.c
353 void __init free_bootmem(unsigned long addr, unsigned long size) in free_bootmem() argument
357 start = PFN_UP(addr); in free_bootmem()
358 end = PFN_DOWN(addr + size); in free_bootmem()
396 int __init reserve_bootmem(unsigned long addr, unsigned long size, in reserve_bootmem() argument
401 start = PFN_DOWN(addr); in reserve_bootmem()
402 end = PFN_UP(addr + size); in reserve_bootmem()
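
Note the deliberate asymmetry in the rounding above: free_bootmem() rounds the range inward (PFN_UP on the start, PFN_DOWN on the end) so a partially covered page is never freed, while reserve_bootmem() rounds outward so a partially covered page is always reserved. A standalone check of both roundings:

#include <stdio.h>

#define PAGE_SHIFT 12UL
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

#define PFN_UP(x)   (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)
#define PFN_DOWN(x) ((x) >> PAGE_SHIFT)

int main(void)
{
    unsigned long addr = 0x1800, size = 0x2000;    /* straddles page boundaries */

    /* free_bootmem(): shrink inward -> pfns [2, 3) */
    printf("free:    pfn %lu - %lu\n", PFN_UP(addr), PFN_DOWN(addr + size));
    /* reserve_bootmem(): grow outward -> pfns [1, 4) */
    printf("reserve: pfn %lu - %lu\n", PFN_DOWN(addr), PFN_UP(addr + size));
    return 0;
}
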
page_alloc.c
1728 void free_pages(unsigned long addr, unsigned int order) in free_pages() argument
1730 if (addr != 0) { in free_pages()
1731 VM_BUG_ON(!virt_addr_valid((void *)addr)); in free_pages()
1732 __free_pages(virt_to_page((void *)addr), order); in free_pages()
1754 unsigned long addr; in alloc_pages_exact() local
1756 addr = __get_free_pages(gfp_mask, order); in alloc_pages_exact()
1757 if (addr) { in alloc_pages_exact()
1758 unsigned long alloc_end = addr + (PAGE_SIZE << order); in alloc_pages_exact()
1759 unsigned long used = addr + PAGE_ALIGN(size); in alloc_pages_exact()
1761 split_page(virt_to_page(addr), order); in alloc_pages_exact()
[all …]
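
alloc_pages_exact() asks the buddy allocator for the next power-of-two order, split_page()s the block, and frees the tail pages beyond PAGE_ALIGN(size). The order and tail arithmetic can be checked in isolation; order_for mimics what get_order() computes, and the names are illustrative.

#include <stdio.h>

#define PAGE_SHIFT    12UL
#define PAGE_SIZE     (1UL << PAGE_SHIFT)
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

/* Smallest order whose 2^order pages cover size bytes. */
static unsigned int order_for(unsigned long size)
{
    unsigned int order = 0;

    while ((PAGE_SIZE << order) < size)
        order++;
    return order;
}

int main(void)
{
    unsigned long size = 5 * PAGE_SIZE;          /* want exactly 5 pages */
    unsigned int order = order_for(size);
    unsigned long alloc = PAGE_SIZE << order;    /* buddy hands back 8 pages */
    unsigned long used = PAGE_ALIGN(size);       /* we keep 5 */

    printf("order %u: alloc %lu pages, free back %lu tail pages\n",
           order, alloc / PAGE_SIZE, (alloc - used) / PAGE_SIZE);
    return 0;
}
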
mlock.c
162 unsigned long addr = start; in __mlock_vma_pages_range() local
198 ret = __get_user_pages(current, mm, addr, in __mlock_vma_pages_range()
243 addr += PAGE_SIZE; /* for next get_user_pages() */ in __mlock_vma_pages_range()
shmem.c
1474 unsigned long addr) in shmem_get_policy() argument
1479 idx = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff; in shmem_get_policy()
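
The shmem_get_policy() hit computes a file page index from a fault address: pages into the mapping, plus the mapping's starting offset in the file (vm_pgoff). The same arithmetic standalone; file_index is an invented name and the example addresses are arbitrary.

#include <stdio.h>

#define PAGE_SHIFT 12UL

/* File page index for a fault address, as shmem_get_policy() computes it. */
static unsigned long file_index(unsigned long addr, unsigned long vm_start,
                                unsigned long vm_pgoff)
{
    return ((addr - vm_start) >> PAGE_SHIFT) + vm_pgoff;
}

int main(void)
{
    /* Mapping starts at 0x700000000000 and covers the file from page 16 on. */
    printf("%lu\n", file_index(0x700000003000UL, 0x700000000000UL, 16));    /* 19 */
    return 0;
}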