Lines matching refs:addr in mm/memory.c (Linux kernel)

Each entry below gives a source line number, the matching code, and the enclosing function; "argument" marks a line where addr is a parameter of that function, "local" a line where it is a local variable.

215 unsigned long addr) in free_pte_range() argument
219 pte_free_tlb(tlb, token, addr); in free_pte_range()
224 unsigned long addr, unsigned long end, in free_pmd_range() argument
231 start = addr; in free_pmd_range()
232 pmd = pmd_offset(pud, addr); in free_pmd_range()
234 next = pmd_addr_end(addr, end); in free_pmd_range()
237 free_pte_range(tlb, pmd, addr); in free_pmd_range()
238 } while (pmd++, addr = next, addr != end); in free_pmd_range()
258 unsigned long addr, unsigned long end, in free_pud_range() argument
265 start = addr; in free_pud_range()
266 pud = pud_offset(p4d, addr); in free_pud_range()
268 next = pud_addr_end(addr, end); in free_pud_range()
271 free_pmd_range(tlb, pud, addr, next, floor, ceiling); in free_pud_range()
272 } while (pud++, addr = next, addr != end); in free_pud_range()
292 unsigned long addr, unsigned long end, in free_p4d_range() argument
299 start = addr; in free_p4d_range()
300 p4d = p4d_offset(pgd, addr); in free_p4d_range()
302 next = p4d_addr_end(addr, end); in free_p4d_range()
305 free_pud_range(tlb, p4d, addr, next, floor, ceiling); in free_p4d_range()
306 } while (p4d++, addr = next, addr != end); in free_p4d_range()
328 unsigned long addr, unsigned long end, in free_pgd_range() argument
360 addr &= PMD_MASK; in free_pgd_range()
361 if (addr < floor) { in free_pgd_range()
362 addr += PMD_SIZE; in free_pgd_range()
363 if (!addr) in free_pgd_range()
373 if (addr > end - 1) in free_pgd_range()
380 pgd = pgd_offset(tlb->mm, addr); in free_pgd_range()
382 next = pgd_addr_end(addr, end); in free_pgd_range()
385 free_p4d_range(tlb, pgd, addr, next, floor, ceiling); in free_pgd_range()
386 } while (pgd++, addr = next, addr != end); in free_pgd_range()
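
Every level of the free_*_range() family above repeats one idiom: pXX_addr_end() clamps the current entry's span to the overall end, the body recurses one level down, and the loop advances with "while (pXX++, addr = next, addr != end)". A minimal, self-contained sketch of that idiom in userspace C; the toy_* names and the 2 MiB span are stand-ins, not kernel API:

    #include <stdio.h>

    #define TOY_PMD_SHIFT 21UL                    /* toy 2 MiB "pmd" span */
    #define TOY_PMD_SIZE  (1UL << TOY_PMD_SHIFT)
    #define TOY_PMD_MASK  (~(TOY_PMD_SIZE - 1))

    /* Same shape as the kernel's pmd_addr_end(): the end of the current
     * entry's span, clamped to the overall end (the -1 comparison keeps
     * an end of 0, i.e. address-space wraparound, correct). */
    static unsigned long toy_pmd_addr_end(unsigned long addr, unsigned long end)
    {
        unsigned long boundary = (addr + TOY_PMD_SIZE) & TOY_PMD_MASK;
        return (boundary - 1 < end - 1) ? boundary : end;
    }

    int main(void)
    {
        unsigned long addr = 0x1ff000, end = 0x654321, next;

        do {                        /* the do/while from free_pmd_range() */
            next = toy_pmd_addr_end(addr, end);
            printf("process [%#lx, %#lx)\n", addr, next);
        } while (addr = next, addr != end);
        return 0;
    }
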
394 unsigned long addr = vma->vm_start; in free_pgtables() local
404 hugetlb_free_pgd_range(tlb, addr, vma->vm_end, in free_pgtables()
417 free_pgd_range(tlb, addr, vma->vm_end, in free_pgtables()
500 static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr, in print_bad_pte() argument
503 pgd_t *pgd = pgd_offset(vma->vm_mm, addr); in print_bad_pte()
504 p4d_t *p4d = p4d_offset(pgd, addr); in print_bad_pte()
505 pud_t *pud = pud_offset(p4d, addr); in print_bad_pte()
506 pmd_t *pmd = pmd_offset(pud, addr); in print_bad_pte()
533 index = linear_page_index(vma, addr); in print_bad_pte()
541 (void *)addr, vma->vm_flags, vma->anon_vma, mapping, index); in print_bad_pte()
593 struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr, in vm_normal_page() argument
602 return vma->vm_ops->find_special_page(vma, addr); in vm_normal_page()
610 print_bad_pte(vma, addr, pte, NULL); in vm_normal_page()
623 off = (addr - vma->vm_start) >> PAGE_SHIFT; in vm_normal_page()
636 print_bad_pte(vma, addr, pte, NULL); in vm_normal_page()
649 struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr, in vm_normal_page_pmd() argument
666 off = (addr - vma->vm_start) >> PAGE_SHIFT; in vm_normal_page_pmd()
699 unsigned long addr, int *rss) in copy_one_pte() argument
737 set_pte_at(src_mm, addr, src_pte, pte); in copy_one_pte()
766 set_pte_at(src_mm, addr, src_pte, pte); in copy_one_pte()
777 ptep_set_wrprotect(src_mm, addr, src_pte); in copy_one_pte()
789 page = vm_normal_page(vma, addr, pte); in copy_one_pte()
799 set_pte_at(dst_mm, addr, dst_pte, pte); in copy_one_pte()
805 unsigned long addr, unsigned long end) in copy_pte_range() argument
817 dst_pte = pte_alloc_map_lock(dst_mm, dst_pmd, addr, &dst_ptl); in copy_pte_range()
820 src_pte = pte_offset_map(src_pmd, addr); in copy_pte_range()
843 vma, addr, rss); in copy_pte_range()
847 } while (dst_pte++, src_pte++, addr += PAGE_SIZE, addr != end); in copy_pte_range()
861 if (addr != end) in copy_pte_range()
868 unsigned long addr, unsigned long end) in copy_pmd_range() argument
873 dst_pmd = pmd_alloc(dst_mm, dst_pud, addr); in copy_pmd_range()
876 src_pmd = pmd_offset(src_pud, addr); in copy_pmd_range()
878 next = pmd_addr_end(addr, end); in copy_pmd_range()
882 VM_BUG_ON_VMA(next-addr != HPAGE_PMD_SIZE, vma); in copy_pmd_range()
884 dst_pmd, src_pmd, addr, vma); in copy_pmd_range()
894 vma, addr, next)) in copy_pmd_range()
896 } while (dst_pmd++, src_pmd++, addr = next, addr != end); in copy_pmd_range()
902 unsigned long addr, unsigned long end) in copy_pud_range() argument
907 dst_pud = pud_alloc(dst_mm, dst_p4d, addr); in copy_pud_range()
910 src_pud = pud_offset(src_p4d, addr); in copy_pud_range()
912 next = pud_addr_end(addr, end); in copy_pud_range()
916 VM_BUG_ON_VMA(next-addr != HPAGE_PUD_SIZE, vma); in copy_pud_range()
918 dst_pud, src_pud, addr, vma); in copy_pud_range()
928 vma, addr, next)) in copy_pud_range()
930 } while (dst_pud++, src_pud++, addr = next, addr != end); in copy_pud_range()
936 unsigned long addr, unsigned long end) in copy_p4d_range() argument
941 dst_p4d = p4d_alloc(dst_mm, dst_pgd, addr); in copy_p4d_range()
944 src_p4d = p4d_offset(src_pgd, addr); in copy_p4d_range()
946 next = p4d_addr_end(addr, end); in copy_p4d_range()
950 vma, addr, next)) in copy_p4d_range()
952 } while (dst_p4d++, src_p4d++, addr = next, addr != end); in copy_p4d_range()
961 unsigned long addr = vma->vm_start; in copy_page_range() local
1000 0, vma, src_mm, addr, end); in copy_page_range()
1005 dst_pgd = pgd_offset(dst_mm, addr); in copy_page_range()
1006 src_pgd = pgd_offset(src_mm, addr); in copy_page_range()
1008 next = pgd_addr_end(addr, end); in copy_page_range()
1012 vma, addr, next))) { in copy_page_range()
1016 } while (dst_pgd++, src_pgd++, addr = next, addr != end); in copy_page_range()
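
copy_one_pte() is where fork()'s copy-on-write is armed: for a writable pte in a COW mapping it write-protects the parent's entry (ptep_set_wrprotect, line 777) and installs the same read-only pte in the child, so the first write on either side faults and only then copies the page. A runnable userspace demonstration of the resulting behaviour:

    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <sys/wait.h>
    #include <unistd.h>

    int main(void)
    {
        char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        strcpy(p, "parent data");
        if (fork() == 0) {          /* child's write triggers the COW fault */
            strcpy(p, "child data");
            printf("child sees:  %s\n", p);
            _exit(0);
        }
        wait(NULL);
        printf("parent sees: %s\n", p);   /* still "parent data" */
        return 0;
    }
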
1025 unsigned long addr, unsigned long end, in zap_pte_range() argument
1039 start_pte = pte_offset_map_lock(mm, pmd, addr, &ptl); in zap_pte_range()
1054 page = vm_normal_page(vma, addr, ptent); in zap_pte_range()
1065 ptent = ptep_get_and_clear_full(mm, addr, pte, in zap_pte_range()
1067 tlb_remove_tlb_entry(tlb, pte, addr); in zap_pte_range()
1083 print_bad_pte(vma, addr, ptent, page); in zap_pte_range()
1086 addr += PAGE_SIZE; in zap_pte_range()
1107 pte_clear_not_present_full(mm, addr, pte, tlb->fullmm); in zap_pte_range()
1127 print_bad_pte(vma, addr, ptent, NULL); in zap_pte_range()
1128 pte_clear_not_present_full(mm, addr, pte, tlb->fullmm); in zap_pte_range()
1129 } while (pte++, addr += PAGE_SIZE, addr != end); in zap_pte_range()
1150 if (addr != end) { in zap_pte_range()
1155 return addr; in zap_pte_range()
1160 unsigned long addr, unsigned long end, in zap_pmd_range() argument
1166 pmd = pmd_offset(pud, addr); in zap_pmd_range()
1168 next = pmd_addr_end(addr, end); in zap_pmd_range()
1170 if (next - addr != HPAGE_PMD_SIZE) in zap_pmd_range()
1171 __split_huge_pmd(vma, pmd, addr, false, NULL); in zap_pmd_range()
1172 else if (zap_huge_pmd(tlb, vma, pmd, addr)) in zap_pmd_range()
1185 next = zap_pte_range(tlb, vma, pmd, addr, next, details); in zap_pmd_range()
1188 } while (pmd++, addr = next, addr != end); in zap_pmd_range()
1190 return addr; in zap_pmd_range()
1195 unsigned long addr, unsigned long end, in zap_pud_range() argument
1201 pud = pud_offset(p4d, addr); in zap_pud_range()
1203 next = pud_addr_end(addr, end); in zap_pud_range()
1205 if (next - addr != HPAGE_PUD_SIZE) { in zap_pud_range()
1207 split_huge_pud(vma, pud, addr); in zap_pud_range()
1208 } else if (zap_huge_pud(tlb, vma, pud, addr)) in zap_pud_range()
1214 next = zap_pmd_range(tlb, vma, pud, addr, next, details); in zap_pud_range()
1217 } while (pud++, addr = next, addr != end); in zap_pud_range()
1219 return addr; in zap_pud_range()
1224 unsigned long addr, unsigned long end, in zap_p4d_range() argument
1230 p4d = p4d_offset(pgd, addr); in zap_p4d_range()
1232 next = p4d_addr_end(addr, end); in zap_p4d_range()
1235 next = zap_pud_range(tlb, vma, p4d, addr, next, details); in zap_p4d_range()
1236 } while (p4d++, addr = next, addr != end); in zap_p4d_range()
1238 return addr; in zap_p4d_range()
1243 unsigned long addr, unsigned long end, in unmap_page_range() argument
1249 BUG_ON(addr >= end); in unmap_page_range()
1251 pgd = pgd_offset(vma->vm_mm, addr); in unmap_page_range()
1253 next = pgd_addr_end(addr, end); in unmap_page_range()
1256 next = zap_p4d_range(tlb, vma, pgd, addr, next, details); in unmap_page_range()
1257 } while (pgd++, addr = next, addr != end); in unmap_page_range()
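
The zap_*_range() levels are the teardown walk behind munmap(), process exit, and madvise(MADV_DONTNEED); zap_pte_range() returns the address it reached so the levels above can resume after a TLB flush. The effect is easy to observe from userspace:

    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>

    int main(void)
    {
        char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        strcpy(p, "hello");
        madvise(p, 4096, MADV_DONTNEED);  /* zaps the ptes for this range */
        printf("after MADV_DONTNEED: %d\n", p[0]);  /* 0: fresh zero page */
        return 0;
    }
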
1411 pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr, in __get_locked_pte() argument
1419 pgd = pgd_offset(mm, addr); in __get_locked_pte()
1420 p4d = p4d_alloc(mm, pgd, addr); in __get_locked_pte()
1423 pud = pud_alloc(mm, p4d, addr); in __get_locked_pte()
1426 pmd = pmd_alloc(mm, pud, addr); in __get_locked_pte()
1431 return pte_alloc_map_lock(mm, pmd, addr, ptl); in __get_locked_pte()
1441 static int insert_page(struct vm_area_struct *vma, unsigned long addr, in insert_page() argument
1454 pte = get_locked_pte(mm, addr, &ptl); in insert_page()
1465 set_pte_at(mm, addr, pte, mk_pte(page, prot)); in insert_page()
1503 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr, in vm_insert_page() argument
1506 if (addr < vma->vm_start || addr >= vma->vm_end) in vm_insert_page()
1515 return insert_page(vma, addr, page, vma->vm_page_prot); in vm_insert_page()
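
vm_insert_page() is the driver-facing wrapper around insert_page(): it checks that addr lies inside the vma, then installs a normal, refcounted pte for an individual kernel-allocated page. A hedged sketch of the usual call site, an f_op->mmap handler; mydev_page is a hypothetical order-0 page from alloc_page():

    #include <linux/fs.h>
    #include <linux/mm.h>

    static struct page *mydev_page;     /* hypothetical, allocated in probe() */

    static int mydev_mmap(struct file *filp, struct vm_area_struct *vma)
    {
        if (vma->vm_end - vma->vm_start != PAGE_SIZE)
            return -EINVAL;
        /* Rejects addresses outside [vm_start, vm_end), exactly the
         * check visible at line 1506 above. */
        return vm_insert_page(vma, vma->vm_start, mydev_page);
    }
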
1600 static vm_fault_t insert_pfn(struct vm_area_struct *vma, unsigned long addr, in insert_pfn() argument
1607 pte = get_locked_pte(mm, addr, &ptl); in insert_pfn()
1628 if (ptep_set_access_flags(vma, addr, pte, entry, 1)) in insert_pfn()
1629 update_mmu_cache(vma, addr, pte); in insert_pfn()
1645 set_pte_at(mm, addr, pte, entry); in insert_pfn()
1646 update_mmu_cache(vma, addr, pte); /* XXX: why not for insert_page? */ in insert_pfn()
1671 vm_fault_t vmf_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr, in vmf_insert_pfn_prot() argument
1686 if (addr < vma->vm_start || addr >= vma->vm_end) in vmf_insert_pfn_prot()
1694 return insert_pfn(vma, addr, __pfn_to_pfn_t(pfn, PFN_DEV), pgprot, in vmf_insert_pfn_prot()
1719 vm_fault_t vmf_insert_pfn(struct vm_area_struct *vma, unsigned long addr, in vmf_insert_pfn() argument
1722 return vmf_insert_pfn_prot(vma, addr, pfn, vma->vm_page_prot); in vmf_insert_pfn()
1741 unsigned long addr, pfn_t pfn, bool mkwrite) in __vm_insert_mixed() argument
1748 if (addr < vma->vm_start || addr >= vma->vm_end) in __vm_insert_mixed()
1773 err = insert_page(vma, addr, page, pgprot); in __vm_insert_mixed()
1775 return insert_pfn(vma, addr, pfn, pgprot, mkwrite); in __vm_insert_mixed()
1786 vm_fault_t vmf_insert_mixed(struct vm_area_struct *vma, unsigned long addr, in vmf_insert_mixed() argument
1789 return __vm_insert_mixed(vma, addr, pfn, false); in vmf_insert_mixed()
1799 unsigned long addr, pfn_t pfn) in vmf_insert_mixed_mkwrite() argument
1801 return __vm_insert_mixed(vma, addr, pfn, true); in vmf_insert_mixed_mkwrite()
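
The vmf_insert_pfn*() variants return vm_fault_t codes (VM_FAULT_NOPAGE on success) that a device fault handler can pass straight back. A hedged sketch under that assumption; mydev_phys_base is a hypothetical MMIO base address:

    #include <linux/mm.h>

    static unsigned long mydev_phys_base;     /* hypothetical */

    static vm_fault_t mydev_fault(struct vm_fault *vmf)
    {
        unsigned long pfn = (mydev_phys_base >> PAGE_SHIFT) + vmf->pgoff;

        return vmf_insert_pfn(vmf->vma, vmf->address, pfn);
    }

    static const struct vm_operations_struct mydev_vm_ops = {
        .fault = mydev_fault,
    };
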
1811 unsigned long addr, unsigned long end, in remap_pte_range() argument
1818 pte = pte_alloc_map_lock(mm, pmd, addr, &ptl); in remap_pte_range()
1828 set_pte_at(mm, addr, pte, pte_mkspecial(pfn_pte(pfn, prot))); in remap_pte_range()
1830 } while (pte++, addr += PAGE_SIZE, addr != end); in remap_pte_range()
1837 unsigned long addr, unsigned long end, in remap_pmd_range() argument
1844 pfn -= addr >> PAGE_SHIFT; in remap_pmd_range()
1845 pmd = pmd_alloc(mm, pud, addr); in remap_pmd_range()
1850 next = pmd_addr_end(addr, end); in remap_pmd_range()
1851 err = remap_pte_range(mm, pmd, addr, next, in remap_pmd_range()
1852 pfn + (addr >> PAGE_SHIFT), prot); in remap_pmd_range()
1855 } while (pmd++, addr = next, addr != end); in remap_pmd_range()
1860 unsigned long addr, unsigned long end, in remap_pud_range() argument
1867 pfn -= addr >> PAGE_SHIFT; in remap_pud_range()
1868 pud = pud_alloc(mm, p4d, addr); in remap_pud_range()
1872 next = pud_addr_end(addr, end); in remap_pud_range()
1873 err = remap_pmd_range(mm, pud, addr, next, in remap_pud_range()
1874 pfn + (addr >> PAGE_SHIFT), prot); in remap_pud_range()
1877 } while (pud++, addr = next, addr != end); in remap_pud_range()
1882 unsigned long addr, unsigned long end, in remap_p4d_range() argument
1889 pfn -= addr >> PAGE_SHIFT; in remap_p4d_range()
1890 p4d = p4d_alloc(mm, pgd, addr); in remap_p4d_range()
1894 next = p4d_addr_end(addr, end); in remap_p4d_range()
1895 err = remap_pud_range(mm, p4d, addr, next, in remap_p4d_range()
1896 pfn + (addr >> PAGE_SHIFT), prot); in remap_p4d_range()
1899 } while (p4d++, addr = next, addr != end); in remap_p4d_range()
1915 int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr, in remap_pfn_range() argument
1920 unsigned long end = addr + PAGE_ALIGN(size); in remap_pfn_range()
1944 if (addr != vma->vm_start || end != vma->vm_end) in remap_pfn_range()
1949 err = track_pfn_remap(vma, &prot, remap_pfn, addr, PAGE_ALIGN(size)); in remap_pfn_range()
1955 BUG_ON(addr >= end); in remap_pfn_range()
1956 pfn -= addr >> PAGE_SHIFT; in remap_pfn_range()
1957 pgd = pgd_offset(mm, addr); in remap_pfn_range()
1958 flush_cache_range(vma, addr, end); in remap_pfn_range()
1960 next = pgd_addr_end(addr, end); in remap_pfn_range()
1961 err = remap_p4d_range(mm, pgd, addr, next, in remap_pfn_range()
1962 pfn + (addr >> PAGE_SHIFT), prot); in remap_pfn_range()
1965 } while (pgd++, addr = next, addr != end); in remap_pfn_range()
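
remap_pfn_range() is the one-shot version of the same walk: lines 1955-1965 run the usual pgd->p4d->pud->pmd->pte descent and fill every pte with pte_mkspecial() entries (line 1828), which is why remap_pfn_range() marks the vma VM_IO | VM_PFNMAP. The classic call site is a driver mmap handler mapping a physically contiguous buffer; a hedged sketch, with mydev_buf a hypothetical kmalloc'd (hence contiguous) buffer:

    #include <linux/io.h>
    #include <linux/mm.h>

    static void *mydev_buf;             /* hypothetical, physically contiguous */

    static int mydev_mmap_buf(struct file *filp, struct vm_area_struct *vma)
    {
        unsigned long size = vma->vm_end - vma->vm_start;

        /* One call covers the whole vma. */
        return remap_pfn_range(vma, vma->vm_start,
                               virt_to_phys(mydev_buf) >> PAGE_SHIFT,
                               size, vma->vm_page_prot);
    }
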
2024 unsigned long addr, unsigned long end, in apply_to_pte_range() argument
2032 pte_alloc_kernel(pmd, addr) : in apply_to_pte_range()
2033 pte_alloc_map_lock(mm, pmd, addr, &ptl); in apply_to_pte_range()
2042 err = fn(pte++, addr, data); in apply_to_pte_range()
2045 } while (addr += PAGE_SIZE, addr != end); in apply_to_pte_range()
2055 unsigned long addr, unsigned long end, in apply_to_pmd_range() argument
2064 pmd = pmd_alloc(mm, pud, addr); in apply_to_pmd_range()
2068 next = pmd_addr_end(addr, end); in apply_to_pmd_range()
2069 err = apply_to_pte_range(mm, pmd, addr, next, fn, data); in apply_to_pmd_range()
2072 } while (pmd++, addr = next, addr != end); in apply_to_pmd_range()
2077 unsigned long addr, unsigned long end, in apply_to_pud_range() argument
2084 pud = pud_alloc(mm, p4d, addr); in apply_to_pud_range()
2088 next = pud_addr_end(addr, end); in apply_to_pud_range()
2089 err = apply_to_pmd_range(mm, pud, addr, next, fn, data); in apply_to_pud_range()
2092 } while (pud++, addr = next, addr != end); in apply_to_pud_range()
2097 unsigned long addr, unsigned long end, in apply_to_p4d_range() argument
2104 p4d = p4d_alloc(mm, pgd, addr); in apply_to_p4d_range()
2108 next = p4d_addr_end(addr, end); in apply_to_p4d_range()
2109 err = apply_to_pud_range(mm, p4d, addr, next, fn, data); in apply_to_p4d_range()
2112 } while (p4d++, addr = next, addr != end); in apply_to_p4d_range()
2120 int apply_to_page_range(struct mm_struct *mm, unsigned long addr, in apply_to_page_range() argument
2125 unsigned long end = addr + size; in apply_to_page_range()
2128 if (WARN_ON(addr >= end)) in apply_to_page_range()
2131 pgd = pgd_offset(mm, addr); in apply_to_page_range()
2133 next = pgd_addr_end(addr, end); in apply_to_page_range()
2134 err = apply_to_p4d_range(mm, pgd, addr, next, fn, data); in apply_to_page_range()
2137 } while (pgd++, addr = next, addr != end); in apply_to_page_range()
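
apply_to_page_range() drives the same descent but allocates any missing intermediate tables and hands every pte to a caller-supplied callback; the callback signature matches the fn(pte, addr, data) call at line 2042. A hedged sketch that counts present ptes; count_present_pte and count_range are made-up names:

    #include <linux/mm.h>

    static int count_present_pte(pte_t *pte, unsigned long addr, void *data)
    {
        unsigned long *count = data;

        if (pte_present(*pte))
            (*count)++;
        return 0;                       /* non-zero aborts the walk */
    }

    static unsigned long count_range(struct mm_struct *mm,
                                     unsigned long addr, unsigned long size)
    {
        unsigned long count = 0;

        apply_to_page_range(mm, addr, size, count_present_pte, &count);
        return count;
    }
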
3668 unsigned long addr, int page_nid, in numa_migrate_prep() argument
3679 return mpol_misplaced(page, vma, addr); in numa_migrate_prep()
4302 int generic_access_phys(struct vm_area_struct *vma, unsigned long addr, in generic_access_phys() argument
4308 int offset = addr & (PAGE_SIZE-1); in generic_access_phys()
4310 if (follow_phys(vma, addr, write, &prot, &phys_addr)) in generic_access_phys()
4333 unsigned long addr, void *buf, int len, unsigned int gup_flags) in __access_remote_vm() argument
4348 ret = get_user_pages_remote(tsk, mm, addr, 1, in __access_remote_vm()
4358 vma = find_vma(mm, addr); in __access_remote_vm()
4359 if (!vma || vma->vm_start > addr) in __access_remote_vm()
4362 ret = vma->vm_ops->access(vma, addr, buf, in __access_remote_vm()
4370 offset = addr & (PAGE_SIZE-1); in __access_remote_vm()
4376 copy_to_user_page(vma, page, addr, in __access_remote_vm()
4380 copy_from_user_page(vma, page, addr, in __access_remote_vm()
4388 addr += bytes; in __access_remote_vm()
4407 int access_remote_vm(struct mm_struct *mm, unsigned long addr, in access_remote_vm() argument
4410 return __access_remote_vm(NULL, mm, addr, buf, len, gup_flags); in access_remote_vm()
4418 int access_process_vm(struct task_struct *tsk, unsigned long addr, in access_process_vm() argument
4428 ret = __access_remote_vm(tsk, mm, addr, buf, len, gup_flags); in access_process_vm()
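
__access_remote_vm() is the engine behind both exports: it pins the target page with get_user_pages_remote(), copies through copy_to_user_page()/copy_from_user_page(), and falls back to vma->vm_ops->access() for IO mappings with no struct page. ptrace's PEEKDATA/POKEDATA requests end up here. A hedged kernel-side sketch; peek_remote is a made-up helper:

    #include <linux/mm.h>
    #include <linux/sched.h>

    static int peek_remote(struct task_struct *tsk, unsigned long addr, long *out)
    {
        /* Returns the number of bytes actually copied; it can be short
         * when the range crosses into unmapped territory. */
        int copied = access_process_vm(tsk, addr, out, sizeof(*out), FOLL_FORCE);

        return copied == sizeof(*out) ? 0 : -EFAULT;
    }
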
4499 void (*process_subpage)(unsigned long addr, int idx, void *arg), in process_huge_page() argument
4503 unsigned long addr = addr_hint & in process_huge_page() local
4508 n = (addr_hint - addr) / PAGE_SIZE; in process_huge_page()
4516 process_subpage(addr + i * PAGE_SIZE, i, arg); in process_huge_page()
4525 process_subpage(addr + i * PAGE_SIZE, i, arg); in process_huge_page()
4537 process_subpage(addr + left_idx * PAGE_SIZE, left_idx, arg); in process_huge_page()
4539 process_subpage(addr + right_idx * PAGE_SIZE, right_idx, arg); in process_huge_page()
4544 unsigned long addr, in clear_gigantic_page() argument
4554 clear_user_highpage(p, addr + i * PAGE_SIZE); in clear_gigantic_page()
4558 static void clear_subpage(unsigned long addr, int idx, void *arg) in clear_subpage() argument
4562 clear_user_highpage(page + idx, addr); in clear_subpage()
4568 unsigned long addr = addr_hint & in clear_huge_page() local
4572 clear_gigantic_page(page, addr, pages_per_huge_page); in clear_huge_page()
4580 unsigned long addr, in copy_user_gigantic_page() argument
4590 copy_user_highpage(dst, src, addr + i*PAGE_SIZE, vma); in copy_user_gigantic_page()
4604 static void copy_subpage(unsigned long addr, int idx, void *arg) in copy_subpage() argument
4609 addr, copy_arg->vma); in copy_subpage()
4616 unsigned long addr = addr_hint & in copy_user_huge_page() local
4625 copy_user_gigantic_page(dst, src, addr, vma, in copy_user_huge_page()
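
process_huge_page() encodes a cache-locality trick shared by clear_huge_page() and copy_user_huge_page(): subpages far from the faulting address (addr_hint) are processed first, and the walk then converges on the target subpage from both sides so the cachelines the faulting thread needs are the last ones touched. A toy userspace re-implementation of just the ordering, to make the pattern visible (8 subpages stand in for a real huge page):

    #include <stdio.h>

    #define SUBPAGES 8

    static void toy_order(int target)   /* subpage index of addr_hint */
    {
        int i, base, l;

        if (2 * target <= SUBPAGES) {   /* target in the first half:   */
            base = 0;                   /* sweep the tail backwards    */
            l = target;
            for (i = SUBPAGES - 1; i >= 2 * target; i--)
                printf("%d ", i);
        } else {                        /* target in the second half:  */
            base = SUBPAGES - 2 * (SUBPAGES - target);
            l = SUBPAGES - target;      /* sweep the head forwards     */
            for (i = 0; i < base; i++)
                printf("%d ", i);
        }
        for (i = 0; i < l; i++) {       /* converge from both sides;   */
            printf("%d ", base + i);    /* the target comes out last   */
            printf("%d ", base + 2 * l - 1 - i);
        }
        printf("\n");
    }

    int main(void)
    {
        toy_order(2);                   /* prints: 7 6 5 4 0 3 1 2 */
        return 0;
    }
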