Lines matching refs:addr — references to the identifier addr in mm/memory.c (Linux kernel). Each entry gives the source line number, the matching line, the enclosing function, and whether addr is a function argument or a local there.

246 			   unsigned long addr)  in free_pte_range()  argument
260 pte_free_tlb(tlb, token, addr); in free_pte_range()
265 unsigned long addr, unsigned long end, in free_pmd_range() argument
272 start = addr; in free_pmd_range()
273 pmd = pmd_offset(pud, addr); in free_pmd_range()
275 next = pmd_addr_end(addr, end); in free_pmd_range()
278 free_pte_range(tlb, pmd, addr); in free_pmd_range()
279 } while (pmd++, addr = next, addr != end); in free_pmd_range()
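
Every level of this freeing walk shares one loop shape: clamp the current table entry's span with pmd_addr_end()/pud_addr_end()/p4d_addr_end(), descend into that sub-range, then advance until the range is exhausted. Below is a runnable userspace model of the clamping, assuming x86-64's 2 MiB PMD size (the real macros are per-architecture, with a generic fallback):

	#include <stdio.h>

	/* Userspace model of the kernel's pmd_addr_end(): advance addr to
	 * the next PMD boundary, but never past end. PMD_SIZE assumes the
	 * common x86-64 value (2 MiB). */
	#define PMD_SHIFT	21
	#define PMD_SIZE	(1UL << PMD_SHIFT)
	#define PMD_MASK	(~(PMD_SIZE - 1))

	static unsigned long pmd_addr_end(unsigned long addr, unsigned long end)
	{
		unsigned long boundary = (addr + PMD_SIZE) & PMD_MASK;

		/* the "- 1"s keep the comparison right even when end wraps to 0,
		 * i.e. the very top of the address space */
		return (boundary - 1 < end - 1) ? boundary : end;
	}

	int main(void)
	{
		unsigned long addr = 0x1ff000, end = 0x481000, next;

		/* same loop shape as free_pmd_range(): one pass per PMD slot */
		do {
			next = pmd_addr_end(addr, end);
			printf("PMD chunk: %#010lx .. %#010lx\n", addr, next);
		} while (addr = next, addr != end);
		return 0;
	}

Running it prints three chunks (a partial head, one full 2 MiB slot, a partial tail), which is exactly the per-entry slicing the do/while loops above perform.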
299 unsigned long addr, unsigned long end, in free_pud_range() argument
306 start = addr; in free_pud_range()
307 pud = pud_offset(p4d, addr); in free_pud_range()
309 next = pud_addr_end(addr, end); in free_pud_range()
312 free_pmd_range(tlb, pud, addr, next, floor, ceiling); in free_pud_range()
313 } while (pud++, addr = next, addr != end); in free_pud_range()
333 unsigned long addr, unsigned long end, in free_p4d_range() argument
340 start = addr; in free_p4d_range()
341 p4d = p4d_offset(pgd, addr); in free_p4d_range()
343 next = p4d_addr_end(addr, end); in free_p4d_range()
346 free_pud_range(tlb, p4d, addr, next, floor, ceiling); in free_p4d_range()
347 } while (p4d++, addr = next, addr != end); in free_p4d_range()
369 unsigned long addr, unsigned long end, in free_pgd_range() argument
401 addr &= PMD_MASK; in free_pgd_range()
402 if (addr < floor) { in free_pgd_range()
403 addr += PMD_SIZE; in free_pgd_range()
404 if (!addr) in free_pgd_range()
414 if (addr > end - 1) in free_pgd_range()
421 pgd = pgd_offset(tlb->mm, addr); in free_pgd_range()
423 next = pgd_addr_end(addr, end); in free_pgd_range()
426 free_p4d_range(tlb, pgd, addr, next, floor, ceiling); in free_pgd_range()
427 } while (pgd++, addr = next, addr != end); in free_pgd_range()
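
Before walking anything, free_pgd_range() trims the request so it never frees a page-table page still shared with a neighbouring VMA: addr is rounded down to a PMD boundary and bumped past floor, end is pulled back under ceiling, and the "- 1" comparisons let 0 stand for the top of the address space. A small runnable model of that trimming, assuming 2 MiB PMD units and reconstructed from the code shape above (a sketch, not the kernel's exact lines):

	#include <stdbool.h>
	#include <stdio.h>

	#define PMD_SHIFT	21
	#define PMD_SIZE	(1UL << PMD_SHIFT)
	#define PMD_MASK	(~(PMD_SIZE - 1))

	/* floor/ceiling bound the span whose page tables this caller owns
	 * outright; 0 means bottom/top of the address space respectively. */
	static bool trim_range(unsigned long *addr, unsigned long *end,
			       unsigned long floor, unsigned long ceiling)
	{
		*addr &= PMD_MASK;
		if (*addr < floor) {
			*addr += PMD_SIZE;	/* partial PMD shared below: keep it */
			if (!*addr)
				return false;
		}
		if (ceiling) {
			ceiling &= PMD_MASK;
			if (!ceiling)
				return false;
		}
		if (*end - 1 > ceiling - 1)
			*end -= PMD_SIZE;	/* partial PMD shared above: keep it */
		return *addr <= *end - 1;	/* anything left to free? */
	}

	int main(void)
	{
		unsigned long addr = 0x500000, end = 0xa00000;

		if (trim_range(&addr, &end, 0x400000, 0xa00000))
			printf("free tables for %#lx .. %#lx\n", addr, end);
		else
			printf("nothing safe to free\n");
		return 0;
	}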
435 unsigned long addr = vma->vm_start; in free_pgtables() local
447 hugetlb_free_pgd_range(tlb, addr, vma->vm_end, in free_pgtables()
462 free_pgd_range(tlb, addr, vma->vm_end, in free_pgtables()
545 static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr, in print_bad_pte() argument
548 pgd_t *pgd = pgd_offset(vma->vm_mm, addr); in print_bad_pte()
549 p4d_t *p4d = p4d_offset(pgd, addr); in print_bad_pte()
550 pud_t *pud = pud_offset(p4d, addr); in print_bad_pte()
551 pmd_t *pmd = pmd_offset(pud, addr); in print_bad_pte()
578 index = linear_page_index(vma, addr); in print_bad_pte()
586 (void *)addr, READ_ONCE(vma->vm_flags), vma->anon_vma, mapping, index); in print_bad_pte()
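
print_bad_pte() reports where the corrupt mapping sits in its backing object via linear_page_index(), which for the non-hugetlb case is just the VMA-relative page offset plus vm_pgoff. A runnable model, assuming 4 KiB pages:

	#include <stdio.h>

	#define PAGE_SHIFT 12	/* assuming 4 KiB pages */

	/* Userspace model of linear_page_index(): map an address back to a
	 * page index within the mapped file (non-hugetlb case). */
	static unsigned long linear_page_index(unsigned long vm_start,
					       unsigned long vm_pgoff,
					       unsigned long addr)
	{
		return ((addr - vm_start) >> PAGE_SHIFT) + vm_pgoff;
	}

	int main(void)
	{
		/* a VMA based at 0x7f0000400000 mapping the file from page 16 */
		printf("file page index = %lu\n",
		       linear_page_index(0x7f0000400000UL, 16, 0x7f0000403000UL));
		/* (0x3000 >> 12) + 16 = 19 */
		return 0;
	}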
639 struct page *_vm_normal_page(struct vm_area_struct *vma, unsigned long addr, in _vm_normal_page() argument
648 return vma->vm_ops->find_special_page(vma, addr); in _vm_normal_page()
656 print_bad_pte(vma, addr, pte, NULL); in _vm_normal_page()
673 off = (addr - vma->vm_start) >> PAGE_SHIFT; in _vm_normal_page()
686 print_bad_pte(vma, addr, pte, NULL); in _vm_normal_page()
699 struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr, in vm_normal_page_pmd() argument
716 off = (addr - vma->vm_start) >> PAGE_SHIFT; in vm_normal_page_pmd()
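
The off computation at lines 673 and 716 implements vm_normal_page()'s VM_PFNMAP heuristic: for private (CoW) mappings, remap_pfn_range() stashes the base pfn in vm_pgoff, so a pte whose pfn still equals vm_pgoff plus the page offset is the raw, struct-page-less mapping, while any other pfn in such a mapping must be a normal CoW replacement page. Modeled standalone (4 KiB pages assumed):

	#include <stdbool.h>
	#include <stdio.h>

	#define PAGE_SHIFT 12	/* assuming 4 KiB pages */

	/* Is this pte still the linear pfn laid down by remap_pfn_range()
	 * (no struct page to touch), or something else (a CoW'ed normal
	 * page)? Mirrors the VM_PFNMAP branch of vm_normal_page(). */
	static bool is_raw_pfnmap_pte(unsigned long vm_start, unsigned long vm_pgoff,
				      unsigned long addr, unsigned long pte_pfn)
	{
		unsigned long off = (addr - vm_start) >> PAGE_SHIFT;

		return pte_pfn == vm_pgoff + off;
	}

	int main(void)
	{
		unsigned long vm_start = 0x10000000, vm_pgoff = 0x80000;

		printf("untouched pte: raw=%d\n",
		       is_raw_pfnmap_pte(vm_start, vm_pgoff, 0x10002000, 0x80002));
		printf("CoW-replaced:  raw=%d\n",
		       is_raw_pfnmap_pte(vm_start, vm_pgoff, 0x10002000, 0x3f123));
		return 0;
	}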
749 struct vm_area_struct *src_vma, unsigned long addr, int *rss) in copy_nonpresent_pte() argument
786 set_pte_at(src_mm, addr, src_pte, pte); in copy_nonpresent_pte()
817 set_pte_at(src_mm, addr, src_pte, pte); in copy_nonpresent_pte()
822 set_pte_at(dst_mm, addr, dst_pte, pte); in copy_nonpresent_pte()
848 pte_t *dst_pte, pte_t *src_pte, unsigned long addr, int *rss, in copy_present_page() argument
895 copy_user_highpage(new_page, page, addr, src_vma); in copy_present_page()
897 page_add_new_anon_rmap(new_page, dst_vma, addr, false); in copy_present_page()
907 set_pte_at(dst_vma->vm_mm, addr, dst_pte, pte); in copy_present_page()
917 pte_t *dst_pte, pte_t *src_pte, unsigned long addr, int *rss, in copy_present_pte() argument
925 page = vm_normal_page(src_vma, addr, pte); in copy_present_pte()
930 addr, rss, prealloc, pte, page); in copy_present_pte()
944 ptep_set_wrprotect(src_mm, addr, src_pte); in copy_present_pte()
959 set_pte_at(dst_vma->vm_mm, addr, dst_pte, pte); in copy_present_pte()
965 unsigned long addr) in page_copy_prealloc() argument
969 new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, addr); in page_copy_prealloc()
984 pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr, in copy_pte_range() argument
1001 dst_pte = pte_alloc_map_lock(dst_mm, dst_pmd, addr, &dst_ptl); in copy_pte_range()
1006 src_pte = pte_offset_map(src_pmd, addr); in copy_pte_range()
1032 addr, rss); in copy_pte_range()
1040 addr, rss, &prealloc); in copy_pte_range()
1058 } while (dst_pte++, src_pte++, addr += PAGE_SIZE, addr != end); in copy_pte_range()
1075 prealloc = page_copy_prealloc(src_mm, src_vma, addr); in copy_pte_range()
1081 if (addr != end) in copy_pte_range()
1091 pud_t *dst_pud, pud_t *src_pud, unsigned long addr, in copy_pmd_range() argument
1099 dst_pmd = pmd_alloc(dst_mm, dst_pud, addr); in copy_pmd_range()
1102 src_pmd = pmd_offset(src_pud, addr); in copy_pmd_range()
1104 next = pmd_addr_end(addr, end); in copy_pmd_range()
1108 VM_BUG_ON_VMA(next-addr != HPAGE_PMD_SIZE, src_vma); in copy_pmd_range()
1110 addr, dst_vma, src_vma); in copy_pmd_range()
1120 addr, next)) in copy_pmd_range()
1122 } while (dst_pmd++, src_pmd++, addr = next, addr != end); in copy_pmd_range()
1128 p4d_t *dst_p4d, p4d_t *src_p4d, unsigned long addr, in copy_pud_range() argument
1136 dst_pud = pud_alloc(dst_mm, dst_p4d, addr); in copy_pud_range()
1139 src_pud = pud_offset(src_p4d, addr); in copy_pud_range()
1141 next = pud_addr_end(addr, end); in copy_pud_range()
1145 VM_BUG_ON_VMA(next-addr != HPAGE_PUD_SIZE, src_vma); in copy_pud_range()
1147 dst_pud, src_pud, addr, src_vma); in copy_pud_range()
1157 addr, next)) in copy_pud_range()
1159 } while (dst_pud++, src_pud++, addr = next, addr != end); in copy_pud_range()
1165 pgd_t *dst_pgd, pgd_t *src_pgd, unsigned long addr, in copy_p4d_range() argument
1172 dst_p4d = p4d_alloc(dst_mm, dst_pgd, addr); in copy_p4d_range()
1175 src_p4d = p4d_offset(src_pgd, addr); in copy_p4d_range()
1177 next = p4d_addr_end(addr, end); in copy_p4d_range()
1181 addr, next)) in copy_p4d_range()
1183 } while (dst_p4d++, src_p4d++, addr = next, addr != end); in copy_p4d_range()
1192 unsigned long addr = src_vma->vm_start; in copy_page_range() local
1233 0, src_vma, src_mm, addr, end); in copy_page_range()
1247 dst_pgd = pgd_offset(dst_mm, addr); in copy_page_range()
1248 src_pgd = pgd_offset(src_mm, addr); in copy_page_range()
1250 next = pgd_addr_end(addr, end); in copy_page_range()
1254 addr, next))) { in copy_page_range()
1258 } while (dst_pgd++, src_pgd++, addr = next, addr != end); in copy_page_range()
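
The net effect of this copy hierarchy is visible from userspace: fork() does not duplicate anonymous pages. copy_present_pte() write-protects the pte in both mms (ptep_set_wrprotect() at line 944) and lets the first write in either process take a copy-on-write fault. A runnable demonstration (error handling omitted for brevity):

	#include <stdio.h>
	#include <string.h>
	#include <sys/mman.h>
	#include <sys/wait.h>
	#include <unistd.h>

	int main(void)
	{
		char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
			       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

		strcpy(p, "parent data");
		if (fork() == 0) {
			strcpy(p, "child data");	/* CoW fault: child gets its own copy */
			_exit(0);
		}
		wait(NULL);
		printf("parent still sees: %s\n", p);	/* "parent data" */
		return 0;
	}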
1280 unsigned long addr, unsigned long end, in zap_pte_range() argument
1294 start_pte = pte_offset_map_lock(mm, pmd, addr, &ptl); in zap_pte_range()
1309 page = vm_normal_page(vma, addr, ptent); in zap_pte_range()
1320 ptent = ptep_get_and_clear_full(mm, addr, pte, in zap_pte_range()
1322 tlb_remove_tlb_entry(tlb, pte, addr); in zap_pte_range()
1338 print_bad_pte(vma, addr, ptent, page); in zap_pte_range()
1342 addr += PAGE_SIZE; in zap_pte_range()
1363 pte_clear_not_present_full(mm, addr, pte, tlb->fullmm); in zap_pte_range()
1385 print_bad_pte(vma, addr, ptent, NULL); in zap_pte_range()
1386 pte_clear_not_present_full(mm, addr, pte, tlb->fullmm); in zap_pte_range()
1387 } while (pte++, addr += PAGE_SIZE, addr != end); in zap_pte_range()
1408 if (addr != end) { in zap_pte_range()
1413 return addr; in zap_pte_range()
1418 unsigned long addr, unsigned long end, in zap_pmd_range() argument
1424 pmd = pmd_offset(pud, addr); in zap_pmd_range()
1426 next = pmd_addr_end(addr, end); in zap_pmd_range()
1428 if (next - addr != HPAGE_PMD_SIZE) in zap_pmd_range()
1429 __split_huge_pmd(vma, pmd, addr, false, NULL); in zap_pmd_range()
1430 else if (zap_huge_pmd(tlb, vma, pmd, addr)) in zap_pmd_range()
1435 next - addr == HPAGE_PMD_SIZE && pmd_none(*pmd)) { in zap_pmd_range()
1454 next = zap_pte_range(tlb, vma, pmd, addr, next, details); in zap_pmd_range()
1457 } while (pmd++, addr = next, addr != end); in zap_pmd_range()
1459 return addr; in zap_pmd_range()
1464 unsigned long addr, unsigned long end, in zap_pud_range() argument
1470 pud = pud_offset(p4d, addr); in zap_pud_range()
1472 next = pud_addr_end(addr, end); in zap_pud_range()
1474 if (next - addr != HPAGE_PUD_SIZE) { in zap_pud_range()
1476 split_huge_pud(vma, pud, addr); in zap_pud_range()
1477 } else if (zap_huge_pud(tlb, vma, pud, addr)) in zap_pud_range()
1483 next = zap_pmd_range(tlb, vma, pud, addr, next, details); in zap_pud_range()
1486 } while (pud++, addr = next, addr != end); in zap_pud_range()
1488 return addr; in zap_pud_range()
1493 unsigned long addr, unsigned long end, in zap_p4d_range() argument
1499 p4d = p4d_offset(pgd, addr); in zap_p4d_range()
1501 next = p4d_addr_end(addr, end); in zap_p4d_range()
1504 next = zap_pud_range(tlb, vma, p4d, addr, next, details); in zap_p4d_range()
1505 } while (p4d++, addr = next, addr != end); in zap_p4d_range()
1507 return addr; in zap_p4d_range()
1512 unsigned long addr, unsigned long end, in unmap_page_range() argument
1518 BUG_ON(addr >= end); in unmap_page_range()
1520 pgd = pgd_offset(vma->vm_mm, addr); in unmap_page_range()
1522 next = pgd_addr_end(addr, end); in unmap_page_range()
1525 next = zap_p4d_range(tlb, vma, pgd, addr, next, details); in unmap_page_range()
1526 } while (pgd++, addr = next, addr != end); in unmap_page_range()
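
This zap hierarchy is what userspace triggers with munmap() or madvise(MADV_DONTNEED): the latter ends up in unmap_page_range()/zap_pte_range() for the given range without tearing down the VMA. A runnable demonstration that the pte really is cleared (the next read faults in a zero page):

	#include <stdio.h>
	#include <string.h>
	#include <sys/mman.h>

	int main(void)
	{
		char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
			       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

		strcpy(p, "hello");
		madvise(p, 4096, MADV_DONTNEED);	/* zaps the pte */
		printf("after DONTNEED: %d\n", p[0]);	/* 0: fresh zero page */
		return 0;
	}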
1680 static pmd_t *walk_to_pmd(struct mm_struct *mm, unsigned long addr) in walk_to_pmd() argument
1687 pgd = pgd_offset(mm, addr); in walk_to_pmd()
1688 p4d = p4d_alloc(mm, pgd, addr); in walk_to_pmd()
1691 pud = pud_alloc(mm, p4d, addr); in walk_to_pmd()
1694 pmd = pmd_alloc(mm, pud, addr); in walk_to_pmd()
1702 pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr, in __get_locked_pte() argument
1705 pmd_t *pmd = walk_to_pmd(mm, addr); in __get_locked_pte()
1709 return pte_alloc_map_lock(mm, pmd, addr, ptl); in __get_locked_pte()
1721 unsigned long addr, struct page *page, pgprot_t prot) in insert_page_into_pte_locked() argument
1729 set_pte_at(mm, addr, pte, mk_pte(page, prot)); in insert_page_into_pte_locked()
1740 static int insert_page(struct vm_area_struct *vma, unsigned long addr, in insert_page() argument
1752 pte = get_locked_pte(mm, addr, &ptl); in insert_page()
1755 retval = insert_page_into_pte_locked(mm, pte, addr, page, prot); in insert_page()
1763 unsigned long addr, struct page *page, pgprot_t prot) in insert_page_in_batch_locked() argument
1772 return insert_page_into_pte_locked(mm, pte, addr, page, prot); in insert_page_in_batch_locked()
1778 static int insert_pages(struct vm_area_struct *vma, unsigned long addr, in insert_pages() argument
1791 pmd = walk_to_pmd(mm, addr); in insert_pages()
1796 remaining_pages_total, PTRS_PER_PTE - pte_index(addr)); in insert_pages()
1807 start_pte = pte_offset_map_lock(mm, pmd, addr, &pte_lock); in insert_pages()
1810 addr, pages[curr_page_idx], prot); in insert_pages()
1817 addr += PAGE_SIZE; in insert_pages()
1848 int vm_insert_pages(struct vm_area_struct *vma, unsigned long addr, in vm_insert_pages() argument
1852 const unsigned long end_addr = addr + (*num * PAGE_SIZE) - 1; in vm_insert_pages()
1854 if (addr < vma->vm_start || end_addr >= vma->vm_end) in vm_insert_pages()
1862 return insert_pages(vma, addr, pages, num, vma->vm_page_prot); in vm_insert_pages()
1868 err = vm_insert_page(vma, addr + (PAGE_SIZE * idx), pages[idx]); in vm_insert_pages()
1907 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr, in vm_insert_page() argument
1910 if (addr < vma->vm_start || addr >= vma->vm_end) in vm_insert_page()
1919 return insert_page(vma, addr, page, vma->vm_page_prot); in vm_insert_page()
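
vm_insert_page() is the exported way for a driver to map individual kernel-allocated pages into a user VMA; it flips the vma to VM_MIXEDMAP on first use so normal page refcounting applies. A minimal sketch of the intended caller (hypothetical driver; builds only in-kernel):

	#include <linux/fs.h>
	#include <linux/mm.h>

	/* Hypothetical ->mmap handler: hand one order-0 page, stashed in
	 * file->private_data at open() time, into the user mapping. */
	static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
	{
		struct page *page = file->private_data;

		if (vma->vm_end - vma->vm_start != PAGE_SIZE)
			return -EINVAL;
		return vm_insert_page(vma, vma->vm_start, page);
	}

vm_insert_pages() (line 1848) is the batched variant: insert_pages() above fills up to PTRS_PER_PTE - pte_index(addr) entries per PMD under a single pte lock instead of relocking per page.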
2004 static vm_fault_t insert_pfn(struct vm_area_struct *vma, unsigned long addr, in insert_pfn() argument
2011 pte = get_locked_pte(mm, addr, &ptl); in insert_pfn()
2033 if (ptep_set_access_flags(vma, addr, pte, entry, 1)) in insert_pfn()
2034 update_mmu_cache(vma, addr, pte); in insert_pfn()
2050 set_pte_at(mm, addr, pte, entry); in insert_pfn()
2051 update_mmu_cache(vma, addr, pte); /* XXX: why not for insert_page? */ in insert_pfn()
2079 vm_fault_t vmf_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr, in vmf_insert_pfn_prot() argument
2094 if (addr < vma->vm_start || addr >= vma->vm_end) in vmf_insert_pfn_prot()
2102 return insert_pfn(vma, addr, __pfn_to_pfn_t(pfn, PFN_DEV), pgprot, in vmf_insert_pfn_prot()
2127 vm_fault_t vmf_insert_pfn(struct vm_area_struct *vma, unsigned long addr, in vmf_insert_pfn() argument
2130 return vmf_insert_pfn_prot(vma, addr, pfn, vma->vm_page_prot); in vmf_insert_pfn()
2149 unsigned long addr, pfn_t pfn, pgprot_t pgprot, in __vm_insert_mixed() argument
2156 if (addr < vma->vm_start || addr >= vma->vm_end) in __vm_insert_mixed()
2181 err = insert_page(vma, addr, page, pgprot); in __vm_insert_mixed()
2183 return insert_pfn(vma, addr, pfn, pgprot, mkwrite); in __vm_insert_mixed()
2220 vm_fault_t vmf_insert_mixed_prot(struct vm_area_struct *vma, unsigned long addr, in vmf_insert_mixed_prot() argument
2223 return __vm_insert_mixed(vma, addr, pfn, pgprot, false); in vmf_insert_mixed_prot()
2227 vm_fault_t vmf_insert_mixed(struct vm_area_struct *vma, unsigned long addr, in vmf_insert_mixed() argument
2230 return __vm_insert_mixed(vma, addr, pfn, vma->vm_page_prot, false); in vmf_insert_mixed()
2240 unsigned long addr, pfn_t pfn) in vmf_insert_mixed_mkwrite() argument
2242 return __vm_insert_mixed(vma, addr, pfn, vma->vm_page_prot, true); in vmf_insert_mixed_mkwrite()
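
vmf_insert_pfn() is the fault-time counterpart for VM_PFNMAP VMAs with no struct page behind them. A sketch of a typical ->fault handler, where mydrv_base_pfn is a hypothetical device base pfn (builds only in-kernel):

	#include <linux/mm.h>

	static unsigned long mydrv_base_pfn;	/* hypothetical: set at probe time */

	/* Map the page of device memory corresponding to the faulting
	 * offset; insert_pfn() returns VM_FAULT_NOPAGE on success. */
	static vm_fault_t mydrv_fault(struct vm_fault *vmf)
	{
		unsigned long pfn = mydrv_base_pfn + vmf->pgoff;

		return vmf_insert_pfn(vmf->vma, vmf->address, pfn);
	}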
2252 unsigned long addr, unsigned long end, in remap_pte_range() argument
2259 mapped_pte = pte = pte_alloc_map_lock(mm, pmd, addr, &ptl); in remap_pte_range()
2269 set_pte_at(mm, addr, pte, pte_mkspecial(pfn_pte(pfn, prot))); in remap_pte_range()
2271 } while (pte++, addr += PAGE_SIZE, addr != end); in remap_pte_range()
2278 unsigned long addr, unsigned long end, in remap_pmd_range() argument
2285 pfn -= addr >> PAGE_SHIFT; in remap_pmd_range()
2286 pmd = pmd_alloc(mm, pud, addr); in remap_pmd_range()
2291 next = pmd_addr_end(addr, end); in remap_pmd_range()
2292 err = remap_pte_range(mm, pmd, addr, next, in remap_pmd_range()
2293 pfn + (addr >> PAGE_SHIFT), prot); in remap_pmd_range()
2296 } while (pmd++, addr = next, addr != end); in remap_pmd_range()
2301 unsigned long addr, unsigned long end, in remap_pud_range() argument
2308 pfn -= addr >> PAGE_SHIFT; in remap_pud_range()
2309 pud = pud_alloc(mm, p4d, addr); in remap_pud_range()
2313 next = pud_addr_end(addr, end); in remap_pud_range()
2314 err = remap_pmd_range(mm, pud, addr, next, in remap_pud_range()
2315 pfn + (addr >> PAGE_SHIFT), prot); in remap_pud_range()
2318 } while (pud++, addr = next, addr != end); in remap_pud_range()
2323 unsigned long addr, unsigned long end, in remap_p4d_range() argument
2330 pfn -= addr >> PAGE_SHIFT; in remap_p4d_range()
2331 p4d = p4d_alloc(mm, pgd, addr); in remap_p4d_range()
2335 next = p4d_addr_end(addr, end); in remap_p4d_range()
2336 err = remap_pud_range(mm, p4d, addr, next, in remap_p4d_range()
2337 pfn + (addr >> PAGE_SHIFT), prot); in remap_p4d_range()
2340 } while (p4d++, addr = next, addr != end); in remap_p4d_range()
2356 int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr, in remap_pfn_range() argument
2361 unsigned long end = addr + PAGE_ALIGN(size); in remap_pfn_range()
2366 if (WARN_ON_ONCE(!PAGE_ALIGNED(addr))) in remap_pfn_range()
2388 if (addr != vma->vm_start || end != vma->vm_end) in remap_pfn_range()
2393 err = track_pfn_remap(vma, &prot, remap_pfn, addr, PAGE_ALIGN(size)); in remap_pfn_range()
2399 BUG_ON(addr >= end); in remap_pfn_range()
2400 pfn -= addr >> PAGE_SHIFT; in remap_pfn_range()
2401 pgd = pgd_offset(mm, addr); in remap_pfn_range()
2402 flush_cache_range(vma, addr, end); in remap_pfn_range()
2404 next = pgd_addr_end(addr, end); in remap_pfn_range()
2405 err = remap_p4d_range(mm, pgd, addr, next, in remap_pfn_range()
2406 pfn + (addr >> PAGE_SHIFT), prot); in remap_pfn_range()
2409 } while (pgd++, addr = next, addr != end); in remap_pfn_range()
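
The repeated "pfn -= addr >> PAGE_SHIFT" / "pfn + (addr >> PAGE_SHIFT)" dance in the remap helpers simply keeps pfn a linear function of addr while each level slices the range into sub-ranges. The canonical caller maps a whole physical region in one call from a driver's ->mmap (hypothetical driver; builds only in-kernel):

	#include <linux/fs.h>
	#include <linux/mm.h>

	/* Hypothetical ->mmap handler: userspace picks the physical page
	 * with the mmap() file offset, which the core has already stored
	 * in vma->vm_pgoff as a pfn. */
	static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
	{
		unsigned long size = vma->vm_end - vma->vm_start;

		return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
				       size, vma->vm_page_prot);
	}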
2468 unsigned long addr, unsigned long end, in apply_to_pte_range() argument
2478 pte_alloc_kernel_track(pmd, addr, mask) : in apply_to_pte_range()
2479 pte_alloc_map_lock(mm, pmd, addr, &ptl); in apply_to_pte_range()
2484 pte_offset_kernel(pmd, addr) : in apply_to_pte_range()
2485 pte_offset_map_lock(mm, pmd, addr, &ptl); in apply_to_pte_range()
2495 err = fn(pte++, addr, data); in apply_to_pte_range()
2499 } while (addr += PAGE_SIZE, addr != end); in apply_to_pte_range()
2511 unsigned long addr, unsigned long end, in apply_to_pmd_range() argument
2522 pmd = pmd_alloc_track(mm, pud, addr, mask); in apply_to_pmd_range()
2526 pmd = pmd_offset(pud, addr); in apply_to_pmd_range()
2529 next = pmd_addr_end(addr, end); in apply_to_pmd_range()
2531 err = apply_to_pte_range(mm, pmd, addr, next, fn, data, in apply_to_pmd_range()
2536 } while (pmd++, addr = next, addr != end); in apply_to_pmd_range()
2541 unsigned long addr, unsigned long end, in apply_to_pud_range() argument
2550 pud = pud_alloc_track(mm, p4d, addr, mask); in apply_to_pud_range()
2554 pud = pud_offset(p4d, addr); in apply_to_pud_range()
2557 next = pud_addr_end(addr, end); in apply_to_pud_range()
2559 err = apply_to_pmd_range(mm, pud, addr, next, fn, data, in apply_to_pud_range()
2564 } while (pud++, addr = next, addr != end); in apply_to_pud_range()
2569 unsigned long addr, unsigned long end, in apply_to_p4d_range() argument
2578 p4d = p4d_alloc_track(mm, pgd, addr, mask); in apply_to_p4d_range()
2582 p4d = p4d_offset(pgd, addr); in apply_to_p4d_range()
2585 next = p4d_addr_end(addr, end); in apply_to_p4d_range()
2587 err = apply_to_pud_range(mm, p4d, addr, next, fn, data, in apply_to_p4d_range()
2592 } while (p4d++, addr = next, addr != end); in apply_to_p4d_range()
2596 static int __apply_to_page_range(struct mm_struct *mm, unsigned long addr, in __apply_to_page_range() argument
2601 unsigned long start = addr, next; in __apply_to_page_range()
2602 unsigned long end = addr + size; in __apply_to_page_range()
2606 if (WARN_ON(addr >= end)) in __apply_to_page_range()
2609 pgd = pgd_offset(mm, addr); in __apply_to_page_range()
2611 next = pgd_addr_end(addr, end); in __apply_to_page_range()
2614 err = apply_to_p4d_range(mm, pgd, addr, next, fn, data, create, &mask); in __apply_to_page_range()
2617 } while (pgd++, addr = next, addr != end); in __apply_to_page_range()
2629 int apply_to_page_range(struct mm_struct *mm, unsigned long addr, in apply_to_page_range() argument
2632 return __apply_to_page_range(mm, addr, size, fn, data, true); in apply_to_page_range()
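
apply_to_page_range() runs a callback on every pte in the range, allocating intermediate tables as needed; apply_to_existing_page_range() (line 2862 below) is the no-allocation variant that skips holes. A sketch of a caller and its pte_fn_t callback, matching the fn(pte++, addr, data) call at line 2495 (the counting callback is hypothetical; builds only in-kernel):

	#include <linux/mm.h>

	/* Runs once per pte with its address; returning non-zero aborts
	 * the walk and is propagated to the caller. */
	static int count_present_pte(pte_t *pte, unsigned long addr, void *data)
	{
		if (pte_present(*pte))
			(*(unsigned long *)data)++;
		return 0;
	}

	/* usage sketch:
	 *	unsigned long n = 0;
	 *	apply_to_page_range(&init_mm, start, size, count_present_pte, &n);
	 */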
2689 static bool __pte_map_lock_speculative(struct vm_fault *vmf, unsigned long addr) in __pte_map_lock_speculative() argument
2705 trace_spf_vma_changed(_RET_IP_, vmf->vma, addr); in __pte_map_lock_speculative()
2715 trace_spf_pmd_changed(_RET_IP_, vmf->vma, addr); in __pte_map_lock_speculative()
2727 pte = pte_offset_map(&pmdval, addr); in __pte_map_lock_speculative()
2730 trace_spf_pte_lock(_RET_IP_, vmf->vma, addr); in __pte_map_lock_speculative()
2740 trace_spf_vma_changed(_RET_IP_, vmf->vma, addr); in __pte_map_lock_speculative()
2763 bool pte_map_lock_addr(struct vm_fault *vmf, unsigned long addr) in pte_map_lock_addr() argument
2767 addr, &vmf->ptl); in pte_map_lock_addr()
2771 return __pte_map_lock_speculative(vmf, addr); in pte_map_lock_addr()
2842 inline bool pte_map_lock_addr(struct vm_fault *vmf, unsigned long addr) in pte_map_lock_addr() argument
2845 addr, &vmf->ptl); in pte_map_lock_addr()
2862 int apply_to_existing_page_range(struct mm_struct *mm, unsigned long addr, in apply_to_existing_page_range() argument
2865 return __apply_to_page_range(mm, addr, size, fn, data, false); in apply_to_existing_page_range()
2910 unsigned long addr = vmf->address; in cow_user_page() local
2913 copy_user_highpage(dst, src, addr, vma); in cow_user_page()
2924 uaddr = (void __user *)(addr & PAGE_MASK); in cow_user_page()
2933 vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl); in cow_user_page()
2940 update_mmu_tlb(vma, addr, vmf->pte); in cow_user_page()
2946 if (ptep_set_access_flags(vma, addr, vmf->pte, entry, 0)) in cow_user_page()
2947 update_mmu_cache(vma, addr, vmf->pte); in cow_user_page()
2961 vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl); in cow_user_page()
2965 update_mmu_tlb(vma, addr, vmf->pte); in cow_user_page()
4167 void do_set_pte(struct vm_fault *vmf, struct page *page, unsigned long addr) in do_set_pte() argument
4171 bool prefault = vmf->address != addr; in do_set_pte()
4187 __page_add_new_anon_rmap(page, vma, addr, false); in do_set_pte()
4193 set_pte_at(vma->vm_mm, addr, vmf->pte, entry); in do_set_pte()
4537 unsigned long addr, int page_nid, in numa_migrate_prep() argument
4548 return mpol_misplaced(page, vma, addr); in numa_migrate_prep()
5574 int generic_access_phys(struct vm_area_struct *vma, unsigned long addr, in generic_access_phys() argument
5580 int offset = addr & (PAGE_SIZE-1); in generic_access_phys()
5582 if (follow_phys(vma, addr, write, &prot, &phys_addr)) in generic_access_phys()
5605 unsigned long addr, void *buf, int len, unsigned int gup_flags) in __access_remote_vm() argument
5620 ret = get_user_pages_remote(mm, addr, 1, in __access_remote_vm()
5630 vma = find_vma(mm, addr); in __access_remote_vm()
5631 if (!vma || vma->vm_start > addr) in __access_remote_vm()
5634 ret = vma->vm_ops->access(vma, addr, buf, in __access_remote_vm()
5642 offset = addr & (PAGE_SIZE-1); in __access_remote_vm()
5648 copy_to_user_page(vma, page, addr, in __access_remote_vm()
5652 copy_from_user_page(vma, page, addr, in __access_remote_vm()
5660 addr += bytes; in __access_remote_vm()
5679 int access_remote_vm(struct mm_struct *mm, unsigned long addr, in access_remote_vm() argument
5682 return __access_remote_vm(NULL, mm, addr, buf, len, gup_flags); in access_remote_vm()
5690 int access_process_vm(struct task_struct *tsk, unsigned long addr, in access_process_vm() argument
5700 ret = __access_remote_vm(tsk, mm, addr, buf, len, gup_flags); in access_process_vm()
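
__access_remote_vm() is the engine behind ptrace word access and /proc/<pid>/mem: it pins one page of the target mm at a time with get_user_pages_remote() and copies min(PAGE_SIZE - offset, len) bytes per iteration (lines 5642-5660), falling back to the vma's ->access method for pfn mappings. Reading /proc/self/mem exercises exactly this path from userspace:

	#include <fcntl.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		char secret[] = "hello via /proc/self/mem";
		char buf[sizeof(secret)] = "";
		int fd = open("/proc/self/mem", O_RDONLY);

		/* seek to our own buffer's virtual address and read it back
		 * through the kernel's remote-access path */
		if (fd < 0 || pread(fd, buf, sizeof(secret),
				    (off_t)(uintptr_t)secret) < 0) {
			perror("mem read");
			return 1;
		}
		printf("%s\n", buf);
		return 0;
	}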
5771 void (*process_subpage)(unsigned long addr, int idx, void *arg), in process_huge_page() argument
5775 unsigned long addr = addr_hint & in process_huge_page() local
5780 n = (addr_hint - addr) / PAGE_SIZE; in process_huge_page()
5788 process_subpage(addr + i * PAGE_SIZE, i, arg); in process_huge_page()
5797 process_subpage(addr + i * PAGE_SIZE, i, arg); in process_huge_page()
5809 process_subpage(addr + left_idx * PAGE_SIZE, left_idx, arg); in process_huge_page()
5811 process_subpage(addr + right_idx * PAGE_SIZE, right_idx, arg); in process_huge_page()
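
process_huge_page() (lines 5771-5811) visits subpages in a deliberate order: the ones farthest from the faulting address first, then inward from both ends, so the subpage the task is about to touch is written last and its cache lines stay hot. A runnable model of the visit order, assuming a 16-subpage huge page for brevity:

	#include <stdio.h>

	#define NPAGES 16	/* subpages per huge page in this model */

	static void visit(int idx) { printf("%d ", idx); }

	/* Mirrors process_huge_page()'s index arithmetic: clear the far
	 * half first, then spiral in from both sides toward the target. */
	static void model_process_huge_page(int target)
	{
		int i, base, l;

		if (2 * target <= NPAGES) {
			base = 0;
			l = target;
			for (i = NPAGES - 1; i >= 2 * target; i--)
				visit(i);	/* far tail first */
		} else {
			base = NPAGES - 2 * (NPAGES - target);
			l = NPAGES - target;
			for (i = 0; i < base; i++)
				visit(i);	/* far head first */
		}
		for (i = 0; i < l; i++) {	/* left-right, closing in */
			visit(base + i);
			visit(base + 2 * l - 1 - i);
		}
		printf(" (target %d visited last)\n", target);
	}

	int main(void)
	{
		model_process_huge_page(3);
		model_process_huge_page(12);
		return 0;
	}

Both runs end on the target index, matching the "process target subpage last" intent of the kernel code.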
5816 unsigned long addr, in clear_gigantic_page() argument
5826 clear_user_highpage(p, addr + i * PAGE_SIZE); in clear_gigantic_page()
5830 static void clear_subpage(unsigned long addr, int idx, void *arg) in clear_subpage() argument
5834 clear_user_highpage(page + idx, addr); in clear_subpage()
5840 unsigned long addr = addr_hint & in clear_huge_page() local
5844 clear_gigantic_page(page, addr, pages_per_huge_page); in clear_huge_page()
5852 unsigned long addr, in copy_user_gigantic_page() argument
5862 copy_user_highpage(dst, src, addr + i*PAGE_SIZE, vma); in copy_user_gigantic_page()
5876 static void copy_subpage(unsigned long addr, int idx, void *arg) in copy_subpage() argument
5881 addr, copy_arg->vma); in copy_subpage()
5888 unsigned long addr = addr_hint & in copy_user_huge_page() local
5897 copy_user_gigantic_page(dst, src, addr, vma, in copy_user_huge_page()