Lines matching refs:pgd — identifier cross-reference for mm/memory.c. The leading number on each entry is the line number in that file, the trailing note names the enclosing function, and the "argument"/"local" tag records whether pgd appears there as a function parameter or a local variable.
362 static inline void free_p4d_range(struct mmu_gather *tlb, pgd_t *pgd, in free_p4d_range() argument
371 p4d = p4d_offset(pgd, addr); in free_p4d_range()
390 p4d = p4d_offset(pgd, start); in free_p4d_range()
391 pgd_clear(pgd); in free_p4d_range()
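Lines 362-391 belong to free_p4d_range(), which walks the p4d entries under one pgd slot and, once the freed range covers the whole slot, clears the pgd entry and hands the p4d page to the mmu_gather. A hedged reconstruction of that shape from the lines shown plus the usual form of this function (p4d_addr_end(), p4d_none_or_clear_bad(), free_pud_range() and p4d_free_tlb() are assumed from context and may differ by kernel version):

	static inline void free_p4d_range(struct mmu_gather *tlb, pgd_t *pgd,
					  unsigned long addr, unsigned long end,
					  unsigned long floor, unsigned long ceiling)
	{
		p4d_t *p4d;
		unsigned long next;
		unsigned long start = addr;

		p4d = p4d_offset(pgd, addr);			/* line 371 */
		do {
			next = p4d_addr_end(addr, end);
			if (p4d_none_or_clear_bad(p4d))
				continue;
			free_pud_range(tlb, p4d, addr, next, floor, ceiling);
		} while (p4d++, addr = next, addr != end);

		/* Only tear the table down if the freed range covers the whole slot. */
		start &= PGDIR_MASK;
		if (start < floor)
			return;
		if (ceiling) {
			ceiling &= PGDIR_MASK;
			if (!ceiling)
				return;
		}
		if (end - 1 > ceiling - 1)
			return;

		p4d = p4d_offset(pgd, start);			/* line 390 */
		pgd_clear(pgd);					/* line 391: detach the p4d page */
		p4d_free_tlb(tlb, p4d, start);			/* let the mmu_gather free it */
	}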
402 pgd_t *pgd; in free_pgd_range() local
451 pgd = pgd_offset(tlb->mm, addr); in free_pgd_range()
454 if (pgd_none_or_clear_bad(pgd)) in free_pgd_range()
456 free_p4d_range(tlb, pgd, addr, next, floor, ceiling); in free_pgd_range()
457 } while (pgd++, addr = next, addr != end); in free_pgd_range()
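Lines 402-457 are the caller, free_pgd_range(), which drives the top-level iteration over pgd entries. A sketch of the loop visible at lines 451-457, filled in with the standard iterator helpers (pgd_addr_end() is assumed; the floor/ceiling address trimming the real function does before the loop is omitted):

	pgd_t *pgd;
	unsigned long next;

	pgd = pgd_offset(tlb->mm, addr);		/* line 451 */
	do {
		next = pgd_addr_end(addr, end);		/* clamp to one pgd slot */
		if (pgd_none_or_clear_bad(pgd))		/* line 454: skip empty or corrupt entries */
			continue;
		free_p4d_range(tlb, pgd, addr, next, floor, ceiling);	/* line 456 */
	} while (pgd++, addr = next, addr != end);	/* line 457 */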
574 pgd_t *pgd = pgd_offset(vma->vm_mm, addr); in print_bad_pte() local
575 p4d_t *p4d = p4d_offset(pgd, addr); in print_bad_pte()
1591 struct vm_area_struct *vma, pgd_t *pgd, in zap_p4d_range() argument
1598 p4d = p4d_offset(pgd, addr); in zap_p4d_range()
1614 pgd_t *pgd; in unmap_page_range() local
1619 pgd = pgd_offset(vma->vm_mm, addr); in unmap_page_range()
1622 if (pgd_none_or_clear_bad(pgd)) in unmap_page_range()
1624 next = zap_p4d_range(tlb, vma, pgd, addr, next, details); in unmap_page_range()
1625 } while (pgd++, addr = next, addr != end); in unmap_page_range()
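Lines 1591-1625 are the unmap path: unmap_page_range() runs the same top-level iterator, but each step delegates to zap_p4d_range(), which returns how far it got so the outer loop resumes there. A hedged sketch of the p4d-level helper implied by lines 1591-1598 (zap_pud_range() and p4d_addr_end() are assumed by analogy with the other levels):

	static inline unsigned long zap_p4d_range(struct mmu_gather *tlb,
					struct vm_area_struct *vma, pgd_t *pgd,
					unsigned long addr, unsigned long end,
					struct zap_details *details)
	{
		p4d_t *p4d;
		unsigned long next;

		p4d = p4d_offset(pgd, addr);			/* line 1598 */
		do {
			next = p4d_addr_end(addr, end);
			if (p4d_none_or_clear_bad(p4d))
				continue;
			next = zap_pud_range(tlb, vma, p4d, addr, next, details);
		} while (p4d++, addr = next, addr != end);

		return addr;					/* caller continues from here */
	}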
1781 pgd_t *pgd; in walk_to_pmd() local
1786 pgd = pgd_offset(mm, addr); in walk_to_pmd()
1787 p4d = p4d_alloc(mm, pgd, addr); in walk_to_pmd()
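Lines 1781-1787 show walk_to_pmd(), the allocating counterpart of the lookup descent: instead of the *_offset() helpers it uses the *_alloc() helpers, which populate a missing level on demand and return NULL only on allocation failure. A sketch of the descent those lines begin (the pud/pmd steps are assumed to follow the same pattern):

	static pmd_t *walk_to_pmd(struct mm_struct *mm, unsigned long addr)
	{
		pgd_t *pgd;
		p4d_t *p4d;
		pud_t *pud;
		pmd_t *pmd;

		pgd = pgd_offset(mm, addr);		/* line 1786: the top level always exists */
		p4d = p4d_alloc(mm, pgd, addr);		/* line 1787: allocate the p4d if missing */
		if (!p4d)
			return NULL;
		pud = pud_alloc(mm, p4d, addr);
		if (!pud)
			return NULL;
		pmd = pmd_alloc(mm, pud, addr);
		if (!pmd)
			return NULL;
		return pmd;
	}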
2420 static inline int remap_p4d_range(struct mm_struct *mm, pgd_t *pgd, in remap_p4d_range() argument
2429 p4d = p4d_alloc(mm, pgd, addr); in remap_p4d_range()
2449 pgd_t *pgd; in remap_pfn_range_notrack() local
2486 pgd = pgd_offset(mm, addr); in remap_pfn_range_notrack()
2490 err = remap_p4d_range(mm, pgd, addr, next, in remap_pfn_range_notrack()
2494 } while (pgd++, addr = next, addr != end); in remap_pfn_range_notrack()
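Lines 2420-2494 are the remap path: remap_pfn_range_notrack() runs the top-level iterator while threading an error code out of the loop, and remap_p4d_range() allocates each lower level with p4d_alloc(). A hedged sketch of the outer loop at lines 2486-2494; the pfn rebasing and the prot argument are not visible in the listing and are filled in by assumption, and the VMA flag setup and cache flushing of the real function are elided:

	pgd_t *pgd;
	unsigned long next;
	int err;

	BUG_ON(addr >= end);
	pfn -= addr >> PAGE_SHIFT;		/* rebase so the addition below yields the right frame */
	pgd = pgd_offset(mm, addr);		/* line 2486 */
	do {
		next = pgd_addr_end(addr, end);
		err = remap_p4d_range(mm, pgd, addr, next,
				      pfn + (addr >> PAGE_SHIFT), prot);	/* line 2490 */
		if (err)
			break;			/* propagate -ENOMEM etc. to the caller */
	} while (pgd++, addr = next, addr != end);	/* line 2494 */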
2693 static int apply_to_p4d_range(struct mm_struct *mm, pgd_t *pgd, in apply_to_p4d_range() argument
2703 p4d = p4d_alloc_track(mm, pgd, addr, mask); in apply_to_p4d_range()
2707 p4d = p4d_offset(pgd, addr); in apply_to_p4d_range()
2733 pgd_t *pgd; in __apply_to_page_range() local
2742 pgd = pgd_offset(mm, addr); in __apply_to_page_range()
2745 if (pgd_none(*pgd) && !create) in __apply_to_page_range()
2747 if (WARN_ON_ONCE(pgd_leaf(*pgd))) in __apply_to_page_range()
2749 if (!pgd_none(*pgd) && WARN_ON_ONCE(pgd_bad(*pgd))) { in __apply_to_page_range()
2752 pgd_clear_bad(pgd); in __apply_to_page_range()
2754 err = apply_to_p4d_range(mm, pgd, addr, next, in __apply_to_page_range()
2758 } while (pgd++, addr = next, addr != end); in __apply_to_page_range()
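Lines 2693-2758 are the apply_to_page_range() engine: it walks the range and invokes a callback per pte, and the create flag decides whether missing levels are allocated (p4d_alloc_track(), line 2703) or skipped (p4d_offset(), line 2707). The pgd-level checks at lines 2745-2752 guard against empty, huge, and corrupted entries. A hedged reconstruction of that loop; fn, data and mask come from the enclosing function and are assumed here:

	pgd_t *pgd;
	unsigned long next;
	int err = 0;

	pgd = pgd_offset(mm, addr);				/* line 2742 */
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none(*pgd) && !create)			/* line 2745 */
			continue;				/* nothing mapped, nothing to do */
		if (WARN_ON_ONCE(pgd_leaf(*pgd)))		/* line 2747 */
			return -EINVAL;				/* huge entry: cannot descend */
		if (!pgd_none(*pgd) && WARN_ON_ONCE(pgd_bad(*pgd))) {	/* line 2749 */
			if (!create)
				continue;
			pgd_clear_bad(pgd);			/* line 2752 */
		}
		err = apply_to_p4d_range(mm, pgd, addr, next,
					 fn, data, create, &mask);	/* line 2754 */
		if (err)
			break;
	} while (pgd++, addr = next, addr != end);		/* line 2758 */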
4927 pgd_t *pgd; in __handle_mm_fault() local
4951 pgd = pgd_offset(mm, address); in __handle_mm_fault()
4952 pgdval = READ_ONCE(*pgd); in __handle_mm_fault()
4958 p4d = p4d_offset(pgd, address); in __handle_mm_fault()
4959 if (pgd_val(READ_ONCE(*pgd)) != pgd_val(pgdval)) in __handle_mm_fault()
5042 pgd = pgd_offset(mm, address); in __handle_mm_fault()
5043 p4d = p4d_alloc(mm, pgd, address); in __handle_mm_fault()
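Lines 4927-5043 show two different pgd accesses in __handle_mm_fault(). The first (lines 4951-4959) snapshots the entry with READ_ONCE() and re-checks it after p4d_offset(), a lockless-walk pattern that detects a concurrently changed top-level entry; this re-check is not present in every kernel tree and reads like a speculative-page-fault variant. The second (lines 5042-5043) is the ordinary allocating walk. A hedged sketch of both; the pgd_none()/pgd_bad() test on the snapshot and the slow_path label are assumptions, not shown in the listing:

	pgd_t *pgd, pgdval;
	p4d_t *p4d;

	/* Lockless flavour (lines 4951-4959): snapshot, descend, re-check. */
	pgd = pgd_offset(mm, address);
	pgdval = READ_ONCE(*pgd);			/* one snapshot of the entry */
	if (pgd_none(pgdval) || unlikely(pgd_bad(pgdval)))
		goto slow_path;				/* hypothetical label */
	p4d = p4d_offset(pgd, address);
	if (pgd_val(READ_ONCE(*pgd)) != pgd_val(pgdval))
		goto slow_path;				/* the entry changed under us */

	/* Allocating flavour (lines 5042-5043): may populate a missing p4d. */
	pgd = pgd_offset(mm, address);
	p4d = p4d_alloc(mm, pgd, address);
	if (!p4d)
		return VM_FAULT_OOM;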
5313 int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address) in __p4d_alloc() argument
5322 if (pgd_present(*pgd)) /* Another has populated it */ in __p4d_alloc()
5325 pgd_populate(mm, pgd, new); in __p4d_alloc()
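Lines 5313-5325 show __p4d_alloc(), the slow path behind p4d_alloc(): it allocates a fresh p4d page, then re-checks the pgd entry under the lock so that two racing faulters do not both install one. A hedged reconstruction; the use of mm->page_table_lock, p4d_alloc_one(), p4d_free() and the write barrier follow the usual pattern for these helpers and may differ by version:

	int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
	{
		p4d_t *new = p4d_alloc_one(mm, address);
		if (!new)
			return -ENOMEM;

		spin_lock(&mm->page_table_lock);
		if (pgd_present(*pgd)) {		/* line 5322: another thread won the race */
			p4d_free(mm, new);
		} else {
			smp_wmb();			/* publish the zeroed table before the pgd entry */
			pgd_populate(mm, pgd, new);	/* line 5325 */
		}
		spin_unlock(&mm->page_table_lock);
		return 0;
	}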
5384 pgd_t *pgd; in follow_invalidate_pte() local
5390 pgd = pgd_offset(mm, address); in follow_invalidate_pte()
5391 if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd))) in follow_invalidate_pte()
5394 p4d = p4d_offset(pgd, address); in follow_invalidate_pte()
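Lines 5384-5394 show follow_invalidate_pte(), a pure lookup: at each level it bails out if the entry is absent or bad instead of allocating anything. A sketch of the pgd step at lines 5390-5391, continued one level down by assumption (the real function goes on through pud and pmd and takes the pte lock; the out label is part of that surrounding context):

	pgd_t *pgd;
	p4d_t *p4d;

	pgd = pgd_offset(mm, address);				/* line 5390 */
	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))		/* line 5391 */
		goto out;					/* no mapping to report */

	p4d = p4d_offset(pgd, address);				/* line 5394 */
	if (p4d_none(*p4d) || unlikely(p4d_bad(*p4d)))
		goto out;
	/* ...and so on for pud, pmd and the pte itself. */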