Lines Matching full:range
30 struct hmm_range *range; member
41 struct hmm_range *range, unsigned long cpu_flags) in hmm_pfns_fill() argument
43 unsigned long i = (addr - range->start) >> PAGE_SHIFT; in hmm_pfns_fill()
46 range->hmm_pfns[i] = cpu_flags; in hmm_pfns_fill()
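The matched lines above come from a small fill helper. A minimal sketch of it, reconstructed from those lines (the loop itself and the return value are assumptions), looks like this:

static int hmm_pfns_fill(unsigned long addr, unsigned long end,
			 struct hmm_range *range, unsigned long cpu_flags)
{
	/* index of the first hmm_pfns[] entry covering addr */
	unsigned long i = (addr - range->start) >> PAGE_SHIFT;

	/* write the same flags value into every entry covering [addr, end) */
	for (; addr < end; addr += PAGE_SIZE, i++)
		range->hmm_pfns[i] = cpu_flags;
	return 0;
}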
51 * hmm_vma_fault() - fault in a range lacking valid pmd or pte(s)
52 * @addr: range virtual start address (inclusive)
53 * @end: range virtual end address (exclusive)
59 * or whenever there is no page directory covering the virtual address range.
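A hedged sketch of what this comment describes, not necessarily the exact code in the file (HMM_NEED_WRITE_FAULT is the internal flag assumed from context, and handle_mm_fault() is used with its recent four-argument signature): each page in [addr, end) is faulted, a write request against a read-only VMA fails with -EPERM, and returning -EBUSY makes the page-table walk restart so the newly created entries can be read.

static int hmm_vma_fault(unsigned long addr, unsigned long end,
			 unsigned int required_fault, struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->vma;
	unsigned int fault_flags = FAULT_FLAG_REMOTE;

	if (required_fault & HMM_NEED_WRITE_FAULT) {
		if (!(vma->vm_flags & VM_WRITE))
			return -EPERM;	/* write requested on a read-only VMA */
		fault_flags |= FAULT_FLAG_WRITE;
	}

	/* fault every page in the range; the walker retries via -EBUSY */
	for (; addr < end; addr += PAGE_SIZE)
		if (handle_mm_fault(vma, addr, fault_flags, NULL) &
		    VM_FAULT_ERROR)
			return -EFAULT;
	return -EBUSY;
}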
88 struct hmm_range *range = hmm_vma_walk->range; in hmm_pte_need_fault() local
92 * consider the default flags requested for the range. The API can in hmm_pte_need_fault()
96 * fault a range with specific flags. For the latter one it is a in hmm_pte_need_fault()
100 pfn_req_flags &= range->pfn_flags_mask; in hmm_pte_need_fault()
101 pfn_req_flags |= range->default_flags; in hmm_pte_need_fault()
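These two assignments implement the range-wide request policy: pfn_flags_mask selects which per-entry request bits in hmm_pfns[] are honoured, and default_flags is then OR-ed in for every entry. As an example (the pattern follows Documentation/mm/hmm.rst; index_of_write is a placeholder), a driver that wants at least read access to every page but write access to one particular page can set:

	range->default_flags = HMM_PFN_REQ_FAULT;	/* fault all pages with at least read */
	range->pfn_flags_mask = HMM_PFN_REQ_WRITE;	/* honour per-entry write requests */
	range->hmm_pfns[index_of_write] = HMM_PFN_REQ_WRITE;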
123 struct hmm_range *range = hmm_vma_walk->range; in hmm_range_need_fault() local
132 if (!((range->default_flags | range->pfn_flags_mask) & in hmm_range_need_fault()
149 struct hmm_range *range = hmm_vma_walk->range; in hmm_vma_walk_hole() local
154 i = (addr - range->start) >> PAGE_SHIFT; in hmm_vma_walk_hole()
156 hmm_pfns = &range->hmm_pfns[i]; in hmm_vma_walk_hole()
162 return hmm_pfns_fill(addr, end, range, HMM_PFN_ERROR); in hmm_vma_walk_hole()
166 return hmm_pfns_fill(addr, end, range, 0); in hmm_vma_walk_hole()
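From the matched lines, the hole handling can be sketched roughly as below (the hmm_range_need_fault() arguments and the surrounding declarations are assumptions): a hole with no VMA behind it is a hard -EFAULT if a fault was requested and HMM_PFN_ERROR otherwise, while a hole inside a valid VMA is either faulted in or reported as empty (0) entries.

	required_fault = hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, 0);
	if (!walk->vma) {
		/* no VMA at all: faulting is impossible here */
		if (required_fault)
			return -EFAULT;
		return hmm_pfns_fill(addr, end, range, HMM_PFN_ERROR);
	}
	if (required_fault)
		return hmm_vma_fault(addr, end, required_fault, walk);
	/* nothing was requested: report the hole as empty entries */
	return hmm_pfns_fill(addr, end, range, 0);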
174 static inline unsigned long pmd_to_hmm_pfn_flags(struct hmm_range *range, in pmd_to_hmm_pfn_flags() argument
190 struct hmm_range *range = hmm_vma_walk->range; in hmm_vma_handle_pmd() local
196 cpu_flags = pmd_to_hmm_pfn_flags(range, pmd); in hmm_vma_handle_pmd()
213 static inline bool hmm_is_device_private_entry(struct hmm_range *range, in hmm_is_device_private_entry() argument
218 range->dev_private_owner; in hmm_is_device_private_entry()
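A sketch of the check these lines belong to, assuming the pfn_swap_entry_to_page() helper of recent kernels: a swap entry is treated as a device-private page for this range only when the owning pgmap matches the dev_private_owner the caller put into struct hmm_range.

static inline bool hmm_is_device_private_entry(struct hmm_range *range,
					       swp_entry_t entry)
{
	return is_device_private_entry(entry) &&
	       pfn_swap_entry_to_page(entry)->pgmap->owner ==
			range->dev_private_owner;
}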
221 static inline unsigned long pte_to_hmm_pfn_flags(struct hmm_range *range, in pte_to_hmm_pfn_flags() argument
234 struct hmm_range *range = hmm_vma_walk->range; in hmm_vma_handle_pte() local
256 if (hmm_is_device_private_entry(range, entry)) { in hmm_vma_handle_pte()
287 cpu_flags = pte_to_hmm_pfn_flags(range, pte); in hmm_vma_handle_pte()
325 struct hmm_range *range = hmm_vma_walk->range; in hmm_vma_walk_pmd() local
327 &range->hmm_pfns[(start - range->start) >> PAGE_SHIFT]; in hmm_vma_walk_pmd()
344 return hmm_pfns_fill(start, end, range, 0); in hmm_vma_walk_pmd()
350 return hmm_pfns_fill(start, end, range, HMM_PFN_ERROR); in hmm_vma_walk_pmd()
380 return hmm_pfns_fill(start, end, range, HMM_PFN_ERROR); in hmm_vma_walk_pmd()
399 static inline unsigned long pud_to_hmm_pfn_flags(struct hmm_range *range, in pud_to_hmm_pfn_flags() argument
413 struct hmm_range *range = hmm_vma_walk->range; in hmm_vma_walk_pud() local
442 i = (addr - range->start) >> PAGE_SHIFT; in hmm_vma_walk_pud()
444 hmm_pfns = &range->hmm_pfns[i]; in hmm_vma_walk_pud()
446 cpu_flags = pud_to_hmm_pfn_flags(range, pud); in hmm_vma_walk_pud()
478 struct hmm_range *range = hmm_vma_walk->range; in hmm_vma_walk_hugetlb_entry() local
489 i = (start - range->start) >> PAGE_SHIFT; in hmm_vma_walk_hugetlb_entry()
490 pfn_req_flags = range->hmm_pfns[i]; in hmm_vma_walk_hugetlb_entry()
491 cpu_flags = pte_to_hmm_pfn_flags(range, entry) | in hmm_vma_walk_hugetlb_entry()
502 range->hmm_pfns[i] = pfn | cpu_flags; in hmm_vma_walk_hugetlb_entry()
515 struct hmm_range *range = hmm_vma_walk->range; in hmm_vma_walk_test() local
530 * If a fault is requested for an unsupported range then it is a hard in hmm_vma_walk_test()
534 range->hmm_pfns + in hmm_vma_walk_test()
535 ((start - range->start) >> PAGE_SHIFT), in hmm_vma_walk_test()
539 hmm_pfns_fill(start, end, range, HMM_PFN_ERROR); in hmm_vma_walk_test()
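A condensed sketch of the test around these lines (the VM_IO/VM_PFNMAP/VM_READ check is an assumption based on the rule that hmm_range_fault() cannot handle VMAs without struct page backing): requesting a fault in such a VMA is a hard failure; otherwise its entries are marked HMM_PFN_ERROR and the VMA is skipped.

	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)) && (vma->vm_flags & VM_READ))
		return 0;	/* ordinary VMA, walk it as usual */

	if (hmm_range_need_fault(hmm_vma_walk,
				 range->hmm_pfns +
					((start - range->start) >> PAGE_SHIFT),
				 (end - start) >> PAGE_SHIFT, 0))
		return -EFAULT;	/* fault requested for an unsupported range */

	hmm_pfns_fill(start, end, range, HMM_PFN_ERROR);
	return 1;		/* skip this vma and continue with the next one */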
554 * hmm_range_fault - try to fault some address in a virtual address range
555 * @range: argument structure
562 -EPERM: Invalid permission (e.g., asking for write and range is read only). in hmm_range_fault()
564 -EBUSY: The range has been invalidated and the caller needs to wait for the invalidation to finish. in hmm_range_fault()
572 int hmm_range_fault(struct hmm_range *range) in hmm_range_fault() argument
575 .range = range, in hmm_range_fault()
576 .last = range->start, in hmm_range_fault()
578 struct mm_struct *mm = range->notifier->mm; in hmm_range_fault()
584 /* If range is no longer valid force retry. */ in hmm_range_fault()
585 if (mmu_interval_check_retry(range->notifier, in hmm_range_fault()
586 range->notifier_seq)) in hmm_range_fault()
588 ret = walk_page_range(mm, hmm_vma_walk.last, range->end, in hmm_range_fault()
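To tie the pieces together, here is a hedged caller-side sketch adapted from the usage pattern in Documentation/mm/hmm.rst (interval_sub, driver_lock, mm, start and NPAGES are placeholders, not names from the matched file): hmm_range_fault() runs under mmap_read_lock(), -EBUSY means the range was invalidated and the whole sequence is retried, and the mmu_interval_read_retry() check under the driver lock is what makes the collected hmm_pfns[] safe to program into device page tables.

	unsigned long hmm_pfns[NPAGES];
	struct hmm_range range = {
		.notifier = &interval_sub,
		.start = start,
		.end = start + (NPAGES << PAGE_SHIFT),
		.hmm_pfns = hmm_pfns,
		.default_flags = HMM_PFN_REQ_FAULT,
	};
	int ret;

again:
	range.notifier_seq = mmu_interval_read_begin(&interval_sub);
	mmap_read_lock(mm);
	ret = hmm_range_fault(&range);
	mmap_read_unlock(mm);
	if (ret == -EBUSY)
		goto again;		/* the range was invalidated, start over */
	if (ret)
		return ret;

	mutex_lock(&driver_lock);
	if (mmu_interval_read_retry(&interval_sub, range.notifier_seq)) {
		mutex_unlock(&driver_lock);
		goto again;		/* invalidated after the walk, start over */
	}
	/* hmm_pfns[] is now stable: update the device page table here */
	mutex_unlock(&driver_lock);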