Lines Matching full:range (mm/hmm.c, Linux kernel)

30 	struct hmm_range	*range;  member
41 struct hmm_range *range, unsigned long cpu_flags) in hmm_pfns_fill() argument
43 unsigned long i = (addr - range->start) >> PAGE_SHIFT; in hmm_pfns_fill()
46 range->hmm_pfns[i] = cpu_flags; in hmm_pfns_fill()
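The three hits above all come from the hmm_pfns_fill() helper. For context, here is a reconstruction of the whole helper as it reads in mm/hmm.c (exact form may vary between kernel versions): it writes one entry of range->hmm_pfns[] per page of [addr, end), using the page offset from range->start as the array index.

    static int hmm_pfns_fill(unsigned long addr, unsigned long end,
                             struct hmm_range *range, unsigned long cpu_flags)
    {
            /* One hmm_pfns[] slot per page, indexed from range->start. */
            unsigned long i = (addr - range->start) >> PAGE_SHIFT;

            for (; addr < end; addr += PAGE_SIZE, i++)
                    range->hmm_pfns[i] = cpu_flags;
            return 0;
    }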
51 * hmm_vma_fault() - fault in a range lacking valid pmd or pte(s)
52 * @addr: range virtual start address (inclusive)
53 * @end: range virtual end address (exclusive)
59 * or whenever there is no page directory covering the virtual address range.
88 struct hmm_range *range = hmm_vma_walk->range; in hmm_pte_need_fault() local
92 * consider the default flags requested for the range. The API can in hmm_pte_need_fault()
96 * fault a range with specific flags. For the latter one it is a in hmm_pte_need_fault()
100 pfn_req_flags &= range->pfn_flags_mask; in hmm_pte_need_fault()
101 pfn_req_flags |= range->default_flags; in hmm_pte_need_fault()
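The last two hits show how hmm_pte_need_fault() combines the caller's request flags: the per-pfn input flags are masked with range->pfn_flags_mask and then ORed with range->default_flags, so a driver can request faulting for the whole range, per page, or both. A short illustration of the two policies, following the usage described in Documentation/mm/hmm.rst (index_of_write is a placeholder index chosen by the driver):

    /* Fault every page of the range with at least read permission. */
    range->default_flags = HMM_PFN_REQ_FAULT;
    range->pfn_flags_mask = 0;

    /* Same, but additionally require write permission for a single page;
     * only the HMM_PFN_REQ_WRITE bit is honoured from the per-pfn entries. */
    range->default_flags = HMM_PFN_REQ_FAULT;
    range->pfn_flags_mask = HMM_PFN_REQ_WRITE;
    range->hmm_pfns[index_of_write] = HMM_PFN_REQ_WRITE;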
123 struct hmm_range *range = hmm_vma_walk->range; in hmm_range_need_fault() local
132 if (!((range->default_flags | range->pfn_flags_mask) & in hmm_range_need_fault()
149 struct hmm_range *range = hmm_vma_walk->range; in hmm_vma_walk_hole() local
154 i = (addr - range->start) >> PAGE_SHIFT; in hmm_vma_walk_hole()
156 hmm_pfns = &range->hmm_pfns[i]; in hmm_vma_walk_hole()
162 return hmm_pfns_fill(addr, end, range, HMM_PFN_ERROR); in hmm_vma_walk_hole()
166 return hmm_pfns_fill(addr, end, range, 0); in hmm_vma_walk_hole()
174 static inline unsigned long pmd_to_hmm_pfn_flags(struct hmm_range *range, in pmd_to_hmm_pfn_flags() argument
190 struct hmm_range *range = hmm_vma_walk->range; in hmm_vma_handle_pmd() local
196 cpu_flags = pmd_to_hmm_pfn_flags(range, pmd); in hmm_vma_handle_pmd()
213 static inline bool hmm_is_device_private_entry(struct hmm_range *range, in hmm_is_device_private_entry() argument
218 range->dev_private_owner; in hmm_is_device_private_entry()
221 static inline unsigned long pte_to_hmm_pfn_flags(struct hmm_range *range, in pte_to_hmm_pfn_flags() argument
234 struct hmm_range *range = hmm_vma_walk->range; in hmm_vma_handle_pte() local
256 if (hmm_is_device_private_entry(range, entry)) { in hmm_vma_handle_pte()
287 cpu_flags = pte_to_hmm_pfn_flags(range, pte); in hmm_vma_handle_pte()
324 struct hmm_range *range = hmm_vma_walk->range; in hmm_vma_walk_pmd() local
326 &range->hmm_pfns[(start - range->start) >> PAGE_SHIFT]; in hmm_vma_walk_pmd()
343 return hmm_pfns_fill(start, end, range, 0); in hmm_vma_walk_pmd()
349 return hmm_pfns_fill(start, end, range, HMM_PFN_ERROR); in hmm_vma_walk_pmd()
379 return hmm_pfns_fill(start, end, range, HMM_PFN_ERROR); in hmm_vma_walk_pmd()
398 static inline unsigned long pud_to_hmm_pfn_flags(struct hmm_range *range, in pud_to_hmm_pfn_flags() argument
412 struct hmm_range *range = hmm_vma_walk->range; in hmm_vma_walk_pud() local
441 i = (addr - range->start) >> PAGE_SHIFT; in hmm_vma_walk_pud()
443 hmm_pfns = &range->hmm_pfns[i]; in hmm_vma_walk_pud()
445 cpu_flags = pud_to_hmm_pfn_flags(range, pud); in hmm_vma_walk_pud()
477 struct hmm_range *range = hmm_vma_walk->range; in hmm_vma_walk_hugetlb_entry() local
488 i = (start - range->start) >> PAGE_SHIFT; in hmm_vma_walk_hugetlb_entry()
489 pfn_req_flags = range->hmm_pfns[i]; in hmm_vma_walk_hugetlb_entry()
490 cpu_flags = pte_to_hmm_pfn_flags(range, entry) | in hmm_vma_walk_hugetlb_entry()
501 range->hmm_pfns[i] = pfn | cpu_flags; in hmm_vma_walk_hugetlb_entry()
514 struct hmm_range *range = hmm_vma_walk->range; in hmm_vma_walk_test() local
529 * If a fault is requested for an unsupported range then it is a hard in hmm_vma_walk_test()
533 range->hmm_pfns + in hmm_vma_walk_test()
534 ((start - range->start) >> PAGE_SHIFT), in hmm_vma_walk_test()
538 hmm_pfns_fill(start, end, range, HMM_PFN_ERROR); in hmm_vma_walk_test()
553 * hmm_range_fault - try to fault some address in a virtual address range
554 * @range: argument structure
561 * -EPERM: Invalid permission (e.g., asking for write and range is read
563 * -EBUSY: The range has been invalidated and the caller needs to wait for
571 int hmm_range_fault(struct hmm_range *range) in hmm_range_fault() argument
574 .range = range, in hmm_range_fault()
575 .last = range->start, in hmm_range_fault()
577 struct mm_struct *mm = range->notifier->mm; in hmm_range_fault()
583 /* If range is no longer valid force retry. */ in hmm_range_fault()
584 if (mmu_interval_check_retry(range->notifier, in hmm_range_fault()
585 range->notifier_seq)) in hmm_range_fault()
587 ret = walk_page_range(mm, hmm_vma_walk.last, range->end, in hmm_range_fault()
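hmm_range_fault() is meant to be called in a retry loop against an mmu_interval_notifier: the caller snapshots the notifier sequence, faults/snapshots the range under mmap_read_lock(), retries on -EBUSY, and only consumes range.hmm_pfns[] once mmu_interval_read_retry() confirms the range was not invalidated in the meantime. A minimal caller sketch based on the documented usage pattern, assuming the driver has already registered notifier and allocated the pfns array; driver_lock is a placeholder for whatever lock serializes device page-table updates:

    struct hmm_range range = {
            .notifier = &notifier,          /* struct mmu_interval_notifier */
            .start = start,
            .end = end,
            .hmm_pfns = pfns,
            .default_flags = HMM_PFN_REQ_FAULT,
    };
    int ret;

again:
    range.notifier_seq = mmu_interval_read_begin(range.notifier);
    mmap_read_lock(mm);
    ret = hmm_range_fault(&range);
    mmap_read_unlock(mm);
    if (ret) {
            if (ret == -EBUSY)
                    goto again;             /* range was invalidated, retry */
            return ret;
    }

    mutex_lock(&driver_lock);
    if (mmu_interval_read_retry(range.notifier, range.notifier_seq)) {
            mutex_unlock(&driver_lock);
            goto again;
    }
    /* pfns[] is now a stable snapshot; program the device page table here. */
    mutex_unlock(&driver_lock);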