Lines Matching full:range
47 * @ranges: list of ranges being snapshotted
135 struct hmm_range *range; in hmm_invalidate_range() local
138 list_for_each_entry(range, &hmm->ranges, list) { in hmm_invalidate_range()
141 if (end < range->start || start >= range->end) in hmm_invalidate_range()
144 range->valid = false; in hmm_invalidate_range()
145 addr = max(start, range->start); in hmm_invalidate_range()
146 idx = (addr - range->start) >> PAGE_SHIFT; in hmm_invalidate_range()
147 npages = (min(range->end, end) - addr) >> PAGE_SHIFT; in hmm_invalidate_range()
148 memset(&range->pfns[idx], 0, sizeof(*range->pfns) * npages); in hmm_invalidate_range()
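The hits above show hmm_invalidate_range() marking every overlapping tracked range invalid and zeroing only the pfn slots that the invalidated span actually covers. A minimal userspace sketch of that overlap arithmetic, assuming a 4 KiB page size and a bare uint64_t array standing in for range->pfns (all names here are illustrative, not kernel API):

#include <stdint.h>
#include <string.h>

#define PAGE_SHIFT 12

/* Clear the pfn slots of the tracked range [r_start, r_end) that overlap
 * the invalidated span [start, end), mirroring the memset above. */
static void clear_overlap(uint64_t *pfns, unsigned long r_start,
			  unsigned long r_end, unsigned long start,
			  unsigned long end)
{
	unsigned long addr, idx, npages;

	if (end < r_start || start >= r_end)
		return;					/* no overlap */

	addr = start > r_start ? start : r_start;	/* max(start, r_start) */
	idx = (addr - r_start) >> PAGE_SHIFT;		/* first slot to clear */
	npages = ((r_end < end ? r_end : end) - addr) >> PAGE_SHIFT;
	memset(&pfns[idx], 0, sizeof(*pfns) * npages);
}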
298 struct hmm_range *range; member
309 struct hmm_range *range = hmm_vma_walk->range; in hmm_vma_do_fault() local
319 *pfn = range->values[HMM_PFN_ERROR]; in hmm_vma_do_fault()
331 struct hmm_range *range = hmm_vma_walk->range; in hmm_pfns_bad() local
332 uint64_t *pfns = range->pfns; in hmm_pfns_bad()
335 i = (addr - range->start) >> PAGE_SHIFT; in hmm_pfns_bad()
337 pfns[i] = range->values[HMM_PFN_ERROR]; in hmm_pfns_bad()
343 * hmm_vma_walk_hole() - handle a range lacking valid pmd or pte(s)
344 * @start: range virtual start address (inclusive)
345 * @end: range virtual end address (exclusive)
352 * or whenever there is no page directory covering the virtual address range.
359 struct hmm_range *range = hmm_vma_walk->range; in hmm_vma_walk_hole_() local
360 uint64_t *pfns = range->pfns; in hmm_vma_walk_hole_()
364 i = (addr - range->start) >> PAGE_SHIFT; in hmm_vma_walk_hole_()
366 pfns[i] = range->values[HMM_PFN_NONE]; in hmm_vma_walk_hole_()
384 struct hmm_range *range = hmm_vma_walk->range; in hmm_pte_need_fault() local
391 if (!(pfns & range->flags[HMM_PFN_VALID])) in hmm_pte_need_fault()
394 if ((cpu_flags & range->flags[HMM_PFN_DEVICE_PRIVATE])) { in hmm_pte_need_fault()
396 if (pfns & range->flags[HMM_PFN_DEVICE_PRIVATE]) { in hmm_pte_need_fault()
397 *write_fault = pfns & range->flags[HMM_PFN_WRITE]; in hmm_pte_need_fault()
404 *fault = !(cpu_flags & range->flags[HMM_PFN_VALID]); in hmm_pte_need_fault()
406 if ((pfns & range->flags[HMM_PFN_WRITE]) && in hmm_pte_need_fault()
407 !(cpu_flags & range->flags[HMM_PFN_WRITE])) { in hmm_pte_need_fault()
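These hits come from the helper that decides whether the walk must fault a page in rather than just snapshot it. A condensed, hypothetical restatement of that decision, assuming req holds the driver-requested pfn flags and cpu_flags the flags derived from the CPU entry, both encoded with the driver-supplied flags[] array seen above:

#include <linux/hmm.h>	/* HMM_PFN_VALID, HMM_PFN_WRITE, HMM_PFN_DEVICE_PRIVATE */

/* Sketch only: mirrors the checks visible in hmm_pte_need_fault(). */
static void need_fault(const uint64_t *flags, uint64_t req, uint64_t cpu_flags,
		       bool *fault, bool *write_fault)
{
	*fault = *write_fault = false;

	/* Driver did not ask for a valid entry here: nothing to do. */
	if (!(req & flags[HMM_PFN_VALID]))
		return;

	/* Device private memory: fault only if explicitly requested. */
	if (cpu_flags & flags[HMM_PFN_DEVICE_PRIVATE]) {
		if (req & flags[HMM_PFN_DEVICE_PRIVATE]) {
			*write_fault = req & flags[HMM_PFN_WRITE];
			*fault = true;
		}
		return;
	}

	/* Fault if the CPU entry is not valid ... */
	*fault = !(cpu_flags & flags[HMM_PFN_VALID]);
	/* ... and write-fault if write is wanted but not granted. */
	if ((req & flags[HMM_PFN_WRITE]) && !(cpu_flags & flags[HMM_PFN_WRITE])) {
		*write_fault = true;
		*fault = true;
	}
}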
437 struct hmm_range *range = hmm_vma_walk->range; in hmm_vma_walk_hole() local
442 i = (addr - range->start) >> PAGE_SHIFT; in hmm_vma_walk_hole()
444 pfns = &range->pfns[i]; in hmm_vma_walk_hole()
450 static inline uint64_t pmd_to_hmm_pfn_flags(struct hmm_range *range, pmd_t pmd) in pmd_to_hmm_pfn_flags() argument
454 return pmd_write(pmd) ? range->flags[HMM_PFN_VALID] | in pmd_to_hmm_pfn_flags()
455 range->flags[HMM_PFN_WRITE] : in pmd_to_hmm_pfn_flags()
456 range->flags[HMM_PFN_VALID]; in pmd_to_hmm_pfn_flags()
466 struct hmm_range *range = hmm_vma_walk->range; in hmm_vma_handle_pmd() local
472 cpu_flags = pmd_to_hmm_pfn_flags(range, pmd); in hmm_vma_handle_pmd()
481 pfns[i] = hmm_pfn_from_pfn(range, pfn) | cpu_flags; in hmm_vma_handle_pmd()
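In the huge pmd case the walker derives one set of flags from the pmd and then writes one entry per base page. A hypothetical excerpt of that fill loop, assuming pfns already points at the slot for addr and that the pmd covers [addr, end):

	/* Sketch: same flags, one slot per PAGE_SIZE page of the huge pmd. */
	cpu_flags = pmd_to_hmm_pfn_flags(range, pmd);
	pfn = pmd_pfn(pmd) + pte_index(addr);
	for (i = 0; addr < end; addr += PAGE_SIZE, i++, pfn++)
		pfns[i] = hmm_pfn_from_pfn(range, pfn) | cpu_flags;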
486 static inline uint64_t pte_to_hmm_pfn_flags(struct hmm_range *range, pte_t pte) in pte_to_hmm_pfn_flags() argument
490 return pte_write(pte) ? range->flags[HMM_PFN_VALID] | in pte_to_hmm_pfn_flags()
491 range->flags[HMM_PFN_WRITE] : in pte_to_hmm_pfn_flags()
492 range->flags[HMM_PFN_VALID]; in pte_to_hmm_pfn_flags()
500 struct hmm_range *range = hmm_vma_walk->range; in hmm_vma_handle_pte() local
507 *pfn = range->values[HMM_PFN_NONE]; in hmm_vma_handle_pte()
508 cpu_flags = pte_to_hmm_pfn_flags(range, pte); in hmm_vma_handle_pte()
532 cpu_flags = range->flags[HMM_PFN_VALID] | in hmm_vma_handle_pte()
533 range->flags[HMM_PFN_DEVICE_PRIVATE]; in hmm_vma_handle_pte()
535 range->flags[HMM_PFN_WRITE] : 0; in hmm_vma_handle_pte()
540 *pfn = hmm_pfn_from_pfn(range, swp_offset(entry)); in hmm_vma_handle_pte()
557 *pfn = range->values[HMM_PFN_ERROR]; in hmm_vma_handle_pte()
564 *pfn = hmm_pfn_from_pfn(range, pte_pfn(pte)) | cpu_flags; in hmm_vma_handle_pte()
579 struct hmm_range *range = hmm_vma_walk->range; in hmm_vma_walk_pmd() local
580 uint64_t *pfns = range->pfns; in hmm_vma_walk_pmd()
584 i = (addr - range->start) >> PAGE_SHIFT; in hmm_vma_walk_pmd()
590 if (pmd_huge(*pmdp) && (range->vma->vm_flags & VM_HUGETLB)) in hmm_vma_walk_pmd()
633 static void hmm_pfns_clear(struct hmm_range *range, in hmm_pfns_clear() argument
639 *pfns = range->values[HMM_PFN_NONE]; in hmm_pfns_clear()
642 static void hmm_pfns_special(struct hmm_range *range) in hmm_pfns_special() argument
644 unsigned long addr = range->start, i = 0; in hmm_pfns_special()
646 for (; addr < range->end; addr += PAGE_SIZE, i++) in hmm_pfns_special()
647 range->pfns[i] = range->values[HMM_PFN_SPECIAL]; in hmm_pfns_special()
651 * hmm_vma_get_pfns() - snapshot CPU page table for a range of virtual addresses
652 * @range: range being snapshotted
656 * This snapshots the CPU page table for a range of virtual addresses. Snapshot
657 * validity is tracked by range struct. See hmm_vma_range_done() for further
660 * The range struct is initialized here. It tracks the CPU page table, but only
662 * hmm_vma_range_done() to stop CPU page table update tracking on this range.
667 int hmm_vma_get_pfns(struct hmm_range *range) in hmm_vma_get_pfns() argument
669 struct vm_area_struct *vma = range->vma; in hmm_vma_get_pfns()
675 if (range->start < vma->vm_start || range->start >= vma->vm_end) in hmm_vma_get_pfns()
677 if (range->end < vma->vm_start || range->end > vma->vm_end) in hmm_vma_get_pfns()
690 hmm_pfns_special(range); in hmm_vma_get_pfns()
701 hmm_pfns_clear(range, range->pfns, range->start, range->end); in hmm_vma_get_pfns()
705 /* Initialize range to track CPU page table update */ in hmm_vma_get_pfns()
707 range->valid = true; in hmm_vma_get_pfns()
708 list_add_rcu(&range->list, &hmm->ranges); in hmm_vma_get_pfns()
712 hmm_vma_walk.range = range; in hmm_vma_get_pfns()
723 walk_page_range(range->start, range->end, &mm_walk); in hmm_vma_get_pfns()
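Taken together, these hits describe the snapshot entry point: the caller fills a struct hmm_range, and hmm_vma_get_pfns() registers it on hmm->ranges and walks the page table into range->pfns. A hedged driver-side setup sketch using only the fields visible in this listing (vma, start, end, pfns, flags, values); the my_* names and the particular bit encodings are hypothetical driver choices:

#include <linux/hmm.h>

/* Hypothetical driver-side tables: map HMM's generic flag/value indices
 * (seen as range->flags[...] / range->values[...] above) onto the bit
 * encoding this driver wants in range->pfns. */
static const uint64_t my_flags[] = {
	[HMM_PFN_VALID]		 = 1UL << 0,
	[HMM_PFN_WRITE]		 = 1UL << 1,
	[HMM_PFN_DEVICE_PRIVATE] = 1UL << 2,
};
static const uint64_t my_values[] = {
	[HMM_PFN_ERROR]	  = 1UL << 63,
	[HMM_PFN_NONE]	  = 0,
	[HMM_PFN_SPECIAL] = 1UL << 62,
};

/* Fill @range and take a snapshot; @range must stay alive until the
 * matching hmm_vma_range_done() call (see the warning further down). */
static int my_snapshot(struct hmm_range *range, struct vm_area_struct *vma,
		       unsigned long start, unsigned long end, uint64_t *pfns)
{
	range->vma    = vma;
	range->start  = start;		/* must lie inside @vma */
	range->end    = end;
	range->pfns   = pfns;		/* one uint64_t slot per page */
	range->flags  = my_flags;
	range->values = my_values;

	return hmm_vma_get_pfns(range);
}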
729 * hmm_vma_range_done() - stop tracking change to CPU page table over a range
730 * @range: range being tracked
731 * Returns: false if range data has been invalidated, true otherwise
733 * Range struct is used to track updates to the CPU page table after a call to
744 * to be used while trying to duplicate CPU page table contents for a range of
749 * hmm_vma_get_pfns(range); or hmm_vma_fault(...);
752 * if (!hmm_vma_range_done(range)) {
760 * hmm_vma_get_pfns(range); or hmm_vma_fault(...);
762 * hmm_vma_range_done(range);
763 * device_update_page_table(range->pfns);
766 bool hmm_vma_range_done(struct hmm_range *range) in hmm_vma_range_done() argument
768 unsigned long npages = (range->end - range->start) >> PAGE_SHIFT; in hmm_vma_range_done()
771 if (range->end <= range->start) { in hmm_vma_range_done()
776 hmm = hmm_register(range->vma->vm_mm); in hmm_vma_range_done()
778 memset(range->pfns, 0, sizeof(*range->pfns) * npages); in hmm_vma_range_done()
783 list_del_rcu(&range->list); in hmm_vma_range_done()
786 return range->valid; in hmm_vma_range_done()
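The usage pattern quoted piecemeal above (only the lines containing "range" appear in this listing) amounts to: snapshot or fault, prepare the device update, then call hmm_vma_range_done() under the driver's own page table lock and retry if it returns false. A hedged sketch of that loop, reusing the range setup sketched earlier; the device_* helpers stand in for driver code and are not kernel API:

	int ret;

again:
	/* Caller holds mmap_sem for read and has set up @range and its
	 * pfns array as in the snapshot sketch above. */
	ret = hmm_vma_get_pfns(range);		/* or hmm_vma_fault(range, block) */
	if (ret)
		return ret;

	/* ... build the device page table update from range->pfns ... */

	device_page_table_lock();		/* hypothetical driver lock */
	if (!hmm_vma_range_done(range)) {
		/* CPU page table changed while we were preparing the
		 * update: discard it and snapshot again. */
		device_page_table_unlock();
		goto again;
	}
	device_update_page_table(range->pfns);	/* commit while still valid */
	device_page_table_unlock();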
791 * hmm_vma_fault() - try to fault some address in a virtual address range
792 * @range: range being faulted
799 * On error, for one virtual address in the range, the function will mark the
807 * ret = hmm_vma_fault(range, block); in hmm_vma_fault()
810 * hmm_vma_range_done(range);
827 * hmm_vma_range_done(range);
833 * BEFORE FREEING THE range struct OR YOU WILL HAVE SERIOUS MEMORY CORRUPTION! in hmm_vma_fault()
837 int hmm_vma_fault(struct hmm_range *range, bool block) in hmm_vma_fault() argument
839 struct vm_area_struct *vma = range->vma; in hmm_vma_fault()
840 unsigned long start = range->start; in hmm_vma_fault()
847 if (range->start < vma->vm_start || range->start >= vma->vm_end) in hmm_vma_fault()
849 if (range->end < vma->vm_start || range->end > vma->vm_end) in hmm_vma_fault()
854 hmm_pfns_clear(range, range->pfns, range->start, range->end); in hmm_vma_fault()
864 hmm_pfns_special(range); in hmm_vma_fault()
875 hmm_pfns_clear(range, range->pfns, range->start, range->end); in hmm_vma_fault()
879 /* Initialize range to track CPU page table update */ in hmm_vma_fault()
881 range->valid = true; in hmm_vma_fault()
882 list_add_rcu(&range->list, &hmm->ranges); in hmm_vma_fault()
887 hmm_vma_walk.range = range; in hmm_vma_fault()
889 hmm_vma_walk.last = range->start; in hmm_vma_fault()
900 ret = walk_page_range(start, range->end, &mm_walk); in hmm_vma_fault()
907 i = (hmm_vma_walk.last - range->start) >> PAGE_SHIFT; in hmm_vma_fault()
908 hmm_pfns_clear(range, &range->pfns[i], hmm_vma_walk.last, in hmm_vma_fault()
909 range->end); in hmm_vma_fault()
910 hmm_vma_range_done(range); in hmm_vma_fault()
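hmm_vma_fault() follows the same registration and cleanup dance as hmm_vma_get_pfns() but additionally faults pages in; as the last hits show, on failure it clears the not-yet-walked tail of range->pfns and drops the range from tracking itself. A hedged caller-side sketch, assuming mm is the mm_struct the range's vma belongs to, block=true means the call may sleep on faults, and the device_* helpers are the same hypothetical driver code as above:

	int ret;

again:
	down_read(&mm->mmap_sem);	/* page table walk expects mmap_sem held */
	ret = hmm_vma_fault(range, true);
	if (ret) {
		/* On failure hmm_vma_fault() has already cleared the
		 * unwalked tail of range->pfns and called
		 * hmm_vma_range_done() itself (see the hits above),
		 * so only the mmap_sem needs releasing here. */
		up_read(&mm->mmap_sem);
		return ret;
	}

	device_page_table_lock();			/* hypothetical */
	if (!hmm_vma_range_done(range)) {
		device_page_table_unlock();
		up_read(&mm->mmap_sem);
		goto again;		/* snapshot invalidated, start over */
	}
	device_update_page_table(range->pfns);
	device_page_table_unlock();
	up_read(&mm->mmap_sem);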
1134 * This function first finds an empty range of physical addresses big enough to in hmm_devmem_add()
1180 * range in hmm_devmem_add()