Lines Matching refs:range
162 static int dmirror_do_fault(struct dmirror *dmirror, struct hmm_range *range) in dmirror_do_fault() argument
164 unsigned long *pfns = range->hmm_pfns; in dmirror_do_fault()
167 for (pfn = (range->start >> PAGE_SHIFT); in dmirror_do_fault()
168 pfn < (range->end >> PAGE_SHIFT); in dmirror_do_fault()
186 else if (WARN_ON(range->default_flags & HMM_PFN_WRITE)) in dmirror_do_fault()
213 const struct mmu_notifier_range *range, in dmirror_interval_invalidate() argument
222 if (range->event == MMU_NOTIFY_MIGRATE && in dmirror_interval_invalidate()
223 range->owner == dmirror->mdevice) in dmirror_interval_invalidate()
226 if (mmu_notifier_range_blockable(range)) in dmirror_interval_invalidate()
232 dmirror_do_update(dmirror, range->start, range->end); in dmirror_interval_invalidate()
243 struct hmm_range *range) in dmirror_range_fault() argument
256 range->notifier_seq = mmu_interval_read_begin(range->notifier); in dmirror_range_fault()
258 ret = hmm_range_fault(range); in dmirror_range_fault()
267 if (mmu_interval_read_retry(range->notifier, in dmirror_range_fault()
268 range->notifier_seq)) { in dmirror_range_fault()
275 ret = dmirror_do_fault(dmirror, range); in dmirror_range_fault()
288 struct hmm_range range = { in dmirror_fault() local
302 for (addr = start; addr < end; addr = range.end) { in dmirror_fault()
303 range.start = addr; in dmirror_fault()
304 range.end = min(addr + (ARRAY_SIZE(pfns) << PAGE_SHIFT), end); in dmirror_fault()
306 ret = dmirror_range_fault(dmirror, &range); in dmirror_fault()
474 devmem->pagemap.range.start = res->start; in dmirror_allocate_chunk()
475 devmem->pagemap.range.end = res->end; in dmirror_allocate_chunk()
502 pfn_first = devmem->pagemap.range.start >> PAGE_SHIFT; in dmirror_allocate_chunk()
503 pfn_last = pfn_first + (range_len(&devmem->pagemap.range) >> PAGE_SHIFT); in dmirror_allocate_chunk()
532 release_mem_region(devmem->pagemap.range.start, range_len(&devmem->pagemap.range)); in dmirror_allocate_chunk()
859 static void dmirror_mkentry(struct dmirror *dmirror, struct hmm_range *range, in dmirror_mkentry() argument
895 const struct mmu_notifier_range *range, in dmirror_snapshot_invalidate() argument
902 if (mmu_notifier_range_blockable(range)) in dmirror_snapshot_invalidate()
922 struct hmm_range *range, in dmirror_range_snapshot() argument
934 range->notifier = &notifier.notifier; in dmirror_range_snapshot()
936 ret = mmu_interval_notifier_insert(range->notifier, mm, in dmirror_range_snapshot()
937 range->start, range->end - range->start, in dmirror_range_snapshot()
948 range->notifier_seq = mmu_interval_read_begin(range->notifier); in dmirror_range_snapshot()
951 ret = hmm_range_fault(range); in dmirror_range_snapshot()
960 if (mmu_interval_read_retry(range->notifier, in dmirror_range_snapshot()
961 range->notifier_seq)) { in dmirror_range_snapshot()
968 n = (range->end - range->start) >> PAGE_SHIFT; in dmirror_range_snapshot()
970 dmirror_mkentry(dmirror, range, perm + i, range->hmm_pfns[i]); in dmirror_range_snapshot()
974 mmu_interval_notifier_remove(range->notifier); in dmirror_range_snapshot()
989 struct hmm_range range = { in dmirror_snapshot() local
1013 range.start = addr; in dmirror_snapshot()
1014 range.end = next; in dmirror_snapshot()
1016 ret = dmirror_range_snapshot(dmirror, &range, perm); in dmirror_snapshot()
1020 n = (range.end - range.start) >> PAGE_SHIFT; in dmirror_snapshot()
1255 release_mem_region(devmem->pagemap.range.start, in dmirror_device_remove()
1256 range_len(&devmem->pagemap.range)); in dmirror_device_remove()