
Lines Matching refs:range in lib/test_hmm.c, the HMM self-test driver ("dmirror"); each hit shows its source line number and enclosing function. Each group of hits is followed by a short sketch of the pattern it exercises.

207 static int dmirror_do_fault(struct dmirror *dmirror, struct hmm_range *range)  in dmirror_do_fault()  argument
209 unsigned long *pfns = range->hmm_pfns; in dmirror_do_fault()
212 for (pfn = (range->start >> PAGE_SHIFT); in dmirror_do_fault()
213 pfn < (range->end >> PAGE_SHIFT); in dmirror_do_fault()
231 else if (WARN_ON(range->default_flags & HMM_PFN_WRITE)) in dmirror_do_fault()
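The dmirror_do_fault() hits above show the post-fault walk: one hmm_pfns[] entry per page between range->start and range->end, and a WARN_ON (line 231) if a page came back non-writable even though default_flags demanded HMM_PFN_WRITE. A minimal sketch of that walk, using the real <linux/hmm.h> helpers; use_page() is a hypothetical consumer:

#include <linux/hmm.h>
#include <linux/mm.h>

static int walk_faulted_range(struct hmm_range *range)
{
	unsigned long *pfns = range->hmm_pfns;
	unsigned long pfn;

	for (pfn = range->start >> PAGE_SHIFT;
	     pfn < (range->end >> PAGE_SHIFT);
	     pfn++, pfns++) {
		struct page *page;

		if (!(*pfns & HMM_PFN_VALID))
			continue;			/* hole, or not faulted in */
		page = hmm_pfn_to_page(*pfns);
		/* HMM_PFN_WRITE says whether the CPU PTE allows writes */
		use_page(page, *pfns & HMM_PFN_WRITE);	/* hypothetical */
	}
	return 0;
}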
258 const struct mmu_notifier_range *range, in dmirror_interval_invalidate() argument
267 if (range->event == MMU_NOTIFY_MIGRATE && in dmirror_interval_invalidate()
268 range->owner == dmirror->mdevice) in dmirror_interval_invalidate()
271 if (mmu_notifier_range_blockable(range)) in dmirror_interval_invalidate()
277 dmirror_do_update(dmirror, range->start, range->end); in dmirror_interval_invalidate()
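The dmirror_interval_invalidate() hits outline the interval-notifier invalidation contract: skip invalidations that the device itself raised by migrating pages (MMU_NOTIFY_MIGRATE with a matching owner), honor non-blockable contexts, bump the sequence, then tear down the mirrored span. A sketch under those assumptions; struct my_mirror (with notifier, device, lock members) and my_drop_mappings() are hypothetical:

#include <linux/mmu_notifier.h>

static bool my_invalidate(struct mmu_interval_notifier *mni,
			  const struct mmu_notifier_range *range,
			  unsigned long cur_seq)
{
	struct my_mirror *m = container_of(mni, struct my_mirror, notifier);

	/* Our own device migrations need no teardown here. */
	if (range->event == MMU_NOTIFY_MIGRATE && range->owner == m->device)
		return true;

	if (mmu_notifier_range_blockable(range))
		mutex_lock(&m->lock);
	else if (!mutex_trylock(&m->lock))
		return false;		/* caller retries in blockable context */

	mmu_interval_set_seq(mni, cur_seq);
	my_drop_mappings(m, range->start, range->end);	/* hypothetical */
	mutex_unlock(&m->lock);
	return true;
}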
288 struct hmm_range *range) in dmirror_range_fault() argument
301 range->notifier_seq = mmu_interval_read_begin(range->notifier); in dmirror_range_fault()
303 ret = hmm_range_fault(range); in dmirror_range_fault()
312 if (mmu_interval_read_retry(range->notifier, in dmirror_range_fault()
313 range->notifier_seq)) { in dmirror_range_fault()
320 ret = dmirror_do_fault(dmirror, range); in dmirror_range_fault()
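dmirror_range_fault() follows the canonical hmm_range_fault() retry loop from Documentation/mm/hmm.rst: read the notifier sequence, fault with the mmap lock held for read, retry on -EBUSY, and retry again if the interval was invalidated before the result could be consumed. A sketch, with the driver lock elided for brevity:

static int fault_and_mirror(struct mm_struct *mm, struct hmm_range *range)
{
	int ret;

	while (true) {
		range->notifier_seq = mmu_interval_read_begin(range->notifier);
		mmap_read_lock(mm);
		ret = hmm_range_fault(range);	/* needs mmap lock for read */
		mmap_read_unlock(mm);
		if (ret) {
			if (ret == -EBUSY)
				continue;	/* collided with an invalidation */
			return ret;
		}
		/* real code takes its driver lock before this check */
		if (!mmu_interval_read_retry(range->notifier,
					     range->notifier_seq))
			break;			/* pfns are stable now */
	}
	return walk_faulted_range(range);	/* the walk sketched above */
}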
333 struct hmm_range range = { in dmirror_fault() local
347 for (addr = start; addr < end; addr = range.end) { in dmirror_fault()
348 range.start = addr; in dmirror_fault()
349 range.end = min(addr + (ARRAY_SIZE(pfns) << PAGE_SHIFT), end); in dmirror_fault()
351 ret = dmirror_range_fault(dmirror, &range); in dmirror_fault()
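dmirror_fault() shows why callers chunk large requests: hmm_range_fault() fills a caller-supplied pfn array, so the span is processed ARRAY_SIZE(pfns) pages at a time, with range.end of one chunk becoming the start of the next. A sketch, assuming the fault_and_mirror() helper above and a 64-entry buffer as in the test driver:

static int fault_span(struct my_mirror *m, unsigned long start,
		      unsigned long end, bool write)
{
	unsigned long pfns[64];
	struct hmm_range range = {
		.notifier = &m->notifier,
		.hmm_pfns = pfns,
		.pfn_flags_mask = 0,
		.default_flags = HMM_PFN_REQ_FAULT |
				 (write ? HMM_PFN_REQ_WRITE : 0),
		.dev_private_owner = m->device,
	};
	unsigned long addr;
	int ret = 0;

	for (addr = start; addr < end; addr = range.end) {
		range.start = addr;
		range.end = min(addr + (ARRAY_SIZE(pfns) << PAGE_SHIFT), end);
		ret = fault_and_mirror(m->notifier.mm, &range);
		if (ret)
			break;
	}
	return ret;
}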
514 devmem->pagemap.range.start = res->start; in dmirror_allocate_chunk()
515 devmem->pagemap.range.end = res->end; in dmirror_allocate_chunk()
519 devmem->pagemap.range.start = (MINOR(mdevice->cdevice.dev) - 2) ? in dmirror_allocate_chunk()
522 devmem->pagemap.range.end = devmem->pagemap.range.start + in dmirror_allocate_chunk()
561 pfn_first = devmem->pagemap.range.start >> PAGE_SHIFT; in dmirror_allocate_chunk()
562 pfn_last = pfn_first + (range_len(&devmem->pagemap.range) >> PAGE_SHIFT); in dmirror_allocate_chunk()
592 release_mem_region(devmem->pagemap.range.start, in dmirror_allocate_chunk()
593 range_len(&devmem->pagemap.range)); in dmirror_allocate_chunk()
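The dmirror_allocate_chunk() hits trace the lifecycle of dev_pagemap.range: a free physical span is reserved, recorded in range.start/range.end (an inclusive bound), turned into a pfn window, and eventually released with the matching range_len(), which adds 1 precisely because .end is inclusive. A sketch assuming device-private memory; my_devmem_ops is hypothetical (real drivers also set ->owner and a page_free op):

static int alloc_chunk(struct dev_pagemap *pagemap, unsigned long size)
{
	struct resource *res;
	unsigned long pfn_first, pfn_last;
	void *ptr;

	res = request_free_mem_region(&iomem_resource, size, "hmm-demo");
	if (IS_ERR(res))
		return PTR_ERR(res);

	pagemap->type = MEMORY_DEVICE_PRIVATE;
	pagemap->range.start = res->start;
	pagemap->range.end = res->end;		/* inclusive bound */
	pagemap->nr_range = 1;
	pagemap->ops = &my_devmem_ops;		/* hypothetical */

	ptr = memremap_pages(pagemap, numa_node_id());
	if (IS_ERR(ptr)) {
		release_mem_region(pagemap->range.start,
				   range_len(&pagemap->range));
		return PTR_ERR(ptr);
	}

	pfn_first = pagemap->range.start >> PAGE_SHIFT;
	pfn_last = pfn_first + (range_len(&pagemap->range) >> PAGE_SHIFT);
	add_to_free_list(pfn_first, pfn_last);	/* hypothetical bookkeeping */
	return 0;
}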
1039 static void dmirror_mkentry(struct dmirror *dmirror, struct hmm_range *range, in dmirror_mkentry() argument
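dmirror_mkentry() condenses one hmm_pfns[] entry into a one-byte permission code for userspace. A sketch with hypothetical PERM_* codes (the driver's real codes live in its uapi header):

static void mkentry(unsigned long entry, unsigned char *perm)
{
	if (entry & HMM_PFN_ERROR)
		*perm = PERM_ERROR;	/* hypothetical codes throughout */
	else if (!(entry & HMM_PFN_VALID))
		*perm = PERM_NONE;
	else if (entry & HMM_PFN_WRITE)
		*perm = PERM_WRITE;
	else
		*perm = PERM_READ;	/* valid implies at least read */
}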
1081 const struct mmu_notifier_range *range, in dmirror_snapshot_invalidate() argument
1088 if (mmu_notifier_range_blockable(range)) in dmirror_snapshot_invalidate()
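dmirror_snapshot_invalidate() is the degenerate form of the callback sketched earlier: any overlap spoils the whole snapshot, so after honoring blockability it only needs to bump the sequence number. A sketch, reusing the hypothetical struct my_mirror:

static bool my_snapshot_invalidate(struct mmu_interval_notifier *mni,
				   const struct mmu_notifier_range *range,
				   unsigned long cur_seq)
{
	struct my_mirror *m = container_of(mni, struct my_mirror, notifier);

	if (mmu_notifier_range_blockable(range))
		mutex_lock(&m->lock);
	else if (!mutex_trylock(&m->lock))
		return false;

	/* the seq bump alone invalidates the snapshot */
	mmu_interval_set_seq(mni, cur_seq);
	mutex_unlock(&m->lock);
	return true;
}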
1108 struct hmm_range *range, in dmirror_range_snapshot() argument
1120 range->notifier = &notifier.notifier; in dmirror_range_snapshot()
1122 ret = mmu_interval_notifier_insert(range->notifier, mm, in dmirror_range_snapshot()
1123 range->start, range->end - range->start, in dmirror_range_snapshot()
1134 range->notifier_seq = mmu_interval_read_begin(range->notifier); in dmirror_range_snapshot()
1137 ret = hmm_range_fault(range); in dmirror_range_snapshot()
1146 if (mmu_interval_read_retry(range->notifier, in dmirror_range_snapshot()
1147 range->notifier_seq)) { in dmirror_range_snapshot()
1154 n = (range->end - range->start) >> PAGE_SHIFT; in dmirror_range_snapshot()
1156 dmirror_mkentry(dmirror, range, perm + i, range->hmm_pfns[i]); in dmirror_range_snapshot()
1160 mmu_interval_notifier_remove(range->notifier); in dmirror_range_snapshot()
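dmirror_range_snapshot() registers a short-lived interval notifier covering exactly [range->start, range->end), runs the same read_begin/fault/retry loop (with default_flags left at 0, so pages are inspected rather than faulted in), translates the pfns, and removes the notifier. A sketch; snapshot_ops is a hypothetical mmu_interval_notifier_ops pointing at the callback above:

static int snapshot_chunk(struct mm_struct *mm, struct hmm_range *range,
			  unsigned char *perm)
{
	struct mmu_interval_notifier notifier;
	unsigned long i, npages;
	int ret;

	ret = mmu_interval_notifier_insert(&notifier, mm, range->start,
					   range->end - range->start,
					   &snapshot_ops);	/* hypothetical */
	if (ret)
		return ret;
	range->notifier = &notifier;

	while (true) {
		range->notifier_seq = mmu_interval_read_begin(range->notifier);
		mmap_read_lock(mm);
		ret = hmm_range_fault(range);	/* default_flags == 0: no faulting */
		mmap_read_unlock(mm);
		if (ret) {
			if (ret == -EBUSY)
				continue;
			goto out;
		}
		if (!mmu_interval_read_retry(range->notifier,
					     range->notifier_seq))
			break;
	}

	npages = (range->end - range->start) >> PAGE_SHIFT;
	for (i = 0; i < npages; i++)
		mkentry(range->hmm_pfns[i], &perm[i]);	/* sketched earlier */
out:
	mmu_interval_notifier_remove(range->notifier);
	return ret;
}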
1175 struct hmm_range range = { in dmirror_snapshot() local
1199 range.start = addr; in dmirror_snapshot()
1200 range.end = next; in dmirror_snapshot()
1202 ret = dmirror_range_snapshot(dmirror, &range, perm); in dmirror_snapshot()
1206 n = (range.end - range.start) >> PAGE_SHIFT; in dmirror_snapshot()
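dmirror_snapshot() chunks the user's request the same way dmirror_fault() does, then copies each chunk's permission bytes out; n is the page count of the chunk just snapshotted. A sketch, assuming snapshot_chunk() above and a 64-byte perm buffer:

static int snapshot_span(struct mm_struct *mm, unsigned long start,
			 unsigned long end, unsigned char __user *uptr)
{
	unsigned long pfns[64];
	unsigned char perm[64];
	struct hmm_range range = {
		.hmm_pfns = pfns,
		.pfn_flags_mask = 0,	/* default_flags stays 0: snapshot only */
	};
	unsigned long addr, next, n;
	int ret = 0;

	for (addr = start; addr < end; addr = next) {
		next = min(addr + (ARRAY_SIZE(perm) << PAGE_SHIFT), end);
		range.start = addr;
		range.end = next;

		ret = snapshot_chunk(mm, &range, perm);
		if (ret)
			break;

		n = (range.end - range.start) >> PAGE_SHIFT;
		if (copy_to_user(uptr, perm, n))
			return -EFAULT;
		uptr += n;
	}
	return ret;
}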
1222 unsigned long start_pfn = chunk->pagemap.range.start >> PAGE_SHIFT; in dmirror_device_evict_chunk()
1223 unsigned long end_pfn = chunk->pagemap.range.end >> PAGE_SHIFT; in dmirror_device_evict_chunk()
1286 release_mem_region(devmem->pagemap.range.start, in dmirror_device_remove_chunks()
1287 range_len(&devmem->pagemap.range)); in dmirror_device_remove_chunks()
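The final hits show teardown: dmirror_device_evict_chunk() converts the chunk's physical range back into a pfn span (the +1 again because range.end is inclusive), and once the pages are migrated out, the span is unmapped and the region released with the same range_len() pairing used at allocation. A sketch; struct my_chunk (embedding a dev_pagemap) and migrate_chunk_pages() are hypothetical:

static void evict_chunk(struct my_chunk *chunk)
{
	unsigned long start_pfn = chunk->pagemap.range.start >> PAGE_SHIFT;
	unsigned long end_pfn = chunk->pagemap.range.end >> PAGE_SHIFT;
	unsigned long npages = end_pfn - start_pfn + 1;	/* .end is inclusive */

	migrate_chunk_pages(chunk, start_pfn, npages);	/* hypothetical */

	memunmap_pages(&chunk->pagemap);
	release_mem_region(chunk->pagemap.range.start,
			   range_len(&chunk->pagemap.range));
}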