// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2013 Red Hat Inc.
 *
 * Authors: Jérôme Glisse <jglisse@redhat.com>
 */
/*
 * Refer to include/linux/hmm.h for information about heterogeneous memory
 * management or HMM for short.
 */
#include <linux/pagewalk.h>
#include <linux/hmm.h>
#include <linux/init.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mmzone.h>
#include <linux/pagemap.h>
#include <linux/swapops.h>
#include <linux/hugetlb.h>
#include <linux/memremap.h>
#include <linux/sched/mm.h>
#include <linux/jump_label.h>
#include <linux/dma-mapping.h>
#include <linux/mmu_notifier.h>
#include <linux/memory_hotplug.h>

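/*
 * Per-walk state, passed to the pagewalk callbacks via mm_walk->private.
 * "last" is the first address that has not yet been stored into
 * range->hmm_pfns; hmm_range_fault() restarts the walk from it after -EBUSY.
 */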
struct hmm_vma_walk {
	struct hmm_range	*range;
	unsigned long		last;
};

enum {
	HMM_NEED_FAULT = 1 << 0,
	HMM_NEED_WRITE_FAULT = 1 << 1,
	HMM_NEED_ALL_BITS = HMM_NEED_FAULT | HMM_NEED_WRITE_FAULT,
};

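/*
 * Fill the part of range->hmm_pfns covering [addr, end) with a single value,
 * typically 0 (not present) or HMM_PFN_ERROR.
 */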
static int hmm_pfns_fill(unsigned long addr, unsigned long end,
			 struct hmm_range *range, unsigned long cpu_flags)
{
	unsigned long i = (addr - range->start) >> PAGE_SHIFT;

	for (; addr < end; addr += PAGE_SIZE, i++)
		range->hmm_pfns[i] = cpu_flags;
	return 0;
}

/*
 * hmm_vma_fault() - fault in a range lacking valid pmd or pte(s)
 * @addr: range virtual start address (inclusive)
 * @end: range virtual end address (exclusive)
 * @required_fault: HMM_NEED_* flags
 * @walk: mm_walk structure
 * Return: -EBUSY after page fault, or page fault error
 *
 * This function will be called whenever pmd_none() or pte_none() returns true,
 * or whenever there is no page directory covering the virtual address range.
 */
static int hmm_vma_fault(unsigned long addr, unsigned long end,
			 unsigned int required_fault, struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct vm_area_struct *vma = walk->vma;
	unsigned int fault_flags = FAULT_FLAG_REMOTE;

	WARN_ON_ONCE(!required_fault);
	hmm_vma_walk->last = addr;

	if (required_fault & HMM_NEED_WRITE_FAULT) {
		if (!(vma->vm_flags & VM_WRITE))
			return -EPERM;
		fault_flags |= FAULT_FLAG_WRITE;
	}

	for (; addr < end; addr += PAGE_SIZE)
		if (handle_mm_fault(vma, addr, fault_flags, NULL) &
		    VM_FAULT_ERROR)
			return -EFAULT;
	return -EBUSY;
}

static unsigned int hmm_pte_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
				       unsigned long pfn_req_flags,
				       unsigned long cpu_flags)
{
	struct hmm_range *range = hmm_vma_walk->range;

	/*
	 * We not only consider the individual per-page request, we also
	 * consider the default flags requested for the range.  The API can
	 * be used two ways.  The first one is where the HMM user coalesces
	 * multiple page faults into one request and sets flags per pfn for
	 * those faults.  The second one is where the HMM user wants to
	 * pre-fault a range with specific flags.  For the latter it is a
	 * waste to have the user pre-fill the pfn arrays with a default
	 * flags value.
	 */
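	/*
	 * For example (a sketch of the two modes, not the only valid setup):
	 * a pre-fault of the whole range with at least read permission can
	 * set range->default_flags = HMM_PFN_REQ_FAULT and
	 * range->pfn_flags_mask = 0, leaving the hmm_pfns array untouched,
	 * while a caller coalescing individual faults can leave
	 * default_flags = 0, include the HMM_PFN_REQ_* bits in
	 * pfn_flags_mask, and set HMM_PFN_REQ_FAULT (and possibly
	 * HMM_PFN_REQ_WRITE) only in the entries it cares about.
	 */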
	pfn_req_flags &= range->pfn_flags_mask;
	pfn_req_flags |= range->default_flags;

	/* We aren't asked to do anything ... */
	if (!(pfn_req_flags & HMM_PFN_REQ_FAULT))
		return 0;

	/* Need to write fault? */
	if ((pfn_req_flags & HMM_PFN_REQ_WRITE) &&
	    !(cpu_flags & HMM_PFN_WRITE))
		return HMM_NEED_FAULT | HMM_NEED_WRITE_FAULT;

	/* If CPU page table is not valid then we need to fault */
	if (!(cpu_flags & HMM_PFN_VALID))
		return HMM_NEED_FAULT;
	return 0;
}

static unsigned int
hmm_range_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
		     const unsigned long hmm_pfns[], unsigned long npages,
		     unsigned long cpu_flags)
{
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned int required_fault = 0;
	unsigned long i;

	/*
	 * If the default flags do not request to fault pages, and the mask
	 * does not allow for individual pages to be faulted, then
	 * hmm_pte_need_fault() will always return 0.
	 */
	if (!((range->default_flags | range->pfn_flags_mask) &
	      HMM_PFN_REQ_FAULT))
		return 0;

	for (i = 0; i < npages; ++i) {
		required_fault |= hmm_pte_need_fault(hmm_vma_walk, hmm_pfns[i],
						     cpu_flags);
		if (required_fault == HMM_NEED_ALL_BITS)
			return required_fault;
	}
	return required_fault;
}

static int hmm_vma_walk_hole(unsigned long addr, unsigned long end,
			     __always_unused int depth, struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned int required_fault;
	unsigned long i, npages;
	unsigned long *hmm_pfns;

	i = (addr - range->start) >> PAGE_SHIFT;
	npages = (end - addr) >> PAGE_SHIFT;
	hmm_pfns = &range->hmm_pfns[i];
	required_fault =
		hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, 0);
	if (!walk->vma) {
		if (required_fault)
			return -EFAULT;
		return hmm_pfns_fill(addr, end, range, HMM_PFN_ERROR);
	}
	if (required_fault)
		return hmm_vma_fault(addr, end, required_fault, walk);
	return hmm_pfns_fill(addr, end, range, 0);
}

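/*
 * The size of the mapping backing an hmm_pfn is reported to the caller as a
 * page order stored above HMM_PFN_ORDER_SHIFT, e.g. PMD_SHIFT - PAGE_SHIFT
 * (9 with 4K pages) for a PMD-sized transparent huge page.
 */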
static inline unsigned long hmm_pfn_flags_order(unsigned long order)
{
	return order << HMM_PFN_ORDER_SHIFT;
}

static inline unsigned long pmd_to_hmm_pfn_flags(struct hmm_range *range,
						 pmd_t pmd)
{
	if (pmd_protnone(pmd))
		return 0;
	return (pmd_write(pmd) ? (HMM_PFN_VALID | HMM_PFN_WRITE) :
				 HMM_PFN_VALID) |
	       hmm_pfn_flags_order(PMD_SHIFT - PAGE_SHIFT);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
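/*
 * Expand a huge pmd into per-page hmm_pfn entries: consecutive pfns, each
 * carrying the same permission flags plus the PMD order.
 */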
static int hmm_vma_handle_pmd(struct mm_walk *walk, unsigned long addr,
			      unsigned long end, unsigned long hmm_pfns[],
			      pmd_t pmd)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned long pfn, npages, i;
	unsigned int required_fault;
	unsigned long cpu_flags;

	npages = (end - addr) >> PAGE_SHIFT;
	cpu_flags = pmd_to_hmm_pfn_flags(range, pmd);
	required_fault =
		hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, cpu_flags);
	if (required_fault)
		return hmm_vma_fault(addr, end, required_fault, walk);

	pfn = pmd_pfn(pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
	for (i = 0; addr < end; addr += PAGE_SIZE, i++, pfn++)
		hmm_pfns[i] = pfn | cpu_flags;
	return 0;
}
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
/* stub to allow the code below to compile */
int hmm_vma_handle_pmd(struct mm_walk *walk, unsigned long addr,
		       unsigned long end, unsigned long hmm_pfns[], pmd_t pmd);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

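/*
 * Device private swap entries are only reported to the caller that owns the
 * backing pgmap (range->dev_private_owner).  Entries owned by anyone else
 * are treated like ordinary non-present ptes, i.e. they are faulted
 * (typically migrating the page back to system memory) if the caller asked
 * for a valid pfn.
 */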
static inline bool hmm_is_device_private_entry(struct hmm_range *range,
					       swp_entry_t entry)
{
	return is_device_private_entry(entry) &&
		device_private_entry_to_page(entry)->pgmap->owner ==
		range->dev_private_owner;
}

static inline unsigned long pte_to_hmm_pfn_flags(struct hmm_range *range,
						 pte_t pte)
{
	if (pte_none(pte) || !pte_present(pte) || pte_protnone(pte))
		return 0;
	return pte_write(pte) ? (HMM_PFN_VALID | HMM_PFN_WRITE) : HMM_PFN_VALID;
}

static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr,
			      unsigned long end, pmd_t *pmdp, pte_t *ptep,
			      unsigned long *hmm_pfn)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned int required_fault;
	unsigned long cpu_flags;
	pte_t pte = *ptep;
	uint64_t pfn_req_flags = *hmm_pfn;

	if (pte_none(pte)) {
		required_fault =
			hmm_pte_need_fault(hmm_vma_walk, pfn_req_flags, 0);
		if (required_fault)
			goto fault;
		*hmm_pfn = 0;
		return 0;
	}

	if (!pte_present(pte)) {
		swp_entry_t entry = pte_to_swp_entry(pte);

		/*
		 * Never fault in device private pages, but just report
		 * the PFN even if not present.
		 */
		if (hmm_is_device_private_entry(range, entry)) {
			cpu_flags = HMM_PFN_VALID;
			if (is_write_device_private_entry(entry))
				cpu_flags |= HMM_PFN_WRITE;
			*hmm_pfn = device_private_entry_to_pfn(entry) |
					cpu_flags;
			return 0;
		}

		required_fault =
			hmm_pte_need_fault(hmm_vma_walk, pfn_req_flags, 0);
		if (!required_fault) {
			*hmm_pfn = 0;
			return 0;
		}

		if (!non_swap_entry(entry))
			goto fault;

		if (is_migration_entry(entry)) {
			pte_unmap(ptep);
			hmm_vma_walk->last = addr;
			migration_entry_wait(walk->mm, pmdp, addr);
			return -EBUSY;
		}

		/* Report error for everything else */
		pte_unmap(ptep);
		return -EFAULT;
	}

	cpu_flags = pte_to_hmm_pfn_flags(range, pte);
	required_fault =
		hmm_pte_need_fault(hmm_vma_walk, pfn_req_flags, cpu_flags);
	if (required_fault)
		goto fault;

	/*
	 * Bypass devmap ptes, such as a DAX page, when all of the requested
	 * pfn flags (pfn_req_flags) are fulfilled.
	 * Since each architecture defines a struct page for the zero page,
	 * just fall through and treat it like a normal page.
	 */
	if (!vm_normal_page(walk->vma, addr, pte) &&
	    !pte_devmap(pte) &&
	    !is_zero_pfn(pte_pfn(pte))) {
		if (hmm_pte_need_fault(hmm_vma_walk, pfn_req_flags, 0)) {
			pte_unmap(ptep);
			return -EFAULT;
		}
		*hmm_pfn = HMM_PFN_ERROR;
		return 0;
	}

	*hmm_pfn = pte_pfn(pte) | cpu_flags;
	return 0;

fault:
	pte_unmap(ptep);
	/* Fault any virtual address we were asked to fault */
	return hmm_vma_fault(addr, end, required_fault, walk);
}

static int hmm_vma_walk_pmd(pmd_t *pmdp,
			    unsigned long start,
			    unsigned long end,
			    struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned long *hmm_pfns =
		&range->hmm_pfns[(start - range->start) >> PAGE_SHIFT];
	unsigned long npages = (end - start) >> PAGE_SHIFT;
	unsigned long addr = start;
	pte_t *ptep;
	pmd_t pmd;

again:
	pmd = READ_ONCE(*pmdp);
	if (pmd_none(pmd))
		return hmm_vma_walk_hole(start, end, -1, walk);

	if (thp_migration_supported() && is_pmd_migration_entry(pmd)) {
		if (hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, 0)) {
			hmm_vma_walk->last = addr;
			pmd_migration_entry_wait(walk->mm, pmdp);
			return -EBUSY;
		}
		return hmm_pfns_fill(start, end, range, 0);
	}

	if (!pmd_present(pmd)) {
		if (hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, 0))
			return -EFAULT;
		return hmm_pfns_fill(start, end, range, HMM_PFN_ERROR);
	}

	if (pmd_devmap(pmd) || pmd_trans_huge(pmd)) {
		/*
		 * No need to take pmd_lock here, even if some other thread
		 * is splitting the huge pmd we will get that event through
		 * the mmu_notifier callback.
		 *
		 * So just read the pmd value and check again that it is a
		 * transparent huge or device mapping pmd, and compute the
		 * corresponding pfn values.
		 */
		pmd = pmd_read_atomic(pmdp);
		barrier();
		if (!pmd_devmap(pmd) && !pmd_trans_huge(pmd))
			goto again;

		return hmm_vma_handle_pmd(walk, addr, end, hmm_pfns, pmd);
	}

	/*
	 * We have handled all the valid cases above, i.e. either none,
	 * migration, huge or transparent huge.  At this point either it is
	 * a valid pmd entry pointing to a pte directory or it is a bad pmd
	 * that will not recover.
	 */
	if (pmd_bad(pmd)) {
		if (hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, 0))
			return -EFAULT;
		return hmm_pfns_fill(start, end, range, HMM_PFN_ERROR);
	}

	ptep = pte_offset_map(pmdp, addr);
	for (; addr < end; addr += PAGE_SIZE, ptep++, hmm_pfns++) {
		int r;

		r = hmm_vma_handle_pte(walk, addr, end, pmdp, ptep, hmm_pfns);
		if (r) {
			/* hmm_vma_handle_pte() did pte_unmap() */
			return r;
		}
	}
	pte_unmap(ptep - 1);
	return 0;
}

#if defined(CONFIG_ARCH_HAS_PTE_DEVMAP) && \
    defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
static inline unsigned long pud_to_hmm_pfn_flags(struct hmm_range *range,
						 pud_t pud)
{
	if (!pud_present(pud))
		return 0;
	return (pud_write(pud) ? (HMM_PFN_VALID | HMM_PFN_WRITE) :
				 HMM_PFN_VALID) |
	       hmm_pfn_flags_order(PUD_SHIFT - PAGE_SHIFT);
}

static int hmm_vma_walk_pud(pud_t *pudp, unsigned long start, unsigned long end,
			    struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned long addr = start;
	pud_t pud;
	int ret = 0;
	spinlock_t *ptl = pud_trans_huge_lock(pudp, walk->vma);

	if (!ptl)
		return 0;

	/* Normally we don't want to split the huge page */
	walk->action = ACTION_CONTINUE;

	pud = READ_ONCE(*pudp);
	if (pud_none(pud)) {
		spin_unlock(ptl);
		return hmm_vma_walk_hole(start, end, -1, walk);
	}

	if (pud_huge(pud) && pud_devmap(pud)) {
		unsigned long i, npages, pfn;
		unsigned int required_fault;
		unsigned long *hmm_pfns;
		unsigned long cpu_flags;

		if (!pud_present(pud)) {
			spin_unlock(ptl);
			return hmm_vma_walk_hole(start, end, -1, walk);
		}

		i = (addr - range->start) >> PAGE_SHIFT;
		npages = (end - addr) >> PAGE_SHIFT;
		hmm_pfns = &range->hmm_pfns[i];

		cpu_flags = pud_to_hmm_pfn_flags(range, pud);
		required_fault = hmm_range_need_fault(hmm_vma_walk, hmm_pfns,
						      npages, cpu_flags);
		if (required_fault) {
			spin_unlock(ptl);
			return hmm_vma_fault(addr, end, required_fault, walk);
		}

		pfn = pud_pfn(pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
		for (i = 0; i < npages; ++i, ++pfn)
			hmm_pfns[i] = pfn | cpu_flags;
		goto out_unlock;
	}

	/* Ask for the PUD to be split */
	walk->action = ACTION_SUBTREE;

out_unlock:
	spin_unlock(ptl);
	return ret;
}
#else
#define hmm_vma_walk_pud	NULL
#endif

#ifdef CONFIG_HUGETLB_PAGE
static int hmm_vma_walk_hugetlb_entry(pte_t *pte, unsigned long hmask,
				      unsigned long start, unsigned long end,
				      struct mm_walk *walk)
{
	unsigned long addr = start, i, pfn;
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	struct vm_area_struct *vma = walk->vma;
	unsigned int required_fault;
	unsigned long pfn_req_flags;
	unsigned long cpu_flags;
	spinlock_t *ptl;
	pte_t entry;

	ptl = huge_pte_lock(hstate_vma(vma), walk->mm, pte);
	entry = huge_ptep_get(pte);

	i = (start - range->start) >> PAGE_SHIFT;
	pfn_req_flags = range->hmm_pfns[i];
	cpu_flags = pte_to_hmm_pfn_flags(range, entry) |
		    hmm_pfn_flags_order(huge_page_order(hstate_vma(vma)));
	required_fault =
		hmm_pte_need_fault(hmm_vma_walk, pfn_req_flags, cpu_flags);
	if (required_fault) {
		spin_unlock(ptl);
		return hmm_vma_fault(addr, end, required_fault, walk);
	}

	pfn = pte_pfn(entry) + ((start & ~hmask) >> PAGE_SHIFT);
	for (; addr < end; addr += PAGE_SIZE, i++, pfn++)
		range->hmm_pfns[i] = pfn | cpu_flags;

	spin_unlock(ptl);
	return 0;
}
#else
#define hmm_vma_walk_hugetlb_entry NULL
#endif /* CONFIG_HUGETLB_PAGE */

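/*
 * test_walk callback: returning 1 skips the vma (after filling its part of
 * the range with HMM_PFN_ERROR), 0 walks it normally, and a negative errno
 * aborts the walk.
 */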
static int hmm_vma_walk_test(unsigned long start, unsigned long end,
			     struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	struct vm_area_struct *vma = walk->vma;

	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)) &&
	    vma->vm_flags & VM_READ)
		return 0;

	/*
	 * vma ranges that don't have struct page backing them or map I/O
	 * devices directly cannot be handled by hmm_range_fault().
	 *
	 * If the vma does not allow read access, then assume that it does not
	 * allow write access either. HMM does not support architectures that
	 * allow write without read.
	 *
	 * If a fault is requested for an unsupported range then it is a hard
	 * failure.
	 */
	if (hmm_range_need_fault(hmm_vma_walk,
				 range->hmm_pfns +
					((start - range->start) >> PAGE_SHIFT),
				 (end - start) >> PAGE_SHIFT, 0))
		return -EFAULT;

	hmm_pfns_fill(start, end, range, HMM_PFN_ERROR);

	/* Skip this vma and continue processing the next vma. */
	return 1;
}

static const struct mm_walk_ops hmm_walk_ops = {
	.pud_entry	= hmm_vma_walk_pud,
	.pmd_entry	= hmm_vma_walk_pmd,
	.pte_hole	= hmm_vma_walk_hole,
	.hugetlb_entry	= hmm_vma_walk_hugetlb_entry,
	.test_walk	= hmm_vma_walk_test,
};

/**
 * hmm_range_fault - try to fault some address in a virtual address range
 * @range: argument structure
 *
 * Returns 0 on success or one of the following error codes:
 *
 * -EINVAL:	Invalid arguments or mm or virtual address is in an
 *		invalid vma (e.g., device file vma).
 * -ENOMEM:	Out of memory.
 * -EPERM:	Invalid permission (e.g., asking for write and range is read
 *		only).
 * -EBUSY:	The range has been invalidated and the caller needs to wait for
 *		the invalidation to finish.
 * -EFAULT:	A page was requested to be valid and could not be made valid,
 *		i.e. it has no backing VMA or it is illegal to access it.
 *
 * This is similar to get_user_pages(), except that it can read the page
 * tables without mutating them (i.e. without causing faults).
 */
int hmm_range_fault(struct hmm_range *range)
{
	struct hmm_vma_walk hmm_vma_walk = {
		.range = range,
		.last = range->start,
	};
	struct mm_struct *mm = range->notifier->mm;
	int ret;

	mmap_assert_locked(mm);

	do {
		/* If range is no longer valid force retry. */
		if (mmu_interval_check_retry(range->notifier,
					     range->notifier_seq))
			return -EBUSY;
		ret = walk_page_range(mm, hmm_vma_walk.last, range->end,
				      &hmm_walk_ops, &hmm_vma_walk);
		/*
		 * When -EBUSY is returned the loop restarts with
		 * hmm_vma_walk.last set to an address that has not been stored
		 * in pfns. All entries < last in the pfn array are set to their
		 * output, and all >= are still at their input values.
		 */
	} while (ret == -EBUSY);
	return ret;
}
EXPORT_SYMBOL(hmm_range_fault);
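
/*
 * A minimal driver-side usage sketch (hypothetical helper names such as
 * drv_lock()/drv_unlock()/drv_update_device_pagetable() and placeholder
 * values such as NPAGES and start; assumes the driver has already registered
 * "notifier", a struct mmu_interval_notifier covering the mirrored range;
 * see Documentation/vm/hmm.rst for the full pattern):
 *
 *	unsigned long pfns[NPAGES];
 *	struct hmm_range range = {
 *		.notifier = &notifier,
 *		.start = start,
 *		.end = start + NPAGES * PAGE_SIZE,
 *		.hmm_pfns = pfns,
 *		.default_flags = HMM_PFN_REQ_FAULT,
 *	};
 *	int ret;
 *
 * again:
 *	range.notifier_seq = mmu_interval_read_begin(&notifier);
 *	mmap_read_lock(notifier.mm);
 *	ret = hmm_range_fault(&range);
 *	mmap_read_unlock(notifier.mm);
 *	if (ret == -EBUSY)
 *		goto again;
 *	if (ret)
 *		return ret;
 *
 *	drv_lock();
 *	if (mmu_interval_read_retry(&notifier, range.notifier_seq)) {
 *		drv_unlock();
 *		goto again;
 *	}
 *	drv_update_device_pagetable(pfns, NPAGES);
 *	drv_unlock();
 */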