
Lines Matching +full:data +full:- +full:mirror (mm/hmm.c)

42  * struct hmm - HMM per mm struct
63 * hmm_register - register HMM against an mm (HMM internal)
72 struct hmm *hmm = READ_ONCE(mm->hmm); in hmm_register()
77 * hence we should always have pre-allocated a new hmm struct in hmm_register()
86 INIT_LIST_HEAD(&hmm->mirrors); in hmm_register()
87 init_rwsem(&hmm->mirrors_sem); in hmm_register()
88 atomic_set(&hmm->sequence, 0); in hmm_register()
89 hmm->mmu_notifier.ops = NULL; in hmm_register()
90 INIT_LIST_HEAD(&hmm->ranges); in hmm_register()
91 spin_lock_init(&hmm->lock); in hmm_register()
92 hmm->mm = mm; in hmm_register()
94 spin_lock(&mm->page_table_lock); in hmm_register()
95 if (!mm->hmm) in hmm_register()
96 mm->hmm = hmm; in hmm_register()
99 spin_unlock(&mm->page_table_lock); in hmm_register()
106 * registration of first mirror through hmm_mirror_register() in hmm_register()
108 hmm->mmu_notifier.ops = &hmm_mmu_notifier_ops; in hmm_register()
109 if (__mmu_notifier_register(&hmm->mmu_notifier, mm)) in hmm_register()
112 return mm->hmm; in hmm_register()
115 spin_lock(&mm->page_table_lock); in hmm_register()
116 if (mm->hmm == hmm) in hmm_register()
117 mm->hmm = NULL; in hmm_register()
118 spin_unlock(&mm->page_table_lock); in hmm_register()
126 kfree(mm->hmm); in hmm_mm_destroy()
134 struct hmm_mirror *mirror; in hmm_invalidate_range() local
137 spin_lock(&hmm->lock); in hmm_invalidate_range()
138 list_for_each_entry(range, &hmm->ranges, list) { in hmm_invalidate_range()
141 if (end < range->start || start >= range->end) in hmm_invalidate_range()
144 range->valid = false; in hmm_invalidate_range()
145 addr = max(start, range->start); in hmm_invalidate_range()
146 idx = (addr - range->start) >> PAGE_SHIFT; in hmm_invalidate_range()
147 npages = (min(range->end, end) - addr) >> PAGE_SHIFT; in hmm_invalidate_range()
148 memset(&range->pfns[idx], 0, sizeof(*range->pfns) * npages); in hmm_invalidate_range()
150 spin_unlock(&hmm->lock); in hmm_invalidate_range()
152 down_read(&hmm->mirrors_sem); in hmm_invalidate_range()
153 list_for_each_entry(mirror, &hmm->mirrors, list) in hmm_invalidate_range()
154 mirror->ops->sync_cpu_device_pagetables(mirror, action, in hmm_invalidate_range()
156 up_read(&hmm->mirrors_sem); in hmm_invalidate_range()
161 struct hmm_mirror *mirror; in hmm_release() local
162 struct hmm *hmm = mm->hmm; in hmm_release()
164 down_write(&hmm->mirrors_sem); in hmm_release()
165 mirror = list_first_entry_or_null(&hmm->mirrors, struct hmm_mirror, in hmm_release()
167 while (mirror) { in hmm_release()
168 list_del_init(&mirror->list); in hmm_release()
169 if (mirror->ops->release) { in hmm_release()
175 up_write(&hmm->mirrors_sem); in hmm_release()
176 mirror->ops->release(mirror); in hmm_release()
177 down_write(&hmm->mirrors_sem); in hmm_release()
179 mirror = list_first_entry_or_null(&hmm->mirrors, in hmm_release()
182 up_write(&hmm->mirrors_sem); in hmm_release()
191 struct hmm *hmm = mm->hmm; in hmm_invalidate_range_start()
195 atomic_inc(&hmm->sequence); in hmm_invalidate_range_start()
205 struct hmm *hmm = mm->hmm; in hmm_invalidate_range_end()
209 hmm_invalidate_range(mm->hmm, HMM_UPDATE_INVALIDATE, start, end); in hmm_invalidate_range_end()
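
The loops above are the heart of mirroring: hmm_invalidate_range() walks hmm->mirrors and calls each mirror's sync_cpu_device_pagetables() callback, and hmm_release() calls the optional release() callback when the address space goes away. As a hedged illustration (not part of this file), a driver-side hmm_mirror_ops might look roughly like the sketch below; the callback signature follows the call sites shown here, the exact enum/type names should be checked against include/linux/hmm.h for this kernel version, and all my_* names are hypothetical.

    #include <linux/hmm.h>

    struct my_mirror {
        struct hmm_mirror mirror;   /* embedded HMM mirror */
        /* driver-private state (device page-table lock, etc.) would live here */
    };

    /* Hypothetical driver helpers, declared only to keep the sketch complete. */
    static void my_invalidate_device_ptes(struct my_mirror *mym,
                                          unsigned long start, unsigned long end);
    static void my_teardown_all_mappings(struct my_mirror *mym);

    /* Invoked (under hmm->mirrors_sem) for every CPU page-table invalidation. */
    static void my_sync_cpu_device_pagetables(struct hmm_mirror *mirror,
                                              enum hmm_update_type update,
                                              unsigned long start,
                                              unsigned long end)
    {
        struct my_mirror *mym = container_of(mirror, struct my_mirror, mirror);

        /* Hypothetical helper: unmap device PTEs covering [start, end). */
        my_invalidate_device_ptes(mym, start, end);
    }

    /* Invoked from hmm_release() when the mirrored mm_struct goes away. */
    static void my_release(struct hmm_mirror *mirror)
    {
        struct my_mirror *mym = container_of(mirror, struct my_mirror, mirror);

        /* Hypothetical helper: tear down every device mapping for this mm. */
        my_teardown_all_mappings(mym);
    }

    static const struct hmm_mirror_ops my_mirror_ops = {
        .sync_cpu_device_pagetables = my_sync_cpu_device_pagetables,
        .release = my_release,
    };
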
219 * hmm_mirror_register() - register a mirror against an mm
221 * @mirror: new mirror struct to register
225 * an HMM mirror struct.
227 * THE mm->mmap_sem MUST BE HELD IN WRITE MODE !
229 int hmm_mirror_register(struct hmm_mirror *mirror, struct mm_struct *mm) in hmm_mirror_register() argument
232 if (!mm || !mirror || !mirror->ops) in hmm_mirror_register()
233 return -EINVAL; in hmm_mirror_register()
236 mirror->hmm = hmm_register(mm); in hmm_mirror_register()
237 if (!mirror->hmm) in hmm_mirror_register()
238 return -ENOMEM; in hmm_mirror_register()
240 down_write(&mirror->hmm->mirrors_sem); in hmm_mirror_register()
241 if (mirror->hmm->mm == NULL) { in hmm_mirror_register()
246 up_write(&mirror->hmm->mirrors_sem); in hmm_mirror_register()
247 mirror->hmm = NULL; in hmm_mirror_register()
250 list_add(&mirror->list, &mirror->hmm->mirrors); in hmm_mirror_register()
251 up_write(&mirror->hmm->mirrors_sem); in hmm_mirror_register()
259 * hmm_mirror_unregister() - unregister a mirror
261 * @mirror: mirror struct to unregister in hmm_mirror_unregister()
265 void hmm_mirror_unregister(struct hmm_mirror *mirror) in hmm_mirror_unregister() argument
271 if (mirror->hmm == NULL) in hmm_mirror_unregister()
274 hmm = mirror->hmm; in hmm_mirror_unregister()
275 down_write(&hmm->mirrors_sem); in hmm_mirror_unregister()
276 list_del_init(&mirror->list); in hmm_mirror_unregister()
277 should_unregister = list_empty(&hmm->mirrors); in hmm_mirror_unregister()
278 mirror->hmm = NULL; in hmm_mirror_unregister()
279 mm = hmm->mm; in hmm_mirror_unregister()
280 hmm->mm = NULL; in hmm_mirror_unregister()
281 up_write(&hmm->mirrors_sem); in hmm_mirror_unregister()
286 mmu_notifier_unregister_no_release(&hmm->mmu_notifier, mm); in hmm_mirror_unregister()
288 spin_lock(&mm->page_table_lock); in hmm_mirror_unregister()
289 if (mm->hmm == hmm) in hmm_mirror_unregister()
290 mm->hmm = NULL; in hmm_mirror_unregister()
291 spin_unlock(&mm->page_table_lock); in hmm_mirror_unregister()
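
Building on the previous sketch, registration and unregistration of the mirror itself are a thin wrapper around hmm_mirror_register()/hmm_mirror_unregister(); per the comment above, mm->mmap_sem must be held in write mode across registration. A hedged sketch, with my_* names hypothetical:

    /* Start mirroring current->mm. */
    static int my_start_mirroring(struct my_mirror *mym)
    {
        struct mm_struct *mm = current->mm;
        int ret;

        mym->mirror.ops = &my_mirror_ops;

        down_write(&mm->mmap_sem);
        ret = hmm_mirror_register(&mym->mirror, mm);
        up_write(&mm->mmap_sem);

        return ret;   /* 0 on success, -EINVAL or -ENOMEM per the code above */
    }

    /* Stop mirroring: removes the mirror from hmm->mirrors and, if it was the
     * last one, unregisters the mmu notifier. */
    static void my_stop_mirroring(struct my_mirror *mym)
    {
        hmm_mirror_unregister(&mym->mirror);
    }
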
308 struct hmm_vma_walk *hmm_vma_walk = walk->private; in hmm_vma_do_fault()
309 struct hmm_range *range = hmm_vma_walk->range; in hmm_vma_do_fault()
310 struct vm_area_struct *vma = walk->vma; in hmm_vma_do_fault()
313 flags |= hmm_vma_walk->block ? 0 : FAULT_FLAG_ALLOW_RETRY; in hmm_vma_do_fault()
317 return -EBUSY; in hmm_vma_do_fault()
319 *pfn = range->values[HMM_PFN_ERROR]; in hmm_vma_do_fault()
320 return -EFAULT; in hmm_vma_do_fault()
323 return -EAGAIN; in hmm_vma_do_fault()
330 struct hmm_vma_walk *hmm_vma_walk = walk->private; in hmm_pfns_bad()
331 struct hmm_range *range = hmm_vma_walk->range; in hmm_pfns_bad()
332 uint64_t *pfns = range->pfns; in hmm_pfns_bad()
335 i = (addr - range->start) >> PAGE_SHIFT; in hmm_pfns_bad()
337 pfns[i] = range->values[HMM_PFN_ERROR]; in hmm_pfns_bad()
343 * hmm_vma_walk_hole() - handle a range lacking valid pmd or pte(s)
349 * Returns: 0 on success, -EAGAIN after page fault, or page fault error
358 struct hmm_vma_walk *hmm_vma_walk = walk->private; in hmm_vma_walk_hole_()
359 struct hmm_range *range = hmm_vma_walk->range; in hmm_vma_walk_hole_()
360 uint64_t *pfns = range->pfns; in hmm_vma_walk_hole_()
363 hmm_vma_walk->last = addr; in hmm_vma_walk_hole_()
364 i = (addr - range->start) >> PAGE_SHIFT; in hmm_vma_walk_hole_()
366 pfns[i] = range->values[HMM_PFN_NONE]; in hmm_vma_walk_hole_()
372 if (ret != -EAGAIN) in hmm_vma_walk_hole_()
377 return (fault || write_fault) ? -EAGAIN : 0; in hmm_vma_walk_hole_()
384 struct hmm_range *range = hmm_vma_walk->range; in hmm_pte_need_fault()
387 if (!hmm_vma_walk->fault) in hmm_pte_need_fault()
391 if (!(pfns & range->flags[HMM_PFN_VALID])) in hmm_pte_need_fault()
394 if ((cpu_flags & range->flags[HMM_PFN_DEVICE_PRIVATE])) { in hmm_pte_need_fault()
396 if (pfns & range->flags[HMM_PFN_DEVICE_PRIVATE]) { in hmm_pte_need_fault()
397 *write_fault = pfns & range->flags[HMM_PFN_WRITE]; in hmm_pte_need_fault()
404 *fault = !(cpu_flags & range->flags[HMM_PFN_VALID]); in hmm_pte_need_fault()
406 if ((pfns & range->flags[HMM_PFN_WRITE]) && in hmm_pte_need_fault()
407 !(cpu_flags & range->flags[HMM_PFN_WRITE])) { in hmm_pte_need_fault()
420 if (!hmm_vma_walk->fault) { in hmm_range_need_fault()
436 struct hmm_vma_walk *hmm_vma_walk = walk->private; in hmm_vma_walk_hole()
437 struct hmm_range *range = hmm_vma_walk->range; in hmm_vma_walk_hole()
442 i = (addr - range->start) >> PAGE_SHIFT; in hmm_vma_walk_hole()
443 npages = (end - addr) >> PAGE_SHIFT; in hmm_vma_walk_hole()
444 pfns = &range->pfns[i]; in hmm_vma_walk_hole()
454 return pmd_write(pmd) ? range->flags[HMM_PFN_VALID] | in pmd_to_hmm_pfn_flags()
455 range->flags[HMM_PFN_WRITE] : in pmd_to_hmm_pfn_flags()
456 range->flags[HMM_PFN_VALID]; in pmd_to_hmm_pfn_flags()
465 struct hmm_vma_walk *hmm_vma_walk = walk->private; in hmm_vma_handle_pmd()
466 struct hmm_range *range = hmm_vma_walk->range; in hmm_vma_handle_pmd()
471 npages = (end - addr) >> PAGE_SHIFT; in hmm_vma_handle_pmd()
482 hmm_vma_walk->last = end; in hmm_vma_handle_pmd()
490 return pte_write(pte) ? range->flags[HMM_PFN_VALID] | in pte_to_hmm_pfn_flags()
491 range->flags[HMM_PFN_WRITE] : in pte_to_hmm_pfn_flags()
492 range->flags[HMM_PFN_VALID]; in pte_to_hmm_pfn_flags()
499 struct hmm_vma_walk *hmm_vma_walk = walk->private; in hmm_vma_handle_pte()
500 struct hmm_range *range = hmm_vma_walk->range; in hmm_vma_handle_pte()
501 struct vm_area_struct *vma = walk->vma; in hmm_vma_handle_pte()
507 *pfn = range->values[HMM_PFN_NONE]; in hmm_vma_handle_pte()
532 cpu_flags = range->flags[HMM_PFN_VALID] | in hmm_vma_handle_pte()
533 range->flags[HMM_PFN_DEVICE_PRIVATE]; in hmm_vma_handle_pte()
535 range->flags[HMM_PFN_WRITE] : 0; in hmm_vma_handle_pte()
548 hmm_vma_walk->last = addr; in hmm_vma_handle_pte()
549 migration_entry_wait(vma->vm_mm, in hmm_vma_handle_pte()
551 return -EAGAIN; in hmm_vma_handle_pte()
557 *pfn = range->values[HMM_PFN_ERROR]; in hmm_vma_handle_pte()
558 return -EFAULT; in hmm_vma_handle_pte()
578 struct hmm_vma_walk *hmm_vma_walk = walk->private; in hmm_vma_walk_pmd()
579 struct hmm_range *range = hmm_vma_walk->range; in hmm_vma_walk_pmd()
580 uint64_t *pfns = range->pfns; in hmm_vma_walk_pmd()
584 i = (addr - range->start) >> PAGE_SHIFT; in hmm_vma_walk_pmd()
590 if (pmd_huge(*pmdp) && (range->vma->vm_flags & VM_HUGETLB)) in hmm_vma_walk_pmd()
623 hmm_vma_walk->last = addr; in hmm_vma_walk_pmd()
627 pte_unmap(ptep - 1); in hmm_vma_walk_pmd()
629 hmm_vma_walk->last = addr; in hmm_vma_walk_pmd()
639 *pfns = range->values[HMM_PFN_NONE]; in hmm_pfns_clear()
644 unsigned long addr = range->start, i = 0; in hmm_pfns_special()
646 for (; addr < range->end; addr += PAGE_SIZE, i++) in hmm_pfns_special()
647 range->pfns[i] = range->values[HMM_PFN_SPECIAL]; in hmm_pfns_special()
651 * hmm_vma_get_pfns() - snapshot CPU page table for a range of virtual addresses
653 * Returns: -EINVAL if invalid argument, -ENOMEM out of memory, -EPERM invalid
669 struct vm_area_struct *vma = range->vma; in hmm_vma_get_pfns()
675 if (range->start < vma->vm_start || range->start >= vma->vm_end) in hmm_vma_get_pfns()
676 return -EINVAL; in hmm_vma_get_pfns()
677 if (range->end < vma->vm_start || range->end > vma->vm_end) in hmm_vma_get_pfns()
678 return -EINVAL; in hmm_vma_get_pfns()
680 hmm = hmm_register(vma->vm_mm); in hmm_vma_get_pfns()
682 return -ENOMEM; in hmm_vma_get_pfns()
683 /* Caller must have registered a mirror via hmm_mirror_register()! */ in hmm_vma_get_pfns()
684 if (!hmm->mmu_notifier.ops) in hmm_vma_get_pfns()
685 return -EINVAL; in hmm_vma_get_pfns()
688 if (is_vm_hugetlb_page(vma) || (vma->vm_flags & VM_SPECIAL) || in hmm_vma_get_pfns()
691 return -EINVAL; in hmm_vma_get_pfns()
694 if (!(vma->vm_flags & VM_READ)) { in hmm_vma_get_pfns()
701 hmm_pfns_clear(range, range->pfns, range->start, range->end); in hmm_vma_get_pfns()
702 return -EPERM; in hmm_vma_get_pfns()
706 spin_lock(&hmm->lock); in hmm_vma_get_pfns()
707 range->valid = true; in hmm_vma_get_pfns()
708 list_add_rcu(&range->list, &hmm->ranges); in hmm_vma_get_pfns()
709 spin_unlock(&hmm->lock); in hmm_vma_get_pfns()
716 mm_walk.mm = vma->vm_mm; in hmm_vma_get_pfns()
723 walk_page_range(range->start, range->end, &mm_walk); in hmm_vma_get_pfns()
729 * hmm_vma_range_done() - stop tracking change to CPU page table over a range
731 * Returns: false if range data has been invalidated, true otherwise
735 * using the data, or wants to lock updates to the data it got from those
763 * device_update_page_table(range->pfns);
768 unsigned long npages = (range->end - range->start) >> PAGE_SHIFT; in hmm_vma_range_done()
771 if (range->end <= range->start) { in hmm_vma_range_done()
776 hmm = hmm_register(range->vma->vm_mm); in hmm_vma_range_done()
778 memset(range->pfns, 0, sizeof(*range->pfns) * npages); in hmm_vma_range_done()
782 spin_lock(&hmm->lock); in hmm_vma_range_done()
783 list_del_rcu(&range->list); in hmm_vma_range_done()
784 spin_unlock(&hmm->lock); in hmm_vma_range_done()
786 return range->valid; in hmm_vma_range_done()
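
hmm_vma_get_pfns() and hmm_vma_range_done() are meant to be used as a pair: snapshot the CPU page table, take the driver lock that serializes device page-table updates, and only then check that the snapshot is still valid before committing it to the device. The sketch below follows the usage outlined in the comment above, including the device_update_page_table(range->pfns) step; struct my_driver, the flag/value tables and the helpers are hypothetical placeholders.

    #include <linux/hmm.h>
    #include <linux/mutex.h>

    struct my_driver {
        struct mutex update_lock;   /* serializes device page-table updates */
    };

    /* Hypothetical encoding tables and helper, used the same way the walk code
     * above uses range->flags[] and range->values[]. */
    extern const uint64_t my_hmm_range_flags[];
    extern const uint64_t my_hmm_range_values[];
    extern void device_update_page_table(uint64_t *pfns);

    /* Snapshot [start, end) of @vma and push it to the device page table.
     * @pfns must hold (end - start) >> PAGE_SHIFT entries. */
    static int my_snapshot_range(struct my_driver *drv, struct vm_area_struct *vma,
                                 unsigned long start, unsigned long end,
                                 uint64_t *pfns)
    {
        struct hmm_range range = {
            .vma    = vma,
            .start  = start,
            .end    = end,
            .pfns   = pfns,
            .flags  = my_hmm_range_flags,
            .values = my_hmm_range_values,
            /* other fields (pfn encoding shift, ...) omitted in this sketch */
        };
        int ret;

    again:
        down_read(&vma->vm_mm->mmap_sem);
        ret = hmm_vma_get_pfns(&range);
        if (ret) {
            up_read(&vma->vm_mm->mmap_sem);
            return ret;
        }

        mutex_lock(&drv->update_lock);
        if (!hmm_vma_range_done(&range)) {
            /* CPU page table changed under us; drop locks and retry */
            mutex_unlock(&drv->update_lock);
            up_read(&vma->vm_mm->mmap_sem);
            goto again;
        }
        device_update_page_table(range.pfns);   /* commit snapshot to device */
        mutex_unlock(&drv->update_lock);
        up_read(&vma->vm_mm->mmap_sem);
        return 0;
    }
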
791 * hmm_vma_fault() - try to fault some address in a virtual address range
794 * Returns: 0 on success, error otherwise (-EAGAIN means mmap_sem has been dropped) in hmm_vma_fault()
804 * down_read(&mm->mmap_sem);
809 * case -EAGAIN:
817 * case -ENOMEM:
818 * case -EINVAL:
819 * case -EPERM:
822 * up_read(&mm->mmap_sem)
830 * up_read(&mm->mmap_sem)
839 struct vm_area_struct *vma = range->vma; in hmm_vma_fault()
840 unsigned long start = range->start; in hmm_vma_fault()
847 if (range->start < vma->vm_start || range->start >= vma->vm_end) in hmm_vma_fault()
848 return -EINVAL; in hmm_vma_fault()
849 if (range->end < vma->vm_start || range->end > vma->vm_end) in hmm_vma_fault()
850 return -EINVAL; in hmm_vma_fault()
852 hmm = hmm_register(vma->vm_mm); in hmm_vma_fault()
854 hmm_pfns_clear(range, range->pfns, range->start, range->end); in hmm_vma_fault()
855 return -ENOMEM; in hmm_vma_fault()
857 /* Caller must have registered a mirror using hmm_mirror_register() */ in hmm_vma_fault()
858 if (!hmm->mmu_notifier.ops) in hmm_vma_fault()
859 return -EINVAL; in hmm_vma_fault()
862 if (is_vm_hugetlb_page(vma) || (vma->vm_flags & VM_SPECIAL) || in hmm_vma_fault()
865 return -EINVAL; in hmm_vma_fault()
868 if (!(vma->vm_flags & VM_READ)) { in hmm_vma_fault()
875 hmm_pfns_clear(range, range->pfns, range->start, range->end); in hmm_vma_fault()
876 return -EPERM; in hmm_vma_fault()
880 spin_lock(&hmm->lock); in hmm_vma_fault()
881 range->valid = true; in hmm_vma_fault()
882 list_add_rcu(&range->list, &hmm->ranges); in hmm_vma_fault()
883 spin_unlock(&hmm->lock); in hmm_vma_fault()
889 hmm_vma_walk.last = range->start; in hmm_vma_fault()
892 mm_walk.mm = vma->vm_mm; in hmm_vma_fault()
900 ret = walk_page_range(start, range->end, &mm_walk); in hmm_vma_fault()
902 } while (ret == -EAGAIN); in hmm_vma_fault()
907 i = (hmm_vma_walk.last - range->start) >> PAGE_SHIFT; in hmm_vma_fault()
908 hmm_pfns_clear(range, &range->pfns[i], hmm_vma_walk.last, in hmm_vma_fault()
909 range->end); in hmm_vma_fault()
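
hmm_vma_fault() follows the same commit protocol but faults pages in first; the expected-use pattern in the comment block above is the authoritative reference. Below is a hedged sketch of that pattern, assuming the (range, block) calling form this version of the code appears to use; my_encode_fault_request() and device_update_page_table() are hypothetical, and struct my_driver comes from the previous sketch.

    /* Hypothetical: pre-fill range->pfns with per-address request flags, e.g.
     * valid|write where the device needs write access. */
    extern void my_encode_fault_request(struct hmm_range *range);

    /* Fault pages in [range->start, range->end) and commit them to the device. */
    static int my_fault_range(struct my_driver *drv, struct hmm_range *range)
    {
        struct mm_struct *mm = range->vma->vm_mm;
        int ret;

    retry:
        down_read(&mm->mmap_sem);
        my_encode_fault_request(range);

        ret = hmm_vma_fault(range, true /* block */);
        switch (ret) {
        case -EAGAIN:
            /* per the comment above, mmap_sem has been dropped for us */
            goto retry;
        case 0:
            break;
        default:
            /* -ENOMEM, -EINVAL, -EPERM, ...: give up */
            up_read(&mm->mmap_sem);
            return ret;
        }

        mutex_lock(&drv->update_lock);
        if (!hmm_vma_range_done(range)) {
            /* snapshot invalidated before we could commit it; start over */
            mutex_unlock(&drv->update_lock);
            up_read(&mm->mmap_sem);
            goto retry;
        }
        device_update_page_table(range->pfns);
        mutex_unlock(&drv->update_lock);
        up_read(&mm->mmap_sem);
        return 0;
    }
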
938 complete(&devmem->completion); in hmm_devmem_ref_release()
941 static void hmm_devmem_ref_exit(void *data) in hmm_devmem_ref_exit() argument
943 struct percpu_ref *ref = data; in hmm_devmem_ref_exit()
950 static void hmm_devmem_ref_kill(void *data) in hmm_devmem_ref_kill() argument
952 struct percpu_ref *ref = data; in hmm_devmem_ref_kill()
957 wait_for_completion(&devmem->completion); in hmm_devmem_ref_kill()
966 struct hmm_devmem *devmem = page->pgmap->data; in hmm_devmem_fault()
968 return devmem->ops->fault(devmem, vma, addr, page, flags, pmdp); in hmm_devmem_fault()
971 static void hmm_devmem_free(struct page *page, void *data) in hmm_devmem_free() argument
973 struct hmm_devmem *devmem = data; in hmm_devmem_free()
975 page->mapping = NULL; in hmm_devmem_free()
977 devmem->ops->free(devmem, page); in hmm_devmem_free()
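
hmm_devmem_fault() and hmm_devmem_free() simply forward to the driver's hmm_devmem_ops, so a driver supplies callbacks with the argument lists visible in those forwarding calls. A hedged sketch follows; exact argument and return types should be checked against include/linux/hmm.h for this kernel version, and the my_* helpers are hypothetical.

    /* Hypothetical driver helpers for its own device-memory management. */
    static int my_migrate_to_ram(struct hmm_devmem *devmem,
                                 struct vm_area_struct *vma, unsigned long addr,
                                 const struct page *page, unsigned int flags,
                                 pmd_t *pmdp);
    static void my_free_device_block(struct hmm_devmem *devmem, struct page *page);

    /* CPU faulted on device-private memory: migrate the data back to system
     * memory (or return a VM_FAULT_* error code). */
    static int my_devmem_fault(struct hmm_devmem *devmem,
                               struct vm_area_struct *vma,
                               unsigned long addr,
                               const struct page *page,
                               unsigned int flags,
                               pmd_t *pmdp)
    {
        return my_migrate_to_ram(devmem, vma, addr, page, flags, pmdp);
    }

    /* Last reference to a device page dropped: hand the backing device memory
     * back to the driver's allocator. */
    static void my_devmem_free(struct hmm_devmem *devmem, struct page *page)
    {
        my_free_device_block(devmem, page);
    }

    static const struct hmm_devmem_ops my_devmem_ops = {
        .fault = my_devmem_fault,
        .free  = my_devmem_free,
    };
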
988 for (key = resource->start; in hmm_devmem_radix_release()
989 key <= resource->end; in hmm_devmem_radix_release()
995 static void hmm_devmem_release(void *data) in hmm_devmem_release() argument
997 struct hmm_devmem *devmem = data; in hmm_devmem_release()
998 struct resource *resource = devmem->resource; in hmm_devmem_release()
1004 start_pfn = (resource->start & ~(PA_SECTION_SIZE - 1)) >> PAGE_SHIFT; in hmm_devmem_release()
1011 if (resource->desc == IORES_DESC_DEVICE_PRIVATE_MEMORY) in hmm_devmem_release()
1024 struct device *device = devmem->device; in hmm_devmem_pages_create()
1028 align_start = devmem->resource->start & ~(PA_SECTION_SIZE - 1); in hmm_devmem_pages_create()
1029 align_size = ALIGN(devmem->resource->start + in hmm_devmem_pages_create()
1030 resource_size(devmem->resource), in hmm_devmem_pages_create()
1031 PA_SECTION_SIZE) - align_start; in hmm_devmem_pages_create()
1038 __func__, devmem->resource); in hmm_devmem_pages_create()
1039 return -ENXIO; in hmm_devmem_pages_create()
1042 return -ENXIO; in hmm_devmem_pages_create()
1044 if (devmem->resource->desc == IORES_DESC_DEVICE_PUBLIC_MEMORY) in hmm_devmem_pages_create()
1045 devmem->pagemap.type = MEMORY_DEVICE_PUBLIC; in hmm_devmem_pages_create()
1047 devmem->pagemap.type = MEMORY_DEVICE_PRIVATE; in hmm_devmem_pages_create()
1049 devmem->pagemap.res = *devmem->resource; in hmm_devmem_pages_create()
1050 devmem->pagemap.page_fault = hmm_devmem_fault; in hmm_devmem_pages_create()
1051 devmem->pagemap.page_free = hmm_devmem_free; in hmm_devmem_pages_create()
1052 devmem->pagemap.dev = devmem->device; in hmm_devmem_pages_create()
1053 devmem->pagemap.ref = &devmem->ref; in hmm_devmem_pages_create()
1054 devmem->pagemap.data = devmem; in hmm_devmem_pages_create()
1057 align_end = align_start + align_size - 1; in hmm_devmem_pages_create()
1065 __func__, dev_name(dup->device)); in hmm_devmem_pages_create()
1067 ret = -EBUSY; in hmm_devmem_pages_create()
1088 * allocate and initialize struct page for the device memory. Moreover, in hmm_devmem_pages_create()
1089 * the device memory is inaccessible, thus we do not want to in hmm_devmem_pages_create()
1096 if (devmem->pagemap.type == MEMORY_DEVICE_PUBLIC) in hmm_devmem_pages_create()
1106 move_pfn_range_to_zone(&NODE_DATA(nid)->node_zones[ZONE_DEVICE], in hmm_devmem_pages_create()
1111 for (pfn = devmem->pfn_first; pfn < devmem->pfn_last; pfn++) { in hmm_devmem_pages_create()
1114 page->pgmap = &devmem->pagemap; in hmm_devmem_pages_create()
1121 hmm_devmem_radix_release(devmem->resource); in hmm_devmem_pages_create()
1127 * hmm_devmem_add() - hotplug ZONE_DEVICE memory for device memory
1155 return ERR_PTR(-ENOMEM); in hmm_devmem_add()
1157 init_completion(&devmem->completion); in hmm_devmem_add()
1158 devmem->pfn_first = -1UL; in hmm_devmem_add()
1159 devmem->pfn_last = -1UL; in hmm_devmem_add()
1160 devmem->resource = NULL; in hmm_devmem_add()
1161 devmem->device = device; in hmm_devmem_add()
1162 devmem->ops = ops; in hmm_devmem_add()
1164 ret = percpu_ref_init(&devmem->ref, &hmm_devmem_ref_release, in hmm_devmem_add()
1169 ret = devm_add_action_or_reset(device, hmm_devmem_ref_exit, &devmem->ref); in hmm_devmem_add()
1175 (1UL << MAX_PHYSMEM_BITS) - 1); in hmm_devmem_add()
1176 addr = addr - size + 1UL; in hmm_devmem_add()
1184 for (; addr > size && addr >= iomem_resource.start; addr -= size) { in hmm_devmem_add()
1189 devmem->resource = devm_request_mem_region(device, addr, size, in hmm_devmem_add()
1191 if (!devmem->resource) in hmm_devmem_add()
1192 return ERR_PTR(-ENOMEM); in hmm_devmem_add()
1195 if (!devmem->resource) in hmm_devmem_add()
1196 return ERR_PTR(-ERANGE); in hmm_devmem_add()
1198 devmem->resource->desc = IORES_DESC_DEVICE_PRIVATE_MEMORY; in hmm_devmem_add()
1199 devmem->pfn_first = devmem->resource->start >> PAGE_SHIFT; in hmm_devmem_add()
1200 devmem->pfn_last = devmem->pfn_first + in hmm_devmem_add()
1201 (resource_size(devmem->resource) >> PAGE_SHIFT); in hmm_devmem_add()
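
hmm_devmem_add() finds a free physical range, requests it as device-private memory, and hotplugs ZONE_DEVICE struct pages for it; teardown is handled by the devm actions registered above. A hedged sketch of a probe-time call, assuming the (ops, device, size) form used by this code and reusing my_devmem_ops from the previous sketch:

    static int my_probe_device_memory(struct device *dev, unsigned long size)
    {
        struct hmm_devmem *devmem;

        /* size is aligned up to the memory section size internally */
        devmem = hmm_devmem_add(&my_devmem_ops, dev, size);
        if (IS_ERR(devmem))
            return PTR_ERR(devmem);

        /* struct pages now exist for [pfn_first, pfn_last); the driver can
         * hand them out and migrate data into them. Teardown happens through
         * the devm actions registered by hmm_devmem_add(). */
        dev_info(dev, "device memory pfns [%lx, %lx)\n",
                 devmem->pfn_first, devmem->pfn_last);
        return 0;
    }
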
1222 if (res->desc != IORES_DESC_DEVICE_PUBLIC_MEMORY) in hmm_devmem_add_resource()
1223 return ERR_PTR(-EINVAL); in hmm_devmem_add_resource()
1229 return ERR_PTR(-ENOMEM); in hmm_devmem_add_resource()
1231 init_completion(&devmem->completion); in hmm_devmem_add_resource()
1232 devmem->pfn_first = -1UL; in hmm_devmem_add_resource()
1233 devmem->pfn_last = -1UL; in hmm_devmem_add_resource()
1234 devmem->resource = res; in hmm_devmem_add_resource()
1235 devmem->device = device; in hmm_devmem_add_resource()
1236 devmem->ops = ops; in hmm_devmem_add_resource()
1238 ret = percpu_ref_init(&devmem->ref, &hmm_devmem_ref_release, in hmm_devmem_add_resource()
1244 &devmem->ref); in hmm_devmem_add_resource()
1248 devmem->pfn_first = devmem->resource->start >> PAGE_SHIFT; in hmm_devmem_add_resource()
1249 devmem->pfn_last = devmem->pfn_first + in hmm_devmem_add_resource()
1250 (resource_size(devmem->resource) >> PAGE_SHIFT); in hmm_devmem_add_resource()
1261 &devmem->ref); in hmm_devmem_add_resource()
1287 clear_bit(hmm_device->minor, hmm_device_mask); in hmm_device_release()
1299 return ERR_PTR(-ENOMEM); in hmm_device_new()
1302 hmm_device->minor = find_first_zero_bit(hmm_device_mask, HMM_DEVICE_MAX); in hmm_device_new()
1303 if (hmm_device->minor >= HMM_DEVICE_MAX) { in hmm_device_new()
1306 return ERR_PTR(-EBUSY); in hmm_device_new()
1308 set_bit(hmm_device->minor, hmm_device_mask); in hmm_device_new()
1311 dev_set_name(&hmm_device->device, "hmm_device%d", hmm_device->minor); in hmm_device_new()
1312 hmm_device->device.devt = MKDEV(MAJOR(hmm_device_devt), in hmm_device_new()
1313 hmm_device->minor); in hmm_device_new()
1314 hmm_device->device.release = hmm_device_release; in hmm_device_new()
1315 dev_set_drvdata(&hmm_device->device, drvdata); in hmm_device_new()
1316 hmm_device->device.class = hmm_device_class; in hmm_device_new()
1317 device_initialize(&hmm_device->device); in hmm_device_new()
1325 put_device(&hmm_device->device); in hmm_device_put()
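
Finally, hmm_device_new()/hmm_device_put() fabricate a struct device ("hmm_device%d") for drivers that have device memory to hotplug but no suitable struct device of their own to own it. A minimal, hedged sketch combining it with hmm_devmem_add(); all my_* names are hypothetical:

    static struct hmm_device *my_hmm_device;
    static struct hmm_devmem *my_devmem;

    static int my_init_device_memory(void *drvdata, unsigned long size)
    {
        my_hmm_device = hmm_device_new(drvdata);
        if (IS_ERR(my_hmm_device))
            return PTR_ERR(my_hmm_device);

        /* Use the fabricated device as the owner of the hotplugged memory. */
        my_devmem = hmm_devmem_add(&my_devmem_ops, &my_hmm_device->device, size);
        if (IS_ERR(my_devmem)) {
            hmm_device_put(my_hmm_device);
            return PTR_ERR(my_devmem);
        }
        return 0;
    }

    static void my_fini_device_memory(void)
    {
        /* Dropping the reference releases the fabricated device; the devm
         * actions registered by hmm_devmem_add() then tear down the memory. */
        hmm_device_put(my_hmm_device);
    }
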