
Searched refs:range (results 1 – 19 of 19), sorted by relevance

/mm/
memremap.c
66 static void pgmap_array_delete(struct range *range) in pgmap_array_delete() argument
68 xa_store_range(&pgmap_array, PHYS_PFN(range->start), PHYS_PFN(range->end), in pgmap_array_delete()
75 struct range *range = &pgmap->ranges[range_id]; in pfn_first() local
76 unsigned long pfn = PHYS_PFN(range->start); in pfn_first()
88 struct range *range = &pgmap->ranges[i]; in pgmap_pfn_valid() local
90 if (pfn >= PHYS_PFN(range->start) && in pgmap_pfn_valid()
91 pfn <= PHYS_PFN(range->end)) in pgmap_pfn_valid()
100 const struct range *range = &pgmap->ranges[range_id]; in pfn_end() local
102 return (range->start + range_len(range)) >> PAGE_SHIFT; in pfn_end()
141 struct range *range = &pgmap->ranges[range_id]; in pageunmap_range() local
[all …]
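
The pfn_first()/pfn_end() hits above reduce a struct range to a span of page frame numbers. A minimal sketch of that arithmetic, assuming only <linux/range.h> and <linux/pfn.h> (range_len() is inclusive: end - start + 1):

#include <linux/range.h>
#include <linux/pfn.h>

/* first PFN covered by the range, as pfn_first() computes */
static unsigned long range_first_pfn(const struct range *range)
{
	return PHYS_PFN(range->start);
}

/* one past the last PFN, matching what pfn_end() returns */
static unsigned long range_end_pfn(const struct range *range)
{
	return (range->start + range_len(range)) >> PAGE_SHIFT;
}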
hmm.c
32 struct hmm_range *range; member
43 struct hmm_range *range, unsigned long cpu_flags) in hmm_pfns_fill() argument
45 unsigned long i = (addr - range->start) >> PAGE_SHIFT; in hmm_pfns_fill()
48 range->hmm_pfns[i] = cpu_flags; in hmm_pfns_fill()
90 struct hmm_range *range = hmm_vma_walk->range; in hmm_pte_need_fault() local
102 pfn_req_flags &= range->pfn_flags_mask; in hmm_pte_need_fault()
103 pfn_req_flags |= range->default_flags; in hmm_pte_need_fault()
125 struct hmm_range *range = hmm_vma_walk->range; in hmm_range_need_fault() local
134 if (!((range->default_flags | range->pfn_flags_mask) & in hmm_range_need_fault()
151 struct hmm_range *range = hmm_vma_walk->range; in hmm_vma_walk_hole() local
[all …]
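
hmm_pfns_fill() above relies on a fixed one-slot-per-page layout: each page in [range->start, range->end) owns one entry of range->hmm_pfns[]. A sketch of that indexing, assuming <linux/hmm.h> and a page-aligned addr inside the range:

#include <linux/hmm.h>

static void hmm_fill_one(struct hmm_range *range, unsigned long addr,
			 unsigned long cpu_flags)
{
	/* slot index = page offset from the start of the range */
	unsigned long i = (addr - range->start) >> PAGE_SHIFT;

	range->hmm_pfns[i] = cpu_flags;
}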
mmu_notifier.c
95 const struct mmu_notifier_range *range, in mn_itree_inv_start_range() argument
103 node = interval_tree_iter_first(&subscriptions->itree, range->start, in mn_itree_inv_start_range()
104 range->end - 1); in mn_itree_inv_start_range()
118 const struct mmu_notifier_range *range) in mn_itree_inv_next() argument
123 range->start, range->end - 1); in mn_itree_inv_next()
264 struct mmu_notifier_range range = { in mn_itree_release() local
276 mn_itree_inv_start_range(subscriptions, &range, &cur_seq); in mn_itree_release()
278 interval_sub = mn_itree_inv_next(interval_sub, &range)) { in mn_itree_release()
279 ret = interval_sub->ops->invalidate(interval_sub, &range, in mn_itree_release()
445 const struct mmu_notifier_range *range) in mn_itree_invalidate() argument
[all …]
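
mn_itree_inv_start_range() and mn_itree_inv_next() above walk an interval tree of subscriptions. Interval trees store inclusive [start, last] spans, so the exclusive notifier range [start, end) is queried with last = end - 1, as in this sketch:

#include <linux/interval_tree.h>

static void visit_overlaps(struct rb_root_cached *itree,
			   unsigned long start, unsigned long end)
{
	struct interval_tree_node *node;

	for (node = interval_tree_iter_first(itree, start, end - 1);
	     node;
	     node = interval_tree_iter_next(node, start, end - 1)) {
		/* node overlaps [start, end); notify its subscriber */
	}
}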
rmap.c
944 struct mmu_notifier_range range; in page_mkclean_one() local
951 mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_PAGE, in page_mkclean_one()
954 mmu_notifier_invalidate_range_start(&range); in page_mkclean_one()
1004 mmu_notifier_invalidate_range_end(&range); in page_mkclean_one()
1491 struct mmu_notifier_range range; in try_to_unmap_one() local
1514 range.end = PageKsm(page) ? in try_to_unmap_one()
1516 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm, in try_to_unmap_one()
1517 address, range.end); in try_to_unmap_one()
1523 adjust_range_if_pmd_sharing_possible(vma, &range.start, in try_to_unmap_one()
1524 &range.end); in try_to_unmap_one()
[all …]
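
page_mkclean_one() and try_to_unmap_one() above follow the same bracketing discipline. A hedged sketch of it, with the mmu_notifier_range_init() signature as of the kernel indexed here (event, flags, vma, mm, start, end):

#include <linux/mm.h>
#include <linux/mmu_notifier.h>

static void invalidate_span(struct vm_area_struct *vma,
			    unsigned long start, unsigned long end)
{
	struct mmu_notifier_range range;

	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma,
				vma->vm_mm, start, end);
	mmu_notifier_invalidate_range_start(&range);
	/* ... clear or write-protect PTEs in [start, end) ... */
	mmu_notifier_invalidate_range_end(&range);
}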
ptdump.c
145 const struct ptdump_range *range = st->range; in ptdump_walk_pgd() local
148 while (range->start != range->end) { in ptdump_walk_pgd()
149 walk_page_range_novma(mm, range->start, range->end, in ptdump_walk_pgd()
151 range++; in ptdump_walk_pgd()
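
The while (range->start != range->end) loop above implies the range table is sentinel-terminated. A sketch of such a table (the addresses are hypothetical), assuming <linux/ptdump.h>:

#include <linux/ptdump.h>

static const struct ptdump_range example_ranges[] = {
	{ 0xffff800000000000UL, 0xffffffffffffffffUL },	/* hypothetical kernel span */
	{ 0, 0 },	/* sentinel: start == end stops the walk */
};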
mapping_dirty_helpers.c
17 struct mmu_notifier_range range; member
192 mmu_notifier_range_init(&wpwalk->range, MMU_NOTIFY_PROTECTION_PAGE, 0, in wp_clean_pre_vma()
194 mmu_notifier_invalidate_range_start(&wpwalk->range); in wp_clean_pre_vma()
218 flush_tlb_range(walk->vma, wpwalk->range.start, in wp_clean_post_vma()
219 wpwalk->range.end); in wp_clean_post_vma()
224 mmu_notifier_invalidate_range_end(&wpwalk->range); in wp_clean_post_vma()
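
wp_clean_post_vma() above batches the TLB shootdown: one flush_tlb_range() per VMA after the page walk, then the notifier end call. A sketch of that per-VMA epilogue:

#include <linux/mmu_notifier.h>
#include <asm/tlbflush.h>

static void finish_vma(struct vm_area_struct *vma,
		       struct mmu_notifier_range *range)
{
	/* one batched flush for everything write-protected in this VMA */
	flush_tlb_range(vma, range->start, range->end);
	mmu_notifier_invalidate_range_end(range);
}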
madvise.c
742 struct mmu_notifier_range range; in madvise_free_single_vma() local
749 range.start = max(vma->vm_start, start_addr); in madvise_free_single_vma()
750 if (range.start >= vma->vm_end) in madvise_free_single_vma()
752 range.end = min(vma->vm_end, end_addr); in madvise_free_single_vma()
753 if (range.end <= vma->vm_start) in madvise_free_single_vma()
755 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm, in madvise_free_single_vma()
756 range.start, range.end); in madvise_free_single_vma()
762 mmu_notifier_invalidate_range_start(&range); in madvise_free_single_vma()
764 walk_page_range(vma->vm_mm, range.start, range.end, in madvise_free_single_vma()
767 mmu_notifier_invalidate_range_end(&range); in madvise_free_single_vma()
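
madvise_free_single_vma() above first intersects the caller's span with the VMA and bails out when the intersection is empty. The clamp as a standalone sketch (the helper name is hypothetical):

/* returns false when [start_addr, end_addr) misses the VMA entirely */
static bool clamp_to_vma(struct vm_area_struct *vma,
			 unsigned long start_addr, unsigned long end_addr,
			 struct mmu_notifier_range *range)
{
	range->start = max(vma->vm_start, start_addr);
	if (range->start >= vma->vm_end)
		return false;
	range->end = min(vma->vm_end, end_addr);
	if (range->end <= vma->vm_start)
		return false;
	return true;
}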
mprotect.c
235 struct mmu_notifier_range range; in change_pmd_range() local
237 range.start = 0; in change_pmd_range()
258 if (!range.start) { in change_pmd_range()
259 mmu_notifier_range_init(&range, in change_pmd_range()
262 mmu_notifier_invalidate_range_start(&range); in change_pmd_range()
295 if (range.start) in change_pmd_range()
296 mmu_notifier_invalidate_range_end(&range); in change_pmd_range()
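
change_pmd_range() above uses range.start == 0 as a "not announced yet" flag, so the notifier start fires only if a PMD actually changes and the end call only if the start did. A sketch of that lazy pattern (pmd_needs_change() is a hypothetical predicate; the flag assumes start is never 0):

static void lazy_invalidate(struct vm_area_struct *vma,
			    unsigned long start, unsigned long end)
{
	struct mmu_notifier_range range;
	unsigned long addr;

	range.start = 0;	/* doubles as "start() not yet issued" */
	for (addr = start; addr < end; addr += PMD_SIZE) {
		if (!pmd_needs_change(addr))	/* hypothetical */
			continue;
		if (!range.start) {
			mmu_notifier_range_init(&range,
						MMU_NOTIFY_PROTECTION_VMA, 0,
						vma, vma->vm_mm, start, end);
			mmu_notifier_invalidate_range_start(&range);
		}
		/* ... change protections on this PMD ... */
	}
	if (range.start)
		mmu_notifier_invalidate_range_end(&range);
}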
memory.c
1292 struct mmu_notifier_range range; in copy_page_range() local
1328 mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_PAGE, in copy_page_range()
1330 mmu_notifier_invalidate_range_start(&range); in copy_page_range()
1358 mmu_notifier_invalidate_range_end(&range); in copy_page_range()
1695 struct mmu_notifier_range range; in unmap_vmas() local
1697 mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma, vma->vm_mm, in unmap_vmas()
1699 mmu_notifier_invalidate_range_start(&range); in unmap_vmas()
1702 mmu_notifier_invalidate_range_end(&range); in unmap_vmas()
1716 struct mmu_notifier_range range; in zap_page_range() local
1720 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm, in zap_page_range()
[all …]
util.c
364 unsigned long randomize_page(unsigned long start, unsigned long range) in randomize_page() argument
367 range -= PAGE_ALIGN(start) - start; in randomize_page()
371 if (start > ULONG_MAX - range) in randomize_page()
372 range = ULONG_MAX - start; in randomize_page()
374 range >>= PAGE_SHIFT; in randomize_page()
376 if (range == 0) in randomize_page()
379 return start + (get_random_long() % range << PAGE_SHIFT); in randomize_page()
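
The randomize_page() hits above elide the start = PAGE_ALIGN(start) step between lines 367 and 371. A user-space restatement of the whole computation for illustration (rnd stands in for get_random_long(); note that % binds tighter than <<):

#include <limits.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

static unsigned long randomize_page_sketch(unsigned long start,
					   unsigned long range,
					   unsigned long rnd)
{
	range -= PAGE_ALIGN(start) - start;	/* keep the end fixed while aligning */
	start = PAGE_ALIGN(start);
	if (start > ULONG_MAX - range)		/* avoid wrapping the address space */
		range = ULONG_MAX - start;
	range >>= PAGE_SHIFT;			/* count candidate pages */
	if (range == 0)
		return start;
	return start + (rnd % range << PAGE_SHIFT);	/* page-aligned pick */
}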
hugetlb.c
4301 struct mmu_notifier_range range; in copy_hugetlb_page_range() local
4305 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, src, in copy_hugetlb_page_range()
4308 mmu_notifier_invalidate_range_start(&range); in copy_hugetlb_page_range()
4442 mmu_notifier_invalidate_range_end(&range); in copy_hugetlb_page_range()
4461 struct mmu_notifier_range range; in __unmap_hugepage_range() local
4478 mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma, mm, start, in __unmap_hugepage_range()
4480 adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end); in __unmap_hugepage_range()
4481 mmu_notifier_invalidate_range_start(&range); in __unmap_hugepage_range()
4547 mmu_notifier_invalidate_range_end(&range); in __unmap_hugepage_range()
4668 struct mmu_notifier_range range; in hugetlb_cow() local
[all …]
khugepaged.c
1076 struct mmu_notifier_range range; in collapse_huge_page() local
1144 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, NULL, mm, in collapse_huge_page()
1146 mmu_notifier_invalidate_range_start(&range); in collapse_huge_page()
1162 mmu_notifier_invalidate_range_end(&range); in collapse_huge_page()
1450 struct mmu_notifier_range range; in collapse_pte_mapped_thp() local
1540 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, NULL, mm, haddr, in collapse_pte_mapped_thp()
1542 mmu_notifier_invalidate_range_start(&range); in collapse_pte_mapped_thp()
1546 mmu_notifier_invalidate_range_end(&range); in collapse_pte_mapped_thp()
1632 struct mmu_notifier_range range; in retract_page_tables() local
1634 mmu_notifier_range_init(&range, in retract_page_tables()
[all …]
oom_kill.c
547 struct mmu_notifier_range range; in __oom_reap_task_mm() local
550 mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, in __oom_reap_task_mm()
554 if (mmu_notifier_invalidate_range_start_nonblock(&range)) { in __oom_reap_task_mm()
559 unmap_page_range(&tlb, vma, range.start, range.end, NULL); in __oom_reap_task_mm()
560 mmu_notifier_invalidate_range_end(&range); in __oom_reap_task_mm()
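
__oom_reap_task_mm() above uses the nonblocking variant: if any subscriber would have to sleep, mmu_notifier_invalidate_range_start_nonblock() fails and the reaper backs off instead of deadlocking. A sketch:

static bool reap_span(struct vm_area_struct *vma,
		      unsigned long start, unsigned long end)
{
	struct mmu_notifier_range range;

	mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma,
				vma->vm_mm, start, end);
	if (mmu_notifier_invalidate_range_start_nonblock(&range))
		return false;	/* a subscriber would block; retry later */
	/* ... unmap [start, end) ... */
	mmu_notifier_invalidate_range_end(&range);
	return true;
}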
mremap.c
536 struct mmu_notifier_range range; in move_page_tables() local
546 mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma, vma->vm_mm, in move_page_tables()
548 mmu_notifier_invalidate_range_start(&range); in move_page_tables()
611 mmu_notifier_invalidate_range_end(&range); in move_page_tables()
migrate.c
2474 struct mmu_notifier_range range; in migrate_vma_collect() local
2481 mmu_notifier_range_init_owner(&range, MMU_NOTIFY_MIGRATE, 0, in migrate_vma_collect()
2484 mmu_notifier_invalidate_range_start(&range); in migrate_vma_collect()
2489 mmu_notifier_invalidate_range_end(&range); in migrate_vma_collect()
2971 struct mmu_notifier_range range; in migrate_vma_pages() local
2992 mmu_notifier_range_init_owner(&range, in migrate_vma_pages()
2996 mmu_notifier_invalidate_range_start(&range); in migrate_vma_pages()
3036 mmu_notifier_invalidate_range_only_end(&range); in migrate_vma_pages()
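
migrate.c above introduces two variants: the _init_owner() form tags the range so a device driver can recognise, and skip, invalidations it caused itself, while _only_end() closes a span whose page-table update already issued the secondary-TLB invalidate under the page-table lock. A sketch, hedged against the signatures of the kernel indexed here:

static void migrate_span(struct vm_area_struct *vma, unsigned long start,
			 unsigned long end, void *pgmap_owner)
{
	struct mmu_notifier_range range;

	mmu_notifier_range_init_owner(&range, MMU_NOTIFY_MIGRATE, 0, vma,
				      vma->vm_mm, start, end, pgmap_owner);
	mmu_notifier_invalidate_range_start(&range);
	/* ... install migration entries, invalidating as PTEs change ... */
	mmu_notifier_invalidate_range_only_end(&range);
}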
ksm.c
1037 struct mmu_notifier_range range; in write_protect_page() local
1045 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm, in write_protect_page()
1048 mmu_notifier_invalidate_range_start(&range); in write_protect_page()
1100 mmu_notifier_invalidate_range_end(&range); in write_protect_page()
1124 struct mmu_notifier_range range; in replace_page() local
1134 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm, addr, in replace_page()
1136 mmu_notifier_invalidate_range_start(&range); in replace_page()
1182 mmu_notifier_invalidate_range_end(&range); in replace_page()
huge_memory.c
1910 struct mmu_notifier_range range; in __split_huge_pud() local
1912 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm, in __split_huge_pud()
1915 mmu_notifier_invalidate_range_start(&range); in __split_huge_pud()
1919 __split_huge_pud_locked(vma, pud, range.start); in __split_huge_pud()
1927 mmu_notifier_invalidate_range_only_end(&range); in __split_huge_pud()
2171 struct mmu_notifier_range range; in __split_huge_pmd() local
2175 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm, in __split_huge_pmd()
2178 mmu_notifier_invalidate_range_start(&range); in __split_huge_pmd()
2225 __split_huge_pmd_locked(vma, pmd, range.start, freeze); in __split_huge_pmd()
2243 mmu_notifier_invalidate_range_only_end(&range); in __split_huge_pmd()
memory_hotplug.c
1605 struct range __weak arch_get_mappable_range(void) in arch_get_mappable_range()
1607 struct range mhp_range = { in arch_get_mappable_range()
1614 struct range mhp_get_pluggable_range(bool need_mapping) in mhp_get_pluggable_range()
1617 struct range mhp_range; in mhp_get_pluggable_range()
1636 struct range mhp_range = mhp_get_pluggable_range(need_mapping); in mhp_range_allowed()
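
struct range here is the generic <linux/range.h> type, inclusive on both ends, so a whole-address-space span is { 0, -1ULL } and its length is end - start + 1. A containment check in the spirit of mhp_range_allowed() (the helper is hypothetical):

#include <linux/types.h>
#include <linux/range.h>

static bool span_allowed(const struct range *mhp, u64 start, u64 size)
{
	/* inclusive bounds: the last byte of the span is start + size - 1 */
	return start >= mhp->start && start + size - 1 <= mhp->end;
}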
Kconfig
273 Enable bounce buffers for devices that cannot access the full range of
718 range 8 2048
802 # Helpers to mirror range of the CPU page tables of a process into device page
848 range of user-space addresses. These pages are either pinned via