Searched refs:range (Results 1 – 18 of 18) sorted by relevance

/mm/
memremap.c
66 static void pgmap_array_delete(struct range *range) in pgmap_array_delete() argument
68 xa_store_range(&pgmap_array, PHYS_PFN(range->start), PHYS_PFN(range->end), in pgmap_array_delete()
75 struct range *range = &pgmap->ranges[range_id]; in pfn_first() local
76 unsigned long pfn = PHYS_PFN(range->start); in pfn_first()
88 struct range *range = &pgmap->ranges[i]; in pgmap_pfn_valid() local
90 if (pfn >= PHYS_PFN(range->start) && in pgmap_pfn_valid()
91 pfn <= PHYS_PFN(range->end)) in pgmap_pfn_valid()
100 const struct range *range = &pgmap->ranges[range_id]; in pfn_end() local
102 return (range->start + range_len(range)) >> PAGE_SHIFT; in pfn_end()
141 struct range *range = &pgmap->ranges[range_id]; in pageunmap_range() local
[all …]
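
The memremap.c hits all reduce to one conversion: a struct range holds an inclusive physical byte span, and pfn_first()/pfn_end() turn it into a half-open PFN interval. A compilable userspace sketch of that arithmetic, with PAGE_SHIFT, PHYS_PFN and range_len() re-derived locally as stand-ins for the kernel macros:

    #include <stdio.h>
    #include <inttypes.h>

    #define PAGE_SHIFT 12                      /* assume 4 KiB pages */
    #define PHYS_PFN(x) ((uint64_t)(x) >> PAGE_SHIFT)

    struct range { uint64_t start, end; };     /* end is inclusive, as in the kernel */

    /* range_len() is end - start + 1 precisely because end is inclusive */
    static uint64_t range_len(const struct range *r)
    {
        return r->end - r->start + 1;
    }

    int main(void)
    {
        struct range r = { 0x100000000ULL, 0x1ffffffffULL };   /* a 4 GiB span */
        uint64_t pfn_first = PHYS_PFN(r.start);
        /* matches pfn_end() above: one past the last valid PFN */
        uint64_t pfn_end = (r.start + range_len(&r)) >> PAGE_SHIFT;
        printf("pfns [%" PRIu64 ", %" PRIu64 ")\n", pfn_first, pfn_end);
        return 0;
    }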

hmm.c
30 struct hmm_range *range; member
41 struct hmm_range *range, unsigned long cpu_flags) in hmm_pfns_fill() argument
43 unsigned long i = (addr - range->start) >> PAGE_SHIFT; in hmm_pfns_fill()
46 range->hmm_pfns[i] = cpu_flags; in hmm_pfns_fill()
88 struct hmm_range *range = hmm_vma_walk->range; in hmm_pte_need_fault() local
100 pfn_req_flags &= range->pfn_flags_mask; in hmm_pte_need_fault()
101 pfn_req_flags |= range->default_flags; in hmm_pte_need_fault()
123 struct hmm_range *range = hmm_vma_walk->range; in hmm_range_need_fault() local
132 if (!((range->default_flags | range->pfn_flags_mask) & in hmm_range_need_fault()
149 struct hmm_range *range = hmm_vma_walk->range; in hmm_vma_walk_hole() local
[all …]
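
The hmm.c hits show two small idioms: hmm_pfns[] is indexed by page offset from range->start (hmm_pfns_fill), and per-PFN request flags are masked by range->pfn_flags_mask and then OR-ed with range->default_flags (hmm_pte_need_fault). A compilable sketch of both, with the struct trimmed to the fields the hits touch and the flag values invented for illustration:

    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_SHIFT 12
    /* illustrative flag values; the kernel's HMM_PFN_* constants differ */
    #define HMM_PFN_REQ_FAULT (1UL << 0)
    #define HMM_PFN_REQ_WRITE (1UL << 1)

    struct hmm_range {
        uint64_t start, end;
        unsigned long *hmm_pfns;
        unsigned long default_flags;
        unsigned long pfn_flags_mask;
    };

    /* mirrors hmm_pfns_fill(): index by page offset from range->start */
    static void fill(struct hmm_range *r, uint64_t addr, unsigned long flags)
    {
        unsigned long i = (addr - r->start) >> PAGE_SHIFT;
        r->hmm_pfns[i] = flags;
    }

    /* mirrors hmm_pte_need_fault(): mask caller flags, then OR in defaults */
    static unsigned long effective_req(const struct hmm_range *r, unsigned long req)
    {
        req &= r->pfn_flags_mask;   /* caller may only request what the mask allows */
        req |= r->default_flags;    /* range-wide defaults always apply */
        return req;
    }

    int main(void)
    {
        unsigned long pfns[4] = { 0 };
        struct hmm_range r = { 0x1000, 0x5000, pfns,
                               HMM_PFN_REQ_FAULT, HMM_PFN_REQ_WRITE };
        fill(&r, 0x3000, 0);
        printf("req=%#lx\n", effective_req(&r, HMM_PFN_REQ_WRITE));
        return 0;
    }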

mmu_notifier.c
101 const struct mmu_notifier_range *range, in mn_itree_inv_start_range() argument
109 node = interval_tree_iter_first(&subscriptions->itree, range->start, in mn_itree_inv_start_range()
110 range->end - 1); in mn_itree_inv_start_range()
124 const struct mmu_notifier_range *range) in mn_itree_inv_next() argument
129 range->start, range->end - 1); in mn_itree_inv_next()
270 struct mmu_notifier_range range = { in mn_itree_release() local
282 mn_itree_inv_start_range(subscriptions, &range, &cur_seq); in mn_itree_release()
284 interval_sub = mn_itree_inv_next(interval_sub, &range)) { in mn_itree_release()
285 ret = interval_sub->ops->invalidate(interval_sub, &range, in mn_itree_release()
451 const struct mmu_notifier_range *range) in mn_itree_invalidate() argument
[all …]
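
Both mn_itree hits pass range->end - 1 to the interval tree: notifier ranges are half-open [start, end), while interval_tree_iter_first()/interval_tree_iter_next() take an inclusive last address. A kernel-side sketch of the iteration, assuming a subscriptions->itree populated elsewhere (not a standalone program):

    struct interval_tree_node *node;

    /* -1 converts the exclusive range end to the tree's inclusive 'last' */
    for (node = interval_tree_iter_first(&subscriptions->itree,
                                         range->start, range->end - 1);
         node;
         node = interval_tree_iter_next(node, range->start, range->end - 1)) {
            /* every node visited here overlaps the invalidated span */
    }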

ptdump.c
145 const struct ptdump_range *range = st->range; in ptdump_walk_pgd() local
148 while (range->start != range->end) { in ptdump_walk_pgd()
149 walk_page_range_novma(mm, range->start, range->end, in ptdump_walk_pgd()
151 range++; in ptdump_walk_pgd()
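
ptdump_walk_pgd() consumes a table of ranges terminated by a sentinel whose start equals its end. A compilable sketch of that convention, with walk_one() standing in for walk_page_range_novma() and the example ranges invented:

    #include <stdio.h>
    #include <inttypes.h>

    struct ptdump_range { uint64_t start, end; };

    /* stand-in for walk_page_range_novma() */
    static void walk_one(uint64_t start, uint64_t end)
    {
        printf("walk [%#" PRIx64 ", %#" PRIx64 ")\n", start, end);
    }

    int main(void)
    {
        /* the table ends at the first entry whose start equals its end */
        const struct ptdump_range ranges[] = {
            { 0x0000000000000000ULL, 0x0000800000000000ULL },
            { 0xffff800000000000ULL, 0xffffffffffffffffULL },
            { 0, 0 },
        };
        for (const struct ptdump_range *r = ranges; r->start != r->end; r++)
            walk_one(r->start, r->end);
        return 0;
    }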

madvise.c
742 struct mmu_notifier_range range; in madvise_free_single_vma() local
749 range.start = max(vma->vm_start, start_addr); in madvise_free_single_vma()
750 if (range.start >= vma->vm_end) in madvise_free_single_vma()
752 range.end = min(vma->vm_end, end_addr); in madvise_free_single_vma()
753 if (range.end <= vma->vm_start) in madvise_free_single_vma()
755 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm, in madvise_free_single_vma()
756 range.start, range.end); in madvise_free_single_vma()
759 tlb_gather_mmu(&tlb, mm, range.start, range.end); in madvise_free_single_vma()
762 mmu_notifier_invalidate_range_start(&range); in madvise_free_single_vma()
764 walk_page_range(vma->vm_mm, range.start, range.end, in madvise_free_single_vma()
[all …]
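
Before registering the notifier range, madvise_free_single_vma() clamps the caller's [start_addr, end_addr) to the VMA and bails out when the intersection is empty. The same clamping as a standalone helper (a sketch mirroring the excerpt's max()/min() logic, not the kernel function itself):

    #include <stdio.h>
    #include <stdbool.h>
    #include <inttypes.h>

    struct span { uint64_t start, end; };   /* half-open */

    /* Returns false when [start_addr, end_addr) misses the VMA entirely. */
    static bool clamp_to_vma(struct span *out, uint64_t vm_start, uint64_t vm_end,
                             uint64_t start_addr, uint64_t end_addr)
    {
        out->start = start_addr > vm_start ? start_addr : vm_start;  /* max() */
        if (out->start >= vm_end)
            return false;
        out->end = end_addr < vm_end ? end_addr : vm_end;            /* min() */
        if (out->end <= vm_start)
            return false;
        return true;
    }

    int main(void)
    {
        struct span s;
        /* VMA [0x1000, 0x9000), request [0x4000, 0xc000) -> [0x4000, 0x9000) */
        if (clamp_to_vma(&s, 0x1000, 0x9000, 0x4000, 0xc000))
            printf("[%#" PRIx64 ", %#" PRIx64 ")\n", s.start, s.end);
        return 0;
    }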

mapping_dirty_helpers.c
17 struct mmu_notifier_range range; member
188 mmu_notifier_range_init(&wpwalk->range, MMU_NOTIFY_PROTECTION_PAGE, 0, in wp_clean_pre_vma()
190 mmu_notifier_invalidate_range_start(&wpwalk->range); in wp_clean_pre_vma()
214 flush_tlb_range(walk->vma, wpwalk->range.start, in wp_clean_post_vma()
215 wpwalk->range.end); in wp_clean_post_vma()
220 mmu_notifier_invalidate_range_end(&wpwalk->range); in wp_clean_post_vma()
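
mapping_dirty_helpers.c shows the canonical bracket around a page-table walk: initialize the notifier range and call invalidate_range_start() before touching PTEs, then pair flush_tlb_range() with invalidate_range_end() afterwards. A condensed kernel-side sketch of the sequence, with the walk itself elided:

    struct mmu_notifier_range range;

    mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_PAGE, 0,
                            vma, vma->vm_mm, start, end);
    mmu_notifier_invalidate_range_start(&range);

    /* ... write-protect PTEs in [start, end) ... */

    flush_tlb_range(vma, range.start, range.end);
    mmu_notifier_invalidate_range_end(&range);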

oom_kill.c
582 struct mmu_notifier_range range; in __oom_reap_task_mm() local
585 mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, in __oom_reap_task_mm()
588 tlb_gather_mmu(&tlb, mm, range.start, range.end); in __oom_reap_task_mm()
589 if (mmu_notifier_invalidate_range_start_nonblock(&range)) { in __oom_reap_task_mm()
590 tlb_finish_mmu(&tlb, range.start, range.end); in __oom_reap_task_mm()
594 unmap_page_range(&tlb, vma, range.start, range.end, NULL); in __oom_reap_task_mm()
595 mmu_notifier_invalidate_range_end(&range); in __oom_reap_task_mm()
596 tlb_finish_mmu(&tlb, range.start, range.end); in __oom_reap_task_mm()
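
The OOM reaper runs in a context that must not sleep, so __oom_reap_task_mm() uses the _nonblock variant of invalidate_range_start; if any notifier would have blocked, it unwinds the TLB gather and retries the whole mm later instead of waiting. The error path per the excerpt (kernel-side sketch, one VMA shown):

    struct mmu_notifier_range range;
    struct mmu_gather tlb;

    mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0,
                            vma, mm, vma->vm_start, vma->vm_end);
    tlb_gather_mmu(&tlb, mm, range.start, range.end);
    if (mmu_notifier_invalidate_range_start_nonblock(&range)) {
            /* a notifier would have slept: unwind and retry this mm later */
            tlb_finish_mmu(&tlb, range.start, range.end);
            return false;
    }
    unmap_page_range(&tlb, vma, range.start, range.end, NULL);
    mmu_notifier_invalidate_range_end(&range);
    tlb_finish_mmu(&tlb, range.start, range.end);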

rmap.c
929 struct mmu_notifier_range range; in page_mkclean_one() local
936 mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_PAGE, in page_mkclean_one()
939 mmu_notifier_invalidate_range_start(&range); in page_mkclean_one()
989 mmu_notifier_invalidate_range_end(&range); in page_mkclean_one()
1460 struct mmu_notifier_range range; in try_to_unmap_one() local
1493 range.end = PageKsm(page) ? in try_to_unmap_one()
1495 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm, in try_to_unmap_one()
1496 address, range.end); in try_to_unmap_one()
1502 adjust_range_if_pmd_sharing_possible(vma, &range.start, in try_to_unmap_one()
1503 &range.end); in try_to_unmap_one()
[all …]
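
In try_to_unmap_one() the range initially covers just the page (with the KSM special case visible in the excerpt), and is then widened by adjust_range_if_pmd_sharing_possible(): unsharing a shared hugetlb PMD tears down a larger span than the page itself, and the notifier must cover all of it. Condensed, with range_end standing in for the excerpt's precomputed end:

    struct mmu_notifier_range range;

    mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
                            address, range_end);
    /* a shared hugetlb PMD may force invalidating well beyond this page */
    adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end);
    mmu_notifier_invalidate_range_start(&range);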

mprotect.c
220 struct mmu_notifier_range range; in change_pmd_range() local
222 range.start = 0; in change_pmd_range()
243 if (!range.start) { in change_pmd_range()
244 mmu_notifier_range_init(&range, in change_pmd_range()
247 mmu_notifier_invalidate_range_start(&range); in change_pmd_range()
276 if (range.start) in change_pmd_range()
277 mmu_notifier_invalidate_range_end(&range); in change_pmd_range()
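
change_pmd_range() registers the invalidation lazily: range.start = 0 doubles as a "not yet registered" flag, the init/start pair runs only once a PMD actually needs changing, and _end() fires only if a start happened, so spans full of empty PMDs cost no notifier traffic. A condensed kernel-side sketch (the event is MMU_NOTIFY_PROTECTION_VMA in the source, truncated out of the excerpt):

    struct mmu_notifier_range range;

    range.start = 0;                        /* doubles as "not registered" */

    /* for each pmd in [addr, end), skipping empty ones: */
            if (!range.start) {             /* first pmd that will change */
                    mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_VMA,
                                            0, vma, vma->vm_mm, addr, end);
                    mmu_notifier_invalidate_range_start(&range);
            }
            /* ... change protections on this pmd ... */

    if (range.start)                        /* end only what was started */
            mmu_notifier_invalidate_range_end(&range);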

memory.c
1196 struct mmu_notifier_range range; in copy_page_range() local
1232 mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_PAGE, in copy_page_range()
1234 mmu_notifier_invalidate_range_start(&range); in copy_page_range()
1262 mmu_notifier_invalidate_range_end(&range); in copy_page_range()
1596 struct mmu_notifier_range range; in unmap_vmas() local
1598 mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma, vma->vm_mm, in unmap_vmas()
1600 mmu_notifier_invalidate_range_start(&range); in unmap_vmas()
1603 mmu_notifier_invalidate_range_end(&range); in unmap_vmas()
1617 struct mmu_notifier_range range; in zap_page_range() local
1621 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm, in zap_page_range()
[all …]

util.c
352 unsigned long randomize_page(unsigned long start, unsigned long range) in randomize_page() argument
355 range -= PAGE_ALIGN(start) - start; in randomize_page()
359 if (start > ULONG_MAX - range) in randomize_page()
360 range = ULONG_MAX - start; in randomize_page()
362 range >>= PAGE_SHIFT; in randomize_page()
364 if (range == 0) in randomize_page()
367 return start + (get_random_long() % range << PAGE_SHIFT); in randomize_page()
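
randomize_page() is self-contained enough to re-derive: shrink the range by the bytes lost to aligning start up to a page boundary, clamp against overflow at the top of the address space, convert bytes to pages, then add a random page-aligned offset. A userspace sketch with random() standing in for the kernel's get_random_long() (not a cryptographic source; for illustration only):

    #include <stdio.h>
    #include <stdlib.h>
    #include <limits.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)
    #define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

    static unsigned long randomize_page(unsigned long start, unsigned long range)
    {
        if (start & (PAGE_SIZE - 1)) {
            range -= PAGE_ALIGN(start) - start; /* bytes lost to alignment */
            start = PAGE_ALIGN(start);
        }
        if (start > ULONG_MAX - range)          /* clamp at address-space top */
            range = ULONG_MAX - start;
        range >>= PAGE_SHIFT;                   /* bytes -> pages */
        if (range == 0)
            return start;
        /* pick a page within the window; %-then-shift keeps it page aligned */
        return start + ((unsigned long)random() % range << PAGE_SHIFT);
    }

    int main(void)
    {
        printf("%#lx\n", randomize_page(0x70000100UL, 1UL << 20));
        return 0;
    }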

hugetlb.c
3825 struct mmu_notifier_range range; in copy_hugetlb_page_range() local
3831 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, src, in copy_hugetlb_page_range()
3834 mmu_notifier_invalidate_range_start(&range); in copy_hugetlb_page_range()
3919 mmu_notifier_invalidate_range_end(&range); in copy_hugetlb_page_range()
3938 struct mmu_notifier_range range; in __unmap_hugepage_range() local
3955 mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma, mm, start, in __unmap_hugepage_range()
3957 adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end); in __unmap_hugepage_range()
3958 mmu_notifier_invalidate_range_start(&range); in __unmap_hugepage_range()
4024 mmu_notifier_invalidate_range_end(&range); in __unmap_hugepage_range()
4159 struct mmu_notifier_range range; in hugetlb_cow() local
[all …]

khugepaged.c
1076 struct mmu_notifier_range range; in collapse_huge_page() local
1145 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, NULL, mm, in collapse_huge_page()
1147 mmu_notifier_invalidate_range_start(&range); in collapse_huge_page()
1163 mmu_notifier_invalidate_range_end(&range); in collapse_huge_page()
1456 struct mmu_notifier_range range; in collapse_pte_mapped_thp() local
1548 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, NULL, mm, haddr, in collapse_pte_mapped_thp()
1550 mmu_notifier_invalidate_range_start(&range); in collapse_pte_mapped_thp()
1555 mmu_notifier_invalidate_range_end(&range); in collapse_pte_mapped_thp()
1643 struct mmu_notifier_range range; in retract_page_tables() local
1646 mmu_notifier_range_init(&range, in retract_page_tables()
[all …]

mremap.c
467 struct mmu_notifier_range range; in move_page_tables() local
476 mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma, vma->vm_mm, in move_page_tables()
478 mmu_notifier_invalidate_range_start(&range); in move_page_tables()
534 mmu_notifier_invalidate_range_end(&range); in move_page_tables()

ksm.c
1044 struct mmu_notifier_range range; in write_protect_page() local
1052 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm, in write_protect_page()
1055 mmu_notifier_invalidate_range_start(&range); in write_protect_page()
1107 mmu_notifier_invalidate_range_end(&range); in write_protect_page()
1131 struct mmu_notifier_range range; in replace_page() local
1141 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm, addr, in replace_page()
1143 mmu_notifier_invalidate_range_start(&range); in replace_page()
1189 mmu_notifier_invalidate_range_end(&range); in replace_page()

huge_memory.c
1971 struct mmu_notifier_range range; in __split_huge_pud() local
1973 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm, in __split_huge_pud()
1976 mmu_notifier_invalidate_range_start(&range); in __split_huge_pud()
1980 __split_huge_pud_locked(vma, pud, range.start); in __split_huge_pud()
1988 mmu_notifier_invalidate_range_only_end(&range); in __split_huge_pud()
2226 struct mmu_notifier_range range; in __split_huge_pmd() local
2230 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm, in __split_huge_pmd()
2233 mmu_notifier_invalidate_range_start(&range); in __split_huge_pmd()
2280 __split_huge_pmd_locked(vma, pmd, range.start, freeze); in __split_huge_pmd()
2298 mmu_notifier_invalidate_range_only_end(&range); in __split_huge_pmd()
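
Both split paths close with mmu_notifier_invalidate_range_only_end() instead of the plain _end(). Per the comments in the source, any secondary-TLB invalidation the split requires has already been issued inside the locked section (or is unnecessary because the same PFNs stay mapped), so the duplicate ->invalidate_range() call that _end() would make is skipped. The bracket as the excerpt uses it, condensed:

    struct mmu_notifier_range range;

    mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
                            address & HPAGE_PMD_MASK,
                            (address & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE);
    mmu_notifier_invalidate_range_start(&range);

    /* ... __split_huge_pmd_locked(vma, pmd, range.start, freeze) ... */

    /* secondary TLBs were already handled; skip the redundant callback */
    mmu_notifier_invalidate_range_only_end(&range);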

migrate.c
2489 struct mmu_notifier_range range; in migrate_vma_collect() local
2496 mmu_notifier_range_init_migrate(&range, 0, migrate->vma, in migrate_vma_collect()
2499 mmu_notifier_invalidate_range_start(&range); in migrate_vma_collect()
2504 mmu_notifier_invalidate_range_end(&range); in migrate_vma_collect()
2983 struct mmu_notifier_range range; in migrate_vma_pages() local
3004 mmu_notifier_range_init(&range, in migrate_vma_pages()
3009 mmu_notifier_invalidate_range_start(&range); in migrate_vma_pages()
3050 mmu_notifier_invalidate_range_only_end(&range); in migrate_vma_pages()
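
migrate_vma_collect() uses the _init_migrate variant, which additionally records a migrate_pgmap_owner in the range; a device driver's notifier can compare that owner against its own and skip invalidations triggered by its own migration. As used in the excerpt, roughly:

    struct mmu_notifier_range range;

    /* the owner tag lets the driver ignore self-inflicted invalidations */
    mmu_notifier_range_init_migrate(&range, 0, migrate->vma,
                                    migrate->vma->vm_mm, migrate->start,
                                    migrate->end, migrate->pgmap_owner);
    mmu_notifier_invalidate_range_start(&range);

    /* ... walk the page tables collecting migratable pages ... */

    mmu_notifier_invalidate_range_end(&range);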

Kconfig
289 the full range of memory available to the CPU. Enabled
733 range 8 2048
804 # Helpers to mirror range of the CPU page tables of a process into device page