/kernel/linux/linux-5.10/mm/

purgeable.c
     85  struct mm_struct *mm = vma->vm_mm;  in lookup_uxpte_page()
    157  spin_lock(&vma->vm_mm->uxpgd_lock);  in lock_uxpte()
    169  spin_unlock(&vma->vm_mm->uxpgd_lock);  in lock_uxpte()
    178  spin_lock(&vma->vm_mm->uxpgd_lock);  in unlock_uxpte()
    184  spin_unlock(&vma->vm_mm->uxpgd_lock);  in unlock_uxpte()
    192  spin_lock(&vma->vm_mm->uxpgd_lock);  in uxpte_set_present()
    204  spin_unlock(&vma->vm_mm->uxpgd_lock);  in uxpte_set_present()
    214  spin_lock(&vma->vm_mm->uxpgd_lock);  in uxpte_clear_present()
    225  spin_unlock(&vma->vm_mm->uxpgd_lock);  in uxpte_clear_present()
    239  spin_lock(&vma->vm_mm->uxpgd_lock);  in do_uxpte_page_fault()
    [all …]
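Every helper listed above follows the same discipline: the per-mm uxpgd_lock is taken through the VMA's owning mm before the user-extended page table is touched. A minimal sketch of that shape; only the lock/unlock pairing comes from the matches, the body is an assumption:

    /* Hedged sketch of the purgeable.c locking pattern: all UXPTE
     * accessors serialize on the uxpgd_lock of the VMA's owning mm.
     * The work between lock and unlock is elided/assumed. */
    static void uxpte_access(struct vm_area_struct *vma)
    {
            spin_lock(&vma->vm_mm->uxpgd_lock);
            /* ... look up or update the UXPTE for this VMA ... */
            spin_unlock(&vma->vm_mm->uxpgd_lock);
    }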
pgtable-generic.c
     70  set_pte_at(vma->vm_mm, address, ptep, entry);  in ptep_set_access_flags()
     93  struct mm_struct *mm = (vma)->vm_mm;  in ptep_clear_flush()
    112  set_pmd_at(vma->vm_mm, address, pmdp, entry);  in pmdp_set_access_flags()
    140  pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);  in pmdp_huge_clear_flush()
    153  pud = pudp_huge_get_and_clear(vma->vm_mm, address, pudp);  in pudp_huge_clear_flush()
    215  pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);  in pmdp_collapse_flush()
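The match at line 93 marks the canonical pattern of this file: derive the mm from the VMA, operate on the page table against that mm, then flush through the VMA. A lightly reduced sketch of ptep_clear_flush() as it reads in 5.10, with comments added:

    pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long address,
                           pte_t *ptep)
    {
            struct mm_struct *mm = (vma)->vm_mm;
            pte_t pte;

            /* Clear the PTE against the owning mm ... */
            pte = ptep_get_and_clear(mm, address, ptep);
            /* ... and flush only if the entry could be TLB-cached. */
            if (pte_accessible(mm, pte))
                    flush_tlb_page(vma, address);
            return pte;
    }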
memory.c
    505  pgd_t *pgd = pgd_offset(vma->vm_mm, addr);  in print_bad_pte()
    803  struct mm_struct *src_mm = src_vma->vm_mm;  in copy_present_page()
    848  set_pte_at(dst_vma->vm_mm, addr, dst_pte, pte);  in copy_present_page()
    861  struct mm_struct *src_mm = src_vma->vm_mm;  in copy_present_pte()
    900  set_pte_at(dst_vma->vm_mm, addr, dst_pte, pte);  in copy_present_pte()
    928  struct mm_struct *dst_mm = dst_vma->vm_mm;  in copy_pte_range()
    929  struct mm_struct *src_mm = src_vma->vm_mm;  in copy_pte_range()
   1035  struct mm_struct *dst_mm = dst_vma->vm_mm;  in copy_pmd_range()
   1036  struct mm_struct *src_mm = src_vma->vm_mm;  in copy_pmd_range()
   1072  struct mm_struct *dst_mm = dst_vma->vm_mm;  in copy_pud_range()
    [all …]
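fork's copy path names both address spaces explicitly: each copy_*_range level (lines 928-929, 1035-1036, 1072) derives dst_mm and src_mm from the two VMAs rather than from current. A hedged skeleton of that derivation; the loop body is reduced to a comment:

    static int copy_pte_range(struct vm_area_struct *dst_vma,
                              struct vm_area_struct *src_vma,
                              pmd_t *dst_pmd, pmd_t *src_pmd,
                              unsigned long addr, unsigned long end)
    {
            struct mm_struct *dst_mm = dst_vma->vm_mm;      /* line 928 */
            struct mm_struct *src_mm = src_vma->vm_mm;      /* line 929 */

            /* ... map and lock both PTE pages, walk [addr, end), and let
             * copy_present_pte() finish each entry with
             * set_pte_at(dst_vma->vm_mm, addr, dst_pte, pte) ... */
            return 0;
    }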
huge_memory.c
    591  if (mem_cgroup_charge(page, vma->vm_mm, gfp)) {  in __do_huge_pmd_anonymous_page()
    599  pgtable = pte_alloc_one(vma->vm_mm);  in __do_huge_pmd_anonymous_page()
    613  vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);  in __do_huge_pmd_anonymous_page()
    619  ret = check_stable_address_space(vma->vm_mm);  in __do_huge_pmd_anonymous_page()
    629  pte_free(vma->vm_mm, pgtable);  in __do_huge_pmd_anonymous_page()
    639  pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);  in __do_huge_pmd_anonymous_page()
    640  set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry);  in __do_huge_pmd_anonymous_page()
    641  add_mm_counter(vma->vm_mm, MM_ANONPAGES, HPAGE_PMD_NR);  in __do_huge_pmd_anonymous_page()
    642  mm_inc_nr_ptes(vma->vm_mm);  in __do_huge_pmd_anonymous_page()
    645  count_memcg_event_mm(vma->vm_mm, THP_FAULT_ALLOC);  in __do_huge_pmd_anonymous_page()
    [all …]
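Lines 639-645 are the commit step of an anonymous THP fault: once the huge PMD entry is ready, the preallocated PTE table is deposited, the PMD installed, and the counters bumped, all against vma->vm_mm. Condensed from __do_huge_pmd_anonymous_page(); error paths and the construction of entry/pgtable are omitted:

            /* Deposit the spare PTE table for a later split ... */
            pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);
            /* ... install the huge mapping ... */
            set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry);
            /* ... and account it to the owning mm. */
            add_mm_counter(vma->vm_mm, MM_ANONPAGES, HPAGE_PMD_NR);
            mm_inc_nr_ptes(vma->vm_mm);
            spin_unlock(vmf->ptl);
            count_memcg_event_mm(vma->vm_mm, THP_FAULT_ALLOC);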
madvise.c
     91  mmap_assert_locked(vma->vm_mm);  in anon_vma_name()
    140  struct mm_struct *mm = vma->vm_mm;  in madvise_update_vma()
    207  orig_pte = pte_offset_map_lock(vma->vm_mm, pmd, start, &ptl);  in swapin_walk_pmd_entry()
    268  struct mm_struct *mm = vma->vm_mm;  in madvise_willneed()
    275  walk_page_range(vma->vm_mm, start, end, &swapin_walk_ops, vma);  in madvise_willneed()
    400  orig_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);  in madvise_cold_or_pageout_pte_range()
    499  walk_page_range(vma->vm_mm, addr, end, &cold_walk_ops, &walk_private);  in madvise_cold_page_range()
    507  struct mm_struct *mm = vma->vm_mm;  in madvise_cold()
    532  walk_page_range(vma->vm_mm, addr, end, &cold_walk_ops, &walk_private);  in madvise_pageout_page_range()
    556  struct mm_struct *mm = vma->vm_mm;  in madvise_pageout()
    [all …]
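The madvise walkers pass vma->vm_mm both to walk_page_range() and to the PTE-lock helpers; nothing here assumes current->mm. The generic lock/unlock shape behind the matches at lines 207 and 400, with the work under the lock assumed:

            pte_t *orig_pte;
            spinlock_t *ptl;

            /* The PTE lock lives in the VMA's mm, not in current->mm. */
            orig_pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
            /* ... inspect or modify the PTEs under ptl ... */
            pte_unmap_unlock(orig_pte, ptl);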
mremap.c
    120  struct mm_struct *mm = vma->vm_mm;  in move_ptes()
    156  flush_tlb_batched_pending(vma->vm_mm);  in move_ptes()
    199  struct mm_struct *mm = vma->vm_mm;  in move_normal_pmd()
    232  old_ptl = pmd_lock(vma->vm_mm, old_pmd);  in move_normal_pmd()
    266  mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma, vma->vm_mm,  in move_page_tables()
    280  old_pmd = get_old_pmd(vma->vm_mm, old_addr);  in move_page_tables()
    283  new_pmd = alloc_new_pmd(vma->vm_mm, vma, new_addr);  in move_page_tables()
    319  if (pte_alloc(new_vma->vm_mm, new_pmd))  in move_page_tables()
    336  struct mm_struct *mm = vma->vm_mm;  in move_vma()
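move_ptes() (lines 120 and 156) shows why the mm matters during mremap: batched TLB invalidations pending against vma->vm_mm must drain before PTEs are physically moved. A hedged skeleton around the two listed lines; locking and the copy loop are reduced to comments:

    static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
                          unsigned long old_addr, unsigned long old_end,
                          struct vm_area_struct *new_vma, pmd_t *new_pmd,
                          unsigned long new_addr, bool need_rmap_locks)
    {
            struct mm_struct *mm = vma->vm_mm;              /* line 120 */

            /* ... take the old and new PTE locks, then: */
            flush_tlb_batched_pending(vma->vm_mm);          /* line 156 */
            /* ... clear each old PTE and set it at the new address ... */
    }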
/kernel/linux/linux-5.10/arch/mips/mm/

tlb-r3k.c
     73  struct mm_struct *mm = vma->vm_mm;  in local_flush_tlb_range()
    154  if (cpu_context(cpu, vma->vm_mm) != 0) {  in local_flush_tlb_page()
    159  printk("[tlbpage<%lu,0x%08lx>]", cpu_context(cpu, vma->vm_mm), page);  in local_flush_tlb_page()
    161  newpid = cpu_context(cpu, vma->vm_mm) & asid_mask;  in local_flush_tlb_page()
    190  if (current->active_mm != vma->vm_mm)  in __update_tlb()
    196  if ((pid != (cpu_context(cpu, vma->vm_mm) & asid_mask)) || (cpu_context(cpu, vma->vm_mm) == 0)) {  in __update_tlb()
    198  (cpu_context(cpu, vma->vm_mm)), pid);  in __update_tlb()
tlb-r4k.c
    109  struct mm_struct *mm = vma->vm_mm;  in local_flush_tlb_range()
    215  if (cpu_context(cpu, vma->vm_mm) != 0) {  in local_flush_tlb_page()
    227  write_c0_memorymapid(cpu_asid(cpu, vma->vm_mm));  in local_flush_tlb_page()
    229  write_c0_entryhi(page | cpu_asid(cpu, vma->vm_mm));  in local_flush_tlb_page()
    306  if (current->active_mm != vma->vm_mm)  in __update_tlb()
    319  pgdp = pgd_offset(vma->vm_mm, address);  in __update_tlb()
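Both MIPS TLB flavours gate every per-page flush on whether the VMA's mm has a live context (ASID) on this CPU; the r4k version also selects between MMID and classic ASID writes (lines 215-229). A reduced sketch; the probe/invalidate sequence and interrupt handling are elided:

    void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
    {
            int cpu = smp_processor_id();

            /* Only flush if this mm ever ran here (has a context). */
            if (cpu_context(cpu, vma->vm_mm) != 0) {
                    page &= (PAGE_MASK << 1);
                    if (cpu_has_mmid)
                            write_c0_memorymapid(cpu_asid(cpu, vma->vm_mm));
                    else
                            write_c0_entryhi(page | cpu_asid(cpu, vma->vm_mm));
                    /* ... tlb_probe(), invalidate on hit, restore EntryHi ... */
            }
    }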
/kernel/linux/linux-5.10/arch/powerpc/mm/book3s64/

radix_hugetlbpage.c
     16  radix__flush_tlb_page_psize(vma->vm_mm, vmaddr, psize);  in radix__flush_hugetlb_page()
     25  radix__local_flush_tlb_page_psize(vma->vm_mm, vmaddr, psize);  in radix__local_flush_hugetlb_page()
     35  radix__flush_tlb_range_psize(vma->vm_mm, start, end, psize);  in radix__flush_hugetlb_tlb_range()
     97  struct mm_struct *mm = vma->vm_mm;  in radix__huge_ptep_modify_prot_commit()
    107  set_huge_pte_at(vma->vm_mm, addr, ptep, pte);  in radix__huge_ptep_modify_prot_commit()
pgtable.c
     42  assert_spin_locked(pmd_lockptr(vma->vm_mm, pmdp));  in pmdp_set_access_flags()
     59  return __pmdp_test_and_clear_young(vma->vm_mm, address, pmdp);  in pmdp_test_and_clear_young()
    111  old_pmd = pmd_hugepage_update(vma->vm_mm, address, pmdp, _PAGE_PRESENT, _PAGE_INVALID);  in pmdp_invalidate()
    123  pmd = pmdp_huge_get_and_clear(vma->vm_mm, addr, pmdp);  in pmdp_huge_get_and_clear_full()
    434  pte_val = pte_update(vma->vm_mm, addr, ptep, _PAGE_PRESENT, _PAGE_INVALID, 0);  in ptep_modify_prot_start()
    446  set_pte_at(vma->vm_mm, addr, ptep, pte);  in ptep_modify_prot_commit()
/kernel/linux/linux-5.10/arch/sh/mm/

tlbflush_32.c
     19  if (vma->vm_mm && cpu_context(cpu, vma->vm_mm) != NO_CONTEXT) {  in local_flush_tlb_page()
     24  asid = cpu_asid(cpu, vma->vm_mm);  in local_flush_tlb_page()
     28  if (vma->vm_mm != current->mm) {  in local_flush_tlb_page()
     42  struct mm_struct *mm = vma->vm_mm;  in local_flush_tlb_range()
/kernel/linux/linux-5.10/arch/riscv/kernel/

vdso.c
    107  if (vma->vm_mm && (vma->vm_start == (long)vma->vm_mm->context.vdso))  in arch_vma_name()
    109  if (vma->vm_mm && (vma->vm_start ==  in arch_vma_name()
    110  (long)vma->vm_mm->context.vdso + PAGE_SIZE))  in arch_vma_name()
/kernel/linux/linux-5.10/arch/arm/mm/

fault-armv.c
     57  set_pte_at(vma->vm_mm, address, ptep, entry);  in do_adjust_pte()
     99  pgd = pgd_offset(vma->vm_mm, address);  in adjust_pte()
    120  ptl = pte_lockptr(vma->vm_mm, pmd);  in adjust_pte()
    136  struct mm_struct *mm = vma->vm_mm;  in make_coherent()
    156  if (mpnt->vm_mm != mm || mpnt == vma)  in make_coherent()
/kernel/linux/linux-5.10/arch/arc/mm/

tlb.c
    322  local_flush_tlb_mm(vma->vm_mm);  in local_flush_tlb_range()
    335  if (asid_mm(vma->vm_mm, cpu) != MM_CTXT_NO_ASID) {  in local_flush_tlb_range()
    337  tlb_entry_erase(start | hw_pid(vma->vm_mm, cpu));  in local_flush_tlb_range()
    388  if (asid_mm(vma->vm_mm, cpu) != MM_CTXT_NO_ASID) {  in local_flush_tlb_page()
    389  tlb_entry_erase((page & PAGE_MASK) | hw_pid(vma->vm_mm, cpu));  in local_flush_tlb_page()
    451  on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_page, &ta, 1);  in flush_tlb_page()
    463  on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_range, &ta, 1);  in flush_tlb_range()
    476  on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_pmd_tlb_range, &ta, 1);  in flush_pmd_tlb_range()
    526  if (current->active_mm != vma->vm_mm)  in create_tlb()
    531  tlb_paranoid_check(asid_mm(vma->vm_mm, smp_processor_id()), vaddr);  in create_tlb()
    [all …]
/kernel/linux/linux-5.10/include/linux/

khugepaged.h
     59  if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags))  in khugepaged_enter()
     64  !test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))  in khugepaged_enter()
     65  if (__khugepaged_enter(vma->vm_mm))  in khugepaged_enter()
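All three matches are one inline function: khugepaged_enter() checks per-mm MMF_ flags and, if THP is allowed for this VMA, registers the whole mm with the khugepaged daemon. Close to the 5.10 inline; the predicate helpers are the real ones from this header:

    static inline int khugepaged_enter(struct vm_area_struct *vma,
                                       unsigned long vm_flags)
    {
            if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags))
                    if ((khugepaged_always() ||
                         (khugepaged_req_madv() && (vm_flags & VM_HUGEPAGE))) &&
                        !(vm_flags & VM_NOHUGEPAGE) &&
                        !test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
                            /* Register the owning mm, not the single VMA. */
                            if (__khugepaged_enter(vma->vm_mm))
                                    return -ENOMEM;
            return 0;
    }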
mmu_notifier.h
    540  __young |= mmu_notifier_clear_flush_young(___vma->vm_mm, \
    553  __young |= mmu_notifier_clear_flush_young(___vma->vm_mm, \
    566  __young |= mmu_notifier_clear_young(___vma->vm_mm, ___address, \
    577  __young |= mmu_notifier_clear_young(___vma->vm_mm, ___address, \
    585  struct mm_struct *___mm = (__vma)->vm_mm; \
    598  struct mm_struct *___mm = (__vma)->vm_mm; \
    611  struct mm_struct *___mm = (__vma)->vm_mm; \
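Each of these macros pairs a page-table primitive with a notifier call on the same mm, so secondary MMUs (KVM and friends) observe the change. The clear_flush_young variant, roughly as it reads in 5.10:

    #define ptep_clear_flush_young_notify(__vma, __address, __ptep)        \
    ({                                                                     \
            int __young;                                                   \
            struct vm_area_struct *___vma = __vma;                         \
            unsigned long ___address = __address;                          \
            /* Clear young in the primary MMU first ... */                 \
            __young = ptep_clear_flush_young(___vma, ___address, __ptep);  \
            /* ... then in any secondary MMU tracking this mm. */          \
            __young |= mmu_notifier_clear_flush_young(___vma->vm_mm,       \
                                                      ___address,          \
                                                      ___address +         \
                                                              PAGE_SIZE);  \
            __young;                                                       \
    })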
/kernel/linux/linux-5.10/arch/arm/kernel/

smp_tlb.c
    202  on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_page,  in flush_tlb_page()
    206  broadcast_tlb_mm_a15_erratum(vma->vm_mm);  in flush_tlb_page()
    228  on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_range,  in flush_tlb_range()
    232  broadcast_tlb_mm_a15_erratum(vma->vm_mm);  in flush_tlb_range()
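When TLB maintenance is not broadcast in hardware, the flush becomes an IPI to exactly the CPUs in the mm's cpumask, followed by the Cortex-A15 erratum workaround on the same mm. flush_tlb_page() from this file, lightly reduced:

    void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
    {
            if (tlb_ops_need_broadcast()) {
                    struct tlb_args ta;

                    ta.ta_vma = vma;
                    ta.ta_start = uaddr;
                    /* IPI only the CPUs that may cache this mm's entries. */
                    on_each_cpu_mask(mm_cpumask(vma->vm_mm),
                                     ipi_flush_tlb_page, &ta, 1);
            } else {
                    __flush_tlb_page(local_flush_tlb_page, vma, uaddr);
            }
            broadcast_tlb_mm_a15_erratum(vma->vm_mm);
    }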
/kernel/linux/linux-5.10/tools/testing/selftests/bpf/progs/

lsm.c
     49  is_stack = (vma->vm_start <= vma->vm_mm->start_stack &&  in BPF_PROG()
     50  vma->vm_end >= vma->vm_mm->start_stack);  in BPF_PROG()
     71  bpf_copy_from_user(args, sizeof(args), (void *)bprm->vma->vm_mm->arg_start);  in BPF_PROG()
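The selftest shows vm_mm dereferenced from BPF: a VMA is classified as the process stack iff it spans the mm's start_stack (lines 49-50). A hedged fragment of such an LSM program; the hook and program name follow the common file_mprotect example, and everything beyond the listed check is an assumption:

    SEC("lsm/file_mprotect")
    int BPF_PROG(mprotect_audit, struct vm_area_struct *vma,
                 unsigned long reqprot, unsigned long prot, int ret)
    {
            int is_stack;

            /* The stack VMA is the one containing mm->start_stack. */
            is_stack = (vma->vm_start <= vma->vm_mm->start_stack &&
                        vma->vm_end >= vma->vm_mm->start_stack);

            /* ... apply the test's policy based on is_stack ... */
            return ret;
    }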
/kernel/linux/linux-5.10/arch/powerpc/mm/book3s32/

tlb.c
    137  flush_range(mp->vm_mm, mp->vm_start, mp->vm_end);  in flush_tlb_mm()
    150  mm = (vmaddr < TASK_SIZE)? vma->vm_mm: &init_mm;  in flush_tlb_page()
    165  flush_range(vma->vm_mm, start, end);  in flush_tlb_range()
/kernel/linux/linux-5.10/arch/m68k/include/asm/

tlbflush.h
     87  if (vma->vm_mm == current->active_mm) {  in flush_tlb_page()
     98  if (vma->vm_mm == current->active_mm)  in flush_tlb_range()
    178  sun3_put_context(vma->vm_mm->context);  in flush_tlb_page()
    195  struct mm_struct *mm = vma->vm_mm;  in flush_tlb_range()
/kernel/linux/linux-5.10/arch/s390/include/asm/

hugetlb.h
     56  huge_ptep_get_and_clear(vma->vm_mm, address, ptep);  in huge_ptep_clear_flush()
     65  huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);  in huge_ptep_set_access_flags()
     66  set_huge_pte_at(vma->vm_mm, addr, ptep, pte);  in huge_ptep_set_access_flags()
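The matches at lines 65-66 sit in huge_ptep_set_access_flags(): on s390 a protection update is expressed as clear-then-set against the VMA's mm. A sketch consistent with the listed lines:

    static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
                                                 unsigned long addr, pte_t *ptep,
                                                 pte_t pte, int dirty)
    {
            int changed = !pte_same(huge_ptep_get(ptep), pte);

            if (changed) {
                    /* Invalidate the old translation, then install the new. */
                    huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
                    set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
            }
            return changed;
    }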
/kernel/linux/linux-5.10/arch/parisc/include/asm/

tlbflush.h
     20  __flush_tlb_range((vma)->vm_mm->context, start, end)
     67  purge_tlb_entries(vma->vm_mm, addr);  in flush_tlb_page()
/kernel/linux/linux-5.10/arch/hexagon/mm/

vm_tlb.c
     28  struct mm_struct *mm = vma->vm_mm;  in flush_tlb_range()
     68  struct mm_struct *mm = vma->vm_mm;  in flush_tlb_page()
/kernel/linux/linux-5.10/arch/riscv/mm/

tlbflush.c
     49  __sbi_tlb_flush_range(mm_cpumask(vma->vm_mm), addr, PAGE_SIZE);  in flush_tlb_page()
     55  __sbi_tlb_flush_range(mm_cpumask(vma->vm_mm), start, end - start);  in flush_tlb_range()
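riscv is the minimal case: both helpers reduce to a single SBI remote-fence call keyed by the mm's CPU mask, essentially verbatim from the listed lines:

    void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
    {
            /* Fence only the harts that may hold this mm's translations. */
            __sbi_tlb_flush_range(mm_cpumask(vma->vm_mm), addr, PAGE_SIZE);
    }

    void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
                         unsigned long end)
    {
            __sbi_tlb_flush_range(mm_cpumask(vma->vm_mm), start, end - start);
    }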
/kernel/linux/linux-5.10/arch/nds32/mm/

tlb.c
     27  ncid = (ocid & ~TLB_MISC_mskCID) | vma->vm_mm->context.id;  in local_flush_tlb_range()
     44  ncid = (ocid & ~TLB_MISC_mskCID) | vma->vm_mm->context.id;  in local_flush_tlb_page()