Annotated excerpts from arch/sparc64/mm/tlb.c, the sparc64 TLB-flush
batching code. Elided regions of the original file are marked /* ... */.
// SPDX-License-Identifier: GPL-2.0
/* arch/sparc64/mm/tlb.c */

#include <linux/mm.h>
/* ... other includes elided ... */
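The code below manipulates a per-CPU struct tlb_batch. For reference, its
shape is roughly the following sketch, paraphrased from
arch/sparc/include/asm/tlbflush_64.h; the exact value of TLB_BATCH_NR and
the field order are assumptions worth checking against the tree:

#define TLB_BATCH_NR    192     /* assumed batch capacity */

struct tlb_batch {
        unsigned int hugepage_shift;    /* page size of queued entries */
        struct mm_struct *mm;           /* address space owning the batch */
        unsigned long tlb_nr;           /* number of queued addresses */
        unsigned long active;           /* set between lazy-MMU enter/leave */
        unsigned long vaddrs[TLB_BATCH_NR];
};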
In flush_tlb_pending(), the per-CPU batch of queued virtual addresses is
drained. A one-entry batch takes the cheaper single-page flush; larger
batches go through the bulk path, which differs between SMP and UP builds:

void flush_tlb_pending(void)
{
        struct tlb_batch *tb = &get_cpu_var(tlb_batch);
        struct mm_struct *mm = tb->mm;

        if (!tb->tlb_nr)
                goto out;
        /* ... */
        if (CTX_VALID(mm->context)) {
                if (tb->tlb_nr == 1) {
                        global_flush_tlb_page(mm, tb->vaddrs[0]);
                } else {
#ifdef CONFIG_SMP
                        smp_flush_tlb_pending(tb->mm, tb->tlb_nr,
                                              &tb->vaddrs[0]);
#else
                        __flush_tlb_pending(CTX_HWBITS(tb->mm->context),
                                            tb->tlb_nr, &tb->vaddrs[0]);
#endif
                }
        }
        tb->tlb_nr = 0;
out:
        put_cpu_var(tlb_batch);
}
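CTX_VALID() and CTX_HWBITS() come from the sparc64 MMU-context layer. A
rough paraphrase (not the verbatim macros; see
arch/sparc/include/asm/mmu_context_64.h) of what they test and extract:

/* Paraphrased sketch: a context is live only while its version field
 * matches the global context-version counter. */
#define CTX_VALID(c)    (!(((c).sparc64_ctx_val ^ tlb_context_cache) & \
                           CTX_VERSION_MASK))
#define CTX_HWBITS(c)   ((c).sparc64_ctx_val & CTX_HWMASK)

If the mm's context version is stale, the mm no longer owns a hardware
context and nothing of it can be in the TLB, so flush_tlb_pending() just
discards the batch.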
arch_enter_lazy_mmu_mode() and arch_leave_lazy_mmu_mode() arm and disarm
batching on the local CPU; leaving lazy mode drains anything still queued:

        tb->active = 1;                 /* enter: start batching */

        if (tb->tlb_nr)                 /* leave: drain pending entries, */
                flush_tlb_pending();
        tb->active = 0;                 /* then stop batching */
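The enter/leave pair is driven by generic mm code around runs of PTE
updates. A hypothetical caller (the loop and locals are illustrative, not
from this file) looks like:

        arch_enter_lazy_mmu_mode();
        for (; addr < end; addr += PAGE_SIZE, ptep++) {
                pte_t old = ptep_get_and_clear(mm, addr, ptep);

                set_pte_at(mm, addr, ptep, pte_wrprotect(old));
                /* each update may reach tlb_batch_add() and queue a flush */
        }
        arch_leave_lazy_mmu_mode();     /* drains via flush_tlb_pending() */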
tlb_batch_add_one() queues a single page invalidation, flushing early
whenever the batch cannot legally grow:

static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr,
                              bool exec, unsigned int hugepage_shift)
{
        struct tlb_batch *tb = &get_cpu_var(tlb_batch);
        unsigned long nr;

        vaddr &= PAGE_MASK;
        if (exec)
                vaddr |= 0x1UL;         /* executable flag rides in bit 0 */

        nr = tb->tlb_nr;
        if (unlikely(nr != 0 && mm != tb->mm)) {
                flush_tlb_pending();    /* batch belongs to another mm */
                nr = 0;
        }
        if (!tb->active) {              /* not in lazy MMU mode: flush now */
                flush_tsb_user_page(mm, vaddr, hugepage_shift);
                global_flush_tlb_page(mm, vaddr);
                goto out;
        }
        if (nr == 0) {
                tb->mm = mm;
                tb->hugepage_shift = hugepage_shift;
        }
        if (tb->hugepage_shift != hugepage_shift) {
                flush_tlb_pending();    /* page size changed mid-batch */
                tb->hugepage_shift = hugepage_shift;
                nr = 0;
        }
        tb->vaddrs[nr] = vaddr;
        tb->tlb_nr = ++nr;
        if (nr >= TLB_BATCH_NR)
                flush_tlb_pending();    /* batch is full */
out:
        put_cpu_var(tlb_batch);
}
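As a standalone illustration of that policy (queue while active; drain
when the batch fills, or when the owning mm changes), here is a minimal
user-space model; every name in it is invented for the sketch:

#include <stdio.h>

#define BATCH_NR 4                      /* tiny capacity for the demo */

struct batch { int owner; int nr; unsigned long vaddrs[BATCH_NR]; };

static void drain(struct batch *b)
{
        for (int i = 0; i < b->nr; i++)
                printf("flush owner=%d vaddr=%#lx\n", b->owner, b->vaddrs[i]);
        b->nr = 0;
}

static void add(struct batch *b, int owner, unsigned long vaddr)
{
        if (b->nr && b->owner != owner)
                drain(b);               /* a batch has a single owner */
        if (b->nr == 0)
                b->owner = owner;
        b->vaddrs[b->nr++] = vaddr;
        if (b->nr == BATCH_NR)
                drain(b);               /* full: flush eagerly */
}

int main(void)
{
        struct batch b = { 0 };

        for (unsigned long v = 0; v < 6 * 8192; v += 8192)
                add(&b, 1, v);          /* drains once at 4 entries */
        add(&b, 2, 0xdead0000UL);       /* owner change forces a drain */
        drain(&b);                      /* final drain, like leave-lazy */
        return 0;
}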
tlb_batch_add() is the entry point used by the PTE-update paths. Before
queueing, it may have to flush D-cache aliases of a dirty file page:

void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
                   pte_t *ptep, pte_t orig, int fullmm,
                   unsigned int hugepage_shift)
{
        /* ... if the old PTE was dirty and maps an aliasing file page: */
                        flush_dcache_page_all(mm, page);
        /* ... */
        if (!fullmm)
                tlb_batch_add_one(mm, vaddr, pte_exec(orig), hugepage_shift);
}
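The elided guard in front of flush_dcache_page_all() compares the cache
color of the physical and virtual addresses. The idea, sketched with an
assumed 8K page size and a single color bit (bit 13), is that two
mappings of one page only alias in a virtually-indexed D-cache when their
colors differ; the helper below is illustrative, not kernel code:

static inline _Bool same_dcache_color(unsigned long paddr,
                                      unsigned long vaddr)
{
        return (((paddr ^ vaddr) >> 13) & 1UL) == 0;
}

Only a color mismatch forces the expensive all-alias flush before the
mapping goes away.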
When a huge PMD covered ordinary 8K PTEs, tlb_batch_pmd_scan() walks them
and queues each valid one at base page size:

static void tlb_batch_pmd_scan(struct mm_struct *mm, unsigned long vaddr,
                               pmd_t pmd)
{
        /* ... for each valid PTE under the PMD: */
                        tlb_batch_add_one(mm, vaddr, exec, PAGE_SHIFT);
        /* ... */
}
__set_pmd_acct() keeps the per-mm huge-mapping counters in step with PMD
changes and queues TLB flushes for the old translation:

static void __set_pmd_acct(struct mm_struct *mm, unsigned long addr,
                           pmd_t orig, pmd_t pmd)
{
        if (mm == &init_mm)
                return;
        /* ... when the huge bit is gained, count the new mapping; the huge
         * zero page is accounted like hugetlb, real THP separately: */
                        mm->context.hugetlb_pte_count++;
                        mm->context.thp_pte_count++;
        /* ... when the huge bit is lost, the matching decrements: */
                        mm->context.hugetlb_pte_count--;
                        mm->context.thp_pte_count--;
        /* ... if 'orig' was a huge mapping, flush both halves (one huge
         * page occupies two REAL_HPAGE-sized hardware TLB entries): */
                tlb_batch_add_one(mm, addr, exec, REAL_HPAGE_SHIFT);
                tlb_batch_add_one(mm, addr + REAL_HPAGE_SIZE, exec,
                                  REAL_HPAGE_SHIFT);
        /* ... otherwise scan the constituent PTEs: */
                tlb_batch_pmd_scan(mm, addr, orig);
}
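The two back-to-back tlb_batch_add_one() calls reflect how sparc64 backs
one huge page with two half-sized hardware TLB entries. Assuming the
usual constants (worth verifying in asm/page_64.h):

#define HPAGE_SHIFT             23      /* 8MB huge page */
#define REAL_HPAGE_SHIFT        22      /* 4MB hardware TLB entry */
#define REAL_HPAGE_SIZE         (1UL << REAL_HPAGE_SHIFT)

/* hence both halves get flushed: addr, and addr + REAL_HPAGE_SIZE */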
set_pmd_at() is the plain store variant; it writes the new PMD and then
lets __set_pmd_acct() do the bookkeeping:

void set_pmd_at(struct mm_struct *mm, unsigned long addr,
                pmd_t *pmdp, pmd_t pmd)
{
        pmd_t orig = *pmdp;

        *pmdp = pmd;
        __set_pmd_acct(mm, addr, orig, pmd);
}
pmdp_establish() must swap the PMD atomically, because another CPU may be
updating it concurrently; it loops until the cmpxchg lands:

        do {
                old = *pmdp;
        } while (cmpxchg64(&pmdp->pmd, old.pmd, pmd.pmd) != old.pmd);
        __set_pmd_acct(vma->vm_mm, address, old, pmd);
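The retry loop is the classic compare-and-swap update. A self-contained
model of the same pattern in portable C11 atomics (all names invented):

#include <stdatomic.h>

/* Atomically install 'new_val' and return what it replaced, retrying if
 * a concurrent updater slips in between the load and the CAS. */
static unsigned long long establish(_Atomic unsigned long long *slot,
                                    unsigned long long new_val)
{
        unsigned long long old = atomic_load(slot);

        while (!atomic_compare_exchange_weak(slot, &old, new_val))
                ;       /* on failure, 'old' is refreshed automatically */
        return old;
}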
In pmdp_invalidate(), called when splitting a THP, the thp counter is
decremented by hand because set_pmd_at() never sees the downgrade:

                (vma->vm_mm)->context.thp_pte_count--;
pgtable_trans_huge_deposit() stashes a preallocated page table under the
huge PMD, linking it through a list_head stored inside the pte page
itself:

void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
                                pgtable_t pgtable)
{
        struct list_head *lh = (struct list_head *) pgtable;

        assert_spin_locked(&mm->page_table_lock);

        if (!pmd_huge_pte(mm, pmdp))
                INIT_LIST_HEAD(lh);
        else
                list_add(lh, (struct list_head *) pmd_huge_pte(mm, pmdp));
        pmd_huge_pte(mm, pmdp) = pgtable;
}
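The trick above is intrusive storage: while a page table is stashed, its
first 16 bytes are reused as the list node, so the stash costs no extra
memory. A minimal model of that aliasing (invented names, assuming the
8-byte PTEs of sparc64):

struct list_head { struct list_head *next, *prev; };

union pgtable_slot {
        unsigned long ptes[2];          /* live use: first two PTE slots */
        struct list_head lh;            /* stashed use: list linkage */
};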
pgtable_trans_huge_withdraw() takes one back, then wipes the two PTE
slots that held the list linkage before handing the table out:

pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
        struct list_head *lh;
        pgtable_t pgtable;

        assert_spin_locked(&mm->page_table_lock);

        pgtable = pmd_huge_pte(mm, pmdp);
        lh = (struct list_head *) pgtable;
        if (list_empty(lh))
                pmd_huge_pte(mm, pmdp) = NULL;
        else {
                pmd_huge_pte(mm, pmdp) = (pgtable_t) lh->next;
                list_del(lh);
        }
        pte_val(pgtable[0]) = 0;
        pte_val(pgtable[1]) = 0;

        return pgtable;
}
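The pair is used by the generic THP code: a pte page is deposited when a
huge PMD is installed, so that a later split can never fail for lack of
memory, and withdrawn when the huge mapping is split or torn down. A
hypothetical sequence (illustrative only):

        /* installing the huge mapping: stash the spare pte page */
        pgtable_trans_huge_deposit(mm, pmdp, pgtable);

        /* ... huge mapping in use ... */

        /* splitting: get the pte page back without allocating (the two
         * slots that held the linkage have just been cleared) */
        pgtable = pgtable_trans_huge_withdraw(mm, pmdp);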