// SPDX-License-Identifier: GPL-2.0

#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/sched.h>
#include <asm/sbi.h>
#include <asm/mmu_context.h>

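/* Flush all local TLB entries tagged with @asid, for any address. */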
static inline void local_flush_tlb_all_asid(unsigned long asid)
{
	__asm__ __volatile__ ("sfence.vma x0, %0"
			:
			: "r" (asid)
			: "memory");
}

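/* Flush the local TLB entry for a single virtual address tagged with @asid. */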
static inline void local_flush_tlb_page_asid(unsigned long addr,
		unsigned long asid)
{
	__asm__ __volatile__ ("sfence.vma %0, %1"
			:
			: "r" (addr), "r" (asid)
			: "memory");
}

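/* Flush the whole TLB on every hart via the SBI remote fence call. */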
void flush_tlb_all(void)
{
	sbi_remote_sfence_vma(NULL, 0, -1);
}

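/*
 * Flush [start, start + size) for @mm. The flush is done locally when the
 * mm has only run on the current CPU and broadcast through the SBI otherwise.
 * A single-entry flush (size <= stride) uses the per-address fence; larger
 * ranges fall back to flushing the whole TLB (or the whole ASID).
 */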
static void __sbi_tlb_flush_range(struct mm_struct *mm, unsigned long start,
				  unsigned long size, unsigned long stride)
{
	struct cpumask *cmask = mm_cpumask(mm);
	struct cpumask hmask;
	unsigned int cpuid;
	bool broadcast;

	if (cpumask_empty(cmask))
		return;

	cpuid = get_cpu();
	/* check if the tlbflush needs to be sent to other CPUs */
	broadcast = cpumask_any_but(cmask, cpuid) < nr_cpu_ids;
	if (static_branch_unlikely(&use_asid_allocator)) {
		/*
		 * With the ASID allocator enabled, flush by ASID so that
		 * only this mm's translations are invalidated.
		 */
		unsigned long asid = atomic_long_read(&mm->context.id) & asid_mask;

		if (broadcast) {
			riscv_cpuid_to_hartid_mask(cmask, &hmask);
			sbi_remote_sfence_vma_asid(cpumask_bits(&hmask),
						   start, size, asid);
		} else if (size <= stride) {
			local_flush_tlb_page_asid(start, asid);
		} else {
			local_flush_tlb_all_asid(asid);
		}
	} else {
		if (broadcast) {
			riscv_cpuid_to_hartid_mask(cmask, &hmask);
			sbi_remote_sfence_vma(cpumask_bits(&hmask),
					      start, size);
		} else if (size <= stride) {
			local_flush_tlb_page(start);
		} else {
			local_flush_tlb_all();
		}
	}

	put_cpu();
}

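/* Flush every entry belonging to @mm; the (unsigned) -1 size forces a full flush. */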
void flush_tlb_mm(struct mm_struct *mm)
{
	__sbi_tlb_flush_range(mm, 0, -1, PAGE_SIZE);
}

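/* Flush the mapping of a single page for the mm backing @vma. */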
void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
	__sbi_tlb_flush_range(vma->vm_mm, addr, PAGE_SIZE, PAGE_SIZE);
}

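/* Flush the range [start, end) for the mm backing @vma at PAGE_SIZE granularity. */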
void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
		     unsigned long end)
{
	__sbi_tlb_flush_range(vma->vm_mm, start, end - start, PAGE_SIZE);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
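/*
 * Like flush_tlb_range(), but a PMD_SIZE stride lets a single huge page
 * mapping be flushed with one per-address fence.
 */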
void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
			 unsigned long end)
{
	__sbi_tlb_flush_range(vma->vm_mm, start, end - start, PMD_SIZE);
}
#endif