// SPDX-License-Identifier: GPL-2.0

#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/sched.h>
#include <linux/hugetlb.h>
#include <linux/mmu_notifier.h>
#include <asm/sbi.h>
#include <asm/mmu_context.h>

/*
 * Flush entire TLB if number of entries to be flushed is greater
 * than the threshold below.
 */
unsigned long tlb_flush_all_threshold __read_mostly = 64;

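/*
 * Flush the range one stride at a time, or flush the whole local TLB
 * for @asid if the number of entries exceeds tlb_flush_all_threshold.
 */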
static void local_flush_tlb_range_threshold_asid(unsigned long start,
						 unsigned long size,
						 unsigned long stride,
						 unsigned long asid)
{
	unsigned long nr_ptes_in_range = DIV_ROUND_UP(size, stride);
	int i;

	if (nr_ptes_in_range > tlb_flush_all_threshold) {
		local_flush_tlb_all_asid(asid);
		return;
	}

	for (i = 0; i < nr_ptes_in_range; ++i) {
		local_flush_tlb_page_asid(start, asid);
		start += stride;
	}
}

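/*
 * Pick the cheapest local flush: a single page when the range fits in
 * one stride, the whole ASID for FLUSH_TLB_MAX_SIZE, otherwise the
 * threshold-based range flush above.
 */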
static inline void local_flush_tlb_range_asid(unsigned long start,
		unsigned long size, unsigned long stride, unsigned long asid)
{
	if (size <= stride)
		local_flush_tlb_page_asid(start, asid);
	else if (size == FLUSH_TLB_MAX_SIZE)
		local_flush_tlb_all_asid(asid);
	else
		local_flush_tlb_range_threshold_asid(start, size, stride, asid);
}

/* Flush a range of kernel pages without broadcasting */
void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	local_flush_tlb_range_asid(start, end - start, PAGE_SIZE, FLUSH_TLB_NO_ASID);
}

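/* IPI callback: flush the entire local TLB. */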
static void __ipi_flush_tlb_all(void *info)
{
	local_flush_tlb_all();
}

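/*
 * Flush the entire TLB on all CPUs: locally when only one CPU is
 * online, via the SBI remote fence when in use, otherwise by IPI.
 */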
void flush_tlb_all(void)
{
	if (num_online_cpus() < 2)
		local_flush_tlb_all();
	else if (riscv_use_sbi_for_rfence())
		sbi_remote_sfence_vma_asid(NULL, 0, FLUSH_TLB_MAX_SIZE, FLUSH_TLB_NO_ASID);
	else
		on_each_cpu(__ipi_flush_tlb_all, NULL, 1);
}

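/* Arguments handed to the range-flush IPI handler below. */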
struct flush_tlb_range_data {
	unsigned long asid;
	unsigned long start;
	unsigned long size;
	unsigned long stride;
};

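/* IPI callback: flush the requested range on the local CPU. */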
static void __ipi_flush_tlb_range_asid(void *info)
{
	struct flush_tlb_range_data *d = info;

	local_flush_tlb_range_asid(d->start, d->size, d->stride, d->asid);
}

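/* Return the ASID of @mm, or FLUSH_TLB_NO_ASID when there is no mm. */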
static inline unsigned long get_mm_asid(struct mm_struct *mm)
{
	return mm ? cntx2asid(atomic_long_read(&mm->context.id)) : FLUSH_TLB_NO_ASID;
}

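/*
 * Common helper: flush @size bytes starting at @start with the given
 * @stride on every CPU in @cmask, using a purely local flush, the SBI
 * remote fence, or an IPI broadcast as appropriate.
 */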
static void __flush_tlb_range(struct mm_struct *mm,
			      const struct cpumask *cmask,
			      unsigned long start, unsigned long size,
			      unsigned long stride)
{
	unsigned long asid = get_mm_asid(mm);
	unsigned int cpu;

	if (cpumask_empty(cmask))
		return;

	cpu = get_cpu();

	/* Check if the TLB flush needs to be sent to other CPUs. */
	if (cpumask_any_but(cmask, cpu) >= nr_cpu_ids) {
		local_flush_tlb_range_asid(start, size, stride, asid);
	} else if (riscv_use_sbi_for_rfence()) {
		sbi_remote_sfence_vma_asid(cmask, start, size, asid);
	} else {
		struct flush_tlb_range_data ftd;

		ftd.asid = asid;
		ftd.start = start;
		ftd.size = size;
		ftd.stride = stride;
		on_each_cpu_mask(cmask, __ipi_flush_tlb_range_asid, &ftd, 1);
	}

	put_cpu();

	if (mm)
		mmu_notifier_arch_invalidate_secondary_tlbs(mm, start, start + size);
}

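/* Flush all user entries for @mm on every CPU in its cpumask. */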
void flush_tlb_mm(struct mm_struct *mm)
{
	__flush_tlb_range(mm, mm_cpumask(mm), 0, FLUSH_TLB_MAX_SIZE, PAGE_SIZE);
}

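/* Flush a user address range of @mm with a caller-supplied page size. */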
void flush_tlb_mm_range(struct mm_struct *mm,
			unsigned long start, unsigned long end,
			unsigned int page_size)
{
	__flush_tlb_range(mm, mm_cpumask(mm), start, end - start, page_size);
}

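/* Flush a single user page. */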
void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
	__flush_tlb_range(vma->vm_mm, mm_cpumask(vma->vm_mm),
			  addr, PAGE_SIZE, PAGE_SIZE);
}

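/*
 * Flush a user address range, using the huge page size as the stride
 * for hugetlb VMAs.
 */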
void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
		     unsigned long end)
{
	unsigned long stride_size;

	if (!is_vm_hugetlb_page(vma)) {
		stride_size = PAGE_SIZE;
	} else {
		stride_size = huge_page_size(hstate_vma(vma));

		/*
		 * As stated in the privileged specification, every PTE in a
		 * NAPOT region must be invalidated, so reset the stride in that
		 * case.
		 */
		if (has_svnapot()) {
			if (stride_size >= PGDIR_SIZE)
				stride_size = PGDIR_SIZE;
			else if (stride_size >= P4D_SIZE)
				stride_size = P4D_SIZE;
			else if (stride_size >= PUD_SIZE)
				stride_size = PUD_SIZE;
			else if (stride_size >= PMD_SIZE)
				stride_size = PMD_SIZE;
			else
				stride_size = PAGE_SIZE;
		}
	}

	__flush_tlb_range(vma->vm_mm, mm_cpumask(vma->vm_mm),
			  start, end - start, stride_size);
}

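/* Flush a range of kernel pages on all online CPUs. */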
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	__flush_tlb_range(NULL, cpu_online_mask,
			  start, end - start, PAGE_SIZE);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
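/* Flush a range mapped with PMD-sized (transparent huge) pages. */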
void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
			unsigned long end)
{
	__flush_tlb_range(vma->vm_mm, mm_cpumask(vma->vm_mm),
			  start, end - start, PMD_SIZE);
}
#endif

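/* TLB flushes can always be deferred and batched on RISC-V. */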
bool arch_tlbbatch_should_defer(struct mm_struct *mm)
{
	return true;
}

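/*
 * Record a deferred flush for @uaddr by accumulating @mm's cpumask
 * into the batch; secondary MMUs are notified immediately.
 */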
void arch_tlbbatch_add_pending(struct arch_tlbflush_unmap_batch *batch,
			       struct mm_struct *mm,
			       unsigned long uaddr)
{
	unsigned long start = uaddr & PAGE_MASK;

	cpumask_or(&batch->cpumask, &batch->cpumask, mm_cpumask(mm));
	mmu_notifier_arch_invalidate_secondary_tlbs(mm, start, start + PAGE_SIZE);
}

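/* Flush @mm now if batched flushes are still pending for it. */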
void arch_flush_tlb_batched_pending(struct mm_struct *mm)
{
	flush_tlb_mm(mm);
}

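/* Perform the deferred flush on every CPU accumulated in the batch. */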
void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
{
	__flush_tlb_range(NULL, &batch->cpumask,
			  0, FLUSH_TLB_MAX_SIZE, PAGE_SIZE);
	cpumask_clear(&batch->cpumask);
}