#ifndef _ASM_X86_TLBFLUSH_H
#define _ASM_X86_TLBFLUSH_H

#include <linux/mm.h>
#include <linux/sched.h>

#include <asm/processor.h>
#include <asm/cpufeature.h>
#include <asm/special_insns.h>
#include <asm/smp.h>

static inline void __invpcid(unsigned long pcid, unsigned long addr,
			     unsigned long type)
{
	struct { u64 d[2]; } desc = { { pcid, addr } };

	/*
	 * The memory clobber is because the whole point is to invalidate
	 * stale TLB entries and, especially if we're flushing global
	 * mappings, we don't want the compiler to reorder any subsequent
	 * memory accesses before the TLB flush.
	 *
	 * The hex opcode is invpcid (%ecx), %eax in 32-bit mode and
	 * invpcid (%rcx), %rax in long mode.
	 */
	asm volatile (".byte 0x66, 0x0f, 0x38, 0x82, 0x01"
		      : : "m" (desc), "a" (type), "c" (&desc) : "memory");
}
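
/*
 * Illustrative sketch, not part of the original header: the two-qword
 * descriptor above follows the INVPCID memory-operand layout, with the
 * PCID in bits 11:0 of the first qword (remaining bits must be zero)
 * and the linear address in the second qword.  A hypothetical
 * single-address flush of one kernel address under PCID 0 would be:
 *
 *	__invpcid(0, (unsigned long)addr, INVPCID_TYPE_INDIV_ADDR);
 *
 * The named wrappers below are the preferred spellings.
 */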

#define INVPCID_TYPE_INDIV_ADDR		0
#define INVPCID_TYPE_SINGLE_CTXT	1
#define INVPCID_TYPE_ALL_INCL_GLOBAL	2
#define INVPCID_TYPE_ALL_NON_GLOBAL	3

/* Flush one address for a given PCID, not including globals. */
static inline void invpcid_flush_one(unsigned long pcid,
				     unsigned long addr)
{
	__invpcid(pcid, addr, INVPCID_TYPE_INDIV_ADDR);
}

/* Flush all mappings for a given PCID, not including globals. */
static inline void invpcid_flush_single_context(unsigned long pcid)
{
	__invpcid(pcid, 0, INVPCID_TYPE_SINGLE_CTXT);
}

/* Flush all mappings, including globals, for all PCIDs. */
static inline void invpcid_flush_all(void)
{
	__invpcid(0, 0, INVPCID_TYPE_ALL_INCL_GLOBAL);
}

/* Flush all mappings for all PCIDs except globals. */
static inline void invpcid_flush_all_nonglobals(void)
{
	__invpcid(0, 0, INVPCID_TYPE_ALL_NON_GLOBAL);
}

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#define __flush_tlb() __native_flush_tlb()
#define __flush_tlb_global() __native_flush_tlb_global()
#define __flush_tlb_single(addr) __native_flush_tlb_single(addr)
#endif

struct tlb_state {
	struct mm_struct *active_mm;	/* mm this CPU is currently running */
	int state;			/* TLBSTATE_OK, TLBSTATE_LAZY, or 0 when unset */
	/* last user mm's ctx id */
	u64 last_ctx_id;

	/*
	 * Access to this CR4 shadow and to H/W CR4 is protected by
	 * disabling interrupts when modifying either one.
	 */
	unsigned long cr4;
};
DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate);

/* Initialize cr4 shadow for this CPU. */
static inline void cr4_init_shadow(void)
{
	this_cpu_write(cpu_tlbstate.cr4, __read_cr4());
}

/* Set in this cpu's CR4. */
static inline void cr4_set_bits(unsigned long mask)
{
	unsigned long cr4;

	cr4 = this_cpu_read(cpu_tlbstate.cr4);
	if ((cr4 | mask) != cr4) {
		cr4 |= mask;
		this_cpu_write(cpu_tlbstate.cr4, cr4);
		__write_cr4(cr4);
	}
}

/* Clear in this cpu's CR4. */
static inline void cr4_clear_bits(unsigned long mask)
{
	unsigned long cr4;

	cr4 = this_cpu_read(cpu_tlbstate.cr4);
	if ((cr4 & ~mask) != cr4) {
		cr4 &= ~mask;
		this_cpu_write(cpu_tlbstate.cr4, cr4);
		__write_cr4(cr4);
	}
}

/* Read the CR4 shadow. */
static inline unsigned long cr4_read_shadow(void)
{
	return this_cpu_read(cpu_tlbstate.cr4);
}
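
/*
 * Illustrative sketch, not part of the original header: per the struct
 * comment above, callers must not be interrupted between reading the
 * shadow and writing the hardware register, which is why these helpers
 * read-modify-write the per-cpu shadow and CR4 together.  A hypothetical
 * caller toggling CR4.TSD would do:
 *
 *	local_irq_disable();
 *	cr4_set_bits(X86_CR4_TSD);
 *	...
 *	cr4_clear_bits(X86_CR4_TSD);
 *	local_irq_enable();
 */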

/*
 * Save some of the cr4 feature set we're using (e.g. the Pentium 4MB
 * enable and the PPro Global page enable), so that any CPUs that boot
 * up after us can get the correct flags.  This should only be used
 * during boot on the boot cpu.
 */
extern unsigned long mmu_cr4_features;
extern u32 *trampoline_cr4_features;

static inline void cr4_set_bits_and_update_boot(unsigned long mask)
{
	mmu_cr4_features |= mask;
	if (trampoline_cr4_features)
		*trampoline_cr4_features = mmu_cr4_features;
	cr4_set_bits(mask);
}
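
/*
 * Illustrative sketch, not part of the original header: boot code that
 * turns on a feature for every CPU, present and future, records it in
 * mmu_cr4_features via this helper, e.g. hypothetically:
 *
 *	cr4_set_bits_and_update_boot(X86_CR4_PSE);
 *
 * Secondary CPUs and the realmode trampoline then pick the bit up from
 * mmu_cr4_features/trampoline_cr4_features when they start.
 */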

/*
 * Declare a couple of kaiser interfaces here for convenience,
 * to avoid the need for asm/kaiser.h in unexpected places.
 */
#ifdef CONFIG_PAGE_TABLE_ISOLATION
extern int kaiser_enabled;
extern void kaiser_setup_pcid(void);
extern void kaiser_flush_tlb_on_return_to_user(void);
#else
#define kaiser_enabled 0
static inline void kaiser_setup_pcid(void)
{
}
static inline void kaiser_flush_tlb_on_return_to_user(void)
{
}
#endif

static inline void __native_flush_tlb(void)
{
	/*
	 * If current->mm == NULL then we borrow a mm which may change
	 * during a task switch and therefore we must not be preempted
	 * while we write CR3 back:
	 */
	preempt_disable();
	if (kaiser_enabled)
		kaiser_flush_tlb_on_return_to_user();
	native_write_cr3(native_read_cr3());
	preempt_enable();
}
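
/*
 * Illustrative note, not part of the original header: rewriting CR3
 * with its current value flushes the non-global TLB entries for the
 * current PCID only.  Under KAISER the user copy of the address space
 * lives in a separate PCID, so kaiser_flush_tlb_on_return_to_user()
 * above arranges for the user PCID to be flushed on the next return to
 * userspace rather than immediately here.
 */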

static inline void __native_flush_tlb_global_irq_disabled(void)
{
	unsigned long cr4;

	cr4 = this_cpu_read(cpu_tlbstate.cr4);
	if (cr4 & X86_CR4_PGE) {
		/* clear PGE and flush TLB of all entries */
		native_write_cr4(cr4 & ~X86_CR4_PGE);
		/* restore PGE as it was before */
		native_write_cr4(cr4);
	} else {
		/* do it with cr3, letting kaiser flush user PCID */
		__native_flush_tlb();
	}
}

static inline void __native_flush_tlb_global(void)
{
	unsigned long flags;

	if (this_cpu_has(X86_FEATURE_INVPCID)) {
		/*
		 * Using INVPCID is considerably faster than a pair of
		 * writes to CR4 sandwiched inside an IRQ flag
		 * save/restore.
		 *
		 * Note, this works with CR4.PCIDE=0 or 1.
		 */
		invpcid_flush_all();
		return;
	}

	/*
	 * Read-modify-write to CR4 - protect it from preemption and
	 * from interrupts.  (Use the raw variant because this code can
	 * be called from deep inside debugging code.)
	 */
	raw_local_irq_save(flags);
	__native_flush_tlb_global_irq_disabled();
	raw_local_irq_restore(flags);
}

static inline void __native_flush_tlb_single(unsigned long addr)
{
	/*
	 * SIMICS #GP's if you run INVPCID with type 2/3
	 * and X86_CR4_PCIDE clear.  Shame!
	 *
	 * The ASIDs used below are hard-coded, and the INVPCID types
	 * that take a PCID operand (0/1) must not be used with a
	 * non-zero PCID before CR4.PCIDE=1.  Just use invlpg in the
	 * case we are called early.
	 */
	if (!this_cpu_has(X86_FEATURE_INVPCID_SINGLE)) {
		if (kaiser_enabled)
			kaiser_flush_tlb_on_return_to_user();
		asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
		return;
	}

	/*
	 * Flush the address out of both PCIDs.
	 *
	 * An optimization here might be to determine addresses
	 * that are only kernel-mapped and only flush the kernel
	 * ASID.  But, userspace flushes are probably much more
	 * important performance-wise.
	 *
	 * Make sure to do only a single invpcid when KAISER is
	 * disabled and we have only a single ASID.
	 */
	if (kaiser_enabled)
		invpcid_flush_one(X86_CR3_PCID_ASID_USER, addr);
	invpcid_flush_one(X86_CR3_PCID_ASID_KERN, addr);
}
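
/*
 * Illustrative trace, not part of the original header: with KAISER
 * enabled and INVPCID_SINGLE available, one call flushes the page from
 * both copies of the address space:
 *
 *	__native_flush_tlb_single(addr);
 *		-> invpcid_flush_one(X86_CR3_PCID_ASID_USER, addr);
 *		-> invpcid_flush_one(X86_CR3_PCID_ASID_KERN, addr);
 *
 * With kaiser_enabled == 0 there is only the kernel ASID and a single
 * INVPCID suffices.
 */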

static inline void __flush_tlb_all(void)
{
	__flush_tlb_global();
	/*
	 * Note: if we somehow had PCID but not PGE, then this wouldn't
	 * work -- we'd end up flushing kernel translations for the
	 * current ASID but we might fail to flush kernel translations
	 * for other cached ASIDs.
	 *
	 * To avoid this issue, we force PCID off if PGE is off.
	 */
}

static inline void __flush_tlb_one(unsigned long addr)
{
	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE);
	__flush_tlb_single(addr);
}

#define TLB_FLUSH_ALL	-1UL

/*
 * TLB flushing:
 *
 *  - flush_tlb_all() flushes all processes' TLBs
 *  - flush_tlb_mm(mm) flushes the specified mm context's TLBs
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
 *  - flush_tlb_others(cpumask, mm, start, end) flushes TLBs on other cpus
 *
 * ...but the i386 has somewhat limited tlb flushing capabilities,
 * and page-granular flushes are available only on i486 and up.
 * (See the illustrative sketch below.)
 */

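/*
 * Illustrative sketch, not part of the original header: code that has
 * just changed page-table entries for a hypothetical vma picks the
 * narrowest helper that covers the modification, e.g.:
 *
 *	flush_tlb_page(vma, addr);		for one page
 *	flush_tlb_range(vma, start, end);	for a range in one mm
 *	flush_tlb_kernel_range(start, end);	for kernel mappings
 *
 * flush_tlb_page() and flush_tlb_range() both funnel into
 * flush_tlb_mm_range() below.
 */
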
#define local_flush_tlb() __flush_tlb()

#define flush_tlb_mm(mm)	flush_tlb_mm_range(mm, 0UL, TLB_FLUSH_ALL, 0UL)

#define flush_tlb_range(vma, start, end)	\
		flush_tlb_mm_range(vma->vm_mm, start, end, vma->vm_flags)

extern void flush_tlb_all(void);
extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
				unsigned long end, unsigned long vmflag);
extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);

static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long a)
{
	flush_tlb_mm_range(vma->vm_mm, a, a + PAGE_SIZE, VM_NONE);
}

void native_flush_tlb_others(const struct cpumask *cpumask,
			     struct mm_struct *mm,
			     unsigned long start, unsigned long end);

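/*
 * Illustrative note, not part of the original header: TLBSTATE_OK means
 * this CPU is actively using cpu_tlbstate.active_mm; TLBSTATE_LAZY
 * means the CPU is in lazy TLB mode, still holding a borrowed mm, and
 * may stop using that mm instead of servicing flushes for it.
 */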
#define TLBSTATE_OK	1
#define TLBSTATE_LAZY	2

static inline void reset_lazy_tlbstate(void)
{
	this_cpu_write(cpu_tlbstate.state, 0);
	this_cpu_write(cpu_tlbstate.active_mm, &init_mm);
}

#ifndef CONFIG_PARAVIRT
#define flush_tlb_others(mask, mm, start, end)	\
	native_flush_tlb_others(mask, mm, start, end)
#endif

#endif /* _ASM_X86_TLBFLUSH_H */