#include <linux/init.h>

#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/debugfs.h>

#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/nospec-branch.h>
#include <asm/cache.h>
#include <asm/apic.h>
#include <asm/uv/uv.h>
#include <asm/kaiser.h>

/*
 *	TLB flushing, formerly SMP-only
 *		c/o Linus Torvalds.
 *
 *	These mean you can really definitely utterly forget about
 *	writing to user space from interrupts. (It's not allowed anyway).
 *
 *	Optimizations Manfred Spraul <manfred@colorfullife.com>
 *
 *	More scalable flush, from Andi Kleen
 *
 *	Implement flush IPI by CALL_FUNCTION_VECTOR, Alex Shi
 */

/*
 * Use bit 0 to mangle the TIF_SPEC_IB state into the mm pointer which is
 * stored in cpu_tlbstate.last_user_mm_ibpb.
 */
#define LAST_USER_MM_IBPB	0x1UL
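
/*
 * For illustration (the address below is hypothetical): with an mm_struct
 * at 0xffff888012345000 and TIF_SPEC_IB set on the incoming task,
 * mm_mangle_tif_spec_ib() below yields 0xffff888012345001, i.e. the mm
 * pointer with LAST_USER_MM_IBPB set in bit 0; with TIF_SPEC_IB clear,
 * bit 0 stays 0.  This works because mm_struct allocations are at least
 * word aligned, so bit 0 of the pointer is always free.
 */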

atomic64_t last_mm_ctx_id = ATOMIC64_INIT(1);

struct flush_tlb_info {
	struct mm_struct *flush_mm;
	unsigned long flush_start;
	unsigned long flush_end;
};

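/*
 * Switch CR3 to the new mm's page tables.  With KAISER/PCID enabled, the
 * kernel-side TLB entries for the shared PCID are flushed here, while the
 * user-side entries are flushed on the next return to user space.
 */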
static void load_new_mm_cr3(pgd_t *pgdir)
{
	unsigned long new_mm_cr3 = __pa(pgdir);

	if (kaiser_enabled) {
		/*
		 * We reuse the same PCID for different tasks, so we must
		 * flush all the entries for the PCID out when we change tasks.
		 * Flush KERN below, flush USER when returning to userspace in
		 * kaiser's SWITCH_USER_CR3 (_SWITCH_TO_USER_CR3) macro.
		 *
		 * invpcid_flush_single_context(X86_CR3_PCID_ASID_USER) could
		 * do it here, but can only be used if X86_FEATURE_INVPCID is
		 * available - and many machines support pcid without invpcid.
		 *
		 * If X86_CR3_PCID_KERN_FLUSH actually added something, then it
		 * would be needed in the write_cr3() below - if PCIDs enabled.
		 */
		BUILD_BUG_ON(X86_CR3_PCID_KERN_FLUSH);
		kaiser_flush_tlb_on_return_to_user();
	}

	/*
	 * Caution: many callers of this function expect
	 * that load_cr3() is serializing and orders TLB
	 * fills with respect to the mm_cpumask writes.
	 */
	write_cr3(new_mm_cr3);
}

/*
 * We cannot call mmdrop() because we are in interrupt context;
 * instead, update mm->cpu_vm_mask.
 */
void leave_mm(int cpu)
{
	struct mm_struct *active_mm = this_cpu_read(cpu_tlbstate.active_mm);
	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
		BUG();
	if (cpumask_test_cpu(cpu, mm_cpumask(active_mm))) {
		cpumask_clear_cpu(cpu, mm_cpumask(active_mm));
		load_new_mm_cr3(swapper_pg_dir);
		/*
		 * This gets called in the idle path where RCU
		 * functions differently.  Tracing normally
		 * uses RCU, so we have to call the tracepoint
		 * specially here.
		 */
		trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
	}
}
EXPORT_SYMBOL_GPL(leave_mm);

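/*
 * Switch the CPU to a new mm.  This is the interrupt-safe wrapper: it
 * disables interrupts around switch_mm_irqs_off(), which does the work.
 */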
void switch_mm(struct mm_struct *prev, struct mm_struct *next,
	       struct task_struct *tsk)
{
	unsigned long flags;

	local_irq_save(flags);
	switch_mm_irqs_off(prev, next, tsk);
	local_irq_restore(flags);
}

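/*
 * Fold the incoming task's TIF_SPEC_IB state into bit 0 of its mm
 * pointer, producing the value that is compared against
 * cpu_tlbstate.last_user_mm_ibpb in cond_ibpb() below.
 */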
static inline unsigned long mm_mangle_tif_spec_ib(struct task_struct *next)
{
	unsigned long next_tif = task_thread_info(next)->flags;
	unsigned long ibpb = (next_tif >> TIF_SPEC_IB) & LAST_USER_MM_IBPB;

	return (unsigned long)next->mm | ibpb;
}

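/*
 * Issue an Indirect Branch Prediction Barrier (IBPB) when switching to a
 * different user process, depending on the selected mitigation mode:
 * conditional (only if TIF_SPEC_IB is involved) or always.
 */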
static void cond_ibpb(struct task_struct *next)
{
	if (!next || !next->mm)
		return;

	/*
	 * Both the conditional and the always IBPB modes use the mm
	 * pointer to avoid the IBPB when switching between tasks of the
	 * same process. Using the mm pointer instead of mm->context.ctx_id
	 * opens a hypothetical hole vs. mm_struct reuse, which is more or
	 * less impossible to control by an attacker. Aside from that, it
	 * would only affect the first schedule, so the theoretically
	 * exposed data is not really interesting.
	 */
	if (static_branch_likely(&switch_mm_cond_ibpb)) {
		unsigned long prev_mm, next_mm;

		/*
		 * This is a bit more complex than the always mode because
		 * it has to handle two cases:
		 *
		 * 1) Switch from a user space task (potential attacker)
		 *    which has TIF_SPEC_IB set to a user space task
		 *    (potential victim) which has TIF_SPEC_IB not set.
		 *
		 * 2) Switch from a user space task (potential attacker)
		 *    which has TIF_SPEC_IB not set to a user space task
		 *    (potential victim) which has TIF_SPEC_IB set.
		 *
		 * This could be done by unconditionally issuing IBPB when
		 * a task which has TIF_SPEC_IB set is either scheduled in
		 * or out, but that results in two flushes when:
		 *
		 * - the same user space task is scheduled out and later
		 *   scheduled in again and only a kernel thread ran in
		 *   between.
		 *
		 * - a user space task belonging to the same process is
		 *   scheduled in after a kernel thread ran in between.
		 *
		 * - a user space task belonging to the same process is
		 *   scheduled in immediately.
		 *
		 * Optimize this with reasonably small overhead for the
		 * above cases. Mangle the TIF_SPEC_IB bit into the mm
		 * pointer of the incoming task which is stored in
		 * cpu_tlbstate.last_user_mm_ibpb for comparison.
		 */
		next_mm = mm_mangle_tif_spec_ib(next);
		prev_mm = this_cpu_read(cpu_tlbstate.last_user_mm_ibpb);

		/*
		 * Issue IBPB only if the mms are different and one or
		 * both have the IBPB bit set.
		 */
		if (next_mm != prev_mm &&
		    (next_mm | prev_mm) & LAST_USER_MM_IBPB)
			indirect_branch_prediction_barrier();

		this_cpu_write(cpu_tlbstate.last_user_mm_ibpb, next_mm);
	}

	if (static_branch_unlikely(&switch_mm_always_ibpb)) {
		/*
		 * Only flush when switching to a user space task with a
		 * different context than the user space task which ran
		 * last on this CPU.
		 */
		if (this_cpu_read(cpu_tlbstate.last_user_mm) != next->mm) {
			indirect_branch_prediction_barrier();
			this_cpu_write(cpu_tlbstate.last_user_mm, next->mm);
		}
	}
}

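/*
 * The body of switch_mm(); interrupts must be disabled by the caller.
 * Handles both the real mm switch (prev != next) and the lazy-TLB case
 * where this CPU re-joins an mm it was previously running.
 */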
void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
			struct task_struct *tsk)
{
	unsigned cpu = smp_processor_id();

	if (likely(prev != next)) {
		/*
		 * Avoid user/user BTB poisoning by flushing the branch
		 * predictor when switching between processes. This stops
		 * one process from doing Spectre-v2 attacks on another.
		 */
		cond_ibpb(tsk);

		this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
		this_cpu_write(cpu_tlbstate.active_mm, next);
		cpumask_set_cpu(cpu, mm_cpumask(next));

		/*
		 * Re-load page tables.
		 *
		 * This logic has an ordering constraint:
		 *
		 *  CPU 0: Write to a PTE for 'next'
		 *  CPU 0: load bit 1 in mm_cpumask.  if nonzero, send IPI.
		 *  CPU 1: set bit 1 in next's mm_cpumask
		 *  CPU 1: load from the PTE that CPU 0 writes (implicit)
		 *
		 * We need to prevent an outcome in which CPU 1 observes
		 * the new PTE value and CPU 0 observes bit 1 clear in
		 * mm_cpumask.  (If that occurs, then the IPI will never
		 * be sent, and CPU 0's TLB will contain a stale entry.)
		 *
		 * The bad outcome can occur if either CPU's load is
		 * reordered before that CPU's store, so both CPUs must
		 * execute full barriers to prevent this from happening.
		 *
		 * Thus, switch_mm needs a full barrier between the
		 * store to mm_cpumask and any operation that could load
		 * from next->pgd.  TLB fills are special and can happen
		 * due to instruction fetches or for no reason at all,
		 * and neither LOCK nor MFENCE orders them.
		 * Fortunately, load_cr3() is serializing and gives the
		 * ordering guarantee we need.
		 */
		load_new_mm_cr3(next->pgd);

		trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);

		/* Stop flush ipis for the previous mm */
		cpumask_clear_cpu(cpu, mm_cpumask(prev));

		/* Load per-mm CR4 state */
		load_mm_cr4(next);

#ifdef CONFIG_MODIFY_LDT_SYSCALL
		/*
		 * Load the LDT, if the LDT is different.
		 *
		 * It's possible that prev->context.ldt doesn't match
		 * the LDT register.  This can happen if leave_mm(prev)
		 * was called and then modify_ldt changed
		 * prev->context.ldt but suppressed an IPI to this CPU.
		 * In this case, prev->context.ldt != NULL, because we
		 * never set context.ldt to NULL while the mm still
		 * exists.  That means that next->context.ldt !=
		 * prev->context.ldt, because mms never share an LDT.
		 */
		if (unlikely(prev->context.ldt != next->context.ldt))
			load_mm_ldt(next);
#endif
	} else {
		this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
		BUG_ON(this_cpu_read(cpu_tlbstate.active_mm) != next);

		if (!cpumask_test_cpu(cpu, mm_cpumask(next))) {
			/*
			 * On established mms, the mm_cpumask is only changed
			 * from irq context, from ptep_clear_flush() while in
			 * lazy tlb mode, and here. Irqs are blocked during
			 * schedule, protecting us from simultaneous changes.
			 */
			cpumask_set_cpu(cpu, mm_cpumask(next));

			/*
			 * We were in lazy tlb mode and leave_mm disabled
			 * tlb flush IPI delivery. We must reload CR3
			 * to make sure we are not using freed page tables.
			 *
			 * As above, load_cr3() is serializing and orders TLB
			 * fills with respect to the mm_cpumask write.
			 */
			load_new_mm_cr3(next->pgd);
			trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
			load_mm_cr4(next);
			load_mm_ldt(next);
		}
	}
}

/*
 * The flush IPI assumes that a thread switch happens in this order:
 * [cpu0: the cpu that switches]
 * 1) switch_mm() either 1a) or 1b)
 * 1a) thread switch to a different mm
 * 1a1) set cpu_tlbstate to TLBSTATE_OK
 *	Now the tlb flush IPI handler flush_tlb_func won't call leave_mm
 *	if cpu0 was in lazy tlb mode.
 * 1a2) update cpu active_mm
 *	Now cpu0 accepts tlb flushes for the new mm.
 * 1a3) cpu_set(cpu, new_mm->cpu_vm_mask);
 *	Now the other cpus will send tlb flush ipis.
 * 1a4) change cr3.
 * 1a5) cpu_clear(cpu, old_mm->cpu_vm_mask);
 *	Stop ipi delivery for the old mm. This is not synchronized with
 *	the other cpus, but flush_tlb_func ignores flush ipis for the wrong
 *	mm, and in the worst case we perform a superfluous tlb flush.
 * 1b) thread switch without mm change
 *	cpu active_mm is correct, cpu0 already handles flush ipis.
 * 1b1) set cpu_tlbstate to TLBSTATE_OK
 * 1b2) test_and_set the cpu bit in cpu_vm_mask.
 *	Atomically set the bit [other cpus will start sending flush ipis],
 *	and test the bit.
 * 1b3) if the bit was 0: leave_mm was called, flush the tlb.
 * 2) switch %%esp, i.e. current
 *
 * The interrupt must handle 2 special cases:
 * - cr3 is changed before %%esp, i.e. it cannot use current->{active_,}mm.
 * - the cpu performs speculative tlb reads, i.e. even if the cpu only
 *   runs in kernel space, the cpu could load tlb entries for user space
 *   pages.
 *
 * The good news is that cpu_tlbstate is local to each cpu, no
 * write/read ordering problems.
 */

/*
 * TLB flush function:
 * 1) Flush the tlb entries if the cpu uses the mm that's being flushed.
 * 2) Leave the mm if we are in the lazy tlb mode.
 */
static void flush_tlb_func(void *info)
{
	struct flush_tlb_info *f = info;

	inc_irq_stat(irq_tlb_count);

	if (f->flush_mm && f->flush_mm != this_cpu_read(cpu_tlbstate.active_mm))
		return;

	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK) {
		if (f->flush_end == TLB_FLUSH_ALL) {
			local_flush_tlb();
			trace_tlb_flush(TLB_REMOTE_SHOOTDOWN, TLB_FLUSH_ALL);
		} else {
			unsigned long addr;
			unsigned long nr_pages =
				(f->flush_end - f->flush_start) / PAGE_SIZE;
			addr = f->flush_start;
			while (addr < f->flush_end) {
				__flush_tlb_single(addr);
				addr += PAGE_SIZE;
			}
			trace_tlb_flush(TLB_REMOTE_SHOOTDOWN, nr_pages);
		}
	} else
		leave_mm(smp_processor_id());
}

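/*
 * Flush the TLB of all other CPUs in @cpumask that are running @mm, for
 * the address range [start, end), by sending them flush_tlb_func() via
 * IPI (or via the UV hardware offload on SGI UV systems).
 */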
void native_flush_tlb_others(const struct cpumask *cpumask,
				 struct mm_struct *mm, unsigned long start,
				 unsigned long end)
{
	struct flush_tlb_info info;

	info.flush_mm = mm;
	info.flush_start = start;
	info.flush_end = end;

	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH);
	if (end == TLB_FLUSH_ALL)
		trace_tlb_flush(TLB_REMOTE_SEND_IPI, TLB_FLUSH_ALL);
	else
		trace_tlb_flush(TLB_REMOTE_SEND_IPI,
				(end - start) >> PAGE_SHIFT);

	if (is_uv_system()) {
		unsigned int cpu;

		cpu = smp_processor_id();
		cpumask = uv_flush_tlb_others(cpumask, mm, start, end, cpu);
		if (cpumask)
			smp_call_function_many(cpumask, flush_tlb_func,
								&info, 1);
		return;
	}
	smp_call_function_many(cpumask, flush_tlb_func, &info, 1);
}

/*
 * See Documentation/x86/tlb.txt for details.  We choose 33
 * because it is large enough to cover the vast majority (at
 * least 95%) of allocations, and is small enough that we are
 * confident it will not cause too much overhead.  Each single
 * flush is about 100 ns, so this caps the maximum overhead at
 * _about_ 3,000 ns.
 *
 * This is in units of pages.
 */
static unsigned long tlb_single_page_flush_ceiling __read_mostly = 33;

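/*
 * Flush the TLB for the user address range [start, end) of @mm on the
 * local CPU and on every other CPU currently running @mm.  Ranges larger
 * than tlb_single_page_flush_ceiling pages (or huge-page ranges) are
 * promoted to a full TLB flush.
 *
 * Illustrative call (the names are hypothetical, not from this file): a
 * caller unmapping part of a VMA might do
 *
 *	flush_tlb_mm_range(vma->vm_mm, unmap_start, unmap_end,
 *			   vma->vm_flags);
 */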
void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
				unsigned long end, unsigned long vmflag)
{
	unsigned long addr;
	/* do a global flush by default */
	unsigned long base_pages_to_flush = TLB_FLUSH_ALL;

	preempt_disable();

	if ((end != TLB_FLUSH_ALL) && !(vmflag & VM_HUGETLB))
		base_pages_to_flush = (end - start) >> PAGE_SHIFT;
	if (base_pages_to_flush > tlb_single_page_flush_ceiling)
		base_pages_to_flush = TLB_FLUSH_ALL;

	if (current->active_mm != mm) {
		/* Synchronize with switch_mm. */
		smp_mb();

		goto out;
	}

	if (!current->mm) {
		leave_mm(smp_processor_id());

		/* Synchronize with switch_mm. */
		smp_mb();

		goto out;
	}

	/*
	 * Both branches below are implicit full barriers (MOV to CR or
	 * INVLPG) that synchronize with switch_mm.
	 */
	if (base_pages_to_flush == TLB_FLUSH_ALL) {
		count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
		local_flush_tlb();
	} else {
		/* flush the range one page at a time with 'invlpg' */
		for (addr = start; addr < end; addr += PAGE_SIZE) {
			count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE);
			__flush_tlb_single(addr);
		}
	}
	trace_tlb_flush(TLB_LOCAL_MM_SHOOTDOWN, base_pages_to_flush);
out:
	if (base_pages_to_flush == TLB_FLUSH_ALL) {
		start = 0UL;
		end = TLB_FLUSH_ALL;
	}
	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
		flush_tlb_others(mm_cpumask(mm), mm, start, end);
	preempt_enable();
}

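/*
 * IPI handler for flush_tlb_all(): flush the entire local TLB and drop
 * out of lazy TLB mode if this CPU was in it.
 */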
static void do_flush_tlb_all(void *info)
{
	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
	__flush_tlb_all();
	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_LAZY)
		leave_mm(smp_processor_id());
}

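/* Flush the entire TLB on every CPU in the system. */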
void flush_tlb_all(void)
{
	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH);
	on_each_cpu(do_flush_tlb_all, NULL, 1);
}

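/* IPI handler for flush_tlb_kernel_range(): flush one kernel range locally. */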
static void do_kernel_range_flush(void *info)
{
	struct flush_tlb_info *f = info;
	unsigned long addr;

	/* flush the range one page at a time with 'invlpg' */
	for (addr = f->flush_start; addr < f->flush_end; addr += PAGE_SIZE)
		__flush_tlb_single(addr);
}

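/*
 * Flush the kernel address range [start, end) on all CPUs.  Large ranges
 * fall back to a full TLB flush, mirroring the ceiling used for user space
 * flushes.  A typical (illustrative) caller is vmalloc/vfree-style code
 * tearing down a kernel mapping, roughly:
 *
 *	flush_tlb_kernel_range(area_start, area_start + area_size);
 */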
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	/* Use the same ceiling as user space task flushes; a bit conservative */
	if (end == TLB_FLUSH_ALL ||
	    (end - start) > tlb_single_page_flush_ceiling * PAGE_SIZE) {
		on_each_cpu(do_flush_tlb_all, NULL, 1);
	} else {
		struct flush_tlb_info info;
		info.flush_start = start;
		info.flush_end = end;
		on_each_cpu(do_kernel_range_flush, &info, 1);
	}
}

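/*
 * debugfs interface: the file x86/tlb_single_page_flush_ceiling (under the
 * debugfs mount point, typically /sys/kernel/debug) reads back and updates
 * tlb_single_page_flush_ceiling at run time.
 */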
static ssize_t tlbflush_read_file(struct file *file, char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	char buf[32];
	unsigned int len;

	len = sprintf(buf, "%ld\n", tlb_single_page_flush_ceiling);
	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}

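/* Parse and store a new ceiling value written to the debugfs file. */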
static ssize_t tlbflush_write_file(struct file *file,
		 const char __user *user_buf, size_t count, loff_t *ppos)
{
	char buf[32];
	ssize_t len;
	int ceiling;

	len = min(count, sizeof(buf) - 1);
	if (copy_from_user(buf, user_buf, len))
		return -EFAULT;

	buf[len] = '\0';
	if (kstrtoint(buf, 0, &ceiling))
		return -EINVAL;

	if (ceiling < 0)
		return -EINVAL;

	tlb_single_page_flush_ceiling = ceiling;
	return count;
}

static const struct file_operations fops_tlbflush = {
	.read = tlbflush_read_file,
	.write = tlbflush_write_file,
	.llseek = default_llseek,
};

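/*
 * Create the debugfs knob at boot; root can then tune the flush ceiling
 * without rebuilding the kernel.
 */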
static int __init create_tlb_single_page_flush_ceiling(void)
{
	debugfs_create_file("tlb_single_page_flush_ceiling", S_IRUSR | S_IWUSR,
			    arch_debugfs_dir, NULL, &fops_tlbflush);
	return 0;
}
late_initcall(create_tlb_single_page_flush_ceiling);