// SPDX-License-Identifier: GPL-2.0-only
#include <linux/init.h>

#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/export.h>
#include <linux/cpu.h>
#include <linux/debugfs.h>

#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/nospec-branch.h>
#include <asm/cache.h>
#include <asm/apic.h>
#include <asm/uv/uv.h>

#include "mm_internal.h"

/*
 * TLB flushing, formerly SMP-only
 * c/o Linus Torvalds.
 *
 * These mean you can really definitely utterly forget about
 * writing to user space from interrupts. (It's not allowed anyway).
 *
 * Optimizations Manfred Spraul <manfred@colorfullife.com>
 *
 * More scalable flush, from Andi Kleen
 *
 * Implement flush IPI by CALL_FUNCTION_VECTOR, Alex Shi
 */

/*
 * Use bit 0 to mangle the TIF_SPEC_IB state into the mm pointer which is
 * stored in cpu_tlbstate.last_user_mm_ibpb.
 */
#define LAST_USER_MM_IBPB 0x1UL

/*
 * We get here when we do something requiring a TLB invalidation
 * but could not go invalidate all of the contexts. We do the
 * necessary invalidation by clearing out the 'ctx_id' which
 * forces a TLB flush when the context is loaded.
 */
static void clear_asid_other(void)
{
        u16 asid;

        /*
         * This is only expected to be set if we have disabled
         * kernel _PAGE_GLOBAL pages.
         */
        if (!static_cpu_has(X86_FEATURE_PTI)) {
                WARN_ON_ONCE(1);
                return;
        }

        for (asid = 0; asid < TLB_NR_DYN_ASIDS; asid++) {
                /* Do not need to flush the current asid */
                if (asid == this_cpu_read(cpu_tlbstate.loaded_mm_asid))
                        continue;
                /*
                 * Make sure the next time we go to switch to
                 * this asid, we do a flush:
                 */
                this_cpu_write(cpu_tlbstate.ctxs[asid].ctx_id, 0);
        }
        this_cpu_write(cpu_tlbstate.invalidate_other, false);
}

atomic64_t last_mm_ctx_id = ATOMIC64_INIT(1);


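/*
 * Pick an ASID for @next. If @next already owns one of this CPU's
 * dynamic ASID slots, reuse it and only request a flush if the slot's
 * tlb_gen is behind @next_tlb_gen. Otherwise take the next slot in
 * round-robin order, which always requires a flush.
 */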
static void choose_new_asid(struct mm_struct *next, u64 next_tlb_gen,
                            u16 *new_asid, bool *need_flush)
{
        u16 asid;

        if (!static_cpu_has(X86_FEATURE_PCID)) {
                *new_asid = 0;
                *need_flush = true;
                return;
        }

        if (this_cpu_read(cpu_tlbstate.invalidate_other))
                clear_asid_other();

        for (asid = 0; asid < TLB_NR_DYN_ASIDS; asid++) {
                if (this_cpu_read(cpu_tlbstate.ctxs[asid].ctx_id) !=
                    next->context.ctx_id)
                        continue;

                *new_asid = asid;
                *need_flush = (this_cpu_read(cpu_tlbstate.ctxs[asid].tlb_gen) <
                               next_tlb_gen);
                return;
        }

        /*
         * We don't currently own an ASID slot on this CPU.
         * Allocate a slot.
         */
        *new_asid = this_cpu_add_return(cpu_tlbstate.next_asid, 1) - 1;
        if (*new_asid >= TLB_NR_DYN_ASIDS) {
                *new_asid = 0;
                this_cpu_write(cpu_tlbstate.next_asid, 1);
        }
        *need_flush = true;
}

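/*
 * Load CR3 for @pgdir with @new_asid, flushing the new ASID's TLB
 * entries when @need_flush is set and using the NOFLUSH variant
 * otherwise.
 */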
static void load_new_mm_cr3(pgd_t *pgdir, u16 new_asid, bool need_flush)
{
        unsigned long new_mm_cr3;

        if (need_flush) {
                invalidate_user_asid(new_asid);
                new_mm_cr3 = build_cr3(pgdir, new_asid);
        } else {
                new_mm_cr3 = build_cr3_noflush(pgdir, new_asid);
        }

        /*
         * Caution: many callers of this function expect
         * that load_cr3() is serializing and orders TLB
         * fills with respect to the mm_cpumask writes.
         */
        write_cr3(new_mm_cr3);
}

void leave_mm(int cpu)
{
        struct mm_struct *loaded_mm = this_cpu_read(cpu_tlbstate.loaded_mm);

        /*
         * It's plausible that we're in lazy TLB mode while our mm is init_mm.
         * If so, our callers still expect us to flush the TLB, but there
         * aren't any user TLB entries in init_mm to worry about.
         *
         * This needs to happen before any other sanity checks due to
         * intel_idle's shenanigans.
         */
        if (loaded_mm == &init_mm)
                return;

        /* Warn if we're not lazy. */
        WARN_ON(!this_cpu_read(cpu_tlbstate.is_lazy));

        switch_mm(NULL, &init_mm, NULL);
}
EXPORT_SYMBOL_GPL(leave_mm);

void switch_mm(struct mm_struct *prev, struct mm_struct *next,
               struct task_struct *tsk)
{
        unsigned long flags;

        local_irq_save(flags);
        switch_mm_irqs_off(prev, next, tsk);
        local_irq_restore(flags);
}

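/*
 * Make sure the vmalloc'ed stack of the current task is mapped in the
 * page tables of @mm by copying the relevant top-level (pgd or p4d)
 * entry from the kernel reference page tables.
 */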
static void sync_current_stack_to_mm(struct mm_struct *mm)
{
        unsigned long sp = current_stack_pointer;
        pgd_t *pgd = pgd_offset(mm, sp);

        if (pgtable_l5_enabled()) {
                if (unlikely(pgd_none(*pgd))) {
                        pgd_t *pgd_ref = pgd_offset_k(sp);

                        set_pgd(pgd, *pgd_ref);
                }
        } else {
                /*
                 * "pgd" is faked. The top level entries are "p4d"s, so sync
                 * the p4d. This compiles to approximately the same code as
                 * the 5-level case.
                 */
                p4d_t *p4d = p4d_offset(pgd, sp);

                if (unlikely(p4d_none(*p4d))) {
                        pgd_t *pgd_ref = pgd_offset_k(sp);
                        p4d_t *p4d_ref = p4d_offset(pgd_ref, sp);

                        set_p4d(p4d, *p4d_ref);
                }
        }
}

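/*
 * Fold the TIF_SPEC_IB state of @next into bit 0 of its mm pointer so
 * that a single compare against cpu_tlbstate.last_user_mm_ibpb covers
 * both "different mm" and "IB state changed".
 */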
static inline unsigned long mm_mangle_tif_spec_ib(struct task_struct *next)
{
        unsigned long next_tif = task_thread_info(next)->flags;
        unsigned long ibpb = (next_tif >> TIF_SPEC_IB) & LAST_USER_MM_IBPB;

        return (unsigned long)next->mm | ibpb;
}

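/*
 * Decide whether an IBPB (indirect branch prediction barrier) is needed
 * when switching to @next, to keep one user process from poisoning the
 * branch predictor state of another. Kernel threads (no mm) never need
 * one.
 */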
static void cond_ibpb(struct task_struct *next)
{
        if (!next || !next->mm)
                return;

        /*
         * Both the conditional and the always-on IBPB modes use the mm
         * pointer to avoid the IBPB when switching between tasks of the
         * same process. Using the mm pointer instead of mm->context.ctx_id
         * opens a hypothetical hole vs. mm_struct reuse, which is more or
         * less impossible for an attacker to control. Aside from that, it
         * would only affect the first schedule, so the theoretically
         * exposed data is not really interesting.
         */
        if (static_branch_likely(&switch_mm_cond_ibpb)) {
                unsigned long prev_mm, next_mm;

                /*
                 * This is a bit more complex than the always mode because
                 * it has to handle two cases:
                 *
                 * 1) Switch from a user space task (potential attacker)
                 *    which has TIF_SPEC_IB set to a user space task
                 *    (potential victim) which has TIF_SPEC_IB not set.
                 *
                 * 2) Switch from a user space task (potential attacker)
                 *    which has TIF_SPEC_IB not set to a user space task
                 *    (potential victim) which has TIF_SPEC_IB set.
                 *
                 * This could be done by unconditionally issuing IBPB when
                 * a task which has TIF_SPEC_IB set is either scheduled in
                 * or out. That, however, results in two flushes when:
                 *
                 * - the same user space task is scheduled out and later
                 *   scheduled in again and only a kernel thread ran in
                 *   between.
                 *
                 * - a user space task belonging to the same process is
                 *   scheduled in after a kernel thread ran in between
                 *
                 * - a user space task belonging to the same process is
                 *   scheduled in immediately.
                 *
                 * Optimize this with reasonably small overhead for the
                 * above cases. Mangle the TIF_SPEC_IB bit into the mm
                 * pointer of the incoming task which is stored in
                 * cpu_tlbstate.last_user_mm_ibpb for comparison.
                 */
                next_mm = mm_mangle_tif_spec_ib(next);
                prev_mm = this_cpu_read(cpu_tlbstate.last_user_mm_ibpb);

                /*
                 * Issue IBPB only if the mm's are different and one or
                 * both have the IBPB bit set.
                 */
                if (next_mm != prev_mm &&
                    (next_mm | prev_mm) & LAST_USER_MM_IBPB)
                        indirect_branch_prediction_barrier();

                this_cpu_write(cpu_tlbstate.last_user_mm_ibpb, next_mm);
        }

        if (static_branch_unlikely(&switch_mm_always_ibpb)) {
                /*
                 * Only flush when switching to a user space task with a
                 * different context than the user space task which ran
                 * last on this CPU.
                 */
                if (this_cpu_read(cpu_tlbstate.last_user_mm) != next->mm) {
                        indirect_branch_prediction_barrier();
                        this_cpu_write(cpu_tlbstate.last_user_mm, next->mm);
                }
        }
}

void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
                        struct task_struct *tsk)
{
        struct mm_struct *real_prev = this_cpu_read(cpu_tlbstate.loaded_mm);
        u16 prev_asid = this_cpu_read(cpu_tlbstate.loaded_mm_asid);
        bool was_lazy = this_cpu_read(cpu_tlbstate.is_lazy);
        unsigned cpu = smp_processor_id();
        u64 next_tlb_gen;
        bool need_flush;
        u16 new_asid;

        /*
         * NB: The scheduler will call us with prev == next when switching
         * from lazy TLB mode to normal mode if active_mm isn't changing.
         * When this happens, we don't assume that CR3 (and hence
         * cpu_tlbstate.loaded_mm) matches next.
         *
         * NB: leave_mm() calls us with prev == NULL and tsk == NULL.
         */

        /* We don't want flush_tlb_func_* to run concurrently with us. */
        if (IS_ENABLED(CONFIG_PROVE_LOCKING))
                WARN_ON_ONCE(!irqs_disabled());

        /*
         * Verify that CR3 is what we think it is. This will catch
         * hypothetical buggy code that directly switches to swapper_pg_dir
         * without going through leave_mm() / switch_mm_irqs_off() or that
         * does something like write_cr3(read_cr3_pa()).
         *
         * Only do this check if CONFIG_DEBUG_VM=y because __read_cr3()
         * isn't free.
         */
#ifdef CONFIG_DEBUG_VM
        if (WARN_ON_ONCE(__read_cr3() != build_cr3(real_prev->pgd, prev_asid))) {
                /*
                 * If we were to BUG here, we'd be very likely to kill
                 * the system so hard that we don't see the call trace.
                 * Try to recover instead by ignoring the error and doing
                 * a global flush to minimize the chance of corruption.
                 *
                 * (This is far from being a fully correct recovery.
                 *  Architecturally, the CPU could prefetch something
                 *  back into an incorrect ASID slot and leave it there
                 *  to cause trouble down the road. It's better than
                 *  nothing, though.)
                 */
                __flush_tlb_all();
        }
#endif
        this_cpu_write(cpu_tlbstate.is_lazy, false);

        /*
         * The membarrier system call requires a full memory barrier and
         * core serialization before returning to user-space, after
         * storing to rq->curr, when changing mm. This is because
         * membarrier() sends IPIs to all CPUs that are in the target mm
         * to make them issue memory barriers. However, if another CPU
         * switches to/from the target mm concurrently with
         * membarrier(), it can cause that CPU not to receive an IPI
         * when it really should issue a memory barrier. Writing to CR3
         * provides that full memory barrier and core serializing
         * instruction.
         */
        if (real_prev == next) {
                VM_WARN_ON(this_cpu_read(cpu_tlbstate.ctxs[prev_asid].ctx_id) !=
                           next->context.ctx_id);

                /*
                 * Even in lazy TLB mode, the CPU should stay set in the
                 * mm_cpumask. The TLB shootdown code can figure out from
                 * cpu_tlbstate.is_lazy whether or not to send an IPI.
                 */
                if (WARN_ON_ONCE(real_prev != &init_mm &&
                                 !cpumask_test_cpu(cpu, mm_cpumask(next))))
                        cpumask_set_cpu(cpu, mm_cpumask(next));

                /*
                 * If the CPU is not in lazy TLB mode, we are just switching
                 * from one thread in a process to another thread in the same
                 * process. No TLB flush required.
                 */
                if (!was_lazy)
                        return;

                /*
                 * Read the tlb_gen to check whether a flush is needed.
                 * If the TLB is up to date, just use it.
                 * The barrier synchronizes with the tlb_gen increment in
                 * the TLB shootdown code.
                 */
                smp_mb();
                next_tlb_gen = atomic64_read(&next->context.tlb_gen);
                if (this_cpu_read(cpu_tlbstate.ctxs[prev_asid].tlb_gen) ==
                    next_tlb_gen)
                        return;

                /*
                 * TLB contents went out of date while we were in lazy
                 * mode. Fall through to the TLB switching code below.
                 */
                new_asid = prev_asid;
                need_flush = true;
        } else {
                /*
                 * Avoid user/user BTB poisoning by flushing the branch
                 * predictor when switching between processes. This stops
                 * one process from doing Spectre-v2 attacks on another.
                 */
                cond_ibpb(tsk);

                if (IS_ENABLED(CONFIG_VMAP_STACK)) {
                        /*
                         * If our current stack is in vmalloc space and isn't
                         * mapped in the new pgd, we'll double-fault. Forcibly
                         * map it.
                         */
                        sync_current_stack_to_mm(next);
                }

                /*
                 * Stop remote flushes for the previous mm.
                 * Skip kernel threads; we never send init_mm TLB flushing IPIs,
                 * but the bitmap manipulation can cause cache line contention.
                 */
                if (real_prev != &init_mm) {
                        VM_WARN_ON_ONCE(!cpumask_test_cpu(cpu,
                                                mm_cpumask(real_prev)));
                        cpumask_clear_cpu(cpu, mm_cpumask(real_prev));
                }

                /*
                 * Start remote flushes and then read tlb_gen.
                 */
                if (next != &init_mm)
                        cpumask_set_cpu(cpu, mm_cpumask(next));
                next_tlb_gen = atomic64_read(&next->context.tlb_gen);

                choose_new_asid(next, next_tlb_gen, &new_asid, &need_flush);

                /* Let nmi_uaccess_okay() know that we're changing CR3. */
                this_cpu_write(cpu_tlbstate.loaded_mm, LOADED_MM_SWITCHING);
                barrier();
        }

        if (need_flush) {
                this_cpu_write(cpu_tlbstate.ctxs[new_asid].ctx_id, next->context.ctx_id);
                this_cpu_write(cpu_tlbstate.ctxs[new_asid].tlb_gen, next_tlb_gen);
                load_new_mm_cr3(next->pgd, new_asid, true);

                /*
                 * NB: This gets called via leave_mm() in the idle path
                 * where RCU functions differently. Tracing normally
                 * uses RCU, so we need to use the _rcuidle variant.
                 *
                 * (There is no good reason for this. The idle code should
                 *  be rearranged to call this before rcu_idle_enter().)
                 */
                trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
        } else {
                /* The new ASID is already up to date. */
                load_new_mm_cr3(next->pgd, new_asid, false);

                /* See above wrt _rcuidle. */
                trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, 0);
        }

        /* Make sure we write CR3 before loaded_mm. */
        barrier();

        this_cpu_write(cpu_tlbstate.loaded_mm, next);
        this_cpu_write(cpu_tlbstate.loaded_mm_asid, new_asid);

        if (next != real_prev) {
                load_mm_cr4_irqsoff(next);
                switch_ldt(real_prev, next);
        }
}

/*
 * Please ignore the name of this function. It should be called
 * switch_to_kernel_thread().
 *
 * enter_lazy_tlb() is a hint from the scheduler that we are entering a
 * kernel thread or other context without an mm. Acceptable implementations
 * include doing nothing whatsoever, switching to init_mm, or various clever
 * lazy tricks to try to minimize TLB flushes.
 *
 * The scheduler reserves the right to call enter_lazy_tlb() several times
 * in a row. It will notify us that we're going back to a real mm by
 * calling switch_mm_irqs_off().
 */
void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
        if (this_cpu_read(cpu_tlbstate.loaded_mm) == &init_mm)
                return;

        this_cpu_write(cpu_tlbstate.is_lazy, true);
}

/*
 * Call this when reinitializing a CPU. It fixes the following potential
 * problems:
 *
 * - The ASID changed from what cpu_tlbstate thinks it is (most likely
 *   because the CPU was taken down and came back up with CR3's PCID
 *   bits clear. CPU hotplug can do this.)
 *
 * - The TLB contains junk in slots corresponding to inactive ASIDs.
 *
 * - The CPU went so far out to lunch that it may have missed a TLB
 *   flush.
 */
void initialize_tlbstate_and_flush(void)
{
        int i;
        struct mm_struct *mm = this_cpu_read(cpu_tlbstate.loaded_mm);
        u64 tlb_gen = atomic64_read(&init_mm.context.tlb_gen);
        unsigned long cr3 = __read_cr3();

        /* Assert that CR3 already references the right mm. */
        WARN_ON((cr3 & CR3_ADDR_MASK) != __pa(mm->pgd));

        /*
         * Assert that CR4.PCIDE is set if needed. (CR4.PCIDE initialization
         * doesn't work like other CR4 bits because it can only be set from
         * long mode.)
         */
        WARN_ON(boot_cpu_has(X86_FEATURE_PCID) &&
                !(cr4_read_shadow() & X86_CR4_PCIDE));

        /* Force ASID 0 and force a TLB flush. */
        write_cr3(build_cr3(mm->pgd, 0));

        /* Reinitialize tlbstate. */
        this_cpu_write(cpu_tlbstate.last_user_mm_ibpb, LAST_USER_MM_IBPB);
        this_cpu_write(cpu_tlbstate.loaded_mm_asid, 0);
        this_cpu_write(cpu_tlbstate.next_asid, 1);
        this_cpu_write(cpu_tlbstate.ctxs[0].ctx_id, mm->context.ctx_id);
        this_cpu_write(cpu_tlbstate.ctxs[0].tlb_gen, tlb_gen);

        for (i = 1; i < TLB_NR_DYN_ASIDS; i++)
                this_cpu_write(cpu_tlbstate.ctxs[i].ctx_id, 0);
}

/*
 * flush_tlb_func_common()'s memory ordering requirement is that any
 * TLB fills that happen after we flush the TLB are ordered after we
 * read active_mm's tlb_gen. We don't need any explicit barriers
 * because all x86 flush operations are serializing and the
 * atomic64_read operation won't be reordered by the compiler.
 */
static void flush_tlb_func_common(const struct flush_tlb_info *f,
                                  bool local, enum tlb_flush_reason reason)
{
        /*
         * We have three different tlb_gen values in here. They are:
         *
         * - mm_tlb_gen: the latest generation.
         * - local_tlb_gen: the generation that this CPU has already caught
         *   up to.
         * - f->new_tlb_gen: the generation that the requester of the flush
         *   wants us to catch up to.
         */
        struct mm_struct *loaded_mm = this_cpu_read(cpu_tlbstate.loaded_mm);
        u32 loaded_mm_asid = this_cpu_read(cpu_tlbstate.loaded_mm_asid);
        u64 mm_tlb_gen = atomic64_read(&loaded_mm->context.tlb_gen);
        u64 local_tlb_gen = this_cpu_read(cpu_tlbstate.ctxs[loaded_mm_asid].tlb_gen);

        /* This code cannot presently handle being reentered. */
        VM_WARN_ON(!irqs_disabled());

        if (unlikely(loaded_mm == &init_mm))
                return;

        VM_WARN_ON(this_cpu_read(cpu_tlbstate.ctxs[loaded_mm_asid].ctx_id) !=
                   loaded_mm->context.ctx_id);

        if (this_cpu_read(cpu_tlbstate.is_lazy)) {
                /*
                 * We're in lazy mode. We need to at least flush our
                 * paging-structure cache to avoid speculatively reading
                 * garbage into our TLB. Since switching to init_mm is barely
                 * slower than a minimal flush, just switch to init_mm.
                 *
                 * This should be rare, with native_flush_tlb_others() skipping
                 * IPIs to lazy TLB mode CPUs.
                 */
                switch_mm_irqs_off(NULL, &init_mm, NULL);
                return;
        }

        if (unlikely(local_tlb_gen == mm_tlb_gen)) {
                /*
                 * There's nothing to do: we're already up to date. This can
                 * happen if two concurrent flushes happen -- the first flush to
                 * be handled can catch us all the way up, leaving no work for
                 * the second flush.
                 */
                trace_tlb_flush(reason, 0);
                return;
        }

        WARN_ON_ONCE(local_tlb_gen > mm_tlb_gen);
        WARN_ON_ONCE(f->new_tlb_gen > mm_tlb_gen);

        /*
         * If we get to this point, we know that our TLB is out of date.
         * This does not strictly imply that we need to flush (it's
         * possible that f->new_tlb_gen <= local_tlb_gen), but we're
         * going to need to flush in the very near future, so we might
         * as well get it over with.
         *
         * The only question is whether to do a full or partial flush.
         *
         * We do a partial flush if requested and two extra conditions
         * are met:
         *
         * 1. f->new_tlb_gen == local_tlb_gen + 1. We have an invariant that
         *    we've always done all needed flushes to catch up to
         *    local_tlb_gen. If, for example, local_tlb_gen == 2 and
         *    f->new_tlb_gen == 3, then we know that the flush needed to bring
         *    us up to date for tlb_gen 3 is the partial flush we're
         *    processing.
         *
         *    As an example of why this check is needed, suppose that there
         *    are two concurrent flushes. The first is a full flush that
         *    changes context.tlb_gen from 1 to 2. The second is a partial
         *    flush that changes context.tlb_gen from 2 to 3. If they get
         *    processed on this CPU in reverse order, we'll see
         *    local_tlb_gen == 1, mm_tlb_gen == 3, and end != TLB_FLUSH_ALL.
         *    If we were to use __flush_tlb_one_user() and set local_tlb_gen to
         *    3, we'd break the invariant: we'd update local_tlb_gen above
         *    1 without the full flush that's needed for tlb_gen 2.
         *
         * 2. f->new_tlb_gen == mm_tlb_gen. This is purely an optimization.
         *    Partial TLB flushes are not all that much cheaper than full TLB
         *    flushes, so it seems unlikely that it would be a performance win
         *    to do a partial flush if that won't bring our TLB fully up to
         *    date. By doing a full flush instead, we can increase
         *    local_tlb_gen all the way to mm_tlb_gen and we can probably
         *    avoid another flush in the very near future.
         */
        if (f->end != TLB_FLUSH_ALL &&
            f->new_tlb_gen == local_tlb_gen + 1 &&
            f->new_tlb_gen == mm_tlb_gen) {
                /* Partial flush */
                unsigned long nr_invalidate = (f->end - f->start) >> f->stride_shift;
                unsigned long addr = f->start;

                while (addr < f->end) {
                        __flush_tlb_one_user(addr);
                        addr += 1UL << f->stride_shift;
                }
                if (local)
                        count_vm_tlb_events(NR_TLB_LOCAL_FLUSH_ONE, nr_invalidate);
                trace_tlb_flush(reason, nr_invalidate);
        } else {
                /* Full flush. */
                local_flush_tlb();
                if (local)
                        count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
                trace_tlb_flush(reason, TLB_FLUSH_ALL);
        }

        /* Both paths above update our state to mm_tlb_gen. */
        this_cpu_write(cpu_tlbstate.ctxs[loaded_mm_asid].tlb_gen, mm_tlb_gen);
}

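/*
 * flush_tlb_func_local() and flush_tlb_func_remote() are thin wrappers
 * around flush_tlb_func_common() for the issuing CPU and for the IPI
 * handler on remote CPUs, respectively.
 */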
static void flush_tlb_func_local(const void *info, enum tlb_flush_reason reason)
{
        const struct flush_tlb_info *f = info;

        flush_tlb_func_common(f, true, reason);
}

static void flush_tlb_func_remote(void *info)
{
        const struct flush_tlb_info *f = info;

        inc_irq_stat(irq_tlb_count);

        if (f->mm && f->mm != this_cpu_read(cpu_tlbstate.loaded_mm))
                return;

        count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
        flush_tlb_func_common(f, false, TLB_REMOTE_SHOOTDOWN);
}

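/*
 * Predicate for on_each_cpu_cond_mask(): returns true for CPUs that are
 * not in lazy TLB mode and therefore need an IPI.
 */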
static bool tlb_is_not_lazy(int cpu, void *data)
{
        return !per_cpu(cpu_tlbstate.is_lazy, cpu);
}

void native_flush_tlb_others(const struct cpumask *cpumask,
                             const struct flush_tlb_info *info)
{
        count_vm_tlb_event(NR_TLB_REMOTE_FLUSH);
        if (info->end == TLB_FLUSH_ALL)
                trace_tlb_flush(TLB_REMOTE_SEND_IPI, TLB_FLUSH_ALL);
        else
                trace_tlb_flush(TLB_REMOTE_SEND_IPI,
                                (info->end - info->start) >> PAGE_SHIFT);

        if (is_uv_system()) {
                /*
                 * This whole special case is confused. UV has a "Broadcast
                 * Assist Unit", which seems to be a fancy way to send IPIs.
                 * Back when x86 used an explicit TLB flush IPI, UV was
                 * optimized to use its own mechanism. These days, x86 uses
                 * smp_call_function_many(), but UV still uses a manual IPI,
                 * and that IPI's action is out of date -- it does a manual
                 * flush instead of calling flush_tlb_func_remote(). This
                 * means that the percpu tlb_gen variables won't be updated
                 * and we'll do pointless flushes on future context switches.
                 *
                 * Rather than hooking native_flush_tlb_others() here, I think
                 * that UV should be updated so that smp_call_function_many(),
                 * etc, are optimal on UV.
                 */
                cpumask = uv_flush_tlb_others(cpumask, info);
                if (cpumask)
                        smp_call_function_many(cpumask, flush_tlb_func_remote,
                                               (void *)info, 1);
                return;
        }

        /*
         * If no page tables were freed, we can skip sending IPIs to
         * CPUs in lazy TLB mode. They will flush the CPU themselves
         * at the next context switch.
         *
         * However, if page tables are getting freed, we need to send the
         * IPI everywhere, to prevent CPUs in lazy TLB mode from tripping
         * up on the new contents of what used to be page tables, while
         * doing a speculative memory access.
         */
        if (info->freed_tables)
                smp_call_function_many(cpumask, flush_tlb_func_remote,
                                       (void *)info, 1);
        else
                on_each_cpu_cond_mask(tlb_is_not_lazy, flush_tlb_func_remote,
                                      (void *)info, 1, GFP_ATOMIC, cpumask);
}

/*
 * See Documentation/x86/tlb.rst for details. We choose 33
 * because it is large enough to cover the vast majority (at
 * least 95%) of allocations, and is small enough that we are
 * confident it will not cause too much overhead. Each single
 * flush is about 100 ns, so this caps the maximum overhead at
 * _about_ 3,000 ns.
 *
 * This is in units of pages.
 */
unsigned long tlb_single_page_flush_ceiling __read_mostly = 33;

static DEFINE_PER_CPU_SHARED_ALIGNED(struct flush_tlb_info, flush_tlb_info);

#ifdef CONFIG_DEBUG_VM
static DEFINE_PER_CPU(unsigned int, flush_tlb_info_idx);
#endif

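/*
 * Fill in and return this CPU's flush_tlb_info. The caller must pair
 * this with put_flush_tlb_info() and keep preemption disabled in
 * between, since the structure lives in per-CPU storage.
 */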
static inline struct flush_tlb_info *get_flush_tlb_info(struct mm_struct *mm,
                        unsigned long start, unsigned long end,
                        unsigned int stride_shift, bool freed_tables,
                        u64 new_tlb_gen)
{
        struct flush_tlb_info *info = this_cpu_ptr(&flush_tlb_info);

#ifdef CONFIG_DEBUG_VM
        /*
         * Ensure that the following code is non-reentrant and flush_tlb_info
         * is not overwritten. This means no TLB flushing is initiated by
         * interrupt handlers and machine-check exception handlers.
         */
        BUG_ON(this_cpu_inc_return(flush_tlb_info_idx) != 1);
#endif

        info->start = start;
        info->end = end;
        info->mm = mm;
        info->stride_shift = stride_shift;
        info->freed_tables = freed_tables;
        info->new_tlb_gen = new_tlb_gen;

        return info;
}

static inline void put_flush_tlb_info(void)
{
#ifdef CONFIG_DEBUG_VM
        /* Complete reentrancy prevention checks. */
        barrier();
        this_cpu_dec(flush_tlb_info_idx);
#endif
}

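/*
 * Flush TLB entries for @mm in the range [@start, @end) with the given
 * stride, locally if @mm is loaded on this CPU and via IPI for the
 * other CPUs in mm_cpumask(mm). Ranges larger than
 * tlb_single_page_flush_ceiling pages are promoted to a full flush.
 */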
void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
                        unsigned long end, unsigned int stride_shift,
                        bool freed_tables)
{
        struct flush_tlb_info *info;
        u64 new_tlb_gen;
        int cpu;

        cpu = get_cpu();

        /* Should we flush just the requested range? */
        if ((end == TLB_FLUSH_ALL) ||
            ((end - start) >> stride_shift) > tlb_single_page_flush_ceiling) {
                start = 0;
                end = TLB_FLUSH_ALL;
        }

        /* This is also a barrier that synchronizes with switch_mm(). */
        new_tlb_gen = inc_mm_tlb_gen(mm);

        info = get_flush_tlb_info(mm, start, end, stride_shift, freed_tables,
                                  new_tlb_gen);

        if (mm == this_cpu_read(cpu_tlbstate.loaded_mm)) {
                lockdep_assert_irqs_enabled();
                local_irq_disable();
                flush_tlb_func_local(info, TLB_LOCAL_MM_SHOOTDOWN);
                local_irq_enable();
        }

        if (cpumask_any_but(mm_cpumask(mm), cpu) < nr_cpu_ids)
                flush_tlb_others(mm_cpumask(mm), info);

        put_flush_tlb_info();
        put_cpu();
}


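/* IPI handler (also run locally via on_each_cpu()) for a full TLB flush. */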
static void do_flush_tlb_all(void *info)
{
        count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
        __flush_tlb_all();
}

void flush_tlb_all(void)
{
        count_vm_tlb_event(NR_TLB_REMOTE_FLUSH);
        on_each_cpu(do_flush_tlb_all, NULL, 1);
}

static void do_kernel_range_flush(void *info)
{
        struct flush_tlb_info *f = info;
        unsigned long addr;

        /* Flush the range one page at a time with 'invlpg'. */
        for (addr = f->start; addr < f->end; addr += PAGE_SIZE)
                __flush_tlb_one_kernel(addr);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
        /* Use the same heuristic as for user-space flushes; a bit conservative. */
        if (end == TLB_FLUSH_ALL ||
            (end - start) > tlb_single_page_flush_ceiling << PAGE_SHIFT) {
                on_each_cpu(do_flush_tlb_all, NULL, 1);
        } else {
                struct flush_tlb_info *info;

                preempt_disable();
                info = get_flush_tlb_info(NULL, start, end, 0, false, 0);

                on_each_cpu(do_kernel_range_flush, info, 1);

                put_flush_tlb_info();
                preempt_enable();
        }
}

/*
 * arch_tlbbatch_flush() performs a full TLB flush regardless of the active mm.
 * This means that the 'struct flush_tlb_info' that describes which mappings to
 * flush is actually fixed. We therefore set up a single fixed struct and use
 * it in arch_tlbbatch_flush().
 */
static const struct flush_tlb_info full_flush_tlb_info = {
        .mm = NULL,
        .start = 0,
        .end = TLB_FLUSH_ALL,
};

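/*
 * Perform the full flush described by full_flush_tlb_info on this CPU
 * (if it is in @batch->cpumask) and on the other CPUs in the mask, then
 * clear the mask.
 */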
void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
{
        int cpu = get_cpu();

        if (cpumask_test_cpu(cpu, &batch->cpumask)) {
                lockdep_assert_irqs_enabled();
                local_irq_disable();
                flush_tlb_func_local(&full_flush_tlb_info, TLB_LOCAL_SHOOTDOWN);
                local_irq_enable();
        }

        if (cpumask_any_but(&batch->cpumask, cpu) < nr_cpu_ids)
                flush_tlb_others(&batch->cpumask, &full_flush_tlb_info);

        cpumask_clear(&batch->cpumask);

        put_cpu();
}

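/*
 * Debugfs interface for tlb_single_page_flush_ceiling: reading returns
 * the current value, writing a non-negative integer sets it.
 */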
static ssize_t tlbflush_read_file(struct file *file, char __user *user_buf,
                                  size_t count, loff_t *ppos)
{
        char buf[32];
        unsigned int len;

        len = sprintf(buf, "%lu\n", tlb_single_page_flush_ceiling);
        return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}

static ssize_t tlbflush_write_file(struct file *file,
                 const char __user *user_buf, size_t count, loff_t *ppos)
{
        char buf[32];
        ssize_t len;
        int ceiling;

        len = min(count, sizeof(buf) - 1);
        if (copy_from_user(buf, user_buf, len))
                return -EFAULT;

        buf[len] = '\0';
        if (kstrtoint(buf, 0, &ceiling))
                return -EINVAL;

        if (ceiling < 0)
                return -EINVAL;

        tlb_single_page_flush_ceiling = ceiling;
        return count;
}

static const struct file_operations fops_tlbflush = {
        .read = tlbflush_read_file,
        .write = tlbflush_write_file,
        .llseek = default_llseek,
};

static int __init create_tlb_single_page_flush_ceiling(void)
{
        debugfs_create_file("tlb_single_page_flush_ceiling", S_IRUSR | S_IWUSR,
                            arch_debugfs_dir, NULL, &fops_tlbflush);
        return 0;
}
late_initcall(create_tlb_single_page_flush_ceiling);