/* SPDX-License-Identifier: GPL-2.0-or-later */
/* include/asm-generic/tlb.h
 *
 *	Generic TLB shootdown code
 *
 * Copyright 2001 Red Hat, Inc.
 * Based on code from mm/memory.c Copyright Linus Torvalds and others.
 *
 * Copyright 2011 Red Hat, Inc., Peter Zijlstra
 */
#ifndef _ASM_GENERIC__TLB_H
#define _ASM_GENERIC__TLB_H

#include <linux/mmu_notifier.h>
#include <linux/swap.h>
#include <linux/hugetlb_inline.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>

/*
 * Blindly accessing user memory from NMI context can be dangerous
 * if we're in the middle of switching the current user task or switching
 * the loaded mm.
 */
#ifndef nmi_uaccess_okay
# define nmi_uaccess_okay() true
#endif

#ifdef CONFIG_MMU

/*
 * Generic MMU-gather implementation.
 *
 * The mmu_gather data structure is used by the mm code to implement the
 * correct and efficient ordering of freeing pages and TLB invalidations.
 *
 * This correct ordering is:
 *
 *  1) unhook page
 *  2) TLB invalidate page
 *  3) free page
 *
 * That is, we must never free a page before we have ensured there are no live
 * translations left to it. Otherwise it might be possible to observe (or
 * worse, change) the page content after it has been reused.
 *
 * The mmu_gather API consists of:
 *
 *  - tlb_gather_mmu() / tlb_finish_mmu(); start and finish a mmu_gather
 *
 *    Finish in particular will issue a (final) TLB invalidate and free
 *    all (remaining) queued pages.
 *
 *  - tlb_start_vma() / tlb_end_vma(); marks the start / end of a VMA
 *
 *    Defaults to flushing at tlb_end_vma() to reset the range; helps when
 *    there are large holes between the VMAs.
 *
 *  - tlb_remove_table()
 *
 *    tlb_remove_table() is the basic primitive to free page-table directories
 *    (__p*_free_tlb()).  In its most primitive form it is an alias for
 *    tlb_remove_page() below, for when page directories are pages and have no
 *    additional constraints.
 *
 *    See also MMU_GATHER_TABLE_FREE and MMU_GATHER_RCU_TABLE_FREE.
 *
 *  - tlb_remove_page() / __tlb_remove_page()
 *  - tlb_remove_page_size() / __tlb_remove_page_size()
 *
 *    __tlb_remove_page_size() is the basic primitive that queues a page for
 *    freeing. __tlb_remove_page() assumes PAGE_SIZE. Both return a boolean
 *    indicating if the queue is (now) full and a call to tlb_flush_mmu() is
 *    required.
 *
 *    tlb_remove_page() and tlb_remove_page_size() imply the call to
 *    tlb_flush_mmu() when required and have no return value.
 *
 *  - tlb_change_page_size()
 *
 *    call before __tlb_remove_page*() to set the current page-size; implies a
 *    possible tlb_flush_mmu() call.
 *
 *  - tlb_flush_mmu() / tlb_flush_mmu_tlbonly()
 *
 *    tlb_flush_mmu_tlbonly() - does the TLB invalidate (and resets
 *                              related state, like the range)
 *
 *    tlb_flush_mmu() - in addition to the above TLB invalidate, also frees
 *			whatever pages are still batched.
 *
 *  - mmu_gather::fullmm
 *
 *    A flag set by tlb_gather_mmu() to indicate we're going to free
 *    the entire mm; this allows a number of optimizations.
 *
 *    - We can ignore tlb_{start,end}_vma(); because we don't
 *      care about ranges. Everything will be shot down.
 *
 *    - (RISC) architectures that use ASIDs can cycle to a new ASID
 *      and delay the invalidation until ASID space runs out.
 *
 *  - mmu_gather::need_flush_all
 *
 *    A flag that can be set by the arch code if it wants to force
 *    flush the entire TLB irrespective of the range. For instance
 *    x86-PAE needs this when changing top-level entries.
 *
 * The architecture may also provide and implement its own tlb_flush():
 *
 * tlb_flush() may, in addition to the above mentioned mmu_gather fields, make
 * use of:
 *
 *  - mmu_gather::start / mmu_gather::end
 *
 *    which provides the range that needs to be flushed to cover the pages to
 *    be freed.
 *
 *  - mmu_gather::freed_tables
 *
 *    set when we freed page-table pages
 *
 *  - tlb_get_unmap_shift() / tlb_get_unmap_size()
 *
 *    returns the smallest TLB entry size unmapped in this range.
 *
 * If an architecture does not provide tlb_flush() a default implementation
 * based on flush_tlb_range() will be used, unless MMU_GATHER_NO_RANGE is
 * specified, in which case we'll default to flush_tlb_mm().
 *
 * Additionally there are a few opt-in features:
 *
 *  MMU_GATHER_PAGE_SIZE
 *
 *  This ensures we call tlb_flush() every time tlb_change_page_size() actually
 *  changes the size and provides mmu_gather::page_size to tlb_flush().
 *
 *  This might be useful if your architecture has size specific TLB
 *  invalidation instructions.
 *
 *  MMU_GATHER_TABLE_FREE
 *
 *  This provides tlb_remove_table(), to be used instead of tlb_remove_page()
 *  for page directories (__p*_free_tlb()).
 *
 *  Useful if your architecture has non-page page directories.
 *
 *  When used, an architecture is expected to provide __tlb_remove_table()
 *  which does the actual freeing of these pages.
 *
 *  MMU_GATHER_RCU_TABLE_FREE
 *
 *  Like MMU_GATHER_TABLE_FREE, and adds semi-RCU semantics to the free (see
 *  comment below).
 *
 *  Useful if your architecture doesn't use IPIs for remote TLB invalidates
 *  and therefore doesn't naturally serialize with software page-table walkers.
 *
 *  MMU_GATHER_NO_RANGE
 *
 *  Use this if your architecture lacks an efficient flush_tlb_range().
 *
 *  MMU_GATHER_NO_GATHER
 *
 *  If this option is set, the mmu_gather will not track individual pages for
 *  delayed page free anymore. A platform that enables the option needs to
 *  provide its own implementation of the __tlb_remove_page_size() function to
 *  free pages.
 *
 *  This is useful if your architecture already flushes TLB entries in the
 *  various ptep_get_and_clear() functions.
 */

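/*
 * For orientation, a minimal illustrative sketch of how the core mm code
 * drives this API when tearing down mappings. This is not authoritative;
 * the real call sites live in mm/memory.c and mm/mmap.c, and the
 * tlb_gather_mmu() argument list has varied across kernel versions:
 *
 *	struct mmu_gather tlb;
 *
 *	tlb_gather_mmu(&tlb, mm);		// start a gather operation
 *	tlb_start_vma(&tlb, vma);		// per-VMA setup
 *	...					// clear PTEs, then:
 *	tlb_remove_tlb_entry(&tlb, ptep, addr);	// track range to invalidate
 *	tlb_remove_page(&tlb, page);		// queue page; may flush
 *	...
 *	tlb_end_vma(&tlb, vma);			// flush at the VMA boundary
 *	tlb_finish_mmu(&tlb);			// final invalidate + free all
 */
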
#ifdef CONFIG_MMU_GATHER_TABLE_FREE

struct mmu_table_batch {
#ifdef CONFIG_MMU_GATHER_RCU_TABLE_FREE
	struct rcu_head		rcu;
#endif
	unsigned int		nr;
	void			*tables[0];
};

#define MAX_TABLE_BATCH		\
	((PAGE_SIZE - sizeof(struct mmu_table_batch)) / sizeof(void *))

extern void tlb_remove_table(struct mmu_gather *tlb, void *table);

#else /* !CONFIG_MMU_GATHER_TABLE_FREE */

/*
 * Without MMU_GATHER_TABLE_FREE the architecture is assumed to have page based
 * page directories and we can use the normal page batching to free them.
 */
#define tlb_remove_table(tlb, page) tlb_remove_page((tlb), (page))

#endif /* CONFIG_MMU_GATHER_TABLE_FREE */

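/*
 * With MMU_GATHER_TABLE_FREE the architecture supplies the routine that does
 * the actual free. A hedged sketch of what that can look like when the
 * directories turn out to be page-backed after all (x86 does something along
 * these lines; consult the arch's asm/tlb.h for the real thing):
 *
 *	static inline void __tlb_remove_table(void *table)
 *	{
 *		free_page_and_swap_cache((struct page *)table);
 *	}
 */
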
#ifdef CONFIG_MMU_GATHER_RCU_TABLE_FREE
/*
 * This allows an architecture that does not use the Linux page-tables for
 * hardware to skip the TLBI when freeing page tables.
 */
#ifndef tlb_needs_table_invalidate
#define tlb_needs_table_invalidate() (true)
#endif

void tlb_remove_table_sync_one(void);

#else

#ifdef tlb_needs_table_invalidate
#error tlb_needs_table_invalidate() requires MMU_GATHER_RCU_TABLE_FREE
#endif

static inline void tlb_remove_table_sync_one(void) { }

#endif /* CONFIG_MMU_GATHER_RCU_TABLE_FREE */


#ifndef CONFIG_MMU_GATHER_NO_GATHER
/*
 * If we can't allocate a page to make a big batch of page pointers
 * to work on, then just handle a few from the on-stack structure.
 */
#define MMU_GATHER_BUNDLE	8

struct mmu_gather_batch {
	struct mmu_gather_batch	*next;
	unsigned int		nr;
	unsigned int		max;
	struct page		*pages[0];
};

#define MAX_GATHER_BATCH	\
	((PAGE_SIZE - sizeof(struct mmu_gather_batch)) / sizeof(void *))

/*
 * Limit the maximum number of mmu_gather batches to reduce the risk of soft
 * lockups for non-preemptible kernels on huge machines when a lot of memory
 * is zapped during unmapping.
 * 10K pages freed at once should be safe even without a preemption point.
 */
#define MAX_GATHER_BATCH_COUNT	(10000UL/MAX_GATHER_BATCH)
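/*
 * Worked example (assuming a 64-bit arch with 4KiB pages and no debug
 * padding): sizeof(struct mmu_gather_batch) is 16 bytes, so MAX_GATHER_BATCH
 * = (4096 - 16) / 8 = 510 pages per batch, and MAX_GATHER_BATCH_COUNT =
 * 10000 / 510 = 19 batches, i.e. roughly 9690 queued pages before a flush
 * is forced.
 */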

extern bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page,
				   int page_size);
#endif

/*
 * struct mmu_gather is an opaque type used by the mm code for passing around
 * any data needed by arch specific code for tlb_remove_page.
 */
struct mmu_gather {
	struct mm_struct	*mm;

#ifdef CONFIG_MMU_GATHER_TABLE_FREE
	struct mmu_table_batch	*batch;
#endif

	unsigned long		start;
	unsigned long		end;
	/*
	 * we are in the middle of an operation to clear
	 * a full mm and can make some optimizations
	 */
	unsigned int		fullmm : 1;

	/*
	 * we have performed an operation which
	 * requires a complete flush of the tlb
	 */
	unsigned int		need_flush_all : 1;

	/*
	 * we have removed page directories
	 */
	unsigned int		freed_tables : 1;

	/*
	 * at which levels have we cleared entries?
	 */
	unsigned int		cleared_ptes : 1;
	unsigned int		cleared_pmds : 1;
	unsigned int		cleared_puds : 1;
	unsigned int		cleared_p4ds : 1;

	/*
	 * tracks VM_EXEC | VM_HUGETLB in tlb_start_vma
	 */
	unsigned int		vma_exec : 1;
	unsigned int		vma_huge : 1;

	unsigned int		batch_count;

#ifndef CONFIG_MMU_GATHER_NO_GATHER
	struct mmu_gather_batch *active;
	struct mmu_gather_batch	local;
	struct page		*__pages[MMU_GATHER_BUNDLE];

#ifdef CONFIG_MMU_GATHER_PAGE_SIZE
	unsigned int page_size;
#endif
#endif
};

void tlb_flush_mmu(struct mmu_gather *tlb);

static inline void __tlb_adjust_range(struct mmu_gather *tlb,
				      unsigned long address,
				      unsigned int range_size)
{
	tlb->start = min(tlb->start, address);
	tlb->end = max(tlb->end, address + range_size);
}

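/*
 * E.g. (assuming 4KiB pages) starting from a reset range of
 * start = TASK_SIZE, end = 0: gathering a PTE at 0x1000 and then one at
 * 0x5000 yields start = 0x1000 and end = 0x6000; the flush range grows to
 * cover everything touched since the last flush.
 */
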
static inline void __tlb_reset_range(struct mmu_gather *tlb)
{
	if (tlb->fullmm) {
		tlb->start = tlb->end = ~0;
	} else {
		tlb->start = TASK_SIZE;
		tlb->end = 0;
	}
	tlb->freed_tables = 0;
	tlb->cleared_ptes = 0;
	tlb->cleared_pmds = 0;
	tlb->cleared_puds = 0;
	tlb->cleared_p4ds = 0;
	/*
	 * Do not reset mmu_gather::vma_* fields here, we do not
	 * call into tlb_start_vma() again to set them if there is an
	 * intermediate flush.
	 */
}

#ifdef CONFIG_MMU_GATHER_NO_RANGE

#if defined(tlb_flush) || defined(tlb_start_vma) || defined(tlb_end_vma)
#error MMU_GATHER_NO_RANGE relies on default tlb_flush(), tlb_start_vma() and tlb_end_vma()
#endif

/*
 * When an architecture does not have an efficient means of range flushing
 * TLBs there is no point in doing intermediate flushes on tlb_end_vma() to
 * keep the range small. We equally don't have to worry about page granularity
 * or other things.
 *
 * All we need to do is issue a full flush for any !0 range.
 */
static inline void tlb_flush(struct mmu_gather *tlb)
{
	if (tlb->end)
		flush_tlb_mm(tlb->mm);
}

static inline void
tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma) { }

#define tlb_end_vma tlb_end_vma
static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma) { }

#else /* CONFIG_MMU_GATHER_NO_RANGE */

#ifndef tlb_flush

#if defined(tlb_start_vma) || defined(tlb_end_vma)
#error Default tlb_flush() relies on default tlb_start_vma() and tlb_end_vma()
#endif

/*
 * When an architecture does not provide its own tlb_flush() implementation
 * but does have a reasonably efficient flush_tlb_range() implementation
 * use that.
 */
static inline void tlb_flush(struct mmu_gather *tlb)
{
	if (tlb->fullmm || tlb->need_flush_all) {
		flush_tlb_mm(tlb->mm);
	} else if (tlb->end) {
		struct vm_area_struct vma = {
			.vm_mm = tlb->mm,
			.vm_flags = (tlb->vma_exec ? VM_EXEC    : 0) |
				    (tlb->vma_huge ? VM_HUGETLB : 0),
		};

		flush_tlb_range(&vma, tlb->start, tlb->end);
	}
}

static inline void
tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	/*
	 * flush_tlb_range() implementations that look at VM_HUGETLB (tile,
	 * mips-4k) flush only large pages.
	 *
	 * flush_tlb_range() implementations that flush I-TLB also flush D-TLB
	 * (tile, xtensa, arm), so it's ok to just add VM_EXEC to an existing
	 * range.
	 *
	 * We rely on tlb_end_vma() to issue a flush, such that when we reset
	 * these values the batch is empty.
	 */
	tlb->vma_huge = is_vm_hugetlb_page(vma);
	tlb->vma_exec = !!(vma->vm_flags & VM_EXEC);
}

#else

static inline void
tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma) { }

#endif

#endif /* CONFIG_MMU_GATHER_NO_RANGE */

static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
{
	/*
	 * Anything calling __tlb_adjust_range() also sets at least one of
	 * these bits.
	 */
	if (!(tlb->freed_tables || tlb->cleared_ptes || tlb->cleared_pmds ||
	      tlb->cleared_puds || tlb->cleared_p4ds))
		return;

	tlb_flush(tlb);
	mmu_notifier_invalidate_range(tlb->mm, tlb->start, tlb->end);
	__tlb_reset_range(tlb);
}

static inline void tlb_remove_page_size(struct mmu_gather *tlb,
					struct page *page, int page_size)
{
	if (__tlb_remove_page_size(tlb, page, page_size))
		tlb_flush_mmu(tlb);
}

static inline bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	return __tlb_remove_page_size(tlb, page, PAGE_SIZE);
}

/* tlb_remove_page
 *	Similar to __tlb_remove_page but will call tlb_flush_mmu() itself when
 *	required.
 */
static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	return tlb_remove_page_size(tlb, page, PAGE_SIZE);
}

static inline void tlb_change_page_size(struct mmu_gather *tlb,
					unsigned int page_size)
{
#ifdef CONFIG_MMU_GATHER_PAGE_SIZE
	if (tlb->page_size && tlb->page_size != page_size) {
		if (!tlb->fullmm && !tlb->need_flush_all)
			tlb_flush_mmu(tlb);
	}

	tlb->page_size = page_size;
#endif
}

static inline unsigned long tlb_get_unmap_shift(struct mmu_gather *tlb)
{
	if (tlb->cleared_ptes)
		return PAGE_SHIFT;
	if (tlb->cleared_pmds)
		return PMD_SHIFT;
	if (tlb->cleared_puds)
		return PUD_SHIFT;
	if (tlb->cleared_p4ds)
		return P4D_SHIFT;

	return PAGE_SHIFT;
}

static inline unsigned long tlb_get_unmap_size(struct mmu_gather *tlb)
{
	return 1UL << tlb_get_unmap_shift(tlb);
}
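
/*
 * A sketch of how an architecture with size-aware invalidate instructions
 * might consume this in its own tlb_flush(), modelled loosely on arm64's use
 * of a flush stride; arch_flush_tlb_range_stride() is a made-up name for
 * illustration only:
 *
 *	static inline void tlb_flush(struct mmu_gather *tlb)
 *	{
 *		unsigned long stride = tlb_get_unmap_size(tlb);
 *
 *		// invalidate only one entry per smallest unmapped size
 *		arch_flush_tlb_range_stride(tlb->mm, tlb->start, tlb->end,
 *					    stride);
 *	}
 */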

/*
 * For tlb vma handling, we can optimise these away when we're doing a
 * full MM flush.  When we're doing a munmap, the vmas are adjusted to only
 * cover the region to be torn down.
 */
#ifndef tlb_start_vma
static inline void tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	if (tlb->fullmm)
		return;

	tlb_update_vma_flags(tlb, vma);
	flush_cache_range(vma, vma->vm_start, vma->vm_end);
}
#endif

#ifndef tlb_end_vma
static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	if (tlb->fullmm)
		return;

	/*
	 * Do a TLB flush and reset the range at VMA boundaries; this avoids
	 * the ranges growing with the unused space between consecutive VMAs,
	 * but also the mmu_gather::vma_* flags from tlb_start_vma() rely on
	 * this.
	 */
	tlb_flush_mmu_tlbonly(tlb);
}
#endif

/*
 * tlb_flush_{pte|pmd|pud|p4d}_range() adjust tlb->start and tlb->end,
 * and set the corresponding cleared_* bit.
 */
static inline void tlb_flush_pte_range(struct mmu_gather *tlb,
				     unsigned long address, unsigned long size)
{
	__tlb_adjust_range(tlb, address, size);
	tlb->cleared_ptes = 1;
}

static inline void tlb_flush_pmd_range(struct mmu_gather *tlb,
				     unsigned long address, unsigned long size)
{
	__tlb_adjust_range(tlb, address, size);
	tlb->cleared_pmds = 1;
}

static inline void tlb_flush_pud_range(struct mmu_gather *tlb,
				     unsigned long address, unsigned long size)
{
	__tlb_adjust_range(tlb, address, size);
	tlb->cleared_puds = 1;
}

static inline void tlb_flush_p4d_range(struct mmu_gather *tlb,
				     unsigned long address, unsigned long size)
{
	__tlb_adjust_range(tlb, address, size);
	tlb->cleared_p4ds = 1;
}

#ifndef __tlb_remove_tlb_entry
#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
#endif

/**
 * tlb_remove_tlb_entry - remember a pte unmapping for later tlb invalidation.
 *
 * Record the fact that PTEs were really unmapped by updating the range,
 * so we can later optimise away the tlb invalidate.  This helps when
 * userspace is unmapping already-unmapped pages, which happens quite a lot.
 */
#define tlb_remove_tlb_entry(tlb, ptep, address)		\
	do {							\
		tlb_flush_pte_range(tlb, address, PAGE_SIZE);	\
		__tlb_remove_tlb_entry(tlb, ptep, address);	\
	} while (0)

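/*
 * A condensed, illustrative sketch of the pattern mm/memory.c uses when
 * zapping a range of PTEs; locking details, declarations and the many
 * special cases are omitted:
 *
 *	tlb_change_page_size(tlb, PAGE_SIZE);
 *	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
 *	do {
 *		ptent = ptep_get_and_clear_full(mm, addr, pte, tlb->fullmm);
 *		tlb_remove_tlb_entry(tlb, pte, addr);	// grow flush range
 *		page = vm_normal_page(vma, addr, ptent);
 *		if (page && __tlb_remove_page(tlb, page))
 *			break;			// batch full, caller flushes
 *	} while (pte++, addr += PAGE_SIZE, addr != end);
 *	pte_unmap_unlock(pte - 1, ptl);
 */
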
#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address)	\
	do {							\
		unsigned long _sz = huge_page_size(h);		\
		if (_sz >= P4D_SIZE)				\
			tlb_flush_p4d_range(tlb, address, _sz);	\
		else if (_sz >= PUD_SIZE)			\
			tlb_flush_pud_range(tlb, address, _sz);	\
		else if (_sz >= PMD_SIZE)			\
			tlb_flush_pmd_range(tlb, address, _sz);	\
		else						\
			tlb_flush_pte_range(tlb, address, _sz);	\
		__tlb_remove_tlb_entry(tlb, ptep, address);	\
	} while (0)

/**
 * tlb_remove_pmd_tlb_entry - remember a pmd mapping for later tlb invalidation
 * This is a nop so far, because only x86 needs it.
 */
#ifndef __tlb_remove_pmd_tlb_entry
#define __tlb_remove_pmd_tlb_entry(tlb, pmdp, address) do {} while (0)
#endif

#define tlb_remove_pmd_tlb_entry(tlb, pmdp, address)			\
	do {								\
		tlb_flush_pmd_range(tlb, address, HPAGE_PMD_SIZE);	\
		__tlb_remove_pmd_tlb_entry(tlb, pmdp, address);		\
	} while (0)

/**
 * tlb_remove_pud_tlb_entry - remember a pud mapping for later tlb
 * invalidation. This is a nop so far, because only x86 needs it.
 */
#ifndef __tlb_remove_pud_tlb_entry
#define __tlb_remove_pud_tlb_entry(tlb, pudp, address) do {} while (0)
#endif

#define tlb_remove_pud_tlb_entry(tlb, pudp, address)			\
	do {								\
		tlb_flush_pud_range(tlb, address, HPAGE_PUD_SIZE);	\
		__tlb_remove_pud_tlb_entry(tlb, pudp, address);		\
	} while (0)

/*
 * For things like page-table caches (i.e. caching addresses "inside" the
 * page tables, like x86 does), for legacy reasons, flushing an
 * individual page had better flush the page-table caches behind it. This
 * is definitely how x86 works, for example. And if you have an
 * architected non-legacy page-table cache (which I'm not aware of
 * anybody actually doing), you're going to have some architecturally
 * explicit flushing for that, likely *separate* from a regular TLB entry
 * flush, and thus you'd need more than just some range expansion..
 *
 * So if we ever find an architecture
 * that would want something that odd, I think it is up to that
 * architecture to do its own odd thing, not cause pain for others
 * http://lkml.kernel.org/r/CA+55aFzBggoXtNXQeng5d_mRoDnaMBE5Y+URs+PHR67nUpMtaw@mail.gmail.com
 *
 * For now w.r.t the page table cache, mark the range_size as PAGE_SIZE
 */

#ifndef pte_free_tlb
#define pte_free_tlb(tlb, ptep, address)			\
	do {							\
		tlb_flush_pmd_range(tlb, address, PAGE_SIZE);	\
		tlb->freed_tables = 1;				\
		__pte_free_tlb(tlb, ptep, address);		\
	} while (0)
#endif

#ifndef pmd_free_tlb
#define pmd_free_tlb(tlb, pmdp, address)			\
	do {							\
		tlb_flush_pud_range(tlb, address, PAGE_SIZE);	\
		tlb->freed_tables = 1;				\
		__pmd_free_tlb(tlb, pmdp, address);		\
	} while (0)
#endif

#ifndef pud_free_tlb
#define pud_free_tlb(tlb, pudp, address)			\
	do {							\
		tlb_flush_p4d_range(tlb, address, PAGE_SIZE);	\
		tlb->freed_tables = 1;				\
		__pud_free_tlb(tlb, pudp, address);		\
	} while (0)
#endif

#ifndef p4d_free_tlb
#define p4d_free_tlb(tlb, p4dp, address)			\
	do {							\
		__tlb_adjust_range(tlb, address, PAGE_SIZE);	\
		tlb->freed_tables = 1;				\
		__p4d_free_tlb(tlb, p4dp, address);		\
	} while (0)
#endif

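/*
 * For reference, a hedged sketch of the arch side: a typical __pte_free_tlb()
 * drops the page-table page's constructor state and hands the page to the
 * gather machinery. Many architectures of this vintage look roughly like the
 * following; check the arch's asm/pgalloc.h or asm/tlb.h for the
 * authoritative version:
 *
 *	#define __pte_free_tlb(tlb, pte, address)	\
 *	do {						\
 *		pgtable_pte_page_dtor(pte);		\
 *		tlb_remove_table((tlb), (pte));		\
 *	} while (0)
 */
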
#endif /* CONFIG_MMU */

#endif /* _ASM_GENERIC__TLB_H */