#ifndef __UM_TLB_H
#define __UM_TLB_H

#include <linux/pagemap.h>
#include <linux/swap.h>
#include <asm/percpu.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>

#define tlb_start_vma(tlb, vma) do { } while (0)
#define tlb_end_vma(tlb, vma) do { } while (0)
#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)

/* struct mmu_gather is an opaque type used by the mm code for passing around
 * any data needed by arch specific code for tlb_remove_page.
 */
struct mmu_gather {
	struct mm_struct	*mm;
	unsigned int		need_flush; /* Really unmapped some ptes? */
	unsigned long		start;
	unsigned long		end;
	unsigned int		fullmm; /* non-zero means full mm flush */
};
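/* start/end bound the virtual address range that still needs flushing:
 * init_tlb_gather() below resets them to an empty range (start = TASK_SIZE,
 * end = 0), or to the whole address space when fullmm is set, and
 * __tlb_remove_tlb_entry() widens them one page at a time as ptes are
 * unmapped.
 */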

static inline void __tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep,
					  unsigned long address)
{
	if (tlb->start > address)
		tlb->start = address;
	if (tlb->end < address + PAGE_SIZE)
		tlb->end = address + PAGE_SIZE;
}

static inline void init_tlb_gather(struct mmu_gather *tlb)
{
	tlb->need_flush = 0;

	tlb->start = TASK_SIZE;
	tlb->end = 0;

	if (tlb->fullmm) {
		tlb->start = 0;
		tlb->end = TASK_SIZE;
	}
}

static inline void
tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int full_mm_flush)
{
	tlb->mm = mm;
	tlb->fullmm = full_mm_flush;

	init_tlb_gather(tlb);
}

extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
			       unsigned long end);

static inline void
tlb_flush_mmu(struct mmu_gather *tlb)
{
	if (!tlb->need_flush)
		return;

	flush_tlb_mm_range(tlb->mm, tlb->start, tlb->end);
	init_tlb_gather(tlb);
}

/* tlb_finish_mmu
 *	Called at the end of the shootdown operation to free up any resources
 *	that were required.
 */
static inline void
tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
{
	tlb_flush_mmu(tlb);

	/* keep the page table cache within bounds */
	check_pgt_cache();
}

/* tlb_remove_page
 *	Must perform the equivalent to __free_pte(pte_get_and_clear(ptep)),
 *	while handling the additional races in SMP caused by other CPUs
 *	caching valid mappings in their TLBs.
 */
static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	tlb->need_flush = 1;
	free_page_and_swap_cache(page);
	return 1; /* avoid calling tlb_flush_mmu */
}

static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	__tlb_remove_page(tlb, page);
}

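/*
 * Illustrative sketch only: the real caller lives in the generic mm code,
 * not in this header, but a typical unmap path is expected to drive the
 * helpers above roughly as follows (mm, ptep, addr, page, start and end
 * stand in for whatever the caller is tearing down):
 *
 *	struct mmu_gather gather, *tlb = &gather;
 *
 *	tlb_gather_mmu(tlb, mm, 0);              start a partial-mm gather
 *	tlb_remove_tlb_entry(tlb, ptep, addr);   record each zapped pte
 *	tlb_remove_page(tlb, page);              free the backing page
 *	tlb_finish_mmu(tlb, start, end);         flush the gathered range
 */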
/**
 * tlb_remove_tlb_entry - remember a pte unmapping for later tlb invalidation.
 *
 * Record the fact that ptes were really unmapped in ->need_flush, so we can
 * later optimise away the tlb invalidate. This helps when userspace is
 * unmapping already-unmapped pages, which happens quite a lot.
 */
#define tlb_remove_tlb_entry(tlb, ptep, address)		\
	do {							\
		tlb->need_flush = 1;				\
		__tlb_remove_tlb_entry(tlb, ptep, address);	\
	} while (0)
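
/*
 * If a gather never sets ->need_flush (no pte was actually unmapped),
 * tlb_flush_mmu() returns early and flush_tlb_mm_range() is skipped,
 * which is the optimisation described above.
 */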

#define pte_free_tlb(tlb, ptep, addr) __pte_free_tlb(tlb, ptep, addr)

#define pud_free_tlb(tlb, pudp, addr) __pud_free_tlb(tlb, pudp, addr)

#define pmd_free_tlb(tlb, pmdp, addr) __pmd_free_tlb(tlb, pmdp, addr)

#define tlb_migrate_finish(mm) do {} while (0)

#endif