#ifndef _S390_TLB_H
#define _S390_TLB_H

/*
 * TLB flushing on s390 is complicated. The following requirement
 * from the principles of operation is the most arduous:
 *
 * "A valid table entry must not be changed while it is attached
 * to any CPU and may be used for translation by that CPU except to
 * (1) invalidate the entry by using INVALIDATE PAGE TABLE ENTRY,
 * or INVALIDATE DAT TABLE ENTRY, (2) alter bits 56-63 of a page
 * table entry, or (3) make a change by means of a COMPARE AND SWAP
 * AND PURGE instruction that purges the TLB."
 *
 * The modification of a pte of an active mm struct therefore is
 * a two step process: i) invalidate the pte, ii) store the new pte.
 * This is true for the page protection bit as well.
 * The only possible optimization is to flush at the beginning of
 * a tlb_gather_mmu cycle if the mm_struct is currently not in use.
 *
 * Pages used for the page tables are a different story. FIXME: more
 */

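/*
 * Illustrative sketch only (not part of this interface): updating a pte
 * of an active mm along the lines described above could look like the
 * following, where ipte() is a hypothetical wrapper around the IPTE
 * instruction:
 *
 *	static void set_pte_two_step(unsigned long addr, pte_t *ptep,
 *				     pte_t new)
 *	{
 *		ipte(ptep, addr);	step i: invalidate pte, purge TLBs
 *		*ptep = new;		step ii: store the new pte
 *	}
 */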

#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <asm/processor.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>

struct mmu_gather {
        struct mm_struct *mm;           /* mm whose mappings are torn down */
        struct mmu_table_batch *batch;  /* page tables queued for freeing */
        unsigned int fullmm;            /* non-zero: entire mm is going away */
        unsigned long start, end;       /* address range being unmapped */
};

struct mmu_table_batch {
        struct rcu_head rcu;            /* for the RCU grace period */
        unsigned int nr;                /* tables[] entries in use */
        void *tables[0];                /* page tables to be freed */
};

#define MAX_TABLE_BATCH \
        ((PAGE_SIZE - sizeof(struct mmu_table_batch)) / sizeof(void *))

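/*
 * Worked example (assuming a 4KB PAGE_SIZE on a 64-bit build where
 * struct rcu_head is two pointers, not a guarantee of this header):
 * the batch header takes 24 bytes, so one batch page holds
 * (4096 - 24) / 8 = 509 table pointers.
 */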

extern void tlb_table_flush(struct mmu_gather *tlb);
extern void tlb_remove_table(struct mmu_gather *tlb, void *table);

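/*
 * Note: tlb_remove_table() is implemented in the s390 mm code; the
 * expectation (an assumption of this comment, not a definition made
 * here) is that it queues the table in tlb->batch and defers the
 * actual free until after the TLB flush and an RCU grace period, so
 * that concurrent lockless page table walkers never see a freed table.
 */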

static inline void tlb_gather_mmu(struct mmu_gather *tlb,
                                  struct mm_struct *mm,
                                  unsigned long start,
                                  unsigned long end)
{
        tlb->mm = mm;
        tlb->start = start;
        tlb->end = end;
        /* fullmm: start == 0 && end == -1, i.e. the whole address space */
        tlb->fullmm = !(start | (end + 1));
        tlb->batch = NULL;
}

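/*
 * Illustrative call sequence (a sketch of how the generic mm code is
 * expected to drive this interface; everything outside this header is
 * an assumption):
 *
 *	struct mmu_gather tlb;
 *
 *	tlb_gather_mmu(&tlb, mm, start, end);
 *	... clear ptes, call tlb_remove_page()/pte_free_tlb() ...
 *	tlb_finish_mmu(&tlb, start, end);
 */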

/* Flush the TLB of the mm, lazily if possible; no tables are freed here. */
static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
{
        __tlb_flush_mm_lazy(tlb->mm);
}

/* Release the page tables batched in tlb->batch; no TLB flush here. */
static inline void tlb_flush_mmu_free(struct mmu_gather *tlb)
{
        tlb_table_flush(tlb);
}

/* Both halves: flush the TLB, then release the batched page tables. */
static inline void tlb_flush_mmu(struct mmu_gather *tlb)
{
        tlb_flush_mmu_tlbonly(tlb);
        tlb_flush_mmu_free(tlb);
}

static inline void tlb_finish_mmu(struct mmu_gather *tlb,
                                  unsigned long start, unsigned long end)
{
        tlb_flush_mmu(tlb);
}

/*
 * Release the page cache reference for a pte removed by
 * tlb_ptep_clear_flush. In both flush modes the tlb for a page cache page
 * has already been flushed, so just do free_page_and_swap_cache.
 */
static inline bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
        free_page_and_swap_cache(page);
        return false; /* avoid calling tlb_flush_mmu */
}

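/*
 * On the return value (an observation about the generic mm code, not a
 * contract defined in this file): a true return is taken to mean "the
 * gather is full, flush now". s390 never batches pages here, so false
 * is always returned and the caller never forces an early
 * tlb_flush_mmu().
 */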

static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
        free_page_and_swap_cache(page);
}

static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
                                          struct page *page, int page_size)
{
        return __tlb_remove_page(tlb, page);
}

static inline bool __tlb_remove_pte_page(struct mmu_gather *tlb,
                                         struct page *page)
{
        return __tlb_remove_page(tlb, page);
}

static inline void tlb_remove_page_size(struct mmu_gather *tlb,
                                        struct page *page, int page_size)
{
        tlb_remove_page(tlb, page);
}

/*
 * pte_free_tlb frees a pte table and clears the CRSTE for the
 * page table from the tlb.
 */
static inline void pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
                                unsigned long address)
{
        page_table_free_rcu(tlb, (unsigned long *) pte, address);
}

/*
 * pmd_free_tlb frees a pmd table and clears the CRSTE for the
 * segment table entry from the tlb.
 * If the mm uses a two level page table the single pmd is freed
 * as the pgd. pmd_free_tlb checks the asce_limit against 2GB
 * to avoid the double free of the pmd in this case.
 */
static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
                                unsigned long address)
{
        if (tlb->mm->context.asce_limit <= (1UL << 31))  /* 2GB */
                return;
        pgtable_pmd_page_dtor(virt_to_page(pmd));
        tlb_remove_table(tlb, pmd);
}

/*
 * pud_free_tlb frees a pud table and clears the CRSTE for the
 * region third table entry from the tlb.
 * If the mm uses a three level page table the single pud is freed
 * as the pgd. pud_free_tlb checks the asce_limit against 4TB
 * to avoid the double free of the pud in this case.
 */
static inline void pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
                                unsigned long address)
{
        if (tlb->mm->context.asce_limit <= (1UL << 42))  /* 4TB */
                return;
        tlb_remove_table(tlb, pud);
}

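/*
 * The per-vma and per-entry hooks below are intentionally no-ops: on
 * s390 individual entries are invalidated directly (IPTE/IDTE) or the
 * whole mm is flushed lazily, so no extra per-vma bookkeeping is
 * needed here.
 */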

#define tlb_start_vma(tlb, vma)                         do { } while (0)
#define tlb_end_vma(tlb, vma)                           do { } while (0)
#define tlb_remove_tlb_entry(tlb, ptep, addr)           do { } while (0)
#define tlb_remove_pmd_tlb_entry(tlb, pmdp, addr)       do { } while (0)
#define tlb_migrate_finish(mm)                          do { } while (0)

#endif /* _S390_TLB_H */