/*
 * Based on arch/arm/include/asm/tlbflush.h
 *
 * Copyright (C) 1999-2003 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_TLBFLUSH_H
#define __ASM_TLBFLUSH_H

#ifndef __ASSEMBLY__

#include <linux/sched.h>
#include <asm/cputype.h>
#include <asm/mmu.h>

/*
 * Raw TLBI operations.
 *
 * Where necessary, use the __tlbi() macro to avoid asm()
 * boilerplate. Drivers and most kernel code should use the TLB
 * management routines in preference to the macros below.
 *
 * The macro can be used as __tlbi(op) or __tlbi(op, arg), depending
 * on whether a particular TLBI operation takes an argument or
 * not. The macro handles invoking the asm with or without the
 * register argument as appropriate.
 */
#define __TLBI_0(op, arg)	asm ("tlbi " #op)
#define __TLBI_1(op, arg)	asm ("tlbi " #op ", %0" : : "r" (arg))
#define __TLBI_N(op, arg, n, ...) __TLBI_##n(op, arg)

#define __tlbi(op, ...)		__TLBI_N(op, ##__VA_ARGS__, 1, 0)

#define __tlbi_user(op, arg) do {				\
	if (arm64_kernel_unmapped_at_el0())			\
		__tlbi(op, (arg) | USER_ASID_FLAG);		\
} while (0)

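/*
 * Example usage (illustrative only; both forms appear in the helpers
 * further down):
 *
 *	__tlbi(vmalle1is);	no-argument form: invalidate all stage 1
 *				entries, Inner Shareable
 *	__tlbi(vae1is, addr);	single-argument form: invalidate by VA
 *				and ASID, Inner Shareable
 */
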
/*
 *	TLB Management
 *	==============
 *
 *	The TLB specific code is expected to perform whatever tests it needs
 *	to determine if it should invalidate the TLB for each call.  Start
 *	addresses are inclusive and end addresses are exclusive; it is safe to
 *	round these addresses down.
 *
 *	flush_tlb_all()
 *
 *		Invalidate the entire TLB.
 *
 *	flush_tlb_mm(mm)
 *
 *		Invalidate all TLB entries in a particular address space.
 *		- mm	- mm_struct describing address space
 *
 *	flush_tlb_range(vma,start,end)
 *
 *		Invalidate a range of TLB entries in the specified address
 *		space.
 *		- vma	- vm_area_struct describing address space
 *		- start	- start address (may not be aligned)
 *		- end	- end address (exclusive, may not be aligned)
 *
 *	flush_tlb_page(vma,vaddr)
 *
 *		Invalidate the specified page in the specified address space.
 *		- vma	- vm_area_struct describing address space
 *		- vaddr	- virtual address (may not be aligned)
 *
 *	flush_tlb_kernel_range(start,end)
 *
 *		Invalidate the TLB entries for the specified range of kernel
 *		virtual addresses.
 *		- start	- start address (may not be aligned)
 *		- end	- end address (exclusive, may not be aligned)
 */
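/*
 * Example caller (an illustrative sketch; "vma" and "addr" stand for a
 * hypothetical vm_area_struct pointer and user virtual address): after
 * the generic mm code changes a user PTE, it is expected to call
 *
 *	flush_tlb_page(vma, addr);
 *
 * so that stale translations for that page are removed on all CPUs in
 * the Inner Shareable domain.
 */
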
static inline void local_flush_tlb_all(void)
{
	dsb(nshst);
	__tlbi(vmalle1);
	dsb(nsh);
	isb();
}

static inline void flush_tlb_all(void)
{
	dsb(ishst);
	__tlbi(vmalle1is);
	dsb(ish);
	isb();
}

static inline void flush_tlb_mm(struct mm_struct *mm)
{
	unsigned long asid = ASID(mm) << 48;

	dsb(ishst);
	__tlbi(aside1is, asid);
	__tlbi_user(aside1is, asid);
	dsb(ish);
}

static inline void flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long uaddr)
{
	/* TLBI by-VA argument: VA[55:12] in bits [43:0], ASID in bits [63:48] */
	unsigned long addr = uaddr >> 12 | (ASID(vma->vm_mm) << 48);

	dsb(ishst);
	__tlbi(vale1is, addr);
	__tlbi_user(vale1is, addr);
	dsb(ish);
}

/*
 * This is meant to avoid soft lock-ups on large TLB flushing ranges and not
 * necessarily a performance improvement.
 */
#define MAX_TLB_RANGE	(1024UL << PAGE_SHIFT)
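
/*
 * With 4K pages, MAX_TLB_RANGE works out to 1024 pages (4MB); ranges
 * larger than this fall back to a full-ASID or full-TLB invalidation
 * below rather than issuing thousands of per-page TLBI operations.
 */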

static inline void __flush_tlb_range(struct vm_area_struct *vma,
				     unsigned long start, unsigned long end,
				     bool last_level)
{
	unsigned long asid = ASID(vma->vm_mm) << 48;
	unsigned long addr;

	if ((end - start) > MAX_TLB_RANGE) {
		flush_tlb_mm(vma->vm_mm);
		return;
	}

	start = asid | (start >> 12);
	end = asid | (end >> 12);

	dsb(ishst);
	for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12)) {
		if (last_level) {
			__tlbi(vale1is, addr);
			__tlbi_user(vale1is, addr);
		} else {
			__tlbi(vae1is, addr);
			__tlbi_user(vae1is, addr);
		}
	}
	dsb(ish);
}
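
/*
 * Example (illustrative, assuming 4K pages): flushing a three-page range
 * [va, va + 3 * PAGE_SIZE) with last_level == false issues three vae1is
 * operations, one per page (plus the USER_ASID_FLAG copies when the
 * kernel is unmapped at EL0), each carrying the VMA's ASID in bits
 * [63:48] of the TLBI argument. With last_level == true, vale1is is used
 * instead, which leaves cached intermediate (walk cache) entries intact.
 */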

static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	__flush_tlb_range(vma, start, end, false);
}

static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	unsigned long addr;

	if ((end - start) > MAX_TLB_RANGE) {
		flush_tlb_all();
		return;
	}

	start >>= 12;
	end >>= 12;

	dsb(ishst);
	for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12))
		__tlbi(vaae1is, addr);
	dsb(ish);
	isb();
}
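
/*
 * Example caller (an illustrative sketch; "start" and "end" stand for a
 * hypothetical kernel virtual address range): after tearing down a
 * vmalloc mapping, the affected kernel VAs are invalidated on all CPUs
 * with
 *
 *	flush_tlb_kernel_range(start, end);
 */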

/*
 * Used to invalidate the TLB (walk caches) corresponding to intermediate page
 * table levels (pgd/pud/pmd).
 */
static inline void __flush_tlb_pgtable(struct mm_struct *mm,
				       unsigned long uaddr)
{
	unsigned long addr = uaddr >> 12 | (ASID(mm) << 48);

	__tlbi(vae1is, addr);
	__tlbi_user(vae1is, addr);
	dsb(ish);
}
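
/*
 * Illustrative note: this helper is intended for when a page table page is
 * being removed (e.g. on pmd/pud teardown), with "uaddr" being a user
 * address covered by the removed table, so that cached partial translation
 * walks are discarded on all CPUs.
 */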

#endif /* __ASSEMBLY__ */

#endif /* __ASM_TLBFLUSH_H */