/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_MMU_CONTEXT_H
#define _ASM_X86_MMU_CONTEXT_H

#include <asm/desc.h>
#include <linux/atomic.h>
#include <linux/mm_types.h>
#include <linux/pkeys.h>

#include <trace/events/tlb.h>

#include <asm/tlbflush.h>
#include <asm/paravirt.h>
#include <asm/debugreg.h>

extern atomic64_t last_mm_ctx_id;

#ifndef CONFIG_PARAVIRT_XXL
static inline void paravirt_activate_mm(struct mm_struct *prev,
					struct mm_struct *next)
{
}
#endif /* !CONFIG_PARAVIRT_XXL */

#ifdef CONFIG_PERF_EVENTS
DECLARE_STATIC_KEY_FALSE(rdpmc_never_available_key);
DECLARE_STATIC_KEY_FALSE(rdpmc_always_available_key);
void cr4_update_pce(void *ignored);
#endif
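
/*
 * The static keys above gate user-space RDPMC: cr4_update_pce() is
 * expected to set or clear CR4.PCE on the calling CPU accordingly. A
 * rough sketch of the decision (illustrative only; the real logic lives
 * in arch/x86/events/core.c and also consults the current mm):
 *
 *	if (static_branch_unlikely(&rdpmc_always_available_key) ||
 *	    (!static_branch_unlikely(&rdpmc_never_available_key) &&
 *	     atomic_read(&mm->context.perf_rdpmc_allowed)))
 *		cr4_set_bits_irqsoff(X86_CR4_PCE);
 *	else
 *		cr4_clear_bits_irqsoff(X86_CR4_PCE);
 */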

#ifdef CONFIG_MODIFY_LDT_SYSCALL
/*
 * ldt_structs can be allocated, used, and freed, but they are never
 * modified while live.
 */
struct ldt_struct {
	/*
	 * Xen requires page-aligned LDTs with special permissions. This is
	 * needed to prevent us from installing evil descriptors such as
	 * call gates. On native, we could merge the ldt_struct and LDT
	 * allocations, but it's not worth trying to optimize.
	 */
	struct desc_struct *entries;
	unsigned int nr_entries;

	/*
	 * If PTI is in use, then the entries array is not mapped while we're
	 * in user mode. The whole array will be aliased at the address
	 * given by ldt_slot_va(slot). We use two slots so that we can
	 * allocate, map, and enable a new LDT without invalidating the
	 * mapping of an older, still-in-use LDT.
	 *
	 * slot will be -1 if this LDT doesn't have an alias mapping.
	 */
	int slot;
};
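
/*
 * A sketch of how the two-slot scheme above plays out (illustrative;
 * the real implementation is in arch/x86/kernel/ldt.c): a new LDT is
 * mapped into whichever slot the old one is not occupying, so the old
 * alias stays usable until the new LDT is switched in.
 *
 *	// hypothetical helper, for illustration only
 *	static int pick_ldt_slot(const struct ldt_struct *old_ldt)
 *	{
 *		// slot is 0 or 1 while mapped, -1 while unmapped
 *		return old_ldt ? !old_ldt->slot : 0;
 *	}
 */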

/*
 * Used for LDT copy/destruction.
 */
static inline void init_new_context_ldt(struct mm_struct *mm)
{
	mm->context.ldt = NULL;
	init_rwsem(&mm->context.ldt_usr_sem);
}
int ldt_dup_context(struct mm_struct *oldmm, struct mm_struct *mm);
void destroy_context_ldt(struct mm_struct *mm);
void ldt_arch_exit_mmap(struct mm_struct *mm);
#else /* CONFIG_MODIFY_LDT_SYSCALL */
static inline void init_new_context_ldt(struct mm_struct *mm) { }
static inline int ldt_dup_context(struct mm_struct *oldmm,
				  struct mm_struct *mm)
{
	return 0;
}
static inline void destroy_context_ldt(struct mm_struct *mm) { }
static inline void ldt_arch_exit_mmap(struct mm_struct *mm) { }
#endif

#ifdef CONFIG_MODIFY_LDT_SYSCALL
extern void load_mm_ldt(struct mm_struct *mm);
extern void switch_ldt(struct mm_struct *prev, struct mm_struct *next);
#else
static inline void load_mm_ldt(struct mm_struct *mm)
{
	clear_LDT();
}
static inline void switch_ldt(struct mm_struct *prev, struct mm_struct *next)
{
	DEBUG_LOCKS_WARN_ON(preemptible());
}
#endif

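/*
 * Called when a kernel thread is scheduled in: the CPU keeps the old
 * mm's page tables loaded instead of switching, and marks itself lazy
 * so remote TLB flushes for that mm can be deferred until the next
 * real switch (see arch/x86/mm/tlb.c).
 */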
extern void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk);

/*
 * Init a new mm.  Used on mm copies, like at fork()
 * and on mm's that are brand-new, like at execve().
 */
static inline int init_new_context(struct task_struct *tsk,
				   struct mm_struct *mm)
{
	mutex_init(&mm->context.lock);

	mm->context.ctx_id = atomic64_inc_return(&last_mm_ctx_id);
	atomic64_set(&mm->context.tlb_gen, 0);

#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
	if (cpu_feature_enabled(X86_FEATURE_OSPKE)) {
		/* pkey 0 is the default and allocated implicitly */
		mm->context.pkey_allocation_map = 0x1;
		/* -1 means unallocated or invalid */
		mm->context.execute_only_pkey = -1;
	}
#endif
	init_new_context_ldt(mm);
	return 0;
}
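
/*
 * ctx_id and tlb_gen drive the lazy TLB-flush tracking in
 * arch/x86/mm/tlb.c: ctx_id uniquely identifies the mm for the life of
 * the system, and tlb_gen counts flush-relevant updates to its page
 * tables. A simplified sketch of how a CPU decides whether its TLB is
 * stale for an mm (illustrative; the real code handles more cases):
 *
 *	u64 mm_gen  = atomic64_read(&mm->context.tlb_gen);
 *	u64 cpu_gen = this_cpu_read(cpu_tlbstate.ctxs[asid].tlb_gen);
 *
 *	if (cpu_gen < mm_gen)
 *		flush();	// this CPU missed one or more flushes
 */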
static inline void destroy_context(struct mm_struct *mm)
{
	destroy_context_ldt(mm);
}

extern void switch_mm(struct mm_struct *prev, struct mm_struct *next,
		      struct task_struct *tsk);

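/*
 * Defining switch_mm_irqs_off to itself tells generic code (which tests
 * "#ifndef switch_mm_irqs_off" and otherwise falls back to plain
 * switch_mm()) that x86 supplies its own IRQs-off variant.
 */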
extern void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
			       struct task_struct *tsk);
#define switch_mm_irqs_off switch_mm_irqs_off

#define activate_mm(prev, next)			\
do {						\
	paravirt_activate_mm((prev), (next));	\
	switch_mm((prev), (next), NULL);	\
} while (0)

#ifdef CONFIG_X86_32
#define deactivate_mm(tsk, mm)			\
do {						\
	lazy_load_gs(0);			\
} while (0)
#else
#define deactivate_mm(tsk, mm)			\
do {						\
	load_gs_index(0);			\
	loadsegment(fs, 0);			\
} while (0)
#endif

static inline void arch_dup_pkeys(struct mm_struct *oldmm,
				  struct mm_struct *mm)
{
#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
	if (!cpu_feature_enabled(X86_FEATURE_OSPKE))
		return;

	/* Duplicate the oldmm pkey state in mm: */
	mm->context.pkey_allocation_map = oldmm->context.pkey_allocation_map;
	mm->context.execute_only_pkey   = oldmm->context.execute_only_pkey;
#endif
}

static inline int arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
{
	arch_dup_pkeys(oldmm, mm);
	paravirt_arch_dup_mmap(oldmm, mm);
	return ldt_dup_context(oldmm, mm);
}

static inline void arch_exit_mmap(struct mm_struct *mm)
{
	paravirt_arch_exit_mmap(mm);
	ldt_arch_exit_mmap(mm);
}

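/*
 * Reports whether the mm belongs to a 64-bit task; with
 * CONFIG_IA32_EMULATION, 32-bit compat processes are flagged via
 * context.ia32_compat.
 */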
#ifdef CONFIG_X86_64
static inline bool is_64bit_mm(struct mm_struct *mm)
{
	return !IS_ENABLED(CONFIG_IA32_EMULATION) ||
	       mm->context.ia32_compat != TIF_IA32;
}
#else
static inline bool is_64bit_mm(struct mm_struct *mm)
{
	return false;
}
#endif

static inline void arch_unmap(struct mm_struct *mm, unsigned long start,
			      unsigned long end)
{
}


/*
 * We only want to enforce protection keys on the current process
 * because we effectively have no access to PKRU for other
 * processes or any way to tell *which* PKRU in a threaded
 * process we could use.
 *
 * So do not enforce things if the VMA is not from the current
 * mm, or if we are in a kernel thread.
 */
static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
		bool write, bool execute, bool foreign)
{
	/* pkeys never affect instruction fetches */
	if (execute)
		return true;
	/* allow access if the VMA is not one from this process */
	if (foreign || vma_is_foreign(vma))
		return true;
	return __pkru_allows_pkey(vma_pkey(vma), write);
}
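
/*
 * For reference, __pkru_allows_pkey() boils down to checking the two
 * PKRU bits for the key: bit 2*pkey disables all data access (AD) and
 * bit 2*pkey + 1 disables writes (WD). A simplified sketch (the real
 * helpers, __pkru_allows_read()/__pkru_allows_write(), live in
 * asm/pgtable.h):
 *
 *	bool pkru_allows(u32 pkru, u16 pkey, bool write)
 *	{
 *		if (pkru & (1u << (pkey * 2)))		// access-disable
 *			return false;
 *		if (write && (pkru & (1u << (pkey * 2 + 1))))
 *			return false;			// write-disable
 *		return true;
 *	}
 */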

unsigned long __get_current_cr3_fast(void);

#endif /* _ASM_X86_MMU_CONTEXT_H */