/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_BOOK3S_32_KUP_H
#define _ASM_POWERPC_BOOK3S_32_KUP_H
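
/*
 * Kernel Userspace Access Protection (KUAP) on book3s/32 is implemented
 * with the Ks (supervisor-state key) bit of the segment registers covering
 * user addresses: with Ks set, user pages that are normally read-write are
 * read-only from kernel mode, so stray kernel writes to userspace fault.
 * The segment key cannot revoke read permission, so this implementation
 * protects against writes only.  Kernel Userspace Execution Prevention
 * (KUEP) is controlled separately via CONFIG_PPC_KUEP.
 */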

#include <asm/bug.h>
#include <asm/book3s/32/mmu-hash.h>
#include <asm/mmu.h>
#include <asm/synch.h>

#ifndef __ASSEMBLY__

#include <linux/jump_label.h>

extern struct static_key_false disable_kuap_key;

static __always_inline bool kuap_is_disabled(void)
{
	return !IS_ENABLED(CONFIG_PPC_KUAP) || static_branch_unlikely(&disable_kuap_key);
}

static __always_inline bool kuep_is_disabled(void)
{
	return !IS_ENABLED(CONFIG_PPC_KUEP);
}

#ifdef CONFIG_PPC_KUAP

#include <linux/sched.h>

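/*
 * current->thread.kuap tracks which user segment, if any, is currently
 * unlocked for kernel writes: KUAP_NONE means everything is locked,
 * KUAP_ALL means every user segment is unlocked, and any other value is
 * the user address whose 256 Mbytes segment is unlocked.
 */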
#define KUAP_NONE	(~0UL)
#define KUAP_ALL	(~1UL)

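/*
 * Lock/unlock a single user segment: the Ks bit of the segment register
 * selected by the top four bits of 'addr' is set or cleared with a
 * read-modify-write of that register.  mtsr is not context synchronizing,
 * hence the isync after each update.
 */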
static inline void kuap_lock_one(unsigned long addr)
{
	mtsr(mfsr(addr) | SR_KS, addr);
	isync();	/* Context sync required after mtsr() */
}

static inline void kuap_unlock_one(unsigned long addr)
{
	mtsr(mfsr(addr) & ~SR_KS, addr);
	isync();	/* Context sync required after mtsr() */
}

static inline void kuap_lock_all(void)
{
	update_user_segments(mfsr(0) | SR_KS);
	isync();	/* Context sync required after mtsr() */
}

static inline void kuap_unlock_all(void)
{
	update_user_segments(mfsr(0) & ~SR_KS);
	isync();	/* Context sync required after mtsr() */
}

void kuap_lock_all_ool(void);
void kuap_unlock_all_ool(void);

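/*
 * kuap_lock()/kuap_unlock() dispatch on the saved state: a real address
 * toggles only that one segment, KUAP_ALL toggles every user segment,
 * either inline or through the out-of-line helpers above (the latter is
 * presumably preferred at call sites where inlining the full segment walk
 * would bloat the code).
 */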
static inline void kuap_lock(unsigned long addr, bool ool)
{
	if (likely(addr != KUAP_ALL))
		kuap_lock_one(addr);
	else if (!ool)
		kuap_lock_all();
	else
		kuap_lock_all_ool();
}

static inline void kuap_unlock(unsigned long addr, bool ool)
{
	if (likely(addr != KUAP_ALL))
		kuap_unlock_one(addr);
	else if (!ool)
		kuap_unlock_all();
	else
		kuap_unlock_all_ool();
}

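/*
 * On interrupt/exception entry: record the task's current unlock state in
 * pt_regs and close any open access window so it does not stay open while
 * the interrupt is handled.
 */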
static inline void kuap_save_and_lock(struct pt_regs *regs)
{
	unsigned long kuap = current->thread.kuap;

	if (kuap_is_disabled())
		return;

	regs->kuap = kuap;
	if (unlikely(kuap == KUAP_NONE))
		return;

	current->thread.kuap = KUAP_NONE;
	kuap_lock(kuap, false);
}

static inline void kuap_user_restore(struct pt_regs *regs)
{
}

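/*
 * On return to kernel context: re-lock anything that was left unlocked
 * while the interrupt ran, then reopen the window recorded by
 * kuap_save_and_lock(), if there was one.
 */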
static inline void kuap_kernel_restore(struct pt_regs *regs, unsigned long kuap)
{
	if (kuap_is_disabled())
		return;

	if (unlikely(kuap != KUAP_NONE)) {
		current->thread.kuap = KUAP_NONE;
		kuap_lock(kuap, false);
	}

	if (likely(regs->kuap == KUAP_NONE))
		return;

	current->thread.kuap = regs->kuap;

	kuap_unlock(regs->kuap, false);
}

static inline unsigned long kuap_get_and_assert_locked(void)
{
	unsigned long kuap = current->thread.kuap;

	if (kuap_is_disabled())
		return KUAP_NONE;

	WARN_ON_ONCE(IS_ENABLED(CONFIG_PPC_KUAP_DEBUG) && kuap != KUAP_NONE);

	return kuap;
}

static inline void kuap_assert_locked(void)
{
	kuap_get_and_assert_locked();
}

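/*
 * allow_user_access()/prevent_user_access() open and close the user access
 * window around copy to/from user style accesses.  Since only writes are
 * protected here, read-only directions are a no-op; for writes, the segment
 * containing 'to' is unlocked and its address remembered in
 * current->thread.kuap.  'dir' must be a compile time constant.
 */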
static __always_inline void allow_user_access(void __user *to, const void __user *from,
					      u32 size, unsigned long dir)
{
	if (kuap_is_disabled())
		return;

	BUILD_BUG_ON(!__builtin_constant_p(dir));

	if (!(dir & KUAP_WRITE))
		return;

	current->thread.kuap = (__force u32)to;
	kuap_unlock_one((__force u32)to);
}

static __always_inline void prevent_user_access(unsigned long dir)
{
	u32 kuap = current->thread.kuap;

	if (kuap_is_disabled())
		return;

	BUILD_BUG_ON(!__builtin_constant_p(dir));

	if (!(dir & KUAP_WRITE))
		return;

	current->thread.kuap = KUAP_NONE;
	kuap_lock(kuap, true);
}

static inline unsigned long prevent_user_access_return(void)
{
	unsigned long flags = current->thread.kuap;

	if (kuap_is_disabled())
		return KUAP_NONE;

	if (flags != KUAP_NONE) {
		current->thread.kuap = KUAP_NONE;
		kuap_lock(flags, true);
	}

	return flags;
}

static inline void restore_user_access(unsigned long flags)
{
	if (kuap_is_disabled())
		return;

	if (flags != KUAP_NONE) {
		current->thread.kuap = flags;
		kuap_unlock(flags, true);
	}
}
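
/*
 * Illustrative only (not taken from the real callers, which live in the
 * uaccess machinery): a raw copy-to-user style helper is expected to
 * bracket the user write roughly like
 *
 *	allow_user_access(to, NULL, size, KUAP_WRITE);
 *	... perform the store to 'to' ...
 *	prevent_user_access(KUAP_WRITE);
 *
 * while code that must temporarily close an already-open window pairs
 * prevent_user_access_return() with restore_user_access().
 */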

/*
 * Called from the page fault handler to tell whether a kernel mode write
 * fault on a user address is a genuine KUAP violation.  A fault in a
 * segment other than the one currently unlocked (an access crossing a
 * 256 Mbytes segment boundary) is not treated as a violation: KUAP_ALL is
 * recorded in regs->kuap so that every user segment gets unlocked on
 * return and the access can complete.
 */
static inline bool
bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
{
	unsigned long kuap = regs->kuap;

	if (kuap_is_disabled())
		return false;

	if (!is_write || kuap == KUAP_ALL)
		return false;
	if (kuap == KUAP_NONE)
		return true;

	/* If faulting address doesn't match unlocked segment, unlock all */
	if ((kuap ^ address) & 0xf0000000)
		regs->kuap = KUAP_ALL;

	return false;
}

#endif /* CONFIG_PPC_KUAP */

#endif /* __ASSEMBLY__ */

#endif /* _ASM_POWERPC_BOOK3S_32_KUP_H */