// SPDX-License-Identifier: GPL-2.0
/*
 * Clang Control Flow Integrity (CFI) error and slowpath handling.
 *
 * Copyright (C) 2019 Google LLC
 */

#include <linux/hardirq.h>
#include <linux/kallsyms.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/printk.h>
#include <linux/ratelimit.h>
#include <linux/rcupdate.h>
#include <linux/vmalloc.h>
#include <asm/cacheflush.h>
#include <asm/set_memory.h>

/* Compiler-defined handler names */
#ifdef CONFIG_CFI_PERMISSIVE
#define cfi_failure_handler     __ubsan_handle_cfi_check_fail
#define cfi_slowpath_handler    __cfi_slowpath_diag
#else /* enforcing */
#define cfi_failure_handler     __ubsan_handle_cfi_check_fail_abort
#define cfi_slowpath_handler    __cfi_slowpath
#endif /* CONFIG_CFI_PERMISSIVE */

static inline void handle_cfi_failure(void *ptr)
{
        if (IS_ENABLED(CONFIG_CFI_PERMISSIVE))
                WARN_RATELIMIT(1, "CFI failure (target: %pS):\n", ptr);
        else
                panic("CFI failure (target: %pS)\n", ptr);
}

#ifdef CONFIG_MODULES
#ifdef CONFIG_CFI_CLANG_SHADOW
/*
 * Index type. A 16-bit index can address at most (2^16)-2 pages (taking
 * into account SHADOW_INVALID), i.e. ~256M with 4k pages.
 */
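/*
 * For example, with 4 KiB pages: ((1 << 16) - 2) * 4 KiB = 262136 KiB,
 * i.e. just under 256 MiB, which is where the figure above comes from.
 */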
typedef u16 shadow_t;
#define SHADOW_INVALID          ((shadow_t)~0UL)

struct cfi_shadow {
        /* Page index for the beginning of the shadow */
        unsigned long base;
        /* An array of __cfi_check locations (as indices to the shadow) */
        shadow_t shadow[1];
} __packed;

/*
 * The shadow covers ~128M from the beginning of the module region. If
 * the region is larger, we fall back to __module_address for the rest.
 */
#define __SHADOW_RANGE          (_UL(SZ_128M) >> PAGE_SHIFT)

/* The in-memory size of struct cfi_shadow, always at least one page */
#define __SHADOW_PAGES          ((__SHADOW_RANGE * sizeof(shadow_t)) >> PAGE_SHIFT)
#define SHADOW_PAGES            max(1UL, __SHADOW_PAGES)
#define SHADOW_SIZE             (SHADOW_PAGES << PAGE_SHIFT)

/* The actual size of the shadow array, minus metadata */
#define SHADOW_ARR_SIZE         (SHADOW_SIZE - offsetof(struct cfi_shadow, shadow))
#define SHADOW_ARR_SLOTS        (SHADOW_ARR_SIZE / sizeof(shadow_t))
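/*
 * For illustration, assuming 4 KiB pages and a 64-bit unsigned long:
 * __SHADOW_RANGE = 128 MiB / 4 KiB = 32768 slots, so the array needs
 * 32768 * sizeof(shadow_t) = 64 KiB, giving SHADOW_PAGES = 16 and
 * SHADOW_SIZE = 64 KiB. Subtracting the 8-byte 'base' field yields
 * SHADOW_ARR_SIZE = 65528 bytes and SHADOW_ARR_SLOTS = 32764.
 */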

static DEFINE_MUTEX(shadow_update_lock);
static struct cfi_shadow __rcu *cfi_shadow __read_mostly;

/* Returns the index in the shadow for the given address */
static inline int ptr_to_shadow(const struct cfi_shadow *s, unsigned long ptr)
{
        unsigned long index;
        unsigned long page = ptr >> PAGE_SHIFT;

        if (unlikely(page < s->base))
                return -1;      /* Outside of module area */

        index = page - s->base;

        if (index >= SHADOW_ARR_SLOTS)
                return -1;      /* Cannot be addressed with shadow */

        return (int)index;
}

/* Returns the page address for an index in the shadow */
static inline unsigned long shadow_to_ptr(const struct cfi_shadow *s,
                int index)
{
        if (unlikely(index < 0 || index >= SHADOW_ARR_SLOTS))
                return 0;

        return (s->base + index) << PAGE_SHIFT;
}

/* Returns the __cfi_check function address for the given shadow location */
static inline unsigned long shadow_to_check_fn(const struct cfi_shadow *s,
                int index)
{
        if (unlikely(index < 0 || index >= SHADOW_ARR_SLOTS))
                return 0;

        if (unlikely(s->shadow[index] == SHADOW_INVALID))
                return 0;

        /* __cfi_check is always page aligned */
        return (s->base + s->shadow[index]) << PAGE_SHIFT;
}
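
/*
 * Taken together, these helpers resolve a target address to its module's
 * __cfi_check function: ptr_to_shadow() maps the address to a page index,
 * and shadow_to_check_fn() returns the check function recorded for that
 * page (see ptr_to_check_fn() below). A negative index or a zero return
 * value means the address cannot be resolved through the shadow.
 */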

static void prepare_next_shadow(const struct cfi_shadow __rcu *prev,
                struct cfi_shadow *next)
{
        int i, index, check;

        /* Mark everything invalid */
        memset(next->shadow, 0xFF, SHADOW_ARR_SIZE);

        if (!prev)
                return; /* No previous shadow */

        /* If the base address didn't change, an update is not needed */
        if (prev->base == next->base) {
                memcpy(next->shadow, prev->shadow, SHADOW_ARR_SIZE);
                return;
        }

        /* Convert the previous shadow to the new address range */
        for (i = 0; i < SHADOW_ARR_SLOTS; ++i) {
                if (prev->shadow[i] == SHADOW_INVALID)
                        continue;

                index = ptr_to_shadow(next, shadow_to_ptr(prev, i));
                if (index < 0)
                        continue;

                check = ptr_to_shadow(next,
                                shadow_to_check_fn(prev, prev->shadow[i]));
                if (check < 0)
                        continue;

                next->shadow[index] = (shadow_t)check;
        }
}

static void add_module_to_shadow(struct cfi_shadow *s, struct module *mod,
                unsigned long min_addr, unsigned long max_addr)
{
        int check_index;
        unsigned long check = (unsigned long)mod->cfi_check;
        unsigned long ptr;

        if (unlikely(!PAGE_ALIGNED(check))) {
                pr_warn("cfi: not using shadow for module %s\n", mod->name);
                return;
        }

        check_index = ptr_to_shadow(s, check);
        if (check_index < 0)
                return; /* Module not addressable with shadow */

        /* For each page, store the check function index in the shadow */
        for (ptr = min_addr; ptr <= max_addr; ptr += PAGE_SIZE) {
                int index = ptr_to_shadow(s, ptr);

                if (index >= 0) {
                        /* Each page must only contain one module */
                        WARN_ON_ONCE(s->shadow[index] != SHADOW_INVALID);
                        s->shadow[index] = (shadow_t)check_index;
                }
        }
}

static void remove_module_from_shadow(struct cfi_shadow *s, struct module *mod,
                unsigned long min_addr, unsigned long max_addr)
{
        unsigned long ptr;

        for (ptr = min_addr; ptr <= max_addr; ptr += PAGE_SIZE) {
                int index = ptr_to_shadow(s, ptr);

                if (index >= 0)
                        s->shadow[index] = SHADOW_INVALID;
        }
}

typedef void (*update_shadow_fn)(struct cfi_shadow *, struct module *,
                unsigned long min_addr, unsigned long max_addr);

static void update_shadow(struct module *mod, unsigned long base_addr,
                update_shadow_fn fn)
{
        struct cfi_shadow *prev;
        struct cfi_shadow *next;
        unsigned long min_addr, max_addr;

        next = (struct cfi_shadow *)vmalloc(SHADOW_SIZE);
        WARN_ON(!next);

        mutex_lock(&shadow_update_lock);
        prev = rcu_dereference_protected(cfi_shadow,
                                         mutex_is_locked(&shadow_update_lock));

        if (next) {
                next->base = base_addr >> PAGE_SHIFT;
                prepare_next_shadow(prev, next);

                min_addr = (unsigned long)mod->core_layout.base;
                max_addr = min_addr + mod->core_layout.text_size;
                fn(next, mod, min_addr & PAGE_MASK, max_addr & PAGE_MASK);

                set_memory_ro((unsigned long)next, SHADOW_PAGES);
        }

        rcu_assign_pointer(cfi_shadow, next);
        mutex_unlock(&shadow_update_lock);
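        /*
         * Wait for all existing readers to finish with the previous
         * shadow before it is made writable again and freed below.
         */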
        synchronize_rcu_expedited();

        if (prev) {
                set_memory_rw((unsigned long)prev, SHADOW_PAGES);
                vfree(prev);
        }
}

void cfi_module_add(struct module *mod, unsigned long base_addr)
{
        update_shadow(mod, base_addr, add_module_to_shadow);
}

void cfi_module_remove(struct module *mod, unsigned long base_addr)
{
        update_shadow(mod, base_addr, remove_module_from_shadow);
}

static inline cfi_check_fn ptr_to_check_fn(const struct cfi_shadow __rcu *s,
                unsigned long ptr)
{
        int index;

        if (unlikely(!s))
                return NULL; /* No shadow available */

        index = ptr_to_shadow(s, ptr);
        if (index < 0)
                return NULL; /* Cannot be addressed with shadow */

        return (cfi_check_fn)shadow_to_check_fn(s, index);
}

static inline cfi_check_fn __find_shadow_check_fn(unsigned long ptr)
{
        cfi_check_fn fn;

        rcu_read_lock_sched_notrace();
        fn = ptr_to_check_fn(rcu_dereference_sched(cfi_shadow), ptr);
        rcu_read_unlock_sched_notrace();

        return fn;
}

#else /* !CONFIG_CFI_CLANG_SHADOW */

static inline cfi_check_fn __find_shadow_check_fn(unsigned long ptr)
{
        return NULL;
}

#endif /* CONFIG_CFI_CLANG_SHADOW */

static inline cfi_check_fn __find_module_check_fn(unsigned long ptr)
{
        cfi_check_fn fn = NULL;
        struct module *mod;

        rcu_read_lock_sched_notrace();
        mod = __module_address(ptr);
        if (mod)
                fn = mod->cfi_check;
        rcu_read_unlock_sched_notrace();

        return fn;
}

static inline cfi_check_fn find_check_fn(unsigned long ptr)
{
        bool rcu;
        cfi_check_fn fn = NULL;

        /*
         * Indirect call checks can happen when RCU is not watching. Both
         * the shadow and __module_address use RCU, so we need to wake it
         * up before proceeding. Use rcu_nmi_enter/exit() as these calls
         * can happen anywhere.
         */
        rcu = rcu_is_watching();
        if (!rcu)
                rcu_nmi_enter();

        if (IS_ENABLED(CONFIG_CFI_CLANG_SHADOW)) {
                fn = __find_shadow_check_fn(ptr);
                if (fn)
                        goto out;
        }

        if (is_kernel_text(ptr)) {
                fn = __cfi_check;
                goto out;
        }

        fn = __find_module_check_fn(ptr);

out:
        if (!rcu)
                rcu_nmi_exit();

        return fn;
}

void cfi_slowpath_handler(uint64_t id, void *ptr, void *diag)
{
        cfi_check_fn fn = find_check_fn((unsigned long)ptr);

        if (!IS_ENABLED(CONFIG_CFI_PERMISSIVE))
                diag = NULL;

        if (likely(fn))
                fn(id, ptr, diag);
        else /* Don't allow unchecked modules */
                handle_cfi_failure(ptr);
}

#else /* !CONFIG_MODULES */

void cfi_slowpath_handler(uint64_t id, void *ptr, void *diag)
{
        handle_cfi_failure(ptr); /* No modules */
}

#endif /* CONFIG_MODULES */

EXPORT_SYMBOL(cfi_slowpath_handler);

void cfi_failure_handler(void *data, void *ptr, void *vtable)
{
        handle_cfi_failure(ptr);
}
EXPORT_SYMBOL(cfi_failure_handler);

void __cfi_check_fail(void *data, void *ptr)
{
        handle_cfi_failure(ptr);
}