// SPDX-License-Identifier: GPL-2.0-only
/*
 * This implements the various checks for CONFIG_HARDENED_USERCOPY*,
 * which are designed to protect kernel memory from needless exposure
 * and overwrite under many unintended conditions. This code is based
 * on PAX_USERCOPY, which is:
 *
 * Copyright (C) 2001-2016 PaX Team, Bradley Spengler, Open Source
 * Security Inc.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/kstrtox.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/thread_info.h>
#include <linux/vmalloc.h>
#include <linux/atomic.h>
#include <linux/jump_label.h>
#include <asm/sections.h>
#include <trace/hooks/mm.h>
#include "slab.h"

/*
 * Checks whether a given pointer and length are contained by the current
 * stack frame (if possible).
 *
 * Returns:
 *	NOT_STACK: not at all on the stack
 *	GOOD_FRAME: fully within a valid stack frame
 *	GOOD_STACK: within the current stack (when can't frame-check exactly)
 *	BAD_STACK: error condition (invalid stack position or bad stack frame)
 */
static noinline int check_stack_object(const void *obj, unsigned long len)
{
	const void * const stack = task_stack_page(current);
	const void * const stackend = stack + THREAD_SIZE;
	int ret;

	/* Object is not on the stack at all. */
	if (obj + len <= stack || stackend <= obj)
		return NOT_STACK;

	/*
	 * Reject: object partially overlaps the stack (passing the
	 * check above means at least one end is within the stack,
	 * so if this check fails, the other end is outside the stack).
	 */
	if (obj < stack || stackend < obj + len)
		return BAD_STACK;

	/* Check if object is safely within a valid frame. */
	ret = arch_within_stack_frames(stack, stackend, obj, len);
	if (ret)
		return ret;

	/* Finally, check stack depth if possible. */
#ifdef CONFIG_ARCH_HAS_CURRENT_STACK_POINTER
	if (IS_ENABLED(CONFIG_STACK_GROWSUP)) {
		if ((void *)current_stack_pointer < obj + len)
			return BAD_STACK;
	} else {
		if (obj < (void *)current_stack_pointer)
			return BAD_STACK;
	}
#endif

	return GOOD_STACK;
}

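/*
 * Illustration of the depth check above (a sketch, addresses
 * hypothetical): on a downward-growing stack, memory below
 * current_stack_pointer is unused depth rather than live frames, so
 * an obj at stack + 0x100 while the stack pointer sits at
 * stack + 0x800 is rejected as BAD_STACK even though it lies inside
 * the stack region.
 */
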
/*
 * If these functions are reached, then CONFIG_HARDENED_USERCOPY has found
 * an unexpected state during a copy_from_user() or copy_to_user() call.
 * There are several checks being performed on the buffer by the
 * __check_object_size() function. Normal stack buffer usage should never
 * trip the checks, and kernel text addressing will always trip the check.
 * For cache objects, it is checking that only the whitelisted range of
 * bytes for a given cache is being accessed (via the cache's usersize and
 * useroffset fields). To adjust a cache whitelist, use the usercopy-aware
 * kmem_cache_create_usercopy() function to create the cache (and
 * carefully audit the whitelist range); see the sketch after
 * usercopy_abort() below.
 */
void __noreturn usercopy_abort(const char *name, const char *detail,
			       bool to_user, unsigned long offset,
			       unsigned long len)
{
	pr_emerg("Kernel memory %s attempt detected %s %s%s%s%s (offset %lu, size %lu)!\n",
		 to_user ? "exposure" : "overwrite",
		 to_user ? "from" : "to",
		 name ? : "unknown?!",
		 detail ? " '" : "", detail ? : "", detail ? "'" : "",
		 offset, len);

	/*
	 * For greater effect, it would be nice to do do_group_exit(),
	 * but BUG() actually hooks all the lock-breaking and per-arch
	 * Oops code, so that is used here instead.
	 */
	BUG();
}

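/*
 * A minimal sketch of adjusting a whitelist (not from this file; the
 * struct and cache names are hypothetical). If only the 'name' field
 * of an object is ever copied to/from userspace, creating the cache
 * this way confines usercopy to that field and lets any other access
 * abort:
 *
 *	struct example {
 *		spinlock_t lock;
 *		char name[32];
 *	};
 *
 *	cache = kmem_cache_create_usercopy("example",
 *			sizeof(struct example), 0, SLAB_PANIC,
 *			offsetof(struct example, name),
 *			sizeof_field(struct example, name), NULL);
 */
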
/* Returns true if any portion of [ptr,ptr+n) overlaps with [low,high). */
static bool overlaps(const unsigned long ptr, unsigned long n,
		     unsigned long low, unsigned long high)
{
	const unsigned long check_low = ptr;
	unsigned long check_high = check_low + n;

	/* Does not overlap if entirely above or entirely below. */
	if (check_low >= high || check_high <= low)
		return false;

	return true;
}

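/*
 * Worked example (hypothetical addresses): overlaps(0x1000, 0x10,
 * 0x100f, 0x2000) is true, since [0x1000,0x1010) and [0x100f,0x2000)
 * share byte 0x100f; overlaps(0x1000, 0xf, 0x100f, 0x2000) is false,
 * since both ranges are half-open and check_high == low.
 */
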
/* Is this address range in the kernel text area? */
static inline void check_kernel_text_object(const unsigned long ptr,
					    unsigned long n, bool to_user)
{
	unsigned long textlow = (unsigned long)_stext;
	unsigned long texthigh = (unsigned long)_etext;
	unsigned long textlow_linear, texthigh_linear;

	if (overlaps(ptr, n, textlow, texthigh))
		usercopy_abort("kernel text", NULL, to_user, ptr - textlow, n);

	/*
	 * Some architectures have virtual memory mappings with a secondary
	 * mapping of the kernel text, i.e. there is more than one virtual
	 * kernel address that points to the kernel image. This is usually
	 * the case when there is a separate linear mapping of physical
	 * memory, in which case __pa() is not just the reverse of __va().
	 * This can be detected and checked:
	 */
	textlow_linear = (unsigned long)lm_alias(textlow);
	/* No different mapping: we're done. */
	if (textlow_linear == textlow)
		return;

	/* Check the secondary mapping... */
	texthigh_linear = (unsigned long)lm_alias(texthigh);
	if (overlaps(ptr, n, textlow_linear, texthigh_linear))
		usercopy_abort("linear kernel text", NULL, to_user,
			       ptr - textlow_linear, n);
}

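/*
 * For reference, lm_alias(x) is __va(__pa_symbol(x)) (see
 * <linux/mm.h>): e.g. on arm64 the image mapping of _stext and its
 * linear-map alias are distinct addresses, so the second overlaps()
 * test above also rejects copies touching the aliased text window.
 */
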
static inline void check_bogus_address(const unsigned long ptr, unsigned long n,
				       bool to_user)
{
	/* Reject if object wraps past end of memory. */
	if (ptr + (n - 1) < ptr)
		usercopy_abort("wrapped address", NULL, to_user, 0, ptr + n);

	/* Reject if NULL or ZERO-allocation. */
	if (ZERO_OR_NULL_PTR(ptr))
		usercopy_abort("null address", NULL, to_user, ptr, n);
}

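/*
 * Worked example for the wrap check (hypothetical values): with
 * ptr == ULONG_MAX - 3 and n == 8, ptr + (n - 1) wraps around to 3,
 * which is below ptr, so the copy is rejected. Using n - 1 rather
 * than n keeps an object that ends exactly at the top of the address
 * space legal.
 */
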
static inline void check_heap_object(const void *ptr, unsigned long n,
				     bool to_user)
{
	unsigned long addr = (unsigned long)ptr;
	unsigned long offset;
	struct folio *folio;
	bool bypass = false;

	/* A kmap'd object must not span beyond its single mapped page. */
	if (is_kmap_addr(ptr)) {
		offset = offset_in_page(ptr);
		if (n > PAGE_SIZE - offset)
			usercopy_abort("kmap", NULL, to_user, offset, n);
		return;
	}

	/*
	 * For a vmalloc address, the object must fit within its vmap
	 * area; the lookup is only attempted when page faults are
	 * enabled.
	 */
	if (is_vmalloc_addr(ptr) && !pagefault_disabled()) {
		struct vmap_area *area = find_vmap_area(addr);

		if (!area)
			usercopy_abort("vmalloc", "no area", to_user, 0, n);

		if (n > area->va_end - addr) {
			offset = addr - area->va_start;
			usercopy_abort("vmalloc", NULL, to_user, offset, n);
		}
		return;
	}

	if (!virt_addr_valid(ptr))
		return;

	folio = virt_to_folio(ptr);

	if (folio_test_slab(folio)) {
		/* Check slab allocator for flags and size. */
		__check_heap_object(ptr, n, folio_slab(folio), to_user);
	} else if (folio_test_large(folio)) {
		/* An Android vendor hook may opt out of the large-folio check. */
		trace_android_vh_check_heap_object_bypass(folio, &bypass);
		if (bypass)
			return;

		offset = ptr - folio_address(folio);
		if (n > folio_size(folio) - offset)
			usercopy_abort("page alloc", NULL, to_user, offset, n);
	}
}

static DEFINE_STATIC_KEY_FALSE_RO(bypass_usercopy_checks);

/*
 * Validates that the given object is:
 * - not a bogus address
 * - fully contained by stack (or stack frame, when available)
 * - fully within SLAB object (or object whitelist area, when available)
 * - not in kernel text
 */
void __check_object_size(const void *ptr, unsigned long n, bool to_user)
{
	if (static_branch_unlikely(&bypass_usercopy_checks))
		return;

	/* Skip all tests if size is zero. */
	if (!n)
		return;

	/* Check for invalid addresses. */
	check_bogus_address((const unsigned long)ptr, n, to_user);

	/* Check for bad stack object. */
	switch (check_stack_object(ptr, n)) {
	case NOT_STACK:
		/* Object is not touching the current process stack. */
		break;
	case GOOD_FRAME:
	case GOOD_STACK:
		/*
		 * Object is either in the correct frame (when it
		 * is possible to check) or just generally on the
		 * process stack (when frame checking not available).
		 */
		return;
	default:
		usercopy_abort("process stack", NULL, to_user,
#ifdef CONFIG_ARCH_HAS_CURRENT_STACK_POINTER
			IS_ENABLED(CONFIG_STACK_GROWSUP) ?
				ptr - (void *)current_stack_pointer :
				(void *)current_stack_pointer - ptr,
#else
			0,
#endif
			n);
	}

	/* Check for bad heap object. */
	check_heap_object(ptr, n, to_user);

	/* Check for object in kernel to avoid text exposure. */
	check_kernel_text_object((const unsigned long)ptr, n, to_user);
}
EXPORT_SYMBOL(__check_object_size);

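/*
 * How this is reached (a sketch; ubuf, kobj, and len are
 * hypothetical): check_object_size() in <linux/thread_info.h> calls
 * __check_object_size() for non-constant sizes, so an ordinary
 * driver copy such as
 *
 *	if (copy_to_user(ubuf, kobj, len))
 *		return -EFAULT;
 *
 * is validated here before any bytes are copied.
 */
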
static bool enable_checks __initdata = true;

static int __init parse_hardened_usercopy(char *str)
{
	if (kstrtobool(str, &enable_checks))
		pr_warn("Invalid option string for hardened_usercopy: '%s'\n",
			str);
	return 1;
}

__setup("hardened_usercopy=", parse_hardened_usercopy);

static int __init set_hardened_usercopy(void)
{
	if (!enable_checks)
		static_branch_enable(&bypass_usercopy_checks);
	return 1;
}

late_initcall(set_hardened_usercopy);
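
/*
 * Example (kernel command line, not code): booting with
 * "hardened_usercopy=off" makes parse_hardened_usercopy() clear
 * enable_checks, and set_hardened_usercopy() then enables the bypass
 * static branch so __check_object_size() returns immediately.
 */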