// SPDX-License-Identifier: GPL-2.0-only
/*
 * This implements the various checks for CONFIG_HARDENED_USERCOPY*,
 * which are designed to protect kernel memory from needless exposure
 * and overwrite under many unintended conditions. This code is based
 * on PAX_USERCOPY, which is:
 *
 * Copyright (C) 2001-2016 PaX Team, Bradley Spengler, Open Source
 * Security Inc.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/thread_info.h>
#include <linux/atomic.h>
#include <linux/jump_label.h>
#include <asm/sections.h>

/*
 * Checks whether a given pointer and length are contained by the
 * current stack frame (if possible).
 *
 * Returns:
 *	NOT_STACK: not at all on the stack
 *	GOOD_FRAME: fully within a valid stack frame
 *	GOOD_STACK: fully on the stack (when frame checking is not available)
 *	BAD_STACK: error condition (invalid stack position or bad stack frame)
 */
static noinline int check_stack_object(const void *obj, unsigned long len)
{
	const void * const stack = task_stack_page(current);
	const void * const stackend = stack + THREAD_SIZE;
	int ret;

	/* Object is not on the stack at all. */
	if (obj + len <= stack || stackend <= obj)
		return NOT_STACK;

	/*
	 * Reject: object partially overlaps the stack (passing the
	 * check above means at least one end is within the stack,
	 * so if this check fails, the other end is outside the stack).
	 */
	if (obj < stack || stackend < obj + len)
		return BAD_STACK;

	/* Check if object is safely within a valid frame. */
	ret = arch_within_stack_frames(stack, stackend, obj, len);
	if (ret)
		return ret;

	return GOOD_STACK;
}

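/*
 * Worked example (illustrative, with hypothetical numbers): assume
 * THREAD_SIZE is 16384 and the stack base is S. An object at S + 256
 * with len 64 lies fully inside [S, S + 16384), passes both range
 * checks above, and is handed to arch_within_stack_frames(). An object
 * at S - 16 with len 64 starts below the stack but ends inside it, so
 * it is rejected as BAD_STACK, while an object entirely outside
 * [S, S + 16384) returns NOT_STACK.
 */
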
/*
 * If these functions are reached, then CONFIG_HARDENED_USERCOPY has found
 * an unexpected state during a copy_from_user() or copy_to_user() call.
 * There are several checks being performed on the buffer by the
 * __check_object_size() function. Normal stack buffer usage should never
 * trip the checks, and kernel text addressing will always trip the check.
 * For cache objects, it is checking that only the whitelisted range of
 * bytes for a given cache is being accessed (via the cache's usersize and
 * useroffset fields). To adjust a cache whitelist, use the usercopy-aware
 * kmem_cache_create_usercopy() function to create the cache (and
 * carefully audit the whitelist range).
 */
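/*
 * Whitelist example (an illustrative sketch, not part of this file;
 * struct example_obj, its fields, and the cache name are hypothetical).
 * Only the "data" field may be copied to/from userspace:
 *
 *	struct example_obj {
 *		spinlock_t lock;
 *		char data[64];
 *	};
 *
 *	cache = kmem_cache_create_usercopy("example_obj",
 *			sizeof(struct example_obj), 0, 0,
 *			offsetof(struct example_obj, data),
 *			sizeof_field(struct example_obj, data),
 *			NULL);
 *
 * A usercopy touching "lock", or running past the end of "data", then
 * trips the whitelist check and lands in the handlers below.
 */
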
void usercopy_warn(const char *name, const char *detail, bool to_user,
		   unsigned long offset, unsigned long len)
{
	WARN_ONCE(1, "Bad or missing usercopy whitelist? Kernel memory %s attempt detected %s %s%s%s%s (offset %lu, size %lu)!\n",
		 to_user ? "exposure" : "overwrite",
		 to_user ? "from" : "to",
		 name ? : "unknown?!",
		 detail ? " '" : "", detail ? : "", detail ? "'" : "",
		 offset, len);
}

void __noreturn usercopy_abort(const char *name, const char *detail,
			       bool to_user, unsigned long offset,
			       unsigned long len)
{
	pr_emerg("Kernel memory %s attempt detected %s %s%s%s%s (offset %lu, size %lu)!\n",
		 to_user ? "exposure" : "overwrite",
		 to_user ? "from" : "to",
		 name ? : "unknown?!",
		 detail ? " '" : "", detail ? : "", detail ? "'" : "",
		 offset, len);

	/*
	 * For greater effect, it would be nice to do do_group_exit(),
	 * but BUG() actually hooks all the lock-breaking and per-arch
	 * Oops code, so that is used here instead.
	 */
	BUG();
}

/* Returns true if any portion of [ptr,ptr+n) overlaps with [low,high). */
static bool overlaps(const unsigned long ptr, unsigned long n,
		     unsigned long low, unsigned long high)
{
	const unsigned long check_low = ptr;
	unsigned long check_high = check_low + n;

	/* Does not overlap if entirely above or entirely below. */
	if (check_low >= high || check_high <= low)
		return false;

	return true;
}

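/*
 * Half-open interval examples (hypothetical values):
 * overlaps(0x1000, 0x1000, 0x2000, 0x3000) is false, since
 * [0x1000,0x2000) ends exactly where [0x2000,0x3000) begins, while
 * overlaps(0x1fff, 2, 0x2000, 0x3000) is true because the last byte
 * of [0x1fff,0x2001) falls inside the second range.
 */
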
/* Is this address range in the kernel text area? */
static inline void check_kernel_text_object(const unsigned long ptr,
					    unsigned long n, bool to_user)
{
	unsigned long textlow = (unsigned long)_stext;
	unsigned long texthigh = (unsigned long)_etext;
	unsigned long textlow_linear, texthigh_linear;

	if (overlaps(ptr, n, textlow, texthigh))
		usercopy_abort("kernel text", NULL, to_user, ptr - textlow, n);

	/*
	 * Some architectures have virtual memory mappings with a secondary
	 * mapping of the kernel text, i.e. there is more than one virtual
	 * kernel address that points to the kernel image. This usually
	 * happens when there is a separate linear mapping of physical
	 * memory, in which case __pa() is not simply the reverse of
	 * __va(). This can be detected and checked:
	 */
	textlow_linear = (unsigned long)lm_alias(textlow);
	/* No different mapping: we're done. */
	if (textlow_linear == textlow)
		return;

	/* Check the secondary mapping... */
	texthigh_linear = (unsigned long)lm_alias(texthigh);
	if (overlaps(ptr, n, textlow_linear, texthigh_linear))
		usercopy_abort("linear kernel text", NULL, to_user,
			       ptr - textlow_linear, n);
}

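/*
 * Background note: lm_alias() expands to __va(__pa_symbol(x)), i.e. it
 * translates a kernel-image symbol address into its linear-map alias.
 * On arm64, for example, _stext is reachable both through the kernel
 * image mapping and through the linear map, so both ranges must be
 * covered by the check above. (The arm64 mention is illustrative; the
 * check itself is architecture-neutral.)
 */
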
static inline void check_bogus_address(const unsigned long ptr, unsigned long n,
				       bool to_user)
{
	/* Reject if object wraps past end of memory. */
	if (ptr + (n - 1) < ptr)
		usercopy_abort("wrapped address", NULL, to_user, 0, ptr + n);

	/* Reject if NULL or ZERO-allocation. */
	if (ZERO_OR_NULL_PTR(ptr))
		usercopy_abort("null address", NULL, to_user, ptr, n);
}

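/*
 * Wraparound example (hypothetical 64-bit values): for
 * ptr = 0xfffffffffffffff8 and n = 16, ptr + (n - 1) overflows to
 * 0x0000000000000007, which is less than ptr, so the copy is rejected
 * before any out-of-range access can happen. Note the (n - 1): an
 * object whose last byte sits exactly at the top of the address space
 * is still valid.
 */
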
/* Checks for allocs that are marked in some way as spanning multiple pages. */
static inline void check_page_span(const void *ptr, unsigned long n,
				   struct page *page, bool to_user)
{
#ifdef CONFIG_HARDENED_USERCOPY_PAGESPAN
	const void *end = ptr + n - 1;
	struct page *endpage;
	bool is_reserved, is_cma;

	/*
	 * Sometimes the kernel data regions are not marked Reserved (see
	 * check below). And sometimes [_sdata,_edata) does not cover
	 * rodata and/or bss, so check each range explicitly.
	 */

	/* Allow reads of kernel rodata region (if not marked as Reserved). */
	if (ptr >= (const void *)__start_rodata &&
	    end <= (const void *)__end_rodata) {
		if (!to_user)
			usercopy_abort("rodata", NULL, to_user, 0, n);
		return;
	}

	/* Allow kernel data region (if not marked as Reserved). */
	if (ptr >= (const void *)_sdata && end <= (const void *)_edata)
		return;

	/* Allow kernel bss region (if not marked as Reserved). */
	if (ptr >= (const void *)__bss_start &&
	    end <= (const void *)__bss_stop)
		return;

	/* Is the object wholly within one base page? */
	if (likely(((unsigned long)ptr & (unsigned long)PAGE_MASK) ==
		   ((unsigned long)end & (unsigned long)PAGE_MASK)))
		return;

	/* Allow if fully inside the same compound (__GFP_COMP) page. */
	endpage = virt_to_head_page(end);
	if (likely(endpage == page))
		return;

	/*
	 * Accept if the range is entirely either Reserved (i.e. special
	 * or device memory) or CMA. Otherwise, reject since the object
	 * spans several independently allocated pages.
	 */
	is_reserved = PageReserved(page);
	is_cma = is_migrate_cma_page(page);
	if (!is_reserved && !is_cma)
		usercopy_abort("spans multiple pages", NULL, to_user, 0, n);

	for (ptr += PAGE_SIZE; ptr <= end; ptr += PAGE_SIZE) {
		page = virt_to_head_page(ptr);
		if (is_reserved && !PageReserved(page))
			usercopy_abort("spans Reserved and non-Reserved pages",
				       NULL, to_user, 0, n);
		if (is_cma && !is_migrate_cma_page(page))
			usercopy_abort("spans CMA and non-CMA pages", NULL,
				       to_user, 0, n);
	}
#endif
}

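/*
 * Page-span example (an illustrative sketch with arbitrary sizes): a
 * buffer inside an order-2 page allocated with __GFP_COMP spans four
 * base pages, but they all share one head page, so the endpage == page
 * test above accepts a copy that crosses them. Two separately allocated
 * order-0 pages that happen to be adjacent share no head page, and a
 * copy straddling them is rejected as "spans multiple pages" (unless
 * both are Reserved or both are CMA).
 */
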
static inline void check_heap_object(const void *ptr, unsigned long n,
				     bool to_user)
{
	struct page *page;

	if (!virt_addr_valid(ptr))
		return;

	/*
	 * When CONFIG_HIGHMEM=y, kmap_to_page() will give either the
	 * highmem page or fall back to virt_to_page(). The following
	 * is effectively a highmem-aware virt_to_head_page().
	 */
	page = compound_head(kmap_to_page((void *)ptr));

	if (PageSlab(page)) {
		/* Check slab allocator for flags and size. */
		__check_heap_object(ptr, n, page, to_user);
	} else {
		/* Verify object does not incorrectly span multiple pages. */
		check_page_span(ptr, n, page, to_user);
	}
}

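/*
 * Note: __check_heap_object() is provided by the active slab allocator
 * (e.g. mm/slub.c), which knows the object's size and the cache's
 * useroffset/usersize whitelist, and calls usercopy_abort() when the
 * requested copy falls outside the whitelisted region of the object.
 */
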
static DEFINE_STATIC_KEY_FALSE_RO(bypass_usercopy_checks);

/*
 * Validates that the given object is:
 * - not a bogus address
 * - fully contained by the stack (or a stack frame, when available)
 * - fully within a SLAB object (or the object's whitelist area, when available)
 * - not in kernel text
 */
void __check_object_size(const void *ptr, unsigned long n, bool to_user)
{
	if (static_branch_unlikely(&bypass_usercopy_checks))
		return;

	/* Skip all tests if size is zero. */
	if (!n)
		return;

	/* Check for invalid addresses. */
	check_bogus_address((const unsigned long)ptr, n, to_user);

	/* Check for bad stack object. */
	switch (check_stack_object(ptr, n)) {
	case NOT_STACK:
		/* Object is not touching the current process stack. */
		break;
	case GOOD_FRAME:
	case GOOD_STACK:
		/*
		 * Object is either in the correct frame (when it
		 * is possible to check) or just generally on the
		 * process stack (when frame checking is not available).
		 */
		return;
	default:
		usercopy_abort("process stack", NULL, to_user, 0, n);
	}

	/* Check for bad heap object. */
	check_heap_object(ptr, n, to_user);

	/* Check for object in kernel text to avoid exposure. */
	check_kernel_text_object((const unsigned long)ptr, n, to_user);
}
EXPORT_SYMBOL(__check_object_size);
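
/*
 * Call-path note: in this kernel vintage, usercopy helpers such as
 * copy_to_user()/copy_from_user() reach this function through the
 * check_object_size() wrapper in <linux/thread_info.h>, which skips
 * the call for compile-time-constant sizes, so only runtime-sized
 * copies pay for these checks.
 */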

static bool enable_checks __initdata = true;

static int __init parse_hardened_usercopy(char *str)
{
	if (strtobool(str, &enable_checks))
		pr_warn("Invalid option string for hardened_usercopy: '%s'\n",
			str);
	return 1;
}

__setup("hardened_usercopy=", parse_hardened_usercopy);
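
/*
 * Usage note: the checks are enabled by default; booting with
 * "hardened_usercopy=off" (or any value strtobool() treats as false)
 * on the kernel command line disables them by flipping the
 * bypass_usercopy_checks static branch in the initcall below.
 */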

static int __init set_hardened_usercopy(void)
{
	if (!enable_checks)
		static_branch_enable(&bypass_usercopy_checks);
	return 1;
}

late_initcall(set_hardened_usercopy);