// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/mmdebug.h>
#include <linux/highmem.h>
#include <linux/page_ext.h>
#include <linux/poison.h>
#include <linux/ratelimit.h>
#include <linux/kasan.h>

static DEFINE_STATIC_KEY_FALSE_RO(want_page_poisoning);

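/* Parse the "page_poison" boot parameter and enable or disable poisoning accordingly. */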
static int __init early_page_poison_param(char *buf)
{
	int ret;
	bool tmp;

	ret = strtobool(buf, &tmp);
	if (ret)
		return ret;

	if (tmp)
		static_branch_enable(&want_page_poisoning);
	else
		static_branch_disable(&want_page_poisoning);

	return 0;
}
early_param("page_poison", early_page_poison_param);

/**
 * page_poisoning_enabled - check if page poisoning is enabled
 *
 * Return true if page poisoning is enabled, or false if not.
 */
bool page_poisoning_enabled(void)
{
	/*
	 * Assumes that debug_pagealloc_enabled is set before
	 * memblock_free_all.
	 * Page poisoning is debug page alloc for some arches. If
	 * either of those options is enabled, enable poisoning.
	 */
	return (static_branch_unlikely(&want_page_poisoning) ||
		(!IS_ENABLED(CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC) &&
		debug_pagealloc_enabled()));
}
EXPORT_SYMBOL_GPL(page_poisoning_enabled);

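/* Fill one page with the PAGE_POISON pattern. */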
static void poison_page(struct page *page)
{
	void *addr = kmap_atomic(page);

	/* KASAN still thinks the page is in-use, so skip it. */
	kasan_disable_current();
	memset(addr, PAGE_POISON, PAGE_SIZE);
	kasan_enable_current();
	kunmap_atomic(addr);
}

static void poison_pages(struct page *page, int n)
{
	int i;

	for (i = 0; i < n; i++)
		poison_page(page + i);
}

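/*
 * Return true if @a and @b differ in exactly one bit, i.e. the mismatch
 * looks like a single bit flip rather than a wider overwrite.
 */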
static bool single_bit_flip(unsigned char a, unsigned char b)
{
	unsigned char error = a ^ b;

	return error && !(error & (error - 1));
}

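/*
 * Scan @bytes of @mem for anything other than PAGE_POISON and report the
 * corrupted range (rate-limited), distinguishing a single bit error from
 * a larger memory corruption.
 */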
static void check_poison_mem(struct page *page, unsigned char *mem, size_t bytes)
{
	static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 10);
	unsigned char *start;
	unsigned char *end;

	if (IS_ENABLED(CONFIG_PAGE_POISONING_NO_SANITY))
		return;

	start = memchr_inv(mem, PAGE_POISON, bytes);
	if (!start)
		return;

	for (end = mem + bytes - 1; end > start; end--) {
		if (*end != PAGE_POISON)
			break;
	}

	if (!__ratelimit(&ratelimit))
		return;
	else if (start == end && single_bit_flip(*start, PAGE_POISON))
		pr_err("pagealloc: single bit error\n");
	else
		pr_err("pagealloc: memory corruption\n");

	print_hex_dump(KERN_ERR, "", DUMP_PREFIX_ADDRESS, 16, 1, start,
			end - start + 1, 1);
	dump_stack();
	dump_page(page, "pagealloc: corrupted page details");
}

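/* Verify that the page still holds the poison pattern; report any corruption found. */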
static void unpoison_page(struct page *page)
{
	void *addr;

	addr = kmap_atomic(page);
	kasan_disable_current();
	/*
	 * When page poisoning is enabled, every page freed to the buddy
	 * allocator is poisoned, so no extra check is needed to see
	 * whether a page was poisoned.
	 */
	check_poison_mem(page, kasan_reset_tag(addr), PAGE_SIZE);
	kasan_enable_current();
	kunmap_atomic(addr);
}

static void unpoison_pages(struct page *page, int n)
{
	int i;

	for (i = 0; i < n; i++)
		unpoison_page(page + i);
}

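/*
 * Poison @numpages pages when @enable is false and check them for poison
 * damage when @enable is true. No-op unless page poisoning is enabled.
 */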
void kernel_poison_pages(struct page *page, int numpages, int enable)
{
	if (!page_poisoning_enabled())
		return;

	if (enable)
		unpoison_pages(page, numpages);
	else
		poison_pages(page, numpages);
}

#ifndef CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC
void __kernel_map_pages(struct page *page, int numpages, int enable)
{
	/* This function does nothing, all work is done via poison pages */
}
#endif