// SPDX-License-Identifier: GPL-2.0-only
//
// Copyright (C) 2019 Jason Yan <yanaijie@huawei.com>

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/memblock.h>
#include <linux/libfdt.h>
#include <linux/crash_core.h>
#include <asm/cacheflush.h>
#include <asm/prom.h>
#include <asm/kdump.h>
#include <mm/mmu_decl.h>
#include <generated/utsrelease.h>

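/*
 * Ranges the randomized kernel must avoid (kernel image, DTB, initrd,
 * crashkernel window), the bounds of the candidate window
 * (pa_start/pa_end), and bookkeeping for the FDT /reserved-memory node.
 */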
struct regions {
	unsigned long pa_start;
	unsigned long pa_end;
	unsigned long kernel_size;
	unsigned long dtb_start;
	unsigned long dtb_end;
	unsigned long initrd_start;
	unsigned long initrd_end;
	unsigned long crash_start;
	unsigned long crash_end;
	int reserved_mem;
	int reserved_mem_addr_cells;
	int reserved_mem_size_cells;
};

struct regions __initdata regions;

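/* Fill boot_command_line from the /chosen node of the FDT. */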
static __init void kaslr_get_cmdline(void *fdt)
{
	int node = fdt_path_offset(fdt, "/chosen");

	early_init_dt_scan_chosen(node, "chosen", 1, boot_command_line);
}

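/*
 * Fold an area of memory into 'hash', one word at a time, rotating the
 * running hash before each word is mixed in.
 */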
static unsigned long __init rotate_xor(unsigned long hash, const void *area,
				       size_t size)
{
	size_t i;
	const unsigned long *ptr = area;

	for (i = 0; i < size / sizeof(hash); i++) {
		/* Rotate by odd number of bits and XOR. */
		hash = (hash << ((sizeof(hash) * 8) - 7)) | (hash >> 7);
		hash ^= ptr[i];
	}

	return hash;
}

/* Attempt to create a simple starting entropy. This can make it different
 * for every build but it is still not enough. Stronger entropy should
 * be added to make it change for every boot.
 */
static unsigned long __init get_boot_seed(void *fdt)
{
	unsigned long hash = 0;

	/* build-specific string for starting entropy. */
	hash = rotate_xor(hash, linux_banner, strlen(linux_banner));
	hash = rotate_xor(hash, fdt, fdt_totalsize(fdt));

	return hash;
}

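/*
 * Retrieve the "kaslr-seed" property from /chosen and wipe it so the seed
 * cannot be recovered later. A bootloader that supplies one would populate
 * the FDT roughly like this (illustrative snippet):
 *
 *	/chosen {
 *		kaslr-seed = <0x12345678 0x9abcdef0>;
 *	};
 */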
static __init u64 get_kaslr_seed(void *fdt)
{
	int node, len;
	fdt64_t *prop;
	u64 ret;

	node = fdt_path_offset(fdt, "/chosen");
	if (node < 0)
		return 0;

	prop = fdt_getprop_w(fdt, node, "kaslr-seed", &len);
	if (!prop || len != sizeof(u64))
		return 0;

	ret = fdt64_to_cpu(*prop);
	*prop = 0;
	return ret;
}

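/*
 * [s1, e1] and [s2, e2] overlap iff neither range ends before the other
 * begins, e.g. [0x100, 0x200] and [0x1ff, 0x300] overlap.
 */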
static __init bool regions_overlap(u32 s1, u32 e1, u32 s2, u32 e2)
{
	return e1 >= s2 && e2 >= s1;
}

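/*
 * Check [start, end] against the firmware reservations in the FDT: both
 * the /memreserve/ entries and any static allocations below the
 * /reserved-memory node.
 */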
static __init bool overlaps_reserved_region(const void *fdt, u32 start,
					    u32 end)
{
	int subnode, len, i;
	u64 base, size;

	/* check for overlap with /memreserve/ entries */
	for (i = 0; i < fdt_num_mem_rsv(fdt); i++) {
		if (fdt_get_mem_rsv(fdt, i, &base, &size) < 0)
			continue;
		if (regions_overlap(start, end, base, base + size))
			return true;
	}

	if (regions.reserved_mem < 0)
		return false;

	/* check for overlap with static reservations in /reserved-memory */
	for (subnode = fdt_first_subnode(fdt, regions.reserved_mem);
	     subnode >= 0;
	     subnode = fdt_next_subnode(fdt, subnode)) {
		const fdt32_t *reg;
		u64 rsv_end;

		len = 0;
		reg = fdt_getprop(fdt, subnode, "reg", &len);
		while (len >= (regions.reserved_mem_addr_cells +
			       regions.reserved_mem_size_cells)) {
			base = fdt32_to_cpu(reg[0]);
			if (regions.reserved_mem_addr_cells == 2)
				base = (base << 32) | fdt32_to_cpu(reg[1]);

			reg += regions.reserved_mem_addr_cells;
			len -= 4 * regions.reserved_mem_addr_cells;

			size = fdt32_to_cpu(reg[0]);
			if (regions.reserved_mem_size_cells == 2)
				size = (size << 32) | fdt32_to_cpu(reg[1]);

			reg += regions.reserved_mem_size_cells;
			len -= 4 * regions.reserved_mem_size_cells;

			if (base >= regions.pa_end)
				continue;

			rsv_end = min(base + size, (u64)U32_MAX);

			if (regions_overlap(start, end, base, rsv_end))
				return true;
		}
	}
	return false;
}

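/*
 * Check whether a candidate range collides with anything that must be
 * preserved: the current kernel image, the DTB, the initrd, the
 * crashkernel window or a firmware reservation.
 */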
static __init bool overlaps_region(const void *fdt, u32 start,
				   u32 end)
{
	if (regions_overlap(start, end, __pa(_stext), __pa(_end)))
		return true;

	if (regions_overlap(start, end, regions.dtb_start,
			    regions.dtb_end))
		return true;

	if (regions_overlap(start, end, regions.initrd_start,
			    regions.initrd_end))
		return true;

	if (regions_overlap(start, end, regions.crash_start,
			    regions.crash_end))
		return true;

	return overlaps_reserved_region(fdt, start, end);
}

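/*
 * Record the crashkernel region requested on the command line so the
 * randomized kernel does not land inside it.
 */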
static void __init get_crash_kernel(void *fdt, unsigned long size)
{
#ifdef CONFIG_CRASH_CORE
	unsigned long long crash_size, crash_base;
	int ret;

	ret = parse_crashkernel(boot_command_line, size, &crash_size,
				&crash_base);
	if (ret != 0 || crash_size == 0)
		return;
	if (crash_base == 0)
		crash_base = KDUMP_KERNELBASE;

	regions.crash_start = (unsigned long)crash_base;
	regions.crash_end = (unsigned long)(crash_base + crash_size);

	pr_debug("crash_base=0x%llx crash_size=0x%llx\n", crash_base, crash_size);
#endif
}

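/* Record the initrd range that the bootloader advertised in /chosen. */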
static void __init get_initrd_range(void *fdt)
{
	u64 start, end;
	int node, len;
	const __be32 *prop;

	node = fdt_path_offset(fdt, "/chosen");
	if (node < 0)
		return;

	prop = fdt_getprop(fdt, node, "linux,initrd-start", &len);
	if (!prop)
		return;
	start = of_read_number(prop, len / 4);

	prop = fdt_getprop(fdt, node, "linux,initrd-end", &len);
	if (!prop)
		return;
	end = of_read_number(prop, len / 4);

	regions.initrd_start = (unsigned long)start;
	regions.initrd_end = (unsigned long)end;

	pr_debug("initrd_start=0x%llx  initrd_end=0x%llx\n", start, end);
}

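/*
 * Walk downwards from 'offset' towards 'start' in SZ_16K steps and return
 * the first address where the kernel fits without overlapping anything,
 * or 0 if there is none.
 */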
static __init unsigned long get_usable_address(const void *fdt,
					       unsigned long start,
					       unsigned long offset)
{
	unsigned long pa;
	unsigned long pa_end;

	for (pa = offset; (long)pa > (long)start; pa -= SZ_16K) {
		pa_end = pa + regions.kernel_size;
		if (overlaps_region(fdt, pa, pa_end))
			continue;

		return pa;
	}
	return 0;
}

static __init void get_cell_sizes(const void *fdt, int node, int *addr_cells,
				  int *size_cells)
{
	const int *prop;
	int len;

	/*
	 * Retrieve the #address-cells and #size-cells properties
	 * from the 'node', or use the default if not provided.
	 */
	*addr_cells = *size_cells = 1;

	prop = fdt_getprop(fdt, node, "#address-cells", &len);
	if (len == 4)
		*addr_cells = fdt32_to_cpu(*prop);
	prop = fdt_getprop(fdt, node, "#size-cells", &len);
	if (len == 4)
		*size_cells = fdt32_to_cpu(*prop);
}

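/*
 * Try the chosen 64M slot first, then fall back to lower slots. Returns
 * the usable kernel offset relative to memstart_addr, or 0 if no slot
 * worked.
 */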
static unsigned long __init kaslr_legal_offset(void *dt_ptr, unsigned long index,
					       unsigned long offset)
{
	unsigned long koffset = 0;
	unsigned long start;

	while ((long)index >= 0) {
		offset = memstart_addr + index * SZ_64M + offset;
		start = memstart_addr + index * SZ_64M;
		koffset = get_usable_address(dt_ptr, start, offset);
		if (koffset)
			break;
		index--;
	}

	if (koffset != 0)
		koffset -= memstart_addr;

	return koffset;
}

static inline __init bool kaslr_disabled(void)
{
	return strstr(boot_command_line, "nokaslr") != NULL;
}

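/*
 * Gather entropy, then randomly pick a 64M slot and a 16K-aligned offset
 * inside it that does not collide with anything that must be preserved.
 * Returns 0 when KASLR is disabled or no usable offset exists.
 */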
static unsigned long __init kaslr_choose_location(void *dt_ptr, phys_addr_t size,
						  unsigned long kernel_sz)
{
	unsigned long offset, random;
	unsigned long ram, linear_sz;
	u64 seed;
	unsigned long index;

	kaslr_get_cmdline(dt_ptr);
	if (kaslr_disabled())
		return 0;

	random = get_boot_seed(dt_ptr);

	seed = get_tb() << 32;
	seed ^= get_tb();
	random = rotate_xor(random, &seed, sizeof(seed));

	/*
	 * Retrieve (and wipe) the seed from the FDT
	 */
	seed = get_kaslr_seed(dt_ptr);
	if (seed)
		random = rotate_xor(random, &seed, sizeof(seed));
	else
		pr_warn("KASLR: No safe seed for randomizing the kernel base.\n");

	ram = min_t(phys_addr_t, __max_low_memory, size);
	ram = map_mem_in_cams(ram, CONFIG_LOWMEM_CAM_NUM, true);
	linear_sz = min_t(unsigned long, ram, SZ_512M);

	/* If the linear size is smaller than 64M, do not randomize */
	if (linear_sz < SZ_64M)
		return 0;

	/* check for a reserved-memory node and record its cell sizes */
	regions.reserved_mem = fdt_path_offset(dt_ptr, "/reserved-memory");
	if (regions.reserved_mem >= 0)
		get_cell_sizes(dt_ptr, regions.reserved_mem,
			       &regions.reserved_mem_addr_cells,
			       &regions.reserved_mem_size_cells);

	regions.pa_start = memstart_addr;
	regions.pa_end = memstart_addr + linear_sz;
	regions.dtb_start = __pa(dt_ptr);
	regions.dtb_end = __pa(dt_ptr) + fdt_totalsize(dt_ptr);
	regions.kernel_size = kernel_sz;

	get_initrd_range(dt_ptr);
	get_crash_kernel(dt_ptr, ram);

	/*
	 * Decide which 64M slot to start in, using only the low 8 bits of
	 * the random seed (e.g. a 512M linear mapping gives 8 candidate
	 * slots).
	 */
	index = random & 0xFF;
	index %= linear_sz / SZ_64M;

	/* Decide the offset inside the 64M slot */
	offset = random % (SZ_64M - kernel_sz);
	offset = round_down(offset, SZ_16K);

	return kaslr_legal_offset(dt_ptr, index, offset);
}

/*
 * To see if we need to relocate the kernel to a random offset
 * void *dt_ptr - address of the device tree
 * phys_addr_t size - size of the first memory block
 */
notrace void __init kaslr_early_init(void *dt_ptr, phys_addr_t size)
{
	unsigned long tlb_virt;
	phys_addr_t tlb_phys;
	unsigned long offset;
	unsigned long kernel_sz;

	kernel_sz = (unsigned long)_end - (unsigned long)_stext;

	offset = kaslr_choose_location(dt_ptr, size, kernel_sz);
	if (offset == 0)
		return;

	kernstart_virt_addr += offset;
	kernstart_addr += offset;

	is_second_reloc = 1;

	if (offset >= SZ_64M) {
		tlb_virt = round_down(kernstart_virt_addr, SZ_64M);
		tlb_phys = round_down(kernstart_addr, SZ_64M);

		/* Create kernel map to relocate in */
		create_kaslr_tlb_entry(1, tlb_virt, tlb_phys);
	}

	/* Copy the kernel to its new location and run from there */
	memcpy((void *)kernstart_virt_addr, (void *)_stext, kernel_sz);
	flush_icache_range(kernstart_virt_addr, kernstart_virt_addr + kernel_sz);

	reloc_kernel_entry(dt_ptr, kernstart_virt_addr);
}

void __init kaslr_late_init(void)
{
	/* If randomized, clear the original kernel */
	if (kernstart_virt_addr != KERNELBASE) {
		unsigned long kernel_sz;

		kernel_sz = (unsigned long)_end - kernstart_virt_addr;
		memzero_explicit((void *)KERNELBASE, kernel_sz);
	}
}
398