// SPDX-License-Identifier: GPL-2.0
#include <linux/kasan.h>
#include <linux/sched/task.h>
#include <linux/memblock.h>
#include <linux/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/kasan.h>
#include <asm/mem_detect.h>
#include <asm/processor.h>
#include <asm/sclp.h>
#include <asm/facility.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/uv.h>

unsigned long kasan_vmax;
static unsigned long segment_pos __initdata;
static unsigned long segment_low __initdata;
static unsigned long pgalloc_pos __initdata;
static unsigned long pgalloc_low __initdata;
static unsigned long pgalloc_freeable __initdata;
static bool has_edat __initdata;
static bool has_nx __initdata;

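/* Translate an address into its KASAN shadow address */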
#define __sha(x) ((unsigned long)kasan_mem_to_shadow((void *)x))

static pgd_t early_pg_dir[PTRS_PER_PGD] __initdata __aligned(PAGE_SIZE);

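/* Print a boot failure message via early sclp and stop in disabled wait */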
static void __init kasan_early_panic(const char *reason)
{
	sclp_early_printk("The Linux kernel failed to boot with the KernelAddressSanitizer:\n");
	sclp_early_printk(reason);
	disabled_wait();
}

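/* Carve the next segment out of the downward-growing segment pool */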
static void * __init kasan_early_alloc_segment(void)
{
	segment_pos -= _SEGMENT_SIZE;

	if (segment_pos < segment_low)
		kasan_early_panic("out of memory during initialisation\n");

	return (void *)segment_pos;
}

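/* Bump-down allocator for early page table and shadow pages */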
static void * __init kasan_early_alloc_pages(unsigned int order)
{
	pgalloc_pos -= (PAGE_SIZE << order);

	if (pgalloc_pos < pgalloc_low)
		kasan_early_panic("out of memory during initialisation\n");

	return (void *)pgalloc_pos;
}

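/* Allocate a region/segment (crst) table and initialize it with val */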
static void * __init kasan_early_crst_alloc(unsigned long val)
{
	unsigned long *table;

	table = kasan_early_alloc_pages(CRST_ALLOC_ORDER);
	if (table)
		crst_table_init(table, val);
	return table;
}

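/*
 * A page table occupies half a page; hand out both halves of each
 * allocated page before asking for a new one.
 */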
static pte_t * __init kasan_early_pte_alloc(void)
{
	static void *pte_leftover;
	pte_t *pte;

	BUILD_BUG_ON(_PAGE_TABLE_SIZE * 2 != PAGE_SIZE);

	if (!pte_leftover) {
		pte_leftover = kasan_early_alloc_pages(0);
		pte = pte_leftover + _PAGE_TABLE_SIZE;
	} else {
		pte = pte_leftover;
		pte_leftover = NULL;
	}
	memset64((u64 *)pte, _PAGE_INVALID, PTRS_PER_PTE);
	return pte;
}

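/*
 * POPULATE_ONE2ONE:     identity map physical memory
 * POPULATE_MAP:         map freshly allocated, zeroed shadow pages
 * POPULATE_ZERO_SHADOW: map everything to the shared zero shadow page
 * POPULATE_SHALLOW:     create top-level tables only, leaving lower
 *                       levels to be populated later (KASAN_VMALLOC)
 */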
enum populate_mode {
	POPULATE_ONE2ONE,
	POPULATE_MAP,
	POPULATE_ZERO_SHADOW,
	POPULATE_SHALLOW
};
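/* Walk [address, end) and populate the page tables according to mode */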
static void __init kasan_early_vmemmap_populate(unsigned long address,
						unsigned long end,
						enum populate_mode mode)
{
	unsigned long pgt_prot_zero, pgt_prot, sgt_prot;
	pgd_t *pg_dir;
	p4d_t *p4_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;

	pgt_prot_zero = pgprot_val(PAGE_KERNEL_RO);
	if (!has_nx)
		pgt_prot_zero &= ~_PAGE_NOEXEC;
	pgt_prot = pgprot_val(PAGE_KERNEL);
	sgt_prot = pgprot_val(SEGMENT_KERNEL);
	if (!has_nx || mode == POPULATE_ONE2ONE) {
		pgt_prot &= ~_PAGE_NOEXEC;
		sgt_prot &= ~_SEGMENT_ENTRY_NOEXEC;
	}

	/*
	 * The first 1MB of 1:1 mapping is mapped with 4KB pages
	 */
	while (address < end) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			if (mode == POPULATE_ZERO_SHADOW &&
			    IS_ALIGNED(address, PGDIR_SIZE) &&
			    end - address >= PGDIR_SIZE) {
				pgd_populate(&init_mm, pg_dir,
						kasan_early_shadow_p4d);
				address = (address + PGDIR_SIZE) & PGDIR_MASK;
				continue;
			}
			p4_dir = kasan_early_crst_alloc(_REGION2_ENTRY_EMPTY);
			pgd_populate(&init_mm, pg_dir, p4_dir);
		}

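		/*
		 * In shallow mode only the top level is populated; lower
		 * levels are filled on demand for vmalloc/modules shadow.
		 */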
		if (IS_ENABLED(CONFIG_KASAN_S390_4_LEVEL_PAGING) &&
		    mode == POPULATE_SHALLOW) {
			address = (address + P4D_SIZE) & P4D_MASK;
			continue;
		}

		p4_dir = p4d_offset(pg_dir, address);
		if (p4d_none(*p4_dir)) {
			if (mode == POPULATE_ZERO_SHADOW &&
			    IS_ALIGNED(address, P4D_SIZE) &&
			    end - address >= P4D_SIZE) {
				p4d_populate(&init_mm, p4_dir,
						kasan_early_shadow_pud);
				address = (address + P4D_SIZE) & P4D_MASK;
				continue;
			}
			pu_dir = kasan_early_crst_alloc(_REGION3_ENTRY_EMPTY);
			p4d_populate(&init_mm, p4_dir, pu_dir);
		}

		if (!IS_ENABLED(CONFIG_KASAN_S390_4_LEVEL_PAGING) &&
		    mode == POPULATE_SHALLOW) {
			address = (address + PUD_SIZE) & PUD_MASK;
			continue;
		}

		pu_dir = pud_offset(p4_dir, address);
		if (pud_none(*pu_dir)) {
			if (mode == POPULATE_ZERO_SHADOW &&
			    IS_ALIGNED(address, PUD_SIZE) &&
			    end - address >= PUD_SIZE) {
				pud_populate(&init_mm, pu_dir,
						kasan_early_shadow_pmd);
				address = (address + PUD_SIZE) & PUD_MASK;
				continue;
			}
			pm_dir = kasan_early_crst_alloc(_SEGMENT_ENTRY_EMPTY);
			pud_populate(&init_mm, pu_dir, pm_dir);
		}

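		/*
		 * Use large segment mappings where EDAT is available and
		 * the range is aligned and large enough; otherwise fall
		 * through to 4KB page table entries.
		 */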
		pm_dir = pmd_offset(pu_dir, address);
		if (pmd_none(*pm_dir)) {
			if (IS_ALIGNED(address, PMD_SIZE) &&
			    end - address >= PMD_SIZE) {
				if (mode == POPULATE_ZERO_SHADOW) {
					pmd_populate(&init_mm, pm_dir, kasan_early_shadow_pte);
					address = (address + PMD_SIZE) & PMD_MASK;
					continue;
				} else if (has_edat && address) {
					void *page;

					if (mode == POPULATE_ONE2ONE) {
						page = (void *)address;
					} else {
						page = kasan_early_alloc_segment();
						memset(page, 0, _SEGMENT_SIZE);
					}
					pmd_val(*pm_dir) = __pa(page) | sgt_prot;
					address = (address + PMD_SIZE) & PMD_MASK;
					continue;
				}
			}
			pt_dir = kasan_early_pte_alloc();
			pmd_populate(&init_mm, pm_dir, pt_dir);
		} else if (pmd_large(*pm_dir)) {
			address = (address + PMD_SIZE) & PMD_MASK;
			continue;
		}

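		/* Install the final 4KB pte according to the populate mode */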
		pt_dir = pte_offset_kernel(pm_dir, address);
		if (pte_none(*pt_dir)) {
			void *page;

			switch (mode) {
			case POPULATE_ONE2ONE:
				page = (void *)address;
				pte_val(*pt_dir) = __pa(page) | pgt_prot;
				break;
			case POPULATE_MAP:
				page = kasan_early_alloc_pages(0);
				memset(page, 0, PAGE_SIZE);
				pte_val(*pt_dir) = __pa(page) | pgt_prot;
				break;
			case POPULATE_ZERO_SHADOW:
				page = kasan_early_shadow_page;
				pte_val(*pt_dir) = __pa(page) | pgt_prot_zero;
				break;
			case POPULATE_SHALLOW:
				/* should never happen */
				break;
			}
		}
		address += PAGE_SIZE;
	}
}

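/* Load the new table into the primary, secondary and home ASCEs */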
static void __init kasan_set_pgd(pgd_t *pgd, unsigned long asce_type)
{
	unsigned long asce_bits;

	asce_bits = asce_type | _ASCE_TABLE_LENGTH;
	S390_lowcore.kernel_asce = (__pa(pgd) & PAGE_MASK) | asce_bits;
	S390_lowcore.user_asce = S390_lowcore.kernel_asce;

	__ctl_load(S390_lowcore.kernel_asce, 1, 1);
	__ctl_load(S390_lowcore.kernel_asce, 7, 7);
	__ctl_load(S390_lowcore.kernel_asce, 13, 13);
}

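/* Turn on dynamic address translation using the home address space */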
static void __init kasan_enable_dat(void)
{
	psw_t psw;

	psw.mask = __extract_psw();
	psw_bits(psw).dat = 1;
	psw_bits(psw).as = PSW_BITS_AS_HOME;
	__load_psw_mask(psw.mask);
}

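/* Detect EDAT and NX facilities and enable them if present */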
static void __init kasan_early_detect_facilities(void)
{
	if (test_facility(8)) {
		has_edat = true;
		__ctl_set_bit(0, 23);
	}
	if (!noexec_disabled && test_facility(130)) {
		has_nx = true;
		__ctl_set_bit(0, 20);
	}
}

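/* Check whether Ultravisor secure storage limits the usable address space */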
static bool __init has_uv_sec_stor_limit(void)
{
	/*
	 * keep these conditions in line with setup_uv()
	 */
	if (!is_prot_virt_host())
		return false;

	if (is_prot_virt_guest())
		return false;

	if (!test_facility(158))
		return false;

	return !!uv_info.max_sec_stor_addr;
}

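/*
 * Build the early page tables and KASAN shadow: detect facilities,
 * size memory, populate the identity and shadow mappings, then switch
 * to the new tables and enable KASAN.
 */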
void __init kasan_early_init(void)
{
	unsigned long untracked_mem_end;
	unsigned long shadow_alloc_size;
	unsigned long vmax_unlimited;
	unsigned long initrd_end;
	unsigned long asce_type;
	unsigned long memsize;
	unsigned long pgt_prot = pgprot_val(PAGE_KERNEL_RO);
	pte_t pte_z;
	pmd_t pmd_z = __pmd(__pa(kasan_early_shadow_pte) | _SEGMENT_ENTRY);
	pud_t pud_z = __pud(__pa(kasan_early_shadow_pmd) | _REGION3_ENTRY);
	p4d_t p4d_z = __p4d(__pa(kasan_early_shadow_pud) | _REGION2_ENTRY);

	kasan_early_detect_facilities();
	if (!has_nx)
		pgt_prot &= ~_PAGE_NOEXEC;
	pte_z = __pte(__pa(kasan_early_shadow_page) | pgt_prot);

	memsize = get_mem_detect_end();
	if (!memsize)
		kasan_early_panic("cannot detect physical memory size\n");
	/* respect mem= cmdline parameter */
	if (memory_end_set && memsize > memory_end)
		memsize = memory_end;
	if (IS_ENABLED(CONFIG_CRASH_DUMP) && OLDMEM_BASE)
		memsize = min(memsize, OLDMEM_SIZE);
	memsize = min(memsize, KASAN_SHADOW_START);

	if (IS_ENABLED(CONFIG_KASAN_S390_4_LEVEL_PAGING)) {
		/* 4 level paging */
		BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_START, P4D_SIZE));
		BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END, P4D_SIZE));
		crst_table_init((unsigned long *)early_pg_dir,
				_REGION2_ENTRY_EMPTY);
		untracked_mem_end = kasan_vmax = vmax_unlimited = _REGION1_SIZE;
		if (has_uv_sec_stor_limit())
			kasan_vmax = min(vmax_unlimited, uv_info.max_sec_stor_addr);
		asce_type = _ASCE_TYPE_REGION2;
	} else {
		/* 3 level paging */
		BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_START, PUD_SIZE));
		BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END, PUD_SIZE));
		crst_table_init((unsigned long *)early_pg_dir,
				_REGION3_ENTRY_EMPTY);
		untracked_mem_end = kasan_vmax = vmax_unlimited = _REGION2_SIZE;
		asce_type = _ASCE_TYPE_REGION3;
	}

	/* init kasan zero shadow */
	crst_table_init((unsigned long *)kasan_early_shadow_p4d,
				p4d_val(p4d_z));
	crst_table_init((unsigned long *)kasan_early_shadow_pud,
				pud_val(pud_z));
	crst_table_init((unsigned long *)kasan_early_shadow_pmd,
				pmd_val(pmd_z));
	memset64((u64 *)kasan_early_shadow_pte, pte_val(pte_z), PTRS_PER_PTE);

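	/* The shadow consumes 1 byte per 8 bytes of tracked memory */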
	shadow_alloc_size = memsize >> KASAN_SHADOW_SCALE_SHIFT;
	pgalloc_low = round_up((unsigned long)_end, _SEGMENT_SIZE);
	if (IS_ENABLED(CONFIG_BLK_DEV_INITRD)) {
		initrd_end =
		    round_up(INITRD_START + INITRD_SIZE, _SEGMENT_SIZE);
		pgalloc_low = max(pgalloc_low, initrd_end);
	}

	if (pgalloc_low + shadow_alloc_size > memsize)
		kasan_early_panic("out of memory during initialisation\n");

	if (has_edat) {
		segment_pos = round_down(memsize, _SEGMENT_SIZE);
		segment_low = segment_pos - shadow_alloc_size;
		pgalloc_pos = segment_low;
	} else {
		pgalloc_pos = memsize;
	}
	init_mm.pgd = early_pg_dir;
	/*
	 * Current memory layout:
	 * +- 0 -------------+	   +- shadow start -+
	 * | 1:1 ram mapping |	  /| 1/8 ram	    |
	 * |		     |	 / |		    |
	 * +- end of ram ----+	/  +----------------+
	 * | ... gap ...     | /   |		    |
	 * |		     |/    |	kasan	    |
	 * +- shadow start --+	   |	zero	    |
	 * | 1/8 addr space  |	   |	page	    |
	 * +- shadow end    -+	   |	mapping	    |
	 * | ... gap ...     |\    |  (untracked)   |
	 * +- vmalloc area  -+ \   |		    |
	 * | vmalloc_size    |	\  |		    |
	 * +- modules vaddr -+	 \ +----------------+
	 * | 2Gb	     |	  \|	  unmapped  | allocated per module
	 * +-----------------+	   +- shadow end ---+
	 *
	 * Current memory layout (KASAN_VMALLOC):
	 * +- 0 -------------+	   +- shadow start -+
	 * | 1:1 ram mapping |	  /| 1/8 ram	    |
	 * |		     |	 / |		    |
	 * +- end of ram ----+	/  +----------------+
	 * | ... gap ...     | /   |	kasan	    |
	 * |		     |/    |	zero	    |
	 * +- shadow start --+	   |	page	    |
	 * | 1/8 addr space  |	   |	mapping     |
	 * +- shadow end    -+	   |  (untracked)   |
	 * | ... gap ...     |\    |		    |
	 * +- vmalloc area  -+ \   +- vmalloc area -+
	 * | vmalloc_size    |	\  |shallow populate|
	 * +- modules vaddr -+	 \ +- modules area -+
	 * | 2Gb	     |	  \|shallow populate|
	 * +-----------------+	   +- shadow end ---+
	 */
	/* populate kasan shadow (for identity mapping and zero page mapping) */
	kasan_early_vmemmap_populate(__sha(0), __sha(memsize), POPULATE_MAP);
	if (IS_ENABLED(CONFIG_MODULES))
		untracked_mem_end = kasan_vmax - MODULES_LEN;
	if (IS_ENABLED(CONFIG_KASAN_VMALLOC)) {
		untracked_mem_end = kasan_vmax - vmalloc_size - MODULES_LEN;
		/* shallowly populate kasan shadow for vmalloc and modules */
		kasan_early_vmemmap_populate(__sha(untracked_mem_end),
					     __sha(kasan_vmax), POPULATE_SHALLOW);
	}
	/* populate kasan shadow for untracked memory */
	kasan_early_vmemmap_populate(__sha(max_physmem_end),
				     __sha(untracked_mem_end),
				     POPULATE_ZERO_SHADOW);
	kasan_early_vmemmap_populate(__sha(kasan_vmax),
				     __sha(vmax_unlimited),
				     POPULATE_ZERO_SHADOW);
	/* memory allocated for identity mapping structs will be freed later */
	pgalloc_freeable = pgalloc_pos;
	/* populate identity mapping */
	kasan_early_vmemmap_populate(0, memsize, POPULATE_ONE2ONE);
	kasan_set_pgd(early_pg_dir, asce_type);
	kasan_enable_dat();
	/* enable kasan */
	init_task.kasan_depth = 0;
	memblock_reserve(pgalloc_pos, memsize - pgalloc_pos);
	sclp_early_printk("KernelAddressSanitizer initialized\n");
}

void __init kasan_copy_shadow(pgd_t *pg_dir)
{
	/*
	 * At this point we are still running on the early page tables set up
	 * in early_pg_dir, while swapper_pg_dir has just been initialized
	 * with the identity mapping. Carry over the shadow memory region
	 * from early_pg_dir to swapper_pg_dir.
	 */

	pgd_t *pg_dir_src;
	pgd_t *pg_dir_dst;
	p4d_t *p4_dir_src;
	p4d_t *p4_dir_dst;
	pud_t *pu_dir_src;
	pud_t *pu_dir_dst;

	pg_dir_src = pgd_offset_raw(early_pg_dir, KASAN_SHADOW_START);
	pg_dir_dst = pgd_offset_raw(pg_dir, KASAN_SHADOW_START);
	p4_dir_src = p4d_offset(pg_dir_src, KASAN_SHADOW_START);
	p4_dir_dst = p4d_offset(pg_dir_dst, KASAN_SHADOW_START);
	if (!p4d_folded(*p4_dir_src)) {
		/* 4 level paging */
		memcpy(p4_dir_dst, p4_dir_src,
		       (KASAN_SHADOW_SIZE >> P4D_SHIFT) * sizeof(p4d_t));
		return;
	}
	/* 3 level paging */
	pu_dir_src = pud_offset(p4_dir_src, KASAN_SHADOW_START);
	pu_dir_dst = pud_offset(p4_dir_dst, KASAN_SHADOW_START);
	memcpy(pu_dir_dst, pu_dir_src,
	       (KASAN_SHADOW_SIZE >> PUD_SHIFT) * sizeof(pud_t));
}

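/* Release the early identity-mapping page tables back to memblock */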
void __init kasan_free_early_identity(void)
{
	memblock_free(pgalloc_pos, pgalloc_freeable - pgalloc_pos);
}
454