// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  Copyright (C) 2001 Ben. Herrenschmidt (benh@kernel.crashing.org)
 *
 *  Modifications for ppc64:
 *      Copyright (C) 2003 Dave Engebretsen <engebret@us.ibm.com>
 *
 *  Copyright 2008 Michael Ellerman, IBM Corporation.
 */

#include <linux/types.h>
#include <linux/jump_label.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/sched/mm.h>
#include <linux/stop_machine.h>
#include <asm/cputable.h>
#include <asm/code-patching.h>
#include <asm/page.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/security_features.h>
#include <asm/firmware.h>

struct fixup_entry {
	unsigned long	mask;
	unsigned long	value;
	long		start_off;
	long		end_off;
	long		alt_start_off;
	long		alt_end_off;
};

static unsigned int *calc_addr(struct fixup_entry *fcur, long offset)
{
	/*
	 * We store the offset to the code as a negative offset from
	 * the start of the alt_entry, to support the VDSO. This
	 * routine converts that back into an actual address.
	 */
	return (unsigned int *)((unsigned long)fcur + offset);
}

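/*
 * Copy one instruction from the alternative section to its final location.
 * A relative branch whose target lies outside the alternative section must
 * be re-encoded so it still reaches the same target after the move.
 */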
static int patch_alt_instruction(unsigned int *src, unsigned int *dest,
				 unsigned int *alt_start, unsigned int *alt_end)
{
	unsigned int instr;

	instr = *src;

	if (instr_is_relative_branch(*src)) {
		unsigned int *target = (unsigned int *)branch_target(src);

		/* Branch within the section doesn't need translating */
		if (target < alt_start || target > alt_end) {
			instr = translate_branch(dest, src);
			if (!instr)
				return 1;
		}
	}

	raw_patch_instruction(dest, instr);

	return 0;
}

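/*
 * If the feature value does not match this entry, replace the section with
 * the instructions from the alternative section and nop out any remaining
 * space. The alternative must not be larger than the section it replaces.
 */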
static int patch_feature_section(unsigned long value, struct fixup_entry *fcur)
{
	unsigned int *start, *end, *alt_start, *alt_end, *src, *dest;

	start = calc_addr(fcur, fcur->start_off);
	end = calc_addr(fcur, fcur->end_off);
	alt_start = calc_addr(fcur, fcur->alt_start_off);
	alt_end = calc_addr(fcur, fcur->alt_end_off);

	if ((alt_end - alt_start) > (end - start))
		return 1;

	if ((value & fcur->mask) == fcur->value)
		return 0;

	src = alt_start;
	dest = start;

	for (; src < alt_end; src++, dest++) {
		if (patch_alt_instruction(src, dest, alt_start, alt_end))
			return 1;
	}

	for (; dest < end; dest++)
		raw_patch_instruction(dest, PPC_INST_NOP);

	return 0;
}

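/* Walk a table of fixup_entry records and patch each section as required. */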
void do_feature_fixups(unsigned long value, void *fixup_start, void *fixup_end)
{
	struct fixup_entry *fcur, *fend;

	fcur = fixup_start;
	fend = fixup_end;

	for (; fcur < fend; fcur++) {
		if (patch_feature_section(value, fcur)) {
			WARN_ON(1);
			printk("Unable to patch feature section at %p - %p" \
				" with %p - %p\n",
				calc_addr(fcur, fcur->start_off),
				calc_addr(fcur, fcur->end_off),
				calc_addr(fcur, fcur->alt_start_off),
				calc_addr(fcur, fcur->alt_end_off));
		}
	}
}

#ifdef CONFIG_PPC_BOOK3S_64
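/*
 * Patch each kernel entry site recorded in the __stf_entry_barrier_fixup
 * table with the selected store-forwarding barrier sequence (fallback call,
 * eieio, or hwsync + ori), or leave nops when no barrier is required.
 */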
static void do_stf_entry_barrier_fixups(enum stf_barrier_type types)
{
	unsigned int instrs[3], *dest;
	long *start, *end;
	int i;

	start = PTRRELOC(&__start___stf_entry_barrier_fixup),
	end = PTRRELOC(&__stop___stf_entry_barrier_fixup);

	instrs[0] = 0x60000000; /* nop */
	instrs[1] = 0x60000000; /* nop */
	instrs[2] = 0x60000000; /* nop */

	i = 0;
	if (types & STF_BARRIER_FALLBACK) {
		instrs[i++] = 0x7d4802a6; /* mflr r10		*/
		instrs[i++] = 0x60000000; /* branch patched below */
		instrs[i++] = 0x7d4803a6; /* mtlr r10		*/
	} else if (types & STF_BARRIER_EIEIO) {
		instrs[i++] = 0x7e0006ac; /* eieio + bit 6 hint */
	} else if (types & STF_BARRIER_SYNC_ORI) {
		instrs[i++] = 0x7c0004ac; /* hwsync		*/
		instrs[i++] = 0xe94d0000; /* ld r10,0(r13)	*/
		instrs[i++] = 0x63ff0000; /* ori 31,31,0 speculation barrier */
	}

	for (i = 0; start < end; start++, i++) {
		dest = (void *)start + *start;

		pr_devel("patching dest %lx\n", (unsigned long)dest);

		patch_instruction(dest, instrs[0]);

		if (types & STF_BARRIER_FALLBACK)
			patch_branch(dest + 1, (unsigned long)&stf_barrier_fallback,
				     BRANCH_SET_LINK);
		else
			patch_instruction(dest + 1, instrs[1]);

		patch_instruction(dest + 2, instrs[2]);
	}

	printk(KERN_DEBUG "stf-barrier: patched %d entry locations (%s barrier)\n", i,
		(types == STF_BARRIER_NONE)                  ? "no" :
		(types == STF_BARRIER_FALLBACK)              ? "fallback" :
		(types == STF_BARRIER_EIEIO)                 ? "eieio" :
		(types == (STF_BARRIER_SYNC_ORI))            ? "hwsync"
		                                           : "unknown");
}

static void do_stf_exit_barrier_fixups(enum stf_barrier_type types)
{
	unsigned int instrs[6], *dest;
	long *start, *end;
	int i;

	start = PTRRELOC(&__start___stf_exit_barrier_fixup),
	end = PTRRELOC(&__stop___stf_exit_barrier_fixup);

	instrs[0] = 0x60000000; /* nop */
	instrs[1] = 0x60000000; /* nop */
	instrs[2] = 0x60000000; /* nop */
	instrs[3] = 0x60000000; /* nop */
	instrs[4] = 0x60000000; /* nop */
	instrs[5] = 0x60000000; /* nop */

	i = 0;
	if (types & STF_BARRIER_FALLBACK || types & STF_BARRIER_SYNC_ORI) {
		if (cpu_has_feature(CPU_FTR_HVMODE)) {
			instrs[i++] = 0x7db14ba6; /* mtspr 0x131, r13 (HSPRG1) */
			instrs[i++] = 0x7db04aa6; /* mfspr r13, 0x130 (HSPRG0) */
		} else {
			instrs[i++] = 0x7db243a6; /* mtsprg 2,r13	*/
			instrs[i++] = 0x7db142a6; /* mfsprg r13,1    */
		}
		instrs[i++] = 0x7c0004ac; /* hwsync		*/
		instrs[i++] = 0xe9ad0000; /* ld r13,0(r13)	*/
		instrs[i++] = 0x63ff0000; /* ori 31,31,0 speculation barrier */
		if (cpu_has_feature(CPU_FTR_HVMODE)) {
			instrs[i++] = 0x7db14aa6; /* mfspr r13, 0x131 (HSPRG1) */
		} else {
			instrs[i++] = 0x7db242a6; /* mfsprg r13,2 */
		}
	} else if (types & STF_BARRIER_EIEIO) {
		instrs[i++] = 0x7e0006ac; /* eieio + bit 6 hint */
	}

	for (i = 0; start < end; start++, i++) {
		dest = (void *)start + *start;

		pr_devel("patching dest %lx\n", (unsigned long)dest);

		patch_instruction(dest, instrs[0]);
		patch_instruction(dest + 1, instrs[1]);
		patch_instruction(dest + 2, instrs[2]);
		patch_instruction(dest + 3, instrs[3]);
		patch_instruction(dest + 4, instrs[4]);
		patch_instruction(dest + 5, instrs[5]);
	}
	printk(KERN_DEBUG "stf-barrier: patched %d exit locations (%s barrier)\n", i,
		(types == STF_BARRIER_NONE)                  ? "no" :
		(types == STF_BARRIER_FALLBACK)              ? "fallback" :
		(types == STF_BARRIER_EIEIO)                 ? "eieio" :
		(types == (STF_BARRIER_SYNC_ORI))            ? "hwsync"
		                                           : "unknown");
}

static int __do_stf_barrier_fixups(void *data)
{
	enum stf_barrier_type *types = data;

	do_stf_entry_barrier_fixups(*types);
	do_stf_exit_barrier_fixups(*types);

	return 0;
}

void do_stf_barrier_fixups(enum stf_barrier_type types)
{
	/*
	 * The call to the fallback entry flush, and the fallback/sync-ori exit
	 * flush can not be safely patched in/out while other CPUs are executing
	 * them. So call __do_stf_barrier_fixups() on one CPU while all other CPUs
	 * spin in the stop machine core with interrupts hard disabled.
	 */
	stop_machine(__do_stf_barrier_fixups, &types, NULL);
}

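/*
 * Patch the sites that flush the L1 data cache after user accesses,
 * according to the flush type(s) in use. For the fallback flush the final
 * blr is nopped out so execution falls through into the fallback sequence.
 */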
void do_uaccess_flush_fixups(enum l1d_flush_type types)
{
	unsigned int instrs[4], *dest;
	long *start, *end;
	int i;

	start = PTRRELOC(&__start___uaccess_flush_fixup);
	end = PTRRELOC(&__stop___uaccess_flush_fixup);

	instrs[0] = 0x60000000; /* nop */
	instrs[1] = 0x60000000; /* nop */
	instrs[2] = 0x60000000; /* nop */
	instrs[3] = 0x4e800020; /* blr */

	i = 0;
	if (types == L1D_FLUSH_FALLBACK) {
		instrs[3] = 0x60000000; /* nop */
		/* fallthrough to fallback flush */
	}

	if (types & L1D_FLUSH_ORI) {
		instrs[i++] = 0x63ff0000; /* ori 31,31,0 speculation barrier */
		instrs[i++] = 0x63de0000; /* ori 30,30,0 L1d flush */
	}

	if (types & L1D_FLUSH_MTTRIG)
		instrs[i++] = 0x7c12dba6; /* mtspr TRIG2,r0 (SPR #882) */

	for (i = 0; start < end; start++, i++) {
		dest = (void *)start + *start;

		pr_devel("patching dest %lx\n", (unsigned long)dest);

		patch_instruction(dest, instrs[0]);

		patch_instruction((dest + 1), instrs[1]);
		patch_instruction((dest + 2), instrs[2]);
		patch_instruction((dest + 3), instrs[3]);
	}

	printk(KERN_DEBUG "uaccess-flush: patched %d locations (%s flush)\n", i,
		(types == L1D_FLUSH_NONE)       ? "no" :
		(types == L1D_FLUSH_FALLBACK)   ? "fallback displacement" :
		(types &  L1D_FLUSH_ORI)        ? (types & L1D_FLUSH_MTTRIG)
							? "ori+mttrig type"
							: "ori type" :
		(types &  L1D_FLUSH_MTTRIG)     ? "mttrig type"
						: "unknown");
}

static int __do_entry_flush_fixups(void *data)
{
	enum l1d_flush_type types = *(enum l1d_flush_type *)data;
	unsigned int instrs[3], *dest;
	long *start, *end;
	int i;

	start = PTRRELOC(&__start___entry_flush_fixup);
	end = PTRRELOC(&__stop___entry_flush_fixup);

	instrs[0] = 0x60000000; /* nop */
	instrs[1] = 0x60000000; /* nop */
	instrs[2] = 0x60000000; /* nop */

	i = 0;
	if (types == L1D_FLUSH_FALLBACK) {
		instrs[i++] = 0x7d4802a6; /* mflr r10		*/
		instrs[i++] = 0x60000000; /* branch patched below */
		instrs[i++] = 0x7d4803a6; /* mtlr r10		*/
	}

	if (types & L1D_FLUSH_ORI) {
		instrs[i++] = 0x63ff0000; /* ori 31,31,0 speculation barrier */
		instrs[i++] = 0x63de0000; /* ori 30,30,0 L1d flush */
	}

	if (types & L1D_FLUSH_MTTRIG)
		instrs[i++] = 0x7c12dba6; /* mtspr TRIG2,r0 (SPR #882) */

	for (i = 0; start < end; start++, i++) {
		dest = (void *)start + *start;

		pr_devel("patching dest %lx\n", (unsigned long)dest);

		patch_instruction(dest, instrs[0]);

		if (types == L1D_FLUSH_FALLBACK)
			patch_branch((dest + 1), (unsigned long)&entry_flush_fallback,
				     BRANCH_SET_LINK);
		else
			patch_instruction((dest + 1), instrs[1]);

		patch_instruction((dest + 2), instrs[2]);
	}

	printk(KERN_DEBUG "entry-flush: patched %d locations (%s flush)\n", i,
		(types == L1D_FLUSH_NONE)       ? "no" :
		(types == L1D_FLUSH_FALLBACK)   ? "fallback displacement" :
		(types &  L1D_FLUSH_ORI)        ? (types & L1D_FLUSH_MTTRIG)
							? "ori+mttrig type"
							: "ori type" :
		(types &  L1D_FLUSH_MTTRIG)     ? "mttrig type"
						: "unknown");

	return 0;
}

void do_entry_flush_fixups(enum l1d_flush_type types)
{
	/*
	 * The call to the fallback flush can not be safely patched in/out while
	 * other CPUs are executing it. So call __do_entry_flush_fixups() on one
	 * CPU while all other CPUs spin in the stop machine core with interrupts
	 * hard disabled.
	 */
	stop_machine(__do_entry_flush_fixups, &types, NULL);
}

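/*
 * Patch the L1D flush performed before returning to userspace (rfi),
 * according to the flush type(s) in use. The fallback flush is reached via
 * a "b .+16" patched into the first instruction slot.
 */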
void do_rfi_flush_fixups(enum l1d_flush_type types)
{
	unsigned int instrs[3], *dest;
	long *start, *end;
	int i;

	start = PTRRELOC(&__start___rfi_flush_fixup),
	end = PTRRELOC(&__stop___rfi_flush_fixup);

	instrs[0] = 0x60000000; /* nop */
	instrs[1] = 0x60000000; /* nop */
	instrs[2] = 0x60000000; /* nop */

	if (types & L1D_FLUSH_FALLBACK)
		/* b .+16 to fallback flush */
		instrs[0] = 0x48000010;

	i = 0;
	if (types & L1D_FLUSH_ORI) {
		instrs[i++] = 0x63ff0000; /* ori 31,31,0 speculation barrier */
		instrs[i++] = 0x63de0000; /* ori 30,30,0 L1d flush */
	}

	if (types & L1D_FLUSH_MTTRIG)
		instrs[i++] = 0x7c12dba6; /* mtspr TRIG2,r0 (SPR #882) */

	for (i = 0; start < end; start++, i++) {
		dest = (void *)start + *start;

		pr_devel("patching dest %lx\n", (unsigned long)dest);

		patch_instruction(dest, instrs[0]);
		patch_instruction(dest + 1, instrs[1]);
		patch_instruction(dest + 2, instrs[2]);
	}

	printk(KERN_DEBUG "rfi-flush: patched %d locations (%s flush)\n", i,
		(types == L1D_FLUSH_NONE)       ? "no" :
		(types == L1D_FLUSH_FALLBACK)   ? "fallback displacement" :
		(types &  L1D_FLUSH_ORI)        ? (types & L1D_FLUSH_MTTRIG)
							? "ori+mttrig type"
							: "ori type" :
		(types &  L1D_FLUSH_MTTRIG)     ? "mttrig type"
						: "unknown");
}

void do_barrier_nospec_fixups_range(bool enable, void *fixup_start, void *fixup_end)
{
	unsigned int instr, *dest;
	long *start, *end;
	int i;

	start = fixup_start;
	end = fixup_end;

	instr = 0x60000000; /* nop */

	if (enable) {
		pr_info("barrier-nospec: using ORI speculation barrier\n");
		instr = 0x63ff0000; /* ori 31,31,0 speculation barrier */
	}

	for (i = 0; start < end; start++, i++) {
		dest = (void *)start + *start;

		pr_devel("patching dest %lx\n", (unsigned long)dest);
		patch_instruction(dest, instr);
	}

	printk(KERN_DEBUG "barrier-nospec: patched %d locations\n", i);
}

#endif /* CONFIG_PPC_BOOK3S_64 */

#ifdef CONFIG_PPC_BARRIER_NOSPEC
void do_barrier_nospec_fixups(bool enable)
{
	void *start, *end;

	start = PTRRELOC(&__start___barrier_nospec_fixup),
	end = PTRRELOC(&__stop___barrier_nospec_fixup);

	do_barrier_nospec_fixups_range(enable, start, end);
}
#endif /* CONFIG_PPC_BARRIER_NOSPEC */

#ifdef CONFIG_PPC_FSL_BOOK3E
void do_barrier_nospec_fixups_range(bool enable, void *fixup_start, void *fixup_end)
{
	unsigned int instr[2], *dest;
	long *start, *end;
	int i;

	start = fixup_start;
	end = fixup_end;

	instr[0] = PPC_INST_NOP;
	instr[1] = PPC_INST_NOP;

	if (enable) {
		pr_info("barrier-nospec: using isync; sync as speculation barrier\n");
		instr[0] = PPC_INST_ISYNC;
		instr[1] = PPC_INST_SYNC;
	}

	for (i = 0; start < end; start++, i++) {
		dest = (void *)start + *start;

		pr_devel("patching dest %lx\n", (unsigned long)dest);
		patch_instruction(dest, instr[0]);
		patch_instruction(dest + 1, instr[1]);
	}

	printk(KERN_DEBUG "barrier-nospec: patched %d locations\n", i);
}

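/*
 * Nop out one branch target buffer (BTB) flush section, disabling the
 * flush at that site. Each table entry is a pair of offsets giving the
 * start and end of the section to patch.
 */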
static void patch_btb_flush_section(long *curr)
{
	unsigned int *start, *end;

	start = (void *)curr + *curr;
	end = (void *)curr + *(curr + 1);
	for (; start < end; start++) {
		pr_devel("patching dest %lx\n", (unsigned long)start);
		patch_instruction(start, PPC_INST_NOP);
	}
}

void do_btb_flush_fixups(void)
{
	long *start, *end;

	start = PTRRELOC(&__start__btb_flush_fixup);
	end = PTRRELOC(&__stop__btb_flush_fixup);

	for (; start < end; start += 2)
		patch_btb_flush_section(start);
}
#endif /* CONFIG_PPC_FSL_BOOK3E */

void do_lwsync_fixups(unsigned long value, void *fixup_start, void *fixup_end)
{
	long *start, *end;
	unsigned int *dest;

	if (!(value & CPU_FTR_LWSYNC))
		return;

	start = fixup_start;
	end = fixup_end;

	for (; start < end; start++) {
		dest = (void *)start + *start;
		raw_patch_instruction(dest, PPC_INST_LWSYNC);
	}
}

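/*
 * When a relocatable 64-bit kernel runs at a non-zero physical address,
 * re-copy the now feature-patched text between _stext and __end_interrupts
 * down to the kernel's base address, so the low-memory copy of the
 * exception vectors stays in sync with the patched kernel.
 */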
static void do_final_fixups(void)
{
#if defined(CONFIG_PPC64) && defined(CONFIG_RELOCATABLE)
	int *src, *dest;
	unsigned long length;

	if (PHYSICAL_START == 0)
		return;

	src = (int *)(KERNELBASE + PHYSICAL_START);
	dest = (int *)KERNELBASE;
	length = (__end_interrupts - _stext) / sizeof(int);

	while (length--) {
		raw_patch_instruction(dest, *src);
		src++;
		dest++;
	}
#endif
}

static unsigned long __initdata saved_cpu_features;
static unsigned int __initdata saved_mmu_features;
#ifdef CONFIG_PPC64
static unsigned long __initdata saved_firmware_features;
#endif

void __init apply_feature_fixups(void)
{
	struct cpu_spec *spec = PTRRELOC(*PTRRELOC(&cur_cpu_spec));

	*PTRRELOC(&saved_cpu_features) = spec->cpu_features;
	*PTRRELOC(&saved_mmu_features) = spec->mmu_features;

	/*
	 * Apply the CPU-specific and firmware-specific fixups to kernel text
	 * (nop out sections not relevant to this CPU or this firmware).
	 */
	do_feature_fixups(spec->cpu_features,
			  PTRRELOC(&__start___ftr_fixup),
			  PTRRELOC(&__stop___ftr_fixup));

	do_feature_fixups(spec->mmu_features,
			  PTRRELOC(&__start___mmu_ftr_fixup),
			  PTRRELOC(&__stop___mmu_ftr_fixup));

	do_lwsync_fixups(spec->cpu_features,
			 PTRRELOC(&__start___lwsync_fixup),
			 PTRRELOC(&__stop___lwsync_fixup));

#ifdef CONFIG_PPC64
	saved_firmware_features = powerpc_firmware_features;
	do_feature_fixups(powerpc_firmware_features,
			  &__start___fw_ftr_fixup, &__stop___fw_ftr_fixup);
#endif
	do_final_fixups();
}

void __init setup_feature_keys(void)
{
	/*
	 * Initialise jump label. This causes all the cpu/mmu_has_feature()
	 * checks to take on their correct polarity based on the current set of
	 * CPU/MMU features.
	 */
	jump_label_init();
	cpu_feature_keys_init();
	mmu_feature_keys_init();
}

static int __init check_features(void)
{
	WARN(saved_cpu_features != cur_cpu_spec->cpu_features,
	     "CPU features changed after feature patching!\n");
	WARN(saved_mmu_features != cur_cpu_spec->mmu_features,
	     "MMU features changed after feature patching!\n");
#ifdef CONFIG_PPC64
	WARN(saved_firmware_features != powerpc_firmware_features,
	     "Firmware features changed after feature patching!\n");
#endif

	return 0;
}
late_initcall(check_features);

#ifdef CONFIG_FTR_FIXUP_SELFTEST

#define check(x)	\
	if (!(x)) printk("feature-fixups: test failed at line %d\n", __LINE__);

/* This must be after the text it fixes up, vmlinux.lds.S enforces that atm */
static struct fixup_entry fixup;

static long calc_offset(struct fixup_entry *entry, unsigned int *p)
{
	return (unsigned long)p - (unsigned long)entry;
}

static void test_basic_patching(void)
{
	extern unsigned int ftr_fixup_test1[];
	extern unsigned int end_ftr_fixup_test1[];
	extern unsigned int ftr_fixup_test1_orig[];
	extern unsigned int ftr_fixup_test1_expected[];
	int size = 4 * (end_ftr_fixup_test1 - ftr_fixup_test1);

	fixup.value = fixup.mask = 8;
	fixup.start_off = calc_offset(&fixup, ftr_fixup_test1 + 1);
	fixup.end_off = calc_offset(&fixup, ftr_fixup_test1 + 2);
	fixup.alt_start_off = fixup.alt_end_off = 0;

	/* Sanity check */
	check(memcmp(ftr_fixup_test1, ftr_fixup_test1_orig, size) == 0);

	/* Check we don't patch if the value matches */
	patch_feature_section(8, &fixup);
	check(memcmp(ftr_fixup_test1, ftr_fixup_test1_orig, size) == 0);

	/* Check we do patch if the value doesn't match */
	patch_feature_section(0, &fixup);
	check(memcmp(ftr_fixup_test1, ftr_fixup_test1_expected, size) == 0);

	/* Check we do patch if the mask doesn't match */
	memcpy(ftr_fixup_test1, ftr_fixup_test1_orig, size);
	check(memcmp(ftr_fixup_test1, ftr_fixup_test1_orig, size) == 0);
	patch_feature_section(~8, &fixup);
	check(memcmp(ftr_fixup_test1, ftr_fixup_test1_expected, size) == 0);
}

static void test_alternative_patching(void)
{
	extern unsigned int ftr_fixup_test2[];
	extern unsigned int end_ftr_fixup_test2[];
	extern unsigned int ftr_fixup_test2_orig[];
	extern unsigned int ftr_fixup_test2_alt[];
	extern unsigned int ftr_fixup_test2_expected[];
	int size = 4 * (end_ftr_fixup_test2 - ftr_fixup_test2);

	fixup.value = fixup.mask = 0xF;
	fixup.start_off = calc_offset(&fixup, ftr_fixup_test2 + 1);
	fixup.end_off = calc_offset(&fixup, ftr_fixup_test2 + 2);
	fixup.alt_start_off = calc_offset(&fixup, ftr_fixup_test2_alt);
	fixup.alt_end_off = calc_offset(&fixup, ftr_fixup_test2_alt + 1);

	/* Sanity check */
	check(memcmp(ftr_fixup_test2, ftr_fixup_test2_orig, size) == 0);

	/* Check we don't patch if the value matches */
	patch_feature_section(0xF, &fixup);
	check(memcmp(ftr_fixup_test2, ftr_fixup_test2_orig, size) == 0);

	/* Check we do patch if the value doesn't match */
	patch_feature_section(0, &fixup);
	check(memcmp(ftr_fixup_test2, ftr_fixup_test2_expected, size) == 0);

	/* Check we do patch if the mask doesn't match */
	memcpy(ftr_fixup_test2, ftr_fixup_test2_orig, size);
	check(memcmp(ftr_fixup_test2, ftr_fixup_test2_orig, size) == 0);
	patch_feature_section(~0xF, &fixup);
	check(memcmp(ftr_fixup_test2, ftr_fixup_test2_expected, size) == 0);
}

static void test_alternative_case_too_big(void)
{
	extern unsigned int ftr_fixup_test3[];
	extern unsigned int end_ftr_fixup_test3[];
	extern unsigned int ftr_fixup_test3_orig[];
	extern unsigned int ftr_fixup_test3_alt[];
	int size = 4 * (end_ftr_fixup_test3 - ftr_fixup_test3);

	fixup.value = fixup.mask = 0xC;
	fixup.start_off = calc_offset(&fixup, ftr_fixup_test3 + 1);
	fixup.end_off = calc_offset(&fixup, ftr_fixup_test3 + 2);
	fixup.alt_start_off = calc_offset(&fixup, ftr_fixup_test3_alt);
	fixup.alt_end_off = calc_offset(&fixup, ftr_fixup_test3_alt + 2);

	/* Sanity check */
	check(memcmp(ftr_fixup_test3, ftr_fixup_test3_orig, size) == 0);

	/* Expect nothing to be patched, and the error returned to us */
	check(patch_feature_section(0xF, &fixup) == 1);
	check(memcmp(ftr_fixup_test3, ftr_fixup_test3_orig, size) == 0);
	check(patch_feature_section(0, &fixup) == 1);
	check(memcmp(ftr_fixup_test3, ftr_fixup_test3_orig, size) == 0);
	check(patch_feature_section(~0xF, &fixup) == 1);
	check(memcmp(ftr_fixup_test3, ftr_fixup_test3_orig, size) == 0);
}

static void test_alternative_case_too_small(void)
{
	extern unsigned int ftr_fixup_test4[];
	extern unsigned int end_ftr_fixup_test4[];
	extern unsigned int ftr_fixup_test4_orig[];
	extern unsigned int ftr_fixup_test4_alt[];
	extern unsigned int ftr_fixup_test4_expected[];
	int size = 4 * (end_ftr_fixup_test4 - ftr_fixup_test4);
	unsigned long flag;

	/* Check a high-bit flag */
	flag = 1UL << ((sizeof(unsigned long) - 1) * 8);
	fixup.value = fixup.mask = flag;
	fixup.start_off = calc_offset(&fixup, ftr_fixup_test4 + 1);
	fixup.end_off = calc_offset(&fixup, ftr_fixup_test4 + 5);
	fixup.alt_start_off = calc_offset(&fixup, ftr_fixup_test4_alt);
	fixup.alt_end_off = calc_offset(&fixup, ftr_fixup_test4_alt + 2);

	/* Sanity check */
	check(memcmp(ftr_fixup_test4, ftr_fixup_test4_orig, size) == 0);

	/* Check we don't patch if the value matches */
	patch_feature_section(flag, &fixup);
	check(memcmp(ftr_fixup_test4, ftr_fixup_test4_orig, size) == 0);

	/* Check we do patch if the value doesn't match */
	patch_feature_section(0, &fixup);
	check(memcmp(ftr_fixup_test4, ftr_fixup_test4_expected, size) == 0);

	/* Check we do patch if the mask doesn't match */
	memcpy(ftr_fixup_test4, ftr_fixup_test4_orig, size);
	check(memcmp(ftr_fixup_test4, ftr_fixup_test4_orig, size) == 0);
	patch_feature_section(~flag, &fixup);
	check(memcmp(ftr_fixup_test4, ftr_fixup_test4_expected, size) == 0);
}

static void test_alternative_case_with_branch(void)
{
	extern unsigned int ftr_fixup_test5[];
	extern unsigned int end_ftr_fixup_test5[];
	extern unsigned int ftr_fixup_test5_expected[];
	int size = 4 * (end_ftr_fixup_test5 - ftr_fixup_test5);

	check(memcmp(ftr_fixup_test5, ftr_fixup_test5_expected, size) == 0);
}

static void test_alternative_case_with_external_branch(void)
{
	extern unsigned int ftr_fixup_test6[];
	extern unsigned int end_ftr_fixup_test6[];
	extern unsigned int ftr_fixup_test6_expected[];
	int size = 4 * (end_ftr_fixup_test6 - ftr_fixup_test6);

	check(memcmp(ftr_fixup_test6, ftr_fixup_test6_expected, size) == 0);
}

static void test_alternative_case_with_branch_to_end(void)
{
	extern unsigned int ftr_fixup_test7[];
	extern unsigned int end_ftr_fixup_test7[];
	extern unsigned int ftr_fixup_test7_expected[];
	int size = 4 * (end_ftr_fixup_test7 - ftr_fixup_test7);

	check(memcmp(ftr_fixup_test7, ftr_fixup_test7_expected, size) == 0);
}

static void test_cpu_macros(void)
{
	extern u8 ftr_fixup_test_FTR_macros[];
	extern u8 ftr_fixup_test_FTR_macros_expected[];
	unsigned long size = ftr_fixup_test_FTR_macros_expected -
			     ftr_fixup_test_FTR_macros;

	/* The fixups have already been done for us during boot */
	check(memcmp(ftr_fixup_test_FTR_macros,
		     ftr_fixup_test_FTR_macros_expected, size) == 0);
}

static void test_fw_macros(void)
{
#ifdef CONFIG_PPC64
	extern u8 ftr_fixup_test_FW_FTR_macros[];
	extern u8 ftr_fixup_test_FW_FTR_macros_expected[];
	unsigned long size = ftr_fixup_test_FW_FTR_macros_expected -
			     ftr_fixup_test_FW_FTR_macros;

	/* The fixups have already been done for us during boot */
	check(memcmp(ftr_fixup_test_FW_FTR_macros,
		     ftr_fixup_test_FW_FTR_macros_expected, size) == 0);
#endif
}

static void test_lwsync_macros(void)
{
	extern u8 lwsync_fixup_test[];
	extern u8 end_lwsync_fixup_test[];
	extern u8 lwsync_fixup_test_expected_LWSYNC[];
	extern u8 lwsync_fixup_test_expected_SYNC[];
	unsigned long size = end_lwsync_fixup_test -
			     lwsync_fixup_test;

	/* The fixups have already been done for us during boot */
	if (cur_cpu_spec->cpu_features & CPU_FTR_LWSYNC) {
		check(memcmp(lwsync_fixup_test,
			     lwsync_fixup_test_expected_LWSYNC, size) == 0);
	} else {
		check(memcmp(lwsync_fixup_test,
			     lwsync_fixup_test_expected_SYNC, size) == 0);
	}
}

static int __init test_feature_fixups(void)
{
	printk(KERN_DEBUG "Running feature fixup self-tests ...\n");

	test_basic_patching();
	test_alternative_patching();
	test_alternative_case_too_big();
	test_alternative_case_too_small();
	test_alternative_case_with_branch();
	test_alternative_case_with_external_branch();
	test_alternative_case_with_branch_to_end();
	test_cpu_macros();
	test_fw_macros();
	test_lwsync_macros();

	return 0;
}
late_initcall(test_feature_fixups);

#endif /* CONFIG_FTR_FIXUP_SELFTEST */