/*
 *  Copyright (C) 2001 Ben. Herrenschmidt (benh@kernel.crashing.org)
 *
 *  Modifications for ppc64:
 *      Copyright (C) 2003 Dave Engebretsen <engebret@us.ibm.com>
 *
 *  Copyright 2008 Michael Ellerman, IBM Corporation.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/stop_machine.h>
#include <asm/cputable.h>
#include <asm/code-patching.h>
#include <asm/page.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/security_features.h>

struct fixup_entry {
	unsigned long	mask;
	unsigned long	value;
	long		start_off;
	long		end_off;
	long		alt_start_off;
	long		alt_end_off;
};

static unsigned int *calc_addr(struct fixup_entry *fcur, long offset)
{
	/*
	 * We store the offset to the code as a negative offset from
	 * the start of the alt_entry, to support the VDSO. This
	 * routine converts that back into an actual address.
	 */
	return (unsigned int *)((unsigned long)fcur + offset);
}

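/*
 * Copy a single instruction from the alternative section to its final
 * location, retargeting relative branches whose target lies outside
 * the alternative section. Returns non-zero if the branch cannot be
 * translated.
 */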
static int patch_alt_instruction(unsigned int *src, unsigned int *dest,
				 unsigned int *alt_start, unsigned int *alt_end)
{
	unsigned int instr;

	instr = *src;

	if (instr_is_relative_branch(*src)) {
		unsigned int *target = (unsigned int *)branch_target(src);

		/* Branch within the section doesn't need translating */
		if (target < alt_start || target > alt_end) {
			instr = translate_branch(dest, src);
			if (!instr)
				return 1;
		}
	}

	patch_instruction(dest, instr);

	return 0;
}

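/*
 * Apply one fixup entry: if the feature value does not match the
 * entry's mask/value, overwrite the section with the alternative
 * instructions and pad the remainder with nops. Returns non-zero if
 * the alternative section is larger than the section being patched.
 */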
static int patch_feature_section(unsigned long value, struct fixup_entry *fcur)
{
	unsigned int *start, *end, *alt_start, *alt_end, *src, *dest;

	start = calc_addr(fcur, fcur->start_off);
	end = calc_addr(fcur, fcur->end_off);
	alt_start = calc_addr(fcur, fcur->alt_start_off);
	alt_end = calc_addr(fcur, fcur->alt_end_off);

	if ((alt_end - alt_start) > (end - start))
		return 1;

	if ((value & fcur->mask) == fcur->value)
		return 0;

	src = alt_start;
	dest = start;

	for (; src < alt_end; src++, dest++) {
		if (patch_alt_instruction(src, dest, alt_start, alt_end))
			return 1;
	}

	for (; dest < end; dest++)
		patch_instruction(dest, PPC_INST_NOP);

	return 0;
}

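/*
 * Walk the fixup entries between fixup_start and fixup_end, applying
 * each one against the given feature value.
 */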
void do_feature_fixups(unsigned long value, void *fixup_start, void *fixup_end)
{
	struct fixup_entry *fcur, *fend;

	fcur = fixup_start;
	fend = fixup_end;

	for (; fcur < fend; fcur++) {
		if (patch_feature_section(value, fcur)) {
			WARN_ON(1);
			printk("Unable to patch feature section at %p - %p" \
				" with %p - %p\n",
				calc_addr(fcur, fcur->start_off),
				calc_addr(fcur, fcur->end_off),
				calc_addr(fcur, fcur->alt_start_off),
				calc_addr(fcur, fcur->alt_end_off));
		}
	}
}

#ifdef CONFIG_PPC_BOOK3S_64
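/*
 * Patch the three-instruction slot at each STF entry barrier fixup
 * site with the sequence for the requested barrier type; unused slots
 * remain nops.
 */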
void do_stf_entry_barrier_fixups(enum stf_barrier_type types)
{
	unsigned int instrs[3], *dest;
	long *start, *end;
	int i;

	start = PTRRELOC(&__start___stf_entry_barrier_fixup),
	end = PTRRELOC(&__stop___stf_entry_barrier_fixup);

	instrs[0] = 0x60000000; /* nop */
	instrs[1] = 0x60000000; /* nop */
	instrs[2] = 0x60000000; /* nop */

	i = 0;
	if (types & STF_BARRIER_FALLBACK) {
		instrs[i++] = 0x7d4802a6; /* mflr r10		*/
		instrs[i++] = 0x60000000; /* branch patched below */
		instrs[i++] = 0x7d4803a6; /* mtlr r10		*/
	} else if (types & STF_BARRIER_EIEIO) {
		instrs[i++] = 0x7e0006ac; /* eieio + bit 6 hint */
	} else if (types & STF_BARRIER_SYNC_ORI) {
		instrs[i++] = 0x7c0004ac; /* hwsync		*/
		instrs[i++] = 0xe94d0000; /* ld r10,0(r13)	*/
		instrs[i++] = 0x63ff0000; /* ori 31,31,0 speculation barrier */
	}

	for (i = 0; start < end; start++, i++) {
		dest = (void *)start + *start;

		pr_devel("patching dest %lx\n", (unsigned long)dest);

		patch_instruction(dest, instrs[0]);

		if (types & STF_BARRIER_FALLBACK)
			patch_branch(dest + 1, (unsigned long)&stf_barrier_fallback,
				     BRANCH_SET_LINK);
		else
			patch_instruction(dest + 1, instrs[1]);

		patch_instruction(dest + 2, instrs[2]);
	}

	printk(KERN_DEBUG "stf-barrier: patched %d entry locations (%s barrier)\n", i,
		(types == STF_BARRIER_NONE)                  ? "no" :
		(types == STF_BARRIER_FALLBACK)              ? "fallback" :
		(types == STF_BARRIER_EIEIO)                 ? "eieio" :
		(types == (STF_BARRIER_SYNC_ORI))            ? "hwsync"
		                                           : "unknown");
}

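/*
 * Patch the six-instruction slot at each STF exit barrier fixup site.
 * The fallback/sync_ori sequence stashes r13 in an SPRG, issues
 * hwsync + load + ori as the barrier, then restores r13; the eieio
 * variant is a single instruction.
 */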
void do_stf_exit_barrier_fixups(enum stf_barrier_type types)
{
	unsigned int instrs[6], *dest;
	long *start, *end;
	int i;

	start = PTRRELOC(&__start___stf_exit_barrier_fixup),
	end = PTRRELOC(&__stop___stf_exit_barrier_fixup);

	instrs[0] = 0x60000000; /* nop */
	instrs[1] = 0x60000000; /* nop */
	instrs[2] = 0x60000000; /* nop */
	instrs[3] = 0x60000000; /* nop */
	instrs[4] = 0x60000000; /* nop */
	instrs[5] = 0x60000000; /* nop */

	i = 0;
	if (types & STF_BARRIER_FALLBACK || types & STF_BARRIER_SYNC_ORI) {
		if (cpu_has_feature(CPU_FTR_HVMODE)) {
			instrs[i++] = 0x7db14ba6; /* mtspr 0x131, r13 (HSPRG1) */
			instrs[i++] = 0x7db04aa6; /* mfspr r13, 0x130 (HSPRG0) */
		} else {
			instrs[i++] = 0x7db243a6; /* mtsprg 2,r13	*/
			instrs[i++] = 0x7db142a6; /* mfsprg r13,1    */
		}
		instrs[i++] = 0x7c0004ac; /* hwsync		*/
		instrs[i++] = 0xe9ad0000; /* ld r13,0(r13)	*/
		instrs[i++] = 0x63ff0000; /* ori 31,31,0 speculation barrier */
		if (cpu_has_feature(CPU_FTR_HVMODE)) {
			instrs[i++] = 0x7db14aa6; /* mfspr r13, 0x131 (HSPRG1) */
		} else {
			instrs[i++] = 0x7db242a6; /* mfsprg r13,2 */
		}
	} else if (types & STF_BARRIER_EIEIO) {
		instrs[i++] = 0x7e0006ac; /* eieio + bit 6 hint */
	}

	for (i = 0; start < end; start++, i++) {
		dest = (void *)start + *start;

		pr_devel("patching dest %lx\n", (unsigned long)dest);

		patch_instruction(dest, instrs[0]);
		patch_instruction(dest + 1, instrs[1]);
		patch_instruction(dest + 2, instrs[2]);
		patch_instruction(dest + 3, instrs[3]);
		patch_instruction(dest + 4, instrs[4]);
		patch_instruction(dest + 5, instrs[5]);
	}
	printk(KERN_DEBUG "stf-barrier: patched %d exit locations (%s barrier)\n", i,
		(types == STF_BARRIER_NONE)                  ? "no" :
		(types == STF_BARRIER_FALLBACK)              ? "fallback" :
		(types == STF_BARRIER_EIEIO)                 ? "eieio" :
		(types == (STF_BARRIER_SYNC_ORI))            ? "hwsync"
		                                           : "unknown");
}


void do_stf_barrier_fixups(enum stf_barrier_type types)
{
	do_stf_entry_barrier_fixups(types);
	do_stf_exit_barrier_fixups(types);
}

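/*
 * Patch the four-instruction slot at each uaccess flush fixup site.
 * The last slot is a blr, except for the fallback type where it stays
 * a nop so execution falls through to the fallback flush.
 */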
void do_uaccess_flush_fixups(enum l1d_flush_type types)
{
	unsigned int instrs[4], *dest;
	long *start, *end;
	int i;

	start = PTRRELOC(&__start___uaccess_flush_fixup);
	end = PTRRELOC(&__stop___uaccess_flush_fixup);

	instrs[0] = 0x60000000; /* nop */
	instrs[1] = 0x60000000; /* nop */
	instrs[2] = 0x60000000; /* nop */
	instrs[3] = 0x4e800020; /* blr */

	i = 0;
	if (types == L1D_FLUSH_FALLBACK) {
		instrs[3] = 0x60000000; /* nop */
		/* fallthrough to fallback flush */
	}

	if (types & L1D_FLUSH_ORI) {
		instrs[i++] = 0x63ff0000; /* ori 31,31,0 speculation barrier */
		instrs[i++] = 0x63de0000; /* ori 30,30,0 L1d flush*/
	}

	if (types & L1D_FLUSH_MTTRIG)
		instrs[i++] = 0x7c12dba6; /* mtspr TRIG2,r0 (SPR #882) */

	for (i = 0; start < end; start++, i++) {
		dest = (void *)start + *start;

		pr_devel("patching dest %lx\n", (unsigned long)dest);

		patch_instruction(dest, instrs[0]);

		patch_instruction((dest + 1), instrs[1]);
		patch_instruction((dest + 2), instrs[2]);
		patch_instruction((dest + 3), instrs[3]);
	}

	printk(KERN_DEBUG "uaccess-flush: patched %d locations (%s flush)\n", i,
		(types == L1D_FLUSH_NONE)       ? "no" :
		(types == L1D_FLUSH_FALLBACK)   ? "fallback displacement" :
		(types &  L1D_FLUSH_ORI)        ? (types & L1D_FLUSH_MTTRIG)
							? "ori+mttrig type"
							: "ori type" :
		(types &  L1D_FLUSH_MTTRIG)     ? "mttrig type"
						: "unknown");
}

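/*
 * Patch the three-instruction slot at each entry flush fixup site for
 * the selected L1D flush type. Runs under stop_machine(), see
 * do_entry_flush_fixups() below.
 */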
static int __do_entry_flush_fixups(void *data)
{
	enum l1d_flush_type types = *(enum l1d_flush_type *)data;
	unsigned int instrs[3], *dest;
	long *start, *end;
	int i;

	start = PTRRELOC(&__start___entry_flush_fixup);
	end = PTRRELOC(&__stop___entry_flush_fixup);

	instrs[0] = 0x60000000; /* nop */
	instrs[1] = 0x60000000; /* nop */
	instrs[2] = 0x60000000; /* nop */

	i = 0;
	if (types == L1D_FLUSH_FALLBACK) {
		instrs[i++] = 0x7d4802a6; /* mflr r10		*/
		instrs[i++] = 0x60000000; /* branch patched below */
		instrs[i++] = 0x7d4803a6; /* mtlr r10		*/
	}

	if (types & L1D_FLUSH_ORI) {
		instrs[i++] = 0x63ff0000; /* ori 31,31,0 speculation barrier */
		instrs[i++] = 0x63de0000; /* ori 30,30,0 L1d flush*/
	}

	if (types & L1D_FLUSH_MTTRIG)
		instrs[i++] = 0x7c12dba6; /* mtspr TRIG2,r0 (SPR #882) */

	for (i = 0; start < end; start++, i++) {
		dest = (void *)start + *start;

		pr_devel("patching dest %lx\n", (unsigned long)dest);

		patch_instruction(dest, instrs[0]);

		if (types == L1D_FLUSH_FALLBACK)
			patch_branch((dest + 1), (unsigned long)&entry_flush_fallback,
				     BRANCH_SET_LINK);
		else
			patch_instruction((dest + 1), instrs[1]);

		patch_instruction((dest + 2), instrs[2]);
	}

	printk(KERN_DEBUG "entry-flush: patched %d locations (%s flush)\n", i,
		(types == L1D_FLUSH_NONE)       ? "no" :
		(types == L1D_FLUSH_FALLBACK)   ? "fallback displacement" :
		(types &  L1D_FLUSH_ORI)        ? (types & L1D_FLUSH_MTTRIG)
							? "ori+mttrig type"
							: "ori type" :
		(types &  L1D_FLUSH_MTTRIG)     ? "mttrig type"
						: "unknown");

	return 0;
}

void do_entry_flush_fixups(enum l1d_flush_type types)
{
	/*
	 * The call to the fallback flush can not be safely patched in/out while
	 * other CPUs are executing it. So call __do_entry_flush_fixups() on one
	 * CPU while all other CPUs spin in the stop machine core with interrupts
	 * hard disabled.
	 */
	stop_machine(__do_entry_flush_fixups, &types, NULL);
}

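/*
 * Patch the three-instruction slot at each RFI flush fixup site. For
 * the fallback type the first slot becomes a branch (b .+16) to the
 * fallback flush.
 */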
void do_rfi_flush_fixups(enum l1d_flush_type types)
{
	unsigned int instrs[3], *dest;
	long *start, *end;
	int i;

	start = PTRRELOC(&__start___rfi_flush_fixup),
	end = PTRRELOC(&__stop___rfi_flush_fixup);

	instrs[0] = 0x60000000; /* nop */
	instrs[1] = 0x60000000; /* nop */
	instrs[2] = 0x60000000; /* nop */

	if (types & L1D_FLUSH_FALLBACK)
		/* b .+16 to fallback flush */
		instrs[0] = 0x48000010;

	i = 0;
	if (types & L1D_FLUSH_ORI) {
		instrs[i++] = 0x63ff0000; /* ori 31,31,0 speculation barrier */
		instrs[i++] = 0x63de0000; /* ori 30,30,0 L1d flush*/
	}

	if (types & L1D_FLUSH_MTTRIG)
		instrs[i++] = 0x7c12dba6; /* mtspr TRIG2,r0 (SPR #882) */

	for (i = 0; start < end; start++, i++) {
		dest = (void *)start + *start;

		pr_devel("patching dest %lx\n", (unsigned long)dest);

		patch_instruction(dest, instrs[0]);
		patch_instruction(dest + 1, instrs[1]);
		patch_instruction(dest + 2, instrs[2]);
	}

	printk(KERN_DEBUG "rfi-flush: patched %d locations (%s flush)\n", i,
		(types == L1D_FLUSH_NONE)       ? "no" :
		(types == L1D_FLUSH_FALLBACK)   ? "fallback displacement" :
		(types &  L1D_FLUSH_ORI)        ? (types & L1D_FLUSH_MTTRIG)
							? "ori+mttrig type"
							: "ori type" :
		(types &  L1D_FLUSH_MTTRIG)     ? "mttrig type"
						: "unknown");
}

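/*
 * Patch each barrier_nospec fixup site in the given range with an ori
 * speculation barrier when enabled, or a nop when disabled.
 */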
void do_barrier_nospec_fixups_range(bool enable, void *fixup_start, void *fixup_end)
{
	unsigned int instr, *dest;
	long *start, *end;
	int i;

	start = fixup_start;
	end = fixup_end;

	instr = 0x60000000; /* nop */

	if (enable) {
		pr_info("barrier-nospec: using ORI speculation barrier\n");
		instr = 0x63ff0000; /* ori 31,31,0 speculation barrier */
	}

	for (i = 0; start < end; start++, i++) {
		dest = (void *)start + *start;

		pr_devel("patching dest %lx\n", (unsigned long)dest);
		patch_instruction(dest, instr);
	}

	printk(KERN_DEBUG "barrier-nospec: patched %d locations\n", i);
}

#endif /* CONFIG_PPC_BOOK3S_64 */

#ifdef CONFIG_PPC_BARRIER_NOSPEC
void do_barrier_nospec_fixups(bool enable)
{
	void *start, *end;

	start = PTRRELOC(&__start___barrier_nospec_fixup),
	end = PTRRELOC(&__stop___barrier_nospec_fixup);

	do_barrier_nospec_fixups_range(enable, start, end);
}
#endif /* CONFIG_PPC_BARRIER_NOSPEC */

#ifdef CONFIG_PPC_FSL_BOOK3E
void do_barrier_nospec_fixups_range(bool enable, void *fixup_start, void *fixup_end)
{
	unsigned int instr[2], *dest;
	long *start, *end;
	int i;

	start = fixup_start;
	end = fixup_end;

	instr[0] = PPC_INST_NOP;
	instr[1] = PPC_INST_NOP;

	if (enable) {
		pr_info("barrier-nospec: using isync; sync as speculation barrier\n");
		instr[0] = PPC_INST_ISYNC;
		instr[1] = PPC_INST_SYNC;
	}

	for (i = 0; start < end; start++, i++) {
		dest = (void *)start + *start;

		pr_devel("patching dest %lx\n", (unsigned long)dest);
		patch_instruction(dest, instr[0]);
		patch_instruction(dest + 1, instr[1]);
	}

	printk(KERN_DEBUG "barrier-nospec: patched %d locations\n", i);
}

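/*
 * Each BTB flush fixup entry is a pair of offsets bounding a code
 * range; replace every instruction in that range with a nop.
 */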
static void patch_btb_flush_section(long *curr)
{
	unsigned int *start, *end;

	start = (void *)curr + *curr;
	end = (void *)curr + *(curr + 1);
	for (; start < end; start++) {
		pr_devel("patching dest %lx\n", (unsigned long)start);
		patch_instruction(start, PPC_INST_NOP);
	}
}

void do_btb_flush_fixups(void)
{
	long *start, *end;

	start = PTRRELOC(&__start__btb_flush_fixup);
	end = PTRRELOC(&__stop__btb_flush_fixup);

	for (; start < end; start += 2)
		patch_btb_flush_section(start);
}
#endif /* CONFIG_PPC_FSL_BOOK3E */

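/*
 * If the CPU supports lwsync, patch each lwsync fixup site with a
 * lwsync instruction; otherwise leave the sites untouched.
 */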
void do_lwsync_fixups(unsigned long value, void *fixup_start, void *fixup_end)
{
	long *start, *end;
	unsigned int *dest;

	if (!(value & CPU_FTR_LWSYNC))
		return ;

	start = fixup_start;
	end = fixup_end;

	for (; start < end; start++) {
		dest = (void *)start + *start;
		patch_instruction(dest, PPC_INST_LWSYNC);
	}
}

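/*
 * On a relocatable 64-bit kernel running above physical address zero,
 * copy the text from _stext to __end_interrupts down to the kernel
 * base address.
 */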
void do_final_fixups(void)
{
#if defined(CONFIG_PPC64) && defined(CONFIG_RELOCATABLE)
	int *src, *dest;
	unsigned long length;

	if (PHYSICAL_START == 0)
		return;

	src = (int *)(KERNELBASE + PHYSICAL_START);
	dest = (int *)KERNELBASE;
	length = (__end_interrupts - _stext) / sizeof(int);

	while (length--) {
		patch_instruction(dest, *src);
		src++;
		dest++;
	}
#endif
}

#ifdef CONFIG_FTR_FIXUP_SELFTEST

#define check(x)	\
	if (!(x)) printk("feature-fixups: test failed at line %d\n", __LINE__);

/* This must be after the text it fixes up, vmlinux.lds.S enforces that atm */
static struct fixup_entry fixup;

static long calc_offset(struct fixup_entry *entry, unsigned int *p)
{
	return (unsigned long)p - (unsigned long)entry;
}

static void test_basic_patching(void)
{
	extern unsigned int ftr_fixup_test1;
	extern unsigned int end_ftr_fixup_test1;
	extern unsigned int ftr_fixup_test1_orig;
	extern unsigned int ftr_fixup_test1_expected;
	int size = &end_ftr_fixup_test1 - &ftr_fixup_test1;

	fixup.value = fixup.mask = 8;
	fixup.start_off = calc_offset(&fixup, &ftr_fixup_test1 + 1);
	fixup.end_off = calc_offset(&fixup, &ftr_fixup_test1 + 2);
	fixup.alt_start_off = fixup.alt_end_off = 0;

	/* Sanity check */
	check(memcmp(&ftr_fixup_test1, &ftr_fixup_test1_orig, size) == 0);

	/* Check we don't patch if the value matches */
	patch_feature_section(8, &fixup);
	check(memcmp(&ftr_fixup_test1, &ftr_fixup_test1_orig, size) == 0);

	/* Check we do patch if the value doesn't match */
	patch_feature_section(0, &fixup);
	check(memcmp(&ftr_fixup_test1, &ftr_fixup_test1_expected, size) == 0);

	/* Check we do patch if the mask doesn't match */
	memcpy(&ftr_fixup_test1, &ftr_fixup_test1_orig, size);
	check(memcmp(&ftr_fixup_test1, &ftr_fixup_test1_orig, size) == 0);
	patch_feature_section(~8, &fixup);
	check(memcmp(&ftr_fixup_test1, &ftr_fixup_test1_expected, size) == 0);
}

static void test_alternative_patching(void)
{
	extern unsigned int ftr_fixup_test2;
	extern unsigned int end_ftr_fixup_test2;
	extern unsigned int ftr_fixup_test2_orig;
	extern unsigned int ftr_fixup_test2_alt;
	extern unsigned int ftr_fixup_test2_expected;
	int size = &end_ftr_fixup_test2 - &ftr_fixup_test2;

	fixup.value = fixup.mask = 0xF;
	fixup.start_off = calc_offset(&fixup, &ftr_fixup_test2 + 1);
	fixup.end_off = calc_offset(&fixup, &ftr_fixup_test2 + 2);
	fixup.alt_start_off = calc_offset(&fixup, &ftr_fixup_test2_alt);
	fixup.alt_end_off = calc_offset(&fixup, &ftr_fixup_test2_alt + 1);

	/* Sanity check */
	check(memcmp(&ftr_fixup_test2, &ftr_fixup_test2_orig, size) == 0);

	/* Check we don't patch if the value matches */
	patch_feature_section(0xF, &fixup);
	check(memcmp(&ftr_fixup_test2, &ftr_fixup_test2_orig, size) == 0);

	/* Check we do patch if the value doesn't match */
	patch_feature_section(0, &fixup);
	check(memcmp(&ftr_fixup_test2, &ftr_fixup_test2_expected, size) == 0);

	/* Check we do patch if the mask doesn't match */
	memcpy(&ftr_fixup_test2, &ftr_fixup_test2_orig, size);
	check(memcmp(&ftr_fixup_test2, &ftr_fixup_test2_orig, size) == 0);
	patch_feature_section(~0xF, &fixup);
	check(memcmp(&ftr_fixup_test2, &ftr_fixup_test2_expected, size) == 0);
}

static void test_alternative_case_too_big(void)
{
	extern unsigned int ftr_fixup_test3;
	extern unsigned int end_ftr_fixup_test3;
	extern unsigned int ftr_fixup_test3_orig;
	extern unsigned int ftr_fixup_test3_alt;
	int size = &end_ftr_fixup_test3 - &ftr_fixup_test3;

	fixup.value = fixup.mask = 0xC;
	fixup.start_off = calc_offset(&fixup, &ftr_fixup_test3 + 1);
	fixup.end_off = calc_offset(&fixup, &ftr_fixup_test3 + 2);
	fixup.alt_start_off = calc_offset(&fixup, &ftr_fixup_test3_alt);
	fixup.alt_end_off = calc_offset(&fixup, &ftr_fixup_test3_alt + 2);

	/* Sanity check */
	check(memcmp(&ftr_fixup_test3, &ftr_fixup_test3_orig, size) == 0);

	/* Expect nothing to be patched, and the error returned to us */
	check(patch_feature_section(0xF, &fixup) == 1);
	check(memcmp(&ftr_fixup_test3, &ftr_fixup_test3_orig, size) == 0);
	check(patch_feature_section(0, &fixup) == 1);
	check(memcmp(&ftr_fixup_test3, &ftr_fixup_test3_orig, size) == 0);
	check(patch_feature_section(~0xF, &fixup) == 1);
	check(memcmp(&ftr_fixup_test3, &ftr_fixup_test3_orig, size) == 0);
}

static void test_alternative_case_too_small(void)
{
	extern unsigned int ftr_fixup_test4;
	extern unsigned int end_ftr_fixup_test4;
	extern unsigned int ftr_fixup_test4_orig;
	extern unsigned int ftr_fixup_test4_alt;
	extern unsigned int ftr_fixup_test4_expected;
	int size = &end_ftr_fixup_test4 - &ftr_fixup_test4;
	unsigned long flag;

	/* Check a high-bit flag */
	flag = 1UL << ((sizeof(unsigned long) - 1) * 8);
	fixup.value = fixup.mask = flag;
	fixup.start_off = calc_offset(&fixup, &ftr_fixup_test4 + 1);
	fixup.end_off = calc_offset(&fixup, &ftr_fixup_test4 + 5);
	fixup.alt_start_off = calc_offset(&fixup, &ftr_fixup_test4_alt);
	fixup.alt_end_off = calc_offset(&fixup, &ftr_fixup_test4_alt + 2);

	/* Sanity check */
	check(memcmp(&ftr_fixup_test4, &ftr_fixup_test4_orig, size) == 0);

	/* Check we don't patch if the value matches */
	patch_feature_section(flag, &fixup);
	check(memcmp(&ftr_fixup_test4, &ftr_fixup_test4_orig, size) == 0);

	/* Check we do patch if the value doesn't match */
	patch_feature_section(0, &fixup);
	check(memcmp(&ftr_fixup_test4, &ftr_fixup_test4_expected, size) == 0);

	/* Check we do patch if the mask doesn't match */
	memcpy(&ftr_fixup_test4, &ftr_fixup_test4_orig, size);
	check(memcmp(&ftr_fixup_test4, &ftr_fixup_test4_orig, size) == 0);
	patch_feature_section(~flag, &fixup);
	check(memcmp(&ftr_fixup_test4, &ftr_fixup_test4_expected, size) == 0);
}

static void test_alternative_case_with_branch(void)
{
	extern unsigned int ftr_fixup_test5;
	extern unsigned int end_ftr_fixup_test5;
	extern unsigned int ftr_fixup_test5_expected;
	int size = &end_ftr_fixup_test5 - &ftr_fixup_test5;

	check(memcmp(&ftr_fixup_test5, &ftr_fixup_test5_expected, size) == 0);
}

static void test_alternative_case_with_external_branch(void)
{
	extern unsigned int ftr_fixup_test6;
	extern unsigned int end_ftr_fixup_test6;
	extern unsigned int ftr_fixup_test6_expected;
	int size = &end_ftr_fixup_test6 - &ftr_fixup_test6;

	check(memcmp(&ftr_fixup_test6, &ftr_fixup_test6_expected, size) == 0);
}

static void test_cpu_macros(void)
{
	extern u8 ftr_fixup_test_FTR_macros;
	extern u8 ftr_fixup_test_FTR_macros_expected;
	unsigned long size = &ftr_fixup_test_FTR_macros_expected -
			     &ftr_fixup_test_FTR_macros;

	/* The fixups have already been done for us during boot */
	check(memcmp(&ftr_fixup_test_FTR_macros,
		     &ftr_fixup_test_FTR_macros_expected, size) == 0);
}

static void test_fw_macros(void)
{
#ifdef CONFIG_PPC64
	extern u8 ftr_fixup_test_FW_FTR_macros;
	extern u8 ftr_fixup_test_FW_FTR_macros_expected;
	unsigned long size = &ftr_fixup_test_FW_FTR_macros_expected -
			     &ftr_fixup_test_FW_FTR_macros;

	/* The fixups have already been done for us during boot */
	check(memcmp(&ftr_fixup_test_FW_FTR_macros,
		     &ftr_fixup_test_FW_FTR_macros_expected, size) == 0);
#endif
}

static void test_lwsync_macros(void)
{
	extern u8 lwsync_fixup_test;
	extern u8 end_lwsync_fixup_test;
	extern u8 lwsync_fixup_test_expected_LWSYNC;
	extern u8 lwsync_fixup_test_expected_SYNC;
	unsigned long size = &end_lwsync_fixup_test -
			     &lwsync_fixup_test;

	/* The fixups have already been done for us during boot */
	if (cur_cpu_spec->cpu_features & CPU_FTR_LWSYNC) {
		check(memcmp(&lwsync_fixup_test,
			     &lwsync_fixup_test_expected_LWSYNC, size) == 0);
	} else {
		check(memcmp(&lwsync_fixup_test,
			     &lwsync_fixup_test_expected_SYNC, size) == 0);
	}
}

static int __init test_feature_fixups(void)
{
	printk(KERN_DEBUG "Running feature fixup self-tests ...\n");

	test_basic_patching();
	test_alternative_patching();
	test_alternative_case_too_big();
	test_alternative_case_too_small();
	test_alternative_case_with_branch();
	test_alternative_case_with_external_branch();
	test_cpu_macros();
	test_fw_macros();
	test_lwsync_macros();

	return 0;
}
late_initcall(test_feature_fixups);

#endif /* CONFIG_FTR_FIXUP_SELFTEST */
752