// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  Copyright (C) 2001 Ben. Herrenschmidt (benh@kernel.crashing.org)
 *
 *  Modifications for ppc64:
 *      Copyright (C) 2003 Dave Engebretsen <engebret@us.ibm.com>
 *
 *  Copyright 2008 Michael Ellerman, IBM Corporation.
 */

#include <linux/types.h>
#include <linux/jump_label.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/sched/mm.h>
#include <linux/stop_machine.h>
#include <asm/cputable.h>
#include <asm/code-patching.h>
#include <asm/page.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/security_features.h>
#include <asm/firmware.h>
#include <asm/inst.h>

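/*
 * One entry in a __ftr_fixup-style section. 'value' is compared against the
 * CPU/MMU/firmware feature word under 'mask'; the offsets locate the code to
 * be patched and its alternative, and are stored relative to the entry
 * itself (see calc_addr() below).
 */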
struct fixup_entry {
	unsigned long	mask;
	unsigned long	value;
	long		start_off;
	long		end_off;
	long		alt_start_off;
	long		alt_end_off;
};

static struct ppc_inst *calc_addr(struct fixup_entry *fcur, long offset)
{
	/*
	 * We store the offset to the code as a negative offset from
	 * the start of the alt_entry, to support the VDSO. This
	 * routine converts that back into an actual address.
	 */
	return (struct ppc_inst *)((unsigned long)fcur + offset);
}

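/*
 * Copy one instruction from the alternative section (src) over the code at
 * dest. A relative branch that targets code outside the alternative section
 * is re-encoded so it still reaches the same target from its new address.
 * Returns non-zero if the branch cannot be translated.
 */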
static int patch_alt_instruction(struct ppc_inst *src, struct ppc_inst *dest,
				 struct ppc_inst *alt_start, struct ppc_inst *alt_end)
{
	int err;
	struct ppc_inst instr;

	instr = ppc_inst_read(src);

	if (instr_is_relative_branch(*src)) {
		struct ppc_inst *target = (struct ppc_inst *)branch_target(src);

		/* Branch within the section doesn't need translating */
		if (target < alt_start || target > alt_end) {
			err = translate_branch(&instr, dest, src);
			if (err)
				return 1;
		}
	}

	raw_patch_instruction(dest, instr);

	return 0;
}

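/*
 * Apply a single fixup entry. If the feature bits match the entry's
 * mask/value, the original code is left alone. Otherwise the alternative
 * instructions are copied over the start of the section and any remaining
 * space is filled with nops. Returns non-zero if the alternative is larger
 * than the region it must replace.
 */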
static int patch_feature_section(unsigned long value, struct fixup_entry *fcur)
{
	struct ppc_inst *start, *end, *alt_start, *alt_end, *src, *dest, nop;

	start = calc_addr(fcur, fcur->start_off);
	end = calc_addr(fcur, fcur->end_off);
	alt_start = calc_addr(fcur, fcur->alt_start_off);
	alt_end = calc_addr(fcur, fcur->alt_end_off);

	if ((alt_end - alt_start) > (end - start))
		return 1;

	if ((value & fcur->mask) == fcur->value)
		return 0;

	src = alt_start;
	dest = start;

	for (; src < alt_end; src = ppc_inst_next(src, src),
			      dest = ppc_inst_next(dest, dest)) {
		if (patch_alt_instruction(src, dest, alt_start, alt_end))
			return 1;
	}

	nop = ppc_inst(PPC_INST_NOP);
	for (; dest < end; dest = ppc_inst_next(dest, &nop))
		raw_patch_instruction(dest, nop);

	return 0;
}

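/*
 * Walk every fixup entry between fixup_start and fixup_end and patch it
 * against the given feature word, warning about any entry that fails.
 */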
void do_feature_fixups(unsigned long value, void *fixup_start, void *fixup_end)
{
	struct fixup_entry *fcur, *fend;

	fcur = fixup_start;
	fend = fixup_end;

	for (; fcur < fend; fcur++) {
		if (patch_feature_section(value, fcur)) {
			WARN_ON(1);
			printk("Unable to patch feature section at %p - %p" \
				" with %p - %p\n",
				calc_addr(fcur, fcur->start_off),
				calc_addr(fcur, fcur->end_off),
				calc_addr(fcur, fcur->alt_start_off),
				calc_addr(fcur, fcur->alt_end_off));
		}
	}
}

#ifdef CONFIG_PPC_BOOK3S_64
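/*
 * Patch the store-forwarding barrier at each kernel entry site. Each
 * location in the __stf_entry_barrier_fixup section is a 3-instruction slot
 * filled according to the selected barrier type: the fallback sequence
 * branches (with link) to stf_barrier_fallback, the others are a single
 * eieio or a hwsync/ld/ori sequence, padded with nops.
 */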
static void do_stf_entry_barrier_fixups(enum stf_barrier_type types)
{
	unsigned int instrs[3], *dest;
	long *start, *end;
	int i;

	start = PTRRELOC(&__start___stf_entry_barrier_fixup);
	end = PTRRELOC(&__stop___stf_entry_barrier_fixup);

	instrs[0] = 0x60000000; /* nop */
	instrs[1] = 0x60000000; /* nop */
	instrs[2] = 0x60000000; /* nop */

	i = 0;
	if (types & STF_BARRIER_FALLBACK) {
		instrs[i++] = 0x7d4802a6; /* mflr r10		*/
		instrs[i++] = 0x60000000; /* branch patched below */
		instrs[i++] = 0x7d4803a6; /* mtlr r10		*/
	} else if (types & STF_BARRIER_EIEIO) {
		instrs[i++] = 0x7e0006ac; /* eieio + bit 6 hint */
	} else if (types & STF_BARRIER_SYNC_ORI) {
		instrs[i++] = 0x7c0004ac; /* hwsync		*/
		instrs[i++] = 0xe94d0000; /* ld r10,0(r13)	*/
		instrs[i++] = 0x63ff0000; /* ori 31,31,0 speculation barrier */
	}

	for (i = 0; start < end; start++, i++) {
		dest = (void *)start + *start;

		pr_devel("patching dest %lx\n", (unsigned long)dest);

		patch_instruction((struct ppc_inst *)dest, ppc_inst(instrs[0]));

		if (types & STF_BARRIER_FALLBACK)
			patch_branch((struct ppc_inst *)(dest + 1),
				     (unsigned long)&stf_barrier_fallback,
				     BRANCH_SET_LINK);
		else
			patch_instruction((struct ppc_inst *)(dest + 1),
					  ppc_inst(instrs[1]));

		patch_instruction((struct ppc_inst *)(dest + 2), ppc_inst(instrs[2]));
	}

	printk(KERN_DEBUG "stf-barrier: patched %d entry locations (%s barrier)\n", i,
		(types == STF_BARRIER_NONE)                  ? "no" :
		(types == STF_BARRIER_FALLBACK)              ? "fallback" :
		(types == STF_BARRIER_EIEIO)                 ? "eieio" :
		(types == (STF_BARRIER_SYNC_ORI))            ? "hwsync"
		                                           : "unknown");
}

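/*
 * Patch the store-forwarding barrier at each kernel exit site. These slots
 * are six instructions wide; the fallback/sync-ori variants save r13 to an
 * SPRG (HSPRG1 when running in hypervisor mode), perform a hwsync/ld/ori
 * sequence, then restore r13, while the eieio variant is a single
 * instruction. Unused slots remain nops.
 */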
static void do_stf_exit_barrier_fixups(enum stf_barrier_type types)
{
	unsigned int instrs[6], *dest;
	long *start, *end;
	int i;

	start = PTRRELOC(&__start___stf_exit_barrier_fixup);
	end = PTRRELOC(&__stop___stf_exit_barrier_fixup);

	instrs[0] = 0x60000000; /* nop */
	instrs[1] = 0x60000000; /* nop */
	instrs[2] = 0x60000000; /* nop */
	instrs[3] = 0x60000000; /* nop */
	instrs[4] = 0x60000000; /* nop */
	instrs[5] = 0x60000000; /* nop */

	i = 0;
	if (types & STF_BARRIER_FALLBACK || types & STF_BARRIER_SYNC_ORI) {
		if (cpu_has_feature(CPU_FTR_HVMODE)) {
			instrs[i++] = 0x7db14ba6; /* mtspr 0x131, r13 (HSPRG1) */
			instrs[i++] = 0x7db04aa6; /* mfspr r13, 0x130 (HSPRG0) */
		} else {
			instrs[i++] = 0x7db243a6; /* mtsprg 2,r13	*/
			instrs[i++] = 0x7db142a6; /* mfsprg r13,1	*/
		}
		instrs[i++] = 0x7c0004ac; /* hwsync		*/
		instrs[i++] = 0xe9ad0000; /* ld r13,0(r13)	*/
		instrs[i++] = 0x63ff0000; /* ori 31,31,0 speculation barrier */
		if (cpu_has_feature(CPU_FTR_HVMODE)) {
			instrs[i++] = 0x7db14aa6; /* mfspr r13, 0x131 (HSPRG1) */
		} else {
			instrs[i++] = 0x7db242a6; /* mfsprg r13,2 */
		}
	} else if (types & STF_BARRIER_EIEIO) {
		instrs[i++] = 0x7e0006ac; /* eieio + bit 6 hint */
	}

	for (i = 0; start < end; start++, i++) {
		dest = (void *)start + *start;

		pr_devel("patching dest %lx\n", (unsigned long)dest);

		patch_instruction((struct ppc_inst *)dest, ppc_inst(instrs[0]));
		patch_instruction((struct ppc_inst *)(dest + 1), ppc_inst(instrs[1]));
		patch_instruction((struct ppc_inst *)(dest + 2), ppc_inst(instrs[2]));
		patch_instruction((struct ppc_inst *)(dest + 3), ppc_inst(instrs[3]));
		patch_instruction((struct ppc_inst *)(dest + 4), ppc_inst(instrs[4]));
		patch_instruction((struct ppc_inst *)(dest + 5), ppc_inst(instrs[5]));
	}
	printk(KERN_DEBUG "stf-barrier: patched %d exit locations (%s barrier)\n", i,
		(types == STF_BARRIER_NONE)                  ? "no" :
		(types == STF_BARRIER_FALLBACK)              ? "fallback" :
		(types == STF_BARRIER_EIEIO)                 ? "eieio" :
		(types == (STF_BARRIER_SYNC_ORI))            ? "hwsync"
		                                           : "unknown");
}

static int __do_stf_barrier_fixups(void *data)
{
	enum stf_barrier_type *types = data;

	do_stf_entry_barrier_fixups(*types);
	do_stf_exit_barrier_fixups(*types);

	return 0;
}

void do_stf_barrier_fixups(enum stf_barrier_type types)
{
	/*
	 * The call to the fallback entry flush, and the fallback/sync-ori exit
	 * flush can not be safely patched in/out while other CPUs are executing
	 * them. So call __do_stf_barrier_fixups() on one CPU while all other CPUs
	 * spin in the stop machine core with interrupts hard disabled.
	 */
	stop_machine(__do_stf_barrier_fixups, &types, NULL);
}

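/*
 * Patch the L1D flush sites in the __uaccess_flush_fixup section (the flush
 * done when the kernel finishes accessing user memory). Each location is a
 * 4-instruction slot that either falls through to the fallback flush
 * (L1D_FLUSH_FALLBACK, nop in the final slot) or performs the ori/mttrig
 * flush sequence followed by a blr.
 */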
void do_uaccess_flush_fixups(enum l1d_flush_type types)
{
	unsigned int instrs[4], *dest;
	long *start, *end;
	int i;

	start = PTRRELOC(&__start___uaccess_flush_fixup);
	end = PTRRELOC(&__stop___uaccess_flush_fixup);

	instrs[0] = 0x60000000; /* nop */
	instrs[1] = 0x60000000; /* nop */
	instrs[2] = 0x60000000; /* nop */
	instrs[3] = 0x4e800020; /* blr */

	i = 0;
	if (types == L1D_FLUSH_FALLBACK) {
		instrs[3] = 0x60000000; /* nop */
		/* fallthrough to fallback flush */
	}

	if (types & L1D_FLUSH_ORI) {
		instrs[i++] = 0x63ff0000; /* ori 31,31,0 speculation barrier */
		instrs[i++] = 0x63de0000; /* ori 30,30,0 L1d flush */
	}

	if (types & L1D_FLUSH_MTTRIG)
		instrs[i++] = 0x7c12dba6; /* mtspr TRIG2,r0 (SPR #882) */

	for (i = 0; start < end; start++, i++) {
		dest = (void *)start + *start;

		pr_devel("patching dest %lx\n", (unsigned long)dest);

		patch_instruction((struct ppc_inst *)dest, ppc_inst(instrs[0]));

		patch_instruction((struct ppc_inst *)(dest + 1), ppc_inst(instrs[1]));
		patch_instruction((struct ppc_inst *)(dest + 2), ppc_inst(instrs[2]));
		patch_instruction((struct ppc_inst *)(dest + 3), ppc_inst(instrs[3]));
	}

	printk(KERN_DEBUG "uaccess-flush: patched %d locations (%s flush)\n", i,
		(types == L1D_FLUSH_NONE)       ? "no" :
		(types == L1D_FLUSH_FALLBACK)   ? "fallback displacement" :
		(types &  L1D_FLUSH_ORI)        ? (types & L1D_FLUSH_MTTRIG)
							? "ori+mttrig type"
							: "ori type" :
		(types &  L1D_FLUSH_MTTRIG)     ? "mttrig type"
						: "unknown");
}

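/*
 * Patch the L1D flush performed on kernel entry. Called under stop_machine()
 * (see do_entry_flush_fixups() below) because the fallback variant patches a
 * branch-and-link to entry_flush_fallback / scv_entry_flush_fallback, which
 * cannot be swapped while other CPUs may be executing it. Both the regular
 * and the scv entry fixup sections are patched with the same sequence.
 */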
static int __do_entry_flush_fixups(void *data)
{
	enum l1d_flush_type types = *(enum l1d_flush_type *)data;
	unsigned int instrs[3], *dest;
	long *start, *end;
	int i;

	instrs[0] = 0x60000000; /* nop */
	instrs[1] = 0x60000000; /* nop */
	instrs[2] = 0x60000000; /* nop */

	i = 0;
	if (types == L1D_FLUSH_FALLBACK) {
		instrs[i++] = 0x7d4802a6; /* mflr r10		*/
		instrs[i++] = 0x60000000; /* branch patched below */
		instrs[i++] = 0x7d4803a6; /* mtlr r10		*/
	}

	if (types & L1D_FLUSH_ORI) {
		instrs[i++] = 0x63ff0000; /* ori 31,31,0 speculation barrier */
		instrs[i++] = 0x63de0000; /* ori 30,30,0 L1d flush */
	}

	if (types & L1D_FLUSH_MTTRIG)
		instrs[i++] = 0x7c12dba6; /* mtspr TRIG2,r0 (SPR #882) */

	start = PTRRELOC(&__start___entry_flush_fixup);
	end = PTRRELOC(&__stop___entry_flush_fixup);
	for (i = 0; start < end; start++, i++) {
		dest = (void *)start + *start;

		pr_devel("patching dest %lx\n", (unsigned long)dest);

		patch_instruction((struct ppc_inst *)dest, ppc_inst(instrs[0]));

		if (types == L1D_FLUSH_FALLBACK)
			patch_branch((struct ppc_inst *)(dest + 1), (unsigned long)&entry_flush_fallback,
				     BRANCH_SET_LINK);
		else
			patch_instruction((struct ppc_inst *)(dest + 1), ppc_inst(instrs[1]));

		patch_instruction((struct ppc_inst *)(dest + 2), ppc_inst(instrs[2]));
	}

	start = PTRRELOC(&__start___scv_entry_flush_fixup);
	end = PTRRELOC(&__stop___scv_entry_flush_fixup);
	for (; start < end; start++, i++) {
		dest = (void *)start + *start;

		pr_devel("patching dest %lx\n", (unsigned long)dest);

		patch_instruction((struct ppc_inst *)dest, ppc_inst(instrs[0]));

		if (types == L1D_FLUSH_FALLBACK)
			patch_branch((struct ppc_inst *)(dest + 1), (unsigned long)&scv_entry_flush_fallback,
				     BRANCH_SET_LINK);
		else
			patch_instruction((struct ppc_inst *)(dest + 1), ppc_inst(instrs[1]));

		patch_instruction((struct ppc_inst *)(dest + 2), ppc_inst(instrs[2]));
	}

	printk(KERN_DEBUG "entry-flush: patched %d locations (%s flush)\n", i,
		(types == L1D_FLUSH_NONE)       ? "no" :
		(types == L1D_FLUSH_FALLBACK)   ? "fallback displacement" :
		(types &  L1D_FLUSH_ORI)        ? (types & L1D_FLUSH_MTTRIG)
							? "ori+mttrig type"
							: "ori type" :
		(types &  L1D_FLUSH_MTTRIG)     ? "mttrig type"
						: "unknown");

	return 0;
}

void do_entry_flush_fixups(enum l1d_flush_type types)
{
	/*
	 * The call to the fallback flush can not be safely patched in/out while
	 * other CPUs are executing it. So call __do_entry_flush_fixups() on one
	 * CPU while all other CPUs spin in the stop machine core with interrupts
	 * hard disabled.
	 */
	stop_machine(__do_entry_flush_fixups, &types, NULL);
}

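/*
 * Patch the L1D flush performed before returning to userspace (the "rfi"
 * flush). Each location is a 3-instruction slot; the fallback variant is
 * reached via a "b .+16" in the first slot, the others use the ori and/or
 * mttrig flush instructions.
 */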
void do_rfi_flush_fixups(enum l1d_flush_type types)
{
	unsigned int instrs[3], *dest;
	long *start, *end;
	int i;

	start = PTRRELOC(&__start___rfi_flush_fixup);
	end = PTRRELOC(&__stop___rfi_flush_fixup);

	instrs[0] = 0x60000000; /* nop */
	instrs[1] = 0x60000000; /* nop */
	instrs[2] = 0x60000000; /* nop */

	if (types & L1D_FLUSH_FALLBACK)
		/* b .+16 to fallback flush */
		instrs[0] = 0x48000010;

	i = 0;
	if (types & L1D_FLUSH_ORI) {
		instrs[i++] = 0x63ff0000; /* ori 31,31,0 speculation barrier */
		instrs[i++] = 0x63de0000; /* ori 30,30,0 L1d flush */
	}

	if (types & L1D_FLUSH_MTTRIG)
		instrs[i++] = 0x7c12dba6; /* mtspr TRIG2,r0 (SPR #882) */

	for (i = 0; start < end; start++, i++) {
		dest = (void *)start + *start;

		pr_devel("patching dest %lx\n", (unsigned long)dest);

		patch_instruction((struct ppc_inst *)dest, ppc_inst(instrs[0]));
		patch_instruction((struct ppc_inst *)(dest + 1), ppc_inst(instrs[1]));
		patch_instruction((struct ppc_inst *)(dest + 2), ppc_inst(instrs[2]));
	}

	printk(KERN_DEBUG "rfi-flush: patched %d locations (%s flush)\n", i,
		(types == L1D_FLUSH_NONE)       ? "no" :
		(types == L1D_FLUSH_FALLBACK)   ? "fallback displacement" :
		(types &  L1D_FLUSH_ORI)        ? (types & L1D_FLUSH_MTTRIG)
							? "ori+mttrig type"
							: "ori type" :
		(types &  L1D_FLUSH_MTTRIG)     ? "mttrig type"
						: "unknown");
}

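/*
 * Patch every barrier_nospec site in the given fixup range with either a nop
 * (disabled) or the ori 31,31,0 speculation barrier (enabled).
 */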
void do_barrier_nospec_fixups_range(bool enable, void *fixup_start, void *fixup_end)
{
	unsigned int instr, *dest;
	long *start, *end;
	int i;

	start = fixup_start;
	end = fixup_end;

	instr = 0x60000000; /* nop */

	if (enable) {
		pr_info("barrier-nospec: using ORI speculation barrier\n");
		instr = 0x63ff0000; /* ori 31,31,0 speculation barrier */
	}

	for (i = 0; start < end; start++, i++) {
		dest = (void *)start + *start;

		pr_devel("patching dest %lx\n", (unsigned long)dest);
		patch_instruction((struct ppc_inst *)dest, ppc_inst(instr));
	}

	printk(KERN_DEBUG "barrier-nospec: patched %d locations\n", i);
}

#endif /* CONFIG_PPC_BOOK3S_64 */

#ifdef CONFIG_PPC_BARRIER_NOSPEC
void do_barrier_nospec_fixups(bool enable)
{
	void *start, *end;

	start = PTRRELOC(&__start___barrier_nospec_fixup);
	end = PTRRELOC(&__stop___barrier_nospec_fixup);

	do_barrier_nospec_fixups_range(enable, start, end);
}
#endif /* CONFIG_PPC_BARRIER_NOSPEC */

#ifdef CONFIG_PPC_FSL_BOOK3E
void do_barrier_nospec_fixups_range(bool enable, void *fixup_start, void *fixup_end)
{
	unsigned int instr[2], *dest;
	long *start, *end;
	int i;

	start = fixup_start;
	end = fixup_end;

	instr[0] = PPC_INST_NOP;
	instr[1] = PPC_INST_NOP;

	if (enable) {
		pr_info("barrier-nospec: using isync; sync as speculation barrier\n");
		instr[0] = PPC_INST_ISYNC;
		instr[1] = PPC_INST_SYNC;
	}

	for (i = 0; start < end; start++, i++) {
		dest = (void *)start + *start;

		pr_devel("patching dest %lx\n", (unsigned long)dest);
		patch_instruction((struct ppc_inst *)dest, ppc_inst(instr[0]));
		patch_instruction((struct ppc_inst *)(dest + 1), ppc_inst(instr[1]));
	}

	printk(KERN_DEBUG "barrier-nospec: patched %d locations\n", i);
}

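/*
 * Nop out one BTB-flush section. Each entry in the __btb_flush_fixup section
 * is a pair of offsets bracketing the instructions to be replaced.
 */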
static void patch_btb_flush_section(long *curr)
{
	unsigned int *start, *end;

	start = (void *)curr + *curr;
	end = (void *)curr + *(curr + 1);
	for (; start < end; start++) {
		pr_devel("patching dest %lx\n", (unsigned long)start);
		patch_instruction((struct ppc_inst *)start, ppc_inst(PPC_INST_NOP));
	}
}

void do_btb_flush_fixups(void)
{
	long *start, *end;

	start = PTRRELOC(&__start__btb_flush_fixup);
	end = PTRRELOC(&__stop__btb_flush_fixup);

	for (; start < end; start += 2)
		patch_btb_flush_section(start);
}
#endif /* CONFIG_PPC_FSL_BOOK3E */

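/*
 * On CPUs with CPU_FTR_LWSYNC, rewrite each location in the lwsync fixup
 * range with the lighter-weight lwsync instruction; otherwise leave the
 * existing (sync) instructions alone.
 */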
void do_lwsync_fixups(unsigned long value, void *fixup_start, void *fixup_end)
{
	long *start, *end;
	struct ppc_inst *dest;

	if (!(value & CPU_FTR_LWSYNC))
		return;

	start = fixup_start;
	end = fixup_end;

	for (; start < end; start++) {
		dest = (void *)start + *start;
		raw_patch_instruction(dest, ppc_inst(PPC_INST_LWSYNC));
	}
}

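/*
 * For relocatable 64-bit kernels running above physical address zero, copy
 * the now-patched code between _stext and __end_interrupts from the running
 * kernel image back down to KERNELBASE, so the low-address exception vectors
 * match the patched kernel text.
 */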
static void do_final_fixups(void)
{
#if defined(CONFIG_PPC64) && defined(CONFIG_RELOCATABLE)
	struct ppc_inst inst, *src, *dest, *end;

	if (PHYSICAL_START == 0)
		return;

	src = (struct ppc_inst *)(KERNELBASE + PHYSICAL_START);
	dest = (struct ppc_inst *)KERNELBASE;
	end = (void *)src + (__end_interrupts - _stext);

	while (src < end) {
		inst = ppc_inst_read(src);
		raw_patch_instruction(dest, inst);
		src = ppc_inst_next(src, src);
		dest = ppc_inst_next(dest, dest);
	}
#endif
}

static unsigned long __initdata saved_cpu_features;
static unsigned int __initdata saved_mmu_features;
#ifdef CONFIG_PPC64
static unsigned long __initdata saved_firmware_features;
#endif

void __init apply_feature_fixups(void)
{
	struct cpu_spec *spec = PTRRELOC(*PTRRELOC(&cur_cpu_spec));

	*PTRRELOC(&saved_cpu_features) = spec->cpu_features;
	*PTRRELOC(&saved_mmu_features) = spec->mmu_features;

	/*
	 * Apply the CPU-specific and firmware specific fixups to kernel text
	 * (nop out sections not relevant to this CPU or this firmware).
	 */
	do_feature_fixups(spec->cpu_features,
			  PTRRELOC(&__start___ftr_fixup),
			  PTRRELOC(&__stop___ftr_fixup));

	do_feature_fixups(spec->mmu_features,
			  PTRRELOC(&__start___mmu_ftr_fixup),
			  PTRRELOC(&__stop___mmu_ftr_fixup));

	do_lwsync_fixups(spec->cpu_features,
			 PTRRELOC(&__start___lwsync_fixup),
			 PTRRELOC(&__stop___lwsync_fixup));

#ifdef CONFIG_PPC64
	saved_firmware_features = powerpc_firmware_features;
	do_feature_fixups(powerpc_firmware_features,
			  &__start___fw_ftr_fixup, &__stop___fw_ftr_fixup);
#endif
	do_final_fixups();
}

void __init setup_feature_keys(void)
{
	/*
	 * Initialise jump label. This causes all the cpu/mmu_has_feature()
	 * checks to take on their correct polarity based on the current set of
	 * CPU/MMU features.
	 */
	jump_label_init();
	cpu_feature_keys_init();
	mmu_feature_keys_init();
}

static int __init check_features(void)
{
	WARN(saved_cpu_features != cur_cpu_spec->cpu_features,
	     "CPU features changed after feature patching!\n");
	WARN(saved_mmu_features != cur_cpu_spec->mmu_features,
	     "MMU features changed after feature patching!\n");
#ifdef CONFIG_PPC64
	WARN(saved_firmware_features != powerpc_firmware_features,
	     "Firmware features changed after feature patching!\n");
#endif

	return 0;
}
late_initcall(check_features);

#ifdef CONFIG_FTR_FIXUP_SELFTEST

#define check(x)	\
	if (!(x)) printk("feature-fixups: test failed at line %d\n", __LINE__);

/* This must be after the text it fixes up, vmlinux.lds.S enforces that atm */
static struct fixup_entry fixup;

static long calc_offset(struct fixup_entry *entry, unsigned int *p)
{
	return (unsigned long)p - (unsigned long)entry;
}

static void test_basic_patching(void)
{
	extern unsigned int ftr_fixup_test1[];
	extern unsigned int end_ftr_fixup_test1[];
	extern unsigned int ftr_fixup_test1_orig[];
	extern unsigned int ftr_fixup_test1_expected[];
	int size = 4 * (end_ftr_fixup_test1 - ftr_fixup_test1);

	fixup.value = fixup.mask = 8;
	fixup.start_off = calc_offset(&fixup, ftr_fixup_test1 + 1);
	fixup.end_off = calc_offset(&fixup, ftr_fixup_test1 + 2);
	fixup.alt_start_off = fixup.alt_end_off = 0;

	/* Sanity check */
	check(memcmp(ftr_fixup_test1, ftr_fixup_test1_orig, size) == 0);

	/* Check we don't patch if the value matches */
	patch_feature_section(8, &fixup);
	check(memcmp(ftr_fixup_test1, ftr_fixup_test1_orig, size) == 0);

	/* Check we do patch if the value doesn't match */
	patch_feature_section(0, &fixup);
	check(memcmp(ftr_fixup_test1, ftr_fixup_test1_expected, size) == 0);

	/* Check we do patch if the mask doesn't match */
	memcpy(ftr_fixup_test1, ftr_fixup_test1_orig, size);
	check(memcmp(ftr_fixup_test1, ftr_fixup_test1_orig, size) == 0);
	patch_feature_section(~8, &fixup);
	check(memcmp(ftr_fixup_test1, ftr_fixup_test1_expected, size) == 0);
}

static void test_alternative_patching(void)
{
	extern unsigned int ftr_fixup_test2[];
	extern unsigned int end_ftr_fixup_test2[];
	extern unsigned int ftr_fixup_test2_orig[];
	extern unsigned int ftr_fixup_test2_alt[];
	extern unsigned int ftr_fixup_test2_expected[];
	int size = 4 * (end_ftr_fixup_test2 - ftr_fixup_test2);

	fixup.value = fixup.mask = 0xF;
	fixup.start_off = calc_offset(&fixup, ftr_fixup_test2 + 1);
	fixup.end_off = calc_offset(&fixup, ftr_fixup_test2 + 2);
	fixup.alt_start_off = calc_offset(&fixup, ftr_fixup_test2_alt);
	fixup.alt_end_off = calc_offset(&fixup, ftr_fixup_test2_alt + 1);

	/* Sanity check */
	check(memcmp(ftr_fixup_test2, ftr_fixup_test2_orig, size) == 0);

	/* Check we don't patch if the value matches */
	patch_feature_section(0xF, &fixup);
	check(memcmp(ftr_fixup_test2, ftr_fixup_test2_orig, size) == 0);

	/* Check we do patch if the value doesn't match */
	patch_feature_section(0, &fixup);
	check(memcmp(ftr_fixup_test2, ftr_fixup_test2_expected, size) == 0);

	/* Check we do patch if the mask doesn't match */
	memcpy(ftr_fixup_test2, ftr_fixup_test2_orig, size);
	check(memcmp(ftr_fixup_test2, ftr_fixup_test2_orig, size) == 0);
	patch_feature_section(~0xF, &fixup);
	check(memcmp(ftr_fixup_test2, ftr_fixup_test2_expected, size) == 0);
}

static void test_alternative_case_too_big(void)
{
	extern unsigned int ftr_fixup_test3[];
	extern unsigned int end_ftr_fixup_test3[];
	extern unsigned int ftr_fixup_test3_orig[];
	extern unsigned int ftr_fixup_test3_alt[];
	int size = 4 * (end_ftr_fixup_test3 - ftr_fixup_test3);

	fixup.value = fixup.mask = 0xC;
	fixup.start_off = calc_offset(&fixup, ftr_fixup_test3 + 1);
	fixup.end_off = calc_offset(&fixup, ftr_fixup_test3 + 2);
	fixup.alt_start_off = calc_offset(&fixup, ftr_fixup_test3_alt);
	fixup.alt_end_off = calc_offset(&fixup, ftr_fixup_test3_alt + 2);

	/* Sanity check */
	check(memcmp(ftr_fixup_test3, ftr_fixup_test3_orig, size) == 0);

	/* Expect nothing to be patched, and the error returned to us */
	check(patch_feature_section(0xF, &fixup) == 1);
	check(memcmp(ftr_fixup_test3, ftr_fixup_test3_orig, size) == 0);
	check(patch_feature_section(0, &fixup) == 1);
	check(memcmp(ftr_fixup_test3, ftr_fixup_test3_orig, size) == 0);
	check(patch_feature_section(~0xF, &fixup) == 1);
	check(memcmp(ftr_fixup_test3, ftr_fixup_test3_orig, size) == 0);
}

static void test_alternative_case_too_small(void)
{
	extern unsigned int ftr_fixup_test4[];
	extern unsigned int end_ftr_fixup_test4[];
	extern unsigned int ftr_fixup_test4_orig[];
	extern unsigned int ftr_fixup_test4_alt[];
	extern unsigned int ftr_fixup_test4_expected[];
	int size = 4 * (end_ftr_fixup_test4 - ftr_fixup_test4);
	unsigned long flag;

	/* Check a high-bit flag */
	flag = 1UL << ((sizeof(unsigned long) - 1) * 8);
	fixup.value = fixup.mask = flag;
	fixup.start_off = calc_offset(&fixup, ftr_fixup_test4 + 1);
	fixup.end_off = calc_offset(&fixup, ftr_fixup_test4 + 5);
	fixup.alt_start_off = calc_offset(&fixup, ftr_fixup_test4_alt);
	fixup.alt_end_off = calc_offset(&fixup, ftr_fixup_test4_alt + 2);

	/* Sanity check */
	check(memcmp(ftr_fixup_test4, ftr_fixup_test4_orig, size) == 0);

	/* Check we don't patch if the value matches */
	patch_feature_section(flag, &fixup);
	check(memcmp(ftr_fixup_test4, ftr_fixup_test4_orig, size) == 0);

	/* Check we do patch if the value doesn't match */
	patch_feature_section(0, &fixup);
	check(memcmp(ftr_fixup_test4, ftr_fixup_test4_expected, size) == 0);

	/* Check we do patch if the mask doesn't match */
	memcpy(ftr_fixup_test4, ftr_fixup_test4_orig, size);
	check(memcmp(ftr_fixup_test4, ftr_fixup_test4_orig, size) == 0);
	patch_feature_section(~flag, &fixup);
	check(memcmp(ftr_fixup_test4, ftr_fixup_test4_expected, size) == 0);
}

static void test_alternative_case_with_branch(void)
{
	extern unsigned int ftr_fixup_test5[];
	extern unsigned int end_ftr_fixup_test5[];
	extern unsigned int ftr_fixup_test5_expected[];
	int size = 4 * (end_ftr_fixup_test5 - ftr_fixup_test5);

	check(memcmp(ftr_fixup_test5, ftr_fixup_test5_expected, size) == 0);
}

static void test_alternative_case_with_external_branch(void)
{
	extern unsigned int ftr_fixup_test6[];
	extern unsigned int end_ftr_fixup_test6[];
	extern unsigned int ftr_fixup_test6_expected[];
	int size = 4 * (end_ftr_fixup_test6 - ftr_fixup_test6);

	check(memcmp(ftr_fixup_test6, ftr_fixup_test6_expected, size) == 0);
}

static void test_alternative_case_with_branch_to_end(void)
{
	extern unsigned int ftr_fixup_test7[];
	extern unsigned int end_ftr_fixup_test7[];
	extern unsigned int ftr_fixup_test7_expected[];
	int size = 4 * (end_ftr_fixup_test7 - ftr_fixup_test7);

	check(memcmp(ftr_fixup_test7, ftr_fixup_test7_expected, size) == 0);
}

static void test_cpu_macros(void)
{
	extern u8 ftr_fixup_test_FTR_macros[];
	extern u8 ftr_fixup_test_FTR_macros_expected[];
	unsigned long size = ftr_fixup_test_FTR_macros_expected -
			     ftr_fixup_test_FTR_macros;

	/* The fixups have already been done for us during boot */
	check(memcmp(ftr_fixup_test_FTR_macros,
		     ftr_fixup_test_FTR_macros_expected, size) == 0);
}

static void test_fw_macros(void)
{
#ifdef CONFIG_PPC64
	extern u8 ftr_fixup_test_FW_FTR_macros[];
	extern u8 ftr_fixup_test_FW_FTR_macros_expected[];
	unsigned long size = ftr_fixup_test_FW_FTR_macros_expected -
			     ftr_fixup_test_FW_FTR_macros;

	/* The fixups have already been done for us during boot */
	check(memcmp(ftr_fixup_test_FW_FTR_macros,
		     ftr_fixup_test_FW_FTR_macros_expected, size) == 0);
#endif
}

static void test_lwsync_macros(void)
{
	extern u8 lwsync_fixup_test[];
	extern u8 end_lwsync_fixup_test[];
	extern u8 lwsync_fixup_test_expected_LWSYNC[];
	extern u8 lwsync_fixup_test_expected_SYNC[];
	unsigned long size = end_lwsync_fixup_test -
			     lwsync_fixup_test;

	/* The fixups have already been done for us during boot */
	if (cur_cpu_spec->cpu_features & CPU_FTR_LWSYNC) {
		check(memcmp(lwsync_fixup_test,
			     lwsync_fixup_test_expected_LWSYNC, size) == 0);
	} else {
		check(memcmp(lwsync_fixup_test,
			     lwsync_fixup_test_expected_SYNC, size) == 0);
	}
}

#ifdef CONFIG_PPC64
static void __init test_prefix_patching(void)
{
	extern unsigned int ftr_fixup_prefix1[];
	extern unsigned int end_ftr_fixup_prefix1[];
	extern unsigned int ftr_fixup_prefix1_orig[];
	extern unsigned int ftr_fixup_prefix1_expected[];
	int size = sizeof(unsigned int) * (end_ftr_fixup_prefix1 - ftr_fixup_prefix1);

	fixup.value = fixup.mask = 8;
	fixup.start_off = calc_offset(&fixup, ftr_fixup_prefix1 + 1);
	fixup.end_off = calc_offset(&fixup, ftr_fixup_prefix1 + 3);
	fixup.alt_start_off = fixup.alt_end_off = 0;

	/* Sanity check */
	check(memcmp(ftr_fixup_prefix1, ftr_fixup_prefix1_orig, size) == 0);

	patch_feature_section(0, &fixup);
	check(memcmp(ftr_fixup_prefix1, ftr_fixup_prefix1_expected, size) == 0);
	check(memcmp(ftr_fixup_prefix1, ftr_fixup_prefix1_orig, size) != 0);
}

static void __init test_prefix_alt_patching(void)
{
	extern unsigned int ftr_fixup_prefix2[];
	extern unsigned int end_ftr_fixup_prefix2[];
	extern unsigned int ftr_fixup_prefix2_orig[];
	extern unsigned int ftr_fixup_prefix2_expected[];
	extern unsigned int ftr_fixup_prefix2_alt[];
	int size = sizeof(unsigned int) * (end_ftr_fixup_prefix2 - ftr_fixup_prefix2);

	fixup.value = fixup.mask = 8;
	fixup.start_off = calc_offset(&fixup, ftr_fixup_prefix2 + 1);
	fixup.end_off = calc_offset(&fixup, ftr_fixup_prefix2 + 3);
	fixup.alt_start_off = calc_offset(&fixup, ftr_fixup_prefix2_alt);
	fixup.alt_end_off = calc_offset(&fixup, ftr_fixup_prefix2_alt + 2);
	/* Sanity check */
	check(memcmp(ftr_fixup_prefix2, ftr_fixup_prefix2_orig, size) == 0);

	patch_feature_section(0, &fixup);
	check(memcmp(ftr_fixup_prefix2, ftr_fixup_prefix2_expected, size) == 0);
	check(memcmp(ftr_fixup_prefix2, ftr_fixup_prefix2_orig, size) != 0);
}

static void __init test_prefix_word_alt_patching(void)
{
	extern unsigned int ftr_fixup_prefix3[];
	extern unsigned int end_ftr_fixup_prefix3[];
	extern unsigned int ftr_fixup_prefix3_orig[];
	extern unsigned int ftr_fixup_prefix3_expected[];
	extern unsigned int ftr_fixup_prefix3_alt[];
	int size = sizeof(unsigned int) * (end_ftr_fixup_prefix3 - ftr_fixup_prefix3);

	fixup.value = fixup.mask = 8;
	fixup.start_off = calc_offset(&fixup, ftr_fixup_prefix3 + 1);
	fixup.end_off = calc_offset(&fixup, ftr_fixup_prefix3 + 4);
	fixup.alt_start_off = calc_offset(&fixup, ftr_fixup_prefix3_alt);
	fixup.alt_end_off = calc_offset(&fixup, ftr_fixup_prefix3_alt + 3);
	/* Sanity check */
	check(memcmp(ftr_fixup_prefix3, ftr_fixup_prefix3_orig, size) == 0);

	patch_feature_section(0, &fixup);
	check(memcmp(ftr_fixup_prefix3, ftr_fixup_prefix3_expected, size) == 0);
	patch_feature_section(0, &fixup);
	check(memcmp(ftr_fixup_prefix3, ftr_fixup_prefix3_orig, size) != 0);
}
#else
static inline void test_prefix_patching(void) {}
static inline void test_prefix_alt_patching(void) {}
static inline void test_prefix_word_alt_patching(void) {}
#endif /* CONFIG_PPC64 */

static int __init test_feature_fixups(void)
{
	printk(KERN_DEBUG "Running feature fixup self-tests ...\n");

	test_basic_patching();
	test_alternative_patching();
	test_alternative_case_too_big();
	test_alternative_case_too_small();
	test_alternative_case_with_branch();
	test_alternative_case_with_external_branch();
	test_alternative_case_with_branch_to_end();
	test_cpu_macros();
	test_fw_macros();
	test_lwsync_macros();
	test_prefix_patching();
	test_prefix_alt_patching();
	test_prefix_word_alt_patching();

	return 0;
}
late_initcall(test_feature_fixups);

#endif /* CONFIG_FTR_FIXUP_SELFTEST */