• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
/*
 *  Copyright (C) 2001 Ben. Herrenschmidt (benh@kernel.crashing.org)
 *
 *  Modifications for ppc64:
 *      Copyright (C) 2003 Dave Engebretsen <engebret@us.ibm.com>
 *
 *  Copyright 2008 Michael Ellerman, IBM Corporation.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */
14 
15 #include <linux/types.h>
16 #include <linux/kernel.h>
17 #include <linux/string.h>
18 #include <linux/init.h>
19 #include <asm/cputable.h>
20 #include <asm/code-patching.h>
21 #include <asm/page.h>
22 #include <asm/sections.h>
23 
24 
25 struct fixup_entry {
26 	unsigned long	mask;
27 	unsigned long	value;
28 	long		start_off;
29 	long		end_off;
30 	long		alt_start_off;
31 	long		alt_end_off;
32 };
33 
calc_addr(struct fixup_entry * fcur,long offset)34 static unsigned int *calc_addr(struct fixup_entry *fcur, long offset)
35 {
36 	/*
37 	 * We store the offset to the code as a negative offset from
38 	 * the start of the alt_entry, to support the VDSO. This
39 	 * routine converts that back into an actual address.
40 	 */
41 	return (unsigned int *)((unsigned long)fcur + offset);
42 }
43 
patch_alt_instruction(unsigned int * src,unsigned int * dest,unsigned int * alt_start,unsigned int * alt_end)44 static int patch_alt_instruction(unsigned int *src, unsigned int *dest,
45 				 unsigned int *alt_start, unsigned int *alt_end)
46 {
47 	unsigned int instr;
48 
49 	instr = *src;
50 
51 	if (instr_is_relative_branch(*src)) {
52 		unsigned int *target = (unsigned int *)branch_target(src);
53 
54 		/* Branch within the section doesn't need translating */
55 		if (target < alt_start || target >= alt_end) {
56 			instr = translate_branch(dest, src);
57 			if (!instr)
58 				return 1;
59 		}
60 	}
61 
62 	patch_instruction(dest, instr);
63 
64 	return 0;
65 }
66 
patch_feature_section(unsigned long value,struct fixup_entry * fcur)67 static int patch_feature_section(unsigned long value, struct fixup_entry *fcur)
68 {
69 	unsigned int *start, *end, *alt_start, *alt_end, *src, *dest;
70 
71 	start = calc_addr(fcur, fcur->start_off);
72 	end = calc_addr(fcur, fcur->end_off);
73 	alt_start = calc_addr(fcur, fcur->alt_start_off);
74 	alt_end = calc_addr(fcur, fcur->alt_end_off);
75 
76 	if ((alt_end - alt_start) > (end - start))
77 		return 1;
78 
79 	if ((value & fcur->mask) == fcur->value)
80 		return 0;
81 
82 	src = alt_start;
83 	dest = start;
84 
85 	for (; src < alt_end; src++, dest++) {
86 		if (patch_alt_instruction(src, dest, alt_start, alt_end))
87 			return 1;
88 	}
89 
90 	for (; dest < end; dest++)
91 		patch_instruction(dest, PPC_INST_NOP);
92 
93 	return 0;
94 }
95 
do_feature_fixups(unsigned long value,void * fixup_start,void * fixup_end)96 void do_feature_fixups(unsigned long value, void *fixup_start, void *fixup_end)
97 {
98 	struct fixup_entry *fcur, *fend;
99 
100 	fcur = fixup_start;
101 	fend = fixup_end;
102 
103 	for (; fcur < fend; fcur++) {
104 		if (patch_feature_section(value, fcur)) {
105 			WARN_ON(1);
106 			printk("Unable to patch feature section at %p - %p" \
107 				" with %p - %p\n",
108 				calc_addr(fcur, fcur->start_off),
109 				calc_addr(fcur, fcur->end_off),
110 				calc_addr(fcur, fcur->alt_start_off),
111 				calc_addr(fcur, fcur->alt_end_off));
112 		}
113 	}
114 }
115 
do_lwsync_fixups(unsigned long value,void * fixup_start,void * fixup_end)116 void do_lwsync_fixups(unsigned long value, void *fixup_start, void *fixup_end)
117 {
118 	long *start, *end;
119 	unsigned int *dest;
120 
121 	if (!(value & CPU_FTR_LWSYNC))
122 		return ;
123 
124 	start = fixup_start;
125 	end = fixup_end;
126 
127 	for (; start < end; start++) {
128 		dest = (void *)start + *start;
129 		patch_instruction(dest, PPC_INST_LWSYNC);
130 	}
131 }
132 
do_final_fixups(void)133 void do_final_fixups(void)
134 {
135 #if defined(CONFIG_PPC64) && defined(CONFIG_RELOCATABLE)
136 	int *src, *dest;
137 	unsigned long length;
138 
139 	if (PHYSICAL_START == 0)
140 		return;
141 
142 	src = (int *)(KERNELBASE + PHYSICAL_START);
143 	dest = (int *)KERNELBASE;
144 	length = (__end_interrupts - _stext) / sizeof(int);
145 
146 	while (length--) {
147 		patch_instruction(dest, *src);
148 		src++;
149 		dest++;
150 	}
151 #endif
152 }
153 
154 #ifdef CONFIG_FTR_FIXUP_SELFTEST
155 
156 #define check(x)	\
157 	if (!(x)) printk("feature-fixups: test failed at line %d\n", __LINE__);
158 
159 /* This must be after the text it fixes up, vmlinux.lds.S enforces that atm */
160 static struct fixup_entry fixup;
161 
calc_offset(struct fixup_entry * entry,unsigned int * p)162 static long calc_offset(struct fixup_entry *entry, unsigned int *p)
163 {
164 	return (unsigned long)p - (unsigned long)entry;
165 }
166 
test_basic_patching(void)167 void test_basic_patching(void)
168 {
169 	extern unsigned int ftr_fixup_test1;
170 	extern unsigned int end_ftr_fixup_test1;
171 	extern unsigned int ftr_fixup_test1_orig;
172 	extern unsigned int ftr_fixup_test1_expected;
173 	int size = &end_ftr_fixup_test1 - &ftr_fixup_test1;
174 
175 	fixup.value = fixup.mask = 8;
176 	fixup.start_off = calc_offset(&fixup, &ftr_fixup_test1 + 1);
177 	fixup.end_off = calc_offset(&fixup, &ftr_fixup_test1 + 2);
178 	fixup.alt_start_off = fixup.alt_end_off = 0;
179 
180 	/* Sanity check */
181 	check(memcmp(&ftr_fixup_test1, &ftr_fixup_test1_orig, size) == 0);
182 
183 	/* Check we don't patch if the value matches */
184 	patch_feature_section(8, &fixup);
185 	check(memcmp(&ftr_fixup_test1, &ftr_fixup_test1_orig, size) == 0);
186 
187 	/* Check we do patch if the value doesn't match */
188 	patch_feature_section(0, &fixup);
189 	check(memcmp(&ftr_fixup_test1, &ftr_fixup_test1_expected, size) == 0);
190 
191 	/* Check we do patch if the mask doesn't match */
192 	memcpy(&ftr_fixup_test1, &ftr_fixup_test1_orig, size);
193 	check(memcmp(&ftr_fixup_test1, &ftr_fixup_test1_orig, size) == 0);
194 	patch_feature_section(~8, &fixup);
195 	check(memcmp(&ftr_fixup_test1, &ftr_fixup_test1_expected, size) == 0);
196 }
197 
test_alternative_patching(void)198 static void test_alternative_patching(void)
199 {
200 	extern unsigned int ftr_fixup_test2;
201 	extern unsigned int end_ftr_fixup_test2;
202 	extern unsigned int ftr_fixup_test2_orig;
203 	extern unsigned int ftr_fixup_test2_alt;
204 	extern unsigned int ftr_fixup_test2_expected;
205 	int size = &end_ftr_fixup_test2 - &ftr_fixup_test2;
206 
207 	fixup.value = fixup.mask = 0xF;
208 	fixup.start_off = calc_offset(&fixup, &ftr_fixup_test2 + 1);
209 	fixup.end_off = calc_offset(&fixup, &ftr_fixup_test2 + 2);
210 	fixup.alt_start_off = calc_offset(&fixup, &ftr_fixup_test2_alt);
211 	fixup.alt_end_off = calc_offset(&fixup, &ftr_fixup_test2_alt + 1);
212 
213 	/* Sanity check */
214 	check(memcmp(&ftr_fixup_test2, &ftr_fixup_test2_orig, size) == 0);
215 
216 	/* Check we don't patch if the value matches */
217 	patch_feature_section(0xF, &fixup);
218 	check(memcmp(&ftr_fixup_test2, &ftr_fixup_test2_orig, size) == 0);
219 
220 	/* Check we do patch if the value doesn't match */
221 	patch_feature_section(0, &fixup);
222 	check(memcmp(&ftr_fixup_test2, &ftr_fixup_test2_expected, size) == 0);
223 
224 	/* Check we do patch if the mask doesn't match */
225 	memcpy(&ftr_fixup_test2, &ftr_fixup_test2_orig, size);
226 	check(memcmp(&ftr_fixup_test2, &ftr_fixup_test2_orig, size) == 0);
227 	patch_feature_section(~0xF, &fixup);
228 	check(memcmp(&ftr_fixup_test2, &ftr_fixup_test2_expected, size) == 0);
229 }
230 
test_alternative_case_too_big(void)231 static void test_alternative_case_too_big(void)
232 {
233 	extern unsigned int ftr_fixup_test3;
234 	extern unsigned int end_ftr_fixup_test3;
235 	extern unsigned int ftr_fixup_test3_orig;
236 	extern unsigned int ftr_fixup_test3_alt;
237 	int size = &end_ftr_fixup_test3 - &ftr_fixup_test3;
238 
239 	fixup.value = fixup.mask = 0xC;
240 	fixup.start_off = calc_offset(&fixup, &ftr_fixup_test3 + 1);
241 	fixup.end_off = calc_offset(&fixup, &ftr_fixup_test3 + 2);
242 	fixup.alt_start_off = calc_offset(&fixup, &ftr_fixup_test3_alt);
243 	fixup.alt_end_off = calc_offset(&fixup, &ftr_fixup_test3_alt + 2);
244 
245 	/* Sanity check */
246 	check(memcmp(&ftr_fixup_test3, &ftr_fixup_test3_orig, size) == 0);
247 
248 	/* Expect nothing to be patched, and the error returned to us */
249 	check(patch_feature_section(0xF, &fixup) == 1);
250 	check(memcmp(&ftr_fixup_test3, &ftr_fixup_test3_orig, size) == 0);
251 	check(patch_feature_section(0, &fixup) == 1);
252 	check(memcmp(&ftr_fixup_test3, &ftr_fixup_test3_orig, size) == 0);
253 	check(patch_feature_section(~0xF, &fixup) == 1);
254 	check(memcmp(&ftr_fixup_test3, &ftr_fixup_test3_orig, size) == 0);
255 }
256 
test_alternative_case_too_small(void)257 static void test_alternative_case_too_small(void)
258 {
259 	extern unsigned int ftr_fixup_test4;
260 	extern unsigned int end_ftr_fixup_test4;
261 	extern unsigned int ftr_fixup_test4_orig;
262 	extern unsigned int ftr_fixup_test4_alt;
263 	extern unsigned int ftr_fixup_test4_expected;
264 	int size = &end_ftr_fixup_test4 - &ftr_fixup_test4;
265 	unsigned long flag;
266 
267 	/* Check a high-bit flag */
268 	flag = 1UL << ((sizeof(unsigned long) - 1) * 8);
269 	fixup.value = fixup.mask = flag;
270 	fixup.start_off = calc_offset(&fixup, &ftr_fixup_test4 + 1);
271 	fixup.end_off = calc_offset(&fixup, &ftr_fixup_test4 + 5);
272 	fixup.alt_start_off = calc_offset(&fixup, &ftr_fixup_test4_alt);
273 	fixup.alt_end_off = calc_offset(&fixup, &ftr_fixup_test4_alt + 2);
274 
275 	/* Sanity check */
276 	check(memcmp(&ftr_fixup_test4, &ftr_fixup_test4_orig, size) == 0);
277 
278 	/* Check we don't patch if the value matches */
279 	patch_feature_section(flag, &fixup);
280 	check(memcmp(&ftr_fixup_test4, &ftr_fixup_test4_orig, size) == 0);
281 
282 	/* Check we do patch if the value doesn't match */
283 	patch_feature_section(0, &fixup);
284 	check(memcmp(&ftr_fixup_test4, &ftr_fixup_test4_expected, size) == 0);
285 
286 	/* Check we do patch if the mask doesn't match */
287 	memcpy(&ftr_fixup_test4, &ftr_fixup_test4_orig, size);
288 	check(memcmp(&ftr_fixup_test4, &ftr_fixup_test4_orig, size) == 0);
289 	patch_feature_section(~flag, &fixup);
290 	check(memcmp(&ftr_fixup_test4, &ftr_fixup_test4_expected, size) == 0);
291 }
292 
test_alternative_case_with_branch(void)293 static void test_alternative_case_with_branch(void)
294 {
295 	extern unsigned int ftr_fixup_test5;
296 	extern unsigned int end_ftr_fixup_test5;
297 	extern unsigned int ftr_fixup_test5_expected;
298 	int size = &end_ftr_fixup_test5 - &ftr_fixup_test5;
299 
300 	check(memcmp(&ftr_fixup_test5, &ftr_fixup_test5_expected, size) == 0);
301 }
302 
test_alternative_case_with_external_branch(void)303 static void test_alternative_case_with_external_branch(void)
304 {
305 	extern unsigned int ftr_fixup_test6;
306 	extern unsigned int end_ftr_fixup_test6;
307 	extern unsigned int ftr_fixup_test6_expected;
308 	int size = &end_ftr_fixup_test6 - &ftr_fixup_test6;
309 
310 	check(memcmp(&ftr_fixup_test6, &ftr_fixup_test6_expected, size) == 0);
311 }
312 
test_cpu_macros(void)313 static void test_cpu_macros(void)
314 {
315 	extern u8 ftr_fixup_test_FTR_macros;
316 	extern u8 ftr_fixup_test_FTR_macros_expected;
317 	unsigned long size = &ftr_fixup_test_FTR_macros_expected -
318 			     &ftr_fixup_test_FTR_macros;
319 
320 	/* The fixups have already been done for us during boot */
321 	check(memcmp(&ftr_fixup_test_FTR_macros,
322 		     &ftr_fixup_test_FTR_macros_expected, size) == 0);
323 }
324 
test_fw_macros(void)325 static void test_fw_macros(void)
326 {
327 #ifdef CONFIG_PPC64
328 	extern u8 ftr_fixup_test_FW_FTR_macros;
329 	extern u8 ftr_fixup_test_FW_FTR_macros_expected;
330 	unsigned long size = &ftr_fixup_test_FW_FTR_macros_expected -
331 			     &ftr_fixup_test_FW_FTR_macros;
332 
333 	/* The fixups have already been done for us during boot */
334 	check(memcmp(&ftr_fixup_test_FW_FTR_macros,
335 		     &ftr_fixup_test_FW_FTR_macros_expected, size) == 0);
336 #endif
337 }
338 
test_lwsync_macros(void)339 static void test_lwsync_macros(void)
340 {
341 	extern u8 lwsync_fixup_test;
342 	extern u8 end_lwsync_fixup_test;
343 	extern u8 lwsync_fixup_test_expected_LWSYNC;
344 	extern u8 lwsync_fixup_test_expected_SYNC;
345 	unsigned long size = &end_lwsync_fixup_test -
346 			     &lwsync_fixup_test;
347 
348 	/* The fixups have already been done for us during boot */
349 	if (cur_cpu_spec->cpu_features & CPU_FTR_LWSYNC) {
350 		check(memcmp(&lwsync_fixup_test,
351 			     &lwsync_fixup_test_expected_LWSYNC, size) == 0);
352 	} else {
353 		check(memcmp(&lwsync_fixup_test,
354 			     &lwsync_fixup_test_expected_SYNC, size) == 0);
355 	}
356 }
357 
test_feature_fixups(void)358 static int __init test_feature_fixups(void)
359 {
360 	printk(KERN_DEBUG "Running feature fixup self-tests ...\n");
361 
362 	test_basic_patching();
363 	test_alternative_patching();
364 	test_alternative_case_too_big();
365 	test_alternative_case_too_small();
366 	test_alternative_case_with_branch();
367 	test_alternative_case_with_external_branch();
368 	test_cpu_macros();
369 	test_fw_macros();
370 	test_lwsync_macros();
371 
372 	return 0;
373 }
374 late_initcall(test_feature_fixups);
375 
376 #endif /* CONFIG_FTR_FIXUP_SELFTEST */
377