1 /*
2  * Handle unaligned accesses by emulation.
3  *
4  * This file is subject to the terms and conditions of the GNU General Public
5  * License.  See the file "COPYING" in the main directory of this archive
6  * for more details.
7  *
8  * Copyright (C) 1996, 1998, 1999, 2002 by Ralf Baechle
9  * Copyright (C) 1999 Silicon Graphics, Inc.
10  * Copyright (C) 2014 Imagination Technologies Ltd.
11  *
12  * This file contains exception handler for address error exception with the
13  * special capability to execute faulting instructions in software.  The
14  * handler does not try to handle the case when the program counter points
15  * to an address not aligned to a word boundary.
16  *
17  * Putting data to unaligned addresses is a bad practice even on Intel where
18  * only the performance is affected.  Much worse is that such code is non-
19  * portable.  Due to several programs that die on MIPS due to alignment
20  * problems I decided to implement this handler anyway though I originally
21  * didn't intend to do this at all for user code.
22  *
23  * For now I enable fixing of address errors by default to make life easier.
24  * I however intend to disable this somewhen in the future when the alignment
25  * problems with user programs have been fixed.	 For programmers this is the
26  * right way to go.
27  *
28  * Fixing address errors is a per process option.  The option is inherited
29  * across fork(2) and execve(2) calls.	If you really want to use the
30  * option in your user programs - I discourage the use of the software
31  * emulation strongly - use the following code in your userland stuff:
32  *
33  * #include <sys/sysmips.h>
34  *
35  * ...
36  * sysmips(MIPS_FIXADE, x);
37  * ...
38  *
39  * The argument x is 0 for disabling software emulation, enabled otherwise.
40  *
41  * Below a little program to play around with this feature.
42  *
43  * #include <stdio.h>
44  * #include <sys/sysmips.h>
45  *
46  * struct foo {
47  *	   unsigned char bar[8];
48  * };
49  *
50  * main(int argc, char *argv[])
51  * {
52  *	   struct foo x = {0, 1, 2, 3, 4, 5, 6, 7};
53  *	   unsigned int *p = (unsigned int *) (x.bar + 3);
54  *	   int i;
55  *
56  *	   if (argc > 1)
57  *		   sysmips(MIPS_FIXADE, atoi(argv[1]));
58  *
59  *	   printf("*p = %08lx\n", *p);
60  *
61  *	   *p = 0xdeadface;
62  *
63  *	   for(i = 0; i <= 7; i++)
64  *	   printf("%02x ", x.bar[i]);
65  *	   printf("\n");
66  * }
67  *
68  * Coprocessor loads are not supported; I think this case is unimportant
69  * in the practice.
70  *
71  * TODO: Handle ndc (attempted store to doubleword in uncached memory)
72  *	 exception for the R6000.
73  *	 A store crossing a page boundary might be executed only partially.
74  *	 Undo the partial store in this case.
75  */
76 #include <linux/context_tracking.h>
77 #include <linux/mm.h>
78 #include <linux/signal.h>
79 #include <linux/smp.h>
80 #include <linux/sched.h>
81 #include <linux/debugfs.h>
82 #include <linux/perf_event.h>
83 
84 #include <asm/asm.h>
85 #include <asm/branch.h>
86 #include <asm/byteorder.h>
87 #include <asm/cop2.h>
88 #include <asm/fpu.h>
89 #include <asm/fpu_emulator.h>
90 #include <asm/inst.h>
91 #include <asm/uaccess.h>
92 
/* Stringify helpers: STR(x) expands x, then turns it into a string literal. */
#define STR(x)	__STR(x)
#define __STR(x)  #x

/* Policy for handling an unaligned access from user space. */
enum {
	UNALIGNED_ACTION_QUIET,		/* fix up silently (default) */
	UNALIGNED_ACTION_SIGNAL,	/* deliver a signal instead of fixing up */
	UNALIGNED_ACTION_SHOW,		/* fix up, but dump registers first */
};
#ifdef CONFIG_DEBUG_FS
/* Counters/knobs exported via debugfs (writable policy, emulation count). */
static u32 unaligned_instructions;
static u32 unaligned_action;
#else
/* Without debugfs there is no knob; policy is compiled-in as "quiet". */
#define unaligned_action UNALIGNED_ACTION_QUIET
#endif
extern void show_registers(struct pt_regs *regs);
108 
109 #ifdef __BIG_ENDIAN
/*
 * Big-endian: load a signed halfword from an unaligned address,
 * byte by byte (lb for the sign-carrying high byte, lbu for the low
 * byte).  $1 (AT) is used as scratch, hence .set noat.  On success
 * res = 0; if either byte access faults, the __ex_table entry routes
 * to the fixup at 4:, which sets res = -EFAULT.  "type" selects the
 * user/kernel (EVA) variant of the load instruction.
 */
#define     _LoadHW(addr, value, res, type)  \
do {                                                        \
		__asm__ __volatile__ (".set\tnoat\n"        \
			"1:\t"type##_lb("%0", "0(%2)")"\n"  \
			"2:\t"type##_lbu("$1", "1(%2)")"\n\t"\
			"sll\t%0, 0x8\n\t"                  \
			"or\t%0, $1\n\t"                    \
			"li\t%1, 0\n"                       \
			"3:\t.set\tat\n\t"                  \
			".insn\n\t"                         \
			".section\t.fixup,\"ax\"\n\t"       \
			"4:\tli\t%1, %3\n\t"                \
			"j\t3b\n\t"                         \
			".previous\n\t"                     \
			".section\t__ex_table,\"a\"\n\t"    \
			STR(PTR)"\t1b, 4b\n\t"              \
			STR(PTR)"\t2b, 4b\n\t"              \
			".previous"                         \
			: "=&r" (value), "=r" (res)         \
			: "r" (addr), "i" (-EFAULT));       \
} while(0)
131 
#ifndef CONFIG_CPU_MIPSR6
/*
 * Big-endian, pre-R6: load an unaligned word with the lwl/lwr pair.
 * res = 0 on success, -EFAULT (via the fixup at 4:) if either access
 * faults.  "type" selects the user/kernel (EVA) instruction variant.
 */
#define     _LoadW(addr, value, res, type)   \
do {                                                        \
		__asm__ __volatile__ (                      \
			"1:\t"type##_lwl("%0", "(%2)")"\n"   \
			"2:\t"type##_lwr("%0", "3(%2)")"\n\t"\
			"li\t%1, 0\n"                       \
			"3:\n\t"                            \
			".insn\n\t"                         \
			".section\t.fixup,\"ax\"\n\t"       \
			"4:\tli\t%1, %3\n\t"                \
			"j\t3b\n\t"                         \
			".previous\n\t"                     \
			".section\t__ex_table,\"a\"\n\t"    \
			STR(PTR)"\t1b, 4b\n\t"              \
			STR(PTR)"\t2b, 4b\n\t"              \
			".previous"                         \
			: "=&r" (value), "=r" (res)         \
			: "r" (addr), "i" (-EFAULT));       \
} while(0)

#else
/* MIPSR6 has no lwl instruction */
/*
 * Big-endian, R6: lwl/lwr were removed, so assemble the word from four
 * individually-loaded bytes (most significant first), using $1 (AT) as
 * scratch under .set noat.  Any faulting byte jumps to the fixup at
 * 11:, which sets res = -EFAULT.
 */
#define     _LoadW(addr, value, res, type) \
do {                                                        \
		__asm__ __volatile__ (			    \
			".set\tpush\n"			    \
			".set\tnoat\n\t"		    \
			"1:"type##_lb("%0", "0(%2)")"\n\t"  \
			"2:"type##_lbu("$1", "1(%2)")"\n\t" \
			"sll\t%0, 0x8\n\t"		    \
			"or\t%0, $1\n\t"		    \
			"3:"type##_lbu("$1", "2(%2)")"\n\t" \
			"sll\t%0, 0x8\n\t"		    \
			"or\t%0, $1\n\t"		    \
			"4:"type##_lbu("$1", "3(%2)")"\n\t" \
			"sll\t%0, 0x8\n\t"		    \
			"or\t%0, $1\n\t"		    \
			"li\t%1, 0\n"			    \
			".set\tpop\n"			    \
			"10:\n\t"			    \
			".insn\n\t"			    \
			".section\t.fixup,\"ax\"\n\t"	    \
			"11:\tli\t%1, %3\n\t"		    \
			"j\t10b\n\t"			    \
			".previous\n\t"			    \
			".section\t__ex_table,\"a\"\n\t"    \
			STR(PTR)"\t1b, 11b\n\t"		    \
			STR(PTR)"\t2b, 11b\n\t"		    \
			STR(PTR)"\t3b, 11b\n\t"		    \
			STR(PTR)"\t4b, 11b\n\t"		    \
			".previous"			    \
			: "=&r" (value), "=r" (res)	    \
			: "r" (addr), "i" (-EFAULT));       \
} while(0)

#endif /* CONFIG_CPU_MIPSR6 */
189 
/*
 * Big-endian: load an unsigned halfword from an unaligned address
 * (both bytes via lbu, so no sign extension).  res = 0 on success,
 * -EFAULT via the fixup at 4: on fault.
 */
#define     _LoadHWU(addr, value, res, type) \
do {                                                        \
		__asm__ __volatile__ (                      \
			".set\tnoat\n"                      \
			"1:\t"type##_lbu("%0", "0(%2)")"\n" \
			"2:\t"type##_lbu("$1", "1(%2)")"\n\t"\
			"sll\t%0, 0x8\n\t"                  \
			"or\t%0, $1\n\t"                    \
			"li\t%1, 0\n"                       \
			"3:\n\t"                            \
			".insn\n\t"                         \
			".set\tat\n\t"                      \
			".section\t.fixup,\"ax\"\n\t"       \
			"4:\tli\t%1, %3\n\t"                \
			"j\t3b\n\t"                         \
			".previous\n\t"                     \
			".section\t__ex_table,\"a\"\n\t"    \
			STR(PTR)"\t1b, 4b\n\t"              \
			STR(PTR)"\t2b, 4b\n\t"              \
			".previous"                         \
			: "=&r" (value), "=r" (res)         \
			: "r" (addr), "i" (-EFAULT));       \
} while(0)
213 
#ifndef CONFIG_CPU_MIPSR6
/*
 * Big-endian, pre-R6: load an unaligned word zero-extended to 64 bits.
 * lwl/lwr sign-extend on 64-bit CPUs, so the dsll/dsrl pair clears the
 * upper 32 bits afterwards (these macros are only used on 64-bit
 * kernels; see the CONFIG_64BIT usage later in this file).
 */
#define     _LoadWU(addr, value, res, type)  \
do {                                                        \
		__asm__ __volatile__ (                      \
			"1:\t"type##_lwl("%0", "(%2)")"\n"  \
			"2:\t"type##_lwr("%0", "3(%2)")"\n\t"\
			"dsll\t%0, %0, 32\n\t"              \
			"dsrl\t%0, %0, 32\n\t"              \
			"li\t%1, 0\n"                       \
			"3:\n\t"                            \
			".insn\n\t"                         \
			"\t.section\t.fixup,\"ax\"\n\t"     \
			"4:\tli\t%1, %3\n\t"                \
			"j\t3b\n\t"                         \
			".previous\n\t"                     \
			".section\t__ex_table,\"a\"\n\t"    \
			STR(PTR)"\t1b, 4b\n\t"              \
			STR(PTR)"\t2b, 4b\n\t"              \
			".previous"                         \
			: "=&r" (value), "=r" (res)         \
			: "r" (addr), "i" (-EFAULT));       \
} while(0)

/*
 * Big-endian, pre-R6: load an unaligned doubleword with the ldl/ldr
 * pair.  res = 0 on success, -EFAULT via the fixup on fault.
 * No "type" parameter: there is no EVA variant for 64-bit loads.
 */
#define     _LoadDW(addr, value, res)  \
do {                                                        \
		__asm__ __volatile__ (                      \
			"1:\tldl\t%0, (%2)\n"               \
			"2:\tldr\t%0, 7(%2)\n\t"            \
			"li\t%1, 0\n"                       \
			"3:\n\t"                            \
			".insn\n\t"                         \
			"\t.section\t.fixup,\"ax\"\n\t"     \
			"4:\tli\t%1, %3\n\t"                \
			"j\t3b\n\t"                         \
			".previous\n\t"                     \
			".section\t__ex_table,\"a\"\n\t"    \
			STR(PTR)"\t1b, 4b\n\t"              \
			STR(PTR)"\t2b, 4b\n\t"              \
			".previous"                         \
			: "=&r" (value), "=r" (res)         \
			: "r" (addr), "i" (-EFAULT));       \
} while(0)

#else
/* MIPSR6 has not lwl and ldl instructions */
/*
 * Big-endian, R6: assemble the zero-extended word from four lbu loads
 * (most significant byte first); sll only shifts within 32 bits, and
 * the first load is lbu, so the upper half stays clear.
 */
#define	    _LoadWU(addr, value, res, type) \
do {                                                        \
		__asm__ __volatile__ (			    \
			".set\tpush\n\t"		    \
			".set\tnoat\n\t"		    \
			"1:"type##_lbu("%0", "0(%2)")"\n\t" \
			"2:"type##_lbu("$1", "1(%2)")"\n\t" \
			"sll\t%0, 0x8\n\t"		    \
			"or\t%0, $1\n\t"		    \
			"3:"type##_lbu("$1", "2(%2)")"\n\t" \
			"sll\t%0, 0x8\n\t"		    \
			"or\t%0, $1\n\t"		    \
			"4:"type##_lbu("$1", "3(%2)")"\n\t" \
			"sll\t%0, 0x8\n\t"		    \
			"or\t%0, $1\n\t"		    \
			"li\t%1, 0\n"			    \
			".set\tpop\n"			    \
			"10:\n\t"			    \
			".insn\n\t"			    \
			".section\t.fixup,\"ax\"\n\t"	    \
			"11:\tli\t%1, %3\n\t"		    \
			"j\t10b\n\t"			    \
			".previous\n\t"			    \
			".section\t__ex_table,\"a\"\n\t"    \
			STR(PTR)"\t1b, 11b\n\t"		    \
			STR(PTR)"\t2b, 11b\n\t"		    \
			STR(PTR)"\t3b, 11b\n\t"		    \
			STR(PTR)"\t4b, 11b\n\t"		    \
			".previous"			    \
			: "=&r" (value), "=r" (res)	    \
			: "r" (addr), "i" (-EFAULT));       \
} while(0)

/*
 * Big-endian, R6: assemble a doubleword from eight byte loads (most
 * significant first).  The first load is lb so the sign of byte 0
 * propagates through the dsll shifts; any fault routes to 11:.
 */
#define     _LoadDW(addr, value, res)  \
do {                                                        \
		__asm__ __volatile__ (			    \
			".set\tpush\n\t"		    \
			".set\tnoat\n\t"		    \
			"1:lb\t%0, 0(%2)\n\t"    	    \
			"2:lbu\t $1, 1(%2)\n\t"   	    \
			"dsll\t%0, 0x8\n\t"		    \
			"or\t%0, $1\n\t"		    \
			"3:lbu\t$1, 2(%2)\n\t"   	    \
			"dsll\t%0, 0x8\n\t"		    \
			"or\t%0, $1\n\t"		    \
			"4:lbu\t$1, 3(%2)\n\t"   	    \
			"dsll\t%0, 0x8\n\t"		    \
			"or\t%0, $1\n\t"		    \
			"5:lbu\t$1, 4(%2)\n\t"   	    \
			"dsll\t%0, 0x8\n\t"		    \
			"or\t%0, $1\n\t"		    \
			"6:lbu\t$1, 5(%2)\n\t"   	    \
			"dsll\t%0, 0x8\n\t"		    \
			"or\t%0, $1\n\t"		    \
			"7:lbu\t$1, 6(%2)\n\t"   	    \
			"dsll\t%0, 0x8\n\t"		    \
			"or\t%0, $1\n\t"		    \
			"8:lbu\t$1, 7(%2)\n\t"   	    \
			"dsll\t%0, 0x8\n\t"		    \
			"or\t%0, $1\n\t"		    \
			"li\t%1, 0\n"			    \
			".set\tpop\n\t"			    \
			"10:\n\t"			    \
			".insn\n\t"			    \
			".section\t.fixup,\"ax\"\n\t"	    \
			"11:\tli\t%1, %3\n\t"		    \
			"j\t10b\n\t"			    \
			".previous\n\t"			    \
			".section\t__ex_table,\"a\"\n\t"    \
			STR(PTR)"\t1b, 11b\n\t"		    \
			STR(PTR)"\t2b, 11b\n\t"		    \
			STR(PTR)"\t3b, 11b\n\t"		    \
			STR(PTR)"\t4b, 11b\n\t"		    \
			STR(PTR)"\t5b, 11b\n\t"		    \
			STR(PTR)"\t6b, 11b\n\t"		    \
			STR(PTR)"\t7b, 11b\n\t"		    \
			STR(PTR)"\t8b, 11b\n\t"		    \
			".previous"			    \
			: "=&r" (value), "=r" (res)	    \
			: "r" (addr), "i" (-EFAULT));       \
} while(0)

#endif /* CONFIG_CPU_MIPSR6 */
342 
343 
/*
 * Big-endian: store a halfword to an unaligned address, byte by byte
 * (low byte to addr+1 first, then the high byte to addr).  res = 0 on
 * success, -EFAULT via the fixup at 4: on fault.
 *
 * A "memory" clobber is declared because the asm writes through addr
 * but no memory operand expresses that to the compiler; without it GCC
 * may cache or reorder memory accesses across the asm.  This matches
 * the MIPSR6 store variants below, which already declare it.
 */
#define     _StoreHW(addr, value, res, type) \
do {                                                        \
		__asm__ __volatile__ (                      \
			".set\tnoat\n"                      \
			"1:\t"type##_sb("%1", "1(%2)")"\n"  \
			"srl\t$1, %1, 0x8\n"                \
			"2:\t"type##_sb("$1", "0(%2)")"\n"  \
			".set\tat\n\t"                      \
			"li\t%0, 0\n"                       \
			"3:\n\t"                            \
			".insn\n\t"                         \
			".section\t.fixup,\"ax\"\n\t"       \
			"4:\tli\t%0, %3\n\t"                \
			"j\t3b\n\t"                         \
			".previous\n\t"                     \
			".section\t__ex_table,\"a\"\n\t"    \
			STR(PTR)"\t1b, 4b\n\t"              \
			STR(PTR)"\t2b, 4b\n\t"              \
			".previous"                         \
			: "=r" (res)                        \
			: "r" (value), "r" (addr), "i" (-EFAULT)\
			: "memory");                        \
} while(0)
366 
#ifndef CONFIG_CPU_MIPSR6
/*
 * Big-endian, pre-R6: store an unaligned word with the swl/swr pair.
 * res = 0 on success, -EFAULT via the fixup on fault.
 *
 * "memory" clobber added: the asm stores through addr, which is not
 * visible to the compiler via the operands; this matches the MIPSR6
 * variants below, which already declare it.
 */
#define     _StoreW(addr, value, res, type)  \
do {                                                        \
		__asm__ __volatile__ (                      \
			"1:\t"type##_swl("%1", "(%2)")"\n"  \
			"2:\t"type##_swr("%1", "3(%2)")"\n\t"\
			"li\t%0, 0\n"                       \
			"3:\n\t"                            \
			".insn\n\t"                         \
			".section\t.fixup,\"ax\"\n\t"       \
			"4:\tli\t%0, %3\n\t"                \
			"j\t3b\n\t"                         \
			".previous\n\t"                     \
			".section\t__ex_table,\"a\"\n\t"    \
			STR(PTR)"\t1b, 4b\n\t"              \
			STR(PTR)"\t2b, 4b\n\t"              \
			".previous"                         \
		: "=r" (res)                                \
		: "r" (value), "r" (addr), "i" (-EFAULT)    \
		: "memory");                                \
} while(0)

/*
 * Big-endian, pre-R6: store an unaligned doubleword with the sdl/sdr
 * pair.  "memory" clobber added for the same reason as _StoreW above.
 */
#define     _StoreDW(addr, value, res) \
do {                                                        \
		__asm__ __volatile__ (                      \
			"1:\tsdl\t%1,(%2)\n"                \
			"2:\tsdr\t%1, 7(%2)\n\t"            \
			"li\t%0, 0\n"                       \
			"3:\n\t"                            \
			".insn\n\t"                         \
			".section\t.fixup,\"ax\"\n\t"       \
			"4:\tli\t%0, %3\n\t"                \
			"j\t3b\n\t"                         \
			".previous\n\t"                     \
			".section\t__ex_table,\"a\"\n\t"    \
			STR(PTR)"\t1b, 4b\n\t"              \
			STR(PTR)"\t2b, 4b\n\t"              \
			".previous"                         \
		: "=r" (res)                                \
		: "r" (value), "r" (addr), "i" (-EFAULT)    \
		: "memory");                                \
} while(0)

#else
/* MIPSR6 has no swl and sdl instructions */
/*
 * Big-endian, R6: store the word one byte at a time, least significant
 * byte (addr+3) first, shifting value right as we go; $1 (AT) is the
 * scratch register under .set noat.  Any faulting byte routes to 11:.
 */
#define     _StoreW(addr, value, res, type)  \
do {                                                        \
		__asm__ __volatile__ (                      \
			".set\tpush\n\t"		    \
			".set\tnoat\n\t"		    \
			"1:"type##_sb("%1", "3(%2)")"\n\t"  \
			"srl\t$1, %1, 0x8\n\t"		    \
			"2:"type##_sb("$1", "2(%2)")"\n\t"  \
			"srl\t$1, $1,  0x8\n\t"		    \
			"3:"type##_sb("$1", "1(%2)")"\n\t"  \
			"srl\t$1, $1, 0x8\n\t"		    \
			"4:"type##_sb("$1", "0(%2)")"\n\t"  \
			".set\tpop\n\t"			    \
			"li\t%0, 0\n"			    \
			"10:\n\t"			    \
			".insn\n\t"			    \
			".section\t.fixup,\"ax\"\n\t"	    \
			"11:\tli\t%0, %3\n\t"		    \
			"j\t10b\n\t"			    \
			".previous\n\t"			    \
			".section\t__ex_table,\"a\"\n\t"    \
			STR(PTR)"\t1b, 11b\n\t"		    \
			STR(PTR)"\t2b, 11b\n\t"		    \
			STR(PTR)"\t3b, 11b\n\t"		    \
			STR(PTR)"\t4b, 11b\n\t"		    \
			".previous"			    \
		: "=&r" (res)			    	    \
		: "r" (value), "r" (addr), "i" (-EFAULT)    \
		: "memory");                                \
} while(0)

/*
 * Big-endian, R6: store a doubleword one byte at a time, least
 * significant byte (addr+7) first.  Same fixup scheme as _StoreW.
 */
#define     _StoreDW(addr, value, res) \
do {                                                        \
		__asm__ __volatile__ (                      \
			".set\tpush\n\t"		    \
			".set\tnoat\n\t"		    \
			"1:sb\t%1, 7(%2)\n\t"    	    \
			"dsrl\t$1, %1, 0x8\n\t"		    \
			"2:sb\t$1, 6(%2)\n\t"    	    \
			"dsrl\t$1, $1, 0x8\n\t"		    \
			"3:sb\t$1, 5(%2)\n\t"    	    \
			"dsrl\t$1, $1, 0x8\n\t"		    \
			"4:sb\t$1, 4(%2)\n\t"    	    \
			"dsrl\t$1, $1, 0x8\n\t"		    \
			"5:sb\t$1, 3(%2)\n\t"    	    \
			"dsrl\t$1, $1, 0x8\n\t"		    \
			"6:sb\t$1, 2(%2)\n\t"    	    \
			"dsrl\t$1, $1, 0x8\n\t"		    \
			"7:sb\t$1, 1(%2)\n\t"    	    \
			"dsrl\t$1, $1, 0x8\n\t"		    \
			"8:sb\t$1, 0(%2)\n\t"    	    \
			"dsrl\t$1, $1, 0x8\n\t"		    \
			".set\tpop\n\t"			    \
			"li\t%0, 0\n"			    \
			"10:\n\t"			    \
			".insn\n\t"			    \
			".section\t.fixup,\"ax\"\n\t"	    \
			"11:\tli\t%0, %3\n\t"		    \
			"j\t10b\n\t"			    \
			".previous\n\t"			    \
			".section\t__ex_table,\"a\"\n\t"    \
			STR(PTR)"\t1b, 11b\n\t"		    \
			STR(PTR)"\t2b, 11b\n\t"		    \
			STR(PTR)"\t3b, 11b\n\t"		    \
			STR(PTR)"\t4b, 11b\n\t"		    \
			STR(PTR)"\t5b, 11b\n\t"		    \
			STR(PTR)"\t6b, 11b\n\t"		    \
			STR(PTR)"\t7b, 11b\n\t"		    \
			STR(PTR)"\t8b, 11b\n\t"		    \
			".previous"			    \
		: "=&r" (res)			    	    \
		: "r" (value), "r" (addr), "i" (-EFAULT)    \
		: "memory");                                \
} while(0)

#endif /* CONFIG_CPU_MIPSR6 */
486 
487 #else /* __BIG_ENDIAN */
488 
/*
 * Little-endian: load a signed halfword from an unaligned address;
 * the sign-carrying byte is at addr+1 (lb), the low byte at addr
 * (lbu).  res = 0 on success, -EFAULT via the fixup at 4: on fault.
 */
#define     _LoadHW(addr, value, res, type)  \
do {                                                        \
		__asm__ __volatile__ (".set\tnoat\n"        \
			"1:\t"type##_lb("%0", "1(%2)")"\n"  \
			"2:\t"type##_lbu("$1", "0(%2)")"\n\t"\
			"sll\t%0, 0x8\n\t"                  \
			"or\t%0, $1\n\t"                    \
			"li\t%1, 0\n"                       \
			"3:\t.set\tat\n\t"                  \
			".insn\n\t"                         \
			".section\t.fixup,\"ax\"\n\t"       \
			"4:\tli\t%1, %3\n\t"                \
			"j\t3b\n\t"                         \
			".previous\n\t"                     \
			".section\t__ex_table,\"a\"\n\t"    \
			STR(PTR)"\t1b, 4b\n\t"              \
			STR(PTR)"\t2b, 4b\n\t"              \
			".previous"                         \
			: "=&r" (value), "=r" (res)         \
			: "r" (addr), "i" (-EFAULT));       \
} while(0)
510 
#ifndef CONFIG_CPU_MIPSR6
/*
 * Little-endian, pre-R6: load an unaligned word with lwl/lwr (offsets
 * mirrored relative to the big-endian version).  res = 0 on success,
 * -EFAULT via the fixup on fault.
 */
#define     _LoadW(addr, value, res, type)   \
do {                                                        \
		__asm__ __volatile__ (                      \
			"1:\t"type##_lwl("%0", "3(%2)")"\n" \
			"2:\t"type##_lwr("%0", "(%2)")"\n\t"\
			"li\t%1, 0\n"                       \
			"3:\n\t"                            \
			".insn\n\t"                         \
			".section\t.fixup,\"ax\"\n\t"       \
			"4:\tli\t%1, %3\n\t"                \
			"j\t3b\n\t"                         \
			".previous\n\t"                     \
			".section\t__ex_table,\"a\"\n\t"    \
			STR(PTR)"\t1b, 4b\n\t"              \
			STR(PTR)"\t2b, 4b\n\t"              \
			".previous"                         \
			: "=&r" (value), "=r" (res)         \
			: "r" (addr), "i" (-EFAULT));       \
} while(0)

#else
/* MIPSR6 has no lwl instruction */
/*
 * Little-endian, R6: assemble the word from four byte loads, most
 * significant byte (addr+3) first, with $1 (AT) as scratch under
 * .set noat.  Faults route to the fixup at 11:.
 */
#define     _LoadW(addr, value, res, type) \
do {                                                        \
		__asm__ __volatile__ (			    \
			".set\tpush\n"			    \
			".set\tnoat\n\t"		    \
			"1:"type##_lb("%0", "3(%2)")"\n\t"  \
			"2:"type##_lbu("$1", "2(%2)")"\n\t" \
			"sll\t%0, 0x8\n\t"		    \
			"or\t%0, $1\n\t"		    \
			"3:"type##_lbu("$1", "1(%2)")"\n\t" \
			"sll\t%0, 0x8\n\t"		    \
			"or\t%0, $1\n\t"		    \
			"4:"type##_lbu("$1", "0(%2)")"\n\t" \
			"sll\t%0, 0x8\n\t"		    \
			"or\t%0, $1\n\t"		    \
			"li\t%1, 0\n"			    \
			".set\tpop\n"			    \
			"10:\n\t"			    \
			".insn\n\t"			    \
			".section\t.fixup,\"ax\"\n\t"	    \
			"11:\tli\t%1, %3\n\t"		    \
			"j\t10b\n\t"			    \
			".previous\n\t"			    \
			".section\t__ex_table,\"a\"\n\t"    \
			STR(PTR)"\t1b, 11b\n\t"		    \
			STR(PTR)"\t2b, 11b\n\t"		    \
			STR(PTR)"\t3b, 11b\n\t"		    \
			STR(PTR)"\t4b, 11b\n\t"		    \
			".previous"			    \
			: "=&r" (value), "=r" (res)	    \
			: "r" (addr), "i" (-EFAULT));       \
} while(0)

#endif /* CONFIG_CPU_MIPSR6 */
569 
/*
 * Little-endian: load an unsigned halfword from an unaligned address
 * (both bytes via lbu; high byte is at addr+1).  res = 0 on success,
 * -EFAULT via the fixup at 4: on fault.
 */
#define     _LoadHWU(addr, value, res, type) \
do {                                                        \
		__asm__ __volatile__ (                      \
			".set\tnoat\n"                      \
			"1:\t"type##_lbu("%0", "1(%2)")"\n" \
			"2:\t"type##_lbu("$1", "0(%2)")"\n\t"\
			"sll\t%0, 0x8\n\t"                  \
			"or\t%0, $1\n\t"                    \
			"li\t%1, 0\n"                       \
			"3:\n\t"                            \
			".insn\n\t"                         \
			".set\tat\n\t"                      \
			".section\t.fixup,\"ax\"\n\t"       \
			"4:\tli\t%1, %3\n\t"                \
			"j\t3b\n\t"                         \
			".previous\n\t"                     \
			".section\t__ex_table,\"a\"\n\t"    \
			STR(PTR)"\t1b, 4b\n\t"              \
			STR(PTR)"\t2b, 4b\n\t"              \
			".previous"                         \
			: "=&r" (value), "=r" (res)         \
			: "r" (addr), "i" (-EFAULT));       \
} while(0)
593 
#ifndef CONFIG_CPU_MIPSR6
/*
 * Little-endian, pre-R6: load an unaligned word zero-extended to
 * 64 bits; the dsll/dsrl pair clears the upper 32 bits that lwl/lwr
 * sign-extended (64-bit kernels only).
 */
#define     _LoadWU(addr, value, res, type)  \
do {                                                        \
		__asm__ __volatile__ (                      \
			"1:\t"type##_lwl("%0", "3(%2)")"\n" \
			"2:\t"type##_lwr("%0", "(%2)")"\n\t"\
			"dsll\t%0, %0, 32\n\t"              \
			"dsrl\t%0, %0, 32\n\t"              \
			"li\t%1, 0\n"                       \
			"3:\n\t"                            \
			".insn\n\t"                         \
			"\t.section\t.fixup,\"ax\"\n\t"     \
			"4:\tli\t%1, %3\n\t"                \
			"j\t3b\n\t"                         \
			".previous\n\t"                     \
			".section\t__ex_table,\"a\"\n\t"    \
			STR(PTR)"\t1b, 4b\n\t"              \
			STR(PTR)"\t2b, 4b\n\t"              \
			".previous"                         \
			: "=&r" (value), "=r" (res)         \
			: "r" (addr), "i" (-EFAULT));       \
} while(0)

/*
 * Little-endian, pre-R6: load an unaligned doubleword with ldl/ldr
 * (offsets mirrored relative to big-endian).  res = 0 on success,
 * -EFAULT via the fixup on fault.
 */
#define     _LoadDW(addr, value, res)  \
do {                                                        \
		__asm__ __volatile__ (                      \
			"1:\tldl\t%0, 7(%2)\n"              \
			"2:\tldr\t%0, (%2)\n\t"             \
			"li\t%1, 0\n"                       \
			"3:\n\t"                            \
			".insn\n\t"                         \
			"\t.section\t.fixup,\"ax\"\n\t"     \
			"4:\tli\t%1, %3\n\t"                \
			"j\t3b\n\t"                         \
			".previous\n\t"                     \
			".section\t__ex_table,\"a\"\n\t"    \
			STR(PTR)"\t1b, 4b\n\t"              \
			STR(PTR)"\t2b, 4b\n\t"              \
			".previous"                         \
			: "=&r" (value), "=r" (res)         \
			: "r" (addr), "i" (-EFAULT));       \
} while(0)

#else
/* MIPSR6 has not lwl and ldl instructions */
/*
 * Little-endian, R6: assemble the zero-extended word from four lbu
 * loads, most significant byte (addr+3) first; faults route to 11:.
 */
#define	    _LoadWU(addr, value, res, type) \
do {                                                        \
		__asm__ __volatile__ (			    \
			".set\tpush\n\t"		    \
			".set\tnoat\n\t"		    \
			"1:"type##_lbu("%0", "3(%2)")"\n\t" \
			"2:"type##_lbu("$1", "2(%2)")"\n\t" \
			"sll\t%0, 0x8\n\t"		    \
			"or\t%0, $1\n\t"		    \
			"3:"type##_lbu("$1", "1(%2)")"\n\t" \
			"sll\t%0, 0x8\n\t"		    \
			"or\t%0, $1\n\t"		    \
			"4:"type##_lbu("$1", "0(%2)")"\n\t" \
			"sll\t%0, 0x8\n\t"		    \
			"or\t%0, $1\n\t"		    \
			"li\t%1, 0\n"			    \
			".set\tpop\n"			    \
			"10:\n\t"			    \
			".insn\n\t"			    \
			".section\t.fixup,\"ax\"\n\t"	    \
			"11:\tli\t%1, %3\n\t"		    \
			"j\t10b\n\t"			    \
			".previous\n\t"			    \
			".section\t__ex_table,\"a\"\n\t"    \
			STR(PTR)"\t1b, 11b\n\t"		    \
			STR(PTR)"\t2b, 11b\n\t"		    \
			STR(PTR)"\t3b, 11b\n\t"		    \
			STR(PTR)"\t4b, 11b\n\t"		    \
			".previous"			    \
			: "=&r" (value), "=r" (res)	    \
			: "r" (addr), "i" (-EFAULT));       \
} while(0)

/*
 * Little-endian, R6: assemble a doubleword from eight byte loads,
 * most significant byte (addr+7, via lb for sign) first.
 */
#define     _LoadDW(addr, value, res)  \
do {                                                        \
		__asm__ __volatile__ (			    \
			".set\tpush\n\t"		    \
			".set\tnoat\n\t"		    \
			"1:lb\t%0, 7(%2)\n\t"    	    \
			"2:lbu\t$1, 6(%2)\n\t"   	    \
			"dsll\t%0, 0x8\n\t"		    \
			"or\t%0, $1\n\t"		    \
			"3:lbu\t$1, 5(%2)\n\t"   	    \
			"dsll\t%0, 0x8\n\t"		    \
			"or\t%0, $1\n\t"		    \
			"4:lbu\t$1, 4(%2)\n\t"   	    \
			"dsll\t%0, 0x8\n\t"		    \
			"or\t%0, $1\n\t"		    \
			"5:lbu\t$1, 3(%2)\n\t"   	    \
			"dsll\t%0, 0x8\n\t"		    \
			"or\t%0, $1\n\t"		    \
			"6:lbu\t$1, 2(%2)\n\t"   	    \
			"dsll\t%0, 0x8\n\t"		    \
			"or\t%0, $1\n\t"		    \
			"7:lbu\t$1, 1(%2)\n\t"   	    \
			"dsll\t%0, 0x8\n\t"		    \
			"or\t%0, $1\n\t"		    \
			"8:lbu\t$1, 0(%2)\n\t"   	    \
			"dsll\t%0, 0x8\n\t"		    \
			"or\t%0, $1\n\t"		    \
			"li\t%1, 0\n"			    \
			".set\tpop\n\t"			    \
			"10:\n\t"			    \
			".insn\n\t"			    \
			".section\t.fixup,\"ax\"\n\t"	    \
			"11:\tli\t%1, %3\n\t"		    \
			"j\t10b\n\t"			    \
			".previous\n\t"			    \
			".section\t__ex_table,\"a\"\n\t"    \
			STR(PTR)"\t1b, 11b\n\t"		    \
			STR(PTR)"\t2b, 11b\n\t"		    \
			STR(PTR)"\t3b, 11b\n\t"		    \
			STR(PTR)"\t4b, 11b\n\t"		    \
			STR(PTR)"\t5b, 11b\n\t"		    \
			STR(PTR)"\t6b, 11b\n\t"		    \
			STR(PTR)"\t7b, 11b\n\t"		    \
			STR(PTR)"\t8b, 11b\n\t"		    \
			".previous"			    \
			: "=&r" (value), "=r" (res)	    \
			: "r" (addr), "i" (-EFAULT));       \
} while(0)
#endif /* CONFIG_CPU_MIPSR6 */
721 
/*
 * Little-endian: store a halfword to an unaligned address, byte by
 * byte (low byte to addr first, then the high byte to addr+1).
 * res = 0 on success, -EFAULT via the fixup at 4: on fault.
 *
 * A "memory" clobber is declared because the asm writes through addr
 * but no memory operand expresses that to the compiler; without it GCC
 * may cache or reorder memory accesses across the asm.  This matches
 * the MIPSR6 store variants below, which already declare it.
 */
#define     _StoreHW(addr, value, res, type) \
do {                                                        \
		__asm__ __volatile__ (                      \
			".set\tnoat\n"                      \
			"1:\t"type##_sb("%1", "0(%2)")"\n"  \
			"srl\t$1,%1, 0x8\n"                 \
			"2:\t"type##_sb("$1", "1(%2)")"\n"  \
			".set\tat\n\t"                      \
			"li\t%0, 0\n"                       \
			"3:\n\t"                            \
			".insn\n\t"                         \
			".section\t.fixup,\"ax\"\n\t"       \
			"4:\tli\t%0, %3\n\t"                \
			"j\t3b\n\t"                         \
			".previous\n\t"                     \
			".section\t__ex_table,\"a\"\n\t"    \
			STR(PTR)"\t1b, 4b\n\t"              \
			STR(PTR)"\t2b, 4b\n\t"              \
			".previous"                         \
			: "=r" (res)                        \
			: "r" (value), "r" (addr), "i" (-EFAULT)\
			: "memory");                        \
} while(0)
744 
#ifndef CONFIG_CPU_MIPSR6
/*
 * Little-endian, pre-R6: store an unaligned word with the swl/swr
 * pair.  res = 0 on success, -EFAULT via the fixup on fault.
 *
 * "memory" clobber added: the asm stores through addr, which is not
 * visible to the compiler via the operands; this matches the MIPSR6
 * variants below, which already declare it.
 */
#define     _StoreW(addr, value, res, type)  \
do {                                                        \
		__asm__ __volatile__ (                      \
			"1:\t"type##_swl("%1", "3(%2)")"\n" \
			"2:\t"type##_swr("%1", "(%2)")"\n\t"\
			"li\t%0, 0\n"                       \
			"3:\n\t"                            \
			".insn\n\t"                         \
			".section\t.fixup,\"ax\"\n\t"       \
			"4:\tli\t%0, %3\n\t"                \
			"j\t3b\n\t"                         \
			".previous\n\t"                     \
			".section\t__ex_table,\"a\"\n\t"    \
			STR(PTR)"\t1b, 4b\n\t"              \
			STR(PTR)"\t2b, 4b\n\t"              \
			".previous"                         \
		: "=r" (res)                                \
		: "r" (value), "r" (addr), "i" (-EFAULT)    \
		: "memory");                                \
} while(0)

/*
 * Little-endian, pre-R6: store an unaligned doubleword with the
 * sdl/sdr pair.  "memory" clobber added for the same reason as
 * _StoreW above.
 */
#define     _StoreDW(addr, value, res) \
do {                                                        \
		__asm__ __volatile__ (                      \
			"1:\tsdl\t%1, 7(%2)\n"              \
			"2:\tsdr\t%1, (%2)\n\t"             \
			"li\t%0, 0\n"                       \
			"3:\n\t"                            \
			".insn\n\t"                         \
			".section\t.fixup,\"ax\"\n\t"       \
			"4:\tli\t%0, %3\n\t"                \
			"j\t3b\n\t"                         \
			".previous\n\t"                     \
			".section\t__ex_table,\"a\"\n\t"    \
			STR(PTR)"\t1b, 4b\n\t"              \
			STR(PTR)"\t2b, 4b\n\t"              \
			".previous"                         \
		: "=r" (res)                                \
		: "r" (value), "r" (addr), "i" (-EFAULT)    \
		: "memory");                                \
} while(0)

#else
/* MIPSR6 has no swl and sdl instructions */
/*
 * Little-endian, R6: store the word one byte at a time, least
 * significant byte (addr) first, shifting value right as we go.
 * Any faulting byte routes to the fixup at 11:.
 */
#define     _StoreW(addr, value, res, type)  \
do {                                                        \
		__asm__ __volatile__ (                      \
			".set\tpush\n\t"		    \
			".set\tnoat\n\t"		    \
			"1:"type##_sb("%1", "0(%2)")"\n\t"  \
			"srl\t$1, %1, 0x8\n\t"		    \
			"2:"type##_sb("$1", "1(%2)")"\n\t"  \
			"srl\t$1, $1,  0x8\n\t"		    \
			"3:"type##_sb("$1", "2(%2)")"\n\t"  \
			"srl\t$1, $1, 0x8\n\t"		    \
			"4:"type##_sb("$1", "3(%2)")"\n\t"  \
			".set\tpop\n\t"			    \
			"li\t%0, 0\n"			    \
			"10:\n\t"			    \
			".insn\n\t"			    \
			".section\t.fixup,\"ax\"\n\t"	    \
			"11:\tli\t%0, %3\n\t"		    \
			"j\t10b\n\t"			    \
			".previous\n\t"			    \
			".section\t__ex_table,\"a\"\n\t"    \
			STR(PTR)"\t1b, 11b\n\t"		    \
			STR(PTR)"\t2b, 11b\n\t"		    \
			STR(PTR)"\t3b, 11b\n\t"		    \
			STR(PTR)"\t4b, 11b\n\t"		    \
			".previous"			    \
		: "=&r" (res)			    	    \
		: "r" (value), "r" (addr), "i" (-EFAULT)    \
		: "memory");                                \
} while(0)

/*
 * Little-endian, R6: store a doubleword one byte at a time, least
 * significant byte (addr) first.  Same fixup scheme as _StoreW.
 */
#define     _StoreDW(addr, value, res) \
do {                                                        \
		__asm__ __volatile__ (                      \
			".set\tpush\n\t"		    \
			".set\tnoat\n\t"		    \
			"1:sb\t%1, 0(%2)\n\t"    	    \
			"dsrl\t$1, %1, 0x8\n\t"		    \
			"2:sb\t$1, 1(%2)\n\t"    	    \
			"dsrl\t$1, $1, 0x8\n\t"		    \
			"3:sb\t$1, 2(%2)\n\t"    	    \
			"dsrl\t$1, $1, 0x8\n\t"		    \
			"4:sb\t$1, 3(%2)\n\t"    	    \
			"dsrl\t$1, $1, 0x8\n\t"		    \
			"5:sb\t$1, 4(%2)\n\t"    	    \
			"dsrl\t$1, $1, 0x8\n\t"		    \
			"6:sb\t$1, 5(%2)\n\t"    	    \
			"dsrl\t$1, $1, 0x8\n\t"		    \
			"7:sb\t$1, 6(%2)\n\t"    	    \
			"dsrl\t$1, $1, 0x8\n\t"		    \
			"8:sb\t$1, 7(%2)\n\t"    	    \
			"dsrl\t$1, $1, 0x8\n\t"		    \
			".set\tpop\n\t"			    \
			"li\t%0, 0\n"			    \
			"10:\n\t"			    \
			".insn\n\t"			    \
			".section\t.fixup,\"ax\"\n\t"	    \
			"11:\tli\t%0, %3\n\t"		    \
			"j\t10b\n\t"			    \
			".previous\n\t"			    \
			".section\t__ex_table,\"a\"\n\t"    \
			STR(PTR)"\t1b, 11b\n\t"		    \
			STR(PTR)"\t2b, 11b\n\t"		    \
			STR(PTR)"\t3b, 11b\n\t"		    \
			STR(PTR)"\t4b, 11b\n\t"		    \
			STR(PTR)"\t5b, 11b\n\t"		    \
			STR(PTR)"\t6b, 11b\n\t"		    \
			STR(PTR)"\t7b, 11b\n\t"		    \
			STR(PTR)"\t8b, 11b\n\t"		    \
			".previous"			    \
		: "=&r" (res)			    	    \
		: "r" (value), "r" (addr), "i" (-EFAULT)    \
		: "memory");                                \
} while(0)

#endif /* CONFIG_CPU_MIPSR6 */
864 #endif
865 
866 #ifdef CONFIG_64BIT
/*
 * dmtc1 - move a 64-bit GPR value into FP register 'reg'.
 *
 * The dmtc1 instruction encodes its target FPR as an immediate field,
 * so a switch over all 32 registers is needed to get a literal register
 * number into each asm statement.  ".set hardfloat" lets the FPU
 * instruction assemble even under a soft-float build.  A 'reg' value
 * outside 0..31 is silently ignored (no default case).
 */
static inline void dmtc1(unsigned long val, unsigned reg)
{
	switch (reg) {
	case 0: __asm__ __volatile__ (".set push\n.set hardfloat\ndmtc1\t%0,$0\n.set pop"::"r"(val)); break;
	case 1: __asm__ __volatile__ (".set push\n.set hardfloat\ndmtc1\t%0,$1\n.set pop"::"r"(val)); break;
	case 2: __asm__ __volatile__ (".set push\n.set hardfloat\ndmtc1\t%0,$2\n.set pop"::"r"(val)); break;
	case 3: __asm__ __volatile__ (".set push\n.set hardfloat\ndmtc1\t%0,$3\n.set pop"::"r"(val)); break;
	case 4: __asm__ __volatile__ (".set push\n.set hardfloat\ndmtc1\t%0,$4\n.set pop"::"r"(val)); break;
	case 5: __asm__ __volatile__ (".set push\n.set hardfloat\ndmtc1\t%0,$5\n.set pop"::"r"(val)); break;
	case 6: __asm__ __volatile__ (".set push\n.set hardfloat\ndmtc1\t%0,$6\n.set pop"::"r"(val)); break;
	case 7: __asm__ __volatile__ (".set push\n.set hardfloat\ndmtc1\t%0,$7\n.set pop"::"r"(val)); break;
	case 8: __asm__ __volatile__ (".set push\n.set hardfloat\ndmtc1\t%0,$8\n.set pop"::"r"(val)); break;
	case 9: __asm__ __volatile__ (".set push\n.set hardfloat\ndmtc1\t%0,$9\n.set pop"::"r"(val)); break;
	case 10: __asm__ __volatile__ (".set push\n.set hardfloat\ndmtc1\t%0,$10\n.set pop"::"r"(val)); break;
	case 11: __asm__ __volatile__ (".set push\n.set hardfloat\ndmtc1\t%0,$11\n.set pop"::"r"(val)); break;
	case 12: __asm__ __volatile__ (".set push\n.set hardfloat\ndmtc1\t%0,$12\n.set pop"::"r"(val)); break;
	case 13: __asm__ __volatile__ (".set push\n.set hardfloat\ndmtc1\t%0,$13\n.set pop"::"r"(val)); break;
	case 14: __asm__ __volatile__ (".set push\n.set hardfloat\ndmtc1\t%0,$14\n.set pop"::"r"(val)); break;
	case 15: __asm__ __volatile__ (".set push\n.set hardfloat\ndmtc1\t%0,$15\n.set pop"::"r"(val)); break;
	case 16: __asm__ __volatile__ (".set push\n.set hardfloat\ndmtc1\t%0,$16\n.set pop"::"r"(val)); break;
	case 17: __asm__ __volatile__ (".set push\n.set hardfloat\ndmtc1\t%0,$17\n.set pop"::"r"(val)); break;
	case 18: __asm__ __volatile__ (".set push\n.set hardfloat\ndmtc1\t%0,$18\n.set pop"::"r"(val)); break;
	case 19: __asm__ __volatile__ (".set push\n.set hardfloat\ndmtc1\t%0,$19\n.set pop"::"r"(val)); break;
	case 20: __asm__ __volatile__ (".set push\n.set hardfloat\ndmtc1\t%0,$20\n.set pop"::"r"(val)); break;
	case 21: __asm__ __volatile__ (".set push\n.set hardfloat\ndmtc1\t%0,$21\n.set pop"::"r"(val)); break;
	case 22: __asm__ __volatile__ (".set push\n.set hardfloat\ndmtc1\t%0,$22\n.set pop"::"r"(val)); break;
	case 23: __asm__ __volatile__ (".set push\n.set hardfloat\ndmtc1\t%0,$23\n.set pop"::"r"(val)); break;
	case 24: __asm__ __volatile__ (".set push\n.set hardfloat\ndmtc1\t%0,$24\n.set pop"::"r"(val)); break;
	case 25: __asm__ __volatile__ (".set push\n.set hardfloat\ndmtc1\t%0,$25\n.set pop"::"r"(val)); break;
	case 26: __asm__ __volatile__ (".set push\n.set hardfloat\ndmtc1\t%0,$26\n.set pop"::"r"(val)); break;
	case 27: __asm__ __volatile__ (".set push\n.set hardfloat\ndmtc1\t%0,$27\n.set pop"::"r"(val)); break;
	case 28: __asm__ __volatile__ (".set push\n.set hardfloat\ndmtc1\t%0,$28\n.set pop"::"r"(val)); break;
	case 29: __asm__ __volatile__ (".set push\n.set hardfloat\ndmtc1\t%0,$29\n.set pop"::"r"(val)); break;
	case 30: __asm__ __volatile__ (".set push\n.set hardfloat\ndmtc1\t%0,$30\n.set pop"::"r"(val)); break;
	case 31: __asm__ __volatile__ (".set push\n.set hardfloat\ndmtc1\t%0,$31\n.set pop"::"r"(val)); break;
	}
}
904 
/*
 * dmfc1 - read FP register 'reg' into a 64-bit GPR value.
 *
 * Counterpart of dmtc1() above; the same 32-way switch is needed
 * because the FPR number is an immediate in the dmfc1 encoding.
 * uninitialized_var() suppresses the false "may be used uninitialized"
 * warning - 'val' is only meaningful for reg 0..31; for any other
 * value the switch falls through and an indeterminate value is
 * returned (callers are expected to pass a valid register number).
 */
static inline unsigned long dmfc1(unsigned reg)
{
	unsigned long uninitialized_var(val);

	switch (reg) {
	case 0: __asm__ __volatile__ (".set push\n.set hardfloat\ndmfc1\t%0,$0\n.set pop":"=r"(val)); break;
	case 1: __asm__ __volatile__ (".set push\n.set hardfloat\ndmfc1\t%0,$1\n.set pop":"=r"(val)); break;
	case 2: __asm__ __volatile__ (".set push\n.set hardfloat\ndmfc1\t%0,$2\n.set pop":"=r"(val)); break;
	case 3: __asm__ __volatile__ (".set push\n.set hardfloat\ndmfc1\t%0,$3\n.set pop":"=r"(val)); break;
	case 4: __asm__ __volatile__ (".set push\n.set hardfloat\ndmfc1\t%0,$4\n.set pop":"=r"(val)); break;
	case 5: __asm__ __volatile__ (".set push\n.set hardfloat\ndmfc1\t%0,$5\n.set pop":"=r"(val)); break;
	case 6: __asm__ __volatile__ (".set push\n.set hardfloat\ndmfc1\t%0,$6\n.set pop":"=r"(val)); break;
	case 7: __asm__ __volatile__ (".set push\n.set hardfloat\ndmfc1\t%0,$7\n.set pop":"=r"(val)); break;
	case 8: __asm__ __volatile__ (".set push\n.set hardfloat\ndmfc1\t%0,$8\n.set pop":"=r"(val)); break;
	case 9: __asm__ __volatile__ (".set push\n.set hardfloat\ndmfc1\t%0,$9\n.set pop":"=r"(val)); break;
	case 10: __asm__ __volatile__ (".set push\n.set hardfloat\ndmfc1\t%0,$10\n.set pop":"=r"(val)); break;
	case 11: __asm__ __volatile__ (".set push\n.set hardfloat\ndmfc1\t%0,$11\n.set pop":"=r"(val)); break;
	case 12: __asm__ __volatile__ (".set push\n.set hardfloat\ndmfc1\t%0,$12\n.set pop":"=r"(val)); break;
	case 13: __asm__ __volatile__ (".set push\n.set hardfloat\ndmfc1\t%0,$13\n.set pop":"=r"(val)); break;
	case 14: __asm__ __volatile__ (".set push\n.set hardfloat\ndmfc1\t%0,$14\n.set pop":"=r"(val)); break;
	case 15: __asm__ __volatile__ (".set push\n.set hardfloat\ndmfc1\t%0,$15\n.set pop":"=r"(val)); break;
	case 16: __asm__ __volatile__ (".set push\n.set hardfloat\ndmfc1\t%0,$16\n.set pop":"=r"(val)); break;
	case 17: __asm__ __volatile__ (".set push\n.set hardfloat\ndmfc1\t%0,$17\n.set pop":"=r"(val)); break;
	case 18: __asm__ __volatile__ (".set push\n.set hardfloat\ndmfc1\t%0,$18\n.set pop":"=r"(val)); break;
	case 19: __asm__ __volatile__ (".set push\n.set hardfloat\ndmfc1\t%0,$19\n.set pop":"=r"(val)); break;
	case 20: __asm__ __volatile__ (".set push\n.set hardfloat\ndmfc1\t%0,$20\n.set pop":"=r"(val)); break;
	case 21: __asm__ __volatile__ (".set push\n.set hardfloat\ndmfc1\t%0,$21\n.set pop":"=r"(val)); break;
	case 22: __asm__ __volatile__ (".set push\n.set hardfloat\ndmfc1\t%0,$22\n.set pop":"=r"(val)); break;
	case 23: __asm__ __volatile__ (".set push\n.set hardfloat\ndmfc1\t%0,$23\n.set pop":"=r"(val)); break;
	case 24: __asm__ __volatile__ (".set push\n.set hardfloat\ndmfc1\t%0,$24\n.set pop":"=r"(val)); break;
	case 25: __asm__ __volatile__ (".set push\n.set hardfloat\ndmfc1\t%0,$25\n.set pop":"=r"(val)); break;
	case 26: __asm__ __volatile__ (".set push\n.set hardfloat\ndmfc1\t%0,$26\n.set pop":"=r"(val)); break;
	case 27: __asm__ __volatile__ (".set push\n.set hardfloat\ndmfc1\t%0,$27\n.set pop":"=r"(val)); break;
	case 28: __asm__ __volatile__ (".set push\n.set hardfloat\ndmfc1\t%0,$28\n.set pop":"=r"(val)); break;
	case 29: __asm__ __volatile__ (".set push\n.set hardfloat\ndmfc1\t%0,$29\n.set pop":"=r"(val)); break;
	case 30: __asm__ __volatile__ (".set push\n.set hardfloat\ndmfc1\t%0,$30\n.set pop":"=r"(val)); break;
	case 31: __asm__ __volatile__ (".set push\n.set hardfloat\ndmfc1\t%0,$31\n.set pop":"=r"(val)); break;
	}

	return val;
}
946 #else /* !CONFIG_64BIT */
947 
948 #if !defined(CONFIG_CPU_MIPSR1) && !defined(CONFIG_CPU_R4X00)
/*
 * mtc1_mthc1 - write a 64-bit FP value as two 32-bit halves (32-bit
 * kernel, FPU with mthc1 available - hence the !MIPSR1 && !R4X00 guard).
 *
 * mtc1 writes the low 32 bits of FPR 'reg' and mthc1 writes the high
 * 32 bits.  'val' and 'val2' are the two memory words of the value;
 * the operand order is swapped between the endian variants below so
 * that the correct word lands in each half.
 *
 * NOTE(review): the switch's closing '}' sits inside each endian
 * #ifdef block (unlike mfc1_mfhc1 below, which closes it once after
 * both) - unusual but preprocesses to valid code either way.
 * 'reg' values outside 0..31 are silently ignored.
 */
static inline void mtc1_mthc1(unsigned long val, unsigned long val2, unsigned reg)
{
	switch (reg) {
#ifdef __BIG_ENDIAN
	case 0:  __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$0\n\tmthc1\t%1,$0\n.set pop"::"r"(val2),"r"(val)); break;
	case 1:  __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$1\n\tmthc1\t%1,$1\n.set pop"::"r"(val2),"r"(val)); break;
	case 2:  __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$2\n\tmthc1\t%1,$2\n.set pop"::"r"(val2),"r"(val)); break;
	case 3:  __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$3\n\tmthc1\t%1,$3\n.set pop"::"r"(val2),"r"(val)); break;
	case 4:  __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$4\n\tmthc1\t%1,$4\n.set pop"::"r"(val2),"r"(val)); break;
	case 5:  __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$5\n\tmthc1\t%1,$5\n.set pop"::"r"(val2),"r"(val)); break;
	case 6:  __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$6\n\tmthc1\t%1,$6\n.set pop"::"r"(val2),"r"(val)); break;
	case 7:  __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$7\n\tmthc1\t%1,$7\n.set pop"::"r"(val2),"r"(val)); break;
	case 8:  __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$8\n\tmthc1\t%1,$8\n.set pop"::"r"(val2),"r"(val)); break;
	case 9:  __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$9\n\tmthc1\t%1,$9\n.set pop"::"r"(val2),"r"(val)); break;
	case 10: __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$10\n\tmthc1\t%1,$10\n.set pop"::"r"(val2),"r"(val)); break;
	case 11: __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$11\n\tmthc1\t%1,$11\n.set pop"::"r"(val2),"r"(val)); break;
	case 12: __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$12\n\tmthc1\t%1,$12\n.set pop"::"r"(val2),"r"(val)); break;
	case 13: __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$13\n\tmthc1\t%1,$13\n.set pop"::"r"(val2),"r"(val)); break;
	case 14: __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$14\n\tmthc1\t%1,$14\n.set pop"::"r"(val2),"r"(val)); break;
	case 15: __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$15\n\tmthc1\t%1,$15\n.set pop"::"r"(val2),"r"(val)); break;
	case 16: __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$16\n\tmthc1\t%1,$16\n.set pop"::"r"(val2),"r"(val)); break;
	case 17: __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$17\n\tmthc1\t%1,$17\n.set pop"::"r"(val2),"r"(val)); break;
	case 18: __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$18\n\tmthc1\t%1,$18\n.set pop"::"r"(val2),"r"(val)); break;
	case 19: __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$19\n\tmthc1\t%1,$19\n.set pop"::"r"(val2),"r"(val)); break;
	case 20: __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$20\n\tmthc1\t%1,$20\n.set pop"::"r"(val2),"r"(val)); break;
	case 21: __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$21\n\tmthc1\t%1,$21\n.set pop"::"r"(val2),"r"(val)); break;
	case 22: __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$22\n\tmthc1\t%1,$22\n.set pop"::"r"(val2),"r"(val)); break;
	case 23: __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$23\n\tmthc1\t%1,$23\n.set pop"::"r"(val2),"r"(val)); break;
	case 24: __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$24\n\tmthc1\t%1,$24\n.set pop"::"r"(val2),"r"(val)); break;
	case 25: __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$25\n\tmthc1\t%1,$25\n.set pop"::"r"(val2),"r"(val)); break;
	case 26: __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$26\n\tmthc1\t%1,$26\n.set pop"::"r"(val2),"r"(val)); break;
	case 27: __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$27\n\tmthc1\t%1,$27\n.set pop"::"r"(val2),"r"(val)); break;
	case 28: __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$28\n\tmthc1\t%1,$28\n.set pop"::"r"(val2),"r"(val)); break;
	case 29: __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$29\n\tmthc1\t%1,$29\n.set pop"::"r"(val2),"r"(val)); break;
	case 30: __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$30\n\tmthc1\t%1,$30\n.set pop"::"r"(val2),"r"(val)); break;
	case 31: __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$31\n\tmthc1\t%1,$31\n.set pop"::"r"(val2),"r"(val)); break;
	}
#endif
#ifdef __LITTLE_ENDIAN
	case 0:  __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$0\n\tmthc1\t%1,$0\n.set pop"::"r"(val),"r"(val2)); break;
	case 1:  __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$1\n\tmthc1\t%1,$1\n.set pop"::"r"(val),"r"(val2)); break;
	case 2:  __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$2\n\tmthc1\t%1,$2\n.set pop"::"r"(val),"r"(val2)); break;
	case 3:  __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$3\n\tmthc1\t%1,$3\n.set pop"::"r"(val),"r"(val2)); break;
	case 4:  __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$4\n\tmthc1\t%1,$4\n.set pop"::"r"(val),"r"(val2)); break;
	case 5:  __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$5\n\tmthc1\t%1,$5\n.set pop"::"r"(val),"r"(val2)); break;
	case 6:  __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$6\n\tmthc1\t%1,$6\n.set pop"::"r"(val),"r"(val2)); break;
	case 7:  __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$7\n\tmthc1\t%1,$7\n.set pop"::"r"(val),"r"(val2)); break;
	case 8:  __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$8\n\tmthc1\t%1,$8\n.set pop"::"r"(val),"r"(val2)); break;
	case 9:  __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$9\n\tmthc1\t%1,$9\n.set pop"::"r"(val),"r"(val2)); break;
	case 10: __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$10\n\tmthc1\t%1,$10\n.set pop"::"r"(val),"r"(val2)); break;
	case 11: __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$11\n\tmthc1\t%1,$11\n.set pop"::"r"(val),"r"(val2)); break;
	case 12: __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$12\n\tmthc1\t%1,$12\n.set pop"::"r"(val),"r"(val2)); break;
	case 13: __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$13\n\tmthc1\t%1,$13\n.set pop"::"r"(val),"r"(val2)); break;
	case 14: __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$14\n\tmthc1\t%1,$14\n.set pop"::"r"(val),"r"(val2)); break;
	case 15: __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$15\n\tmthc1\t%1,$15\n.set pop"::"r"(val),"r"(val2)); break;
	case 16: __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$16\n\tmthc1\t%1,$16\n.set pop"::"r"(val),"r"(val2)); break;
	case 17: __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$17\n\tmthc1\t%1,$17\n.set pop"::"r"(val),"r"(val2)); break;
	case 18: __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$18\n\tmthc1\t%1,$18\n.set pop"::"r"(val),"r"(val2)); break;
	case 19: __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$19\n\tmthc1\t%1,$19\n.set pop"::"r"(val),"r"(val2)); break;
	case 20: __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$20\n\tmthc1\t%1,$20\n.set pop"::"r"(val),"r"(val2)); break;
	case 21: __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$21\n\tmthc1\t%1,$21\n.set pop"::"r"(val),"r"(val2)); break;
	case 22: __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$22\n\tmthc1\t%1,$22\n.set pop"::"r"(val),"r"(val2)); break;
	case 23: __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$23\n\tmthc1\t%1,$23\n.set pop"::"r"(val),"r"(val2)); break;
	case 24: __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$24\n\tmthc1\t%1,$24\n.set pop"::"r"(val),"r"(val2)); break;
	case 25: __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$25\n\tmthc1\t%1,$25\n.set pop"::"r"(val),"r"(val2)); break;
	case 26: __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$26\n\tmthc1\t%1,$26\n.set pop"::"r"(val),"r"(val2)); break;
	case 27: __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$27\n\tmthc1\t%1,$27\n.set pop"::"r"(val),"r"(val2)); break;
	case 28: __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$28\n\tmthc1\t%1,$28\n.set pop"::"r"(val),"r"(val2)); break;
	case 29: __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$29\n\tmthc1\t%1,$29\n.set pop"::"r"(val),"r"(val2)); break;
	case 30: __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$30\n\tmthc1\t%1,$30\n.set pop"::"r"(val),"r"(val2)); break;
	case 31: __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$31\n\tmthc1\t%1,$31\n.set pop"::"r"(val),"r"(val2)); break;
	}
#endif
}
1023 
/*
 * mfc1_mfhc1 - read a 64-bit FP register as two 32-bit halves (32-bit
 * kernel counterpart of mtc1_mthc1 above).
 *
 * mfc1 reads the low 32 bits of FPR 'reg' and mfhc1 the high 32 bits;
 * the output operand order is swapped between the endian variants so
 * that *val/*val2 receive the words in memory order.  The results are
 * staged in locals (lval/lval2) and stored through the pointers after
 * the switch; for a 'reg' outside 0..31 the stored values are
 * indeterminate (uninitialized_var() silences the compiler warning).
 */
static inline void mfc1_mfhc1(unsigned long *val, unsigned long *val2, unsigned reg)
{
	unsigned long uninitialized_var(lval), uninitialized_var(lval2);

	switch (reg) {
#ifdef __BIG_ENDIAN
	case 0:  __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$0\n\tmfhc1\t%1,$0\n.set pop":"=r"(lval2),"=r"(lval)); break;
	case 1:  __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$1\n\tmfhc1\t%1,$1\n.set pop":"=r"(lval2),"=r"(lval)); break;
	case 2:  __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$2\n\tmfhc1\t%1,$2\n.set pop":"=r"(lval2),"=r"(lval)); break;
	case 3:  __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$3\n\tmfhc1\t%1,$3\n.set pop":"=r"(lval2),"=r"(lval)); break;
	case 4:  __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$4\n\tmfhc1\t%1,$4\n.set pop":"=r"(lval2),"=r"(lval)); break;
	case 5:  __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$5\n\tmfhc1\t%1,$5\n.set pop":"=r"(lval2),"=r"(lval)); break;
	case 6:  __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$6\n\tmfhc1\t%1,$6\n.set pop":"=r"(lval2),"=r"(lval)); break;
	case 7:  __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$7\n\tmfhc1\t%1,$7\n.set pop":"=r"(lval2),"=r"(lval)); break;
	case 8:  __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$8\n\tmfhc1\t%1,$8\n.set pop":"=r"(lval2),"=r"(lval)); break;
	case 9:  __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$9\n\tmfhc1\t%1,$9\n.set pop":"=r"(lval2),"=r"(lval)); break;
	case 10: __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$10\n\tmfhc1\t%1,$10\n.set pop":"=r"(lval2),"=r"(lval)); break;
	case 11: __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$11\n\tmfhc1\t%1,$11\n.set pop":"=r"(lval2),"=r"(lval)); break;
	case 12: __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$12\n\tmfhc1\t%1,$12\n.set pop":"=r"(lval2),"=r"(lval)); break;
	case 13: __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$13\n\tmfhc1\t%1,$13\n.set pop":"=r"(lval2),"=r"(lval)); break;
	case 14: __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$14\n\tmfhc1\t%1,$14\n.set pop":"=r"(lval2),"=r"(lval)); break;
	case 15: __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$15\n\tmfhc1\t%1,$15\n.set pop":"=r"(lval2),"=r"(lval)); break;
	case 16: __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$16\n\tmfhc1\t%1,$16\n.set pop":"=r"(lval2),"=r"(lval)); break;
	case 17: __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$17\n\tmfhc1\t%1,$17\n.set pop":"=r"(lval2),"=r"(lval)); break;
	case 18: __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$18\n\tmfhc1\t%1,$18\n.set pop":"=r"(lval2),"=r"(lval)); break;
	case 19: __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$19\n\tmfhc1\t%1,$19\n.set pop":"=r"(lval2),"=r"(lval)); break;
	case 20: __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$20\n\tmfhc1\t%1,$20\n.set pop":"=r"(lval2),"=r"(lval)); break;
	case 21: __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$21\n\tmfhc1\t%1,$21\n.set pop":"=r"(lval2),"=r"(lval)); break;
	case 22: __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$22\n\tmfhc1\t%1,$22\n.set pop":"=r"(lval2),"=r"(lval)); break;
	case 23: __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$23\n\tmfhc1\t%1,$23\n.set pop":"=r"(lval2),"=r"(lval)); break;
	case 24: __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$24\n\tmfhc1\t%1,$24\n.set pop":"=r"(lval2),"=r"(lval)); break;
	case 25: __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$25\n\tmfhc1\t%1,$25\n.set pop":"=r"(lval2),"=r"(lval)); break;
	case 26: __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$26\n\tmfhc1\t%1,$26\n.set pop":"=r"(lval2),"=r"(lval)); break;
	case 27: __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$27\n\tmfhc1\t%1,$27\n.set pop":"=r"(lval2),"=r"(lval)); break;
	case 28: __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$28\n\tmfhc1\t%1,$28\n.set pop":"=r"(lval2),"=r"(lval)); break;
	case 29: __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$29\n\tmfhc1\t%1,$29\n.set pop":"=r"(lval2),"=r"(lval)); break;
	case 30: __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$30\n\tmfhc1\t%1,$30\n.set pop":"=r"(lval2),"=r"(lval)); break;
	case 31: __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$31\n\tmfhc1\t%1,$31\n.set pop":"=r"(lval2),"=r"(lval)); break;
#endif
#ifdef __LITTLE_ENDIAN
	case 0:  __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$0\n\tmfhc1\t%1,$0\n.set pop":"=r"(lval),"=r"(lval2)); break;
	case 1:  __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$1\n\tmfhc1\t%1,$1\n.set pop":"=r"(lval),"=r"(lval2)); break;
	case 2:  __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$2\n\tmfhc1\t%1,$2\n.set pop":"=r"(lval),"=r"(lval2)); break;
	case 3:  __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$3\n\tmfhc1\t%1,$3\n.set pop":"=r"(lval),"=r"(lval2)); break;
	case 4:  __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$4\n\tmfhc1\t%1,$4\n.set pop":"=r"(lval),"=r"(lval2)); break;
	case 5:  __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$5\n\tmfhc1\t%1,$5\n.set pop":"=r"(lval),"=r"(lval2)); break;
	case 6:  __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$6\n\tmfhc1\t%1,$6\n.set pop":"=r"(lval),"=r"(lval2)); break;
	case 7:  __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$7\n\tmfhc1\t%1,$7\n.set pop":"=r"(lval),"=r"(lval2)); break;
	case 8:  __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$8\n\tmfhc1\t%1,$8\n.set pop":"=r"(lval),"=r"(lval2)); break;
	case 9:  __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$9\n\tmfhc1\t%1,$9\n.set pop":"=r"(lval),"=r"(lval2)); break;
	case 10: __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$10\n\tmfhc1\t%1,$10\n.set pop":"=r"(lval),"=r"(lval2)); break;
	case 11: __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$11\n\tmfhc1\t%1,$11\n.set pop":"=r"(lval),"=r"(lval2)); break;
	case 12: __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$12\n\tmfhc1\t%1,$12\n.set pop":"=r"(lval),"=r"(lval2)); break;
	case 13: __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$13\n\tmfhc1\t%1,$13\n.set pop":"=r"(lval),"=r"(lval2)); break;
	case 14: __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$14\n\tmfhc1\t%1,$14\n.set pop":"=r"(lval),"=r"(lval2)); break;
	case 15: __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$15\n\tmfhc1\t%1,$15\n.set pop":"=r"(lval),"=r"(lval2)); break;
	case 16: __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$16\n\tmfhc1\t%1,$16\n.set pop":"=r"(lval),"=r"(lval2)); break;
	case 17: __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$17\n\tmfhc1\t%1,$17\n.set pop":"=r"(lval),"=r"(lval2)); break;
	case 18: __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$18\n\tmfhc1\t%1,$18\n.set pop":"=r"(lval),"=r"(lval2)); break;
	case 19: __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$19\n\tmfhc1\t%1,$19\n.set pop":"=r"(lval),"=r"(lval2)); break;
	case 20: __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$20\n\tmfhc1\t%1,$20\n.set pop":"=r"(lval),"=r"(lval2)); break;
	case 21: __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$21\n\tmfhc1\t%1,$21\n.set pop":"=r"(lval),"=r"(lval2)); break;
	case 22: __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$22\n\tmfhc1\t%1,$22\n.set pop":"=r"(lval),"=r"(lval2)); break;
	case 23: __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$23\n\tmfhc1\t%1,$23\n.set pop":"=r"(lval),"=r"(lval2)); break;
	case 24: __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$24\n\tmfhc1\t%1,$24\n.set pop":"=r"(lval),"=r"(lval2)); break;
	case 25: __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$25\n\tmfhc1\t%1,$25\n.set pop":"=r"(lval),"=r"(lval2)); break;
	case 26: __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$26\n\tmfhc1\t%1,$26\n.set pop":"=r"(lval),"=r"(lval2)); break;
	case 27: __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$27\n\tmfhc1\t%1,$27\n.set pop":"=r"(lval),"=r"(lval2)); break;
	case 28: __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$28\n\tmfhc1\t%1,$28\n.set pop":"=r"(lval),"=r"(lval2)); break;
	case 29: __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$29\n\tmfhc1\t%1,$29\n.set pop":"=r"(lval),"=r"(lval2)); break;
	case 30: __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$30\n\tmfhc1\t%1,$30\n.set pop":"=r"(lval),"=r"(lval2)); break;
	case 31: __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$31\n\tmfhc1\t%1,$31\n.set pop":"=r"(lval),"=r"(lval2)); break;
#endif
	}
	*val = lval;
	*val2 = lval2;
}
1101 #endif /* CONFIG_CPU_MIPSR1 */
1102 #endif /* CONFIG_64BIT */
1103 
/*
 * mtc1_pair - write a 64-bit FP value into an even/odd FPR pair with
 * two mtc1 instructions (FR=0 register model: a double lives in
 * registers N and N+1, hence 'reg & ~0x1' to round down to the even
 * register and cases for even registers only).
 *
 * As with mtc1_mthc1 above, the val/val2 operand order is swapped
 * between the endian variants so the correct word lands in each
 * register.  An out-of-range 'reg' is silently ignored.
 */
static inline void mtc1_pair(unsigned long val, unsigned long val2, unsigned reg)
{
	switch (reg & ~0x1) {
#ifdef __BIG_ENDIAN
	case 0:  __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$0\n\tmtc1\t%1,$1\n.set pop"::"r"(val2),"r"(val)); break;
	case 2:  __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$2\n\tmtc1\t%1,$3\n.set pop"::"r"(val2),"r"(val)); break;
	case 4:  __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$4\n\tmtc1\t%1,$5\n.set pop"::"r"(val2),"r"(val)); break;
	case 6:  __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$6\n\tmtc1\t%1,$7\n.set pop"::"r"(val2),"r"(val)); break;
	case 8:  __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$8\n\tmtc1\t%1,$9\n.set pop"::"r"(val2),"r"(val)); break;
	case 10: __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$10\n\tmtc1\t%1,$11\n.set pop"::"r"(val2),"r"(val)); break;
	case 12: __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$12\n\tmtc1\t%1,$13\n.set pop"::"r"(val2),"r"(val)); break;
	case 14: __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$14\n\tmtc1\t%1,$15\n.set pop"::"r"(val2),"r"(val)); break;
	case 16: __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$16\n\tmtc1\t%1,$17\n.set pop"::"r"(val2),"r"(val)); break;
	case 18: __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$18\n\tmtc1\t%1,$19\n.set pop"::"r"(val2),"r"(val)); break;
	case 20: __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$20\n\tmtc1\t%1,$21\n.set pop"::"r"(val2),"r"(val)); break;
	case 22: __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$22\n\tmtc1\t%1,$23\n.set pop"::"r"(val2),"r"(val)); break;
	case 24: __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$24\n\tmtc1\t%1,$25\n.set pop"::"r"(val2),"r"(val)); break;
	case 26: __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$26\n\tmtc1\t%1,$27\n.set pop"::"r"(val2),"r"(val)); break;
	case 28: __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$28\n\tmtc1\t%1,$29\n.set pop"::"r"(val2),"r"(val)); break;
	case 30: __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$30\n\tmtc1\t%1,$31\n.set pop"::"r"(val2),"r"(val)); break;
#endif
#ifdef __LITTLE_ENDIAN
	case 0:  __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$0\n\tmtc1\t%1,$1\n.set pop"::"r"(val),"r"(val2)); break;
	case 2:  __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$2\n\tmtc1\t%1,$3\n.set pop"::"r"(val),"r"(val2)); break;
	case 4:  __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$4\n\tmtc1\t%1,$5\n.set pop"::"r"(val),"r"(val2)); break;
	case 6:  __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$6\n\tmtc1\t%1,$7\n.set pop"::"r"(val),"r"(val2)); break;
	case 8:  __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$8\n\tmtc1\t%1,$9\n.set pop"::"r"(val),"r"(val2)); break;
	case 10: __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$10\n\tmtc1\t%1,$11\n.set pop"::"r"(val),"r"(val2)); break;
	case 12: __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$12\n\tmtc1\t%1,$13\n.set pop"::"r"(val),"r"(val2)); break;
	case 14: __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$14\n\tmtc1\t%1,$15\n.set pop"::"r"(val),"r"(val2)); break;
	case 16: __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$16\n\tmtc1\t%1,$17\n.set pop"::"r"(val),"r"(val2)); break;
	case 18: __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$18\n\tmtc1\t%1,$19\n.set pop"::"r"(val),"r"(val2)); break;
	case 20: __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$20\n\tmtc1\t%1,$21\n.set pop"::"r"(val),"r"(val2)); break;
	case 22: __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$22\n\tmtc1\t%1,$23\n.set pop"::"r"(val),"r"(val2)); break;
	case 24: __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$24\n\tmtc1\t%1,$25\n.set pop"::"r"(val),"r"(val2)); break;
	case 26: __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$26\n\tmtc1\t%1,$27\n.set pop"::"r"(val),"r"(val2)); break;
	case 28: __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$28\n\tmtc1\t%1,$29\n.set pop"::"r"(val),"r"(val2)); break;
	case 30: __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$30\n\tmtc1\t%1,$31\n.set pop"::"r"(val),"r"(val2)); break;
#endif
	}
}
1145 
/*
 * mfc1_pair() - read an even/odd pair of single-precision FPU registers.
 * @val:  receives the low (memory-order) word of the pair
 * @val2: receives the high (memory-order) word of the pair
 * @reg:  FPU register number; rounded down to the even member of the pair
 *
 * Counterpart of mtc1_pair() for the FR=0 register model, where a 64-bit
 * FP value occupies an even/odd pair of 32-bit registers (callers invoke
 * this only when ST0_FR is clear).  The %0/%1 operand bindings are swapped
 * between the big- and little-endian variants so that *val always ends up
 * holding the word that belongs at the lower memory address.
 *
 * The ".set hardfloat" push/pop lets the mfc1s assemble even when the
 * kernel is otherwise built soft-float.
 */
static inline void mfc1_pair(unsigned long *val, unsigned long *val2, unsigned reg)
{
	unsigned long uninitialized_var(lval), uninitialized_var(lval2);

	/* ~0x1 selects the even register of the (reg, reg+1) pair. */
	switch (reg & ~0x1) {
#ifdef __BIG_ENDIAN
	case 0:  __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$0\n\tmfc1\t%1,$1\n.set pop":"=r"(lval2),"=r"(lval)); break;
	case 2:  __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$2\n\tmfc1\t%1,$3\n.set pop":"=r"(lval2),"=r"(lval)); break;
	case 4:  __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$4\n\tmfc1\t%1,$5\n.set pop":"=r"(lval2),"=r"(lval)); break;
	case 6:  __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$6\n\tmfc1\t%1,$7\n.set pop":"=r"(lval2),"=r"(lval)); break;
	case 8:  __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$8\n\tmfc1\t%1,$9\n.set pop":"=r"(lval2),"=r"(lval)); break;
	case 10: __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$10\n\tmfc1\t%1,$11\n.set pop":"=r"(lval2),"=r"(lval)); break;
	case 12: __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$12\n\tmfc1\t%1,$13\n.set pop":"=r"(lval2),"=r"(lval)); break;
	case 14: __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$14\n\tmfc1\t%1,$15\n.set pop":"=r"(lval2),"=r"(lval)); break;
	case 16: __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$16\n\tmfc1\t%1,$17\n.set pop":"=r"(lval2),"=r"(lval)); break;
	case 18: __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$18\n\tmfc1\t%1,$19\n.set pop":"=r"(lval2),"=r"(lval)); break;
	case 20: __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$20\n\tmfc1\t%1,$21\n.set pop":"=r"(lval2),"=r"(lval)); break;
	case 22: __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$22\n\tmfc1\t%1,$23\n.set pop":"=r"(lval2),"=r"(lval)); break;
	case 24: __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$24\n\tmfc1\t%1,$25\n.set pop":"=r"(lval2),"=r"(lval)); break;
	case 26: __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$26\n\tmfc1\t%1,$27\n.set pop":"=r"(lval2),"=r"(lval)); break;
	case 28: __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$28\n\tmfc1\t%1,$29\n.set pop":"=r"(lval2),"=r"(lval)); break;
	case 30: __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$30\n\tmfc1\t%1,$31\n.set pop":"=r"(lval2),"=r"(lval)); break;
#endif
#ifdef __LITTLE_ENDIAN
	case 0:  __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$0\n\tmfc1\t%1,$1\n.set pop":"=r"(lval),"=r"(lval2)); break;
	case 2:  __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$2\n\tmfc1\t%1,$3\n.set pop":"=r"(lval),"=r"(lval2)); break;
	case 4:  __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$4\n\tmfc1\t%1,$5\n.set pop":"=r"(lval),"=r"(lval2)); break;
	case 6:  __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$6\n\tmfc1\t%1,$7\n.set pop":"=r"(lval),"=r"(lval2)); break;
	case 8:  __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$8\n\tmfc1\t%1,$9\n.set pop":"=r"(lval),"=r"(lval2)); break;
	case 10: __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$10\n\tmfc1\t%1,$11\n.set pop":"=r"(lval),"=r"(lval2)); break;
	case 12: __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$12\n\tmfc1\t%1,$13\n.set pop":"=r"(lval),"=r"(lval2)); break;
	case 14: __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$14\n\tmfc1\t%1,$15\n.set pop":"=r"(lval),"=r"(lval2)); break;
	case 16: __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$16\n\tmfc1\t%1,$17\n.set pop":"=r"(lval),"=r"(lval2)); break;
	case 18: __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$18\n\tmfc1\t%1,$19\n.set pop":"=r"(lval),"=r"(lval2)); break;
	case 20: __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$20\n\tmfc1\t%1,$21\n.set pop":"=r"(lval),"=r"(lval2)); break;
	case 22: __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$22\n\tmfc1\t%1,$23\n.set pop":"=r"(lval),"=r"(lval2)); break;
	case 24: __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$24\n\tmfc1\t%1,$25\n.set pop":"=r"(lval),"=r"(lval2)); break;
	case 26: __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$26\n\tmfc1\t%1,$27\n.set pop":"=r"(lval),"=r"(lval2)); break;
	case 28: __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$28\n\tmfc1\t%1,$29\n.set pop":"=r"(lval),"=r"(lval2)); break;
	case 30: __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$30\n\tmfc1\t%1,$31\n.set pop":"=r"(lval),"=r"(lval2)); break;
#endif
	}
	/* Odd (or out-of-range) reg values leave lval/lval2 unwritten. */
	*val = lval;
	*val2 = lval2;
}
1191 
/*
 * Convenience wrappers around the underlying _LoadXX()/_StoreXX()
 * accessors.  The plain names select the kernel-mode access variant;
 * the trailing-'E' names select the user (EVA) variant, used when the
 * kernel accesses user address space on EVA-enabled systems.  Note the
 * 64-bit LoadDW/StoreDW helpers take no mode argument and have no EVA
 * counterpart here.
 */
#define LoadHWU(addr, value, res)	_LoadHWU(addr, value, res, kernel)
#define LoadHWUE(addr, value, res)	_LoadHWU(addr, value, res, user)
#define LoadWU(addr, value, res)	_LoadWU(addr, value, res, kernel)
#define LoadWUE(addr, value, res)	_LoadWU(addr, value, res, user)
#define LoadHW(addr, value, res)	_LoadHW(addr, value, res, kernel)
#define LoadHWE(addr, value, res)	_LoadHW(addr, value, res, user)
#define LoadW(addr, value, res)		_LoadW(addr, value, res, kernel)
#define LoadWE(addr, value, res)	_LoadW(addr, value, res, user)
#define LoadDW(addr, value, res)	_LoadDW(addr, value, res)

#define StoreHW(addr, value, res)	_StoreHW(addr, value, res, kernel)
#define StoreHWE(addr, value, res)	_StoreHW(addr, value, res, user)
#define StoreW(addr, value, res)	_StoreW(addr, value, res, kernel)
#define StoreWE(addr, value, res)	_StoreW(addr, value, res, user)
#define StoreDW(addr, value, res)	_StoreDW(addr, value, res)
1207 
/*
 * emulate_load_store_insn() - emulate a classic-ISA load/store that took
 * an address error exception because of an unaligned data address.
 *
 * @regs: task register state at exception time
 * @addr: the (unaligned) data address that faulted
 * @pc:   address of the faulting instruction word
 *
 * On success the saved EPC is advanced past the instruction via
 * compute_return_epc() (which accounts for branch-delay slots).  On
 * failure EPC and $31 are rolled back to their pre-branch values and a
 * signal (SIGBUS, SIGSEGV or SIGILL) is forced on the current task.
 */
static void emulate_load_store_insn(struct pt_regs *regs,
	void __user *addr, unsigned int __user *pc)
{
	union mips_instruction insn;
	unsigned long value;
	unsigned int res;
	unsigned long origpc;
	unsigned long orig31;
	void __user *fault_addr = NULL;
#ifdef	CONFIG_EVA
	mm_segment_t seg;
#endif
	union fpureg *fpr;
	enum msa_2b_fmt df;
	unsigned int wd;
	/* Saved up front so the fault paths below can roll back EPC/$ra. */
	origpc = (unsigned long)pc;
	orig31 = regs->regs[31];

	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);

	/*
	 * This load never faults.
	 */
	__get_user(insn.word, pc);

	switch (insn.i_format.opcode) {
		/*
		 * These are instructions that a compiler doesn't generate.  We
		 * can assume therefore that the code is MIPS-aware and
		 * really buggy.  Emulating these instructions would break the
		 * semantics anyway.
		 */
	case ll_op:
	case lld_op:
	case sc_op:
	case scd_op:

		/*
		 * For these instructions the only way to create an address
		 * error is an attempted access to kernel/supervisor address
		 * space.
		 */
	case ldl_op:
	case ldr_op:
	case lwl_op:
	case lwr_op:
	case sdl_op:
	case sdr_op:
	case swl_op:
	case swr_op:
	case lb_op:
	case lbu_op:
	case sb_op:
		goto sigbus;

		/*
		 * The remaining opcodes are the ones that are really of
		 * interest.
		 */
	case spec3_op:
		/* DSP ASE indexed loads (lwx/lhx) share the SPEC3 opcode. */
		switch (insn.dsp_format.func) {
		case lx_op:
			switch (insn.dsp_format.op) {
			case lwx_op:
				if (!access_ok(VERIFY_READ, addr, 4))
					goto sigbus;

				LoadW(addr, value, res);
				if (res)
					goto fault;
				compute_return_epc(regs);
				regs->regs[insn.dsp_format.rd] = value;
				break;
			case lhx_op:
				if (!access_ok(VERIFY_READ, addr, 2)) {
					goto sigbus;
				}

				LoadHW(addr, value, res);
				if (res) {
					goto fault;
				}
				compute_return_epc(regs);
				regs->regs[insn.dsp_format.rd] = value;
				break;
			default:
				goto sigill;
			}
			break;
		}
		/*
		 * NOTE(review): on non-EVA kernels a SPEC3 func other than
		 * lx_op falls straight through to the break below without
		 * raising any signal - confirm this is intentional.
		 */

#ifdef CONFIG_EVA
		/*
		 * we can land here only from kernel accessing user memory,
		 * so we need to "switch" the address limit to user space, so
		 * address check can work properly.
		 */
		seg = get_fs();
		set_fs(USER_DS);
		/* Every exit below must restore the saved segment first. */
		switch (insn.spec3_format.func) {
		case lhe_op:
			if (!access_ok(VERIFY_READ, addr, 2)) {
				set_fs(seg);
				goto sigbus;
			}
			LoadHWE(addr, value, res);
			if (res) {
				set_fs(seg);
				goto fault;
			}
			compute_return_epc(regs);
			regs->regs[insn.spec3_format.rt] = value;
			break;
		case lwe_op:
			if (!access_ok(VERIFY_READ, addr, 4)) {
				set_fs(seg);
				goto sigbus;
			}
				LoadWE(addr, value, res);
			if (res) {
				set_fs(seg);
				goto fault;
			}
			compute_return_epc(regs);
			regs->regs[insn.spec3_format.rt] = value;
			break;
		case lhue_op:
			if (!access_ok(VERIFY_READ, addr, 2)) {
				set_fs(seg);
				goto sigbus;
			}
			LoadHWUE(addr, value, res);
			if (res) {
				set_fs(seg);
				goto fault;
			}
			compute_return_epc(regs);
			regs->regs[insn.spec3_format.rt] = value;
			break;
		case she_op:
			if (!access_ok(VERIFY_WRITE, addr, 2)) {
				set_fs(seg);
				goto sigbus;
			}
			compute_return_epc(regs);
			value = regs->regs[insn.spec3_format.rt];
			StoreHWE(addr, value, res);
			if (res) {
				set_fs(seg);
				goto fault;
			}
			break;
		case swe_op:
			if (!access_ok(VERIFY_WRITE, addr, 4)) {
				set_fs(seg);
				goto sigbus;
			}
			compute_return_epc(regs);
			value = regs->regs[insn.spec3_format.rt];
			StoreWE(addr, value, res);
			if (res) {
				set_fs(seg);
				goto fault;
			}
			break;
		default:
			set_fs(seg);
			goto sigill;
		}
		set_fs(seg);
#endif
		break;
	case lh_op:
		if (!access_ok(VERIFY_READ, addr, 2))
			goto sigbus;

		/*
		 * On EVA kernels, pick the user-mode accessor unless the
		 * address limit says we are accessing kernel space.
		 */
		if (config_enabled(CONFIG_EVA)) {
			if (segment_eq(get_fs(), get_ds()))
				LoadHW(addr, value, res);
			else
				LoadHWE(addr, value, res);
		} else {
			LoadHW(addr, value, res);
		}

		if (res)
			goto fault;
		compute_return_epc(regs);
		regs->regs[insn.i_format.rt] = value;
		break;

	case lw_op:
		if (!access_ok(VERIFY_READ, addr, 4))
			goto sigbus;

		if (config_enabled(CONFIG_EVA)) {
			if (segment_eq(get_fs(), get_ds()))
				LoadW(addr, value, res);
			else
				LoadWE(addr, value, res);
		} else {
			LoadW(addr, value, res);
		}

		if (res)
			goto fault;
		compute_return_epc(regs);
		regs->regs[insn.i_format.rt] = value;
		break;

	case lhu_op:
		if (!access_ok(VERIFY_READ, addr, 2))
			goto sigbus;

		if (config_enabled(CONFIG_EVA)) {
			if (segment_eq(get_fs(), get_ds()))
				LoadHWU(addr, value, res);
			else
				LoadHWUE(addr, value, res);
		} else {
			LoadHWU(addr, value, res);
		}

		if (res)
			goto fault;
		compute_return_epc(regs);
		regs->regs[insn.i_format.rt] = value;
		break;

	case lwu_op:
#ifdef CONFIG_64BIT
		/*
		 * A 32-bit kernel might be running on a 64-bit processor.  But
		 * if we're on a 32-bit processor and an i-cache incoherency
		 * or race makes us see a 64-bit instruction here the sdl/sdr
		 * would blow up, so for now we don't handle unaligned 64-bit
		 * instructions on 32-bit kernels.
		 */
		if (!access_ok(VERIFY_READ, addr, 4))
			goto sigbus;

		LoadWU(addr, value, res);
		if (res)
			goto fault;
		compute_return_epc(regs);
		regs->regs[insn.i_format.rt] = value;
		break;
#endif /* CONFIG_64BIT */

		/* Cannot handle 64-bit instructions in 32-bit kernel */
		goto sigill;

	case ld_op:
#ifdef CONFIG_64BIT
		/*
		 * A 32-bit kernel might be running on a 64-bit processor.  But
		 * if we're on a 32-bit processor and an i-cache incoherency
		 * or race makes us see a 64-bit instruction here the sdl/sdr
		 * would blow up, so for now we don't handle unaligned 64-bit
		 * instructions on 32-bit kernels.
		 */
		if (!access_ok(VERIFY_READ, addr, 8))
			goto sigbus;

		LoadDW(addr, value, res);
		if (res)
			goto fault;
		compute_return_epc(regs);
		regs->regs[insn.i_format.rt] = value;
		break;
#endif /* CONFIG_64BIT */

		/* Cannot handle 64-bit instructions in 32-bit kernel */
		goto sigill;

	case sh_op:
		if (!access_ok(VERIFY_WRITE, addr, 2))
			goto sigbus;

		compute_return_epc(regs);
		value = regs->regs[insn.i_format.rt];

		if (config_enabled(CONFIG_EVA)) {
			if (segment_eq(get_fs(), get_ds()))
				StoreHW(addr, value, res);
			else
				StoreHWE(addr, value, res);
		} else {
			StoreHW(addr, value, res);
		}

		if (res)
			goto fault;
		break;

	case sw_op:
		if (!access_ok(VERIFY_WRITE, addr, 4))
			goto sigbus;

		compute_return_epc(regs);
		value = regs->regs[insn.i_format.rt];

		if (config_enabled(CONFIG_EVA)) {
			if (segment_eq(get_fs(), get_ds()))
				StoreW(addr, value, res);
			else
				StoreWE(addr, value, res);
		} else {
			StoreW(addr, value, res);
		}

		if (res)
			goto fault;
		break;

	case sd_op:
#ifdef CONFIG_64BIT
		/*
		 * A 32-bit kernel might be running on a 64-bit processor.  But
		 * if we're on a 32-bit processor and an i-cache incoherency
		 * or race makes us see a 64-bit instruction here the sdl/sdr
		 * would blow up, so for now we don't handle unaligned 64-bit
		 * instructions on 32-bit kernels.
		 */
		if (!access_ok(VERIFY_WRITE, addr, 8))
			goto sigbus;

		compute_return_epc(regs);
		value = regs->regs[insn.i_format.rt];
		StoreDW(addr, value, res);
		if (res)
			goto fault;
		break;
#endif /* CONFIG_64BIT */

		/* Cannot handle 64-bit instructions in 32-bit kernel */
		goto sigill;

	case ldc1_op:
		/*
		 * FP double load: if this task currently owns the FPU, do
		 * the unaligned memory access here and write the value into
		 * the live FP register directly; otherwise fall back to the
		 * full FP emulator (fpu_continue).  FR=1 uses a single
		 * 64-bit register, FR=0 an even/odd 32-bit pair.
		 */
		if (!access_ok(VERIFY_READ, addr, 8))
			goto sigbus;

		preempt_disable();
		if (is_fpu_owner()) {
			if (read_c0_status() & ST0_FR) {
#ifdef CONFIG_64BIT
				LoadDW(addr, value, res);
				if (res)
					goto preempt_fault;
				dmtc1(value, insn.i_format.rt);
#else /* !CONFIG_64BIT */
#if defined(CONFIG_CPU_MIPSR1) || defined(CONFIG_CPU_R4X00)
				/* No mthc1 on these cores; let the emulator do it. */
				preempt_enable_no_resched();
				goto fpu_continue;
#else
				unsigned long value2;

				LoadW(addr, value, res);
				if (res)
					goto preempt_fault;
				LoadW((addr + 4), value2, res);
				if (res)
					goto preempt_fault;
				mtc1_mthc1(value, value2, insn.i_format.rt);
#endif
#endif /* CONFIG_64BIT */
			} else {
				unsigned long value2;

				LoadW(addr, value, res);
				if (res)
					goto preempt_fault;
				LoadW((addr + 4), value2, res);
				if (res)
					goto preempt_fault;
				mtc1_pair(value, value2, insn.i_format.rt);
			}
			preempt_enable();
			compute_return_epc(regs);
			break;
		}

		preempt_enable_no_resched();
		goto fpu_continue;

	case sdc1_op:
		/* FP double store; mirror image of the ldc1 case above. */
		if (!access_ok(VERIFY_WRITE, addr, 8))
			goto sigbus;

		preempt_disable();
		if (is_fpu_owner()) {
			compute_return_epc(regs);
			if (read_c0_status() & ST0_FR) {
#ifdef CONFIG_64BIT
				value = dmfc1(insn.i_format.rt);
				StoreDW(addr, value, res);
				if (res)
					goto preempt_fault;
#else /* !CONFIG_64BIT */
#if defined(CONFIG_CPU_MIPSR1) || defined(CONFIG_CPU_R4X00)
				preempt_enable_no_resched();
				/* roll back jump/branch */
				regs->cp0_epc = origpc;
				regs->regs[31] = orig31;
				goto fpu_continue;
#else
				unsigned long value2;

				mfc1_mfhc1(&value, &value2, insn.i_format.rt);
				StoreW(addr, value, res);
				if (res)
					goto preempt_fault;
				StoreW((addr + 4), value2, res);
				if (res)
					goto preempt_fault;
#endif
#endif /* CONFIG_64BIT */
			} else {
				unsigned long value2;

				mfc1_pair(&value, &value2, insn.i_format.rt);
				StoreW(addr, value, res);
				if (res)
					goto preempt_fault;
				StoreW((addr + 4), value2, res);
				if (res)
					goto preempt_fault;
			}
			preempt_enable();
			break;
		}

/* Shared by the ldc1_op and sdc1_op paths above: leave the preempt-off
 * region, then retry the access through the FP emulator. */
preempt_fault:
		preempt_enable_no_resched();
		goto fpu_continue;

	case cop1x_op:
		switch (insn.f_format.func) {
			case lwxc1_op:
			case swxc1_op:
			case ldxc1_op:
			case sdxc1_op:
				goto fpu_continue;
			default:
				goto sigill;
		}
		break;

	case lwc1_op:
	case swc1_op:
fpu_continue:
		/* Hand the whole instruction to the FP emulator. */
		die_if_kernel("Unaligned FP access in kernel code", regs);
		BUG_ON(!used_math());

		lose_fpu(1);	/* Save FPU state for the emulator. */
		res = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
					       &fault_addr);
		own_fpu(1);	/* Restore FPU state. */

		/* Signal if something went wrong. */
		process_fpemu_return(res, fault_addr, 0);

		if (res == 0)
			break;
		return;

	case msa_op:
		if (!cpu_has_msa)
			goto sigill;

		/*
		 * If we've reached this point then userland should have taken
		 * the MSA disabled exception & initialised vector context at
		 * some point in the past.
		 */
		BUG_ON(!thread_msa_context_live());

		df = insn.msa_mi10_format.df;
		wd = insn.msa_mi10_format.wd;
		fpr = &current->thread.fpu.fpr[wd];

		switch (insn.msa_mi10_format.func) {
		case msa_ld_op:
			if (!access_ok(VERIFY_READ, addr, sizeof(*fpr)))
				goto sigbus;

			/*
			 * Disable preemption to avoid a race between copying
			 * state from userland, migrating to another CPU and
			 * updating the hardware vector register below.
			 */
			preempt_disable();

			res = __copy_from_user_inatomic(fpr, addr,
							sizeof(*fpr));
			if (res)
				goto fault;

			/*
			 * Update the hardware register if it is in use by the
			 * task in this quantum, in order to avoid having to
			 * save & restore the whole vector context.
			 */
			if (test_thread_flag(TIF_USEDMSA))
				write_msa_wr(wd, fpr, df);

			preempt_enable();
			break;

		case msa_st_op:
			if (!access_ok(VERIFY_WRITE, addr, sizeof(*fpr)))
				goto sigbus;

			/*
			 * Update from the hardware register if it is in use by
			 * the task in this quantum, in order to avoid having to
			 * save & restore the whole vector context.
			 */
			preempt_disable();
			if (test_thread_flag(TIF_USEDMSA))
				read_msa_wr(wd, fpr, df);
			preempt_enable();

			res = __copy_to_user_inatomic(addr, fpr, sizeof(*fpr));
			if (res)
				goto fault;
			break;

		default:
			goto sigbus;
		}

		compute_return_epc(regs);
		break;

#ifndef CONFIG_CPU_MIPSR6
	/*
	 * COP2 is available to implementor for application specific use.
	 * It's up to applications to register a notifier chain and do
	 * whatever they have to do, including possible sending of signals.
	 *
	 * This instruction has been reallocated in Release 6
	 */
	case lwc2_op:
		cu2_notifier_call_chain(CU2_LWC2_OP, regs);
		break;

	case ldc2_op:
		cu2_notifier_call_chain(CU2_LDC2_OP, regs);
		break;

	case swc2_op:
		cu2_notifier_call_chain(CU2_SWC2_OP, regs);
		break;

	case sdc2_op:
		cu2_notifier_call_chain(CU2_SDC2_OP, regs);
		break;
#endif
	default:
		/*
		 * Pheeee...  We encountered an yet unknown instruction or
		 * cache coherence problem.  Die sucker, die ...
		 */
		goto sigill;
	}

#ifdef CONFIG_DEBUG_FS
	unaligned_instructions++;
#endif

	return;

fault:
	/* roll back jump/branch */
	regs->cp0_epc = origpc;
	regs->regs[31] = orig31;
	/* Did we have an exception handler installed? */
	if (fixup_exception(regs))
		return;

	die_if_kernel("Unhandled kernel unaligned access", regs);
	force_sig(SIGSEGV, current);

	return;

sigbus:
	die_if_kernel("Unhandled kernel unaligned access", regs);
	force_sig(SIGBUS, current);

	return;

sigill:
	die_if_kernel
	    ("Unhandled kernel unaligned access or invalid instruction", regs);
	force_sig(SIGILL, current);
}
1805 
/*
 * Recode table from the 3-bit register field of 16-bit microMIPS
 * instructions to the corresponding 32-bit GPR number.
 */
const int reg16to32[] = { 16, 17, 2, 3, 4, 5, 6, 7 };

/*
 * Same recode table, but for 16-bit STORE encodings, where a field
 * value of 0 selects $0 (zero) instead of $16.
 */
const int reg16to32st[] = { 0, 17, 2, 3, 4, 5, 6, 7 };
1811 
1812 static void emulate_load_store_microMIPS(struct pt_regs *regs,
1813 					 void __user *addr)
1814 {
1815 	unsigned long value;
1816 	unsigned int res;
1817 	int i;
1818 	unsigned int reg = 0, rvar;
1819 	unsigned long orig31;
1820 	u16 __user *pc16;
1821 	u16 halfword;
1822 	unsigned int word;
1823 	unsigned long origpc, contpc;
1824 	union mips_instruction insn;
1825 	struct mm_decoded_insn mminsn;
1826 	void __user *fault_addr = NULL;
1827 
1828 	origpc = regs->cp0_epc;
1829 	orig31 = regs->regs[31];
1830 
1831 	mminsn.micro_mips_mode = 1;
1832 
1833 	/*
1834 	 * This load never faults.
1835 	 */
1836 	pc16 = (unsigned short __user *)msk_isa16_mode(regs->cp0_epc);
1837 	__get_user(halfword, pc16);
1838 	pc16++;
1839 	contpc = regs->cp0_epc + 2;
1840 	word = ((unsigned int)halfword << 16);
1841 	mminsn.pc_inc = 2;
1842 
1843 	if (!mm_insn_16bit(halfword)) {
1844 		__get_user(halfword, pc16);
1845 		pc16++;
1846 		contpc = regs->cp0_epc + 4;
1847 		mminsn.pc_inc = 4;
1848 		word |= halfword;
1849 	}
1850 	mminsn.insn = word;
1851 
1852 	if (get_user(halfword, pc16))
1853 		goto fault;
1854 	mminsn.next_pc_inc = 2;
1855 	word = ((unsigned int)halfword << 16);
1856 
1857 	if (!mm_insn_16bit(halfword)) {
1858 		pc16++;
1859 		if (get_user(halfword, pc16))
1860 			goto fault;
1861 		mminsn.next_pc_inc = 4;
1862 		word |= halfword;
1863 	}
1864 	mminsn.next_insn = word;
1865 
1866 	insn = (union mips_instruction)(mminsn.insn);
1867 	if (mm_isBranchInstr(regs, mminsn, &contpc))
1868 		insn = (union mips_instruction)(mminsn.next_insn);
1869 
1870 	/*  Parse instruction to find what to do */
1871 
1872 	switch (insn.mm_i_format.opcode) {
1873 
1874 	case mm_pool32a_op:
1875 		switch (insn.mm_x_format.func) {
1876 		case mm_lwxs_op:
1877 			reg = insn.mm_x_format.rd;
1878 			goto loadW;
1879 		}
1880 
1881 		goto sigbus;
1882 
1883 	case mm_pool32b_op:
1884 		switch (insn.mm_m_format.func) {
1885 		case mm_lwp_func:
1886 			reg = insn.mm_m_format.rd;
1887 			if (reg == 31)
1888 				goto sigbus;
1889 
1890 			if (!access_ok(VERIFY_READ, addr, 8))
1891 				goto sigbus;
1892 
1893 			LoadW(addr, value, res);
1894 			if (res)
1895 				goto fault;
1896 			regs->regs[reg] = value;
1897 			addr += 4;
1898 			LoadW(addr, value, res);
1899 			if (res)
1900 				goto fault;
1901 			regs->regs[reg + 1] = value;
1902 			goto success;
1903 
1904 		case mm_swp_func:
1905 			reg = insn.mm_m_format.rd;
1906 			if (reg == 31)
1907 				goto sigbus;
1908 
1909 			if (!access_ok(VERIFY_WRITE, addr, 8))
1910 				goto sigbus;
1911 
1912 			value = regs->regs[reg];
1913 			StoreW(addr, value, res);
1914 			if (res)
1915 				goto fault;
1916 			addr += 4;
1917 			value = regs->regs[reg + 1];
1918 			StoreW(addr, value, res);
1919 			if (res)
1920 				goto fault;
1921 			goto success;
1922 
1923 		case mm_ldp_func:
1924 #ifdef CONFIG_64BIT
1925 			reg = insn.mm_m_format.rd;
1926 			if (reg == 31)
1927 				goto sigbus;
1928 
1929 			if (!access_ok(VERIFY_READ, addr, 16))
1930 				goto sigbus;
1931 
1932 			LoadDW(addr, value, res);
1933 			if (res)
1934 				goto fault;
1935 			regs->regs[reg] = value;
1936 			addr += 8;
1937 			LoadDW(addr, value, res);
1938 			if (res)
1939 				goto fault;
1940 			regs->regs[reg + 1] = value;
1941 			goto success;
1942 #endif /* CONFIG_64BIT */
1943 
1944 			goto sigill;
1945 
1946 		case mm_sdp_func:
1947 #ifdef CONFIG_64BIT
1948 			reg = insn.mm_m_format.rd;
1949 			if (reg == 31)
1950 				goto sigbus;
1951 
1952 			if (!access_ok(VERIFY_WRITE, addr, 16))
1953 				goto sigbus;
1954 
1955 			value = regs->regs[reg];
1956 			StoreDW(addr, value, res);
1957 			if (res)
1958 				goto fault;
1959 			addr += 8;
1960 			value = regs->regs[reg + 1];
1961 			StoreDW(addr, value, res);
1962 			if (res)
1963 				goto fault;
1964 			goto success;
1965 #endif /* CONFIG_64BIT */
1966 
1967 			goto sigill;
1968 
1969 		case mm_lwm32_func:
1970 			reg = insn.mm_m_format.rd;
1971 			rvar = reg & 0xf;
1972 			if ((rvar > 9) || !reg)
1973 				goto sigill;
1974 			if (reg & 0x10) {
1975 				if (!access_ok
1976 				    (VERIFY_READ, addr, 4 * (rvar + 1)))
1977 					goto sigbus;
1978 			} else {
1979 				if (!access_ok(VERIFY_READ, addr, 4 * rvar))
1980 					goto sigbus;
1981 			}
1982 			if (rvar == 9)
1983 				rvar = 8;
1984 			for (i = 16; rvar; rvar--, i++) {
1985 				LoadW(addr, value, res);
1986 				if (res)
1987 					goto fault;
1988 				addr += 4;
1989 				regs->regs[i] = value;
1990 			}
1991 			if ((reg & 0xf) == 9) {
1992 				LoadW(addr, value, res);
1993 				if (res)
1994 					goto fault;
1995 				addr += 4;
1996 				regs->regs[30] = value;
1997 			}
1998 			if (reg & 0x10) {
1999 				LoadW(addr, value, res);
2000 				if (res)
2001 					goto fault;
2002 				regs->regs[31] = value;
2003 			}
2004 			goto success;
2005 
2006 		case mm_swm32_func:
2007 			reg = insn.mm_m_format.rd;
2008 			rvar = reg & 0xf;
2009 			if ((rvar > 9) || !reg)
2010 				goto sigill;
2011 			if (reg & 0x10) {
2012 				if (!access_ok
2013 				    (VERIFY_WRITE, addr, 4 * (rvar + 1)))
2014 					goto sigbus;
2015 			} else {
2016 				if (!access_ok(VERIFY_WRITE, addr, 4 * rvar))
2017 					goto sigbus;
2018 			}
2019 			if (rvar == 9)
2020 				rvar = 8;
2021 			for (i = 16; rvar; rvar--, i++) {
2022 				value = regs->regs[i];
2023 				StoreW(addr, value, res);
2024 				if (res)
2025 					goto fault;
2026 				addr += 4;
2027 			}
2028 			if ((reg & 0xf) == 9) {
2029 				value = regs->regs[30];
2030 				StoreW(addr, value, res);
2031 				if (res)
2032 					goto fault;
2033 				addr += 4;
2034 			}
2035 			if (reg & 0x10) {
2036 				value = regs->regs[31];
2037 				StoreW(addr, value, res);
2038 				if (res)
2039 					goto fault;
2040 			}
2041 			goto success;
2042 
2043 		case mm_ldm_func:
2044 #ifdef CONFIG_64BIT
2045 			reg = insn.mm_m_format.rd;
2046 			rvar = reg & 0xf;
2047 			if ((rvar > 9) || !reg)
2048 				goto sigill;
2049 			if (reg & 0x10) {
2050 				if (!access_ok
2051 				    (VERIFY_READ, addr, 8 * (rvar + 1)))
2052 					goto sigbus;
2053 			} else {
2054 				if (!access_ok(VERIFY_READ, addr, 8 * rvar))
2055 					goto sigbus;
2056 			}
2057 			if (rvar == 9)
2058 				rvar = 8;
2059 
2060 			for (i = 16; rvar; rvar--, i++) {
2061 				LoadDW(addr, value, res);
2062 				if (res)
2063 					goto fault;
2064 				addr += 4;
2065 				regs->regs[i] = value;
2066 			}
2067 			if ((reg & 0xf) == 9) {
2068 				LoadDW(addr, value, res);
2069 				if (res)
2070 					goto fault;
2071 				addr += 8;
2072 				regs->regs[30] = value;
2073 			}
2074 			if (reg & 0x10) {
2075 				LoadDW(addr, value, res);
2076 				if (res)
2077 					goto fault;
2078 				regs->regs[31] = value;
2079 			}
2080 			goto success;
2081 #endif /* CONFIG_64BIT */
2082 
2083 			goto sigill;
2084 
2085 		case mm_sdm_func:
2086 #ifdef CONFIG_64BIT
2087 			reg = insn.mm_m_format.rd;
2088 			rvar = reg & 0xf;
2089 			if ((rvar > 9) || !reg)
2090 				goto sigill;
2091 			if (reg & 0x10) {
2092 				if (!access_ok
2093 				    (VERIFY_WRITE, addr, 8 * (rvar + 1)))
2094 					goto sigbus;
2095 			} else {
2096 				if (!access_ok(VERIFY_WRITE, addr, 8 * rvar))
2097 					goto sigbus;
2098 			}
2099 			if (rvar == 9)
2100 				rvar = 8;
2101 
2102 			for (i = 16; rvar; rvar--, i++) {
2103 				value = regs->regs[i];
2104 				StoreDW(addr, value, res);
2105 				if (res)
2106 					goto fault;
2107 				addr += 8;
2108 			}
2109 			if ((reg & 0xf) == 9) {
2110 				value = regs->regs[30];
2111 				StoreDW(addr, value, res);
2112 				if (res)
2113 					goto fault;
2114 				addr += 8;
2115 			}
2116 			if (reg & 0x10) {
2117 				value = regs->regs[31];
2118 				StoreDW(addr, value, res);
2119 				if (res)
2120 					goto fault;
2121 			}
2122 			goto success;
2123 #endif /* CONFIG_64BIT */
2124 
2125 			goto sigill;
2126 
2127 			/*  LWC2, SWC2, LDC2, SDC2 are not serviced */
2128 		}
2129 
2130 		goto sigbus;
2131 
2132 	case mm_pool32c_op:
2133 		switch (insn.mm_m_format.func) {
2134 		case mm_lwu_func:
2135 			reg = insn.mm_m_format.rd;
2136 			goto loadWU;
2137 		}
2138 
2139 		/*  LL,SC,LLD,SCD are not serviced */
2140 		goto sigbus;
2141 
2142 	case mm_pool32f_op:
2143 		switch (insn.mm_x_format.func) {
2144 		case mm_lwxc1_func:
2145 		case mm_swxc1_func:
2146 		case mm_ldxc1_func:
2147 		case mm_sdxc1_func:
2148 			goto fpu_emul;
2149 		}
2150 
2151 		goto sigbus;
2152 
2153 	case mm_ldc132_op:
2154 		if (!access_ok(VERIFY_READ, addr, 8))
2155 			goto sigbus;
2156 
2157 		preempt_disable();
2158 		if (is_fpu_owner()) {
2159 			if (read_c0_status() & ST0_FR) {
2160 #ifdef CONFIG_64BIT
2161 				LoadDW(addr, value, res);
2162 				if (res)
2163 					goto preempt_fault;
2164 				dmtc1(value, insn.mm_i_format.rt);
2165 #else /* !CONFIG_64BIT */
2166 #if defined(CONFIG_CPU_MIPSR1) || defined(CONFIG_CPU_R4X00)
2167 				preempt_enable_no_resched();
2168 				goto fpu_emul;
2169 #else
2170 				unsigned long value2;
2171 
2172 				LoadW(addr, value, res);
2173 				if (res)
2174 					goto preempt_fault;
2175 				LoadW((addr + 4), value2, res);
2176 				if (res)
2177 					goto preempt_fault;
2178 				mtc1_mthc1(value, value2, insn.mm_i_format.rt);
2179 #endif
2180 #endif /* CONFIG_64BIT */
2181 			} else {
2182 				unsigned long value2;
2183 
2184 				LoadW(addr, value, res);
2185 				if (res)
2186 					goto preempt_fault;
2187 				LoadW((addr + 4), value2, res);
2188 				if (res)
2189 					goto preempt_fault;
2190 				mtc1_pair(value, value2, insn.mm_i_format.rt);
2191 			}
2192 			preempt_enable();
2193 			goto success;
2194 		}
2195 
2196                preempt_enable_no_resched();
2197 		goto fpu_emul;
2198 
2199 	case mm_sdc132_op:
2200 		if (!access_ok(VERIFY_WRITE, addr, 8))
2201 			goto sigbus;
2202 
2203 		preempt_disable();
2204 		if (is_fpu_owner()) {
2205 			if (read_c0_status() & ST0_FR) {
2206 #ifdef CONFIG_64BIT
2207 				value = dmfc1(insn.mm_i_format.rt);
2208 				StoreDW(addr, value, res);
2209 				if (res)
2210 					goto preempt_fault;
2211 #else /* !CONFIG_64BIT */
2212 #if defined(CONFIG_CPU_MIPSR1) || defined(CONFIG_CPU_R4X00)
2213 				preempt_enable_no_resched();
2214 				goto fpu_emul;
2215 #else
2216 				unsigned long value2;
2217 
2218 				mfc1_mfhc1(&value, &value2, insn.mm_i_format.rt);
2219 				StoreW(addr, value, res);
2220 				if (res)
2221 					goto preempt_fault;
2222 				StoreW((addr + 4), value2, res);
2223 				if (res)
2224 					goto preempt_fault;
2225 #endif
2226 #endif /* CONFIG_64BIT */
2227 			} else {
2228 				unsigned long value2;
2229 
2230 				mfc1_pair(&value, &value2, insn.mm_i_format.rt);
2231 				StoreW(addr, value, res);
2232 				if (res)
2233 					goto preempt_fault;
2234 				StoreW((addr + 4), value2, res);
2235 				if (res)
2236 					goto preempt_fault;
2237 			}
2238 			preempt_enable();
2239 			goto success;
2240 		}
2241 
2242 		preempt_enable_no_resched();
2243 		goto fpu_emul;
2244 
2245 	case mm_lwc132_op:
2246 	case mm_swc132_op:
2247 fpu_emul:
2248 		/* roll back jump/branch */
2249 		regs->cp0_epc = origpc;
2250 		regs->regs[31] = orig31;
2251 
2252 		die_if_kernel("Unaligned FP access in kernel code", regs);
2253 		BUG_ON(!used_math());
2254 //                BUG_ON(!is_fpu_owner());
2255 
2256 		lose_fpu(1);	/* save the FPU state for the emulator */
2257 		res = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
2258 					       &fault_addr);
2259 		own_fpu(1);	/* restore FPU state */
2260 
2261 		/* If something went wrong, signal */
2262 		process_fpemu_return(res, fault_addr, 0);
2263 
2264 		if (res == 0)
2265 			goto success;
2266 		return;
2267 
2268 	case mm_lh32_op:
2269 		reg = insn.mm_i_format.rt;
2270 		goto loadHW;
2271 
2272 	case mm_lhu32_op:
2273 		reg = insn.mm_i_format.rt;
2274 		goto loadHWU;
2275 
2276 	case mm_lw32_op:
2277 		reg = insn.mm_i_format.rt;
2278 		goto loadW;
2279 
2280 	case mm_sh32_op:
2281 		reg = insn.mm_i_format.rt;
2282 		goto storeHW;
2283 
2284 	case mm_sw32_op:
2285 		reg = insn.mm_i_format.rt;
2286 		goto storeW;
2287 
2288 	case mm_ld32_op:
2289 		reg = insn.mm_i_format.rt;
2290 		goto loadDW;
2291 
2292 	case mm_sd32_op:
2293 		reg = insn.mm_i_format.rt;
2294 		goto storeDW;
2295 
2296 	case mm_pool16c_op:
2297 		switch (insn.mm16_m_format.func) {
2298 		case mm_lwm16_op:
2299 			reg = insn.mm16_m_format.rlist;
2300 			rvar = reg + 1;
2301 			if (!access_ok(VERIFY_READ, addr, 4 * rvar))
2302 				goto sigbus;
2303 
2304 			for (i = 16; rvar; rvar--, i++) {
2305 				LoadW(addr, value, res);
2306 				if (res)
2307 					goto fault;
2308 				addr += 4;
2309 				regs->regs[i] = value;
2310 			}
2311 			LoadW(addr, value, res);
2312 			if (res)
2313 				goto fault;
2314 			regs->regs[31] = value;
2315 
2316 			goto success;
2317 
2318 		case mm_swm16_op:
2319 			reg = insn.mm16_m_format.rlist;
2320 			rvar = reg + 1;
2321 			if (!access_ok(VERIFY_WRITE, addr, 4 * rvar))
2322 				goto sigbus;
2323 
2324 			for (i = 16; rvar; rvar--, i++) {
2325 				value = regs->regs[i];
2326 				StoreW(addr, value, res);
2327 				if (res)
2328 					goto fault;
2329 				addr += 4;
2330 			}
2331 			value = regs->regs[31];
2332 			StoreW(addr, value, res);
2333 			if (res)
2334 				goto fault;
2335 
2336 			goto success;
2337 
2338 		}
2339 
2340 		goto sigbus;
2341 
2342 	case mm_lhu16_op:
2343 		reg = reg16to32[insn.mm16_rb_format.rt];
2344 		goto loadHWU;
2345 
2346 	case mm_lw16_op:
2347 		reg = reg16to32[insn.mm16_rb_format.rt];
2348 		goto loadW;
2349 
2350 	case mm_sh16_op:
2351 		reg = reg16to32st[insn.mm16_rb_format.rt];
2352 		goto storeHW;
2353 
2354 	case mm_sw16_op:
2355 		reg = reg16to32st[insn.mm16_rb_format.rt];
2356 		goto storeW;
2357 
2358 	case mm_lwsp16_op:
2359 		reg = insn.mm16_r5_format.rt;
2360 		goto loadW;
2361 
2362 	case mm_swsp16_op:
2363 		reg = insn.mm16_r5_format.rt;
2364 		goto storeW;
2365 
2366 	case mm_lwgp16_op:
2367 		reg = reg16to32[insn.mm16_r3_format.rt];
2368 		goto loadW;
2369 
2370 	default:
2371 		goto sigill;
2372 	}
2373 
2374 loadHW:
2375 	if (!access_ok(VERIFY_READ, addr, 2))
2376 		goto sigbus;
2377 
2378 	LoadHW(addr, value, res);
2379 	if (res)
2380 		goto fault;
2381 	regs->regs[reg] = value;
2382 	goto success;
2383 
2384 loadHWU:
2385 	if (!access_ok(VERIFY_READ, addr, 2))
2386 		goto sigbus;
2387 
2388 	LoadHWU(addr, value, res);
2389 	if (res)
2390 		goto fault;
2391 	regs->regs[reg] = value;
2392 	goto success;
2393 
2394 loadW:
2395 	if (!access_ok(VERIFY_READ, addr, 4))
2396 		goto sigbus;
2397 
2398 	LoadW(addr, value, res);
2399 	if (res)
2400 		goto fault;
2401 	regs->regs[reg] = value;
2402 	goto success;
2403 
2404 loadWU:
2405 #ifdef CONFIG_64BIT
2406 	/*
2407 	 * A 32-bit kernel might be running on a 64-bit processor.  But
2408 	 * if we're on a 32-bit processor and an i-cache incoherency
2409 	 * or race makes us see a 64-bit instruction here the sdl/sdr
2410 	 * would blow up, so for now we don't handle unaligned 64-bit
2411 	 * instructions on 32-bit kernels.
2412 	 */
2413 	if (!access_ok(VERIFY_READ, addr, 4))
2414 		goto sigbus;
2415 
2416 	LoadWU(addr, value, res);
2417 	if (res)
2418 		goto fault;
2419 	regs->regs[reg] = value;
2420 	goto success;
2421 #endif /* CONFIG_64BIT */
2422 
2423 	/* Cannot handle 64-bit instructions in 32-bit kernel */
2424 	goto sigill;
2425 
2426 loadDW:
2427 #ifdef CONFIG_64BIT
2428 	/*
2429 	 * A 32-bit kernel might be running on a 64-bit processor.  But
2430 	 * if we're on a 32-bit processor and an i-cache incoherency
2431 	 * or race makes us see a 64-bit instruction here the sdl/sdr
2432 	 * would blow up, so for now we don't handle unaligned 64-bit
2433 	 * instructions on 32-bit kernels.
2434 	 */
2435 	if (!access_ok(VERIFY_READ, addr, 8))
2436 		goto sigbus;
2437 
2438 	LoadDW(addr, value, res);
2439 	if (res)
2440 		goto fault;
2441 	regs->regs[reg] = value;
2442 	goto success;
2443 #endif /* CONFIG_64BIT */
2444 
2445 	/* Cannot handle 64-bit instructions in 32-bit kernel */
2446 	goto sigill;
2447 
2448 storeHW:
2449 	if (!access_ok(VERIFY_WRITE, addr, 2))
2450 		goto sigbus;
2451 
2452 	value = regs->regs[reg];
2453 	StoreHW(addr, value, res);
2454 	if (res)
2455 		goto fault;
2456 	goto success;
2457 
2458 storeW:
2459 	if (!access_ok(VERIFY_WRITE, addr, 4))
2460 		goto sigbus;
2461 
2462 	value = regs->regs[reg];
2463 	StoreW(addr, value, res);
2464 	if (res)
2465 		goto fault;
2466 	goto success;
2467 
2468 storeDW:
2469 #ifdef CONFIG_64BIT
2470 	/*
2471 	 * A 32-bit kernel might be running on a 64-bit processor.  But
2472 	 * if we're on a 32-bit processor and an i-cache incoherency
2473 	 * or race makes us see a 64-bit instruction here the sdl/sdr
2474 	 * would blow up, so for now we don't handle unaligned 64-bit
2475 	 * instructions on 32-bit kernels.
2476 	 */
2477 	if (!access_ok(VERIFY_WRITE, addr, 8))
2478 		goto sigbus;
2479 
2480 	value = regs->regs[reg];
2481 	StoreDW(addr, value, res);
2482 	if (res)
2483 		goto fault;
2484 	goto success;
2485 #endif /* CONFIG_64BIT */
2486 
2487 	/* Cannot handle 64-bit instructions in 32-bit kernel */
2488 	goto sigill;
2489 
2490 success:
2491 	regs->cp0_epc = contpc;	/* advance or branch */
2492 
2493 #ifdef CONFIG_DEBUG_FS
2494 	unaligned_instructions++;
2495 #endif
2496 	return;
2497 
2498 preempt_fault:
2499 	preempt_enable();
2500 fault:
2501 	/* roll back jump/branch */
2502 	regs->cp0_epc = origpc;
2503 	regs->regs[31] = orig31;
2504 	/* Did we have an exception handler installed? */
2505 	if (fixup_exception(regs))
2506 		return;
2507 
2508 	die_if_kernel("Unhandled kernel unaligned access", regs);
2509 	force_sig(SIGSEGV, current);
2510 
2511 	return;
2512 
2513 sigbus:
2514 	die_if_kernel("Unhandled kernel unaligned access", regs);
2515 	force_sig(SIGBUS, current);
2516 
2517 	return;
2518 
2519 sigill:
2520 	die_if_kernel
2521 	    ("Unhandled kernel unaligned access or invalid instruction", regs);
2522 	force_sig(SIGILL, current);
2523 }
2524 
/*
 * Emulate an unaligned MIPS16e load or store.
 * @regs: register state of the faulting context.
 * @addr: the misaligned data address the access faulted on.
 *
 * Decodes the MIPS16e instruction at EPC (stepping over an EXTEND
 * prefix, or over the jump when the fault hit a delay slot), performs
 * the access with the byte-wise LoadHW/LoadW/StoreHW/StoreW helpers,
 * advances EPC and writes the destination register for loads.  On any
 * failure the saved EPC/$31 are restored and the task is signalled.
 */
static void emulate_load_store_MIPS16e(struct pt_regs *regs, void __user * addr)
{
	unsigned long value;	/* datum moved to/from memory */
	unsigned int res;	/* nonzero => access helper faulted */
	int reg;		/* decoded 32-bit GPR number */
	unsigned long orig31;	/* saved $ra, restored on fault */
	u16 __user *pc16;	/* instruction stream in 16-bit units */
	unsigned long origpc;	/* saved EPC, restored on fault */
	union mips16e_instruction mips16inst, oldinst;

	origpc = regs->cp0_epc;
	orig31 = regs->regs[31];
	pc16 = (unsigned short __user *)msk_isa16_mode(origpc);
	/*
	 * This load never faults.
	 */
	__get_user(mips16inst.full, pc16);
	/* keep the unmodified word for branch/EPC computation below */
	oldinst = mips16inst;

	/* skip EXTEND instruction */
	if (mips16inst.ri.opcode == MIPS16e_extend_op) {
		pc16++;
		__get_user(mips16inst.full, pc16);
	} else if (delay_slot(regs)) {
		/*  skip jump instructions */
		/*  JAL/JALX are 32 bits but have OPCODE in first short int */
		if (mips16inst.ri.opcode == MIPS16e_jal_op)
			pc16++;
		pc16++;
		if (get_user(mips16inst.full, pc16))
			goto sigbus;
	}

	/* First pass: decode the source/destination register number. */
	switch (mips16inst.ri.opcode) {
	case MIPS16e_i64_op:	/* I64 or RI64 instruction */
		switch (mips16inst.i64.func) {	/* I64/RI64 func field check */
		case MIPS16e_ldpc_func:
		case MIPS16e_ldsp_func:
			reg = reg16to32[mips16inst.ri64.ry];
			goto loadDW;

		case MIPS16e_sdsp_func:
			reg = reg16to32[mips16inst.ri64.ry];
			goto writeDW;

		case MIPS16e_sdrasp_func:
			reg = 29;	/* GPRSP */
			goto writeDW;
		}

		goto sigbus;

	case MIPS16e_swsp_op:
	case MIPS16e_lwpc_op:
	case MIPS16e_lwsp_op:
		reg = reg16to32[mips16inst.ri.rx];
		break;

	case MIPS16e_i8_op:
		if (mips16inst.i8.func != MIPS16e_swrasp_func)
			goto sigbus;
		reg = 29;	/* GPRSP */
		break;

	default:
		reg = reg16to32[mips16inst.rri.ry];
		break;
	}

	/* Second pass: perform the memory access itself. */
	switch (mips16inst.ri.opcode) {

	case MIPS16e_lb_op:
	case MIPS16e_lbu_op:
	case MIPS16e_sb_op:
		/* NOTE(review): byte ops shouldn't address-error; treat as bad */
		goto sigbus;

	case MIPS16e_lh_op:
		if (!access_ok(VERIFY_READ, addr, 2))
			goto sigbus;

		LoadHW(addr, value, res);
		if (res)
			goto fault;
		MIPS16e_compute_return_epc(regs, &oldinst);
		regs->regs[reg] = value;
		break;

	case MIPS16e_lhu_op:
		if (!access_ok(VERIFY_READ, addr, 2))
			goto sigbus;

		LoadHWU(addr, value, res);
		if (res)
			goto fault;
		MIPS16e_compute_return_epc(regs, &oldinst);
		regs->regs[reg] = value;
		break;

	case MIPS16e_lw_op:
	case MIPS16e_lwpc_op:
	case MIPS16e_lwsp_op:
		if (!access_ok(VERIFY_READ, addr, 4))
			goto sigbus;

		LoadW(addr, value, res);
		if (res)
			goto fault;
		MIPS16e_compute_return_epc(regs, &oldinst);
		regs->regs[reg] = value;
		break;

	case MIPS16e_lwu_op:
#ifdef CONFIG_64BIT
		/*
		 * A 32-bit kernel might be running on a 64-bit processor.  But
		 * if we're on a 32-bit processor and an i-cache incoherency
		 * or race makes us see a 64-bit instruction here the sdl/sdr
		 * would blow up, so for now we don't handle unaligned 64-bit
		 * instructions on 32-bit kernels.
		 */
		if (!access_ok(VERIFY_READ, addr, 4))
			goto sigbus;

		LoadWU(addr, value, res);
		if (res)
			goto fault;
		MIPS16e_compute_return_epc(regs, &oldinst);
		regs->regs[reg] = value;
		break;
#endif /* CONFIG_64BIT */

		/* Cannot handle 64-bit instructions in 32-bit kernel */
		goto sigill;

	case MIPS16e_ld_op:
loadDW:
#ifdef CONFIG_64BIT
		/*
		 * A 32-bit kernel might be running on a 64-bit processor.  But
		 * if we're on a 32-bit processor and an i-cache incoherency
		 * or race makes us see a 64-bit instruction here the sdl/sdr
		 * would blow up, so for now we don't handle unaligned 64-bit
		 * instructions on 32-bit kernels.
		 */
		if (!access_ok(VERIFY_READ, addr, 8))
			goto sigbus;

		LoadDW(addr, value, res);
		if (res)
			goto fault;
		MIPS16e_compute_return_epc(regs, &oldinst);
		regs->regs[reg] = value;
		break;
#endif /* CONFIG_64BIT */

		/* Cannot handle 64-bit instructions in 32-bit kernel */
		goto sigill;

	case MIPS16e_sh_op:
		if (!access_ok(VERIFY_WRITE, addr, 2))
			goto sigbus;

		/* EPC is advanced before the store; fault path rolls it back */
		MIPS16e_compute_return_epc(regs, &oldinst);
		value = regs->regs[reg];
		StoreHW(addr, value, res);
		if (res)
			goto fault;
		break;

	case MIPS16e_sw_op:
	case MIPS16e_swsp_op:
	case MIPS16e_i8_op:	/* actually - MIPS16e_swrasp_func */
		if (!access_ok(VERIFY_WRITE, addr, 4))
			goto sigbus;

		MIPS16e_compute_return_epc(regs, &oldinst);
		value = regs->regs[reg];
		StoreW(addr, value, res);
		if (res)
			goto fault;
		break;

	case MIPS16e_sd_op:
writeDW:
#ifdef CONFIG_64BIT
		/*
		 * A 32-bit kernel might be running on a 64-bit processor.  But
		 * if we're on a 32-bit processor and an i-cache incoherency
		 * or race makes us see a 64-bit instruction here the sdl/sdr
		 * would blow up, so for now we don't handle unaligned 64-bit
		 * instructions on 32-bit kernels.
		 */
		if (!access_ok(VERIFY_WRITE, addr, 8))
			goto sigbus;

		MIPS16e_compute_return_epc(regs, &oldinst);
		value = regs->regs[reg];
		StoreDW(addr, value, res);
		if (res)
			goto fault;
		break;
#endif /* CONFIG_64BIT */

		/* Cannot handle 64-bit instructions in 32-bit kernel */
		goto sigill;

	default:
		/*
		 * Pheeee...  We encountered an yet unknown instruction or
		 * cache coherence problem.  Die sucker, die ...
		 */
		goto sigill;
	}

#ifdef CONFIG_DEBUG_FS
	unaligned_instructions++;
#endif

	return;

fault:
	/* roll back jump/branch */
	regs->cp0_epc = origpc;
	regs->regs[31] = orig31;
	/* Did we have an exception handler installed? */
	if (fixup_exception(regs))
		return;

	die_if_kernel("Unhandled kernel unaligned access", regs);
	force_sig(SIGSEGV, current);

	return;

sigbus:
	die_if_kernel("Unhandled kernel unaligned access", regs);
	force_sig(SIGBUS, current);

	return;

sigill:
	die_if_kernel
	    ("Unhandled kernel unaligned access or invalid instruction", regs);
	force_sig(SIGILL, current);
}
2769 
2770 asmlinkage void do_ade(struct pt_regs *regs)
2771 {
2772 	enum ctx_state prev_state;
2773 	unsigned int __user *pc;
2774 	mm_segment_t seg;
2775 
2776 	prev_state = exception_enter();
2777 	perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS,
2778 			1, regs, regs->cp0_badvaddr);
2779 	/*
2780 	 * Did we catch a fault trying to load an instruction?
2781 	 */
2782 	if (regs->cp0_badvaddr == regs->cp0_epc)
2783 		goto sigbus;
2784 
2785 	if (user_mode(regs) && !test_thread_flag(TIF_FIXADE))
2786 		goto sigbus;
2787 	if (unaligned_action == UNALIGNED_ACTION_SIGNAL)
2788 		goto sigbus;
2789 
2790 	/*
2791 	 * Do branch emulation only if we didn't forward the exception.
2792 	 * This is all so but ugly ...
2793 	 */
2794 
2795 	/*
2796 	 * Are we running in microMIPS mode?
2797 	 */
2798 	if (get_isa16_mode(regs->cp0_epc)) {
2799 		/*
2800 		 * Did we catch a fault trying to load an instruction in
2801 		 * 16-bit mode?
2802 		 */
2803 		if (regs->cp0_badvaddr == msk_isa16_mode(regs->cp0_epc))
2804 			goto sigbus;
2805 		if (unaligned_action == UNALIGNED_ACTION_SHOW)
2806 			show_registers(regs);
2807 
2808 		if (cpu_has_mmips) {
2809 			seg = get_fs();
2810 			if (!user_mode(regs))
2811 				set_fs(KERNEL_DS);
2812 			emulate_load_store_microMIPS(regs,
2813 				(void __user *)regs->cp0_badvaddr);
2814 			set_fs(seg);
2815 
2816 			return;
2817 		}
2818 
2819 		if (cpu_has_mips16) {
2820 			seg = get_fs();
2821 			if (!user_mode(regs))
2822 				set_fs(KERNEL_DS);
2823 			emulate_load_store_MIPS16e(regs,
2824 				(void __user *)regs->cp0_badvaddr);
2825 			set_fs(seg);
2826 
2827 			return;
2828 	}
2829 
2830 		goto sigbus;
2831 	}
2832 
2833 	if (unaligned_action == UNALIGNED_ACTION_SHOW)
2834 		show_registers(regs);
2835 	pc = (unsigned int __user *)exception_epc(regs);
2836 
2837 	seg = get_fs();
2838 	if (!user_mode(regs))
2839 		set_fs(KERNEL_DS);
2840 	emulate_load_store_insn(regs, (void __user *)regs->cp0_badvaddr, pc);
2841 	set_fs(seg);
2842 
2843 	return;
2844 
2845 sigbus:
2846 	die_if_kernel("Kernel unaligned instruction access", regs);
2847 	force_sig(SIGBUS, current);
2848 
2849 	/*
2850 	 * XXX On return from the signal handler we should advance the epc
2851 	 */
2852 	exception_exit(prev_state);
2853 }
2854 
2855 #ifdef CONFIG_DEBUG_FS
2856 extern struct dentry *mips_debugfs_dir;
2857 static int __init debugfs_unaligned(void)
2858 {
2859 	struct dentry *d;
2860 
2861 	if (!mips_debugfs_dir)
2862 		return -ENODEV;
2863 	d = debugfs_create_u32("unaligned_instructions", S_IRUGO,
2864 			       mips_debugfs_dir, &unaligned_instructions);
2865 	if (!d)
2866 		return -ENOMEM;
2867 	d = debugfs_create_u32("unaligned_action", S_IRUGO | S_IWUSR,
2868 			       mips_debugfs_dir, &unaligned_action);
2869 	if (!d)
2870 		return -ENOMEM;
2871 	return 0;
2872 }
2873 __initcall(debugfs_unaligned);
2874 #endif
2875