• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Single-step support.
4  *
5  * Copyright (C) 2004 Paul Mackerras <paulus@au.ibm.com>, IBM
6  */
7 #include <linux/kernel.h>
8 #include <linux/kprobes.h>
9 #include <linux/ptrace.h>
10 #include <linux/prefetch.h>
11 #include <asm/sstep.h>
12 #include <asm/processor.h>
13 #include <linux/uaccess.h>
14 #include <asm/cpu_has_feature.h>
15 #include <asm/cputable.h>
16 
17 extern char system_call_common[];
18 
19 #ifdef CONFIG_PPC64
20 /* Bits in SRR1 that are copied from MSR */
21 #define MSR_MASK	0xffffffff87c0ffffUL
22 #else
23 #define MSR_MASK	0x87c0ffff
24 #endif
25 
26 /* Bits in XER */
27 #define XER_SO		0x80000000U
28 #define XER_OV		0x40000000U
29 #define XER_CA		0x20000000U
30 #define XER_OV32	0x00080000U
31 #define XER_CA32	0x00040000U
32 
33 #ifdef CONFIG_PPC_FPU
34 /*
35  * Functions in ldstfp.S
36  */
37 extern void get_fpr(int rn, double *p);
38 extern void put_fpr(int rn, const double *p);
39 extern void get_vr(int rn, __vector128 *p);
40 extern void put_vr(int rn, __vector128 *p);
41 extern void load_vsrn(int vsr, const void *p);
42 extern void store_vsrn(int vsr, void *p);
43 extern void conv_sp_to_dp(const float *sp, double *dp);
44 extern void conv_dp_to_sp(const double *dp, float *sp);
45 #endif
46 
47 #ifdef __powerpc64__
48 /*
49  * Functions in quad.S
50  */
51 extern int do_lq(unsigned long ea, unsigned long *regs);
52 extern int do_stq(unsigned long ea, unsigned long val0, unsigned long val1);
53 extern int do_lqarx(unsigned long ea, unsigned long *regs);
54 extern int do_stqcx(unsigned long ea, unsigned long val0, unsigned long val1,
55 		    unsigned int *crp);
56 #endif
57 
58 #ifdef __LITTLE_ENDIAN__
59 #define IS_LE	1
60 #define IS_BE	0
61 #else
62 #define IS_LE	0
63 #define IS_BE	1
64 #endif
65 
66 /*
67  * Emulate the truncation of 64 bit values in 32-bit mode.
68  */
truncate_if_32bit(unsigned long msr,unsigned long val)69 static nokprobe_inline unsigned long truncate_if_32bit(unsigned long msr,
70 							unsigned long val)
71 {
72 #ifdef __powerpc64__
73 	if ((msr & MSR_64BIT) == 0)
74 		val &= 0xffffffffUL;
75 #endif
76 	return val;
77 }
78 
79 /*
80  * Determine whether a conditional branch instruction would branch.
81  */
branch_taken(unsigned int instr,const struct pt_regs * regs,struct instruction_op * op)82 static nokprobe_inline int branch_taken(unsigned int instr,
83 					const struct pt_regs *regs,
84 					struct instruction_op *op)
85 {
86 	unsigned int bo = (instr >> 21) & 0x1f;
87 	unsigned int bi;
88 
89 	if ((bo & 4) == 0) {
90 		/* decrement counter */
91 		op->type |= DECCTR;
92 		if (((bo >> 1) & 1) ^ (regs->ctr == 1))
93 			return 0;
94 	}
95 	if ((bo & 0x10) == 0) {
96 		/* check bit from CR */
97 		bi = (instr >> 16) & 0x1f;
98 		if (((regs->ccr >> (31 - bi)) & 1) != ((bo >> 3) & 1))
99 			return 0;
100 	}
101 	return 1;
102 }
103 
/*
 * Check that a user-mode access of nb bytes at ea is permitted.
 * Returns 1 if OK (always for kernel-mode regs); on failure returns 0
 * and records a faulting address in regs->dar for the caller to report.
 */
static nokprobe_inline long address_ok(struct pt_regs *regs,
				       unsigned long ea, int nb)
{
	if (!user_mode(regs))
		return 1;
	if (__access_ok(ea, nb, USER_DS))
		return 1;
	if (__access_ok(ea, 1, USER_DS))
		/* Access overlaps the end of the user region */
		regs->dar = USER_DS.seg;
	else
		regs->dar = ea;
	return 0;
}
118 
119 /*
120  * Calculate effective address for a D-form instruction
121  */
dform_ea(unsigned int instr,const struct pt_regs * regs)122 static nokprobe_inline unsigned long dform_ea(unsigned int instr,
123 					      const struct pt_regs *regs)
124 {
125 	int ra;
126 	unsigned long ea;
127 
128 	ra = (instr >> 16) & 0x1f;
129 	ea = (signed short) instr;		/* sign-extend */
130 	if (ra)
131 		ea += regs->gpr[ra];
132 
133 	return ea;
134 }
135 
136 #ifdef __powerpc64__
137 /*
138  * Calculate effective address for a DS-form instruction
139  */
dsform_ea(unsigned int instr,const struct pt_regs * regs)140 static nokprobe_inline unsigned long dsform_ea(unsigned int instr,
141 					       const struct pt_regs *regs)
142 {
143 	int ra;
144 	unsigned long ea;
145 
146 	ra = (instr >> 16) & 0x1f;
147 	ea = (signed short) (instr & ~3);	/* sign-extend */
148 	if (ra)
149 		ea += regs->gpr[ra];
150 
151 	return ea;
152 }
153 
154 /*
155  * Calculate effective address for a DQ-form instruction
156  */
dqform_ea(unsigned int instr,const struct pt_regs * regs)157 static nokprobe_inline unsigned long dqform_ea(unsigned int instr,
158 					       const struct pt_regs *regs)
159 {
160 	int ra;
161 	unsigned long ea;
162 
163 	ra = (instr >> 16) & 0x1f;
164 	ea = (signed short) (instr & ~0xf);	/* sign-extend */
165 	if (ra)
166 		ea += regs->gpr[ra];
167 
168 	return ea;
169 }
170 #endif /* __powerpc64 */
171 
172 /*
173  * Calculate effective address for an X-form instruction
174  */
xform_ea(unsigned int instr,const struct pt_regs * regs)175 static nokprobe_inline unsigned long xform_ea(unsigned int instr,
176 					      const struct pt_regs *regs)
177 {
178 	int ra, rb;
179 	unsigned long ea;
180 
181 	ra = (instr >> 16) & 0x1f;
182 	rb = (instr >> 11) & 0x1f;
183 	ea = regs->gpr[rb];
184 	if (ra)
185 		ea += regs->gpr[ra];
186 
187 	return ea;
188 }
189 
190 /*
191  * Return the largest power of 2, not greater than sizeof(unsigned long),
192  * such that x is a multiple of it.
193  */
max_align(unsigned long x)194 static nokprobe_inline unsigned long max_align(unsigned long x)
195 {
196 	x |= sizeof(unsigned long);
197 	return x & -x;		/* isolates rightmost bit */
198 }
199 
byterev_2(unsigned long x)200 static nokprobe_inline unsigned long byterev_2(unsigned long x)
201 {
202 	return ((x >> 8) & 0xff) | ((x & 0xff) << 8);
203 }
204 
byterev_4(unsigned long x)205 static nokprobe_inline unsigned long byterev_4(unsigned long x)
206 {
207 	return ((x >> 24) & 0xff) | ((x >> 8) & 0xff00) |
208 		((x & 0xff00) << 8) | ((x & 0xff) << 24);
209 }
210 
211 #ifdef __powerpc64__
byterev_8(unsigned long x)212 static nokprobe_inline unsigned long byterev_8(unsigned long x)
213 {
214 	return (byterev_4(x) << 32) | byterev_4(x >> 32);
215 }
216 #endif
217 
/*
 * Reverse the nb bytes at *ptr in place.  nb must be 2, 4, or (on
 * 64-bit) 8 or 16; any other size warns once and leaves *ptr unchanged.
 */
static nokprobe_inline void do_byte_reverse(void *ptr, int nb)
{
	switch (nb) {
	case 2:
		*(u16 *)ptr = byterev_2(*(u16 *)ptr);
		break;
	case 4:
		*(u32 *)ptr = byterev_4(*(u32 *)ptr);
		break;
#ifdef __powerpc64__
	case 8:
		*(unsigned long *)ptr = byterev_8(*(unsigned long *)ptr);
		break;
	case 16: {
		unsigned long *up = (unsigned long *)ptr;
		unsigned long tmp;
		/* full 16-byte reverse: swap the halves and reverse each */
		tmp = byterev_8(up[0]);
		up[0] = byterev_8(up[1]);
		up[1] = tmp;
		break;
	}
#endif
	default:
		WARN_ON_ONCE(1);
	}
}
244 
/*
 * Read an aligned value of nb bytes (1, 2, 4, or 8 on 64-bit) from
 * user address ea into *dest, zero-extended.  On a fault, records the
 * address in regs->dar and returns the __get_user error.
 */
static nokprobe_inline int read_mem_aligned(unsigned long *dest,
					    unsigned long ea, int nb,
					    struct pt_regs *regs)
{
	int err = 0;
	unsigned long x = 0;

	switch (nb) {
	case 1:
		err = __get_user(x, (unsigned char __user *) ea);
		break;
	case 2:
		err = __get_user(x, (unsigned short __user *) ea);
		break;
	case 4:
		err = __get_user(x, (unsigned int __user *) ea);
		break;
#ifdef __powerpc64__
	case 8:
		err = __get_user(x, (unsigned long __user *) ea);
		break;
#endif
	}
	if (!err)
		*dest = x;
	else
		regs->dar = ea;
	return err;
}
274 
275 /*
276  * Copy from userspace to a buffer, using the largest possible
277  * aligned accesses, up to sizeof(long).
278  */
/*
 * Copy nb bytes from user address ea into dest, chunked so every
 * access is naturally aligned.  On a fault, records the failing
 * chunk's address in regs->dar and returns the error.
 */
static nokprobe_inline int copy_mem_in(u8 *dest, unsigned long ea, int nb,
				       struct pt_regs *regs)
{
	int err = 0;
	int c;

	for (; nb > 0; nb -= c) {
		/* chunk size: largest aligned power of 2 that both ea and nb allow */
		c = max_align(ea);
		if (c > nb)
			c = max_align(nb);
		switch (c) {
		case 1:
			err = __get_user(*dest, (unsigned char __user *) ea);
			break;
		case 2:
			err = __get_user(*(u16 *)dest,
					 (unsigned short __user *) ea);
			break;
		case 4:
			err = __get_user(*(u32 *)dest,
					 (unsigned int __user *) ea);
			break;
#ifdef __powerpc64__
		case 8:
			err = __get_user(*(unsigned long *)dest,
					 (unsigned long __user *) ea);
			break;
#endif
		}
		if (err) {
			regs->dar = ea;
			return err;
		}
		dest += c;
		ea += c;
	}
	return 0;
}
317 
read_mem_unaligned(unsigned long * dest,unsigned long ea,int nb,struct pt_regs * regs)318 static nokprobe_inline int read_mem_unaligned(unsigned long *dest,
319 					      unsigned long ea, int nb,
320 					      struct pt_regs *regs)
321 {
322 	union {
323 		unsigned long ul;
324 		u8 b[sizeof(unsigned long)];
325 	} u;
326 	int i;
327 	int err;
328 
329 	u.ul = 0;
330 	i = IS_BE ? sizeof(unsigned long) - nb : 0;
331 	err = copy_mem_in(&u.b[i], ea, nb, regs);
332 	if (!err)
333 		*dest = u.ul;
334 	return err;
335 }
336 
337 /*
338  * Read memory at address ea for nb bytes, return 0 for success
339  * or -EFAULT if an error occurred.  N.B. nb must be 1, 2, 4 or 8.
340  * If nb < sizeof(long), the result is right-justified on BE systems.
341  */
read_mem(unsigned long * dest,unsigned long ea,int nb,struct pt_regs * regs)342 static int read_mem(unsigned long *dest, unsigned long ea, int nb,
343 			      struct pt_regs *regs)
344 {
345 	if (!address_ok(regs, ea, nb))
346 		return -EFAULT;
347 	if ((ea & (nb - 1)) == 0)
348 		return read_mem_aligned(dest, ea, nb, regs);
349 	return read_mem_unaligned(dest, ea, nb, regs);
350 }
351 NOKPROBE_SYMBOL(read_mem);
352 
/*
 * Write the low nb bytes (1, 2, 4, or 8 on 64-bit) of val to the
 * aligned user address ea.  On a fault, records ea in regs->dar and
 * returns the __put_user error.
 */
static nokprobe_inline int write_mem_aligned(unsigned long val,
					     unsigned long ea, int nb,
					     struct pt_regs *regs)
{
	int err = 0;

	switch (nb) {
	case 1:
		err = __put_user(val, (unsigned char __user *) ea);
		break;
	case 2:
		err = __put_user(val, (unsigned short __user *) ea);
		break;
	case 4:
		err = __put_user(val, (unsigned int __user *) ea);
		break;
#ifdef __powerpc64__
	case 8:
		err = __put_user(val, (unsigned long __user *) ea);
		break;
#endif
	}
	if (err)
		regs->dar = ea;
	return err;
}
379 
380 /*
381  * Copy from a buffer to userspace, using the largest possible
382  * aligned accesses, up to sizeof(long).
383  */
/*
 * Copy nb bytes from dest (a kernel buffer, despite the name) to user
 * address ea, chunked so every access is naturally aligned.  On a
 * fault, records the failing chunk's address in regs->dar.
 */
static nokprobe_inline int copy_mem_out(u8 *dest, unsigned long ea, int nb,
					struct pt_regs *regs)
{
	int err = 0;
	int c;

	for (; nb > 0; nb -= c) {
		/* chunk size: largest aligned power of 2 that both ea and nb allow */
		c = max_align(ea);
		if (c > nb)
			c = max_align(nb);
		switch (c) {
		case 1:
			err = __put_user(*dest, (unsigned char __user *) ea);
			break;
		case 2:
			err = __put_user(*(u16 *)dest,
					 (unsigned short __user *) ea);
			break;
		case 4:
			err = __put_user(*(u32 *)dest,
					 (unsigned int __user *) ea);
			break;
#ifdef __powerpc64__
		case 8:
			err = __put_user(*(unsigned long *)dest,
					 (unsigned long __user *) ea);
			break;
#endif
		}
		if (err) {
			regs->dar = ea;
			return err;
		}
		dest += c;
		ea += c;
	}
	return 0;
}
422 
write_mem_unaligned(unsigned long val,unsigned long ea,int nb,struct pt_regs * regs)423 static nokprobe_inline int write_mem_unaligned(unsigned long val,
424 					       unsigned long ea, int nb,
425 					       struct pt_regs *regs)
426 {
427 	union {
428 		unsigned long ul;
429 		u8 b[sizeof(unsigned long)];
430 	} u;
431 	int i;
432 
433 	u.ul = val;
434 	i = IS_BE ? sizeof(unsigned long) - nb : 0;
435 	return copy_mem_out(&u.b[i], ea, nb, regs);
436 }
437 
438 /*
439  * Write memory at address ea for nb bytes, return 0 for success
440  * or -EFAULT if an error occurred.  N.B. nb must be 1, 2, 4 or 8.
441  */
write_mem(unsigned long val,unsigned long ea,int nb,struct pt_regs * regs)442 static int write_mem(unsigned long val, unsigned long ea, int nb,
443 			       struct pt_regs *regs)
444 {
445 	if (!address_ok(regs, ea, nb))
446 		return -EFAULT;
447 	if ((ea & (nb - 1)) == 0)
448 		return write_mem_aligned(val, ea, nb, regs);
449 	return write_mem_unaligned(val, ea, nb, regs);
450 }
451 NOKPROBE_SYMBOL(write_mem);
452 
453 #ifdef CONFIG_PPC_FPU
454 /*
455  * These access either the real FP register or the image in the
456  * thread_struct, depending on regs->msr & MSR_FP.
457  */
/*
 * Emulate a floating-point load: fetch GETSIZE(op->type) bytes from ea
 * into FP register op->reg (the real register if MSR_FP is set, else
 * the thread_struct image), handling single->double conversion, sign
 * extension, and per-doubleword byte reversal for cross-endian mode.
 * Returns 0, -EINVAL for an oversized access, -EFAULT, or a copy error.
 */
static int do_fp_load(struct instruction_op *op, unsigned long ea,
		      struct pt_regs *regs, bool cross_endian)
{
	int err, rn, nb;
	union {
		int i;
		unsigned int u;
		float f;
		double d[2];
		unsigned long l[2];
		u8 b[2 * sizeof(double)];
	} u;

	nb = GETSIZE(op->type);
	if (nb > sizeof(u))
		return -EINVAL;
	if (!address_ok(regs, ea, nb))
		return -EFAULT;
	rn = op->reg;
	err = copy_mem_in(u.b, ea, nb, regs);
	if (err)
		return err;
	if (unlikely(cross_endian)) {
		/* reverse each 8-byte half separately (16 bytes = lfdp pair) */
		do_byte_reverse(u.b, min(nb, 8));
		if (nb == 16)
			do_byte_reverse(&u.b[8], 8);
	}
	/* no preemption while touching the FP register file / thread state */
	preempt_disable();
	if (nb == 4) {
		if (op->type & FPCONV)
			conv_sp_to_dp(&u.f, &u.d[0]);
		else if (op->type & SIGNEXT)
			u.l[0] = u.i;
		else
			u.l[0] = u.u;
	}
	if (regs->msr & MSR_FP)
		put_fpr(rn, &u.d[0]);
	else
		current->thread.TS_FPR(rn) = u.l[0];
	if (nb == 16) {
		/* lfdp */
		rn |= 1;
		if (regs->msr & MSR_FP)
			put_fpr(rn, &u.d[1]);
		else
			current->thread.TS_FPR(rn) = u.l[1];
	}
	preempt_enable();
	return 0;
}
NOKPROBE_SYMBOL(do_fp_load);
510 
/*
 * Emulate a floating-point store: read FP register op->reg (real
 * register if MSR_FP is set, else the thread_struct image), apply
 * double->single conversion for 4-byte FPCONV stores and byte reversal
 * for cross-endian mode, then write GETSIZE(op->type) bytes to ea.
 */
static int do_fp_store(struct instruction_op *op, unsigned long ea,
		       struct pt_regs *regs, bool cross_endian)
{
	int rn, nb;
	union {
		unsigned int u;
		float f;
		double d[2];
		unsigned long l[2];
		u8 b[2 * sizeof(double)];
	} u;

	nb = GETSIZE(op->type);
	if (nb > sizeof(u))
		return -EINVAL;
	if (!address_ok(regs, ea, nb))
		return -EFAULT;
	rn = op->reg;
	/* no preemption while reading the FP register file / thread state */
	preempt_disable();
	if (regs->msr & MSR_FP)
		get_fpr(rn, &u.d[0]);
	else
		u.l[0] = current->thread.TS_FPR(rn);
	if (nb == 4) {
		if (op->type & FPCONV)
			conv_dp_to_sp(&u.d[0], &u.f);
		else
			u.u = u.l[0];
	}
	if (nb == 16) {
		/* stfdp stores the even/odd register pair */
		rn |= 1;
		if (regs->msr & MSR_FP)
			get_fpr(rn, &u.d[1]);
		else
			u.l[1] = current->thread.TS_FPR(rn);
	}
	preempt_enable();
	if (unlikely(cross_endian)) {
		/* reverse each 8-byte half separately */
		do_byte_reverse(u.b, min(nb, 8));
		if (nb == 16)
			do_byte_reverse(&u.b[8], 8);
	}
	return copy_mem_out(u.b, ea, nb, regs);
}
NOKPROBE_SYMBOL(do_fp_store);
556 #endif
557 
558 #ifdef CONFIG_ALTIVEC
559 /* For Altivec/VMX, no need to worry about alignment */
/*
 * Emulate a VMX load into vector register rn.  The access is forced to
 * a multiple of size within a 16-byte-aligned quadword; the loaded
 * bytes are placed at their natural offset (ea & 0xf) in the register
 * image.  Writes the real register if MSR_VEC is set, else the
 * thread_struct image.
 */
static nokprobe_inline int do_vec_load(int rn, unsigned long ea,
				       int size, struct pt_regs *regs,
				       bool cross_endian)
{
	int err;
	union {
		__vector128 v;
		u8 b[sizeof(__vector128)];
	} u = {};

	if (size > sizeof(u))
		return -EINVAL;

	/* check the whole containing quadword, as the hardware accesses it */
	if (!address_ok(regs, ea & ~0xfUL, 16))
		return -EFAULT;
	/* align to multiple of size */
	ea &= ~(size - 1);
	err = copy_mem_in(&u.b[ea & 0xf], ea, size, regs);
	if (err)
		return err;
	if (unlikely(cross_endian))
		do_byte_reverse(&u.b[ea & 0xf], size);
	preempt_disable();
	if (regs->msr & MSR_VEC)
		put_vr(rn, &u.v);
	else
		current->thread.vr_state.vr[rn] = u.v;
	preempt_enable();
	return 0;
}
590 
/*
 * Emulate a VMX store from vector register rn.  Mirrors do_vec_load():
 * the access is forced to a multiple of size within an aligned
 * quadword, and the stored bytes are taken from their natural offset
 * (ea & 0xf) in the register image.
 */
static nokprobe_inline int do_vec_store(int rn, unsigned long ea,
					int size, struct pt_regs *regs,
					bool cross_endian)
{
	union {
		__vector128 v;
		u8 b[sizeof(__vector128)];
	} u;

	if (size > sizeof(u))
		return -EINVAL;

	if (!address_ok(regs, ea & ~0xfUL, 16))
		return -EFAULT;
	/* align to multiple of size */
	ea &= ~(size - 1);

	/* no preemption while reading the vector register file / thread state */
	preempt_disable();
	if (regs->msr & MSR_VEC)
		get_vr(rn, &u.v);
	else
		u.v = current->thread.vr_state.vr[rn];
	preempt_enable();
	if (unlikely(cross_endian))
		do_byte_reverse(&u.b[ea & 0xf], size);
	return copy_mem_out(&u.b[ea & 0xf], ea, size, regs);
}
618 #endif /* CONFIG_ALTIVEC */
619 
620 #ifdef __powerpc64__
/*
 * Emulate lq: load a quadword from ea into the GPR pair reg, reg+1.
 * An aligned access goes through do_lq() (in quad.S) so it can be a
 * single atomic instruction; otherwise it is done as two doubleword
 * reads, ordered by IS_LE/IS_BE so the pair ends up the same either way.
 */
static nokprobe_inline int emulate_lq(struct pt_regs *regs, unsigned long ea,
				      int reg, bool cross_endian)
{
	int err;

	if (!address_ok(regs, ea, 16))
		return -EFAULT;
	/* if aligned, should be atomic */
	if ((ea & 0xf) == 0) {
		err = do_lq(ea, &regs->gpr[reg]);
	} else {
		err = read_mem(&regs->gpr[reg + IS_LE], ea, 8, regs);
		if (!err)
			err = read_mem(&regs->gpr[reg + IS_BE], ea + 8, 8, regs);
	}
	if (!err && unlikely(cross_endian))
		do_byte_reverse(&regs->gpr[reg], 16);
	return err;
}
640 
/*
 * Emulate stq: store the GPR pair reg, reg+1 as a quadword at ea.
 * An aligned access goes through do_stq() (in quad.S) so it can be a
 * single atomic instruction; otherwise two doubleword writes are done,
 * ordered by IS_LE/IS_BE to match the in-memory layout of emulate_lq().
 */
static nokprobe_inline int emulate_stq(struct pt_regs *regs, unsigned long ea,
				       int reg, bool cross_endian)
{
	int err;
	unsigned long vals[2];

	if (!address_ok(regs, ea, 16))
		return -EFAULT;
	vals[0] = regs->gpr[reg];
	vals[1] = regs->gpr[reg + 1];
	if (unlikely(cross_endian))
		do_byte_reverse(vals, 16);

	/* if aligned, should be atomic */
	if ((ea & 0xf) == 0)
		return do_stq(ea, vals[0], vals[1]);

	err = write_mem(vals[IS_LE], ea, 8, regs);
	if (!err)
		err = write_mem(vals[IS_BE], ea + 8, 8, regs);
	return err;
}
663 #endif /* __powerpc64 */
664 
665 #ifdef CONFIG_VSX
/*
 * Decode the raw bytes at "mem" into the VSX register image *reg,
 * according to op->element_size (1/2/4/8/16 bytes per element) and
 * op->vsx_flags (splat, FP conversion, load-left).  "rev" requests
 * per-element byte reversal (cross-endian).  This only fills in *reg;
 * it never touches the real register file.
 */
void emulate_vsx_load(struct instruction_op *op, union vsx_reg *reg,
		      const void *mem, bool rev)
{
	int size, read_size;
	int i, j;
	const unsigned int *wp;
	const unsigned short *hp;
	const unsigned char *bp;

	size = GETSIZE(op->type);
	reg->d[0] = reg->d[1] = 0;

	switch (op->element_size) {
	case 16:
		/* whole vector; lxv[x] or lxvl[l] */
		if (size == 0)
			break;
		memcpy(reg, mem, size);
		if (IS_LE && (op->vsx_flags & VSX_LDLEFT))
			rev = !rev;
		if (rev)
			do_byte_reverse(reg, 16);
		break;
	case 8:
		/* scalar loads, lxvd2x, lxvdsx */
		read_size = (size >= 8) ? 8 : size;
		/* place the scalar in doubleword 0 (endian-dependent offset) */
		i = IS_LE ? 8 : 8 - read_size;
		memcpy(&reg->b[i], mem, read_size);
		if (rev)
			do_byte_reverse(&reg->b[i], 8);
		if (size < 8) {
			if (op->type & SIGNEXT) {
				/* size == 4 is the only case here */
				reg->d[IS_LE] = (signed int) reg->d[IS_LE];
			} else if (op->vsx_flags & VSX_FPCONV) {
				preempt_disable();
				conv_sp_to_dp(&reg->fp[1 + IS_LE],
					      &reg->dp[IS_LE]);
				preempt_enable();
			}
		} else {
			if (size == 16) {
				unsigned long v = *(unsigned long *)(mem + 8);
				reg->d[IS_BE] = !rev ? v : byterev_8(v);
			} else if (op->vsx_flags & VSX_SPLAT)
				/* lxvdsx: duplicate the doubleword */
				reg->d[IS_BE] = reg->d[IS_LE];
		}
		break;
	case 4:
		/* lxvw4x, lxvwsx */
		wp = mem;
		for (j = 0; j < size / 4; ++j) {
			i = IS_LE ? 3 - j : j;
			reg->w[i] = !rev ? *wp++ : byterev_4(*wp++);
		}
		if (op->vsx_flags & VSX_SPLAT) {
			/* replicate the first word into the remaining slots */
			u32 val = reg->w[IS_LE ? 3 : 0];
			for (; j < 4; ++j) {
				i = IS_LE ? 3 - j : j;
				reg->w[i] = val;
			}
		}
		break;
	case 2:
		/* lxvh8x */
		hp = mem;
		for (j = 0; j < size / 2; ++j) {
			i = IS_LE ? 7 - j : j;
			reg->h[i] = !rev ? *hp++ : byterev_2(*hp++);
		}
		break;
	case 1:
		/* lxvb16x */
		bp = mem;
		for (j = 0; j < size; ++j) {
			i = IS_LE ? 15 - j : j;
			reg->b[i] = *bp++;
		}
		break;
	}
}
EXPORT_SYMBOL_GPL(emulate_vsx_load);
NOKPROBE_SYMBOL(emulate_vsx_load);
749 
/*
 * Encode the VSX register image *reg into raw bytes at "mem",
 * according to op->element_size and op->vsx_flags — the inverse of
 * emulate_vsx_load().  "rev" requests per-element byte reversal
 * (cross-endian).  This only reads *reg; it never touches the real
 * register file.
 */
void emulate_vsx_store(struct instruction_op *op, const union vsx_reg *reg,
		       void *mem, bool rev)
{
	int size, write_size;
	int i, j;
	union vsx_reg buf;
	unsigned int *wp;
	unsigned short *hp;
	unsigned char *bp;

	size = GETSIZE(op->type);

	switch (op->element_size) {
	case 16:
		/* stxv, stxvx, stxvl, stxvll */
		if (size == 0)
			break;
		if (IS_LE && (op->vsx_flags & VSX_LDLEFT))
			rev = !rev;
		if (rev) {
			/* reverse 16 bytes */
			buf.d[0] = byterev_8(reg->d[1]);
			buf.d[1] = byterev_8(reg->d[0]);
			reg = &buf;
		}
		memcpy(mem, reg, size);
		break;
	case 8:
		/* scalar stores, stxvd2x */
		write_size = (size >= 8) ? 8 : size;
		/* the scalar lives in doubleword 0 (endian-dependent offset) */
		i = IS_LE ? 8 : 8 - write_size;
		if (size < 8 && op->vsx_flags & VSX_FPCONV) {
			buf.d[0] = buf.d[1] = 0;
			preempt_disable();
			conv_dp_to_sp(&reg->dp[IS_LE], &buf.fp[1 + IS_LE]);
			preempt_enable();
			reg = &buf;
		}
		memcpy(mem, &reg->b[i], write_size);
		if (size == 16)
			memcpy(mem + 8, &reg->d[IS_BE], 8);
		if (unlikely(rev)) {
			do_byte_reverse(mem, write_size);
			if (size == 16)
				do_byte_reverse(mem + 8, 8);
		}
		break;
	case 4:
		/* stxvw4x */
		wp = mem;
		for (j = 0; j < size / 4; ++j) {
			i = IS_LE ? 3 - j : j;
			*wp++ = !rev ? reg->w[i] : byterev_4(reg->w[i]);
		}
		break;
	case 2:
		/* stxvh8x */
		hp = mem;
		for (j = 0; j < size / 2; ++j) {
			i = IS_LE ? 7 - j : j;
			*hp++ = !rev ? reg->h[i] : byterev_2(reg->h[i]);
		}
		break;
	case 1:
		/* stvxb16x */
		bp = mem;
		for (j = 0; j < size; ++j) {
			i = IS_LE ? 15 - j : j;
			*bp++ = reg->b[i];
		}
		break;
	}
}
EXPORT_SYMBOL_GPL(emulate_vsx_store);
NOKPROBE_SYMBOL(emulate_vsx_store);
825 
/*
 * Emulate a VSX load: fetch the bytes from ea, decode them with
 * emulate_vsx_load(), and write the result to VSR op->reg — the real
 * register when the relevant facility (FP for VSR0-31, VEC for
 * VSR32-63) is enabled in the MSR, else the thread_struct image.
 */
static nokprobe_inline int do_vsx_load(struct instruction_op *op,
				       unsigned long ea, struct pt_regs *regs,
				       bool cross_endian)
{
	int reg = op->reg;
	u8 mem[16];
	union vsx_reg buf;
	int size = GETSIZE(op->type);

	if (!address_ok(regs, ea, size) || copy_mem_in(mem, ea, size, regs))
		return -EFAULT;

	emulate_vsx_load(op, &buf, mem, cross_endian);
	preempt_disable();
	if (reg < 32) {
		/* FP regs + extensions */
		if (regs->msr & MSR_FP) {
			load_vsrn(reg, &buf);
		} else {
			current->thread.fp_state.fpr[reg][0] = buf.d[0];
			current->thread.fp_state.fpr[reg][1] = buf.d[1];
		}
	} else {
		if (regs->msr & MSR_VEC)
			load_vsrn(reg, &buf);
		else
			current->thread.vr_state.vr[reg - 32] = buf.v;
	}
	preempt_enable();
	return 0;
}
857 
/*
 * Emulate a VSX store: read VSR op->reg into buf (via store_vsrn when
 * the relevant MSR facility is live, else from the thread_struct
 * image), encode it with emulate_vsx_store(), and write the bytes
 * to ea.
 */
static nokprobe_inline int do_vsx_store(struct instruction_op *op,
					unsigned long ea, struct pt_regs *regs,
					bool cross_endian)
{
	int reg = op->reg;
	u8 mem[16];
	union vsx_reg buf;
	int size = GETSIZE(op->type);

	if (!address_ok(regs, ea, size))
		return -EFAULT;

	preempt_disable();
	if (reg < 32) {
		/* FP regs + extensions */
		if (regs->msr & MSR_FP) {
			store_vsrn(reg, &buf);
		} else {
			buf.d[0] = current->thread.fp_state.fpr[reg][0];
			buf.d[1] = current->thread.fp_state.fpr[reg][1];
		}
	} else {
		if (regs->msr & MSR_VEC)
			store_vsrn(reg, &buf);
		else
			buf.v = current->thread.vr_state.vr[reg - 32];
	}
	preempt_enable();
	emulate_vsx_store(op, &buf, mem, cross_endian);
	return  copy_mem_out(mem, ea, size, regs);
}
889 #endif /* CONFIG_VSX */
890 
/*
 * Emulate dcbz: zero the whole data cache block containing ea, one
 * long at a time.  Returns 0, or the fault error with regs->dar set.
 */
int emulate_dcbz(unsigned long ea, struct pt_regs *regs)
{
	int err;
	unsigned long i, size;

#ifdef __powerpc64__
	size = ppc64_caches.l1d.block_size;
	/* in 32-bit mode the effective address is truncated to 32 bits */
	if (!(regs->msr & MSR_64BIT))
		ea &= 0xffffffffUL;
#else
	size = L1_CACHE_BYTES;
#endif
	/* round down to the start of the cache block */
	ea &= ~(size - 1);
	if (!address_ok(regs, ea, size))
		return -EFAULT;
	for (i = 0; i < size; i += sizeof(long)) {
		err = __put_user(0, (unsigned long __user *) (ea + i));
		if (err) {
			regs->dar = ea;
			return err;
		}
	}
	return 0;
}
NOKPROBE_SYMBOL(emulate_dcbz);
916 
/*
 * Perform a store-indexed instruction "op" of x to addr, capturing the
 * resulting CR via mfcr into "cr" (presumably so the caller can test the
 * store-conditional success bit — confirm against callers).  A faulting
 * access is routed through the exception table to set err = -EFAULT.
 * ".machine power8" widens the set of mnemonics the assembler accepts
 * beyond the build's baseline.
 */
#define __put_user_asmx(x, addr, err, op, cr)		\
	__asm__ __volatile__(				\
		".machine push\n"			\
		".machine power8\n"			\
		"1:	" op " %2,0,%3\n"		\
		".machine pop\n"			\
		"	mfcr	%1\n"			\
		"2:\n"					\
		".section .fixup,\"ax\"\n"		\
		"3:	li	%0,%4\n"		\
		"	b	2b\n"			\
		".previous\n"				\
		EX_TABLE(1b, 3b)			\
		: "=r" (err), "=r" (cr)			\
		: "r" (x), "r" (addr), "i" (-EFAULT), "0" (err))

/*
 * Perform a load-indexed instruction "op" from addr into x.  A faulting
 * access is routed through the exception table to set err = -EFAULT.
 */
#define __get_user_asmx(x, addr, err, op)		\
	__asm__ __volatile__(				\
		".machine push\n"			\
		".machine power8\n"			\
		"1:	"op" %1,0,%2\n"			\
		".machine pop\n"			\
		"2:\n"					\
		".section .fixup,\"ax\"\n"		\
		"3:	li	%0,%3\n"		\
		"	b	2b\n"			\
		".previous\n"				\
		EX_TABLE(1b, 3b)			\
		: "=r" (err), "=r" (x)			\
		: "r" (addr), "i" (-EFAULT), "0" (err))

/*
 * Perform a cache-management instruction "op" on addr (RA=0 form).
 * A faulting access sets err = -EFAULT via the exception table.
 */
#define __cacheop_user_asmx(addr, err, op)		\
	__asm__ __volatile__(				\
		"1:	"op" 0,%1\n"			\
		"2:\n"					\
		".section .fixup,\"ax\"\n"		\
		"3:	li	%0,%3\n"		\
		"	b	2b\n"			\
		".previous\n"				\
		EX_TABLE(1b, 3b)			\
		: "=r" (err)				\
		: "r" (addr), "i" (-EFAULT), "0" (err))
959 
/*
 * Set up op to record CR0 from the signed comparison of op->val with
 * zero, copying the SO bit from XER — the Rc=1 (record-form) behavior.
 */
static nokprobe_inline void set_cr0(const struct pt_regs *regs,
				    struct instruction_op *op)
{
	long val = op->val;

	op->type |= SETCC;
	/* keep CR1-CR7; move XER bit 31 (SO) down into CR0's SO position */
	op->ccval = (regs->ccr & 0x0fffffff) | ((regs->xer >> 3) & 0x10000000);
#ifdef __powerpc64__
	/* in 32-bit mode the comparison uses only the low 32 bits */
	if (!(regs->msr & MSR_64BIT))
		val = (int) val;
#endif
	if (val < 0)
		op->ccval |= 0x80000000;	/* LT */
	else if (val > 0)
		op->ccval |= 0x40000000;	/* GT */
	else
		op->ccval |= 0x20000000;	/* EQ */
}
978 
set_ca32(struct instruction_op * op,bool val)979 static nokprobe_inline void set_ca32(struct instruction_op *op, bool val)
980 {
981 	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
982 		if (val)
983 			op->xerval |= XER_CA32;
984 		else
985 			op->xerval &= ~XER_CA32;
986 	}
987 }
988 
/*
 * Compute val1 + val2 + carry_in, setting up op to write the sum to
 * GPR rd and to update XER[CA] (and CA32 where the CPU has it) with
 * the unsigned carry-out.
 */
static nokprobe_inline void add_with_carry(const struct pt_regs *regs,
				     struct instruction_op *op, int rd,
				     unsigned long val1, unsigned long val2,
				     unsigned long carry_in)
{
	unsigned long val = val1 + val2;

	if (carry_in)
		++val;
	op->type = COMPUTE + SETREG + SETXER;
	op->reg = rd;
	op->val = val;
#ifdef __powerpc64__
	/* in 32-bit mode, carry-out is judged on the low 32 bits only */
	if (!(regs->msr & MSR_64BIT)) {
		val = (unsigned int) val;
		val1 = (unsigned int) val1;
	}
#endif
	op->xerval = regs->xer;
	/* carry-out: the sum wrapped below an addend (or equals it with carry) */
	if (val < val1 || (carry_in && val == val1))
		op->xerval |= XER_CA;
	else
		op->xerval &= ~XER_CA;

	/* same test on the low 32 bits for CA32 */
	set_ca32(op, (unsigned int)val < (unsigned int)val1 ||
			(carry_in && (unsigned int)val == (unsigned int)val1));
}
1016 
do_cmp_signed(const struct pt_regs * regs,struct instruction_op * op,long v1,long v2,int crfld)1017 static nokprobe_inline void do_cmp_signed(const struct pt_regs *regs,
1018 					  struct instruction_op *op,
1019 					  long v1, long v2, int crfld)
1020 {
1021 	unsigned int crval, shift;
1022 
1023 	op->type = COMPUTE + SETCC;
1024 	crval = (regs->xer >> 31) & 1;		/* get SO bit */
1025 	if (v1 < v2)
1026 		crval |= 8;
1027 	else if (v1 > v2)
1028 		crval |= 4;
1029 	else
1030 		crval |= 2;
1031 	shift = (7 - crfld) * 4;
1032 	op->ccval = (regs->ccr & ~(0xf << shift)) | (crval << shift);
1033 }
1034 
do_cmp_unsigned(const struct pt_regs * regs,struct instruction_op * op,unsigned long v1,unsigned long v2,int crfld)1035 static nokprobe_inline void do_cmp_unsigned(const struct pt_regs *regs,
1036 					    struct instruction_op *op,
1037 					    unsigned long v1,
1038 					    unsigned long v2, int crfld)
1039 {
1040 	unsigned int crval, shift;
1041 
1042 	op->type = COMPUTE + SETCC;
1043 	crval = (regs->xer >> 31) & 1;		/* get SO bit */
1044 	if (v1 < v2)
1045 		crval |= 8;
1046 	else if (v1 > v2)
1047 		crval |= 4;
1048 	else
1049 		crval |= 2;
1050 	shift = (7 - crfld) * 4;
1051 	op->ccval = (regs->ccr & ~(0xf << shift)) | (crval << shift);
1052 }
1053 
do_cmpb(const struct pt_regs * regs,struct instruction_op * op,unsigned long v1,unsigned long v2)1054 static nokprobe_inline void do_cmpb(const struct pt_regs *regs,
1055 				    struct instruction_op *op,
1056 				    unsigned long v1, unsigned long v2)
1057 {
1058 	unsigned long long out_val, mask;
1059 	int i;
1060 
1061 	out_val = 0;
1062 	for (i = 0; i < 8; i++) {
1063 		mask = 0xffUL << (i * 8);
1064 		if ((v1 & mask) == (v2 & mask))
1065 			out_val |= mask;
1066 	}
1067 	op->val = out_val;
1068 }
1069 
1070 /*
1071  * The size parameter is used to adjust the equivalent popcnt instruction.
1072  * popcntb = 8, popcntw = 32, popcntd = 64
1073  */
static nokprobe_inline void do_popcnt(const struct pt_regs *regs,
				      struct instruction_op *op,
				      unsigned long v1, int size)
{
	unsigned long long out = v1;

	/* parallel bit count: 2-bit sums, then 4-bit sums within each byte */
	out -= (out >> 1) & 0x5555555555555555ULL;
	out = (0x3333333333333333ULL & out) +
	      (0x3333333333333333ULL & (out >> 2));
	out = (out + (out >> 4)) & 0x0f0f0f0f0f0f0f0fULL;

	if (size == 8) {	/* popcntb */
		/* each byte now holds its own population count */
		op->val = out;
		return;
	}
	/* accumulate byte counts up to 32-bit granularity */
	out += out >> 8;
	out += out >> 16;
	if (size == 32) {	/* popcntw */
		/* low 6 bits of each word hold that word's count */
		op->val = out & 0x0000003f0000003fULL;
		return;
	}

	out = (out + (out >> 32)) & 0x7f;
	op->val = out;	/* popcntd */
}
1099 
1100 #ifdef CONFIG_PPC64
/*
 * Emulate bpermd: each of the 8 bytes of v1 is a bit index into v2
 * (PPC_BIT uses MSB-0 numbering); bit i of the result is the selected
 * bit of v2, or 0 if the index is out of range (>= 64).
 */
static nokprobe_inline void do_bpermd(const struct pt_regs *regs,
				      struct instruction_op *op,
				      unsigned long v1, unsigned long v2)
{
	unsigned char perm, idx;
	unsigned int i;

	perm = 0;
	for (i = 0; i < 8; i++) {
		idx = (v1 >> (i * 8)) & 0xff;
		if (idx < 64)
			if (v2 & PPC_BIT(idx))
				perm |= 1 << i;
	}
	op->val = perm;
}
1117 #endif /* CONFIG_PPC64 */
1118 /*
1119  * The size parameter adjusts the equivalent prty instruction.
1120  * prtyw = 32, prtyd = 64
1121  */
do_prty(const struct pt_regs * regs,struct instruction_op * op,unsigned long v,int size)1122 static nokprobe_inline void do_prty(const struct pt_regs *regs,
1123 				    struct instruction_op *op,
1124 				    unsigned long v, int size)
1125 {
1126 	unsigned long long res = v ^ (v >> 8);
1127 
1128 	res ^= res >> 16;
1129 	if (size == 32) {		/* prtyw */
1130 		op->val = res & 0x0000000100000001ULL;
1131 		return;
1132 	}
1133 
1134 	res ^= res >> 32;
1135 	op->val = res & 1;	/*prtyd */
1136 }
1137 
trap_compare(long v1,long v2)1138 static nokprobe_inline int trap_compare(long v1, long v2)
1139 {
1140 	int ret = 0;
1141 
1142 	if (v1 < v2)
1143 		ret |= 0x10;
1144 	else if (v1 > v2)
1145 		ret |= 0x08;
1146 	else
1147 		ret |= 0x04;
1148 	if ((unsigned long)v1 < (unsigned long)v2)
1149 		ret |= 0x02;
1150 	else if ((unsigned long)v1 > (unsigned long)v2)
1151 		ret |= 0x01;
1152 	return ret;
1153 }
1154 
/*
 * Elements of 32-bit rotate and mask instructions.
 *
 * Masks use IBM (big-endian) bit numbering: bit 0 is the most
 * significant bit.  MASK32(mb, me) has 1s from bit mb through bit me;
 * the sum of the two part-masks plus the (me >= mb) carry term also
 * produces the correct wrapped mask when me < mb.
 * NOTE(review): MASK32/MASK64_R rely on arithmetic right shift of a
 * negative value, which is implementation-defined in ISO C but is the
 * behavior of the compilers the kernel supports.
 */
#define MASK32(mb, me)	((0xffffffffUL >> (mb)) + \
			 ((signed long)-0x80000000L >> (me)) + ((me) >= (mb)))
#ifdef __powerpc64__
/* 64-bit part-masks: 1s from bit mb to bit 63, or from bit 0 to bit me. */
#define MASK64_L(mb)	(~0UL >> (mb))
#define MASK64_R(me)	((signed long)-0x8000000000000000L >> (me))
#define MASK64(mb, me)	(MASK64_L(mb) + MASK64_R(me) + ((me) >= (mb)))
/* Replicate the low 32 bits of x into both halves of a 64-bit value. */
#define DATA32(x)	(((x) & 0xffffffffUL) | (((x) & 0xffffffffUL) << 32))
#else
#define DATA32(x)	(x)
#endif
/*
 * Rotate left by n within the native register width; n == 0 is
 * special-cased to avoid an undefined shift by the full word size.
 */
#define ROTATE(x, n)	((n) ? (((x) << (n)) | ((x) >> (8 * sizeof(long) - (n)))) : (x))
1169 
1170 /*
1171  * Decode an instruction, and return information about it in *op
1172  * without changing *regs.
1173  * Integer arithmetic and logical instructions, branches, and barrier
1174  * instructions can be emulated just using the information in *op.
1175  *
1176  * Return value is 1 if the instruction can be emulated just by
1177  * updating *regs with the information in *op, -1 if we need the
1178  * GPRs but *regs doesn't contain the full register set, or 0
1179  * otherwise.
1180  */
analyse_instr(struct instruction_op * op,const struct pt_regs * regs,unsigned int instr)1181 int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
1182 		  unsigned int instr)
1183 {
1184 	unsigned int opcode, ra, rb, rc, rd, spr, u;
1185 	unsigned long int imm;
1186 	unsigned long int val, val2;
1187 	unsigned int mb, me, sh;
1188 	long ival;
1189 
1190 	op->type = COMPUTE;
1191 
1192 	opcode = instr >> 26;
1193 	switch (opcode) {
1194 	case 16:	/* bc */
1195 		op->type = BRANCH;
1196 		imm = (signed short)(instr & 0xfffc);
1197 		if ((instr & 2) == 0)
1198 			imm += regs->nip;
1199 		op->val = truncate_if_32bit(regs->msr, imm);
1200 		if (instr & 1)
1201 			op->type |= SETLK;
1202 		if (branch_taken(instr, regs, op))
1203 			op->type |= BRTAKEN;
1204 		return 1;
1205 #ifdef CONFIG_PPC64
1206 	case 17:	/* sc */
1207 		if ((instr & 0xfe2) == 2)
1208 			op->type = SYSCALL;
1209 		else
1210 			op->type = UNKNOWN;
1211 		return 0;
1212 #endif
1213 	case 18:	/* b */
1214 		op->type = BRANCH | BRTAKEN;
1215 		imm = instr & 0x03fffffc;
1216 		if (imm & 0x02000000)
1217 			imm -= 0x04000000;
1218 		if ((instr & 2) == 0)
1219 			imm += regs->nip;
1220 		op->val = truncate_if_32bit(regs->msr, imm);
1221 		if (instr & 1)
1222 			op->type |= SETLK;
1223 		return 1;
1224 	case 19:
1225 		switch ((instr >> 1) & 0x3ff) {
1226 		case 0:		/* mcrf */
1227 			op->type = COMPUTE + SETCC;
1228 			rd = 7 - ((instr >> 23) & 0x7);
1229 			ra = 7 - ((instr >> 18) & 0x7);
1230 			rd *= 4;
1231 			ra *= 4;
1232 			val = (regs->ccr >> ra) & 0xf;
1233 			op->ccval = (regs->ccr & ~(0xfUL << rd)) | (val << rd);
1234 			return 1;
1235 
1236 		case 16:	/* bclr */
1237 		case 528:	/* bcctr */
1238 			op->type = BRANCH;
1239 			imm = (instr & 0x400)? regs->ctr: regs->link;
1240 			op->val = truncate_if_32bit(regs->msr, imm);
1241 			if (instr & 1)
1242 				op->type |= SETLK;
1243 			if (branch_taken(instr, regs, op))
1244 				op->type |= BRTAKEN;
1245 			return 1;
1246 
1247 		case 18:	/* rfid, scary */
1248 			if (regs->msr & MSR_PR)
1249 				goto priv;
1250 			op->type = RFI;
1251 			return 0;
1252 
1253 		case 150:	/* isync */
1254 			op->type = BARRIER | BARRIER_ISYNC;
1255 			return 1;
1256 
1257 		case 33:	/* crnor */
1258 		case 129:	/* crandc */
1259 		case 193:	/* crxor */
1260 		case 225:	/* crnand */
1261 		case 257:	/* crand */
1262 		case 289:	/* creqv */
1263 		case 417:	/* crorc */
1264 		case 449:	/* cror */
1265 			op->type = COMPUTE + SETCC;
1266 			ra = (instr >> 16) & 0x1f;
1267 			rb = (instr >> 11) & 0x1f;
1268 			rd = (instr >> 21) & 0x1f;
1269 			ra = (regs->ccr >> (31 - ra)) & 1;
1270 			rb = (regs->ccr >> (31 - rb)) & 1;
1271 			val = (instr >> (6 + ra * 2 + rb)) & 1;
1272 			op->ccval = (regs->ccr & ~(1UL << (31 - rd))) |
1273 				(val << (31 - rd));
1274 			return 1;
1275 		}
1276 		break;
1277 	case 31:
1278 		switch ((instr >> 1) & 0x3ff) {
1279 		case 598:	/* sync */
1280 			op->type = BARRIER + BARRIER_SYNC;
1281 #ifdef __powerpc64__
1282 			switch ((instr >> 21) & 3) {
1283 			case 1:		/* lwsync */
1284 				op->type = BARRIER + BARRIER_LWSYNC;
1285 				break;
1286 			case 2:		/* ptesync */
1287 				op->type = BARRIER + BARRIER_PTESYNC;
1288 				break;
1289 			}
1290 #endif
1291 			return 1;
1292 
1293 		case 854:	/* eieio */
1294 			op->type = BARRIER + BARRIER_EIEIO;
1295 			return 1;
1296 		}
1297 		break;
1298 	}
1299 
1300 	/* Following cases refer to regs->gpr[], so we need all regs */
1301 	if (!FULL_REGS(regs))
1302 		return -1;
1303 
1304 	rd = (instr >> 21) & 0x1f;
1305 	ra = (instr >> 16) & 0x1f;
1306 	rb = (instr >> 11) & 0x1f;
1307 	rc = (instr >> 6) & 0x1f;
1308 
1309 	switch (opcode) {
1310 #ifdef __powerpc64__
1311 	case 2:		/* tdi */
1312 		if (rd & trap_compare(regs->gpr[ra], (short) instr))
1313 			goto trap;
1314 		return 1;
1315 #endif
1316 	case 3:		/* twi */
1317 		if (rd & trap_compare((int)regs->gpr[ra], (short) instr))
1318 			goto trap;
1319 		return 1;
1320 
1321 #ifdef __powerpc64__
1322 	case 4:
1323 		if (!cpu_has_feature(CPU_FTR_ARCH_300))
1324 			return -1;
1325 
1326 		switch (instr & 0x3f) {
1327 		case 48:	/* maddhd */
1328 			asm volatile(PPC_MADDHD(%0, %1, %2, %3) :
1329 				     "=r" (op->val) : "r" (regs->gpr[ra]),
1330 				     "r" (regs->gpr[rb]), "r" (regs->gpr[rc]));
1331 			goto compute_done;
1332 
1333 		case 49:	/* maddhdu */
1334 			asm volatile(PPC_MADDHDU(%0, %1, %2, %3) :
1335 				     "=r" (op->val) : "r" (regs->gpr[ra]),
1336 				     "r" (regs->gpr[rb]), "r" (regs->gpr[rc]));
1337 			goto compute_done;
1338 
1339 		case 51:	/* maddld */
1340 			asm volatile(PPC_MADDLD(%0, %1, %2, %3) :
1341 				     "=r" (op->val) : "r" (regs->gpr[ra]),
1342 				     "r" (regs->gpr[rb]), "r" (regs->gpr[rc]));
1343 			goto compute_done;
1344 		}
1345 
1346 		/*
1347 		 * There are other instructions from ISA 3.0 with the same
1348 		 * primary opcode which do not have emulation support yet.
1349 		 */
1350 		return -1;
1351 #endif
1352 
1353 	case 7:		/* mulli */
1354 		op->val = regs->gpr[ra] * (short) instr;
1355 		goto compute_done;
1356 
1357 	case 8:		/* subfic */
1358 		imm = (short) instr;
1359 		add_with_carry(regs, op, rd, ~regs->gpr[ra], imm, 1);
1360 		return 1;
1361 
1362 	case 10:	/* cmpli */
1363 		imm = (unsigned short) instr;
1364 		val = regs->gpr[ra];
1365 #ifdef __powerpc64__
1366 		if ((rd & 1) == 0)
1367 			val = (unsigned int) val;
1368 #endif
1369 		do_cmp_unsigned(regs, op, val, imm, rd >> 2);
1370 		return 1;
1371 
1372 	case 11:	/* cmpi */
1373 		imm = (short) instr;
1374 		val = regs->gpr[ra];
1375 #ifdef __powerpc64__
1376 		if ((rd & 1) == 0)
1377 			val = (int) val;
1378 #endif
1379 		do_cmp_signed(regs, op, val, imm, rd >> 2);
1380 		return 1;
1381 
1382 	case 12:	/* addic */
1383 		imm = (short) instr;
1384 		add_with_carry(regs, op, rd, regs->gpr[ra], imm, 0);
1385 		return 1;
1386 
1387 	case 13:	/* addic. */
1388 		imm = (short) instr;
1389 		add_with_carry(regs, op, rd, regs->gpr[ra], imm, 0);
1390 		set_cr0(regs, op);
1391 		return 1;
1392 
1393 	case 14:	/* addi */
1394 		imm = (short) instr;
1395 		if (ra)
1396 			imm += regs->gpr[ra];
1397 		op->val = imm;
1398 		goto compute_done;
1399 
1400 	case 15:	/* addis */
1401 		imm = ((short) instr) << 16;
1402 		if (ra)
1403 			imm += regs->gpr[ra];
1404 		op->val = imm;
1405 		goto compute_done;
1406 
1407 	case 19:
1408 		if (((instr >> 1) & 0x1f) == 2) {
1409 			/* addpcis */
1410 			imm = (short) (instr & 0xffc1);	/* d0 + d2 fields */
1411 			imm |= (instr >> 15) & 0x3e;	/* d1 field */
1412 			op->val = regs->nip + (imm << 16) + 4;
1413 			goto compute_done;
1414 		}
1415 		op->type = UNKNOWN;
1416 		return 0;
1417 
1418 	case 20:	/* rlwimi */
1419 		mb = (instr >> 6) & 0x1f;
1420 		me = (instr >> 1) & 0x1f;
1421 		val = DATA32(regs->gpr[rd]);
1422 		imm = MASK32(mb, me);
1423 		op->val = (regs->gpr[ra] & ~imm) | (ROTATE(val, rb) & imm);
1424 		goto logical_done;
1425 
1426 	case 21:	/* rlwinm */
1427 		mb = (instr >> 6) & 0x1f;
1428 		me = (instr >> 1) & 0x1f;
1429 		val = DATA32(regs->gpr[rd]);
1430 		op->val = ROTATE(val, rb) & MASK32(mb, me);
1431 		goto logical_done;
1432 
1433 	case 23:	/* rlwnm */
1434 		mb = (instr >> 6) & 0x1f;
1435 		me = (instr >> 1) & 0x1f;
1436 		rb = regs->gpr[rb] & 0x1f;
1437 		val = DATA32(regs->gpr[rd]);
1438 		op->val = ROTATE(val, rb) & MASK32(mb, me);
1439 		goto logical_done;
1440 
1441 	case 24:	/* ori */
1442 		op->val = regs->gpr[rd] | (unsigned short) instr;
1443 		goto logical_done_nocc;
1444 
1445 	case 25:	/* oris */
1446 		imm = (unsigned short) instr;
1447 		op->val = regs->gpr[rd] | (imm << 16);
1448 		goto logical_done_nocc;
1449 
1450 	case 26:	/* xori */
1451 		op->val = regs->gpr[rd] ^ (unsigned short) instr;
1452 		goto logical_done_nocc;
1453 
1454 	case 27:	/* xoris */
1455 		imm = (unsigned short) instr;
1456 		op->val = regs->gpr[rd] ^ (imm << 16);
1457 		goto logical_done_nocc;
1458 
1459 	case 28:	/* andi. */
1460 		op->val = regs->gpr[rd] & (unsigned short) instr;
1461 		set_cr0(regs, op);
1462 		goto logical_done_nocc;
1463 
1464 	case 29:	/* andis. */
1465 		imm = (unsigned short) instr;
1466 		op->val = regs->gpr[rd] & (imm << 16);
1467 		set_cr0(regs, op);
1468 		goto logical_done_nocc;
1469 
1470 #ifdef __powerpc64__
1471 	case 30:	/* rld* */
1472 		mb = ((instr >> 6) & 0x1f) | (instr & 0x20);
1473 		val = regs->gpr[rd];
1474 		if ((instr & 0x10) == 0) {
1475 			sh = rb | ((instr & 2) << 4);
1476 			val = ROTATE(val, sh);
1477 			switch ((instr >> 2) & 3) {
1478 			case 0:		/* rldicl */
1479 				val &= MASK64_L(mb);
1480 				break;
1481 			case 1:		/* rldicr */
1482 				val &= MASK64_R(mb);
1483 				break;
1484 			case 2:		/* rldic */
1485 				val &= MASK64(mb, 63 - sh);
1486 				break;
1487 			case 3:		/* rldimi */
1488 				imm = MASK64(mb, 63 - sh);
1489 				val = (regs->gpr[ra] & ~imm) |
1490 					(val & imm);
1491 			}
1492 			op->val = val;
1493 			goto logical_done;
1494 		} else {
1495 			sh = regs->gpr[rb] & 0x3f;
1496 			val = ROTATE(val, sh);
1497 			switch ((instr >> 1) & 7) {
1498 			case 0:		/* rldcl */
1499 				op->val = val & MASK64_L(mb);
1500 				goto logical_done;
1501 			case 1:		/* rldcr */
1502 				op->val = val & MASK64_R(mb);
1503 				goto logical_done;
1504 			}
1505 		}
1506 #endif
1507 		op->type = UNKNOWN;	/* illegal instruction */
1508 		return 0;
1509 
1510 	case 31:
1511 		/* isel occupies 32 minor opcodes */
1512 		if (((instr >> 1) & 0x1f) == 15) {
1513 			mb = (instr >> 6) & 0x1f; /* bc field */
1514 			val = (regs->ccr >> (31 - mb)) & 1;
1515 			val2 = (ra) ? regs->gpr[ra] : 0;
1516 
1517 			op->val = (val) ? val2 : regs->gpr[rb];
1518 			goto compute_done;
1519 		}
1520 
1521 		switch ((instr >> 1) & 0x3ff) {
1522 		case 4:		/* tw */
1523 			if (rd == 0x1f ||
1524 			    (rd & trap_compare((int)regs->gpr[ra],
1525 					       (int)regs->gpr[rb])))
1526 				goto trap;
1527 			return 1;
1528 #ifdef __powerpc64__
1529 		case 68:	/* td */
1530 			if (rd & trap_compare(regs->gpr[ra], regs->gpr[rb]))
1531 				goto trap;
1532 			return 1;
1533 #endif
1534 		case 83:	/* mfmsr */
1535 			if (regs->msr & MSR_PR)
1536 				goto priv;
1537 			op->type = MFMSR;
1538 			op->reg = rd;
1539 			return 0;
1540 		case 146:	/* mtmsr */
1541 			if (regs->msr & MSR_PR)
1542 				goto priv;
1543 			op->type = MTMSR;
1544 			op->reg = rd;
1545 			op->val = 0xffffffff & ~(MSR_ME | MSR_LE);
1546 			return 0;
1547 #ifdef CONFIG_PPC64
1548 		case 178:	/* mtmsrd */
1549 			if (regs->msr & MSR_PR)
1550 				goto priv;
1551 			op->type = MTMSR;
1552 			op->reg = rd;
1553 			/* only MSR_EE and MSR_RI get changed if bit 15 set */
1554 			/* mtmsrd doesn't change MSR_HV, MSR_ME or MSR_LE */
1555 			imm = (instr & 0x10000)? 0x8002: 0xefffffffffffeffeUL;
1556 			op->val = imm;
1557 			return 0;
1558 #endif
1559 
1560 		case 19:	/* mfcr */
1561 			imm = 0xffffffffUL;
1562 			if ((instr >> 20) & 1) {
1563 				imm = 0xf0000000UL;
1564 				for (sh = 0; sh < 8; ++sh) {
1565 					if (instr & (0x80000 >> sh))
1566 						break;
1567 					imm >>= 4;
1568 				}
1569 			}
1570 			op->val = regs->ccr & imm;
1571 			goto compute_done;
1572 
1573 		case 144:	/* mtcrf */
1574 			op->type = COMPUTE + SETCC;
1575 			imm = 0xf0000000UL;
1576 			val = regs->gpr[rd];
1577 			op->ccval = regs->ccr;
1578 			for (sh = 0; sh < 8; ++sh) {
1579 				if (instr & (0x80000 >> sh))
1580 					op->ccval = (op->ccval & ~imm) |
1581 						(val & imm);
1582 				imm >>= 4;
1583 			}
1584 			return 1;
1585 
1586 		case 339:	/* mfspr */
1587 			spr = ((instr >> 16) & 0x1f) | ((instr >> 6) & 0x3e0);
1588 			op->type = MFSPR;
1589 			op->reg = rd;
1590 			op->spr = spr;
1591 			if (spr == SPRN_XER || spr == SPRN_LR ||
1592 			    spr == SPRN_CTR)
1593 				return 1;
1594 			return 0;
1595 
1596 		case 467:	/* mtspr */
1597 			spr = ((instr >> 16) & 0x1f) | ((instr >> 6) & 0x3e0);
1598 			op->type = MTSPR;
1599 			op->val = regs->gpr[rd];
1600 			op->spr = spr;
1601 			if (spr == SPRN_XER || spr == SPRN_LR ||
1602 			    spr == SPRN_CTR)
1603 				return 1;
1604 			return 0;
1605 
1606 /*
1607  * Compare instructions
1608  */
1609 		case 0:	/* cmp */
1610 			val = regs->gpr[ra];
1611 			val2 = regs->gpr[rb];
1612 #ifdef __powerpc64__
1613 			if ((rd & 1) == 0) {
1614 				/* word (32-bit) compare */
1615 				val = (int) val;
1616 				val2 = (int) val2;
1617 			}
1618 #endif
1619 			do_cmp_signed(regs, op, val, val2, rd >> 2);
1620 			return 1;
1621 
1622 		case 32:	/* cmpl */
1623 			val = regs->gpr[ra];
1624 			val2 = regs->gpr[rb];
1625 #ifdef __powerpc64__
1626 			if ((rd & 1) == 0) {
1627 				/* word (32-bit) compare */
1628 				val = (unsigned int) val;
1629 				val2 = (unsigned int) val2;
1630 			}
1631 #endif
1632 			do_cmp_unsigned(regs, op, val, val2, rd >> 2);
1633 			return 1;
1634 
1635 		case 508: /* cmpb */
1636 			do_cmpb(regs, op, regs->gpr[rd], regs->gpr[rb]);
1637 			goto logical_done_nocc;
1638 
1639 /*
1640  * Arithmetic instructions
1641  */
1642 		case 8:	/* subfc */
1643 			add_with_carry(regs, op, rd, ~regs->gpr[ra],
1644 				       regs->gpr[rb], 1);
1645 			goto arith_done;
1646 #ifdef __powerpc64__
1647 		case 9:	/* mulhdu */
1648 			asm("mulhdu %0,%1,%2" : "=r" (op->val) :
1649 			    "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
1650 			goto arith_done;
1651 #endif
1652 		case 10:	/* addc */
1653 			add_with_carry(regs, op, rd, regs->gpr[ra],
1654 				       regs->gpr[rb], 0);
1655 			goto arith_done;
1656 
1657 		case 11:	/* mulhwu */
1658 			asm("mulhwu %0,%1,%2" : "=r" (op->val) :
1659 			    "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
1660 			goto arith_done;
1661 
1662 		case 40:	/* subf */
1663 			op->val = regs->gpr[rb] - regs->gpr[ra];
1664 			goto arith_done;
1665 #ifdef __powerpc64__
1666 		case 73:	/* mulhd */
1667 			asm("mulhd %0,%1,%2" : "=r" (op->val) :
1668 			    "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
1669 			goto arith_done;
1670 #endif
1671 		case 75:	/* mulhw */
1672 			asm("mulhw %0,%1,%2" : "=r" (op->val) :
1673 			    "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
1674 			goto arith_done;
1675 
1676 		case 104:	/* neg */
1677 			op->val = -regs->gpr[ra];
1678 			goto arith_done;
1679 
1680 		case 136:	/* subfe */
1681 			add_with_carry(regs, op, rd, ~regs->gpr[ra],
1682 				       regs->gpr[rb], regs->xer & XER_CA);
1683 			goto arith_done;
1684 
1685 		case 138:	/* adde */
1686 			add_with_carry(regs, op, rd, regs->gpr[ra],
1687 				       regs->gpr[rb], regs->xer & XER_CA);
1688 			goto arith_done;
1689 
1690 		case 200:	/* subfze */
1691 			add_with_carry(regs, op, rd, ~regs->gpr[ra], 0L,
1692 				       regs->xer & XER_CA);
1693 			goto arith_done;
1694 
1695 		case 202:	/* addze */
1696 			add_with_carry(regs, op, rd, regs->gpr[ra], 0L,
1697 				       regs->xer & XER_CA);
1698 			goto arith_done;
1699 
1700 		case 232:	/* subfme */
1701 			add_with_carry(regs, op, rd, ~regs->gpr[ra], -1L,
1702 				       regs->xer & XER_CA);
1703 			goto arith_done;
1704 #ifdef __powerpc64__
1705 		case 233:	/* mulld */
1706 			op->val = regs->gpr[ra] * regs->gpr[rb];
1707 			goto arith_done;
1708 #endif
1709 		case 234:	/* addme */
1710 			add_with_carry(regs, op, rd, regs->gpr[ra], -1L,
1711 				       regs->xer & XER_CA);
1712 			goto arith_done;
1713 
1714 		case 235:	/* mullw */
1715 			op->val = (long)(int) regs->gpr[ra] *
1716 				(int) regs->gpr[rb];
1717 
1718 			goto arith_done;
1719 #ifdef __powerpc64__
1720 		case 265:	/* modud */
1721 			if (!cpu_has_feature(CPU_FTR_ARCH_300))
1722 				return -1;
1723 			op->val = regs->gpr[ra] % regs->gpr[rb];
1724 			goto compute_done;
1725 #endif
1726 		case 266:	/* add */
1727 			op->val = regs->gpr[ra] + regs->gpr[rb];
1728 			goto arith_done;
1729 
1730 		case 267:	/* moduw */
1731 			if (!cpu_has_feature(CPU_FTR_ARCH_300))
1732 				return -1;
1733 			op->val = (unsigned int) regs->gpr[ra] %
1734 				(unsigned int) regs->gpr[rb];
1735 			goto compute_done;
1736 #ifdef __powerpc64__
1737 		case 457:	/* divdu */
1738 			op->val = regs->gpr[ra] / regs->gpr[rb];
1739 			goto arith_done;
1740 #endif
1741 		case 459:	/* divwu */
1742 			op->val = (unsigned int) regs->gpr[ra] /
1743 				(unsigned int) regs->gpr[rb];
1744 			goto arith_done;
1745 #ifdef __powerpc64__
1746 		case 489:	/* divd */
1747 			op->val = (long int) regs->gpr[ra] /
1748 				(long int) regs->gpr[rb];
1749 			goto arith_done;
1750 #endif
1751 		case 491:	/* divw */
1752 			op->val = (int) regs->gpr[ra] /
1753 				(int) regs->gpr[rb];
1754 			goto arith_done;
1755 
1756 		case 755:	/* darn */
1757 			if (!cpu_has_feature(CPU_FTR_ARCH_300))
1758 				return -1;
1759 			switch (ra & 0x3) {
1760 			case 0:
1761 				/* 32-bit conditioned */
1762 				asm volatile(PPC_DARN(%0, 0) : "=r" (op->val));
1763 				goto compute_done;
1764 
1765 			case 1:
1766 				/* 64-bit conditioned */
1767 				asm volatile(PPC_DARN(%0, 1) : "=r" (op->val));
1768 				goto compute_done;
1769 
1770 			case 2:
1771 				/* 64-bit raw */
1772 				asm volatile(PPC_DARN(%0, 2) : "=r" (op->val));
1773 				goto compute_done;
1774 			}
1775 
1776 			return -1;
1777 #ifdef __powerpc64__
1778 		case 777:	/* modsd */
1779 			if (!cpu_has_feature(CPU_FTR_ARCH_300))
1780 				return -1;
1781 			op->val = (long int) regs->gpr[ra] %
1782 				(long int) regs->gpr[rb];
1783 			goto compute_done;
1784 #endif
1785 		case 779:	/* modsw */
1786 			if (!cpu_has_feature(CPU_FTR_ARCH_300))
1787 				return -1;
1788 			op->val = (int) regs->gpr[ra] %
1789 				(int) regs->gpr[rb];
1790 			goto compute_done;
1791 
1792 
1793 /*
1794  * Logical instructions
1795  */
1796 		case 26:	/* cntlzw */
1797 			val = (unsigned int) regs->gpr[rd];
1798 			op->val = ( val ? __builtin_clz(val) : 32 );
1799 			goto logical_done;
1800 #ifdef __powerpc64__
1801 		case 58:	/* cntlzd */
1802 			val = regs->gpr[rd];
1803 			op->val = ( val ? __builtin_clzl(val) : 64 );
1804 			goto logical_done;
1805 #endif
1806 		case 28:	/* and */
1807 			op->val = regs->gpr[rd] & regs->gpr[rb];
1808 			goto logical_done;
1809 
1810 		case 60:	/* andc */
1811 			op->val = regs->gpr[rd] & ~regs->gpr[rb];
1812 			goto logical_done;
1813 
1814 		case 122:	/* popcntb */
1815 			do_popcnt(regs, op, regs->gpr[rd], 8);
1816 			goto logical_done_nocc;
1817 
1818 		case 124:	/* nor */
1819 			op->val = ~(regs->gpr[rd] | regs->gpr[rb]);
1820 			goto logical_done;
1821 
1822 		case 154:	/* prtyw */
1823 			do_prty(regs, op, regs->gpr[rd], 32);
1824 			goto logical_done_nocc;
1825 
1826 		case 186:	/* prtyd */
1827 			do_prty(regs, op, regs->gpr[rd], 64);
1828 			goto logical_done_nocc;
1829 #ifdef CONFIG_PPC64
1830 		case 252:	/* bpermd */
1831 			do_bpermd(regs, op, regs->gpr[rd], regs->gpr[rb]);
1832 			goto logical_done_nocc;
1833 #endif
1834 		case 284:	/* xor */
1835 			op->val = ~(regs->gpr[rd] ^ regs->gpr[rb]);
1836 			goto logical_done;
1837 
1838 		case 316:	/* xor */
1839 			op->val = regs->gpr[rd] ^ regs->gpr[rb];
1840 			goto logical_done;
1841 
1842 		case 378:	/* popcntw */
1843 			do_popcnt(regs, op, regs->gpr[rd], 32);
1844 			goto logical_done_nocc;
1845 
1846 		case 412:	/* orc */
1847 			op->val = regs->gpr[rd] | ~regs->gpr[rb];
1848 			goto logical_done;
1849 
1850 		case 444:	/* or */
1851 			op->val = regs->gpr[rd] | regs->gpr[rb];
1852 			goto logical_done;
1853 
1854 		case 476:	/* nand */
1855 			op->val = ~(regs->gpr[rd] & regs->gpr[rb]);
1856 			goto logical_done;
1857 #ifdef CONFIG_PPC64
1858 		case 506:	/* popcntd */
1859 			do_popcnt(regs, op, regs->gpr[rd], 64);
1860 			goto logical_done_nocc;
1861 #endif
1862 		case 538:	/* cnttzw */
1863 			if (!cpu_has_feature(CPU_FTR_ARCH_300))
1864 				return -1;
1865 			val = (unsigned int) regs->gpr[rd];
1866 			op->val = (val ? __builtin_ctz(val) : 32);
1867 			goto logical_done;
1868 #ifdef __powerpc64__
1869 		case 570:	/* cnttzd */
1870 			if (!cpu_has_feature(CPU_FTR_ARCH_300))
1871 				return -1;
1872 			val = regs->gpr[rd];
1873 			op->val = (val ? __builtin_ctzl(val) : 64);
1874 			goto logical_done;
1875 #endif
1876 		case 922:	/* extsh */
1877 			op->val = (signed short) regs->gpr[rd];
1878 			goto logical_done;
1879 
1880 		case 954:	/* extsb */
1881 			op->val = (signed char) regs->gpr[rd];
1882 			goto logical_done;
1883 #ifdef __powerpc64__
1884 		case 986:	/* extsw */
1885 			op->val = (signed int) regs->gpr[rd];
1886 			goto logical_done;
1887 #endif
1888 
1889 /*
1890  * Shift instructions
1891  */
1892 		case 24:	/* slw */
1893 			sh = regs->gpr[rb] & 0x3f;
1894 			if (sh < 32)
1895 				op->val = (regs->gpr[rd] << sh) & 0xffffffffUL;
1896 			else
1897 				op->val = 0;
1898 			goto logical_done;
1899 
1900 		case 536:	/* srw */
1901 			sh = regs->gpr[rb] & 0x3f;
1902 			if (sh < 32)
1903 				op->val = (regs->gpr[rd] & 0xffffffffUL) >> sh;
1904 			else
1905 				op->val = 0;
1906 			goto logical_done;
1907 
1908 		case 792:	/* sraw */
1909 			op->type = COMPUTE + SETREG + SETXER;
1910 			sh = regs->gpr[rb] & 0x3f;
1911 			ival = (signed int) regs->gpr[rd];
1912 			op->val = ival >> (sh < 32 ? sh : 31);
1913 			op->xerval = regs->xer;
1914 			if (ival < 0 && (sh >= 32 || (ival & ((1ul << sh) - 1)) != 0))
1915 				op->xerval |= XER_CA;
1916 			else
1917 				op->xerval &= ~XER_CA;
1918 			set_ca32(op, op->xerval & XER_CA);
1919 			goto logical_done;
1920 
1921 		case 824:	/* srawi */
1922 			op->type = COMPUTE + SETREG + SETXER;
1923 			sh = rb;
1924 			ival = (signed int) regs->gpr[rd];
1925 			op->val = ival >> sh;
1926 			op->xerval = regs->xer;
1927 			if (ival < 0 && (ival & ((1ul << sh) - 1)) != 0)
1928 				op->xerval |= XER_CA;
1929 			else
1930 				op->xerval &= ~XER_CA;
1931 			set_ca32(op, op->xerval & XER_CA);
1932 			goto logical_done;
1933 
1934 #ifdef __powerpc64__
1935 		case 27:	/* sld */
1936 			sh = regs->gpr[rb] & 0x7f;
1937 			if (sh < 64)
1938 				op->val = regs->gpr[rd] << sh;
1939 			else
1940 				op->val = 0;
1941 			goto logical_done;
1942 
1943 		case 539:	/* srd */
1944 			sh = regs->gpr[rb] & 0x7f;
1945 			if (sh < 64)
1946 				op->val = regs->gpr[rd] >> sh;
1947 			else
1948 				op->val = 0;
1949 			goto logical_done;
1950 
1951 		case 794:	/* srad */
1952 			op->type = COMPUTE + SETREG + SETXER;
1953 			sh = regs->gpr[rb] & 0x7f;
1954 			ival = (signed long int) regs->gpr[rd];
1955 			op->val = ival >> (sh < 64 ? sh : 63);
1956 			op->xerval = regs->xer;
1957 			if (ival < 0 && (sh >= 64 || (ival & ((1ul << sh) - 1)) != 0))
1958 				op->xerval |= XER_CA;
1959 			else
1960 				op->xerval &= ~XER_CA;
1961 			set_ca32(op, op->xerval & XER_CA);
1962 			goto logical_done;
1963 
1964 		case 826:	/* sradi with sh_5 = 0 */
1965 		case 827:	/* sradi with sh_5 = 1 */
1966 			op->type = COMPUTE + SETREG + SETXER;
1967 			sh = rb | ((instr & 2) << 4);
1968 			ival = (signed long int) regs->gpr[rd];
1969 			op->val = ival >> sh;
1970 			op->xerval = regs->xer;
1971 			if (ival < 0 && (ival & ((1ul << sh) - 1)) != 0)
1972 				op->xerval |= XER_CA;
1973 			else
1974 				op->xerval &= ~XER_CA;
1975 			set_ca32(op, op->xerval & XER_CA);
1976 			goto logical_done;
1977 
1978 		case 890:	/* extswsli with sh_5 = 0 */
1979 		case 891:	/* extswsli with sh_5 = 1 */
1980 			if (!cpu_has_feature(CPU_FTR_ARCH_300))
1981 				return -1;
1982 			op->type = COMPUTE + SETREG;
1983 			sh = rb | ((instr & 2) << 4);
1984 			val = (signed int) regs->gpr[rd];
1985 			if (sh)
1986 				op->val = ROTATE(val, sh) & MASK64(0, 63 - sh);
1987 			else
1988 				op->val = val;
1989 			goto logical_done;
1990 
1991 #endif /* __powerpc64__ */
1992 
1993 /*
1994  * Cache instructions
1995  */
1996 		case 54:	/* dcbst */
1997 			op->type = MKOP(CACHEOP, DCBST, 0);
1998 			op->ea = xform_ea(instr, regs);
1999 			return 0;
2000 
2001 		case 86:	/* dcbf */
2002 			op->type = MKOP(CACHEOP, DCBF, 0);
2003 			op->ea = xform_ea(instr, regs);
2004 			return 0;
2005 
2006 		case 246:	/* dcbtst */
2007 			op->type = MKOP(CACHEOP, DCBTST, 0);
2008 			op->ea = xform_ea(instr, regs);
2009 			op->reg = rd;
2010 			return 0;
2011 
2012 		case 278:	/* dcbt */
2013 			op->type = MKOP(CACHEOP, DCBTST, 0);
2014 			op->ea = xform_ea(instr, regs);
2015 			op->reg = rd;
2016 			return 0;
2017 
2018 		case 982:	/* icbi */
2019 			op->type = MKOP(CACHEOP, ICBI, 0);
2020 			op->ea = xform_ea(instr, regs);
2021 			return 0;
2022 
2023 		case 1014:	/* dcbz */
2024 			op->type = MKOP(CACHEOP, DCBZ, 0);
2025 			op->ea = xform_ea(instr, regs);
2026 			return 0;
2027 		}
2028 		break;
2029 	}
2030 
2031 /*
2032  * Loads and stores.
2033  */
2034 	op->type = UNKNOWN;
2035 	op->update_reg = ra;
2036 	op->reg = rd;
2037 	op->val = regs->gpr[rd];
2038 	u = (instr >> 20) & UPDATE;
2039 	op->vsx_flags = 0;
2040 
2041 	switch (opcode) {
2042 	case 31:
2043 		u = instr & UPDATE;
2044 		op->ea = xform_ea(instr, regs);
2045 		switch ((instr >> 1) & 0x3ff) {
2046 		case 20:	/* lwarx */
2047 			op->type = MKOP(LARX, 0, 4);
2048 			break;
2049 
2050 		case 150:	/* stwcx. */
2051 			op->type = MKOP(STCX, 0, 4);
2052 			break;
2053 
2054 #ifdef __powerpc64__
2055 		case 84:	/* ldarx */
2056 			op->type = MKOP(LARX, 0, 8);
2057 			break;
2058 
2059 		case 214:	/* stdcx. */
2060 			op->type = MKOP(STCX, 0, 8);
2061 			break;
2062 
2063 		case 52:	/* lbarx */
2064 			op->type = MKOP(LARX, 0, 1);
2065 			break;
2066 
2067 		case 694:	/* stbcx. */
2068 			op->type = MKOP(STCX, 0, 1);
2069 			break;
2070 
2071 		case 116:	/* lharx */
2072 			op->type = MKOP(LARX, 0, 2);
2073 			break;
2074 
2075 		case 726:	/* sthcx. */
2076 			op->type = MKOP(STCX, 0, 2);
2077 			break;
2078 
2079 		case 276:	/* lqarx */
2080 			if (!((rd & 1) || rd == ra || rd == rb))
2081 				op->type = MKOP(LARX, 0, 16);
2082 			break;
2083 
2084 		case 182:	/* stqcx. */
2085 			if (!(rd & 1))
2086 				op->type = MKOP(STCX, 0, 16);
2087 			break;
2088 #endif
2089 
2090 		case 23:	/* lwzx */
2091 		case 55:	/* lwzux */
2092 			op->type = MKOP(LOAD, u, 4);
2093 			break;
2094 
2095 		case 87:	/* lbzx */
2096 		case 119:	/* lbzux */
2097 			op->type = MKOP(LOAD, u, 1);
2098 			break;
2099 
2100 #ifdef CONFIG_ALTIVEC
2101 		/*
2102 		 * Note: for the load/store vector element instructions,
2103 		 * bits of the EA say which field of the VMX register to use.
2104 		 */
2105 		case 7:		/* lvebx */
2106 			op->type = MKOP(LOAD_VMX, 0, 1);
2107 			op->element_size = 1;
2108 			break;
2109 
2110 		case 39:	/* lvehx */
2111 			op->type = MKOP(LOAD_VMX, 0, 2);
2112 			op->element_size = 2;
2113 			break;
2114 
2115 		case 71:	/* lvewx */
2116 			op->type = MKOP(LOAD_VMX, 0, 4);
2117 			op->element_size = 4;
2118 			break;
2119 
2120 		case 103:	/* lvx */
2121 		case 359:	/* lvxl */
2122 			op->type = MKOP(LOAD_VMX, 0, 16);
2123 			op->element_size = 16;
2124 			break;
2125 
2126 		case 135:	/* stvebx */
2127 			op->type = MKOP(STORE_VMX, 0, 1);
2128 			op->element_size = 1;
2129 			break;
2130 
2131 		case 167:	/* stvehx */
2132 			op->type = MKOP(STORE_VMX, 0, 2);
2133 			op->element_size = 2;
2134 			break;
2135 
2136 		case 199:	/* stvewx */
2137 			op->type = MKOP(STORE_VMX, 0, 4);
2138 			op->element_size = 4;
2139 			break;
2140 
2141 		case 231:	/* stvx */
2142 		case 487:	/* stvxl */
2143 			op->type = MKOP(STORE_VMX, 0, 16);
2144 			break;
2145 #endif /* CONFIG_ALTIVEC */
2146 
2147 #ifdef __powerpc64__
2148 		case 21:	/* ldx */
2149 		case 53:	/* ldux */
2150 			op->type = MKOP(LOAD, u, 8);
2151 			break;
2152 
2153 		case 149:	/* stdx */
2154 		case 181:	/* stdux */
2155 			op->type = MKOP(STORE, u, 8);
2156 			break;
2157 #endif
2158 
2159 		case 151:	/* stwx */
2160 		case 183:	/* stwux */
2161 			op->type = MKOP(STORE, u, 4);
2162 			break;
2163 
2164 		case 215:	/* stbx */
2165 		case 247:	/* stbux */
2166 			op->type = MKOP(STORE, u, 1);
2167 			break;
2168 
2169 		case 279:	/* lhzx */
2170 		case 311:	/* lhzux */
2171 			op->type = MKOP(LOAD, u, 2);
2172 			break;
2173 
2174 #ifdef __powerpc64__
2175 		case 341:	/* lwax */
2176 		case 373:	/* lwaux */
2177 			op->type = MKOP(LOAD, SIGNEXT | u, 4);
2178 			break;
2179 #endif
2180 
2181 		case 343:	/* lhax */
2182 		case 375:	/* lhaux */
2183 			op->type = MKOP(LOAD, SIGNEXT | u, 2);
2184 			break;
2185 
2186 		case 407:	/* sthx */
2187 		case 439:	/* sthux */
2188 			op->type = MKOP(STORE, u, 2);
2189 			break;
2190 
2191 #ifdef __powerpc64__
2192 		case 532:	/* ldbrx */
2193 			op->type = MKOP(LOAD, BYTEREV, 8);
2194 			break;
2195 
2196 #endif
2197 		case 533:	/* lswx */
2198 			op->type = MKOP(LOAD_MULTI, 0, regs->xer & 0x7f);
2199 			break;
2200 
2201 		case 534:	/* lwbrx */
2202 			op->type = MKOP(LOAD, BYTEREV, 4);
2203 			break;
2204 
2205 		case 597:	/* lswi */
2206 			if (rb == 0)
2207 				rb = 32;	/* # bytes to load */
2208 			op->type = MKOP(LOAD_MULTI, 0, rb);
2209 			op->ea = ra ? regs->gpr[ra] : 0;
2210 			break;
2211 
2212 #ifdef CONFIG_PPC_FPU
2213 		case 535:	/* lfsx */
2214 		case 567:	/* lfsux */
2215 			op->type = MKOP(LOAD_FP, u | FPCONV, 4);
2216 			break;
2217 
2218 		case 599:	/* lfdx */
2219 		case 631:	/* lfdux */
2220 			op->type = MKOP(LOAD_FP, u, 8);
2221 			break;
2222 
2223 		case 663:	/* stfsx */
2224 		case 695:	/* stfsux */
2225 			op->type = MKOP(STORE_FP, u | FPCONV, 4);
2226 			break;
2227 
2228 		case 727:	/* stfdx */
2229 		case 759:	/* stfdux */
2230 			op->type = MKOP(STORE_FP, u, 8);
2231 			break;
2232 
2233 #ifdef __powerpc64__
2234 		case 791:	/* lfdpx */
2235 			op->type = MKOP(LOAD_FP, 0, 16);
2236 			break;
2237 
2238 		case 855:	/* lfiwax */
2239 			op->type = MKOP(LOAD_FP, SIGNEXT, 4);
2240 			break;
2241 
2242 		case 887:	/* lfiwzx */
2243 			op->type = MKOP(LOAD_FP, 0, 4);
2244 			break;
2245 
2246 		case 919:	/* stfdpx */
2247 			op->type = MKOP(STORE_FP, 0, 16);
2248 			break;
2249 
2250 		case 983:	/* stfiwx */
2251 			op->type = MKOP(STORE_FP, 0, 4);
2252 			break;
2253 #endif /* __powerpc64 */
2254 #endif /* CONFIG_PPC_FPU */
2255 
2256 #ifdef __powerpc64__
2257 		case 660:	/* stdbrx */
2258 			op->type = MKOP(STORE, BYTEREV, 8);
2259 			op->val = byterev_8(regs->gpr[rd]);
2260 			break;
2261 
2262 #endif
2263 		case 661:	/* stswx */
2264 			op->type = MKOP(STORE_MULTI, 0, regs->xer & 0x7f);
2265 			break;
2266 
2267 		case 662:	/* stwbrx */
2268 			op->type = MKOP(STORE, BYTEREV, 4);
2269 			op->val = byterev_4(regs->gpr[rd]);
2270 			break;
2271 
2272 		case 725:	/* stswi */
2273 			if (rb == 0)
2274 				rb = 32;	/* # bytes to store */
2275 			op->type = MKOP(STORE_MULTI, 0, rb);
2276 			op->ea = ra ? regs->gpr[ra] : 0;
2277 			break;
2278 
2279 		case 790:	/* lhbrx */
2280 			op->type = MKOP(LOAD, BYTEREV, 2);
2281 			break;
2282 
2283 		case 918:	/* sthbrx */
2284 			op->type = MKOP(STORE, BYTEREV, 2);
2285 			op->val = byterev_2(regs->gpr[rd]);
2286 			break;
2287 
2288 #ifdef CONFIG_VSX
2289 		case 12:	/* lxsiwzx */
2290 			op->reg = rd | ((instr & 1) << 5);
2291 			op->type = MKOP(LOAD_VSX, 0, 4);
2292 			op->element_size = 8;
2293 			break;
2294 
2295 		case 76:	/* lxsiwax */
2296 			op->reg = rd | ((instr & 1) << 5);
2297 			op->type = MKOP(LOAD_VSX, SIGNEXT, 4);
2298 			op->element_size = 8;
2299 			break;
2300 
2301 		case 140:	/* stxsiwx */
2302 			op->reg = rd | ((instr & 1) << 5);
2303 			op->type = MKOP(STORE_VSX, 0, 4);
2304 			op->element_size = 8;
2305 			break;
2306 
2307 		case 268:	/* lxvx */
2308 			op->reg = rd | ((instr & 1) << 5);
2309 			op->type = MKOP(LOAD_VSX, 0, 16);
2310 			op->element_size = 16;
2311 			op->vsx_flags = VSX_CHECK_VEC;
2312 			break;
2313 
2314 		case 269:	/* lxvl */
2315 		case 301: {	/* lxvll */
2316 			int nb;
2317 			op->reg = rd | ((instr & 1) << 5);
2318 			op->ea = ra ? regs->gpr[ra] : 0;
2319 			nb = regs->gpr[rb] & 0xff;
2320 			if (nb > 16)
2321 				nb = 16;
2322 			op->type = MKOP(LOAD_VSX, 0, nb);
2323 			op->element_size = 16;
2324 			op->vsx_flags = ((instr & 0x20) ? VSX_LDLEFT : 0) |
2325 				VSX_CHECK_VEC;
2326 			break;
2327 		}
2328 		case 332:	/* lxvdsx */
2329 			op->reg = rd | ((instr & 1) << 5);
2330 			op->type = MKOP(LOAD_VSX, 0, 8);
2331 			op->element_size = 8;
2332 			op->vsx_flags = VSX_SPLAT;
2333 			break;
2334 
2335 		case 364:	/* lxvwsx */
2336 			op->reg = rd | ((instr & 1) << 5);
2337 			op->type = MKOP(LOAD_VSX, 0, 4);
2338 			op->element_size = 4;
2339 			op->vsx_flags = VSX_SPLAT | VSX_CHECK_VEC;
2340 			break;
2341 
2342 		case 396:	/* stxvx */
2343 			op->reg = rd | ((instr & 1) << 5);
2344 			op->type = MKOP(STORE_VSX, 0, 16);
2345 			op->element_size = 16;
2346 			op->vsx_flags = VSX_CHECK_VEC;
2347 			break;
2348 
2349 		case 397:	/* stxvl */
2350 		case 429: {	/* stxvll */
2351 			int nb;
2352 			op->reg = rd | ((instr & 1) << 5);
2353 			op->ea = ra ? regs->gpr[ra] : 0;
2354 			nb = regs->gpr[rb] & 0xff;
2355 			if (nb > 16)
2356 				nb = 16;
2357 			op->type = MKOP(STORE_VSX, 0, nb);
2358 			op->element_size = 16;
2359 			op->vsx_flags = ((instr & 0x20) ? VSX_LDLEFT : 0) |
2360 				VSX_CHECK_VEC;
2361 			break;
2362 		}
2363 		case 524:	/* lxsspx */
2364 			op->reg = rd | ((instr & 1) << 5);
2365 			op->type = MKOP(LOAD_VSX, 0, 4);
2366 			op->element_size = 8;
2367 			op->vsx_flags = VSX_FPCONV;
2368 			break;
2369 
2370 		case 588:	/* lxsdx */
2371 			op->reg = rd | ((instr & 1) << 5);
2372 			op->type = MKOP(LOAD_VSX, 0, 8);
2373 			op->element_size = 8;
2374 			break;
2375 
2376 		case 652:	/* stxsspx */
2377 			op->reg = rd | ((instr & 1) << 5);
2378 			op->type = MKOP(STORE_VSX, 0, 4);
2379 			op->element_size = 8;
2380 			op->vsx_flags = VSX_FPCONV;
2381 			break;
2382 
2383 		case 716:	/* stxsdx */
2384 			op->reg = rd | ((instr & 1) << 5);
2385 			op->type = MKOP(STORE_VSX, 0, 8);
2386 			op->element_size = 8;
2387 			break;
2388 
2389 		case 780:	/* lxvw4x */
2390 			op->reg = rd | ((instr & 1) << 5);
2391 			op->type = MKOP(LOAD_VSX, 0, 16);
2392 			op->element_size = 4;
2393 			break;
2394 
2395 		case 781:	/* lxsibzx */
2396 			op->reg = rd | ((instr & 1) << 5);
2397 			op->type = MKOP(LOAD_VSX, 0, 1);
2398 			op->element_size = 8;
2399 			op->vsx_flags = VSX_CHECK_VEC;
2400 			break;
2401 
2402 		case 812:	/* lxvh8x */
2403 			op->reg = rd | ((instr & 1) << 5);
2404 			op->type = MKOP(LOAD_VSX, 0, 16);
2405 			op->element_size = 2;
2406 			op->vsx_flags = VSX_CHECK_VEC;
2407 			break;
2408 
2409 		case 813:	/* lxsihzx */
2410 			op->reg = rd | ((instr & 1) << 5);
2411 			op->type = MKOP(LOAD_VSX, 0, 2);
2412 			op->element_size = 8;
2413 			op->vsx_flags = VSX_CHECK_VEC;
2414 			break;
2415 
2416 		case 844:	/* lxvd2x */
2417 			op->reg = rd | ((instr & 1) << 5);
2418 			op->type = MKOP(LOAD_VSX, 0, 16);
2419 			op->element_size = 8;
2420 			break;
2421 
2422 		case 876:	/* lxvb16x */
2423 			op->reg = rd | ((instr & 1) << 5);
2424 			op->type = MKOP(LOAD_VSX, 0, 16);
2425 			op->element_size = 1;
2426 			op->vsx_flags = VSX_CHECK_VEC;
2427 			break;
2428 
2429 		case 908:	/* stxvw4x */
2430 			op->reg = rd | ((instr & 1) << 5);
2431 			op->type = MKOP(STORE_VSX, 0, 16);
2432 			op->element_size = 4;
2433 			break;
2434 
2435 		case 909:	/* stxsibx */
2436 			op->reg = rd | ((instr & 1) << 5);
2437 			op->type = MKOP(STORE_VSX, 0, 1);
2438 			op->element_size = 8;
2439 			op->vsx_flags = VSX_CHECK_VEC;
2440 			break;
2441 
2442 		case 940:	/* stxvh8x */
2443 			op->reg = rd | ((instr & 1) << 5);
2444 			op->type = MKOP(STORE_VSX, 0, 16);
2445 			op->element_size = 2;
2446 			op->vsx_flags = VSX_CHECK_VEC;
2447 			break;
2448 
2449 		case 941:	/* stxsihx */
2450 			op->reg = rd | ((instr & 1) << 5);
2451 			op->type = MKOP(STORE_VSX, 0, 2);
2452 			op->element_size = 8;
2453 			op->vsx_flags = VSX_CHECK_VEC;
2454 			break;
2455 
2456 		case 972:	/* stxvd2x */
2457 			op->reg = rd | ((instr & 1) << 5);
2458 			op->type = MKOP(STORE_VSX, 0, 16);
2459 			op->element_size = 8;
2460 			break;
2461 
2462 		case 1004:	/* stxvb16x */
2463 			op->reg = rd | ((instr & 1) << 5);
2464 			op->type = MKOP(STORE_VSX, 0, 16);
2465 			op->element_size = 1;
2466 			op->vsx_flags = VSX_CHECK_VEC;
2467 			break;
2468 
2469 #endif /* CONFIG_VSX */
2470 		}
2471 		break;
2472 
2473 	case 32:	/* lwz */
2474 	case 33:	/* lwzu */
2475 		op->type = MKOP(LOAD, u, 4);
2476 		op->ea = dform_ea(instr, regs);
2477 		break;
2478 
2479 	case 34:	/* lbz */
2480 	case 35:	/* lbzu */
2481 		op->type = MKOP(LOAD, u, 1);
2482 		op->ea = dform_ea(instr, regs);
2483 		break;
2484 
2485 	case 36:	/* stw */
2486 	case 37:	/* stwu */
2487 		op->type = MKOP(STORE, u, 4);
2488 		op->ea = dform_ea(instr, regs);
2489 		break;
2490 
2491 	case 38:	/* stb */
2492 	case 39:	/* stbu */
2493 		op->type = MKOP(STORE, u, 1);
2494 		op->ea = dform_ea(instr, regs);
2495 		break;
2496 
2497 	case 40:	/* lhz */
2498 	case 41:	/* lhzu */
2499 		op->type = MKOP(LOAD, u, 2);
2500 		op->ea = dform_ea(instr, regs);
2501 		break;
2502 
2503 	case 42:	/* lha */
2504 	case 43:	/* lhau */
2505 		op->type = MKOP(LOAD, SIGNEXT | u, 2);
2506 		op->ea = dform_ea(instr, regs);
2507 		break;
2508 
2509 	case 44:	/* sth */
2510 	case 45:	/* sthu */
2511 		op->type = MKOP(STORE, u, 2);
2512 		op->ea = dform_ea(instr, regs);
2513 		break;
2514 
2515 	case 46:	/* lmw */
2516 		if (ra >= rd)
2517 			break;		/* invalid form, ra in range to load */
2518 		op->type = MKOP(LOAD_MULTI, 0, 4 * (32 - rd));
2519 		op->ea = dform_ea(instr, regs);
2520 		break;
2521 
2522 	case 47:	/* stmw */
2523 		op->type = MKOP(STORE_MULTI, 0, 4 * (32 - rd));
2524 		op->ea = dform_ea(instr, regs);
2525 		break;
2526 
2527 #ifdef CONFIG_PPC_FPU
2528 	case 48:	/* lfs */
2529 	case 49:	/* lfsu */
2530 		op->type = MKOP(LOAD_FP, u | FPCONV, 4);
2531 		op->ea = dform_ea(instr, regs);
2532 		break;
2533 
2534 	case 50:	/* lfd */
2535 	case 51:	/* lfdu */
2536 		op->type = MKOP(LOAD_FP, u, 8);
2537 		op->ea = dform_ea(instr, regs);
2538 		break;
2539 
2540 	case 52:	/* stfs */
2541 	case 53:	/* stfsu */
2542 		op->type = MKOP(STORE_FP, u | FPCONV, 4);
2543 		op->ea = dform_ea(instr, regs);
2544 		break;
2545 
2546 	case 54:	/* stfd */
2547 	case 55:	/* stfdu */
2548 		op->type = MKOP(STORE_FP, u, 8);
2549 		op->ea = dform_ea(instr, regs);
2550 		break;
2551 #endif
2552 
2553 #ifdef __powerpc64__
2554 	case 56:	/* lq */
2555 		if (!((rd & 1) || (rd == ra)))
2556 			op->type = MKOP(LOAD, 0, 16);
2557 		op->ea = dqform_ea(instr, regs);
2558 		break;
2559 #endif
2560 
2561 #ifdef CONFIG_VSX
2562 	case 57:	/* lfdp, lxsd, lxssp */
2563 		op->ea = dsform_ea(instr, regs);
2564 		switch (instr & 3) {
2565 		case 0:		/* lfdp */
2566 			if (rd & 1)
2567 				break;		/* reg must be even */
2568 			op->type = MKOP(LOAD_FP, 0, 16);
2569 			break;
2570 		case 2:		/* lxsd */
2571 			op->reg = rd + 32;
2572 			op->type = MKOP(LOAD_VSX, 0, 8);
2573 			op->element_size = 8;
2574 			op->vsx_flags = VSX_CHECK_VEC;
2575 			break;
2576 		case 3:		/* lxssp */
2577 			op->reg = rd + 32;
2578 			op->type = MKOP(LOAD_VSX, 0, 4);
2579 			op->element_size = 8;
2580 			op->vsx_flags = VSX_FPCONV | VSX_CHECK_VEC;
2581 			break;
2582 		}
2583 		break;
2584 #endif /* CONFIG_VSX */
2585 
2586 #ifdef __powerpc64__
2587 	case 58:	/* ld[u], lwa */
2588 		op->ea = dsform_ea(instr, regs);
2589 		switch (instr & 3) {
2590 		case 0:		/* ld */
2591 			op->type = MKOP(LOAD, 0, 8);
2592 			break;
2593 		case 1:		/* ldu */
2594 			op->type = MKOP(LOAD, UPDATE, 8);
2595 			break;
2596 		case 2:		/* lwa */
2597 			op->type = MKOP(LOAD, SIGNEXT, 4);
2598 			break;
2599 		}
2600 		break;
2601 #endif
2602 
2603 #ifdef CONFIG_VSX
2604 	case 61:	/* stfdp, lxv, stxsd, stxssp, stxv */
2605 		switch (instr & 7) {
2606 		case 0:		/* stfdp with LSB of DS field = 0 */
2607 		case 4:		/* stfdp with LSB of DS field = 1 */
2608 			op->ea = dsform_ea(instr, regs);
2609 			op->type = MKOP(STORE_FP, 0, 16);
2610 			break;
2611 
2612 		case 1:		/* lxv */
2613 			op->ea = dqform_ea(instr, regs);
2614 			if (instr & 8)
2615 				op->reg = rd + 32;
2616 			op->type = MKOP(LOAD_VSX, 0, 16);
2617 			op->element_size = 16;
2618 			op->vsx_flags = VSX_CHECK_VEC;
2619 			break;
2620 
2621 		case 2:		/* stxsd with LSB of DS field = 0 */
2622 		case 6:		/* stxsd with LSB of DS field = 1 */
2623 			op->ea = dsform_ea(instr, regs);
2624 			op->reg = rd + 32;
2625 			op->type = MKOP(STORE_VSX, 0, 8);
2626 			op->element_size = 8;
2627 			op->vsx_flags = VSX_CHECK_VEC;
2628 			break;
2629 
2630 		case 3:		/* stxssp with LSB of DS field = 0 */
2631 		case 7:		/* stxssp with LSB of DS field = 1 */
2632 			op->ea = dsform_ea(instr, regs);
2633 			op->reg = rd + 32;
2634 			op->type = MKOP(STORE_VSX, 0, 4);
2635 			op->element_size = 8;
2636 			op->vsx_flags = VSX_FPCONV | VSX_CHECK_VEC;
2637 			break;
2638 
2639 		case 5:		/* stxv */
2640 			op->ea = dqform_ea(instr, regs);
2641 			if (instr & 8)
2642 				op->reg = rd + 32;
2643 			op->type = MKOP(STORE_VSX, 0, 16);
2644 			op->element_size = 16;
2645 			op->vsx_flags = VSX_CHECK_VEC;
2646 			break;
2647 		}
2648 		break;
2649 #endif /* CONFIG_VSX */
2650 
2651 #ifdef __powerpc64__
2652 	case 62:	/* std[u] */
2653 		op->ea = dsform_ea(instr, regs);
2654 		switch (instr & 3) {
2655 		case 0:		/* std */
2656 			op->type = MKOP(STORE, 0, 8);
2657 			break;
2658 		case 1:		/* stdu */
2659 			op->type = MKOP(STORE, UPDATE, 8);
2660 			break;
2661 		case 2:		/* stq */
2662 			if (!(rd & 1))
2663 				op->type = MKOP(STORE, 0, 16);
2664 			break;
2665 		}
2666 		break;
2667 #endif /* __powerpc64__ */
2668 
2669 	}
2670 
2671 #ifdef CONFIG_VSX
2672 	if ((GETTYPE(op->type) == LOAD_VSX ||
2673 	     GETTYPE(op->type) == STORE_VSX) &&
2674 	    !cpu_has_feature(CPU_FTR_VSX)) {
2675 		return -1;
2676 	}
2677 #endif /* CONFIG_VSX */
2678 
2679 	return 0;
2680 
2681  logical_done:
2682 	if (instr & 1)
2683 		set_cr0(regs, op);
2684  logical_done_nocc:
2685 	op->reg = ra;
2686 	op->type |= SETREG;
2687 	return 1;
2688 
2689  arith_done:
2690 	if (instr & 1)
2691 		set_cr0(regs, op);
2692  compute_done:
2693 	op->reg = rd;
2694 	op->type |= SETREG;
2695 	return 1;
2696 
2697  priv:
2698 	op->type = INTERRUPT | 0x700;
2699 	op->val = SRR1_PROGPRIV;
2700 	return 0;
2701 
2702  trap:
2703 	op->type = INTERRUPT | 0x700;
2704 	op->val = SRR1_PROGTRAP;
2705 	return 0;
2706 }
2707 EXPORT_SYMBOL_GPL(analyse_instr);
2708 NOKPROBE_SYMBOL(analyse_instr);
2709 
2710 /*
2711  * For PPC32 we always use stwu with r1 to change the stack pointer.
2712  * So this emulated store may corrupt the exception frame, now we
2713  * have to provide the exception frame trampoline, which is pushed
2714  * below the kprobed function stack. So we only update gpr[1] but
2715  * don't emulate the real store operation. We will do real store
2716  * operation safely in exception return code by checking this flag.
2717  */
handle_stack_update(unsigned long ea,struct pt_regs * regs)2718 static nokprobe_inline int handle_stack_update(unsigned long ea, struct pt_regs *regs)
2719 {
2720 #ifdef CONFIG_PPC32
2721 	/*
2722 	 * Check if we will touch kernel stack overflow
2723 	 */
2724 	if (ea - STACK_INT_FRAME_SIZE <= current->thread.ksp_limit) {
2725 		printk(KERN_CRIT "Can't kprobe this since kernel stack would overflow.\n");
2726 		return -EINVAL;
2727 	}
2728 #endif /* CONFIG_PPC32 */
2729 	/*
2730 	 * Check if we already set since that means we'll
2731 	 * lose the previous value.
2732 	 */
2733 	WARN_ON(test_thread_flag(TIF_EMULATE_STACK_STORE));
2734 	set_thread_flag(TIF_EMULATE_STACK_STORE);
2735 	return 0;
2736 }
2737 
do_signext(unsigned long * valp,int size)2738 static nokprobe_inline void do_signext(unsigned long *valp, int size)
2739 {
2740 	switch (size) {
2741 	case 2:
2742 		*valp = (signed short) *valp;
2743 		break;
2744 	case 4:
2745 		*valp = (signed int) *valp;
2746 		break;
2747 	}
2748 }
2749 
do_byterev(unsigned long * valp,int size)2750 static nokprobe_inline void do_byterev(unsigned long *valp, int size)
2751 {
2752 	switch (size) {
2753 	case 2:
2754 		*valp = byterev_2(*valp);
2755 		break;
2756 	case 4:
2757 		*valp = byterev_4(*valp);
2758 		break;
2759 #ifdef __powerpc64__
2760 	case 8:
2761 		*valp = byterev_8(*valp);
2762 		break;
2763 #endif
2764 	}
2765 }
2766 
2767 /*
2768  * Emulate an instruction that can be executed just by updating
2769  * fields in *regs.
2770  */
void emulate_update_regs(struct pt_regs *regs, struct instruction_op *op)
{
	unsigned long next_pc;

	/* Default next PC: the following instruction, truncated in 32-bit mode */
	next_pc = truncate_if_32bit(regs->msr, regs->nip + 4);
	switch (GETTYPE(op->type)) {
	case COMPUTE:
		/* Write back whichever of GPR / CR / XER the op computed */
		if (op->type & SETREG)
			regs->gpr[op->reg] = op->val;
		if (op->type & SETCC)
			regs->ccr = op->ccval;
		if (op->type & SETXER)
			regs->xer = op->xerval;
		break;

	case BRANCH:
		/*
		 * LR must be set to the address of the next sequential
		 * instruction *before* next_pc is possibly replaced by
		 * the branch target below.
		 */
		if (op->type & SETLK)
			regs->link = next_pc;
		if (op->type & BRTAKEN)
			next_pc = op->val;
		if (op->type & DECCTR)
			--regs->ctr;
		break;

	case BARRIER:
		/* Execute the real barrier on behalf of the stepped instruction */
		switch (op->type & BARRIER_MASK) {
		case BARRIER_SYNC:
			mb();
			break;
		case BARRIER_ISYNC:
			isync();
			break;
		case BARRIER_EIEIO:
			eieio();
			break;
#ifdef CONFIG_PPC64
		case BARRIER_LWSYNC:
			asm volatile("lwsync" : : : "memory");
			break;
		case BARRIER_PTESYNC:
			asm volatile("ptesync" : : : "memory");
			break;
#endif
		}
		break;

	case MFSPR:
		/* Only SPRs tracked in pt_regs can be read here */
		switch (op->spr) {
		case SPRN_XER:
			/* XER is architecturally 32 bits */
			regs->gpr[op->reg] = regs->xer & 0xffffffffUL;
			break;
		case SPRN_LR:
			regs->gpr[op->reg] = regs->link;
			break;
		case SPRN_CTR:
			regs->gpr[op->reg] = regs->ctr;
			break;
		default:
			WARN_ON_ONCE(1);
		}
		break;

	case MTSPR:
		switch (op->spr) {
		case SPRN_XER:
			regs->xer = op->val & 0xffffffffUL;
			break;
		case SPRN_LR:
			regs->link = op->val;
			break;
		case SPRN_CTR:
			regs->ctr = op->val;
			break;
		default:
			WARN_ON_ONCE(1);
		}
		break;

	default:
		WARN_ON_ONCE(1);
	}
	regs->nip = next_pc;
}
2854 NOKPROBE_SYMBOL(emulate_update_regs);
2855 
2856 /*
2857  * Emulate a previously-analysed load or store instruction.
2858  * Return values are:
2859  * 0 = instruction emulated successfully
2860  * -EFAULT = address out of range or access faulted (regs->dar
2861  *	     contains the faulting address)
2862  * -EACCES = misaligned access, instruction requires alignment
2863  * -EINVAL = unknown operation in *op
2864  */
int emulate_loadstore(struct pt_regs *regs, struct instruction_op *op)
{
	int err, size, type;
	int i, rd, nb;
	unsigned int cr;
	unsigned long val;
	unsigned long ea;
	bool cross_endian;

	err = 0;
	size = GETSIZE(op->type);
	type = GETTYPE(op->type);
	/* true when the emulated context's endianness differs from the kernel's */
	cross_endian = (regs->msr & MSR_LE) != (MSR_KERNEL & MSR_LE);
	ea = truncate_if_32bit(regs->msr, op->ea);

	switch (type) {
	case LARX:
		/* Load-and-reserve must be naturally aligned */
		if (ea & (size - 1))
			return -EACCES;		/* can't handle misaligned */
		if (!address_ok(regs, ea, size))
			return -EFAULT;
		err = 0;
		val = 0;
		switch (size) {
#ifdef __powerpc64__
		case 1:
			__get_user_asmx(val, ea, err, "lbarx");
			break;
		case 2:
			__get_user_asmx(val, ea, err, "lharx");
			break;
#endif
		case 4:
			__get_user_asmx(val, ea, err, "lwarx");
			break;
#ifdef __powerpc64__
		case 8:
			__get_user_asmx(val, ea, err, "ldarx");
			break;
		case 16:
			/* lqarx writes the destination GPR pair directly */
			err = do_lqarx(ea, &regs->gpr[op->reg]);
			break;
#endif
		default:
			return -EINVAL;
		}
		if (err) {
			/* record the faulting address for the caller */
			regs->dar = ea;
			break;
		}
		/* sizes < 16 went through val; size 16 was stored above */
		if (size < 16)
			regs->gpr[op->reg] = val;
		break;

	case STCX:
		/* Store-conditional must be naturally aligned */
		if (ea & (size - 1))
			return -EACCES;		/* can't handle misaligned */
		if (!address_ok(regs, ea, size))
			return -EFAULT;
		err = 0;
		switch (size) {
#ifdef __powerpc64__
		case 1:
			__put_user_asmx(op->val, ea, err, "stbcx.", cr);
			break;
		case 2:
			__put_user_asmx(op->val, ea, err, "sthcx.", cr);
			break;
#endif
		case 4:
			__put_user_asmx(op->val, ea, err, "stwcx.", cr);
			break;
#ifdef __powerpc64__
		case 8:
			__put_user_asmx(op->val, ea, err, "stdcx.", cr);
			break;
		case 16:
			err = do_stqcx(ea, regs->gpr[op->reg],
				       regs->gpr[op->reg + 1], &cr);
			break;
#endif
		default:
			return -EINVAL;
		}
		/*
		 * Merge the CR0 field produced by the conditional store and
		 * mirror XER[SO] (bit 0x80000000 >> 3) into CR0[SO].
		 */
		if (!err)
			regs->ccr = (regs->ccr & 0x0fffffff) |
				(cr & 0xe0000000) |
				((regs->xer >> 3) & 0x10000000);
		else
			regs->dar = ea;
		break;

	case LOAD:
#ifdef __powerpc64__
		if (size == 16) {
			err = emulate_lq(regs, ea, op->reg, cross_endian);
			break;
		}
#endif
		err = read_mem(&regs->gpr[op->reg], ea, size, regs);
		if (!err) {
			if (op->type & SIGNEXT)
				do_signext(&regs->gpr[op->reg], size);
			/* reverse bytes iff exactly one of BYTEREV / cross_endian */
			if ((op->type & BYTEREV) == (cross_endian ? 0 : BYTEREV))
				do_byterev(&regs->gpr[op->reg], size);
		}
		break;

#ifdef CONFIG_PPC_FPU
	case LOAD_FP:
		/*
		 * If the instruction is in userspace, we can emulate it even
		 * if the VMX state is not live, because we have the state
		 * stored in the thread_struct.  If the instruction is in
		 * the kernel, we must not touch the state in the thread_struct.
		 */
		if (!(regs->msr & MSR_PR) && !(regs->msr & MSR_FP))
			return 0;
		err = do_fp_load(op, ea, regs, cross_endian);
		break;
#endif
#ifdef CONFIG_ALTIVEC
	case LOAD_VMX:
		/* same userspace-vs-kernel rule as LOAD_FP above */
		if (!(regs->msr & MSR_PR) && !(regs->msr & MSR_VEC))
			return 0;
		err = do_vec_load(op->reg, ea, size, regs, cross_endian);
		break;
#endif
#ifdef CONFIG_VSX
	case LOAD_VSX: {
		unsigned long msrbit = MSR_VSX;

		/*
		 * Some VSX instructions check the MSR_VEC bit rather than MSR_VSX
		 * when the target of the instruction is a vector register.
		 */
		if (op->reg >= 32 && (op->vsx_flags & VSX_CHECK_VEC))
			msrbit = MSR_VEC;
		if (!(regs->msr & MSR_PR) && !(regs->msr & msrbit))
			return 0;
		err = do_vsx_load(op, ea, regs, cross_endian);
		break;
	}
#endif
	case LOAD_MULTI:
		/* lmw / lswi / lswx: copy up to 4 bytes at a time into GPRs */
		if (!address_ok(regs, ea, size))
			return -EFAULT;
		rd = op->reg;
		for (i = 0; i < size; i += 4) {
			unsigned int v32 = 0;

			nb = size - i;
			if (nb > 4)
				nb = 4;
			err = copy_mem_in((u8 *) &v32, ea, nb, regs);
			if (err)
				break;
			if (unlikely(cross_endian))
				v32 = byterev_4(v32);
			regs->gpr[rd] = v32;
			ea += 4;
			/* reg number wraps from 31 to 0 for lsw[ix] */
			rd = (rd + 1) & 0x1f;
		}
		break;

	case STORE:
#ifdef __powerpc64__
		if (size == 16) {
			err = emulate_stq(regs, ea, op->reg, cross_endian);
			break;
		}
#endif
		/*
		 * A kernel stwu/stdu on r1 near the current stack pointer is
		 * a stack-frame push; defer it (see handle_stack_update())
		 * instead of emulating the store, and only update gpr[1].
		 */
		if ((op->type & UPDATE) && size == sizeof(long) &&
		    op->reg == 1 && op->update_reg == 1 &&
		    !(regs->msr & MSR_PR) &&
		    ea >= regs->gpr[1] - STACK_INT_FRAME_SIZE) {
			err = handle_stack_update(ea, regs);
			break;
		}
		if (unlikely(cross_endian))
			do_byterev(&op->val, size);
		err = write_mem(op->val, ea, size, regs);
		break;

#ifdef CONFIG_PPC_FPU
	case STORE_FP:
		/* same userspace-vs-kernel rule as LOAD_FP above */
		if (!(regs->msr & MSR_PR) && !(regs->msr & MSR_FP))
			return 0;
		err = do_fp_store(op, ea, regs, cross_endian);
		break;
#endif
#ifdef CONFIG_ALTIVEC
	case STORE_VMX:
		if (!(regs->msr & MSR_PR) && !(regs->msr & MSR_VEC))
			return 0;
		err = do_vec_store(op->reg, ea, size, regs, cross_endian);
		break;
#endif
#ifdef CONFIG_VSX
	case STORE_VSX: {
		unsigned long msrbit = MSR_VSX;

		/*
		 * Some VSX instructions check the MSR_VEC bit rather than MSR_VSX
		 * when the target of the instruction is a vector register.
		 */
		if (op->reg >= 32 && (op->vsx_flags & VSX_CHECK_VEC))
			msrbit = MSR_VEC;
		if (!(regs->msr & MSR_PR) && !(regs->msr & msrbit))
			return 0;
		err = do_vsx_store(op, ea, regs, cross_endian);
		break;
	}
#endif
	case STORE_MULTI:
		/* stmw / stswi / stswx: copy up to 4 bytes at a time from GPRs */
		if (!address_ok(regs, ea, size))
			return -EFAULT;
		rd = op->reg;
		for (i = 0; i < size; i += 4) {
			unsigned int v32 = regs->gpr[rd];

			nb = size - i;
			if (nb > 4)
				nb = 4;
			if (unlikely(cross_endian))
				v32 = byterev_4(v32);
			err = copy_mem_out((u8 *) &v32, ea, nb, regs);
			if (err)
				break;
			ea += 4;
			/* reg number wraps from 31 to 0 for stsw[ix] */
			rd = (rd + 1) & 0x1f;
		}
		break;

	default:
		return -EINVAL;
	}

	if (err)
		return err;

	/* update forms write the (untruncated) effective address back to RA */
	if (op->type & UPDATE)
		regs->gpr[op->update_reg] = op->ea;

	return 0;
}
3113 NOKPROBE_SYMBOL(emulate_loadstore);
3114 
3115 /*
3116  * Emulate instructions that cause a transfer of control,
3117  * loads and stores, and a few other instructions.
3118  * Returns 1 if the step was emulated, 0 if not,
3119  * or -1 if the instruction is one that should not be stepped,
3120  * such as an rfid, or a mtmsrd that would clear MSR_RI.
3121  */
int emulate_step(struct pt_regs *regs, unsigned int instr)
{
	struct instruction_op op;
	int r, err, type;
	unsigned long val;
	unsigned long ea;

	r = analyse_instr(&op, regs, instr);
	if (r < 0)
		return r;
	if (r > 0) {
		/* analyse_instr() fully decoded it; apply the register updates */
		emulate_update_regs(regs, &op);
		return 1;
	}

	err = 0;
	type = GETTYPE(op.type);

	if (OP_IS_LOAD_STORE(type)) {
		err = emulate_loadstore(regs, &op);
		if (err)
			return 0;
		goto instr_done;
	}

	switch (type) {
	case CACHEOP:
		ea = truncate_if_32bit(regs->msr, op.ea);
		if (!address_ok(regs, ea, 8))
			return 0;
		switch (op.type & CACHEOP_MASK) {
		case DCBST:
			__cacheop_user_asmx(ea, err, "dcbst");
			break;
		case DCBF:
			__cacheop_user_asmx(ea, err, "dcbf");
			break;
		case DCBTST:
			/* op.reg == 0 is the plain (no-hint) form; others are no-ops */
			if (op.reg == 0)
				prefetchw((void *) ea);
			break;
		case DCBT:
			if (op.reg == 0)
				prefetch((void *) ea);
			break;
		case ICBI:
			__cacheop_user_asmx(ea, err, "icbi");
			break;
		case DCBZ:
			err = emulate_dcbz(ea, regs);
			break;
		}
		if (err) {
			/* record the faulting address */
			regs->dar = ea;
			return 0;
		}
		goto instr_done;

	case MFMSR:
		regs->gpr[op.reg] = regs->msr & MSR_MASK;
		goto instr_done;

	case MTMSR:
		val = regs->gpr[op.reg];
		if ((val & MSR_RI) == 0)
			/* can't step mtmsr[d] that would clear MSR_RI */
			return -1;
		/* here op.val is the mask of bits to change */
		regs->msr = (regs->msr & ~op.val) | (val & op.val);
		goto instr_done;

#ifdef CONFIG_PPC64
	case SYSCALL:	/* sc */
		/*
		 * N.B. this uses knowledge about how the syscall
		 * entry code works.  If that is changed, this will
		 * need to be changed also.
		 */
		if (regs->gpr[0] == 0x1ebe &&
		    cpu_has_feature(CPU_FTR_REAL_LE)) {
			/* magic "switch endian" syscall: just flip MSR_LE */
			regs->msr ^= MSR_LE;
			goto instr_done;
		}
		/* mimic the register state the syscall entry path sets up */
		regs->gpr[9] = regs->gpr[13];
		regs->gpr[10] = MSR_KERNEL;
		regs->gpr[11] = regs->nip + 4;
		regs->gpr[12] = regs->msr & MSR_MASK;
		regs->gpr[13] = (unsigned long) get_paca();
		regs->nip = (unsigned long) &system_call_common;
		regs->msr = MSR_KERNEL;
		return 1;

	case RFI:
		/* rfid etc. must not be single-stepped */
		return -1;
#endif
	}
	return 0;

 instr_done:
	regs->nip = truncate_if_32bit(regs->msr, regs->nip + 4);
	return 1;
}
3224 NOKPROBE_SYMBOL(emulate_step);
3225