1  // SPDX-License-Identifier: GPL-2.0-or-later
2  /*
3   * Single-step support.
4   *
5   * Copyright (C) 2004 Paul Mackerras <paulus@au.ibm.com>, IBM
6   */
7  #include <linux/kernel.h>
8  #include <linux/kprobes.h>
9  #include <linux/ptrace.h>
10  #include <linux/prefetch.h>
11  #include <asm/sstep.h>
12  #include <asm/processor.h>
13  #include <linux/uaccess.h>
14  #include <asm/cpu_has_feature.h>
15  #include <asm/cputable.h>
16  #include <asm/disassemble.h>
17  
18  extern char system_call_common[];
19  extern char system_call_vectored_emulate[];
20  
21  #ifdef CONFIG_PPC64
22  /* Bits in SRR1 that are copied from MSR */
23  #define MSR_MASK	0xffffffff87c0ffffUL
24  #else
25  #define MSR_MASK	0x87c0ffff
26  #endif
27  
28  /* Bits in XER */
29  #define XER_SO		0x80000000U
30  #define XER_OV		0x40000000U
31  #define XER_CA		0x20000000U
32  #define XER_OV32	0x00080000U
33  #define XER_CA32	0x00040000U
34  
35  #ifdef CONFIG_VSX
36  #define VSX_REGISTER_XTP(rd)   ((((rd) & 1) << 5) | ((rd) & 0xfe))
37  #endif
38  
39  #ifdef CONFIG_PPC_FPU
40  /*
41   * Functions in ldstfp.S
42   */
43  extern void get_fpr(int rn, double *p);
44  extern void put_fpr(int rn, const double *p);
45  extern void get_vr(int rn, __vector128 *p);
46  extern void put_vr(int rn, __vector128 *p);
47  extern void load_vsrn(int vsr, const void *p);
48  extern void store_vsrn(int vsr, void *p);
49  extern void conv_sp_to_dp(const float *sp, double *dp);
50  extern void conv_dp_to_sp(const double *dp, float *sp);
51  #endif
52  
53  #ifdef __powerpc64__
54  /*
55   * Functions in quad.S
56   */
57  extern int do_lq(unsigned long ea, unsigned long *regs);
58  extern int do_stq(unsigned long ea, unsigned long val0, unsigned long val1);
59  extern int do_lqarx(unsigned long ea, unsigned long *regs);
60  extern int do_stqcx(unsigned long ea, unsigned long val0, unsigned long val1,
61  		    unsigned int *crp);
62  #endif
63  
64  #ifdef __LITTLE_ENDIAN__
65  #define IS_LE	1
66  #define IS_BE	0
67  #else
68  #define IS_LE	0
69  #define IS_BE	1
70  #endif
71  
72  /*
73   * Emulate the truncation of 64-bit values in 32-bit mode.
74   */
75  static nokprobe_inline unsigned long truncate_if_32bit(unsigned long msr,
76  							unsigned long val)
77  {
78  #ifdef __powerpc64__
79  	if ((msr & MSR_64BIT) == 0)
80  		val &= 0xffffffffUL;
81  #endif
82  	return val;
83  }
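/*
 * Worked example (editorial, not part of the original source): with
 * MSR_64BIT clear, truncate_if_32bit(msr, 0x123456789UL) returns
 * 0x23456789UL; with MSR_64BIT set the value passes through unchanged.
 */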
84  
85  /*
86   * Determine whether a conditional branch instruction would branch.
87   */
88  static nokprobe_inline int branch_taken(unsigned int instr,
89  					const struct pt_regs *regs,
90  					struct instruction_op *op)
91  {
92  	unsigned int bo = (instr >> 21) & 0x1f;
93  	unsigned int bi;
94  
95  	if ((bo & 4) == 0) {
96  		/* decrement counter */
97  		op->type |= DECCTR;
98  		if (((bo >> 1) & 1) ^ (regs->ctr == 1))
99  			return 0;
100  	}
101  	if ((bo & 0x10) == 0) {
102  		/* check bit from CR */
103  		bi = (instr >> 16) & 0x1f;
104  		if (((regs->ccr >> (31 - bi)) & 1) != ((bo >> 3) & 1))
105  			return 0;
106  	}
107  	return 1;
108  }
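/*
 * Illustration (editorial): for bdnz, BO = 0b10000.  Bit 2 of BO is
 * clear, so CTR is decremented, and bit 1 is clear, so the branch is
 * taken only if the decremented CTR is non-zero -- i.e. branch_taken()
 * returns 0 exactly when regs->ctr == 1 on entry.  Bit 4 being set
 * skips the CR-bit test entirely.
 */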
109  
110  static nokprobe_inline long address_ok(struct pt_regs *regs,
111  				       unsigned long ea, int nb)
112  {
113  	if (!user_mode(regs))
114  		return 1;
115  	if (access_ok((void __user *)ea, nb))
116  		return 1;
117  	if (access_ok((void __user *)ea, 1))
118  		/* Access overlaps the end of the user region */
119  		regs->dar = TASK_SIZE_MAX - 1;
120  	else
121  		regs->dar = ea;
122  	return 0;
123  }
124  
125  /*
126   * Calculate effective address for a D-form instruction
127   */
128  static nokprobe_inline unsigned long dform_ea(unsigned int instr,
129  					      const struct pt_regs *regs)
130  {
131  	int ra;
132  	unsigned long ea;
133  
134  	ra = (instr >> 16) & 0x1f;
135  	ea = (signed short) instr;		/* sign-extend */
136  	if (ra)
137  		ea += regs->gpr[ra];
138  
139  	return ea;
140  }
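/*
 * Example (editorial): lwz r3,-4(r1) carries D = 0xfffc, so
 * ea = regs->gpr[1] + (signed short)0xfffc = gpr[1] - 4.  With ra == 0
 * the base register is omitted and ea is just the sign-extended D.
 */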
141  
142  #ifdef __powerpc64__
143  /*
144   * Calculate effective address for a DS-form instruction
145   */
146  static nokprobe_inline unsigned long dsform_ea(unsigned int instr,
147  					       const struct pt_regs *regs)
148  {
149  	int ra;
150  	unsigned long ea;
151  
152  	ra = (instr >> 16) & 0x1f;
153  	ea = (signed short) (instr & ~3);	/* sign-extend */
154  	if (ra)
155  		ea += regs->gpr[ra];
156  
157  	return ea;
158  }
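/*
 * Note (editorial): DS-form instructions (ld, std, lwa, ...) reuse the
 * low two bits of the displacement field as an extended opcode, hence
 * the "& ~3" above; the displacement is always a multiple of 4.
 */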
159  
160  /*
161   * Calculate effective address for a DQ-form instruction
162   */
163  static nokprobe_inline unsigned long dqform_ea(unsigned int instr,
164  					       const struct pt_regs *regs)
165  {
166  	int ra;
167  	unsigned long ea;
168  
169  	ra = (instr >> 16) & 0x1f;
170  	ea = (signed short) (instr & ~0xf);	/* sign-extend */
171  	if (ra)
172  		ea += regs->gpr[ra];
173  
174  	return ea;
175  }
176  #endif /* __powerpc64__ */
177  
178  /*
179   * Calculate effective address for an X-form instruction
180   */
181  static nokprobe_inline unsigned long xform_ea(unsigned int instr,
182  					      const struct pt_regs *regs)
183  {
184  	int ra, rb;
185  	unsigned long ea;
186  
187  	ra = (instr >> 16) & 0x1f;
188  	rb = (instr >> 11) & 0x1f;
189  	ea = regs->gpr[rb];
190  	if (ra)
191  		ea += regs->gpr[ra];
192  
193  	return ea;
194  }
195  
196  /*
197   * Calculate effective address for a MLS:D-form / 8LS:D-form
198   * prefixed instruction
199   */
200  static nokprobe_inline unsigned long mlsd_8lsd_ea(unsigned int instr,
201  						  unsigned int suffix,
202  						  const struct pt_regs *regs)
203  {
204  	int ra, prefix_r;
205  	unsigned int  dd;
206  	unsigned long ea, d0, d1, d;
207  
208  	prefix_r = GET_PREFIX_R(instr);
209  	ra = GET_PREFIX_RA(suffix);
210  
211  	d0 = instr & 0x3ffff;
212  	d1 = suffix & 0xffff;
213  	d = (d0 << 16) | d1;
214  
215  	/*
216  	 * sign extend a 34 bit number
217  	 */
218  	dd = (unsigned int)(d >> 2);
219  	ea = (signed int)dd;
220  	ea = (ea << 2) | (d & 0x3);
221  
222  	if (!prefix_r && ra)
223  		ea += regs->gpr[ra];
224  	else if (!prefix_r && !ra)
225  		; /* Leave ea as is */
226  	else if (prefix_r)
227  		ea += regs->nip;
228  
229  	/*
230  	 * (prefix_r && ra) is an invalid form. Should already be
231  	 * checked for by caller!
232  	 */
233  
234  	return ea;
235  }
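/*
 * Note (editorial) on the 34-bit sign extension above: d >> 2 places
 * the sign bit of the 34-bit displacement at bit 31 of a 32-bit value,
 * the (signed int) cast extends it to the full register width, and
 * shifting back up by 2 restores the two low-order bits.
 */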
236  
237  /*
238   * Return the largest power of 2, not greater than sizeof(unsigned long),
239   * such that x is a multiple of it.
240   */
241  static nokprobe_inline unsigned long max_align(unsigned long x)
242  {
243  	x |= sizeof(unsigned long);
244  	return x & -x;		/* isolates rightmost bit */
245  }
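/*
 * Examples (editorial, 64-bit): max_align(0x1002) == 2 and
 * max_align(0x1008) == 8.  OR-ing in sizeof(unsigned long) caps the
 * result, and x & -x isolates the lowest set bit, i.e. the largest
 * power of 2 dividing x.
 */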
246  
247  static nokprobe_inline unsigned long byterev_2(unsigned long x)
248  {
249  	return ((x >> 8) & 0xff) | ((x & 0xff) << 8);
250  }
251  
252  static nokprobe_inline unsigned long byterev_4(unsigned long x)
253  {
254  	return ((x >> 24) & 0xff) | ((x >> 8) & 0xff00) |
255  		((x & 0xff00) << 8) | ((x & 0xff) << 24);
256  }
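/*
 * For illustration (editorial): byterev_2(0x0102) == 0x0201 and
 * byterev_4(0x01020304) == 0x04030201.
 */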
257  
258  #ifdef __powerpc64__
259  static nokprobe_inline unsigned long byterev_8(unsigned long x)
260  {
261  	return (byterev_4(x) << 32) | byterev_4(x >> 32);
262  }
263  #endif
264  
265  static nokprobe_inline void do_byte_reverse(void *ptr, int nb)
266  {
267  	switch (nb) {
268  	case 2:
269  		*(u16 *)ptr = byterev_2(*(u16 *)ptr);
270  		break;
271  	case 4:
272  		*(u32 *)ptr = byterev_4(*(u32 *)ptr);
273  		break;
274  #ifdef __powerpc64__
275  	case 8:
276  		*(unsigned long *)ptr = byterev_8(*(unsigned long *)ptr);
277  		break;
278  	case 16: {
279  		unsigned long *up = (unsigned long *)ptr;
280  		unsigned long tmp;
281  		tmp = byterev_8(up[0]);
282  		up[0] = byterev_8(up[1]);
283  		up[1] = tmp;
284  		break;
285  	}
286  	case 32: {
287  		unsigned long *up = (unsigned long *)ptr;
288  		unsigned long tmp;
289  
290  		tmp = byterev_8(up[0]);
291  		up[0] = byterev_8(up[3]);
292  		up[3] = tmp;
293  		tmp = byterev_8(up[2]);
294  		up[2] = byterev_8(up[1]);
295  		up[1] = tmp;
296  		break;
297  	}
298  
299  #endif
300  	default:
301  		WARN_ON_ONCE(1);
302  	}
303  }
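/*
 * Note (editorial): the 16- and 32-byte cases reverse the buffer
 * end-to-end (byte 0 swaps with byte 15 or 31), which is why the
 * unsigned longs are both exchanged with each other and individually
 * byte-reversed.
 */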
304  
305  static nokprobe_inline int read_mem_aligned(unsigned long *dest,
306  					    unsigned long ea, int nb,
307  					    struct pt_regs *regs)
308  {
309  	int err = 0;
310  	unsigned long x = 0;
311  
312  	switch (nb) {
313  	case 1:
314  		err = __get_user(x, (unsigned char __user *) ea);
315  		break;
316  	case 2:
317  		err = __get_user(x, (unsigned short __user *) ea);
318  		break;
319  	case 4:
320  		err = __get_user(x, (unsigned int __user *) ea);
321  		break;
322  #ifdef __powerpc64__
323  	case 8:
324  		err = __get_user(x, (unsigned long __user *) ea);
325  		break;
326  #endif
327  	}
328  	if (!err)
329  		*dest = x;
330  	else
331  		regs->dar = ea;
332  	return err;
333  }
334  
335  /*
336   * Copy from userspace to a buffer, using the largest possible
337   * aligned accesses, up to sizeof(long).
338   */
339  static nokprobe_inline int copy_mem_in(u8 *dest, unsigned long ea, int nb,
340  				       struct pt_regs *regs)
341  {
342  	int err = 0;
343  	int c;
344  
345  	for (; nb > 0; nb -= c) {
346  		c = max_align(ea);
347  		if (c > nb)
348  			c = max_align(nb);
349  		switch (c) {
350  		case 1:
351  			err = __get_user(*dest, (unsigned char __user *) ea);
352  			break;
353  		case 2:
354  			err = __get_user(*(u16 *)dest,
355  					 (unsigned short __user *) ea);
356  			break;
357  		case 4:
358  			err = __get_user(*(u32 *)dest,
359  					 (unsigned int __user *) ea);
360  			break;
361  #ifdef __powerpc64__
362  		case 8:
363  			err = __get_user(*(unsigned long *)dest,
364  					 (unsigned long __user *) ea);
365  			break;
366  #endif
367  		}
368  		if (err) {
369  			regs->dar = ea;
370  			return err;
371  		}
372  		dest += c;
373  		ea += c;
374  	}
375  	return 0;
376  }
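/*
 * Example walk (editorial): copying 6 bytes from ea = 0x1002 uses a
 * 2-byte access at 0x1002 followed by a 4-byte access at 0x1004, so
 * every access is naturally aligned.
 */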
377  
378  static nokprobe_inline int read_mem_unaligned(unsigned long *dest,
379  					      unsigned long ea, int nb,
380  					      struct pt_regs *regs)
381  {
382  	union {
383  		unsigned long ul;
384  		u8 b[sizeof(unsigned long)];
385  	} u;
386  	int i;
387  	int err;
388  
389  	u.ul = 0;
390  	i = IS_BE ? sizeof(unsigned long) - nb : 0;
391  	err = copy_mem_in(&u.b[i], ea, nb, regs);
392  	if (!err)
393  		*dest = u.ul;
394  	return err;
395  }
396  
397  /*
398   * Read memory at address ea for nb bytes, return 0 for success
399   * or -EFAULT if an error occurred.  N.B. nb must be 1, 2, 4 or 8.
400   * If nb < sizeof(long), the result is right-justified on BE systems.
401   */
402  static int read_mem(unsigned long *dest, unsigned long ea, int nb,
403  			      struct pt_regs *regs)
404  {
405  	if (!address_ok(regs, ea, nb))
406  		return -EFAULT;
407  	if ((ea & (nb - 1)) == 0)
408  		return read_mem_aligned(dest, ea, nb, regs);
409  	return read_mem_unaligned(dest, ea, nb, regs);
410  }
411  NOKPROBE_SYMBOL(read_mem);
412  
413  static nokprobe_inline int write_mem_aligned(unsigned long val,
414  					     unsigned long ea, int nb,
415  					     struct pt_regs *regs)
416  {
417  	int err = 0;
418  
419  	switch (nb) {
420  	case 1:
421  		err = __put_user(val, (unsigned char __user *) ea);
422  		break;
423  	case 2:
424  		err = __put_user(val, (unsigned short __user *) ea);
425  		break;
426  	case 4:
427  		err = __put_user(val, (unsigned int __user *) ea);
428  		break;
429  #ifdef __powerpc64__
430  	case 8:
431  		err = __put_user(val, (unsigned long __user *) ea);
432  		break;
433  #endif
434  	}
435  	if (err)
436  		regs->dar = ea;
437  	return err;
438  }
439  
440  /*
441   * Copy from a buffer to userspace, using the largest possible
442   * aligned accesses, up to sizeof(long).
443   */
444  static nokprobe_inline int copy_mem_out(u8 *dest, unsigned long ea, int nb,
445  					struct pt_regs *regs)
446  {
447  	int err = 0;
448  	int c;
449  
450  	for (; nb > 0; nb -= c) {
451  		c = max_align(ea);
452  		if (c > nb)
453  			c = max_align(nb);
454  		switch (c) {
455  		case 1:
456  			err = __put_user(*dest, (unsigned char __user *) ea);
457  			break;
458  		case 2:
459  			err = __put_user(*(u16 *)dest,
460  					 (unsigned short __user *) ea);
461  			break;
462  		case 4:
463  			err = __put_user(*(u32 *)dest,
464  					 (unsigned int __user *) ea);
465  			break;
466  #ifdef __powerpc64__
467  		case 8:
468  			err = __put_user(*(unsigned long *)dest,
469  					 (unsigned long __user *) ea);
470  			break;
471  #endif
472  		}
473  		if (err) {
474  			regs->dar = ea;
475  			return err;
476  		}
477  		dest += c;
478  		ea += c;
479  	}
480  	return 0;
481  }
482  
483  static nokprobe_inline int write_mem_unaligned(unsigned long val,
484  					       unsigned long ea, int nb,
485  					       struct pt_regs *regs)
486  {
487  	union {
488  		unsigned long ul;
489  		u8 b[sizeof(unsigned long)];
490  	} u;
491  	int i;
492  
493  	u.ul = val;
494  	i = IS_BE ? sizeof(unsigned long) - nb : 0;
495  	return copy_mem_out(&u.b[i], ea, nb, regs);
496  }
497  
498  /*
499   * Write memory at address ea for nb bytes, return 0 for success
500   * or -EFAULT if an error occurred.  N.B. nb must be 1, 2, 4 or 8.
501   */
502  static int write_mem(unsigned long val, unsigned long ea, int nb,
503  			       struct pt_regs *regs)
504  {
505  	if (!address_ok(regs, ea, nb))
506  		return -EFAULT;
507  	if ((ea & (nb - 1)) == 0)
508  		return write_mem_aligned(val, ea, nb, regs);
509  	return write_mem_unaligned(val, ea, nb, regs);
510  }
511  NOKPROBE_SYMBOL(write_mem);
512  
513  #ifdef CONFIG_PPC_FPU
514  /*
515   * These access either the real FP register or the image in the
516   * thread_struct, depending on regs->msr & MSR_FP.
517   */
518  static int do_fp_load(struct instruction_op *op, unsigned long ea,
519  		      struct pt_regs *regs, bool cross_endian)
520  {
521  	int err, rn, nb;
522  	union {
523  		int i;
524  		unsigned int u;
525  		float f;
526  		double d[2];
527  		unsigned long l[2];
528  		u8 b[2 * sizeof(double)];
529  	} u;
530  
531  	nb = GETSIZE(op->type);
532  	if (nb > sizeof(u))
533  		return -EINVAL;
534  	if (!address_ok(regs, ea, nb))
535  		return -EFAULT;
536  	rn = op->reg;
537  	err = copy_mem_in(u.b, ea, nb, regs);
538  	if (err)
539  		return err;
540  	if (unlikely(cross_endian)) {
541  		do_byte_reverse(u.b, min(nb, 8));
542  		if (nb == 16)
543  			do_byte_reverse(&u.b[8], 8);
544  	}
545  	preempt_disable();
546  	if (nb == 4) {
547  		if (op->type & FPCONV)
548  			conv_sp_to_dp(&u.f, &u.d[0]);
549  		else if (op->type & SIGNEXT)
550  			u.l[0] = u.i;
551  		else
552  			u.l[0] = u.u;
553  	}
554  	if (regs->msr & MSR_FP)
555  		put_fpr(rn, &u.d[0]);
556  	else
557  		current->thread.TS_FPR(rn) = u.l[0];
558  	if (nb == 16) {
559  		/* lfdp */
560  		rn |= 1;
561  		if (regs->msr & MSR_FP)
562  			put_fpr(rn, &u.d[1]);
563  		else
564  			current->thread.TS_FPR(rn) = u.l[1];
565  	}
566  	preempt_enable();
567  	return 0;
568  }
569  NOKPROBE_SYMBOL(do_fp_load);
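/*
 * Note (editorial): nb == 16 above corresponds to lfdp/lfdpx, which
 * loads an even/odd FP register pair; rn |= 1 selects the odd register
 * of the pair for the second doubleword.
 */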
570  
571  static int do_fp_store(struct instruction_op *op, unsigned long ea,
572  		       struct pt_regs *regs, bool cross_endian)
573  {
574  	int rn, nb;
575  	union {
576  		unsigned int u;
577  		float f;
578  		double d[2];
579  		unsigned long l[2];
580  		u8 b[2 * sizeof(double)];
581  	} u;
582  
583  	nb = GETSIZE(op->type);
584  	if (nb > sizeof(u))
585  		return -EINVAL;
586  	if (!address_ok(regs, ea, nb))
587  		return -EFAULT;
588  	rn = op->reg;
589  	preempt_disable();
590  	if (regs->msr & MSR_FP)
591  		get_fpr(rn, &u.d[0]);
592  	else
593  		u.l[0] = current->thread.TS_FPR(rn);
594  	if (nb == 4) {
595  		if (op->type & FPCONV)
596  			conv_dp_to_sp(&u.d[0], &u.f);
597  		else
598  			u.u = u.l[0];
599  	}
600  	if (nb == 16) {
601  		rn |= 1;
602  		if (regs->msr & MSR_FP)
603  			get_fpr(rn, &u.d[1]);
604  		else
605  			u.l[1] = current->thread.TS_FPR(rn);
606  	}
607  	preempt_enable();
608  	if (unlikely(cross_endian)) {
609  		do_byte_reverse(u.b, min(nb, 8));
610  		if (nb == 16)
611  			do_byte_reverse(&u.b[8], 8);
612  	}
613  	return copy_mem_out(u.b, ea, nb, regs);
614  }
615  NOKPROBE_SYMBOL(do_fp_store);
616  #endif
617  
618  #ifdef CONFIG_ALTIVEC
619  /* For Altivec/VMX, no need to worry about alignment */
620  static nokprobe_inline int do_vec_load(int rn, unsigned long ea,
621  				       int size, struct pt_regs *regs,
622  				       bool cross_endian)
623  {
624  	int err;
625  	union {
626  		__vector128 v;
627  		u8 b[sizeof(__vector128)];
628  	} u = {};
629  
630  	if (size > sizeof(u))
631  		return -EINVAL;
632  
633  	if (!address_ok(regs, ea & ~0xfUL, 16))
634  		return -EFAULT;
635  	/* align to multiple of size */
636  	ea &= ~(size - 1);
637  	err = copy_mem_in(&u.b[ea & 0xf], ea, size, regs);
638  	if (err)
639  		return err;
640  	if (unlikely(cross_endian))
641  		do_byte_reverse(&u.b[ea & 0xf], size);
642  	preempt_disable();
643  	if (regs->msr & MSR_VEC)
644  		put_vr(rn, &u.v);
645  	else
646  		current->thread.vr_state.vr[rn] = u.v;
647  	preempt_enable();
648  	return 0;
649  }
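/*
 * Note (editorial): as with lvx, the low bits of the EA do not affect
 * the access itself -- ea is truncated to a multiple of the access
 * size -- but they still select the byte lane within the 16-byte
 * vector, via u.b[ea & 0xf].
 */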
650  
651  static nokprobe_inline int do_vec_store(int rn, unsigned long ea,
652  					int size, struct pt_regs *regs,
653  					bool cross_endian)
654  {
655  	union {
656  		__vector128 v;
657  		u8 b[sizeof(__vector128)];
658  	} u;
659  
660  	if (size > sizeof(u))
661  		return -EINVAL;
662  
663  	if (!address_ok(regs, ea & ~0xfUL, 16))
664  		return -EFAULT;
665  	/* align to multiple of size */
666  	ea &= ~(size - 1);
667  
668  	preempt_disable();
669  	if (regs->msr & MSR_VEC)
670  		get_vr(rn, &u.v);
671  	else
672  		u.v = current->thread.vr_state.vr[rn];
673  	preempt_enable();
674  	if (unlikely(cross_endian))
675  		do_byte_reverse(&u.b[ea & 0xf], size);
676  	return copy_mem_out(&u.b[ea & 0xf], ea, size, regs);
677  }
678  #endif /* CONFIG_ALTIVEC */
679  
680  #ifdef __powerpc64__
681  static nokprobe_inline int emulate_lq(struct pt_regs *regs, unsigned long ea,
682  				      int reg, bool cross_endian)
683  {
684  	int err;
685  
686  	if (!address_ok(regs, ea, 16))
687  		return -EFAULT;
688  	/* if aligned, should be atomic */
689  	if ((ea & 0xf) == 0) {
690  		err = do_lq(ea, &regs->gpr[reg]);
691  	} else {
692  		err = read_mem(&regs->gpr[reg + IS_LE], ea, 8, regs);
693  		if (!err)
694  			err = read_mem(&regs->gpr[reg + IS_BE], ea + 8, 8, regs);
695  	}
696  	if (!err && unlikely(cross_endian))
697  		do_byte_reverse(&regs->gpr[reg], 16);
698  	return err;
699  }
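/*
 * Note (editorial): in the unaligned path the IS_LE/IS_BE indices order
 * the two 8-byte halves so the register pair ends up with the same
 * contents the atomic 16-byte do_lq() path would produce.
 */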
700  
701  static nokprobe_inline int emulate_stq(struct pt_regs *regs, unsigned long ea,
702  				       int reg, bool cross_endian)
703  {
704  	int err;
705  	unsigned long vals[2];
706  
707  	if (!address_ok(regs, ea, 16))
708  		return -EFAULT;
709  	vals[0] = regs->gpr[reg];
710  	vals[1] = regs->gpr[reg + 1];
711  	if (unlikely(cross_endian))
712  		do_byte_reverse(vals, 16);
713  
714  	/* if aligned, should be atomic */
715  	if ((ea & 0xf) == 0)
716  		return do_stq(ea, vals[0], vals[1]);
717  
718  	err = write_mem(vals[IS_LE], ea, 8, regs);
719  	if (!err)
720  		err = write_mem(vals[IS_BE], ea + 8, 8, regs);
721  	return err;
722  }
723  #endif /* __powerpc64__ */
724  
725  #ifdef CONFIG_VSX
726  void emulate_vsx_load(struct instruction_op *op, union vsx_reg *reg,
727  		      const void *mem, bool rev)
728  {
729  	int size, read_size;
730  	int i, j;
731  	const unsigned int *wp;
732  	const unsigned short *hp;
733  	const unsigned char *bp;
734  
735  	size = GETSIZE(op->type);
736  	reg->d[0] = reg->d[1] = 0;
737  
738  	switch (op->element_size) {
739  	case 32:
740  		/* [p]lxvp[x] */
741  	case 16:
742  		/* whole vector; lxv[x] or lxvl[l] */
743  		if (size == 0)
744  			break;
745  		memcpy(reg, mem, size);
746  		if (IS_LE && (op->vsx_flags & VSX_LDLEFT))
747  			rev = !rev;
748  		if (rev)
749  			do_byte_reverse(reg, size);
750  		break;
751  	case 8:
752  		/* scalar loads, lxvd2x, lxvdsx */
753  		read_size = (size >= 8) ? 8 : size;
754  		i = IS_LE ? 8 : 8 - read_size;
755  		memcpy(&reg->b[i], mem, read_size);
756  		if (rev)
757  			do_byte_reverse(&reg->b[i], 8);
758  		if (size < 8) {
759  			if (op->type & SIGNEXT) {
760  				/* size == 4 is the only case here */
761  				reg->d[IS_LE] = (signed int) reg->d[IS_LE];
762  			} else if (op->vsx_flags & VSX_FPCONV) {
763  				preempt_disable();
764  				conv_sp_to_dp(&reg->fp[1 + IS_LE],
765  					      &reg->dp[IS_LE]);
766  				preempt_enable();
767  			}
768  		} else {
769  			if (size == 16) {
770  				unsigned long v = *(unsigned long *)(mem + 8);
771  				reg->d[IS_BE] = !rev ? v : byterev_8(v);
772  			} else if (op->vsx_flags & VSX_SPLAT)
773  				reg->d[IS_BE] = reg->d[IS_LE];
774  		}
775  		break;
776  	case 4:
777  		/* lxvw4x, lxvwsx */
778  		wp = mem;
779  		for (j = 0; j < size / 4; ++j) {
780  			i = IS_LE ? 3 - j : j;
781  			reg->w[i] = !rev ? *wp++ : byterev_4(*wp++);
782  		}
783  		if (op->vsx_flags & VSX_SPLAT) {
784  			u32 val = reg->w[IS_LE ? 3 : 0];
785  			for (; j < 4; ++j) {
786  				i = IS_LE ? 3 - j : j;
787  				reg->w[i] = val;
788  			}
789  		}
790  		break;
791  	case 2:
792  		/* lxvh8x */
793  		hp = mem;
794  		for (j = 0; j < size / 2; ++j) {
795  			i = IS_LE ? 7 - j : j;
796  			reg->h[i] = !rev ? *hp++ : byterev_2(*hp++);
797  		}
798  		break;
799  	case 1:
800  		/* lxvb16x */
801  		bp = mem;
802  		for (j = 0; j < size; ++j) {
803  			i = IS_LE ? 15 - j : j;
804  			reg->b[i] = *bp++;
805  		}
806  		break;
807  	}
808  }
809  EXPORT_SYMBOL_GPL(emulate_vsx_load);
810  NOKPROBE_SYMBOL(emulate_vsx_load);
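/*
 * Note (editorial): op->element_size selects the reordering granule
 * used above -- 16/32 treat the data as whole quadwords, while 8/4/2/1
 * reorder per doubleword, word, halfword or byte, matching
 * lxvd2x/lxvw4x/lxvh8x/lxvb16x semantics on the current endianness.
 */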
811  
812  void emulate_vsx_store(struct instruction_op *op, const union vsx_reg *reg,
813  		       void *mem, bool rev)
814  {
815  	int size, write_size;
816  	int i, j;
817  	union vsx_reg buf;
818  	unsigned int *wp;
819  	unsigned short *hp;
820  	unsigned char *bp;
821  
822  	size = GETSIZE(op->type);
823  
824  	switch (op->element_size) {
825  	case 32:
826  		/* [p]stxvp[x] */
827  		if (size == 0)
828  			break;
829  		if (rev) {
830  			/* reverse 32 bytes */
831  			union vsx_reg buf32[2];
832  			buf32[0].d[0] = byterev_8(reg[1].d[1]);
833  			buf32[0].d[1] = byterev_8(reg[1].d[0]);
834  			buf32[1].d[0] = byterev_8(reg[0].d[1]);
835  			buf32[1].d[1] = byterev_8(reg[0].d[0]);
836  			memcpy(mem, buf32, size);
837  		} else {
838  			memcpy(mem, reg, size);
839  		}
840  		break;
841  	case 16:
842  		/* stxv, stxvx, stxvl, stxvll */
843  		if (size == 0)
844  			break;
845  		if (IS_LE && (op->vsx_flags & VSX_LDLEFT))
846  			rev = !rev;
847  		if (rev) {
848  			/* reverse 16 bytes */
849  			buf.d[0] = byterev_8(reg->d[1]);
850  			buf.d[1] = byterev_8(reg->d[0]);
851  			reg = &buf;
852  		}
853  		memcpy(mem, reg, size);
854  		break;
855  	case 8:
856  		/* scalar stores, stxvd2x */
857  		write_size = (size >= 8) ? 8 : size;
858  		i = IS_LE ? 8 : 8 - write_size;
859  		if (size < 8 && op->vsx_flags & VSX_FPCONV) {
860  			buf.d[0] = buf.d[1] = 0;
861  			preempt_disable();
862  			conv_dp_to_sp(&reg->dp[IS_LE], &buf.fp[1 + IS_LE]);
863  			preempt_enable();
864  			reg = &buf;
865  		}
866  		memcpy(mem, &reg->b[i], write_size);
867  		if (size == 16)
868  			memcpy(mem + 8, &reg->d[IS_BE], 8);
869  		if (unlikely(rev)) {
870  			do_byte_reverse(mem, write_size);
871  			if (size == 16)
872  				do_byte_reverse(mem + 8, 8);
873  		}
874  		break;
875  	case 4:
876  		/* stxvw4x */
877  		wp = mem;
878  		for (j = 0; j < size / 4; ++j) {
879  			i = IS_LE ? 3 - j : j;
880  			*wp++ = !rev ? reg->w[i] : byterev_4(reg->w[i]);
881  		}
882  		break;
883  	case 2:
884  		/* stxvh8x */
885  		hp = mem;
886  		for (j = 0; j < size / 2; ++j) {
887  			i = IS_LE ? 7 - j : j;
888  			*hp++ = !rev ? reg->h[i] : byterev_2(reg->h[i]);
889  		}
890  		break;
891  	case 1:
892  		/* stxvb16x */
893  		bp = mem;
894  		for (j = 0; j < size; ++j) {
895  			i = IS_LE ? 15 - j : j;
896  			*bp++ = reg->b[i];
897  		}
898  		break;
899  	}
900  }
901  EXPORT_SYMBOL_GPL(emulate_vsx_store);
902  NOKPROBE_SYMBOL(emulate_vsx_store);
903  
904  static nokprobe_inline int do_vsx_load(struct instruction_op *op,
905  				       unsigned long ea, struct pt_regs *regs,
906  				       bool cross_endian)
907  {
908  	int reg = op->reg;
909  	int i, j, nr_vsx_regs;
910  	u8 mem[32];
911  	union vsx_reg buf[2];
912  	int size = GETSIZE(op->type);
913  
914  	if (!address_ok(regs, ea, size) || copy_mem_in(mem, ea, size, regs))
915  		return -EFAULT;
916  
917  	nr_vsx_regs = max(1ul, size / sizeof(__vector128));
918  	emulate_vsx_load(op, buf, mem, cross_endian);
919  	preempt_disable();
920  	if (reg < 32) {
921  		/* FP regs + extensions */
922  		if (regs->msr & MSR_FP) {
923  			for (i = 0; i < nr_vsx_regs; i++) {
924  				j = IS_LE ? nr_vsx_regs - i - 1 : i;
925  				load_vsrn(reg + i, &buf[j].v);
926  			}
927  		} else {
928  			for (i = 0; i < nr_vsx_regs; i++) {
929  				j = IS_LE ? nr_vsx_regs - i - 1 : i;
930  				current->thread.fp_state.fpr[reg + i][0] = buf[j].d[0];
931  				current->thread.fp_state.fpr[reg + i][1] = buf[j].d[1];
932  			}
933  		}
934  	} else {
935  		if (regs->msr & MSR_VEC) {
936  			for (i = 0; i < nr_vsx_regs; i++) {
937  				j = IS_LE ? nr_vsx_regs - i - 1 : i;
938  				load_vsrn(reg + i, &buf[j].v);
939  			}
940  		} else {
941  			for (i = 0; i < nr_vsx_regs; i++) {
942  				j = IS_LE ? nr_vsx_regs - i - 1 : i;
943  				current->thread.vr_state.vr[reg - 32 + i] = buf[j].v;
944  			}
945  		}
946  	}
947  	preempt_enable();
948  	return 0;
949  }
950  
951  static nokprobe_inline int do_vsx_store(struct instruction_op *op,
952  					unsigned long ea, struct pt_regs *regs,
953  					bool cross_endian)
954  {
955  	int reg = op->reg;
956  	int i, j, nr_vsx_regs;
957  	u8 mem[32];
958  	union vsx_reg buf[2];
959  	int size = GETSIZE(op->type);
960  
961  	if (!address_ok(regs, ea, size))
962  		return -EFAULT;
963  
964  	nr_vsx_regs = max(1ul, size / sizeof(__vector128));
965  	preempt_disable();
966  	if (reg < 32) {
967  		/* FP regs + extensions */
968  		if (regs->msr & MSR_FP) {
969  			for (i = 0; i < nr_vsx_regs; i++) {
970  				j = IS_LE ? nr_vsx_regs - i - 1 : i;
971  				store_vsrn(reg + i, &buf[j].v);
972  			}
973  		} else {
974  			for (i = 0; i < nr_vsx_regs; i++) {
975  				j = IS_LE ? nr_vsx_regs - i - 1 : i;
976  				buf[j].d[0] = current->thread.fp_state.fpr[reg + i][0];
977  				buf[j].d[1] = current->thread.fp_state.fpr[reg + i][1];
978  			}
979  		}
980  	} else {
981  		if (regs->msr & MSR_VEC) {
982  			for (i = 0; i < nr_vsx_regs; i++) {
983  				j = IS_LE ? nr_vsx_regs - i - 1 : i;
984  				store_vsrn(reg + i, &buf[j].v);
985  			}
986  		} else {
987  			for (i = 0; i < nr_vsx_regs; i++) {
988  				j = IS_LE ? nr_vsx_regs - i - 1 : i;
989  				buf[j].v = current->thread.vr_state.vr[reg - 32 + i];
990  			}
991  		}
992  	}
993  	preempt_enable();
994  	emulate_vsx_store(op, buf, mem, cross_endian);
995  	return  copy_mem_out(mem, ea, size, regs);
996  }
997  #endif /* CONFIG_VSX */
998  
999  int emulate_dcbz(unsigned long ea, struct pt_regs *regs)
1000  {
1001  	int err;
1002  	unsigned long i, size;
1003  
1004  #ifdef __powerpc64__
1005  	size = ppc64_caches.l1d.block_size;
1006  	if (!(regs->msr & MSR_64BIT))
1007  		ea &= 0xffffffffUL;
1008  #else
1009  	size = L1_CACHE_BYTES;
1010  #endif
1011  	ea &= ~(size - 1);
1012  	if (!address_ok(regs, ea, size))
1013  		return -EFAULT;
1014  	for (i = 0; i < size; i += sizeof(long)) {
1015  		err = __put_user(0, (unsigned long __user *) (ea + i));
1016  		if (err) {
1017  			regs->dar = ea;
1018  			return err;
1019  		}
1020  	}
1021  	return 0;
1022  }
1023  NOKPROBE_SYMBOL(emulate_dcbz);
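/*
 * Example (editorial, assuming a 128-byte L1 data-cache block):
 * emulate_dcbz() with ea == 0x1234 zeroes the 128 bytes from 0x1200 to
 * 0x127f, sizeof(long) bytes at a time.
 */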
1024  
1025  #define __put_user_asmx(x, addr, err, op, cr)		\
1026  	__asm__ __volatile__(				\
1027  		".machine push\n"			\
1028  		".machine power8\n"			\
1029  		"1:	" op " %2,0,%3\n"		\
1030  		".machine pop\n"			\
1031  		"	mfcr	%1\n"			\
1032  		"2:\n"					\
1033  		".section .fixup,\"ax\"\n"		\
1034  		"3:	li	%0,%4\n"		\
1035  		"	b	2b\n"			\
1036  		".previous\n"				\
1037  		EX_TABLE(1b, 3b)			\
1038  		: "=r" (err), "=r" (cr)			\
1039  		: "r" (x), "r" (addr), "i" (-EFAULT), "0" (err))
1040  
1041  #define __get_user_asmx(x, addr, err, op)		\
1042  	__asm__ __volatile__(				\
1043  		".machine push\n"			\
1044  		".machine power8\n"			\
1045  		"1:	"op" %1,0,%2\n"			\
1046  		".machine pop\n"			\
1047  		"2:\n"					\
1048  		".section .fixup,\"ax\"\n"		\
1049  		"3:	li	%0,%3\n"		\
1050  		"	b	2b\n"			\
1051  		".previous\n"				\
1052  		EX_TABLE(1b, 3b)			\
1053  		: "=r" (err), "=r" (x)			\
1054  		: "r" (addr), "i" (-EFAULT), "0" (err))
1055  
1056  #define __cacheop_user_asmx(addr, err, op)		\
1057  	__asm__ __volatile__(				\
1058  		"1:	"op" 0,%1\n"			\
1059  		"2:\n"					\
1060  		".section .fixup,\"ax\"\n"		\
1061  		"3:	li	%0,%3\n"		\
1062  		"	b	2b\n"			\
1063  		".previous\n"				\
1064  		EX_TABLE(1b, 3b)			\
1065  		: "=r" (err)				\
1066  		: "r" (addr), "i" (-EFAULT), "0" (err))
1067  
1068  static nokprobe_inline void set_cr0(const struct pt_regs *regs,
1069  				    struct instruction_op *op)
1070  {
1071  	long val = op->val;
1072  
1073  	op->type |= SETCC;
1074  	op->ccval = (regs->ccr & 0x0fffffff) | ((regs->xer >> 3) & 0x10000000);
1075  #ifdef __powerpc64__
1076  	if (!(regs->msr & MSR_64BIT))
1077  		val = (int) val;
1078  #endif
1079  	if (val < 0)
1080  		op->ccval |= 0x80000000;
1081  	else if (val > 0)
1082  		op->ccval |= 0x40000000;
1083  	else
1084  		op->ccval |= 0x20000000;
1085  }
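/*
 * Note (editorial): in ccval, CR0 is the top nibble -- 0x80000000 = LT,
 * 0x40000000 = GT, 0x20000000 = EQ, 0x10000000 = SO.  The ">> 3" above
 * moves XER[SO] (0x80000000) into the CR0 SO position.
 */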
1086  
1087  static nokprobe_inline void set_ca32(struct instruction_op *op, bool val)
1088  {
1089  	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
1090  		if (val)
1091  			op->xerval |= XER_CA32;
1092  		else
1093  			op->xerval &= ~XER_CA32;
1094  	}
1095  }
1096  
1097  static nokprobe_inline void add_with_carry(const struct pt_regs *regs,
1098  				     struct instruction_op *op, int rd,
1099  				     unsigned long val1, unsigned long val2,
1100  				     unsigned long carry_in)
1101  {
1102  	unsigned long val = val1 + val2;
1103  
1104  	if (carry_in)
1105  		++val;
1106  	op->type = COMPUTE + SETREG + SETXER;
1107  	op->reg = rd;
1108  	op->val = val;
1109  #ifdef __powerpc64__
1110  	if (!(regs->msr & MSR_64BIT)) {
1111  		val = (unsigned int) val;
1112  		val1 = (unsigned int) val1;
1113  	}
1114  #endif
1115  	op->xerval = regs->xer;
1116  	if (val < val1 || (carry_in && val == val1))
1117  		op->xerval |= XER_CA;
1118  	else
1119  		op->xerval &= ~XER_CA;
1120  
1121  	set_ca32(op, (unsigned int)val < (unsigned int)val1 ||
1122  			(carry_in && (unsigned int)val == (unsigned int)val1));
1123  }
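/*
 * Carry example (editorial): 0xffffffffffffffffUL + 1 wraps to 0, and
 * val < val1 then sets XER[CA]; the "carry_in && val == val1" test
 * catches a wrap caused by the carry-in alone.
 */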
1124  
1125  static nokprobe_inline void do_cmp_signed(const struct pt_regs *regs,
1126  					  struct instruction_op *op,
1127  					  long v1, long v2, int crfld)
1128  {
1129  	unsigned int crval, shift;
1130  
1131  	op->type = COMPUTE + SETCC;
1132  	crval = (regs->xer >> 31) & 1;		/* get SO bit */
1133  	if (v1 < v2)
1134  		crval |= 8;
1135  	else if (v1 > v2)
1136  		crval |= 4;
1137  	else
1138  		crval |= 2;
1139  	shift = (7 - crfld) * 4;
1140  	op->ccval = (regs->ccr & ~(0xf << shift)) | (crval << shift);
1141  }
1142  
1143  static nokprobe_inline void do_cmp_unsigned(const struct pt_regs *regs,
1144  					    struct instruction_op *op,
1145  					    unsigned long v1,
1146  					    unsigned long v2, int crfld)
1147  {
1148  	unsigned int crval, shift;
1149  
1150  	op->type = COMPUTE + SETCC;
1151  	crval = (regs->xer >> 31) & 1;		/* get SO bit */
1152  	if (v1 < v2)
1153  		crval |= 8;
1154  	else if (v1 > v2)
1155  		crval |= 4;
1156  	else
1157  		crval |= 2;
1158  	shift = (7 - crfld) * 4;
1159  	op->ccval = (regs->ccr & ~(0xf << shift)) | (crval << shift);
1160  }
1161  
1162  static nokprobe_inline void do_cmpb(const struct pt_regs *regs,
1163  				    struct instruction_op *op,
1164  				    unsigned long v1, unsigned long v2)
1165  {
1166  	unsigned long long out_val, mask;
1167  	int i;
1168  
1169  	out_val = 0;
1170  	for (i = 0; i < 8; i++) {
1171  		mask = 0xffUL << (i * 8);
1172  		if ((v1 & mask) == (v2 & mask))
1173  			out_val |= mask;
1174  	}
1175  	op->val = out_val;
1176  }
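/*
 * Example (editorial): cmpb on v1 = 0x12345678 and v2 = 0x12005600
 * gives 0xff00ff00 in the low word -- each result byte is 0xff where
 * the corresponding source bytes match and 0x00 where they differ.
 */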
1177  
1178  /*
1179   * The size parameter is used to adjust the equivalent popcnt instruction.
1180   * popcntb = 8, popcntw = 32, popcntd = 64
1181   */
1182  static nokprobe_inline void do_popcnt(const struct pt_regs *regs,
1183  				      struct instruction_op *op,
1184  				      unsigned long v1, int size)
1185  {
1186  	unsigned long long out = v1;
1187  
1188  	out -= (out >> 1) & 0x5555555555555555ULL;
1189  	out = (0x3333333333333333ULL & out) +
1190  	      (0x3333333333333333ULL & (out >> 2));
1191  	out = (out + (out >> 4)) & 0x0f0f0f0f0f0f0f0fULL;
1192  
1193  	if (size == 8) {	/* popcntb */
1194  		op->val = out;
1195  		return;
1196  	}
1197  	out += out >> 8;
1198  	out += out >> 16;
1199  	if (size == 32) {	/* popcntw */
1200  		op->val = out & 0x0000003f0000003fULL;
1201  		return;
1202  	}
1203  
1204  	out = (out + (out >> 32)) & 0x7f;
1205  	op->val = out;	/* popcntd */
1206  }
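/*
 * Note (editorial): this is the usual SWAR population count -- pairwise
 * bit sums, then nibble sums, masked per size.  E.g. for popcntb,
 * v1 = 0xff leaves 0x08 in the low byte of op->val.
 */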
1207  
1208  #ifdef CONFIG_PPC64
1209  static nokprobe_inline void do_bpermd(const struct pt_regs *regs,
1210  				      struct instruction_op *op,
1211  				      unsigned long v1, unsigned long v2)
1212  {
1213  	unsigned char perm, idx;
1214  	unsigned int i;
1215  
1216  	perm = 0;
1217  	for (i = 0; i < 8; i++) {
1218  		idx = (v1 >> (i * 8)) & 0xff;
1219  		if (idx < 64)
1220  			if (v2 & PPC_BIT(idx))
1221  				perm |= 1 << i;
1222  	}
1223  	op->val = perm;
1224  }
1225  #endif /* CONFIG_PPC64 */
1226  /*
1227   * The size parameter adjusts the equivalent prty instruction.
1228   * prtyw = 32, prtyd = 64
1229   */
1230  static nokprobe_inline void do_prty(const struct pt_regs *regs,
1231  				    struct instruction_op *op,
1232  				    unsigned long v, int size)
1233  {
1234  	unsigned long long res = v ^ (v >> 8);
1235  
1236  	res ^= res >> 16;
1237  	if (size == 32) {		/* prtyw */
1238  		op->val = res & 0x0000000100000001ULL;
1239  		return;
1240  	}
1241  
1242  	res ^= res >> 32;
1243  	op->val = res & 1;	/* prtyd */
1244  }
1245  
1246  static nokprobe_inline int trap_compare(long v1, long v2)
1247  {
1248  	int ret = 0;
1249  
1250  	if (v1 < v2)
1251  		ret |= 0x10;
1252  	else if (v1 > v2)
1253  		ret |= 0x08;
1254  	else
1255  		ret |= 0x04;
1256  	if ((unsigned long)v1 < (unsigned long)v2)
1257  		ret |= 0x02;
1258  	else if ((unsigned long)v1 > (unsigned long)v2)
1259  		ret |= 0x01;
1260  	return ret;
1261  }
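/*
 * Note (editorial): the return value mirrors the TO field encoding --
 * 0x10 = signed <, 0x08 = signed >, 0x04 = equal, 0x02 = unsigned <,
 * 0x01 = unsigned >.  E.g. tweq (TO = 4) traps only on equality.
 */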
1262  
1263  /*
1264   * Elements of 32-bit rotate and mask instructions.
1265   */
1266  #define MASK32(mb, me)	((0xffffffffUL >> (mb)) + \
1267  			 ((signed long)-0x80000000L >> (me)) + ((me) >= (mb)))
1268  #ifdef __powerpc64__
1269  #define MASK64_L(mb)	(~0UL >> (mb))
1270  #define MASK64_R(me)	((signed long)-0x8000000000000000L >> (me))
1271  #define MASK64(mb, me)	(MASK64_L(mb) + MASK64_R(me) + ((me) >= (mb)))
1272  #define DATA32(x)	(((x) & 0xffffffffUL) | (((x) & 0xffffffffUL) << 32))
1273  #else
1274  #define DATA32(x)	(x)
1275  #endif
1276  #define ROTATE(x, n)	((n) ? (((x) << (n)) | ((x) >> (8 * sizeof(long) - (n)))) : (x))
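/*
 * Notes (editorial): ROTATE() special-cases n == 0 because shifting
 * right by 8 * sizeof(long) bits would be undefined behaviour in C.
 * Example mask values: MASK32(0, 31) == 0xffffffff and
 * MASK32(27, 31) == 0x1f.
 */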
1277  
1278  /*
1279   * Decode an instruction, and return information about it in *op
1280   * without changing *regs.
1281   * Integer arithmetic and logical instructions, branches, and barrier
1282   * instructions can be emulated just using the information in *op.
1283   *
1284   * Return value is 1 if the instruction can be emulated just by
1285   * updating *regs with the information in *op, -1 if we need the
1286   * GPRs but *regs doesn't contain the full register set, or 0
1287   * otherwise.
1288   */
1289  int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
1290  		  struct ppc_inst instr)
1291  {
1292  #ifdef CONFIG_PPC64
1293  	unsigned int suffixopcode, prefixtype, prefix_r;
1294  #endif
1295  	unsigned int opcode, ra, rb, rc, rd, spr, u;
1296  	unsigned long int imm;
1297  	unsigned long int val, val2;
1298  	unsigned int mb, me, sh;
1299  	unsigned int word, suffix;
1300  	long ival;
1301  
1302  	word = ppc_inst_val(instr);
1303  	suffix = ppc_inst_suffix(instr);
1304  
1305  	op->type = COMPUTE;
1306  
1307  	opcode = ppc_inst_primary_opcode(instr);
1308  	switch (opcode) {
1309  	case 16:	/* bc */
1310  		op->type = BRANCH;
1311  		imm = (signed short)(word & 0xfffc);
1312  		if ((word & 2) == 0)
1313  			imm += regs->nip;
1314  		op->val = truncate_if_32bit(regs->msr, imm);
1315  		if (word & 1)
1316  			op->type |= SETLK;
1317  		if (branch_taken(word, regs, op))
1318  			op->type |= BRTAKEN;
1319  		return 1;
1320  #ifdef CONFIG_PPC64
1321  	case 17:	/* sc */
1322  		if ((word & 0xfe2) == 2)
1323  			op->type = SYSCALL;
1324  		else if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) &&
1325  				(word & 0xfe3) == 1) {	/* scv */
1326  			op->type = SYSCALL_VECTORED_0;
1327  			if (!cpu_has_feature(CPU_FTR_ARCH_300))
1328  				goto unknown_opcode;
1329  		} else
1330  			op->type = UNKNOWN;
1331  		return 0;
1332  #endif
1333  	case 18:	/* b */
1334  		op->type = BRANCH | BRTAKEN;
1335  		imm = word & 0x03fffffc;
1336  		if (imm & 0x02000000)
1337  			imm -= 0x04000000;
1338  		if ((word & 2) == 0)
1339  			imm += regs->nip;
1340  		op->val = truncate_if_32bit(regs->msr, imm);
1341  		if (word & 1)
1342  			op->type |= SETLK;
1343  		return 1;
1344  	case 19:
1345  		switch ((word >> 1) & 0x3ff) {
1346  		case 0:		/* mcrf */
1347  			op->type = COMPUTE + SETCC;
1348  			rd = 7 - ((word >> 23) & 0x7);
1349  			ra = 7 - ((word >> 18) & 0x7);
1350  			rd *= 4;
1351  			ra *= 4;
1352  			val = (regs->ccr >> ra) & 0xf;
1353  			op->ccval = (regs->ccr & ~(0xfUL << rd)) | (val << rd);
1354  			return 1;
1355  
1356  		case 16:	/* bclr */
1357  		case 528:	/* bcctr */
1358  			op->type = BRANCH;
1359  			imm = (word & 0x400)? regs->ctr: regs->link;
1360  			op->val = truncate_if_32bit(regs->msr, imm);
1361  			if (word & 1)
1362  				op->type |= SETLK;
1363  			if (branch_taken(word, regs, op))
1364  				op->type |= BRTAKEN;
1365  			return 1;
1366  
1367  		case 18:	/* rfid, scary */
1368  			if (regs->msr & MSR_PR)
1369  				goto priv;
1370  			op->type = RFI;
1371  			return 0;
1372  
1373  		case 150:	/* isync */
1374  			op->type = BARRIER | BARRIER_ISYNC;
1375  			return 1;
1376  
1377  		case 33:	/* crnor */
1378  		case 129:	/* crandc */
1379  		case 193:	/* crxor */
1380  		case 225:	/* crnand */
1381  		case 257:	/* crand */
1382  		case 289:	/* creqv */
1383  		case 417:	/* crorc */
1384  		case 449:	/* cror */
1385  			op->type = COMPUTE + SETCC;
1386  			ra = (word >> 16) & 0x1f;
1387  			rb = (word >> 11) & 0x1f;
1388  			rd = (word >> 21) & 0x1f;
1389  			ra = (regs->ccr >> (31 - ra)) & 1;
1390  			rb = (regs->ccr >> (31 - rb)) & 1;
1391  			val = (word >> (6 + ra * 2 + rb)) & 1;
1392  			op->ccval = (regs->ccr & ~(1UL << (31 - rd))) |
1393  				(val << (31 - rd));
1394  			return 1;
1395  		}
1396  		break;
1397  	case 31:
1398  		switch ((word >> 1) & 0x3ff) {
1399  		case 598:	/* sync */
1400  			op->type = BARRIER + BARRIER_SYNC;
1401  #ifdef __powerpc64__
1402  			switch ((word >> 21) & 3) {
1403  			case 1:		/* lwsync */
1404  				op->type = BARRIER + BARRIER_LWSYNC;
1405  				break;
1406  			case 2:		/* ptesync */
1407  				op->type = BARRIER + BARRIER_PTESYNC;
1408  				break;
1409  			}
1410  #endif
1411  			return 1;
1412  
1413  		case 854:	/* eieio */
1414  			op->type = BARRIER + BARRIER_EIEIO;
1415  			return 1;
1416  		}
1417  		break;
1418  	}
1419  
1420  	rd = (word >> 21) & 0x1f;
1421  	ra = (word >> 16) & 0x1f;
1422  	rb = (word >> 11) & 0x1f;
1423  	rc = (word >> 6) & 0x1f;
1424  
1425  	switch (opcode) {
1426  #ifdef __powerpc64__
1427  	case 1:
1428  		if (!cpu_has_feature(CPU_FTR_ARCH_31))
1429  			goto unknown_opcode;
1430  
1431  		prefix_r = GET_PREFIX_R(word);
1432  		ra = GET_PREFIX_RA(suffix);
1433  		rd = (suffix >> 21) & 0x1f;
1434  		op->reg = rd;
1435  		op->val = regs->gpr[rd];
1436  		suffixopcode = get_op(suffix);
1437  		prefixtype = (word >> 24) & 0x3;
1438  		switch (prefixtype) {
1439  		case 2:
1440  			if (prefix_r && ra)
1441  				return 0;
1442  			switch (suffixopcode) {
1443  			case 14:	/* paddi */
1444  				op->type = COMPUTE | PREFIXED;
1445  				op->val = mlsd_8lsd_ea(word, suffix, regs);
1446  				goto compute_done;
1447  			}
1448  		}
1449  		break;
1450  	case 2:		/* tdi */
1451  		if (rd & trap_compare(regs->gpr[ra], (short) word))
1452  			goto trap;
1453  		return 1;
1454  #endif
1455  	case 3:		/* twi */
1456  		if (rd & trap_compare((int)regs->gpr[ra], (short) word))
1457  			goto trap;
1458  		return 1;
1459  
1460  #ifdef __powerpc64__
1461  	case 4:
1462  		/*
1463  		 * There are very many instructions with this primary opcode
1464  		 * introduced in the ISA as early as v2.03. However, the ones
1465  		 * we currently emulate were all introduced with ISA 3.0
1466  		 */
1467  		if (!cpu_has_feature(CPU_FTR_ARCH_300))
1468  			goto unknown_opcode;
1469  
1470  		switch (word & 0x3f) {
1471  		case 48:	/* maddhd */
1472  			asm volatile(PPC_MADDHD(%0, %1, %2, %3) :
1473  				     "=r" (op->val) : "r" (regs->gpr[ra]),
1474  				     "r" (regs->gpr[rb]), "r" (regs->gpr[rc]));
1475  			goto compute_done;
1476  
1477  		case 49:	/* maddhdu */
1478  			asm volatile(PPC_MADDHDU(%0, %1, %2, %3) :
1479  				     "=r" (op->val) : "r" (regs->gpr[ra]),
1480  				     "r" (regs->gpr[rb]), "r" (regs->gpr[rc]));
1481  			goto compute_done;
1482  
1483  		case 51:	/* maddld */
1484  			asm volatile(PPC_MADDLD(%0, %1, %2, %3) :
1485  				     "=r" (op->val) : "r" (regs->gpr[ra]),
1486  				     "r" (regs->gpr[rb]), "r" (regs->gpr[rc]));
1487  			goto compute_done;
1488  		}
1489  
1490  		/*
1491  		 * There are other instructions from ISA 3.0 with the same
1492  		 * primary opcode which do not have emulation support yet.
1493  		 */
1494  		goto unknown_opcode;
1495  #endif
1496  
1497  	case 7:		/* mulli */
1498  		op->val = regs->gpr[ra] * (short) word;
1499  		goto compute_done;
1500  
1501  	case 8:		/* subfic */
1502  		imm = (short) word;
1503  		add_with_carry(regs, op, rd, ~regs->gpr[ra], imm, 1);
1504  		return 1;
1505  
1506  	case 10:	/* cmpli */
1507  		imm = (unsigned short) word;
1508  		val = regs->gpr[ra];
1509  #ifdef __powerpc64__
1510  		if ((rd & 1) == 0)
1511  			val = (unsigned int) val;
1512  #endif
1513  		do_cmp_unsigned(regs, op, val, imm, rd >> 2);
1514  		return 1;
1515  
1516  	case 11:	/* cmpi */
1517  		imm = (short) word;
1518  		val = regs->gpr[ra];
1519  #ifdef __powerpc64__
1520  		if ((rd & 1) == 0)
1521  			val = (int) val;
1522  #endif
1523  		do_cmp_signed(regs, op, val, imm, rd >> 2);
1524  		return 1;
1525  
1526  	case 12:	/* addic */
1527  		imm = (short) word;
1528  		add_with_carry(regs, op, rd, regs->gpr[ra], imm, 0);
1529  		return 1;
1530  
1531  	case 13:	/* addic. */
1532  		imm = (short) word;
1533  		add_with_carry(regs, op, rd, regs->gpr[ra], imm, 0);
1534  		set_cr0(regs, op);
1535  		return 1;
1536  
1537  	case 14:	/* addi */
1538  		imm = (short) word;
1539  		if (ra)
1540  			imm += regs->gpr[ra];
1541  		op->val = imm;
1542  		goto compute_done;
1543  
1544  	case 15:	/* addis */
1545  		imm = ((short) word) << 16;
1546  		if (ra)
1547  			imm += regs->gpr[ra];
1548  		op->val = imm;
1549  		goto compute_done;
1550  
1551  	case 19:
1552  		if (((word >> 1) & 0x1f) == 2) {
1553  			/* addpcis */
1554  			if (!cpu_has_feature(CPU_FTR_ARCH_300))
1555  				goto unknown_opcode;
1556  			imm = (short) (word & 0xffc1);	/* d0 + d2 fields */
1557  			imm |= (word >> 15) & 0x3e;	/* d1 field */
1558  			op->val = regs->nip + (imm << 16) + 4;
1559  			goto compute_done;
1560  		}
1561  		op->type = UNKNOWN;
1562  		return 0;
1563  
1564  	case 20:	/* rlwimi */
1565  		mb = (word >> 6) & 0x1f;
1566  		me = (word >> 1) & 0x1f;
1567  		val = DATA32(regs->gpr[rd]);
1568  		imm = MASK32(mb, me);
1569  		op->val = (regs->gpr[ra] & ~imm) | (ROTATE(val, rb) & imm);
1570  		goto logical_done;
1571  
1572  	case 21:	/* rlwinm */
1573  		mb = (word >> 6) & 0x1f;
1574  		me = (word >> 1) & 0x1f;
1575  		val = DATA32(regs->gpr[rd]);
1576  		op->val = ROTATE(val, rb) & MASK32(mb, me);
1577  		goto logical_done;
1578  
1579  	case 23:	/* rlwnm */
1580  		mb = (word >> 6) & 0x1f;
1581  		me = (word >> 1) & 0x1f;
1582  		rb = regs->gpr[rb] & 0x1f;
1583  		val = DATA32(regs->gpr[rd]);
1584  		op->val = ROTATE(val, rb) & MASK32(mb, me);
1585  		goto logical_done;
1586  
1587  	case 24:	/* ori */
1588  		op->val = regs->gpr[rd] | (unsigned short) word;
1589  		goto logical_done_nocc;
1590  
1591  	case 25:	/* oris */
1592  		imm = (unsigned short) word;
1593  		op->val = regs->gpr[rd] | (imm << 16);
1594  		goto logical_done_nocc;
1595  
1596  	case 26:	/* xori */
1597  		op->val = regs->gpr[rd] ^ (unsigned short) word;
1598  		goto logical_done_nocc;
1599  
1600  	case 27:	/* xoris */
1601  		imm = (unsigned short) word;
1602  		op->val = regs->gpr[rd] ^ (imm << 16);
1603  		goto logical_done_nocc;
1604  
1605  	case 28:	/* andi. */
1606  		op->val = regs->gpr[rd] & (unsigned short) word;
1607  		set_cr0(regs, op);
1608  		goto logical_done_nocc;
1609  
1610  	case 29:	/* andis. */
1611  		imm = (unsigned short) word;
1612  		op->val = regs->gpr[rd] & (imm << 16);
1613  		set_cr0(regs, op);
1614  		goto logical_done_nocc;
1615  
1616  #ifdef __powerpc64__
1617  	case 30:	/* rld* */
1618  		mb = ((word >> 6) & 0x1f) | (word & 0x20);
1619  		val = regs->gpr[rd];
1620  		if ((word & 0x10) == 0) {
1621  			sh = rb | ((word & 2) << 4);
1622  			val = ROTATE(val, sh);
1623  			switch ((word >> 2) & 3) {
1624  			case 0:		/* rldicl */
1625  				val &= MASK64_L(mb);
1626  				break;
1627  			case 1:		/* rldicr */
1628  				val &= MASK64_R(mb);
1629  				break;
1630  			case 2:		/* rldic */
1631  				val &= MASK64(mb, 63 - sh);
1632  				break;
1633  			case 3:		/* rldimi */
1634  				imm = MASK64(mb, 63 - sh);
1635  				val = (regs->gpr[ra] & ~imm) |
1636  					(val & imm);
1637  			}
1638  			op->val = val;
1639  			goto logical_done;
1640  		} else {
1641  			sh = regs->gpr[rb] & 0x3f;
1642  			val = ROTATE(val, sh);
1643  			switch ((word >> 1) & 7) {
1644  			case 0:		/* rldcl */
1645  				op->val = val & MASK64_L(mb);
1646  				goto logical_done;
1647  			case 1:		/* rldcr */
1648  				op->val = val & MASK64_R(mb);
1649  				goto logical_done;
1650  			}
1651  		}
1652  #endif
1653  		op->type = UNKNOWN;	/* illegal instruction */
1654  		return 0;
1655  
1656  	case 31:
1657  		/* isel occupies 32 minor opcodes */
1658  		if (((word >> 1) & 0x1f) == 15) {
1659  			mb = (word >> 6) & 0x1f; /* bc field */
1660  			val = (regs->ccr >> (31 - mb)) & 1;
1661  			val2 = (ra) ? regs->gpr[ra] : 0;
1662  
1663  			op->val = (val) ? val2 : regs->gpr[rb];
1664  			goto compute_done;
1665  		}
1666  
1667  		switch ((word >> 1) & 0x3ff) {
1668  		case 4:		/* tw */
1669  			if (rd == 0x1f ||
1670  			    (rd & trap_compare((int)regs->gpr[ra],
1671  					       (int)regs->gpr[rb])))
1672  				goto trap;
1673  			return 1;
1674  #ifdef __powerpc64__
1675  		case 68:	/* td */
1676  			if (rd & trap_compare(regs->gpr[ra], regs->gpr[rb]))
1677  				goto trap;
1678  			return 1;
1679  #endif
1680  		case 83:	/* mfmsr */
1681  			if (regs->msr & MSR_PR)
1682  				goto priv;
1683  			op->type = MFMSR;
1684  			op->reg = rd;
1685  			return 0;
1686  		case 146:	/* mtmsr */
1687  			if (regs->msr & MSR_PR)
1688  				goto priv;
1689  			op->type = MTMSR;
1690  			op->reg = rd;
1691  			op->val = 0xffffffff & ~(MSR_ME | MSR_LE);
1692  			return 0;
1693  #ifdef CONFIG_PPC64
1694  		case 178:	/* mtmsrd */
1695  			if (regs->msr & MSR_PR)
1696  				goto priv;
1697  			op->type = MTMSR;
1698  			op->reg = rd;
1699  			/* only MSR_EE and MSR_RI get changed if bit 15 set */
1700  			/* mtmsrd doesn't change MSR_HV, MSR_ME or MSR_LE */
1701  			imm = (word & 0x10000)? 0x8002: 0xefffffffffffeffeUL;
1702  			op->val = imm;
1703  			return 0;
1704  #endif
1705  
1706  		case 19:	/* mfcr */
1707  			imm = 0xffffffffUL;
1708  			if ((word >> 20) & 1) {
1709  				imm = 0xf0000000UL;
1710  				for (sh = 0; sh < 8; ++sh) {
1711  					if (word & (0x80000 >> sh))
1712  						break;
1713  					imm >>= 4;
1714  				}
1715  			}
1716  			op->val = regs->ccr & imm;
1717  			goto compute_done;
1718  
1719  		case 128:	/* setb */
1720  			if (!cpu_has_feature(CPU_FTR_ARCH_300))
1721  				goto unknown_opcode;
1722  			/*
1723  			 * 'ra' encodes the CR field number (bfa) in the top 3 bits.
1724  			 * Since each CR field is 4 bits,
1725  			 * we can simply mask off the bottom two bits (bfa * 4)
1726  			 * to yield the first bit in the CR field.
1727  			 */
1728  			ra = ra & ~0x3;
1729  			/* 'val' stores bits of the CR field (bfa) */
1730  			val = regs->ccr >> (CR0_SHIFT - ra);
1731  			/* checks if the LT bit of CR field (bfa) is set */
1732  			if (val & 8)
1733  				op->val = -1;
1734  			/* checks if the GT bit of CR field (bfa) is set */
1735  			else if (val & 4)
1736  				op->val = 1;
1737  			else
1738  				op->val = 0;
1739  			goto compute_done;
1740  
1741  		case 144:	/* mtcrf */
1742  			op->type = COMPUTE + SETCC;
1743  			imm = 0xf0000000UL;
1744  			val = regs->gpr[rd];
1745  			op->ccval = regs->ccr;
1746  			for (sh = 0; sh < 8; ++sh) {
1747  				if (word & (0x80000 >> sh))
1748  					op->ccval = (op->ccval & ~imm) |
1749  						(val & imm);
1750  				imm >>= 4;
1751  			}
1752  			return 1;
1753  
1754  		case 339:	/* mfspr */
1755  			spr = ((word >> 16) & 0x1f) | ((word >> 6) & 0x3e0);
1756  			op->type = MFSPR;
1757  			op->reg = rd;
1758  			op->spr = spr;
1759  			if (spr == SPRN_XER || spr == SPRN_LR ||
1760  			    spr == SPRN_CTR)
1761  				return 1;
1762  			return 0;
1763  
1764  		case 467:	/* mtspr */
1765  			spr = ((word >> 16) & 0x1f) | ((word >> 6) & 0x3e0);
1766  			op->type = MTSPR;
1767  			op->val = regs->gpr[rd];
1768  			op->spr = spr;
1769  			if (spr == SPRN_XER || spr == SPRN_LR ||
1770  			    spr == SPRN_CTR)
1771  				return 1;
1772  			return 0;
1773  
1774  /*
1775   * Compare instructions
1776   */
1777  		case 0:	/* cmp */
1778  			val = regs->gpr[ra];
1779  			val2 = regs->gpr[rb];
1780  #ifdef __powerpc64__
1781  			if ((rd & 1) == 0) {
1782  				/* word (32-bit) compare */
1783  				val = (int) val;
1784  				val2 = (int) val2;
1785  			}
1786  #endif
1787  			do_cmp_signed(regs, op, val, val2, rd >> 2);
1788  			return 1;
1789  
1790  		case 32:	/* cmpl */
1791  			val = regs->gpr[ra];
1792  			val2 = regs->gpr[rb];
1793  #ifdef __powerpc64__
1794  			if ((rd & 1) == 0) {
1795  				/* word (32-bit) compare */
1796  				val = (unsigned int) val;
1797  				val2 = (unsigned int) val2;
1798  			}
1799  #endif
1800  			do_cmp_unsigned(regs, op, val, val2, rd >> 2);
1801  			return 1;
1802  
1803  		case 508: /* cmpb */
1804  			do_cmpb(regs, op, regs->gpr[rd], regs->gpr[rb]);
1805  			goto logical_done_nocc;
1806  
1807  /*
1808   * Arithmetic instructions
1809   */
1810  		case 8:	/* subfc */
1811  			add_with_carry(regs, op, rd, ~regs->gpr[ra],
1812  				       regs->gpr[rb], 1);
1813  			goto arith_done;
1814  #ifdef __powerpc64__
1815  		case 9:	/* mulhdu */
1816  			asm("mulhdu %0,%1,%2" : "=r" (op->val) :
1817  			    "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
1818  			goto arith_done;
1819  #endif
1820  		case 10:	/* addc */
1821  			add_with_carry(regs, op, rd, regs->gpr[ra],
1822  				       regs->gpr[rb], 0);
1823  			goto arith_done;
1824  
1825  		case 11:	/* mulhwu */
1826  			asm("mulhwu %0,%1,%2" : "=r" (op->val) :
1827  			    "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
1828  			goto arith_done;
1829  
1830  		case 40:	/* subf */
1831  			op->val = regs->gpr[rb] - regs->gpr[ra];
1832  			goto arith_done;
1833  #ifdef __powerpc64__
1834  		case 73:	/* mulhd */
1835  			asm("mulhd %0,%1,%2" : "=r" (op->val) :
1836  			    "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
1837  			goto arith_done;
1838  #endif
1839  		case 75:	/* mulhw */
1840  			asm("mulhw %0,%1,%2" : "=r" (op->val) :
1841  			    "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
1842  			goto arith_done;
1843  
1844  		case 104:	/* neg */
1845  			op->val = -regs->gpr[ra];
1846  			goto arith_done;
1847  
1848  		case 136:	/* subfe */
1849  			add_with_carry(regs, op, rd, ~regs->gpr[ra],
1850  				       regs->gpr[rb], regs->xer & XER_CA);
1851  			goto arith_done;
1852  
1853  		case 138:	/* adde */
1854  			add_with_carry(regs, op, rd, regs->gpr[ra],
1855  				       regs->gpr[rb], regs->xer & XER_CA);
1856  			goto arith_done;
1857  
1858  		case 200:	/* subfze */
1859  			add_with_carry(regs, op, rd, ~regs->gpr[ra], 0L,
1860  				       regs->xer & XER_CA);
1861  			goto arith_done;
1862  
1863  		case 202:	/* addze */
1864  			add_with_carry(regs, op, rd, regs->gpr[ra], 0L,
1865  				       regs->xer & XER_CA);
1866  			goto arith_done;
1867  
1868  		case 232:	/* subfme */
1869  			add_with_carry(regs, op, rd, ~regs->gpr[ra], -1L,
1870  				       regs->xer & XER_CA);
1871  			goto arith_done;
1872  #ifdef __powerpc64__
1873  		case 233:	/* mulld */
1874  			op->val = regs->gpr[ra] * regs->gpr[rb];
1875  			goto arith_done;
1876  #endif
1877  		case 234:	/* addme */
1878  			add_with_carry(regs, op, rd, regs->gpr[ra], -1L,
1879  				       regs->xer & XER_CA);
1880  			goto arith_done;
1881  
1882  		case 235:	/* mullw */
1883  			op->val = (long)(int) regs->gpr[ra] *
1884  				(int) regs->gpr[rb];
1885  
1886  			goto arith_done;
1887  #ifdef __powerpc64__
1888  		case 265:	/* modud */
1889  			if (!cpu_has_feature(CPU_FTR_ARCH_300))
1890  				goto unknown_opcode;
1891  			op->val = regs->gpr[ra] % regs->gpr[rb];
1892  			goto compute_done;
1893  #endif
1894  		case 266:	/* add */
1895  			op->val = regs->gpr[ra] + regs->gpr[rb];
1896  			goto arith_done;
1897  
1898  		case 267:	/* moduw */
1899  			if (!cpu_has_feature(CPU_FTR_ARCH_300))
1900  				goto unknown_opcode;
1901  			op->val = (unsigned int) regs->gpr[ra] %
1902  				(unsigned int) regs->gpr[rb];
1903  			goto compute_done;
1904  #ifdef __powerpc64__
1905  		case 457:	/* divdu */
1906  			op->val = regs->gpr[ra] / regs->gpr[rb];
1907  			goto arith_done;
1908  #endif
1909  		case 459:	/* divwu */
1910  			op->val = (unsigned int) regs->gpr[ra] /
1911  				(unsigned int) regs->gpr[rb];
1912  			goto arith_done;
1913  #ifdef __powerpc64__
1914  		case 489:	/* divd */
1915  			op->val = (long int) regs->gpr[ra] /
1916  				(long int) regs->gpr[rb];
1917  			goto arith_done;
1918  #endif
1919  		case 491:	/* divw */
1920  			op->val = (int) regs->gpr[ra] /
1921  				(int) regs->gpr[rb];
1922  			goto arith_done;
1923  #ifdef __powerpc64__
1924  		case 425:	/* divde[.] */
1925  			asm volatile(PPC_DIVDE(%0, %1, %2) :
1926  				"=r" (op->val) : "r" (regs->gpr[ra]),
1927  				"r" (regs->gpr[rb]));
1928  			goto arith_done;
1929  		case 393:	/* divdeu[.] */
1930  			asm volatile(PPC_DIVDEU(%0, %1, %2) :
1931  				"=r" (op->val) : "r" (regs->gpr[ra]),
1932  				"r" (regs->gpr[rb]));
1933  			goto arith_done;
1934  #endif
1935  		case 755:	/* darn */
1936  			if (!cpu_has_feature(CPU_FTR_ARCH_300))
1937  				goto unknown_opcode;
1938  			switch (ra & 0x3) {
1939  			case 0:
1940  				/* 32-bit conditioned */
1941  				asm volatile(PPC_DARN(%0, 0) : "=r" (op->val));
1942  				goto compute_done;
1943  
1944  			case 1:
1945  				/* 64-bit conditioned */
1946  				asm volatile(PPC_DARN(%0, 1) : "=r" (op->val));
1947  				goto compute_done;
1948  
1949  			case 2:
1950  				/* 64-bit raw */
1951  				asm volatile(PPC_DARN(%0, 2) : "=r" (op->val));
1952  				goto compute_done;
1953  			}
1954  
1955  			goto unknown_opcode;
1956  #ifdef __powerpc64__
1957  		case 777:	/* modsd */
1958  			if (!cpu_has_feature(CPU_FTR_ARCH_300))
1959  				goto unknown_opcode;
1960  			op->val = (long int) regs->gpr[ra] %
1961  				(long int) regs->gpr[rb];
1962  			goto compute_done;
1963  #endif
1964  		case 779:	/* modsw */
1965  			if (!cpu_has_feature(CPU_FTR_ARCH_300))
1966  				goto unknown_opcode;
1967  			op->val = (int) regs->gpr[ra] %
1968  				(int) regs->gpr[rb];
1969  			goto compute_done;
1970  
1971  
1972  /*
1973   * Logical instructions
1974   */
1975  		case 26:	/* cntlzw */
1976  			val = (unsigned int) regs->gpr[rd];
1977  			op->val = ( val ? __builtin_clz(val) : 32 );
1978  			goto logical_done;
1979  #ifdef __powerpc64__
1980  		case 58:	/* cntlzd */
1981  			val = regs->gpr[rd];
1982  			op->val = ( val ? __builtin_clzl(val) : 64 );
1983  			goto logical_done;
1984  #endif
1985  		case 28:	/* and */
1986  			op->val = regs->gpr[rd] & regs->gpr[rb];
1987  			goto logical_done;
1988  
1989  		case 60:	/* andc */
1990  			op->val = regs->gpr[rd] & ~regs->gpr[rb];
1991  			goto logical_done;
1992  
1993  		case 122:	/* popcntb */
1994  			do_popcnt(regs, op, regs->gpr[rd], 8);
1995  			goto logical_done_nocc;
1996  
1997  		case 124:	/* nor */
1998  			op->val = ~(regs->gpr[rd] | regs->gpr[rb]);
1999  			goto logical_done;
2000  
2001  		case 154:	/* prtyw */
2002  			do_prty(regs, op, regs->gpr[rd], 32);
2003  			goto logical_done_nocc;
2004  
2005  		case 186:	/* prtyd */
2006  			do_prty(regs, op, regs->gpr[rd], 64);
2007  			goto logical_done_nocc;
2008  #ifdef CONFIG_PPC64
2009  		case 252:	/* bpermd */
2010  			do_bpermd(regs, op, regs->gpr[rd], regs->gpr[rb]);
2011  			goto logical_done_nocc;
2012  #endif
2013  		case 284:	/* eqv */
2014  			op->val = ~(regs->gpr[rd] ^ regs->gpr[rb]);
2015  			goto logical_done;
2016  
2017  		case 316:	/* xor */
2018  			op->val = regs->gpr[rd] ^ regs->gpr[rb];
2019  			goto logical_done;
2020  
2021  		case 378:	/* popcntw */
2022  			do_popcnt(regs, op, regs->gpr[rd], 32);
2023  			goto logical_done_nocc;
2024  
2025  		case 412:	/* orc */
2026  			op->val = regs->gpr[rd] | ~regs->gpr[rb];
2027  			goto logical_done;
2028  
2029  		case 444:	/* or */
2030  			op->val = regs->gpr[rd] | regs->gpr[rb];
2031  			goto logical_done;
2032  
2033  		case 476:	/* nand */
2034  			op->val = ~(regs->gpr[rd] & regs->gpr[rb]);
2035  			goto logical_done;
2036  #ifdef CONFIG_PPC64
2037  		case 506:	/* popcntd */
2038  			do_popcnt(regs, op, regs->gpr[rd], 64);
2039  			goto logical_done_nocc;
2040  #endif
2041  		case 538:	/* cnttzw */
2042  			if (!cpu_has_feature(CPU_FTR_ARCH_300))
2043  				goto unknown_opcode;
2044  			val = (unsigned int) regs->gpr[rd];
2045  			op->val = (val ? __builtin_ctz(val) : 32);
2046  			goto logical_done;
2047  #ifdef __powerpc64__
2048  		case 570:	/* cnttzd */
2049  			if (!cpu_has_feature(CPU_FTR_ARCH_300))
2050  				goto unknown_opcode;
2051  			val = regs->gpr[rd];
2052  			op->val = (val ? __builtin_ctzl(val) : 64);
2053  			goto logical_done;
2054  #endif
2055  		case 922:	/* extsh */
2056  			op->val = (signed short) regs->gpr[rd];
2057  			goto logical_done;
2058  
2059  		case 954:	/* extsb */
2060  			op->val = (signed char) regs->gpr[rd];
2061  			goto logical_done;
2062  #ifdef __powerpc64__
2063  		case 986:	/* extsw */
2064  			op->val = (signed int) regs->gpr[rd];
2065  			goto logical_done;
2066  #endif
2067  
2068  /*
2069   * Shift instructions
2070   */
2071  		case 24:	/* slw */
2072  			sh = regs->gpr[rb] & 0x3f;
2073  			if (sh < 32)
2074  				op->val = (regs->gpr[rd] << sh) & 0xffffffffUL;
2075  			else
2076  				op->val = 0;
2077  			goto logical_done;
2078  
2079  		case 536:	/* srw */
2080  			sh = regs->gpr[rb] & 0x3f;
2081  			if (sh < 32)
2082  				op->val = (regs->gpr[rd] & 0xffffffffUL) >> sh;
2083  			else
2084  				op->val = 0;
2085  			goto logical_done;
2086  
2087  		case 792:	/* sraw */
2088  			op->type = COMPUTE + SETREG + SETXER;
2089  			sh = regs->gpr[rb] & 0x3f;
2090  			ival = (signed int) regs->gpr[rd];
2091  			op->val = ival >> (sh < 32 ? sh : 31);
2092  			op->xerval = regs->xer;
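			/*
			 * CA is set when a negative value shifts out 1 bits
			 * (the arithmetic shift was inexact), e.g. ival = -5,
			 * sh = 1 yields -3 with a 1 bit shifted out, so CA = 1.
			 */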
2093  			if (ival < 0 && (sh >= 32 || (ival & ((1ul << sh) - 1)) != 0))
2094  				op->xerval |= XER_CA;
2095  			else
2096  				op->xerval &= ~XER_CA;
2097  			set_ca32(op, op->xerval & XER_CA);
2098  			goto logical_done;
2099  
2100  		case 824:	/* srawi */
2101  			op->type = COMPUTE + SETREG + SETXER;
2102  			sh = rb;
2103  			ival = (signed int) regs->gpr[rd];
2104  			op->val = ival >> sh;
2105  			op->xerval = regs->xer;
2106  			if (ival < 0 && (ival & ((1ul << sh) - 1)) != 0)
2107  				op->xerval |= XER_CA;
2108  			else
2109  				op->xerval &= ~XER_CA;
2110  			set_ca32(op, op->xerval & XER_CA);
2111  			goto logical_done;
2112  
2113  #ifdef __powerpc64__
2114  		case 27:	/* sld */
2115  			sh = regs->gpr[rb] & 0x7f;
2116  			if (sh < 64)
2117  				op->val = regs->gpr[rd] << sh;
2118  			else
2119  				op->val = 0;
2120  			goto logical_done;
2121  
2122  		case 539:	/* srd */
2123  			sh = regs->gpr[rb] & 0x7f;
2124  			if (sh < 64)
2125  				op->val = regs->gpr[rd] >> sh;
2126  			else
2127  				op->val = 0;
2128  			goto logical_done;
2129  
2130  		case 794:	/* srad */
2131  			op->type = COMPUTE + SETREG + SETXER;
2132  			sh = regs->gpr[rb] & 0x7f;
2133  			ival = (signed long int) regs->gpr[rd];
2134  			op->val = ival >> (sh < 64 ? sh : 63);
2135  			op->xerval = regs->xer;
2136  			if (ival < 0 && (sh >= 64 || (ival & ((1ul << sh) - 1)) != 0))
2137  				op->xerval |= XER_CA;
2138  			else
2139  				op->xerval &= ~XER_CA;
2140  			set_ca32(op, op->xerval & XER_CA);
2141  			goto logical_done;
2142  
2143  		case 826:	/* sradi with sh_5 = 0 */
2144  		case 827:	/* sradi with sh_5 = 1 */
2145  			op->type = COMPUTE + SETREG + SETXER;
2146  			sh = rb | ((word & 2) << 4);
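			/*
			 * The 6-bit shift count is split: sh_0..4 sit in the
			 * RB field and sh_5 in bit 1 of the word, e.g. sh = 35
			 * encodes as rb = 3 with sh_5 = 1.
			 */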
2147  			ival = (signed long int) regs->gpr[rd];
2148  			op->val = ival >> sh;
2149  			op->xerval = regs->xer;
2150  			if (ival < 0 && (ival & ((1ul << sh) - 1)) != 0)
2151  				op->xerval |= XER_CA;
2152  			else
2153  				op->xerval &= ~XER_CA;
2154  			set_ca32(op, op->xerval & XER_CA);
2155  			goto logical_done;
2156  
2157  		case 890:	/* extswsli with sh_5 = 0 */
2158  		case 891:	/* extswsli with sh_5 = 1 */
2159  			if (!cpu_has_feature(CPU_FTR_ARCH_300))
2160  				goto unknown_opcode;
2161  			op->type = COMPUTE + SETREG;
2162  			sh = rb | ((word & 2) << 4);
2163  			val = (signed int) regs->gpr[rd];
2164  			if (sh)
2165  				op->val = ROTATE(val, sh) & MASK64(0, 63 - sh);
2166  			else
2167  				op->val = val;
2168  			goto logical_done;
2169  
2170  #endif /* __powerpc64__ */
2171  
2172  /*
2173   * Cache instructions
2174   */
2175  		case 54:	/* dcbst */
2176  			op->type = MKOP(CACHEOP, DCBST, 0);
2177  			op->ea = xform_ea(word, regs);
2178  			return 0;
2179  
2180  		case 86:	/* dcbf */
2181  			op->type = MKOP(CACHEOP, DCBF, 0);
2182  			op->ea = xform_ea(word, regs);
2183  			return 0;
2184  
2185  		case 246:	/* dcbtst */
2186  			op->type = MKOP(CACHEOP, DCBTST, 0);
2187  			op->ea = xform_ea(word, regs);
2188  			op->reg = rd;
2189  			return 0;
2190  
2191  		case 278:	/* dcbt */
2192  			op->type = MKOP(CACHEOP, DCBT, 0);
2193  			op->ea = xform_ea(word, regs);
2194  			op->reg = rd;
2195  			return 0;
2196  
2197  		case 982:	/* icbi */
2198  			op->type = MKOP(CACHEOP, ICBI, 0);
2199  			op->ea = xform_ea(word, regs);
2200  			return 0;
2201  
2202  		case 1014:	/* dcbz */
2203  			op->type = MKOP(CACHEOP, DCBZ, 0);
2204  			op->ea = xform_ea(word, regs);
2205  			return 0;
2206  		}
2207  		break;
2208  	}
2209  
2210  /*
2211   * Loads and stores.
2212   */
2213  	op->type = UNKNOWN;
2214  	op->update_reg = ra;
2215  	op->reg = rd;
2216  	op->val = regs->gpr[rd];
2217  	u = (word >> 20) & UPDATE;
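	/*
	 * For D-form opcodes the update form is the odd opcode of the pair
	 * (e.g. lwz = 32, lwzu = 33), so the opcode's low bit (word bit 26),
	 * shifted down by 20, lines up with the UPDATE flag bit.
	 */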
2218  	op->vsx_flags = 0;
2219  
2220  	switch (opcode) {
2221  	case 31:
2222  		u = word & UPDATE;
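		/*
		 * X-form update variants differ by 32 in the minor opcode
		 * (e.g. lwzx = 23, lwzux = 55), which is the UPDATE bit
		 * position in the word itself.
		 */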
2223  		op->ea = xform_ea(word, regs);
2224  		switch ((word >> 1) & 0x3ff) {
2225  		case 20:	/* lwarx */
2226  			op->type = MKOP(LARX, 0, 4);
2227  			break;
2228  
2229  		case 150:	/* stwcx. */
2230  			op->type = MKOP(STCX, 0, 4);
2231  			break;
2232  
2233  #ifdef __powerpc64__
2234  		case 84:	/* ldarx */
2235  			op->type = MKOP(LARX, 0, 8);
2236  			break;
2237  
2238  		case 214:	/* stdcx. */
2239  			op->type = MKOP(STCX, 0, 8);
2240  			break;
2241  
2242  		case 52:	/* lbarx */
2243  			op->type = MKOP(LARX, 0, 1);
2244  			break;
2245  
2246  		case 694:	/* stbcx. */
2247  			op->type = MKOP(STCX, 0, 1);
2248  			break;
2249  
2250  		case 116:	/* lharx */
2251  			op->type = MKOP(LARX, 0, 2);
2252  			break;
2253  
2254  		case 726:	/* sthcx. */
2255  			op->type = MKOP(STCX, 0, 2);
2256  			break;
2257  
2258  		case 276:	/* lqarx */
2259  			if (!((rd & 1) || rd == ra || rd == rb))
2260  				op->type = MKOP(LARX, 0, 16);
2261  			break;
2262  
2263  		case 182:	/* stqcx. */
2264  			if (!(rd & 1))
2265  				op->type = MKOP(STCX, 0, 16);
2266  			break;
2267  #endif
2268  
2269  		case 23:	/* lwzx */
2270  		case 55:	/* lwzux */
2271  			op->type = MKOP(LOAD, u, 4);
2272  			break;
2273  
2274  		case 87:	/* lbzx */
2275  		case 119:	/* lbzux */
2276  			op->type = MKOP(LOAD, u, 1);
2277  			break;
2278  
2279  #ifdef CONFIG_ALTIVEC
2280  		/*
2281  		 * Note: for the load/store vector element instructions,
2282  		 * bits of the EA say which field of the VMX register to use.
2283  		 */
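		/*
		 * Illustrative example (big-endian element numbering):
		 * lvewx with an EA ending in 0x8 targets word element
		 * (ea >> 2) & 3 = 2, while lvebx uses byte element ea & 0xf.
		 */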
2284  		case 7:		/* lvebx */
2285  			op->type = MKOP(LOAD_VMX, 0, 1);
2286  			op->element_size = 1;
2287  			break;
2288  
2289  		case 39:	/* lvehx */
2290  			op->type = MKOP(LOAD_VMX, 0, 2);
2291  			op->element_size = 2;
2292  			break;
2293  
2294  		case 71:	/* lvewx */
2295  			op->type = MKOP(LOAD_VMX, 0, 4);
2296  			op->element_size = 4;
2297  			break;
2298  
2299  		case 103:	/* lvx */
2300  		case 359:	/* lvxl */
2301  			op->type = MKOP(LOAD_VMX, 0, 16);
2302  			op->element_size = 16;
2303  			break;
2304  
2305  		case 135:	/* stvebx */
2306  			op->type = MKOP(STORE_VMX, 0, 1);
2307  			op->element_size = 1;
2308  			break;
2309  
2310  		case 167:	/* stvehx */
2311  			op->type = MKOP(STORE_VMX, 0, 2);
2312  			op->element_size = 2;
2313  			break;
2314  
2315  		case 199:	/* stvewx */
2316  			op->type = MKOP(STORE_VMX, 0, 4);
2317  			op->element_size = 4;
2318  			break;
2319  
2320  		case 231:	/* stvx */
2321  		case 487:	/* stvxl */
2322  			op->type = MKOP(STORE_VMX, 0, 16);
			op->element_size = 16;
2323  			break;
2324  #endif /* CONFIG_ALTIVEC */
2325  
2326  #ifdef __powerpc64__
2327  		case 21:	/* ldx */
2328  		case 53:	/* ldux */
2329  			op->type = MKOP(LOAD, u, 8);
2330  			break;
2331  
2332  		case 149:	/* stdx */
2333  		case 181:	/* stdux */
2334  			op->type = MKOP(STORE, u, 8);
2335  			break;
2336  #endif
2337  
2338  		case 151:	/* stwx */
2339  		case 183:	/* stwux */
2340  			op->type = MKOP(STORE, u, 4);
2341  			break;
2342  
2343  		case 215:	/* stbx */
2344  		case 247:	/* stbux */
2345  			op->type = MKOP(STORE, u, 1);
2346  			break;
2347  
2348  		case 279:	/* lhzx */
2349  		case 311:	/* lhzux */
2350  			op->type = MKOP(LOAD, u, 2);
2351  			break;
2352  
2353  #ifdef __powerpc64__
2354  		case 341:	/* lwax */
2355  		case 373:	/* lwaux */
2356  			op->type = MKOP(LOAD, SIGNEXT | u, 4);
2357  			break;
2358  #endif
2359  
2360  		case 343:	/* lhax */
2361  		case 375:	/* lhaux */
2362  			op->type = MKOP(LOAD, SIGNEXT | u, 2);
2363  			break;
2364  
2365  		case 407:	/* sthx */
2366  		case 439:	/* sthux */
2367  			op->type = MKOP(STORE, u, 2);
2368  			break;
2369  
2370  #ifdef __powerpc64__
2371  		case 532:	/* ldbrx */
2372  			op->type = MKOP(LOAD, BYTEREV, 8);
2373  			break;
2374  
2375  #endif
2376  		case 533:	/* lswx */
2377  			op->type = MKOP(LOAD_MULTI, 0, regs->xer & 0x7f);
2378  			break;
2379  
2380  		case 534:	/* lwbrx */
2381  			op->type = MKOP(LOAD, BYTEREV, 4);
2382  			break;
2383  
2384  		case 597:	/* lswi */
2385  			if (rb == 0)
2386  				rb = 32;	/* # bytes to load */
2387  			op->type = MKOP(LOAD_MULTI, 0, rb);
2388  			op->ea = ra ? regs->gpr[ra] : 0;
2389  			break;
2390  
2391  #ifdef CONFIG_PPC_FPU
2392  		case 535:	/* lfsx */
2393  		case 567:	/* lfsux */
2394  			op->type = MKOP(LOAD_FP, u | FPCONV, 4);
2395  			break;
2396  
2397  		case 599:	/* lfdx */
2398  		case 631:	/* lfdux */
2399  			op->type = MKOP(LOAD_FP, u, 8);
2400  			break;
2401  
2402  		case 663:	/* stfsx */
2403  		case 695:	/* stfsux */
2404  			op->type = MKOP(STORE_FP, u | FPCONV, 4);
2405  			break;
2406  
2407  		case 727:	/* stfdx */
2408  		case 759:	/* stfdux */
2409  			op->type = MKOP(STORE_FP, u, 8);
2410  			break;
2411  
2412  #ifdef __powerpc64__
2413  		case 791:	/* lfdpx */
2414  			op->type = MKOP(LOAD_FP, 0, 16);
2415  			break;
2416  
2417  		case 855:	/* lfiwax */
2418  			op->type = MKOP(LOAD_FP, SIGNEXT, 4);
2419  			break;
2420  
2421  		case 887:	/* lfiwzx */
2422  			op->type = MKOP(LOAD_FP, 0, 4);
2423  			break;
2424  
2425  		case 919:	/* stfdpx */
2426  			op->type = MKOP(STORE_FP, 0, 16);
2427  			break;
2428  
2429  		case 983:	/* stfiwx */
2430  			op->type = MKOP(STORE_FP, 0, 4);
2431  			break;
2432  #endif /* __powerpc64__ */
2433  #endif /* CONFIG_PPC_FPU */
2434  
2435  #ifdef __powerpc64__
2436  		case 660:	/* stdbrx */
2437  			op->type = MKOP(STORE, BYTEREV, 8);
2438  			op->val = byterev_8(regs->gpr[rd]);
2439  			break;
2440  
2441  #endif
2442  		case 661:	/* stswx */
2443  			op->type = MKOP(STORE_MULTI, 0, regs->xer & 0x7f);
2444  			break;
2445  
2446  		case 662:	/* stwbrx */
2447  			op->type = MKOP(STORE, BYTEREV, 4);
2448  			op->val = byterev_4(regs->gpr[rd]);
2449  			break;
2450  
2451  		case 725:	/* stswi */
2452  			if (rb == 0)
2453  				rb = 32;	/* # bytes to store */
2454  			op->type = MKOP(STORE_MULTI, 0, rb);
2455  			op->ea = ra ? regs->gpr[ra] : 0;
2456  			break;
2457  
2458  		case 790:	/* lhbrx */
2459  			op->type = MKOP(LOAD, BYTEREV, 2);
2460  			break;
2461  
2462  		case 918:	/* sthbrx */
2463  			op->type = MKOP(STORE, BYTEREV, 2);
2464  			op->val = byterev_2(regs->gpr[rd]);
2465  			break;
2466  
2467  #ifdef CONFIG_VSX
2468  		case 12:	/* lxsiwzx */
2469  			op->reg = rd | ((word & 1) << 5);
2470  			op->type = MKOP(LOAD_VSX, 0, 4);
2471  			op->element_size = 8;
2472  			break;
2473  
2474  		case 76:	/* lxsiwax */
2475  			op->reg = rd | ((word & 1) << 5);
2476  			op->type = MKOP(LOAD_VSX, SIGNEXT, 4);
2477  			op->element_size = 8;
2478  			break;
2479  
2480  		case 140:	/* stxsiwx */
2481  			op->reg = rd | ((word & 1) << 5);
2482  			op->type = MKOP(STORE_VSX, 0, 4);
2483  			op->element_size = 8;
2484  			break;
2485  
2486  		case 268:	/* lxvx */
2487  			if (!cpu_has_feature(CPU_FTR_ARCH_300))
2488  				goto unknown_opcode;
2489  			op->reg = rd | ((word & 1) << 5);
2490  			op->type = MKOP(LOAD_VSX, 0, 16);
2491  			op->element_size = 16;
2492  			op->vsx_flags = VSX_CHECK_VEC;
2493  			break;
2494  
2495  		case 269:	/* lxvl */
2496  		case 301: {	/* lxvll */
2497  			int nb;
2498  			if (!cpu_has_feature(CPU_FTR_ARCH_300))
2499  				goto unknown_opcode;
2500  			op->reg = rd | ((word & 1) << 5);
2501  			op->ea = ra ? regs->gpr[ra] : 0;
2502  			nb = regs->gpr[rb] & 0xff;
2503  			if (nb > 16)
2504  				nb = 16;
2505  			op->type = MKOP(LOAD_VSX, 0, nb);
2506  			op->element_size = 16;
2507  			op->vsx_flags = ((word & 0x20) ? VSX_LDLEFT : 0) |
2508  				VSX_CHECK_VEC;
2509  			break;
2510  		}
2511  		case 332:	/* lxvdsx */
2512  			op->reg = rd | ((word & 1) << 5);
2513  			op->type = MKOP(LOAD_VSX, 0, 8);
2514  			op->element_size = 8;
2515  			op->vsx_flags = VSX_SPLAT;
2516  			break;
2517  
2518  		case 333:       /* lxvpx */
2519  			if (!cpu_has_feature(CPU_FTR_ARCH_31))
2520  				goto unknown_opcode;
2521  			op->reg = VSX_REGISTER_XTP(rd);
2522  			op->type = MKOP(LOAD_VSX, 0, 32);
2523  			op->element_size = 32;
2524  			break;
2525  
2526  		case 364:	/* lxvwsx */
2527  			if (!cpu_has_feature(CPU_FTR_ARCH_300))
2528  				goto unknown_opcode;
2529  			op->reg = rd | ((word & 1) << 5);
2530  			op->type = MKOP(LOAD_VSX, 0, 4);
2531  			op->element_size = 4;
2532  			op->vsx_flags = VSX_SPLAT | VSX_CHECK_VEC;
2533  			break;
2534  
2535  		case 396:	/* stxvx */
2536  			if (!cpu_has_feature(CPU_FTR_ARCH_300))
2537  				goto unknown_opcode;
2538  			op->reg = rd | ((word & 1) << 5);
2539  			op->type = MKOP(STORE_VSX, 0, 16);
2540  			op->element_size = 16;
2541  			op->vsx_flags = VSX_CHECK_VEC;
2542  			break;
2543  
2544  		case 397:	/* stxvl */
2545  		case 429: {	/* stxvll */
2546  			int nb;
2547  			if (!cpu_has_feature(CPU_FTR_ARCH_300))
2548  				goto unknown_opcode;
2549  			op->reg = rd | ((word & 1) << 5);
2550  			op->ea = ra ? regs->gpr[ra] : 0;
2551  			nb = regs->gpr[rb] & 0xff;
2552  			if (nb > 16)
2553  				nb = 16;
2554  			op->type = MKOP(STORE_VSX, 0, nb);
2555  			op->element_size = 16;
2556  			op->vsx_flags = ((word & 0x20) ? VSX_LDLEFT : 0) |
2557  				VSX_CHECK_VEC;
2558  			break;
2559  		}
2560  		case 461:       /* stxvpx */
2561  			if (!cpu_has_feature(CPU_FTR_ARCH_31))
2562  				goto unknown_opcode;
2563  			op->reg = VSX_REGISTER_XTP(rd);
2564  			op->type = MKOP(STORE_VSX, 0, 32);
2565  			op->element_size = 32;
2566  			break;
2567  		case 524:	/* lxsspx */
2568  			op->reg = rd | ((word & 1) << 5);
2569  			op->type = MKOP(LOAD_VSX, 0, 4);
2570  			op->element_size = 8;
2571  			op->vsx_flags = VSX_FPCONV;
2572  			break;
2573  
2574  		case 588:	/* lxsdx */
2575  			op->reg = rd | ((word & 1) << 5);
2576  			op->type = MKOP(LOAD_VSX, 0, 8);
2577  			op->element_size = 8;
2578  			break;
2579  
2580  		case 652:	/* stxsspx */
2581  			op->reg = rd | ((word & 1) << 5);
2582  			op->type = MKOP(STORE_VSX, 0, 4);
2583  			op->element_size = 8;
2584  			op->vsx_flags = VSX_FPCONV;
2585  			break;
2586  
2587  		case 716:	/* stxsdx */
2588  			op->reg = rd | ((word & 1) << 5);
2589  			op->type = MKOP(STORE_VSX, 0, 8);
2590  			op->element_size = 8;
2591  			break;
2592  
2593  		case 780:	/* lxvw4x */
2594  			op->reg = rd | ((word & 1) << 5);
2595  			op->type = MKOP(LOAD_VSX, 0, 16);
2596  			op->element_size = 4;
2597  			break;
2598  
2599  		case 781:	/* lxsibzx */
2600  			if (!cpu_has_feature(CPU_FTR_ARCH_300))
2601  				goto unknown_opcode;
2602  			op->reg = rd | ((word & 1) << 5);
2603  			op->type = MKOP(LOAD_VSX, 0, 1);
2604  			op->element_size = 8;
2605  			op->vsx_flags = VSX_CHECK_VEC;
2606  			break;
2607  
2608  		case 812:	/* lxvh8x */
2609  			if (!cpu_has_feature(CPU_FTR_ARCH_300))
2610  				goto unknown_opcode;
2611  			op->reg = rd | ((word & 1) << 5);
2612  			op->type = MKOP(LOAD_VSX, 0, 16);
2613  			op->element_size = 2;
2614  			op->vsx_flags = VSX_CHECK_VEC;
2615  			break;
2616  
2617  		case 813:	/* lxsihzx */
2618  			if (!cpu_has_feature(CPU_FTR_ARCH_300))
2619  				goto unknown_opcode;
2620  			op->reg = rd | ((word & 1) << 5);
2621  			op->type = MKOP(LOAD_VSX, 0, 2);
2622  			op->element_size = 8;
2623  			op->vsx_flags = VSX_CHECK_VEC;
2624  			break;
2625  
2626  		case 844:	/* lxvd2x */
2627  			op->reg = rd | ((word & 1) << 5);
2628  			op->type = MKOP(LOAD_VSX, 0, 16);
2629  			op->element_size = 8;
2630  			break;
2631  
2632  		case 876:	/* lxvb16x */
2633  			if (!cpu_has_feature(CPU_FTR_ARCH_300))
2634  				goto unknown_opcode;
2635  			op->reg = rd | ((word & 1) << 5);
2636  			op->type = MKOP(LOAD_VSX, 0, 16);
2637  			op->element_size = 1;
2638  			op->vsx_flags = VSX_CHECK_VEC;
2639  			break;
2640  
2641  		case 908:	/* stxvw4x */
2642  			op->reg = rd | ((word & 1) << 5);
2643  			op->type = MKOP(STORE_VSX, 0, 16);
2644  			op->element_size = 4;
2645  			break;
2646  
2647  		case 909:	/* stxsibx */
2648  			if (!cpu_has_feature(CPU_FTR_ARCH_300))
2649  				goto unknown_opcode;
2650  			op->reg = rd | ((word & 1) << 5);
2651  			op->type = MKOP(STORE_VSX, 0, 1);
2652  			op->element_size = 8;
2653  			op->vsx_flags = VSX_CHECK_VEC;
2654  			break;
2655  
2656  		case 940:	/* stxvh8x */
2657  			if (!cpu_has_feature(CPU_FTR_ARCH_300))
2658  				goto unknown_opcode;
2659  			op->reg = rd | ((word & 1) << 5);
2660  			op->type = MKOP(STORE_VSX, 0, 16);
2661  			op->element_size = 2;
2662  			op->vsx_flags = VSX_CHECK_VEC;
2663  			break;
2664  
2665  		case 941:	/* stxsihx */
2666  			if (!cpu_has_feature(CPU_FTR_ARCH_300))
2667  				goto unknown_opcode;
2668  			op->reg = rd | ((word & 1) << 5);
2669  			op->type = MKOP(STORE_VSX, 0, 2);
2670  			op->element_size = 8;
2671  			op->vsx_flags = VSX_CHECK_VEC;
2672  			break;
2673  
2674  		case 972:	/* stxvd2x */
2675  			op->reg = rd | ((word & 1) << 5);
2676  			op->type = MKOP(STORE_VSX, 0, 16);
2677  			op->element_size = 8;
2678  			break;
2679  
2680  		case 1004:	/* stxvb16x */
2681  			if (!cpu_has_feature(CPU_FTR_ARCH_300))
2682  				goto unknown_opcode;
2683  			op->reg = rd | ((word & 1) << 5);
2684  			op->type = MKOP(STORE_VSX, 0, 16);
2685  			op->element_size = 1;
2686  			op->vsx_flags = VSX_CHECK_VEC;
2687  			break;
2688  
2689  #endif /* CONFIG_VSX */
2690  		}
2691  		break;
2692  
2693  	case 32:	/* lwz */
2694  	case 33:	/* lwzu */
2695  		op->type = MKOP(LOAD, u, 4);
2696  		op->ea = dform_ea(word, regs);
2697  		break;
2698  
2699  	case 34:	/* lbz */
2700  	case 35:	/* lbzu */
2701  		op->type = MKOP(LOAD, u, 1);
2702  		op->ea = dform_ea(word, regs);
2703  		break;
2704  
2705  	case 36:	/* stw */
2706  	case 37:	/* stwu */
2707  		op->type = MKOP(STORE, u, 4);
2708  		op->ea = dform_ea(word, regs);
2709  		break;
2710  
2711  	case 38:	/* stb */
2712  	case 39:	/* stbu */
2713  		op->type = MKOP(STORE, u, 1);
2714  		op->ea = dform_ea(word, regs);
2715  		break;
2716  
2717  	case 40:	/* lhz */
2718  	case 41:	/* lhzu */
2719  		op->type = MKOP(LOAD, u, 2);
2720  		op->ea = dform_ea(word, regs);
2721  		break;
2722  
2723  	case 42:	/* lha */
2724  	case 43:	/* lhau */
2725  		op->type = MKOP(LOAD, SIGNEXT | u, 2);
2726  		op->ea = dform_ea(word, regs);
2727  		break;
2728  
2729  	case 44:	/* sth */
2730  	case 45:	/* sthu */
2731  		op->type = MKOP(STORE, u, 2);
2732  		op->ea = dform_ea(word, regs);
2733  		break;
2734  
2735  	case 46:	/* lmw */
2736  		if (ra >= rd)
2737  			break;		/* invalid form, ra in range to load */
2738  		op->type = MKOP(LOAD_MULTI, 0, 4 * (32 - rd));
2739  		op->ea = dform_ea(word, regs);
2740  		break;
2741  
2742  	case 47:	/* stmw */
2743  		op->type = MKOP(STORE_MULTI, 0, 4 * (32 - rd));
2744  		op->ea = dform_ea(word, regs);
2745  		break;
2746  
2747  #ifdef CONFIG_PPC_FPU
2748  	case 48:	/* lfs */
2749  	case 49:	/* lfsu */
2750  		op->type = MKOP(LOAD_FP, u | FPCONV, 4);
2751  		op->ea = dform_ea(word, regs);
2752  		break;
2753  
2754  	case 50:	/* lfd */
2755  	case 51:	/* lfdu */
2756  		op->type = MKOP(LOAD_FP, u, 8);
2757  		op->ea = dform_ea(word, regs);
2758  		break;
2759  
2760  	case 52:	/* stfs */
2761  	case 53:	/* stfsu */
2762  		op->type = MKOP(STORE_FP, u | FPCONV, 4);
2763  		op->ea = dform_ea(word, regs);
2764  		break;
2765  
2766  	case 54:	/* stfd */
2767  	case 55:	/* stfdu */
2768  		op->type = MKOP(STORE_FP, u, 8);
2769  		op->ea = dform_ea(word, regs);
2770  		break;
2771  #endif
2772  
2773  #ifdef __powerpc64__
2774  	case 56:	/* lq */
2775  		if (!((rd & 1) || (rd == ra)))
2776  			op->type = MKOP(LOAD, 0, 16);
2777  		op->ea = dqform_ea(word, regs);
2778  		break;
2779  #endif
2780  
2781  #ifdef CONFIG_VSX
2782  	case 57:	/* lfdp, lxsd, lxssp */
2783  		op->ea = dsform_ea(word, regs);
2784  		switch (word & 3) {
2785  		case 0:		/* lfdp */
2786  			if (rd & 1)
2787  				break;		/* reg must be even */
2788  			op->type = MKOP(LOAD_FP, 0, 16);
2789  			break;
2790  		case 2:		/* lxsd */
2791  			if (!cpu_has_feature(CPU_FTR_ARCH_300))
2792  				goto unknown_opcode;
2793  			op->reg = rd + 32;
2794  			op->type = MKOP(LOAD_VSX, 0, 8);
2795  			op->element_size = 8;
2796  			op->vsx_flags = VSX_CHECK_VEC;
2797  			break;
2798  		case 3:		/* lxssp */
2799  			if (!cpu_has_feature(CPU_FTR_ARCH_300))
2800  				goto unknown_opcode;
2801  			op->reg = rd + 32;
2802  			op->type = MKOP(LOAD_VSX, 0, 4);
2803  			op->element_size = 8;
2804  			op->vsx_flags = VSX_FPCONV | VSX_CHECK_VEC;
2805  			break;
2806  		}
2807  		break;
2808  #endif /* CONFIG_VSX */
2809  
2810  #ifdef __powerpc64__
2811  	case 58:	/* ld[u], lwa */
2812  		op->ea = dsform_ea(word, regs);
2813  		switch (word & 3) {
2814  		case 0:		/* ld */
2815  			op->type = MKOP(LOAD, 0, 8);
2816  			break;
2817  		case 1:		/* ldu */
2818  			op->type = MKOP(LOAD, UPDATE, 8);
2819  			break;
2820  		case 2:		/* lwa */
2821  			op->type = MKOP(LOAD, SIGNEXT, 4);
2822  			break;
2823  		}
2824  		break;
2825  #endif
2826  
2827  #ifdef CONFIG_VSX
2828  	case 6:
2829  		if (!cpu_has_feature(CPU_FTR_ARCH_31))
2830  			goto unknown_opcode;
2831  		op->ea = dqform_ea(word, regs);
2832  		op->reg = VSX_REGISTER_XTP(rd);
2833  		op->element_size = 32;
2834  		switch (word & 0xf) {
2835  		case 0:         /* lxvp */
2836  			op->type = MKOP(LOAD_VSX, 0, 32);
2837  			break;
2838  		case 1:         /* stxvp */
2839  			op->type = MKOP(STORE_VSX, 0, 32);
2840  			break;
2841  		}
2842  		break;
2843  
2844  	case 61:	/* stfdp, lxv, stxsd, stxssp, stxv */
2845  		switch (word & 7) {
2846  		case 0:		/* stfdp with LSB of DS field = 0 */
2847  		case 4:		/* stfdp with LSB of DS field = 1 */
2848  			op->ea = dsform_ea(word, regs);
2849  			op->type = MKOP(STORE_FP, 0, 16);
2850  			break;
2851  
2852  		case 1:		/* lxv */
2853  			if (!cpu_has_feature(CPU_FTR_ARCH_300))
2854  				goto unknown_opcode;
2855  			op->ea = dqform_ea(word, regs);
2856  			if (word & 8)
2857  				op->reg = rd + 32;
2858  			op->type = MKOP(LOAD_VSX, 0, 16);
2859  			op->element_size = 16;
2860  			op->vsx_flags = VSX_CHECK_VEC;
2861  			break;
2862  
2863  		case 2:		/* stxsd with LSB of DS field = 0 */
2864  		case 6:		/* stxsd with LSB of DS field = 1 */
2865  			if (!cpu_has_feature(CPU_FTR_ARCH_300))
2866  				goto unknown_opcode;
2867  			op->ea = dsform_ea(word, regs);
2868  			op->reg = rd + 32;
2869  			op->type = MKOP(STORE_VSX, 0, 8);
2870  			op->element_size = 8;
2871  			op->vsx_flags = VSX_CHECK_VEC;
2872  			break;
2873  
2874  		case 3:		/* stxssp with LSB of DS field = 0 */
2875  		case 7:		/* stxssp with LSB of DS field = 1 */
2876  			if (!cpu_has_feature(CPU_FTR_ARCH_300))
2877  				goto unknown_opcode;
2878  			op->ea = dsform_ea(word, regs);
2879  			op->reg = rd + 32;
2880  			op->type = MKOP(STORE_VSX, 0, 4);
2881  			op->element_size = 8;
2882  			op->vsx_flags = VSX_FPCONV | VSX_CHECK_VEC;
2883  			break;
2884  
2885  		case 5:		/* stxv */
2886  			if (!cpu_has_feature(CPU_FTR_ARCH_300))
2887  				goto unknown_opcode;
2888  			op->ea = dqform_ea(word, regs);
2889  			if (word & 8)
2890  				op->reg = rd + 32;
2891  			op->type = MKOP(STORE_VSX, 0, 16);
2892  			op->element_size = 16;
2893  			op->vsx_flags = VSX_CHECK_VEC;
2894  			break;
2895  		}
2896  		break;
2897  #endif /* CONFIG_VSX */
2898  
2899  #ifdef __powerpc64__
2900  	case 62:	/* std[u] */
2901  		op->ea = dsform_ea(word, regs);
2902  		switch (word & 3) {
2903  		case 0:		/* std */
2904  			op->type = MKOP(STORE, 0, 8);
2905  			break;
2906  		case 1:		/* stdu */
2907  			op->type = MKOP(STORE, UPDATE, 8);
2908  			break;
2909  		case 2:		/* stq */
2910  			if (!(rd & 1))
2911  				op->type = MKOP(STORE, 0, 16);
2912  			break;
2913  		}
2914  		break;
2915  	case 1: /* Prefixed instructions */
2916  		if (!cpu_has_feature(CPU_FTR_ARCH_31))
2917  			goto unknown_opcode;
2918  
2919  		prefix_r = GET_PREFIX_R(word);
2920  		ra = GET_PREFIX_RA(suffix);
2921  		op->update_reg = ra;
2922  		rd = (suffix >> 21) & 0x1f;
2923  		op->reg = rd;
2924  		op->val = regs->gpr[rd];
2925  
2926  		suffixopcode = get_op(suffix);
2927  		prefixtype = (word >> 24) & 0x3;
2928  		switch (prefixtype) {
2929  		case 0: /* Type 00  Eight-Byte Load/Store */
2930  			if (prefix_r && ra)
2931  				break;
2932  			op->ea = mlsd_8lsd_ea(word, suffix, regs);
2933  			switch (suffixopcode) {
2934  			case 41:	/* plwa */
2935  				op->type = MKOP(LOAD, PREFIXED | SIGNEXT, 4);
2936  				break;
2937  #ifdef CONFIG_VSX
2938  			case 42:        /* plxsd */
2939  				op->reg = rd + 32;
2940  				op->type = MKOP(LOAD_VSX, PREFIXED, 8);
2941  				op->element_size = 8;
2942  				op->vsx_flags = VSX_CHECK_VEC;
2943  				break;
2944  			case 43:	/* plxssp */
2945  				op->reg = rd + 32;
2946  				op->type = MKOP(LOAD_VSX, PREFIXED, 4);
2947  				op->element_size = 8;
2948  				op->vsx_flags = VSX_FPCONV | VSX_CHECK_VEC;
2949  				break;
2950  			case 46:	/* pstxsd */
2951  				op->reg = rd + 32;
2952  				op->type = MKOP(STORE_VSX, PREFIXED, 8);
2953  				op->element_size = 8;
2954  				op->vsx_flags = VSX_CHECK_VEC;
2955  				break;
2956  			case 47:	/* pstxssp */
2957  				op->reg = rd + 32;
2958  				op->type = MKOP(STORE_VSX, PREFIXED, 4);
2959  				op->element_size = 8;
2960  				op->vsx_flags = VSX_FPCONV | VSX_CHECK_VEC;
2961  				break;
2962  			case 51:	/* plxv1 */
2963  				op->reg += 32;
2964  				fallthrough;
2965  			case 50:	/* plxv0 */
2966  				op->type = MKOP(LOAD_VSX, PREFIXED, 16);
2967  				op->element_size = 16;
2968  				op->vsx_flags = VSX_CHECK_VEC;
2969  				break;
2970  			case 55:	/* pstxv1 */
2971  				op->reg = rd + 32;
2972  				fallthrough;
2973  			case 54:	/* pstxv0 */
2974  				op->type = MKOP(STORE_VSX, PREFIXED, 16);
2975  				op->element_size = 16;
2976  				op->vsx_flags = VSX_CHECK_VEC;
2977  				break;
2978  #endif /* CONFIG_VSX */
2979  			case 56:        /* plq */
2980  				op->type = MKOP(LOAD, PREFIXED, 16);
2981  				break;
2982  			case 57:	/* pld */
2983  				op->type = MKOP(LOAD, PREFIXED, 8);
2984  				break;
2985  #ifdef CONFIG_VSX
2986  			case 58:        /* plxvp */
2987  				op->reg = VSX_REGISTER_XTP(rd);
2988  				op->type = MKOP(LOAD_VSX, PREFIXED, 32);
2989  				op->element_size = 32;
2990  				break;
2991  #endif /* CONFIG_VSX */
2992  			case 60:        /* pstq */
2993  				op->type = MKOP(STORE, PREFIXED, 16);
2994  				break;
2995  			case 61:	/* pstd */
2996  				op->type = MKOP(STORE, PREFIXED, 8);
2997  				break;
2998  #ifdef CONFIG_VSX
2999  			case 62:        /* pstxvp */
3000  				op->reg = VSX_REGISTER_XTP(rd);
3001  				op->type = MKOP(STORE_VSX, PREFIXED, 32);
3002  				op->element_size = 32;
3003  				break;
3004  #endif /* CONFIG_VSX */
3005  			}
3006  			break;
3007  		case 1: /* Type 01 Eight-Byte Register-to-Register */
3008  			break;
3009  		case 2: /* Type 10 Modified Load/Store */
3010  			if (prefix_r && ra)
3011  				break;
3012  			op->ea = mlsd_8lsd_ea(word, suffix, regs);
3013  			switch (suffixopcode) {
3014  			case 32:	/* plwz */
3015  				op->type = MKOP(LOAD, PREFIXED, 4);
3016  				break;
3017  			case 34:	/* plbz */
3018  				op->type = MKOP(LOAD, PREFIXED, 1);
3019  				break;
3020  			case 36:	/* pstw */
3021  				op->type = MKOP(STORE, PREFIXED, 4);
3022  				break;
3023  			case 38:	/* pstb */
3024  				op->type = MKOP(STORE, PREFIXED, 1);
3025  				break;
3026  			case 40:	/* plhz */
3027  				op->type = MKOP(LOAD, PREFIXED, 2);
3028  				break;
3029  			case 42:	/* plha */
3030  				op->type = MKOP(LOAD, PREFIXED | SIGNEXT, 2);
3031  				break;
3032  			case 44:	/* psth */
3033  				op->type = MKOP(STORE, PREFIXED, 2);
3034  				break;
3035  			case 48:        /* plfs */
3036  				op->type = MKOP(LOAD_FP, PREFIXED | FPCONV, 4);
3037  				break;
3038  			case 50:        /* plfd */
3039  				op->type = MKOP(LOAD_FP, PREFIXED, 8);
3040  				break;
3041  			case 52:        /* pstfs */
3042  				op->type = MKOP(STORE_FP, PREFIXED | FPCONV, 4);
3043  				break;
3044  			case 54:        /* pstfd */
3045  				op->type = MKOP(STORE_FP, PREFIXED, 8);
3046  				break;
3047  			}
3048  			break;
3049  		case 3: /* Type 11 Modified Register-to-Register */
3050  			break;
3051  		}
3052  #endif /* __powerpc64__ */
3053  
3054  	}
3055  
3056  	if (OP_IS_LOAD_STORE(op->type) && (op->type & UPDATE)) {
3057  		switch (GETTYPE(op->type)) {
3058  		case LOAD:
3059  			if (ra == rd)
3060  				goto unknown_opcode;
3061  			fallthrough;
3062  		case STORE:
3063  		case LOAD_FP:
3064  		case STORE_FP:
3065  			if (ra == 0)
3066  				goto unknown_opcode;
3067  		}
3068  	}
3069  
3070  #ifdef CONFIG_VSX
3071  	if ((GETTYPE(op->type) == LOAD_VSX ||
3072  	     GETTYPE(op->type) == STORE_VSX) &&
3073  	    !cpu_has_feature(CPU_FTR_VSX)) {
3074  		return -1;
3075  	}
3076  #endif /* CONFIG_VSX */
3077  
3078  	return 0;
3079  
3080   unknown_opcode:
3081  	op->type = UNKNOWN;
3082  	return 0;
3083  
3084   logical_done:
3085  	if (word & 1)
3086  		set_cr0(regs, op);
3087   logical_done_nocc:
3088  	op->reg = ra;
3089  	op->type |= SETREG;
3090  	return 1;
3091  
3092   arith_done:
3093  	if (word & 1)
3094  		set_cr0(regs, op);
3095   compute_done:
3096  	op->reg = rd;
3097  	op->type |= SETREG;
3098  	return 1;
3099  
3100   priv:
3101  	op->type = INTERRUPT | 0x700;
3102  	op->val = SRR1_PROGPRIV;
3103  	return 0;
3104  
3105   trap:
3106  	op->type = INTERRUPT | 0x700;
3107  	op->val = SRR1_PROGTRAP;
3108  	return 0;
3109  }
3110  EXPORT_SYMBOL_GPL(analyse_instr);
3111  NOKPROBE_SYMBOL(analyse_instr);
3112  
3113  /*
3114   * For PPC32 we always use stwu with r1 to change the stack pointer.
3115   * So this emulated store may corrupt the exception frame; we therefore
3116   * provide an exception frame trampoline, which is pushed below the
3117   * kprobed function's stack. Here we only update gpr[1] and do not
3118   * emulate the actual store; the store is instead performed safely
3119   * by the exception return code when it sees this flag.
3120   */
3121  static nokprobe_inline int handle_stack_update(unsigned long ea, struct pt_regs *regs)
3122  {
3123  	/*
3124  	 * Check if the flag is already set, since that would mean we are
3125  	 * about to lose a previously recorded stack update.
3126  	 */
3127  	WARN_ON(test_thread_flag(TIF_EMULATE_STACK_STORE));
3128  	set_thread_flag(TIF_EMULATE_STACK_STORE);
3129  	return 0;
3130  }
3131  
3132  static nokprobe_inline void do_signext(unsigned long *valp, int size)
3133  {
3134  	switch (size) {
3135  	case 2:
3136  		*valp = (signed short) *valp;
3137  		break;
3138  	case 4:
3139  		*valp = (signed int) *valp;
3140  		break;
3141  	}
3142  }
3143  
3144  static nokprobe_inline void do_byterev(unsigned long *valp, int size)
3145  {
3146  	switch (size) {
3147  	case 2:
3148  		*valp = byterev_2(*valp);
3149  		break;
3150  	case 4:
3151  		*valp = byterev_4(*valp);
3152  		break;
3153  #ifdef __powerpc64__
3154  	case 8:
3155  		*valp = byterev_8(*valp);
3156  		break;
3157  #endif
3158  	}
3159  }
3160  
3161  /*
3162   * Emulate an instruction that can be executed just by updating
3163   * fields in *regs.
3164   */
3165  void emulate_update_regs(struct pt_regs *regs, struct instruction_op *op)
3166  {
3167  	unsigned long next_pc;
3168  
3169  	next_pc = truncate_if_32bit(regs->msr, regs->nip + GETLENGTH(op->type));
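	/*
	 * GETLENGTH() is 4, or 8 for prefixed instructions, so next_pc
	 * points just past the instruction being emulated.
	 */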
3170  	switch (GETTYPE(op->type)) {
3171  	case COMPUTE:
3172  		if (op->type & SETREG)
3173  			regs->gpr[op->reg] = op->val;
3174  		if (op->type & SETCC)
3175  			regs->ccr = op->ccval;
3176  		if (op->type & SETXER)
3177  			regs->xer = op->xerval;
3178  		break;
3179  
3180  	case BRANCH:
3181  		if (op->type & SETLK)
3182  			regs->link = next_pc;
3183  		if (op->type & BRTAKEN)
3184  			next_pc = op->val;
3185  		if (op->type & DECCTR)
3186  			--regs->ctr;
3187  		break;
3188  
3189  	case BARRIER:
3190  		switch (op->type & BARRIER_MASK) {
3191  		case BARRIER_SYNC:
3192  			mb();
3193  			break;
3194  		case BARRIER_ISYNC:
3195  			isync();
3196  			break;
3197  		case BARRIER_EIEIO:
3198  			eieio();
3199  			break;
3200  #ifdef CONFIG_PPC64
3201  		case BARRIER_LWSYNC:
3202  			asm volatile("lwsync" : : : "memory");
3203  			break;
3204  		case BARRIER_PTESYNC:
3205  			asm volatile("ptesync" : : : "memory");
3206  			break;
3207  #endif
3208  		}
3209  		break;
3210  
3211  	case MFSPR:
3212  		switch (op->spr) {
3213  		case SPRN_XER:
3214  			regs->gpr[op->reg] = regs->xer & 0xffffffffUL;
3215  			break;
3216  		case SPRN_LR:
3217  			regs->gpr[op->reg] = regs->link;
3218  			break;
3219  		case SPRN_CTR:
3220  			regs->gpr[op->reg] = regs->ctr;
3221  			break;
3222  		default:
3223  			WARN_ON_ONCE(1);
3224  		}
3225  		break;
3226  
3227  	case MTSPR:
3228  		switch (op->spr) {
3229  		case SPRN_XER:
3230  			regs->xer = op->val & 0xffffffffUL;
3231  			break;
3232  		case SPRN_LR:
3233  			regs->link = op->val;
3234  			break;
3235  		case SPRN_CTR:
3236  			regs->ctr = op->val;
3237  			break;
3238  		default:
3239  			WARN_ON_ONCE(1);
3240  		}
3241  		break;
3242  
3243  	default:
3244  		WARN_ON_ONCE(1);
3245  	}
3246  	regs_set_return_ip(regs, next_pc);
3247  }
3248  NOKPROBE_SYMBOL(emulate_update_regs);
3249  
3250  /*
3251   * Emulate a previously-analysed load or store instruction.
3252   * Return values are:
3253   * 0 = instruction emulated successfully
3254   * -EFAULT = address out of range or access faulted (regs->dar
3255   *	     contains the faulting address)
3256   * -EACCES = misaligned access, instruction requires alignment
3257   * -EINVAL = unknown operation in *op
3258   */
3259  int emulate_loadstore(struct pt_regs *regs, struct instruction_op *op)
3260  {
3261  	int err, size, type;
3262  	int i, rd, nb;
3263  	unsigned int cr;
3264  	unsigned long val;
3265  	unsigned long ea;
3266  	bool cross_endian;
3267  
3268  	err = 0;
3269  	size = GETSIZE(op->type);
3270  	type = GETTYPE(op->type);
3271  	cross_endian = (regs->msr & MSR_LE) != (MSR_KERNEL & MSR_LE);
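	/*
	 * cross_endian means the emulated context runs opposite-endian to
	 * the kernel (e.g. an LE process on a BE kernel), so values moved
	 * to or from memory need an extra byte reversal.
	 */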
3272  	ea = truncate_if_32bit(regs->msr, op->ea);
3273  
3274  	switch (type) {
3275  	case LARX:
3276  		if (ea & (size - 1))
3277  			return -EACCES;		/* can't handle misaligned */
3278  		if (!address_ok(regs, ea, size))
3279  			return -EFAULT;
3280  		err = 0;
3281  		val = 0;
3282  		switch (size) {
3283  #ifdef __powerpc64__
3284  		case 1:
3285  			__get_user_asmx(val, ea, err, "lbarx");
3286  			break;
3287  		case 2:
3288  			__get_user_asmx(val, ea, err, "lharx");
3289  			break;
3290  #endif
3291  		case 4:
3292  			__get_user_asmx(val, ea, err, "lwarx");
3293  			break;
3294  #ifdef __powerpc64__
3295  		case 8:
3296  			__get_user_asmx(val, ea, err, "ldarx");
3297  			break;
3298  		case 16:
3299  			err = do_lqarx(ea, &regs->gpr[op->reg]);
3300  			break;
3301  #endif
3302  		default:
3303  			return -EINVAL;
3304  		}
3305  		if (err) {
3306  			regs->dar = ea;
3307  			break;
3308  		}
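		/* for size 16, do_lqarx() has already written the GPR pair */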
3309  		if (size < 16)
3310  			regs->gpr[op->reg] = val;
3311  		break;
3312  
3313  	case STCX:
3314  		if (ea & (size - 1))
3315  			return -EACCES;		/* can't handle misaligned */
3316  		if (!address_ok(regs, ea, size))
3317  			return -EFAULT;
3318  		err = 0;
3319  		switch (size) {
3320  #ifdef __powerpc64__
3321  		case 1:
3322  			__put_user_asmx(op->val, ea, err, "stbcx.", cr);
3323  			break;
3324  		case 2:
3325  			__put_user_asmx(op->val, ea, err, "sthcx.", cr);
3326  			break;
3327  #endif
3328  		case 4:
3329  			__put_user_asmx(op->val, ea, err, "stwcx.", cr);
3330  			break;
3331  #ifdef __powerpc64__
3332  		case 8:
3333  			__put_user_asmx(op->val, ea, err, "stdcx.", cr);
3334  			break;
3335  		case 16:
3336  			err = do_stqcx(ea, regs->gpr[op->reg],
3337  				       regs->gpr[op->reg + 1], &cr);
3338  			break;
3339  #endif
3340  		default:
3341  			return -EINVAL;
3342  		}
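		/*
		 * Compose CR0 for st[bhwdq]cx.: LT/GT/EQ come from the real
		 * conditional store (cr & 0xe0000000) and SO is copied from
		 * XER: the SO bit (0x80000000) shifted right by 3 lands in
		 * CR0's SO position (0x10000000).
		 */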
3343  		if (!err)
3344  			regs->ccr = (regs->ccr & 0x0fffffff) |
3345  				(cr & 0xe0000000) |
3346  				((regs->xer >> 3) & 0x10000000);
3347  		else
3348  			regs->dar = ea;
3349  		break;
3350  
3351  	case LOAD:
3352  #ifdef __powerpc64__
3353  		if (size == 16) {
3354  			err = emulate_lq(regs, ea, op->reg, cross_endian);
3355  			break;
3356  		}
3357  #endif
3358  		err = read_mem(&regs->gpr[op->reg], ea, size, regs);
3359  		if (!err) {
3360  			if (op->type & SIGNEXT)
3361  				do_signext(&regs->gpr[op->reg], size);
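			/*
			 * Byte-reverse iff exactly one of BYTEREV and
			 * cross_endian applies: a byte-reversing load in a
			 * same-endian context, or a plain load cross-endian.
			 */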
3362  			if ((op->type & BYTEREV) == (cross_endian ? 0 : BYTEREV))
3363  				do_byterev(&regs->gpr[op->reg], size);
3364  		}
3365  		break;
3366  
3367  #ifdef CONFIG_PPC_FPU
3368  	case LOAD_FP:
3369  		/*
3370  		 * If the instruction is in userspace, we can emulate it even
3371  		 * if the FP state is not live, because we have the state
3372  		 * stored in the thread_struct.  If the instruction is in
3373  		 * the kernel, we must not touch the state in the thread_struct.
3374  		 */
3375  		if (!(regs->msr & MSR_PR) && !(regs->msr & MSR_FP))
3376  			return 0;
3377  		err = do_fp_load(op, ea, regs, cross_endian);
3378  		break;
3379  #endif
3380  #ifdef CONFIG_ALTIVEC
3381  	case LOAD_VMX:
3382  		if (!(regs->msr & MSR_PR) && !(regs->msr & MSR_VEC))
3383  			return 0;
3384  		err = do_vec_load(op->reg, ea, size, regs, cross_endian);
3385  		break;
3386  #endif
3387  #ifdef CONFIG_VSX
3388  	case LOAD_VSX: {
3389  		unsigned long msrbit = MSR_VSX;
3390  
3391  		/*
3392  		 * Some VSX instructions check the MSR_VEC bit rather than MSR_VSX
3393  		 * when the target of the instruction is a vector register.
3394  		 */
3395  		if (op->reg >= 32 && (op->vsx_flags & VSX_CHECK_VEC))
3396  			msrbit = MSR_VEC;
3397  		if (!(regs->msr & MSR_PR) && !(regs->msr & msrbit))
3398  			return 0;
3399  		err = do_vsx_load(op, ea, regs, cross_endian);
3400  		break;
3401  	}
3402  #endif
3403  	case LOAD_MULTI:
3404  		if (!address_ok(regs, ea, size))
3405  			return -EFAULT;
3406  		rd = op->reg;
3407  		for (i = 0; i < size; i += 4) {
3408  			unsigned int v32 = 0;
3409  
3410  			nb = size - i;
3411  			if (nb > 4)
3412  				nb = 4;
3413  			err = copy_mem_in((u8 *) &v32, ea, nb, regs);
3414  			if (err)
3415  				break;
3416  			if (unlikely(cross_endian))
3417  				v32 = byterev_4(v32);
3418  			regs->gpr[rd] = v32;
3419  			ea += 4;
3420  			/* reg number wraps from 31 to 0 for lsw[ix] */
3421  			rd = (rd + 1) & 0x1f;
3422  		}
3423  		break;
3424  
3425  	case STORE:
3426  #ifdef __powerpc64__
3427  		if (size == 16) {
3428  			err = emulate_stq(regs, ea, op->reg, cross_endian);
3429  			break;
3430  		}
3431  #endif
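		/*
		 * A kernel-mode stwu/stdu r1,N(r1) near the current stack
		 * frame is the stack-pointer update case described at
		 * handle_stack_update(): defer the real store so the
		 * exception frame is not clobbered.
		 */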
3432  		if ((op->type & UPDATE) && size == sizeof(long) &&
3433  		    op->reg == 1 && op->update_reg == 1 &&
3434  		    !(regs->msr & MSR_PR) &&
3435  		    ea >= regs->gpr[1] - STACK_INT_FRAME_SIZE) {
3436  			err = handle_stack_update(ea, regs);
3437  			break;
3438  		}
3439  		if (unlikely(cross_endian))
3440  			do_byterev(&op->val, size);
3441  		err = write_mem(op->val, ea, size, regs);
3442  		break;
3443  
3444  #ifdef CONFIG_PPC_FPU
3445  	case STORE_FP:
3446  		if (!(regs->msr & MSR_PR) && !(regs->msr & MSR_FP))
3447  			return 0;
3448  		err = do_fp_store(op, ea, regs, cross_endian);
3449  		break;
3450  #endif
3451  #ifdef CONFIG_ALTIVEC
3452  	case STORE_VMX:
3453  		if (!(regs->msr & MSR_PR) && !(regs->msr & MSR_VEC))
3454  			return 0;
3455  		err = do_vec_store(op->reg, ea, size, regs, cross_endian);
3456  		break;
3457  #endif
3458  #ifdef CONFIG_VSX
3459  	case STORE_VSX: {
3460  		unsigned long msrbit = MSR_VSX;
3461  
3462  		/*
3463  		 * Some VSX instructions check the MSR_VEC bit rather than MSR_VSX
3464  		 * when the target of the instruction is a vector register.
3465  		 */
3466  		if (op->reg >= 32 && (op->vsx_flags & VSX_CHECK_VEC))
3467  			msrbit = MSR_VEC;
3468  		if (!(regs->msr & MSR_PR) && !(regs->msr & msrbit))
3469  			return 0;
3470  		err = do_vsx_store(op, ea, regs, cross_endian);
3471  		break;
3472  	}
3473  #endif
3474  	case STORE_MULTI:
3475  		if (!address_ok(regs, ea, size))
3476  			return -EFAULT;
3477  		rd = op->reg;
3478  		for (i = 0; i < size; i += 4) {
3479  			unsigned int v32 = regs->gpr[rd];
3480  
3481  			nb = size - i;
3482  			if (nb > 4)
3483  				nb = 4;
3484  			if (unlikely(cross_endian))
3485  				v32 = byterev_4(v32);
3486  			err = copy_mem_out((u8 *) &v32, ea, nb, regs);
3487  			if (err)
3488  				break;
3489  			ea += 4;
3490  			/* reg number wraps from 31 to 0 for stsw[ix] */
3491  			rd = (rd + 1) & 0x1f;
3492  		}
3493  		break;
3494  
3495  	default:
3496  		return -EINVAL;
3497  	}
3498  
3499  	if (err)
3500  		return err;
3501  
3502  	if (op->type & UPDATE)
3503  		regs->gpr[op->update_reg] = op->ea;
3504  
3505  	return 0;
3506  }
3507  NOKPROBE_SYMBOL(emulate_loadstore);
3508  
3509  /*
3510   * Emulate instructions that cause a transfer of control,
3511   * loads and stores, and a few other instructions.
3512   * Returns 1 if the step was emulated, 0 if not,
3513   * or -1 if the instruction is one that should not be stepped,
3514   * such as an rfid, or a mtmsrd that would clear MSR_RI.
3515   */
3516  int emulate_step(struct pt_regs *regs, struct ppc_inst instr)
3517  {
3518  	struct instruction_op op;
3519  	int r, err, type;
3520  	unsigned long val;
3521  	unsigned long ea;
3522  
3523  	r = analyse_instr(&op, regs, instr);
3524  	if (r < 0)
3525  		return r;
3526  	if (r > 0) {
3527  		emulate_update_regs(regs, &op);
3528  		return 1;
3529  	}
3530  
3531  	err = 0;
3532  	type = GETTYPE(op.type);
3533  
3534  	if (OP_IS_LOAD_STORE(type)) {
3535  		err = emulate_loadstore(regs, &op);
3536  		if (err)
3537  			return 0;
3538  		goto instr_done;
3539  	}
3540  
3541  	switch (type) {
3542  	case CACHEOP:
3543  		ea = truncate_if_32bit(regs->msr, op.ea);
3544  		if (!address_ok(regs, ea, 8))
3545  			return 0;
3546  		switch (op.type & CACHEOP_MASK) {
3547  		case DCBST:
3548  			__cacheop_user_asmx(ea, err, "dcbst");
3549  			break;
3550  		case DCBF:
3551  			__cacheop_user_asmx(ea, err, "dcbf");
3552  			break;
3553  		case DCBTST:
3554  			if (op.reg == 0)
3555  				prefetchw((void *) ea);
3556  			break;
3557  		case DCBT:
3558  			if (op.reg == 0)
3559  				prefetch((void *) ea);
3560  			break;
3561  		case ICBI:
3562  			__cacheop_user_asmx(ea, err, "icbi");
3563  			break;
3564  		case DCBZ:
3565  			err = emulate_dcbz(ea, regs);
3566  			break;
3567  		}
3568  		if (err) {
3569  			regs->dar = ea;
3570  			return 0;
3571  		}
3572  		goto instr_done;
3573  
3574  	case MFMSR:
3575  		regs->gpr[op.reg] = regs->msr & MSR_MASK;
3576  		goto instr_done;
3577  
3578  	case MTMSR:
3579  		val = regs->gpr[op.reg];
3580  		if ((val & MSR_RI) == 0)
3581  			/* can't step mtmsr[d] that would clear MSR_RI */
3582  			return -1;
3583  		/* here op.val is the mask of bits to change */
3584  		regs_set_return_msr(regs, (regs->msr & ~op.val) | (val & op.val));
3585  		goto instr_done;
3586  
3587  #ifdef CONFIG_PPC64
3588  	case SYSCALL:	/* sc */
3589  		/*
3590  		 * N.B. this uses knowledge about how the syscall
3591  		 * entry code works.  If that is changed, this will
3592  		 * need to be changed also.
3593  		 */
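		/*
		 * r0 == 0x1ebe selects the "fast endian switch"
		 * pseudo-syscall, emulated here by simply flipping MSR_LE.
		 */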
3594  		if (IS_ENABLED(CONFIG_PPC_FAST_ENDIAN_SWITCH) &&
3595  				cpu_has_feature(CPU_FTR_REAL_LE) &&
3596  				regs->gpr[0] == 0x1ebe) {
3597  			regs_set_return_msr(regs, regs->msr ^ MSR_LE);
3598  			goto instr_done;
3599  		}
3600  		regs->gpr[9] = regs->gpr[13];
3601  		regs->gpr[10] = MSR_KERNEL;
3602  		regs->gpr[11] = regs->nip + 4;
3603  		regs->gpr[12] = regs->msr & MSR_MASK;
3604  		regs->gpr[13] = (unsigned long) get_paca();
3605  		regs_set_return_ip(regs, (unsigned long) &system_call_common);
3606  		regs_set_return_msr(regs, MSR_KERNEL);
3607  		return 1;
3608  
3609  #ifdef CONFIG_PPC_BOOK3S_64
3610  	case SYSCALL_VECTORED_0:	/* scv 0 */
3611  		regs->gpr[9] = regs->gpr[13];
3612  		regs->gpr[10] = MSR_KERNEL;
3613  		regs->gpr[11] = regs->nip + 4;
3614  		regs->gpr[12] = regs->msr & MSR_MASK;
3615  		regs->gpr[13] = (unsigned long) get_paca();
3616  		regs_set_return_ip(regs, (unsigned long) &system_call_vectored_emulate);
3617  		regs_set_return_msr(regs, MSR_KERNEL);
3618  		return 1;
3619  #endif
3620  
3621  	case RFI:
3622  		return -1;
3623  #endif
3624  	}
3625  	return 0;
3626  
3627   instr_done:
3628  	regs_set_return_ip(regs,
3629  		truncate_if_32bit(regs->msr, regs->nip + GETLENGTH(op.type)));
3630  	return 1;
3631  }
3632  NOKPROBE_SYMBOL(emulate_step);
3633