• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // Copyright 2014 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #ifndef V8_CODEGEN_PPC_CONSTANTS_PPC_H_
6 #define V8_CODEGEN_PPC_CONSTANTS_PPC_H_
7 
8 #include <stdint.h>
9 
10 #include "src/base/logging.h"
11 #include "src/base/macros.h"
12 #include "src/common/globals.h"
13 
// UNIMPLEMENTED_ macro for PPC: in debug builds, reports the unimplemented
// call site (file/line/function); in release builds it expands to nothing.
#ifdef DEBUG
#define UNIMPLEMENTED_PPC()                                                \
  v8::internal::PrintF("%s, \tline %d: \tfunction %s not implemented. \n", \
                       __FILE__, __LINE__, __func__)
#else
#define UNIMPLEMENTED_PPC()
#endif

// ABI_USES_FUNCTION_DESCRIPTORS: the target ABI calls through a function
// descriptor (AIX, and big-endian PPC64 ELFv1) rather than a raw code address.
#if (V8_HOST_ARCH_PPC || V8_HOST_ARCH_PPC64) &&                    \
    (V8_OS_AIX || (V8_TARGET_ARCH_PPC64 && V8_TARGET_BIG_ENDIAN && \
                   (!defined(_CALL_ELF) || _CALL_ELF == 1)))
#define ABI_USES_FUNCTION_DESCRIPTORS 1
#else
#define ABI_USES_FUNCTION_DESCRIPTORS 0
#endif

// ABI_PASSES_HANDLES_IN_REGS: handles are passed in registers (simulator
// builds, AIX, and all PPC64 targets).
#if !(V8_HOST_ARCH_PPC || V8_HOST_ARCH_PPC64) || V8_OS_AIX || \
    V8_TARGET_ARCH_PPC64
#define ABI_PASSES_HANDLES_IN_REGS 1
#else
#define ABI_PASSES_HANDLES_IN_REGS 0
#endif

// ABI_RETURNS_OBJECT_PAIRS_IN_REGS: a two-object return value comes back in
// registers (simulator builds, 32-bit targets, and PPC64 ELFv2 /
// little-endian).
#if !(V8_HOST_ARCH_PPC || V8_HOST_ARCH_PPC64) || !V8_TARGET_ARCH_PPC64 || \
    V8_TARGET_LITTLE_ENDIAN || (defined(_CALL_ELF) && _CALL_ELF == 2)
#define ABI_RETURNS_OBJECT_PAIRS_IN_REGS 1
#else
#define ABI_RETURNS_OBJECT_PAIRS_IN_REGS 0
#endif

// ABI_CALL_VIA_IP: indirect calls must load the target into the designated
// ip register first (simulator builds, and PPC64 ELFv2 / little-endian,
// where r12 must hold the entry point).
#if !(V8_HOST_ARCH_PPC || V8_HOST_ARCH_PPC64) || \
    (V8_TARGET_ARCH_PPC64 &&                     \
     (V8_TARGET_LITTLE_ENDIAN || (defined(_CALL_ELF) && _CALL_ELF == 2)))
#define ABI_CALL_VIA_IP 1
#else
#define ABI_CALL_VIA_IP 0
#endif

// ABI_TOC_REGISTER: register holding the TOC pointer — r2 on 64-bit/AIX
// ABIs, r13 otherwise.
#if !(V8_HOST_ARCH_PPC || V8_HOST_ARCH_PPC64) || V8_OS_AIX || \
    V8_TARGET_ARCH_PPC64
#define ABI_TOC_REGISTER 2
#else
#define ABI_TOC_REGISTER 13
#endif
59 namespace v8 {
60 namespace internal {
61 
// TODO(sigurds): Change this value once we use relative jumps.
constexpr size_t kMaxPCRelativeCodeRangeInMB = 0;

// Used to encode a boolean value when emitting 32 bit
// opcodes which will indicate the presence of function descriptors
constexpr int kHasFunctionDescriptorBitShift = 4;
constexpr int kHasFunctionDescriptorBitMask = 1
                                              << kHasFunctionDescriptorBitShift;

// Number of general-purpose registers.
const int kNumRegisters = 32;

// FP support: number of double-precision floating-point registers.
const int kNumDoubleRegisters = 32;

// Sentinel meaning "no register allocated".
const int kNoRegister = -1;

// Used in embedded constant pool builder - max reach in bits for
// various load instructions (one less due to unsigned)
const int kLoadPtrMaxReachBits = 15;
const int kLoadDoubleMaxReachBits = 15;

// Actual value of root register is offset from the root array's start
// to take advantage of negative displacement values.
// TODO(sigurds): Choose best value.
constexpr int kRootRegisterBias = 128;
// The SIGN_EXT_IMM<N> macros sign-extend the least significant N bits of
// <imm> by shifting them up to the top of the (32- or 64-bit) word and
// arithmetically shifting back down.  They rely on arithmetic right shift
// of signed integers, which all supported toolchains provide.

// sign-extend the least significant 5-bits of value <imm>
#define SIGN_EXT_IMM5(imm) ((static_cast<int>(imm) << 27) >> 27)

// sign-extend the least significant 16-bits of value <imm>
#define SIGN_EXT_IMM16(imm) ((static_cast<int>(imm) << 16) >> 16)

// sign-extend the least significant 18-bits of value <imm>
#define SIGN_EXT_IMM18(imm) ((static_cast<int>(imm) << 14) >> 14)

// sign-extend the least significant 22-bits of value <imm>
#define SIGN_EXT_IMM22(imm) ((static_cast<int>(imm) << 10) >> 10)

// sign-extend the least significant 26-bits of value <imm>
#define SIGN_EXT_IMM26(imm) ((static_cast<int>(imm) << 6) >> 6)

// sign-extend the least significant 34-bits of prefix+suffix value <imm>
#define SIGN_EXT_IMM34(imm) ((static_cast<int64_t>(imm) << 30) >> 30)
106 
107 // -----------------------------------------------------------------------------
108 // Conditions.
109 
110 // Defines constants and accessor classes to assemble, disassemble and
111 // simulate PPC instructions.
112 //
113 // Section references in the code refer to the "PowerPC Microprocessor
114 // Family: The Programmer's Reference Guide" from 10/95
115 // https://www-01.ibm.com/chips/techlib/techlib.nsf/techdocs/852569B20050FF778525699600741775/$file/prg.pdf
116 //
117 
118 // Constants for specific fields are defined in their respective named enums.
119 // General constants are in an anonymous enum in class Instr.
// Branch/compare conditions.  The encoding pairs each condition with its
// negation on adjacent even/odd values, so NegateCondition() can simply
// XOR with ne (== 1): eq/ne, ge/lt, gt/le, unordered/ordered,
// overflow/nooverflow.
enum Condition {
  kNoCondition = -1,
  eq = 0,         // Equal.
  ne = 1,         // Not equal.
  ge = 2,         // Greater or equal.
  lt = 3,         // Less than.
  gt = 4,         // Greater than.
  le = 5,         // Less than or equal.
  unordered = 6,  // Floating-point unordered.
  ordered = 7,    // Floating-point ordered.
  overflow = 8,   // Summary overflow.
  nooverflow = 9, // No summary overflow.
  al = 10         // Always (unconditional); has no negation.
};
134 
NegateCondition(Condition cond)135 inline Condition NegateCondition(Condition cond) {
136   DCHECK(cond != al);
137   return static_cast<Condition>(cond ^ ne);
138 }
139 
// -----------------------------------------------------------------------------
// Instructions encoding.

// Instr is merely used by the Assembler to distinguish 32bit integers
// representing instructions from usual 32 bit values.
// Instruction objects are pointers to 32bit values, and provide methods to
// access the various ISA fields.
using Instr = uint32_t;
148 
// X-macro list of VSX scalar XX3-form opcodes.
// Each entry is V(mnemonic, NAME, opcode-bit-pattern).
#define PPC_XX3_OPCODE_SCALAR_LIST(V)                                 \
  /* VSX Scalar Add Double-Precision */                               \
  V(xsadddp, XSADDDP, 0xF0000100)                                     \
  /* VSX Scalar Add Single-Precision */                               \
  V(xsaddsp, XSADDSP, 0xF0000000)                                     \
  /* VSX Scalar Compare Ordered Double-Precision */                   \
  V(xscmpodp, XSCMPODP, 0xF0000158)                                   \
  /* VSX Scalar Compare Unordered Double-Precision */                 \
  V(xscmpudp, XSCMPUDP, 0xF0000118)                                   \
  /* VSX Scalar Copy Sign Double-Precision */                         \
  V(xscpsgndp, XSCPSGNDP, 0xF0000580)                                 \
  /* VSX Scalar Divide Double-Precision */                            \
  V(xsdivdp, XSDIVDP, 0xF00001C0)                                     \
  /* VSX Scalar Divide Single-Precision */                            \
  V(xsdivsp, XSDIVSP, 0xF00000C0)                                     \
  /* VSX Scalar Multiply-Add Type-A Double-Precision */               \
  V(xsmaddadp, XSMADDADP, 0xF0000108)                                 \
  /* VSX Scalar Multiply-Add Type-A Single-Precision */               \
  V(xsmaddasp, XSMADDASP, 0xF0000008)                                 \
  /* VSX Scalar Multiply-Add Type-M Double-Precision */               \
  V(xsmaddmdp, XSMADDMDP, 0xF0000148)                                 \
  /* VSX Scalar Multiply-Add Type-M Single-Precision */               \
  V(xsmaddmsp, XSMADDMSP, 0xF0000048)                                 \
  /* VSX Scalar Maximum Double-Precision */                           \
  V(xsmaxdp, XSMAXDP, 0xF0000500)                                     \
  /* VSX Scalar Minimum Double-Precision */                           \
  V(xsmindp, XSMINDP, 0xF0000540)                                     \
  /* VSX Scalar Multiply-Subtract Type-A Double-Precision */          \
  V(xsmsubadp, XSMSUBADP, 0xF0000188)                                 \
  /* VSX Scalar Multiply-Subtract Type-A Single-Precision */          \
  V(xsmsubasp, XSMSUBASP, 0xF0000088)                                 \
  /* VSX Scalar Multiply-Subtract Type-M Double-Precision */          \
  V(xsmsubmdp, XSMSUBMDP, 0xF00001C8)                                 \
  /* VSX Scalar Multiply-Subtract Type-M Single-Precision */          \
  V(xsmsubmsp, XSMSUBMSP, 0xF00000C8)                                 \
  /* VSX Scalar Multiply Double-Precision */                          \
  V(xsmuldp, XSMULDP, 0xF0000180)                                     \
  /* VSX Scalar Multiply Single-Precision */                          \
  V(xsmulsp, XSMULSP, 0xF0000080)                                     \
  /* VSX Scalar Negative Multiply-Add Type-A Double-Precision */      \
  V(xsnmaddadp, XSNMADDADP, 0xF0000508)                               \
  /* VSX Scalar Negative Multiply-Add Type-A Single-Precision */      \
  V(xsnmaddasp, XSNMADDASP, 0xF0000408)                               \
  /* VSX Scalar Negative Multiply-Add Type-M Double-Precision */      \
  V(xsnmaddmdp, XSNMADDMDP, 0xF0000548)                               \
  /* VSX Scalar Negative Multiply-Add Type-M Single-Precision */      \
  V(xsnmaddmsp, XSNMADDMSP, 0xF0000448)                               \
  /* VSX Scalar Negative Multiply-Subtract Type-A Double-Precision */ \
  V(xsnmsubadp, XSNMSUBADP, 0xF0000588)                               \
  /* VSX Scalar Negative Multiply-Subtract Type-A Single-Precision */ \
  V(xsnmsubasp, XSNMSUBASP, 0xF0000488)                               \
  /* VSX Scalar Negative Multiply-Subtract Type-M Double-Precision */ \
  V(xsnmsubmdp, XSNMSUBMDP, 0xF00005C8)                               \
  /* VSX Scalar Negative Multiply-Subtract Type-M Single-Precision */ \
  V(xsnmsubmsp, XSNMSUBMSP, 0xF00004C8)                               \
  /* VSX Scalar Reciprocal Estimate Double-Precision */               \
  V(xsredp, XSREDP, 0xF0000168)                                       \
  /* VSX Scalar Subtract Double-Precision */                          \
  V(xssubdp, XSSUBDP, 0xF0000140)                                     \
  /* VSX Scalar Subtract Single-Precision */                          \
  V(xssubsp, XSSUBSP, 0xF0000040)                                     \
  /* VSX Scalar Test for software Divide Double-Precision */          \
  V(xstdivdp, XSTDIVDP, 0xF00001E8)
212 
// X-macro list of VSX vector XX3-form opcodes.
// Each entry is V(mnemonic, NAME, opcode-bit-pattern).
#define PPC_XX3_OPCODE_VECTOR_LIST(V)                                         \
  /* VSX Vector Add Double-Precision */                                       \
  V(xvadddp, XVADDDP, 0xF0000300)                                             \
  /* VSX Vector Add Single-Precision */                                       \
  V(xvaddsp, XVADDSP, 0xF0000200)                                             \
  /* VSX Vector Compare Equal To Double-Precision */                          \
  V(xvcmpeqdp, XVCMPEQDP, 0xF0000318)                                         \
  /* VSX Vector Compare Equal To Double-Precision & record CR6 */             \
  V(xvcmpeqdpx, XVCMPEQDPx, 0xF0000718)                                       \
  /* VSX Vector Compare Equal To Single-Precision */                          \
  V(xvcmpeqsp, XVCMPEQSP, 0xF0000218)                                         \
  /* VSX Vector Compare Equal To Single-Precision & record CR6 */             \
  V(xvcmpeqspx, XVCMPEQSPx, 0xF0000618)                                       \
  /* VSX Vector Compare Greater Than or Equal To Double-Precision */          \
  V(xvcmpgedp, XVCMPGEDP, 0xF0000398)                                         \
  /* VSX Vector Compare Greater Than or Equal To Double-Precision & record */ \
  /* CR6 */                                                                   \
  V(xvcmpgedpx, XVCMPGEDPx, 0xF0000798)                                       \
  /* VSX Vector Compare Greater Than or Equal To Single-Precision */          \
  V(xvcmpgesp, XVCMPGESP, 0xF0000298)                                         \
  /* VSX Vector Compare Greater Than or Equal To Single-Precision & record */ \
  /* CR6 */                                                                   \
  V(xvcmpgespx, XVCMPGESPx, 0xF0000698)                                       \
  /* VSX Vector Compare Greater Than Double-Precision */                      \
  V(xvcmpgtdp, XVCMPGTDP, 0xF0000358)                                         \
  /* VSX Vector Compare Greater Than Double-Precision & record CR6 */         \
  V(xvcmpgtdpx, XVCMPGTDPx, 0xF0000758)                                       \
  /* VSX Vector Compare Greater Than Single-Precision */                      \
  V(xvcmpgtsp, XVCMPGTSP, 0xF0000258)                                         \
  /* VSX Vector Compare Greater Than Single-Precision & record CR6 */         \
  V(xvcmpgtspx, XVCMPGTSPx, 0xF0000658)                                       \
  /* VSX Vector Copy Sign Double-Precision */                                 \
  V(xvcpsgndp, XVCPSGNDP, 0xF0000780)                                         \
  /* VSX Vector Copy Sign Single-Precision */                                 \
  V(xvcpsgnsp, XVCPSGNSP, 0xF0000680)                                         \
  /* VSX Vector Divide Double-Precision */                                    \
  V(xvdivdp, XVDIVDP, 0xF00003C0)                                             \
  /* VSX Vector Divide Single-Precision */                                    \
  V(xvdivsp, XVDIVSP, 0xF00002C0)                                             \
  /* VSX Vector Multiply-Add Type-A Double-Precision */                       \
  V(xvmaddadp, XVMADDADP, 0xF0000308)                                         \
  /* VSX Vector Multiply-Add Type-A Single-Precision */                       \
  V(xvmaddasp, XVMADDASP, 0xF0000208)                                         \
  /* VSX Vector Multiply-Add Type-M Double-Precision */                       \
  V(xvmaddmdp, XVMADDMDP, 0xF0000348)                                         \
  /* VSX Vector Multiply-Add Type-M Single-Precision */                       \
  V(xvmaddmsp, XVMADDMSP, 0xF0000248)                                         \
  /* VSX Vector Maximum Double-Precision */                                   \
  V(xvmaxdp, XVMAXDP, 0xF0000700)                                             \
  /* VSX Vector Maximum Single-Precision */                                   \
  V(xvmaxsp, XVMAXSP, 0xF0000600)                                             \
  /* VSX Vector Minimum Double-Precision */                                   \
  V(xvmindp, XVMINDP, 0xF0000740)                                             \
  /* VSX Vector Minimum Single-Precision */                                   \
  V(xvminsp, XVMINSP, 0xF0000640)                                             \
  /* VSX Vector Multiply-Subtract Type-A Double-Precision */                  \
  V(xvmsubadp, XVMSUBADP, 0xF0000388)                                         \
  /* VSX Vector Multiply-Subtract Type-A Single-Precision */                  \
  V(xvmsubasp, XVMSUBASP, 0xF0000288)                                         \
  /* VSX Vector Multiply-Subtract Type-M Double-Precision */                  \
  V(xvmsubmdp, XVMSUBMDP, 0xF00003C8)                                         \
  /* VSX Vector Multiply-Subtract Type-M Single-Precision */                  \
  V(xvmsubmsp, XVMSUBMSP, 0xF00002C8)                                         \
  /* VSX Vector Multiply Double-Precision */                                  \
  V(xvmuldp, XVMULDP, 0xF0000380)                                             \
  /* VSX Vector Multiply Single-Precision */                                  \
  V(xvmulsp, XVMULSP, 0xF0000280)                                             \
  /* VSX Vector Negative Multiply-Add Type-A Double-Precision */              \
  V(xvnmaddadp, XVNMADDADP, 0xF0000708)                                       \
  /* VSX Vector Negative Multiply-Add Type-A Single-Precision */              \
  V(xvnmaddasp, XVNMADDASP, 0xF0000608)                                       \
  /* VSX Vector Negative Multiply-Add Type-M Double-Precision */              \
  V(xvnmaddmdp, XVNMADDMDP, 0xF0000748)                                       \
  /* VSX Vector Negative Multiply-Add Type-M Single-Precision */              \
  V(xvnmaddmsp, XVNMADDMSP, 0xF0000648)                                       \
  /* VSX Vector Negative Multiply-Subtract Type-A Double-Precision */         \
  V(xvnmsubadp, XVNMSUBADP, 0xF0000788)                                       \
  /* VSX Vector Negative Multiply-Subtract Type-A Single-Precision */         \
  V(xvnmsubasp, XVNMSUBASP, 0xF0000688)                                       \
  /* VSX Vector Negative Multiply-Subtract Type-M Double-Precision */         \
  V(xvnmsubmdp, XVNMSUBMDP, 0xF00007C8)                                       \
  /* VSX Vector Negative Multiply-Subtract Type-M Single-Precision */         \
  V(xvnmsubmsp, XVNMSUBMSP, 0xF00006C8)                                       \
  /* VSX Vector Reciprocal Estimate Double-Precision */                       \
  V(xvredp, XVREDP, 0xF0000368)                                               \
  /* VSX Vector Subtract Double-Precision */                                  \
  V(xvsubdp, XVSUBDP, 0xF0000340)                                             \
  /* VSX Vector Subtract Single-Precision */                                  \
  V(xvsubsp, XVSUBSP, 0xF0000240)                                             \
  /* VSX Vector Test for software Divide Double-Precision */                  \
  V(xvtdivdp, XVTDIVDP, 0xF00003E8)                                           \
  /* VSX Vector Test for software Divide Single-Precision */                  \
  V(xvtdivsp, XVTDIVSP, 0xF00002E8)                                           \
  /* VSX Logical AND */                                                       \
  V(xxland, XXLAND, 0xF0000410)                                               \
  /* VSX Logical AND with Complement */                                       \
  V(xxlandc, XXLANDC, 0xF0000450)                                             \
  /* VSX Logical Equivalence */                                               \
  V(xxleqv, XXLEQV, 0xF00005D0)                                               \
  /* VSX Logical NAND */                                                      \
  V(xxlnand, XXLNAND, 0xF0000590)                                             \
  /* VSX Logical NOR */                                                       \
  V(xxlnor, XXLNOR, 0xF0000510)                                               \
  /* VSX Logical OR */                                                        \
  V(xxlor, XXLOR, 0xF0000490)                                                 \
  /* VSX Logical OR with Complement */                                        \
  V(xxlorc, XXLORC, 0xF0000550)                                               \
  /* VSX Logical XOR */                                                       \
  V(xxlxor, XXLXOR, 0xF00004D0)                                               \
  /* VSX Merge High Word */                                                   \
  V(xxmrghw, XXMRGHW, 0xF0000090)                                             \
  /* VSX Merge Low Word */                                                    \
  V(xxmrglw, XXMRGLW, 0xF0000190)                                             \
  /* VSX Permute Doubleword Immediate */                                      \
  V(xxpermdi, XXPERMDI, 0xF0000050)                                           \
  /* VSX Shift Left Double by Word Immediate */                               \
  V(xxsldwi, XXSLDWI, 0xF0000010)                                             \
  /* VSX Splat Word */                                                        \
  V(xxspltw, XXSPLTW, 0xF0000290)
332 
// X-macro list of Z23-form (decimal floating-point quantize/round) opcodes.
// Each entry is V(mnemonic, NAME, opcode-bit-pattern).
#define PPC_Z23_OPCODE_LIST(V)                                    \
  /* Decimal Quantize */                                          \
  V(dqua, DQUA, 0xEC000006)                                       \
  /* Decimal Quantize Immediate */                                \
  V(dquai, DQUAI, 0xEC000086)                                     \
  /* Decimal Quantize Immediate Quad */                           \
  V(dquaiq, DQUAIQ, 0xFC000086)                                   \
  /* Decimal Quantize Quad */                                     \
  V(dquaq, DQUAQ, 0xFC000006)                                     \
  /* Decimal Floating Round To FP Integer Without Inexact */      \
  V(drintn, DRINTN, 0xEC0001C6)                                   \
  /* Decimal Floating Round To FP Integer Without Inexact Quad */ \
  V(drintnq, DRINTNQ, 0xFC0001C6)                                 \
  /* Decimal Floating Round To FP Integer With Inexact */         \
  V(drintx, DRINTX, 0xEC0000C6)                                   \
  /* Decimal Floating Round To FP Integer With Inexact Quad */    \
  V(drintxq, DRINTXQ, 0xFC0000C6)                                 \
  /* Decimal Floating Reround */                                  \
  V(drrnd, DRRND, 0xEC000046)                                     \
  /* Decimal Floating Reround Quad */                             \
  V(drrndq, DRRNDQ, 0xFC000046)
354 
// X-macro list of Z22-form (decimal floating-point shift/test) opcodes.
// Each entry is V(mnemonic, NAME, opcode-bit-pattern).
#define PPC_Z22_OPCODE_LIST(V)                                  \
  /* Decimal Floating Shift Coefficient Left Immediate */       \
  V(dscli, DSCLI, 0xEC000084)                                   \
  /* Decimal Floating Shift Coefficient Left Immediate Quad */  \
  V(dscliq, DSCLIQ, 0xFC000084)                                 \
  /* Decimal Floating Shift Coefficient Right Immediate */      \
  V(dscri, DSCRI, 0xEC0000C4)                                   \
  /* Decimal Floating Shift Coefficient Right Immediate Quad */ \
  V(dscriq, DSCRIQ, 0xFC0000C4)                                 \
  /* Decimal Floating Test Data Class */                        \
  V(dtstdc, DTSTDC, 0xEC000184)                                 \
  /* Decimal Floating Test Data Class Quad */                   \
  V(dtstdcq, DTSTDCQ, 0xFC000184)                               \
  /* Decimal Floating Test Data Group */                        \
  V(dtstdg, DTSTDG, 0xEC0001C4)                                 \
  /* Decimal Floating Test Data Group Quad */                   \
  V(dtstdgq, DTSTDGQ, 0xFC0001C4)
372 
// X-macro list of VSX vector XX2 A-form opcodes.
// Each entry is V(mnemonic, NAME, opcode-bit-pattern).
#define PPC_XX2_OPCODE_VECTOR_A_FORM_LIST(V)                                 \
  /* VSX Vector Absolute Value Double-Precision */                           \
  V(xvabsdp, XVABSDP, 0xF0000764)                                            \
  /* VSX Vector Negate Double-Precision */                                   \
  V(xvnegdp, XVNEGDP, 0xF00007E4)                                            \
  /* VSX Vector Square Root Double-Precision */                              \
  V(xvsqrtdp, XVSQRTDP, 0xF000032C)                                          \
  /* VSX Vector Absolute Value Single-Precision */                           \
  V(xvabssp, XVABSSP, 0xF0000664)                                            \
  /* VSX Vector Negate Single-Precision */                                   \
  V(xvnegsp, XVNEGSP, 0xF00006E4)                                            \
  /* VSX Vector Reciprocal Estimate Single-Precision */                      \
  V(xvresp, XVRESP, 0xF0000268)                                              \
  /* VSX Vector Reciprocal Square Root Estimate Single-Precision */          \
  V(xvrsqrtesp, XVRSQRTESP, 0xF0000228)                                      \
  /* VSX Vector Square Root Single-Precision */                              \
  V(xvsqrtsp, XVSQRTSP, 0xF000022C)                                          \
  /* VSX Vector Convert Single-Precision to Signed Fixed-Point Word */       \
  /* Saturate */                                                             \
  V(xvcvspsxws, XVCVSPSXWS, 0xF0000260)                                      \
  /* VSX Vector Convert Single-Precision to Unsigned Fixed-Point Word */     \
  /* Saturate */                                                             \
  V(xvcvspuxws, XVCVSPUXWS, 0xF0000220)                                      \
  /* VSX Vector Convert Signed Fixed-Point Word to Single-Precision */       \
  V(xvcvsxwsp, XVCVSXWSP, 0xF00002E0)                                        \
  /* VSX Vector Convert Unsigned Fixed-Point Word to Single-Precision */     \
  V(xvcvuxwsp, XVCVUXWSP, 0xF00002A0)                                        \
  /* VSX Vector Round to Double-Precision Integer toward +Infinity */        \
  V(xvrdpip, XVRDPIP, 0xF00003A4)                                            \
  /* VSX Vector Round to Double-Precision Integer toward -Infinity */        \
  V(xvrdpim, XVRDPIM, 0xF00003E4)                                            \
  /* VSX Vector Round to Double-Precision Integer toward Zero */             \
  V(xvrdpiz, XVRDPIZ, 0xF0000364)                                            \
  /* VSX Vector Round to Double-Precision Integer */                         \
  V(xvrdpi, XVRDPI, 0xF0000324)                                              \
  /* VSX Vector Round to Single-Precision Integer toward +Infinity */        \
  V(xvrspip, XVRSPIP, 0xF00002A4)                                            \
  /* VSX Vector Round to Single-Precision Integer toward -Infinity */        \
  V(xvrspim, XVRSPIM, 0xF00002E4)                                            \
  /* VSX Vector Round to Single-Precision Integer toward Zero */             \
  V(xvrspiz, XVRSPIZ, 0xF0000264)                                            \
  /* VSX Vector Round to Single-Precision Integer */                         \
  V(xvrspi, XVRSPI, 0xF0000224)                                              \
  /* VSX Vector Convert Signed Fixed-Point Doubleword to Double-Precision */ \
  V(xvcvsxddp, XVCVSXDDP, 0xF00007E0)                                        \
  /* VSX Vector Convert Unsigned Fixed-Point Doubleword to Double- */        \
  /* Precision */                                                            \
  V(xvcvuxddp, XVCVUXDDP, 0xF00007A0)                                        \
  /* VSX Vector Convert Single-Precision to Double-Precision */              \
  V(xvcvspdp, XVCVSPDP, 0xF0000724)                                          \
  /* VSX Vector Convert Double-Precision to Single-Precision */              \
  V(xvcvdpsp, XVCVDPSP, 0xF0000624)                                          \
  /* VSX Vector Convert Double-Precision to Signed Fixed-Point Word */       \
  /* Saturate */                                                             \
  V(xvcvdpsxws, XVCVDPSXWS, 0xF0000360)                                      \
  /* VSX Vector Convert Double-Precision to Unsigned Fixed-Point Word */     \
  /* Saturate */                                                             \
  V(xvcvdpuxws, XVCVDPUXWS, 0xF0000320)
431 
// X-macro list of VSX scalar XX2 A-form opcodes.
// Each entry is V(mnemonic, NAME, opcode-bit-pattern).
#define PPC_XX2_OPCODE_SCALAR_A_FORM_LIST(V)                                \
  /* VSX Scalar Convert Double-Precision to Single-Precision format Non- */ \
  /* signalling */                                                          \
  V(xscvdpspn, XSCVDPSPN, 0xF000042C)                                       \
  /* VSX Scalar Convert Single-Precision to Double-Precision format Non- */ \
  /* signalling */                                                          \
  V(xscvspdpn, XSCVSPDPN, 0xF000052C)
439 
// X-macro list of XX2 B-form (vector byte-reverse) opcodes.
// Each entry is V(mnemonic, NAME, opcode-bit-pattern).
#define PPC_XX2_OPCODE_B_FORM_LIST(V)  \
  /* Vector Byte-Reverse Quadword */   \
  V(xxbrq, XXBRQ, 0xF01F076C)          \
  /* Vector Byte-Reverse Doubleword */ \
  V(xxbrd, XXBRD, 0xF017076C)          \
  /* Vector Byte-Reverse Word */       \
  V(xxbrw, XXBRW, 0xF00F076C)          \
  /* Vector Byte-Reverse Halfword */   \
  V(xxbrh, XXBRH, 0xF007076C)
449 
450 #define PPC_XX2_OPCODE_UNUSED_LIST(V)                                        \
451   /* VSX Scalar Square Root Double-Precision */                              \
452   V(xssqrtdp, XSSQRTDP, 0xF000012C)                                          \
453   /* VSX Scalar Reciprocal Estimate Single-Precision */                      \
454   V(xsresp, XSRESP, 0xF0000068)                                              \
455   /* VSX Scalar Reciprocal Square Root Estimate Single-Precision */          \
456   V(xsrsqrtesp, XSRSQRTESP, 0xF0000028)                                      \
457   /* VSX Scalar Square Root Single-Precision */                              \
458   V(xssqrtsp, XSSQRTSP, 0xF000002C)                                          \
459   /* VSX Scalar Absolute Value Double-Precision */                           \
460   V(xsabsdp, XSABSDP, 0xF0000564)                                            \
461   /* VSX Scalar Convert Double-Precision to Single-Precision */              \
462   V(xscvdpsp, XSCVDPSP, 0xF0000424)                                          \
463   /* VSX Scalar Convert Double-Precision to Signed Fixed-Point Doubleword */ \
464   /* Saturate */                                                             \
465   V(xscvdpsxds, XSCVDPSXDS, 0xF0000560)                                      \
466   /* VSX Scalar Convert Double-Precision to Signed Fixed-Point Word */       \
467   /* Saturate */                                                             \
468   V(xscvdpsxws, XSCVDPSXWS, 0xF0000160)                                      \
469   /* VSX Scalar Convert Double-Precision to Unsigned Fixed-Point */          \
470   /* Doubleword Saturate */                                                  \
471   V(xscvdpuxds, XSCVDPUXDS, 0xF0000520)                                      \
472   /* VSX Scalar Convert Double-Precision to Unsigned Fixed-Point Word */     \
473   /* Saturate */                                                             \
474   V(xscvdpuxws, XSCVDPUXWS, 0xF0000120)                                      \
475   /* VSX Scalar Convert Single-Precision to Double-Precision (p=1) */        \
476   V(xscvspdp, XSCVSPDP, 0xF0000524)                                          \
477   /* VSX Scalar Convert Signed Fixed-Point Doubleword to Double-Precision */ \
478   V(xscvsxddp, XSCVSXDDP, 0xF00005E0)                                        \
479   /* VSX Scalar Convert Signed Fixed-Point Doubleword to Single-Precision */ \
480   V(xscvsxdsp, XSCVSXDSP, 0xF00004E0)                                        \
481   /* VSX Scalar Convert Unsigned Fixed-Point Doubleword to Double- */        \
482   /* Precision */                                                            \
483   V(xscvuxddp, XSCVUXDDP, 0xF00005A0)                                        \
484   /* VSX Scalar Convert Unsigned Fixed-Point Doubleword to Single- */        \
485   /* Precision */                                                            \
486   V(xscvuxdsp, XSCVUXDSP, 0xF00004A0)                                        \
487   /* VSX Scalar Negative Absolute Value Double-Precision */                  \
488   V(xsnabsdp, XSNABSDP, 0xF00005A4)                                          \
489   /* VSX Scalar Negate Double-Precision */                                   \
490   V(xsnegdp, XSNEGDP, 0xF00005E4)                                            \
491   /* VSX Scalar Round to Double-Precision Integer */                         \
492   V(xsrdpi, XSRDPI, 0xF0000124)                                              \
493   /* VSX Scalar Round to Double-Precision Integer using Current rounding */  \
494   /* mode */                                                                 \
495   V(xsrdpic, XSRDPIC, 0xF00001AC)                                            \
496   /* VSX Scalar Round to Double-Precision Integer toward -Infinity */        \
497   V(xsrdpim, XSRDPIM, 0xF00001E4)                                            \
498   /* VSX Scalar Round to Double-Precision Integer toward +Infinity */        \
499   V(xsrdpip, XSRDPIP, 0xF00001A4)                                            \
500   /* VSX Scalar Round to Double-Precision Integer toward Zero */             \
501   V(xsrdpiz, XSRDPIZ, 0xF0000164)                                            \
502   /* VSX Scalar Round to Single-Precision */                                 \
503   V(xsrsp, XSRSP, 0xF0000464)                                                \
504   /* VSX Scalar Reciprocal Square Root Estimate Double-Precision */          \
505   V(xsrsqrtedp, XSRSQRTEDP, 0xF0000128)                                      \
506   /* VSX Scalar Test for software Square Root Double-Precision */            \
507   V(xstsqrtdp, XSTSQRTDP, 0xF00001A8)                                        \
508   /* VSX Vector Convert Double-Precision to Signed Fixed-Point Doubleword */ \
509   /* Saturate */                                                             \
510   V(xvcvdpsxds, XVCVDPSXDS, 0xF0000760)                                      \
511   /* VSX Vector Convert Double-Precision to Unsigned Fixed-Point */          \
512   /* Doubleword Saturate */                                                  \
513   V(xvcvdpuxds, XVCVDPUXDS, 0xF0000720)                                      \
514   /* VSX Vector Convert Single-Precision to Signed Fixed-Point Doubleword */ \
515   /* Saturate */                                                             \
516   V(xvcvspsxds, XVCVSPSXDS, 0xF0000660)                                      \
517   /* VSX Vector Convert Single-Precision to Unsigned Fixed-Point */          \
518   /* Doubleword Saturate */                                                  \
519   V(xvcvspuxds, XVCVSPUXDS, 0xF0000620)                                      \
520   /* VSX Vector Convert Signed Fixed-Point Doubleword to Single-Precision */ \
521   V(xvcvsxdsp, XVCVSXDSP, 0xF00006E0)                                        \
522   /* VSX Vector Convert Signed Fixed-Point Word to Double-Precision */       \
523   V(xvcvsxwdp, XVCVSXWDP, 0xF00003E0)                                        \
524   /* VSX Vector Convert Unsigned Fixed-Point Doubleword to Single- */        \
525   /* Precision */                                                            \
526   V(xvcvuxdsp, XVCVUXDSP, 0xF00006A0)                                        \
527   /* VSX Vector Convert Unsigned Fixed-Point Word to Double-Precision */     \
528   V(xvcvuxwdp, XVCVUXWDP, 0xF00003A0)                                        \
529   /* VSX Vector Negative Absolute Value Double-Precision */                  \
530   V(xvnabsdp, XVNABSDP, 0xF00007A4)                                          \
531   /* VSX Vector Negative Absolute Value Single-Precision */                  \
532   V(xvnabssp, XVNABSSP, 0xF00006A4)                                          \
533   /* VSX Vector Round to Double-Precision Integer using Current rounding */  \
534   /* mode */                                                                 \
535   V(xvrdpic, XVRDPIC, 0xF00003AC)                                            \
536   /* VSX Vector Round to Single-Precision Integer using Current rounding */  \
537   /* mode */                                                                 \
538   V(xvrspic, XVRSPIC, 0xF00002AC)                                            \
539   /* VSX Vector Reciprocal Square Root Estimate Double-Precision */          \
540   V(xvrsqrtedp, XVRSQRTEDP, 0xF0000328)                                      \
541   /* VSX Vector Test for software Square Root Double-Precision */            \
542   V(xvtsqrtdp, XVTSQRTDP, 0xF00003A8)                                        \
543   /* VSX Vector Test for software Square Root Single-Precision */            \
544   V(xvtsqrtsp, XVTSQRTSP, 0xF00002A8)                                        \
545   /* Vector Splat Immediate Byte */                                          \
546   V(xxspltib, XXSPLTIB, 0xF00002D0)
547 
// Aggregated XX2-form opcode list: invokes V(mnemonic, ENUM_NAME, encoding)
// once per instruction by concatenating the four XX2 sublists (vector A-form,
// scalar A-form, B-form, and the currently-unused remainder).
// NOTE(review): the expansion order of the sublists presumably matters for
// any enum or dispatch table generated from this list — confirm before
// reordering.
#define PPC_XX2_OPCODE_LIST(V)         \
  PPC_XX2_OPCODE_VECTOR_A_FORM_LIST(V) \
  PPC_XX2_OPCODE_SCALAR_A_FORM_LIST(V) \
  PPC_XX2_OPCODE_B_FORM_LIST(V)        \
  PPC_XX2_OPCODE_UNUSED_LIST(V)
553 
554 #define PPC_EVX_OPCODE_LIST(V)                                                \
555   /* Vector Load Double Word into Double Word by External PID Indexed */      \
556   V(evlddepx, EVLDDEPX, 0x7C00063E)                                           \
557   /* Vector Store Double of Double by External PID Indexed */                 \
558   V(evstddepx, EVSTDDEPX, 0x7C00073E)                                         \
559   /* Bit Reversed Increment */                                                \
560   V(brinc, BRINC, 0x1000020F)                                                 \
561   /* Vector Absolute Value */                                                 \
562   V(evabs, EVABS, 0x10000208)                                                 \
563   /* Vector Add Immediate Word */                                             \
564   V(evaddiw, EVADDIW, 0x10000202)                                             \
565   /* Vector Add Signed, Modulo, Integer to Accumulator Word */                \
566   V(evaddsmiaaw, EVADDSMIAAW, 0x100004C9)                                     \
567   /* Vector Add Signed, Saturate, Integer to Accumulator Word */              \
568   V(evaddssiaaw, EVADDSSIAAW, 0x100004C1)                                     \
569   /* Vector Add Unsigned, Modulo, Integer to Accumulator Word */              \
570   V(evaddumiaaw, EVADDUMIAAW, 0x100004C8)                                     \
571   /* Vector Add Unsigned, Saturate, Integer to Accumulator Word */            \
572   V(evaddusiaaw, EVADDUSIAAW, 0x100004C0)                                     \
573   /* Vector Add Word */                                                       \
574   V(evaddw, EVADDW, 0x10000200)                                               \
575   /* Vector AND */                                                            \
576   V(evand, EVAND, 0x10000211)                                                 \
577   /* Vector AND with Complement */                                            \
578   V(evandc, EVANDC, 0x10000212)                                               \
579   /* Vector Compare Equal */                                                  \
580   V(evcmpeq, EVCMPEQ, 0x10000234)                                             \
581   /* Vector Compare Greater Than Signed */                                    \
582   V(evcmpgts, EVCMPGTS, 0x10000231)                                           \
583   /* Vector Compare Greater Than Unsigned */                                  \
584   V(evcmpgtu, EVCMPGTU, 0x10000230)                                           \
585   /* Vector Compare Less Than Signed */                                       \
586   V(evcmplts, EVCMPLTS, 0x10000233)                                           \
587   /* Vector Compare Less Than Unsigned */                                     \
588   V(evcmpltu, EVCMPLTU, 0x10000232)                                           \
589   /* Vector Count Leading Signed Bits Word */                                 \
590   V(evcntlsw, EVCNTLSW, 0x1000020E)                                           \
591   /* Vector Count Leading Zeros Word */                                       \
592   V(evcntlzw, EVCNTLZW, 0x1000020D)                                           \
593   /* Vector Divide Word Signed */                                             \
594   V(evdivws, EVDIVWS, 0x100004C6)                                             \
595   /* Vector Divide Word Unsigned */                                           \
596   V(evdivwu, EVDIVWU, 0x100004C7)                                             \
597   /* Vector Equivalent */                                                     \
598   V(eveqv, EVEQV, 0x10000219)                                                 \
599   /* Vector Extend Sign Byte */                                               \
600   V(evextsb, EVEXTSB, 0x1000020A)                                             \
601   /* Vector Extend Sign Half Word */                                          \
602   V(evextsh, EVEXTSH, 0x1000020B)                                             \
603   /* Vector Load Double Word into Double Word */                              \
604   V(evldd, EVLDD, 0x10000301)                                                 \
605   /* Vector Load Double Word into Double Word Indexed */                      \
606   V(evlddx, EVLDDX, 0x10000300)                                               \
607   /* Vector Load Double into Four Half Words */                               \
608   V(evldh, EVLDH, 0x10000305)                                                 \
609   /* Vector Load Double into Four Half Words Indexed */                       \
610   V(evldhx, EVLDHX, 0x10000304)                                               \
611   /* Vector Load Double into Two Words */                                     \
612   V(evldw, EVLDW, 0x10000303)                                                 \
613   /* Vector Load Double into Two Words Indexed */                             \
614   V(evldwx, EVLDWX, 0x10000302)                                               \
615   /* Vector Load Half Word into Half Words Even and Splat */                  \
616   V(evlhhesplat, EVLHHESPLAT, 0x10000309)                                     \
617   /* Vector Load Half Word into Half Words Even and Splat Indexed */          \
618   V(evlhhesplatx, EVLHHESPLATX, 0x10000308)                                   \
619   /* Vector Load Half Word into Half Word Odd Signed and Splat */             \
620   V(evlhhossplat, EVLHHOSSPLAT, 0x1000030F)                                   \
621   /* Vector Load Half Word into Half Word Odd Signed and Splat Indexed */     \
622   V(evlhhossplatx, EVLHHOSSPLATX, 0x1000030E)                                 \
623   /* Vector Load Half Word into Half Word Odd Unsigned and Splat */           \
624   V(evlhhousplat, EVLHHOUSPLAT, 0x1000030D)                                   \
625   /* Vector Load Half Word into Half Word Odd Unsigned and Splat Indexed */   \
626   V(evlhhousplatx, EVLHHOUSPLATX, 0x1000030C)                                 \
627   /* Vector Load Word into Two Half Words Even */                             \
628   V(evlwhe, EVLWHE, 0x10000311)                                               \
629   /* Vector Load Word into Two Half Words Odd Signed (with sign extension) */ \
630   V(evlwhos, EVLWHOS, 0x10000317)                                             \
631   /* Vector Load Word into Two Half Words Odd Signed Indexed (with sign */    \
632   /* extension) */                                                            \
633   V(evlwhosx, EVLWHOSX, 0x10000316)                                           \
634   /* Vector Load Word into Two Half Words Odd Unsigned (zero-extended) */     \
635   V(evlwhou, EVLWHOU, 0x10000315)                                             \
636   /* Vector Load Word into Two Half Words Odd Unsigned Indexed (zero- */      \
637   /* extended) */                                                             \
638   V(evlwhoux, EVLWHOUX, 0x10000314)                                           \
639   /* Vector Load Word into Two Half Words and Splat */                        \
640   V(evlwhsplat, EVLWHSPLAT, 0x1000031D)                                       \
641   /* Vector Load Word into Two Half Words and Splat Indexed */                \
642   V(evlwhsplatx, EVLWHSPLATX, 0x1000031C)                                     \
643   /* Vector Load Word into Word and Splat */                                  \
644   V(evlwwsplat, EVLWWSPLAT, 0x10000319)                                       \
645   /* Vector Load Word into Word and Splat Indexed */                          \
646   V(evlwwsplatx, EVLWWSPLATX, 0x10000318)                                     \
647   /* Vector Merge High */                                                     \
648   V(evmergehi, EVMERGEHI, 0x1000022C)                                         \
649   /* Vector Merge High/Low */                                                 \
650   V(evmergehilo, EVMERGEHILO, 0x1000022E)                                     \
651   /* Vector Merge Low */                                                      \
652   V(evmergelo, EVMERGELO, 0x1000022D)                                         \
653   /* Vector Merge Low/High */                                                 \
654   V(evmergelohi, EVMERGELOHI, 0x1000022F)                                     \
655   /* Vector Multiply Half Words, Even, Guarded, Signed, Modulo, Fractional */ \
656   /* and Accumulate */                                                        \
657   V(evmhegsmfaa, EVMHEGSMFAA, 0x1000052B)                                     \
658   /* Vector Multiply Half Words, Even, Guarded, Signed, Modulo, Fractional */ \
659   /* and Accumulate Negative */                                               \
660   V(evmhegsmfan, EVMHEGSMFAN, 0x100005AB)                                     \
661   /* Vector Multiply Half Words, Even, Guarded, Signed, Modulo, Integer */    \
662   /* and Accumulate */                                                        \
663   V(evmhegsmiaa, EVMHEGSMIAA, 0x10000529)                                     \
664   /* Vector Multiply Half Words, Even, Guarded, Signed, Modulo, Integer */    \
665   /* and Accumulate Negative */                                               \
666   V(evmhegsmian, EVMHEGSMIAN, 0x100005A9)                                     \
667   /* Vector Multiply Half Words, Even, Guarded, Unsigned, Modulo, Integer */  \
668   /* and Accumulate */                                                        \
669   V(evmhegumiaa, EVMHEGUMIAA, 0x10000528)                                     \
670   /* Vector Multiply Half Words, Even, Guarded, Unsigned, Modulo, Integer */  \
671   /* and Accumulate Negative */                                               \
672   V(evmhegumian, EVMHEGUMIAN, 0x100005A8)                                     \
673   /* Vector Multiply Half Words, Even, Signed, Modulo, Fractional */          \
674   V(evmhesmf, EVMHESMF, 0x1000040B)                                           \
675   /* Vector Multiply Half Words, Even, Signed, Modulo, Fractional to */       \
676   /* Accumulator */                                                           \
677   V(evmhesmfa, EVMHESMFA, 0x1000042B)                                         \
678   /* Vector Multiply Half Words, Even, Signed, Modulo, Fractional and */      \
679   /* Accumulate into Words */                                                 \
680   V(evmhesmfaaw, EVMHESMFAAW, 0x1000050B)                                     \
681   /* Vector Multiply Half Words, Even, Signed, Modulo, Fractional and */      \
682   /* Accumulate Negative into Words */                                        \
683   V(evmhesmfanw, EVMHESMFANW, 0x1000058B)                                     \
684   /* Vector Multiply Half Words, Even, Signed, Modulo, Integer */             \
685   V(evmhesmi, EVMHESMI, 0x10000409)                                           \
686   /* Vector Multiply Half Words, Even, Signed, Modulo, Integer to */          \
687   /* Accumulator */                                                           \
688   V(evmhesmia, EVMHESMIA, 0x10000429)                                         \
689   /* Vector Multiply Half Words, Even, Signed, Modulo, Integer and */         \
690   /* Accumulate into Words */                                                 \
691   V(evmhesmiaaw, EVMHESMIAAW, 0x10000509)                                     \
692   /* Vector Multiply Half Words, Even, Signed, Modulo, Integer and */         \
693   /* Accumulate Negative into Words */                                        \
694   V(evmhesmianw, EVMHESMIANW, 0x10000589)                                     \
695   /* Vector Multiply Half Words, Even, Signed, Saturate, Fractional */        \
696   V(evmhessf, EVMHESSF, 0x10000403)                                           \
697   /* Vector Multiply Half Words, Even, Signed, Saturate, Fractional to */     \
698   /* Accumulator */                                                           \
699   V(evmhessfa, EVMHESSFA, 0x10000423)                                         \
700   /* Vector Multiply Half Words, Even, Signed, Saturate, Fractional and */    \
701   /* Accumulate into Words */                                                 \
702   V(evmhessfaaw, EVMHESSFAAW, 0x10000503)                                     \
703   /* Vector Multiply Half Words, Even, Signed, Saturate, Fractional and */    \
704   /* Accumulate Negative into Words */                                        \
705   V(evmhessfanw, EVMHESSFANW, 0x10000583)                                     \
706   /* Vector Multiply Half Words, Even, Signed, Saturate, Integer and */       \
707   /* Accumulate into Words */                                                 \
708   V(evmhessiaaw, EVMHESSIAAW, 0x10000501)                                     \
709   /* Vector Multiply Half Words, Even, Signed, Saturate, Integer and */       \
710   /* Accumulate Negative into Words */                                        \
711   V(evmhessianw, EVMHESSIANW, 0x10000581)                                     \
712   /* Vector Multiply Half Words, Even, Unsigned, Modulo, Integer */           \
713   V(evmheumi, EVMHEUMI, 0x10000408)                                           \
714   /* Vector Multiply Half Words, Even, Unsigned, Modulo, Integer to */        \
715   /* Accumulator */                                                           \
716   V(evmheumia, EVMHEUMIA, 0x10000428)                                         \
717   /* Vector Multiply Half Words, Even, Unsigned, Modulo, Integer and */       \
718   /* Accumulate into Words */                                                 \
719   V(evmheumiaaw, EVMHEUMIAAW, 0x10000508)                                     \
720   /* Vector Multiply Half Words, Even, Unsigned, Modulo, Integer and */       \
721   /* Accumulate Negative into Words */                                        \
722   V(evmheumianw, EVMHEUMIANW, 0x10000588)                                     \
723   /* Vector Multiply Half Words, Even, Unsigned, Saturate, Integer and */     \
724   /* Accumulate into Words */                                                 \
725   V(evmheusiaaw, EVMHEUSIAAW, 0x10000500)                                     \
726   /* Vector Multiply Half Words, Even, Unsigned, Saturate, Integer and */     \
727   /* Accumulate Negative into Words */                                        \
728   V(evmheusianw, EVMHEUSIANW, 0x10000580)                                     \
729   /* Vector Multiply Half Words, Odd, Guarded, Signed, Modulo, Fractional */  \
730   /* and Accumulate */                                                        \
731   V(evmhogsmfaa, EVMHOGSMFAA, 0x1000052F)                                     \
732   /* Vector Multiply Half Words, Odd, Guarded, Signed, Modulo, Fractional */  \
733   /* and Accumulate Negative */                                               \
734   V(evmhogsmfan, EVMHOGSMFAN, 0x100005AF)                                     \
735   /* Vector Multiply Half Words, Odd, Guarded, Signed, Modulo, Integer, */    \
736   /* and Accumulate */                                                        \
737   V(evmhogsmiaa, EVMHOGSMIAA, 0x1000052D)                                     \
738   /* Vector Multiply Half Words, Odd, Guarded, Signed, Modulo, Integer and */ \
739   /* Accumulate Negative */                                                   \
740   V(evmhogsmian, EVMHOGSMIAN, 0x100005AD)                                     \
741   /* Vector Multiply Half Words, Odd, Guarded, Unsigned, Modulo, Integer */   \
742   /* and Accumulate */                                                        \
743   V(evmhogumiaa, EVMHOGUMIAA, 0x1000052C)                                     \
744   /* Vector Multiply Half Words, Odd, Guarded, Unsigned, Modulo, Integer */   \
745   /* and Accumulate Negative */                                               \
746   V(evmhogumian, EVMHOGUMIAN, 0x100005AC)                                     \
747   /* Vector Multiply Half Words, Odd, Signed, Modulo, Fractional */           \
748   V(evmhosmf, EVMHOSMF, 0x1000040F)                                           \
749   /* Vector Multiply Half Words, Odd, Signed, Modulo, Fractional to */        \
750   /* Accumulator */                                                           \
751   V(evmhosmfa, EVMHOSMFA, 0x1000042F)                                         \
752   /* Vector Multiply Half Words, Odd, Signed, Modulo, Fractional and */       \
753   /* Accumulate into Words */                                                 \
754   V(evmhosmfaaw, EVMHOSMFAAW, 0x1000050F)                                     \
755   /* Vector Multiply Half Words, Odd, Signed, Modulo, Fractional and */       \
756   /* Accumulate Negative into Words */                                        \
757   V(evmhosmfanw, EVMHOSMFANW, 0x1000058F)                                     \
758   /* Vector Multiply Half Words, Odd, Signed, Modulo, Integer */              \
759   V(evmhosmi, EVMHOSMI, 0x1000040D)                                           \
760   /* Vector Multiply Half Words, Odd, Signed, Modulo, Integer to */           \
761   /* Accumulator */                                                           \
762   V(evmhosmia, EVMHOSMIA, 0x1000042D)                                         \
763   /* Vector Multiply Half Words, Odd, Signed, Modulo, Integer and */          \
764   /* Accumulate into Words */                                                 \
765   V(evmhosmiaaw, EVMHOSMIAAW, 0x1000050D)                                     \
766   /* Vector Multiply Half Words, Odd, Signed, Modulo, Integer and */          \
767   /* Accumulate Negative into Words */                                        \
768   V(evmhosmianw, EVMHOSMIANW, 0x1000058D)                                     \
769   /* Vector Multiply Half Words, Odd, Signed, Saturate, Fractional */         \
770   V(evmhossf, EVMHOSSF, 0x10000407)                                           \
771   /* Vector Multiply Half Words, Odd, Signed, Saturate, Fractional to */      \
772   /* Accumulator */                                                           \
773   V(evmhossfa, EVMHOSSFA, 0x10000427)                                         \
774   /* Vector Multiply Half Words, Odd, Signed, Saturate, Fractional and */     \
775   /* Accumulate into Words */                                                 \
776   V(evmhossfaaw, EVMHOSSFAAW, 0x10000507)                                     \
777   /* Vector Multiply Half Words, Odd, Signed, Saturate, Fractional and */     \
778   /* Accumulate Negative into Words */                                        \
779   V(evmhossfanw, EVMHOSSFANW, 0x10000587)                                     \
780   /* Vector Multiply Half Words, Odd, Signed, Saturate, Integer and */        \
781   /* Accumulate into Words */                                                 \
782   V(evmhossiaaw, EVMHOSSIAAW, 0x10000505)                                     \
783   /* Vector Multiply Half Words, Odd, Signed, Saturate, Integer and */        \
784   /* Accumulate Negative into Words */                                        \
785   V(evmhossianw, EVMHOSSIANW, 0x10000585)                                     \
786   /* Vector Multiply Half Words, Odd, Unsigned, Modulo, Integer */            \
787   V(evmhoumi, EVMHOUMI, 0x1000040C)                                           \
788   /* Vector Multiply Half Words, Odd, Unsigned, Modulo, Integer to */         \
789   /* Accumulator */                                                           \
790   V(evmhoumia, EVMHOUMIA, 0x1000042C)                                         \
791   /* Vector Multiply Half Words, Odd, Unsigned, Modulo, Integer and */        \
792   /* Accumulate into Words */                                                 \
793   V(evmhoumiaaw, EVMHOUMIAAW, 0x1000050C)                                     \
794   /* Vector Multiply Half Words, Odd, Unsigned, Modulo, Integer and */        \
795   /* Accumulate Negative into Words */                                        \
796   V(evmhoumianw, EVMHOUMIANW, 0x1000058C)                                     \
797   /* Vector Multiply Half Words, Odd, Unsigned, Saturate, Integer and */      \
798   /* Accumulate into Words */                                                 \
799   V(evmhousiaaw, EVMHOUSIAAW, 0x10000504)                                     \
800   /* Vector Multiply Half Words, Odd, Unsigned, Saturate, Integer and */      \
801   /* Accumulate Negative into Words */                                        \
802   V(evmhousianw, EVMHOUSIANW, 0x10000584)                                     \
803   /* Initialize Accumulator */                                                \
804   V(evmra, EVMRA, 0x100004C4)                                                 \
805   /* Vector Multiply Word High Signed, Modulo, Fractional */                  \
806   V(evmwhsmf, EVMWHSMF, 0x1000044F)                                           \
807   /* Vector Multiply Word High Signed, Modulo, Fractional to Accumulator */   \
808   V(evmwhsmfa, EVMWHSMFA, 0x1000046F)                                         \
809   /* Vector Multiply Word High Signed, Modulo, Integer */                     \
810   V(evmwhsmi, EVMWHSMI, 0x1000044D)                                           \
811   /* Vector Multiply Word High Signed, Modulo, Integer to Accumulator */      \
812   V(evmwhsmia, EVMWHSMIA, 0x1000046D)                                         \
813   /* Vector Multiply Word High Signed, Saturate, Fractional */                \
814   V(evmwhssf, EVMWHSSF, 0x10000447)                                           \
815   /* Vector Multiply Word High Signed, Saturate, Fractional to Accumulator */ \
816   V(evmwhssfa, EVMWHSSFA, 0x10000467)                                         \
817   /* Vector Multiply Word High Unsigned, Modulo, Integer */                   \
818   V(evmwhumi, EVMWHUMI, 0x1000044C)                                           \
819   /* Vector Multiply Word High Unsigned, Modulo, Integer to Accumulator */    \
820   V(evmwhumia, EVMWHUMIA, 0x1000046C)                                         \
821   /* Vector Multiply Word Low Signed, Modulo, Integer and Accumulate in */    \
822   /* Words */                                                                 \
823   V(evmwlsmiaaw, EVMWLSMIAAW, 0x10000549)                                     \
824   /* Vector Multiply Word Low Signed, Modulo, Integer and Accumulate */       \
825   /* Negative in Words */                                                     \
826   V(evmwlsmianw, EVMWLSMIANW, 0x100005C9)                                     \
827   /* Vector Multiply Word Low Signed, Saturate, Integer and Accumulate in */  \
828   /* Words */                                                                 \
829   V(evmwlssiaaw, EVMWLSSIAAW, 0x10000541)                                     \
830   /* Vector Multiply Word Low Signed, Saturate, Integer and Accumulate */     \
831   /* Negative in Words */                                                     \
832   V(evmwlssianw, EVMWLSSIANW, 0x100005C1)                                     \
833   /* Vector Multiply Word Low Unsigned, Modulo, Integer */                    \
834   V(evmwlumi, EVMWLUMI, 0x10000448)                                           \
835   /* Vector Multiply Word Low Unsigned, Modulo, Integer to Accumulator */     \
836   V(evmwlumia, EVMWLUMIA, 0x10000468)                                         \
837   /* Vector Multiply Word Low Unsigned, Modulo, Integer and Accumulate in */  \
838   /* Words */                                                                 \
839   V(evmwlumiaaw, EVMWLUMIAAW, 0x10000548)                                     \
840   /* Vector Multiply Word Low Unsigned, Modulo, Integer and Accumulate */     \
841   /* Negative in Words */                                                     \
842   V(evmwlumianw, EVMWLUMIANW, 0x100005C8)                                     \
843   /* Vector Multiply Word Low Unsigned, Saturate, Integer and Accumulate */   \
844   /* in Words */                                                              \
845   V(evmwlusiaaw, EVMWLUSIAAW, 0x10000540)                                     \
846   /* Vector Multiply Word Low Unsigned, Saturate, Integer and Accumulate */   \
847   /* Negative in Words */                                                     \
848   V(evmwlusianw, EVMWLUSIANW, 0x100005C0)                                     \
849   /* Vector Multiply Word Signed, Modulo, Fractional */                       \
850   V(evmwsmf, EVMWSMF, 0x1000045B)                                             \
851   /* Vector Multiply Word Signed, Modulo, Fractional to Accumulator */        \
852   V(evmwsmfa, EVMWSMFA, 0x1000047B)                                           \
853   /* Vector Multiply Word Signed, Modulo, Fractional and Accumulate */        \
854   V(evmwsmfaa, EVMWSMFAA, 0x1000055B)                                         \
855   /* Vector Multiply Word Signed, Modulo, Fractional and Accumulate */        \
856   /* Negative */                                                              \
857   V(evmwsmfan, EVMWSMFAN, 0x100005DB)                                         \
858   /* Vector Multiply Word Signed, Modulo, Integer */                          \
859   V(evmwsmi, EVMWSMI, 0x10000459)                                             \
860   /* Vector Multiply Word Signed, Modulo, Integer to Accumulator */           \
861   V(evmwsmia, EVMWSMIA, 0x10000479)                                           \
862   /* Vector Multiply Word Signed, Modulo, Integer and Accumulate */           \
863   V(evmwsmiaa, EVMWSMIAA, 0x10000559)                                         \
864   /* Vector Multiply Word Signed, Modulo, Integer and Accumulate Negative */  \
865   V(evmwsmian, EVMWSMIAN, 0x100005D9)                                         \
866   /* Vector Multiply Word Signed, Saturate, Fractional */                     \
867   V(evmwssf, EVMWSSF, 0x10000453)                                             \
868   /* Vector Multiply Word Signed, Saturate, Fractional to Accumulator */      \
869   V(evmwssfa, EVMWSSFA, 0x10000473)                                           \
870   /* Vector Multiply Word Signed, Saturate, Fractional and Accumulate */      \
871   V(evmwssfaa, EVMWSSFAA, 0x10000553)                                         \
872   /* Vector Multiply Word Signed, Saturate, Fractional and Accumulate */      \
873   /* Negative */                                                              \
874   V(evmwssfan, EVMWSSFAN, 0x100005D3)                                         \
875   /* Vector Multiply Word Unsigned, Modulo, Integer */                        \
876   V(evmwumi, EVMWUMI, 0x10000458)                                             \
877   /* Vector Multiply Word Unsigned, Modulo, Integer to Accumulator */         \
878   V(evmwumia, EVMWUMIA, 0x10000478)                                           \
879   /* Vector Multiply Word Unsigned, Modulo, Integer and Accumulate */         \
880   V(evmwumiaa, EVMWUMIAA, 0x10000558)                                         \
881   /* Vector Multiply Word Unsigned, Modulo, Integer and Accumulate */         \
882   /* Negative */                                                              \
883   V(evmwumian, EVMWUMIAN, 0x100005D8)                                         \
884   /* Vector NAND */                                                           \
885   V(evnand, EVNAND, 0x1000021E)                                               \
886   /* Vector Negate */                                                         \
887   V(evneg, EVNEG, 0x10000209)                                                 \
888   /* Vector NOR */                                                            \
889   V(evnor, EVNOR, 0x10000218)                                                 \
890   /* Vector OR */                                                             \
891   V(evor, EVOR, 0x10000217)                                                   \
892   /* Vector OR with Complement */                                             \
893   V(evorc, EVORC, 0x1000021B)                                                 \
894   /* Vector Rotate Left Word */                                               \
895   V(evrlw, EVRLW, 0x10000228)                                                 \
896   /* Vector Rotate Left Word Immediate */                                     \
897   V(evrlwi, EVRLWI, 0x1000022A)                                               \
898   /* Vector Round Word */                                                     \
899   V(evrndw, EVRNDW, 0x1000020C)                                               \
900   /* Vector Shift Left Word */                                                \
901   V(evslw, EVSLW, 0x10000224)                                                 \
902   /* Vector Shift Left Word Immediate */                                      \
903   V(evslwi, EVSLWI, 0x10000226)                                               \
904   /* Vector Splat Fractional Immediate */                                     \
905   V(evsplatfi, EVSPLATFI, 0x1000022B)                                         \
906   /* Vector Splat Immediate */                                                \
907   V(evsplati, EVSPLATI, 0x10000229)                                           \
908   /* Vector Shift Right Word Immediate Signed */                              \
909   V(evsrwis, EVSRWIS, 0x10000223)                                             \
910   /* Vector Shift Right Word Immediate Unsigned */                            \
911   V(evsrwiu, EVSRWIU, 0x10000222)                                             \
912   /* Vector Shift Right Word Signed */                                        \
913   V(evsrws, EVSRWS, 0x10000221)                                               \
914   /* Vector Shift Right Word Unsigned */                                      \
915   V(evsrwu, EVSRWU, 0x10000220)                                               \
916   /* Vector Store Double of Double */                                         \
917   V(evstdd, EVSTDD, 0x10000321)                                               \
918   /* Vector Store Double of Double Indexed */                                 \
919   V(evstddx, EVSTDDX, 0x10000320)                                             \
920   /* Vector Store Double of Four Half Words */                                \
921   V(evstdh, EVSTDH, 0x10000325)                                               \
922   /* Vector Store Double of Four Half Words Indexed */                        \
923   V(evstdhx, EVSTDHX, 0x10000324)                                             \
924   /* Vector Store Double of Two Words */                                      \
925   V(evstdw, EVSTDW, 0x10000323)                                               \
926   /* Vector Store Double of Two Words Indexed */                              \
927   V(evstdwx, EVSTDWX, 0x10000322)                                             \
928   /* Vector Store Word of Two Half Words from Even */                         \
929   V(evstwhe, EVSTWHE, 0x10000331)                                             \
930   /* Vector Store Word of Two Half Words from Even Indexed */                 \
931   V(evstwhex, EVSTWHEX, 0x10000330)                                           \
932   /* Vector Store Word of Two Half Words from Odd */                          \
933   V(evstwho, EVSTWHO, 0x10000335)                                             \
934   /* Vector Store Word of Two Half Words from Odd Indexed */                  \
935   V(evstwhox, EVSTWHOX, 0x10000334)                                           \
936   /* Vector Store Word of Word from Even */                                   \
937   V(evstwwe, EVSTWWE, 0x10000339)                                             \
938   /* Vector Store Word of Word from Even Indexed */                           \
939   V(evstwwex, EVSTWWEX, 0x10000338)                                           \
940   /* Vector Store Word of Word from Odd */                                    \
941   V(evstwwo, EVSTWWO, 0x1000033D)                                             \
942   /* Vector Store Word of Word from Odd Indexed */                            \
943   V(evstwwox, EVSTWWOX, 0x1000033C)                                           \
944   /* Vector Subtract Signed, Modulo, Integer to Accumulator Word */           \
945   V(evsubfsmiaaw, EVSUBFSMIAAW, 0x100004CB)                                   \
946   /* Vector Subtract Signed, Saturate, Integer to Accumulator Word */         \
947   V(evsubfssiaaw, EVSUBFSSIAAW, 0x100004C3)                                   \
948   /* Vector Subtract Unsigned, Modulo, Integer to Accumulator Word */         \
949   V(evsubfumiaaw, EVSUBFUMIAAW, 0x100004CA)                                   \
950   /* Vector Subtract Unsigned, Saturate, Integer to Accumulator Word */       \
951   V(evsubfusiaaw, EVSUBFUSIAAW, 0x100004C2)                                   \
952   /* Vector Subtract from Word */                                             \
953   V(evsubfw, EVSUBFW, 0x10000204)                                             \
954   /* Vector Subtract Immediate from Word */                                   \
955   V(evsubifw, EVSUBIFW, 0x10000206)                                           \
956   /* Vector XOR */                                                            \
957   V(evxor, EVXOR, 0x10000216)                                                 \
958   /* Floating-Point Double-Precision Absolute Value */                        \
959   V(efdabs, EFDABS, 0x100002E4)                                               \
960   /* Floating-Point Double-Precision Add */                                   \
961   V(efdadd, EFDADD, 0x100002E0)                                               \
962   /* Floating-Point Double-Precision Convert from Single-Precision */         \
963   V(efdcfs, EFDCFS, 0x100002EF)                                               \
964   /* Convert Floating-Point Double-Precision from Signed Fraction */          \
965   V(efdcfsf, EFDCFSF, 0x100002F3)                                             \
966   /* Convert Floating-Point Double-Precision from Signed Integer */           \
967   V(efdcfsi, EFDCFSI, 0x100002F1)                                             \
968   /* Convert Floating-Point Double-Precision from Signed Integer */           \
969   /* Doubleword */                                                            \
970   V(efdcfsid, EFDCFSID, 0x100002E3)                                           \
971   /* Convert Floating-Point Double-Precision from Unsigned Fraction */        \
972   V(efdcfuf, EFDCFUF, 0x100002F2)                                             \
973   /* Convert Floating-Point Double-Precision from Unsigned Integer */         \
974   V(efdcfui, EFDCFUI, 0x100002F0)                                             \
  /* Convert Floating-Point Double-Precision from Unsigned Integer */         \
976   /* Doubleword */                                                            \
977   V(efdcfuid, EFDCFUID, 0x100002E2)                                           \
978   /* Floating-Point Double-Precision Compare Equal */                         \
979   V(efdcmpeq, EFDCMPEQ, 0x100002EE)                                           \
980   /* Floating-Point Double-Precision Compare Greater Than */                  \
981   V(efdcmpgt, EFDCMPGT, 0x100002EC)                                           \
982   /* Floating-Point Double-Precision Compare Less Than */                     \
983   V(efdcmplt, EFDCMPLT, 0x100002ED)                                           \
984   /* Convert Floating-Point Double-Precision to Signed Fraction */            \
985   V(efdctsf, EFDCTSF, 0x100002F7)                                             \
986   /* Convert Floating-Point Double-Precision to Signed Integer */             \
987   V(efdctsi, EFDCTSI, 0x100002F5)                                             \
988   /* Convert Floating-Point Double-Precision to Signed Integer Doubleword */  \
989   /* with Round toward Zero */                                                \
990   V(efdctsidz, EFDCTSIDZ, 0x100002EB)                                         \
991   /* Convert Floating-Point Double-Precision to Signed Integer with Round */  \
992   /* toward Zero */                                                           \
993   V(efdctsiz, EFDCTSIZ, 0x100002FA)                                           \
994   /* Convert Floating-Point Double-Precision to Unsigned Fraction */          \
995   V(efdctuf, EFDCTUF, 0x100002F6)                                             \
996   /* Convert Floating-Point Double-Precision to Unsigned Integer */           \
997   V(efdctui, EFDCTUI, 0x100002F4)                                             \
998   /* Convert Floating-Point Double-Precision to Unsigned Integer */           \
999   /* Doubleword with Round toward Zero */                                     \
1000   V(efdctuidz, EFDCTUIDZ, 0x100002EA)                                         \
1001   /* Convert Floating-Point Double-Precision to Unsigned Integer with */      \
1002   /* Round toward Zero */                                                     \
1003   V(efdctuiz, EFDCTUIZ, 0x100002F8)                                           \
1004   /* Floating-Point Double-Precision Divide */                                \
1005   V(efddiv, EFDDIV, 0x100002E9)                                               \
1006   /* Floating-Point Double-Precision Multiply */                              \
1007   V(efdmul, EFDMUL, 0x100002E8)                                               \
1008   /* Floating-Point Double-Precision Negative Absolute Value */               \
1009   V(efdnabs, EFDNABS, 0x100002E5)                                             \
1010   /* Floating-Point Double-Precision Negate */                                \
1011   V(efdneg, EFDNEG, 0x100002E6)                                               \
1012   /* Floating-Point Double-Precision Subtract */                              \
1013   V(efdsub, EFDSUB, 0x100002E1)                                               \
1014   /* Floating-Point Double-Precision Test Equal */                            \
1015   V(efdtsteq, EFDTSTEQ, 0x100002FE)                                           \
1016   /* Floating-Point Double-Precision Test Greater Than */                     \
1017   V(efdtstgt, EFDTSTGT, 0x100002FC)                                           \
1018   /* Floating-Point Double-Precision Test Less Than */                        \
1019   V(efdtstlt, EFDTSTLT, 0x100002FD)                                           \
1020   /* Floating-Point Single-Precision Convert from Double-Precision */         \
1021   V(efscfd, EFSCFD, 0x100002CF)                                               \
1022   /* Floating-Point Absolute Value */                                         \
1023   V(efsabs, EFSABS, 0x100002C4)                                               \
1024   /* Floating-Point Add */                                                    \
1025   V(efsadd, EFSADD, 0x100002C0)                                               \
1026   /* Convert Floating-Point from Signed Fraction */                           \
1027   V(efscfsf, EFSCFSF, 0x100002D3)                                             \
1028   /* Convert Floating-Point from Signed Integer */                            \
1029   V(efscfsi, EFSCFSI, 0x100002D1)                                             \
1030   /* Convert Floating-Point from Unsigned Fraction */                         \
1031   V(efscfuf, EFSCFUF, 0x100002D2)                                             \
1032   /* Convert Floating-Point from Unsigned Integer */                          \
1033   V(efscfui, EFSCFUI, 0x100002D0)                                             \
1034   /* Floating-Point Compare Equal */                                          \
1035   V(efscmpeq, EFSCMPEQ, 0x100002CE)                                           \
1036   /* Floating-Point Compare Greater Than */                                   \
1037   V(efscmpgt, EFSCMPGT, 0x100002CC)                                           \
1038   /* Floating-Point Compare Less Than */                                      \
1039   V(efscmplt, EFSCMPLT, 0x100002CD)                                           \
1040   /* Convert Floating-Point to Signed Fraction */                             \
1041   V(efsctsf, EFSCTSF, 0x100002D7)                                             \
1042   /* Convert Floating-Point to Signed Integer */                              \
1043   V(efsctsi, EFSCTSI, 0x100002D5)                                             \
1044   /* Convert Floating-Point to Signed Integer with Round toward Zero */       \
1045   V(efsctsiz, EFSCTSIZ, 0x100002DA)                                           \
1046   /* Convert Floating-Point to Unsigned Fraction */                           \
1047   V(efsctuf, EFSCTUF, 0x100002D6)                                             \
1048   /* Convert Floating-Point to Unsigned Integer */                            \
1049   V(efsctui, EFSCTUI, 0x100002D4)                                             \
1050   /* Convert Floating-Point to Unsigned Integer with Round toward Zero */     \
1051   V(efsctuiz, EFSCTUIZ, 0x100002D8)                                           \
1052   /* Floating-Point Divide */                                                 \
1053   V(efsdiv, EFSDIV, 0x100002C9)                                               \
1054   /* Floating-Point Multiply */                                               \
1055   V(efsmul, EFSMUL, 0x100002C8)                                               \
1056   /* Floating-Point Negative Absolute Value */                                \
1057   V(efsnabs, EFSNABS, 0x100002C5)                                             \
1058   /* Floating-Point Negate */                                                 \
1059   V(efsneg, EFSNEG, 0x100002C6)                                               \
1060   /* Floating-Point Subtract */                                               \
1061   V(efssub, EFSSUB, 0x100002C1)                                               \
1062   /* Floating-Point Test Equal */                                             \
1063   V(efststeq, EFSTSTEQ, 0x100002DE)                                           \
1064   /* Floating-Point Test Greater Than */                                      \
1065   V(efststgt, EFSTSTGT, 0x100002DC)                                           \
1066   /* Floating-Point Test Less Than */                                         \
1067   V(efststlt, EFSTSTLT, 0x100002DD)                                           \
1068   /* Vector Floating-Point Absolute Value */                                  \
1069   V(evfsabs, EVFSABS, 0x10000284)                                             \
1070   /* Vector Floating-Point Add */                                             \
1071   V(evfsadd, EVFSADD, 0x10000280)                                             \
1072   /* Vector Convert Floating-Point from Signed Fraction */                    \
1073   V(evfscfsf, EVFSCFSF, 0x10000293)                                           \
1074   /* Vector Convert Floating-Point from Signed Integer */                     \
1075   V(evfscfsi, EVFSCFSI, 0x10000291)                                           \
1076   /* Vector Convert Floating-Point from Unsigned Fraction */                  \
1077   V(evfscfuf, EVFSCFUF, 0x10000292)                                           \
1078   /* Vector Convert Floating-Point from Unsigned Integer */                   \
1079   V(evfscfui, EVFSCFUI, 0x10000290)                                           \
1080   /* Vector Floating-Point Compare Equal */                                   \
1081   V(evfscmpeq, EVFSCMPEQ, 0x1000028E)                                         \
1082   /* Vector Floating-Point Compare Greater Than */                            \
1083   V(evfscmpgt, EVFSCMPGT, 0x1000028C)                                         \
1084   /* Vector Floating-Point Compare Less Than */                               \
1085   V(evfscmplt, EVFSCMPLT, 0x1000028D)                                         \
1086   /* Vector Convert Floating-Point to Signed Fraction */                      \
1087   V(evfsctsf, EVFSCTSF, 0x10000297)                                           \
1088   /* Vector Convert Floating-Point to Signed Integer */                       \
1089   V(evfsctsi, EVFSCTSI, 0x10000295)                                           \
1090   /* Vector Convert Floating-Point to Signed Integer with Round toward */     \
1091   /* Zero */                                                                  \
1092   V(evfsctsiz, EVFSCTSIZ, 0x1000029A)                                         \
1093   /* Vector Convert Floating-Point to Unsigned Fraction */                    \
1094   V(evfsctuf, EVFSCTUF, 0x10000296)                                           \
1095   /* Vector Convert Floating-Point to Unsigned Integer */                     \
1096   V(evfsctui, EVFSCTUI, 0x10000294)                                           \
1097   /* Vector Convert Floating-Point to Unsigned Integer with Round toward */   \
1098   /* Zero */                                                                  \
1099   V(evfsctuiz, EVFSCTUIZ, 0x10000298)                                         \
1100   /* Vector Floating-Point Divide */                                          \
1101   V(evfsdiv, EVFSDIV, 0x10000289)                                             \
1102   /* Vector Floating-Point Multiply */                                        \
1103   V(evfsmul, EVFSMUL, 0x10000288)                                             \
1104   /* Vector Floating-Point Negative Absolute Value */                         \
1105   V(evfsnabs, EVFSNABS, 0x10000285)                                           \
1106   /* Vector Floating-Point Negate */                                          \
1107   V(evfsneg, EVFSNEG, 0x10000286)                                             \
1108   /* Vector Floating-Point Subtract */                                        \
1109   V(evfssub, EVFSSUB, 0x10000281)                                             \
1110   /* Vector Floating-Point Test Equal */                                      \
1111   V(evfststeq, EVFSTSTEQ, 0x1000029E)                                         \
1112   /* Vector Floating-Point Test Greater Than */                               \
1113   V(evfststgt, EVFSTSTGT, 0x1000029C)                                         \
1114   /* Vector Floating-Point Test Less Than */                                  \
1115   V(evfststlt, EVFSTSTLT, 0x1000029D)
1116 
// VC-form opcodes: vector compare instructions.  Each entry is
// V(assembler mnemonic, enum name, opcode bit pattern with all operand
// fields zero).
#define PPC_VC_OPCODE_LIST(V)                                    \
  /* Vector Compare Bounds Single-Precision */                   \
  V(vcmpbfp, VCMPBFP, 0x100003C6)                                \
  /* Vector Compare Equal To Single-Precision */                 \
  V(vcmpeqfp, VCMPEQFP, 0x100000C6)                              \
  /* Vector Compare Equal To Unsigned Byte */                    \
  V(vcmpequb, VCMPEQUB, 0x10000006)                              \
  /* Vector Compare Equal To Unsigned Doubleword */              \
  V(vcmpequd, VCMPEQUD, 0x100000C7)                              \
  /* Vector Compare Equal To Unsigned Halfword */                \
  V(vcmpequh, VCMPEQUH, 0x10000046)                              \
  /* Vector Compare Equal To Unsigned Word */                    \
  V(vcmpequw, VCMPEQUW, 0x10000086)                              \
  /* Vector Compare Greater Than or Equal To Single-Precision */ \
  V(vcmpgefp, VCMPGEFP, 0x100001C6)                              \
  /* Vector Compare Greater Than Single-Precision */             \
  V(vcmpgtfp, VCMPGTFP, 0x100002C6)                              \
  /* Vector Compare Greater Than Signed Byte */                  \
  V(vcmpgtsb, VCMPGTSB, 0x10000306)                              \
  /* Vector Compare Greater Than Signed Doubleword */            \
  V(vcmpgtsd, VCMPGTSD, 0x100003C7)                              \
  /* Vector Compare Greater Than Signed Halfword */              \
  V(vcmpgtsh, VCMPGTSH, 0x10000346)                              \
  /* Vector Compare Greater Than Signed Word */                  \
  V(vcmpgtsw, VCMPGTSW, 0x10000386)                              \
  /* Vector Compare Greater Than Unsigned Byte */                \
  V(vcmpgtub, VCMPGTUB, 0x10000206)                              \
  /* Vector Compare Greater Than Unsigned Doubleword */          \
  V(vcmpgtud, VCMPGTUD, 0x100002C7)                              \
  /* Vector Compare Greater Than Unsigned Halfword */            \
  V(vcmpgtuh, VCMPGTUH, 0x10000246)                              \
  /* Vector Compare Greater Than Unsigned Word */                \
  V(vcmpgtuw, VCMPGTUW, 0x10000286)
1150 
// X-form opcodes, subgroup A: integer modulo instructions
// (mod{s,u}{w,d}).
#define PPC_X_OPCODE_A_FORM_LIST(V) \
  /* Modulo Signed Dword */         \
  V(modsd, MODSD, 0x7C000612)       \
  /* Modulo Unsigned Dword */       \
  V(modud, MODUD, 0x7C000212)       \
  /* Modulo Signed Word */          \
  V(modsw, MODSW, 0x7C000616)      \
  /* Modulo Unsigned Word */        \
  V(moduw, MODUW, 0x7C000216)
1160 
// X-form opcodes, subgroup B: bitwise logical and register-count shift
// instructions.
#define PPC_X_OPCODE_B_FORM_LIST(V)      \
  /* XOR */                              \
  V(xor_, XORX, 0x7C000278)              \
  /* AND */                              \
  V(and_, ANDX, 0x7C000038)              \
  /* AND with Complement */              \
  V(andc, ANDCX, 0x7C000078)             \
  /* OR */                               \
  V(orx, ORX, 0x7C000378)                \
  /* OR with Complement */               \
  V(orc, ORC, 0x7C000338)                \
  /* NOR */                              \
  V(nor, NORX, 0x7C0000F8)               \
  /* Shift Right Word */                 \
  V(srw, SRWX, 0x7C000430)               \
  /* Shift Left Word */                  \
  V(slw, SLWX, 0x7C000030)               \
  /* Shift Right Algebraic Word */       \
  V(sraw, SRAW, 0x7C000630)              \
  /* Shift Left Doubleword */            \
  V(sld, SLDX, 0x7C000036)               \
  /* Shift Right Algebraic Doubleword */ \
  V(srad, SRAD, 0x7C000634)              \
  /* Shift Right Doubleword */           \
  V(srd, SRDX, 0x7C000436)
1186 
// X-form opcodes, subgroup C: unary count/population/sign-extension
// instructions.
#define PPC_X_OPCODE_C_FORM_LIST(V)     \
  /* Count Leading Zeros Word */        \
  V(cntlzw, CNTLZWX, 0x7C000034)        \
  /* Count Leading Zeros Doubleword */  \
  V(cntlzd, CNTLZDX, 0x7C000074)        \
  /* Count Trailing Zeros Word */       \
  V(cnttzw, CNTTZWX, 0x7C000434)        \
  /* Count Trailing Zeros Doubleword */ \
  V(cnttzd, CNTTZDX, 0x7C000474)        \
  /* Population Count Byte-wise */      \
  V(popcntb, POPCNTB, 0x7C0000F4)       \
  /* Population Count Words */          \
  V(popcntw, POPCNTW, 0x7C0002F4)       \
  /* Population Count Doubleword */     \
  V(popcntd, POPCNTD, 0x7C0003F4)       \
  /* Extend Sign Byte */                \
  V(extsb, EXTSB, 0x7C000774)           \
  /* Extend Sign Halfword */            \
  V(extsh, EXTSH, 0x7C000734)
1206 
// X-form opcodes, subgroup D: indexed (register + register) loads and
// stores, including byte-reversed, update-form, floating-point, and
// vector variants.
#define PPC_X_OPCODE_D_FORM_LIST(V)                     \
  /* Load Halfword Byte-Reverse Indexed */              \
  V(lhbrx, LHBRX, 0x7C00062C)                           \
  /* Load Word Byte-Reverse Indexed */                  \
  V(lwbrx, LWBRX, 0x7C00042C)                           \
  /* Load Doubleword Byte-Reverse Indexed */            \
  V(ldbrx, LDBRX, 0x7C000428)                           \
  /* Load Byte and Zero Indexed */                      \
  V(lbzx, LBZX, 0x7C0000AE)                             \
  /* Load Byte and Zero with Update Indexed */          \
  V(lbzux, LBZUX, 0x7C0000EE)                           \
  /* Load Halfword and Zero Indexed */                  \
  V(lhzx, LHZX, 0x7C00022E)                             \
  /* Load Halfword and Zero with Update Indexed */      \
  V(lhzux, LHZUX, 0x7C00026E)                           \
  /* Load Halfword Algebraic Indexed */                 \
  V(lhax, LHAX, 0x7C0002AE)                             \
  /* Load Word and Zero Indexed */                      \
  V(lwzx, LWZX, 0x7C00002E)                             \
  /* Load Word and Zero with Update Indexed */          \
  V(lwzux, LWZUX, 0x7C00006E)                           \
  /* Load Doubleword Indexed */                         \
  V(ldx, LDX, 0x7C00002A)                               \
  /* Load Doubleword with Update Indexed */             \
  V(ldux, LDUX, 0x7C00006A)                             \
  /* Load Floating-Point Double Indexed */              \
  V(lfdx, LFDX, 0x7C0004AE)                             \
  /* Load Floating-Point Single Indexed */              \
  V(lfsx, LFSX, 0x7C00042E)                             \
  /* Load Floating-Point Double with Update Indexed */  \
  V(lfdux, LFDUX, 0x7C0004EE)                           \
  /* Load Floating-Point Single with Update Indexed */  \
  V(lfsux, LFSUX, 0x7C00046E)                           \
  /* Store Byte with Update Indexed */                  \
  V(stbux, STBUX, 0x7C0001EE)                           \
  /* Store Byte Indexed */                              \
  V(stbx, STBX, 0x7C0001AE)                             \
  /* Store Halfword with Update Indexed */              \
  V(sthux, STHUX, 0x7C00036E)                           \
  /* Store Halfword Indexed */                          \
  V(sthx, STHX, 0x7C00032E)                             \
  /* Store Word with Update Indexed */                  \
  V(stwux, STWUX, 0x7C00016E)                           \
  /* Store Word Indexed */                              \
  V(stwx, STWX, 0x7C00012E)                             \
  /* Store Doubleword with Update Indexed */            \
  V(stdux, STDUX, 0x7C00016A)                           \
  /* Store Doubleword Indexed */                        \
  V(stdx, STDX, 0x7C00012A)                             \
  /* Store Floating-Point Double with Update Indexed */ \
  V(stfdux, STFDUX, 0x7C0005EE)                         \
  /* Store Floating-Point Double Indexed */             \
  V(stfdx, STFDX, 0x7C0005AE)                           \
  /* Store Floating-Point Single with Update Indexed */ \
  V(stfsux, STFSUX, 0x7C00056E)                         \
  /* Store Floating-Point Single Indexed */             \
  V(stfsx, STFSX, 0x7C00052E)                           \
  /* Store Doubleword Byte-Reverse Indexed */           \
  V(stdbrx, STDBRX, 0x7C000528)                         \
  /* Store Word Byte-Reverse Indexed */                 \
  V(stwbrx, STWBRX, 0x7C00052C)                         \
  /* Store Halfword Byte-Reverse Indexed */             \
  V(sthbrx, STHBRX, 0x7C00072C)                         \
  /* Load Vector Indexed */                             \
  V(lvx, LVX, 0x7C0000CE)                               \
  /* Store Vector Indexed */                            \
  V(stvx, STVX, 0x7C0001CE)
1274 
// X-form opcodes, subgroup E: shift instructions whose shift amount is an
// immediate field rather than a register.
#define PPC_X_OPCODE_E_FORM_LIST(V)          \
  /* Shift Right Algebraic Word Immediate */ \
  V(srawi, SRAWIX, 0x7C000670)
1278 
// X-form opcodes, subgroup F: integer compare instructions.
#define PPC_X_OPCODE_F_FORM_LIST(V) \
  /* Compare */                     \
  V(cmp, CMP, 0x7C000000)           \
  /* Compare Logical */             \
  V(cmpl, CMPL, 0x7C000040)
1284 
// X-form opcodes, subgroup G: byte-reverse instructions.
#define PPC_X_OPCODE_G_FORM_LIST(V) \
  /* Byte-Reverse Halfword */       \
  V(brh, BRH, 0x7C0001B6)           \
  /* Byte-Reverse Word */           \
  V(brw, BRW, 0x7C000136)           \
  /* Byte-Reverse Doubleword */     \
  V(brd, BRD, 0x7C000176)
1292 
// X-form store-conditional opcodes (st{b,h,w,d}cx.).  "EH" in the list
// name refers to the hint field these instructions carry — see the Power
// ISA stcx. description; TODO confirm against the decoder's use.
#define PPC_X_OPCODE_EH_S_FORM_LIST(V)                    \
  /* Store Byte Conditional Indexed */                    \
  V(stbcx, STBCX, 0x7C00056D)                             \
  /* Store Halfword Conditional Indexed Xform */          \
  V(sthcx, STHCX, 0x7C0005AD)                             \
  /* Store Word Conditional Indexed & record CR0 */       \
  V(stwcx, STWCX, 0x7C00012D)                             \
  /* Store Doubleword Conditional Indexed & record CR0 */ \
  V(stdcx, STDCX, 0x7C0001AD)
1302 
// X-form load-and-reserve opcodes (l{b,h,w,d}arx), the load half of the
// larx/stcx. pairs listed in PPC_X_OPCODE_EH_S_FORM_LIST above.
#define PPC_X_OPCODE_EH_L_FORM_LIST(V)          \
  /* Load Byte And Reserve Indexed */           \
  V(lbarx, LBARX, 0x7C000068)                   \
  /* Load Halfword And Reserve Indexed Xform */ \
  V(lharx, LHARX, 0x7C0000E8)                   \
  /* Load Word and Reserve Indexed */           \
  V(lwarx, LWARX, 0x7C000028)                   \
  /* Load Doubleword And Reserve Indexed */     \
  V(ldarx, LDARX, 0x7C0000A8)
1312 
1313 #define PPC_X_OPCODE_UNUSED_LIST(V)                                           \
1314   /* Bit Permute Doubleword */                                                \
1315   V(bpermd, BPERMD, 0x7C0001F8)                                               \
1316   /* Extend Sign Word */                                                      \
1317   V(extsw, EXTSW, 0x7C0007B4)                                                 \
1318   /* Load Word Algebraic with Update Indexed */                               \
1319   V(lwaux, LWAUX, 0x7C0002EA)                                                 \
1320   /* Load Word Algebraic Indexed */                                           \
1321   V(lwax, LWAX, 0x7C0002AA)                                                   \
1322   /* Parity Doubleword */                                                     \
1323   V(prtyd, PRTYD, 0x7C000174)                                                 \
1324   /* Trap Doubleword */                                                       \
1325   V(td, TD, 0x7C000088)                                                       \
1326   /* Branch Conditional to Branch Target Address Register */                  \
1327   V(bctar, BCTAR, 0x4C000460)                                                 \
1328   /* Compare Byte */                                                          \
1329   V(cmpb, CMPB, 0x7C0003F8)                                                   \
1330   /* Data Cache Block Flush */                                                \
1331   V(dcbf, DCBF, 0x7C0000AC)                                                   \
1332   /* Data Cache Block Store */                                                \
1333   V(dcbst, DCBST, 0x7C00006C)                                                 \
1334   /* Data Cache Block Touch */                                                \
1335   V(dcbt, DCBT, 0x7C00022C)                                                   \
1336   /* Data Cache Block Touch for Store */                                      \
1337   V(dcbtst, DCBTST, 0x7C0001EC)                                               \
1338   /* Data Cache Block Zero */                                                 \
1339   V(dcbz, DCBZ, 0x7C0007EC)                                                   \
1340   /* Equivalent */                                                            \
1341   V(eqv, EQV, 0x7C000238)                                                     \
1342   /* Instruction Cache Block Invalidate */                                    \
1343   V(icbi, ICBI, 0x7C0007AC)                                                   \
1344   /* NAND */                                                                  \
1345   V(nand, NAND, 0x7C0003B8)                                                   \
1346   /* Parity Word */                                                           \
1347   V(prtyw, PRTYW, 0x7C000134)                                                 \
1348   /* Synchronize */                                                           \
1349   V(sync, SYNC, 0x7C0004AC)                                                   \
1350   /* Trap Word */                                                             \
1351   V(tw, TW, 0x7C000008)                                                       \
1352   /* Executed No Operation */                                                 \
1353   V(xnop, XNOP, 0x68000000)                                                   \
1354   /* Convert Binary Coded Decimal To Declets */                               \
1355   V(cbcdtd, CBCDTD, 0x7C000274)                                               \
1356   /* Convert Declets To Binary Coded Decimal */                               \
1357   V(cdtbcd, CDTBCD, 0x7C000234)                                               \
1358   /* Decimal Floating Add */                                                  \
1359   V(dadd, DADD, 0xEC000004)                                                   \
1360   /* Decimal Floating Add Quad */                                             \
1361   V(daddq, DADDQ, 0xFC000004)                                                 \
1362   /* Decimal Floating Convert From Fixed */                                   \
1363   V(dcffix, DCFFIX, 0xEC000644)                                               \
1364   /* Decimal Floating Convert From Fixed Quad */                              \
1365   V(dcffixq, DCFFIXQ, 0xFC000644)                                             \
1366   /* Decimal Floating Compare Ordered */                                      \
1367   V(dcmpo, DCMPO, 0xEC000104)                                                 \
1368   /* Decimal Floating Compare Ordered Quad */                                 \
1369   V(dcmpoq, DCMPOQ, 0xFC000104)                                               \
1370   /* Decimal Floating Compare Unordered */                                    \
1371   V(dcmpu, DCMPU, 0xEC000504)                                                 \
1372   /* Decimal Floating Compare Unordered Quad */                               \
1373   V(dcmpuq, DCMPUQ, 0xFC000504)                                               \
1374   /* Decimal Floating Convert To DFP Long */                                  \
1375   V(dctdp, DCTDP, 0xEC000204)                                                 \
1376   /* Decimal Floating Convert To Fixed */                                     \
1377   V(dctfix, DCTFIX, 0xEC000244)                                               \
1378   /* Decimal Floating Convert To Fixed Quad */                                \
1379   V(dctfixq, DCTFIXQ, 0xFC000244)                                             \
1380   /* Decimal Floating Convert To DFP Extended */                              \
1381   V(dctqpq, DCTQPQ, 0xFC000204)                                               \
1382   /* Decimal Floating Decode DPD To BCD */                                    \
1383   V(ddedpd, DDEDPD, 0xEC000284)                                               \
1384   /* Decimal Floating Decode DPD To BCD Quad */                               \
1385   V(ddedpdq, DDEDPDQ, 0xFC000284)                                             \
1386   /* Decimal Floating Divide */                                               \
1387   V(ddiv, DDIV, 0xEC000444)                                                   \
1388   /* Decimal Floating Divide Quad */                                          \
1389   V(ddivq, DDIVQ, 0xFC000444)                                                 \
1390   /* Decimal Floating Encode BCD To DPD */                                    \
1391   V(denbcd, DENBCD, 0xEC000684)                                               \
1392   /* Decimal Floating Encode BCD To DPD Quad */                               \
1393   V(denbcdq, DENBCDQ, 0xFC000684)                                             \
1394   /* Decimal Floating Insert Exponent */                                      \
1395   V(diex, DIEX, 0xEC0006C4)                                                   \
1396   /* Decimal Floating Insert Exponent Quad */                                 \
1397   V(diexq, DIEXQ, 0xFC0006C4)                                                 \
1398   /* Decimal Floating Multiply */                                             \
1399   V(dmul, DMUL, 0xEC000044)                                                   \
1400   /* Decimal Floating Multiply Quad */                                        \
1401   V(dmulq, DMULQ, 0xFC000044)                                                 \
1402   /* Decimal Floating Round To DFP Long */                                    \
1403   V(drdpq, DRDPQ, 0xFC000604)                                                 \
1404   /* Decimal Floating Round To DFP Short */                                   \
1405   V(drsp, DRSP, 0xEC000604)                                                   \
1406   /* Decimal Floating Subtract */                                             \
1407   V(dsub, DSUB, 0xEC000404)                                                   \
1408   /* Decimal Floating Subtract Quad */                                        \
1409   V(dsubq, DSUBQ, 0xFC000404)                                                 \
1410   /* Decimal Floating Test Exponent */                                        \
1411   V(dtstex, DTSTEX, 0xEC000144)                                               \
1412   /* Decimal Floating Test Exponent Quad */                                   \
1413   V(dtstexq, DTSTEXQ, 0xFC000144)                                             \
1414   /* Decimal Floating Test Significance */                                    \
1415   V(dtstsf, DTSTSF, 0xEC000544)                                               \
1416   /* Decimal Floating Test Significance Quad */                               \
1417   V(dtstsfq, DTSTSFQ, 0xFC000544)                                             \
1418   /* Decimal Floating Extract Exponent */                                     \
1419   V(dxex, DXEX, 0xEC0002C4)                                                   \
1420   /* Decimal Floating Extract Exponent Quad */                                \
1421   V(dxexq, DXEXQ, 0xFC0002C4)                                                 \
1422   /* Decorated Storage Notify */                                              \
1423   V(dsn, DSN, 0x7C0003C6)                                                     \
1424   /* Load Byte with Decoration Indexed */                                     \
1425   V(lbdx, LBDX, 0x7C000406)                                                   \
1426   /* Load Doubleword with Decoration Indexed */                               \
1427   V(lddx, LDDX, 0x7C0004C6)                                                   \
1428   /* Load Floating Doubleword with Decoration Indexed */                      \
1429   V(lfddx, LFDDX, 0x7C000646)                                                 \
1430   /* Load Halfword with Decoration Indexed */                                 \
1431   V(lhdx, LHDX, 0x7C000446)                                                   \
1432   /* Load Word with Decoration Indexed */                                     \
1433   V(lwdx, LWDX, 0x7C000486)                                                   \
1434   /* Store Byte with Decoration Indexed */                                    \
1435   V(stbdx, STBDX, 0x7C000506)                                                 \
1436   /* Store Doubleword with Decoration Indexed */                              \
1437   V(stddx, STDDX, 0x7C0005C6)                                                 \
1438   /* Store Floating Doubleword with Decoration Indexed */                     \
1439   V(stfddx, STFDDX, 0x7C000746)                                               \
1440   /* Store Halfword with Decoration Indexed */                                \
1441   V(sthdx, STHDX, 0x7C000546)                                                 \
1442   /* Store Word with Decoration Indexed */                                    \
1443   V(stwdx, STWDX, 0x7C000586)                                                 \
1444   /* Data Cache Block Allocate */                                             \
1445   V(dcba, DCBA, 0x7C0005EC)                                                   \
1446   /* Data Cache Block Invalidate */                                           \
1447   V(dcbi, DCBI, 0x7C0003AC)                                                   \
1448   /* Instruction Cache Block Touch */                                         \
1449   V(icbt, ICBT, 0x7C00002C)                                                   \
1450   /* Move to Condition Register from XER */                                   \
1451   V(mcrxr, MCRXR, 0x7C000400)                                                 \
1452   /* TLB Invalidate Local Indexed */                                          \
1453   V(tlbilx, TLBILX, 0x7C000024)                                               \
1454   /* TLB Invalidate Virtual Address Indexed */                                \
1455   V(tlbivax, TLBIVAX, 0x7C000624)                                             \
1456   /* TLB Read Entry */                                                        \
1457   V(tlbre, TLBRE, 0x7C000764)                                                 \
1458   /* TLB Search Indexed */                                                    \
1459   V(tlbsx, TLBSX, 0x7C000724)                                                 \
1460   /* TLB Write Entry */                                                       \
1461   V(tlbwe, TLBWE, 0x7C0007A4)                                                 \
1462   /* Write External Enable */                                                 \
1463   V(wrtee, WRTEE, 0x7C000106)                                                 \
1464   /* Write External Enable Immediate */                                       \
1465   V(wrteei, WRTEEI, 0x7C000146)                                               \
1466   /* Data Cache Read */                                                       \
1467   V(dcread, DCREAD, 0x7C00028C)                                               \
1468   /* Instruction Cache Read */                                                \
1469   V(icread, ICREAD, 0x7C0007CC)                                               \
1470   /* Data Cache Invalidate */                                                 \
1471   V(dci, DCI, 0x7C00038C)                                                     \
1472   /* Instruction Cache Invalidate */                                          \
1473   V(ici, ICI, 0x7C00078C)                                                     \
1474   /* Move From Device Control Register User Mode Indexed */                   \
1475   V(mfdcrux, MFDCRUX, 0x7C000246)                                             \
1476   /* Move From Device Control Register Indexed */                             \
1477   V(mfdcrx, MFDCRX, 0x7C000206)                                               \
1478   /* Move To Device Control Register User Mode Indexed */                     \
1479   V(mtdcrux, MTDCRUX, 0x7C000346)                                             \
1480   /* Move To Device Control Register Indexed */                               \
1481   V(mtdcrx, MTDCRX, 0x7C000306)                                               \
1482   /* Return From Debug Interrupt */                                           \
1483   V(rfdi, RFDI, 0x4C00004E)                                                   \
1484   /* Data Cache Block Flush by External PID */                                \
1485   V(dcbfep, DCBFEP, 0x7C0000FE)                                               \
1486   /* Data Cache Block Store by External PID */                                \
1487   V(dcbstep, DCBSTEP, 0x7C00007E)                                             \
1488   /* Data Cache Block Touch by External PID */                                \
1489   V(dcbtep, DCBTEP, 0x7C00027E)                                               \
1490   /* Data Cache Block Touch for Store by External PID */                      \
1491   V(dcbtstep, DCBTSTEP, 0x7C0001FE)                                           \
1492   /* Data Cache Block Zero by External PID */                                 \
1493   V(dcbzep, DCBZEP, 0x7C0007FE)                                               \
1494   /* Instruction Cache Block Invalidate by External PID */                    \
1495   V(icbiep, ICBIEP, 0x7C0007BE)                                               \
1496   /* Load Byte and Zero by External PID Indexed */                            \
1497   V(lbepx, LBEPX, 0x7C0000BE)                                                 \
1498   /* Load Floating-Point Double by External PID Indexed */                    \
1499   V(lfdepx, LFDEPX, 0x7C0004BE)                                               \
1500   /* Load Halfword and Zero by External PID Indexed */                        \
1501   V(lhepx, LHEPX, 0x7C00023E)                                                 \
1502   /* Load Vector by External PID Indexed */                                   \
1503   V(lvepx, LVEPX, 0x7C00024E)                                                 \
1504   /* Load Vector by External PID Indexed Last */                              \
1505   V(lvepxl, LVEPXL, 0x7C00020E)                                               \
1506   /* Load Word and Zero by External PID Indexed */                            \
1507   V(lwepx, LWEPX, 0x7C00003E)                                                 \
1508   /* Store Byte by External PID Indexed */                                    \
1509   V(stbepx, STBEPX, 0x7C0001BE)                                               \
1510   /* Store Floating-Point Double by External PID Indexed */                   \
1511   V(stfdepx, STFDEPX, 0x7C0005BE)                                             \
1512   /* Store Halfword by External PID Indexed */                                \
1513   V(sthepx, STHEPX, 0x7C00033E)                                               \
1514   /* Store Vector by External PID Indexed */                                  \
1515   V(stvepx, STVEPX, 0x7C00064E)                                               \
1516   /* Store Vector by External PID Indexed Last */                             \
1517   V(stvepxl, STVEPXL, 0x7C00060E)                                             \
1518   /* Store Word by External PID Indexed */                                    \
1519   V(stwepx, STWEPX, 0x7C00013E)                                               \
1520   /* Load Doubleword by External PID Indexed */                               \
1521   V(ldepx, LDEPX, 0x7C00003A)                                                 \
1522   /* Store Doubleword by External PID Indexed */                              \
1523   V(stdepx, STDEPX, 0x7C00013A)                                               \
1524   /* TLB Search and Reserve Indexed */                                        \
1525   V(tlbsrx, TLBSRX, 0x7C0006A5)                                               \
1526   /* External Control In Word Indexed */                                      \
1527   V(eciwx, ECIWX, 0x7C00026C)                                                 \
1528   /* External Control Out Word Indexed */                                     \
1529   V(ecowx, ECOWX, 0x7C00036C)                                                 \
1530   /* Data Cache Block Lock Clear */                                           \
1531   V(dcblc, DCBLC, 0x7C00030C)                                                 \
1532   /* Data Cache Block Lock Query */                                           \
1533   V(dcblq, DCBLQ, 0x7C00034D)                                                 \
1534   /* Data Cache Block Touch and Lock Set */                                   \
1535   V(dcbtls, DCBTLS, 0x7C00014C)                                               \
1536   /* Data Cache Block Touch for Store and Lock Set */                         \
1537   V(dcbtstls, DCBTSTLS, 0x7C00010C)                                           \
1538   /* Instruction Cache Block Lock Clear */                                    \
1539   V(icblc, ICBLC, 0x7C0001CC)                                                 \
1540   /* Instruction Cache Block Lock Query */                                    \
1541   V(icblq, ICBLQ, 0x7C00018D)                                                 \
1542   /* Instruction Cache Block Touch and Lock Set */                            \
1543   V(icbtls, ICBTLS, 0x7C0003CC)                                               \
1544   /* Floating Compare Ordered */                                              \
1545   V(fcmpo, FCMPO, 0xFC000040)                                                 \
1546   /* Floating Compare Unordered */                                            \
1547   V(fcmpu, FCMPU, 0xFC000000)                                                 \
1548   /* Floating Test for software Divide */                                     \
1549   V(ftdiv, FTDIV, 0xFC000100)                                                 \
1550   /* Floating Test for software Square Root */                                \
1551   V(ftsqrt, FTSQRT, 0xFC000140)                                               \
1552   /* Load Floating-Point as Integer Word Algebraic Indexed */                 \
1553   V(lfiwax, LFIWAX, 0x7C0006AE)                                               \
1554   /* Load Floating-Point as Integer Word and Zero Indexed */                  \
1555   V(lfiwzx, LFIWZX, 0x7C0006EE)                                               \
1556   /* Move To Condition Register from FPSCR */                                 \
1557   V(mcrfs, MCRFS, 0xFC000080)                                                 \
1558   /* Store Floating-Point as Integer Word Indexed */                          \
1559   V(stfiwx, STFIWX, 0x7C0007AE)                                               \
1560   /* Load Floating-Point Double Pair Indexed */                               \
1561   V(lfdpx, LFDPX, 0x7C00062E)                                                 \
1562   /* Store Floating-Point Double Pair Indexed */                              \
1563   V(stfdpx, STFDPX, 0x7C00072E)                                               \
1564   /* Floating Absolute Value */                                               \
1565   V(fabs, FABS, 0xFC000210)                                                   \
1566   /* Floating Convert From Integer Doubleword */                              \
1567   V(fcfid, FCFID, 0xFC00069C)                                                 \
1568   /* Floating Convert From Integer Doubleword Single */                       \
1569   V(fcfids, FCFIDS, 0xEC00069C)                                               \
1570   /* Floating Convert From Integer Doubleword Unsigned */                     \
1571   V(fcfidu, FCFIDU, 0xFC00079C)                                               \
1572   /* Floating Convert From Integer Doubleword Unsigned Single */              \
1573   V(fcfidus, FCFIDUS, 0xEC00079C)                                             \
1574   /* Floating Copy Sign */                                                    \
1575   V(fcpsgn, FCPSGN, 0xFC000010)                                               \
1576   /* Floating Convert To Integer Doubleword */                                \
1577   V(fctid, FCTID, 0xFC00065C)                                                 \
1578   /* Floating Convert To Integer Doubleword Unsigned */                       \
1579   V(fctidu, FCTIDU, 0xFC00075C)                                               \
1580   /* Floating Convert To Integer Doubleword Unsigned with round toward */     \
1581   /* Zero */                                                                  \
1582   V(fctiduz, FCTIDUZ, 0xFC00075E)                                             \
1583   /* Floating Convert To Integer Doubleword with round toward Zero */         \
1584   V(fctidz, FCTIDZ, 0xFC00065E)                                               \
1585   /* Floating Convert To Integer Word */                                      \
1586   V(fctiw, FCTIW, 0xFC00001C)                                                 \
1587   /* Floating Convert To Integer Word Unsigned */                             \
1588   V(fctiwu, FCTIWU, 0xFC00011C)                                               \
1589   /* Floating Convert To Integer Word Unsigned with round toward Zero */      \
1590   V(fctiwuz, FCTIWUZ, 0xFC00011E)                                             \
1591   /* Floating Convert To Integer Word with round to Zero */                   \
1592   V(fctiwz, FCTIWZ, 0xFC00001E)                                               \
1593   /* Floating Move Register */                                                \
1594   V(fmr, FMR, 0xFC000090)                                                     \
1595   /* Floating Negative Absolute Value */                                      \
1596   V(fnabs, FNABS, 0xFC000110)                                                 \
1597   /* Floating Negate */                                                       \
1598   V(fneg, FNEG, 0xFC000050)                                                   \
1599   /* Floating Round to Single-Precision */                                    \
1600   V(frsp, FRSP, 0xFC000018)                                                   \
1601   /* Move From FPSCR */                                                       \
1602   V(mffs, MFFS, 0xFC00048E)                                                   \
1603   /* Move To FPSCR Bit 0 */                                                   \
1604   V(mtfsb0, MTFSB0, 0xFC00008C)                                               \
1605   /* Move To FPSCR Bit 1 */                                                   \
1606   V(mtfsb1, MTFSB1, 0xFC00004C)                                               \
1607   /* Move To FPSCR Field Immediate */                                         \
1608   V(mtfsfi, MTFSFI, 0xFC00010C)                                               \
1609   /* Floating Round To Integer Minus */                                       \
1610   V(frim, FRIM, 0xFC0003D0)                                                   \
1611   /* Floating Round To Integer Nearest */                                     \
1612   V(frin, FRIN, 0xFC000310)                                                   \
1613   /* Floating Round To Integer Plus */                                        \
1614   V(frip, FRIP, 0xFC000390)                                                   \
1615   /* Floating Round To Integer toward Zero */                                 \
1616   V(friz, FRIZ, 0xFC000350)                                                   \
1617   /* Multiply Cross Halfword to Word Signed */                                \
1618   V(mulchw, MULCHW, 0x10000150)                                               \
1619   /* Multiply Cross Halfword to Word Unsigned */                              \
1620   V(mulchwu, MULCHWU, 0x10000110)                                             \
1621   /* Multiply High Halfword to Word Signed */                                 \
1622   V(mulhhw, MULHHW, 0x10000050)                                               \
1623   /* Multiply High Halfword to Word Unsigned */                               \
1624   V(mulhhwu, MULHHWU, 0x10000010)                                             \
1625   /* Multiply Low Halfword to Word Signed */                                  \
1626   V(mullhw, MULLHW, 0x10000350)                                               \
1627   /* Multiply Low Halfword to Word Unsigned */                                \
1628   V(mullhwu, MULLHWU, 0x10000310)                                             \
1629   /* Determine Leftmost Zero Byte */                                          \
1630   V(dlmzb, DLMZB, 0x7C00009C)                                                 \
1631   /* Load Quadword And Reserve Indexed */                                     \
1632   V(lqarx, LQARX, 0x7C000228)                                                 \
1633   /* Store Quadword Conditional Indexed and record CR0 */                     \
1634   V(stqcx, STQCX, 0x7C00016D)                                                 \
1635   /* Load String Word Immediate */                                            \
1636   V(lswi, LSWI, 0x7C0004AA)                                                   \
1637   /* Load String Word Indexed */                                              \
1638   V(lswx, LSWX, 0x7C00042A)                                                   \
1639   /* Store String Word Immediate */                                           \
1640   V(stswi, STSWI, 0x7C0005AA)                                                 \
1641   /* Store String Word Indexed */                                             \
1642   V(stswx, STSWX, 0x7C00052A)                                                 \
1643   /* Clear BHRB */                                                            \
1644   V(clrbhrb, CLRBHRB, 0x7C00035C)                                             \
1645   /* Enforce In-order Execution of I/O */                                     \
1646   V(eieio, EIEIO, 0x7C0006AC)                                                 \
1647   /* Load Byte and Zero Caching Inhibited Indexed */                          \
1648   V(lbzcix, LBZCIX, 0x7C0006AA)                                               \
1649   /* Load Doubleword Caching Inhibited Indexed */                             \
1650   V(ldcix, LDCIX, 0x7C0006EA)                                                 \
1651   /* Load Halfword and Zero Caching Inhibited Indexed */                      \
1652   V(lhzcix, LHZCIX, 0x7C00066A)                                               \
1653   /* Load Word and Zero Caching Inhibited Indexed */                          \
1654   V(lwzcix, LWZCIX, 0x7C00062A)                                               \
1655   /* Move From Segment Register */                                            \
1656   V(mfsr, MFSR, 0x7C0004A6)                                                   \
1657   /* Move From Segment Register Indirect */                                   \
1658   V(mfsrin, MFSRIN, 0x7C000526)                                               \
1659   /* Move To Machine State Register Doubleword */                             \
1660   V(mtmsrd, MTMSRD, 0x7C000164)                                               \
1661   /* Move To Split Little Endian */                                           \
1662   V(mtsle, MTSLE, 0x7C000126)                                                 \
1663   /* Move To Segment Register */                                              \
1664   V(mtsr, MTSR, 0x7C0001A4)                                                   \
1665   /* Move To Segment Register Indirect */                                     \
1666   V(mtsrin, MTSRIN, 0x7C0001E4)                                               \
1667   /* SLB Find Entry ESID */                                                   \
1668   V(slbfee, SLBFEE, 0x7C0007A7)                                               \
1669   /* SLB Invalidate All */                                                    \
1670   V(slbia, SLBIA, 0x7C0003E4)                                                 \
1671   /* SLB Invalidate Entry */                                                  \
1672   V(slbie, SLBIE, 0x7C000364)                                                 \
1673   /* SLB Move From Entry ESID */                                              \
1674   V(slbmfee, SLBMFEE, 0x7C000726)                                             \
1675   /* SLB Move From Entry VSID */                                              \
1676   V(slbmfev, SLBMFEV, 0x7C0006A6)                                             \
1677   /* SLB Move To Entry */                                                     \
1678   V(slbmte, SLBMTE, 0x7C000324)                                               \
1679   /* Store Byte Caching Inhibited Indexed */                                  \
1680   V(stbcix, STBCIX, 0x7C0007AA)                                               \
1681   /* Store Doubleword Caching Inhibited Indexed */                            \
1682   V(stdcix, STDCIX, 0x7C0007EA)                                               \
1683   /* Store Halfword and Zero Caching Inhibited Indexed */                     \
1684   V(sthcix, STHCIX, 0x7C00076A)                                               \
1685   /* Store Word and Zero Caching Inhibited Indexed */                         \
1686   V(stwcix, STWCIX, 0x7C00072A)                                               \
1687   /* TLB Invalidate All */                                                    \
1688   V(tlbia, TLBIA, 0x7C0002E4)                                                 \
1689   /* TLB Invalidate Entry */                                                  \
1690   V(tlbie, TLBIE, 0x7C000264)                                                 \
1691   /* TLB Invalidate Entry Local */                                            \
1692   V(tlbiel, TLBIEL, 0x7C000224)                                               \
1693   /* Message Clear Privileged */                                              \
1694   V(msgclrp, MSGCLRP, 0x7C00015C)                                             \
1695   /* Message Send Privileged */                                               \
1696   V(msgsndp, MSGSNDP, 0x7C00011C)                                             \
1697   /* Message Clear */                                                         \
1698   V(msgclr, MSGCLR, 0x7C0001DC)                                               \
1699   /* Message Send */                                                          \
1700   V(msgsnd, MSGSND, 0x7C00019C)                                               \
1701   /* Move From Machine State Register */                                      \
1702   V(mfmsr, MFMSR, 0x7C0000A6)                                                 \
1703   /* Move To Machine State Register */                                        \
1704   V(mtmsr, MTMSR, 0x7C000124)                                                 \
1705   /* TLB Synchronize */                                                       \
1706   V(tlbsync, TLBSYNC, 0x7C00046C)                                             \
1707   /* Transaction Abort */                                                     \
1708   V(tabort, TABORT, 0x7C00071D)                                               \
1709   /* Transaction Abort Doubleword Conditional */                              \
1710   V(tabortdc, TABORTDC, 0x7C00065D)                                           \
1711   /* Transaction Abort Doubleword Conditional Immediate */                    \
1712   V(tabortdci, TABORTDCI, 0x7C0006DD)                                         \
1713   /* Transaction Abort Word Conditional */                                    \
1714   V(tabortwc, TABORTWC, 0x7C00061D)                                           \
1715   /* Transaction Abort Word Conditional Immediate */                          \
1716   V(tabortwci, TABORTWCI, 0x7C00069D)                                         \
1717   /* Transaction Begin */                                                     \
1718   V(tbegin, TBEGIN, 0x7C00051D)                                               \
1719   /* Transaction Check */                                                     \
1720   V(tcheck, TCHECK, 0x7C00059C)                                               \
1721   /* Transaction End */                                                       \
1722   V(tend, TEND, 0x7C00055C)                                                   \
1723   /* Transaction Recheckpoint */                                              \
1724   V(trechkpt, TRECHKPT, 0x7C0007DD)                                           \
1725   /* Transaction Reclaim */                                                   \
1726   V(treclaim, TRECLAIM, 0x7C00075D)                                           \
1727   /* Transaction Suspend or Resume */                                         \
1728   V(tsr, TSR, 0x7C0005DC)                                                     \
1729   /* Load Vector Element Byte Indexed */                                      \
1730   V(lvebx, LVEBX, 0x7C00000E)                                                 \
1731   /* Load Vector Element Halfword Indexed */                                  \
1732   V(lvehx, LVEHX, 0x7C00004E)                                                 \
1733   /* Load Vector Element Word Indexed */                                      \
1734   V(lvewx, LVEWX, 0x7C00008E)                                                 \
1735   /* Load Vector for Shift Left */                                            \
1736   V(lvsl, LVSL, 0x7C00000C)                                                   \
1737   /* Load Vector for Shift Right */                                           \
1738   V(lvsr, LVSR, 0x7C00004C)                                                   \
1739   /* Load Vector Indexed Last */                                              \
1740   V(lvxl, LVXL, 0x7C0002CE)                                                   \
1741   /* Store Vector Element Byte Indexed */                                     \
1742   V(stvebx, STVEBX, 0x7C00010E)                                               \
1743   /* Store Vector Element Halfword Indexed */                                 \
1744   V(stvehx, STVEHX, 0x7C00014E)                                               \
1745   /* Store Vector Element Word Indexed */                                     \
1746   V(stvewx, STVEWX, 0x7C00018E)                                               \
1747   /* Store Vector Indexed Last */                                             \
1748   V(stvxl, STVXL, 0x7C0003CE)                                                 \
1749   /* Floating Merge Even Word */                                              \
1750   V(fmrgew, FMRGEW, 0xFC00078C)                                               \
1751   /* Floating Merge Odd Word */                                               \
1752   V(fmrgow, FMRGOW, 0xFC00068C)                                               \
1753   /* Wait for Interrupt */                                                    \
1754   V(wait, WAIT, 0x7C00007C)
1755 
// X-macro aggregator for all X-form opcodes: expands the callback V once per
// entry by chaining the per-form sub-lists defined earlier in this file.
// NOTE(review): expansion order presumably determines generated enum values —
// do not reorder the sub-lists.
#define PPC_X_OPCODE_LIST(V)     \
  PPC_X_OPCODE_A_FORM_LIST(V)    \
  PPC_X_OPCODE_B_FORM_LIST(V)    \
  PPC_X_OPCODE_C_FORM_LIST(V)    \
  PPC_X_OPCODE_D_FORM_LIST(V)    \
  PPC_X_OPCODE_E_FORM_LIST(V)    \
  PPC_X_OPCODE_F_FORM_LIST(V)    \
  PPC_X_OPCODE_G_FORM_LIST(V)    \
  PPC_X_OPCODE_EH_L_FORM_LIST(V) \
  PPC_X_OPCODE_UNUSED_LIST(V)
1766 
// EVS-form opcodes. Each entry expands
// V(assembler mnemonic, enum identifier, 32-bit base encoding).
#define PPC_EVS_OPCODE_LIST(V) \
  /* Vector Select */          \
  V(evsel, EVSEL, 0x10000278)
1770 
// DS-form opcodes (doubleword loads/stores with displacement). Each entry
// expands V(assembler mnemonic, enum identifier, 32-bit base encoding).
#define PPC_DS_OPCODE_LIST(V)            \
  /* Load Doubleword */                  \
  V(ld, LD, 0xE8000000)                  \
  /* Load Doubleword with Update */      \
  V(ldu, LDU, 0xE8000001)                \
  /* Load Word Algebraic */              \
  V(lwa, LWA, 0xE8000002)                \
  /* Store Doubleword */                 \
  V(std, STD, 0xF8000000)                \
  /* Store Doubleword with Update */     \
  V(stdu, STDU, 0xF8000001)              \
  /* Load Floating-Point Double Pair */  \
  V(lfdp, LFDP, 0xE4000000)              \
  /* Store Floating-Point Double Pair */ \
  V(stfdp, STFDP, 0xF4000000)            \
  /* Store Quadword */                   \
  V(stq, STQ, 0xF8000002)
1788 
// DQ-form opcodes. Single entry: lsq (quadword load);
// V(assembler mnemonic, enum identifier, 32-bit base encoding).
#define PPC_DQ_OPCODE_LIST(V) V(lsq, LSQ, 0xE0000000)
1790 
// D-form opcodes (instructions taking a 16-bit immediate/displacement field).
// Each entry expands V(assembler mnemonic, enum identifier, 32-bit base
// encoding). A trailing lowercase 'x' in an enum name (e.g. ADDICx) marks the
// record form ("." suffix in assembler syntax) of the instruction.
#define PPC_D_OPCODE_LIST(V)                    \
  /* Trap Doubleword Immediate */               \
  V(tdi, TDI, 0x08000000)                       \
  /* Add Immediate */                           \
  V(addi, ADDI, 0x38000000)                     \
  /* Add Immediate Carrying */                  \
  V(addic, ADDIC, 0x30000000)                   \
  /* Add Immediate Carrying & record CR0 */     \
  V(addicx, ADDICx, 0x34000000)                 \
  /* Add Immediate Shifted */                   \
  V(addis, ADDIS, 0x3C000000)                   \
  /* AND Immediate & record CR0 */              \
  V(andix, ANDIx, 0x70000000)                   \
  /* AND Immediate Shifted & record CR0 */      \
  V(andisx, ANDISx, 0x74000000)                 \
  /* Compare Immediate */                       \
  V(cmpi, CMPI, 0x2C000000)                     \
  /* Compare Logical Immediate */               \
  V(cmpli, CMPLI, 0x28000000)                   \
  /* Load Byte and Zero */                      \
  V(lbz, LBZ, 0x88000000)                       \
  /* Load Byte and Zero with Update */          \
  V(lbzu, LBZU, 0x8C000000)                     \
  /* Load Halfword Algebraic */                 \
  V(lha, LHA, 0xA8000000)                       \
  /* Load Halfword Algebraic with Update */     \
  V(lhau, LHAU, 0xAC000000)                     \
  /* Load Halfword and Zero */                  \
  V(lhz, LHZ, 0xA0000000)                       \
  /* Load Halfword and Zero with Update */      \
  V(lhzu, LHZU, 0xA4000000)                     \
  /* Load Multiple Word */                      \
  V(lmw, LMW, 0xB8000000)                       \
  /* Load Word and Zero */                      \
  V(lwz, LWZ, 0x80000000)                       \
  /* Load Word and Zero with Update */          \
  V(lwzu, LWZU, 0x84000000)                     \
  /* Multiply Low Immediate */                  \
  V(mulli, MULLI, 0x1C000000)                   \
  /* OR Immediate */                            \
  V(ori, ORI, 0x60000000)                       \
  /* OR Immediate Shifted */                    \
  V(oris, ORIS, 0x64000000)                     \
  /* Store Byte */                              \
  V(stb, STB, 0x98000000)                       \
  /* Store Byte with Update */                  \
  V(stbu, STBU, 0x9C000000)                     \
  /* Store Halfword */                          \
  V(sth, STH, 0xB0000000)                       \
  /* Store Halfword with Update */              \
  V(sthu, STHU, 0xB4000000)                     \
  /* Store Multiple Word */                     \
  V(stmw, STMW, 0xBC000000)                     \
  /* Store Word */                              \
  V(stw, STW, 0x90000000)                       \
  /* Store Word with Update */                  \
  V(stwu, STWU, 0x94000000)                     \
  /* Subtract From Immediate Carrying */        \
  V(subfic, SUBFIC, 0x20000000)                 \
  /* Trap Word Immediate */                     \
  V(twi, TWI, 0x0C000000)                       \
  /* XOR Immediate */                           \
  V(xori, XORI, 0x68000000)                     \
  /* XOR Immediate Shifted */                   \
  V(xoris, XORIS, 0x6C000000)                   \
  /* Load Floating-Point Double */              \
  V(lfd, LFD, 0xC8000000)                       \
  /* Load Floating-Point Double with Update */  \
  V(lfdu, LFDU, 0xCC000000)                     \
  /* Load Floating-Point Single */              \
  V(lfs, LFS, 0xC0000000)                       \
  /* Load Floating-Point Single with Update */  \
  V(lfsu, LFSU, 0xC4000000)                     \
  /* Store Floating-Point Double */             \
  V(stfd, STFD, 0xD8000000)                     \
  /* Store Floating-Point Double with Update */ \
  V(stfdu, STFDU, 0xDC000000)                   \
  /* Store Floating-Point Single */             \
  V(stfs, STFS, 0xD0000000)                     \
  /* Store Floating-Point Single with Update */ \
  V(stfsu, STFSU, 0xD4000000)
1872 
// XFL-form opcodes. Each entry expands
// V(assembler mnemonic, enum identifier, 32-bit base encoding).
#define PPC_XFL_OPCODE_LIST(V) \
  /* Move To FPSCR Fields */   \
  V(mtfsf, MTFSF, 0xFC00058E)
1876 
// XFX-form opcodes (moves to/from special, condition, and device registers).
// Each entry expands V(assembler mnemonic, enum identifier, 32-bit base
// encoding). Note mfocrf/mtocrf carry an extra set bit (0x00100000) that
// distinguishes the one-field form from mfcr/mtcrf.
#define PPC_XFX_OPCODE_LIST(V)                  \
  /* Move From Condition Register */            \
  V(mfcr, MFCR, 0x7C000026)                     \
  /* Move From One Condition Register Field */  \
  V(mfocrf, MFOCRF, 0x7C100026)                 \
  /* Move From Special Purpose Register */      \
  V(mfspr, MFSPR, 0x7C0002A6)                   \
  /* Move To Condition Register Fields */       \
  V(mtcrf, MTCRF, 0x7C000120)                   \
  /* Move To One Condition Register Field */    \
  V(mtocrf, MTOCRF, 0x7C100120)                 \
  /* Move To Special Purpose Register */        \
  V(mtspr, MTSPR, 0x7C0003A6)                   \
  /* Debugger Notify Halt */                    \
  V(dnh, DNH, 0x4C00018C)                       \
  /* Move From Device Control Register */       \
  V(mfdcr, MFDCR, 0x7C000286)                   \
  /* Move To Device Control Register */         \
  V(mtdcr, MTDCR, 0x7C000386)                   \
  /* Move from Performance Monitor Register */  \
  V(mfpmr, MFPMR, 0x7C00029C)                   \
  /* Move To Performance Monitor Register */    \
  V(mtpmr, MTPMR, 0x7C00039C)                   \
  /* Move From Branch History Rolling Buffer */ \
  V(mfbhrbe, MFBHRBE, 0x7C00025C)               \
  /* Move From Time Base */                     \
  V(mftb, MFTB, 0x7C0002E6)
1904 
// MDS-form opcodes. Each entry expands
// V(assembler mnemonic, enum identifier, 32-bit base encoding).
#define PPC_MDS_OPCODE_LIST(V)                  \
  /* Rotate Left Doubleword then Clear Left */  \
  V(rldcl, RLDCL, 0x78000010)                   \
  /* Rotate Left Doubleword then Clear Right */ \
  V(rldcr, RLDCR, 0x78000012)
1910 
// A-form opcodes (four-register operations: mostly floating-point arithmetic
// and fused multiply-add variants, plus isel). Each entry expands
// V(assembler mnemonic, enum identifier, 32-bit base encoding).
#define PPC_A_OPCODE_LIST(V)                            \
  /* Integer Select */                                  \
  V(isel, ISEL, 0x7C00001E)                             \
  /* Floating Add */                                    \
  V(fadd, FADD, 0xFC00002A)                             \
  /* Floating Add Single */                             \
  V(fadds, FADDS, 0xEC00002A)                           \
  /* Floating Divide */                                 \
  V(fdiv, FDIV, 0xFC000024)                             \
  /* Floating Divide Single */                          \
  V(fdivs, FDIVS, 0xEC000024)                           \
  /* Floating Multiply-Add */                           \
  V(fmadd, FMADD, 0xFC00003A)                           \
  /* Floating Multiply-Add Single */                    \
  V(fmadds, FMADDS, 0xEC00003A)                         \
  /* Floating Multiply-Subtract */                      \
  V(fmsub, FMSUB, 0xFC000038)                           \
  /* Floating Multiply-Subtract Single */               \
  V(fmsubs, FMSUBS, 0xEC000038)                         \
  /* Floating Multiply */                               \
  V(fmul, FMUL, 0xFC000032)                             \
  /* Floating Multiply Single */                        \
  V(fmuls, FMULS, 0xEC000032)                           \
  /* Floating Negative Multiply-Add */                  \
  V(fnmadd, FNMADD, 0xFC00003E)                         \
  /* Floating Negative Multiply-Add Single */           \
  V(fnmadds, FNMADDS, 0xEC00003E)                       \
  /* Floating Negative Multiply-Subtract */             \
  V(fnmsub, FNMSUB, 0xFC00003C)                         \
  /* Floating Negative Multiply-Subtract Single */      \
  V(fnmsubs, FNMSUBS, 0xEC00003C)                       \
  /* Floating Reciprocal Estimate Single */             \
  V(fres, FRES, 0xEC000030)                             \
  /* Floating Reciprocal Square Root Estimate */        \
  V(frsqrte, FRSQRTE, 0xFC000034)                       \
  /* Floating Select */                                 \
  V(fsel, FSEL, 0xFC00002E)                             \
  /* Floating Square Root */                            \
  V(fsqrt, FSQRT, 0xFC00002C)                           \
  /* Floating Square Root Single */                     \
  V(fsqrts, FSQRTS, 0xEC00002C)                         \
  /* Floating Subtract */                               \
  V(fsub, FSUB, 0xFC000028)                             \
  /* Floating Subtract Single */                        \
  V(fsubs, FSUBS, 0xEC000028)                           \
  /* Floating Reciprocal Estimate */                    \
  V(fre, FRE, 0xFC000030)                               \
  /* Floating Reciprocal Square Root Estimate Single */ \
  V(frsqrtes, FRSQRTES, 0xEC000034)
1960 
// VA-form opcodes actively used by the code generator (see
// PPC_VA_OPCODE_LIST below, which chains this with the unused sub-list).
// Each entry expands V(assembler mnemonic, enum identifier, 32-bit base
// encoding).
#define PPC_VA_OPCODE_A_FORM_LIST(V)                            \
  /* Vector Permute */                                          \
  V(vperm, VPERM, 0x1000002B)                                   \
  /* Vector Multiply-Low-Add Unsigned Halfword Modulo */        \
  V(vmladduhm, VMLADDUHM, 0x10000022)                           \
  /* Vector Select */                                           \
  V(vsel, VSEL, 0x1000002A)                                     \
  /* Vector Multiply-Sum Signed Halfword Modulo */              \
  V(vmsumshm, VMSUMSHM, 0x10000028)                             \
  /* Vector Multiply-High-Round-Add Signed Halfword Saturate */ \
  V(vmhraddshs, VMHRADDSHS, 0x10000021)
1972 
// VA-form opcodes that are declared (for disassembly/completeness) but not
// emitted by the code generator. Each entry expands
// V(assembler mnemonic, enum identifier, 32-bit base encoding).
#define PPC_VA_OPCODE_UNUSED_LIST(V)                             \
  /* Vector Add Extended & write Carry Unsigned Quadword */      \
  V(vaddecuq, VADDECUQ, 0x1000003D)                              \
  /* Vector Add Extended Unsigned Quadword Modulo */             \
  V(vaddeuqm, VADDEUQM, 0x1000003C)                              \
  /* Vector Multiply-Add Single-Precision */                     \
  V(vmaddfp, VMADDFP, 0x1000002E)                                \
  /* Vector Multiply-High-Add Signed Halfword Saturate */        \
  V(vmhaddshs, VMHADDSHS, 0x10000020)                            \
  /* Vector Multiply-Sum Mixed Byte Modulo */                    \
  V(vmsummbm, VMSUMMBM, 0x10000025)                              \
  /* Vector Multiply-Sum Signed Halfword Saturate */             \
  V(vmsumshs, VMSUMSHS, 0x10000029)                              \
  /* Vector Multiply-Sum Unsigned Byte Modulo */                 \
  V(vmsumubm, VMSUMUBM, 0x10000024)                              \
  /* Vector Multiply-Sum Unsigned Halfword Modulo */             \
  V(vmsumuhm, VMSUMUHM, 0x10000026)                              \
  /* Vector Multiply-Sum Unsigned Halfword Saturate */           \
  V(vmsumuhs, VMSUMUHS, 0x10000027)                              \
  /* Vector Negative Multiply-Subtract Single-Precision */       \
  V(vnmsubfp, VNMSUBFP, 0x1000002F)                              \
  /* Vector Shift Left Double by Octet Immediate */              \
  V(vsldoi, VSLDOI, 0x1000002C)                                  \
  /* Vector Subtract Extended & write Carry Unsigned Quadword */ \
  V(vsubecuq, VSUBECUQ, 0x1000003F)                              \
  /* Vector Subtract Extended Unsigned Quadword Modulo */        \
  V(vsubeuqm, VSUBEUQM, 0x1000003E)                              \
  /* Vector Permute and Exclusive-OR */                          \
  V(vpermxor, VPERMXOR, 0x1000002D)
2002 
// X-macro aggregator for all VA-form opcodes (used + unused sub-lists).
// NOTE(review): expansion order presumably determines generated enum values —
// do not reorder.
#define PPC_VA_OPCODE_LIST(V)  \
  PPC_VA_OPCODE_A_FORM_LIST(V) \
  PPC_VA_OPCODE_UNUSED_LIST(V)
2006 
// XX1-form opcodes (VSX loads/stores and GPR<->VSR moves). Each entry expands
// V(assembler mnemonic, enum identifier, 32-bit base encoding).
#define PPC_XX1_OPCODE_LIST(V)                             \
  /* Load VSR Scalar Doubleword Indexed */                 \
  V(lxsdx, LXSDX, 0x7C000498)                              \
  /* Load VSX Scalar as Integer Word Algebraic Indexed */  \
  V(lxsiwax, LXSIWAX, 0x7C000098)                          \
  /* Load VSX Scalar as Integer Byte & Zero Indexed */     \
  V(lxsibzx, LXSIBZX, 0x7C00061A)                          \
  /* Load VSX Scalar as Integer Halfword & Zero Indexed */ \
  V(lxsihzx, LXSIHZX, 0x7C00065A)                          \
  /* Load VSX Scalar as Integer Word and Zero Indexed */   \
  V(lxsiwzx, LXSIWZX, 0x7C000018)                          \
  /* Load VSX Scalar Single-Precision Indexed */           \
  V(lxsspx, LXSSPX, 0x7C000418)                            \
  /* Load VSR Vector Doubleword*2 Indexed */               \
  V(lxvd, LXVD, 0x7C000698)                                \
  /* Load VSX Vector Indexed */                            \
  V(lxvx, LXVX, 0x7C000218)                                \
  /* Load VSR Vector Doubleword & Splat Indexed */         \
  V(lxvdsx, LXVDSX, 0x7C000298)                            \
  /* Load VSR Vector Word*4 Indexed */                     \
  V(lxvw, LXVW, 0x7C000618)                                \
  /* Move To VSR Doubleword */                             \
  V(mtvsrd, MTVSRD, 0x7C000166)                            \
  /* Move To VSR Double Doubleword */                      \
  V(mtvsrdd, MTVSRDD, 0x7C000366)                          \
  /* Move To VSR Word Algebraic */                         \
  V(mtvsrwa, MTVSRWA, 0x7C0001A6)                          \
  /* Move To VSR Word and Zero */                          \
  V(mtvsrwz, MTVSRWZ, 0x7C0001E6)                          \
  /* Move From VSR Doubleword */                           \
  V(mfvsrd, MFVSRD, 0x7C000066)                            \
  /* Move From VSR Word and Zero */                        \
  V(mfvsrwz, MFVSRWZ, 0x7C0000E6)                          \
  /* Store VSR Scalar Doubleword Indexed */                \
  V(stxsdx, STXSDX, 0x7C000598)                            \
  /* Store VSX Scalar as Integer Word Indexed */           \
  V(stxsiwx, STXSIWX, 0x7C000118)                          \
  /* Store VSX Scalar as Integer Halfword Indexed */       \
  V(stxsihx, STXSIHX, 0x7C00075A)                          \
  /* Store VSX Scalar as Integer Byte Indexed */           \
  V(stxsibx, STXSIBX, 0x7C00071A)                          \
  /* Store VSR Scalar Word Indexed */                      \
  V(stxsspx, STXSSPX, 0x7C000518)                          \
  /* Store VSR Vector Doubleword*2 Indexed */              \
  V(stxvd, STXVD, 0x7C000798)                              \
  /* Store VSX Vector Indexed */                           \
  V(stxvx, STXVX, 0x7C000318)                              \
  /* Store VSR Vector Word*4 Indexed */                    \
  V(stxvw, STXVW, 0x7C000718)
2056 
// B-form opcodes (conditional branch). Each entry expands
// V(assembler mnemonic, enum identifier, 32-bit base encoding).
#define PPC_B_OPCODE_LIST(V) \
  /* Branch Conditional */   \
  V(bc, BCX, 0x40000000)
2060 
// XO-form opcodes (integer arithmetic with optional OV/carry recording, plus
// multiply-accumulate families). Each entry expands
// V(assembler mnemonic, enum identifier, 32-bit base encoding). Enum names
// ending in a trailing lowercase 'x' (e.g. ADDX) carry the Rc/record bit
// option; "& record OV" entries set the OE bit variant of the base opcode.
#define PPC_XO_OPCODE_LIST(V)                                               \
  /* Divide Doubleword */                                                   \
  V(divd, DIVD, 0x7C0003D2)                                                 \
  /* Divide Doubleword Extended */                                          \
  V(divde, DIVDE, 0x7C000352)                                               \
  /* Divide Doubleword Extended & record OV */                              \
  V(divdeo, DIVDEO, 0x7C000752)                                             \
  /* Divide Doubleword Extended Unsigned */                                 \
  V(divdeu, DIVDEU, 0x7C000312)                                             \
  /* Divide Doubleword Extended Unsigned & record OV */                     \
  V(divdeuo, DIVDEUO, 0x7C000712)                                           \
  /* Divide Doubleword & record OV */                                       \
  V(divdo, DIVDO, 0x7C0007D2)                                               \
  /* Divide Doubleword Unsigned */                                          \
  V(divdu, DIVDU, 0x7C000392)                                               \
  /* Divide Doubleword Unsigned & record OV */                              \
  V(divduo, DIVDUO, 0x7C000792)                                             \
  /* Multiply High Doubleword */                                            \
  V(mulhd, MULHD, 0x7C000092)                                               \
  /* Multiply High Doubleword Unsigned */                                   \
  V(mulhdu, MULHDU, 0x7C000012)                                             \
  /* Multiply Low Doubleword */                                             \
  V(mulld, MULLD, 0x7C0001D2)                                               \
  /* Multiply Low Doubleword & record OV */                                 \
  V(mulldo, MULLDO, 0x7C0005D2)                                             \
  /* Add */                                                                 \
  V(add, ADDX, 0x7C000214)                                                  \
  /* Add Carrying */                                                        \
  V(addc, ADDCX, 0x7C000014)                                                \
  /* Add Carrying & record OV */                                            \
  V(addco, ADDCO, 0x7C000414)                                               \
  /* Add Extended */                                                        \
  V(adde, ADDEX, 0x7C000114)                                                \
  /* Add Extended & record OV */                                            \
  V(addeo, ADDEO, 0x7C000514)                                               \
  /* Add to Minus One Extended */                                           \
  V(addme, ADDME, 0x7C0001D4)                                               \
  /* Add to Minus One Extended & record OV */                               \
  V(addmeo, ADDMEO, 0x7C0005D4)                                             \
  /* Add & record OV */                                                     \
  V(addo, ADDO, 0x7C000614)                                                 \
  /* Add to Zero Extended */                                                \
  V(addze, ADDZEX, 0x7C000194)                                              \
  /* Add to Zero Extended & record OV */                                    \
  V(addzeo, ADDZEO, 0x7C000594)                                             \
  /* Divide Word Format */                                                  \
  V(divw, DIVW, 0x7C0003D6)                                                 \
  /* Divide Word Extended */                                                \
  V(divwe, DIVWE, 0x7C000356)                                               \
  /* Divide Word Extended & record OV */                                    \
  V(divweo, DIVWEO, 0x7C000756)                                             \
  /* Divide Word Extended Unsigned */                                       \
  V(divweu, DIVWEU, 0x7C000316)                                             \
  /* Divide Word Extended Unsigned & record OV */                           \
  V(divweuo, DIVWEUO, 0x7C000716)                                           \
  /* Divide Word & record OV */                                             \
  V(divwo, DIVWO, 0x7C0007D6)                                               \
  /* Divide Word Unsigned */                                                \
  V(divwu, DIVWU, 0x7C000396)                                               \
  /* Divide Word Unsigned & record OV */                                    \
  V(divwuo, DIVWUO, 0x7C000796)                                             \
  /* Multiply High Word */                                                  \
  V(mulhw, MULHWX, 0x7C000096)                                              \
  /* Multiply High Word Unsigned */                                         \
  V(mulhwu, MULHWUX, 0x7C000016)                                            \
  /* Multiply Low Word */                                                   \
  V(mullw, MULLW, 0x7C0001D6)                                               \
  /* Multiply Low Word & record OV */                                       \
  V(mullwo, MULLWO, 0x7C0005D6)                                             \
  /* Negate */                                                              \
  V(neg, NEGX, 0x7C0000D0)                                                  \
  /* Negate & record OV */                                                  \
  V(nego, NEGO, 0x7C0004D0)                                                 \
  /* Subtract From */                                                       \
  V(subf, SUBFX, 0x7C000050)                                                \
  /* Subtract From Carrying */                                              \
  V(subfc, SUBFCX, 0x7C000010)                                              \
  /* Subtract From Carrying & record OV */                                  \
  V(subfco, SUBFCO, 0x7C000410)                                             \
  /* Subtract From Extended */                                              \
  V(subfe, SUBFEX, 0x7C000110)                                              \
  /* Subtract From Extended & record OV */                                  \
  V(subfeo, SUBFEO, 0x7C000510)                                             \
  /* Subtract From Minus One Extended */                                    \
  V(subfme, SUBFME, 0x7C0001D0)                                             \
  /* Subtract From Minus One Extended & record OV */                        \
  V(subfmeo, SUBFMEO, 0x7C0005D0)                                           \
  /* Subtract From & record OV */                                           \
  V(subfo, SUBFO, 0x7C000450)                                               \
  /* Subtract From Zero Extended */                                         \
  V(subfze, SUBFZE, 0x7C000190)                                             \
  /* Subtract From Zero Extended & record OV */                             \
  V(subfzeo, SUBFZEO, 0x7C000590)                                           \
  /* Add and Generate Sixes */                                              \
  V(addg, ADDG, 0x7C000094)                                                 \
  /* Multiply Accumulate Cross Halfword to Word Modulo Signed */            \
  V(macchw, MACCHW, 0x10000158)                                             \
  /* Multiply Accumulate Cross Halfword to Word Saturate Signed */          \
  V(macchws, MACCHWS, 0x100001D8)                                           \
  /* Multiply Accumulate Cross Halfword to Word Saturate Unsigned */        \
  V(macchwsu, MACCHWSU, 0x10000198)                                         \
  /* Multiply Accumulate Cross Halfword to Word Modulo Unsigned */          \
  V(macchwu, MACCHWU, 0x10000118)                                           \
  /* Multiply Accumulate High Halfword to Word Modulo Signed */             \
  V(machhw, MACHHW, 0x10000058)                                             \
  /* Multiply Accumulate High Halfword to Word Saturate Signed */           \
  V(machhws, MACHHWS, 0x100000D8)                                           \
  /* Multiply Accumulate High Halfword to Word Saturate Unsigned */         \
  V(machhwsu, MACHHWSU, 0x10000098)                                         \
  /* Multiply Accumulate High Halfword to Word Modulo Unsigned */           \
  V(machhwu, MACHHWU, 0x10000018)                                           \
  /* Multiply Accumulate Low Halfword to Word Modulo Signed */              \
  V(maclhw, MACLHW, 0x10000358)                                             \
  /* Multiply Accumulate Low Halfword to Word Saturate Signed */            \
  V(maclhws, MACLHWS, 0x100003D8)                                           \
  /* Multiply Accumulate Low Halfword to Word Saturate Unsigned */          \
  V(maclhwsu, MACLHWSU, 0x10000398)                                         \
  /* Multiply Accumulate Low Halfword to Word Modulo Unsigned */            \
  V(maclhwu, MACLHWU, 0x10000318)                                           \
  /* Negative Multiply Accumulate Cross Halfword to Word Modulo Signed */   \
  V(nmacchw, NMACCHW, 0x1000015C)                                           \
  /* Negative Multiply Accumulate Cross Halfword to Word Saturate Signed */ \
  V(nmacchws, NMACCHWS, 0x100001DC)                                         \
  /* Negative Multiply Accumulate High Halfword to Word Modulo Signed */    \
  V(nmachhw, NMACHHW, 0x1000005C)                                           \
  /* Negative Multiply Accumulate High Halfword to Word Saturate Signed */  \
  V(nmachhws, NMACHHWS, 0x100000DC)                                         \
  /* Negative Multiply Accumulate Low Halfword to Word Modulo Signed */     \
  V(nmaclhw, NMACLHW, 0x1000035C)                                           \
  /* Negative Multiply Accumulate Low Halfword to Word Saturate Signed */   \
  V(nmaclhws, NMACLHWS, 0x100003DC)
2192 
// XL-form opcodes: condition-register logic, branches via CTR/LR, and
// synchronize / return-from-interrupt style instructions. Every entry
// carries the 0x4C000000 primary-opcode prefix except EHPRIV
// (0x7C00021C). NOTE(review): EHPRIV's different prefix matches how it
// decodes under another primary opcode -- confirm against the Power ISA
// before changing it.
#define PPC_XL_OPCODE_LIST(V)                       \
  /* Branch Conditional to Count Register */        \
  V(bcctr, BCCTRX, 0x4C000420)                      \
  /* Branch Conditional to Link Register */         \
  V(bclr, BCLRX, 0x4C000020)                        \
  /* Condition Register AND */                      \
  V(crand, CRAND, 0x4C000202)                       \
  /* Condition Register AND with Complement */      \
  V(crandc, CRANDC, 0x4C000102)                     \
  /* Condition Register Equivalent */               \
  V(creqv, CREQV, 0x4C000242)                       \
  /* Condition Register NAND */                     \
  V(crnand, CRNAND, 0x4C0001C2)                     \
  /* Condition Register NOR */                      \
  V(crnor, CRNOR, 0x4C000042)                       \
  /* Condition Register OR */                       \
  V(cror, CROR, 0x4C000382)                         \
  /* Condition Register OR with Complement */       \
  V(crorc, CRORC, 0x4C000342)                       \
  /* Condition Register XOR */                      \
  V(crxor, CRXOR, 0x4C000182)                       \
  /* Instruction Synchronize */                     \
  V(isync, ISYNC, 0x4C00012C)                       \
  /* Move Condition Register Field */               \
  V(mcrf, MCRF, 0x4C000000)                         \
  /* Return From Critical Interrupt */              \
  V(rfci, RFCI, 0x4C000066)                         \
  /* Return From Interrupt */                       \
  V(rfi, RFI, 0x4C000064)                           \
  /* Return From Machine Check Interrupt */         \
  V(rfmci, RFMCI, 0x4C00004C)                       \
  /* Embedded Hypervisor Privilege */               \
  V(ehpriv, EHPRIV, 0x7C00021C)                     \
  /* Return From Guest Interrupt */                 \
  V(rfgi, RFGI, 0x4C0000CC)                         \
  /* Doze */                                        \
  V(doze, DOZE, 0x4C000324)                         \
  /* Return From Interrupt Doubleword Hypervisor */ \
  V(hrfid, HRFID, 0x4C000224)                       \
  /* Nap */                                         \
  V(nap, NAP, 0x4C000364)                           \
  /* Return from Event Based Branch */              \
  V(rfebb, RFEBB, 0x4C000124)                       \
  /* Return from Interrupt Doubleword */            \
  V(rfid, RFID, 0x4C000024)                         \
  /* Rip Van Winkle */                              \
  V(rvwinkle, RVWINKLE, 0x4C0003E4)                 \
  /* Sleep */                                       \
  V(sleep, SLEEP, 0x4C0003A4)
2242 
// XX4-form (VSX) opcodes; currently only the select instruction.
#define PPC_XX4_OPCODE_LIST(V) \
  /* VSX Select */             \
  V(xxsel, XXSEL, 0xF0000030)
2246 
// I-form opcodes: the unconditional branch.
#define PPC_I_OPCODE_LIST(V) \
  /* Branch */               \
  V(b, BX, 0x48000000)
2250 
// M-form opcodes: 32-bit rotate-left-then-mask instructions.
#define PPC_M_OPCODE_LIST(V)                          \
  /* Rotate Left Word Immediate then Mask Insert */   \
  V(rlwimi, RLWIMIX, 0x50000000)                      \
  /* Rotate Left Word Immediate then AND with Mask */ \
  V(rlwinm, RLWINMX, 0x54000000)                      \
  /* Rotate Left Word then AND with Mask */           \
  V(rlwnm, RLWNMX, 0x5C000000)
2258 
// VX-form vector splat / extract / insert opcodes. NOTE(review): the
// A..G "form" letters are a local grouping used by this port (per the
// macro names), not Power ISA terminology -- confirm the operand-layout
// distinction in the consumers before regrouping.
#define PPC_VX_OPCODE_A_FORM_LIST(V)     \
  /* Vector Splat Byte */                \
  V(vspltb, VSPLTB, 0x1000020C)          \
  /* Vector Splat Word */                \
  V(vspltw, VSPLTW, 0x1000028C)          \
  /* Vector Splat Halfword */            \
  V(vsplth, VSPLTH, 0x1000024C)          \
  /* Vector Extract Unsigned Byte */     \
  V(vextractub, VEXTRACTUB, 0x1000020D)  \
  /* Vector Extract Unsigned Halfword */ \
  V(vextractuh, VEXTRACTUH, 0x1000024D)  \
  /* Vector Extract Unsigned Word */     \
  V(vextractuw, VEXTRACTUW, 0x1000028D)  \
  /* Vector Extract Doubleword */        \
  V(vextractd, VEXTRACTD, 0x100002CD)    \
  /* Vector Insert Byte */               \
  V(vinsertb, VINSERTB, 0x1000030D)      \
  /* Vector Insert Halfword */           \
  V(vinserth, VINSERTH, 0x1000034D)      \
  /* Vector Insert Word */               \
  V(vinsertw, VINSERTW, 0x1000038D)      \
  /* Vector Insert Doubleword */         \
  V(vinsertd, VINSERTD, 0x100003CD)
2282 
// VX-form opcodes grouped as "B form": logical ops, modulo/saturating
// add and subtract, multiplies, min/max, shifts, rotates, pack, and
// merge. NOTE(review): the grouping is local to this port (by how the
// (dis)assembler handles the operands), not ISA terminology.
#define PPC_VX_OPCODE_B_FORM_LIST(V)                       \
  /* Vector Logical OR */                                  \
  V(vor, VOR, 0x10000484)                                  \
  /* Vector Logical XOR */                                 \
  V(vxor, VXOR, 0x100004C4)                                \
  /* Vector Logical NOR */                                 \
  V(vnor, VNOR, 0x10000504)                                \
  /* Vector Shift Right by Octet */                        \
  V(vsro, VSRO, 0x1000044C)                                \
  /* Vector Shift Left by Octet */                         \
  V(vslo, VSLO, 0x1000040C)                                \
  /* Vector Add Unsigned Doubleword Modulo */              \
  V(vaddudm, VADDUDM, 0x100000C0)                          \
  /* Vector Add Unsigned Word Modulo */                    \
  V(vadduwm, VADDUWM, 0x10000080)                          \
  /* Vector Add Unsigned Halfword Modulo */                \
  V(vadduhm, VADDUHM, 0x10000040)                          \
  /* Vector Add Unsigned Byte Modulo */                    \
  V(vaddubm, VADDUBM, 0x10000000)                          \
  /* Vector Add Single-Precision */                        \
  V(vaddfp, VADDFP, 0x1000000A)                            \
  /* Vector Subtract Single-Precision */                   \
  V(vsubfp, VSUBFP, 0x1000004A)                            \
  /* Vector Subtract Unsigned Doubleword Modulo */         \
  V(vsubudm, VSUBUDM, 0x100004C0)                          \
  /* Vector Subtract Unsigned Word Modulo */               \
  V(vsubuwm, VSUBUWM, 0x10000480)                          \
  /* Vector Subtract Unsigned Halfword Modulo */           \
  V(vsubuhm, VSUBUHM, 0x10000440)                          \
  /* Vector Subtract Unsigned Byte Modulo */               \
  V(vsububm, VSUBUBM, 0x10000400)                          \
  /* Vector Multiply Unsigned Word Modulo */               \
  V(vmuluwm, VMULUWM, 0x10000089)                          \
  /* Vector Pack Unsigned Halfword Unsigned Modulo */      \
  V(vpkuhum, VPKUHUM, 0x1000000E)                          \
  /* Vector Multiply Even Signed Byte */                   \
  V(vmulesb, VMULESB, 0x10000308)                          \
  /* Vector Multiply Even Unsigned Byte */                 \
  V(vmuleub, VMULEUB, 0x10000208)                          \
  /* Vector Multiply Odd Signed Byte */                    \
  V(vmulosb, VMULOSB, 0x10000108)                          \
  /* Vector Multiply Odd Unsigned Byte */                  \
  V(vmuloub, VMULOUB, 0x10000008)                          \
  /* Vector Multiply Even Unsigned Halfword */             \
  V(vmuleuh, VMULEUH, 0x10000248)                          \
  /* Vector Multiply Even Signed Halfword */               \
  V(vmulesh, VMULESH, 0x10000348)                          \
  /* Vector Multiply Odd Unsigned Halfword */              \
  V(vmulouh, VMULOUH, 0x10000048)                          \
  /* Vector Multiply Odd Signed Halfword */                \
  V(vmulosh, VMULOSH, 0x10000148)                          \
  /* Vector Multiply Even Signed Word */                   \
  V(vmulesw, VMULESW, 0x10000388)                          \
  /* Vector Multiply Even Unsigned Word */                 \
  V(vmuleuw, VMULEUW, 0x10000288)                          \
  /* Vector Multiply Odd Signed Word */                    \
  V(vmulosw, VMULOSW, 0x10000188)                          \
  /* Vector Multiply Odd Unsigned Word */                  \
  V(vmulouw, VMULOUW, 0x10000088)                          \
  /* Vector Multiply Low Doubleword */                     \
  V(vmulld, VMULLD, 0x100001C9)                            \
  /* Vector Sum across Quarter Signed Halfword Saturate */ \
  V(vsum4shs, VSUM4SHS, 0x10000648)                        \
  /* Vector Pack Unsigned Word Unsigned Saturate */        \
  V(vpkuwus, VPKUWUS, 0x100000CE)                          \
  /* Vector Sum across Half Signed Word Saturate */        \
  V(vsum2sws, VSUM2SWS, 0x10000688)                        \
  /* Vector Pack Unsigned Doubleword Unsigned Modulo */    \
  V(vpkudum, VPKUDUM, 0x1000044E)                          \
  /* Vector Maximum Signed Byte */                         \
  V(vmaxsb, VMAXSB, 0x10000102)                            \
  /* Vector Maximum Unsigned Byte */                       \
  V(vmaxub, VMAXUB, 0x10000002)                            \
  /* Vector Maximum Signed Doubleword */                   \
  V(vmaxsd, VMAXSD, 0x100001C2)                            \
  /* Vector Maximum Unsigned Doubleword */                 \
  V(vmaxud, VMAXUD, 0x100000C2)                            \
  /* Vector Maximum Signed Halfword */                     \
  V(vmaxsh, VMAXSH, 0x10000142)                            \
  /* Vector Maximum Unsigned Halfword */                   \
  V(vmaxuh, VMAXUH, 0x10000042)                            \
  /* Vector Maximum Signed Word */                         \
  V(vmaxsw, VMAXSW, 0x10000182)                            \
  /* Vector Maximum Unsigned Word */                       \
  V(vmaxuw, VMAXUW, 0x10000082)                            \
  /* Vector Minimum Signed Byte */                         \
  V(vminsb, VMINSB, 0x10000302)                            \
  /* Vector Minimum Unsigned Byte */                       \
  V(vminub, VMINUB, 0x10000202)                            \
  /* Vector Minimum Signed Doubleword */                   \
  V(vminsd, VMINSD, 0x100003C2)                            \
  /* Vector Minimum Unsigned Doubleword */                 \
  V(vminud, VMINUD, 0x100002C2)                            \
  /* Vector Minimum Signed Halfword */                     \
  V(vminsh, VMINSH, 0x10000342)                            \
  /* Vector Minimum Unsigned Halfword */                   \
  V(vminuh, VMINUH, 0x10000242)                            \
  /* Vector Minimum Signed Word */                         \
  V(vminsw, VMINSW, 0x10000382)                            \
  /* Vector Minimum Unsigned Word */                       \
  V(vminuw, VMINUW, 0x10000282)                            \
  /* Vector Shift Left Byte */                             \
  V(vslb, VSLB, 0x10000104)                                \
  /* Vector Shift Left Word */                             \
  V(vslw, VSLW, 0x10000184)                                \
  /* Vector Shift Left Halfword */                         \
  V(vslh, VSLH, 0x10000144)                                \
  /* Vector Shift Left Doubleword */                       \
  V(vsld, VSLD, 0x100005C4)                                \
  /* Vector Shift Right Byte */                            \
  V(vsrb, VSRB, 0x10000204)                                \
  /* Vector Shift Right Word */                            \
  V(vsrw, VSRW, 0x10000284)                                \
  /* Vector Shift Right Halfword */                        \
  V(vsrh, VSRH, 0x10000244)                                \
  /* Vector Shift Right Doubleword */                      \
  V(vsrd, VSRD, 0x100006C4)                                \
  /* Vector Shift Right Algebraic Byte */                  \
  V(vsrab, VSRAB, 0x10000304)                              \
  /* Vector Shift Right Algebraic Word */                  \
  V(vsraw, VSRAW, 0x10000384)                              \
  /* Vector Shift Right Algebraic Halfword */              \
  V(vsrah, VSRAH, 0x10000344)                              \
  /* Vector Shift Right Algebraic Doubleword */            \
  V(vsrad, VSRAD, 0x100003C4)                              \
  /* Vector Logical AND */                                 \
  V(vand, VAND, 0x10000404)                                \
  /* Vector Pack Signed Word Signed Saturate */            \
  V(vpkswss, VPKSWSS, 0x100001CE)                          \
  /* Vector Pack Signed Word Unsigned Saturate */          \
  V(vpkswus, VPKSWUS, 0x1000014E)                          \
  /* Vector Pack Signed Halfword Signed Saturate */        \
  V(vpkshss, VPKSHSS, 0x1000018E)                          \
  /* Vector Pack Signed Halfword Unsigned Saturate */      \
  V(vpkshus, VPKSHUS, 0x1000010E)                          \
  /* Vector Add Signed Halfword Saturate */                \
  V(vaddshs, VADDSHS, 0x10000340)                          \
  /* Vector Subtract Signed Halfword Saturate */           \
  V(vsubshs, VSUBSHS, 0x10000740)                          \
  /* Vector Add Unsigned Halfword Saturate */              \
  V(vadduhs, VADDUHS, 0x10000240)                          \
  /* Vector Subtract Unsigned Halfword Saturate */         \
  V(vsubuhs, VSUBUHS, 0x10000640)                          \
  /* Vector Add Signed Byte Saturate */                    \
  V(vaddsbs, VADDSBS, 0x10000300)                          \
  /* Vector Subtract Signed Byte Saturate */               \
  V(vsubsbs, VSUBSBS, 0x10000700)                          \
  /* Vector Add Unsigned Byte Saturate */                  \
  V(vaddubs, VADDUBS, 0x10000200)                          \
  /* Vector Subtract Unsigned Byte Saturate */             \
  V(vsububs, VSUBUBS, 0x10000600)                          \
  /* Vector Average Unsigned Byte */                       \
  V(vavgub, VAVGUB, 0x10000402)                            \
  /* Vector Average Unsigned Halfword */                   \
  V(vavguh, VAVGUH, 0x10000442)                            \
  /* Vector Logical AND with Complement */                 \
  V(vandc, VANDC, 0x10000444)                              \
  /* Vector Minimum Single-Precision */                    \
  V(vminfp, VMINFP, 0x1000044A)                            \
  /* Vector Maximum Single-Precision */                    \
  V(vmaxfp, VMAXFP, 0x1000040A)                            \
  /* Vector Bit Permute Quadword */                        \
  V(vbpermq, VBPERMQ, 0x1000054C)                          \
  /* Vector Merge High Byte */                             \
  V(vmrghb, VMRGHB, 0x1000000C)                            \
  /* Vector Merge High Halfword */                         \
  V(vmrghh, VMRGHH, 0x1000004C)                            \
  /* Vector Merge High Word */                             \
  V(vmrghw, VMRGHW, 0x1000008C)                            \
  /* Vector Merge Low Byte */                              \
  V(vmrglb, VMRGLB, 0x1000010C)                            \
  /* Vector Merge Low Halfword */                          \
  V(vmrglh, VMRGLH, 0x1000014C)                            \
  /* Vector Merge Low Word */                              \
  V(vmrglw, VMRGLW, 0x1000018C)
2458 
// VX-form unpack and byte-population-count opcodes ("C form" is a local
// grouping, not ISA terminology).
#define PPC_VX_OPCODE_C_FORM_LIST(V)       \
  /* Vector Unpack Low Signed Word */      \
  V(vupklsw, VUPKLSW, 0x100006CE)          \
  /* Vector Unpack High Signed Word */     \
  V(vupkhsw, VUPKHSW, 0x1000064E)          \
  /* Vector Unpack Low Signed Halfword */  \
  V(vupklsh, VUPKLSH, 0x100002CE)          \
  /* Vector Unpack High Signed Halfword */ \
  V(vupkhsh, VUPKHSH, 0x1000024E)          \
  /* Vector Unpack Low Signed Byte */      \
  V(vupklsb, VUPKLSB, 0x1000028E)          \
  /* Vector Unpack High Signed Byte */     \
  V(vupkhsb, VUPKHSB, 0x1000020E)          \
  /* Vector Population Count Byte */       \
  V(vpopcntb, VPOPCNTB, 0x10000703)
2474 
// VX-form negate opcodes. The operation-selecting register field is
// baked into the constants (they differ only in the 0x0006/0x0007
// nibble pair: 0x10060602 vs 0x10070602).
#define PPC_VX_OPCODE_D_FORM_LIST(V) \
  /* Vector Negate Word */           \
  V(vnegw, VNEGW, 0x10060602)        \
  /* Vector Negate Doubleword */     \
  V(vnegd, VNEGD, 0x10070602)
2480 
// VX-form splat-immediate-signed opcodes.
#define PPC_VX_OPCODE_E_FORM_LIST(V)           \
  /* Vector Splat Immediate Signed Byte */     \
  V(vspltisb, VSPLTISB, 0x1000030C)            \
  /* Vector Splat Immediate Signed Halfword */ \
  V(vspltish, VSPLTISH, 0x1000034C)            \
  /* Vector Splat Immediate Signed Word */     \
  V(vspltisw, VSPLTISW, 0x1000038C)
2488 
// VX-form extract-mask opcodes. The element-width selector is baked
// into the constants (0x1008..., 0x1009..., 0x100A..., 0x100B...), so
// all four share the same 0x...0642 extended opcode.
#define PPC_VX_OPCODE_F_FORM_LIST(V)    \
  /* Vector Extract Byte Mask */        \
  V(vextractbm, VEXTRACTBM, 0x10080642) \
  /* Vector Extract Halfword Mask */    \
  V(vextracthm, VEXTRACTHM, 0x10090642) \
  /* Vector Extract Word Mask */        \
  V(vextractwm, VEXTRACTWM, 0x100A0642) \
  /* Vector Extract Doubleword Mask */  \
  V(vextractdm, VEXTRACTDM, 0x100B0642)
2498 
// VX-form insert-from-GPR opcodes (index given as an immediate). The
// entry comments below intentionally wrap across continuation lines;
// line splicing happens before comment stripping, so this is valid.
#define PPC_VX_OPCODE_G_FORM_LIST(V)         \
  /* Vector Insert Word from GPR using       \
immediate-specified index */                 \
  V(vinsw, VINSW, 0x100000CF)                \
  /* Vector Insert Doubleword from GPR using \
immediate-specified index */                 \
  V(vinsd, VINSD, 0x100001CF)
2506 
// VX-form opcodes kept for decoding completeness. NOTE(review): "unused"
// is asserted only by the macro name -- presumably these are not emitted
// by this port's codegen; confirm before relying on that.
#define PPC_VX_OPCODE_UNUSED_LIST(V)                                      \
  /* Decimal Add Modulo */                                                \
  V(bcdadd, BCDADD, 0xF0000400)                                           \
  /* Decimal Subtract Modulo */                                           \
  V(bcdsub, BCDSUB, 0xF0000440)                                           \
  /* Move From Vector Status and Control Register */                      \
  V(mfvscr, MFVSCR, 0x10000604)                                           \
  /* Move To Vector Status and Control Register */                        \
  V(mtvscr, MTVSCR, 0x10000644)                                           \
  /* Vector Add & write Carry Unsigned Quadword */                        \
  V(vaddcuq, VADDCUQ, 0x10000140)                                         \
  /* Vector Add and Write Carry-Out Unsigned Word */                      \
  V(vaddcuw, VADDCUW, 0x10000180)                                         \
  /* Vector Add Signed Word Saturate */                                   \
  V(vaddsws, VADDSWS, 0x10000380)                                         \
  /* Vector Add Unsigned Quadword Modulo */                               \
  V(vadduqm, VADDUQM, 0x10000100)                                         \
  /* Vector Add Unsigned Word Saturate */                                 \
  V(vadduws, VADDUWS, 0x10000280)                                         \
  /* Vector Average Signed Byte */                                        \
  V(vavgsb, VAVGSB, 0x10000502)                                           \
  /* Vector Average Signed Halfword */                                    \
  V(vavgsh, VAVGSH, 0x10000542)                                           \
  /* Vector Average Signed Word */                                        \
  V(vavgsw, VAVGSW, 0x10000582)                                           \
  /* Vector Average Unsigned Word */                                      \
  V(vavguw, VAVGUW, 0x10000482)                                           \
  /* Vector Convert From Signed Fixed-Point Word To Single-Precision */   \
  V(vcfsx, VCFSX, 0x1000034A)                                             \
  /* Vector Convert From Unsigned Fixed-Point Word To Single-Precision */ \
  V(vcfux, VCFUX, 0x1000030A)                                             \
  /* Vector Count Leading Zeros Byte */                                   \
  V(vclzb, VCLZB, 0x10000702)                                             \
  /* Vector Count Leading Zeros Doubleword */                             \
  V(vclzd, VCLZD, 0x100007C2)                                             \
  /* Vector Count Leading Zeros Halfword */                               \
  V(vclzh, VCLZH, 0x10000742)                                             \
  /* Vector Count Leading Zeros Word */                                   \
  V(vclzw, VCLZW, 0x10000782)                                             \
  /* Vector Convert From Single-Precision To Signed Fixed-Point Word */   \
  /* Saturate */                                                          \
  V(vctsxs, VCTSXS, 0x100003CA)                                           \
  /* Vector Convert From Single-Precision To Unsigned Fixed-Point Word */ \
  /* Saturate */                                                          \
  V(vctuxs, VCTUXS, 0x1000038A)                                           \
  /* Vector Equivalence */                                                \
  V(veqv, VEQV, 0x10000684)                                               \
  /* Vector 2 Raised to the Exponent Estimate Single-Precision */         \
  V(vexptefp, VEXPTEFP, 0x1000018A)                                       \
  /* Vector Gather Bits by Byte by Doubleword */                          \
  V(vgbbd, VGBBD, 0x1000050C)                                             \
  /* Vector Log Base 2 Estimate Single-Precision */                       \
  V(vlogefp, VLOGEFP, 0x100001CA)                                         \
  /* Vector NAND */                                                       \
  V(vnand, VNAND, 0x10000584)                                             \
  /* Vector OR with Complement */                                         \
  V(vorc, VORC, 0x10000544)                                               \
  /* Vector Pack Pixel */                                                 \
  V(vpkpx, VPKPX, 0x1000030E)                                             \
  /* Vector Pack Signed Doubleword Signed Saturate */                     \
  V(vpksdss, VPKSDSS, 0x100005CE)                                         \
  /* Vector Pack Signed Doubleword Unsigned Saturate */                   \
  V(vpksdus, VPKSDUS, 0x1000054E)                                         \
  /* Vector Pack Unsigned Doubleword Unsigned Saturate */                 \
  V(vpkudus, VPKUDUS, 0x100004CE)                                         \
  /* Vector Pack Unsigned Halfword Unsigned Saturate */                   \
  V(vpkuhus, VPKUHUS, 0x1000008E)                                         \
  /* Vector Pack Unsigned Word Unsigned Modulo */                         \
  V(vpkuwum, VPKUWUM, 0x1000004E)                                         \
  /* Vector Polynomial Multiply-Sum Byte */                               \
  V(vpmsumb, VPMSUMB, 0x10000408)                                         \
  /* Vector Polynomial Multiply-Sum Doubleword */                         \
  V(vpmsumd, VPMSUMD, 0x100004C8)                                         \
  /* Vector Polynomial Multiply-Sum Halfword */                           \
  V(vpmsumh, VPMSUMH, 0x10000448)                                         \
  /* Vector Polynomial Multiply-Sum Word */                               \
  V(vpmsumw, VPMSUMW, 0x10000488)                                         \
  /* Vector Population Count Doubleword */                                \
  V(vpopcntd, VPOPCNTD, 0x100007C3)                                       \
  /* Vector Population Count Halfword */                                  \
  V(vpopcnth, VPOPCNTH, 0x10000743)                                       \
  /* Vector Population Count Word */                                      \
  V(vpopcntw, VPOPCNTW, 0x10000783)                                       \
  /* Vector Reciprocal Estimate Single-Precision */                       \
  V(vrefp, VREFP, 0x1000010A)                                             \
  /* Vector Round to Single-Precision Integer toward -Infinity */         \
  V(vrfim, VRFIM, 0x100002CA)                                             \
  /* Vector Round to Single-Precision Integer Nearest */                  \
  V(vrfin, VRFIN, 0x1000020A)                                             \
  /* Vector Round to Single-Precision Integer toward +Infinity */         \
  V(vrfip, VRFIP, 0x1000028A)                                             \
  /* Vector Round to Single-Precision Integer toward Zero */              \
  V(vrfiz, VRFIZ, 0x1000024A)                                             \
  /* Vector Rotate Left Byte */                                           \
  V(vrlb, VRLB, 0x10000004)                                               \
  /* Vector Rotate Left Doubleword */                                     \
  V(vrld, VRLD, 0x100000C4)                                               \
  /* Vector Rotate Left Halfword */                                       \
  V(vrlh, VRLH, 0x10000044)                                               \
  /* Vector Rotate Left Word */                                           \
  V(vrlw, VRLW, 0x10000084)                                               \
  /* Vector Reciprocal Square Root Estimate Single-Precision */           \
  V(vrsqrtefp, VRSQRTEFP, 0x1000014A)                                     \
  /* Vector Shift Left */                                                 \
  V(vsl, VSL, 0x100001C4)                                                 \
  /* Vector Shift Right */                                                \
  V(vsr, VSR, 0x100002C4)                                                 \
  /* Vector Subtract & write Carry Unsigned Quadword */                   \
  V(vsubcuq, VSUBCUQ, 0x10000540)                                         \
  /* Vector Subtract and Write Carry-Out Unsigned Word */                 \
  V(vsubcuw, VSUBCUW, 0x10000580)                                         \
  /* Vector Subtract Signed Word Saturate */                              \
  V(vsubsws, VSUBSWS, 0x10000780)                                         \
  /* Vector Subtract Unsigned Quadword Modulo */                          \
  V(vsubuqm, VSUBUQM, 0x10000500)                                         \
  /* Vector Subtract Unsigned Word Saturate */                            \
  V(vsubuws, VSUBUWS, 0x10000680)                                         \
  /* Vector Sum across Quarter Signed Byte Saturate */                    \
  V(vsum4sbs, VSUM4SBS, 0x10000708)                                       \
  /* Vector Sum across Quarter Unsigned Byte Saturate */                  \
  V(vsum4bus, VSUM4BUS, 0x10000608)                                       \
  /* Vector Sum across Signed Word Saturate */                            \
  V(vsumsws, VSUMSWS, 0x10000788)                                         \
  /* Vector Unpack High Pixel */                                          \
  V(vupkhpx, VUPKHPX, 0x1000034E)                                         \
  /* Vector Unpack Low Pixel */                                           \
  V(vupklpx, VUPKLPX, 0x100003CE)                                         \
  /* Vector AES Cipher */                                                 \
  V(vcipher, VCIPHER, 0x10000508)                                         \
  /* Vector AES Cipher Last */                                            \
  V(vcipherlast, VCIPHERLAST, 0x10000509)                                 \
  /* Vector AES Inverse Cipher */                                         \
  V(vncipher, VNCIPHER, 0x10000548)                                       \
  /* Vector AES Inverse Cipher Last */                                    \
  V(vncipherlast, VNCIPHERLAST, 0x10000549)                               \
  /* Vector AES S-Box */                                                  \
  V(vsbox, VSBOX, 0x100005C8)                                             \
  /* Vector SHA-512 Sigma Doubleword */                                   \
  V(vshasigmad, VSHASIGMAD, 0x100006C2)                                   \
  /* Vector SHA-256 Sigma Word */                                         \
  V(vshasigmaw, VSHASIGMAW, 0x10000682)                                   \
  /* Vector Merge Even Word */                                            \
  V(vmrgew, VMRGEW, 0x1000078C)                                           \
  /* Vector Merge Odd Word */                                             \
  V(vmrgow, VMRGOW, 0x1000068C)
2652 
// Aggregates every VX-form sub-list (the A..G groupings plus the
// UNUSED set) into a single X-macro callback stream.
#define PPC_VX_OPCODE_LIST(V)  \
  PPC_VX_OPCODE_A_FORM_LIST(V) \
  PPC_VX_OPCODE_B_FORM_LIST(V) \
  PPC_VX_OPCODE_C_FORM_LIST(V) \
  PPC_VX_OPCODE_D_FORM_LIST(V) \
  PPC_VX_OPCODE_E_FORM_LIST(V) \
  PPC_VX_OPCODE_F_FORM_LIST(V) \
  PPC_VX_OPCODE_G_FORM_LIST(V) \
  PPC_VX_OPCODE_UNUSED_LIST(V)
2662 
// XS-form opcodes (currently only the shift-right-algebraic immediate).
#define PPC_XS_OPCODE_LIST(V)                      \
  /* Shift Right Algebraic Doubleword Immediate */ \
  V(sradi, SRADIX, 0x7C000674)
2666 
// MD-form opcodes: 64-bit rotate-left-immediate-then-mask instructions.
#define PPC_MD_OPCODE_LIST(V)                             \
  /* Rotate Left Doubleword Immediate then Clear */       \
  V(rldic, RLDIC, 0x78000008)                             \
  /* Rotate Left Doubleword Immediate then Clear Left */  \
  V(rldicl, RLDICL, 0x78000000)                           \
  /* Rotate Left Doubleword Immediate then Clear Right */ \
  V(rldicr, RLDICR, 0x78000004)                           \
  /* Rotate Left Doubleword Immediate then Mask Insert */ \
  V(rldimi, RLDIMI, 0x7800000C)
2676 
// SC-form opcodes: the system-call instruction.
#define PPC_SC_OPCODE_LIST(V) \
  /* System Call */           \
  V(sc, SC, 0x44000002)
2680 
// Prefixed-instruction opcodes grouped under prefix "type 00" (8LS-style
// loads/stores per the entry names). NOTE(review): the type split is
// implied only by the macro name; confirm against the Power ISA
// prefixed-instruction definitions. PLOAD_STORE_8LS (0x4000000) is the
// same value as the EXTP enumerator declared further down.
#define PPC_PREFIX_OPCODE_TYPE_00_LIST(V)        \
  V(pload_store_8ls, PLOAD_STORE_8LS, 0x4000000) \
  V(pplwa, PPLWA, 0xA4000000)                    \
  V(ppld, PPLD, 0xE4000000)
2685 
// Prefixed-instruction opcodes grouped under prefix "type 10" (MLS-style
// loads/stores per the entry name). NOTE(review): grouping inferred from
// the macro name; verify against the ISA.
#define PPC_PREFIX_OPCODE_TYPE_10_LIST(V) \
  V(pload_store_mls, PLOAD_STORE_MLS, 0x6000000)
2688 
// Master list: concatenates every per-format opcode list so X-macro
// consumers (e.g. the Opcode enum below, and presumably the
// assembler/disassembler tables elsewhere) can visit every instruction
// with one V(name, OPCODE_NAME, value) callback.
#define PPC_OPCODE_LIST(V)          \
  PPC_X_OPCODE_LIST(V)              \
  PPC_X_OPCODE_EH_S_FORM_LIST(V)    \
  PPC_XO_OPCODE_LIST(V)             \
  PPC_DS_OPCODE_LIST(V)             \
  PPC_DQ_OPCODE_LIST(V)             \
  PPC_MDS_OPCODE_LIST(V)            \
  PPC_MD_OPCODE_LIST(V)             \
  PPC_XS_OPCODE_LIST(V)             \
  PPC_D_OPCODE_LIST(V)              \
  PPC_I_OPCODE_LIST(V)              \
  PPC_B_OPCODE_LIST(V)              \
  PPC_XL_OPCODE_LIST(V)             \
  PPC_A_OPCODE_LIST(V)              \
  PPC_XFX_OPCODE_LIST(V)            \
  PPC_M_OPCODE_LIST(V)              \
  PPC_SC_OPCODE_LIST(V)             \
  PPC_Z23_OPCODE_LIST(V)            \
  PPC_Z22_OPCODE_LIST(V)            \
  PPC_EVX_OPCODE_LIST(V)            \
  PPC_XFL_OPCODE_LIST(V)            \
  PPC_EVS_OPCODE_LIST(V)            \
  PPC_VX_OPCODE_LIST(V)             \
  PPC_VA_OPCODE_LIST(V)             \
  PPC_VC_OPCODE_LIST(V)             \
  PPC_XX1_OPCODE_LIST(V)            \
  PPC_XX2_OPCODE_LIST(V)            \
  PPC_XX3_OPCODE_VECTOR_LIST(V)     \
  PPC_XX3_OPCODE_SCALAR_LIST(V)     \
  PPC_XX4_OPCODE_LIST(V)            \
  PPC_PREFIX_OPCODE_TYPE_00_LIST(V) \
  PPC_PREFIX_OPCODE_TYPE_10_LIST(V)
2721 
2722 enum Opcode : uint32_t {
2723 #define DECLARE_INSTRUCTION(name, opcode_name, opcode_value) \
2724   opcode_name = opcode_value,
2725   PPC_OPCODE_LIST(DECLARE_INSTRUCTION)
2726 #undef DECLARE_INSTRUCTION
2727       EXTP = 0x4000000,  // Extended code set prefixed
2728   EXT0 = 0x10000000,     // Extended code set 0
2729   EXT1 = 0x4C000000,     // Extended code set 1
2730   EXT2 = 0x7C000000,     // Extended code set 2
2731   EXT3 = 0xEC000000,     // Extended code set 3
2732   EXT4 = 0xFC000000,     // Extended code set 4
2733   EXT5 = 0x78000000,     // Extended code set 5 - 64bit only
2734   EXT6 = 0xF0000000,     // Extended code set 6
2735 };
2736 
// Instruction encoding bits and masks.
enum {
  // Single instruction-encoding bits, in ascending bit order.
  B1 = 1 << 1,
  B2 = 1 << 2,
  B3 = 1 << 3,
  B4 = 1 << 4,
  B5 = 1 << 5,
  B6 = 1 << 6,
  B7 = 1 << 7,
  B8 = 1 << 8,
  B9 = 1 << 9,
  B10 = 1 << 10,
  B11 = 1 << 11,
  B12 = 1 << 12,
  B16 = 1 << 16,
  B17 = 1 << 17,
  B18 = 1 << 18,
  B19 = 1 << 19,
  B20 = 1 << 20,
  B21 = 1 << 21,
  B22 = 1 << 22,
  B23 = 1 << 23,
  B24 = 1 << 24,
  B25 = 1 << 25,
  B26 = 1 << 26,
  B27 = 1 << 27,
  B28 = 1 << 28,

  // Instruction field masks.
  kCondMask = 0x1F << 21,
  kOff12Mask = (1 << 12) - 1,
  kImm24Mask = (1 << 24) - 1,
  kOff16Mask = (1 << 16) - 1,
  kImm16Mask = (1 << 16) - 1,
  kImm18Mask = (1 << 18) - 1,
  kImm22Mask = (1 << 22) - 1,
  kImm26Mask = (1 << 26) - 1,
  kBOfieldMask = 0x1f << 21,
  kOpcodeMask = 0x3f << 26,
  kExt1OpcodeMask = 0x3ff << 1,
  kExt2OpcodeMask = 0x3ff << 1,
  kExt2OpcodeVariant2Mask = 0x1ff << 2,
  kExt5OpcodeMask = 0x3 << 2,
  kBOMask = 0x1f << 21,
  kBIMask = 0x1F << 16,
  // NOTE(review): 0x14 is an unusual value for a mask constant; kept
  // byte-identical to the original -- confirm against users of kBDMask.
  kBDMask = 0x14 << 2,
  kAAMask = 0x01 << 1,
  kLKMask = 0x01,
  kRCMask = 0x01,
  kTOMask = 0x1f << 21
};
2789 
2790 // -----------------------------------------------------------------------------
2791 // Addressing modes and instruction variants.
2792 
// Overflow Exception bit (OE, bit 10).
enum OEBit {
  LeaveOE = 0 << 10,  // do not record overflow
  SetOE = 1 << 10     // record overflow (set overflow exception)
};
2798 
// Record bit (Rc, bit 0): when set, the result is recorded in CR0.
enum RCBit {
  LeaveRC = 0,  // no condition-register update
  SetRC = 1     // update LT,GT,EQ,SO
};
// Exclusive Access hint bit (EH, bit 0).
enum EHBit {
  LeaveEH = 0,  // atomic update
  SetEH = 1     // exclusive access
};
2809 
// Link bit (LK, bit 0).
enum LKBit {
  LeaveLK = 0,  // plain branch, no action
  SetLK = 1     // load effective address of next instruction (into LR)
};
2815 
// Prefixed-instruction R bit.
enum PRBit {
  LeavePR = 0,
  SetPR = 1
};
2818 
// BO field (bits 25-21): selects the branch-conditional behaviour.
enum BOfield {
  DCBNZF = 0 << 21,   // CTR--; branch if CTR != 0 and condition false
  DCBEZF = 2 << 21,   // CTR--; branch if CTR == 0 and condition false
  BF = 4 << 21,       // branch if condition false
  DCBNZT = 8 << 21,   // CTR--; branch if CTR != 0 and condition true
  DCBEZT = 10 << 21,  // CTR--; branch if CTR == 0 and condition true
  BT = 12 << 21,      // branch if condition true
  DCBNZ = 16 << 21,   // CTR--; branch if CTR != 0
  DCBEZ = 18 << 21,   // CTR--; branch if CTR == 0
  BA = 20 << 21       // branch always
};
2830 
#if V8_OS_AIX
// AIX system headers define these names as macros; remove them so the
// enumerators below can be declared.
#undef CR_LT
#undef CR_GT
#undef CR_EQ
#undef CR_SO
#endif

// Bit positions within a condition-register field. CR_SO (summary overflow)
// and CR_FU (floating-point unordered) share bit 3.
enum CRBit { CR_LT = 0, CR_GT = 1, CR_EQ = 2, CR_SO = 3, CR_FU = 3 };

// Width in bits of one condition-register field.
#define CRWIDTH 4
2841 
// FPSCR bit numbers, biased down by 32 from the documented positions.
enum FPSCRBit {
  VXSOFT = 21,  // documented bit 53: Software-Defined Condition
  VXSQRT = 22,  // documented bit 54: Invalid Square Root
  VXCVI = 23    // documented bit 55: Invalid Integer Convert
};
2848 
2849 // -----------------------------------------------------------------------------
2850 // Supervisor Call (svc) specific support.
2851 
2852 // Special Software Interrupt codes when used in the presence of the PPC
2853 // simulator.
2854 // svc (formerly swi) provides a 24bit immediate value. Use bits 22:0 for
2855 // standard SoftwareInterrupCode. Bit 23 is reserved for the stop feature.
2856 enum SoftwareInterruptCodes {
2857   // transition to C code
2858   kCallRtRedirected = 0x10,
2859   // break point
2860   kBreakpoint = 0x821008,  // bits23-0 of 0x7d821008 = twge r2, r2
2861   // stop
2862   kStopCode = 1 << 23
2863 };
2864 const uint32_t kStopCodeMask = kStopCode - 1;
2865 const uint32_t kMaxStopCode = kStopCode - 1;
2866 const int32_t kDefaultStopCode = -1;
2867 
// FP rounding modes (the FPSCR RN encoding).
enum FPRoundingMode {
  RN = 0,  // round to nearest
  RZ = 1,  // round towards zero
  RP = 2,  // round towards plus infinity
  RM = 3,  // round towards minus infinity

  // Descriptive aliases for the modes above.
  kRoundToNearest = RN,
  kRoundToZero = RZ,
  kRoundToPlusInf = RP,
  kRoundToMinusInf = RM
};

// The two low bits select the rounding mode.
const uint32_t kFPRoundingModeMask = 3;
2883 
// Whether a float -> integer conversion should be checked for inexactness.
enum CheckForInexactConversion {
  kCheckForInexactConversion = 0,
  kDontCheckForInexactConversion = 1
};
2888 
2889 // -----------------------------------------------------------------------------
2890 // Specific instructions, constants, and masks.
2891 // These constants are declared in assembler-arm.cc, as they use named registers
2892 // and other constants.
2893 
2894 // add(sp, sp, 4) instruction (aka Pop())
2895 extern const Instr kPopInstruction;
2896 
2897 // str(r, MemOperand(sp, 4, NegPreIndex), al) instruction (aka push(r))
2898 // register r is not encoded.
2899 extern const Instr kPushRegPattern;
2900 
2901 // ldr(r, MemOperand(sp, 4, PostIndex), al) instruction (aka pop(r))
2902 // register r is not encoded.
2903 extern const Instr kPopRegPattern;
2904 
2905 // use TWI to indicate redirection call for simulation mode
2906 const Instr rtCallRedirInstr = TWI;
2907 
2908 // -----------------------------------------------------------------------------
2909 // Instruction abstraction.
2910 
2911 // The class Instruction enables access to individual fields defined in the PPC
2912 // architecture instruction set encoding.
2913 // Note that the Assembler uses typedef int32_t Instr.
2914 //
2915 // Example: Test whether the instruction at ptr does set the condition code
2916 // bits.
2917 //
2918 // bool InstructionSetsConditionCodes(byte* ptr) {
2919 //   Instruction* instr = Instruction::At(ptr);
2920 //   int type = instr->TypeValue();
2921 //   return ((type == 0) || (type == 1)) && instr->HasS();
2922 // }
2923 //
2924 
// Size of one PPC instruction word, its base-2 logarithm, and the delta
// applied when reading the PC.
constexpr uint8_t kInstrSizeLog2 = 2;
constexpr uint8_t kInstrSize = 1 << kInstrSizeLog2;  // 4 bytes
constexpr uint8_t kPcLoadDelta = 8;
2928 
class Instruction {
 public:
// Helper macro to define static accessors.
// We use the cast to char* trick to bypass the strict anti-aliasing rules.
#define DECLARE_STATIC_TYPED_ACCESSOR(return_type, Name) \
  static inline return_type Name(Instr instr) {          \
    char* temp = reinterpret_cast<char*>(&instr);        \
    return reinterpret_cast<Instruction*>(temp)->Name(); \
  }

#define DECLARE_STATIC_ACCESSOR(Name) DECLARE_STATIC_TYPED_ACCESSOR(int, Name)

  // Get the raw instruction bits.
  inline Instr InstructionBits() const {
    return *reinterpret_cast<const Instr*>(this);
  }

  // Set the raw instruction bits to value.
  inline void SetInstructionBits(Instr value) {
    *reinterpret_cast<Instr*>(this) = value;
  }

  // Read one particular bit out of the instruction bits.
  inline int Bit(int nr) const { return (InstructionBits() >> nr) & 1; }

  // Read a bit field's value out of the instruction bits, shifted down to
  // bit 0.  (2 << (hi - lo)) - 1 is a run of (hi - lo + 1) one-bits.
  inline int Bits(int hi, int lo) const {
    return (InstructionBits() >> lo) & ((2 << (hi - lo)) - 1);
  }

  // Read a bit field out of the instruction bits, kept in place (not
  // shifted down).
  inline uint32_t BitField(int hi, int lo) const {
    return InstructionBits() & (((2 << (hi - lo)) - 1) << lo);
  }

  // Static support: the same accessors operating on an explicit Instr value.

  // Read one particular bit out of the instruction bits.
  static inline int Bit(Instr instr, int nr) { return (instr >> nr) & 1; }

  // Read the value of a bit field out of the instruction bits.
  static inline int Bits(Instr instr, int hi, int lo) {
    return (instr >> lo) & ((2 << (hi - lo)) - 1);
  }

  // Read a bit field out of the instruction bits (in place, unshifted).
  static inline uint32_t BitField(Instr instr, int hi, int lo) {
    return instr & (((2 << (hi - lo)) - 1) << lo);
  }

  // Register fields.  RS and RT occupy the same bits (25-21); which name
  // applies depends on the instruction form.
  inline int RSValue() const { return Bits(25, 21); }
  inline int RTValue() const { return Bits(25, 21); }
  inline int RAValue() const { return Bits(20, 16); }
  DECLARE_STATIC_ACCESSOR(RAValue)
  inline int RBValue() const { return Bits(15, 11); }
  DECLARE_STATIC_ACCESSOR(RBValue)
  inline int RCValue() const { return Bits(10, 6); }
  DECLARE_STATIC_ACCESSOR(RCValue)

  // Primary opcode (bits 31-26), shifted down to bit 0.
  inline int OpcodeValue() const { return static_cast<Opcode>(Bits(31, 26)); }
  // Primary opcode (bits 31-26), kept in place.
  inline uint32_t OpcodeField() const {
    return static_cast<Opcode>(BitField(31, 26));
  }
  // Top seven bits (31-25), in place; used to recognize prefixed
  // instructions.
  inline uint32_t PrefixOpcodeField() const {
    return static_cast<Opcode>(BitField(31, 25));
  }

// Expands each opcode-list entry into a bare case label; the switches below
// fall through all labels to a shared return.
#define OPCODE_CASES(name, opcode_name, opcode_value) case opcode_name:

  // Decode the full opcode of this instruction by trying each instruction
  // format in turn: OR the format's extended-opcode bit field(s) into the
  // primary opcode and match the result against that format's opcode table.
  inline Opcode OpcodeBase() const {
    uint32_t opcode = PrefixOpcodeField();
    uint32_t extcode = PrefixOpcodeField();
    // Check for prefix.
    switch (opcode) {
      PPC_PREFIX_OPCODE_TYPE_00_LIST(OPCODE_CASES)
      PPC_PREFIX_OPCODE_TYPE_10_LIST(OPCODE_CASES)
      return static_cast<Opcode>(opcode);
    }
    opcode = OpcodeField();
    extcode = OpcodeField();
    // Check for suffix.
    switch (opcode) {
      PPC_PREFIX_OPCODE_TYPE_00_LIST(OPCODE_CASES)
      return static_cast<Opcode>(opcode);
    }
    // Formats identified by the primary opcode alone.
    switch (opcode) {
      PPC_D_OPCODE_LIST(OPCODE_CASES)
      PPC_I_OPCODE_LIST(OPCODE_CASES)
      PPC_B_OPCODE_LIST(OPCODE_CASES)
      PPC_M_OPCODE_LIST(OPCODE_CASES)
      return static_cast<Opcode>(opcode);
    }
    // VA form: extended opcode in bits 5-0.
    opcode = extcode | BitField(5, 0);
    switch (opcode) {
      PPC_VA_OPCODE_LIST(OPCODE_CASES)
      return static_cast<Opcode>(opcode);
    }
    // Some VX opcodes have integers hard coded in the middle, handle those
    // first.
    opcode = extcode | BitField(20, 16) | BitField(10, 0);
    switch (opcode) {
      PPC_VX_OPCODE_D_FORM_LIST(OPCODE_CASES)
      PPC_VX_OPCODE_F_FORM_LIST(OPCODE_CASES)
      return static_cast<Opcode>(opcode);
    }
    opcode = extcode | BitField(10, 0);
    switch (opcode) {
      PPC_VX_OPCODE_A_FORM_LIST(OPCODE_CASES)
      PPC_VX_OPCODE_B_FORM_LIST(OPCODE_CASES)
      PPC_VX_OPCODE_C_FORM_LIST(OPCODE_CASES)
      PPC_VX_OPCODE_E_FORM_LIST(OPCODE_CASES)
      PPC_VX_OPCODE_G_FORM_LIST(OPCODE_CASES)
      PPC_VX_OPCODE_UNUSED_LIST(OPCODE_CASES)
      PPC_X_OPCODE_EH_S_FORM_LIST(OPCODE_CASES)
      return static_cast<Opcode>(opcode);
    }
    // VC form: extended opcode in bits 9-0.
    opcode = extcode | BitField(9, 0);
    switch (opcode) {
      PPC_VC_OPCODE_LIST(OPCODE_CASES)
      return static_cast<Opcode>(opcode);
    }
    // XFX form: extended opcode in bits 10-1 plus bit 20.
    opcode = extcode | BitField(10, 1) | BitField(20, 20);
    switch (opcode) {
      PPC_XFX_OPCODE_LIST(OPCODE_CASES)
      return static_cast<Opcode>(opcode);
    }
    // Some XX2 opcodes have integers hard coded in the middle, handle those
    // first.
    opcode = extcode | BitField(20, 16) | BitField(10, 2);
    switch (opcode) {
      PPC_XX2_OPCODE_B_FORM_LIST(OPCODE_CASES)
      return static_cast<Opcode>(opcode);
    }
    opcode = extcode | BitField(10, 2);
    switch (opcode) {
      PPC_XX2_OPCODE_VECTOR_A_FORM_LIST(OPCODE_CASES)
      PPC_XX2_OPCODE_SCALAR_A_FORM_LIST(OPCODE_CASES)
      PPC_XX2_OPCODE_UNUSED_LIST(OPCODE_CASES)
      return static_cast<Opcode>(opcode);
    }
    // X-like forms: extended opcode in bits 10-1.
    opcode = extcode | BitField(10, 1);
    switch (opcode) {
      PPC_X_OPCODE_LIST(OPCODE_CASES)
      PPC_XL_OPCODE_LIST(OPCODE_CASES)
      PPC_XFL_OPCODE_LIST(OPCODE_CASES)
      PPC_XX1_OPCODE_LIST(OPCODE_CASES)
      PPC_EVX_OPCODE_LIST(OPCODE_CASES)
      return static_cast<Opcode>(opcode);
    }
    // XO / Z22 forms: extended opcode in bits 9-1.
    opcode = extcode | BitField(9, 1);
    switch (opcode) {
      PPC_XO_OPCODE_LIST(OPCODE_CASES)
      PPC_Z22_OPCODE_LIST(OPCODE_CASES)
      return static_cast<Opcode>(opcode);
    }
    // XS form: extended opcode in bits 10-2.
    opcode = extcode | BitField(10, 2);
    switch (opcode) {
      PPC_XS_OPCODE_LIST(OPCODE_CASES)
      return static_cast<Opcode>(opcode);
    }
    // EVS / XX3 forms: extended opcode in bits 10-3.
    opcode = extcode | BitField(10, 3);
    switch (opcode) {
      PPC_EVS_OPCODE_LIST(OPCODE_CASES)
      PPC_XX3_OPCODE_VECTOR_LIST(OPCODE_CASES)
      PPC_XX3_OPCODE_SCALAR_LIST(OPCODE_CASES)
      return static_cast<Opcode>(opcode);
    }
    // Z23 form: extended opcode in bits 8-1.
    opcode = extcode | BitField(8, 1);
    switch (opcode) {
      PPC_Z23_OPCODE_LIST(OPCODE_CASES)
      return static_cast<Opcode>(opcode);
    }
    // A form: extended opcode in bits 5-1.
    opcode = extcode | BitField(5, 1);
    switch (opcode) {
      PPC_A_OPCODE_LIST(OPCODE_CASES)
      return static_cast<Opcode>(opcode);
    }
    // MDS form: extended opcode in bits 4-1.
    opcode = extcode | BitField(4, 1);
    switch (opcode) {
      PPC_MDS_OPCODE_LIST(OPCODE_CASES)
      return static_cast<Opcode>(opcode);
    }
    // MD form: extended opcode in bits 4-2.
    opcode = extcode | BitField(4, 2);
    switch (opcode) {
      PPC_MD_OPCODE_LIST(OPCODE_CASES)
      return static_cast<Opcode>(opcode);
    }
    // XX4 form: extended opcode in bits 5-4.
    opcode = extcode | BitField(5, 4);
    switch (opcode) {
      PPC_XX4_OPCODE_LIST(OPCODE_CASES)
      return static_cast<Opcode>(opcode);
    }
    // DQ form: extended opcode in bits 2-0.
    opcode = extcode | BitField(2, 0);
    switch (opcode) {
      PPC_DQ_OPCODE_LIST(OPCODE_CASES)
      return static_cast<Opcode>(opcode);
    }
    // DS form: extended opcode in bits 1-0.
    opcode = extcode | BitField(1, 0);
    switch (opcode) {
      PPC_DS_OPCODE_LIST(OPCODE_CASES)
      return static_cast<Opcode>(opcode);
    }
    // SC form: extended opcode in bit 1.
    opcode = extcode | BitField(1, 1);
    switch (opcode) {
      PPC_SC_OPCODE_LIST(OPCODE_CASES)
      return static_cast<Opcode>(opcode);
    }
    // No format matched.
    UNIMPLEMENTED();
    return static_cast<Opcode>(0);
  }

#undef OPCODE_CASES

  // Fields used in Software interrupt instructions.
  inline SoftwareInterruptCodes SvcValue() const {
    return static_cast<SoftwareInterruptCodes>(Bits(23, 0));
  }

  // Instructions are read out of a code stream. The only way to get a
  // reference to an instruction is to convert a pointer. There is no way
  // to allocate or create instances of class Instruction.
  // Use the At(pc) function to create references to Instruction.
  static Instruction* At(byte* pc) {
    return reinterpret_cast<Instruction*>(pc);
  }

 private:
  // We need to prevent the creation of instances of class Instruction.
  DISALLOW_IMPLICIT_CONSTRUCTORS(Instruction);
};
3159 
// Helper functions for converting between register numbers and names.
class Registers {
 public:
  // Lookup the register number for the name provided; defined in the
  // corresponding .cc file (presumably returns a sentinel for unknown
  // names -- confirm in the implementation).
  static int Number(const char* name);

 private:
  // Canonical register names, indexed by register number.
  static const char* names_[kNumRegisters];
};
3169 
// Helper functions for converting between FP register numbers and names.
class DoubleRegisters {
 public:
  // Lookup the register number for the name provided; defined in the
  // corresponding .cc file (presumably returns a sentinel for unknown
  // names -- confirm in the implementation).
  static int Number(const char* name);

 private:
  // Canonical FP register names, indexed by register number.
  static const char* names_[kNumDoubleRegisters];
};
3179 }  // namespace internal
3180 }  // namespace v8
3181 
// DWARF register numbers, presumably for unwinding metadata -- confirm at
// the call sites.  NOTE(review): these sit at file scope, outside the
// v8::internal namespace, with internal linkage via `static`.
static constexpr int kR0DwarfCode = 0;
static constexpr int kFpDwarfCode = 31;  // frame-pointer
static constexpr int kLrDwarfCode = 65;  // return-address(lr)
static constexpr int kSpDwarfCode = 1;   // stack-pointer (sp)
3186 
3187 #endif  // V8_CODEGEN_PPC_CONSTANTS_PPC_H_
3188