// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_COMPILER_X64_INSTRUCTION_CODES_X64_H_
#define V8_COMPILER_X64_INSTRUCTION_CODES_X64_H_

namespace v8 {
namespace internal {
namespace compiler {

// X64-specific opcodes that specify which assembly sequence to emit.
// Most opcodes specify a single instruction.
#define TARGET_ARCH_OPCODE_LIST(V) \
  V(X64Add)                        \
  V(X64Add32)                      \
  V(X64And)                        \
  V(X64And32)                      \
  V(X64Cmp)                        \
  V(X64Cmp32)                      \
  V(X64Test)                       \
  V(X64Test32)                     \
  V(X64Or)                         \
  V(X64Or32)                       \
  V(X64Xor)                        \
  V(X64Xor32)                      \
  V(X64Sub)                        \
  V(X64Sub32)                      \
  V(X64Imul)                       \
  V(X64Imul32)                     \
  V(X64ImulHigh32)                 \
  V(X64UmulHigh32)                 \
  V(X64Idiv)                       \
  V(X64Idiv32)                     \
  V(X64Udiv)                       \
  V(X64Udiv32)                     \
  V(X64Not)                        \
  V(X64Not32)                      \
  V(X64Neg)                        \
  V(X64Neg32)                      \
  V(X64Shl)                        \
  V(X64Shl32)                      \
  V(X64Shr)                        \
  V(X64Shr32)                      \
  V(X64Sar)                        \
  V(X64Sar32)                      \
  V(X64Ror)                        \
  V(X64Ror32)                      \
  V(X64Lzcnt)                      \
  V(X64Lzcnt32)                    \
  V(X64Tzcnt)                      \
  V(X64Tzcnt32)                    \
  V(X64Popcnt)                     \
  V(X64Popcnt32)                   \
  V(SSEFloat32Cmp)                 \
  V(SSEFloat32Add)                 \
  V(SSEFloat32Sub)                 \
  V(SSEFloat32Mul)                 \
  V(SSEFloat32Div)                 \
  V(SSEFloat32Abs)                 \
  V(SSEFloat32Neg)                 \
  V(SSEFloat32Sqrt)                \
  V(SSEFloat32Max)                 \
  V(SSEFloat32Min)                 \
  V(SSEFloat32ToFloat64)           \
  V(SSEFloat32Round)               \
  V(SSEFloat64Cmp)                 \
  V(SSEFloat64Add)                 \
  V(SSEFloat64Sub)                 \
  V(SSEFloat64Mul)                 \
  V(SSEFloat64Div)                 \
  V(SSEFloat64Mod)                 \
  V(SSEFloat64Abs)                 \
  V(SSEFloat64Neg)                 \
  V(SSEFloat64Sqrt)                \
  V(SSEFloat64Round)               \
  V(SSEFloat64Max)                 \
  V(SSEFloat64Min)                 \
  V(SSEFloat64ToFloat32)           \
  V(SSEFloat64ToInt32)             \
  V(SSEFloat64ToUint32)            \
  V(SSEFloat32ToInt64)             \
  V(SSEFloat64ToInt64)             \
  V(SSEFloat32ToUint64)            \
  V(SSEFloat64ToUint64)            \
  V(SSEInt32ToFloat64)             \
  V(SSEInt64ToFloat32)             \
  V(SSEInt64ToFloat64)             \
  V(SSEUint64ToFloat32)            \
  V(SSEUint64ToFloat64)            \
  V(SSEUint32ToFloat64)            \
  V(SSEFloat64ExtractLowWord32)    \
  V(SSEFloat64ExtractHighWord32)   \
  V(SSEFloat64InsertLowWord32)     \
  V(SSEFloat64InsertHighWord32)    \
  V(SSEFloat64LoadLowWord32)       \
  V(AVXFloat32Cmp)                 \
  V(AVXFloat32Add)                 \
  V(AVXFloat32Sub)                 \
  V(AVXFloat32Mul)                 \
  V(AVXFloat32Div)                 \
  V(AVXFloat32Max)                 \
  V(AVXFloat32Min)                 \
  V(AVXFloat64Cmp)                 \
  V(AVXFloat64Add)                 \
  V(AVXFloat64Sub)                 \
  V(AVXFloat64Mul)                 \
  V(AVXFloat64Div)                 \
  V(AVXFloat64Max)                 \
  V(AVXFloat64Min)                 \
  V(AVXFloat64Abs)                 \
  V(AVXFloat64Neg)                 \
  V(AVXFloat32Abs)                 \
  V(AVXFloat32Neg)                 \
  V(X64Movsxbl)                    \
  V(X64Movzxbl)                    \
  V(X64Movb)                       \
  V(X64Movsxwl)                    \
  V(X64Movzxwl)                    \
  V(X64Movw)                       \
  V(X64Movl)                       \
  V(X64Movsxlq)                    \
  V(X64Movq)                       \
  V(X64Movsd)                      \
  V(X64Movss)                      \
  V(X64BitcastFI)                  \
  V(X64BitcastDL)                  \
  V(X64BitcastIF)                  \
  V(X64BitcastLD)                  \
  V(X64Lea32)                      \
  V(X64Lea)                        \
  V(X64Dec32)                      \
  V(X64Inc32)                      \
  V(X64Push)                       \
  V(X64Poke)                       \
  V(X64StackCheck)
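
// Illustrative only: TARGET_ARCH_OPCODE_LIST is an X-macro. A client passes
// its own V to stamp out one entry per opcode; a minimal sketch of the usual
// expansion (the shared instruction-codes.h consumes the list roughly this
// way, give or take naming) is:
//
//   #define DECLARE_ARCH_OPCODE(Name) k##Name,
//   enum ArchOpcode { TARGET_ARCH_OPCODE_LIST(DECLARE_ARCH_OPCODE) };
//   #undef DECLARE_ARCH_OPCODE
//
// which yields the enumerators kX64Add, kX64Add32, ..., kX64StackCheck.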


// Addressing modes represent the "shape" of inputs to an instruction.
// Many instructions support multiple addressing modes. Addressing modes
// are encoded into the InstructionCode of the instruction and tell the
// code generator after register allocation which assembler method to call.
//
// We use the following local notation for addressing modes:
//
// M = memory operand
// R = base register
// N = index register * N for N in {1, 2, 4, 8}
// I = immediate displacement (32-bit signed integer)

#define TARGET_ADDRESSING_MODE_LIST(V) \
  V(MR)   /* [%r1            ] */      \
  V(MRI)  /* [%r1         + K] */      \
  V(MR1)  /* [%r1 + %r2*1    ] */      \
  V(MR2)  /* [%r1 + %r2*2    ] */      \
  V(MR4)  /* [%r1 + %r2*4    ] */      \
  V(MR8)  /* [%r1 + %r2*8    ] */      \
  V(MR1I) /* [%r1 + %r2*1 + K] */      \
  V(MR2I) /* [%r1 + %r2*2 + K] */      \
  V(MR4I) /* [%r1 + %r2*4 + K] */      \
  V(MR8I) /* [%r1 + %r2*8 + K] */      \
  V(M1)   /* [      %r2*1    ] */      \
  V(M2)   /* [      %r2*2    ] */      \
  V(M4)   /* [      %r2*4    ] */      \
  V(M8)   /* [      %r2*8    ] */      \
  V(M1I)  /* [      %r2*1 + K] */      \
  V(M2I)  /* [      %r2*2 + K] */      \
  V(M4I)  /* [      %r2*4 + K] */      \
  V(M8I)  /* [      %r2*8 + K] */
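
// A hedged sketch of how a mode gets attached to an opcode: the instruction
// selector ORs one of the modes above into the InstructionCode bitfield
// (field and enum names here mirror instruction-codes.h, but treat the exact
// encoding as an assumption, not a spec):
//
//   InstructionCode opcode =
//       kX64Movl | AddressingModeField::encode(kMode_MRI);
//
// After register allocation, the code generator decodes the field and picks
// the matching assembler call, e.g. movl(dst, Operand(base, K)) for MRI.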

}  // namespace compiler
}  // namespace internal
}  // namespace v8

#endif  // V8_COMPILER_X64_INSTRUCTION_CODES_X64_H_