// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_COMPILER_X64_INSTRUCTION_CODES_X64_H_
#define V8_COMPILER_X64_INSTRUCTION_CODES_X64_H_

namespace v8 {
namespace internal {
namespace compiler {

// X64-specific opcodes that specify which assembly sequence to emit.
// Most opcodes specify a single instruction.
#define TARGET_ARCH_OPCODE_LIST(V) \
  V(X64Add) \
  V(X64Add32) \
  V(X64And) \
  V(X64And32) \
  V(X64Cmp) \
  V(X64Cmp32) \
  V(X64Cmp16) \
  V(X64Cmp8) \
  V(X64Test) \
  V(X64Test32) \
  V(X64Test16) \
  V(X64Test8) \
  V(X64Or) \
  V(X64Or32) \
  V(X64Xor) \
  V(X64Xor32) \
  V(X64Sub) \
  V(X64Sub32) \
  V(X64Imul) \
  V(X64Imul32) \
  V(X64ImulHigh32) \
  V(X64UmulHigh32) \
  V(X64Idiv) \
  V(X64Idiv32) \
  V(X64Udiv) \
  V(X64Udiv32) \
  V(X64Not) \
  V(X64Not32) \
  V(X64Neg) \
  V(X64Neg32) \
  V(X64Shl) \
  V(X64Shl32) \
  V(X64Shr) \
  V(X64Shr32) \
  V(X64Sar) \
  V(X64Sar32) \
  V(X64Ror) \
  V(X64Ror32) \
  V(X64Lzcnt) \
  V(X64Lzcnt32) \
  V(X64Tzcnt) \
  V(X64Tzcnt32) \
  V(X64Popcnt) \
  V(X64Popcnt32) \
  V(X64Bswap) \
  V(X64Bswap32) \
  V(LFence) \
  V(SSEFloat32Cmp) \
  V(SSEFloat32Add) \
  V(SSEFloat32Sub) \
  V(SSEFloat32Mul) \
  V(SSEFloat32Div) \
  V(SSEFloat32Abs) \
  V(SSEFloat32Neg) \
  V(SSEFloat32Sqrt) \
  V(SSEFloat32ToFloat64) \
  V(SSEFloat32ToInt32) \
  V(SSEFloat32ToUint32) \
  V(SSEFloat32Round) \
  V(SSEFloat64Cmp) \
  V(SSEFloat64Add) \
  V(SSEFloat64Sub) \
  V(SSEFloat64Mul) \
  V(SSEFloat64Div) \
  V(SSEFloat64Mod) \
  V(SSEFloat64Abs) \
  V(SSEFloat64Neg) \
  V(SSEFloat64Sqrt) \
  V(SSEFloat64Round) \
  V(SSEFloat32Max) \
  V(SSEFloat64Max) \
  V(SSEFloat32Min) \
  V(SSEFloat64Min) \
  V(SSEFloat64ToFloat32) \
  V(SSEFloat64ToInt32) \
  V(SSEFloat64ToUint32) \
  V(SSEFloat32ToInt64) \
  V(SSEFloat64ToInt64) \
  V(SSEFloat32ToUint64) \
  V(SSEFloat64ToUint64) \
  V(SSEInt32ToFloat64) \
  V(SSEInt32ToFloat32) \
  V(SSEInt64ToFloat32) \
  V(SSEInt64ToFloat64) \
  V(SSEUint64ToFloat32) \
  V(SSEUint64ToFloat64) \
  V(SSEUint32ToFloat64) \
  V(SSEUint32ToFloat32) \
  V(SSEFloat64ExtractLowWord32) \
  V(SSEFloat64ExtractHighWord32) \
  V(SSEFloat64InsertLowWord32) \
  V(SSEFloat64InsertHighWord32) \
  V(SSEFloat64LoadLowWord32) \
  V(SSEFloat64SilenceNaN) \
  V(AVXFloat32Cmp) \
  V(AVXFloat32Add) \
  V(AVXFloat32Sub) \
  V(AVXFloat32Mul) \
  V(AVXFloat32Div) \
  V(AVXFloat64Cmp) \
  V(AVXFloat64Add) \
  V(AVXFloat64Sub) \
  V(AVXFloat64Mul) \
  V(AVXFloat64Div) \
  V(AVXFloat64Abs) \
  V(AVXFloat64Neg) \
  V(AVXFloat32Abs) \
  V(AVXFloat32Neg) \
  V(X64Movsxbl) \
  V(X64Movzxbl) \
  V(X64Movsxbq) \
  V(X64Movzxbq) \
  V(X64Movb) \
  V(X64Movsxwl) \
  V(X64Movzxwl) \
  V(X64Movsxwq) \
  V(X64Movzxwq) \
  V(X64Movw) \
  V(X64Movl) \
  V(X64Movsxlq) \
  V(X64Movq) \
  V(X64Movsd) \
  V(X64Movss) \
  V(X64Movdqu) \
  V(X64BitcastFI) \
  V(X64BitcastDL) \
  V(X64BitcastIF) \
  V(X64BitcastLD) \
  V(X64Lea32) \
  V(X64Lea) \
  V(X64Dec32) \
  V(X64Inc32) \
  V(X64Push) \
  V(X64Poke) \
  V(X64Peek) \
  V(X64StackCheck) \
  V(X64F32x4Splat) \
  V(X64F32x4ExtractLane) \
  V(X64F32x4ReplaceLane) \
  V(X64F32x4Abs) \
  V(X64F32x4Neg) \
  V(X64F32x4RecipApprox) \
  V(X64F32x4RecipSqrtApprox) \
  V(X64F32x4Add) \
  V(X64F32x4AddHoriz) \
  V(X64F32x4Sub) \
  V(X64F32x4Mul) \
  V(X64F32x4Min) \
  V(X64F32x4Max) \
  V(X64F32x4Eq) \
  V(X64F32x4Ne) \
  V(X64F32x4Lt) \
  V(X64F32x4Le) \
  V(X64I32x4Splat) \
  V(X64I32x4ExtractLane) \
  V(X64I32x4ReplaceLane) \
  V(X64I32x4Neg) \
  V(X64I32x4Shl) \
  V(X64I32x4ShrS) \
  V(X64I32x4Add) \
  V(X64I32x4AddHoriz) \
  V(X64I32x4Sub) \
  V(X64I32x4Mul) \
  V(X64I32x4MinS) \
  V(X64I32x4MaxS) \
  V(X64I32x4Eq) \
  V(X64I32x4Ne) \
  V(X64I32x4GtS) \
  V(X64I32x4GeS) \
  V(X64I32x4ShrU) \
  V(X64I32x4MinU) \
  V(X64I32x4MaxU) \
  V(X64I32x4GtU) \
  V(X64I32x4GeU) \
  V(X64I16x8Splat) \
  V(X64I16x8ExtractLane) \
  V(X64I16x8ReplaceLane) \
  V(X64I16x8Neg) \
  V(X64I16x8Shl) \
  V(X64I16x8ShrS) \
  V(X64I16x8Add) \
  V(X64I16x8AddSaturateS) \
  V(X64I16x8AddHoriz) \
  V(X64I16x8Sub) \
  V(X64I16x8SubSaturateS) \
  V(X64I16x8Mul) \
  V(X64I16x8MinS) \
  V(X64I16x8MaxS) \
  V(X64I16x8Eq) \
  V(X64I16x8Ne) \
  V(X64I16x8GtS) \
  V(X64I16x8GeS) \
  V(X64I16x8ShrU) \
  V(X64I16x8AddSaturateU) \
  V(X64I16x8SubSaturateU) \
  V(X64I16x8MinU) \
  V(X64I16x8MaxU) \
  V(X64I16x8GtU) \
  V(X64I16x8GeU) \
  V(X64I8x16Splat) \
  V(X64I8x16ExtractLane) \
  V(X64I8x16ReplaceLane) \
  V(X64I8x16Neg) \
  V(X64I8x16Add) \
  V(X64I8x16AddSaturateS) \
  V(X64I8x16Sub) \
  V(X64I8x16SubSaturateS) \
  V(X64I8x16MinS) \
  V(X64I8x16MaxS) \
  V(X64I8x16Eq) \
  V(X64I8x16Ne) \
  V(X64I8x16GtS) \
  V(X64I8x16GeS) \
  V(X64I8x16AddSaturateU) \
  V(X64I8x16SubSaturateU) \
  V(X64I8x16MinU) \
  V(X64I8x16MaxU) \
  V(X64I8x16GtU) \
  V(X64I8x16GeU) \
  V(X64S128And) \
  V(X64S128Or) \
  V(X64S128Xor) \
  V(X64S128Not) \
  V(X64S128Select) \
  V(X64S128Zero) \
  V(X64Word64AtomicLoadUint8) \
  V(X64Word64AtomicLoadUint16) \
  V(X64Word64AtomicLoadUint32) \
  V(X64Word64AtomicLoadUint64) \
  V(X64Word64AtomicStoreWord8) \
  V(X64Word64AtomicStoreWord16) \
  V(X64Word64AtomicStoreWord32) \
  V(X64Word64AtomicStoreWord64) \
  V(X64Word64AtomicAddUint8) \
  V(X64Word64AtomicAddUint16) \
  V(X64Word64AtomicAddUint32) \
  V(X64Word64AtomicAddUint64) \
  V(X64Word64AtomicSubUint8) \
  V(X64Word64AtomicSubUint16) \
  V(X64Word64AtomicSubUint32) \
  V(X64Word64AtomicSubUint64) \
  V(X64Word64AtomicAndUint8) \
  V(X64Word64AtomicAndUint16) \
  V(X64Word64AtomicAndUint32) \
  V(X64Word64AtomicAndUint64) \
  V(X64Word64AtomicOrUint8) \
  V(X64Word64AtomicOrUint16) \
  V(X64Word64AtomicOrUint32) \
  V(X64Word64AtomicOrUint64) \
  V(X64Word64AtomicXorUint8) \
  V(X64Word64AtomicXorUint16) \
  V(X64Word64AtomicXorUint32) \
  V(X64Word64AtomicXorUint64) \
  V(X64Word64AtomicExchangeUint8) \
  V(X64Word64AtomicExchangeUint16) \
  V(X64Word64AtomicExchangeUint32) \
  V(X64Word64AtomicExchangeUint64) \
  V(X64Word64AtomicCompareExchangeUint8) \
  V(X64Word64AtomicCompareExchangeUint16) \
  V(X64Word64AtomicCompareExchangeUint32) \
  V(X64Word64AtomicCompareExchangeUint64)

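// A minimal sketch (for orientation only; the actual expansion is assumed to
// live with the shared instruction machinery in instruction-codes.h) of how an
// X-macro list like TARGET_ARCH_OPCODE_LIST is typically consumed:
//
//   #define DECLARE_ARCH_OPCODE(Name) k##Name,
//   enum ArchOpcode { TARGET_ARCH_OPCODE_LIST(DECLARE_ARCH_OPCODE) };
//   #undef DECLARE_ARCH_OPCODE
//
// Each V(Name) entry then becomes an enum constant such as kX64Add, which the
// instruction selector records in an InstructionCode and the code generator
// later switches over to emit the corresponding assembly sequence.
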
// Addressing modes represent the "shape" of inputs to an instruction.
// Many instructions support multiple addressing modes. Addressing modes
// are encoded into the InstructionCode of the instruction and tell the
// code generator after register allocation which assembler method to call.
//
// We use the following local notation for addressing modes:
//
// M = memory operand
// R = base register
// N = index register * N for N in {1, 2, 4, 8}
// I = immediate displacement (32-bit signed integer)

#define TARGET_ADDRESSING_MODE_LIST(V) \
  V(MR)   /* [%r1            ] */ \
  V(MRI)  /* [%r1         + K] */ \
  V(MR1)  /* [%r1 + %r2*1    ] */ \
  V(MR2)  /* [%r1 + %r2*2    ] */ \
  V(MR4)  /* [%r1 + %r2*4    ] */ \
  V(MR8)  /* [%r1 + %r2*8    ] */ \
  V(MR1I) /* [%r1 + %r2*1 + K] */ \
  V(MR2I) /* [%r1 + %r2*2 + K] */ \
  V(MR4I) /* [%r1 + %r2*4 + K] */ \
  V(MR8I) /* [%r1 + %r2*8 + K] */ \
  V(M1)   /* [      %r2*1    ] */ \
  V(M2)   /* [      %r2*2    ] */ \
  V(M4)   /* [      %r2*4    ] */ \
  V(M8)   /* [      %r2*8    ] */ \
  V(M1I)  /* [      %r2*1 + K] */ \
  V(M2I)  /* [      %r2*2 + K] */ \
  V(M4I)  /* [      %r2*4 + K] */ \
  V(M8I)  /* [      %r2*8 + K] */ \
  V(Root) /* [%root       + K] */

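// As an illustration of the notation above (register names and the
// displacement here are hypothetical, chosen only for the example): an
// instruction whose InstructionCode combines the opcode kX64Movl with
// addressing mode MR1I takes a base register, an index register and an
// immediate as inputs, and its memory operand has the shape
//
//   [rbx + rcx*1 + 16]
//
// while the same opcode with mode MR would simply address [rbx]. Because the
// mode is encoded in the InstructionCode, the code generator can decode it
// after register allocation and build the matching assembler operand.
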
}  // namespace compiler
}  // namespace internal
}  // namespace v8

#endif  // V8_COMPILER_X64_INSTRUCTION_CODES_X64_H_