// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_COMPILER_X64_INSTRUCTION_CODES_X64_H_
#define V8_COMPILER_X64_INSTRUCTION_CODES_X64_H_

namespace v8 {
namespace internal {
namespace compiler {

// X64-specific opcodes that specify which assembly sequence to emit.
// Most opcodes specify a single instruction.
#define TARGET_ARCH_OPCODE_LIST(V) \
  V(X64Add)                        \
  V(X64Add32)                      \
  V(X64And)                        \
  V(X64And32)                      \
  V(X64Cmp)                        \
  V(X64Cmp32)                      \
  V(X64Cmp16)                      \
  V(X64Cmp8)                       \
  V(X64Test)                       \
  V(X64Test32)                     \
  V(X64Test16)                     \
  V(X64Test8)                      \
  V(X64Or)                         \
  V(X64Or32)                       \
  V(X64Xor)                        \
  V(X64Xor32)                      \
  V(X64Sub)                        \
  V(X64Sub32)                      \
  V(X64Imul)                       \
  V(X64Imul32)                     \
  V(X64ImulHigh32)                 \
  V(X64UmulHigh32)                 \
  V(X64Idiv)                       \
  V(X64Idiv32)                     \
  V(X64Udiv)                       \
  V(X64Udiv32)                     \
  V(X64Not)                        \
  V(X64Not32)                      \
  V(X64Neg)                        \
  V(X64Neg32)                      \
  V(X64Shl)                        \
  V(X64Shl32)                      \
  V(X64Shr)                        \
  V(X64Shr32)                      \
  V(X64Sar)                        \
  V(X64Sar32)                      \
  V(X64Ror)                        \
  V(X64Ror32)                      \
  V(X64Lzcnt)                      \
  V(X64Lzcnt32)                    \
  V(X64Tzcnt)                      \
  V(X64Tzcnt32)                    \
  V(X64Popcnt)                     \
  V(X64Popcnt32)                   \
  V(SSEFloat32Cmp)                 \
  V(SSEFloat32Add)                 \
  V(SSEFloat32Sub)                 \
  V(SSEFloat32Mul)                 \
  V(SSEFloat32Div)                 \
  V(SSEFloat32Abs)                 \
  V(SSEFloat32Neg)                 \
  V(SSEFloat32Sqrt)                \
  V(SSEFloat32Max)                 \
  V(SSEFloat32Min)                 \
  V(SSEFloat32ToFloat64)           \
  V(SSEFloat32ToInt32)             \
  V(SSEFloat32ToUint32)            \
  V(SSEFloat32Round)               \
  V(SSEFloat64Cmp)                 \
  V(SSEFloat64Add)                 \
  V(SSEFloat64Sub)                 \
  V(SSEFloat64Mul)                 \
  V(SSEFloat64Div)                 \
  V(SSEFloat64Mod)                 \
  V(SSEFloat64Abs)                 \
  V(SSEFloat64Neg)                 \
  V(SSEFloat64Sqrt)                \
  V(SSEFloat64Round)               \
  V(SSEFloat64Max)                 \
  V(SSEFloat64Min)                 \
  V(SSEFloat64ToFloat32)           \
  V(SSEFloat64ToInt32)             \
  V(SSEFloat64ToUint32)            \
  V(SSEFloat32ToInt64)             \
  V(SSEFloat64ToInt64)             \
  V(SSEFloat32ToUint64)            \
  V(SSEFloat64ToUint64)            \
  V(SSEInt32ToFloat64)             \
  V(SSEInt32ToFloat32)             \
  V(SSEInt64ToFloat32)             \
  V(SSEInt64ToFloat64)             \
  V(SSEUint64ToFloat32)            \
  V(SSEUint64ToFloat64)            \
  V(SSEUint32ToFloat64)            \
  V(SSEUint32ToFloat32)            \
  V(SSEFloat64ExtractLowWord32)    \
  V(SSEFloat64ExtractHighWord32)   \
  V(SSEFloat64InsertLowWord32)     \
  V(SSEFloat64InsertHighWord32)    \
  V(SSEFloat64LoadLowWord32)       \
  V(SSEFloat64SilenceNaN)          \
  V(AVXFloat32Cmp)                 \
  V(AVXFloat32Add)                 \
  V(AVXFloat32Sub)                 \
  V(AVXFloat32Mul)                 \
  V(AVXFloat32Div)                 \
  V(AVXFloat32Max)                 \
  V(AVXFloat32Min)                 \
  V(AVXFloat64Cmp)                 \
  V(AVXFloat64Add)                 \
  V(AVXFloat64Sub)                 \
  V(AVXFloat64Mul)                 \
  V(AVXFloat64Div)                 \
  V(AVXFloat64Max)                 \
  V(AVXFloat64Min)                 \
  V(AVXFloat64Abs)                 \
  V(AVXFloat64Neg)                 \
  V(AVXFloat32Abs)                 \
  V(AVXFloat32Neg)                 \
  V(X64Movsxbl)                    \
  V(X64Movzxbl)                    \
  V(X64Movb)                       \
  V(X64Movsxwl)                    \
  V(X64Movzxwl)                    \
  V(X64Movw)                       \
  V(X64Movl)                       \
  V(X64Movsxlq)                    \
  V(X64Movq)                       \
  V(X64Movsd)                      \
  V(X64Movss)                      \
  V(X64BitcastFI)                  \
  V(X64BitcastDL)                  \
  V(X64BitcastIF)                  \
  V(X64BitcastLD)                  \
  V(X64Lea32)                      \
  V(X64Lea)                        \
  V(X64Dec32)                      \
  V(X64Inc32)                      \
  V(X64Push)                       \
  V(X64Poke)                       \
  V(X64StackCheck)                 \
  V(X64Xchgb)                      \
  V(X64Xchgw)                      \
  V(X64Xchgl)
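
// NOTE (editorial sketch, not part of the original header): the list above
// follows the X-macro pattern, with the caller supplying the V macro. A
// minimal illustration of how such a list is typically expanded into an
// opcode enum (the DECLARE_OPCODE and ExampleArchOpcode names below are
// assumptions for illustration only):
//
//   #define DECLARE_OPCODE(Name) k##Name,
//   enum ExampleArchOpcode {
//     TARGET_ARCH_OPCODE_LIST(DECLARE_OPCODE)  // kX64Add, kX64Add32, ...
//   };
//   #undef DECLARE_OPCODE
//
// Each V(X64Add) entry then contributes one enumerator (kX64Add), so new
// opcodes are added by editing only the list above.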

// Addressing modes represent the "shape" of inputs to an instruction.
// Many instructions support multiple addressing modes. Addressing modes
// are encoded into the InstructionCode of the instruction and tell the
// code generator after register allocation which assembler method to call.
//
// We use the following local notation for addressing modes:
//
// M = memory operand
// R = base register
// N = index register * N for N in {1, 2, 4, 8}
// I = immediate displacement (32-bit signed integer)

#define TARGET_ADDRESSING_MODE_LIST(V) \
  V(MR)   /* [%r1            ] */      \
  V(MRI)  /* [%r1         + K] */      \
  V(MR1)  /* [%r1 + %r2*1    ] */      \
  V(MR2)  /* [%r1 + %r2*2    ] */      \
  V(MR4)  /* [%r1 + %r2*4    ] */      \
  V(MR8)  /* [%r1 + %r2*8    ] */      \
  V(MR1I) /* [%r1 + %r2*1 + K] */      \
  V(MR2I) /* [%r1 + %r2*2 + K] */      \
  V(MR4I) /* [%r1 + %r2*4 + K] */      \
  V(MR8I) /* [%r1 + %r2*8 + K] */      \
  V(M1)   /* [      %r2*1    ] */      \
  V(M2)   /* [      %r2*2    ] */      \
  V(M4)   /* [      %r2*4    ] */      \
  V(M8)   /* [      %r2*8    ] */      \
  V(M1I)  /* [      %r2*1 + K] */      \
  V(M2I)  /* [      %r2*2 + K] */      \
  V(M4I)  /* [      %r2*4 + K] */      \
  V(M8I)  /* [      %r2*8 + K] */

}  // namespace compiler
}  // namespace internal
}  // namespace v8

#endif  // V8_COMPILER_X64_INSTRUCTION_CODES_X64_H_
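
// NOTE (editorial sketch, not part of the original header): the selected
// addressing mode is packed into the same InstructionCode word as the arch
// opcode, which is how the code generator later knows which assembler
// overload and operand shape to use. Roughly, and assuming the
// AddressingModeField bit field and the kMode_* enumerators defined in the
// platform-independent instruction-codes.h:
//
//   // Instruction selector: request a 32-bit load of shape [%r1 + K].
//   InstructionCode code =
//       kX64Movl | AddressingModeField::encode(kMode_MRI);
//
//   // Code generator, after register allocation: recover the mode and
//   // build the matching memory operand.
//   AddressingMode mode = AddressingModeField::decode(instr->opcode());
//
// The kMode_* enumerators come from expanding TARGET_ADDRESSING_MODE_LIST,
// mirroring the opcode expansion sketched earlier.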