1 //===-- X86MCCodeEmitter.cpp - Convert X86 code to machine code -----------===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This file implements the X86MCCodeEmitter class.
11 //
12 //===----------------------------------------------------------------------===//
13
14 #include "MCTargetDesc/X86MCTargetDesc.h"
15 #include "MCTargetDesc/X86BaseInfo.h"
16 #include "MCTargetDesc/X86FixupKinds.h"
17 #include "llvm/MC/MCCodeEmitter.h"
18 #include "llvm/MC/MCContext.h"
19 #include "llvm/MC/MCExpr.h"
20 #include "llvm/MC/MCInst.h"
21 #include "llvm/MC/MCInstrInfo.h"
22 #include "llvm/MC/MCRegisterInfo.h"
23 #include "llvm/MC/MCSubtargetInfo.h"
24 #include "llvm/MC/MCSymbol.h"
25 #include "llvm/Support/raw_ostream.h"
26
27 using namespace llvm;
28
29 #define DEBUG_TYPE "mccodeemitter"
30
31 namespace {
32 class X86MCCodeEmitter : public MCCodeEmitter {
33 X86MCCodeEmitter(const X86MCCodeEmitter &) = delete;
34 void operator=(const X86MCCodeEmitter &) = delete;
35 const MCInstrInfo &MCII;
36 MCContext &Ctx;
37 public:
X86MCCodeEmitter(const MCInstrInfo & mcii,MCContext & ctx)38 X86MCCodeEmitter(const MCInstrInfo &mcii, MCContext &ctx)
39 : MCII(mcii), Ctx(ctx) {
40 }
41
~X86MCCodeEmitter()42 ~X86MCCodeEmitter() override {}
43
is64BitMode(const MCSubtargetInfo & STI) const44 bool is64BitMode(const MCSubtargetInfo &STI) const {
45 return STI.getFeatureBits()[X86::Mode64Bit];
46 }
47
is32BitMode(const MCSubtargetInfo & STI) const48 bool is32BitMode(const MCSubtargetInfo &STI) const {
49 return STI.getFeatureBits()[X86::Mode32Bit];
50 }
51
is16BitMode(const MCSubtargetInfo & STI) const52 bool is16BitMode(const MCSubtargetInfo &STI) const {
53 return STI.getFeatureBits()[X86::Mode16Bit];
54 }
55
56 /// Is16BitMemOperand - Return true if the specified instruction has
57 /// a 16-bit memory operand. Op specifies the operand # of the memoperand.
Is16BitMemOperand(const MCInst & MI,unsigned Op,const MCSubtargetInfo & STI) const58 bool Is16BitMemOperand(const MCInst &MI, unsigned Op,
59 const MCSubtargetInfo &STI) const {
60 const MCOperand &BaseReg = MI.getOperand(Op+X86::AddrBaseReg);
61 const MCOperand &IndexReg = MI.getOperand(Op+X86::AddrIndexReg);
62 const MCOperand &Disp = MI.getOperand(Op+X86::AddrDisp);
63
64 if (is16BitMode(STI) && BaseReg.getReg() == 0 &&
65 Disp.isImm() && Disp.getImm() < 0x10000)
66 return true;
67 if ((BaseReg.getReg() != 0 &&
68 X86MCRegisterClasses[X86::GR16RegClassID].contains(BaseReg.getReg())) ||
69 (IndexReg.getReg() != 0 &&
70 X86MCRegisterClasses[X86::GR16RegClassID].contains(IndexReg.getReg())))
71 return true;
72 return false;
73 }
74
GetX86RegNum(const MCOperand & MO) const75 unsigned GetX86RegNum(const MCOperand &MO) const {
76 return Ctx.getRegisterInfo()->getEncodingValue(MO.getReg()) & 0x7;
77 }
78
getX86RegEncoding(const MCInst & MI,unsigned OpNum) const79 unsigned getX86RegEncoding(const MCInst &MI, unsigned OpNum) const {
80 return Ctx.getRegisterInfo()->getEncodingValue(
81 MI.getOperand(OpNum).getReg());
82 }
83
isX86_64ExtendedReg(const MCInst & MI,unsigned OpNum) const84 bool isX86_64ExtendedReg(const MCInst &MI, unsigned OpNum) const {
85 return (getX86RegEncoding(MI, OpNum) >> 3) & 1;
86 }
87
EmitByte(uint8_t C,unsigned & CurByte,raw_ostream & OS) const88 void EmitByte(uint8_t C, unsigned &CurByte, raw_ostream &OS) const {
89 OS << (char)C;
90 ++CurByte;
91 }
92
EmitConstant(uint64_t Val,unsigned Size,unsigned & CurByte,raw_ostream & OS) const93 void EmitConstant(uint64_t Val, unsigned Size, unsigned &CurByte,
94 raw_ostream &OS) const {
95 // Output the constant in little endian byte order.
96 for (unsigned i = 0; i != Size; ++i) {
97 EmitByte(Val & 255, CurByte, OS);
98 Val >>= 8;
99 }
100 }
101
102 void EmitImmediate(const MCOperand &Disp, SMLoc Loc,
103 unsigned ImmSize, MCFixupKind FixupKind,
104 unsigned &CurByte, raw_ostream &OS,
105 SmallVectorImpl<MCFixup> &Fixups,
106 int ImmOffset = 0) const;
107
ModRMByte(unsigned Mod,unsigned RegOpcode,unsigned RM)108 inline static uint8_t ModRMByte(unsigned Mod, unsigned RegOpcode,
109 unsigned RM) {
110 assert(Mod < 4 && RegOpcode < 8 && RM < 8 && "ModRM Fields out of range!");
111 return RM | (RegOpcode << 3) | (Mod << 6);
112 }
113
EmitRegModRMByte(const MCOperand & ModRMReg,unsigned RegOpcodeFld,unsigned & CurByte,raw_ostream & OS) const114 void EmitRegModRMByte(const MCOperand &ModRMReg, unsigned RegOpcodeFld,
115 unsigned &CurByte, raw_ostream &OS) const {
116 EmitByte(ModRMByte(3, RegOpcodeFld, GetX86RegNum(ModRMReg)), CurByte, OS);
117 }
118
EmitSIBByte(unsigned SS,unsigned Index,unsigned Base,unsigned & CurByte,raw_ostream & OS) const119 void EmitSIBByte(unsigned SS, unsigned Index, unsigned Base,
120 unsigned &CurByte, raw_ostream &OS) const {
121 // SIB byte is in the same format as the ModRMByte.
122 EmitByte(ModRMByte(SS, Index, Base), CurByte, OS);
123 }
124
125 void emitMemModRMByte(const MCInst &MI, unsigned Op, unsigned RegOpcodeField,
126 uint64_t TSFlags, bool Rex, unsigned &CurByte,
127 raw_ostream &OS, SmallVectorImpl<MCFixup> &Fixups,
128 const MCSubtargetInfo &STI) const;
129
130 void encodeInstruction(const MCInst &MI, raw_ostream &OS,
131 SmallVectorImpl<MCFixup> &Fixups,
132 const MCSubtargetInfo &STI) const override;
133
134 void EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte, int MemOperand,
135 const MCInst &MI, const MCInstrDesc &Desc,
136 raw_ostream &OS) const;
137
138 void EmitSegmentOverridePrefix(unsigned &CurByte, unsigned SegOperand,
139 const MCInst &MI, raw_ostream &OS) const;
140
141 bool emitOpcodePrefix(uint64_t TSFlags, unsigned &CurByte, int MemOperand,
142 const MCInst &MI, const MCInstrDesc &Desc,
143 const MCSubtargetInfo &STI, raw_ostream &OS) const;
144
145 uint8_t DetermineREXPrefix(const MCInst &MI, uint64_t TSFlags,
146 int MemOperand, const MCInstrDesc &Desc) const;
147 };
148
149 } // end anonymous namespace
150
createX86MCCodeEmitter(const MCInstrInfo & MCII,const MCRegisterInfo & MRI,MCContext & Ctx)151 MCCodeEmitter *llvm::createX86MCCodeEmitter(const MCInstrInfo &MCII,
152 const MCRegisterInfo &MRI,
153 MCContext &Ctx) {
154 return new X86MCCodeEmitter(MCII, Ctx);
155 }
156
157 /// isDisp8 - Return true if this signed displacement fits in a 8-bit
158 /// sign-extended field.
isDisp8(int Value)159 static bool isDisp8(int Value) {
160 return Value == (int8_t)Value;
161 }
162
163 /// isCDisp8 - Return true if this signed displacement fits in a 8-bit
164 /// compressed dispacement field.
isCDisp8(uint64_t TSFlags,int Value,int & CValue)165 static bool isCDisp8(uint64_t TSFlags, int Value, int& CValue) {
166 assert(((TSFlags & X86II::EncodingMask) == X86II::EVEX) &&
167 "Compressed 8-bit displacement is only valid for EVEX inst.");
168
169 unsigned CD8_Scale =
170 (TSFlags & X86II::CD8_Scale_Mask) >> X86II::CD8_Scale_Shift;
171 if (CD8_Scale == 0) {
172 CValue = Value;
173 return isDisp8(Value);
174 }
175
176 unsigned Mask = CD8_Scale - 1;
177 assert((CD8_Scale & Mask) == 0 && "Invalid memory object size.");
178 if (Value & Mask) // Unaligned offset
179 return false;
180 Value /= (int)CD8_Scale;
181 bool Ret = (Value == (int8_t)Value);
182
183 if (Ret)
184 CValue = Value;
185 return Ret;
186 }
187
188 /// getImmFixupKind - Return the appropriate fixup kind to use for an immediate
189 /// in an instruction with the specified TSFlags.
getImmFixupKind(uint64_t TSFlags)190 static MCFixupKind getImmFixupKind(uint64_t TSFlags) {
191 unsigned Size = X86II::getSizeOfImm(TSFlags);
192 bool isPCRel = X86II::isImmPCRel(TSFlags);
193
194 if (X86II::isImmSigned(TSFlags)) {
195 switch (Size) {
196 default: llvm_unreachable("Unsupported signed fixup size!");
197 case 4: return MCFixupKind(X86::reloc_signed_4byte);
198 }
199 }
200 return MCFixup::getKindForSize(Size, isPCRel);
201 }
202
203 /// Is32BitMemOperand - Return true if the specified instruction has
204 /// a 32-bit memory operand. Op specifies the operand # of the memoperand.
Is32BitMemOperand(const MCInst & MI,unsigned Op)205 static bool Is32BitMemOperand(const MCInst &MI, unsigned Op) {
206 const MCOperand &BaseReg = MI.getOperand(Op+X86::AddrBaseReg);
207 const MCOperand &IndexReg = MI.getOperand(Op+X86::AddrIndexReg);
208
209 if ((BaseReg.getReg() != 0 &&
210 X86MCRegisterClasses[X86::GR32RegClassID].contains(BaseReg.getReg())) ||
211 (IndexReg.getReg() != 0 &&
212 X86MCRegisterClasses[X86::GR32RegClassID].contains(IndexReg.getReg())))
213 return true;
214 if (BaseReg.getReg() == X86::EIP) {
215 assert(IndexReg.getReg() == 0 && "Invalid eip-based address.");
216 return true;
217 }
218 return false;
219 }
220
221 /// Is64BitMemOperand - Return true if the specified instruction has
222 /// a 64-bit memory operand. Op specifies the operand # of the memoperand.
223 #ifndef NDEBUG
Is64BitMemOperand(const MCInst & MI,unsigned Op)224 static bool Is64BitMemOperand(const MCInst &MI, unsigned Op) {
225 const MCOperand &BaseReg = MI.getOperand(Op+X86::AddrBaseReg);
226 const MCOperand &IndexReg = MI.getOperand(Op+X86::AddrIndexReg);
227
228 if ((BaseReg.getReg() != 0 &&
229 X86MCRegisterClasses[X86::GR64RegClassID].contains(BaseReg.getReg())) ||
230 (IndexReg.getReg() != 0 &&
231 X86MCRegisterClasses[X86::GR64RegClassID].contains(IndexReg.getReg())))
232 return true;
233 return false;
234 }
235 #endif
236
237 /// StartsWithGlobalOffsetTable - Check if this expression starts with
238 /// _GLOBAL_OFFSET_TABLE_ and if it is of the form
239 /// _GLOBAL_OFFSET_TABLE_-symbol. This is needed to support PIC on ELF
240 /// i386 as _GLOBAL_OFFSET_TABLE_ is magical. We check only simple case that
241 /// are know to be used: _GLOBAL_OFFSET_TABLE_ by itself or at the start
242 /// of a binary expression.
243 enum GlobalOffsetTableExprKind {
244 GOT_None,
245 GOT_Normal,
246 GOT_SymDiff
247 };
248 static GlobalOffsetTableExprKind
StartsWithGlobalOffsetTable(const MCExpr * Expr)249 StartsWithGlobalOffsetTable(const MCExpr *Expr) {
250 const MCExpr *RHS = nullptr;
251 if (Expr->getKind() == MCExpr::Binary) {
252 const MCBinaryExpr *BE = static_cast<const MCBinaryExpr *>(Expr);
253 Expr = BE->getLHS();
254 RHS = BE->getRHS();
255 }
256
257 if (Expr->getKind() != MCExpr::SymbolRef)
258 return GOT_None;
259
260 const MCSymbolRefExpr *Ref = static_cast<const MCSymbolRefExpr*>(Expr);
261 const MCSymbol &S = Ref->getSymbol();
262 if (S.getName() != "_GLOBAL_OFFSET_TABLE_")
263 return GOT_None;
264 if (RHS && RHS->getKind() == MCExpr::SymbolRef)
265 return GOT_SymDiff;
266 return GOT_Normal;
267 }
268
HasSecRelSymbolRef(const MCExpr * Expr)269 static bool HasSecRelSymbolRef(const MCExpr *Expr) {
270 if (Expr->getKind() == MCExpr::SymbolRef) {
271 const MCSymbolRefExpr *Ref = static_cast<const MCSymbolRefExpr*>(Expr);
272 return Ref->getKind() == MCSymbolRefExpr::VK_SECREL;
273 }
274 return false;
275 }
276
277 void X86MCCodeEmitter::
EmitImmediate(const MCOperand & DispOp,SMLoc Loc,unsigned Size,MCFixupKind FixupKind,unsigned & CurByte,raw_ostream & OS,SmallVectorImpl<MCFixup> & Fixups,int ImmOffset) const278 EmitImmediate(const MCOperand &DispOp, SMLoc Loc, unsigned Size,
279 MCFixupKind FixupKind, unsigned &CurByte, raw_ostream &OS,
280 SmallVectorImpl<MCFixup> &Fixups, int ImmOffset) const {
281 const MCExpr *Expr = nullptr;
282 if (DispOp.isImm()) {
283 // If this is a simple integer displacement that doesn't require a
284 // relocation, emit it now.
285 if (FixupKind != FK_PCRel_1 &&
286 FixupKind != FK_PCRel_2 &&
287 FixupKind != FK_PCRel_4) {
288 EmitConstant(DispOp.getImm()+ImmOffset, Size, CurByte, OS);
289 return;
290 }
291 Expr = MCConstantExpr::create(DispOp.getImm(), Ctx);
292 } else {
293 Expr = DispOp.getExpr();
294 }
295
296 // If we have an immoffset, add it to the expression.
297 if ((FixupKind == FK_Data_4 ||
298 FixupKind == FK_Data_8 ||
299 FixupKind == MCFixupKind(X86::reloc_signed_4byte))) {
300 GlobalOffsetTableExprKind Kind = StartsWithGlobalOffsetTable(Expr);
301 if (Kind != GOT_None) {
302 assert(ImmOffset == 0);
303
304 if (Size == 8) {
305 FixupKind = MCFixupKind(X86::reloc_global_offset_table8);
306 } else {
307 assert(Size == 4);
308 FixupKind = MCFixupKind(X86::reloc_global_offset_table);
309 }
310
311 if (Kind == GOT_Normal)
312 ImmOffset = CurByte;
313 } else if (Expr->getKind() == MCExpr::SymbolRef) {
314 if (HasSecRelSymbolRef(Expr)) {
315 FixupKind = MCFixupKind(FK_SecRel_4);
316 }
317 } else if (Expr->getKind() == MCExpr::Binary) {
318 const MCBinaryExpr *Bin = static_cast<const MCBinaryExpr*>(Expr);
319 if (HasSecRelSymbolRef(Bin->getLHS())
320 || HasSecRelSymbolRef(Bin->getRHS())) {
321 FixupKind = MCFixupKind(FK_SecRel_4);
322 }
323 }
324 }
325
326 // If the fixup is pc-relative, we need to bias the value to be relative to
327 // the start of the field, not the end of the field.
328 if (FixupKind == FK_PCRel_4 ||
329 FixupKind == MCFixupKind(X86::reloc_riprel_4byte) ||
330 FixupKind == MCFixupKind(X86::reloc_riprel_4byte_movq_load) ||
331 FixupKind == MCFixupKind(X86::reloc_riprel_4byte_relax) ||
332 FixupKind == MCFixupKind(X86::reloc_riprel_4byte_relax_rex))
333 ImmOffset -= 4;
334 if (FixupKind == FK_PCRel_2)
335 ImmOffset -= 2;
336 if (FixupKind == FK_PCRel_1)
337 ImmOffset -= 1;
338
339 if (ImmOffset)
340 Expr = MCBinaryExpr::createAdd(Expr, MCConstantExpr::create(ImmOffset, Ctx),
341 Ctx);
342
343 // Emit a symbolic constant as a fixup and 4 zeros.
344 Fixups.push_back(MCFixup::create(CurByte, Expr, FixupKind, Loc));
345 EmitConstant(0, Size, CurByte, OS);
346 }
347
emitMemModRMByte(const MCInst & MI,unsigned Op,unsigned RegOpcodeField,uint64_t TSFlags,bool Rex,unsigned & CurByte,raw_ostream & OS,SmallVectorImpl<MCFixup> & Fixups,const MCSubtargetInfo & STI) const348 void X86MCCodeEmitter::emitMemModRMByte(const MCInst &MI, unsigned Op,
349 unsigned RegOpcodeField,
350 uint64_t TSFlags, bool Rex,
351 unsigned &CurByte, raw_ostream &OS,
352 SmallVectorImpl<MCFixup> &Fixups,
353 const MCSubtargetInfo &STI) const {
354 const MCOperand &Disp = MI.getOperand(Op+X86::AddrDisp);
355 const MCOperand &Base = MI.getOperand(Op+X86::AddrBaseReg);
356 const MCOperand &Scale = MI.getOperand(Op+X86::AddrScaleAmt);
357 const MCOperand &IndexReg = MI.getOperand(Op+X86::AddrIndexReg);
358 unsigned BaseReg = Base.getReg();
359 bool HasEVEX = (TSFlags & X86II::EncodingMask) == X86II::EVEX;
360
361 // Handle %rip relative addressing.
362 if (BaseReg == X86::RIP ||
363 BaseReg == X86::EIP) { // [disp32+rIP] in X86-64 mode
364 assert(is64BitMode(STI) && "Rip-relative addressing requires 64-bit mode");
365 assert(IndexReg.getReg() == 0 && "Invalid rip-relative address");
366 EmitByte(ModRMByte(0, RegOpcodeField, 5), CurByte, OS);
367
368 unsigned Opcode = MI.getOpcode();
369 // movq loads are handled with a special relocation form which allows the
370 // linker to eliminate some loads for GOT references which end up in the
371 // same linkage unit.
372 unsigned FixupKind = [=]() {
373 switch (Opcode) {
374 default:
375 return X86::reloc_riprel_4byte;
376 case X86::MOV64rm:
377 assert(Rex);
378 return X86::reloc_riprel_4byte_movq_load;
379 case X86::CALL64m:
380 case X86::JMP64m:
381 case X86::TEST64rm:
382 case X86::ADC64rm:
383 case X86::ADD64rm:
384 case X86::AND64rm:
385 case X86::CMP64rm:
386 case X86::OR64rm:
387 case X86::SBB64rm:
388 case X86::SUB64rm:
389 case X86::XOR64rm:
390 return Rex ? X86::reloc_riprel_4byte_relax_rex
391 : X86::reloc_riprel_4byte_relax;
392 }
393 }();
394
395 // rip-relative addressing is actually relative to the *next* instruction.
396 // Since an immediate can follow the mod/rm byte for an instruction, this
397 // means that we need to bias the immediate field of the instruction with
398 // the size of the immediate field. If we have this case, add it into the
399 // expression to emit.
400 int ImmSize = X86II::hasImm(TSFlags) ? X86II::getSizeOfImm(TSFlags) : 0;
401
402 EmitImmediate(Disp, MI.getLoc(), 4, MCFixupKind(FixupKind),
403 CurByte, OS, Fixups, -ImmSize);
404 return;
405 }
406
407 unsigned BaseRegNo = BaseReg ? GetX86RegNum(Base) : -1U;
408
409 // 16-bit addressing forms of the ModR/M byte have a different encoding for
410 // the R/M field and are far more limited in which registers can be used.
411 if (Is16BitMemOperand(MI, Op, STI)) {
412 if (BaseReg) {
413 // For 32-bit addressing, the row and column values in Table 2-2 are
414 // basically the same. It's AX/CX/DX/BX/SP/BP/SI/DI in that order, with
415 // some special cases. And GetX86RegNum reflects that numbering.
416 // For 16-bit addressing it's more fun, as shown in the SDM Vol 2A,
417 // Table 2-1 "16-Bit Addressing Forms with the ModR/M byte". We can only
418 // use SI/DI/BP/BX, which have "row" values 4-7 in no particular order,
419 // while values 0-3 indicate the allowed combinations (base+index) of
420 // those: 0 for BX+SI, 1 for BX+DI, 2 for BP+SI, 3 for BP+DI.
421 //
422 // R16Table[] is a lookup from the normal RegNo, to the row values from
423 // Table 2-1 for 16-bit addressing modes. Where zero means disallowed.
424 static const unsigned R16Table[] = { 0, 0, 0, 7, 0, 6, 4, 5 };
425 unsigned RMfield = R16Table[BaseRegNo];
426
427 assert(RMfield && "invalid 16-bit base register");
428
429 if (IndexReg.getReg()) {
430 unsigned IndexReg16 = R16Table[GetX86RegNum(IndexReg)];
431
432 assert(IndexReg16 && "invalid 16-bit index register");
433 // We must have one of SI/DI (4,5), and one of BP/BX (6,7).
434 assert(((IndexReg16 ^ RMfield) & 2) &&
435 "invalid 16-bit base/index register combination");
436 assert(Scale.getImm() == 1 &&
437 "invalid scale for 16-bit memory reference");
438
439 // Allow base/index to appear in either order (although GAS doesn't).
440 if (IndexReg16 & 2)
441 RMfield = (RMfield & 1) | ((7 - IndexReg16) << 1);
442 else
443 RMfield = (IndexReg16 & 1) | ((7 - RMfield) << 1);
444 }
445
446 if (Disp.isImm() && isDisp8(Disp.getImm())) {
447 if (Disp.getImm() == 0 && BaseRegNo != N86::EBP) {
448 // There is no displacement; just the register.
449 EmitByte(ModRMByte(0, RegOpcodeField, RMfield), CurByte, OS);
450 return;
451 }
452 // Use the [REG]+disp8 form, including for [BP] which cannot be encoded.
453 EmitByte(ModRMByte(1, RegOpcodeField, RMfield), CurByte, OS);
454 EmitImmediate(Disp, MI.getLoc(), 1, FK_Data_1, CurByte, OS, Fixups);
455 return;
456 }
457 // This is the [REG]+disp16 case.
458 EmitByte(ModRMByte(2, RegOpcodeField, RMfield), CurByte, OS);
459 } else {
460 // There is no BaseReg; this is the plain [disp16] case.
461 EmitByte(ModRMByte(0, RegOpcodeField, 6), CurByte, OS);
462 }
463
464 // Emit 16-bit displacement for plain disp16 or [REG]+disp16 cases.
465 EmitImmediate(Disp, MI.getLoc(), 2, FK_Data_2, CurByte, OS, Fixups);
466 return;
467 }
468
469 // Determine whether a SIB byte is needed.
470 // If no BaseReg, issue a RIP relative instruction only if the MCE can
471 // resolve addresses on-the-fly, otherwise use SIB (Intel Manual 2A, table
472 // 2-7) and absolute references.
473
474 if (// The SIB byte must be used if there is an index register.
475 IndexReg.getReg() == 0 &&
476 // The SIB byte must be used if the base is ESP/RSP/R12, all of which
477 // encode to an R/M value of 4, which indicates that a SIB byte is
478 // present.
479 BaseRegNo != N86::ESP &&
480 // If there is no base register and we're in 64-bit mode, we need a SIB
481 // byte to emit an addr that is just 'disp32' (the non-RIP relative form).
482 (!is64BitMode(STI) || BaseReg != 0)) {
483
484 if (BaseReg == 0) { // [disp32] in X86-32 mode
485 EmitByte(ModRMByte(0, RegOpcodeField, 5), CurByte, OS);
486 EmitImmediate(Disp, MI.getLoc(), 4, FK_Data_4, CurByte, OS, Fixups);
487 return;
488 }
489
490 // If the base is not EBP/ESP and there is no displacement, use simple
491 // indirect register encoding, this handles addresses like [EAX]. The
492 // encoding for [EBP] with no displacement means [disp32] so we handle it
493 // by emitting a displacement of 0 below.
494 if (Disp.isImm() && Disp.getImm() == 0 && BaseRegNo != N86::EBP) {
495 EmitByte(ModRMByte(0, RegOpcodeField, BaseRegNo), CurByte, OS);
496 return;
497 }
498
499 // Otherwise, if the displacement fits in a byte, encode as [REG+disp8].
500 if (Disp.isImm()) {
501 if (!HasEVEX && isDisp8(Disp.getImm())) {
502 EmitByte(ModRMByte(1, RegOpcodeField, BaseRegNo), CurByte, OS);
503 EmitImmediate(Disp, MI.getLoc(), 1, FK_Data_1, CurByte, OS, Fixups);
504 return;
505 }
506 // Try EVEX compressed 8-bit displacement first; if failed, fall back to
507 // 32-bit displacement.
508 int CDisp8 = 0;
509 if (HasEVEX && isCDisp8(TSFlags, Disp.getImm(), CDisp8)) {
510 EmitByte(ModRMByte(1, RegOpcodeField, BaseRegNo), CurByte, OS);
511 EmitImmediate(Disp, MI.getLoc(), 1, FK_Data_1, CurByte, OS, Fixups,
512 CDisp8 - Disp.getImm());
513 return;
514 }
515 }
516
517 // Otherwise, emit the most general non-SIB encoding: [REG+disp32]
518 EmitByte(ModRMByte(2, RegOpcodeField, BaseRegNo), CurByte, OS);
519 unsigned Opcode = MI.getOpcode();
520 unsigned FixupKind = Opcode == X86::MOV32rm ? X86::reloc_signed_4byte_relax
521 : X86::reloc_signed_4byte;
522 EmitImmediate(Disp, MI.getLoc(), 4, MCFixupKind(FixupKind), CurByte, OS,
523 Fixups);
524 return;
525 }
526
527 // We need a SIB byte, so start by outputting the ModR/M byte first
528 assert(IndexReg.getReg() != X86::ESP &&
529 IndexReg.getReg() != X86::RSP && "Cannot use ESP as index reg!");
530
531 bool ForceDisp32 = false;
532 bool ForceDisp8 = false;
533 int CDisp8 = 0;
534 int ImmOffset = 0;
535 if (BaseReg == 0) {
536 // If there is no base register, we emit the special case SIB byte with
537 // MOD=0, BASE=5, to JUST get the index, scale, and displacement.
538 EmitByte(ModRMByte(0, RegOpcodeField, 4), CurByte, OS);
539 ForceDisp32 = true;
540 } else if (!Disp.isImm()) {
541 // Emit the normal disp32 encoding.
542 EmitByte(ModRMByte(2, RegOpcodeField, 4), CurByte, OS);
543 ForceDisp32 = true;
544 } else if (Disp.getImm() == 0 &&
545 // Base reg can't be anything that ends up with '5' as the base
546 // reg, it is the magic [*] nomenclature that indicates no base.
547 BaseRegNo != N86::EBP) {
548 // Emit no displacement ModR/M byte
549 EmitByte(ModRMByte(0, RegOpcodeField, 4), CurByte, OS);
550 } else if (!HasEVEX && isDisp8(Disp.getImm())) {
551 // Emit the disp8 encoding.
552 EmitByte(ModRMByte(1, RegOpcodeField, 4), CurByte, OS);
553 ForceDisp8 = true; // Make sure to force 8 bit disp if Base=EBP
554 } else if (HasEVEX && isCDisp8(TSFlags, Disp.getImm(), CDisp8)) {
555 // Emit the disp8 encoding.
556 EmitByte(ModRMByte(1, RegOpcodeField, 4), CurByte, OS);
557 ForceDisp8 = true; // Make sure to force 8 bit disp if Base=EBP
558 ImmOffset = CDisp8 - Disp.getImm();
559 } else {
560 // Emit the normal disp32 encoding.
561 EmitByte(ModRMByte(2, RegOpcodeField, 4), CurByte, OS);
562 }
563
564 // Calculate what the SS field value should be...
565 static const unsigned SSTable[] = { ~0U, 0, 1, ~0U, 2, ~0U, ~0U, ~0U, 3 };
566 unsigned SS = SSTable[Scale.getImm()];
567
568 if (BaseReg == 0) {
569 // Handle the SIB byte for the case where there is no base, see Intel
570 // Manual 2A, table 2-7. The displacement has already been output.
571 unsigned IndexRegNo;
572 if (IndexReg.getReg())
573 IndexRegNo = GetX86RegNum(IndexReg);
574 else // Examples: [ESP+1*<noreg>+4] or [scaled idx]+disp32 (MOD=0,BASE=5)
575 IndexRegNo = 4;
576 EmitSIBByte(SS, IndexRegNo, 5, CurByte, OS);
577 } else {
578 unsigned IndexRegNo;
579 if (IndexReg.getReg())
580 IndexRegNo = GetX86RegNum(IndexReg);
581 else
582 IndexRegNo = 4; // For example [ESP+1*<noreg>+4]
583 EmitSIBByte(SS, IndexRegNo, GetX86RegNum(Base), CurByte, OS);
584 }
585
586 // Do we need to output a displacement?
587 if (ForceDisp8)
588 EmitImmediate(Disp, MI.getLoc(), 1, FK_Data_1, CurByte, OS, Fixups, ImmOffset);
589 else if (ForceDisp32 || Disp.getImm() != 0)
590 EmitImmediate(Disp, MI.getLoc(), 4, MCFixupKind(X86::reloc_signed_4byte),
591 CurByte, OS, Fixups);
592 }
593
594 /// EmitVEXOpcodePrefix - AVX instructions are encoded using a opcode prefix
595 /// called VEX.
EmitVEXOpcodePrefix(uint64_t TSFlags,unsigned & CurByte,int MemOperand,const MCInst & MI,const MCInstrDesc & Desc,raw_ostream & OS) const596 void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
597 int MemOperand, const MCInst &MI,
598 const MCInstrDesc &Desc,
599 raw_ostream &OS) const {
600 assert(!(TSFlags & X86II::LOCK) && "Can't have LOCK VEX.");
601
602 uint64_t Encoding = TSFlags & X86II::EncodingMask;
603 bool HasEVEX_K = TSFlags & X86II::EVEX_K;
604 bool HasVEX_4V = TSFlags & X86II::VEX_4V;
605 bool HasVEX_4VOp3 = TSFlags & X86II::VEX_4VOp3;
606 bool HasMemOp4 = TSFlags & X86II::MemOp4;
607 bool HasEVEX_RC = TSFlags & X86II::EVEX_RC;
608
609 // VEX_R: opcode externsion equivalent to REX.R in
610 // 1's complement (inverted) form
611 //
612 // 1: Same as REX_R=0 (must be 1 in 32-bit mode)
613 // 0: Same as REX_R=1 (64 bit mode only)
614 //
615 uint8_t VEX_R = 0x1;
616 uint8_t EVEX_R2 = 0x1;
617
618 // VEX_X: equivalent to REX.X, only used when a
619 // register is used for index in SIB Byte.
620 //
621 // 1: Same as REX.X=0 (must be 1 in 32-bit mode)
622 // 0: Same as REX.X=1 (64-bit mode only)
623 uint8_t VEX_X = 0x1;
624
625 // VEX_B:
626 //
627 // 1: Same as REX_B=0 (ignored in 32-bit mode)
628 // 0: Same as REX_B=1 (64 bit mode only)
629 //
630 uint8_t VEX_B = 0x1;
631
632 // VEX_W: opcode specific (use like REX.W, or used for
633 // opcode extension, or ignored, depending on the opcode byte)
634 uint8_t VEX_W = (TSFlags & X86II::VEX_W) ? 1 : 0;
635
636 // VEX_5M (VEX m-mmmmm field):
637 //
638 // 0b00000: Reserved for future use
639 // 0b00001: implied 0F leading opcode
640 // 0b00010: implied 0F 38 leading opcode bytes
641 // 0b00011: implied 0F 3A leading opcode bytes
642 // 0b00100-0b11111: Reserved for future use
643 // 0b01000: XOP map select - 08h instructions with imm byte
644 // 0b01001: XOP map select - 09h instructions with no imm byte
645 // 0b01010: XOP map select - 0Ah instructions with imm dword
646 uint8_t VEX_5M;
647 switch (TSFlags & X86II::OpMapMask) {
648 default: llvm_unreachable("Invalid prefix!");
649 case X86II::TB: VEX_5M = 0x1; break; // 0F
650 case X86II::T8: VEX_5M = 0x2; break; // 0F 38
651 case X86II::TA: VEX_5M = 0x3; break; // 0F 3A
652 case X86II::XOP8: VEX_5M = 0x8; break;
653 case X86II::XOP9: VEX_5M = 0x9; break;
654 case X86II::XOPA: VEX_5M = 0xA; break;
655 }
656
657 // VEX_4V (VEX vvvv field): a register specifier
658 // (in 1's complement form) or 1111 if unused.
659 uint8_t VEX_4V = 0xf;
660 uint8_t EVEX_V2 = 0x1;
661
662 // EVEX_L2/VEX_L (Vector Length):
663 //
664 // L2 L
665 // 0 0: scalar or 128-bit vector
666 // 0 1: 256-bit vector
667 // 1 0: 512-bit vector
668 //
669 uint8_t VEX_L = (TSFlags & X86II::VEX_L) ? 1 : 0;
670 uint8_t EVEX_L2 = (TSFlags & X86II::EVEX_L2) ? 1 : 0;
671
672 // VEX_PP: opcode extension providing equivalent
673 // functionality of a SIMD prefix
674 //
675 // 0b00: None
676 // 0b01: 66
677 // 0b10: F3
678 // 0b11: F2
679 //
680 uint8_t VEX_PP;
681 switch (TSFlags & X86II::OpPrefixMask) {
682 default: llvm_unreachable("Invalid op prefix!");
683 case X86II::PS: VEX_PP = 0x0; break; // none
684 case X86II::PD: VEX_PP = 0x1; break; // 66
685 case X86II::XS: VEX_PP = 0x2; break; // F3
686 case X86II::XD: VEX_PP = 0x3; break; // F2
687 }
688
689 // EVEX_U
690 uint8_t EVEX_U = 1; // Always '1' so far
691
692 // EVEX_z
693 uint8_t EVEX_z = (HasEVEX_K && (TSFlags & X86II::EVEX_Z)) ? 1 : 0;
694
695 // EVEX_b
696 uint8_t EVEX_b = (TSFlags & X86II::EVEX_B) ? 1 : 0;
697
698 // EVEX_rc
699 uint8_t EVEX_rc = 0;
700
701 // EVEX_aaa
702 uint8_t EVEX_aaa = 0;
703
704 bool EncodeRC = false;
705
706 // Classify VEX_B, VEX_4V, VEX_R, VEX_X
707 unsigned NumOps = Desc.getNumOperands();
708 unsigned CurOp = X86II::getOperandBias(Desc);
709
710 switch (TSFlags & X86II::FormMask) {
711 default: llvm_unreachable("Unexpected form in EmitVEXOpcodePrefix!");
712 case X86II::RawFrm:
713 break;
714 case X86II::MRMDestMem: {
715 // MRMDestMem instructions forms:
716 // MemAddr, src1(ModR/M)
717 // MemAddr, src1(VEX_4V), src2(ModR/M)
718 // MemAddr, src1(ModR/M), imm8
719 //
720 unsigned BaseRegEnc = getX86RegEncoding(MI, MemOperand + X86::AddrBaseReg);
721 VEX_B = ~(BaseRegEnc >> 3) & 1;
722 unsigned IndexRegEnc = getX86RegEncoding(MI, MemOperand+X86::AddrIndexReg);
723 VEX_X = ~(IndexRegEnc >> 3) & 1;
724 if (!HasVEX_4V) // Only needed with VSIB which don't use VVVV.
725 EVEX_V2 = ~(IndexRegEnc >> 4) & 1;
726
727 CurOp += X86::AddrNumOperands;
728
729 if (HasEVEX_K)
730 EVEX_aaa = getX86RegEncoding(MI, CurOp++);
731
732 if (HasVEX_4V) {
733 unsigned VRegEnc = getX86RegEncoding(MI, CurOp++);
734 VEX_4V = ~VRegEnc & 0xf;
735 EVEX_V2 = ~(VRegEnc >> 4) & 1;
736 }
737
738 unsigned RegEnc = getX86RegEncoding(MI, CurOp++);
739 VEX_R = ~(RegEnc >> 3) & 1;
740 EVEX_R2 = ~(RegEnc >> 4) & 1;
741 break;
742 }
743 case X86II::MRMSrcMem: {
744 // MRMSrcMem instructions forms:
745 // src1(ModR/M), MemAddr
746 // src1(ModR/M), src2(VEX_4V), MemAddr
747 // src1(ModR/M), MemAddr, imm8
748 // src1(ModR/M), MemAddr, src2(VEX_I8IMM)
749 //
750 // FMA4:
751 // dst(ModR/M.reg), src1(VEX_4V), src2(ModR/M), src3(VEX_I8IMM)
752 // dst(ModR/M.reg), src1(VEX_4V), src2(VEX_I8IMM), src3(ModR/M),
753 unsigned RegEnc = getX86RegEncoding(MI, CurOp++);
754 VEX_R = ~(RegEnc >> 3) & 1;
755 EVEX_R2 = ~(RegEnc >> 4) & 1;
756
757 if (HasEVEX_K)
758 EVEX_aaa = getX86RegEncoding(MI, CurOp++);
759
760 if (HasVEX_4V) {
761 unsigned VRegEnc = getX86RegEncoding(MI, CurOp++);
762 VEX_4V = ~VRegEnc & 0xf;
763 EVEX_V2 = ~(VRegEnc >> 4) & 1;
764 }
765
766 unsigned BaseRegEnc = getX86RegEncoding(MI, MemOperand + X86::AddrBaseReg);
767 VEX_B = ~(BaseRegEnc >> 3) & 1;
768 unsigned IndexRegEnc = getX86RegEncoding(MI, MemOperand+X86::AddrIndexReg);
769 VEX_X = ~(IndexRegEnc >> 3) & 1;
770 if (!HasVEX_4V) // Only needed with VSIB which don't use VVVV.
771 EVEX_V2 = ~(IndexRegEnc >> 4) & 1;
772
773 if (HasVEX_4VOp3)
774 // Instruction format for 4VOp3:
775 // src1(ModR/M), MemAddr, src3(VEX_4V)
776 // CurOp points to start of the MemoryOperand,
777 // it skips TIED_TO operands if exist, then increments past src1.
778 // CurOp + X86::AddrNumOperands will point to src3.
779 VEX_4V = ~getX86RegEncoding(MI, CurOp + X86::AddrNumOperands) & 0xf;
780 break;
781 }
782 case X86II::MRM0m: case X86II::MRM1m:
783 case X86II::MRM2m: case X86II::MRM3m:
784 case X86II::MRM4m: case X86II::MRM5m:
785 case X86II::MRM6m: case X86II::MRM7m: {
786 // MRM[0-9]m instructions forms:
787 // MemAddr
788 // src1(VEX_4V), MemAddr
789 if (HasVEX_4V) {
790 unsigned VRegEnc = getX86RegEncoding(MI, CurOp++);
791 VEX_4V = ~VRegEnc & 0xf;
792 EVEX_V2 = ~(VRegEnc >> 4) & 1;
793 }
794
795 if (HasEVEX_K)
796 EVEX_aaa = getX86RegEncoding(MI, CurOp++);
797
798 unsigned BaseRegEnc = getX86RegEncoding(MI, MemOperand + X86::AddrBaseReg);
799 VEX_B = ~(BaseRegEnc >> 3) & 1;
800 unsigned IndexRegEnc = getX86RegEncoding(MI, MemOperand+X86::AddrIndexReg);
801 VEX_X = ~(IndexRegEnc >> 3) & 1;
802 break;
803 }
804 case X86II::MRMSrcReg: {
805 // MRMSrcReg instructions forms:
806 // dst(ModR/M), src1(VEX_4V), src2(ModR/M), src3(VEX_I8IMM)
807 // dst(ModR/M), src1(ModR/M)
808 // dst(ModR/M), src1(ModR/M), imm8
809 //
810 // FMA4:
811 // dst(ModR/M.reg), src1(VEX_4V), src2(ModR/M), src3(VEX_I8IMM)
812 // dst(ModR/M.reg), src1(VEX_4V), src2(VEX_I8IMM), src3(ModR/M),
813 unsigned RegEnc = getX86RegEncoding(MI, CurOp++);
814 VEX_R = ~(RegEnc >> 3) & 1;
815 EVEX_R2 = ~(RegEnc >> 4) & 1;
816
817 if (HasEVEX_K)
818 EVEX_aaa = getX86RegEncoding(MI, CurOp++);
819
820 if (HasVEX_4V) {
821 unsigned VRegEnc = getX86RegEncoding(MI, CurOp++);
822 VEX_4V = ~VRegEnc & 0xf;
823 EVEX_V2 = ~(VRegEnc >> 4) & 1;
824 }
825
826 if (HasMemOp4) // Skip second register source (encoded in I8IMM)
827 CurOp++;
828
829 RegEnc = getX86RegEncoding(MI, CurOp++);
830 VEX_B = ~(RegEnc >> 3) & 1;
831 VEX_X = ~(RegEnc >> 4) & 1;
832 if (HasVEX_4VOp3)
833 VEX_4V = ~getX86RegEncoding(MI, CurOp++) & 0xf;
834 if (EVEX_b) {
835 if (HasEVEX_RC) {
836 unsigned RcOperand = NumOps-1;
837 assert(RcOperand >= CurOp);
838 EVEX_rc = MI.getOperand(RcOperand).getImm() & 0x3;
839 }
840 EncodeRC = true;
841 }
842 break;
843 }
844 case X86II::MRMDestReg: {
845 // MRMDestReg instructions forms:
846 // dst(ModR/M), src(ModR/M)
847 // dst(ModR/M), src(ModR/M), imm8
848 // dst(ModR/M), src1(VEX_4V), src2(ModR/M)
849 unsigned RegEnc = getX86RegEncoding(MI, CurOp++);
850 VEX_B = ~(RegEnc >> 3) & 1;
851 VEX_X = ~(RegEnc >> 4) & 1;
852
853 if (HasEVEX_K)
854 EVEX_aaa = getX86RegEncoding(MI, CurOp++);
855
856 if (HasVEX_4V) {
857 unsigned VRegEnc = getX86RegEncoding(MI, CurOp++);
858 VEX_4V = ~VRegEnc & 0xf;
859 EVEX_V2 = ~(VRegEnc >> 4) & 1;
860 }
861
862 RegEnc = getX86RegEncoding(MI, CurOp++);
863 VEX_R = ~(RegEnc >> 3) & 1;
864 EVEX_R2 = ~(RegEnc >> 4) & 1;
865 if (EVEX_b)
866 EncodeRC = true;
867 break;
868 }
869 case X86II::MRM0r: case X86II::MRM1r:
870 case X86II::MRM2r: case X86II::MRM3r:
871 case X86II::MRM4r: case X86II::MRM5r:
872 case X86II::MRM6r: case X86II::MRM7r: {
873 // MRM0r-MRM7r instructions forms:
874 // dst(VEX_4V), src(ModR/M), imm8
875 if (HasVEX_4V) {
876 unsigned VRegEnc = getX86RegEncoding(MI, CurOp++);
877 VEX_4V = ~VRegEnc & 0xf;
878 EVEX_V2 = ~(VRegEnc >> 4) & 1;
879 }
880 if (HasEVEX_K)
881 EVEX_aaa = getX86RegEncoding(MI, CurOp++);
882
883 unsigned RegEnc = getX86RegEncoding(MI, CurOp++);
884 VEX_B = ~(RegEnc >> 3) & 1;
885 VEX_X = ~(RegEnc >> 4) & 1;
886 break;
887 }
888 }
889
890 if (Encoding == X86II::VEX || Encoding == X86II::XOP) {
891 // VEX opcode prefix can have 2 or 3 bytes
892 //
893 // 3 bytes:
894 // +-----+ +--------------+ +-------------------+
895 // | C4h | | RXB | m-mmmm | | W | vvvv | L | pp |
896 // +-----+ +--------------+ +-------------------+
897 // 2 bytes:
898 // +-----+ +-------------------+
899 // | C5h | | R | vvvv | L | pp |
900 // +-----+ +-------------------+
901 //
902 // XOP uses a similar prefix:
903 // +-----+ +--------------+ +-------------------+
904 // | 8Fh | | RXB | m-mmmm | | W | vvvv | L | pp |
905 // +-----+ +--------------+ +-------------------+
906 uint8_t LastByte = VEX_PP | (VEX_L << 2) | (VEX_4V << 3);
907
908 // Can we use the 2 byte VEX prefix?
909 if (Encoding == X86II::VEX && VEX_B && VEX_X && !VEX_W && (VEX_5M == 1)) {
910 EmitByte(0xC5, CurByte, OS);
911 EmitByte(LastByte | (VEX_R << 7), CurByte, OS);
912 return;
913 }
914
915 // 3 byte VEX prefix
916 EmitByte(Encoding == X86II::XOP ? 0x8F : 0xC4, CurByte, OS);
917 EmitByte(VEX_R << 7 | VEX_X << 6 | VEX_B << 5 | VEX_5M, CurByte, OS);
918 EmitByte(LastByte | (VEX_W << 7), CurByte, OS);
919 } else {
920 assert(Encoding == X86II::EVEX && "unknown encoding!");
921 // EVEX opcode prefix can have 4 bytes
922 //
923 // +-----+ +--------------+ +-------------------+ +------------------------+
924 // | 62h | | RXBR' | 00mm | | W | vvvv | U | pp | | z | L'L | b | v' | aaa |
925 // +-----+ +--------------+ +-------------------+ +------------------------+
926 assert((VEX_5M & 0x3) == VEX_5M
927 && "More than 2 significant bits in VEX.m-mmmm fields for EVEX!");
928
929 EmitByte(0x62, CurByte, OS);
930 EmitByte((VEX_R << 7) |
931 (VEX_X << 6) |
932 (VEX_B << 5) |
933 (EVEX_R2 << 4) |
934 VEX_5M, CurByte, OS);
935 EmitByte((VEX_W << 7) |
936 (VEX_4V << 3) |
937 (EVEX_U << 2) |
938 VEX_PP, CurByte, OS);
939 if (EncodeRC)
940 EmitByte((EVEX_z << 7) |
941 (EVEX_rc << 5) |
942 (EVEX_b << 4) |
943 (EVEX_V2 << 3) |
944 EVEX_aaa, CurByte, OS);
945 else
946 EmitByte((EVEX_z << 7) |
947 (EVEX_L2 << 6) |
948 (VEX_L << 5) |
949 (EVEX_b << 4) |
950 (EVEX_V2 << 3) |
951 EVEX_aaa, CurByte, OS);
952 }
953 }
954
955 /// DetermineREXPrefix - Determine if the MCInst has to be encoded with a X86-64
956 /// REX prefix which specifies 1) 64-bit instructions, 2) non-default operand
957 /// size, and 3) use of X86-64 extended registers.
DetermineREXPrefix(const MCInst & MI,uint64_t TSFlags,int MemOperand,const MCInstrDesc & Desc) const958 uint8_t X86MCCodeEmitter::DetermineREXPrefix(const MCInst &MI, uint64_t TSFlags,
959 int MemOperand,
960 const MCInstrDesc &Desc) const {
961 uint8_t REX = 0;
962 bool UsesHighByteReg = false;
963
964 if (TSFlags & X86II::REX_W)
965 REX |= 1 << 3; // set REX.W
966
967 if (MI.getNumOperands() == 0) return REX;
968
969 unsigned NumOps = MI.getNumOperands();
970 unsigned CurOp = X86II::getOperandBias(Desc);
971
972 // If it accesses SPL, BPL, SIL, or DIL, then it requires a 0x40 REX prefix.
973 for (unsigned i = CurOp; i != NumOps; ++i) {
974 const MCOperand &MO = MI.getOperand(i);
975 if (!MO.isReg()) continue;
976 unsigned Reg = MO.getReg();
977 if (Reg == X86::AH || Reg == X86::BH || Reg == X86::CH || Reg == X86::DH)
978 UsesHighByteReg = true;
979 if (!X86II::isX86_64NonExtLowByteReg(Reg)) continue;
980 // FIXME: The caller of DetermineREXPrefix slaps this prefix onto anything
981 // that returns non-zero.
982 REX |= 0x40; // REX fixed encoding prefix
983 break;
984 }
985
986 switch (TSFlags & X86II::FormMask) {
987 case X86II::AddRegFrm:
988 REX |= isX86_64ExtendedReg(MI, CurOp++) << 0; // REX.B
989 break;
990 case X86II::MRMSrcReg:
991 REX |= isX86_64ExtendedReg(MI, CurOp++) << 2; // REX.R
992 REX |= isX86_64ExtendedReg(MI, CurOp++) << 0; // REX.B
993 break;
994 case X86II::MRMSrcMem: {
995 REX |= isX86_64ExtendedReg(MI, CurOp++) << 2; // REX.R
996 REX |= isX86_64ExtendedReg(MI, MemOperand+X86::AddrBaseReg) << 0; // REX.B
997 REX |= isX86_64ExtendedReg(MI, MemOperand+X86::AddrIndexReg) << 1; // REX.X
998 CurOp += X86::AddrNumOperands;
999 break;
1000 }
1001 case X86II::MRMDestReg:
1002 REX |= isX86_64ExtendedReg(MI, CurOp++) << 0; // REX.B
1003 REX |= isX86_64ExtendedReg(MI, CurOp++) << 2; // REX.R
1004 break;
1005 case X86II::MRMDestMem:
1006 REX |= isX86_64ExtendedReg(MI, MemOperand+X86::AddrBaseReg) << 0; // REX.B
1007 REX |= isX86_64ExtendedReg(MI, MemOperand+X86::AddrIndexReg) << 1; // REX.X
1008 CurOp += X86::AddrNumOperands;
1009 REX |= isX86_64ExtendedReg(MI, CurOp++) << 2; // REX.R
1010 break;
1011 case X86II::MRMXm:
1012 case X86II::MRM0m: case X86II::MRM1m:
1013 case X86II::MRM2m: case X86II::MRM3m:
1014 case X86II::MRM4m: case X86II::MRM5m:
1015 case X86II::MRM6m: case X86II::MRM7m:
1016 REX |= isX86_64ExtendedReg(MI, MemOperand+X86::AddrBaseReg) << 0; // REX.B
1017 REX |= isX86_64ExtendedReg(MI, MemOperand+X86::AddrIndexReg) << 1; // REX.X
1018 break;
1019 case X86II::MRMXr:
1020 case X86II::MRM0r: case X86II::MRM1r:
1021 case X86II::MRM2r: case X86II::MRM3r:
1022 case X86II::MRM4r: case X86II::MRM5r:
1023 case X86II::MRM6r: case X86II::MRM7r:
1024 REX |= isX86_64ExtendedReg(MI, CurOp++) << 0; // REX.B
1025 break;
1026 }
1027 if (REX && UsesHighByteReg)
1028 report_fatal_error("Cannot encode high byte register in REX-prefixed instruction");
1029
1030 return REX;
1031 }
1032
1033 /// EmitSegmentOverridePrefix - Emit segment override opcode prefix as needed
EmitSegmentOverridePrefix(unsigned & CurByte,unsigned SegOperand,const MCInst & MI,raw_ostream & OS) const1034 void X86MCCodeEmitter::EmitSegmentOverridePrefix(unsigned &CurByte,
1035 unsigned SegOperand,
1036 const MCInst &MI,
1037 raw_ostream &OS) const {
1038 // Check for explicit segment override on memory operand.
1039 switch (MI.getOperand(SegOperand).getReg()) {
1040 default: llvm_unreachable("Unknown segment register!");
1041 case 0: break;
1042 case X86::CS: EmitByte(0x2E, CurByte, OS); break;
1043 case X86::SS: EmitByte(0x36, CurByte, OS); break;
1044 case X86::DS: EmitByte(0x3E, CurByte, OS); break;
1045 case X86::ES: EmitByte(0x26, CurByte, OS); break;
1046 case X86::FS: EmitByte(0x64, CurByte, OS); break;
1047 case X86::GS: EmitByte(0x65, CurByte, OS); break;
1048 }
1049 }
1050
1051 /// Emit all instruction prefixes prior to the opcode.
1052 ///
1053 /// MemOperand is the operand # of the start of a memory operand if present. If
1054 /// Not present, it is -1.
1055 ///
1056 /// Returns true if a REX prefix was used.
emitOpcodePrefix(uint64_t TSFlags,unsigned & CurByte,int MemOperand,const MCInst & MI,const MCInstrDesc & Desc,const MCSubtargetInfo & STI,raw_ostream & OS) const1057 bool X86MCCodeEmitter::emitOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
1058 int MemOperand, const MCInst &MI,
1059 const MCInstrDesc &Desc,
1060 const MCSubtargetInfo &STI,
1061 raw_ostream &OS) const {
1062 bool Ret = false;
1063 // Emit the operand size opcode prefix as needed.
1064 if ((TSFlags & X86II::OpSizeMask) == (is16BitMode(STI) ? X86II::OpSize32
1065 : X86II::OpSize16))
1066 EmitByte(0x66, CurByte, OS);
1067
1068 // Emit the LOCK opcode prefix.
1069 if (TSFlags & X86II::LOCK)
1070 EmitByte(0xF0, CurByte, OS);
1071
1072 switch (TSFlags & X86II::OpPrefixMask) {
1073 case X86II::PD: // 66
1074 EmitByte(0x66, CurByte, OS);
1075 break;
1076 case X86II::XS: // F3
1077 EmitByte(0xF3, CurByte, OS);
1078 break;
1079 case X86II::XD: // F2
1080 EmitByte(0xF2, CurByte, OS);
1081 break;
1082 }
1083
1084 // Handle REX prefix.
1085 // FIXME: Can this come before F2 etc to simplify emission?
1086 if (is64BitMode(STI)) {
1087 if (uint8_t REX = DetermineREXPrefix(MI, TSFlags, MemOperand, Desc)) {
1088 EmitByte(0x40 | REX, CurByte, OS);
1089 Ret = true;
1090 }
1091 }
1092
1093 // 0x0F escape code must be emitted just before the opcode.
1094 switch (TSFlags & X86II::OpMapMask) {
1095 case X86II::TB: // Two-byte opcode map
1096 case X86II::T8: // 0F 38
1097 case X86II::TA: // 0F 3A
1098 EmitByte(0x0F, CurByte, OS);
1099 break;
1100 }
1101
1102 switch (TSFlags & X86II::OpMapMask) {
1103 case X86II::T8: // 0F 38
1104 EmitByte(0x38, CurByte, OS);
1105 break;
1106 case X86II::TA: // 0F 3A
1107 EmitByte(0x3A, CurByte, OS);
1108 break;
1109 }
1110 return Ret;
1111 }
1112
1113 void X86MCCodeEmitter::
encodeInstruction(const MCInst & MI,raw_ostream & OS,SmallVectorImpl<MCFixup> & Fixups,const MCSubtargetInfo & STI) const1114 encodeInstruction(const MCInst &MI, raw_ostream &OS,
1115 SmallVectorImpl<MCFixup> &Fixups,
1116 const MCSubtargetInfo &STI) const {
1117 unsigned Opcode = MI.getOpcode();
1118 const MCInstrDesc &Desc = MCII.get(Opcode);
1119 uint64_t TSFlags = Desc.TSFlags;
1120
1121 // Pseudo instructions don't get encoded.
1122 if ((TSFlags & X86II::FormMask) == X86II::Pseudo)
1123 return;
1124
1125 unsigned NumOps = Desc.getNumOperands();
1126 unsigned CurOp = X86II::getOperandBias(Desc);
1127
1128 // Keep track of the current byte being emitted.
1129 unsigned CurByte = 0;
1130
1131 // Encoding type for this instruction.
1132 uint64_t Encoding = TSFlags & X86II::EncodingMask;
1133
1134 // It uses the VEX.VVVV field?
1135 bool HasVEX_4V = TSFlags & X86II::VEX_4V;
1136 bool HasVEX_4VOp3 = TSFlags & X86II::VEX_4VOp3;
1137 bool HasMemOp4 = TSFlags & X86II::MemOp4;
1138 bool HasVEX_I8IMM = TSFlags & X86II::VEX_I8IMM;
1139 assert((!HasMemOp4 || HasVEX_I8IMM) && "MemOp4 should imply VEX_I8IMM");
1140
1141 // It uses the EVEX.aaa field?
1142 bool HasEVEX_K = TSFlags & X86II::EVEX_K;
1143 bool HasEVEX_RC = TSFlags & X86II::EVEX_RC;
1144
1145 // Used if a register is encoded in 7:4 of immediate.
1146 unsigned I8RegNum = 0;
1147
1148 // Determine where the memory operand starts, if present.
1149 int MemoryOperand = X86II::getMemoryOperandNo(TSFlags);
1150 if (MemoryOperand != -1) MemoryOperand += CurOp;
1151
1152 // Emit segment override opcode prefix as needed.
1153 if (MemoryOperand >= 0)
1154 EmitSegmentOverridePrefix(CurByte, MemoryOperand+X86::AddrSegmentReg,
1155 MI, OS);
1156
1157 // Emit the repeat opcode prefix as needed.
1158 if (TSFlags & X86II::REP)
1159 EmitByte(0xF3, CurByte, OS);
1160
1161 // Emit the address size opcode prefix as needed.
1162 bool need_address_override;
1163 uint64_t AdSize = TSFlags & X86II::AdSizeMask;
1164 if ((is16BitMode(STI) && AdSize == X86II::AdSize32) ||
1165 (is32BitMode(STI) && AdSize == X86II::AdSize16) ||
1166 (is64BitMode(STI) && AdSize == X86II::AdSize32)) {
1167 need_address_override = true;
1168 } else if (MemoryOperand < 0) {
1169 need_address_override = false;
1170 } else if (is64BitMode(STI)) {
1171 assert(!Is16BitMemOperand(MI, MemoryOperand, STI));
1172 need_address_override = Is32BitMemOperand(MI, MemoryOperand);
1173 } else if (is32BitMode(STI)) {
1174 assert(!Is64BitMemOperand(MI, MemoryOperand));
1175 need_address_override = Is16BitMemOperand(MI, MemoryOperand, STI);
1176 } else {
1177 assert(is16BitMode(STI));
1178 assert(!Is64BitMemOperand(MI, MemoryOperand));
1179 need_address_override = !Is16BitMemOperand(MI, MemoryOperand, STI);
1180 }
1181
1182 if (need_address_override)
1183 EmitByte(0x67, CurByte, OS);
1184
1185 bool Rex = false;
1186 if (Encoding == 0)
1187 Rex = emitOpcodePrefix(TSFlags, CurByte, MemoryOperand, MI, Desc, STI, OS);
1188 else
1189 EmitVEXOpcodePrefix(TSFlags, CurByte, MemoryOperand, MI, Desc, OS);
1190
1191 uint8_t BaseOpcode = X86II::getBaseOpcodeFor(TSFlags);
1192
1193 if (TSFlags & X86II::Has3DNow0F0FOpcode)
1194 BaseOpcode = 0x0F; // Weird 3DNow! encoding.
1195
1196 uint64_t Form = TSFlags & X86II::FormMask;
1197 switch (Form) {
1198 default: errs() << "FORM: " << Form << "\n";
1199 llvm_unreachable("Unknown FormMask value in X86MCCodeEmitter!");
1200 case X86II::Pseudo:
1201 llvm_unreachable("Pseudo instruction shouldn't be emitted");
1202 case X86II::RawFrmDstSrc: {
1203 unsigned siReg = MI.getOperand(1).getReg();
1204 assert(((siReg == X86::SI && MI.getOperand(0).getReg() == X86::DI) ||
1205 (siReg == X86::ESI && MI.getOperand(0).getReg() == X86::EDI) ||
1206 (siReg == X86::RSI && MI.getOperand(0).getReg() == X86::RDI)) &&
1207 "SI and DI register sizes do not match");
1208 // Emit segment override opcode prefix as needed (not for %ds).
1209 if (MI.getOperand(2).getReg() != X86::DS)
1210 EmitSegmentOverridePrefix(CurByte, 2, MI, OS);
1211 // Emit AdSize prefix as needed.
1212 if ((!is32BitMode(STI) && siReg == X86::ESI) ||
1213 (is32BitMode(STI) && siReg == X86::SI))
1214 EmitByte(0x67, CurByte, OS);
1215 CurOp += 3; // Consume operands.
1216 EmitByte(BaseOpcode, CurByte, OS);
1217 break;
1218 }
1219 case X86II::RawFrmSrc: {
1220 unsigned siReg = MI.getOperand(0).getReg();
1221 // Emit segment override opcode prefix as needed (not for %ds).
1222 if (MI.getOperand(1).getReg() != X86::DS)
1223 EmitSegmentOverridePrefix(CurByte, 1, MI, OS);
1224 // Emit AdSize prefix as needed.
1225 if ((!is32BitMode(STI) && siReg == X86::ESI) ||
1226 (is32BitMode(STI) && siReg == X86::SI))
1227 EmitByte(0x67, CurByte, OS);
1228 CurOp += 2; // Consume operands.
1229 EmitByte(BaseOpcode, CurByte, OS);
1230 break;
1231 }
1232 case X86II::RawFrmDst: {
1233 unsigned siReg = MI.getOperand(0).getReg();
1234 // Emit AdSize prefix as needed.
1235 if ((!is32BitMode(STI) && siReg == X86::EDI) ||
1236 (is32BitMode(STI) && siReg == X86::DI))
1237 EmitByte(0x67, CurByte, OS);
1238 ++CurOp; // Consume operand.
1239 EmitByte(BaseOpcode, CurByte, OS);
1240 break;
1241 }
1242 case X86II::RawFrm:
1243 EmitByte(BaseOpcode, CurByte, OS);
1244 break;
1245 case X86II::RawFrmMemOffs:
1246 // Emit segment override opcode prefix as needed.
1247 EmitSegmentOverridePrefix(CurByte, 1, MI, OS);
1248 EmitByte(BaseOpcode, CurByte, OS);
1249 EmitImmediate(MI.getOperand(CurOp++), MI.getLoc(),
1250 X86II::getSizeOfImm(TSFlags), getImmFixupKind(TSFlags),
1251 CurByte, OS, Fixups);
1252 ++CurOp; // skip segment operand
1253 break;
1254 case X86II::RawFrmImm8:
1255 EmitByte(BaseOpcode, CurByte, OS);
1256 EmitImmediate(MI.getOperand(CurOp++), MI.getLoc(),
1257 X86II::getSizeOfImm(TSFlags), getImmFixupKind(TSFlags),
1258 CurByte, OS, Fixups);
1259 EmitImmediate(MI.getOperand(CurOp++), MI.getLoc(), 1, FK_Data_1, CurByte,
1260 OS, Fixups);
1261 break;
1262 case X86II::RawFrmImm16:
1263 EmitByte(BaseOpcode, CurByte, OS);
1264 EmitImmediate(MI.getOperand(CurOp++), MI.getLoc(),
1265 X86II::getSizeOfImm(TSFlags), getImmFixupKind(TSFlags),
1266 CurByte, OS, Fixups);
1267 EmitImmediate(MI.getOperand(CurOp++), MI.getLoc(), 2, FK_Data_2, CurByte,
1268 OS, Fixups);
1269 break;
1270
1271 case X86II::AddRegFrm:
1272 EmitByte(BaseOpcode + GetX86RegNum(MI.getOperand(CurOp++)), CurByte, OS);
1273 break;
1274
1275 case X86II::MRMDestReg: {
1276 EmitByte(BaseOpcode, CurByte, OS);
1277 unsigned SrcRegNum = CurOp + 1;
1278
1279 if (HasEVEX_K) // Skip writemask
1280 ++SrcRegNum;
1281
1282 if (HasVEX_4V) // Skip 1st src (which is encoded in VEX_VVVV)
1283 ++SrcRegNum;
1284
1285 EmitRegModRMByte(MI.getOperand(CurOp),
1286 GetX86RegNum(MI.getOperand(SrcRegNum)), CurByte, OS);
1287 CurOp = SrcRegNum + 1;
1288 break;
1289 }
1290 case X86II::MRMDestMem: {
1291 EmitByte(BaseOpcode, CurByte, OS);
1292 unsigned SrcRegNum = CurOp + X86::AddrNumOperands;
1293
1294 if (HasEVEX_K) // Skip writemask
1295 ++SrcRegNum;
1296
1297 if (HasVEX_4V) // Skip 1st src (which is encoded in VEX_VVVV)
1298 ++SrcRegNum;
1299
1300 emitMemModRMByte(MI, CurOp, GetX86RegNum(MI.getOperand(SrcRegNum)), TSFlags,
1301 Rex, CurByte, OS, Fixups, STI);
1302 CurOp = SrcRegNum + 1;
1303 break;
1304 }
1305 case X86II::MRMSrcReg: {
1306 EmitByte(BaseOpcode, CurByte, OS);
1307 unsigned SrcRegNum = CurOp + 1;
1308
1309 if (HasEVEX_K) // Skip writemask
1310 ++SrcRegNum;
1311
1312 if (HasVEX_4V) // Skip 1st src (which is encoded in VEX_VVVV)
1313 ++SrcRegNum;
1314
1315 if (HasMemOp4) // Capture 2nd src (which is encoded in I8IMM)
1316 I8RegNum = getX86RegEncoding(MI, SrcRegNum++);
1317
1318 EmitRegModRMByte(MI.getOperand(SrcRegNum),
1319 GetX86RegNum(MI.getOperand(CurOp)), CurByte, OS);
1320 CurOp = SrcRegNum + 1;
1321 if (HasVEX_4VOp3)
1322 ++CurOp;
1323 if (!HasMemOp4 && HasVEX_I8IMM)
1324 I8RegNum = getX86RegEncoding(MI, CurOp++);
1325 // do not count the rounding control operand
1326 if (HasEVEX_RC)
1327 --NumOps;
1328 break;
1329 }
1330 case X86II::MRMSrcMem: {
1331 unsigned FirstMemOp = CurOp+1;
1332
1333 if (HasEVEX_K) // Skip writemask
1334 ++FirstMemOp;
1335
1336 if (HasVEX_4V)
1337 ++FirstMemOp; // Skip the register source (which is encoded in VEX_VVVV).
1338
1339 if (HasMemOp4) // Capture second register source (encoded in I8IMM)
1340 I8RegNum = getX86RegEncoding(MI, FirstMemOp++);
1341
1342 EmitByte(BaseOpcode, CurByte, OS);
1343
1344 emitMemModRMByte(MI, FirstMemOp, GetX86RegNum(MI.getOperand(CurOp)),
1345 TSFlags, Rex, CurByte, OS, Fixups, STI);
1346 CurOp = FirstMemOp + X86::AddrNumOperands;
1347 if (HasVEX_4VOp3)
1348 ++CurOp;
1349 if (!HasMemOp4 && HasVEX_I8IMM)
1350 I8RegNum = getX86RegEncoding(MI, CurOp++);
1351 break;
1352 }
1353
1354 case X86II::MRMXr:
1355 case X86II::MRM0r: case X86II::MRM1r:
1356 case X86II::MRM2r: case X86II::MRM3r:
1357 case X86II::MRM4r: case X86II::MRM5r:
1358 case X86II::MRM6r: case X86II::MRM7r: {
1359 if (HasVEX_4V) // Skip the register dst (which is encoded in VEX_VVVV).
1360 ++CurOp;
1361 if (HasEVEX_K) // Skip writemask
1362 ++CurOp;
1363 EmitByte(BaseOpcode, CurByte, OS);
1364 EmitRegModRMByte(MI.getOperand(CurOp++),
1365 (Form == X86II::MRMXr) ? 0 : Form-X86II::MRM0r,
1366 CurByte, OS);
1367 break;
1368 }
1369
1370 case X86II::MRMXm:
1371 case X86II::MRM0m: case X86II::MRM1m:
1372 case X86II::MRM2m: case X86II::MRM3m:
1373 case X86II::MRM4m: case X86II::MRM5m:
1374 case X86II::MRM6m: case X86II::MRM7m: {
1375 if (HasVEX_4V) // Skip the register dst (which is encoded in VEX_VVVV).
1376 ++CurOp;
1377 if (HasEVEX_K) // Skip writemask
1378 ++CurOp;
1379 EmitByte(BaseOpcode, CurByte, OS);
1380 emitMemModRMByte(MI, CurOp,
1381 (Form == X86II::MRMXm) ? 0 : Form - X86II::MRM0m, TSFlags,
1382 Rex, CurByte, OS, Fixups, STI);
1383 CurOp += X86::AddrNumOperands;
1384 break;
1385 }
1386 case X86II::MRM_C0: case X86II::MRM_C1: case X86II::MRM_C2:
1387 case X86II::MRM_C3: case X86II::MRM_C4: case X86II::MRM_C5:
1388 case X86II::MRM_C6: case X86II::MRM_C7: case X86II::MRM_C8:
1389 case X86II::MRM_C9: case X86II::MRM_CA: case X86II::MRM_CB:
1390 case X86II::MRM_CC: case X86II::MRM_CD: case X86II::MRM_CE:
1391 case X86II::MRM_CF: case X86II::MRM_D0: case X86II::MRM_D1:
1392 case X86II::MRM_D2: case X86II::MRM_D3: case X86II::MRM_D4:
1393 case X86II::MRM_D5: case X86II::MRM_D6: case X86II::MRM_D7:
1394 case X86II::MRM_D8: case X86II::MRM_D9: case X86II::MRM_DA:
1395 case X86II::MRM_DB: case X86II::MRM_DC: case X86II::MRM_DD:
1396 case X86II::MRM_DE: case X86II::MRM_DF: case X86II::MRM_E0:
1397 case X86II::MRM_E1: case X86II::MRM_E2: case X86II::MRM_E3:
1398 case X86II::MRM_E4: case X86II::MRM_E5: case X86II::MRM_E6:
1399 case X86II::MRM_E7: case X86II::MRM_E8: case X86II::MRM_E9:
1400 case X86II::MRM_EA: case X86II::MRM_EB: case X86II::MRM_EC:
1401 case X86II::MRM_ED: case X86II::MRM_EE: case X86II::MRM_EF:
1402 case X86II::MRM_F0: case X86II::MRM_F1: case X86II::MRM_F2:
1403 case X86II::MRM_F3: case X86II::MRM_F4: case X86II::MRM_F5:
1404 case X86II::MRM_F6: case X86II::MRM_F7: case X86II::MRM_F8:
1405 case X86II::MRM_F9: case X86II::MRM_FA: case X86II::MRM_FB:
1406 case X86II::MRM_FC: case X86II::MRM_FD: case X86II::MRM_FE:
1407 case X86II::MRM_FF:
1408 EmitByte(BaseOpcode, CurByte, OS);
1409 EmitByte(0xC0 + Form - X86II::MRM_C0, CurByte, OS);
1410 break;
1411 }
1412
1413 if (HasVEX_I8IMM) {
1414 // The last source register of a 4 operand instruction in AVX is encoded
1415 // in bits[7:4] of a immediate byte.
1416 assert(I8RegNum < 16 && "Register encoding out of range");
1417 I8RegNum <<= 4;
1418 if (CurOp != NumOps) {
1419 unsigned Val = MI.getOperand(CurOp++).getImm();
1420 assert(Val < 16 && "Immediate operand value out of range");
1421 I8RegNum |= Val;
1422 }
1423 EmitImmediate(MCOperand::createImm(I8RegNum), MI.getLoc(), 1, FK_Data_1,
1424 CurByte, OS, Fixups);
1425 } else {
1426 // If there is a remaining operand, it must be a trailing immediate. Emit it
1427 // according to the right size for the instruction. Some instructions
1428 // (SSE4a extrq and insertq) have two trailing immediates.
1429 while (CurOp != NumOps && NumOps - CurOp <= 2) {
1430 EmitImmediate(MI.getOperand(CurOp++), MI.getLoc(),
1431 X86II::getSizeOfImm(TSFlags), getImmFixupKind(TSFlags),
1432 CurByte, OS, Fixups);
1433 }
1434 }
1435
1436 if (TSFlags & X86II::Has3DNow0F0FOpcode)
1437 EmitByte(X86II::getBaseOpcodeFor(TSFlags), CurByte, OS);
1438
1439 #ifndef NDEBUG
1440 // FIXME: Verify.
1441 if (/*!Desc.isVariadic() &&*/ CurOp != NumOps) {
1442 errs() << "Cannot encode all operands of: ";
1443 MI.dump();
1444 errs() << '\n';
1445 abort();
1446 }
1447 #endif
1448 }
1449