//===-- X86MCInstLower.cpp - Convert X86 MachineInstr to an MCInst --------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains code to lower X86 MachineInstrs to their corresponding
// MCInst records.
//
//===----------------------------------------------------------------------===//

#include "X86AsmPrinter.h"
#include "InstPrinter/X86ATTInstPrinter.h"
#include "X86COFFMachineModuleInfo.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/CodeGen/MachineModuleInfoImpls.h"
#include "llvm/IR/Type.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstBuilder.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/Support/FormattedStream.h"
#include "llvm/Target/Mangler.h"
using namespace llvm;

namespace {

/// X86MCInstLower - This class is used to lower a MachineInstr into an MCInst.
class X86MCInstLower {
  MCContext &Ctx;
  Mangler *Mang;
  const MachineFunction &MF;
  const TargetMachine &TM;
  const MCAsmInfo &MAI;
  X86AsmPrinter &AsmPrinter;
public:
  X86MCInstLower(Mangler *mang, const MachineFunction &MF,
                 X86AsmPrinter &asmprinter);

  void Lower(const MachineInstr *MI, MCInst &OutMI) const;

  MCSymbol *GetSymbolFromOperand(const MachineOperand &MO) const;
  MCOperand LowerSymbolOperand(const MachineOperand &MO, MCSymbol *Sym) const;

private:
  MachineModuleInfoMachO &getMachOMMI() const;
};

} // end anonymous namespace

X86MCInstLower::X86MCInstLower(Mangler *mang, const MachineFunction &mf,
                               X86AsmPrinter &asmprinter)
: Ctx(mf.getContext()), Mang(mang), MF(mf), TM(mf.getTarget()),
  MAI(*TM.getMCAsmInfo()), AsmPrinter(asmprinter) {}

MachineModuleInfoMachO &X86MCInstLower::getMachOMMI() const {
  return MF.getMMI().getObjFileInfo<MachineModuleInfoMachO>();
}


/// GetSymbolFromOperand - Lower an MO_GlobalAddress or MO_ExternalSymbol
/// operand to an MCSymbol.
MCSymbol *X86MCInstLower::
GetSymbolFromOperand(const MachineOperand &MO) const {
  assert((MO.isGlobal() || MO.isSymbol() || MO.isMBB()) &&
         "Isn't a symbol reference");

  SmallString<128> Name;

  if (MO.isGlobal()) {
    const GlobalValue *GV = MO.getGlobal();
    bool isImplicitlyPrivate = false;
    if (MO.getTargetFlags() == X86II::MO_DARWIN_STUB ||
        MO.getTargetFlags() == X86II::MO_DARWIN_NONLAZY ||
        MO.getTargetFlags() == X86II::MO_DARWIN_NONLAZY_PIC_BASE ||
        MO.getTargetFlags() == X86II::MO_DARWIN_HIDDEN_NONLAZY_PIC_BASE)
      isImplicitlyPrivate = true;

    Mang->getNameWithPrefix(Name, GV, isImplicitlyPrivate);
  } else if (MO.isSymbol()) {
    Name += MAI.getGlobalPrefix();
    Name += MO.getSymbolName();
  } else if (MO.isMBB()) {
    Name += MO.getMBB()->getSymbol()->getName();
  }

  // If the target flags on the operand change the name of the symbol, do that
  // before we return the symbol.
  switch (MO.getTargetFlags()) {
  default: break;
  case X86II::MO_DLLIMPORT: {
    // Handle dllimport linkage.
    const char *Prefix = "__imp_";
    Name.insert(Name.begin(), Prefix, Prefix+strlen(Prefix));
    break;
  }
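  // On Darwin, these flags route the reference through a "$non_lazy_ptr"
  // indirect symbol stub; the stub entries recorded below are emitted by the
  // AsmPrinter when the file is finalized.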
  case X86II::MO_DARWIN_NONLAZY:
  case X86II::MO_DARWIN_NONLAZY_PIC_BASE: {
    Name += "$non_lazy_ptr";
    MCSymbol *Sym = Ctx.GetOrCreateSymbol(Name.str());

    MachineModuleInfoImpl::StubValueTy &StubSym =
      getMachOMMI().getGVStubEntry(Sym);
    if (StubSym.getPointer() == 0) {
      assert(MO.isGlobal() && "Extern symbol not handled yet");
      StubSym =
        MachineModuleInfoImpl::
        StubValueTy(Mang->getSymbol(MO.getGlobal()),
                    !MO.getGlobal()->hasInternalLinkage());
    }
    return Sym;
  }
  case X86II::MO_DARWIN_HIDDEN_NONLAZY_PIC_BASE: {
    Name += "$non_lazy_ptr";
    MCSymbol *Sym = Ctx.GetOrCreateSymbol(Name.str());
    MachineModuleInfoImpl::StubValueTy &StubSym =
      getMachOMMI().getHiddenGVStubEntry(Sym);
    if (StubSym.getPointer() == 0) {
      assert(MO.isGlobal() && "Extern symbol not handled yet");
      StubSym =
        MachineModuleInfoImpl::
        StubValueTy(Mang->getSymbol(MO.getGlobal()),
                    !MO.getGlobal()->hasInternalLinkage());
    }
    return Sym;
  }
  case X86II::MO_DARWIN_STUB: {
    Name += "$stub";
    MCSymbol *Sym = Ctx.GetOrCreateSymbol(Name.str());
    MachineModuleInfoImpl::StubValueTy &StubSym =
      getMachOMMI().getFnStubEntry(Sym);
    if (StubSym.getPointer())
      return Sym;

    if (MO.isGlobal()) {
      StubSym =
        MachineModuleInfoImpl::
        StubValueTy(Mang->getSymbol(MO.getGlobal()),
                    !MO.getGlobal()->hasInternalLinkage());
    } else {
      Name.erase(Name.end()-5, Name.end());
      StubSym =
        MachineModuleInfoImpl::
        StubValueTy(Ctx.GetOrCreateSymbol(Name.str()), false);
    }
    return Sym;
  }
  }

  return Ctx.GetOrCreateSymbol(Name.str());
}

MCOperand X86MCInstLower::LowerSymbolOperand(const MachineOperand &MO,
                                             MCSymbol *Sym) const {
  // FIXME: We would like an efficient form for this, so we don't have to do a
  // lot of extra uniquing.
  const MCExpr *Expr = 0;
  MCSymbolRefExpr::VariantKind RefKind = MCSymbolRefExpr::VK_None;

  switch (MO.getTargetFlags()) {
  default: llvm_unreachable("Unknown target flag on GV operand");
  case X86II::MO_NO_FLAG:    // No flag.
  // These affect the name of the symbol, not any suffix.
  case X86II::MO_DARWIN_NONLAZY:
  case X86II::MO_DLLIMPORT:
  case X86II::MO_DARWIN_STUB:
    break;

  case X86II::MO_TLVP:      RefKind = MCSymbolRefExpr::VK_TLVP; break;
  case X86II::MO_TLVP_PIC_BASE:
    Expr = MCSymbolRefExpr::Create(Sym, MCSymbolRefExpr::VK_TLVP, Ctx);
    // Subtract the pic base.
    Expr = MCBinaryExpr::CreateSub(Expr,
                                  MCSymbolRefExpr::Create(MF.getPICBaseSymbol(),
                                                           Ctx),
                                   Ctx);
    break;
  case X86II::MO_SECREL:    RefKind = MCSymbolRefExpr::VK_SECREL; break;
  case X86II::MO_TLSGD:     RefKind = MCSymbolRefExpr::VK_TLSGD; break;
  case X86II::MO_TLSLD:     RefKind = MCSymbolRefExpr::VK_TLSLD; break;
  case X86II::MO_TLSLDM:    RefKind = MCSymbolRefExpr::VK_TLSLDM; break;
  case X86II::MO_GOTTPOFF:  RefKind = MCSymbolRefExpr::VK_GOTTPOFF; break;
  case X86II::MO_INDNTPOFF: RefKind = MCSymbolRefExpr::VK_INDNTPOFF; break;
  case X86II::MO_TPOFF:     RefKind = MCSymbolRefExpr::VK_TPOFF; break;
  case X86II::MO_DTPOFF:    RefKind = MCSymbolRefExpr::VK_DTPOFF; break;
  case X86II::MO_NTPOFF:    RefKind = MCSymbolRefExpr::VK_NTPOFF; break;
  case X86II::MO_GOTNTPOFF: RefKind = MCSymbolRefExpr::VK_GOTNTPOFF; break;
  case X86II::MO_GOTPCREL:  RefKind = MCSymbolRefExpr::VK_GOTPCREL; break;
  case X86II::MO_GOT:       RefKind = MCSymbolRefExpr::VK_GOT; break;
  case X86II::MO_GOTOFF:    RefKind = MCSymbolRefExpr::VK_GOTOFF; break;
  case X86II::MO_PLT:       RefKind = MCSymbolRefExpr::VK_PLT; break;
  case X86II::MO_PIC_BASE_OFFSET:
  case X86II::MO_DARWIN_NONLAZY_PIC_BASE:
  case X86II::MO_DARWIN_HIDDEN_NONLAZY_PIC_BASE:
    Expr = MCSymbolRefExpr::Create(Sym, Ctx);
    // Subtract the pic base.
    Expr = MCBinaryExpr::CreateSub(Expr,
                            MCSymbolRefExpr::Create(MF.getPICBaseSymbol(), Ctx),
                                   Ctx);
    if (MO.isJTI() && MAI.hasSetDirective()) {
      // If .set directive is supported, use it to reduce the number of
      // relocations the assembler will generate for differences between
      // local labels. This is only safe when the symbols are in the same
      // section so we are restricting it to jumptable references.
      MCSymbol *Label = Ctx.CreateTempSymbol();
      AsmPrinter.OutStreamer.EmitAssignment(Label, Expr);
      Expr = MCSymbolRefExpr::Create(Label, Ctx);
    }
    break;
  }

  if (Expr == 0)
    Expr = MCSymbolRefExpr::Create(Sym, RefKind, Ctx);

  if (!MO.isJTI() && !MO.isMBB() && MO.getOffset())
    Expr = MCBinaryExpr::CreateAdd(Expr,
                                   MCConstantExpr::Create(MO.getOffset(), Ctx),
                                   Ctx);
  return MCOperand::CreateExpr(Expr);
}



static void lower_subreg32(MCInst *MI, unsigned OpNo) {
  // Convert registers in the addr mode according to subreg32.
  unsigned Reg = MI->getOperand(OpNo).getReg();
  if (Reg != 0)
    MI->getOperand(OpNo).setReg(getX86SubSuperRegister(Reg, MVT::i32));
}

static void lower_lea64_32mem(MCInst *MI, unsigned OpNo) {
  // Convert registers in the addr mode according to subreg64.
  for (unsigned i = 0; i != 4; ++i) {
    if (!MI->getOperand(OpNo+i).isReg()) continue;

    unsigned Reg = MI->getOperand(OpNo+i).getReg();
    // LEAs can use RIP-relative addressing, and RIP has no sub/super register.
    if (Reg == 0 || Reg == X86::RIP) continue;

    MI->getOperand(OpNo+i).setReg(getX86SubSuperRegister(Reg, MVT::i64));
  }
}

/// LowerSubReg32_Op0 - Things like MOVZX16rr8 -> MOVZX32rr8.
static void LowerSubReg32_Op0(MCInst &OutMI, unsigned NewOpc) {
  OutMI.setOpcode(NewOpc);
  lower_subreg32(&OutMI, 0);
}
/// LowerUnaryToTwoAddr - R = setb   -> R = sbb R, R
static void LowerUnaryToTwoAddr(MCInst &OutMI, unsigned NewOpc) {
  OutMI.setOpcode(NewOpc);
  OutMI.addOperand(OutMI.getOperand(0));
  OutMI.addOperand(OutMI.getOperand(0));
}

/// \brief Simplify FOO $imm, %{al,ax,eax,rax} to FOO $imm, for instructions
/// with a short fixed-register form.
static void SimplifyShortImmForm(MCInst &Inst, unsigned Opcode) {
  unsigned ImmOp = Inst.getNumOperands() - 1;
  assert(Inst.getOperand(0).isReg() &&
         (Inst.getOperand(ImmOp).isImm() || Inst.getOperand(ImmOp).isExpr()) &&
         ((Inst.getNumOperands() == 3 && Inst.getOperand(1).isReg() &&
           Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg()) ||
          Inst.getNumOperands() == 2) && "Unexpected instruction!");

  // Check whether the destination register can be fixed.
  unsigned Reg = Inst.getOperand(0).getReg();
  if (Reg != X86::AL && Reg != X86::AX && Reg != X86::EAX && Reg != X86::RAX)
    return;

  // If so, rewrite the instruction.
  MCOperand Saved = Inst.getOperand(ImmOp);
  Inst = MCInst();
  Inst.setOpcode(Opcode);
  Inst.addOperand(Saved);
}

/// \brief Simplify things like MOV32rm to MOV32o32a.
static void SimplifyShortMoveForm(X86AsmPrinter &Printer, MCInst &Inst,
                                  unsigned Opcode) {
  // Don't make these simplifications in 64-bit mode; other assemblers don't
  // perform them because they make the code larger.
  if (Printer.getSubtarget().is64Bit())
    return;

  bool IsStore = Inst.getOperand(0).isReg() && Inst.getOperand(1).isReg();
  unsigned AddrBase = IsStore;
  unsigned RegOp = IsStore ? 0 : 5;
  unsigned AddrOp = AddrBase + 3;
  assert(Inst.getNumOperands() == 6 && Inst.getOperand(RegOp).isReg() &&
         Inst.getOperand(AddrBase + 0).isReg() && // base
         Inst.getOperand(AddrBase + 1).isImm() && // scale
         Inst.getOperand(AddrBase + 2).isReg() && // index register
         (Inst.getOperand(AddrOp).isExpr() ||     // address
          Inst.getOperand(AddrOp).isImm()) &&
         Inst.getOperand(AddrBase + 4).isReg() && // segment
         "Unexpected instruction!");

  // Check whether the destination register can be fixed.
  unsigned Reg = Inst.getOperand(RegOp).getReg();
  if (Reg != X86::AL && Reg != X86::AX && Reg != X86::EAX && Reg != X86::RAX)
    return;

  // Check whether this is an absolute address.
  // FIXME: We know TLVP symbol refs aren't, but there should be a better way
  // to do this here.
  bool Absolute = true;
  if (Inst.getOperand(AddrOp).isExpr()) {
    const MCExpr *MCE = Inst.getOperand(AddrOp).getExpr();
    if (const MCSymbolRefExpr *SRE = dyn_cast<MCSymbolRefExpr>(MCE))
      if (SRE->getKind() == MCSymbolRefExpr::VK_TLVP)
        Absolute = false;
  }

  if (Absolute &&
      (Inst.getOperand(AddrBase + 0).getReg() != 0 ||
       Inst.getOperand(AddrBase + 2).getReg() != 0 ||
       Inst.getOperand(AddrBase + 4).getReg() != 0 ||
       Inst.getOperand(AddrBase + 1).getImm() != 1))
    return;

  // If so, rewrite the instruction.
  MCOperand Saved = Inst.getOperand(AddrOp);
  Inst = MCInst();
  Inst.setOpcode(Opcode);
  Inst.addOperand(Saved);
}

void X86MCInstLower::Lower(const MachineInstr *MI, MCInst &OutMI) const {
  OutMI.setOpcode(MI->getOpcode());

  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI->getOperand(i);

    MCOperand MCOp;
    switch (MO.getType()) {
    default:
      MI->dump();
      llvm_unreachable("unknown operand type");
    case MachineOperand::MO_Register:
      // Ignore all implicit register operands.
      if (MO.isImplicit()) continue;
      MCOp = MCOperand::CreateReg(MO.getReg());
      break;
    case MachineOperand::MO_Immediate:
      MCOp = MCOperand::CreateImm(MO.getImm());
      break;
    case MachineOperand::MO_MachineBasicBlock:
    case MachineOperand::MO_GlobalAddress:
    case MachineOperand::MO_ExternalSymbol:
      MCOp = LowerSymbolOperand(MO, GetSymbolFromOperand(MO));
      break;
    case MachineOperand::MO_JumpTableIndex:
      MCOp = LowerSymbolOperand(MO, AsmPrinter.GetJTISymbol(MO.getIndex()));
      break;
    case MachineOperand::MO_ConstantPoolIndex:
      MCOp = LowerSymbolOperand(MO, AsmPrinter.GetCPISymbol(MO.getIndex()));
      break;
    case MachineOperand::MO_BlockAddress:
      MCOp = LowerSymbolOperand(MO,
                     AsmPrinter.GetBlockAddressSymbol(MO.getBlockAddress()));
      break;
    case MachineOperand::MO_RegisterMask:
      // Ignore call clobbers.
      continue;
    }

    OutMI.addOperand(MCOp);
  }

  // Handle a few special cases to eliminate operand modifiers.
ReSimplify:
  switch (OutMI.getOpcode()) {
  case X86::LEA64_32r: // Handle 'subreg rewriting' for the lea64_32mem operand.
    lower_lea64_32mem(&OutMI, 1);
    // FALL THROUGH.
  case X86::LEA64r:
  case X86::LEA16r:
  case X86::LEA32r:
    // LEA should have a segment register, but it must be empty.
    assert(OutMI.getNumOperands() == 1+X86::AddrNumOperands &&
           "Unexpected # of LEA operands");
    assert(OutMI.getOperand(1+X86::AddrSegmentReg).getReg() == 0 &&
           "LEA has segment specified!");
    break;
  case X86::MOVZX64rr32:  LowerSubReg32_Op0(OutMI, X86::MOV32rr); break;
  case X86::MOVZX64rm32:  LowerSubReg32_Op0(OutMI, X86::MOV32rm); break;
  case X86::MOV64ri64i32: LowerSubReg32_Op0(OutMI, X86::MOV32ri); break;
  case X86::MOVZX64rr8:   LowerSubReg32_Op0(OutMI, X86::MOVZX32rr8); break;
  case X86::MOVZX64rm8:   LowerSubReg32_Op0(OutMI, X86::MOVZX32rm8); break;
  case X86::MOVZX64rr16:  LowerSubReg32_Op0(OutMI, X86::MOVZX32rr16); break;
  case X86::MOVZX64rm16:  LowerSubReg32_Op0(OutMI, X86::MOVZX32rm16); break;
  case X86::MOV8r0:       LowerUnaryToTwoAddr(OutMI, X86::XOR8rr); break;
  case X86::MOV32r0:      LowerUnaryToTwoAddr(OutMI, X86::XOR32rr); break;

  case X86::MOV16r0:
    LowerSubReg32_Op0(OutMI, X86::MOV32r0);   // MOV16r0 -> MOV32r0
    LowerUnaryToTwoAddr(OutMI, X86::XOR32rr); // MOV32r0 -> XOR32rr
    break;
  case X86::MOV64r0:
    LowerSubReg32_Op0(OutMI, X86::MOV32r0);   // MOV64r0 -> MOV32r0
    LowerUnaryToTwoAddr(OutMI, X86::XOR32rr); // MOV32r0 -> XOR32rr
    break;

  // Commute operands to get a smaller encoding by using VEX.R instead of VEX.B
  // if one of the registers is extended, but the other isn't.
  case X86::VMOVAPDrr:
  case X86::VMOVAPDYrr:
  case X86::VMOVAPSrr:
  case X86::VMOVAPSYrr:
  case X86::VMOVDQArr:
  case X86::VMOVDQAYrr:
  case X86::VMOVDQUrr:
  case X86::VMOVDQUYrr:
  case X86::VMOVUPDrr:
  case X86::VMOVUPDYrr:
  case X86::VMOVUPSrr:
  case X86::VMOVUPSYrr: {
    if (!X86II::isX86_64ExtendedReg(OutMI.getOperand(0).getReg()) &&
        X86II::isX86_64ExtendedReg(OutMI.getOperand(1).getReg())) {
      unsigned NewOpc;
      switch (OutMI.getOpcode()) {
      default: llvm_unreachable("Invalid opcode");
      case X86::VMOVAPDrr:  NewOpc = X86::VMOVAPDrr_REV;  break;
      case X86::VMOVAPDYrr: NewOpc = X86::VMOVAPDYrr_REV; break;
      case X86::VMOVAPSrr:  NewOpc = X86::VMOVAPSrr_REV;  break;
      case X86::VMOVAPSYrr: NewOpc = X86::VMOVAPSYrr_REV; break;
      case X86::VMOVDQArr:  NewOpc = X86::VMOVDQArr_REV;  break;
      case X86::VMOVDQAYrr: NewOpc = X86::VMOVDQAYrr_REV; break;
      case X86::VMOVDQUrr:  NewOpc = X86::VMOVDQUrr_REV;  break;
      case X86::VMOVDQUYrr: NewOpc = X86::VMOVDQUYrr_REV; break;
      case X86::VMOVUPDrr:  NewOpc = X86::VMOVUPDrr_REV;  break;
      case X86::VMOVUPDYrr: NewOpc = X86::VMOVUPDYrr_REV; break;
      case X86::VMOVUPSrr:  NewOpc = X86::VMOVUPSrr_REV;  break;
      case X86::VMOVUPSYrr: NewOpc = X86::VMOVUPSYrr_REV; break;
      }
      OutMI.setOpcode(NewOpc);
    }
    break;
  }
  case X86::VMOVSDrr:
  case X86::VMOVSSrr: {
    if (!X86II::isX86_64ExtendedReg(OutMI.getOperand(0).getReg()) &&
        X86II::isX86_64ExtendedReg(OutMI.getOperand(2).getReg())) {
      unsigned NewOpc;
      switch (OutMI.getOpcode()) {
      default: llvm_unreachable("Invalid opcode");
      case X86::VMOVSDrr:   NewOpc = X86::VMOVSDrr_REV;   break;
      case X86::VMOVSSrr:   NewOpc = X86::VMOVSSrr_REV;   break;
      }
      OutMI.setOpcode(NewOpc);
    }
    break;
  }

  // TAILJMPr64, CALL64r, CALL64pcrel32 - These instructions have register
  // inputs modeled as normal uses instead of implicit uses.  As such, truncate
  // off all but the first operand (the callee).  FIXME: Change isel.
  case X86::TAILJMPr64:
  case X86::CALL64r:
  case X86::CALL64pcrel32: {
    unsigned Opcode = OutMI.getOpcode();
    MCOperand Saved = OutMI.getOperand(0);
    OutMI = MCInst();
    OutMI.setOpcode(Opcode);
    OutMI.addOperand(Saved);
    break;
  }

  case X86::EH_RETURN:
  case X86::EH_RETURN64: {
    OutMI = MCInst();
    OutMI.setOpcode(X86::RET);
    break;
  }

  // TAILJMPr, TAILJMPd, TAILJMPd64 - Lower to the correct jump instruction.
  case X86::TAILJMPr:
  case X86::TAILJMPd:
  case X86::TAILJMPd64: {
    unsigned Opcode;
    switch (OutMI.getOpcode()) {
    default: llvm_unreachable("Invalid opcode");
    case X86::TAILJMPr: Opcode = X86::JMP32r; break;
    case X86::TAILJMPd:
    case X86::TAILJMPd64: Opcode = X86::JMP_1; break;
    }

    MCOperand Saved = OutMI.getOperand(0);
    OutMI = MCInst();
    OutMI.setOpcode(Opcode);
    OutMI.addOperand(Saved);
    break;
  }

  // These are pseudo-ops for OR to help with the OR->ADD transformation.  We do
  // this with an ugly goto in case the resultant OR uses EAX and needs the
  // short form.
  case X86::ADD16rr_DB:   OutMI.setOpcode(X86::OR16rr); goto ReSimplify;
  case X86::ADD32rr_DB:   OutMI.setOpcode(X86::OR32rr); goto ReSimplify;
  case X86::ADD64rr_DB:   OutMI.setOpcode(X86::OR64rr); goto ReSimplify;
  case X86::ADD16ri_DB:   OutMI.setOpcode(X86::OR16ri); goto ReSimplify;
  case X86::ADD32ri_DB:   OutMI.setOpcode(X86::OR32ri); goto ReSimplify;
  case X86::ADD64ri32_DB: OutMI.setOpcode(X86::OR64ri32); goto ReSimplify;
  case X86::ADD16ri8_DB:  OutMI.setOpcode(X86::OR16ri8); goto ReSimplify;
  case X86::ADD32ri8_DB:  OutMI.setOpcode(X86::OR32ri8); goto ReSimplify;
  case X86::ADD64ri8_DB:  OutMI.setOpcode(X86::OR64ri8); goto ReSimplify;

  // The assembler backend wants to see branches in their small form and relax
  // them to their large form.  The JIT can only handle the large form because
  // it does not do relaxation.  For now, translate the large form to the
  // small one here.
  case X86::JMP_4: OutMI.setOpcode(X86::JMP_1); break;
  case X86::JO_4:  OutMI.setOpcode(X86::JO_1); break;
  case X86::JNO_4: OutMI.setOpcode(X86::JNO_1); break;
  case X86::JB_4:  OutMI.setOpcode(X86::JB_1); break;
  case X86::JAE_4: OutMI.setOpcode(X86::JAE_1); break;
  case X86::JE_4:  OutMI.setOpcode(X86::JE_1); break;
  case X86::JNE_4: OutMI.setOpcode(X86::JNE_1); break;
  case X86::JBE_4: OutMI.setOpcode(X86::JBE_1); break;
  case X86::JA_4:  OutMI.setOpcode(X86::JA_1); break;
  case X86::JS_4:  OutMI.setOpcode(X86::JS_1); break;
  case X86::JNS_4: OutMI.setOpcode(X86::JNS_1); break;
  case X86::JP_4:  OutMI.setOpcode(X86::JP_1); break;
  case X86::JNP_4: OutMI.setOpcode(X86::JNP_1); break;
  case X86::JL_4:  OutMI.setOpcode(X86::JL_1); break;
  case X86::JGE_4: OutMI.setOpcode(X86::JGE_1); break;
  case X86::JLE_4: OutMI.setOpcode(X86::JLE_1); break;
  case X86::JG_4:  OutMI.setOpcode(X86::JG_1); break;

  // Atomic load and store require a separate pseudo-inst because Acquire
  // implies mayStore and Release implies mayLoad; fix these to regular MOV
  // instructions here.
  case X86::ACQUIRE_MOV8rm:  OutMI.setOpcode(X86::MOV8rm); goto ReSimplify;
  case X86::ACQUIRE_MOV16rm: OutMI.setOpcode(X86::MOV16rm); goto ReSimplify;
  case X86::ACQUIRE_MOV32rm: OutMI.setOpcode(X86::MOV32rm); goto ReSimplify;
  case X86::ACQUIRE_MOV64rm: OutMI.setOpcode(X86::MOV64rm); goto ReSimplify;
  case X86::RELEASE_MOV8mr:  OutMI.setOpcode(X86::MOV8mr); goto ReSimplify;
  case X86::RELEASE_MOV16mr: OutMI.setOpcode(X86::MOV16mr); goto ReSimplify;
  case X86::RELEASE_MOV32mr: OutMI.setOpcode(X86::MOV32mr); goto ReSimplify;
  case X86::RELEASE_MOV64mr: OutMI.setOpcode(X86::MOV64mr); goto ReSimplify;

  // We don't currently select the correct instruction form for instructions
  // which have a short %eax, etc. form. Handle this by custom lowering, for
  // now.
  //
  // Note, we are currently not handling the following instructions:
  // MOV64ao8, MOV64o8a
  // XCHG16ar, XCHG32ar, XCHG64ar
  case X86::MOV8mr_NOREX:
  case X86::MOV8mr:     SimplifyShortMoveForm(AsmPrinter, OutMI, X86::MOV8ao8); break;
  case X86::MOV8rm_NOREX:
  case X86::MOV8rm:     SimplifyShortMoveForm(AsmPrinter, OutMI, X86::MOV8o8a); break;
  case X86::MOV16mr:    SimplifyShortMoveForm(AsmPrinter, OutMI, X86::MOV16ao16); break;
  case X86::MOV16rm:    SimplifyShortMoveForm(AsmPrinter, OutMI, X86::MOV16o16a); break;
  case X86::MOV32mr:    SimplifyShortMoveForm(AsmPrinter, OutMI, X86::MOV32ao32); break;
  case X86::MOV32rm:    SimplifyShortMoveForm(AsmPrinter, OutMI, X86::MOV32o32a); break;

  case X86::ADC8ri:     SimplifyShortImmForm(OutMI, X86::ADC8i8);    break;
  case X86::ADC16ri:    SimplifyShortImmForm(OutMI, X86::ADC16i16);  break;
  case X86::ADC32ri:    SimplifyShortImmForm(OutMI, X86::ADC32i32);  break;
  case X86::ADC64ri32:  SimplifyShortImmForm(OutMI, X86::ADC64i32);  break;
  case X86::ADD8ri:     SimplifyShortImmForm(OutMI, X86::ADD8i8);    break;
  case X86::ADD16ri:    SimplifyShortImmForm(OutMI, X86::ADD16i16);  break;
  case X86::ADD32ri:    SimplifyShortImmForm(OutMI, X86::ADD32i32);  break;
  case X86::ADD64ri32:  SimplifyShortImmForm(OutMI, X86::ADD64i32);  break;
  case X86::AND8ri:     SimplifyShortImmForm(OutMI, X86::AND8i8);    break;
  case X86::AND16ri:    SimplifyShortImmForm(OutMI, X86::AND16i16);  break;
  case X86::AND32ri:    SimplifyShortImmForm(OutMI, X86::AND32i32);  break;
  case X86::AND64ri32:  SimplifyShortImmForm(OutMI, X86::AND64i32);  break;
  case X86::CMP8ri:     SimplifyShortImmForm(OutMI, X86::CMP8i8);    break;
  case X86::CMP16ri:    SimplifyShortImmForm(OutMI, X86::CMP16i16);  break;
  case X86::CMP32ri:    SimplifyShortImmForm(OutMI, X86::CMP32i32);  break;
  case X86::CMP64ri32:  SimplifyShortImmForm(OutMI, X86::CMP64i32);  break;
  case X86::OR8ri:      SimplifyShortImmForm(OutMI, X86::OR8i8);     break;
  case X86::OR16ri:     SimplifyShortImmForm(OutMI, X86::OR16i16);   break;
  case X86::OR32ri:     SimplifyShortImmForm(OutMI, X86::OR32i32);   break;
  case X86::OR64ri32:   SimplifyShortImmForm(OutMI, X86::OR64i32);   break;
  case X86::SBB8ri:     SimplifyShortImmForm(OutMI, X86::SBB8i8);    break;
  case X86::SBB16ri:    SimplifyShortImmForm(OutMI, X86::SBB16i16);  break;
  case X86::SBB32ri:    SimplifyShortImmForm(OutMI, X86::SBB32i32);  break;
  case X86::SBB64ri32:  SimplifyShortImmForm(OutMI, X86::SBB64i32);  break;
  case X86::SUB8ri:     SimplifyShortImmForm(OutMI, X86::SUB8i8);    break;
  case X86::SUB16ri:    SimplifyShortImmForm(OutMI, X86::SUB16i16);  break;
  case X86::SUB32ri:    SimplifyShortImmForm(OutMI, X86::SUB32i32);  break;
  case X86::SUB64ri32:  SimplifyShortImmForm(OutMI, X86::SUB64i32);  break;
  case X86::TEST8ri:    SimplifyShortImmForm(OutMI, X86::TEST8i8);   break;
  case X86::TEST16ri:   SimplifyShortImmForm(OutMI, X86::TEST16i16); break;
  case X86::TEST32ri:   SimplifyShortImmForm(OutMI, X86::TEST32i32); break;
  case X86::TEST64ri32: SimplifyShortImmForm(OutMI, X86::TEST64i32); break;
  case X86::XOR8ri:     SimplifyShortImmForm(OutMI, X86::XOR8i8);    break;
  case X86::XOR16ri:    SimplifyShortImmForm(OutMI, X86::XOR16i16);  break;
  case X86::XOR32ri:    SimplifyShortImmForm(OutMI, X86::XOR32i32);  break;
  case X86::XOR64ri32:  SimplifyShortImmForm(OutMI, X86::XOR64i32);  break;

  case X86::MORESTACK_RET:
    OutMI.setOpcode(X86::RET);
    break;

  case X86::MORESTACK_RET_RESTORE_R10:
    OutMI.setOpcode(X86::MOV64rr);
    OutMI.addOperand(MCOperand::CreateReg(X86::R10));
    OutMI.addOperand(MCOperand::CreateReg(X86::RAX));

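    // The "ret" is emitted directly here, while the MOV64rr built above is
    // emitted by the caller once Lower() returns, so the final sequence is
    // "ret; movq %rax, %r10" (the split-stack runtime presumably resumes
    // execution just past the ret, letting the mov restore %r10).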
    AsmPrinter.OutStreamer.EmitInstruction(MCInstBuilder(X86::RET));
    break;
  }
}

static void LowerTlsAddr(MCStreamer &OutStreamer,
                         X86MCInstLower &MCInstLowering,
                         const MachineInstr &MI) {

  bool is64Bits = MI.getOpcode() == X86::TLS_addr64 ||
                  MI.getOpcode() == X86::TLS_base_addr64;

  bool needsPadding = MI.getOpcode() == X86::TLS_addr64;

  MCContext &context = OutStreamer.getContext();

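  // For the 64-bit general-dynamic form, data16 and rex64 prefixes are emitted
  // around the lea/call pair so the sequence has the fixed size linkers expect
  // when relaxing TLS accesses.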
  if (needsPadding)
    OutStreamer.EmitInstruction(MCInstBuilder(X86::DATA16_PREFIX));

  MCSymbolRefExpr::VariantKind SRVK;
  switch (MI.getOpcode()) {
    case X86::TLS_addr32:
    case X86::TLS_addr64:
      SRVK = MCSymbolRefExpr::VK_TLSGD;
      break;
    case X86::TLS_base_addr32:
      SRVK = MCSymbolRefExpr::VK_TLSLDM;
      break;
    case X86::TLS_base_addr64:
      SRVK = MCSymbolRefExpr::VK_TLSLD;
      break;
    default:
      llvm_unreachable("unexpected opcode");
  }

  MCSymbol *sym = MCInstLowering.GetSymbolFromOperand(MI.getOperand(3));
  const MCSymbolRefExpr *symRef = MCSymbolRefExpr::Create(sym, SRVK, context);

  MCInst LEA;
  if (is64Bits) {
    LEA.setOpcode(X86::LEA64r);
    LEA.addOperand(MCOperand::CreateReg(X86::RDI)); // dest
    LEA.addOperand(MCOperand::CreateReg(X86::RIP)); // base
    LEA.addOperand(MCOperand::CreateImm(1));        // scale
    LEA.addOperand(MCOperand::CreateReg(0));        // index
    LEA.addOperand(MCOperand::CreateExpr(symRef));  // disp
    LEA.addOperand(MCOperand::CreateReg(0));        // seg
  } else if (SRVK == MCSymbolRefExpr::VK_TLSLDM) {
    LEA.setOpcode(X86::LEA32r);
    LEA.addOperand(MCOperand::CreateReg(X86::EAX)); // dest
    LEA.addOperand(MCOperand::CreateReg(X86::EBX)); // base
    LEA.addOperand(MCOperand::CreateImm(1));        // scale
    LEA.addOperand(MCOperand::CreateReg(0));        // index
    LEA.addOperand(MCOperand::CreateExpr(symRef));  // disp
    LEA.addOperand(MCOperand::CreateReg(0));        // seg
  } else {
    LEA.setOpcode(X86::LEA32r);
    LEA.addOperand(MCOperand::CreateReg(X86::EAX)); // dest
    LEA.addOperand(MCOperand::CreateReg(0));        // base
    LEA.addOperand(MCOperand::CreateImm(1));        // scale
    LEA.addOperand(MCOperand::CreateReg(X86::EBX)); // index
    LEA.addOperand(MCOperand::CreateExpr(symRef));  // disp
    LEA.addOperand(MCOperand::CreateReg(0));        // seg
  }
  OutStreamer.EmitInstruction(LEA);

  if (needsPadding) {
    OutStreamer.EmitInstruction(MCInstBuilder(X86::DATA16_PREFIX));
    OutStreamer.EmitInstruction(MCInstBuilder(X86::DATA16_PREFIX));
    OutStreamer.EmitInstruction(MCInstBuilder(X86::REX64_PREFIX));
  }

  StringRef name = is64Bits ? "__tls_get_addr" : "___tls_get_addr";
  MCSymbol *tlsGetAddr = context.GetOrCreateSymbol(name);
  const MCSymbolRefExpr *tlsRef =
    MCSymbolRefExpr::Create(tlsGetAddr,
                            MCSymbolRefExpr::VK_PLT,
                            context);

  OutStreamer.EmitInstruction(MCInstBuilder(is64Bits ? X86::CALL64pcrel32
                                                     : X86::CALLpcrel32)
    .addExpr(tlsRef));
}

void X86AsmPrinter::EmitInstruction(const MachineInstr *MI) {
  X86MCInstLower MCInstLowering(Mang, *MF, *this);
  switch (MI->getOpcode()) {
  case TargetOpcode::DBG_VALUE:
    if (isVerbose() && OutStreamer.hasRawTextSupport()) {
      std::string TmpStr;
      raw_string_ostream OS(TmpStr);
      PrintDebugValueComment(MI, OS);
      OutStreamer.EmitRawText(StringRef(OS.str()));
    }
    return;

  // Emit nothing here but a comment if we can.
  case X86::Int_MemBarrier:
    if (OutStreamer.hasRawTextSupport())
      OutStreamer.EmitRawText(StringRef("\t#MEMBARRIER"));
    return;


  case X86::EH_RETURN:
  case X86::EH_RETURN64: {
    // Lower these as normal, but add some comments.
    unsigned Reg = MI->getOperand(0).getReg();
    OutStreamer.AddComment(StringRef("eh_return, addr: %") +
                           X86ATTInstPrinter::getRegisterName(Reg));
    break;
  }
  case X86::TAILJMPr:
  case X86::TAILJMPd:
  case X86::TAILJMPd64:
    // Lower these as normal, but add some comments.
    OutStreamer.AddComment("TAILCALL");
    break;

  case X86::TLS_addr32:
  case X86::TLS_addr64:
  case X86::TLS_base_addr32:
  case X86::TLS_base_addr64:
    return LowerTlsAddr(OutStreamer, MCInstLowering, *MI);

  case X86::MOVPC32r: {
    // This is a pseudo op for a two instruction sequence with a label, which
    // looks like:
    //     call "L1$pb"
    // "L1$pb":
    //     popl %esi

    // Emit the call.
    MCSymbol *PICBase = MF->getPICBaseSymbol();
    // FIXME: We would like an efficient form for this, so we don't have to do a
    // lot of extra uniquing.
    OutStreamer.EmitInstruction(MCInstBuilder(X86::CALLpcrel32)
      .addExpr(MCSymbolRefExpr::Create(PICBase, OutContext)));

    // Emit the label.
    OutStreamer.EmitLabel(PICBase);

    // popl $reg
    OutStreamer.EmitInstruction(MCInstBuilder(X86::POP32r)
      .addReg(MI->getOperand(0).getReg()));
    return;
  }

  case X86::ADD32ri: {
    // Lower the MO_GOT_ABSOLUTE_ADDRESS form of ADD32ri.
    if (MI->getOperand(2).getTargetFlags() != X86II::MO_GOT_ABSOLUTE_ADDRESS)
      break;

    // Okay, we have something like:
    //  EAX = ADD32ri EAX, MO_GOT_ABSOLUTE_ADDRESS(@MYGLOBAL)

    // For this, we want to print something like:
    //   MYGLOBAL + (. - PICBASE)
    // However, we can't generate a ".", so just emit a new label here and refer
    // to it.
    MCSymbol *DotSym = OutContext.CreateTempSymbol();
    OutStreamer.EmitLabel(DotSym);

    // Now that we have emitted the label, lower the complex operand expression.
    MCSymbol *OpSym = MCInstLowering.GetSymbolFromOperand(MI->getOperand(2));

    const MCExpr *DotExpr = MCSymbolRefExpr::Create(DotSym, OutContext);
    const MCExpr *PICBase =
      MCSymbolRefExpr::Create(MF->getPICBaseSymbol(), OutContext);
    DotExpr = MCBinaryExpr::CreateSub(DotExpr, PICBase, OutContext);

    DotExpr = MCBinaryExpr::CreateAdd(MCSymbolRefExpr::Create(OpSym,OutContext),
                                      DotExpr, OutContext);

    OutStreamer.EmitInstruction(MCInstBuilder(X86::ADD32ri)
      .addReg(MI->getOperand(0).getReg())
      .addReg(MI->getOperand(1).getReg())
      .addExpr(DotExpr));
    return;
  }
  }

  MCInst TmpInst;
  MCInstLowering.Lower(MI, TmpInst);
  OutStreamer.EmitInstruction(TmpInst);
}