//===------- X86ExpandPseudo.cpp - Expand pseudo instructions -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains a pass that expands pseudo instructions into target
// instructions to allow proper scheduling, if-conversion, other late
// optimizations, or simply the encoding of the instructions.
//
//===----------------------------------------------------------------------===//

#include "X86.h"
#include "X86FrameLowering.h"
#include "X86InstrBuilder.h"
#include "X86InstrInfo.h"
#include "X86MachineFunctionInfo.h"
#include "X86Subtarget.h"
#include "llvm/Analysis/EHPersonalities.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/Passes.h" // For IDs of passes that are preserved.
#include "llvm/IR/GlobalValue.h"
using namespace llvm;

#define DEBUG_TYPE "x86-pseudo"

namespace {
class X86ExpandPseudo : public MachineFunctionPass {
public:
  static char ID;
  X86ExpandPseudo() : MachineFunctionPass(ID) {}

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    AU.addPreservedID(MachineLoopInfoID);
    AU.addPreservedID(MachineDominatorsID);
    MachineFunctionPass::getAnalysisUsage(AU);
  }

  const X86Subtarget *STI;
  const X86InstrInfo *TII;
  const X86RegisterInfo *TRI;
  const X86MachineFunctionInfo *X86FI;
  const X86FrameLowering *X86FL;

  bool runOnMachineFunction(MachineFunction &Fn) override;

  MachineFunctionProperties getRequiredProperties() const override {
    return MachineFunctionProperties().set(
        MachineFunctionProperties::Property::AllVRegsAllocated);
  }

  const char *getPassName() const override {
    return "X86 pseudo instruction expansion pass";
  }

private:
  bool ExpandMI(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI);
  bool ExpandMBB(MachineBasicBlock &MBB);
};
char X86ExpandPseudo::ID = 0;
} // End anonymous namespace.

/// If \p MBBI is a pseudo instruction, this method expands
/// it to the corresponding (sequence of) actual instruction(s).
/// \returns true if \p MBBI has been expanded.
bool X86ExpandPseudo::ExpandMI(MachineBasicBlock &MBB,
                               MachineBasicBlock::iterator MBBI) {
  MachineInstr &MI = *MBBI;
  unsigned Opcode = MI.getOpcode();
  DebugLoc DL = MBBI->getDebugLoc();
  switch (Opcode) {
  default:
    return false;
  case X86::TCRETURNdi:
  case X86::TCRETURNri:
  case X86::TCRETURNmi:
  case X86::TCRETURNdi64:
  case X86::TCRETURNri64:
  case X86::TCRETURNmi64: {
    bool isMem = Opcode == X86::TCRETURNmi || Opcode == X86::TCRETURNmi64;
    MachineOperand &JumpTarget = MBBI->getOperand(0);
    MachineOperand &StackAdjust = MBBI->getOperand(isMem ? 5 : 1);
    assert(StackAdjust.isImm() && "Expecting immediate value.");
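    // For the memory forms the jump target above is a 5-operand memory
    // reference (base, scale, index, displacement, segment), which is why the
    // stack adjustment immediate sits at index 5; otherwise it is operand 1.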

    // Adjust stack pointer.
    int StackAdj = StackAdjust.getImm();
    int MaxTCDelta = X86FI->getTCReturnAddrDelta();
    int Offset = 0;
    assert(MaxTCDelta <= 0 && "MaxTCDelta should never be positive");

    // Incorporate the retaddr area.
    Offset = StackAdj - MaxTCDelta;
    assert(Offset >= 0 && "Offset should never be negative");

    if (Offset) {
      // Check for possible merge with preceding ADD instruction.
      Offset += X86FL->mergeSPUpdates(MBB, MBBI, true);
      X86FL->emitSPUpdate(MBB, MBBI, Offset, /*InEpilogue=*/true);
    }

    // Jump to label or value in register.
    bool IsWin64 = STI->isTargetWin64();
    if (Opcode == X86::TCRETURNdi || Opcode == X86::TCRETURNdi64) {
      unsigned Op = (Opcode == X86::TCRETURNdi)
                        ? X86::TAILJMPd
                        : (IsWin64 ? X86::TAILJMPd64_REX : X86::TAILJMPd64);
      MachineInstrBuilder MIB = BuildMI(MBB, MBBI, DL, TII->get(Op));
      if (JumpTarget.isGlobal())
        MIB.addGlobalAddress(JumpTarget.getGlobal(), JumpTarget.getOffset(),
                             JumpTarget.getTargetFlags());
      else {
        assert(JumpTarget.isSymbol());
        MIB.addExternalSymbol(JumpTarget.getSymbolName(),
                              JumpTarget.getTargetFlags());
      }
    } else if (Opcode == X86::TCRETURNmi || Opcode == X86::TCRETURNmi64) {
      unsigned Op = (Opcode == X86::TCRETURNmi)
                        ? X86::TAILJMPm
                        : (IsWin64 ? X86::TAILJMPm64_REX : X86::TAILJMPm64);
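      // Rebuild the jump with the pseudo's memory reference: the five
      // operands copied below are base, scale, index, displacement, segment.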
      MachineInstrBuilder MIB = BuildMI(MBB, MBBI, DL, TII->get(Op));
      for (unsigned i = 0; i != 5; ++i)
        MIB.addOperand(MBBI->getOperand(i));
    } else if (Opcode == X86::TCRETURNri64) {
      BuildMI(MBB, MBBI, DL,
              TII->get(IsWin64 ? X86::TAILJMPr64_REX : X86::TAILJMPr64))
          .addReg(JumpTarget.getReg(), RegState::Kill);
    } else {
      BuildMI(MBB, MBBI, DL, TII->get(X86::TAILJMPr))
          .addReg(JumpTarget.getReg(), RegState::Kill);
    }

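    // Preserve liveness: copy the pseudo's implicit operands (typically the
    // register uses added by call lowering) onto the new tail jump.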
    MachineInstr &NewMI = *std::prev(MBBI);
    NewMI.copyImplicitOps(*MBBI->getParent()->getParent(), *MBBI);

    // Delete the pseudo instruction TCRETURN.
    MBB.erase(MBBI);

    return true;
  }
  case X86::EH_RETURN:
  case X86::EH_RETURN64: {
    MachineOperand &DestAddr = MBBI->getOperand(0);
    assert(DestAddr.isReg() && "Offset should be in register!");
    const bool Uses64BitFramePtr =
        STI->isTarget64BitLP64() || STI->isTargetNaCl64();
    unsigned StackPtr = TRI->getStackRegister();
    BuildMI(MBB, MBBI, DL,
            TII->get(Uses64BitFramePtr ? X86::MOV64rr : X86::MOV32rr), StackPtr)
        .addReg(DestAddr.getReg());
    // The EH_RETURN pseudo is really removed during the MC Lowering.
    return true;
  }
  case X86::IRET: {
    // Adjust stack to erase error code
    int64_t StackAdj = MBBI->getOperand(0).getImm();
    X86FL->emitSPUpdate(MBB, MBBI, StackAdj, true);
    // Replace pseudo with machine iret
    BuildMI(MBB, MBBI, DL,
            TII->get(STI->is64Bit() ? X86::IRET64 : X86::IRET32));
    MBB.erase(MBBI);
    return true;
  }
  case X86::RET: {
    // Adjust stack to erase error code
    int64_t StackAdj = MBBI->getOperand(0).getImm();
    MachineInstrBuilder MIB;
    if (StackAdj == 0) {
      MIB = BuildMI(MBB, MBBI, DL,
                    TII->get(STI->is64Bit() ? X86::RETQ : X86::RETL));
    } else if (isUInt<16>(StackAdj)) {
      MIB = BuildMI(MBB, MBBI, DL,
                    TII->get(STI->is64Bit() ? X86::RETIQ : X86::RETIL))
                .addImm(StackAdj);
    } else {
      assert(!STI->is64Bit() &&
             "shouldn't need to do this for x86_64 targets!");
      // A ret can only handle immediates as big as 2**16-1. If we need to pop
      // off bytes before the return address, we must do it manually.
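      // Pop the return address into ECX, release the argument area, push the
      // return address back, and fall through to a plain RETL.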
      BuildMI(MBB, MBBI, DL, TII->get(X86::POP32r))
          .addReg(X86::ECX, RegState::Define);
      X86FL->emitSPUpdate(MBB, MBBI, StackAdj, /*InEpilogue=*/true);
      BuildMI(MBB, MBBI, DL, TII->get(X86::PUSH32r)).addReg(X86::ECX);
      MIB = BuildMI(MBB, MBBI, DL, TII->get(X86::RETL));
    }
    for (unsigned I = 1, E = MBBI->getNumOperands(); I != E; ++I)
      MIB.addOperand(MBBI->getOperand(I));
    MBB.erase(MBBI);
    return true;
  }
  case X86::EH_RESTORE: {
    // Restore ESP and EBP, and optionally ESI if required.
    bool IsSEH = isAsynchronousEHPersonality(classifyEHPersonality(
        MBB.getParent()->getFunction()->getPersonalityFn()));
    X86FL->restoreWin32EHStackPointers(MBB, MBBI, DL, /*RestoreSP=*/IsSEH);
    MBBI->eraseFromParent();
    return true;
  }
  case X86::LCMPXCHG8B_SAVE_EBX:
  case X86::LCMPXCHG16B_SAVE_RBX: {
    // Perform the following transformation.
    // SaveRbx = pseudocmpxchg Addr, <4 opds for the address>, InArg, SaveRbx
    // =>
    // [E|R]BX = InArg
    // actualcmpxchg Addr
    // [E|R]BX = SaveRbx
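    // CMPXCHG8B/16B implicitly uses [E|R]BX, which may not be freely
    // available to the register allocator (e.g. when it is reserved as the
    // base pointer), so the input value only lives in [E|R]BX around the
    // instruction and the previous contents are restored afterwards.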
    const MachineOperand &InArg = MBBI->getOperand(6);
    unsigned SaveRbx = MBBI->getOperand(7).getReg();

    unsigned ActualInArg =
        Opcode == X86::LCMPXCHG8B_SAVE_EBX ? X86::EBX : X86::RBX;
    // Copy the input argument of the pseudo into the argument of the
    // actual instruction.
    TII->copyPhysReg(MBB, MBBI, DL, ActualInArg, InArg.getReg(),
                     InArg.isKill());
    // Create the actual instruction.
    unsigned ActualOpc =
        Opcode == X86::LCMPXCHG8B_SAVE_EBX ? X86::LCMPXCHG8B : X86::LCMPXCHG16B;
    MachineInstr *NewInstr = BuildMI(MBB, MBBI, DL, TII->get(ActualOpc));
    // Copy the operands related to the address.
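    // Operands 1-5 of the pseudo form the memory reference (base, scale,
    // index, displacement, segment); operand 0 is the SaveRbx definition.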
    for (unsigned Idx = 1; Idx < 6; ++Idx)
      NewInstr->addOperand(MBBI->getOperand(Idx));
    // Finally, restore the value of RBX.
    TII->copyPhysReg(MBB, MBBI, DL, ActualInArg, SaveRbx,
                     /*SrcIsKill*/ true);

    // Delete the pseudo.
    MBBI->eraseFromParent();
    return true;
  }
  }
  llvm_unreachable("Previous switch has a fallthrough?");
}

/// Expand all pseudo instructions contained in \p MBB.
/// \returns true if any expansion occurred for \p MBB.
bool X86ExpandPseudo::ExpandMBB(MachineBasicBlock &MBB) {
  bool Modified = false;

  // MBBI may be invalidated by the expansion.
  MachineBasicBlock::iterator MBBI = MBB.begin(), E = MBB.end();
  while (MBBI != E) {
    MachineBasicBlock::iterator NMBBI = std::next(MBBI);
    Modified |= ExpandMI(MBB, MBBI);
    MBBI = NMBBI;
  }

  return Modified;
}

bool X86ExpandPseudo::runOnMachineFunction(MachineFunction &MF) {
  STI = &static_cast<const X86Subtarget &>(MF.getSubtarget());
  TII = STI->getInstrInfo();
  TRI = STI->getRegisterInfo();
  X86FI = MF.getInfo<X86MachineFunctionInfo>();
  X86FL = STI->getFrameLowering();

  bool Modified = false;
  for (MachineBasicBlock &MBB : MF)
    Modified |= ExpandMBB(MBB);
  return Modified;
}

/// Returns an instance of the pseudo instruction expansion pass.
FunctionPass *llvm::createX86ExpandPseudoPass() {
  return new X86ExpandPseudo();
}