1 //===-- X86FloatingPoint.cpp - Floating point Reg -> Stack converter ------===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This file defines the pass which converts floating point instructions from
11 // pseudo registers into register stack instructions. This pass uses live
12 // variable information to indicate where the FPn registers are used and their
13 // lifetimes.
14 //
15 // The x87 hardware tracks liveness of the stack registers, so it is necessary
16 // to implement exact liveness tracking between basic blocks. The CFG edges are
17 // partitioned into bundles where the same FP registers must be live in
18 // identical stack positions. Instructions are inserted at the end of each basic
19 // block to rearrange the live registers to match the outgoing bundle.
20 //
21 // This approach avoids splitting critical edges at the potential cost of more
22 // live register shuffling instructions when critical edges are present.
23 //
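// As a rough illustration, pseudo instructions such as
//
//   %FP0 = LD_Fp64m <mem>
//   %FP1 = ABS_Fp64 %FP0<kill>
//
// are rewritten to the concrete stack forms LD_F64m and ABS_F (see the
// OpcodeTable below), with FXCH instructions inserted wherever an operand
// first has to be brought to the top of the stack.
//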
24 //===----------------------------------------------------------------------===//
25
26 #define DEBUG_TYPE "x86-codegen"
27 #include "X86.h"
28 #include "X86InstrInfo.h"
29 #include "llvm/ADT/DepthFirstIterator.h"
30 #include "llvm/ADT/DenseMap.h"
31 #include "llvm/ADT/SmallPtrSet.h"
32 #include "llvm/ADT/SmallVector.h"
33 #include "llvm/ADT/Statistic.h"
34 #include "llvm/ADT/STLExtras.h"
35 #include "llvm/CodeGen/EdgeBundles.h"
36 #include "llvm/CodeGen/MachineFunctionPass.h"
37 #include "llvm/CodeGen/MachineInstrBuilder.h"
38 #include "llvm/CodeGen/MachineRegisterInfo.h"
39 #include "llvm/CodeGen/Passes.h"
40 #include "llvm/InlineAsm.h"
41 #include "llvm/Support/Debug.h"
42 #include "llvm/Support/ErrorHandling.h"
43 #include "llvm/Support/raw_ostream.h"
44 #include "llvm/Target/TargetInstrInfo.h"
45 #include "llvm/Target/TargetMachine.h"
46 #include <algorithm>
47 using namespace llvm;
48
49 STATISTIC(NumFXCH, "Number of fxch instructions inserted");
50 STATISTIC(NumFP , "Number of floating point instructions");
51
52 namespace {
53 struct FPS : public MachineFunctionPass {
54 static char ID;
FPS() : MachineFunctionPass(ID) {
56 initializeEdgeBundlesPass(*PassRegistry::getPassRegistry());
57 // This is really only to keep valgrind quiet.
58 // The logic in isLive() is too much for it.
59 memset(Stack, 0, sizeof(Stack));
60 memset(RegMap, 0, sizeof(RegMap));
61 }
62
virtual void getAnalysisUsage(AnalysisUsage &AU) const {
64 AU.setPreservesCFG();
65 AU.addRequired<EdgeBundles>();
66 AU.addPreservedID(MachineLoopInfoID);
67 AU.addPreservedID(MachineDominatorsID);
68 MachineFunctionPass::getAnalysisUsage(AU);
69 }
70
71 virtual bool runOnMachineFunction(MachineFunction &MF);
72
virtual const char *getPassName() const { return "X86 FP Stackifier"; }
74
75 private:
76 const TargetInstrInfo *TII; // Machine instruction info.
77
78 // Two CFG edges are related if they leave the same block, or enter the same
79 // block. The transitive closure of an edge under this relation is a
80 // LiveBundle. It represents a set of CFG edges where the live FP stack
81 // registers must be allocated identically in the x87 stack.
82 //
83 // A LiveBundle is usually all the edges leaving a block, or all the edges
84 // entering a block, but it can contain more edges if critical edges are
85 // present.
86 //
87 // The set of live FP registers in a LiveBundle is calculated by bundleCFG,
88 // but the exact mapping of FP registers to stack slots is fixed later.
89 struct LiveBundle {
90 // Bit mask of live FP registers. Bit 0 = FP0, bit 1 = FP1, &c.
91 unsigned Mask;
92
93 // Number of pre-assigned live registers in FixStack. This is 0 when the
94 // stack order has not yet been fixed.
95 unsigned FixCount;
96
97 // Assigned stack order for live-in registers.
98 // FixStack[i] == getStackEntry(i) for all i < FixCount.
99 unsigned char FixStack[8];
100
LiveBundle() : Mask(0), FixCount(0) {}
102
103 // Have the live registers been assigned a stack order yet?
bool isFixed() const { return !Mask || FixCount; }
105 };
106
107 // Numbered LiveBundle structs. LiveBundles[0] is used for all CFG edges
108 // with no live FP registers.
109 SmallVector<LiveBundle, 8> LiveBundles;
110
111 // The edge bundle analysis provides indices into the LiveBundles vector.
112 EdgeBundles *Bundles;
113
114 // Return a bitmask of FP registers in block's live-in list.
unsigned calcLiveInMask(MachineBasicBlock *MBB) {
116 unsigned Mask = 0;
117 for (MachineBasicBlock::livein_iterator I = MBB->livein_begin(),
118 E = MBB->livein_end(); I != E; ++I) {
119 unsigned Reg = *I - X86::FP0;
120 if (Reg < 8)
121 Mask |= 1 << Reg;
122 }
123 return Mask;
124 }
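// For example, a block whose live-in list contains FP0 and FP2 produces
// Mask == 5 (bits 0 and 2 set); live-ins outside FP0-FP7 are ignored.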
125
126 // Partition all the CFG edges into LiveBundles.
127 void bundleCFG(MachineFunction &MF);
128
129 MachineBasicBlock *MBB; // Current basic block
130
131 // The hardware keeps track of how many FP registers are live, so we have
132 // to model that exactly. Usually, each live register corresponds to an
133 // FP<n> register, but when dealing with calls, returns, and inline
// assembly, it is sometimes necessary to have live scratch registers.
135 unsigned Stack[8]; // FP<n> Registers in each stack slot...
136 unsigned StackTop; // The current top of the FP stack.
137
138 enum {
139 NumFPRegs = 16 // Including scratch pseudo-registers.
140 };
141
142 // For each live FP<n> register, point to its Stack[] entry.
143 // The first entries correspond to FP0-FP6, the rest are scratch registers
144 // used when we need slightly different live registers than what the
145 // register allocator thinks.
146 unsigned RegMap[NumFPRegs];
147
148 // Pending fixed registers - Inline assembly needs FP registers to appear
149 // in fixed stack slot positions. This is handled by copying FP registers
150 // to ST registers before the instruction, and copying back after the
151 // instruction.
152 //
153 // This is modeled with pending ST registers. NumPendingSTs is the number
154 // of ST registers (ST0-STn) we are tracking. PendingST[n] points to an FP
155 // register that holds the ST value. The ST registers are not moved into
156 // place until immediately before the instruction that needs them.
157 //
158 // It can happen that we need an ST register to be live when no FP register
159 // holds the value:
160 //
161 // %ST0 = COPY %FP4<kill>
162 //
163 // When that happens, we allocate a scratch FP register to hold the ST
164 // value. That means every register in PendingST must be live.
165
166 unsigned NumPendingSTs;
167 unsigned char PendingST[8];
168
169 // Set up our stack model to match the incoming registers to MBB.
170 void setupBlockStack();
171
172 // Shuffle live registers to match the expectations of successor blocks.
173 void finishBlockStack();
174
void dumpStack() const {
176 dbgs() << "Stack contents:";
177 for (unsigned i = 0; i != StackTop; ++i) {
178 dbgs() << " FP" << Stack[i];
179 assert(RegMap[Stack[i]] == i && "Stack[] doesn't match RegMap[]!");
180 }
181 for (unsigned i = 0; i != NumPendingSTs; ++i)
182 dbgs() << ", ST" << i << " in FP" << unsigned(PendingST[i]);
183 dbgs() << "\n";
184 }
185
186 /// getSlot - Return the stack slot number a particular register number is
187 /// in.
unsigned getSlot(unsigned RegNo) const {
189 assert(RegNo < NumFPRegs && "Regno out of range!");
190 return RegMap[RegNo];
191 }
192
193 /// isLive - Is RegNo currently live in the stack?
bool isLive(unsigned RegNo) const {
195 unsigned Slot = getSlot(RegNo);
196 return Slot < StackTop && Stack[Slot] == RegNo;
197 }
198
199 /// getScratchReg - Return an FP register that is not currently in use.
unsigned getScratchReg() {
201 for (int i = NumFPRegs - 1; i >= 8; --i)
202 if (!isLive(i))
203 return i;
204 llvm_unreachable("Ran out of scratch FP registers");
205 }
206
/// isScratchReg - Returns true if RegNo is a scratch FP register.
bool isScratchReg(unsigned RegNo) {
return RegNo >= 8 && RegNo < NumFPRegs;
210 }
211
212 /// getStackEntry - Return the X86::FP<n> register in register ST(i).
unsigned getStackEntry(unsigned STi) const {
214 if (STi >= StackTop)
215 report_fatal_error("Access past stack top!");
216 return Stack[StackTop-1-STi];
217 }
218
219 /// getSTReg - Return the X86::ST(i) register which contains the specified
220 /// FP<RegNo> register.
unsigned getSTReg(unsigned RegNo) const {
222 return StackTop - 1 - getSlot(RegNo) + llvm::X86::ST0;
223 }
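// Example of the model: with StackTop == 3 and Stack == { 4, 1, 2 }, the
// hardware stack holds FP2 in ST(0), FP1 in ST(1) and FP4 in ST(2), and
// RegMap[4] == 0, RegMap[1] == 1, RegMap[2] == 2.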
224
225 // pushReg - Push the specified FP<n> register onto the stack.
void pushReg(unsigned Reg) {
227 assert(Reg < NumFPRegs && "Register number out of range!");
228 if (StackTop >= 8)
229 report_fatal_error("Stack overflow!");
230 Stack[StackTop] = Reg;
231 RegMap[Reg] = StackTop++;
232 }
233
bool isAtTop(unsigned RegNo) const { return getSlot(RegNo) == StackTop-1; }
void moveToTop(unsigned RegNo, MachineBasicBlock::iterator I) {
236 DebugLoc dl = I == MBB->end() ? DebugLoc() : I->getDebugLoc();
237 if (isAtTop(RegNo)) return;
238
239 unsigned STReg = getSTReg(RegNo);
240 unsigned RegOnTop = getStackEntry(0);
241
242 // Swap the slots the regs are in.
243 std::swap(RegMap[RegNo], RegMap[RegOnTop]);
244
245 // Swap stack slot contents.
246 if (RegMap[RegOnTop] >= StackTop)
247 report_fatal_error("Access past stack top!");
248 std::swap(Stack[RegMap[RegOnTop]], Stack[StackTop-1]);
249
// Emit an fxch to update the runtime processor's version of the state.
251 BuildMI(*MBB, I, dl, TII->get(X86::XCH_F)).addReg(STReg);
252 ++NumFXCH;
253 }
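// For example, with Stack == { 4, 1, 2 } (FP2 on top), moveToTop(4, I)
// swaps FP4 and FP2 in Stack[] and RegMap[] and emits an XCH_F with ST2,
// i.e. "fxch %st(2)", leaving FP4 in ST(0).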
254
void duplicateToTop(unsigned RegNo, unsigned AsReg, MachineInstr *I) {
256 DebugLoc dl = I == MBB->end() ? DebugLoc() : I->getDebugLoc();
257 unsigned STReg = getSTReg(RegNo);
258 pushReg(AsReg); // New register on top of stack
259
260 BuildMI(*MBB, I, dl, TII->get(X86::LD_Frr)).addReg(STReg);
261 }
262
263 /// duplicatePendingSTBeforeKill - The instruction at I is about to kill
264 /// RegNo. If any PendingST registers still need the RegNo value, duplicate
265 /// them to new scratch registers.
void duplicatePendingSTBeforeKill(unsigned RegNo, MachineInstr *I) {
267 for (unsigned i = 0; i != NumPendingSTs; ++i) {
268 if (PendingST[i] != RegNo)
269 continue;
270 unsigned SR = getScratchReg();
271 DEBUG(dbgs() << "Duplicating pending ST" << i
272 << " in FP" << RegNo << " to FP" << SR << '\n');
273 duplicateToTop(RegNo, SR, I);
274 PendingST[i] = SR;
275 }
276 }
277
278 /// popStackAfter - Pop the current value off of the top of the FP stack
279 /// after the specified instruction.
280 void popStackAfter(MachineBasicBlock::iterator &I);
281
/// freeStackSlotAfter - Free the specified register from the register
/// stack, so that it is no longer in a register. If the register is
/// currently at the top of the stack, we just pop it; otherwise we store
/// the current top-of-stack into the specified slot, then pop the top of
/// stack.
287 void freeStackSlotAfter(MachineBasicBlock::iterator &I, unsigned Reg);
288
289 /// freeStackSlotBefore - Just the pop, no folding. Return the inserted
290 /// instruction.
291 MachineBasicBlock::iterator
292 freeStackSlotBefore(MachineBasicBlock::iterator I, unsigned FPRegNo);
293
294 /// Adjust the live registers to be the set in Mask.
295 void adjustLiveRegs(unsigned Mask, MachineBasicBlock::iterator I);
296
297 /// Shuffle the top FixCount stack entries such that FP reg FixStack[0] is
298 /// st(0), FP reg FixStack[1] is st(1) etc.
299 void shuffleStackTop(const unsigned char *FixStack, unsigned FixCount,
300 MachineBasicBlock::iterator I);
301
302 bool processBasicBlock(MachineFunction &MF, MachineBasicBlock &MBB);
303
304 void handleZeroArgFP(MachineBasicBlock::iterator &I);
305 void handleOneArgFP(MachineBasicBlock::iterator &I);
306 void handleOneArgFPRW(MachineBasicBlock::iterator &I);
307 void handleTwoArgFP(MachineBasicBlock::iterator &I);
308 void handleCompareFP(MachineBasicBlock::iterator &I);
309 void handleCondMovFP(MachineBasicBlock::iterator &I);
310 void handleSpecialFP(MachineBasicBlock::iterator &I);
311
312 // Check if a COPY instruction is using FP registers.
bool isFPCopy(MachineInstr *MI) {
314 unsigned DstReg = MI->getOperand(0).getReg();
315 unsigned SrcReg = MI->getOperand(1).getReg();
316
317 return X86::RFP80RegClass.contains(DstReg) ||
318 X86::RFP80RegClass.contains(SrcReg);
319 }
320 };
321 char FPS::ID = 0;
322 }
323
FunctionPass *llvm::createX86FloatingPointStackifierPass() { return new FPS(); }
325
326 /// getFPReg - Return the X86::FPx register number for the specified operand.
327 /// For example, this returns 3 for X86::FP3.
static unsigned getFPReg(const MachineOperand &MO) {
329 assert(MO.isReg() && "Expected an FP register!");
330 unsigned Reg = MO.getReg();
331 assert(Reg >= X86::FP0 && Reg <= X86::FP6 && "Expected FP register!");
332 return Reg - X86::FP0;
333 }
334
335 /// runOnMachineFunction - Loop over all of the basic blocks, transforming FP
336 /// register references into FP stack references.
337 ///
bool FPS::runOnMachineFunction(MachineFunction &MF) {
339 // We only need to run this pass if there are any FP registers used in this
340 // function. If it is all integer, there is nothing for us to do!
341 bool FPIsUsed = false;
342
343 assert(X86::FP6 == X86::FP0+6 && "Register enums aren't sorted right!");
344 for (unsigned i = 0; i <= 6; ++i)
345 if (MF.getRegInfo().isPhysRegUsed(X86::FP0+i)) {
346 FPIsUsed = true;
347 break;
348 }
349
350 // Early exit.
351 if (!FPIsUsed) return false;
352
353 Bundles = &getAnalysis<EdgeBundles>();
354 TII = MF.getTarget().getInstrInfo();
355
356 // Prepare cross-MBB liveness.
357 bundleCFG(MF);
358
359 StackTop = 0;
360
361 // Process the function in depth first order so that we process at least one
362 // of the predecessors for every reachable block in the function.
363 SmallPtrSet<MachineBasicBlock*, 8> Processed;
364 MachineBasicBlock *Entry = MF.begin();
365
366 bool Changed = false;
367 for (df_ext_iterator<MachineBasicBlock*, SmallPtrSet<MachineBasicBlock*, 8> >
368 I = df_ext_begin(Entry, Processed), E = df_ext_end(Entry, Processed);
369 I != E; ++I)
370 Changed |= processBasicBlock(MF, **I);
371
372 // Process any unreachable blocks in arbitrary order now.
373 if (MF.size() != Processed.size())
374 for (MachineFunction::iterator BB = MF.begin(), E = MF.end(); BB != E; ++BB)
375 if (Processed.insert(BB))
376 Changed |= processBasicBlock(MF, *BB);
377
378 LiveBundles.clear();
379
380 return Changed;
381 }
382
383 /// bundleCFG - Scan all the basic blocks to determine consistent live-in and
384 /// live-out sets for the FP registers. Consistent means that the set of
385 /// registers live-out from a block is identical to the live-in set of all
386 /// successors. This is not enforced by the normal live-in lists since
387 /// registers may be implicitly defined, or not used by all successors.
void FPS::bundleCFG(MachineFunction &MF) {
389 assert(LiveBundles.empty() && "Stale data in LiveBundles");
390 LiveBundles.resize(Bundles->getNumBundles());
391
392 // Gather the actual live-in masks for all MBBs.
393 for (MachineFunction::iterator I = MF.begin(), E = MF.end(); I != E; ++I) {
394 MachineBasicBlock *MBB = I;
395 const unsigned Mask = calcLiveInMask(MBB);
396 if (!Mask)
397 continue;
398 // Update MBB ingoing bundle mask.
399 LiveBundles[Bundles->getBundle(MBB->getNumber(), false)].Mask |= Mask;
400 }
401 }
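// Only the ingoing bundle of each block is updated here. Since every edge
// entering a block is in the same bundle as the predecessors' outgoing
// edges, a block's live-out set is implicitly the union of its successors'
// live-in masks.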
402
403 /// processBasicBlock - Loop over all of the instructions in the basic block,
404 /// transforming FP instructions into their stack form.
405 ///
bool FPS::processBasicBlock(MachineFunction &MF, MachineBasicBlock &BB) {
407 bool Changed = false;
408 MBB = &BB;
409 NumPendingSTs = 0;
410
411 setupBlockStack();
412
413 for (MachineBasicBlock::iterator I = BB.begin(); I != BB.end(); ++I) {
414 MachineInstr *MI = I;
415 uint64_t Flags = MI->getDesc().TSFlags;
416
417 unsigned FPInstClass = Flags & X86II::FPTypeMask;
418 if (MI->isInlineAsm())
419 FPInstClass = X86II::SpecialFP;
420
421 if (MI->isCopy() && isFPCopy(MI))
422 FPInstClass = X86II::SpecialFP;
423
424 if (MI->isImplicitDef() &&
425 X86::RFP80RegClass.contains(MI->getOperand(0).getReg()))
426 FPInstClass = X86II::SpecialFP;
427
428 if (FPInstClass == X86II::NotFP)
429 continue; // Efficiently ignore non-fp insts!
430
431 MachineInstr *PrevMI = 0;
432 if (I != BB.begin())
433 PrevMI = prior(I);
434
435 ++NumFP; // Keep track of # of pseudo instrs
436 DEBUG(dbgs() << "\nFPInst:\t" << *MI);
437
438 // Get dead variables list now because the MI pointer may be deleted as part
439 // of processing!
440 SmallVector<unsigned, 8> DeadRegs;
441 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
442 const MachineOperand &MO = MI->getOperand(i);
443 if (MO.isReg() && MO.isDead())
444 DeadRegs.push_back(MO.getReg());
445 }
446
447 switch (FPInstClass) {
448 case X86II::ZeroArgFP: handleZeroArgFP(I); break;
449 case X86II::OneArgFP: handleOneArgFP(I); break; // fstp ST(0)
450 case X86II::OneArgFPRW: handleOneArgFPRW(I); break; // ST(0) = fsqrt(ST(0))
451 case X86II::TwoArgFP: handleTwoArgFP(I); break;
452 case X86II::CompareFP: handleCompareFP(I); break;
453 case X86II::CondMovFP: handleCondMovFP(I); break;
454 case X86II::SpecialFP: handleSpecialFP(I); break;
455 default: llvm_unreachable("Unknown FP Type!");
456 }
457
458 // Check to see if any of the values defined by this instruction are dead
459 // after definition. If so, pop them.
460 for (unsigned i = 0, e = DeadRegs.size(); i != e; ++i) {
461 unsigned Reg = DeadRegs[i];
462 if (Reg >= X86::FP0 && Reg <= X86::FP6) {
463 DEBUG(dbgs() << "Register FP#" << Reg-X86::FP0 << " is dead!\n");
464 freeStackSlotAfter(I, Reg-X86::FP0);
465 }
466 }
467
// Print out all of the instructions expanded to, if -debug is specified.
469 DEBUG(
470 MachineBasicBlock::iterator PrevI(PrevMI);
471 if (I == PrevI) {
472 dbgs() << "Just deleted pseudo instruction\n";
473 } else {
474 MachineBasicBlock::iterator Start = I;
475 // Rewind to first instruction newly inserted.
476 while (Start != BB.begin() && prior(Start) != PrevI) --Start;
477 dbgs() << "Inserted instructions:\n\t";
478 Start->print(dbgs(), &MF.getTarget());
479 while (++Start != llvm::next(I)) {}
480 }
481 dumpStack();
482 );
483 (void)PrevMI;
484
485 Changed = true;
486 }
487
488 finishBlockStack();
489
490 return Changed;
491 }
492
493 /// setupBlockStack - Use the live bundles to set up our model of the stack
494 /// to match predecessors' live out stack.
void FPS::setupBlockStack() {
496 DEBUG(dbgs() << "\nSetting up live-ins for BB#" << MBB->getNumber()
497 << " derived from " << MBB->getName() << ".\n");
498 StackTop = 0;
499 // Get the live-in bundle for MBB.
500 const LiveBundle &Bundle =
501 LiveBundles[Bundles->getBundle(MBB->getNumber(), false)];
502
503 if (!Bundle.Mask) {
504 DEBUG(dbgs() << "Block has no FP live-ins.\n");
505 return;
506 }
507
508 // Depth-first iteration should ensure that we always have an assigned stack.
509 assert(Bundle.isFixed() && "Reached block before any predecessors");
510
511 // Push the fixed live-in registers.
512 for (unsigned i = Bundle.FixCount; i > 0; --i) {
513 MBB->addLiveIn(X86::ST0+i-1);
514 DEBUG(dbgs() << "Live-in st(" << (i-1) << "): %FP"
515 << unsigned(Bundle.FixStack[i-1]) << '\n');
516 pushReg(Bundle.FixStack[i-1]);
517 }
518
519 // Kill off unwanted live-ins. This can happen with a critical edge.
520 // FIXME: We could keep these live registers around as zombies. They may need
521 // to be revived at the end of a short block. It might save a few instrs.
522 adjustLiveRegs(calcLiveInMask(MBB), MBB->begin());
523 DEBUG(MBB->dump());
524 }
525
526 /// finishBlockStack - Revive live-outs that are implicitly defined out of
527 /// MBB. Shuffle live registers to match the expected fixed stack of any
528 /// predecessors, and ensure that all predecessors are expecting the same
529 /// stack.
void FPS::finishBlockStack() {
531 // The RET handling below takes care of return blocks for us.
532 if (MBB->succ_empty())
533 return;
534
535 DEBUG(dbgs() << "Setting up live-outs for BB#" << MBB->getNumber()
536 << " derived from " << MBB->getName() << ".\n");
537
538 // Get MBB's live-out bundle.
539 unsigned BundleIdx = Bundles->getBundle(MBB->getNumber(), true);
540 LiveBundle &Bundle = LiveBundles[BundleIdx];
541
542 // We may need to kill and define some registers to match successors.
543 // FIXME: This can probably be combined with the shuffle below.
544 MachineBasicBlock::iterator Term = MBB->getFirstTerminator();
545 adjustLiveRegs(Bundle.Mask, Term);
546
547 if (!Bundle.Mask) {
548 DEBUG(dbgs() << "No live-outs.\n");
549 return;
550 }
551
552 // Has the stack order been fixed yet?
553 DEBUG(dbgs() << "LB#" << BundleIdx << ": ");
554 if (Bundle.isFixed()) {
555 DEBUG(dbgs() << "Shuffling stack to match.\n");
556 shuffleStackTop(Bundle.FixStack, Bundle.FixCount, Term);
557 } else {
558 // Not fixed yet, we get to choose.
559 DEBUG(dbgs() << "Fixing stack order now.\n");
560 Bundle.FixCount = StackTop;
561 for (unsigned i = 0; i < StackTop; ++i)
562 Bundle.FixStack[i] = getStackEntry(i);
563 }
564 }
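// Whichever block reaches a bundle first gets to pick the stack order for
// every edge in that bundle; blocks processed later emit fxch instructions
// (via shuffleStackTop, or when setting up their live-ins) to match it.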
565
566
567 //===----------------------------------------------------------------------===//
568 // Efficient Lookup Table Support
569 //===----------------------------------------------------------------------===//
570
571 namespace {
572 struct TableEntry {
573 unsigned from;
574 unsigned to;
bool operator<(const TableEntry &TE) const { return from < TE.from; }
friend bool operator<(const TableEntry &TE, unsigned V) {
577 return TE.from < V;
578 }
friend bool LLVM_ATTRIBUTE_USED operator<(unsigned V,
580 const TableEntry &TE) {
581 return V < TE.from;
582 }
583 };
584 }
585
586 #ifndef NDEBUG
static bool TableIsSorted(const TableEntry *Table, unsigned NumEntries) {
588 for (unsigned i = 0; i != NumEntries-1; ++i)
589 if (!(Table[i] < Table[i+1])) return false;
590 return true;
591 }
592 #endif
593
static int Lookup(const TableEntry *Table, unsigned N, unsigned Opcode) {
595 const TableEntry *I = std::lower_bound(Table, Table+N, Opcode);
596 if (I != Table+N && I->from == Opcode)
597 return I->to;
598 return -1;
599 }
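// For example, Lookup(OpcodeTable, array_lengthof(OpcodeTable), X86::ABS_Fp64)
// returns X86::ABS_F, and -1 is returned for opcodes with no table entry.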
600
601 #ifdef NDEBUG
602 #define ASSERT_SORTED(TABLE)
603 #else
604 #define ASSERT_SORTED(TABLE) \
605 { static bool TABLE##Checked = false; \
606 if (!TABLE##Checked) { \
607 assert(TableIsSorted(TABLE, array_lengthof(TABLE)) && \
608 "All lookup tables must be sorted for efficient access!"); \
609 TABLE##Checked = true; \
610 } \
611 }
612 #endif
613
614 //===----------------------------------------------------------------------===//
615 // Register File -> Register Stack Mapping Methods
616 //===----------------------------------------------------------------------===//
617
618 // OpcodeTable - Sorted map of register instructions to their stack version.
// The first element is a register file pseudo instruction, the second is the
620 // concrete X86 instruction which uses the register stack.
621 //
622 static const TableEntry OpcodeTable[] = {
623 { X86::ABS_Fp32 , X86::ABS_F },
624 { X86::ABS_Fp64 , X86::ABS_F },
625 { X86::ABS_Fp80 , X86::ABS_F },
626 { X86::ADD_Fp32m , X86::ADD_F32m },
627 { X86::ADD_Fp64m , X86::ADD_F64m },
628 { X86::ADD_Fp64m32 , X86::ADD_F32m },
629 { X86::ADD_Fp80m32 , X86::ADD_F32m },
630 { X86::ADD_Fp80m64 , X86::ADD_F64m },
631 { X86::ADD_FpI16m32 , X86::ADD_FI16m },
632 { X86::ADD_FpI16m64 , X86::ADD_FI16m },
633 { X86::ADD_FpI16m80 , X86::ADD_FI16m },
634 { X86::ADD_FpI32m32 , X86::ADD_FI32m },
635 { X86::ADD_FpI32m64 , X86::ADD_FI32m },
636 { X86::ADD_FpI32m80 , X86::ADD_FI32m },
637 { X86::CHS_Fp32 , X86::CHS_F },
638 { X86::CHS_Fp64 , X86::CHS_F },
639 { X86::CHS_Fp80 , X86::CHS_F },
640 { X86::CMOVBE_Fp32 , X86::CMOVBE_F },
641 { X86::CMOVBE_Fp64 , X86::CMOVBE_F },
642 { X86::CMOVBE_Fp80 , X86::CMOVBE_F },
643 { X86::CMOVB_Fp32 , X86::CMOVB_F },
644 { X86::CMOVB_Fp64 , X86::CMOVB_F },
645 { X86::CMOVB_Fp80 , X86::CMOVB_F },
646 { X86::CMOVE_Fp32 , X86::CMOVE_F },
647 { X86::CMOVE_Fp64 , X86::CMOVE_F },
648 { X86::CMOVE_Fp80 , X86::CMOVE_F },
649 { X86::CMOVNBE_Fp32 , X86::CMOVNBE_F },
650 { X86::CMOVNBE_Fp64 , X86::CMOVNBE_F },
651 { X86::CMOVNBE_Fp80 , X86::CMOVNBE_F },
652 { X86::CMOVNB_Fp32 , X86::CMOVNB_F },
653 { X86::CMOVNB_Fp64 , X86::CMOVNB_F },
654 { X86::CMOVNB_Fp80 , X86::CMOVNB_F },
655 { X86::CMOVNE_Fp32 , X86::CMOVNE_F },
656 { X86::CMOVNE_Fp64 , X86::CMOVNE_F },
657 { X86::CMOVNE_Fp80 , X86::CMOVNE_F },
658 { X86::CMOVNP_Fp32 , X86::CMOVNP_F },
659 { X86::CMOVNP_Fp64 , X86::CMOVNP_F },
660 { X86::CMOVNP_Fp80 , X86::CMOVNP_F },
661 { X86::CMOVP_Fp32 , X86::CMOVP_F },
662 { X86::CMOVP_Fp64 , X86::CMOVP_F },
663 { X86::CMOVP_Fp80 , X86::CMOVP_F },
664 { X86::COS_Fp32 , X86::COS_F },
665 { X86::COS_Fp64 , X86::COS_F },
666 { X86::COS_Fp80 , X86::COS_F },
667 { X86::DIVR_Fp32m , X86::DIVR_F32m },
668 { X86::DIVR_Fp64m , X86::DIVR_F64m },
669 { X86::DIVR_Fp64m32 , X86::DIVR_F32m },
670 { X86::DIVR_Fp80m32 , X86::DIVR_F32m },
671 { X86::DIVR_Fp80m64 , X86::DIVR_F64m },
672 { X86::DIVR_FpI16m32, X86::DIVR_FI16m},
673 { X86::DIVR_FpI16m64, X86::DIVR_FI16m},
674 { X86::DIVR_FpI16m80, X86::DIVR_FI16m},
675 { X86::DIVR_FpI32m32, X86::DIVR_FI32m},
676 { X86::DIVR_FpI32m64, X86::DIVR_FI32m},
677 { X86::DIVR_FpI32m80, X86::DIVR_FI32m},
678 { X86::DIV_Fp32m , X86::DIV_F32m },
679 { X86::DIV_Fp64m , X86::DIV_F64m },
680 { X86::DIV_Fp64m32 , X86::DIV_F32m },
681 { X86::DIV_Fp80m32 , X86::DIV_F32m },
682 { X86::DIV_Fp80m64 , X86::DIV_F64m },
683 { X86::DIV_FpI16m32 , X86::DIV_FI16m },
684 { X86::DIV_FpI16m64 , X86::DIV_FI16m },
685 { X86::DIV_FpI16m80 , X86::DIV_FI16m },
686 { X86::DIV_FpI32m32 , X86::DIV_FI32m },
687 { X86::DIV_FpI32m64 , X86::DIV_FI32m },
688 { X86::DIV_FpI32m80 , X86::DIV_FI32m },
689 { X86::ILD_Fp16m32 , X86::ILD_F16m },
690 { X86::ILD_Fp16m64 , X86::ILD_F16m },
691 { X86::ILD_Fp16m80 , X86::ILD_F16m },
692 { X86::ILD_Fp32m32 , X86::ILD_F32m },
693 { X86::ILD_Fp32m64 , X86::ILD_F32m },
694 { X86::ILD_Fp32m80 , X86::ILD_F32m },
695 { X86::ILD_Fp64m32 , X86::ILD_F64m },
696 { X86::ILD_Fp64m64 , X86::ILD_F64m },
697 { X86::ILD_Fp64m80 , X86::ILD_F64m },
698 { X86::ISTT_Fp16m32 , X86::ISTT_FP16m},
699 { X86::ISTT_Fp16m64 , X86::ISTT_FP16m},
700 { X86::ISTT_Fp16m80 , X86::ISTT_FP16m},
701 { X86::ISTT_Fp32m32 , X86::ISTT_FP32m},
702 { X86::ISTT_Fp32m64 , X86::ISTT_FP32m},
703 { X86::ISTT_Fp32m80 , X86::ISTT_FP32m},
704 { X86::ISTT_Fp64m32 , X86::ISTT_FP64m},
705 { X86::ISTT_Fp64m64 , X86::ISTT_FP64m},
706 { X86::ISTT_Fp64m80 , X86::ISTT_FP64m},
707 { X86::IST_Fp16m32 , X86::IST_F16m },
708 { X86::IST_Fp16m64 , X86::IST_F16m },
709 { X86::IST_Fp16m80 , X86::IST_F16m },
710 { X86::IST_Fp32m32 , X86::IST_F32m },
711 { X86::IST_Fp32m64 , X86::IST_F32m },
712 { X86::IST_Fp32m80 , X86::IST_F32m },
713 { X86::IST_Fp64m32 , X86::IST_FP64m },
714 { X86::IST_Fp64m64 , X86::IST_FP64m },
715 { X86::IST_Fp64m80 , X86::IST_FP64m },
716 { X86::LD_Fp032 , X86::LD_F0 },
717 { X86::LD_Fp064 , X86::LD_F0 },
718 { X86::LD_Fp080 , X86::LD_F0 },
719 { X86::LD_Fp132 , X86::LD_F1 },
720 { X86::LD_Fp164 , X86::LD_F1 },
721 { X86::LD_Fp180 , X86::LD_F1 },
722 { X86::LD_Fp32m , X86::LD_F32m },
723 { X86::LD_Fp32m64 , X86::LD_F32m },
724 { X86::LD_Fp32m80 , X86::LD_F32m },
725 { X86::LD_Fp64m , X86::LD_F64m },
726 { X86::LD_Fp64m80 , X86::LD_F64m },
727 { X86::LD_Fp80m , X86::LD_F80m },
728 { X86::MUL_Fp32m , X86::MUL_F32m },
729 { X86::MUL_Fp64m , X86::MUL_F64m },
730 { X86::MUL_Fp64m32 , X86::MUL_F32m },
731 { X86::MUL_Fp80m32 , X86::MUL_F32m },
732 { X86::MUL_Fp80m64 , X86::MUL_F64m },
733 { X86::MUL_FpI16m32 , X86::MUL_FI16m },
734 { X86::MUL_FpI16m64 , X86::MUL_FI16m },
735 { X86::MUL_FpI16m80 , X86::MUL_FI16m },
736 { X86::MUL_FpI32m32 , X86::MUL_FI32m },
737 { X86::MUL_FpI32m64 , X86::MUL_FI32m },
738 { X86::MUL_FpI32m80 , X86::MUL_FI32m },
739 { X86::SIN_Fp32 , X86::SIN_F },
740 { X86::SIN_Fp64 , X86::SIN_F },
741 { X86::SIN_Fp80 , X86::SIN_F },
742 { X86::SQRT_Fp32 , X86::SQRT_F },
743 { X86::SQRT_Fp64 , X86::SQRT_F },
744 { X86::SQRT_Fp80 , X86::SQRT_F },
745 { X86::ST_Fp32m , X86::ST_F32m },
746 { X86::ST_Fp64m , X86::ST_F64m },
747 { X86::ST_Fp64m32 , X86::ST_F32m },
748 { X86::ST_Fp80m32 , X86::ST_F32m },
749 { X86::ST_Fp80m64 , X86::ST_F64m },
750 { X86::ST_FpP80m , X86::ST_FP80m },
751 { X86::SUBR_Fp32m , X86::SUBR_F32m },
752 { X86::SUBR_Fp64m , X86::SUBR_F64m },
753 { X86::SUBR_Fp64m32 , X86::SUBR_F32m },
754 { X86::SUBR_Fp80m32 , X86::SUBR_F32m },
755 { X86::SUBR_Fp80m64 , X86::SUBR_F64m },
756 { X86::SUBR_FpI16m32, X86::SUBR_FI16m},
757 { X86::SUBR_FpI16m64, X86::SUBR_FI16m},
758 { X86::SUBR_FpI16m80, X86::SUBR_FI16m},
759 { X86::SUBR_FpI32m32, X86::SUBR_FI32m},
760 { X86::SUBR_FpI32m64, X86::SUBR_FI32m},
761 { X86::SUBR_FpI32m80, X86::SUBR_FI32m},
762 { X86::SUB_Fp32m , X86::SUB_F32m },
763 { X86::SUB_Fp64m , X86::SUB_F64m },
764 { X86::SUB_Fp64m32 , X86::SUB_F32m },
765 { X86::SUB_Fp80m32 , X86::SUB_F32m },
766 { X86::SUB_Fp80m64 , X86::SUB_F64m },
767 { X86::SUB_FpI16m32 , X86::SUB_FI16m },
768 { X86::SUB_FpI16m64 , X86::SUB_FI16m },
769 { X86::SUB_FpI16m80 , X86::SUB_FI16m },
770 { X86::SUB_FpI32m32 , X86::SUB_FI32m },
771 { X86::SUB_FpI32m64 , X86::SUB_FI32m },
772 { X86::SUB_FpI32m80 , X86::SUB_FI32m },
773 { X86::TST_Fp32 , X86::TST_F },
774 { X86::TST_Fp64 , X86::TST_F },
775 { X86::TST_Fp80 , X86::TST_F },
776 { X86::UCOM_FpIr32 , X86::UCOM_FIr },
777 { X86::UCOM_FpIr64 , X86::UCOM_FIr },
778 { X86::UCOM_FpIr80 , X86::UCOM_FIr },
779 { X86::UCOM_Fpr32 , X86::UCOM_Fr },
780 { X86::UCOM_Fpr64 , X86::UCOM_Fr },
781 { X86::UCOM_Fpr80 , X86::UCOM_Fr },
782 };
783
static unsigned getConcreteOpcode(unsigned Opcode) {
785 ASSERT_SORTED(OpcodeTable);
786 int Opc = Lookup(OpcodeTable, array_lengthof(OpcodeTable), Opcode);
787 assert(Opc != -1 && "FP Stack instruction not in OpcodeTable!");
788 return Opc;
789 }
790
791 //===----------------------------------------------------------------------===//
792 // Helper Methods
793 //===----------------------------------------------------------------------===//
794
795 // PopTable - Sorted map of instructions to their popping version. The first
796 // element is an instruction, the second is the version which pops.
797 //
798 static const TableEntry PopTable[] = {
799 { X86::ADD_FrST0 , X86::ADD_FPrST0 },
800
801 { X86::DIVR_FrST0, X86::DIVR_FPrST0 },
802 { X86::DIV_FrST0 , X86::DIV_FPrST0 },
803
804 { X86::IST_F16m , X86::IST_FP16m },
805 { X86::IST_F32m , X86::IST_FP32m },
806
807 { X86::MUL_FrST0 , X86::MUL_FPrST0 },
808
809 { X86::ST_F32m , X86::ST_FP32m },
810 { X86::ST_F64m , X86::ST_FP64m },
811 { X86::ST_Frr , X86::ST_FPrr },
812
813 { X86::SUBR_FrST0, X86::SUBR_FPrST0 },
814 { X86::SUB_FrST0 , X86::SUB_FPrST0 },
815
816 { X86::UCOM_FIr , X86::UCOM_FIPr },
817
818 { X86::UCOM_FPr , X86::UCOM_FPPr },
819 { X86::UCOM_Fr , X86::UCOM_FPr },
820 };
821
822 /// popStackAfter - Pop the current value off of the top of the FP stack after
823 /// the specified instruction. This attempts to be sneaky and combine the pop
824 /// into the instruction itself if possible. The iterator is left pointing to
825 /// the last instruction, be it a new pop instruction inserted, or the old
826 /// instruction if it was modified in place.
827 ///
void FPS::popStackAfter(MachineBasicBlock::iterator &I) {
829 MachineInstr* MI = I;
830 DebugLoc dl = MI->getDebugLoc();
831 ASSERT_SORTED(PopTable);
832 if (StackTop == 0)
833 report_fatal_error("Cannot pop empty stack!");
834 RegMap[Stack[--StackTop]] = ~0; // Update state
835
836 // Check to see if there is a popping version of this instruction...
837 int Opcode = Lookup(PopTable, array_lengthof(PopTable), I->getOpcode());
838 if (Opcode != -1) {
839 I->setDesc(TII->get(Opcode));
840 if (Opcode == X86::UCOM_FPPr)
841 I->RemoveOperand(0);
842 } else { // Insert an explicit pop
843 I = BuildMI(*MBB, ++I, dl, TII->get(X86::ST_FPrr)).addReg(X86::ST0);
844 }
845 }
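// For example, ST_F64m is rewritten in place to its popping form ST_FP64m
// (see PopTable above); for an instruction with no popping form, an explicit
// "fstp %st(0)" (ST_FPrr ST0) is inserted right after it instead.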
846
/// freeStackSlotAfter - Free the specified register from the register stack, so
/// that it is no longer in a register. If the register is currently at the top
/// of the stack, we just pop it; otherwise we store the current top-of-stack
/// into the specified slot, then pop the top of stack.
void FPS::freeStackSlotAfter(MachineBasicBlock::iterator &I, unsigned FPRegNo) {
852 if (getStackEntry(0) == FPRegNo) { // already at the top of stack? easy.
853 popStackAfter(I);
854 return;
855 }
856
857 // Otherwise, store the top of stack into the dead slot, killing the operand
858 // without having to add in an explicit xchg then pop.
859 //
860 I = freeStackSlotBefore(++I, FPRegNo);
861 }
862
863 /// freeStackSlotBefore - Free the specified register without trying any
864 /// folding.
865 MachineBasicBlock::iterator
FPS::freeStackSlotBefore(MachineBasicBlock::iterator I, unsigned FPRegNo) {
867 unsigned STReg = getSTReg(FPRegNo);
868 unsigned OldSlot = getSlot(FPRegNo);
869 unsigned TopReg = Stack[StackTop-1];
870 Stack[OldSlot] = TopReg;
871 RegMap[TopReg] = OldSlot;
872 RegMap[FPRegNo] = ~0;
873 Stack[--StackTop] = ~0;
874 return BuildMI(*MBB, I, DebugLoc(), TII->get(X86::ST_FPrr)).addReg(STReg);
875 }
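// The ST_FPrr emitted above is an "fstp %st(i)": it stores the current top
// of stack into the slot being freed and pops it, so FPRegNo's value is
// discarded without needing an fxch.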
876
877 /// adjustLiveRegs - Kill and revive registers such that exactly the FP
878 /// registers with a bit in Mask are live.
void FPS::adjustLiveRegs(unsigned Mask, MachineBasicBlock::iterator I) {
880 unsigned Defs = Mask;
881 unsigned Kills = 0;
882 for (unsigned i = 0; i < StackTop; ++i) {
883 unsigned RegNo = Stack[i];
884 if (!(Defs & (1 << RegNo)))
885 // This register is live, but we don't want it.
886 Kills |= (1 << RegNo);
887 else
888 // We don't need to imp-def this live register.
889 Defs &= ~(1 << RegNo);
890 }
891 assert((Kills & Defs) == 0 && "Register needs killing and def'ing?");
892
893 // Produce implicit-defs for free by using killed registers.
894 while (Kills && Defs) {
895 unsigned KReg = CountTrailingZeros_32(Kills);
896 unsigned DReg = CountTrailingZeros_32(Defs);
897 DEBUG(dbgs() << "Renaming %FP" << KReg << " as imp %FP" << DReg << "\n");
898 std::swap(Stack[getSlot(KReg)], Stack[getSlot(DReg)]);
899 std::swap(RegMap[KReg], RegMap[DReg]);
900 Kills &= ~(1 << KReg);
901 Defs &= ~(1 << DReg);
902 }
903
904 // Kill registers by popping.
905 if (Kills && I != MBB->begin()) {
906 MachineBasicBlock::iterator I2 = llvm::prior(I);
907 while (StackTop) {
908 unsigned KReg = getStackEntry(0);
909 if (!(Kills & (1 << KReg)))
910 break;
911 DEBUG(dbgs() << "Popping %FP" << KReg << "\n");
912 popStackAfter(I2);
913 Kills &= ~(1 << KReg);
914 }
915 }
916
917 // Manually kill the rest.
918 while (Kills) {
919 unsigned KReg = CountTrailingZeros_32(Kills);
920 DEBUG(dbgs() << "Killing %FP" << KReg << "\n");
921 freeStackSlotBefore(I, KReg);
922 Kills &= ~(1 << KReg);
923 }
924
925 // Load zeros for all the imp-defs.
926 while(Defs) {
927 unsigned DReg = CountTrailingZeros_32(Defs);
928 DEBUG(dbgs() << "Defining %FP" << DReg << " as 0\n");
929 BuildMI(*MBB, I, DebugLoc(), TII->get(X86::LD_F0));
930 pushReg(DReg);
931 Defs &= ~(1 << DReg);
932 }
933
934 // Now we should have the correct registers live.
935 DEBUG(dumpStack());
936 assert(StackTop == CountPopulation_32(Mask) && "Live count mismatch");
937 }
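// For example, if FP0 and FP3 are live but Mask only contains FP1, FP0 is
// renamed to FP1 for free (covering the imp-def), FP3 is popped (or freed
// with an fstp if it is not on top), and no LD_F0 needs to be emitted.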
938
939 /// shuffleStackTop - emit fxch instructions before I to shuffle the top
940 /// FixCount entries into the order given by FixStack.
941 /// FIXME: Is there a better algorithm than insertion sort?
void FPS::shuffleStackTop(const unsigned char *FixStack,
943 unsigned FixCount,
944 MachineBasicBlock::iterator I) {
945 // Move items into place, starting from the desired stack bottom.
946 while (FixCount--) {
947 // Old register at position FixCount.
948 unsigned OldReg = getStackEntry(FixCount);
949 // Desired register at position FixCount.
950 unsigned Reg = FixStack[FixCount];
951 if (Reg == OldReg)
952 continue;
953 // (Reg st0) (OldReg st0) = (Reg OldReg st0)
954 moveToTop(Reg, I);
955 if (FixCount > 0)
956 moveToTop(OldReg, I);
957 }
958 DEBUG(dumpStack());
959 }
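// Each iteration needs at most two fxch instructions: one to bring the
// desired register to the top, and a second to swap it down into its final
// position, so shuffling N fixed entries costs at most 2N fxch instructions.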
960
961
962 //===----------------------------------------------------------------------===//
963 // Instruction transformation implementation
964 //===----------------------------------------------------------------------===//
965
966 /// handleZeroArgFP - ST(0) = fld0 ST(0) = flds <mem>
967 ///
void FPS::handleZeroArgFP(MachineBasicBlock::iterator &I) {
969 MachineInstr *MI = I;
970 unsigned DestReg = getFPReg(MI->getOperand(0));
971
972 // Change from the pseudo instruction to the concrete instruction.
973 MI->RemoveOperand(0); // Remove the explicit ST(0) operand
974 MI->setDesc(TII->get(getConcreteOpcode(MI->getOpcode())));
975
976 // Result gets pushed on the stack.
977 pushReg(DestReg);
978 }
979
980 /// handleOneArgFP - fst <mem>, ST(0)
981 ///
void FPS::handleOneArgFP(MachineBasicBlock::iterator &I) {
983 MachineInstr *MI = I;
984 unsigned NumOps = MI->getDesc().getNumOperands();
985 assert((NumOps == X86::AddrNumOperands + 1 || NumOps == 1) &&
986 "Can only handle fst* & ftst instructions!");
987
988 // Is this the last use of the source register?
989 unsigned Reg = getFPReg(MI->getOperand(NumOps-1));
990 bool KillsSrc = MI->killsRegister(X86::FP0+Reg);
991
992 if (KillsSrc)
993 duplicatePendingSTBeforeKill(Reg, I);
994
// FISTP64m is strange because there isn't a non-popping version.
// If we have one _and_ we don't want to pop the operand, duplicate the value
// on the stack instead of moving it. This ensures that popping the value is
998 // always ok.
999 // Ditto FISTTP16m, FISTTP32m, FISTTP64m, ST_FpP80m.
1000 //
1001 if (!KillsSrc &&
1002 (MI->getOpcode() == X86::IST_Fp64m32 ||
1003 MI->getOpcode() == X86::ISTT_Fp16m32 ||
1004 MI->getOpcode() == X86::ISTT_Fp32m32 ||
1005 MI->getOpcode() == X86::ISTT_Fp64m32 ||
1006 MI->getOpcode() == X86::IST_Fp64m64 ||
1007 MI->getOpcode() == X86::ISTT_Fp16m64 ||
1008 MI->getOpcode() == X86::ISTT_Fp32m64 ||
1009 MI->getOpcode() == X86::ISTT_Fp64m64 ||
1010 MI->getOpcode() == X86::IST_Fp64m80 ||
1011 MI->getOpcode() == X86::ISTT_Fp16m80 ||
1012 MI->getOpcode() == X86::ISTT_Fp32m80 ||
1013 MI->getOpcode() == X86::ISTT_Fp64m80 ||
1014 MI->getOpcode() == X86::ST_FpP80m)) {
1015 duplicateToTop(Reg, getScratchReg(), I);
1016 } else {
1017 moveToTop(Reg, I); // Move to the top of the stack...
1018 }
1019
1020 // Convert from the pseudo instruction to the concrete instruction.
1021 MI->RemoveOperand(NumOps-1); // Remove explicit ST(0) operand
1022 MI->setDesc(TII->get(getConcreteOpcode(MI->getOpcode())));
1023
1024 if (MI->getOpcode() == X86::IST_FP64m ||
1025 MI->getOpcode() == X86::ISTT_FP16m ||
1026 MI->getOpcode() == X86::ISTT_FP32m ||
1027 MI->getOpcode() == X86::ISTT_FP64m ||
1028 MI->getOpcode() == X86::ST_FP80m) {
1029 if (StackTop == 0)
1030 report_fatal_error("Stack empty??");
1031 --StackTop;
1032 } else if (KillsSrc) { // Last use of operand?
1033 popStackAfter(I);
1034 }
1035 }
1036
1037
1038 /// handleOneArgFPRW: Handle instructions that read from the top of stack and
1039 /// replace the value with a newly computed value. These instructions may have
1040 /// non-fp operands after their FP operands.
1041 ///
1042 /// Examples:
1043 /// R1 = fchs R2
1044 /// R1 = fadd R2, [mem]
1045 ///
void FPS::handleOneArgFPRW(MachineBasicBlock::iterator &I) {
1047 MachineInstr *MI = I;
1048 #ifndef NDEBUG
1049 unsigned NumOps = MI->getDesc().getNumOperands();
1050 assert(NumOps >= 2 && "FPRW instructions must have 2 ops!!");
1051 #endif
1052
1053 // Is this the last use of the source register?
1054 unsigned Reg = getFPReg(MI->getOperand(1));
1055 bool KillsSrc = MI->killsRegister(X86::FP0+Reg);
1056
1057 if (KillsSrc) {
1058 duplicatePendingSTBeforeKill(Reg, I);
1059 // If this is the last use of the source register, just make sure it's on
1060 // the top of the stack.
1061 moveToTop(Reg, I);
1062 if (StackTop == 0)
1063 report_fatal_error("Stack cannot be empty!");
1064 --StackTop;
1065 pushReg(getFPReg(MI->getOperand(0)));
1066 } else {
1067 // If this is not the last use of the source register, _copy_ it to the top
1068 // of the stack.
1069 duplicateToTop(Reg, getFPReg(MI->getOperand(0)), I);
1070 }
1071
1072 // Change from the pseudo instruction to the concrete instruction.
1073 MI->RemoveOperand(1); // Drop the source operand.
1074 MI->RemoveOperand(0); // Drop the destination operand.
1075 MI->setDesc(TII->get(getConcreteOpcode(MI->getOpcode())));
1076 }
1077
1078
1079 //===----------------------------------------------------------------------===//
1080 // Define tables of various ways to map pseudo instructions
1081 //
1082
1083 // ForwardST0Table - Map: A = B op C into: ST(0) = ST(0) op ST(i)
1084 static const TableEntry ForwardST0Table[] = {
1085 { X86::ADD_Fp32 , X86::ADD_FST0r },
1086 { X86::ADD_Fp64 , X86::ADD_FST0r },
1087 { X86::ADD_Fp80 , X86::ADD_FST0r },
1088 { X86::DIV_Fp32 , X86::DIV_FST0r },
1089 { X86::DIV_Fp64 , X86::DIV_FST0r },
1090 { X86::DIV_Fp80 , X86::DIV_FST0r },
1091 { X86::MUL_Fp32 , X86::MUL_FST0r },
1092 { X86::MUL_Fp64 , X86::MUL_FST0r },
1093 { X86::MUL_Fp80 , X86::MUL_FST0r },
1094 { X86::SUB_Fp32 , X86::SUB_FST0r },
1095 { X86::SUB_Fp64 , X86::SUB_FST0r },
1096 { X86::SUB_Fp80 , X86::SUB_FST0r },
1097 };
1098
1099 // ReverseST0Table - Map: A = B op C into: ST(0) = ST(i) op ST(0)
1100 static const TableEntry ReverseST0Table[] = {
1101 { X86::ADD_Fp32 , X86::ADD_FST0r }, // commutative
1102 { X86::ADD_Fp64 , X86::ADD_FST0r }, // commutative
1103 { X86::ADD_Fp80 , X86::ADD_FST0r }, // commutative
1104 { X86::DIV_Fp32 , X86::DIVR_FST0r },
1105 { X86::DIV_Fp64 , X86::DIVR_FST0r },
1106 { X86::DIV_Fp80 , X86::DIVR_FST0r },
1107 { X86::MUL_Fp32 , X86::MUL_FST0r }, // commutative
1108 { X86::MUL_Fp64 , X86::MUL_FST0r }, // commutative
1109 { X86::MUL_Fp80 , X86::MUL_FST0r }, // commutative
1110 { X86::SUB_Fp32 , X86::SUBR_FST0r },
1111 { X86::SUB_Fp64 , X86::SUBR_FST0r },
1112 { X86::SUB_Fp80 , X86::SUBR_FST0r },
1113 };
1114
1115 // ForwardSTiTable - Map: A = B op C into: ST(i) = ST(0) op ST(i)
1116 static const TableEntry ForwardSTiTable[] = {
1117 { X86::ADD_Fp32 , X86::ADD_FrST0 }, // commutative
1118 { X86::ADD_Fp64 , X86::ADD_FrST0 }, // commutative
1119 { X86::ADD_Fp80 , X86::ADD_FrST0 }, // commutative
1120 { X86::DIV_Fp32 , X86::DIVR_FrST0 },
1121 { X86::DIV_Fp64 , X86::DIVR_FrST0 },
1122 { X86::DIV_Fp80 , X86::DIVR_FrST0 },
1123 { X86::MUL_Fp32 , X86::MUL_FrST0 }, // commutative
1124 { X86::MUL_Fp64 , X86::MUL_FrST0 }, // commutative
1125 { X86::MUL_Fp80 , X86::MUL_FrST0 }, // commutative
1126 { X86::SUB_Fp32 , X86::SUBR_FrST0 },
1127 { X86::SUB_Fp64 , X86::SUBR_FrST0 },
1128 { X86::SUB_Fp80 , X86::SUBR_FrST0 },
1129 };
1130
1131 // ReverseSTiTable - Map: A = B op C into: ST(i) = ST(i) op ST(0)
1132 static const TableEntry ReverseSTiTable[] = {
1133 { X86::ADD_Fp32 , X86::ADD_FrST0 },
1134 { X86::ADD_Fp64 , X86::ADD_FrST0 },
1135 { X86::ADD_Fp80 , X86::ADD_FrST0 },
1136 { X86::DIV_Fp32 , X86::DIV_FrST0 },
1137 { X86::DIV_Fp64 , X86::DIV_FrST0 },
1138 { X86::DIV_Fp80 , X86::DIV_FrST0 },
1139 { X86::MUL_Fp32 , X86::MUL_FrST0 },
1140 { X86::MUL_Fp64 , X86::MUL_FrST0 },
1141 { X86::MUL_Fp80 , X86::MUL_FrST0 },
1142 { X86::SUB_Fp32 , X86::SUB_FrST0 },
1143 { X86::SUB_Fp64 , X86::SUB_FrST0 },
1144 { X86::SUB_Fp80 , X86::SUB_FrST0 },
1145 };
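
// For example, "%FP2 = SUB_Fp80 %FP0, %FP1<kill>" with FP0 (not killed)
// already on top of the stack and FP1 killed selects ForwardSTiTable,
// i.e. SUBR_FrST0 (ST(i) = ST(0) - ST(i)); the updated slot then becomes FP2.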
1146
1147
/// handleTwoArgFP - Handle instructions like FADD and friends which are virtual
/// instructions that need to be simplified and possibly transformed.
1150 ///
1151 /// Result: ST(0) = fsub ST(0), ST(i)
1152 /// ST(i) = fsub ST(0), ST(i)
1153 /// ST(0) = fsubr ST(0), ST(i)
1154 /// ST(i) = fsubr ST(0), ST(i)
1155 ///
void FPS::handleTwoArgFP(MachineBasicBlock::iterator &I) {
1157 ASSERT_SORTED(ForwardST0Table); ASSERT_SORTED(ReverseST0Table);
1158 ASSERT_SORTED(ForwardSTiTable); ASSERT_SORTED(ReverseSTiTable);
1159 MachineInstr *MI = I;
1160
1161 unsigned NumOperands = MI->getDesc().getNumOperands();
1162 assert(NumOperands == 3 && "Illegal TwoArgFP instruction!");
1163 unsigned Dest = getFPReg(MI->getOperand(0));
1164 unsigned Op0 = getFPReg(MI->getOperand(NumOperands-2));
1165 unsigned Op1 = getFPReg(MI->getOperand(NumOperands-1));
1166 bool KillsOp0 = MI->killsRegister(X86::FP0+Op0);
1167 bool KillsOp1 = MI->killsRegister(X86::FP0+Op1);
1168 DebugLoc dl = MI->getDebugLoc();
1169
1170 unsigned TOS = getStackEntry(0);
1171
1172 // One of our operands must be on the top of the stack. If neither is yet, we
1173 // need to move one.
1174 if (Op0 != TOS && Op1 != TOS) { // No operand at TOS?
1175 // We can choose to move either operand to the top of the stack. If one of
1176 // the operands is killed by this instruction, we want that one so that we
1177 // can update right on top of the old version.
1178 if (KillsOp0) {
1179 moveToTop(Op0, I); // Move dead operand to TOS.
1180 TOS = Op0;
1181 } else if (KillsOp1) {
1182 moveToTop(Op1, I);
1183 TOS = Op1;
1184 } else {
1185 // All of the operands are live after this instruction executes, so we
1186 // cannot update on top of any operand. Because of this, we must
1187 // duplicate one of the stack elements to the top. It doesn't matter
1188 // which one we pick.
1189 //
1190 duplicateToTop(Op0, Dest, I);
1191 Op0 = TOS = Dest;
1192 KillsOp0 = true;
1193 }
1194 } else if (!KillsOp0 && !KillsOp1) {
1195 // If we DO have one of our operands at the top of the stack, but we don't
1196 // have a dead operand, we must duplicate one of the operands to a new slot
1197 // on the stack.
1198 duplicateToTop(Op0, Dest, I);
1199 Op0 = TOS = Dest;
1200 KillsOp0 = true;
1201 }
1202
1203 // Now we know that one of our operands is on the top of the stack, and at
1204 // least one of our operands is killed by this instruction.
1205 assert((TOS == Op0 || TOS == Op1) && (KillsOp0 || KillsOp1) &&
1206 "Stack conditions not set up right!");
1207
1208 // We decide which form to use based on what is on the top of the stack, and
1209 // which operand is killed by this instruction.
1210 const TableEntry *InstTable;
1211 bool isForward = TOS == Op0;
1212 bool updateST0 = (TOS == Op0 && !KillsOp1) || (TOS == Op1 && !KillsOp0);
1213 if (updateST0) {
1214 if (isForward)
1215 InstTable = ForwardST0Table;
1216 else
1217 InstTable = ReverseST0Table;
1218 } else {
1219 if (isForward)
1220 InstTable = ForwardSTiTable;
1221 else
1222 InstTable = ReverseSTiTable;
1223 }
1224
1225 int Opcode = Lookup(InstTable, array_lengthof(ForwardST0Table),
1226 MI->getOpcode());
1227 assert(Opcode != -1 && "Unknown TwoArgFP pseudo instruction!");
1228
1229 // NotTOS - The register which is not on the top of stack...
1230 unsigned NotTOS = (TOS == Op0) ? Op1 : Op0;
1231
1232 // Replace the old instruction with a new instruction
1233 MBB->remove(I++);
1234 I = BuildMI(*MBB, I, dl, TII->get(Opcode)).addReg(getSTReg(NotTOS));
1235
1236 // If both operands are killed, pop one off of the stack in addition to
1237 // overwriting the other one.
1238 if (KillsOp0 && KillsOp1 && Op0 != Op1) {
1239 assert(!updateST0 && "Should have updated other operand!");
1240 popStackAfter(I); // Pop the top of stack
1241 }
1242
1243 // Update stack information so that we know the destination register is now on
1244 // the stack.
1245 unsigned UpdatedSlot = getSlot(updateST0 ? TOS : NotTOS);
1246 assert(UpdatedSlot < StackTop && Dest < 7);
1247 Stack[UpdatedSlot] = Dest;
1248 RegMap[Dest] = UpdatedSlot;
1249 MBB->getParent()->DeleteMachineInstr(MI); // Remove the old instruction
1250 }
1251
1252 /// handleCompareFP - Handle FUCOM and FUCOMI instructions, which have two FP
1253 /// register arguments and no explicit destinations.
1254 ///
void FPS::handleCompareFP(MachineBasicBlock::iterator &I) {
1256 ASSERT_SORTED(ForwardST0Table); ASSERT_SORTED(ReverseST0Table);
1257 ASSERT_SORTED(ForwardSTiTable); ASSERT_SORTED(ReverseSTiTable);
1258 MachineInstr *MI = I;
1259
1260 unsigned NumOperands = MI->getDesc().getNumOperands();
1261 assert(NumOperands == 2 && "Illegal FUCOM* instruction!");
1262 unsigned Op0 = getFPReg(MI->getOperand(NumOperands-2));
1263 unsigned Op1 = getFPReg(MI->getOperand(NumOperands-1));
1264 bool KillsOp0 = MI->killsRegister(X86::FP0+Op0);
1265 bool KillsOp1 = MI->killsRegister(X86::FP0+Op1);
1266
1267 // Make sure the first operand is on the top of stack, the other one can be
1268 // anywhere.
1269 moveToTop(Op0, I);
1270
1271 // Change from the pseudo instruction to the concrete instruction.
1272 MI->getOperand(0).setReg(getSTReg(Op1));
1273 MI->RemoveOperand(1);
1274 MI->setDesc(TII->get(getConcreteOpcode(MI->getOpcode())));
1275
1276 // If any of the operands are killed by this instruction, free them.
1277 if (KillsOp0) freeStackSlotAfter(I, Op0);
1278 if (KillsOp1 && Op0 != Op1) freeStackSlotAfter(I, Op1);
1279 }
1280
1281 /// handleCondMovFP - Handle two address conditional move instructions. These
1282 /// instructions move a st(i) register to st(0) iff a condition is true. These
1283 /// instructions require that the first operand is at the top of the stack, but
1284 /// otherwise don't modify the stack at all.
void FPS::handleCondMovFP(MachineBasicBlock::iterator &I) {
1286 MachineInstr *MI = I;
1287
1288 unsigned Op0 = getFPReg(MI->getOperand(0));
1289 unsigned Op1 = getFPReg(MI->getOperand(2));
1290 bool KillsOp1 = MI->killsRegister(X86::FP0+Op1);
1291
1292 // The first operand *must* be on the top of the stack.
1293 moveToTop(Op0, I);
1294
1295 // Change the second operand to the stack register that the operand is in.
1296 // Change from the pseudo instruction to the concrete instruction.
1297 MI->RemoveOperand(0);
1298 MI->RemoveOperand(1);
1299 MI->getOperand(0).setReg(getSTReg(Op1));
1300 MI->setDesc(TII->get(getConcreteOpcode(MI->getOpcode())));
1301
1302 // If we kill the second operand, make sure to pop it from the stack.
1303 if (Op0 != Op1 && KillsOp1) {
1304 // Get this value off of the register stack.
1305 freeStackSlotAfter(I, Op1);
1306 }
1307 }
1308
1309
1310 /// handleSpecialFP - Handle special instructions which behave unlike other
1311 /// floating point instructions. This is primarily intended for use by pseudo
1312 /// instructions.
1313 ///
void FPS::handleSpecialFP(MachineBasicBlock::iterator &I) {
1315 MachineInstr *MI = I;
1316 switch (MI->getOpcode()) {
1317 default: llvm_unreachable("Unknown SpecialFP instruction!");
1318 case TargetOpcode::COPY: {
1319 // We handle three kinds of copies: FP <- FP, FP <- ST, and ST <- FP.
1320 const MachineOperand &MO1 = MI->getOperand(1);
1321 const MachineOperand &MO0 = MI->getOperand(0);
1322 unsigned DstST = MO0.getReg() - X86::ST0;
1323 unsigned SrcST = MO1.getReg() - X86::ST0;
1324 bool KillsSrc = MI->killsRegister(MO1.getReg());
1325
1326 // ST = COPY FP. Set up a pending ST register.
1327 if (DstST < 8) {
1328 unsigned SrcFP = getFPReg(MO1);
1329 assert(isLive(SrcFP) && "Cannot copy dead register");
1330 assert(!MO0.isDead() && "Cannot copy to dead ST register");
1331
// Unallocated STs are marked with NumFPRegs, a nonexistent FP register.
1333 while (NumPendingSTs <= DstST)
1334 PendingST[NumPendingSTs++] = NumFPRegs;
1335
1336 // STi could still be live from a previous inline asm.
1337 if (isScratchReg(PendingST[DstST])) {
1338 DEBUG(dbgs() << "Clobbering old ST in FP" << unsigned(PendingST[DstST])
1339 << '\n');
1340 freeStackSlotBefore(MI, PendingST[DstST]);
1341 }
1342
1343 // When the source is killed, allocate a scratch FP register.
1344 if (KillsSrc) {
1345 duplicatePendingSTBeforeKill(SrcFP, I);
1346 unsigned Slot = getSlot(SrcFP);
1347 unsigned SR = getScratchReg();
1348 PendingST[DstST] = SR;
1349 Stack[Slot] = SR;
1350 RegMap[SR] = Slot;
1351 } else
1352 PendingST[DstST] = SrcFP;
1353 break;
1354 }
1355
1356 // FP = COPY ST. Extract fixed stack value.
1357 // Any instruction defining ST registers must have assigned them to a
1358 // scratch register.
1359 if (SrcST < 8) {
1360 unsigned DstFP = getFPReg(MO0);
1361 assert(!isLive(DstFP) && "Cannot copy ST to live FP register");
1362 assert(NumPendingSTs > SrcST && "Cannot copy from dead ST register");
1363 unsigned SrcFP = PendingST[SrcST];
1364 assert(isScratchReg(SrcFP) && "Expected ST in a scratch register");
1365 assert(isLive(SrcFP) && "Scratch holding ST is dead");
1366
1367 // DstFP steals the stack slot from SrcFP.
1368 unsigned Slot = getSlot(SrcFP);
1369 Stack[Slot] = DstFP;
1370 RegMap[DstFP] = Slot;
1371
1372 // Always treat the ST as killed.
1373 PendingST[SrcST] = NumFPRegs;
1374 while (NumPendingSTs && PendingST[NumPendingSTs - 1] == NumFPRegs)
1375 --NumPendingSTs;
1376 break;
1377 }
1378
1379 // FP <- FP copy.
1380 unsigned DstFP = getFPReg(MO0);
1381 unsigned SrcFP = getFPReg(MO1);
1382 assert(isLive(SrcFP) && "Cannot copy dead register");
1383 if (KillsSrc) {
1384 // If the input operand is killed, we can just change the owner of the
1385 // incoming stack slot into the result.
1386 unsigned Slot = getSlot(SrcFP);
1387 Stack[Slot] = DstFP;
1388 RegMap[DstFP] = Slot;
1389 } else {
1390 // For COPY we just duplicate the specified value to a new stack slot.
1391 // This could be made better, but would require substantial changes.
1392 duplicateToTop(SrcFP, DstFP, I);
1393 }
1394 break;
1395 }
1396
1397 case TargetOpcode::IMPLICIT_DEF: {
1398 // All FP registers must be explicitly defined, so load a 0 instead.
1399 unsigned Reg = MI->getOperand(0).getReg() - X86::FP0;
1400 DEBUG(dbgs() << "Emitting LD_F0 for implicit FP" << Reg << '\n');
1401 BuildMI(*MBB, I, MI->getDebugLoc(), TII->get(X86::LD_F0));
1402 pushReg(Reg);
1403 break;
1404 }
1405
1406 case X86::FpPOP_RETVAL: {
1407 // The FpPOP_RETVAL instruction is used after calls that return a value on
1408 // the floating point stack. We cannot model this with ST defs since CALL
1409 // instructions have fixed clobber lists. This instruction is interpreted
1410 // to mean that there is one more live register on the stack than we
1411 // thought.
1412 //
1413 // This means that StackTop does not match the hardware stack between a
1414 // call and the FpPOP_RETVAL instructions. We do tolerate FP instructions
1415 // between CALL and FpPOP_RETVAL as long as they don't overflow the
1416 // hardware stack.
    unsigned DstFP = getFPReg(MI->getOperand(0));

    // Move existing stack elements up to reflect reality.
    assert(StackTop < 8 && "Stack overflowed before FpPOP_RETVAL");
    if (StackTop) {
      std::copy_backward(Stack, Stack + StackTop, Stack + StackTop + 1);
      for (unsigned i = 0; i != NumFPRegs; ++i)
        ++RegMap[i];
    }
    ++StackTop;

    // DstFP is the new bottom of the stack.
    Stack[0] = DstFP;
    RegMap[DstFP] = 0;

    // DstFP will be killed by processBasicBlock if this was a dead def.
    break;
  }

  case TargetOpcode::INLINEASM: {
    // The inline asm MachineInstr currently only *uses* FP registers for the
    // 'f' constraint. These should be turned into the current ST(x) register
    // in the machine instr.
    //
    // There are special rules for x87 inline assembly. The compiler must know
    // exactly how many registers are popped and pushed implicitly by the asm.
    // Otherwise it is not possible to restore the stack state after the inline
    // asm.
    //
    // There are 3 kinds of input operands:
    //
    // 1. Popped inputs. These must appear at the stack top in ST0-STn. A
    //    popped input operand must be in a fixed stack slot, and it is either
    //    tied to an output operand, or in the clobber list. The MI has ST use
    //    and def operands for these inputs.
    //
    // 2. Fixed inputs. These inputs appear in fixed stack slots, but are
    //    preserved by the inline asm. The fixed stack slots must be STn-STm
    //    following the popped inputs. A fixed input operand cannot be tied to
    //    an output or appear in the clobber list. The MI has ST use operands
    //    and no defs for these inputs.
    //
    // 3. Preserved inputs. These inputs use the "f" constraint which is
    //    represented as an FP register. The inline asm won't change these
    //    stack slots.
    //
    // Outputs must be in ST registers, FP outputs are not allowed. Clobbered
    // registers do not count as output operands. The inline asm changes the
    // stack as if it popped all the popped inputs and then pushed all the
    // output operands.
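    // Illustrative example (not taken from this file): in a GCC-style asm
    // such as
    //   asm("fpatan" : "=t"(r) : "0"(y), "u"(x) : "st(1)");
    // the "0"(y) input is tied to the ST0 output and the "u"(x) input appears
    // in the clobber list, so both are popped inputs, while an "f"-constrained
    // operand would instead be a preserved input living in some FP register.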

    // Scan the assembly for ST registers used, defined and clobbered. We can
    // only tell clobbers from defs by looking at the asm descriptor.
    unsigned STUses = 0, STDefs = 0, STClobbers = 0, STDeadDefs = 0;
    unsigned NumOps = 0;
    for (unsigned i = InlineAsm::MIOp_FirstOperand, e = MI->getNumOperands();
         i != e && MI->getOperand(i).isImm(); i += 1 + NumOps) {
      unsigned Flags = MI->getOperand(i).getImm();
      NumOps = InlineAsm::getNumOperandRegisters(Flags);
      if (NumOps != 1)
        continue;
      const MachineOperand &MO = MI->getOperand(i + 1);
      if (!MO.isReg())
        continue;
      unsigned STReg = MO.getReg() - X86::ST0;
      if (STReg >= 8)
        continue;

      switch (InlineAsm::getKind(Flags)) {
      case InlineAsm::Kind_RegUse:
        STUses |= (1u << STReg);
        break;
      case InlineAsm::Kind_RegDef:
      case InlineAsm::Kind_RegDefEarlyClobber:
        STDefs |= (1u << STReg);
        if (MO.isDead())
          STDeadDefs |= (1u << STReg);
        break;
      case InlineAsm::Kind_Clobber:
        STClobbers |= (1u << STReg);
        break;
      default:
        break;
      }
    }

    if (STUses && !isMask_32(STUses))
      MI->emitError("fixed input regs must be last on the x87 stack");
    unsigned NumSTUses = CountTrailingOnes_32(STUses);
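    // (isMask_32 only accepts a contiguous run of low bits: e.g. STUses of
    // 0b0111 means ST0-ST2 and passes, while 0b0101 has a gap at ST1 and
    // triggers the error above. CountTrailingOnes_32 then counts the regs.)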

    // Defs must be contiguous from the stack top. ST0-STn.
    if (STDefs && !isMask_32(STDefs)) {
      MI->emitError("output regs must be last on the x87 stack");
      STDefs = NextPowerOf2(STDefs) - 1;
    }
    unsigned NumSTDefs = CountTrailingOnes_32(STDefs);

    // So must the clobbered stack slots. ST0-STm, m >= n.
    if (STClobbers && !isMask_32(STDefs | STClobbers))
      MI->emitError("clobbers must be last on the x87 stack");

    // Popped inputs are the ones that are also clobbered or defined.
    unsigned STPopped = STUses & (STDefs | STClobbers);
    if (STPopped && !isMask_32(STPopped))
      MI->emitError("implicitly popped regs must be last on the x87 stack");
    unsigned NumSTPopped = CountTrailingOnes_32(STPopped);

    DEBUG(dbgs() << "Asm uses " << NumSTUses << " fixed regs, pops "
                 << NumSTPopped << ", and defines " << NumSTDefs << " regs.\n");

    // Scan the instruction for FP uses corresponding to "f" constraints.
    // Collect FP registers to kill after the instruction.
    // Always kill all the scratch regs.
    unsigned FPKills = ((1u << NumFPRegs) - 1) & ~0xff;
    unsigned FPUsed = 0;
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &Op = MI->getOperand(i);
      if (!Op.isReg() || Op.getReg() < X86::FP0 || Op.getReg() > X86::FP6)
        continue;
      if (!Op.isUse())
        MI->emitError("illegal \"f\" output constraint");
      unsigned FPReg = getFPReg(Op);
      FPUsed |= 1U << FPReg;

      // If we kill this operand, make sure to pop it from the stack after the
      // asm. We just remember it for now, and pop them all off at the end in
      // a batch.
      if (Op.isKill())
        FPKills |= 1U << FPReg;
    }

    // The popped inputs will be killed by the instruction, so duplicate them
    // if the FP register needs to be live after the instruction, or if it is
    // used in the instruction itself. We effectively treat the popped inputs
    // as early clobbers.
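    // For example, if ST0 currently holds FP3 and FP3 is not killed by the
    // asm (or is also referenced through an "f" operand), letting the asm pop
    // it would destroy a live value; copying it into a scratch register first
    // means the asm pops the scratch copy instead.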
    for (unsigned i = 0; i < NumSTPopped; ++i) {
      if ((FPKills & ~FPUsed) & (1u << PendingST[i]))
        continue;
      unsigned SR = getScratchReg();
      duplicateToTop(PendingST[i], SR, I);
      DEBUG(dbgs() << "Duplicating ST" << i << " in FP"
                   << unsigned(PendingST[i]) << " to avoid clobbering it.\n");
      PendingST[i] = SR;
    }

    // Make sure we have a unique live register for every fixed use. Some of
    // them could be undef uses, and we need to emit LD_F0 instructions.
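    // For example, an asm that reads ST0 and ST1 when only one value is
    // pending gets the missing slot filled with an fldz (LD_F0) below.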
    for (unsigned i = 0; i < NumSTUses; ++i) {
      if (i < NumPendingSTs && PendingST[i] < NumFPRegs) {
        // Check for shared assignments.
        for (unsigned j = 0; j < i; ++j) {
          if (PendingST[j] != PendingST[i])
            continue;
          // STi and STj are in the same register, create a copy.
          unsigned SR = getScratchReg();
          duplicateToTop(PendingST[i], SR, I);
          DEBUG(dbgs() << "Duplicating ST" << i << " in FP"
                       << unsigned(PendingST[i])
                       << " to avoid collision with ST" << j << '\n');
          PendingST[i] = SR;
        }
        continue;
      }
      unsigned SR = getScratchReg();
      DEBUG(dbgs() << "Emitting LD_F0 for ST" << i << " in FP" << SR << '\n');
      BuildMI(*MBB, I, MI->getDebugLoc(), TII->get(X86::LD_F0));
      pushReg(SR);
      PendingST[i] = SR;
      if (NumPendingSTs == i)
        ++NumPendingSTs;
    }
    assert(NumPendingSTs >= NumSTUses && "Fixed registers should be assigned");

    // Now we can rearrange the live registers to match what was requested.
    shuffleStackTop(PendingST, NumPendingSTs, I);
    DEBUG({dbgs() << "Before asm: "; dumpStack();});

    // With the stack layout fixed, rewrite the FP registers.
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &Op = MI->getOperand(i);
      if (!Op.isReg() || Op.getReg() < X86::FP0 || Op.getReg() > X86::FP6)
        continue;
      unsigned FPReg = getFPReg(Op);
      Op.setReg(getSTReg(FPReg));
    }

    // Simulate the inline asm popping its inputs and pushing its outputs.
    StackTop -= NumSTPopped;

    // Hold the fixed output registers in scratch FP registers. They will be
    // transferred to real FP registers by copies.
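    // (Those transfers are the FP = COPY ST copies handled near the top of
    // this switch; the scratch registers just bridge the gap until then.)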
    NumPendingSTs = 0;
    for (unsigned i = 0; i < NumSTDefs; ++i) {
      unsigned SR = getScratchReg();
      pushReg(SR);
      FPKills &= ~(1u << SR);
    }
    for (unsigned i = 0; i < NumSTDefs; ++i)
      PendingST[NumPendingSTs++] = getStackEntry(i);
    DEBUG({dbgs() << "After asm: "; dumpStack();});

    // If any of the ST defs were dead, pop them immediately. Our caller only
    // handles dead FP defs.
    MachineBasicBlock::iterator InsertPt = MI;
    for (unsigned i = 0; STDefs & (1u << i); ++i) {
      if (!(STDeadDefs & (1u << i)))
        continue;
      freeStackSlotAfter(InsertPt, PendingST[i]);
      PendingST[i] = NumFPRegs;
    }
    while (NumPendingSTs && PendingST[NumPendingSTs - 1] == NumFPRegs)
      --NumPendingSTs;

    // If this asm kills any FP registers (is the last use of them) we must
    // explicitly emit pop instructions for them. Do this now after the asm has
    // executed so that the ST(x) numbers are not off (which would happen if we
    // did this inline with operand rewriting).
    //
    // Note: this might be a non-optimal pop sequence. We might be able to do
    // better by trying to pop in stack order or something.
    while (FPKills) {
      unsigned FPReg = CountTrailingZeros_32(FPKills);
      if (isLive(FPReg))
        freeStackSlotAfter(InsertPt, FPReg);
      FPKills &= ~(1U << FPReg);
    }
    // Don't delete the inline asm!
    return;
  }

  case X86::RET:
  case X86::RETI:
    // If RET has an FP register use operand, pass the first one in ST(0) and
    // the second one in ST(1).
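    // (On x86-32, a function returning _Complex long double is the typical
    // case with two FP return registers: the real part comes back in ST(0)
    // and the imaginary part in ST(1).)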

    // Find the register operands.
    unsigned FirstFPRegOp = ~0U, SecondFPRegOp = ~0U;
    unsigned LiveMask = 0;

    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &Op = MI->getOperand(i);
      if (!Op.isReg() || Op.getReg() < X86::FP0 || Op.getReg() > X86::FP6)
        continue;
      // FP Register uses must be kills unless there are two uses of the same
      // register, in which case only one will be a kill.
      assert(Op.isUse() &&
             (Op.isKill() ||                       // Marked kill.
              getFPReg(Op) == FirstFPRegOp ||      // Second instance.
              MI->killsRegister(Op.getReg())) &&   // Later use is marked kill.
             "Ret only defs operands, and values aren't live beyond it");

      if (FirstFPRegOp == ~0U)
        FirstFPRegOp = getFPReg(Op);
      else {
        assert(SecondFPRegOp == ~0U && "More than two fp operands!");
        SecondFPRegOp = getFPReg(Op);
      }
      LiveMask |= (1 << getFPReg(Op));

      // Remove the operand so that later passes don't see it.
      MI->RemoveOperand(i);
      --i, --e;
    }

    // We may have been carrying spurious live-ins, so make sure only the
    // returned registers are left live.
    adjustLiveRegs(LiveMask, MI);
    if (!LiveMask) return;  // Quick check to see if any are possible.

    // There are only four possibilities here:
    // 1) we are returning a single FP value. In this case, it has to be in
    //    ST(0) already, so just declare success by removing the value from the
    //    FP Stack.
    if (SecondFPRegOp == ~0U) {
      // Assert that the top of stack contains the right FP register.
      assert(StackTop == 1 && FirstFPRegOp == getStackEntry(0) &&
             "Top of stack not the right register for RET!");

      // Ok, everything is good, mark the value as not being on the stack
      // anymore so that our assertion about the stack being empty at end of
      // block doesn't fire.
      StackTop = 0;
      return;
    }

    // Otherwise, we are returning two values:
    // 2) If returning the same value for both, we only have one thing in the
    //    FP stack. Consider: RET FP1, FP1
    if (StackTop == 1) {
      assert(FirstFPRegOp == SecondFPRegOp &&
             FirstFPRegOp == getStackEntry(0) &&
             "Stack misconfiguration for RET!");

      // Duplicate the TOS so that we return it twice. Just pick some other FPx
      // register to hold it.
      unsigned NewReg = getScratchReg();
      duplicateToTop(FirstFPRegOp, NewReg, MI);
      FirstFPRegOp = NewReg;
    }

    // Okay, we know we have two different FPx operands now:
    assert(StackTop == 2 && "Must have two values live!");

    // 3) If SecondFPRegOp is currently in ST(0) and FirstFPRegOp is in ST(1),
    //    emit an fxch to swap them.
    if (getStackEntry(0) == SecondFPRegOp) {
      assert(getStackEntry(1) == FirstFPRegOp && "Unknown regs live");
      moveToTop(FirstFPRegOp, MI);
    }

    // 4) Finally, FirstFPRegOp must be in ST(0) and SecondFPRegOp must be in
    //    ST(1). Just remove both from our understanding of the stack and
    //    return.
    assert(getStackEntry(0) == FirstFPRegOp && "Unknown regs live");
    assert(getStackEntry(1) == SecondFPRegOp && "Unknown regs live");
    StackTop = 0;
    return;
  }

  I = MBB->erase(I);  // Remove the pseudo instruction

  // We want to leave I pointing to the previous instruction, but what if we
  // just erased the first instruction?
  if (I == MBB->begin()) {
    DEBUG(dbgs() << "Inserting dummy KILL\n");
    I = BuildMI(*MBB, I, DebugLoc(), TII->get(TargetOpcode::KILL));
  } else
    --I;
}