//===-- SystemZRegisterInfo.cpp - SystemZ register information ------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include "SystemZRegisterInfo.h"
#include "SystemZInstrInfo.h"
#include "SystemZSubtarget.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/CodeGen/LiveIntervals.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/CodeGen/VirtRegMap.h"

using namespace llvm;

#define GET_REGINFO_TARGET_DESC
#include "SystemZGenRegisterInfo.inc"

SystemZRegisterInfo::SystemZRegisterInfo()
    : SystemZGenRegisterInfo(SystemZ::R14D) {}

// Given that MO is a GRX32 operand, return GR32 or GRH32 if MO is already
// known to belong to one of those subclasses. Otherwise, return GRX32.
static const TargetRegisterClass *getRC32(MachineOperand &MO,
                                          const VirtRegMap *VRM,
                                          const MachineRegisterInfo *MRI) {
  const TargetRegisterClass *RC = MRI->getRegClass(MO.getReg());

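  // Operands using the low 32-bit subregs (subreg_l32 / subreg_hl32) end up
  // in the low half of a GPR (GR32); the high 32-bit subregs (subreg_h32 /
  // subreg_hh32) end up in the high half (GRH32).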
  if (SystemZ::GR32BitRegClass.hasSubClassEq(RC) ||
      MO.getSubReg() == SystemZ::subreg_l32 ||
      MO.getSubReg() == SystemZ::subreg_hl32)
    return &SystemZ::GR32BitRegClass;
  if (SystemZ::GRH32BitRegClass.hasSubClassEq(RC) ||
      MO.getSubReg() == SystemZ::subreg_h32 ||
      MO.getSubReg() == SystemZ::subreg_hh32)
    return &SystemZ::GRH32BitRegClass;

  if (VRM && VRM->hasPhys(MO.getReg())) {
    unsigned PhysReg = VRM->getPhys(MO.getReg());
    if (SystemZ::GR32BitRegClass.contains(PhysReg))
      return &SystemZ::GR32BitRegClass;
    assert (SystemZ::GRH32BitRegClass.contains(PhysReg) &&
            "Phys reg not in GR32 or GRH32?");
    return &SystemZ::GRH32BitRegClass;
  }

  assert (RC == &SystemZ::GRX32BitRegClass);
  return RC;
}

bool
SystemZRegisterInfo::getRegAllocationHints(unsigned VirtReg,
                                           ArrayRef<MCPhysReg> Order,
                                           SmallVectorImpl<MCPhysReg> &Hints,
                                           const MachineFunction &MF,
                                           const VirtRegMap *VRM,
                                           const LiveRegMatrix *Matrix) const {
  const MachineRegisterInfo *MRI = &MF.getRegInfo();
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
  if (MRI->getRegClass(VirtReg) == &SystemZ::GRX32BitRegClass) {
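    // Starting from VirtReg, transitively visit all GRX32 virtual registers
    // that are tied to it through LOCRMux operands, so that the whole group
    // can be given the same kind of hint.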
    SmallVector<unsigned, 8> Worklist;
    SmallSet<unsigned, 4> DoneRegs;
    Worklist.push_back(VirtReg);
    while (Worklist.size()) {
      unsigned Reg = Worklist.pop_back_val();
      if (!DoneRegs.insert(Reg).second)
        continue;

      for (auto &Use : MRI->use_instructions(Reg))
        // For LOCRMux, see if the other operand is already a high or low
        // register, and in that case give the corresponding hints for
        // VirtReg. LOCR instructions need both operands in either high or
        // low parts.
        if (Use.getOpcode() == SystemZ::LOCRMux) {
          MachineOperand &TrueMO = Use.getOperand(1);
          MachineOperand &FalseMO = Use.getOperand(2);
          const TargetRegisterClass *RC =
            TRI->getCommonSubClass(getRC32(FalseMO, VRM, MRI),
                                   getRC32(TrueMO, VRM, MRI));
          if (RC && RC != &SystemZ::GRX32BitRegClass) {
            for (MCPhysReg Reg : Order)
              if (RC->contains(Reg) && !MRI->isReserved(Reg))
                Hints.push_back(Reg);
            // Return true to make these hints the only regs available to
            // RA. This may mean extra spilling but since the alternative is
            // a jump sequence expansion of the LOCRMux, it is preferred.
            return true;
          }

          // Add the other operand of the LOCRMux to the worklist.
          unsigned OtherReg =
            (TrueMO.getReg() == Reg ? FalseMO.getReg() : TrueMO.getReg());
          if (MRI->getRegClass(OtherReg) == &SystemZ::GRX32BitRegClass)
            Worklist.push_back(OtherReg);
        }
    }
  }

  return TargetRegisterInfo::getRegAllocationHints(VirtReg, Order, Hints, MF,
                                                   VRM, Matrix);
}

const MCPhysReg *
SystemZRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
  const SystemZSubtarget &Subtarget = MF->getSubtarget<SystemZSubtarget>();
  if (MF->getFunction().getCallingConv() == CallingConv::AnyReg)
    return Subtarget.hasVector()? CSR_SystemZ_AllRegs_Vector_SaveList
                                : CSR_SystemZ_AllRegs_SaveList;
  if (MF->getSubtarget().getTargetLowering()->supportSwiftError() &&
      MF->getFunction().getAttributes().hasAttrSomewhere(
          Attribute::SwiftError))
    return CSR_SystemZ_SwiftError_SaveList;
  return CSR_SystemZ_SaveList;
}

const uint32_t *
SystemZRegisterInfo::getCallPreservedMask(const MachineFunction &MF,
                                          CallingConv::ID CC) const {
  const SystemZSubtarget &Subtarget = MF.getSubtarget<SystemZSubtarget>();
  if (CC == CallingConv::AnyReg)
    return Subtarget.hasVector()? CSR_SystemZ_AllRegs_Vector_RegMask
                                : CSR_SystemZ_AllRegs_RegMask;
  if (MF.getSubtarget().getTargetLowering()->supportSwiftError() &&
      MF.getFunction().getAttributes().hasAttrSomewhere(
          Attribute::SwiftError))
    return CSR_SystemZ_SwiftError_RegMask;
  return CSR_SystemZ_RegMask;
}

BitVector
SystemZRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
  BitVector Reserved(getNumRegs());
  const SystemZFrameLowering *TFI = getFrameLowering(MF);

  if (TFI->hasFP(MF)) {
    // R11D is the frame pointer. Reserve all aliases.
    Reserved.set(SystemZ::R11D);
    Reserved.set(SystemZ::R11L);
    Reserved.set(SystemZ::R11H);
    Reserved.set(SystemZ::R10Q);
  }

  // R15D is the stack pointer. Reserve all aliases.
  Reserved.set(SystemZ::R15D);
  Reserved.set(SystemZ::R15L);
  Reserved.set(SystemZ::R15H);
  Reserved.set(SystemZ::R14Q);

  // A0 and A1 hold the thread pointer.
  Reserved.set(SystemZ::A0);
  Reserved.set(SystemZ::A1);

  return Reserved;
}

void
SystemZRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator MI,
                                         int SPAdj, unsigned FIOperandNum,
                                         RegScavenger *RS) const {
  assert(SPAdj == 0 && "Outgoing arguments should be part of the frame");

  MachineBasicBlock &MBB = *MI->getParent();
  MachineFunction &MF = *MBB.getParent();
  auto *TII =
      static_cast<const SystemZInstrInfo *>(MF.getSubtarget().getInstrInfo());
  const SystemZFrameLowering *TFI = getFrameLowering(MF);
  DebugLoc DL = MI->getDebugLoc();

  // Decompose the frame index into a base and offset.
  int FrameIndex = MI->getOperand(FIOperandNum).getIndex();
  unsigned BasePtr;
  int64_t Offset = (TFI->getFrameIndexReference(MF, FrameIndex, BasePtr) +
                    MI->getOperand(FIOperandNum + 1).getImm());

  // Special handling of dbg_value instructions.
  if (MI->isDebugValue()) {
    MI->getOperand(FIOperandNum).ChangeToRegister(BasePtr, /*isDef*/ false);
    MI->getOperand(FIOperandNum + 1).ChangeToImmediate(Offset);
    return;
  }

  // See if the offset is in range, or if an equivalent instruction that
  // accepts the offset exists.
  unsigned Opcode = MI->getOpcode();
  unsigned OpcodeForOffset = TII->getOpcodeForOffset(Opcode, Offset);
  if (OpcodeForOffset) {
    if (OpcodeForOffset == SystemZ::LE &&
        MF.getSubtarget<SystemZSubtarget>().hasVector()) {
      // If LE is ok for offset, use LDE instead on z13.
      OpcodeForOffset = SystemZ::LDE32;
    }
    MI->getOperand(FIOperandNum).ChangeToRegister(BasePtr, false);
  } else {
    // Create an anchor point that is in range. Start at 0xffff so that we
    // can use LLILH to load the immediate.
    int64_t OldOffset = Offset;
    int64_t Mask = 0xffff;
    do {
      Offset = OldOffset & Mask;
      OpcodeForOffset = TII->getOpcodeForOffset(Opcode, Offset);
      Mask >>= 1;
      assert(Mask && "One offset must be OK");
    } while (!OpcodeForOffset);

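    // HighOffset is the part of the original offset that the anchor does not
    // cover; it is materialized into a scratch register below, either as an
    // index or folded into a new base address.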
    unsigned ScratchReg =
      MF.getRegInfo().createVirtualRegister(&SystemZ::ADDR64BitRegClass);
    int64_t HighOffset = OldOffset - Offset;

    if (MI->getDesc().TSFlags & SystemZII::HasIndex
        && MI->getOperand(FIOperandNum + 2).getReg() == 0) {
      // Load the offset into the scratch register and use it as an index.
      // The scratch register then dies here.
      TII->loadImmediate(MBB, MI, ScratchReg, HighOffset);
      MI->getOperand(FIOperandNum).ChangeToRegister(BasePtr, false);
      MI->getOperand(FIOperandNum + 2).ChangeToRegister(ScratchReg,
                                                        false, false, true);
    } else {
      // Load the anchor address into a scratch register.
      unsigned LAOpcode = TII->getOpcodeForOffset(SystemZ::LA, HighOffset);
      if (LAOpcode)
        BuildMI(MBB, MI, DL, TII->get(LAOpcode), ScratchReg)
          .addReg(BasePtr).addImm(HighOffset).addReg(0);
      else {
        // Load the high offset into the scratch register and use it as
        // an index.
        TII->loadImmediate(MBB, MI, ScratchReg, HighOffset);
        BuildMI(MBB, MI, DL, TII->get(SystemZ::AGR), ScratchReg)
          .addReg(ScratchReg, RegState::Kill).addReg(BasePtr);
      }

      // Use the scratch register as the base. It then dies here.
      MI->getOperand(FIOperandNum).ChangeToRegister(ScratchReg,
                                                    false, false, true);
    }
  }
  MI->setDesc(TII->get(OpcodeForOffset));
  MI->getOperand(FIOperandNum + 1).ChangeToImmediate(Offset);
}

bool SystemZRegisterInfo::shouldCoalesce(MachineInstr *MI,
                                         const TargetRegisterClass *SrcRC,
                                         unsigned SubReg,
                                         const TargetRegisterClass *DstRC,
                                         unsigned DstSubReg,
                                         const TargetRegisterClass *NewRC,
                                         LiveIntervals &LIS) const {
  assert (MI->isCopy() && "Only expecting COPY instructions");

  // Coalesce anything which is not a COPY involving a subreg to/from GR128.
  if (!(NewRC->hasSuperClassEq(&SystemZ::GR128BitRegClass) &&
        (getRegSizeInBits(*SrcRC) <= 64 || getRegSizeInBits(*DstRC) <= 64)))
    return true;

  // Allow coalescing of a GR128 subreg COPY only if the live ranges are small
  // and local to one MBB with not too many interfering registers. Otherwise
  // regalloc may run out of registers.

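  // Identify which COPY operand is the GR128 register and which is the
  // narrow (<= 64-bit) one, and get the live intervals of both.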
  unsigned WideOpNo = (getRegSizeInBits(*SrcRC) == 128 ? 1 : 0);
  unsigned GR128Reg = MI->getOperand(WideOpNo).getReg();
  unsigned GRNarReg = MI->getOperand((WideOpNo == 1) ? 0 : 1).getReg();
  LiveInterval &IntGR128 = LIS.getInterval(GR128Reg);
  LiveInterval &IntGRNar = LIS.getInterval(GRNarReg);

  // Check that the two virtual registers are local to MBB.
  MachineBasicBlock *MBB = MI->getParent();
  if (LIS.isLiveInToMBB(IntGR128, MBB) || LIS.isLiveOutOfMBB(IntGR128, MBB) ||
      LIS.isLiveInToMBB(IntGRNar, MBB) || LIS.isLiveOutOfMBB(IntGRNar, MBB))
    return false;

  // Find the first and last MIs of the registers.
  MachineInstr *FirstMI = nullptr, *LastMI = nullptr;
  if (WideOpNo == 1) {
    FirstMI = LIS.getInstructionFromIndex(IntGR128.beginIndex());
    LastMI = LIS.getInstructionFromIndex(IntGRNar.endIndex());
  } else {
    FirstMI = LIS.getInstructionFromIndex(IntGRNar.beginIndex());
    LastMI = LIS.getInstructionFromIndex(IntGR128.endIndex());
  }
  assert (FirstMI && LastMI && "No instruction from index?");

  // Check if coalescing seems safe by finding the set of clobbered physreg
  // pairs in the region.
  BitVector PhysClobbered(getNumRegs());
  MachineBasicBlock::iterator MII = FirstMI, MEE = LastMI;
  MEE++;
  for (; MII != MEE; ++MII) {
    for (const MachineOperand &MO : MII->operands())
      if (MO.isReg() && isPhysicalRegister(MO.getReg())) {
        for (MCSuperRegIterator SI(MO.getReg(), this, true/*IncludeSelf*/);
             SI.isValid(); ++SI)
          if (NewRC->contains(*SI)) {
            PhysClobbered.set(*SI);
            break;
          }
      }
  }

  // Demand an arbitrary margin of free regs.
  unsigned const DemandedFreeGR128 = 3;
  if (PhysClobbered.count() > (NewRC->getNumRegs() - DemandedFreeGR128))
    return false;

  return true;
}

unsigned
SystemZRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
  const SystemZFrameLowering *TFI = getFrameLowering(MF);
  return TFI->hasFP(MF) ? SystemZ::R11D : SystemZ::R15D;
}

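// The CC register cannot be used directly in cross-register-class copies;
// use a GR32 as the intermediate class instead.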
const TargetRegisterClass *
SystemZRegisterInfo::getCrossCopyRegClass(const TargetRegisterClass *RC) const {
  if (RC == &SystemZ::CCRRegClass)
    return &SystemZ::GR32BitRegClass;
  return RC;
}