//===-- RISCVRegisterInfo.cpp - RISCV Register Information ------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the RISCV implementation of the TargetRegisterInfo class.
//
//===----------------------------------------------------------------------===//

#include "RISCVRegisterInfo.h"
#include "RISCV.h"
#include "RISCVMachineFunctionInfo.h"
#include "RISCVSubtarget.h"
#include "llvm/BinaryFormat/Dwarf.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/Support/ErrorHandling.h"

#define GET_REGINFO_TARGET_DESC
#include "RISCVGenRegisterInfo.inc"

using namespace llvm;

static cl::opt<bool>
    DisableRegAllocHints("riscv-disable-regalloc-hints", cl::Hidden,
                         cl::init(false),
                         cl::desc("Disable two address hints for register "
                                  "allocation"));

static_assert(RISCV::X1 == RISCV::X0 + 1, "Register list not consecutive");
static_assert(RISCV::X31 == RISCV::X0 + 31, "Register list not consecutive");
static_assert(RISCV::F1_H == RISCV::F0_H + 1, "Register list not consecutive");
static_assert(RISCV::F31_H == RISCV::F0_H + 31,
              "Register list not consecutive");
static_assert(RISCV::F1_F == RISCV::F0_F + 1, "Register list not consecutive");
static_assert(RISCV::F31_F == RISCV::F0_F + 31,
              "Register list not consecutive");
static_assert(RISCV::F1_D == RISCV::F0_D + 1, "Register list not consecutive");
static_assert(RISCV::F31_D == RISCV::F0_D + 31,
              "Register list not consecutive");
static_assert(RISCV::V1 == RISCV::V0 + 1, "Register list not consecutive");
static_assert(RISCV::V31 == RISCV::V0 + 31, "Register list not consecutive");
RISCVRegisterInfo::RISCVRegisterInfo(unsigned HwMode)
    : RISCVGenRegisterInfo(RISCV::X1, /*DwarfFlavour*/0, /*EHFlavor*/0,
                           /*PC*/0, HwMode) {}

const MCPhysReg *
RISCVRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
  auto &Subtarget = MF->getSubtarget<RISCVSubtarget>();
  if (MF->getFunction().getCallingConv() == CallingConv::GHC)
    return CSR_NoRegs_SaveList;
  if (MF->getFunction().hasFnAttribute("interrupt")) {
    if (Subtarget.hasStdExtD())
      return CSR_XLEN_F64_Interrupt_SaveList;
    if (Subtarget.hasStdExtF())
      return CSR_XLEN_F32_Interrupt_SaveList;
    return CSR_Interrupt_SaveList;
  }

  switch (Subtarget.getTargetABI()) {
  default:
    llvm_unreachable("Unrecognized ABI");
  case RISCVABI::ABI_ILP32:
  case RISCVABI::ABI_LP64:
    return CSR_ILP32_LP64_SaveList;
  case RISCVABI::ABI_ILP32F:
  case RISCVABI::ABI_LP64F:
    return CSR_ILP32F_LP64F_SaveList;
  case RISCVABI::ABI_ILP32D:
  case RISCVABI::ABI_LP64D:
    return CSR_ILP32D_LP64D_SaveList;
  }
}

BitVector RISCVRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
  const RISCVFrameLowering *TFI = getFrameLowering(MF);
  BitVector Reserved(getNumRegs());

  // Mark any registers requested to be reserved as such.
  for (size_t Reg = 0; Reg < getNumRegs(); Reg++) {
    if (MF.getSubtarget<RISCVSubtarget>().isRegisterReservedByUser(Reg))
      markSuperRegs(Reserved, Reg);
  }

  // Use markSuperRegs to ensure any register aliases are also reserved.
  markSuperRegs(Reserved, RISCV::X0); // zero
  markSuperRegs(Reserved, RISCV::X2); // sp
  markSuperRegs(Reserved, RISCV::X3); // gp
  markSuperRegs(Reserved, RISCV::X4); // tp
  if (TFI->hasFP(MF))
    markSuperRegs(Reserved, RISCV::X8); // fp
  // Reserve the base register if we need to realign the stack and allocate
  // variable-sized objects at runtime.
  if (TFI->hasBP(MF))
    markSuperRegs(Reserved, RISCVABI::getBPReg()); // bp

  // Vector configuration and state registers; we handle them manually during
  // code generation rather than exposing them to register allocation.
  markSuperRegs(Reserved, RISCV::VL);
  markSuperRegs(Reserved, RISCV::VTYPE);
  markSuperRegs(Reserved, RISCV::VXSAT);
  markSuperRegs(Reserved, RISCV::VXRM);
  markSuperRegs(Reserved, RISCV::VLENB); // vlenb (constant)

  // Floating point environment registers.
  markSuperRegs(Reserved, RISCV::FRM);
  markSuperRegs(Reserved, RISCV::FFLAGS);

  assert(checkAllSuperRegsMarked(Reserved));
  return Reserved;
}

bool RISCVRegisterInfo::isAsmClobberable(const MachineFunction &MF,
                                         MCRegister PhysReg) const {
  return !MF.getSubtarget<RISCVSubtarget>().isRegisterReservedByUser(PhysReg);
}

const uint32_t *RISCVRegisterInfo::getNoPreservedMask() const {
  return CSR_NoRegs_RegMask;
}

// Frame indexes representing locations of CSRs which are given a fixed location
// by save/restore libcalls.
static const std::pair<unsigned, int> FixedCSRFIMap[] = {
    {/*ra*/ RISCV::X1, -1},
    {/*s0*/ RISCV::X8, -2},
    {/*s1*/ RISCV::X9, -3},
    {/*s2*/ RISCV::X18, -4},
    {/*s3*/ RISCV::X19, -5},
    {/*s4*/ RISCV::X20, -6},
    {/*s5*/ RISCV::X21, -7},
    {/*s6*/ RISCV::X22, -8},
    {/*s7*/ RISCV::X23, -9},
    {/*s8*/ RISCV::X24, -10},
    {/*s9*/ RISCV::X25, -11},
    {/*s10*/ RISCV::X26, -12},
    {/*s11*/ RISCV::X27, -13}
};
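
// For example, when the save/restore libcalls are in use, ra always lives at
// fixed frame index -1, s0 at -2, and so on, so hasReservedSpillSlot below
// can report these locations without consulting MachineFrameInfo.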

bool RISCVRegisterInfo::hasReservedSpillSlot(const MachineFunction &MF,
                                             Register Reg,
                                             int &FrameIdx) const {
  const auto *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();
  if (!RVFI->useSaveRestoreLibCalls(MF))
    return false;

  const auto *FII =
      llvm::find_if(FixedCSRFIMap, [&](auto P) { return P.first == Reg; });
  if (FII == std::end(FixedCSRFIMap))
    return false;

  FrameIdx = FII->second;
  return true;
}

void RISCVRegisterInfo::adjustReg(MachineBasicBlock &MBB,
                                  MachineBasicBlock::iterator II,
                                  const DebugLoc &DL, Register DestReg,
                                  Register SrcReg, StackOffset Offset,
                                  MachineInstr::MIFlag Flag,
                                  MaybeAlign RequiredAlign) const {

  if (DestReg == SrcReg && !Offset.getFixed() && !Offset.getScalable())
    return;

  MachineFunction &MF = *MBB.getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const RISCVSubtarget &ST = MF.getSubtarget<RISCVSubtarget>();
  const RISCVInstrInfo *TII = ST.getInstrInfo();

  bool KillSrcReg = false;

  if (Offset.getScalable()) {
    unsigned ScalableAdjOpc = RISCV::ADD;
    int64_t ScalableValue = Offset.getScalable();
    if (ScalableValue < 0) {
      ScalableValue = -ScalableValue;
      ScalableAdjOpc = RISCV::SUB;
    }
    // Read vlenb and multiply it by the number of vector registers spanned by
    // the scalable offset.
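    // For example, a scalable offset of 24 covers three whole vector
    // registers, i.e. 3 * VLENB bytes at runtime.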
    Register ScratchReg = DestReg;
    if (DestReg == SrcReg)
      ScratchReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
    TII->getVLENFactoredAmount(MF, MBB, II, DL, ScratchReg, ScalableValue, Flag);
    BuildMI(MBB, II, DL, TII->get(ScalableAdjOpc), DestReg)
        .addReg(SrcReg).addReg(ScratchReg, RegState::Kill)
        .setMIFlag(Flag);
    SrcReg = DestReg;
    KillSrcReg = true;
  }

  int64_t Val = Offset.getFixed();
  if (DestReg == SrcReg && Val == 0)
    return;

  const uint64_t Align = RequiredAlign.valueOrOne().value();

  if (isInt<12>(Val)) {
    BuildMI(MBB, II, DL, TII->get(RISCV::ADDI), DestReg)
        .addReg(SrcReg, getKillRegState(KillSrcReg))
        .addImm(Val)
        .setMIFlag(Flag);
    return;
  }

  // Try to split the offset across two ADDIs. We need to keep the intermediate
  // result aligned after each ADDI. We need to determine the maximum value we
  // can put in each ADDI. In the negative direction, we can use -2048 which is
  // always sufficiently aligned. In the positive direction, we need to find the
  // largest 12-bit immediate that is aligned. Exclude -4096 since it can be
  // created with LUI.
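  // For example, with Val = 3000 and a required alignment of 16, the split is
  // ADDI +2032 followed by ADDI +968; the intermediate result stays 16-byte
  // aligned after each step.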
  assert(Align < 2048 && "Required alignment too large");
  int64_t MaxPosAdjStep = 2048 - Align;
  if (Val > -4096 && Val <= (2 * MaxPosAdjStep)) {
    int64_t FirstAdj = Val < 0 ? -2048 : MaxPosAdjStep;
    Val -= FirstAdj;
    BuildMI(MBB, II, DL, TII->get(RISCV::ADDI), DestReg)
        .addReg(SrcReg, getKillRegState(KillSrcReg))
        .addImm(FirstAdj)
        .setMIFlag(Flag);
    BuildMI(MBB, II, DL, TII->get(RISCV::ADDI), DestReg)
        .addReg(DestReg, RegState::Kill)
        .addImm(Val)
        .setMIFlag(Flag);
    return;
  }

  unsigned Opc = RISCV::ADD;
  if (Val < 0) {
    Val = -Val;
    Opc = RISCV::SUB;
  }

  Register ScratchReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
  TII->movImm(MBB, II, DL, ScratchReg, Val, Flag);
  BuildMI(MBB, II, DL, TII->get(Opc), DestReg)
      .addReg(SrcReg, getKillRegState(KillSrcReg))
      .addReg(ScratchReg, RegState::Kill)
      .setMIFlag(Flag);
}

// Split a VSPILLx_Mx pseudo into multiple whole register stores separated by
// LMUL*VLENB bytes.
void RISCVRegisterInfo::lowerVSPILL(MachineBasicBlock::iterator II) const {
  DebugLoc DL = II->getDebugLoc();
  MachineBasicBlock &MBB = *II->getParent();
  MachineFunction &MF = *MBB.getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();

  auto ZvlssegInfo = RISCV::isRVVSpillForZvlsseg(II->getOpcode());
  unsigned NF = ZvlssegInfo->first;
  unsigned LMUL = ZvlssegInfo->second;
  assert(NF * LMUL <= 8 && "Invalid NF/LMUL combinations.");
  unsigned Opcode, SubRegIdx;
  switch (LMUL) {
  default:
    llvm_unreachable("LMUL must be 1, 2, or 4.");
  case 1:
    Opcode = RISCV::VS1R_V;
    SubRegIdx = RISCV::sub_vrm1_0;
    break;
  case 2:
    Opcode = RISCV::VS2R_V;
    SubRegIdx = RISCV::sub_vrm2_0;
    break;
  case 4:
    Opcode = RISCV::VS4R_V;
    SubRegIdx = RISCV::sub_vrm4_0;
    break;
  }
  static_assert(RISCV::sub_vrm1_7 == RISCV::sub_vrm1_0 + 7,
                "Unexpected subreg numbering");
  static_assert(RISCV::sub_vrm2_3 == RISCV::sub_vrm2_0 + 3,
                "Unexpected subreg numbering");
  static_assert(RISCV::sub_vrm4_1 == RISCV::sub_vrm4_0 + 1,
                "Unexpected subreg numbering");

  Register VL = MRI.createVirtualRegister(&RISCV::GPRRegClass);
  BuildMI(MBB, II, DL, TII->get(RISCV::PseudoReadVLENB), VL);
  uint32_t ShiftAmount = Log2_32(LMUL);
  if (ShiftAmount != 0)
    BuildMI(MBB, II, DL, TII->get(RISCV::SLLI), VL)
        .addReg(VL)
        .addImm(ShiftAmount);

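  // For example, a PseudoVSPILL2_M4 (NF = 2, LMUL = 4) becomes two VS4R_V
  // stores whose base addresses differ by 4 * VLENB bytes.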
  Register SrcReg = II->getOperand(0).getReg();
  Register Base = II->getOperand(1).getReg();
  bool IsBaseKill = II->getOperand(1).isKill();
  Register NewBase = MRI.createVirtualRegister(&RISCV::GPRRegClass);
  for (unsigned I = 0; I < NF; ++I) {
    // Add an implicit use of the super register to indicate that only part of
    // it is being used here; this keeps the machine verifier from complaining
    // when part of the super register is undef. See the comment in
    // MachineVerifier::checkLiveness for more detail.
    BuildMI(MBB, II, DL, TII->get(Opcode))
        .addReg(TRI->getSubReg(SrcReg, SubRegIdx + I))
        .addReg(Base, getKillRegState(I == NF - 1))
        .addMemOperand(*(II->memoperands_begin()))
        .addReg(SrcReg, RegState::Implicit);
    if (I != NF - 1)
      BuildMI(MBB, II, DL, TII->get(RISCV::ADD), NewBase)
          .addReg(Base, getKillRegState(I != 0 || IsBaseKill))
          .addReg(VL, getKillRegState(I == NF - 2));
    Base = NewBase;
  }
  II->eraseFromParent();
}

// Split a VRELOADx_Mx pseudo into multiple whole register loads separated by
// LMUL*VLENB bytes.
void RISCVRegisterInfo::lowerVRELOAD(MachineBasicBlock::iterator II) const {
  DebugLoc DL = II->getDebugLoc();
  MachineBasicBlock &MBB = *II->getParent();
  MachineFunction &MF = *MBB.getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();

  auto ZvlssegInfo = RISCV::isRVVSpillForZvlsseg(II->getOpcode());
  unsigned NF = ZvlssegInfo->first;
  unsigned LMUL = ZvlssegInfo->second;
  assert(NF * LMUL <= 8 && "Invalid NF/LMUL combinations.");
  unsigned Opcode, SubRegIdx;
  switch (LMUL) {
  default:
    llvm_unreachable("LMUL must be 1, 2, or 4.");
  case 1:
    Opcode = RISCV::VL1RE8_V;
    SubRegIdx = RISCV::sub_vrm1_0;
    break;
  case 2:
    Opcode = RISCV::VL2RE8_V;
    SubRegIdx = RISCV::sub_vrm2_0;
    break;
  case 4:
    Opcode = RISCV::VL4RE8_V;
    SubRegIdx = RISCV::sub_vrm4_0;
    break;
  }
  static_assert(RISCV::sub_vrm1_7 == RISCV::sub_vrm1_0 + 7,
                "Unexpected subreg numbering");
  static_assert(RISCV::sub_vrm2_3 == RISCV::sub_vrm2_0 + 3,
                "Unexpected subreg numbering");
  static_assert(RISCV::sub_vrm4_1 == RISCV::sub_vrm4_0 + 1,
                "Unexpected subreg numbering");

  Register VL = MRI.createVirtualRegister(&RISCV::GPRRegClass);
  BuildMI(MBB, II, DL, TII->get(RISCV::PseudoReadVLENB), VL);
  uint32_t ShiftAmount = Log2_32(LMUL);
  if (ShiftAmount != 0)
    BuildMI(MBB, II, DL, TII->get(RISCV::SLLI), VL)
        .addReg(VL)
        .addImm(ShiftAmount);

  Register DestReg = II->getOperand(0).getReg();
  Register Base = II->getOperand(1).getReg();
  bool IsBaseKill = II->getOperand(1).isKill();
  Register NewBase = MRI.createVirtualRegister(&RISCV::GPRRegClass);
  for (unsigned I = 0; I < NF; ++I) {
    BuildMI(MBB, II, DL, TII->get(Opcode),
            TRI->getSubReg(DestReg, SubRegIdx + I))
        .addReg(Base, getKillRegState(I == NF - 1))
        .addMemOperand(*(II->memoperands_begin()));
    if (I != NF - 1)
      BuildMI(MBB, II, DL, TII->get(RISCV::ADD), NewBase)
          .addReg(Base, getKillRegState(I != 0 || IsBaseKill))
          .addReg(VL, getKillRegState(I == NF - 2));
    Base = NewBase;
  }
  II->eraseFromParent();
}

bool RISCVRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
                                            int SPAdj, unsigned FIOperandNum,
                                            RegScavenger *RS) const {
  assert(SPAdj == 0 && "Unexpected non-zero SPAdj value");

  MachineInstr &MI = *II;
  MachineFunction &MF = *MI.getParent()->getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const RISCVSubtarget &ST = MF.getSubtarget<RISCVSubtarget>();
  DebugLoc DL = MI.getDebugLoc();

  int FrameIndex = MI.getOperand(FIOperandNum).getIndex();
  Register FrameReg;
  StackOffset Offset =
      getFrameLowering(MF)->getFrameIndexReference(MF, FrameIndex, FrameReg);
  bool IsRVVSpill = RISCV::isRVVSpill(MI);
  if (!IsRVVSpill)
    Offset += StackOffset::getFixed(MI.getOperand(FIOperandNum + 1).getImm());

  if (Offset.getScalable() &&
      ST.getRealMinVLen() == ST.getRealMaxVLen()) {
    // For an exact VLEN value, scalable offsets become constant and thus
    // can be converted entirely into fixed offsets.
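    // For example, with a known VLEN of 128 (VLENB = 16), a scalable offset
    // of 16, i.e. two whole vector registers, folds into a fixed 32-byte
    // offset.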
    int64_t FixedValue = Offset.getFixed();
    int64_t ScalableValue = Offset.getScalable();
    assert(ScalableValue % 8 == 0 &&
           "Scalable offset is not a multiple of a single vector size.");
    int64_t NumOfVReg = ScalableValue / 8;
    int64_t VLENB = ST.getRealMinVLen() / 8;
    Offset = StackOffset::getFixed(FixedValue + NumOfVReg * VLENB);
  }

  if (!isInt<32>(Offset.getFixed())) {
    report_fatal_error(
        "Frame offsets outside of the signed 32-bit range not supported");
  }

  if (!IsRVVSpill) {
    if (MI.getOpcode() == RISCV::ADDI && !isInt<12>(Offset.getFixed())) {
      // We chose to emit the canonical immediate sequence rather than folding
      // the offset into the using ADDI under the theory that doing so doesn't
      // save dynamic instruction count and some targets may fuse the canonical
      // 32-bit immediate sequence. We still need to clear the portion of the
      // offset encoded in the immediate.
      MI.getOperand(FIOperandNum + 1).ChangeToImmediate(0);
    } else {
      // We can encode an add with a 12-bit signed immediate in the immediate
      // operand of our user instruction. As a result, the remaining offset
      // can, by construction, be materialized with at worst a LUI and an ADD.
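      // For example, Val = 0x12345 leaves Lo12 = 0x345 encoded in the user
      // instruction, and the remaining 0x12000 is materialized via LUI.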
      int64_t Val = Offset.getFixed();
      int64_t Lo12 = SignExtend64<12>(Val);
      MI.getOperand(FIOperandNum + 1).ChangeToImmediate(Lo12);
      Offset = StackOffset::get((uint64_t)Val - (uint64_t)Lo12,
                                Offset.getScalable());
    }
  }

  if (Offset.getScalable() || Offset.getFixed()) {
    Register DestReg;
    if (MI.getOpcode() == RISCV::ADDI)
      DestReg = MI.getOperand(0).getReg();
    else
      DestReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
    adjustReg(*II->getParent(), II, DL, DestReg, FrameReg, Offset,
              MachineInstr::NoFlags, std::nullopt);
    MI.getOperand(FIOperandNum).ChangeToRegister(DestReg, /*IsDef*/false,
                                                 /*IsImp*/false,
                                                 /*IsKill*/true);
  } else {
    MI.getOperand(FIOperandNum).ChangeToRegister(FrameReg, /*IsDef*/false,
                                                 /*IsImp*/false,
                                                 /*IsKill*/false);
  }

  // If, after materializing the adjustment, we have a pointless ADDI, remove it.
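  // This can happen when the residual offset is zero and the ADDI's
  // destination is the same register as the frame base register.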
  if (MI.getOpcode() == RISCV::ADDI &&
      MI.getOperand(0).getReg() == MI.getOperand(1).getReg() &&
      MI.getOperand(2).getImm() == 0) {
    MI.eraseFromParent();
    return true;
  }

  // Handle spill/fill of synthetic register classes for segment operations to
  // ensure correctness in the edge case one gets spilled. There are many
  // possible optimizations here, but given the extreme rarity of such spills,
  // we prefer simplicity of implementation for now.
  switch (MI.getOpcode()) {
  case RISCV::PseudoVSPILL2_M1:
  case RISCV::PseudoVSPILL2_M2:
  case RISCV::PseudoVSPILL2_M4:
  case RISCV::PseudoVSPILL3_M1:
  case RISCV::PseudoVSPILL3_M2:
  case RISCV::PseudoVSPILL4_M1:
  case RISCV::PseudoVSPILL4_M2:
  case RISCV::PseudoVSPILL5_M1:
  case RISCV::PseudoVSPILL6_M1:
  case RISCV::PseudoVSPILL7_M1:
  case RISCV::PseudoVSPILL8_M1:
    lowerVSPILL(II);
    return true;
  case RISCV::PseudoVRELOAD2_M1:
  case RISCV::PseudoVRELOAD2_M2:
  case RISCV::PseudoVRELOAD2_M4:
  case RISCV::PseudoVRELOAD3_M1:
  case RISCV::PseudoVRELOAD3_M2:
  case RISCV::PseudoVRELOAD4_M1:
  case RISCV::PseudoVRELOAD4_M2:
  case RISCV::PseudoVRELOAD5_M1:
  case RISCV::PseudoVRELOAD6_M1:
  case RISCV::PseudoVRELOAD7_M1:
  case RISCV::PseudoVRELOAD8_M1:
    lowerVRELOAD(II);
    return true;
  }

  return false;
}

bool RISCVRegisterInfo::requiresVirtualBaseRegisters(
    const MachineFunction &MF) const {
  return true;
}

// Returns true if the instruction's frame index reference would be better
// served by a base register other than FP or SP.
// Used by LocalStackSlotAllocation pass to determine which frame index
// references it should create new base registers for.
bool RISCVRegisterInfo::needsFrameBaseReg(MachineInstr *MI,
                                          int64_t Offset) const {
  unsigned FIOperandNum = 0;
  for (; !MI->getOperand(FIOperandNum).isFI(); FIOperandNum++)
    assert(FIOperandNum < MI->getNumOperands() &&
           "Instr doesn't have FrameIndex operand");

  // For RISC-V, the machine instructions that include a FrameIndex operand
  // are loads, stores, and ADDI.
  unsigned MIFrm = RISCVII::getFormat(MI->getDesc().TSFlags);
  if (MIFrm != RISCVII::InstFormatI && MIFrm != RISCVII::InstFormatS)
    return false;
  // We only generate virtual base registers for loads and stores, so
  // return false for everything else.
  if (!MI->mayLoad() && !MI->mayStore())
    return false;

  const MachineFunction &MF = *MI->getMF();
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  const RISCVFrameLowering *TFI = getFrameLowering(MF);
  const MachineRegisterInfo &MRI = MF.getRegInfo();
  unsigned CalleeSavedSize = 0;
  Offset += getFrameIndexInstrOffset(MI, FIOperandNum);

  // Estimate the stack size used to store callee-saved registers
  // (excluding reserved registers).
  BitVector ReservedRegs = getReservedRegs(MF);
  for (const MCPhysReg *R = MRI.getCalleeSavedRegs(); MCPhysReg Reg = *R; ++R) {
    if (!ReservedRegs.test(Reg))
      CalleeSavedSize += getSpillSize(*getMinimalPhysRegClass(Reg));
  }

  int64_t MaxFPOffset = Offset - CalleeSavedSize;
  if (TFI->hasFP(MF) && !shouldRealignStack(MF))
    return !isFrameOffsetLegal(MI, RISCV::X8, MaxFPOffset);

  // Assume a 128-byte spill slot size to estimate the maximum possible
  // offset relative to the stack pointer.
  // FIXME: The 128 is copied from ARM. We should run some statistics and pick a
  // real one for RISC-V.
  int64_t MaxSPOffset = Offset + 128;
  MaxSPOffset += MFI.getLocalFrameSize();
  return !isFrameOffsetLegal(MI, RISCV::X2, MaxSPOffset);
}

// Determine whether a given base register plus offset immediate is
// encodable to resolve a frame index.
bool RISCVRegisterInfo::isFrameOffsetLegal(const MachineInstr *MI,
                                           Register BaseReg,
                                           int64_t Offset) const {
  unsigned FIOperandNum = 0;
  while (!MI->getOperand(FIOperandNum).isFI()) {
    FIOperandNum++;
    assert(FIOperandNum < MI->getNumOperands() &&
           "Instr does not have a FrameIndex operand!");
  }

  Offset += getFrameIndexInstrOffset(MI, FIOperandNum);
  return isInt<12>(Offset);
}

// Insert defining instruction(s) for a pointer to FrameIdx before
// insertion point I.
// Return the materialized base register.
Register RISCVRegisterInfo::materializeFrameBaseRegister(MachineBasicBlock *MBB,
                                                         int FrameIdx,
                                                         int64_t Offset) const {
  MachineBasicBlock::iterator MBBI = MBB->begin();
  DebugLoc DL;
  if (MBBI != MBB->end())
    DL = MBBI->getDebugLoc();
  MachineFunction *MF = MBB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();

  Register BaseReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
  BuildMI(*MBB, MBBI, DL, TII->get(RISCV::ADDI), BaseReg)
      .addFrameIndex(FrameIdx)
      .addImm(Offset);
  return BaseReg;
}

// Resolve a frame index operand of an instruction to reference the
// indicated base register plus offset instead.
void RISCVRegisterInfo::resolveFrameIndex(MachineInstr &MI, Register BaseReg,
                                          int64_t Offset) const {
  unsigned FIOperandNum = 0;
  while (!MI.getOperand(FIOperandNum).isFI()) {
    FIOperandNum++;
    assert(FIOperandNum < MI.getNumOperands() &&
           "Instr does not have a FrameIndex operand!");
  }

  Offset += getFrameIndexInstrOffset(&MI, FIOperandNum);
  // FrameIndex operands are always represented as a
  // register followed by an immediate.
  MI.getOperand(FIOperandNum).ChangeToRegister(BaseReg, false);
  MI.getOperand(FIOperandNum + 1).ChangeToImmediate(Offset);
}

// Get the offset from the referenced frame index in the instruction,
// if there is one.
int64_t RISCVRegisterInfo::getFrameIndexInstrOffset(const MachineInstr *MI,
                                                    int Idx) const {
  assert((RISCVII::getFormat(MI->getDesc().TSFlags) == RISCVII::InstFormatI ||
          RISCVII::getFormat(MI->getDesc().TSFlags) == RISCVII::InstFormatS) &&
         "The MI must be I or S format.");
  assert(MI->getOperand(Idx).isFI() && "The Idx'th operand of MI is not a "
                                       "FrameIndex operand");
  return MI->getOperand(Idx + 1).getImm();
}

Register RISCVRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
  const TargetFrameLowering *TFI = getFrameLowering(MF);
  return TFI->hasFP(MF) ? RISCV::X8 : RISCV::X2;
}

const uint32_t *
RISCVRegisterInfo::getCallPreservedMask(const MachineFunction &MF,
                                        CallingConv::ID CC) const {
  auto &Subtarget = MF.getSubtarget<RISCVSubtarget>();

  if (CC == CallingConv::GHC)
    return CSR_NoRegs_RegMask;
  switch (Subtarget.getTargetABI()) {
  default:
    llvm_unreachable("Unrecognized ABI");
  case RISCVABI::ABI_ILP32:
  case RISCVABI::ABI_LP64:
    return CSR_ILP32_LP64_RegMask;
  case RISCVABI::ABI_ILP32F:
  case RISCVABI::ABI_LP64F:
    return CSR_ILP32F_LP64F_RegMask;
  case RISCVABI::ABI_ILP32D:
  case RISCVABI::ABI_LP64D:
    return CSR_ILP32D_LP64D_RegMask;
  }
}

const TargetRegisterClass *
RISCVRegisterInfo::getLargestLegalSuperClass(const TargetRegisterClass *RC,
                                             const MachineFunction &) const {
  if (RC == &RISCV::VMV0RegClass)
    return &RISCV::VRRegClass;
  return RC;
}

void RISCVRegisterInfo::getOffsetOpcodes(const StackOffset &Offset,
                                         SmallVectorImpl<uint64_t> &Ops) const {
  // VLENB is the length of a vector register in bytes. We use <vscale x 8 x i8>
  // to represent one vector register. The dwarf offset is
  // VLENB * scalable_offset / 8.
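  // For example, a scalable offset of -16 (two vector registers) lowers to
  // the expression: DW_OP_constu 2, DW_OP_bregx vlenb 0, DW_OP_mul,
  // DW_OP_minus.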
  assert(Offset.getScalable() % 8 == 0 && "Invalid frame offset");

  // Add fixed-sized offset using existing DIExpression interface.
  DIExpression::appendOffset(Ops, Offset.getFixed());

  unsigned VLENB = getDwarfRegNum(RISCV::VLENB, true);
  int64_t VLENBSized = Offset.getScalable() / 8;
  if (VLENBSized > 0) {
    Ops.push_back(dwarf::DW_OP_constu);
    Ops.push_back(VLENBSized);
    Ops.append({dwarf::DW_OP_bregx, VLENB, 0ULL});
    Ops.push_back(dwarf::DW_OP_mul);
    Ops.push_back(dwarf::DW_OP_plus);
  } else if (VLENBSized < 0) {
    Ops.push_back(dwarf::DW_OP_constu);
    Ops.push_back(-VLENBSized);
    Ops.append({dwarf::DW_OP_bregx, VLENB, 0ULL});
    Ops.push_back(dwarf::DW_OP_mul);
    Ops.push_back(dwarf::DW_OP_minus);
  }
}

unsigned
RISCVRegisterInfo::getRegisterCostTableIndex(const MachineFunction &MF) const {
  return MF.getSubtarget<RISCVSubtarget>().hasStdExtCOrZca() ? 1 : 0;
}

// Add two address hints to improve chances of being able to use a compressed
// instruction.
bool RISCVRegisterInfo::getRegAllocationHints(
    Register VirtReg, ArrayRef<MCPhysReg> Order,
    SmallVectorImpl<MCPhysReg> &Hints, const MachineFunction &MF,
    const VirtRegMap *VRM, const LiveRegMatrix *Matrix) const {
  const MachineRegisterInfo *MRI = &MF.getRegInfo();

  bool BaseImplRetVal = TargetRegisterInfo::getRegAllocationHints(
      VirtReg, Order, Hints, MF, VRM, Matrix);

  if (!VRM || DisableRegAllocHints)
    return BaseImplRetVal;

  // Add any two address hints after any copy hints.
  SmallSet<Register, 4> TwoAddrHints;

  auto tryAddHint = [&](const MachineOperand &VRRegMO, const MachineOperand &MO,
                        bool NeedGPRC) -> void {
    Register Reg = MO.getReg();
    Register PhysReg = Reg.isPhysical() ? Reg : Register(VRM->getPhys(Reg));
    if (PhysReg && (!NeedGPRC || RISCV::GPRCRegClass.contains(PhysReg))) {
      assert(!MO.getSubReg() && !VRRegMO.getSubReg() && "Unexpected subreg!");
      if (!MRI->isReserved(PhysReg) && !is_contained(Hints, PhysReg))
        TwoAddrHints.insert(PhysReg);
    }
  };

  // These are all of the compressible binary instructions. If an instruction
  // needs GPRC register class operands, \p NeedGPRC will be set to true.
  auto isCompressible = [](const MachineInstr &MI, bool &NeedGPRC) {
    NeedGPRC = false;
    switch (MI.getOpcode()) {
    default:
      return false;
    case RISCV::AND:
    case RISCV::OR:
    case RISCV::XOR:
    case RISCV::SUB:
    case RISCV::ADDW:
    case RISCV::SUBW:
      NeedGPRC = true;
      return true;
    case RISCV::ANDI:
      NeedGPRC = true;
      return MI.getOperand(2).isImm() && isInt<6>(MI.getOperand(2).getImm());
    case RISCV::SRAI:
    case RISCV::SRLI:
      NeedGPRC = true;
      return true;
    case RISCV::ADD:
    case RISCV::SLLI:
      return true;
    case RISCV::ADDI:
    case RISCV::ADDIW:
      return MI.getOperand(2).isImm() && isInt<6>(MI.getOperand(2).getImm());
    }
  };
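
  // For example, ADD rd, rs1, rs2 compresses to C.ADD only when rd overlaps
  // one of the sources, so hinting a source's physical register for the
  // destination (and vice versa) makes compression possible.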

  // Returns true if this operand is compressible. For non-registers it always
  // returns true. The immediate range was already checked in isCompressible.
  // For registers, it checks if the register is a GPRC register. Reg-reg
  // instructions that require GPRC need all register operands to be GPRC.
  auto isCompressibleOpnd = [&](const MachineOperand &MO) {
    if (!MO.isReg())
      return true;
    Register Reg = MO.getReg();
    Register PhysReg = Reg.isPhysical() ? Reg : Register(VRM->getPhys(Reg));
    return PhysReg && RISCV::GPRCRegClass.contains(PhysReg);
  };

  for (auto &MO : MRI->reg_nodbg_operands(VirtReg)) {
    const MachineInstr &MI = *MO.getParent();
    unsigned OpIdx = MI.getOperandNo(&MO);
    bool NeedGPRC;
    if (isCompressible(MI, NeedGPRC)) {
      if (OpIdx == 0 && MI.getOperand(1).isReg()) {
        if (!NeedGPRC || isCompressibleOpnd(MI.getOperand(2)))
          tryAddHint(MO, MI.getOperand(1), NeedGPRC);
        if (MI.isCommutable() && MI.getOperand(2).isReg() &&
            (!NeedGPRC || isCompressibleOpnd(MI.getOperand(1))))
          tryAddHint(MO, MI.getOperand(2), NeedGPRC);
      } else if (OpIdx == 1 &&
                 (!NeedGPRC || isCompressibleOpnd(MI.getOperand(2)))) {
        tryAddHint(MO, MI.getOperand(0), NeedGPRC);
      } else if (MI.isCommutable() && OpIdx == 2 &&
                 (!NeedGPRC || isCompressibleOpnd(MI.getOperand(1)))) {
        tryAddHint(MO, MI.getOperand(0), NeedGPRC);
      }
    }
  }

  for (MCPhysReg OrderReg : Order)
    if (TwoAddrHints.count(OrderReg))
      Hints.push_back(OrderReg);

  return BaseImplRetVal;
}