1 //===- MipsInstructionSelector.cpp ------------------------------*- C++ -*-===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 /// \file
9 /// This file implements the targeting of the InstructionSelector class for
10 /// Mips.
11 /// \todo This should be generated by TableGen.
12 //===----------------------------------------------------------------------===//
13
14 #include "MCTargetDesc/MipsInstPrinter.h"
15 #include "MipsMachineFunction.h"
16 #include "MipsRegisterBankInfo.h"
17 #include "MipsTargetMachine.h"
18 #include "llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h"
19 #include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
20 #include "llvm/CodeGen/MachineJumpTableInfo.h"
21 #include "llvm/IR/IntrinsicsMips.h"
22
23 #define DEBUG_TYPE "mips-isel"
24
25 using namespace llvm;
26
27 namespace {
28
29 #define GET_GLOBALISEL_PREDICATE_BITSET
30 #include "MipsGenGlobalISel.inc"
31 #undef GET_GLOBALISEL_PREDICATE_BITSET
32
/// MIPS-specific GlobalISel instruction selector: maps generic (G_*) machine
/// instructions to concrete MIPS instructions. Selection logic lives partly
/// in the TableGen-generated selectImpl() and partly in hand-written code in
/// select().
class MipsInstructionSelector : public InstructionSelector {
public:
  MipsInstructionSelector(const MipsTargetMachine &TM, const MipsSubtarget &STI,
                          const MipsRegisterBankInfo &RBI);

  /// Entry point invoked once per generic instruction; returns true when the
  /// instruction was successfully selected.
  bool select(MachineInstr &I) override;
  static const char *getName() { return DEBUG_TYPE; }

private:
  /// TableGen-generated selection routine (body in MipsGenGlobalISel.inc).
  bool selectImpl(MachineInstr &I, CodeGenCoverage &CoverageInfo) const;
  // Register-bank queries: which bank the register-bank-selector assigned Reg.
  bool isRegInGprb(Register Reg, MachineRegisterInfo &MRI) const;
  bool isRegInFprb(Register Reg, MachineRegisterInfo &MRI) const;
  /// Emit the instruction sequence that loads the 32-bit immediate Imm into
  /// DestReg (single ORi/LUi/ADDiu when possible, otherwise LUi + ORi).
  bool materialize32BitImm(Register DestReg, APInt Imm,
                           MachineIRBuilder &B) const;
  /// Constrain the destination of a COPY to a concrete register class.
  bool selectCopy(MachineInstr &I, MachineRegisterInfo &MRI) const;
  /// Map (register bank, LLT) of Reg to the register class to use.
  const TargetRegisterClass *
  getRegClassForTypeOnBank(Register Reg, MachineRegisterInfo &MRI) const;
  /// Pick the concrete MIPS load/store opcode for I; returns I's own opcode
  /// when no match exists (callers treat that as selection failure).
  unsigned selectLoadStoreOpCode(MachineInstr &I,
                                 MachineRegisterInfo &MRI) const;

  const MipsTargetMachine &TM;
  const MipsSubtarget &STI;
  const MipsInstrInfo &TII;
  const MipsRegisterInfo &TRI;
  const MipsRegisterBankInfo &RBI;

#define GET_GLOBALISEL_PREDICATES_DECL
#include "MipsGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_DECL

#define GET_GLOBALISEL_TEMPORARIES_DECL
#include "MipsGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_DECL
};
67
68 } // end anonymous namespace
69
70 #define GET_GLOBALISEL_IMPL
71 #include "MipsGenGlobalISel.inc"
72 #undef GET_GLOBALISEL_IMPL
73
// Constructor: caches the subtarget's instruction/register/register-bank info
// and runs the TableGen-generated predicate and temporary initializers spliced
// in from MipsGenGlobalISel.inc (their order in the init list is fixed by the
// generated code).
MipsInstructionSelector::MipsInstructionSelector(
    const MipsTargetMachine &TM, const MipsSubtarget &STI,
    const MipsRegisterBankInfo &RBI)
    : InstructionSelector(), TM(TM), STI(STI), TII(*STI.getInstrInfo()),
      TRI(*STI.getRegisterInfo()), RBI(RBI),

#define GET_GLOBALISEL_PREDICATES_INIT
#include "MipsGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_INIT
#define GET_GLOBALISEL_TEMPORARIES_INIT
#include "MipsGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_INIT
{
}
88
isRegInGprb(Register Reg,MachineRegisterInfo & MRI) const89 bool MipsInstructionSelector::isRegInGprb(Register Reg,
90 MachineRegisterInfo &MRI) const {
91 return RBI.getRegBank(Reg, MRI, TRI)->getID() == Mips::GPRBRegBankID;
92 }
93
isRegInFprb(Register Reg,MachineRegisterInfo & MRI) const94 bool MipsInstructionSelector::isRegInFprb(Register Reg,
95 MachineRegisterInfo &MRI) const {
96 return RBI.getRegBank(Reg, MRI, TRI)->getID() == Mips::FPRBRegBankID;
97 }
98
selectCopy(MachineInstr & I,MachineRegisterInfo & MRI) const99 bool MipsInstructionSelector::selectCopy(MachineInstr &I,
100 MachineRegisterInfo &MRI) const {
101 Register DstReg = I.getOperand(0).getReg();
102 if (Register::isPhysicalRegister(DstReg))
103 return true;
104
105 const TargetRegisterClass *RC = getRegClassForTypeOnBank(DstReg, MRI);
106 if (!RBI.constrainGenericRegister(DstReg, *RC, MRI)) {
107 LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
108 << " operand\n");
109 return false;
110 }
111 return true;
112 }
113
getRegClassForTypeOnBank(Register Reg,MachineRegisterInfo & MRI) const114 const TargetRegisterClass *MipsInstructionSelector::getRegClassForTypeOnBank(
115 Register Reg, MachineRegisterInfo &MRI) const {
116 const LLT Ty = MRI.getType(Reg);
117 const unsigned TySize = Ty.getSizeInBits();
118
119 if (isRegInGprb(Reg, MRI)) {
120 assert((Ty.isScalar() || Ty.isPointer()) && TySize == 32 &&
121 "Register class not available for LLT, register bank combination");
122 return &Mips::GPR32RegClass;
123 }
124
125 if (isRegInFprb(Reg, MRI)) {
126 if (Ty.isScalar()) {
127 assert((TySize == 32 || TySize == 64) &&
128 "Register class not available for LLT, register bank combination");
129 if (TySize == 32)
130 return &Mips::FGR32RegClass;
131 return STI.isFP64bit() ? &Mips::FGR64RegClass : &Mips::AFGR64RegClass;
132 }
133 }
134
135 llvm_unreachable("Unsupported register bank.");
136 }
137
materialize32BitImm(Register DestReg,APInt Imm,MachineIRBuilder & B) const138 bool MipsInstructionSelector::materialize32BitImm(Register DestReg, APInt Imm,
139 MachineIRBuilder &B) const {
140 assert(Imm.getBitWidth() == 32 && "Unsupported immediate size.");
141 // Ori zero extends immediate. Used for values with zeros in high 16 bits.
142 if (Imm.getHiBits(16).isNullValue()) {
143 MachineInstr *Inst =
144 B.buildInstr(Mips::ORi, {DestReg}, {Register(Mips::ZERO)})
145 .addImm(Imm.getLoBits(16).getLimitedValue());
146 return constrainSelectedInstRegOperands(*Inst, TII, TRI, RBI);
147 }
148 // Lui places immediate in high 16 bits and sets low 16 bits to zero.
149 if (Imm.getLoBits(16).isNullValue()) {
150 MachineInstr *Inst = B.buildInstr(Mips::LUi, {DestReg}, {})
151 .addImm(Imm.getHiBits(16).getLimitedValue());
152 return constrainSelectedInstRegOperands(*Inst, TII, TRI, RBI);
153 }
154 // ADDiu sign extends immediate. Used for values with 1s in high 17 bits.
155 if (Imm.isSignedIntN(16)) {
156 MachineInstr *Inst =
157 B.buildInstr(Mips::ADDiu, {DestReg}, {Register(Mips::ZERO)})
158 .addImm(Imm.getLoBits(16).getLimitedValue());
159 return constrainSelectedInstRegOperands(*Inst, TII, TRI, RBI);
160 }
161 // Values that cannot be materialized with single immediate instruction.
162 Register LUiReg = B.getMRI()->createVirtualRegister(&Mips::GPR32RegClass);
163 MachineInstr *LUi = B.buildInstr(Mips::LUi, {LUiReg}, {})
164 .addImm(Imm.getHiBits(16).getLimitedValue());
165 MachineInstr *ORi = B.buildInstr(Mips::ORi, {DestReg}, {LUiReg})
166 .addImm(Imm.getLoBits(16).getLimitedValue());
167 if (!constrainSelectedInstRegOperands(*LUi, TII, TRI, RBI))
168 return false;
169 if (!constrainSelectedInstRegOperands(*ORi, TII, TRI, RBI))
170 return false;
171 return true;
172 }
173
/// Pick the concrete MIPS load/store opcode for the generic load/store \p I,
/// keyed on the value's register bank, LLT and in-memory size.
/// When I.getOpcode() is returned, we failed to select MIPS instruction opcode.
unsigned
MipsInstructionSelector::selectLoadStoreOpCode(MachineInstr &I,
                                               MachineRegisterInfo &MRI) const {
  const Register ValueReg = I.getOperand(0).getReg();
  const LLT Ty = MRI.getType(ValueReg);
  const unsigned TySize = Ty.getSizeInBits();
  const unsigned MemSizeInBytes = (*I.memoperands_begin())->getSize();
  unsigned Opc = I.getOpcode();
  const bool isStore = Opc == TargetOpcode::G_STORE;

  // Integer/pointer values: byte, half-word and word accesses.
  if (isRegInGprb(ValueReg, MRI)) {
    assert(((Ty.isScalar() && TySize == 32) ||
            (Ty.isPointer() && TySize == 32 && MemSizeInBytes == 4)) &&
           "Unsupported register bank, LLT, MemSizeInBytes combination");
    (void)TySize; // Only referenced by the assert above.
    if (isStore)
      switch (MemSizeInBytes) {
      case 4:
        return Mips::SW;
      case 2:
        return Mips::SH;
      case 1:
        return Mips::SB;
      default:
        return Opc;
      }
    else
      // Unspecified extending load is selected into zeroExtending load.
      switch (MemSizeInBytes) {
      case 4:
        return Mips::LW;
      case 2:
        return Opc == TargetOpcode::G_SEXTLOAD ? Mips::LH : Mips::LHu;
      case 1:
        return Opc == TargetOpcode::G_SEXTLOAD ? Mips::LB : Mips::LBu;
      default:
        return Opc;
      }
  }

  if (isRegInFprb(ValueReg, MRI)) {
    // f32/f64 accesses through coprocessor 1; the f64 opcode depends on the
    // subtarget's FPU mode.
    if (Ty.isScalar()) {
      assert(((TySize == 32 && MemSizeInBytes == 4) ||
              (TySize == 64 && MemSizeInBytes == 8)) &&
             "Unsupported register bank, LLT, MemSizeInBytes combination");

      if (MemSizeInBytes == 4)
        return isStore ? Mips::SWC1 : Mips::LWC1;

      if (STI.isFP64bit())
        return isStore ? Mips::SDC164 : Mips::LDC164;
      return isStore ? Mips::SDC1 : Mips::LDC1;
    }

    // 128-bit MSA vectors, selected by element width.
    if (Ty.isVector()) {
      assert(STI.hasMSA() && "Vector instructions require target with MSA.");
      assert((TySize == 128 && MemSizeInBytes == 16) &&
             "Unsupported register bank, LLT, MemSizeInBytes combination");
      switch (Ty.getElementType().getSizeInBits()) {
      case 8:
        return isStore ? Mips::ST_B : Mips::LD_B;
      case 16:
        return isStore ? Mips::ST_H : Mips::LD_H;
      case 32:
        return isStore ? Mips::ST_W : Mips::LD_W;
      case 64:
        return isStore ? Mips::ST_D : Mips::LD_D;
      default:
        return Opc;
      }
    }
  }

  // No opcode found; signal failure by echoing the generic opcode.
  return Opc;
}
250
/// Select the pre-isel generic instruction \p I into concrete MIPS machine
/// instructions. Order of attempts: special-cased COPY and gprb G_MUL, the
/// TableGen-generated selectImpl(), then the hand-written switch below.
/// Returns true on successful selection.
bool MipsInstructionSelector::select(MachineInstr &I) {

  MachineBasicBlock &MBB = *I.getParent();
  MachineFunction &MF = *MBB.getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();

  if (!isPreISelGenericOpcode(I.getOpcode())) {
    // Already target-specific; only copies still need their destination
    // register class constrained here.
    if (I.isCopy())
      return selectCopy(I, MRI);

    return true;
  }

  // gprb G_MUL is selected by hand (before selectImpl) so the trailing
  // implicit-def operands of MUL can be marked dead.
  if (I.getOpcode() == Mips::G_MUL &&
      isRegInGprb(I.getOperand(0).getReg(), MRI)) {
    MachineInstr *Mul = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::MUL))
                            .add(I.getOperand(0))
                            .add(I.getOperand(1))
                            .add(I.getOperand(2));
    if (!constrainSelectedInstRegOperands(*Mul, TII, TRI, RBI))
      return false;
    // Operands 3 and 4 are implicit defs added by the MUL description and are
    // unused here. (Presumably the HI/LO accumulators -- confirm against
    // MipsInstrInfo.td.)
    Mul->getOperand(3).setIsDead(true);
    Mul->getOperand(4).setIsDead(true);

    I.eraseFromParent();
    return true;
  }

  // Give the TableGen-generated patterns the first shot.
  if (selectImpl(I, *CoverageInfo))
    return true;

  // Cases that `break` out of the switch leave their single new instruction
  // in MI; it is constrained and the generic instruction erased at the end.
  MachineInstr *MI = nullptr;
  using namespace TargetOpcode;

  switch (I.getOpcode()) {
  case G_UMULH: {
    // Unsigned multiply into the ACC64 (HI/LO) pair, then move the high
    // word out with PseudoMFHI.
    Register PseudoMULTuReg = MRI.createVirtualRegister(&Mips::ACC64RegClass);
    MachineInstr *PseudoMULTu, *PseudoMove;

    PseudoMULTu = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::PseudoMULTu))
                      .addDef(PseudoMULTuReg)
                      .add(I.getOperand(1))
                      .add(I.getOperand(2));
    if (!constrainSelectedInstRegOperands(*PseudoMULTu, TII, TRI, RBI))
      return false;

    PseudoMove = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::PseudoMFHI))
                     .addDef(I.getOperand(0).getReg())
                     .addUse(PseudoMULTuReg);
    if (!constrainSelectedInstRegOperands(*PseudoMove, TII, TRI, RBI))
      return false;

    I.eraseFromParent();
    return true;
  }
  case G_PTR_ADD: {
    // Pointer arithmetic on 32-bit gprs is a plain ADDu.
    MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::ADDu))
             .add(I.getOperand(0))
             .add(I.getOperand(1))
             .add(I.getOperand(2));
    break;
  }
  case G_INTTOPTR:
  case G_PTRTOINT: {
    // Integer <-> pointer conversions are register-to-register copies.
    I.setDesc(TII.get(COPY));
    return selectCopy(I, MRI);
  }
  case G_FRAME_INDEX: {
    // Frame address materialized as ADDiu dst, frame-index, 0.
    MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::ADDiu))
             .add(I.getOperand(0))
             .add(I.getOperand(1))
             .addImm(0);
    break;
  }
  case G_BRCOND: {
    // Branch when the condition register is non-zero: BNE cond, $zero, dest.
    MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::BNE))
             .add(I.getOperand(0))
             .addUse(Mips::ZERO)
             .add(I.getOperand(1));
    break;
  }
  case G_BRJT: {
    // Jump table branch: scale the index by the entry size, add the table
    // base, load the entry, then branch indirectly through it.
    unsigned EntrySize =
        MF.getJumpTableInfo()->getEntrySize(MF.getDataLayout());
    assert(isPowerOf2_32(EntrySize) &&
           "Non-power-of-two jump-table entry size not supported.");

    Register JTIndex = MRI.createVirtualRegister(&Mips::GPR32RegClass);
    MachineInstr *SLL = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::SLL))
                            .addDef(JTIndex)
                            .addUse(I.getOperand(2).getReg())
                            .addImm(Log2_32(EntrySize));
    if (!constrainSelectedInstRegOperands(*SLL, TII, TRI, RBI))
      return false;

    Register DestAddress = MRI.createVirtualRegister(&Mips::GPR32RegClass);
    MachineInstr *ADDu = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::ADDu))
                             .addDef(DestAddress)
                             .addUse(I.getOperand(0).getReg())
                             .addUse(JTIndex);
    if (!constrainSelectedInstRegOperands(*ADDu, TII, TRI, RBI))
      return false;

    // Load the table entry; the low half of the table address is folded in
    // via the MO_ABS_LO jump-table operand.
    Register Dest = MRI.createVirtualRegister(&Mips::GPR32RegClass);
    MachineInstr *LW =
        BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::LW))
            .addDef(Dest)
            .addUse(DestAddress)
            .addJumpTableIndex(I.getOperand(1).getIndex(), MipsII::MO_ABS_LO)
            .addMemOperand(MF.getMachineMemOperand(
                MachinePointerInfo(), MachineMemOperand::MOLoad, 4, 4));
    if (!constrainSelectedInstRegOperands(*LW, TII, TRI, RBI))
      return false;

    if (MF.getTarget().isPositionIndependent()) {
      // PIC: redirect the load into a temporary and add the global base
      // register to form the final destination address.
      Register DestTmp = MRI.createVirtualRegister(&Mips::GPR32RegClass);
      LW->getOperand(0).setReg(DestTmp);
      MachineInstr *ADDu = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::ADDu))
                               .addDef(Dest)
                               .addUse(DestTmp)
                               .addUse(MF.getInfo<MipsFunctionInfo>()
                                           ->getGlobalBaseRegForGlobalISel());
      if (!constrainSelectedInstRegOperands(*ADDu, TII, TRI, RBI))
        return false;
    }

    MachineInstr *Branch =
        BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::PseudoIndirectBranch))
            .addUse(Dest);
    if (!constrainSelectedInstRegOperands(*Branch, TII, TRI, RBI))
      return false;

    I.eraseFromParent();
    return true;
  }
  case G_BRINDIRECT: {
    MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::PseudoIndirectBranch))
             .add(I.getOperand(0));
    break;
  }
  case G_PHI: {
    // Rewrite generic PHI to target-independent PHI and pin its destination
    // register class (phys-reg destinations already carry a class).
    const Register DestReg = I.getOperand(0).getReg();

    const TargetRegisterClass *DefRC = nullptr;
    if (Register::isPhysicalRegister(DestReg))
      DefRC = TRI.getRegClass(DestReg);
    else
      DefRC = getRegClassForTypeOnBank(DestReg, MRI);

    I.setDesc(TII.get(TargetOpcode::PHI));
    return RBI.constrainGenericRegister(DestReg, *DefRC, MRI);
  }
  case G_STORE:
  case G_LOAD:
  case G_ZEXTLOAD:
  case G_SEXTLOAD: {
    const unsigned NewOpc = selectLoadStoreOpCode(I, MRI);
    if (NewOpc == I.getOpcode())
      return false;

    MachineOperand BaseAddr = I.getOperand(1);
    int64_t SignedOffset = 0;
    // Try to fold load/store + G_PTR_ADD + G_CONSTANT
    // %SignedOffset:(s32) = G_CONSTANT i32 16_bit_signed_immediate
    // %Addr:(p0) = G_PTR_ADD %BaseAddr, %SignedOffset
    // %LoadResult/%StoreSrc = load/store %Addr(p0)
    // into:
    // %LoadResult/%StoreSrc = NewOpc %BaseAddr(p0), 16_bit_signed_immediate

    MachineInstr *Addr = MRI.getVRegDef(I.getOperand(1).getReg());
    if (Addr->getOpcode() == G_PTR_ADD) {
      MachineInstr *Offset = MRI.getVRegDef(Addr->getOperand(2).getReg());
      if (Offset->getOpcode() == G_CONSTANT) {
        APInt OffsetValue = Offset->getOperand(1).getCImm()->getValue();
        if (OffsetValue.isSignedIntN(16)) {
          BaseAddr = Addr->getOperand(1);
          SignedOffset = OffsetValue.getSExtValue();
        }
      }
    }

    MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(NewOpc))
             .add(I.getOperand(0))
             .add(BaseAddr)
             .addImm(SignedOffset)
             .addMemOperand(*I.memoperands_begin());
    break;
  }
  case G_UDIV:
  case G_UREM:
  case G_SDIV:
  case G_SREM: {
    // Divide into the ACC64 (HI/LO) pair, then copy out LO for the quotient
    // (div) or HI for the remainder (rem).
    Register HILOReg = MRI.createVirtualRegister(&Mips::ACC64RegClass);
    bool IsSigned = I.getOpcode() == G_SREM || I.getOpcode() == G_SDIV;
    bool IsDiv = I.getOpcode() == G_UDIV || I.getOpcode() == G_SDIV;

    MachineInstr *PseudoDIV, *PseudoMove;
    PseudoDIV = BuildMI(MBB, I, I.getDebugLoc(),
                        TII.get(IsSigned ? Mips::PseudoSDIV : Mips::PseudoUDIV))
                    .addDef(HILOReg)
                    .add(I.getOperand(1))
                    .add(I.getOperand(2));
    if (!constrainSelectedInstRegOperands(*PseudoDIV, TII, TRI, RBI))
      return false;

    PseudoMove = BuildMI(MBB, I, I.getDebugLoc(),
                         TII.get(IsDiv ? Mips::PseudoMFLO : Mips::PseudoMFHI))
                     .addDef(I.getOperand(0).getReg())
                     .addUse(HILOReg);
    if (!constrainSelectedInstRegOperands(*PseudoMove, TII, TRI, RBI))
      return false;

    I.eraseFromParent();
    return true;
  }
  case G_SELECT: {
    // Handle operands with pointer type.
    // MOVN_I_I dst, true-val, cond, false-val: dst starts as the false
    // value and is overwritten with the true value when cond is non-zero.
    MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::MOVN_I_I))
             .add(I.getOperand(0))
             .add(I.getOperand(2))
             .add(I.getOperand(1))
             .add(I.getOperand(3));
    break;
  }
  case G_IMPLICIT_DEF: {
    Register Dst = I.getOperand(0).getReg();
    MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::IMPLICIT_DEF))
             .addDef(Dst);

    // Set class based on register bank, there can be fpr and gpr implicit def.
    MRI.setRegClass(Dst, getRegClassForTypeOnBank(Dst, MRI));
    break;
  }
  case G_CONSTANT: {
    // Materialize the 32-bit integer constant directly.
    MachineIRBuilder B(I);
    if (!materialize32BitImm(I.getOperand(0).getReg(),
                             I.getOperand(1).getCImm()->getValue(), B))
      return false;

    I.eraseFromParent();
    return true;
  }
  case G_FCONSTANT: {
    // FP constants: materialize the bit pattern in gprs, then move it into
    // the fpr side (MTC1 for f32, BuildPairF64 for f64).
    const APFloat &FPimm = I.getOperand(1).getFPImm()->getValueAPF();
    APInt APImm = FPimm.bitcastToAPInt();
    unsigned Size = MRI.getType(I.getOperand(0).getReg()).getSizeInBits();

    if (Size == 32) {
      Register GPRReg = MRI.createVirtualRegister(&Mips::GPR32RegClass);
      MachineIRBuilder B(I);
      if (!materialize32BitImm(GPRReg, APImm, B))
        return false;

      MachineInstrBuilder MTC1 =
          B.buildInstr(Mips::MTC1, {I.getOperand(0).getReg()}, {GPRReg});
      if (!MTC1.constrainAllUses(TII, TRI, RBI))
        return false;
    }
    if (Size == 64) {
      Register GPRRegHigh = MRI.createVirtualRegister(&Mips::GPR32RegClass);
      Register GPRRegLow = MRI.createVirtualRegister(&Mips::GPR32RegClass);
      MachineIRBuilder B(I);
      if (!materialize32BitImm(GPRRegHigh, APImm.getHiBits(32).trunc(32), B))
        return false;
      if (!materialize32BitImm(GPRRegLow, APImm.getLoBits(32).trunc(32), B))
        return false;

      MachineInstrBuilder PairF64 = B.buildInstr(
          STI.isFP64bit() ? Mips::BuildPairF64_64 : Mips::BuildPairF64,
          {I.getOperand(0).getReg()}, {GPRRegLow, GPRRegHigh});
      if (!PairF64.constrainAllUses(TII, TRI, RBI))
        return false;
    }

    I.eraseFromParent();
    return true;
  }
  case G_FABS: {
    // Opcode depends on operand width and, for f64, the FPU mode.
    unsigned Size = MRI.getType(I.getOperand(0).getReg()).getSizeInBits();
    unsigned FABSOpcode =
        Size == 32 ? Mips::FABS_S
                   : STI.isFP64bit() ? Mips::FABS_D64 : Mips::FABS_D32;
    MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(FABSOpcode))
             .add(I.getOperand(0))
             .add(I.getOperand(1));
    break;
  }
  case G_FPTOSI: {
    // Truncate to a 32-bit integer inside the FPU, then move the result to
    // the integer side with MFC1.
    unsigned FromSize = MRI.getType(I.getOperand(1).getReg()).getSizeInBits();
    unsigned ToSize = MRI.getType(I.getOperand(0).getReg()).getSizeInBits();
    (void)ToSize;
    assert((ToSize == 32) && "Unsupported integer size for G_FPTOSI");
    assert((FromSize == 32 || FromSize == 64) &&
           "Unsupported floating point size for G_FPTOSI");

    unsigned Opcode;
    if (FromSize == 32)
      Opcode = Mips::TRUNC_W_S;
    else
      Opcode = STI.isFP64bit() ? Mips::TRUNC_W_D64 : Mips::TRUNC_W_D32;
    Register ResultInFPR = MRI.createVirtualRegister(&Mips::FGR32RegClass);
    MachineInstr *Trunc = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Opcode))
                              .addDef(ResultInFPR)
                              .addUse(I.getOperand(1).getReg());
    if (!constrainSelectedInstRegOperands(*Trunc, TII, TRI, RBI))
      return false;

    MachineInstr *Move = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::MFC1))
                             .addDef(I.getOperand(0).getReg())
                             .addUse(ResultInFPR);
    if (!constrainSelectedInstRegOperands(*Move, TII, TRI, RBI))
      return false;

    I.eraseFromParent();
    return true;
  }
  case G_GLOBAL_VALUE: {
    const llvm::GlobalValue *GVal = I.getOperand(1).getGlobal();
    if (MF.getTarget().isPositionIndependent()) {
      // PIC: load the address from the GOT through the global base register.
      MachineInstr *LWGOT = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::LW))
                                .addDef(I.getOperand(0).getReg())
                                .addReg(MF.getInfo<MipsFunctionInfo>()
                                            ->getGlobalBaseRegForGlobalISel())
                                .addGlobalAddress(GVal);
      // Global Values that don't have local linkage are handled differently
      // when they are part of call sequence. MipsCallLowering::lowerCall
      // creates G_GLOBAL_VALUE instruction as part of call sequence and adds
      // MO_GOT_CALL flag when Callee doesn't have local linkage.
      if (I.getOperand(1).getTargetFlags() == MipsII::MO_GOT_CALL)
        LWGOT->getOperand(2).setTargetFlags(MipsII::MO_GOT_CALL);
      else
        LWGOT->getOperand(2).setTargetFlags(MipsII::MO_GOT);
      LWGOT->addMemOperand(
          MF, MF.getMachineMemOperand(MachinePointerInfo::getGOT(MF),
                                      MachineMemOperand::MOLoad, 4, 4));
      if (!constrainSelectedInstRegOperands(*LWGOT, TII, TRI, RBI))
        return false;

      if (GVal->hasLocalLinkage()) {
        // Local symbols: the GOT holds the page address; add the low half
        // of the symbol address on top.
        Register LWGOTDef = MRI.createVirtualRegister(&Mips::GPR32RegClass);
        LWGOT->getOperand(0).setReg(LWGOTDef);

        MachineInstr *ADDiu =
            BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::ADDiu))
                .addDef(I.getOperand(0).getReg())
                .addReg(LWGOTDef)
                .addGlobalAddress(GVal);
        ADDiu->getOperand(2).setTargetFlags(MipsII::MO_ABS_LO);
        if (!constrainSelectedInstRegOperands(*ADDiu, TII, TRI, RBI))
          return false;
      }
    } else {
      // Non-PIC: classic LUi %hi(sym) + ADDiu %lo(sym) pair.
      Register LUiReg = MRI.createVirtualRegister(&Mips::GPR32RegClass);

      MachineInstr *LUi = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::LUi))
                              .addDef(LUiReg)
                              .addGlobalAddress(GVal);
      LUi->getOperand(1).setTargetFlags(MipsII::MO_ABS_HI);
      if (!constrainSelectedInstRegOperands(*LUi, TII, TRI, RBI))
        return false;

      MachineInstr *ADDiu =
          BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::ADDiu))
              .addDef(I.getOperand(0).getReg())
              .addUse(LUiReg)
              .addGlobalAddress(GVal);
      ADDiu->getOperand(2).setTargetFlags(MipsII::MO_ABS_LO);
      if (!constrainSelectedInstRegOperands(*ADDiu, TII, TRI, RBI))
        return false;
    }
    I.eraseFromParent();
    return true;
  }
  case G_JUMP_TABLE: {
    if (MF.getTarget().isPositionIndependent()) {
      // PIC: load the table address from the GOT.
      MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::LW))
               .addDef(I.getOperand(0).getReg())
               .addReg(MF.getInfo<MipsFunctionInfo>()
                           ->getGlobalBaseRegForGlobalISel())
               .addJumpTableIndex(I.getOperand(1).getIndex(), MipsII::MO_GOT)
               .addMemOperand(
                   MF.getMachineMemOperand(MachinePointerInfo::getGOT(MF),
                                           MachineMemOperand::MOLoad, 4, 4));
    } else {
      // Non-PIC: only the high half here; the low half is applied by the
      // LW emitted for G_BRJT (MO_ABS_LO).
      MI =
          BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::LUi))
              .addDef(I.getOperand(0).getReg())
              .addJumpTableIndex(I.getOperand(1).getIndex(), MipsII::MO_ABS_HI);
    }
    break;
  }
  case G_ICMP: {
    // Lower each predicate to one or two SLT-family instructions; the plan
    // is recorded first, then emitted below.
    struct Instr {
      unsigned Opcode;
      Register Def, LHS, RHS;
      Instr(unsigned Opcode, Register Def, Register LHS, Register RHS)
          : Opcode(Opcode), Def(Def), LHS(LHS), RHS(RHS){};

      // SLTiu/XORi take their RHS as an immediate, not a register.
      bool hasImm() const {
        if (Opcode == Mips::SLTiu || Opcode == Mips::XORi)
          return true;
        return false;
      }
    };

    SmallVector<struct Instr, 2> Instructions;
    Register ICMPReg = I.getOperand(0).getReg();
    Register Temp = MRI.createVirtualRegister(&Mips::GPR32RegClass);
    Register LHS = I.getOperand(2).getReg();
    Register RHS = I.getOperand(3).getReg();
    CmpInst::Predicate Cond =
        static_cast<CmpInst::Predicate>(I.getOperand(1).getPredicate());

    switch (Cond) {
    case CmpInst::ICMP_EQ: // LHS == RHS -> (LHS ^ RHS) < 1
      Instructions.emplace_back(Mips::XOR, Temp, LHS, RHS);
      Instructions.emplace_back(Mips::SLTiu, ICMPReg, Temp, 1);
      break;
    case CmpInst::ICMP_NE: // LHS != RHS -> 0 < (LHS ^ RHS)
      Instructions.emplace_back(Mips::XOR, Temp, LHS, RHS);
      Instructions.emplace_back(Mips::SLTu, ICMPReg, Mips::ZERO, Temp);
      break;
    case CmpInst::ICMP_UGT: // LHS >  RHS -> RHS < LHS
      Instructions.emplace_back(Mips::SLTu, ICMPReg, RHS, LHS);
      break;
    case CmpInst::ICMP_UGE: // LHS >= RHS -> !(LHS < RHS)
      Instructions.emplace_back(Mips::SLTu, Temp, LHS, RHS);
      Instructions.emplace_back(Mips::XORi, ICMPReg, Temp, 1);
      break;
    case CmpInst::ICMP_ULT: // LHS <  RHS -> LHS < RHS
      Instructions.emplace_back(Mips::SLTu, ICMPReg, LHS, RHS);
      break;
    case CmpInst::ICMP_ULE: // LHS <= RHS -> !(RHS < LHS)
      Instructions.emplace_back(Mips::SLTu, Temp, RHS, LHS);
      Instructions.emplace_back(Mips::XORi, ICMPReg, Temp, 1);
      break;
    case CmpInst::ICMP_SGT: // LHS >  RHS -> RHS < LHS
      Instructions.emplace_back(Mips::SLT, ICMPReg, RHS, LHS);
      break;
    case CmpInst::ICMP_SGE: // LHS >= RHS -> !(LHS < RHS)
      Instructions.emplace_back(Mips::SLT, Temp, LHS, RHS);
      Instructions.emplace_back(Mips::XORi, ICMPReg, Temp, 1);
      break;
    case CmpInst::ICMP_SLT: // LHS <  RHS -> LHS < RHS
      Instructions.emplace_back(Mips::SLT, ICMPReg, LHS, RHS);
      break;
    case CmpInst::ICMP_SLE: // LHS <= RHS -> !(RHS < LHS)
      Instructions.emplace_back(Mips::SLT, Temp, RHS, LHS);
      Instructions.emplace_back(Mips::XORi, ICMPReg, Temp, 1);
      break;
    default:
      return false;
    }

    // Emit the planned instruction(s) in order.
    MachineIRBuilder B(I);
    for (const struct Instr &Instruction : Instructions) {
      MachineInstrBuilder MIB = B.buildInstr(
          Instruction.Opcode, {Instruction.Def}, {Instruction.LHS});

      if (Instruction.hasImm())
        MIB.addImm(Instruction.RHS);
      else
        MIB.addUse(Instruction.RHS);

      if (!MIB.constrainAllUses(TII, TRI, RBI))
        return false;
    }

    I.eraseFromParent();
    return true;
  }
  case G_FCMP: {
    // Map the IR predicate to a MIPS FP condition code; predicates that are
    // the logical negation of a supported code are handled by inverting the
    // conditional move below.
    unsigned MipsFCMPCondCode;
    bool isLogicallyNegated;
    switch (CmpInst::Predicate Cond = static_cast<CmpInst::Predicate>(
               I.getOperand(1).getPredicate())) {
    case CmpInst::FCMP_UNO: // Unordered
    case CmpInst::FCMP_ORD: // Ordered (OR)
      MipsFCMPCondCode = Mips::FCOND_UN;
      isLogicallyNegated = Cond != CmpInst::FCMP_UNO;
      break;
    case CmpInst::FCMP_OEQ: // Equal
    case CmpInst::FCMP_UNE: // Not Equal (NEQ)
      MipsFCMPCondCode = Mips::FCOND_OEQ;
      isLogicallyNegated = Cond != CmpInst::FCMP_OEQ;
      break;
    case CmpInst::FCMP_UEQ: // Unordered or Equal
    case CmpInst::FCMP_ONE: // Ordered or Greater Than or Less Than (OGL)
      MipsFCMPCondCode = Mips::FCOND_UEQ;
      isLogicallyNegated = Cond != CmpInst::FCMP_UEQ;
      break;
    case CmpInst::FCMP_OLT: // Ordered or Less Than
    case CmpInst::FCMP_UGE: // Unordered or Greater Than or Equal (UGE)
      MipsFCMPCondCode = Mips::FCOND_OLT;
      isLogicallyNegated = Cond != CmpInst::FCMP_OLT;
      break;
    case CmpInst::FCMP_ULT: // Unordered or Less Than
    case CmpInst::FCMP_OGE: // Ordered or Greater Than or Equal (OGE)
      MipsFCMPCondCode = Mips::FCOND_ULT;
      isLogicallyNegated = Cond != CmpInst::FCMP_ULT;
      break;
    case CmpInst::FCMP_OLE: // Ordered or Less Than or Equal
    case CmpInst::FCMP_UGT: // Unordered or Greater Than (UGT)
      MipsFCMPCondCode = Mips::FCOND_OLE;
      isLogicallyNegated = Cond != CmpInst::FCMP_OLE;
      break;
    case CmpInst::FCMP_ULE: // Unordered or Less Than or Equal
    case CmpInst::FCMP_OGT: // Ordered or Greater Than (OGT)
      MipsFCMPCondCode = Mips::FCOND_ULE;
      isLogicallyNegated = Cond != CmpInst::FCMP_ULE;
      break;
    default:
      return false;
    }

    // Default compare result in gpr register will be `true`.
    // We will move `false` (MIPS::Zero) to gpr result when fcmp gives false
    // using MOVF_I. When original predicate (Cond) is logically negated
    // MipsFCMPCondCode, result is inverted i.e. MOVT_I is used.
    unsigned MoveOpcode = isLogicallyNegated ? Mips::MOVT_I : Mips::MOVF_I;

    Register TrueInReg = MRI.createVirtualRegister(&Mips::GPR32RegClass);
    BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::ADDiu))
        .addDef(TrueInReg)
        .addUse(Mips::ZERO)
        .addImm(1);

    unsigned Size = MRI.getType(I.getOperand(2).getReg()).getSizeInBits();
    unsigned FCMPOpcode =
        Size == 32 ? Mips::FCMP_S32
                   : STI.isFP64bit() ? Mips::FCMP_D64 : Mips::FCMP_D32;
    MachineInstr *FCMP = BuildMI(MBB, I, I.getDebugLoc(), TII.get(FCMPOpcode))
                             .addUse(I.getOperand(2).getReg())
                             .addUse(I.getOperand(3).getReg())
                             .addImm(MipsFCMPCondCode);
    if (!constrainSelectedInstRegOperands(*FCMP, TII, TRI, RBI))
      return false;

    MachineInstr *Move = BuildMI(MBB, I, I.getDebugLoc(), TII.get(MoveOpcode))
                             .addDef(I.getOperand(0).getReg())
                             .addUse(Mips::ZERO)
                             .addUse(Mips::FCC0)
                             .addUse(TrueInReg);
    if (!constrainSelectedInstRegOperands(*Move, TII, TRI, RBI))
      return false;

    I.eraseFromParent();
    return true;
  }
  case G_FENCE: {
    MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::SYNC)).addImm(0);
    break;
  }
  case G_VASTART: {
    // Store the address of the first vararg slot into the va_list object.
    MipsFunctionInfo *FuncInfo = MF.getInfo<MipsFunctionInfo>();
    int FI = FuncInfo->getVarArgsFrameIndex();

    Register LeaReg = MRI.createVirtualRegister(&Mips::GPR32RegClass);
    MachineInstr *LEA_ADDiu =
        BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::LEA_ADDiu))
            .addDef(LeaReg)
            .addFrameIndex(FI)
            .addImm(0);
    if (!constrainSelectedInstRegOperands(*LEA_ADDiu, TII, TRI, RBI))
      return false;

    MachineInstr *Store = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::SW))
                              .addUse(LeaReg)
                              .addUse(I.getOperand(0).getReg())
                              .addImm(0);
    if (!constrainSelectedInstRegOperands(*Store, TII, TRI, RBI))
      return false;

    I.eraseFromParent();
    return true;
  }
  default:
    return false;
  }

  // Single-instruction cases: erase the generic instruction and constrain
  // the replacement built above.
  I.eraseFromParent();
  return constrainSelectedInstRegOperands(*MI, TII, TRI, RBI);
}
834
namespace llvm {
/// Factory entry point used by the MIPS target to create the GlobalISel
/// instruction selector for the given target machine and subtarget.
InstructionSelector *createMipsInstructionSelector(const MipsTargetMachine &TM,
                                                   MipsSubtarget &Subtarget,
                                                   MipsRegisterBankInfo &RBI) {
  return new MipsInstructionSelector(TM, Subtarget, RBI);
}
} // end namespace llvm
842