//===-- llvm/CodeGen/GlobalISel/MachineIRBuilder.cpp - MIBuilder--*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the MachineIRBuilder class.
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/DebugInfo.h"

using namespace llvm;

void MachineIRBuilder::setMF(MachineFunction &MF) {
  State.MF = &MF;
  State.MBB = nullptr;
  State.MRI = &MF.getRegInfo();
  State.TII = MF.getSubtarget().getInstrInfo();
  State.DL = DebugLoc();
  State.II = MachineBasicBlock::iterator();
  State.Observer = nullptr;
}

//------------------------------------------------------------------------------
// Build instruction variants.
//------------------------------------------------------------------------------

MachineInstrBuilder MachineIRBuilder::buildInstrNoInsert(unsigned Opcode) {
  MachineInstrBuilder MIB = BuildMI(getMF(), getDL(), getTII().get(Opcode));
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::insertInstr(MachineInstrBuilder MIB) {
  getMBB().insert(getInsertPt(), MIB);
  recordInsertion(MIB);
  return MIB;
}

MachineInstrBuilder
MachineIRBuilder::buildDirectDbgValue(Register Reg, const MDNode *Variable,
                                      const MDNode *Expr) {
  assert(isa<DILocalVariable>(Variable) && "not a variable");
  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
  assert(
      cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
      "Expected inlined-at fields to agree");
  return insertInstr(BuildMI(getMF(), getDL(),
                             getTII().get(TargetOpcode::DBG_VALUE),
                             /*IsIndirect*/ false, Reg, Variable, Expr));
}

MachineInstrBuilder
MachineIRBuilder::buildIndirectDbgValue(Register Reg, const MDNode *Variable,
                                        const MDNode *Expr) {
  assert(isa<DILocalVariable>(Variable) && "not a variable");
  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
  assert(
      cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
      "Expected inlined-at fields to agree");
  return insertInstr(BuildMI(getMF(), getDL(),
                             getTII().get(TargetOpcode::DBG_VALUE),
                             /*IsIndirect*/ true, Reg, Variable, Expr));
}

MachineInstrBuilder MachineIRBuilder::buildFIDbgValue(int FI,
                                                      const MDNode *Variable,
                                                      const MDNode *Expr) {
  assert(isa<DILocalVariable>(Variable) && "not a variable");
  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
  assert(
      cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
      "Expected inlined-at fields to agree");
  return buildInstr(TargetOpcode::DBG_VALUE)
      .addFrameIndex(FI)
      .addImm(0)
      .addMetadata(Variable)
      .addMetadata(Expr);
}

MachineInstrBuilder MachineIRBuilder::buildConstDbgValue(const Constant &C,
                                                         const MDNode *Variable,
                                                         const MDNode *Expr) {
  assert(isa<DILocalVariable>(Variable) && "not a variable");
  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
  assert(
      cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
      "Expected inlined-at fields to agree");
  auto MIB = buildInstrNoInsert(TargetOpcode::DBG_VALUE);
  if (auto *CI = dyn_cast<ConstantInt>(&C)) {
    if (CI->getBitWidth() > 64)
      MIB.addCImm(CI);
    else
      MIB.addImm(CI->getZExtValue());
  } else if (auto *CFP = dyn_cast<ConstantFP>(&C)) {
    MIB.addFPImm(CFP);
  } else {
    // Insert $noreg if we didn't find a usable constant and had to drop it.
    MIB.addReg(Register());
  }

  MIB.addImm(0).addMetadata(Variable).addMetadata(Expr);
  return insertInstr(MIB);
}
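
// Note for buildConstDbgValue above: a ConstantInt that fits in 64 bits is
// emitted as a plain immediate, a wider ConstantInt as a CImm, a ConstantFP
// as an FPImm, and any other constant is dropped to $noreg; the 0 offset,
// variable and expression operands always follow.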

MachineInstrBuilder MachineIRBuilder::buildDbgLabel(const MDNode *Label) {
  assert(isa<DILabel>(Label) && "not a label");
  assert(cast<DILabel>(Label)->isValidLocationForIntrinsic(State.DL) &&
         "Expected inlined-at fields to agree");
  auto MIB = buildInstr(TargetOpcode::DBG_LABEL);

  return MIB.addMetadata(Label);
}

MachineInstrBuilder MachineIRBuilder::buildDynStackAlloc(const DstOp &Res,
                                                         const SrcOp &Size,
                                                         Align Alignment) {
  assert(Res.getLLTTy(*getMRI()).isPointer() && "expected ptr dst type");
  auto MIB = buildInstr(TargetOpcode::G_DYN_STACKALLOC);
  Res.addDefToMIB(*getMRI(), MIB);
  Size.addSrcToMIB(MIB);
  MIB.addImm(Alignment.value());
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildFrameIndex(const DstOp &Res,
                                                      int Idx) {
  assert(Res.getLLTTy(*getMRI()).isPointer() && "invalid operand type");
  auto MIB = buildInstr(TargetOpcode::G_FRAME_INDEX);
  Res.addDefToMIB(*getMRI(), MIB);
  MIB.addFrameIndex(Idx);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildGlobalValue(const DstOp &Res,
                                                       const GlobalValue *GV) {
  assert(Res.getLLTTy(*getMRI()).isPointer() && "invalid operand type");
  assert(Res.getLLTTy(*getMRI()).getAddressSpace() ==
             GV->getType()->getAddressSpace() &&
         "address space mismatch");

  auto MIB = buildInstr(TargetOpcode::G_GLOBAL_VALUE);
  Res.addDefToMIB(*getMRI(), MIB);
  MIB.addGlobalAddress(GV);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildJumpTable(const LLT PtrTy,
                                                     unsigned JTI) {
  return buildInstr(TargetOpcode::G_JUMP_TABLE, {PtrTy}, {})
      .addJumpTableIndex(JTI);
}

void MachineIRBuilder::validateUnaryOp(const LLT Res, const LLT Op0) {
  assert((Res.isScalar() || Res.isVector()) && "invalid operand type");
  assert((Res == Op0) && "type mismatch");
}

void MachineIRBuilder::validateBinaryOp(const LLT Res, const LLT Op0,
                                        const LLT Op1) {
  assert((Res.isScalar() || Res.isVector()) && "invalid operand type");
  assert((Res == Op0 && Res == Op1) && "type mismatch");
}

void MachineIRBuilder::validateShiftOp(const LLT Res, const LLT Op0,
                                       const LLT Op1) {
  assert((Res.isScalar() || Res.isVector()) && "invalid operand type");
  assert((Res == Op0) && "type mismatch");
}

MachineInstrBuilder MachineIRBuilder::buildPtrAdd(const DstOp &Res,
                                                  const SrcOp &Op0,
                                                  const SrcOp &Op1) {
  assert(Res.getLLTTy(*getMRI()).getScalarType().isPointer() &&
         Res.getLLTTy(*getMRI()) == Op0.getLLTTy(*getMRI()) && "type mismatch");
  assert(Op1.getLLTTy(*getMRI()).getScalarType().isScalar() &&
         "invalid offset type");

  return buildInstr(TargetOpcode::G_PTR_ADD, {Res}, {Op0, Op1});
}

Optional<MachineInstrBuilder>
MachineIRBuilder::materializePtrAdd(Register &Res, Register Op0,
                                    const LLT ValueTy, uint64_t Value) {
  assert(Res == 0 && "Res is a result argument");
  assert(ValueTy.isScalar() && "invalid offset type");

  if (Value == 0) {
    Res = Op0;
    return None;
  }

  Res = getMRI()->createGenericVirtualRegister(getMRI()->getType(Op0));
  auto Cst = buildConstant(ValueTy, Value);
  return buildPtrAdd(Res, Op0, Cst.getReg(0));
}
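
// Usage sketch for materializePtrAdd above (illustrative; `MIRBuilder`, `Base`
// and `Off` are placeholders):
//   Register NewPtr;
//   auto PtrAdd =
//       MIRBuilder.materializePtrAdd(NewPtr, Base, LLT::scalar(64), Off);
// With Off == 0 this returns None and aliases NewPtr to Base; otherwise it
// emits a G_CONSTANT plus a G_PTR_ADD and NewPtr holds the offset pointer.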

MachineInstrBuilder MachineIRBuilder::buildMaskLowPtrBits(const DstOp &Res,
                                                          const SrcOp &Op0,
                                                          uint32_t NumBits) {
  LLT PtrTy = Res.getLLTTy(*getMRI());
  LLT MaskTy = LLT::scalar(PtrTy.getSizeInBits());
  Register MaskReg = getMRI()->createGenericVirtualRegister(MaskTy);
  buildConstant(MaskReg, maskTrailingZeros<uint64_t>(NumBits));
  return buildPtrMask(Res, Op0, MaskReg);
}
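
// Example for buildMaskLowPtrBits above (illustrative): NumBits == 4 builds
// the mask constant 0xFFFFFFFFFFFFFFF0, so the resulting G_PTRMASK clears the
// low four pointer bits, i.e. rounds the pointer down to 16-byte alignment.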

MachineInstrBuilder MachineIRBuilder::buildBr(MachineBasicBlock &Dest) {
  return buildInstr(TargetOpcode::G_BR).addMBB(&Dest);
}

MachineInstrBuilder MachineIRBuilder::buildBrIndirect(Register Tgt) {
  assert(getMRI()->getType(Tgt).isPointer() && "invalid branch destination");
  return buildInstr(TargetOpcode::G_BRINDIRECT).addUse(Tgt);
}

MachineInstrBuilder MachineIRBuilder::buildBrJT(Register TablePtr,
                                                unsigned JTI,
                                                Register IndexReg) {
  assert(getMRI()->getType(TablePtr).isPointer() &&
         "Table reg must be a pointer");
  return buildInstr(TargetOpcode::G_BRJT)
      .addUse(TablePtr)
      .addJumpTableIndex(JTI)
      .addUse(IndexReg);
}

MachineInstrBuilder MachineIRBuilder::buildCopy(const DstOp &Res,
                                                const SrcOp &Op) {
  return buildInstr(TargetOpcode::COPY, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildConstant(const DstOp &Res,
                                                    const ConstantInt &Val) {
  LLT Ty = Res.getLLTTy(*getMRI());
  LLT EltTy = Ty.getScalarType();
  assert(EltTy.getScalarSizeInBits() == Val.getBitWidth() &&
         "creating constant with the wrong size");

  if (Ty.isVector()) {
    auto Const = buildInstr(TargetOpcode::G_CONSTANT)
                     .addDef(getMRI()->createGenericVirtualRegister(EltTy))
                     .addCImm(&Val);
    return buildSplatVector(Res, Const);
  }

  auto Const = buildInstr(TargetOpcode::G_CONSTANT);
  Const->setDebugLoc(DebugLoc());
  Res.addDefToMIB(*getMRI(), Const);
  Const.addCImm(&Val);
  return Const;
}
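
// Usage sketch for buildConstant above (illustrative):
//   auto C = MIRBuilder.buildConstant(LLT::scalar(32), 42); // G_CONSTANT 42
// A vector destination first builds the scalar G_CONSTANT and then splats it
// into the result with buildSplatVector (G_BUILD_VECTOR).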

MachineInstrBuilder MachineIRBuilder::buildConstant(const DstOp &Res,
                                                    int64_t Val) {
  auto IntN = IntegerType::get(getMF().getFunction().getContext(),
                               Res.getLLTTy(*getMRI()).getScalarSizeInBits());
  ConstantInt *CI = ConstantInt::get(IntN, Val, true);
  return buildConstant(Res, *CI);
}

MachineInstrBuilder MachineIRBuilder::buildFConstant(const DstOp &Res,
                                                     const ConstantFP &Val) {
  LLT Ty = Res.getLLTTy(*getMRI());
  LLT EltTy = Ty.getScalarType();

  assert(APFloat::getSizeInBits(Val.getValueAPF().getSemantics())
         == EltTy.getSizeInBits() &&
         "creating fconstant with the wrong size");

  assert(!Ty.isPointer() && "invalid operand type");

  if (Ty.isVector()) {
    auto Const = buildInstr(TargetOpcode::G_FCONSTANT)
                     .addDef(getMRI()->createGenericVirtualRegister(EltTy))
                     .addFPImm(&Val);

    return buildSplatVector(Res, Const);
  }

  auto Const = buildInstr(TargetOpcode::G_FCONSTANT);
  Const->setDebugLoc(DebugLoc());
  Res.addDefToMIB(*getMRI(), Const);
  Const.addFPImm(&Val);
  return Const;
}

MachineInstrBuilder MachineIRBuilder::buildConstant(const DstOp &Res,
                                                    const APInt &Val) {
  ConstantInt *CI = ConstantInt::get(getMF().getFunction().getContext(), Val);
  return buildConstant(Res, *CI);
}

MachineInstrBuilder MachineIRBuilder::buildFConstant(const DstOp &Res,
                                                     double Val) {
  LLT DstTy = Res.getLLTTy(*getMRI());
  auto &Ctx = getMF().getFunction().getContext();
  auto *CFP = ConstantFP::get(
      Ctx, getAPFloatFromSize(Val, DstTy.getScalarSizeInBits()));
  return buildFConstant(Res, *CFP);
}

MachineInstrBuilder MachineIRBuilder::buildFConstant(const DstOp &Res,
                                                     const APFloat &Val) {
  auto &Ctx = getMF().getFunction().getContext();
  auto *CFP = ConstantFP::get(Ctx, Val);
  return buildFConstant(Res, *CFP);
}

MachineInstrBuilder MachineIRBuilder::buildBrCond(const SrcOp &Tst,
                                                  MachineBasicBlock &Dest) {
  assert(Tst.getLLTTy(*getMRI()).isScalar() && "invalid operand type");

  auto MIB = buildInstr(TargetOpcode::G_BRCOND);
  Tst.addSrcToMIB(MIB);
  MIB.addMBB(&Dest);
  return MIB;
}

MachineInstrBuilder
MachineIRBuilder::buildLoad(const DstOp &Dst, const SrcOp &Addr,
                            MachinePointerInfo PtrInfo, Align Alignment,
                            MachineMemOperand::Flags MMOFlags,
                            const AAMDNodes &AAInfo) {
  MMOFlags |= MachineMemOperand::MOLoad;
  assert((MMOFlags & MachineMemOperand::MOStore) == 0);

  uint64_t Size = MemoryLocation::getSizeOrUnknown(
      TypeSize::Fixed(Dst.getLLTTy(*getMRI()).getSizeInBytes()));
  MachineMemOperand *MMO =
      getMF().getMachineMemOperand(PtrInfo, MMOFlags, Size, Alignment, AAInfo);
  return buildLoad(Dst, Addr, *MMO);
}

MachineInstrBuilder MachineIRBuilder::buildLoadInstr(unsigned Opcode,
                                                     const DstOp &Res,
                                                     const SrcOp &Addr,
                                                     MachineMemOperand &MMO) {
  assert(Res.getLLTTy(*getMRI()).isValid() && "invalid operand type");
  assert(Addr.getLLTTy(*getMRI()).isPointer() && "invalid operand type");

  auto MIB = buildInstr(Opcode);
  Res.addDefToMIB(*getMRI(), MIB);
  Addr.addSrcToMIB(MIB);
  MIB.addMemOperand(&MMO);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildLoadFromOffset(
    const DstOp &Dst, const SrcOp &BasePtr,
    MachineMemOperand &BaseMMO, int64_t Offset) {
  LLT LoadTy = Dst.getLLTTy(*getMRI());
  MachineMemOperand *OffsetMMO =
      getMF().getMachineMemOperand(&BaseMMO, Offset, LoadTy.getSizeInBytes());

  if (Offset == 0) // This may be a size or type changing load.
    return buildLoad(Dst, BasePtr, *OffsetMMO);

  LLT PtrTy = BasePtr.getLLTTy(*getMRI());
  LLT OffsetTy = LLT::scalar(PtrTy.getSizeInBits());
  auto ConstOffset = buildConstant(OffsetTy, Offset);
  auto Ptr = buildPtrAdd(PtrTy, BasePtr, ConstOffset);
  return buildLoad(Dst, Ptr, *OffsetMMO);
}
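
// Usage sketch for buildLoadFromOffset above (illustrative; `BasePtr` and
// `BaseMMO` are placeholders): loading an s32 at byte offset 4,
//   auto Fld =
//       MIRBuilder.buildLoadFromOffset(LLT::scalar(32), BasePtr, BaseMMO, 4);
// emits G_CONSTANT 4, G_PTR_ADD and a G_LOAD whose MMO is derived from
// BaseMMO at offset 4; a zero offset loads through BasePtr directly.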

MachineInstrBuilder MachineIRBuilder::buildStore(const SrcOp &Val,
                                                 const SrcOp &Addr,
                                                 MachineMemOperand &MMO) {
  assert(Val.getLLTTy(*getMRI()).isValid() && "invalid operand type");
  assert(Addr.getLLTTy(*getMRI()).isPointer() && "invalid operand type");

  auto MIB = buildInstr(TargetOpcode::G_STORE);
  Val.addSrcToMIB(MIB);
  Addr.addSrcToMIB(MIB);
  MIB.addMemOperand(&MMO);
  return MIB;
}

MachineInstrBuilder
MachineIRBuilder::buildStore(const SrcOp &Val, const SrcOp &Addr,
                             MachinePointerInfo PtrInfo, Align Alignment,
                             MachineMemOperand::Flags MMOFlags,
                             const AAMDNodes &AAInfo) {
  MMOFlags |= MachineMemOperand::MOStore;
  assert((MMOFlags & MachineMemOperand::MOLoad) == 0);

  uint64_t Size = MemoryLocation::getSizeOrUnknown(
      TypeSize::Fixed(Val.getLLTTy(*getMRI()).getSizeInBytes()));
  MachineMemOperand *MMO =
      getMF().getMachineMemOperand(PtrInfo, MMOFlags, Size, Alignment, AAInfo);
  return buildStore(Val, Addr, *MMO);
}

MachineInstrBuilder MachineIRBuilder::buildAnyExt(const DstOp &Res,
                                                  const SrcOp &Op) {
  return buildInstr(TargetOpcode::G_ANYEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildSExt(const DstOp &Res,
                                                const SrcOp &Op) {
  return buildInstr(TargetOpcode::G_SEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildZExt(const DstOp &Res,
                                                const SrcOp &Op) {
  return buildInstr(TargetOpcode::G_ZEXT, Res, Op);
}

unsigned MachineIRBuilder::getBoolExtOp(bool IsVec, bool IsFP) const {
  const auto *TLI = getMF().getSubtarget().getTargetLowering();
  switch (TLI->getBooleanContents(IsVec, IsFP)) {
  case TargetLoweringBase::ZeroOrNegativeOneBooleanContent:
    return TargetOpcode::G_SEXT;
  case TargetLoweringBase::ZeroOrOneBooleanContent:
    return TargetOpcode::G_ZEXT;
  default:
    return TargetOpcode::G_ANYEXT;
  }
}
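
// Example for getBoolExtOp above (illustrative): a target reporting
// ZeroOrNegativeOneBooleanContent gets G_SEXT, ZeroOrOneBooleanContent gets
// G_ZEXT, and anything else (e.g. UndefinedBooleanContent) falls back to
// G_ANYEXT when widening a boolean value.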

MachineInstrBuilder MachineIRBuilder::buildBoolExt(const DstOp &Res,
                                                   const SrcOp &Op,
                                                   bool IsFP) {
  unsigned ExtOp = getBoolExtOp(getMRI()->getType(Op.getReg()).isVector(), IsFP);
  return buildInstr(ExtOp, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildExtOrTrunc(unsigned ExtOpc,
                                                      const DstOp &Res,
                                                      const SrcOp &Op) {
  assert((TargetOpcode::G_ANYEXT == ExtOpc || TargetOpcode::G_ZEXT == ExtOpc ||
          TargetOpcode::G_SEXT == ExtOpc) &&
         "Expecting Extending Opc");
  assert(Res.getLLTTy(*getMRI()).isScalar() ||
         Res.getLLTTy(*getMRI()).isVector());
  assert(Res.getLLTTy(*getMRI()).isScalar() ==
         Op.getLLTTy(*getMRI()).isScalar());

  unsigned Opcode = TargetOpcode::COPY;
  if (Res.getLLTTy(*getMRI()).getSizeInBits() >
      Op.getLLTTy(*getMRI()).getSizeInBits())
    Opcode = ExtOpc;
  else if (Res.getLLTTy(*getMRI()).getSizeInBits() <
           Op.getLLTTy(*getMRI()).getSizeInBits())
    Opcode = TargetOpcode::G_TRUNC;
  else
    assert(Res.getLLTTy(*getMRI()) == Op.getLLTTy(*getMRI()));

  return buildInstr(Opcode, Res, Op);
}
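
// Usage sketch for buildExtOrTrunc above (illustrative), with an s32 source:
//   buildSExtOrTrunc(s64Dst, Src) -> G_SEXT
//   buildSExtOrTrunc(s16Dst, Src) -> G_TRUNC
//   buildSExtOrTrunc(s32Dst, Src) -> COPY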

MachineInstrBuilder MachineIRBuilder::buildSExtOrTrunc(const DstOp &Res,
                                                       const SrcOp &Op) {
  return buildExtOrTrunc(TargetOpcode::G_SEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildZExtOrTrunc(const DstOp &Res,
                                                       const SrcOp &Op) {
  return buildExtOrTrunc(TargetOpcode::G_ZEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildAnyExtOrTrunc(const DstOp &Res,
                                                         const SrcOp &Op) {
  return buildExtOrTrunc(TargetOpcode::G_ANYEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildCast(const DstOp &Dst,
                                                const SrcOp &Src) {
  LLT SrcTy = Src.getLLTTy(*getMRI());
  LLT DstTy = Dst.getLLTTy(*getMRI());
  if (SrcTy == DstTy)
    return buildCopy(Dst, Src);

  unsigned Opcode;
  if (SrcTy.isPointer() && DstTy.isScalar())
    Opcode = TargetOpcode::G_PTRTOINT;
  else if (DstTy.isPointer() && SrcTy.isScalar())
    Opcode = TargetOpcode::G_INTTOPTR;
  else {
    assert(!SrcTy.isPointer() && !DstTy.isPointer() && "no G_ADDRCAST yet");
    Opcode = TargetOpcode::G_BITCAST;
  }

  return buildInstr(Opcode, Dst, Src);
}

MachineInstrBuilder MachineIRBuilder::buildExtract(const DstOp &Dst,
                                                   const SrcOp &Src,
                                                   uint64_t Index) {
  LLT SrcTy = Src.getLLTTy(*getMRI());
  LLT DstTy = Dst.getLLTTy(*getMRI());

#ifndef NDEBUG
  assert(SrcTy.isValid() && "invalid operand type");
  assert(DstTy.isValid() && "invalid operand type");
  assert(Index + DstTy.getSizeInBits() <= SrcTy.getSizeInBits() &&
         "extracting off end of register");
#endif

  if (DstTy.getSizeInBits() == SrcTy.getSizeInBits()) {
    assert(Index == 0 && "extraction past the end of a register");
    return buildCast(Dst, Src);
  }

  auto Extract = buildInstr(TargetOpcode::G_EXTRACT);
  Dst.addDefToMIB(*getMRI(), Extract);
  Src.addSrcToMIB(Extract);
  Extract.addImm(Index);
  return Extract;
}

void MachineIRBuilder::buildSequence(Register Res, ArrayRef<Register> Ops,
                                     ArrayRef<uint64_t> Indices) {
#ifndef NDEBUG
  assert(Ops.size() == Indices.size() && "incompatible args");
  assert(!Ops.empty() && "invalid trivial sequence");
  assert(llvm::is_sorted(Indices) &&
         "sequence offsets must be in ascending order");

  assert(getMRI()->getType(Res).isValid() && "invalid operand type");
  for (auto Op : Ops)
    assert(getMRI()->getType(Op).isValid() && "invalid operand type");
#endif

  LLT ResTy = getMRI()->getType(Res);
  LLT OpTy = getMRI()->getType(Ops[0]);
  unsigned OpSize = OpTy.getSizeInBits();
  bool MaybeMerge = true;
  for (unsigned i = 0; i < Ops.size(); ++i) {
    if (getMRI()->getType(Ops[i]) != OpTy || Indices[i] != i * OpSize) {
      MaybeMerge = false;
      break;
    }
  }

  if (MaybeMerge && Ops.size() * OpSize == ResTy.getSizeInBits()) {
    buildMerge(Res, Ops);
    return;
  }

  Register ResIn = getMRI()->createGenericVirtualRegister(ResTy);
  buildUndef(ResIn);

  for (unsigned i = 0; i < Ops.size(); ++i) {
    Register ResOut = i + 1 == Ops.size()
                          ? Res
                          : getMRI()->createGenericVirtualRegister(ResTy);
    buildInsert(ResOut, ResIn, Ops[i], Indices[i]);
    ResIn = ResOut;
  }
}
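
// Example for buildSequence above (illustrative): two s32 registers at offsets
// 0 and 32 filling an s64 result collapse into a single G_MERGE_VALUES; any
// other layout falls back to G_IMPLICIT_DEF followed by a chain of G_INSERTs,
// one per source operand.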

MachineInstrBuilder MachineIRBuilder::buildUndef(const DstOp &Res) {
  return buildInstr(TargetOpcode::G_IMPLICIT_DEF, {Res}, {});
}

MachineInstrBuilder MachineIRBuilder::buildMerge(const DstOp &Res,
                                                 ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
  assert(TmpVec.size() > 1);
  return buildInstr(TargetOpcode::G_MERGE_VALUES, Res, TmpVec);
}

MachineInstrBuilder
MachineIRBuilder::buildMerge(const DstOp &Res,
                             std::initializer_list<SrcOp> Ops) {
  assert(Ops.size() > 1);
  return buildInstr(TargetOpcode::G_MERGE_VALUES, Res, Ops);
}

MachineInstrBuilder MachineIRBuilder::buildUnmerge(ArrayRef<LLT> Res,
                                                   const SrcOp &Op) {
  // Unfortunately to convert from ArrayRef<LLT> to ArrayRef<DstOp>,
  // we need some temporary storage for the DstOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<DstOp, 8> TmpVec(Res.begin(), Res.end());
  assert(TmpVec.size() > 1);
  return buildInstr(TargetOpcode::G_UNMERGE_VALUES, TmpVec, Op);
}

MachineInstrBuilder MachineIRBuilder::buildUnmerge(LLT Res,
                                                   const SrcOp &Op) {
  unsigned NumReg =
      Op.getLLTTy(*getMRI()).getSizeInBits() / Res.getSizeInBits();
  SmallVector<Register, 8> TmpVec;
  for (unsigned I = 0; I != NumReg; ++I)
    TmpVec.push_back(getMRI()->createGenericVirtualRegister(Res));
  return buildUnmerge(TmpVec, Op);
}
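
// Example for the LLT-based buildUnmerge above (illustrative):
//   buildUnmerge(LLT::scalar(32), S64Reg)
// creates two fresh s32 virtual registers and one G_UNMERGE_VALUES defining
// both halves of the 64-bit source.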

MachineInstrBuilder MachineIRBuilder::buildUnmerge(ArrayRef<Register> Res,
                                                   const SrcOp &Op) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<DstOp>,
  // we need some temporary storage for the DstOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<DstOp, 8> TmpVec(Res.begin(), Res.end());
  assert(TmpVec.size() > 1);
  return buildInstr(TargetOpcode::G_UNMERGE_VALUES, TmpVec, Op);
}

MachineInstrBuilder MachineIRBuilder::buildBuildVector(const DstOp &Res,
                                                       ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
  return buildInstr(TargetOpcode::G_BUILD_VECTOR, Res, TmpVec);
}

MachineInstrBuilder MachineIRBuilder::buildSplatVector(const DstOp &Res,
                                                       const SrcOp &Src) {
  SmallVector<SrcOp, 8> TmpVec(Res.getLLTTy(*getMRI()).getNumElements(), Src);
  return buildInstr(TargetOpcode::G_BUILD_VECTOR, Res, TmpVec);
}

MachineInstrBuilder
MachineIRBuilder::buildBuildVectorTrunc(const DstOp &Res,
                                        ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
  return buildInstr(TargetOpcode::G_BUILD_VECTOR_TRUNC, Res, TmpVec);
}

MachineInstrBuilder MachineIRBuilder::buildShuffleSplat(const DstOp &Res,
                                                        const SrcOp &Src) {
  LLT DstTy = Res.getLLTTy(*getMRI());
  assert(Src.getLLTTy(*getMRI()) == DstTy.getElementType() &&
         "Expected Src to match Dst elt ty");
  auto UndefVec = buildUndef(DstTy);
  auto Zero = buildConstant(LLT::scalar(64), 0);
  auto InsElt = buildInsertVectorElement(DstTy, UndefVec, Src, Zero);
  SmallVector<int, 16> ZeroMask(DstTy.getNumElements());
  return buildShuffleVector(DstTy, InsElt, UndefVec, ZeroMask);
}
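
// Usage sketch for buildShuffleSplat above (illustrative): splatting a scalar
// into a <4 x s32> destination emits G_IMPLICIT_DEF, G_CONSTANT 0,
// G_INSERT_VECTOR_ELT and a G_SHUFFLE_VECTOR with an all-zero mask, so every
// output lane reads lane 0 of the vector holding the inserted scalar.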

MachineInstrBuilder MachineIRBuilder::buildShuffleVector(const DstOp &Res,
                                                         const SrcOp &Src1,
                                                         const SrcOp &Src2,
                                                         ArrayRef<int> Mask) {
  LLT DstTy = Res.getLLTTy(*getMRI());
  LLT Src1Ty = Src1.getLLTTy(*getMRI());
  LLT Src2Ty = Src2.getLLTTy(*getMRI());
  assert(Src1Ty.getNumElements() + Src2Ty.getNumElements() >= Mask.size());
  assert(DstTy.getElementType() == Src1Ty.getElementType() &&
         DstTy.getElementType() == Src2Ty.getElementType());
  (void)Src1Ty;
  (void)Src2Ty;
  ArrayRef<int> MaskAlloc = getMF().allocateShuffleMask(Mask);
  return buildInstr(TargetOpcode::G_SHUFFLE_VECTOR, {DstTy}, {Src1, Src2})
      .addShuffleMask(MaskAlloc);
}

MachineInstrBuilder
MachineIRBuilder::buildConcatVectors(const DstOp &Res, ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
  return buildInstr(TargetOpcode::G_CONCAT_VECTORS, Res, TmpVec);
}

MachineInstrBuilder MachineIRBuilder::buildInsert(const DstOp &Res,
                                                  const SrcOp &Src,
                                                  const SrcOp &Op,
                                                  unsigned Index) {
  assert(Index + Op.getLLTTy(*getMRI()).getSizeInBits() <=
             Res.getLLTTy(*getMRI()).getSizeInBits() &&
         "insertion past the end of a register");

  if (Res.getLLTTy(*getMRI()).getSizeInBits() ==
      Op.getLLTTy(*getMRI()).getSizeInBits()) {
    return buildCast(Res, Op);
  }

  return buildInstr(TargetOpcode::G_INSERT, Res, {Src, Op, uint64_t(Index)});
}

MachineInstrBuilder MachineIRBuilder::buildIntrinsic(Intrinsic::ID ID,
                                                     ArrayRef<Register> ResultRegs,
                                                     bool HasSideEffects) {
  auto MIB =
      buildInstr(HasSideEffects ? TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS
                                : TargetOpcode::G_INTRINSIC);
  for (unsigned ResultReg : ResultRegs)
    MIB.addDef(ResultReg);
  MIB.addIntrinsicID(ID);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildIntrinsic(Intrinsic::ID ID,
                                                     ArrayRef<DstOp> Results,
                                                     bool HasSideEffects) {
  auto MIB =
      buildInstr(HasSideEffects ? TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS
                                : TargetOpcode::G_INTRINSIC);
  for (DstOp Result : Results)
    Result.addDefToMIB(*getMRI(), MIB);
  MIB.addIntrinsicID(ID);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildTrunc(const DstOp &Res,
                                                 const SrcOp &Op) {
  return buildInstr(TargetOpcode::G_TRUNC, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildFPTrunc(const DstOp &Res,
                                                   const SrcOp &Op,
                                                   Optional<unsigned> Flags) {
  return buildInstr(TargetOpcode::G_FPTRUNC, Res, Op, Flags);
}

MachineInstrBuilder MachineIRBuilder::buildICmp(CmpInst::Predicate Pred,
                                                const DstOp &Res,
                                                const SrcOp &Op0,
                                                const SrcOp &Op1) {
  return buildInstr(TargetOpcode::G_ICMP, Res, {Pred, Op0, Op1});
}

MachineInstrBuilder MachineIRBuilder::buildFCmp(CmpInst::Predicate Pred,
                                                const DstOp &Res,
                                                const SrcOp &Op0,
                                                const SrcOp &Op1,
                                                Optional<unsigned> Flags) {

  return buildInstr(TargetOpcode::G_FCMP, Res, {Pred, Op0, Op1}, Flags);
}

MachineInstrBuilder MachineIRBuilder::buildSelect(const DstOp &Res,
                                                  const SrcOp &Tst,
                                                  const SrcOp &Op0,
                                                  const SrcOp &Op1,
                                                  Optional<unsigned> Flags) {

  return buildInstr(TargetOpcode::G_SELECT, {Res}, {Tst, Op0, Op1}, Flags);
}

MachineInstrBuilder
MachineIRBuilder::buildInsertVectorElement(const DstOp &Res, const SrcOp &Val,
                                           const SrcOp &Elt, const SrcOp &Idx) {
  return buildInstr(TargetOpcode::G_INSERT_VECTOR_ELT, Res, {Val, Elt, Idx});
}

MachineInstrBuilder
MachineIRBuilder::buildExtractVectorElement(const DstOp &Res, const SrcOp &Val,
                                            const SrcOp &Idx) {
  return buildInstr(TargetOpcode::G_EXTRACT_VECTOR_ELT, Res, {Val, Idx});
}

MachineInstrBuilder MachineIRBuilder::buildAtomicCmpXchgWithSuccess(
    Register OldValRes, Register SuccessRes, Register Addr, Register CmpVal,
    Register NewVal, MachineMemOperand &MMO) {
#ifndef NDEBUG
  LLT OldValResTy = getMRI()->getType(OldValRes);
  LLT SuccessResTy = getMRI()->getType(SuccessRes);
  LLT AddrTy = getMRI()->getType(Addr);
  LLT CmpValTy = getMRI()->getType(CmpVal);
  LLT NewValTy = getMRI()->getType(NewVal);
  assert(OldValResTy.isScalar() && "invalid operand type");
  assert(SuccessResTy.isScalar() && "invalid operand type");
  assert(AddrTy.isPointer() && "invalid operand type");
  assert(CmpValTy.isValid() && "invalid operand type");
  assert(NewValTy.isValid() && "invalid operand type");
  assert(OldValResTy == CmpValTy && "type mismatch");
  assert(OldValResTy == NewValTy && "type mismatch");
#endif

  return buildInstr(TargetOpcode::G_ATOMIC_CMPXCHG_WITH_SUCCESS)
      .addDef(OldValRes)
      .addDef(SuccessRes)
      .addUse(Addr)
      .addUse(CmpVal)
      .addUse(NewVal)
      .addMemOperand(&MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicCmpXchg(Register OldValRes, Register Addr,
                                     Register CmpVal, Register NewVal,
                                     MachineMemOperand &MMO) {
#ifndef NDEBUG
  LLT OldValResTy = getMRI()->getType(OldValRes);
  LLT AddrTy = getMRI()->getType(Addr);
  LLT CmpValTy = getMRI()->getType(CmpVal);
  LLT NewValTy = getMRI()->getType(NewVal);
  assert(OldValResTy.isScalar() && "invalid operand type");
  assert(AddrTy.isPointer() && "invalid operand type");
  assert(CmpValTy.isValid() && "invalid operand type");
  assert(NewValTy.isValid() && "invalid operand type");
  assert(OldValResTy == CmpValTy && "type mismatch");
  assert(OldValResTy == NewValTy && "type mismatch");
#endif

  return buildInstr(TargetOpcode::G_ATOMIC_CMPXCHG)
      .addDef(OldValRes)
      .addUse(Addr)
      .addUse(CmpVal)
      .addUse(NewVal)
      .addMemOperand(&MMO);
}

MachineInstrBuilder MachineIRBuilder::buildAtomicRMW(
    unsigned Opcode, const DstOp &OldValRes,
    const SrcOp &Addr, const SrcOp &Val,
    MachineMemOperand &MMO) {

#ifndef NDEBUG
  LLT OldValResTy = OldValRes.getLLTTy(*getMRI());
  LLT AddrTy = Addr.getLLTTy(*getMRI());
  LLT ValTy = Val.getLLTTy(*getMRI());
  assert(OldValResTy.isScalar() && "invalid operand type");
  assert(AddrTy.isPointer() && "invalid operand type");
  assert(ValTy.isValid() && "invalid operand type");
  assert(OldValResTy == ValTy && "type mismatch");
  assert(MMO.isAtomic() && "not atomic mem operand");
#endif

  auto MIB = buildInstr(Opcode);
  OldValRes.addDefToMIB(*getMRI(), MIB);
  Addr.addSrcToMIB(MIB);
  Val.addSrcToMIB(MIB);
  MIB.addMemOperand(&MMO);
  return MIB;
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWXchg(Register OldValRes, Register Addr,
                                     Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_XCHG, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWAdd(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_ADD, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWSub(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_SUB, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWAnd(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_AND, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWNand(Register OldValRes, Register Addr,
                                     Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_NAND, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder MachineIRBuilder::buildAtomicRMWOr(Register OldValRes,
                                                       Register Addr,
                                                       Register Val,
                                                       MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_OR, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWXor(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_XOR, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWMax(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_MAX, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWMin(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_MIN, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWUmax(Register OldValRes, Register Addr,
                                     Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_UMAX, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWUmin(Register OldValRes, Register Addr,
                                     Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_UMIN, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWFAdd(
    const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &Val,
    MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_FADD, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWFSub(const DstOp &OldValRes, const SrcOp &Addr,
                                     const SrcOp &Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_FSUB, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildFence(unsigned Ordering, unsigned Scope) {
  return buildInstr(TargetOpcode::G_FENCE)
      .addImm(Ordering)
      .addImm(Scope);
}

MachineInstrBuilder
MachineIRBuilder::buildBlockAddress(Register Res, const BlockAddress *BA) {
#ifndef NDEBUG
  assert(getMRI()->getType(Res).isPointer() && "invalid res type");
#endif

  return buildInstr(TargetOpcode::G_BLOCK_ADDR).addDef(Res).addBlockAddress(BA);
}

void MachineIRBuilder::validateTruncExt(const LLT DstTy, const LLT SrcTy,
                                        bool IsExtend) {
#ifndef NDEBUG
  if (DstTy.isVector()) {
    assert(SrcTy.isVector() && "mismatched cast between vector and non-vector");
    assert(SrcTy.getNumElements() == DstTy.getNumElements() &&
           "different number of elements in a trunc/ext");
  } else
    assert(DstTy.isScalar() && SrcTy.isScalar() && "invalid extend/trunc");

  if (IsExtend)
    assert(DstTy.getSizeInBits() > SrcTy.getSizeInBits() &&
           "invalid narrowing extend");
  else
    assert(DstTy.getSizeInBits() < SrcTy.getSizeInBits() &&
           "invalid widening trunc");
#endif
}

void MachineIRBuilder::validateSelectOp(const LLT ResTy, const LLT TstTy,
                                        const LLT Op0Ty, const LLT Op1Ty) {
#ifndef NDEBUG
  assert((ResTy.isScalar() || ResTy.isVector() || ResTy.isPointer()) &&
         "invalid operand type");
  assert((ResTy == Op0Ty && ResTy == Op1Ty) && "type mismatch");
  if (ResTy.isScalar() || ResTy.isPointer())
    assert(TstTy.isScalar() && "type mismatch");
  else
    assert((TstTy.isScalar() ||
            (TstTy.isVector() &&
             TstTy.getNumElements() == Op0Ty.getNumElements())) &&
           "type mismatch");
#endif
}

MachineInstrBuilder MachineIRBuilder::buildInstr(unsigned Opc,
                                                 ArrayRef<DstOp> DstOps,
                                                 ArrayRef<SrcOp> SrcOps,
                                                 Optional<unsigned> Flags) {
  switch (Opc) {
  default:
    break;
  case TargetOpcode::G_SELECT: {
    assert(DstOps.size() == 1 && "Invalid select");
    assert(SrcOps.size() == 3 && "Invalid select");
    validateSelectOp(
        DstOps[0].getLLTTy(*getMRI()), SrcOps[0].getLLTTy(*getMRI()),
        SrcOps[1].getLLTTy(*getMRI()), SrcOps[2].getLLTTy(*getMRI()));
    break;
  }
  case TargetOpcode::G_FNEG:
  case TargetOpcode::G_ABS:
    // All these are unary ops.
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 1 && "Invalid Srcs");
    validateUnaryOp(DstOps[0].getLLTTy(*getMRI()),
                    SrcOps[0].getLLTTy(*getMRI()));
    break;
  case TargetOpcode::G_ADD:
  case TargetOpcode::G_AND:
  case TargetOpcode::G_MUL:
  case TargetOpcode::G_OR:
  case TargetOpcode::G_SUB:
  case TargetOpcode::G_XOR:
  case TargetOpcode::G_UDIV:
  case TargetOpcode::G_SDIV:
  case TargetOpcode::G_UREM:
  case TargetOpcode::G_SREM:
  case TargetOpcode::G_SMIN:
  case TargetOpcode::G_SMAX:
  case TargetOpcode::G_UMIN:
  case TargetOpcode::G_UMAX:
  case TargetOpcode::G_UADDSAT:
  case TargetOpcode::G_SADDSAT:
  case TargetOpcode::G_USUBSAT:
  case TargetOpcode::G_SSUBSAT: {
    // All these are binary ops.
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 2 && "Invalid Srcs");
    validateBinaryOp(DstOps[0].getLLTTy(*getMRI()),
                     SrcOps[0].getLLTTy(*getMRI()),
                     SrcOps[1].getLLTTy(*getMRI()));
    break;
  }
  case TargetOpcode::G_SHL:
  case TargetOpcode::G_ASHR:
  case TargetOpcode::G_LSHR:
  case TargetOpcode::G_USHLSAT:
  case TargetOpcode::G_SSHLSAT: {
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 2 && "Invalid Srcs");
    validateShiftOp(DstOps[0].getLLTTy(*getMRI()),
                    SrcOps[0].getLLTTy(*getMRI()),
                    SrcOps[1].getLLTTy(*getMRI()));
    break;
  }
  case TargetOpcode::G_SEXT:
  case TargetOpcode::G_ZEXT:
  case TargetOpcode::G_ANYEXT:
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 1 && "Invalid Srcs");
    validateTruncExt(DstOps[0].getLLTTy(*getMRI()),
                     SrcOps[0].getLLTTy(*getMRI()), true);
    break;
  case TargetOpcode::G_TRUNC:
  case TargetOpcode::G_FPTRUNC: {
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 1 && "Invalid Srcs");
    validateTruncExt(DstOps[0].getLLTTy(*getMRI()),
                     SrcOps[0].getLLTTy(*getMRI()), false);
    break;
  }
  case TargetOpcode::G_BITCAST: {
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 1 && "Invalid Srcs");
    assert(DstOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
           SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() && "invalid bitcast");
    break;
  }
  case TargetOpcode::COPY:
    assert(DstOps.size() == 1 && "Invalid Dst");
    // If the caller wants to add a subreg source it has to be done separately
    // so we may not have any SrcOps at this point yet.
    break;
  case TargetOpcode::G_FCMP:
  case TargetOpcode::G_ICMP: {
    assert(DstOps.size() == 1 && "Invalid Dst Operands");
    assert(SrcOps.size() == 3 && "Invalid Src Operands");
    // For F/ICMP, the first src operand is the predicate, followed by
    // the two comparands.
    assert(SrcOps[0].getSrcOpKind() == SrcOp::SrcType::Ty_Predicate &&
           "Expecting predicate");
    assert([&]() -> bool {
      CmpInst::Predicate Pred = SrcOps[0].getPredicate();
      return Opc == TargetOpcode::G_ICMP ? CmpInst::isIntPredicate(Pred)
                                         : CmpInst::isFPPredicate(Pred);
    }() && "Invalid predicate");
    assert(SrcOps[1].getLLTTy(*getMRI()) == SrcOps[2].getLLTTy(*getMRI()) &&
           "Type mismatch");
    assert([&]() -> bool {
      LLT Op0Ty = SrcOps[1].getLLTTy(*getMRI());
      LLT DstTy = DstOps[0].getLLTTy(*getMRI());
      if (Op0Ty.isScalar() || Op0Ty.isPointer())
        return DstTy.isScalar();
      else
        return DstTy.isVector() &&
               DstTy.getNumElements() == Op0Ty.getNumElements();
    }() && "Type Mismatch");
    break;
  }
  case TargetOpcode::G_UNMERGE_VALUES: {
    assert(!DstOps.empty() && "Invalid trivial sequence");
    assert(SrcOps.size() == 1 && "Invalid src for Unmerge");
    assert(std::all_of(DstOps.begin(), DstOps.end(),
                       [&, this](const DstOp &Op) {
                         return Op.getLLTTy(*getMRI()) ==
                                DstOps[0].getLLTTy(*getMRI());
                       }) &&
           "type mismatch in output list");
    assert(DstOps.size() * DstOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
           SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
           "input operands do not cover output register");
    break;
  }
  case TargetOpcode::G_MERGE_VALUES: {
    assert(!SrcOps.empty() && "invalid trivial sequence");
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(std::all_of(SrcOps.begin(), SrcOps.end(),
                       [&, this](const SrcOp &Op) {
                         return Op.getLLTTy(*getMRI()) ==
                                SrcOps[0].getLLTTy(*getMRI());
                       }) &&
           "type mismatch in input list");
    assert(SrcOps.size() * SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
           DstOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
           "input operands do not cover output register");
    if (SrcOps.size() == 1)
      return buildCast(DstOps[0], SrcOps[0]);
    if (DstOps[0].getLLTTy(*getMRI()).isVector()) {
      if (SrcOps[0].getLLTTy(*getMRI()).isVector())
        return buildInstr(TargetOpcode::G_CONCAT_VECTORS, DstOps, SrcOps);
      return buildInstr(TargetOpcode::G_BUILD_VECTOR, DstOps, SrcOps);
    }
    break;
  }
  case TargetOpcode::G_EXTRACT_VECTOR_ELT: {
    assert(DstOps.size() == 1 && "Invalid Dst size");
    assert(SrcOps.size() == 2 && "Invalid Src size");
    assert(SrcOps[0].getLLTTy(*getMRI()).isVector() && "Invalid operand type");
    assert((DstOps[0].getLLTTy(*getMRI()).isScalar() ||
            DstOps[0].getLLTTy(*getMRI()).isPointer()) &&
           "Invalid operand type");
    assert(SrcOps[1].getLLTTy(*getMRI()).isScalar() && "Invalid operand type");
    assert(SrcOps[0].getLLTTy(*getMRI()).getElementType() ==
           DstOps[0].getLLTTy(*getMRI()) &&
           "Type mismatch");
    break;
  }
  case TargetOpcode::G_INSERT_VECTOR_ELT: {
    assert(DstOps.size() == 1 && "Invalid dst size");
    assert(SrcOps.size() == 3 && "Invalid src size");
    assert(DstOps[0].getLLTTy(*getMRI()).isVector() &&
           SrcOps[0].getLLTTy(*getMRI()).isVector() && "Invalid operand type");
    assert(DstOps[0].getLLTTy(*getMRI()).getElementType() ==
           SrcOps[1].getLLTTy(*getMRI()) &&
           "Type mismatch");
    assert(SrcOps[2].getLLTTy(*getMRI()).isScalar() && "Invalid index");
    assert(DstOps[0].getLLTTy(*getMRI()).getNumElements() ==
           SrcOps[0].getLLTTy(*getMRI()).getNumElements() &&
           "Type mismatch");
    break;
  }
  case TargetOpcode::G_BUILD_VECTOR: {
    assert(SrcOps.size() >= 2 && "Must have at least 2 operands");
    assert(DstOps.size() == 1 && "Invalid DstOps");
    assert(DstOps[0].getLLTTy(*getMRI()).isVector() &&
           "Res type must be a vector");
    assert(std::all_of(SrcOps.begin(), SrcOps.end(),
                       [&, this](const SrcOp &Op) {
                         return Op.getLLTTy(*getMRI()) ==
                                SrcOps[0].getLLTTy(*getMRI());
                       }) &&
           "type mismatch in input list");
    assert(SrcOps.size() * SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
           DstOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
           "input scalars do not exactly cover the output vector register");
    break;
  }
  case TargetOpcode::G_BUILD_VECTOR_TRUNC: {
    assert(SrcOps.size() >= 2 && "Must have at least 2 operands");
    assert(DstOps.size() == 1 && "Invalid DstOps");
    assert(DstOps[0].getLLTTy(*getMRI()).isVector() &&
           "Res type must be a vector");
    assert(std::all_of(SrcOps.begin(), SrcOps.end(),
                       [&, this](const SrcOp &Op) {
                         return Op.getLLTTy(*getMRI()) ==
                                SrcOps[0].getLLTTy(*getMRI());
                       }) &&
           "type mismatch in input list");
    if (SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
        DstOps[0].getLLTTy(*getMRI()).getElementType().getSizeInBits())
      return buildInstr(TargetOpcode::G_BUILD_VECTOR, DstOps, SrcOps);
    break;
  }
  case TargetOpcode::G_CONCAT_VECTORS: {
    assert(DstOps.size() == 1 && "Invalid DstOps");
    assert(SrcOps.size() >= 2 && "Must have at least 2 operands");
    assert(std::all_of(SrcOps.begin(), SrcOps.end(),
                       [&, this](const SrcOp &Op) {
                         return (Op.getLLTTy(*getMRI()).isVector() &&
                                 Op.getLLTTy(*getMRI()) ==
                                     SrcOps[0].getLLTTy(*getMRI()));
                       }) &&
           "type mismatch in input list");
    assert(SrcOps.size() * SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
           DstOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
           "input vectors do not exactly cover the output vector register");
    break;
  }
  case TargetOpcode::G_UADDE: {
    assert(DstOps.size() == 2 && "Invalid no of dst operands");
    assert(SrcOps.size() == 3 && "Invalid no of src operands");
    assert(DstOps[0].getLLTTy(*getMRI()).isScalar() && "Invalid operand");
    assert((DstOps[0].getLLTTy(*getMRI()) == SrcOps[0].getLLTTy(*getMRI())) &&
           (DstOps[0].getLLTTy(*getMRI()) == SrcOps[1].getLLTTy(*getMRI())) &&
           "Invalid operand");
    assert(DstOps[1].getLLTTy(*getMRI()).isScalar() && "Invalid operand");
    assert(DstOps[1].getLLTTy(*getMRI()) == SrcOps[2].getLLTTy(*getMRI()) &&
           "type mismatch");
    break;
  }
  }

  auto MIB = buildInstr(Opc);
  for (const DstOp &Op : DstOps)
    Op.addDefToMIB(*getMRI(), MIB);
  for (const SrcOp &Op : SrcOps)
    Op.addSrcToMIB(MIB);
  if (Flags)
    MIB->setFlags(*Flags);
  return MIB;
}