//===-- llvm/CodeGen/GlobalISel/LegalizerHelper.cpp -----------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file This file implements the LegalizerHelper class to legalize
/// individual instructions and the LegalizeMachineIR wrapper pass for the
/// primary legalization.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/GlobalISel/LegalizerHelper.h"
#include "llvm/CodeGen/GlobalISel/CallLowering.h"
#include "llvm/CodeGen/GlobalISel/LegalizerInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"


#define DEBUG_TYPE "legalizer"

using namespace llvm;
using namespace LegalizeActions;

LegalizerHelper::LegalizerHelper(MachineFunction &MF)
    : MRI(MF.getRegInfo()), LI(*MF.getSubtarget().getLegalizerInfo()) {
  MIRBuilder.setMF(MF);
}

LegalizerHelper::LegalizeResult
LegalizerHelper::legalizeInstrStep(MachineInstr &MI) {
  LLVM_DEBUG(dbgs() << "Legalizing: "; MI.print(dbgs()));

  auto Step = LI.getAction(MI, MRI);
  switch (Step.Action) {
  case Legal:
    LLVM_DEBUG(dbgs() << ".. Already legal\n");
    return AlreadyLegal;
  case Libcall:
    LLVM_DEBUG(dbgs() << ".. Convert to libcall\n");
    return libcall(MI);
  case NarrowScalar:
    LLVM_DEBUG(dbgs() << ".. Narrow scalar\n");
    return narrowScalar(MI, Step.TypeIdx, Step.NewType);
  case WidenScalar:
    LLVM_DEBUG(dbgs() << ".. Widen scalar\n");
    return widenScalar(MI, Step.TypeIdx, Step.NewType);
  case Lower:
    LLVM_DEBUG(dbgs() << ".. Lower\n");
    return lower(MI, Step.TypeIdx, Step.NewType);
  case FewerElements:
    LLVM_DEBUG(dbgs() << ".. Reduce number of elements\n");
    return fewerElementsVector(MI, Step.TypeIdx, Step.NewType);
  case Custom:
    LLVM_DEBUG(dbgs() << ".. Custom legalization\n");
    return LI.legalizeCustom(MI, MRI, MIRBuilder) ? Legalized
                                                  : UnableToLegalize;
  default:
    LLVM_DEBUG(dbgs() << ".. Unable to legalize\n");
    return UnableToLegalize;
  }
}

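// Split Reg into NumParts pieces of type Ty: fresh virtual registers are
// created, defined by a single G_UNMERGE_VALUES, and appended to VRegs.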
void LegalizerHelper::extractParts(unsigned Reg, LLT Ty, int NumParts,
                                   SmallVectorImpl<unsigned> &VRegs) {
  for (int i = 0; i < NumParts; ++i)
    VRegs.push_back(MRI.createGenericVirtualRegister(Ty));
  MIRBuilder.buildUnmerge(VRegs, Reg);
}

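// Map a generic arithmetic opcode and its scalar size in bits to the
// corresponding runtime library call.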
static RTLIB::Libcall getRTLibDesc(unsigned Opcode, unsigned Size) {
  switch (Opcode) {
  case TargetOpcode::G_SDIV:
    assert(Size == 32 && "Unsupported size");
    return RTLIB::SDIV_I32;
  case TargetOpcode::G_UDIV:
    assert(Size == 32 && "Unsupported size");
    return RTLIB::UDIV_I32;
  case TargetOpcode::G_SREM:
    assert(Size == 32 && "Unsupported size");
    return RTLIB::SREM_I32;
  case TargetOpcode::G_UREM:
    assert(Size == 32 && "Unsupported size");
    return RTLIB::UREM_I32;
  case TargetOpcode::G_FADD:
    assert((Size == 32 || Size == 64) && "Unsupported size");
    return Size == 64 ? RTLIB::ADD_F64 : RTLIB::ADD_F32;
  case TargetOpcode::G_FSUB:
    assert((Size == 32 || Size == 64) && "Unsupported size");
    return Size == 64 ? RTLIB::SUB_F64 : RTLIB::SUB_F32;
  case TargetOpcode::G_FMUL:
    assert((Size == 32 || Size == 64) && "Unsupported size");
    return Size == 64 ? RTLIB::MUL_F64 : RTLIB::MUL_F32;
  case TargetOpcode::G_FDIV:
    assert((Size == 32 || Size == 64) && "Unsupported size");
    return Size == 64 ? RTLIB::DIV_F64 : RTLIB::DIV_F32;
  case TargetOpcode::G_FREM:
    return Size == 64 ? RTLIB::REM_F64 : RTLIB::REM_F32;
  case TargetOpcode::G_FPOW:
    return Size == 64 ? RTLIB::POW_F64 : RTLIB::POW_F32;
  case TargetOpcode::G_FMA:
    assert((Size == 32 || Size == 64) && "Unsupported size");
    return Size == 64 ? RTLIB::FMA_F64 : RTLIB::FMA_F32;
  }
  llvm_unreachable("Unknown libcall function");
}

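// Emit a call to the runtime routine identified by Libcall, passing Args and
// writing the return value into Result. Marks the containing function as
// having calls.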
LegalizerHelper::LegalizeResult
llvm::createLibcall(MachineIRBuilder &MIRBuilder, RTLIB::Libcall Libcall,
                    const CallLowering::ArgInfo &Result,
                    ArrayRef<CallLowering::ArgInfo> Args) {
  auto &CLI = *MIRBuilder.getMF().getSubtarget().getCallLowering();
  auto &TLI = *MIRBuilder.getMF().getSubtarget().getTargetLowering();
  const char *Name = TLI.getLibcallName(Libcall);

  MIRBuilder.getMF().getFrameInfo().setHasCalls(true);
  if (!CLI.lowerCall(MIRBuilder, TLI.getLibcallCallingConv(Libcall),
                     MachineOperand::CreateES(Name), Result, Args))
    return LegalizerHelper::UnableToLegalize;

  return LegalizerHelper::Legalized;
}

// Useful for libcalls where all operands have the same type.
static LegalizerHelper::LegalizeResult
simpleLibcall(MachineInstr &MI, MachineIRBuilder &MIRBuilder, unsigned Size,
              Type *OpType) {
  auto Libcall = getRTLibDesc(MI.getOpcode(), Size);

  SmallVector<CallLowering::ArgInfo, 3> Args;
  for (unsigned i = 1; i < MI.getNumOperands(); i++)
    Args.push_back({MI.getOperand(i).getReg(), OpType});
  return createLibcall(MIRBuilder, Libcall, {MI.getOperand(0).getReg(), OpType},
                       Args);
}

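// Map a generic conversion opcode plus its source and destination IR types to
// the corresponding runtime library call.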
static RTLIB::Libcall getConvRTLibDesc(unsigned Opcode, Type *ToType,
                                       Type *FromType) {
  auto ToMVT = MVT::getVT(ToType);
  auto FromMVT = MVT::getVT(FromType);

  switch (Opcode) {
  case TargetOpcode::G_FPEXT:
    return RTLIB::getFPEXT(FromMVT, ToMVT);
  case TargetOpcode::G_FPTRUNC:
    return RTLIB::getFPROUND(FromMVT, ToMVT);
  case TargetOpcode::G_FPTOSI:
    return RTLIB::getFPTOSINT(FromMVT, ToMVT);
  case TargetOpcode::G_FPTOUI:
    return RTLIB::getFPTOUINT(FromMVT, ToMVT);
  case TargetOpcode::G_SITOFP:
    return RTLIB::getSINTTOFP(FromMVT, ToMVT);
  case TargetOpcode::G_UITOFP:
    return RTLIB::getUINTTOFP(FromMVT, ToMVT);
  }
  llvm_unreachable("Unsupported libcall function");
}

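// Emit a libcall for a conversion instruction: a single source operand of
// FromType and a single result of ToType.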
static LegalizerHelper::LegalizeResult
conversionLibcall(MachineInstr &MI, MachineIRBuilder &MIRBuilder, Type *ToType,
                  Type *FromType) {
  RTLIB::Libcall Libcall = getConvRTLibDesc(MI.getOpcode(), ToType, FromType);
  return createLibcall(MIRBuilder, Libcall, {MI.getOperand(0).getReg(), ToType},
                       {{MI.getOperand(1).getReg(), FromType}});
}

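// Replace MI with a call into the runtime library. Only the sizes handled
// below (32-bit integers, 32/64-bit floats) are currently supported.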
LegalizerHelper::LegalizeResult
LegalizerHelper::libcall(MachineInstr &MI) {
  LLT LLTy = MRI.getType(MI.getOperand(0).getReg());
  unsigned Size = LLTy.getSizeInBits();
  auto &Ctx = MIRBuilder.getMF().getFunction().getContext();

  MIRBuilder.setInstr(MI);

  switch (MI.getOpcode()) {
  default:
    return UnableToLegalize;
  case TargetOpcode::G_SDIV:
  case TargetOpcode::G_UDIV:
  case TargetOpcode::G_SREM:
  case TargetOpcode::G_UREM: {
    Type *HLTy = Type::getInt32Ty(Ctx);
    auto Status = simpleLibcall(MI, MIRBuilder, Size, HLTy);
    if (Status != Legalized)
      return Status;
    break;
  }
  case TargetOpcode::G_FADD:
  case TargetOpcode::G_FSUB:
  case TargetOpcode::G_FMUL:
  case TargetOpcode::G_FDIV:
  case TargetOpcode::G_FMA:
  case TargetOpcode::G_FPOW:
  case TargetOpcode::G_FREM: {
    Type *HLTy = Size == 64 ? Type::getDoubleTy(Ctx) : Type::getFloatTy(Ctx);
    auto Status = simpleLibcall(MI, MIRBuilder, Size, HLTy);
    if (Status != Legalized)
      return Status;
    break;
  }
  case TargetOpcode::G_FPEXT: {
    // FIXME: Support other floating point types (half, fp128 etc)
    unsigned FromSize = MRI.getType(MI.getOperand(1).getReg()).getSizeInBits();
    unsigned ToSize = MRI.getType(MI.getOperand(0).getReg()).getSizeInBits();
    if (ToSize != 64 || FromSize != 32)
      return UnableToLegalize;
    LegalizeResult Status = conversionLibcall(
        MI, MIRBuilder, Type::getDoubleTy(Ctx), Type::getFloatTy(Ctx));
    if (Status != Legalized)
      return Status;
    break;
  }
  case TargetOpcode::G_FPTRUNC: {
    // FIXME: Support other floating point types (half, fp128 etc)
    unsigned FromSize = MRI.getType(MI.getOperand(1).getReg()).getSizeInBits();
    unsigned ToSize = MRI.getType(MI.getOperand(0).getReg()).getSizeInBits();
    if (ToSize != 32 || FromSize != 64)
      return UnableToLegalize;
    LegalizeResult Status = conversionLibcall(
        MI, MIRBuilder, Type::getFloatTy(Ctx), Type::getDoubleTy(Ctx));
    if (Status != Legalized)
      return Status;
    break;
  }
  case TargetOpcode::G_FPTOSI:
  case TargetOpcode::G_FPTOUI: {
    // FIXME: Support other types
    unsigned FromSize = MRI.getType(MI.getOperand(1).getReg()).getSizeInBits();
    unsigned ToSize = MRI.getType(MI.getOperand(0).getReg()).getSizeInBits();
    if (ToSize != 32 || (FromSize != 32 && FromSize != 64))
      return UnableToLegalize;
    LegalizeResult Status = conversionLibcall(
        MI, MIRBuilder, Type::getInt32Ty(Ctx),
        FromSize == 64 ? Type::getDoubleTy(Ctx) : Type::getFloatTy(Ctx));
    if (Status != Legalized)
      return Status;
    break;
  }
  case TargetOpcode::G_SITOFP:
  case TargetOpcode::G_UITOFP: {
    // FIXME: Support other types
    unsigned FromSize = MRI.getType(MI.getOperand(1).getReg()).getSizeInBits();
    unsigned ToSize = MRI.getType(MI.getOperand(0).getReg()).getSizeInBits();
    if (FromSize != 32 || (ToSize != 32 && ToSize != 64))
      return UnableToLegalize;
    LegalizeResult Status = conversionLibcall(
        MI, MIRBuilder,
        ToSize == 64 ? Type::getDoubleTy(Ctx) : Type::getFloatTy(Ctx),
        Type::getInt32Ty(Ctx));
    if (Status != Legalized)
      return Status;
    break;
  }
  }

  MI.eraseFromParent();
  return Legalized;
}

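// Legalize MI by splitting its too-wide scalar operands into NarrowTy-sized
// pieces, performing the operation piecewise, and reassembling the result
// with G_MERGE_VALUES.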
LegalizerHelper::LegalizeResult LegalizerHelper::narrowScalar(MachineInstr &MI,
                                                              unsigned TypeIdx,
                                                              LLT NarrowTy) {
  // FIXME: Don't know how to handle secondary types yet.
  if (TypeIdx != 0 && MI.getOpcode() != TargetOpcode::G_EXTRACT)
    return UnableToLegalize;

  MIRBuilder.setInstr(MI);

  uint64_t SizeOp0 = MRI.getType(MI.getOperand(0).getReg()).getSizeInBits();
  uint64_t NarrowSize = NarrowTy.getSizeInBits();

  switch (MI.getOpcode()) {
  default:
    return UnableToLegalize;
  case TargetOpcode::G_IMPLICIT_DEF: {
    // FIXME: add support for when SizeOp0 isn't an exact multiple of
    // NarrowSize.
    if (SizeOp0 % NarrowSize != 0)
      return UnableToLegalize;
    int NumParts = SizeOp0 / NarrowSize;

    SmallVector<unsigned, 2> DstRegs;
    for (int i = 0; i < NumParts; ++i)
      DstRegs.push_back(
          MIRBuilder.buildUndef(NarrowTy)->getOperand(0).getReg());
    MIRBuilder.buildMerge(MI.getOperand(0).getReg(), DstRegs);
    MI.eraseFromParent();
    return Legalized;
  }
  case TargetOpcode::G_ADD: {
    // FIXME: add support for when SizeOp0 isn't an exact multiple of
    // NarrowSize.
    if (SizeOp0 % NarrowSize != 0)
      return UnableToLegalize;
    // Expand in terms of carry-setting/consuming G_UADDE instructions.
    int NumParts = SizeOp0 / NarrowTy.getSizeInBits();

    SmallVector<unsigned, 2> Src1Regs, Src2Regs, DstRegs;
    extractParts(MI.getOperand(1).getReg(), NarrowTy, NumParts, Src1Regs);
    extractParts(MI.getOperand(2).getReg(), NarrowTy, NumParts, Src2Regs);

    unsigned CarryIn = MRI.createGenericVirtualRegister(LLT::scalar(1));
    MIRBuilder.buildConstant(CarryIn, 0);

    for (int i = 0; i < NumParts; ++i) {
      unsigned DstReg = MRI.createGenericVirtualRegister(NarrowTy);
      unsigned CarryOut = MRI.createGenericVirtualRegister(LLT::scalar(1));

      MIRBuilder.buildUAdde(DstReg, CarryOut, Src1Regs[i],
                            Src2Regs[i], CarryIn);

      DstRegs.push_back(DstReg);
      CarryIn = CarryOut;
    }
    unsigned DstReg = MI.getOperand(0).getReg();
    MIRBuilder.buildMerge(DstReg, DstRegs);
    MI.eraseFromParent();
    return Legalized;
  }
  case TargetOpcode::G_EXTRACT: {
    if (TypeIdx != 1)
      return UnableToLegalize;

    int64_t SizeOp1 = MRI.getType(MI.getOperand(1).getReg()).getSizeInBits();
    // FIXME: add support for when SizeOp1 isn't an exact multiple of
    // NarrowSize.
    if (SizeOp1 % NarrowSize != 0)
      return UnableToLegalize;
    int NumParts = SizeOp1 / NarrowSize;

    SmallVector<unsigned, 2> SrcRegs, DstRegs;
    SmallVector<uint64_t, 2> Indexes;
    extractParts(MI.getOperand(1).getReg(), NarrowTy, NumParts, SrcRegs);

    unsigned OpReg = MI.getOperand(0).getReg();
    uint64_t OpStart = MI.getOperand(2).getImm();
    uint64_t OpSize = MRI.getType(OpReg).getSizeInBits();
    for (int i = 0; i < NumParts; ++i) {
      unsigned SrcStart = i * NarrowSize;

      if (SrcStart + NarrowSize <= OpStart || SrcStart >= OpStart + OpSize) {
        // No part of the extract uses this subregister, ignore it.
        continue;
      } else if (SrcStart == OpStart && NarrowTy == MRI.getType(OpReg)) {
        // The entire subregister is extracted, forward the value.
        DstRegs.push_back(SrcRegs[i]);
        continue;
      }

      // OpSegStart is where this destination segment would start in OpReg if it
      // extended infinitely in both directions.
      int64_t ExtractOffset;
      uint64_t SegSize;
      if (OpStart < SrcStart) {
        ExtractOffset = 0;
        SegSize = std::min(NarrowSize, OpStart + OpSize - SrcStart);
      } else {
        ExtractOffset = OpStart - SrcStart;
        SegSize = std::min(SrcStart + NarrowSize - OpStart, OpSize);
      }

      unsigned SegReg = SrcRegs[i];
      if (ExtractOffset != 0 || SegSize != NarrowSize) {
        // A genuine extract is needed.
        SegReg = MRI.createGenericVirtualRegister(LLT::scalar(SegSize));
        MIRBuilder.buildExtract(SegReg, SrcRegs[i], ExtractOffset);
      }

      DstRegs.push_back(SegReg);
    }

    MIRBuilder.buildMerge(MI.getOperand(0).getReg(), DstRegs);
    MI.eraseFromParent();
    return Legalized;
  }
  case TargetOpcode::G_INSERT: {
    // FIXME: add support for when SizeOp0 isn't an exact multiple of
    // NarrowSize.
    if (SizeOp0 % NarrowSize != 0)
      return UnableToLegalize;

    int NumParts = SizeOp0 / NarrowSize;

    SmallVector<unsigned, 2> SrcRegs, DstRegs;
    SmallVector<uint64_t, 2> Indexes;
    extractParts(MI.getOperand(1).getReg(), NarrowTy, NumParts, SrcRegs);

    unsigned OpReg = MI.getOperand(2).getReg();
    uint64_t OpStart = MI.getOperand(3).getImm();
    uint64_t OpSize = MRI.getType(OpReg).getSizeInBits();
    for (int i = 0; i < NumParts; ++i) {
      unsigned DstStart = i * NarrowSize;

      if (DstStart + NarrowSize <= OpStart || DstStart >= OpStart + OpSize) {
        // No part of the insert affects this subregister, forward the original.
        DstRegs.push_back(SrcRegs[i]);
        continue;
      } else if (DstStart == OpStart && NarrowTy == MRI.getType(OpReg)) {
        // The entire subregister is defined by this insert, forward the new
        // value.
        DstRegs.push_back(OpReg);
        continue;
      }

      // OpSegStart is where this destination segment would start in OpReg if it
      // extended infinitely in both directions.
      int64_t ExtractOffset, InsertOffset;
      uint64_t SegSize;
      if (OpStart < DstStart) {
        InsertOffset = 0;
        ExtractOffset = DstStart - OpStart;
        SegSize = std::min(NarrowSize, OpStart + OpSize - DstStart);
      } else {
        InsertOffset = OpStart - DstStart;
        ExtractOffset = 0;
        SegSize =
            std::min(NarrowSize - InsertOffset, OpStart + OpSize - DstStart);
      }

      unsigned SegReg = OpReg;
      if (ExtractOffset != 0 || SegSize != OpSize) {
        // A genuine extract is needed.
        SegReg = MRI.createGenericVirtualRegister(LLT::scalar(SegSize));
        MIRBuilder.buildExtract(SegReg, OpReg, ExtractOffset);
      }

      unsigned DstReg = MRI.createGenericVirtualRegister(NarrowTy);
      MIRBuilder.buildInsert(DstReg, SrcRegs[i], SegReg, InsertOffset);
      DstRegs.push_back(DstReg);
    }

    assert(DstRegs.size() == (unsigned)NumParts && "not all parts covered");
    MIRBuilder.buildMerge(MI.getOperand(0).getReg(), DstRegs);
    MI.eraseFromParent();
    return Legalized;
  }
  case TargetOpcode::G_LOAD: {
    // FIXME: add support for when SizeOp0 isn't an exact multiple of
    // NarrowSize.
    if (SizeOp0 % NarrowSize != 0)
      return UnableToLegalize;

    const auto &MMO = **MI.memoperands_begin();
    // This implementation doesn't work for atomics. Give up instead of doing
    // something invalid.
    if (MMO.getOrdering() != AtomicOrdering::NotAtomic ||
        MMO.getFailureOrdering() != AtomicOrdering::NotAtomic)
      return UnableToLegalize;

    int NumParts = SizeOp0 / NarrowSize;
    LLT OffsetTy = LLT::scalar(
        MRI.getType(MI.getOperand(1).getReg()).getScalarSizeInBits());

    SmallVector<unsigned, 2> DstRegs;
    for (int i = 0; i < NumParts; ++i) {
      unsigned DstReg = MRI.createGenericVirtualRegister(NarrowTy);
      unsigned SrcReg = 0;
      unsigned Adjustment = i * NarrowSize / 8;

      MachineMemOperand *SplitMMO = MIRBuilder.getMF().getMachineMemOperand(
          MMO.getPointerInfo().getWithOffset(Adjustment), MMO.getFlags(),
          NarrowSize / 8, i == 0 ? MMO.getAlignment() : NarrowSize / 8,
          MMO.getAAInfo(), MMO.getRanges(), MMO.getSyncScopeID(),
          MMO.getOrdering(), MMO.getFailureOrdering());

      MIRBuilder.materializeGEP(SrcReg, MI.getOperand(1).getReg(), OffsetTy,
                                Adjustment);

      MIRBuilder.buildLoad(DstReg, SrcReg, *SplitMMO);

      DstRegs.push_back(DstReg);
    }
    unsigned DstReg = MI.getOperand(0).getReg();
    MIRBuilder.buildMerge(DstReg, DstRegs);
    MI.eraseFromParent();
    return Legalized;
  }
  case TargetOpcode::G_STORE: {
    // FIXME: add support for when SizeOp0 isn't an exact multiple of
    // NarrowSize.
    if (SizeOp0 % NarrowSize != 0)
      return UnableToLegalize;

    const auto &MMO = **MI.memoperands_begin();
    // This implementation doesn't work for atomics. Give up instead of doing
    // something invalid.
    if (MMO.getOrdering() != AtomicOrdering::NotAtomic ||
        MMO.getFailureOrdering() != AtomicOrdering::NotAtomic)
      return UnableToLegalize;

    int NumParts = SizeOp0 / NarrowSize;
    LLT OffsetTy = LLT::scalar(
        MRI.getType(MI.getOperand(1).getReg()).getScalarSizeInBits());

    SmallVector<unsigned, 2> SrcRegs;
    extractParts(MI.getOperand(0).getReg(), NarrowTy, NumParts, SrcRegs);

    for (int i = 0; i < NumParts; ++i) {
      unsigned DstReg = 0;
      unsigned Adjustment = i * NarrowSize / 8;

      MachineMemOperand *SplitMMO = MIRBuilder.getMF().getMachineMemOperand(
          MMO.getPointerInfo().getWithOffset(Adjustment), MMO.getFlags(),
          NarrowSize / 8, i == 0 ? MMO.getAlignment() : NarrowSize / 8,
          MMO.getAAInfo(), MMO.getRanges(), MMO.getSyncScopeID(),
          MMO.getOrdering(), MMO.getFailureOrdering());

      MIRBuilder.materializeGEP(DstReg, MI.getOperand(1).getReg(), OffsetTy,
                                Adjustment);

      MIRBuilder.buildStore(SrcRegs[i], DstReg, *SplitMMO);
    }
    MI.eraseFromParent();
    return Legalized;
  }
  case TargetOpcode::G_CONSTANT: {
    // FIXME: add support for when SizeOp0 isn't an exact multiple of
    // NarrowSize.
    if (SizeOp0 % NarrowSize != 0)
      return UnableToLegalize;
    int NumParts = SizeOp0 / NarrowSize;
    const APInt &Cst = MI.getOperand(1).getCImm()->getValue();
    LLVMContext &Ctx = MIRBuilder.getMF().getFunction().getContext();

    SmallVector<unsigned, 2> DstRegs;
    for (int i = 0; i < NumParts; ++i) {
      unsigned DstReg = MRI.createGenericVirtualRegister(NarrowTy);
      ConstantInt *CI =
          ConstantInt::get(Ctx, Cst.lshr(NarrowSize * i).trunc(NarrowSize));
      MIRBuilder.buildConstant(DstReg, *CI);
      DstRegs.push_back(DstReg);
    }
    unsigned DstReg = MI.getOperand(0).getReg();
    MIRBuilder.buildMerge(DstReg, DstRegs);
    MI.eraseFromParent();
    return Legalized;
  }
  case TargetOpcode::G_OR: {
    // Legalize bitwise operation:
    // A = BinOp<Ty> B, C
    // into:
    // B1, ..., BN = G_UNMERGE_VALUES B
    // C1, ..., CN = G_UNMERGE_VALUES C
    // A1 = BinOp<Ty/N> B1, C1
    // ...
    // AN = BinOp<Ty/N> BN, CN
    // A = G_MERGE_VALUES A1, ..., AN

    // FIXME: add support for when SizeOp0 isn't an exact multiple of
    // NarrowSize.
    if (SizeOp0 % NarrowSize != 0)
      return UnableToLegalize;
    int NumParts = SizeOp0 / NarrowSize;

    // List the registers where the destination will be scattered.
    SmallVector<unsigned, 2> DstRegs;
    // List the registers where the first argument will be split.
    SmallVector<unsigned, 2> SrcsReg1;
    // List the registers where the second argument will be split.
    SmallVector<unsigned, 2> SrcsReg2;
    // Create all the temporary registers.
    for (int i = 0; i < NumParts; ++i) {
      unsigned DstReg = MRI.createGenericVirtualRegister(NarrowTy);
      unsigned SrcReg1 = MRI.createGenericVirtualRegister(NarrowTy);
      unsigned SrcReg2 = MRI.createGenericVirtualRegister(NarrowTy);

      DstRegs.push_back(DstReg);
      SrcsReg1.push_back(SrcReg1);
      SrcsReg2.push_back(SrcReg2);
    }
    // Explode the big arguments into smaller chunks.
    MIRBuilder.buildUnmerge(SrcsReg1, MI.getOperand(1).getReg());
    MIRBuilder.buildUnmerge(SrcsReg2, MI.getOperand(2).getReg());

    // Do the operation on each small part.
    for (int i = 0; i < NumParts; ++i)
      MIRBuilder.buildOr(DstRegs[i], SrcsReg1[i], SrcsReg2[i]);

    // Gather the destination registers into the final destination.
    unsigned DstReg = MI.getOperand(0).getReg();
    MIRBuilder.buildMerge(DstReg, DstRegs);
    MI.eraseFromParent();
    return Legalized;
  }
  }
}

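// Widen source operand OpIdx of MI to WideTy by inserting an extension of
// kind ExtOpcode before MI and rewriting the operand to use the extended
// register.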
void LegalizerHelper::widenScalarSrc(MachineInstr &MI, LLT WideTy,
                                     unsigned OpIdx, unsigned ExtOpcode) {
  MachineOperand &MO = MI.getOperand(OpIdx);
  auto ExtB = MIRBuilder.buildInstr(ExtOpcode, WideTy, MO.getReg());
  MO.setReg(ExtB->getOperand(0).getReg());
}

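// Widen the definition at OpIdx to WideTy: MI now defines a fresh WideTy
// register, and a truncation of kind TruncOpcode inserted after MI produces
// the original narrow value.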
void LegalizerHelper::widenScalarDst(MachineInstr &MI, LLT WideTy,
                                     unsigned OpIdx, unsigned TruncOpcode) {
  MachineOperand &MO = MI.getOperand(OpIdx);
  unsigned DstExt = MRI.createGenericVirtualRegister(WideTy);
  MIRBuilder.setInsertPt(MIRBuilder.getMBB(), ++MIRBuilder.getInsertPt());
  MIRBuilder.buildInstr(TruncOpcode, MO.getReg(), DstExt);
  MO.setReg(DstExt);
}

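// Legalize MI by performing the operation in the wider type WideTy, extending
// the sources and truncating the result back to the original type.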
LegalizerHelper::LegalizeResult
LegalizerHelper::widenScalar(MachineInstr &MI, unsigned TypeIdx, LLT WideTy) {
  MIRBuilder.setInstr(MI);

  switch (MI.getOpcode()) {
  default:
    return UnableToLegalize;

  case TargetOpcode::G_ADD:
  case TargetOpcode::G_AND:
  case TargetOpcode::G_MUL:
  case TargetOpcode::G_OR:
  case TargetOpcode::G_XOR:
  case TargetOpcode::G_SUB:
    // Perform operation at larger width (any extension is fine here, high bits
    // don't affect the result) and then truncate the result back to the
    // original type.
    widenScalarSrc(MI, WideTy, 1, TargetOpcode::G_ANYEXT);
    widenScalarSrc(MI, WideTy, 2, TargetOpcode::G_ANYEXT);
    widenScalarDst(MI, WideTy);
    MIRBuilder.recordInsertion(&MI);
    return Legalized;

  case TargetOpcode::G_SHL:
    widenScalarSrc(MI, WideTy, 1, TargetOpcode::G_ANYEXT);
    // The "number of bits to shift" operand must preserve its value as an
    // unsigned integer:
    widenScalarSrc(MI, WideTy, 2, TargetOpcode::G_ZEXT);
    widenScalarDst(MI, WideTy);
    MIRBuilder.recordInsertion(&MI);
    return Legalized;

  case TargetOpcode::G_SDIV:
  case TargetOpcode::G_SREM:
    widenScalarSrc(MI, WideTy, 1, TargetOpcode::G_SEXT);
    widenScalarSrc(MI, WideTy, 2, TargetOpcode::G_SEXT);
    widenScalarDst(MI, WideTy);
    MIRBuilder.recordInsertion(&MI);
    return Legalized;

  case TargetOpcode::G_ASHR:
    widenScalarSrc(MI, WideTy, 1, TargetOpcode::G_SEXT);
    // The "number of bits to shift" operand must preserve its value as an
    // unsigned integer:
    widenScalarSrc(MI, WideTy, 2, TargetOpcode::G_ZEXT);
    widenScalarDst(MI, WideTy);
    MIRBuilder.recordInsertion(&MI);
    return Legalized;

  case TargetOpcode::G_UDIV:
  case TargetOpcode::G_UREM:
  case TargetOpcode::G_LSHR:
    widenScalarSrc(MI, WideTy, 1, TargetOpcode::G_ZEXT);
    widenScalarSrc(MI, WideTy, 2, TargetOpcode::G_ZEXT);
    widenScalarDst(MI, WideTy);
    MIRBuilder.recordInsertion(&MI);
    return Legalized;

  case TargetOpcode::G_SELECT:
    if (TypeIdx != 0)
      return UnableToLegalize;
    // Perform operation at larger width (any extension is fine here, high bits
    // don't affect the result) and then truncate the result back to the
    // original type.
    widenScalarSrc(MI, WideTy, 2, TargetOpcode::G_ANYEXT);
    widenScalarSrc(MI, WideTy, 3, TargetOpcode::G_ANYEXT);
    widenScalarDst(MI, WideTy);
    MIRBuilder.recordInsertion(&MI);
    return Legalized;

  case TargetOpcode::G_FPTOSI:
  case TargetOpcode::G_FPTOUI:
    if (TypeIdx != 0)
      return UnableToLegalize;
    widenScalarDst(MI, WideTy);
    MIRBuilder.recordInsertion(&MI);
    return Legalized;

  case TargetOpcode::G_SITOFP:
    if (TypeIdx != 1)
      return UnableToLegalize;
    widenScalarSrc(MI, WideTy, 1, TargetOpcode::G_SEXT);
    MIRBuilder.recordInsertion(&MI);
    return Legalized;

  case TargetOpcode::G_UITOFP:
    if (TypeIdx != 1)
      return UnableToLegalize;
    widenScalarSrc(MI, WideTy, 1, TargetOpcode::G_ZEXT);
    MIRBuilder.recordInsertion(&MI);
    return Legalized;

  case TargetOpcode::G_INSERT:
    if (TypeIdx != 0)
      return UnableToLegalize;
    widenScalarSrc(MI, WideTy, 1, TargetOpcode::G_ANYEXT);
    widenScalarDst(MI, WideTy);
    MIRBuilder.recordInsertion(&MI);
    return Legalized;

  case TargetOpcode::G_LOAD:
    // For some types like i24, we might try to widen to i32. To properly handle
    // this we should be using a dedicated extending load, until then avoid
    // trying to legalize.
    if (alignTo(MRI.getType(MI.getOperand(0).getReg()).getSizeInBits(), 8) !=
        WideTy.getSizeInBits())
      return UnableToLegalize;
    LLVM_FALLTHROUGH;
  case TargetOpcode::G_SEXTLOAD:
  case TargetOpcode::G_ZEXTLOAD:
    widenScalarDst(MI, WideTy);
    MIRBuilder.recordInsertion(&MI);
    return Legalized;

  case TargetOpcode::G_STORE: {
    if (MRI.getType(MI.getOperand(0).getReg()) != LLT::scalar(1) ||
        WideTy != LLT::scalar(8))
      return UnableToLegalize;

    widenScalarSrc(MI, WideTy, 0, TargetOpcode::G_ZEXT);
    MIRBuilder.recordInsertion(&MI);
    return Legalized;
  }
  case TargetOpcode::G_CONSTANT: {
    MachineOperand &SrcMO = MI.getOperand(1);
    LLVMContext &Ctx = MIRBuilder.getMF().getFunction().getContext();
    const APInt &Val = SrcMO.getCImm()->getValue().sext(WideTy.getSizeInBits());
    SrcMO.setCImm(ConstantInt::get(Ctx, Val));

    widenScalarDst(MI, WideTy);
    MIRBuilder.recordInsertion(&MI);
    return Legalized;
  }
  case TargetOpcode::G_FCONSTANT: {
    MachineOperand &SrcMO = MI.getOperand(1);
    LLVMContext &Ctx = MIRBuilder.getMF().getFunction().getContext();
    APFloat Val = SrcMO.getFPImm()->getValueAPF();
    bool LosesInfo;
    switch (WideTy.getSizeInBits()) {
    case 32:
      Val.convert(APFloat::IEEEsingle(), APFloat::rmTowardZero, &LosesInfo);
      break;
    case 64:
      Val.convert(APFloat::IEEEdouble(), APFloat::rmTowardZero, &LosesInfo);
      break;
    default:
      llvm_unreachable("Unhandled fp widen type");
    }
    SrcMO.setFPImm(ConstantFP::get(Ctx, Val));

    widenScalarDst(MI, WideTy, 0, TargetOpcode::G_FPTRUNC);
    MIRBuilder.recordInsertion(&MI);
    return Legalized;
  }
  case TargetOpcode::G_BRCOND:
    widenScalarSrc(MI, WideTy, 0, TargetOpcode::G_ANYEXT);
    MIRBuilder.recordInsertion(&MI);
    return Legalized;

  case TargetOpcode::G_FCMP:
    if (TypeIdx == 0)
      widenScalarDst(MI, WideTy);
    else {
      widenScalarSrc(MI, WideTy, 2, TargetOpcode::G_FPEXT);
      widenScalarSrc(MI, WideTy, 3, TargetOpcode::G_FPEXT);
    }
    MIRBuilder.recordInsertion(&MI);
    return Legalized;

  case TargetOpcode::G_ICMP:
    if (TypeIdx == 0)
      widenScalarDst(MI, WideTy);
    else {
      unsigned ExtOpcode = CmpInst::isSigned(static_cast<CmpInst::Predicate>(
                               MI.getOperand(1).getPredicate()))
                               ? TargetOpcode::G_SEXT
                               : TargetOpcode::G_ZEXT;
      widenScalarSrc(MI, WideTy, 2, ExtOpcode);
      widenScalarSrc(MI, WideTy, 3, ExtOpcode);
    }
    MIRBuilder.recordInsertion(&MI);
    return Legalized;

  case TargetOpcode::G_GEP:
    assert(TypeIdx == 1 && "unable to legalize pointer of GEP");
    widenScalarSrc(MI, WideTy, 2, TargetOpcode::G_SEXT);
    MIRBuilder.recordInsertion(&MI);
    return Legalized;

  case TargetOpcode::G_PHI: {
    assert(TypeIdx == 0 && "Expecting only Idx 0");

    for (unsigned I = 1; I < MI.getNumOperands(); I += 2) {
      MachineBasicBlock &OpMBB = *MI.getOperand(I + 1).getMBB();
      MIRBuilder.setInsertPt(OpMBB, OpMBB.getFirstTerminator());
      widenScalarSrc(MI, WideTy, I, TargetOpcode::G_ANYEXT);
    }

    MachineBasicBlock &MBB = *MI.getParent();
    MIRBuilder.setInsertPt(MBB, --MBB.getFirstNonPHI());
    widenScalarDst(MI, WideTy);
    MIRBuilder.recordInsertion(&MI);
    return Legalized;
  }
  }
}

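// Legalize MI by rewriting it as a sequence of simpler generic instructions
// of the same type Ty.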
LegalizerHelper::LegalizeResult
LegalizerHelper::lower(MachineInstr &MI, unsigned TypeIdx, LLT Ty) {
  using namespace TargetOpcode;
  MIRBuilder.setInstr(MI);

  switch(MI.getOpcode()) {
  default:
    return UnableToLegalize;
  case TargetOpcode::G_SREM:
  case TargetOpcode::G_UREM: {
    unsigned QuotReg = MRI.createGenericVirtualRegister(Ty);
    MIRBuilder.buildInstr(MI.getOpcode() == G_SREM ? G_SDIV : G_UDIV)
        .addDef(QuotReg)
        .addUse(MI.getOperand(1).getReg())
        .addUse(MI.getOperand(2).getReg());

    unsigned ProdReg = MRI.createGenericVirtualRegister(Ty);
    MIRBuilder.buildMul(ProdReg, QuotReg, MI.getOperand(2).getReg());
    MIRBuilder.buildSub(MI.getOperand(0).getReg(), MI.getOperand(1).getReg(),
                        ProdReg);
    MI.eraseFromParent();
    return Legalized;
  }
  case TargetOpcode::G_SMULO:
  case TargetOpcode::G_UMULO: {
    // Generate G_UMULH/G_SMULH to check for overflow and a normal G_MUL for the
    // result.
    unsigned Res = MI.getOperand(0).getReg();
    unsigned Overflow = MI.getOperand(1).getReg();
    unsigned LHS = MI.getOperand(2).getReg();
    unsigned RHS = MI.getOperand(3).getReg();

    MIRBuilder.buildMul(Res, LHS, RHS);

    unsigned Opcode = MI.getOpcode() == TargetOpcode::G_SMULO
                          ? TargetOpcode::G_SMULH
                          : TargetOpcode::G_UMULH;

    unsigned HiPart = MRI.createGenericVirtualRegister(Ty);
    MIRBuilder.buildInstr(Opcode)
      .addDef(HiPart)
      .addUse(LHS)
      .addUse(RHS);

    unsigned Zero = MRI.createGenericVirtualRegister(Ty);
    MIRBuilder.buildConstant(Zero, 0);

    // For *signed* multiply, overflow is detected by checking:
    // (hi != (lo >> bitwidth-1))
    if (Opcode == TargetOpcode::G_SMULH) {
      unsigned Shifted = MRI.createGenericVirtualRegister(Ty);
      unsigned ShiftAmt = MRI.createGenericVirtualRegister(Ty);
      MIRBuilder.buildConstant(ShiftAmt, Ty.getSizeInBits() - 1);
      MIRBuilder.buildInstr(TargetOpcode::G_ASHR)
        .addDef(Shifted)
        .addUse(Res)
        .addUse(ShiftAmt);
      MIRBuilder.buildICmp(CmpInst::ICMP_NE, Overflow, HiPart, Shifted);
    } else {
      MIRBuilder.buildICmp(CmpInst::ICMP_NE, Overflow, HiPart, Zero);
    }
    MI.eraseFromParent();
    return Legalized;
  }
  case TargetOpcode::G_FNEG: {
    // TODO: Handle vector types once we are able to
    // represent them.
    if (Ty.isVector())
      return UnableToLegalize;
    unsigned Res = MI.getOperand(0).getReg();
    Type *ZeroTy;
    LLVMContext &Ctx = MIRBuilder.getMF().getFunction().getContext();
    switch (Ty.getSizeInBits()) {
    case 16:
      ZeroTy = Type::getHalfTy(Ctx);
      break;
    case 32:
      ZeroTy = Type::getFloatTy(Ctx);
      break;
    case 64:
      ZeroTy = Type::getDoubleTy(Ctx);
      break;
    case 128:
      ZeroTy = Type::getFP128Ty(Ctx);
      break;
    default:
      llvm_unreachable("unexpected floating-point type");
    }
    ConstantFP &ZeroForNegation =
        *cast<ConstantFP>(ConstantFP::getZeroValueForNegation(ZeroTy));
    auto Zero = MIRBuilder.buildFConstant(Ty, ZeroForNegation);
    MIRBuilder.buildInstr(TargetOpcode::G_FSUB)
        .addDef(Res)
        .addUse(Zero->getOperand(0).getReg())
        .addUse(MI.getOperand(1).getReg());
    MI.eraseFromParent();
    return Legalized;
  }
  case TargetOpcode::G_FSUB: {
    // Lower (G_FSUB LHS, RHS) to (G_FADD LHS, (G_FNEG RHS)).
    // First, check if G_FNEG is marked as Lower. If so, we may
    // end up with an infinite loop as G_FSUB is used to legalize G_FNEG.
    if (LI.getAction({G_FNEG, {Ty}}).Action == Lower)
      return UnableToLegalize;
    unsigned Res = MI.getOperand(0).getReg();
    unsigned LHS = MI.getOperand(1).getReg();
    unsigned RHS = MI.getOperand(2).getReg();
    unsigned Neg = MRI.createGenericVirtualRegister(Ty);
    MIRBuilder.buildInstr(TargetOpcode::G_FNEG).addDef(Neg).addUse(RHS);
    MIRBuilder.buildInstr(TargetOpcode::G_FADD)
        .addDef(Res)
        .addUse(LHS)
        .addUse(Neg);
    MI.eraseFromParent();
    return Legalized;
  }
  case TargetOpcode::G_ATOMIC_CMPXCHG_WITH_SUCCESS: {
    unsigned OldValRes = MI.getOperand(0).getReg();
    unsigned SuccessRes = MI.getOperand(1).getReg();
    unsigned Addr = MI.getOperand(2).getReg();
    unsigned CmpVal = MI.getOperand(3).getReg();
    unsigned NewVal = MI.getOperand(4).getReg();
    MIRBuilder.buildAtomicCmpXchg(OldValRes, Addr, CmpVal, NewVal,
                                  **MI.memoperands_begin());
    MIRBuilder.buildICmp(CmpInst::ICMP_EQ, SuccessRes, OldValRes, CmpVal);
    MI.eraseFromParent();
    return Legalized;
  }
  case TargetOpcode::G_LOAD:
  case TargetOpcode::G_SEXTLOAD:
  case TargetOpcode::G_ZEXTLOAD: {
    // Lower to a memory-width G_LOAD and a G_SEXT/G_ZEXT/G_ANYEXT
    unsigned DstReg = MI.getOperand(0).getReg();
    unsigned PtrReg = MI.getOperand(1).getReg();
    LLT DstTy = MRI.getType(DstReg);
    auto &MMO = **MI.memoperands_begin();

    if (DstTy.getSizeInBits() == MMO.getSize() /* in bytes */ * 8) {
      // In the case of G_LOAD, this was a non-extending load already and we're
      // about to lower to the same instruction.
      if (MI.getOpcode() == TargetOpcode::G_LOAD)
        return UnableToLegalize;
      MIRBuilder.buildLoad(DstReg, PtrReg, MMO);
      MI.eraseFromParent();
      return Legalized;
    }

    if (DstTy.isScalar()) {
      unsigned TmpReg = MRI.createGenericVirtualRegister(
          LLT::scalar(MMO.getSize() /* in bytes */ * 8));
      MIRBuilder.buildLoad(TmpReg, PtrReg, MMO);
      switch (MI.getOpcode()) {
      default:
        llvm_unreachable("Unexpected opcode");
      case TargetOpcode::G_LOAD:
        MIRBuilder.buildAnyExt(DstReg, TmpReg);
        break;
      case TargetOpcode::G_SEXTLOAD:
        MIRBuilder.buildSExt(DstReg, TmpReg);
        break;
      case TargetOpcode::G_ZEXTLOAD:
        MIRBuilder.buildZExt(DstReg, TmpReg);
        break;
      }
      MI.eraseFromParent();
      return Legalized;
    }

    return UnableToLegalize;
  }
  }
}

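// Legalize a vector operation by splitting it into operations on
// NarrowTy-sized sub-vectors.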
LegalizerHelper::LegalizeResult
LegalizerHelper::fewerElementsVector(MachineInstr &MI, unsigned TypeIdx,
                                     LLT NarrowTy) {
  // FIXME: Don't know how to handle secondary types yet.
  if (TypeIdx != 0)
    return UnableToLegalize;
  switch (MI.getOpcode()) {
  default:
    return UnableToLegalize;
  case TargetOpcode::G_ADD: {
    unsigned NarrowSize = NarrowTy.getSizeInBits();
    unsigned DstReg = MI.getOperand(0).getReg();
    unsigned Size = MRI.getType(DstReg).getSizeInBits();
    int NumParts = Size / NarrowSize;
    // FIXME: Don't know how to handle the situation where the small vectors
    // aren't all the same size yet.
    if (Size % NarrowSize != 0)
      return UnableToLegalize;

    MIRBuilder.setInstr(MI);

    SmallVector<unsigned, 2> Src1Regs, Src2Regs, DstRegs;
    extractParts(MI.getOperand(1).getReg(), NarrowTy, NumParts, Src1Regs);
    extractParts(MI.getOperand(2).getReg(), NarrowTy, NumParts, Src2Regs);

    for (int i = 0; i < NumParts; ++i) {
      unsigned DstReg = MRI.createGenericVirtualRegister(NarrowTy);
      MIRBuilder.buildAdd(DstReg, Src1Regs[i], Src2Regs[i]);
      DstRegs.push_back(DstReg);
    }

    MIRBuilder.buildMerge(DstReg, DstRegs);
    MI.eraseFromParent();
    return Legalized;
  }
  }
}