//===-- TargetLowering.cpp - Implement the TargetLowering class -----------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements the TargetLowering class.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetLoweringObjectFile.h"
#include "llvm/Target/TargetMachine.h"
#include <cctype>
using namespace llvm;

/// NOTE: The TargetMachine owns TLOF.
TargetLowering::TargetLowering(const TargetMachine &tm)
    : TargetLoweringBase(tm) {}

const char *TargetLowering::getTargetNodeName(unsigned Opcode) const {
  return nullptr;
}

bool TargetLowering::isPositionIndependent() const {
  return getTargetMachine().isPositionIndependent();
}

/// Check whether a given call node is in tail position within its function. If
/// so, it sets Chain to the input chain of the tail call.
bool TargetLowering::isInTailCallPosition(SelectionDAG &DAG, SDNode *Node,
                                          SDValue &Chain) const {
  const Function &F = DAG.getMachineFunction().getFunction();

  // Conservatively require the attributes of the call to match those of
  // the return. Ignore noalias because it doesn't affect the call sequence.
  AttributeList CallerAttrs = F.getAttributes();
  if (AttrBuilder(CallerAttrs, AttributeList::ReturnIndex)
          .removeAttribute(Attribute::NoAlias)
          .hasAttributes())
    return false;

  // It's not safe to eliminate the sign / zero extension of the return value.
  if (CallerAttrs.hasAttribute(AttributeList::ReturnIndex, Attribute::ZExt) ||
      CallerAttrs.hasAttribute(AttributeList::ReturnIndex, Attribute::SExt))
    return false;

  // Check if the only use is a function return node.
  return isUsedByReturnOnly(Node, Chain);
}

bool TargetLowering::parametersInCSRMatch(const MachineRegisterInfo &MRI,
    const uint32_t *CallerPreservedMask,
    const SmallVectorImpl<CCValAssign> &ArgLocs,
    const SmallVectorImpl<SDValue> &OutVals) const {
  for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {
    const CCValAssign &ArgLoc = ArgLocs[I];
    if (!ArgLoc.isRegLoc())
      continue;
    unsigned Reg = ArgLoc.getLocReg();
    // Only look at callee saved registers.
    if (MachineOperand::clobbersPhysReg(CallerPreservedMask, Reg))
      continue;
    // Check that we pass the value used for the caller.
    // (We look for a CopyFromReg reading a virtual register that is used
    //  for the function live-in value of register Reg)
    SDValue Value = OutVals[I];
    if (Value->getOpcode() != ISD::CopyFromReg)
      return false;
    unsigned ArgReg = cast<RegisterSDNode>(Value->getOperand(1))->getReg();
    if (MRI.getLiveInPhysReg(ArgReg) != Reg)
      return false;
  }
  return true;
}

/// Set CallLoweringInfo attribute flags based on a call instruction
/// and called function attributes.
void TargetLoweringBase::ArgListEntry::setAttributes(ImmutableCallSite *CS,
                                                     unsigned ArgIdx) {
  IsSExt = CS->paramHasAttr(ArgIdx, Attribute::SExt);
  IsZExt = CS->paramHasAttr(ArgIdx, Attribute::ZExt);
  IsInReg = CS->paramHasAttr(ArgIdx, Attribute::InReg);
  IsSRet = CS->paramHasAttr(ArgIdx, Attribute::StructRet);
  IsNest = CS->paramHasAttr(ArgIdx, Attribute::Nest);
  IsByVal = CS->paramHasAttr(ArgIdx, Attribute::ByVal);
  IsInAlloca = CS->paramHasAttr(ArgIdx, Attribute::InAlloca);
  IsReturned = CS->paramHasAttr(ArgIdx, Attribute::Returned);
  IsSwiftSelf = CS->paramHasAttr(ArgIdx, Attribute::SwiftSelf);
  IsSwiftError = CS->paramHasAttr(ArgIdx, Attribute::SwiftError);
  Alignment = CS->getParamAlignment(ArgIdx);
}

/// Generate a libcall taking the given operands as arguments and returning a
/// result of type RetVT.
std::pair<SDValue, SDValue>
TargetLowering::makeLibCall(SelectionDAG &DAG, RTLIB::Libcall LC, EVT RetVT,
                            ArrayRef<SDValue> Ops, bool isSigned,
                            const SDLoc &dl, bool doesNotReturn,
                            bool isReturnValueUsed) const {
  TargetLowering::ArgListTy Args;
  Args.reserve(Ops.size());

  TargetLowering::ArgListEntry Entry;
  for (SDValue Op : Ops) {
    Entry.Node = Op;
    Entry.Ty = Entry.Node.getValueType().getTypeForEVT(*DAG.getContext());
    Entry.IsSExt = shouldSignExtendTypeInLibCall(Op.getValueType(), isSigned);
    Entry.IsZExt = !shouldSignExtendTypeInLibCall(Op.getValueType(), isSigned);
    Args.push_back(Entry);
  }

  if (LC == RTLIB::UNKNOWN_LIBCALL)
    report_fatal_error("Unsupported library call operation!");
  SDValue Callee = DAG.getExternalSymbol(getLibcallName(LC),
                                         getPointerTy(DAG.getDataLayout()));

  Type *RetTy = RetVT.getTypeForEVT(*DAG.getContext());
  TargetLowering::CallLoweringInfo CLI(DAG);
  bool signExtend = shouldSignExtendTypeInLibCall(RetVT, isSigned);
  CLI.setDebugLoc(dl)
      .setChain(DAG.getEntryNode())
      .setLibCallee(getLibcallCallingConv(LC), RetTy, Callee, std::move(Args))
      .setNoReturn(doesNotReturn)
      .setDiscardResult(!isReturnValueUsed)
      .setSExtResult(signExtend)
      .setZExtResult(!signExtend);
  return LowerCallTo(CLI);
}
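
// As a usage sketch (illustrative only; this particular call does not appear
// in this file): a target with no 64-bit divide instruction could expand an
// i64 sdiv node N via
//   SDValue Ops[2] = { N->getOperand(0), N->getOperand(1) };
//   std::pair<SDValue, SDValue> R =
//       makeLibCall(DAG, RTLIB::SDIV_I64, MVT::i64, Ops, /*isSigned=*/true, dl);
// where R.first is the i64 result and R.second is the output chain.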

/// Soften the operands of a comparison. This code is shared among BR_CC,
/// SELECT_CC, and SETCC handlers.
void TargetLowering::softenSetCCOperands(SelectionDAG &DAG, EVT VT,
                                         SDValue &NewLHS, SDValue &NewRHS,
                                         ISD::CondCode &CCCode,
                                         const SDLoc &dl) const {
  assert((VT == MVT::f32 || VT == MVT::f64 || VT == MVT::f128 ||
          VT == MVT::ppcf128) &&
         "Unsupported setcc type!");

  // Expand into one or more soft-fp libcall(s).
  RTLIB::Libcall LC1 = RTLIB::UNKNOWN_LIBCALL, LC2 = RTLIB::UNKNOWN_LIBCALL;
  bool ShouldInvertCC = false;
  switch (CCCode) {
  case ISD::SETEQ:
  case ISD::SETOEQ:
    LC1 = (VT == MVT::f32) ? RTLIB::OEQ_F32 :
          (VT == MVT::f64) ? RTLIB::OEQ_F64 :
          (VT == MVT::f128) ? RTLIB::OEQ_F128 : RTLIB::OEQ_PPCF128;
    break;
  case ISD::SETNE:
  case ISD::SETUNE:
    LC1 = (VT == MVT::f32) ? RTLIB::UNE_F32 :
          (VT == MVT::f64) ? RTLIB::UNE_F64 :
          (VT == MVT::f128) ? RTLIB::UNE_F128 : RTLIB::UNE_PPCF128;
    break;
  case ISD::SETGE:
  case ISD::SETOGE:
    LC1 = (VT == MVT::f32) ? RTLIB::OGE_F32 :
          (VT == MVT::f64) ? RTLIB::OGE_F64 :
          (VT == MVT::f128) ? RTLIB::OGE_F128 : RTLIB::OGE_PPCF128;
    break;
  case ISD::SETLT:
  case ISD::SETOLT:
    LC1 = (VT == MVT::f32) ? RTLIB::OLT_F32 :
          (VT == MVT::f64) ? RTLIB::OLT_F64 :
          (VT == MVT::f128) ? RTLIB::OLT_F128 : RTLIB::OLT_PPCF128;
    break;
  case ISD::SETLE:
  case ISD::SETOLE:
    LC1 = (VT == MVT::f32) ? RTLIB::OLE_F32 :
          (VT == MVT::f64) ? RTLIB::OLE_F64 :
          (VT == MVT::f128) ? RTLIB::OLE_F128 : RTLIB::OLE_PPCF128;
    break;
  case ISD::SETGT:
  case ISD::SETOGT:
    LC1 = (VT == MVT::f32) ? RTLIB::OGT_F32 :
          (VT == MVT::f64) ? RTLIB::OGT_F64 :
          (VT == MVT::f128) ? RTLIB::OGT_F128 : RTLIB::OGT_PPCF128;
    break;
  case ISD::SETUO:
    LC1 = (VT == MVT::f32) ? RTLIB::UO_F32 :
          (VT == MVT::f64) ? RTLIB::UO_F64 :
          (VT == MVT::f128) ? RTLIB::UO_F128 : RTLIB::UO_PPCF128;
    break;
  case ISD::SETO:
    LC1 = (VT == MVT::f32) ? RTLIB::O_F32 :
          (VT == MVT::f64) ? RTLIB::O_F64 :
          (VT == MVT::f128) ? RTLIB::O_F128 : RTLIB::O_PPCF128;
    break;
  case ISD::SETONE:
    // SETONE = SETOLT | SETOGT
    LC1 = (VT == MVT::f32) ? RTLIB::OLT_F32 :
          (VT == MVT::f64) ? RTLIB::OLT_F64 :
          (VT == MVT::f128) ? RTLIB::OLT_F128 : RTLIB::OLT_PPCF128;
    LC2 = (VT == MVT::f32) ? RTLIB::OGT_F32 :
          (VT == MVT::f64) ? RTLIB::OGT_F64 :
          (VT == MVT::f128) ? RTLIB::OGT_F128 : RTLIB::OGT_PPCF128;
    break;
  case ISD::SETUEQ:
    LC1 = (VT == MVT::f32) ? RTLIB::UO_F32 :
          (VT == MVT::f64) ? RTLIB::UO_F64 :
          (VT == MVT::f128) ? RTLIB::UO_F128 : RTLIB::UO_PPCF128;
    LC2 = (VT == MVT::f32) ? RTLIB::OEQ_F32 :
          (VT == MVT::f64) ? RTLIB::OEQ_F64 :
          (VT == MVT::f128) ? RTLIB::OEQ_F128 : RTLIB::OEQ_PPCF128;
    break;
  default:
    // Invert CC for unordered comparisons
    ShouldInvertCC = true;
    switch (CCCode) {
    case ISD::SETULT:
      LC1 = (VT == MVT::f32) ? RTLIB::OGE_F32 :
            (VT == MVT::f64) ? RTLIB::OGE_F64 :
            (VT == MVT::f128) ? RTLIB::OGE_F128 : RTLIB::OGE_PPCF128;
      break;
    case ISD::SETULE:
      LC1 = (VT == MVT::f32) ? RTLIB::OGT_F32 :
            (VT == MVT::f64) ? RTLIB::OGT_F64 :
            (VT == MVT::f128) ? RTLIB::OGT_F128 : RTLIB::OGT_PPCF128;
      break;
    case ISD::SETUGT:
      LC1 = (VT == MVT::f32) ? RTLIB::OLE_F32 :
            (VT == MVT::f64) ? RTLIB::OLE_F64 :
            (VT == MVT::f128) ? RTLIB::OLE_F128 : RTLIB::OLE_PPCF128;
      break;
    case ISD::SETUGE:
      LC1 = (VT == MVT::f32) ? RTLIB::OLT_F32 :
            (VT == MVT::f64) ? RTLIB::OLT_F64 :
            (VT == MVT::f128) ? RTLIB::OLT_F128 : RTLIB::OLT_PPCF128;
      break;
    default: llvm_unreachable("Do not know how to soften this setcc!");
    }
  }

  // Use the target specific return value for comparison lib calls.
  EVT RetVT = getCmpLibcallReturnType();
  SDValue Ops[2] = {NewLHS, NewRHS};
  NewLHS = makeLibCall(DAG, LC1, RetVT, Ops, false /*sign irrelevant*/,
                       dl).first;
  NewRHS = DAG.getConstant(0, dl, RetVT);

  CCCode = getCmpLibcallCC(LC1);
  if (ShouldInvertCC)
    CCCode = getSetCCInverse(CCCode, /*isInteger=*/true);

  if (LC2 != RTLIB::UNKNOWN_LIBCALL) {
    SDValue Tmp = DAG.getNode(
        ISD::SETCC, dl,
        getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), RetVT),
        NewLHS, NewRHS, DAG.getCondCode(CCCode));
    NewLHS = makeLibCall(DAG, LC2, RetVT, Ops, false /*sign irrelevant*/,
                         dl).first;
    NewLHS = DAG.getNode(
        ISD::SETCC, dl,
        getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), RetVT),
        NewLHS, NewRHS, DAG.getCondCode(getCmpLibcallCC(LC2)));
    NewLHS = DAG.getNode(ISD::OR, dl, Tmp.getValueType(), Tmp, NewLHS);
    NewRHS = SDValue();
  }
}
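
// For illustration (assuming the default libcall names and comparison codes):
// softening
//   (setcc f32 %x, %y, setone)
// picks LC1 = OLT_F32 (__ltsf2) and LC2 = OGT_F32 (__gtsf2), producing
//   (or (setcc (__ltsf2 %x, %y), 0, setlt),
//       (setcc (__gtsf2 %x, %y), 0, setgt))
// with NewRHS cleared so the caller uses NewLHS as the final comparison
// result.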

/// Return the entry encoding for a jump table in the current function. The
/// returned value is a member of the MachineJumpTableInfo::JTEntryKind enum.
unsigned TargetLowering::getJumpTableEncoding() const {
  // In non-pic modes, just use the address of a block.
  if (!isPositionIndependent())
    return MachineJumpTableInfo::EK_BlockAddress;

  // In PIC mode, if the target supports a GPRel32 directive, use it.
  if (getTargetMachine().getMCAsmInfo()->getGPRel32Directive() != nullptr)
    return MachineJumpTableInfo::EK_GPRel32BlockAddress;

  // Otherwise, use a label difference.
  return MachineJumpTableInfo::EK_LabelDifference32;
}

SDValue TargetLowering::getPICJumpTableRelocBase(SDValue Table,
                                                 SelectionDAG &DAG) const {
  // If our PIC model is GP relative, use the global offset table as the base.
  unsigned JTEncoding = getJumpTableEncoding();

  if ((JTEncoding == MachineJumpTableInfo::EK_GPRel64BlockAddress) ||
      (JTEncoding == MachineJumpTableInfo::EK_GPRel32BlockAddress))
    return DAG.getGLOBAL_OFFSET_TABLE(getPointerTy(DAG.getDataLayout()));

  return Table;
}

/// This returns the relocation base for the given PIC jumptable, the same as
/// getPICJumpTableRelocBase, but as an MCExpr.
const MCExpr *
TargetLowering::getPICJumpTableRelocBaseExpr(const MachineFunction *MF,
                                             unsigned JTI,
                                             MCContext &Ctx) const {
  // The normal PIC reloc base is the label at the start of the jump table.
  return MCSymbolRefExpr::create(MF->getJTISymbol(JTI, Ctx), Ctx);
}

bool
TargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
  const TargetMachine &TM = getTargetMachine();
  const GlobalValue *GV = GA->getGlobal();

  // If the address is not even local to this DSO we will have to load it from
  // a got and then add the offset.
  if (!TM.shouldAssumeDSOLocal(*GV->getParent(), GV))
    return false;

  // If the code is position independent we will have to add a base register.
  if (isPositionIndependent())
    return false;

  // Otherwise we can do it.
  return true;
}

//===----------------------------------------------------------------------===//
//  Optimization Methods
//===----------------------------------------------------------------------===//

/// If the specified instruction has a constant integer operand and there are
/// bits set in that constant that are not demanded, then clear those bits and
/// return true.
bool TargetLowering::ShrinkDemandedConstant(SDValue Op, const APInt &Demanded,
                                            TargetLoweringOpt &TLO) const {
  SelectionDAG &DAG = TLO.DAG;
  SDLoc DL(Op);
  unsigned Opcode = Op.getOpcode();

  // Do target-specific constant optimization.
  if (targetShrinkDemandedConstant(Op, Demanded, TLO))
    return TLO.New.getNode();

  // FIXME: ISD::SELECT, ISD::SELECT_CC
  switch (Opcode) {
  default:
    break;
  case ISD::XOR:
  case ISD::AND:
  case ISD::OR: {
    auto *Op1C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
    if (!Op1C)
      return false;

    // If this is a 'not' op, don't touch it because that's a canonical form.
    const APInt &C = Op1C->getAPIntValue();
    if (Opcode == ISD::XOR && Demanded.isSubsetOf(C))
      return false;

    if (!C.isSubsetOf(Demanded)) {
      EVT VT = Op.getValueType();
      SDValue NewC = DAG.getConstant(Demanded & C, DL, VT);
      SDValue NewOp = DAG.getNode(Opcode, DL, VT, Op.getOperand(0), NewC);
      return TLO.CombineTo(Op, NewOp);
    }

    break;
  }
  }

  return false;
}
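
// A worked illustration: with Demanded = 0x00FF, the node
//   (or X, 0x1234)
// has constant bits outside the demanded mask, so it is rebuilt as
//   (or X, 0x0034)
// i.e. the constant is intersected with Demanded. An XOR whose constant
// covers all demanded bits is left alone, since it may be a canonical 'not'.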

/// Convert x+y to (VT)((SmallVT)x+(SmallVT)y) if the casts are free.
/// This uses isZExtFree and ZERO_EXTEND for the widening cast, but it could be
/// generalized for targets with other types of implicit widening casts.
bool TargetLowering::ShrinkDemandedOp(SDValue Op, unsigned BitWidth,
                                      const APInt &Demanded,
                                      TargetLoweringOpt &TLO) const {
  assert(Op.getNumOperands() == 2 &&
         "ShrinkDemandedOp only supports binary operators!");
  assert(Op.getNode()->getNumValues() == 1 &&
         "ShrinkDemandedOp only supports nodes with one result!");

  SelectionDAG &DAG = TLO.DAG;
  SDLoc dl(Op);

  // Early return, as this function cannot handle vector types.
  if (Op.getValueType().isVector())
    return false;

  // Don't do this if the node has another user, which may require the
  // full value.
  if (!Op.getNode()->hasOneUse())
    return false;

  // Search for the smallest integer type with free casts to and from
  // Op's type. For expedience, just check power-of-2 integer types.
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  unsigned DemandedSize = Demanded.getActiveBits();
  unsigned SmallVTBits = DemandedSize;
  if (!isPowerOf2_32(SmallVTBits))
    SmallVTBits = NextPowerOf2(SmallVTBits);
  for (; SmallVTBits < BitWidth; SmallVTBits = NextPowerOf2(SmallVTBits)) {
    EVT SmallVT = EVT::getIntegerVT(*DAG.getContext(), SmallVTBits);
    if (TLI.isTruncateFree(Op.getValueType(), SmallVT) &&
        TLI.isZExtFree(SmallVT, Op.getValueType())) {
      // We found a type with free casts.
      SDValue X = DAG.getNode(
          Op.getOpcode(), dl, SmallVT,
          DAG.getNode(ISD::TRUNCATE, dl, SmallVT, Op.getOperand(0)),
          DAG.getNode(ISD::TRUNCATE, dl, SmallVT, Op.getOperand(1)));
      assert(DemandedSize <= SmallVTBits && "Narrowed below demanded bits?");
      SDValue Z = DAG.getNode(ISD::ANY_EXTEND, dl, Op.getValueType(), X);
      return TLO.CombineTo(Op, Z);
    }
  }
  return false;
}
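
// For example (a sketch, assuming a target where the narrow casts are free):
// if only the low 8 bits of an i64 add are demanded, the loop tries i8, then
// i16, then i32, and rewrites
//   (i64 add x, y)
// as
//   (i64 any_extend (iN add (trunc x), (trunc y)))
// at the first width N whose truncate and zero-extend are both free.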

bool
TargetLowering::SimplifyDemandedBits(SDNode *User, unsigned OpIdx,
                                     const APInt &Demanded,
                                     DAGCombinerInfo &DCI,
                                     TargetLoweringOpt &TLO) const {
  SDValue Op = User->getOperand(OpIdx);
  KnownBits Known;

  if (!SimplifyDemandedBits(Op, Demanded, Known, TLO, 0, true))
    return false;

  // Old will not always be the same as Op. For example:
  //
  //   Demanded = 0xffffff
  //   Op = i64 truncate (i32 and x, 0xffffff)
  //
  // In this case, simplifying the demanded bits will want to replace the
  // 'and' node with the value 'x', which gives us:
  //   Old = i32 and x, 0xffffff
  //   New = x
  if (TLO.Old.hasOneUse()) {
    // For the one use case, we just commit the change.
    DCI.CommitTargetLoweringOpt(TLO);
    return true;
  }

  // If Old has more than one use then it must be Op, because the
  // AssumeSingleUse flag is not propagated to recursive calls of
  // SimplifyDemandedBits, so the only node with multiple uses that
  // it will attempt to combine will be Op.
  assert(TLO.Old == Op);

  SmallVector<SDValue, 4> NewOps;
  for (unsigned i = 0, e = User->getNumOperands(); i != e; ++i) {
    if (i == OpIdx) {
      NewOps.push_back(TLO.New);
      continue;
    }
    NewOps.push_back(User->getOperand(i));
  }
  User = TLO.DAG.UpdateNodeOperands(User, NewOps);
  // Op has fewer uses now, so we may be able to perform additional combines
  // with it.
  DCI.AddToWorklist(Op.getNode());
  // User's operands have been updated, so we may be able to do new combines
  // with it.
  DCI.AddToWorklist(User);
  return true;
}

bool TargetLowering::SimplifyDemandedBits(SDValue Op, const APInt &DemandedMask,
                                          DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
                        !DCI.isBeforeLegalizeOps());
  KnownBits Known;

  bool Simplified = SimplifyDemandedBits(Op, DemandedMask, Known, TLO);
  if (Simplified)
    DCI.CommitTargetLoweringOpt(TLO);
  return Simplified;
}

/// Look at Op. At this point, we know that only the DemandedMask bits of the
/// result of Op are ever used downstream. If we can use this information to
/// simplify Op, create a new simplified DAG node and return true, returning the
/// original and new nodes in Old and New. Otherwise, analyze the expression and
/// return a mask of Known bits for the expression (used to simplify the
/// caller). The Known bits may only be accurate for those bits in the
/// DemandedMask.
bool TargetLowering::SimplifyDemandedBits(SDValue Op,
                                          const APInt &DemandedMask,
                                          KnownBits &Known,
                                          TargetLoweringOpt &TLO,
                                          unsigned Depth,
                                          bool AssumeSingleUse) const {
  unsigned BitWidth = DemandedMask.getBitWidth();
  assert(Op.getScalarValueSizeInBits() == BitWidth &&
         "Mask size mismatches value type size!");
  APInt NewMask = DemandedMask;
  SDLoc dl(Op);
  auto &DL = TLO.DAG.getDataLayout();

  // Don't know anything.
  Known = KnownBits(BitWidth);

  if (Op.getOpcode() == ISD::Constant) {
    // We know all of the bits for a constant!
    Known.One = cast<ConstantSDNode>(Op)->getAPIntValue();
    Known.Zero = ~Known.One;
    return false;
  }

  // Other users may use these bits.
  EVT VT = Op.getValueType();
  if (!Op.getNode()->hasOneUse() && !AssumeSingleUse) {
    if (Depth != 0) {
      // If not at the root, just compute the Known bits to
      // simplify things downstream.
      TLO.DAG.computeKnownBits(Op, Known, Depth);
      return false;
    }
    // If this is the root being simplified, allow it to have multiple uses,
    // just set the NewMask to all bits.
    NewMask = APInt::getAllOnesValue(BitWidth);
  } else if (DemandedMask == 0) {
    // Not demanding any bits from Op.
    if (!Op.isUndef())
      return TLO.CombineTo(Op, TLO.DAG.getUNDEF(VT));
    return false;
  } else if (Depth == 6) {        // Limit search depth.
    return false;
  }

  KnownBits Known2, KnownOut;
  switch (Op.getOpcode()) {
  case ISD::BUILD_VECTOR:
    // Collect the known bits that are shared by every constant vector element.
    Known.Zero.setAllBits(); Known.One.setAllBits();
    for (SDValue SrcOp : Op->ops()) {
      if (!isa<ConstantSDNode>(SrcOp)) {
        // We can only handle all constant values - bail out with no known bits.
        Known = KnownBits(BitWidth);
        return false;
      }
      Known2.One = cast<ConstantSDNode>(SrcOp)->getAPIntValue();
      Known2.Zero = ~Known2.One;

      // BUILD_VECTOR can implicitly truncate sources, we must handle this.
      if (Known2.One.getBitWidth() != BitWidth) {
        assert(Known2.getBitWidth() > BitWidth &&
               "Expected BUILD_VECTOR implicit truncation");
        Known2 = Known2.trunc(BitWidth);
      }

      // Known bits are the values that are shared by every element.
      // TODO: support per-element known bits.
      Known.One &= Known2.One;
      Known.Zero &= Known2.Zero;
    }
    return false;   // Don't fall through, will infinitely loop.
  case ISD::AND:
    // If the RHS is a constant, check to see if the LHS would be zero without
    // using the bits from the RHS. Below, we use knowledge about the RHS to
    // simplify the LHS, here we're using information from the LHS to simplify
    // the RHS.
    if (ConstantSDNode *RHSC = isConstOrConstSplat(Op.getOperand(1))) {
      SDValue Op0 = Op.getOperand(0);
      KnownBits LHSKnown;
      // Do not increment Depth here; that can cause an infinite loop.
      TLO.DAG.computeKnownBits(Op0, LHSKnown, Depth);
      // If the LHS already has zeros where RHSC does, this 'and' is dead.
      if ((LHSKnown.Zero & NewMask) == (~RHSC->getAPIntValue() & NewMask))
        return TLO.CombineTo(Op, Op0);

      // If any of the set bits in the RHS are known zero on the LHS, shrink
      // the constant.
      if (ShrinkDemandedConstant(Op, ~LHSKnown.Zero & NewMask, TLO))
        return true;

      // Bitwise-not (xor X, -1) is a special case: we don't usually shrink its
      // constant, but if this 'and' is only clearing bits that were just set by
      // the xor, then this 'and' can be eliminated by shrinking the mask of
      // the xor. For example, for a 32-bit X:
      //   and (xor (srl X, 31), -1), 1 --> xor (srl X, 31), 1
      if (isBitwiseNot(Op0) && Op0.hasOneUse() &&
          LHSKnown.One == ~RHSC->getAPIntValue()) {
        SDValue Xor = TLO.DAG.getNode(ISD::XOR, dl, VT, Op0.getOperand(0),
                                      Op.getOperand(1));
        return TLO.CombineTo(Op, Xor);
      }
    }

    if (SimplifyDemandedBits(Op.getOperand(1), NewMask, Known, TLO, Depth+1))
      return true;
    assert(!Known.hasConflict() && "Bits known to be one AND zero?");
    if (SimplifyDemandedBits(Op.getOperand(0), ~Known.Zero & NewMask,
                             Known2, TLO, Depth+1))
      return true;
    assert(!Known2.hasConflict() && "Bits known to be one AND zero?");

    // If all of the demanded bits are known one on one side, return the other.
    // These bits cannot contribute to the result of the 'and'.
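    // For example, with NewMask = 0x0F and an RHS whose low four bits are
    // known one (the constant 0xFF, say), every demanded result bit equals
    // the corresponding LHS bit, so the 'and' folds to operand 0 below.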
    if (NewMask.isSubsetOf(Known2.Zero | Known.One))
      return TLO.CombineTo(Op, Op.getOperand(0));
    if (NewMask.isSubsetOf(Known.Zero | Known2.One))
      return TLO.CombineTo(Op, Op.getOperand(1));
    // If all of the demanded bits in the inputs are known zeros, return zero.
    if (NewMask.isSubsetOf(Known.Zero | Known2.Zero))
      return TLO.CombineTo(Op, TLO.DAG.getConstant(0, dl, VT));
    // If the RHS is a constant, see if we can simplify it.
    if (ShrinkDemandedConstant(Op, ~Known2.Zero & NewMask, TLO))
      return true;
    // If the operation can be done in a smaller type, do so.
    if (ShrinkDemandedOp(Op, BitWidth, NewMask, TLO))
      return true;

    // Output known-1 bits are only known if set in both the LHS & RHS.
    Known.One &= Known2.One;
    // Output known-0 are known to be clear if zero in either the LHS | RHS.
    Known.Zero |= Known2.Zero;
    break;
  case ISD::OR:
    if (SimplifyDemandedBits(Op.getOperand(1), NewMask, Known, TLO, Depth+1))
      return true;
    assert(!Known.hasConflict() && "Bits known to be one AND zero?");
    if (SimplifyDemandedBits(Op.getOperand(0), ~Known.One & NewMask,
                             Known2, TLO, Depth+1))
      return true;
    assert(!Known2.hasConflict() && "Bits known to be one AND zero?");

    // If all of the demanded bits are known zero on one side, return the other.
    // These bits cannot contribute to the result of the 'or'.
    if (NewMask.isSubsetOf(Known2.One | Known.Zero))
      return TLO.CombineTo(Op, Op.getOperand(0));
    if (NewMask.isSubsetOf(Known.One | Known2.Zero))
      return TLO.CombineTo(Op, Op.getOperand(1));
    // If the RHS is a constant, see if we can simplify it.
    if (ShrinkDemandedConstant(Op, NewMask, TLO))
      return true;
    // If the operation can be done in a smaller type, do so.
    if (ShrinkDemandedOp(Op, BitWidth, NewMask, TLO))
      return true;

    // Output known-0 bits are only known if clear in both the LHS & RHS.
    Known.Zero &= Known2.Zero;
    // Output known-1 are known to be set if set in either the LHS | RHS.
    Known.One |= Known2.One;
    break;
  case ISD::XOR: {
    if (SimplifyDemandedBits(Op.getOperand(1), NewMask, Known, TLO, Depth+1))
      return true;
    assert(!Known.hasConflict() && "Bits known to be one AND zero?");
    if (SimplifyDemandedBits(Op.getOperand(0), NewMask, Known2, TLO, Depth+1))
      return true;
    assert(!Known2.hasConflict() && "Bits known to be one AND zero?");

    // If all of the demanded bits are known zero on one side, return the other.
    // These bits cannot contribute to the result of the 'xor'.
    if (NewMask.isSubsetOf(Known.Zero))
      return TLO.CombineTo(Op, Op.getOperand(0));
    if (NewMask.isSubsetOf(Known2.Zero))
      return TLO.CombineTo(Op, Op.getOperand(1));
    // If the operation can be done in a smaller type, do so.
    if (ShrinkDemandedOp(Op, BitWidth, NewMask, TLO))
      return true;

    // If all of the unknown bits are known to be zero on one side or the other
    // (but not both) turn this into an *inclusive* or.
    //    e.g. (A & C1)^(B & C2) -> (A & C1)|(B & C2) iff C1&C2 == 0
    if ((NewMask & ~Known.Zero & ~Known2.Zero) == 0)
      return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::OR, dl, VT,
                                               Op.getOperand(0),
                                               Op.getOperand(1)));

    // Output known-0 bits are known if clear or set in both the LHS & RHS.
    KnownOut.Zero = (Known.Zero & Known2.Zero) | (Known.One & Known2.One);
    // Output known-1 are known to be set if set in only one of the LHS, RHS.
    KnownOut.One = (Known.Zero & Known2.One) | (Known.One & Known2.Zero);

    // If all of the demanded bits on one side are known, and all of the set
    // bits on that side are also known to be set on the other side, turn this
    // into an AND, as we know the bits will be cleared.
    //    e.g. (X | C1) ^ C2 --> (X | C1) & ~C2 iff (C1&C2) == C2
    // NB: it is okay if more bits are known than are requested
    if (NewMask.isSubsetOf(Known.Zero | Known.One)) { // all known on one side
      if (Known.One == Known2.One) { // set bits are the same on both sides
        SDValue ANDC = TLO.DAG.getConstant(~Known.One & NewMask, dl, VT);
        return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::AND, dl, VT,
                                                 Op.getOperand(0), ANDC));
      }
    }

    // If the RHS is a constant, see if we can change it. Don't alter a -1
    // constant because that's a 'not' op, and that is better for combining and
    // codegen.
    ConstantSDNode *C = isConstOrConstSplat(Op.getOperand(1));
    if (C && !C->isAllOnesValue()) {
      if (NewMask.isSubsetOf(C->getAPIntValue())) {
        // We're flipping all demanded bits. Flip the undemanded bits too.
        SDValue New = TLO.DAG.getNOT(dl, Op.getOperand(0), VT);
        return TLO.CombineTo(Op, New);
      }
      // If we can't turn this into a 'not', try to shrink the constant.
      if (ShrinkDemandedConstant(Op, NewMask, TLO))
        return true;
    }

    Known = std::move(KnownOut);
    break;
  }
  case ISD::SELECT:
    if (SimplifyDemandedBits(Op.getOperand(2), NewMask, Known, TLO, Depth+1))
      return true;
    if (SimplifyDemandedBits(Op.getOperand(1), NewMask, Known2, TLO, Depth+1))
      return true;
    assert(!Known.hasConflict() && "Bits known to be one AND zero?");
    assert(!Known2.hasConflict() && "Bits known to be one AND zero?");

    // If the operands are constants, see if we can simplify them.
    if (ShrinkDemandedConstant(Op, NewMask, TLO))
      return true;

    // Only known if known in both the LHS and RHS.
    Known.One &= Known2.One;
    Known.Zero &= Known2.Zero;
    break;
  case ISD::SELECT_CC:
    if (SimplifyDemandedBits(Op.getOperand(3), NewMask, Known, TLO, Depth+1))
      return true;
    if (SimplifyDemandedBits(Op.getOperand(2), NewMask, Known2, TLO, Depth+1))
      return true;
    assert(!Known.hasConflict() && "Bits known to be one AND zero?");
    assert(!Known2.hasConflict() && "Bits known to be one AND zero?");

    // If the operands are constants, see if we can simplify them.
    if (ShrinkDemandedConstant(Op, NewMask, TLO))
      return true;

    // Only known if known in both the LHS and RHS.
    Known.One &= Known2.One;
    Known.Zero &= Known2.Zero;
    break;
  case ISD::SETCC: {
    SDValue Op0 = Op.getOperand(0);
    SDValue Op1 = Op.getOperand(1);
    ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
    // If (1) we only need the sign-bit, (2) the setcc operands are the same
    // width as the setcc result, and (3) the result of a setcc conforms to 0 or
    // -1, we may be able to bypass the setcc.
    if (NewMask.isSignMask() && Op0.getScalarValueSizeInBits() == BitWidth &&
        getBooleanContents(VT) ==
            BooleanContent::ZeroOrNegativeOneBooleanContent) {
      // If we're testing X < 0, then this compare isn't needed - just use X!
      // FIXME: We're limiting to integer types here, but this should also work
      // if we don't care about FP signed-zero. The use of SETLT with FP means
      // that we don't care about NaNs.
      if (CC == ISD::SETLT && Op1.getValueType().isInteger() &&
          (isNullConstant(Op1) || ISD::isBuildVectorAllZeros(Op1.getNode())))
        return TLO.CombineTo(Op, Op0);

      // TODO: Should we check for other forms of sign-bit comparisons?
      // Examples: X <= -1, X >= 0
    }
    if (getBooleanContents(Op0.getValueType()) ==
            TargetLowering::ZeroOrOneBooleanContent &&
        BitWidth > 1)
      Known.Zero.setBitsFrom(1);
    break;
  }
  case ISD::SHL:
    if (ConstantSDNode *SA = isConstOrConstSplat(Op.getOperand(1))) {
      SDValue InOp = Op.getOperand(0);

      // If the shift count is an invalid immediate, don't do anything.
      if (SA->getAPIntValue().uge(BitWidth))
        break;

      unsigned ShAmt = SA->getZExtValue();

      // If this is ((X >>u C1) << ShAmt), see if we can simplify this into a
      // single shift. We can do this if the bottom bits (which are shifted
      // out) are never demanded.
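      // For example, with the low three bits undemanded,
      //   (shl (srl X, 2), 3)  -->  (shl X, 1)
      // since Diff = 3 - 2 = 1 and the discarded low bits don't matter.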
      if (InOp.getOpcode() == ISD::SRL) {
        if (ConstantSDNode *SA2 = isConstOrConstSplat(InOp.getOperand(1))) {
          if (ShAmt && (NewMask & APInt::getLowBitsSet(BitWidth, ShAmt)) == 0) {
            if (SA2->getAPIntValue().ult(BitWidth)) {
              unsigned C1 = SA2->getZExtValue();
              unsigned Opc = ISD::SHL;
              int Diff = ShAmt-C1;
              if (Diff < 0) {
                Diff = -Diff;
                Opc = ISD::SRL;
              }

              SDValue NewSA =
                TLO.DAG.getConstant(Diff, dl, Op.getOperand(1).getValueType());
              return TLO.CombineTo(Op, TLO.DAG.getNode(Opc, dl, VT,
                                                       InOp.getOperand(0),
                                                       NewSA));
            }
          }
        }
      }

      if (SimplifyDemandedBits(InOp, NewMask.lshr(ShAmt), Known, TLO, Depth+1))
        return true;

      // Convert (shl (anyext x, c)) to (anyext (shl x, c)) if the high bits
      // are not demanded. This will likely allow the anyext to be folded away.
      if (InOp.getNode()->getOpcode() == ISD::ANY_EXTEND) {
        SDValue InnerOp = InOp.getOperand(0);
        EVT InnerVT = InnerOp.getValueType();
        unsigned InnerBits = InnerVT.getScalarSizeInBits();
        if (ShAmt < InnerBits && NewMask.getActiveBits() <= InnerBits &&
            isTypeDesirableForOp(ISD::SHL, InnerVT)) {
          EVT ShTy = getShiftAmountTy(InnerVT, DL);
          if (!APInt(BitWidth, ShAmt).isIntN(ShTy.getSizeInBits()))
            ShTy = InnerVT;
          SDValue NarrowShl =
            TLO.DAG.getNode(ISD::SHL, dl, InnerVT, InnerOp,
                            TLO.DAG.getConstant(ShAmt, dl, ShTy));
          return
            TLO.CombineTo(Op,
                          TLO.DAG.getNode(ISD::ANY_EXTEND, dl, VT, NarrowShl));
        }
        // Repeat the SHL optimization above in cases where an extension
        // intervenes: (shl (anyext (shr x, c1)), c2) to
        // (shl (anyext x), c2-c1). This requires that the bottom c1 bits
        // aren't demanded (as above) and that the shifted upper c1 bits of
        // x aren't demanded.
        if (InOp.hasOneUse() && InnerOp.getOpcode() == ISD::SRL &&
            InnerOp.hasOneUse()) {
          if (ConstantSDNode *SA2 = isConstOrConstSplat(InnerOp.getOperand(1))) {
            unsigned InnerShAmt = SA2->getLimitedValue(InnerBits);
            if (InnerShAmt < ShAmt &&
                InnerShAmt < InnerBits &&
                NewMask.getActiveBits() <= (InnerBits - InnerShAmt + ShAmt) &&
                NewMask.countTrailingZeros() >= ShAmt) {
              SDValue NewSA =
                TLO.DAG.getConstant(ShAmt - InnerShAmt, dl,
                                    Op.getOperand(1).getValueType());
              SDValue NewExt = TLO.DAG.getNode(ISD::ANY_EXTEND, dl, VT,
                                               InnerOp.getOperand(0));
              return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::SHL, dl, VT,
                                                       NewExt, NewSA));
            }
          }
        }
      }

      Known.Zero <<= ShAmt;
      Known.One <<= ShAmt;
      // low bits known zero.
      Known.Zero.setLowBits(ShAmt);
    }
    break;
  case ISD::SRL:
    if (ConstantSDNode *SA = isConstOrConstSplat(Op.getOperand(1))) {
      SDValue InOp = Op.getOperand(0);

      // If the shift count is an invalid immediate, don't do anything.
      if (SA->getAPIntValue().uge(BitWidth))
        break;

      unsigned ShAmt = SA->getZExtValue();
      APInt InDemandedMask = (NewMask << ShAmt);

      // If the shift is exact, then it does demand the low bits (and knows that
      // they are zero).
      if (Op->getFlags().hasExact())
        InDemandedMask.setLowBits(ShAmt);

      // If this is ((X << C1) >>u ShAmt), see if we can simplify this into a
      // single shift. We can do this if the top bits (which are shifted out)
      // are never demanded.
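      // Mirroring the SHL case: with the top two bits undemanded,
      //   (srl (shl X, 3), 2)  -->  (shl X, 1)
      // because the net effect on the demanded bits is a left shift by one.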
      if (InOp.getOpcode() == ISD::SHL) {
        if (ConstantSDNode *SA2 = isConstOrConstSplat(InOp.getOperand(1))) {
          if (ShAmt &&
              (NewMask & APInt::getHighBitsSet(BitWidth, ShAmt)) == 0) {
            if (SA2->getAPIntValue().ult(BitWidth)) {
              unsigned C1 = SA2->getZExtValue();
              unsigned Opc = ISD::SRL;
              int Diff = ShAmt-C1;
              if (Diff < 0) {
                Diff = -Diff;
                Opc = ISD::SHL;
              }

              SDValue NewSA =
                TLO.DAG.getConstant(Diff, dl, Op.getOperand(1).getValueType());
              return TLO.CombineTo(Op, TLO.DAG.getNode(Opc, dl, VT,
                                                       InOp.getOperand(0),
                                                       NewSA));
            }
          }
        }
      }

      // Compute the new bits that are at the top now.
      if (SimplifyDemandedBits(InOp, InDemandedMask, Known, TLO, Depth+1))
        return true;
      assert(!Known.hasConflict() && "Bits known to be one AND zero?");
      Known.Zero.lshrInPlace(ShAmt);
      Known.One.lshrInPlace(ShAmt);

      Known.Zero.setHighBits(ShAmt);  // High bits known zero.
    }
    break;
  case ISD::SRA:
    // If this is an arithmetic shift right and only the low-bit is set, we can
    // always convert this into a logical shr, even if the shift amount is
    // variable. The low bit of the shift cannot be an input sign bit unless
    // the shift amount is >= the size of the datatype, which is undefined.
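    // For example, with NewMask = 1:
    //   (sra X, s)  -->  (srl X, s)
    // since for any in-range s the result's bit 0 never comes from the sign.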
    if (NewMask.isOneValue())
      return TLO.CombineTo(Op,
                           TLO.DAG.getNode(ISD::SRL, dl, VT, Op.getOperand(0),
                                           Op.getOperand(1)));

    if (ConstantSDNode *SA = isConstOrConstSplat(Op.getOperand(1))) {
      // If the shift count is an invalid immediate, don't do anything.
      if (SA->getAPIntValue().uge(BitWidth))
        break;

      unsigned ShAmt = SA->getZExtValue();
      APInt InDemandedMask = (NewMask << ShAmt);

      // If the shift is exact, then it does demand the low bits (and knows that
      // they are zero).
      if (Op->getFlags().hasExact())
        InDemandedMask.setLowBits(ShAmt);

      // If any of the demanded bits are produced by the sign extension, we also
      // demand the input sign bit.
      if (NewMask.countLeadingZeros() < ShAmt)
        InDemandedMask.setSignBit();

      if (SimplifyDemandedBits(Op.getOperand(0), InDemandedMask, Known, TLO,
                               Depth+1))
        return true;
      assert(!Known.hasConflict() && "Bits known to be one AND zero?");
      Known.Zero.lshrInPlace(ShAmt);
      Known.One.lshrInPlace(ShAmt);

      // If the input sign bit is known to be zero, or if none of the top bits
      // are demanded, turn this into an unsigned shift right.
      if (Known.Zero[BitWidth - ShAmt - 1] ||
          NewMask.countLeadingZeros() >= ShAmt) {
        SDNodeFlags Flags;
        Flags.setExact(Op->getFlags().hasExact());
        return TLO.CombineTo(Op,
                             TLO.DAG.getNode(ISD::SRL, dl, VT, Op.getOperand(0),
                                             Op.getOperand(1), Flags));
      }

      int Log2 = NewMask.exactLogBase2();
      if (Log2 >= 0) {
        // The bit must come from the sign.
        SDValue NewSA =
          TLO.DAG.getConstant(BitWidth - 1 - Log2, dl,
                              Op.getOperand(1).getValueType());
        return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::SRL, dl, VT,
                                                 Op.getOperand(0), NewSA));
      }

      if (Known.One[BitWidth - ShAmt - 1])
        // New bits are known one.
        Known.One.setHighBits(ShAmt);
    }
    break;
  case ISD::SIGN_EXTEND_INREG: {
    EVT ExVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
    unsigned ExVTBits = ExVT.getScalarSizeInBits();

    // If we only care about the highest bit, don't bother shifting right.
    if (NewMask.isSignMask()) {
      SDValue InOp = Op.getOperand(0);
      bool AlreadySignExtended =
        TLO.DAG.ComputeNumSignBits(InOp) >= BitWidth-ExVTBits+1;
      // However if the input is already sign extended we expect the sign
      // extension to be dropped altogether later and do not simplify.
      if (!AlreadySignExtended) {
        // Compute the correct shift amount type, which must be getShiftAmountTy
        // for scalar types after legalization.
        EVT ShiftAmtTy = VT;
        if (TLO.LegalTypes() && !ShiftAmtTy.isVector())
          ShiftAmtTy = getShiftAmountTy(ShiftAmtTy, DL);

        SDValue ShiftAmt = TLO.DAG.getConstant(BitWidth - ExVTBits, dl,
                                               ShiftAmtTy);
        return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::SHL, dl, VT, InOp,
                                                 ShiftAmt));
      }
    }

    // If none of the extended bits are demanded, eliminate the sextinreg.
    if (NewMask.getActiveBits() <= ExVTBits)
      return TLO.CombineTo(Op, Op.getOperand(0));
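    // e.g. for i32 (sign_extend_inreg X, i8), a mask demanding only bits 0-7
    // sees none of the replicated sign bits, so the node is simply X.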

    APInt InputDemandedBits = NewMask.getLoBits(ExVTBits);

    // Since the sign extended bits are demanded, we know that the sign
    // bit is demanded.
    InputDemandedBits.setBit(ExVTBits - 1);

    if (SimplifyDemandedBits(Op.getOperand(0), InputDemandedBits,
                             Known, TLO, Depth+1))
      return true;
    assert(!Known.hasConflict() && "Bits known to be one AND zero?");

    // If the sign bit of the input is known set or clear, then we know the
    // top bits of the result.

    // If the input sign bit is known zero, convert this into a zero extension.
    if (Known.Zero[ExVTBits - 1])
      return TLO.CombineTo(Op, TLO.DAG.getZeroExtendInReg(
                                   Op.getOperand(0), dl, ExVT.getScalarType()));

    APInt Mask = APInt::getLowBitsSet(BitWidth, ExVTBits);
    if (Known.One[ExVTBits - 1]) {    // Input sign bit known set
      Known.One.setBitsFrom(ExVTBits);
      Known.Zero &= Mask;
    } else {                          // Input sign bit unknown
      Known.Zero &= Mask;
      Known.One &= Mask;
    }
    break;
  }
  case ISD::BUILD_PAIR: {
    EVT HalfVT = Op.getOperand(0).getValueType();
    unsigned HalfBitWidth = HalfVT.getScalarSizeInBits();

    APInt MaskLo = NewMask.getLoBits(HalfBitWidth).trunc(HalfBitWidth);
    APInt MaskHi = NewMask.getHiBits(HalfBitWidth).trunc(HalfBitWidth);

    KnownBits KnownLo, KnownHi;

    if (SimplifyDemandedBits(Op.getOperand(0), MaskLo, KnownLo, TLO, Depth + 1))
      return true;

    if (SimplifyDemandedBits(Op.getOperand(1), MaskHi, KnownHi, TLO, Depth + 1))
      return true;

    Known.Zero = KnownLo.Zero.zext(BitWidth) |
                 KnownHi.Zero.zext(BitWidth).shl(HalfBitWidth);

    Known.One = KnownLo.One.zext(BitWidth) |
                KnownHi.One.zext(BitWidth).shl(HalfBitWidth);
    break;
  }
  case ISD::ZERO_EXTEND: {
    unsigned OperandBitWidth = Op.getOperand(0).getScalarValueSizeInBits();

    // If none of the top bits are demanded, convert this into an any_extend.
    if (NewMask.getActiveBits() <= OperandBitWidth)
      return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::ANY_EXTEND, dl, VT,
                                               Op.getOperand(0)));

    APInt InMask = NewMask.trunc(OperandBitWidth);
    if (SimplifyDemandedBits(Op.getOperand(0), InMask, Known, TLO, Depth+1))
      return true;
    assert(!Known.hasConflict() && "Bits known to be one AND zero?");
    Known = Known.zext(BitWidth);
    Known.Zero.setBitsFrom(OperandBitWidth);
    break;
  }
  case ISD::SIGN_EXTEND: {
    unsigned InBits = Op.getOperand(0).getValueType().getScalarSizeInBits();

    // If none of the top bits are demanded, convert this into an any_extend.
    if (NewMask.getActiveBits() <= InBits)
      return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::ANY_EXTEND, dl, VT,
                                               Op.getOperand(0)));

    // Since some of the sign extended bits are demanded, we know that the sign
    // bit is demanded.
    APInt InDemandedBits = NewMask.trunc(InBits);
    InDemandedBits.setBit(InBits - 1);

    if (SimplifyDemandedBits(Op.getOperand(0), InDemandedBits, Known, TLO,
                             Depth+1))
      return true;
    assert(!Known.hasConflict() && "Bits known to be one AND zero?");
    // If the sign bit is known one, the top bits match.
    Known = Known.sext(BitWidth);

    // If the sign bit is known zero, convert this to a zero extend.
    if (Known.isNonNegative())
      return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::ZERO_EXTEND, dl, VT,
                                               Op.getOperand(0)));
    break;
  }
  case ISD::ANY_EXTEND: {
    unsigned OperandBitWidth = Op.getOperand(0).getScalarValueSizeInBits();
    APInt InMask = NewMask.trunc(OperandBitWidth);
    if (SimplifyDemandedBits(Op.getOperand(0), InMask, Known, TLO, Depth+1))
      return true;
    assert(!Known.hasConflict() && "Bits known to be one AND zero?");
    Known = Known.zext(BitWidth);
    break;
  }
  case ISD::TRUNCATE: {
    // Simplify the input, using demanded bit information, and compute the known
    // zero/one bits live out.
    unsigned OperandBitWidth = Op.getOperand(0).getScalarValueSizeInBits();
    APInt TruncMask = NewMask.zext(OperandBitWidth);
    if (SimplifyDemandedBits(Op.getOperand(0), TruncMask, Known, TLO, Depth+1))
      return true;
    Known = Known.trunc(BitWidth);

    // If the input is only used by this truncate, see if we can shrink it based
    // on the known demanded bits.
    if (Op.getOperand(0).getNode()->hasOneUse()) {
      SDValue In = Op.getOperand(0);
      switch (In.getOpcode()) {
      default: break;
      case ISD::SRL:
        // Shrink SRL by a constant if none of the high bits shifted in are
        // demanded.
        if (TLO.LegalTypes() && !isTypeDesirableForOp(ISD::SRL, VT))
          // Do not turn (vt1 truncate (vt2 srl)) into (vt1 srl) if vt1 is
          // undesirable.
          break;
        ConstantSDNode *ShAmt = dyn_cast<ConstantSDNode>(In.getOperand(1));
        if (!ShAmt)
          break;
        SDValue Shift = In.getOperand(1);
        if (TLO.LegalTypes()) {
          uint64_t ShVal = ShAmt->getZExtValue();
          Shift = TLO.DAG.getConstant(ShVal, dl, getShiftAmountTy(VT, DL));
        }

        if (ShAmt->getZExtValue() < BitWidth) {
          APInt HighBits = APInt::getHighBitsSet(OperandBitWidth,
                                                 OperandBitWidth - BitWidth);
          HighBits.lshrInPlace(ShAmt->getZExtValue());
          HighBits = HighBits.trunc(BitWidth);

          if (!(HighBits & NewMask)) {
            // None of the shifted in bits are needed. Add a truncate of the
            // shift input, then shift it.
            SDValue NewTrunc = TLO.DAG.getNode(ISD::TRUNCATE, dl, VT,
                                               In.getOperand(0));
            return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::SRL, dl, VT, NewTrunc,
                                                     Shift));
          }
        }
        break;
      }
    }

    assert(!Known.hasConflict() && "Bits known to be one AND zero?");
    break;
  }
  case ISD::AssertZext: {
    // AssertZext demands all of the high bits, plus any of the low bits
    // demanded by its users.
    EVT ZVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
    APInt InMask = APInt::getLowBitsSet(BitWidth, ZVT.getSizeInBits());
    if (SimplifyDemandedBits(Op.getOperand(0), ~InMask | NewMask,
                             Known, TLO, Depth+1))
      return true;
    assert(!Known.hasConflict() && "Bits known to be one AND zero?");

    Known.Zero |= ~InMask;
    break;
  }
  case ISD::BITCAST:
    // If this is an FP->Int bitcast and if the sign bit is the only
    // thing demanded, turn this into a FGETSIGN.
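    // e.g. (i32 bitcast (f32 X)) with only the sign bit demanded can become
    //   (shl (fgetsign X), 31)
    // when FGETSIGN is legal or custom for the result type (or for i32).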
    if (!TLO.LegalOperations() && !VT.isVector() &&
        !Op.getOperand(0).getValueType().isVector() &&
        NewMask == APInt::getSignMask(Op.getValueSizeInBits()) &&
        Op.getOperand(0).getValueType().isFloatingPoint()) {
      bool OpVTLegal = isOperationLegalOrCustom(ISD::FGETSIGN, VT);
      bool i32Legal = isOperationLegalOrCustom(ISD::FGETSIGN, MVT::i32);
      if ((OpVTLegal || i32Legal) && VT.isSimple() &&
          Op.getOperand(0).getValueType() != MVT::f16 &&
          Op.getOperand(0).getValueType() != MVT::f128) {
        // Cannot eliminate/lower SHL for f128 yet.
        EVT Ty = OpVTLegal ? VT : MVT::i32;
        // Make a FGETSIGN + SHL to move the sign bit into the appropriate
        // place. We expect the SHL to be eliminated by other optimizations.
        SDValue Sign = TLO.DAG.getNode(ISD::FGETSIGN, dl, Ty, Op.getOperand(0));
        unsigned OpVTSizeInBits = Op.getValueSizeInBits();
        if (!OpVTLegal && OpVTSizeInBits > 32)
          Sign = TLO.DAG.getNode(ISD::ZERO_EXTEND, dl, VT, Sign);
        unsigned ShVal = Op.getValueSizeInBits() - 1;
        SDValue ShAmt = TLO.DAG.getConstant(ShVal, dl, VT);
        return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::SHL, dl, VT, Sign, ShAmt));
      }
    }
    // If this is a bitcast, let computeKnownBits handle it. Only do this on a
    // recursive call where Known may be useful to the caller.
    if (Depth > 0) {
      TLO.DAG.computeKnownBits(Op, Known, Depth);
      return false;
    }
    break;
  case ISD::ADD:
  case ISD::MUL:
  case ISD::SUB: {
    // Add, Sub, and Mul don't demand any bits in positions beyond that
    // of the highest bit demanded of them.
    SDValue Op0 = Op.getOperand(0), Op1 = Op.getOperand(1);
    unsigned NewMaskLZ = NewMask.countLeadingZeros();
    APInt LoMask = APInt::getLowBitsSet(BitWidth, BitWidth - NewMaskLZ);
    if (SimplifyDemandedBits(Op0, LoMask, Known2, TLO, Depth + 1) ||
        SimplifyDemandedBits(Op1, LoMask, Known2, TLO, Depth + 1) ||
        // See if the operation should be performed at a smaller bit width.
        ShrinkDemandedOp(Op, BitWidth, NewMask, TLO)) {
      SDNodeFlags Flags = Op.getNode()->getFlags();
      if (Flags.hasNoSignedWrap() || Flags.hasNoUnsignedWrap()) {
        // Disable the nsw and nuw flags. We can no longer guarantee that we
        // won't wrap after simplification.
        Flags.setNoSignedWrap(false);
        Flags.setNoUnsignedWrap(false);
        SDValue NewOp = TLO.DAG.getNode(Op.getOpcode(), dl, VT, Op0, Op1,
                                        Flags);
        return TLO.CombineTo(Op, NewOp);
      }
      return true;
    }

    // If we have a constant operand, we may be able to turn it into -1 if we
    // do not demand the high bits. This can make the constant smaller to
    // encode, allow more general folding, or match specialized instruction
    // patterns (eg, 'blsr' on x86). Don't bother changing 1 to -1 because that
    // is probably not useful (and could be detrimental).
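    // A worked 8-bit illustration: with NewMask = 0x0F, the constant 15
    // satisfies (C | HighMask) == -1 (0x0F | 0xF0 == 0xFF), so
    //   (add X, 15)  -->  (add X, -1)
    // which agrees on all demanded (low four) bits.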
    ConstantSDNode *C = isConstOrConstSplat(Op1);
    APInt HighMask = APInt::getHighBitsSet(NewMask.getBitWidth(), NewMaskLZ);
    if (C && !C->isAllOnesValue() && !C->isOne() &&
        (C->getAPIntValue() | HighMask).isAllOnesValue()) {
      SDValue Neg1 = TLO.DAG.getAllOnesConstant(dl, VT);
      // We can't guarantee that the new math op doesn't wrap, so explicitly
      // clear those flags to prevent folding with a potential existing node
      // that has those flags set.
      SDNodeFlags Flags;
      Flags.setNoSignedWrap(false);
      Flags.setNoUnsignedWrap(false);
      SDValue NewOp = TLO.DAG.getNode(Op.getOpcode(), dl, VT, Op0, Neg1, Flags);
      return TLO.CombineTo(Op, NewOp);
    }

    LLVM_FALLTHROUGH;
  }
  default:
    // Just use computeKnownBits to compute output bits.
    TLO.DAG.computeKnownBits(Op, Known, Depth);
    break;
  }

  // If we know the value of all of the demanded bits, return this as a
  // constant.
  if (NewMask.isSubsetOf(Known.Zero|Known.One)) {
    // Avoid folding to a constant if any OpaqueConstant is involved.
    const SDNode *N = Op.getNode();
    for (SDNodeIterator I = SDNodeIterator::begin(N),
                        E = SDNodeIterator::end(N); I != E; ++I) {
      SDNode *Op = *I;
      if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op))
        if (C->isOpaque())
          return false;
    }
    return TLO.CombineTo(Op, TLO.DAG.getConstant(Known.One, dl, VT));
  }

  return false;
}

bool TargetLowering::SimplifyDemandedVectorElts(SDValue Op,
                                                const APInt &DemandedElts,
                                                APInt &KnownUndef,
                                                APInt &KnownZero,
                                                DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
                        !DCI.isBeforeLegalizeOps());

  bool Simplified =
      SimplifyDemandedVectorElts(Op, DemandedElts, KnownUndef, KnownZero, TLO);
  if (Simplified)
    DCI.CommitTargetLoweringOpt(TLO);
  return Simplified;
}

bool TargetLowering::SimplifyDemandedVectorElts(
    SDValue Op, const APInt &DemandedEltMask, APInt &KnownUndef,
    APInt &KnownZero, TargetLoweringOpt &TLO, unsigned Depth,
    bool AssumeSingleUse) const {
  EVT VT = Op.getValueType();
  APInt DemandedElts = DemandedEltMask;
  unsigned NumElts = DemandedElts.getBitWidth();
  assert(VT.isVector() && "Expected vector op");
  assert(VT.getVectorNumElements() == NumElts &&
         "Mask size mismatches value type element count!");

  KnownUndef = KnownZero = APInt::getNullValue(NumElts);

  // Undef operand.
  if (Op.isUndef()) {
    KnownUndef.setAllBits();
    return false;
  }

  // If Op has other users, assume that all elements are needed.
  if (!Op.getNode()->hasOneUse() && !AssumeSingleUse)
    DemandedElts.setAllBits();

  // Not demanding any elements from Op.
  if (DemandedElts == 0) {
    KnownUndef.setAllBits();
    return TLO.CombineTo(Op, TLO.DAG.getUNDEF(VT));
  }

  // Limit search depth.
  if (Depth >= 6)
    return false;

  SDLoc DL(Op);
  unsigned EltSizeInBits = VT.getScalarSizeInBits();

  switch (Op.getOpcode()) {
  case ISD::SCALAR_TO_VECTOR: {
    if (!DemandedElts[0]) {
      KnownUndef.setAllBits();
      return TLO.CombineTo(Op, TLO.DAG.getUNDEF(VT));
    }
    KnownUndef.setHighBits(NumElts - 1);
    break;
  }
  case ISD::BITCAST: {
    SDValue Src = Op.getOperand(0);
    EVT SrcVT = Src.getValueType();

    // We only handle vectors here.
    // TODO - investigate calling SimplifyDemandedBits/ComputeKnownBits?
    if (!SrcVT.isVector())
      break;

    // Fast handling of 'identity' bitcasts.
    unsigned NumSrcElts = SrcVT.getVectorNumElements();
    if (NumSrcElts == NumElts)
      return SimplifyDemandedVectorElts(Src, DemandedElts, KnownUndef,
                                        KnownZero, TLO, Depth + 1);

    APInt SrcZero, SrcUndef;
    APInt SrcDemandedElts = APInt::getNullValue(NumSrcElts);

    // Bitcast from 'large element' src vector to 'small element' vector, we
    // must demand a source element if any DemandedElt maps to it.
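    // e.g. for (v4i32 bitcast (v2i64 Src)), Scale = 2, and demanding i32
    // element 1 demands i64 source element 1/2 = 0.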
    if ((NumElts % NumSrcElts) == 0) {
      unsigned Scale = NumElts / NumSrcElts;
      for (unsigned i = 0; i != NumElts; ++i)
        if (DemandedElts[i])
          SrcDemandedElts.setBit(i / Scale);

      if (SimplifyDemandedVectorElts(Src, SrcDemandedElts, SrcUndef, SrcZero,
                                     TLO, Depth + 1))
        return true;

      // If the src element is zero/undef then all the output elements will be -
      // only demanded elements are guaranteed to be correct.
      for (unsigned i = 0; i != NumSrcElts; ++i) {
        if (SrcDemandedElts[i]) {
          if (SrcZero[i])
            KnownZero.setBits(i * Scale, (i + 1) * Scale);
          if (SrcUndef[i])
            KnownUndef.setBits(i * Scale, (i + 1) * Scale);
        }
      }
    }

    // Bitcast from 'small element' src vector to 'large element' vector, we
    // demand all smaller source elements covered by the larger demanded element
    // of this vector.
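    // e.g. for (v2i64 bitcast (v4i32 Src)), Scale = 2, and demanding i64
    // element 1 demands i32 source elements 2 and 3.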
1389 if ((NumSrcElts % NumElts) == 0) {
1390 unsigned Scale = NumSrcElts / NumElts;
1391 for (unsigned i = 0; i != NumElts; ++i)
1392 if (DemandedElts[i])
1393 SrcDemandedElts.setBits(i * Scale, (i + 1) * Scale);
1394
1395 if (SimplifyDemandedVectorElts(Src, SrcDemandedElts, SrcUndef, SrcZero,
1396 TLO, Depth + 1))
1397 return true;
1398
1399 // If all the src elements covering an output element are zero/undef, then
1400 // the output element will be as well, assuming it was demanded.
1401 for (unsigned i = 0; i != NumElts; ++i) {
1402 if (DemandedElts[i]) {
1403 if (SrcZero.extractBits(Scale, i * Scale).isAllOnesValue())
1404 KnownZero.setBit(i);
1405 if (SrcUndef.extractBits(Scale, i * Scale).isAllOnesValue())
1406 KnownUndef.setBit(i);
1407 }
1408 }
1409 }
1410 break;
1411 }
1412 case ISD::BUILD_VECTOR: {
1413 // Check all elements and simplify any unused elements with UNDEF.
1414 if (!DemandedElts.isAllOnesValue()) {
1415 // Don't simplify BROADCASTS.
1416 if (llvm::any_of(Op->op_values(),
1417 [&](SDValue Elt) { return Op.getOperand(0) != Elt; })) {
1418 SmallVector<SDValue, 32> Ops(Op->op_begin(), Op->op_end());
1419 bool Updated = false;
1420 for (unsigned i = 0; i != NumElts; ++i) {
1421 if (!DemandedElts[i] && !Ops[i].isUndef()) {
1422 Ops[i] = TLO.DAG.getUNDEF(Ops[0].getValueType());
1423 KnownUndef.setBit(i);
1424 Updated = true;
1425 }
1426 }
1427 if (Updated)
1428 return TLO.CombineTo(Op, TLO.DAG.getBuildVector(VT, DL, Ops));
1429 }
1430 }
1431 for (unsigned i = 0; i != NumElts; ++i) {
1432 SDValue SrcOp = Op.getOperand(i);
1433 if (SrcOp.isUndef()) {
1434 KnownUndef.setBit(i);
1435 } else if (EltSizeInBits == SrcOp.getScalarValueSizeInBits() &&
1436 (isNullConstant(SrcOp) || isNullFPConstant(SrcOp))) {
1437 KnownZero.setBit(i);
1438 }
1439 }
1440 break;
1441 }
1442 case ISD::CONCAT_VECTORS: {
1443 EVT SubVT = Op.getOperand(0).getValueType();
1444 unsigned NumSubVecs = Op.getNumOperands();
1445 unsigned NumSubElts = SubVT.getVectorNumElements();
1446 for (unsigned i = 0; i != NumSubVecs; ++i) {
1447 SDValue SubOp = Op.getOperand(i);
1448 APInt SubElts = DemandedElts.extractBits(NumSubElts, i * NumSubElts);
1449 APInt SubUndef, SubZero;
1450 if (SimplifyDemandedVectorElts(SubOp, SubElts, SubUndef, SubZero, TLO,
1451 Depth + 1))
1452 return true;
1453 KnownUndef.insertBits(SubUndef, i * NumSubElts);
1454 KnownZero.insertBits(SubZero, i * NumSubElts);
1455 }
1456 break;
1457 }
1458 case ISD::INSERT_SUBVECTOR: {
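// E.g. for (insert_subvector v8i32:%base, v4i32:%sub, 4), demanded
// elements 4-7 map to %sub and all remaining elements map to %base.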
1459 if (!isa<ConstantSDNode>(Op.getOperand(2)))
1460 break;
1461 SDValue Base = Op.getOperand(0);
1462 SDValue Sub = Op.getOperand(1);
1463 EVT SubVT = Sub.getValueType();
1464 unsigned NumSubElts = SubVT.getVectorNumElements();
1465 const APInt& Idx = cast<ConstantSDNode>(Op.getOperand(2))->getAPIntValue();
1466 if (Idx.uge(NumElts - NumSubElts))
1467 break;
1468 unsigned SubIdx = Idx.getZExtValue();
1469 APInt SubElts = DemandedElts.extractBits(NumSubElts, SubIdx);
1470 APInt SubUndef, SubZero;
1471 if (SimplifyDemandedVectorElts(Sub, SubElts, SubUndef, SubZero, TLO,
1472 Depth + 1))
1473 return true;
1474 APInt BaseElts = DemandedElts;
1475 BaseElts.insertBits(APInt::getNullValue(NumSubElts), SubIdx);
1476 if (SimplifyDemandedVectorElts(Base, BaseElts, KnownUndef, KnownZero, TLO,
1477 Depth + 1))
1478 return true;
1479 KnownUndef.insertBits(SubUndef, SubIdx);
1480 KnownZero.insertBits(SubZero, SubIdx);
1481 break;
1482 }
1483 case ISD::EXTRACT_SUBVECTOR: {
1484 if (!isa<ConstantSDNode>(Op.getOperand(1)))
1485 break;
1486 SDValue Src = Op.getOperand(0);
1487 unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
1488 const APInt& Idx = cast<ConstantSDNode>(Op.getOperand(1))->getAPIntValue();
1489 if (Idx.uge(NumSrcElts - NumElts))
1490 break;
1491 // Offset the demanded elts by the subvector index.
1492 uint64_t SubIdx = Idx.getZExtValue();
1493 APInt SrcElts = DemandedElts.zext(NumSrcElts).shl(SubIdx);
1494 APInt SrcUndef, SrcZero;
1495 if (SimplifyDemandedVectorElts(Src, SrcElts, SrcUndef, SrcZero, TLO,
1496 Depth + 1))
1497 return true;
1498 KnownUndef = SrcUndef.extractBits(NumElts, SubIdx);
1499 KnownZero = SrcZero.extractBits(NumElts, SubIdx);
1500 break;
1501 }
1502 case ISD::INSERT_VECTOR_ELT: {
1503 SDValue Vec = Op.getOperand(0);
1504 SDValue Scl = Op.getOperand(1);
1505 auto *CIdx = dyn_cast<ConstantSDNode>(Op.getOperand(2));
1506
1507 // For a legal, constant insertion index, if we don't need this insertion
1508 // then strip it, else remove it from the demanded elts.
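// E.g. (insert_vector_elt %vec, %scl, 2) simplifies to just %vec if
// element 2 is not demanded.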
1509 if (CIdx && CIdx->getAPIntValue().ult(NumElts)) {
1510 unsigned Idx = CIdx->getZExtValue();
1511 if (!DemandedElts[Idx])
1512 return TLO.CombineTo(Op, Vec);
1513 DemandedElts.clearBit(Idx);
1514
1515 if (SimplifyDemandedVectorElts(Vec, DemandedElts, KnownUndef,
1516 KnownZero, TLO, Depth + 1))
1517 return true;
1518
1519 KnownUndef.clearBit(Idx);
1520 if (Scl.isUndef())
1521 KnownUndef.setBit(Idx);
1522
1523 KnownZero.clearBit(Idx);
1524 if (isNullConstant(Scl) || isNullFPConstant(Scl))
1525 KnownZero.setBit(Idx);
1526 break;
1527 }
1528
1529 APInt VecUndef, VecZero;
1530 if (SimplifyDemandedVectorElts(Vec, DemandedElts, VecUndef, VecZero, TLO,
1531 Depth + 1))
1532 return true;
1533 // Without knowing the insertion index we can't set KnownUndef/KnownZero.
1534 break;
1535 }
1536 case ISD::VSELECT: {
1537 APInt DemandedLHS(DemandedElts);
1538 APInt DemandedRHS(DemandedElts);
1539
1540 // TODO - add support for constant vselect masks.
1541
1542 // See if we can simplify either vselect operand.
1543 APInt UndefLHS, ZeroLHS;
1544 APInt UndefRHS, ZeroRHS;
1545 if (SimplifyDemandedVectorElts(Op.getOperand(1), DemandedLHS, UndefLHS,
1546 ZeroLHS, TLO, Depth + 1))
1547 return true;
1548 if (SimplifyDemandedVectorElts(Op.getOperand(2), DemandedRHS, UndefRHS,
1549 ZeroRHS, TLO, Depth + 1))
1550 return true;
1551
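// Without a constant mask either operand may be selected, so a result
// element is only known undef/zero if it is undef/zero in both operands.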
1552 KnownUndef = UndefLHS & UndefRHS;
1553 KnownZero = ZeroLHS & ZeroRHS;
1554 break;
1555 }
1556 case ISD::VECTOR_SHUFFLE: {
1557 ArrayRef<int> ShuffleMask = cast<ShuffleVectorSDNode>(Op)->getMask();
1558
1559 // Collect demanded elements from the shuffle operands.
1560 APInt DemandedLHS(NumElts, 0);
1561 APInt DemandedRHS(NumElts, 0);
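// E.g. for (vector_shuffle %a, %b, <0, 4, 1, 5>) with 4 elements per
// operand, demanding result element 1 demands element 0 of the RHS (%b).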
1562 for (unsigned i = 0; i != NumElts; ++i) {
1563 int M = ShuffleMask[i];
1564 if (M < 0 || !DemandedElts[i])
1565 continue;
1566 assert(0 <= M && M < (int)(2 * NumElts) && "Shuffle index out of range");
1567 if (M < (int)NumElts)
1568 DemandedLHS.setBit(M);
1569 else
1570 DemandedRHS.setBit(M - NumElts);
1571 }
1572
1573 // See if we can simplify either shuffle operand.
1574 APInt UndefLHS, ZeroLHS;
1575 APInt UndefRHS, ZeroRHS;
1576 if (SimplifyDemandedVectorElts(Op.getOperand(0), DemandedLHS, UndefLHS,
1577 ZeroLHS, TLO, Depth + 1))
1578 return true;
1579 if (SimplifyDemandedVectorElts(Op.getOperand(1), DemandedRHS, UndefRHS,
1580 ZeroRHS, TLO, Depth + 1))
1581 return true;
1582
1583 // Simplify mask using undef elements from LHS/RHS.
1584 bool Updated = false;
1585 bool IdentityLHS = true, IdentityRHS = true;
1586 SmallVector<int, 32> NewMask(ShuffleMask.begin(), ShuffleMask.end());
1587 for (unsigned i = 0; i != NumElts; ++i) {
1588 int &M = NewMask[i];
1589 if (M < 0)
1590 continue;
1591 if (!DemandedElts[i] || (M < (int)NumElts && UndefLHS[M]) ||
1592 (M >= (int)NumElts && UndefRHS[M - NumElts])) {
1593 Updated = true;
1594 M = -1;
1595 }
1596 IdentityLHS &= (M < 0) || (M == (int)i);
1597 IdentityRHS &= (M < 0) || ((M - NumElts) == i);
1598 }
1599
1600 // Update legal shuffle masks based on demanded elements, but only if doing
1601 // so won't reduce the mask to an identity shuffle and remove it prematurely.
1602 if (Updated && !IdentityLHS && !IdentityRHS && !TLO.LegalOps &&
1603 isShuffleMaskLegal(NewMask, VT))
1604 return TLO.CombineTo(Op,
1605 TLO.DAG.getVectorShuffle(VT, DL, Op.getOperand(0),
1606 Op.getOperand(1), NewMask));
1607
1608 // Propagate undef/zero elements from LHS/RHS.
1609 for (unsigned i = 0; i != NumElts; ++i) {
1610 int M = ShuffleMask[i];
1611 if (M < 0) {
1612 KnownUndef.setBit(i);
1613 } else if (M < (int)NumElts) {
1614 if (UndefLHS[M])
1615 KnownUndef.setBit(i);
1616 if (ZeroLHS[M])
1617 KnownZero.setBit(i);
1618 } else {
1619 if (UndefRHS[M - NumElts])
1620 KnownUndef.setBit(i);
1621 if (ZeroRHS[M - NumElts])
1622 KnownZero.setBit(i);
1623 }
1624 }
1625 break;
1626 }
1627 case ISD::ADD:
1628 case ISD::SUB: {
1629 APInt SrcUndef, SrcZero;
1630 if (SimplifyDemandedVectorElts(Op.getOperand(1), DemandedElts, SrcUndef,
1631 SrcZero, TLO, Depth + 1))
1632 return true;
1633 if (SimplifyDemandedVectorElts(Op.getOperand(0), DemandedElts, KnownUndef,
1634 KnownZero, TLO, Depth + 1))
1635 return true;
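// An element of an ADD/SUB is only guaranteed zero/undef when the
// corresponding element is zero/undef in both operands (e.g. 0 +/- 0 == 0).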
1636 KnownZero &= SrcZero;
1637 KnownUndef &= SrcUndef;
1638 break;
1639 }
1640 case ISD::TRUNCATE:
1641 if (SimplifyDemandedVectorElts(Op.getOperand(0), DemandedElts, KnownUndef,
1642 KnownZero, TLO, Depth + 1))
1643 return true;
1644 break;
1645 default: {
1646 if (Op.getOpcode() >= ISD::BUILTIN_OP_END)
1647 if (SimplifyDemandedVectorEltsForTargetNode(Op, DemandedElts, KnownUndef,
1648 KnownZero, TLO, Depth))
1649 return true;
1650 break;
1651 }
1652 }
1653
1654 assert((KnownUndef & KnownZero) == 0 && "Elements flagged as undef AND zero");
1655 return false;
1656 }
1657
1658 /// Determine which of the bits specified in Mask are known to be either zero or
1659 /// one and return them in Known.
1660 void TargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
1661 KnownBits &Known,
1662 const APInt &DemandedElts,
1663 const SelectionDAG &DAG,
1664 unsigned Depth) const {
1665 assert((Op.getOpcode() >= ISD::BUILTIN_OP_END ||
1666 Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
1667 Op.getOpcode() == ISD::INTRINSIC_W_CHAIN ||
1668 Op.getOpcode() == ISD::INTRINSIC_VOID) &&
1669 "Should use MaskedValueIsZero if you don't know whether Op"
1670 " is a target node!");
1671 Known.resetAll();
1672 }
1673
1674 void TargetLowering::computeKnownBitsForFrameIndex(const SDValue Op,
1675 KnownBits &Known,
1676 const APInt &DemandedElts,
1677 const SelectionDAG &DAG,
1678 unsigned Depth) const {
1679 assert(isa<FrameIndexSDNode>(Op) && "expected FrameIndex");
1680
1681 if (unsigned Align = DAG.InferPtrAlignment(Op)) {
1682 // The low bits are known zero if the pointer is aligned.
1683 Known.Zero.setLowBits(Log2_32(Align));
1684 }
1685 }
1686
1687 /// This method can be implemented by targets that want to expose additional
1688 /// information about sign bits to the DAG Combiner.
1689 unsigned TargetLowering::ComputeNumSignBitsForTargetNode(SDValue Op,
1690 const APInt &,
1691 const SelectionDAG &,
1692 unsigned Depth) const {
1693 assert((Op.getOpcode() >= ISD::BUILTIN_OP_END ||
1694 Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
1695 Op.getOpcode() == ISD::INTRINSIC_W_CHAIN ||
1696 Op.getOpcode() == ISD::INTRINSIC_VOID) &&
1697 "Should use ComputeNumSignBits if you don't know whether Op"
1698 " is a target node!");
1699 return 1;
1700 }
1701
1702 bool TargetLowering::SimplifyDemandedVectorEltsForTargetNode(
1703 SDValue Op, const APInt &DemandedElts, APInt &KnownUndef, APInt &KnownZero,
1704 TargetLoweringOpt &TLO, unsigned Depth) const {
1705 assert((Op.getOpcode() >= ISD::BUILTIN_OP_END ||
1706 Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
1707 Op.getOpcode() == ISD::INTRINSIC_W_CHAIN ||
1708 Op.getOpcode() == ISD::INTRINSIC_VOID) &&
1709 "Should use SimplifyDemandedVectorElts if you don't know whether Op"
1710 " is a target node!");
1711 return false;
1712 }
1713
1714 // FIXME: Ideally, this would use ISD::isConstantSplatVector(), but that must
1715 // work with truncating build vectors and vectors with elements of less than
1716 // 8 bits.
1717 bool TargetLowering::isConstTrueVal(const SDNode *N) const {
1718 if (!N)
1719 return false;
1720
1721 APInt CVal;
1722 if (auto *CN = dyn_cast<ConstantSDNode>(N)) {
1723 CVal = CN->getAPIntValue();
1724 } else if (auto *BV = dyn_cast<BuildVectorSDNode>(N)) {
1725 auto *CN = BV->getConstantSplatNode();
1726 if (!CN)
1727 return false;
1728
1729 // If this is a truncating build vector, truncate the splat value.
1730 // Otherwise, we may fail to match the expected values below.
1731 unsigned BVEltWidth = BV->getValueType(0).getScalarSizeInBits();
1732 CVal = CN->getAPIntValue();
1733 if (BVEltWidth < CVal.getBitWidth())
1734 CVal = CVal.trunc(BVEltWidth);
1735 } else {
1736 return false;
1737 }
1738
1739 switch (getBooleanContents(N->getValueType(0))) {
1740 case UndefinedBooleanContent:
1741 return CVal[0];
1742 case ZeroOrOneBooleanContent:
1743 return CVal.isOneValue();
1744 case ZeroOrNegativeOneBooleanContent:
1745 return CVal.isAllOnesValue();
1746 }
1747
1748 llvm_unreachable("Invalid boolean contents");
1749 }
1750
1751 bool TargetLowering::isConstFalseVal(const SDNode *N) const {
1752 if (!N)
1753 return false;
1754
1755 const ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N);
1756 if (!CN) {
1757 const BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N);
1758 if (!BV)
1759 return false;
1760
1761 // We are only interested in constant splats; we don't care about undef
1762 // elements when identifying boolean constants, and getConstantSplatNode
1763 // returns null if all of the operands are undef.
1764 CN = BV->getConstantSplatNode();
1765 if (!CN)
1766 return false;
1767 }
1768
1769 if (getBooleanContents(N->getValueType(0)) == UndefinedBooleanContent)
1770 return !CN->getAPIntValue()[0];
1771
1772 return CN->isNullValue();
1773 }
1774
1775 bool TargetLowering::isExtendedTrueVal(const ConstantSDNode *N, EVT VT,
1776 bool SExt) const {
1777 if (VT == MVT::i1)
1778 return N->isOne();
1779
1780 TargetLowering::BooleanContent Cnt = getBooleanContents(VT);
1781 switch (Cnt) {
1782 case TargetLowering::ZeroOrOneBooleanContent:
1783 // An extended value of 1 is always true, unless its original type is i1,
1784 // in which case it will be sign extended to -1.
1785 return (N->isOne() && !SExt) || (SExt && (N->getValueType(0) != MVT::i1));
1786 case TargetLowering::UndefinedBooleanContent:
1787 case TargetLowering::ZeroOrNegativeOneBooleanContent:
1788 return N->isAllOnesValue() && SExt;
1789 }
1790 llvm_unreachable("Unexpected enumeration.");
1791 }
1792
1793 /// This helper function of SimplifySetCC tries to optimize the comparison when
1794 /// either operand of the SetCC node is a bitwise-and instruction.
1795 SDValue TargetLowering::simplifySetCCWithAnd(EVT VT, SDValue N0, SDValue N1,
1796 ISD::CondCode Cond,
1797 DAGCombinerInfo &DCI,
1798 const SDLoc &DL) const {
1799 // Match these patterns in any of their permutations:
1800 // (X & Y) == Y
1801 // (X & Y) != Y
1802 if (N1.getOpcode() == ISD::AND && N0.getOpcode() != ISD::AND)
1803 std::swap(N0, N1);
1804
1805 EVT OpVT = N0.getValueType();
1806 if (N0.getOpcode() != ISD::AND || !OpVT.isInteger() ||
1807 (Cond != ISD::SETEQ && Cond != ISD::SETNE))
1808 return SDValue();
1809
1810 SDValue X, Y;
1811 if (N0.getOperand(0) == N1) {
1812 X = N0.getOperand(1);
1813 Y = N0.getOperand(0);
1814 } else if (N0.getOperand(1) == N1) {
1815 X = N0.getOperand(0);
1816 Y = N0.getOperand(1);
1817 } else {
1818 return SDValue();
1819 }
1820
1821 SelectionDAG &DAG = DCI.DAG;
1822 SDValue Zero = DAG.getConstant(0, DL, OpVT);
1823 if (DAG.isKnownToBeAPowerOfTwo(Y)) {
1824 // Simplify X & Y == Y to X & Y != 0 if Y has exactly one bit set.
1825 // Note that where Y is variable and is known to have at most one bit set
1826 // (for example, if it is Z & 1) we cannot do this; the expressions are not
1827 // equivalent when Y == 0.
1828 Cond = ISD::getSetCCInverse(Cond, /*isInteger=*/true);
1829 if (DCI.isBeforeLegalizeOps() ||
1830 isCondCodeLegal(Cond, N0.getSimpleValueType()))
1831 return DAG.getSetCC(DL, VT, N0, Zero, Cond);
1832 } else if (N0.hasOneUse() && hasAndNotCompare(Y)) {
1833 // If the target supports an 'and-not' or 'and-complement' logic operation,
1834 // try to use that to make a comparison operation more efficient.
1835 // But don't do this transform if the mask is a single bit because there are
1836 // more efficient ways to deal with that case (for example, 'bt' on x86 or
1837 // 'rlwinm' on PPC).
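// E.g. (X & Y) == Y becomes (~X & Y) == 0 below, which a target with an
// and-not instruction (such as ANDN from x86 BMI) can lower to a single
// logic op plus a compare against zero.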
1838
1839 // Bail out if the compare operand that we want to turn into a zero is
1840 // already a zero (otherwise, infinite loop).
1841 auto *YConst = dyn_cast<ConstantSDNode>(Y);
1842 if (YConst && YConst->isNullValue())
1843 return SDValue();
1844
1845 // Transform this into: ~X & Y == 0.
1846 SDValue NotX = DAG.getNOT(SDLoc(X), X, OpVT);
1847 SDValue NewAnd = DAG.getNode(ISD::AND, SDLoc(N0), OpVT, NotX, Y);
1848 return DAG.getSetCC(DL, VT, NewAnd, Zero, Cond);
1849 }
1850
1851 return SDValue();
1852 }
1853
1854 /// There are multiple IR patterns that could be checking whether a certain
1855 /// truncation of a signed number would be lossy or not. The pattern that is
1856 /// best at the IR level may not lower optimally, so we want to unfold it.
1857 /// We are looking for the following pattern (KeptBits is a constant):
1858 ///   (add %x, (1 << (KeptBits-1))) srccond (1 << KeptBits)
1859 /// KeptBits won't be bitwidth(x); that would be constant-folded to true/false.
1860 /// KeptBits also can't be 1; that would have been folded to %x dstcond 0.
1861 /// We will unfold it into the natural trunc+sext pattern:
1862 ///   ((%x << C) a>> C) dstcond %x
1863 /// where C = bitwidth(x) - KeptBits and C u< bitwidth(x).
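/// For example, with i32 %x and KeptBits == 8 this unfolds
///   (add %x, 128) u< 256   into   ((%x << 24) a>> 24) seteq %x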
1864 SDValue TargetLowering::optimizeSetCCOfSignedTruncationCheck(
1865 EVT SCCVT, SDValue N0, SDValue N1, ISD::CondCode Cond, DAGCombinerInfo &DCI,
1866 const SDLoc &DL) const {
1867 // We must be comparing with a constant.
1868 ConstantSDNode *C1;
1869 if (!(C1 = dyn_cast<ConstantSDNode>(N1)))
1870 return SDValue();
1871
1872 // N0 should be: add %x, (1 << (KeptBits-1))
1873 if (N0->getOpcode() != ISD::ADD)
1874 return SDValue();
1875
1876 // And we must be 'add'ing a constant.
1877 ConstantSDNode *C01;
1878 if (!(C01 = dyn_cast<ConstantSDNode>(N0->getOperand(1))))
1879 return SDValue();
1880
1881 SDValue X = N0->getOperand(0);
1882 EVT XVT = X.getValueType();
1883
1884 // Validate constants ...
1885
1886 APInt I1 = C1->getAPIntValue();
1887
1888 ISD::CondCode NewCond;
1889 if (Cond == ISD::CondCode::SETULT) {
1890 NewCond = ISD::CondCode::SETEQ;
1891 } else if (Cond == ISD::CondCode::SETULE) {
1892 NewCond = ISD::CondCode::SETEQ;
1893 // But need to 'canonicalize' the constant.
1894 I1 += 1;
1895 } else if (Cond == ISD::CondCode::SETUGT) {
1896 NewCond = ISD::CondCode::SETNE;
1897 // But need to 'canonicalize' the constant.
1898 I1 += 1;
1899 } else if (Cond == ISD::CondCode::SETUGE) {
1900 NewCond = ISD::CondCode::SETNE;
1901 } else
1902 return SDValue();
1903
1904 const APInt &I01 = C01->getAPIntValue();
1905 // Both of them must be power-of-two, and the constant from setcc is bigger.
1906 if (!(I1.ugt(I01) && I1.isPowerOf2() && I01.isPowerOf2()))
1907 return SDValue();
1908
1909 // They are power-of-two, so which bit is set?
1910 const unsigned KeptBits = I1.logBase2();
1911 const unsigned KeptBitsMinusOne = I01.logBase2();
1912
1913 // The two constants must be adjacent powers of two (I1 == 2 * I01).
1914 if (KeptBits != (KeptBitsMinusOne + 1))
1915 return SDValue();
1916 assert(KeptBits > 0 && KeptBits < XVT.getSizeInBits() && "unreachable");
1917
1918 // We don't want to do this in every single case.
1919 SelectionDAG &DAG = DCI.DAG;
1920 if (!DAG.getTargetLoweringInfo().shouldTransformSignedTruncationCheck(
1921 XVT, KeptBits))
1922 return SDValue();
1923
1924 const unsigned MaskedBits = XVT.getSizeInBits() - KeptBits;
1925 assert(MaskedBits > 0 && MaskedBits < XVT.getSizeInBits() && "unreachable");
1926
1927 // Unfold into: ((%x << C) a>> C) cond %x
1928 // Where 'cond' will be either 'eq' or 'ne'.
1929 SDValue ShiftAmt = DAG.getConstant(MaskedBits, DL, XVT);
1930 SDValue T0 = DAG.getNode(ISD::SHL, DL, XVT, X, ShiftAmt);
1931 SDValue T1 = DAG.getNode(ISD::SRA, DL, XVT, T0, ShiftAmt);
1932 SDValue T2 = DAG.getSetCC(DL, SCCVT, T1, X, NewCond);
1933
1934 return T2;
1935 }
1936
1937 /// Try to simplify a setcc built with the specified operands and cc. If it is
1938 /// unable to simplify it, return a null SDValue.
1939 SDValue TargetLowering::SimplifySetCC(EVT VT, SDValue N0, SDValue N1,
1940 ISD::CondCode Cond, bool foldBooleans,
1941 DAGCombinerInfo &DCI,
1942 const SDLoc &dl) const {
1943 SelectionDAG &DAG = DCI.DAG;
1944 EVT OpVT = N0.getValueType();
1945
1946 // These setcc operations always fold.
1947 switch (Cond) {
1948 default: break;
1949 case ISD::SETFALSE:
1950 case ISD::SETFALSE2: return DAG.getBoolConstant(false, dl, VT, OpVT);
1951 case ISD::SETTRUE:
1952 case ISD::SETTRUE2: return DAG.getBoolConstant(true, dl, VT, OpVT);
1953 }
1954
1955 // Ensure that the constant occurs on the RHS and fold constant comparisons.
1956 // TODO: Handle non-splat vector constants. All undef causes trouble.
1957 ISD::CondCode SwappedCC = ISD::getSetCCSwappedOperands(Cond);
1958 if (isConstOrConstSplat(N0) &&
1959 (DCI.isBeforeLegalizeOps() ||
1960 isCondCodeLegal(SwappedCC, N0.getSimpleValueType())))
1961 return DAG.getSetCC(dl, VT, N1, N0, SwappedCC);
1962
1963 if (auto *N1C = dyn_cast<ConstantSDNode>(N1.getNode())) {
1964 const APInt &C1 = N1C->getAPIntValue();
1965
1966 // If the LHS is '(srl (ctlz x), 5)', the RHS is 0/1, and this is an
1967 // equality comparison, then we're just comparing whether X itself is
1968 // zero.
1969 if (N0.getOpcode() == ISD::SRL && (C1.isNullValue() || C1.isOneValue()) &&
1970 N0.getOperand(0).getOpcode() == ISD::CTLZ &&
1971 N0.getOperand(1).getOpcode() == ISD::Constant) {
1972 const APInt &ShAmt
1973 = cast<ConstantSDNode>(N0.getOperand(1))->getAPIntValue();
1974 if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) &&
1975 ShAmt == Log2_32(N0.getValueSizeInBits())) {
1976 if ((C1 == 0) == (Cond == ISD::SETEQ)) {
1977 // (srl (ctlz x), 5) == 0 -> X != 0
1978 // (srl (ctlz x), 5) != 1 -> X != 0
1979 Cond = ISD::SETNE;
1980 } else {
1981 // (srl (ctlz x), 5) != 0 -> X == 0
1982 // (srl (ctlz x), 5) == 1 -> X == 0
1983 Cond = ISD::SETEQ;
1984 }
1985 SDValue Zero = DAG.getConstant(0, dl, N0.getValueType());
1986 return DAG.getSetCC(dl, VT, N0.getOperand(0).getOperand(0),
1987 Zero, Cond);
1988 }
1989 }
1990
1991 SDValue CTPOP = N0;
1992 // Look through truncs that don't change the value of a ctpop.
1993 if (N0.hasOneUse() && N0.getOpcode() == ISD::TRUNCATE)
1994 CTPOP = N0.getOperand(0);
1995
1996 if (CTPOP.hasOneUse() && CTPOP.getOpcode() == ISD::CTPOP &&
1997 (N0 == CTPOP ||
1998 N0.getValueSizeInBits() > Log2_32_Ceil(CTPOP.getValueSizeInBits()))) {
1999 EVT CTVT = CTPOP.getValueType();
2000 SDValue CTOp = CTPOP.getOperand(0);
2001
2002 // (ctpop x) u< 2 -> (x & x-1) == 0
2003 // (ctpop x) u> 1 -> (x & x-1) != 0
2004 if ((Cond == ISD::SETULT && C1 == 2) || (Cond == ISD::SETUGT && C1 == 1)) {
2005 SDValue Sub = DAG.getNode(ISD::SUB, dl, CTVT, CTOp,
2006 DAG.getConstant(1, dl, CTVT));
2007 SDValue And = DAG.getNode(ISD::AND, dl, CTVT, CTOp, Sub);
2008 ISD::CondCode CC = Cond == ISD::SETULT ? ISD::SETEQ : ISD::SETNE;
2009 return DAG.getSetCC(dl, VT, And, DAG.getConstant(0, dl, CTVT), CC);
2010 }
2011
2012 // TODO: (ctpop x) == 1 -> x && (x & x-1) == 0 iff ctpop is illegal.
2013 }
2014
2015 // (zext x) == C --> x == (trunc C)
2016 // (sext x) == C --> x == (trunc C)
2017 if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) &&
2018 DCI.isBeforeLegalize() && N0->hasOneUse()) {
2019 unsigned MinBits = N0.getValueSizeInBits();
2020 SDValue PreExt;
2021 bool Signed = false;
2022 if (N0->getOpcode() == ISD::ZERO_EXTEND) {
2023 // ZExt
2024 MinBits = N0->getOperand(0).getValueSizeInBits();
2025 PreExt = N0->getOperand(0);
2026 } else if (N0->getOpcode() == ISD::AND) {
2027 // DAGCombine turns costly ZExts into ANDs
2028 if (auto *C = dyn_cast<ConstantSDNode>(N0->getOperand(1)))
2029 if ((C->getAPIntValue()+1).isPowerOf2()) {
2030 MinBits = C->getAPIntValue().countTrailingOnes();
2031 PreExt = N0->getOperand(0);
2032 }
2033 } else if (N0->getOpcode() == ISD::SIGN_EXTEND) {
2034 // SExt
2035 MinBits = N0->getOperand(0).getValueSizeInBits();
2036 PreExt = N0->getOperand(0);
2037 Signed = true;
2038 } else if (auto *LN0 = dyn_cast<LoadSDNode>(N0)) {
2039 // ZEXTLOAD / SEXTLOAD
2040 if (LN0->getExtensionType() == ISD::ZEXTLOAD) {
2041 MinBits = LN0->getMemoryVT().getSizeInBits();
2042 PreExt = N0;
2043 } else if (LN0->getExtensionType() == ISD::SEXTLOAD) {
2044 Signed = true;
2045 MinBits = LN0->getMemoryVT().getSizeInBits();
2046 PreExt = N0;
2047 }
2048 }
2049
2050 // Figure out how many bits we need to preserve this constant.
2051 unsigned ReqdBits = Signed ?
2052 C1.getBitWidth() - C1.getNumSignBits() + 1 :
2053 C1.getActiveBits();
2054
2055 // Make sure we're not losing bits from the constant.
2056 if (MinBits > 0 &&
2057 MinBits < C1.getBitWidth() &&
2058 MinBits >= ReqdBits) {
2059 EVT MinVT = EVT::getIntegerVT(*DAG.getContext(), MinBits);
2060 if (isTypeDesirableForOp(ISD::SETCC, MinVT)) {
2061 // Will get folded away.
2062 SDValue Trunc = DAG.getNode(ISD::TRUNCATE, dl, MinVT, PreExt);
2063 if (MinBits == 1 && C1 == 1)
2064 // Invert the condition.
2065 return DAG.getSetCC(dl, VT, Trunc, DAG.getConstant(0, dl, MVT::i1),
2066 Cond == ISD::SETEQ ? ISD::SETNE : ISD::SETEQ);
2067 SDValue C = DAG.getConstant(C1.trunc(MinBits), dl, MinVT);
2068 return DAG.getSetCC(dl, VT, Trunc, C, Cond);
2069 }
2070
2071 // If truncating the setcc operands is not desirable, we can still
2072 // simplify the expression in some cases:
2073 // setcc ([sz]ext (setcc x, y, cc)), 0, setne) -> setcc (x, y, cc)
2074 // setcc ([sz]ext (setcc x, y, cc)), 0, seteq) -> setcc (x, y, inv(cc))
2075 // setcc (zext (setcc x, y, cc)), 1, setne) -> setcc (x, y, inv(cc))
2076 // setcc (zext (setcc x, y, cc)), 1, seteq) -> setcc (x, y, cc)
2077 // setcc (sext (setcc x, y, cc)), -1, setne) -> setcc (x, y, inv(cc))
2078 // setcc (sext (setcc x, y, cc)), -1, seteq) -> setcc (x, y, cc)
2079 SDValue TopSetCC = N0->getOperand(0);
2080 unsigned N0Opc = N0->getOpcode();
2081 bool SExt = (N0Opc == ISD::SIGN_EXTEND);
2082 if (TopSetCC.getValueType() == MVT::i1 && VT == MVT::i1 &&
2083 TopSetCC.getOpcode() == ISD::SETCC &&
2084 (N0Opc == ISD::ZERO_EXTEND || N0Opc == ISD::SIGN_EXTEND) &&
2085 (isConstFalseVal(N1C) ||
2086 isExtendedTrueVal(N1C, N0->getValueType(0), SExt))) {
2087
2088 bool Inverse = (N1C->isNullValue() && Cond == ISD::SETEQ) ||
2089 (!N1C->isNullValue() && Cond == ISD::SETNE);
2090
2091 if (!Inverse)
2092 return TopSetCC;
2093
2094 ISD::CondCode InvCond = ISD::getSetCCInverse(
2095 cast<CondCodeSDNode>(TopSetCC.getOperand(2))->get(),
2096 TopSetCC.getOperand(0).getValueType().isInteger());
2097 return DAG.getSetCC(dl, VT, TopSetCC.getOperand(0),
2098 TopSetCC.getOperand(1),
2099 InvCond);
2100 }
2101 }
2102 }
2103
2104 // If the LHS is '(and load, const)', the RHS is 0, the test is for
2105 // equality or unsigned, and all 1 bits of the const are in the same
2106 // partial word, see if we can shorten the load.
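// E.g. on a little-endian target, ((load i32 %p) & 0xff00) == 0 can be
// narrowed to testing the single byte loaded from %p+1 against zero.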
2107 if (DCI.isBeforeLegalize() &&
2108 !ISD::isSignedIntSetCC(Cond) &&
2109 N0.getOpcode() == ISD::AND && C1 == 0 &&
2110 N0.getNode()->hasOneUse() &&
2111 isa<LoadSDNode>(N0.getOperand(0)) &&
2112 N0.getOperand(0).getNode()->hasOneUse() &&
2113 isa<ConstantSDNode>(N0.getOperand(1))) {
2114 LoadSDNode *Lod = cast<LoadSDNode>(N0.getOperand(0));
2115 APInt bestMask;
2116 unsigned bestWidth = 0, bestOffset = 0;
2117 if (!Lod->isVolatile() && Lod->isUnindexed()) {
2118 unsigned origWidth = N0.getValueSizeInBits();
2119 unsigned maskWidth = origWidth;
2120 // We can narrow (e.g.) 16-bit extending loads on a 32-bit target to
2121 // 8 bits, but have to be careful to use the in-memory width below.
2122 if (Lod->getExtensionType() != ISD::NON_EXTLOAD)
2123 origWidth = Lod->getMemoryVT().getSizeInBits();
2124 const APInt &Mask =
2125 cast<ConstantSDNode>(N0.getOperand(1))->getAPIntValue();
2126 for (unsigned width = origWidth / 2; width>=8; width /= 2) {
2127 APInt newMask = APInt::getLowBitsSet(maskWidth, width);
2128 for (unsigned offset=0; offset<origWidth/width; offset++) {
2129 if (Mask.isSubsetOf(newMask)) {
2130 if (DAG.getDataLayout().isLittleEndian())
2131 bestOffset = (uint64_t)offset * (width/8);
2132 else
2133 bestOffset = (origWidth/width - offset - 1) * (width/8);
2134 bestMask = Mask.lshr(offset * (width/8) * 8);
2135 bestWidth = width;
2136 break;
2137 }
2138 newMask <<= width;
2139 }
2140 }
2141 }
2142 if (bestWidth) {
2143 EVT newVT = EVT::getIntegerVT(*DAG.getContext(), bestWidth);
2144 if (newVT.isRound()) {
2145 EVT PtrType = Lod->getOperand(1).getValueType();
2146 SDValue Ptr = Lod->getBasePtr();
2147 if (bestOffset != 0)
2148 Ptr = DAG.getNode(ISD::ADD, dl, PtrType, Lod->getBasePtr(),
2149 DAG.getConstant(bestOffset, dl, PtrType));
2150 unsigned NewAlign = MinAlign(Lod->getAlignment(), bestOffset);
2151 SDValue NewLoad = DAG.getLoad(
2152 newVT, dl, Lod->getChain(), Ptr,
2153 Lod->getPointerInfo().getWithOffset(bestOffset), NewAlign);
2154 return DAG.getSetCC(dl, VT,
2155 DAG.getNode(ISD::AND, dl, newVT, NewLoad,
2156 DAG.getConstant(bestMask.trunc(bestWidth),
2157 dl, newVT)),
2158 DAG.getConstant(0LL, dl, newVT), Cond);
2159 }
2160 }
2161 }
2162
2163 // If the LHS is a ZERO_EXTEND, perform the comparison on the input.
2164 if (N0.getOpcode() == ISD::ZERO_EXTEND) {
2165 unsigned InSize = N0.getOperand(0).getValueSizeInBits();
2166
2167 // If the comparison constant has bits in the upper part, the
2168 // zero-extended value could never match.
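// E.g. (zext i8 %x to i32) u> 300 is always false, and
// (zext i8 %x to i32) != 300 is always true.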
2169 if (C1.intersects(APInt::getHighBitsSet(C1.getBitWidth(),
2170 C1.getBitWidth() - InSize))) {
2171 switch (Cond) {
2172 case ISD::SETUGT:
2173 case ISD::SETUGE:
2174 case ISD::SETEQ:
2175 return DAG.getConstant(0, dl, VT);
2176 case ISD::SETULT:
2177 case ISD::SETULE:
2178 case ISD::SETNE:
2179 return DAG.getConstant(1, dl, VT);
2180 case ISD::SETGT:
2181 case ISD::SETGE:
2182 // True if the sign bit of C1 is set.
2183 return DAG.getConstant(C1.isNegative(), dl, VT);
2184 case ISD::SETLT:
2185 case ISD::SETLE:
2186 // True if the sign bit of C1 isn't set.
2187 return DAG.getConstant(C1.isNonNegative(), dl, VT);
2188 default:
2189 break;
2190 }
2191 }
2192
2193 // Otherwise, we can perform the comparison with the low bits.
2194 switch (Cond) {
2195 case ISD::SETEQ:
2196 case ISD::SETNE:
2197 case ISD::SETUGT:
2198 case ISD::SETUGE:
2199 case ISD::SETULT:
2200 case ISD::SETULE: {
2201 EVT newVT = N0.getOperand(0).getValueType();
2202 if (DCI.isBeforeLegalizeOps() ||
2203 (isOperationLegal(ISD::SETCC, newVT) &&
2204 isCondCodeLegal(Cond, newVT.getSimpleVT()))) {
2205 EVT NewSetCCVT =
2206 getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), newVT);
2207 SDValue NewConst = DAG.getConstant(C1.trunc(InSize), dl, newVT);
2208
2209 SDValue NewSetCC = DAG.getSetCC(dl, NewSetCCVT, N0.getOperand(0),
2210 NewConst, Cond);
2211 return DAG.getBoolExtOrTrunc(NewSetCC, dl, VT, N0.getValueType());
2212 }
2213 break;
2214 }
2215 default:
2216 break; // TODO: Be more careful with signed comparisons.
2217 }
2218 } else if (N0.getOpcode() == ISD::SIGN_EXTEND_INREG &&
2219 (Cond == ISD::SETEQ || Cond == ISD::SETNE)) {
2220 EVT ExtSrcTy = cast<VTSDNode>(N0.getOperand(1))->getVT();
2221 unsigned ExtSrcTyBits = ExtSrcTy.getSizeInBits();
2222 EVT ExtDstTy = N0.getValueType();
2223 unsigned ExtDstTyBits = ExtDstTy.getSizeInBits();
2224
2225 // If the constant doesn't fit into the number of bits for the source of
2226 // the sign extension, it is impossible for both sides to be equal.
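// E.g. (sext_inreg %x, i8) == 1000 folds to false, since 1000 needs more
// than 8 bits as a signed value.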
2227 if (C1.getMinSignedBits() > ExtSrcTyBits)
2228 return DAG.getConstant(Cond == ISD::SETNE, dl, VT);
2229
2230 SDValue ZextOp;
2231 EVT Op0Ty = N0.getOperand(0).getValueType();
2232 if (Op0Ty == ExtSrcTy) {
2233 ZextOp = N0.getOperand(0);
2234 } else {
2235 APInt Imm = APInt::getLowBitsSet(ExtDstTyBits, ExtSrcTyBits);
2236 ZextOp = DAG.getNode(ISD::AND, dl, Op0Ty, N0.getOperand(0),
2237 DAG.getConstant(Imm, dl, Op0Ty));
2238 }
2239 if (!DCI.isCalledByLegalizer())
2240 DCI.AddToWorklist(ZextOp.getNode());
2241 // Otherwise, make this a use of a zext.
2242 return DAG.getSetCC(dl, VT, ZextOp,
2243 DAG.getConstant(C1 & APInt::getLowBitsSet(
2244 ExtDstTyBits,
2245 ExtSrcTyBits),
2246 dl, ExtDstTy),
2247 Cond);
2248 } else if ((N1C->isNullValue() || N1C->isOne()) &&
2249 (Cond == ISD::SETEQ || Cond == ISD::SETNE)) {
2250 // SETCC (SETCC), [0|1], [EQ|NE] -> SETCC
2251 if (N0.getOpcode() == ISD::SETCC &&
2252 isTypeLegal(VT) && VT.bitsLE(N0.getValueType())) {
2253 bool TrueWhenTrue = (Cond == ISD::SETEQ) ^ (!N1C->isOne());
2254 if (TrueWhenTrue)
2255 return DAG.getNode(ISD::TRUNCATE, dl, VT, N0);
2256 // Invert the condition.
2257 ISD::CondCode CC = cast<CondCodeSDNode>(N0.getOperand(2))->get();
2258 CC = ISD::getSetCCInverse(CC,
2259 N0.getOperand(0).getValueType().isInteger());
2260 if (DCI.isBeforeLegalizeOps() ||
2261 isCondCodeLegal(CC, N0.getOperand(0).getSimpleValueType()))
2262 return DAG.getSetCC(dl, VT, N0.getOperand(0), N0.getOperand(1), CC);
2263 }
2264
2265 if ((N0.getOpcode() == ISD::XOR ||
2266 (N0.getOpcode() == ISD::AND &&
2267 N0.getOperand(0).getOpcode() == ISD::XOR &&
2268 N0.getOperand(1) == N0.getOperand(0).getOperand(1))) &&
2269 isa<ConstantSDNode>(N0.getOperand(1)) &&
2270 cast<ConstantSDNode>(N0.getOperand(1))->isOne()) {
2271 // If this is (X^1) == 0/1, swap the RHS and eliminate the xor. We
2272 // can only do this if the top bits are known zero.
2273 unsigned BitWidth = N0.getValueSizeInBits();
2274 if (DAG.MaskedValueIsZero(N0,
2275 APInt::getHighBitsSet(BitWidth,
2276 BitWidth-1))) {
2277 // Okay, get the un-inverted input value.
2278 SDValue Val;
2279 if (N0.getOpcode() == ISD::XOR) {
2280 Val = N0.getOperand(0);
2281 } else {
2282 assert(N0.getOpcode() == ISD::AND &&
2283 N0.getOperand(0).getOpcode() == ISD::XOR);
2284 // ((X^1)&1)^1 -> X & 1
2285 Val = DAG.getNode(ISD::AND, dl, N0.getValueType(),
2286 N0.getOperand(0).getOperand(0),
2287 N0.getOperand(1));
2288 }
2289
2290 return DAG.getSetCC(dl, VT, Val, N1,
2291 Cond == ISD::SETEQ ? ISD::SETNE : ISD::SETEQ);
2292 }
2293 } else if (N1C->isOne() &&
2294 (VT == MVT::i1 ||
2295 getBooleanContents(N0->getValueType(0)) ==
2296 ZeroOrOneBooleanContent)) {
2297 SDValue Op0 = N0;
2298 if (Op0.getOpcode() == ISD::TRUNCATE)
2299 Op0 = Op0.getOperand(0);
2300
2301 if ((Op0.getOpcode() == ISD::XOR) &&
2302 Op0.getOperand(0).getOpcode() == ISD::SETCC &&
2303 Op0.getOperand(1).getOpcode() == ISD::SETCC) {
2304 // (xor (setcc), (setcc)) == / != 1 -> (setcc) != / == (setcc)
2305 Cond = (Cond == ISD::SETEQ) ? ISD::SETNE : ISD::SETEQ;
2306 return DAG.getSetCC(dl, VT, Op0.getOperand(0), Op0.getOperand(1),
2307 Cond);
2308 }
2309 if (Op0.getOpcode() == ISD::AND &&
2310 isa<ConstantSDNode>(Op0.getOperand(1)) &&
2311 cast<ConstantSDNode>(Op0.getOperand(1))->isOne()) {
2312 // If this is (X&1) == / != 1, normalize it to (X&1) != / == 0.
2313 if (Op0.getValueType().bitsGT(VT))
2314 Op0 = DAG.getNode(ISD::AND, dl, VT,
2315 DAG.getNode(ISD::TRUNCATE, dl, VT, Op0.getOperand(0)),
2316 DAG.getConstant(1, dl, VT));
2317 else if (Op0.getValueType().bitsLT(VT))
2318 Op0 = DAG.getNode(ISD::AND, dl, VT,
2319 DAG.getNode(ISD::ANY_EXTEND, dl, VT, Op0.getOperand(0)),
2320 DAG.getConstant(1, dl, VT));
2321
2322 return DAG.getSetCC(dl, VT, Op0,
2323 DAG.getConstant(0, dl, Op0.getValueType()),
2324 Cond == ISD::SETEQ ? ISD::SETNE : ISD::SETEQ);
2325 }
2326 if (Op0.getOpcode() == ISD::AssertZext &&
2327 cast<VTSDNode>(Op0.getOperand(1))->getVT() == MVT::i1)
2328 return DAG.getSetCC(dl, VT, Op0,
2329 DAG.getConstant(0, dl, Op0.getValueType()),
2330 Cond == ISD::SETEQ ? ISD::SETNE : ISD::SETEQ);
2331 }
2332 }
2333
2334 if (SDValue V =
2335 optimizeSetCCOfSignedTruncationCheck(VT, N0, N1, Cond, DCI, dl))
2336 return V;
2337 }
2338
2339 // These simplifications apply to splat vectors as well.
2340 // TODO: Handle more splat vector cases.
2341 if (auto *N1C = isConstOrConstSplat(N1)) {
2342 const APInt &C1 = N1C->getAPIntValue();
2343
2344 APInt MinVal, MaxVal;
2345 unsigned OperandBitSize = N1C->getValueType(0).getScalarSizeInBits();
2346 if (ISD::isSignedIntSetCC(Cond)) {
2347 MinVal = APInt::getSignedMinValue(OperandBitSize);
2348 MaxVal = APInt::getSignedMaxValue(OperandBitSize);
2349 } else {
2350 MinVal = APInt::getMinValue(OperandBitSize);
2351 MaxVal = APInt::getMaxValue(OperandBitSize);
2352 }
2353
2354 // Canonicalize GE/LE comparisons to use GT/LT comparisons.
2355 if (Cond == ISD::SETGE || Cond == ISD::SETUGE) {
2356 // X >= MIN --> true
2357 if (C1 == MinVal)
2358 return DAG.getBoolConstant(true, dl, VT, OpVT);
2359
2360 if (!VT.isVector()) { // TODO: Support this for vectors.
2361 // X >= C0 --> X > (C0 - 1)
2362 APInt C = C1 - 1;
2363 ISD::CondCode NewCC = (Cond == ISD::SETGE) ? ISD::SETGT : ISD::SETUGT;
2364 if ((DCI.isBeforeLegalizeOps() ||
2365 isCondCodeLegal(NewCC, VT.getSimpleVT())) &&
2366 (!N1C->isOpaque() || (C.getBitWidth() <= 64 &&
2367 isLegalICmpImmediate(C.getSExtValue())))) {
2368 return DAG.getSetCC(dl, VT, N0,
2369 DAG.getConstant(C, dl, N1.getValueType()),
2370 NewCC);
2371 }
2372 }
2373 }
2374
2375 if (Cond == ISD::SETLE || Cond == ISD::SETULE) {
2376 // X <= MAX --> true
2377 if (C1 == MaxVal)
2378 return DAG.getBoolConstant(true, dl, VT, OpVT);
2379
2380 // X <= C0 --> X < (C0 + 1)
2381 if (!VT.isVector()) { // TODO: Support this for vectors.
2382 APInt C = C1 + 1;
2383 ISD::CondCode NewCC = (Cond == ISD::SETLE) ? ISD::SETLT : ISD::SETULT;
2384 if ((DCI.isBeforeLegalizeOps() ||
2385 isCondCodeLegal(NewCC, VT.getSimpleVT())) &&
2386 (!N1C->isOpaque() || (C.getBitWidth() <= 64 &&
2387 isLegalICmpImmediate(C.getSExtValue())))) {
2388 return DAG.getSetCC(dl, VT, N0,
2389 DAG.getConstant(C, dl, N1.getValueType()),
2390 NewCC);
2391 }
2392 }
2393 }
2394
2395 if (Cond == ISD::SETLT || Cond == ISD::SETULT) {
2396 if (C1 == MinVal)
2397 return DAG.getBoolConstant(false, dl, VT, OpVT); // X < MIN --> false
2398
2399 // TODO: Support this for vectors after legalize ops.
2400 if (!VT.isVector() || DCI.isBeforeLegalizeOps()) {
2401 // Canonicalize setlt X, Max --> setne X, Max
2402 if (C1 == MaxVal)
2403 return DAG.getSetCC(dl, VT, N0, N1, ISD::SETNE);
2404
2405 // If we have setult X, 1, turn it into seteq X, 0
2406 if (C1 == MinVal+1)
2407 return DAG.getSetCC(dl, VT, N0,
2408 DAG.getConstant(MinVal, dl, N0.getValueType()),
2409 ISD::SETEQ);
2410 }
2411 }
2412
2413 if (Cond == ISD::SETGT || Cond == ISD::SETUGT) {
2414 if (C1 == MaxVal)
2415 return DAG.getBoolConstant(false, dl, VT, OpVT); // X > MAX --> false
2416
2417 // TODO: Support this for vectors after legalize ops.
2418 if (!VT.isVector() || DCI.isBeforeLegalizeOps()) {
2419 // Canonicalize setgt X, Min --> setne X, Min
2420 if (C1 == MinVal)
2421 return DAG.getSetCC(dl, VT, N0, N1, ISD::SETNE);
2422
2423 // If we have setugt X, Max-1, turn it into seteq X, Max
2424 if (C1 == MaxVal-1)
2425 return DAG.getSetCC(dl, VT, N0,
2426 DAG.getConstant(MaxVal, dl, N0.getValueType()),
2427 ISD::SETEQ);
2428 }
2429 }
2430
2431 // If we have "setcc X, C0", check to see if we can shrink the immediate
2432 // by changing cc.
2433 // TODO: Support this for vectors after legalize ops.
2434 if (!VT.isVector() || DCI.isBeforeLegalizeOps()) {
2435 // SETUGT X, SINTMAX -> SETLT X, 0
2436 if (Cond == ISD::SETUGT &&
2437 C1 == APInt::getSignedMaxValue(OperandBitSize))
2438 return DAG.getSetCC(dl, VT, N0,
2439 DAG.getConstant(0, dl, N1.getValueType()),
2440 ISD::SETLT);
2441
2442 // SETULT X, SINTMIN -> SETGT X, -1
2443 if (Cond == ISD::SETULT &&
2444 C1 == APInt::getSignedMinValue(OperandBitSize)) {
2445 SDValue ConstMinusOne =
2446 DAG.getConstant(APInt::getAllOnesValue(OperandBitSize), dl,
2447 N1.getValueType());
2448 return DAG.getSetCC(dl, VT, N0, ConstMinusOne, ISD::SETGT);
2449 }
2450 }
2451 }
2452
2453 // Back to non-vector simplifications.
2454 // TODO: Can we do these for vector splats?
2455 if (auto *N1C = dyn_cast<ConstantSDNode>(N1.getNode())) {
2456 const APInt &C1 = N1C->getAPIntValue();
2457
2458 // Fold bit comparisons when we can.
2459 if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) &&
2460 (VT == N0.getValueType() ||
2461 (isTypeLegal(VT) && VT.bitsLE(N0.getValueType()))) &&
2462 N0.getOpcode() == ISD::AND) {
2463 auto &DL = DAG.getDataLayout();
2464 if (auto *AndRHS = dyn_cast<ConstantSDNode>(N0.getOperand(1))) {
2465 EVT ShiftTy = getShiftAmountTy(N0.getValueType(), DL,
2466 !DCI.isBeforeLegalize());
2467 if (Cond == ISD::SETNE && C1 == 0) { // (X & 8) != 0 --> (X & 8) >> 3
2468 // Perform the xform if the AND RHS is a single bit.
2469 if (AndRHS->getAPIntValue().isPowerOf2()) {
2470 return DAG.getNode(ISD::TRUNCATE, dl, VT,
2471 DAG.getNode(ISD::SRL, dl, N0.getValueType(), N0,
2472 DAG.getConstant(AndRHS->getAPIntValue().logBase2(), dl,
2473 ShiftTy)));
2474 }
2475 } else if (Cond == ISD::SETEQ && C1 == AndRHS->getAPIntValue()) {
2476 // (X & 8) == 8 --> (X & 8) >> 3
2477 // Perform the xform if C1 is a single bit.
2478 if (C1.isPowerOf2()) {
2479 return DAG.getNode(ISD::TRUNCATE, dl, VT,
2480 DAG.getNode(ISD::SRL, dl, N0.getValueType(), N0,
2481 DAG.getConstant(C1.logBase2(), dl,
2482 ShiftTy)));
2483 }
2484 }
2485 }
2486 }
2487
2488 if (C1.getMinSignedBits() <= 64 &&
2489 !isLegalICmpImmediate(C1.getSExtValue())) {
2490 // (X & -256) == 256 -> (X >> 8) == 1
2491 if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) &&
2492 N0.getOpcode() == ISD::AND && N0.hasOneUse()) {
2493 if (auto *AndRHS = dyn_cast<ConstantSDNode>(N0.getOperand(1))) {
2494 const APInt &AndRHSC = AndRHS->getAPIntValue();
2495 if ((-AndRHSC).isPowerOf2() && (AndRHSC & C1) == C1) {
2496 unsigned ShiftBits = AndRHSC.countTrailingZeros();
2497 auto &DL = DAG.getDataLayout();
2498 EVT ShiftTy = getShiftAmountTy(N0.getValueType(), DL,
2499 !DCI.isBeforeLegalize());
2500 EVT CmpTy = N0.getValueType();
2501 SDValue Shift = DAG.getNode(ISD::SRL, dl, CmpTy, N0.getOperand(0),
2502 DAG.getConstant(ShiftBits, dl,
2503 ShiftTy));
2504 SDValue CmpRHS = DAG.getConstant(C1.lshr(ShiftBits), dl, CmpTy);
2505 return DAG.getSetCC(dl, VT, Shift, CmpRHS, Cond);
2506 }
2507 }
2508 } else if (Cond == ISD::SETULT || Cond == ISD::SETUGE ||
2509 Cond == ISD::SETULE || Cond == ISD::SETUGT) {
2510 bool AdjOne = (Cond == ISD::SETULE || Cond == ISD::SETUGT);
2511 // X < 0x100000000 -> (X >> 32) < 1
2512 // X >= 0x100000000 -> (X >> 32) >= 1
2513 // X <= 0x0ffffffff -> (X >> 32) < 1
2514 // X > 0x0ffffffff -> (X >> 32) >= 1
2515 unsigned ShiftBits;
2516 APInt NewC = C1;
2517 ISD::CondCode NewCond = Cond;
2518 if (AdjOne) {
2519 ShiftBits = C1.countTrailingOnes();
2520 NewC = NewC + 1;
2521 NewCond = (Cond == ISD::SETULE) ? ISD::SETULT : ISD::SETUGE;
2522 } else {
2523 ShiftBits = C1.countTrailingZeros();
2524 }
2525 NewC.lshrInPlace(ShiftBits);
2526 if (ShiftBits && NewC.getMinSignedBits() <= 64 &&
2527 isLegalICmpImmediate(NewC.getSExtValue())) {
2528 auto &DL = DAG.getDataLayout();
2529 EVT ShiftTy = getShiftAmountTy(N0.getValueType(), DL,
2530 !DCI.isBeforeLegalize());
2531 EVT CmpTy = N0.getValueType();
2532 SDValue Shift = DAG.getNode(ISD::SRL, dl, CmpTy, N0,
2533 DAG.getConstant(ShiftBits, dl, ShiftTy));
2534 SDValue CmpRHS = DAG.getConstant(NewC, dl, CmpTy);
2535 return DAG.getSetCC(dl, VT, Shift, CmpRHS, NewCond);
2536 }
2537 }
2538 }
2539 }
2540
2541 if (isa<ConstantFPSDNode>(N0.getNode())) {
2542 // Constant fold or commute setcc.
2543 SDValue O = DAG.FoldSetCC(VT, N0, N1, Cond, dl);
2544 if (O.getNode()) return O;
2545 } else if (auto *CFP = dyn_cast<ConstantFPSDNode>(N1.getNode())) {
2546 // If the RHS of an FP comparison is a constant, simplify it away in
2547 // some cases.
2548 if (CFP->getValueAPF().isNaN()) {
2549 // If an operand is known to be a nan, we can fold it.
2550 switch (ISD::getUnorderedFlavor(Cond)) {
2551 default: llvm_unreachable("Unknown flavor!");
2552 case 0: // Known false.
2553 return DAG.getBoolConstant(false, dl, VT, OpVT);
2554 case 1: // Known true.
2555 return DAG.getBoolConstant(true, dl, VT, OpVT);
2556 case 2: // Undefined.
2557 return DAG.getUNDEF(VT);
2558 }
2559 }
2560
2561 // Otherwise, we know the RHS is not a NaN. Simplify the node to drop the
2562 // constant if knowing that the operand is non-nan is enough. We prefer to
2563 // have SETO(x,x) instead of SETO(x, 0.0) because this avoids having to
2564 // materialize 0.0.
2565 if (Cond == ISD::SETO || Cond == ISD::SETUO)
2566 return DAG.getSetCC(dl, VT, N0, N0, Cond);
2567
2568 // setcc (fneg x), C -> setcc swap(pred) x, -C
2569 if (N0.getOpcode() == ISD::FNEG) {
2570 ISD::CondCode SwapCond = ISD::getSetCCSwappedOperands(Cond);
2571 if (DCI.isBeforeLegalizeOps() ||
2572 isCondCodeLegal(SwapCond, N0.getSimpleValueType())) {
2573 SDValue NegN1 = DAG.getNode(ISD::FNEG, dl, N0.getValueType(), N1);
2574 return DAG.getSetCC(dl, VT, N0.getOperand(0), NegN1, SwapCond);
2575 }
2576 }
2577
2578 // If the condition is not legal, see if we can find an equivalent one
2579 // which is legal.
2580 if (!isCondCodeLegal(Cond, N0.getSimpleValueType())) {
2581 // If the comparison was an awkward floating-point == or != and one of
2582 // the comparison operands is infinity or negative infinity, convert the
2583 // condition to a less-awkward <= or >=.
2584 if (CFP->getValueAPF().isInfinity()) {
2585 if (CFP->getValueAPF().isNegative()) {
2586 if (Cond == ISD::SETOEQ &&
2587 isCondCodeLegal(ISD::SETOLE, N0.getSimpleValueType()))
2588 return DAG.getSetCC(dl, VT, N0, N1, ISD::SETOLE);
2589 if (Cond == ISD::SETUEQ &&
2590 isCondCodeLegal(ISD::SETULE, N0.getSimpleValueType()))
2591 return DAG.getSetCC(dl, VT, N0, N1, ISD::SETULE);
2592 if (Cond == ISD::SETUNE &&
2593 isCondCodeLegal(ISD::SETUGT, N0.getSimpleValueType()))
2594 return DAG.getSetCC(dl, VT, N0, N1, ISD::SETUGT);
2595 if (Cond == ISD::SETONE &&
2596 isCondCodeLegal(ISD::SETOGT, N0.getSimpleValueType()))
2597 return DAG.getSetCC(dl, VT, N0, N1, ISD::SETOGT);
2598 } else {
2599 if (Cond == ISD::SETOEQ &&
2600 isCondCodeLegal(ISD::SETOGE, N0.getSimpleValueType()))
2601 return DAG.getSetCC(dl, VT, N0, N1, ISD::SETOGE);
2602 if (Cond == ISD::SETUEQ &&
2603 isCondCodeLegal(ISD::SETUGE, N0.getSimpleValueType()))
2604 return DAG.getSetCC(dl, VT, N0, N1, ISD::SETUGE);
2605 if (Cond == ISD::SETUNE &&
2606 isCondCodeLegal(ISD::SETULT, N0.getSimpleValueType()))
2607 return DAG.getSetCC(dl, VT, N0, N1, ISD::SETULT);
2608 if (Cond == ISD::SETONE &&
2609 isCondCodeLegal(ISD::SETOLT, N0.getSimpleValueType()))
2610 return DAG.getSetCC(dl, VT, N0, N1, ISD::SETOLT);
2611 }
2612 }
2613 }
2614 }
2615
2616 if (N0 == N1) {
2617 // The sext(setcc()) => setcc() optimization relies on the appropriate
2618 // constant being emitted.
2619
2620 bool EqTrue = ISD::isTrueWhenEqual(Cond);
2621
2622 // We can always fold X == X for integer setcc's.
2623 if (N0.getValueType().isInteger())
2624 return DAG.getBoolConstant(EqTrue, dl, VT, OpVT);
2625
2626 unsigned UOF = ISD::getUnorderedFlavor(Cond);
2627 if (UOF == 2) // FP operators that are undefined on NaNs.
2628 return DAG.getBoolConstant(EqTrue, dl, VT, OpVT);
2629 if (UOF == unsigned(EqTrue))
2630 return DAG.getBoolConstant(EqTrue, dl, VT, OpVT);
2631 // Otherwise, we can't fold it. However, we can simplify it to SETUO/SETO
2632 // if it is not already.
2633 ISD::CondCode NewCond = UOF == 0 ? ISD::SETO : ISD::SETUO;
2634 if (NewCond != Cond &&
2635 (DCI.isBeforeLegalizeOps() ||
2636 isCondCodeLegal(NewCond, N0.getSimpleValueType())))
2637 return DAG.getSetCC(dl, VT, N0, N1, NewCond);
2638 }
2639
2640 if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) &&
2641 N0.getValueType().isInteger()) {
2642 if (N0.getOpcode() == ISD::ADD || N0.getOpcode() == ISD::SUB ||
2643 N0.getOpcode() == ISD::XOR) {
2644 // Simplify (X+Y) == (X+Z) --> Y == Z
2645 if (N0.getOpcode() == N1.getOpcode()) {
2646 if (N0.getOperand(0) == N1.getOperand(0))
2647 return DAG.getSetCC(dl, VT, N0.getOperand(1), N1.getOperand(1), Cond);
2648 if (N0.getOperand(1) == N1.getOperand(1))
2649 return DAG.getSetCC(dl, VT, N0.getOperand(0), N1.getOperand(0), Cond);
2650 if (isCommutativeBinOp(N0.getOpcode())) {
2651 // If X op Y == Y op X, try other combinations.
2652 if (N0.getOperand(0) == N1.getOperand(1))
2653 return DAG.getSetCC(dl, VT, N0.getOperand(1), N1.getOperand(0),
2654 Cond);
2655 if (N0.getOperand(1) == N1.getOperand(0))
2656 return DAG.getSetCC(dl, VT, N0.getOperand(0), N1.getOperand(1),
2657 Cond);
2658 }
2659 }
2660
2661 // If RHS is a legal immediate value for a compare instruction, we need
2662 // to be careful about increasing register pressure needlessly.
2663 bool LegalRHSImm = false;
2664
2665 if (auto *RHSC = dyn_cast<ConstantSDNode>(N1)) {
2666 if (auto *LHSR = dyn_cast<ConstantSDNode>(N0.getOperand(1))) {
2667 // Turn (X+C1) == C2 --> X == C2-C1
2668 if (N0.getOpcode() == ISD::ADD && N0.getNode()->hasOneUse()) {
2669 return DAG.getSetCC(dl, VT, N0.getOperand(0),
2670 DAG.getConstant(RHSC->getAPIntValue()-
2671 LHSR->getAPIntValue(),
2672 dl, N0.getValueType()), Cond);
2673 }
2674
2675 // Turn (X^C1) == C2 into X == C1^C2 iff X&~C1 = 0.
2676 if (N0.getOpcode() == ISD::XOR)
2677 // If we know that all of the inverted bits are zero, don't bother
2678 // performing the inversion.
2679 if (DAG.MaskedValueIsZero(N0.getOperand(0), ~LHSR->getAPIntValue()))
2680 return
2681 DAG.getSetCC(dl, VT, N0.getOperand(0),
2682 DAG.getConstant(LHSR->getAPIntValue() ^
2683 RHSC->getAPIntValue(),
2684 dl, N0.getValueType()),
2685 Cond);
2686 }
2687
2688 // Turn (C1-X) == C2 --> X == C1-C2
2689 if (auto *SUBC = dyn_cast<ConstantSDNode>(N0.getOperand(0))) {
2690 if (N0.getOpcode() == ISD::SUB && N0.getNode()->hasOneUse()) {
2691 return
2692 DAG.getSetCC(dl, VT, N0.getOperand(1),
2693 DAG.getConstant(SUBC->getAPIntValue() -
2694 RHSC->getAPIntValue(),
2695 dl, N0.getValueType()),
2696 Cond);
2697 }
2698 }
2699
2700 // Could RHSC fold directly into a compare?
2701 if (RHSC->getValueType(0).getSizeInBits() <= 64)
2702 LegalRHSImm = isLegalICmpImmediate(RHSC->getSExtValue());
2703 }
2704
2705 // Simplify (X+Z) == X --> Z == 0
2706 // Don't do this if X is an immediate that can fold into a cmp
2707 // instruction and X+Z has other uses. It could be an induction variable
2708 // chain, and the transform would increase register pressure.
2709 if (!LegalRHSImm || N0.getNode()->hasOneUse()) {
2710 if (N0.getOperand(0) == N1)
2711 return DAG.getSetCC(dl, VT, N0.getOperand(1),
2712 DAG.getConstant(0, dl, N0.getValueType()), Cond);
2713 if (N0.getOperand(1) == N1) {
2714 if (isCommutativeBinOp(N0.getOpcode()))
2715 return DAG.getSetCC(dl, VT, N0.getOperand(0),
2716 DAG.getConstant(0, dl, N0.getValueType()),
2717 Cond);
2718 if (N0.getNode()->hasOneUse()) {
2719 assert(N0.getOpcode() == ISD::SUB && "Unexpected operation!");
2720 auto &DL = DAG.getDataLayout();
2721 // (Z-X) == X --> Z == X<<1
2722 SDValue SH = DAG.getNode(
2723 ISD::SHL, dl, N1.getValueType(), N1,
2724 DAG.getConstant(1, dl,
2725 getShiftAmountTy(N1.getValueType(), DL,
2726 !DCI.isBeforeLegalize())));
2727 if (!DCI.isCalledByLegalizer())
2728 DCI.AddToWorklist(SH.getNode());
2729 return DAG.getSetCC(dl, VT, N0.getOperand(0), SH, Cond);
2730 }
2731 }
2732 }
2733 }
2734
2735 if (N1.getOpcode() == ISD::ADD || N1.getOpcode() == ISD::SUB ||
2736 N1.getOpcode() == ISD::XOR) {
2737 // Simplify X == (X+Z) --> Z == 0
2738 if (N1.getOperand(0) == N0)
2739 return DAG.getSetCC(dl, VT, N1.getOperand(1),
2740 DAG.getConstant(0, dl, N1.getValueType()), Cond);
2741 if (N1.getOperand(1) == N0) {
2742 if (isCommutativeBinOp(N1.getOpcode()))
2743 return DAG.getSetCC(dl, VT, N1.getOperand(0),
2744 DAG.getConstant(0, dl, N1.getValueType()), Cond);
2745 if (N1.getNode()->hasOneUse()) {
2746 assert(N1.getOpcode() == ISD::SUB && "Unexpected operation!");
2747 auto &DL = DAG.getDataLayout();
2748 // X == (Z-X) --> X<<1 == Z
2749 SDValue SH = DAG.getNode(
2750 ISD::SHL, dl, N1.getValueType(), N0,
2751 DAG.getConstant(1, dl, getShiftAmountTy(N0.getValueType(), DL,
2752 !DCI.isBeforeLegalize())));
2753 if (!DCI.isCalledByLegalizer())
2754 DCI.AddToWorklist(SH.getNode());
2755 return DAG.getSetCC(dl, VT, SH, N1.getOperand(0), Cond);
2756 }
2757 }
2758 }
2759
2760 if (SDValue V = simplifySetCCWithAnd(VT, N0, N1, Cond, DCI, dl))
2761 return V;
2762 }
2763
2764 // Fold away ALL boolean setcc's.
2765 SDValue Temp;
2766 if (N0.getValueType().getScalarType() == MVT::i1 && foldBooleans) {
2767 EVT OpVT = N0.getValueType();
2768 switch (Cond) {
2769 default: llvm_unreachable("Unknown integer setcc!");
2770 case ISD::SETEQ: // X == Y -> ~(X^Y)
2771 Temp = DAG.getNode(ISD::XOR, dl, OpVT, N0, N1);
2772 N0 = DAG.getNOT(dl, Temp, OpVT);
2773 if (!DCI.isCalledByLegalizer())
2774 DCI.AddToWorklist(Temp.getNode());
2775 break;
2776 case ISD::SETNE: // X != Y --> (X^Y)
2777 N0 = DAG.getNode(ISD::XOR, dl, OpVT, N0, N1);
2778 break;
2779 case ISD::SETGT: // X >s Y --> X == 0 & Y == 1 --> ~X & Y
2780 case ISD::SETULT: // X <u Y --> X == 0 & Y == 1 --> ~X & Y
2781 Temp = DAG.getNOT(dl, N0, OpVT);
2782 N0 = DAG.getNode(ISD::AND, dl, OpVT, N1, Temp);
2783 if (!DCI.isCalledByLegalizer())
2784 DCI.AddToWorklist(Temp.getNode());
2785 break;
2786 case ISD::SETLT: // X <s Y --> X == 1 & Y == 0 --> ~Y & X
2787 case ISD::SETUGT: // X >u Y --> X == 1 & Y == 0 --> ~Y & X
2788 Temp = DAG.getNOT(dl, N1, OpVT);
2789 N0 = DAG.getNode(ISD::AND, dl, OpVT, N0, Temp);
2790 if (!DCI.isCalledByLegalizer())
2791 DCI.AddToWorklist(Temp.getNode());
2792 break;
2793 case ISD::SETULE: // X <=u Y --> X == 0 | Y == 1 --> ~X | Y
2794 case ISD::SETGE: // X >=s Y --> X == 0 | Y == 1 --> ~X | Y
2795 Temp = DAG.getNOT(dl, N0, OpVT);
2796 N0 = DAG.getNode(ISD::OR, dl, OpVT, N1, Temp);
2797 if (!DCI.isCalledByLegalizer())
2798 DCI.AddToWorklist(Temp.getNode());
2799 break;
2800 case ISD::SETUGE: // X >=u Y --> X == 1 | Y == 0 --> ~Y | X
2801 case ISD::SETLE: // X <=s Y --> X == 1 | Y == 0 --> ~Y | X
2802 Temp = DAG.getNOT(dl, N1, OpVT);
2803 N0 = DAG.getNode(ISD::OR, dl, OpVT, N0, Temp);
2804 break;
2805 }
2806 if (VT.getScalarType() != MVT::i1) {
2807 if (!DCI.isCalledByLegalizer())
2808 DCI.AddToWorklist(N0.getNode());
2809 // FIXME: If running after legalize, we probably can't do this.
2810 ISD::NodeType ExtendCode = getExtendForContent(getBooleanContents(OpVT));
2811 N0 = DAG.getNode(ExtendCode, dl, VT, N0);
2812 }
2813 return N0;
2814 }
2815
2816 // Could not fold it.
2817 return SDValue();
2818 }
2819
2820 /// Returns true (and the GlobalValue and the offset) if the node is a
2821 /// GlobalAddress + offset.
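/// E.g. for (add (GlobalAddress @g, 8), 16) this returns true with GA set
/// to @g and Offset increased by 24 (8 + 16).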
2822 bool TargetLowering::isGAPlusOffset(SDNode *N, const GlobalValue *&GA,
2823 int64_t &Offset) const {
2824 if (auto *GASD = dyn_cast<GlobalAddressSDNode>(N)) {
2825 GA = GASD->getGlobal();
2826 Offset += GASD->getOffset();
2827 return true;
2828 }
2829
2830 if (N->getOpcode() == ISD::ADD) {
2831 SDValue N1 = N->getOperand(0);
2832 SDValue N2 = N->getOperand(1);
2833 if (isGAPlusOffset(N1.getNode(), GA, Offset)) {
2834 if (auto *V = dyn_cast<ConstantSDNode>(N2)) {
2835 Offset += V->getSExtValue();
2836 return true;
2837 }
2838 } else if (isGAPlusOffset(N2.getNode(), GA, Offset)) {
2839 if (auto *V = dyn_cast<ConstantSDNode>(N1)) {
2840 Offset += V->getSExtValue();
2841 return true;
2842 }
2843 }
2844 }
2845
2846 return false;
2847 }
2848
2849 SDValue TargetLowering::PerformDAGCombine(SDNode *N,
2850 DAGCombinerInfo &DCI) const {
2851 // Default implementation: no optimization.
2852 return SDValue();
2853 }
2854
2855 //===----------------------------------------------------------------------===//
2856 // Inline Assembler Implementation Methods
2857 //===----------------------------------------------------------------------===//
2858
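// E.g. "r" maps to C_RegisterClass; "m", "o" and "V" map to C_Memory; and
// a brace-enclosed register name such as "{eax}" maps to C_Register
// (with "{memory}" special-cased to C_Memory).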
2859 TargetLowering::ConstraintType
2860 TargetLowering::getConstraintType(StringRef Constraint) const {
2861 unsigned S = Constraint.size();
2862
2863 if (S == 1) {
2864 switch (Constraint[0]) {
2865 default: break;
2866 case 'r': return C_RegisterClass;
2867 case 'm': // memory
2868 case 'o': // offsetable
2869 case 'V': // not offsetable
2870 return C_Memory;
2871 case 'i': // Simple Integer or Relocatable Constant
2872 case 'n': // Simple Integer
2873 case 'E': // Floating Point Constant
2874 case 'F': // Floating Point Constant
2875 case 's': // Relocatable Constant
2876 case 'p': // Address.
2877 case 'X': // Allow ANY value.
2878 case 'I': // Target registers.
2879 case 'J':
2880 case 'K':
2881 case 'L':
2882 case 'M':
2883 case 'N':
2884 case 'O':
2885 case 'P':
2886 case '<':
2887 case '>':
2888 return C_Other;
2889 }
2890 }
2891
2892 if (S > 1 && Constraint[0] == '{' && Constraint[S-1] == '}') {
2893 if (S == 8 && Constraint.substr(1, 6) == "memory") // "{memory}"
2894 return C_Memory;
2895 return C_Register;
2896 }
2897 return C_Unknown;
2898 }
2899
2900 /// Try to replace an X constraint, which matches anything, with another that
2901 /// has more specific requirements based on the type of the corresponding
2902 /// operand.
2903 const char *TargetLowering::LowerXConstraint(EVT ConstraintVT) const {
2904 if (ConstraintVT.isInteger())
2905 return "r";
2906 if (ConstraintVT.isFloatingPoint())
2907 return "f"; // works for many targets
2908 return nullptr;
2909 }
2910
2911 /// Lower the specified operand into the Ops vector.
2912 /// If it is invalid, don't add anything to Ops.
2913 void TargetLowering::LowerAsmOperandForConstraint(SDValue Op,
2914 std::string &Constraint,
2915 std::vector<SDValue> &Ops,
2916 SelectionDAG &DAG) const {
2917
2918 if (Constraint.length() > 1) return;
2919
2920 char ConstraintLetter = Constraint[0];
2921 switch (ConstraintLetter) {
2922 default: break;
2923 case 'X': // Allows any operand; labels (basic block) use this.
2924 if (Op.getOpcode() == ISD::BasicBlock) {
2925 Ops.push_back(Op);
2926 return;
2927 }
2928 LLVM_FALLTHROUGH;
2929 case 'i': // Simple Integer or Relocatable Constant
2930 case 'n': // Simple Integer
2931 case 's': { // Relocatable Constant
2932 // These operands are interested in values of the form (GV+C), where C may
2933 // be folded in as an offset of GV, or it may be explicitly added. Also, it
2934 // is possible and fine if either GV or C is missing.
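// For instance (example values, for illustration only), an "i" operand
// written as "&g + 4" in source reaches us as
// (add (GlobalAddress @g), (Constant 4)) and is mapped below to a single
// TargetGlobalAddress with offset 4.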
2935 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op);
2936 GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Op);
2937
2938 // If we have "(add GV, C)", pull out GV/C
2939 if (Op.getOpcode() == ISD::ADD) {
2940 C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
2941 GA = dyn_cast<GlobalAddressSDNode>(Op.getOperand(0));
2942 if (!C || !GA) {
2943 C = dyn_cast<ConstantSDNode>(Op.getOperand(0));
2944 GA = dyn_cast<GlobalAddressSDNode>(Op.getOperand(1));
2945 }
2946 if (!C || !GA) {
2947 C = nullptr;
2948 GA = nullptr;
2949 }
2950 }
2951
2952 // If we find a valid operand, map to the TargetXXX version so that the
2953 // value itself doesn't get selected.
2954 if (GA) { // Either &GV or &GV+C
2955 if (ConstraintLetter != 'n') {
2956 int64_t Offs = GA->getOffset();
2957 if (C) Offs += C->getZExtValue();
2958 Ops.push_back(DAG.getTargetGlobalAddress(GA->getGlobal(),
2959 C ? SDLoc(C) : SDLoc(),
2960 Op.getValueType(), Offs));
2961 }
2962 return;
2963 }
2964 if (C) { // just C, no GV.
2965 // Simple constants are not allowed for 's'.
2966 if (ConstraintLetter != 's') {
2967 // gcc prints these as sign extended. Sign extend value to 64 bits
2968 // now; without this it would get ZExt'd later in
2969 // ScheduleDAGSDNodes::EmitNode, which is very generic.
2970 Ops.push_back(DAG.getTargetConstant(C->getSExtValue(),
2971 SDLoc(C), MVT::i64));
2972 }
2973 return;
2974 }
2975 break;
2976 }
2977 }
2978 }
2979
2980 std::pair<unsigned, const TargetRegisterClass *>
2981 TargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *RI,
2982 StringRef Constraint,
2983 MVT VT) const {
2984 if (Constraint.empty() || Constraint[0] != '{')
2985 return std::make_pair(0u, static_cast<TargetRegisterClass*>(nullptr));
2986 assert(*(Constraint.end()-1) == '}' && "Not a brace enclosed constraint?");
2987
2988 // Remove the braces from around the name.
2989 StringRef RegName(Constraint.data()+1, Constraint.size()-2);
2990
2991 std::pair<unsigned, const TargetRegisterClass*> R =
2992 std::make_pair(0u, static_cast<const TargetRegisterClass*>(nullptr));
2993
2994 // Figure out which register class contains this reg.
2995 for (const TargetRegisterClass *RC : RI->regclasses()) {
2996 // If none of the value types for this register class are valid, we
2997 // can't use it. For example, 64-bit reg classes on 32-bit targets.
2998 if (!isLegalRC(*RI, *RC))
2999 continue;
3000
3001 for (TargetRegisterClass::iterator I = RC->begin(), E = RC->end();
3002 I != E; ++I) {
3003 if (RegName.equals_lower(RI->getRegAsmName(*I))) {
3004 std::pair<unsigned, const TargetRegisterClass*> S =
3005 std::make_pair(*I, RC);
3006
3007 // If this register class has the requested value type, return it,
3008 // otherwise keep searching and return the first class found
3009 // if no other is found which explicitly has the requested type.
3010 if (RI->isTypeLegalForClass(*RC, VT))
3011 return S;
3012 if (!R.second)
3013 R = S;
3014 }
3015 }
3016 }
3017
3018 return R;
3019 }
3020
3021 //===----------------------------------------------------------------------===//
3022 // Constraint Selection.
3023
3024 /// Return true if this is an input operand that is a matching constraint like
3025 /// "4".
3026 bool TargetLowering::AsmOperandInfo::isMatchingInputConstraint() const {
3027 assert(!ConstraintCode.empty() && "No known constraint!");
3028 return isdigit(static_cast<unsigned char>(ConstraintCode[0]));
3029 }
3030
3031 /// If this is an input matching constraint, this method returns the output
3032 /// operand it matches.
3033 unsigned TargetLowering::AsmOperandInfo::getMatchedOperand() const {
3034 assert(!ConstraintCode.empty() && "No known constraint!");
3035 return atoi(ConstraintCode.c_str());
3036 }
3037
3038 /// Split up the constraint string from the inline assembly value into the
3039 /// specific constraints and their prefixes, and also tie in the associated
3040 /// operand values.
3041 /// If this returns an empty vector, and if the constraint string itself
3042 /// isn't empty, there was an error parsing.
3043 TargetLowering::AsmOperandInfoVector
3044 TargetLowering::ParseConstraints(const DataLayout &DL,
3045 const TargetRegisterInfo *TRI,
3046 ImmutableCallSite CS) const {
3047 // Information about all of the constraints.
3048 AsmOperandInfoVector ConstraintOperands;
3049 const InlineAsm *IA = cast<InlineAsm>(CS.getCalledValue());
3050 unsigned maCount = 0; // Largest number of multiple alternative constraints.
3051
3052 // Do a prepass over the constraints, canonicalizing them, and building up the
3053 // ConstraintOperands list.
3054 unsigned ArgNo = 0; // ArgNo - The argument of the CallInst.
3055 unsigned ResNo = 0; // ResNo - The result number of the next output.
3056
3057 for (InlineAsm::ConstraintInfo &CI : IA->ParseConstraints()) {
3058 ConstraintOperands.emplace_back(std::move(CI));
3059 AsmOperandInfo &OpInfo = ConstraintOperands.back();
3060
3061 // Update multiple alternative constraint count.
3062 if (OpInfo.multipleAlternatives.size() > maCount)
3063 maCount = OpInfo.multipleAlternatives.size();
3064
3065 OpInfo.ConstraintVT = MVT::Other;
3066
3067 // Compute the value type for each operand.
3068 switch (OpInfo.Type) {
3069 case InlineAsm::isOutput:
3070 // Indirect outputs just consume an argument.
3071 if (OpInfo.isIndirect) {
3072 OpInfo.CallOperandVal = const_cast<Value *>(CS.getArgument(ArgNo++));
3073 break;
3074 }
3075
3076 // The return value of the call is this value. As such, there is no
3077 // corresponding argument.
3078 assert(!CS.getType()->isVoidTy() &&
3079 "Bad inline asm!");
3080 if (StructType *STy = dyn_cast<StructType>(CS.getType())) {
3081 OpInfo.ConstraintVT =
3082 getSimpleValueType(DL, STy->getElementType(ResNo));
3083 } else {
3084 assert(ResNo == 0 && "Asm only has one result!");
3085 OpInfo.ConstraintVT = getSimpleValueType(DL, CS.getType());
3086 }
3087 ++ResNo;
3088 break;
3089 case InlineAsm::isInput:
3090 OpInfo.CallOperandVal = const_cast<Value *>(CS.getArgument(ArgNo++));
3091 break;
3092 case InlineAsm::isClobber:
3093 // Nothing to do.
3094 break;
3095 }
3096
3097 if (OpInfo.CallOperandVal) {
3098 llvm::Type *OpTy = OpInfo.CallOperandVal->getType();
3099 if (OpInfo.isIndirect) {
3100 llvm::PointerType *PtrTy = dyn_cast<PointerType>(OpTy);
3101 if (!PtrTy)
3102 report_fatal_error("Indirect operand for inline asm not a pointer!");
3103 OpTy = PtrTy->getElementType();
3104 }
3105
3106 // Look for a vector wrapped in a struct, e.g. { <16 x i8> }.
3107 if (StructType *STy = dyn_cast<StructType>(OpTy))
3108 if (STy->getNumElements() == 1)
3109 OpTy = STy->getElementType(0);
3110
3111 // If OpTy is not a single value, it may be a struct/union that we
3112 // can tile with integers.
3113 if (!OpTy->isSingleValueType() && OpTy->isSized()) {
3114 unsigned BitSize = DL.getTypeSizeInBits(OpTy);
3115 switch (BitSize) {
3116 default: break;
3117 case 1:
3118 case 8:
3119 case 16:
3120 case 32:
3121 case 64:
3122 case 128:
3123 OpInfo.ConstraintVT =
3124 MVT::getVT(IntegerType::get(OpTy->getContext(), BitSize), true);
3125 break;
3126 }
3127 } else if (PointerType *PT = dyn_cast<PointerType>(OpTy)) {
3128 unsigned PtrSize = DL.getPointerSizeInBits(PT->getAddressSpace());
3129 OpInfo.ConstraintVT = MVT::getIntegerVT(PtrSize);
3130 } else {
3131 OpInfo.ConstraintVT = MVT::getVT(OpTy, true);
3132 }
3133 }
3134 }
3135
3136 // If we have multiple alternative constraints, select the best alternative.
3137 if (!ConstraintOperands.empty()) {
3138 if (maCount) {
3139 unsigned bestMAIndex = 0;
3140 int bestWeight = -1;
3141 // weight: -1 = invalid match; 0 = so-so match, up to 5 = good match.
3142 int weight = -1;
3143 unsigned maIndex;
3144 // Compute the sums of the weights for each alternative, keeping track
3145 // of the best (highest weight) one so far.
3146 for (maIndex = 0; maIndex < maCount; ++maIndex) {
3147 int weightSum = 0;
3148 for (unsigned cIndex = 0, eIndex = ConstraintOperands.size();
3149 cIndex != eIndex; ++cIndex) {
3150 AsmOperandInfo& OpInfo = ConstraintOperands[cIndex];
3151 if (OpInfo.Type == InlineAsm::isClobber)
3152 continue;
3153
3154 // If this is an output operand with a matching input operand,
3155 // look up the matching input. If their types mismatch, e.g. one
3156 // is an integer, the other is floating point, or their sizes are
3157 // different, flag it as maCantMatch.
3158 if (OpInfo.hasMatchingInput()) {
3159 AsmOperandInfo &Input = ConstraintOperands[OpInfo.MatchingInput];
3160 if (OpInfo.ConstraintVT != Input.ConstraintVT) {
3161 if ((OpInfo.ConstraintVT.isInteger() !=
3162 Input.ConstraintVT.isInteger()) ||
3163 (OpInfo.ConstraintVT.getSizeInBits() !=
3164 Input.ConstraintVT.getSizeInBits())) {
3165 weightSum = -1; // Can't match.
3166 break;
3167 }
3168 }
3169 }
3170 weight = getMultipleConstraintMatchWeight(OpInfo, maIndex);
3171 if (weight == -1) {
3172 weightSum = -1;
3173 break;
3174 }
3175 weightSum += weight;
3176 }
3177 // Update best.
3178 if (weightSum > bestWeight) {
3179 bestWeight = weightSum;
3180 bestMAIndex = maIndex;
3181 }
3182 }
3183
3184 // Now select chosen alternative in each constraint.
3185 for (unsigned cIndex = 0, eIndex = ConstraintOperands.size();
3186 cIndex != eIndex; ++cIndex) {
3187 AsmOperandInfo& cInfo = ConstraintOperands[cIndex];
3188 if (cInfo.Type == InlineAsm::isClobber)
3189 continue;
3190 cInfo.selectAlternative(bestMAIndex);
3191 }
3192 }
3193 }
3194
3195 // Check and hook up tied operands, choose constraint code to use.
3196 for (unsigned cIndex = 0, eIndex = ConstraintOperands.size();
3197 cIndex != eIndex; ++cIndex) {
3198 AsmOperandInfo& OpInfo = ConstraintOperands[cIndex];
3199
3200 // If this is an output operand with a matching input operand, look up the
3201 // matching input. If their types mismatch, e.g. one is an integer, the
3202 // other is floating point, or their sizes are different, flag it as an
3203 // error.
3204 if (OpInfo.hasMatchingInput()) {
3205 AsmOperandInfo &Input = ConstraintOperands[OpInfo.MatchingInput];
3206
3207 if (OpInfo.ConstraintVT != Input.ConstraintVT) {
3208 std::pair<unsigned, const TargetRegisterClass *> MatchRC =
3209 getRegForInlineAsmConstraint(TRI, OpInfo.ConstraintCode,
3210 OpInfo.ConstraintVT);
3211 std::pair<unsigned, const TargetRegisterClass *> InputRC =
3212 getRegForInlineAsmConstraint(TRI, Input.ConstraintCode,
3213 Input.ConstraintVT);
3214 if ((OpInfo.ConstraintVT.isInteger() !=
3215 Input.ConstraintVT.isInteger()) ||
3216 (MatchRC.second != InputRC.second)) {
3217 report_fatal_error("Unsupported asm: input constraint"
3218 " with a matching output constraint of"
3219 " incompatible type!");
3220 }
3221 }
3222 }
3223 }
3224
3225 return ConstraintOperands;
3226 }
3227
3228 /// Return an integer indicating how general CT is.
3229 static unsigned getConstraintGenerality(TargetLowering::ConstraintType CT) {
3230 switch (CT) {
3231 case TargetLowering::C_Other:
3232 case TargetLowering::C_Unknown:
3233 return 0;
3234 case TargetLowering::C_Register:
3235 return 1;
3236 case TargetLowering::C_RegisterClass:
3237 return 2;
3238 case TargetLowering::C_Memory:
3239 return 3;
3240 }
3241 llvm_unreachable("Invalid constraint type");
3242 }
3243
3244 /// Examine constraint type and operand type and determine a weight value.
3245 /// This object must already have been set up with the operand type
3246 /// and the current alternative constraint selected.
3247 TargetLowering::ConstraintWeight
3248 TargetLowering::getMultipleConstraintMatchWeight(
3249 AsmOperandInfo &info, int maIndex) const {
3250 InlineAsm::ConstraintCodeVector *rCodes;
3251 if (maIndex >= (int)info.multipleAlternatives.size())
3252 rCodes = &info.Codes;
3253 else
3254 rCodes = &info.multipleAlternatives[maIndex].Codes;
3255 ConstraintWeight BestWeight = CW_Invalid;
3256
3257 // Loop over the options, keeping track of the most general one.
3258 for (unsigned i = 0, e = rCodes->size(); i != e; ++i) {
3259 ConstraintWeight weight =
3260 getSingleConstraintMatchWeight(info, (*rCodes)[i].c_str());
3261 if (weight > BestWeight)
3262 BestWeight = weight;
3263 }
3264
3265 return BestWeight;
3266 }
3267
3268 /// Examine constraint type and operand type and determine a weight value.
3269 /// This object must already have been set up with the operand type
3270 /// and the current alternative constraint selected.
3271 TargetLowering::ConstraintWeight
3272 TargetLowering::getSingleConstraintMatchWeight(
3273 AsmOperandInfo &info, const char *constraint) const {
3274 ConstraintWeight weight = CW_Invalid;
3275 Value *CallOperandVal = info.CallOperandVal;
3276 // If we don't have a value, we can't do a match,
3277 // but allow it at the lowest weight.
3278 if (!CallOperandVal)
3279 return CW_Default;
3280 // Look at the constraint type.
3281 switch (*constraint) {
3282 case 'i': // immediate integer.
3283 case 'n': // immediate integer with a known value.
3284 if (isa<ConstantInt>(CallOperandVal))
3285 weight = CW_Constant;
3286 break;
3287 case 's': // non-explicit integral immediate.
3288 if (isa<GlobalValue>(CallOperandVal))
3289 weight = CW_Constant;
3290 break;
3291 case 'E': // immediate float if host format.
3292 case 'F': // immediate float.
3293 if (isa<ConstantFP>(CallOperandVal))
3294 weight = CW_Constant;
3295 break;
3296 case '<': // memory operand with autodecrement.
3297 case '>': // memory operand with autoincrement.
3298 case 'm': // memory operand.
3299 case 'o': // offsettable memory operand
3300 case 'V': // non-offsettable memory operand
3301 weight = CW_Memory;
3302 break;
3303 case 'r': // general register.
3304 case 'g': // general register, memory operand or immediate integer.
3305 // note: Clang converts "g" to "imr".
3306 if (CallOperandVal->getType()->isIntegerTy())
3307 weight = CW_Register;
3308 break;
3309 case 'X': // any operand.
3310 default:
3311 weight = CW_Default;
3312 break;
3313 }
3314 return weight;
3315 }
3316
3317 /// If there are multiple different constraints that we could pick for this
3318 /// operand (e.g. "imr") try to pick the 'best' one.
3319 /// This is somewhat tricky: constraints fall into four classes:
3320 /// Other -> immediates and magic values
3321 /// Register -> one specific register
3322 /// RegisterClass -> a group of regs
3323 /// Memory -> memory
3324 /// Ideally, we would pick the most specific constraint possible: if we have
3325 /// something that fits into a register, we would pick it. The problem here
3326 /// is that if we have something that could either be in a register or in
3327 /// memory that use of the register could cause selection of *other*
3328 /// operands to fail: they might only succeed if we pick memory. Because of
3329 /// this the heuristic we use is:
3330 ///
3331 /// 1) If there is an 'other' constraint, and if the operand is valid for
3332 /// that constraint, use it. This makes us take advantage of 'i'
3333 /// constraints when available.
3334 /// 2) Otherwise, pick the most general constraint present. This prefers
3335 /// 'm' over 'r', for example.
3336 ///
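/// For example (an illustrative constraint string, not from the source):
/// given "imr" and a constant operand such as 4, step 1 selects 'i'
/// because LowerAsmOperandForConstraint can lower the constant; for a
/// non-constant operand, step 2 selects 'm' (generality 3) over 'r'
/// (generality 2).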
3337 static void ChooseConstraint(TargetLowering::AsmOperandInfo &OpInfo,
3338 const TargetLowering &TLI,
3339 SDValue Op, SelectionDAG *DAG) {
3340 assert(OpInfo.Codes.size() > 1 && "Doesn't have multiple constraint options");
3341 unsigned BestIdx = 0;
3342 TargetLowering::ConstraintType BestType = TargetLowering::C_Unknown;
3343 int BestGenerality = -1;
3344
3345 // Loop over the options, keeping track of the most general one.
3346 for (unsigned i = 0, e = OpInfo.Codes.size(); i != e; ++i) {
3347 TargetLowering::ConstraintType CType =
3348 TLI.getConstraintType(OpInfo.Codes[i]);
3349
3350 // If this is an 'other' constraint, see if the operand is valid for it.
3351 // For example, on X86 we might have an 'rI' constraint. If the operand
3352 // is an integer in the range [0..31] we want to use I (saving a load
3353 // of a register), otherwise we must use 'r'.
3354 if (CType == TargetLowering::C_Other && Op.getNode()) {
3355 assert(OpInfo.Codes[i].size() == 1 &&
3356 "Unhandled multi-letter 'other' constraint");
3357 std::vector<SDValue> ResultOps;
3358 TLI.LowerAsmOperandForConstraint(Op, OpInfo.Codes[i],
3359 ResultOps, *DAG);
3360 if (!ResultOps.empty()) {
3361 BestType = CType;
3362 BestIdx = i;
3363 break;
3364 }
3365 }
3366
3367 // Things with matching constraints can only be registers, per gcc
3368 // documentation. This mainly affects "g" constraints.
3369 if (CType == TargetLowering::C_Memory && OpInfo.hasMatchingInput())
3370 continue;
3371
3372 // This constraint letter is more general than the previous one, use it.
3373 int Generality = getConstraintGenerality(CType);
3374 if (Generality > BestGenerality) {
3375 BestType = CType;
3376 BestIdx = i;
3377 BestGenerality = Generality;
3378 }
3379 }
3380
3381 OpInfo.ConstraintCode = OpInfo.Codes[BestIdx];
3382 OpInfo.ConstraintType = BestType;
3383 }
3384
3385 /// Determines the constraint code and constraint type to use for the specific
3386 /// AsmOperandInfo, setting OpInfo.ConstraintCode and OpInfo.ConstraintType.
3387 void TargetLowering::ComputeConstraintToUse(AsmOperandInfo &OpInfo,
3388 SDValue Op,
3389 SelectionDAG *DAG) const {
3390 assert(!OpInfo.Codes.empty() && "Must have at least one constraint");
3391
3392 // Single-letter constraints ('r') are very common.
3393 if (OpInfo.Codes.size() == 1) {
3394 OpInfo.ConstraintCode = OpInfo.Codes[0];
3395 OpInfo.ConstraintType = getConstraintType(OpInfo.ConstraintCode);
3396 } else {
3397 ChooseConstraint(OpInfo, *this, Op, DAG);
3398 }
3399
3400 // 'X' matches anything.
3401 if (OpInfo.ConstraintCode == "X" && OpInfo.CallOperandVal) {
3402 // Labels and constants are handled elsewhere ('X' is the only thing
3403 // that matches labels). For Functions, the type here is the type of
3404 // the result, which is not what we want to look at; leave them alone.
3405 Value *v = OpInfo.CallOperandVal;
3406 if (isa<BasicBlock>(v) || isa<ConstantInt>(v) || isa<Function>(v)) {
3407 OpInfo.CallOperandVal = v;
3408 return;
3409 }
3410
3411 // Otherwise, try to resolve it to something we know about by looking at
3412 // the actual operand type.
3413 if (const char *Repl = LowerXConstraint(OpInfo.ConstraintVT)) {
3414 OpInfo.ConstraintCode = Repl;
3415 OpInfo.ConstraintType = getConstraintType(OpInfo.ConstraintCode);
3416 }
3417 }
3418 }
3419
3420 /// Given an exact SDIV by a constant, create a multiplication
3421 /// with the multiplicative inverse of the constant.
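/// A worked example (illustrative, not from the source): for an i32 exact
/// divide by 6, the value is first shifted right by 1 (d = 6 has one
/// trailing zero), and the result is multiplied by the multiplicative
/// inverse of 3 mod 2^32, which the Newton iteration below computes as
/// 0xAAAAAAAB.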
3422 static SDValue BuildExactSDIV(const TargetLowering &TLI, SDValue Op1, APInt d,
3423 const SDLoc &dl, SelectionDAG &DAG,
3424 SmallVectorImpl<SDNode *> &Created) {
3425 assert(d != 0 && "Division by zero!");
3426
3427 // Shift the value upfront if it is even, so the LSB is one.
3428 unsigned ShAmt = d.countTrailingZeros();
3429 if (ShAmt) {
3430 // TODO: For UDIV use SRL instead of SRA.
3431 SDValue Amt =
3432 DAG.getConstant(ShAmt, dl, TLI.getShiftAmountTy(Op1.getValueType(),
3433 DAG.getDataLayout()));
3434 SDNodeFlags Flags;
3435 Flags.setExact(true);
3436 Op1 = DAG.getNode(ISD::SRA, dl, Op1.getValueType(), Op1, Amt, Flags);
3437 Created.push_back(Op1.getNode());
3438 d.ashrInPlace(ShAmt);
3439 }
3440
3441 // Calculate the multiplicative inverse, using Newton's method.
3442 APInt t, xn = d;
3443 while ((t = d*xn) != 1)
3444 xn *= APInt(d.getBitWidth(), 2) - t;
3445
3446 SDValue Op2 = DAG.getConstant(xn, dl, Op1.getValueType());
3447 SDValue Mul = DAG.getNode(ISD::MUL, dl, Op1.getValueType(), Op1, Op2);
3448 Created.push_back(Mul.getNode());
3449 return Mul;
3450 }
3451
3452 SDValue TargetLowering::BuildSDIVPow2(SDNode *N, const APInt &Divisor,
3453 SelectionDAG &DAG,
3454 SmallVectorImpl<SDNode *> &Created) const {
3455 AttributeList Attr = DAG.getMachineFunction().getFunction().getAttributes();
3456 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3457 if (TLI.isIntDivCheap(N->getValueType(0), Attr))
3458 return SDValue(N,0); // Lower SDIV as SDIV
3459 return SDValue();
3460 }
3461
3462 /// Given an ISD::SDIV node expressing a divide by constant,
3463 /// return a DAG expression to select that will generate the same value by
3464 /// multiplying by a magic number.
3465 /// Ref: "Hacker's Delight" or "The PowerPC Compiler Writer's Guide".
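/// A worked example (illustrative, not from the source): for i32 and
/// Divisor == 7, Divisor.magic() yields m = 0x92492493 and s = 2. Since
/// d > 0 and m < 0, the expansion below is:
///   q = MULHS(n, m); q += n; q >>= 2 (arithmetic); q += q >>u 31;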
3466 SDValue TargetLowering::BuildSDIV(SDNode *N, const APInt &Divisor,
3467 SelectionDAG &DAG, bool IsAfterLegalization,
3468 SmallVectorImpl<SDNode *> &Created) const {
3469 EVT VT = N->getValueType(0);
3470 SDLoc dl(N);
3471
3472 // Check to see if we can do this.
3473 // FIXME: We should be more aggressive here.
3474 if (!isTypeLegal(VT))
3475 return SDValue();
3476
3477 // If the sdiv has an 'exact' bit we can use a simpler lowering.
3478 if (N->getFlags().hasExact())
3479 return BuildExactSDIV(*this, N->getOperand(0), Divisor, dl, DAG, Created);
3480
3481 APInt::ms magics = Divisor.magic();
3482
3483 // Multiply the numerator (operand 0) by the magic value
3484 // FIXME: We should support doing a MUL in a wider type
3485 SDValue Q;
3486 if (IsAfterLegalization ? isOperationLegal(ISD::MULHS, VT) :
3487 isOperationLegalOrCustom(ISD::MULHS, VT))
3488 Q = DAG.getNode(ISD::MULHS, dl, VT, N->getOperand(0),
3489 DAG.getConstant(magics.m, dl, VT));
3490 else if (IsAfterLegalization ? isOperationLegal(ISD::SMUL_LOHI, VT) :
3491 isOperationLegalOrCustom(ISD::SMUL_LOHI, VT))
3492 Q = SDValue(DAG.getNode(ISD::SMUL_LOHI, dl, DAG.getVTList(VT, VT),
3493 N->getOperand(0),
3494 DAG.getConstant(magics.m, dl, VT)).getNode(), 1);
3495 else
3496 return SDValue(); // No mulhs or equivalent
3497
3498 Created.push_back(Q.getNode());
3499
3500 // If d > 0 and m < 0, add the numerator
3501 if (Divisor.isStrictlyPositive() && magics.m.isNegative()) {
3502 Q = DAG.getNode(ISD::ADD, dl, VT, Q, N->getOperand(0));
3503 Created.push_back(Q.getNode());
3504 }
3505 // If d < 0 and m > 0, subtract the numerator.
3506 if (Divisor.isNegative() && magics.m.isStrictlyPositive()) {
3507 Q = DAG.getNode(ISD::SUB, dl, VT, Q, N->getOperand(0));
3508 Created.push_back(Q.getNode());
3509 }
3510 auto &DL = DAG.getDataLayout();
3511 // Shift right algebraic if shift value is nonzero
3512 if (magics.s > 0) {
3513 Q = DAG.getNode(
3514 ISD::SRA, dl, VT, Q,
3515 DAG.getConstant(magics.s, dl, getShiftAmountTy(Q.getValueType(), DL)));
3516 Created.push_back(Q.getNode());
3517 }
3518 // Extract the sign bit and add it to the quotient
3519 SDValue T =
3520 DAG.getNode(ISD::SRL, dl, VT, Q,
3521 DAG.getConstant(VT.getScalarSizeInBits() - 1, dl,
3522 getShiftAmountTy(Q.getValueType(), DL)));
3523 Created.push_back(T.getNode());
3524 return DAG.getNode(ISD::ADD, dl, VT, Q, T);
3525 }
3526
3527 /// Given an ISD::UDIV node expressing a divide by constant,
3528 /// return a DAG expression to select that will generate the same value by
3529 /// multiplying by a magic number.
3530 /// Ref: "Hacker's Delight" or "The PowerPC Compiler Writer's Guide".
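/// A worked example (illustrative, not from the source): for i32 and
/// Divisor == 7, magicu() yields m = 0x24924925, a = 1, and s = 3, so the
/// expansion below is:
///   q = MULHU(n, m); t = ((n - q) >> 1) + q; result = t >> 2;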
3531 SDValue TargetLowering::BuildUDIV(SDNode *N, const APInt &Divisor,
3532 SelectionDAG &DAG, bool IsAfterLegalization,
3533 SmallVectorImpl<SDNode *> &Created) const {
3534 EVT VT = N->getValueType(0);
3535 SDLoc dl(N);
3536 auto &DL = DAG.getDataLayout();
3537
3538 // Check to see if we can do this.
3539 // FIXME: We should be more aggressive here.
3540 if (!isTypeLegal(VT))
3541 return SDValue();
3542
3543 // FIXME: We should use a narrower constant when the upper
3544 // bits are known to be zero.
3545 APInt::mu magics = Divisor.magicu();
3546
3547 SDValue Q = N->getOperand(0);
3548
3549 // If the divisor is even, we can avoid using the expensive fixup by shifting
3550 // the divided value upfront.
3551 if (magics.a != 0 && !Divisor[0]) {
3552 unsigned Shift = Divisor.countTrailingZeros();
3553 Q = DAG.getNode(
3554 ISD::SRL, dl, VT, Q,
3555 DAG.getConstant(Shift, dl, getShiftAmountTy(Q.getValueType(), DL)));
3556 Created.push_back(Q.getNode());
3557
3558 // Get magic number for the shifted divisor.
3559 magics = Divisor.lshr(Shift).magicu(Shift);
3560 assert(magics.a == 0 && "Should use cheap fixup now");
3561 }
3562
3563 // Multiply the numerator (operand 0) by the magic value
3564 // FIXME: We should support doing a MUL in a wider type
3565 if (IsAfterLegalization ? isOperationLegal(ISD::MULHU, VT) :
3566 isOperationLegalOrCustom(ISD::MULHU, VT))
3567 Q = DAG.getNode(ISD::MULHU, dl, VT, Q, DAG.getConstant(magics.m, dl, VT));
3568 else if (IsAfterLegalization ? isOperationLegal(ISD::UMUL_LOHI, VT) :
3569 isOperationLegalOrCustom(ISD::UMUL_LOHI, VT))
3570 Q = SDValue(DAG.getNode(ISD::UMUL_LOHI, dl, DAG.getVTList(VT, VT), Q,
3571 DAG.getConstant(magics.m, dl, VT)).getNode(), 1);
3572 else
3573 return SDValue(); // No mulhu or equivalent
3574
3575 Created.push_back(Q.getNode());
3576
3577 if (magics.a == 0) {
3578 assert(magics.s < Divisor.getBitWidth() &&
3579 "We shouldn't generate an undefined shift!");
3580 return DAG.getNode(
3581 ISD::SRL, dl, VT, Q,
3582 DAG.getConstant(magics.s, dl, getShiftAmountTy(Q.getValueType(), DL)));
3583 } else {
3584 SDValue NPQ = DAG.getNode(ISD::SUB, dl, VT, N->getOperand(0), Q);
3585 Created.push_back(NPQ.getNode());
3586 NPQ = DAG.getNode(
3587 ISD::SRL, dl, VT, NPQ,
3588 DAG.getConstant(1, dl, getShiftAmountTy(NPQ.getValueType(), DL)));
3589 Created.push_back(NPQ.getNode());
3590 NPQ = DAG.getNode(ISD::ADD, dl, VT, NPQ, Q);
3591 Created.push_back(NPQ.getNode());
3592 return DAG.getNode(
3593 ISD::SRL, dl, VT, NPQ,
3594 DAG.getConstant(magics.s - 1, dl,
3595 getShiftAmountTy(NPQ.getValueType(), DL)));
3596 }
3597 }
3598
3599 bool TargetLowering::
3600 verifyReturnAddressArgumentIsConstant(SDValue Op, SelectionDAG &DAG) const {
3601 if (!isa<ConstantSDNode>(Op.getOperand(0))) {
3602 DAG.getContext()->emitError("argument to '__builtin_return_address' must "
3603 "be a constant integer");
3604 return true;
3605 }
3606
3607 return false;
3608 }
3609
3610 //===----------------------------------------------------------------------===//
3611 // Legalization Utilities
3612 //===----------------------------------------------------------------------===//
3613
3614 bool TargetLowering::expandMUL_LOHI(unsigned Opcode, EVT VT, SDLoc dl,
3615 SDValue LHS, SDValue RHS,
3616 SmallVectorImpl<SDValue> &Result,
3617 EVT HiLoVT, SelectionDAG &DAG,
3618 MulExpansionKind Kind, SDValue LL,
3619 SDValue LH, SDValue RL, SDValue RH) const {
3620 assert(Opcode == ISD::MUL || Opcode == ISD::UMUL_LOHI ||
3621 Opcode == ISD::SMUL_LOHI);
3622
3623 bool HasMULHS = (Kind == MulExpansionKind::Always) ||
3624 isOperationLegalOrCustom(ISD::MULHS, HiLoVT);
3625 bool HasMULHU = (Kind == MulExpansionKind::Always) ||
3626 isOperationLegalOrCustom(ISD::MULHU, HiLoVT);
3627 bool HasSMUL_LOHI = (Kind == MulExpansionKind::Always) ||
3628 isOperationLegalOrCustom(ISD::SMUL_LOHI, HiLoVT);
3629 bool HasUMUL_LOHI = (Kind == MulExpansionKind::Always) ||
3630 isOperationLegalOrCustom(ISD::UMUL_LOHI, HiLoVT);
3631
3632 if (!HasMULHU && !HasMULHS && !HasUMUL_LOHI && !HasSMUL_LOHI)
3633 return false;
3634
3635 unsigned OuterBitSize = VT.getScalarSizeInBits();
3636 unsigned InnerBitSize = HiLoVT.getScalarSizeInBits();
3637 unsigned LHSSB = DAG.ComputeNumSignBits(LHS);
3638 unsigned RHSSB = DAG.ComputeNumSignBits(RHS);
3639
3640 // LL, LH, RL, and RH must be either all NULL or all set to a value.
3641 assert((LL.getNode() && LH.getNode() && RL.getNode() && RH.getNode()) ||
3642 (!LL.getNode() && !LH.getNode() && !RL.getNode() && !RH.getNode()));
3643
3644 SDVTList VTs = DAG.getVTList(HiLoVT, HiLoVT);
3645 auto MakeMUL_LOHI = [&](SDValue L, SDValue R, SDValue &Lo, SDValue &Hi,
3646 bool Signed) -> bool {
3647 if ((Signed && HasSMUL_LOHI) || (!Signed && HasUMUL_LOHI)) {
3648 Lo = DAG.getNode(Signed ? ISD::SMUL_LOHI : ISD::UMUL_LOHI, dl, VTs, L, R);
3649 Hi = SDValue(Lo.getNode(), 1);
3650 return true;
3651 }
3652 if ((Signed && HasMULHS) || (!Signed && HasMULHU)) {
3653 Lo = DAG.getNode(ISD::MUL, dl, HiLoVT, L, R);
3654 Hi = DAG.getNode(Signed ? ISD::MULHS : ISD::MULHU, dl, HiLoVT, L, R);
3655 return true;
3656 }
3657 return false;
3658 };
3659
3660 SDValue Lo, Hi;
3661
3662 if (!LL.getNode() && !RL.getNode() &&
3663 isOperationLegalOrCustom(ISD::TRUNCATE, HiLoVT)) {
3664 LL = DAG.getNode(ISD::TRUNCATE, dl, HiLoVT, LHS);
3665 RL = DAG.getNode(ISD::TRUNCATE, dl, HiLoVT, RHS);
3666 }
3667
3668 if (!LL.getNode())
3669 return false;
3670
3671 APInt HighMask = APInt::getHighBitsSet(OuterBitSize, InnerBitSize);
3672 if (DAG.MaskedValueIsZero(LHS, HighMask) &&
3673 DAG.MaskedValueIsZero(RHS, HighMask)) {
3674 // The inputs are both zero-extended.
3675 if (MakeMUL_LOHI(LL, RL, Lo, Hi, false)) {
3676 Result.push_back(Lo);
3677 Result.push_back(Hi);
3678 if (Opcode != ISD::MUL) {
3679 SDValue Zero = DAG.getConstant(0, dl, HiLoVT);
3680 Result.push_back(Zero);
3681 Result.push_back(Zero);
3682 }
3683 return true;
3684 }
3685 }
3686
3687 if (!VT.isVector() && Opcode == ISD::MUL && LHSSB > InnerBitSize &&
3688 RHSSB > InnerBitSize) {
3689 // The input values are both sign-extended.
3690 // TODO non-MUL case?
3691 if (MakeMUL_LOHI(LL, RL, Lo, Hi, true)) {
3692 Result.push_back(Lo);
3693 Result.push_back(Hi);
3694 return true;
3695 }
3696 }
3697
3698 unsigned ShiftAmount = OuterBitSize - InnerBitSize;
3699 EVT ShiftAmountTy = getShiftAmountTy(VT, DAG.getDataLayout());
3700 if (APInt::getMaxValue(ShiftAmountTy.getSizeInBits()).ult(ShiftAmount)) {
3701 // FIXME getShiftAmountTy does not always return a sensible result when VT
3702 // is an illegal type, and so the type may be too small to fit the shift
3703 // amount. Override it with i32. The shift will have to be legalized.
3704 ShiftAmountTy = MVT::i32;
3705 }
3706 SDValue Shift = DAG.getConstant(ShiftAmount, dl, ShiftAmountTy);
3707
3708 if (!LH.getNode() && !RH.getNode() &&
3709 isOperationLegalOrCustom(ISD::SRL, VT) &&
3710 isOperationLegalOrCustom(ISD::TRUNCATE, HiLoVT)) {
3711 LH = DAG.getNode(ISD::SRL, dl, VT, LHS, Shift);
3712 LH = DAG.getNode(ISD::TRUNCATE, dl, HiLoVT, LH);
3713 RH = DAG.getNode(ISD::SRL, dl, VT, RHS, Shift);
3714 RH = DAG.getNode(ISD::TRUNCATE, dl, HiLoVT, RH);
3715 }
3716
3717 if (!LH.getNode())
3718 return false;
3719
3720 if (!MakeMUL_LOHI(LL, RL, Lo, Hi, false))
3721 return false;
3722
3723 Result.push_back(Lo);
3724
3725 if (Opcode == ISD::MUL) {
3726 RH = DAG.getNode(ISD::MUL, dl, HiLoVT, LL, RH);
3727 LH = DAG.getNode(ISD::MUL, dl, HiLoVT, LH, RL);
3728 Hi = DAG.getNode(ISD::ADD, dl, HiLoVT, Hi, RH);
3729 Hi = DAG.getNode(ISD::ADD, dl, HiLoVT, Hi, LH);
3730 Result.push_back(Hi);
3731 return true;
3732 }
3733
3734 // Compute the full width result.
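// What follows is the schoolbook decomposition (sketched here for
// illustration), with b = 2^InnerBitSize:
//   (LH*b + LL) * (RH*b + RL) = LL*RL + (LL*RH + LH*RL)*b + LH*RH*b^2
// where the carries between partial products are tracked via ADDC/ADDE.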
3735 auto Merge = [&](SDValue Lo, SDValue Hi) -> SDValue {
3736 Lo = DAG.getNode(ISD::ZERO_EXTEND, dl, VT, Lo);
3737 Hi = DAG.getNode(ISD::ZERO_EXTEND, dl, VT, Hi);
3738 Hi = DAG.getNode(ISD::SHL, dl, VT, Hi, Shift);
3739 return DAG.getNode(ISD::OR, dl, VT, Lo, Hi);
3740 };
3741
3742 SDValue Next = DAG.getNode(ISD::ZERO_EXTEND, dl, VT, Hi);
3743 if (!MakeMUL_LOHI(LL, RH, Lo, Hi, false))
3744 return false;
3745
3746 // This is effectively the add part of a multiply-add of half-sized operands,
3747 // so it cannot overflow.
3748 Next = DAG.getNode(ISD::ADD, dl, VT, Next, Merge(Lo, Hi));
3749
3750 if (!MakeMUL_LOHI(LH, RL, Lo, Hi, false))
3751 return false;
3752
3753 Next = DAG.getNode(ISD::ADDC, dl, DAG.getVTList(VT, MVT::Glue), Next,
3754 Merge(Lo, Hi));
3755
3756 SDValue Carry = Next.getValue(1);
3757 Result.push_back(DAG.getNode(ISD::TRUNCATE, dl, HiLoVT, Next));
3758 Next = DAG.getNode(ISD::SRL, dl, VT, Next, Shift);
3759
3760 if (!MakeMUL_LOHI(LH, RH, Lo, Hi, Opcode == ISD::SMUL_LOHI))
3761 return false;
3762
3763 SDValue Zero = DAG.getConstant(0, dl, HiLoVT);
3764 Hi = DAG.getNode(ISD::ADDE, dl, DAG.getVTList(HiLoVT, MVT::Glue), Hi, Zero,
3765 Carry);
3766 Next = DAG.getNode(ISD::ADD, dl, VT, Next, Merge(Lo, Hi));
3767
3768 if (Opcode == ISD::SMUL_LOHI) {
3769 SDValue NextSub = DAG.getNode(ISD::SUB, dl, VT, Next,
3770 DAG.getNode(ISD::ZERO_EXTEND, dl, VT, RL));
3771 Next = DAG.getSelectCC(dl, LH, Zero, NextSub, Next, ISD::SETLT);
3772
3773 NextSub = DAG.getNode(ISD::SUB, dl, VT, Next,
3774 DAG.getNode(ISD::ZERO_EXTEND, dl, VT, LL));
3775 Next = DAG.getSelectCC(dl, RH, Zero, NextSub, Next, ISD::SETLT);
3776 }
3777
3778 Result.push_back(DAG.getNode(ISD::TRUNCATE, dl, HiLoVT, Next));
3779 Next = DAG.getNode(ISD::SRL, dl, VT, Next, Shift);
3780 Result.push_back(DAG.getNode(ISD::TRUNCATE, dl, HiLoVT, Next));
3781 return true;
3782 }
3783
3784 bool TargetLowering::expandMUL(SDNode *N, SDValue &Lo, SDValue &Hi, EVT HiLoVT,
3785 SelectionDAG &DAG, MulExpansionKind Kind,
3786 SDValue LL, SDValue LH, SDValue RL,
3787 SDValue RH) const {
3788 SmallVector<SDValue, 2> Result;
3789 bool Ok = expandMUL_LOHI(N->getOpcode(), N->getValueType(0), N,
3790 N->getOperand(0), N->getOperand(1), Result, HiLoVT,
3791 DAG, Kind, LL, LH, RL, RH);
3792 if (Ok) {
3793 assert(Result.size() == 2);
3794 Lo = Result[0];
3795 Hi = Result[1];
3796 }
3797 return Ok;
3798 }
3799
3800 bool TargetLowering::expandFP_TO_SINT(SDNode *Node, SDValue &Result,
3801 SelectionDAG &DAG) const {
3802 EVT VT = Node->getOperand(0).getValueType();
3803 EVT NVT = Node->getValueType(0);
3804 SDLoc dl(SDValue(Node, 0));
3805
3806 // FIXME: Only f32 to i64 conversions are supported.
3807 if (VT != MVT::f32 || NVT != MVT::i64)
3808 return false;
3809
3810 // Expand f32 -> i64 conversion
3811 // This algorithm comes from compiler-rt's implementation of fixsfdi:
3812 // https://github.com/llvm-mirror/compiler-rt/blob/master/lib/builtins/fixsfdi.c
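// A worked example (illustrative): 1.5f has bits 0x3FC00000, giving
// ExponentBits = 127 and Exponent = 0; R = mantissa | 0x00800000 =
// 0x00C00000. Since Exponent <= 23, the select below shifts R right by
// 23 - 0 to produce 1, and with a zero sign the final result is 1.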
3813 EVT IntVT = EVT::getIntegerVT(*DAG.getContext(),
3814 VT.getSizeInBits());
3815 SDValue ExponentMask = DAG.getConstant(0x7F800000, dl, IntVT);
3816 SDValue ExponentLoBit = DAG.getConstant(23, dl, IntVT);
3817 SDValue Bias = DAG.getConstant(127, dl, IntVT);
3818 SDValue SignMask = DAG.getConstant(APInt::getSignMask(VT.getSizeInBits()), dl,
3819 IntVT);
3820 SDValue SignLowBit = DAG.getConstant(VT.getSizeInBits() - 1, dl, IntVT);
3821 SDValue MantissaMask = DAG.getConstant(0x007FFFFF, dl, IntVT);
3822
3823 SDValue Bits = DAG.getNode(ISD::BITCAST, dl, IntVT, Node->getOperand(0));
3824
3825 auto &DL = DAG.getDataLayout();
3826 SDValue ExponentBits = DAG.getNode(
3827 ISD::SRL, dl, IntVT, DAG.getNode(ISD::AND, dl, IntVT, Bits, ExponentMask),
3828 DAG.getZExtOrTrunc(ExponentLoBit, dl, getShiftAmountTy(IntVT, DL)));
3829 SDValue Exponent = DAG.getNode(ISD::SUB, dl, IntVT, ExponentBits, Bias);
3830
3831 SDValue Sign = DAG.getNode(
3832 ISD::SRA, dl, IntVT, DAG.getNode(ISD::AND, dl, IntVT, Bits, SignMask),
3833 DAG.getZExtOrTrunc(SignLowBit, dl, getShiftAmountTy(IntVT, DL)));
3834 Sign = DAG.getSExtOrTrunc(Sign, dl, NVT);
3835
3836 SDValue R = DAG.getNode(ISD::OR, dl, IntVT,
3837 DAG.getNode(ISD::AND, dl, IntVT, Bits, MantissaMask),
3838 DAG.getConstant(0x00800000, dl, IntVT));
3839
3840 R = DAG.getZExtOrTrunc(R, dl, NVT);
3841
3842 R = DAG.getSelectCC(
3843 dl, Exponent, ExponentLoBit,
3844 DAG.getNode(ISD::SHL, dl, NVT, R,
3845 DAG.getZExtOrTrunc(
3846 DAG.getNode(ISD::SUB, dl, IntVT, Exponent, ExponentLoBit),
3847 dl, getShiftAmountTy(IntVT, DL))),
3848 DAG.getNode(ISD::SRL, dl, NVT, R,
3849 DAG.getZExtOrTrunc(
3850 DAG.getNode(ISD::SUB, dl, IntVT, ExponentLoBit, Exponent),
3851 dl, getShiftAmountTy(IntVT, DL))),
3852 ISD::SETGT);
3853
3854 SDValue Ret = DAG.getNode(ISD::SUB, dl, NVT,
3855 DAG.getNode(ISD::XOR, dl, NVT, R, Sign),
3856 Sign);
3857
3858 Result = DAG.getSelectCC(dl, Exponent, DAG.getConstant(0, dl, IntVT),
3859 DAG.getConstant(0, dl, NVT), Ret, ISD::SETLT);
3860 return true;
3861 }
3862
3863 SDValue TargetLowering::scalarizeVectorLoad(LoadSDNode *LD,
3864 SelectionDAG &DAG) const {
3865 SDLoc SL(LD);
3866 SDValue Chain = LD->getChain();
3867 SDValue BasePTR = LD->getBasePtr();
3868 EVT SrcVT = LD->getMemoryVT();
3869 ISD::LoadExtType ExtType = LD->getExtensionType();
3870
3871 unsigned NumElem = SrcVT.getVectorNumElements();
3872
3873 EVT SrcEltVT = SrcVT.getScalarType();
3874 EVT DstEltVT = LD->getValueType(0).getScalarType();
3875
3876 unsigned Stride = SrcEltVT.getSizeInBits() / 8;
3877 assert(SrcEltVT.isByteSized());
3878
3879 EVT PtrVT = BasePTR.getValueType();
3880
3881 SmallVector<SDValue, 8> Vals;
3882 SmallVector<SDValue, 8> LoadChains;
3883
3884 for (unsigned Idx = 0; Idx < NumElem; ++Idx) {
3885 SDValue ScalarLoad =
3886 DAG.getExtLoad(ExtType, SL, DstEltVT, Chain, BasePTR,
3887 LD->getPointerInfo().getWithOffset(Idx * Stride),
3888 SrcEltVT, MinAlign(LD->getAlignment(), Idx * Stride),
3889 LD->getMemOperand()->getFlags(), LD->getAAInfo());
3890
3891 BasePTR = DAG.getNode(ISD::ADD, SL, PtrVT, BasePTR,
3892 DAG.getConstant(Stride, SL, PtrVT));
3893
3894 Vals.push_back(ScalarLoad.getValue(0));
3895 LoadChains.push_back(ScalarLoad.getValue(1));
3896 }
3897
3898 SDValue NewChain = DAG.getNode(ISD::TokenFactor, SL, MVT::Other, LoadChains);
3899 SDValue Value = DAG.getBuildVector(LD->getValueType(0), SL, Vals);
3900
3901 return DAG.getMergeValues({ Value, NewChain }, SL);
3902 }
3903
3904 SDValue TargetLowering::scalarizeVectorStore(StoreSDNode *ST,
3905 SelectionDAG &DAG) const {
3906 SDLoc SL(ST);
3907
3908 SDValue Chain = ST->getChain();
3909 SDValue BasePtr = ST->getBasePtr();
3910 SDValue Value = ST->getValue();
3911 EVT StVT = ST->getMemoryVT();
3912
3913 // The type of the data we want to save
3914 EVT RegVT = Value.getValueType();
3915 EVT RegSclVT = RegVT.getScalarType();
3916
3917 // The type of data as saved in memory.
3918 EVT MemSclVT = StVT.getScalarType();
3919
3920 EVT IdxVT = getVectorIdxTy(DAG.getDataLayout());
3921 unsigned NumElem = StVT.getVectorNumElements();
3922
3923 // A vector must always be stored in memory as-is, i.e. without any padding
3924 // between the elements, since various code depend on it, e.g. in the
3925 // handling of a bitcast of a vector type to int, which may be done with a
3926 // vector store followed by an integer load. A vector that does not have
3927 // elements that are byte-sized must therefore be stored as an integer
3928 // built out of the extracted vector elements.
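// For example (illustrative), storing a <4 x i1> vector emits a single i4
// store: each element is truncated to the memory element type,
// zero-extended into the packed integer, and shifted into its bit
// position below.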
3929 if (!MemSclVT.isByteSized()) {
3930 unsigned NumBits = StVT.getSizeInBits();
3931 EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), NumBits);
3932
3933 SDValue CurrVal = DAG.getConstant(0, SL, IntVT);
3934
3935 for (unsigned Idx = 0; Idx < NumElem; ++Idx) {
3936 SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, RegSclVT, Value,
3937 DAG.getConstant(Idx, SL, IdxVT));
3938 SDValue Trunc = DAG.getNode(ISD::TRUNCATE, SL, MemSclVT, Elt);
3939 SDValue ExtElt = DAG.getNode(ISD::ZERO_EXTEND, SL, IntVT, Trunc);
3940 unsigned ShiftIntoIdx =
3941 (DAG.getDataLayout().isBigEndian() ? (NumElem - 1) - Idx : Idx);
3942 SDValue ShiftAmount =
3943 DAG.getConstant(ShiftIntoIdx * MemSclVT.getSizeInBits(), SL, IntVT);
3944 SDValue ShiftedElt =
3945 DAG.getNode(ISD::SHL, SL, IntVT, ExtElt, ShiftAmount);
3946 CurrVal = DAG.getNode(ISD::OR, SL, IntVT, CurrVal, ShiftedElt);
3947 }
3948
3949 return DAG.getStore(Chain, SL, CurrVal, BasePtr, ST->getPointerInfo(),
3950 ST->getAlignment(), ST->getMemOperand()->getFlags(),
3951 ST->getAAInfo());
3952 }
3953
3954 // Store stride in bytes.
3955 unsigned Stride = MemSclVT.getSizeInBits() / 8;
3956 assert(Stride && "Zero stride!");
3957 // Extract each of the elements from the original vector and save them into
3958 // memory individually.
3959 SmallVector<SDValue, 8> Stores;
3960 for (unsigned Idx = 0; Idx < NumElem; ++Idx) {
3961 SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, RegSclVT, Value,
3962 DAG.getConstant(Idx, SL, IdxVT));
3963
3964 SDValue Ptr = DAG.getObjectPtrOffset(SL, BasePtr, Idx * Stride);
3965
3966 // This scalar TruncStore may be illegal, but we legalize it later.
3967 SDValue Store = DAG.getTruncStore(
3968 Chain, SL, Elt, Ptr, ST->getPointerInfo().getWithOffset(Idx * Stride),
3969 MemSclVT, MinAlign(ST->getAlignment(), Idx * Stride),
3970 ST->getMemOperand()->getFlags(), ST->getAAInfo());
3971
3972 Stores.push_back(Store);
3973 }
3974
3975 return DAG.getNode(ISD::TokenFactor, SL, MVT::Other, Stores);
3976 }
3977
3978 std::pair<SDValue, SDValue>
3979 TargetLowering::expandUnalignedLoad(LoadSDNode *LD, SelectionDAG &DAG) const {
3980 assert(LD->getAddressingMode() == ISD::UNINDEXED &&
3981 "unaligned indexed loads not implemented!");
3982 SDValue Chain = LD->getChain();
3983 SDValue Ptr = LD->getBasePtr();
3984 EVT VT = LD->getValueType(0);
3985 EVT LoadedVT = LD->getMemoryVT();
3986 SDLoc dl(LD);
3987 auto &MF = DAG.getMachineFunction();
3988
3989 if (VT.isFloatingPoint() || VT.isVector()) {
3990 EVT intVT = EVT::getIntegerVT(*DAG.getContext(), LoadedVT.getSizeInBits());
3991 if (isTypeLegal(intVT) && isTypeLegal(LoadedVT)) {
3992 if (!isOperationLegalOrCustom(ISD::LOAD, intVT)) {
3993 // Scalarize the load and let the individual components be handled.
3994 SDValue Scalarized = scalarizeVectorLoad(LD, DAG);
3995 if (Scalarized->getOpcode() == ISD::MERGE_VALUES)
3996 return std::make_pair(Scalarized.getOperand(0), Scalarized.getOperand(1));
3997 return std::make_pair(Scalarized.getValue(0), Scalarized.getValue(1));
3998 }
3999
4000 // Expand to a (misaligned) integer load of the same size,
4001 // then bitconvert to floating point or vector.
4002 SDValue newLoad = DAG.getLoad(intVT, dl, Chain, Ptr,
4003 LD->getMemOperand());
4004 SDValue Result = DAG.getNode(ISD::BITCAST, dl, LoadedVT, newLoad);
4005 if (LoadedVT != VT)
4006 Result = DAG.getNode(VT.isFloatingPoint() ? ISD::FP_EXTEND :
4007 ISD::ANY_EXTEND, dl, VT, Result);
4008
4009 return std::make_pair(Result, newLoad.getValue(1));
4010 }
4011
4012 // Copy the value to an (aligned) stack slot using (unaligned) integer
4013 // loads and stores, then do an (aligned) load from the stack slot.
4014 MVT RegVT = getRegisterType(*DAG.getContext(), intVT);
4015 unsigned LoadedBytes = LoadedVT.getStoreSize();
4016 unsigned RegBytes = RegVT.getSizeInBits() / 8;
4017 unsigned NumRegs = (LoadedBytes + RegBytes - 1) / RegBytes;
4018
4019 // Make sure the stack slot is also aligned for the register type.
4020 SDValue StackBase = DAG.CreateStackTemporary(LoadedVT, RegVT);
4021 auto FrameIndex = cast<FrameIndexSDNode>(StackBase.getNode())->getIndex();
4022 SmallVector<SDValue, 8> Stores;
4023 SDValue StackPtr = StackBase;
4024 unsigned Offset = 0;
4025
4026 EVT PtrVT = Ptr.getValueType();
4027 EVT StackPtrVT = StackPtr.getValueType();
4028
4029 SDValue PtrIncrement = DAG.getConstant(RegBytes, dl, PtrVT);
4030 SDValue StackPtrIncrement = DAG.getConstant(RegBytes, dl, StackPtrVT);
4031
4032 // Do all but one copies using the full register width.
4033 for (unsigned i = 1; i < NumRegs; i++) {
4034 // Load one integer register's worth from the original location.
4035 SDValue Load = DAG.getLoad(
4036 RegVT, dl, Chain, Ptr, LD->getPointerInfo().getWithOffset(Offset),
4037 MinAlign(LD->getAlignment(), Offset), LD->getMemOperand()->getFlags(),
4038 LD->getAAInfo());
4039 // Follow the load with a store to the stack slot. Remember the store.
4040 Stores.push_back(DAG.getStore(
4041 Load.getValue(1), dl, Load, StackPtr,
4042 MachinePointerInfo::getFixedStack(MF, FrameIndex, Offset)));
4043 // Increment the pointers.
4044 Offset += RegBytes;
4045
4046 Ptr = DAG.getObjectPtrOffset(dl, Ptr, PtrIncrement);
4047 StackPtr = DAG.getObjectPtrOffset(dl, StackPtr, StackPtrIncrement);
4048 }
4049
4050 // The last copy may be partial. Do an extending load.
4051 EVT MemVT = EVT::getIntegerVT(*DAG.getContext(),
4052 8 * (LoadedBytes - Offset));
4053 SDValue Load =
4054 DAG.getExtLoad(ISD::EXTLOAD, dl, RegVT, Chain, Ptr,
4055 LD->getPointerInfo().getWithOffset(Offset), MemVT,
4056 MinAlign(LD->getAlignment(), Offset),
4057 LD->getMemOperand()->getFlags(), LD->getAAInfo());
4058 // Follow the load with a store to the stack slot. Remember the store.
4059 // On big-endian machines this requires a truncating store to ensure
4060 // that the bits end up in the right place.
4061 Stores.push_back(DAG.getTruncStore(
4062 Load.getValue(1), dl, Load, StackPtr,
4063 MachinePointerInfo::getFixedStack(MF, FrameIndex, Offset), MemVT));
4064
4065 // The order of the stores doesn't matter - say it with a TokenFactor.
4066 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Stores);
4067
4068 // Finally, perform the original load only redirected to the stack slot.
4069 Load = DAG.getExtLoad(LD->getExtensionType(), dl, VT, TF, StackBase,
4070 MachinePointerInfo::getFixedStack(MF, FrameIndex, 0),
4071 LoadedVT);
4072
4073 // Callers expect a MERGE_VALUES node.
4074 return std::make_pair(Load, TF);
4075 }
4076
4077 assert(LoadedVT.isInteger() && !LoadedVT.isVector() &&
4078 "Unaligned load of unsupported type.");
4079
4080 // Compute the new VT that is half the size of the old one. This is an
4081 // integer MVT.
4082 unsigned NumBits = LoadedVT.getSizeInBits();
4083 EVT NewLoadedVT;
4084 NewLoadedVT = EVT::getIntegerVT(*DAG.getContext(), NumBits/2);
4085 NumBits >>= 1;
4086
4087 unsigned Alignment = LD->getAlignment();
4088 unsigned IncrementSize = NumBits / 8;
4089 ISD::LoadExtType HiExtType = LD->getExtensionType();
4090
4091 // If the original load is NON_EXTLOAD, the hi part load must be ZEXTLOAD.
4092 if (HiExtType == ISD::NON_EXTLOAD)
4093 HiExtType = ISD::ZEXTLOAD;
4094
4095 // Load the value in two parts
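// For example (illustrative), an unaligned i32 load on a little-endian
// target becomes a zero-extending load of the low i16, a zero-extending
// load of the high i16 at offset 2, and the (Hi << 16) | Lo merge below.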
4096 SDValue Lo, Hi;
4097 if (DAG.getDataLayout().isLittleEndian()) {
4098 Lo = DAG.getExtLoad(ISD::ZEXTLOAD, dl, VT, Chain, Ptr, LD->getPointerInfo(),
4099 NewLoadedVT, Alignment, LD->getMemOperand()->getFlags(),
4100 LD->getAAInfo());
4101
4102 Ptr = DAG.getObjectPtrOffset(dl, Ptr, IncrementSize);
4103 Hi = DAG.getExtLoad(HiExtType, dl, VT, Chain, Ptr,
4104 LD->getPointerInfo().getWithOffset(IncrementSize),
4105 NewLoadedVT, MinAlign(Alignment, IncrementSize),
4106 LD->getMemOperand()->getFlags(), LD->getAAInfo());
4107 } else {
4108 Hi = DAG.getExtLoad(HiExtType, dl, VT, Chain, Ptr, LD->getPointerInfo(),
4109 NewLoadedVT, Alignment, LD->getMemOperand()->getFlags(),
4110 LD->getAAInfo());
4111
4112 Ptr = DAG.getObjectPtrOffset(dl, Ptr, IncrementSize);
4113 Lo = DAG.getExtLoad(ISD::ZEXTLOAD, dl, VT, Chain, Ptr,
4114 LD->getPointerInfo().getWithOffset(IncrementSize),
4115 NewLoadedVT, MinAlign(Alignment, IncrementSize),
4116 LD->getMemOperand()->getFlags(), LD->getAAInfo());
4117 }
4118
4119 // Aggregate the two parts.
4120 SDValue ShiftAmount =
4121 DAG.getConstant(NumBits, dl, getShiftAmountTy(Hi.getValueType(),
4122 DAG.getDataLayout()));
4123 SDValue Result = DAG.getNode(ISD::SHL, dl, VT, Hi, ShiftAmount);
4124 Result = DAG.getNode(ISD::OR, dl, VT, Result, Lo);
4125
4126 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo.getValue(1),
4127 Hi.getValue(1));
4128
4129 return std::make_pair(Result, TF);
4130 }
4131
4132 SDValue TargetLowering::expandUnalignedStore(StoreSDNode *ST,
4133 SelectionDAG &DAG) const {
4134 assert(ST->getAddressingMode() == ISD::UNINDEXED &&
4135 "unaligned indexed stores not implemented!");
4136 SDValue Chain = ST->getChain();
4137 SDValue Ptr = ST->getBasePtr();
4138 SDValue Val = ST->getValue();
4139 EVT VT = Val.getValueType();
4140 int Alignment = ST->getAlignment();
4141 auto &MF = DAG.getMachineFunction();
4142
4143 SDLoc dl(ST);
4144 if (ST->getMemoryVT().isFloatingPoint() ||
4145 ST->getMemoryVT().isVector()) {
4146 EVT intVT = EVT::getIntegerVT(*DAG.getContext(), VT.getSizeInBits());
4147 if (isTypeLegal(intVT)) {
4148 if (!isOperationLegalOrCustom(ISD::STORE, intVT)) {
4149 // Scalarize the store and let the individual components be handled.
4150 SDValue Result = scalarizeVectorStore(ST, DAG);
4151
4152 return Result;
4153 }
4154 // Expand to a bitconvert of the value to the integer type of the
4155 // same size, then a (misaligned) int store.
4156 // FIXME: Does not handle truncating floating point stores!
4157 SDValue Result = DAG.getNode(ISD::BITCAST, dl, intVT, Val);
4158 Result = DAG.getStore(Chain, dl, Result, Ptr, ST->getPointerInfo(),
4159 Alignment, ST->getMemOperand()->getFlags());
4160 return Result;
4161 }
4162 // Do an (aligned) store to a stack slot, then copy from the stack slot
4163 // to the final destination using (unaligned) integer loads and stores.
4164 EVT StoredVT = ST->getMemoryVT();
4165 MVT RegVT =
4166 getRegisterType(*DAG.getContext(),
4167 EVT::getIntegerVT(*DAG.getContext(),
4168 StoredVT.getSizeInBits()));
4169 EVT PtrVT = Ptr.getValueType();
4170 unsigned StoredBytes = StoredVT.getStoreSize();
4171 unsigned RegBytes = RegVT.getSizeInBits() / 8;
4172 unsigned NumRegs = (StoredBytes + RegBytes - 1) / RegBytes;
4173
4174 // Make sure the stack slot is also aligned for the register type.
4175 SDValue StackPtr = DAG.CreateStackTemporary(StoredVT, RegVT);
4176 auto FrameIndex = cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex();
4177
4178 // Perform the original store, only redirected to the stack slot.
4179 SDValue Store = DAG.getTruncStore(
4180 Chain, dl, Val, StackPtr,
4181 MachinePointerInfo::getFixedStack(MF, FrameIndex, 0), StoredVT);
4182
4183 EVT StackPtrVT = StackPtr.getValueType();
4184
4185 SDValue PtrIncrement = DAG.getConstant(RegBytes, dl, PtrVT);
4186 SDValue StackPtrIncrement = DAG.getConstant(RegBytes, dl, StackPtrVT);
4187 SmallVector<SDValue, 8> Stores;
4188 unsigned Offset = 0;
4189
4190 // Do all but one copies using the full register width.
4191 for (unsigned i = 1; i < NumRegs; i++) {
4192 // Load one integer register's worth from the stack slot.
4193 SDValue Load = DAG.getLoad(
4194 RegVT, dl, Store, StackPtr,
4195 MachinePointerInfo::getFixedStack(MF, FrameIndex, Offset));
4196 // Store it to the final location. Remember the store.
4197 Stores.push_back(DAG.getStore(Load.getValue(1), dl, Load, Ptr,
4198 ST->getPointerInfo().getWithOffset(Offset),
4199 MinAlign(ST->getAlignment(), Offset),
4200 ST->getMemOperand()->getFlags()));
4201 // Increment the pointers.
4202 Offset += RegBytes;
4203 StackPtr = DAG.getObjectPtrOffset(dl, StackPtr, StackPtrIncrement);
4204 Ptr = DAG.getObjectPtrOffset(dl, Ptr, PtrIncrement);
4205 }
4206
4207 // The last store may be partial. Do a truncating store. On big-endian
4208 // machines this requires an extending load from the stack slot to ensure
4209 // that the bits are in the right place.
4210 EVT MemVT = EVT::getIntegerVT(*DAG.getContext(),
4211 8 * (StoredBytes - Offset));
4212
4213 // Load from the stack slot.
4214 SDValue Load = DAG.getExtLoad(
4215 ISD::EXTLOAD, dl, RegVT, Store, StackPtr,
4216 MachinePointerInfo::getFixedStack(MF, FrameIndex, Offset), MemVT);
4217
4218 Stores.push_back(
4219 DAG.getTruncStore(Load.getValue(1), dl, Load, Ptr,
4220 ST->getPointerInfo().getWithOffset(Offset), MemVT,
4221 MinAlign(ST->getAlignment(), Offset),
4222 ST->getMemOperand()->getFlags(), ST->getAAInfo()));
4223 // The order of the stores doesn't matter - say it with a TokenFactor.
4224 SDValue Result = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Stores);
4225 return Result;
4226 }
4227
4228 assert(ST->getMemoryVT().isInteger() &&
4229 !ST->getMemoryVT().isVector() &&
4230 "Unaligned store of unknown type.");
4231 // Get the half-size VT
4232 EVT NewStoredVT = ST->getMemoryVT().getHalfSizedIntegerVT(*DAG.getContext());
4233 int NumBits = NewStoredVT.getSizeInBits();
4234 int IncrementSize = NumBits / 8;
4235
4236 // Divide the stored value in two parts.
4237 SDValue ShiftAmount =
4238 DAG.getConstant(NumBits, dl, getShiftAmountTy(Val.getValueType(),
4239 DAG.getDataLayout()));
4240 SDValue Lo = Val;
4241 SDValue Hi = DAG.getNode(ISD::SRL, dl, VT, Val, ShiftAmount);
4242
4243 // Store the two parts
4244 SDValue Store1, Store2;
4245 Store1 = DAG.getTruncStore(Chain, dl,
4246 DAG.getDataLayout().isLittleEndian() ? Lo : Hi,
4247 Ptr, ST->getPointerInfo(), NewStoredVT, Alignment,
4248 ST->getMemOperand()->getFlags());
4249
4250 Ptr = DAG.getObjectPtrOffset(dl, Ptr, IncrementSize);
4251 Alignment = MinAlign(Alignment, IncrementSize);
4252 Store2 = DAG.getTruncStore(
4253 Chain, dl, DAG.getDataLayout().isLittleEndian() ? Hi : Lo, Ptr,
4254 ST->getPointerInfo().getWithOffset(IncrementSize), NewStoredVT, Alignment,
4255 ST->getMemOperand()->getFlags(), ST->getAAInfo());
4256
4257 SDValue Result =
4258 DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Store1, Store2);
4259 return Result;
4260 }
4261
4262 SDValue
4263 TargetLowering::IncrementMemoryAddress(SDValue Addr, SDValue Mask,
4264 const SDLoc &DL, EVT DataVT,
4265 SelectionDAG &DAG,
4266 bool IsCompressedMemory) const {
4267 SDValue Increment;
4268 EVT AddrVT = Addr.getValueType();
4269 EVT MaskVT = Mask.getValueType();
4270 assert(DataVT.getVectorNumElements() == MaskVT.getVectorNumElements() &&
4271 "Incompatible types of Data and Mask");
4272 if (IsCompressedMemory) {
4273 // Increment the pointer according to the number of '1's in the mask.
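// For example (illustrative), a compressing store of v8i32 with mask
// 0b00001101 writes popcount(mask) = 3 elements, so the address is
// advanced by 3 * 4 = 12 bytes.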
4274 EVT MaskIntVT = EVT::getIntegerVT(*DAG.getContext(), MaskVT.getSizeInBits());
4275 SDValue MaskInIntReg = DAG.getBitcast(MaskIntVT, Mask);
4276 if (MaskIntVT.getSizeInBits() < 32) {
4277 MaskInIntReg = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i32, MaskInIntReg);
4278 MaskIntVT = MVT::i32;
4279 }
4280
4281 // Count '1's with POPCNT.
4282 Increment = DAG.getNode(ISD::CTPOP, DL, MaskIntVT, MaskInIntReg);
4283 Increment = DAG.getZExtOrTrunc(Increment, DL, AddrVT);
4284 // Scale is an element size in bytes.
4285 SDValue Scale = DAG.getConstant(DataVT.getScalarSizeInBits() / 8, DL,
4286 AddrVT);
4287 Increment = DAG.getNode(ISD::MUL, DL, AddrVT, Increment, Scale);
4288 } else
4289 Increment = DAG.getConstant(DataVT.getStoreSize(), DL, AddrVT);
4290
4291 return DAG.getNode(ISD::ADD, DL, AddrVT, Addr, Increment);
4292 }
4293
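/// Clamp a dynamic vector index so the resulting access stays in bounds:
/// power-of-two element counts are masked with AND (NElts - 1), other
/// counts are clamped with UMIN against NElts - 1.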
4294 static SDValue clampDynamicVectorIndex(SelectionDAG &DAG,
4295 SDValue Idx,
4296 EVT VecVT,
4297 const SDLoc &dl) {
4298 if (isa<ConstantSDNode>(Idx))
4299 return Idx;
4300
4301 EVT IdxVT = Idx.getValueType();
4302 unsigned NElts = VecVT.getVectorNumElements();
4303 if (isPowerOf2_32(NElts)) {
4304 APInt Imm = APInt::getLowBitsSet(IdxVT.getSizeInBits(),
4305 Log2_32(NElts));
4306 return DAG.getNode(ISD::AND, dl, IdxVT, Idx,
4307 DAG.getConstant(Imm, dl, IdxVT));
4308 }
4309
4310 return DAG.getNode(ISD::UMIN, dl, IdxVT, Idx,
4311 DAG.getConstant(NElts - 1, dl, IdxVT));
4312 }
4313
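/// Compute the address of element \p Index of the vector stored in memory at
/// \p VecPtr, in effect VecPtr + clamp(Index) * sizeof(element). Clamping the
/// index first keeps a dynamic out-of-bounds index from producing a pointer
/// past the object.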
SDValue TargetLowering::getVectorElementPointer(SelectionDAG &DAG,
                                                SDValue VecPtr, EVT VecVT,
                                                SDValue Index) const {
  SDLoc dl(Index);
  // Make sure the index type is big enough to compute in.
  Index = DAG.getZExtOrTrunc(Index, dl, VecPtr.getValueType());

  EVT EltVT = VecVT.getVectorElementType();

  // Calculate the element offset and add it to the pointer.
  unsigned EltSize = EltVT.getSizeInBits() / 8; // FIXME: should be ABI size.
  assert(EltSize * 8 == EltVT.getSizeInBits() &&
         "Converting bits to bytes lost precision");

  Index = clampDynamicVectorIndex(DAG, Index, VecVT, dl);

  EVT IdxVT = Index.getValueType();

  Index = DAG.getNode(ISD::MUL, dl, IdxVT, Index,
                      DAG.getConstant(EltSize, dl, IdxVT));
  return DAG.getNode(ISD::ADD, dl, IdxVT, VecPtr, Index);
}

//===----------------------------------------------------------------------===//
// Implementation of Emulated TLS Model
//===----------------------------------------------------------------------===//

SDValue TargetLowering::LowerToTLSEmulatedModel(const GlobalAddressSDNode *GA,
                                                SelectionDAG &DAG) const {
  // Access to the address of a TLS variable xyz is lowered to a function call:
  // __emutls_get_address( address of global variable named "__emutls_v.xyz" )
  EVT PtrVT = getPointerTy(DAG.getDataLayout());
  PointerType *VoidPtrType = Type::getInt8PtrTy(*DAG.getContext());
  SDLoc dl(GA);

  ArgListTy Args;
  ArgListEntry Entry;
  std::string NameString = ("__emutls_v." + GA->getGlobal()->getName()).str();
  Module *VariableModule = const_cast<Module*>(GA->getGlobal()->getParent());
  StringRef EmuTlsVarName(NameString);
  GlobalVariable *EmuTlsVar = VariableModule->getNamedGlobal(EmuTlsVarName);
  assert(EmuTlsVar && "Cannot find EmuTlsVar");
  Entry.Node = DAG.getGlobalAddress(EmuTlsVar, dl, PtrVT);
  Entry.Ty = VoidPtrType;
  Args.push_back(Entry);

  SDValue EmuTlsGetAddr = DAG.getExternalSymbol("__emutls_get_address", PtrVT);

  TargetLowering::CallLoweringInfo CLI(DAG);
  CLI.setDebugLoc(dl).setChain(DAG.getEntryNode());
  CLI.setLibCallee(CallingConv::C, VoidPtrType, EmuTlsGetAddr, std::move(Args));
  std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);

  // TLSADDR will be codegen'ed as a call. Inform MFI that this function has
  // calls. At least for X86 targets; maybe good for other targets too?
  MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
  MFI.setAdjustsStack(true); // Is this only needed for the X86 target?
  MFI.setHasCalls(true);

  assert((GA->getOffset() == 0) &&
         "Emulated TLS must have zero offset in GlobalAddressSDNode");
  return CallResult.first;
}

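/// Try to lower (seteq X, 0) to (srl (ctlz X), log2(bitwidth)). CTLZ returns
/// the bit width, a power of two, exactly when its operand is zero, so the
/// single bit shifted out is the comparison result: e.g. for an i32 X,
/// X == 0 gives ctlz(X) = 32 and 32 >> 5 == 1, while any nonzero X gives
/// ctlz(X) < 32 and a result of 0. Only worthwhile when CTLZ is fast on the
/// target.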
SDValue TargetLowering::lowerCmpEqZeroToCtlzSrl(SDValue Op,
                                                SelectionDAG &DAG) const {
  assert((Op->getOpcode() == ISD::SETCC) && "Input has to be a SETCC node.");
  if (!isCtlzFast())
    return SDValue();
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
  SDLoc dl(Op);
  if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
    if (C->isNullValue() && CC == ISD::SETEQ) {
      EVT VT = Op.getOperand(0).getValueType();
      SDValue Zext = Op.getOperand(0);
      if (VT.bitsLT(MVT::i32)) {
        VT = MVT::i32;
        Zext = DAG.getNode(ISD::ZERO_EXTEND, dl, VT, Op.getOperand(0));
      }
      unsigned Log2b = Log2_32(VT.getSizeInBits());
      SDValue Clz = DAG.getNode(ISD::CTLZ, dl, VT, Zext);
      SDValue Scc = DAG.getNode(ISD::SRL, dl, VT, Clz,
                                DAG.getConstant(Log2b, dl, MVT::i32));
      return DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Scc);
    }
  }
  return SDValue();
}
