//===-- TargetLowering.cpp - Implement the TargetLowering class ----------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements the TargetLowering class.
//
//===----------------------------------------------------------------------===//

#include "llvm/Target/TargetLowering.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetLoweringObjectFile.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include <cctype>
using namespace llvm;

/// NOTE: The TargetMachine owns TLOF.
TargetLowering::TargetLowering(const TargetMachine &tm)
    : TargetLoweringBase(tm) {}

const char *TargetLowering::getTargetNodeName(unsigned Opcode) const {
  return nullptr;
}

bool TargetLowering::isPositionIndependent() const {
  return getTargetMachine().isPositionIndependent();
}

/// Check whether a given call node is in tail position within its function. If
/// so, it sets Chain to the input chain of the tail call.
bool TargetLowering::isInTailCallPosition(SelectionDAG &DAG, SDNode *Node,
                                          SDValue &Chain) const {
  const Function *F = DAG.getMachineFunction().getFunction();

  // Conservatively require the attributes of the call to match those of
  // the return. Ignore noalias because it doesn't affect the call sequence.
  AttributeSet CallerAttrs = F->getAttributes();
  if (AttrBuilder(CallerAttrs, AttributeSet::ReturnIndex)
      .removeAttribute(Attribute::NoAlias).hasAttributes())
    return false;

  // It's not safe to eliminate the sign / zero extension of the return value.
  if (CallerAttrs.hasAttribute(AttributeSet::ReturnIndex, Attribute::ZExt) ||
      CallerAttrs.hasAttribute(AttributeSet::ReturnIndex, Attribute::SExt))
    return false;

  // Check if the only use is a function return node.
  return isUsedByReturnOnly(Node, Chain);
}
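
// Illustrative example: a call is in tail position when its result feeds
// straight into the function's return, e.g. in LLVM IR:
//
//   %r = tail call i32 @callee(i32 %x)
//   ret i32 %r
//
// Here the call node's only non-chain user is the return, so
// isUsedByReturnOnly can hand the call's input chain back through Chain.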

bool TargetLowering::parametersInCSRMatch(const MachineRegisterInfo &MRI,
    const uint32_t *CallerPreservedMask,
    const SmallVectorImpl<CCValAssign> &ArgLocs,
    const SmallVectorImpl<SDValue> &OutVals) const {
  for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {
    const CCValAssign &ArgLoc = ArgLocs[I];
    if (!ArgLoc.isRegLoc())
      continue;
    unsigned Reg = ArgLoc.getLocReg();
    // Only look at callee saved registers.
    if (MachineOperand::clobbersPhysReg(CallerPreservedMask, Reg))
      continue;
    // Check that we pass the value used for the caller.
    // (We look for a CopyFromReg reading a virtual register that is used
    //  for the function live-in value of register Reg)
    SDValue Value = OutVals[I];
    if (Value->getOpcode() != ISD::CopyFromReg)
      return false;
    unsigned ArgReg = cast<RegisterSDNode>(Value->getOperand(1))->getReg();
    if (MRI.getLiveInPhysReg(ArgReg) != Reg)
      return false;
  }
  return true;
}
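
// Sketch of the pattern this accepts for a register argument Reg that is
// also callee-saved (values are illustrative):
//
//   t1: i64 = CopyFromReg %vreg0   // %vreg0 is the live-in vreg for Reg
//   ...                            // t1 passed back out to the callee in Reg
//
// Only a CopyFromReg of Reg's own live-in virtual register is accepted;
// anything else could clobber a callee-saved register across the tail call.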

/// \brief Set CallLoweringInfo attribute flags based on a call instruction
/// and called function attributes.
void TargetLowering::ArgListEntry::setAttributes(ImmutableCallSite *CS,
                                                 unsigned AttrIdx) {
  isSExt = CS->paramHasAttr(AttrIdx, Attribute::SExt);
  isZExt = CS->paramHasAttr(AttrIdx, Attribute::ZExt);
  isInReg = CS->paramHasAttr(AttrIdx, Attribute::InReg);
  isSRet = CS->paramHasAttr(AttrIdx, Attribute::StructRet);
  isNest = CS->paramHasAttr(AttrIdx, Attribute::Nest);
  isByVal = CS->paramHasAttr(AttrIdx, Attribute::ByVal);
  isInAlloca = CS->paramHasAttr(AttrIdx, Attribute::InAlloca);
  isReturned = CS->paramHasAttr(AttrIdx, Attribute::Returned);
  isSwiftSelf = CS->paramHasAttr(AttrIdx, Attribute::SwiftSelf);
  isSwiftError = CS->paramHasAttr(AttrIdx, Attribute::SwiftError);
  Alignment = CS->getParamAlignment(AttrIdx);
}

/// Generate a libcall taking the given operands as arguments and returning a
/// result of type RetVT.
std::pair<SDValue, SDValue>
TargetLowering::makeLibCall(SelectionDAG &DAG, RTLIB::Libcall LC, EVT RetVT,
                            ArrayRef<SDValue> Ops, bool isSigned,
                            const SDLoc &dl, bool doesNotReturn,
                            bool isReturnValueUsed) const {
  TargetLowering::ArgListTy Args;
  Args.reserve(Ops.size());

  TargetLowering::ArgListEntry Entry;
  for (SDValue Op : Ops) {
    Entry.Node = Op;
    Entry.Ty = Entry.Node.getValueType().getTypeForEVT(*DAG.getContext());
    Entry.isSExt = shouldSignExtendTypeInLibCall(Op.getValueType(), isSigned);
    Entry.isZExt = !shouldSignExtendTypeInLibCall(Op.getValueType(), isSigned);
    Args.push_back(Entry);
  }

  if (LC == RTLIB::UNKNOWN_LIBCALL)
    report_fatal_error("Unsupported library call operation!");
  SDValue Callee = DAG.getExternalSymbol(getLibcallName(LC),
                                         getPointerTy(DAG.getDataLayout()));

  Type *RetTy = RetVT.getTypeForEVT(*DAG.getContext());
  TargetLowering::CallLoweringInfo CLI(DAG);
  bool signExtend = shouldSignExtendTypeInLibCall(RetVT, isSigned);
  CLI.setDebugLoc(dl).setChain(DAG.getEntryNode())
    .setCallee(getLibcallCallingConv(LC), RetTy, Callee, std::move(Args))
    .setNoReturn(doesNotReturn).setDiscardResult(!isReturnValueUsed)
    .setSExtResult(signExtend).setZExtResult(!signExtend);
  return LowerCallTo(CLI);
}
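
// Minimal usage sketch (a hypothetical call site in a target's lowering
// code), expanding an f32 addition into a call to the ADD_F32 libcall:
//
//   SDValue Ops[2] = { LHS, RHS };
//   std::pair<SDValue, SDValue> CallInfo =
//       makeLibCall(DAG, RTLIB::ADD_F32, MVT::f32, Ops, /*isSigned=*/false,
//                   dl);
//   SDValue Result = CallInfo.first;  // the libcall's return value
//   SDValue Chain  = CallInfo.second; // the output chain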

/// Soften the operands of a comparison. This code is shared among BR_CC,
/// SELECT_CC, and SETCC handlers.
void TargetLowering::softenSetCCOperands(SelectionDAG &DAG, EVT VT,
                                         SDValue &NewLHS, SDValue &NewRHS,
                                         ISD::CondCode &CCCode,
                                         const SDLoc &dl) const {
  assert((VT == MVT::f32 || VT == MVT::f64 || VT == MVT::f128 ||
          VT == MVT::ppcf128) &&
         "Unsupported setcc type!");

  // Expand into one or more soft-fp libcall(s).
  RTLIB::Libcall LC1 = RTLIB::UNKNOWN_LIBCALL, LC2 = RTLIB::UNKNOWN_LIBCALL;
  bool ShouldInvertCC = false;
  switch (CCCode) {
  case ISD::SETEQ:
  case ISD::SETOEQ:
    LC1 = (VT == MVT::f32) ? RTLIB::OEQ_F32 :
          (VT == MVT::f64) ? RTLIB::OEQ_F64 :
          (VT == MVT::f128) ? RTLIB::OEQ_F128 : RTLIB::OEQ_PPCF128;
    break;
  case ISD::SETNE:
  case ISD::SETUNE:
    LC1 = (VT == MVT::f32) ? RTLIB::UNE_F32 :
          (VT == MVT::f64) ? RTLIB::UNE_F64 :
          (VT == MVT::f128) ? RTLIB::UNE_F128 : RTLIB::UNE_PPCF128;
    break;
  case ISD::SETGE:
  case ISD::SETOGE:
    LC1 = (VT == MVT::f32) ? RTLIB::OGE_F32 :
          (VT == MVT::f64) ? RTLIB::OGE_F64 :
          (VT == MVT::f128) ? RTLIB::OGE_F128 : RTLIB::OGE_PPCF128;
    break;
  case ISD::SETLT:
  case ISD::SETOLT:
    LC1 = (VT == MVT::f32) ? RTLIB::OLT_F32 :
          (VT == MVT::f64) ? RTLIB::OLT_F64 :
          (VT == MVT::f128) ? RTLIB::OLT_F128 : RTLIB::OLT_PPCF128;
    break;
  case ISD::SETLE:
  case ISD::SETOLE:
    LC1 = (VT == MVT::f32) ? RTLIB::OLE_F32 :
          (VT == MVT::f64) ? RTLIB::OLE_F64 :
          (VT == MVT::f128) ? RTLIB::OLE_F128 : RTLIB::OLE_PPCF128;
    break;
  case ISD::SETGT:
  case ISD::SETOGT:
    LC1 = (VT == MVT::f32) ? RTLIB::OGT_F32 :
          (VT == MVT::f64) ? RTLIB::OGT_F64 :
          (VT == MVT::f128) ? RTLIB::OGT_F128 : RTLIB::OGT_PPCF128;
    break;
  case ISD::SETUO:
    LC1 = (VT == MVT::f32) ? RTLIB::UO_F32 :
          (VT == MVT::f64) ? RTLIB::UO_F64 :
          (VT == MVT::f128) ? RTLIB::UO_F128 : RTLIB::UO_PPCF128;
    break;
  case ISD::SETO:
    LC1 = (VT == MVT::f32) ? RTLIB::O_F32 :
          (VT == MVT::f64) ? RTLIB::O_F64 :
          (VT == MVT::f128) ? RTLIB::O_F128 : RTLIB::O_PPCF128;
    break;
  case ISD::SETONE:
    // SETONE = SETOLT | SETOGT
    LC1 = (VT == MVT::f32) ? RTLIB::OLT_F32 :
          (VT == MVT::f64) ? RTLIB::OLT_F64 :
          (VT == MVT::f128) ? RTLIB::OLT_F128 : RTLIB::OLT_PPCF128;
    LC2 = (VT == MVT::f32) ? RTLIB::OGT_F32 :
          (VT == MVT::f64) ? RTLIB::OGT_F64 :
          (VT == MVT::f128) ? RTLIB::OGT_F128 : RTLIB::OGT_PPCF128;
    break;
  case ISD::SETUEQ:
    LC1 = (VT == MVT::f32) ? RTLIB::UO_F32 :
          (VT == MVT::f64) ? RTLIB::UO_F64 :
          (VT == MVT::f128) ? RTLIB::UO_F128 : RTLIB::UO_PPCF128;
    LC2 = (VT == MVT::f32) ? RTLIB::OEQ_F32 :
          (VT == MVT::f64) ? RTLIB::OEQ_F64 :
          (VT == MVT::f128) ? RTLIB::OEQ_F128 : RTLIB::OEQ_PPCF128;
    break;
  default:
    // Invert CC for unordered comparisons
    ShouldInvertCC = true;
    switch (CCCode) {
    case ISD::SETULT:
      LC1 = (VT == MVT::f32) ? RTLIB::OGE_F32 :
            (VT == MVT::f64) ? RTLIB::OGE_F64 :
            (VT == MVT::f128) ? RTLIB::OGE_F128 : RTLIB::OGE_PPCF128;
      break;
    case ISD::SETULE:
      LC1 = (VT == MVT::f32) ? RTLIB::OGT_F32 :
            (VT == MVT::f64) ? RTLIB::OGT_F64 :
            (VT == MVT::f128) ? RTLIB::OGT_F128 : RTLIB::OGT_PPCF128;
      break;
    case ISD::SETUGT:
      LC1 = (VT == MVT::f32) ? RTLIB::OLE_F32 :
            (VT == MVT::f64) ? RTLIB::OLE_F64 :
            (VT == MVT::f128) ? RTLIB::OLE_F128 : RTLIB::OLE_PPCF128;
      break;
    case ISD::SETUGE:
      LC1 = (VT == MVT::f32) ? RTLIB::OLT_F32 :
            (VT == MVT::f64) ? RTLIB::OLT_F64 :
            (VT == MVT::f128) ? RTLIB::OLT_F128 : RTLIB::OLT_PPCF128;
      break;
    default: llvm_unreachable("Do not know how to soften this setcc!");
    }
  }

  // Use the target specific return value for comparison lib calls.
  EVT RetVT = getCmpLibcallReturnType();
  SDValue Ops[2] = {NewLHS, NewRHS};
  NewLHS = makeLibCall(DAG, LC1, RetVT, Ops, false /*sign irrelevant*/,
                       dl).first;
  NewRHS = DAG.getConstant(0, dl, RetVT);

  CCCode = getCmpLibcallCC(LC1);
  if (ShouldInvertCC)
    CCCode = getSetCCInverse(CCCode, /*isInteger=*/true);

  if (LC2 != RTLIB::UNKNOWN_LIBCALL) {
    SDValue Tmp = DAG.getNode(
        ISD::SETCC, dl,
        getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), RetVT),
        NewLHS, NewRHS, DAG.getCondCode(CCCode));
    NewLHS = makeLibCall(DAG, LC2, RetVT, Ops, false /*sign irrelevant*/,
                         dl).first;
    NewLHS = DAG.getNode(
        ISD::SETCC, dl,
        getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), RetVT),
        NewLHS, NewRHS, DAG.getCondCode(getCmpLibcallCC(LC2)));
    NewLHS = DAG.getNode(ISD::OR, dl, Tmp.getValueType(), Tmp, NewLHS);
    NewRHS = SDValue();
  }
}
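
// Worked example (f32, default RTLIB names): softening "setcc olt %a, %b"
// yields the single-call form
//   setcc (__ltsf2(%a, %b)), 0, getCmpLibcallCC(OLT_F32)
// while the two-call SETONE case becomes, roughly,
//   (setcc __ltsf2(%a, %b), 0, lt) | (setcc __gtsf2(%a, %b), 0, gt)
// with NewRHS cleared so the caller knows the result is already a boolean.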

/// Return the entry encoding for a jump table in the current function. The
/// returned value is a member of the MachineJumpTableInfo::JTEntryKind enum.
unsigned TargetLowering::getJumpTableEncoding() const {
  // In non-pic modes, just use the address of a block.
  if (!isPositionIndependent())
    return MachineJumpTableInfo::EK_BlockAddress;

  // In PIC mode, if the target supports a GPRel32 directive, use it.
  if (getTargetMachine().getMCAsmInfo()->getGPRel32Directive() != nullptr)
    return MachineJumpTableInfo::EK_GPRel32BlockAddress;

  // Otherwise, use a label difference.
  return MachineJumpTableInfo::EK_LabelDifference32;
}
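
// For reference, the three encodings correspond to jump-table entries along
// these lines (exact directives vary by target and MCAsmInfo):
//   EK_BlockAddress:         .quad   LBB0_2           ; absolute address
//   EK_GPRel32BlockAddress:  .gpword LBB0_2           ; GP-relative offset
//   EK_LabelDifference32:    .word   LBB0_2-LJTI0_0   ; offset from the table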

SDValue TargetLowering::getPICJumpTableRelocBase(SDValue Table,
                                                 SelectionDAG &DAG) const {
  // If our PIC model is GP relative, use the global offset table as the base.
  unsigned JTEncoding = getJumpTableEncoding();

  if ((JTEncoding == MachineJumpTableInfo::EK_GPRel64BlockAddress) ||
      (JTEncoding == MachineJumpTableInfo::EK_GPRel32BlockAddress))
    return DAG.getGLOBAL_OFFSET_TABLE(getPointerTy(DAG.getDataLayout()));

  return Table;
}

/// This returns the relocation base for the given PIC jumptable, the same as
/// getPICJumpTableRelocBase, but as an MCExpr.
const MCExpr *
TargetLowering::getPICJumpTableRelocBaseExpr(const MachineFunction *MF,
                                             unsigned JTI, MCContext &Ctx) const {
  // The normal PIC reloc base is the label at the start of the jump table.
  return MCSymbolRefExpr::create(MF->getJTISymbol(JTI, Ctx), Ctx);
}

bool
TargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
  const TargetMachine &TM = getTargetMachine();
  const GlobalValue *GV = GA->getGlobal();

  // If the address is not even local to this DSO we will have to load it from
  // a got and then add the offset.
  if (!TM.shouldAssumeDSOLocal(*GV->getParent(), GV))
    return false;

  // If the code is position independent we will have to add a base register.
  if (isPositionIndependent())
    return false;

  // Otherwise we can do it.
  return true;
}
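
// For example, when this returns true a node such as (GlobalAddress @g + 16)
// can be matched as a single relocated operand (roughly "g+16" in the asm),
// whereas PIC or non-DSO-local code must materialize @g's address first
// (possibly through the GOT) and apply the +16 as a separate add.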

//===----------------------------------------------------------------------===//
//  Optimization Methods
//===----------------------------------------------------------------------===//

/// Check to see if the specified operand of the specified instruction is a
/// constant integer. If so, check to see if there are any bits set in the
/// constant that are not demanded. If so, shrink the constant and return true.
bool TargetLowering::TargetLoweringOpt::ShrinkDemandedConstant(SDValue Op,
                                                        const APInt &Demanded) {
  SDLoc dl(Op);

  // FIXME: ISD::SELECT, ISD::SELECT_CC
  switch (Op.getOpcode()) {
  default: break;
  case ISD::XOR:
  case ISD::AND:
  case ISD::OR: {
    ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
    if (!C) return false;

    if (Op.getOpcode() == ISD::XOR &&
        (C->getAPIntValue() | (~Demanded)).isAllOnesValue())
      return false;

    // if we can expand it to have all bits set, do it
    if (C->getAPIntValue().intersects(~Demanded)) {
      EVT VT = Op.getValueType();
      SDValue New = DAG.getNode(Op.getOpcode(), dl, VT, Op.getOperand(0),
                                DAG.getConstant(Demanded & C->getAPIntValue(),
                                                dl, VT));
      return CombineTo(Op, New);
    }

    break;
  }
  }

  return false;
}
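
// Worked example: for (and X, 0xFFFF) with Demanded = 0x00FF, the constant
// intersects ~Demanded, so the node is rebuilt as (and X, 0x00FF). The XOR
// early-out deliberately leaves constants alone when they already cover all
// demanded bits with ones (e.g. a NOT written as xor X, -1), since shrinking
// those would obscure the NOT idiom from later combines.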

/// Convert x+y to (VT)((SmallVT)x+(SmallVT)y) if the casts are free.
/// This uses isZExtFree and ZERO_EXTEND for the widening cast, but it could be
/// generalized for targets with other types of implicit widening casts.
bool TargetLowering::TargetLoweringOpt::ShrinkDemandedOp(SDValue Op,
                                                         unsigned BitWidth,
                                                         const APInt &Demanded,
                                                         const SDLoc &dl) {
  assert(Op.getNumOperands() == 2 &&
         "ShrinkDemandedOp only supports binary operators!");
  assert(Op.getNode()->getNumValues() == 1 &&
         "ShrinkDemandedOp only supports nodes with one result!");

  // Early return, as this function cannot handle vector types.
  if (Op.getValueType().isVector())
    return false;

  // Don't do this if the node has another user, which may require the
  // full value.
  if (!Op.getNode()->hasOneUse())
    return false;

  // Search for the smallest integer type with free casts to and from
  // Op's type. For expedience, just check power-of-2 integer types.
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  unsigned DemandedSize = BitWidth - Demanded.countLeadingZeros();
  unsigned SmallVTBits = DemandedSize;
  if (!isPowerOf2_32(SmallVTBits))
    SmallVTBits = NextPowerOf2(SmallVTBits);
  for (; SmallVTBits < BitWidth; SmallVTBits = NextPowerOf2(SmallVTBits)) {
    EVT SmallVT = EVT::getIntegerVT(*DAG.getContext(), SmallVTBits);
    if (TLI.isTruncateFree(Op.getValueType(), SmallVT) &&
        TLI.isZExtFree(SmallVT, Op.getValueType())) {
      // We found a type with free casts.
      SDValue X = DAG.getNode(Op.getOpcode(), dl, SmallVT,
                              DAG.getNode(ISD::TRUNCATE, dl, SmallVT,
                                          Op.getNode()->getOperand(0)),
                              DAG.getNode(ISD::TRUNCATE, dl, SmallVT,
                                          Op.getNode()->getOperand(1)));
      bool NeedZext = DemandedSize > SmallVTBits;
      SDValue Z = DAG.getNode(NeedZext ? ISD::ZERO_EXTEND : ISD::ANY_EXTEND,
                              dl, Op.getValueType(), X);
      return CombineTo(Op, Z);
    }
  }
  return false;
}
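
// Worked example: for (i32 add X, Y) where only the low 8 bits are demanded,
// the loop tries i8, then i16; on a target where truncating i32->i8 and
// zero-extending i8->i32 are both free, the node is rebuilt roughly as
//   (any_extend (add (trunc X), (trunc Y)))
// and the extended high bits are don't-cares to every user of the result.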

/// Look at Op. At this point, we know that only the DemandedMask bits of the
/// result of Op are ever used downstream. If we can use this information to
/// simplify Op, create a new simplified DAG node and return true, returning the
/// original and new nodes in Old and New. Otherwise, analyze the expression and
/// return a mask of KnownOne and KnownZero bits for the expression (used to
/// simplify the caller). The KnownZero/One bits may only be accurate for those
/// bits in the DemandedMask.
bool TargetLowering::SimplifyDemandedBits(SDValue Op,
                                          const APInt &DemandedMask,
                                          APInt &KnownZero,
                                          APInt &KnownOne,
                                          TargetLoweringOpt &TLO,
                                          unsigned Depth) const {
  unsigned BitWidth = DemandedMask.getBitWidth();
  assert(Op.getValueType().getScalarType().getSizeInBits() == BitWidth &&
         "Mask size mismatches value type size!");
  APInt NewMask = DemandedMask;
  SDLoc dl(Op);
  auto &DL = TLO.DAG.getDataLayout();

  // Don't know anything.
  KnownZero = KnownOne = APInt(BitWidth, 0);

  // Other users may use these bits.
  if (!Op.getNode()->hasOneUse()) {
    if (Depth != 0) {
      // If not at the root, just compute the KnownZero/KnownOne bits to
      // simplify things downstream.
      TLO.DAG.computeKnownBits(Op, KnownZero, KnownOne, Depth);
      return false;
    }
    // If this is the root being simplified, allow it to have multiple uses,
    // just set the NewMask to all bits.
    NewMask = APInt::getAllOnesValue(BitWidth);
  } else if (DemandedMask == 0) {
    // Not demanding any bits from Op.
    if (!Op.isUndef())
      return TLO.CombineTo(Op, TLO.DAG.getUNDEF(Op.getValueType()));
    return false;
  } else if (Depth == 6) {        // Limit search depth.
    return false;
  }

  APInt KnownZero2, KnownOne2, KnownZeroOut, KnownOneOut;
  switch (Op.getOpcode()) {
  case ISD::Constant:
    // We know all of the bits for a constant!
    KnownOne = cast<ConstantSDNode>(Op)->getAPIntValue();
    KnownZero = ~KnownOne;
    return false;   // Don't fall through, will infinitely loop.
  case ISD::AND:
    // If the RHS is a constant, check to see if the LHS would be zero without
    // using the bits from the RHS. Below, we use knowledge about the RHS to
    // simplify the LHS, here we're using information from the LHS to simplify
    // the RHS.
    if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
      APInt LHSZero, LHSOne;
      // Do not increment Depth here; that can cause an infinite loop.
      TLO.DAG.computeKnownBits(Op.getOperand(0), LHSZero, LHSOne, Depth);
      // If the LHS already has zeros where RHSC does, this and is dead.
      if ((LHSZero & NewMask) == (~RHSC->getAPIntValue() & NewMask))
        return TLO.CombineTo(Op, Op.getOperand(0));
      // If any of the set bits in the RHS are known zero on the LHS, shrink
      // the constant.
      if (TLO.ShrinkDemandedConstant(Op, ~LHSZero & NewMask))
        return true;
    }

    if (SimplifyDemandedBits(Op.getOperand(1), NewMask, KnownZero,
                             KnownOne, TLO, Depth+1))
      return true;
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    if (SimplifyDemandedBits(Op.getOperand(0), ~KnownZero & NewMask,
                             KnownZero2, KnownOne2, TLO, Depth+1))
      return true;
    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");

    // If all of the demanded bits are known one on one side, return the other.
    // These bits cannot contribute to the result of the 'and'.
    if ((NewMask & ~KnownZero2 & KnownOne) == (~KnownZero2 & NewMask))
      return TLO.CombineTo(Op, Op.getOperand(0));
    if ((NewMask & ~KnownZero & KnownOne2) == (~KnownZero & NewMask))
      return TLO.CombineTo(Op, Op.getOperand(1));
    // If all of the demanded bits in the inputs are known zeros, return zero.
    if ((NewMask & (KnownZero|KnownZero2)) == NewMask)
      return TLO.CombineTo(Op, TLO.DAG.getConstant(0, dl, Op.getValueType()));
    // If the RHS is a constant, see if we can simplify it.
    if (TLO.ShrinkDemandedConstant(Op, ~KnownZero2 & NewMask))
      return true;
    // If the operation can be done in a smaller type, do so.
    if (TLO.ShrinkDemandedOp(Op, BitWidth, NewMask, dl))
      return true;

    // Output known-1 bits are only known if set in both the LHS & RHS.
    KnownOne &= KnownOne2;
    // Output known-0 are known to be clear if zero in either the LHS | RHS.
    KnownZero |= KnownZero2;
    break;
  case ISD::OR:
    if (SimplifyDemandedBits(Op.getOperand(1), NewMask, KnownZero,
                             KnownOne, TLO, Depth+1))
      return true;
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    if (SimplifyDemandedBits(Op.getOperand(0), ~KnownOne & NewMask,
                             KnownZero2, KnownOne2, TLO, Depth+1))
      return true;
    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");

    // If all of the demanded bits are known zero on one side, return the
    // other. These bits cannot contribute to the result of the 'or'.
    if ((NewMask & ~KnownOne2 & KnownZero) == (~KnownOne2 & NewMask))
      return TLO.CombineTo(Op, Op.getOperand(0));
    if ((NewMask & ~KnownOne & KnownZero2) == (~KnownOne & NewMask))
      return TLO.CombineTo(Op, Op.getOperand(1));
    // If all of the potentially set bits on one side are known to be set on
    // the other side, just use the 'other' side.
    if ((NewMask & ~KnownZero & KnownOne2) == (~KnownZero & NewMask))
      return TLO.CombineTo(Op, Op.getOperand(0));
    if ((NewMask & ~KnownZero2 & KnownOne) == (~KnownZero2 & NewMask))
      return TLO.CombineTo(Op, Op.getOperand(1));
    // If the RHS is a constant, see if we can simplify it.
    if (TLO.ShrinkDemandedConstant(Op, NewMask))
      return true;
    // If the operation can be done in a smaller type, do so.
    if (TLO.ShrinkDemandedOp(Op, BitWidth, NewMask, dl))
      return true;

    // Output known-0 bits are only known if clear in both the LHS & RHS.
    KnownZero &= KnownZero2;
    // Output known-1 are known to be set if set in either the LHS | RHS.
    KnownOne |= KnownOne2;
    break;
  case ISD::XOR:
    if (SimplifyDemandedBits(Op.getOperand(1), NewMask, KnownZero,
                             KnownOne, TLO, Depth+1))
      return true;
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    if (SimplifyDemandedBits(Op.getOperand(0), NewMask, KnownZero2,
                             KnownOne2, TLO, Depth+1))
      return true;
    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");

    // If all of the demanded bits are known zero on one side, return the
    // other. These bits cannot contribute to the result of the 'xor'.
    if ((KnownZero & NewMask) == NewMask)
      return TLO.CombineTo(Op, Op.getOperand(0));
    if ((KnownZero2 & NewMask) == NewMask)
      return TLO.CombineTo(Op, Op.getOperand(1));
    // If the operation can be done in a smaller type, do so.
    if (TLO.ShrinkDemandedOp(Op, BitWidth, NewMask, dl))
      return true;

    // If all of the unknown bits are known to be zero on one side or the other
    // (but not both) turn this into an *inclusive* or.
    //    e.g. (A & C1)^(B & C2) -> (A & C1)|(B & C2) iff C1&C2 == 0
    if ((NewMask & ~KnownZero & ~KnownZero2) == 0)
      return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::OR, dl, Op.getValueType(),
                                               Op.getOperand(0),
                                               Op.getOperand(1)));

    // Output known-0 bits are known if clear or set in both the LHS & RHS.
    KnownZeroOut = (KnownZero & KnownZero2) | (KnownOne & KnownOne2);
    // Output known-1 are known to be set if set in only one of the LHS, RHS.
    KnownOneOut = (KnownZero & KnownOne2) | (KnownOne & KnownZero2);

    // If all of the demanded bits on one side are known, and all of the set
    // bits on that side are also known to be set on the other side, turn this
    // into an AND, as we know the bits will be cleared.
    //    e.g. (X | C1) ^ C2 --> (X | C1) & ~C2 iff (C1&C2) == C2
    // NB: it is okay if more bits are known than are requested
    if ((NewMask & (KnownZero|KnownOne)) == NewMask) { // all known on one side
      if (KnownOne == KnownOne2) { // set bits are the same on both sides
        EVT VT = Op.getValueType();
        SDValue ANDC = TLO.DAG.getConstant(~KnownOne & NewMask, dl, VT);
        return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::AND, dl, VT,
                                                 Op.getOperand(0), ANDC));
      }
    }

    // If the RHS is a constant, see if we can simplify it.
    // for XOR, we prefer to force bits to 1 if they will make a -1.
    // if we can't force bits, try to shrink constant
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
      APInt Expanded = C->getAPIntValue() | (~NewMask);
      // if we can expand it to have all bits set, do it
      if (Expanded.isAllOnesValue()) {
        if (Expanded != C->getAPIntValue()) {
          EVT VT = Op.getValueType();
          SDValue New = TLO.DAG.getNode(Op.getOpcode(), dl, VT, Op.getOperand(0),
                                        TLO.DAG.getConstant(Expanded, dl, VT));
          return TLO.CombineTo(Op, New);
        }
        // if it already has all the bits set, nothing to change
        // but don't shrink either!
      } else if (TLO.ShrinkDemandedConstant(Op, NewMask)) {
        return true;
      }
    }

    KnownZero = KnownZeroOut;
    KnownOne  = KnownOneOut;
    break;
  case ISD::SELECT:
    if (SimplifyDemandedBits(Op.getOperand(2), NewMask, KnownZero,
                             KnownOne, TLO, Depth+1))
      return true;
    if (SimplifyDemandedBits(Op.getOperand(1), NewMask, KnownZero2,
                             KnownOne2, TLO, Depth+1))
      return true;
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");

    // If the operands are constants, see if we can simplify them.
    if (TLO.ShrinkDemandedConstant(Op, NewMask))
      return true;

    // Only known if known in both the LHS and RHS.
    KnownOne &= KnownOne2;
    KnownZero &= KnownZero2;
    break;
  case ISD::SELECT_CC:
    if (SimplifyDemandedBits(Op.getOperand(3), NewMask, KnownZero,
                             KnownOne, TLO, Depth+1))
      return true;
    if (SimplifyDemandedBits(Op.getOperand(2), NewMask, KnownZero2,
                             KnownOne2, TLO, Depth+1))
      return true;
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");

    // If the operands are constants, see if we can simplify them.
    if (TLO.ShrinkDemandedConstant(Op, NewMask))
      return true;

    // Only known if known in both the LHS and RHS.
    KnownOne &= KnownOne2;
    KnownZero &= KnownZero2;
    break;
  case ISD::SHL:
    if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
      unsigned ShAmt = SA->getZExtValue();
      SDValue InOp = Op.getOperand(0);

      // If the shift count is an invalid immediate, don't do anything.
      if (ShAmt >= BitWidth)
        break;

      // If this is ((X >>u C1) << ShAmt), see if we can simplify this into a
      // single shift. We can do this if the bottom bits (which are shifted
      // out) are never demanded.
      if (InOp.getOpcode() == ISD::SRL &&
          isa<ConstantSDNode>(InOp.getOperand(1))) {
        if (ShAmt && (NewMask & APInt::getLowBitsSet(BitWidth, ShAmt)) == 0) {
          unsigned C1 = cast<ConstantSDNode>(InOp.getOperand(1))->getZExtValue();
          unsigned Opc = ISD::SHL;
          int Diff = ShAmt-C1;
          if (Diff < 0) {
            Diff = -Diff;
            Opc = ISD::SRL;
          }

          SDValue NewSA =
            TLO.DAG.getConstant(Diff, dl, Op.getOperand(1).getValueType());
          EVT VT = Op.getValueType();
          return TLO.CombineTo(Op, TLO.DAG.getNode(Opc, dl, VT,
                                                   InOp.getOperand(0), NewSA));
        }
      }

      if (SimplifyDemandedBits(InOp, NewMask.lshr(ShAmt),
                               KnownZero, KnownOne, TLO, Depth+1))
        return true;

      // Convert (shl (anyext x, c)) to (anyext (shl x, c)) if the high bits
      // are not demanded. This will likely allow the anyext to be folded away.
      if (InOp.getNode()->getOpcode() == ISD::ANY_EXTEND) {
        SDValue InnerOp = InOp.getNode()->getOperand(0);
        EVT InnerVT = InnerOp.getValueType();
        unsigned InnerBits = InnerVT.getSizeInBits();
        if (ShAmt < InnerBits && NewMask.lshr(InnerBits) == 0 &&
            isTypeDesirableForOp(ISD::SHL, InnerVT)) {
          EVT ShTy = getShiftAmountTy(InnerVT, DL);
          if (!APInt(BitWidth, ShAmt).isIntN(ShTy.getSizeInBits()))
            ShTy = InnerVT;
          SDValue NarrowShl =
            TLO.DAG.getNode(ISD::SHL, dl, InnerVT, InnerOp,
                            TLO.DAG.getConstant(ShAmt, dl, ShTy));
          return
            TLO.CombineTo(Op,
                          TLO.DAG.getNode(ISD::ANY_EXTEND, dl, Op.getValueType(),
                                          NarrowShl));
        }
        // Repeat the SHL optimization above in cases where an extension
        // intervenes: (shl (anyext (shr x, c1)), c2) to
        // (shl (anyext x), c2-c1). This requires that the bottom c1 bits
        // aren't demanded (as above) and that the shifted upper c1 bits of
        // x aren't demanded.
        if (InOp.hasOneUse() &&
            InnerOp.getOpcode() == ISD::SRL &&
            InnerOp.hasOneUse() &&
            isa<ConstantSDNode>(InnerOp.getOperand(1))) {
          uint64_t InnerShAmt = cast<ConstantSDNode>(InnerOp.getOperand(1))
            ->getZExtValue();
          if (InnerShAmt < ShAmt &&
              InnerShAmt < InnerBits &&
              NewMask.lshr(InnerBits - InnerShAmt + ShAmt) == 0 &&
              NewMask.trunc(ShAmt) == 0) {
            SDValue NewSA =
              TLO.DAG.getConstant(ShAmt - InnerShAmt, dl,
                                  Op.getOperand(1).getValueType());
            EVT VT = Op.getValueType();
            SDValue NewExt = TLO.DAG.getNode(ISD::ANY_EXTEND, dl, VT,
                                             InnerOp.getOperand(0));
            return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::SHL, dl, VT,
                                                     NewExt, NewSA));
          }
        }
      }

      KnownZero <<= SA->getZExtValue();
      KnownOne  <<= SA->getZExtValue();
      // low bits known zero.
      KnownZero |= APInt::getLowBitsSet(BitWidth, SA->getZExtValue());
    }
    break;
  case ISD::SRL:
    if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
      EVT VT = Op.getValueType();
      unsigned ShAmt = SA->getZExtValue();
      unsigned VTSize = VT.getSizeInBits();
      SDValue InOp = Op.getOperand(0);

      // If the shift count is an invalid immediate, don't do anything.
      if (ShAmt >= BitWidth)
        break;

      APInt InDemandedMask = (NewMask << ShAmt);

      // If the shift is exact, then it does demand the low bits (and knows
      // that they are zero).
      if (cast<BinaryWithFlagsSDNode>(Op)->Flags.hasExact())
        InDemandedMask |= APInt::getLowBitsSet(BitWidth, ShAmt);

      // If this is ((X << C1) >>u ShAmt), see if we can simplify this into a
      // single shift. We can do this if the top bits (which are shifted out)
      // are never demanded.
      if (InOp.getOpcode() == ISD::SHL &&
          isa<ConstantSDNode>(InOp.getOperand(1))) {
        if (ShAmt && (NewMask & APInt::getHighBitsSet(VTSize, ShAmt)) == 0) {
          unsigned C1 = cast<ConstantSDNode>(InOp.getOperand(1))->getZExtValue();
          unsigned Opc = ISD::SRL;
          int Diff = ShAmt-C1;
          if (Diff < 0) {
            Diff = -Diff;
            Opc = ISD::SHL;
          }

          SDValue NewSA =
            TLO.DAG.getConstant(Diff, dl, Op.getOperand(1).getValueType());
          return TLO.CombineTo(Op, TLO.DAG.getNode(Opc, dl, VT,
                                                   InOp.getOperand(0), NewSA));
        }
      }

      // Compute the new bits that are at the top now.
      if (SimplifyDemandedBits(InOp, InDemandedMask,
                               KnownZero, KnownOne, TLO, Depth+1))
        return true;
      assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
      KnownZero = KnownZero.lshr(ShAmt);
      KnownOne  = KnownOne.lshr(ShAmt);

      APInt HighBits = APInt::getHighBitsSet(BitWidth, ShAmt);
      KnownZero |= HighBits;  // High bits known zero.
    }
    break;
  case ISD::SRA:
    // If this is an arithmetic shift right and only the low-bit is set, we can
    // always convert this into a logical shr, even if the shift amount is
    // variable. The low bit of the shift cannot be an input sign bit unless
    // the shift amount is >= the size of the datatype, which is undefined.
    if (NewMask == 1)
      return TLO.CombineTo(Op,
                           TLO.DAG.getNode(ISD::SRL, dl, Op.getValueType(),
                                           Op.getOperand(0), Op.getOperand(1)));

    if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
      EVT VT = Op.getValueType();
      unsigned ShAmt = SA->getZExtValue();

      // If the shift count is an invalid immediate, don't do anything.
      if (ShAmt >= BitWidth)
        break;

      APInt InDemandedMask = (NewMask << ShAmt);

      // If the shift is exact, then it does demand the low bits (and knows
      // that they are zero).
      if (cast<BinaryWithFlagsSDNode>(Op)->Flags.hasExact())
        InDemandedMask |= APInt::getLowBitsSet(BitWidth, ShAmt);

      // If any of the demanded bits are produced by the sign extension, we
      // also demand the input sign bit.
      APInt HighBits = APInt::getHighBitsSet(BitWidth, ShAmt);
      if (HighBits.intersects(NewMask))
        InDemandedMask |= APInt::getSignBit(VT.getScalarType().getSizeInBits());

      if (SimplifyDemandedBits(Op.getOperand(0), InDemandedMask,
                               KnownZero, KnownOne, TLO, Depth+1))
        return true;
      assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
      KnownZero = KnownZero.lshr(ShAmt);
      KnownOne  = KnownOne.lshr(ShAmt);

      // Handle the sign bit, adjusted to where it is now in the mask.
      APInt SignBit = APInt::getSignBit(BitWidth).lshr(ShAmt);

      // If the input sign bit is known to be zero, or if none of the top bits
      // are demanded, turn this into an unsigned shift right.
      if (KnownZero.intersects(SignBit) || (HighBits & ~NewMask) == HighBits) {
        SDNodeFlags Flags;
        Flags.setExact(cast<BinaryWithFlagsSDNode>(Op)->Flags.hasExact());
        return TLO.CombineTo(Op,
                             TLO.DAG.getNode(ISD::SRL, dl, VT, Op.getOperand(0),
                                             Op.getOperand(1), &Flags));
      }

      int Log2 = NewMask.exactLogBase2();
      if (Log2 >= 0) {
        // The bit must come from the sign.
        SDValue NewSA =
          TLO.DAG.getConstant(BitWidth - 1 - Log2, dl,
                              Op.getOperand(1).getValueType());
        return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::SRL, dl, VT,
                                                 Op.getOperand(0), NewSA));
      }

      if (KnownOne.intersects(SignBit))
        // New bits are known one.
        KnownOne |= HighBits;
    }
    break;
  case ISD::SIGN_EXTEND_INREG: {
    EVT ExVT = cast<VTSDNode>(Op.getOperand(1))->getVT();

    APInt MsbMask = APInt::getHighBitsSet(BitWidth, 1);
    // If we only care about the highest bit, don't bother shifting right.
    if (MsbMask == NewMask) {
      unsigned ShAmt = ExVT.getScalarType().getSizeInBits();
      SDValue InOp = Op.getOperand(0);
      unsigned VTBits = Op->getValueType(0).getScalarType().getSizeInBits();
      bool AlreadySignExtended =
        TLO.DAG.ComputeNumSignBits(InOp) >= VTBits-ShAmt+1;
      // However if the input is already sign extended we expect the sign
      // extension to be dropped altogether later and do not simplify.
      if (!AlreadySignExtended) {
        // Compute the correct shift amount type, which must be getShiftAmountTy
        // for scalar types after legalization.
        EVT ShiftAmtTy = Op.getValueType();
        if (TLO.LegalTypes() && !ShiftAmtTy.isVector())
          ShiftAmtTy = getShiftAmountTy(ShiftAmtTy, DL);

        SDValue ShiftAmt = TLO.DAG.getConstant(BitWidth - ShAmt, dl,
                                               ShiftAmtTy);
        return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::SHL, dl,
                                                 Op.getValueType(), InOp,
                                                 ShiftAmt));
      }
    }

    // Sign extension. Compute the demanded bits in the result that are not
    // present in the input.
    APInt NewBits =
      APInt::getHighBitsSet(BitWidth,
                            BitWidth - ExVT.getScalarType().getSizeInBits());

    // If none of the extended bits are demanded, eliminate the sextinreg.
    if ((NewBits & NewMask) == 0)
      return TLO.CombineTo(Op, Op.getOperand(0));

    APInt InSignBit =
      APInt::getSignBit(ExVT.getScalarType().getSizeInBits()).zext(BitWidth);
    APInt InputDemandedBits =
      APInt::getLowBitsSet(BitWidth,
                           ExVT.getScalarType().getSizeInBits()) &
      NewMask;

    // Since the sign extended bits are demanded, we know that the sign
    // bit is demanded.
    InputDemandedBits |= InSignBit;

    if (SimplifyDemandedBits(Op.getOperand(0), InputDemandedBits,
                             KnownZero, KnownOne, TLO, Depth+1))
      return true;
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");

    // If the sign bit of the input is known set or clear, then we know the
    // top bits of the result.

    // If the input sign bit is known zero, convert this into a zero extension.
    if (KnownZero.intersects(InSignBit))
      return TLO.CombineTo(Op,
                           TLO.DAG.getZeroExtendInReg(Op.getOperand(0), dl, ExVT));

    if (KnownOne.intersects(InSignBit)) {    // Input sign bit known set
      KnownOne |= NewBits;
      KnownZero &= ~NewBits;
    } else {                       // Input sign bit unknown
      KnownZero &= ~NewBits;
      KnownOne  &= ~NewBits;
    }
    break;
  }
  case ISD::BUILD_PAIR: {
    EVT HalfVT = Op.getOperand(0).getValueType();
    unsigned HalfBitWidth = HalfVT.getScalarSizeInBits();

    APInt MaskLo = NewMask.getLoBits(HalfBitWidth).trunc(HalfBitWidth);
    APInt MaskHi = NewMask.getHiBits(HalfBitWidth).trunc(HalfBitWidth);

    APInt KnownZeroLo, KnownOneLo;
    APInt KnownZeroHi, KnownOneHi;

    if (SimplifyDemandedBits(Op.getOperand(0), MaskLo, KnownZeroLo,
                             KnownOneLo, TLO, Depth + 1))
      return true;

    if (SimplifyDemandedBits(Op.getOperand(1), MaskHi, KnownZeroHi,
                             KnownOneHi, TLO, Depth + 1))
      return true;

    KnownZero = KnownZeroLo.zext(BitWidth) |
                KnownZeroHi.zext(BitWidth).shl(HalfBitWidth);

    KnownOne = KnownOneLo.zext(BitWidth) |
               KnownOneHi.zext(BitWidth).shl(HalfBitWidth);
    break;
  }
  case ISD::ZERO_EXTEND: {
    unsigned OperandBitWidth =
      Op.getOperand(0).getValueType().getScalarType().getSizeInBits();
    APInt InMask = NewMask.trunc(OperandBitWidth);

    // If none of the top bits are demanded, convert this into an any_extend.
    APInt NewBits =
      APInt::getHighBitsSet(BitWidth, BitWidth - OperandBitWidth) & NewMask;
    if (!NewBits.intersects(NewMask))
      return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::ANY_EXTEND, dl,
                                               Op.getValueType(),
                                               Op.getOperand(0)));

    if (SimplifyDemandedBits(Op.getOperand(0), InMask,
                             KnownZero, KnownOne, TLO, Depth+1))
      return true;
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    KnownZero = KnownZero.zext(BitWidth);
    KnownOne = KnownOne.zext(BitWidth);
    KnownZero |= NewBits;
    break;
  }
  case ISD::SIGN_EXTEND: {
    EVT InVT = Op.getOperand(0).getValueType();
    unsigned InBits = InVT.getScalarType().getSizeInBits();
    APInt InMask    = APInt::getLowBitsSet(BitWidth, InBits);
    APInt InSignBit = APInt::getBitsSet(BitWidth, InBits - 1, InBits);
    APInt NewBits   = ~InMask & NewMask;

    // If none of the top bits are demanded, convert this into an any_extend.
    if (NewBits == 0)
      return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::ANY_EXTEND, dl,
                                               Op.getValueType(),
                                               Op.getOperand(0)));

    // Since some of the sign extended bits are demanded, we know that the sign
    // bit is demanded.
    APInt InDemandedBits = InMask & NewMask;
    InDemandedBits |= InSignBit;
    InDemandedBits = InDemandedBits.trunc(InBits);

    if (SimplifyDemandedBits(Op.getOperand(0), InDemandedBits, KnownZero,
                             KnownOne, TLO, Depth+1))
      return true;
    KnownZero = KnownZero.zext(BitWidth);
    KnownOne = KnownOne.zext(BitWidth);

    // If the sign bit is known zero, convert this to a zero extend.
    if (KnownZero.intersects(InSignBit))
      return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::ZERO_EXTEND, dl,
                                               Op.getValueType(),
                                               Op.getOperand(0)));

    // If the sign bit is known one, the top bits match.
    if (KnownOne.intersects(InSignBit)) {
      KnownOne |= NewBits;
      assert((KnownZero & NewBits) == 0);
    } else {   // Otherwise, top bits aren't known.
      assert((KnownOne & NewBits) == 0);
      assert((KnownZero & NewBits) == 0);
    }
    break;
  }
  case ISD::ANY_EXTEND: {
    unsigned OperandBitWidth =
      Op.getOperand(0).getValueType().getScalarType().getSizeInBits();
    APInt InMask = NewMask.trunc(OperandBitWidth);
    if (SimplifyDemandedBits(Op.getOperand(0), InMask,
                             KnownZero, KnownOne, TLO, Depth+1))
      return true;
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    KnownZero = KnownZero.zext(BitWidth);
    KnownOne = KnownOne.zext(BitWidth);
    break;
  }
  case ISD::TRUNCATE: {
    // Simplify the input, using demanded bit information, and compute the
    // known zero/one bits live out.
    unsigned OperandBitWidth =
      Op.getOperand(0).getValueType().getScalarType().getSizeInBits();
    APInt TruncMask = NewMask.zext(OperandBitWidth);
    if (SimplifyDemandedBits(Op.getOperand(0), TruncMask,
                             KnownZero, KnownOne, TLO, Depth+1))
      return true;
    KnownZero = KnownZero.trunc(BitWidth);
    KnownOne = KnownOne.trunc(BitWidth);

    // If the input is only used by this truncate, see if we can shrink it
    // based on the known demanded bits.
    if (Op.getOperand(0).getNode()->hasOneUse()) {
      SDValue In = Op.getOperand(0);
      switch (In.getOpcode()) {
      default: break;
      case ISD::SRL:
        // Shrink SRL by a constant if none of the high bits shifted in are
        // demanded.
        if (TLO.LegalTypes() &&
            !isTypeDesirableForOp(ISD::SRL, Op.getValueType()))
          // Do not turn (vt1 truncate (vt2 srl)) into (vt1 srl) if vt1 is
          // undesirable.
          break;
        ConstantSDNode *ShAmt = dyn_cast<ConstantSDNode>(In.getOperand(1));
        if (!ShAmt)
          break;
        SDValue Shift = In.getOperand(1);
        if (TLO.LegalTypes()) {
          uint64_t ShVal = ShAmt->getZExtValue();
          Shift = TLO.DAG.getConstant(ShVal, dl,
                                      getShiftAmountTy(Op.getValueType(), DL));
        }

        APInt HighBits = APInt::getHighBitsSet(OperandBitWidth,
                                               OperandBitWidth - BitWidth);
        HighBits = HighBits.lshr(ShAmt->getZExtValue()).trunc(BitWidth);

        if (ShAmt->getZExtValue() < BitWidth && !(HighBits & NewMask)) {
          // None of the shifted in bits are needed. Add a truncate of the
          // shift input, then shift it.
          SDValue NewTrunc = TLO.DAG.getNode(ISD::TRUNCATE, dl,
                                             Op.getValueType(),
                                             In.getOperand(0));
          return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::SRL, dl,
                                                   Op.getValueType(),
                                                   NewTrunc,
                                                   Shift));
        }
        break;
      }
    }

    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    break;
  }
  case ISD::AssertZext: {
    // AssertZext demands all of the high bits, plus any of the low bits
    // demanded by its users.
    EVT VT = cast<VTSDNode>(Op.getOperand(1))->getVT();
    APInt InMask = APInt::getLowBitsSet(BitWidth,
                                        VT.getSizeInBits());
    if (SimplifyDemandedBits(Op.getOperand(0), ~InMask | NewMask,
                             KnownZero, KnownOne, TLO, Depth+1))
      return true;
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");

    KnownZero |= ~InMask & NewMask;
    break;
  }
  case ISD::BITCAST:
    // If this is an FP->Int bitcast and if the sign bit is the only
    // thing demanded, turn this into a FGETSIGN.
    if (!TLO.LegalOperations() &&
        !Op.getValueType().isVector() &&
        !Op.getOperand(0).getValueType().isVector() &&
        NewMask == APInt::getSignBit(Op.getValueType().getSizeInBits()) &&
        Op.getOperand(0).getValueType().isFloatingPoint()) {
      bool OpVTLegal = isOperationLegalOrCustom(ISD::FGETSIGN, Op.getValueType());
      bool i32Legal  = isOperationLegalOrCustom(ISD::FGETSIGN, MVT::i32);
      if ((OpVTLegal || i32Legal) && Op.getValueType().isSimple() &&
          Op.getOperand(0).getValueType() != MVT::f128) {
        // Cannot eliminate/lower SHL for f128 yet.
        EVT Ty = OpVTLegal ? Op.getValueType() : MVT::i32;
        // Make a FGETSIGN + SHL to move the sign bit into the appropriate
        // place. We expect the SHL to be eliminated by other optimizations.
        SDValue Sign = TLO.DAG.getNode(ISD::FGETSIGN, dl, Ty, Op.getOperand(0));
        unsigned OpVTSizeInBits = Op.getValueType().getSizeInBits();
        if (!OpVTLegal && OpVTSizeInBits > 32)
          Sign = TLO.DAG.getNode(ISD::ZERO_EXTEND, dl, Op.getValueType(), Sign);
        unsigned ShVal = Op.getValueType().getSizeInBits() - 1;
        SDValue ShAmt = TLO.DAG.getConstant(ShVal, dl, Op.getValueType());
        return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::SHL, dl,
                                                 Op.getValueType(),
                                                 Sign, ShAmt));
      }
    }
    break;
  case ISD::ADD:
  case ISD::MUL:
  case ISD::SUB: {
    // Add, Sub, and Mul don't demand any bits in positions beyond that
    // of the highest bit demanded of them.
    APInt LoMask = APInt::getLowBitsSet(BitWidth,
                                        BitWidth - NewMask.countLeadingZeros());
    if (SimplifyDemandedBits(Op.getOperand(0), LoMask, KnownZero2,
                             KnownOne2, TLO, Depth+1))
      return true;
    if (SimplifyDemandedBits(Op.getOperand(1), LoMask, KnownZero2,
                             KnownOne2, TLO, Depth+1))
      return true;
    // See if the operation should be performed at a smaller bit width.
    if (TLO.ShrinkDemandedOp(Op, BitWidth, NewMask, dl))
      return true;
  }
  // FALL THROUGH
  default:
    // Just use computeKnownBits to compute output bits.
    TLO.DAG.computeKnownBits(Op, KnownZero, KnownOne, Depth);
    break;
  }

  // If we know the value of all of the demanded bits, return this as a
  // constant.
  if ((NewMask & (KnownZero|KnownOne)) == NewMask) {
    // Avoid folding to a constant if any OpaqueConstant is involved.
    const SDNode *N = Op.getNode();
    for (SDNodeIterator I = SDNodeIterator::begin(N),
         E = SDNodeIterator::end(N); I != E; ++I) {
      SDNode *Op = *I;
      if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op))
        if (C->isOpaque())
          return false;
    }
    return TLO.CombineTo(Op,
                         TLO.DAG.getConstant(KnownOne, dl, Op.getValueType()));
  }

  return false;
}
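
// End-to-end sketch of the recursion above: for (and (or X, 0xFF00), 0x00F0)
// with every result bit demanded, the OR is visited with a demanded mask of
// 0x00F0; all demanded bits of its 0xFF00 operand are known zero, so the OR
// folds to X, and the AND then continues with the known bits of X alone.
// Each step either rewrites the DAG via TLO.CombineTo (returning true) or
// tightens KnownZero/KnownOne for the callers above it.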

/// Determine which of the bits specified in Mask are known to be either zero or
/// one and return them in the KnownZero/KnownOne bitsets.
void TargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
                                                   APInt &KnownZero,
                                                   APInt &KnownOne,
                                                   const SelectionDAG &DAG,
                                                   unsigned Depth) const {
  assert((Op.getOpcode() >= ISD::BUILTIN_OP_END ||
          Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
          Op.getOpcode() == ISD::INTRINSIC_W_CHAIN ||
          Op.getOpcode() == ISD::INTRINSIC_VOID) &&
         "Should use MaskedValueIsZero if you don't know whether Op"
         " is a target node!");
  KnownZero = KnownOne = APInt(KnownOne.getBitWidth(), 0);
}

/// This method can be implemented by targets that want to expose additional
/// information about sign bits to the DAG Combiner.
unsigned TargetLowering::ComputeNumSignBitsForTargetNode(SDValue Op,
                                                         const SelectionDAG &,
                                                         unsigned Depth) const {
  assert((Op.getOpcode() >= ISD::BUILTIN_OP_END ||
          Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
          Op.getOpcode() == ISD::INTRINSIC_W_CHAIN ||
          Op.getOpcode() == ISD::INTRINSIC_VOID) &&
         "Should use ComputeNumSignBits if you don't know whether Op"
         " is a target node!");
  return 1;
}

bool TargetLowering::isConstTrueVal(const SDNode *N) const {
  if (!N)
    return false;

  const ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N);
  if (!CN) {
    const BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N);
    if (!BV)
      return false;

    BitVector UndefElements;
    CN = BV->getConstantSplatNode(&UndefElements);
    // Only interested in constant splats, and we don't try to handle undef
    // elements in identifying boolean constants.
    if (!CN || UndefElements.any())
      return false;
  }

  switch (getBooleanContents(N->getValueType(0))) {
  case UndefinedBooleanContent:
    return CN->getAPIntValue()[0];
  case ZeroOrOneBooleanContent:
    return CN->isOne();
  case ZeroOrNegativeOneBooleanContent:
    return CN->isAllOnesValue();
  }

  llvm_unreachable("Invalid boolean contents");
}

bool TargetLowering::isConstFalseVal(const SDNode *N) const {
  if (!N)
    return false;

  const ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N);
  if (!CN) {
    const BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N);
    if (!BV)
      return false;

    BitVector UndefElements;
    CN = BV->getConstantSplatNode(&UndefElements);
    // Only interested in constant splats, and we don't try to handle undef
    // elements in identifying boolean constants.
    if (!CN || UndefElements.any())
1252 return false;
1253 }
1254
1255 if (getBooleanContents(N->getValueType(0)) == UndefinedBooleanContent)
1256 return !CN->getAPIntValue()[0];
1257
1258 return CN->isNullValue();
1259 }
1260
isExtendedTrueVal(const ConstantSDNode * N,EVT VT,bool SExt) const1261 bool TargetLowering::isExtendedTrueVal(const ConstantSDNode *N, EVT VT,
1262 bool SExt) const {
1263 if (VT == MVT::i1)
1264 return N->isOne();
1265
1266 TargetLowering::BooleanContent Cnt = getBooleanContents(VT);
1267 switch (Cnt) {
1268 case TargetLowering::ZeroOrOneBooleanContent:
1269 // An extended value of 1 is always true, unless its original type is i1,
1270 // in which case it will be sign extended to -1.
1271 return (N->isOne() && !SExt) || (SExt && (N->getValueType(0) != MVT::i1));
1272 case TargetLowering::UndefinedBooleanContent:
1273 case TargetLowering::ZeroOrNegativeOneBooleanContent:
1274 return N->isAllOnesValue() && SExt;
1275 }
1276 llvm_unreachable("Unexpected enumeration.");
1277 }
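
// Taken together: under ZeroOrOneBooleanContent a "true" splat is 1, under
// ZeroOrNegativeOneBooleanContent it is all-ones, and with
// UndefinedBooleanContent only bit 0 of the value is meaningful. A true i1
// that was sign-extended to a wider type reads back as -1, which is why the
// extended-value check above treats the SExt case specially.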

/// This helper function of SimplifySetCC tries to optimize the comparison when
/// either operand of the SetCC node is a bitwise-and instruction.
SDValue TargetLowering::simplifySetCCWithAnd(EVT VT, SDValue N0, SDValue N1,
                                             ISD::CondCode Cond,
                                             DAGCombinerInfo &DCI,
                                             const SDLoc &DL) const {
  // Match these patterns in any of their permutations:
  // (X & Y) == Y
  // (X & Y) != Y
  if (N1.getOpcode() == ISD::AND && N0.getOpcode() != ISD::AND)
    std::swap(N0, N1);

  EVT OpVT = N0.getValueType();
  if (N0.getOpcode() != ISD::AND || !OpVT.isInteger() ||
      (Cond != ISD::SETEQ && Cond != ISD::SETNE))
    return SDValue();

  SDValue X, Y;
  if (N0.getOperand(0) == N1) {
    X = N0.getOperand(1);
    Y = N0.getOperand(0);
  } else if (N0.getOperand(1) == N1) {
    X = N0.getOperand(0);
    Y = N0.getOperand(1);
  } else {
    return SDValue();
  }

  SelectionDAG &DAG = DCI.DAG;
  SDValue Zero = DAG.getConstant(0, DL, OpVT);
  if (DAG.isKnownToBeAPowerOfTwo(Y)) {
    // Simplify X & Y == Y to X & Y != 0 if Y has exactly one bit set.
    // Note that where Y is variable and is known to have at most one bit set
    // (for example, if it is Z & 1) we cannot do this; the expressions are not
    // equivalent when Y == 0.
    Cond = ISD::getSetCCInverse(Cond, /*isInteger=*/true);
    if (DCI.isBeforeLegalizeOps() ||
        isCondCodeLegal(Cond, N0.getSimpleValueType()))
      return DAG.getSetCC(DL, VT, N0, Zero, Cond);
  } else if (N0.hasOneUse() && hasAndNotCompare(Y)) {
    // If the target supports an 'and-not' or 'and-complement' logic operation,
    // try to use that to make a comparison operation more efficient.
    // But don't do this transform if the mask is a single bit because there are
    // more efficient ways to deal with that case (for example, 'bt' on x86 or
    // 'rlwinm' on PPC).

    // Bail out if the compare operand that we want to turn into a zero is
    // already a zero (otherwise, infinite loop).
    auto *YConst = dyn_cast<ConstantSDNode>(Y);
    if (YConst && YConst->isNullValue())
      return SDValue();

    // Transform this into: ~X & Y == 0.
    SDValue NotX = DAG.getNOT(SDLoc(X), X, OpVT);
    SDValue NewAnd = DAG.getNode(ISD::AND, SDLoc(N0), OpVT, NotX, Y);
    return DAG.getSetCC(DL, VT, NewAnd, Zero, Cond);
  }

  return SDValue();
}
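
// Worked example: when Y is known to be a power of two, (X & 8) == 8 becomes
// (X & 8) != 0, which many targets can test with a single bit-test
// instruction. With hasAndNotCompare(), (X & Y) == Y instead becomes
// (~X & Y) == 0, mapping onto an and-not operation (e.g. ANDN on x86 with
// BMI, or BIC on ARM) followed by a compare against zero.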
1339
1340 /// Try to simplify a setcc built with the specified operands and cc. If it is
1341 /// unable to simplify it, return a null SDValue.
SimplifySetCC(EVT VT,SDValue N0,SDValue N1,ISD::CondCode Cond,bool foldBooleans,DAGCombinerInfo & DCI,const SDLoc & dl) const1342 SDValue TargetLowering::SimplifySetCC(EVT VT, SDValue N0, SDValue N1,
1343 ISD::CondCode Cond, bool foldBooleans,
1344 DAGCombinerInfo &DCI,
1345 const SDLoc &dl) const {
1346 SelectionDAG &DAG = DCI.DAG;
1347
1348 // These setcc operations always fold.
1349 switch (Cond) {
1350 default: break;
1351 case ISD::SETFALSE:
1352 case ISD::SETFALSE2: return DAG.getConstant(0, dl, VT);
1353 case ISD::SETTRUE:
1354 case ISD::SETTRUE2: {
1355 TargetLowering::BooleanContent Cnt =
1356 getBooleanContents(N0->getValueType(0));
1357 return DAG.getConstant(
1358 Cnt == TargetLowering::ZeroOrNegativeOneBooleanContent ? -1ULL : 1, dl,
1359 VT);
1360 }
1361 }
1362
1363 // Ensure that the constant occurs on the RHS, and fold constant
1364 // comparisons.
1365 ISD::CondCode SwappedCC = ISD::getSetCCSwappedOperands(Cond);
1366 if (isa<ConstantSDNode>(N0.getNode()) &&
1367 (DCI.isBeforeLegalizeOps() ||
1368 isCondCodeLegal(SwappedCC, N0.getSimpleValueType())))
1369 return DAG.getSetCC(dl, VT, N1, N0, SwappedCC);
1370
1371 if (auto *N1C = dyn_cast<ConstantSDNode>(N1.getNode())) {
1372 const APInt &C1 = N1C->getAPIntValue();
1373
1374 // If the LHS is '(srl (ctlz x), 5)', the RHS is 0/1, and this is an
1375 // equality comparison, then we're just comparing whether X itself is
1376 // zero.
1377 if (N0.getOpcode() == ISD::SRL && (C1 == 0 || C1 == 1) &&
1378 N0.getOperand(0).getOpcode() == ISD::CTLZ &&
1379 N0.getOperand(1).getOpcode() == ISD::Constant) {
1380 const APInt &ShAmt
1381 = cast<ConstantSDNode>(N0.getOperand(1))->getAPIntValue();
1382 if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) &&
1383 ShAmt == Log2_32(N0.getValueType().getSizeInBits())) {
1384 if ((C1 == 0) == (Cond == ISD::SETEQ)) {
1385 // (srl (ctlz x), 5) == 0 -> X != 0
1386 // (srl (ctlz x), 5) != 1 -> X != 0
1387 Cond = ISD::SETNE;
1388 } else {
1389 // (srl (ctlz x), 5) != 0 -> X == 0
1390 // (srl (ctlz x), 5) == 1 -> X == 0
1391 Cond = ISD::SETEQ;
1392 }
1393 SDValue Zero = DAG.getConstant(0, dl, N0.getValueType());
1394 return DAG.getSetCC(dl, VT, N0.getOperand(0).getOperand(0),
1395 Zero, Cond);
1396 }
1397 }
1398
1399 SDValue CTPOP = N0;
1400 // Look through truncs that don't change the value of a ctpop.
1401 if (N0.hasOneUse() && N0.getOpcode() == ISD::TRUNCATE)
1402 CTPOP = N0.getOperand(0);
1403
1404 if (CTPOP.hasOneUse() && CTPOP.getOpcode() == ISD::CTPOP &&
1405 (N0 == CTPOP || N0.getValueType().getSizeInBits() >
1406 Log2_32_Ceil(CTPOP.getValueType().getSizeInBits()))) {
1407 EVT CTVT = CTPOP.getValueType();
1408 SDValue CTOp = CTPOP.getOperand(0);
1409
1410 // (ctpop x) u< 2 -> (x & x-1) == 0
1411 // (ctpop x) u> 1 -> (x & x-1) != 0
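      // Clearing the lowest set bit with x & (x-1) leaves zero exactly when
      // x had at most one bit set; e.g. (illustrative) x = 0b0100 gives
      // x & (x-1) == 0, while x = 0b0110 gives 0b0100 != 0.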
      if ((Cond == ISD::SETULT && C1 == 2) ||
          (Cond == ISD::SETUGT && C1 == 1)) {
        SDValue Sub = DAG.getNode(ISD::SUB, dl, CTVT, CTOp,
                                  DAG.getConstant(1, dl, CTVT));
        SDValue And = DAG.getNode(ISD::AND, dl, CTVT, CTOp, Sub);
        ISD::CondCode CC = Cond == ISD::SETULT ? ISD::SETEQ : ISD::SETNE;
        return DAG.getSetCC(dl, VT, And, DAG.getConstant(0, dl, CTVT), CC);
      }

      // TODO: (ctpop x) == 1 -> x && (x & x-1) == 0 iff ctpop is illegal.
    }

    // (zext x) == C --> x == (trunc C)
    // (sext x) == C --> x == (trunc C)
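    // Illustrative i8 -> i32 case: (zext i8 %x to i32) == 200 only needs an
    // 8-bit compare, %x == 200, since the upper 24 bits of the extended
    // value are known zero and 200 fits in 8 bits.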
    if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) &&
        DCI.isBeforeLegalize() && N0->hasOneUse()) {
      unsigned MinBits = N0.getValueSizeInBits();
      SDValue PreExt;
      bool Signed = false;
      if (N0->getOpcode() == ISD::ZERO_EXTEND) {
        // ZExt
        MinBits = N0->getOperand(0).getValueSizeInBits();
        PreExt = N0->getOperand(0);
      } else if (N0->getOpcode() == ISD::AND) {
        // DAGCombine turns costly ZExts into ANDs
        if (auto *C = dyn_cast<ConstantSDNode>(N0->getOperand(1)))
          if ((C->getAPIntValue()+1).isPowerOf2()) {
            MinBits = C->getAPIntValue().countTrailingOnes();
            PreExt = N0->getOperand(0);
          }
      } else if (N0->getOpcode() == ISD::SIGN_EXTEND) {
        // SExt
        MinBits = N0->getOperand(0).getValueSizeInBits();
        PreExt = N0->getOperand(0);
        Signed = true;
      } else if (auto *LN0 = dyn_cast<LoadSDNode>(N0)) {
        // ZEXTLOAD / SEXTLOAD
        if (LN0->getExtensionType() == ISD::ZEXTLOAD) {
          MinBits = LN0->getMemoryVT().getSizeInBits();
          PreExt = N0;
        } else if (LN0->getExtensionType() == ISD::SEXTLOAD) {
          Signed = true;
          MinBits = LN0->getMemoryVT().getSizeInBits();
          PreExt = N0;
        }
      }

      // Figure out how many bits we need to preserve this constant.
      unsigned ReqdBits = Signed ?
          C1.getBitWidth() - C1.getNumSignBits() + 1 :
          C1.getActiveBits();

      // Make sure we're not losing bits from the constant.
      if (MinBits > 0 &&
          MinBits < C1.getBitWidth() &&
          MinBits >= ReqdBits) {
        EVT MinVT = EVT::getIntegerVT(*DAG.getContext(), MinBits);
        if (isTypeDesirableForOp(ISD::SETCC, MinVT)) {
          // Will get folded away.
          SDValue Trunc = DAG.getNode(ISD::TRUNCATE, dl, MinVT, PreExt);
          SDValue C = DAG.getConstant(C1.trunc(MinBits), dl, MinVT);
          return DAG.getSetCC(dl, VT, Trunc, C, Cond);
        }

        // If truncating the setcc operands is not desirable, we can still
        // simplify the expression in some cases:
        // setcc ([sz]ext (setcc x, y, cc)), 0, setne  -> setcc (x, y, cc)
        // setcc ([sz]ext (setcc x, y, cc)), 0, seteq  -> setcc (x, y, inv(cc))
        // setcc (zext (setcc x, y, cc)), 1, setne     -> setcc (x, y, inv(cc))
        // setcc (zext (setcc x, y, cc)), 1, seteq     -> setcc (x, y, cc)
        // setcc (sext (setcc x, y, cc)), -1, setne    -> setcc (x, y, inv(cc))
        // setcc (sext (setcc x, y, cc)), -1, seteq    -> setcc (x, y, cc)
        SDValue TopSetCC = N0->getOperand(0);
        unsigned N0Opc = N0->getOpcode();
        bool SExt = (N0Opc == ISD::SIGN_EXTEND);
        if (TopSetCC.getValueType() == MVT::i1 && VT == MVT::i1 &&
            TopSetCC.getOpcode() == ISD::SETCC &&
            (N0Opc == ISD::ZERO_EXTEND || N0Opc == ISD::SIGN_EXTEND) &&
            (isConstFalseVal(N1C) ||
             isExtendedTrueVal(N1C, N0->getValueType(0), SExt))) {

          bool Inverse = (N1C->isNullValue() && Cond == ISD::SETEQ) ||
                         (!N1C->isNullValue() && Cond == ISD::SETNE);

          if (!Inverse)
            return TopSetCC;

          ISD::CondCode InvCond = ISD::getSetCCInverse(
              cast<CondCodeSDNode>(TopSetCC.getOperand(2))->get(),
              TopSetCC.getOperand(0).getValueType().isInteger());
          return DAG.getSetCC(dl, VT, TopSetCC.getOperand(0),
                              TopSetCC.getOperand(1),
                              InvCond);
        }
      }
    }

    // If the LHS is '(and load, const)', the RHS is 0, the test is for
    // equality or unsigned, and all 1 bits of the const are in the same
    // partial word, see if we can shorten the load.
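    // Sketch of the narrowing (values illustrative): on a little-endian
    // target, ((load i32 %p) & 0xFF00) == 0 only inspects byte 1 of the
    // loaded word, so it can become ((load i8 %p+1) & 0xFF) == 0 with a
    // quarter of the original load width.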
    if (DCI.isBeforeLegalize() &&
        !ISD::isSignedIntSetCC(Cond) &&
        N0.getOpcode() == ISD::AND && C1 == 0 &&
        N0.getNode()->hasOneUse() &&
        isa<LoadSDNode>(N0.getOperand(0)) &&
        N0.getOperand(0).getNode()->hasOneUse() &&
        isa<ConstantSDNode>(N0.getOperand(1))) {
      LoadSDNode *Lod = cast<LoadSDNode>(N0.getOperand(0));
      APInt bestMask;
      unsigned bestWidth = 0, bestOffset = 0;
      if (!Lod->isVolatile() && Lod->isUnindexed()) {
        unsigned origWidth = N0.getValueType().getSizeInBits();
        unsigned maskWidth = origWidth;
        // We can narrow (e.g.) 16-bit extending loads on 32-bit target to
        // 8 bits, but have to be careful...
        if (Lod->getExtensionType() != ISD::NON_EXTLOAD)
          origWidth = Lod->getMemoryVT().getSizeInBits();
        const APInt &Mask =
            cast<ConstantSDNode>(N0.getOperand(1))->getAPIntValue();
        for (unsigned width = origWidth / 2; width >= 8; width /= 2) {
          APInt newMask = APInt::getLowBitsSet(maskWidth, width);
          for (unsigned offset = 0; offset < origWidth / width; offset++) {
            if ((newMask & Mask) == Mask) {
              if (!DAG.getDataLayout().isLittleEndian())
                bestOffset = (origWidth/width - offset - 1) * (width/8);
              else
                bestOffset = (uint64_t)offset * (width/8);
              bestMask = Mask.lshr(offset * (width/8) * 8);
              bestWidth = width;
              break;
            }
            newMask = newMask << width;
          }
        }
      }
      if (bestWidth) {
        EVT newVT = EVT::getIntegerVT(*DAG.getContext(), bestWidth);
        if (newVT.isRound()) {
          EVT PtrType = Lod->getOperand(1).getValueType();
          SDValue Ptr = Lod->getBasePtr();
          if (bestOffset != 0)
            Ptr = DAG.getNode(ISD::ADD, dl, PtrType, Lod->getBasePtr(),
                              DAG.getConstant(bestOffset, dl, PtrType));
          unsigned NewAlign = MinAlign(Lod->getAlignment(), bestOffset);
          SDValue NewLoad =
              DAG.getLoad(newVT, dl, Lod->getChain(), Ptr,
                          Lod->getPointerInfo().getWithOffset(bestOffset),
                          false, false, false, NewAlign);
          return DAG.getSetCC(dl, VT,
                              DAG.getNode(ISD::AND, dl, newVT, NewLoad,
                                          DAG.getConstant(bestMask.trunc(bestWidth),
                                                          dl, newVT)),
                              DAG.getConstant(0LL, dl, newVT), Cond);
        }
      }
    }

    // If the LHS is a ZERO_EXTEND, perform the comparison on the input.
    if (N0.getOpcode() == ISD::ZERO_EXTEND) {
      unsigned InSize = N0.getOperand(0).getValueType().getSizeInBits();

      // If the comparison constant has bits in the upper part, the
      // zero-extended value could never match.
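      // For instance (illustrative widths), (zext i8 %x to i32) == 0x1FF
      // can never hold: the high 24 bits of the extended value are zero but
      // the constant needs bit 8, so the equality folds to false outright.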
      if (C1.intersects(APInt::getHighBitsSet(C1.getBitWidth(),
                                              C1.getBitWidth() - InSize))) {
        switch (Cond) {
        case ISD::SETUGT:
        case ISD::SETUGE:
        case ISD::SETEQ: return DAG.getConstant(0, dl, VT);
        case ISD::SETULT:
        case ISD::SETULE:
        case ISD::SETNE: return DAG.getConstant(1, dl, VT);
        case ISD::SETGT:
        case ISD::SETGE:
          // True if the sign bit of C1 is set.
          return DAG.getConstant(C1.isNegative(), dl, VT);
        case ISD::SETLT:
        case ISD::SETLE:
          // True if the sign bit of C1 isn't set.
          return DAG.getConstant(C1.isNonNegative(), dl, VT);
        default:
          break;
        }
      }

      // Otherwise, we can perform the comparison with the low bits.
      switch (Cond) {
      case ISD::SETEQ:
      case ISD::SETNE:
      case ISD::SETUGT:
      case ISD::SETUGE:
      case ISD::SETULT:
      case ISD::SETULE: {
        EVT newVT = N0.getOperand(0).getValueType();
        if (DCI.isBeforeLegalizeOps() ||
            (isOperationLegal(ISD::SETCC, newVT) &&
             getCondCodeAction(Cond, newVT.getSimpleVT()) == Legal)) {
          EVT NewSetCCVT =
              getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), newVT);
          SDValue NewConst = DAG.getConstant(C1.trunc(InSize), dl, newVT);

          SDValue NewSetCC = DAG.getSetCC(dl, NewSetCCVT, N0.getOperand(0),
                                          NewConst, Cond);
          return DAG.getBoolExtOrTrunc(NewSetCC, dl, VT, N0.getValueType());
        }
        break;
      }
      default:
        break; // todo, be more careful with signed comparisons
      }
    } else if (N0.getOpcode() == ISD::SIGN_EXTEND_INREG &&
               (Cond == ISD::SETEQ || Cond == ISD::SETNE)) {
      EVT ExtSrcTy = cast<VTSDNode>(N0.getOperand(1))->getVT();
      unsigned ExtSrcTyBits = ExtSrcTy.getSizeInBits();
      EVT ExtDstTy = N0.getValueType();
      unsigned ExtDstTyBits = ExtDstTy.getSizeInBits();

      // If the constant doesn't fit into the number of bits for the source
      // of the sign extension, it is impossible for both sides to be equal.
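      // E.g. (illustrative) for a sign_extend_inreg from i8, any constant
      // needing more than 8 signed bits, such as 300, can never equal the
      // extended value, so the compare folds to a constant.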
      if (C1.getMinSignedBits() > ExtSrcTyBits)
        return DAG.getConstant(Cond == ISD::SETNE, dl, VT);

      SDValue ZextOp;
      EVT Op0Ty = N0.getOperand(0).getValueType();
      if (Op0Ty == ExtSrcTy) {
        ZextOp = N0.getOperand(0);
      } else {
        APInt Imm = APInt::getLowBitsSet(ExtDstTyBits, ExtSrcTyBits);
        ZextOp = DAG.getNode(ISD::AND, dl, Op0Ty, N0.getOperand(0),
                             DAG.getConstant(Imm, dl, Op0Ty));
      }
      if (!DCI.isCalledByLegalizer())
        DCI.AddToWorklist(ZextOp.getNode());
      // Otherwise, make this a use of a zext.
      return DAG.getSetCC(dl, VT, ZextOp,
                          DAG.getConstant(C1 & APInt::getLowBitsSet(
                                                   ExtDstTyBits,
                                                   ExtSrcTyBits),
                                          dl, ExtDstTy),
                          Cond);
    } else if ((N1C->isNullValue() || N1C->getAPIntValue() == 1) &&
               (Cond == ISD::SETEQ || Cond == ISD::SETNE)) {
      // SETCC (SETCC), [0|1], [EQ|NE]  -> SETCC
      if (N0.getOpcode() == ISD::SETCC &&
          isTypeLegal(VT) && VT.bitsLE(N0.getValueType())) {
        bool TrueWhenTrue = (Cond == ISD::SETEQ) ^ (N1C->getAPIntValue() != 1);
        if (TrueWhenTrue)
          return DAG.getNode(ISD::TRUNCATE, dl, VT, N0);
        // Invert the condition.
        ISD::CondCode CC = cast<CondCodeSDNode>(N0.getOperand(2))->get();
        CC = ISD::getSetCCInverse(CC,
                                  N0.getOperand(0).getValueType().isInteger());
        if (DCI.isBeforeLegalizeOps() ||
            isCondCodeLegal(CC, N0.getOperand(0).getSimpleValueType()))
          return DAG.getSetCC(dl, VT, N0.getOperand(0), N0.getOperand(1), CC);
      }

      if ((N0.getOpcode() == ISD::XOR ||
           (N0.getOpcode() == ISD::AND &&
            N0.getOperand(0).getOpcode() == ISD::XOR &&
            N0.getOperand(1) == N0.getOperand(0).getOperand(1))) &&
          isa<ConstantSDNode>(N0.getOperand(1)) &&
          cast<ConstantSDNode>(N0.getOperand(1))->getAPIntValue() == 1) {
        // If this is (X^1) == 0/1, swap the RHS and eliminate the xor. We
        // can only do this if the top bits are known zero.
        unsigned BitWidth = N0.getValueSizeInBits();
        if (DAG.MaskedValueIsZero(N0,
                                  APInt::getHighBitsSet(BitWidth,
                                                        BitWidth-1))) {
          // Okay, get the un-inverted input value.
          SDValue Val;
          if (N0.getOpcode() == ISD::XOR)
            Val = N0.getOperand(0);
          else {
            assert(N0.getOpcode() == ISD::AND &&
                   N0.getOperand(0).getOpcode() == ISD::XOR);
            // ((X^1)&1)^1 -> X & 1
            Val = DAG.getNode(ISD::AND, dl, N0.getValueType(),
                              N0.getOperand(0).getOperand(0),
                              N0.getOperand(1));
          }

          return DAG.getSetCC(dl, VT, Val, N1,
                              Cond == ISD::SETEQ ? ISD::SETNE : ISD::SETEQ);
        }
      } else if (N1C->getAPIntValue() == 1 &&
                 (VT == MVT::i1 ||
                  getBooleanContents(N0->getValueType(0)) ==
                      ZeroOrOneBooleanContent)) {
        SDValue Op0 = N0;
        if (Op0.getOpcode() == ISD::TRUNCATE)
          Op0 = Op0.getOperand(0);

        if ((Op0.getOpcode() == ISD::XOR) &&
            Op0.getOperand(0).getOpcode() == ISD::SETCC &&
            Op0.getOperand(1).getOpcode() == ISD::SETCC) {
          // (xor (setcc), (setcc)) == / != 1 -> (setcc) != / == (setcc)
          Cond = (Cond == ISD::SETEQ) ? ISD::SETNE : ISD::SETEQ;
          return DAG.getSetCC(dl, VT, Op0.getOperand(0), Op0.getOperand(1),
                              Cond);
        }
        if (Op0.getOpcode() == ISD::AND &&
            isa<ConstantSDNode>(Op0.getOperand(1)) &&
            cast<ConstantSDNode>(Op0.getOperand(1))->getAPIntValue() == 1) {
          // If this is (X&1) == / != 1, normalize it to (X&1) != / == 0.
          if (Op0.getValueType().bitsGT(VT))
            Op0 = DAG.getNode(ISD::AND, dl, VT,
                              DAG.getNode(ISD::TRUNCATE, dl, VT,
                                          Op0.getOperand(0)),
                              DAG.getConstant(1, dl, VT));
          else if (Op0.getValueType().bitsLT(VT))
            Op0 = DAG.getNode(ISD::AND, dl, VT,
                              DAG.getNode(ISD::ANY_EXTEND, dl, VT,
                                          Op0.getOperand(0)),
                              DAG.getConstant(1, dl, VT));

          return DAG.getSetCC(dl, VT, Op0,
                              DAG.getConstant(0, dl, Op0.getValueType()),
                              Cond == ISD::SETEQ ? ISD::SETNE : ISD::SETEQ);
        }
        if (Op0.getOpcode() == ISD::AssertZext &&
            cast<VTSDNode>(Op0.getOperand(1))->getVT() == MVT::i1)
          return DAG.getSetCC(dl, VT, Op0,
                              DAG.getConstant(0, dl, Op0.getValueType()),
                              Cond == ISD::SETEQ ? ISD::SETNE : ISD::SETEQ);
      }
    }

    APInt MinVal, MaxVal;
    unsigned OperandBitSize = N1C->getValueType(0).getSizeInBits();
    if (ISD::isSignedIntSetCC(Cond)) {
      MinVal = APInt::getSignedMinValue(OperandBitSize);
      MaxVal = APInt::getSignedMaxValue(OperandBitSize);
    } else {
      MinVal = APInt::getMinValue(OperandBitSize);
      MaxVal = APInt::getMaxValue(OperandBitSize);
    }

    // Canonicalize GE/LE comparisons to use GT/LT comparisons.
    if (Cond == ISD::SETGE || Cond == ISD::SETUGE) {
      if (C1 == MinVal) return DAG.getConstant(1, dl, VT); // X >= MIN --> true
      // X >= C0 --> X > (C0 - 1)
      APInt C = C1 - 1;
      ISD::CondCode NewCC = (Cond == ISD::SETGE) ? ISD::SETGT : ISD::SETUGT;
      if ((DCI.isBeforeLegalizeOps() ||
           isCondCodeLegal(NewCC, VT.getSimpleVT())) &&
          (!N1C->isOpaque() || (N1C->isOpaque() && C.getBitWidth() <= 64 &&
                                isLegalICmpImmediate(C.getSExtValue())))) {
        return DAG.getSetCC(dl, VT, N0,
                            DAG.getConstant(C, dl, N1.getValueType()),
                            NewCC);
      }
    }

    if (Cond == ISD::SETLE || Cond == ISD::SETULE) {
      if (C1 == MaxVal) return DAG.getConstant(1, dl, VT); // X <= MAX --> true
      // X <= C0 --> X < (C0 + 1)
      APInt C = C1 + 1;
      ISD::CondCode NewCC = (Cond == ISD::SETLE) ? ISD::SETLT : ISD::SETULT;
      if ((DCI.isBeforeLegalizeOps() ||
           isCondCodeLegal(NewCC, VT.getSimpleVT())) &&
          (!N1C->isOpaque() || (N1C->isOpaque() && C.getBitWidth() <= 64 &&
                                isLegalICmpImmediate(C.getSExtValue())))) {
        return DAG.getSetCC(dl, VT, N0,
                            DAG.getConstant(C, dl, N1.getValueType()),
                            NewCC);
      }
    }

    if ((Cond == ISD::SETLT || Cond == ISD::SETULT) && C1 == MinVal)
      return DAG.getConstant(0, dl, VT); // X < MIN --> false
    if ((Cond == ISD::SETGE || Cond == ISD::SETUGE) && C1 == MinVal)
      return DAG.getConstant(1, dl, VT); // X >= MIN --> true
    if ((Cond == ISD::SETGT || Cond == ISD::SETUGT) && C1 == MaxVal)
      return DAG.getConstant(0, dl, VT); // X > MAX --> false
    if ((Cond == ISD::SETLE || Cond == ISD::SETULE) && C1 == MaxVal)
      return DAG.getConstant(1, dl, VT); // X <= MAX --> true

    // Canonicalize setgt X, Min --> setne X, Min
    if ((Cond == ISD::SETGT || Cond == ISD::SETUGT) && C1 == MinVal)
      return DAG.getSetCC(dl, VT, N0, N1, ISD::SETNE);
    // Canonicalize setlt X, Max --> setne X, Max
    if ((Cond == ISD::SETLT || Cond == ISD::SETULT) && C1 == MaxVal)
      return DAG.getSetCC(dl, VT, N0, N1, ISD::SETNE);

    // If we have setult X, 1, turn it into seteq X, 0
    if ((Cond == ISD::SETLT || Cond == ISD::SETULT) && C1 == MinVal+1)
      return DAG.getSetCC(dl, VT, N0,
                          DAG.getConstant(MinVal, dl, N0.getValueType()),
                          ISD::SETEQ);
    // If we have setugt X, Max-1, turn it into seteq X, Max
    if ((Cond == ISD::SETGT || Cond == ISD::SETUGT) && C1 == MaxVal-1)
      return DAG.getSetCC(dl, VT, N0,
                          DAG.getConstant(MaxVal, dl, N0.getValueType()),
                          ISD::SETEQ);

    // If we have "setcc X, C0", check to see if we can shrink the immediate
    // by changing cc.

    // SETUGT X, SINTMAX  -> SETLT X, 0
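    // Unsigned x > SINTMAX holds exactly when the sign bit of x is set,
    // which the signed compare x < 0 tests directly; e.g. for i8
    // (illustrative), x >u 127 <=> x <s 0.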
    if (Cond == ISD::SETUGT &&
        C1 == APInt::getSignedMaxValue(OperandBitSize))
      return DAG.getSetCC(dl, VT, N0,
                          DAG.getConstant(0, dl, N1.getValueType()),
                          ISD::SETLT);

    // SETULT X, SINTMIN  -> SETGT X, -1
    if (Cond == ISD::SETULT &&
        C1 == APInt::getSignedMinValue(OperandBitSize)) {
      SDValue ConstMinusOne =
          DAG.getConstant(APInt::getAllOnesValue(OperandBitSize), dl,
                          N1.getValueType());
      return DAG.getSetCC(dl, VT, N0, ConstMinusOne, ISD::SETGT);
    }

    // Fold bit comparisons when we can.
    if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) &&
        (VT == N0.getValueType() ||
         (isTypeLegal(VT) && VT.bitsLE(N0.getValueType()))) &&
        N0.getOpcode() == ISD::AND) {
      auto &DL = DAG.getDataLayout();
      if (auto *AndRHS = dyn_cast<ConstantSDNode>(N0.getOperand(1))) {
        EVT ShiftTy = DCI.isBeforeLegalize()
                          ? getPointerTy(DL)
                          : getShiftAmountTy(N0.getValueType(), DL);
        if (Cond == ISD::SETNE && C1 == 0) { // (X & 8) != 0 --> (X & 8) >> 3
          // Perform the xform if the AND RHS is a single bit.
          if (AndRHS->getAPIntValue().isPowerOf2()) {
            return DAG.getNode(ISD::TRUNCATE, dl, VT,
                     DAG.getNode(ISD::SRL, dl, N0.getValueType(), N0,
                       DAG.getConstant(AndRHS->getAPIntValue().logBase2(), dl,
                                       ShiftTy)));
          }
        } else if (Cond == ISD::SETEQ && C1 == AndRHS->getAPIntValue()) {
          // (X & 8) == 8 --> (X & 8) >> 3
          // Perform the xform if C1 is a single bit.
          if (C1.isPowerOf2()) {
            return DAG.getNode(ISD::TRUNCATE, dl, VT,
                     DAG.getNode(ISD::SRL, dl, N0.getValueType(), N0,
                       DAG.getConstant(C1.logBase2(), dl,
                                       ShiftTy)));
          }
        }
      }
    }

    if (C1.getMinSignedBits() <= 64 &&
        !isLegalICmpImmediate(C1.getSExtValue())) {
      // (X & -256) == 256 -> (X >> 8) == 1
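      // Both sides share the mask's trailing zero bits, so shifting those
      // out preserves the equality while shrinking the immediate;
      // illustrative i32 case: -256 = 0xFFFFFF00, and
      // ((X & 0xFFFFFF00) == 256) holds iff ((X >> 8) == 1).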
      if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) &&
          N0.getOpcode() == ISD::AND && N0.hasOneUse()) {
        if (auto *AndRHS = dyn_cast<ConstantSDNode>(N0.getOperand(1))) {
          const APInt &AndRHSC = AndRHS->getAPIntValue();
          if ((-AndRHSC).isPowerOf2() && (AndRHSC & C1) == C1) {
            unsigned ShiftBits = AndRHSC.countTrailingZeros();
            auto &DL = DAG.getDataLayout();
            EVT ShiftTy = DCI.isBeforeLegalize()
                              ? getPointerTy(DL)
                              : getShiftAmountTy(N0.getValueType(), DL);
            EVT CmpTy = N0.getValueType();
            SDValue Shift = DAG.getNode(ISD::SRL, dl, CmpTy, N0.getOperand(0),
                                        DAG.getConstant(ShiftBits, dl,
                                                        ShiftTy));
            SDValue CmpRHS = DAG.getConstant(C1.lshr(ShiftBits), dl, CmpTy);
            return DAG.getSetCC(dl, VT, Shift, CmpRHS, Cond);
          }
        }
      } else if (Cond == ISD::SETULT || Cond == ISD::SETUGE ||
                 Cond == ISD::SETULE || Cond == ISD::SETUGT) {
        bool AdjOne = (Cond == ISD::SETULE || Cond == ISD::SETUGT);
        // X <  0x100000000 -> (X >> 32) <  1
        // X >= 0x100000000 -> (X >> 32) >= 1
        // X <= 0x0ffffffff -> (X >> 32) <  1
        // X >  0x0ffffffff -> (X >> 32) >= 1
        unsigned ShiftBits;
        APInt NewC = C1;
        ISD::CondCode NewCond = Cond;
        if (AdjOne) {
          ShiftBits = C1.countTrailingOnes();
          NewC = NewC + 1;
          NewCond = (Cond == ISD::SETULE) ? ISD::SETULT : ISD::SETUGE;
        } else {
          ShiftBits = C1.countTrailingZeros();
        }
        NewC = NewC.lshr(ShiftBits);
        if (ShiftBits && NewC.getMinSignedBits() <= 64 &&
            isLegalICmpImmediate(NewC.getSExtValue())) {
          auto &DL = DAG.getDataLayout();
          EVT ShiftTy = DCI.isBeforeLegalize()
                            ? getPointerTy(DL)
                            : getShiftAmountTy(N0.getValueType(), DL);
          EVT CmpTy = N0.getValueType();
          SDValue Shift = DAG.getNode(ISD::SRL, dl, CmpTy, N0,
                                      DAG.getConstant(ShiftBits, dl, ShiftTy));
          SDValue CmpRHS = DAG.getConstant(NewC, dl, CmpTy);
          return DAG.getSetCC(dl, VT, Shift, CmpRHS, NewCond);
        }
      }
    }
  }

  if (isa<ConstantFPSDNode>(N0.getNode())) {
    // Constant fold or commute setcc.
    SDValue O = DAG.FoldSetCC(VT, N0, N1, Cond, dl);
    if (O.getNode()) return O;
  } else if (auto *CFP = dyn_cast<ConstantFPSDNode>(N1.getNode())) {
    // If the RHS of an FP comparison is a constant, simplify it away in
    // some cases.
    if (CFP->getValueAPF().isNaN()) {
      // If an operand is known to be a nan, we can fold it.
      switch (ISD::getUnorderedFlavor(Cond)) {
      default: llvm_unreachable("Unknown flavor!");
      case 0: // Known false.
        return DAG.getConstant(0, dl, VT);
      case 1: // Known true.
        return DAG.getConstant(1, dl, VT);
      case 2: // Undefined.
        return DAG.getUNDEF(VT);
      }
    }

    // Otherwise, we know the RHS is not a NaN. Simplify the node to drop the
    // constant if knowing that the operand is non-nan is enough. We prefer to
    // have SETO(x,x) instead of SETO(x, 0.0) because this avoids having to
    // materialize 0.0.
    if (Cond == ISD::SETO || Cond == ISD::SETUO)
      return DAG.getSetCC(dl, VT, N0, N0, Cond);

    // If the condition is not legal, see if we can find an equivalent one
    // which is legal.
    if (!isCondCodeLegal(Cond, N0.getSimpleValueType())) {
      // If the comparison was an awkward floating-point == or != and one of
      // the comparison operands is infinity or negative infinity, convert the
      // condition to a less-awkward <= or >=.
      if (CFP->getValueAPF().isInfinity()) {
        if (CFP->getValueAPF().isNegative()) {
          if (Cond == ISD::SETOEQ &&
              isCondCodeLegal(ISD::SETOLE, N0.getSimpleValueType()))
            return DAG.getSetCC(dl, VT, N0, N1, ISD::SETOLE);
          if (Cond == ISD::SETUEQ &&
              isCondCodeLegal(ISD::SETULE, N0.getSimpleValueType()))
            return DAG.getSetCC(dl, VT, N0, N1, ISD::SETULE);
          if (Cond == ISD::SETUNE &&
              isCondCodeLegal(ISD::SETUGT, N0.getSimpleValueType()))
            return DAG.getSetCC(dl, VT, N0, N1, ISD::SETUGT);
          if (Cond == ISD::SETONE &&
              isCondCodeLegal(ISD::SETOGT, N0.getSimpleValueType()))
            return DAG.getSetCC(dl, VT, N0, N1, ISD::SETOGT);
        } else {
          if (Cond == ISD::SETOEQ &&
              isCondCodeLegal(ISD::SETOGE, N0.getSimpleValueType()))
            return DAG.getSetCC(dl, VT, N0, N1, ISD::SETOGE);
          if (Cond == ISD::SETUEQ &&
              isCondCodeLegal(ISD::SETUGE, N0.getSimpleValueType()))
            return DAG.getSetCC(dl, VT, N0, N1, ISD::SETUGE);
          if (Cond == ISD::SETUNE &&
              isCondCodeLegal(ISD::SETULT, N0.getSimpleValueType()))
            return DAG.getSetCC(dl, VT, N0, N1, ISD::SETULT);
          if (Cond == ISD::SETONE &&
              isCondCodeLegal(ISD::SETOLT, N0.getSimpleValueType()))
            return DAG.getSetCC(dl, VT, N0, N1, ISD::SETOLT);
        }
      }
    }
  }

  if (N0 == N1) {
    // The sext(setcc()) => setcc() optimization relies on the appropriate
    // constant being emitted.
    uint64_t EqVal = 0;
    switch (getBooleanContents(N0.getValueType())) {
    case UndefinedBooleanContent:
    case ZeroOrOneBooleanContent:
      EqVal = ISD::isTrueWhenEqual(Cond);
      break;
    case ZeroOrNegativeOneBooleanContent:
      EqVal = ISD::isTrueWhenEqual(Cond) ? -1 : 0;
      break;
    }

    // We can always fold X == X for integer setcc's.
    if (N0.getValueType().isInteger()) {
      return DAG.getConstant(EqVal, dl, VT);
    }
    unsigned UOF = ISD::getUnorderedFlavor(Cond);
    if (UOF == 2) // FP operators that are undefined on NaNs.
      return DAG.getConstant(EqVal, dl, VT);
    if (UOF == unsigned(ISD::isTrueWhenEqual(Cond)))
      return DAG.getConstant(EqVal, dl, VT);
    // Otherwise, we can't fold it. However, we can simplify it to SETUO/SETO
    // if it is not already.
    ISD::CondCode NewCond = UOF == 0 ? ISD::SETO : ISD::SETUO;
    if (NewCond != Cond &&
        (DCI.isBeforeLegalizeOps() ||
         getCondCodeAction(NewCond, N0.getSimpleValueType()) == Legal))
      return DAG.getSetCC(dl, VT, N0, N1, NewCond);
  }

  if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) &&
      N0.getValueType().isInteger()) {
    if (N0.getOpcode() == ISD::ADD || N0.getOpcode() == ISD::SUB ||
        N0.getOpcode() == ISD::XOR) {
      // Simplify (X+Y) == (X+Z) -->  Y == Z
      if (N0.getOpcode() == N1.getOpcode()) {
        if (N0.getOperand(0) == N1.getOperand(0))
          return DAG.getSetCC(dl, VT, N0.getOperand(1), N1.getOperand(1),
                              Cond);
        if (N0.getOperand(1) == N1.getOperand(1))
          return DAG.getSetCC(dl, VT, N0.getOperand(0), N1.getOperand(0),
                              Cond);
        if (DAG.isCommutativeBinOp(N0.getOpcode())) {
          // If X op Y == Y op X, try other combinations.
          if (N0.getOperand(0) == N1.getOperand(1))
            return DAG.getSetCC(dl, VT, N0.getOperand(1), N1.getOperand(0),
                                Cond);
          if (N0.getOperand(1) == N1.getOperand(0))
            return DAG.getSetCC(dl, VT, N0.getOperand(0), N1.getOperand(1),
                                Cond);
        }
      }

      // If RHS is a legal immediate value for a compare instruction, we need
      // to be careful about increasing register pressure needlessly.
      bool LegalRHSImm = false;

      if (auto *RHSC = dyn_cast<ConstantSDNode>(N1)) {
        if (auto *LHSR = dyn_cast<ConstantSDNode>(N0.getOperand(1))) {
          // Turn (X+C1) == C2 --> X == C2-C1
          if (N0.getOpcode() == ISD::ADD && N0.getNode()->hasOneUse()) {
            return DAG.getSetCC(dl, VT, N0.getOperand(0),
                                DAG.getConstant(RHSC->getAPIntValue() -
                                                    LHSR->getAPIntValue(),
                                                dl, N0.getValueType()),
                                Cond);
          }

          // Turn (X^C1) == C2 --> X == C1^C2 iff X&~C1 = 0.
          if (N0.getOpcode() == ISD::XOR)
            // If we know that all of the inverted bits are zero, don't bother
            // performing the inversion.
            if (DAG.MaskedValueIsZero(N0.getOperand(0), ~LHSR->getAPIntValue()))
              return
                DAG.getSetCC(dl, VT, N0.getOperand(0),
                             DAG.getConstant(LHSR->getAPIntValue() ^
                                                 RHSC->getAPIntValue(),
                                             dl, N0.getValueType()),
                             Cond);
        }

        // Turn (C1-X) == C2 --> X == C1-C2
        if (auto *SUBC = dyn_cast<ConstantSDNode>(N0.getOperand(0))) {
          if (N0.getOpcode() == ISD::SUB && N0.getNode()->hasOneUse()) {
            return
              DAG.getSetCC(dl, VT, N0.getOperand(1),
                           DAG.getConstant(SUBC->getAPIntValue() -
                                               RHSC->getAPIntValue(),
                                           dl, N0.getValueType()),
                           Cond);
          }
        }

        // Could RHSC fold directly into a compare?
        if (RHSC->getValueType(0).getSizeInBits() <= 64)
          LegalRHSImm = isLegalICmpImmediate(RHSC->getSExtValue());
      }

      // Simplify (X+Z) == X -->  Z == 0
      // Don't do this if X is an immediate that can fold into a cmp
      // instruction and X+Z has other uses. It could be an induction variable
      // chain, and the transform would increase register pressure.
      if (!LegalRHSImm || N0.getNode()->hasOneUse()) {
        if (N0.getOperand(0) == N1)
          return DAG.getSetCC(dl, VT, N0.getOperand(1),
                              DAG.getConstant(0, dl, N0.getValueType()),
                              Cond);
        if (N0.getOperand(1) == N1) {
          if (DAG.isCommutativeBinOp(N0.getOpcode()))
            return DAG.getSetCC(dl, VT, N0.getOperand(0),
                                DAG.getConstant(0, dl, N0.getValueType()),
                                Cond);
          if (N0.getNode()->hasOneUse()) {
            assert(N0.getOpcode() == ISD::SUB && "Unexpected operation!");
            auto &DL = DAG.getDataLayout();
            // (Z-X) == X  --> Z == X<<1
            SDValue SH = DAG.getNode(
                ISD::SHL, dl, N1.getValueType(), N1,
                DAG.getConstant(1, dl,
                                getShiftAmountTy(N1.getValueType(), DL)));
            if (!DCI.isCalledByLegalizer())
              DCI.AddToWorklist(SH.getNode());
            return DAG.getSetCC(dl, VT, N0.getOperand(0), SH, Cond);
          }
        }
      }
    }

    if (N1.getOpcode() == ISD::ADD || N1.getOpcode() == ISD::SUB ||
        N1.getOpcode() == ISD::XOR) {
      // Simplify  X == (X+Z) -->  Z == 0
      if (N1.getOperand(0) == N0)
        return DAG.getSetCC(dl, VT, N1.getOperand(1),
                            DAG.getConstant(0, dl, N1.getValueType()), Cond);
      if (N1.getOperand(1) == N0) {
        if (DAG.isCommutativeBinOp(N1.getOpcode()))
          return DAG.getSetCC(dl, VT, N1.getOperand(0),
                              DAG.getConstant(0, dl, N1.getValueType()),
                              Cond);
        if (N1.getNode()->hasOneUse()) {
          assert(N1.getOpcode() == ISD::SUB && "Unexpected operation!");
          auto &DL = DAG.getDataLayout();
          // X == (Z-X)  --> X<<1 == Z
          SDValue SH = DAG.getNode(
              ISD::SHL, dl, N1.getValueType(), N0,
              DAG.getConstant(1, dl, getShiftAmountTy(N0.getValueType(), DL)));
          if (!DCI.isCalledByLegalizer())
            DCI.AddToWorklist(SH.getNode());
          return DAG.getSetCC(dl, VT, SH, N1.getOperand(0), Cond);
        }
      }
    }

    if (SDValue V = simplifySetCCWithAnd(VT, N0, N1, Cond, DCI, dl))
      return V;
  }

  // Fold away ALL boolean setcc's.
  SDValue Temp;
  if (N0.getValueType() == MVT::i1 && foldBooleans) {
    switch (Cond) {
    default: llvm_unreachable("Unknown integer setcc!");
    case ISD::SETEQ:  // X == Y  -> ~(X^Y)
      Temp = DAG.getNode(ISD::XOR, dl, MVT::i1, N0, N1);
      N0 = DAG.getNOT(dl, Temp, MVT::i1);
      if (!DCI.isCalledByLegalizer())
        DCI.AddToWorklist(Temp.getNode());
      break;
    case ISD::SETNE:  // X != Y  -->  (X^Y)
      N0 = DAG.getNode(ISD::XOR, dl, MVT::i1, N0, N1);
      break;
    case ISD::SETGT:  // X >s Y  -->  X == 0 & Y == 1  -->  ~X & Y
    case ISD::SETULT: // X <u Y  -->  X == 0 & Y == 1  -->  ~X & Y
      Temp = DAG.getNOT(dl, N0, MVT::i1);
      N0 = DAG.getNode(ISD::AND, dl, MVT::i1, N1, Temp);
      if (!DCI.isCalledByLegalizer())
        DCI.AddToWorklist(Temp.getNode());
      break;
    case ISD::SETLT:  // X <s Y  --> X == 1 & Y == 0  -->  ~Y & X
    case ISD::SETUGT: // X >u Y  --> X == 1 & Y == 0  -->  ~Y & X
      Temp = DAG.getNOT(dl, N1, MVT::i1);
      N0 = DAG.getNode(ISD::AND, dl, MVT::i1, N0, Temp);
      if (!DCI.isCalledByLegalizer())
        DCI.AddToWorklist(Temp.getNode());
      break;
    case ISD::SETULE: // X <=u Y  --> X == 0 | Y == 1  -->  ~X | Y
    case ISD::SETGE:  // X >=s Y  --> X == 0 | Y == 1  -->  ~X | Y
      Temp = DAG.getNOT(dl, N0, MVT::i1);
      N0 = DAG.getNode(ISD::OR, dl, MVT::i1, N1, Temp);
      if (!DCI.isCalledByLegalizer())
        DCI.AddToWorklist(Temp.getNode());
      break;
    case ISD::SETUGE: // X >=u Y  --> X == 1 | Y == 0  -->  ~Y | X
    case ISD::SETLE:  // X <=s Y  --> X == 1 | Y == 0  -->  ~Y | X
      Temp = DAG.getNOT(dl, N1, MVT::i1);
      N0 = DAG.getNode(ISD::OR, dl, MVT::i1, N0, Temp);
      break;
    }
    if (VT != MVT::i1) {
      if (!DCI.isCalledByLegalizer())
        DCI.AddToWorklist(N0.getNode());
      // FIXME: If running after legalize, we probably can't do this.
      N0 = DAG.getNode(ISD::ZERO_EXTEND, dl, VT, N0);
    }
    return N0;
  }

  // Could not fold it.
  return SDValue();
}

/// Returns true (and the GlobalValue and the offset) if the node is a
/// GlobalAddress + offset.
bool TargetLowering::isGAPlusOffset(SDNode *N, const GlobalValue *&GA,
                                    int64_t &Offset) const {
  if (auto *GASD = dyn_cast<GlobalAddressSDNode>(N)) {
    GA = GASD->getGlobal();
    Offset += GASD->getOffset();
    return true;
  }

  if (N->getOpcode() == ISD::ADD) {
    SDValue N1 = N->getOperand(0);
    SDValue N2 = N->getOperand(1);
    if (isGAPlusOffset(N1.getNode(), GA, Offset)) {
      if (auto *V = dyn_cast<ConstantSDNode>(N2)) {
        Offset += V->getSExtValue();
        return true;
      }
    } else if (isGAPlusOffset(N2.getNode(), GA, Offset)) {
      if (auto *V = dyn_cast<ConstantSDNode>(N1)) {
        Offset += V->getSExtValue();
        return true;
      }
    }
  }

  return false;
}

SDValue TargetLowering::PerformDAGCombine(SDNode *N,
                                          DAGCombinerInfo &DCI) const {
  // Default implementation: no optimization.
  return SDValue();
}

//===----------------------------------------------------------------------===//
//  Inline Assembler Implementation Methods
//===----------------------------------------------------------------------===//

TargetLowering::ConstraintType
TargetLowering::getConstraintType(StringRef Constraint) const {
  unsigned S = Constraint.size();

  if (S == 1) {
    switch (Constraint[0]) {
    default: break;
    case 'r': return C_RegisterClass;
    case 'm': // memory
    case 'o': // offsetable
    case 'V': // not offsetable
      return C_Memory;
    case 'i': // Simple Integer or Relocatable Constant
    case 'n': // Simple Integer
    case 'E': // Floating Point Constant
    case 'F': // Floating Point Constant
    case 's': // Relocatable Constant
    case 'p': // Address.
    case 'X': // Allow ANY value.
    case 'I': // Target registers.
    case 'J':
    case 'K':
    case 'L':
    case 'M':
    case 'N':
    case 'O':
    case 'P':
    case '<':
    case '>':
      return C_Other;
    }
  }

  if (S > 1 && Constraint[0] == '{' && Constraint[S-1] == '}') {
    if (S == 8 && Constraint.substr(1, 6) == "memory") // "{memory}"
      return C_Memory;
    return C_Register;
  }
  return C_Unknown;
}

/// Try to replace an X constraint, which matches anything, with another that
/// has more specific requirements based on the type of the corresponding
/// operand.
const char *TargetLowering::LowerXConstraint(EVT ConstraintVT) const {
  if (ConstraintVT.isInteger())
    return "r";
  if (ConstraintVT.isFloatingPoint())
    return "f"; // works for many targets
  return nullptr;
}

/// Lower the specified operand into the Ops vector.
/// If it is invalid, don't add anything to Ops.
void TargetLowering::LowerAsmOperandForConstraint(SDValue Op,
                                                  std::string &Constraint,
                                                  std::vector<SDValue> &Ops,
                                                  SelectionDAG &DAG) const {

  if (Constraint.length() > 1) return;

  char ConstraintLetter = Constraint[0];
  switch (ConstraintLetter) {
  default: break;
  case 'X': // Allows any operand; labels (basic block) use this.
    if (Op.getOpcode() == ISD::BasicBlock) {
      Ops.push_back(Op);
      return;
    }
    // fall through
  case 'i': // Simple Integer or Relocatable Constant
  case 'n': // Simple Integer
  case 's': { // Relocatable Constant
    // These operands are interested in values of the form (GV+C), where C may
    // be folded in as an offset of GV, or it may be explicitly added. Also,
    // it is possible and fine if either GV or C are missing.
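    // Illustrative shapes this handles: a bare global such as @g, a bare
    // constant such as 7, or a folded sum like (add @g, 16); the add case is
    // decomposed just below.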
    ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op);
    GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Op);

    // If we have "(add GV, C)", pull out GV/C
    if (Op.getOpcode() == ISD::ADD) {
      C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
      GA = dyn_cast<GlobalAddressSDNode>(Op.getOperand(0));
      if (!C || !GA) {
        C = dyn_cast<ConstantSDNode>(Op.getOperand(0));
        GA = dyn_cast<GlobalAddressSDNode>(Op.getOperand(1));
      }
      if (!C || !GA) {
        C = nullptr;
        GA = nullptr;
      }
    }

    // If we find a valid operand, map to the TargetXXX version so that the
    // value itself doesn't get selected.
    if (GA) { // Either &GV or &GV+C
      if (ConstraintLetter != 'n') {
        int64_t Offs = GA->getOffset();
        if (C) Offs += C->getZExtValue();
        Ops.push_back(DAG.getTargetGlobalAddress(GA->getGlobal(),
                                                 C ? SDLoc(C) : SDLoc(),
                                                 Op.getValueType(), Offs));
      }
      return;
    }
    if (C) { // just C, no GV.
      // Simple constants are not allowed for 's'.
      if (ConstraintLetter != 's') {
        // gcc prints these as sign extended. Sign extend value to 64 bits
        // now; without this it would get ZExt'd later in
        // ScheduleDAGSDNodes::EmitNode, which is very generic.
        Ops.push_back(DAG.getTargetConstant(C->getAPIntValue().getSExtValue(),
                                            SDLoc(C), MVT::i64));
      }
      return;
    }
    break;
  }
  }
}

std::pair<unsigned, const TargetRegisterClass *>
TargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *RI,
                                             StringRef Constraint,
                                             MVT VT) const {
  if (Constraint.empty() || Constraint[0] != '{')
    return std::make_pair(0u, static_cast<TargetRegisterClass*>(nullptr));
  assert(*(Constraint.end()-1) == '}' && "Not a brace enclosed constraint?");

  // Remove the braces from around the name.
  StringRef RegName(Constraint.data()+1, Constraint.size()-2);

  std::pair<unsigned, const TargetRegisterClass*> R =
      std::make_pair(0u, static_cast<const TargetRegisterClass*>(nullptr));

  // Figure out which register class contains this reg.
  for (TargetRegisterInfo::regclass_iterator RCI = RI->regclass_begin(),
       E = RI->regclass_end(); RCI != E; ++RCI) {
    const TargetRegisterClass *RC = *RCI;

    // If none of the value types for this register class are valid, we
    // can't use it.  For example, 64-bit reg classes on 32-bit targets.
    if (!isLegalRC(RC))
      continue;

    for (TargetRegisterClass::iterator I = RC->begin(), E = RC->end();
         I != E; ++I) {
      if (RegName.equals_lower(RI->getRegAsmName(*I))) {
        std::pair<unsigned, const TargetRegisterClass*> S =
            std::make_pair(*I, RC);

        // If this register class has the requested value type, return it,
        // otherwise keep searching and return the first class found
        // if no other is found which explicitly has the requested type.
        if (RC->hasType(VT))
          return S;
        else if (!R.second)
          R = S;
      }
    }
  }

  return R;
}

//===----------------------------------------------------------------------===//
// Constraint Selection.

/// Return true if this is an input operand that is a matching constraint
/// like "4".
bool TargetLowering::AsmOperandInfo::isMatchingInputConstraint() const {
  assert(!ConstraintCode.empty() && "No known constraint!");
  return isdigit(static_cast<unsigned char>(ConstraintCode[0]));
}

/// If this is an input matching constraint, this method returns the output
/// operand it matches.
unsigned TargetLowering::AsmOperandInfo::getMatchedOperand() const {
  assert(!ConstraintCode.empty() && "No known constraint!");
  return atoi(ConstraintCode.c_str());
}

/// Split up the constraint string from the inline assembly value into the
/// specific constraints and their prefixes, and also tie in the associated
/// operand values.
/// If this returns an empty vector, and if the constraint string itself
/// isn't empty, there was an error parsing.
TargetLowering::AsmOperandInfoVector
TargetLowering::ParseConstraints(const DataLayout &DL,
                                 const TargetRegisterInfo *TRI,
                                 ImmutableCallSite CS) const {
  /// Information about all of the constraints.
  AsmOperandInfoVector ConstraintOperands;
  const InlineAsm *IA = cast<InlineAsm>(CS.getCalledValue());
  unsigned maCount = 0; // Largest number of multiple alternative constraints.

  // Do a prepass over the constraints, canonicalizing them, and building up
  // the ConstraintOperands list.
  unsigned ArgNo = 0; // ArgNo - The argument of the CallInst.
  unsigned ResNo = 0; // ResNo - The result number of the next output.

  for (InlineAsm::ConstraintInfo &CI : IA->ParseConstraints()) {
    ConstraintOperands.emplace_back(std::move(CI));
    AsmOperandInfo &OpInfo = ConstraintOperands.back();

    // Update multiple alternative constraint count.
    if (OpInfo.multipleAlternatives.size() > maCount)
      maCount = OpInfo.multipleAlternatives.size();

    OpInfo.ConstraintVT = MVT::Other;

    // Compute the value type for each operand.
    switch (OpInfo.Type) {
    case InlineAsm::isOutput:
      // Indirect outputs just consume an argument.
      if (OpInfo.isIndirect) {
        OpInfo.CallOperandVal = const_cast<Value *>(CS.getArgument(ArgNo++));
        break;
      }

      // The return value of the call is this value.  As such, there is no
      // corresponding argument.
      assert(!CS.getType()->isVoidTy() &&
             "Bad inline asm!");
      if (StructType *STy = dyn_cast<StructType>(CS.getType())) {
        OpInfo.ConstraintVT =
            getSimpleValueType(DL, STy->getElementType(ResNo));
      } else {
        assert(ResNo == 0 && "Asm only has one result!");
        OpInfo.ConstraintVT = getSimpleValueType(DL, CS.getType());
      }
      ++ResNo;
      break;
    case InlineAsm::isInput:
      OpInfo.CallOperandVal = const_cast<Value *>(CS.getArgument(ArgNo++));
      break;
    case InlineAsm::isClobber:
      // Nothing to do.
      break;
    }

    if (OpInfo.CallOperandVal) {
      llvm::Type *OpTy = OpInfo.CallOperandVal->getType();
      if (OpInfo.isIndirect) {
        llvm::PointerType *PtrTy = dyn_cast<PointerType>(OpTy);
        if (!PtrTy)
          report_fatal_error("Indirect operand for inline asm not a pointer!");
        OpTy = PtrTy->getElementType();
      }

      // Look for vector wrapped in a struct. e.g. { <16 x i8> }.
      if (StructType *STy = dyn_cast<StructType>(OpTy))
        if (STy->getNumElements() == 1)
          OpTy = STy->getElementType(0);

      // If OpTy is not a single value, it may be a struct/union that we
      // can tile with integers.
      if (!OpTy->isSingleValueType() && OpTy->isSized()) {
        unsigned BitSize = DL.getTypeSizeInBits(OpTy);
        switch (BitSize) {
        default: break;
        case 1:
        case 8:
        case 16:
        case 32:
        case 64:
        case 128:
          OpInfo.ConstraintVT =
              MVT::getVT(IntegerType::get(OpTy->getContext(), BitSize), true);
          break;
        }
      } else if (PointerType *PT = dyn_cast<PointerType>(OpTy)) {
        unsigned PtrSize = DL.getPointerSizeInBits(PT->getAddressSpace());
        OpInfo.ConstraintVT = MVT::getIntegerVT(PtrSize);
      } else {
        OpInfo.ConstraintVT = MVT::getVT(OpTy, true);
      }
    }
  }

  // If we have multiple alternative constraints, select the best alternative.
  if (!ConstraintOperands.empty()) {
    if (maCount) {
      unsigned bestMAIndex = 0;
      int bestWeight = -1;
      // weight: -1 = invalid match, and 0 = so-so match to 5 = good match.
      int weight = -1;
      unsigned maIndex;
      // Compute the sums of the weights for each alternative, keeping track
      // of the best (highest weight) one so far.
      for (maIndex = 0; maIndex < maCount; ++maIndex) {
        int weightSum = 0;
        for (unsigned cIndex = 0, eIndex = ConstraintOperands.size();
             cIndex != eIndex; ++cIndex) {
          AsmOperandInfo& OpInfo = ConstraintOperands[cIndex];
          if (OpInfo.Type == InlineAsm::isClobber)
            continue;

          // If this is an output operand with a matching input operand,
          // look up the matching input. If their types mismatch, e.g. one
          // is an integer, the other is floating point, or their sizes are
          // different, mark the whole alternative as unmatchable.
          if (OpInfo.hasMatchingInput()) {
            AsmOperandInfo &Input = ConstraintOperands[OpInfo.MatchingInput];
            if (OpInfo.ConstraintVT != Input.ConstraintVT) {
              if ((OpInfo.ConstraintVT.isInteger() !=
                   Input.ConstraintVT.isInteger()) ||
                  (OpInfo.ConstraintVT.getSizeInBits() !=
                   Input.ConstraintVT.getSizeInBits())) {
                weightSum = -1; // Can't match.
                break;
              }
            }
          }
          weight = getMultipleConstraintMatchWeight(OpInfo, maIndex);
          if (weight == -1) {
            weightSum = -1;
            break;
          }
          weightSum += weight;
        }
        // Update best.
        if (weightSum > bestWeight) {
          bestWeight = weightSum;
          bestMAIndex = maIndex;
        }
      }

      // Now select chosen alternative in each constraint.
      for (unsigned cIndex = 0, eIndex = ConstraintOperands.size();
           cIndex != eIndex; ++cIndex) {
        AsmOperandInfo& cInfo = ConstraintOperands[cIndex];
        if (cInfo.Type == InlineAsm::isClobber)
          continue;
        cInfo.selectAlternative(bestMAIndex);
      }
    }
  }

  // Check and hook up tied operands, choose constraint code to use.
  for (unsigned cIndex = 0, eIndex = ConstraintOperands.size();
       cIndex != eIndex; ++cIndex) {
    AsmOperandInfo& OpInfo = ConstraintOperands[cIndex];

    // If this is an output operand with a matching input operand, look up the
    // matching input. If their types mismatch, e.g. one is an integer, the
    // other is floating point, or their sizes are different, flag it as an
    // error.
    if (OpInfo.hasMatchingInput()) {
      AsmOperandInfo &Input = ConstraintOperands[OpInfo.MatchingInput];

      if (OpInfo.ConstraintVT != Input.ConstraintVT) {
        std::pair<unsigned, const TargetRegisterClass *> MatchRC =
            getRegForInlineAsmConstraint(TRI, OpInfo.ConstraintCode,
                                         OpInfo.ConstraintVT);
        std::pair<unsigned, const TargetRegisterClass *> InputRC =
            getRegForInlineAsmConstraint(TRI, Input.ConstraintCode,
                                         Input.ConstraintVT);
        if ((OpInfo.ConstraintVT.isInteger() !=
             Input.ConstraintVT.isInteger()) ||
            (MatchRC.second != InputRC.second)) {
          report_fatal_error("Unsupported asm: input constraint"
                             " with a matching output constraint of"
                             " incompatible type!");
        }
      }
    }
  }

  return ConstraintOperands;
}

/// Return an integer indicating how general CT is.
static unsigned getConstraintGenerality(TargetLowering::ConstraintType CT) {
  switch (CT) {
  case TargetLowering::C_Other:
  case TargetLowering::C_Unknown:
    return 0;
  case TargetLowering::C_Register:
    return 1;
  case TargetLowering::C_RegisterClass:
    return 2;
  case TargetLowering::C_Memory:
    return 3;
  }
  llvm_unreachable("Invalid constraint type");
}

/// Examine constraint type and operand type and determine a weight value.
/// This object must already have been set up with the operand type
/// and the current alternative constraint selected.
TargetLowering::ConstraintWeight
TargetLowering::getMultipleConstraintMatchWeight(
    AsmOperandInfo &info, int maIndex) const {
  InlineAsm::ConstraintCodeVector *rCodes;
  if (maIndex >= (int)info.multipleAlternatives.size())
    rCodes = &info.Codes;
  else
    rCodes = &info.multipleAlternatives[maIndex].Codes;
  ConstraintWeight BestWeight = CW_Invalid;

  // Loop over the options, keeping track of the most general one.
  for (unsigned i = 0, e = rCodes->size(); i != e; ++i) {
    ConstraintWeight weight =
        getSingleConstraintMatchWeight(info, (*rCodes)[i].c_str());
    if (weight > BestWeight)
      BestWeight = weight;
  }

  return BestWeight;
}

/// Examine constraint type and operand type and determine a weight value.
/// This object must already have been set up with the operand type
/// and the current alternative constraint selected.
TargetLowering::ConstraintWeight
TargetLowering::getSingleConstraintMatchWeight(
    AsmOperandInfo &info, const char *constraint) const {
  ConstraintWeight weight = CW_Invalid;
  Value *CallOperandVal = info.CallOperandVal;
  // If we don't have a value, we can't do a match,
  // but allow it at the lowest weight.
  if (!CallOperandVal)
    return CW_Default;
  // Look at the constraint type.
  switch (*constraint) {
  case 'i': // immediate integer.
  case 'n': // immediate integer with a known value.
    if (isa<ConstantInt>(CallOperandVal))
      weight = CW_Constant;
    break;
  case 's': // non-explicit integral immediate.
    if (isa<GlobalValue>(CallOperandVal))
      weight = CW_Constant;
    break;
  case 'E': // immediate float if host format.
  case 'F': // immediate float.
    if (isa<ConstantFP>(CallOperandVal))
      weight = CW_Constant;
    break;
  case '<': // memory operand with autodecrement.
  case '>': // memory operand with autoincrement.
  case 'm': // memory operand.
  case 'o': // offsettable memory operand
  case 'V': // non-offsettable memory operand
    weight = CW_Memory;
    break;
  case 'r': // general register.
  case 'g': // general register, memory operand or immediate integer.
            // note: Clang converts "g" to "imr".
    if (CallOperandVal->getType()->isIntegerTy())
      weight = CW_Register;
    break;
  case 'X': // any operand.
  default:
    weight = CW_Default;
    break;
  }
  return weight;
}

/// If there are multiple different constraints that we could pick for this
/// operand (e.g. "imr") try to pick the 'best' one.
/// This is somewhat tricky: constraints fall into four classes:
///    Other         -> immediates and magic values
///    Register      -> one specific register
///    RegisterClass -> a group of regs
///    Memory        -> memory
/// Ideally, we would pick the most specific constraint possible: if we have
/// something that fits into a register, we would pick it.  The problem here
/// is that if we have something that could either be in a register or in
/// memory that use of the register could cause selection of *other*
/// operands to fail: they might only succeed if we pick memory.  Because of
/// this the heuristic we use is:
///
///  1) If there is an 'other' constraint, and if the operand is valid for
///     that constraint, use it.  This makes us take advantage of 'i'
///     constraints when available.
///  2) Otherwise, pick the most general constraint present.  This prefers
///     'm' over 'r', for example.
///
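/// For example (illustrative), given the codes "imr" and a compile-time
/// constant operand, rule 1 picks 'i'; given "mr" and a non-constant
/// operand, rule 2 picks 'm' as the more general of the two.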
static void ChooseConstraint(TargetLowering::AsmOperandInfo &OpInfo,
                             const TargetLowering &TLI,
                             SDValue Op, SelectionDAG *DAG) {
  assert(OpInfo.Codes.size() > 1 && "Doesn't have multiple constraint options");
  unsigned BestIdx = 0;
  TargetLowering::ConstraintType BestType = TargetLowering::C_Unknown;
  int BestGenerality = -1;

  // Loop over the options, keeping track of the most general one.
  for (unsigned i = 0, e = OpInfo.Codes.size(); i != e; ++i) {
    TargetLowering::ConstraintType CType =
        TLI.getConstraintType(OpInfo.Codes[i]);

    // If this is an 'other' constraint, see if the operand is valid for it.
    // For example, on X86 we might have an 'rI' constraint.  If the operand
    // is an integer in the range [0..31] we want to use I (saving a load
    // of a register), otherwise we must use 'r'.
    if (CType == TargetLowering::C_Other && Op.getNode()) {
      assert(OpInfo.Codes[i].size() == 1 &&
             "Unhandled multi-letter 'other' constraint");
      std::vector<SDValue> ResultOps;
      TLI.LowerAsmOperandForConstraint(Op, OpInfo.Codes[i],
                                       ResultOps, *DAG);
      if (!ResultOps.empty()) {
        BestType = CType;
        BestIdx = i;
        break;
      }
    }

    // Things with matching constraints can only be registers, per gcc
    // documentation.  This mainly affects "g" constraints.
    if (CType == TargetLowering::C_Memory && OpInfo.hasMatchingInput())
      continue;

    // This constraint letter is more general than the previous one, use it.
    int Generality = getConstraintGenerality(CType);
    if (Generality > BestGenerality) {
      BestType = CType;
      BestIdx = i;
      BestGenerality = Generality;
    }
  }

  OpInfo.ConstraintCode = OpInfo.Codes[BestIdx];
  OpInfo.ConstraintType = BestType;
}

/// Determines the constraint code and constraint type to use for the specific
/// AsmOperandInfo, setting OpInfo.ConstraintCode and OpInfo.ConstraintType.
void TargetLowering::ComputeConstraintToUse(AsmOperandInfo &OpInfo,
                                            SDValue Op,
                                            SelectionDAG *DAG) const {
  assert(!OpInfo.Codes.empty() && "Must have at least one constraint");

  // Single-letter constraints ('r') are very common.
  if (OpInfo.Codes.size() == 1) {
    OpInfo.ConstraintCode = OpInfo.Codes[0];
    OpInfo.ConstraintType = getConstraintType(OpInfo.ConstraintCode);
  } else {
    ChooseConstraint(OpInfo, *this, Op, DAG);
  }

  // 'X' matches anything.
  if (OpInfo.ConstraintCode == "X" && OpInfo.CallOperandVal) {
    // Labels and constants are handled elsewhere ('X' is the only thing
    // that matches labels).  For Functions, the type here is the type of
    // the result, which is not what we want to look at; leave them alone.
    Value *v = OpInfo.CallOperandVal;
    if (isa<BasicBlock>(v) || isa<ConstantInt>(v) || isa<Function>(v)) {
      OpInfo.CallOperandVal = v;
      return;
    }

    // Otherwise, try to resolve it to something we know about by looking at
    // the actual operand type.
    if (const char *Repl = LowerXConstraint(OpInfo.ConstraintVT)) {
      OpInfo.ConstraintCode = Repl;
      OpInfo.ConstraintType = getConstraintType(OpInfo.ConstraintCode);
    }
  }
}

/// \brief Given an exact SDIV by a constant, create a multiplication
/// with the multiplicative inverse of the constant.
static SDValue BuildExactSDIV(const TargetLowering &TLI, SDValue Op1, APInt d,
                              const SDLoc &dl, SelectionDAG &DAG,
                              std::vector<SDNode *> &Created) {
  assert(d != 0 && "Division by zero!");

  // Shift the value upfront if it is even, so the LSB is one.
  unsigned ShAmt = d.countTrailingZeros();
  if (ShAmt) {
    // TODO: For UDIV use SRL instead of SRA.
    SDValue Amt =
        DAG.getConstant(ShAmt, dl, TLI.getShiftAmountTy(Op1.getValueType(),
                                                        DAG.getDataLayout()));
    SDNodeFlags Flags;
    Flags.setExact(true);
    Op1 = DAG.getNode(ISD::SRA, dl, Op1.getValueType(), Op1, Amt, &Flags);
    Created.push_back(Op1.getNode());
    d = d.ashr(ShAmt);
  }

  // Calculate the multiplicative inverse, using Newton's method.
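  // Each update step doubles the number of correct low-order bits.
  // Illustrative 8-bit run for d = 7: xn starts at 7; one update gives
  // xn = 7 * (2 - 49) mod 256 = 183, and 7 * 183 = 1281 == 1 (mod 256),
  // so 183 is the inverse.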
  APInt t, xn = d;
  while ((t = d*xn) != 1)
    xn *= APInt(d.getBitWidth(), 2) - t;

  SDValue Op2 = DAG.getConstant(xn, dl, Op1.getValueType());
  SDValue Mul = DAG.getNode(ISD::MUL, dl, Op1.getValueType(), Op1, Op2);
  Created.push_back(Mul.getNode());
  return Mul;
}

SDValue TargetLowering::BuildSDIVPow2(SDNode *N, const APInt &Divisor,
                                      SelectionDAG &DAG,
                                      std::vector<SDNode *> *Created) const {
  AttributeSet Attr = DAG.getMachineFunction().getFunction()->getAttributes();
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  if (TLI.isIntDivCheap(N->getValueType(0), Attr))
    return SDValue(N,0); // Lower SDIV as SDIV
  return SDValue();
}

/// \brief Given an ISD::SDIV node expressing a divide by constant,
/// return a DAG expression to select that will generate the same value by
/// multiplying by a magic number.
/// Ref: "Hacker's Delight" or "The PowerPC Compiler Writer's Guide".
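/// Illustrative i32 case: for a divide by 3 the magic constant is
/// 0x55555556, so the quotient is the high 32 bits of the 64-bit product
/// n * 0x55555556, followed by the sign fixup emitted below.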
SDValue TargetLowering::BuildSDIV(SDNode *N, const APInt &Divisor,
                                  SelectionDAG &DAG, bool IsAfterLegalization,
                                  std::vector<SDNode *> *Created) const {
  assert(Created && "No vector to hold sdiv ops.");

  EVT VT = N->getValueType(0);
  SDLoc dl(N);

  // Check to see if we can do this.
  // FIXME: We should be more aggressive here.
  if (!isTypeLegal(VT))
    return SDValue();

  // If the sdiv has an 'exact' bit we can use a simpler lowering.
  if (cast<BinaryWithFlagsSDNode>(N)->Flags.hasExact())
    return BuildExactSDIV(*this, N->getOperand(0), Divisor, dl, DAG, *Created);

  APInt::ms magics = Divisor.magic();

  // Multiply the numerator (operand 0) by the magic value
  // FIXME: We should support doing a MUL in a wider type
  SDValue Q;
  if (IsAfterLegalization ? isOperationLegal(ISD::MULHS, VT) :
                            isOperationLegalOrCustom(ISD::MULHS, VT))
    Q = DAG.getNode(ISD::MULHS, dl, VT, N->getOperand(0),
                    DAG.getConstant(magics.m, dl, VT));
  else if (IsAfterLegalization ? isOperationLegal(ISD::SMUL_LOHI, VT) :
                                 isOperationLegalOrCustom(ISD::SMUL_LOHI, VT))
    Q = SDValue(DAG.getNode(ISD::SMUL_LOHI, dl, DAG.getVTList(VT, VT),
                            N->getOperand(0),
                            DAG.getConstant(magics.m, dl, VT)).getNode(), 1);
  else
2863 return SDValue(); // No mulhs or equvialent
2864 // If d > 0 and m < 0, add the numerator
2865 if (Divisor.isStrictlyPositive() && magics.m.isNegative()) {
2866 Q = DAG.getNode(ISD::ADD, dl, VT, Q, N->getOperand(0));
2867 Created->push_back(Q.getNode());
2868 }
2869 // If d < 0 and m > 0, subtract the numerator.
2870 if (Divisor.isNegative() && magics.m.isStrictlyPositive()) {
2871 Q = DAG.getNode(ISD::SUB, dl, VT, Q, N->getOperand(0));
2872 Created->push_back(Q.getNode());
2873 }
2874 auto &DL = DAG.getDataLayout();
2875 // Shift right algebraic if shift value is nonzero
2876 if (magics.s > 0) {
2877 Q = DAG.getNode(
2878 ISD::SRA, dl, VT, Q,
2879 DAG.getConstant(magics.s, dl, getShiftAmountTy(Q.getValueType(), DL)));
2880 Created->push_back(Q.getNode());
2881 }
2882 // Extract the sign bit and add it to the quotient
2883 SDValue T =
2884 DAG.getNode(ISD::SRL, dl, VT, Q,
2885 DAG.getConstant(VT.getScalarSizeInBits() - 1, dl,
2886 getShiftAmountTy(Q.getValueType(), DL)));
2887 Created->push_back(T.getNode());
2888 return DAG.getNode(ISD::ADD, dl, VT, Q, T);
2889 }
2890
2891 /// \brief Given an ISD::UDIV node expressing a divide by constant,
2892 /// return a DAG expression to select that will generate the same value by
2893 /// multiplying by a magic number.
2894 /// Ref: "Hacker's Delight" or "The PowerPC Compiler Writer's Guide".
BuildUDIV(SDNode * N,const APInt & Divisor,SelectionDAG & DAG,bool IsAfterLegalization,std::vector<SDNode * > * Created) const2895 SDValue TargetLowering::BuildUDIV(SDNode *N, const APInt &Divisor,
2896 SelectionDAG &DAG, bool IsAfterLegalization,
2897 std::vector<SDNode *> *Created) const {
2898 assert(Created && "No vector to hold udiv ops.");
2899
2900 EVT VT = N->getValueType(0);
2901 SDLoc dl(N);
2902 auto &DL = DAG.getDataLayout();
2903
2904 // Check to see if we can do this.
2905 // FIXME: We should be more aggressive here.
2906 if (!isTypeLegal(VT))
2907 return SDValue();
2908
2909 // FIXME: We should use a narrower constant when the upper
2910 // bits are known to be zero.
2911 APInt::mu magics = Divisor.magicu();
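  // A hand-checked example (not computed here): for an i32 divide by 7,
  // magicu() yields m = 0x24924925, s = 3 and a = 1 (the "add" fixup), so the
  // sequence built below is roughly:
  //   t = mulhu(n, m); q = (((n - t) >> 1) + t) >> 2;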

  SDValue Q = N->getOperand(0);

  // If the divisor is even, we can avoid using the expensive fixup by
  // shifting the dividend upfront.
  if (magics.a != 0 && !Divisor[0]) {
    unsigned Shift = Divisor.countTrailingZeros();
    Q = DAG.getNode(
        ISD::SRL, dl, VT, Q,
        DAG.getConstant(Shift, dl, getShiftAmountTy(Q.getValueType(), DL)));
    Created->push_back(Q.getNode());

    // Get the magic number for the shifted divisor.
    magics = Divisor.lshr(Shift).magicu(Shift);
    assert(magics.a == 0 && "Should use cheap fixup now");
  }

  // Multiply the numerator (operand 0) by the magic value.
  // FIXME: We should support doing a MUL in a wider type.
  if (IsAfterLegalization ? isOperationLegal(ISD::MULHU, VT) :
                            isOperationLegalOrCustom(ISD::MULHU, VT))
    Q = DAG.getNode(ISD::MULHU, dl, VT, Q, DAG.getConstant(magics.m, dl, VT));
  else if (IsAfterLegalization ? isOperationLegal(ISD::UMUL_LOHI, VT) :
                                 isOperationLegalOrCustom(ISD::UMUL_LOHI, VT))
    Q = SDValue(DAG.getNode(ISD::UMUL_LOHI, dl, DAG.getVTList(VT, VT), Q,
                            DAG.getConstant(magics.m, dl, VT)).getNode(), 1);
  else
    return SDValue(); // No mulhu or equivalent.

  Created->push_back(Q.getNode());

  if (magics.a == 0) {
    assert(magics.s < Divisor.getBitWidth() &&
           "We shouldn't generate an undefined shift!");
    return DAG.getNode(
        ISD::SRL, dl, VT, Q,
        DAG.getConstant(magics.s, dl, getShiftAmountTy(Q.getValueType(), DL)));
  } else {
    SDValue NPQ = DAG.getNode(ISD::SUB, dl, VT, N->getOperand(0), Q);
    Created->push_back(NPQ.getNode());
    NPQ = DAG.getNode(
        ISD::SRL, dl, VT, NPQ,
        DAG.getConstant(1, dl, getShiftAmountTy(NPQ.getValueType(), DL)));
    Created->push_back(NPQ.getNode());
    NPQ = DAG.getNode(ISD::ADD, dl, VT, NPQ, Q);
    Created->push_back(NPQ.getNode());
    return DAG.getNode(
        ISD::SRL, dl, VT, NPQ,
        DAG.getConstant(magics.s - 1, dl,
                        getShiftAmountTy(NPQ.getValueType(), DL)));
  }
}

bool TargetLowering::
verifyReturnAddressArgumentIsConstant(SDValue Op, SelectionDAG &DAG) const {
  if (!isa<ConstantSDNode>(Op.getOperand(0))) {
    DAG.getContext()->emitError("argument to '__builtin_return_address' must "
                                "be a constant integer");
    return true;
  }

  return false;
}

//===----------------------------------------------------------------------===//
// Legalization Utilities
//===----------------------------------------------------------------------===//

bool TargetLowering::expandMUL(SDNode *N, SDValue &Lo, SDValue &Hi, EVT HiLoVT,
                               SelectionDAG &DAG, SDValue LL, SDValue LH,
                               SDValue RL, SDValue RH) const {
  EVT VT = N->getValueType(0);
  SDLoc dl(N);

  bool HasMULHS = isOperationLegalOrCustom(ISD::MULHS, HiLoVT);
  bool HasMULHU = isOperationLegalOrCustom(ISD::MULHU, HiLoVT);
  bool HasSMUL_LOHI = isOperationLegalOrCustom(ISD::SMUL_LOHI, HiLoVT);
  bool HasUMUL_LOHI = isOperationLegalOrCustom(ISD::UMUL_LOHI, HiLoVT);
  if (HasMULHU || HasMULHS || HasUMUL_LOHI || HasSMUL_LOHI) {
    unsigned OuterBitSize = VT.getSizeInBits();
    unsigned InnerBitSize = HiLoVT.getSizeInBits();
    unsigned LHSSB = DAG.ComputeNumSignBits(N->getOperand(0));
    unsigned RHSSB = DAG.ComputeNumSignBits(N->getOperand(1));

    // LL, LH, RL, and RH must be either all NULL or all set to a value.
    assert((LL.getNode() && LH.getNode() && RL.getNode() && RH.getNode()) ||
           (!LL.getNode() && !LH.getNode() && !RL.getNode() && !RH.getNode()));

    if (!LL.getNode() && !RL.getNode() &&
        isOperationLegalOrCustom(ISD::TRUNCATE, HiLoVT)) {
      LL = DAG.getNode(ISD::TRUNCATE, dl, HiLoVT, N->getOperand(0));
      RL = DAG.getNode(ISD::TRUNCATE, dl, HiLoVT, N->getOperand(1));
    }

    if (!LL.getNode())
      return false;

    APInt HighMask = APInt::getHighBitsSet(OuterBitSize, InnerBitSize);
    if (DAG.MaskedValueIsZero(N->getOperand(0), HighMask) &&
        DAG.MaskedValueIsZero(N->getOperand(1), HighMask)) {
      // The inputs are both zero-extended.
      if (HasUMUL_LOHI) {
        // We can emit a umul_lohi.
        Lo = DAG.getNode(ISD::UMUL_LOHI, dl, DAG.getVTList(HiLoVT, HiLoVT), LL,
                         RL);
        Hi = SDValue(Lo.getNode(), 1);
        return true;
      }
      if (HasMULHU) {
        // We can emit a mulhu+mul.
        Lo = DAG.getNode(ISD::MUL, dl, HiLoVT, LL, RL);
        Hi = DAG.getNode(ISD::MULHU, dl, HiLoVT, LL, RL);
        return true;
      }
    }
    if (LHSSB > InnerBitSize && RHSSB > InnerBitSize) {
      // The input values are both sign-extended.
      if (HasSMUL_LOHI) {
        // We can emit a smul_lohi.
        Lo = DAG.getNode(ISD::SMUL_LOHI, dl, DAG.getVTList(HiLoVT, HiLoVT), LL,
                         RL);
        Hi = SDValue(Lo.getNode(), 1);
        return true;
      }
      if (HasMULHS) {
        // We can emit a mulhs+mul.
        Lo = DAG.getNode(ISD::MUL, dl, HiLoVT, LL, RL);
        Hi = DAG.getNode(ISD::MULHS, dl, HiLoVT, LL, RL);
        return true;
      }
    }

    if (!LH.getNode() && !RH.getNode() &&
        isOperationLegalOrCustom(ISD::SRL, VT) &&
        isOperationLegalOrCustom(ISD::TRUNCATE, HiLoVT)) {
      auto &DL = DAG.getDataLayout();
      unsigned ShiftAmt = VT.getSizeInBits() - HiLoVT.getSizeInBits();
      SDValue Shift = DAG.getConstant(ShiftAmt, dl, getShiftAmountTy(VT, DL));
      LH = DAG.getNode(ISD::SRL, dl, VT, N->getOperand(0), Shift);
      LH = DAG.getNode(ISD::TRUNCATE, dl, HiLoVT, LH);
      RH = DAG.getNode(ISD::SRL, dl, VT, N->getOperand(1), Shift);
      RH = DAG.getNode(ISD::TRUNCATE, dl, HiLoVT, RH);
    }

    if (!LH.getNode())
      return false;

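    // The general case below is schoolbook multiplication on half-width
    // digits: with b = InnerBitSize,
    //   (LH*2^b + LL) * (RH*2^b + RL)
    //     = LL*RL + (LL*RH + LH*RL)*2^b + LH*RH*2^(2b)
    // Only the low 2b bits of the product are needed, so LH*RH and the high
    // halves of the cross terms can be dropped.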
    if (HasUMUL_LOHI) {
      // Lo,Hi = umul LHS, RHS.
      SDValue UMulLOHI = DAG.getNode(ISD::UMUL_LOHI, dl,
                                     DAG.getVTList(HiLoVT, HiLoVT), LL, RL);
      Lo = UMulLOHI;
      Hi = UMulLOHI.getValue(1);
      RH = DAG.getNode(ISD::MUL, dl, HiLoVT, LL, RH);
      LH = DAG.getNode(ISD::MUL, dl, HiLoVT, LH, RL);
      Hi = DAG.getNode(ISD::ADD, dl, HiLoVT, Hi, RH);
      Hi = DAG.getNode(ISD::ADD, dl, HiLoVT, Hi, LH);
      return true;
    }
    if (HasMULHU) {
      Lo = DAG.getNode(ISD::MUL, dl, HiLoVT, LL, RL);
      Hi = DAG.getNode(ISD::MULHU, dl, HiLoVT, LL, RL);
      RH = DAG.getNode(ISD::MUL, dl, HiLoVT, LL, RH);
      LH = DAG.getNode(ISD::MUL, dl, HiLoVT, LH, RL);
      Hi = DAG.getNode(ISD::ADD, dl, HiLoVT, Hi, RH);
      Hi = DAG.getNode(ISD::ADD, dl, HiLoVT, Hi, LH);
      return true;
    }
  }
  return false;
}

bool TargetLowering::expandFP_TO_SINT(SDNode *Node, SDValue &Result,
                                      SelectionDAG &DAG) const {
  EVT VT = Node->getOperand(0).getValueType();
  EVT NVT = Node->getValueType(0);
  SDLoc dl(SDValue(Node, 0));

  // FIXME: Only f32 to i64 conversions are supported.
  if (VT != MVT::f32 || NVT != MVT::i64)
    return false;

  // Expand the f32 -> i64 conversion.
  // This algorithm comes from compiler-rt's implementation of fixsfdi:
  // https://github.com/llvm-mirror/compiler-rt/blob/master/lib/builtins/fixsfdi.c
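  // IEEE-754 binary32 layout, which the masks below pick apart: 1 sign bit,
  // 8 exponent bits (biased by 127) and a 23-bit mantissa with an implicit
  // leading 1 (restored via the 0x00800000 OR further down).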
  EVT IntVT = EVT::getIntegerVT(*DAG.getContext(),
                                VT.getSizeInBits());
  SDValue ExponentMask = DAG.getConstant(0x7F800000, dl, IntVT);
  SDValue ExponentLoBit = DAG.getConstant(23, dl, IntVT);
  SDValue Bias = DAG.getConstant(127, dl, IntVT);
  SDValue SignMask = DAG.getConstant(APInt::getSignBit(VT.getSizeInBits()), dl,
                                     IntVT);
  SDValue SignLowBit = DAG.getConstant(VT.getSizeInBits() - 1, dl, IntVT);
  SDValue MantissaMask = DAG.getConstant(0x007FFFFF, dl, IntVT);

  SDValue Bits = DAG.getNode(ISD::BITCAST, dl, IntVT, Node->getOperand(0));

  auto &DL = DAG.getDataLayout();
  SDValue ExponentBits = DAG.getNode(
      ISD::SRL, dl, IntVT, DAG.getNode(ISD::AND, dl, IntVT, Bits, ExponentMask),
      DAG.getZExtOrTrunc(ExponentLoBit, dl, getShiftAmountTy(IntVT, DL)));
  SDValue Exponent = DAG.getNode(ISD::SUB, dl, IntVT, ExponentBits, Bias);

  SDValue Sign = DAG.getNode(
      ISD::SRA, dl, IntVT, DAG.getNode(ISD::AND, dl, IntVT, Bits, SignMask),
      DAG.getZExtOrTrunc(SignLowBit, dl, getShiftAmountTy(IntVT, DL)));
  Sign = DAG.getSExtOrTrunc(Sign, dl, NVT);

  SDValue R = DAG.getNode(ISD::OR, dl, IntVT,
                          DAG.getNode(ISD::AND, dl, IntVT, Bits, MantissaMask),
                          DAG.getConstant(0x00800000, dl, IntVT));

  R = DAG.getZExtOrTrunc(R, dl, NVT);

  R = DAG.getSelectCC(
      dl, Exponent, ExponentLoBit,
      DAG.getNode(ISD::SHL, dl, NVT, R,
                  DAG.getZExtOrTrunc(
                      DAG.getNode(ISD::SUB, dl, IntVT, Exponent, ExponentLoBit),
                      dl, getShiftAmountTy(IntVT, DL))),
      DAG.getNode(ISD::SRL, dl, NVT, R,
                  DAG.getZExtOrTrunc(
                      DAG.getNode(ISD::SUB, dl, IntVT, ExponentLoBit, Exponent),
                      dl, getShiftAmountTy(IntVT, DL))),
      ISD::SETGT);

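  // Apply the sign branchlessly: Sign is either 0 or -1 after the arithmetic
  // shift above, and (R ^ Sign) - Sign negates R exactly when Sign is -1.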
  SDValue Ret = DAG.getNode(ISD::SUB, dl, NVT,
                            DAG.getNode(ISD::XOR, dl, NVT, R, Sign),
                            Sign);

  Result = DAG.getSelectCC(dl, Exponent, DAG.getConstant(0, dl, IntVT),
                           DAG.getConstant(0, dl, NVT), Ret, ISD::SETLT);
  return true;
}

SDValue TargetLowering::scalarizeVectorLoad(LoadSDNode *LD,
                                            SelectionDAG &DAG) const {
  SDLoc SL(LD);
  SDValue Chain = LD->getChain();
  SDValue BasePTR = LD->getBasePtr();
  EVT SrcVT = LD->getMemoryVT();
  ISD::LoadExtType ExtType = LD->getExtensionType();

  unsigned NumElem = SrcVT.getVectorNumElements();

  EVT SrcEltVT = SrcVT.getScalarType();
  EVT DstEltVT = LD->getValueType(0).getScalarType();

  unsigned Stride = SrcEltVT.getSizeInBits() / 8;
  assert(SrcEltVT.isByteSized());

  EVT PtrVT = BasePTR.getValueType();

  SmallVector<SDValue, 8> Vals;
  SmallVector<SDValue, 8> LoadChains;

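  // Emit one extending scalar load per element, bumping the pointer by the
  // in-memory element size each time; the values are rebuilt into a vector
  // and the chains joined with a TokenFactor below.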
  for (unsigned Idx = 0; Idx < NumElem; ++Idx) {
    SDValue ScalarLoad = DAG.getExtLoad(
        ExtType, SL, DstEltVT,
        Chain, BasePTR, LD->getPointerInfo().getWithOffset(Idx * Stride),
        SrcEltVT,
        LD->isVolatile(), LD->isNonTemporal(), LD->isInvariant(),
        MinAlign(LD->getAlignment(), Idx * Stride), LD->getAAInfo());

    BasePTR = DAG.getNode(ISD::ADD, SL, PtrVT, BasePTR,
                          DAG.getConstant(Stride, SL, PtrVT));

    Vals.push_back(ScalarLoad.getValue(0));
    LoadChains.push_back(ScalarLoad.getValue(1));
  }

  SDValue NewChain = DAG.getNode(ISD::TokenFactor, SL, MVT::Other, LoadChains);
  SDValue Value = DAG.getNode(ISD::BUILD_VECTOR, SL, LD->getValueType(0), Vals);

  return DAG.getMergeValues({ Value, NewChain }, SL);
}

// FIXME: This relies on each element having a byte size, otherwise the stride
// is 0 and just overwrites the same location. ExpandStore currently expects
// this broken behavior.
SDValue TargetLowering::scalarizeVectorStore(StoreSDNode *ST,
                                             SelectionDAG &DAG) const {
  SDLoc SL(ST);

  SDValue Chain = ST->getChain();
  SDValue BasePtr = ST->getBasePtr();
  SDValue Value = ST->getValue();
  EVT StVT = ST->getMemoryVT();

  unsigned Alignment = ST->getAlignment();
  bool isVolatile = ST->isVolatile();
  bool isNonTemporal = ST->isNonTemporal();
  AAMDNodes AAInfo = ST->getAAInfo();

  // The type of the data we want to save.
  EVT RegVT = Value.getValueType();
  EVT RegSclVT = RegVT.getScalarType();

  // The type of the data as saved in memory.
  EVT MemSclVT = StVT.getScalarType();

  EVT PtrVT = BasePtr.getValueType();

  // The store stride in bytes.
  unsigned Stride = MemSclVT.getSizeInBits() / 8;
  EVT IdxVT = getVectorIdxTy(DAG.getDataLayout());
  unsigned NumElem = StVT.getVectorNumElements();

  // Extract each of the elements from the original vector and save them into
  // memory individually.
  SmallVector<SDValue, 8> Stores;
  for (unsigned Idx = 0; Idx < NumElem; ++Idx) {
    SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, RegSclVT, Value,
                              DAG.getConstant(Idx, SL, IdxVT));

    SDValue Ptr = DAG.getNode(ISD::ADD, SL, PtrVT, BasePtr,
                              DAG.getConstant(Idx * Stride, SL, PtrVT));

    // This scalar TruncStore may be illegal, but we legalize it later.
    SDValue Store = DAG.getTruncStore(
        Chain, SL, Elt, Ptr,
        ST->getPointerInfo().getWithOffset(Idx * Stride), MemSclVT,
        isVolatile, isNonTemporal, MinAlign(Alignment, Idx * Stride),
        AAInfo);

    Stores.push_back(Store);
  }

  return DAG.getNode(ISD::TokenFactor, SL, MVT::Other, Stores);
}

std::pair<SDValue, SDValue>
TargetLowering::expandUnalignedLoad(LoadSDNode *LD, SelectionDAG &DAG) const {
  assert(LD->getAddressingMode() == ISD::UNINDEXED &&
         "unaligned indexed loads not implemented!");
  SDValue Chain = LD->getChain();
  SDValue Ptr = LD->getBasePtr();
  EVT VT = LD->getValueType(0);
  EVT LoadedVT = LD->getMemoryVT();
  SDLoc dl(LD);
  if (VT.isFloatingPoint() || VT.isVector()) {
    EVT intVT = EVT::getIntegerVT(*DAG.getContext(), LoadedVT.getSizeInBits());
    if (isTypeLegal(intVT) && isTypeLegal(LoadedVT)) {
      if (!isOperationLegalOrCustom(ISD::LOAD, intVT)) {
        // Scalarize the load and let the individual components be handled.
        SDValue Scalarized = scalarizeVectorLoad(LD, DAG);
        return std::make_pair(Scalarized.getValue(0), Scalarized.getValue(1));
      }

      // Expand to a (misaligned) integer load of the same size,
      // then bitconvert to floating point or vector.
      SDValue newLoad = DAG.getLoad(intVT, dl, Chain, Ptr,
                                    LD->getMemOperand());
      SDValue Result = DAG.getNode(ISD::BITCAST, dl, LoadedVT, newLoad);
      if (LoadedVT != VT)
        Result = DAG.getNode(VT.isFloatingPoint() ? ISD::FP_EXTEND :
                             ISD::ANY_EXTEND, dl, VT, Result);

      return std::make_pair(Result, newLoad.getValue(1));
    }

    // Copy the value to an (aligned) stack slot using (unaligned) integer
    // loads and stores, then do an (aligned) load from the stack slot.
    MVT RegVT = getRegisterType(*DAG.getContext(), intVT);
    unsigned LoadedBytes = LoadedVT.getSizeInBits() / 8;
    unsigned RegBytes = RegVT.getSizeInBits() / 8;
    unsigned NumRegs = (LoadedBytes + RegBytes - 1) / RegBytes;
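    // For instance (a hand-worked case, not special-cased here), an f80 load
    // with 4-byte registers has LoadedBytes = 10 and NumRegs = 3: two full
    // 4-byte copies below, then one 2-byte extending copy for the tail.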

    // Make sure the stack slot is also aligned for the register type.
    SDValue StackBase = DAG.CreateStackTemporary(LoadedVT, RegVT);

    SmallVector<SDValue, 8> Stores;
    SDValue StackPtr = StackBase;
    unsigned Offset = 0;

    EVT PtrVT = Ptr.getValueType();
    EVT StackPtrVT = StackPtr.getValueType();

    SDValue PtrIncrement = DAG.getConstant(RegBytes, dl, PtrVT);
    SDValue StackPtrIncrement = DAG.getConstant(RegBytes, dl, StackPtrVT);

    // Do all but one of the copies using the full register width.
    for (unsigned i = 1; i < NumRegs; i++) {
      // Load one integer register's worth from the original location.
      SDValue Load = DAG.getLoad(RegVT, dl, Chain, Ptr,
                                 LD->getPointerInfo().getWithOffset(Offset),
                                 LD->isVolatile(), LD->isNonTemporal(),
                                 LD->isInvariant(),
                                 MinAlign(LD->getAlignment(), Offset),
                                 LD->getAAInfo());
      // Follow the load with a store to the stack slot. Remember the store.
      Stores.push_back(DAG.getStore(Load.getValue(1), dl, Load, StackPtr,
                                    MachinePointerInfo(), false, false, 0));
      // Increment the pointers.
      Offset += RegBytes;
      Ptr = DAG.getNode(ISD::ADD, dl, PtrVT, Ptr, PtrIncrement);
      StackPtr = DAG.getNode(ISD::ADD, dl, StackPtrVT, StackPtr,
                             StackPtrIncrement);
    }

    // The last copy may be partial. Do an extending load.
    EVT MemVT = EVT::getIntegerVT(*DAG.getContext(),
                                  8 * (LoadedBytes - Offset));
    SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, RegVT, Chain, Ptr,
                                  LD->getPointerInfo().getWithOffset(Offset),
                                  MemVT, LD->isVolatile(),
                                  LD->isNonTemporal(),
                                  LD->isInvariant(),
                                  MinAlign(LD->getAlignment(), Offset),
                                  LD->getAAInfo());
    // Follow the load with a store to the stack slot. Remember the store.
    // On big-endian machines this requires a truncating store to ensure
    // that the bits end up in the right place.
    Stores.push_back(DAG.getTruncStore(Load.getValue(1), dl, Load, StackPtr,
                                       MachinePointerInfo(), MemVT,
                                       false, false, 0));

    // The order of the stores doesn't matter - say it with a TokenFactor.
    SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Stores);

    // Finally, perform the original load only redirected to the stack slot.
    Load = DAG.getExtLoad(LD->getExtensionType(), dl, VT, TF, StackBase,
                          MachinePointerInfo(), LoadedVT, false, false, false,
                          0);

    // Callers expect a MERGE_VALUES node.
    return std::make_pair(Load, TF);
  }

  assert(LoadedVT.isInteger() && !LoadedVT.isVector() &&
         "Unaligned load of unsupported type.");

  // Compute the new VT that is half the size of the old one. This is an
  // integer MVT.
  unsigned NumBits = LoadedVT.getSizeInBits();
  EVT NewLoadedVT;
  NewLoadedVT = EVT::getIntegerVT(*DAG.getContext(), NumBits / 2);
  NumBits >>= 1;

  unsigned Alignment = LD->getAlignment();
  unsigned IncrementSize = NumBits / 8;
  ISD::LoadExtType HiExtType = LD->getExtensionType();

  // If the original load is NON_EXTLOAD, the hi part load must be ZEXTLOAD.
  if (HiExtType == ISD::NON_EXTLOAD)
    HiExtType = ISD::ZEXTLOAD;
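  // For example, an unaligned i32 load becomes two half-width (i16) loads;
  // the half holding the high bits is extended as needed, shifted into place
  // below, and OR'd with the zero-extended low half.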

  // Load the value in two parts.
  SDValue Lo, Hi;
  if (DAG.getDataLayout().isLittleEndian()) {
    Lo = DAG.getExtLoad(ISD::ZEXTLOAD, dl, VT, Chain, Ptr, LD->getPointerInfo(),
                        NewLoadedVT, LD->isVolatile(),
                        LD->isNonTemporal(), LD->isInvariant(), Alignment,
                        LD->getAAInfo());
    Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr,
                      DAG.getConstant(IncrementSize, dl, Ptr.getValueType()));
    Hi = DAG.getExtLoad(HiExtType, dl, VT, Chain, Ptr,
                        LD->getPointerInfo().getWithOffset(IncrementSize),
                        NewLoadedVT, LD->isVolatile(),
                        LD->isNonTemporal(), LD->isInvariant(),
                        MinAlign(Alignment, IncrementSize), LD->getAAInfo());
  } else {
    Hi = DAG.getExtLoad(HiExtType, dl, VT, Chain, Ptr, LD->getPointerInfo(),
                        NewLoadedVT, LD->isVolatile(),
                        LD->isNonTemporal(), LD->isInvariant(), Alignment,
                        LD->getAAInfo());
    Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr,
                      DAG.getConstant(IncrementSize, dl, Ptr.getValueType()));
    Lo = DAG.getExtLoad(ISD::ZEXTLOAD, dl, VT, Chain, Ptr,
                        LD->getPointerInfo().getWithOffset(IncrementSize),
                        NewLoadedVT, LD->isVolatile(),
                        LD->isNonTemporal(), LD->isInvariant(),
                        MinAlign(Alignment, IncrementSize), LD->getAAInfo());
  }

  // Aggregate the two parts.
  SDValue ShiftAmount =
      DAG.getConstant(NumBits, dl, getShiftAmountTy(Hi.getValueType(),
                                                    DAG.getDataLayout()));
  SDValue Result = DAG.getNode(ISD::SHL, dl, VT, Hi, ShiftAmount);
  Result = DAG.getNode(ISD::OR, dl, VT, Result, Lo);

  SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo.getValue(1),
                           Hi.getValue(1));

  return std::make_pair(Result, TF);
}

SDValue TargetLowering::expandUnalignedStore(StoreSDNode *ST,
                                             SelectionDAG &DAG) const {
  assert(ST->getAddressingMode() == ISD::UNINDEXED &&
         "unaligned indexed stores not implemented!");
  SDValue Chain = ST->getChain();
  SDValue Ptr = ST->getBasePtr();
  SDValue Val = ST->getValue();
  EVT VT = Val.getValueType();
  int Alignment = ST->getAlignment();

  SDLoc dl(ST);
  if (ST->getMemoryVT().isFloatingPoint() ||
      ST->getMemoryVT().isVector()) {
    EVT intVT = EVT::getIntegerVT(*DAG.getContext(), VT.getSizeInBits());
    if (isTypeLegal(intVT)) {
      if (!isOperationLegalOrCustom(ISD::STORE, intVT)) {
        // Scalarize the store and let the individual components be handled.
        SDValue Result = scalarizeVectorStore(ST, DAG);

        return Result;
      }
      // Expand to a bitconvert of the value to the integer type of the
      // same size, then a (misaligned) int store.
      // FIXME: Does not handle truncating floating point stores!
      SDValue Result = DAG.getNode(ISD::BITCAST, dl, intVT, Val);
      Result = DAG.getStore(Chain, dl, Result, Ptr, ST->getPointerInfo(),
                            ST->isVolatile(), ST->isNonTemporal(), Alignment);
      return Result;
    }
    // Do an (aligned) store to a stack slot, then copy from the stack slot
    // to the final destination using (unaligned) integer loads and stores.
    EVT StoredVT = ST->getMemoryVT();
    MVT RegVT =
        getRegisterType(*DAG.getContext(),
                        EVT::getIntegerVT(*DAG.getContext(),
                                          StoredVT.getSizeInBits()));
    EVT PtrVT = Ptr.getValueType();
    unsigned StoredBytes = StoredVT.getSizeInBits() / 8;
    unsigned RegBytes = RegVT.getSizeInBits() / 8;
    unsigned NumRegs = (StoredBytes + RegBytes - 1) / RegBytes;

    // Make sure the stack slot is also aligned for the register type.
    SDValue StackPtr = DAG.CreateStackTemporary(StoredVT, RegVT);

    // Perform the original store, only redirected to the stack slot.
    SDValue Store = DAG.getTruncStore(Chain, dl,
                                      Val, StackPtr, MachinePointerInfo(),
                                      StoredVT, false, false, 0);

    EVT StackPtrVT = StackPtr.getValueType();

    SDValue PtrIncrement = DAG.getConstant(RegBytes, dl, PtrVT);
    SDValue StackPtrIncrement = DAG.getConstant(RegBytes, dl, StackPtrVT);
    SmallVector<SDValue, 8> Stores;
    unsigned Offset = 0;

    // Do all but one of the copies using the full register width.
    for (unsigned i = 1; i < NumRegs; i++) {
      // Load one integer register's worth from the stack slot.
      SDValue Load = DAG.getLoad(RegVT, dl, Store, StackPtr,
                                 MachinePointerInfo(),
                                 false, false, false, 0);
      // Store it to the final location. Remember the store.
      Stores.push_back(DAG.getStore(Load.getValue(1), dl, Load, Ptr,
                                    ST->getPointerInfo().getWithOffset(Offset),
                                    ST->isVolatile(), ST->isNonTemporal(),
                                    MinAlign(ST->getAlignment(), Offset)));
      // Increment the pointers.
      Offset += RegBytes;
      StackPtr = DAG.getNode(ISD::ADD, dl, StackPtrVT,
                             StackPtr, StackPtrIncrement);
      Ptr = DAG.getNode(ISD::ADD, dl, PtrVT, Ptr, PtrIncrement);
    }

    // The last store may be partial. Do a truncating store. On big-endian
    // machines this requires an extending load from the stack slot to ensure
    // that the bits are in the right place.
    EVT MemVT = EVT::getIntegerVT(*DAG.getContext(),
                                  8 * (StoredBytes - Offset));

    // Load from the stack slot.
    SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, RegVT, Store, StackPtr,
                                  MachinePointerInfo(),
                                  MemVT, false, false, false, 0);

    Stores.push_back(DAG.getTruncStore(Load.getValue(1), dl, Load, Ptr,
                                       ST->getPointerInfo()
                                         .getWithOffset(Offset),
                                       MemVT, ST->isVolatile(),
                                       ST->isNonTemporal(),
                                       MinAlign(ST->getAlignment(), Offset),
                                       ST->getAAInfo()));
    // The order of the stores doesn't matter - say it with a TokenFactor.
    SDValue Result = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Stores);
    return Result;
  }

  assert(ST->getMemoryVT().isInteger() &&
         !ST->getMemoryVT().isVector() &&
         "Unaligned store of unknown type.");
  // Get the half-size VT.
  EVT NewStoredVT = ST->getMemoryVT().getHalfSizedIntegerVT(*DAG.getContext());
  int NumBits = NewStoredVT.getSizeInBits();
  int IncrementSize = NumBits / 8;

  // Divide the stored value in two parts.
  SDValue ShiftAmount =
      DAG.getConstant(NumBits, dl, getShiftAmountTy(Val.getValueType(),
                                                    DAG.getDataLayout()));
  SDValue Lo = Val;
  SDValue Hi = DAG.getNode(ISD::SRL, dl, VT, Val, ShiftAmount);
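  // For example, an unaligned i32 store becomes two i16 stores: on a
  // little-endian target the low half goes to the original address and the
  // high half to the address IncrementSize bytes past it, and vice versa on
  // a big-endian target.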

  // Store the two parts.
  SDValue Store1, Store2;
  Store1 = DAG.getTruncStore(Chain, dl,
                             DAG.getDataLayout().isLittleEndian() ? Lo : Hi,
                             Ptr, ST->getPointerInfo(), NewStoredVT,
                             ST->isVolatile(), ST->isNonTemporal(), Alignment);

  EVT PtrVT = Ptr.getValueType();
  Ptr = DAG.getNode(ISD::ADD, dl, PtrVT, Ptr,
                    DAG.getConstant(IncrementSize, dl, PtrVT));
  Alignment = MinAlign(Alignment, IncrementSize);
  Store2 = DAG.getTruncStore(
      Chain, dl, DAG.getDataLayout().isLittleEndian() ? Hi : Lo, Ptr,
      ST->getPointerInfo().getWithOffset(IncrementSize), NewStoredVT,
      ST->isVolatile(), ST->isNonTemporal(), Alignment, ST->getAAInfo());

  SDValue Result =
      DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Store1, Store2);
  return Result;
}

//===----------------------------------------------------------------------===//
// Implementation of Emulated TLS Model
//===----------------------------------------------------------------------===//

SDValue TargetLowering::LowerToTLSEmulatedModel(const GlobalAddressSDNode *GA,
                                                SelectionDAG &DAG) const {
  // Access to the address of a TLS variable xyz is lowered to a function call:
  //   __emutls_get_address(address of global variable named "__emutls_v.xyz")
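  // An illustrative C-level sketch of what is built here (the actual IR
  // symbol is named "__emutls_v.xyz", which is not valid C):
  //   extern char __emutls_v_xyz;
  //   void *addr = __emutls_get_address(&__emutls_v_xyz);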
  EVT PtrVT = getPointerTy(DAG.getDataLayout());
  PointerType *VoidPtrType = Type::getInt8PtrTy(*DAG.getContext());
  SDLoc dl(GA);

  ArgListTy Args;
  ArgListEntry Entry;
  std::string NameString = ("__emutls_v." + GA->getGlobal()->getName()).str();
  Module *VariableModule = const_cast<Module*>(GA->getGlobal()->getParent());
  StringRef EmuTlsVarName(NameString);
  GlobalVariable *EmuTlsVar = VariableModule->getNamedGlobal(EmuTlsVarName);
  assert(EmuTlsVar && "Cannot find EmuTlsVar");
  Entry.Node = DAG.getGlobalAddress(EmuTlsVar, dl, PtrVT);
  Entry.Ty = VoidPtrType;
  Args.push_back(Entry);

  SDValue EmuTlsGetAddr = DAG.getExternalSymbol("__emutls_get_address", PtrVT);

  TargetLowering::CallLoweringInfo CLI(DAG);
  CLI.setDebugLoc(dl).setChain(DAG.getEntryNode());
  CLI.setCallee(CallingConv::C, VoidPtrType, EmuTlsGetAddr, std::move(Args));
  std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);

  // TLSADDR will be codegen'ed as a call. Inform MFI that this function has
  // calls. This matters at least for X86 targets, and may be good for others.
  MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
  MFI->setAdjustsStack(true); // Is this only needed for the X86 target?
  MFI->setHasCalls(true);

  assert((GA->getOffset() == 0) &&
         "Emulated TLS must have zero offset in GlobalAddressSDNode");
  return CallResult.first;
}