//===-- TargetLowering.cpp - Implement the TargetLowering class -----------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements the TargetLowering class.
//
//===----------------------------------------------------------------------===//

#include "llvm/Target/TargetLowering.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetLoweringObjectFile.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include <cctype>
using namespace llvm;

/// NOTE: The constructor takes ownership of TLOF.
TargetLowering::TargetLowering(const TargetMachine &tm,
                               const TargetLoweringObjectFile *tlof)
  : TargetLoweringBase(tm, tlof) {}

const char *TargetLowering::getTargetNodeName(unsigned Opcode) const {
  return nullptr;
}

/// Check whether a given call node is in tail position within its function. If
/// so, it sets Chain to the input chain of the tail call.
bool TargetLowering::isInTailCallPosition(SelectionDAG &DAG, SDNode *Node,
                                          SDValue &Chain) const {
  const Function *F = DAG.getMachineFunction().getFunction();

  // Conservatively require the attributes of the call to match those of
  // the return. Ignore noalias because it doesn't affect the call sequence.
  AttributeSet CallerAttrs = F->getAttributes();
  if (AttrBuilder(CallerAttrs, AttributeSet::ReturnIndex)
        .removeAttribute(Attribute::NoAlias).hasAttributes())
    return false;

  // It's not safe to eliminate the sign / zero extension of the return value.
  if (CallerAttrs.hasAttribute(AttributeSet::ReturnIndex, Attribute::ZExt) ||
      CallerAttrs.hasAttribute(AttributeSet::ReturnIndex, Attribute::SExt))
    return false;

  // Check if the only use is a function return node.
  return isUsedByReturnOnly(Node, Chain);
}

/// \brief Set CallLoweringInfo attribute flags based on a call instruction
/// and called function attributes.
void TargetLowering::ArgListEntry::setAttributes(ImmutableCallSite *CS,
                                                 unsigned AttrIdx) {
  isSExt = CS->paramHasAttr(AttrIdx, Attribute::SExt);
  isZExt = CS->paramHasAttr(AttrIdx, Attribute::ZExt);
  isInReg = CS->paramHasAttr(AttrIdx, Attribute::InReg);
  isSRet = CS->paramHasAttr(AttrIdx, Attribute::StructRet);
  isNest = CS->paramHasAttr(AttrIdx, Attribute::Nest);
  isByVal = CS->paramHasAttr(AttrIdx, Attribute::ByVal);
  isInAlloca = CS->paramHasAttr(AttrIdx, Attribute::InAlloca);
  isReturned = CS->paramHasAttr(AttrIdx, Attribute::Returned);
  Alignment = CS->getParamAlignment(AttrIdx);
}

/// Generate a libcall taking the given operands as arguments and returning a
/// result of type RetVT.
std::pair<SDValue, SDValue>
TargetLowering::makeLibCall(SelectionDAG &DAG,
                            RTLIB::Libcall LC, EVT RetVT,
                            const SDValue *Ops, unsigned NumOps,
                            bool isSigned, SDLoc dl,
                            bool doesNotReturn,
                            bool isReturnValueUsed) const {
  TargetLowering::ArgListTy Args;
  Args.reserve(NumOps);

  TargetLowering::ArgListEntry Entry;
  for (unsigned i = 0; i != NumOps; ++i) {
    Entry.Node = Ops[i];
    Entry.Ty = Entry.Node.getValueType().getTypeForEVT(*DAG.getContext());
    Entry.isSExt = isSigned;
    Entry.isZExt = !isSigned;
    Args.push_back(Entry);
  }
  SDValue Callee = DAG.getExternalSymbol(getLibcallName(LC), getPointerTy());

  Type *RetTy = RetVT.getTypeForEVT(*DAG.getContext());
  TargetLowering::CallLoweringInfo CLI(DAG);
  CLI.setDebugLoc(dl).setChain(DAG.getEntryNode())
    .setCallee(getLibcallCallingConv(LC), RetTy, Callee, std::move(Args), 0)
    .setNoReturn(doesNotReturn).setDiscardResult(!isReturnValueUsed)
    .setSExtResult(isSigned).setZExtResult(!isSigned);
  return LowerCallTo(CLI);
}


/// SoftenSetCCOperands - Soften the operands of a comparison. This code is
/// shared among BR_CC, SELECT_CC, and SETCC handlers.
void TargetLowering::softenSetCCOperands(SelectionDAG &DAG, EVT VT,
                                         SDValue &NewLHS, SDValue &NewRHS,
                                         ISD::CondCode &CCCode,
                                         SDLoc dl) const {
  assert((VT == MVT::f32 || VT == MVT::f64 || VT == MVT::f128)
         && "Unsupported setcc type!");

  // Expand into one or more soft-fp libcall(s).
  RTLIB::Libcall LC1 = RTLIB::UNKNOWN_LIBCALL, LC2 = RTLIB::UNKNOWN_LIBCALL;
  switch (CCCode) {
  case ISD::SETEQ:
  case ISD::SETOEQ:
    LC1 = (VT == MVT::f32) ? RTLIB::OEQ_F32 :
          (VT == MVT::f64) ? RTLIB::OEQ_F64 : RTLIB::OEQ_F128;
    break;
  case ISD::SETNE:
  case ISD::SETUNE:
    LC1 = (VT == MVT::f32) ? RTLIB::UNE_F32 :
          (VT == MVT::f64) ? RTLIB::UNE_F64 : RTLIB::UNE_F128;
    break;
  case ISD::SETGE:
  case ISD::SETOGE:
    LC1 = (VT == MVT::f32) ? RTLIB::OGE_F32 :
          (VT == MVT::f64) ? RTLIB::OGE_F64 : RTLIB::OGE_F128;
    break;
  case ISD::SETLT:
  case ISD::SETOLT:
    LC1 = (VT == MVT::f32) ? RTLIB::OLT_F32 :
          (VT == MVT::f64) ? RTLIB::OLT_F64 : RTLIB::OLT_F128;
    break;
  case ISD::SETLE:
  case ISD::SETOLE:
    LC1 = (VT == MVT::f32) ? RTLIB::OLE_F32 :
          (VT == MVT::f64) ? RTLIB::OLE_F64 : RTLIB::OLE_F128;
    break;
  case ISD::SETGT:
  case ISD::SETOGT:
    LC1 = (VT == MVT::f32) ? RTLIB::OGT_F32 :
          (VT == MVT::f64) ? RTLIB::OGT_F64 : RTLIB::OGT_F128;
    break;
  case ISD::SETUO:
    LC1 = (VT == MVT::f32) ? RTLIB::UO_F32 :
          (VT == MVT::f64) ? RTLIB::UO_F64 : RTLIB::UO_F128;
    break;
  case ISD::SETO:
    LC1 = (VT == MVT::f32) ? RTLIB::O_F32 :
          (VT == MVT::f64) ? RTLIB::O_F64 : RTLIB::O_F128;
    break;
  default:
    LC1 = (VT == MVT::f32) ? RTLIB::UO_F32 :
          (VT == MVT::f64) ? RTLIB::UO_F64 : RTLIB::UO_F128;
    switch (CCCode) {
    case ISD::SETONE:
      // SETONE = SETOLT | SETOGT
      LC1 = (VT == MVT::f32) ? RTLIB::OLT_F32 :
            (VT == MVT::f64) ? RTLIB::OLT_F64 : RTLIB::OLT_F128;
      // Fallthrough
    case ISD::SETUGT:
      LC2 = (VT == MVT::f32) ? RTLIB::OGT_F32 :
            (VT == MVT::f64) ? RTLIB::OGT_F64 : RTLIB::OGT_F128;
      break;
    case ISD::SETUGE:
      LC2 = (VT == MVT::f32) ? RTLIB::OGE_F32 :
            (VT == MVT::f64) ? RTLIB::OGE_F64 : RTLIB::OGE_F128;
      break;
    case ISD::SETULT:
      LC2 = (VT == MVT::f32) ? RTLIB::OLT_F32 :
            (VT == MVT::f64) ? RTLIB::OLT_F64 : RTLIB::OLT_F128;
      break;
    case ISD::SETULE:
      LC2 = (VT == MVT::f32) ? RTLIB::OLE_F32 :
            (VT == MVT::f64) ? RTLIB::OLE_F64 : RTLIB::OLE_F128;
      break;
    case ISD::SETUEQ:
      LC2 = (VT == MVT::f32) ? RTLIB::OEQ_F32 :
            (VT == MVT::f64) ? RTLIB::OEQ_F64 : RTLIB::OEQ_F128;
      break;
    default: llvm_unreachable("Do not know how to soften this setcc!");
    }
  }

  // Use the target specific return value for comparison lib calls.
  EVT RetVT = getCmpLibcallReturnType();
  SDValue Ops[2] = { NewLHS, NewRHS };
  NewLHS = makeLibCall(DAG, LC1, RetVT, Ops, 2, false/*sign irrelevant*/,
                       dl).first;
  NewRHS = DAG.getConstant(0, RetVT);
  CCCode = getCmpLibcallCC(LC1);
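  // If a second libcall is needed (e.g. for SETONE or the unordered
  // predicates above), emit it as well and OR the two boolean results.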
  if (LC2 != RTLIB::UNKNOWN_LIBCALL) {
    SDValue Tmp = DAG.getNode(ISD::SETCC, dl,
                              getSetCCResultType(*DAG.getContext(), RetVT),
                              NewLHS, NewRHS, DAG.getCondCode(CCCode));
    NewLHS = makeLibCall(DAG, LC2, RetVT, Ops, 2, false/*sign irrelevant*/,
                         dl).first;
    NewLHS = DAG.getNode(ISD::SETCC, dl,
                         getSetCCResultType(*DAG.getContext(), RetVT), NewLHS,
                         NewRHS, DAG.getCondCode(getCmpLibcallCC(LC2)));
    NewLHS = DAG.getNode(ISD::OR, dl, Tmp.getValueType(), Tmp, NewLHS);
    NewRHS = SDValue();
  }
}

/// getJumpTableEncoding - Return the entry encoding for a jump table in the
/// current function. The returned value is a member of the
/// MachineJumpTableInfo::JTEntryKind enum.
unsigned TargetLowering::getJumpTableEncoding() const {
  // In non-pic modes, just use the address of a block.
  if (getTargetMachine().getRelocationModel() != Reloc::PIC_)
    return MachineJumpTableInfo::EK_BlockAddress;

  // In PIC mode, if the target supports a GPRel32 directive, use it.
  if (getTargetMachine().getMCAsmInfo()->getGPRel32Directive() != nullptr)
    return MachineJumpTableInfo::EK_GPRel32BlockAddress;

  // Otherwise, use a label difference.
  return MachineJumpTableInfo::EK_LabelDifference32;
}

SDValue TargetLowering::getPICJumpTableRelocBase(SDValue Table,
                                                 SelectionDAG &DAG) const {
  // If our PIC model is GP relative, use the global offset table as the base.
  unsigned JTEncoding = getJumpTableEncoding();

  if ((JTEncoding == MachineJumpTableInfo::EK_GPRel64BlockAddress) ||
      (JTEncoding == MachineJumpTableInfo::EK_GPRel32BlockAddress))
    return DAG.getGLOBAL_OFFSET_TABLE(getPointerTy(0));

  return Table;
}

/// getPICJumpTableRelocBaseExpr - This returns the relocation base for the
/// given PIC jumptable, the same as getPICJumpTableRelocBase, but as an
/// MCExpr.
const MCExpr *
TargetLowering::getPICJumpTableRelocBaseExpr(const MachineFunction *MF,
                                             unsigned JTI, MCContext &Ctx) const {
  // The normal PIC reloc base is the label at the start of the jump table.
  return MCSymbolRefExpr::Create(MF->getJTISymbol(JTI, Ctx), Ctx);
}

bool
TargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
  // Assume that everything is safe in static mode.
  if (getTargetMachine().getRelocationModel() == Reloc::Static)
    return true;

  // In dynamic-no-pic mode, assume that known defined values are safe.
  if (getTargetMachine().getRelocationModel() == Reloc::DynamicNoPIC &&
      GA &&
      !GA->getGlobal()->isDeclaration() &&
      !GA->getGlobal()->isWeakForLinker())
    return true;

  // Otherwise assume nothing is safe.
  return false;
}
//===----------------------------------------------------------------------===//
//  Optimization Methods
//===----------------------------------------------------------------------===//

/// ShrinkDemandedConstant - Check to see if the specified operand of the
/// specified instruction is a constant integer. If so, check to see if there
/// are any bits set in the constant that are not demanded. If so, shrink the
/// constant and return true.
bool TargetLowering::TargetLoweringOpt::ShrinkDemandedConstant(SDValue Op,
                                                        const APInt &Demanded) {
  SDLoc dl(Op);

  // FIXME: ISD::SELECT, ISD::SELECT_CC
  switch (Op.getOpcode()) {
  default: break;
  case ISD::XOR:
  case ISD::AND:
  case ISD::OR: {
    ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
    if (!C) return false;

    if (Op.getOpcode() == ISD::XOR &&
        (C->getAPIntValue() | (~Demanded)).isAllOnesValue())
      return false;

    // If the constant has any bits set that are not demanded, clear them and
    // use the shrunken constant instead.
    if (C->getAPIntValue().intersects(~Demanded)) {
      EVT VT = Op.getValueType();
      SDValue New = DAG.getNode(Op.getOpcode(), dl, VT, Op.getOperand(0),
                                DAG.getConstant(Demanded &
                                                C->getAPIntValue(),
                                                VT));
      return CombineTo(Op, New);
    }

    break;
  }
  }

  return false;
}

/// ShrinkDemandedOp - Convert x+y to (VT)((SmallVT)x+(SmallVT)y) if the
/// casts are free. This uses isZExtFree and ZERO_EXTEND for the widening
/// cast, but it could be generalized for targets with other types of
/// implicit widening casts.
bool
TargetLowering::TargetLoweringOpt::ShrinkDemandedOp(SDValue Op,
                                                    unsigned BitWidth,
                                                    const APInt &Demanded,
                                                    SDLoc dl) {
  assert(Op.getNumOperands() == 2 &&
         "ShrinkDemandedOp only supports binary operators!");
  assert(Op.getNode()->getNumValues() == 1 &&
         "ShrinkDemandedOp only supports nodes with one result!");

  // Early return, as this function cannot handle vector types.
  if (Op.getValueType().isVector())
    return false;

  // Don't do this if the node has another user, which may require the
  // full value.
  if (!Op.getNode()->hasOneUse())
    return false;

  // Search for the smallest integer type with free casts to and from
  // Op's type. For expedience, just check power-of-2 integer types.
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  unsigned DemandedSize = BitWidth - Demanded.countLeadingZeros();
  unsigned SmallVTBits = DemandedSize;
  if (!isPowerOf2_32(SmallVTBits))
    SmallVTBits = NextPowerOf2(SmallVTBits);
  for (; SmallVTBits < BitWidth; SmallVTBits = NextPowerOf2(SmallVTBits)) {
    EVT SmallVT = EVT::getIntegerVT(*DAG.getContext(), SmallVTBits);
    if (TLI.isTruncateFree(Op.getValueType(), SmallVT) &&
        TLI.isZExtFree(SmallVT, Op.getValueType())) {
      // We found a type with free casts.
      SDValue X = DAG.getNode(Op.getOpcode(), dl, SmallVT,
                              DAG.getNode(ISD::TRUNCATE, dl, SmallVT,
                                          Op.getNode()->getOperand(0)),
                              DAG.getNode(ISD::TRUNCATE, dl, SmallVT,
                                          Op.getNode()->getOperand(1)));
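      // Zero-extend only if demanded bits lie above SmallVTBits; otherwise an
      // any-extend suffices, since the widened high bits are not demanded.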
      bool NeedZext = DemandedSize > SmallVTBits;
      SDValue Z = DAG.getNode(NeedZext ? ISD::ZERO_EXTEND : ISD::ANY_EXTEND,
                              dl, Op.getValueType(), X);
      return CombineTo(Op, Z);
    }
  }
  return false;
}

/// SimplifyDemandedBits - Look at Op. At this point, we know that only the
/// DemandedMask bits of the result of Op are ever used downstream. If we can
/// use this information to simplify Op, create a new simplified DAG node and
/// return true, returning the original and new nodes in Old and New. Otherwise,
/// analyze the expression and return a mask of KnownOne and KnownZero bits for
/// the expression (used to simplify the caller). The KnownZero/One bits may
/// only be accurate for those bits in the DemandedMask.
bool TargetLowering::SimplifyDemandedBits(SDValue Op,
                                          const APInt &DemandedMask,
                                          APInt &KnownZero,
                                          APInt &KnownOne,
                                          TargetLoweringOpt &TLO,
                                          unsigned Depth) const {
  unsigned BitWidth = DemandedMask.getBitWidth();
  assert(Op.getValueType().getScalarType().getSizeInBits() == BitWidth &&
         "Mask size mismatches value type size!");
  APInt NewMask = DemandedMask;
  SDLoc dl(Op);

  // Don't know anything.
  KnownZero = KnownOne = APInt(BitWidth, 0);

  // Other users may use these bits.
  if (!Op.getNode()->hasOneUse()) {
    if (Depth != 0) {
      // If not at the root, just compute the KnownZero/KnownOne bits to
      // simplify things downstream.
      TLO.DAG.computeKnownBits(Op, KnownZero, KnownOne, Depth);
      return false;
    }
    // If this is the root being simplified, allow it to have multiple uses,
    // just set the NewMask to all bits.
    NewMask = APInt::getAllOnesValue(BitWidth);
  } else if (DemandedMask == 0) {
    // Not demanding any bits from Op.
    if (Op.getOpcode() != ISD::UNDEF)
      return TLO.CombineTo(Op, TLO.DAG.getUNDEF(Op.getValueType()));
    return false;
  } else if (Depth == 6) { // Limit search depth.
    return false;
  }

  APInt KnownZero2, KnownOne2, KnownZeroOut, KnownOneOut;
  switch (Op.getOpcode()) {
  case ISD::Constant:
    // We know all of the bits for a constant!
    KnownOne = cast<ConstantSDNode>(Op)->getAPIntValue();
    KnownZero = ~KnownOne;
    return false; // Don't fall through, will infinitely loop.
  case ISD::AND:
    // If the RHS is a constant, check to see if the LHS would be zero without
    // using the bits from the RHS. Below, we use knowledge about the RHS to
    // simplify the LHS, here we're using information from the LHS to simplify
    // the RHS.
    if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
      APInt LHSZero, LHSOne;
      // Do not increment Depth here; that can cause an infinite loop.
      TLO.DAG.computeKnownBits(Op.getOperand(0), LHSZero, LHSOne, Depth);
      // If the LHS already has zeros where RHSC does, this and is dead.
      if ((LHSZero & NewMask) == (~RHSC->getAPIntValue() & NewMask))
        return TLO.CombineTo(Op, Op.getOperand(0));
      // If any of the set bits in the RHS are known zero on the LHS, shrink
      // the constant.
      if (TLO.ShrinkDemandedConstant(Op, ~LHSZero & NewMask))
        return true;
    }

    if (SimplifyDemandedBits(Op.getOperand(1), NewMask, KnownZero,
                             KnownOne, TLO, Depth+1))
      return true;
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    if (SimplifyDemandedBits(Op.getOperand(0), ~KnownZero & NewMask,
                             KnownZero2, KnownOne2, TLO, Depth+1))
      return true;
    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");

    // If all of the demanded bits are known one on one side, return the other.
    // These bits cannot contribute to the result of the 'and'.
    if ((NewMask & ~KnownZero2 & KnownOne) == (~KnownZero2 & NewMask))
      return TLO.CombineTo(Op, Op.getOperand(0));
    if ((NewMask & ~KnownZero & KnownOne2) == (~KnownZero & NewMask))
      return TLO.CombineTo(Op, Op.getOperand(1));
    // If all of the demanded bits in the inputs are known zeros, return zero.
    if ((NewMask & (KnownZero|KnownZero2)) == NewMask)
      return TLO.CombineTo(Op, TLO.DAG.getConstant(0, Op.getValueType()));
    // If the RHS is a constant, see if we can simplify it.
    if (TLO.ShrinkDemandedConstant(Op, ~KnownZero2 & NewMask))
      return true;
    // If the operation can be done in a smaller type, do so.
    if (TLO.ShrinkDemandedOp(Op, BitWidth, NewMask, dl))
      return true;

    // Output known-1 bits are only known if set in both the LHS & RHS.
    KnownOne &= KnownOne2;
    // Output known-0 are known to be clear if zero in either the LHS | RHS.
    KnownZero |= KnownZero2;
    break;
  case ISD::OR:
    if (SimplifyDemandedBits(Op.getOperand(1), NewMask, KnownZero,
                             KnownOne, TLO, Depth+1))
      return true;
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    if (SimplifyDemandedBits(Op.getOperand(0), ~KnownOne & NewMask,
                             KnownZero2, KnownOne2, TLO, Depth+1))
      return true;
    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");

    // If all of the demanded bits are known zero on one side, return the other.
    // These bits cannot contribute to the result of the 'or'.
    if ((NewMask & ~KnownOne2 & KnownZero) == (~KnownOne2 & NewMask))
      return TLO.CombineTo(Op, Op.getOperand(0));
    if ((NewMask & ~KnownOne & KnownZero2) == (~KnownOne & NewMask))
      return TLO.CombineTo(Op, Op.getOperand(1));
    // If all of the potentially set bits on one side are known to be set on
    // the other side, just use the 'other' side.
    if ((NewMask & ~KnownZero & KnownOne2) == (~KnownZero & NewMask))
      return TLO.CombineTo(Op, Op.getOperand(0));
    if ((NewMask & ~KnownZero2 & KnownOne) == (~KnownZero2 & NewMask))
      return TLO.CombineTo(Op, Op.getOperand(1));
    // If the RHS is a constant, see if we can simplify it.
    if (TLO.ShrinkDemandedConstant(Op, NewMask))
      return true;
    // If the operation can be done in a smaller type, do so.
    if (TLO.ShrinkDemandedOp(Op, BitWidth, NewMask, dl))
      return true;

    // Output known-0 bits are only known if clear in both the LHS & RHS.
    KnownZero &= KnownZero2;
    // Output known-1 are known to be set if set in either the LHS | RHS.
    KnownOne |= KnownOne2;
    break;
  case ISD::XOR:
    if (SimplifyDemandedBits(Op.getOperand(1), NewMask, KnownZero,
                             KnownOne, TLO, Depth+1))
      return true;
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    if (SimplifyDemandedBits(Op.getOperand(0), NewMask, KnownZero2,
                             KnownOne2, TLO, Depth+1))
      return true;
    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");

    // If all of the demanded bits are known zero on one side, return the other.
    // These bits cannot contribute to the result of the 'xor'.
    if ((KnownZero & NewMask) == NewMask)
      return TLO.CombineTo(Op, Op.getOperand(0));
    if ((KnownZero2 & NewMask) == NewMask)
      return TLO.CombineTo(Op, Op.getOperand(1));
    // If the operation can be done in a smaller type, do so.
    if (TLO.ShrinkDemandedOp(Op, BitWidth, NewMask, dl))
      return true;

    // If all of the unknown bits are known to be zero on one side or the other
    // (but not both) turn this into an *inclusive* or.
    // e.g. (A & C1)^(B & C2) -> (A & C1)|(B & C2) iff C1&C2 == 0
    if ((NewMask & ~KnownZero & ~KnownZero2) == 0)
      return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::OR, dl, Op.getValueType(),
                                               Op.getOperand(0),
                                               Op.getOperand(1)));

    // Output known-0 bits are known if clear or set in both the LHS & RHS.
    KnownZeroOut = (KnownZero & KnownZero2) | (KnownOne & KnownOne2);
    // Output known-1 are known to be set if set in only one of the LHS, RHS.
    KnownOneOut = (KnownZero & KnownOne2) | (KnownOne & KnownZero2);

    // If all of the demanded bits on one side are known, and all of the set
    // bits on that side are also known to be set on the other side, turn this
    // into an AND, as we know the bits will be cleared.
    // e.g. (X | C1) ^ C2 --> (X | C1) & ~C2 iff (C1&C2) == C2
    // NB: it is okay if more bits are known than are requested
    if ((NewMask & (KnownZero|KnownOne)) == NewMask) { // all known on one side
      if (KnownOne == KnownOne2) { // set bits are the same on both sides
        EVT VT = Op.getValueType();
        SDValue ANDC = TLO.DAG.getConstant(~KnownOne & NewMask, VT);
        return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::AND, dl, VT,
                                                 Op.getOperand(0), ANDC));
      }
    }

    // If the RHS is a constant, see if we can simplify it.
    // for XOR, we prefer to force bits to 1 if they will make a -1.
    // if we can't force bits, try to shrink constant
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
      APInt Expanded = C->getAPIntValue() | (~NewMask);
      // if we can expand it to have all bits set, do it
      if (Expanded.isAllOnesValue()) {
        if (Expanded != C->getAPIntValue()) {
          EVT VT = Op.getValueType();
          SDValue New = TLO.DAG.getNode(Op.getOpcode(), dl,VT, Op.getOperand(0),
                                        TLO.DAG.getConstant(Expanded, VT));
          return TLO.CombineTo(Op, New);
        }
        // if it already has all the bits set, nothing to change
        // but don't shrink either!
      } else if (TLO.ShrinkDemandedConstant(Op, NewMask)) {
        return true;
      }
    }

    KnownZero = KnownZeroOut;
    KnownOne = KnownOneOut;
    break;
  case ISD::SELECT:
    if (SimplifyDemandedBits(Op.getOperand(2), NewMask, KnownZero,
                             KnownOne, TLO, Depth+1))
      return true;
    if (SimplifyDemandedBits(Op.getOperand(1), NewMask, KnownZero2,
                             KnownOne2, TLO, Depth+1))
      return true;
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");

    // If the operands are constants, see if we can simplify them.
    if (TLO.ShrinkDemandedConstant(Op, NewMask))
      return true;

    // Only known if known in both the LHS and RHS.
    KnownOne &= KnownOne2;
    KnownZero &= KnownZero2;
    break;
  case ISD::SELECT_CC:
    if (SimplifyDemandedBits(Op.getOperand(3), NewMask, KnownZero,
                             KnownOne, TLO, Depth+1))
      return true;
    if (SimplifyDemandedBits(Op.getOperand(2), NewMask, KnownZero2,
                             KnownOne2, TLO, Depth+1))
      return true;
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");

    // If the operands are constants, see if we can simplify them.
    if (TLO.ShrinkDemandedConstant(Op, NewMask))
      return true;

    // Only known if known in both the LHS and RHS.
    KnownOne &= KnownOne2;
    KnownZero &= KnownZero2;
    break;
  case ISD::SHL:
    if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
      unsigned ShAmt = SA->getZExtValue();
      SDValue InOp = Op.getOperand(0);

      // If the shift count is an invalid immediate, don't do anything.
      if (ShAmt >= BitWidth)
        break;

      // If this is ((X >>u C1) << ShAmt), see if we can simplify this into a
      // single shift. We can do this if the bottom bits (which are shifted
      // out) are never demanded.
      if (InOp.getOpcode() == ISD::SRL &&
          isa<ConstantSDNode>(InOp.getOperand(1))) {
        if (ShAmt && (NewMask & APInt::getLowBitsSet(BitWidth, ShAmt)) == 0) {
          unsigned C1= cast<ConstantSDNode>(InOp.getOperand(1))->getZExtValue();
          unsigned Opc = ISD::SHL;
          int Diff = ShAmt-C1;
          if (Diff < 0) {
            Diff = -Diff;
            Opc = ISD::SRL;
          }

          SDValue NewSA =
            TLO.DAG.getConstant(Diff, Op.getOperand(1).getValueType());
          EVT VT = Op.getValueType();
          return TLO.CombineTo(Op, TLO.DAG.getNode(Opc, dl, VT,
                                                   InOp.getOperand(0), NewSA));
        }
      }

      if (SimplifyDemandedBits(InOp, NewMask.lshr(ShAmt),
                               KnownZero, KnownOne, TLO, Depth+1))
        return true;

      // Convert (shl (anyext x, c)) to (anyext (shl x, c)) if the high bits
      // are not demanded. This will likely allow the anyext to be folded away.
      if (InOp.getNode()->getOpcode() == ISD::ANY_EXTEND) {
        SDValue InnerOp = InOp.getNode()->getOperand(0);
        EVT InnerVT = InnerOp.getValueType();
        unsigned InnerBits = InnerVT.getSizeInBits();
        if (ShAmt < InnerBits && NewMask.lshr(InnerBits) == 0 &&
            isTypeDesirableForOp(ISD::SHL, InnerVT)) {
          EVT ShTy = getShiftAmountTy(InnerVT);
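          // If the shift amount does not fit in the preferred shift-amount
          // type, fall back to the inner type to hold it.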
          if (!APInt(BitWidth, ShAmt).isIntN(ShTy.getSizeInBits()))
            ShTy = InnerVT;
          SDValue NarrowShl =
            TLO.DAG.getNode(ISD::SHL, dl, InnerVT, InnerOp,
                            TLO.DAG.getConstant(ShAmt, ShTy));
          return
            TLO.CombineTo(Op,
                          TLO.DAG.getNode(ISD::ANY_EXTEND, dl, Op.getValueType(),
                                          NarrowShl));
        }
        // Repeat the SHL optimization above in cases where an extension
        // intervenes: (shl (anyext (shr x, c1)), c2) to
        // (shl (anyext x), c2-c1). This requires that the bottom c1 bits
        // aren't demanded (as above) and that the shifted upper c1 bits of
        // x aren't demanded.
        if (InOp.hasOneUse() &&
            InnerOp.getOpcode() == ISD::SRL &&
            InnerOp.hasOneUse() &&
            isa<ConstantSDNode>(InnerOp.getOperand(1))) {
          uint64_t InnerShAmt = cast<ConstantSDNode>(InnerOp.getOperand(1))
                                  ->getZExtValue();
          if (InnerShAmt < ShAmt &&
              InnerShAmt < InnerBits &&
              NewMask.lshr(InnerBits - InnerShAmt + ShAmt) == 0 &&
              NewMask.trunc(ShAmt) == 0) {
            SDValue NewSA =
              TLO.DAG.getConstant(ShAmt - InnerShAmt,
                                  Op.getOperand(1).getValueType());
            EVT VT = Op.getValueType();
            SDValue NewExt = TLO.DAG.getNode(ISD::ANY_EXTEND, dl, VT,
                                             InnerOp.getOperand(0));
            return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::SHL, dl, VT,
                                                     NewExt, NewSA));
          }
        }
      }

      KnownZero <<= SA->getZExtValue();
      KnownOne <<= SA->getZExtValue();
      // low bits known zero.
      KnownZero |= APInt::getLowBitsSet(BitWidth, SA->getZExtValue());
    }
    break;
  case ISD::SRL:
    if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
      EVT VT = Op.getValueType();
      unsigned ShAmt = SA->getZExtValue();
      unsigned VTSize = VT.getSizeInBits();
      SDValue InOp = Op.getOperand(0);

      // If the shift count is an invalid immediate, don't do anything.
      if (ShAmt >= BitWidth)
        break;

      // If this is ((X << C1) >>u ShAmt), see if we can simplify this into a
      // single shift. We can do this if the top bits (which are shifted out)
      // are never demanded.
      if (InOp.getOpcode() == ISD::SHL &&
          isa<ConstantSDNode>(InOp.getOperand(1))) {
        if (ShAmt && (NewMask & APInt::getHighBitsSet(VTSize, ShAmt)) == 0) {
          unsigned C1= cast<ConstantSDNode>(InOp.getOperand(1))->getZExtValue();
          unsigned Opc = ISD::SRL;
          int Diff = ShAmt-C1;
          if (Diff < 0) {
            Diff = -Diff;
            Opc = ISD::SHL;
          }

          SDValue NewSA =
            TLO.DAG.getConstant(Diff, Op.getOperand(1).getValueType());
          return TLO.CombineTo(Op, TLO.DAG.getNode(Opc, dl, VT,
                                                   InOp.getOperand(0), NewSA));
        }
      }

      // Compute the new bits that are at the top now.
      if (SimplifyDemandedBits(InOp, (NewMask << ShAmt),
                               KnownZero, KnownOne, TLO, Depth+1))
        return true;
      assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
      KnownZero = KnownZero.lshr(ShAmt);
      KnownOne = KnownOne.lshr(ShAmt);

      APInt HighBits = APInt::getHighBitsSet(BitWidth, ShAmt);
      KnownZero |= HighBits; // High bits known zero.
    }
    break;
  case ISD::SRA:
    // If this is an arithmetic shift right and only the low-bit is set, we can
    // always convert this into a logical shr, even if the shift amount is
    // variable. The low bit of the shift cannot be an input sign bit unless
    // the shift amount is >= the size of the datatype, which is undefined.
    if (NewMask == 1)
      return TLO.CombineTo(Op,
                           TLO.DAG.getNode(ISD::SRL, dl, Op.getValueType(),
                                           Op.getOperand(0), Op.getOperand(1)));

    if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
      EVT VT = Op.getValueType();
      unsigned ShAmt = SA->getZExtValue();

      // If the shift count is an invalid immediate, don't do anything.
      if (ShAmt >= BitWidth)
        break;

      APInt InDemandedMask = (NewMask << ShAmt);

      // If any of the demanded bits are produced by the sign extension, we also
      // demand the input sign bit.
      APInt HighBits = APInt::getHighBitsSet(BitWidth, ShAmt);
      if (HighBits.intersects(NewMask))
        InDemandedMask |= APInt::getSignBit(VT.getScalarType().getSizeInBits());

      if (SimplifyDemandedBits(Op.getOperand(0), InDemandedMask,
                               KnownZero, KnownOne, TLO, Depth+1))
        return true;
      assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
      KnownZero = KnownZero.lshr(ShAmt);
      KnownOne = KnownOne.lshr(ShAmt);

      // Handle the sign bit, adjusted to where it is now in the mask.
      APInt SignBit = APInt::getSignBit(BitWidth).lshr(ShAmt);

      // If the input sign bit is known to be zero, or if none of the top bits
      // are demanded, turn this into an unsigned shift right.
      if (KnownZero.intersects(SignBit) || (HighBits & ~NewMask) == HighBits)
        return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::SRL, dl, VT,
                                                 Op.getOperand(0),
                                                 Op.getOperand(1)));

      int Log2 = NewMask.exactLogBase2();
      if (Log2 >= 0) {
        // The bit must come from the sign.
        SDValue NewSA =
          TLO.DAG.getConstant(BitWidth - 1 - Log2,
                              Op.getOperand(1).getValueType());
        return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::SRL, dl, VT,
                                                 Op.getOperand(0), NewSA));
      }

      if (KnownOne.intersects(SignBit))
        // New bits are known one.
        KnownOne |= HighBits;
    }
    break;
  case ISD::SIGN_EXTEND_INREG: {
    EVT ExVT = cast<VTSDNode>(Op.getOperand(1))->getVT();

    APInt MsbMask = APInt::getHighBitsSet(BitWidth, 1);
    // If we only care about the highest bit, don't bother shifting right.
    if (MsbMask == DemandedMask) {
      unsigned ShAmt = ExVT.getScalarType().getSizeInBits();
      SDValue InOp = Op.getOperand(0);

      // Compute the correct shift amount type, which must be getShiftAmountTy
      // for scalar types after legalization.
      EVT ShiftAmtTy = Op.getValueType();
      if (TLO.LegalTypes() && !ShiftAmtTy.isVector())
        ShiftAmtTy = getShiftAmountTy(ShiftAmtTy);

      SDValue ShiftAmt = TLO.DAG.getConstant(BitWidth - ShAmt, ShiftAmtTy);
      return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::SHL, dl,
                                               Op.getValueType(), InOp, ShiftAmt));
    }

    // Sign extension. Compute the demanded bits in the result that are not
    // present in the input.
    APInt NewBits =
      APInt::getHighBitsSet(BitWidth,
                            BitWidth - ExVT.getScalarType().getSizeInBits());

    // If none of the extended bits are demanded, eliminate the sextinreg.
    if ((NewBits & NewMask) == 0)
      return TLO.CombineTo(Op, Op.getOperand(0));

    APInt InSignBit =
      APInt::getSignBit(ExVT.getScalarType().getSizeInBits()).zext(BitWidth);
    APInt InputDemandedBits =
      APInt::getLowBitsSet(BitWidth,
                           ExVT.getScalarType().getSizeInBits()) &
      NewMask;

    // Since the sign extended bits are demanded, we know that the sign
    // bit is demanded.
    InputDemandedBits |= InSignBit;

    if (SimplifyDemandedBits(Op.getOperand(0), InputDemandedBits,
                             KnownZero, KnownOne, TLO, Depth+1))
      return true;
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");

    // If the sign bit of the input is known set or clear, then we know the
    // top bits of the result.

    // If the input sign bit is known zero, convert this into a zero extension.
    if (KnownZero.intersects(InSignBit))
      return TLO.CombineTo(Op,
                           TLO.DAG.getZeroExtendInReg(Op.getOperand(0),dl,ExVT));

    if (KnownOne.intersects(InSignBit)) { // Input sign bit known set
      KnownOne |= NewBits;
      KnownZero &= ~NewBits;
    } else { // Input sign bit unknown
      KnownZero &= ~NewBits;
      KnownOne &= ~NewBits;
    }
    break;
  }
  case ISD::BUILD_PAIR: {
    EVT HalfVT = Op.getOperand(0).getValueType();
    unsigned HalfBitWidth = HalfVT.getScalarSizeInBits();

    APInt MaskLo = NewMask.getLoBits(HalfBitWidth).trunc(HalfBitWidth);
    APInt MaskHi = NewMask.getHiBits(HalfBitWidth).trunc(HalfBitWidth);

    APInt KnownZeroLo, KnownOneLo;
    APInt KnownZeroHi, KnownOneHi;

    if (SimplifyDemandedBits(Op.getOperand(0), MaskLo, KnownZeroLo,
                             KnownOneLo, TLO, Depth + 1))
      return true;

    if (SimplifyDemandedBits(Op.getOperand(1), MaskHi, KnownZeroHi,
                             KnownOneHi, TLO, Depth + 1))
      return true;

    KnownZero = KnownZeroLo.zext(BitWidth) |
                KnownZeroHi.zext(BitWidth).shl(HalfBitWidth);

    KnownOne = KnownOneLo.zext(BitWidth) |
               KnownOneHi.zext(BitWidth).shl(HalfBitWidth);
    break;
  }
  case ISD::ZERO_EXTEND: {
    unsigned OperandBitWidth =
      Op.getOperand(0).getValueType().getScalarType().getSizeInBits();
    APInt InMask = NewMask.trunc(OperandBitWidth);

    // If none of the top bits are demanded, convert this into an any_extend.
    APInt NewBits =
      APInt::getHighBitsSet(BitWidth, BitWidth - OperandBitWidth) & NewMask;
    if (!NewBits.intersects(NewMask))
      return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::ANY_EXTEND, dl,
                                               Op.getValueType(),
                                               Op.getOperand(0)));

    if (SimplifyDemandedBits(Op.getOperand(0), InMask,
                             KnownZero, KnownOne, TLO, Depth+1))
      return true;
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    KnownZero = KnownZero.zext(BitWidth);
    KnownOne = KnownOne.zext(BitWidth);
    KnownZero |= NewBits;
    break;
  }
  case ISD::SIGN_EXTEND: {
    EVT InVT = Op.getOperand(0).getValueType();
    unsigned InBits = InVT.getScalarType().getSizeInBits();
    APInt InMask = APInt::getLowBitsSet(BitWidth, InBits);
    APInt InSignBit = APInt::getBitsSet(BitWidth, InBits - 1, InBits);
    APInt NewBits = ~InMask & NewMask;

    // If none of the top bits are demanded, convert this into an any_extend.
    if (NewBits == 0)
      return TLO.CombineTo(Op,TLO.DAG.getNode(ISD::ANY_EXTEND, dl,
                                              Op.getValueType(),
                                              Op.getOperand(0)));

    // Since some of the sign extended bits are demanded, we know that the sign
    // bit is demanded.
    APInt InDemandedBits = InMask & NewMask;
    InDemandedBits |= InSignBit;
    InDemandedBits = InDemandedBits.trunc(InBits);

    if (SimplifyDemandedBits(Op.getOperand(0), InDemandedBits, KnownZero,
                             KnownOne, TLO, Depth+1))
      return true;
    KnownZero = KnownZero.zext(BitWidth);
    KnownOne = KnownOne.zext(BitWidth);

    // If the sign bit is known zero, convert this to a zero extend.
    if (KnownZero.intersects(InSignBit))
      return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::ZERO_EXTEND, dl,
                                               Op.getValueType(),
                                               Op.getOperand(0)));

    // If the sign bit is known one, the top bits match.
    if (KnownOne.intersects(InSignBit)) {
      KnownOne |= NewBits;
      assert((KnownZero & NewBits) == 0);
    } else { // Otherwise, top bits aren't known.
      assert((KnownOne & NewBits) == 0);
      assert((KnownZero & NewBits) == 0);
    }
    break;
  }
  case ISD::ANY_EXTEND: {
    unsigned OperandBitWidth =
      Op.getOperand(0).getValueType().getScalarType().getSizeInBits();
    APInt InMask = NewMask.trunc(OperandBitWidth);
    if (SimplifyDemandedBits(Op.getOperand(0), InMask,
                             KnownZero, KnownOne, TLO, Depth+1))
      return true;
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    KnownZero = KnownZero.zext(BitWidth);
    KnownOne = KnownOne.zext(BitWidth);
    break;
  }
  case ISD::TRUNCATE: {
    // Simplify the input, using demanded bit information, and compute the known
    // zero/one bits live out.
    unsigned OperandBitWidth =
      Op.getOperand(0).getValueType().getScalarType().getSizeInBits();
    APInt TruncMask = NewMask.zext(OperandBitWidth);
    if (SimplifyDemandedBits(Op.getOperand(0), TruncMask,
                             KnownZero, KnownOne, TLO, Depth+1))
      return true;
    KnownZero = KnownZero.trunc(BitWidth);
    KnownOne = KnownOne.trunc(BitWidth);

    // If the input is only used by this truncate, see if we can shrink it based
    // on the known demanded bits.
    if (Op.getOperand(0).getNode()->hasOneUse()) {
      SDValue In = Op.getOperand(0);
      switch (In.getOpcode()) {
      default: break;
      case ISD::SRL:
        // Shrink SRL by a constant if none of the high bits shifted in are
        // demanded.
        if (TLO.LegalTypes() &&
            !isTypeDesirableForOp(ISD::SRL, Op.getValueType()))
          // Do not turn (vt1 truncate (vt2 srl)) into (vt1 srl) if vt1 is
          // undesirable.
          break;
        ConstantSDNode *ShAmt = dyn_cast<ConstantSDNode>(In.getOperand(1));
        if (!ShAmt)
          break;
        SDValue Shift = In.getOperand(1);
        if (TLO.LegalTypes()) {
          uint64_t ShVal = ShAmt->getZExtValue();
          Shift =
            TLO.DAG.getConstant(ShVal, getShiftAmountTy(Op.getValueType()));
        }

        APInt HighBits = APInt::getHighBitsSet(OperandBitWidth,
                                               OperandBitWidth - BitWidth);
        HighBits = HighBits.lshr(ShAmt->getZExtValue()).trunc(BitWidth);

        if (ShAmt->getZExtValue() < BitWidth && !(HighBits & NewMask)) {
          // None of the shifted in bits are needed. Add a truncate of the
          // shift input, then shift it.
          SDValue NewTrunc = TLO.DAG.getNode(ISD::TRUNCATE, dl,
                                             Op.getValueType(),
                                             In.getOperand(0));
          return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::SRL, dl,
                                                   Op.getValueType(),
                                                   NewTrunc,
                                                   Shift));
        }
        break;
      }
    }

    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    break;
  }
  case ISD::AssertZext: {
    // AssertZext demands all of the high bits, plus any of the low bits
    // demanded by its users.
    EVT VT = cast<VTSDNode>(Op.getOperand(1))->getVT();
    APInt InMask = APInt::getLowBitsSet(BitWidth,
                                        VT.getSizeInBits());
    if (SimplifyDemandedBits(Op.getOperand(0), ~InMask | NewMask,
                             KnownZero, KnownOne, TLO, Depth+1))
      return true;
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");

    KnownZero |= ~InMask & NewMask;
    break;
  }
  case ISD::BITCAST:
    // If this is an FP->Int bitcast and if the sign bit is the only
    // thing demanded, turn this into a FGETSIGN.
    if (!TLO.LegalOperations() &&
        !Op.getValueType().isVector() &&
        !Op.getOperand(0).getValueType().isVector() &&
        NewMask == APInt::getSignBit(Op.getValueType().getSizeInBits()) &&
        Op.getOperand(0).getValueType().isFloatingPoint()) {
      bool OpVTLegal = isOperationLegalOrCustom(ISD::FGETSIGN, Op.getValueType());
      bool i32Legal = isOperationLegalOrCustom(ISD::FGETSIGN, MVT::i32);
      if ((OpVTLegal || i32Legal) && Op.getValueType().isSimple()) {
        EVT Ty = OpVTLegal ? Op.getValueType() : MVT::i32;
        // Make a FGETSIGN + SHL to move the sign bit into the appropriate
        // place. We expect the SHL to be eliminated by other optimizations.
        SDValue Sign = TLO.DAG.getNode(ISD::FGETSIGN, dl, Ty, Op.getOperand(0));
        unsigned OpVTSizeInBits = Op.getValueType().getSizeInBits();
        if (!OpVTLegal && OpVTSizeInBits > 32)
          Sign = TLO.DAG.getNode(ISD::ZERO_EXTEND, dl, Op.getValueType(), Sign);
        unsigned ShVal = Op.getValueType().getSizeInBits()-1;
        SDValue ShAmt = TLO.DAG.getConstant(ShVal, Op.getValueType());
        return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::SHL, dl,
                                                 Op.getValueType(),
                                                 Sign, ShAmt));
      }
    }
    break;
  case ISD::ADD:
  case ISD::MUL:
  case ISD::SUB: {
    // Add, Sub, and Mul don't demand any bits in positions beyond that
    // of the highest bit demanded of them.
    APInt LoMask = APInt::getLowBitsSet(BitWidth,
                                        BitWidth - NewMask.countLeadingZeros());
    if (SimplifyDemandedBits(Op.getOperand(0), LoMask, KnownZero2,
                             KnownOne2, TLO, Depth+1))
      return true;
    if (SimplifyDemandedBits(Op.getOperand(1), LoMask, KnownZero2,
                             KnownOne2, TLO, Depth+1))
      return true;
    // See if the operation should be performed at a smaller bit width.
    if (TLO.ShrinkDemandedOp(Op, BitWidth, NewMask, dl))
      return true;
  }
  // FALL THROUGH
  default:
    // Just use computeKnownBits to compute output bits.
    TLO.DAG.computeKnownBits(Op, KnownZero, KnownOne, Depth);
    break;
  }

  // If we know the value of all of the demanded bits, return this as a
  // constant.
  if ((NewMask & (KnownZero|KnownOne)) == NewMask)
    return TLO.CombineTo(Op, TLO.DAG.getConstant(KnownOne, Op.getValueType()));

  return false;
}

/// computeKnownBitsForTargetNode - Determine which of the bits specified
/// in Mask are known to be either zero or one and return them in the
/// KnownZero/KnownOne bitsets.
void TargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
                                                   APInt &KnownZero,
                                                   APInt &KnownOne,
                                                   const SelectionDAG &DAG,
                                                   unsigned Depth) const {
  assert((Op.getOpcode() >= ISD::BUILTIN_OP_END ||
          Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
          Op.getOpcode() == ISD::INTRINSIC_W_CHAIN ||
          Op.getOpcode() == ISD::INTRINSIC_VOID) &&
         "Should use MaskedValueIsZero if you don't know whether Op"
         " is a target node!");
  KnownZero = KnownOne = APInt(KnownOne.getBitWidth(), 0);
}

/// ComputeNumSignBitsForTargetNode - This method can be implemented by
/// targets that want to expose additional information about sign bits to the
/// DAG Combiner.
unsigned TargetLowering::ComputeNumSignBitsForTargetNode(SDValue Op,
                                                         const SelectionDAG &,
                                                         unsigned Depth) const {
  assert((Op.getOpcode() >= ISD::BUILTIN_OP_END ||
          Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
          Op.getOpcode() == ISD::INTRINSIC_W_CHAIN ||
          Op.getOpcode() == ISD::INTRINSIC_VOID) &&
         "Should use ComputeNumSignBits if you don't know whether Op"
         " is a target node!");
  return 1;
}

/// ValueHasExactlyOneBitSet - Test if the given value is known to have exactly
/// one bit set. This differs from computeKnownBits in that it doesn't need to
/// determine which bit is set.
///
static bool ValueHasExactlyOneBitSet(SDValue Val, const SelectionDAG &DAG) {
  // A left-shift of a constant one will have exactly one bit set, because
  // shifting the bit off the end is undefined.
  if (Val.getOpcode() == ISD::SHL)
    if (ConstantSDNode *C =
          dyn_cast<ConstantSDNode>(Val.getNode()->getOperand(0)))
      if (C->getAPIntValue() == 1)
        return true;

  // Similarly, a right-shift of a constant sign-bit will have exactly
  // one bit set.
  if (Val.getOpcode() == ISD::SRL)
    if (ConstantSDNode *C =
          dyn_cast<ConstantSDNode>(Val.getNode()->getOperand(0)))
      if (C->getAPIntValue().isSignBit())
        return true;

  // More could be done here, though the above checks are enough
  // to handle some common cases.

  // Fall back to computeKnownBits to catch other known cases.
  EVT OpVT = Val.getValueType();
  unsigned BitWidth = OpVT.getScalarType().getSizeInBits();
  APInt KnownZero, KnownOne;
  DAG.computeKnownBits(Val, KnownZero, KnownOne);
  return (KnownZero.countPopulation() == BitWidth - 1) &&
         (KnownOne.countPopulation() == 1);
}

bool TargetLowering::isConstTrueVal(const SDNode *N) const {
  if (!N)
    return false;

  const ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N);
  if (!CN) {
    const BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N);
    if (!BV)
      return false;

    BitVector UndefElements;
    CN = BV->getConstantSplatNode(&UndefElements);
    // Only interested in constant splats, and we don't try to handle undef
    // elements in identifying boolean constants.
    if (!CN || !UndefElements.none())
      return false;
  }

  switch (getBooleanContents(N->getValueType(0))) {
  case UndefinedBooleanContent:
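    // Only the low bit of the constant is meaningful here.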
    return CN->getAPIntValue()[0];
  case ZeroOrOneBooleanContent:
    return CN->isOne();
  case ZeroOrNegativeOneBooleanContent:
    return CN->isAllOnesValue();
  }

  llvm_unreachable("Invalid boolean contents");
}

bool TargetLowering::isConstFalseVal(const SDNode *N) const {
  if (!N)
    return false;

  const ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N);
  if (!CN) {
    const BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N);
    if (!BV)
      return false;

    BitVector UndefElements;
    CN = BV->getConstantSplatNode(&UndefElements);
    // Only interested in constant splats, and we don't try to handle undef
    // elements in identifying boolean constants.
    if (!CN || !UndefElements.none())
      return false;
  }

  if (getBooleanContents(N->getValueType(0)) == UndefinedBooleanContent)
    return !CN->getAPIntValue()[0];

  return CN->isNullValue();
}

/// SimplifySetCC - Try to simplify a setcc built with the specified operands
/// and cc. If it is unable to simplify it, return a null SDValue.
SDValue
TargetLowering::SimplifySetCC(EVT VT, SDValue N0, SDValue N1,
                              ISD::CondCode Cond, bool foldBooleans,
                              DAGCombinerInfo &DCI, SDLoc dl) const {
  SelectionDAG &DAG = DCI.DAG;

  // These setcc operations always fold.
  switch (Cond) {
  default: break;
  case ISD::SETFALSE:
  case ISD::SETFALSE2: return DAG.getConstant(0, VT);
  case ISD::SETTRUE:
  case ISD::SETTRUE2: {
    TargetLowering::BooleanContent Cnt =
        getBooleanContents(N0->getValueType(0));
    return DAG.getConstant(
        Cnt == TargetLowering::ZeroOrNegativeOneBooleanContent ? -1ULL : 1, VT);
  }
  }

  // Ensure that the constant occurs on the RHS, and fold constant
  // comparisons.
  ISD::CondCode SwappedCC = ISD::getSetCCSwappedOperands(Cond);
  if (isa<ConstantSDNode>(N0.getNode()) &&
      (DCI.isBeforeLegalizeOps() ||
       isCondCodeLegal(SwappedCC, N0.getSimpleValueType())))
    return DAG.getSetCC(dl, VT, N1, N0, SwappedCC);

  if (ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1.getNode())) {
    const APInt &C1 = N1C->getAPIntValue();

    // If the LHS is '(srl (ctlz x), 5)', the RHS is 0/1, and this is an
    // equality comparison, then we're just comparing whether X itself is
    // zero.
    if (N0.getOpcode() == ISD::SRL && (C1 == 0 || C1 == 1) &&
        N0.getOperand(0).getOpcode() == ISD::CTLZ &&
        N0.getOperand(1).getOpcode() == ISD::Constant) {
      const APInt &ShAmt
        = cast<ConstantSDNode>(N0.getOperand(1))->getAPIntValue();
      if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) &&
          ShAmt == Log2_32(N0.getValueType().getSizeInBits())) {
        if ((C1 == 0) == (Cond == ISD::SETEQ)) {
          // (srl (ctlz x), 5) == 0  -> X != 0
          // (srl (ctlz x), 5) != 1  -> X != 0
          Cond = ISD::SETNE;
        } else {
          // (srl (ctlz x), 5) != 0  -> X == 0
          // (srl (ctlz x), 5) == 1  -> X == 0
          Cond = ISD::SETEQ;
        }
        SDValue Zero = DAG.getConstant(0, N0.getValueType());
        return DAG.getSetCC(dl, VT, N0.getOperand(0).getOperand(0),
                            Zero, Cond);
      }
    }

    SDValue CTPOP = N0;
    // Look through truncs that don't change the value of a ctpop.
    if (N0.hasOneUse() && N0.getOpcode() == ISD::TRUNCATE)
      CTPOP = N0.getOperand(0);

    if (CTPOP.hasOneUse() && CTPOP.getOpcode() == ISD::CTPOP &&
        (N0 == CTPOP || N0.getValueType().getSizeInBits() >
                        Log2_32_Ceil(CTPOP.getValueType().getSizeInBits()))) {
      EVT CTVT = CTPOP.getValueType();
      SDValue CTOp = CTPOP.getOperand(0);

      // (ctpop x) u< 2 -> (x & x-1) == 0
      // (ctpop x) u> 1 -> (x & x-1) != 0
      if ((Cond == ISD::SETULT && C1 == 2) || (Cond == ISD::SETUGT && C1 == 1)){
        SDValue Sub = DAG.getNode(ISD::SUB, dl, CTVT, CTOp,
                                  DAG.getConstant(1, CTVT));
        SDValue And = DAG.getNode(ISD::AND, dl, CTVT, CTOp, Sub);
        ISD::CondCode CC = Cond == ISD::SETULT ? ISD::SETEQ : ISD::SETNE;
        return DAG.getSetCC(dl, VT, And, DAG.getConstant(0, CTVT), CC);
      }

      // TODO: (ctpop x) == 1 -> x && (x & x-1) == 0 iff ctpop is illegal.
    }

    // (zext x) == C --> x == (trunc C)
    if (DCI.isBeforeLegalize() && N0->hasOneUse() &&
        (Cond == ISD::SETEQ || Cond == ISD::SETNE)) {
      unsigned MinBits = N0.getValueSizeInBits();
      SDValue PreZExt;
      if (N0->getOpcode() == ISD::ZERO_EXTEND) {
        // ZExt
        MinBits = N0->getOperand(0).getValueSizeInBits();
        PreZExt = N0->getOperand(0);
      } else if (N0->getOpcode() == ISD::AND) {
        // DAGCombine turns costly ZExts into ANDs
        if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N0->getOperand(1)))
          if ((C->getAPIntValue()+1).isPowerOf2()) {
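            // A mask of 2^k-1 preserves only the low k bits, so the value
            // behaves like a zero-extend from k bits.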
            MinBits = C->getAPIntValue().countTrailingOnes();
            PreZExt = N0->getOperand(0);
          }
      } else if (LoadSDNode *LN0 = dyn_cast<LoadSDNode>(N0)) {
        // ZEXTLOAD
        if (LN0->getExtensionType() == ISD::ZEXTLOAD) {
          MinBits = LN0->getMemoryVT().getSizeInBits();
          PreZExt = N0;
        }
      }

      // Make sure we're not losing bits from the constant.
      if (MinBits > 0 &&
          MinBits < C1.getBitWidth() && MinBits >= C1.getActiveBits()) {
        EVT MinVT = EVT::getIntegerVT(*DAG.getContext(), MinBits);
        if (isTypeDesirableForOp(ISD::SETCC, MinVT)) {
          // Will get folded away.
          SDValue Trunc = DAG.getNode(ISD::TRUNCATE, dl, MinVT, PreZExt);
          SDValue C = DAG.getConstant(C1.trunc(MinBits), MinVT);
          return DAG.getSetCC(dl, VT, Trunc, C, Cond);
        }
      }
    }

    // If the LHS is '(and load, const)', the RHS is 0,
    // the test is for equality or unsigned, and all 1 bits of the const are
    // in the same partial word, see if we can shorten the load.
    if (DCI.isBeforeLegalize() &&
        !ISD::isSignedIntSetCC(Cond) &&
        N0.getOpcode() == ISD::AND && C1 == 0 &&
        N0.getNode()->hasOneUse() &&
        isa<LoadSDNode>(N0.getOperand(0)) &&
        N0.getOperand(0).getNode()->hasOneUse() &&
        isa<ConstantSDNode>(N0.getOperand(1))) {
      LoadSDNode *Lod = cast<LoadSDNode>(N0.getOperand(0));
      APInt bestMask;
      unsigned bestWidth = 0, bestOffset = 0;
      if (!Lod->isVolatile() && Lod->isUnindexed()) {
        unsigned origWidth = N0.getValueType().getSizeInBits();
        unsigned maskWidth = origWidth;
        // We can narrow (e.g.) 16-bit extending loads on 32-bit target to
        // 8 bits, but have to be careful...
        if (Lod->getExtensionType() != ISD::NON_EXTLOAD)
          origWidth = Lod->getMemoryVT().getSizeInBits();
        const APInt &Mask =
          cast<ConstantSDNode>(N0.getOperand(1))->getAPIntValue();
        for (unsigned width = origWidth / 2; width>=8; width /= 2) {
          APInt newMask = APInt::getLowBitsSet(maskWidth, width);
          for (unsigned offset=0; offset<origWidth/width; offset++) {
            if ((newMask & Mask) == Mask) {
              if (!getDataLayout()->isLittleEndian())
                bestOffset = (origWidth/width - offset - 1) * (width/8);
              else
                bestOffset = (uint64_t)offset * (width/8);
              bestMask = Mask.lshr(offset * (width/8) * 8);
              bestWidth = width;
              break;
            }
            newMask = newMask << width;
          }
        }
      }
      if (bestWidth) {
        EVT newVT = EVT::getIntegerVT(*DAG.getContext(), bestWidth);
        if (newVT.isRound()) {
          EVT PtrType = Lod->getOperand(1).getValueType();
          SDValue Ptr = Lod->getBasePtr();
          if (bestOffset != 0)
            Ptr = DAG.getNode(ISD::ADD, dl, PtrType, Lod->getBasePtr(),
                              DAG.getConstant(bestOffset, PtrType));
          unsigned NewAlign = MinAlign(Lod->getAlignment(), bestOffset);
          SDValue NewLoad = DAG.getLoad(newVT, dl, Lod->getChain(), Ptr,
                                        Lod->getPointerInfo().getWithOffset(bestOffset),
                                        false, false, false, NewAlign);
          return DAG.getSetCC(dl, VT,
                              DAG.getNode(ISD::AND, dl, newVT, NewLoad,
                                          DAG.getConstant(bestMask.trunc(bestWidth),
                                                          newVT)),
                              DAG.getConstant(0LL, newVT), Cond);
        }
      }
    }
1380
1381 // If the LHS is a ZERO_EXTEND, perform the comparison on the input.
1382 if (N0.getOpcode() == ISD::ZERO_EXTEND) {
1383 unsigned InSize = N0.getOperand(0).getValueType().getSizeInBits();
1384
1385 // If the comparison constant has bits in the upper part, the
1386 // zero-extended value could never match.
1387 if (C1.intersects(APInt::getHighBitsSet(C1.getBitWidth(),
1388 C1.getBitWidth() - InSize))) {
1389 switch (Cond) {
1390 case ISD::SETUGT:
1391 case ISD::SETUGE:
1392 case ISD::SETEQ: return DAG.getConstant(0, VT);
1393 case ISD::SETULT:
1394 case ISD::SETULE:
1395 case ISD::SETNE: return DAG.getConstant(1, VT);
1396 case ISD::SETGT:
1397 case ISD::SETGE:
1398 // True if the sign bit of C1 is set.
1399 return DAG.getConstant(C1.isNegative(), VT);
1400 case ISD::SETLT:
1401 case ISD::SETLE:
1402 // True if the sign bit of C1 isn't set.
1403 return DAG.getConstant(C1.isNonNegative(), VT);
1404 default:
1405 break;
1406 }
1407 }
1408
1409 // Otherwise, we can perform the comparison with the low bits.
1410 switch (Cond) {
1411 case ISD::SETEQ:
1412 case ISD::SETNE:
1413 case ISD::SETUGT:
1414 case ISD::SETUGE:
1415 case ISD::SETULT:
1416 case ISD::SETULE: {
1417 EVT newVT = N0.getOperand(0).getValueType();
1418 if (DCI.isBeforeLegalizeOps() ||
1419 (isOperationLegal(ISD::SETCC, newVT) &&
1420 getCondCodeAction(Cond, newVT.getSimpleVT()) == Legal)) {
1421 EVT NewSetCCVT = getSetCCResultType(*DAG.getContext(), newVT);
1422 SDValue NewConst = DAG.getConstant(C1.trunc(InSize), newVT);
1423
1424 SDValue NewSetCC = DAG.getSetCC(dl, NewSetCCVT, N0.getOperand(0),
1425 NewConst, Cond);
1426 return DAG.getBoolExtOrTrunc(NewSetCC, dl, VT, N0.getValueType());
1427 }
1428 break;
1429 }
1430 default:
1431         break; // TODO: Be more careful with signed comparisons.
1432 }
1433 } else if (N0.getOpcode() == ISD::SIGN_EXTEND_INREG &&
1434 (Cond == ISD::SETEQ || Cond == ISD::SETNE)) {
1435 EVT ExtSrcTy = cast<VTSDNode>(N0.getOperand(1))->getVT();
1436 unsigned ExtSrcTyBits = ExtSrcTy.getSizeInBits();
1437 EVT ExtDstTy = N0.getValueType();
1438 unsigned ExtDstTyBits = ExtDstTy.getSizeInBits();
1439
1440 // If the constant doesn't fit into the number of bits for the source of
1441 // the sign extension, it is impossible for both sides to be equal.
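      // For example, (sext_inreg X, i8) always lies in [-128, 127], so an
      // equality compare against 200 folds to false (and setne to true).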
1442 if (C1.getMinSignedBits() > ExtSrcTyBits)
1443 return DAG.getConstant(Cond == ISD::SETNE, VT);
1444
1445 SDValue ZextOp;
1446 EVT Op0Ty = N0.getOperand(0).getValueType();
1447 if (Op0Ty == ExtSrcTy) {
1448 ZextOp = N0.getOperand(0);
1449 } else {
1450 APInt Imm = APInt::getLowBitsSet(ExtDstTyBits, ExtSrcTyBits);
1451 ZextOp = DAG.getNode(ISD::AND, dl, Op0Ty, N0.getOperand(0),
1452 DAG.getConstant(Imm, Op0Ty));
1453 }
1454 if (!DCI.isCalledByLegalizer())
1455 DCI.AddToWorklist(ZextOp.getNode());
1456 // Otherwise, make this a use of a zext.
1457 return DAG.getSetCC(dl, VT, ZextOp,
1458 DAG.getConstant(C1 & APInt::getLowBitsSet(
1459 ExtDstTyBits,
1460 ExtSrcTyBits),
1461 ExtDstTy),
1462 Cond);
1463 } else if ((N1C->isNullValue() || N1C->getAPIntValue() == 1) &&
1464 (Cond == ISD::SETEQ || Cond == ISD::SETNE)) {
1465 // SETCC (SETCC), [0|1], [EQ|NE] -> SETCC
1466 if (N0.getOpcode() == ISD::SETCC &&
1467 isTypeLegal(VT) && VT.bitsLE(N0.getValueType())) {
1468 bool TrueWhenTrue = (Cond == ISD::SETEQ) ^ (N1C->getAPIntValue() != 1);
1469 if (TrueWhenTrue)
1470 return DAG.getNode(ISD::TRUNCATE, dl, VT, N0);
1471 // Invert the condition.
1472 ISD::CondCode CC = cast<CondCodeSDNode>(N0.getOperand(2))->get();
1473 CC = ISD::getSetCCInverse(CC,
1474 N0.getOperand(0).getValueType().isInteger());
1475 if (DCI.isBeforeLegalizeOps() ||
1476 isCondCodeLegal(CC, N0.getOperand(0).getSimpleValueType()))
1477 return DAG.getSetCC(dl, VT, N0.getOperand(0), N0.getOperand(1), CC);
1478 }
1479
1480 if ((N0.getOpcode() == ISD::XOR ||
1481 (N0.getOpcode() == ISD::AND &&
1482 N0.getOperand(0).getOpcode() == ISD::XOR &&
1483 N0.getOperand(1) == N0.getOperand(0).getOperand(1))) &&
1484 isa<ConstantSDNode>(N0.getOperand(1)) &&
1485 cast<ConstantSDNode>(N0.getOperand(1))->getAPIntValue() == 1) {
1486 // If this is (X^1) == 0/1, swap the RHS and eliminate the xor. We
1487 // can only do this if the top bits are known zero.
1488 unsigned BitWidth = N0.getValueSizeInBits();
1489 if (DAG.MaskedValueIsZero(N0,
1490 APInt::getHighBitsSet(BitWidth,
1491 BitWidth-1))) {
1492 // Okay, get the un-inverted input value.
1493 SDValue Val;
1494 if (N0.getOpcode() == ISD::XOR)
1495 Val = N0.getOperand(0);
1496 else {
1497 assert(N0.getOpcode() == ISD::AND &&
1498 N0.getOperand(0).getOpcode() == ISD::XOR);
1499 // ((X^1)&1)^1 -> X & 1
1500 Val = DAG.getNode(ISD::AND, dl, N0.getValueType(),
1501 N0.getOperand(0).getOperand(0),
1502 N0.getOperand(1));
1503 }
1504
1505 return DAG.getSetCC(dl, VT, Val, N1,
1506 Cond == ISD::SETEQ ? ISD::SETNE : ISD::SETEQ);
1507 }
1508 } else if (N1C->getAPIntValue() == 1 &&
1509 (VT == MVT::i1 ||
1510 getBooleanContents(N0->getValueType(0)) ==
1511 ZeroOrOneBooleanContent)) {
1512 SDValue Op0 = N0;
1513 if (Op0.getOpcode() == ISD::TRUNCATE)
1514 Op0 = Op0.getOperand(0);
1515
1516 if ((Op0.getOpcode() == ISD::XOR) &&
1517 Op0.getOperand(0).getOpcode() == ISD::SETCC &&
1518 Op0.getOperand(1).getOpcode() == ISD::SETCC) {
1519 // (xor (setcc), (setcc)) == / != 1 -> (setcc) != / == (setcc)
1520 Cond = (Cond == ISD::SETEQ) ? ISD::SETNE : ISD::SETEQ;
1521 return DAG.getSetCC(dl, VT, Op0.getOperand(0), Op0.getOperand(1),
1522 Cond);
1523 }
1524 if (Op0.getOpcode() == ISD::AND &&
1525 isa<ConstantSDNode>(Op0.getOperand(1)) &&
1526 cast<ConstantSDNode>(Op0.getOperand(1))->getAPIntValue() == 1) {
1527 // If this is (X&1) == / != 1, normalize it to (X&1) != / == 0.
1528 if (Op0.getValueType().bitsGT(VT))
1529 Op0 = DAG.getNode(ISD::AND, dl, VT,
1530 DAG.getNode(ISD::TRUNCATE, dl, VT, Op0.getOperand(0)),
1531 DAG.getConstant(1, VT));
1532 else if (Op0.getValueType().bitsLT(VT))
1533 Op0 = DAG.getNode(ISD::AND, dl, VT,
1534 DAG.getNode(ISD::ANY_EXTEND, dl, VT, Op0.getOperand(0)),
1535 DAG.getConstant(1, VT));
1536
1537 return DAG.getSetCC(dl, VT, Op0,
1538 DAG.getConstant(0, Op0.getValueType()),
1539 Cond == ISD::SETEQ ? ISD::SETNE : ISD::SETEQ);
1540 }
1541 if (Op0.getOpcode() == ISD::AssertZext &&
1542 cast<VTSDNode>(Op0.getOperand(1))->getVT() == MVT::i1)
1543 return DAG.getSetCC(dl, VT, Op0,
1544 DAG.getConstant(0, Op0.getValueType()),
1545 Cond == ISD::SETEQ ? ISD::SETNE : ISD::SETEQ);
1546 }
1547 }
1548
1549 APInt MinVal, MaxVal;
1550 unsigned OperandBitSize = N1C->getValueType(0).getSizeInBits();
1551 if (ISD::isSignedIntSetCC(Cond)) {
1552 MinVal = APInt::getSignedMinValue(OperandBitSize);
1553 MaxVal = APInt::getSignedMaxValue(OperandBitSize);
1554 } else {
1555 MinVal = APInt::getMinValue(OperandBitSize);
1556 MaxVal = APInt::getMaxValue(OperandBitSize);
1557 }
1558
1559 // Canonicalize GE/LE comparisons to use GT/LT comparisons.
1560 if (Cond == ISD::SETGE || Cond == ISD::SETUGE) {
1561 if (C1 == MinVal) return DAG.getConstant(1, VT); // X >= MIN --> true
1562 // X >= C0 --> X > (C0 - 1)
1563 APInt C = C1 - 1;
1564 ISD::CondCode NewCC = (Cond == ISD::SETGE) ? ISD::SETGT : ISD::SETUGT;
1565 if ((DCI.isBeforeLegalizeOps() ||
1566 isCondCodeLegal(NewCC, VT.getSimpleVT())) &&
1567 (!N1C->isOpaque() || (N1C->isOpaque() && C.getBitWidth() <= 64 &&
1568 isLegalICmpImmediate(C.getSExtValue())))) {
1569 return DAG.getSetCC(dl, VT, N0,
1570 DAG.getConstant(C, N1.getValueType()),
1571 NewCC);
1572 }
1573 }
1574
1575 if (Cond == ISD::SETLE || Cond == ISD::SETULE) {
1576 if (C1 == MaxVal) return DAG.getConstant(1, VT); // X <= MAX --> true
1577 // X <= C0 --> X < (C0 + 1)
1578 APInt C = C1 + 1;
1579 ISD::CondCode NewCC = (Cond == ISD::SETLE) ? ISD::SETLT : ISD::SETULT;
1580 if ((DCI.isBeforeLegalizeOps() ||
1581 isCondCodeLegal(NewCC, VT.getSimpleVT())) &&
1582 (!N1C->isOpaque() || (N1C->isOpaque() && C.getBitWidth() <= 64 &&
1583 isLegalICmpImmediate(C.getSExtValue())))) {
1584 return DAG.getSetCC(dl, VT, N0,
1585 DAG.getConstant(C, N1.getValueType()),
1586 NewCC);
1587 }
1588 }
1589
1590 if ((Cond == ISD::SETLT || Cond == ISD::SETULT) && C1 == MinVal)
1591 return DAG.getConstant(0, VT); // X < MIN --> false
1592 if ((Cond == ISD::SETGE || Cond == ISD::SETUGE) && C1 == MinVal)
1593 return DAG.getConstant(1, VT); // X >= MIN --> true
1594 if ((Cond == ISD::SETGT || Cond == ISD::SETUGT) && C1 == MaxVal)
1595 return DAG.getConstant(0, VT); // X > MAX --> false
1596 if ((Cond == ISD::SETLE || Cond == ISD::SETULE) && C1 == MaxVal)
1597 return DAG.getConstant(1, VT); // X <= MAX --> true
1598
1599 // Canonicalize setgt X, Min --> setne X, Min
1600 if ((Cond == ISD::SETGT || Cond == ISD::SETUGT) && C1 == MinVal)
1601 return DAG.getSetCC(dl, VT, N0, N1, ISD::SETNE);
1602 // Canonicalize setlt X, Max --> setne X, Max
1603 if ((Cond == ISD::SETLT || Cond == ISD::SETULT) && C1 == MaxVal)
1604 return DAG.getSetCC(dl, VT, N0, N1, ISD::SETNE);
1605
1606 // If we have setult X, 1, turn it into seteq X, 0
1607 if ((Cond == ISD::SETLT || Cond == ISD::SETULT) && C1 == MinVal+1)
1608 return DAG.getSetCC(dl, VT, N0,
1609 DAG.getConstant(MinVal, N0.getValueType()),
1610 ISD::SETEQ);
1611 // If we have setugt X, Max-1, turn it into seteq X, Max
1612 if ((Cond == ISD::SETGT || Cond == ISD::SETUGT) && C1 == MaxVal-1)
1613 return DAG.getSetCC(dl, VT, N0,
1614 DAG.getConstant(MaxVal, N0.getValueType()),
1615 ISD::SETEQ);
1616
1617 // If we have "setcc X, C0", check to see if we can shrink the immediate
1618 // by changing cc.
1619
1620 // SETUGT X, SINTMAX -> SETLT X, 0
1621 if (Cond == ISD::SETUGT &&
1622 C1 == APInt::getSignedMaxValue(OperandBitSize))
1623 return DAG.getSetCC(dl, VT, N0,
1624 DAG.getConstant(0, N1.getValueType()),
1625 ISD::SETLT);
1626
1627 // SETULT X, SINTMIN -> SETGT X, -1
1628 if (Cond == ISD::SETULT &&
1629 C1 == APInt::getSignedMinValue(OperandBitSize)) {
1630 SDValue ConstMinusOne =
1631 DAG.getConstant(APInt::getAllOnesValue(OperandBitSize),
1632 N1.getValueType());
1633 return DAG.getSetCC(dl, VT, N0, ConstMinusOne, ISD::SETGT);
1634 }
1635
1636 // Fold bit comparisons when we can.
1637 if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) &&
1638 (VT == N0.getValueType() ||
1639 (isTypeLegal(VT) && VT.bitsLE(N0.getValueType()))) &&
1640 N0.getOpcode() == ISD::AND)
1641 if (ConstantSDNode *AndRHS =
1642 dyn_cast<ConstantSDNode>(N0.getOperand(1))) {
1643 EVT ShiftTy = DCI.isBeforeLegalize() ?
1644 getPointerTy() : getShiftAmountTy(N0.getValueType());
1645 if (Cond == ISD::SETNE && C1 == 0) {// (X & 8) != 0 --> (X & 8) >> 3
1646 // Perform the xform if the AND RHS is a single bit.
1647 if (AndRHS->getAPIntValue().isPowerOf2()) {
1648 return DAG.getNode(ISD::TRUNCATE, dl, VT,
1649 DAG.getNode(ISD::SRL, dl, N0.getValueType(), N0,
1650 DAG.getConstant(AndRHS->getAPIntValue().logBase2(), ShiftTy)));
1651 }
1652 } else if (Cond == ISD::SETEQ && C1 == AndRHS->getAPIntValue()) {
1653 // (X & 8) == 8 --> (X & 8) >> 3
1654 // Perform the xform if C1 is a single bit.
1655 if (C1.isPowerOf2()) {
1656 return DAG.getNode(ISD::TRUNCATE, dl, VT,
1657 DAG.getNode(ISD::SRL, dl, N0.getValueType(), N0,
1658 DAG.getConstant(C1.logBase2(), ShiftTy)));
1659 }
1660 }
1661 }
1662
1663 if (C1.getMinSignedBits() <= 64 &&
1664 !isLegalICmpImmediate(C1.getSExtValue())) {
1665 // (X & -256) == 256 -> (X >> 8) == 1
1666 if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) &&
1667 N0.getOpcode() == ISD::AND && N0.hasOneUse()) {
1668 if (ConstantSDNode *AndRHS =
1669 dyn_cast<ConstantSDNode>(N0.getOperand(1))) {
1670 const APInt &AndRHSC = AndRHS->getAPIntValue();
1671 if ((-AndRHSC).isPowerOf2() && (AndRHSC & C1) == C1) {
1672 unsigned ShiftBits = AndRHSC.countTrailingZeros();
1673 EVT ShiftTy = DCI.isBeforeLegalize() ?
1674 getPointerTy() : getShiftAmountTy(N0.getValueType());
1675 EVT CmpTy = N0.getValueType();
1676 SDValue Shift = DAG.getNode(ISD::SRL, dl, CmpTy, N0.getOperand(0),
1677 DAG.getConstant(ShiftBits, ShiftTy));
1678 SDValue CmpRHS = DAG.getConstant(C1.lshr(ShiftBits), CmpTy);
1679 return DAG.getSetCC(dl, VT, Shift, CmpRHS, Cond);
1680 }
1681 }
1682 } else if (Cond == ISD::SETULT || Cond == ISD::SETUGE ||
1683 Cond == ISD::SETULE || Cond == ISD::SETUGT) {
1684 bool AdjOne = (Cond == ISD::SETULE || Cond == ISD::SETUGT);
1685 // X < 0x100000000 -> (X >> 32) < 1
1686 // X >= 0x100000000 -> (X >> 32) >= 1
1687 // X <= 0x0ffffffff -> (X >> 32) < 1
1688 // X > 0x0ffffffff -> (X >> 32) >= 1
1689 unsigned ShiftBits;
1690 APInt NewC = C1;
1691 ISD::CondCode NewCond = Cond;
1692 if (AdjOne) {
1693 ShiftBits = C1.countTrailingOnes();
1694 NewC = NewC + 1;
1695 NewCond = (Cond == ISD::SETULE) ? ISD::SETULT : ISD::SETUGE;
1696 } else {
1697 ShiftBits = C1.countTrailingZeros();
1698 }
1699 NewC = NewC.lshr(ShiftBits);
1700 if (ShiftBits && isLegalICmpImmediate(NewC.getSExtValue())) {
1701 EVT ShiftTy = DCI.isBeforeLegalize() ?
1702 getPointerTy() : getShiftAmountTy(N0.getValueType());
1703 EVT CmpTy = N0.getValueType();
1704 SDValue Shift = DAG.getNode(ISD::SRL, dl, CmpTy, N0,
1705 DAG.getConstant(ShiftBits, ShiftTy));
1706 SDValue CmpRHS = DAG.getConstant(NewC, CmpTy);
1707 return DAG.getSetCC(dl, VT, Shift, CmpRHS, NewCond);
1708 }
1709 }
1710 }
1711 }
1712
1713 if (isa<ConstantFPSDNode>(N0.getNode())) {
1714 // Constant fold or commute setcc.
1715 SDValue O = DAG.FoldSetCC(VT, N0, N1, Cond, dl);
1716 if (O.getNode()) return O;
1717 } else if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(N1.getNode())) {
1718 // If the RHS of an FP comparison is a constant, simplify it away in
1719 // some cases.
1720 if (CFP->getValueAPF().isNaN()) {
1721 // If an operand is known to be a nan, we can fold it.
1722 switch (ISD::getUnorderedFlavor(Cond)) {
1723 default: llvm_unreachable("Unknown flavor!");
1724 case 0: // Known false.
1725 return DAG.getConstant(0, VT);
1726 case 1: // Known true.
1727 return DAG.getConstant(1, VT);
1728 case 2: // Undefined.
1729 return DAG.getUNDEF(VT);
1730 }
1731 }
1732
1733 // Otherwise, we know the RHS is not a NaN. Simplify the node to drop the
1734 // constant if knowing that the operand is non-nan is enough. We prefer to
1735 // have SETO(x,x) instead of SETO(x, 0.0) because this avoids having to
1736 // materialize 0.0.
1737 if (Cond == ISD::SETO || Cond == ISD::SETUO)
1738 return DAG.getSetCC(dl, VT, N0, N0, Cond);
1739
1740 // If the condition is not legal, see if we can find an equivalent one
1741 // which is legal.
1742 if (!isCondCodeLegal(Cond, N0.getSimpleValueType())) {
1743 // If the comparison was an awkward floating-point == or != and one of
1744 // the comparison operands is infinity or negative infinity, convert the
1745 // condition to a less-awkward <= or >=.
1746 if (CFP->getValueAPF().isInfinity()) {
1747 if (CFP->getValueAPF().isNegative()) {
1748 if (Cond == ISD::SETOEQ &&
1749 isCondCodeLegal(ISD::SETOLE, N0.getSimpleValueType()))
1750 return DAG.getSetCC(dl, VT, N0, N1, ISD::SETOLE);
1751 if (Cond == ISD::SETUEQ &&
1752 isCondCodeLegal(ISD::SETOLE, N0.getSimpleValueType()))
1753 return DAG.getSetCC(dl, VT, N0, N1, ISD::SETULE);
1754 if (Cond == ISD::SETUNE &&
1755 isCondCodeLegal(ISD::SETUGT, N0.getSimpleValueType()))
1756 return DAG.getSetCC(dl, VT, N0, N1, ISD::SETUGT);
1757 if (Cond == ISD::SETONE &&
1758 isCondCodeLegal(ISD::SETUGT, N0.getSimpleValueType()))
1759 return DAG.getSetCC(dl, VT, N0, N1, ISD::SETOGT);
1760 } else {
1761 if (Cond == ISD::SETOEQ &&
1762 isCondCodeLegal(ISD::SETOGE, N0.getSimpleValueType()))
1763 return DAG.getSetCC(dl, VT, N0, N1, ISD::SETOGE);
1764 if (Cond == ISD::SETUEQ &&
1765 isCondCodeLegal(ISD::SETOGE, N0.getSimpleValueType()))
1766 return DAG.getSetCC(dl, VT, N0, N1, ISD::SETUGE);
1767 if (Cond == ISD::SETUNE &&
1768 isCondCodeLegal(ISD::SETULT, N0.getSimpleValueType()))
1769 return DAG.getSetCC(dl, VT, N0, N1, ISD::SETULT);
1770 if (Cond == ISD::SETONE &&
1771 isCondCodeLegal(ISD::SETULT, N0.getSimpleValueType()))
1772 return DAG.getSetCC(dl, VT, N0, N1, ISD::SETOLT);
1773 }
1774 }
1775 }
1776 }
1777
1778 if (N0 == N1) {
1779 // The sext(setcc()) => setcc() optimization relies on the appropriate
1780 // constant being emitted.
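    // That is, when booleans are ZeroOrNegativeOneBooleanContent, "X == X"
    // must fold to all-ones (-1) rather than 1, which is why EqVal is chosen
    // per the target's boolean contents below.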
1781 uint64_t EqVal = 0;
1782 switch (getBooleanContents(N0.getValueType())) {
1783 case UndefinedBooleanContent:
1784 case ZeroOrOneBooleanContent:
1785 EqVal = ISD::isTrueWhenEqual(Cond);
1786 break;
1787 case ZeroOrNegativeOneBooleanContent:
1788 EqVal = ISD::isTrueWhenEqual(Cond) ? -1 : 0;
1789 break;
1790 }
1791
1792 // We can always fold X == X for integer setcc's.
1793 if (N0.getValueType().isInteger()) {
1794 return DAG.getConstant(EqVal, VT);
1795 }
1796 unsigned UOF = ISD::getUnorderedFlavor(Cond);
1797 if (UOF == 2) // FP operators that are undefined on NaNs.
1798 return DAG.getConstant(EqVal, VT);
1799 if (UOF == unsigned(ISD::isTrueWhenEqual(Cond)))
1800 return DAG.getConstant(EqVal, VT);
1801 // Otherwise, we can't fold it. However, we can simplify it to SETUO/SETO
1802 // if it is not already.
1803 ISD::CondCode NewCond = UOF == 0 ? ISD::SETO : ISD::SETUO;
1804 if (NewCond != Cond && (DCI.isBeforeLegalizeOps() ||
1805 getCondCodeAction(NewCond, N0.getSimpleValueType()) == Legal))
1806 return DAG.getSetCC(dl, VT, N0, N1, NewCond);
1807 }
1808
1809 if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) &&
1810 N0.getValueType().isInteger()) {
1811 if (N0.getOpcode() == ISD::ADD || N0.getOpcode() == ISD::SUB ||
1812 N0.getOpcode() == ISD::XOR) {
1813 // Simplify (X+Y) == (X+Z) --> Y == Z
1814 if (N0.getOpcode() == N1.getOpcode()) {
1815 if (N0.getOperand(0) == N1.getOperand(0))
1816 return DAG.getSetCC(dl, VT, N0.getOperand(1), N1.getOperand(1), Cond);
1817 if (N0.getOperand(1) == N1.getOperand(1))
1818 return DAG.getSetCC(dl, VT, N0.getOperand(0), N1.getOperand(0), Cond);
1819 if (DAG.isCommutativeBinOp(N0.getOpcode())) {
1820 // If X op Y == Y op X, try other combinations.
1821 if (N0.getOperand(0) == N1.getOperand(1))
1822 return DAG.getSetCC(dl, VT, N0.getOperand(1), N1.getOperand(0),
1823 Cond);
1824 if (N0.getOperand(1) == N1.getOperand(0))
1825 return DAG.getSetCC(dl, VT, N0.getOperand(0), N1.getOperand(1),
1826 Cond);
1827 }
1828 }
1829
1830 // If RHS is a legal immediate value for a compare instruction, we need
1831 // to be careful about increasing register pressure needlessly.
1832 bool LegalRHSImm = false;
1833
1834 if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(N1)) {
1835 if (ConstantSDNode *LHSR = dyn_cast<ConstantSDNode>(N0.getOperand(1))) {
1836 // Turn (X+C1) == C2 --> X == C2-C1
1837 if (N0.getOpcode() == ISD::ADD && N0.getNode()->hasOneUse()) {
1838 return DAG.getSetCC(dl, VT, N0.getOperand(0),
1839 DAG.getConstant(RHSC->getAPIntValue()-
1840 LHSR->getAPIntValue(),
1841 N0.getValueType()), Cond);
1842 }
1843
1844 // Turn (X^C1) == C2 into X == C1^C2 iff X&~C1 = 0.
1845 if (N0.getOpcode() == ISD::XOR)
1846 // If we know that all of the inverted bits are zero, don't bother
1847 // performing the inversion.
1848 if (DAG.MaskedValueIsZero(N0.getOperand(0), ~LHSR->getAPIntValue()))
1849 return
1850 DAG.getSetCC(dl, VT, N0.getOperand(0),
1851 DAG.getConstant(LHSR->getAPIntValue() ^
1852 RHSC->getAPIntValue(),
1853 N0.getValueType()),
1854 Cond);
1855 }
1856
1857 // Turn (C1-X) == C2 --> X == C1-C2
1858 if (ConstantSDNode *SUBC = dyn_cast<ConstantSDNode>(N0.getOperand(0))) {
1859 if (N0.getOpcode() == ISD::SUB && N0.getNode()->hasOneUse()) {
1860 return
1861 DAG.getSetCC(dl, VT, N0.getOperand(1),
1862 DAG.getConstant(SUBC->getAPIntValue() -
1863 RHSC->getAPIntValue(),
1864 N0.getValueType()),
1865 Cond);
1866 }
1867 }
1868
1869 // Could RHSC fold directly into a compare?
1870 if (RHSC->getValueType(0).getSizeInBits() <= 64)
1871 LegalRHSImm = isLegalICmpImmediate(RHSC->getSExtValue());
1872 }
1873
1874 // Simplify (X+Z) == X --> Z == 0
1875 // Don't do this if X is an immediate that can fold into a cmp
1876 // instruction and X+Z has other uses. It could be an induction variable
1877 // chain, and the transform would increase register pressure.
1878 if (!LegalRHSImm || N0.getNode()->hasOneUse()) {
1879 if (N0.getOperand(0) == N1)
1880 return DAG.getSetCC(dl, VT, N0.getOperand(1),
1881 DAG.getConstant(0, N0.getValueType()), Cond);
1882 if (N0.getOperand(1) == N1) {
1883 if (DAG.isCommutativeBinOp(N0.getOpcode()))
1884 return DAG.getSetCC(dl, VT, N0.getOperand(0),
1885 DAG.getConstant(0, N0.getValueType()), Cond);
1886 if (N0.getNode()->hasOneUse()) {
1887 assert(N0.getOpcode() == ISD::SUB && "Unexpected operation!");
1888 // (Z-X) == X --> Z == X<<1
1889 SDValue SH = DAG.getNode(ISD::SHL, dl, N1.getValueType(), N1,
1890 DAG.getConstant(1, getShiftAmountTy(N1.getValueType())));
1891 if (!DCI.isCalledByLegalizer())
1892 DCI.AddToWorklist(SH.getNode());
1893 return DAG.getSetCC(dl, VT, N0.getOperand(0), SH, Cond);
1894 }
1895 }
1896 }
1897 }
1898
1899 if (N1.getOpcode() == ISD::ADD || N1.getOpcode() == ISD::SUB ||
1900 N1.getOpcode() == ISD::XOR) {
1901 // Simplify X == (X+Z) --> Z == 0
1902 if (N1.getOperand(0) == N0)
1903 return DAG.getSetCC(dl, VT, N1.getOperand(1),
1904 DAG.getConstant(0, N1.getValueType()), Cond);
1905 if (N1.getOperand(1) == N0) {
1906 if (DAG.isCommutativeBinOp(N1.getOpcode()))
1907 return DAG.getSetCC(dl, VT, N1.getOperand(0),
1908 DAG.getConstant(0, N1.getValueType()), Cond);
1909 if (N1.getNode()->hasOneUse()) {
1910 assert(N1.getOpcode() == ISD::SUB && "Unexpected operation!");
1911 // X == (Z-X) --> X<<1 == Z
1912 SDValue SH = DAG.getNode(ISD::SHL, dl, N1.getValueType(), N0,
1913 DAG.getConstant(1, getShiftAmountTy(N0.getValueType())));
1914 if (!DCI.isCalledByLegalizer())
1915 DCI.AddToWorklist(SH.getNode());
1916 return DAG.getSetCC(dl, VT, SH, N1.getOperand(0), Cond);
1917 }
1918 }
1919 }
1920
1921 // Simplify x&y == y to x&y != 0 if y has exactly one bit set.
1922 // Note that where y is variable and is known to have at most
1923 // one bit set (for example, if it is z&1) we cannot do this;
1924 // the expressions are not equivalent when y==0.
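    // For example, (x & 8) == 8 becomes (x & 8) != 0, which is typically
    // cheaper to select; the fold is only valid because 8 has exactly one
    // bit set.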
1925 if (N0.getOpcode() == ISD::AND)
1926 if (N0.getOperand(0) == N1 || N0.getOperand(1) == N1) {
1927 if (ValueHasExactlyOneBitSet(N1, DAG)) {
1928 Cond = ISD::getSetCCInverse(Cond, /*isInteger=*/true);
1929 if (DCI.isBeforeLegalizeOps() ||
1930 isCondCodeLegal(Cond, N0.getSimpleValueType())) {
1931 SDValue Zero = DAG.getConstant(0, N1.getValueType());
1932 return DAG.getSetCC(dl, VT, N0, Zero, Cond);
1933 }
1934 }
1935 }
1936 if (N1.getOpcode() == ISD::AND)
1937 if (N1.getOperand(0) == N0 || N1.getOperand(1) == N0) {
1938 if (ValueHasExactlyOneBitSet(N0, DAG)) {
1939 Cond = ISD::getSetCCInverse(Cond, /*isInteger=*/true);
1940 if (DCI.isBeforeLegalizeOps() ||
1941 isCondCodeLegal(Cond, N1.getSimpleValueType())) {
1942 SDValue Zero = DAG.getConstant(0, N0.getValueType());
1943 return DAG.getSetCC(dl, VT, N1, Zero, Cond);
1944 }
1945 }
1946 }
1947 }
1948
1949 // Fold away ALL boolean setcc's.
1950 SDValue Temp;
1951 if (N0.getValueType() == MVT::i1 && foldBooleans) {
1952 switch (Cond) {
1953 default: llvm_unreachable("Unknown integer setcc!");
1954 case ISD::SETEQ: // X == Y -> ~(X^Y)
1955 Temp = DAG.getNode(ISD::XOR, dl, MVT::i1, N0, N1);
1956 N0 = DAG.getNOT(dl, Temp, MVT::i1);
1957 if (!DCI.isCalledByLegalizer())
1958 DCI.AddToWorklist(Temp.getNode());
1959 break;
1960 case ISD::SETNE: // X != Y --> (X^Y)
1961 N0 = DAG.getNode(ISD::XOR, dl, MVT::i1, N0, N1);
1962 break;
1963 case ISD::SETGT: // X >s Y --> X == 0 & Y == 1 --> ~X & Y
1964 case ISD::SETULT: // X <u Y --> X == 0 & Y == 1 --> ~X & Y
1965 Temp = DAG.getNOT(dl, N0, MVT::i1);
1966 N0 = DAG.getNode(ISD::AND, dl, MVT::i1, N1, Temp);
1967 if (!DCI.isCalledByLegalizer())
1968 DCI.AddToWorklist(Temp.getNode());
1969 break;
1970 case ISD::SETLT: // X <s Y --> X == 1 & Y == 0 --> ~Y & X
1971 case ISD::SETUGT: // X >u Y --> X == 1 & Y == 0 --> ~Y & X
1972 Temp = DAG.getNOT(dl, N1, MVT::i1);
1973 N0 = DAG.getNode(ISD::AND, dl, MVT::i1, N0, Temp);
1974 if (!DCI.isCalledByLegalizer())
1975 DCI.AddToWorklist(Temp.getNode());
1976 break;
1977 case ISD::SETULE: // X <=u Y --> X == 0 | Y == 1 --> ~X | Y
1978 case ISD::SETGE: // X >=s Y --> X == 0 | Y == 1 --> ~X | Y
1979 Temp = DAG.getNOT(dl, N0, MVT::i1);
1980 N0 = DAG.getNode(ISD::OR, dl, MVT::i1, N1, Temp);
1981 if (!DCI.isCalledByLegalizer())
1982 DCI.AddToWorklist(Temp.getNode());
1983 break;
1984 case ISD::SETUGE: // X >=u Y --> X == 1 | Y == 0 --> ~Y | X
1985 case ISD::SETLE: // X <=s Y --> X == 1 | Y == 0 --> ~Y | X
1986 Temp = DAG.getNOT(dl, N1, MVT::i1);
1987 N0 = DAG.getNode(ISD::OR, dl, MVT::i1, N0, Temp);
1988 break;
1989 }
1990 if (VT != MVT::i1) {
1991 if (!DCI.isCalledByLegalizer())
1992 DCI.AddToWorklist(N0.getNode());
1993 // FIXME: If running after legalize, we probably can't do this.
1994 N0 = DAG.getNode(ISD::ZERO_EXTEND, dl, VT, N0);
1995 }
1996 return N0;
1997 }
1998
1999 // Could not fold it.
2000 return SDValue();
2001 }
2002
2003 /// isGAPlusOffset - Returns true (and the GlobalValue and the offset) if the
2004 /// node is a GlobalAddress + offset.
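/// For example, (add (GlobalAddress @g, 8), (Constant 16)) yields GA = @g and
/// adds 24 to Offset.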
2005 bool TargetLowering::isGAPlusOffset(SDNode *N, const GlobalValue *&GA,
2006 int64_t &Offset) const {
2007 if (isa<GlobalAddressSDNode>(N)) {
2008 GlobalAddressSDNode *GASD = cast<GlobalAddressSDNode>(N);
2009 GA = GASD->getGlobal();
2010 Offset += GASD->getOffset();
2011 return true;
2012 }
2013
2014 if (N->getOpcode() == ISD::ADD) {
2015 SDValue N1 = N->getOperand(0);
2016 SDValue N2 = N->getOperand(1);
2017 if (isGAPlusOffset(N1.getNode(), GA, Offset)) {
2018 ConstantSDNode *V = dyn_cast<ConstantSDNode>(N2);
2019 if (V) {
2020 Offset += V->getSExtValue();
2021 return true;
2022 }
2023 } else if (isGAPlusOffset(N2.getNode(), GA, Offset)) {
2024 ConstantSDNode *V = dyn_cast<ConstantSDNode>(N1);
2025 if (V) {
2026 Offset += V->getSExtValue();
2027 return true;
2028 }
2029 }
2030 }
2031
2032 return false;
2033 }
2034
2035
2036 SDValue TargetLowering::
2037 PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const {
2038 // Default implementation: no optimization.
2039 return SDValue();
2040 }
2041
2042 //===----------------------------------------------------------------------===//
2043 // Inline Assembler Implementation Methods
2044 //===----------------------------------------------------------------------===//
2045
2046
2047 TargetLowering::ConstraintType
2048 TargetLowering::getConstraintType(const std::string &Constraint) const {
2049 unsigned S = Constraint.size();
2050
2051 if (S == 1) {
2052 switch (Constraint[0]) {
2053 default: break;
2054 case 'r': return C_RegisterClass;
2055 case 'm': // memory
2056     case 'o': // offsettable
2057     case 'V': // not offsettable
2058 return C_Memory;
2059 case 'i': // Simple Integer or Relocatable Constant
2060 case 'n': // Simple Integer
2061 case 'E': // Floating Point Constant
2062 case 'F': // Floating Point Constant
2063 case 's': // Relocatable Constant
2064 case 'p': // Address.
2065 case 'X': // Allow ANY value.
2066 case 'I': // Target registers.
2067 case 'J':
2068 case 'K':
2069 case 'L':
2070 case 'M':
2071 case 'N':
2072 case 'O':
2073 case 'P':
2074 case '<':
2075 case '>':
2076 return C_Other;
2077 }
2078 }
2079
2080 if (S > 1 && Constraint[0] == '{' && Constraint[S-1] == '}') {
2081 if (S == 8 && !Constraint.compare(1, 6, "memory", 6)) // "{memory}"
2082 return C_Memory;
2083 return C_Register;
2084 }
2085 return C_Unknown;
2086 }
2087
2088 /// LowerXConstraint - try to replace an X constraint, which matches anything,
2089 /// with another that has more specific requirements based on the type of the
2090 /// corresponding operand.
2091 const char *TargetLowering::LowerXConstraint(EVT ConstraintVT) const {
2092 if (ConstraintVT.isInteger())
2093 return "r";
2094 if (ConstraintVT.isFloatingPoint())
2095 return "f"; // works for many targets
2096 return nullptr;
2097 }
2098
2099 /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
2100 /// vector. If it is invalid, don't add anything to Ops.
2101 void TargetLowering::LowerAsmOperandForConstraint(SDValue Op,
2102 std::string &Constraint,
2103 std::vector<SDValue> &Ops,
2104 SelectionDAG &DAG) const {
2105
2106 if (Constraint.length() > 1) return;
2107
2108 char ConstraintLetter = Constraint[0];
2109 switch (ConstraintLetter) {
2110 default: break;
2111 case 'X': // Allows any operand; labels (basic block) use this.
2112 if (Op.getOpcode() == ISD::BasicBlock) {
2113 Ops.push_back(Op);
2114 return;
2115 }
2116 // fall through
2117 case 'i': // Simple Integer or Relocatable Constant
2118 case 'n': // Simple Integer
2119 case 's': { // Relocatable Constant
2120 // These operands are interested in values of the form (GV+C), where C may
2121 // be folded in as an offset of GV, or it may be explicitly added. Also, it
2122     // is possible and fine if either GV or C is missing.
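    // For example, an 'i' or 's' operand of (add (GlobalAddress @g), 8) is
    // lowered to TargetGlobalAddress(@g, +8), while a plain integer constant
    // under 'i' or 'n' becomes a sign-extended TargetConstant.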
2123 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op);
2124 GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Op);
2125
2126 // If we have "(add GV, C)", pull out GV/C
2127 if (Op.getOpcode() == ISD::ADD) {
2128 C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
2129 GA = dyn_cast<GlobalAddressSDNode>(Op.getOperand(0));
2130 if (!C || !GA) {
2131 C = dyn_cast<ConstantSDNode>(Op.getOperand(0));
2132 GA = dyn_cast<GlobalAddressSDNode>(Op.getOperand(1));
2133 }
2134 if (!C || !GA)
2135 C = nullptr, GA = nullptr;
2136 }
2137
2138 // If we find a valid operand, map to the TargetXXX version so that the
2139 // value itself doesn't get selected.
2140 if (GA) { // Either &GV or &GV+C
2141 if (ConstraintLetter != 'n') {
2142 int64_t Offs = GA->getOffset();
2143 if (C) Offs += C->getZExtValue();
2144 Ops.push_back(DAG.getTargetGlobalAddress(GA->getGlobal(),
2145 C ? SDLoc(C) : SDLoc(),
2146 Op.getValueType(), Offs));
2147 return;
2148 }
2149 }
2150 if (C) { // just C, no GV.
2151 // Simple constants are not allowed for 's'.
2152 if (ConstraintLetter != 's') {
2153         // gcc prints these as sign extended. Sign extend the value to 64 bits
2154 // now; without this it would get ZExt'd later in
2155 // ScheduleDAGSDNodes::EmitNode, which is very generic.
2156 Ops.push_back(DAG.getTargetConstant(C->getAPIntValue().getSExtValue(),
2157 MVT::i64));
2158 return;
2159 }
2160 }
2161 break;
2162 }
2163 }
2164 }
2165
2166 std::pair<unsigned, const TargetRegisterClass*> TargetLowering::
2167 getRegForInlineAsmConstraint(const std::string &Constraint,
2168 MVT VT) const {
2169 if (Constraint.empty() || Constraint[0] != '{')
2170 return std::make_pair(0u, static_cast<TargetRegisterClass*>(nullptr));
2171 assert(*(Constraint.end()-1) == '}' && "Not a brace enclosed constraint?");
2172
2173 // Remove the braces from around the name.
2174 StringRef RegName(Constraint.data()+1, Constraint.size()-2);
2175
2176 std::pair<unsigned, const TargetRegisterClass*> R =
2177 std::make_pair(0u, static_cast<const TargetRegisterClass*>(nullptr));
2178
2179 // Figure out which register class contains this reg.
2180 const TargetRegisterInfo *RI = getTargetMachine().getRegisterInfo();
2181 for (TargetRegisterInfo::regclass_iterator RCI = RI->regclass_begin(),
2182 E = RI->regclass_end(); RCI != E; ++RCI) {
2183 const TargetRegisterClass *RC = *RCI;
2184
2185 // If none of the value types for this register class are valid, we
2186 // can't use it. For example, 64-bit reg classes on 32-bit targets.
2187 if (!isLegalRC(RC))
2188 continue;
2189
2190 for (TargetRegisterClass::iterator I = RC->begin(), E = RC->end();
2191 I != E; ++I) {
2192 if (RegName.equals_lower(RI->getName(*I))) {
2193 std::pair<unsigned, const TargetRegisterClass*> S =
2194 std::make_pair(*I, RC);
2195
2196 // If this register class has the requested value type, return it,
2197 // otherwise keep searching and return the first class found
2198 // if no other is found which explicitly has the requested type.
2199 if (RC->hasType(VT))
2200 return S;
2201 else if (!R.second)
2202 R = S;
2203 }
2204 }
2205 }
2206
2207 return R;
2208 }
2209
2210 //===----------------------------------------------------------------------===//
2211 // Constraint Selection.
2212
2213 /// isMatchingInputConstraint - Return true if this is an input operand that is
2214 /// a matching constraint like "4".
2215 bool TargetLowering::AsmOperandInfo::isMatchingInputConstraint() const {
2216 assert(!ConstraintCode.empty() && "No known constraint!");
2217 return isdigit(static_cast<unsigned char>(ConstraintCode[0]));
2218 }
2219
2220 /// getMatchedOperand - If this is an input matching constraint, this method
2221 /// returns the output operand it matches.
2222 unsigned TargetLowering::AsmOperandInfo::getMatchedOperand() const {
2223 assert(!ConstraintCode.empty() && "No known constraint!");
2224 return atoi(ConstraintCode.c_str());
2225 }
2226
2227
2228 /// ParseConstraints - Split up the constraint string from the inline
2229 /// assembly value into the specific constraints and their prefixes,
2230 /// and also tie in the associated operand values.
2231 /// If this returns an empty vector, and if the constraint string itself
2232 /// isn't empty, there was an error parsing.
2233 TargetLowering::AsmOperandInfoVector TargetLowering::ParseConstraints(
2234 ImmutableCallSite CS) const {
2235 /// ConstraintOperands - Information about all of the constraints.
2236 AsmOperandInfoVector ConstraintOperands;
2237 const InlineAsm *IA = cast<InlineAsm>(CS.getCalledValue());
2238 unsigned maCount = 0; // Largest number of multiple alternative constraints.
2239
2240 // Do a prepass over the constraints, canonicalizing them, and building up the
2241 // ConstraintOperands list.
2242 InlineAsm::ConstraintInfoVector
2243 ConstraintInfos = IA->ParseConstraints();
2244
2245 unsigned ArgNo = 0; // ArgNo - The argument of the CallInst.
2246 unsigned ResNo = 0; // ResNo - The result number of the next output.
2247
2248 for (unsigned i = 0, e = ConstraintInfos.size(); i != e; ++i) {
2249 ConstraintOperands.push_back(AsmOperandInfo(ConstraintInfos[i]));
2250 AsmOperandInfo &OpInfo = ConstraintOperands.back();
2251
2252 // Update multiple alternative constraint count.
2253 if (OpInfo.multipleAlternatives.size() > maCount)
2254 maCount = OpInfo.multipleAlternatives.size();
2255
2256 OpInfo.ConstraintVT = MVT::Other;
2257
2258 // Compute the value type for each operand.
2259 switch (OpInfo.Type) {
2260 case InlineAsm::isOutput:
2261 // Indirect outputs just consume an argument.
2262 if (OpInfo.isIndirect) {
2263 OpInfo.CallOperandVal = const_cast<Value *>(CS.getArgument(ArgNo++));
2264 break;
2265 }
2266
2267 // The return value of the call is this value. As such, there is no
2268 // corresponding argument.
2269 assert(!CS.getType()->isVoidTy() &&
2270 "Bad inline asm!");
2271 if (StructType *STy = dyn_cast<StructType>(CS.getType())) {
2272 OpInfo.ConstraintVT = getSimpleValueType(STy->getElementType(ResNo));
2273 } else {
2274 assert(ResNo == 0 && "Asm only has one result!");
2275 OpInfo.ConstraintVT = getSimpleValueType(CS.getType());
2276 }
2277 ++ResNo;
2278 break;
2279 case InlineAsm::isInput:
2280 OpInfo.CallOperandVal = const_cast<Value *>(CS.getArgument(ArgNo++));
2281 break;
2282 case InlineAsm::isClobber:
2283 // Nothing to do.
2284 break;
2285 }
2286
2287 if (OpInfo.CallOperandVal) {
2288 llvm::Type *OpTy = OpInfo.CallOperandVal->getType();
2289 if (OpInfo.isIndirect) {
2290 llvm::PointerType *PtrTy = dyn_cast<PointerType>(OpTy);
2291 if (!PtrTy)
2292 report_fatal_error("Indirect operand for inline asm not a pointer!");
2293 OpTy = PtrTy->getElementType();
2294 }
2295
2296 // Look for vector wrapped in a struct. e.g. { <16 x i8> }.
2297 if (StructType *STy = dyn_cast<StructType>(OpTy))
2298 if (STy->getNumElements() == 1)
2299 OpTy = STy->getElementType(0);
2300
2301 // If OpTy is not a single value, it may be a struct/union that we
2302 // can tile with integers.
2303 if (!OpTy->isSingleValueType() && OpTy->isSized()) {
2304 unsigned BitSize = getDataLayout()->getTypeSizeInBits(OpTy);
2305 switch (BitSize) {
2306 default: break;
2307 case 1:
2308 case 8:
2309 case 16:
2310 case 32:
2311 case 64:
2312 case 128:
2313 OpInfo.ConstraintVT =
2314 MVT::getVT(IntegerType::get(OpTy->getContext(), BitSize), true);
2315 break;
2316 }
2317 } else if (PointerType *PT = dyn_cast<PointerType>(OpTy)) {
2318 unsigned PtrSize
2319 = getDataLayout()->getPointerSizeInBits(PT->getAddressSpace());
2320 OpInfo.ConstraintVT = MVT::getIntegerVT(PtrSize);
2321 } else {
2322 OpInfo.ConstraintVT = MVT::getVT(OpTy, true);
2323 }
2324 }
2325 }
2326
2327 // If we have multiple alternative constraints, select the best alternative.
2328 if (ConstraintInfos.size()) {
2329 if (maCount) {
2330 unsigned bestMAIndex = 0;
2331 int bestWeight = -1;
2332 // weight: -1 = invalid match, and 0 = so-so match to 5 = good match.
2333 int weight = -1;
2334 unsigned maIndex;
2335 // Compute the sums of the weights for each alternative, keeping track
2336 // of the best (highest weight) one so far.
2337 for (maIndex = 0; maIndex < maCount; ++maIndex) {
2338 int weightSum = 0;
2339 for (unsigned cIndex = 0, eIndex = ConstraintOperands.size();
2340 cIndex != eIndex; ++cIndex) {
2341 AsmOperandInfo& OpInfo = ConstraintOperands[cIndex];
2342 if (OpInfo.Type == InlineAsm::isClobber)
2343 continue;
2344
2345 // If this is an output operand with a matching input operand,
2346 // look up the matching input. If their types mismatch, e.g. one
2347 // is an integer, the other is floating point, or their sizes are
2348 // different, flag it as an maCantMatch.
2349 if (OpInfo.hasMatchingInput()) {
2350 AsmOperandInfo &Input = ConstraintOperands[OpInfo.MatchingInput];
2351 if (OpInfo.ConstraintVT != Input.ConstraintVT) {
2352 if ((OpInfo.ConstraintVT.isInteger() !=
2353 Input.ConstraintVT.isInteger()) ||
2354 (OpInfo.ConstraintVT.getSizeInBits() !=
2355 Input.ConstraintVT.getSizeInBits())) {
2356 weightSum = -1; // Can't match.
2357 break;
2358 }
2359 }
2360 }
2361 weight = getMultipleConstraintMatchWeight(OpInfo, maIndex);
2362 if (weight == -1) {
2363 weightSum = -1;
2364 break;
2365 }
2366 weightSum += weight;
2367 }
2368 // Update best.
2369 if (weightSum > bestWeight) {
2370 bestWeight = weightSum;
2371 bestMAIndex = maIndex;
2372 }
2373 }
2374
2375 // Now select chosen alternative in each constraint.
2376 for (unsigned cIndex = 0, eIndex = ConstraintOperands.size();
2377 cIndex != eIndex; ++cIndex) {
2378 AsmOperandInfo& cInfo = ConstraintOperands[cIndex];
2379 if (cInfo.Type == InlineAsm::isClobber)
2380 continue;
2381 cInfo.selectAlternative(bestMAIndex);
2382 }
2383 }
2384 }
2385
2386 // Check and hook up tied operands, choose constraint code to use.
2387 for (unsigned cIndex = 0, eIndex = ConstraintOperands.size();
2388 cIndex != eIndex; ++cIndex) {
2389 AsmOperandInfo& OpInfo = ConstraintOperands[cIndex];
2390
2391 // If this is an output operand with a matching input operand, look up the
2392 // matching input. If their types mismatch, e.g. one is an integer, the
2393 // other is floating point, or their sizes are different, flag it as an
2394 // error.
2395 if (OpInfo.hasMatchingInput()) {
2396 AsmOperandInfo &Input = ConstraintOperands[OpInfo.MatchingInput];
2397
2398 if (OpInfo.ConstraintVT != Input.ConstraintVT) {
2399 std::pair<unsigned, const TargetRegisterClass*> MatchRC =
2400 getRegForInlineAsmConstraint(OpInfo.ConstraintCode,
2401 OpInfo.ConstraintVT);
2402 std::pair<unsigned, const TargetRegisterClass*> InputRC =
2403 getRegForInlineAsmConstraint(Input.ConstraintCode,
2404 Input.ConstraintVT);
2405 if ((OpInfo.ConstraintVT.isInteger() !=
2406 Input.ConstraintVT.isInteger()) ||
2407 (MatchRC.second != InputRC.second)) {
2408 report_fatal_error("Unsupported asm: input constraint"
2409 " with a matching output constraint of"
2410 " incompatible type!");
2411 }
2412 }
2413
2414 }
2415 }
2416
2417 return ConstraintOperands;
2418 }
2419
2420
2421 /// getConstraintGenerality - Return an integer indicating how general CT
2422 /// is.
2423 static unsigned getConstraintGenerality(TargetLowering::ConstraintType CT) {
2424 switch (CT) {
2425 case TargetLowering::C_Other:
2426 case TargetLowering::C_Unknown:
2427 return 0;
2428 case TargetLowering::C_Register:
2429 return 1;
2430 case TargetLowering::C_RegisterClass:
2431 return 2;
2432 case TargetLowering::C_Memory:
2433 return 3;
2434 }
2435 llvm_unreachable("Invalid constraint type");
2436 }
2437
2438 /// Examine constraint type and operand type and determine a weight value.
2439 /// This object must already have been set up with the operand type
2440 /// and the current alternative constraint selected.
2441 TargetLowering::ConstraintWeight
2442 TargetLowering::getMultipleConstraintMatchWeight(
2443 AsmOperandInfo &info, int maIndex) const {
2444 InlineAsm::ConstraintCodeVector *rCodes;
2445 if (maIndex >= (int)info.multipleAlternatives.size())
2446 rCodes = &info.Codes;
2447 else
2448 rCodes = &info.multipleAlternatives[maIndex].Codes;
2449 ConstraintWeight BestWeight = CW_Invalid;
2450
2451 // Loop over the options, keeping track of the most general one.
2452 for (unsigned i = 0, e = rCodes->size(); i != e; ++i) {
2453 ConstraintWeight weight =
2454 getSingleConstraintMatchWeight(info, (*rCodes)[i].c_str());
2455 if (weight > BestWeight)
2456 BestWeight = weight;
2457 }
2458
2459 return BestWeight;
2460 }
2461
2462 /// Examine constraint type and operand type and determine a weight value.
2463 /// This object must already have been set up with the operand type
2464 /// and the current alternative constraint selected.
2465 TargetLowering::ConstraintWeight
2466 TargetLowering::getSingleConstraintMatchWeight(
2467 AsmOperandInfo &info, const char *constraint) const {
2468 ConstraintWeight weight = CW_Invalid;
2469 Value *CallOperandVal = info.CallOperandVal;
2470 // If we don't have a value, we can't do a match,
2471 // but allow it at the lowest weight.
2472 if (!CallOperandVal)
2473 return CW_Default;
2474 // Look at the constraint type.
2475 switch (*constraint) {
2476 case 'i': // immediate integer.
2477 case 'n': // immediate integer with a known value.
2478 if (isa<ConstantInt>(CallOperandVal))
2479 weight = CW_Constant;
2480 break;
2481   case 's': // non-explicit integral immediate.
2482 if (isa<GlobalValue>(CallOperandVal))
2483 weight = CW_Constant;
2484 break;
2485 case 'E': // immediate float if host format.
2486 case 'F': // immediate float.
2487 if (isa<ConstantFP>(CallOperandVal))
2488 weight = CW_Constant;
2489 break;
2490 case '<': // memory operand with autodecrement.
2491 case '>': // memory operand with autoincrement.
2492 case 'm': // memory operand.
2493 case 'o': // offsettable memory operand
2494 case 'V': // non-offsettable memory operand
2495 weight = CW_Memory;
2496 break;
2497 case 'r': // general register.
2498 case 'g': // general register, memory operand or immediate integer.
2499 // note: Clang converts "g" to "imr".
2500 if (CallOperandVal->getType()->isIntegerTy())
2501 weight = CW_Register;
2502 break;
2503 case 'X': // any operand.
2504 default:
2505 weight = CW_Default;
2506 break;
2507 }
2508 return weight;
2509 }
2510
2511 /// ChooseConstraint - If there are multiple different constraints that we
2512 /// could pick for this operand (e.g. "imr") try to pick the 'best' one.
2513 /// This is somewhat tricky: constraints fall into four classes:
2514 /// Other -> immediates and magic values
2515 /// Register -> one specific register
2516 /// RegisterClass -> a group of regs
2517 /// Memory -> memory
2518 /// Ideally, we would pick the most specific constraint possible: if we have
2519 /// something that fits into a register, we would pick it. The problem here
2520 /// is that if we have something that could either be in a register or in
2521 /// memory that use of the register could cause selection of *other*
2522 /// operands to fail: they might only succeed if we pick memory. Because of
2523 /// this the heuristic we use is:
2524 ///
2525 /// 1) If there is an 'other' constraint, and if the operand is valid for
2526 /// that constraint, use it. This makes us take advantage of 'i'
2527 /// constraints when available.
2528 /// 2) Otherwise, pick the most general constraint present. This prefers
2529 /// 'm' over 'r', for example.
2530 ///
2531 static void ChooseConstraint(TargetLowering::AsmOperandInfo &OpInfo,
2532 const TargetLowering &TLI,
2533 SDValue Op, SelectionDAG *DAG) {
2534 assert(OpInfo.Codes.size() > 1 && "Doesn't have multiple constraint options");
2535 unsigned BestIdx = 0;
2536 TargetLowering::ConstraintType BestType = TargetLowering::C_Unknown;
2537 int BestGenerality = -1;
2538
2539 // Loop over the options, keeping track of the most general one.
2540 for (unsigned i = 0, e = OpInfo.Codes.size(); i != e; ++i) {
2541 TargetLowering::ConstraintType CType =
2542 TLI.getConstraintType(OpInfo.Codes[i]);
2543
2544 // If this is an 'other' constraint, see if the operand is valid for it.
2545 // For example, on X86 we might have an 'rI' constraint. If the operand
2546 // is an integer in the range [0..31] we want to use I (saving a load
2547 // of a register), otherwise we must use 'r'.
2548 if (CType == TargetLowering::C_Other && Op.getNode()) {
2549 assert(OpInfo.Codes[i].size() == 1 &&
2550 "Unhandled multi-letter 'other' constraint");
2551 std::vector<SDValue> ResultOps;
2552 TLI.LowerAsmOperandForConstraint(Op, OpInfo.Codes[i],
2553 ResultOps, *DAG);
2554 if (!ResultOps.empty()) {
2555 BestType = CType;
2556 BestIdx = i;
2557 break;
2558 }
2559 }
2560
2561 // Things with matching constraints can only be registers, per gcc
2562 // documentation. This mainly affects "g" constraints.
2563 if (CType == TargetLowering::C_Memory && OpInfo.hasMatchingInput())
2564 continue;
2565
2566 // This constraint letter is more general than the previous one, use it.
2567 int Generality = getConstraintGenerality(CType);
2568 if (Generality > BestGenerality) {
2569 BestType = CType;
2570 BestIdx = i;
2571 BestGenerality = Generality;
2572 }
2573 }
2574
2575 OpInfo.ConstraintCode = OpInfo.Codes[BestIdx];
2576 OpInfo.ConstraintType = BestType;
2577 }
2578
2579 /// ComputeConstraintToUse - Determines the constraint code and constraint
2580 /// type to use for the specific AsmOperandInfo, setting
2581 /// OpInfo.ConstraintCode and OpInfo.ConstraintType.
2582 void TargetLowering::ComputeConstraintToUse(AsmOperandInfo &OpInfo,
2583 SDValue Op,
2584 SelectionDAG *DAG) const {
2585 assert(!OpInfo.Codes.empty() && "Must have at least one constraint");
2586
2587 // Single-letter constraints ('r') are very common.
2588 if (OpInfo.Codes.size() == 1) {
2589 OpInfo.ConstraintCode = OpInfo.Codes[0];
2590 OpInfo.ConstraintType = getConstraintType(OpInfo.ConstraintCode);
2591 } else {
2592 ChooseConstraint(OpInfo, *this, Op, DAG);
2593 }
2594
2595 // 'X' matches anything.
2596 if (OpInfo.ConstraintCode == "X" && OpInfo.CallOperandVal) {
2597 // Labels and constants are handled elsewhere ('X' is the only thing
2598 // that matches labels). For Functions, the type here is the type of
2599 // the result, which is not what we want to look at; leave them alone.
2600 Value *v = OpInfo.CallOperandVal;
2601 if (isa<BasicBlock>(v) || isa<ConstantInt>(v) || isa<Function>(v)) {
2602 OpInfo.CallOperandVal = v;
2603 return;
2604 }
2605
2606 // Otherwise, try to resolve it to something we know about by looking at
2607 // the actual operand type.
2608 if (const char *Repl = LowerXConstraint(OpInfo.ConstraintVT)) {
2609 OpInfo.ConstraintCode = Repl;
2610 OpInfo.ConstraintType = getConstraintType(OpInfo.ConstraintCode);
2611 }
2612 }
2613 }
2614
2615 /// \brief Given an exact SDIV by a constant, create a multiplication
2616 /// with the multiplicative inverse of the constant.
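///
/// For example, an exact "sdiv i32 %x, 6" can be lowered to
///   mul (sra exact %x, 1), 0xAAAAAAAB
/// since 0xAAAAAAAB is the multiplicative inverse of 3 modulo 2^32.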
2617 SDValue TargetLowering::BuildExactSDIV(SDValue Op1, SDValue Op2, SDLoc dl,
2618 SelectionDAG &DAG) const {
2619 ConstantSDNode *C = cast<ConstantSDNode>(Op2);
2620 APInt d = C->getAPIntValue();
2621 assert(d != 0 && "Division by zero!");
2622
2623 // Shift the value upfront if it is even, so the LSB is one.
2624 unsigned ShAmt = d.countTrailingZeros();
2625 if (ShAmt) {
2626 // TODO: For UDIV use SRL instead of SRA.
2627 SDValue Amt = DAG.getConstant(ShAmt, getShiftAmountTy(Op1.getValueType()));
2628 Op1 = DAG.getNode(ISD::SRA, dl, Op1.getValueType(), Op1, Amt, false, false,
2629 true);
2630 d = d.ashr(ShAmt);
2631 }
2632
2633 // Calculate the multiplicative inverse, using Newton's method.
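  // Each iteration maps xn to xn * (2 - d * xn); for odd d this at least
  // doubles the number of correct low bits, so xn converges to d^-1 modulo
  // 2^BitWidth in O(log BitWidth) steps.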
2634 APInt t, xn = d;
2635 while ((t = d*xn) != 1)
2636 xn *= APInt(d.getBitWidth(), 2) - t;
2637
2638 Op2 = DAG.getConstant(xn, Op1.getValueType());
2639 return DAG.getNode(ISD::MUL, dl, Op1.getValueType(), Op1, Op2);
2640 }
2641
2642 /// \brief Given an ISD::SDIV node expressing a divide by constant,
2643 /// return a DAG expression to select that will generate the same value by
2644 /// multiplying by a magic number. See:
2645 /// <http://the.wall.riscom.net/books/proc/ppc/cwg/code2.html>
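///
/// The generated sequence is roughly:
///   q = mulhs(x, magic.m)         // or the high half of smul_lohi
///   if (d > 0 && magic.m < 0)  q += x
///   if (d < 0 && magic.m > 0)  q -= x
///   q = sra(q, magic.s)
///   q += srl(q, bitwidth - 1)     // round the quotient toward zero
/// where magic.m and magic.s come from APInt::magic().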
2646 SDValue TargetLowering::BuildSDIV(SDNode *N, const APInt &Divisor,
2647 SelectionDAG &DAG, bool IsAfterLegalization,
2648 std::vector<SDNode *> *Created) const {
2649 EVT VT = N->getValueType(0);
2650 SDLoc dl(N);
2651
2652 // Check to see if we can do this.
2653 // FIXME: We should be more aggressive here.
2654 if (!isTypeLegal(VT))
2655 return SDValue();
2656
2657 APInt::ms magics = Divisor.magic();
2658
2659 // Multiply the numerator (operand 0) by the magic value
2660 // FIXME: We should support doing a MUL in a wider type
2661 SDValue Q;
2662 if (IsAfterLegalization ? isOperationLegal(ISD::MULHS, VT) :
2663 isOperationLegalOrCustom(ISD::MULHS, VT))
2664 Q = DAG.getNode(ISD::MULHS, dl, VT, N->getOperand(0),
2665 DAG.getConstant(magics.m, VT));
2666 else if (IsAfterLegalization ? isOperationLegal(ISD::SMUL_LOHI, VT) :
2667 isOperationLegalOrCustom(ISD::SMUL_LOHI, VT))
2668 Q = SDValue(DAG.getNode(ISD::SMUL_LOHI, dl, DAG.getVTList(VT, VT),
2669 N->getOperand(0),
2670 DAG.getConstant(magics.m, VT)).getNode(), 1);
2671 else
2672     return SDValue(); // No mulhs or equivalent
2673 // If d > 0 and m < 0, add the numerator
2674 if (Divisor.isStrictlyPositive() && magics.m.isNegative()) {
2675 Q = DAG.getNode(ISD::ADD, dl, VT, Q, N->getOperand(0));
2676 if (Created)
2677 Created->push_back(Q.getNode());
2678 }
2679 // If d < 0 and m > 0, subtract the numerator.
2680 if (Divisor.isNegative() && magics.m.isStrictlyPositive()) {
2681 Q = DAG.getNode(ISD::SUB, dl, VT, Q, N->getOperand(0));
2682 if (Created)
2683 Created->push_back(Q.getNode());
2684 }
2685 // Shift right algebraic if shift value is nonzero
2686 if (magics.s > 0) {
2687 Q = DAG.getNode(ISD::SRA, dl, VT, Q,
2688 DAG.getConstant(magics.s, getShiftAmountTy(Q.getValueType())));
2689 if (Created)
2690 Created->push_back(Q.getNode());
2691 }
2692 // Extract the sign bit and add it to the quotient
2693 SDValue T = DAG.getNode(ISD::SRL, dl, VT, Q,
2694 DAG.getConstant(VT.getScalarSizeInBits() - 1,
2695 getShiftAmountTy(Q.getValueType())));
2696 if (Created)
2697 Created->push_back(T.getNode());
2698 return DAG.getNode(ISD::ADD, dl, VT, Q, T);
2699 }
2700
2701 /// \brief Given an ISD::UDIV node expressing a divide by constant,
2702 /// return a DAG expression to select that will generate the same value by
2703 /// multiplying by a magic number. See:
2704 /// <http://the.wall.riscom.net/books/proc/ppc/cwg/code2.html>
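///
/// The generated sequence is roughly:
///   q = mulhu(x, magic.m)         // or the high half of umul_lohi
///   if (magic.a == 0)
///     result = srl(q, magic.s)
///   else
///     result = srl(add(srl(sub(x, q), 1), q), magic.s - 1)
/// where magic.m, magic.a and magic.s come from APInt::magicu().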
2705 SDValue TargetLowering::BuildUDIV(SDNode *N, const APInt &Divisor,
2706 SelectionDAG &DAG, bool IsAfterLegalization,
2707 std::vector<SDNode *> *Created) const {
2708 EVT VT = N->getValueType(0);
2709 SDLoc dl(N);
2710
2711 // Check to see if we can do this.
2712 // FIXME: We should be more aggressive here.
2713 if (!isTypeLegal(VT))
2714 return SDValue();
2715
2716 // FIXME: We should use a narrower constant when the upper
2717 // bits are known to be zero.
2718 APInt::mu magics = Divisor.magicu();
2719
2720 SDValue Q = N->getOperand(0);
2721
2722 // If the divisor is even, we can avoid using the expensive fixup by shifting
2723 // the divided value upfront.
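  // For example, for a divide by 28 the dividend is first shifted right by 2
  // and the magic number for 7 is used, which needs no add fixup.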
2724 if (magics.a != 0 && !Divisor[0]) {
2725 unsigned Shift = Divisor.countTrailingZeros();
2726 Q = DAG.getNode(ISD::SRL, dl, VT, Q,
2727 DAG.getConstant(Shift, getShiftAmountTy(Q.getValueType())));
2728 if (Created)
2729 Created->push_back(Q.getNode());
2730
2731 // Get magic number for the shifted divisor.
2732 magics = Divisor.lshr(Shift).magicu(Shift);
2733 assert(magics.a == 0 && "Should use cheap fixup now");
2734 }
2735
2736 // Multiply the numerator (operand 0) by the magic value
2737 // FIXME: We should support doing a MUL in a wider type
2738 if (IsAfterLegalization ? isOperationLegal(ISD::MULHU, VT) :
2739 isOperationLegalOrCustom(ISD::MULHU, VT))
2740 Q = DAG.getNode(ISD::MULHU, dl, VT, Q, DAG.getConstant(magics.m, VT));
2741 else if (IsAfterLegalization ? isOperationLegal(ISD::UMUL_LOHI, VT) :
2742 isOperationLegalOrCustom(ISD::UMUL_LOHI, VT))
2743 Q = SDValue(DAG.getNode(ISD::UMUL_LOHI, dl, DAG.getVTList(VT, VT), Q,
2744 DAG.getConstant(magics.m, VT)).getNode(), 1);
2745 else
2746     return SDValue(); // No mulhu or equivalent
2747 if (Created)
2748 Created->push_back(Q.getNode());
2749
2750 if (magics.a == 0) {
2751 assert(magics.s < Divisor.getBitWidth() &&
2752 "We shouldn't generate an undefined shift!");
2753 return DAG.getNode(ISD::SRL, dl, VT, Q,
2754 DAG.getConstant(magics.s, getShiftAmountTy(Q.getValueType())));
2755 } else {
2756 SDValue NPQ = DAG.getNode(ISD::SUB, dl, VT, N->getOperand(0), Q);
2757 if (Created)
2758 Created->push_back(NPQ.getNode());
2759 NPQ = DAG.getNode(ISD::SRL, dl, VT, NPQ,
2760 DAG.getConstant(1, getShiftAmountTy(NPQ.getValueType())));
2761 if (Created)
2762 Created->push_back(NPQ.getNode());
2763 NPQ = DAG.getNode(ISD::ADD, dl, VT, NPQ, Q);
2764 if (Created)
2765 Created->push_back(NPQ.getNode());
2766 return DAG.getNode(ISD::SRL, dl, VT, NPQ,
2767 DAG.getConstant(magics.s-1, getShiftAmountTy(NPQ.getValueType())));
2768 }
2769 }
2770
2771 bool TargetLowering::
2772 verifyReturnAddressArgumentIsConstant(SDValue Op, SelectionDAG &DAG) const {
2773 if (!isa<ConstantSDNode>(Op.getOperand(0))) {
2774 DAG.getContext()->emitError("argument to '__builtin_return_address' must "
2775 "be a constant integer");
2776 return true;
2777 }
2778
2779 return false;
2780 }
2781
2782 //===----------------------------------------------------------------------===//
2783 // Legalization Utilities
2784 //===----------------------------------------------------------------------===//
2785
2786 bool TargetLowering::expandMUL(SDNode *N, SDValue &Lo, SDValue &Hi, EVT HiLoVT,
2787 SelectionDAG &DAG, SDValue LL, SDValue LH,
2788 SDValue RL, SDValue RH) const {
2789 EVT VT = N->getValueType(0);
2790 SDLoc dl(N);
2791
2792 bool HasMULHS = isOperationLegalOrCustom(ISD::MULHS, HiLoVT);
2793 bool HasMULHU = isOperationLegalOrCustom(ISD::MULHU, HiLoVT);
2794 bool HasSMUL_LOHI = isOperationLegalOrCustom(ISD::SMUL_LOHI, HiLoVT);
2795 bool HasUMUL_LOHI = isOperationLegalOrCustom(ISD::UMUL_LOHI, HiLoVT);
2796 if (HasMULHU || HasMULHS || HasUMUL_LOHI || HasSMUL_LOHI) {
2797 unsigned OuterBitSize = VT.getSizeInBits();
2798 unsigned InnerBitSize = HiLoVT.getSizeInBits();
2799 unsigned LHSSB = DAG.ComputeNumSignBits(N->getOperand(0));
2800 unsigned RHSSB = DAG.ComputeNumSignBits(N->getOperand(1));
2801
2802 // LL, LH, RL, and RH must be either all NULL or all set to a value.
2803 assert((LL.getNode() && LH.getNode() && RL.getNode() && RH.getNode()) ||
2804 (!LL.getNode() && !LH.getNode() && !RL.getNode() && !RH.getNode()));
2805
2806 if (!LL.getNode() && !RL.getNode() &&
2807 isOperationLegalOrCustom(ISD::TRUNCATE, HiLoVT)) {
2808 LL = DAG.getNode(ISD::TRUNCATE, dl, HiLoVT, N->getOperand(0));
2809 RL = DAG.getNode(ISD::TRUNCATE, dl, HiLoVT, N->getOperand(1));
2810 }
2811
2812 if (!LL.getNode())
2813 return false;
2814
2815 APInt HighMask = APInt::getHighBitsSet(OuterBitSize, InnerBitSize);
2816 if (DAG.MaskedValueIsZero(N->getOperand(0), HighMask) &&
2817 DAG.MaskedValueIsZero(N->getOperand(1), HighMask)) {
2818 // The inputs are both zero-extended.
2819 if (HasUMUL_LOHI) {
2820 // We can emit a umul_lohi.
2821 Lo = DAG.getNode(ISD::UMUL_LOHI, dl,
2822 DAG.getVTList(HiLoVT, HiLoVT), LL, RL);
2823 Hi = SDValue(Lo.getNode(), 1);
2824 return true;
2825 }
2826 if (HasMULHU) {
2827 // We can emit a mulhu+mul.
2828 Lo = DAG.getNode(ISD::MUL, dl, HiLoVT, LL, RL);
2829 Hi = DAG.getNode(ISD::MULHU, dl, HiLoVT, LL, RL);
2830 return true;
2831 }
2832 }
2833 if (LHSSB > InnerBitSize && RHSSB > InnerBitSize) {
2834 // The input values are both sign-extended.
2835 if (HasSMUL_LOHI) {
2836 // We can emit a smul_lohi.
2837 Lo = DAG.getNode(ISD::SMUL_LOHI, dl,
2838 DAG.getVTList(HiLoVT, HiLoVT), LL, RL);
2839 Hi = SDValue(Lo.getNode(), 1);
2840 return true;
2841 }
2842 if (HasMULHS) {
2843 // We can emit a mulhs+mul.
2844 Lo = DAG.getNode(ISD::MUL, dl, HiLoVT, LL, RL);
2845 Hi = DAG.getNode(ISD::MULHS, dl, HiLoVT, LL, RL);
2846 return true;
2847 }
2848 }
2849
2850 if (!LH.getNode() && !RH.getNode() &&
2851 isOperationLegalOrCustom(ISD::SRL, VT) &&
2852 isOperationLegalOrCustom(ISD::TRUNCATE, HiLoVT)) {
2853 unsigned ShiftAmt = VT.getSizeInBits() - HiLoVT.getSizeInBits();
2854 SDValue Shift = DAG.getConstant(ShiftAmt, getShiftAmountTy(VT));
2855 LH = DAG.getNode(ISD::SRL, dl, VT, N->getOperand(0), Shift);
2856 LH = DAG.getNode(ISD::TRUNCATE, dl, HiLoVT, LH);
2857 RH = DAG.getNode(ISD::SRL, dl, VT, N->getOperand(1), Shift);
2858 RH = DAG.getNode(ISD::TRUNCATE, dl, HiLoVT, RH);
2859 }
2860
2861 if (!LH.getNode())
2862 return false;
2863
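    // General case: with x = LH*2^b + LL and y = RH*2^b + RL (b being the
    // width of HiLoVT), the low 2*b bits of x*y are
    //   Lo = lo(LL*RL)
    //   Hi = hi(LL*RL) + lo(LL*RH) + lo(LH*RL)   (mod 2^b)
    // since the LH*RH term and any higher carries only affect bits >= 2*b.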
2864 if (HasUMUL_LOHI) {
2865 // Lo,Hi = umul LHS, RHS.
2866 SDValue UMulLOHI = DAG.getNode(ISD::UMUL_LOHI, dl,
2867 DAG.getVTList(HiLoVT, HiLoVT), LL, RL);
2868 Lo = UMulLOHI;
2869 Hi = UMulLOHI.getValue(1);
2870 RH = DAG.getNode(ISD::MUL, dl, HiLoVT, LL, RH);
2871 LH = DAG.getNode(ISD::MUL, dl, HiLoVT, LH, RL);
2872 Hi = DAG.getNode(ISD::ADD, dl, HiLoVT, Hi, RH);
2873 Hi = DAG.getNode(ISD::ADD, dl, HiLoVT, Hi, LH);
2874 return true;
2875 }
2876 if (HasMULHU) {
2877 Lo = DAG.getNode(ISD::MUL, dl, HiLoVT, LL, RL);
2878 Hi = DAG.getNode(ISD::MULHU, dl, HiLoVT, LL, RL);
2879 RH = DAG.getNode(ISD::MUL, dl, HiLoVT, LL, RH);
2880 LH = DAG.getNode(ISD::MUL, dl, HiLoVT, LH, RL);
2881 Hi = DAG.getNode(ISD::ADD, dl, HiLoVT, Hi, RH);
2882 Hi = DAG.getNode(ISD::ADD, dl, HiLoVT, Hi, LH);
2883 return true;
2884 }
2885 }
2886 return false;
2887 }
2888