//===-- TargetLowering.cpp - Implement the TargetLowering class -----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This implements the TargetLowering class.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetLoweringObjectFile.h"
#include "llvm/Target/TargetMachine.h"
#include <cctype>
using namespace llvm;

/// NOTE: The TargetMachine owns TLOF.
TargetLowering::TargetLowering(const TargetMachine &tm)
    : TargetLoweringBase(tm) {}

const char *TargetLowering::getTargetNodeName(unsigned Opcode) const {
  return nullptr;
}

bool TargetLowering::isPositionIndependent() const {
  return getTargetMachine().isPositionIndependent();
}

/// Check whether a given call node is in tail position within its function. If
/// so, it sets Chain to the input chain of the tail call.
bool TargetLowering::isInTailCallPosition(SelectionDAG &DAG, SDNode *Node,
                                          SDValue &Chain) const {
  const Function &F = DAG.getMachineFunction().getFunction();

  // First, check if tail calls have been disabled in this function.
  if (F.getFnAttribute("disable-tail-calls").getValueAsString() == "true")
    return false;

  // Conservatively require the attributes of the call to match those of
  // the return. Ignore NoAlias and NonNull because they don't affect the
  // call sequence.
  AttributeList CallerAttrs = F.getAttributes();
  if (AttrBuilder(CallerAttrs, AttributeList::ReturnIndex)
          .removeAttribute(Attribute::NoAlias)
          .removeAttribute(Attribute::NonNull)
          .hasAttributes())
    return false;

  // It's not safe to eliminate the sign / zero extension of the return value.
  if (CallerAttrs.hasAttribute(AttributeList::ReturnIndex, Attribute::ZExt) ||
      CallerAttrs.hasAttribute(AttributeList::ReturnIndex, Attribute::SExt))
    return false;

  // Check if the only use is a function return node.
  return isUsedByReturnOnly(Node, Chain);
}

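/// Return true if each outgoing argument assigned to a callee-saved register
/// is the unmodified incoming value of that same register in the caller (a
/// CopyFromReg of the corresponding function live-in), which a tail call must
/// not clobber.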
bool TargetLowering::parametersInCSRMatch(
    const MachineRegisterInfo &MRI, const uint32_t *CallerPreservedMask,
    const SmallVectorImpl<CCValAssign> &ArgLocs,
    const SmallVectorImpl<SDValue> &OutVals) const {
  for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {
    const CCValAssign &ArgLoc = ArgLocs[I];
    if (!ArgLoc.isRegLoc())
      continue;
    MCRegister Reg = ArgLoc.getLocReg();
    // Only look at callee-saved registers.
    if (MachineOperand::clobbersPhysReg(CallerPreservedMask, Reg))
      continue;
    // Check that we pass the same value that the caller received in this
    // register. (We look for a CopyFromReg reading a virtual register that is
    // used for the function live-in value of register Reg.)
    SDValue Value = OutVals[I];
    if (Value->getOpcode() != ISD::CopyFromReg)
      return false;
    Register ArgReg = cast<RegisterSDNode>(Value->getOperand(1))->getReg();
    if (MRI.getLiveInPhysReg(ArgReg) != Reg)
      return false;
  }
  return true;
}

/// Set CallLoweringInfo attribute flags based on a call instruction
/// and called function attributes.
void TargetLoweringBase::ArgListEntry::setAttributes(const CallBase *Call,
                                                     unsigned ArgIdx) {
  IsSExt = Call->paramHasAttr(ArgIdx, Attribute::SExt);
  IsZExt = Call->paramHasAttr(ArgIdx, Attribute::ZExt);
  IsInReg = Call->paramHasAttr(ArgIdx, Attribute::InReg);
  IsSRet = Call->paramHasAttr(ArgIdx, Attribute::StructRet);
  IsNest = Call->paramHasAttr(ArgIdx, Attribute::Nest);
  IsByVal = Call->paramHasAttr(ArgIdx, Attribute::ByVal);
  IsPreallocated = Call->paramHasAttr(ArgIdx, Attribute::Preallocated);
  IsInAlloca = Call->paramHasAttr(ArgIdx, Attribute::InAlloca);
  IsReturned = Call->paramHasAttr(ArgIdx, Attribute::Returned);
  IsSwiftSelf = Call->paramHasAttr(ArgIdx, Attribute::SwiftSelf);
  IsSwiftError = Call->paramHasAttr(ArgIdx, Attribute::SwiftError);
  Alignment = Call->getParamAlign(ArgIdx);
  ByValType = nullptr;
  if (IsByVal)
    ByValType = Call->getParamByValType(ArgIdx);
  PreallocatedType = nullptr;
  if (IsPreallocated)
    PreallocatedType = Call->getParamPreallocatedType(ArgIdx);
}

/// Generate a libcall taking the given operands as arguments and returning a
/// result of type RetVT.
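///
/// For example, softening an f32 addition would typically be emitted along
/// these lines (illustrative sketch; RTLIB::ADD_F32 resolves to __addsf3 on
/// most targets):
///   SDValue Ops[2] = {LHS, RHS};
///   TargetLowering::MakeLibCallOptions CallOptions;
///   std::pair<SDValue, SDValue> Res =
///       TLI.makeLibCall(DAG, RTLIB::ADD_F32, MVT::f32, Ops, CallOptions, dl);
///   // Res.first is the call result, Res.second the output chain.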
std::pair<SDValue, SDValue>
TargetLowering::makeLibCall(SelectionDAG &DAG, RTLIB::Libcall LC, EVT RetVT,
                            ArrayRef<SDValue> Ops,
                            MakeLibCallOptions CallOptions,
                            const SDLoc &dl,
                            SDValue InChain) const {
  if (!InChain)
    InChain = DAG.getEntryNode();

  TargetLowering::ArgListTy Args;
  Args.reserve(Ops.size());

  TargetLowering::ArgListEntry Entry;
  for (unsigned i = 0; i < Ops.size(); ++i) {
    SDValue NewOp = Ops[i];
    Entry.Node = NewOp;
    Entry.Ty = Entry.Node.getValueType().getTypeForEVT(*DAG.getContext());
    Entry.IsSExt = shouldSignExtendTypeInLibCall(NewOp.getValueType(),
                                                 CallOptions.IsSExt);
    Entry.IsZExt = !Entry.IsSExt;

    if (CallOptions.IsSoften &&
        !shouldExtendTypeInLibCall(CallOptions.OpsVTBeforeSoften[i])) {
      Entry.IsSExt = Entry.IsZExt = false;
    }
    Args.push_back(Entry);
  }

  if (LC == RTLIB::UNKNOWN_LIBCALL)
    report_fatal_error("Unsupported library call operation!");
  SDValue Callee = DAG.getExternalSymbol(getLibcallName(LC),
                                         getPointerTy(DAG.getDataLayout()));

  Type *RetTy = RetVT.getTypeForEVT(*DAG.getContext());
  TargetLowering::CallLoweringInfo CLI(DAG);
  bool signExtend = shouldSignExtendTypeInLibCall(RetVT, CallOptions.IsSExt);
  bool zeroExtend = !signExtend;

  if (CallOptions.IsSoften &&
      !shouldExtendTypeInLibCall(CallOptions.RetVTBeforeSoften)) {
    signExtend = zeroExtend = false;
  }

  CLI.setDebugLoc(dl)
      .setChain(InChain)
      .setLibCallee(getLibcallCallingConv(LC), RetTy, Callee, std::move(Args))
      .setNoReturn(CallOptions.DoesNotReturn)
      .setDiscardResult(!CallOptions.IsReturnValueUsed)
      .setIsPostTypeLegalization(CallOptions.IsPostTypeLegalization)
      .setSExtResult(signExtend)
      .setZExtResult(zeroExtend);
  return LowerCallTo(CLI);
}

bool TargetLowering::findOptimalMemOpLowering(
    std::vector<EVT> &MemOps, unsigned Limit, const MemOp &Op, unsigned DstAS,
    unsigned SrcAS, const AttributeList &FuncAttributes) const {
  if (Op.isMemcpyWithFixedDstAlign() && Op.getSrcAlign() < Op.getDstAlign())
    return false;

  EVT VT = getOptimalMemOpType(Op, FuncAttributes);

  if (VT == MVT::Other) {
    // Use the largest integer type whose alignment constraints are satisfied.
    // We only need to check DstAlign here as SrcAlign is always greater than
    // or equal to DstAlign (or zero).
    VT = MVT::i64;
    if (Op.isFixedDstAlign())
      while (
          Op.getDstAlign() < (VT.getSizeInBits() / 8) &&
          !allowsMisalignedMemoryAccesses(VT, DstAS, Op.getDstAlign().value()))
        VT = (MVT::SimpleValueType)(VT.getSimpleVT().SimpleTy - 1);
    assert(VT.isInteger());

    // Find the largest legal integer type.
    MVT LVT = MVT::i64;
    while (!isTypeLegal(LVT))
      LVT = (MVT::SimpleValueType)(LVT.SimpleTy - 1);
    assert(LVT.isInteger());

    // If the type we've chosen is larger than the largest legal integer type
    // then use that instead.
    if (VT.bitsGT(LVT))
      VT = LVT;
  }

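  // For example, a 13-byte memcpy with VT == MVT::i64 typically produces
  // MemOps == {i64, i32, i8} (or {i64, i64} with overlapping accesses, when
  // Op.allowOverlap() and fast unaligned accesses permit it), assuming those
  // types are legal and sufficiently aligned on the target.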
  unsigned NumMemOps = 0;
  uint64_t Size = Op.size();
  while (Size) {
    unsigned VTSize = VT.getSizeInBits() / 8;
    while (VTSize > Size) {
      // For now, only use non-vector loads / stores for the left-over pieces.
      EVT NewVT = VT;
      unsigned NewVTSize;

      bool Found = false;
      if (VT.isVector() || VT.isFloatingPoint()) {
        NewVT = (VT.getSizeInBits() > 64) ? MVT::i64 : MVT::i32;
        if (isOperationLegalOrCustom(ISD::STORE, NewVT) &&
            isSafeMemOpType(NewVT.getSimpleVT()))
          Found = true;
        else if (NewVT == MVT::i64 &&
                 isOperationLegalOrCustom(ISD::STORE, MVT::f64) &&
                 isSafeMemOpType(MVT::f64)) {
          // i64 is usually not legal on 32-bit targets, but f64 may be.
          NewVT = MVT::f64;
          Found = true;
        }
      }

      if (!Found) {
        do {
          NewVT = (MVT::SimpleValueType)(NewVT.getSimpleVT().SimpleTy - 1);
          if (NewVT == MVT::i8)
            break;
        } while (!isSafeMemOpType(NewVT.getSimpleVT()));
      }
      NewVTSize = NewVT.getSizeInBits() / 8;

      // If the new VT cannot cover all of the remaining bits, then consider
      // issuing a (or a pair of) unaligned and overlapping load / store.
      bool Fast;
      if (NumMemOps && Op.allowOverlap() && NewVTSize < Size &&
          allowsMisalignedMemoryAccesses(
              VT, DstAS, Op.isFixedDstAlign() ? Op.getDstAlign().value() : 1,
              MachineMemOperand::MONone, &Fast) &&
          Fast)
        VTSize = Size;
      else {
        VT = NewVT;
        VTSize = NewVTSize;
      }
    }

    if (++NumMemOps > Limit)
      return false;

    MemOps.push_back(VT);
    Size -= VTSize;
  }

  return true;
}

/// Soften the operands of a comparison. This code is shared among BR_CC,
/// SELECT_CC, and SETCC handlers.
void TargetLowering::softenSetCCOperands(SelectionDAG &DAG, EVT VT,
                                         SDValue &NewLHS, SDValue &NewRHS,
                                         ISD::CondCode &CCCode,
                                         const SDLoc &dl, const SDValue OldLHS,
                                         const SDValue OldRHS) const {
  SDValue Chain;
  return softenSetCCOperands(DAG, VT, NewLHS, NewRHS, CCCode, dl, OldLHS,
                             OldRHS, Chain);
}

void TargetLowering::softenSetCCOperands(SelectionDAG &DAG, EVT VT,
                                         SDValue &NewLHS, SDValue &NewRHS,
                                         ISD::CondCode &CCCode,
                                         const SDLoc &dl, const SDValue OldLHS,
                                         const SDValue OldRHS,
                                         SDValue &Chain,
                                         bool IsSignaling) const {
  // FIXME: Currently we cannot really respect all IEEE predicates due to
  // libgcc not supporting them. We can update this code when libgcc provides
  // such functions.

  assert((VT == MVT::f32 || VT == MVT::f64 || VT == MVT::f128 ||
          VT == MVT::ppcf128) &&
         "Unsupported setcc type!");

  // Expand into one or more soft-fp libcall(s).
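  // For example, (setcc f32 a, b, setueq) expands to two libcalls whose
  // results are combined as
  //   (or (setne (__unordsf2 a, b), 0), (seteq (__eqsf2 a, b), 0))
  // using the predicates returned by getCmpLibcallCC.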
  RTLIB::Libcall LC1 = RTLIB::UNKNOWN_LIBCALL, LC2 = RTLIB::UNKNOWN_LIBCALL;
  bool ShouldInvertCC = false;
  switch (CCCode) {
  case ISD::SETEQ:
  case ISD::SETOEQ:
    LC1 = (VT == MVT::f32) ? RTLIB::OEQ_F32 :
          (VT == MVT::f64) ? RTLIB::OEQ_F64 :
          (VT == MVT::f128) ? RTLIB::OEQ_F128 : RTLIB::OEQ_PPCF128;
    break;
  case ISD::SETNE:
  case ISD::SETUNE:
    LC1 = (VT == MVT::f32) ? RTLIB::UNE_F32 :
          (VT == MVT::f64) ? RTLIB::UNE_F64 :
          (VT == MVT::f128) ? RTLIB::UNE_F128 : RTLIB::UNE_PPCF128;
    break;
  case ISD::SETGE:
  case ISD::SETOGE:
    LC1 = (VT == MVT::f32) ? RTLIB::OGE_F32 :
          (VT == MVT::f64) ? RTLIB::OGE_F64 :
          (VT == MVT::f128) ? RTLIB::OGE_F128 : RTLIB::OGE_PPCF128;
    break;
  case ISD::SETLT:
  case ISD::SETOLT:
    LC1 = (VT == MVT::f32) ? RTLIB::OLT_F32 :
          (VT == MVT::f64) ? RTLIB::OLT_F64 :
          (VT == MVT::f128) ? RTLIB::OLT_F128 : RTLIB::OLT_PPCF128;
    break;
  case ISD::SETLE:
  case ISD::SETOLE:
    LC1 = (VT == MVT::f32) ? RTLIB::OLE_F32 :
          (VT == MVT::f64) ? RTLIB::OLE_F64 :
          (VT == MVT::f128) ? RTLIB::OLE_F128 : RTLIB::OLE_PPCF128;
    break;
  case ISD::SETGT:
  case ISD::SETOGT:
    LC1 = (VT == MVT::f32) ? RTLIB::OGT_F32 :
          (VT == MVT::f64) ? RTLIB::OGT_F64 :
          (VT == MVT::f128) ? RTLIB::OGT_F128 : RTLIB::OGT_PPCF128;
    break;
  case ISD::SETO:
    ShouldInvertCC = true;
    LLVM_FALLTHROUGH;
  case ISD::SETUO:
    LC1 = (VT == MVT::f32) ? RTLIB::UO_F32 :
          (VT == MVT::f64) ? RTLIB::UO_F64 :
          (VT == MVT::f128) ? RTLIB::UO_F128 : RTLIB::UO_PPCF128;
    break;
  case ISD::SETONE:
    // SETONE = O && UNE
    ShouldInvertCC = true;
    LLVM_FALLTHROUGH;
  case ISD::SETUEQ:
    LC1 = (VT == MVT::f32) ? RTLIB::UO_F32 :
          (VT == MVT::f64) ? RTLIB::UO_F64 :
          (VT == MVT::f128) ? RTLIB::UO_F128 : RTLIB::UO_PPCF128;
    LC2 = (VT == MVT::f32) ? RTLIB::OEQ_F32 :
          (VT == MVT::f64) ? RTLIB::OEQ_F64 :
          (VT == MVT::f128) ? RTLIB::OEQ_F128 : RTLIB::OEQ_PPCF128;
    break;
  default:
    // Invert CC for unordered comparisons.
    ShouldInvertCC = true;
    switch (CCCode) {
    case ISD::SETULT:
      LC1 = (VT == MVT::f32) ? RTLIB::OGE_F32 :
            (VT == MVT::f64) ? RTLIB::OGE_F64 :
            (VT == MVT::f128) ? RTLIB::OGE_F128 : RTLIB::OGE_PPCF128;
      break;
    case ISD::SETULE:
      LC1 = (VT == MVT::f32) ? RTLIB::OGT_F32 :
            (VT == MVT::f64) ? RTLIB::OGT_F64 :
            (VT == MVT::f128) ? RTLIB::OGT_F128 : RTLIB::OGT_PPCF128;
      break;
    case ISD::SETUGT:
      LC1 = (VT == MVT::f32) ? RTLIB::OLE_F32 :
            (VT == MVT::f64) ? RTLIB::OLE_F64 :
            (VT == MVT::f128) ? RTLIB::OLE_F128 : RTLIB::OLE_PPCF128;
      break;
    case ISD::SETUGE:
      LC1 = (VT == MVT::f32) ? RTLIB::OLT_F32 :
            (VT == MVT::f64) ? RTLIB::OLT_F64 :
            (VT == MVT::f128) ? RTLIB::OLT_F128 : RTLIB::OLT_PPCF128;
      break;
    default: llvm_unreachable("Do not know how to soften this setcc!");
    }
  }

  // Use the target-specific return value for comparison lib calls.
  EVT RetVT = getCmpLibcallReturnType();
  SDValue Ops[2] = {NewLHS, NewRHS};
  TargetLowering::MakeLibCallOptions CallOptions;
  EVT OpsVT[2] = { OldLHS.getValueType(),
                   OldRHS.getValueType() };
  CallOptions.setTypeListBeforeSoften(OpsVT, RetVT, true);
  auto Call = makeLibCall(DAG, LC1, RetVT, Ops, CallOptions, dl, Chain);
  NewLHS = Call.first;
  NewRHS = DAG.getConstant(0, dl, RetVT);

  CCCode = getCmpLibcallCC(LC1);
  if (ShouldInvertCC) {
    assert(RetVT.isInteger());
    CCCode = getSetCCInverse(CCCode, RetVT);
  }

  if (LC2 == RTLIB::UNKNOWN_LIBCALL) {
    // Update Chain.
    Chain = Call.second;
  } else {
    EVT SetCCVT =
        getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), RetVT);
    SDValue Tmp = DAG.getSetCC(dl, SetCCVT, NewLHS, NewRHS, CCCode);
    auto Call2 = makeLibCall(DAG, LC2, RetVT, Ops, CallOptions, dl, Chain);
    CCCode = getCmpLibcallCC(LC2);
    if (ShouldInvertCC)
      CCCode = getSetCCInverse(CCCode, RetVT);
    NewLHS = DAG.getSetCC(dl, SetCCVT, Call2.first, NewRHS, CCCode);
    if (Chain)
      Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Call.second,
                          Call2.second);
    NewLHS = DAG.getNode(ShouldInvertCC ? ISD::AND : ISD::OR, dl,
                         Tmp.getValueType(), Tmp, NewLHS);
    NewRHS = SDValue();
  }
}

/// Return the entry encoding for a jump table in the current function. The
/// returned value is a member of the MachineJumpTableInfo::JTEntryKind enum.
unsigned TargetLowering::getJumpTableEncoding() const {
  // In non-PIC modes, just use the address of a block.
  if (!isPositionIndependent())
    return MachineJumpTableInfo::EK_BlockAddress;

  // In PIC mode, if the target supports a GPRel32 directive, use it.
  if (getTargetMachine().getMCAsmInfo()->getGPRel32Directive() != nullptr)
    return MachineJumpTableInfo::EK_GPRel32BlockAddress;

  // Otherwise, use a label difference.
  return MachineJumpTableInfo::EK_LabelDifference32;
}

SDValue TargetLowering::getPICJumpTableRelocBase(SDValue Table,
                                                 SelectionDAG &DAG) const {
  // If our PIC model is GP relative, use the global offset table as the base.
  unsigned JTEncoding = getJumpTableEncoding();

  if ((JTEncoding == MachineJumpTableInfo::EK_GPRel64BlockAddress) ||
      (JTEncoding == MachineJumpTableInfo::EK_GPRel32BlockAddress))
    return DAG.getGLOBAL_OFFSET_TABLE(getPointerTy(DAG.getDataLayout()));

  return Table;
}

/// This returns the relocation base for the given PIC jumptable, the same as
/// getPICJumpTableRelocBase, but as an MCExpr.
const MCExpr *
TargetLowering::getPICJumpTableRelocBaseExpr(const MachineFunction *MF,
                                             unsigned JTI,
                                             MCContext &Ctx) const {
  // The normal PIC reloc base is the label at the start of the jump table.
  return MCSymbolRefExpr::create(MF->getJTISymbol(JTI, Ctx), Ctx);
}

bool
TargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
  const TargetMachine &TM = getTargetMachine();
  const GlobalValue *GV = GA->getGlobal();

  // If the address is not even local to this DSO, we will have to load it
  // from the GOT and then add the offset.
  if (!TM.shouldAssumeDSOLocal(*GV->getParent(), GV))
    return false;

  // If the code is position independent, we will have to add a base register.
  if (isPositionIndependent())
    return false;

  // Otherwise we can do it.
  return true;
}

//===----------------------------------------------------------------------===//
// Optimization Methods
//===----------------------------------------------------------------------===//

/// If the specified instruction has a constant integer operand and there are
/// bits set in that constant that are not demanded, then clear those bits and
/// return true.
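/// For example, if Op is (and X, 0xFF00) but only bits 0x0F00 are demanded,
/// the constant is shrunk and Op is rebuilt as (and X, 0x0F00).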
bool TargetLowering::ShrinkDemandedConstant(SDValue Op,
                                            const APInt &DemandedBits,
                                            const APInt &DemandedElts,
                                            TargetLoweringOpt &TLO) const {
  SDLoc DL(Op);
  unsigned Opcode = Op.getOpcode();

  // Do target-specific constant optimization.
  if (targetShrinkDemandedConstant(Op, DemandedBits, DemandedElts, TLO))
    return TLO.New.getNode();

  // FIXME: ISD::SELECT, ISD::SELECT_CC
  switch (Opcode) {
  default:
    break;
  case ISD::XOR:
  case ISD::AND:
  case ISD::OR: {
    auto *Op1C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
    if (!Op1C)
      return false;

    // If this is a 'not' op, don't touch it because that's a canonical form.
    const APInt &C = Op1C->getAPIntValue();
    if (Opcode == ISD::XOR && DemandedBits.isSubsetOf(C))
      return false;

    if (!C.isSubsetOf(DemandedBits)) {
      EVT VT = Op.getValueType();
      SDValue NewC = TLO.DAG.getConstant(DemandedBits & C, DL, VT);
      SDValue NewOp = TLO.DAG.getNode(Opcode, DL, VT, Op.getOperand(0), NewC);
      return TLO.CombineTo(Op, NewOp);
    }

    break;
  }
  }

  return false;
}

bool TargetLowering::ShrinkDemandedConstant(SDValue Op,
                                            const APInt &DemandedBits,
                                            TargetLoweringOpt &TLO) const {
  EVT VT = Op.getValueType();
  APInt DemandedElts = VT.isVector()
                           ? APInt::getAllOnesValue(VT.getVectorNumElements())
                           : APInt(1, 1);
  return ShrinkDemandedConstant(Op, DemandedBits, DemandedElts, TLO);
}

/// Convert x+y to (VT)((SmallVT)x+(SmallVT)y) if the casts are free.
/// This uses isZExtFree and ZERO_EXTEND for the widening cast, but it could be
/// generalized for targets with other types of implicit widening casts.
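/// For example, an i64 add whose result is only demanded in its low 16 bits
/// may be rewritten as (any_extend (add (trunc x), (trunc y))) in i32 when
/// the truncate and the widening cast back are both free on the target.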
bool TargetLowering::ShrinkDemandedOp(SDValue Op, unsigned BitWidth,
                                      const APInt &Demanded,
                                      TargetLoweringOpt &TLO) const {
  assert(Op.getNumOperands() == 2 &&
         "ShrinkDemandedOp only supports binary operators!");
  assert(Op.getNode()->getNumValues() == 1 &&
         "ShrinkDemandedOp only supports nodes with one result!");

  SelectionDAG &DAG = TLO.DAG;
  SDLoc dl(Op);

  // Early return, as this function cannot handle vector types.
  if (Op.getValueType().isVector())
    return false;

  // Don't do this if the node has another user, which may require the
  // full value.
  if (!Op.getNode()->hasOneUse())
    return false;

  // Search for the smallest integer type with free casts to and from
  // Op's type. For expedience, just check power-of-2 integer types.
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  unsigned DemandedSize = Demanded.getActiveBits();
  unsigned SmallVTBits = DemandedSize;
  if (!isPowerOf2_32(SmallVTBits))
    SmallVTBits = NextPowerOf2(SmallVTBits);
  for (; SmallVTBits < BitWidth; SmallVTBits = NextPowerOf2(SmallVTBits)) {
    EVT SmallVT = EVT::getIntegerVT(*DAG.getContext(), SmallVTBits);
    if (TLI.isTruncateFree(Op.getValueType(), SmallVT) &&
        TLI.isZExtFree(SmallVT, Op.getValueType())) {
      // We found a type with free casts.
      SDValue X = DAG.getNode(
          Op.getOpcode(), dl, SmallVT,
          DAG.getNode(ISD::TRUNCATE, dl, SmallVT, Op.getOperand(0)),
          DAG.getNode(ISD::TRUNCATE, dl, SmallVT, Op.getOperand(1)));
      assert(DemandedSize <= SmallVTBits && "Narrowed below demanded bits?");
      SDValue Z = DAG.getNode(ISD::ANY_EXTEND, dl, Op.getValueType(), X);
      return TLO.CombineTo(Op, Z);
    }
  }
  return false;
}

bool TargetLowering::SimplifyDemandedBits(SDValue Op, const APInt &DemandedBits,
                                          DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
                        !DCI.isBeforeLegalizeOps());
  KnownBits Known;

  bool Simplified = SimplifyDemandedBits(Op, DemandedBits, Known, TLO);
  if (Simplified) {
    DCI.AddToWorklist(Op.getNode());
    DCI.CommitTargetLoweringOpt(TLO);
  }
  return Simplified;
}

bool TargetLowering::SimplifyDemandedBits(SDValue Op, const APInt &DemandedBits,
                                          KnownBits &Known,
                                          TargetLoweringOpt &TLO,
                                          unsigned Depth,
                                          bool AssumeSingleUse) const {
  EVT VT = Op.getValueType();

  // TODO: We can probably do more work on calculating the known bits and
  // simplifying the operations for scalable vectors, but for now we just
  // bail out.
  if (VT.isScalableVector()) {
    // Pretend we don't know anything for now.
    Known = KnownBits(DemandedBits.getBitWidth());
    return false;
  }

  APInt DemandedElts = VT.isVector()
                           ? APInt::getAllOnesValue(VT.getVectorNumElements())
                           : APInt(1, 1);
  return SimplifyDemandedBits(Op, DemandedBits, DemandedElts, Known, TLO, Depth,
                              AssumeSingleUse);
}

// TODO: Can we merge SelectionDAG::GetDemandedBits into this?
// TODO: Under what circumstances can we create nodes? Constant folding?
SDValue TargetLowering::SimplifyMultipleUseDemandedBits(
    SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts,
    SelectionDAG &DAG, unsigned Depth) const {
  // Limit search depth.
  if (Depth >= SelectionDAG::MaxRecursionDepth)
    return SDValue();

  // Ignore UNDEFs.
  if (Op.isUndef())
    return SDValue();

  // Not demanding any bits/elts from Op.
  if (DemandedBits == 0 || DemandedElts == 0)
    return DAG.getUNDEF(Op.getValueType());

  unsigned NumElts = DemandedElts.getBitWidth();
  unsigned BitWidth = DemandedBits.getBitWidth();
  KnownBits LHSKnown, RHSKnown;
  switch (Op.getOpcode()) {
  case ISD::BITCAST: {
    SDValue Src = peekThroughBitcasts(Op.getOperand(0));
    EVT SrcVT = Src.getValueType();
    EVT DstVT = Op.getValueType();
    if (SrcVT == DstVT)
      return Src;

    unsigned NumSrcEltBits = SrcVT.getScalarSizeInBits();
    unsigned NumDstEltBits = DstVT.getScalarSizeInBits();
    if (NumSrcEltBits == NumDstEltBits)
      if (SDValue V = SimplifyMultipleUseDemandedBits(
              Src, DemandedBits, DemandedElts, DAG, Depth + 1))
        return DAG.getBitcast(DstVT, V);

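    // For example, on little-endian, (i64 bitcast (v2i32 x)) maps demanded
    // bits [32,63] of the i64 result to bits [0,31] of element 1 of x.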
    // TODO - bigendian once we have test coverage.
    if (SrcVT.isVector() && (NumDstEltBits % NumSrcEltBits) == 0 &&
        DAG.getDataLayout().isLittleEndian()) {
      unsigned Scale = NumDstEltBits / NumSrcEltBits;
      unsigned NumSrcElts = SrcVT.getVectorNumElements();
      APInt DemandedSrcBits = APInt::getNullValue(NumSrcEltBits);
      APInt DemandedSrcElts = APInt::getNullValue(NumSrcElts);
      for (unsigned i = 0; i != Scale; ++i) {
        unsigned Offset = i * NumSrcEltBits;
        APInt Sub = DemandedBits.extractBits(NumSrcEltBits, Offset);
        if (!Sub.isNullValue()) {
          DemandedSrcBits |= Sub;
          for (unsigned j = 0; j != NumElts; ++j)
            if (DemandedElts[j])
              DemandedSrcElts.setBit((j * Scale) + i);
        }
      }

      if (SDValue V = SimplifyMultipleUseDemandedBits(
              Src, DemandedSrcBits, DemandedSrcElts, DAG, Depth + 1))
        return DAG.getBitcast(DstVT, V);
    }

    // TODO - bigendian once we have test coverage.
    if ((NumSrcEltBits % NumDstEltBits) == 0 &&
        DAG.getDataLayout().isLittleEndian()) {
      unsigned Scale = NumSrcEltBits / NumDstEltBits;
      unsigned NumSrcElts = SrcVT.isVector() ? SrcVT.getVectorNumElements() : 1;
      APInt DemandedSrcBits = APInt::getNullValue(NumSrcEltBits);
      APInt DemandedSrcElts = APInt::getNullValue(NumSrcElts);
      for (unsigned i = 0; i != NumElts; ++i)
        if (DemandedElts[i]) {
          unsigned Offset = (i % Scale) * NumDstEltBits;
          DemandedSrcBits.insertBits(DemandedBits, Offset);
          DemandedSrcElts.setBit(i / Scale);
        }

      if (SDValue V = SimplifyMultipleUseDemandedBits(
              Src, DemandedSrcBits, DemandedSrcElts, DAG, Depth + 1))
        return DAG.getBitcast(DstVT, V);
    }

    break;
  }
  case ISD::AND: {
    LHSKnown = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
    RHSKnown = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);

    // If all of the demanded bits are known 1 on one side, return the other.
    // These bits cannot contribute to the result of the 'and' in this
    // context.
    if (DemandedBits.isSubsetOf(LHSKnown.Zero | RHSKnown.One))
      return Op.getOperand(0);
    if (DemandedBits.isSubsetOf(RHSKnown.Zero | LHSKnown.One))
      return Op.getOperand(1);
    break;
  }
  case ISD::OR: {
    LHSKnown = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
    RHSKnown = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);

    // If all of the demanded bits are known zero on one side, return the
    // other. These bits cannot contribute to the result of the 'or' in this
    // context.
    if (DemandedBits.isSubsetOf(LHSKnown.One | RHSKnown.Zero))
      return Op.getOperand(0);
    if (DemandedBits.isSubsetOf(RHSKnown.One | LHSKnown.Zero))
      return Op.getOperand(1);
    break;
  }
  case ISD::XOR: {
    LHSKnown = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
    RHSKnown = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);

    // If all of the demanded bits are known zero on one side, return the
    // other.
    if (DemandedBits.isSubsetOf(RHSKnown.Zero))
      return Op.getOperand(0);
    if (DemandedBits.isSubsetOf(LHSKnown.Zero))
      return Op.getOperand(1);
    break;
  }
  case ISD::SHL: {
    // If we are only demanding sign bits then we can use the shift source
    // directly.
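    // For example, (shl x, 4) demanded only in its top 8 bits can be replaced
    // by x when x has at least 12 sign bits: the demanded bits are copies of
    // the sign bit both before and after the shift.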
    if (const APInt *MaxSA =
            DAG.getValidMaximumShiftAmountConstant(Op, DemandedElts)) {
      SDValue Op0 = Op.getOperand(0);
      unsigned ShAmt = MaxSA->getZExtValue();
      unsigned NumSignBits =
          DAG.ComputeNumSignBits(Op0, DemandedElts, Depth + 1);
      unsigned UpperDemandedBits = BitWidth - DemandedBits.countTrailingZeros();
      if (NumSignBits > ShAmt && (NumSignBits - ShAmt) >= (UpperDemandedBits))
        return Op0;
    }
    break;
  }
  case ISD::SETCC: {
    SDValue Op0 = Op.getOperand(0);
    SDValue Op1 = Op.getOperand(1);
    ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
    // If (1) we only need the sign-bit, (2) the setcc operands are the same
    // width as the setcc result, and (3) the result of a setcc conforms to 0
    // or -1, we may be able to bypass the setcc.
    if (DemandedBits.isSignMask() &&
        Op0.getScalarValueSizeInBits() == BitWidth &&
        getBooleanContents(Op0.getValueType()) ==
            BooleanContent::ZeroOrNegativeOneBooleanContent) {
      // If we're testing X < 0, then this compare isn't needed - just use X!
      // FIXME: We're limiting to integer types here, but this should also work
      // if we don't care about FP signed-zero. The use of SETLT with FP means
      // that we don't care about NaNs.
      if (CC == ISD::SETLT && Op1.getValueType().isInteger() &&
          (isNullConstant(Op1) || ISD::isBuildVectorAllZeros(Op1.getNode())))
        return Op0;
    }
    break;
  }
  case ISD::SIGN_EXTEND_INREG: {
    // If none of the extended bits are demanded, eliminate the sextinreg.
    SDValue Op0 = Op.getOperand(0);
    EVT ExVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
    unsigned ExBits = ExVT.getScalarSizeInBits();
    if (DemandedBits.getActiveBits() <= ExBits)
      return Op0;
    // If the input is already sign extended, just drop the extension.
    unsigned NumSignBits = DAG.ComputeNumSignBits(Op0, DemandedElts, Depth + 1);
    if (NumSignBits >= (BitWidth - ExBits + 1))
      return Op0;
    break;
  }
  case ISD::ANY_EXTEND_VECTOR_INREG:
  case ISD::SIGN_EXTEND_VECTOR_INREG:
  case ISD::ZERO_EXTEND_VECTOR_INREG: {
    // If we only want the lowest element and none of extended bits, then we
    // can return the bitcasted source vector.
    SDValue Src = Op.getOperand(0);
    EVT SrcVT = Src.getValueType();
    EVT DstVT = Op.getValueType();
    if (DemandedElts == 1 && DstVT.getSizeInBits() == SrcVT.getSizeInBits() &&
        DAG.getDataLayout().isLittleEndian() &&
        DemandedBits.getActiveBits() <= SrcVT.getScalarSizeInBits()) {
      return DAG.getBitcast(DstVT, Src);
    }
    break;
  }
  case ISD::INSERT_VECTOR_ELT: {
    // If we don't demand the inserted element, return the base vector.
    SDValue Vec = Op.getOperand(0);
    auto *CIdx = dyn_cast<ConstantSDNode>(Op.getOperand(2));
    EVT VecVT = Vec.getValueType();
    if (CIdx && CIdx->getAPIntValue().ult(VecVT.getVectorNumElements()) &&
        !DemandedElts[CIdx->getZExtValue()])
      return Vec;
    break;
  }
  case ISD::INSERT_SUBVECTOR: {
    // If we don't demand the inserted subvector, return the base vector.
    SDValue Vec = Op.getOperand(0);
    SDValue Sub = Op.getOperand(1);
    uint64_t Idx = Op.getConstantOperandVal(2);
    unsigned NumSubElts = Sub.getValueType().getVectorNumElements();
    if (DemandedElts.extractBits(NumSubElts, Idx) == 0)
      return Vec;
    break;
  }
  case ISD::VECTOR_SHUFFLE: {
    ArrayRef<int> ShuffleMask = cast<ShuffleVectorSDNode>(Op)->getMask();

    // If all the demanded elts are from one operand and are inline,
    // then we can use the operand directly.
    bool AllUndef = true, IdentityLHS = true, IdentityRHS = true;
    for (unsigned i = 0; i != NumElts; ++i) {
      int M = ShuffleMask[i];
      if (M < 0 || !DemandedElts[i])
        continue;
      AllUndef = false;
      IdentityLHS &= (M == (int)i);
      IdentityRHS &= ((M - NumElts) == i);
    }

    if (AllUndef)
      return DAG.getUNDEF(Op.getValueType());
    if (IdentityLHS)
      return Op.getOperand(0);
    if (IdentityRHS)
      return Op.getOperand(1);
    break;
  }
  default:
    if (Op.getOpcode() >= ISD::BUILTIN_OP_END)
      if (SDValue V = SimplifyMultipleUseDemandedBitsForTargetNode(
              Op, DemandedBits, DemandedElts, DAG, Depth))
        return V;
    break;
  }
  return SDValue();
}

SDValue TargetLowering::SimplifyMultipleUseDemandedBits(
    SDValue Op, const APInt &DemandedBits, SelectionDAG &DAG,
    unsigned Depth) const {
  EVT VT = Op.getValueType();
  APInt DemandedElts = VT.isVector()
                           ? APInt::getAllOnesValue(VT.getVectorNumElements())
                           : APInt(1, 1);
  return SimplifyMultipleUseDemandedBits(Op, DemandedBits, DemandedElts, DAG,
                                         Depth);
}

SDValue TargetLowering::SimplifyMultipleUseDemandedVectorElts(
    SDValue Op, const APInt &DemandedElts, SelectionDAG &DAG,
    unsigned Depth) const {
  APInt DemandedBits = APInt::getAllOnesValue(Op.getScalarValueSizeInBits());
  return SimplifyMultipleUseDemandedBits(Op, DemandedBits, DemandedElts, DAG,
                                         Depth);
}

/// Look at Op. At this point, we know that only the OriginalDemandedBits of the
/// result of Op are ever used downstream. If we can use this information to
/// simplify Op, create a new simplified DAG node and return true, returning the
/// original and new nodes in Old and New. Otherwise, analyze the expression and
/// return a mask of Known bits for the expression (used to simplify the
/// caller). The Known bits may only be accurate for those bits in the
/// OriginalDemandedBits and OriginalDemandedElts.
bool TargetLowering::SimplifyDemandedBits(
    SDValue Op, const APInt &OriginalDemandedBits,
    const APInt &OriginalDemandedElts, KnownBits &Known, TargetLoweringOpt &TLO,
    unsigned Depth, bool AssumeSingleUse) const {
  unsigned BitWidth = OriginalDemandedBits.getBitWidth();
  assert(Op.getScalarValueSizeInBits() == BitWidth &&
         "Mask size mismatches value type size!");

  // Don't know anything.
  Known = KnownBits(BitWidth);

  // TODO: We can probably do more work on calculating the known bits and
  // simplifying the operations for scalable vectors, but for now we just
  // bail out.
  if (Op.getValueType().isScalableVector())
    return false;

  unsigned NumElts = OriginalDemandedElts.getBitWidth();
  assert((!Op.getValueType().isVector() ||
          NumElts == Op.getValueType().getVectorNumElements()) &&
         "Unexpected vector size");

  APInt DemandedBits = OriginalDemandedBits;
  APInt DemandedElts = OriginalDemandedElts;
  SDLoc dl(Op);
  auto &DL = TLO.DAG.getDataLayout();

  // Undef operand.
  if (Op.isUndef())
    return false;

  if (Op.getOpcode() == ISD::Constant) {
    // We know all of the bits for a constant!
    Known.One = cast<ConstantSDNode>(Op)->getAPIntValue();
    Known.Zero = ~Known.One;
    return false;
  }

  if (Op.getOpcode() == ISD::ConstantFP) {
    // We know all of the bits for a floating point constant!
    Known.One = cast<ConstantFPSDNode>(Op)->getValueAPF().bitcastToAPInt();
    Known.Zero = ~Known.One;
    return false;
  }

  // Other users may use these bits.
  EVT VT = Op.getValueType();
  if (!Op.getNode()->hasOneUse() && !AssumeSingleUse) {
    if (Depth != 0) {
      // If not at the root, just compute the known bits to
      // simplify things downstream.
      Known = TLO.DAG.computeKnownBits(Op, DemandedElts, Depth);
      return false;
    }
    // If this is the root being simplified, allow it to have multiple uses,
    // just set the DemandedBits/Elts to all bits.
    DemandedBits = APInt::getAllOnesValue(BitWidth);
    DemandedElts = APInt::getAllOnesValue(NumElts);
  } else if (OriginalDemandedBits == 0 || OriginalDemandedElts == 0) {
    // Not demanding any bits/elts from Op.
    return TLO.CombineTo(Op, TLO.DAG.getUNDEF(VT));
  } else if (Depth >= SelectionDAG::MaxRecursionDepth) {
    // Limit search depth.
    return false;
  }

947
948 KnownBits Known2;
949 switch (Op.getOpcode()) {
950 case ISD::TargetConstant:
951 llvm_unreachable("Can't simplify this node");
952 case ISD::SCALAR_TO_VECTOR: {
953 if (!DemandedElts[0])
954 return TLO.CombineTo(Op, TLO.DAG.getUNDEF(VT));
955
956 KnownBits SrcKnown;
957 SDValue Src = Op.getOperand(0);
958 unsigned SrcBitWidth = Src.getScalarValueSizeInBits();
959 APInt SrcDemandedBits = DemandedBits.zextOrSelf(SrcBitWidth);
960 if (SimplifyDemandedBits(Src, SrcDemandedBits, SrcKnown, TLO, Depth + 1))
961 return true;
962
963 // Upper elements are undef, so only get the knownbits if we just demand
964 // the bottom element.
965 if (DemandedElts == 1)
966 Known = SrcKnown.anyextOrTrunc(BitWidth);
967 break;
968 }
969 case ISD::BUILD_VECTOR:
970 // Collect the known bits that are shared by every demanded element.
971 // TODO: Call SimplifyDemandedBits for non-constant demanded elements.
972 Known = TLO.DAG.computeKnownBits(Op, DemandedElts, Depth);
973 return false; // Don't fall through, will infinitely loop.
974 case ISD::LOAD: {
975 LoadSDNode *LD = cast<LoadSDNode>(Op);
976 if (getTargetConstantFromLoad(LD)) {
977 Known = TLO.DAG.computeKnownBits(Op, DemandedElts, Depth);
978 return false; // Don't fall through, will infinitely loop.
979 } else if (ISD::isZEXTLoad(Op.getNode()) && Op.getResNo() == 0) {
980 // If this is a ZEXTLoad and we are looking at the loaded value.
981 EVT MemVT = LD->getMemoryVT();
982 unsigned MemBits = MemVT.getScalarSizeInBits();
983 Known.Zero.setBitsFrom(MemBits);
984 return false; // Don't fall through, will infinitely loop.
985 }
986 break;
987 }
988 case ISD::INSERT_VECTOR_ELT: {
989 SDValue Vec = Op.getOperand(0);
990 SDValue Scl = Op.getOperand(1);
991 auto *CIdx = dyn_cast<ConstantSDNode>(Op.getOperand(2));
992 EVT VecVT = Vec.getValueType();
993
994 // If index isn't constant, assume we need all vector elements AND the
995 // inserted element.
996 APInt DemandedVecElts(DemandedElts);
997 if (CIdx && CIdx->getAPIntValue().ult(VecVT.getVectorNumElements())) {
998 unsigned Idx = CIdx->getZExtValue();
999 DemandedVecElts.clearBit(Idx);
1000
1001 // Inserted element is not required.
1002 if (!DemandedElts[Idx])
1003 return TLO.CombineTo(Op, Vec);
1004 }
1005
1006 KnownBits KnownScl;
1007 unsigned NumSclBits = Scl.getScalarValueSizeInBits();
1008 APInt DemandedSclBits = DemandedBits.zextOrTrunc(NumSclBits);
1009 if (SimplifyDemandedBits(Scl, DemandedSclBits, KnownScl, TLO, Depth + 1))
1010 return true;
1011
1012 Known = KnownScl.anyextOrTrunc(BitWidth);
1013
1014 KnownBits KnownVec;
1015 if (SimplifyDemandedBits(Vec, DemandedBits, DemandedVecElts, KnownVec, TLO,
1016 Depth + 1))
1017 return true;
1018
1019 if (!!DemandedVecElts) {
1020 Known.One &= KnownVec.One;
1021 Known.Zero &= KnownVec.Zero;
1022 }
1023
1024 return false;
1025 }
  case ISD::INSERT_SUBVECTOR: {
    // Demand any elements from the subvector and the remainder from the src
    // it's inserted into.
    SDValue Src = Op.getOperand(0);
    SDValue Sub = Op.getOperand(1);
    uint64_t Idx = Op.getConstantOperandVal(2);
    unsigned NumSubElts = Sub.getValueType().getVectorNumElements();
    APInt DemandedSubElts = DemandedElts.extractBits(NumSubElts, Idx);
    APInt DemandedSrcElts = DemandedElts;
    DemandedSrcElts.insertBits(APInt::getNullValue(NumSubElts), Idx);

    KnownBits KnownSub, KnownSrc;
    if (SimplifyDemandedBits(Sub, DemandedBits, DemandedSubElts, KnownSub, TLO,
                             Depth + 1))
      return true;
    if (SimplifyDemandedBits(Src, DemandedBits, DemandedSrcElts, KnownSrc, TLO,
                             Depth + 1))
      return true;

    Known.Zero.setAllBits();
    Known.One.setAllBits();
    if (!!DemandedSubElts) {
      Known.One &= KnownSub.One;
      Known.Zero &= KnownSub.Zero;
    }
    if (!!DemandedSrcElts) {
      Known.One &= KnownSrc.One;
      Known.Zero &= KnownSrc.Zero;
    }

    // Attempt to avoid multi-use src if we don't need anything from it.
    if (!DemandedBits.isAllOnesValue() || !DemandedSubElts.isAllOnesValue() ||
        !DemandedSrcElts.isAllOnesValue()) {
      SDValue NewSub = SimplifyMultipleUseDemandedBits(
          Sub, DemandedBits, DemandedSubElts, TLO.DAG, Depth + 1);
      SDValue NewSrc = SimplifyMultipleUseDemandedBits(
          Src, DemandedBits, DemandedSrcElts, TLO.DAG, Depth + 1);
      if (NewSub || NewSrc) {
        NewSub = NewSub ? NewSub : Sub;
        NewSrc = NewSrc ? NewSrc : Src;
        SDValue NewOp = TLO.DAG.getNode(Op.getOpcode(), dl, VT, NewSrc, NewSub,
                                        Op.getOperand(2));
        return TLO.CombineTo(Op, NewOp);
      }
    }
    break;
  }
  case ISD::EXTRACT_SUBVECTOR: {
    // Offset the demanded elts by the subvector index.
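    // e.g. Extracting a v2i32 subvector at index 2 from a v8i32 source
    // demands source elements 2 and 3.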
    SDValue Src = Op.getOperand(0);
    if (Src.getValueType().isScalableVector())
      break;
    uint64_t Idx = Op.getConstantOperandVal(1);
    unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
    APInt DemandedSrcElts = DemandedElts.zextOrSelf(NumSrcElts).shl(Idx);

    if (SimplifyDemandedBits(Src, DemandedBits, DemandedSrcElts, Known, TLO,
                             Depth + 1))
      return true;

    // Attempt to avoid multi-use src if we don't need anything from it.
    if (!DemandedBits.isAllOnesValue() || !DemandedSrcElts.isAllOnesValue()) {
      SDValue DemandedSrc = SimplifyMultipleUseDemandedBits(
          Src, DemandedBits, DemandedSrcElts, TLO.DAG, Depth + 1);
      if (DemandedSrc) {
        SDValue NewOp = TLO.DAG.getNode(Op.getOpcode(), dl, VT, DemandedSrc,
                                        Op.getOperand(1));
        return TLO.CombineTo(Op, NewOp);
      }
    }
    break;
  }
  case ISD::CONCAT_VECTORS: {
    Known.Zero.setAllBits();
    Known.One.setAllBits();
    EVT SubVT = Op.getOperand(0).getValueType();
    unsigned NumSubVecs = Op.getNumOperands();
    unsigned NumSubElts = SubVT.getVectorNumElements();
    for (unsigned i = 0; i != NumSubVecs; ++i) {
      APInt DemandedSubElts =
          DemandedElts.extractBits(NumSubElts, i * NumSubElts);
      if (SimplifyDemandedBits(Op.getOperand(i), DemandedBits, DemandedSubElts,
                               Known2, TLO, Depth + 1))
        return true;
      // Known bits are shared by every demanded subvector element.
      if (!!DemandedSubElts) {
        Known.One &= Known2.One;
        Known.Zero &= Known2.Zero;
      }
    }
    break;
  }
  case ISD::VECTOR_SHUFFLE: {
    ArrayRef<int> ShuffleMask = cast<ShuffleVectorSDNode>(Op)->getMask();

    // Collect demanded elements from shuffle operands.
    APInt DemandedLHS(NumElts, 0);
    APInt DemandedRHS(NumElts, 0);
    for (unsigned i = 0; i != NumElts; ++i) {
      if (!DemandedElts[i])
        continue;
      int M = ShuffleMask[i];
      if (M < 0) {
        // For UNDEF elements, we don't know anything about the common state of
        // the shuffle result.
        DemandedLHS.clearAllBits();
        DemandedRHS.clearAllBits();
        break;
      }
      assert(0 <= M && M < (int)(2 * NumElts) && "Shuffle index out of range");
      if (M < (int)NumElts)
        DemandedLHS.setBit(M);
      else
        DemandedRHS.setBit(M - NumElts);
    }

    if (!!DemandedLHS || !!DemandedRHS) {
      SDValue Op0 = Op.getOperand(0);
      SDValue Op1 = Op.getOperand(1);

      Known.Zero.setAllBits();
      Known.One.setAllBits();
      if (!!DemandedLHS) {
        if (SimplifyDemandedBits(Op0, DemandedBits, DemandedLHS, Known2, TLO,
                                 Depth + 1))
          return true;
        Known.One &= Known2.One;
        Known.Zero &= Known2.Zero;
      }
      if (!!DemandedRHS) {
        if (SimplifyDemandedBits(Op1, DemandedBits, DemandedRHS, Known2, TLO,
                                 Depth + 1))
          return true;
        Known.One &= Known2.One;
        Known.Zero &= Known2.Zero;
      }

      // Attempt to avoid multi-use ops if we don't need anything from them.
      SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits(
          Op0, DemandedBits, DemandedLHS, TLO.DAG, Depth + 1);
      SDValue DemandedOp1 = SimplifyMultipleUseDemandedBits(
          Op1, DemandedBits, DemandedRHS, TLO.DAG, Depth + 1);
      if (DemandedOp0 || DemandedOp1) {
        Op0 = DemandedOp0 ? DemandedOp0 : Op0;
        Op1 = DemandedOp1 ? DemandedOp1 : Op1;
        SDValue NewOp = TLO.DAG.getVectorShuffle(VT, dl, Op0, Op1, ShuffleMask);
        return TLO.CombineTo(Op, NewOp);
      }
    }
    break;
  }
  case ISD::AND: {
    SDValue Op0 = Op.getOperand(0);
    SDValue Op1 = Op.getOperand(1);

    // If the RHS is a constant, check to see if the LHS would be zero without
    // using the bits from the RHS. Below, we use knowledge about the RHS to
    // simplify the LHS, here we're using information from the LHS to simplify
    // the RHS.
    if (ConstantSDNode *RHSC = isConstOrConstSplat(Op1)) {
      // Do not increment Depth here; that can cause an infinite loop.
      KnownBits LHSKnown = TLO.DAG.computeKnownBits(Op0, DemandedElts, Depth);
      // If the LHS already has zeros where RHSC does, this 'and' is dead.
      if ((LHSKnown.Zero & DemandedBits) ==
          (~RHSC->getAPIntValue() & DemandedBits))
        return TLO.CombineTo(Op, Op0);

      // If any of the set bits in the RHS are known zero on the LHS, shrink
      // the constant.
      if (ShrinkDemandedConstant(Op, ~LHSKnown.Zero & DemandedBits,
                                 DemandedElts, TLO))
        return true;

      // Bitwise-not (xor X, -1) is a special case: we don't usually shrink its
      // constant, but if this 'and' is only clearing bits that were just set by
      // the xor, then this 'and' can be eliminated by shrinking the mask of
      // the xor. For example, for a 32-bit X:
      // and (xor (srl X, 31), -1), 1 --> xor (srl X, 31), 1
      if (isBitwiseNot(Op0) && Op0.hasOneUse() &&
          LHSKnown.One == ~RHSC->getAPIntValue()) {
        SDValue Xor = TLO.DAG.getNode(ISD::XOR, dl, VT, Op0.getOperand(0), Op1);
        return TLO.CombineTo(Op, Xor);
      }
    }

    if (SimplifyDemandedBits(Op1, DemandedBits, DemandedElts, Known, TLO,
                             Depth + 1))
      return true;
    assert(!Known.hasConflict() && "Bits known to be one AND zero?");
    if (SimplifyDemandedBits(Op0, ~Known.Zero & DemandedBits, DemandedElts,
                             Known2, TLO, Depth + 1))
      return true;
    assert(!Known2.hasConflict() && "Bits known to be one AND zero?");

    // Attempt to avoid multi-use ops if we don't need anything from them.
    if (!DemandedBits.isAllOnesValue() || !DemandedElts.isAllOnesValue()) {
      SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits(
          Op0, DemandedBits, DemandedElts, TLO.DAG, Depth + 1);
      SDValue DemandedOp1 = SimplifyMultipleUseDemandedBits(
          Op1, DemandedBits, DemandedElts, TLO.DAG, Depth + 1);
      if (DemandedOp0 || DemandedOp1) {
        Op0 = DemandedOp0 ? DemandedOp0 : Op0;
        Op1 = DemandedOp1 ? DemandedOp1 : Op1;
        SDValue NewOp = TLO.DAG.getNode(Op.getOpcode(), dl, VT, Op0, Op1);
        return TLO.CombineTo(Op, NewOp);
      }
    }

    // If all of the demanded bits are known one on one side, return the other.
    // These bits cannot contribute to the result of the 'and'.
    if (DemandedBits.isSubsetOf(Known2.Zero | Known.One))
      return TLO.CombineTo(Op, Op0);
    if (DemandedBits.isSubsetOf(Known.Zero | Known2.One))
      return TLO.CombineTo(Op, Op1);
    // If all of the demanded bits in the inputs are known zeros, return zero.
    if (DemandedBits.isSubsetOf(Known.Zero | Known2.Zero))
      return TLO.CombineTo(Op, TLO.DAG.getConstant(0, dl, VT));
    // If the RHS is a constant, see if we can simplify it.
    if (ShrinkDemandedConstant(Op, ~Known2.Zero & DemandedBits, DemandedElts,
                               TLO))
      return true;
    // If the operation can be done in a smaller type, do so.
    if (ShrinkDemandedOp(Op, BitWidth, DemandedBits, TLO))
      return true;

    Known &= Known2;
    break;
  }
  case ISD::OR: {
    SDValue Op0 = Op.getOperand(0);
    SDValue Op1 = Op.getOperand(1);

    if (SimplifyDemandedBits(Op1, DemandedBits, DemandedElts, Known, TLO,
                             Depth + 1))
      return true;
    assert(!Known.hasConflict() && "Bits known to be one AND zero?");
    if (SimplifyDemandedBits(Op0, ~Known.One & DemandedBits, DemandedElts,
                             Known2, TLO, Depth + 1))
      return true;
    assert(!Known2.hasConflict() && "Bits known to be one AND zero?");

    // Attempt to avoid multi-use ops if we don't need anything from them.
    if (!DemandedBits.isAllOnesValue() || !DemandedElts.isAllOnesValue()) {
      SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits(
          Op0, DemandedBits, DemandedElts, TLO.DAG, Depth + 1);
      SDValue DemandedOp1 = SimplifyMultipleUseDemandedBits(
          Op1, DemandedBits, DemandedElts, TLO.DAG, Depth + 1);
      if (DemandedOp0 || DemandedOp1) {
        Op0 = DemandedOp0 ? DemandedOp0 : Op0;
        Op1 = DemandedOp1 ? DemandedOp1 : Op1;
        SDValue NewOp = TLO.DAG.getNode(Op.getOpcode(), dl, VT, Op0, Op1);
        return TLO.CombineTo(Op, NewOp);
      }
    }

    // If all of the demanded bits are known zero on one side, return the
    // other. These bits cannot contribute to the result of the 'or'.
    if (DemandedBits.isSubsetOf(Known2.One | Known.Zero))
      return TLO.CombineTo(Op, Op0);
    if (DemandedBits.isSubsetOf(Known.One | Known2.Zero))
      return TLO.CombineTo(Op, Op1);
    // If the RHS is a constant, see if we can simplify it.
    if (ShrinkDemandedConstant(Op, DemandedBits, DemandedElts, TLO))
      return true;
    // If the operation can be done in a smaller type, do so.
    if (ShrinkDemandedOp(Op, BitWidth, DemandedBits, TLO))
      return true;

    Known |= Known2;
    break;
  }
  case ISD::XOR: {
    SDValue Op0 = Op.getOperand(0);
    SDValue Op1 = Op.getOperand(1);

    if (SimplifyDemandedBits(Op1, DemandedBits, DemandedElts, Known, TLO,
                             Depth + 1))
      return true;
    assert(!Known.hasConflict() && "Bits known to be one AND zero?");
    if (SimplifyDemandedBits(Op0, DemandedBits, DemandedElts, Known2, TLO,
                             Depth + 1))
      return true;
    assert(!Known2.hasConflict() && "Bits known to be one AND zero?");

    // Attempt to avoid multi-use ops if we don't need anything from them.
    if (!DemandedBits.isAllOnesValue() || !DemandedElts.isAllOnesValue()) {
      SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits(
          Op0, DemandedBits, DemandedElts, TLO.DAG, Depth + 1);
      SDValue DemandedOp1 = SimplifyMultipleUseDemandedBits(
          Op1, DemandedBits, DemandedElts, TLO.DAG, Depth + 1);
      if (DemandedOp0 || DemandedOp1) {
        Op0 = DemandedOp0 ? DemandedOp0 : Op0;
        Op1 = DemandedOp1 ? DemandedOp1 : Op1;
        SDValue NewOp = TLO.DAG.getNode(Op.getOpcode(), dl, VT, Op0, Op1);
        return TLO.CombineTo(Op, NewOp);
      }
    }

    // If all of the demanded bits are known zero on one side, return the
    // other. These bits cannot contribute to the result of the 'xor'.
    if (DemandedBits.isSubsetOf(Known.Zero))
      return TLO.CombineTo(Op, Op0);
    if (DemandedBits.isSubsetOf(Known2.Zero))
      return TLO.CombineTo(Op, Op1);
    // If the operation can be done in a smaller type, do so.
    if (ShrinkDemandedOp(Op, BitWidth, DemandedBits, TLO))
      return true;

    // If all of the unknown bits are known to be zero on one side or the
    // other, turn this into an *inclusive* or.
    // e.g. (A & C1)^(B & C2) -> (A & C1)|(B & C2) iff C1&C2 == 0
    if (DemandedBits.isSubsetOf(Known.Zero | Known2.Zero))
      return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::OR, dl, VT, Op0, Op1));

    ConstantSDNode *C = isConstOrConstSplat(Op1, DemandedElts);
    if (C) {
      // If one side is a constant, and all of the set bits in the constant are
      // also known set on the other side, turn this into an AND, as we know
      // the bits will be cleared.
      // e.g. (X | C1) ^ C2 --> (X | C1) & ~C2 iff (C1&C2) == C2
      // NB: it is okay if more bits are known than are requested.
      if (C->getAPIntValue() == Known2.One) {
        SDValue ANDC =
            TLO.DAG.getConstant(~C->getAPIntValue() & DemandedBits, dl, VT);
        return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::AND, dl, VT, Op0, ANDC));
      }

      // If the RHS is a constant, see if we can change it. Don't alter a -1
      // constant because that's a 'not' op, and that is better for combining
      // and codegen.
      if (!C->isAllOnesValue() &&
          DemandedBits.isSubsetOf(C->getAPIntValue())) {
        // We're flipping all demanded bits. Flip the undemanded bits too.
        SDValue New = TLO.DAG.getNOT(dl, Op0, VT);
        return TLO.CombineTo(Op, New);
      }
    }

    // If we can't turn this into a 'not', try to shrink the constant.
    if (!C || !C->isAllOnesValue())
      if (ShrinkDemandedConstant(Op, DemandedBits, DemandedElts, TLO))
        return true;

    Known ^= Known2;
    break;
  }
  case ISD::SELECT:
    if (SimplifyDemandedBits(Op.getOperand(2), DemandedBits, Known, TLO,
                             Depth + 1))
      return true;
    if (SimplifyDemandedBits(Op.getOperand(1), DemandedBits, Known2, TLO,
                             Depth + 1))
      return true;
    assert(!Known.hasConflict() && "Bits known to be one AND zero?");
    assert(!Known2.hasConflict() && "Bits known to be one AND zero?");

    // If the operands are constants, see if we can simplify them.
    if (ShrinkDemandedConstant(Op, DemandedBits, DemandedElts, TLO))
      return true;

    // Only known if known in both the LHS and RHS.
    Known.One &= Known2.One;
    Known.Zero &= Known2.Zero;
    break;
  case ISD::SELECT_CC:
    if (SimplifyDemandedBits(Op.getOperand(3), DemandedBits, Known, TLO,
                             Depth + 1))
      return true;
    if (SimplifyDemandedBits(Op.getOperand(2), DemandedBits, Known2, TLO,
                             Depth + 1))
      return true;
    assert(!Known.hasConflict() && "Bits known to be one AND zero?");
    assert(!Known2.hasConflict() && "Bits known to be one AND zero?");

    // If the operands are constants, see if we can simplify them.
    if (ShrinkDemandedConstant(Op, DemandedBits, DemandedElts, TLO))
      return true;

    // Only known if known in both the LHS and RHS.
    Known.One &= Known2.One;
    Known.Zero &= Known2.Zero;
    break;
  case ISD::SETCC: {
    SDValue Op0 = Op.getOperand(0);
    SDValue Op1 = Op.getOperand(1);
    ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
    // If (1) we only need the sign-bit, (2) the setcc operands are the same
    // width as the setcc result, and (3) the result of a setcc conforms to 0
    // or -1, we may be able to bypass the setcc.
    if (DemandedBits.isSignMask() &&
        Op0.getScalarValueSizeInBits() == BitWidth &&
        getBooleanContents(Op0.getValueType()) ==
            BooleanContent::ZeroOrNegativeOneBooleanContent) {
      // If we're testing X < 0, then this compare isn't needed - just use X!
      // FIXME: We're limiting to integer types here, but this should also work
      // if we don't care about FP signed-zero. The use of SETLT with FP means
      // that we don't care about NaNs.
      if (CC == ISD::SETLT && Op1.getValueType().isInteger() &&
          (isNullConstant(Op1) || ISD::isBuildVectorAllZeros(Op1.getNode())))
        return TLO.CombineTo(Op, Op0);

      // TODO: Should we check for other forms of sign-bit comparisons?
      // Examples: X <= -1, X >= 0
    }
    if (getBooleanContents(Op0.getValueType()) ==
            TargetLowering::ZeroOrOneBooleanContent &&
        BitWidth > 1)
      Known.Zero.setBitsFrom(1);
    break;
  }
1436 case ISD::SHL: {
1437 SDValue Op0 = Op.getOperand(0);
1438 SDValue Op1 = Op.getOperand(1);
1439 EVT ShiftVT = Op1.getValueType();
1440
1441 if (const APInt *SA =
1442 TLO.DAG.getValidShiftAmountConstant(Op, DemandedElts)) {
1443 unsigned ShAmt = SA->getZExtValue();
1444 if (ShAmt == 0)
1445 return TLO.CombineTo(Op, Op0);
1446
1447 // If this is ((X >>u C1) << ShAmt), see if we can simplify this into a
1448 // single shift. We can do this if the bottom bits (which are shifted
1449 // out) are never demanded.
1450 // TODO - support non-uniform vector amounts.
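      // e.g. if the low 5 bits of the result aren't demanded:
      // ((X >>u 2) << 5) --> (X << 3), and ((X >>u 7) << 5) --> (X >>u 2).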
      if (Op0.getOpcode() == ISD::SRL) {
        if (!DemandedBits.intersects(APInt::getLowBitsSet(BitWidth, ShAmt))) {
          if (const APInt *SA2 =
                  TLO.DAG.getValidShiftAmountConstant(Op0, DemandedElts)) {
            unsigned C1 = SA2->getZExtValue();
            unsigned Opc = ISD::SHL;
            int Diff = ShAmt - C1;
            if (Diff < 0) {
              Diff = -Diff;
              Opc = ISD::SRL;
            }
            SDValue NewSA = TLO.DAG.getConstant(Diff, dl, ShiftVT);
            return TLO.CombineTo(
                Op, TLO.DAG.getNode(Opc, dl, VT, Op0.getOperand(0), NewSA));
          }
        }
      }

      // Convert (shl (anyext x), c) to (anyext (shl x, c)) if the high bits
      // are not demanded. This will likely allow the anyext to be folded away.
      // TODO - support non-uniform vector amounts.
      if (Op0.getOpcode() == ISD::ANY_EXTEND) {
        SDValue InnerOp = Op0.getOperand(0);
        EVT InnerVT = InnerOp.getValueType();
        unsigned InnerBits = InnerVT.getScalarSizeInBits();
        if (ShAmt < InnerBits && DemandedBits.getActiveBits() <= InnerBits &&
            isTypeDesirableForOp(ISD::SHL, InnerVT)) {
          EVT ShTy = getShiftAmountTy(InnerVT, DL);
          if (!APInt(BitWidth, ShAmt).isIntN(ShTy.getSizeInBits()))
            ShTy = InnerVT;
          SDValue NarrowShl =
              TLO.DAG.getNode(ISD::SHL, dl, InnerVT, InnerOp,
                              TLO.DAG.getConstant(ShAmt, dl, ShTy));
          return TLO.CombineTo(
              Op, TLO.DAG.getNode(ISD::ANY_EXTEND, dl, VT, NarrowShl));
        }

        // Repeat the SHL optimization above in cases where an extension
        // intervenes: (shl (anyext (shr x, c1)), c2) to
        // (shl (anyext x), c2-c1). This requires that the bottom c1 bits
        // aren't demanded (as above) and that the shifted upper c1 bits of
        // x aren't demanded.
        // TODO - support non-uniform vector amounts.
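        // e.g. with i16 x extended to i32: (shl (anyext (srl x, 3)), 8)
        // --> (shl (anyext x), 5) when only bits [20:8] of the result are
        // demanded, since bit i of either form is then bit i-5 of x.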
        if (Op0.hasOneUse() && InnerOp.getOpcode() == ISD::SRL &&
            InnerOp.hasOneUse()) {
          if (const APInt *SA2 =
                  TLO.DAG.getValidShiftAmountConstant(InnerOp, DemandedElts)) {
            unsigned InnerShAmt = SA2->getZExtValue();
            if (InnerShAmt < ShAmt && InnerShAmt < InnerBits &&
                DemandedBits.getActiveBits() <=
                    (InnerBits - InnerShAmt + ShAmt) &&
                DemandedBits.countTrailingZeros() >= ShAmt) {
              SDValue NewSA =
                  TLO.DAG.getConstant(ShAmt - InnerShAmt, dl, ShiftVT);
              SDValue NewExt = TLO.DAG.getNode(ISD::ANY_EXTEND, dl, VT,
                                               InnerOp.getOperand(0));
              return TLO.CombineTo(
                  Op, TLO.DAG.getNode(ISD::SHL, dl, VT, NewExt, NewSA));
            }
          }
        }
      }

      APInt InDemandedMask = DemandedBits.lshr(ShAmt);
      if (SimplifyDemandedBits(Op0, InDemandedMask, DemandedElts, Known, TLO,
                               Depth + 1))
        return true;
      assert(!Known.hasConflict() && "Bits known to be one AND zero?");
      Known.Zero <<= ShAmt;
      Known.One <<= ShAmt;
      // Low bits known zero.
      Known.Zero.setLowBits(ShAmt);

      // Try shrinking the operation as long as the shift amount will still be
      // in range.
      if ((ShAmt < DemandedBits.getActiveBits()) &&
          ShrinkDemandedOp(Op, BitWidth, DemandedBits, TLO))
        return true;
    }

    // If we are only demanding sign bits then we can use the shift source
    // directly.
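    // e.g. if X is known to have at least 30 sign bits, then (X << 4) and X
    // agree on bits [31:6] of an i32, so when only those bits are demanded
    // the shift can be dropped.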
    if (const APInt *MaxSA =
            TLO.DAG.getValidMaximumShiftAmountConstant(Op, DemandedElts)) {
      unsigned ShAmt = MaxSA->getZExtValue();
      unsigned NumSignBits =
          TLO.DAG.ComputeNumSignBits(Op0, DemandedElts, Depth + 1);
      unsigned UpperDemandedBits = BitWidth - DemandedBits.countTrailingZeros();
      if (NumSignBits > ShAmt && (NumSignBits - ShAmt) >= (UpperDemandedBits))
        return TLO.CombineTo(Op, Op0);
    }
    break;
  }
  case ISD::SRL: {
    SDValue Op0 = Op.getOperand(0);
    SDValue Op1 = Op.getOperand(1);
    EVT ShiftVT = Op1.getValueType();

    if (const APInt *SA =
            TLO.DAG.getValidShiftAmountConstant(Op, DemandedElts)) {
      unsigned ShAmt = SA->getZExtValue();
      if (ShAmt == 0)
        return TLO.CombineTo(Op, Op0);

      // If this is ((X << C1) >>u ShAmt), see if we can simplify this into a
      // single shift. We can do this if the top bits (which are shifted out)
      // are never demanded.
      // TODO - support non-uniform vector amounts.
      if (Op0.getOpcode() == ISD::SHL) {
        if (!DemandedBits.intersects(APInt::getHighBitsSet(BitWidth, ShAmt))) {
          if (const APInt *SA2 =
                  TLO.DAG.getValidShiftAmountConstant(Op0, DemandedElts)) {
            unsigned C1 = SA2->getZExtValue();
            unsigned Opc = ISD::SRL;
            int Diff = ShAmt - C1;
            if (Diff < 0) {
              Diff = -Diff;
              Opc = ISD::SHL;
            }
            SDValue NewSA = TLO.DAG.getConstant(Diff, dl, ShiftVT);
            return TLO.CombineTo(
                Op, TLO.DAG.getNode(Opc, dl, VT, Op0.getOperand(0), NewSA));
          }
        }
      }

      APInt InDemandedMask = (DemandedBits << ShAmt);

      // If the shift is exact, then it does demand the low bits (and knows that
      // they are zero).
      if (Op->getFlags().hasExact())
        InDemandedMask.setLowBits(ShAmt);

      // Compute the new bits that are at the top now.
      if (SimplifyDemandedBits(Op0, InDemandedMask, DemandedElts, Known, TLO,
                               Depth + 1))
        return true;
      assert(!Known.hasConflict() && "Bits known to be one AND zero?");
      Known.Zero.lshrInPlace(ShAmt);
      Known.One.lshrInPlace(ShAmt);
      // High bits known zero.
      Known.Zero.setHighBits(ShAmt);
    }
    break;
  }
  case ISD::SRA: {
    SDValue Op0 = Op.getOperand(0);
    SDValue Op1 = Op.getOperand(1);
    EVT ShiftVT = Op1.getValueType();

    // If we only want bits that already match the signbit then we don't need
    // to shift.
    unsigned NumHiDemandedBits = BitWidth - DemandedBits.countTrailingZeros();
    if (TLO.DAG.ComputeNumSignBits(Op0, DemandedElts, Depth + 1) >=
        NumHiDemandedBits)
      return TLO.CombineTo(Op, Op0);

    // If this is an arithmetic shift right and only the low-bit is set, we can
    // always convert this into a logical shr, even if the shift amount is
    // variable. The low bit of the shift cannot be an input sign bit unless
    // the shift amount is >= the size of the datatype, which is undefined.
    if (DemandedBits.isOneValue())
      return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::SRL, dl, VT, Op0, Op1));

    if (const APInt *SA =
            TLO.DAG.getValidShiftAmountConstant(Op, DemandedElts)) {
      unsigned ShAmt = SA->getZExtValue();
      if (ShAmt == 0)
        return TLO.CombineTo(Op, Op0);

      APInt InDemandedMask = (DemandedBits << ShAmt);

      // If the shift is exact, then it does demand the low bits (and knows that
      // they are zero).
      if (Op->getFlags().hasExact())
        InDemandedMask.setLowBits(ShAmt);

      // If any of the demanded bits are produced by the sign extension, we also
      // demand the input sign bit.
      if (DemandedBits.countLeadingZeros() < ShAmt)
        InDemandedMask.setSignBit();

      if (SimplifyDemandedBits(Op0, InDemandedMask, DemandedElts, Known, TLO,
                               Depth + 1))
        return true;
      assert(!Known.hasConflict() && "Bits known to be one AND zero?");
      Known.Zero.lshrInPlace(ShAmt);
      Known.One.lshrInPlace(ShAmt);

      // If the input sign bit is known to be zero, or if none of the top bits
      // are demanded, turn this into an unsigned shift right.
      if (Known.Zero[BitWidth - ShAmt - 1] ||
          DemandedBits.countLeadingZeros() >= ShAmt) {
        SDNodeFlags Flags;
        Flags.setExact(Op->getFlags().hasExact());
        return TLO.CombineTo(
            Op, TLO.DAG.getNode(ISD::SRL, dl, VT, Op0, Op1, Flags));
      }

      int Log2 = DemandedBits.exactLogBase2();
      if (Log2 >= 0) {
        // The bit must come from the sign.
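        // (The earlier checks guarantee Log2 + ShAmt >= BitWidth here.)
        // e.g. with BitWidth = 32 and only bit 24 demanded, bit 24 of the
        // sra result is a copy of the sign bit, so (X >>u 7) produces it.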
        SDValue NewSA = TLO.DAG.getConstant(BitWidth - 1 - Log2, dl, ShiftVT);
        return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::SRL, dl, VT, Op0, NewSA));
      }

      if (Known.One[BitWidth - ShAmt - 1])
        // New bits are known one.
        Known.One.setHighBits(ShAmt);

      // Attempt to avoid multi-use ops if we don't need anything from them.
      if (!InDemandedMask.isAllOnesValue() || !DemandedElts.isAllOnesValue()) {
        SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits(
            Op0, InDemandedMask, DemandedElts, TLO.DAG, Depth + 1);
        if (DemandedOp0) {
          SDValue NewOp = TLO.DAG.getNode(ISD::SRA, dl, VT, DemandedOp0, Op1);
          return TLO.CombineTo(Op, NewOp);
        }
      }
    }
    break;
  }
  case ISD::FSHL:
  case ISD::FSHR: {
    SDValue Op0 = Op.getOperand(0);
    SDValue Op1 = Op.getOperand(1);
    SDValue Op2 = Op.getOperand(2);
    bool IsFSHL = (Op.getOpcode() == ISD::FSHL);

    if (ConstantSDNode *SA = isConstOrConstSplat(Op2, DemandedElts)) {
      unsigned Amt = SA->getAPIntValue().urem(BitWidth);

      // For fshl, 0-shift returns the 1st arg.
      // For fshr, 0-shift returns the 2nd arg.
      if (Amt == 0) {
        if (SimplifyDemandedBits(IsFSHL ? Op0 : Op1, DemandedBits, DemandedElts,
                                 Known, TLO, Depth + 1))
          return true;
        break;
      }

      // fshl: (Op0 << Amt) | (Op1 >> (BW - Amt))
      // fshr: (Op0 << (BW - Amt)) | (Op1 >> Amt)
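      // e.g. fshl with BitWidth = 8 and Amt = 3 computes
      // (Op0 << 3) | (Op1 >> 5), so Op0 only needs DemandedBits >> 3 and
      // Op1 only needs DemandedBits << 5.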
      APInt Demanded0 = DemandedBits.lshr(IsFSHL ? Amt : (BitWidth - Amt));
      APInt Demanded1 = DemandedBits << (IsFSHL ? (BitWidth - Amt) : Amt);
      if (SimplifyDemandedBits(Op0, Demanded0, DemandedElts, Known2, TLO,
                               Depth + 1))
        return true;
      if (SimplifyDemandedBits(Op1, Demanded1, DemandedElts, Known, TLO,
                               Depth + 1))
        return true;

      Known2.One <<= (IsFSHL ? Amt : (BitWidth - Amt));
      Known2.Zero <<= (IsFSHL ? Amt : (BitWidth - Amt));
      Known.One.lshrInPlace(IsFSHL ? (BitWidth - Amt) : Amt);
      Known.Zero.lshrInPlace(IsFSHL ? (BitWidth - Amt) : Amt);
      Known.One |= Known2.One;
      Known.Zero |= Known2.Zero;
    }

    // For pow-2 bitwidths we only demand the bottom modulo amt bits.
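    // e.g. for i32 the shift amount is interpreted modulo 32, so only the
    // low 5 bits of Op2 can affect the result.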
    if (isPowerOf2_32(BitWidth)) {
      APInt DemandedAmtBits(Op2.getScalarValueSizeInBits(), BitWidth - 1);
      if (SimplifyDemandedBits(Op2, DemandedAmtBits, DemandedElts,
                               Known2, TLO, Depth + 1))
        return true;
    }
    break;
  }
  case ISD::ROTL:
  case ISD::ROTR: {
    SDValue Op0 = Op.getOperand(0);
    SDValue Op1 = Op.getOperand(1);

    // If we're rotating a 0/-1 value, then it stays a 0/-1 value.
    if (BitWidth == TLO.DAG.ComputeNumSignBits(Op0, DemandedElts, Depth + 1))
      return TLO.CombineTo(Op, Op0);

    // For pow-2 bitwidths we only demand the bottom modulo amt bits.
    if (isPowerOf2_32(BitWidth)) {
      APInt DemandedAmtBits(Op1.getScalarValueSizeInBits(), BitWidth - 1);
      if (SimplifyDemandedBits(Op1, DemandedAmtBits, DemandedElts, Known2, TLO,
                               Depth + 1))
        return true;
    }
    break;
  }
  case ISD::BITREVERSE: {
    SDValue Src = Op.getOperand(0);
    APInt DemandedSrcBits = DemandedBits.reverseBits();
    if (SimplifyDemandedBits(Src, DemandedSrcBits, DemandedElts, Known2, TLO,
                             Depth + 1))
      return true;
    Known.One = Known2.One.reverseBits();
    Known.Zero = Known2.Zero.reverseBits();
    break;
  }
  case ISD::BSWAP: {
    SDValue Src = Op.getOperand(0);
    APInt DemandedSrcBits = DemandedBits.byteSwap();
    if (SimplifyDemandedBits(Src, DemandedSrcBits, DemandedElts, Known2, TLO,
                             Depth + 1))
      return true;
    Known.One = Known2.One.byteSwap();
    Known.Zero = Known2.Zero.byteSwap();
    break;
  }
  case ISD::CTPOP: {
    // If only 1 bit is demanded, replace with PARITY as long as we're before
    // op legalization.
    // FIXME: Limit to scalars for now.
    if (DemandedBits.isOneValue() && !TLO.LegalOps && !VT.isVector())
      return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::PARITY, dl, VT,
                                               Op.getOperand(0)));

    Known = TLO.DAG.computeKnownBits(Op, DemandedElts, Depth);
    break;
  }
  case ISD::SIGN_EXTEND_INREG: {
    SDValue Op0 = Op.getOperand(0);
    EVT ExVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
    unsigned ExVTBits = ExVT.getScalarSizeInBits();

    // If we only care about the highest bit, don't bother shifting right.
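    // e.g. for (sext_inreg X, i8) in i32 with only bit 31 demanded,
    // (X << 24) moves the i8 sign bit (bit 7) straight into bit 31.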
    if (DemandedBits.isSignMask()) {
      unsigned NumSignBits =
          TLO.DAG.ComputeNumSignBits(Op0, DemandedElts, Depth + 1);
      bool AlreadySignExtended = NumSignBits >= BitWidth - ExVTBits + 1;
      // However, if the input is already sign extended, we expect the sign
      // extension to be dropped altogether later, so do not simplify.
      if (!AlreadySignExtended) {
        // Compute the correct shift amount type, which must be getShiftAmountTy
        // for scalar types after legalization.
        EVT ShiftAmtTy = VT;
        if (TLO.LegalTypes() && !ShiftAmtTy.isVector())
          ShiftAmtTy = getShiftAmountTy(ShiftAmtTy, DL);

        SDValue ShiftAmt =
            TLO.DAG.getConstant(BitWidth - ExVTBits, dl, ShiftAmtTy);
        return TLO.CombineTo(Op,
                             TLO.DAG.getNode(ISD::SHL, dl, VT, Op0, ShiftAmt));
      }
    }

    // If none of the extended bits are demanded, eliminate the sextinreg.
    if (DemandedBits.getActiveBits() <= ExVTBits)
      return TLO.CombineTo(Op, Op0);

    APInt InputDemandedBits = DemandedBits.getLoBits(ExVTBits);

    // Since the sign extended bits are demanded, we know that the sign
    // bit is demanded.
    InputDemandedBits.setBit(ExVTBits - 1);

    if (SimplifyDemandedBits(Op0, InputDemandedBits, Known, TLO, Depth + 1))
      return true;
    assert(!Known.hasConflict() && "Bits known to be one AND zero?");

    // If the sign bit of the input is known set or clear, then we know the
    // top bits of the result.

    // If the input sign bit is known zero, convert this into a zero extension.
    if (Known.Zero[ExVTBits - 1])
      return TLO.CombineTo(Op, TLO.DAG.getZeroExtendInReg(Op0, dl, ExVT));

    APInt Mask = APInt::getLowBitsSet(BitWidth, ExVTBits);
    if (Known.One[ExVTBits - 1]) { // Input sign bit known set
      Known.One.setBitsFrom(ExVTBits);
      Known.Zero &= Mask;
    } else { // Input sign bit unknown
      Known.Zero &= Mask;
      Known.One &= Mask;
    }
    break;
  }
  case ISD::BUILD_PAIR: {
    EVT HalfVT = Op.getOperand(0).getValueType();
    unsigned HalfBitWidth = HalfVT.getScalarSizeInBits();

    APInt MaskLo = DemandedBits.getLoBits(HalfBitWidth).trunc(HalfBitWidth);
    APInt MaskHi = DemandedBits.getHiBits(HalfBitWidth).trunc(HalfBitWidth);

    KnownBits KnownLo, KnownHi;

    if (SimplifyDemandedBits(Op.getOperand(0), MaskLo, KnownLo, TLO, Depth + 1))
      return true;

    if (SimplifyDemandedBits(Op.getOperand(1), MaskHi, KnownHi, TLO, Depth + 1))
      return true;

    Known.Zero = KnownLo.Zero.zext(BitWidth) |
                 KnownHi.Zero.zext(BitWidth).shl(HalfBitWidth);

    Known.One = KnownLo.One.zext(BitWidth) |
                KnownHi.One.zext(BitWidth).shl(HalfBitWidth);
    break;
  }
  case ISD::ZERO_EXTEND:
  case ISD::ZERO_EXTEND_VECTOR_INREG: {
    SDValue Src = Op.getOperand(0);
    EVT SrcVT = Src.getValueType();
    unsigned InBits = SrcVT.getScalarSizeInBits();
    unsigned InElts = SrcVT.isVector() ? SrcVT.getVectorNumElements() : 1;
    bool IsVecInReg = Op.getOpcode() == ISD::ZERO_EXTEND_VECTOR_INREG;

    // If none of the top bits are demanded, convert this into an any_extend.
    if (DemandedBits.getActiveBits() <= InBits) {
      // If we only need the non-extended bits of the bottom element
      // then we can just bitcast to the result.
      if (IsVecInReg && DemandedElts == 1 &&
          VT.getSizeInBits() == SrcVT.getSizeInBits() &&
          TLO.DAG.getDataLayout().isLittleEndian())
        return TLO.CombineTo(Op, TLO.DAG.getBitcast(VT, Src));

      unsigned Opc =
          IsVecInReg ? ISD::ANY_EXTEND_VECTOR_INREG : ISD::ANY_EXTEND;
      if (!TLO.LegalOperations() || isOperationLegal(Opc, VT))
        return TLO.CombineTo(Op, TLO.DAG.getNode(Opc, dl, VT, Src));
    }

    APInt InDemandedBits = DemandedBits.trunc(InBits);
    APInt InDemandedElts = DemandedElts.zextOrSelf(InElts);
    if (SimplifyDemandedBits(Src, InDemandedBits, InDemandedElts, Known, TLO,
                             Depth + 1))
      return true;
    assert(!Known.hasConflict() && "Bits known to be one AND zero?");
    assert(Known.getBitWidth() == InBits && "Src width has changed?");
    Known = Known.zext(BitWidth);

    // Attempt to avoid multi-use ops if we don't need anything from them.
    if (SDValue NewSrc = SimplifyMultipleUseDemandedBits(
            Src, InDemandedBits, InDemandedElts, TLO.DAG, Depth + 1))
      return TLO.CombineTo(Op, TLO.DAG.getNode(Op.getOpcode(), dl, VT, NewSrc));
    break;
  }
  case ISD::SIGN_EXTEND:
  case ISD::SIGN_EXTEND_VECTOR_INREG: {
    SDValue Src = Op.getOperand(0);
    EVT SrcVT = Src.getValueType();
    unsigned InBits = SrcVT.getScalarSizeInBits();
    unsigned InElts = SrcVT.isVector() ? SrcVT.getVectorNumElements() : 1;
    bool IsVecInReg = Op.getOpcode() == ISD::SIGN_EXTEND_VECTOR_INREG;

    // If none of the top bits are demanded, convert this into an any_extend.
    if (DemandedBits.getActiveBits() <= InBits) {
      // If we only need the non-extended bits of the bottom element
      // then we can just bitcast to the result.
      if (IsVecInReg && DemandedElts == 1 &&
          VT.getSizeInBits() == SrcVT.getSizeInBits() &&
          TLO.DAG.getDataLayout().isLittleEndian())
        return TLO.CombineTo(Op, TLO.DAG.getBitcast(VT, Src));

      unsigned Opc =
          IsVecInReg ? ISD::ANY_EXTEND_VECTOR_INREG : ISD::ANY_EXTEND;
      if (!TLO.LegalOperations() || isOperationLegal(Opc, VT))
        return TLO.CombineTo(Op, TLO.DAG.getNode(Opc, dl, VT, Src));
    }

    APInt InDemandedBits = DemandedBits.trunc(InBits);
    APInt InDemandedElts = DemandedElts.zextOrSelf(InElts);

    // Since some of the sign extended bits are demanded, we know that the sign
    // bit is demanded.
    InDemandedBits.setBit(InBits - 1);

    if (SimplifyDemandedBits(Src, InDemandedBits, InDemandedElts, Known, TLO,
                             Depth + 1))
      return true;
    assert(!Known.hasConflict() && "Bits known to be one AND zero?");
    assert(Known.getBitWidth() == InBits && "Src width has changed?");

    // If the sign bit is known one, the top bits match.
    Known = Known.sext(BitWidth);

    // If the sign bit is known zero, convert this to a zero extend.
    if (Known.isNonNegative()) {
      unsigned Opc =
          IsVecInReg ? ISD::ZERO_EXTEND_VECTOR_INREG : ISD::ZERO_EXTEND;
      if (!TLO.LegalOperations() || isOperationLegal(Opc, VT))
        return TLO.CombineTo(Op, TLO.DAG.getNode(Opc, dl, VT, Src));
    }

    // Attempt to avoid multi-use ops if we don't need anything from them.
    if (SDValue NewSrc = SimplifyMultipleUseDemandedBits(
            Src, InDemandedBits, InDemandedElts, TLO.DAG, Depth + 1))
      return TLO.CombineTo(Op, TLO.DAG.getNode(Op.getOpcode(), dl, VT, NewSrc));
    break;
  }
  case ISD::ANY_EXTEND:
  case ISD::ANY_EXTEND_VECTOR_INREG: {
    SDValue Src = Op.getOperand(0);
    EVT SrcVT = Src.getValueType();
    unsigned InBits = SrcVT.getScalarSizeInBits();
    unsigned InElts = SrcVT.isVector() ? SrcVT.getVectorNumElements() : 1;
    bool IsVecInReg = Op.getOpcode() == ISD::ANY_EXTEND_VECTOR_INREG;

    // If we only need the bottom element then we can just bitcast.
    // TODO: Handle ANY_EXTEND?
    if (IsVecInReg && DemandedElts == 1 &&
        VT.getSizeInBits() == SrcVT.getSizeInBits() &&
        TLO.DAG.getDataLayout().isLittleEndian())
      return TLO.CombineTo(Op, TLO.DAG.getBitcast(VT, Src));

    APInt InDemandedBits = DemandedBits.trunc(InBits);
    APInt InDemandedElts = DemandedElts.zextOrSelf(InElts);
    if (SimplifyDemandedBits(Src, InDemandedBits, InDemandedElts, Known, TLO,
                             Depth + 1))
      return true;
    assert(!Known.hasConflict() && "Bits known to be one AND zero?");
    assert(Known.getBitWidth() == InBits && "Src width has changed?");
    Known = Known.anyext(BitWidth);

    // Attempt to avoid multi-use ops if we don't need anything from them.
    if (SDValue NewSrc = SimplifyMultipleUseDemandedBits(
            Src, InDemandedBits, InDemandedElts, TLO.DAG, Depth + 1))
      return TLO.CombineTo(Op, TLO.DAG.getNode(Op.getOpcode(), dl, VT, NewSrc));
    break;
  }
  case ISD::TRUNCATE: {
    SDValue Src = Op.getOperand(0);

    // Simplify the input, using demanded bit information, and compute the known
    // zero/one bits live out.
    unsigned OperandBitWidth = Src.getScalarValueSizeInBits();
    APInt TruncMask = DemandedBits.zext(OperandBitWidth);
    if (SimplifyDemandedBits(Src, TruncMask, Known, TLO, Depth + 1))
      return true;
    Known = Known.trunc(BitWidth);

    // Attempt to avoid multi-use ops if we don't need anything from them.
    if (SDValue NewSrc = SimplifyMultipleUseDemandedBits(
            Src, TruncMask, DemandedElts, TLO.DAG, Depth + 1))
      return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::TRUNCATE, dl, VT, NewSrc));

    // If the input is only used by this truncate, see if we can shrink it based
    // on the known demanded bits.
    if (Src.getNode()->hasOneUse()) {
      switch (Src.getOpcode()) {
      default:
        break;
      case ISD::SRL:
        // Shrink SRL by a constant if none of the high bits shifted in are
        // demanded.
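        // e.g. (trunc:i32 (srl:i64 X, 8)) --> (srl:i32 (trunc X), 8) when
        // bits [31:24] of the result aren't demanded - those are the only
        // bits the i64 shift brings down from the dropped upper half.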
        if (TLO.LegalTypes() && !isTypeDesirableForOp(ISD::SRL, VT))
          // Do not turn (vt1 truncate (vt2 srl)) into (vt1 srl) if vt1 is
          // undesirable.
          break;

        SDValue ShAmt = Src.getOperand(1);
        auto *ShAmtC = dyn_cast<ConstantSDNode>(ShAmt);
        if (!ShAmtC || ShAmtC->getAPIntValue().uge(BitWidth))
          break;
        uint64_t ShVal = ShAmtC->getZExtValue();

        APInt HighBits =
            APInt::getHighBitsSet(OperandBitWidth, OperandBitWidth - BitWidth);
        HighBits.lshrInPlace(ShVal);
        HighBits = HighBits.trunc(BitWidth);

        if (!(HighBits & DemandedBits)) {
          // None of the shifted in bits are needed. Add a truncate of the
          // shift input, then shift it.
          if (TLO.LegalTypes())
            ShAmt = TLO.DAG.getConstant(ShVal, dl, getShiftAmountTy(VT, DL));
          SDValue NewTrunc =
              TLO.DAG.getNode(ISD::TRUNCATE, dl, VT, Src.getOperand(0));
          return TLO.CombineTo(
              Op, TLO.DAG.getNode(ISD::SRL, dl, VT, NewTrunc, ShAmt));
        }
        break;
      }
    }

    assert(!Known.hasConflict() && "Bits known to be one AND zero?");
    break;
  }
  case ISD::AssertZext: {
    // AssertZext demands all of the high bits, plus any of the low bits
    // demanded by its users.
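    // e.g. for AssertZext:i8 on an i32 value, InMask covers bits [7:0], so we
    // pass down ~InMask | DemandedBits: all the asserted-zero high bits plus
    // whatever low bits our users demand.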
    EVT ZVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
    APInt InMask = APInt::getLowBitsSet(BitWidth, ZVT.getSizeInBits());
    if (SimplifyDemandedBits(Op.getOperand(0), ~InMask | DemandedBits, Known,
                             TLO, Depth + 1))
      return true;
    assert(!Known.hasConflict() && "Bits known to be one AND zero?");

    Known.Zero |= ~InMask;
    break;
  }
  case ISD::EXTRACT_VECTOR_ELT: {
    SDValue Src = Op.getOperand(0);
    SDValue Idx = Op.getOperand(1);
    ElementCount SrcEltCnt = Src.getValueType().getVectorElementCount();
    unsigned EltBitWidth = Src.getScalarValueSizeInBits();

    if (SrcEltCnt.isScalable())
      return false;

    // Demand the bits from every vector element without a constant index.
    unsigned NumSrcElts = SrcEltCnt.getFixedValue();
    APInt DemandedSrcElts = APInt::getAllOnesValue(NumSrcElts);
    if (auto *CIdx = dyn_cast<ConstantSDNode>(Idx))
      if (CIdx->getAPIntValue().ult(NumSrcElts))
        DemandedSrcElts = APInt::getOneBitSet(NumSrcElts, CIdx->getZExtValue());

    // If BitWidth > EltBitWidth the value is any-extended. So we do not know
    // anything about the extended bits.
    APInt DemandedSrcBits = DemandedBits;
    if (BitWidth > EltBitWidth)
      DemandedSrcBits = DemandedSrcBits.trunc(EltBitWidth);

    if (SimplifyDemandedBits(Src, DemandedSrcBits, DemandedSrcElts, Known2, TLO,
                             Depth + 1))
      return true;

    // Attempt to avoid multi-use ops if we don't need anything from them.
    if (!DemandedSrcBits.isAllOnesValue() ||
        !DemandedSrcElts.isAllOnesValue()) {
      if (SDValue DemandedSrc = SimplifyMultipleUseDemandedBits(
              Src, DemandedSrcBits, DemandedSrcElts, TLO.DAG, Depth + 1)) {
        SDValue NewOp =
            TLO.DAG.getNode(Op.getOpcode(), dl, VT, DemandedSrc, Idx);
        return TLO.CombineTo(Op, NewOp);
      }
    }

    Known = Known2;
    if (BitWidth > EltBitWidth)
      Known = Known.anyext(BitWidth);
    break;
  }
  case ISD::BITCAST: {
    SDValue Src = Op.getOperand(0);
    EVT SrcVT = Src.getValueType();
    unsigned NumSrcEltBits = SrcVT.getScalarSizeInBits();

    // If this is an FP->Int bitcast and if the sign bit is the only
    // thing demanded, turn this into a FGETSIGN.
    if (!TLO.LegalOperations() && !VT.isVector() && !SrcVT.isVector() &&
        DemandedBits == APInt::getSignMask(Op.getValueSizeInBits()) &&
        SrcVT.isFloatingPoint()) {
      bool OpVTLegal = isOperationLegalOrCustom(ISD::FGETSIGN, VT);
      bool i32Legal = isOperationLegalOrCustom(ISD::FGETSIGN, MVT::i32);
      if ((OpVTLegal || i32Legal) && VT.isSimple() && SrcVT != MVT::f16 &&
          SrcVT != MVT::f128) {
        // Cannot eliminate/lower SHL for f128 yet.
        EVT Ty = OpVTLegal ? VT : MVT::i32;
        // Make a FGETSIGN + SHL to move the sign bit into the appropriate
        // place. We expect the SHL to be eliminated by other optimizations.
        SDValue Sign = TLO.DAG.getNode(ISD::FGETSIGN, dl, Ty, Src);
        unsigned OpVTSizeInBits = Op.getValueSizeInBits();
        if (!OpVTLegal && OpVTSizeInBits > 32)
          Sign = TLO.DAG.getNode(ISD::ZERO_EXTEND, dl, VT, Sign);
        unsigned ShVal = Op.getValueSizeInBits() - 1;
        SDValue ShAmt = TLO.DAG.getConstant(ShVal, dl, VT);
        return TLO.CombineTo(Op,
                             TLO.DAG.getNode(ISD::SHL, dl, VT, Sign, ShAmt));
      }
    }

    // Bitcast from a vector using SimplifyDemandedBits/VectorElts.
    // Demand the elt/bit if any of the original elts/bits are demanded.
    // TODO - bigendian once we have test coverage.
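    // e.g. for a little-endian bitcast of v2i32 to i64 (Scale = 2):
    // demanding bit 40 of the i64 demands bit 8 of source element 1.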
    if (SrcVT.isVector() && (BitWidth % NumSrcEltBits) == 0 &&
        TLO.DAG.getDataLayout().isLittleEndian()) {
      unsigned Scale = BitWidth / NumSrcEltBits;
      unsigned NumSrcElts = SrcVT.getVectorNumElements();
      APInt DemandedSrcBits = APInt::getNullValue(NumSrcEltBits);
      APInt DemandedSrcElts = APInt::getNullValue(NumSrcElts);
      for (unsigned i = 0; i != Scale; ++i) {
        unsigned Offset = i * NumSrcEltBits;
        APInt Sub = DemandedBits.extractBits(NumSrcEltBits, Offset);
        if (!Sub.isNullValue()) {
          DemandedSrcBits |= Sub;
          for (unsigned j = 0; j != NumElts; ++j)
            if (DemandedElts[j])
              DemandedSrcElts.setBit((j * Scale) + i);
        }
      }

      APInt KnownSrcUndef, KnownSrcZero;
      if (SimplifyDemandedVectorElts(Src, DemandedSrcElts, KnownSrcUndef,
                                     KnownSrcZero, TLO, Depth + 1))
        return true;

      KnownBits KnownSrcBits;
      if (SimplifyDemandedBits(Src, DemandedSrcBits, DemandedSrcElts,
                               KnownSrcBits, TLO, Depth + 1))
        return true;
    } else if ((NumSrcEltBits % BitWidth) == 0 &&
               TLO.DAG.getDataLayout().isLittleEndian()) {
      unsigned Scale = NumSrcEltBits / BitWidth;
      unsigned NumSrcElts = SrcVT.isVector() ? SrcVT.getVectorNumElements() : 1;
      APInt DemandedSrcBits = APInt::getNullValue(NumSrcEltBits);
      APInt DemandedSrcElts = APInt::getNullValue(NumSrcElts);
      for (unsigned i = 0; i != NumElts; ++i)
        if (DemandedElts[i]) {
          unsigned Offset = (i % Scale) * BitWidth;
          DemandedSrcBits.insertBits(DemandedBits, Offset);
          DemandedSrcElts.setBit(i / Scale);
        }

      if (SrcVT.isVector()) {
        APInt KnownSrcUndef, KnownSrcZero;
        if (SimplifyDemandedVectorElts(Src, DemandedSrcElts, KnownSrcUndef,
                                       KnownSrcZero, TLO, Depth + 1))
          return true;
      }

      KnownBits KnownSrcBits;
      if (SimplifyDemandedBits(Src, DemandedSrcBits, DemandedSrcElts,
                               KnownSrcBits, TLO, Depth + 1))
        return true;
    }

    // If this is a bitcast, let computeKnownBits handle it. Only do this on a
    // recursive call where Known may be useful to the caller.
    if (Depth > 0) {
      Known = TLO.DAG.computeKnownBits(Op, DemandedElts, Depth);
      return false;
    }
    break;
  }
  case ISD::ADD:
  case ISD::MUL:
  case ISD::SUB: {
    // Add, Sub, and Mul don't demand any bits in positions beyond that
    // of the highest bit demanded of them.
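    // e.g. if only the low 8 bits of an i32 add are demanded, carries out of
    // bit 7 can never flow back into bits [7:0], so the operands also only
    // need their low 8 bits.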
    SDValue Op0 = Op.getOperand(0), Op1 = Op.getOperand(1);
    SDNodeFlags Flags = Op.getNode()->getFlags();
    unsigned DemandedBitsLZ = DemandedBits.countLeadingZeros();
    APInt LoMask = APInt::getLowBitsSet(BitWidth, BitWidth - DemandedBitsLZ);
    if (SimplifyDemandedBits(Op0, LoMask, DemandedElts, Known2, TLO,
                             Depth + 1) ||
        SimplifyDemandedBits(Op1, LoMask, DemandedElts, Known2, TLO,
                             Depth + 1) ||
        // See if the operation should be performed at a smaller bit width.
        ShrinkDemandedOp(Op, BitWidth, DemandedBits, TLO)) {
      if (Flags.hasNoSignedWrap() || Flags.hasNoUnsignedWrap()) {
        // Disable the nsw and nuw flags. We can no longer guarantee that we
        // won't wrap after simplification.
        Flags.setNoSignedWrap(false);
        Flags.setNoUnsignedWrap(false);
        SDValue NewOp =
            TLO.DAG.getNode(Op.getOpcode(), dl, VT, Op0, Op1, Flags);
        return TLO.CombineTo(Op, NewOp);
      }
      return true;
    }

    // Attempt to avoid multi-use ops if we don't need anything from them.
    if (!LoMask.isAllOnesValue() || !DemandedElts.isAllOnesValue()) {
      SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits(
          Op0, LoMask, DemandedElts, TLO.DAG, Depth + 1);
      SDValue DemandedOp1 = SimplifyMultipleUseDemandedBits(
          Op1, LoMask, DemandedElts, TLO.DAG, Depth + 1);
      if (DemandedOp0 || DemandedOp1) {
        Flags.setNoSignedWrap(false);
        Flags.setNoUnsignedWrap(false);
        Op0 = DemandedOp0 ? DemandedOp0 : Op0;
        Op1 = DemandedOp1 ? DemandedOp1 : Op1;
        SDValue NewOp =
            TLO.DAG.getNode(Op.getOpcode(), dl, VT, Op0, Op1, Flags);
        return TLO.CombineTo(Op, NewOp);
      }
    }

    // If we have a constant operand, we may be able to turn it into -1 if we
    // do not demand the high bits. This can make the constant smaller to
    // encode, allow more general folding, or match specialized instruction
    // patterns (eg, 'blsr' on x86). Don't bother changing 1 to -1 because that
    // is probably not useful (and could be detrimental).
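    // e.g. (add X, 255) with only the low 8 bits demanded becomes
    // (add X, -1), since 255 and -1 agree on bits [7:0].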
    ConstantSDNode *C = isConstOrConstSplat(Op1);
    APInt HighMask = APInt::getHighBitsSet(BitWidth, DemandedBitsLZ);
    if (C && !C->isAllOnesValue() && !C->isOne() &&
        (C->getAPIntValue() | HighMask).isAllOnesValue()) {
      SDValue Neg1 = TLO.DAG.getAllOnesConstant(dl, VT);
      // Disable the nsw and nuw flags. We can no longer guarantee that we
      // won't wrap after simplification.
      Flags.setNoSignedWrap(false);
      Flags.setNoUnsignedWrap(false);
      SDValue NewOp = TLO.DAG.getNode(Op.getOpcode(), dl, VT, Op0, Neg1, Flags);
      return TLO.CombineTo(Op, NewOp);
    }

    LLVM_FALLTHROUGH;
  }
  default:
    if (Op.getOpcode() >= ISD::BUILTIN_OP_END) {
      if (SimplifyDemandedBitsForTargetNode(Op, DemandedBits, DemandedElts,
                                            Known, TLO, Depth))
        return true;
      break;
    }

    // Just use computeKnownBits to compute output bits.
    Known = TLO.DAG.computeKnownBits(Op, DemandedElts, Depth);
    break;
  }

  // If we know the value of all of the demanded bits, return this as a
  // constant.
  if (DemandedBits.isSubsetOf(Known.Zero | Known.One)) {
    // Avoid folding to a constant if any OpaqueConstant is involved.
    const SDNode *N = Op.getNode();
    for (SDNodeIterator I = SDNodeIterator::begin(N),
                        E = SDNodeIterator::end(N);
         I != E; ++I) {
      SDNode *Op = *I;
      if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op))
        if (C->isOpaque())
          return false;
    }
    if (VT.isInteger())
      return TLO.CombineTo(Op, TLO.DAG.getConstant(Known.One, dl, VT));
    if (VT.isFloatingPoint())
      return TLO.CombineTo(
          Op,
          TLO.DAG.getConstantFP(
              APFloat(TLO.DAG.EVTToAPFloatSemantics(VT), Known.One), dl, VT));
  }

  return false;
}

bool TargetLowering::SimplifyDemandedVectorElts(SDValue Op,
                                                const APInt &DemandedElts,
                                                APInt &KnownUndef,
                                                APInt &KnownZero,
                                                DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
                        !DCI.isBeforeLegalizeOps());

  bool Simplified =
      SimplifyDemandedVectorElts(Op, DemandedElts, KnownUndef, KnownZero, TLO);
  if (Simplified) {
    DCI.AddToWorklist(Op.getNode());
    DCI.CommitTargetLoweringOpt(TLO);
  }

  return Simplified;
}

/// Given a vector binary operation and known undefined elements for each input
/// operand, compute whether each element of the output is undefined.
static APInt getKnownUndefForVectorBinop(SDValue BO, SelectionDAG &DAG,
                                         const APInt &UndefOp0,
                                         const APInt &UndefOp1) {
  EVT VT = BO.getValueType();
  assert(DAG.getTargetLoweringInfo().isBinOp(BO.getOpcode()) && VT.isVector() &&
         "Vector binop only");

  EVT EltVT = VT.getVectorElementType();
  unsigned NumElts = VT.getVectorNumElements();
  assert(UndefOp0.getBitWidth() == NumElts &&
         UndefOp1.getBitWidth() == NumElts && "Bad type for undef analysis");

  auto getUndefOrConstantElt = [&](SDValue V, unsigned Index,
                                   const APInt &UndefVals) {
    if (UndefVals[Index])
      return DAG.getUNDEF(EltVT);

    if (auto *BV = dyn_cast<BuildVectorSDNode>(V)) {
      // Try hard to make sure that the getNode() call is not creating temporary
      // nodes. Ignore opaque integers because they do not constant fold.
      SDValue Elt = BV->getOperand(Index);
      auto *C = dyn_cast<ConstantSDNode>(Elt);
      if (isa<ConstantFPSDNode>(Elt) || Elt.isUndef() || (C && !C->isOpaque()))
        return Elt;
    }

    return SDValue();
  };

  APInt KnownUndef = APInt::getNullValue(NumElts);
  for (unsigned i = 0; i != NumElts; ++i) {
    // If both inputs for this element are either constant or undef and match
    // the element type, compute the constant/undef result for this element of
    // the vector.
    // TODO: Ideally we would use FoldConstantArithmetic() here, but that does
    // not handle FP constants. The code within getNode() should be refactored
    // to avoid the danger of creating a bogus temporary node here.
    SDValue C0 = getUndefOrConstantElt(BO.getOperand(0), i, UndefOp0);
    SDValue C1 = getUndefOrConstantElt(BO.getOperand(1), i, UndefOp1);
    if (C0 && C1 && C0.getValueType() == EltVT && C1.getValueType() == EltVT)
      if (DAG.getNode(BO.getOpcode(), SDLoc(BO), EltVT, C0, C1).isUndef())
        KnownUndef.setBit(i);
  }
  return KnownUndef;
}

bool TargetLowering::SimplifyDemandedVectorElts(
    SDValue Op, const APInt &OriginalDemandedElts, APInt &KnownUndef,
    APInt &KnownZero, TargetLoweringOpt &TLO, unsigned Depth,
    bool AssumeSingleUse) const {
  EVT VT = Op.getValueType();
  unsigned Opcode = Op.getOpcode();
  APInt DemandedElts = OriginalDemandedElts;
  unsigned NumElts = DemandedElts.getBitWidth();
  assert(VT.isVector() && "Expected vector op");

  KnownUndef = KnownZero = APInt::getNullValue(NumElts);

  // TODO: For now we assume we know nothing about scalable vectors.
  if (VT.isScalableVector())
    return false;

  assert(VT.getVectorNumElements() == NumElts &&
         "Mask size mismatches value type element count!");

  // Undef operand.
  if (Op.isUndef()) {
    KnownUndef.setAllBits();
    return false;
  }

  // If Op has other users, assume that all elements are needed.
  if (!Op.getNode()->hasOneUse() && !AssumeSingleUse)
    DemandedElts.setAllBits();

  // Not demanding any elements from Op.
  if (DemandedElts == 0) {
    KnownUndef.setAllBits();
    return TLO.CombineTo(Op, TLO.DAG.getUNDEF(VT));
  }

  // Limit search depth.
  if (Depth >= SelectionDAG::MaxRecursionDepth)
    return false;

  SDLoc DL(Op);
  unsigned EltSizeInBits = VT.getScalarSizeInBits();

  // Helper for demanding the specified elements and all the bits of both binary
  // operands.
  auto SimplifyDemandedVectorEltsBinOp = [&](SDValue Op0, SDValue Op1) {
    SDValue NewOp0 = SimplifyMultipleUseDemandedVectorElts(Op0, DemandedElts,
                                                           TLO.DAG, Depth + 1);
    SDValue NewOp1 = SimplifyMultipleUseDemandedVectorElts(Op1, DemandedElts,
                                                           TLO.DAG, Depth + 1);
    if (NewOp0 || NewOp1) {
      SDValue NewOp = TLO.DAG.getNode(
          Opcode, SDLoc(Op), VT, NewOp0 ? NewOp0 : Op0, NewOp1 ? NewOp1 : Op1);
      return TLO.CombineTo(Op, NewOp);
    }
    return false;
  };

  switch (Opcode) {
  case ISD::SCALAR_TO_VECTOR: {
    if (!DemandedElts[0]) {
      KnownUndef.setAllBits();
      return TLO.CombineTo(Op, TLO.DAG.getUNDEF(VT));
    }
    KnownUndef.setHighBits(NumElts - 1);
    break;
  }
  case ISD::BITCAST: {
    SDValue Src = Op.getOperand(0);
    EVT SrcVT = Src.getValueType();

    // We only handle vectors here.
    // TODO - investigate calling SimplifyDemandedBits/ComputeKnownBits?
    if (!SrcVT.isVector())
      break;

    // Fast handling of 'identity' bitcasts.
    unsigned NumSrcElts = SrcVT.getVectorNumElements();
    if (NumSrcElts == NumElts)
      return SimplifyDemandedVectorElts(Src, DemandedElts, KnownUndef,
                                        KnownZero, TLO, Depth + 1);

    APInt SrcZero, SrcUndef;
    APInt SrcDemandedElts = APInt::getNullValue(NumSrcElts);

    // Bitcast from 'large element' src vector to 'small element' vector: we
    // must demand a source element if any DemandedElt maps to it.
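    // e.g. v2i64 -> v4i32 (Scale = 2): demanding v4i32 element 3 demands
    // v2i64 element 1.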
    if ((NumElts % NumSrcElts) == 0) {
      unsigned Scale = NumElts / NumSrcElts;
      for (unsigned i = 0; i != NumElts; ++i)
        if (DemandedElts[i])
          SrcDemandedElts.setBit(i / Scale);

      if (SimplifyDemandedVectorElts(Src, SrcDemandedElts, SrcUndef, SrcZero,
                                     TLO, Depth + 1))
        return true;

      // Try calling SimplifyDemandedBits, converting demanded elts to the bits
      // of the large element.
      // TODO - bigendian once we have test coverage.
      if (TLO.DAG.getDataLayout().isLittleEndian()) {
        unsigned SrcEltSizeInBits = SrcVT.getScalarSizeInBits();
        APInt SrcDemandedBits = APInt::getNullValue(SrcEltSizeInBits);
        for (unsigned i = 0; i != NumElts; ++i)
          if (DemandedElts[i]) {
            unsigned Ofs = (i % Scale) * EltSizeInBits;
            SrcDemandedBits.setBits(Ofs, Ofs + EltSizeInBits);
          }

        KnownBits Known;
        if (SimplifyDemandedBits(Src, SrcDemandedBits, SrcDemandedElts, Known,
                                 TLO, Depth + 1))
          return true;
      }

      // If a src element is zero/undef then all the output elements that map
      // to it will be as well - only demanded elements are guaranteed to be
      // correct.
      for (unsigned i = 0; i != NumSrcElts; ++i) {
        if (SrcDemandedElts[i]) {
          if (SrcZero[i])
            KnownZero.setBits(i * Scale, (i + 1) * Scale);
          if (SrcUndef[i])
            KnownUndef.setBits(i * Scale, (i + 1) * Scale);
        }
      }
    }

    // Bitcast from 'small element' src vector to 'large element' vector: we
    // demand all the smaller source elements covered by the larger demanded
    // element of this vector.
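    // e.g. v4i32 -> v2i64 (Scale = 2): demanding v2i64 element 1 demands
    // v4i32 elements 2 and 3.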
    if ((NumSrcElts % NumElts) == 0) {
      unsigned Scale = NumSrcElts / NumElts;
      for (unsigned i = 0; i != NumElts; ++i)
        if (DemandedElts[i])
          SrcDemandedElts.setBits(i * Scale, (i + 1) * Scale);

      if (SimplifyDemandedVectorElts(Src, SrcDemandedElts, SrcUndef, SrcZero,
                                     TLO, Depth + 1))
        return true;

      // If all the src elements covering an output element are zero/undef, then
      // the output element will be as well, assuming it was demanded.
      for (unsigned i = 0; i != NumElts; ++i) {
        if (DemandedElts[i]) {
          if (SrcZero.extractBits(Scale, i * Scale).isAllOnesValue())
            KnownZero.setBit(i);
          if (SrcUndef.extractBits(Scale, i * Scale).isAllOnesValue())
            KnownUndef.setBit(i);
        }
      }
    }
    break;
  }
  case ISD::BUILD_VECTOR: {
    // Check all elements and simplify any unused elements with UNDEF.
    if (!DemandedElts.isAllOnesValue()) {
      // Don't simplify BROADCASTS.
      if (llvm::any_of(Op->op_values(),
                       [&](SDValue Elt) { return Op.getOperand(0) != Elt; })) {
        SmallVector<SDValue, 32> Ops(Op->op_begin(), Op->op_end());
        bool Updated = false;
        for (unsigned i = 0; i != NumElts; ++i) {
          if (!DemandedElts[i] && !Ops[i].isUndef()) {
            Ops[i] = TLO.DAG.getUNDEF(Ops[0].getValueType());
            KnownUndef.setBit(i);
            Updated = true;
          }
        }
        if (Updated)
          return TLO.CombineTo(Op, TLO.DAG.getBuildVector(VT, DL, Ops));
      }
    }
    for (unsigned i = 0; i != NumElts; ++i) {
      SDValue SrcOp = Op.getOperand(i);
      if (SrcOp.isUndef()) {
        KnownUndef.setBit(i);
      } else if (EltSizeInBits == SrcOp.getScalarValueSizeInBits() &&
                 (isNullConstant(SrcOp) || isNullFPConstant(SrcOp))) {
        KnownZero.setBit(i);
      }
    }
    break;
  }
  case ISD::CONCAT_VECTORS: {
    EVT SubVT = Op.getOperand(0).getValueType();
    unsigned NumSubVecs = Op.getNumOperands();
    unsigned NumSubElts = SubVT.getVectorNumElements();
    for (unsigned i = 0; i != NumSubVecs; ++i) {
      SDValue SubOp = Op.getOperand(i);
      APInt SubElts = DemandedElts.extractBits(NumSubElts, i * NumSubElts);
      APInt SubUndef, SubZero;
      if (SimplifyDemandedVectorElts(SubOp, SubElts, SubUndef, SubZero, TLO,
                                     Depth + 1))
        return true;
      KnownUndef.insertBits(SubUndef, i * NumSubElts);
      KnownZero.insertBits(SubZero, i * NumSubElts);
    }
    break;
  }
  case ISD::INSERT_SUBVECTOR: {
    // Demand any elements from the subvector and the remainder from the src
    // it's inserted into.
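    // e.g. inserting a v2i32 subvector at index 2 of a v8i32: elements [3:2]
    // come from Sub and the remaining elements from Src.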
    SDValue Src = Op.getOperand(0);
    SDValue Sub = Op.getOperand(1);
    uint64_t Idx = Op.getConstantOperandVal(2);
    unsigned NumSubElts = Sub.getValueType().getVectorNumElements();
    APInt DemandedSubElts = DemandedElts.extractBits(NumSubElts, Idx);
    APInt DemandedSrcElts = DemandedElts;
    DemandedSrcElts.insertBits(APInt::getNullValue(NumSubElts), Idx);

    APInt SubUndef, SubZero;
    if (SimplifyDemandedVectorElts(Sub, DemandedSubElts, SubUndef, SubZero, TLO,
                                   Depth + 1))
      return true;

    // If none of the src operand elements are demanded, replace it with undef.
    if (!DemandedSrcElts && !Src.isUndef())
      return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT,
                                               TLO.DAG.getUNDEF(VT), Sub,
                                               Op.getOperand(2)));

    if (SimplifyDemandedVectorElts(Src, DemandedSrcElts, KnownUndef, KnownZero,
                                   TLO, Depth + 1))
      return true;
    KnownUndef.insertBits(SubUndef, Idx);
    KnownZero.insertBits(SubZero, Idx);

    // Attempt to avoid multi-use ops if we don't need anything from them.
    if (!DemandedSrcElts.isAllOnesValue() ||
        !DemandedSubElts.isAllOnesValue()) {
      SDValue NewSrc = SimplifyMultipleUseDemandedVectorElts(
          Src, DemandedSrcElts, TLO.DAG, Depth + 1);
      SDValue NewSub = SimplifyMultipleUseDemandedVectorElts(
          Sub, DemandedSubElts, TLO.DAG, Depth + 1);
      if (NewSrc || NewSub) {
        NewSrc = NewSrc ? NewSrc : Src;
        NewSub = NewSub ? NewSub : Sub;
        SDValue NewOp = TLO.DAG.getNode(Op.getOpcode(), SDLoc(Op), VT, NewSrc,
                                        NewSub, Op.getOperand(2));
        return TLO.CombineTo(Op, NewOp);
      }
    }
    break;
  }
  case ISD::EXTRACT_SUBVECTOR: {
    // Offset the demanded elts by the subvector index.
    SDValue Src = Op.getOperand(0);
    if (Src.getValueType().isScalableVector())
      break;
    uint64_t Idx = Op.getConstantOperandVal(1);
    unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
    APInt DemandedSrcElts = DemandedElts.zextOrSelf(NumSrcElts).shl(Idx);

    APInt SrcUndef, SrcZero;
    if (SimplifyDemandedVectorElts(Src, DemandedSrcElts, SrcUndef, SrcZero, TLO,
                                   Depth + 1))
      return true;
    KnownUndef = SrcUndef.extractBits(NumElts, Idx);
    KnownZero = SrcZero.extractBits(NumElts, Idx);

    // Attempt to avoid multi-use ops if we don't need anything from them.
    if (!DemandedElts.isAllOnesValue()) {
      SDValue NewSrc = SimplifyMultipleUseDemandedVectorElts(
          Src, DemandedSrcElts, TLO.DAG, Depth + 1);
      if (NewSrc) {
        SDValue NewOp = TLO.DAG.getNode(Op.getOpcode(), SDLoc(Op), VT, NewSrc,
                                        Op.getOperand(1));
        return TLO.CombineTo(Op, NewOp);
      }
    }
    break;
  }
  case ISD::INSERT_VECTOR_ELT: {
    SDValue Vec = Op.getOperand(0);
    SDValue Scl = Op.getOperand(1);
    auto *CIdx = dyn_cast<ConstantSDNode>(Op.getOperand(2));

    // For a legal, constant insertion index, if we don't need this insertion
    // then strip it, else remove it from the demanded elts.
    if (CIdx && CIdx->getAPIntValue().ult(NumElts)) {
      unsigned Idx = CIdx->getZExtValue();
      if (!DemandedElts[Idx])
        return TLO.CombineTo(Op, Vec);

      APInt DemandedVecElts(DemandedElts);
      DemandedVecElts.clearBit(Idx);
      if (SimplifyDemandedVectorElts(Vec, DemandedVecElts, KnownUndef,
                                     KnownZero, TLO, Depth + 1))
        return true;

      KnownUndef.setBitVal(Idx, Scl.isUndef());

      KnownZero.setBitVal(Idx, isNullConstant(Scl) || isNullFPConstant(Scl));
      break;
    }

    APInt VecUndef, VecZero;
    if (SimplifyDemandedVectorElts(Vec, DemandedElts, VecUndef, VecZero, TLO,
                                   Depth + 1))
      return true;
    // Without knowing the insertion index we can't set KnownUndef/KnownZero.
    break;
  }
  case ISD::VSELECT: {
    // Try to transform the select condition based on the current demanded
    // elements.
    // TODO: If a condition element is undef, we can choose from one arm of the
    // select (and if one arm is undef, then we can propagate that to the
    // result).
    // TODO - add support for constant vselect masks (see IR version of this).
    APInt UnusedUndef, UnusedZero;
    if (SimplifyDemandedVectorElts(Op.getOperand(0), DemandedElts, UnusedUndef,
                                   UnusedZero, TLO, Depth + 1))
      return true;

    // See if we can simplify either vselect operand.
    APInt DemandedLHS(DemandedElts);
    APInt DemandedRHS(DemandedElts);
    APInt UndefLHS, ZeroLHS;
    APInt UndefRHS, ZeroRHS;
    if (SimplifyDemandedVectorElts(Op.getOperand(1), DemandedLHS, UndefLHS,
                                   ZeroLHS, TLO, Depth + 1))
      return true;
    if (SimplifyDemandedVectorElts(Op.getOperand(2), DemandedRHS, UndefRHS,
                                   ZeroRHS, TLO, Depth + 1))
      return true;

    KnownUndef = UndefLHS & UndefRHS;
    KnownZero = ZeroLHS & ZeroRHS;
    break;
  }
  case ISD::VECTOR_SHUFFLE: {
    ArrayRef<int> ShuffleMask = cast<ShuffleVectorSDNode>(Op)->getMask();

    // Collect demanded elements from shuffle operands.
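    // A mask element M in [0, NumElts) selects LHS element M; M >= NumElts
    // selects RHS element M - NumElts; a negative M means undef.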
    APInt DemandedLHS(NumElts, 0);
    APInt DemandedRHS(NumElts, 0);
    for (unsigned i = 0; i != NumElts; ++i) {
      int M = ShuffleMask[i];
      if (M < 0 || !DemandedElts[i])
        continue;
      assert(0 <= M && M < (int)(2 * NumElts) && "Shuffle index out of range");
      if (M < (int)NumElts)
        DemandedLHS.setBit(M);
      else
        DemandedRHS.setBit(M - NumElts);
    }

    // See if we can simplify either shuffle operand.
    APInt UndefLHS, ZeroLHS;
    APInt UndefRHS, ZeroRHS;
    if (SimplifyDemandedVectorElts(Op.getOperand(0), DemandedLHS, UndefLHS,
                                   ZeroLHS, TLO, Depth + 1))
      return true;
    if (SimplifyDemandedVectorElts(Op.getOperand(1), DemandedRHS, UndefRHS,
                                   ZeroRHS, TLO, Depth + 1))
      return true;

    // Simplify mask using undef elements from LHS/RHS.
    bool Updated = false;
    bool IdentityLHS = true, IdentityRHS = true;
    SmallVector<int, 32> NewMask(ShuffleMask.begin(), ShuffleMask.end());
    for (unsigned i = 0; i != NumElts; ++i) {
      int &M = NewMask[i];
      if (M < 0)
        continue;
      if (!DemandedElts[i] || (M < (int)NumElts && UndefLHS[M]) ||
          (M >= (int)NumElts && UndefRHS[M - NumElts])) {
        Updated = true;
        M = -1;
      }
      IdentityLHS &= (M < 0) || (M == (int)i);
      IdentityRHS &= (M < 0) || ((M - NumElts) == i);
    }

    // Update legal shuffle masks based on demanded elements, but only if the
    // update won't reduce the mask to an identity, which can cause premature
    // removal of the shuffle mask.
    if (Updated && !IdentityLHS && !IdentityRHS && !TLO.LegalOps) {
      SDValue LegalShuffle =
          buildLegalVectorShuffle(VT, DL, Op.getOperand(0), Op.getOperand(1),
                                  NewMask, TLO.DAG);
      if (LegalShuffle)
        return TLO.CombineTo(Op, LegalShuffle);
    }

    // Propagate undef/zero elements from LHS/RHS.
    for (unsigned i = 0; i != NumElts; ++i) {
      int M = ShuffleMask[i];
      if (M < 0) {
        KnownUndef.setBit(i);
      } else if (M < (int)NumElts) {
        if (UndefLHS[M])
          KnownUndef.setBit(i);
        if (ZeroLHS[M])
          KnownZero.setBit(i);
      } else {
        if (UndefRHS[M - NumElts])
          KnownUndef.setBit(i);
        if (ZeroRHS[M - NumElts])
          KnownZero.setBit(i);
      }
    }
    break;
  }
  case ISD::ANY_EXTEND_VECTOR_INREG:
  case ISD::SIGN_EXTEND_VECTOR_INREG:
  case ISD::ZERO_EXTEND_VECTOR_INREG: {
    APInt SrcUndef, SrcZero;
    SDValue Src = Op.getOperand(0);
    unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
    APInt DemandedSrcElts = DemandedElts.zextOrSelf(NumSrcElts);
    if (SimplifyDemandedVectorElts(Src, DemandedSrcElts, SrcUndef, SrcZero, TLO,
                                   Depth + 1))
      return true;
    KnownZero = SrcZero.zextOrTrunc(NumElts);
    KnownUndef = SrcUndef.zextOrTrunc(NumElts);

    if (Op.getOpcode() == ISD::ANY_EXTEND_VECTOR_INREG &&
        Op.getValueSizeInBits() == Src.getValueSizeInBits() &&
        DemandedSrcElts == 1 && TLO.DAG.getDataLayout().isLittleEndian()) {
      // aext - if we just need the bottom element then we can bitcast.
      return TLO.CombineTo(Op, TLO.DAG.getBitcast(VT, Src));
    }

    if (Op.getOpcode() == ISD::ZERO_EXTEND_VECTOR_INREG) {
      // zext(undef) upper bits are guaranteed to be zero.
      if (DemandedElts.isSubsetOf(KnownUndef))
        return TLO.CombineTo(Op, TLO.DAG.getConstant(0, SDLoc(Op), VT));
      KnownUndef.clearAllBits();
    }
    break;
  }

  // TODO: There are more binop opcodes that could be handled here - MIN,
  // MAX, saturated math, etc.
  case ISD::OR:
  case ISD::XOR:
  case ISD::ADD:
  case ISD::SUB:
  case ISD::FADD:
  case ISD::FSUB:
  case ISD::FMUL:
  case ISD::FDIV:
  case ISD::FREM: {
    SDValue Op0 = Op.getOperand(0);
    SDValue Op1 = Op.getOperand(1);

    APInt UndefRHS, ZeroRHS;
    if (SimplifyDemandedVectorElts(Op1, DemandedElts, UndefRHS, ZeroRHS, TLO,
                                   Depth + 1))
      return true;
    APInt UndefLHS, ZeroLHS;
    if (SimplifyDemandedVectorElts(Op0, DemandedElts, UndefLHS, ZeroLHS, TLO,
                                   Depth + 1))
      return true;

    KnownZero = ZeroLHS & ZeroRHS;
    KnownUndef = getKnownUndefForVectorBinop(Op, TLO.DAG, UndefLHS, UndefRHS);

    // Attempt to avoid multi-use ops if we don't need anything from them.
    // TODO - use KnownUndef to relax the demandedelts?
    if (!DemandedElts.isAllOnesValue())
      if (SimplifyDemandedVectorEltsBinOp(Op0, Op1))
        return true;
    break;
  }
  case ISD::SHL:
  case ISD::SRL:
  case ISD::SRA:
  case ISD::ROTL:
  case ISD::ROTR: {
    SDValue Op0 = Op.getOperand(0);
    SDValue Op1 = Op.getOperand(1);

    APInt UndefRHS, ZeroRHS;
    if (SimplifyDemandedVectorElts(Op1, DemandedElts, UndefRHS, ZeroRHS, TLO,
                                   Depth + 1))
      return true;
    APInt UndefLHS, ZeroLHS;
    if (SimplifyDemandedVectorElts(Op0, DemandedElts, UndefLHS, ZeroLHS, TLO,
                                   Depth + 1))
      return true;

    KnownZero = ZeroLHS;
    KnownUndef = UndefLHS & UndefRHS; // TODO: use getKnownUndefForVectorBinop?

    // Attempt to avoid multi-use ops if we don't need anything from them.
    // TODO - use KnownUndef to relax the demandedelts?
    if (!DemandedElts.isAllOnesValue())
      if (SimplifyDemandedVectorEltsBinOp(Op0, Op1))
        return true;
    break;
  }
  case ISD::MUL:
  case ISD::AND: {
    SDValue Op0 = Op.getOperand(0);
    SDValue Op1 = Op.getOperand(1);

    APInt SrcUndef, SrcZero;
    if (SimplifyDemandedVectorElts(Op1, DemandedElts, SrcUndef, SrcZero, TLO,
                                   Depth + 1))
      return true;
    if (SimplifyDemandedVectorElts(Op0, DemandedElts, KnownUndef, KnownZero,
                                   TLO, Depth + 1))
      return true;

    // If either side has a zero element, then the result element is zero, even
    // if the other is an UNDEF.
    // TODO: Extend getKnownUndefForVectorBinop to also deal with known zeros
    // and then handle 'and' nodes with the rest of the binop opcodes.
    KnownZero |= SrcZero;
    KnownUndef &= SrcUndef;
    KnownUndef &= ~KnownZero;

    // Attempt to avoid multi-use ops if we don't need anything from them.
    // TODO - use KnownUndef to relax the demandedelts?
    if (!DemandedElts.isAllOnesValue())
      if (SimplifyDemandedVectorEltsBinOp(Op0, Op1))
        return true;
    break;
  }
  case ISD::TRUNCATE:
  case ISD::SIGN_EXTEND:
  case ISD::ZERO_EXTEND:
    if (SimplifyDemandedVectorElts(Op.getOperand(0), DemandedElts, KnownUndef,
                                   KnownZero, TLO, Depth + 1))
      return true;

    if (Op.getOpcode() == ISD::ZERO_EXTEND) {
      // zext(undef) upper bits are guaranteed to be zero.
      if (DemandedElts.isSubsetOf(KnownUndef))
        return TLO.CombineTo(Op, TLO.DAG.getConstant(0, SDLoc(Op), VT));
      KnownUndef.clearAllBits();
    }
    break;
  default: {
    if (Op.getOpcode() >= ISD::BUILTIN_OP_END) {
      if (SimplifyDemandedVectorEltsForTargetNode(Op, DemandedElts, KnownUndef,
                                                  KnownZero, TLO, Depth))
        return true;
    } else {
      KnownBits Known;
      APInt DemandedBits = APInt::getAllOnesValue(EltSizeInBits);
      if (SimplifyDemandedBits(Op, DemandedBits, OriginalDemandedElts, Known,
                               TLO, Depth, AssumeSingleUse))
        return true;
    }
    break;
  }
  }
  assert((KnownUndef & KnownZero) == 0 && "Elements flagged as undef AND zero");

  // Constant fold all undef cases.
  // TODO: Handle zero cases as well.
  if (DemandedElts.isSubsetOf(KnownUndef))
    return TLO.CombineTo(Op, TLO.DAG.getUNDEF(VT));

  return false;
}

/// Determine which of the bits specified in Mask are known to be either zero
/// or one and return them in Known.
computeKnownBitsForTargetNode(const SDValue Op,KnownBits & Known,const APInt & DemandedElts,const SelectionDAG & DAG,unsigned Depth) const2904 void TargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
2905 KnownBits &Known,
2906 const APInt &DemandedElts,
2907 const SelectionDAG &DAG,
2908 unsigned Depth) const {
2909 assert((Op.getOpcode() >= ISD::BUILTIN_OP_END ||
2910 Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
2911 Op.getOpcode() == ISD::INTRINSIC_W_CHAIN ||
2912 Op.getOpcode() == ISD::INTRINSIC_VOID) &&
2913 "Should use MaskedValueIsZero if you don't know whether Op"
2914 " is a target node!");
2915 Known.resetAll();
2916 }

void TargetLowering::computeKnownBitsForTargetInstr(
    GISelKnownBits &Analysis, Register R, KnownBits &Known,
    const APInt &DemandedElts, const MachineRegisterInfo &MRI,
    unsigned Depth) const {
  Known.resetAll();
}

void TargetLowering::computeKnownBitsForFrameIndex(
    const int FrameIdx, KnownBits &Known, const MachineFunction &MF) const {
  // The low bits are known zero if the pointer is aligned.
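  // For example, a frame object with 16-byte alignment has Log2(16) == 4
  // known-zero low bits in its address.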
  Known.Zero.setLowBits(Log2(MF.getFrameInfo().getObjectAlign(FrameIdx)));
}

Align TargetLowering::computeKnownAlignForTargetInstr(
    GISelKnownBits &Analysis, Register R, const MachineRegisterInfo &MRI,
    unsigned Depth) const {
  return Align(1);
}

/// This method can be implemented by targets that want to expose additional
/// information about sign bits to the DAG Combiner.
unsigned TargetLowering::ComputeNumSignBitsForTargetNode(SDValue Op,
                                                         const APInt &,
                                                         const SelectionDAG &,
                                                         unsigned Depth) const {
  assert((Op.getOpcode() >= ISD::BUILTIN_OP_END ||
          Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
          Op.getOpcode() == ISD::INTRINSIC_W_CHAIN ||
          Op.getOpcode() == ISD::INTRINSIC_VOID) &&
         "Should use ComputeNumSignBits if you don't know whether Op"
         " is a target node!");
  return 1;
}

unsigned TargetLowering::computeNumSignBitsForTargetInstr(
    GISelKnownBits &Analysis, Register R, const APInt &DemandedElts,
    const MachineRegisterInfo &MRI, unsigned Depth) const {
  return 1;
}

bool TargetLowering::SimplifyDemandedVectorEltsForTargetNode(
    SDValue Op, const APInt &DemandedElts, APInt &KnownUndef, APInt &KnownZero,
    TargetLoweringOpt &TLO, unsigned Depth) const {
  assert((Op.getOpcode() >= ISD::BUILTIN_OP_END ||
          Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
          Op.getOpcode() == ISD::INTRINSIC_W_CHAIN ||
          Op.getOpcode() == ISD::INTRINSIC_VOID) &&
         "Should use SimplifyDemandedVectorElts if you don't know whether Op"
         " is a target node!");
  return false;
}

bool TargetLowering::SimplifyDemandedBitsForTargetNode(
    SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts,
    KnownBits &Known, TargetLoweringOpt &TLO, unsigned Depth) const {
  assert((Op.getOpcode() >= ISD::BUILTIN_OP_END ||
          Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
          Op.getOpcode() == ISD::INTRINSIC_W_CHAIN ||
          Op.getOpcode() == ISD::INTRINSIC_VOID) &&
         "Should use SimplifyDemandedBits if you don't know whether Op"
         " is a target node!");
  computeKnownBitsForTargetNode(Op, Known, DemandedElts, TLO.DAG, Depth);
  return false;
}

SDValue TargetLowering::SimplifyMultipleUseDemandedBitsForTargetNode(
    SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts,
    SelectionDAG &DAG, unsigned Depth) const {
  assert(
      (Op.getOpcode() >= ISD::BUILTIN_OP_END ||
       Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
       Op.getOpcode() == ISD::INTRINSIC_W_CHAIN ||
       Op.getOpcode() == ISD::INTRINSIC_VOID) &&
      "Should use SimplifyMultipleUseDemandedBits if you don't know whether Op"
      " is a target node!");
  return SDValue();
}

SDValue
TargetLowering::buildLegalVectorShuffle(EVT VT, const SDLoc &DL, SDValue N0,
                                        SDValue N1, MutableArrayRef<int> Mask,
                                        SelectionDAG &DAG) const {
  bool LegalMask = isShuffleMaskLegal(Mask, VT);
  if (!LegalMask) {
    std::swap(N0, N1);
    ShuffleVectorSDNode::commuteMask(Mask);
    LegalMask = isShuffleMaskLegal(Mask, VT);
  }

  if (!LegalMask)
    return SDValue();

  return DAG.getVectorShuffle(VT, DL, N0, N1, Mask);
}

const Constant *TargetLowering::getTargetConstantFromLoad(LoadSDNode*) const {
  return nullptr;
}

bool TargetLowering::isKnownNeverNaNForTargetNode(SDValue Op,
                                                  const SelectionDAG &DAG,
                                                  bool SNaN,
                                                  unsigned Depth) const {
  assert((Op.getOpcode() >= ISD::BUILTIN_OP_END ||
          Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
          Op.getOpcode() == ISD::INTRINSIC_W_CHAIN ||
          Op.getOpcode() == ISD::INTRINSIC_VOID) &&
         "Should use isKnownNeverNaN if you don't know whether Op"
         " is a target node!");
  return false;
}

// FIXME: Ideally, this would use ISD::isConstantSplatVector(), but that must
// work with truncating build vectors and vectors with elements of less than
// 8 bits.
bool TargetLowering::isConstTrueVal(const SDNode *N) const {
  if (!N)
    return false;

  APInt CVal;
  if (auto *CN = dyn_cast<ConstantSDNode>(N)) {
    CVal = CN->getAPIntValue();
  } else if (auto *BV = dyn_cast<BuildVectorSDNode>(N)) {
    auto *CN = BV->getConstantSplatNode();
    if (!CN)
      return false;

    // If this is a truncating build vector, truncate the splat value.
    // Otherwise, we may fail to match the expected values below.
    unsigned BVEltWidth = BV->getValueType(0).getScalarSizeInBits();
    CVal = CN->getAPIntValue();
    if (BVEltWidth < CVal.getBitWidth())
      CVal = CVal.trunc(BVEltWidth);
  } else {
    return false;
  }

  switch (getBooleanContents(N->getValueType(0))) {
  case UndefinedBooleanContent:
    return CVal[0];
  case ZeroOrOneBooleanContent:
    return CVal.isOneValue();
  case ZeroOrNegativeOneBooleanContent:
    return CVal.isAllOnesValue();
  }

  llvm_unreachable("Invalid boolean contents");
}

bool TargetLowering::isConstFalseVal(const SDNode *N) const {
  if (!N)
    return false;

  const ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N);
  if (!CN) {
    const BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N);
    if (!BV)
      return false;

    // We are only interested in constant splats; undef elements don't matter
    // when identifying boolean constants, and getConstantSplatNode returns
    // null if all of the operands are undef.
    CN = BV->getConstantSplatNode();
    if (!CN)
      return false;
  }

  if (getBooleanContents(N->getValueType(0)) == UndefinedBooleanContent)
    return !CN->getAPIntValue()[0];

  return CN->isNullValue();
}

bool TargetLowering::isExtendedTrueVal(const ConstantSDNode *N, EVT VT,
                                       bool SExt) const {
  if (VT == MVT::i1)
    return N->isOne();

  TargetLowering::BooleanContent Cnt = getBooleanContents(VT);
  switch (Cnt) {
  case TargetLowering::ZeroOrOneBooleanContent:
    // An extended value of 1 is always true, unless its original type is i1,
    // in which case it will be sign extended to -1.
    return (N->isOne() && !SExt) || (SExt && (N->getValueType(0) != MVT::i1));
  case TargetLowering::UndefinedBooleanContent:
  case TargetLowering::ZeroOrNegativeOneBooleanContent:
    return N->isAllOnesValue() && SExt;
  }
  llvm_unreachable("Unexpected enumeration.");
}

/// This helper function of SimplifySetCC tries to optimize the comparison when
/// either operand of the SetCC node is a bitwise-and instruction.
SDValue TargetLowering::foldSetCCWithAnd(EVT VT, SDValue N0, SDValue N1,
                                         ISD::CondCode Cond, const SDLoc &DL,
                                         DAGCombinerInfo &DCI) const {
  // Match these patterns in any of their permutations:
  // (X & Y) == Y
  // (X & Y) != Y
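  // For example, when Y is known to be a power of two, (X & 8) == 8 becomes
  // (X & 8) != 0, and on targets with an and-not operation (X & Y) == Y
  // becomes (~X & Y) == 0; both rewrites are performed below.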
  if (N1.getOpcode() == ISD::AND && N0.getOpcode() != ISD::AND)
    std::swap(N0, N1);

  EVT OpVT = N0.getValueType();
  if (N0.getOpcode() != ISD::AND || !OpVT.isInteger() ||
      (Cond != ISD::SETEQ && Cond != ISD::SETNE))
    return SDValue();

  SDValue X, Y;
  if (N0.getOperand(0) == N1) {
    X = N0.getOperand(1);
    Y = N0.getOperand(0);
  } else if (N0.getOperand(1) == N1) {
    X = N0.getOperand(0);
    Y = N0.getOperand(1);
  } else {
    return SDValue();
  }

  SelectionDAG &DAG = DCI.DAG;
  SDValue Zero = DAG.getConstant(0, DL, OpVT);
  if (DAG.isKnownToBeAPowerOfTwo(Y)) {
    // Simplify X & Y == Y to X & Y != 0 if Y has exactly one bit set.
    // Note that where Y is variable and is known to have at most one bit set
    // (for example, if it is Z & 1) we cannot do this; the expressions are not
    // equivalent when Y == 0.
    assert(OpVT.isInteger());
    Cond = ISD::getSetCCInverse(Cond, OpVT);
    if (DCI.isBeforeLegalizeOps() ||
        isCondCodeLegal(Cond, N0.getSimpleValueType()))
      return DAG.getSetCC(DL, VT, N0, Zero, Cond);
  } else if (N0.hasOneUse() && hasAndNotCompare(Y)) {
    // If the target supports an 'and-not' or 'and-complement' logic operation,
    // try to use that to make a comparison operation more efficient.
    // But don't do this transform if the mask is a single bit because there are
    // more efficient ways to deal with that case (for example, 'bt' on x86 or
    // 'rlwinm' on PPC).

    // Bail out if the compare operand that we want to turn into a zero is
    // already a zero (otherwise, infinite loop).
    auto *YConst = dyn_cast<ConstantSDNode>(Y);
    if (YConst && YConst->isNullValue())
      return SDValue();

    // Transform this into: ~X & Y == 0.
    SDValue NotX = DAG.getNOT(SDLoc(X), X, OpVT);
    SDValue NewAnd = DAG.getNode(ISD::AND, SDLoc(N0), OpVT, NotX, Y);
    return DAG.getSetCC(DL, VT, NewAnd, Zero, Cond);
  }

  return SDValue();
}

/// There are multiple IR patterns that could be checking whether certain
/// truncation of a signed number would be lossy or not. The pattern that is
/// best at the IR level may not lower optimally, so we want to unfold it.
/// We are looking for the following pattern: (KeptBits is a constant)
///   (add %x, (1 << (KeptBits-1))) srccond (1 << KeptBits)
/// KeptBits won't be bitwidth(x); that case is constant-folded to true/false.
/// KeptBits also can't be 1; that would have been folded to  %x dstcond 0.
/// We will unfold it into the natural trunc+sext pattern:
///   ((%x << C) a>> C) dstcond %x
/// where C = bitwidth(x) - KeptBits and C u< bitwidth(x).
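/// For example, with i16 %x and KeptBits == 8 (so C == 8):
///   (add %x, 128) u< 256   -->   ((%x << 8) a>> 8) == %x
/// i.e. "%x + 128 fits in 8 bits" becomes "sign-extending the low 8 bits of
/// %x gives back %x".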
SDValue TargetLowering::optimizeSetCCOfSignedTruncationCheck(
    EVT SCCVT, SDValue N0, SDValue N1, ISD::CondCode Cond, DAGCombinerInfo &DCI,
    const SDLoc &DL) const {
  // We must be comparing with a constant.
  ConstantSDNode *C1;
  if (!(C1 = dyn_cast<ConstantSDNode>(N1)))
    return SDValue();

  // N0 should be:  add %x, (1 << (KeptBits-1))
  if (N0->getOpcode() != ISD::ADD)
    return SDValue();

  // And we must be 'add'ing a constant.
  ConstantSDNode *C01;
  if (!(C01 = dyn_cast<ConstantSDNode>(N0->getOperand(1))))
    return SDValue();

  SDValue X = N0->getOperand(0);
  EVT XVT = X.getValueType();

  // Validate constants ...

  APInt I1 = C1->getAPIntValue();

  ISD::CondCode NewCond;
  if (Cond == ISD::CondCode::SETULT) {
    NewCond = ISD::CondCode::SETEQ;
  } else if (Cond == ISD::CondCode::SETULE) {
    NewCond = ISD::CondCode::SETEQ;
    // But need to 'canonicalize' the constant.
    I1 += 1;
  } else if (Cond == ISD::CondCode::SETUGT) {
    NewCond = ISD::CondCode::SETNE;
    // But need to 'canonicalize' the constant.
    I1 += 1;
  } else if (Cond == ISD::CondCode::SETUGE) {
    NewCond = ISD::CondCode::SETNE;
  } else
    return SDValue();

  APInt I01 = C01->getAPIntValue();

  auto checkConstants = [&I1, &I01]() -> bool {
    // Both constants must be powers of two, and the one from the setcc must be
    // the bigger of the two.
    return I1.ugt(I01) && I1.isPowerOf2() && I01.isPowerOf2();
  };

  if (checkConstants()) {
    // Great, e.g. got  icmp ult i16 (add i16 %x, 128), 256
  } else {
    // What if we invert constants? (and the target predicate)
    I1.negate();
    I01.negate();
    assert(XVT.isInteger());
    NewCond = getSetCCInverse(NewCond, XVT);
    if (!checkConstants())
      return SDValue();
    // Great, e.g. got  icmp uge i16 (add i16 %x, -128), -256
  }

  // They are power-of-two, so which bit is set?
  const unsigned KeptBits = I1.logBase2();
  const unsigned KeptBitsMinusOne = I01.logBase2();

  // Magic!
  if (KeptBits != (KeptBitsMinusOne + 1))
    return SDValue();
  assert(KeptBits > 0 && KeptBits < XVT.getSizeInBits() && "unreachable");

  // We don't want to do this in every single case.
  SelectionDAG &DAG = DCI.DAG;
  if (!DAG.getTargetLoweringInfo().shouldTransformSignedTruncationCheck(
          XVT, KeptBits))
    return SDValue();

  const unsigned MaskedBits = XVT.getSizeInBits() - KeptBits;
  assert(MaskedBits > 0 && MaskedBits < XVT.getSizeInBits() && "unreachable");

  // Unfold into:  ((%x << C) a>> C) cond %x
  // Where 'cond' will be either 'eq' or 'ne'.
  SDValue ShiftAmt = DAG.getConstant(MaskedBits, DL, XVT);
  SDValue T0 = DAG.getNode(ISD::SHL, DL, XVT, X, ShiftAmt);
  SDValue T1 = DAG.getNode(ISD::SRA, DL, XVT, T0, ShiftAmt);
  SDValue T2 = DAG.getSetCC(DL, SCCVT, T1, X, NewCond);

  return T2;
}

// (X & (C l>>/<< Y)) ==/!= 0  -->  ((X <</l>> Y) & C) ==/!= 0
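// Hoisting the shift off of the constant lets the 'and' use the unshifted
// constant C directly (often foldable as an immediate), rather than first
// materializing the variable-shifted mask (C l>>/<< Y) in a register.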
SDValue TargetLowering::optimizeSetCCByHoistingAndByConstFromLogicalShift(
    EVT SCCVT, SDValue N0, SDValue N1C, ISD::CondCode Cond,
    DAGCombinerInfo &DCI, const SDLoc &DL) const {
  assert(isConstOrConstSplat(N1C) &&
         isConstOrConstSplat(N1C)->getAPIntValue().isNullValue() &&
         "Should be a comparison with 0.");
  assert((Cond == ISD::SETEQ || Cond == ISD::SETNE) &&
         "Valid only for [in]equality comparisons.");

  unsigned NewShiftOpcode;
  SDValue X, C, Y;

  SelectionDAG &DAG = DCI.DAG;
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  // Look for '(C l>>/<< Y)'.
  auto Match = [&NewShiftOpcode, &X, &C, &Y, &TLI, &DAG](SDValue V) {
    // The shift should be one-use.
    if (!V.hasOneUse())
      return false;
    unsigned OldShiftOpcode = V.getOpcode();
    switch (OldShiftOpcode) {
    case ISD::SHL:
      NewShiftOpcode = ISD::SRL;
      break;
    case ISD::SRL:
      NewShiftOpcode = ISD::SHL;
      break;
    default:
      return false; // must be a logical shift.
    }
    // We should be shifting a constant.
    // FIXME: best to use isConstantOrConstantVector().
    C = V.getOperand(0);
    ConstantSDNode *CC =
        isConstOrConstSplat(C, /*AllowUndefs=*/true, /*AllowTruncation=*/true);
    if (!CC)
      return false;
    Y = V.getOperand(1);

    ConstantSDNode *XC =
        isConstOrConstSplat(X, /*AllowUndefs=*/true, /*AllowTruncation=*/true);
    return TLI.shouldProduceAndByConstByHoistingConstFromShiftsLHSOfAnd(
        X, XC, CC, Y, OldShiftOpcode, NewShiftOpcode, DAG);
  };

  // The LHS of the comparison should be a one-use 'and'.
  if (N0.getOpcode() != ISD::AND || !N0.hasOneUse())
    return SDValue();

  X = N0.getOperand(0);
  SDValue Mask = N0.getOperand(1);

  // 'and' is commutative!
  if (!Match(Mask)) {
    std::swap(X, Mask);
    if (!Match(Mask))
      return SDValue();
  }

  EVT VT = X.getValueType();

  // Produce:
  // ((X 'OppositeShiftOpcode' Y) & C) Cond 0
  SDValue T0 = DAG.getNode(NewShiftOpcode, DL, VT, X, Y);
  SDValue T1 = DAG.getNode(ISD::AND, DL, VT, T0, C);
  SDValue T2 = DAG.getSetCC(DL, SCCVT, T1, N1C, Cond);
  return T2;
}

/// Try to fold an equality comparison with a {add/sub/xor} binary operation as
/// the 1st operand (N0). Callers are expected to swap the N0/N1 parameters to
/// handle the commuted versions of these patterns.
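/// The SUB case needs a shift: (X - Y) == Y holds exactly when X == 2*Y,
/// i.e. X == (Y << 1), which is the form produced below.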
SDValue TargetLowering::foldSetCCWithBinOp(EVT VT, SDValue N0, SDValue N1,
                                           ISD::CondCode Cond, const SDLoc &DL,
                                           DAGCombinerInfo &DCI) const {
  unsigned BOpcode = N0.getOpcode();
  assert((BOpcode == ISD::ADD || BOpcode == ISD::SUB || BOpcode == ISD::XOR) &&
         "Unexpected binop");
  assert((Cond == ISD::SETEQ || Cond == ISD::SETNE) && "Unexpected condcode");

  // (X + Y) == X --> Y == 0
  // (X - Y) == X --> Y == 0
  // (X ^ Y) == X --> Y == 0
  SelectionDAG &DAG = DCI.DAG;
  EVT OpVT = N0.getValueType();
  SDValue X = N0.getOperand(0);
  SDValue Y = N0.getOperand(1);
  if (X == N1)
    return DAG.getSetCC(DL, VT, Y, DAG.getConstant(0, DL, OpVT), Cond);

  if (Y != N1)
    return SDValue();

  // (X + Y) == Y --> X == 0
  // (X ^ Y) == Y --> X == 0
  if (BOpcode == ISD::ADD || BOpcode == ISD::XOR)
    return DAG.getSetCC(DL, VT, X, DAG.getConstant(0, DL, OpVT), Cond);

  // The shift would not be valid if the operands are boolean (i1).
  if (!N0.hasOneUse() || OpVT.getScalarSizeInBits() == 1)
    return SDValue();

  // (X - Y) == Y --> X == Y << 1
  EVT ShiftVT = getShiftAmountTy(OpVT, DAG.getDataLayout(),
                                 !DCI.isBeforeLegalize());
  SDValue One = DAG.getConstant(1, DL, ShiftVT);
  SDValue YShl1 = DAG.getNode(ISD::SHL, DL, N1.getValueType(), Y, One);
  if (!DCI.isCalledByLegalizer())
    DCI.AddToWorklist(YShl1.getNode());
  return DAG.getSetCC(DL, VT, X, YShl1, Cond);
}

static SDValue simplifySetCCWithCTPOP(const TargetLowering &TLI, EVT VT,
                                      SDValue N0, const APInt &C1,
                                      ISD::CondCode Cond, const SDLoc &dl,
                                      SelectionDAG &DAG) {
  // Look through truncs that don't change the value of a ctpop.
  // FIXME: Add vector support? Need to be careful with setcc result type below.
  SDValue CTPOP = N0;
  if (N0.getOpcode() == ISD::TRUNCATE && N0.hasOneUse() && !VT.isVector() &&
      N0.getScalarValueSizeInBits() >
          Log2_32(N0.getOperand(0).getScalarValueSizeInBits()))
    CTPOP = N0.getOperand(0);

  if (CTPOP.getOpcode() != ISD::CTPOP || !CTPOP.hasOneUse())
    return SDValue();

  EVT CTVT = CTPOP.getValueType();
  SDValue CTOp = CTPOP.getOperand(0);

  // If this is a vector CTPOP, keep the CTPOP if it is legal.
  // TODO: Should we check if CTPOP is legal (or custom) for scalars?
  if (VT.isVector() && TLI.isOperationLegal(ISD::CTPOP, CTVT))
    return SDValue();

  // (ctpop x) u< 2 -> (x & x-1) == 0
  // (ctpop x) u> 1 -> (x & x-1) != 0
  if (Cond == ISD::SETULT || Cond == ISD::SETUGT) {
    unsigned CostLimit = TLI.getCustomCtpopCost(CTVT, Cond);
    if (C1.ugt(CostLimit + (Cond == ISD::SETULT)))
      return SDValue();
    if (C1 == 0 && (Cond == ISD::SETULT))
      return SDValue(); // This is handled elsewhere.

    unsigned Passes = C1.getLimitedValue() - (Cond == ISD::SETULT);

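    // Each round of x &= (x - 1) clears the lowest set bit of x, so after
    // 'Passes' rounds the result is zero exactly when ctpop(x) <= Passes.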
    SDValue NegOne = DAG.getAllOnesConstant(dl, CTVT);
    SDValue Result = CTOp;
    for (unsigned i = 0; i < Passes; i++) {
      SDValue Add = DAG.getNode(ISD::ADD, dl, CTVT, Result, NegOne);
      Result = DAG.getNode(ISD::AND, dl, CTVT, Result, Add);
    }
    ISD::CondCode CC = Cond == ISD::SETULT ? ISD::SETEQ : ISD::SETNE;
    return DAG.getSetCC(dl, VT, Result, DAG.getConstant(0, dl, CTVT), CC);
  }

  // If ctpop is not supported, expand a power-of-2 comparison based on it.
  if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) && C1 == 1) {
    // For scalars, keep CTPOP if it is legal or custom.
    if (!VT.isVector() && TLI.isOperationLegalOrCustom(ISD::CTPOP, CTVT))
      return SDValue();
    // This is based on X86's custom lowering for CTPOP which produces more
    // instructions than the expansion here.

    // (ctpop x) == 1 --> (x != 0) && ((x & x-1) == 0)
    // (ctpop x) != 1 --> (x == 0) || ((x & x-1) != 0)
    SDValue Zero = DAG.getConstant(0, dl, CTVT);
    SDValue NegOne = DAG.getAllOnesConstant(dl, CTVT);
    assert(CTVT.isInteger());
    ISD::CondCode InvCond = ISD::getSetCCInverse(Cond, CTVT);
    SDValue Add = DAG.getNode(ISD::ADD, dl, CTVT, CTOp, NegOne);
    SDValue And = DAG.getNode(ISD::AND, dl, CTVT, CTOp, Add);
    SDValue LHS = DAG.getSetCC(dl, VT, CTOp, Zero, InvCond);
    SDValue RHS = DAG.getSetCC(dl, VT, And, Zero, Cond);
    unsigned LogicOpcode = Cond == ISD::SETEQ ? ISD::AND : ISD::OR;
    return DAG.getNode(LogicOpcode, dl, VT, LHS, RHS);
  }

  return SDValue();
}

/// Try to simplify a setcc built with the specified operands and cc. If it is
/// unable to simplify it, return a null SDValue.
SDValue TargetLowering::SimplifySetCC(EVT VT, SDValue N0, SDValue N1,
                                      ISD::CondCode Cond, bool foldBooleans,
                                      DAGCombinerInfo &DCI,
                                      const SDLoc &dl) const {
  SelectionDAG &DAG = DCI.DAG;
  const DataLayout &Layout = DAG.getDataLayout();
  EVT OpVT = N0.getValueType();

  // Constant fold or commute setcc.
  if (SDValue Fold = DAG.FoldSetCC(VT, N0, N1, Cond, dl))
    return Fold;

  // Ensure that the constant occurs on the RHS and fold constant comparisons.
  // TODO: Handle non-splat vector constants. All undef causes trouble.
  ISD::CondCode SwappedCC = ISD::getSetCCSwappedOperands(Cond);
  if (isConstOrConstSplat(N0) &&
      (DCI.isBeforeLegalizeOps() ||
       isCondCodeLegal(SwappedCC, N0.getSimpleValueType())))
    return DAG.getSetCC(dl, VT, N1, N0, SwappedCC);

  // If we have a subtract with the same 2 non-constant operands as this setcc
  // -- but in reverse order -- then try to commute the operands of this setcc
  // to match. A matching pair of setcc (cmp) and sub may be combined into 1
  // instruction on some targets.
  if (!isConstOrConstSplat(N0) && !isConstOrConstSplat(N1) &&
      (DCI.isBeforeLegalizeOps() ||
       isCondCodeLegal(SwappedCC, N0.getSimpleValueType())) &&
      DAG.doesNodeExist(ISD::SUB, DAG.getVTList(OpVT), {N1, N0}) &&
      !DAG.doesNodeExist(ISD::SUB, DAG.getVTList(OpVT), {N0, N1}))
    return DAG.getSetCC(dl, VT, N1, N0, SwappedCC);

  if (auto *N1C = isConstOrConstSplat(N1)) {
    const APInt &C1 = N1C->getAPIntValue();

    // Optimize some CTPOP cases.
    if (SDValue V = simplifySetCCWithCTPOP(*this, VT, N0, C1, Cond, dl, DAG))
      return V;
  }

  // FIXME: Support vectors.
  if (auto *N1C = dyn_cast<ConstantSDNode>(N1.getNode())) {
    const APInt &C1 = N1C->getAPIntValue();

    // If the LHS is '(srl (ctlz x), 5)', the RHS is 0/1, and this is an
    // equality comparison, then we're just comparing whether X itself is
    // zero.
    if (N0.getOpcode() == ISD::SRL && (C1.isNullValue() || C1.isOneValue()) &&
        N0.getOperand(0).getOpcode() == ISD::CTLZ &&
        N0.getOperand(1).getOpcode() == ISD::Constant) {
      const APInt &ShAmt = N0.getConstantOperandAPInt(1);
      if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) &&
          ShAmt == Log2_32(N0.getValueSizeInBits())) {
        if ((C1 == 0) == (Cond == ISD::SETEQ)) {
          // (srl (ctlz x), 5) == 0  -> X != 0
          // (srl (ctlz x), 5) != 1  -> X != 0
          Cond = ISD::SETNE;
        } else {
          // (srl (ctlz x), 5) != 0  -> X == 0
          // (srl (ctlz x), 5) == 1  -> X == 0
          Cond = ISD::SETEQ;
        }
        SDValue Zero = DAG.getConstant(0, dl, N0.getValueType());
        return DAG.getSetCC(dl, VT, N0.getOperand(0).getOperand(0),
                            Zero, Cond);
      }
    }

    // (zext x) == C --> x == (trunc C)
    // (sext x) == C --> x == (trunc C)
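    // e.g. setcc (zext i8 %x to i32), 200, eq can be narrowed to
    // setcc %x, 200, eq on i8, since the low 8 bits determine the result.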
    if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) &&
        DCI.isBeforeLegalize() && N0->hasOneUse()) {
      unsigned MinBits = N0.getValueSizeInBits();
      SDValue PreExt;
      bool Signed = false;
      if (N0->getOpcode() == ISD::ZERO_EXTEND) {
        // ZExt
        MinBits = N0->getOperand(0).getValueSizeInBits();
        PreExt = N0->getOperand(0);
      } else if (N0->getOpcode() == ISD::AND) {
        // DAGCombine turns costly ZExts into ANDs
        if (auto *C = dyn_cast<ConstantSDNode>(N0->getOperand(1)))
          if ((C->getAPIntValue() + 1).isPowerOf2()) {
            MinBits = C->getAPIntValue().countTrailingOnes();
            PreExt = N0->getOperand(0);
          }
      } else if (N0->getOpcode() == ISD::SIGN_EXTEND) {
        // SExt
        MinBits = N0->getOperand(0).getValueSizeInBits();
        PreExt = N0->getOperand(0);
        Signed = true;
      } else if (auto *LN0 = dyn_cast<LoadSDNode>(N0)) {
        // ZEXTLOAD / SEXTLOAD
        if (LN0->getExtensionType() == ISD::ZEXTLOAD) {
          MinBits = LN0->getMemoryVT().getSizeInBits();
          PreExt = N0;
        } else if (LN0->getExtensionType() == ISD::SEXTLOAD) {
          Signed = true;
          MinBits = LN0->getMemoryVT().getSizeInBits();
          PreExt = N0;
        }
      }

      // Figure out how many bits we need to preserve this constant.
      unsigned ReqdBits = Signed ? C1.getBitWidth() - C1.getNumSignBits() + 1
                                 : C1.getActiveBits();

      // Make sure we're not losing bits from the constant.
      if (MinBits > 0 &&
          MinBits < C1.getBitWidth() &&
          MinBits >= ReqdBits) {
        EVT MinVT = EVT::getIntegerVT(*DAG.getContext(), MinBits);
        if (isTypeDesirableForOp(ISD::SETCC, MinVT)) {
          // Will get folded away.
          SDValue Trunc = DAG.getNode(ISD::TRUNCATE, dl, MinVT, PreExt);
          if (MinBits == 1 && C1 == 1)
            // Invert the condition.
            return DAG.getSetCC(dl, VT, Trunc, DAG.getConstant(0, dl, MVT::i1),
                                Cond == ISD::SETEQ ? ISD::SETNE : ISD::SETEQ);
          SDValue C = DAG.getConstant(C1.trunc(MinBits), dl, MinVT);
          return DAG.getSetCC(dl, VT, Trunc, C, Cond);
        }

        // If truncating the setcc operands is not desirable, we can still
        // simplify the expression in some cases:
        // setcc ([sz]ext (setcc x, y, cc)), 0, setne) -> setcc (x, y, cc)
        // setcc ([sz]ext (setcc x, y, cc)), 0, seteq) -> setcc (x, y, inv(cc))
        // setcc (zext (setcc x, y, cc)), 1, setne)  -> setcc (x, y, inv(cc))
        // setcc (zext (setcc x, y, cc)), 1, seteq)  -> setcc (x, y, cc)
        // setcc (sext (setcc x, y, cc)), -1, setne) -> setcc (x, y, inv(cc))
        // setcc (sext (setcc x, y, cc)), -1, seteq) -> setcc (x, y, cc)
        SDValue TopSetCC = N0->getOperand(0);
        unsigned N0Opc = N0->getOpcode();
        bool SExt = (N0Opc == ISD::SIGN_EXTEND);
        if (TopSetCC.getValueType() == MVT::i1 && VT == MVT::i1 &&
            TopSetCC.getOpcode() == ISD::SETCC &&
            (N0Opc == ISD::ZERO_EXTEND || N0Opc == ISD::SIGN_EXTEND) &&
            (isConstFalseVal(N1C) ||
             isExtendedTrueVal(N1C, N0->getValueType(0), SExt))) {

          bool Inverse = (N1C->isNullValue() && Cond == ISD::SETEQ) ||
                         (!N1C->isNullValue() && Cond == ISD::SETNE);

          if (!Inverse)
            return TopSetCC;

          ISD::CondCode InvCond = ISD::getSetCCInverse(
              cast<CondCodeSDNode>(TopSetCC.getOperand(2))->get(),
              TopSetCC.getOperand(0).getValueType());
          return DAG.getSetCC(dl, VT, TopSetCC.getOperand(0),
                              TopSetCC.getOperand(1),
                              InvCond);
        }
      }
    }

    // If the LHS is '(and load, const)', the RHS is 0, the test is for
    // equality or unsigned, and all 1 bits of the const are in the same
    // partial word, see if we can shorten the load.
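    // e.g. on a little-endian target, (and (load i32 %p), 0xFF00) == 0 only
    // inspects the second byte, so it can be rewritten as an i8 load from
    // offset 1 masked with 0xFF and compared against 0.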
    if (DCI.isBeforeLegalize() &&
        !ISD::isSignedIntSetCC(Cond) &&
        N0.getOpcode() == ISD::AND && C1 == 0 &&
        N0.getNode()->hasOneUse() &&
        isa<LoadSDNode>(N0.getOperand(0)) &&
        N0.getOperand(0).getNode()->hasOneUse() &&
        isa<ConstantSDNode>(N0.getOperand(1))) {
      LoadSDNode *Lod = cast<LoadSDNode>(N0.getOperand(0));
      APInt bestMask;
      unsigned bestWidth = 0, bestOffset = 0;
      if (Lod->isSimple() && Lod->isUnindexed()) {
        unsigned origWidth = N0.getValueSizeInBits();
        unsigned maskWidth = origWidth;
        // We can narrow (e.g.) 16-bit extending loads on 32-bit target to
        // 8 bits, but have to be careful...
        if (Lod->getExtensionType() != ISD::NON_EXTLOAD)
          origWidth = Lod->getMemoryVT().getSizeInBits();
        const APInt &Mask = N0.getConstantOperandAPInt(1);
        for (unsigned width = origWidth / 2; width >= 8; width /= 2) {
          APInt newMask = APInt::getLowBitsSet(maskWidth, width);
          for (unsigned offset = 0; offset < origWidth / width; offset++) {
            if (Mask.isSubsetOf(newMask)) {
              if (Layout.isLittleEndian())
                bestOffset = (uint64_t)offset * (width / 8);
              else
                bestOffset = (origWidth / width - offset - 1) * (width / 8);
              bestMask = Mask.lshr(offset * (width / 8) * 8);
              bestWidth = width;
              break;
            }
            newMask <<= width;
          }
        }
      }
      if (bestWidth) {
        EVT newVT = EVT::getIntegerVT(*DAG.getContext(), bestWidth);
        if (newVT.isRound() &&
            shouldReduceLoadWidth(Lod, ISD::NON_EXTLOAD, newVT)) {
          SDValue Ptr = Lod->getBasePtr();
          if (bestOffset != 0)
            Ptr = DAG.getMemBasePlusOffset(Ptr, TypeSize::Fixed(bestOffset),
                                           dl);
          SDValue NewLoad =
              DAG.getLoad(newVT, dl, Lod->getChain(), Ptr,
                          Lod->getPointerInfo().getWithOffset(bestOffset),
                          Lod->getOriginalAlign());
          return DAG.getSetCC(dl, VT,
                              DAG.getNode(ISD::AND, dl, newVT, NewLoad,
                                          DAG.getConstant(
                                              bestMask.trunc(bestWidth),
                                              dl, newVT)),
                              DAG.getConstant(0LL, dl, newVT), Cond);
        }
      }
    }

    // If the LHS is a ZERO_EXTEND, perform the comparison on the input.
    if (N0.getOpcode() == ISD::ZERO_EXTEND) {
      unsigned InSize = N0.getOperand(0).getValueSizeInBits();

      // If the comparison constant has bits in the upper part, the
      // zero-extended value could never match.
      if (C1.intersects(APInt::getHighBitsSet(C1.getBitWidth(),
                                              C1.getBitWidth() - InSize))) {
        switch (Cond) {
        case ISD::SETUGT:
        case ISD::SETUGE:
        case ISD::SETEQ:
          return DAG.getConstant(0, dl, VT);
        case ISD::SETULT:
        case ISD::SETULE:
        case ISD::SETNE:
          return DAG.getConstant(1, dl, VT);
        case ISD::SETGT:
        case ISD::SETGE:
          // True if the sign bit of C1 is set.
          return DAG.getConstant(C1.isNegative(), dl, VT);
        case ISD::SETLT:
        case ISD::SETLE:
          // True if the sign bit of C1 isn't set.
          return DAG.getConstant(C1.isNonNegative(), dl, VT);
        default:
          break;
        }
      }

      // Otherwise, we can perform the comparison with the low bits.
      switch (Cond) {
      case ISD::SETEQ:
      case ISD::SETNE:
      case ISD::SETUGT:
      case ISD::SETUGE:
      case ISD::SETULT:
      case ISD::SETULE: {
        EVT newVT = N0.getOperand(0).getValueType();
        if (DCI.isBeforeLegalizeOps() ||
            (isOperationLegal(ISD::SETCC, newVT) &&
             isCondCodeLegal(Cond, newVT.getSimpleVT()))) {
          EVT NewSetCCVT = getSetCCResultType(Layout, *DAG.getContext(), newVT);
          SDValue NewConst = DAG.getConstant(C1.trunc(InSize), dl, newVT);

          SDValue NewSetCC = DAG.getSetCC(dl, NewSetCCVT, N0.getOperand(0),
                                          NewConst, Cond);
          return DAG.getBoolExtOrTrunc(NewSetCC, dl, VT, N0.getValueType());
        }
        break;
      }
      default:
        break; // TODO: Be more careful with signed comparisons.
      }
    } else if (N0.getOpcode() == ISD::SIGN_EXTEND_INREG &&
               (Cond == ISD::SETEQ || Cond == ISD::SETNE)) {
      EVT ExtSrcTy = cast<VTSDNode>(N0.getOperand(1))->getVT();
      unsigned ExtSrcTyBits = ExtSrcTy.getSizeInBits();
      EVT ExtDstTy = N0.getValueType();
      unsigned ExtDstTyBits = ExtDstTy.getSizeInBits();

      // If the constant doesn't fit into the number of bits for the source of
      // the sign extension, it is impossible for both sides to be equal.
      if (C1.getMinSignedBits() > ExtSrcTyBits)
        return DAG.getConstant(Cond == ISD::SETNE, dl, VT);

      SDValue ZextOp;
      EVT Op0Ty = N0.getOperand(0).getValueType();
      if (Op0Ty == ExtSrcTy) {
        ZextOp = N0.getOperand(0);
      } else {
        APInt Imm = APInt::getLowBitsSet(ExtDstTyBits, ExtSrcTyBits);
        ZextOp = DAG.getNode(ISD::AND, dl, Op0Ty, N0.getOperand(0),
                             DAG.getConstant(Imm, dl, Op0Ty));
      }
      if (!DCI.isCalledByLegalizer())
        DCI.AddToWorklist(ZextOp.getNode());
      // Otherwise, make this a use of a zext.
      return DAG.getSetCC(dl, VT, ZextOp,
                          DAG.getConstant(C1 & APInt::getLowBitsSet(
                                                   ExtDstTyBits, ExtSrcTyBits),
                                          dl, ExtDstTy),
                          Cond);
    } else if ((N1C->isNullValue() || N1C->isOne()) &&
               (Cond == ISD::SETEQ || Cond == ISD::SETNE)) {
      // SETCC (SETCC), [0|1], [EQ|NE]  -> SETCC
      if (N0.getOpcode() == ISD::SETCC &&
          isTypeLegal(VT) && VT.bitsLE(N0.getValueType()) &&
          (N0.getValueType() == MVT::i1 ||
           getBooleanContents(N0.getOperand(0).getValueType()) ==
               ZeroOrOneBooleanContent)) {
        bool TrueWhenTrue = (Cond == ISD::SETEQ) ^ (!N1C->isOne());
        if (TrueWhenTrue)
          return DAG.getNode(ISD::TRUNCATE, dl, VT, N0);
        // Invert the condition.
        ISD::CondCode CC = cast<CondCodeSDNode>(N0.getOperand(2))->get();
        CC = ISD::getSetCCInverse(CC, N0.getOperand(0).getValueType());
        if (DCI.isBeforeLegalizeOps() ||
            isCondCodeLegal(CC, N0.getOperand(0).getSimpleValueType()))
          return DAG.getSetCC(dl, VT, N0.getOperand(0), N0.getOperand(1), CC);
      }

      if ((N0.getOpcode() == ISD::XOR ||
           (N0.getOpcode() == ISD::AND &&
            N0.getOperand(0).getOpcode() == ISD::XOR &&
            N0.getOperand(1) == N0.getOperand(0).getOperand(1))) &&
          isa<ConstantSDNode>(N0.getOperand(1)) &&
          cast<ConstantSDNode>(N0.getOperand(1))->isOne()) {
        // If this is (X^1) == 0/1, swap the RHS and eliminate the xor. We
        // can only do this if the top bits are known zero.
        unsigned BitWidth = N0.getValueSizeInBits();
        if (DAG.MaskedValueIsZero(N0,
                                  APInt::getHighBitsSet(BitWidth,
                                                        BitWidth - 1))) {
          // Okay, get the un-inverted input value.
          SDValue Val;
          if (N0.getOpcode() == ISD::XOR) {
            Val = N0.getOperand(0);
          } else {
            assert(N0.getOpcode() == ISD::AND &&
                   N0.getOperand(0).getOpcode() == ISD::XOR);
            // ((X^1)&1)^1 -> X & 1
            Val = DAG.getNode(ISD::AND, dl, N0.getValueType(),
                              N0.getOperand(0).getOperand(0),
                              N0.getOperand(1));
          }

          return DAG.getSetCC(dl, VT, Val, N1,
                              Cond == ISD::SETEQ ? ISD::SETNE : ISD::SETEQ);
        }
      } else if (N1C->isOne()) {
        SDValue Op0 = N0;
        if (Op0.getOpcode() == ISD::TRUNCATE)
          Op0 = Op0.getOperand(0);

        if ((Op0.getOpcode() == ISD::XOR) &&
            Op0.getOperand(0).getOpcode() == ISD::SETCC &&
            Op0.getOperand(1).getOpcode() == ISD::SETCC) {
          SDValue XorLHS = Op0.getOperand(0);
          SDValue XorRHS = Op0.getOperand(1);
          // Ensure that the input setccs return an i1 type or 0/1 value.
          if (Op0.getValueType() == MVT::i1 ||
              (getBooleanContents(XorLHS.getOperand(0).getValueType()) ==
                   ZeroOrOneBooleanContent &&
               getBooleanContents(XorRHS.getOperand(0).getValueType()) ==
                   ZeroOrOneBooleanContent)) {
            // (xor (setcc), (setcc)) == / != 1 -> (setcc) != / == (setcc)
            Cond = (Cond == ISD::SETEQ) ? ISD::SETNE : ISD::SETEQ;
            return DAG.getSetCC(dl, VT, XorLHS, XorRHS, Cond);
          }
        }
        if (Op0.getOpcode() == ISD::AND &&
            isa<ConstantSDNode>(Op0.getOperand(1)) &&
            cast<ConstantSDNode>(Op0.getOperand(1))->isOne()) {
          // If this is (X&1) == / != 1, normalize it to (X&1) != / == 0.
          if (Op0.getValueType().bitsGT(VT))
            Op0 = DAG.getNode(ISD::AND, dl, VT,
                              DAG.getNode(ISD::TRUNCATE, dl, VT,
                                          Op0.getOperand(0)),
                              DAG.getConstant(1, dl, VT));
          else if (Op0.getValueType().bitsLT(VT))
            Op0 = DAG.getNode(ISD::AND, dl, VT,
                              DAG.getNode(ISD::ANY_EXTEND, dl, VT,
                                          Op0.getOperand(0)),
                              DAG.getConstant(1, dl, VT));

          return DAG.getSetCC(dl, VT, Op0,
                              DAG.getConstant(0, dl, Op0.getValueType()),
                              Cond == ISD::SETEQ ? ISD::SETNE : ISD::SETEQ);
        }
        if (Op0.getOpcode() == ISD::AssertZext &&
            cast<VTSDNode>(Op0.getOperand(1))->getVT() == MVT::i1)
          return DAG.getSetCC(dl, VT, Op0,
                              DAG.getConstant(0, dl, Op0.getValueType()),
                              Cond == ISD::SETEQ ? ISD::SETNE : ISD::SETEQ);
      }
    }

    // Given:
    //   icmp eq/ne (urem %x, %y), 0
    // Iff %x has 0 or 1 bits set, and %y has at least 2 bits set, omit 'urem':
    //   icmp eq/ne %x, 0
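    // This holds because %x is then either zero or a power of two, and the
    // only divisors of a power of two are themselves powers of two (a single
    // set bit); a %y with two or more set bits can divide %x only if %x == 0.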
    if (N0.getOpcode() == ISD::UREM && N1C->isNullValue() &&
        (Cond == ISD::SETEQ || Cond == ISD::SETNE)) {
      KnownBits XKnown = DAG.computeKnownBits(N0.getOperand(0));
      KnownBits YKnown = DAG.computeKnownBits(N0.getOperand(1));
      if (XKnown.countMaxPopulation() == 1 && YKnown.countMinPopulation() >= 2)
        return DAG.getSetCC(dl, VT, N0.getOperand(0), N1, Cond);
    }

    if (SDValue V =
            optimizeSetCCOfSignedTruncationCheck(VT, N0, N1, Cond, DCI, dl))
      return V;
  }

  // These simplifications apply to splat vectors as well.
  // TODO: Handle more splat vector cases.
  if (auto *N1C = isConstOrConstSplat(N1)) {
    const APInt &C1 = N1C->getAPIntValue();

    APInt MinVal, MaxVal;
    unsigned OperandBitSize = N1C->getValueType(0).getScalarSizeInBits();
    if (ISD::isSignedIntSetCC(Cond)) {
      MinVal = APInt::getSignedMinValue(OperandBitSize);
      MaxVal = APInt::getSignedMaxValue(OperandBitSize);
    } else {
      MinVal = APInt::getMinValue(OperandBitSize);
      MaxVal = APInt::getMaxValue(OperandBitSize);
    }

    // Canonicalize GE/LE comparisons to use GT/LT comparisons.
    if (Cond == ISD::SETGE || Cond == ISD::SETUGE) {
      // X >= MIN --> true
      if (C1 == MinVal)
        return DAG.getBoolConstant(true, dl, VT, OpVT);

      if (!VT.isVector()) { // TODO: Support this for vectors.
        // X >= C0 --> X > (C0 - 1)
        APInt C = C1 - 1;
        ISD::CondCode NewCC = (Cond == ISD::SETGE) ? ISD::SETGT : ISD::SETUGT;
        if ((DCI.isBeforeLegalizeOps() ||
             isCondCodeLegal(NewCC, VT.getSimpleVT())) &&
            (!N1C->isOpaque() || (C.getBitWidth() <= 64 &&
                                  isLegalICmpImmediate(C.getSExtValue())))) {
          return DAG.getSetCC(dl, VT, N0,
                              DAG.getConstant(C, dl, N1.getValueType()),
                              NewCC);
        }
      }
    }

    if (Cond == ISD::SETLE || Cond == ISD::SETULE) {
      // X <= MAX --> true
      if (C1 == MaxVal)
        return DAG.getBoolConstant(true, dl, VT, OpVT);

      // X <= C0 --> X < (C0 + 1)
      if (!VT.isVector()) { // TODO: Support this for vectors.
        APInt C = C1 + 1;
        ISD::CondCode NewCC = (Cond == ISD::SETLE) ? ISD::SETLT : ISD::SETULT;
        if ((DCI.isBeforeLegalizeOps() ||
             isCondCodeLegal(NewCC, VT.getSimpleVT())) &&
            (!N1C->isOpaque() || (C.getBitWidth() <= 64 &&
                                  isLegalICmpImmediate(C.getSExtValue())))) {
          return DAG.getSetCC(dl, VT, N0,
                              DAG.getConstant(C, dl, N1.getValueType()),
                              NewCC);
        }
      }
    }

    if (Cond == ISD::SETLT || Cond == ISD::SETULT) {
      if (C1 == MinVal)
        return DAG.getBoolConstant(false, dl, VT, OpVT); // X < MIN --> false

      // TODO: Support this for vectors after legalize ops.
      if (!VT.isVector() || DCI.isBeforeLegalizeOps()) {
        // Canonicalize setlt X, Max --> setne X, Max
        if (C1 == MaxVal)
          return DAG.getSetCC(dl, VT, N0, N1, ISD::SETNE);

        // If we have setult X, 1, turn it into seteq X, 0
        if (C1 == MinVal + 1)
          return DAG.getSetCC(dl, VT, N0,
                              DAG.getConstant(MinVal, dl, N0.getValueType()),
                              ISD::SETEQ);
      }
    }

    if (Cond == ISD::SETGT || Cond == ISD::SETUGT) {
      if (C1 == MaxVal)
        return DAG.getBoolConstant(false, dl, VT, OpVT); // X > MAX --> false

      // TODO: Support this for vectors after legalize ops.
      if (!VT.isVector() || DCI.isBeforeLegalizeOps()) {
        // Canonicalize setgt X, Min --> setne X, Min
        if (C1 == MinVal)
          return DAG.getSetCC(dl, VT, N0, N1, ISD::SETNE);

        // If we have setugt X, Max-1, turn it into seteq X, Max
        if (C1 == MaxVal - 1)
          return DAG.getSetCC(dl, VT, N0,
                              DAG.getConstant(MaxVal, dl, N0.getValueType()),
                              ISD::SETEQ);
      }
    }

    if (Cond == ISD::SETEQ || Cond == ISD::SETNE) {
      // (X & (C l>>/<< Y)) ==/!= 0 --> ((X <</l>> Y) & C) ==/!= 0
      if (C1.isNullValue())
        if (SDValue CC = optimizeSetCCByHoistingAndByConstFromLogicalShift(
                VT, N0, N1, Cond, DCI, dl))
          return CC;
    }

    // If we have "setcc X, C0", check to see if we can shrink the immediate
    // by changing cc.
    // TODO: Support this for vectors after legalize ops.
    if (!VT.isVector() || DCI.isBeforeLegalizeOps()) {
      // SETUGT X, SINTMAX  -> SETLT X, 0
      // SETUGE X, SINTMIN  -> SETLT X, 0
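      // Both forms simply test the sign bit of X: X u> 0x7F..F and
      // X u>= 0x80..0 hold exactly when the sign bit is set, i.e. X s< 0,
      // and a comparison against zero is typically cheaper to materialize.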
      if ((Cond == ISD::SETUGT && C1.isMaxSignedValue()) ||
          (Cond == ISD::SETUGE && C1.isMinSignedValue()))
        return DAG.getSetCC(dl, VT, N0,
                            DAG.getConstant(0, dl, N1.getValueType()),
                            ISD::SETLT);

      // SETULT X, SINTMIN  -> SETGT X, -1
      // SETULE X, SINTMAX  -> SETGT X, -1
      if ((Cond == ISD::SETULT && C1.isMinSignedValue()) ||
          (Cond == ISD::SETULE && C1.isMaxSignedValue()))
        return DAG.getSetCC(dl, VT, N0,
                            DAG.getAllOnesConstant(dl, N1.getValueType()),
                            ISD::SETGT);
    }
  }

  // Back to non-vector simplifications.
  // TODO: Can we do these for vector splats?
  if (auto *N1C = dyn_cast<ConstantSDNode>(N1.getNode())) {
    const TargetLowering &TLI = DAG.getTargetLoweringInfo();
    const APInt &C1 = N1C->getAPIntValue();
    EVT ShValTy = N0.getValueType();

    // Fold bit comparisons when we can. This will result in an
    // incorrect value when boolean false is negative one, unless
    // the bitsize is 1 in which case the false value is the same
    // in practice regardless of the representation.
    if ((VT.getSizeInBits() == 1 ||
         getBooleanContents(N0.getValueType()) == ZeroOrOneBooleanContent) &&
        (Cond == ISD::SETEQ || Cond == ISD::SETNE) &&
        (VT == ShValTy || (isTypeLegal(VT) && VT.bitsLE(ShValTy))) &&
        N0.getOpcode() == ISD::AND) {
      if (auto *AndRHS = dyn_cast<ConstantSDNode>(N0.getOperand(1))) {
        EVT ShiftTy =
            getShiftAmountTy(ShValTy, Layout, !DCI.isBeforeLegalize());
        if (Cond == ISD::SETNE && C1 == 0) { // (X & 8) != 0 --> (X & 8) >> 3
          // Perform the xform if the AND RHS is a single bit.
          unsigned ShCt = AndRHS->getAPIntValue().logBase2();
          if (AndRHS->getAPIntValue().isPowerOf2() &&
              !TLI.shouldAvoidTransformToShift(ShValTy, ShCt)) {
            return DAG.getNode(ISD::TRUNCATE, dl, VT,
                               DAG.getNode(ISD::SRL, dl, ShValTy, N0,
                                           DAG.getConstant(ShCt, dl, ShiftTy)));
          }
        } else if (Cond == ISD::SETEQ && C1 == AndRHS->getAPIntValue()) {
          // (X & 8) == 8 --> (X & 8) >> 3
          // Perform the xform if C1 is a single bit.
          unsigned ShCt = C1.logBase2();
          if (C1.isPowerOf2() &&
              !TLI.shouldAvoidTransformToShift(ShValTy, ShCt)) {
            return DAG.getNode(ISD::TRUNCATE, dl, VT,
                               DAG.getNode(ISD::SRL, dl, ShValTy, N0,
                                           DAG.getConstant(ShCt, dl, ShiftTy)));
          }
        }
      }
    }

    if (C1.getMinSignedBits() <= 64 &&
        !isLegalICmpImmediate(C1.getSExtValue())) {
      EVT ShiftTy = getShiftAmountTy(ShValTy, Layout, !DCI.isBeforeLegalize());
      // (X & -256) == 256 -> (X >> 8) == 1
      if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) &&
          N0.getOpcode() == ISD::AND && N0.hasOneUse()) {
        if (auto *AndRHS = dyn_cast<ConstantSDNode>(N0.getOperand(1))) {
          const APInt &AndRHSC = AndRHS->getAPIntValue();
          if ((-AndRHSC).isPowerOf2() && (AndRHSC & C1) == C1) {
            unsigned ShiftBits = AndRHSC.countTrailingZeros();
            if (!TLI.shouldAvoidTransformToShift(ShValTy, ShiftBits)) {
              SDValue Shift =
                  DAG.getNode(ISD::SRL, dl, ShValTy, N0.getOperand(0),
                              DAG.getConstant(ShiftBits, dl, ShiftTy));
              SDValue CmpRHS = DAG.getConstant(C1.lshr(ShiftBits), dl, ShValTy);
              return DAG.getSetCC(dl, VT, Shift, CmpRHS, Cond);
            }
          }
        }
      } else if (Cond == ISD::SETULT || Cond == ISD::SETUGE ||
                 Cond == ISD::SETULE || Cond == ISD::SETUGT) {
        bool AdjOne = (Cond == ISD::SETULE || Cond == ISD::SETUGT);
        // X <  0x100000000 -> (X >> 32) <  1
        // X >= 0x100000000 -> (X >> 32) >= 1
        // X <= 0x0ffffffff -> (X >> 32) <  1
        // X >  0x0ffffffff -> (X >> 32) >= 1
        unsigned ShiftBits;
        APInt NewC = C1;
        ISD::CondCode NewCond = Cond;
        if (AdjOne) {
          ShiftBits = C1.countTrailingOnes();
          NewC = NewC + 1;
          NewCond = (Cond == ISD::SETULE) ? ISD::SETULT : ISD::SETUGE;
        } else {
          ShiftBits = C1.countTrailingZeros();
        }
        NewC.lshrInPlace(ShiftBits);
        if (ShiftBits && NewC.getMinSignedBits() <= 64 &&
            isLegalICmpImmediate(NewC.getSExtValue()) &&
            !TLI.shouldAvoidTransformToShift(ShValTy, ShiftBits)) {
          SDValue Shift = DAG.getNode(ISD::SRL, dl, ShValTy, N0,
                                      DAG.getConstant(ShiftBits, dl, ShiftTy));
          SDValue CmpRHS = DAG.getConstant(NewC, dl, ShValTy);
          return DAG.getSetCC(dl, VT, Shift, CmpRHS, NewCond);
        }
      }
    }
  }

  if (!isa<ConstantFPSDNode>(N0) && isa<ConstantFPSDNode>(N1)) {
    auto *CFP = cast<ConstantFPSDNode>(N1);
    assert(!CFP->getValueAPF().isNaN() && "Unexpected NaN value");

    // Otherwise, we know the RHS is not a NaN. Simplify the node to drop the
    // constant if knowing that the operand is non-nan is enough. We prefer to
    // have SETO(x,x) instead of SETO(x, 0.0) because this avoids having to
    // materialize 0.0.
    if (Cond == ISD::SETO || Cond == ISD::SETUO)
      return DAG.getSetCC(dl, VT, N0, N0, Cond);

    // setcc (fneg x), C -> setcc swap(pred) x, -C
    if (N0.getOpcode() == ISD::FNEG) {
      ISD::CondCode SwapCond = ISD::getSetCCSwappedOperands(Cond);
      if (DCI.isBeforeLegalizeOps() ||
          isCondCodeLegal(SwapCond, N0.getSimpleValueType())) {
        SDValue NegN1 = DAG.getNode(ISD::FNEG, dl, N0.getValueType(), N1);
        return DAG.getSetCC(dl, VT, N0.getOperand(0), NegN1, SwapCond);
      }
    }

    // If the condition is not legal, see if we can find an equivalent one
    // which is legal.
    if (!isCondCodeLegal(Cond, N0.getSimpleValueType())) {
      // If the comparison was an awkward floating-point == or != and one of
      // the comparison operands is infinity or negative infinity, convert the
      // condition to a less-awkward <= or >=.
      if (CFP->getValueAPF().isInfinity()) {
        bool IsNegInf = CFP->getValueAPF().isNegative();
        ISD::CondCode NewCond = ISD::SETCC_INVALID;
        switch (Cond) {
        case ISD::SETOEQ: NewCond = IsNegInf ? ISD::SETOLE : ISD::SETOGE; break;
        case ISD::SETUEQ: NewCond = IsNegInf ? ISD::SETULE : ISD::SETUGE; break;
        case ISD::SETUNE: NewCond = IsNegInf ? ISD::SETUGT : ISD::SETULT; break;
        case ISD::SETONE: NewCond = IsNegInf ? ISD::SETOGT : ISD::SETOLT; break;
        default: break;
        }
        if (NewCond != ISD::SETCC_INVALID &&
            isCondCodeLegal(NewCond, N0.getSimpleValueType()))
          return DAG.getSetCC(dl, VT, N0, N1, NewCond);
      }
    }
  }

  if (N0 == N1) {
    // The sext(setcc()) => setcc() optimization relies on the appropriate
    // constant being emitted.
    assert(!N0.getValueType().isInteger() &&
           "Integer types should be handled by FoldSetCC");

    bool EqTrue = ISD::isTrueWhenEqual(Cond);
    unsigned UOF = ISD::getUnorderedFlavor(Cond);
    if (UOF == 2) // FP operators that are undefined on NaNs.
      return DAG.getBoolConstant(EqTrue, dl, VT, OpVT);
    if (UOF == unsigned(EqTrue))
      return DAG.getBoolConstant(EqTrue, dl, VT, OpVT);
    // Otherwise, we can't fold it. However, we can simplify it to SETUO/SETO
    // if it is not already.
    ISD::CondCode NewCond = UOF == 0 ? ISD::SETO : ISD::SETUO;
    if (NewCond != Cond &&
        (DCI.isBeforeLegalizeOps() ||
         isCondCodeLegal(NewCond, N0.getSimpleValueType())))
      return DAG.getSetCC(dl, VT, N0, N1, NewCond);
  }

  if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) &&
      N0.getValueType().isInteger()) {
    if (N0.getOpcode() == ISD::ADD || N0.getOpcode() == ISD::SUB ||
        N0.getOpcode() == ISD::XOR) {
      // Simplify (X+Y) == (X+Z) -->  Y == Z
      if (N0.getOpcode() == N1.getOpcode()) {
        if (N0.getOperand(0) == N1.getOperand(0))
          return DAG.getSetCC(dl, VT, N0.getOperand(1), N1.getOperand(1), Cond);
        if (N0.getOperand(1) == N1.getOperand(1))
          return DAG.getSetCC(dl, VT, N0.getOperand(0), N1.getOperand(0), Cond);
        if (isCommutativeBinOp(N0.getOpcode())) {
          // If X op Y == Y op X, try other combinations.
          if (N0.getOperand(0) == N1.getOperand(1))
            return DAG.getSetCC(dl, VT, N0.getOperand(1), N1.getOperand(0),
                                Cond);
          if (N0.getOperand(1) == N1.getOperand(0))
            return DAG.getSetCC(dl, VT, N0.getOperand(0), N1.getOperand(1),
                                Cond);
        }
      }

      // If RHS is a legal immediate value for a compare instruction, we need
      // to be careful about increasing register pressure needlessly.
      bool LegalRHSImm = false;

      if (auto *RHSC = dyn_cast<ConstantSDNode>(N1)) {
        if (auto *LHSR = dyn_cast<ConstantSDNode>(N0.getOperand(1))) {
          // Turn (X+C1) == C2 --> X == C2-C1
          if (N0.getOpcode() == ISD::ADD && N0.getNode()->hasOneUse()) {
            return DAG.getSetCC(dl, VT, N0.getOperand(0),
                                DAG.getConstant(RHSC->getAPIntValue() -
                                                    LHSR->getAPIntValue(),
                                                dl, N0.getValueType()),
                                Cond);
          }

          // Turn (X^C1) == C2 --> X == C1^C2 iff X&~C1 = 0.
          if (N0.getOpcode() == ISD::XOR)
            // If we know that all of the inverted bits are zero, don't bother
            // performing the inversion.
            if (DAG.MaskedValueIsZero(N0.getOperand(0), ~LHSR->getAPIntValue()))
              return DAG.getSetCC(dl, VT, N0.getOperand(0),
                                  DAG.getConstant(LHSR->getAPIntValue() ^
                                                      RHSC->getAPIntValue(),
                                                  dl, N0.getValueType()),
                                  Cond);
        }

        // Turn (C1-X) == C2 --> X == C1-C2
        if (auto *SUBC = dyn_cast<ConstantSDNode>(N0.getOperand(0))) {
          if (N0.getOpcode() == ISD::SUB && N0.getNode()->hasOneUse()) {
            return DAG.getSetCC(dl, VT, N0.getOperand(1),
                                DAG.getConstant(SUBC->getAPIntValue() -
                                                    RHSC->getAPIntValue(),
                                                dl, N0.getValueType()),
                                Cond);
          }
        }

        // Could RHSC fold directly into a compare?
        if (RHSC->getValueType(0).getSizeInBits() <= 64)
          LegalRHSImm = isLegalICmpImmediate(RHSC->getSExtValue());
      }

      // (X+Y) == X --> Y == 0 and similar folds.
      // Don't do this if X is an immediate that can fold into a cmp
      // instruction and X+Y has other uses. It could be an induction variable
      // chain, and the transform would increase register pressure.
      if (!LegalRHSImm || N0.hasOneUse())
        if (SDValue V = foldSetCCWithBinOp(VT, N0, N1, Cond, dl, DCI))
          return V;
    }

    if (N1.getOpcode() == ISD::ADD || N1.getOpcode() == ISD::SUB ||
        N1.getOpcode() == ISD::XOR)
      if (SDValue V = foldSetCCWithBinOp(VT, N1, N0, Cond, dl, DCI))
        return V;

    if (SDValue V = foldSetCCWithAnd(VT, N0, N1, Cond, dl, DCI))
      return V;
  }

  // Fold remainder of division by a constant.
  if ((N0.getOpcode() == ISD::UREM || N0.getOpcode() == ISD::SREM) &&
      N0.hasOneUse() && (Cond == ISD::SETEQ || Cond == ISD::SETNE)) {
    AttributeList Attr = DAG.getMachineFunction().getFunction().getAttributes();

    // When division is cheap or optimizing for minimum size,
    // fall through to DIVREM creation by skipping this fold.
    if (!isIntDivCheap(VT, Attr) && !Attr.hasFnAttribute(Attribute::MinSize)) {
      if (N0.getOpcode() == ISD::UREM) {
        if (SDValue Folded = buildUREMEqFold(VT, N0, N1, Cond, DCI, dl))
          return Folded;
      } else if (N0.getOpcode() == ISD::SREM) {
        if (SDValue Folded = buildSREMEqFold(VT, N0, N1, Cond, DCI, dl))
          return Folded;
      }
    }
  }

  // Fold away ALL boolean setcc's.
  if (N0.getValueType().getScalarType() == MVT::i1 && foldBooleans) {
    SDValue Temp;
    switch (Cond) {
    default: llvm_unreachable("Unknown integer setcc!");
    case ISD::SETEQ:  // X == Y  -->  ~(X^Y)
      Temp = DAG.getNode(ISD::XOR, dl, OpVT, N0, N1);
      N0 = DAG.getNOT(dl, Temp, OpVT);
      if (!DCI.isCalledByLegalizer())
        DCI.AddToWorklist(Temp.getNode());
      break;
    case ISD::SETNE:  // X != Y  -->  (X^Y)
      N0 = DAG.getNode(ISD::XOR, dl, OpVT, N0, N1);
      break;
    case ISD::SETGT:  // X >s Y  -->  X == 0 & Y == 1  -->  ~X & Y
    case ISD::SETULT: // X <u Y  -->  X == 0 & Y == 1  -->  ~X & Y
      Temp = DAG.getNOT(dl, N0, OpVT);
      N0 = DAG.getNode(ISD::AND, dl, OpVT, N1, Temp);
      if (!DCI.isCalledByLegalizer())
        DCI.AddToWorklist(Temp.getNode());
      break;
    case ISD::SETLT:  // X <s Y  -->  X == 1 & Y == 0  -->  ~Y & X
    case ISD::SETUGT: // X >u Y  -->  X == 1 & Y == 0  -->  ~Y & X
      Temp = DAG.getNOT(dl, N1, OpVT);
      N0 = DAG.getNode(ISD::AND, dl, OpVT, N0, Temp);
      if (!DCI.isCalledByLegalizer())
        DCI.AddToWorklist(Temp.getNode());
      break;
    case ISD::SETULE: // X <=u Y  -->  X == 0 | Y == 1  -->  ~X | Y
    case ISD::SETGE:  // X >=s Y  -->  X == 0 | Y == 1  -->  ~X | Y
      Temp = DAG.getNOT(dl, N0, OpVT);
      N0 = DAG.getNode(ISD::OR, dl, OpVT, N1, Temp);
      if (!DCI.isCalledByLegalizer())
        DCI.AddToWorklist(Temp.getNode());
      break;
    case ISD::SETUGE: // X >=u Y  -->  X == 1 | Y == 0  -->  ~Y | X
    case ISD::SETLE:  // X <=s Y  -->  X == 1 | Y == 0  -->  ~Y | X
      Temp = DAG.getNOT(dl, N1, OpVT);
      N0 = DAG.getNode(ISD::OR, dl, OpVT, N0, Temp);
      break;
    }
    if (VT.getScalarType() != MVT::i1) {
      if (!DCI.isCalledByLegalizer())
        DCI.AddToWorklist(N0.getNode());
      // FIXME: If running after legalize, we probably can't do this.
      ISD::NodeType ExtendCode = getExtendForContent(getBooleanContents(OpVT));
      N0 = DAG.getNode(ExtendCode, dl, VT, N0);
    }
    return N0;
  }

  // Could not fold it.
  return SDValue();
}
4293
4294 /// Returns true (and the GlobalValue and the offset) if the node is a
4295 /// GlobalAddress + offset.
isGAPlusOffset(SDNode * WN,const GlobalValue * & GA,int64_t & Offset) const4296 bool TargetLowering::isGAPlusOffset(SDNode *WN, const GlobalValue *&GA,
4297 int64_t &Offset) const {
4298
4299 SDNode *N = unwrapAddress(SDValue(WN, 0)).getNode();
4300
4301 if (auto *GASD = dyn_cast<GlobalAddressSDNode>(N)) {
4302 GA = GASD->getGlobal();
4303 Offset += GASD->getOffset();
4304 return true;
4305 }
4306
4307 if (N->getOpcode() == ISD::ADD) {
4308 SDValue N1 = N->getOperand(0);
4309 SDValue N2 = N->getOperand(1);
4310 if (isGAPlusOffset(N1.getNode(), GA, Offset)) {
4311 if (auto *V = dyn_cast<ConstantSDNode>(N2)) {
4312 Offset += V->getSExtValue();
4313 return true;
4314 }
4315 } else if (isGAPlusOffset(N2.getNode(), GA, Offset)) {
4316 if (auto *V = dyn_cast<ConstantSDNode>(N1)) {
4317 Offset += V->getSExtValue();
4318 return true;
4319 }
4320 }
4321 }
4322
4323 return false;
4324 }
4325
PerformDAGCombine(SDNode * N,DAGCombinerInfo & DCI) const4326 SDValue TargetLowering::PerformDAGCombine(SDNode *N,
4327 DAGCombinerInfo &DCI) const {
4328 // Default implementation: no optimization.
4329 return SDValue();
4330 }
4331
4332 //===----------------------------------------------------------------------===//
4333 // Inline Assembler Implementation Methods
4334 //===----------------------------------------------------------------------===//
4335
4336 TargetLowering::ConstraintType
4337 TargetLowering::getConstraintType(StringRef Constraint) const {
4338 unsigned S = Constraint.size();
4339
4340 if (S == 1) {
4341 switch (Constraint[0]) {
4342 default: break;
4343 case 'r':
4344 return C_RegisterClass;
4345 case 'm': // memory
4346 case 'o': // offsetable
4347 case 'V': // not offsetable
4348 return C_Memory;
4349 case 'n': // Simple Integer
4350 case 'E': // Floating Point Constant
4351 case 'F': // Floating Point Constant
4352 return C_Immediate;
4353 case 'i': // Simple Integer or Relocatable Constant
4354 case 's': // Relocatable Constant
4355 case 'p': // Address.
4356 case 'X': // Allow ANY value.
4357 case 'I': // Target registers.
4358 case 'J':
4359 case 'K':
4360 case 'L':
4361 case 'M':
4362 case 'N':
4363 case 'O':
4364 case 'P':
4365 case '<':
4366 case '>':
4367 return C_Other;
4368 }
4369 }
4370
4371 if (S > 1 && Constraint[0] == '{' && Constraint[S - 1] == '}') {
4372 if (S == 8 && Constraint.substr(1, 6) == "memory") // "{memory}"
4373 return C_Memory;
4374 return C_Register;
4375 }
4376 return C_Unknown;
4377 }
4378
4379 /// Try to replace an X constraint, which matches anything, with another that
4380 /// has more specific requirements based on the type of the corresponding
4381 /// operand.
4382 const char *TargetLowering::LowerXConstraint(EVT ConstraintVT) const {
4383 if (ConstraintVT.isInteger())
4384 return "r";
4385 if (ConstraintVT.isFloatingPoint())
4386 return "f"; // works for many targets
4387 return nullptr;
4388 }
4389
4390 SDValue TargetLowering::LowerAsmOutputForConstraint(
4391 SDValue &Chain, SDValue &Flag, const SDLoc &DL,
4392 const AsmOperandInfo &OpInfo, SelectionDAG &DAG) const {
4393 return SDValue();
4394 }
4395
4396 /// Lower the specified operand into the Ops vector.
4397 /// If it is invalid, don't add anything to Ops.
4398 void TargetLowering::LowerAsmOperandForConstraint(SDValue Op,
4399 std::string &Constraint,
4400 std::vector<SDValue> &Ops,
4401 SelectionDAG &DAG) const {
4402
4403 if (Constraint.length() > 1) return;
4404
4405 char ConstraintLetter = Constraint[0];
4406 switch (ConstraintLetter) {
4407 default: break;
4408 case 'X': // Allows any operand; labels (basic block) use this.
4409 if (Op.getOpcode() == ISD::BasicBlock ||
4410 Op.getOpcode() == ISD::TargetBlockAddress) {
4411 Ops.push_back(Op);
4412 return;
4413 }
4414 LLVM_FALLTHROUGH;
4415 case 'i': // Simple Integer or Relocatable Constant
4416 case 'n': // Simple Integer
4417 case 's': { // Relocatable Constant
4418
4419 GlobalAddressSDNode *GA;
4420 ConstantSDNode *C;
4421 BlockAddressSDNode *BA;
4422 uint64_t Offset = 0;
4423
4424 // Match (GA) or (C) or (GA+C) or (GA-C) or ((GA+C)+C) or (((GA+C)+C)+C),
4425 // etc., since getelementptr is variadic. We can't use
4426 // SelectionDAG::FoldSymbolOffset because it expects the GA to be accessible
4427 // while in this case the GA may be furthest from the root node, which is
4428 // likely an ISD::ADD.
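// As an illustrative sketch of the walk below (not from the original
// comments): (add (add (GA + 1), 4), 8) peels the constants 8 and 4 off the
// ADD chain and then matches the GlobalAddressSDNode, producing a target
// global address with offset 13.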
4429 while (1) {
4430 if ((GA = dyn_cast<GlobalAddressSDNode>(Op)) && ConstraintLetter != 'n') {
4431 Ops.push_back(DAG.getTargetGlobalAddress(GA->getGlobal(), SDLoc(Op),
4432 GA->getValueType(0),
4433 Offset + GA->getOffset()));
4434 return;
4435 } else if ((C = dyn_cast<ConstantSDNode>(Op)) &&
4436 ConstraintLetter != 's') {
4437 // gcc prints these as sign extended. Sign extend value to 64 bits
4438 // now; without this it would get ZExt'd later in
4439 // ScheduleDAGSDNodes::EmitNode, which is very generic.
4440 bool IsBool = C->getConstantIntValue()->getBitWidth() == 1;
4441 BooleanContent BCont = getBooleanContents(MVT::i64);
4442 ISD::NodeType ExtOpc = IsBool ? getExtendForContent(BCont)
4443 : ISD::SIGN_EXTEND;
4444 int64_t ExtVal = ExtOpc == ISD::ZERO_EXTEND ? C->getZExtValue()
4445 : C->getSExtValue();
4446 Ops.push_back(DAG.getTargetConstant(Offset + ExtVal,
4447 SDLoc(C), MVT::i64));
4448 return;
4449 } else if ((BA = dyn_cast<BlockAddressSDNode>(Op)) &&
4450 ConstraintLetter != 'n') {
4451 Ops.push_back(DAG.getTargetBlockAddress(
4452 BA->getBlockAddress(), BA->getValueType(0),
4453 Offset + BA->getOffset(), BA->getTargetFlags()));
4454 return;
4455 } else {
4456 const unsigned OpCode = Op.getOpcode();
4457 if (OpCode == ISD::ADD || OpCode == ISD::SUB) {
4458 if ((C = dyn_cast<ConstantSDNode>(Op.getOperand(0))))
4459 Op = Op.getOperand(1);
4460 // Subtraction is not commutative.
4461 else if (OpCode == ISD::ADD &&
4462 (C = dyn_cast<ConstantSDNode>(Op.getOperand(1))))
4463 Op = Op.getOperand(0);
4464 else
4465 return;
4466 Offset += (OpCode == ISD::ADD ? 1 : -1) * C->getSExtValue();
4467 continue;
4468 }
4469 }
4470 return;
4471 }
4472 break;
4473 }
4474 }
4475 }
4476
4477 std::pair<unsigned, const TargetRegisterClass *>
4478 TargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *RI,
4479 StringRef Constraint,
4480 MVT VT) const {
4481 if (Constraint.empty() || Constraint[0] != '{')
4482 return std::make_pair(0u, static_cast<TargetRegisterClass *>(nullptr));
4483 assert(*(Constraint.end() - 1) == '}' && "Not a brace enclosed constraint?");
4484
4485 // Remove the braces from around the name.
4486 StringRef RegName(Constraint.data() + 1, Constraint.size() - 2);
4487
4488 std::pair<unsigned, const TargetRegisterClass *> R =
4489 std::make_pair(0u, static_cast<const TargetRegisterClass *>(nullptr));
4490
4491 // Figure out which register class contains this reg.
4492 for (const TargetRegisterClass *RC : RI->regclasses()) {
4493 // If none of the value types for this register class are valid, we
4494 // can't use it. For example, 64-bit reg classes on 32-bit targets.
4495 if (!isLegalRC(*RI, *RC))
4496 continue;
4497
4498 for (TargetRegisterClass::iterator I = RC->begin(), E = RC->end();
4499 I != E; ++I) {
4500 if (RegName.equals_lower(RI->getRegAsmName(*I))) {
4501 std::pair<unsigned, const TargetRegisterClass *> S =
4502 std::make_pair(*I, RC);
4503
4504 // If this register class has the requested value type, return it,
4505 // otherwise keep searching and return the first class found
4506 // if no other is found which explicitly has the requested type.
4507 if (RI->isTypeLegalForClass(*RC, VT))
4508 return S;
4509 if (!R.second)
4510 R = S;
4511 }
4512 }
4513 }
4514
4515 return R;
4516 }
4517
4518 //===----------------------------------------------------------------------===//
4519 // Constraint Selection.
4520
4521 /// Return true if this is an input operand that is a matching constraint like
4522 /// "4".
4523 bool TargetLowering::AsmOperandInfo::isMatchingInputConstraint() const {
4524 assert(!ConstraintCode.empty() && "No known constraint!");
4525 return isdigit(static_cast<unsigned char>(ConstraintCode[0]));
4526 }
4527
4528 /// If this is an input matching constraint, this method returns the output
4529 /// operand it matches.
4530 unsigned TargetLowering::AsmOperandInfo::getMatchedOperand() const {
4531 assert(!ConstraintCode.empty() && "No known constraint!");
4532 return atoi(ConstraintCode.c_str());
4533 }
4534
4535 /// Split up the constraint string from the inline assembly value into the
4536 /// specific constraints and their prefixes, and also tie in the associated
4537 /// operand values.
4538 /// If this returns an empty vector, and if the constraint string itself
4539 /// isn't empty, there was an error parsing.
4540 TargetLowering::AsmOperandInfoVector
4541 TargetLowering::ParseConstraints(const DataLayout &DL,
4542 const TargetRegisterInfo *TRI,
4543 const CallBase &Call) const {
4544 /// Information about all of the constraints.
4545 AsmOperandInfoVector ConstraintOperands;
4546 const InlineAsm *IA = cast<InlineAsm>(Call.getCalledOperand());
4547 unsigned maCount = 0; // Largest number of multiple alternative constraints.
4548
4549 // Do a prepass over the constraints, canonicalizing them, and building up the
4550 // ConstraintOperands list.
4551 unsigned ArgNo = 0; // ArgNo - The argument of the CallInst.
4552 unsigned ResNo = 0; // ResNo - The result number of the next output.
4553
4554 for (InlineAsm::ConstraintInfo &CI : IA->ParseConstraints()) {
4555 ConstraintOperands.emplace_back(std::move(CI));
4556 AsmOperandInfo &OpInfo = ConstraintOperands.back();
4557
4558 // Update multiple alternative constraint count.
4559 if (OpInfo.multipleAlternatives.size() > maCount)
4560 maCount = OpInfo.multipleAlternatives.size();
4561
4562 OpInfo.ConstraintVT = MVT::Other;
4563
4564 // Compute the value type for each operand.
4565 switch (OpInfo.Type) {
4566 case InlineAsm::isOutput:
4567 // Indirect outputs just consume an argument.
4568 if (OpInfo.isIndirect) {
4569 OpInfo.CallOperandVal = Call.getArgOperand(ArgNo++);
4570 break;
4571 }
4572
4573 // The return value of the call is this value. As such, there is no
4574 // corresponding argument.
4575 assert(!Call.getType()->isVoidTy() && "Bad inline asm!");
4576 if (StructType *STy = dyn_cast<StructType>(Call.getType())) {
4577 OpInfo.ConstraintVT =
4578 getSimpleValueType(DL, STy->getElementType(ResNo));
4579 } else {
4580 assert(ResNo == 0 && "Asm only has one result!");
4581 OpInfo.ConstraintVT = getSimpleValueType(DL, Call.getType());
4582 }
4583 ++ResNo;
4584 break;
4585 case InlineAsm::isInput:
4586 OpInfo.CallOperandVal = Call.getArgOperand(ArgNo++);
4587 break;
4588 case InlineAsm::isClobber:
4589 // Nothing to do.
4590 break;
4591 }
4592
4593 if (OpInfo.CallOperandVal) {
4594 llvm::Type *OpTy = OpInfo.CallOperandVal->getType();
4595 if (OpInfo.isIndirect) {
4596 llvm::PointerType *PtrTy = dyn_cast<PointerType>(OpTy);
4597 if (!PtrTy)
4598 report_fatal_error("Indirect operand for inline asm not a pointer!");
4599 OpTy = PtrTy->getElementType();
4600 }
4601
4602 // Look for a vector wrapped in a struct, e.g. { <16 x i8> }.
4603 if (StructType *STy = dyn_cast<StructType>(OpTy))
4604 if (STy->getNumElements() == 1)
4605 OpTy = STy->getElementType(0);
4606
4607 // If OpTy is not a single value, it may be a struct/union that we
4608 // can tile with integers.
4609 if (!OpTy->isSingleValueType() && OpTy->isSized()) {
4610 unsigned BitSize = DL.getTypeSizeInBits(OpTy);
4611 switch (BitSize) {
4612 default: break;
4613 case 1:
4614 case 8:
4615 case 16:
4616 case 32:
4617 case 64:
4618 case 128:
4619 OpInfo.ConstraintVT =
4620 MVT::getVT(IntegerType::get(OpTy->getContext(), BitSize), true);
4621 break;
4622 }
4623 } else if (PointerType *PT = dyn_cast<PointerType>(OpTy)) {
4624 unsigned PtrSize = DL.getPointerSizeInBits(PT->getAddressSpace());
4625 OpInfo.ConstraintVT = MVT::getIntegerVT(PtrSize);
4626 } else {
4627 OpInfo.ConstraintVT = MVT::getVT(OpTy, true);
4628 }
4629 }
4630 }
4631
4632 // If we have multiple alternative constraints, select the best alternative.
4633 if (!ConstraintOperands.empty()) {
4634 if (maCount) {
4635 unsigned bestMAIndex = 0;
4636 int bestWeight = -1;
4637 // weight: -1 = invalid match, and 0 = so-so match to 5 = good match.
4638 int weight = -1;
4639 unsigned maIndex;
4640 // Compute the sums of the weights for each alternative, keeping track
4641 // of the best (highest weight) one so far.
4642 for (maIndex = 0; maIndex < maCount; ++maIndex) {
4643 int weightSum = 0;
4644 for (unsigned cIndex = 0, eIndex = ConstraintOperands.size();
4645 cIndex != eIndex; ++cIndex) {
4646 AsmOperandInfo &OpInfo = ConstraintOperands[cIndex];
4647 if (OpInfo.Type == InlineAsm::isClobber)
4648 continue;
4649
4650 // If this is an output operand with a matching input operand,
4651 // look up the matching input. If their types mismatch, e.g. one
4652 // is an integer, the other is floating point, or their sizes are
4653 // different, flag it as maCantMatch.
4654 if (OpInfo.hasMatchingInput()) {
4655 AsmOperandInfo &Input = ConstraintOperands[OpInfo.MatchingInput];
4656 if (OpInfo.ConstraintVT != Input.ConstraintVT) {
4657 if ((OpInfo.ConstraintVT.isInteger() !=
4658 Input.ConstraintVT.isInteger()) ||
4659 (OpInfo.ConstraintVT.getSizeInBits() !=
4660 Input.ConstraintVT.getSizeInBits())) {
4661 weightSum = -1; // Can't match.
4662 break;
4663 }
4664 }
4665 }
4666 weight = getMultipleConstraintMatchWeight(OpInfo, maIndex);
4667 if (weight == -1) {
4668 weightSum = -1;
4669 break;
4670 }
4671 weightSum += weight;
4672 }
4673 // Update best.
4674 if (weightSum > bestWeight) {
4675 bestWeight = weightSum;
4676 bestMAIndex = maIndex;
4677 }
4678 }
4679
4680 // Now select the chosen alternative in each constraint.
4681 for (unsigned cIndex = 0, eIndex = ConstraintOperands.size();
4682 cIndex != eIndex; ++cIndex) {
4683 AsmOperandInfo &cInfo = ConstraintOperands[cIndex];
4684 if (cInfo.Type == InlineAsm::isClobber)
4685 continue;
4686 cInfo.selectAlternative(bestMAIndex);
4687 }
4688 }
4689 }
4690
4691 // Check and hook up tied operands, choose constraint code to use.
4692 for (unsigned cIndex = 0, eIndex = ConstraintOperands.size();
4693 cIndex != eIndex; ++cIndex) {
4694 AsmOperandInfo &OpInfo = ConstraintOperands[cIndex];
4695
4696 // If this is an output operand with a matching input operand, look up the
4697 // matching input. If their types mismatch, e.g. one is an integer, the
4698 // other is floating point, or their sizes are different, flag it as an
4699 // error.
4700 if (OpInfo.hasMatchingInput()) {
4701 AsmOperandInfo &Input = ConstraintOperands[OpInfo.MatchingInput];
4702
4703 if (OpInfo.ConstraintVT != Input.ConstraintVT) {
4704 std::pair<unsigned, const TargetRegisterClass *> MatchRC =
4705 getRegForInlineAsmConstraint(TRI, OpInfo.ConstraintCode,
4706 OpInfo.ConstraintVT);
4707 std::pair<unsigned, const TargetRegisterClass *> InputRC =
4708 getRegForInlineAsmConstraint(TRI, Input.ConstraintCode,
4709 Input.ConstraintVT);
4710 if ((OpInfo.ConstraintVT.isInteger() !=
4711 Input.ConstraintVT.isInteger()) ||
4712 (MatchRC.second != InputRC.second)) {
4713 report_fatal_error("Unsupported asm: input constraint"
4714 " with a matching output constraint of"
4715 " incompatible type!");
4716 }
4717 }
4718 }
4719 }
4720
4721 return ConstraintOperands;
4722 }
4723
4724 /// Return an integer indicating how general CT is.
4725 static unsigned getConstraintGenerality(TargetLowering::ConstraintType CT) {
4726 switch (CT) {
4727 case TargetLowering::C_Immediate:
4728 case TargetLowering::C_Other:
4729 case TargetLowering::C_Unknown:
4730 return 0;
4731 case TargetLowering::C_Register:
4732 return 1;
4733 case TargetLowering::C_RegisterClass:
4734 return 2;
4735 case TargetLowering::C_Memory:
4736 return 3;
4737 }
4738 llvm_unreachable("Invalid constraint type");
4739 }
4740
4741 /// Examine constraint type and operand type and determine a weight value.
4742 /// This object must already have been set up with the operand type
4743 /// and the current alternative constraint selected.
4744 TargetLowering::ConstraintWeight
4745 TargetLowering::getMultipleConstraintMatchWeight(
4746 AsmOperandInfo &info, int maIndex) const {
4747 InlineAsm::ConstraintCodeVector *rCodes;
4748 if (maIndex >= (int)info.multipleAlternatives.size())
4749 rCodes = &info.Codes;
4750 else
4751 rCodes = &info.multipleAlternatives[maIndex].Codes;
4752 ConstraintWeight BestWeight = CW_Invalid;
4753
4754 // Loop over the options, keeping track of the most general one.
4755 for (unsigned i = 0, e = rCodes->size(); i != e; ++i) {
4756 ConstraintWeight weight =
4757 getSingleConstraintMatchWeight(info, (*rCodes)[i].c_str());
4758 if (weight > BestWeight)
4759 BestWeight = weight;
4760 }
4761
4762 return BestWeight;
4763 }
4764
4765 /// Examine constraint type and operand type and determine a weight value.
4766 /// This object must already have been set up with the operand type
4767 /// and the current alternative constraint selected.
4768 TargetLowering::ConstraintWeight
4769 TargetLowering::getSingleConstraintMatchWeight(
4770 AsmOperandInfo &info, const char *constraint) const {
4771 ConstraintWeight weight = CW_Invalid;
4772 Value *CallOperandVal = info.CallOperandVal;
4773 // If we don't have a value, we can't do a match,
4774 // but allow it at the lowest weight.
4775 if (!CallOperandVal)
4776 return CW_Default;
4777 // Look at the constraint type.
4778 switch (*constraint) {
4779 case 'i': // immediate integer.
4780 case 'n': // immediate integer with a known value.
4781 if (isa<ConstantInt>(CallOperandVal))
4782 weight = CW_Constant;
4783 break;
4784 case 's': // non-explicit integral immediate.
4785 if (isa<GlobalValue>(CallOperandVal))
4786 weight = CW_Constant;
4787 break;
4788 case 'E': // immediate float if host format.
4789 case 'F': // immediate float.
4790 if (isa<ConstantFP>(CallOperandVal))
4791 weight = CW_Constant;
4792 break;
4793 case '<': // memory operand with autodecrement.
4794 case '>': // memory operand with autoincrement.
4795 case 'm': // memory operand.
4796 case 'o': // offsettable memory operand
4797 case 'V': // non-offsettable memory operand
4798 weight = CW_Memory;
4799 break;
4800 case 'r': // general register.
4801 case 'g': // general register, memory operand or immediate integer.
4802 // note: Clang converts "g" to "imr".
4803 if (CallOperandVal->getType()->isIntegerTy())
4804 weight = CW_Register;
4805 break;
4806 case 'X': // any operand.
4807 default:
4808 weight = CW_Default;
4809 break;
4810 }
4811 return weight;
4812 }
4813
4814 /// If there are multiple different constraints that we could pick for this
4815 /// operand (e.g. "imr") try to pick the 'best' one.
4816 /// This is somewhat tricky: constraints fall into four classes:
4817 /// Other -> immediates and magic values
4818 /// Register -> one specific register
4819 /// RegisterClass -> a group of regs
4820 /// Memory -> memory
4821 /// Ideally, we would pick the most specific constraint possible: if we have
4822 /// something that fits into a register, we would pick it. The problem here
4823 /// is that if we have something that could either be in a register or in
4824 /// memory that use of the register could cause selection of *other*
4825 /// operands to fail: they might only succeed if we pick memory. Because of
4826 /// this the heuristic we use is:
4827 ///
4828 /// 1) If there is an 'other' constraint, and if the operand is valid for
4829 /// that constraint, use it. This makes us take advantage of 'i'
4830 /// constraints when available.
4831 /// 2) Otherwise, pick the most general constraint present. This prefers
4832 /// 'm' over 'r', for example.
4833 ///
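/// As an illustrative example (not from the original comments): for the code
/// "imr" with a ConstantInt operand, step 1 accepts 'i' because
/// LowerAsmOperandForConstraint can lower the constant; for a non-constant
/// operand, step 2 prefers 'm' over 'r' since getConstraintGenerality ranks
/// C_Memory above C_RegisterClass.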
4834 static void ChooseConstraint(TargetLowering::AsmOperandInfo &OpInfo,
4835 const TargetLowering &TLI,
4836 SDValue Op, SelectionDAG *DAG) {
4837 assert(OpInfo.Codes.size() > 1 && "Doesn't have multiple constraint options");
4838 unsigned BestIdx = 0;
4839 TargetLowering::ConstraintType BestType = TargetLowering::C_Unknown;
4840 int BestGenerality = -1;
4841
4842 // Loop over the options, keeping track of the most general one.
4843 for (unsigned i = 0, e = OpInfo.Codes.size(); i != e; ++i) {
4844 TargetLowering::ConstraintType CType =
4845 TLI.getConstraintType(OpInfo.Codes[i]);
4846
4847 // Indirect 'other' or 'immediate' constraints are not allowed.
4848 if (OpInfo.isIndirect && !(CType == TargetLowering::C_Memory ||
4849 CType == TargetLowering::C_Register ||
4850 CType == TargetLowering::C_RegisterClass))
4851 continue;
4852
4853 // If this is an 'other' or 'immediate' constraint, see if the operand is
4854 // valid for it. For example, on X86 we might have an 'rI' constraint. If
4855 // the operand is an integer in the range [0..31] we want to use I (saving a
4856 // load of a register), otherwise we must use 'r'.
4857 if ((CType == TargetLowering::C_Other ||
4858 CType == TargetLowering::C_Immediate) && Op.getNode()) {
4859 assert(OpInfo.Codes[i].size() == 1 &&
4860 "Unhandled multi-letter 'other' constraint");
4861 std::vector<SDValue> ResultOps;
4862 TLI.LowerAsmOperandForConstraint(Op, OpInfo.Codes[i],
4863 ResultOps, *DAG);
4864 if (!ResultOps.empty()) {
4865 BestType = CType;
4866 BestIdx = i;
4867 break;
4868 }
4869 }
4870
4871 // Things with matching constraints can only be registers, per gcc
4872 // documentation. This mainly affects "g" constraints.
4873 if (CType == TargetLowering::C_Memory && OpInfo.hasMatchingInput())
4874 continue;
4875
4876 // This constraint letter is more general than the previous one, use it.
4877 int Generality = getConstraintGenerality(CType);
4878 if (Generality > BestGenerality) {
4879 BestType = CType;
4880 BestIdx = i;
4881 BestGenerality = Generality;
4882 }
4883 }
4884
4885 OpInfo.ConstraintCode = OpInfo.Codes[BestIdx];
4886 OpInfo.ConstraintType = BestType;
4887 }
4888
4889 /// Determines the constraint code and constraint type to use for the specific
4890 /// AsmOperandInfo, setting OpInfo.ConstraintCode and OpInfo.ConstraintType.
4891 void TargetLowering::ComputeConstraintToUse(AsmOperandInfo &OpInfo,
4892 SDValue Op,
4893 SelectionDAG *DAG) const {
4894 assert(!OpInfo.Codes.empty() && "Must have at least one constraint");
4895
4896 // Single-letter constraints ('r') are very common.
4897 if (OpInfo.Codes.size() == 1) {
4898 OpInfo.ConstraintCode = OpInfo.Codes[0];
4899 OpInfo.ConstraintType = getConstraintType(OpInfo.ConstraintCode);
4900 } else {
4901 ChooseConstraint(OpInfo, *this, Op, DAG);
4902 }
4903
4904 // 'X' matches anything.
4905 if (OpInfo.ConstraintCode == "X" && OpInfo.CallOperandVal) {
4906 // Labels and constants are handled elsewhere ('X' is the only thing
4907 // that matches labels). For Functions, the type here is the type of
4908 // the result, which is not what we want to look at; leave them alone.
4909 Value *v = OpInfo.CallOperandVal;
4910 if (isa<BasicBlock>(v) || isa<ConstantInt>(v) || isa<Function>(v)) {
4911 OpInfo.CallOperandVal = v;
4912 return;
4913 }
4914
4915 if (Op.getNode() && Op.getOpcode() == ISD::TargetBlockAddress)
4916 return;
4917
4918 // Otherwise, try to resolve it to something we know about by looking at
4919 // the actual operand type.
4920 if (const char *Repl = LowerXConstraint(OpInfo.ConstraintVT)) {
4921 OpInfo.ConstraintCode = Repl;
4922 OpInfo.ConstraintType = getConstraintType(OpInfo.ConstraintCode);
4923 }
4924 }
4925 }
4926
4927 /// Given an exact SDIV by a constant, create a multiplication
4928 /// with the multiplicative inverse of the constant.
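/// A worked example (illustrative only): an exact 'sdiv i8 %x, 6' becomes
/// (mul (sra exact %x, 1), 0xAB), since 6 = 3 * 2^1 and 0xAB = 171 is the
/// multiplicative inverse of 3 modulo 2^8 (3 * 171 = 513 = 2 * 256 + 1).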
4929 static SDValue BuildExactSDIV(const TargetLowering &TLI, SDNode *N,
4930 const SDLoc &dl, SelectionDAG &DAG,
4931 SmallVectorImpl<SDNode *> &Created) {
4932 SDValue Op0 = N->getOperand(0);
4933 SDValue Op1 = N->getOperand(1);
4934 EVT VT = N->getValueType(0);
4935 EVT SVT = VT.getScalarType();
4936 EVT ShVT = TLI.getShiftAmountTy(VT, DAG.getDataLayout());
4937 EVT ShSVT = ShVT.getScalarType();
4938
4939 bool UseSRA = false;
4940 SmallVector<SDValue, 16> Shifts, Factors;
4941
4942 auto BuildSDIVPattern = [&](ConstantSDNode *C) {
4943 if (C->isNullValue())
4944 return false;
4945 APInt Divisor = C->getAPIntValue();
4946 unsigned Shift = Divisor.countTrailingZeros();
4947 if (Shift) {
4948 Divisor.ashrInPlace(Shift);
4949 UseSRA = true;
4950 }
4951 // Calculate the multiplicative inverse, using Newton's method.
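// (Each iteration doubles the number of correct low bits: if D * F == 1
// (mod 2^k), then D * F * (2 - D * F) == 1 (mod 2^(2k)), so this converges
// in O(log2 BitWidth) steps for any odd divisor.)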
4952 APInt t;
4953 APInt Factor = Divisor;
4954 while ((t = Divisor * Factor) != 1)
4955 Factor *= APInt(Divisor.getBitWidth(), 2) - t;
4956 Shifts.push_back(DAG.getConstant(Shift, dl, ShSVT));
4957 Factors.push_back(DAG.getConstant(Factor, dl, SVT));
4958 return true;
4959 };
4960
4961 // Collect all magic values from the build vector.
4962 if (!ISD::matchUnaryPredicate(Op1, BuildSDIVPattern))
4963 return SDValue();
4964
4965 SDValue Shift, Factor;
4966 if (VT.isVector()) {
4967 Shift = DAG.getBuildVector(ShVT, dl, Shifts);
4968 Factor = DAG.getBuildVector(VT, dl, Factors);
4969 } else {
4970 Shift = Shifts[0];
4971 Factor = Factors[0];
4972 }
4973
4974 SDValue Res = Op0;
4975
4976 // Shift the value upfront if it is even, so the LSB is one.
4977 if (UseSRA) {
4978 // TODO: For UDIV use SRL instead of SRA.
4979 SDNodeFlags Flags;
4980 Flags.setExact(true);
4981 Res = DAG.getNode(ISD::SRA, dl, VT, Res, Shift, Flags);
4982 Created.push_back(Res.getNode());
4983 }
4984
4985 return DAG.getNode(ISD::MUL, dl, VT, Res, Factor);
4986 }
4987
4988 SDValue TargetLowering::BuildSDIVPow2(SDNode *N, const APInt &Divisor,
4989 SelectionDAG &DAG,
4990 SmallVectorImpl<SDNode *> &Created) const {
4991 AttributeList Attr = DAG.getMachineFunction().getFunction().getAttributes();
4992 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4993 if (TLI.isIntDivCheap(N->getValueType(0), Attr))
4994 return SDValue(N, 0); // Lower SDIV as SDIV
4995 return SDValue();
4996 }
4997
4998 /// Given an ISD::SDIV node expressing a divide by constant,
4999 /// return a DAG expression to select that will generate the same value by
5000 /// multiplying by a magic number.
5001 /// Ref: "Hacker's Delight" or "The PowerPC Compiler Writer's Guide".
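/// A worked example (illustrative, not from the original comments): for
/// 'sdiv i32 %n, 5' this builds Q = mulhs(%n, 0x66666667), shifts Q right
/// by 1 and adds Q's sign bit, i.e. (srl Q, 31). For a divisor of 7 the
/// magic constant 0x92492493 is negative, so the numerator is first added
/// back (the NumeratorFactor = 1 case in BuildSDIVPattern below).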
5002 SDValue TargetLowering::BuildSDIV(SDNode *N, SelectionDAG &DAG,
5003 bool IsAfterLegalization,
5004 SmallVectorImpl<SDNode *> &Created) const {
5005 SDLoc dl(N);
5006 EVT VT = N->getValueType(0);
5007 EVT SVT = VT.getScalarType();
5008 EVT ShVT = getShiftAmountTy(VT, DAG.getDataLayout());
5009 EVT ShSVT = ShVT.getScalarType();
5010 unsigned EltBits = VT.getScalarSizeInBits();
5011
5012 // Check to see if we can do this.
5013 // FIXME: We should be more aggressive here.
5014 if (!isTypeLegal(VT))
5015 return SDValue();
5016
5017 // If the sdiv has an 'exact' bit we can use a simpler lowering.
5018 if (N->getFlags().hasExact())
5019 return BuildExactSDIV(*this, N, dl, DAG, Created);
5020
5021 SmallVector<SDValue, 16> MagicFactors, Factors, Shifts, ShiftMasks;
5022
5023 auto BuildSDIVPattern = [&](ConstantSDNode *C) {
5024 if (C->isNullValue())
5025 return false;
5026
5027 const APInt &Divisor = C->getAPIntValue();
5028 APInt::ms magics = Divisor.magic();
5029 int NumeratorFactor = 0;
5030 int ShiftMask = -1;
5031
5032 if (Divisor.isOneValue() || Divisor.isAllOnesValue()) {
5033 // If d is +1/-1, we just multiply the numerator by +1/-1.
5034 NumeratorFactor = Divisor.getSExtValue();
5035 magics.m = 0;
5036 magics.s = 0;
5037 ShiftMask = 0;
5038 } else if (Divisor.isStrictlyPositive() && magics.m.isNegative()) {
5039 // If d > 0 and m < 0, add the numerator.
5040 NumeratorFactor = 1;
5041 } else if (Divisor.isNegative() && magics.m.isStrictlyPositive()) {
5042 // If d < 0 and m > 0, subtract the numerator.
5043 NumeratorFactor = -1;
5044 }
5045
5046 MagicFactors.push_back(DAG.getConstant(magics.m, dl, SVT));
5047 Factors.push_back(DAG.getConstant(NumeratorFactor, dl, SVT));
5048 Shifts.push_back(DAG.getConstant(magics.s, dl, ShSVT));
5049 ShiftMasks.push_back(DAG.getConstant(ShiftMask, dl, SVT));
5050 return true;
5051 };
5052
5053 SDValue N0 = N->getOperand(0);
5054 SDValue N1 = N->getOperand(1);
5055
5056 // Collect the shifts / magic values from each element.
5057 if (!ISD::matchUnaryPredicate(N1, BuildSDIVPattern))
5058 return SDValue();
5059
5060 SDValue MagicFactor, Factor, Shift, ShiftMask;
5061 if (VT.isVector()) {
5062 MagicFactor = DAG.getBuildVector(VT, dl, MagicFactors);
5063 Factor = DAG.getBuildVector(VT, dl, Factors);
5064 Shift = DAG.getBuildVector(ShVT, dl, Shifts);
5065 ShiftMask = DAG.getBuildVector(VT, dl, ShiftMasks);
5066 } else {
5067 MagicFactor = MagicFactors[0];
5068 Factor = Factors[0];
5069 Shift = Shifts[0];
5070 ShiftMask = ShiftMasks[0];
5071 }
5072
5073 // Multiply the numerator (operand 0) by the magic value.
5074 // FIXME: We should support doing a MUL in a wider type.
5075 SDValue Q;
5076 if (IsAfterLegalization ? isOperationLegal(ISD::MULHS, VT)
5077 : isOperationLegalOrCustom(ISD::MULHS, VT))
5078 Q = DAG.getNode(ISD::MULHS, dl, VT, N0, MagicFactor);
5079 else if (IsAfterLegalization ? isOperationLegal(ISD::SMUL_LOHI, VT)
5080 : isOperationLegalOrCustom(ISD::SMUL_LOHI, VT)) {
5081 SDValue LoHi =
5082 DAG.getNode(ISD::SMUL_LOHI, dl, DAG.getVTList(VT, VT), N0, MagicFactor);
5083 Q = SDValue(LoHi.getNode(), 1);
5084 } else
5085 return SDValue(); // No mulhs or equivalent.
5086 Created.push_back(Q.getNode());
5087
5088 // (Optionally) Add/subtract the numerator using Factor.
5089 Factor = DAG.getNode(ISD::MUL, dl, VT, N0, Factor);
5090 Created.push_back(Factor.getNode());
5091 Q = DAG.getNode(ISD::ADD, dl, VT, Q, Factor);
5092 Created.push_back(Q.getNode());
5093
5094 // Shift right algebraic by shift value.
5095 Q = DAG.getNode(ISD::SRA, dl, VT, Q, Shift);
5096 Created.push_back(Q.getNode());
5097
5098 // Extract the sign bit, mask it and add it to the quotient.
5099 SDValue SignShift = DAG.getConstant(EltBits - 1, dl, ShVT);
5100 SDValue T = DAG.getNode(ISD::SRL, dl, VT, Q, SignShift);
5101 Created.push_back(T.getNode());
5102 T = DAG.getNode(ISD::AND, dl, VT, T, ShiftMask);
5103 Created.push_back(T.getNode());
5104 return DAG.getNode(ISD::ADD, dl, VT, Q, T);
5105 }
5106
5107 /// Given an ISD::UDIV node expressing a divide by constant,
5108 /// return a DAG expression to select that will generate the same value by
5109 /// multiplying by a magic number.
5110 /// Ref: "Hacker's Delight" or "The PowerPC Compiler Writer's Guide".
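/// A worked example (illustrative, not from the original comments): for
/// 'udiv i32 %n, 3' this builds (srl (mulhu %n, 0xAAAAAAAB), 1). A divisor
/// of 7 needs the post-multiply fixup (UseNPQ below): with
/// q = mulhu(%n, 0x24924925), it computes q = (srl (sub %n, q), 1) + q and
/// returns (srl q, 2).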
5111 SDValue TargetLowering::BuildUDIV(SDNode *N, SelectionDAG &DAG,
5112 bool IsAfterLegalization,
5113 SmallVectorImpl<SDNode *> &Created) const {
5114 SDLoc dl(N);
5115 EVT VT = N->getValueType(0);
5116 EVT SVT = VT.getScalarType();
5117 EVT ShVT = getShiftAmountTy(VT, DAG.getDataLayout());
5118 EVT ShSVT = ShVT.getScalarType();
5119 unsigned EltBits = VT.getScalarSizeInBits();
5120
5121 // Check to see if we can do this.
5122 // FIXME: We should be more aggressive here.
5123 if (!isTypeLegal(VT))
5124 return SDValue();
5125
5126 bool UseNPQ = false;
5127 SmallVector<SDValue, 16> PreShifts, PostShifts, MagicFactors, NPQFactors;
5128
5129 auto BuildUDIVPattern = [&](ConstantSDNode *C) {
5130 if (C->isNullValue())
5131 return false;
5132 // FIXME: We should use a narrower constant when the upper
5133 // bits are known to be zero.
5134 APInt Divisor = C->getAPIntValue();
5135 APInt::mu magics = Divisor.magicu();
5136 unsigned PreShift = 0, PostShift = 0;
5137
5138 // If the divisor is even, we can avoid using the expensive fixup by
5139 // shifting the divided value upfront.
5140 if (magics.a != 0 && !Divisor[0]) {
5141 PreShift = Divisor.countTrailingZeros();
5142 // Get magic number for the shifted divisor.
5143 magics = Divisor.lshr(PreShift).magicu(PreShift);
5144 assert(magics.a == 0 && "Should use cheap fixup now");
5145 }
5146
5147 APInt Magic = magics.m;
5148
5149 bool SelNPQ;
5150 if (magics.a == 0 || Divisor.isOneValue()) {
5151 assert(magics.s < Divisor.getBitWidth() &&
5152 "We shouldn't generate an undefined shift!");
5153 PostShift = magics.s;
5154 SelNPQ = false;
5155 } else {
5156 PostShift = magics.s - 1;
5157 SelNPQ = true;
5158 }
5159
5160 PreShifts.push_back(DAG.getConstant(PreShift, dl, ShSVT));
5161 MagicFactors.push_back(DAG.getConstant(Magic, dl, SVT));
5162 NPQFactors.push_back(
5163 DAG.getConstant(SelNPQ ? APInt::getOneBitSet(EltBits, EltBits - 1)
5164 : APInt::getNullValue(EltBits),
5165 dl, SVT));
5166 PostShifts.push_back(DAG.getConstant(PostShift, dl, ShSVT));
5167 UseNPQ |= SelNPQ;
5168 return true;
5169 };
5170
5171 SDValue N0 = N->getOperand(0);
5172 SDValue N1 = N->getOperand(1);
5173
5174 // Collect the shifts/magic values from each element.
5175 if (!ISD::matchUnaryPredicate(N1, BuildUDIVPattern))
5176 return SDValue();
5177
5178 SDValue PreShift, PostShift, MagicFactor, NPQFactor;
5179 if (VT.isVector()) {
5180 PreShift = DAG.getBuildVector(ShVT, dl, PreShifts);
5181 MagicFactor = DAG.getBuildVector(VT, dl, MagicFactors);
5182 NPQFactor = DAG.getBuildVector(VT, dl, NPQFactors);
5183 PostShift = DAG.getBuildVector(ShVT, dl, PostShifts);
5184 } else {
5185 PreShift = PreShifts[0];
5186 MagicFactor = MagicFactors[0];
5187 PostShift = PostShifts[0];
5188 }
5189
5190 SDValue Q = N0;
5191 Q = DAG.getNode(ISD::SRL, dl, VT, Q, PreShift);
5192 Created.push_back(Q.getNode());
5193
5194 // FIXME: We should support doing a MUL in a wider type.
5195 auto GetMULHU = [&](SDValue X, SDValue Y) {
5196 if (IsAfterLegalization ? isOperationLegal(ISD::MULHU, VT)
5197 : isOperationLegalOrCustom(ISD::MULHU, VT))
5198 return DAG.getNode(ISD::MULHU, dl, VT, X, Y);
5199 if (IsAfterLegalization ? isOperationLegal(ISD::UMUL_LOHI, VT)
5200 : isOperationLegalOrCustom(ISD::UMUL_LOHI, VT)) {
5201 SDValue LoHi =
5202 DAG.getNode(ISD::UMUL_LOHI, dl, DAG.getVTList(VT, VT), X, Y);
5203 return SDValue(LoHi.getNode(), 1);
5204 }
5205 return SDValue(); // No mulhu or equivalent
5206 };
5207
5208 // Multiply the numerator (operand 0) by the magic value.
5209 Q = GetMULHU(Q, MagicFactor);
5210 if (!Q)
5211 return SDValue();
5212
5213 Created.push_back(Q.getNode());
5214
5215 if (UseNPQ) {
5216 SDValue NPQ = DAG.getNode(ISD::SUB, dl, VT, N0, Q);
5217 Created.push_back(NPQ.getNode());
5218
5219 // For vectors we might have a mix of non-NPQ/NPQ paths, so use
5220 // MULHU to act as a SRL-by-1 for NPQ, else multiply by zero.
5221 if (VT.isVector())
5222 NPQ = GetMULHU(NPQ, NPQFactor);
5223 else
5224 NPQ = DAG.getNode(ISD::SRL, dl, VT, NPQ, DAG.getConstant(1, dl, ShVT));
5225
5226 Created.push_back(NPQ.getNode());
5227
5228 Q = DAG.getNode(ISD::ADD, dl, VT, NPQ, Q);
5229 Created.push_back(Q.getNode());
5230 }
5231
5232 Q = DAG.getNode(ISD::SRL, dl, VT, Q, PostShift);
5233 Created.push_back(Q.getNode());
5234
5235 SDValue One = DAG.getConstant(1, dl, VT);
5236 SDValue IsOne = DAG.getSetCC(dl, VT, N1, One, ISD::SETEQ);
5237 return DAG.getSelect(dl, VT, IsOne, N0, Q);
5238 }
5239
5240 /// If all values in Values that *don't* match the predicate are the same 'splat'
5241 /// value, then replace all values with that splat value.
5242 /// Else, if AlternativeReplacement was provided, then replace all values that
5243 /// do match predicate with AlternativeReplacement value.
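/// For example (illustrative): with Values = [7, 0, 7, 0] and
/// Predicate = isNullConstant, 7 is the splat value and the result is
/// [7, 7, 7, 7]; with Values = [7, 0, 3, 0] there is no single splat value,
/// so the zeros become AlternativeReplacement if one was provided.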
5244 static void
5245 turnVectorIntoSplatVector(MutableArrayRef<SDValue> Values,
5246 std::function<bool(SDValue)> Predicate,
5247 SDValue AlternativeReplacement = SDValue()) {
5248 SDValue Replacement;
5249 // Is there a value for which the Predicate does *NOT* match? What is it?
5250 auto SplatValue = llvm::find_if_not(Values, Predicate);
5251 if (SplatValue != Values.end()) {
5252 // Does Values consist only of SplatValue's and values matching Predicate?
5253 if (llvm::all_of(Values, [Predicate, SplatValue](SDValue Value) {
5254 return Value == *SplatValue || Predicate(Value);
5255 })) // Then we shall replace values matching predicate with SplatValue.
5256 Replacement = *SplatValue;
5257 }
5258 if (!Replacement) {
5259 // Oops, we did not find the "baseline" splat value.
5260 if (!AlternativeReplacement)
5261 return; // Nothing to do.
5262 // Let's replace with provided value then.
5263 Replacement = AlternativeReplacement;
5264 }
5265 std::replace_if(Values.begin(), Values.end(), Predicate, Replacement);
5266 }
5267
5268 /// Given an ISD::UREM used only by an ISD::SETEQ or ISD::SETNE
5269 /// where the divisor is constant and the comparison target is zero,
5270 /// return a DAG expression that will generate the same comparison result
5271 /// using only multiplications, additions and shifts/rotations.
5272 /// Ref: "Hacker's Delight" 10-17.
5273 SDValue TargetLowering::buildUREMEqFold(EVT SETCCVT, SDValue REMNode,
5274 SDValue CompTargetNode,
5275 ISD::CondCode Cond,
5276 DAGCombinerInfo &DCI,
5277 const SDLoc &DL) const {
5278 SmallVector<SDNode *, 5> Built;
5279 if (SDValue Folded = prepareUREMEqFold(SETCCVT, REMNode, CompTargetNode, Cond,
5280 DCI, DL, Built)) {
5281 for (SDNode *N : Built)
5282 DCI.AddToWorklist(N);
5283 return Folded;
5284 }
5285
5286 return SDValue();
5287 }
5288
5289 SDValue
5290 TargetLowering::prepareUREMEqFold(EVT SETCCVT, SDValue REMNode,
5291 SDValue CompTargetNode, ISD::CondCode Cond,
5292 DAGCombinerInfo &DCI, const SDLoc &DL,
5293 SmallVectorImpl<SDNode *> &Created) const {
5294 // fold (seteq/ne (urem N, D), 0) -> (setule/ugt (rotr (mul N, P), K), Q)
5295 // - D must be constant, with D = D0 * 2^K where D0 is odd
5296 // - P is the multiplicative inverse of D0 modulo 2^W
5297 // - Q = floor(((2^W) - 1) / D)
5298 // where W is the width of the common type of N and D.
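//
// A concrete sketch (illustrative, not from the original comments): for i32
// 'N u% 6 == 0' we have D = 6 = 3 * 2^1, so K = 1,
// P = inv(3, 2^32) = 0xAAAAAAAB and Q = floor((2^32 - 1) / 6) = 0x2AAAAAAA,
// yielding (setule (rotr (mul N, 0xAAAAAAAB), 1), 0x2AAAAAAA).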
5299 assert((Cond == ISD::SETEQ || Cond == ISD::SETNE) &&
5300 "Only applicable for (in)equality comparisons.");
5301
5302 SelectionDAG &DAG = DCI.DAG;
5303
5304 EVT VT = REMNode.getValueType();
5305 EVT SVT = VT.getScalarType();
5306 EVT ShVT = getShiftAmountTy(VT, DAG.getDataLayout());
5307 EVT ShSVT = ShVT.getScalarType();
5308
5309 // If MUL is unavailable, we cannot proceed in any case.
5310 if (!isOperationLegalOrCustom(ISD::MUL, VT))
5311 return SDValue();
5312
5313 bool ComparingWithAllZeros = true;
5314 bool AllComparisonsWithNonZerosAreTautological = true;
5315 bool HadTautologicalLanes = false;
5316 bool AllLanesAreTautological = true;
5317 bool HadEvenDivisor = false;
5318 bool AllDivisorsArePowerOfTwo = true;
5319 bool HadTautologicalInvertedLanes = false;
5320 SmallVector<SDValue, 16> PAmts, KAmts, QAmts, IAmts;
5321
5322 auto BuildUREMPattern = [&](ConstantSDNode *CDiv, ConstantSDNode *CCmp) {
5323 // Division by 0 is UB. Leave it to be constant-folded elsewhere.
5324 if (CDiv->isNullValue())
5325 return false;
5326
5327 const APInt &D = CDiv->getAPIntValue();
5328 const APInt &Cmp = CCmp->getAPIntValue();
5329
5330 ComparingWithAllZeros &= Cmp.isNullValue();
5331
5332 // `x u% C1` is *always* less than C1. So given `x u% C1 == C2`,
5333 // if C2 is not less than C1, the comparison is always false.
5334 // But we will only be able to produce the comparison that will give the
5335 // opposite tautological answer. So this lane would need to be fixed up.
5336 bool TautologicalInvertedLane = D.ule(Cmp);
5337 HadTautologicalInvertedLanes |= TautologicalInvertedLane;
5338
5339 // If all lanes are tautological (either all divisors are ones, or divisor
5340 // is not greater than the constant we are comparing with),
5341 // we will prefer to avoid the fold.
5342 bool TautologicalLane = D.isOneValue() || TautologicalInvertedLane;
5343 HadTautologicalLanes |= TautologicalLane;
5344 AllLanesAreTautological &= TautologicalLane;
5345
5346 // If we are comparing with non-zero, we'll need to subtract said
5347 // comparison value from the LHS. But there is no point in doing that if
5348 // every lane where we are comparing with non-zero is tautological.
5349 if (!Cmp.isNullValue())
5350 AllComparisonsWithNonZerosAreTautological &= TautologicalLane;
5351
5352 // Decompose D into D0 * 2^K
5353 unsigned K = D.countTrailingZeros();
5354 assert((!D.isOneValue() || (K == 0)) && "For divisor '1' we won't rotate.");
5355 APInt D0 = D.lshr(K);
5356
5357 // D is even if it has trailing zeros.
5358 HadEvenDivisor |= (K != 0);
5359 // D is a power-of-two if D0 is one.
5360 // If all divisors are power-of-two, we will prefer to avoid the fold.
5361 AllDivisorsArePowerOfTwo &= D0.isOneValue();
5362
5363 // P = inv(D0, 2^W)
5364 // 2^W requires W + 1 bits, so we have to extend and then truncate.
5365 unsigned W = D.getBitWidth();
5366 APInt P = D0.zext(W + 1)
5367 .multiplicativeInverse(APInt::getSignedMinValue(W + 1))
5368 .trunc(W);
5369 assert(!P.isNullValue() && "No multiplicative inverse!"); // unreachable
5370 assert((D0 * P).isOneValue() && "Multiplicative inverse sanity check.");
5371
5372 // Q = floor((2^W - 1) u/ D)
5373 // R = ((2^W - 1) u% D)
5374 APInt Q, R;
5375 APInt::udivrem(APInt::getAllOnesValue(W), D, Q, R);
5376
5377 // If we are comparing with zero, then that comparison constant is okay,
5378 // else it may need to be one less than that.
5379 if (Cmp.ugt(R))
5380 Q -= 1;
5381
5382 assert(APInt::getAllOnesValue(ShSVT.getSizeInBits()).ugt(K) &&
5383 "We are expecting that K is always less than all-ones for ShSVT");
5384
5385 // If the lane is tautological the result can be constant-folded.
5386 if (TautologicalLane) {
5387 // Set the P and K amounts to bogus values so we can try to splat them.
5388 P = 0;
5389 K = -1;
5390 // And ensure that the comparison constant is tautological;
5391 // it will always compare true/false.
5392 Q = -1;
5393 }
5394
5395 PAmts.push_back(DAG.getConstant(P, DL, SVT));
5396 KAmts.push_back(
5397 DAG.getConstant(APInt(ShSVT.getSizeInBits(), K), DL, ShSVT));
5398 QAmts.push_back(DAG.getConstant(Q, DL, SVT));
5399 return true;
5400 };
5401
5402 SDValue N = REMNode.getOperand(0);
5403 SDValue D = REMNode.getOperand(1);
5404
5405 // Collect the values from each element.
5406 if (!ISD::matchBinaryPredicate(D, CompTargetNode, BuildUREMPattern))
5407 return SDValue();
5408
5409 // If all lanes are tautological, the result can be constant-folded.
5410 if (AllLanesAreTautological)
5411 return SDValue();
5412
5413 // If this is a urem by a power-of-two, avoid the fold since it can be
5414 // best implemented as a bit test.
5415 if (AllDivisorsArePowerOfTwo)
5416 return SDValue();
5417
5418 SDValue PVal, KVal, QVal;
5419 if (VT.isVector()) {
5420 if (HadTautologicalLanes) {
5421 // Try to turn PAmts into a splat, since we don't care about the values
5422 // that are currently '0'. If we can't, just keep the '0's.
5423 turnVectorIntoSplatVector(PAmts, isNullConstant);
5424 // Try to turn KAmts into a splat, since we don't care about the values
5425 // that are currently '-1'. If we can't, change them to '0's.
5426 turnVectorIntoSplatVector(KAmts, isAllOnesConstant,
5427 DAG.getConstant(0, DL, ShSVT));
5428 }
5429
5430 PVal = DAG.getBuildVector(VT, DL, PAmts);
5431 KVal = DAG.getBuildVector(ShVT, DL, KAmts);
5432 QVal = DAG.getBuildVector(VT, DL, QAmts);
5433 } else {
5434 PVal = PAmts[0];
5435 KVal = KAmts[0];
5436 QVal = QAmts[0];
5437 }
5438
5439 if (!ComparingWithAllZeros && !AllComparisonsWithNonZerosAreTautological) {
5440 if (!isOperationLegalOrCustom(ISD::SUB, VT))
5441 return SDValue(); // FIXME: Could/should use `ISD::ADD`?
5442 assert(CompTargetNode.getValueType() == N.getValueType() &&
5443 "Expecting that the types on LHS and RHS of comparisons match.");
5444 N = DAG.getNode(ISD::SUB, DL, VT, N, CompTargetNode);
5445 }
5446
5447 // (mul N, P)
5448 SDValue Op0 = DAG.getNode(ISD::MUL, DL, VT, N, PVal);
5449 Created.push_back(Op0.getNode());
5450
5451 // Rotate right only if any divisor was even. We avoid rotates for all-odd
5452 // divisors as a performance improvement, since rotating by 0 is a no-op.
5453 if (HadEvenDivisor) {
5454 // We need ROTR to do this.
5455 if (!isOperationLegalOrCustom(ISD::ROTR, VT))
5456 return SDValue();
5457 SDNodeFlags Flags;
5458 Flags.setExact(true);
5459 // UREM: (rotr (mul N, P), K)
5460 Op0 = DAG.getNode(ISD::ROTR, DL, VT, Op0, KVal, Flags);
5461 Created.push_back(Op0.getNode());
5462 }
5463
5464 // UREM: (setule/setugt (rotr (mul N, P), K), Q)
5465 SDValue NewCC =
5466 DAG.getSetCC(DL, SETCCVT, Op0, QVal,
5467 ((Cond == ISD::SETEQ) ? ISD::SETULE : ISD::SETUGT));
5468 if (!HadTautologicalInvertedLanes)
5469 return NewCC;
5470
5471 // If any lanes previously compared always-false, the NewCC will give
5472 // always-true result for them, so we need to fixup those lanes.
5473 // Or the other way around for inequality predicate.
5474 assert(VT.isVector() && "Can/should only get here for vectors.");
5475 Created.push_back(NewCC.getNode());
5476
5477 // `x u% C1` is *always* less than C1. So given `x u% C1 == C2`,
5478 // if C2 is not less than C1, the comparison is always false.
5479 // But we have produced the comparison that will give the
5480 // opposite tautological answer. So these lanes would need to be fixed up.
5481 SDValue TautologicalInvertedChannels =
5482 DAG.getSetCC(DL, SETCCVT, D, CompTargetNode, ISD::SETULE);
5483 Created.push_back(TautologicalInvertedChannels.getNode());
5484
5485 if (isOperationLegalOrCustom(ISD::VSELECT, SETCCVT)) {
5486 // If we have a vector select, let's replace the comparison results in the
5487 // affected lanes with the correct tautological result.
5488 SDValue Replacement =
5489 DAG.getBoolConstant(Cond == ISD::SETNE, DL, SETCCVT, SETCCVT);
5490 return DAG.getNode(ISD::VSELECT, DL, SETCCVT, TautologicalInvertedChannels,
5491 Replacement, NewCC);
5492 }
5493
5494 // Else, we can just invert the comparison result in the appropriate lanes.
5495 if (isOperationLegalOrCustom(ISD::XOR, SETCCVT))
5496 return DAG.getNode(ISD::XOR, DL, SETCCVT, NewCC,
5497 TautologicalInvertedChannels);
5498
5499 return SDValue(); // Don't know how to lower.
5500 }
5501
5502 /// Given an ISD::SREM used only by an ISD::SETEQ or ISD::SETNE
5503 /// where the divisor is constant and the comparison target is zero,
5504 /// return a DAG expression that will generate the same comparison result
5505 /// using only multiplications, additions and shifts/rotations.
5506 /// Ref: "Hacker's Delight" 10-17.
5507 SDValue TargetLowering::buildSREMEqFold(EVT SETCCVT, SDValue REMNode,
5508 SDValue CompTargetNode,
5509 ISD::CondCode Cond,
5510 DAGCombinerInfo &DCI,
5511 const SDLoc &DL) const {
5512 SmallVector<SDNode *, 7> Built;
5513 if (SDValue Folded = prepareSREMEqFold(SETCCVT, REMNode, CompTargetNode, Cond,
5514 DCI, DL, Built)) {
5515 assert(Built.size() <= 7 && "Max size prediction failed.");
5516 for (SDNode *N : Built)
5517 DCI.AddToWorklist(N);
5518 return Folded;
5519 }
5520
5521 return SDValue();
5522 }
5523
5524 SDValue
5525 TargetLowering::prepareSREMEqFold(EVT SETCCVT, SDValue REMNode,
5526 SDValue CompTargetNode, ISD::CondCode Cond,
5527 DAGCombinerInfo &DCI, const SDLoc &DL,
5528 SmallVectorImpl<SDNode *> &Created) const {
5529 // Fold:
5530 // (seteq/ne (srem N, D), 0)
5531 // To:
5532 // (setule/ugt (rotr (add (mul N, P), A), K), Q)
5533 //
5534 // - D must be constant, with D = D0 * 2^K where D0 is odd
5535 // - P is the multiplicative inverse of D0 modulo 2^W
5536 // - A = bitwiseand(floor((2^(W - 1) - 1) / D0), (-(2^k)))
5537 // - Q = floor((2 * A) / (2^K))
5538 // where W is the width of the common type of N and D.
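//
// A concrete sketch (illustrative, not from the original comments): for i32
// 'N s% 6 == 0' we have D = 6 = 3 * 2^1, so K = 1,
// P = inv(3, 2^32) = 0xAAAAAAAB, A = floor((2^31 - 1) / 3) with the low K
// bits cleared = 0x2AAAAAAA, and Q = (2 * A) / 2^1 = 0x2AAAAAAA, yielding
// (setule (rotr (add (mul N, 0xAAAAAAAB), 0x2AAAAAAA), 1), 0x2AAAAAAA).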
5539 assert((Cond == ISD::SETEQ || Cond == ISD::SETNE) &&
5540 "Only applicable for (in)equality comparisons.");
5541
5542 SelectionDAG &DAG = DCI.DAG;
5543
5544 EVT VT = REMNode.getValueType();
5545 EVT SVT = VT.getScalarType();
5546 EVT ShVT = getShiftAmountTy(VT, DAG.getDataLayout());
5547 EVT ShSVT = ShVT.getScalarType();
5548
5549 // If MUL is unavailable, we cannot proceed in any case.
5550 if (!isOperationLegalOrCustom(ISD::MUL, VT))
5551 return SDValue();
5552
5553 // TODO: Could support comparing with non-zero too.
5554 ConstantSDNode *CompTarget = isConstOrConstSplat(CompTargetNode);
5555 if (!CompTarget || !CompTarget->isNullValue())
5556 return SDValue();
5557
5558 bool HadIntMinDivisor = false;
5559 bool HadOneDivisor = false;
5560 bool AllDivisorsAreOnes = true;
5561 bool HadEvenDivisor = false;
5562 bool NeedToApplyOffset = false;
5563 bool AllDivisorsArePowerOfTwo = true;
5564 SmallVector<SDValue, 16> PAmts, AAmts, KAmts, QAmts;
5565
5566 auto BuildSREMPattern = [&](ConstantSDNode *C) {
5567 // Division by 0 is UB. Leave it to be constant-folded elsewhere.
5568 if (C->isNullValue())
5569 return false;
5570
5571 // FIXME: we don't fold `rem %X, -C` to `rem %X, C` in DAGCombine.
5572
5573 // WARNING: this fold is only valid for positive divisors!
5574 APInt D = C->getAPIntValue();
5575 if (D.isNegative())
5576 D.negate(); // `rem %X, -C` is equivalent to `rem %X, C`
5577
5578 HadIntMinDivisor |= D.isMinSignedValue();
5579
5580 // If all divisors are ones, we will prefer to avoid the fold.
5581 HadOneDivisor |= D.isOneValue();
5582 AllDivisorsAreOnes &= D.isOneValue();
5583
5584 // Decompose D into D0 * 2^K
5585 unsigned K = D.countTrailingZeros();
5586 assert((!D.isOneValue() || (K == 0)) && "For divisor '1' we won't rotate.");
5587 APInt D0 = D.lshr(K);
5588
5589 if (!D.isMinSignedValue()) {
5590 // D is even if it has trailing zeros; unless it's INT_MIN, in which case
5591 // we don't care about this lane in this fold; we'll special-handle it.
5592 HadEvenDivisor |= (K != 0);
5593 }
5594
5595 // D is a power-of-two if D0 is one. This includes INT_MIN.
5596 // If all divisors are power-of-two, we will prefer to avoid the fold.
5597 AllDivisorsArePowerOfTwo &= D0.isOneValue();
5598
5599 // P = inv(D0, 2^W)
5600 // 2^W requires W + 1 bits, so we have to extend and then truncate.
5601 unsigned W = D.getBitWidth();
5602 APInt P = D0.zext(W + 1)
5603 .multiplicativeInverse(APInt::getSignedMinValue(W + 1))
5604 .trunc(W);
5605 assert(!P.isNullValue() && "No multiplicative inverse!"); // unreachable
5606 assert((D0 * P).isOneValue() && "Multiplicative inverse sanity check.");
5607
5608 // A = floor((2^(W - 1) - 1) / D0) & -2^K
5609 APInt A = APInt::getSignedMaxValue(W).udiv(D0);
5610 A.clearLowBits(K);
5611
5612 if (!D.isMinSignedValue()) {
5613 // If the divisor is INT_MIN, we don't care about this lane in this fold;
5614 // we'll special-handle it.
5615 NeedToApplyOffset |= A != 0;
5616 }
5617
5618 // Q = floor((2 * A) / (2^K))
5619 APInt Q = (2 * A).udiv(APInt::getOneBitSet(W, K));
5620
5621 assert(APInt::getAllOnesValue(SVT.getSizeInBits()).ugt(A) &&
5622 "We are expecting that A is always less than all-ones for SVT");
5623 assert(APInt::getAllOnesValue(ShSVT.getSizeInBits()).ugt(K) &&
5624 "We are expecting that K is always less than all-ones for ShSVT");
5625
5626 // If the divisor is 1 the result can be constant-folded. Likewise, we
5627 // don't care about INT_MIN lanes; those can be set to undef if appropriate.
5628 if (D.isOneValue()) {
5629 // Set P, A and K to bogus values so we can try to splat them.
5630 P = 0;
5631 A = -1;
5632 K = -1;
5633
5634 // x ?% 1 == 0 <--> true <--> x u<= -1
5635 Q = -1;
5636 }
5637
5638 PAmts.push_back(DAG.getConstant(P, DL, SVT));
5639 AAmts.push_back(DAG.getConstant(A, DL, SVT));
5640 KAmts.push_back(
5641 DAG.getConstant(APInt(ShSVT.getSizeInBits(), K), DL, ShSVT));
5642 QAmts.push_back(DAG.getConstant(Q, DL, SVT));
5643 return true;
5644 };
5645
5646 SDValue N = REMNode.getOperand(0);
5647 SDValue D = REMNode.getOperand(1);
5648
5649 // Collect the values from each element.
5650 if (!ISD::matchUnaryPredicate(D, BuildSREMPattern))
5651 return SDValue();
5652
5653 // If this is a srem by one, avoid the fold since it can be constant-folded.
5654 if (AllDivisorsAreOnes)
5655 return SDValue();
5656
5657 // If this is a srem by a power-of-two (including INT_MIN), avoid the fold
5658 // since it can be best implemented as a bit test.
5659 if (AllDivisorsArePowerOfTwo)
5660 return SDValue();
5661
5662 SDValue PVal, AVal, KVal, QVal;
5663 if (VT.isVector()) {
5664 if (HadOneDivisor) {
5665 // Try to turn PAmts into a splat, since we don't care about the values
5666 // that are currently '0'. If we can't, just keep the '0's.
5667 turnVectorIntoSplatVector(PAmts, isNullConstant);
5668 // Try to turn AAmts into a splat, since we don't care about the
5669 // values that are currently '-1'. If we can't, change them to '0's.
5670 turnVectorIntoSplatVector(AAmts, isAllOnesConstant,
5671 DAG.getConstant(0, DL, SVT));
5672 // Try to turn KAmts into a splat, since we don't care about the values
5673 // that are currently '-1'. If we can't, change them to '0's.
5674 turnVectorIntoSplatVector(KAmts, isAllOnesConstant,
5675 DAG.getConstant(0, DL, ShSVT));
5676 }
5677
5678 PVal = DAG.getBuildVector(VT, DL, PAmts);
5679 AVal = DAG.getBuildVector(VT, DL, AAmts);
5680 KVal = DAG.getBuildVector(ShVT, DL, KAmts);
5681 QVal = DAG.getBuildVector(VT, DL, QAmts);
5682 } else {
5683 PVal = PAmts[0];
5684 AVal = AAmts[0];
5685 KVal = KAmts[0];
5686 QVal = QAmts[0];
5687 }
5688
5689 // (mul N, P)
5690 SDValue Op0 = DAG.getNode(ISD::MUL, DL, VT, N, PVal);
5691 Created.push_back(Op0.getNode());
5692
5693 if (NeedToApplyOffset) {
5694 // We need ADD to do this.
5695 if (!isOperationLegalOrCustom(ISD::ADD, VT))
5696 return SDValue();
5697
5698 // (add (mul N, P), A)
5699 Op0 = DAG.getNode(ISD::ADD, DL, VT, Op0, AVal);
5700 Created.push_back(Op0.getNode());
5701 }
5702
5703 // Rotate right only if any divisor was even. We avoid rotates for all-odd
5704 // divisors as a performance improvement, since rotating by 0 is a no-op.
5705 if (HadEvenDivisor) {
5706 // We need ROTR to do this.
5707 if (!isOperationLegalOrCustom(ISD::ROTR, VT))
5708 return SDValue();
5709 SDNodeFlags Flags;
5710 Flags.setExact(true);
5711 // SREM: (rotr (add (mul N, P), A), K)
5712 Op0 = DAG.getNode(ISD::ROTR, DL, VT, Op0, KVal, Flags);
5713 Created.push_back(Op0.getNode());
5714 }
5715
5716 // SREM: (setule/setugt (rotr (add (mul N, P), A), K), Q)
5717 SDValue Fold =
5718 DAG.getSetCC(DL, SETCCVT, Op0, QVal,
5719 ((Cond == ISD::SETEQ) ? ISD::SETULE : ISD::SETUGT));
5720
5721 // If we didn't have lanes with INT_MIN divisor, then we're done.
5722 if (!HadIntMinDivisor)
5723 return Fold;
5724
5725 // That fold is only valid for positive divisors, which effectively means
5726 // it is invalid for INT_MIN divisors. So if we have such a lane,
5727 // we must fix-up results for said lanes.
5728 assert(VT.isVector() && "Can/should only get here for vectors.");
5729
5730 if (!isOperationLegalOrCustom(ISD::SETEQ, VT) ||
5731 !isOperationLegalOrCustom(ISD::AND, VT) ||
5732 !isOperationLegalOrCustom(Cond, VT) ||
5733 !isOperationLegalOrCustom(ISD::VSELECT, VT))
5734 return SDValue();
5735
5736 Created.push_back(Fold.getNode());
5737
5738 SDValue IntMin = DAG.getConstant(
5739 APInt::getSignedMinValue(SVT.getScalarSizeInBits()), DL, VT);
5740 SDValue IntMax = DAG.getConstant(
5741 APInt::getSignedMaxValue(SVT.getScalarSizeInBits()), DL, VT);
5742 SDValue Zero =
5743 DAG.getConstant(APInt::getNullValue(SVT.getScalarSizeInBits()), DL, VT);
5744
5745 // Which lanes had INT_MIN divisors? Divisor is constant, so const-folded.
5746 SDValue DivisorIsIntMin = DAG.getSetCC(DL, SETCCVT, D, IntMin, ISD::SETEQ);
5747 Created.push_back(DivisorIsIntMin.getNode());
5748
5749 // (N s% INT_MIN) ==/!= 0 <--> (N & INT_MAX) ==/!= 0
5750 SDValue Masked = DAG.getNode(ISD::AND, DL, VT, N, IntMax);
5751 Created.push_back(Masked.getNode());
5752 SDValue MaskedIsZero = DAG.getSetCC(DL, SETCCVT, Masked, Zero, Cond);
5753 Created.push_back(MaskedIsZero.getNode());
5754
5755 // To produce final result we need to blend 2 vectors: 'SetCC' and
5756 // 'MaskedIsZero'. If the divisor for the channel was *NOT* INT_MIN, we pick
5757 // from 'Fold', else pick from 'MaskedIsZero'. Since 'DivisorIsIntMin' is
5758 // constant-folded, select can get lowered to a shuffle with constant mask.
5759 SDValue Blended =
5760 DAG.getNode(ISD::VSELECT, DL, VT, DivisorIsIntMin, MaskedIsZero, Fold);
5761
5762 return Blended;
5763 }
5764
5765 bool TargetLowering::
5766 verifyReturnAddressArgumentIsConstant(SDValue Op, SelectionDAG &DAG) const {
5767 if (!isa<ConstantSDNode>(Op.getOperand(0))) {
5768 DAG.getContext()->emitError("argument to '__builtin_return_address' must "
5769 "be a constant integer");
5770 return true;
5771 }
5772
5773 return false;
5774 }
5775
5776 SDValue TargetLowering::getNegatedExpression(SDValue Op, SelectionDAG &DAG,
5777 bool LegalOps, bool OptForSize,
5778 NegatibleCost &Cost,
5779 unsigned Depth) const {
5780 // fneg is removable even if it has multiple uses.
5781 if (Op.getOpcode() == ISD::FNEG) {
5782 Cost = NegatibleCost::Cheaper;
5783 return Op.getOperand(0);
5784 }
5785
5786 // Don't recurse exponentially.
5787 if (Depth > SelectionDAG::MaxRecursionDepth)
5788 return SDValue();
5789
5790 // Pre-increment recursion depth for use in recursive calls.
5791 ++Depth;
5792 const SDNodeFlags Flags = Op->getFlags();
5793 const TargetOptions &Options = DAG.getTarget().Options;
5794 EVT VT = Op.getValueType();
5795 unsigned Opcode = Op.getOpcode();
5796
5797 // Don't allow anything with multiple uses unless we know it is free.
5798 if (!Op.hasOneUse() && Opcode != ISD::ConstantFP) {
5799 bool IsFreeExtend = Opcode == ISD::FP_EXTEND &&
5800 isFPExtFree(VT, Op.getOperand(0).getValueType());
5801 if (!IsFreeExtend)
5802 return SDValue();
5803 }
5804
5805 auto RemoveDeadNode = [&](SDValue N) {
5806 if (N && N.getNode()->use_empty())
5807 DAG.RemoveDeadNode(N.getNode());
5808 };
5809
5810 SDLoc DL(Op);
5811
5812 switch (Opcode) {
5813 case ISD::ConstantFP: {
5814 // Don't invert constant FP values after legalization unless the target says
5815 // the negated constant is legal.
5816 bool IsOpLegal =
5817 isOperationLegal(ISD::ConstantFP, VT) ||
5818 isFPImmLegal(neg(cast<ConstantFPSDNode>(Op)->getValueAPF()), VT,
5819 OptForSize);
5820
5821 if (LegalOps && !IsOpLegal)
5822 break;
5823
5824 APFloat V = cast<ConstantFPSDNode>(Op)->getValueAPF();
5825 V.changeSign();
5826 SDValue CFP = DAG.getConstantFP(V, DL, VT);
5827
5828     // If we already have uses of the negated floating-point constant, it is
5829     // free to negate it even if it has multiple uses.
5830 if (!Op.hasOneUse() && CFP.use_empty())
5831 break;
5832 Cost = NegatibleCost::Neutral;
5833 return CFP;
5834 }
5835 case ISD::BUILD_VECTOR: {
5836 // Only permit BUILD_VECTOR of constants.
5837 if (llvm::any_of(Op->op_values(), [&](SDValue N) {
5838 return !N.isUndef() && !isa<ConstantFPSDNode>(N);
5839 }))
5840 break;
5841
5842 bool IsOpLegal =
5843 (isOperationLegal(ISD::ConstantFP, VT) &&
5844 isOperationLegal(ISD::BUILD_VECTOR, VT)) ||
5845 llvm::all_of(Op->op_values(), [&](SDValue N) {
5846 return N.isUndef() ||
5847 isFPImmLegal(neg(cast<ConstantFPSDNode>(N)->getValueAPF()), VT,
5848 OptForSize);
5849 });
5850
5851 if (LegalOps && !IsOpLegal)
5852 break;
5853
5854 SmallVector<SDValue, 4> Ops;
5855 for (SDValue C : Op->op_values()) {
5856 if (C.isUndef()) {
5857 Ops.push_back(C);
5858 continue;
5859 }
5860 APFloat V = cast<ConstantFPSDNode>(C)->getValueAPF();
5861 V.changeSign();
5862 Ops.push_back(DAG.getConstantFP(V, DL, C.getValueType()));
5863 }
5864 Cost = NegatibleCost::Neutral;
5865 return DAG.getBuildVector(VT, DL, Ops);
5866 }
5867 case ISD::FADD: {
5868 if (!Options.NoSignedZerosFPMath && !Flags.hasNoSignedZeros())
5869 break;
5870
5871 // After operation legalization, it might not be legal to create new FSUBs.
5872 if (LegalOps && !isOperationLegalOrCustom(ISD::FSUB, VT))
5873 break;
5874 SDValue X = Op.getOperand(0), Y = Op.getOperand(1);
5875
5876 // fold (fneg (fadd X, Y)) -> (fsub (fneg X), Y)
5877 NegatibleCost CostX = NegatibleCost::Expensive;
5878 SDValue NegX =
5879 getNegatedExpression(X, DAG, LegalOps, OptForSize, CostX, Depth);
5880 // fold (fneg (fadd X, Y)) -> (fsub (fneg Y), X)
5881 NegatibleCost CostY = NegatibleCost::Expensive;
5882 SDValue NegY =
5883 getNegatedExpression(Y, DAG, LegalOps, OptForSize, CostY, Depth);
5884
5885     // Negate the X if its cost is less than or equal to the cost of Y.
5886 if (NegX && (CostX <= CostY)) {
5887 Cost = CostX;
5888 SDValue N = DAG.getNode(ISD::FSUB, DL, VT, NegX, Y, Flags);
5889 if (NegY != N)
5890 RemoveDeadNode(NegY);
5891 return N;
5892 }
5893
5894 // Negate the Y if it is not expensive.
5895 if (NegY) {
5896 Cost = CostY;
5897 SDValue N = DAG.getNode(ISD::FSUB, DL, VT, NegY, X, Flags);
5898 if (NegX != N)
5899 RemoveDeadNode(NegX);
5900 return N;
5901 }
5902 break;
5903 }
5904 case ISD::FSUB: {
5905 // We can't turn -(A-B) into B-A when we honor signed zeros.
5906 if (!Options.NoSignedZerosFPMath && !Flags.hasNoSignedZeros())
5907 break;
5908
5909 SDValue X = Op.getOperand(0), Y = Op.getOperand(1);
5910 // fold (fneg (fsub 0, Y)) -> Y
5911 if (ConstantFPSDNode *C = isConstOrConstSplatFP(X, /*AllowUndefs*/ true))
5912 if (C->isZero()) {
5913 Cost = NegatibleCost::Cheaper;
5914 return Y;
5915 }
5916
5917 // fold (fneg (fsub X, Y)) -> (fsub Y, X)
5918 Cost = NegatibleCost::Neutral;
5919 return DAG.getNode(ISD::FSUB, DL, VT, Y, X, Flags);
5920 }
5921 case ISD::FMUL:
5922 case ISD::FDIV: {
5923 SDValue X = Op.getOperand(0), Y = Op.getOperand(1);
5924
5925 // fold (fneg (fmul X, Y)) -> (fmul (fneg X), Y)
5926 NegatibleCost CostX = NegatibleCost::Expensive;
5927 SDValue NegX =
5928 getNegatedExpression(X, DAG, LegalOps, OptForSize, CostX, Depth);
5929 // fold (fneg (fmul X, Y)) -> (fmul X, (fneg Y))
5930 NegatibleCost CostY = NegatibleCost::Expensive;
5931 SDValue NegY =
5932 getNegatedExpression(Y, DAG, LegalOps, OptForSize, CostY, Depth);
5933
5934     // Negate the X if its cost is less than or equal to the cost of Y.
5935 if (NegX && (CostX <= CostY)) {
5936 Cost = CostX;
5937 SDValue N = DAG.getNode(Opcode, DL, VT, NegX, Y, Flags);
5938 if (NegY != N)
5939 RemoveDeadNode(NegY);
5940 return N;
5941 }
5942
5943 // Ignore X * 2.0 because that is expected to be canonicalized to X + X.
5944 if (auto *C = isConstOrConstSplatFP(Op.getOperand(1)))
5945 if (C->isExactlyValue(2.0) && Op.getOpcode() == ISD::FMUL)
5946 break;
5947
5948 // Negate the Y if it is not expensive.
5949 if (NegY) {
5950 Cost = CostY;
5951 SDValue N = DAG.getNode(Opcode, DL, VT, X, NegY, Flags);
5952 if (NegX != N)
5953 RemoveDeadNode(NegX);
5954 return N;
5955 }
5956 break;
5957 }
5958 case ISD::FMA:
5959 case ISD::FMAD: {
5960 if (!Options.NoSignedZerosFPMath && !Flags.hasNoSignedZeros())
5961 break;
5962
5963 SDValue X = Op.getOperand(0), Y = Op.getOperand(1), Z = Op.getOperand(2);
5964 NegatibleCost CostZ = NegatibleCost::Expensive;
5965 SDValue NegZ =
5966 getNegatedExpression(Z, DAG, LegalOps, OptForSize, CostZ, Depth);
5967     // Give up if we fail to negate Z.
5968 if (!NegZ)
5969 break;
5970
5971 // fold (fneg (fma X, Y, Z)) -> (fma (fneg X), Y, (fneg Z))
5972 NegatibleCost CostX = NegatibleCost::Expensive;
5973 SDValue NegX =
5974 getNegatedExpression(X, DAG, LegalOps, OptForSize, CostX, Depth);
5975 // fold (fneg (fma X, Y, Z)) -> (fma X, (fneg Y), (fneg Z))
5976 NegatibleCost CostY = NegatibleCost::Expensive;
5977 SDValue NegY =
5978 getNegatedExpression(Y, DAG, LegalOps, OptForSize, CostY, Depth);
5979
5980     // Negate the X if its cost is less than or equal to the cost of Y.
5981 if (NegX && (CostX <= CostY)) {
5982 Cost = std::min(CostX, CostZ);
5983 SDValue N = DAG.getNode(Opcode, DL, VT, NegX, Y, NegZ, Flags);
5984 if (NegY != N)
5985 RemoveDeadNode(NegY);
5986 return N;
5987 }
5988
5989 // Negate the Y if it is not expensive.
5990 if (NegY) {
5991 Cost = std::min(CostY, CostZ);
5992 SDValue N = DAG.getNode(Opcode, DL, VT, X, NegY, NegZ, Flags);
5993 if (NegX != N)
5994 RemoveDeadNode(NegX);
5995 return N;
5996 }
5997 break;
5998 }
5999
6000 case ISD::FP_EXTEND:
6001 case ISD::FSIN:
6002 if (SDValue NegV = getNegatedExpression(Op.getOperand(0), DAG, LegalOps,
6003 OptForSize, Cost, Depth))
6004 return DAG.getNode(Opcode, DL, VT, NegV);
6005 break;
6006 case ISD::FP_ROUND:
6007 if (SDValue NegV = getNegatedExpression(Op.getOperand(0), DAG, LegalOps,
6008 OptForSize, Cost, Depth))
6009 return DAG.getNode(ISD::FP_ROUND, DL, VT, NegV, Op.getOperand(1));
6010 break;
6011 }
6012
6013 return SDValue();
6014 }
6015
6016 //===----------------------------------------------------------------------===//
6017 // Legalization Utilities
6018 //===----------------------------------------------------------------------===//
6019
6020 bool TargetLowering::expandMUL_LOHI(unsigned Opcode, EVT VT, const SDLoc &dl,
6021 SDValue LHS, SDValue RHS,
6022 SmallVectorImpl<SDValue> &Result,
6023 EVT HiLoVT, SelectionDAG &DAG,
6024 MulExpansionKind Kind, SDValue LL,
6025 SDValue LH, SDValue RL, SDValue RH) const {
6026 assert(Opcode == ISD::MUL || Opcode == ISD::UMUL_LOHI ||
6027 Opcode == ISD::SMUL_LOHI);
6028
6029 bool HasMULHS = (Kind == MulExpansionKind::Always) ||
6030 isOperationLegalOrCustom(ISD::MULHS, HiLoVT);
6031 bool HasMULHU = (Kind == MulExpansionKind::Always) ||
6032 isOperationLegalOrCustom(ISD::MULHU, HiLoVT);
6033 bool HasSMUL_LOHI = (Kind == MulExpansionKind::Always) ||
6034 isOperationLegalOrCustom(ISD::SMUL_LOHI, HiLoVT);
6035 bool HasUMUL_LOHI = (Kind == MulExpansionKind::Always) ||
6036 isOperationLegalOrCustom(ISD::UMUL_LOHI, HiLoVT);
6037
6038 if (!HasMULHU && !HasMULHS && !HasUMUL_LOHI && !HasSMUL_LOHI)
6039 return false;
6040
6041 unsigned OuterBitSize = VT.getScalarSizeInBits();
6042 unsigned InnerBitSize = HiLoVT.getScalarSizeInBits();
6043 unsigned LHSSB = DAG.ComputeNumSignBits(LHS);
6044 unsigned RHSSB = DAG.ComputeNumSignBits(RHS);
6045
6046 // LL, LH, RL, and RH must be either all NULL or all set to a value.
6047 assert((LL.getNode() && LH.getNode() && RL.getNode() && RH.getNode()) ||
6048 (!LL.getNode() && !LH.getNode() && !RL.getNode() && !RH.getNode()));
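  // A sketch of the decomposition the rest of this function materializes:
  // with b = 2^InnerBitSize, LHS = LH*b + LL and RHS = RH*b + RL, so
  //   LHS * RHS = (LH*RH)*b^2 + (LH*RL + LL*RH)*b + LL*RL.
  // For ISD::MUL only the low 2*InnerBitSize bits are required, so the LH*RH
  // term and any carry out of the middle partial products can be dropped.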
6049
6050 SDVTList VTs = DAG.getVTList(HiLoVT, HiLoVT);
6051 auto MakeMUL_LOHI = [&](SDValue L, SDValue R, SDValue &Lo, SDValue &Hi,
6052 bool Signed) -> bool {
6053 if ((Signed && HasSMUL_LOHI) || (!Signed && HasUMUL_LOHI)) {
6054 Lo = DAG.getNode(Signed ? ISD::SMUL_LOHI : ISD::UMUL_LOHI, dl, VTs, L, R);
6055 Hi = SDValue(Lo.getNode(), 1);
6056 return true;
6057 }
6058 if ((Signed && HasMULHS) || (!Signed && HasMULHU)) {
6059 Lo = DAG.getNode(ISD::MUL, dl, HiLoVT, L, R);
6060 Hi = DAG.getNode(Signed ? ISD::MULHS : ISD::MULHU, dl, HiLoVT, L, R);
6061 return true;
6062 }
6063 return false;
6064 };
6065
6066 SDValue Lo, Hi;
6067
6068 if (!LL.getNode() && !RL.getNode() &&
6069 isOperationLegalOrCustom(ISD::TRUNCATE, HiLoVT)) {
6070 LL = DAG.getNode(ISD::TRUNCATE, dl, HiLoVT, LHS);
6071 RL = DAG.getNode(ISD::TRUNCATE, dl, HiLoVT, RHS);
6072 }
6073
6074 if (!LL.getNode())
6075 return false;
6076
6077 APInt HighMask = APInt::getHighBitsSet(OuterBitSize, InnerBitSize);
6078 if (DAG.MaskedValueIsZero(LHS, HighMask) &&
6079 DAG.MaskedValueIsZero(RHS, HighMask)) {
6080 // The inputs are both zero-extended.
6081 if (MakeMUL_LOHI(LL, RL, Lo, Hi, false)) {
6082 Result.push_back(Lo);
6083 Result.push_back(Hi);
6084 if (Opcode != ISD::MUL) {
6085 SDValue Zero = DAG.getConstant(0, dl, HiLoVT);
6086 Result.push_back(Zero);
6087 Result.push_back(Zero);
6088 }
6089 return true;
6090 }
6091 }
6092
6093 if (!VT.isVector() && Opcode == ISD::MUL && LHSSB > InnerBitSize &&
6094 RHSSB > InnerBitSize) {
6095 // The input values are both sign-extended.
6096 // TODO non-MUL case?
6097 if (MakeMUL_LOHI(LL, RL, Lo, Hi, true)) {
6098 Result.push_back(Lo);
6099 Result.push_back(Hi);
6100 return true;
6101 }
6102 }
6103
6104 unsigned ShiftAmount = OuterBitSize - InnerBitSize;
6105 EVT ShiftAmountTy = getShiftAmountTy(VT, DAG.getDataLayout());
6106 if (APInt::getMaxValue(ShiftAmountTy.getSizeInBits()).ult(ShiftAmount)) {
6107 // FIXME getShiftAmountTy does not always return a sensible result when VT
6108 // is an illegal type, and so the type may be too small to fit the shift
6109 // amount. Override it with i32. The shift will have to be legalized.
6110 ShiftAmountTy = MVT::i32;
6111 }
6112 SDValue Shift = DAG.getConstant(ShiftAmount, dl, ShiftAmountTy);
6113
6114 if (!LH.getNode() && !RH.getNode() &&
6115 isOperationLegalOrCustom(ISD::SRL, VT) &&
6116 isOperationLegalOrCustom(ISD::TRUNCATE, HiLoVT)) {
6117 LH = DAG.getNode(ISD::SRL, dl, VT, LHS, Shift);
6118 LH = DAG.getNode(ISD::TRUNCATE, dl, HiLoVT, LH);
6119 RH = DAG.getNode(ISD::SRL, dl, VT, RHS, Shift);
6120 RH = DAG.getNode(ISD::TRUNCATE, dl, HiLoVT, RH);
6121 }
6122
6123 if (!LH.getNode())
6124 return false;
6125
6126 if (!MakeMUL_LOHI(LL, RL, Lo, Hi, false))
6127 return false;
6128
6129 Result.push_back(Lo);
6130
6131 if (Opcode == ISD::MUL) {
6132 RH = DAG.getNode(ISD::MUL, dl, HiLoVT, LL, RH);
6133 LH = DAG.getNode(ISD::MUL, dl, HiLoVT, LH, RL);
6134 Hi = DAG.getNode(ISD::ADD, dl, HiLoVT, Hi, RH);
6135 Hi = DAG.getNode(ISD::ADD, dl, HiLoVT, Hi, LH);
6136 Result.push_back(Hi);
6137 return true;
6138 }
6139
6140 // Compute the full width result.
6141 auto Merge = [&](SDValue Lo, SDValue Hi) -> SDValue {
6142 Lo = DAG.getNode(ISD::ZERO_EXTEND, dl, VT, Lo);
6143 Hi = DAG.getNode(ISD::ZERO_EXTEND, dl, VT, Hi);
6144 Hi = DAG.getNode(ISD::SHL, dl, VT, Hi, Shift);
6145 return DAG.getNode(ISD::OR, dl, VT, Lo, Hi);
6146 };
6147
6148 SDValue Next = DAG.getNode(ISD::ZERO_EXTEND, dl, VT, Hi);
6149 if (!MakeMUL_LOHI(LL, RH, Lo, Hi, false))
6150 return false;
6151
6152 // This is effectively the add part of a multiply-add of half-sized operands,
6153 // so it cannot overflow.
6154 Next = DAG.getNode(ISD::ADD, dl, VT, Next, Merge(Lo, Hi));
6155
6156 if (!MakeMUL_LOHI(LH, RL, Lo, Hi, false))
6157 return false;
6158
6159 SDValue Zero = DAG.getConstant(0, dl, HiLoVT);
6160 EVT BoolType = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT);
6161
6162 bool UseGlue = (isOperationLegalOrCustom(ISD::ADDC, VT) &&
6163 isOperationLegalOrCustom(ISD::ADDE, VT));
6164 if (UseGlue)
6165 Next = DAG.getNode(ISD::ADDC, dl, DAG.getVTList(VT, MVT::Glue), Next,
6166 Merge(Lo, Hi));
6167 else
6168 Next = DAG.getNode(ISD::ADDCARRY, dl, DAG.getVTList(VT, BoolType), Next,
6169 Merge(Lo, Hi), DAG.getConstant(0, dl, BoolType));
6170
6171 SDValue Carry = Next.getValue(1);
6172 Result.push_back(DAG.getNode(ISD::TRUNCATE, dl, HiLoVT, Next));
6173 Next = DAG.getNode(ISD::SRL, dl, VT, Next, Shift);
6174
6175 if (!MakeMUL_LOHI(LH, RH, Lo, Hi, Opcode == ISD::SMUL_LOHI))
6176 return false;
6177
6178 if (UseGlue)
6179 Hi = DAG.getNode(ISD::ADDE, dl, DAG.getVTList(HiLoVT, MVT::Glue), Hi, Zero,
6180 Carry);
6181 else
6182 Hi = DAG.getNode(ISD::ADDCARRY, dl, DAG.getVTList(HiLoVT, BoolType), Hi,
6183 Zero, Carry);
6184
6185 Next = DAG.getNode(ISD::ADD, dl, VT, Next, Merge(Lo, Hi));
6186
6187 if (Opcode == ISD::SMUL_LOHI) {
6188 SDValue NextSub = DAG.getNode(ISD::SUB, dl, VT, Next,
6189 DAG.getNode(ISD::ZERO_EXTEND, dl, VT, RL));
6190 Next = DAG.getSelectCC(dl, LH, Zero, NextSub, Next, ISD::SETLT);
6191
6192 NextSub = DAG.getNode(ISD::SUB, dl, VT, Next,
6193 DAG.getNode(ISD::ZERO_EXTEND, dl, VT, LL));
6194 Next = DAG.getSelectCC(dl, RH, Zero, NextSub, Next, ISD::SETLT);
6195 }
6196
6197 Result.push_back(DAG.getNode(ISD::TRUNCATE, dl, HiLoVT, Next));
6198 Next = DAG.getNode(ISD::SRL, dl, VT, Next, Shift);
6199 Result.push_back(DAG.getNode(ISD::TRUNCATE, dl, HiLoVT, Next));
6200 return true;
6201 }
6202
6203 bool TargetLowering::expandMUL(SDNode *N, SDValue &Lo, SDValue &Hi, EVT HiLoVT,
6204 SelectionDAG &DAG, MulExpansionKind Kind,
6205 SDValue LL, SDValue LH, SDValue RL,
6206 SDValue RH) const {
6207 SmallVector<SDValue, 2> Result;
6208 bool Ok = expandMUL_LOHI(N->getOpcode(), N->getValueType(0), SDLoc(N),
6209 N->getOperand(0), N->getOperand(1), Result, HiLoVT,
6210 DAG, Kind, LL, LH, RL, RH);
6211 if (Ok) {
6212 assert(Result.size() == 2);
6213 Lo = Result[0];
6214 Hi = Result[1];
6215 }
6216 return Ok;
6217 }
6218
6219 // Check that (every element of) Z is undef or not an exact multiple of BW.
6220 static bool isNonZeroModBitWidthOrUndef(SDValue Z, unsigned BW) {
6221 return ISD::matchUnaryPredicate(
6222 Z,
6223 [=](ConstantSDNode *C) { return !C || C->getAPIntValue().urem(BW) != 0; },
6224 true);
6225 }
6226
6227 bool TargetLowering::expandFunnelShift(SDNode *Node, SDValue &Result,
6228 SelectionDAG &DAG) const {
6229 EVT VT = Node->getValueType(0);
6230
6231 if (VT.isVector() && (!isOperationLegalOrCustom(ISD::SHL, VT) ||
6232 !isOperationLegalOrCustom(ISD::SRL, VT) ||
6233 !isOperationLegalOrCustom(ISD::SUB, VT) ||
6234 !isOperationLegalOrCustomOrPromote(ISD::OR, VT)))
6235 return false;
6236
6237 SDValue X = Node->getOperand(0);
6238 SDValue Y = Node->getOperand(1);
6239 SDValue Z = Node->getOperand(2);
6240
6241 unsigned BW = VT.getScalarSizeInBits();
6242 bool IsFSHL = Node->getOpcode() == ISD::FSHL;
6243 SDLoc DL(SDValue(Node, 0));
6244
6245 EVT ShVT = Z.getValueType();
6246
6247 // If a funnel shift in the other direction is more supported, use it.
6248 unsigned RevOpcode = IsFSHL ? ISD::FSHR : ISD::FSHL;
6249 if (!isOperationLegalOrCustom(Node->getOpcode(), VT) &&
6250 isOperationLegalOrCustom(RevOpcode, VT) && isPowerOf2_32(BW)) {
6251 if (isNonZeroModBitWidthOrUndef(Z, BW)) {
6252 // fshl X, Y, Z -> fshr X, Y, -Z
6253 // fshr X, Y, Z -> fshl X, Y, -Z
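      // E.g. for BW = 8: fshl X, Y, 3 == (X << 3) | (Y >> 5) == fshr X, Y, 5,
      // and 5 == -3 (mod 8). The Z % BW != 0 precondition matters because
      // fshl X, Y, 0 yields X while fshr X, Y, 0 yields Y, so a zero amount
      // must not be negated this way.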
6254 SDValue Zero = DAG.getConstant(0, DL, ShVT);
6255       Z = DAG.getNode(ISD::SUB, DL, ShVT, Zero, Z);
6256 } else {
6257 // fshl X, Y, Z -> fshr (srl X, 1), (fshr X, Y, 1), ~Z
6258 // fshr X, Y, Z -> fshl (fshl X, Y, 1), (shl Y, 1), ~Z
6259 SDValue One = DAG.getConstant(1, DL, ShVT);
6260 if (IsFSHL) {
6261 Y = DAG.getNode(RevOpcode, DL, VT, X, Y, One);
6262 X = DAG.getNode(ISD::SRL, DL, VT, X, One);
6263 } else {
6264 X = DAG.getNode(RevOpcode, DL, VT, X, Y, One);
6265 Y = DAG.getNode(ISD::SHL, DL, VT, Y, One);
6266 }
6267 Z = DAG.getNOT(DL, Z, ShVT);
6268 }
6269 Result = DAG.getNode(RevOpcode, DL, VT, X, Y, Z);
6270 return true;
6271 }
6272
6273 SDValue ShX, ShY;
6274 SDValue ShAmt, InvShAmt;
6275 if (isNonZeroModBitWidthOrUndef(Z, BW)) {
6276 // fshl: X << C | Y >> (BW - C)
6277 // fshr: X << (BW - C) | Y >> C
6278 // where C = Z % BW is not zero
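    // E.g. for BW = 8 and Z = 11, C = 11 % 8 = 3, so
    //   fshl X, Y, 11 == (X << 3) | (Y >> 5)
    //   fshr X, Y, 11 == (X << 5) | (Y >> 3)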
6279 SDValue BitWidthC = DAG.getConstant(BW, DL, ShVT);
6280 ShAmt = DAG.getNode(ISD::UREM, DL, ShVT, Z, BitWidthC);
6281 InvShAmt = DAG.getNode(ISD::SUB, DL, ShVT, BitWidthC, ShAmt);
6282 ShX = DAG.getNode(ISD::SHL, DL, VT, X, IsFSHL ? ShAmt : InvShAmt);
6283 ShY = DAG.getNode(ISD::SRL, DL, VT, Y, IsFSHL ? InvShAmt : ShAmt);
6284 } else {
6285 // fshl: X << (Z % BW) | Y >> 1 >> (BW - 1 - (Z % BW))
6286 // fshr: X << 1 << (BW - 1 - (Z % BW)) | Y >> (Z % BW)
6287 SDValue Mask = DAG.getConstant(BW - 1, DL, ShVT);
6288 if (isPowerOf2_32(BW)) {
6289 // Z % BW -> Z & (BW - 1)
6290 ShAmt = DAG.getNode(ISD::AND, DL, ShVT, Z, Mask);
6291 // (BW - 1) - (Z % BW) -> ~Z & (BW - 1)
6292 InvShAmt = DAG.getNode(ISD::AND, DL, ShVT, DAG.getNOT(DL, Z, ShVT), Mask);
6293 } else {
6294 SDValue BitWidthC = DAG.getConstant(BW, DL, ShVT);
6295 ShAmt = DAG.getNode(ISD::UREM, DL, ShVT, Z, BitWidthC);
6296 InvShAmt = DAG.getNode(ISD::SUB, DL, ShVT, Mask, ShAmt);
6297 }
6298
6299 SDValue One = DAG.getConstant(1, DL, ShVT);
6300 if (IsFSHL) {
6301 ShX = DAG.getNode(ISD::SHL, DL, VT, X, ShAmt);
6302 SDValue ShY1 = DAG.getNode(ISD::SRL, DL, VT, Y, One);
6303 ShY = DAG.getNode(ISD::SRL, DL, VT, ShY1, InvShAmt);
6304 } else {
6305 SDValue ShX1 = DAG.getNode(ISD::SHL, DL, VT, X, One);
6306 ShX = DAG.getNode(ISD::SHL, DL, VT, ShX1, InvShAmt);
6307 ShY = DAG.getNode(ISD::SRL, DL, VT, Y, ShAmt);
6308 }
6309 }
6310 Result = DAG.getNode(ISD::OR, DL, VT, ShX, ShY);
6311 return true;
6312 }
6313
6314 // TODO: Merge with expandFunnelShift.
6315 bool TargetLowering::expandROT(SDNode *Node, bool AllowVectorOps,
6316 SDValue &Result, SelectionDAG &DAG) const {
6317 EVT VT = Node->getValueType(0);
6318 unsigned EltSizeInBits = VT.getScalarSizeInBits();
6319 bool IsLeft = Node->getOpcode() == ISD::ROTL;
6320 SDValue Op0 = Node->getOperand(0);
6321 SDValue Op1 = Node->getOperand(1);
6322 SDLoc DL(SDValue(Node, 0));
6323
6324 EVT ShVT = Op1.getValueType();
6325 SDValue Zero = DAG.getConstant(0, DL, ShVT);
6326
6327 // If a rotate in the other direction is supported, use it.
6328 unsigned RevRot = IsLeft ? ISD::ROTR : ISD::ROTL;
6329 if (isOperationLegalOrCustom(RevRot, VT) && isPowerOf2_32(EltSizeInBits)) {
6330 SDValue Sub = DAG.getNode(ISD::SUB, DL, ShVT, Zero, Op1);
6331 Result = DAG.getNode(RevRot, DL, VT, Op0, Sub);
6332 return true;
6333 }
6334
6335 if (!AllowVectorOps && VT.isVector() &&
6336 (!isOperationLegalOrCustom(ISD::SHL, VT) ||
6337 !isOperationLegalOrCustom(ISD::SRL, VT) ||
6338 !isOperationLegalOrCustom(ISD::SUB, VT) ||
6339 !isOperationLegalOrCustomOrPromote(ISD::OR, VT) ||
6340 !isOperationLegalOrCustomOrPromote(ISD::AND, VT)))
6341 return false;
6342
6343 unsigned ShOpc = IsLeft ? ISD::SHL : ISD::SRL;
6344 unsigned HsOpc = IsLeft ? ISD::SRL : ISD::SHL;
6345 SDValue BitWidthMinusOneC = DAG.getConstant(EltSizeInBits - 1, DL, ShVT);
6346 SDValue ShVal;
6347 SDValue HsVal;
6348 if (isPowerOf2_32(EltSizeInBits)) {
6349 // (rotl x, c) -> x << (c & (w - 1)) | x >> (-c & (w - 1))
6350 // (rotr x, c) -> x >> (c & (w - 1)) | x << (-c & (w - 1))
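    // E.g. rotl x, 3 for w = 8: 3 & 7 == 3 and -3 & 7 == 5, giving
    // (x << 3) | (x >> 5). The masking also makes an amount of 0 safe: both
    // sides degenerate to shifts by 0 instead of an out-of-range shift by w.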
6351 SDValue NegOp1 = DAG.getNode(ISD::SUB, DL, ShVT, Zero, Op1);
6352 SDValue ShAmt = DAG.getNode(ISD::AND, DL, ShVT, Op1, BitWidthMinusOneC);
6353 ShVal = DAG.getNode(ShOpc, DL, VT, Op0, ShAmt);
6354 SDValue HsAmt = DAG.getNode(ISD::AND, DL, ShVT, NegOp1, BitWidthMinusOneC);
6355 HsVal = DAG.getNode(HsOpc, DL, VT, Op0, HsAmt);
6356 } else {
6357 // (rotl x, c) -> x << (c % w) | x >> 1 >> (w - 1 - (c % w))
6358 // (rotr x, c) -> x >> (c % w) | x << 1 << (w - 1 - (c % w))
6359 SDValue BitWidthC = DAG.getConstant(EltSizeInBits, DL, ShVT);
6360 SDValue ShAmt = DAG.getNode(ISD::UREM, DL, ShVT, Op1, BitWidthC);
6361 ShVal = DAG.getNode(ShOpc, DL, VT, Op0, ShAmt);
6362 SDValue HsAmt = DAG.getNode(ISD::SUB, DL, ShVT, BitWidthMinusOneC, ShAmt);
6363 SDValue One = DAG.getConstant(1, DL, ShVT);
6364 HsVal =
6365 DAG.getNode(HsOpc, DL, VT, DAG.getNode(HsOpc, DL, VT, Op0, One), HsAmt);
6366 }
6367 Result = DAG.getNode(ISD::OR, DL, VT, ShVal, HsVal);
6368 return true;
6369 }
6370
6371 bool TargetLowering::expandFP_TO_SINT(SDNode *Node, SDValue &Result,
6372 SelectionDAG &DAG) const {
6373 unsigned OpNo = Node->isStrictFPOpcode() ? 1 : 0;
6374 SDValue Src = Node->getOperand(OpNo);
6375 EVT SrcVT = Src.getValueType();
6376 EVT DstVT = Node->getValueType(0);
6377 SDLoc dl(SDValue(Node, 0));
6378
6379 // FIXME: Only f32 to i64 conversions are supported.
6380 if (SrcVT != MVT::f32 || DstVT != MVT::i64)
6381 return false;
6382
6383 if (Node->isStrictFPOpcode())
6384     // When a NaN is converted to an integer, a trap is allowed. We can't
6385 // use this expansion here because it would eliminate that trap. Other
6386 // traps are also allowed and cannot be eliminated. See
6387 // IEEE 754-2008 sec 5.8.
6388 return false;
6389
6390 // Expand f32 -> i64 conversion
6391 // This algorithm comes from compiler-rt's implementation of fixsfdi:
6392 // https://github.com/llvm/llvm-project/blob/master/compiler-rt/lib/builtins/fixsfdi.c
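  // As a worked example: Src = 12.5f has bit pattern 0x41480000, so
  // ExponentBits = 0x82, Exponent = 0x82 - 127 = 3, Sign = 0, and
  // R = mantissa | implicit bit = 0x00C80000. Exponent (3) is less than
  // ExponentLoBit (23), so the result is R >> (23 - 3) = 12.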
6393 unsigned SrcEltBits = SrcVT.getScalarSizeInBits();
6394 EVT IntVT = SrcVT.changeTypeToInteger();
6395 EVT IntShVT = getShiftAmountTy(IntVT, DAG.getDataLayout());
6396
6397 SDValue ExponentMask = DAG.getConstant(0x7F800000, dl, IntVT);
6398 SDValue ExponentLoBit = DAG.getConstant(23, dl, IntVT);
6399 SDValue Bias = DAG.getConstant(127, dl, IntVT);
6400 SDValue SignMask = DAG.getConstant(APInt::getSignMask(SrcEltBits), dl, IntVT);
6401 SDValue SignLowBit = DAG.getConstant(SrcEltBits - 1, dl, IntVT);
6402 SDValue MantissaMask = DAG.getConstant(0x007FFFFF, dl, IntVT);
6403
6404 SDValue Bits = DAG.getNode(ISD::BITCAST, dl, IntVT, Src);
6405
6406 SDValue ExponentBits = DAG.getNode(
6407 ISD::SRL, dl, IntVT, DAG.getNode(ISD::AND, dl, IntVT, Bits, ExponentMask),
6408 DAG.getZExtOrTrunc(ExponentLoBit, dl, IntShVT));
6409 SDValue Exponent = DAG.getNode(ISD::SUB, dl, IntVT, ExponentBits, Bias);
6410
6411 SDValue Sign = DAG.getNode(ISD::SRA, dl, IntVT,
6412 DAG.getNode(ISD::AND, dl, IntVT, Bits, SignMask),
6413 DAG.getZExtOrTrunc(SignLowBit, dl, IntShVT));
6414 Sign = DAG.getSExtOrTrunc(Sign, dl, DstVT);
6415
6416 SDValue R = DAG.getNode(ISD::OR, dl, IntVT,
6417 DAG.getNode(ISD::AND, dl, IntVT, Bits, MantissaMask),
6418 DAG.getConstant(0x00800000, dl, IntVT));
6419
6420 R = DAG.getZExtOrTrunc(R, dl, DstVT);
6421
6422 R = DAG.getSelectCC(
6423 dl, Exponent, ExponentLoBit,
6424 DAG.getNode(ISD::SHL, dl, DstVT, R,
6425 DAG.getZExtOrTrunc(
6426 DAG.getNode(ISD::SUB, dl, IntVT, Exponent, ExponentLoBit),
6427 dl, IntShVT)),
6428 DAG.getNode(ISD::SRL, dl, DstVT, R,
6429 DAG.getZExtOrTrunc(
6430 DAG.getNode(ISD::SUB, dl, IntVT, ExponentLoBit, Exponent),
6431 dl, IntShVT)),
6432 ISD::SETGT);
6433
6434 SDValue Ret = DAG.getNode(ISD::SUB, dl, DstVT,
6435 DAG.getNode(ISD::XOR, dl, DstVT, R, Sign), Sign);
6436
6437 Result = DAG.getSelectCC(dl, Exponent, DAG.getConstant(0, dl, IntVT),
6438 DAG.getConstant(0, dl, DstVT), Ret, ISD::SETLT);
6439 return true;
6440 }
6441
6442 bool TargetLowering::expandFP_TO_UINT(SDNode *Node, SDValue &Result,
6443 SDValue &Chain,
6444 SelectionDAG &DAG) const {
6445 SDLoc dl(SDValue(Node, 0));
6446 unsigned OpNo = Node->isStrictFPOpcode() ? 1 : 0;
6447 SDValue Src = Node->getOperand(OpNo);
6448
6449 EVT SrcVT = Src.getValueType();
6450 EVT DstVT = Node->getValueType(0);
6451 EVT SetCCVT =
6452 getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), SrcVT);
6453 EVT DstSetCCVT =
6454 getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), DstVT);
6455
6456 // Only expand vector types if we have the appropriate vector bit operations.
6457 unsigned SIntOpcode = Node->isStrictFPOpcode() ? ISD::STRICT_FP_TO_SINT :
6458 ISD::FP_TO_SINT;
6459 if (DstVT.isVector() && (!isOperationLegalOrCustom(SIntOpcode, DstVT) ||
6460 !isOperationLegalOrCustomOrPromote(ISD::XOR, SrcVT)))
6461 return false;
6462
6463   // If the maximum float value is smaller than the signed integer range,
6464 // the destination signmask can't be represented by the float, so we can
6465 // just use FP_TO_SINT directly.
6466 const fltSemantics &APFSem = DAG.EVTToAPFloatSemantics(SrcVT);
6467 APFloat APF(APFSem, APInt::getNullValue(SrcVT.getScalarSizeInBits()));
6468 APInt SignMask = APInt::getSignMask(DstVT.getScalarSizeInBits());
6469 if (APFloat::opOverflow &
6470 APF.convertFromAPInt(SignMask, false, APFloat::rmNearestTiesToEven)) {
6471 if (Node->isStrictFPOpcode()) {
6472 Result = DAG.getNode(ISD::STRICT_FP_TO_SINT, dl, { DstVT, MVT::Other },
6473 { Node->getOperand(0), Src });
6474 Chain = Result.getValue(1);
6475 } else
6476 Result = DAG.getNode(ISD::FP_TO_SINT, dl, DstVT, Src);
6477 return true;
6478 }
6479
6480 SDValue Cst = DAG.getConstantFP(APF, dl, SrcVT);
6481 SDValue Sel;
6482
6483 if (Node->isStrictFPOpcode()) {
6484 Sel = DAG.getSetCC(dl, SetCCVT, Src, Cst, ISD::SETLT,
6485 Node->getOperand(0), /*IsSignaling*/ true);
6486 Chain = Sel.getValue(1);
6487 } else {
6488 Sel = DAG.getSetCC(dl, SetCCVT, Src, Cst, ISD::SETLT);
6489 }
6490
6491 bool Strict = Node->isStrictFPOpcode() ||
6492 shouldUseStrictFP_TO_INT(SrcVT, DstVT, /*IsSigned*/ false);
6493
6494 if (Strict) {
6495     // Expand based on the maximum range of FP_TO_SINT: if the value exceeds the
6496     // signmask, offset it first (the result of which should be fully representable).
6497 // Sel = Src < 0x8000000000000000
6498 // FltOfs = select Sel, 0, 0x8000000000000000
6499 // IntOfs = select Sel, 0, 0x8000000000000000
6500 // Result = fp_to_sint(Src - FltOfs) ^ IntOfs
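    // E.g. for f64 -> i64 with Src = 2^63 + 4096.0: Sel is false, so
    // FltOfs = 2^63 and IntOfs = 0x8000000000000000. fp_to_sint(Src - FltOfs)
    // is 4096, and 4096 ^ IntOfs reassembles the expected unsigned value
    // 2^63 + 4096.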
6501
6502 // TODO: Should any fast-math-flags be set for the FSUB?
6503 SDValue FltOfs = DAG.getSelect(dl, SrcVT, Sel,
6504 DAG.getConstantFP(0.0, dl, SrcVT), Cst);
6505 Sel = DAG.getBoolExtOrTrunc(Sel, dl, DstSetCCVT, DstVT);
6506 SDValue IntOfs = DAG.getSelect(dl, DstVT, Sel,
6507 DAG.getConstant(0, dl, DstVT),
6508 DAG.getConstant(SignMask, dl, DstVT));
6509 SDValue SInt;
6510 if (Node->isStrictFPOpcode()) {
6511 SDValue Val = DAG.getNode(ISD::STRICT_FSUB, dl, { SrcVT, MVT::Other },
6512 { Chain, Src, FltOfs });
6513 SInt = DAG.getNode(ISD::STRICT_FP_TO_SINT, dl, { DstVT, MVT::Other },
6514 { Val.getValue(1), Val });
6515 Chain = SInt.getValue(1);
6516 } else {
6517 SDValue Val = DAG.getNode(ISD::FSUB, dl, SrcVT, Src, FltOfs);
6518 SInt = DAG.getNode(ISD::FP_TO_SINT, dl, DstVT, Val);
6519 }
6520 Result = DAG.getNode(ISD::XOR, dl, DstVT, SInt, IntOfs);
6521 } else {
6522 // Expand based on maximum range of FP_TO_SINT:
6523 // True = fp_to_sint(Src)
6524 // False = 0x8000000000000000 + fp_to_sint(Src - 0x8000000000000000)
6525 // Result = select (Src < 0x8000000000000000), True, False
6526
6527 SDValue True = DAG.getNode(ISD::FP_TO_SINT, dl, DstVT, Src);
6528 // TODO: Should any fast-math-flags be set for the FSUB?
6529 SDValue False = DAG.getNode(ISD::FP_TO_SINT, dl, DstVT,
6530 DAG.getNode(ISD::FSUB, dl, SrcVT, Src, Cst));
6531 False = DAG.getNode(ISD::XOR, dl, DstVT, False,
6532 DAG.getConstant(SignMask, dl, DstVT));
6533 Sel = DAG.getBoolExtOrTrunc(Sel, dl, DstSetCCVT, DstVT);
6534 Result = DAG.getSelect(dl, DstVT, Sel, True, False);
6535 }
6536 return true;
6537 }
6538
6539 bool TargetLowering::expandUINT_TO_FP(SDNode *Node, SDValue &Result,
6540 SDValue &Chain,
6541 SelectionDAG &DAG) const {
6542   // This transform is not correct for converting 0 when the rounding mode is
6543   // set to round toward negative infinity, which will produce -0.0. So disable
6544   // it under strictfp.
6545 if (Node->isStrictFPOpcode())
6546 return false;
6547
6548 SDValue Src = Node->getOperand(0);
6549 EVT SrcVT = Src.getValueType();
6550 EVT DstVT = Node->getValueType(0);
6551
6552 if (SrcVT.getScalarType() != MVT::i64 || DstVT.getScalarType() != MVT::f64)
6553 return false;
6554
6555 // Only expand vector types if we have the appropriate vector bit operations.
6556 if (SrcVT.isVector() && (!isOperationLegalOrCustom(ISD::SRL, SrcVT) ||
6557 !isOperationLegalOrCustom(ISD::FADD, DstVT) ||
6558 !isOperationLegalOrCustom(ISD::FSUB, DstVT) ||
6559 !isOperationLegalOrCustomOrPromote(ISD::OR, SrcVT) ||
6560 !isOperationLegalOrCustomOrPromote(ISD::AND, SrcVT)))
6561 return false;
6562
6563 SDLoc dl(SDValue(Node, 0));
6564 EVT ShiftVT = getShiftAmountTy(SrcVT, DAG.getDataLayout());
6565
6566 // Implementation of unsigned i64 to f64 following the algorithm in
6567 // __floatundidf in compiler_rt. This implementation performs rounding
6568 // correctly in all rounding modes with the exception of converting 0
6569 // when rounding toward negative infinity. In that case the fsub will produce
6570 // -0.0. This will be added to +0.0 and produce -0.0 which is incorrect.
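  // In terms of the constants below: 0x4330000000000000 is the double 2^52
  // and 0x4530000000000000 is 2^84, so LoOr bit-casts to exactly 2^52 + Lo
  // and HiOr to exactly 2^84 + Hi * 2^32. The final sum is then
  //   (2^84 + Hi*2^32) - (2^84 + 2^52) + (2^52 + Lo) == Hi*2^32 + Lo
  // with a single rounding step in the concluding fadd.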
6571 SDValue TwoP52 = DAG.getConstant(UINT64_C(0x4330000000000000), dl, SrcVT);
6572 SDValue TwoP84PlusTwoP52 = DAG.getConstantFP(
6573 BitsToDouble(UINT64_C(0x4530000000100000)), dl, DstVT);
6574 SDValue TwoP84 = DAG.getConstant(UINT64_C(0x4530000000000000), dl, SrcVT);
6575 SDValue LoMask = DAG.getConstant(UINT64_C(0x00000000FFFFFFFF), dl, SrcVT);
6576 SDValue HiShift = DAG.getConstant(32, dl, ShiftVT);
6577
6578 SDValue Lo = DAG.getNode(ISD::AND, dl, SrcVT, Src, LoMask);
6579 SDValue Hi = DAG.getNode(ISD::SRL, dl, SrcVT, Src, HiShift);
6580 SDValue LoOr = DAG.getNode(ISD::OR, dl, SrcVT, Lo, TwoP52);
6581 SDValue HiOr = DAG.getNode(ISD::OR, dl, SrcVT, Hi, TwoP84);
6582 SDValue LoFlt = DAG.getBitcast(DstVT, LoOr);
6583 SDValue HiFlt = DAG.getBitcast(DstVT, HiOr);
6584 SDValue HiSub =
6585 DAG.getNode(ISD::FSUB, dl, DstVT, HiFlt, TwoP84PlusTwoP52);
6586 Result = DAG.getNode(ISD::FADD, dl, DstVT, LoFlt, HiSub);
6587 return true;
6588 }
6589
6590 SDValue TargetLowering::expandFMINNUM_FMAXNUM(SDNode *Node,
6591 SelectionDAG &DAG) const {
6592 SDLoc dl(Node);
6593 unsigned NewOp = Node->getOpcode() == ISD::FMINNUM ?
6594 ISD::FMINNUM_IEEE : ISD::FMAXNUM_IEEE;
6595 EVT VT = Node->getValueType(0);
6596 if (isOperationLegalOrCustom(NewOp, VT)) {
6597 SDValue Quiet0 = Node->getOperand(0);
6598 SDValue Quiet1 = Node->getOperand(1);
6599
6600 if (!Node->getFlags().hasNoNaNs()) {
6601       // Insert canonicalizes if we might need to quiet the inputs to get
6602       // correct sNaN behavior.
6603 if (!DAG.isKnownNeverSNaN(Quiet0)) {
6604 Quiet0 = DAG.getNode(ISD::FCANONICALIZE, dl, VT, Quiet0,
6605 Node->getFlags());
6606 }
6607 if (!DAG.isKnownNeverSNaN(Quiet1)) {
6608 Quiet1 = DAG.getNode(ISD::FCANONICALIZE, dl, VT, Quiet1,
6609 Node->getFlags());
6610 }
6611 }
6612
6613 return DAG.getNode(NewOp, dl, VT, Quiet0, Quiet1, Node->getFlags());
6614 }
6615
6616   // If the target has FMINIMUM/FMAXIMUM but not FMINNUM/FMAXNUM, use that
6617   // instead if there are no NaNs.
6618 if (Node->getFlags().hasNoNaNs()) {
6619 unsigned IEEE2018Op =
6620 Node->getOpcode() == ISD::FMINNUM ? ISD::FMINIMUM : ISD::FMAXIMUM;
6621 if (isOperationLegalOrCustom(IEEE2018Op, VT)) {
6622 return DAG.getNode(IEEE2018Op, dl, VT, Node->getOperand(0),
6623 Node->getOperand(1), Node->getFlags());
6624 }
6625 }
6626
6627 // If none of the above worked, but there are no NaNs, then expand to
6628 // a compare/select sequence. This is required for correctness since
6629 // InstCombine might have canonicalized a fcmp+select sequence to a
6630 // FMINNUM/FMAXNUM node. If we were to fall through to the default
6631 // expansion to libcall, we might introduce a link-time dependency
6632 // on libm into a file that originally did not have one.
6633 if (Node->getFlags().hasNoNaNs()) {
6634 ISD::CondCode Pred =
6635 Node->getOpcode() == ISD::FMINNUM ? ISD::SETLT : ISD::SETGT;
6636 SDValue Op1 = Node->getOperand(0);
6637 SDValue Op2 = Node->getOperand(1);
6638 SDValue SelCC = DAG.getSelectCC(dl, Op1, Op2, Op1, Op2, Pred);
6639 // Copy FMF flags, but always set the no-signed-zeros flag
6640 // as this is implied by the FMINNUM/FMAXNUM semantics.
6641 SDNodeFlags Flags = Node->getFlags();
6642 Flags.setNoSignedZeros(true);
6643 SelCC->setFlags(Flags);
6644 return SelCC;
6645 }
6646
6647 return SDValue();
6648 }
6649
6650 bool TargetLowering::expandCTPOP(SDNode *Node, SDValue &Result,
6651 SelectionDAG &DAG) const {
6652 SDLoc dl(Node);
6653 EVT VT = Node->getValueType(0);
6654 EVT ShVT = getShiftAmountTy(VT, DAG.getDataLayout());
6655 SDValue Op = Node->getOperand(0);
6656 unsigned Len = VT.getScalarSizeInBits();
6657 assert(VT.isInteger() && "CTPOP not implemented for this type.");
6658
6659 // TODO: Add support for irregular type lengths.
6660 if (!(Len <= 128 && Len % 8 == 0))
6661 return false;
6662
6663 // Only expand vector types if we have the appropriate vector bit operations.
6664 if (VT.isVector() && (!isOperationLegalOrCustom(ISD::ADD, VT) ||
6665 !isOperationLegalOrCustom(ISD::SUB, VT) ||
6666 !isOperationLegalOrCustom(ISD::SRL, VT) ||
6667 (Len != 8 && !isOperationLegalOrCustom(ISD::MUL, VT)) ||
6668 !isOperationLegalOrCustomOrPromote(ISD::AND, VT)))
6669 return false;
6670
6671 // This is the "best" algorithm from
6672 // http://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel
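  // Worked example for i8 with v = 0b01101101 (five bits set):
  //   v = v - ((v >> 1) & 0x55)          -> 0b01011001 (2-bit field counts)
  //   v = (v & 0x33) + ((v >> 2) & 0x33) -> 0b00100011 (4-bit field counts)
  //   v = (v + (v >> 4)) & 0x0F          -> 0b00000101 == 5
  // For Len > 8, the multiply by 0x01... below sums the per-byte counts into
  // the top byte, which the shift by Len - 8 then extracts.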
6673 SDValue Mask55 =
6674 DAG.getConstant(APInt::getSplat(Len, APInt(8, 0x55)), dl, VT);
6675 SDValue Mask33 =
6676 DAG.getConstant(APInt::getSplat(Len, APInt(8, 0x33)), dl, VT);
6677 SDValue Mask0F =
6678 DAG.getConstant(APInt::getSplat(Len, APInt(8, 0x0F)), dl, VT);
6679 SDValue Mask01 =
6680 DAG.getConstant(APInt::getSplat(Len, APInt(8, 0x01)), dl, VT);
6681
6682 // v = v - ((v >> 1) & 0x55555555...)
6683 Op = DAG.getNode(ISD::SUB, dl, VT, Op,
6684 DAG.getNode(ISD::AND, dl, VT,
6685 DAG.getNode(ISD::SRL, dl, VT, Op,
6686 DAG.getConstant(1, dl, ShVT)),
6687 Mask55));
6688 // v = (v & 0x33333333...) + ((v >> 2) & 0x33333333...)
6689 Op = DAG.getNode(ISD::ADD, dl, VT, DAG.getNode(ISD::AND, dl, VT, Op, Mask33),
6690 DAG.getNode(ISD::AND, dl, VT,
6691 DAG.getNode(ISD::SRL, dl, VT, Op,
6692 DAG.getConstant(2, dl, ShVT)),
6693 Mask33));
6694 // v = (v + (v >> 4)) & 0x0F0F0F0F...
6695 Op = DAG.getNode(ISD::AND, dl, VT,
6696 DAG.getNode(ISD::ADD, dl, VT, Op,
6697 DAG.getNode(ISD::SRL, dl, VT, Op,
6698 DAG.getConstant(4, dl, ShVT))),
6699 Mask0F);
6700 // v = (v * 0x01010101...) >> (Len - 8)
6701 if (Len > 8)
6702 Op =
6703 DAG.getNode(ISD::SRL, dl, VT, DAG.getNode(ISD::MUL, dl, VT, Op, Mask01),
6704 DAG.getConstant(Len - 8, dl, ShVT));
6705
6706 Result = Op;
6707 return true;
6708 }
6709
6710 bool TargetLowering::expandCTLZ(SDNode *Node, SDValue &Result,
6711 SelectionDAG &DAG) const {
6712 SDLoc dl(Node);
6713 EVT VT = Node->getValueType(0);
6714 EVT ShVT = getShiftAmountTy(VT, DAG.getDataLayout());
6715 SDValue Op = Node->getOperand(0);
6716 unsigned NumBitsPerElt = VT.getScalarSizeInBits();
6717
6718 // If the non-ZERO_UNDEF version is supported we can use that instead.
6719 if (Node->getOpcode() == ISD::CTLZ_ZERO_UNDEF &&
6720 isOperationLegalOrCustom(ISD::CTLZ, VT)) {
6721 Result = DAG.getNode(ISD::CTLZ, dl, VT, Op);
6722 return true;
6723 }
6724
6725 // If the ZERO_UNDEF version is supported use that and handle the zero case.
6726 if (isOperationLegalOrCustom(ISD::CTLZ_ZERO_UNDEF, VT)) {
6727 EVT SetCCVT =
6728 getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT);
6729 SDValue CTLZ = DAG.getNode(ISD::CTLZ_ZERO_UNDEF, dl, VT, Op);
6730 SDValue Zero = DAG.getConstant(0, dl, VT);
6731 SDValue SrcIsZero = DAG.getSetCC(dl, SetCCVT, Op, Zero, ISD::SETEQ);
6732 Result = DAG.getNode(ISD::SELECT, dl, VT, SrcIsZero,
6733 DAG.getConstant(NumBitsPerElt, dl, VT), CTLZ);
6734 return true;
6735 }
6736
6737 // Only expand vector types if we have the appropriate vector bit operations.
6738 if (VT.isVector() && (!isPowerOf2_32(NumBitsPerElt) ||
6739 !isOperationLegalOrCustom(ISD::CTPOP, VT) ||
6740 !isOperationLegalOrCustom(ISD::SRL, VT) ||
6741 !isOperationLegalOrCustomOrPromote(ISD::OR, VT)))
6742 return false;
6743
6744 // for now, we do this:
6745 // x = x | (x >> 1);
6746 // x = x | (x >> 2);
6747 // ...
6748   // x = x | (x >> 16);
6749   // x = x | (x >> 32); // for 64-bit input
6750 // return popcount(~x);
6751 //
6752 // Ref: "Hacker's Delight" by Henry Warren
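  // E.g. for i32 x = 0x000F0000, the or-shift ladder smears the leading one
  // downward to give 0x000FFFFF; ~x = 0xFFF00000 then has one set bit per
  // leading zero of the input, so ctpop(~x) == 12 == ctlz(x).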
6753 for (unsigned i = 0; (1U << i) <= (NumBitsPerElt / 2); ++i) {
6754 SDValue Tmp = DAG.getConstant(1ULL << i, dl, ShVT);
6755 Op = DAG.getNode(ISD::OR, dl, VT, Op,
6756 DAG.getNode(ISD::SRL, dl, VT, Op, Tmp));
6757 }
6758 Op = DAG.getNOT(dl, Op, VT);
6759 Result = DAG.getNode(ISD::CTPOP, dl, VT, Op);
6760 return true;
6761 }
6762
6763 bool TargetLowering::expandCTTZ(SDNode *Node, SDValue &Result,
6764 SelectionDAG &DAG) const {
6765 SDLoc dl(Node);
6766 EVT VT = Node->getValueType(0);
6767 SDValue Op = Node->getOperand(0);
6768 unsigned NumBitsPerElt = VT.getScalarSizeInBits();
6769
6770 // If the non-ZERO_UNDEF version is supported we can use that instead.
6771 if (Node->getOpcode() == ISD::CTTZ_ZERO_UNDEF &&
6772 isOperationLegalOrCustom(ISD::CTTZ, VT)) {
6773 Result = DAG.getNode(ISD::CTTZ, dl, VT, Op);
6774 return true;
6775 }
6776
6777 // If the ZERO_UNDEF version is supported use that and handle the zero case.
6778 if (isOperationLegalOrCustom(ISD::CTTZ_ZERO_UNDEF, VT)) {
6779 EVT SetCCVT =
6780 getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT);
6781 SDValue CTTZ = DAG.getNode(ISD::CTTZ_ZERO_UNDEF, dl, VT, Op);
6782 SDValue Zero = DAG.getConstant(0, dl, VT);
6783 SDValue SrcIsZero = DAG.getSetCC(dl, SetCCVT, Op, Zero, ISD::SETEQ);
6784 Result = DAG.getNode(ISD::SELECT, dl, VT, SrcIsZero,
6785 DAG.getConstant(NumBitsPerElt, dl, VT), CTTZ);
6786 return true;
6787 }
6788
6789 // Only expand vector types if we have the appropriate vector bit operations.
6790 if (VT.isVector() && (!isPowerOf2_32(NumBitsPerElt) ||
6791 (!isOperationLegalOrCustom(ISD::CTPOP, VT) &&
6792 !isOperationLegalOrCustom(ISD::CTLZ, VT)) ||
6793 !isOperationLegalOrCustom(ISD::SUB, VT) ||
6794 !isOperationLegalOrCustomOrPromote(ISD::AND, VT) ||
6795 !isOperationLegalOrCustomOrPromote(ISD::XOR, VT)))
6796 return false;
6797
6798 // for now, we use: { return popcount(~x & (x - 1)); }
6799 // unless the target has ctlz but not ctpop, in which case we use:
6800 // { return 32 - nlz(~x & (x-1)); }
6801 // Ref: "Hacker's Delight" by Henry Warren
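  // E.g. for i8 x = 0b00101000: x - 1 = 0b00100111, ~x = 0b11010111, so
  // ~x & (x - 1) = 0b00000111 has one set bit per trailing zero of x, and
  // ctpop yields 3 == cttz(x).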
6802 SDValue Tmp = DAG.getNode(
6803 ISD::AND, dl, VT, DAG.getNOT(dl, Op, VT),
6804 DAG.getNode(ISD::SUB, dl, VT, Op, DAG.getConstant(1, dl, VT)));
6805
6806 // If ISD::CTLZ is legal and CTPOP isn't, then do that instead.
6807 if (isOperationLegal(ISD::CTLZ, VT) && !isOperationLegal(ISD::CTPOP, VT)) {
6808 Result =
6809 DAG.getNode(ISD::SUB, dl, VT, DAG.getConstant(NumBitsPerElt, dl, VT),
6810 DAG.getNode(ISD::CTLZ, dl, VT, Tmp));
6811 return true;
6812 }
6813
6814 Result = DAG.getNode(ISD::CTPOP, dl, VT, Tmp);
6815 return true;
6816 }
6817
6818 bool TargetLowering::expandABS(SDNode *N, SDValue &Result,
6819 SelectionDAG &DAG, bool IsNegative) const {
6820 SDLoc dl(N);
6821 EVT VT = N->getValueType(0);
6822 EVT ShVT = getShiftAmountTy(VT, DAG.getDataLayout());
6823 SDValue Op = N->getOperand(0);
6824
6825 // abs(x) -> smax(x,sub(0,x))
6826 if (!IsNegative && isOperationLegal(ISD::SUB, VT) &&
6827 isOperationLegal(ISD::SMAX, VT)) {
6828 SDValue Zero = DAG.getConstant(0, dl, VT);
6829 Result = DAG.getNode(ISD::SMAX, dl, VT, Op,
6830 DAG.getNode(ISD::SUB, dl, VT, Zero, Op));
6831 return true;
6832 }
6833
6834 // abs(x) -> umin(x,sub(0,x))
6835 if (!IsNegative && isOperationLegal(ISD::SUB, VT) &&
6836 isOperationLegal(ISD::UMIN, VT)) {
6837 SDValue Zero = DAG.getConstant(0, dl, VT);
6838 Result = DAG.getNode(ISD::UMIN, dl, VT, Op,
6839 DAG.getNode(ISD::SUB, dl, VT, Zero, Op));
6840 return true;
6841 }
6842
6843 // 0 - abs(x) -> smin(x, sub(0,x))
6844 if (IsNegative && isOperationLegal(ISD::SUB, VT) &&
6845 isOperationLegal(ISD::SMIN, VT)) {
6846 SDValue Zero = DAG.getConstant(0, dl, VT);
6847 Result = DAG.getNode(ISD::SMIN, dl, VT, Op,
6848 DAG.getNode(ISD::SUB, dl, VT, Zero, Op));
6849 return true;
6850 }
6851
6852 // Only expand vector types if we have the appropriate vector operations.
6853 if (VT.isVector() &&
6854 (!isOperationLegalOrCustom(ISD::SRA, VT) ||
6855 (!IsNegative && !isOperationLegalOrCustom(ISD::ADD, VT)) ||
6856 (IsNegative && !isOperationLegalOrCustom(ISD::SUB, VT)) ||
6857 !isOperationLegalOrCustomOrPromote(ISD::XOR, VT)))
6858 return false;
6859
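  // The fallback below uses the classic sign-mask trick: Shift is 0 for
  // non-negative x and all-ones for negative x. E.g. for i8 x = -5, Shift is
  // 0xFF, so abs(x) = (x + Shift) ^ Shift = (-6) ^ -1 = 5, and the negated
  // form gives 0 - abs(x) = Shift - (x ^ Shift) = -1 - 4 = -5.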
6860 SDValue Shift =
6861 DAG.getNode(ISD::SRA, dl, VT, Op,
6862 DAG.getConstant(VT.getScalarSizeInBits() - 1, dl, ShVT));
6863 if (!IsNegative) {
6864 SDValue Add = DAG.getNode(ISD::ADD, dl, VT, Op, Shift);
6865 Result = DAG.getNode(ISD::XOR, dl, VT, Add, Shift);
6866 } else {
6867 // 0 - abs(x) -> Y = sra (X, size(X)-1); sub (Y, xor (X, Y))
6868 SDValue Xor = DAG.getNode(ISD::XOR, dl, VT, Op, Shift);
6869 Result = DAG.getNode(ISD::SUB, dl, VT, Shift, Xor);
6870 }
6871 return true;
6872 }
6873
6874 std::pair<SDValue, SDValue>
6875 TargetLowering::scalarizeVectorLoad(LoadSDNode *LD,
6876 SelectionDAG &DAG) const {
6877 SDLoc SL(LD);
6878 SDValue Chain = LD->getChain();
6879 SDValue BasePTR = LD->getBasePtr();
6880 EVT SrcVT = LD->getMemoryVT();
6881 EVT DstVT = LD->getValueType(0);
6882 ISD::LoadExtType ExtType = LD->getExtensionType();
6883
6884 if (SrcVT.isScalableVector())
6885 report_fatal_error("Cannot scalarize scalable vector loads");
6886
6887 unsigned NumElem = SrcVT.getVectorNumElements();
6888
6889 EVT SrcEltVT = SrcVT.getScalarType();
6890 EVT DstEltVT = DstVT.getScalarType();
6891
6892 // A vector must always be stored in memory as-is, i.e. without any padding
6893   // between the elements, since various code depends on it, e.g. in the
6894 // handling of a bitcast of a vector type to int, which may be done with a
6895 // vector store followed by an integer load. A vector that does not have
6896 // elements that are byte-sized must therefore be stored as an integer
6897 // built out of the extracted vector elements.
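  // For example, a <4 x i1> occupies four consecutive bits of one byte in
  // memory, so below it is re-loaded as a single sufficiently wide integer
  // and each lane is recovered with a shift, mask, and truncate.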
6898 if (!SrcEltVT.isByteSized()) {
6899 unsigned NumLoadBits = SrcVT.getStoreSizeInBits();
6900 EVT LoadVT = EVT::getIntegerVT(*DAG.getContext(), NumLoadBits);
6901
6902 unsigned NumSrcBits = SrcVT.getSizeInBits();
6903 EVT SrcIntVT = EVT::getIntegerVT(*DAG.getContext(), NumSrcBits);
6904
6905 unsigned SrcEltBits = SrcEltVT.getSizeInBits();
6906 SDValue SrcEltBitMask = DAG.getConstant(
6907 APInt::getLowBitsSet(NumLoadBits, SrcEltBits), SL, LoadVT);
6908
6909 // Load the whole vector and avoid masking off the top bits as it makes
6910 // the codegen worse.
6911 SDValue Load =
6912 DAG.getExtLoad(ISD::EXTLOAD, SL, LoadVT, Chain, BasePTR,
6913 LD->getPointerInfo(), SrcIntVT, LD->getOriginalAlign(),
6914 LD->getMemOperand()->getFlags(), LD->getAAInfo());
6915
6916 SmallVector<SDValue, 8> Vals;
6917 for (unsigned Idx = 0; Idx < NumElem; ++Idx) {
6918 unsigned ShiftIntoIdx =
6919 (DAG.getDataLayout().isBigEndian() ? (NumElem - 1) - Idx : Idx);
6920 SDValue ShiftAmount =
6921 DAG.getShiftAmountConstant(ShiftIntoIdx * SrcEltVT.getSizeInBits(),
6922 LoadVT, SL, /*LegalTypes=*/false);
6923 SDValue ShiftedElt = DAG.getNode(ISD::SRL, SL, LoadVT, Load, ShiftAmount);
6924 SDValue Elt =
6925 DAG.getNode(ISD::AND, SL, LoadVT, ShiftedElt, SrcEltBitMask);
6926 SDValue Scalar = DAG.getNode(ISD::TRUNCATE, SL, SrcEltVT, Elt);
6927
6928 if (ExtType != ISD::NON_EXTLOAD) {
6929 unsigned ExtendOp = ISD::getExtForLoadExtType(false, ExtType);
6930 Scalar = DAG.getNode(ExtendOp, SL, DstEltVT, Scalar);
6931 }
6932
6933 Vals.push_back(Scalar);
6934 }
6935
6936 SDValue Value = DAG.getBuildVector(DstVT, SL, Vals);
6937 return std::make_pair(Value, Load.getValue(1));
6938 }
6939
6940 unsigned Stride = SrcEltVT.getSizeInBits() / 8;
6941 assert(SrcEltVT.isByteSized());
6942
6943 SmallVector<SDValue, 8> Vals;
6944 SmallVector<SDValue, 8> LoadChains;
6945
6946 for (unsigned Idx = 0; Idx < NumElem; ++Idx) {
6947 SDValue ScalarLoad =
6948 DAG.getExtLoad(ExtType, SL, DstEltVT, Chain, BasePTR,
6949 LD->getPointerInfo().getWithOffset(Idx * Stride),
6950 SrcEltVT, LD->getOriginalAlign(),
6951 LD->getMemOperand()->getFlags(), LD->getAAInfo());
6952
6953 BasePTR = DAG.getObjectPtrOffset(SL, BasePTR, TypeSize::Fixed(Stride));
6954
6955 Vals.push_back(ScalarLoad.getValue(0));
6956 LoadChains.push_back(ScalarLoad.getValue(1));
6957 }
6958
6959 SDValue NewChain = DAG.getNode(ISD::TokenFactor, SL, MVT::Other, LoadChains);
6960 SDValue Value = DAG.getBuildVector(DstVT, SL, Vals);
6961
6962 return std::make_pair(Value, NewChain);
6963 }
6964
6965 SDValue TargetLowering::scalarizeVectorStore(StoreSDNode *ST,
6966 SelectionDAG &DAG) const {
6967 SDLoc SL(ST);
6968
6969 SDValue Chain = ST->getChain();
6970 SDValue BasePtr = ST->getBasePtr();
6971 SDValue Value = ST->getValue();
6972 EVT StVT = ST->getMemoryVT();
6973
6974 if (StVT.isScalableVector())
6975 report_fatal_error("Cannot scalarize scalable vector stores");
6976
6977 // The type of the data we want to save
6978 EVT RegVT = Value.getValueType();
6979 EVT RegSclVT = RegVT.getScalarType();
6980
6981 // The type of data as saved in memory.
6982 EVT MemSclVT = StVT.getScalarType();
6983
6984 unsigned NumElem = StVT.getVectorNumElements();
6985
6986 // A vector must always be stored in memory as-is, i.e. without any padding
6987   // between the elements, since various code depends on it, e.g. in the
6988 // handling of a bitcast of a vector type to int, which may be done with a
6989 // vector store followed by an integer load. A vector that does not have
6990 // elements that are byte-sized must therefore be stored as an integer
6991 // built out of the extracted vector elements.
6992 if (!MemSclVT.isByteSized()) {
6993 unsigned NumBits = StVT.getSizeInBits();
6994 EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), NumBits);
6995
6996 SDValue CurrVal = DAG.getConstant(0, SL, IntVT);
6997
6998 for (unsigned Idx = 0; Idx < NumElem; ++Idx) {
6999 SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, RegSclVT, Value,
7000 DAG.getVectorIdxConstant(Idx, SL));
7001 SDValue Trunc = DAG.getNode(ISD::TRUNCATE, SL, MemSclVT, Elt);
7002 SDValue ExtElt = DAG.getNode(ISD::ZERO_EXTEND, SL, IntVT, Trunc);
7003 unsigned ShiftIntoIdx =
7004 (DAG.getDataLayout().isBigEndian() ? (NumElem - 1) - Idx : Idx);
7005 SDValue ShiftAmount =
7006 DAG.getConstant(ShiftIntoIdx * MemSclVT.getSizeInBits(), SL, IntVT);
7007 SDValue ShiftedElt =
7008 DAG.getNode(ISD::SHL, SL, IntVT, ExtElt, ShiftAmount);
7009 CurrVal = DAG.getNode(ISD::OR, SL, IntVT, CurrVal, ShiftedElt);
7010 }
7011
7012 return DAG.getStore(Chain, SL, CurrVal, BasePtr, ST->getPointerInfo(),
7013 ST->getOriginalAlign(), ST->getMemOperand()->getFlags(),
7014 ST->getAAInfo());
7015 }
7016
7017 // Store Stride in bytes
7018 unsigned Stride = MemSclVT.getSizeInBits() / 8;
7019 assert(Stride && "Zero stride!");
7020 // Extract each of the elements from the original vector and save them into
7021 // memory individually.
7022 SmallVector<SDValue, 8> Stores;
7023 for (unsigned Idx = 0; Idx < NumElem; ++Idx) {
7024 SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, RegSclVT, Value,
7025 DAG.getVectorIdxConstant(Idx, SL));
7026
7027 SDValue Ptr =
7028 DAG.getObjectPtrOffset(SL, BasePtr, TypeSize::Fixed(Idx * Stride));
7029
7030 // This scalar TruncStore may be illegal, but we legalize it later.
7031 SDValue Store = DAG.getTruncStore(
7032 Chain, SL, Elt, Ptr, ST->getPointerInfo().getWithOffset(Idx * Stride),
7033 MemSclVT, ST->getOriginalAlign(), ST->getMemOperand()->getFlags(),
7034 ST->getAAInfo());
7035
7036 Stores.push_back(Store);
7037 }
7038
7039 return DAG.getNode(ISD::TokenFactor, SL, MVT::Other, Stores);
7040 }
7041
7042 std::pair<SDValue, SDValue>
7043 TargetLowering::expandUnalignedLoad(LoadSDNode *LD, SelectionDAG &DAG) const {
7044 assert(LD->getAddressingMode() == ISD::UNINDEXED &&
7045 "unaligned indexed loads not implemented!");
7046 SDValue Chain = LD->getChain();
7047 SDValue Ptr = LD->getBasePtr();
7048 EVT VT = LD->getValueType(0);
7049 EVT LoadedVT = LD->getMemoryVT();
7050 SDLoc dl(LD);
7051 auto &MF = DAG.getMachineFunction();
7052
7053 if (VT.isFloatingPoint() || VT.isVector()) {
7054 EVT intVT = EVT::getIntegerVT(*DAG.getContext(), LoadedVT.getSizeInBits());
7055 if (isTypeLegal(intVT) && isTypeLegal(LoadedVT)) {
7056 if (!isOperationLegalOrCustom(ISD::LOAD, intVT) &&
7057 LoadedVT.isVector()) {
7058 // Scalarize the load and let the individual components be handled.
7059 return scalarizeVectorLoad(LD, DAG);
7060 }
7061
7062 // Expand to a (misaligned) integer load of the same size,
7063 // then bitconvert to floating point or vector.
7064 SDValue newLoad = DAG.getLoad(intVT, dl, Chain, Ptr,
7065 LD->getMemOperand());
7066 SDValue Result = DAG.getNode(ISD::BITCAST, dl, LoadedVT, newLoad);
7067 if (LoadedVT != VT)
7068 Result = DAG.getNode(VT.isFloatingPoint() ? ISD::FP_EXTEND :
7069 ISD::ANY_EXTEND, dl, VT, Result);
7070
7071 return std::make_pair(Result, newLoad.getValue(1));
7072 }
7073
7074     // Copy the value to an (aligned) stack slot using (unaligned) integer
7075     // loads and stores, then do an (aligned) load from the stack slot.
7076 MVT RegVT = getRegisterType(*DAG.getContext(), intVT);
7077 unsigned LoadedBytes = LoadedVT.getStoreSize();
7078 unsigned RegBytes = RegVT.getSizeInBits() / 8;
7079 unsigned NumRegs = (LoadedBytes + RegBytes - 1) / RegBytes;
7080
7081 // Make sure the stack slot is also aligned for the register type.
7082 SDValue StackBase = DAG.CreateStackTemporary(LoadedVT, RegVT);
7083 auto FrameIndex = cast<FrameIndexSDNode>(StackBase.getNode())->getIndex();
7084 SmallVector<SDValue, 8> Stores;
7085 SDValue StackPtr = StackBase;
7086 unsigned Offset = 0;
7087
7088 EVT PtrVT = Ptr.getValueType();
7089 EVT StackPtrVT = StackPtr.getValueType();
7090
7091 SDValue PtrIncrement = DAG.getConstant(RegBytes, dl, PtrVT);
7092 SDValue StackPtrIncrement = DAG.getConstant(RegBytes, dl, StackPtrVT);
7093
7094     // Do all but one of the copies using the full register width.
7095 for (unsigned i = 1; i < NumRegs; i++) {
7096 // Load one integer register's worth from the original location.
7097 SDValue Load = DAG.getLoad(
7098 RegVT, dl, Chain, Ptr, LD->getPointerInfo().getWithOffset(Offset),
7099 LD->getOriginalAlign(), LD->getMemOperand()->getFlags(),
7100 LD->getAAInfo());
7101 // Follow the load with a store to the stack slot. Remember the store.
7102 Stores.push_back(DAG.getStore(
7103 Load.getValue(1), dl, Load, StackPtr,
7104 MachinePointerInfo::getFixedStack(MF, FrameIndex, Offset)));
7105 // Increment the pointers.
7106 Offset += RegBytes;
7107
7108 Ptr = DAG.getObjectPtrOffset(dl, Ptr, PtrIncrement);
7109 StackPtr = DAG.getObjectPtrOffset(dl, StackPtr, StackPtrIncrement);
7110 }
7111
7112 // The last copy may be partial. Do an extending load.
7113 EVT MemVT = EVT::getIntegerVT(*DAG.getContext(),
7114 8 * (LoadedBytes - Offset));
7115 SDValue Load =
7116 DAG.getExtLoad(ISD::EXTLOAD, dl, RegVT, Chain, Ptr,
7117 LD->getPointerInfo().getWithOffset(Offset), MemVT,
7118 LD->getOriginalAlign(), LD->getMemOperand()->getFlags(),
7119 LD->getAAInfo());
7120 // Follow the load with a store to the stack slot. Remember the store.
7121 // On big-endian machines this requires a truncating store to ensure
7122 // that the bits end up in the right place.
7123 Stores.push_back(DAG.getTruncStore(
7124 Load.getValue(1), dl, Load, StackPtr,
7125 MachinePointerInfo::getFixedStack(MF, FrameIndex, Offset), MemVT));
7126
7127 // The order of the stores doesn't matter - say it with a TokenFactor.
7128 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Stores);
7129
7130 // Finally, perform the original load only redirected to the stack slot.
7131 Load = DAG.getExtLoad(LD->getExtensionType(), dl, VT, TF, StackBase,
7132 MachinePointerInfo::getFixedStack(MF, FrameIndex, 0),
7133 LoadedVT);
7134
7135 // Callers expect a MERGE_VALUES node.
7136 return std::make_pair(Load, TF);
7137 }
7138
7139 assert(LoadedVT.isInteger() && !LoadedVT.isVector() &&
7140 "Unaligned load of unsupported type.");
7141
7142 // Compute the new VT that is half the size of the old one. This is an
7143 // integer MVT.
7144 unsigned NumBits = LoadedVT.getSizeInBits();
7145 EVT NewLoadedVT;
7146 NewLoadedVT = EVT::getIntegerVT(*DAG.getContext(), NumBits/2);
7147 NumBits >>= 1;
7148
7149 Align Alignment = LD->getOriginalAlign();
7150 unsigned IncrementSize = NumBits / 8;
7151 ISD::LoadExtType HiExtType = LD->getExtensionType();
7152
7153 // If the original load is NON_EXTLOAD, the hi part load must be ZEXTLOAD.
7154 if (HiExtType == ISD::NON_EXTLOAD)
7155 HiExtType = ISD::ZEXTLOAD;
7156
7157 // Load the value in two parts
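  // E.g. an unaligned i32 load becomes two half-width loads: on a
  // little-endian target, a zero-extending i16 load of the low half at Ptr
  // and an i16 load of the high half at Ptr + 2, recombined below as
  // (Hi << NumBits) | Lo.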
7158 SDValue Lo, Hi;
7159 if (DAG.getDataLayout().isLittleEndian()) {
7160 Lo = DAG.getExtLoad(ISD::ZEXTLOAD, dl, VT, Chain, Ptr, LD->getPointerInfo(),
7161 NewLoadedVT, Alignment, LD->getMemOperand()->getFlags(),
7162 LD->getAAInfo());
7163
7164 Ptr = DAG.getObjectPtrOffset(dl, Ptr, TypeSize::Fixed(IncrementSize));
7165 Hi = DAG.getExtLoad(HiExtType, dl, VT, Chain, Ptr,
7166 LD->getPointerInfo().getWithOffset(IncrementSize),
7167 NewLoadedVT, Alignment, LD->getMemOperand()->getFlags(),
7168 LD->getAAInfo());
7169 } else {
7170 Hi = DAG.getExtLoad(HiExtType, dl, VT, Chain, Ptr, LD->getPointerInfo(),
7171 NewLoadedVT, Alignment, LD->getMemOperand()->getFlags(),
7172 LD->getAAInfo());
7173
7174 Ptr = DAG.getObjectPtrOffset(dl, Ptr, TypeSize::Fixed(IncrementSize));
7175 Lo = DAG.getExtLoad(ISD::ZEXTLOAD, dl, VT, Chain, Ptr,
7176 LD->getPointerInfo().getWithOffset(IncrementSize),
7177 NewLoadedVT, Alignment, LD->getMemOperand()->getFlags(),
7178 LD->getAAInfo());
7179 }
7180
7181   // Aggregate the two parts.
7182 SDValue ShiftAmount =
7183 DAG.getConstant(NumBits, dl, getShiftAmountTy(Hi.getValueType(),
7184 DAG.getDataLayout()));
7185 SDValue Result = DAG.getNode(ISD::SHL, dl, VT, Hi, ShiftAmount);
7186 Result = DAG.getNode(ISD::OR, dl, VT, Result, Lo);
7187
7188 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo.getValue(1),
7189 Hi.getValue(1));
7190
7191 return std::make_pair(Result, TF);
7192 }
7193
7194 SDValue TargetLowering::expandUnalignedStore(StoreSDNode *ST,
7195 SelectionDAG &DAG) const {
7196 assert(ST->getAddressingMode() == ISD::UNINDEXED &&
7197 "unaligned indexed stores not implemented!");
7198 SDValue Chain = ST->getChain();
7199 SDValue Ptr = ST->getBasePtr();
7200 SDValue Val = ST->getValue();
7201 EVT VT = Val.getValueType();
7202 Align Alignment = ST->getOriginalAlign();
7203 auto &MF = DAG.getMachineFunction();
7204 EVT StoreMemVT = ST->getMemoryVT();
7205
7206 SDLoc dl(ST);
7207 if (StoreMemVT.isFloatingPoint() || StoreMemVT.isVector()) {
7208 EVT intVT = EVT::getIntegerVT(*DAG.getContext(), VT.getSizeInBits());
7209 if (isTypeLegal(intVT)) {
7210 if (!isOperationLegalOrCustom(ISD::STORE, intVT) &&
7211 StoreMemVT.isVector()) {
7212 // Scalarize the store and let the individual components be handled.
7213 SDValue Result = scalarizeVectorStore(ST, DAG);
7214 return Result;
7215 }
7216 // Expand to a bitconvert of the value to the integer type of the
7217 // same size, then a (misaligned) int store.
7218 // FIXME: Does not handle truncating floating point stores!
7219 SDValue Result = DAG.getNode(ISD::BITCAST, dl, intVT, Val);
7220 Result = DAG.getStore(Chain, dl, Result, Ptr, ST->getPointerInfo(),
7221 Alignment, ST->getMemOperand()->getFlags());
7222 return Result;
7223 }
7224     // Do an (aligned) store to a stack slot, then copy from the stack slot
7225 // to the final destination using (unaligned) integer loads and stores.
7226 MVT RegVT = getRegisterType(
7227 *DAG.getContext(),
7228 EVT::getIntegerVT(*DAG.getContext(), StoreMemVT.getSizeInBits()));
7229 EVT PtrVT = Ptr.getValueType();
7230 unsigned StoredBytes = StoreMemVT.getStoreSize();
7231 unsigned RegBytes = RegVT.getSizeInBits() / 8;
7232 unsigned NumRegs = (StoredBytes + RegBytes - 1) / RegBytes;
7233
7234 // Make sure the stack slot is also aligned for the register type.
7235 SDValue StackPtr = DAG.CreateStackTemporary(StoreMemVT, RegVT);
7236 auto FrameIndex = cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex();
7237
7238 // Perform the original store, only redirected to the stack slot.
7239 SDValue Store = DAG.getTruncStore(
7240 Chain, dl, Val, StackPtr,
7241 MachinePointerInfo::getFixedStack(MF, FrameIndex, 0), StoreMemVT);
7242
7243 EVT StackPtrVT = StackPtr.getValueType();
7244
7245 SDValue PtrIncrement = DAG.getConstant(RegBytes, dl, PtrVT);
7246 SDValue StackPtrIncrement = DAG.getConstant(RegBytes, dl, StackPtrVT);
7247 SmallVector<SDValue, 8> Stores;
7248 unsigned Offset = 0;
7249
7250     // Do all but one copy using the full register width.
7251 for (unsigned i = 1; i < NumRegs; i++) {
7252 // Load one integer register's worth from the stack slot.
7253 SDValue Load = DAG.getLoad(
7254 RegVT, dl, Store, StackPtr,
7255 MachinePointerInfo::getFixedStack(MF, FrameIndex, Offset));
7256 // Store it to the final location. Remember the store.
7257 Stores.push_back(DAG.getStore(Load.getValue(1), dl, Load, Ptr,
7258 ST->getPointerInfo().getWithOffset(Offset),
7259 ST->getOriginalAlign(),
7260 ST->getMemOperand()->getFlags()));
7261 // Increment the pointers.
7262 Offset += RegBytes;
7263 StackPtr = DAG.getObjectPtrOffset(dl, StackPtr, StackPtrIncrement);
7264 Ptr = DAG.getObjectPtrOffset(dl, Ptr, PtrIncrement);
7265 }
7266
7267 // The last store may be partial. Do a truncating store. On big-endian
7268 // machines this requires an extending load from the stack slot to ensure
7269 // that the bits are in the right place.
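    // E.g. storing 6 bytes with 4-byte registers: one full i32 copy above,
    // then an i16 extending load from the slot and a truncating store of the
    // final 2 bytes here.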
7270 EVT LoadMemVT =
7271 EVT::getIntegerVT(*DAG.getContext(), 8 * (StoredBytes - Offset));
7272
7273 // Load from the stack slot.
7274 SDValue Load = DAG.getExtLoad(
7275 ISD::EXTLOAD, dl, RegVT, Store, StackPtr,
7276 MachinePointerInfo::getFixedStack(MF, FrameIndex, Offset), LoadMemVT);
7277
7278 Stores.push_back(
7279 DAG.getTruncStore(Load.getValue(1), dl, Load, Ptr,
7280 ST->getPointerInfo().getWithOffset(Offset), LoadMemVT,
7281 ST->getOriginalAlign(),
7282 ST->getMemOperand()->getFlags(), ST->getAAInfo()));
7283 // The order of the stores doesn't matter - say it with a TokenFactor.
7284 SDValue Result = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Stores);
7285 return Result;
7286 }
7287
7288 assert(StoreMemVT.isInteger() && !StoreMemVT.isVector() &&
7289 "Unaligned store of unknown type.");
7290   // Get the half-sized VT.
7291 EVT NewStoredVT = StoreMemVT.getHalfSizedIntegerVT(*DAG.getContext());
7292 unsigned NumBits = NewStoredVT.getFixedSizeInBits();
7293 unsigned IncrementSize = NumBits / 8;
7294
7295 // Divide the stored value in two parts.
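  // E.g. an unaligned i16 store of 0xABCD on a little-endian target becomes
  // two i8 truncating stores: 0xCD at Ptr and 0xAB (Val >> 8) at Ptr + 1.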
7296 SDValue ShiftAmount = DAG.getConstant(
7297 NumBits, dl, getShiftAmountTy(Val.getValueType(), DAG.getDataLayout()));
7298 SDValue Lo = Val;
7299 SDValue Hi = DAG.getNode(ISD::SRL, dl, VT, Val, ShiftAmount);
7300
7301 // Store the two parts
7302 SDValue Store1, Store2;
7303 Store1 = DAG.getTruncStore(Chain, dl,
7304 DAG.getDataLayout().isLittleEndian() ? Lo : Hi,
7305 Ptr, ST->getPointerInfo(), NewStoredVT, Alignment,
7306 ST->getMemOperand()->getFlags());
7307
7308 Ptr = DAG.getObjectPtrOffset(dl, Ptr, TypeSize::Fixed(IncrementSize));
7309 Store2 = DAG.getTruncStore(
7310 Chain, dl, DAG.getDataLayout().isLittleEndian() ? Hi : Lo, Ptr,
7311 ST->getPointerInfo().getWithOffset(IncrementSize), NewStoredVT, Alignment,
7312 ST->getMemOperand()->getFlags(), ST->getAAInfo());
7313
7314 SDValue Result =
7315 DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Store1, Store2);
7316 return Result;
7317 }
7318
7319 SDValue
7320 TargetLowering::IncrementMemoryAddress(SDValue Addr, SDValue Mask,
7321 const SDLoc &DL, EVT DataVT,
7322 SelectionDAG &DAG,
7323 bool IsCompressedMemory) const {
7324 SDValue Increment;
7325 EVT AddrVT = Addr.getValueType();
7326 EVT MaskVT = Mask.getValueType();
7327 assert(DataVT.getVectorElementCount() == MaskVT.getVectorElementCount() &&
7328 "Incompatible types of Data and Mask");
7329 if (IsCompressedMemory) {
7330 if (DataVT.isScalableVector())
7331 report_fatal_error(
7332 "Cannot currently handle compressed memory with scalable vectors");
7333     // Increment the pointer according to the number of '1's in the mask.
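    // E.g. a compressed v4i32 store with mask <1,0,1,1> advances the address
    // by popcount(mask) * 4 = 12 bytes.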
7334 EVT MaskIntVT = EVT::getIntegerVT(*DAG.getContext(), MaskVT.getSizeInBits());
7335 SDValue MaskInIntReg = DAG.getBitcast(MaskIntVT, Mask);
7336 if (MaskIntVT.getSizeInBits() < 32) {
7337 MaskInIntReg = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i32, MaskInIntReg);
7338 MaskIntVT = MVT::i32;
7339 }
7340
7341 // Count '1's with POPCNT.
7342 Increment = DAG.getNode(ISD::CTPOP, DL, MaskIntVT, MaskInIntReg);
7343 Increment = DAG.getZExtOrTrunc(Increment, DL, AddrVT);
7344     // Scale is the element size in bytes.
7345 SDValue Scale = DAG.getConstant(DataVT.getScalarSizeInBits() / 8, DL,
7346 AddrVT);
7347 Increment = DAG.getNode(ISD::MUL, DL, AddrVT, Increment, Scale);
7348 } else if (DataVT.isScalableVector()) {
7349 Increment = DAG.getVScale(DL, AddrVT,
7350 APInt(AddrVT.getFixedSizeInBits(),
7351 DataVT.getStoreSize().getKnownMinSize()));
7352 } else
7353 Increment = DAG.getConstant(DataVT.getStoreSize(), DL, AddrVT);
7354
7355 return DAG.getNode(ISD::ADD, DL, AddrVT, Addr, Increment);
7356 }
7357
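// Clamp a dynamic vector index so the access stays within the vector: for a
// power-of-two fixed-width element count this is Idx & (NElts - 1), otherwise
// umin(Idx, NElts - 1); for scalable vectors, umin(Idx, VScale * NElts - 1).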
7358 static SDValue clampDynamicVectorIndex(SelectionDAG &DAG,
7359 SDValue Idx,
7360 EVT VecVT,
7361 const SDLoc &dl) {
7362 if (!VecVT.isScalableVector() && isa<ConstantSDNode>(Idx))
7363 return Idx;
7364
7365 EVT IdxVT = Idx.getValueType();
7366 unsigned NElts = VecVT.getVectorMinNumElements();
7367 if (VecVT.isScalableVector()) {
7368 SDValue VS = DAG.getVScale(dl, IdxVT,
7369 APInt(IdxVT.getFixedSizeInBits(),
7370 NElts));
7371 SDValue Sub = DAG.getNode(ISD::SUB, dl, IdxVT, VS,
7372 DAG.getConstant(1, dl, IdxVT));
7373
7374 return DAG.getNode(ISD::UMIN, dl, IdxVT, Idx, Sub);
7375 } else {
7376 if (isPowerOf2_32(NElts)) {
7377 APInt Imm = APInt::getLowBitsSet(IdxVT.getSizeInBits(),
7378 Log2_32(NElts));
7379 return DAG.getNode(ISD::AND, dl, IdxVT, Idx,
7380 DAG.getConstant(Imm, dl, IdxVT));
7381 }
7382 }
7383
7384 return DAG.getNode(ISD::UMIN, dl, IdxVT, Idx,
7385 DAG.getConstant(NElts - 1, dl, IdxVT));
7386 }
7387
7388 SDValue TargetLowering::getVectorElementPointer(SelectionDAG &DAG,
7389 SDValue VecPtr, EVT VecVT,
7390 SDValue Index) const {
7391 SDLoc dl(Index);
7392 // Make sure the index type is big enough to compute in.
7393 Index = DAG.getZExtOrTrunc(Index, dl, VecPtr.getValueType());
7394
7395 EVT EltVT = VecVT.getVectorElementType();
7396
7397 // Calculate the element offset and add it to the pointer.
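  // E.g. for a <4 x i32> vector, element 3 lives at byte offset 3 * 4 = 12
  // from the vector's base pointer.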
7398 unsigned EltSize = EltVT.getFixedSizeInBits() / 8; // FIXME: should be ABI size.
7399 assert(EltSize * 8 == EltVT.getFixedSizeInBits() &&
7400 "Converting bits to bytes lost precision");
7401
7402 Index = clampDynamicVectorIndex(DAG, Index, VecVT, dl);
7403
7404 EVT IdxVT = Index.getValueType();
7405
7406 Index = DAG.getNode(ISD::MUL, dl, IdxVT, Index,
7407 DAG.getConstant(EltSize, dl, IdxVT));
7408 return DAG.getMemBasePlusOffset(VecPtr, Index, dl);
7409 }
7410
7411 //===----------------------------------------------------------------------===//
7412 // Implementation of Emulated TLS Model
7413 //===----------------------------------------------------------------------===//
7414
7415 SDValue TargetLowering::LowerToTLSEmulatedModel(const GlobalAddressSDNode *GA,
7416 SelectionDAG &DAG) const {
7417   // Access to the address of TLS variable xyz is lowered to a function call:
7418 // __emutls_get_address( address of global variable named "__emutls_v.xyz" )
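  // In effect: addr = __emutls_get_address(&__emutls_v.xyz);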
7419 EVT PtrVT = getPointerTy(DAG.getDataLayout());
7420 PointerType *VoidPtrType = Type::getInt8PtrTy(*DAG.getContext());
7421 SDLoc dl(GA);
7422
7423 ArgListTy Args;
7424 ArgListEntry Entry;
7425 std::string NameString = ("__emutls_v." + GA->getGlobal()->getName()).str();
7426 Module *VariableModule = const_cast<Module*>(GA->getGlobal()->getParent());
7427 StringRef EmuTlsVarName(NameString);
7428 GlobalVariable *EmuTlsVar = VariableModule->getNamedGlobal(EmuTlsVarName);
7429   assert(EmuTlsVar && "Cannot find EmuTlsVar");
7430 Entry.Node = DAG.getGlobalAddress(EmuTlsVar, dl, PtrVT);
7431 Entry.Ty = VoidPtrType;
7432 Args.push_back(Entry);
7433
7434 SDValue EmuTlsGetAddr = DAG.getExternalSymbol("__emutls_get_address", PtrVT);
7435
7436 TargetLowering::CallLoweringInfo CLI(DAG);
7437 CLI.setDebugLoc(dl).setChain(DAG.getEntryNode());
7438 CLI.setLibCallee(CallingConv::C, VoidPtrType, EmuTlsGetAddr, std::move(Args));
7439 std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
7440
7441   // TLSADDR will be codegen'ed as a call. Inform MFI that the function has
7442   // calls. At least for X86 targets; maybe good for other targets too?
7443 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
7444 MFI.setAdjustsStack(true); // Is this only for X86 target?
7445 MFI.setHasCalls(true);
7446
7447 assert((GA->getOffset() == 0) &&
7448 "Emulated TLS must have zero offset in GlobalAddressSDNode");
7449 return CallResult.first;
7450 }
7451
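// Lower (seteq X, 0) to (srl (ctlz X), log2(bitwidth)) on targets with fast
// CTLZ: e.g. for i32, ctlz(0) = 32 and 32 >> 5 = 1, while any nonzero X has
// ctlz(X) <= 31, so the shift yields 0.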
7452 SDValue TargetLowering::lowerCmpEqZeroToCtlzSrl(SDValue Op,
7453 SelectionDAG &DAG) const {
7454 assert((Op->getOpcode() == ISD::SETCC) && "Input has to be a SETCC node.");
7455 if (!isCtlzFast())
7456 return SDValue();
7457 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
7458 SDLoc dl(Op);
7459 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
7460 if (C->isNullValue() && CC == ISD::SETEQ) {
7461 EVT VT = Op.getOperand(0).getValueType();
7462 SDValue Zext = Op.getOperand(0);
7463 if (VT.bitsLT(MVT::i32)) {
7464 VT = MVT::i32;
7465 Zext = DAG.getNode(ISD::ZERO_EXTEND, dl, VT, Op.getOperand(0));
7466 }
7467 unsigned Log2b = Log2_32(VT.getSizeInBits());
7468 SDValue Clz = DAG.getNode(ISD::CTLZ, dl, VT, Zext);
7469 SDValue Scc = DAG.getNode(ISD::SRL, dl, VT, Clz,
7470 DAG.getConstant(Log2b, dl, MVT::i32));
7471 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Scc);
7472 }
7473 }
7474 return SDValue();
7475 }
7476
7477 // Convert redundant addressing modes (e.g. scaling is redundant
7478 // when accessing bytes).
7479 ISD::MemIndexType
7480 TargetLowering::getCanonicalIndexType(ISD::MemIndexType IndexType, EVT MemVT,
7481 SDValue Offsets) const {
7482 bool IsScaledIndex =
7483 (IndexType == ISD::SIGNED_SCALED) || (IndexType == ISD::UNSIGNED_SCALED);
7484 bool IsSignedIndex =
7485 (IndexType == ISD::SIGNED_SCALED) || (IndexType == ISD::SIGNED_UNSCALED);
7486
7487   // Scaling is unimportant for bytes, so canonicalize to unscaled.
7488 if (IsScaledIndex && MemVT.getScalarType() == MVT::i8) {
7489 IsScaledIndex = false;
7490 IndexType = IsSignedIndex ? ISD::SIGNED_UNSCALED : ISD::UNSIGNED_UNSCALED;
7491 }
7492
7493 return IndexType;
7494 }
7495
7496 SDValue TargetLowering::expandIntMINMAX(SDNode *Node, SelectionDAG &DAG) const {
7497 SDValue Op0 = Node->getOperand(0);
7498 SDValue Op1 = Node->getOperand(1);
7499 EVT VT = Op0.getValueType();
7500 unsigned Opcode = Node->getOpcode();
7501 SDLoc DL(Node);
7502
7503 // umin(x,y) -> sub(x,usubsat(x,y))
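  // E.g. umin(9, 5) = 9 - usubsat(9, 5) = 9 - 4 = 5.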
7504 if (Opcode == ISD::UMIN && isOperationLegal(ISD::SUB, VT) &&
7505 isOperationLegal(ISD::USUBSAT, VT)) {
7506 return DAG.getNode(ISD::SUB, DL, VT, Op0,
7507 DAG.getNode(ISD::USUBSAT, DL, VT, Op0, Op1));
7508 }
7509
7510 // umax(x,y) -> add(x,usubsat(y,x))
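  // E.g. umax(5, 9) = 5 + usubsat(9, 5) = 5 + 4 = 9.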
7511 if (Opcode == ISD::UMAX && isOperationLegal(ISD::ADD, VT) &&
7512 isOperationLegal(ISD::USUBSAT, VT)) {
7513 return DAG.getNode(ISD::ADD, DL, VT, Op0,
7514 DAG.getNode(ISD::USUBSAT, DL, VT, Op1, Op0));
7515 }
7516
7517 // Expand Y = MAX(A, B) -> Y = (A > B) ? A : B
7518 ISD::CondCode CC;
7519 switch (Opcode) {
7520 default: llvm_unreachable("How did we get here?");
7521 case ISD::SMAX: CC = ISD::SETGT; break;
7522 case ISD::SMIN: CC = ISD::SETLT; break;
7523 case ISD::UMAX: CC = ISD::SETUGT; break;
7524 case ISD::UMIN: CC = ISD::SETULT; break;
7525 }
7526
7527 // FIXME: Should really try to split the vector in case it's legal on a
7528 // subvector.
7529 if (VT.isVector() && !isOperationLegalOrCustom(ISD::VSELECT, VT))
7530 return DAG.UnrollVectorOp(Node);
7531
7532 SDValue Cond = DAG.getSetCC(DL, VT, Op0, Op1, CC);
7533 return DAG.getSelect(DL, VT, Cond, Op0, Op1);
7534 }
7535
7536 SDValue TargetLowering::expandAddSubSat(SDNode *Node, SelectionDAG &DAG) const {
7537 unsigned Opcode = Node->getOpcode();
7538 SDValue LHS = Node->getOperand(0);
7539 SDValue RHS = Node->getOperand(1);
7540 EVT VT = LHS.getValueType();
7541 SDLoc dl(Node);
7542
7543 assert(VT == RHS.getValueType() && "Expected operands to be the same type");
7544 assert(VT.isInteger() && "Expected operands to be integers");
7545
7546 // usub.sat(a, b) -> umax(a, b) - b
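  // E.g. usub.sat(3, 5) = umax(3, 5) - 5 = 5 - 5 = 0.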
7547 if (Opcode == ISD::USUBSAT && isOperationLegal(ISD::UMAX, VT)) {
7548 SDValue Max = DAG.getNode(ISD::UMAX, dl, VT, LHS, RHS);
7549 return DAG.getNode(ISD::SUB, dl, VT, Max, RHS);
7550 }
7551
7552 // uadd.sat(a, b) -> umin(a, ~b) + b
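  // (a > ~b means a + b would wrap, so clamping a to ~b makes the sum come out
  // to exactly the all-ones value.) E.g. i8 uadd.sat(250, 10):
  // umin(250, ~10) + 10 = 245 + 10 = 255.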
7553 if (Opcode == ISD::UADDSAT && isOperationLegal(ISD::UMIN, VT)) {
7554 SDValue InvRHS = DAG.getNOT(dl, RHS, VT);
7555 SDValue Min = DAG.getNode(ISD::UMIN, dl, VT, LHS, InvRHS);
7556 return DAG.getNode(ISD::ADD, dl, VT, Min, RHS);
7557 }
7558
7559 unsigned OverflowOp;
7560 switch (Opcode) {
7561 case ISD::SADDSAT:
7562 OverflowOp = ISD::SADDO;
7563 break;
7564 case ISD::UADDSAT:
7565 OverflowOp = ISD::UADDO;
7566 break;
7567 case ISD::SSUBSAT:
7568 OverflowOp = ISD::SSUBO;
7569 break;
7570 case ISD::USUBSAT:
7571 OverflowOp = ISD::USUBO;
7572 break;
7573 default:
7574 llvm_unreachable("Expected method to receive signed or unsigned saturation "
7575 "addition or subtraction node.");
7576 }
7577
7578 // FIXME: Should really try to split the vector in case it's legal on a
7579 // subvector.
7580 if (VT.isVector() && !isOperationLegalOrCustom(ISD::VSELECT, VT))
7581 return DAG.UnrollVectorOp(Node);
7582
7583 unsigned BitWidth = LHS.getScalarValueSizeInBits();
7584 EVT BoolVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT);
7585 SDValue Result = DAG.getNode(OverflowOp, dl, DAG.getVTList(VT, BoolVT),
7586 LHS, RHS);
7587 SDValue SumDiff = Result.getValue(0);
7588 SDValue Overflow = Result.getValue(1);
7589 SDValue Zero = DAG.getConstant(0, dl, VT);
7590 SDValue AllOnes = DAG.getAllOnesConstant(dl, VT);
7591
7592 if (Opcode == ISD::UADDSAT) {
7593 if (getBooleanContents(VT) == ZeroOrNegativeOneBooleanContent) {
7594 // (LHS + RHS) | OverflowMask
7595 SDValue OverflowMask = DAG.getSExtOrTrunc(Overflow, dl, VT);
7596 return DAG.getNode(ISD::OR, dl, VT, SumDiff, OverflowMask);
7597 }
7598 // Overflow ? 0xffff.... : (LHS + RHS)
7599 return DAG.getSelect(dl, VT, Overflow, AllOnes, SumDiff);
7600 } else if (Opcode == ISD::USUBSAT) {
7601 if (getBooleanContents(VT) == ZeroOrNegativeOneBooleanContent) {
7602 // (LHS - RHS) & ~OverflowMask
7603 SDValue OverflowMask = DAG.getSExtOrTrunc(Overflow, dl, VT);
7604 SDValue Not = DAG.getNOT(dl, OverflowMask, VT);
7605 return DAG.getNode(ISD::AND, dl, VT, SumDiff, Not);
7606 }
7607 // Overflow ? 0 : (LHS - RHS)
7608 return DAG.getSelect(dl, VT, Overflow, Zero, SumDiff);
7609 } else {
7610 // SatMax -> Overflow && SumDiff < 0
7611 // SatMin -> Overflow && SumDiff >= 0
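    // E.g. i8 sadd.sat(100, 50): the adder wraps to -106 and sets Overflow,
    // and since SumDiff is negative the result saturates to SatMax = 127.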
7612 APInt MinVal = APInt::getSignedMinValue(BitWidth);
7613 APInt MaxVal = APInt::getSignedMaxValue(BitWidth);
7614 SDValue SatMin = DAG.getConstant(MinVal, dl, VT);
7615 SDValue SatMax = DAG.getConstant(MaxVal, dl, VT);
7616 SDValue SumNeg = DAG.getSetCC(dl, BoolVT, SumDiff, Zero, ISD::SETLT);
7617 Result = DAG.getSelect(dl, VT, SumNeg, SatMax, SatMin);
7618 return DAG.getSelect(dl, VT, Overflow, Result, SumDiff);
7619 }
7620 }
7621
7622 SDValue TargetLowering::expandShlSat(SDNode *Node, SelectionDAG &DAG) const {
7623 unsigned Opcode = Node->getOpcode();
7624 bool IsSigned = Opcode == ISD::SSHLSAT;
7625 SDValue LHS = Node->getOperand(0);
7626 SDValue RHS = Node->getOperand(1);
7627 EVT VT = LHS.getValueType();
7628 SDLoc dl(Node);
7629
7630 assert((Node->getOpcode() == ISD::SSHLSAT ||
7631 Node->getOpcode() == ISD::USHLSAT) &&
7632 "Expected a SHLSAT opcode");
7633 assert(VT == RHS.getValueType() && "Expected operands to be the same type");
7634 assert(VT.isInteger() && "Expected operands to be integers");
7635
7636 // If LHS != (LHS << RHS) >> RHS, we have overflow and must saturate.
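  // E.g. i8 sshl.sat(64, 2): 64 << 2 wraps to 0 and 0 >> 2 != 64, so the
  // result saturates to 127 (SatMax, since LHS is non-negative).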
7637
7638 unsigned BW = VT.getScalarSizeInBits();
7639 SDValue Result = DAG.getNode(ISD::SHL, dl, VT, LHS, RHS);
7640 SDValue Orig =
7641 DAG.getNode(IsSigned ? ISD::SRA : ISD::SRL, dl, VT, Result, RHS);
7642
7643 SDValue SatVal;
7644 if (IsSigned) {
7645 SDValue SatMin = DAG.getConstant(APInt::getSignedMinValue(BW), dl, VT);
7646 SDValue SatMax = DAG.getConstant(APInt::getSignedMaxValue(BW), dl, VT);
7647 SatVal = DAG.getSelectCC(dl, LHS, DAG.getConstant(0, dl, VT),
7648 SatMin, SatMax, ISD::SETLT);
7649 } else {
7650 SatVal = DAG.getConstant(APInt::getMaxValue(BW), dl, VT);
7651 }
7652 Result = DAG.getSelectCC(dl, LHS, Orig, SatVal, Result, ISD::SETNE);
7653
7654 return Result;
7655 }
7656
7657 SDValue
7658 TargetLowering::expandFixedPointMul(SDNode *Node, SelectionDAG &DAG) const {
7659 assert((Node->getOpcode() == ISD::SMULFIX ||
7660 Node->getOpcode() == ISD::UMULFIX ||
7661 Node->getOpcode() == ISD::SMULFIXSAT ||
7662 Node->getOpcode() == ISD::UMULFIXSAT) &&
7663 "Expected a fixed point multiplication opcode");
7664
7665 SDLoc dl(Node);
7666 SDValue LHS = Node->getOperand(0);
7667 SDValue RHS = Node->getOperand(1);
7668 EVT VT = LHS.getValueType();
7669 unsigned Scale = Node->getConstantOperandVal(2);
7670 bool Saturating = (Node->getOpcode() == ISD::SMULFIXSAT ||
7671 Node->getOpcode() == ISD::UMULFIXSAT);
7672 bool Signed = (Node->getOpcode() == ISD::SMULFIX ||
7673 Node->getOpcode() == ISD::SMULFIXSAT);
7674 EVT BoolVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT);
7675 unsigned VTSize = VT.getScalarSizeInBits();
7676
7677 if (!Scale) {
7678 // [us]mul.fix(a, b, 0) -> mul(a, b)
7679 if (!Saturating) {
7680 if (isOperationLegalOrCustom(ISD::MUL, VT))
7681 return DAG.getNode(ISD::MUL, dl, VT, LHS, RHS);
7682 } else if (Signed && isOperationLegalOrCustom(ISD::SMULO, VT)) {
7683 SDValue Result =
7684 DAG.getNode(ISD::SMULO, dl, DAG.getVTList(VT, BoolVT), LHS, RHS);
7685 SDValue Product = Result.getValue(0);
7686 SDValue Overflow = Result.getValue(1);
7687 SDValue Zero = DAG.getConstant(0, dl, VT);
7688
7689 APInt MinVal = APInt::getSignedMinValue(VTSize);
7690 APInt MaxVal = APInt::getSignedMaxValue(VTSize);
7691 SDValue SatMin = DAG.getConstant(MinVal, dl, VT);
7692 SDValue SatMax = DAG.getConstant(MaxVal, dl, VT);
7693 SDValue ProdNeg = DAG.getSetCC(dl, BoolVT, Product, Zero, ISD::SETLT);
7694 Result = DAG.getSelect(dl, VT, ProdNeg, SatMax, SatMin);
7695 return DAG.getSelect(dl, VT, Overflow, Result, Product);
7696 } else if (!Signed && isOperationLegalOrCustom(ISD::UMULO, VT)) {
7697 SDValue Result =
7698 DAG.getNode(ISD::UMULO, dl, DAG.getVTList(VT, BoolVT), LHS, RHS);
7699 SDValue Product = Result.getValue(0);
7700 SDValue Overflow = Result.getValue(1);
7701
7702 APInt MaxVal = APInt::getMaxValue(VTSize);
7703 SDValue SatMax = DAG.getConstant(MaxVal, dl, VT);
7704 return DAG.getSelect(dl, VT, Overflow, SatMax, Product);
7705 }
7706 }
7707
7708 assert(((Signed && Scale < VTSize) || (!Signed && Scale <= VTSize)) &&
7709 "Expected scale to be less than the number of bits if signed or at "
7710 "most the number of bits if unsigned.");
7711 assert(LHS.getValueType() == RHS.getValueType() &&
7712 "Expected both operands to be the same type");
7713
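  // The full product of two Scale-N operands carries 2N fractional bits; e.g.
  // in Q4 (Scale = 4), 2.5 * 2.0 is 40 * 32 = 1280, and 1280 >> 4 = 80, which
  // is 5.0 in Q4.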
7714 // Get the upper and lower bits of the result.
7715 SDValue Lo, Hi;
7716 unsigned LoHiOp = Signed ? ISD::SMUL_LOHI : ISD::UMUL_LOHI;
7717 unsigned HiOp = Signed ? ISD::MULHS : ISD::MULHU;
7718 if (isOperationLegalOrCustom(LoHiOp, VT)) {
7719 SDValue Result = DAG.getNode(LoHiOp, dl, DAG.getVTList(VT, VT), LHS, RHS);
7720 Lo = Result.getValue(0);
7721 Hi = Result.getValue(1);
7722 } else if (isOperationLegalOrCustom(HiOp, VT)) {
7723 Lo = DAG.getNode(ISD::MUL, dl, VT, LHS, RHS);
7724 Hi = DAG.getNode(HiOp, dl, VT, LHS, RHS);
7725 } else if (VT.isVector()) {
7726 return SDValue();
7727 } else {
7728 report_fatal_error("Unable to expand fixed point multiplication.");
7729 }
7730
7731 if (Scale == VTSize)
7732 // Result is just the top half since we'd be shifting by the width of the
7733     // operand. Overflow is impossible, so this works for both UMULFIX and
7734 // UMULFIXSAT.
7735 return Hi;
7736
7737 // The result will need to be shifted right by the scale since both operands
7738 // are scaled. The result is given to us in 2 halves, so we only want part of
7739 // both in the result.
7740 EVT ShiftTy = getShiftAmountTy(VT, DAG.getDataLayout());
7741 SDValue Result = DAG.getNode(ISD::FSHR, dl, VT, Hi, Lo,
7742 DAG.getConstant(Scale, dl, ShiftTy));
7743 if (!Saturating)
7744 return Result;
7745
7746 if (!Signed) {
7747 // Unsigned overflow happened if the upper (VTSize - Scale) bits (of the
7748 // widened multiplication) aren't all zeroes.
7749
7750 // Saturate to max if ((Hi >> Scale) != 0),
7751 // which is the same as if (Hi > ((1 << Scale) - 1))
7752 APInt MaxVal = APInt::getMaxValue(VTSize);
7753 SDValue LowMask = DAG.getConstant(APInt::getLowBitsSet(VTSize, Scale),
7754 dl, VT);
7755 Result = DAG.getSelectCC(dl, Hi, LowMask,
7756 DAG.getConstant(MaxVal, dl, VT), Result,
7757 ISD::SETUGT);
7758
7759 return Result;
7760 }
7761
7762 // Signed overflow happened if the upper (VTSize - Scale + 1) bits (of the
7763 // widened multiplication) aren't all ones or all zeroes.
7764
7765 SDValue SatMin = DAG.getConstant(APInt::getSignedMinValue(VTSize), dl, VT);
7766 SDValue SatMax = DAG.getConstant(APInt::getSignedMaxValue(VTSize), dl, VT);
7767
7768 if (Scale == 0) {
7769 SDValue Sign = DAG.getNode(ISD::SRA, dl, VT, Lo,
7770 DAG.getConstant(VTSize - 1, dl, ShiftTy));
7771 SDValue Overflow = DAG.getSetCC(dl, BoolVT, Hi, Sign, ISD::SETNE);
7772     // Saturate to SatMin if the wide product is negative, and to SatMax if
7773     // the wide product is positive ...
7774 SDValue Zero = DAG.getConstant(0, dl, VT);
7775 SDValue ResultIfOverflow = DAG.getSelectCC(dl, Hi, Zero, SatMin, SatMax,
7776 ISD::SETLT);
7777 // ... but only if we overflowed.
7778 return DAG.getSelect(dl, VT, Overflow, ResultIfOverflow, Result);
7779 }
7780
7781   // We handled Scale==0 above, so all the bits to examine are in Hi.
7782
7783 // Saturate to max if ((Hi >> (Scale - 1)) > 0),
7784 // which is the same as if (Hi > (1 << (Scale - 1)) - 1)
7785 SDValue LowMask = DAG.getConstant(APInt::getLowBitsSet(VTSize, Scale - 1),
7786 dl, VT);
7787 Result = DAG.getSelectCC(dl, Hi, LowMask, SatMax, Result, ISD::SETGT);
7788   // Saturate to min if ((Hi >> (Scale - 1)) < -1),
7789   // which is the same as if (Hi < (-1 << (Scale - 1))).
7790 SDValue HighMask =
7791 DAG.getConstant(APInt::getHighBitsSet(VTSize, VTSize - Scale + 1),
7792 dl, VT);
7793 Result = DAG.getSelectCC(dl, Hi, HighMask, SatMin, Result, ISD::SETLT);
7794 return Result;
7795 }
7796
7797 SDValue
7798 TargetLowering::expandFixedPointDiv(unsigned Opcode, const SDLoc &dl,
7799 SDValue LHS, SDValue RHS,
7800 unsigned Scale, SelectionDAG &DAG) const {
7801 assert((Opcode == ISD::SDIVFIX || Opcode == ISD::SDIVFIXSAT ||
7802 Opcode == ISD::UDIVFIX || Opcode == ISD::UDIVFIXSAT) &&
7803 "Expected a fixed point division opcode");
7804
7805 EVT VT = LHS.getValueType();
7806 bool Signed = Opcode == ISD::SDIVFIX || Opcode == ISD::SDIVFIXSAT;
7807 bool Saturating = Opcode == ISD::SDIVFIXSAT || Opcode == ISD::UDIVFIXSAT;
7808 EVT BoolVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT);
7809
7810 // If there is enough room in the type to upscale the LHS or downscale the
7811 // RHS before the division, we can perform it in this type without having to
7812 // resize. For signed operations, the LHS headroom is the number of
7813 // redundant sign bits, and for unsigned ones it is the number of zeroes.
7814 // The headroom for the RHS is the number of trailing zeroes.
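  // E.g. with Scale = 4, if LHS has 2 bits of headroom and RHS has 2 trailing
  // zeroes, we can compute (LHS << 2) / (RHS >> 2) without widening the type.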
7815 unsigned LHSLead = Signed ? DAG.ComputeNumSignBits(LHS) - 1
7816 : DAG.computeKnownBits(LHS).countMinLeadingZeros();
7817 unsigned RHSTrail = DAG.computeKnownBits(RHS).countMinTrailingZeros();
7818
7819 // For signed saturating operations, we need to be able to detect true integer
7820 // division overflow; that is, when you have MIN / -EPS. However, this
7821 // is undefined behavior and if we emit divisions that could take such
7822 // values it may cause undesired behavior (arithmetic exceptions on x86, for
7823 // example).
7824 // Avoid this by requiring an extra bit so that we never get this case.
7825 // FIXME: This is a bit unfortunate as it means that for an 8-bit 7-scale
7826 // signed saturating division, we need to emit a whopping 32-bit division.
7827 if (LHSLead + RHSTrail < Scale + (unsigned)(Saturating && Signed))
7828 return SDValue();
7829
7830 unsigned LHSShift = std::min(LHSLead, Scale);
7831 unsigned RHSShift = Scale - LHSShift;
7832
7833 // At this point, we know that if we shift the LHS up by LHSShift and the
7834 // RHS down by RHSShift, we can emit a regular division with a final scaling
7835 // factor of Scale.
7836
7837 EVT ShiftTy = getShiftAmountTy(VT, DAG.getDataLayout());
7838 if (LHSShift)
7839 LHS = DAG.getNode(ISD::SHL, dl, VT, LHS,
7840 DAG.getConstant(LHSShift, dl, ShiftTy));
7841 if (RHSShift)
7842 RHS = DAG.getNode(Signed ? ISD::SRA : ISD::SRL, dl, VT, RHS,
7843 DAG.getConstant(RHSShift, dl, ShiftTy));
7844
7845 SDValue Quot;
7846 if (Signed) {
7847 // For signed operations, if the resulting quotient is negative and the
7848 // remainder is nonzero, subtract 1 from the quotient to round towards
7849 // negative infinity.
7850 SDValue Rem;
7851 // FIXME: Ideally we would always produce an SDIVREM here, but if the
7852 // type isn't legal, SDIVREM cannot be expanded. There is no reason why
7853 // we couldn't just form a libcall, but the type legalizer doesn't do it.
7854 if (isTypeLegal(VT) &&
7855 isOperationLegalOrCustom(ISD::SDIVREM, VT)) {
7856 Quot = DAG.getNode(ISD::SDIVREM, dl,
7857 DAG.getVTList(VT, VT),
7858 LHS, RHS);
7859 Rem = Quot.getValue(1);
7860 Quot = Quot.getValue(0);
7861 } else {
7862 Quot = DAG.getNode(ISD::SDIV, dl, VT,
7863 LHS, RHS);
7864 Rem = DAG.getNode(ISD::SREM, dl, VT,
7865 LHS, RHS);
7866 }
7867 SDValue Zero = DAG.getConstant(0, dl, VT);
7868 SDValue RemNonZero = DAG.getSetCC(dl, BoolVT, Rem, Zero, ISD::SETNE);
7869 SDValue LHSNeg = DAG.getSetCC(dl, BoolVT, LHS, Zero, ISD::SETLT);
7870 SDValue RHSNeg = DAG.getSetCC(dl, BoolVT, RHS, Zero, ISD::SETLT);
7871 SDValue QuotNeg = DAG.getNode(ISD::XOR, dl, BoolVT, LHSNeg, RHSNeg);
7872 SDValue Sub1 = DAG.getNode(ISD::SUB, dl, VT, Quot,
7873 DAG.getConstant(1, dl, VT));
7874 Quot = DAG.getSelect(dl, VT,
7875 DAG.getNode(ISD::AND, dl, BoolVT, RemNonZero, QuotNeg),
7876 Sub1, Quot);
7877 } else
7878 Quot = DAG.getNode(ISD::UDIV, dl, VT,
7879 LHS, RHS);
7880
7881 return Quot;
7882 }
7883
7884 void TargetLowering::expandUADDSUBO(
7885 SDNode *Node, SDValue &Result, SDValue &Overflow, SelectionDAG &DAG) const {
7886 SDLoc dl(Node);
7887 SDValue LHS = Node->getOperand(0);
7888 SDValue RHS = Node->getOperand(1);
7889 bool IsAdd = Node->getOpcode() == ISD::UADDO;
7890
7891 // If ADD/SUBCARRY is legal, use that instead.
7892 unsigned OpcCarry = IsAdd ? ISD::ADDCARRY : ISD::SUBCARRY;
7893 if (isOperationLegalOrCustom(OpcCarry, Node->getValueType(0))) {
7894 SDValue CarryIn = DAG.getConstant(0, dl, Node->getValueType(1));
7895 SDValue NodeCarry = DAG.getNode(OpcCarry, dl, Node->getVTList(),
7896 { LHS, RHS, CarryIn });
7897 Result = SDValue(NodeCarry.getNode(), 0);
7898 Overflow = SDValue(NodeCarry.getNode(), 1);
7899 return;
7900 }
7901
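  // Otherwise compute the result with a plain ADD/SUB and derive the carry:
  // for addition, overflow iff Result < LHS (e.g. i8 250 + 10 wraps to 4);
  // for subtraction, overflow iff Result > LHS.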
7902 Result = DAG.getNode(IsAdd ? ISD::ADD : ISD::SUB, dl,
7903 LHS.getValueType(), LHS, RHS);
7904
7905 EVT ResultType = Node->getValueType(1);
7906 EVT SetCCType = getSetCCResultType(
7907 DAG.getDataLayout(), *DAG.getContext(), Node->getValueType(0));
7908 ISD::CondCode CC = IsAdd ? ISD::SETULT : ISD::SETUGT;
7909 SDValue SetCC = DAG.getSetCC(dl, SetCCType, Result, LHS, CC);
7910 Overflow = DAG.getBoolExtOrTrunc(SetCC, dl, ResultType, ResultType);
7911 }
7912
7913 void TargetLowering::expandSADDSUBO(
7914 SDNode *Node, SDValue &Result, SDValue &Overflow, SelectionDAG &DAG) const {
7915 SDLoc dl(Node);
7916 SDValue LHS = Node->getOperand(0);
7917 SDValue RHS = Node->getOperand(1);
7918 bool IsAdd = Node->getOpcode() == ISD::SADDO;
7919
7920 Result = DAG.getNode(IsAdd ? ISD::ADD : ISD::SUB, dl,
7921 LHS.getValueType(), LHS, RHS);
7922
7923 EVT ResultType = Node->getValueType(1);
7924 EVT OType = getSetCCResultType(
7925 DAG.getDataLayout(), *DAG.getContext(), Node->getValueType(0));
7926
7927 // If SADDSAT/SSUBSAT is legal, compare results to detect overflow.
7928 unsigned OpcSat = IsAdd ? ISD::SADDSAT : ISD::SSUBSAT;
7929 if (isOperationLegalOrCustom(OpcSat, LHS.getValueType())) {
7930 SDValue Sat = DAG.getNode(OpcSat, dl, LHS.getValueType(), LHS, RHS);
7931 SDValue SetCC = DAG.getSetCC(dl, OType, Result, Sat, ISD::SETNE);
7932 Overflow = DAG.getBoolExtOrTrunc(SetCC, dl, ResultType, ResultType);
7933 return;
7934 }
7935
7936 SDValue Zero = DAG.getConstant(0, dl, LHS.getValueType());
7937
7938 // For an addition, the result should be less than one of the operands (LHS)
7939 // if and only if the other operand (RHS) is negative, otherwise there will
7940 // be overflow.
7941 // For a subtraction, the result should be less than one of the operands
7942 // (LHS) if and only if the other operand (RHS) is (non-zero) positive,
7943 // otherwise there will be overflow.
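  // E.g. i8 sadd(100, 50) wraps to -106: RHS is not negative but the result
  // is less than LHS, so the XOR below reports overflow.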
7944 SDValue ResultLowerThanLHS = DAG.getSetCC(dl, OType, Result, LHS, ISD::SETLT);
7945 SDValue ConditionRHS =
7946 DAG.getSetCC(dl, OType, RHS, Zero, IsAdd ? ISD::SETLT : ISD::SETGT);
7947
7948 Overflow = DAG.getBoolExtOrTrunc(
7949 DAG.getNode(ISD::XOR, dl, OType, ConditionRHS, ResultLowerThanLHS), dl,
7950 ResultType, ResultType);
7951 }
7952
7953 bool TargetLowering::expandMULO(SDNode *Node, SDValue &Result,
7954 SDValue &Overflow, SelectionDAG &DAG) const {
7955 SDLoc dl(Node);
7956 EVT VT = Node->getValueType(0);
7957 EVT SetCCVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT);
7958 SDValue LHS = Node->getOperand(0);
7959 SDValue RHS = Node->getOperand(1);
7960 bool isSigned = Node->getOpcode() == ISD::SMULO;
7961
7962 // For power-of-two multiplications we can use a simpler shift expansion.
7963 if (ConstantSDNode *RHSC = isConstOrConstSplat(RHS)) {
7964 const APInt &C = RHSC->getAPIntValue();
7965 // mulo(X, 1 << S) -> { X << S, (X << S) >> S != X }
7966 if (C.isPowerOf2()) {
7967       // smulo(x, signed_min) is the same as umulo(x, signed_min).
7968 bool UseArithShift = isSigned && !C.isMinSignedValue();
7969 EVT ShiftAmtTy = getShiftAmountTy(VT, DAG.getDataLayout());
7970 SDValue ShiftAmt = DAG.getConstant(C.logBase2(), dl, ShiftAmtTy);
7971 Result = DAG.getNode(ISD::SHL, dl, VT, LHS, ShiftAmt);
7972 Overflow = DAG.getSetCC(dl, SetCCVT,
7973 DAG.getNode(UseArithShift ? ISD::SRA : ISD::SRL,
7974 dl, VT, Result, ShiftAmt),
7975 LHS, ISD::SETNE);
7976 return true;
7977 }
7978 }
7979
7980 EVT WideVT = EVT::getIntegerVT(*DAG.getContext(), VT.getScalarSizeInBits() * 2);
7981 if (VT.isVector())
7982 WideVT = EVT::getVectorVT(*DAG.getContext(), WideVT,
7983 VT.getVectorNumElements());
7984
7985 SDValue BottomHalf;
7986 SDValue TopHalf;
7987 static const unsigned Ops[2][3] =
7988 { { ISD::MULHU, ISD::UMUL_LOHI, ISD::ZERO_EXTEND },
7989 { ISD::MULHS, ISD::SMUL_LOHI, ISD::SIGN_EXTEND }};
7990 if (isOperationLegalOrCustom(Ops[isSigned][0], VT)) {
7991 BottomHalf = DAG.getNode(ISD::MUL, dl, VT, LHS, RHS);
7992 TopHalf = DAG.getNode(Ops[isSigned][0], dl, VT, LHS, RHS);
7993 } else if (isOperationLegalOrCustom(Ops[isSigned][1], VT)) {
7994 BottomHalf = DAG.getNode(Ops[isSigned][1], dl, DAG.getVTList(VT, VT), LHS,
7995 RHS);
7996 TopHalf = BottomHalf.getValue(1);
7997 } else if (isTypeLegal(WideVT)) {
7998 LHS = DAG.getNode(Ops[isSigned][2], dl, WideVT, LHS);
7999 RHS = DAG.getNode(Ops[isSigned][2], dl, WideVT, RHS);
8000 SDValue Mul = DAG.getNode(ISD::MUL, dl, WideVT, LHS, RHS);
8001 BottomHalf = DAG.getNode(ISD::TRUNCATE, dl, VT, Mul);
8002 SDValue ShiftAmt = DAG.getConstant(VT.getScalarSizeInBits(), dl,
8003 getShiftAmountTy(WideVT, DAG.getDataLayout()));
8004 TopHalf = DAG.getNode(ISD::TRUNCATE, dl, VT,
8005 DAG.getNode(ISD::SRL, dl, WideVT, Mul, ShiftAmt));
8006 } else {
8007 if (VT.isVector())
8008 return false;
8009
8010 // We can fall back to a libcall with an illegal type for the MUL if we
8011 // have a libcall big enough.
8012 // Also, we can fall back to a division in some cases, but that's a big
8013 // performance hit in the general case.
8014 RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
8015 if (WideVT == MVT::i16)
8016 LC = RTLIB::MUL_I16;
8017 else if (WideVT == MVT::i32)
8018 LC = RTLIB::MUL_I32;
8019 else if (WideVT == MVT::i64)
8020 LC = RTLIB::MUL_I64;
8021 else if (WideVT == MVT::i128)
8022 LC = RTLIB::MUL_I128;
8023 assert(LC != RTLIB::UNKNOWN_LIBCALL && "Cannot expand this operation!");
8024
8025 SDValue HiLHS;
8026 SDValue HiRHS;
8027 if (isSigned) {
8028       // The high part is obtained by SRA'ing all but one of the bits of the
8029       // low part, which simply replicates the sign bit.
8030 unsigned LoSize = VT.getFixedSizeInBits();
8031 HiLHS =
8032 DAG.getNode(ISD::SRA, dl, VT, LHS,
8033 DAG.getConstant(LoSize - 1, dl,
8034 getPointerTy(DAG.getDataLayout())));
8035 HiRHS =
8036 DAG.getNode(ISD::SRA, dl, VT, RHS,
8037 DAG.getConstant(LoSize - 1, dl,
8038 getPointerTy(DAG.getDataLayout())));
8039 } else {
8040 HiLHS = DAG.getConstant(0, dl, VT);
8041 HiRHS = DAG.getConstant(0, dl, VT);
8042 }
8043
8044     // Here we're passing the 2 arguments explicitly as 4 arguments that are
8045     // pre-lowered to the correct types. This all depends upon WideVT not
8046     // being a legal type for the architecture, so it has to be split into
8047     // two arguments.
8048 SDValue Ret;
8049 TargetLowering::MakeLibCallOptions CallOptions;
8050 CallOptions.setSExt(isSigned);
8051 CallOptions.setIsPostTypeLegalization(true);
8052 if (shouldSplitFunctionArgumentsAsLittleEndian(DAG.getDataLayout())) {
8053 // Halves of WideVT are packed into registers in different order
8054 // depending on platform endianness. This is usually handled by
8055 // the C calling convention, but we can't defer to it in
8056 // the legalizer.
8057 SDValue Args[] = { LHS, HiLHS, RHS, HiRHS };
8058 Ret = makeLibCall(DAG, LC, WideVT, Args, CallOptions, dl).first;
8059 } else {
8060 SDValue Args[] = { HiLHS, LHS, HiRHS, RHS };
8061 Ret = makeLibCall(DAG, LC, WideVT, Args, CallOptions, dl).first;
8062 }
8063 assert(Ret.getOpcode() == ISD::MERGE_VALUES &&
8064 "Ret value is a collection of constituent nodes holding result.");
8065 if (DAG.getDataLayout().isLittleEndian()) {
8066       // Same endianness ordering as described above.
8067 BottomHalf = Ret.getOperand(0);
8068 TopHalf = Ret.getOperand(1);
8069 } else {
8070 BottomHalf = Ret.getOperand(1);
8071 TopHalf = Ret.getOperand(0);
8072 }
8073 }
8074
8075 Result = BottomHalf;
8076 if (isSigned) {
8077 SDValue ShiftAmt = DAG.getConstant(
8078 VT.getScalarSizeInBits() - 1, dl,
8079 getShiftAmountTy(BottomHalf.getValueType(), DAG.getDataLayout()));
8080 SDValue Sign = DAG.getNode(ISD::SRA, dl, VT, BottomHalf, ShiftAmt);
8081 Overflow = DAG.getSetCC(dl, SetCCVT, TopHalf, Sign, ISD::SETNE);
8082 } else {
8083 Overflow = DAG.getSetCC(dl, SetCCVT, TopHalf,
8084 DAG.getConstant(0, dl, VT), ISD::SETNE);
8085 }
8086
8087 // Truncate the result if SetCC returns a larger type than needed.
8088 EVT RType = Node->getValueType(1);
8089 if (RType.bitsLT(Overflow.getValueType()))
8090 Overflow = DAG.getNode(ISD::TRUNCATE, dl, RType, Overflow);
8091
8092 assert(RType.getSizeInBits() == Overflow.getValueSizeInBits() &&
8093 "Unexpected result type for S/UMULO legalization");
8094 return true;
8095 }
8096
8097 SDValue TargetLowering::expandVecReduce(SDNode *Node, SelectionDAG &DAG) const {
8098 SDLoc dl(Node);
8099 unsigned BaseOpcode = ISD::getVecReduceBaseOpcode(Node->getOpcode());
8100 SDValue Op = Node->getOperand(0);
8101 EVT VT = Op.getValueType();
8102
8103 if (VT.isScalableVector())
8104 report_fatal_error(
8105 "Expanding reductions for scalable vectors is undefined.");
8106
8107 // Try to use a shuffle reduction for power of two vectors.
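  // E.g. a vecreduce.add of v8i32 becomes an add of two v4i32 halves, then of
  // two v2i32 halves, with any remaining elements combined by scalar adds.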
8108 if (VT.isPow2VectorType()) {
8109 while (VT.getVectorNumElements() > 1) {
8110 EVT HalfVT = VT.getHalfNumVectorElementsVT(*DAG.getContext());
8111 if (!isOperationLegalOrCustom(BaseOpcode, HalfVT))
8112 break;
8113
8114 SDValue Lo, Hi;
8115 std::tie(Lo, Hi) = DAG.SplitVector(Op, dl);
8116 Op = DAG.getNode(BaseOpcode, dl, HalfVT, Lo, Hi);
8117 VT = HalfVT;
8118 }
8119 }
8120
8121 EVT EltVT = VT.getVectorElementType();
8122 unsigned NumElts = VT.getVectorNumElements();
8123
8124 SmallVector<SDValue, 8> Ops;
8125 DAG.ExtractVectorElements(Op, Ops, 0, NumElts);
8126
8127 SDValue Res = Ops[0];
8128 for (unsigned i = 1; i < NumElts; i++)
8129 Res = DAG.getNode(BaseOpcode, dl, EltVT, Res, Ops[i], Node->getFlags());
8130
8131 // Result type may be wider than element type.
8132 if (EltVT != Node->getValueType(0))
8133 Res = DAG.getNode(ISD::ANY_EXTEND, dl, Node->getValueType(0), Res);
8134 return Res;
8135 }
8136
8137 SDValue TargetLowering::expandVecReduceSeq(SDNode *Node, SelectionDAG &DAG) const {
8138 SDLoc dl(Node);
8139 SDValue AccOp = Node->getOperand(0);
8140 SDValue VecOp = Node->getOperand(1);
8141 SDNodeFlags Flags = Node->getFlags();
8142
8143 EVT VT = VecOp.getValueType();
8144 EVT EltVT = VT.getVectorElementType();
8145 unsigned NumElts = VT.getVectorNumElements();
8146
8147 SmallVector<SDValue, 8> Ops;
8148 DAG.ExtractVectorElements(VecOp, Ops, 0, NumElts);
8149
8150 unsigned BaseOpcode = ISD::getVecReduceBaseOpcode(Node->getOpcode());
8151
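  // Sequential reduction: fold the accumulator through the elements strictly
  // in order, e.g. (((Acc op e0) op e1) op e2) ..., preserving the semantics
  // of non-reassociative floating-point reductions.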
8152 SDValue Res = AccOp;
8153 for (unsigned i = 0; i < NumElts; i++)
8154 Res = DAG.getNode(BaseOpcode, dl, EltVT, Res, Ops[i], Flags);
8155
8156 return Res;
8157 }
8158
8159 bool TargetLowering::expandREM(SDNode *Node, SDValue &Result,
8160 SelectionDAG &DAG) const {
8161 EVT VT = Node->getValueType(0);
8162 SDLoc dl(Node);
8163 bool isSigned = Node->getOpcode() == ISD::SREM;
8164 unsigned DivOpc = isSigned ? ISD::SDIV : ISD::UDIV;
8165 unsigned DivRemOpc = isSigned ? ISD::SDIVREM : ISD::UDIVREM;
8166 SDValue Dividend = Node->getOperand(0);
8167 SDValue Divisor = Node->getOperand(1);
8168 if (isOperationLegalOrCustom(DivRemOpc, VT)) {
8169 SDVTList VTs = DAG.getVTList(VT, VT);
8170 Result = DAG.getNode(DivRemOpc, dl, VTs, Dividend, Divisor).getValue(1);
8171 return true;
8172 } else if (isOperationLegalOrCustom(DivOpc, VT)) {
8173 // X % Y -> X-X/Y*Y
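    // E.g. 7 % 3 = 7 - (7 / 3) * 3 = 7 - 6 = 1.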
8174 SDValue Divide = DAG.getNode(DivOpc, dl, VT, Dividend, Divisor);
8175 SDValue Mul = DAG.getNode(ISD::MUL, dl, VT, Divide, Divisor);
8176 Result = DAG.getNode(ISD::SUB, dl, VT, Dividend, Mul);
8177 return true;
8178 }
8179 return false;
8180 }
8181