//===- SelectionDAG.cpp - Implement the SelectionDAG data structures ------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This implements the SelectionDAG class.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/SelectionDAG.h"
#include "SDNodeDbgValue.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Triple.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/ProfileSummaryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/RuntimeLibcalls.h"
#include "llvm/CodeGen/SelectionDAGAddressAnalysis.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/SelectionDAGTargetInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MachineValueType.h"
#include "llvm/Support/ManagedStatic.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Mutex.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Transforms/Utils/SizeOpts.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <cstdlib>
#include <limits>
#include <set>
#include <string>
#include <utility>
#include <vector>

using namespace llvm;

/// makeVTList - Return an instance of the SDVTList struct initialized with the
/// specified members.
static SDVTList makeVTList(const EVT *VTs, unsigned NumVTs) {
  SDVTList Res = {VTs, NumVTs};
  return Res;
}

// Default null implementations of the callbacks.
void SelectionDAG::DAGUpdateListener::NodeDeleted(SDNode*, SDNode*) {}
void SelectionDAG::DAGUpdateListener::NodeUpdated(SDNode*) {}
void SelectionDAG::DAGUpdateListener::NodeInserted(SDNode *) {}

void SelectionDAG::DAGNodeDeletedListener::anchor() {}

#define DEBUG_TYPE "selectiondag"

static cl::opt<bool> EnableMemCpyDAGOpt("enable-memcpy-dag-opt",
       cl::Hidden, cl::init(true),
       cl::desc("Gang up loads and stores generated by inlining of memcpy"));

static cl::opt<int> MaxLdStGlue("ldstmemcpy-glue-max",
       cl::desc("Number limit for gluing ld/st of memcpy."),
       cl::Hidden, cl::init(0));

static void NewSDValueDbgMsg(SDValue V, StringRef Msg, SelectionDAG *G) {
  LLVM_DEBUG(dbgs() << Msg; V.getNode()->dump(G););
}

//===----------------------------------------------------------------------===//
// ConstantFPSDNode Class
//===----------------------------------------------------------------------===//

/// isExactlyValue - We don't rely on operator== working on double values, as
/// it returns true for things that are clearly not equal, like -0.0 and 0.0.
/// As such, this method can be used to do an exact bit-for-bit comparison of
/// two floating point values.
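/// For example, the doubles -0.0 and +0.0 compare equal under operator== yet
/// differ in their sign bit, so isExactlyValue(APFloat(-0.0)) is false for a
/// node holding +0.0.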
bool ConstantFPSDNode::isExactlyValue(const APFloat& V) const {
  return getValueAPF().bitwiseIsEqual(V);
}

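// For instance, the f64 value 0.5 is valid for MVT::f32 since it converts
// exactly, while 0.1 is not: rounding its f64 bit pattern to single precision
// loses information, so convert() sets losesInfo and the check fails.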
bool ConstantFPSDNode::isValueValidForType(EVT VT,
                                           const APFloat& Val) {
  assert(VT.isFloatingPoint() && "Can only convert between FP types");

  // convert modifies in place, so make a copy.
  APFloat Val2 = APFloat(Val);
  bool losesInfo;
  (void) Val2.convert(SelectionDAG::EVTToAPFloatSemantics(VT),
                      APFloat::rmNearestTiesToEven,
                      &losesInfo);
  return !losesInfo;
}

//===----------------------------------------------------------------------===//
// ISD Namespace
//===----------------------------------------------------------------------===//

bool ISD::isConstantSplatVector(const SDNode *N, APInt &SplatVal) {
  auto *BV = dyn_cast<BuildVectorSDNode>(N);
  if (!BV)
    return false;

  APInt SplatUndef;
  unsigned SplatBitSize;
  bool HasUndefs;
  unsigned EltSize = N->getValueType(0).getVectorElementType().getSizeInBits();
  return BV->isConstantSplat(SplatVal, SplatUndef, SplatBitSize, HasUndefs,
                             EltSize) &&
         EltSize == SplatBitSize;
}

// FIXME: AllOnes and AllZeros duplicate a lot of code. Could these be
// specializations of the more general isConstantSplatVector()?

bool ISD::isBuildVectorAllOnes(const SDNode *N) {
  // Look through a bit convert.
  while (N->getOpcode() == ISD::BITCAST)
    N = N->getOperand(0).getNode();

  if (N->getOpcode() != ISD::BUILD_VECTOR) return false;

  unsigned i = 0, e = N->getNumOperands();

  // Skip over all of the undef values.
  while (i != e && N->getOperand(i).isUndef())
    ++i;

  // Do not accept an all-undef vector.
  if (i == e) return false;

  // Do not accept build_vectors that aren't all constants or which have non-~0
  // elements. We have to be a bit careful here, as the type of the constant
  // may not be the same as the type of the vector elements due to type
  // legalization (the elements are promoted to a legal type for the target and
  // a vector of a type may be legal when the base element type is not).
  // We only want to check enough bits to cover the vector elements, because
  // we care if the resultant vector is all ones, not whether the individual
  // constants are.
  SDValue NotZero = N->getOperand(i);
  unsigned EltSize = N->getValueType(0).getScalarSizeInBits();
  if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(NotZero)) {
    if (CN->getAPIntValue().countTrailingOnes() < EltSize)
      return false;
  } else if (ConstantFPSDNode *CFPN = dyn_cast<ConstantFPSDNode>(NotZero)) {
    if (CFPN->getValueAPF().bitcastToAPInt().countTrailingOnes() < EltSize)
      return false;
  } else
    return false;

  // Okay, we have at least one ~0 value, check to see if the rest match or are
  // undefs. Even with the above element type twiddling, this should be OK, as
  // the same type legalization should have applied to all the elements.
  for (++i; i != e; ++i)
    if (N->getOperand(i) != NotZero && !N->getOperand(i).isUndef())
      return false;
  return true;
}

bool ISD::isBuildVectorAllZeros(const SDNode *N) {
  // Look through a bit convert.
  while (N->getOpcode() == ISD::BITCAST)
    N = N->getOperand(0).getNode();

  if (N->getOpcode() != ISD::BUILD_VECTOR) return false;

  bool IsAllUndef = true;
  for (const SDValue &Op : N->op_values()) {
    if (Op.isUndef())
      continue;
    IsAllUndef = false;
    // Do not accept build_vectors that aren't all constants or which have non-0
    // elements. We have to be a bit careful here, as the type of the constant
    // may not be the same as the type of the vector elements due to type
    // legalization (the elements are promoted to a legal type for the target
    // and a vector of a type may be legal when the base element type is not).
    // We only want to check enough bits to cover the vector elements, because
    // we care if the resultant vector is all zeros, not whether the individual
    // constants are.
    unsigned EltSize = N->getValueType(0).getScalarSizeInBits();
    if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Op)) {
      if (CN->getAPIntValue().countTrailingZeros() < EltSize)
        return false;
    } else if (ConstantFPSDNode *CFPN = dyn_cast<ConstantFPSDNode>(Op)) {
      if (CFPN->getValueAPF().bitcastToAPInt().countTrailingZeros() < EltSize)
        return false;
    } else
      return false;
  }

  // Do not accept an all-undef vector.
  if (IsAllUndef)
    return false;
  return true;
}

bool ISD::isBuildVectorOfConstantSDNodes(const SDNode *N) {
  if (N->getOpcode() != ISD::BUILD_VECTOR)
    return false;

  for (const SDValue &Op : N->op_values()) {
    if (Op.isUndef())
      continue;
    if (!isa<ConstantSDNode>(Op))
      return false;
  }
  return true;
}

bool ISD::isBuildVectorOfConstantFPSDNodes(const SDNode *N) {
  if (N->getOpcode() != ISD::BUILD_VECTOR)
    return false;

  for (const SDValue &Op : N->op_values()) {
    if (Op.isUndef())
      continue;
    if (!isa<ConstantFPSDNode>(Op))
      return false;
  }
  return true;
}

bool ISD::allOperandsUndef(const SDNode *N) {
  // Return false if the node has no operands.
  // This is "logically inconsistent" with the definition of "all" but
  // is probably the desired behavior.
  if (N->getNumOperands() == 0)
    return false;
  return all_of(N->op_values(), [](SDValue Op) { return Op.isUndef(); });
}

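// Illustrative usage sketch (not part of the original source): check that a
// scalar constant or every BUILD_VECTOR element is a power of two:
//
//   bool AllPow2 = ISD::matchUnaryPredicate(Op, [](ConstantSDNode *C) {
//     return C && C->getAPIntValue().isPowerOf2();
//   });
//
// When AllowUndefs is true the predicate is also invoked with nullptr for
// undef elements, so callers must tolerate a null ConstantSDNode.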
bool ISD::matchUnaryPredicate(SDValue Op,
                              std::function<bool(ConstantSDNode *)> Match,
                              bool AllowUndefs) {
  // FIXME: Add support for scalar UNDEF cases?
  if (auto *Cst = dyn_cast<ConstantSDNode>(Op))
    return Match(Cst);

  // FIXME: Add support for vector UNDEF cases?
  if (ISD::BUILD_VECTOR != Op.getOpcode())
    return false;

  EVT SVT = Op.getValueType().getScalarType();
  for (unsigned i = 0, e = Op.getNumOperands(); i != e; ++i) {
    if (AllowUndefs && Op.getOperand(i).isUndef()) {
      if (!Match(nullptr))
        return false;
      continue;
    }

    auto *Cst = dyn_cast<ConstantSDNode>(Op.getOperand(i));
    if (!Cst || Cst->getValueType(0) != SVT || !Match(Cst))
      return false;
  }
  return true;
}

bool ISD::matchBinaryPredicate(
    SDValue LHS, SDValue RHS,
    std::function<bool(ConstantSDNode *, ConstantSDNode *)> Match,
    bool AllowUndefs, bool AllowTypeMismatch) {
  if (!AllowTypeMismatch && LHS.getValueType() != RHS.getValueType())
    return false;

  // TODO: Add support for scalar UNDEF cases?
  if (auto *LHSCst = dyn_cast<ConstantSDNode>(LHS))
    if (auto *RHSCst = dyn_cast<ConstantSDNode>(RHS))
      return Match(LHSCst, RHSCst);

  // TODO: Add support for vector UNDEF cases?
  if (ISD::BUILD_VECTOR != LHS.getOpcode() ||
      ISD::BUILD_VECTOR != RHS.getOpcode())
    return false;

  EVT SVT = LHS.getValueType().getScalarType();
  for (unsigned i = 0, e = LHS.getNumOperands(); i != e; ++i) {
    SDValue LHSOp = LHS.getOperand(i);
    SDValue RHSOp = RHS.getOperand(i);
    bool LHSUndef = AllowUndefs && LHSOp.isUndef();
    bool RHSUndef = AllowUndefs && RHSOp.isUndef();
    auto *LHSCst = dyn_cast<ConstantSDNode>(LHSOp);
    auto *RHSCst = dyn_cast<ConstantSDNode>(RHSOp);
    if ((!LHSCst && !LHSUndef) || (!RHSCst && !RHSUndef))
      return false;
    if (!AllowTypeMismatch && (LHSOp.getValueType() != SVT ||
                               LHSOp.getValueType() != RHSOp.getValueType()))
      return false;
    if (!Match(LHSCst, RHSCst))
      return false;
  }
  return true;
}

ISD::NodeType ISD::getExtForLoadExtType(bool IsFP, ISD::LoadExtType ExtType) {
  switch (ExtType) {
  case ISD::EXTLOAD:
    return IsFP ? ISD::FP_EXTEND : ISD::ANY_EXTEND;
  case ISD::SEXTLOAD:
    return ISD::SIGN_EXTEND;
  case ISD::ZEXTLOAD:
    return ISD::ZERO_EXTEND;
  default:
    break;
  }

  llvm_unreachable("Invalid LoadExtType");
}

ISD::CondCode ISD::getSetCCSwappedOperands(ISD::CondCode Operation) {
  // To perform this operation, we just need to swap the L and G bits of the
  // operation.
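  // For example, ISD::SETOLT (only the L bit set) becomes ISD::SETOGT, i.e.
  // (x < y) turns into (y > x) when the operands are exchanged; the U, N and
  // E bits pass through unchanged, so ISD::SETULE maps to ISD::SETUGE.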
  unsigned OldL = (Operation >> 2) & 1;
  unsigned OldG = (Operation >> 1) & 1;
  return ISD::CondCode((Operation & ~6) |  // Keep the N, U, E bits
                       (OldL << 1) |       // New G bit
                       (OldG << 2));       // New L bit.
}

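// Worked example for the bit flipping below: for integers, ISD::SETLT (N|L)
// XOR 7 flips the L, G and E bits, yielding ISD::SETGE (N|G|E), which is
// exactly !(x < y). For FP, all four condition bits flip, so ISD::SETOLT
// becomes ISD::SETUGE, its unordered-or-greater-or-equal complement.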
static ISD::CondCode getSetCCInverseImpl(ISD::CondCode Op, bool isIntegerLike) {
  unsigned Operation = Op;
  if (isIntegerLike)
    Operation ^= 7;  // Flip L, G, E bits, but not U.
  else
    Operation ^= 15; // Flip all of the condition bits.

  if (Operation > ISD::SETTRUE2)
    Operation &= ~8; // Don't let N and U bits get set.

  return ISD::CondCode(Operation);
}

ISD::CondCode ISD::getSetCCInverse(ISD::CondCode Op, EVT Type) {
  return getSetCCInverseImpl(Op, Type.isInteger());
}

ISD::CondCode ISD::GlobalISel::getSetCCInverse(ISD::CondCode Op,
                                               bool isIntegerLike) {
  return getSetCCInverseImpl(Op, isIntegerLike);
}

/// For an integer comparison, return 1 if the comparison is a signed operation
/// and 2 if it is an unsigned comparison. Return zero if the operation does
/// not depend on the sign of the input (setne and seteq).
static int isSignedOp(ISD::CondCode Opcode) {
  switch (Opcode) {
  default: llvm_unreachable("Illegal integer setcc operation!");
  case ISD::SETEQ:
  case ISD::SETNE: return 0;
  case ISD::SETLT:
  case ISD::SETLE:
  case ISD::SETGT:
  case ISD::SETGE: return 1;
  case ISD::SETULT:
  case ISD::SETULE:
  case ISD::SETUGT:
  case ISD::SETUGE: return 2;
  }
}

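// Worked example: OR-ing ISD::SETGT (N|G) with ISD::SETLT (N|L) yields N|G|L,
// which is ISD::SETNE -- "greater or less" means "not equal" for integers.
// The unsigned combination SETUGT | SETULT instead produces SETUNE, which the
// canonicalization in the body rewrites to SETNE.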
ISD::CondCode ISD::getSetCCOrOperation(ISD::CondCode Op1, ISD::CondCode Op2,
                                       EVT Type) {
  bool IsInteger = Type.isInteger();
  if (IsInteger && (isSignedOp(Op1) | isSignedOp(Op2)) == 3)
    // Cannot fold a signed integer setcc with an unsigned integer setcc.
    return ISD::SETCC_INVALID;

  unsigned Op = Op1 | Op2;  // Combine all of the condition bits.

  // If the N and U bits get set, then the resultant comparison DOES suddenly
  // care about orderedness, and it is true when ordered.
  if (Op > ISD::SETTRUE2)
    Op &= ~16;  // Clear the U bit if the N bit is set.

  // Canonicalize illegal integer setcc's.
  if (IsInteger && Op == ISD::SETUNE)  // e.g. SETUGT | SETULT
    Op = ISD::SETNE;

  return ISD::CondCode(Op);
}

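// Worked example: AND-ing ISD::SETUGE (U|G|E) with ISD::SETULE (U|L|E) leaves
// U|E, i.e. ISD::SETUEQ, which the integer canonicalization below turns into
// plain ISD::SETEQ. SETUGT & SETULT share no condition bits, giving SETUO,
// which folds to SETFALSE for integers.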
ISD::CondCode ISD::getSetCCAndOperation(ISD::CondCode Op1, ISD::CondCode Op2,
                                        EVT Type) {
  bool IsInteger = Type.isInteger();
  if (IsInteger && (isSignedOp(Op1) | isSignedOp(Op2)) == 3)
    // Cannot fold a signed setcc with an unsigned setcc.
    return ISD::SETCC_INVALID;

  // Combine all of the condition bits.
  ISD::CondCode Result = ISD::CondCode(Op1 & Op2);

  // Canonicalize illegal integer setcc's.
  if (IsInteger) {
    switch (Result) {
    default: break;
    case ISD::SETUO : Result = ISD::SETFALSE; break;  // SETUGT & SETULT
    case ISD::SETOEQ:                                 // SETEQ  & SETU[LG]E
    case ISD::SETUEQ: Result = ISD::SETEQ   ; break;  // SETUGE & SETULE
    case ISD::SETOLT: Result = ISD::SETULT  ; break;  // SETULT & SETNE
    case ISD::SETOGT: Result = ISD::SETUGT  ; break;  // SETUGT & SETNE
    }
  }

  return Result;
}

//===----------------------------------------------------------------------===//
// SDNode Profile Support
//===----------------------------------------------------------------------===//

/// AddNodeIDOpcode - Add the node opcode to the NodeID data.
static void AddNodeIDOpcode(FoldingSetNodeID &ID, unsigned OpC) {
  ID.AddInteger(OpC);
}

/// AddNodeIDValueTypes - Value type lists are intern'd so we can represent them
/// solely with their pointer.
static void AddNodeIDValueTypes(FoldingSetNodeID &ID, SDVTList VTList) {
  ID.AddPointer(VTList.VTs);
}

/// AddNodeIDOperands - Various routines for adding operands to the NodeID data.
static void AddNodeIDOperands(FoldingSetNodeID &ID,
                              ArrayRef<SDValue> Ops) {
  for (auto& Op : Ops) {
    ID.AddPointer(Op.getNode());
    ID.AddInteger(Op.getResNo());
  }
}

/// AddNodeIDOperands - Various routines for adding operands to the NodeID data.
static void AddNodeIDOperands(FoldingSetNodeID &ID,
                              ArrayRef<SDUse> Ops) {
  for (auto& Op : Ops) {
    ID.AddPointer(Op.getNode());
    ID.AddInteger(Op.getResNo());
  }
}

static void AddNodeIDNode(FoldingSetNodeID &ID, unsigned short OpC,
                          SDVTList VTList, ArrayRef<SDValue> OpList) {
  AddNodeIDOpcode(ID, OpC);
  AddNodeIDValueTypes(ID, VTList);
  AddNodeIDOperands(ID, OpList);
}

/// If this is an SDNode with special info, add this info to the NodeID data.
static void AddNodeIDCustom(FoldingSetNodeID &ID, const SDNode *N) {
  switch (N->getOpcode()) {
  case ISD::TargetExternalSymbol:
  case ISD::ExternalSymbol:
  case ISD::MCSymbol:
    llvm_unreachable("Should only be used on nodes with operands");
  default: break;  // Normal nodes don't need extra info.
  case ISD::TargetConstant:
  case ISD::Constant: {
    const ConstantSDNode *C = cast<ConstantSDNode>(N);
    ID.AddPointer(C->getConstantIntValue());
    ID.AddBoolean(C->isOpaque());
    break;
  }
  case ISD::TargetConstantFP:
  case ISD::ConstantFP:
    ID.AddPointer(cast<ConstantFPSDNode>(N)->getConstantFPValue());
    break;
  case ISD::TargetGlobalAddress:
  case ISD::GlobalAddress:
  case ISD::TargetGlobalTLSAddress:
  case ISD::GlobalTLSAddress: {
    const GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(N);
    ID.AddPointer(GA->getGlobal());
    ID.AddInteger(GA->getOffset());
    ID.AddInteger(GA->getTargetFlags());
    break;
  }
  case ISD::BasicBlock:
    ID.AddPointer(cast<BasicBlockSDNode>(N)->getBasicBlock());
    break;
  case ISD::Register:
    ID.AddInteger(cast<RegisterSDNode>(N)->getReg());
    break;
  case ISD::RegisterMask:
    ID.AddPointer(cast<RegisterMaskSDNode>(N)->getRegMask());
    break;
  case ISD::SRCVALUE:
    ID.AddPointer(cast<SrcValueSDNode>(N)->getValue());
    break;
  case ISD::FrameIndex:
  case ISD::TargetFrameIndex:
    ID.AddInteger(cast<FrameIndexSDNode>(N)->getIndex());
    break;
  case ISD::LIFETIME_START:
  case ISD::LIFETIME_END:
    if (cast<LifetimeSDNode>(N)->hasOffset()) {
      ID.AddInteger(cast<LifetimeSDNode>(N)->getSize());
      ID.AddInteger(cast<LifetimeSDNode>(N)->getOffset());
    }
    break;
  case ISD::JumpTable:
  case ISD::TargetJumpTable:
    ID.AddInteger(cast<JumpTableSDNode>(N)->getIndex());
    ID.AddInteger(cast<JumpTableSDNode>(N)->getTargetFlags());
    break;
  case ISD::ConstantPool:
  case ISD::TargetConstantPool: {
    const ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(N);
    ID.AddInteger(CP->getAlignment());
    ID.AddInteger(CP->getOffset());
    if (CP->isMachineConstantPoolEntry())
      CP->getMachineCPVal()->addSelectionDAGCSEId(ID);
    else
      ID.AddPointer(CP->getConstVal());
    ID.AddInteger(CP->getTargetFlags());
    break;
  }
  case ISD::TargetIndex: {
    const TargetIndexSDNode *TI = cast<TargetIndexSDNode>(N);
    ID.AddInteger(TI->getIndex());
    ID.AddInteger(TI->getOffset());
    ID.AddInteger(TI->getTargetFlags());
    break;
  }
  case ISD::LOAD: {
    const LoadSDNode *LD = cast<LoadSDNode>(N);
    ID.AddInteger(LD->getMemoryVT().getRawBits());
    ID.AddInteger(LD->getRawSubclassData());
    ID.AddInteger(LD->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::STORE: {
    const StoreSDNode *ST = cast<StoreSDNode>(N);
    ID.AddInteger(ST->getMemoryVT().getRawBits());
    ID.AddInteger(ST->getRawSubclassData());
    ID.AddInteger(ST->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::MLOAD: {
    const MaskedLoadSDNode *MLD = cast<MaskedLoadSDNode>(N);
    ID.AddInteger(MLD->getMemoryVT().getRawBits());
    ID.AddInteger(MLD->getRawSubclassData());
    ID.AddInteger(MLD->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::MSTORE: {
    const MaskedStoreSDNode *MST = cast<MaskedStoreSDNode>(N);
    ID.AddInteger(MST->getMemoryVT().getRawBits());
    ID.AddInteger(MST->getRawSubclassData());
    ID.AddInteger(MST->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::MGATHER: {
    const MaskedGatherSDNode *MG = cast<MaskedGatherSDNode>(N);
    ID.AddInteger(MG->getMemoryVT().getRawBits());
    ID.AddInteger(MG->getRawSubclassData());
    ID.AddInteger(MG->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::MSCATTER: {
    const MaskedScatterSDNode *MS = cast<MaskedScatterSDNode>(N);
    ID.AddInteger(MS->getMemoryVT().getRawBits());
    ID.AddInteger(MS->getRawSubclassData());
    ID.AddInteger(MS->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::ATOMIC_CMP_SWAP:
  case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
  case ISD::ATOMIC_SWAP:
  case ISD::ATOMIC_LOAD_ADD:
  case ISD::ATOMIC_LOAD_SUB:
  case ISD::ATOMIC_LOAD_AND:
  case ISD::ATOMIC_LOAD_CLR:
  case ISD::ATOMIC_LOAD_OR:
  case ISD::ATOMIC_LOAD_XOR:
  case ISD::ATOMIC_LOAD_NAND:
  case ISD::ATOMIC_LOAD_MIN:
  case ISD::ATOMIC_LOAD_MAX:
  case ISD::ATOMIC_LOAD_UMIN:
  case ISD::ATOMIC_LOAD_UMAX:
  case ISD::ATOMIC_LOAD:
  case ISD::ATOMIC_STORE: {
    const AtomicSDNode *AT = cast<AtomicSDNode>(N);
    ID.AddInteger(AT->getMemoryVT().getRawBits());
    ID.AddInteger(AT->getRawSubclassData());
    ID.AddInteger(AT->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::PREFETCH: {
    const MemSDNode *PF = cast<MemSDNode>(N);
    ID.AddInteger(PF->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::VECTOR_SHUFFLE: {
    const ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N);
    for (unsigned i = 0, e = N->getValueType(0).getVectorNumElements();
         i != e; ++i)
      ID.AddInteger(SVN->getMaskElt(i));
    break;
  }
  case ISD::TargetBlockAddress:
  case ISD::BlockAddress: {
    const BlockAddressSDNode *BA = cast<BlockAddressSDNode>(N);
    ID.AddPointer(BA->getBlockAddress());
    ID.AddInteger(BA->getOffset());
    ID.AddInteger(BA->getTargetFlags());
    break;
  }
  } // end switch (N->getOpcode())

  // Target specific memory nodes could also have address spaces to check.
  if (N->isTargetMemoryOpcode())
    ID.AddInteger(cast<MemSDNode>(N)->getPointerInfo().getAddrSpace());
}

/// AddNodeIDNode - Generic routine for adding a node's info to the NodeID
/// data.
static void AddNodeIDNode(FoldingSetNodeID &ID, const SDNode *N) {
  AddNodeIDOpcode(ID, N->getOpcode());
  // Add the return value info.
  AddNodeIDValueTypes(ID, N->getVTList());
  // Add the operand info.
  AddNodeIDOperands(ID, N->ops());

  // Handle SDNode leaves with special info.
  AddNodeIDCustom(ID, N);
}

//===----------------------------------------------------------------------===//
// SelectionDAG Class
//===----------------------------------------------------------------------===//

/// doNotCSE - Return true if CSE should not be performed for this node.
static bool doNotCSE(SDNode *N) {
  if (N->getValueType(0) == MVT::Glue)
    return true; // Never CSE anything that produces a flag.

  switch (N->getOpcode()) {
  default: break;
  case ISD::HANDLENODE:
  case ISD::EH_LABEL:
    return true; // Never CSE these nodes.
  }

  // Check that remaining values produced are not flags.
  for (unsigned i = 1, e = N->getNumValues(); i != e; ++i)
    if (N->getValueType(i) == MVT::Glue)
      return true; // Never CSE anything that produces a flag.

  return false;
}

/// RemoveDeadNodes - This method deletes all unreachable nodes in the
/// SelectionDAG.
void SelectionDAG::RemoveDeadNodes() {
  // Create a dummy node (which is not added to allnodes), that adds a reference
  // to the root node, preventing it from being deleted.
  HandleSDNode Dummy(getRoot());

  SmallVector<SDNode*, 128> DeadNodes;

  // Add all obviously-dead nodes to the DeadNodes worklist.
  for (SDNode &Node : allnodes())
    if (Node.use_empty())
      DeadNodes.push_back(&Node);

  RemoveDeadNodes(DeadNodes);

  // If the root changed (e.g. it was a dead load), update the root.
  setRoot(Dummy.getValue());
}

/// RemoveDeadNodes - This method deletes the unreachable nodes in the
/// given list, and any nodes that become unreachable as a result.
void SelectionDAG::RemoveDeadNodes(SmallVectorImpl<SDNode *> &DeadNodes) {

  // Process the worklist, deleting the nodes and adding their uses to the
  // worklist.
  while (!DeadNodes.empty()) {
    SDNode *N = DeadNodes.pop_back_val();
    // Skip to next node if we've already managed to delete the node. This could
    // happen if replacing a node causes a node previously added to the worklist
    // to be deleted.
    if (N->getOpcode() == ISD::DELETED_NODE)
      continue;

    for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
      DUL->NodeDeleted(N, nullptr);

    // Take the node out of the appropriate CSE map.
    RemoveNodeFromCSEMaps(N);

    // Next, brutally remove the operand list. This is safe to do, as there are
    // no cycles in the graph.
    for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ) {
      SDUse &Use = *I++;
      SDNode *Operand = Use.getNode();
      Use.set(SDValue());

      // Now that we removed this operand, see if there are no uses of it left.
      if (Operand->use_empty())
        DeadNodes.push_back(Operand);
    }

    DeallocateNode(N);
  }
}

void SelectionDAG::RemoveDeadNode(SDNode *N){
  SmallVector<SDNode*, 16> DeadNodes(1, N);

  // Create a dummy node that adds a reference to the root node, preventing
  // it from being deleted. (This matters if the root is an operand of the
  // dead node.)
  HandleSDNode Dummy(getRoot());

  RemoveDeadNodes(DeadNodes);
}

void SelectionDAG::DeleteNode(SDNode *N) {
  // First take this out of the appropriate CSE map.
  RemoveNodeFromCSEMaps(N);

  // Finally, remove uses due to operands of this node, remove from the
  // AllNodes list, and delete the node.
  DeleteNodeNotInCSEMaps(N);
}

void SelectionDAG::DeleteNodeNotInCSEMaps(SDNode *N) {
  assert(N->getIterator() != AllNodes.begin() &&
         "Cannot delete the entry node!");
  assert(N->use_empty() && "Cannot delete a node that is not dead!");

  // Drop all of the operands and decrement used node's use counts.
  N->DropOperands();

  DeallocateNode(N);
}

void SDDbgInfo::erase(const SDNode *Node) {
  DbgValMapType::iterator I = DbgValMap.find(Node);
  if (I == DbgValMap.end())
    return;
  for (auto &Val: I->second)
    Val->setIsInvalidated();
  DbgValMap.erase(I);
}

void SelectionDAG::DeallocateNode(SDNode *N) {
  // If we have operands, deallocate them.
  removeOperands(N);

  NodeAllocator.Deallocate(AllNodes.remove(N));

  // Set the opcode to DELETED_NODE to help catch bugs when node
  // memory is reallocated.
  // FIXME: There are places in SDag that have grown a dependency on the opcode
  // value in the released node.
  __asan_unpoison_memory_region(&N->NodeType, sizeof(N->NodeType));
  N->NodeType = ISD::DELETED_NODE;

  // If any of the SDDbgValue nodes refer to this SDNode, invalidate
  // them and forget about that node.
  DbgInfo->erase(N);
}

#ifndef NDEBUG
/// VerifySDNode - Sanity check the given SDNode. Aborts if it is invalid.
static void VerifySDNode(SDNode *N) {
  switch (N->getOpcode()) {
  default:
    break;
  case ISD::BUILD_PAIR: {
    EVT VT = N->getValueType(0);
    assert(N->getNumValues() == 1 && "Too many results!");
    assert(!VT.isVector() && (VT.isInteger() || VT.isFloatingPoint()) &&
           "Wrong return type!");
    assert(N->getNumOperands() == 2 && "Wrong number of operands!");
    assert(N->getOperand(0).getValueType() == N->getOperand(1).getValueType() &&
           "Mismatched operand types!");
    assert(N->getOperand(0).getValueType().isInteger() == VT.isInteger() &&
           "Wrong operand type!");
    assert(VT.getSizeInBits() == 2 * N->getOperand(0).getValueSizeInBits() &&
           "Wrong return type size");
    break;
  }
  case ISD::BUILD_VECTOR: {
    assert(N->getNumValues() == 1 && "Too many results!");
    assert(N->getValueType(0).isVector() && "Wrong return type!");
    assert(N->getNumOperands() == N->getValueType(0).getVectorNumElements() &&
           "Wrong number of operands!");
    EVT EltVT = N->getValueType(0).getVectorElementType();
    for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ++I) {
      assert((I->getValueType() == EltVT ||
              (EltVT.isInteger() && I->getValueType().isInteger() &&
               EltVT.bitsLE(I->getValueType()))) &&
             "Wrong operand type!");
      assert(I->getValueType() == N->getOperand(0).getValueType() &&
             "Operands must all have the same type");
    }
    break;
  }
  }
}
#endif // NDEBUG

/// Insert a newly allocated node into the DAG.
///
/// Handles insertion into the all nodes list and CSE map, as well as
/// verification and other common operations when a new node is allocated.
void SelectionDAG::InsertNode(SDNode *N) {
  AllNodes.push_back(N);
#ifndef NDEBUG
  N->PersistentId = NextPersistentId++;
  VerifySDNode(N);
#endif
  for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
    DUL->NodeInserted(N);
}

/// RemoveNodeFromCSEMaps - Take the specified node out of the CSE map that
/// corresponds to it. This is useful when we're about to delete or repurpose
/// the node. We don't want future requests for structurally identical nodes
/// to return N anymore.
bool SelectionDAG::RemoveNodeFromCSEMaps(SDNode *N) {
  bool Erased = false;
  switch (N->getOpcode()) {
  case ISD::HANDLENODE: return false;  // noop.
  case ISD::CONDCODE:
    assert(CondCodeNodes[cast<CondCodeSDNode>(N)->get()] &&
           "Cond code doesn't exist!");
    Erased = CondCodeNodes[cast<CondCodeSDNode>(N)->get()] != nullptr;
    CondCodeNodes[cast<CondCodeSDNode>(N)->get()] = nullptr;
    break;
  case ISD::ExternalSymbol:
    Erased = ExternalSymbols.erase(cast<ExternalSymbolSDNode>(N)->getSymbol());
    break;
  case ISD::TargetExternalSymbol: {
    ExternalSymbolSDNode *ESN = cast<ExternalSymbolSDNode>(N);
    Erased = TargetExternalSymbols.erase(std::pair<std::string, unsigned>(
        ESN->getSymbol(), ESN->getTargetFlags()));
    break;
  }
  case ISD::MCSymbol: {
    auto *MCSN = cast<MCSymbolSDNode>(N);
    Erased = MCSymbols.erase(MCSN->getMCSymbol());
    break;
  }
  case ISD::VALUETYPE: {
    EVT VT = cast<VTSDNode>(N)->getVT();
    if (VT.isExtended()) {
      Erased = ExtendedValueTypeNodes.erase(VT);
    } else {
      Erased = ValueTypeNodes[VT.getSimpleVT().SimpleTy] != nullptr;
      ValueTypeNodes[VT.getSimpleVT().SimpleTy] = nullptr;
    }
    break;
  }
  default:
    // Remove it from the CSE Map.
    assert(N->getOpcode() != ISD::DELETED_NODE && "DELETED_NODE in CSEMap!");
    assert(N->getOpcode() != ISD::EntryToken && "EntryToken in CSEMap!");
    Erased = CSEMap.RemoveNode(N);
    break;
  }
#ifndef NDEBUG
  // Verify that the node was actually in one of the CSE maps, unless it has a
  // flag result (which cannot be CSE'd) or is one of the special cases that are
  // not subject to CSE.
  if (!Erased && N->getValueType(N->getNumValues()-1) != MVT::Glue &&
      !N->isMachineOpcode() && !doNotCSE(N)) {
    N->dump(this);
    dbgs() << "\n";
    llvm_unreachable("Node is not in map!");
  }
#endif
  return Erased;
}

/// AddModifiedNodeToCSEMaps - The specified node has been removed from the CSE
/// maps and modified in place. Add it back to the CSE maps, unless an identical
/// node already exists, in which case transfer all its users to the existing
/// node. This transfer can potentially trigger recursive merging.
void
SelectionDAG::AddModifiedNodeToCSEMaps(SDNode *N) {
  // For node types that aren't CSE'd, just act as if no identical node
  // already exists.
  if (!doNotCSE(N)) {
    SDNode *Existing = CSEMap.GetOrInsertNode(N);
    if (Existing != N) {
      // If there was already an existing matching node, use ReplaceAllUsesWith
      // to replace the dead one with the existing one. This can cause
      // recursive merging of other unrelated nodes down the line.
      ReplaceAllUsesWith(N, Existing);

      // N is now dead. Inform the listeners and delete it.
      for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
        DUL->NodeDeleted(N, Existing);
      DeleteNodeNotInCSEMaps(N);
      return;
    }
  }

  // If the node doesn't already exist, we updated it. Inform listeners.
  for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
    DUL->NodeUpdated(N);
}

/// FindModifiedNodeSlot - Find a slot for the specified node if its operands
/// were replaced with those specified. If this node is never memoized,
/// return null, otherwise return a pointer to the slot it would take. If a
/// node already exists with these operands, the slot will be non-null.
SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N, SDValue Op,
                                           void *&InsertPos) {
  if (doNotCSE(N))
    return nullptr;

  SDValue Ops[] = { Op };
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops);
  AddNodeIDCustom(ID, N);
  SDNode *Node = FindNodeOrInsertPos(ID, SDLoc(N), InsertPos);
  if (Node)
    Node->intersectFlagsWith(N->getFlags());
  return Node;
}

/// FindModifiedNodeSlot - Find a slot for the specified node if its operands
/// were replaced with those specified. If this node is never memoized,
/// return null, otherwise return a pointer to the slot it would take. If a
/// node already exists with these operands, the slot will be non-null.
SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N,
                                           SDValue Op1, SDValue Op2,
                                           void *&InsertPos) {
  if (doNotCSE(N))
    return nullptr;

  SDValue Ops[] = { Op1, Op2 };
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops);
  AddNodeIDCustom(ID, N);
  SDNode *Node = FindNodeOrInsertPos(ID, SDLoc(N), InsertPos);
  if (Node)
    Node->intersectFlagsWith(N->getFlags());
  return Node;
}

/// FindModifiedNodeSlot - Find a slot for the specified node if its operands
/// were replaced with those specified. If this node is never memoized,
/// return null, otherwise return a pointer to the slot it would take. If a
/// node already exists with these operands, the slot will be non-null.
SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N, ArrayRef<SDValue> Ops,
                                           void *&InsertPos) {
  if (doNotCSE(N))
    return nullptr;

  FoldingSetNodeID ID;
  AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops);
  AddNodeIDCustom(ID, N);
  SDNode *Node = FindNodeOrInsertPos(ID, SDLoc(N), InsertPos);
  if (Node)
    Node->intersectFlagsWith(N->getFlags());
  return Node;
}

unsigned SelectionDAG::getEVTAlignment(EVT VT) const {
  Type *Ty = VT == MVT::iPTR ?
      PointerType::get(Type::getInt8Ty(*getContext()), 0) :
      VT.getTypeForEVT(*getContext());

  return getDataLayout().getABITypeAlignment(Ty);
}

// EntryNode could meaningfully have debug info if we can find it...
SelectionDAG::SelectionDAG(const TargetMachine &tm, CodeGenOpt::Level OL)
    : TM(tm), OptLevel(OL),
      EntryNode(ISD::EntryToken, 0, DebugLoc(), getVTList(MVT::Other)),
      Root(getEntryNode()) {
  InsertNode(&EntryNode);
  DbgInfo = new SDDbgInfo();
}

void SelectionDAG::init(MachineFunction &NewMF,
                        OptimizationRemarkEmitter &NewORE,
                        Pass *PassPtr, const TargetLibraryInfo *LibraryInfo,
                        LegacyDivergenceAnalysis * Divergence,
                        ProfileSummaryInfo *PSIin,
                        BlockFrequencyInfo *BFIin) {
  MF = &NewMF;
  SDAGISelPass = PassPtr;
  ORE = &NewORE;
  TLI = getSubtarget().getTargetLowering();
  TSI = getSubtarget().getSelectionDAGInfo();
  LibInfo = LibraryInfo;
  Context = &MF->getFunction().getContext();
  DA = Divergence;
  PSI = PSIin;
  BFI = BFIin;
}

SelectionDAG::~SelectionDAG() {
  assert(!UpdateListeners && "Dangling registered DAGUpdateListeners");
  allnodes_clear();
  OperandRecycler.clear(OperandAllocator);
  delete DbgInfo;
}

bool SelectionDAG::shouldOptForSize() const {
  return MF->getFunction().hasOptSize() ||
         llvm::shouldOptimizeForSize(FLI->MBB->getBasicBlock(), PSI, BFI);
}

void SelectionDAG::allnodes_clear() {
  assert(&*AllNodes.begin() == &EntryNode);
  AllNodes.remove(AllNodes.begin());
  while (!AllNodes.empty())
    DeallocateNode(&AllNodes.front());
#ifndef NDEBUG
  NextPersistentId = 0;
#endif
}

SDNode *SelectionDAG::FindNodeOrInsertPos(const FoldingSetNodeID &ID,
                                          void *&InsertPos) {
  SDNode *N = CSEMap.FindNodeOrInsertPos(ID, InsertPos);
  if (N) {
    switch (N->getOpcode()) {
    default: break;
    case ISD::Constant:
    case ISD::ConstantFP:
      llvm_unreachable("Querying for Constant and ConstantFP nodes requires "
                       "debug location. Use another overload.");
    }
  }
  return N;
}

SDNode *SelectionDAG::FindNodeOrInsertPos(const FoldingSetNodeID &ID,
                                          const SDLoc &DL, void *&InsertPos) {
  SDNode *N = CSEMap.FindNodeOrInsertPos(ID, InsertPos);
  if (N) {
    switch (N->getOpcode()) {
    case ISD::Constant:
    case ISD::ConstantFP:
      // Erase debug location from the node if the node is used at several
      // different places. Do not propagate one location to all uses as it
      // will cause a worse single stepping debugging experience.
      if (N->getDebugLoc() != DL.getDebugLoc())
        N->setDebugLoc(DebugLoc());
      break;
    default:
      // When the node's point of use is located earlier in the instruction
      // sequence than its prior point of use, update its debug info to the
      // earlier location.
      if (DL.getIROrder() && DL.getIROrder() < N->getIROrder())
        N->setDebugLoc(DL.getDebugLoc());
      break;
    }
  }
  return N;
}

void SelectionDAG::clear() {
  allnodes_clear();
  OperandRecycler.clear(OperandAllocator);
  OperandAllocator.Reset();
  CSEMap.clear();

  ExtendedValueTypeNodes.clear();
  ExternalSymbols.clear();
  TargetExternalSymbols.clear();
  MCSymbols.clear();
  SDCallSiteDbgInfo.clear();
  std::fill(CondCodeNodes.begin(), CondCodeNodes.end(),
            static_cast<CondCodeSDNode*>(nullptr));
  std::fill(ValueTypeNodes.begin(), ValueTypeNodes.end(),
            static_cast<SDNode*>(nullptr));

  EntryNode.UseList = nullptr;
  InsertNode(&EntryNode);
  Root = getEntryNode();
  DbgInfo->clear();
}

SDValue SelectionDAG::getFPExtendOrRound(SDValue Op, const SDLoc &DL, EVT VT) {
  return VT.bitsGT(Op.getValueType())
             ? getNode(ISD::FP_EXTEND, DL, VT, Op)
             : getNode(ISD::FP_ROUND, DL, VT, Op, getIntPtrConstant(0, DL));
}

std::pair<SDValue, SDValue>
SelectionDAG::getStrictFPExtendOrRound(SDValue Op, SDValue Chain,
                                       const SDLoc &DL, EVT VT) {
  assert(!VT.bitsEq(Op.getValueType()) &&
         "Strict no-op FP extend/round not allowed.");
  SDValue Res =
      VT.bitsGT(Op.getValueType())
          ? getNode(ISD::STRICT_FP_EXTEND, DL, {VT, MVT::Other}, {Chain, Op})
          : getNode(ISD::STRICT_FP_ROUND, DL, {VT, MVT::Other},
                    {Chain, Op, getIntPtrConstant(0, DL)});

  return std::pair<SDValue, SDValue>(Res, SDValue(Res.getNode(), 1));
}

SDValue SelectionDAG::getAnyExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT) {
  return VT.bitsGT(Op.getValueType()) ?
    getNode(ISD::ANY_EXTEND, DL, VT, Op) :
    getNode(ISD::TRUNCATE, DL, VT, Op);
}

SDValue SelectionDAG::getSExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT) {
  return VT.bitsGT(Op.getValueType()) ?
    getNode(ISD::SIGN_EXTEND, DL, VT, Op) :
    getNode(ISD::TRUNCATE, DL, VT, Op);
}

SDValue SelectionDAG::getZExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT) {
  return VT.bitsGT(Op.getValueType()) ?
    getNode(ISD::ZERO_EXTEND, DL, VT, Op) :
    getNode(ISD::TRUNCATE, DL, VT, Op);
}

SDValue SelectionDAG::getBoolExtOrTrunc(SDValue Op, const SDLoc &SL, EVT VT,
                                        EVT OpVT) {
  if (VT.bitsLE(Op.getValueType()))
    return getNode(ISD::TRUNCATE, SL, VT, Op);

  TargetLowering::BooleanContent BType = TLI->getBooleanContents(OpVT);
  return getNode(TLI->getExtendForContent(BType), SL, VT, Op);
}

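// For example, zero-extending the low 8 bits of an i32 value in-register
// becomes (and x, 0xFF): getLowBitsSet(32, 8) builds the 0x000000FF mask and
// the AND clears every bit above the source width.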
getZeroExtendInReg(SDValue Op,const SDLoc & DL,EVT VT)1169 SDValue SelectionDAG::getZeroExtendInReg(SDValue Op, const SDLoc &DL, EVT VT) {
1170 assert(!VT.isVector() &&
1171 "getZeroExtendInReg should use the vector element type instead of "
1172 "the vector type!");
1173 if (Op.getValueType().getScalarType() == VT) return Op;
1174 unsigned BitWidth = Op.getScalarValueSizeInBits();
1175 APInt Imm = APInt::getLowBitsSet(BitWidth,
1176 VT.getSizeInBits());
1177 return getNode(ISD::AND, DL, Op.getValueType(), Op,
1178 getConstant(Imm, DL, Op.getValueType()));
1179 }
1180
getPtrExtOrTrunc(SDValue Op,const SDLoc & DL,EVT VT)1181 SDValue SelectionDAG::getPtrExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT) {
1182 // Only unsigned pointer semantics are supported right now. In the future this
1183 // might delegate to TLI to check pointer signedness.
1184 return getZExtOrTrunc(Op, DL, VT);
1185 }
1186
getPtrExtendInReg(SDValue Op,const SDLoc & DL,EVT VT)1187 SDValue SelectionDAG::getPtrExtendInReg(SDValue Op, const SDLoc &DL, EVT VT) {
1188 // Only unsigned pointer semantics are supported right now. In the future this
1189 // might delegate to TLI to check pointer signedness.
1190 return getZeroExtendInReg(Op, DL, VT);
1191 }
1192
1193 /// getNOT - Create a bitwise NOT operation as (XOR Val, -1).
getNOT(const SDLoc & DL,SDValue Val,EVT VT)1194 SDValue SelectionDAG::getNOT(const SDLoc &DL, SDValue Val, EVT VT) {
1195 EVT EltVT = VT.getScalarType();
1196 SDValue NegOne =
1197 getConstant(APInt::getAllOnesValue(EltVT.getSizeInBits()), DL, VT);
1198 return getNode(ISD::XOR, DL, VT, Val, NegOne);
1199 }
1200
getLogicalNOT(const SDLoc & DL,SDValue Val,EVT VT)1201 SDValue SelectionDAG::getLogicalNOT(const SDLoc &DL, SDValue Val, EVT VT) {
1202 SDValue TrueValue = getBoolConstant(true, DL, VT, VT);
1203 return getNode(ISD::XOR, DL, VT, Val, TrueValue);
1204 }
1205
getBoolConstant(bool V,const SDLoc & DL,EVT VT,EVT OpVT)1206 SDValue SelectionDAG::getBoolConstant(bool V, const SDLoc &DL, EVT VT,
1207 EVT OpVT) {
1208 if (!V)
1209 return getConstant(0, DL, VT);
1210
1211 switch (TLI->getBooleanContents(OpVT)) {
1212 case TargetLowering::ZeroOrOneBooleanContent:
1213 case TargetLowering::UndefinedBooleanContent:
1214 return getConstant(1, DL, VT);
1215 case TargetLowering::ZeroOrNegativeOneBooleanContent:
1216 return getAllOnesConstant(DL, VT);
1217 }
1218 llvm_unreachable("Unexpected boolean content enum!");
1219 }
1220
getConstant(uint64_t Val,const SDLoc & DL,EVT VT,bool isT,bool isO)1221 SDValue SelectionDAG::getConstant(uint64_t Val, const SDLoc &DL, EVT VT,
1222 bool isT, bool isO) {
1223 EVT EltVT = VT.getScalarType();
1224 assert((EltVT.getSizeInBits() >= 64 ||
1225 (uint64_t)((int64_t)Val >> EltVT.getSizeInBits()) + 1 < 2) &&
1226 "getConstant with a uint64_t value that doesn't fit in the type!");
1227 return getConstant(APInt(EltVT.getSizeInBits(), Val), DL, VT, isT, isO);
1228 }
1229
getConstant(const APInt & Val,const SDLoc & DL,EVT VT,bool isT,bool isO)1230 SDValue SelectionDAG::getConstant(const APInt &Val, const SDLoc &DL, EVT VT,
1231 bool isT, bool isO) {
1232 return getConstant(*ConstantInt::get(*Context, Val), DL, VT, isT, isO);
1233 }
1234
getConstant(const ConstantInt & Val,const SDLoc & DL,EVT VT,bool isT,bool isO)1235 SDValue SelectionDAG::getConstant(const ConstantInt &Val, const SDLoc &DL,
1236 EVT VT, bool isT, bool isO) {
1237 assert(VT.isInteger() && "Cannot create FP integer constant!");
1238
1239 EVT EltVT = VT.getScalarType();
1240 const ConstantInt *Elt = &Val;
1241
1242 // In some cases the vector type is legal but the element type is illegal and
1243 // needs to be promoted, for example v8i8 on ARM. In this case, promote the
1244 // inserted value (the type does not need to match the vector element type).
1245 // Any extra bits introduced will be truncated away.
1246 if (VT.isVector() && TLI->getTypeAction(*getContext(), EltVT) ==
1247 TargetLowering::TypePromoteInteger) {
1248 EltVT = TLI->getTypeToTransformTo(*getContext(), EltVT);
1249 APInt NewVal = Elt->getValue().zextOrTrunc(EltVT.getSizeInBits());
1250 Elt = ConstantInt::get(*getContext(), NewVal);
1251 }
1252 // In other cases the element type is illegal and needs to be expanded, for
1253 // example v2i64 on MIPS32. In this case, find the nearest legal type, split
1254 // the value into n parts and use a vector type with n-times the elements.
1255 // Then bitcast to the type requested.
1256 // Legalizing constants too early makes the DAGCombiner's job harder so we
1257 // only legalize if the DAG tells us we must produce legal types.
1258 else if (NewNodesMustHaveLegalTypes && VT.isVector() &&
1259 TLI->getTypeAction(*getContext(), EltVT) ==
1260 TargetLowering::TypeExpandInteger) {
1261 const APInt &NewVal = Elt->getValue();
1262 EVT ViaEltVT = TLI->getTypeToTransformTo(*getContext(), EltVT);
1263 unsigned ViaEltSizeInBits = ViaEltVT.getSizeInBits();
1264 unsigned ViaVecNumElts = VT.getSizeInBits() / ViaEltSizeInBits;
1265 EVT ViaVecVT = EVT::getVectorVT(*getContext(), ViaEltVT, ViaVecNumElts);
1266
1267 // Check the temporary vector is the correct size. If this fails then
1268 // getTypeToTransformTo() probably returned a type whose size (in bits)
1269 // isn't a power-of-2 factor of the requested type size.
1270 assert(ViaVecVT.getSizeInBits() == VT.getSizeInBits());
1271
1272 SmallVector<SDValue, 2> EltParts;
1273 for (unsigned i = 0; i < ViaVecNumElts / VT.getVectorNumElements(); ++i) {
1274 EltParts.push_back(getConstant(NewVal.lshr(i * ViaEltSizeInBits)
1275 .zextOrTrunc(ViaEltSizeInBits), DL,
1276 ViaEltVT, isT, isO));
1277 }
1278
1279 // EltParts is currently in little endian order. If we actually want
1280 // big-endian order then reverse it now.
1281 if (getDataLayout().isBigEndian())
1282 std::reverse(EltParts.begin(), EltParts.end());
1283
1284 // The elements must be reversed when the element order is different
1285 // to the endianness of the elements (because the BITCAST is itself a
1286 // vector shuffle in this situation). However, we do not need any code to
1287 // perform this reversal because getConstant() is producing a vector
1288 // splat.
1289 // This situation occurs in MIPS MSA.
1290
1291 SmallVector<SDValue, 8> Ops;
1292 for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i)
1293 Ops.insert(Ops.end(), EltParts.begin(), EltParts.end());
1294
1295 SDValue V = getNode(ISD::BITCAST, DL, VT, getBuildVector(ViaVecVT, DL, Ops));
1296 return V;
1297 }
1298
1299 assert(Elt->getBitWidth() == EltVT.getSizeInBits() &&
1300 "APInt size does not match type size!");
1301 unsigned Opc = isT ? ISD::TargetConstant : ISD::Constant;
1302 FoldingSetNodeID ID;
1303 AddNodeIDNode(ID, Opc, getVTList(EltVT), None);
1304 ID.AddPointer(Elt);
1305 ID.AddBoolean(isO);
1306 void *IP = nullptr;
1307 SDNode *N = nullptr;
1308 if ((N = FindNodeOrInsertPos(ID, DL, IP)))
1309 if (!VT.isVector())
1310 return SDValue(N, 0);
1311
1312 if (!N) {
1313 N = newSDNode<ConstantSDNode>(isT, isO, Elt, EltVT);
1314 CSEMap.InsertNode(N, IP);
1315 InsertNode(N);
1316 NewSDValueDbgMsg(SDValue(N, 0), "Creating constant: ", this);
1317 }
1318
1319 SDValue Result(N, 0);
1320 if (VT.isScalableVector())
1321 Result = getSplatVector(VT, DL, Result);
1322 else if (VT.isVector())
1323 Result = getSplatBuildVector(VT, DL, Result);
1324
1325 return Result;
1326 }
1327
getIntPtrConstant(uint64_t Val,const SDLoc & DL,bool isTarget)1328 SDValue SelectionDAG::getIntPtrConstant(uint64_t Val, const SDLoc &DL,
1329 bool isTarget) {
1330 return getConstant(Val, DL, TLI->getPointerTy(getDataLayout()), isTarget);
1331 }
1332
getShiftAmountConstant(uint64_t Val,EVT VT,const SDLoc & DL,bool LegalTypes)1333 SDValue SelectionDAG::getShiftAmountConstant(uint64_t Val, EVT VT,
1334 const SDLoc &DL, bool LegalTypes) {
1335 EVT ShiftVT = TLI->getShiftAmountTy(VT, getDataLayout(), LegalTypes);
1336 return getConstant(Val, DL, ShiftVT);
1337 }
1338
getConstantFP(const APFloat & V,const SDLoc & DL,EVT VT,bool isTarget)1339 SDValue SelectionDAG::getConstantFP(const APFloat &V, const SDLoc &DL, EVT VT,
1340 bool isTarget) {
1341 return getConstantFP(*ConstantFP::get(*getContext(), V), DL, VT, isTarget);
1342 }
1343
getConstantFP(const ConstantFP & V,const SDLoc & DL,EVT VT,bool isTarget)1344 SDValue SelectionDAG::getConstantFP(const ConstantFP &V, const SDLoc &DL,
1345 EVT VT, bool isTarget) {
1346 assert(VT.isFloatingPoint() && "Cannot create integer FP constant!");
1347
1348 EVT EltVT = VT.getScalarType();
1349
1350 // Do the map lookup using the actual bit pattern for the floating point
1351 // value, so that we don't have problems with 0.0 comparing equal to -0.0, and
1352 // we don't have issues with SNANs.
1353 unsigned Opc = isTarget ? ISD::TargetConstantFP : ISD::ConstantFP;
1354 FoldingSetNodeID ID;
1355 AddNodeIDNode(ID, Opc, getVTList(EltVT), None);
1356 ID.AddPointer(&V);
1357 void *IP = nullptr;
1358 SDNode *N = nullptr;
1359 if ((N = FindNodeOrInsertPos(ID, DL, IP)))
1360 if (!VT.isVector())
1361 return SDValue(N, 0);
1362
1363 if (!N) {
1364 N = newSDNode<ConstantFPSDNode>(isTarget, &V, EltVT);
1365 CSEMap.InsertNode(N, IP);
1366 InsertNode(N);
1367 }
1368
1369 SDValue Result(N, 0);
1370 if (VT.isVector())
1371 Result = getSplatBuildVector(VT, DL, Result);
1372 NewSDValueDbgMsg(Result, "Creating fp constant: ", this);
1373 return Result;
1374 }
1375
getConstantFP(double Val,const SDLoc & DL,EVT VT,bool isTarget)1376 SDValue SelectionDAG::getConstantFP(double Val, const SDLoc &DL, EVT VT,
1377 bool isTarget) {
1378 EVT EltVT = VT.getScalarType();
1379 if (EltVT == MVT::f32)
1380 return getConstantFP(APFloat((float)Val), DL, VT, isTarget);
1381 else if (EltVT == MVT::f64)
1382 return getConstantFP(APFloat(Val), DL, VT, isTarget);
1383 else if (EltVT == MVT::f80 || EltVT == MVT::f128 || EltVT == MVT::ppcf128 ||
1384 EltVT == MVT::f16) {
1385 bool Ignored;
1386 APFloat APF = APFloat(Val);
1387 APF.convert(EVTToAPFloatSemantics(EltVT), APFloat::rmNearestTiesToEven,
1388 &Ignored);
1389 return getConstantFP(APF, DL, VT, isTarget);
1390 } else
1391 llvm_unreachable("Unsupported type in getConstantFP");
1392 }
1393
getGlobalAddress(const GlobalValue * GV,const SDLoc & DL,EVT VT,int64_t Offset,bool isTargetGA,unsigned TargetFlags)1394 SDValue SelectionDAG::getGlobalAddress(const GlobalValue *GV, const SDLoc &DL,
1395 EVT VT, int64_t Offset, bool isTargetGA,
1396 unsigned TargetFlags) {
1397 assert((TargetFlags == 0 || isTargetGA) &&
1398 "Cannot set target flags on target-independent globals");
1399
1400 // Truncate (with sign-extension) the offset value to the pointer size.
1401 unsigned BitWidth = getDataLayout().getPointerTypeSizeInBits(GV->getType());
1402 if (BitWidth < 64)
1403 Offset = SignExtend64(Offset, BitWidth);
1404
1405 unsigned Opc;
1406 if (GV->isThreadLocal())
1407 Opc = isTargetGA ? ISD::TargetGlobalTLSAddress : ISD::GlobalTLSAddress;
1408 else
1409 Opc = isTargetGA ? ISD::TargetGlobalAddress : ISD::GlobalAddress;
1410
1411 FoldingSetNodeID ID;
1412 AddNodeIDNode(ID, Opc, getVTList(VT), None);
1413 ID.AddPointer(GV);
1414 ID.AddInteger(Offset);
1415 ID.AddInteger(TargetFlags);
1416 void *IP = nullptr;
1417 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP))
1418 return SDValue(E, 0);
1419
1420 auto *N = newSDNode<GlobalAddressSDNode>(
1421 Opc, DL.getIROrder(), DL.getDebugLoc(), GV, VT, Offset, TargetFlags);
1422 CSEMap.InsertNode(N, IP);
1423 InsertNode(N);
1424 return SDValue(N, 0);
1425 }
1426
getFrameIndex(int FI,EVT VT,bool isTarget)1427 SDValue SelectionDAG::getFrameIndex(int FI, EVT VT, bool isTarget) {
1428 unsigned Opc = isTarget ? ISD::TargetFrameIndex : ISD::FrameIndex;
1429 FoldingSetNodeID ID;
1430 AddNodeIDNode(ID, Opc, getVTList(VT), None);
1431 ID.AddInteger(FI);
1432 void *IP = nullptr;
1433 if (SDNode *E = FindNodeOrInsertPos(ID, IP))
1434 return SDValue(E, 0);
1435
1436 auto *N = newSDNode<FrameIndexSDNode>(FI, VT, isTarget);
1437 CSEMap.InsertNode(N, IP);
1438 InsertNode(N);
1439 return SDValue(N, 0);
1440 }
1441
getJumpTable(int JTI,EVT VT,bool isTarget,unsigned TargetFlags)1442 SDValue SelectionDAG::getJumpTable(int JTI, EVT VT, bool isTarget,
1443 unsigned TargetFlags) {
1444 assert((TargetFlags == 0 || isTarget) &&
1445 "Cannot set target flags on target-independent jump tables");
1446 unsigned Opc = isTarget ? ISD::TargetJumpTable : ISD::JumpTable;
1447 FoldingSetNodeID ID;
1448 AddNodeIDNode(ID, Opc, getVTList(VT), None);
1449 ID.AddInteger(JTI);
1450 ID.AddInteger(TargetFlags);
1451 void *IP = nullptr;
1452 if (SDNode *E = FindNodeOrInsertPos(ID, IP))
1453 return SDValue(E, 0);
1454
1455 auto *N = newSDNode<JumpTableSDNode>(JTI, VT, isTarget, TargetFlags);
1456 CSEMap.InsertNode(N, IP);
1457 InsertNode(N);
1458 return SDValue(N, 0);
1459 }
1460
1461 SDValue SelectionDAG::getConstantPool(const Constant *C, EVT VT,
1462 unsigned Alignment, int Offset,
1463 bool isTarget,
1464 unsigned TargetFlags) {
1465 assert((TargetFlags == 0 || isTarget) &&
1466 "Cannot set target flags on target-independent globals");
1467 if (Alignment == 0)
1468 Alignment = shouldOptForSize()
1469 ? getDataLayout().getABITypeAlignment(C->getType())
1470 : getDataLayout().getPrefTypeAlignment(C->getType());
1471 unsigned Opc = isTarget ? ISD::TargetConstantPool : ISD::ConstantPool;
1472 FoldingSetNodeID ID;
1473 AddNodeIDNode(ID, Opc, getVTList(VT), None);
1474 ID.AddInteger(Alignment);
1475 ID.AddInteger(Offset);
1476 ID.AddPointer(C);
1477 ID.AddInteger(TargetFlags);
1478 void *IP = nullptr;
1479 if (SDNode *E = FindNodeOrInsertPos(ID, IP))
1480 return SDValue(E, 0);
1481
1482 auto *N = newSDNode<ConstantPoolSDNode>(isTarget, C, VT, Offset, Alignment,
1483 TargetFlags);
1484 CSEMap.InsertNode(N, IP);
1485 InsertNode(N);
1486 return SDValue(N, 0);
1487 }
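// Note on the alignment fallback above (illustrative, target-dependent):
// when no alignment is supplied, functions optimized for size use the ABI
// alignment of C's type (a denser constant pool), while other functions use
// the preferred alignment, which may be larger and can make the load cheaper.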
1488
1489 SDValue SelectionDAG::getConstantPool(MachineConstantPoolValue *C, EVT VT,
1490 unsigned Alignment, int Offset,
1491 bool isTarget,
1492 unsigned TargetFlags) {
1493 assert((TargetFlags == 0 || isTarget) &&
1494 "Cannot set target flags on target-independent globals");
1495 if (Alignment == 0)
1496 Alignment = getDataLayout().getPrefTypeAlignment(C->getType());
1497 unsigned Opc = isTarget ? ISD::TargetConstantPool : ISD::ConstantPool;
1498 FoldingSetNodeID ID;
1499 AddNodeIDNode(ID, Opc, getVTList(VT), None);
1500 ID.AddInteger(Alignment);
1501 ID.AddInteger(Offset);
1502 C->addSelectionDAGCSEId(ID);
1503 ID.AddInteger(TargetFlags);
1504 void *IP = nullptr;
1505 if (SDNode *E = FindNodeOrInsertPos(ID, IP))
1506 return SDValue(E, 0);
1507
1508 auto *N = newSDNode<ConstantPoolSDNode>(isTarget, C, VT, Offset, Alignment,
1509 TargetFlags);
1510 CSEMap.InsertNode(N, IP);
1511 InsertNode(N);
1512 return SDValue(N, 0);
1513 }
1514
1515 SDValue SelectionDAG::getTargetIndex(int Index, EVT VT, int64_t Offset,
1516 unsigned TargetFlags) {
1517 FoldingSetNodeID ID;
1518 AddNodeIDNode(ID, ISD::TargetIndex, getVTList(VT), None);
1519 ID.AddInteger(Index);
1520 ID.AddInteger(Offset);
1521 ID.AddInteger(TargetFlags);
1522 void *IP = nullptr;
1523 if (SDNode *E = FindNodeOrInsertPos(ID, IP))
1524 return SDValue(E, 0);
1525
1526 auto *N = newSDNode<TargetIndexSDNode>(Index, VT, Offset, TargetFlags);
1527 CSEMap.InsertNode(N, IP);
1528 InsertNode(N);
1529 return SDValue(N, 0);
1530 }
1531
1532 SDValue SelectionDAG::getBasicBlock(MachineBasicBlock *MBB) {
1533 FoldingSetNodeID ID;
1534 AddNodeIDNode(ID, ISD::BasicBlock, getVTList(MVT::Other), None);
1535 ID.AddPointer(MBB);
1536 void *IP = nullptr;
1537 if (SDNode *E = FindNodeOrInsertPos(ID, IP))
1538 return SDValue(E, 0);
1539
1540 auto *N = newSDNode<BasicBlockSDNode>(MBB);
1541 CSEMap.InsertNode(N, IP);
1542 InsertNode(N);
1543 return SDValue(N, 0);
1544 }
1545
1546 SDValue SelectionDAG::getValueType(EVT VT) {
1547 if (VT.isSimple() && (unsigned)VT.getSimpleVT().SimpleTy >=
1548 ValueTypeNodes.size())
1549 ValueTypeNodes.resize(VT.getSimpleVT().SimpleTy+1);
1550
1551 SDNode *&N = VT.isExtended() ?
1552 ExtendedValueTypeNodes[VT] : ValueTypeNodes[VT.getSimpleVT().SimpleTy];
1553
1554 if (N) return SDValue(N, 0);
1555 N = newSDNode<VTSDNode>(VT);
1556 InsertNode(N);
1557 return SDValue(N, 0);
1558 }
1559
1560 SDValue SelectionDAG::getExternalSymbol(const char *Sym, EVT VT) {
1561 SDNode *&N = ExternalSymbols[Sym];
1562 if (N) return SDValue(N, 0);
1563 N = newSDNode<ExternalSymbolSDNode>(false, Sym, 0, VT);
1564 InsertNode(N);
1565 return SDValue(N, 0);
1566 }
1567
1568 SDValue SelectionDAG::getMCSymbol(MCSymbol *Sym, EVT VT) {
1569 SDNode *&N = MCSymbols[Sym];
1570 if (N)
1571 return SDValue(N, 0);
1572 N = newSDNode<MCSymbolSDNode>(Sym, VT);
1573 InsertNode(N);
1574 return SDValue(N, 0);
1575 }
1576
1577 SDValue SelectionDAG::getTargetExternalSymbol(const char *Sym, EVT VT,
1578 unsigned TargetFlags) {
1579 SDNode *&N =
1580 TargetExternalSymbols[std::pair<std::string, unsigned>(Sym, TargetFlags)];
1581 if (N) return SDValue(N, 0);
1582 N = newSDNode<ExternalSymbolSDNode>(true, Sym, TargetFlags, VT);
1583 InsertNode(N);
1584 return SDValue(N, 0);
1585 }
1586
1587 SDValue SelectionDAG::getCondCode(ISD::CondCode Cond) {
1588 if ((unsigned)Cond >= CondCodeNodes.size())
1589 CondCodeNodes.resize(Cond+1);
1590
1591 if (!CondCodeNodes[Cond]) {
1592 auto *N = newSDNode<CondCodeSDNode>(Cond);
1593 CondCodeNodes[Cond] = N;
1594 InsertNode(N);
1595 }
1596
1597 return SDValue(CondCodeNodes[Cond], 0);
1598 }
1599
1600 /// Swaps the values of N1 and N2. Swaps all indices in the shuffle mask M that
1601 /// point at N1 to point at N2 and indices that point at N2 to point at N1.
1602 static void commuteShuffle(SDValue &N1, SDValue &N2, MutableArrayRef<int> M) {
1603 std::swap(N1, N2);
1604 ShuffleVectorSDNode::commuteMask(M);
1605 }
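// Worked example (illustrative, NElts == 4): commuting
//   shuffle(A, B, <0, 5, 2, 7>)
// swaps the operands to (B, A) and remaps each index across the NElts
// boundary, yielding mask <4, 1, 6, 3>; the shuffle still selects the same
// lanes, just with the operand roles exchanged.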
1606
1607 SDValue SelectionDAG::getVectorShuffle(EVT VT, const SDLoc &dl, SDValue N1,
1608 SDValue N2, ArrayRef<int> Mask) {
1609 assert(VT.getVectorNumElements() == Mask.size() &&
1610 "Must have the same number of vector elements as mask elements!");
1611 assert(VT == N1.getValueType() && VT == N2.getValueType() &&
1612 "Invalid VECTOR_SHUFFLE");
1613
1614 // Canonicalize shuffle undef, undef -> undef
1615 if (N1.isUndef() && N2.isUndef())
1616 return getUNDEF(VT);
1617
1618 // Validate that all indices in Mask are within the range of the elements
1619 // input to the shuffle.
1620 int NElts = Mask.size();
1621 assert(llvm::all_of(Mask,
1622 [&](int M) { return M < (NElts * 2) && M >= -1; }) &&
1623 "Index out of range");
1624
1625 // Copy the mask so we can do any needed cleanup.
1626 SmallVector<int, 8> MaskVec(Mask.begin(), Mask.end());
1627
1628 // Canonicalize shuffle v, v -> v, undef
1629 if (N1 == N2) {
1630 N2 = getUNDEF(VT);
1631 for (int i = 0; i != NElts; ++i)
1632 if (MaskVec[i] >= NElts) MaskVec[i] -= NElts;
1633 }
1634
1635 // Canonicalize shuffle undef, v -> v, undef. Commute the shuffle mask.
1636 if (N1.isUndef())
1637 commuteShuffle(N1, N2, MaskVec);
1638
1639 if (TLI->hasVectorBlend()) {
1640 // If shuffling a splat, try to blend the splat instead. We do this here so
1641 // that even when this arises during lowering we don't have to re-handle it.
1642 auto BlendSplat = [&](BuildVectorSDNode *BV, int Offset) {
1643 BitVector UndefElements;
1644 SDValue Splat = BV->getSplatValue(&UndefElements);
1645 if (!Splat)
1646 return;
1647
1648 for (int i = 0; i < NElts; ++i) {
1649 if (MaskVec[i] < Offset || MaskVec[i] >= (Offset + NElts))
1650 continue;
1651
1652 // If this input comes from undef, mark it as such.
1653 if (UndefElements[MaskVec[i] - Offset]) {
1654 MaskVec[i] = -1;
1655 continue;
1656 }
1657
1658 // If we can blend a non-undef lane, use that instead.
1659 if (!UndefElements[i])
1660 MaskVec[i] = i + Offset;
1661 }
1662 };
1663 if (auto *N1BV = dyn_cast<BuildVectorSDNode>(N1))
1664 BlendSplat(N1BV, 0);
1665 if (auto *N2BV = dyn_cast<BuildVectorSDNode>(N2))
1666 BlendSplat(N2BV, NElts);
1667 }
1668
1669 // Canonicalize: if all indices point into the LHS, -> shuffle lhs, undef
1670 // Canonicalize: if all indices point into the RHS, -> shuffle rhs, undef
1671 bool AllLHS = true, AllRHS = true;
1672 bool N2Undef = N2.isUndef();
1673 for (int i = 0; i != NElts; ++i) {
1674 if (MaskVec[i] >= NElts) {
1675 if (N2Undef)
1676 MaskVec[i] = -1;
1677 else
1678 AllLHS = false;
1679 } else if (MaskVec[i] >= 0) {
1680 AllRHS = false;
1681 }
1682 }
1683 if (AllLHS && AllRHS)
1684 return getUNDEF(VT);
1685 if (AllLHS && !N2Undef)
1686 N2 = getUNDEF(VT);
1687 if (AllRHS) {
1688 N1 = getUNDEF(VT);
1689 commuteShuffle(N1, N2, MaskVec);
1690 }
1691 // Reset our undef status after accounting for the mask.
1692 N2Undef = N2.isUndef();
1693 // Re-check whether both sides ended up undef.
1694 if (N1.isUndef() && N2Undef)
1695 return getUNDEF(VT);
1696
1697 // If this is an identity shuffle, return the input vector.
1698 bool Identity = true, AllSame = true;
1699 for (int i = 0; i != NElts; ++i) {
1700 if (MaskVec[i] >= 0 && MaskVec[i] != i) Identity = false;
1701 if (MaskVec[i] != MaskVec[0]) AllSame = false;
1702 }
1703 if (Identity && NElts)
1704 return N1;
1705
1706 // Shuffling a constant splat doesn't change the result.
1707 if (N2Undef) {
1708 SDValue V = N1;
1709
1710 // Look through any bitcasts. We check that these don't change the number
1711 // (and size) of elements and just change their types.
1712 while (V.getOpcode() == ISD::BITCAST)
1713 V = V->getOperand(0);
1714
1715 // A splat should always show up as a build vector node.
1716 if (auto *BV = dyn_cast<BuildVectorSDNode>(V)) {
1717 BitVector UndefElements;
1718 SDValue Splat = BV->getSplatValue(&UndefElements);
1719 // If this is a splat of an undef, shuffling it is also undef.
1720 if (Splat && Splat.isUndef())
1721 return getUNDEF(VT);
1722
1723 bool SameNumElts =
1724 V.getValueType().getVectorNumElements() == VT.getVectorNumElements();
1725
1726 // The shuffle can only be skipped if there is a splatted value and no
1727 // undef lanes are rearranged by the shuffle.
1728 if (Splat && UndefElements.none()) {
1729 // Splat of <x, x, ..., x>, return <x, x, ..., x>, provided that the
1730 // number of elements matches or the splatted value is a zero constant.
1731 if (SameNumElts)
1732 return N1;
1733 if (auto *C = dyn_cast<ConstantSDNode>(Splat))
1734 if (C->isNullValue())
1735 return N1;
1736 }
1737
1738 // If the shuffle itself creates a splat, build the vector directly.
1739 if (AllSame && SameNumElts) {
1740 EVT BuildVT = BV->getValueType(0);
1741 const SDValue &Splatted = BV->getOperand(MaskVec[0]);
1742 SDValue NewBV = getSplatBuildVector(BuildVT, dl, Splatted);
1743
1744 // We may have jumped through bitcasts, so the type of the
1745 // BUILD_VECTOR may not match the type of the shuffle.
1746 if (BuildVT != VT)
1747 NewBV = getNode(ISD::BITCAST, dl, VT, NewBV);
1748 return NewBV;
1749 }
1750 }
1751 }
1752
1753 FoldingSetNodeID ID;
1754 SDValue Ops[2] = { N1, N2 };
1755 AddNodeIDNode(ID, ISD::VECTOR_SHUFFLE, getVTList(VT), Ops);
1756 for (int i = 0; i != NElts; ++i)
1757 ID.AddInteger(MaskVec[i]);
1758
1759 void* IP = nullptr;
1760 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP))
1761 return SDValue(E, 0);
1762
1763 // Allocate the mask array for the node out of the BumpPtrAllocator, since
1764 // SDNode doesn't have access to it. This memory will be "leaked" when
1765 // the node is deallocated, but recovered when the NodeAllocator is released.
1766 int *MaskAlloc = OperandAllocator.Allocate<int>(NElts);
1767 llvm::copy(MaskVec, MaskAlloc);
1768
1769 auto *N = newSDNode<ShuffleVectorSDNode>(VT, dl.getIROrder(),
1770 dl.getDebugLoc(), MaskAlloc);
1771 createOperands(N, Ops);
1772
1773 CSEMap.InsertNode(N, IP);
1774 InsertNode(N);
1775 SDValue V = SDValue(N, 0);
1776 NewSDValueDbgMsg(V, "Creating new node: ", this);
1777 return V;
1778 }
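// Illustrative canonicalizations performed above (assuming a 4-element VT):
//   shuffle(V, V,     <0, 5, 2, 7>) -> shuffle(V, undef, <0, 1, 2, 3>) -> V
//   shuffle(undef, V, <4, 5, 6, 7>) -> shuffle(V, undef, <0, 1, 2, 3>) -> V
// Both reduce to the identity-mask check, which returns N1 directly.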
1779
1780 SDValue SelectionDAG::getCommutedVectorShuffle(const ShuffleVectorSDNode &SV) {
1781 EVT VT = SV.getValueType(0);
1782 SmallVector<int, 8> MaskVec(SV.getMask().begin(), SV.getMask().end());
1783 ShuffleVectorSDNode::commuteMask(MaskVec);
1784
1785 SDValue Op0 = SV.getOperand(0);
1786 SDValue Op1 = SV.getOperand(1);
1787 return getVectorShuffle(VT, SDLoc(&SV), Op1, Op0, MaskVec);
1788 }
1789
1790 SDValue SelectionDAG::getRegister(unsigned RegNo, EVT VT) {
1791 FoldingSetNodeID ID;
1792 AddNodeIDNode(ID, ISD::Register, getVTList(VT), None);
1793 ID.AddInteger(RegNo);
1794 void *IP = nullptr;
1795 if (SDNode *E = FindNodeOrInsertPos(ID, IP))
1796 return SDValue(E, 0);
1797
1798 auto *N = newSDNode<RegisterSDNode>(RegNo, VT);
1799 N->SDNodeBits.IsDivergent = TLI->isSDNodeSourceOfDivergence(N, FLI, DA);
1800 CSEMap.InsertNode(N, IP);
1801 InsertNode(N);
1802 return SDValue(N, 0);
1803 }
1804
1805 SDValue SelectionDAG::getRegisterMask(const uint32_t *RegMask) {
1806 FoldingSetNodeID ID;
1807 AddNodeIDNode(ID, ISD::RegisterMask, getVTList(MVT::Untyped), None);
1808 ID.AddPointer(RegMask);
1809 void *IP = nullptr;
1810 if (SDNode *E = FindNodeOrInsertPos(ID, IP))
1811 return SDValue(E, 0);
1812
1813 auto *N = newSDNode<RegisterMaskSDNode>(RegMask);
1814 CSEMap.InsertNode(N, IP);
1815 InsertNode(N);
1816 return SDValue(N, 0);
1817 }
1818
1819 SDValue SelectionDAG::getEHLabel(const SDLoc &dl, SDValue Root,
1820 MCSymbol *Label) {
1821 return getLabelNode(ISD::EH_LABEL, dl, Root, Label);
1822 }
1823
1824 SDValue SelectionDAG::getLabelNode(unsigned Opcode, const SDLoc &dl,
1825 SDValue Root, MCSymbol *Label) {
1826 FoldingSetNodeID ID;
1827 SDValue Ops[] = { Root };
1828 AddNodeIDNode(ID, Opcode, getVTList(MVT::Other), Ops);
1829 ID.AddPointer(Label);
1830 void *IP = nullptr;
1831 if (SDNode *E = FindNodeOrInsertPos(ID, IP))
1832 return SDValue(E, 0);
1833
1834 auto *N =
1835 newSDNode<LabelSDNode>(Opcode, dl.getIROrder(), dl.getDebugLoc(), Label);
1836 createOperands(N, Ops);
1837
1838 CSEMap.InsertNode(N, IP);
1839 InsertNode(N);
1840 return SDValue(N, 0);
1841 }
1842
1843 SDValue SelectionDAG::getBlockAddress(const BlockAddress *BA, EVT VT,
1844 int64_t Offset, bool isTarget,
1845 unsigned TargetFlags) {
1846 unsigned Opc = isTarget ? ISD::TargetBlockAddress : ISD::BlockAddress;
1847
1848 FoldingSetNodeID ID;
1849 AddNodeIDNode(ID, Opc, getVTList(VT), None);
1850 ID.AddPointer(BA);
1851 ID.AddInteger(Offset);
1852 ID.AddInteger(TargetFlags);
1853 void *IP = nullptr;
1854 if (SDNode *E = FindNodeOrInsertPos(ID, IP))
1855 return SDValue(E, 0);
1856
1857 auto *N = newSDNode<BlockAddressSDNode>(Opc, VT, BA, Offset, TargetFlags);
1858 CSEMap.InsertNode(N, IP);
1859 InsertNode(N);
1860 return SDValue(N, 0);
1861 }
1862
1863 SDValue SelectionDAG::getSrcValue(const Value *V) {
1864 assert((!V || V->getType()->isPointerTy()) &&
1865 "SrcValue is not a pointer?");
1866
1867 FoldingSetNodeID ID;
1868 AddNodeIDNode(ID, ISD::SRCVALUE, getVTList(MVT::Other), None);
1869 ID.AddPointer(V);
1870
1871 void *IP = nullptr;
1872 if (SDNode *E = FindNodeOrInsertPos(ID, IP))
1873 return SDValue(E, 0);
1874
1875 auto *N = newSDNode<SrcValueSDNode>(V);
1876 CSEMap.InsertNode(N, IP);
1877 InsertNode(N);
1878 return SDValue(N, 0);
1879 }
1880
1881 SDValue SelectionDAG::getMDNode(const MDNode *MD) {
1882 FoldingSetNodeID ID;
1883 AddNodeIDNode(ID, ISD::MDNODE_SDNODE, getVTList(MVT::Other), None);
1884 ID.AddPointer(MD);
1885
1886 void *IP = nullptr;
1887 if (SDNode *E = FindNodeOrInsertPos(ID, IP))
1888 return SDValue(E, 0);
1889
1890 auto *N = newSDNode<MDNodeSDNode>(MD);
1891 CSEMap.InsertNode(N, IP);
1892 InsertNode(N);
1893 return SDValue(N, 0);
1894 }
1895
1896 SDValue SelectionDAG::getBitcast(EVT VT, SDValue V) {
1897 if (VT == V.getValueType())
1898 return V;
1899
1900 return getNode(ISD::BITCAST, SDLoc(V), VT, V);
1901 }
1902
1903 SDValue SelectionDAG::getAddrSpaceCast(const SDLoc &dl, EVT VT, SDValue Ptr,
1904 unsigned SrcAS, unsigned DestAS) {
1905 SDValue Ops[] = {Ptr};
1906 FoldingSetNodeID ID;
1907 AddNodeIDNode(ID, ISD::ADDRSPACECAST, getVTList(VT), Ops);
1908 ID.AddInteger(SrcAS);
1909 ID.AddInteger(DestAS);
1910
1911 void *IP = nullptr;
1912 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP))
1913 return SDValue(E, 0);
1914
1915 auto *N = newSDNode<AddrSpaceCastSDNode>(dl.getIROrder(), dl.getDebugLoc(),
1916 VT, SrcAS, DestAS);
1917 createOperands(N, Ops);
1918
1919 CSEMap.InsertNode(N, IP);
1920 InsertNode(N);
1921 return SDValue(N, 0);
1922 }
1923
1924 /// getShiftAmountOperand - Return the specified value cast to
1925 /// the target's desired shift amount type.
1926 SDValue SelectionDAG::getShiftAmountOperand(EVT LHSTy, SDValue Op) {
1927 EVT OpTy = Op.getValueType();
1928 EVT ShTy = TLI->getShiftAmountTy(LHSTy, getDataLayout());
1929 if (OpTy == ShTy || OpTy.isVector()) return Op;
1930
1931 return getZExtOrTrunc(Op, SDLoc(Op), ShTy);
1932 }
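// Illustrative: on a target whose getShiftAmountTy for an i64 LHS is i8 (a
// common choice), an i32 shift amount is truncated and an i1 amount is
// zero-extended; vector shift amounts are returned unchanged.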
1933
1934 SDValue SelectionDAG::expandVAArg(SDNode *Node) {
1935 SDLoc dl(Node);
1936 const TargetLowering &TLI = getTargetLoweringInfo();
1937 const Value *V = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
1938 EVT VT = Node->getValueType(0);
1939 SDValue Tmp1 = Node->getOperand(0);
1940 SDValue Tmp2 = Node->getOperand(1);
1941 const MaybeAlign MA(Node->getConstantOperandVal(3));
1942
1943 SDValue VAListLoad = getLoad(TLI.getPointerTy(getDataLayout()), dl, Tmp1,
1944 Tmp2, MachinePointerInfo(V));
1945 SDValue VAList = VAListLoad;
1946
1947 if (MA && *MA > TLI.getMinStackArgumentAlignment()) {
1948 VAList = getNode(ISD::ADD, dl, VAList.getValueType(), VAList,
1949 getConstant(MA->value() - 1, dl, VAList.getValueType()));
1950
1951 VAList =
1952 getNode(ISD::AND, dl, VAList.getValueType(), VAList,
1953 getConstant(-(int64_t)MA->value(), dl, VAList.getValueType()));
1954 }
1955
1956 // Increment the pointer, VAList, to the next vaarg
1957 Tmp1 = getNode(ISD::ADD, dl, VAList.getValueType(), VAList,
1958 getConstant(getDataLayout().getTypeAllocSize(
1959 VT.getTypeForEVT(*getContext())),
1960 dl, VAList.getValueType()));
1961 // Store the incremented VAList to the legalized pointer
1962 Tmp1 =
1963 getStore(VAListLoad.getValue(1), dl, Tmp1, Tmp2, MachinePointerInfo(V));
1964 // Load the actual argument out of the pointer VAList
1965 return getLoad(VT, dl, Tmp1, VAList, MachinePointerInfo());
1966 }
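// Worked example of the over-alignment path above (illustrative): with
// MA == 16 the sequence computes
//   VAList = (VAList + 15) & -16
// where getConstant(MA->value() - 1) supplies the +15 and
// getConstant(-(int64_t)MA->value()) supplies the -16 (i.e. ~15) mask,
// rounding the va_list pointer up to a 16-byte boundary.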
1967
1968 SDValue SelectionDAG::expandVACopy(SDNode *Node) {
1969 SDLoc dl(Node);
1970 const TargetLowering &TLI = getTargetLoweringInfo();
1971 // This defaults to loading a pointer from the input and storing it to the
1972 // output, returning the chain.
1973 const Value *VD = cast<SrcValueSDNode>(Node->getOperand(3))->getValue();
1974 const Value *VS = cast<SrcValueSDNode>(Node->getOperand(4))->getValue();
1975 SDValue Tmp1 =
1976 getLoad(TLI.getPointerTy(getDataLayout()), dl, Node->getOperand(0),
1977 Node->getOperand(2), MachinePointerInfo(VS));
1978 return getStore(Tmp1.getValue(1), dl, Tmp1, Node->getOperand(1),
1979 MachinePointerInfo(VD));
1980 }
1981
1982 SDValue SelectionDAG::CreateStackTemporary(EVT VT, unsigned minAlign) {
1983 MachineFrameInfo &MFI = getMachineFunction().getFrameInfo();
1984 unsigned ByteSize = VT.getStoreSize();
1985 Type *Ty = VT.getTypeForEVT(*getContext());
1986 unsigned StackAlign =
1987 std::max((unsigned)getDataLayout().getPrefTypeAlignment(Ty), minAlign);
1988
1989 int FrameIdx = MFI.CreateStackObject(ByteSize, StackAlign, false);
1990 return getFrameIndex(FrameIdx, TLI->getFrameIndexTy(getDataLayout()));
1991 }
1992
1993 SDValue SelectionDAG::CreateStackTemporary(EVT VT1, EVT VT2) {
1994 unsigned Bytes = std::max(VT1.getStoreSize(), VT2.getStoreSize());
1995 Type *Ty1 = VT1.getTypeForEVT(*getContext());
1996 Type *Ty2 = VT2.getTypeForEVT(*getContext());
1997 const DataLayout &DL = getDataLayout();
1998 unsigned Align =
1999 std::max(DL.getPrefTypeAlignment(Ty1), DL.getPrefTypeAlignment(Ty2));
2000
2001 MachineFrameInfo &MFI = getMachineFunction().getFrameInfo();
2002 int FrameIdx = MFI.CreateStackObject(Bytes, Align, false);
2003 return getFrameIndex(FrameIdx, TLI->getFrameIndexTy(getDataLayout()));
2004 }
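// Illustrative: CreateStackTemporary(MVT::i64, MVT::f32) reserves
// max(8, 4) == 8 bytes at the stricter of the two preferred alignments, so
// the slot can legally hold a value of either type.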
2005
2006 SDValue SelectionDAG::FoldSetCC(EVT VT, SDValue N1, SDValue N2,
2007 ISD::CondCode Cond, const SDLoc &dl) {
2008 EVT OpVT = N1.getValueType();
2009
2010 // These setcc operations always fold.
2011 switch (Cond) {
2012 default: break;
2013 case ISD::SETFALSE:
2014 case ISD::SETFALSE2: return getBoolConstant(false, dl, VT, OpVT);
2015 case ISD::SETTRUE:
2016 case ISD::SETTRUE2: return getBoolConstant(true, dl, VT, OpVT);
2017
2018 case ISD::SETOEQ:
2019 case ISD::SETOGT:
2020 case ISD::SETOGE:
2021 case ISD::SETOLT:
2022 case ISD::SETOLE:
2023 case ISD::SETONE:
2024 case ISD::SETO:
2025 case ISD::SETUO:
2026 case ISD::SETUEQ:
2027 case ISD::SETUNE:
2028 assert(!OpVT.isInteger() && "Illegal setcc for integer!");
2029 break;
2030 }
2031
2032 if (OpVT.isInteger()) {
2033 // For EQ and NE, we can always pick a value for the undef to make the
2034 // predicate pass or fail, so we can return undef.
2035 // Matches behavior in llvm::ConstantFoldCompareInstruction.
2036 // icmp eq/ne X, undef -> undef.
2037 if ((N1.isUndef() || N2.isUndef()) &&
2038 (Cond == ISD::SETEQ || Cond == ISD::SETNE))
2039 return getUNDEF(VT);
2040
2041 // If both operands are undef, we can return undef for int comparison.
2042 // icmp undef, undef -> undef.
2043 if (N1.isUndef() && N2.isUndef())
2044 return getUNDEF(VT);
2045
2046 // icmp X, X -> true/false
2047 // icmp X, undef -> true/false because undef could be X.
2048 if (N1 == N2)
2049 return getBoolConstant(ISD::isTrueWhenEqual(Cond), dl, VT, OpVT);
2050 }
2051
2052 if (ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2)) {
2053 const APInt &C2 = N2C->getAPIntValue();
2054 if (ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1)) {
2055 const APInt &C1 = N1C->getAPIntValue();
2056
2057 switch (Cond) {
2058 default: llvm_unreachable("Unknown integer setcc!");
2059 case ISD::SETEQ: return getBoolConstant(C1 == C2, dl, VT, OpVT);
2060 case ISD::SETNE: return getBoolConstant(C1 != C2, dl, VT, OpVT);
2061 case ISD::SETULT: return getBoolConstant(C1.ult(C2), dl, VT, OpVT);
2062 case ISD::SETUGT: return getBoolConstant(C1.ugt(C2), dl, VT, OpVT);
2063 case ISD::SETULE: return getBoolConstant(C1.ule(C2), dl, VT, OpVT);
2064 case ISD::SETUGE: return getBoolConstant(C1.uge(C2), dl, VT, OpVT);
2065 case ISD::SETLT: return getBoolConstant(C1.slt(C2), dl, VT, OpVT);
2066 case ISD::SETGT: return getBoolConstant(C1.sgt(C2), dl, VT, OpVT);
2067 case ISD::SETLE: return getBoolConstant(C1.sle(C2), dl, VT, OpVT);
2068 case ISD::SETGE: return getBoolConstant(C1.sge(C2), dl, VT, OpVT);
2069 }
2070 }
2071 }
2072
2073 auto *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
2074 auto *N2CFP = dyn_cast<ConstantFPSDNode>(N2);
2075
2076 if (N1CFP && N2CFP) {
2077 APFloat::cmpResult R = N1CFP->getValueAPF().compare(N2CFP->getValueAPF());
2078 switch (Cond) {
2079 default: break;
2080 case ISD::SETEQ: if (R==APFloat::cmpUnordered)
2081 return getUNDEF(VT);
2082 LLVM_FALLTHROUGH;
2083 case ISD::SETOEQ: return getBoolConstant(R==APFloat::cmpEqual, dl, VT,
2084 OpVT);
2085 case ISD::SETNE: if (R==APFloat::cmpUnordered)
2086 return getUNDEF(VT);
2087 LLVM_FALLTHROUGH;
2088 case ISD::SETONE: return getBoolConstant(R==APFloat::cmpGreaterThan ||
2089 R==APFloat::cmpLessThan, dl, VT,
2090 OpVT);
2091 case ISD::SETLT: if (R==APFloat::cmpUnordered)
2092 return getUNDEF(VT);
2093 LLVM_FALLTHROUGH;
2094 case ISD::SETOLT: return getBoolConstant(R==APFloat::cmpLessThan, dl, VT,
2095 OpVT);
2096 case ISD::SETGT: if (R==APFloat::cmpUnordered)
2097 return getUNDEF(VT);
2098 LLVM_FALLTHROUGH;
2099 case ISD::SETOGT: return getBoolConstant(R==APFloat::cmpGreaterThan, dl,
2100 VT, OpVT);
2101 case ISD::SETLE: if (R==APFloat::cmpUnordered)
2102 return getUNDEF(VT);
2103 LLVM_FALLTHROUGH;
2104 case ISD::SETOLE: return getBoolConstant(R==APFloat::cmpLessThan ||
2105 R==APFloat::cmpEqual, dl, VT,
2106 OpVT);
2107 case ISD::SETGE: if (R==APFloat::cmpUnordered)
2108 return getUNDEF(VT);
2109 LLVM_FALLTHROUGH;
2110 case ISD::SETOGE: return getBoolConstant(R==APFloat::cmpGreaterThan ||
2111 R==APFloat::cmpEqual, dl, VT, OpVT);
2112 case ISD::SETO: return getBoolConstant(R!=APFloat::cmpUnordered, dl, VT,
2113 OpVT);
2114 case ISD::SETUO: return getBoolConstant(R==APFloat::cmpUnordered, dl, VT,
2115 OpVT);
2116 case ISD::SETUEQ: return getBoolConstant(R==APFloat::cmpUnordered ||
2117 R==APFloat::cmpEqual, dl, VT,
2118 OpVT);
2119 case ISD::SETUNE: return getBoolConstant(R!=APFloat::cmpEqual, dl, VT,
2120 OpVT);
2121 case ISD::SETULT: return getBoolConstant(R==APFloat::cmpUnordered ||
2122 R==APFloat::cmpLessThan, dl, VT,
2123 OpVT);
2124 case ISD::SETUGT: return getBoolConstant(R==APFloat::cmpGreaterThan ||
2125 R==APFloat::cmpUnordered, dl, VT,
2126 OpVT);
2127 case ISD::SETULE: return getBoolConstant(R!=APFloat::cmpGreaterThan, dl,
2128 VT, OpVT);
2129 case ISD::SETUGE: return getBoolConstant(R!=APFloat::cmpLessThan, dl, VT,
2130 OpVT);
2131 }
2132 } else if (N1CFP && OpVT.isSimple() && !N2.isUndef()) {
2133 // Ensure that the constant occurs on the RHS.
2134 ISD::CondCode SwappedCond = ISD::getSetCCSwappedOperands(Cond);
2135 if (!TLI->isCondCodeLegal(SwappedCond, OpVT.getSimpleVT()))
2136 return SDValue();
2137 return getSetCC(dl, VT, N2, N1, SwappedCond);
2138 } else if ((N2CFP && N2CFP->getValueAPF().isNaN()) ||
2139 (OpVT.isFloatingPoint() && (N1.isUndef() || N2.isUndef()))) {
2140 // If an operand is known to be a NaN (or an undef that could be a NaN),
2141 // we can fold the comparison.
2142 // Choosing NaN for the undef will always make unordered comparisons
2143 // succeed and ordered comparisons fail.
2144 // Matches behavior in llvm::ConstantFoldCompareInstruction.
2145 switch (ISD::getUnorderedFlavor(Cond)) {
2146 default:
2147 llvm_unreachable("Unknown flavor!");
2148 case 0: // Known false.
2149 return getBoolConstant(false, dl, VT, OpVT);
2150 case 1: // Known true.
2151 return getBoolConstant(true, dl, VT, OpVT);
2152 case 2: // Undefined.
2153 return getUNDEF(VT);
2154 }
2155 }
2156
2157 // Could not fold it.
2158 return SDValue();
2159 }
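// Illustrative folds (not exhaustive; X is any i32 value):
//   setcc 7, 9, setult    -> true   (constant APInt comparison)
//   setcc X, X, setne     -> false  (isTrueWhenEqual(SETNE) is false)
//   setcc X, undef, seteq -> undef  (either result is consistent with undef)
// FP predicates against a known NaN resolve through getUnorderedFlavor.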
2160
2161 /// See if the specified operand can be simplified with the knowledge that only
2162 /// the bits specified by DemandedBits are used.
2163 /// TODO: really we should be making this into the DAG equivalent of
2164 /// SimplifyMultipleUseDemandedBits and not generate any new nodes.
2165 SDValue SelectionDAG::GetDemandedBits(SDValue V, const APInt &DemandedBits) {
2166 EVT VT = V.getValueType();
2167 APInt DemandedElts = VT.isVector()
2168 ? APInt::getAllOnesValue(VT.getVectorNumElements())
2169 : APInt(1, 1);
2170 return GetDemandedBits(V, DemandedBits, DemandedElts);
2171 }
2172
2173 /// See if the specified operand can be simplified with the knowledge that only
2174 /// the bits specified by DemandedBits are used in the elements specified by
2175 /// DemandedElts.
2176 /// TODO: really we should be making this into the DAG equivalent of
2177 /// SimplifyMultipleUseDemandedBits and not generate any new nodes.
2178 SDValue SelectionDAG::GetDemandedBits(SDValue V, const APInt &DemandedBits,
2179 const APInt &DemandedElts) {
2180 switch (V.getOpcode()) {
2181 default:
2182 break;
2183 case ISD::Constant: {
2184 auto *CV = cast<ConstantSDNode>(V.getNode());
2185 assert(CV && "Const value should be ConstSDNode.");
2186 const APInt &CVal = CV->getAPIntValue();
2187 APInt NewVal = CVal & DemandedBits;
2188 if (NewVal != CVal)
2189 return getConstant(NewVal, SDLoc(V), V.getValueType());
2190 break;
2191 }
2192 case ISD::OR:
2193 case ISD::XOR:
2194 case ISD::SIGN_EXTEND_INREG:
2195 return TLI->SimplifyMultipleUseDemandedBits(V, DemandedBits, DemandedElts,
2196 *this, 0);
2197 case ISD::SRL:
2198 // Only look at single-use SRLs.
2199 if (!V.getNode()->hasOneUse())
2200 break;
2201 if (auto *RHSC = dyn_cast<ConstantSDNode>(V.getOperand(1))) {
2202 // See if we can recursively simplify the LHS.
2203 unsigned Amt = RHSC->getZExtValue();
2204
2205 // Watch out for shift count overflow though.
2206 if (Amt >= DemandedBits.getBitWidth())
2207 break;
2208 APInt SrcDemandedBits = DemandedBits << Amt;
2209 if (SDValue SimplifyLHS =
2210 GetDemandedBits(V.getOperand(0), SrcDemandedBits))
2211 return getNode(ISD::SRL, SDLoc(V), V.getValueType(), SimplifyLHS,
2212 V.getOperand(1));
2213 }
2214 break;
2215 case ISD::AND: {
2216 // X & -1 -> X (ignoring bits which aren't demanded).
2217 // Also handle the case where masked out bits in X are known to be zero.
2218 if (ConstantSDNode *RHSC = isConstOrConstSplat(V.getOperand(1))) {
2219 const APInt &AndVal = RHSC->getAPIntValue();
2220 if (DemandedBits.isSubsetOf(AndVal) ||
2221 DemandedBits.isSubsetOf(computeKnownBits(V.getOperand(0)).Zero |
2222 AndVal))
2223 return V.getOperand(0);
2224 }
2225 break;
2226 }
2227 case ISD::ANY_EXTEND: {
2228 SDValue Src = V.getOperand(0);
2229 unsigned SrcBitWidth = Src.getScalarValueSizeInBits();
2230 // Be conservative here: only peek through when all demanded bits lie in
2231 // the non-extended source (even though the extended bits are technically
2232 // undef).
2233 if (DemandedBits.getActiveBits() > SrcBitWidth)
2234 break;
2235 APInt SrcDemandedBits = DemandedBits.trunc(SrcBitWidth);
2236 if (SDValue DemandedSrc = GetDemandedBits(Src, SrcDemandedBits))
2237 return getNode(ISD::ANY_EXTEND, SDLoc(V), V.getValueType(), DemandedSrc);
2238 break;
2239 }
2240 }
2241 return SDValue();
2242 }
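// Illustrative (i8 values): with DemandedBits == 0x0F,
//   (and X, 0x0F)  simplifies to X, since the mask covers all demanded bits;
//   constant 0xAB  is rebuilt as the narrower constant 0x0B (0xAB & 0x0F).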
2243
2244 /// SignBitIsZero - Return true if the sign bit of Op is known to be zero. We
2245 /// use this predicate to simplify operations downstream.
2246 bool SelectionDAG::SignBitIsZero(SDValue Op, unsigned Depth) const {
2247 unsigned BitWidth = Op.getScalarValueSizeInBits();
2248 return MaskedValueIsZero(Op, APInt::getSignMask(BitWidth), Depth);
2249 }
2250
2251 /// MaskedValueIsZero - Return true if 'V & Mask' is known to be zero. We use
2252 /// this predicate to simplify operations downstream. Mask is known to be zero
2253 /// for bits that V cannot have.
2254 bool SelectionDAG::MaskedValueIsZero(SDValue V, const APInt &Mask,
2255 unsigned Depth) const {
2256 EVT VT = V.getValueType();
2257 APInt DemandedElts = VT.isVector()
2258 ? APInt::getAllOnesValue(VT.getVectorNumElements())
2259 : APInt(1, 1);
2260 return MaskedValueIsZero(V, Mask, DemandedElts, Depth);
2261 }
2262
2263 /// MaskedValueIsZero - Return true if 'V & Mask' is known to be zero in
2264 /// DemandedElts. We use this predicate to simplify operations downstream.
2265 /// Mask is known to be zero for bits that V cannot have.
2266 bool SelectionDAG::MaskedValueIsZero(SDValue V, const APInt &Mask,
2267 const APInt &DemandedElts,
2268 unsigned Depth) const {
2269 return Mask.isSubsetOf(computeKnownBits(V, DemandedElts, Depth).Zero);
2270 }
2271
2272 /// MaskedValueIsAllOnes - Return true if '(Op & Mask) == Mask'.
2273 bool SelectionDAG::MaskedValueIsAllOnes(SDValue V, const APInt &Mask,
2274 unsigned Depth) const {
2275 return Mask.isSubsetOf(computeKnownBits(V, Depth).One);
2276 }
2277
2278 /// isSplatValue - Return true if the vector V has the same value
2279 /// across all DemandedElts.
2280 bool SelectionDAG::isSplatValue(SDValue V, const APInt &DemandedElts,
2281 APInt &UndefElts, unsigned Depth) {
2282 if (!DemandedElts)
2283 return false; // No demanded elts, better to assume we don't know anything.
2284
2285 if (Depth >= MaxRecursionDepth)
2286 return false; // Limit search depth.
2287
2288 EVT VT = V.getValueType();
2289 assert(VT.isVector() && "Vector type expected");
2290
2291 unsigned NumElts = VT.getVectorNumElements();
2292 assert(NumElts == DemandedElts.getBitWidth() && "Vector size mismatch");
2293 UndefElts = APInt::getNullValue(NumElts);
2294
2295 switch (V.getOpcode()) {
2296 case ISD::BUILD_VECTOR: {
2297 SDValue Scl;
2298 for (unsigned i = 0; i != NumElts; ++i) {
2299 SDValue Op = V.getOperand(i);
2300 if (Op.isUndef()) {
2301 UndefElts.setBit(i);
2302 continue;
2303 }
2304 if (!DemandedElts[i])
2305 continue;
2306 if (Scl && Scl != Op)
2307 return false;
2308 Scl = Op;
2309 }
2310 return true;
2311 }
2312 case ISD::VECTOR_SHUFFLE: {
2313 // Check if this is a shuffle node doing a splat.
2314 // TODO: Do we need to handle shuffle(splat, undef, mask)?
2315 int SplatIndex = -1;
2316 ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(V)->getMask();
2317 for (int i = 0; i != (int)NumElts; ++i) {
2318 int M = Mask[i];
2319 if (M < 0) {
2320 UndefElts.setBit(i);
2321 continue;
2322 }
2323 if (!DemandedElts[i])
2324 continue;
2325 if (0 <= SplatIndex && SplatIndex != M)
2326 return false;
2327 SplatIndex = M;
2328 }
2329 return true;
2330 }
2331 case ISD::EXTRACT_SUBVECTOR: {
2332 SDValue Src = V.getOperand(0);
2333 ConstantSDNode *SubIdx = dyn_cast<ConstantSDNode>(V.getOperand(1));
2334 unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
2335 if (SubIdx && SubIdx->getAPIntValue().ule(NumSrcElts - NumElts)) {
2336 // Offset the demanded elts by the subvector index.
2337 uint64_t Idx = SubIdx->getZExtValue();
2338 APInt UndefSrcElts;
2339 APInt DemandedSrc = DemandedElts.zextOrSelf(NumSrcElts).shl(Idx);
2340 if (isSplatValue(Src, DemandedSrc, UndefSrcElts, Depth + 1)) {
2341 UndefElts = UndefSrcElts.extractBits(NumElts, Idx);
2342 return true;
2343 }
2344 }
2345 break;
2346 }
2347 case ISD::ADD:
2348 case ISD::SUB:
2349 case ISD::AND: {
2350 APInt UndefLHS, UndefRHS;
2351 SDValue LHS = V.getOperand(0);
2352 SDValue RHS = V.getOperand(1);
2353 if (isSplatValue(LHS, DemandedElts, UndefLHS, Depth + 1) &&
2354 isSplatValue(RHS, DemandedElts, UndefRHS, Depth + 1)) {
2355 UndefElts = UndefLHS | UndefRHS;
2356 return true;
2357 }
2358 break;
2359 }
2360 }
2361
2362 return false;
2363 }
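// Illustrative (v4i32): a BUILD_VECTOR (X, X, undef, X) is a splat for any
// DemandedElts and reports UndefElts == 0b0100, while (X, Y, X, X) is a
// splat only when element 1 is not demanded, e.g. DemandedElts == 0b1101.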
2364
2365 /// Helper wrapper to main isSplatValue function.
2366 bool SelectionDAG::isSplatValue(SDValue V, bool AllowUndefs) {
2367 EVT VT = V.getValueType();
2368 assert(VT.isVector() && "Vector type expected");
2369 unsigned NumElts = VT.getVectorNumElements();
2370
2371 APInt UndefElts;
2372 APInt DemandedElts = APInt::getAllOnesValue(NumElts);
2373 return isSplatValue(V, DemandedElts, UndefElts) &&
2374 (AllowUndefs || !UndefElts);
2375 }
2376
2377 SDValue SelectionDAG::getSplatSourceVector(SDValue V, int &SplatIdx) {
2378 V = peekThroughExtractSubvectors(V);
2379
2380 EVT VT = V.getValueType();
2381 unsigned Opcode = V.getOpcode();
2382 switch (Opcode) {
2383 default: {
2384 APInt UndefElts;
2385 APInt DemandedElts = APInt::getAllOnesValue(VT.getVectorNumElements());
2386 if (isSplatValue(V, DemandedElts, UndefElts)) {
2387 // Handle case where all demanded elements are UNDEF.
2388 if (DemandedElts.isSubsetOf(UndefElts)) {
2389 SplatIdx = 0;
2390 return getUNDEF(VT);
2391 }
2392 SplatIdx = (UndefElts & DemandedElts).countTrailingOnes();
2393 return V;
2394 }
2395 break;
2396 }
2397 case ISD::VECTOR_SHUFFLE: {
2398 // Check if this is a shuffle node doing a splat.
2399 // TODO - remove this and rely purely on SelectionDAG::isSplatValue,
2400 // getTargetVShiftNode currently struggles without the splat source.
2401 auto *SVN = cast<ShuffleVectorSDNode>(V);
2402 if (!SVN->isSplat())
2403 break;
2404 int Idx = SVN->getSplatIndex();
2405 int NumElts = V.getValueType().getVectorNumElements();
2406 SplatIdx = Idx % NumElts;
2407 return V.getOperand(Idx / NumElts);
2408 }
2409 }
2410
2411 return SDValue();
2412 }
2413
2414 SDValue SelectionDAG::getSplatValue(SDValue V) {
2415 int SplatIdx;
2416 if (SDValue SrcVector = getSplatSourceVector(V, SplatIdx))
2417 return getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(V),
2418 SrcVector.getValueType().getScalarType(), SrcVector,
2419 getIntPtrConstant(SplatIdx, SDLoc(V)));
2420 return SDValue();
2421 }
2422
2423 /// If a SHL/SRA/SRL node has a constant or splat constant shift amount that
2424 /// is less than the element bit-width of the shift node, return it.
2425 static const APInt *getValidShiftAmountConstant(SDValue V,
2426 const APInt &DemandedElts) {
2427 unsigned BitWidth = V.getScalarValueSizeInBits();
2428 if (ConstantSDNode *SA = isConstOrConstSplat(V.getOperand(1), DemandedElts)) {
2429 // Shifting more than the bitwidth is not valid.
2430 const APInt &ShAmt = SA->getAPIntValue();
2431 if (ShAmt.ult(BitWidth))
2432 return &ShAmt;
2433 }
2434 return nullptr;
2435 }
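// Illustrative: for a v4i32 SHL whose amount is a splat of 5 this returns a
// pointer to the APInt 5; a splat of 32 (the element width) yields nullptr,
// as shifting by the full bit-width is not a valid constant shift.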
2436
2437 /// If a SHL/SRA/SRL node has constant vector shift amounts that are all less
2438 /// than the element bit-width of the shift node, return the minimum value.
2439 static const APInt *
2440 getValidMinimumShiftAmountConstant(SDValue V, const APInt &DemandedElts) {
2441 unsigned BitWidth = V.getScalarValueSizeInBits();
2442 auto *BV = dyn_cast<BuildVectorSDNode>(V.getOperand(1));
2443 if (!BV)
2444 return nullptr;
2445 const APInt *MinShAmt = nullptr;
2446 for (unsigned i = 0, e = BV->getNumOperands(); i != e; ++i) {
2447 if (!DemandedElts[i])
2448 continue;
2449 auto *SA = dyn_cast<ConstantSDNode>(BV->getOperand(i));
2450 if (!SA)
2451 return nullptr;
2452 // Shifting more than the bitwidth is not valid.
2453 const APInt &ShAmt = SA->getAPIntValue();
2454 if (ShAmt.uge(BitWidth))
2455 return nullptr;
2456 if (MinShAmt && MinShAmt->ule(ShAmt))
2457 continue;
2458 MinShAmt = &ShAmt;
2459 }
2460 return MinShAmt;
2461 }
2462
2463 /// If a SHL/SRA/SRL node has constant vector shift amounts that are all less
2464 /// than the element bit-width of the shift node, return the maximum value.
2465 static const APInt *
2466 getValidMaximumShiftAmountConstant(SDValue V, const APInt &DemandedElts) {
2467 unsigned BitWidth = V.getScalarValueSizeInBits();
2468 auto *BV = dyn_cast<BuildVectorSDNode>(V.getOperand(1));
2469 if (!BV)
2470 return nullptr;
2471 const APInt *MaxShAmt = nullptr;
2472 for (unsigned i = 0, e = BV->getNumOperands(); i != e; ++i) {
2473 if (!DemandedElts[i])
2474 continue;
2475 auto *SA = dyn_cast<ConstantSDNode>(BV->getOperand(i));
2476 if (!SA)
2477 return nullptr;
2478 // Shifting more than the bitwidth is not valid.
2479 const APInt &ShAmt = SA->getAPIntValue();
2480 if (ShAmt.uge(BitWidth))
2481 return nullptr;
2482 if (MaxShAmt && MaxShAmt->uge(ShAmt))
2483 continue;
2484 MaxShAmt = &ShAmt;
2485 }
2486 return MaxShAmt;
2487 }
2488
2489 /// Determine which bits of Op are known to be either zero or one and return
2490 /// them in Known. For vectors, the known bits are those that are shared by
2491 /// every vector element.
2492 KnownBits SelectionDAG::computeKnownBits(SDValue Op, unsigned Depth) const {
2493 EVT VT = Op.getValueType();
2494 APInt DemandedElts = VT.isVector()
2495 ? APInt::getAllOnesValue(VT.getVectorNumElements())
2496 : APInt(1, 1);
2497 return computeKnownBits(Op, DemandedElts, Depth);
2498 }
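// Illustrative query (i8): for Op == (and X, 0xF0), the AND case below
// propagates zeros from the constant mask, so Known.Zero covers at least
// the low four bits regardless of what is known about X.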
2499
2500 /// Determine which bits of Op are known to be either zero or one and return
2501 /// them in Known. The DemandedElts argument allows us to only collect the known
2502 /// bits that are shared by the requested vector elements.
2503 KnownBits SelectionDAG::computeKnownBits(SDValue Op, const APInt &DemandedElts,
2504 unsigned Depth) const {
2505 unsigned BitWidth = Op.getScalarValueSizeInBits();
2506
2507 KnownBits Known(BitWidth); // Don't know anything.
2508
2509 if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
2510 // We know all of the bits for a constant!
2511 Known.One = C->getAPIntValue();
2512 Known.Zero = ~Known.One;
2513 return Known;
2514 }
2515 if (auto *C = dyn_cast<ConstantFPSDNode>(Op)) {
2516 // We know all of the bits for a constant fp!
2517 Known.One = C->getValueAPF().bitcastToAPInt();
2518 Known.Zero = ~Known.One;
2519 return Known;
2520 }
2521
2522 if (Depth >= MaxRecursionDepth)
2523 return Known; // Limit search depth.
2524
2525 KnownBits Known2;
2526 unsigned NumElts = DemandedElts.getBitWidth();
2527 assert((!Op.getValueType().isVector() ||
2528 NumElts == Op.getValueType().getVectorNumElements()) &&
2529 "Unexpected vector size");
2530
2531 if (!DemandedElts)
2532 return Known; // No demanded elts, better to assume we don't know anything.
2533
2534 unsigned Opcode = Op.getOpcode();
2535 switch (Opcode) {
2536 case ISD::BUILD_VECTOR:
2537 // Collect the known bits that are shared by every demanded vector element.
2538 Known.Zero.setAllBits(); Known.One.setAllBits();
2539 for (unsigned i = 0, e = Op.getNumOperands(); i != e; ++i) {
2540 if (!DemandedElts[i])
2541 continue;
2542
2543 SDValue SrcOp = Op.getOperand(i);
2544 Known2 = computeKnownBits(SrcOp, Depth + 1);
2545
2546 // BUILD_VECTOR can implicitly truncate sources, so we must handle this.
2547 if (SrcOp.getValueSizeInBits() != BitWidth) {
2548 assert(SrcOp.getValueSizeInBits() > BitWidth &&
2549 "Expected BUILD_VECTOR implicit truncation");
2550 Known2 = Known2.trunc(BitWidth);
2551 }
2552
2553 // Known bits are the values that are shared by every demanded element.
2554 Known.One &= Known2.One;
2555 Known.Zero &= Known2.Zero;
2556
2557 // If we don't know any bits, early out.
2558 if (Known.isUnknown())
2559 break;
2560 }
2561 break;
2562 case ISD::VECTOR_SHUFFLE: {
2563 // Collect the known bits that are shared by every vector element referenced
2564 // by the shuffle.
2565 APInt DemandedLHS(NumElts, 0), DemandedRHS(NumElts, 0);
2566 Known.Zero.setAllBits(); Known.One.setAllBits();
2567 const ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op);
2568 assert(NumElts == SVN->getMask().size() && "Unexpected vector size");
2569 for (unsigned i = 0; i != NumElts; ++i) {
2570 if (!DemandedElts[i])
2571 continue;
2572
2573 int M = SVN->getMaskElt(i);
2574 if (M < 0) {
2575 // For UNDEF elements, we don't know anything about the common state of
2576 // the shuffle result.
2577 Known.resetAll();
2578 DemandedLHS.clearAllBits();
2579 DemandedRHS.clearAllBits();
2580 break;
2581 }
2582
2583 if ((unsigned)M < NumElts)
2584 DemandedLHS.setBit((unsigned)M % NumElts);
2585 else
2586 DemandedRHS.setBit((unsigned)M % NumElts);
2587 }
2588 // Known bits are the values that are shared by every demanded element.
2589 if (!!DemandedLHS) {
2590 SDValue LHS = Op.getOperand(0);
2591 Known2 = computeKnownBits(LHS, DemandedLHS, Depth + 1);
2592 Known.One &= Known2.One;
2593 Known.Zero &= Known2.Zero;
2594 }
2595 // If we don't know any bits, early out.
2596 if (Known.isUnknown())
2597 break;
2598 if (!!DemandedRHS) {
2599 SDValue RHS = Op.getOperand(1);
2600 Known2 = computeKnownBits(RHS, DemandedRHS, Depth + 1);
2601 Known.One &= Known2.One;
2602 Known.Zero &= Known2.Zero;
2603 }
2604 break;
2605 }
2606 case ISD::CONCAT_VECTORS: {
2607 // Split DemandedElts and test each of the demanded subvectors.
2608 Known.Zero.setAllBits(); Known.One.setAllBits();
2609 EVT SubVectorVT = Op.getOperand(0).getValueType();
2610 unsigned NumSubVectorElts = SubVectorVT.getVectorNumElements();
2611 unsigned NumSubVectors = Op.getNumOperands();
2612 for (unsigned i = 0; i != NumSubVectors; ++i) {
2613 APInt DemandedSub = DemandedElts.lshr(i * NumSubVectorElts);
2614 DemandedSub = DemandedSub.trunc(NumSubVectorElts);
2615 if (!!DemandedSub) {
2616 SDValue Sub = Op.getOperand(i);
2617 Known2 = computeKnownBits(Sub, DemandedSub, Depth + 1);
2618 Known.One &= Known2.One;
2619 Known.Zero &= Known2.Zero;
2620 }
2621 // If we don't know any bits, early out.
2622 if (Known.isUnknown())
2623 break;
2624 }
2625 break;
2626 }
2627 case ISD::INSERT_SUBVECTOR: {
2628 // If we know the element index, demand any elements from the subvector and
2629 // the remainder from the src it's inserted into; otherwise demand them all.
2630 SDValue Src = Op.getOperand(0);
2631 SDValue Sub = Op.getOperand(1);
2632 ConstantSDNode *SubIdx = dyn_cast<ConstantSDNode>(Op.getOperand(2));
2633 unsigned NumSubElts = Sub.getValueType().getVectorNumElements();
2634 if (SubIdx && SubIdx->getAPIntValue().ule(NumElts - NumSubElts)) {
2635 Known.One.setAllBits();
2636 Known.Zero.setAllBits();
2637 uint64_t Idx = SubIdx->getZExtValue();
2638 APInt DemandedSubElts = DemandedElts.extractBits(NumSubElts, Idx);
2639 if (!!DemandedSubElts) {
2640 Known = computeKnownBits(Sub, DemandedSubElts, Depth + 1);
2641 if (Known.isUnknown())
2642 break; // early-out.
2643 }
2644 APInt SubMask = APInt::getBitsSet(NumElts, Idx, Idx + NumSubElts);
2645 APInt DemandedSrcElts = DemandedElts & ~SubMask;
2646 if (!!DemandedSrcElts) {
2647 Known2 = computeKnownBits(Src, DemandedSrcElts, Depth + 1);
2648 Known.One &= Known2.One;
2649 Known.Zero &= Known2.Zero;
2650 }
2651 } else {
2652 Known = computeKnownBits(Sub, Depth + 1);
2653 if (Known.isUnknown())
2654 break; // early-out.
2655 Known2 = computeKnownBits(Src, Depth + 1);
2656 Known.One &= Known2.One;
2657 Known.Zero &= Known2.Zero;
2658 }
2659 break;
2660 }
2661 case ISD::EXTRACT_SUBVECTOR: {
2662 // If we know the element index, just demand the subvector's elements;
2663 // otherwise demand them all.
2664 SDValue Src = Op.getOperand(0);
2665 ConstantSDNode *SubIdx = dyn_cast<ConstantSDNode>(Op.getOperand(1));
2666 unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
2667 APInt DemandedSrc = APInt::getAllOnesValue(NumSrcElts);
2668 if (SubIdx && SubIdx->getAPIntValue().ule(NumSrcElts - NumElts)) {
2669 // Offset the demanded elts by the subvector index.
2670 uint64_t Idx = SubIdx->getZExtValue();
2671 DemandedSrc = DemandedElts.zextOrSelf(NumSrcElts).shl(Idx);
2672 }
2673 Known = computeKnownBits(Src, DemandedSrc, Depth + 1);
2674 break;
2675 }
2676 case ISD::SCALAR_TO_VECTOR: {
2677 // We know as much about scalar_to_vector as we know about its source,
2678 // which becomes the first element of an otherwise unknown vector.
2679 if (DemandedElts != 1)
2680 break;
2681
2682 SDValue N0 = Op.getOperand(0);
2683 Known = computeKnownBits(N0, Depth + 1);
2684 if (N0.getValueSizeInBits() != BitWidth)
2685 Known = Known.trunc(BitWidth);
2686
2687 break;
2688 }
2689 case ISD::BITCAST: {
2690 SDValue N0 = Op.getOperand(0);
2691 EVT SubVT = N0.getValueType();
2692 unsigned SubBitWidth = SubVT.getScalarSizeInBits();
2693
2694 // Ignore bitcasts from unsupported types.
2695 if (!(SubVT.isInteger() || SubVT.isFloatingPoint()))
2696 break;
2697
2698 // Fast handling of 'identity' bitcasts.
2699 if (BitWidth == SubBitWidth) {
2700 Known = computeKnownBits(N0, DemandedElts, Depth + 1);
2701 break;
2702 }
2703
2704 bool IsLE = getDataLayout().isLittleEndian();
2705
2706 // Bitcast 'small element' vector to 'large element' scalar/vector.
2707 if ((BitWidth % SubBitWidth) == 0) {
2708 assert(N0.getValueType().isVector() && "Expected bitcast from vector");
2709
2710 // Collect known bits for the (larger) output by collecting the known
2711 // bits from each set of sub elements and shifting these into place.
2712 // We need to separately call computeKnownBits for each set of
2713 // sub elements as the knownbits for each is likely to be different.
2714 unsigned SubScale = BitWidth / SubBitWidth;
2715 APInt SubDemandedElts(NumElts * SubScale, 0);
2716 for (unsigned i = 0; i != NumElts; ++i)
2717 if (DemandedElts[i])
2718 SubDemandedElts.setBit(i * SubScale);
2719
2720 for (unsigned i = 0; i != SubScale; ++i) {
2721 Known2 = computeKnownBits(N0, SubDemandedElts.shl(i),
2722 Depth + 1);
2723 unsigned Shifts = IsLE ? i : SubScale - 1 - i;
2724 Known.One |= Known2.One.zext(BitWidth).shl(SubBitWidth * Shifts);
2725 Known.Zero |= Known2.Zero.zext(BitWidth).shl(SubBitWidth * Shifts);
2726 }
2727 }
2728
2729 // Bitcast 'large element' scalar/vector to 'small element' vector.
2730 if ((SubBitWidth % BitWidth) == 0) {
2731 assert(Op.getValueType().isVector() && "Expected bitcast to vector");
2732
2733 // Collect known bits for the (smaller) output by collecting the known
2734 // bits from the overlapping larger input elements and extracting the
2735 // sub sections we actually care about.
2736 unsigned SubScale = SubBitWidth / BitWidth;
2737 APInt SubDemandedElts(NumElts / SubScale, 0);
2738 for (unsigned i = 0; i != NumElts; ++i)
2739 if (DemandedElts[i])
2740 SubDemandedElts.setBit(i / SubScale);
2741
2742 Known2 = computeKnownBits(N0, SubDemandedElts, Depth + 1);
2743
2744 Known.Zero.setAllBits(); Known.One.setAllBits();
2745 for (unsigned i = 0; i != NumElts; ++i)
2746 if (DemandedElts[i]) {
2747 unsigned Shifts = IsLE ? i : NumElts - 1 - i;
2748 unsigned Offset = (Shifts % SubScale) * BitWidth;
2749 Known.One &= Known2.One.lshr(Offset).trunc(BitWidth);
2750 Known.Zero &= Known2.Zero.lshr(Offset).trunc(BitWidth);
2751 // If we don't know any bits, early out.
2752 if (Known.isUnknown())
2753 break;
2754 }
2755 }
2756 break;
2757 }
2758 case ISD::AND:
2759 // If either the LHS or the RHS are Zero, the result is zero.
2760 Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
2761 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
2762
2763 // Output known-1 bits are only known if set in both the LHS & RHS.
2764 Known.One &= Known2.One;
2765 // Output known-0 are known to be clear if zero in either the LHS | RHS.
2766 Known.Zero |= Known2.Zero;
2767 break;
2768 case ISD::OR:
2769 Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
2770 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
2771
2772 // Output known-0 bits are only known if clear in both the LHS & RHS.
2773 Known.Zero &= Known2.Zero;
2774 // Output known-1 are known to be set if set in either the LHS | RHS.
2775 Known.One |= Known2.One;
2776 break;
2777 case ISD::XOR: {
2778 Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
2779 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
2780
2781 // Output known-0 bits are known if clear or set in both the LHS & RHS.
2782 APInt KnownZeroOut = (Known.Zero & Known2.Zero) | (Known.One & Known2.One);
2783 // Output known-1 are known to be set if set in only one of the LHS, RHS.
2784 Known.One = (Known.Zero & Known2.One) | (Known.One & Known2.Zero);
2785 Known.Zero = KnownZeroOut;
2786 break;
2787 }
2788 case ISD::MUL: {
2789 Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
2790 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
2791
2792 // If low bits are zero in either operand, output low known-0 bits.
2793 // Also compute a conservative estimate for high known-0 bits.
2794 // More trickiness is possible, but this is sufficient for the
2795 // interesting case of alignment computation.
2796 unsigned TrailZ = Known.countMinTrailingZeros() +
2797 Known2.countMinTrailingZeros();
2798 unsigned LeadZ = std::max(Known.countMinLeadingZeros() +
2799 Known2.countMinLeadingZeros(),
2800 BitWidth) - BitWidth;
2801
2802 Known.resetAll();
2803 Known.Zero.setLowBits(std::min(TrailZ, BitWidth));
2804 Known.Zero.setHighBits(std::min(LeadZ, BitWidth));
2805 break;
2806 }
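// Worked example for the MUL case above (illustrative): multiplying values
// known to be multiples of 4 and 8 (2 and 3 trailing zero bits) gives at
// least 2 + 3 == 5 trailing zero bits, which is how pointer alignment
// survives a multiply.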
2807 case ISD::UDIV: {
2808 // For the purposes of computing leading zeros we can conservatively
2809 // treat a udiv as a logical right shift by the power of 2 known to
2810 // be less than the denominator.
2811 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
2812 unsigned LeadZ = Known2.countMinLeadingZeros();
2813
2814 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
2815 unsigned RHSMaxLeadingZeros = Known2.countMaxLeadingZeros();
2816 if (RHSMaxLeadingZeros != BitWidth)
2817 LeadZ = std::min(BitWidth, LeadZ + BitWidth - RHSMaxLeadingZeros - 1);
2818
2819 Known.Zero.setHighBits(LeadZ);
2820 break;
2821 }
2822 case ISD::SELECT:
2823 case ISD::VSELECT:
2824 Known = computeKnownBits(Op.getOperand(2), DemandedElts, Depth+1);
2825 // If we don't know any bits, early out.
2826 if (Known.isUnknown())
2827 break;
2828 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth+1);
2829
2830 // Only known if known in both the LHS and RHS.
2831 Known.One &= Known2.One;
2832 Known.Zero &= Known2.Zero;
2833 break;
2834 case ISD::SELECT_CC:
2835 Known = computeKnownBits(Op.getOperand(3), DemandedElts, Depth+1);
2836 // If we don't know any bits, early out.
2837 if (Known.isUnknown())
2838 break;
2839 Known2 = computeKnownBits(Op.getOperand(2), DemandedElts, Depth+1);
2840
2841 // Only known if known in both the LHS and RHS.
2842 Known.One &= Known2.One;
2843 Known.Zero &= Known2.Zero;
2844 break;
2845 case ISD::SMULO:
2846 case ISD::UMULO:
2847 case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
2848 if (Op.getResNo() != 1)
2849 break;
2850 // The boolean result conforms to getBooleanContents.
2851 // If we know the result of a setcc has the top bits zero, use this info.
2852 // We know that we have an integer-based boolean since these operations
2853 // are only available for integer types.
2854 if (TLI->getBooleanContents(Op.getValueType().isVector(), false) ==
2855 TargetLowering::ZeroOrOneBooleanContent &&
2856 BitWidth > 1)
2857 Known.Zero.setBitsFrom(1);
2858 break;
2859 case ISD::SETCC:
2860 case ISD::STRICT_FSETCC:
2861 case ISD::STRICT_FSETCCS: {
2862 unsigned OpNo = Op->isStrictFPOpcode() ? 1 : 0;
2863 // If we know the result of a setcc has the top bits zero, use this info.
2864 if (TLI->getBooleanContents(Op.getOperand(OpNo).getValueType()) ==
2865 TargetLowering::ZeroOrOneBooleanContent &&
2866 BitWidth > 1)
2867 Known.Zero.setBitsFrom(1);
2868 break;
2869 }
2870 case ISD::SHL:
2871 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
2872
2873 if (const APInt *ShAmt = getValidShiftAmountConstant(Op, DemandedElts)) {
2874 unsigned Shift = ShAmt->getZExtValue();
2875 Known.Zero <<= Shift;
2876 Known.One <<= Shift;
2877 // Low bits are known zero.
2878 Known.Zero.setLowBits(Shift);
2879 break;
2880 }
2881
2882 // No matter the shift amount, the trailing zeros will stay zero.
2883 Known.Zero = APInt::getLowBitsSet(BitWidth, Known.countMinTrailingZeros());
2884 Known.One.clearAllBits();
2885
2886 // Minimum shift low bits are known zero.
2887 if (const APInt *ShMinAmt =
2888 getValidMinimumShiftAmountConstant(Op, DemandedElts))
2889 Known.Zero.setLowBits(ShMinAmt->getZExtValue());
2890 break;
2891 case ISD::SRL:
2892 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
2893
2894 if (const APInt *ShAmt = getValidShiftAmountConstant(Op, DemandedElts)) {
2895 unsigned Shift = ShAmt->getZExtValue();
2896 Known.Zero.lshrInPlace(Shift);
2897 Known.One.lshrInPlace(Shift);
2898 // High bits are known zero.
2899 Known.Zero.setHighBits(Shift);
2900 break;
2901 }
2902
2903 // No matter the shift amount, the leading zeros will stay zero.
2904 Known.Zero = APInt::getHighBitsSet(BitWidth, Known.countMinLeadingZeros());
2905 Known.One.clearAllBits();
2906
2907 // Minimum shift high bits are known zero.
2908 if (const APInt *ShMinAmt =
2909 getValidMinimumShiftAmountConstant(Op, DemandedElts))
2910 Known.Zero.setHighBits(ShMinAmt->getZExtValue());
2911 break;
2912 case ISD::SRA:
2913 if (const APInt *ShAmt = getValidShiftAmountConstant(Op, DemandedElts)) {
2914 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
2915 unsigned Shift = ShAmt->getZExtValue();
2916 // Sign extend known zero/one bit (else is unknown).
2917 Known.Zero.ashrInPlace(Shift);
2918 Known.One.ashrInPlace(Shift);
2919 }
2920 break;
2921 case ISD::FSHL:
2922 case ISD::FSHR:
2923 if (ConstantSDNode *C = isConstOrConstSplat(Op.getOperand(2), DemandedElts)) {
2924 unsigned Amt = C->getAPIntValue().urem(BitWidth);
2925
2926 // For fshl, 0-shift returns the 1st arg.
2927 // For fshr, 0-shift returns the 2nd arg.
2928 if (Amt == 0) {
2929 Known = computeKnownBits(Op.getOperand(Opcode == ISD::FSHL ? 0 : 1),
2930 DemandedElts, Depth + 1);
2931 break;
2932 }
2933
2934 // fshl: (X << (Z % BW)) | (Y >> (BW - (Z % BW)))
2935 // fshr: (X << (BW - (Z % BW))) | (Y >> (Z % BW))
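      // Illustration (i8, Amt = 3): fshl takes X's known bits shifted up by 3
      // and Y's top 3 known bits moved down to the low positions.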
2936 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
2937 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
2938 if (Opcode == ISD::FSHL) {
2939 Known.One <<= Amt;
2940 Known.Zero <<= Amt;
2941 Known2.One.lshrInPlace(BitWidth - Amt);
2942 Known2.Zero.lshrInPlace(BitWidth - Amt);
2943 } else {
2944 Known.One <<= BitWidth - Amt;
2945 Known.Zero <<= BitWidth - Amt;
2946 Known2.One.lshrInPlace(Amt);
2947 Known2.Zero.lshrInPlace(Amt);
2948 }
2949 Known.One |= Known2.One;
2950 Known.Zero |= Known2.Zero;
2951 }
2952 break;
2953 case ISD::SIGN_EXTEND_INREG: {
2954 EVT EVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
2955 unsigned EBits = EVT.getScalarSizeInBits();
2956
2957 // Sign extension. Compute the demanded bits in the result that are not
2958 // present in the input.
2959 APInt NewBits = APInt::getHighBitsSet(BitWidth, BitWidth - EBits);
2960
2961 APInt InSignMask = APInt::getSignMask(EBits);
2962 APInt InputDemandedBits = APInt::getLowBitsSet(BitWidth, EBits);
2963
2964 // If the sign extended bits are demanded, we know that the sign
2965 // bit is demanded.
2966 InSignMask = InSignMask.zext(BitWidth);
2967 if (NewBits.getBoolValue())
2968 InputDemandedBits |= InSignMask;
2969
2970 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
2971 Known.One &= InputDemandedBits;
2972 Known.Zero &= InputDemandedBits;
2973
2974 // If the sign bit of the input is known set or clear, then we know the
2975 // top bits of the result.
2976 if (Known.Zero.intersects(InSignMask)) { // Input sign bit known clear
2977 Known.Zero |= NewBits;
2978 Known.One &= ~NewBits;
2979 } else if (Known.One.intersects(InSignMask)) { // Input sign bit known set
2980 Known.One |= NewBits;
2981 Known.Zero &= ~NewBits;
2982 } else { // Input sign bit unknown
2983 Known.Zero &= ~NewBits;
2984 Known.One &= ~NewBits;
2985 }
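    // e.g. sign_extend_inreg(X, i8) on i32: when bit 7 of X is known, bits
    // [31:8] of the result take its value; otherwise they become unknown.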
2986 break;
2987 }
2988 case ISD::CTTZ:
2989 case ISD::CTTZ_ZERO_UNDEF: {
2990 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
2991 // If we have a known 1, its position is our upper bound.
2992 unsigned PossibleTZ = Known2.countMaxTrailingZeros();
2993 unsigned LowBits = Log2_32(PossibleTZ) + 1;
2994 Known.Zero.setBitsFrom(LowBits);
2995 break;
2996 }
2997 case ISD::CTLZ:
2998 case ISD::CTLZ_ZERO_UNDEF: {
2999 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3000 // If we have a known 1, its position is our upper bound.
3001 unsigned PossibleLZ = Known2.countMaxLeadingZeros();
3002 unsigned LowBits = Log2_32(PossibleLZ) + 1;
3003 Known.Zero.setBitsFrom(LowBits);
3004 break;
3005 }
3006 case ISD::CTPOP: {
3007 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3008 // If we know some of the bits are zero, they can't be one.
3009 unsigned PossibleOnes = Known2.countMaxPopulation();
3010 Known.Zero.setBitsFrom(Log2_32(PossibleOnes) + 1);
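    // e.g. if at most 5 bits of the operand can be set, the popcount fits in
    // 3 bits, so every higher result bit is known zero.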
3011 break;
3012 }
3013 case ISD::LOAD: {
3014 LoadSDNode *LD = cast<LoadSDNode>(Op);
3015 const Constant *Cst = TLI->getTargetConstantFromLoad(LD);
3016 if (ISD::isNON_EXTLoad(LD) && Cst) {
3017 // Determine any common known bits from the loaded constant pool value.
3018 Type *CstTy = Cst->getType();
3019 if ((NumElts * BitWidth) == CstTy->getPrimitiveSizeInBits()) {
3020         // If it's a vector splat, then we can (quickly) reuse the scalar path.
3021 // NOTE: We assume all elements match and none are UNDEF.
3022 if (CstTy->isVectorTy()) {
3023 if (const Constant *Splat = Cst->getSplatValue()) {
3024 Cst = Splat;
3025 CstTy = Cst->getType();
3026 }
3027 }
3028 // TODO - do we need to handle different bitwidths?
3029 if (CstTy->isVectorTy() && BitWidth == CstTy->getScalarSizeInBits()) {
3030 // Iterate across all vector elements finding common known bits.
3031 Known.One.setAllBits();
3032 Known.Zero.setAllBits();
3033 for (unsigned i = 0; i != NumElts; ++i) {
3034 if (!DemandedElts[i])
3035 continue;
3036 if (Constant *Elt = Cst->getAggregateElement(i)) {
3037 if (auto *CInt = dyn_cast<ConstantInt>(Elt)) {
3038 const APInt &Value = CInt->getValue();
3039 Known.One &= Value;
3040 Known.Zero &= ~Value;
3041 continue;
3042 }
3043 if (auto *CFP = dyn_cast<ConstantFP>(Elt)) {
3044 APInt Value = CFP->getValueAPF().bitcastToAPInt();
3045 Known.One &= Value;
3046 Known.Zero &= ~Value;
3047 continue;
3048 }
3049 }
3050 Known.One.clearAllBits();
3051 Known.Zero.clearAllBits();
3052 break;
3053 }
3054 } else if (BitWidth == CstTy->getPrimitiveSizeInBits()) {
3055 if (auto *CInt = dyn_cast<ConstantInt>(Cst)) {
3056 const APInt &Value = CInt->getValue();
3057 Known.One = Value;
3058 Known.Zero = ~Value;
3059 } else if (auto *CFP = dyn_cast<ConstantFP>(Cst)) {
3060 APInt Value = CFP->getValueAPF().bitcastToAPInt();
3061 Known.One = Value;
3062 Known.Zero = ~Value;
3063 }
3064 }
3065 }
3066 } else if (ISD::isZEXTLoad(Op.getNode()) && Op.getResNo() == 0) {
3067 // If this is a ZEXTLoad and we are looking at the loaded value.
3068 EVT VT = LD->getMemoryVT();
3069 unsigned MemBits = VT.getScalarSizeInBits();
3070 Known.Zero.setBitsFrom(MemBits);
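      // e.g. a zextload of i8 into i32 leaves bits [31:8] known zero.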
3071 } else if (const MDNode *Ranges = LD->getRanges()) {
3072 if (LD->getExtensionType() == ISD::NON_EXTLOAD)
3073 computeKnownBitsFromRangeMetadata(*Ranges, Known);
3074 }
3075 break;
3076 }
3077 case ISD::ZERO_EXTEND_VECTOR_INREG: {
3078 EVT InVT = Op.getOperand(0).getValueType();
3079 APInt InDemandedElts = DemandedElts.zextOrSelf(InVT.getVectorNumElements());
3080 Known = computeKnownBits(Op.getOperand(0), InDemandedElts, Depth + 1);
3081 Known = Known.zext(BitWidth, true /* ExtendedBitsAreKnownZero */);
3082 break;
3083 }
3084 case ISD::ZERO_EXTEND: {
3085 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3086 Known = Known.zext(BitWidth, true /* ExtendedBitsAreKnownZero */);
3087 break;
3088 }
3089 case ISD::SIGN_EXTEND_VECTOR_INREG: {
3090 EVT InVT = Op.getOperand(0).getValueType();
3091 APInt InDemandedElts = DemandedElts.zextOrSelf(InVT.getVectorNumElements());
3092 Known = computeKnownBits(Op.getOperand(0), InDemandedElts, Depth + 1);
3093 // If the sign bit is known to be zero or one, then sext will extend
3094 // it to the top bits, else it will just zext.
3095 Known = Known.sext(BitWidth);
3096 break;
3097 }
3098 case ISD::SIGN_EXTEND: {
3099 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3100 // If the sign bit is known to be zero or one, then sext will extend
3101 // it to the top bits, else it will just zext.
3102 Known = Known.sext(BitWidth);
3103 break;
3104 }
3105 case ISD::ANY_EXTEND: {
3106 Known = computeKnownBits(Op.getOperand(0), Depth+1);
3107 Known = Known.zext(BitWidth, false /* ExtendedBitsAreKnownZero */);
3108 break;
3109 }
3110 case ISD::TRUNCATE: {
3111 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3112 Known = Known.trunc(BitWidth);
3113 break;
3114 }
3115 case ISD::AssertZext: {
3116 EVT VT = cast<VTSDNode>(Op.getOperand(1))->getVT();
3117 APInt InMask = APInt::getLowBitsSet(BitWidth, VT.getSizeInBits());
3118 Known = computeKnownBits(Op.getOperand(0), Depth+1);
3119 Known.Zero |= (~InMask);
3120 Known.One &= (~Known.Zero);
3121 break;
3122 }
3123 case ISD::FGETSIGN:
3124 // All bits are zero except the low bit.
3125 Known.Zero.setBitsFrom(1);
3126 break;
3127 case ISD::USUBO:
3128 case ISD::SSUBO:
3129 if (Op.getResNo() == 1) {
3130 // If we know the result of a setcc has the top bits zero, use this info.
3131 if (TLI->getBooleanContents(Op.getOperand(0).getValueType()) ==
3132 TargetLowering::ZeroOrOneBooleanContent &&
3133 BitWidth > 1)
3134 Known.Zero.setBitsFrom(1);
3135 break;
3136 }
3137 LLVM_FALLTHROUGH;
3138 case ISD::SUB:
3139 case ISD::SUBC: {
3140 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3141 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3142 Known = KnownBits::computeForAddSub(/* Add */ false, /* NSW */ false,
3143 Known, Known2);
3144 break;
3145 }
3146 case ISD::UADDO:
3147 case ISD::SADDO:
3148 case ISD::ADDCARRY:
3149 if (Op.getResNo() == 1) {
3150 // If we know the result of a setcc has the top bits zero, use this info.
3151 if (TLI->getBooleanContents(Op.getOperand(0).getValueType()) ==
3152 TargetLowering::ZeroOrOneBooleanContent &&
3153 BitWidth > 1)
3154 Known.Zero.setBitsFrom(1);
3155 break;
3156 }
3157 LLVM_FALLTHROUGH;
3158 case ISD::ADD:
3159 case ISD::ADDC:
3160 case ISD::ADDE: {
3161 assert(Op.getResNo() == 0 && "We only compute knownbits for the sum here.");
3162
3163 // With ADDE and ADDCARRY, a carry bit may be added in.
3164 KnownBits Carry(1);
3165 if (Opcode == ISD::ADDE)
3166 // Can't track carry from glue, set carry to unknown.
3167 Carry.resetAll();
3168 else if (Opcode == ISD::ADDCARRY)
3169       // TODO: Compute known bits for the carry operand. It is unclear whether
3170       // this is worth the trouble (how often will we find a known carry bit?),
3171       // but something like this might work:
3172 // Carry = computeKnownBits(Op.getOperand(2), DemandedElts, Depth + 1);
3173 // Carry = Carry.zextOrTrunc(1, false);
3174 Carry.resetAll();
3175 else
3176 Carry.setAllZero();
3177
3178 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3179 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3180 Known = KnownBits::computeForAddCarry(Known, Known2, Carry);
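    // e.g. adding two operands whose low 4 bits are known zero, with a
    // known-zero carry in, leaves the low 4 bits of the sum known zero.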
3181 break;
3182 }
3183 case ISD::SREM:
3184 if (ConstantSDNode *Rem = isConstOrConstSplat(Op.getOperand(1))) {
3185 const APInt &RA = Rem->getAPIntValue().abs();
3186 if (RA.isPowerOf2()) {
3187 APInt LowBits = RA - 1;
3188 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3189
3190 // The low bits of the first operand are unchanged by the srem.
3191 Known.Zero = Known2.Zero & LowBits;
3192 Known.One = Known2.One & LowBits;
3193
3194 // If the first operand is non-negative or has all low bits zero, then
3195 // the upper bits are all zero.
3196 if (Known2.isNonNegative() || LowBits.isSubsetOf(Known2.Zero))
3197 Known.Zero |= ~LowBits;
3198
3199 // If the first operand is negative and not all low bits are zero, then
3200 // the upper bits are all one.
3201 if (Known2.isNegative() && LowBits.intersects(Known2.One))
3202 Known.One |= ~LowBits;
3203         assert((Known.Zero & Known.One) == 0 && "Bits known to be one AND zero?");
3204 }
3205 }
3206 break;
3207 case ISD::UREM: {
3208 if (ConstantSDNode *Rem = isConstOrConstSplat(Op.getOperand(1))) {
3209 const APInt &RA = Rem->getAPIntValue();
3210 if (RA.isPowerOf2()) {
3211 APInt LowBits = (RA - 1);
3212 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3213
3214 // The upper bits are all zero, the lower ones are unchanged.
3215 Known.Zero = Known2.Zero | ~LowBits;
3216 Known.One = Known2.One & LowBits;
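        // e.g. X urem 16 equals X & 15: bits [BitWidth-1:4] are known zero
        // and the low 4 bits are taken directly from X.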
3217 break;
3218 }
3219 }
3220
3221 // Since the result is less than or equal to either operand, any leading
3222 // zero bits in either operand must also exist in the result.
3223 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3224 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3225
3226 uint32_t Leaders =
3227 std::max(Known.countMinLeadingZeros(), Known2.countMinLeadingZeros());
3228 Known.resetAll();
3229 Known.Zero.setHighBits(Leaders);
3230 break;
3231 }
3232 case ISD::EXTRACT_ELEMENT: {
3233 Known = computeKnownBits(Op.getOperand(0), Depth+1);
3234 const unsigned Index = Op.getConstantOperandVal(1);
3235 const unsigned EltBitWidth = Op.getValueSizeInBits();
3236
3237     // Remove the low part of the known-bits mask.
3238 Known.Zero = Known.Zero.getHiBits(Known.getBitWidth() - Index * EltBitWidth);
3239 Known.One = Known.One.getHiBits(Known.getBitWidth() - Index * EltBitWidth);
3240
3241     // Remove the high part of the known-bits mask.
3242 Known = Known.trunc(EltBitWidth);
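    // e.g. extract_element(i64 X, 1) with an i32 result yields bits [63:32]
    // of X.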
3243 break;
3244 }
3245 case ISD::EXTRACT_VECTOR_ELT: {
3246 SDValue InVec = Op.getOperand(0);
3247 SDValue EltNo = Op.getOperand(1);
3248 EVT VecVT = InVec.getValueType();
3249 const unsigned EltBitWidth = VecVT.getScalarSizeInBits();
3250 const unsigned NumSrcElts = VecVT.getVectorNumElements();
3251     // If BitWidth > EltBitWidth the value is any-extended, so we do not know
3252     // anything about the extended bits.
3253 if (BitWidth > EltBitWidth)
3254 Known = Known.trunc(EltBitWidth);
3255 ConstantSDNode *ConstEltNo = dyn_cast<ConstantSDNode>(EltNo);
3256 if (ConstEltNo && ConstEltNo->getAPIntValue().ult(NumSrcElts)) {
3257 // If we know the element index, just demand that vector element.
3258 unsigned Idx = ConstEltNo->getZExtValue();
3259 APInt DemandedElt = APInt::getOneBitSet(NumSrcElts, Idx);
3260 Known = computeKnownBits(InVec, DemandedElt, Depth + 1);
3261 } else {
3262 // Unknown element index, so ignore DemandedElts and demand them all.
3263 Known = computeKnownBits(InVec, Depth + 1);
3264 }
3265 if (BitWidth > EltBitWidth)
3266 Known = Known.zext(BitWidth, false /* => any extend */);
3267 break;
3268 }
3269 case ISD::INSERT_VECTOR_ELT: {
3270 SDValue InVec = Op.getOperand(0);
3271 SDValue InVal = Op.getOperand(1);
3272 SDValue EltNo = Op.getOperand(2);
3273
3274 ConstantSDNode *CEltNo = dyn_cast<ConstantSDNode>(EltNo);
3275 if (CEltNo && CEltNo->getAPIntValue().ult(NumElts)) {
3276 // If we know the element index, split the demand between the
3277 // source vector and the inserted element.
3278 Known.Zero = Known.One = APInt::getAllOnesValue(BitWidth);
3279 unsigned EltIdx = CEltNo->getZExtValue();
3280
3281 // If we demand the inserted element then add its common known bits.
3282 if (DemandedElts[EltIdx]) {
3283 Known2 = computeKnownBits(InVal, Depth + 1);
3284 Known.One &= Known2.One.zextOrTrunc(Known.One.getBitWidth());
3285 Known.Zero &= Known2.Zero.zextOrTrunc(Known.Zero.getBitWidth());
3286 }
3287
3288 // If we demand the source vector then add its common known bits, ensuring
3289 // that we don't demand the inserted element.
3290 APInt VectorElts = DemandedElts & ~(APInt::getOneBitSet(NumElts, EltIdx));
3291 if (!!VectorElts) {
3292 Known2 = computeKnownBits(InVec, VectorElts, Depth + 1);
3293 Known.One &= Known2.One;
3294 Known.Zero &= Known2.Zero;
3295 }
3296 } else {
3297 // Unknown element index, so ignore DemandedElts and demand them all.
3298 Known = computeKnownBits(InVec, Depth + 1);
3299 Known2 = computeKnownBits(InVal, Depth + 1);
3300 Known.One &= Known2.One.zextOrTrunc(Known.One.getBitWidth());
3301 Known.Zero &= Known2.Zero.zextOrTrunc(Known.Zero.getBitWidth());
3302 }
3303 break;
3304 }
3305 case ISD::BITREVERSE: {
3306 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3307 Known.Zero = Known2.Zero.reverseBits();
3308 Known.One = Known2.One.reverseBits();
3309 break;
3310 }
3311 case ISD::BSWAP: {
3312 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3313 Known.Zero = Known2.Zero.byteSwap();
3314 Known.One = Known2.One.byteSwap();
3315 break;
3316 }
3317 case ISD::ABS: {
3318 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3319
3320 // If the source's MSB is zero then we know the rest of the bits already.
3321 if (Known2.isNonNegative()) {
3322 Known.Zero = Known2.Zero;
3323 Known.One = Known2.One;
3324 break;
3325 }
3326
3327     // We only know that the absolute value's MSB will be zero if there is a
3328     // set bit that isn't the sign bit (otherwise the value could be INT_MIN).
3329 Known2.One.clearSignBit();
3330 if (Known2.One.getBoolValue()) {
3331 Known.Zero = APInt::getSignMask(BitWidth);
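      // e.g. an operand with bit 0 known set cannot be INT_MIN, so its
      // absolute value has a known-zero sign bit.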
3332 break;
3333 }
3334 break;
3335 }
3336 case ISD::UMIN: {
3337 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3338 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3339
3340     // UMIN - we know that the result will have at least the maximum of the
3341     // two inputs' known leading zero bits.
3342 unsigned LeadZero = Known.countMinLeadingZeros();
3343 LeadZero = std::max(LeadZero, Known2.countMinLeadingZeros());
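    // e.g. if either i32 operand is known to be less than 256 (24 leading
    // zeros), the unsigned minimum has at least 24 leading zeros as well.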
3344
3345 Known.Zero &= Known2.Zero;
3346 Known.One &= Known2.One;
3347 Known.Zero.setHighBits(LeadZero);
3348 break;
3349 }
3350 case ISD::UMAX: {
3351 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3352 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3353
3354     // UMAX - we know that the result will have at least the maximum of the
3355     // two inputs' known leading one bits.
3356 unsigned LeadOne = Known.countMinLeadingOnes();
3357 LeadOne = std::max(LeadOne, Known2.countMinLeadingOnes());
3358
3359 Known.Zero &= Known2.Zero;
3360 Known.One &= Known2.One;
3361 Known.One.setHighBits(LeadOne);
3362 break;
3363 }
3364 case ISD::SMIN:
3365 case ISD::SMAX: {
3366     // If we have a clamp pattern, the shared sign bits of the clamp min/max
3367     // range give us the known high bits of the result.
3368 bool IsMax = (Opcode == ISD::SMAX);
3369 ConstantSDNode *CstLow = nullptr, *CstHigh = nullptr;
3370 if ((CstLow = isConstOrConstSplat(Op.getOperand(1), DemandedElts)))
3371 if (Op.getOperand(0).getOpcode() == (IsMax ? ISD::SMIN : ISD::SMAX))
3372 CstHigh =
3373 isConstOrConstSplat(Op.getOperand(0).getOperand(1), DemandedElts);
3374 if (CstLow && CstHigh) {
3375 if (!IsMax)
3376 std::swap(CstLow, CstHigh);
3377
3378 const APInt &ValueLow = CstLow->getAPIntValue();
3379 const APInt &ValueHigh = CstHigh->getAPIntValue();
3380 if (ValueLow.sle(ValueHigh)) {
3381 unsigned LowSignBits = ValueLow.getNumSignBits();
3382 unsigned HighSignBits = ValueHigh.getNumSignBits();
3383 unsigned MinSignBits = std::min(LowSignBits, HighSignBits);
3384 if (ValueLow.isNegative() && ValueHigh.isNegative()) {
3385 Known.One.setHighBits(MinSignBits);
3386 break;
3387 }
3388 if (ValueLow.isNonNegative() && ValueHigh.isNonNegative()) {
3389 Known.Zero.setHighBits(MinSignBits);
3390 break;
3391 }
3392 }
3393 }
3394
3395 // Fallback - just get the shared known bits of the operands.
3396 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3397 if (Known.isUnknown()) break; // Early-out
3398 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3399 Known.Zero &= Known2.Zero;
3400 Known.One &= Known2.One;
3401 break;
3402 }
3403 case ISD::FrameIndex:
3404 case ISD::TargetFrameIndex:
3405 TLI->computeKnownBitsForFrameIndex(Op, Known, DemandedElts, *this, Depth);
3406 break;
3407
3408 default:
3409 if (Opcode < ISD::BUILTIN_OP_END)
3410 break;
3411 LLVM_FALLTHROUGH;
3412 case ISD::INTRINSIC_WO_CHAIN:
3413 case ISD::INTRINSIC_W_CHAIN:
3414 case ISD::INTRINSIC_VOID:
3415 // Allow the target to implement this method for its nodes.
3416 TLI->computeKnownBitsForTargetNode(Op, Known, DemandedElts, *this, Depth);
3417 break;
3418 }
3419
3420 assert(!Known.hasConflict() && "Bits known to be one AND zero?");
3421 return Known;
3422 }
3423
3424 SelectionDAG::OverflowKind SelectionDAG::computeOverflowKind(SDValue N0,
3425 SDValue N1) const {
3426   // X + 0 never overflows.
3427 if (isNullConstant(N1))
3428 return OFK_Never;
3429
3430 KnownBits N1Known = computeKnownBits(N1);
3431 if (N1Known.Zero.getBoolValue()) {
3432 KnownBits N0Known = computeKnownBits(N0);
3433
3434 bool overflow;
3435 (void)N0Known.getMaxValue().uadd_ov(N1Known.getMaxValue(), overflow);
3436 if (!overflow)
3437 return OFK_Never;
3438 }
3439
3440   // mulhi + 1 never overflows.
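  // (The high half of an unsigned BW x BW multiply is at most 2^BW - 2, so
  // adding a value known to be 0 or 1 cannot wrap.)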
3441 if (N0.getOpcode() == ISD::UMUL_LOHI && N0.getResNo() == 1 &&
3442 (N1Known.getMaxValue() & 0x01) == N1Known.getMaxValue())
3443 return OFK_Never;
3444
3445 if (N1.getOpcode() == ISD::UMUL_LOHI && N1.getResNo() == 1) {
3446 KnownBits N0Known = computeKnownBits(N0);
3447
3448 if ((N0Known.getMaxValue() & 0x01) == N0Known.getMaxValue())
3449 return OFK_Never;
3450 }
3451
3452 return OFK_Sometime;
3453 }
3454
3455 bool SelectionDAG::isKnownToBeAPowerOfTwo(SDValue Val) const {
3456 EVT OpVT = Val.getValueType();
3457 unsigned BitWidth = OpVT.getScalarSizeInBits();
3458
3459 // Is the constant a known power of 2?
3460 if (ConstantSDNode *Const = dyn_cast<ConstantSDNode>(Val))
3461 return Const->getAPIntValue().zextOrTrunc(BitWidth).isPowerOf2();
3462
3463 // A left-shift of a constant one will have exactly one bit set because
3464 // shifting the bit off the end is undefined.
3465 if (Val.getOpcode() == ISD::SHL) {
3466 auto *C = isConstOrConstSplat(Val.getOperand(0));
3467 if (C && C->getAPIntValue() == 1)
3468 return true;
3469 }
3470
3471 // Similarly, a logical right-shift of a constant sign-bit will have exactly
3472 // one bit set.
3473 if (Val.getOpcode() == ISD::SRL) {
3474 auto *C = isConstOrConstSplat(Val.getOperand(0));
3475 if (C && C->getAPIntValue().isSignMask())
3476 return true;
3477 }
3478
3479 // Are all operands of a build vector constant powers of two?
3480 if (Val.getOpcode() == ISD::BUILD_VECTOR)
3481 if (llvm::all_of(Val->ops(), [BitWidth](SDValue E) {
3482 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(E))
3483 return C->getAPIntValue().zextOrTrunc(BitWidth).isPowerOf2();
3484 return false;
3485 }))
3486 return true;
3487
3488 // More could be done here, though the above checks are enough
3489 // to handle some common cases.
3490
3491 // Fall back to computeKnownBits to catch other known cases.
3492 KnownBits Known = computeKnownBits(Val);
3493 return (Known.countMaxPopulation() == 1) && (Known.countMinPopulation() == 1);
3494 }
3495
3496 unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, unsigned Depth) const {
3497 EVT VT = Op.getValueType();
3498 APInt DemandedElts = VT.isVector()
3499 ? APInt::getAllOnesValue(VT.getVectorNumElements())
3500 : APInt(1, 1);
3501 return ComputeNumSignBits(Op, DemandedElts, Depth);
3502 }
3503
3504 unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, const APInt &DemandedElts,
3505 unsigned Depth) const {
3506 EVT VT = Op.getValueType();
3507 assert((VT.isInteger() || VT.isFloatingPoint()) && "Invalid VT!");
3508 unsigned VTBits = VT.getScalarSizeInBits();
3509 unsigned NumElts = DemandedElts.getBitWidth();
3510 unsigned Tmp, Tmp2;
3511 unsigned FirstAnswer = 1;
3512
3513 if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
3514 const APInt &Val = C->getAPIntValue();
3515 return Val.getNumSignBits();
3516 }
3517
3518 if (Depth >= MaxRecursionDepth)
3519 return 1; // Limit search depth.
3520
3521 if (!DemandedElts)
3522 return 1; // No demanded elts, better to assume we don't know anything.
3523
3524 unsigned Opcode = Op.getOpcode();
3525 switch (Opcode) {
3526 default: break;
3527 case ISD::AssertSext:
3528 Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getSizeInBits();
3529 return VTBits-Tmp+1;
3530 case ISD::AssertZext:
3531 Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getSizeInBits();
3532 return VTBits-Tmp;
3533
3534 case ISD::BUILD_VECTOR:
3535 Tmp = VTBits;
3536 for (unsigned i = 0, e = Op.getNumOperands(); (i < e) && (Tmp > 1); ++i) {
3537 if (!DemandedElts[i])
3538 continue;
3539
3540 SDValue SrcOp = Op.getOperand(i);
3541 Tmp2 = ComputeNumSignBits(Op.getOperand(i), Depth + 1);
3542
3543 // BUILD_VECTOR can implicitly truncate sources, we must handle this.
3544 if (SrcOp.getValueSizeInBits() != VTBits) {
3545 assert(SrcOp.getValueSizeInBits() > VTBits &&
3546 "Expected BUILD_VECTOR implicit truncation");
3547 unsigned ExtraBits = SrcOp.getValueSizeInBits() - VTBits;
3548 Tmp2 = (Tmp2 > ExtraBits ? Tmp2 - ExtraBits : 1);
3549 }
3550 Tmp = std::min(Tmp, Tmp2);
3551 }
3552 return Tmp;
3553
3554 case ISD::VECTOR_SHUFFLE: {
3555 // Collect the minimum number of sign bits that are shared by every vector
3556 // element referenced by the shuffle.
3557 APInt DemandedLHS(NumElts, 0), DemandedRHS(NumElts, 0);
3558 const ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op);
3559 assert(NumElts == SVN->getMask().size() && "Unexpected vector size");
3560 for (unsigned i = 0; i != NumElts; ++i) {
3561 int M = SVN->getMaskElt(i);
3562 if (!DemandedElts[i])
3563 continue;
3564 // For UNDEF elements, we don't know anything about the common state of
3565 // the shuffle result.
3566 if (M < 0)
3567 return 1;
3568 if ((unsigned)M < NumElts)
3569 DemandedLHS.setBit((unsigned)M % NumElts);
3570 else
3571 DemandedRHS.setBit((unsigned)M % NumElts);
3572 }
3573 Tmp = std::numeric_limits<unsigned>::max();
3574 if (!!DemandedLHS)
3575 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedLHS, Depth + 1);
3576 if (!!DemandedRHS) {
3577 Tmp2 = ComputeNumSignBits(Op.getOperand(1), DemandedRHS, Depth + 1);
3578 Tmp = std::min(Tmp, Tmp2);
3579 }
3580 // If we don't know anything, early out and try computeKnownBits fall-back.
3581 if (Tmp == 1)
3582 break;
3583 assert(Tmp <= VTBits && "Failed to determine minimum sign bits");
3584 return Tmp;
3585 }
3586
3587 case ISD::BITCAST: {
3588 SDValue N0 = Op.getOperand(0);
3589 EVT SrcVT = N0.getValueType();
3590 unsigned SrcBits = SrcVT.getScalarSizeInBits();
3591
3592     // Ignore bitcasts from unsupported types.
3593 if (!(SrcVT.isInteger() || SrcVT.isFloatingPoint()))
3594 break;
3595
3596 // Fast handling of 'identity' bitcasts.
3597 if (VTBits == SrcBits)
3598 return ComputeNumSignBits(N0, DemandedElts, Depth + 1);
3599
3600 bool IsLE = getDataLayout().isLittleEndian();
3601
3602 // Bitcast 'large element' scalar/vector to 'small element' vector.
3603 if ((SrcBits % VTBits) == 0) {
3604 assert(VT.isVector() && "Expected bitcast to vector");
3605
3606 unsigned Scale = SrcBits / VTBits;
3607 APInt SrcDemandedElts(NumElts / Scale, 0);
3608 for (unsigned i = 0; i != NumElts; ++i)
3609 if (DemandedElts[i])
3610 SrcDemandedElts.setBit(i / Scale);
3611
3612 // Fast case - sign splat can be simply split across the small elements.
3613 Tmp = ComputeNumSignBits(N0, SrcDemandedElts, Depth + 1);
3614 if (Tmp == SrcBits)
3615 return VTBits;
3616
3617 // Slow case - determine how far the sign extends into each sub-element.
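      // e.g. an i64 with 40 sign bits bitcast to <2 x i32> (little-endian):
      // the high element keeps 32 sign bits, the low element only 8.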
3618 Tmp2 = VTBits;
3619 for (unsigned i = 0; i != NumElts; ++i)
3620 if (DemandedElts[i]) {
3621 unsigned SubOffset = i % Scale;
3622 SubOffset = (IsLE ? ((Scale - 1) - SubOffset) : SubOffset);
3623 SubOffset = SubOffset * VTBits;
3624 if (Tmp <= SubOffset)
3625 return 1;
3626 Tmp2 = std::min(Tmp2, Tmp - SubOffset);
3627 }
3628 return Tmp2;
3629 }
3630 break;
3631 }
3632
3633 case ISD::SIGN_EXTEND:
3634 Tmp = VTBits - Op.getOperand(0).getScalarValueSizeInBits();
3635 return ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth+1) + Tmp;
3636 case ISD::SIGN_EXTEND_INREG:
3637 // Max of the input and what this extends.
3638 Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getScalarSizeInBits();
3639 Tmp = VTBits-Tmp+1;
3640 Tmp2 = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth+1);
3641 return std::max(Tmp, Tmp2);
3642 case ISD::SIGN_EXTEND_VECTOR_INREG: {
3643 SDValue Src = Op.getOperand(0);
3644 EVT SrcVT = Src.getValueType();
3645 APInt DemandedSrcElts = DemandedElts.zextOrSelf(SrcVT.getVectorNumElements());
3646 Tmp = VTBits - SrcVT.getScalarSizeInBits();
3647 return ComputeNumSignBits(Src, DemandedSrcElts, Depth+1) + Tmp;
3648 }
3649 case ISD::SRA:
3650 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1);
3651 // SRA X, C -> adds C sign bits.
3652 if (const APInt *ShAmt = getValidShiftAmountConstant(Op, DemandedElts))
3653 Tmp = std::min<uint64_t>(Tmp + ShAmt->getZExtValue(), VTBits);
3654 else if (const APInt *ShAmt =
3655 getValidMinimumShiftAmountConstant(Op, DemandedElts))
3656 Tmp = std::min<uint64_t>(Tmp + ShAmt->getZExtValue(), VTBits);
3657 return Tmp;
3658 case ISD::SHL:
3659 if (const APInt *ShAmt = getValidShiftAmountConstant(Op, DemandedElts)) {
3660 // shl destroys sign bits, ensure it doesn't shift out all sign bits.
3661 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1);
3662 if (ShAmt->ult(Tmp))
3663 return Tmp - ShAmt->getZExtValue();
3664 } else if (const APInt *ShAmt =
3665 getValidMaximumShiftAmountConstant(Op, DemandedElts)) {
3666 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1);
3667 if (ShAmt->ult(Tmp))
3668 return Tmp - ShAmt->getZExtValue();
3669 }
3670 break;
3671 case ISD::AND:
3672 case ISD::OR:
3673 case ISD::XOR: // NOT is handled here.
3674 // Logical binary ops preserve the number of sign bits at the worst.
3675 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth+1);
3676 if (Tmp != 1) {
3677 Tmp2 = ComputeNumSignBits(Op.getOperand(1), DemandedElts, Depth+1);
3678 FirstAnswer = std::min(Tmp, Tmp2);
3679 // We computed what we know about the sign bits as our first
3680 // answer. Now proceed to the generic code that uses
3681 // computeKnownBits, and pick whichever answer is better.
3682 }
3683 break;
3684
3685 case ISD::SELECT:
3686 case ISD::VSELECT:
3687 Tmp = ComputeNumSignBits(Op.getOperand(1), DemandedElts, Depth+1);
3688 if (Tmp == 1) return 1; // Early out.
3689 Tmp2 = ComputeNumSignBits(Op.getOperand(2), DemandedElts, Depth+1);
3690 return std::min(Tmp, Tmp2);
3691 case ISD::SELECT_CC:
3692 Tmp = ComputeNumSignBits(Op.getOperand(2), DemandedElts, Depth+1);
3693 if (Tmp == 1) return 1; // Early out.
3694 Tmp2 = ComputeNumSignBits(Op.getOperand(3), DemandedElts, Depth+1);
3695 return std::min(Tmp, Tmp2);
3696
3697 case ISD::SMIN:
3698 case ISD::SMAX: {
3699 // If we have a clamp pattern, we know that the number of sign bits will be
3700 // the minimum of the clamp min/max range.
3701 bool IsMax = (Opcode == ISD::SMAX);
3702 ConstantSDNode *CstLow = nullptr, *CstHigh = nullptr;
3703 if ((CstLow = isConstOrConstSplat(Op.getOperand(1), DemandedElts)))
3704 if (Op.getOperand(0).getOpcode() == (IsMax ? ISD::SMIN : ISD::SMAX))
3705 CstHigh =
3706 isConstOrConstSplat(Op.getOperand(0).getOperand(1), DemandedElts);
3707 if (CstLow && CstHigh) {
3708 if (!IsMax)
3709 std::swap(CstLow, CstHigh);
3710 if (CstLow->getAPIntValue().sle(CstHigh->getAPIntValue())) {
3711 Tmp = CstLow->getAPIntValue().getNumSignBits();
3712 Tmp2 = CstHigh->getAPIntValue().getNumSignBits();
3713 return std::min(Tmp, Tmp2);
3714 }
3715 }
3716
3717 // Fallback - just get the minimum number of sign bits of the operands.
3718 Tmp = ComputeNumSignBits(Op.getOperand(0), Depth + 1);
3719 if (Tmp == 1)
3720 return 1; // Early out.
3721 Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth + 1);
3722 return std::min(Tmp, Tmp2);
3723 }
3724 case ISD::UMIN:
3725 case ISD::UMAX:
3726 Tmp = ComputeNumSignBits(Op.getOperand(0), Depth + 1);
3727 if (Tmp == 1)
3728 return 1; // Early out.
3729 Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth + 1);
3730 return std::min(Tmp, Tmp2);
3731 case ISD::SADDO:
3732 case ISD::UADDO:
3733 case ISD::SSUBO:
3734 case ISD::USUBO:
3735 case ISD::SMULO:
3736 case ISD::UMULO:
3737 if (Op.getResNo() != 1)
3738 break;
3739     // The boolean result conforms to getBooleanContents.
3740     // If setcc returns 0/-1, all bits are sign bits.
3741     // We know that we have an integer-based boolean since these operations
3742     // are only available for integers.
3743 if (TLI->getBooleanContents(VT.isVector(), false) ==
3744 TargetLowering::ZeroOrNegativeOneBooleanContent)
3745 return VTBits;
3746 break;
3747 case ISD::SETCC:
3748 case ISD::STRICT_FSETCC:
3749 case ISD::STRICT_FSETCCS: {
3750 unsigned OpNo = Op->isStrictFPOpcode() ? 1 : 0;
3751 // If setcc returns 0/-1, all bits are sign bits.
3752 if (TLI->getBooleanContents(Op.getOperand(OpNo).getValueType()) ==
3753 TargetLowering::ZeroOrNegativeOneBooleanContent)
3754 return VTBits;
3755 break;
3756 }
3757 case ISD::ROTL:
3758 case ISD::ROTR:
3759 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
3760 unsigned RotAmt = C->getAPIntValue().urem(VTBits);
3761
3762 // Handle rotate right by N like a rotate left by 32-N.
3763 if (Opcode == ISD::ROTR)
3764 RotAmt = (VTBits - RotAmt) % VTBits;
3765
3766 // If we aren't rotating out all of the known-in sign bits, return the
3767 // number that are left. This handles rotl(sext(x), 1) for example.
3768 Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
3769 if (Tmp > (RotAmt + 1)) return (Tmp - RotAmt);
3770 }
3771 break;
3772 case ISD::ADD:
3773 case ISD::ADDC:
3774 // Add can have at most one carry bit. Thus we know that the output
3775 // is, at worst, one more bit than the inputs.
3776 Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
3777 if (Tmp == 1) return 1; // Early out.
3778
3779 // Special case decrementing a value (ADD X, -1):
3780 if (ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(Op.getOperand(1)))
3781 if (CRHS->isAllOnesValue()) {
3782 KnownBits Known = computeKnownBits(Op.getOperand(0), Depth+1);
3783
3784 // If the input is known to be 0 or 1, the output is 0/-1, which is all
3785 // sign bits set.
3786 if ((Known.Zero | 1).isAllOnesValue())
3787 return VTBits;
3788
3789 // If we are subtracting one from a positive number, there is no carry
3790 // out of the result.
3791 if (Known.isNonNegative())
3792 return Tmp;
3793 }
3794
3795 Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth+1);
3796 if (Tmp2 == 1) return 1;
3797 return std::min(Tmp, Tmp2)-1;
3798
3799 case ISD::SUB:
3800 Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth+1);
3801 if (Tmp2 == 1) return 1;
3802
3803 // Handle NEG.
3804 if (ConstantSDNode *CLHS = isConstOrConstSplat(Op.getOperand(0)))
3805 if (CLHS->isNullValue()) {
3806 KnownBits Known = computeKnownBits(Op.getOperand(1), Depth+1);
3807 // If the input is known to be 0 or 1, the output is 0/-1, which is all
3808 // sign bits set.
3809 if ((Known.Zero | 1).isAllOnesValue())
3810 return VTBits;
3811
3812 // If the input is known to be positive (the sign bit is known clear),
3813 // the output of the NEG has the same number of sign bits as the input.
3814 if (Known.isNonNegative())
3815 return Tmp2;
3816
3817 // Otherwise, we treat this like a SUB.
3818 }
3819
3820 // Sub can have at most one carry bit. Thus we know that the output
3821 // is, at worst, one more bit than the inputs.
3822 Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
3823 if (Tmp == 1) return 1; // Early out.
3824 return std::min(Tmp, Tmp2)-1;
3825 case ISD::MUL: {
3826 // The output of the Mul can be at most twice the valid bits in the inputs.
3827 unsigned SignBitsOp0 = ComputeNumSignBits(Op.getOperand(0), Depth + 1);
3828 if (SignBitsOp0 == 1)
3829 break;
3830 unsigned SignBitsOp1 = ComputeNumSignBits(Op.getOperand(1), Depth + 1);
3831 if (SignBitsOp1 == 1)
3832 break;
3833 unsigned OutValidBits =
3834 (VTBits - SignBitsOp0 + 1) + (VTBits - SignBitsOp1 + 1);
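    // e.g. on i32, operands with 20 and 16 sign bits carry 13 and 17 valid
    // bits; the product needs at most 30, leaving at least 3 sign bits.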
3835 return OutValidBits > VTBits ? 1 : VTBits - OutValidBits + 1;
3836 }
3837 case ISD::TRUNCATE: {
3838     // Check if the sign bits of the source extend down into the truncated value.
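    // e.g. truncating an i64 with 40 sign bits down to i32 leaves 8 of them.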
3839 unsigned NumSrcBits = Op.getOperand(0).getScalarValueSizeInBits();
3840 unsigned NumSrcSignBits = ComputeNumSignBits(Op.getOperand(0), Depth + 1);
3841 if (NumSrcSignBits > (NumSrcBits - VTBits))
3842 return NumSrcSignBits - (NumSrcBits - VTBits);
3843 break;
3844 }
3845 case ISD::EXTRACT_ELEMENT: {
3846 const int KnownSign = ComputeNumSignBits(Op.getOperand(0), Depth+1);
3847 const int BitWidth = Op.getValueSizeInBits();
3848 const int Items = Op.getOperand(0).getValueSizeInBits() / BitWidth;
3849
3850     // Get the reverse index (starting from 1): Op1 indexes elements from the
3851     // little end, but the sign bit starts at the big end.
3852 const int rIndex = Items - 1 - Op.getConstantOperandVal(1);
3853
3854     // If the sign portion ends in our element, the subtraction gives the
3855     // correct result. Otherwise it yields a negative or > bitwidth result.
3856 return std::max(std::min(KnownSign - rIndex * BitWidth, BitWidth), 0);
3857 }
3858 case ISD::INSERT_VECTOR_ELT: {
3859 SDValue InVec = Op.getOperand(0);
3860 SDValue InVal = Op.getOperand(1);
3861 SDValue EltNo = Op.getOperand(2);
3862
3863 ConstantSDNode *CEltNo = dyn_cast<ConstantSDNode>(EltNo);
3864 if (CEltNo && CEltNo->getAPIntValue().ult(NumElts)) {
3865 // If we know the element index, split the demand between the
3866 // source vector and the inserted element.
3867 unsigned EltIdx = CEltNo->getZExtValue();
3868
3869 // If we demand the inserted element then get its sign bits.
3870 Tmp = std::numeric_limits<unsigned>::max();
3871 if (DemandedElts[EltIdx]) {
3872 // TODO - handle implicit truncation of inserted elements.
3873 if (InVal.getScalarValueSizeInBits() != VTBits)
3874 break;
3875 Tmp = ComputeNumSignBits(InVal, Depth + 1);
3876 }
3877
3878 // If we demand the source vector then get its sign bits, and determine
3879 // the minimum.
3880 APInt VectorElts = DemandedElts;
3881 VectorElts.clearBit(EltIdx);
3882 if (!!VectorElts) {
3883 Tmp2 = ComputeNumSignBits(InVec, VectorElts, Depth + 1);
3884 Tmp = std::min(Tmp, Tmp2);
3885 }
3886 } else {
3887 // Unknown element index, so ignore DemandedElts and demand them all.
3888 Tmp = ComputeNumSignBits(InVec, Depth + 1);
3889 Tmp2 = ComputeNumSignBits(InVal, Depth + 1);
3890 Tmp = std::min(Tmp, Tmp2);
3891 }
3892 assert(Tmp <= VTBits && "Failed to determine minimum sign bits");
3893 return Tmp;
3894 }
3895 case ISD::EXTRACT_VECTOR_ELT: {
3896 SDValue InVec = Op.getOperand(0);
3897 SDValue EltNo = Op.getOperand(1);
3898 EVT VecVT = InVec.getValueType();
3899 const unsigned BitWidth = Op.getValueSizeInBits();
3900 const unsigned EltBitWidth = Op.getOperand(0).getScalarValueSizeInBits();
3901 const unsigned NumSrcElts = VecVT.getVectorNumElements();
3902
3903     // If BitWidth > EltBitWidth the value is any-extended, and we do not know
3904 // anything about sign bits. But if the sizes match we can derive knowledge
3905 // about sign bits from the vector operand.
3906 if (BitWidth != EltBitWidth)
3907 break;
3908
3909 // If we know the element index, just demand that vector element, else for
3910 // an unknown element index, ignore DemandedElts and demand them all.
3911 APInt DemandedSrcElts = APInt::getAllOnesValue(NumSrcElts);
3912 ConstantSDNode *ConstEltNo = dyn_cast<ConstantSDNode>(EltNo);
3913 if (ConstEltNo && ConstEltNo->getAPIntValue().ult(NumSrcElts))
3914 DemandedSrcElts =
3915 APInt::getOneBitSet(NumSrcElts, ConstEltNo->getZExtValue());
3916
3917 return ComputeNumSignBits(InVec, DemandedSrcElts, Depth + 1);
3918 }
3919 case ISD::EXTRACT_SUBVECTOR: {
3920     // If we know the element index, just demand those subvector elements,
3921 // otherwise demand them all.
3922 SDValue Src = Op.getOperand(0);
3923 ConstantSDNode *SubIdx = dyn_cast<ConstantSDNode>(Op.getOperand(1));
3924 unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
3925 APInt DemandedSrc = APInt::getAllOnesValue(NumSrcElts);
3926 if (SubIdx && SubIdx->getAPIntValue().ule(NumSrcElts - NumElts)) {
3927 // Offset the demanded elts by the subvector index.
3928 uint64_t Idx = SubIdx->getZExtValue();
3929 DemandedSrc = DemandedElts.zextOrSelf(NumSrcElts).shl(Idx);
3930 }
3931 return ComputeNumSignBits(Src, DemandedSrc, Depth + 1);
3932 }
3933 case ISD::CONCAT_VECTORS: {
3934 // Determine the minimum number of sign bits across all demanded
3935 // elts of the input vectors. Early out if the result is already 1.
3936 Tmp = std::numeric_limits<unsigned>::max();
3937 EVT SubVectorVT = Op.getOperand(0).getValueType();
3938 unsigned NumSubVectorElts = SubVectorVT.getVectorNumElements();
3939 unsigned NumSubVectors = Op.getNumOperands();
3940 for (unsigned i = 0; (i < NumSubVectors) && (Tmp > 1); ++i) {
3941 APInt DemandedSub = DemandedElts.lshr(i * NumSubVectorElts);
3942 DemandedSub = DemandedSub.trunc(NumSubVectorElts);
3943 if (!DemandedSub)
3944 continue;
3945 Tmp2 = ComputeNumSignBits(Op.getOperand(i), DemandedSub, Depth + 1);
3946 Tmp = std::min(Tmp, Tmp2);
3947 }
3948 assert(Tmp <= VTBits && "Failed to determine minimum sign bits");
3949 return Tmp;
3950 }
3951 case ISD::INSERT_SUBVECTOR: {
3952   // If we know the element index, demand any elements from the subvector and
3953   // the remainder from the source it is inserted into; otherwise demand them all.
3954 SDValue Src = Op.getOperand(0);
3955 SDValue Sub = Op.getOperand(1);
3956 auto *SubIdx = dyn_cast<ConstantSDNode>(Op.getOperand(2));
3957 unsigned NumSubElts = Sub.getValueType().getVectorNumElements();
3958 if (SubIdx && SubIdx->getAPIntValue().ule(NumElts - NumSubElts)) {
3959 Tmp = std::numeric_limits<unsigned>::max();
3960 uint64_t Idx = SubIdx->getZExtValue();
3961 APInt DemandedSubElts = DemandedElts.extractBits(NumSubElts, Idx);
3962 if (!!DemandedSubElts) {
3963 Tmp = ComputeNumSignBits(Sub, DemandedSubElts, Depth + 1);
3964 if (Tmp == 1) return 1; // early-out
3965 }
3966 APInt SubMask = APInt::getBitsSet(NumElts, Idx, Idx + NumSubElts);
3967 APInt DemandedSrcElts = DemandedElts & ~SubMask;
3968 if (!!DemandedSrcElts) {
3969 Tmp2 = ComputeNumSignBits(Src, DemandedSrcElts, Depth + 1);
3970 Tmp = std::min(Tmp, Tmp2);
3971 }
3972 assert(Tmp <= VTBits && "Failed to determine minimum sign bits");
3973 return Tmp;
3974 }
3975
3976 // Not able to determine the index so just assume worst case.
3977 Tmp = ComputeNumSignBits(Sub, Depth + 1);
3978 if (Tmp == 1) return 1; // early-out
3979 Tmp2 = ComputeNumSignBits(Src, Depth + 1);
3980 Tmp = std::min(Tmp, Tmp2);
3981 assert(Tmp <= VTBits && "Failed to determine minimum sign bits");
3982 return Tmp;
3983 }
3984 }
3985
3986 // If we are looking at the loaded value of the SDNode.
3987 if (Op.getResNo() == 0) {
3988     // Handle LOADX separately here. The EXTLOAD case will fall through.
3989 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Op)) {
3990 unsigned ExtType = LD->getExtensionType();
3991 switch (ExtType) {
3992 default: break;
3993 case ISD::SEXTLOAD: // e.g. i16->i32 = '17' bits known.
3994 Tmp = LD->getMemoryVT().getScalarSizeInBits();
3995 return VTBits - Tmp + 1;
3996 case ISD::ZEXTLOAD: // e.g. i16->i32 = '16' bits known.
3997 Tmp = LD->getMemoryVT().getScalarSizeInBits();
3998 return VTBits - Tmp;
3999 case ISD::NON_EXTLOAD:
4000 if (const Constant *Cst = TLI->getTargetConstantFromLoad(LD)) {
4001 // We only need to handle vectors - computeKnownBits should handle
4002 // scalar cases.
4003 Type *CstTy = Cst->getType();
4004 if (CstTy->isVectorTy() &&
4005 (NumElts * VTBits) == CstTy->getPrimitiveSizeInBits()) {
4006 Tmp = VTBits;
4007 for (unsigned i = 0; i != NumElts; ++i) {
4008 if (!DemandedElts[i])
4009 continue;
4010 if (Constant *Elt = Cst->getAggregateElement(i)) {
4011 if (auto *CInt = dyn_cast<ConstantInt>(Elt)) {
4012 const APInt &Value = CInt->getValue();
4013 Tmp = std::min(Tmp, Value.getNumSignBits());
4014 continue;
4015 }
4016 if (auto *CFP = dyn_cast<ConstantFP>(Elt)) {
4017 APInt Value = CFP->getValueAPF().bitcastToAPInt();
4018 Tmp = std::min(Tmp, Value.getNumSignBits());
4019 continue;
4020 }
4021 }
4022 // Unknown type. Conservatively assume no bits match sign bit.
4023 return 1;
4024 }
4025 return Tmp;
4026 }
4027 }
4028 break;
4029 }
4030 }
4031 }
4032
4033 // Allow the target to implement this method for its nodes.
4034 if (Opcode >= ISD::BUILTIN_OP_END ||
4035 Opcode == ISD::INTRINSIC_WO_CHAIN ||
4036 Opcode == ISD::INTRINSIC_W_CHAIN ||
4037 Opcode == ISD::INTRINSIC_VOID) {
4038 unsigned NumBits =
4039 TLI->ComputeNumSignBitsForTargetNode(Op, DemandedElts, *this, Depth);
4040 if (NumBits > 1)
4041 FirstAnswer = std::max(FirstAnswer, NumBits);
4042 }
4043
4044 // Finally, if we can prove that the top bits of the result are 0's or 1's,
4045 // use this information.
4046 KnownBits Known = computeKnownBits(Op, DemandedElts, Depth);
4047
4048 APInt Mask;
4049 if (Known.isNonNegative()) { // sign bit is 0
4050 Mask = Known.Zero;
4051 } else if (Known.isNegative()) { // sign bit is 1;
4052 Mask = Known.One;
4053 } else {
4054 // Nothing known.
4055 return FirstAnswer;
4056 }
4057
4058 // Okay, we know that the sign bit in Mask is set. Use CLZ to determine
4059 // the number of identical bits in the top of the input value.
4060 Mask = ~Mask;
4061 Mask <<= Mask.getBitWidth()-VTBits;
4062 // Return # leading zeros. We use 'min' here in case Val was zero before
4063 // shifting. We don't want to return '64' as for an i32 "0".
4064 return std::max(FirstAnswer, std::min(VTBits, Mask.countLeadingZeros()));
4065 }
4066
4067 bool SelectionDAG::isBaseWithConstantOffset(SDValue Op) const {
4068 if ((Op.getOpcode() != ISD::ADD && Op.getOpcode() != ISD::OR) ||
4069 !isa<ConstantSDNode>(Op.getOperand(1)))
4070 return false;
4071
4072 if (Op.getOpcode() == ISD::OR &&
4073 !MaskedValueIsZero(Op.getOperand(0), Op.getConstantOperandAPInt(1)))
4074 return false;
4075
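  // e.g. (or X, 4) where bit 2 of X is known zero behaves exactly like
  // (add X, 4): the OR can never generate a carry into the higher bits.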
4076 return true;
4077 }
4078
4079 bool SelectionDAG::isKnownNeverNaN(SDValue Op, bool SNaN, unsigned Depth) const {
4080 // If we're told that NaNs won't happen, assume they won't.
4081 if (getTarget().Options.NoNaNsFPMath || Op->getFlags().hasNoNaNs())
4082 return true;
4083
4084 if (Depth >= MaxRecursionDepth)
4085 return false; // Limit search depth.
4086
4087 // TODO: Handle vectors.
4088 // If the value is a constant, we can obviously see if it is a NaN or not.
4089 if (const ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op)) {
4090 return !C->getValueAPF().isNaN() ||
4091 (SNaN && !C->getValueAPF().isSignaling());
4092 }
4093
4094 unsigned Opcode = Op.getOpcode();
4095 switch (Opcode) {
4096 case ISD::FADD:
4097 case ISD::FSUB:
4098 case ISD::FMUL:
4099 case ISD::FDIV:
4100 case ISD::FREM:
4101 case ISD::FSIN:
4102 case ISD::FCOS: {
4103 if (SNaN)
4104 return true;
4105 // TODO: Need isKnownNeverInfinity
4106 return false;
4107 }
4108 case ISD::FCANONICALIZE:
4109 case ISD::FEXP:
4110 case ISD::FEXP2:
4111 case ISD::FTRUNC:
4112 case ISD::FFLOOR:
4113 case ISD::FCEIL:
4114 case ISD::FROUND:
4115 case ISD::FRINT:
4116 case ISD::FNEARBYINT: {
4117 if (SNaN)
4118 return true;
4119 return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1);
4120 }
4121 case ISD::FABS:
4122 case ISD::FNEG:
4123 case ISD::FCOPYSIGN: {
4124 return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1);
4125 }
4126 case ISD::SELECT:
4127 return isKnownNeverNaN(Op.getOperand(1), SNaN, Depth + 1) &&
4128 isKnownNeverNaN(Op.getOperand(2), SNaN, Depth + 1);
4129 case ISD::FP_EXTEND:
4130 case ISD::FP_ROUND: {
4131 if (SNaN)
4132 return true;
4133 return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1);
4134 }
4135 case ISD::SINT_TO_FP:
4136 case ISD::UINT_TO_FP:
4137 return true;
4138 case ISD::FMA:
4139 case ISD::FMAD: {
4140 if (SNaN)
4141 return true;
4142 return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1) &&
4143 isKnownNeverNaN(Op.getOperand(1), SNaN, Depth + 1) &&
4144 isKnownNeverNaN(Op.getOperand(2), SNaN, Depth + 1);
4145 }
4146   case ISD::FSQRT:   // Needs the operand known non-negative.
4147 case ISD::FLOG:
4148 case ISD::FLOG2:
4149 case ISD::FLOG10:
4150 case ISD::FPOWI:
4151 case ISD::FPOW: {
4152 if (SNaN)
4153 return true;
4154 // TODO: Refine on operand
4155 return false;
4156 }
4157 case ISD::FMINNUM:
4158 case ISD::FMAXNUM: {
4159     // Only one operand needs to be known not-NaN, since it will be returned
4160     // if the other ends up being one.
4161 return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1) ||
4162 isKnownNeverNaN(Op.getOperand(1), SNaN, Depth + 1);
4163 }
4164 case ISD::FMINNUM_IEEE:
4165 case ISD::FMAXNUM_IEEE: {
4166 if (SNaN)
4167 return true;
4168 // This can return a NaN if either operand is an sNaN, or if both operands
4169 // are NaN.
4170 return (isKnownNeverNaN(Op.getOperand(0), false, Depth + 1) &&
4171 isKnownNeverSNaN(Op.getOperand(1), Depth + 1)) ||
4172 (isKnownNeverNaN(Op.getOperand(1), false, Depth + 1) &&
4173 isKnownNeverSNaN(Op.getOperand(0), Depth + 1));
4174 }
4175 case ISD::FMINIMUM:
4176 case ISD::FMAXIMUM: {
4177     // TODO: Does this quiet or return the original NaN as-is?
4178 return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1) &&
4179 isKnownNeverNaN(Op.getOperand(1), SNaN, Depth + 1);
4180 }
4181 case ISD::EXTRACT_VECTOR_ELT: {
4182 return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1);
4183 }
4184 default:
4185 if (Opcode >= ISD::BUILTIN_OP_END ||
4186 Opcode == ISD::INTRINSIC_WO_CHAIN ||
4187 Opcode == ISD::INTRINSIC_W_CHAIN ||
4188 Opcode == ISD::INTRINSIC_VOID) {
4189 return TLI->isKnownNeverNaNForTargetNode(Op, *this, SNaN, Depth);
4190 }
4191
4192 return false;
4193 }
4194 }
4195
4196 bool SelectionDAG::isKnownNeverZeroFloat(SDValue Op) const {
4197 assert(Op.getValueType().isFloatingPoint() &&
4198 "Floating point type expected");
4199
4200 // If the value is a constant, we can obviously see if it is a zero or not.
4201 // TODO: Add BuildVector support.
4202 if (const ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op))
4203 return !C->isZero();
4204 return false;
4205 }
4206
4207 bool SelectionDAG::isKnownNeverZero(SDValue Op) const {
4208 assert(!Op.getValueType().isFloatingPoint() &&
4209 "Floating point types unsupported - use isKnownNeverZeroFloat");
4210
4211 // If the value is a constant, we can obviously see if it is a zero or not.
4212 if (ISD::matchUnaryPredicate(
4213 Op, [](ConstantSDNode *C) { return !C->isNullValue(); }))
4214 return true;
4215
4216 // TODO: Recognize more cases here.
4217 switch (Op.getOpcode()) {
4218 default: break;
4219 case ISD::OR:
4220 if (isKnownNeverZero(Op.getOperand(1)) ||
4221 isKnownNeverZero(Op.getOperand(0)))
4222 return true;
4223 break;
4224 }
4225
4226 return false;
4227 }
4228
4229 bool SelectionDAG::isEqualTo(SDValue A, SDValue B) const {
4230 // Check the obvious case.
4231 if (A == B) return true;
4232
4233   // Check for negative and positive zero.
4234 if (const ConstantFPSDNode *CA = dyn_cast<ConstantFPSDNode>(A))
4235 if (const ConstantFPSDNode *CB = dyn_cast<ConstantFPSDNode>(B))
4236 if (CA->isZero() && CB->isZero()) return true;
4237
4238 // Otherwise they may not be equal.
4239 return false;
4240 }
4241
4242 // FIXME: unify with llvm::haveNoCommonBitsSet.
4243 // FIXME: could also handle masked merge pattern (X & ~M) op (Y & M)
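// e.g. (and X, 0xF0) and (and Y, 0x0F) trivially have no common bits set.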
4244 bool SelectionDAG::haveNoCommonBitsSet(SDValue A, SDValue B) const {
4245 assert(A.getValueType() == B.getValueType() &&
4246 "Values must have the same type");
4247 return (computeKnownBits(A).Zero | computeKnownBits(B).Zero).isAllOnesValue();
4248 }
4249
4250 static SDValue FoldBUILD_VECTOR(const SDLoc &DL, EVT VT,
4251 ArrayRef<SDValue> Ops,
4252 SelectionDAG &DAG) {
4253 int NumOps = Ops.size();
4254 assert(NumOps != 0 && "Can't build an empty vector!");
4255 assert(VT.getVectorNumElements() == (unsigned)NumOps &&
4256 "Incorrect element count in BUILD_VECTOR!");
4257
4258 // BUILD_VECTOR of UNDEFs is UNDEF.
4259 if (llvm::all_of(Ops, [](SDValue Op) { return Op.isUndef(); }))
4260 return DAG.getUNDEF(VT);
4261
4262   // BUILD_VECTOR of sequential extracts from the same vector + type is identity.
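  // e.g. (build_vector (extract_elt V, 0), (extract_elt V, 1)) folds to V
  // when V is a 2-element vector of the same type.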
4263 SDValue IdentitySrc;
4264 bool IsIdentity = true;
4265 for (int i = 0; i != NumOps; ++i) {
4266 if (Ops[i].getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
4267 Ops[i].getOperand(0).getValueType() != VT ||
4268 (IdentitySrc && Ops[i].getOperand(0) != IdentitySrc) ||
4269 !isa<ConstantSDNode>(Ops[i].getOperand(1)) ||
4270 cast<ConstantSDNode>(Ops[i].getOperand(1))->getAPIntValue() != i) {
4271 IsIdentity = false;
4272 break;
4273 }
4274 IdentitySrc = Ops[i].getOperand(0);
4275 }
4276 if (IsIdentity)
4277 return IdentitySrc;
4278
4279 return SDValue();
4280 }
4281
4282 /// Try to simplify vector concatenation to an input value, undef, or build
4283 /// vector.
4284 static SDValue foldCONCAT_VECTORS(const SDLoc &DL, EVT VT,
4285 ArrayRef<SDValue> Ops,
4286 SelectionDAG &DAG) {
4287 assert(!Ops.empty() && "Can't concatenate an empty list of vectors!");
4288 assert(llvm::all_of(Ops,
4289 [Ops](SDValue Op) {
4290 return Ops[0].getValueType() == Op.getValueType();
4291 }) &&
4292 "Concatenation of vectors with inconsistent value types!");
4293 assert((Ops.size() * Ops[0].getValueType().getVectorNumElements()) ==
4294 VT.getVectorNumElements() &&
4295 "Incorrect element count in vector concatenation!");
4296
4297 if (Ops.size() == 1)
4298 return Ops[0];
4299
4300 // Concat of UNDEFs is UNDEF.
4301 if (llvm::all_of(Ops, [](SDValue Op) { return Op.isUndef(); }))
4302 return DAG.getUNDEF(VT);
4303
4304 // Scan the operands and look for extract operations from a single source
4305 // that correspond to insertion at the same location via this concatenation:
4306 // concat (extract X, 0*subvec_elts), (extract X, 1*subvec_elts), ...
4307 SDValue IdentitySrc;
4308 bool IsIdentity = true;
4309 for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
4310 SDValue Op = Ops[i];
4311 unsigned IdentityIndex = i * Op.getValueType().getVectorNumElements();
4312 if (Op.getOpcode() != ISD::EXTRACT_SUBVECTOR ||
4313 Op.getOperand(0).getValueType() != VT ||
4314 (IdentitySrc && Op.getOperand(0) != IdentitySrc) ||
4315 !isa<ConstantSDNode>(Op.getOperand(1)) ||
4316 Op.getConstantOperandVal(1) != IdentityIndex) {
4317 IsIdentity = false;
4318 break;
4319 }
4320 assert((!IdentitySrc || IdentitySrc == Op.getOperand(0)) &&
4321 "Unexpected identity source vector for concat of extracts");
4322 IdentitySrc = Op.getOperand(0);
4323 }
4324 if (IsIdentity) {
4325 assert(IdentitySrc && "Failed to set source vector of extracts");
4326 return IdentitySrc;
4327 }
4328
4329   // A CONCAT_VECTORS with all UNDEF/BUILD_VECTOR operands can be
4330 // simplified to one big BUILD_VECTOR.
4331 // FIXME: Add support for SCALAR_TO_VECTOR as well.
4332 EVT SVT = VT.getScalarType();
4333 SmallVector<SDValue, 16> Elts;
4334 for (SDValue Op : Ops) {
4335 EVT OpVT = Op.getValueType();
4336 if (Op.isUndef())
4337 Elts.append(OpVT.getVectorNumElements(), DAG.getUNDEF(SVT));
4338 else if (Op.getOpcode() == ISD::BUILD_VECTOR)
4339 Elts.append(Op->op_begin(), Op->op_end());
4340 else
4341 return SDValue();
4342 }
4343
4344   // BUILD_VECTOR requires all inputs to be of the same type; find the
4345   // maximum type and extend them all.
4346 for (SDValue Op : Elts)
4347 SVT = (SVT.bitsLT(Op.getValueType()) ? Op.getValueType() : SVT);
4348
4349 if (SVT.bitsGT(VT.getScalarType()))
4350 for (SDValue &Op : Elts)
4351 Op = DAG.getTargetLoweringInfo().isZExtFree(Op.getValueType(), SVT)
4352 ? DAG.getZExtOrTrunc(Op, DL, SVT)
4353 : DAG.getSExtOrTrunc(Op, DL, SVT);
4354
4355 SDValue V = DAG.getBuildVector(VT, DL, Elts);
4356 NewSDValueDbgMsg(V, "New node fold concat vectors: ", &DAG);
4357 return V;
4358 }
4359
4360 /// Gets or creates the specified node.
4361 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT) {
4362 FoldingSetNodeID ID;
4363 AddNodeIDNode(ID, Opcode, getVTList(VT), None);
4364 void *IP = nullptr;
4365 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP))
4366 return SDValue(E, 0);
4367
4368 auto *N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(),
4369 getVTList(VT));
4370 CSEMap.InsertNode(N, IP);
4371
4372 InsertNode(N);
4373 SDValue V = SDValue(N, 0);
4374 NewSDValueDbgMsg(V, "Creating new node: ", this);
4375 return V;
4376 }
4377
4378 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
4379 SDValue Operand, const SDNodeFlags Flags) {
4380   // Constant fold unary operations with an integer constant operand. Even
4381   // opaque constants will be folded, because the folding of unary operations
4382 // doesn't create new constants with different values. Nevertheless, the
4383 // opaque flag is preserved during folding to prevent future folding with
4384 // other constants.
4385 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Operand)) {
4386 const APInt &Val = C->getAPIntValue();
4387 switch (Opcode) {
4388 default: break;
4389 case ISD::SIGN_EXTEND:
4390 return getConstant(Val.sextOrTrunc(VT.getSizeInBits()), DL, VT,
4391 C->isTargetOpcode(), C->isOpaque());
4392 case ISD::TRUNCATE:
4393 if (C->isOpaque())
4394 break;
4395 LLVM_FALLTHROUGH;
4396 case ISD::ANY_EXTEND:
4397 case ISD::ZERO_EXTEND:
4398 return getConstant(Val.zextOrTrunc(VT.getSizeInBits()), DL, VT,
4399 C->isTargetOpcode(), C->isOpaque());
4400 case ISD::UINT_TO_FP:
4401 case ISD::SINT_TO_FP: {
4402 APFloat apf(EVTToAPFloatSemantics(VT),
4403 APInt::getNullValue(VT.getSizeInBits()));
4404 (void)apf.convertFromAPInt(Val,
4405 Opcode==ISD::SINT_TO_FP,
4406 APFloat::rmNearestTiesToEven);
4407 return getConstantFP(apf, DL, VT);
4408 }
4409 case ISD::BITCAST:
4410 if (VT == MVT::f16 && C->getValueType(0) == MVT::i16)
4411 return getConstantFP(APFloat(APFloat::IEEEhalf(), Val), DL, VT);
4412 if (VT == MVT::f32 && C->getValueType(0) == MVT::i32)
4413 return getConstantFP(APFloat(APFloat::IEEEsingle(), Val), DL, VT);
4414 if (VT == MVT::f64 && C->getValueType(0) == MVT::i64)
4415 return getConstantFP(APFloat(APFloat::IEEEdouble(), Val), DL, VT);
4416 if (VT == MVT::f128 && C->getValueType(0) == MVT::i128)
4417 return getConstantFP(APFloat(APFloat::IEEEquad(), Val), DL, VT);
4418 break;
4419 case ISD::ABS:
4420 return getConstant(Val.abs(), DL, VT, C->isTargetOpcode(),
4421 C->isOpaque());
4422 case ISD::BITREVERSE:
4423 return getConstant(Val.reverseBits(), DL, VT, C->isTargetOpcode(),
4424 C->isOpaque());
4425 case ISD::BSWAP:
4426 return getConstant(Val.byteSwap(), DL, VT, C->isTargetOpcode(),
4427 C->isOpaque());
4428 case ISD::CTPOP:
4429 return getConstant(Val.countPopulation(), DL, VT, C->isTargetOpcode(),
4430 C->isOpaque());
4431 case ISD::CTLZ:
4432 case ISD::CTLZ_ZERO_UNDEF:
4433 return getConstant(Val.countLeadingZeros(), DL, VT, C->isTargetOpcode(),
4434 C->isOpaque());
4435 case ISD::CTTZ:
4436 case ISD::CTTZ_ZERO_UNDEF:
4437 return getConstant(Val.countTrailingZeros(), DL, VT, C->isTargetOpcode(),
4438 C->isOpaque());
4439 case ISD::FP16_TO_FP: {
4440 bool Ignored;
4441 APFloat FPV(APFloat::IEEEhalf(),
4442 (Val.getBitWidth() == 16) ? Val : Val.trunc(16));
4443
4444 // This can return overflow, underflow, or inexact; we don't care.
4445 // FIXME need to be more flexible about rounding mode.
4446 (void)FPV.convert(EVTToAPFloatSemantics(VT),
4447 APFloat::rmNearestTiesToEven, &Ignored);
4448 return getConstantFP(FPV, DL, VT);
4449 }
4450 }
4451 }
4452
4453 // Constant fold unary operations with a floating point constant operand.
4454 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Operand)) {
4455 APFloat V = C->getValueAPF(); // make copy
4456 switch (Opcode) {
4457 case ISD::FNEG:
4458 V.changeSign();
4459 return getConstantFP(V, DL, VT);
4460 case ISD::FABS:
4461 V.clearSign();
4462 return getConstantFP(V, DL, VT);
4463 case ISD::FCEIL: {
4464 APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardPositive);
4465 if (fs == APFloat::opOK || fs == APFloat::opInexact)
4466 return getConstantFP(V, DL, VT);
4467 break;
4468 }
4469 case ISD::FTRUNC: {
4470 APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardZero);
4471 if (fs == APFloat::opOK || fs == APFloat::opInexact)
4472 return getConstantFP(V, DL, VT);
4473 break;
4474 }
4475 case ISD::FFLOOR: {
4476 APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardNegative);
4477 if (fs == APFloat::opOK || fs == APFloat::opInexact)
4478 return getConstantFP(V, DL, VT);
4479 break;
4480 }
4481 case ISD::FP_EXTEND: {
4482 bool ignored;
4483 // This can return overflow, underflow, or inexact; we don't care.
4484 // FIXME need to be more flexible about rounding mode.
4485 (void)V.convert(EVTToAPFloatSemantics(VT),
4486 APFloat::rmNearestTiesToEven, &ignored);
4487 return getConstantFP(V, DL, VT);
4488 }
4489 case ISD::FP_TO_SINT:
4490 case ISD::FP_TO_UINT: {
4491 bool ignored;
4492 APSInt IntVal(VT.getSizeInBits(), Opcode == ISD::FP_TO_UINT);
4493 // FIXME need to be more flexible about rounding mode.
4494 APFloat::opStatus s =
4495 V.convertToInteger(IntVal, APFloat::rmTowardZero, &ignored);
4496 if (s == APFloat::opInvalidOp) // inexact is OK, in fact usual
4497 break;
4498 return getConstant(IntVal, DL, VT);
4499 }
4500 case ISD::BITCAST:
4501 if (VT == MVT::i16 && C->getValueType(0) == MVT::f16)
4502 return getConstant((uint16_t)V.bitcastToAPInt().getZExtValue(), DL, VT);
4503 else if (VT == MVT::i32 && C->getValueType(0) == MVT::f32)
4504 return getConstant((uint32_t)V.bitcastToAPInt().getZExtValue(), DL, VT);
4505 else if (VT == MVT::i64 && C->getValueType(0) == MVT::f64)
4506 return getConstant(V.bitcastToAPInt().getZExtValue(), DL, VT);
4507 break;
4508 case ISD::FP_TO_FP16: {
4509 bool Ignored;
4510 // This can return overflow, underflow, or inexact; we don't care.
4511 // FIXME need to be more flexible about rounding mode.
4512 (void)V.convert(APFloat::IEEEhalf(),
4513 APFloat::rmNearestTiesToEven, &Ignored);
4514 return getConstant(V.bitcastToAPInt(), DL, VT);
4515 }
4516 }
4517 }
4518
4519 // Constant fold unary operations with a vector integer or float operand.
4520 if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(Operand)) {
4521 if (BV->isConstant()) {
4522 switch (Opcode) {
4523 default:
4524 // FIXME: Entirely reasonable to perform folding of other unary
4525 // operations here as the need arises.
4526 break;
4527 case ISD::FNEG:
4528 case ISD::FABS:
4529 case ISD::FCEIL:
4530 case ISD::FTRUNC:
4531 case ISD::FFLOOR:
4532 case ISD::FP_EXTEND:
4533 case ISD::FP_TO_SINT:
4534 case ISD::FP_TO_UINT:
4535 case ISD::TRUNCATE:
4536 case ISD::ANY_EXTEND:
4537 case ISD::ZERO_EXTEND:
4538 case ISD::SIGN_EXTEND:
4539 case ISD::UINT_TO_FP:
4540 case ISD::SINT_TO_FP:
4541 case ISD::ABS:
4542 case ISD::BITREVERSE:
4543 case ISD::BSWAP:
4544 case ISD::CTLZ:
4545 case ISD::CTLZ_ZERO_UNDEF:
4546 case ISD::CTTZ:
4547 case ISD::CTTZ_ZERO_UNDEF:
4548 case ISD::CTPOP: {
4549 SDValue Ops = { Operand };
4550 if (SDValue Fold = FoldConstantVectorArithmetic(Opcode, DL, VT, Ops))
4551 return Fold;
4552 }
4553 }
4554 }
4555 }
4556
4557 unsigned OpOpcode = Operand.getNode()->getOpcode();
4558 switch (Opcode) {
4559 case ISD::TokenFactor:
4560 case ISD::MERGE_VALUES:
4561 case ISD::CONCAT_VECTORS:
4562 return Operand; // Factor, merge or concat of one node? No need.
4563 case ISD::BUILD_VECTOR: {
4564 // Attempt to simplify BUILD_VECTOR.
4565 SDValue Ops[] = {Operand};
4566 if (SDValue V = FoldBUILD_VECTOR(DL, VT, Ops, *this))
4567 return V;
4568 break;
4569 }
4570 case ISD::FP_ROUND: llvm_unreachable("Invalid method to make FP_ROUND node");
4571 case ISD::FP_EXTEND:
4572 assert(VT.isFloatingPoint() &&
4573 Operand.getValueType().isFloatingPoint() && "Invalid FP cast!");
4574 if (Operand.getValueType() == VT) return Operand; // noop conversion.
4575 assert((!VT.isVector() ||
4576 VT.getVectorNumElements() ==
4577 Operand.getValueType().getVectorNumElements()) &&
4578 "Vector element count mismatch!");
4579 assert(Operand.getValueType().bitsLT(VT) &&
4580 "Invalid fpext node, dst < src!");
4581 if (Operand.isUndef())
4582 return getUNDEF(VT);
4583 break;
4584 case ISD::FP_TO_SINT:
4585 case ISD::FP_TO_UINT:
4586 if (Operand.isUndef())
4587 return getUNDEF(VT);
4588 break;
4589 case ISD::SINT_TO_FP:
4590 case ISD::UINT_TO_FP:
4591 // [us]itofp(undef) = 0, because the result value is bounded.
4592 if (Operand.isUndef())
4593 return getConstantFP(0.0, DL, VT);
4594 break;
4595 case ISD::SIGN_EXTEND:
4596 assert(VT.isInteger() && Operand.getValueType().isInteger() &&
4597 "Invalid SIGN_EXTEND!");
    assert(VT.isVector() == Operand.getValueType().isVector() &&
           "SIGN_EXTEND result type should be vector iff the operand "
           "type is vector!");
4601 if (Operand.getValueType() == VT) return Operand; // noop extension
4602 assert((!VT.isVector() ||
4603 VT.getVectorNumElements() ==
4604 Operand.getValueType().getVectorNumElements()) &&
4605 "Vector element count mismatch!");
4606 assert(Operand.getValueType().bitsLT(VT) &&
4607 "Invalid sext node, dst < src!");
4608 if (OpOpcode == ISD::SIGN_EXTEND || OpOpcode == ISD::ZERO_EXTEND)
4609 return getNode(OpOpcode, DL, VT, Operand.getOperand(0));
4610 else if (OpOpcode == ISD::UNDEF)
4611 // sext(undef) = 0, because the top bits will all be the same.
4612 return getConstant(0, DL, VT);
4613 break;
4614 case ISD::ZERO_EXTEND:
4615 assert(VT.isInteger() && Operand.getValueType().isInteger() &&
4616 "Invalid ZERO_EXTEND!");
    assert(VT.isVector() == Operand.getValueType().isVector() &&
           "ZERO_EXTEND result type should be vector iff the operand "
           "type is vector!");
4620 if (Operand.getValueType() == VT) return Operand; // noop extension
4621 assert((!VT.isVector() ||
4622 VT.getVectorNumElements() ==
4623 Operand.getValueType().getVectorNumElements()) &&
4624 "Vector element count mismatch!");
4625 assert(Operand.getValueType().bitsLT(VT) &&
4626 "Invalid zext node, dst < src!");
4627 if (OpOpcode == ISD::ZERO_EXTEND) // (zext (zext x)) -> (zext x)
4628 return getNode(ISD::ZERO_EXTEND, DL, VT, Operand.getOperand(0));
4629 else if (OpOpcode == ISD::UNDEF)
4630 // zext(undef) = 0, because the top bits will be zero.
4631 return getConstant(0, DL, VT);
4632 break;
4633 case ISD::ANY_EXTEND:
4634 assert(VT.isInteger() && Operand.getValueType().isInteger() &&
4635 "Invalid ANY_EXTEND!");
    assert(VT.isVector() == Operand.getValueType().isVector() &&
           "ANY_EXTEND result type should be vector iff the operand "
           "type is vector!");
4639 if (Operand.getValueType() == VT) return Operand; // noop extension
4640 assert((!VT.isVector() ||
4641 VT.getVectorNumElements() ==
4642 Operand.getValueType().getVectorNumElements()) &&
4643 "Vector element count mismatch!");
4644 assert(Operand.getValueType().bitsLT(VT) &&
4645 "Invalid anyext node, dst < src!");
4646
4647 if (OpOpcode == ISD::ZERO_EXTEND || OpOpcode == ISD::SIGN_EXTEND ||
4648 OpOpcode == ISD::ANY_EXTEND)
4649 // (ext (zext x)) -> (zext x) and (ext (sext x)) -> (sext x)
4650 return getNode(OpOpcode, DL, VT, Operand.getOperand(0));
4651 else if (OpOpcode == ISD::UNDEF)
4652 return getUNDEF(VT);
4653
4654 // (ext (trunc x)) -> x
4655 if (OpOpcode == ISD::TRUNCATE) {
4656 SDValue OpOp = Operand.getOperand(0);
4657 if (OpOp.getValueType() == VT) {
4658 transferDbgValues(Operand, OpOp);
4659 return OpOp;
4660 }
4661 }
4662 break;
4663 case ISD::TRUNCATE:
4664 assert(VT.isInteger() && Operand.getValueType().isInteger() &&
4665 "Invalid TRUNCATE!");
    assert(VT.isVector() == Operand.getValueType().isVector() &&
           "TRUNCATE result type should be vector iff the operand "
           "type is vector!");
4669 if (Operand.getValueType() == VT) return Operand; // noop truncate
4670 assert((!VT.isVector() ||
4671 VT.getVectorNumElements() ==
4672 Operand.getValueType().getVectorNumElements()) &&
4673 "Vector element count mismatch!");
4674 assert(Operand.getValueType().bitsGT(VT) &&
4675 "Invalid truncate node, src < dst!");
4676 if (OpOpcode == ISD::TRUNCATE)
4677 return getNode(ISD::TRUNCATE, DL, VT, Operand.getOperand(0));
4678 if (OpOpcode == ISD::ZERO_EXTEND || OpOpcode == ISD::SIGN_EXTEND ||
4679 OpOpcode == ISD::ANY_EXTEND) {
4680 // If the source is smaller than the dest, we still need an extend.
4681 if (Operand.getOperand(0).getValueType().getScalarType()
4682 .bitsLT(VT.getScalarType()))
4683 return getNode(OpOpcode, DL, VT, Operand.getOperand(0));
4684 if (Operand.getOperand(0).getValueType().bitsGT(VT))
4685 return getNode(ISD::TRUNCATE, DL, VT, Operand.getOperand(0));
4686 return Operand.getOperand(0);
4687 }
4688 if (OpOpcode == ISD::UNDEF)
4689 return getUNDEF(VT);
4690 break;
4691 case ISD::ANY_EXTEND_VECTOR_INREG:
4692 case ISD::ZERO_EXTEND_VECTOR_INREG:
4693 case ISD::SIGN_EXTEND_VECTOR_INREG:
4694 assert(VT.isVector() && "This DAG node is restricted to vector types.");
4695 assert(Operand.getValueType().bitsLE(VT) &&
4696 "The input must be the same size or smaller than the result.");
4697 assert(VT.getVectorNumElements() <
4698 Operand.getValueType().getVectorNumElements() &&
4699 "The destination vector type must have fewer lanes than the input.");
4700 break;
4701 case ISD::ABS:
4702 assert(VT.isInteger() && VT == Operand.getValueType() &&
4703 "Invalid ABS!");
4704 if (OpOpcode == ISD::UNDEF)
4705 return getUNDEF(VT);
4706 break;
4707 case ISD::BSWAP:
4708 assert(VT.isInteger() && VT == Operand.getValueType() &&
4709 "Invalid BSWAP!");
4710 assert((VT.getScalarSizeInBits() % 16 == 0) &&
4711 "BSWAP types must be a multiple of 16 bits!");
4712 if (OpOpcode == ISD::UNDEF)
4713 return getUNDEF(VT);
4714 break;
4715 case ISD::BITREVERSE:
4716 assert(VT.isInteger() && VT == Operand.getValueType() &&
4717 "Invalid BITREVERSE!");
4718 if (OpOpcode == ISD::UNDEF)
4719 return getUNDEF(VT);
4720 break;
4721 case ISD::BITCAST:
4722 // Basic sanity checking.
4723 assert(VT.getSizeInBits() == Operand.getValueSizeInBits() &&
4724 "Cannot BITCAST between types of different sizes!");
4725 if (VT == Operand.getValueType()) return Operand; // noop conversion.
4726 if (OpOpcode == ISD::BITCAST) // bitconv(bitconv(x)) -> bitconv(x)
4727 return getNode(ISD::BITCAST, DL, VT, Operand.getOperand(0));
4728 if (OpOpcode == ISD::UNDEF)
4729 return getUNDEF(VT);
4730 break;
4731 case ISD::SCALAR_TO_VECTOR:
4732 assert(VT.isVector() && !Operand.getValueType().isVector() &&
4733 (VT.getVectorElementType() == Operand.getValueType() ||
4734 (VT.getVectorElementType().isInteger() &&
4735 Operand.getValueType().isInteger() &&
4736 VT.getVectorElementType().bitsLE(Operand.getValueType()))) &&
4737 "Illegal SCALAR_TO_VECTOR node!");
4738 if (OpOpcode == ISD::UNDEF)
4739 return getUNDEF(VT);
4740 // scalar_to_vector(extract_vector_elt V, 0) -> V, top bits are undefined.
4741 if (OpOpcode == ISD::EXTRACT_VECTOR_ELT &&
4742 isa<ConstantSDNode>(Operand.getOperand(1)) &&
4743 Operand.getConstantOperandVal(1) == 0 &&
4744 Operand.getOperand(0).getValueType() == VT)
4745 return Operand.getOperand(0);
4746 break;
4747 case ISD::FNEG:
4748 // Negation of an unknown bag of bits is still completely undefined.
4749 if (OpOpcode == ISD::UNDEF)
4750 return getUNDEF(VT);
4751
4752 if (OpOpcode == ISD::FNEG) // --X -> X
4753 return Operand.getOperand(0);
4754 break;
4755 case ISD::FABS:
4756 if (OpOpcode == ISD::FNEG) // abs(-X) -> abs(X)
4757 return getNode(ISD::FABS, DL, VT, Operand.getOperand(0));
4758 break;
4759 }
4760
4761 SDNode *N;
4762 SDVTList VTs = getVTList(VT);
4763 SDValue Ops[] = {Operand};
4764 if (VT != MVT::Glue) { // Don't CSE flag producing nodes
4765 FoldingSetNodeID ID;
4766 AddNodeIDNode(ID, Opcode, VTs, Ops);
4767 void *IP = nullptr;
4768 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) {
4769 E->intersectFlagsWith(Flags);
4770 return SDValue(E, 0);
4771 }
4772
4773 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
4774 N->setFlags(Flags);
4775 createOperands(N, Ops);
4776 CSEMap.InsertNode(N, IP);
4777 } else {
4778 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
4779 createOperands(N, Ops);
4780 }
4781
4782 InsertNode(N);
4783 SDValue V = SDValue(N, 0);
4784 NewSDValueDbgMsg(V, "Creating new node: ", this);
4785 return V;
4786 }
4787
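/// Try to constant fold a binary integer opcode applied to two APInt
/// constants. Returns llvm::None both for opcodes that are not handled here
/// and for division/remainder by zero, which have no defined result.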
static llvm::Optional<APInt> FoldValue(unsigned Opcode, const APInt &C1,
                                       const APInt &C2) {
4790 switch (Opcode) {
4791 case ISD::ADD: return C1 + C2;
4792 case ISD::SUB: return C1 - C2;
4793 case ISD::MUL: return C1 * C2;
4794 case ISD::AND: return C1 & C2;
4795 case ISD::OR: return C1 | C2;
4796 case ISD::XOR: return C1 ^ C2;
4797 case ISD::SHL: return C1 << C2;
4798 case ISD::SRL: return C1.lshr(C2);
4799 case ISD::SRA: return C1.ashr(C2);
4800 case ISD::ROTL: return C1.rotl(C2);
4801 case ISD::ROTR: return C1.rotr(C2);
4802 case ISD::SMIN: return C1.sle(C2) ? C1 : C2;
4803 case ISD::SMAX: return C1.sge(C2) ? C1 : C2;
4804 case ISD::UMIN: return C1.ule(C2) ? C1 : C2;
4805 case ISD::UMAX: return C1.uge(C2) ? C1 : C2;
4806 case ISD::SADDSAT: return C1.sadd_sat(C2);
4807 case ISD::UADDSAT: return C1.uadd_sat(C2);
4808 case ISD::SSUBSAT: return C1.ssub_sat(C2);
4809 case ISD::USUBSAT: return C1.usub_sat(C2);
4810 case ISD::UDIV:
4811 if (!C2.getBoolValue())
4812 break;
4813 return C1.udiv(C2);
4814 case ISD::UREM:
4815 if (!C2.getBoolValue())
4816 break;
4817 return C1.urem(C2);
4818 case ISD::SDIV:
4819 if (!C2.getBoolValue())
4820 break;
4821 return C1.sdiv(C2);
4822 case ISD::SREM:
4823 if (!C2.getBoolValue())
4824 break;
4825 return C1.srem(C2);
4826 }
4827 return llvm::None;
4828 }
4829
SDValue SelectionDAG::FoldConstantArithmetic(unsigned Opcode, const SDLoc &DL,
                                             EVT VT, const ConstantSDNode *C1,
                                             const ConstantSDNode *C2) {
4833 if (C1->isOpaque() || C2->isOpaque())
4834 return SDValue();
4835 if (Optional<APInt> Folded =
4836 FoldValue(Opcode, C1->getAPIntValue(), C2->getAPIntValue()))
4837 return getConstant(Folded.getValue(), DL, VT);
4838 return SDValue();
4839 }
4840
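/// Fold a binary operation between a global address and a constant into a
/// single global address with an adjusted offset, e.g.
/// (add (GlobalAddress G, 8), 4) --> (GlobalAddress G, 12), provided the
/// target reports that offset folding is legal for G.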
SDValue SelectionDAG::FoldSymbolOffset(unsigned Opcode, EVT VT,
                                       const GlobalAddressSDNode *GA,
                                       const SDNode *N2) {
4844 if (GA->getOpcode() != ISD::GlobalAddress)
4845 return SDValue();
4846 if (!TLI->isOffsetFoldingLegal(GA))
4847 return SDValue();
4848 auto *C2 = dyn_cast<ConstantSDNode>(N2);
4849 if (!C2)
4850 return SDValue();
4851 int64_t Offset = C2->getSExtValue();
4852 switch (Opcode) {
4853 case ISD::ADD: break;
4854 case ISD::SUB: Offset = -uint64_t(Offset); break;
4855 default: return SDValue();
4856 }
4857 return getGlobalAddress(GA->getGlobal(), SDLoc(C2), VT,
4858 GA->getOffset() + uint64_t(Offset));
4859 }
4860
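/// Return true if the result of applying Opcode to Ops is known to be undef,
/// e.g. any division or remainder whose divisor is zero or undef (including
/// a zero/undef element of a constant divisor vector).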
bool SelectionDAG::isUndef(unsigned Opcode, ArrayRef<SDValue> Ops) {
4862 switch (Opcode) {
4863 case ISD::SDIV:
4864 case ISD::UDIV:
4865 case ISD::SREM:
4866 case ISD::UREM: {
4867 // If a divisor is zero/undef or any element of a divisor vector is
4868 // zero/undef, the whole op is undef.
4869 assert(Ops.size() == 2 && "Div/rem should have 2 operands");
4870 SDValue Divisor = Ops[1];
4871 if (Divisor.isUndef() || isNullConstant(Divisor))
4872 return true;
4873
4874 return ISD::isBuildVectorOfConstantSDNodes(Divisor.getNode()) &&
4875 llvm::any_of(Divisor->op_values(),
4876 [](SDValue V) { return V.isUndef() ||
4877 isNullConstant(V); });
4878 // TODO: Handle signed overflow.
4879 }
4880 // TODO: Handle oversized shifts.
4881 default:
4882 return false;
4883 }
4884 }
4885
SDValue SelectionDAG::FoldConstantArithmetic(unsigned Opcode, const SDLoc &DL,
                                             EVT VT, SDNode *N1, SDNode *N2) {
4888 // If the opcode is a target-specific ISD node, there's nothing we can
4889 // do here and the operand rules may not line up with the below, so
4890 // bail early.
4891 if (Opcode >= ISD::BUILTIN_OP_END)
4892 return SDValue();
4893
4894 if (isUndef(Opcode, {SDValue(N1, 0), SDValue(N2, 0)}))
4895 return getUNDEF(VT);
4896
4897 // Handle the case of two scalars.
4898 if (auto *C1 = dyn_cast<ConstantSDNode>(N1)) {
4899 if (auto *C2 = dyn_cast<ConstantSDNode>(N2)) {
4900 SDValue Folded = FoldConstantArithmetic(Opcode, DL, VT, C1, C2);
4901 assert((!Folded || !VT.isVector()) &&
4902 "Can't fold vectors ops with scalar operands");
4903 return Folded;
4904 }
4905 }
4906
4907 // fold (add Sym, c) -> Sym+c
4908 if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(N1))
4909 return FoldSymbolOffset(Opcode, VT, GA, N2);
4910 if (TLI->isCommutativeBinOp(Opcode))
4911 if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(N2))
4912 return FoldSymbolOffset(Opcode, VT, GA, N1);
4913
4914 // For vectors, extract each constant element and fold them individually.
4915 // Either input may be an undef value.
4916 auto *BV1 = dyn_cast<BuildVectorSDNode>(N1);
4917 if (!BV1 && !N1->isUndef())
4918 return SDValue();
4919 auto *BV2 = dyn_cast<BuildVectorSDNode>(N2);
4920 if (!BV2 && !N2->isUndef())
4921 return SDValue();
4922 // If both operands are undef, that's handled the same way as scalars.
4923 if (!BV1 && !BV2)
4924 return SDValue();
4925
4926 assert((!BV1 || !BV2 || BV1->getNumOperands() == BV2->getNumOperands()) &&
4927 "Vector binop with different number of elements in operands?");
4928
4929 EVT SVT = VT.getScalarType();
4930 EVT LegalSVT = SVT;
4931 if (NewNodesMustHaveLegalTypes && LegalSVT.isInteger()) {
4932 LegalSVT = TLI->getTypeToTransformTo(*getContext(), LegalSVT);
4933 if (LegalSVT.bitsLT(SVT))
4934 return SDValue();
4935 }
4936 SmallVector<SDValue, 4> Outputs;
4937 unsigned NumOps = BV1 ? BV1->getNumOperands() : BV2->getNumOperands();
4938 for (unsigned I = 0; I != NumOps; ++I) {
4939 SDValue V1 = BV1 ? BV1->getOperand(I) : getUNDEF(SVT);
4940 SDValue V2 = BV2 ? BV2->getOperand(I) : getUNDEF(SVT);
4941 if (SVT.isInteger()) {
4942 if (V1->getValueType(0).bitsGT(SVT))
4943 V1 = getNode(ISD::TRUNCATE, DL, SVT, V1);
4944 if (V2->getValueType(0).bitsGT(SVT))
4945 V2 = getNode(ISD::TRUNCATE, DL, SVT, V2);
4946 }
4947
4948 if (V1->getValueType(0) != SVT || V2->getValueType(0) != SVT)
4949 return SDValue();
4950
4951 // Fold one vector element.
4952 SDValue ScalarResult = getNode(Opcode, DL, SVT, V1, V2);
4953 if (LegalSVT != SVT)
4954 ScalarResult = getNode(ISD::SIGN_EXTEND, DL, LegalSVT, ScalarResult);
4955
4956 // Scalar folding only succeeded if the result is a constant or UNDEF.
4957 if (!ScalarResult.isUndef() && ScalarResult.getOpcode() != ISD::Constant &&
4958 ScalarResult.getOpcode() != ISD::ConstantFP)
4959 return SDValue();
4960 Outputs.push_back(ScalarResult);
4961 }
4962
4963 assert(VT.getVectorNumElements() == Outputs.size() &&
4964 "Vector size mismatch!");
4965
4966 // We may have a vector type but a scalar result. Create a splat.
4967 Outputs.resize(VT.getVectorNumElements(), Outputs.back());
4968
4969 // Build a big vector out of the scalar elements we generated.
4970 return getBuildVector(VT, SDLoc(), Outputs);
4971 }
4972
4973 // TODO: Merge with FoldConstantArithmetic
SDValue SelectionDAG::FoldConstantVectorArithmetic(unsigned Opcode,
                                                   const SDLoc &DL, EVT VT,
                                                   ArrayRef<SDValue> Ops,
                                                   const SDNodeFlags Flags) {
4978 // If the opcode is a target-specific ISD node, there's nothing we can
4979 // do here and the operand rules may not line up with the below, so
4980 // bail early.
4981 if (Opcode >= ISD::BUILTIN_OP_END)
4982 return SDValue();
4983
4984 if (isUndef(Opcode, Ops))
4985 return getUNDEF(VT);
4986
4987 // We can only fold vectors - maybe merge with FoldConstantArithmetic someday?
4988 if (!VT.isVector())
4989 return SDValue();
4990
4991 unsigned NumElts = VT.getVectorNumElements();
4992
4993 auto IsScalarOrSameVectorSize = [&](const SDValue &Op) {
4994 return !Op.getValueType().isVector() ||
4995 Op.getValueType().getVectorNumElements() == NumElts;
4996 };
4997
4998 auto IsConstantBuildVectorOrUndef = [&](const SDValue &Op) {
4999 BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(Op);
5000 return (Op.isUndef()) || (Op.getOpcode() == ISD::CONDCODE) ||
5001 (BV && BV->isConstant());
5002 };
5003
5004 // All operands must be vector types with the same number of elements as
5005 // the result type and must be either UNDEF or a build vector of constant
5006 // or UNDEF scalars.
5007 if (!llvm::all_of(Ops, IsConstantBuildVectorOrUndef) ||
5008 !llvm::all_of(Ops, IsScalarOrSameVectorSize))
5009 return SDValue();
5010
  // If we are comparing vectors, then the result needs to be an i1 boolean
  // that is then sign-extended back to the legal result type.
5013 EVT SVT = (Opcode == ISD::SETCC ? MVT::i1 : VT.getScalarType());
5014
  // Find a legal integer scalar type for constant promotion, and ensure that
  // its scalar size is at least as large as the source's.
5017 EVT LegalSVT = VT.getScalarType();
5018 if (NewNodesMustHaveLegalTypes && LegalSVT.isInteger()) {
5019 LegalSVT = TLI->getTypeToTransformTo(*getContext(), LegalSVT);
5020 if (LegalSVT.bitsLT(VT.getScalarType()))
5021 return SDValue();
5022 }
5023
5024 // Constant fold each scalar lane separately.
5025 SmallVector<SDValue, 4> ScalarResults;
5026 for (unsigned i = 0; i != NumElts; i++) {
5027 SmallVector<SDValue, 4> ScalarOps;
5028 for (SDValue Op : Ops) {
5029 EVT InSVT = Op.getValueType().getScalarType();
5030 BuildVectorSDNode *InBV = dyn_cast<BuildVectorSDNode>(Op);
5031 if (!InBV) {
5032 // We've checked that this is UNDEF or a constant of some kind.
5033 if (Op.isUndef())
5034 ScalarOps.push_back(getUNDEF(InSVT));
5035 else
5036 ScalarOps.push_back(Op);
5037 continue;
5038 }
5039
5040 SDValue ScalarOp = InBV->getOperand(i);
5041 EVT ScalarVT = ScalarOp.getValueType();
5042
5043 // Build vector (integer) scalar operands may need implicit
5044 // truncation - do this before constant folding.
5045 if (ScalarVT.isInteger() && ScalarVT.bitsGT(InSVT))
5046 ScalarOp = getNode(ISD::TRUNCATE, DL, InSVT, ScalarOp);
5047
5048 ScalarOps.push_back(ScalarOp);
5049 }
5050
5051 // Constant fold the scalar operands.
5052 SDValue ScalarResult = getNode(Opcode, DL, SVT, ScalarOps, Flags);
5053
5054 // Legalize the (integer) scalar constant if necessary.
5055 if (LegalSVT != SVT)
5056 ScalarResult = getNode(ISD::SIGN_EXTEND, DL, LegalSVT, ScalarResult);
5057
5058 // Scalar folding only succeeded if the result is a constant or UNDEF.
5059 if (!ScalarResult.isUndef() && ScalarResult.getOpcode() != ISD::Constant &&
5060 ScalarResult.getOpcode() != ISD::ConstantFP)
5061 return SDValue();
5062 ScalarResults.push_back(ScalarResult);
5063 }
5064
5065 SDValue V = getBuildVector(VT, DL, ScalarResults);
5066 NewSDValueDbgMsg(V, "New node fold constant vector: ", this);
5067 return V;
5068 }
5069
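/// Try to constant fold an FP binary opcode with constant (or undef)
/// operands, evaluating with APFloat in the default round-to-nearest-even
/// mode.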
SDValue SelectionDAG::foldConstantFPMath(unsigned Opcode, const SDLoc &DL,
                                         EVT VT, SDValue N1, SDValue N2) {
5072 // TODO: We don't do any constant folding for strict FP opcodes here, but we
5073 // should. That will require dealing with a potentially non-default
5074 // rounding mode, checking the "opStatus" return value from the APFloat
5075 // math calculations, and possibly other variations.
5076 auto *N1CFP = dyn_cast<ConstantFPSDNode>(N1.getNode());
5077 auto *N2CFP = dyn_cast<ConstantFPSDNode>(N2.getNode());
5078 if (N1CFP && N2CFP) {
5079 APFloat C1 = N1CFP->getValueAPF(), C2 = N2CFP->getValueAPF();
5080 switch (Opcode) {
5081 case ISD::FADD:
5082 C1.add(C2, APFloat::rmNearestTiesToEven);
5083 return getConstantFP(C1, DL, VT);
5084 case ISD::FSUB:
5085 C1.subtract(C2, APFloat::rmNearestTiesToEven);
5086 return getConstantFP(C1, DL, VT);
5087 case ISD::FMUL:
5088 C1.multiply(C2, APFloat::rmNearestTiesToEven);
5089 return getConstantFP(C1, DL, VT);
5090 case ISD::FDIV:
5091 C1.divide(C2, APFloat::rmNearestTiesToEven);
5092 return getConstantFP(C1, DL, VT);
5093 case ISD::FREM:
5094 C1.mod(C2);
5095 return getConstantFP(C1, DL, VT);
5096 case ISD::FCOPYSIGN:
5097 C1.copySign(C2);
5098 return getConstantFP(C1, DL, VT);
5099 default: break;
5100 }
5101 }
5102 if (N1CFP && Opcode == ISD::FP_ROUND) {
5103 APFloat C1 = N1CFP->getValueAPF(); // make copy
5104 bool Unused;
5105 // This can return overflow, underflow, or inexact; we don't care.
5106 // FIXME need to be more flexible about rounding mode.
5107 (void) C1.convert(EVTToAPFloatSemantics(VT), APFloat::rmNearestTiesToEven,
5108 &Unused);
5109 return getConstantFP(C1, DL, VT);
5110 }
5111
5112 switch (Opcode) {
5113 case ISD::FADD:
5114 case ISD::FSUB:
5115 case ISD::FMUL:
5116 case ISD::FDIV:
5117 case ISD::FREM:
    // If both operands are undef, the result is undef. If one operand is
    // undef, the result is NaN. This should match the behavior of the IR
    // optimizer.
5120 if (N1.isUndef() && N2.isUndef())
5121 return getUNDEF(VT);
5122 if (N1.isUndef() || N2.isUndef())
5123 return getConstantFP(APFloat::getNaN(EVTToAPFloatSemantics(VT)), DL, VT);
5124 }
5125 return SDValue();
5126 }
5127
SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
                              SDValue N1, SDValue N2, const SDNodeFlags Flags) {
5130 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
5131 ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2);
5132 ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
5133 ConstantFPSDNode *N2CFP = dyn_cast<ConstantFPSDNode>(N2);
5134
5135 // Canonicalize constant to RHS if commutative.
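  // e.g. (add 1, X) becomes (add X, 1), so the folds below only have to
  // look for constants on the RHS.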
5136 if (TLI->isCommutativeBinOp(Opcode)) {
5137 if (N1C && !N2C) {
5138 std::swap(N1C, N2C);
5139 std::swap(N1, N2);
5140 } else if (N1CFP && !N2CFP) {
5141 std::swap(N1CFP, N2CFP);
5142 std::swap(N1, N2);
5143 }
5144 }
5145
5146 switch (Opcode) {
5147 default: break;
5148 case ISD::TokenFactor:
5149 assert(VT == MVT::Other && N1.getValueType() == MVT::Other &&
5150 N2.getValueType() == MVT::Other && "Invalid token factor!");
5151 // Fold trivial token factors.
5152 if (N1.getOpcode() == ISD::EntryToken) return N2;
5153 if (N2.getOpcode() == ISD::EntryToken) return N1;
5154 if (N1 == N2) return N1;
5155 break;
5156 case ISD::BUILD_VECTOR: {
5157 // Attempt to simplify BUILD_VECTOR.
5158 SDValue Ops[] = {N1, N2};
5159 if (SDValue V = FoldBUILD_VECTOR(DL, VT, Ops, *this))
5160 return V;
5161 break;
5162 }
5163 case ISD::CONCAT_VECTORS: {
5164 SDValue Ops[] = {N1, N2};
5165 if (SDValue V = foldCONCAT_VECTORS(DL, VT, Ops, *this))
5166 return V;
5167 break;
5168 }
5169 case ISD::AND:
5170 assert(VT.isInteger() && "This operator does not apply to FP types!");
5171 assert(N1.getValueType() == N2.getValueType() &&
5172 N1.getValueType() == VT && "Binary operator types must match!");
5173 // (X & 0) -> 0. This commonly occurs when legalizing i64 values, so it's
5174 // worth handling here.
5175 if (N2C && N2C->isNullValue())
5176 return N2;
5177 if (N2C && N2C->isAllOnesValue()) // X & -1 -> X
5178 return N1;
5179 break;
5180 case ISD::OR:
5181 case ISD::XOR:
5182 case ISD::ADD:
5183 case ISD::SUB:
5184 assert(VT.isInteger() && "This operator does not apply to FP types!");
5185 assert(N1.getValueType() == N2.getValueType() &&
5186 N1.getValueType() == VT && "Binary operator types must match!");
5187 // (X ^|+- 0) -> X. This commonly occurs when legalizing i64 values, so
5188 // it's worth handling here.
5189 if (N2C && N2C->isNullValue())
5190 return N1;
5191 break;
5192 case ISD::UDIV:
5193 case ISD::UREM:
5194 case ISD::MULHU:
5195 case ISD::MULHS:
5196 case ISD::MUL:
5197 case ISD::SDIV:
5198 case ISD::SREM:
5199 case ISD::SMIN:
5200 case ISD::SMAX:
5201 case ISD::UMIN:
5202 case ISD::UMAX:
5203 case ISD::SADDSAT:
5204 case ISD::SSUBSAT:
5205 case ISD::UADDSAT:
5206 case ISD::USUBSAT:
5207 assert(VT.isInteger() && "This operator does not apply to FP types!");
5208 assert(N1.getValueType() == N2.getValueType() &&
5209 N1.getValueType() == VT && "Binary operator types must match!");
5210 break;
5211 case ISD::FADD:
5212 case ISD::FSUB:
5213 case ISD::FMUL:
5214 case ISD::FDIV:
5215 case ISD::FREM:
5216 assert(VT.isFloatingPoint() && "This operator only applies to FP types!");
5217 assert(N1.getValueType() == N2.getValueType() &&
5218 N1.getValueType() == VT && "Binary operator types must match!");
5219 if (SDValue V = simplifyFPBinop(Opcode, N1, N2))
5220 return V;
5221 break;
5222 case ISD::FCOPYSIGN: // N1 and result must match. N1/N2 need not match.
5223 assert(N1.getValueType() == VT &&
5224 N1.getValueType().isFloatingPoint() &&
5225 N2.getValueType().isFloatingPoint() &&
5226 "Invalid FCOPYSIGN!");
5227 break;
5228 case ISD::SHL:
5229 case ISD::SRA:
5230 case ISD::SRL:
5231 if (SDValue V = simplifyShift(N1, N2))
5232 return V;
5233 LLVM_FALLTHROUGH;
5234 case ISD::ROTL:
5235 case ISD::ROTR:
5236 assert(VT == N1.getValueType() &&
5237 "Shift operators return type must be the same as their first arg");
5238 assert(VT.isInteger() && N2.getValueType().isInteger() &&
5239 "Shifts only work on integers");
    assert((!VT.isVector() || VT == N2.getValueType()) &&
           "Vector shift amounts must have the same type as their first arg");
5242 // Verify that the shift amount VT is big enough to hold valid shift
5243 // amounts. This catches things like trying to shift an i1024 value by an
5244 // i8, which is easy to fall into in generic code that uses
5245 // TLI.getShiftAmount().
5246 assert(N2.getValueSizeInBits() >= Log2_32_Ceil(N1.getValueSizeInBits()) &&
5247 "Invalid use of small shift amount with oversized value!");
5248
5249 // Always fold shifts of i1 values so the code generator doesn't need to
5250 // handle them. Since we know the size of the shift has to be less than the
5251 // size of the value, the shift/rotate count is guaranteed to be zero.
5252 if (VT == MVT::i1)
5253 return N1;
5254 if (N2C && N2C->isNullValue())
5255 return N1;
5256 break;
5257 case ISD::FP_ROUND:
5258 assert(VT.isFloatingPoint() &&
5259 N1.getValueType().isFloatingPoint() &&
5260 VT.bitsLE(N1.getValueType()) &&
5261 N2C && (N2C->getZExtValue() == 0 || N2C->getZExtValue() == 1) &&
5262 "Invalid FP_ROUND!");
5263 if (N1.getValueType() == VT) return N1; // noop conversion.
5264 break;
5265 case ISD::AssertSext:
5266 case ISD::AssertZext: {
5267 EVT EVT = cast<VTSDNode>(N2)->getVT();
5268 assert(VT == N1.getValueType() && "Not an inreg extend!");
5269 assert(VT.isInteger() && EVT.isInteger() &&
5270 "Cannot *_EXTEND_INREG FP types");
5271 assert(!EVT.isVector() &&
5272 "AssertSExt/AssertZExt type should be the vector element type "
5273 "rather than the vector type!");
5274 assert(EVT.bitsLE(VT.getScalarType()) && "Not extending!");
5275 if (VT.getScalarType() == EVT) return N1; // noop assertion.
5276 break;
5277 }
5278 case ISD::SIGN_EXTEND_INREG: {
5279 EVT EVT = cast<VTSDNode>(N2)->getVT();
5280 assert(VT == N1.getValueType() && "Not an inreg extend!");
5281 assert(VT.isInteger() && EVT.isInteger() &&
5282 "Cannot *_EXTEND_INREG FP types");
5283 assert(EVT.isVector() == VT.isVector() &&
5284 "SIGN_EXTEND_INREG type should be vector iff the operand "
5285 "type is vector!");
5286 assert((!EVT.isVector() ||
5287 EVT.getVectorNumElements() == VT.getVectorNumElements()) &&
5288 "Vector element counts must match in SIGN_EXTEND_INREG");
5289 assert(EVT.bitsLE(VT) && "Not extending!");
5290 if (EVT == VT) return N1; // Not actually extending
5291
5292 auto SignExtendInReg = [&](APInt Val, llvm::EVT ConstantVT) {
5293 unsigned FromBits = EVT.getScalarSizeInBits();
5294 Val <<= Val.getBitWidth() - FromBits;
5295 Val.ashrInPlace(Val.getBitWidth() - FromBits);
5296 return getConstant(Val, DL, ConstantVT);
5297 };
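    // e.g. sign-extending the low 8 bits of the i32 constant 0x80 in-reg
    // yields 0xFFFFFF80.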
5298
5299 if (N1C) {
5300 const APInt &Val = N1C->getAPIntValue();
5301 return SignExtendInReg(Val, VT);
5302 }
5303 if (ISD::isBuildVectorOfConstantSDNodes(N1.getNode())) {
5304 SmallVector<SDValue, 8> Ops;
5305 llvm::EVT OpVT = N1.getOperand(0).getValueType();
5306 for (int i = 0, e = VT.getVectorNumElements(); i != e; ++i) {
5307 SDValue Op = N1.getOperand(i);
5308 if (Op.isUndef()) {
5309 Ops.push_back(getUNDEF(OpVT));
5310 continue;
5311 }
5312 ConstantSDNode *C = cast<ConstantSDNode>(Op);
5313 APInt Val = C->getAPIntValue();
5314 Ops.push_back(SignExtendInReg(Val, OpVT));
5315 }
5316 return getBuildVector(VT, DL, Ops);
5317 }
5318 break;
5319 }
5320 case ISD::EXTRACT_VECTOR_ELT:
    assert(VT.getSizeInBits() >= N1.getValueType().getScalarSizeInBits() &&
           "The result of EXTRACT_VECTOR_ELT must be at least as wide as the "
           "element type of the vector.");
5324
    // Extracting from an undefined value or using an undefined index is
    // undefined.
5326 if (N1.isUndef() || N2.isUndef())
5327 return getUNDEF(VT);
5328
5329 // EXTRACT_VECTOR_ELT of out-of-bounds element is an UNDEF
5330 if (N2C && N2C->getAPIntValue().uge(N1.getValueType().getVectorNumElements()))
5331 return getUNDEF(VT);
5332
5333 // EXTRACT_VECTOR_ELT of CONCAT_VECTORS is often formed while lowering is
5334 // expanding copies of large vectors from registers.
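    // e.g. extracting element 5 of a CONCAT_VECTORS of two v4i32 operands
    // becomes an extract of element 5 % 4 == 1 from the second operand.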
5335 if (N2C &&
5336 N1.getOpcode() == ISD::CONCAT_VECTORS &&
5337 N1.getNumOperands() > 0) {
5338 unsigned Factor =
5339 N1.getOperand(0).getValueType().getVectorNumElements();
5340 return getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT,
5341 N1.getOperand(N2C->getZExtValue() / Factor),
5342 getConstant(N2C->getZExtValue() % Factor, DL,
5343 N2.getValueType()));
5344 }
5345
5346 // EXTRACT_VECTOR_ELT of BUILD_VECTOR is often formed while lowering is
5347 // expanding large vector constants.
5348 if (N2C && N1.getOpcode() == ISD::BUILD_VECTOR) {
5349 SDValue Elt = N1.getOperand(N2C->getZExtValue());
5350
5351 if (VT != Elt.getValueType())
5352 // If the vector element type is not legal, the BUILD_VECTOR operands
5353 // are promoted and implicitly truncated, and the result implicitly
5354 // extended. Make that explicit here.
5355 Elt = getAnyExtOrTrunc(Elt, DL, VT);
5356
5357 return Elt;
5358 }
5359
5360 // EXTRACT_VECTOR_ELT of INSERT_VECTOR_ELT is often formed when vector
5361 // operations are lowered to scalars.
5362 if (N1.getOpcode() == ISD::INSERT_VECTOR_ELT) {
      // If the indices are the same, return the inserted element. Otherwise,
      // if the indices are known to differ, extract the element from the
      // original vector.
5366 SDValue N1Op2 = N1.getOperand(2);
5367 ConstantSDNode *N1Op2C = dyn_cast<ConstantSDNode>(N1Op2);
5368
5369 if (N1Op2C && N2C) {
5370 if (N1Op2C->getZExtValue() == N2C->getZExtValue()) {
5371 if (VT == N1.getOperand(1).getValueType())
5372 return N1.getOperand(1);
5373 else
5374 return getSExtOrTrunc(N1.getOperand(1), DL, VT);
5375 }
5376
5377 return getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, N1.getOperand(0), N2);
5378 }
5379 }
5380
5381 // EXTRACT_VECTOR_ELT of v1iX EXTRACT_SUBVECTOR could be formed
5382 // when vector types are scalarized and v1iX is legal.
5383 // vextract (v1iX extract_subvector(vNiX, Idx)) -> vextract(vNiX,Idx)
5384 if (N1.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
5385 N1.getValueType().getVectorNumElements() == 1) {
5386 return getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, N1.getOperand(0),
5387 N1.getOperand(1));
5388 }
5389 break;
5390 case ISD::EXTRACT_ELEMENT:
5391 assert(N2C && (unsigned)N2C->getZExtValue() < 2 && "Bad EXTRACT_ELEMENT!");
5392 assert(!N1.getValueType().isVector() && !VT.isVector() &&
5393 (N1.getValueType().isInteger() == VT.isInteger()) &&
5394 N1.getValueType() != VT &&
5395 "Wrong types for EXTRACT_ELEMENT!");
5396
5397 // EXTRACT_ELEMENT of BUILD_PAIR is often formed while legalize is expanding
5398 // 64-bit integers into 32-bit parts. Instead of building the extract of
5399 // the BUILD_PAIR, only to have legalize rip it apart, just do it now.
5400 if (N1.getOpcode() == ISD::BUILD_PAIR)
5401 return N1.getOperand(N2C->getZExtValue());
5402
5403 // EXTRACT_ELEMENT of a constant int is also very common.
5404 if (N1C) {
5405 unsigned ElementSize = VT.getSizeInBits();
5406 unsigned Shift = ElementSize * N2C->getZExtValue();
5407 APInt ShiftedVal = N1C->getAPIntValue().lshr(Shift);
5408 return getConstant(ShiftedVal.trunc(ElementSize), DL, VT);
5409 }
5410 break;
5411 case ISD::EXTRACT_SUBVECTOR:
5412 if (VT.isSimple() && N1.getValueType().isSimple()) {
      assert(VT.isVector() && N1.getValueType().isVector() &&
             "Extract subvector VTs must be vectors!");
5415 assert(VT.getVectorElementType() ==
5416 N1.getValueType().getVectorElementType() &&
5417 "Extract subvector VTs must have the same element type!");
5418 assert(VT.getSimpleVT() <= N1.getSimpleValueType() &&
5419 "Extract subvector must be from larger vector to smaller vector!");
5420
5421 if (N2C) {
5422 assert((VT.getVectorNumElements() + N2C->getZExtValue()
5423 <= N1.getValueType().getVectorNumElements())
5424 && "Extract subvector overflow!");
5425 }
5426
5427 // Trivial extraction.
5428 if (VT.getSimpleVT() == N1.getSimpleValueType())
5429 return N1;
5430
5431 // EXTRACT_SUBVECTOR of an UNDEF is an UNDEF.
5432 if (N1.isUndef())
5433 return getUNDEF(VT);
5434
5435 // EXTRACT_SUBVECTOR of CONCAT_VECTOR can be simplified if the pieces of
5436 // the concat have the same type as the extract.
5437 if (N2C && N1.getOpcode() == ISD::CONCAT_VECTORS &&
5438 N1.getNumOperands() > 0 &&
5439 VT == N1.getOperand(0).getValueType()) {
5440 unsigned Factor = VT.getVectorNumElements();
5441 return N1.getOperand(N2C->getZExtValue() / Factor);
5442 }
5443
5444 // EXTRACT_SUBVECTOR of INSERT_SUBVECTOR is often created
5445 // during shuffle legalization.
5446 if (N1.getOpcode() == ISD::INSERT_SUBVECTOR && N2 == N1.getOperand(2) &&
5447 VT == N1.getOperand(1).getValueType())
5448 return N1.getOperand(1);
5449 }
5450 break;
5451 }
5452
5453 // Perform trivial constant folding.
5454 if (SDValue SV =
5455 FoldConstantArithmetic(Opcode, DL, VT, N1.getNode(), N2.getNode()))
5456 return SV;
5457
5458 if (SDValue V = foldConstantFPMath(Opcode, DL, VT, N1, N2))
5459 return V;
5460
5461 // Canonicalize an UNDEF to the RHS, even over a constant.
5462 if (N1.isUndef()) {
5463 if (TLI->isCommutativeBinOp(Opcode)) {
5464 std::swap(N1, N2);
5465 } else {
5466 switch (Opcode) {
5467 case ISD::SIGN_EXTEND_INREG:
5468 case ISD::SUB:
5469 return getUNDEF(VT); // fold op(undef, arg2) -> undef
5470 case ISD::UDIV:
5471 case ISD::SDIV:
5472 case ISD::UREM:
5473 case ISD::SREM:
5474 case ISD::SSUBSAT:
5475 case ISD::USUBSAT:
5476 return getConstant(0, DL, VT); // fold op(undef, arg2) -> 0
5477 }
5478 }
5479 }
5480
5481 // Fold a bunch of operators when the RHS is undef.
5482 if (N2.isUndef()) {
5483 switch (Opcode) {
5484 case ISD::XOR:
5485 if (N1.isUndef())
5486 // Handle undef ^ undef -> 0 special case. This is a common
5487 // idiom (misuse).
5488 return getConstant(0, DL, VT);
5489 LLVM_FALLTHROUGH;
5490 case ISD::ADD:
5491 case ISD::SUB:
5492 case ISD::UDIV:
5493 case ISD::SDIV:
5494 case ISD::UREM:
5495 case ISD::SREM:
5496 return getUNDEF(VT); // fold op(arg1, undef) -> undef
5497 case ISD::MUL:
5498 case ISD::AND:
5499 case ISD::SSUBSAT:
5500 case ISD::USUBSAT:
5501 return getConstant(0, DL, VT); // fold op(arg1, undef) -> 0
5502 case ISD::OR:
5503 case ISD::SADDSAT:
5504 case ISD::UADDSAT:
5505 return getAllOnesConstant(DL, VT);
5506 }
5507 }
5508
5509 // Memoize this node if possible.
5510 SDNode *N;
5511 SDVTList VTs = getVTList(VT);
5512 SDValue Ops[] = {N1, N2};
5513 if (VT != MVT::Glue) {
5514 FoldingSetNodeID ID;
5515 AddNodeIDNode(ID, Opcode, VTs, Ops);
5516 void *IP = nullptr;
5517 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) {
5518 E->intersectFlagsWith(Flags);
5519 return SDValue(E, 0);
5520 }
5521
5522 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
5523 N->setFlags(Flags);
5524 createOperands(N, Ops);
5525 CSEMap.InsertNode(N, IP);
5526 } else {
5527 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
5528 createOperands(N, Ops);
5529 }
5530
5531 InsertNode(N);
5532 SDValue V = SDValue(N, 0);
5533 NewSDValueDbgMsg(V, "Creating new node: ", this);
5534 return V;
5535 }
5536
SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
                              SDValue N1, SDValue N2, SDValue N3,
                              const SDNodeFlags Flags) {
5540 // Perform various simplifications.
5541 switch (Opcode) {
5542 case ISD::FMA: {
5543 assert(VT.isFloatingPoint() && "This operator only applies to FP types!");
5544 assert(N1.getValueType() == VT && N2.getValueType() == VT &&
5545 N3.getValueType() == VT && "FMA types must match!");
5546 ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
5547 ConstantFPSDNode *N2CFP = dyn_cast<ConstantFPSDNode>(N2);
5548 ConstantFPSDNode *N3CFP = dyn_cast<ConstantFPSDNode>(N3);
5549 if (N1CFP && N2CFP && N3CFP) {
5550 APFloat V1 = N1CFP->getValueAPF();
5551 const APFloat &V2 = N2CFP->getValueAPF();
5552 const APFloat &V3 = N3CFP->getValueAPF();
5553 V1.fusedMultiplyAdd(V2, V3, APFloat::rmNearestTiesToEven);
5554 return getConstantFP(V1, DL, VT);
5555 }
5556 break;
5557 }
5558 case ISD::BUILD_VECTOR: {
5559 // Attempt to simplify BUILD_VECTOR.
5560 SDValue Ops[] = {N1, N2, N3};
5561 if (SDValue V = FoldBUILD_VECTOR(DL, VT, Ops, *this))
5562 return V;
5563 break;
5564 }
5565 case ISD::CONCAT_VECTORS: {
5566 SDValue Ops[] = {N1, N2, N3};
5567 if (SDValue V = foldCONCAT_VECTORS(DL, VT, Ops, *this))
5568 return V;
5569 break;
5570 }
5571 case ISD::SETCC: {
5572 assert(VT.isInteger() && "SETCC result type must be an integer!");
5573 assert(N1.getValueType() == N2.getValueType() &&
5574 "SETCC operands must have the same type!");
5575 assert(VT.isVector() == N1.getValueType().isVector() &&
5576 "SETCC type should be vector iff the operand type is vector!");
5577 assert((!VT.isVector() ||
5578 VT.getVectorNumElements() == N1.getValueType().getVectorNumElements()) &&
5579 "SETCC vector element counts must match!");
5580 // Use FoldSetCC to simplify SETCC's.
5581 if (SDValue V = FoldSetCC(VT, N1, N2, cast<CondCodeSDNode>(N3)->get(), DL))
5582 return V;
5583 // Vector constant folding.
5584 SDValue Ops[] = {N1, N2, N3};
5585 if (SDValue V = FoldConstantVectorArithmetic(Opcode, DL, VT, Ops)) {
5586 NewSDValueDbgMsg(V, "New node vector constant folding: ", this);
5587 return V;
5588 }
5589 break;
5590 }
5591 case ISD::SELECT:
5592 case ISD::VSELECT:
5593 if (SDValue V = simplifySelect(N1, N2, N3))
5594 return V;
5595 break;
5596 case ISD::VECTOR_SHUFFLE:
5597 llvm_unreachable("should use getVectorShuffle constructor!");
5598 case ISD::INSERT_VECTOR_ELT: {
5599 ConstantSDNode *N3C = dyn_cast<ConstantSDNode>(N3);
5600 // INSERT_VECTOR_ELT into out-of-bounds element is an UNDEF
5601 if (N3C && N3C->getZExtValue() >= N1.getValueType().getVectorNumElements())
5602 return getUNDEF(VT);
5603
    // An undefined index can be assumed out-of-bounds, so that's UNDEF too.
5605 if (N3.isUndef())
5606 return getUNDEF(VT);
5607
5608 // If the inserted element is an UNDEF, just use the input vector.
5609 if (N2.isUndef())
5610 return N1;
5611
5612 break;
5613 }
5614 case ISD::INSERT_SUBVECTOR: {
5615 // Inserting undef into undef is still undef.
5616 if (N1.isUndef() && N2.isUndef())
5617 return getUNDEF(VT);
5618 SDValue Index = N3;
5619 if (VT.isSimple() && N1.getValueType().isSimple()
5620 && N2.getValueType().isSimple()) {
      assert(VT.isVector() && N1.getValueType().isVector() &&
             N2.getValueType().isVector() &&
             "Insert subvector VTs must be vectors!");
5624 assert(VT == N1.getValueType() &&
5625 "Dest and insert subvector source types must match!");
5626 assert(N2.getSimpleValueType() <= N1.getSimpleValueType() &&
5627 "Insert subvector must be from smaller vector to larger vector!");
5628 if (isa<ConstantSDNode>(Index)) {
5629 assert((N2.getValueType().getVectorNumElements() +
5630 cast<ConstantSDNode>(Index)->getZExtValue()
5631 <= VT.getVectorNumElements())
5632 && "Insert subvector overflow!");
5633 }
5634
5635 // Trivial insertion.
5636 if (VT.getSimpleVT() == N2.getSimpleValueType())
5637 return N2;
5638
5639 // If this is an insert of an extracted vector into an undef vector, we
5640 // can just use the input to the extract.
5641 if (N1.isUndef() && N2.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
5642 N2.getOperand(1) == N3 && N2.getOperand(0).getValueType() == VT)
5643 return N2.getOperand(0);
5644 }
5645 break;
5646 }
5647 case ISD::BITCAST:
5648 // Fold bit_convert nodes from a type to themselves.
5649 if (N1.getValueType() == VT)
5650 return N1;
5651 break;
5652 }
5653
5654 // Memoize node if it doesn't produce a flag.
5655 SDNode *N;
5656 SDVTList VTs = getVTList(VT);
5657 SDValue Ops[] = {N1, N2, N3};
5658 if (VT != MVT::Glue) {
5659 FoldingSetNodeID ID;
5660 AddNodeIDNode(ID, Opcode, VTs, Ops);
5661 void *IP = nullptr;
5662 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) {
5663 E->intersectFlagsWith(Flags);
5664 return SDValue(E, 0);
5665 }
5666
5667 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
5668 N->setFlags(Flags);
5669 createOperands(N, Ops);
5670 CSEMap.InsertNode(N, IP);
5671 } else {
5672 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
5673 createOperands(N, Ops);
5674 }
5675
5676 InsertNode(N);
5677 SDValue V = SDValue(N, 0);
5678 NewSDValueDbgMsg(V, "Creating new node: ", this);
5679 return V;
5680 }
5681
SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
                              SDValue N1, SDValue N2, SDValue N3, SDValue N4) {
5684 SDValue Ops[] = { N1, N2, N3, N4 };
5685 return getNode(Opcode, DL, VT, Ops);
5686 }
5687
SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
                              SDValue N1, SDValue N2, SDValue N3, SDValue N4,
                              SDValue N5) {
5691 SDValue Ops[] = { N1, N2, N3, N4, N5 };
5692 return getNode(Opcode, DL, VT, Ops);
5693 }
5694
5695 /// getStackArgumentTokenFactor - Compute a TokenFactor to force all
5696 /// the incoming stack arguments to be loaded from the stack.
SDValue SelectionDAG::getStackArgumentTokenFactor(SDValue Chain) {
5698 SmallVector<SDValue, 8> ArgChains;
5699
5700 // Include the original chain at the beginning of the list. When this is
5701 // used by target LowerCall hooks, this helps legalize find the
5702 // CALLSEQ_BEGIN node.
5703 ArgChains.push_back(Chain);
5704
5705 // Add a chain value for each stack argument.
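  // Incoming stack arguments live in fixed frame objects, which have
  // negative frame indices; the check below selects exactly those loads.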
5706 for (SDNode::use_iterator U = getEntryNode().getNode()->use_begin(),
5707 UE = getEntryNode().getNode()->use_end(); U != UE; ++U)
5708 if (LoadSDNode *L = dyn_cast<LoadSDNode>(*U))
5709 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(L->getBasePtr()))
5710 if (FI->getIndex() < 0)
5711 ArgChains.push_back(SDValue(L, 1));
5712
5713 // Build a tokenfactor for all the chains.
5714 return getNode(ISD::TokenFactor, SDLoc(Chain), MVT::Other, ArgChains);
5715 }
5716
5717 /// getMemsetValue - Vectorized representation of the memset value
5718 /// operand.
static SDValue getMemsetValue(SDValue Value, EVT VT, SelectionDAG &DAG,
                              const SDLoc &dl) {
5721 assert(!Value.isUndef());
5722
5723 unsigned NumBits = VT.getScalarSizeInBits();
5724 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Value)) {
5725 assert(C->getAPIntValue().getBitWidth() == 8);
5726 APInt Val = APInt::getSplat(NumBits, C->getAPIntValue());
5727 if (VT.isInteger()) {
5728 bool IsOpaque = VT.getSizeInBits() > 64 ||
5729 !DAG.getTargetLoweringInfo().isLegalStoreImmediate(C->getSExtValue());
5730 return DAG.getConstant(Val, dl, VT, false, IsOpaque);
5731 }
5732 return DAG.getConstantFP(APFloat(DAG.EVTToAPFloatSemantics(VT), Val), dl,
5733 VT);
5734 }
5735
5736 assert(Value.getValueType() == MVT::i8 && "memset with non-byte fill value?");
5737 EVT IntVT = VT.getScalarType();
5738 if (!IntVT.isInteger())
5739 IntVT = EVT::getIntegerVT(*DAG.getContext(), IntVT.getSizeInBits());
5740
5741 Value = DAG.getNode(ISD::ZERO_EXTEND, dl, IntVT, Value);
5742 if (NumBits > 8) {
5743 // Use a multiplication with 0x010101... to extend the input to the
5744 // required length.
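    // e.g. splatting an i8 value to i32 multiplies it by 0x01010101, so
    // 0xAB becomes 0xABABABAB.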
5745 APInt Magic = APInt::getSplat(NumBits, APInt(8, 0x01));
5746 Value = DAG.getNode(ISD::MUL, dl, IntVT, Value,
5747 DAG.getConstant(Magic, dl, IntVT));
5748 }
5749
5750 if (VT != Value.getValueType() && !VT.isInteger())
5751 Value = DAG.getBitcast(VT.getScalarType(), Value);
5752 if (VT != Value.getValueType())
5753 Value = DAG.getSplatBuildVector(VT, dl, Value);
5754
5755 return Value;
5756 }
5757
5758 /// getMemsetStringVal - Similar to getMemsetValue. Except this is only
5759 /// used when a memcpy is turned into a memset when the source is a constant
5760 /// string ptr.
static SDValue getMemsetStringVal(EVT VT, const SDLoc &dl, SelectionDAG &DAG,
                                  const TargetLowering &TLI,
                                  const ConstantDataArraySlice &Slice) {
5764 // Handle vector with all elements zero.
5765 if (Slice.Array == nullptr) {
5766 if (VT.isInteger())
5767 return DAG.getConstant(0, dl, VT);
5768 else if (VT == MVT::f32 || VT == MVT::f64 || VT == MVT::f128)
5769 return DAG.getConstantFP(0.0, dl, VT);
5770 else if (VT.isVector()) {
5771 unsigned NumElts = VT.getVectorNumElements();
5772 MVT EltVT = (VT.getVectorElementType() == MVT::f32) ? MVT::i32 : MVT::i64;
5773 return DAG.getNode(ISD::BITCAST, dl, VT,
5774 DAG.getConstant(0, dl,
5775 EVT::getVectorVT(*DAG.getContext(),
5776 EltVT, NumElts)));
5777 } else
5778 llvm_unreachable("Expected type!");
5779 }
5780
5781 assert(!VT.isVector() && "Can't handle vector type here!");
5782 unsigned NumVTBits = VT.getSizeInBits();
5783 unsigned NumVTBytes = NumVTBits / 8;
5784 unsigned NumBytes = std::min(NumVTBytes, unsigned(Slice.Length));
5785
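  // Pack the string bytes into an integer, respecting the target's
  // endianness; e.g. the bytes "abcd" become the i32 value 0x64636261 on a
  // little-endian target and 0x61626364 on a big-endian one.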
5786 APInt Val(NumVTBits, 0);
5787 if (DAG.getDataLayout().isLittleEndian()) {
5788 for (unsigned i = 0; i != NumBytes; ++i)
5789 Val |= (uint64_t)(unsigned char)Slice[i] << i*8;
5790 } else {
5791 for (unsigned i = 0; i != NumBytes; ++i)
5792 Val |= (uint64_t)(unsigned char)Slice[i] << (NumVTBytes-i-1)*8;
5793 }
5794
5795 // If the "cost" of materializing the integer immediate is less than the cost
5796 // of a load, then it is cost effective to turn the load into the immediate.
5797 Type *Ty = VT.getTypeForEVT(*DAG.getContext());
5798 if (TLI.shouldConvertConstantLoadToIntImm(Val, Ty))
5799 return DAG.getConstant(Val, dl, VT);
5800 return SDValue(nullptr, 0);
5801 }
5802
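/// Return (add Base, Offset), materializing the integer offset as a constant
/// of Base's value type.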
SDValue SelectionDAG::getMemBasePlusOffset(SDValue Base, int64_t Offset,
                                           const SDLoc &DL,
                                           const SDNodeFlags Flags) {
5806 EVT VT = Base.getValueType();
5807 return getMemBasePlusOffset(Base, getConstant(Offset, DL, VT), DL, Flags);
5808 }
5809
SDValue SelectionDAG::getMemBasePlusOffset(SDValue Ptr, SDValue Offset,
                                           const SDLoc &DL,
                                           const SDNodeFlags Flags) {
5813 assert(Offset.getValueType().isInteger());
5814 EVT BasePtrVT = Ptr.getValueType();
5815 return getNode(ISD::ADD, DL, BasePtrVT, Ptr, Offset, Flags);
5816 }
5817
5818 /// Returns true if memcpy source is constant data.
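/// This matches either a GlobalAddress directly or the pattern
/// (add GlobalAddress, Constant), folding the constant into the offset used
/// to index the global's initializer.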
static bool isMemSrcFromConstant(SDValue Src, ConstantDataArraySlice &Slice) {
5820 uint64_t SrcDelta = 0;
5821 GlobalAddressSDNode *G = nullptr;
5822 if (Src.getOpcode() == ISD::GlobalAddress)
5823 G = cast<GlobalAddressSDNode>(Src);
5824 else if (Src.getOpcode() == ISD::ADD &&
5825 Src.getOperand(0).getOpcode() == ISD::GlobalAddress &&
5826 Src.getOperand(1).getOpcode() == ISD::Constant) {
5827 G = cast<GlobalAddressSDNode>(Src.getOperand(0));
5828 SrcDelta = cast<ConstantSDNode>(Src.getOperand(1))->getZExtValue();
5829 }
5830 if (!G)
5831 return false;
5832
5833 return getConstantDataArrayInfo(G->getGlobal(), Slice, 8,
5834 SrcDelta + G->getOffset());
5835 }
5836
static bool shouldLowerMemFuncForSize(const MachineFunction &MF,
                                      SelectionDAG &DAG) {
5839 // On Darwin, -Os means optimize for size without hurting performance, so
5840 // only really optimize for size when -Oz (MinSize) is used.
5841 if (MF.getTarget().getTargetTriple().isOSDarwin())
5842 return MF.getFunction().hasMinSize();
5843 return DAG.shouldOptForSize();
5844 }
5845
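/// Re-chain the loads and stores emitted for an inlined memcpy so that all
/// loads in the [From, To) range complete (via a TokenFactor) before any of
/// the corresponding stores is issued.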
static void chainLoadsAndStoresForMemcpy(SelectionDAG &DAG, const SDLoc &dl,
                                         SmallVector<SDValue, 32> &OutChains,
                                         unsigned From, unsigned To,
                                         SmallVector<SDValue, 16> &OutLoadChains,
                                         SmallVector<SDValue, 16> &OutStoreChains) {
5850 assert(OutLoadChains.size() && "Missing loads in memcpy inlining");
5851 assert(OutStoreChains.size() && "Missing stores in memcpy inlining");
5852 SmallVector<SDValue, 16> GluedLoadChains;
5853 for (unsigned i = From; i < To; ++i) {
5854 OutChains.push_back(OutLoadChains[i]);
5855 GluedLoadChains.push_back(OutLoadChains[i]);
5856 }
5857
5858 // Chain for all loads.
5859 SDValue LoadToken = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
5860 GluedLoadChains);
5861
5862 for (unsigned i = From; i < To; ++i) {
    StoreSDNode *ST = cast<StoreSDNode>(OutStoreChains[i]);
5864 SDValue NewStore = DAG.getTruncStore(LoadToken, dl, ST->getValue(),
5865 ST->getBasePtr(), ST->getMemoryVT(),
5866 ST->getMemOperand());
5867 OutChains.push_back(NewStore);
5868 }
5869 }
5870
static SDValue getMemcpyLoadsAndStores(SelectionDAG &DAG, const SDLoc &dl,
                                       SDValue Chain, SDValue Dst, SDValue Src,
                                       uint64_t Size, unsigned Alignment,
                                       bool isVol, bool AlwaysInline,
                                       MachinePointerInfo DstPtrInfo,
                                       MachinePointerInfo SrcPtrInfo) {
5877 // Turn a memcpy of undef into a nop.
5878 // FIXME: We need to honor volatile even if Src is undef.
5879 if (Src.isUndef())
5880 return Chain;
5881
5882 // Expand memcpy to a series of load and store ops if the size operand falls
5883 // below a certain threshold.
5884 // TODO: In the AlwaysInline case, if the size is big, generate a loop
5885 // rather than a potentially humongous number of loads and stores.
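// For example, a 16-byte memcpy on a 64-bit target might be expanded here
// into two i64 load/store pairs, or into a single 128-bit vector pair if
// findOptimalMemOpLowering decides the target supports that.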
5886 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
5887 const DataLayout &DL = DAG.getDataLayout();
5888 LLVMContext &C = *DAG.getContext();
5889 std::vector<EVT> MemOps;
5890 bool DstAlignCanChange = false;
5891 MachineFunction &MF = DAG.getMachineFunction();
5892 MachineFrameInfo &MFI = MF.getFrameInfo();
5893 bool OptSize = shouldLowerMemFuncForSize(MF, DAG);
5894 FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst);
5895 if (FI && !MFI.isFixedObjectIndex(FI->getIndex()))
5896 DstAlignCanChange = true;
5897 unsigned SrcAlign = DAG.InferPtrAlignment(Src);
5898 if (Alignment > SrcAlign)
5899 SrcAlign = Alignment;
5900 ConstantDataArraySlice Slice;
5901 bool CopyFromConstant = isMemSrcFromConstant(Src, Slice);
5902 bool isZeroConstant = CopyFromConstant && Slice.Array == nullptr;
5903 unsigned Limit = AlwaysInline ? ~0U : TLI.getMaxStoresPerMemcpy(OptSize);
5904
5905 if (!TLI.findOptimalMemOpLowering(
5906 MemOps, Limit, Size, (DstAlignCanChange ? 0 : Alignment),
5907 (isZeroConstant ? 0 : SrcAlign), /*IsMemset=*/false,
5908 /*ZeroMemset=*/false, /*MemcpyStrSrc=*/CopyFromConstant,
5909 /*AllowOverlap=*/!isVol, DstPtrInfo.getAddrSpace(),
5910 SrcPtrInfo.getAddrSpace(), MF.getFunction().getAttributes()))
5911 return SDValue();
5912
5913 if (DstAlignCanChange) {
5914 Type *Ty = MemOps[0].getTypeForEVT(C);
5915 unsigned NewAlign = (unsigned)DL.getABITypeAlignment(Ty);
5916
5917 // Don't promote to an alignment that would require dynamic stack
5918 // realignment.
5919 const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
5920 if (!TRI->needsStackRealignment(MF))
5921 while (NewAlign > Alignment &&
5922 DL.exceedsNaturalStackAlignment(Align(NewAlign)))
5923 NewAlign /= 2;
5924
5925 if (NewAlign > Alignment) {
5926 // Give the stack frame object a larger alignment if needed.
5927 if (MFI.getObjectAlignment(FI->getIndex()) < NewAlign)
5928 MFI.setObjectAlignment(FI->getIndex(), NewAlign);
5929 Alignment = NewAlign;
5930 }
5931 }
5932
5933 MachineMemOperand::Flags MMOFlags =
5934 isVol ? MachineMemOperand::MOVolatile : MachineMemOperand::MONone;
5935 SmallVector<SDValue, 16> OutLoadChains;
5936 SmallVector<SDValue, 16> OutStoreChains;
5937 SmallVector<SDValue, 32> OutChains;
5938 unsigned NumMemOps = MemOps.size();
5939 uint64_t SrcOff = 0, DstOff = 0;
5940 for (unsigned i = 0; i != NumMemOps; ++i) {
5941 EVT VT = MemOps[i];
5942 unsigned VTSize = VT.getSizeInBits() / 8;
5943 SDValue Value, Store;
5944
5945 if (VTSize > Size) {
5946 // Issuing an unaligned load / store pair that overlaps with the previous
5947 // pair. Adjust the offset accordingly.
5948 assert(i == NumMemOps-1 && i != 0);
5949 SrcOff -= VTSize - Size;
5950 DstOff -= VTSize - Size;
5951 }
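// Worked example: for Size = 7 with MemOps = {i32, i32}, the first pair
// covers bytes [0,4); on the second, VTSize (4) exceeds the remaining
// Size (3), so both offsets back up by one byte and the pair covers
// bytes [3,7), overlapping the previous store by one byte.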
5952
5953 if (CopyFromConstant &&
5954 (isZeroConstant || (VT.isInteger() && !VT.isVector()))) {
5955 // It's unlikely a store of a vector immediate can be done in a single
5956 // instruction. It would require a load from a constant pool first.
5957 // We only handle zero vectors here.
5958 // FIXME: Handle other cases where store of vector immediate is done in
5959 // a single instruction.
5960 ConstantDataArraySlice SubSlice;
5961 if (SrcOff < Slice.Length) {
5962 SubSlice = Slice;
5963 SubSlice.move(SrcOff);
5964 } else {
5965 // This is an out-of-bounds access and hence UB. Pretend we read zero.
5966 SubSlice.Array = nullptr;
5967 SubSlice.Offset = 0;
5968 SubSlice.Length = VTSize;
5969 }
5970 Value = getMemsetStringVal(VT, dl, DAG, TLI, SubSlice);
5971 if (Value.getNode()) {
5972 Store = DAG.getStore(
5973 Chain, dl, Value, DAG.getMemBasePlusOffset(Dst, DstOff, dl),
5974 DstPtrInfo.getWithOffset(DstOff), Alignment, MMOFlags);
5975 OutChains.push_back(Store);
5976 }
5977 }
5978
5979 if (!Store.getNode()) {
5980 // The type might not be legal for the target. This should only happen
5981 // if the type is smaller than a legal type, as on PPC, so the right
5982 // thing to do is generate a LoadExt/StoreTrunc pair. These simplify
5983 // to Load/Store if NVT==VT.
5984 // FIXME: does the case above also need this?
5985 EVT NVT = TLI.getTypeToTransformTo(C, VT);
5986 assert(NVT.bitsGE(VT));
5987
5988 bool isDereferenceable =
5989 SrcPtrInfo.getWithOffset(SrcOff).isDereferenceable(VTSize, C, DL);
5990 MachineMemOperand::Flags SrcMMOFlags = MMOFlags;
5991 if (isDereferenceable)
5992 SrcMMOFlags |= MachineMemOperand::MODereferenceable;
5993
5994 Value = DAG.getExtLoad(ISD::EXTLOAD, dl, NVT, Chain,
5995 DAG.getMemBasePlusOffset(Src, SrcOff, dl),
5996 SrcPtrInfo.getWithOffset(SrcOff), VT,
5997 MinAlign(SrcAlign, SrcOff), SrcMMOFlags);
5998 OutLoadChains.push_back(Value.getValue(1));
5999
6000 Store = DAG.getTruncStore(
6001 Chain, dl, Value, DAG.getMemBasePlusOffset(Dst, DstOff, dl),
6002 DstPtrInfo.getWithOffset(DstOff), VT, Alignment, MMOFlags);
6003 OutStoreChains.push_back(Store);
6004 }
6005 SrcOff += VTSize;
6006 DstOff += VTSize;
6007 Size -= VTSize;
6008 }
6009
6010 unsigned GluedLdStLimit = MaxLdStGlue == 0 ?
6011 TLI.getMaxGluedStoresPerMemcpy() : MaxLdStGlue;
6012 unsigned NumLdStInMemcpy = OutStoreChains.size();
6013
6014 if (NumLdStInMemcpy) {
6015 // A memcpy of constant data may have been converted to a memset; in that
6016 // case we will have only stores and no loads. In the absence of loads,
6017 // there is nothing to gang up.
6018 if ((GluedLdStLimit <= 1) || !EnableMemCpyDAGOpt) {
6019 // If the target does not care, just leave it as is.
6020 for (unsigned i = 0; i < NumLdStInMemcpy; ++i) {
6021 OutChains.push_back(OutLoadChains[i]);
6022 OutChains.push_back(OutStoreChains[i]);
6023 }
6024 } else {
6025 // Ld/St less than/equal limit set by target.
6026 if (NumLdStInMemcpy <= GluedLdStLimit) {
6027 chainLoadsAndStoresForMemcpy(DAG, dl, OutChains, 0,
6028 NumLdStInMemcpy, OutLoadChains,
6029 OutStoreChains);
6030 } else {
6031 unsigned NumberLdChain = NumLdStInMemcpy / GluedLdStLimit;
6032 unsigned RemainingLdStInMemcpy = NumLdStInMemcpy % GluedLdStLimit;
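// For example, with NumLdStInMemcpy = 10 and GluedLdStLimit = 4, the loop
// below glues the pairs in chunks [6,10) and then [2,6), walking backwards
// from the end; the residual handling afterwards picks up the leftover
// [0,2).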
6033 unsigned GlueIter = 0;
6034
6035 for (unsigned cnt = 0; cnt < NumberLdChain; ++cnt) {
6036 unsigned IndexFrom = NumLdStInMemcpy - GlueIter - GluedLdStLimit;
6037 unsigned IndexTo = NumLdStInMemcpy - GlueIter;
6038
6039 chainLoadsAndStoresForMemcpy(DAG, dl, OutChains, IndexFrom, IndexTo,
6040 OutLoadChains, OutStoreChains);
6041 GlueIter += GluedLdStLimit;
6042 }
6043
6044 // Residual ld/st.
6045 if (RemainingLdStInMemcpy) {
6046 chainLoadsAndStoresForMemcpy(DAG, dl, OutChains, 0,
6047 RemainingLdStInMemcpy, OutLoadChains,
6048 OutStoreChains);
6049 }
6050 }
6051 }
6052 }
6053 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
6054 }
6055
6056 static SDValue getMemmoveLoadsAndStores(SelectionDAG &DAG, const SDLoc &dl,
6057 SDValue Chain, SDValue Dst, SDValue Src,
6058 uint64_t Size, unsigned Align,
6059 bool isVol, bool AlwaysInline,
6060 MachinePointerInfo DstPtrInfo,
6061 MachinePointerInfo SrcPtrInfo) {
6062 // Turn a memmove of undef into a nop.
6063 // FIXME: We need to honor volatile even if Src is undef.
6064 if (Src.isUndef())
6065 return Chain;
6066
6067 // Expand memmove to a series of load and store ops if the size operand falls
6068 // below a certain threshold.
6069 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
6070 const DataLayout &DL = DAG.getDataLayout();
6071 LLVMContext &C = *DAG.getContext();
6072 std::vector<EVT> MemOps;
6073 bool DstAlignCanChange = false;
6074 MachineFunction &MF = DAG.getMachineFunction();
6075 MachineFrameInfo &MFI = MF.getFrameInfo();
6076 bool OptSize = shouldLowerMemFuncForSize(MF, DAG);
6077 FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst);
6078 if (FI && !MFI.isFixedObjectIndex(FI->getIndex()))
6079 DstAlignCanChange = true;
6080 unsigned SrcAlign = DAG.InferPtrAlignment(Src);
6081 if (Align > SrcAlign)
6082 SrcAlign = Align;
6083 unsigned Limit = AlwaysInline ? ~0U : TLI.getMaxStoresPerMemmove(OptSize);
6084 // FIXME: `AllowOverlap` should really be `!isVol` but there is a bug in
6085 // findOptimalMemOpLowering. Meanwhile, setting it to `false` produces the
6086 // correct code.
6087 bool AllowOverlap = false;
6088 if (!TLI.findOptimalMemOpLowering(
6089 MemOps, Limit, Size, (DstAlignCanChange ? 0 : Align), SrcAlign,
6090 /*IsMemset=*/false, /*ZeroMemset=*/false, /*MemcpyStrSrc=*/false,
6091 AllowOverlap, DstPtrInfo.getAddrSpace(), SrcPtrInfo.getAddrSpace(),
6092 MF.getFunction().getAttributes()))
6093 return SDValue();
6094
6095 if (DstAlignCanChange) {
6096 Type *Ty = MemOps[0].getTypeForEVT(C);
6097 unsigned NewAlign = (unsigned)DL.getABITypeAlignment(Ty);
6098 if (NewAlign > Align) {
6099 // Give the stack frame object a larger alignment if needed.
6100 if (MFI.getObjectAlignment(FI->getIndex()) < NewAlign)
6101 MFI.setObjectAlignment(FI->getIndex(), NewAlign);
6102 Align = NewAlign;
6103 }
6104 }
6105
6106 MachineMemOperand::Flags MMOFlags =
6107 isVol ? MachineMemOperand::MOVolatile : MachineMemOperand::MONone;
6108 uint64_t SrcOff = 0, DstOff = 0;
6109 SmallVector<SDValue, 8> LoadValues;
6110 SmallVector<SDValue, 8> LoadChains;
6111 SmallVector<SDValue, 8> OutChains;
6112 unsigned NumMemOps = MemOps.size();
6113 for (unsigned i = 0; i < NumMemOps; i++) {
6114 EVT VT = MemOps[i];
6115 unsigned VTSize = VT.getSizeInBits() / 8;
6116 SDValue Value;
6117
6118 bool isDereferenceable =
6119 SrcPtrInfo.getWithOffset(SrcOff).isDereferenceable(VTSize, C, DL);
6120 MachineMemOperand::Flags SrcMMOFlags = MMOFlags;
6121 if (isDereferenceable)
6122 SrcMMOFlags |= MachineMemOperand::MODereferenceable;
6123
6124 Value =
6125 DAG.getLoad(VT, dl, Chain, DAG.getMemBasePlusOffset(Src, SrcOff, dl),
6126 SrcPtrInfo.getWithOffset(SrcOff), SrcAlign, SrcMMOFlags);
6127 LoadValues.push_back(Value);
6128 LoadChains.push_back(Value.getValue(1));
6129 SrcOff += VTSize;
6130 }
6131 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, LoadChains);
6132 OutChains.clear();
6133 for (unsigned i = 0; i < NumMemOps; i++) {
6134 EVT VT = MemOps[i];
6135 unsigned VTSize = VT.getSizeInBits() / 8;
6136 SDValue Store;
6137
6138 Store = DAG.getStore(Chain, dl, LoadValues[i],
6139 DAG.getMemBasePlusOffset(Dst, DstOff, dl),
6140 DstPtrInfo.getWithOffset(DstOff), Align, MMOFlags);
6141 OutChains.push_back(Store);
6142 DstOff += VTSize;
6143 }
6144
6145 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
6146 }
6147
6148 /// Lower the call to 'memset' intrinsic function into a series of store
6149 /// operations.
6150 ///
6151 /// \param DAG Selection DAG where lowered code is placed.
6152 /// \param dl Link to corresponding IR location.
6153 /// \param Chain Control flow dependency.
6154 /// \param Dst Pointer to destination memory location.
6155 /// \param Src Value of byte to write into the memory.
6156 /// \param Size Number of bytes to write.
6157 /// \param Align Alignment of the destination in bytes.
6158 /// \param isVol True if destination is volatile.
6159 /// \param DstPtrInfo IR information on the memory pointer.
6160 /// \returns The new head of the control flow if lowering was successful;
6161 /// an empty SDValue otherwise.
6162 ///
6163 /// The function tries to replace the 'llvm.memset' intrinsic with several
6164 /// store operations and value calculation code. This is usually profitable
6165 /// for small memory sizes.
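/// For example, a memset of 16 bytes with the value 0xAB on a 64-bit target
/// is typically lowered to two i64 stores of the replicated pattern
/// 0xABABABABABABABAB rather than a call to the libc memset.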
6166 static SDValue getMemsetStores(SelectionDAG &DAG, const SDLoc &dl,
6167 SDValue Chain, SDValue Dst, SDValue Src,
6168 uint64_t Size, unsigned Align, bool isVol,
6169 MachinePointerInfo DstPtrInfo) {
6170 // Turn a memset of undef into a nop.
6171 // FIXME: We need to honor volatile even if Src is undef.
6172 if (Src.isUndef())
6173 return Chain;
6174
6175 // Expand memset to a series of load/store ops if the size operand
6176 // falls below a certain threshold.
6177 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
6178 std::vector<EVT> MemOps;
6179 bool DstAlignCanChange = false;
6180 MachineFunction &MF = DAG.getMachineFunction();
6181 MachineFrameInfo &MFI = MF.getFrameInfo();
6182 bool OptSize = shouldLowerMemFuncForSize(MF, DAG);
6183 FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst);
6184 if (FI && !MFI.isFixedObjectIndex(FI->getIndex()))
6185 DstAlignCanChange = true;
6186 bool IsZeroVal =
6187 isa<ConstantSDNode>(Src) && cast<ConstantSDNode>(Src)->isNullValue();
6188 if (!TLI.findOptimalMemOpLowering(
6189 MemOps, TLI.getMaxStoresPerMemset(OptSize), Size,
6190 (DstAlignCanChange ? 0 : Align), 0, /*IsMemset=*/true,
6191 /*ZeroMemset=*/IsZeroVal, /*MemcpyStrSrc=*/false,
6192 /*AllowOverlap=*/!isVol, DstPtrInfo.getAddrSpace(), ~0u,
6193 MF.getFunction().getAttributes()))
6194 return SDValue();
6195
6196 if (DstAlignCanChange) {
6197 Type *Ty = MemOps[0].getTypeForEVT(*DAG.getContext());
6198 unsigned NewAlign = (unsigned)DAG.getDataLayout().getABITypeAlignment(Ty);
6199 if (NewAlign > Align) {
6200 // Give the stack frame object a larger alignment if needed.
6201 if (MFI.getObjectAlignment(FI->getIndex()) < NewAlign)
6202 MFI.setObjectAlignment(FI->getIndex(), NewAlign);
6203 Align = NewAlign;
6204 }
6205 }
6206
6207 SmallVector<SDValue, 8> OutChains;
6208 uint64_t DstOff = 0;
6209 unsigned NumMemOps = MemOps.size();
6210
6211 // Find the largest store and generate the bit pattern for it.
6212 EVT LargestVT = MemOps[0];
6213 for (unsigned i = 1; i < NumMemOps; i++)
6214 if (MemOps[i].bitsGT(LargestVT))
6215 LargestVT = MemOps[i];
6216 SDValue MemSetValue = getMemsetValue(Src, LargestVT, DAG, dl);
6217
6218 for (unsigned i = 0; i < NumMemOps; i++) {
6219 EVT VT = MemOps[i];
6220 unsigned VTSize = VT.getSizeInBits() / 8;
6221 if (VTSize > Size) {
6222 // Issuing an unaligned load / store pair that overlaps with the previous
6223 // pair. Adjust the offset accordingly.
6224 assert(i == NumMemOps-1 && i != 0);
6225 DstOff -= VTSize - Size;
6226 }
6227
6228 // If this store is smaller than the largest store, see whether we can get
6229 // the smaller value for free with a truncate.
6230 SDValue Value = MemSetValue;
6231 if (VT.bitsLT(LargestVT)) {
6232 if (!LargestVT.isVector() && !VT.isVector() &&
6233 TLI.isTruncateFree(LargestVT, VT))
6234 Value = DAG.getNode(ISD::TRUNCATE, dl, VT, MemSetValue);
6235 else
6236 Value = getMemsetValue(Src, VT, DAG, dl);
6237 }
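// E.g. if LargestVT is i64 holding 0xABABABABABABABAB and the final MemOp
// is an i16, the i16 value 0xABAB is obtained with a free truncate when the
// target allows it; otherwise it is recomputed from Src.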
6238 assert(Value.getValueType() == VT && "Value with wrong type.");
6239 SDValue Store = DAG.getStore(
6240 Chain, dl, Value, DAG.getMemBasePlusOffset(Dst, DstOff, dl),
6241 DstPtrInfo.getWithOffset(DstOff), Align,
6242 isVol ? MachineMemOperand::MOVolatile : MachineMemOperand::MONone);
6243 OutChains.push_back(Store);
6244 DstOff += VT.getSizeInBits() / 8;
6245 Size -= VTSize;
6246 }
6247
6248 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
6249 }
6250
6251 static void checkAddrSpaceIsValidForLibcall(const TargetLowering *TLI,
6252 unsigned AS) {
6253 // Lowering memcpy / memset / memmove intrinsics to calls is only valid if all
6254 // pointer operands can be losslessly bitcast to pointers of address space 0.
6255 if (AS != 0 && !TLI->isNoopAddrSpaceCast(AS, 0)) {
6256 report_fatal_error("cannot lower memory intrinsic in address space " +
6257 Twine(AS));
6258 }
6259 }
6260
6261 SDValue SelectionDAG::getMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst,
6262 SDValue Src, SDValue Size, unsigned Align,
6263 bool isVol, bool AlwaysInline, bool isTailCall,
6264 MachinePointerInfo DstPtrInfo,
6265 MachinePointerInfo SrcPtrInfo) {
6266 assert(Align && "The SDAG layer expects explicit alignment and reserves 0");
6267
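// Lowering strategy, in order of preference: (1) expand inline to loads and
// stores when the constant size is within target limits, (2) let the target
// emit specialized code, (3) force inline expansion when AlwaysInline, and
// (4) fall back to a call to the libc memcpy.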
6268 // Check to see if we should lower the memcpy to loads and stores first.
6269 // For cases within the target-specified limits, this is the best choice.
6270 ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);
6271 if (ConstantSize) {
6272 // Memcpy with size zero? Just return the original chain.
6273 if (ConstantSize->isNullValue())
6274 return Chain;
6275
6276 SDValue Result = getMemcpyLoadsAndStores(*this, dl, Chain, Dst, Src,
6277 ConstantSize->getZExtValue(),Align,
6278 isVol, false, DstPtrInfo, SrcPtrInfo);
6279 if (Result.getNode())
6280 return Result;
6281 }
6282
6283 // Then check to see if we should lower the memcpy with target-specific
6284 // code. If the target chooses to do this, this is the next best.
6285 if (TSI) {
6286 SDValue Result = TSI->EmitTargetCodeForMemcpy(
6287 *this, dl, Chain, Dst, Src, Size, Align, isVol, AlwaysInline,
6288 DstPtrInfo, SrcPtrInfo);
6289 if (Result.getNode())
6290 return Result;
6291 }
6292
6293 // If we really need inline code and the target declined to provide it,
6294 // use a (potentially long) sequence of loads and stores.
6295 if (AlwaysInline) {
6296 assert(ConstantSize && "AlwaysInline requires a constant size!");
6297 return getMemcpyLoadsAndStores(*this, dl, Chain, Dst, Src,
6298 ConstantSize->getZExtValue(), Align, isVol,
6299 true, DstPtrInfo, SrcPtrInfo);
6300 }
6301
6302 checkAddrSpaceIsValidForLibcall(TLI, DstPtrInfo.getAddrSpace());
6303 checkAddrSpaceIsValidForLibcall(TLI, SrcPtrInfo.getAddrSpace());
6304
6305 // FIXME: If the memcpy is volatile (isVol), lowering it to a plain libc
6306 // memcpy is not guaranteed to be safe. libc memcpys aren't required to
6307 // respect volatile, so they may do things like read or write memory
6308 // beyond the given memory regions. But fixing this isn't easy, and most
6309 // people don't care.
6310
6311 // Emit a library call.
6312 TargetLowering::ArgListTy Args;
6313 TargetLowering::ArgListEntry Entry;
6314 Entry.Ty = Type::getInt8PtrTy(*getContext());
6315 Entry.Node = Dst; Args.push_back(Entry);
6316 Entry.Node = Src; Args.push_back(Entry);
6317
6318 Entry.Ty = getDataLayout().getIntPtrType(*getContext());
6319 Entry.Node = Size; Args.push_back(Entry);
6320 // FIXME: pass in SDLoc
6321 TargetLowering::CallLoweringInfo CLI(*this);
6322 CLI.setDebugLoc(dl)
6323 .setChain(Chain)
6324 .setLibCallee(TLI->getLibcallCallingConv(RTLIB::MEMCPY),
6325 Dst.getValueType().getTypeForEVT(*getContext()),
6326 getExternalSymbol(TLI->getLibcallName(RTLIB::MEMCPY),
6327 TLI->getPointerTy(getDataLayout())),
6328 std::move(Args))
6329 .setDiscardResult()
6330 .setTailCall(isTailCall);
6331
6332 std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI);
6333 return CallResult.second;
6334 }
6335
6336 SDValue SelectionDAG::getAtomicMemcpy(SDValue Chain, const SDLoc &dl,
6337 SDValue Dst, unsigned DstAlign,
6338 SDValue Src, unsigned SrcAlign,
6339 SDValue Size, Type *SizeTy,
6340 unsigned ElemSz, bool isTailCall,
6341 MachinePointerInfo DstPtrInfo,
6342 MachinePointerInfo SrcPtrInfo) {
6343 // Emit a library call.
6344 TargetLowering::ArgListTy Args;
6345 TargetLowering::ArgListEntry Entry;
6346 Entry.Ty = getDataLayout().getIntPtrType(*getContext());
6347 Entry.Node = Dst;
6348 Args.push_back(Entry);
6349
6350 Entry.Node = Src;
6351 Args.push_back(Entry);
6352
6353 Entry.Ty = SizeTy;
6354 Entry.Node = Size;
6355 Args.push_back(Entry);
6356
6357 RTLIB::Libcall LibraryCall =
6358 RTLIB::getMEMCPY_ELEMENT_UNORDERED_ATOMIC(ElemSz);
6359 if (LibraryCall == RTLIB::UNKNOWN_LIBCALL)
6360 report_fatal_error("Unsupported element size");
6361
6362 TargetLowering::CallLoweringInfo CLI(*this);
6363 CLI.setDebugLoc(dl)
6364 .setChain(Chain)
6365 .setLibCallee(TLI->getLibcallCallingConv(LibraryCall),
6366 Type::getVoidTy(*getContext()),
6367 getExternalSymbol(TLI->getLibcallName(LibraryCall),
6368 TLI->getPointerTy(getDataLayout())),
6369 std::move(Args))
6370 .setDiscardResult()
6371 .setTailCall(isTailCall);
6372
6373 std::pair<SDValue, SDValue> CallResult = TLI->LowerCallTo(CLI);
6374 return CallResult.second;
6375 }
6376
6377 SDValue SelectionDAG::getMemmove(SDValue Chain, const SDLoc &dl, SDValue Dst,
6378 SDValue Src, SDValue Size, unsigned Align,
6379 bool isVol, bool isTailCall,
6380 MachinePointerInfo DstPtrInfo,
6381 MachinePointerInfo SrcPtrInfo) {
6382 assert(Align && "The SDAG layer expects explicit alignment and reserves 0");
6383
6384 // Check to see if we should lower the memmove to loads and stores first.
6385 // For cases within the target-specified limits, this is the best choice.
6386 ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);
6387 if (ConstantSize) {
6388 // Memmove with size zero? Just return the original chain.
6389 if (ConstantSize->isNullValue())
6390 return Chain;
6391
6392 SDValue Result =
6393 getMemmoveLoadsAndStores(*this, dl, Chain, Dst, Src,
6394 ConstantSize->getZExtValue(), Align, isVol,
6395 false, DstPtrInfo, SrcPtrInfo);
6396 if (Result.getNode())
6397 return Result;
6398 }
6399
6400 // Then check to see if we should lower the memmove with target-specific
6401 // code. If the target chooses to do this, this is the next best.
6402 if (TSI) {
6403 SDValue Result = TSI->EmitTargetCodeForMemmove(
6404 *this, dl, Chain, Dst, Src, Size, Align, isVol, DstPtrInfo, SrcPtrInfo);
6405 if (Result.getNode())
6406 return Result;
6407 }
6408
6409 checkAddrSpaceIsValidForLibcall(TLI, DstPtrInfo.getAddrSpace());
6410 checkAddrSpaceIsValidForLibcall(TLI, SrcPtrInfo.getAddrSpace());
6411
6412 // FIXME: If the memmove is volatile, lowering it to plain libc memmove may
6413 // not be safe. See memcpy above for more details.
6414
6415 // Emit a library call.
6416 TargetLowering::ArgListTy Args;
6417 TargetLowering::ArgListEntry Entry;
6418 Entry.Ty = Type::getInt8PtrTy(*getContext());
6419 Entry.Node = Dst; Args.push_back(Entry);
6420 Entry.Node = Src; Args.push_back(Entry);
6421
6422 Entry.Ty = getDataLayout().getIntPtrType(*getContext());
6423 Entry.Node = Size; Args.push_back(Entry);
6424 // FIXME: pass in SDLoc
6425 TargetLowering::CallLoweringInfo CLI(*this);
6426 CLI.setDebugLoc(dl)
6427 .setChain(Chain)
6428 .setLibCallee(TLI->getLibcallCallingConv(RTLIB::MEMMOVE),
6429 Dst.getValueType().getTypeForEVT(*getContext()),
6430 getExternalSymbol(TLI->getLibcallName(RTLIB::MEMMOVE),
6431 TLI->getPointerTy(getDataLayout())),
6432 std::move(Args))
6433 .setDiscardResult()
6434 .setTailCall(isTailCall);
6435
6436 std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI);
6437 return CallResult.second;
6438 }
6439
6440 SDValue SelectionDAG::getAtomicMemmove(SDValue Chain, const SDLoc &dl,
6441 SDValue Dst, unsigned DstAlign,
6442 SDValue Src, unsigned SrcAlign,
6443 SDValue Size, Type *SizeTy,
6444 unsigned ElemSz, bool isTailCall,
6445 MachinePointerInfo DstPtrInfo,
6446 MachinePointerInfo SrcPtrInfo) {
6447 // Emit a library call.
6448 TargetLowering::ArgListTy Args;
6449 TargetLowering::ArgListEntry Entry;
6450 Entry.Ty = getDataLayout().getIntPtrType(*getContext());
6451 Entry.Node = Dst;
6452 Args.push_back(Entry);
6453
6454 Entry.Node = Src;
6455 Args.push_back(Entry);
6456
6457 Entry.Ty = SizeTy;
6458 Entry.Node = Size;
6459 Args.push_back(Entry);
6460
6461 RTLIB::Libcall LibraryCall =
6462 RTLIB::getMEMMOVE_ELEMENT_UNORDERED_ATOMIC(ElemSz);
6463 if (LibraryCall == RTLIB::UNKNOWN_LIBCALL)
6464 report_fatal_error("Unsupported element size");
6465
6466 TargetLowering::CallLoweringInfo CLI(*this);
6467 CLI.setDebugLoc(dl)
6468 .setChain(Chain)
6469 .setLibCallee(TLI->getLibcallCallingConv(LibraryCall),
6470 Type::getVoidTy(*getContext()),
6471 getExternalSymbol(TLI->getLibcallName(LibraryCall),
6472 TLI->getPointerTy(getDataLayout())),
6473 std::move(Args))
6474 .setDiscardResult()
6475 .setTailCall(isTailCall);
6476
6477 std::pair<SDValue, SDValue> CallResult = TLI->LowerCallTo(CLI);
6478 return CallResult.second;
6479 }
6480
6481 SDValue SelectionDAG::getMemset(SDValue Chain, const SDLoc &dl, SDValue Dst,
6482 SDValue Src, SDValue Size, unsigned Align,
6483 bool isVol, bool isTailCall,
6484 MachinePointerInfo DstPtrInfo) {
6485 assert(Align && "The SDAG layer expects explicit alignment and reserves 0");
6486
6487 // Check to see if we should lower the memset to stores first.
6488 // For cases within the target-specified limits, this is the best choice.
6489 ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);
6490 if (ConstantSize) {
6491 // Memset with size zero? Just return the original chain.
6492 if (ConstantSize->isNullValue())
6493 return Chain;
6494
6495 SDValue Result =
6496 getMemsetStores(*this, dl, Chain, Dst, Src, ConstantSize->getZExtValue(),
6497 Align, isVol, DstPtrInfo);
6498
6499 if (Result.getNode())
6500 return Result;
6501 }
6502
6503 // Then check to see if we should lower the memset with target-specific
6504 // code. If the target chooses to do this, this is the next best.
6505 if (TSI) {
6506 SDValue Result = TSI->EmitTargetCodeForMemset(
6507 *this, dl, Chain, Dst, Src, Size, Align, isVol, DstPtrInfo);
6508 if (Result.getNode())
6509 return Result;
6510 }
6511
6512 checkAddrSpaceIsValidForLibcall(TLI, DstPtrInfo.getAddrSpace());
6513
6514 // Emit a library call.
6515 TargetLowering::ArgListTy Args;
6516 TargetLowering::ArgListEntry Entry;
6517 Entry.Node = Dst; Entry.Ty = Type::getInt8PtrTy(*getContext());
6518 Args.push_back(Entry);
6519 Entry.Node = Src;
6520 Entry.Ty = Src.getValueType().getTypeForEVT(*getContext());
6521 Args.push_back(Entry);
6522 Entry.Node = Size;
6523 Entry.Ty = getDataLayout().getIntPtrType(*getContext());
6524 Args.push_back(Entry);
6525
6526 // FIXME: pass in SDLoc
6527 TargetLowering::CallLoweringInfo CLI(*this);
6528 CLI.setDebugLoc(dl)
6529 .setChain(Chain)
6530 .setLibCallee(TLI->getLibcallCallingConv(RTLIB::MEMSET),
6531 Dst.getValueType().getTypeForEVT(*getContext()),
6532 getExternalSymbol(TLI->getLibcallName(RTLIB::MEMSET),
6533 TLI->getPointerTy(getDataLayout())),
6534 std::move(Args))
6535 .setDiscardResult()
6536 .setTailCall(isTailCall);
6537
6538 std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI);
6539 return CallResult.second;
6540 }
6541
6542 SDValue SelectionDAG::getAtomicMemset(SDValue Chain, const SDLoc &dl,
6543 SDValue Dst, unsigned DstAlign,
6544 SDValue Value, SDValue Size, Type *SizeTy,
6545 unsigned ElemSz, bool isTailCall,
6546 MachinePointerInfo DstPtrInfo) {
6547 // Emit a library call.
6548 TargetLowering::ArgListTy Args;
6549 TargetLowering::ArgListEntry Entry;
6550 Entry.Ty = getDataLayout().getIntPtrType(*getContext());
6551 Entry.Node = Dst;
6552 Args.push_back(Entry);
6553
6554 Entry.Ty = Type::getInt8Ty(*getContext());
6555 Entry.Node = Value;
6556 Args.push_back(Entry);
6557
6558 Entry.Ty = SizeTy;
6559 Entry.Node = Size;
6560 Args.push_back(Entry);
6561
6562 RTLIB::Libcall LibraryCall =
6563 RTLIB::getMEMSET_ELEMENT_UNORDERED_ATOMIC(ElemSz);
6564 if (LibraryCall == RTLIB::UNKNOWN_LIBCALL)
6565 report_fatal_error("Unsupported element size");
6566
6567 TargetLowering::CallLoweringInfo CLI(*this);
6568 CLI.setDebugLoc(dl)
6569 .setChain(Chain)
6570 .setLibCallee(TLI->getLibcallCallingConv(LibraryCall),
6571 Type::getVoidTy(*getContext()),
6572 getExternalSymbol(TLI->getLibcallName(LibraryCall),
6573 TLI->getPointerTy(getDataLayout())),
6574 std::move(Args))
6575 .setDiscardResult()
6576 .setTailCall(isTailCall);
6577
6578 std::pair<SDValue, SDValue> CallResult = TLI->LowerCallTo(CLI);
6579 return CallResult.second;
6580 }
6581
6582 SDValue SelectionDAG::getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT,
6583 SDVTList VTList, ArrayRef<SDValue> Ops,
6584 MachineMemOperand *MMO) {
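// Memoize via the CSE map: the FoldingSet ID below encodes the opcode,
// value types, operands, memory VT, and address space, so an equivalent
// existing atomic node is reused (with its alignment refined) instead of
// creating a duplicate.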
6585 FoldingSetNodeID ID;
6586 ID.AddInteger(MemVT.getRawBits());
6587 AddNodeIDNode(ID, Opcode, VTList, Ops);
6588 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
6589 void* IP = nullptr;
6590 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
6591 cast<AtomicSDNode>(E)->refineAlignment(MMO);
6592 return SDValue(E, 0);
6593 }
6594
6595 auto *N = newSDNode<AtomicSDNode>(Opcode, dl.getIROrder(), dl.getDebugLoc(),
6596 VTList, MemVT, MMO);
6597 createOperands(N, Ops);
6598
6599 CSEMap.InsertNode(N, IP);
6600 InsertNode(N);
6601 return SDValue(N, 0);
6602 }
6603
6604 SDValue SelectionDAG::getAtomicCmpSwap(unsigned Opcode, const SDLoc &dl,
6605 EVT MemVT, SDVTList VTs, SDValue Chain,
6606 SDValue Ptr, SDValue Cmp, SDValue Swp,
6607 MachineMemOperand *MMO) {
6608 assert(Opcode == ISD::ATOMIC_CMP_SWAP ||
6609 Opcode == ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS);
6610 assert(Cmp.getValueType() == Swp.getValueType() && "Invalid Atomic Op Types");
6611
6612 SDValue Ops[] = {Chain, Ptr, Cmp, Swp};
6613 return getAtomic(Opcode, dl, MemVT, VTs, Ops, MMO);
6614 }
6615
6616 SDValue SelectionDAG::getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT,
6617 SDValue Chain, SDValue Ptr, SDValue Val,
6618 MachineMemOperand *MMO) {
6619 assert((Opcode == ISD::ATOMIC_LOAD_ADD ||
6620 Opcode == ISD::ATOMIC_LOAD_SUB ||
6621 Opcode == ISD::ATOMIC_LOAD_AND ||
6622 Opcode == ISD::ATOMIC_LOAD_CLR ||
6623 Opcode == ISD::ATOMIC_LOAD_OR ||
6624 Opcode == ISD::ATOMIC_LOAD_XOR ||
6625 Opcode == ISD::ATOMIC_LOAD_NAND ||
6626 Opcode == ISD::ATOMIC_LOAD_MIN ||
6627 Opcode == ISD::ATOMIC_LOAD_MAX ||
6628 Opcode == ISD::ATOMIC_LOAD_UMIN ||
6629 Opcode == ISD::ATOMIC_LOAD_UMAX ||
6630 Opcode == ISD::ATOMIC_LOAD_FADD ||
6631 Opcode == ISD::ATOMIC_LOAD_FSUB ||
6632 Opcode == ISD::ATOMIC_SWAP ||
6633 Opcode == ISD::ATOMIC_STORE) &&
6634 "Invalid Atomic Op");
6635
6636 EVT VT = Val.getValueType();
6637
6638 SDVTList VTs = Opcode == ISD::ATOMIC_STORE ? getVTList(MVT::Other) :
6639 getVTList(VT, MVT::Other);
6640 SDValue Ops[] = {Chain, Ptr, Val};
6641 return getAtomic(Opcode, dl, MemVT, VTs, Ops, MMO);
6642 }
6643
6644 SDValue SelectionDAG::getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT,
6645 EVT VT, SDValue Chain, SDValue Ptr,
6646 MachineMemOperand *MMO) {
6647 assert(Opcode == ISD::ATOMIC_LOAD && "Invalid Atomic Op");
6648
6649 SDVTList VTs = getVTList(VT, MVT::Other);
6650 SDValue Ops[] = {Chain, Ptr};
6651 return getAtomic(Opcode, dl, MemVT, VTs, Ops, MMO);
6652 }
6653
6654 /// getMergeValues - Create a MERGE_VALUES node from the given operands.
6655 SDValue SelectionDAG::getMergeValues(ArrayRef<SDValue> Ops, const SDLoc &dl) {
6656 if (Ops.size() == 1)
6657 return Ops[0];
6658
6659 SmallVector<EVT, 4> VTs;
6660 VTs.reserve(Ops.size());
6661 for (unsigned i = 0; i < Ops.size(); ++i)
6662 VTs.push_back(Ops[i].getValueType());
6663 return getNode(ISD::MERGE_VALUES, dl, getVTList(VTs), Ops);
6664 }
6665
6666 SDValue SelectionDAG::getMemIntrinsicNode(
6667 unsigned Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef<SDValue> Ops,
6668 EVT MemVT, MachinePointerInfo PtrInfo, unsigned Align,
6669 MachineMemOperand::Flags Flags, uint64_t Size, const AAMDNodes &AAInfo) {
6670 if (Align == 0) // Ensure that codegen never sees alignment 0
6671 Align = getEVTAlignment(MemVT);
6672
6673 if (!Size && MemVT.isScalableVector())
6674 Size = MemoryLocation::UnknownSize;
6675 else if (!Size)
6676 Size = MemVT.getStoreSize();
6677
6678 MachineFunction &MF = getMachineFunction();
6679 MachineMemOperand *MMO =
6680 MF.getMachineMemOperand(PtrInfo, Flags, Size, Align, AAInfo);
6681
6682 return getMemIntrinsicNode(Opcode, dl, VTList, Ops, MemVT, MMO);
6683 }
6684
6685 SDValue SelectionDAG::getMemIntrinsicNode(unsigned Opcode, const SDLoc &dl,
6686 SDVTList VTList,
6687 ArrayRef<SDValue> Ops, EVT MemVT,
6688 MachineMemOperand *MMO) {
6689 assert((Opcode == ISD::INTRINSIC_VOID ||
6690 Opcode == ISD::INTRINSIC_W_CHAIN ||
6691 Opcode == ISD::PREFETCH ||
6692 Opcode == ISD::LIFETIME_START ||
6693 Opcode == ISD::LIFETIME_END ||
6694 ((int)Opcode <= std::numeric_limits<int>::max() &&
6695 (int)Opcode >= ISD::FIRST_TARGET_MEMORY_OPCODE)) &&
6696 "Opcode is not a memory-accessing opcode!");
6697
6698 // Memoize the node unless it returns a glue result.
6699 MemIntrinsicSDNode *N;
6700 if (VTList.VTs[VTList.NumVTs-1] != MVT::Glue) {
6701 FoldingSetNodeID ID;
6702 AddNodeIDNode(ID, Opcode, VTList, Ops);
6703 ID.AddInteger(getSyntheticNodeSubclassData<MemIntrinsicSDNode>(
6704 Opcode, dl.getIROrder(), VTList, MemVT, MMO));
6705 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
6706 void *IP = nullptr;
6707 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
6708 cast<MemIntrinsicSDNode>(E)->refineAlignment(MMO);
6709 return SDValue(E, 0);
6710 }
6711
6712 N = newSDNode<MemIntrinsicSDNode>(Opcode, dl.getIROrder(), dl.getDebugLoc(),
6713 VTList, MemVT, MMO);
6714 createOperands(N, Ops);
6715
6716 CSEMap.InsertNode(N, IP);
6717 } else {
6718 N = newSDNode<MemIntrinsicSDNode>(Opcode, dl.getIROrder(), dl.getDebugLoc(),
6719 VTList, MemVT, MMO);
6720 createOperands(N, Ops);
6721 }
6722 InsertNode(N);
6723 SDValue V(N, 0);
6724 NewSDValueDbgMsg(V, "Creating new node: ", this);
6725 return V;
6726 }
6727
6728 SDValue SelectionDAG::getLifetimeNode(bool IsStart, const SDLoc &dl,
6729 SDValue Chain, int FrameIndex,
6730 int64_t Size, int64_t Offset) {
6731 const unsigned Opcode = IsStart ? ISD::LIFETIME_START : ISD::LIFETIME_END;
6732 const auto VTs = getVTList(MVT::Other);
6733 SDValue Ops[2] = {
6734 Chain,
6735 getFrameIndex(FrameIndex,
6736 getTargetLoweringInfo().getFrameIndexTy(getDataLayout()),
6737 true)};
6738
6739 FoldingSetNodeID ID;
6740 AddNodeIDNode(ID, Opcode, VTs, Ops);
6741 ID.AddInteger(FrameIndex);
6742 ID.AddInteger(Size);
6743 ID.AddInteger(Offset);
6744 void *IP = nullptr;
6745 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP))
6746 return SDValue(E, 0);
6747
6748 LifetimeSDNode *N = newSDNode<LifetimeSDNode>(
6749 Opcode, dl.getIROrder(), dl.getDebugLoc(), VTs, Size, Offset);
6750 createOperands(N, Ops);
6751 CSEMap.InsertNode(N, IP);
6752 InsertNode(N);
6753 SDValue V(N, 0);
6754 NewSDValueDbgMsg(V, "Creating new node: ", this);
6755 return V;
6756 }
6757
6758 /// InferPointerInfo - If the specified ptr/offset is a frame index, infer a
6759 /// MachinePointerInfo record from it. This is particularly useful because the
6760 /// code generator has many cases where it doesn't bother passing in a
6761 /// MachinePointerInfo to getLoad or getStore when it has "FI+Cst".
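/// For illustration: with Ptr = ISD::ADD(FrameIndex(3), Constant(8)) and
/// Offset = 4, this returns a fixed-stack MachinePointerInfo for frame
/// index 3 at offset 12.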
6762 static MachinePointerInfo InferPointerInfo(const MachinePointerInfo &Info,
6763 SelectionDAG &DAG, SDValue Ptr,
6764 int64_t Offset = 0) {
6765 // If this is FI+Offset, we can model it.
6766 if (const FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Ptr))
6767 return MachinePointerInfo::getFixedStack(DAG.getMachineFunction(),
6768 FI->getIndex(), Offset);
6769
6770 // If this is (FI+Offset1)+Offset2, we can model it.
6771 if (Ptr.getOpcode() != ISD::ADD ||
6772 !isa<ConstantSDNode>(Ptr.getOperand(1)) ||
6773 !isa<FrameIndexSDNode>(Ptr.getOperand(0)))
6774 return Info;
6775
6776 int FI = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex();
6777 return MachinePointerInfo::getFixedStack(
6778 DAG.getMachineFunction(), FI,
6779 Offset + cast<ConstantSDNode>(Ptr.getOperand(1))->getSExtValue());
6780 }
6781
6782 /// InferPointerInfo - If the specified ptr/offset is a frame index, infer a
6783 /// MachinePointerInfo record from it. This is particularly useful because the
6784 /// code generator has many cases where it doesn't bother passing in a
6785 /// MachinePointerInfo to getLoad or getStore when it has "FI+Cst".
6786 static MachinePointerInfo InferPointerInfo(const MachinePointerInfo &Info,
6787 SelectionDAG &DAG, SDValue Ptr,
6788 SDValue OffsetOp) {
6789 // If the 'Offset' value isn't a constant, we can't handle this.
6790 if (ConstantSDNode *OffsetNode = dyn_cast<ConstantSDNode>(OffsetOp))
6791 return InferPointerInfo(Info, DAG, Ptr, OffsetNode->getSExtValue());
6792 if (OffsetOp.isUndef())
6793 return InferPointerInfo(Info, DAG, Ptr);
6794 return Info;
6795 }
6796
6797 SDValue SelectionDAG::getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType,
6798 EVT VT, const SDLoc &dl, SDValue Chain,
6799 SDValue Ptr, SDValue Offset,
6800 MachinePointerInfo PtrInfo, EVT MemVT,
6801 unsigned Alignment,
6802 MachineMemOperand::Flags MMOFlags,
6803 const AAMDNodes &AAInfo, const MDNode *Ranges) {
6804 assert(Chain.getValueType() == MVT::Other &&
6805 "Invalid chain type");
6806 if (Alignment == 0) // Ensure that codegen never sees alignment 0
6807 Alignment = getEVTAlignment(MemVT);
6808
6809 MMOFlags |= MachineMemOperand::MOLoad;
6810 assert((MMOFlags & MachineMemOperand::MOStore) == 0);
6811 // If we don't have a PtrInfo, infer the trivial frame index case to simplify
6812 // clients.
6813 if (PtrInfo.V.isNull())
6814 PtrInfo = InferPointerInfo(PtrInfo, *this, Ptr, Offset);
6815
6816 MachineFunction &MF = getMachineFunction();
6817 MachineMemOperand *MMO = MF.getMachineMemOperand(
6818 PtrInfo, MMOFlags, MemVT.getStoreSize(), Alignment, AAInfo, Ranges);
6819 return getLoad(AM, ExtType, VT, dl, Chain, Ptr, Offset, MemVT, MMO);
6820 }
6821
6822 SDValue SelectionDAG::getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType,
6823 EVT VT, const SDLoc &dl, SDValue Chain,
6824 SDValue Ptr, SDValue Offset, EVT MemVT,
6825 MachineMemOperand *MMO) {
6826 if (VT == MemVT) {
6827 ExtType = ISD::NON_EXTLOAD;
6828 } else if (ExtType == ISD::NON_EXTLOAD) {
6829 assert(VT == MemVT && "Non-extending load from different memory type!");
6830 } else {
6831 // Extending load.
6832 assert(MemVT.getScalarType().bitsLT(VT.getScalarType()) &&
6833 "Should only be an extending load, not truncating!");
6834 assert(VT.isInteger() == MemVT.isInteger() &&
6835 "Cannot convert from FP to Int or Int -> FP!");
6836 assert(VT.isVector() == MemVT.isVector() &&
6837 "Cannot use an ext load to convert to or from a vector!");
6838 assert((!VT.isVector() ||
6839 VT.getVectorNumElements() == MemVT.getVectorNumElements()) &&
6840 "Cannot use an ext load to change the number of vector elements!");
6841 }
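// E.g. an ISD::EXTLOAD with MemVT = i8 and VT = i32 reads one byte from
// memory and extends it to 32 bits, whereas ISD::NON_EXTLOAD requires
// VT == MemVT.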
6842
6843 bool Indexed = AM != ISD::UNINDEXED;
6844 assert((Indexed || Offset.isUndef()) && "Unindexed load with an offset!");
6845
6846 SDVTList VTs = Indexed ?
6847 getVTList(VT, Ptr.getValueType(), MVT::Other) : getVTList(VT, MVT::Other);
6848 SDValue Ops[] = { Chain, Ptr, Offset };
6849 FoldingSetNodeID ID;
6850 AddNodeIDNode(ID, ISD::LOAD, VTs, Ops);
6851 ID.AddInteger(MemVT.getRawBits());
6852 ID.AddInteger(getSyntheticNodeSubclassData<LoadSDNode>(
6853 dl.getIROrder(), VTs, AM, ExtType, MemVT, MMO));
6854 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
6855 void *IP = nullptr;
6856 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
6857 cast<LoadSDNode>(E)->refineAlignment(MMO);
6858 return SDValue(E, 0);
6859 }
6860 auto *N = newSDNode<LoadSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs, AM,
6861 ExtType, MemVT, MMO);
6862 createOperands(N, Ops);
6863
6864 CSEMap.InsertNode(N, IP);
6865 InsertNode(N);
6866 SDValue V(N, 0);
6867 NewSDValueDbgMsg(V, "Creating new node: ", this);
6868 return V;
6869 }
6870
6871 SDValue SelectionDAG::getLoad(EVT VT, const SDLoc &dl, SDValue Chain,
6872 SDValue Ptr, MachinePointerInfo PtrInfo,
6873 unsigned Alignment,
6874 MachineMemOperand::Flags MMOFlags,
6875 const AAMDNodes &AAInfo, const MDNode *Ranges) {
6876 SDValue Undef = getUNDEF(Ptr.getValueType());
6877 return getLoad(ISD::UNINDEXED, ISD::NON_EXTLOAD, VT, dl, Chain, Ptr, Undef,
6878 PtrInfo, VT, Alignment, MMOFlags, AAInfo, Ranges);
6879 }
6880
6881 SDValue SelectionDAG::getLoad(EVT VT, const SDLoc &dl, SDValue Chain,
6882 SDValue Ptr, MachineMemOperand *MMO) {
6883 SDValue Undef = getUNDEF(Ptr.getValueType());
6884 return getLoad(ISD::UNINDEXED, ISD::NON_EXTLOAD, VT, dl, Chain, Ptr, Undef,
6885 VT, MMO);
6886 }
6887
6888 SDValue SelectionDAG::getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl,
6889 EVT VT, SDValue Chain, SDValue Ptr,
6890 MachinePointerInfo PtrInfo, EVT MemVT,
6891 unsigned Alignment,
6892 MachineMemOperand::Flags MMOFlags,
6893 const AAMDNodes &AAInfo) {
6894 SDValue Undef = getUNDEF(Ptr.getValueType());
6895 return getLoad(ISD::UNINDEXED, ExtType, VT, dl, Chain, Ptr, Undef, PtrInfo,
6896 MemVT, Alignment, MMOFlags, AAInfo);
6897 }
6898
6899 SDValue SelectionDAG::getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl,
6900 EVT VT, SDValue Chain, SDValue Ptr, EVT MemVT,
6901 MachineMemOperand *MMO) {
6902 SDValue Undef = getUNDEF(Ptr.getValueType());
6903 return getLoad(ISD::UNINDEXED, ExtType, VT, dl, Chain, Ptr, Undef,
6904 MemVT, MMO);
6905 }
6906
6907 SDValue SelectionDAG::getIndexedLoad(SDValue OrigLoad, const SDLoc &dl,
6908 SDValue Base, SDValue Offset,
6909 ISD::MemIndexedMode AM) {
6910 LoadSDNode *LD = cast<LoadSDNode>(OrigLoad);
6911 assert(LD->getOffset().isUndef() && "Load is already an indexed load!");
6912 // Don't propagate the invariant or dereferenceable flags.
6913 auto MMOFlags =
6914 LD->getMemOperand()->getFlags() &
6915 ~(MachineMemOperand::MOInvariant | MachineMemOperand::MODereferenceable);
6916 return getLoad(AM, LD->getExtensionType(), OrigLoad.getValueType(), dl,
6917 LD->getChain(), Base, Offset, LD->getPointerInfo(),
6918 LD->getMemoryVT(), LD->getAlignment(), MMOFlags,
6919 LD->getAAInfo());
6920 }
6921
6922 SDValue SelectionDAG::getStore(SDValue Chain, const SDLoc &dl, SDValue Val,
6923 SDValue Ptr, MachinePointerInfo PtrInfo,
6924 unsigned Alignment,
6925 MachineMemOperand::Flags MMOFlags,
6926 const AAMDNodes &AAInfo) {
6927 assert(Chain.getValueType() == MVT::Other && "Invalid chain type");
6928 if (Alignment == 0) // Ensure that codegen never sees alignment 0
6929 Alignment = getEVTAlignment(Val.getValueType());
6930
6931 MMOFlags |= MachineMemOperand::MOStore;
6932 assert((MMOFlags & MachineMemOperand::MOLoad) == 0);
6933
6934 if (PtrInfo.V.isNull())
6935 PtrInfo = InferPointerInfo(PtrInfo, *this, Ptr);
6936
6937 MachineFunction &MF = getMachineFunction();
6938 MachineMemOperand *MMO = MF.getMachineMemOperand(
6939 PtrInfo, MMOFlags, Val.getValueType().getStoreSize(), Alignment, AAInfo);
6940 return getStore(Chain, dl, Val, Ptr, MMO);
6941 }
6942
6943 SDValue SelectionDAG::getStore(SDValue Chain, const SDLoc &dl, SDValue Val,
6944 SDValue Ptr, MachineMemOperand *MMO) {
6945 assert(Chain.getValueType() == MVT::Other &&
6946 "Invalid chain type");
6947 EVT VT = Val.getValueType();
6948 SDVTList VTs = getVTList(MVT::Other);
6949 SDValue Undef = getUNDEF(Ptr.getValueType());
6950 SDValue Ops[] = { Chain, Val, Ptr, Undef };
6951 FoldingSetNodeID ID;
6952 AddNodeIDNode(ID, ISD::STORE, VTs, Ops);
6953 ID.AddInteger(VT.getRawBits());
6954 ID.AddInteger(getSyntheticNodeSubclassData<StoreSDNode>(
6955 dl.getIROrder(), VTs, ISD::UNINDEXED, false, VT, MMO));
6956 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
6957 void *IP = nullptr;
6958 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
6959 cast<StoreSDNode>(E)->refineAlignment(MMO);
6960 return SDValue(E, 0);
6961 }
6962 auto *N = newSDNode<StoreSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs,
6963 ISD::UNINDEXED, false, VT, MMO);
6964 createOperands(N, Ops);
6965
6966 CSEMap.InsertNode(N, IP);
6967 InsertNode(N);
6968 SDValue V(N, 0);
6969 NewSDValueDbgMsg(V, "Creating new node: ", this);
6970 return V;
6971 }
6972
6973 SDValue SelectionDAG::getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val,
6974 SDValue Ptr, MachinePointerInfo PtrInfo,
6975 EVT SVT, unsigned Alignment,
6976 MachineMemOperand::Flags MMOFlags,
6977 const AAMDNodes &AAInfo) {
6978 assert(Chain.getValueType() == MVT::Other &&
6979 "Invalid chain type");
6980 if (Alignment == 0) // Ensure that codegen never sees alignment 0
6981 Alignment = getEVTAlignment(SVT);
6982
6983 MMOFlags |= MachineMemOperand::MOStore;
6984 assert((MMOFlags & MachineMemOperand::MOLoad) == 0);
6985
6986 if (PtrInfo.V.isNull())
6987 PtrInfo = InferPointerInfo(PtrInfo, *this, Ptr);
6988
6989 MachineFunction &MF = getMachineFunction();
6990 MachineMemOperand *MMO = MF.getMachineMemOperand(
6991 PtrInfo, MMOFlags, SVT.getStoreSize(), Alignment, AAInfo);
6992 return getTruncStore(Chain, dl, Val, Ptr, SVT, MMO);
6993 }
6994
6995 SDValue SelectionDAG::getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val,
6996 SDValue Ptr, EVT SVT,
6997 MachineMemOperand *MMO) {
6998 EVT VT = Val.getValueType();
6999
7000 assert(Chain.getValueType() == MVT::Other &&
7001 "Invalid chain type");
7002 if (VT == SVT)
7003 return getStore(Chain, dl, Val, Ptr, MMO);
7004
7005 assert(SVT.getScalarType().bitsLT(VT.getScalarType()) &&
7006 "Should only be a truncating store, not extending!");
7007 assert(VT.isInteger() == SVT.isInteger() &&
7008 "Can't do FP-INT conversion!");
7009 assert(VT.isVector() == SVT.isVector() &&
7010 "Cannot use trunc store to convert to or from a vector!");
7011 assert((!VT.isVector() ||
7012 VT.getVectorNumElements() == SVT.getVectorNumElements()) &&
7013 "Cannot use trunc store to change the number of vector elements!");
7014
7015 SDVTList VTs = getVTList(MVT::Other);
7016 SDValue Undef = getUNDEF(Ptr.getValueType());
7017 SDValue Ops[] = { Chain, Val, Ptr, Undef };
7018 FoldingSetNodeID ID;
7019 AddNodeIDNode(ID, ISD::STORE, VTs, Ops);
7020 ID.AddInteger(SVT.getRawBits());
7021 ID.AddInteger(getSyntheticNodeSubclassData<StoreSDNode>(
7022 dl.getIROrder(), VTs, ISD::UNINDEXED, true, SVT, MMO));
7023 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
7024 void *IP = nullptr;
7025 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
7026 cast<StoreSDNode>(E)->refineAlignment(MMO);
7027 return SDValue(E, 0);
7028 }
7029 auto *N = newSDNode<StoreSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs,
7030 ISD::UNINDEXED, true, SVT, MMO);
7031 createOperands(N, Ops);
7032
7033 CSEMap.InsertNode(N, IP);
7034 InsertNode(N);
7035 SDValue V(N, 0);
7036 NewSDValueDbgMsg(V, "Creating new node: ", this);
7037 return V;
7038 }
7039
7040 SDValue SelectionDAG::getIndexedStore(SDValue OrigStore, const SDLoc &dl,
7041 SDValue Base, SDValue Offset,
7042 ISD::MemIndexedMode AM) {
7043 StoreSDNode *ST = cast<StoreSDNode>(OrigStore);
7044 assert(ST->getOffset().isUndef() && "Store is already an indexed store!");
7045 SDVTList VTs = getVTList(Base.getValueType(), MVT::Other);
7046 SDValue Ops[] = { ST->getChain(), ST->getValue(), Base, Offset };
7047 FoldingSetNodeID ID;
7048 AddNodeIDNode(ID, ISD::STORE, VTs, Ops);
7049 ID.AddInteger(ST->getMemoryVT().getRawBits());
7050 ID.AddInteger(ST->getRawSubclassData());
7051 ID.AddInteger(ST->getPointerInfo().getAddrSpace());
7052 void *IP = nullptr;
7053 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP))
7054 return SDValue(E, 0);
7055
7056 auto *N = newSDNode<StoreSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs, AM,
7057 ST->isTruncatingStore(), ST->getMemoryVT(),
7058 ST->getMemOperand());
7059 createOperands(N, Ops);
7060
7061 CSEMap.InsertNode(N, IP);
7062 InsertNode(N);
7063 SDValue V(N, 0);
7064 NewSDValueDbgMsg(V, "Creating new node: ", this);
7065 return V;
7066 }
7067
7068 SDValue SelectionDAG::getMaskedLoad(EVT VT, const SDLoc &dl, SDValue Chain,
7069 SDValue Base, SDValue Offset, SDValue Mask,
7070 SDValue PassThru, EVT MemVT,
7071 MachineMemOperand *MMO,
7072 ISD::MemIndexedMode AM,
7073 ISD::LoadExtType ExtTy, bool isExpanding) {
7074 bool Indexed = AM != ISD::UNINDEXED;
7075 assert((Indexed || Offset.isUndef()) &&
7076 "Unindexed masked load with an offset!");
7077 SDVTList VTs = Indexed ? getVTList(VT, Base.getValueType(), MVT::Other)
7078 : getVTList(VT, MVT::Other);
7079 SDValue Ops[] = {Chain, Base, Offset, Mask, PassThru};
7080 FoldingSetNodeID ID;
7081 AddNodeIDNode(ID, ISD::MLOAD, VTs, Ops);
7082 ID.AddInteger(MemVT.getRawBits());
7083 ID.AddInteger(getSyntheticNodeSubclassData<MaskedLoadSDNode>(
7084 dl.getIROrder(), VTs, AM, ExtTy, isExpanding, MemVT, MMO));
7085 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
7086 void *IP = nullptr;
7087 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
7088 cast<MaskedLoadSDNode>(E)->refineAlignment(MMO);
7089 return SDValue(E, 0);
7090 }
7091 auto *N = newSDNode<MaskedLoadSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs,
7092 AM, ExtTy, isExpanding, MemVT, MMO);
7093 createOperands(N, Ops);
7094
7095 CSEMap.InsertNode(N, IP);
7096 InsertNode(N);
7097 SDValue V(N, 0);
7098 NewSDValueDbgMsg(V, "Creating new node: ", this);
7099 return V;
7100 }
7101
7102 SDValue SelectionDAG::getIndexedMaskedLoad(SDValue OrigLoad, const SDLoc &dl,
7103 SDValue Base, SDValue Offset,
7104 ISD::MemIndexedMode AM) {
7105 MaskedLoadSDNode *LD = cast<MaskedLoadSDNode>(OrigLoad);
7106 assert(LD->getOffset().isUndef() && "Masked load is already an indexed load!");
7107 return getMaskedLoad(OrigLoad.getValueType(), dl, LD->getChain(), Base,
7108 Offset, LD->getMask(), LD->getPassThru(),
7109 LD->getMemoryVT(), LD->getMemOperand(), AM,
7110 LD->getExtensionType(), LD->isExpandingLoad());
7111 }
7112
7113 SDValue SelectionDAG::getMaskedStore(SDValue Chain, const SDLoc &dl,
7114 SDValue Val, SDValue Base, SDValue Offset,
7115 SDValue Mask, EVT MemVT,
7116 MachineMemOperand *MMO,
7117 ISD::MemIndexedMode AM, bool IsTruncating,
7118 bool IsCompressing) {
7119 assert(Chain.getValueType() == MVT::Other &&
7120 "Invalid chain type");
7121 bool Indexed = AM != ISD::UNINDEXED;
7122 assert((Indexed || Offset.isUndef()) &&
7123 "Unindexed masked store with an offset!");
7124 SDVTList VTs = Indexed ? getVTList(Base.getValueType(), MVT::Other)
7125 : getVTList(MVT::Other);
7126 SDValue Ops[] = {Chain, Val, Base, Offset, Mask};
7127 FoldingSetNodeID ID;
7128 AddNodeIDNode(ID, ISD::MSTORE, VTs, Ops);
7129 ID.AddInteger(MemVT.getRawBits());
7130 ID.AddInteger(getSyntheticNodeSubclassData<MaskedStoreSDNode>(
7131 dl.getIROrder(), VTs, AM, IsTruncating, IsCompressing, MemVT, MMO));
7132 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
7133 void *IP = nullptr;
7134 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
7135 cast<MaskedStoreSDNode>(E)->refineAlignment(MMO);
7136 return SDValue(E, 0);
7137 }
7138 auto *N =
7139 newSDNode<MaskedStoreSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs, AM,
7140 IsTruncating, IsCompressing, MemVT, MMO);
7141 createOperands(N, Ops);
7142
7143 CSEMap.InsertNode(N, IP);
7144 InsertNode(N);
7145 SDValue V(N, 0);
7146 NewSDValueDbgMsg(V, "Creating new node: ", this);
7147 return V;
7148 }
7149
7150 SDValue SelectionDAG::getIndexedMaskedStore(SDValue OrigStore, const SDLoc &dl,
7151 SDValue Base, SDValue Offset,
7152 ISD::MemIndexedMode AM) {
7153 MaskedStoreSDNode *ST = cast<MaskedStoreSDNode>(OrigStore);
7154 assert(ST->getOffset().isUndef() &&
7155 "Masked store is already an indexed store!");
7156 return getMaskedStore(ST->getChain(), dl, ST->getValue(), Base, Offset,
7157 ST->getMask(), ST->getMemoryVT(), ST->getMemOperand(),
7158 AM, ST->isTruncatingStore(), ST->isCompressingStore());
7159 }
7160
7161 SDValue SelectionDAG::getMaskedGather(SDVTList VTs, EVT VT, const SDLoc &dl,
7162 ArrayRef<SDValue> Ops,
7163 MachineMemOperand *MMO,
7164 ISD::MemIndexType IndexType) {
  assert(Ops.size() == 6 && "Incompatible number of operands");

  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::MGATHER, VTs, Ops);
  ID.AddInteger(VT.getRawBits());
  ID.AddInteger(getSyntheticNodeSubclassData<MaskedGatherSDNode>(
      dl.getIROrder(), VTs, VT, MMO, IndexType));
  ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
    cast<MaskedGatherSDNode>(E)->refineAlignment(MMO);
    return SDValue(E, 0);
  }

  auto *N = newSDNode<MaskedGatherSDNode>(dl.getIROrder(), dl.getDebugLoc(),
                                          VTs, VT, MMO, IndexType);
  createOperands(N, Ops);

  assert(N->getPassThru().getValueType() == N->getValueType(0) &&
         "Incompatible type of the PassThru value in MaskedGatherSDNode");
  assert(N->getMask().getValueType().getVectorNumElements() ==
             N->getValueType(0).getVectorNumElements() &&
         "Vector width mismatch between mask and data");
  assert(N->getIndex().getValueType().getVectorNumElements() >=
             N->getValueType(0).getVectorNumElements() &&
         "Vector width mismatch between index and data");
  assert(isa<ConstantSDNode>(N->getScale()) &&
         cast<ConstantSDNode>(N->getScale())->getAPIntValue().isPowerOf2() &&
         "Scale should be a constant power of 2");

  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  SDValue V(N, 0);
  NewSDValueDbgMsg(V, "Creating new node: ", this);
  return V;
}

SDValue SelectionDAG::getMaskedScatter(SDVTList VTs, EVT VT, const SDLoc &dl,
                                       ArrayRef<SDValue> Ops,
                                       MachineMemOperand *MMO,
                                       ISD::MemIndexType IndexType) {
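  // As with MGATHER, the six operands follow the fixed layout implied by the
  // accessors asserted on below, which should be:
  //   {Chain, Value, Mask, BasePtr, Index, Scale}.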
  assert(Ops.size() == 6 && "Incompatible number of operands");

  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::MSCATTER, VTs, Ops);
  ID.AddInteger(VT.getRawBits());
  ID.AddInteger(getSyntheticNodeSubclassData<MaskedScatterSDNode>(
      dl.getIROrder(), VTs, VT, MMO, IndexType));
  ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
    cast<MaskedScatterSDNode>(E)->refineAlignment(MMO);
    return SDValue(E, 0);
  }
  auto *N = newSDNode<MaskedScatterSDNode>(dl.getIROrder(), dl.getDebugLoc(),
                                           VTs, VT, MMO, IndexType);
  createOperands(N, Ops);

  assert(N->getMask().getValueType().getVectorNumElements() ==
             N->getValue().getValueType().getVectorNumElements() &&
         "Vector width mismatch between mask and data");
  assert(N->getIndex().getValueType().getVectorNumElements() >=
             N->getValue().getValueType().getVectorNumElements() &&
         "Vector width mismatch between index and data");
  assert(isa<ConstantSDNode>(N->getScale()) &&
         cast<ConstantSDNode>(N->getScale())->getAPIntValue().isPowerOf2() &&
         "Scale should be a constant power of 2");

  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  SDValue V(N, 0);
  NewSDValueDbgMsg(V, "Creating new node: ", this);
  return V;
}

SDValue SelectionDAG::simplifySelect(SDValue Cond, SDValue T, SDValue F) {
  // select undef, T, F --> T (if T is a constant), otherwise F
  // select ?, undef, F --> F
  // select ?, T, undef --> T
  if (Cond.isUndef())
    return isConstantValueOfAnyType(T) ? T : F;
  if (T.isUndef())
    return F;
  if (F.isUndef())
    return T;

  // select true, T, F --> T
  // select false, T, F --> F
  if (auto *CondC = dyn_cast<ConstantSDNode>(Cond))
    return CondC->isNullValue() ? F : T;

  // TODO: This should simplify VSELECT with constant condition using something
  // like this (but check boolean contents to be complete?):
  // if (ISD::isBuildVectorAllOnes(Cond.getNode()))
  //   return T;
  // if (ISD::isBuildVectorAllZeros(Cond.getNode()))
  //   return F;

  // select ?, T, T --> T
  if (T == F)
    return T;

  return SDValue();
}

SDValue SelectionDAG::simplifyShift(SDValue X, SDValue Y) {
  // shift undef, Y --> 0 (can always assume that the undef value is 0)
  if (X.isUndef())
    return getConstant(0, SDLoc(X.getNode()), X.getValueType());
  // shift X, undef --> undef (because it may shift by the bitwidth)
  if (Y.isUndef())
    return getUNDEF(X.getValueType());

  // shift 0, Y --> 0
  // shift X, 0 --> X
  if (isNullOrNullSplat(X) || isNullOrNullSplat(Y))
    return X;

  // shift X, C >= bitwidth(X) --> undef
  // All vector elements must be too big (or undef) to avoid partial undefs.
  auto isShiftTooBig = [X](ConstantSDNode *Val) {
    return !Val || Val->getAPIntValue().uge(X.getScalarValueSizeInBits());
  };
  if (ISD::matchUnaryPredicate(Y, isShiftTooBig, true))
    return getUNDEF(X.getValueType());

  return SDValue();
}

// TODO: Use fast-math-flags to enable more simplifications.
SDValue SelectionDAG::simplifyFPBinop(unsigned Opcode, SDValue X, SDValue Y) {
  ConstantFPSDNode *YC = isConstOrConstSplatFP(Y, /* AllowUndefs */ true);
  if (!YC)
    return SDValue();

  // X + -0.0 --> X
  if (Opcode == ISD::FADD)
    if (YC->getValueAPF().isNegZero())
      return X;

  // X - +0.0 --> X
  if (Opcode == ISD::FSUB)
    if (YC->getValueAPF().isPosZero())
      return X;

  // X * 1.0 --> X
  // X / 1.0 --> X
  if (Opcode == ISD::FMUL || Opcode == ISD::FDIV)
    if (YC->getValueAPF().isExactlyValue(1.0))
      return X;

  return SDValue();
}
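
// Example (illustrative sketch only): node-construction paths can try these
// simplifiers before memoizing a new node, so trivial folds never allocate.
// A hypothetical caller with operands N1 and N2 might do:
//   if (SDValue V = simplifyShift(N1, N2))
//     return V;  // e.g. (shl X, 0) folds straight back to X.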

SDValue SelectionDAG::getVAArg(EVT VT, const SDLoc &dl, SDValue Chain,
                               SDValue Ptr, SDValue SV, unsigned Align) {
  SDValue Ops[] = { Chain, Ptr, SV, getTargetConstant(Align, dl, MVT::i32) };
  return getNode(ISD::VAARG, dl, getVTList(VT, MVT::Other), Ops);
}

SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
                              ArrayRef<SDUse> Ops) {
  switch (Ops.size()) {
  case 0: return getNode(Opcode, DL, VT);
  case 1: return getNode(Opcode, DL, VT, static_cast<const SDValue>(Ops[0]));
  case 2: return getNode(Opcode, DL, VT, Ops[0], Ops[1]);
  case 3: return getNode(Opcode, DL, VT, Ops[0], Ops[1], Ops[2]);
  default: break;
  }

  // Copy from an SDUse array into an SDValue array for use with
  // the regular getNode logic.
  SmallVector<SDValue, 8> NewOps(Ops.begin(), Ops.end());
  return getNode(Opcode, DL, VT, NewOps);
}

SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
                              ArrayRef<SDValue> Ops, const SDNodeFlags Flags) {
  unsigned NumOps = Ops.size();
  switch (NumOps) {
  case 0: return getNode(Opcode, DL, VT);
  case 1: return getNode(Opcode, DL, VT, Ops[0], Flags);
  case 2: return getNode(Opcode, DL, VT, Ops[0], Ops[1], Flags);
  case 3: return getNode(Opcode, DL, VT, Ops[0], Ops[1], Ops[2], Flags);
  default: break;
  }

  switch (Opcode) {
  default: break;
  case ISD::BUILD_VECTOR:
    // Attempt to simplify BUILD_VECTOR.
    if (SDValue V = FoldBUILD_VECTOR(DL, VT, Ops, *this))
      return V;
    break;
  case ISD::CONCAT_VECTORS:
    if (SDValue V = foldCONCAT_VECTORS(DL, VT, Ops, *this))
      return V;
    break;
  case ISD::SELECT_CC:
    assert(NumOps == 5 && "SELECT_CC takes 5 operands!");
    assert(Ops[0].getValueType() == Ops[1].getValueType() &&
           "LHS and RHS of condition must have same type!");
    assert(Ops[2].getValueType() == Ops[3].getValueType() &&
           "True and False arms of SelectCC must have same type!");
    assert(Ops[2].getValueType() == VT &&
           "select_cc node must be of same type as true and false value!");
    break;
  case ISD::BR_CC:
    assert(NumOps == 5 && "BR_CC takes 5 operands!");
    assert(Ops[2].getValueType() == Ops[3].getValueType() &&
           "LHS/RHS of comparison should match types!");
    break;
  }

  // Memoize nodes.
  SDNode *N;
  SDVTList VTs = getVTList(VT);

  if (VT != MVT::Glue) {
    FoldingSetNodeID ID;
    AddNodeIDNode(ID, Opcode, VTs, Ops);
    void *IP = nullptr;

    if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP))
      return SDValue(E, 0);

    N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
    createOperands(N, Ops);

    CSEMap.InsertNode(N, IP);
  } else {
    N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
    createOperands(N, Ops);
  }

  InsertNode(N);
  SDValue V(N, 0);
  NewSDValueDbgMsg(V, "Creating new node: ", this);
  return V;
}

SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL,
                              ArrayRef<EVT> ResultTys, ArrayRef<SDValue> Ops) {
  return getNode(Opcode, DL, getVTList(ResultTys), Ops);
}

SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
                              ArrayRef<SDValue> Ops) {
  if (VTList.NumVTs == 1)
    return getNode(Opcode, DL, VTList.VTs[0], Ops);

  switch (Opcode) {
  case ISD::STRICT_FP_EXTEND:
    assert(VTList.NumVTs == 2 && Ops.size() == 2 &&
           "Invalid STRICT_FP_EXTEND!");
    assert(VTList.VTs[0].isFloatingPoint() &&
           Ops[1].getValueType().isFloatingPoint() && "Invalid FP cast!");
    assert(VTList.VTs[0].isVector() == Ops[1].getValueType().isVector() &&
           "STRICT_FP_EXTEND result type should be vector iff the operand "
           "type is vector!");
    assert((!VTList.VTs[0].isVector() ||
            VTList.VTs[0].getVectorNumElements() ==
                Ops[1].getValueType().getVectorNumElements()) &&
           "Vector element count mismatch!");
    assert(Ops[1].getValueType().bitsLT(VTList.VTs[0]) &&
           "Invalid fpext node, dst <= src!");
    break;
  case ISD::STRICT_FP_ROUND:
    assert(VTList.NumVTs == 2 && Ops.size() == 3 && "Invalid STRICT_FP_ROUND!");
    assert(VTList.VTs[0].isVector() == Ops[1].getValueType().isVector() &&
           "STRICT_FP_ROUND result type should be vector iff the operand "
           "type is vector!");
    assert((!VTList.VTs[0].isVector() ||
            VTList.VTs[0].getVectorNumElements() ==
                Ops[1].getValueType().getVectorNumElements()) &&
           "Vector element count mismatch!");
    assert(VTList.VTs[0].isFloatingPoint() &&
           Ops[1].getValueType().isFloatingPoint() &&
           VTList.VTs[0].bitsLT(Ops[1].getValueType()) &&
           isa<ConstantSDNode>(Ops[2]) &&
           (cast<ConstantSDNode>(Ops[2])->getZExtValue() == 0 ||
            cast<ConstantSDNode>(Ops[2])->getZExtValue() == 1) &&
           "Invalid STRICT_FP_ROUND!");
    break;
#if 0
  // FIXME: figure out how to safely handle things like
  // int foo(int x) { return 1 << (x & 255); }
  // int bar() { return foo(256); }
  case ISD::SRA_PARTS:
  case ISD::SRL_PARTS:
  case ISD::SHL_PARTS:
    if (N3.getOpcode() == ISD::SIGN_EXTEND_INREG &&
        cast<VTSDNode>(N3.getOperand(1))->getVT() != MVT::i1)
      return getNode(Opcode, DL, VT, N1, N2, N3.getOperand(0));
    else if (N3.getOpcode() == ISD::AND)
      if (ConstantSDNode *AndRHS = dyn_cast<ConstantSDNode>(N3.getOperand(1))) {
        // If the and is only masking out bits that cannot affect the shift,
        // eliminate the and.
        unsigned NumBits = VT.getScalarSizeInBits()*2;
        if ((AndRHS->getValue() & (NumBits-1)) == NumBits-1)
          return getNode(Opcode, DL, VT, N1, N2, N3.getOperand(0));
      }
    break;
#endif
  }

  // Memoize the node unless it returns a flag.
  SDNode *N;
  if (VTList.VTs[VTList.NumVTs-1] != MVT::Glue) {
    FoldingSetNodeID ID;
    AddNodeIDNode(ID, Opcode, VTList, Ops);
    void *IP = nullptr;
    if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP))
      return SDValue(E, 0);

    N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTList);
    createOperands(N, Ops);
    CSEMap.InsertNode(N, IP);
  } else {
    N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTList);
    createOperands(N, Ops);
  }
  InsertNode(N);
  SDValue V(N, 0);
  NewSDValueDbgMsg(V, "Creating new node: ", this);
  return V;
}

SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL,
                              SDVTList VTList) {
  return getNode(Opcode, DL, VTList, None);
}

SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
                              SDValue N1) {
  SDValue Ops[] = { N1 };
  return getNode(Opcode, DL, VTList, Ops);
}

SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
                              SDValue N1, SDValue N2) {
  SDValue Ops[] = { N1, N2 };
  return getNode(Opcode, DL, VTList, Ops);
}

SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
                              SDValue N1, SDValue N2, SDValue N3) {
  SDValue Ops[] = { N1, N2, N3 };
  return getNode(Opcode, DL, VTList, Ops);
}

SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
                              SDValue N1, SDValue N2, SDValue N3, SDValue N4) {
  SDValue Ops[] = { N1, N2, N3, N4 };
  return getNode(Opcode, DL, VTList, Ops);
}

SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
                              SDValue N1, SDValue N2, SDValue N3, SDValue N4,
                              SDValue N5) {
  SDValue Ops[] = { N1, N2, N3, N4, N5 };
  return getNode(Opcode, DL, VTList, Ops);
}

SDVTList SelectionDAG::getVTList(EVT VT) {
  return makeVTList(SDNode::getValueTypeList(VT), 1);
}

SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2) {
  FoldingSetNodeID ID;
  ID.AddInteger(2U);
  ID.AddInteger(VT1.getRawBits());
  ID.AddInteger(VT2.getRawBits());

  void *IP = nullptr;
  SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP);
  if (!Result) {
    EVT *Array = Allocator.Allocate<EVT>(2);
    Array[0] = VT1;
    Array[1] = VT2;
    Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, 2);
    VTListMap.InsertNode(Result, IP);
  }
  return Result->getSDVTList();
}

SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2, EVT VT3) {
  FoldingSetNodeID ID;
  ID.AddInteger(3U);
  ID.AddInteger(VT1.getRawBits());
  ID.AddInteger(VT2.getRawBits());
  ID.AddInteger(VT3.getRawBits());

  void *IP = nullptr;
  SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP);
  if (!Result) {
    EVT *Array = Allocator.Allocate<EVT>(3);
    Array[0] = VT1;
    Array[1] = VT2;
    Array[2] = VT3;
    Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, 3);
    VTListMap.InsertNode(Result, IP);
  }
  return Result->getSDVTList();
}

SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2, EVT VT3, EVT VT4) {
  FoldingSetNodeID ID;
  ID.AddInteger(4U);
  ID.AddInteger(VT1.getRawBits());
  ID.AddInteger(VT2.getRawBits());
  ID.AddInteger(VT3.getRawBits());
  ID.AddInteger(VT4.getRawBits());

  void *IP = nullptr;
  SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP);
  if (!Result) {
    EVT *Array = Allocator.Allocate<EVT>(4);
    Array[0] = VT1;
    Array[1] = VT2;
    Array[2] = VT3;
    Array[3] = VT4;
    Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, 4);
    VTListMap.InsertNode(Result, IP);
  }
  return Result->getSDVTList();
}

SDVTList SelectionDAG::getVTList(ArrayRef<EVT> VTs) {
  unsigned NumVTs = VTs.size();
  FoldingSetNodeID ID;
  ID.AddInteger(NumVTs);
  for (unsigned index = 0; index < NumVTs; index++) {
    ID.AddInteger(VTs[index].getRawBits());
  }

  void *IP = nullptr;
  SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP);
  if (!Result) {
    EVT *Array = Allocator.Allocate<EVT>(NumVTs);
    llvm::copy(VTs, Array);
    Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, NumVTs);
    VTListMap.InsertNode(Result, IP);
  }
  return Result->getSDVTList();
}

/// UpdateNodeOperands - *Mutate* the specified node in-place to have the
/// specified operands. If the resultant node already exists in the DAG,
/// this does not modify the specified node, instead it returns the node that
/// already exists. If the resultant node does not exist in the DAG, the
/// input node is returned. As a degenerate case, if you specify the same
/// input operands as the node already has, the input node is returned.
SDNode *SelectionDAG::UpdateNodeOperands(SDNode *N, SDValue Op) {
  assert(N->getNumOperands() == 1 && "Update with wrong number of operands");

  // Check to see if there is no change.
  if (Op == N->getOperand(0)) return N;

  // See if the modified node already exists.
  void *InsertPos = nullptr;
  if (SDNode *Existing = FindModifiedNodeSlot(N, Op, InsertPos))
    return Existing;

  // Nope it doesn't. Remove the node from its current place in the maps.
  if (InsertPos)
    if (!RemoveNodeFromCSEMaps(N))
      InsertPos = nullptr;

  // Now we update the operands.
  N->OperandList[0].set(Op);

  updateDivergence(N);
  // If this gets put into a CSE map, add it.
  if (InsertPos) CSEMap.InsertNode(N, InsertPos);
  return N;
}

SDNode *SelectionDAG::UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2) {
  assert(N->getNumOperands() == 2 && "Update with wrong number of operands");

  // Check to see if there is no change.
  if (Op1 == N->getOperand(0) && Op2 == N->getOperand(1))
    return N;   // No operands changed, just return the input node.

  // See if the modified node already exists.
  void *InsertPos = nullptr;
  if (SDNode *Existing = FindModifiedNodeSlot(N, Op1, Op2, InsertPos))
    return Existing;

  // Nope it doesn't. Remove the node from its current place in the maps.
  if (InsertPos)
    if (!RemoveNodeFromCSEMaps(N))
      InsertPos = nullptr;

  // Now we update the operands.
  if (N->OperandList[0] != Op1)
    N->OperandList[0].set(Op1);
  if (N->OperandList[1] != Op2)
    N->OperandList[1].set(Op2);

  updateDivergence(N);
  // If this gets put into a CSE map, add it.
  if (InsertPos) CSEMap.InsertNode(N, InsertPos);
  return N;
}

SDNode *SelectionDAG::
UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2, SDValue Op3) {
  SDValue Ops[] = { Op1, Op2, Op3 };
  return UpdateNodeOperands(N, Ops);
}

SDNode *SelectionDAG::
UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2,
                   SDValue Op3, SDValue Op4) {
  SDValue Ops[] = { Op1, Op2, Op3, Op4 };
  return UpdateNodeOperands(N, Ops);
}

SDNode *SelectionDAG::
UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2,
                   SDValue Op3, SDValue Op4, SDValue Op5) {
  SDValue Ops[] = { Op1, Op2, Op3, Op4, Op5 };
  return UpdateNodeOperands(N, Ops);
}

SDNode *SelectionDAG::
UpdateNodeOperands(SDNode *N, ArrayRef<SDValue> Ops) {
  unsigned NumOps = Ops.size();
  assert(N->getNumOperands() == NumOps &&
         "Update with wrong number of operands");

  // If no operands changed just return the input node.
  if (std::equal(Ops.begin(), Ops.end(), N->op_begin()))
    return N;

  // See if the modified node already exists.
  void *InsertPos = nullptr;
  if (SDNode *Existing = FindModifiedNodeSlot(N, Ops, InsertPos))
    return Existing;

  // Nope it doesn't. Remove the node from its current place in the maps.
  if (InsertPos)
    if (!RemoveNodeFromCSEMaps(N))
      InsertPos = nullptr;

  // Now we update the operands.
  for (unsigned i = 0; i != NumOps; ++i)
    if (N->OperandList[i] != Ops[i])
      N->OperandList[i].set(Ops[i]);

  updateDivergence(N);
  // If this gets put into a CSE map, add it.
  if (InsertPos) CSEMap.InsertNode(N, InsertPos);
  return N;
}
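
// Example (illustrative sketch only): a hypothetical caller replacing one
// operand. If an equivalent node already exists, the original node N is left
// untouched and the pre-existing node is returned instead:
//   SDNode *Res = DAG.UpdateNodeOperands(N, NewOp0, N->getOperand(1));
//   if (Res != N) {
//     // CSE hit: reuse Res; N still has its old operands.
//   }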

/// DropOperands - Release the operands and set this node to have
/// zero operands.
void SDNode::DropOperands() {
  // Unlike the code in MorphNodeTo that does this, we don't need to
  // watch for dead nodes here.
  for (op_iterator I = op_begin(), E = op_end(); I != E; ) {
    SDUse &Use = *I++;
    Use.set(SDValue());
  }
}

void SelectionDAG::setNodeMemRefs(MachineSDNode *N,
                                  ArrayRef<MachineMemOperand *> NewMemRefs) {
  if (NewMemRefs.empty()) {
    N->clearMemRefs();
    return;
  }

  // Check if we can avoid allocating by storing a single reference directly.
  if (NewMemRefs.size() == 1) {
    N->MemRefs = NewMemRefs[0];
    N->NumMemRefs = 1;
    return;
  }

  MachineMemOperand **MemRefsBuffer =
      Allocator.template Allocate<MachineMemOperand *>(NewMemRefs.size());
  llvm::copy(NewMemRefs, MemRefsBuffer);
  N->MemRefs = MemRefsBuffer;
  N->NumMemRefs = static_cast<int>(NewMemRefs.size());
}

/// SelectNodeTo - These are wrappers around MorphNodeTo that accept a
/// machine opcode.
///
SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
                                   EVT VT) {
  SDVTList VTs = getVTList(VT);
  return SelectNodeTo(N, MachineOpc, VTs, None);
}

SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
                                   EVT VT, SDValue Op1) {
  SDVTList VTs = getVTList(VT);
  SDValue Ops[] = { Op1 };
  return SelectNodeTo(N, MachineOpc, VTs, Ops);
}

SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
                                   EVT VT, SDValue Op1,
                                   SDValue Op2) {
  SDVTList VTs = getVTList(VT);
  SDValue Ops[] = { Op1, Op2 };
  return SelectNodeTo(N, MachineOpc, VTs, Ops);
}

SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
                                   EVT VT, SDValue Op1,
                                   SDValue Op2, SDValue Op3) {
  SDVTList VTs = getVTList(VT);
  SDValue Ops[] = { Op1, Op2, Op3 };
  return SelectNodeTo(N, MachineOpc, VTs, Ops);
}

SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
                                   EVT VT, ArrayRef<SDValue> Ops) {
  SDVTList VTs = getVTList(VT);
  return SelectNodeTo(N, MachineOpc, VTs, Ops);
}

SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
                                   EVT VT1, EVT VT2, ArrayRef<SDValue> Ops) {
  SDVTList VTs = getVTList(VT1, VT2);
  return SelectNodeTo(N, MachineOpc, VTs, Ops);
}

SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
                                   EVT VT1, EVT VT2) {
  SDVTList VTs = getVTList(VT1, VT2);
  return SelectNodeTo(N, MachineOpc, VTs, None);
}

SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
                                   EVT VT1, EVT VT2, EVT VT3,
                                   ArrayRef<SDValue> Ops) {
  SDVTList VTs = getVTList(VT1, VT2, VT3);
  return SelectNodeTo(N, MachineOpc, VTs, Ops);
}

SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
                                   EVT VT1, EVT VT2,
                                   SDValue Op1, SDValue Op2) {
  SDVTList VTs = getVTList(VT1, VT2);
  SDValue Ops[] = { Op1, Op2 };
  return SelectNodeTo(N, MachineOpc, VTs, Ops);
}

SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
                                   SDVTList VTs, ArrayRef<SDValue> Ops) {
  SDNode *New = MorphNodeTo(N, ~MachineOpc, VTs, Ops);
  // Reset the NodeID to -1.
  New->setNodeId(-1);
  if (New != N) {
    ReplaceAllUsesWith(N, New);
    RemoveDeadNode(N);
  }
  return New;
}
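
// Example (illustrative sketch only): instruction selection for a node might
// morph it into a machine node in place, assuming the target defines a
// suitable machine opcode (MYTGT::ADDrr here is hypothetical):
//   SDNode *MN = CurDAG->SelectNodeTo(N, MYTGT::ADDrr, N->getValueType(0),
//                                     N->getOperand(0), N->getOperand(1));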

/// UpdateSDLocOnMergeSDNode - If the opt level is -O0 then it throws away
/// the line number information on the merged node since it is not possible to
/// preserve the information that operation is associated with multiple lines.
/// This will make the debugger work better at -O0, where there is a higher
/// probability of having other instructions associated with that line.
///
/// For IROrder, we keep the smaller of the two.
SDNode *SelectionDAG::UpdateSDLocOnMergeSDNode(SDNode *N, const SDLoc &OLoc) {
  DebugLoc NLoc = N->getDebugLoc();
  if (NLoc && OptLevel == CodeGenOpt::None && OLoc.getDebugLoc() != NLoc) {
    N->setDebugLoc(DebugLoc());
  }
  unsigned Order = std::min(N->getIROrder(), OLoc.getIROrder());
  N->setIROrder(Order);
  return N;
}

/// MorphNodeTo - This *mutates* the specified node to have the specified
/// return type, opcode, and operands.
///
/// Note that MorphNodeTo returns the resultant node. If there is already a
/// node of the specified opcode and operands, it returns that node instead of
/// the current one. Note that the SDLoc need not be the same.
///
/// Using MorphNodeTo is faster than creating a new node and swapping it in
/// with ReplaceAllUsesWith both because it often avoids allocating a new
/// node, and because it doesn't require CSE recalculation for any of
/// the node's users.
///
/// However, note that MorphNodeTo recursively deletes dead nodes from the DAG.
/// As a consequence it isn't appropriate to use from within the DAG combiner or
/// the legalizer which maintain worklists that would need to be updated when
/// deleting things.
SDNode *SelectionDAG::MorphNodeTo(SDNode *N, unsigned Opc,
                                  SDVTList VTs, ArrayRef<SDValue> Ops) {
  // If an identical node already exists, use it.
  void *IP = nullptr;
  if (VTs.VTs[VTs.NumVTs-1] != MVT::Glue) {
    FoldingSetNodeID ID;
    AddNodeIDNode(ID, Opc, VTs, Ops);
    if (SDNode *ON = FindNodeOrInsertPos(ID, SDLoc(N), IP))
      return UpdateSDLocOnMergeSDNode(ON, SDLoc(N));
  }

  if (!RemoveNodeFromCSEMaps(N))
    IP = nullptr;

  // Start the morphing.
  N->NodeType = Opc;
  N->ValueList = VTs.VTs;
  N->NumValues = VTs.NumVTs;

  // Clear the operands list, updating used nodes to remove this from their
  // use list. Keep track of any operands that become dead as a result.
  SmallPtrSet<SDNode*, 16> DeadNodeSet;
  for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ) {
    SDUse &Use = *I++;
    SDNode *Used = Use.getNode();
    Use.set(SDValue());
    if (Used->use_empty())
      DeadNodeSet.insert(Used);
  }

  // For MachineNode, initialize the memory references information.
  if (MachineSDNode *MN = dyn_cast<MachineSDNode>(N))
    MN->clearMemRefs();

  // Swap for an appropriately sized array from the recycler.
  removeOperands(N);
  createOperands(N, Ops);

  // Delete any nodes that are still dead after adding the uses for the
  // new operands.
  if (!DeadNodeSet.empty()) {
    SmallVector<SDNode *, 16> DeadNodes;
    for (SDNode *N : DeadNodeSet)
      if (N->use_empty())
        DeadNodes.push_back(N);
    RemoveDeadNodes(DeadNodes);
  }

  if (IP)
    CSEMap.InsertNode(N, IP);   // Memoize the new node.
  return N;
}
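
// Example (illustrative sketch only): because MorphNodeTo may return a
// pre-existing node rather than mutating N, callers typically follow the
// pattern used by SelectNodeTo above and mutateStrictFPToFP below:
//   SDNode *Res = MorphNodeTo(N, NewOpc, VTs, NewOps);
//   if (Res != N) {
//     ReplaceAllUsesWith(N, Res);
//     RemoveDeadNode(N);
//   }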

SDNode* SelectionDAG::mutateStrictFPToFP(SDNode *Node) {
  unsigned OrigOpc = Node->getOpcode();
  unsigned NewOpc;
  switch (OrigOpc) {
  default:
    llvm_unreachable("mutateStrictFPToFP called with unexpected opcode!");
#define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN)                   \
  case ISD::STRICT_##DAGN: NewOpc = ISD::DAGN; break;
#define CMP_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN)               \
  case ISD::STRICT_##DAGN: NewOpc = ISD::SETCC; break;
#include "llvm/IR/ConstrainedOps.def"
  }

  assert(Node->getNumValues() == 2 && "Unexpected number of results!");

  // We're taking this node out of the chain, so we need to re-link things.
  SDValue InputChain = Node->getOperand(0);
  SDValue OutputChain = SDValue(Node, 1);
  ReplaceAllUsesOfValueWith(OutputChain, InputChain);

  SmallVector<SDValue, 3> Ops;
  for (unsigned i = 1, e = Node->getNumOperands(); i != e; ++i)
    Ops.push_back(Node->getOperand(i));

  SDVTList VTs = getVTList(Node->getValueType(0));
  SDNode *Res = MorphNodeTo(Node, NewOpc, VTs, Ops);

  // MorphNodeTo can operate in two ways: if an existing node with the
  // specified operands exists, it can just return it. Otherwise, it
  // updates the node in place to have the requested operands.
  if (Res == Node) {
    // If we updated the node in place, reset the node ID. To the isel,
    // this should be just like a newly allocated machine node.
    Res->setNodeId(-1);
  } else {
    ReplaceAllUsesWith(Node, Res);
    RemoveDeadNode(Node);
  }

  return Res;
}

/// getMachineNode - These are used for target selectors to create a new node
/// with specified return type(s), MachineInstr opcode, and operands.
///
/// Note that getMachineNode returns the resultant node. If there is already a
/// node of the specified opcode and operands, it returns that node instead of
/// the current one.
MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
                                            EVT VT) {
  SDVTList VTs = getVTList(VT);
  return getMachineNode(Opcode, dl, VTs, None);
}

MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
                                            EVT VT, SDValue Op1) {
  SDVTList VTs = getVTList(VT);
  SDValue Ops[] = { Op1 };
  return getMachineNode(Opcode, dl, VTs, Ops);
}

MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
                                            EVT VT, SDValue Op1, SDValue Op2) {
  SDVTList VTs = getVTList(VT);
  SDValue Ops[] = { Op1, Op2 };
  return getMachineNode(Opcode, dl, VTs, Ops);
}

MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
                                            EVT VT, SDValue Op1, SDValue Op2,
                                            SDValue Op3) {
  SDVTList VTs = getVTList(VT);
  SDValue Ops[] = { Op1, Op2, Op3 };
  return getMachineNode(Opcode, dl, VTs, Ops);
}

MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
                                            EVT VT, ArrayRef<SDValue> Ops) {
  SDVTList VTs = getVTList(VT);
  return getMachineNode(Opcode, dl, VTs, Ops);
}

MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
                                            EVT VT1, EVT VT2, SDValue Op1,
                                            SDValue Op2) {
  SDVTList VTs = getVTList(VT1, VT2);
  SDValue Ops[] = { Op1, Op2 };
  return getMachineNode(Opcode, dl, VTs, Ops);
}

MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
                                            EVT VT1, EVT VT2, SDValue Op1,
                                            SDValue Op2, SDValue Op3) {
  SDVTList VTs = getVTList(VT1, VT2);
  SDValue Ops[] = { Op1, Op2, Op3 };
  return getMachineNode(Opcode, dl, VTs, Ops);
}

MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
                                            EVT VT1, EVT VT2,
                                            ArrayRef<SDValue> Ops) {
  SDVTList VTs = getVTList(VT1, VT2);
  return getMachineNode(Opcode, dl, VTs, Ops);
}

MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
                                            EVT VT1, EVT VT2, EVT VT3,
                                            SDValue Op1, SDValue Op2) {
  SDVTList VTs = getVTList(VT1, VT2, VT3);
  SDValue Ops[] = { Op1, Op2 };
  return getMachineNode(Opcode, dl, VTs, Ops);
}

MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
                                            EVT VT1, EVT VT2, EVT VT3,
                                            SDValue Op1, SDValue Op2,
                                            SDValue Op3) {
  SDVTList VTs = getVTList(VT1, VT2, VT3);
  SDValue Ops[] = { Op1, Op2, Op3 };
  return getMachineNode(Opcode, dl, VTs, Ops);
}

MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
                                            EVT VT1, EVT VT2, EVT VT3,
                                            ArrayRef<SDValue> Ops) {
  SDVTList VTs = getVTList(VT1, VT2, VT3);
  return getMachineNode(Opcode, dl, VTs, Ops);
}

MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
                                            ArrayRef<EVT> ResultTys,
                                            ArrayRef<SDValue> Ops) {
  SDVTList VTs = getVTList(ResultTys);
  return getMachineNode(Opcode, dl, VTs, Ops);
}

MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &DL,
                                            SDVTList VTs,
                                            ArrayRef<SDValue> Ops) {
  bool DoCSE = VTs.VTs[VTs.NumVTs-1] != MVT::Glue;
  MachineSDNode *N;
  void *IP = nullptr;

  if (DoCSE) {
    FoldingSetNodeID ID;
    AddNodeIDNode(ID, ~Opcode, VTs, Ops);
    IP = nullptr;
    if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) {
      return cast<MachineSDNode>(UpdateSDLocOnMergeSDNode(E, DL));
    }
  }

  // Allocate a new MachineSDNode.
  N = newSDNode<MachineSDNode>(~Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
  createOperands(N, Ops);

  if (DoCSE)
    CSEMap.InsertNode(N, IP);

  InsertNode(N);
  NewSDValueDbgMsg(SDValue(N, 0), "Creating new machine node: ", this);
  return N;
}
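
// Example (illustrative sketch only): a target's ISel might create a machine
// node with two results, assuming a hypothetical instruction MYTGT::SUBrr
// that also produces a glue value:
//   MachineSDNode *MN = CurDAG->getMachineNode(MYTGT::SUBrr, DL, MVT::i32,
//                                              MVT::Glue, LHS, RHS);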

/// getTargetExtractSubreg - A convenience function for creating
/// TargetOpcode::EXTRACT_SUBREG nodes.
SDValue SelectionDAG::getTargetExtractSubreg(int SRIdx, const SDLoc &DL, EVT VT,
                                             SDValue Operand) {
  SDValue SRIdxVal = getTargetConstant(SRIdx, DL, MVT::i32);
  SDNode *Subreg = getMachineNode(TargetOpcode::EXTRACT_SUBREG, DL,
                                  VT, Operand, SRIdxVal);
  return SDValue(Subreg, 0);
}
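
// Example (illustrative sketch only): extracting the low half of a 64-bit
// value, assuming the target defines a hypothetical sub_lo32 subregister
// index:
//   SDValue Lo =
//       DAG.getTargetExtractSubreg(MYTGT::sub_lo32, DL, MVT::i32, Val64);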

/// getTargetInsertSubreg - A convenience function for creating
/// TargetOpcode::INSERT_SUBREG nodes.
SDValue SelectionDAG::getTargetInsertSubreg(int SRIdx, const SDLoc &DL, EVT VT,
                                            SDValue Operand, SDValue Subreg) {
  SDValue SRIdxVal = getTargetConstant(SRIdx, DL, MVT::i32);
  SDNode *Result = getMachineNode(TargetOpcode::INSERT_SUBREG, DL,
                                  VT, Operand, Subreg, SRIdxVal);
  return SDValue(Result, 0);
}

/// getNodeIfExists - Get the specified node if it's already available, or
/// else return NULL.
SDNode *SelectionDAG::getNodeIfExists(unsigned Opcode, SDVTList VTList,
                                      ArrayRef<SDValue> Ops,
                                      const SDNodeFlags Flags) {
  if (VTList.VTs[VTList.NumVTs - 1] != MVT::Glue) {
    FoldingSetNodeID ID;
    AddNodeIDNode(ID, Opcode, VTList, Ops);
    void *IP = nullptr;
    if (SDNode *E = FindNodeOrInsertPos(ID, SDLoc(), IP)) {
      E->intersectFlagsWith(Flags);
      return E;
    }
  }
  return nullptr;
}
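
// Example (illustrative sketch only): a combine can probe for an existing
// node without creating one as a side effect:
//   SDValue Ops[] = {X, Y};
//   if (SDNode *E = DAG.getNodeIfExists(ISD::ADD, DAG.getVTList(VT), Ops)) {
//     // Reuse E instead of building a fresh ADD node.
//   }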

/// getDbgValue - Creates an SDDbgValue node.
///
/// SDNode
SDDbgValue *SelectionDAG::getDbgValue(DIVariable *Var, DIExpression *Expr,
                                      SDNode *N, unsigned R, bool IsIndirect,
                                      const DebugLoc &DL, unsigned O) {
  assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) &&
         "Expected inlined-at fields to agree");
  return new (DbgInfo->getAlloc())
      SDDbgValue(Var, Expr, N, R, IsIndirect, DL, O);
}

/// Constant
SDDbgValue *SelectionDAG::getConstantDbgValue(DIVariable *Var,
                                              DIExpression *Expr,
                                              const Value *C,
                                              const DebugLoc &DL, unsigned O) {
  assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) &&
         "Expected inlined-at fields to agree");
  return new (DbgInfo->getAlloc()) SDDbgValue(Var, Expr, C, DL, O);
}

/// FrameIndex
SDDbgValue *SelectionDAG::getFrameIndexDbgValue(DIVariable *Var,
                                                DIExpression *Expr, unsigned FI,
                                                bool IsIndirect,
                                                const DebugLoc &DL,
                                                unsigned O) {
  assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) &&
         "Expected inlined-at fields to agree");
  return new (DbgInfo->getAlloc())
      SDDbgValue(Var, Expr, FI, IsIndirect, DL, O, SDDbgValue::FRAMEIX);
}

/// VReg
SDDbgValue *SelectionDAG::getVRegDbgValue(DIVariable *Var,
                                          DIExpression *Expr,
                                          unsigned VReg, bool IsIndirect,
                                          const DebugLoc &DL, unsigned O) {
  assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) &&
         "Expected inlined-at fields to agree");
  return new (DbgInfo->getAlloc())
      SDDbgValue(Var, Expr, VReg, IsIndirect, DL, O, SDDbgValue::VREG);
}

void SelectionDAG::transferDbgValues(SDValue From, SDValue To,
                                     unsigned OffsetInBits, unsigned SizeInBits,
                                     bool InvalidateDbg) {
  SDNode *FromNode = From.getNode();
  SDNode *ToNode = To.getNode();
  assert(FromNode && ToNode && "Can't modify dbg values");

  // PR35338
  // TODO: assert(From != To && "Redundant dbg value transfer");
  // TODO: assert(FromNode != ToNode && "Intranode dbg value transfer");
  if (From == To || FromNode == ToNode)
    return;

  if (!FromNode->getHasDebugValue())
    return;

  SmallVector<SDDbgValue *, 2> ClonedDVs;
  for (SDDbgValue *Dbg : GetDbgValues(FromNode)) {
    if (Dbg->getKind() != SDDbgValue::SDNODE || Dbg->isInvalidated())
      continue;

    // TODO: assert(!Dbg->isInvalidated() && "Transfer of invalid dbg value");

    // Just transfer the dbg value attached to From.
    if (Dbg->getResNo() != From.getResNo())
      continue;

    DIVariable *Var = Dbg->getVariable();
    auto *Expr = Dbg->getExpression();
    // If a fragment is requested, update the expression.
    if (SizeInBits) {
      // When splitting a larger (e.g., sign-extended) value whose
      // lower bits are described with an SDDbgValue, do not attempt
      // to transfer the SDDbgValue to the upper bits.
      if (auto FI = Expr->getFragmentInfo())
        if (OffsetInBits + SizeInBits > FI->SizeInBits)
          continue;
      auto Fragment = DIExpression::createFragmentExpression(Expr, OffsetInBits,
                                                             SizeInBits);
      if (!Fragment)
        continue;
      Expr = *Fragment;
    }
    // Clone the SDDbgValue and move it to To.
    SDDbgValue *Clone = getDbgValue(
        Var, Expr, ToNode, To.getResNo(), Dbg->isIndirect(), Dbg->getDebugLoc(),
        std::max(ToNode->getIROrder(), Dbg->getOrder()));
    ClonedDVs.push_back(Clone);

    if (InvalidateDbg) {
      // Invalidate value and indicate the SDDbgValue should not be emitted.
      Dbg->setIsInvalidated();
      Dbg->setIsEmitted();
    }
  }

  for (SDDbgValue *Dbg : ClonedDVs)
    AddDbgValue(Dbg, ToNode, false);
}

void SelectionDAG::salvageDebugInfo(SDNode &N) {
  if (!N.getHasDebugValue())
    return;

  SmallVector<SDDbgValue *, 2> ClonedDVs;
  for (auto DV : GetDbgValues(&N)) {
    if (DV->isInvalidated())
      continue;
    switch (N.getOpcode()) {
    default:
      break;
    case ISD::ADD:
      SDValue N0 = N.getOperand(0);
      SDValue N1 = N.getOperand(1);
      if (!isConstantIntBuildVectorOrConstantInt(N0) &&
          isConstantIntBuildVectorOrConstantInt(N1)) {
        uint64_t Offset = N.getConstantOperandVal(1);
        // Rewrite an ADD constant node into a DIExpression. Since we are
        // performing arithmetic to compute the variable's *value* in the
        // DIExpression, we need to mark the expression with a
        // DW_OP_stack_value.
        auto *DIExpr = DV->getExpression();
        DIExpr =
            DIExpression::prepend(DIExpr, DIExpression::StackValue, Offset);
        SDDbgValue *Clone =
            getDbgValue(DV->getVariable(), DIExpr, N0.getNode(), N0.getResNo(),
                        DV->isIndirect(), DV->getDebugLoc(), DV->getOrder());
        ClonedDVs.push_back(Clone);
        DV->setIsInvalidated();
        DV->setIsEmitted();
        LLVM_DEBUG(dbgs() << "SALVAGE: Rewriting";
                   N0.getNode()->dumprFull(this);
                   dbgs() << " into " << *DIExpr << '\n');
      }
    }
  }

  for (SDDbgValue *Dbg : ClonedDVs)
    AddDbgValue(Dbg, Dbg->getSDNode(), false);
}

/// Creates an SDDbgLabel node.
SDDbgLabel *SelectionDAG::getDbgLabel(DILabel *Label,
                                      const DebugLoc &DL, unsigned O) {
  assert(cast<DILabel>(Label)->isValidLocationForIntrinsic(DL) &&
         "Expected inlined-at fields to agree");
  return new (DbgInfo->getAlloc()) SDDbgLabel(Label, DL, O);
}

namespace {

/// RAUWUpdateListener - Helper for ReplaceAllUsesWith - When the node
/// pointed to by a use iterator is deleted, increment the use iterator
/// so that it doesn't dangle.
///
class RAUWUpdateListener : public SelectionDAG::DAGUpdateListener {
  SDNode::use_iterator &UI;
  SDNode::use_iterator &UE;

  void NodeDeleted(SDNode *N, SDNode *E) override {
    // Increment the iterator as needed.
    while (UI != UE && N == *UI)
      ++UI;
  }

public:
  RAUWUpdateListener(SelectionDAG &d,
                     SDNode::use_iterator &ui,
                     SDNode::use_iterator &ue)
    : SelectionDAG::DAGUpdateListener(d), UI(ui), UE(ue) {}
};

} // end anonymous namespace

/// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead.
/// This can cause recursive merging of nodes in the DAG.
///
/// This version assumes From has a single result value.
void SelectionDAG::ReplaceAllUsesWith(SDValue FromN, SDValue To) {
  SDNode *From = FromN.getNode();
  assert(From->getNumValues() == 1 && FromN.getResNo() == 0 &&
         "Cannot replace with this method!");
  assert(From != To.getNode() && "Cannot replace uses of a value with itself");

  // Preserve Debug Values
  transferDbgValues(FromN, To);

  // Iterate over all the existing uses of From. New uses will be added
  // to the beginning of the use list, which we avoid visiting.
  // This specifically avoids visiting uses of From that arise while the
  // replacement is happening, because any such uses would be the result
  // of CSE: If an existing node looks like From after one of its operands
  // is replaced by To, we don't want to replace all of its users with To
  // too. See PR3018 for more info.
  SDNode::use_iterator UI = From->use_begin(), UE = From->use_end();
  RAUWUpdateListener Listener(*this, UI, UE);
  while (UI != UE) {
    SDNode *User = *UI;

    // This node is about to morph, remove its old self from the CSE maps.
    RemoveNodeFromCSEMaps(User);

    // A user can appear in a use list multiple times, and when this
    // happens the uses are usually next to each other in the list.
    // To help reduce the number of CSE recomputations, process all
    // the uses of this user that we can find this way.
    do {
      SDUse &Use = UI.getUse();
      ++UI;
      Use.set(To);
      if (To->isDivergent() != From->isDivergent())
        updateDivergence(User);
    } while (UI != UE && *UI == User);
    // Now that we have modified User, add it back to the CSE maps. If it
    // already exists there, recursively merge the results together.
    AddModifiedNodeToCSEMaps(User);
  }

  // If we just RAUW'd the root, take note.
  if (FromN == getRoot())
    setRoot(To);
}
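
// Example (illustrative sketch only): a combiner-style rewrite funnels
// through this entry point, e.g. replacing a single-result (add X, 0) node
// with X; afterwards the old node is dead and the caller can remove it:
//   DAG.ReplaceAllUsesWith(SDValue(AddNode, 0), X);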

/// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead.
/// This can cause recursive merging of nodes in the DAG.
///
/// This version assumes that for each value of From, there is a
/// corresponding value in To in the same position with the same type.
void SelectionDAG::ReplaceAllUsesWith(SDNode *From, SDNode *To) {
#ifndef NDEBUG
  for (unsigned i = 0, e = From->getNumValues(); i != e; ++i)
    assert((!From->hasAnyUseOfValue(i) ||
            From->getValueType(i) == To->getValueType(i)) &&
           "Cannot use this version of ReplaceAllUsesWith!");
#endif

  // Handle the trivial case.
  if (From == To)
    return;

  // Preserve Debug Info. Only do this if there's a use.
  for (unsigned i = 0, e = From->getNumValues(); i != e; ++i)
    if (From->hasAnyUseOfValue(i)) {
      assert((i < To->getNumValues()) && "Invalid To location");
      transferDbgValues(SDValue(From, i), SDValue(To, i));
    }

  // Iterate over just the existing users of From. See the comments in
  // the ReplaceAllUsesWith above.
  SDNode::use_iterator UI = From->use_begin(), UE = From->use_end();
  RAUWUpdateListener Listener(*this, UI, UE);
  while (UI != UE) {
    SDNode *User = *UI;

    // This node is about to morph, remove its old self from the CSE maps.
    RemoveNodeFromCSEMaps(User);

    // A user can appear in a use list multiple times, and when this
    // happens the uses are usually next to each other in the list.
    // To help reduce the number of CSE recomputations, process all
    // the uses of this user that we can find this way.
    do {
      SDUse &Use = UI.getUse();
      ++UI;
      Use.setNode(To);
      if (To->isDivergent() != From->isDivergent())
        updateDivergence(User);
    } while (UI != UE && *UI == User);

    // Now that we have modified User, add it back to the CSE maps. If it
    // already exists there, recursively merge the results together.
    AddModifiedNodeToCSEMaps(User);
  }

  // If we just RAUW'd the root, take note.
  if (From == getRoot().getNode())
    setRoot(SDValue(To, getRoot().getResNo()));
}

/// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead.
/// This can cause recursive merging of nodes in the DAG.
///
/// This version can replace From with any result values. To must match the
/// number and types of values returned by From.
void SelectionDAG::ReplaceAllUsesWith(SDNode *From, const SDValue *To) {
  if (From->getNumValues() == 1)  // Handle the simple case efficiently.
    return ReplaceAllUsesWith(SDValue(From, 0), To[0]);

  // Preserve Debug Info.
  for (unsigned i = 0, e = From->getNumValues(); i != e; ++i)
    transferDbgValues(SDValue(From, i), To[i]);

  // Iterate over just the existing users of From. See the comments in
  // the ReplaceAllUsesWith above.
  SDNode::use_iterator UI = From->use_begin(), UE = From->use_end();
  RAUWUpdateListener Listener(*this, UI, UE);
  while (UI != UE) {
    SDNode *User = *UI;

    // This node is about to morph, remove its old self from the CSE maps.
    RemoveNodeFromCSEMaps(User);

    // A user can appear in a use list multiple times, and when this happens
    // the uses are usually next to each other in the list. To help reduce the
    // number of CSE and divergence recomputations, process all the uses of
    // this user that we can find this way.
    bool To_IsDivergent = false;
    do {
      SDUse &Use = UI.getUse();
      const SDValue &ToOp = To[Use.getResNo()];
      ++UI;
      Use.set(ToOp);
      To_IsDivergent |= ToOp->isDivergent();
    } while (UI != UE && *UI == User);

    if (To_IsDivergent != From->isDivergent())
      updateDivergence(User);

    // Now that we have modified User, add it back to the CSE maps. If it
    // already exists there, recursively merge the results together.
    AddModifiedNodeToCSEMaps(User);
  }

  // If we just RAUW'd the root, take note.
  if (From == getRoot().getNode())
    setRoot(SDValue(To[getRoot().getResNo()]));
}

/// ReplaceAllUsesOfValueWith - Replace any uses of From with To, leaving
/// uses of other values produced by From.getNode() alone. The Deleted
/// vector is handled the same way as for ReplaceAllUsesWith.
void SelectionDAG::ReplaceAllUsesOfValueWith(SDValue From, SDValue To) {
  // Handle the really simple, really trivial case efficiently.
  if (From == To) return;

  // Handle the simple, trivial, case efficiently.
  if (From.getNode()->getNumValues() == 1) {
    ReplaceAllUsesWith(From, To);
    return;
  }

  // Preserve Debug Info.
  transferDbgValues(From, To);

  // Iterate over just the existing users of From. See the comments in
  // the ReplaceAllUsesWith above.
  SDNode::use_iterator UI = From.getNode()->use_begin(),
                       UE = From.getNode()->use_end();
  RAUWUpdateListener Listener(*this, UI, UE);
  while (UI != UE) {
    SDNode *User = *UI;
    bool UserRemovedFromCSEMaps = false;

    // A user can appear in a use list multiple times, and when this
    // happens the uses are usually next to each other in the list.
    // To help reduce the number of CSE recomputations, process all
    // the uses of this user that we can find this way.
    do {
      SDUse &Use = UI.getUse();

      // Skip uses of different values from the same node.
      if (Use.getResNo() != From.getResNo()) {
        ++UI;
        continue;
      }

      // If this node hasn't been modified yet, it's still in the CSE maps,
      // so remove its old self from the CSE maps.
      if (!UserRemovedFromCSEMaps) {
        RemoveNodeFromCSEMaps(User);
        UserRemovedFromCSEMaps = true;
      }

      ++UI;
      Use.set(To);
      if (To->isDivergent() != From->isDivergent())
        updateDivergence(User);
    } while (UI != UE && *UI == User);
    // We are iterating over all uses of the From node, so if a use
    // doesn't use the specific value, no changes are made.
    if (!UserRemovedFromCSEMaps)
      continue;

    // Now that we have modified User, add it back to the CSE maps. If it
    // already exists there, recursively merge the results together.
    AddModifiedNodeToCSEMaps(User);
  }

  // If we just RAUW'd the root, take note.
  if (From == getRoot())
    setRoot(To);
}

namespace {

/// UseMemo - This class is used by SelectionDAG::ReplaceAllUsesOfValuesWith
/// to record information about a use.
struct UseMemo {
  SDNode *User;
  unsigned Index;
  SDUse *Use;
};

/// operator< - Sort Memos by User.
bool operator<(const UseMemo &L, const UseMemo &R) {
  return (intptr_t)L.User < (intptr_t)R.User;
}

} // end anonymous namespace

void SelectionDAG::updateDivergence(SDNode *N) {
  if (TLI->isSDNodeAlwaysUniform(N))
    return;
  bool IsDivergent = TLI->isSDNodeSourceOfDivergence(N, FLI, DA);
  for (auto &Op : N->ops()) {
    if (Op.Val.getValueType() != MVT::Other)
      IsDivergent |= Op.getNode()->isDivergent();
  }
  if (N->SDNodeBits.IsDivergent != IsDivergent) {
    N->SDNodeBits.IsDivergent = IsDivergent;
    for (auto U : N->uses()) {
      updateDivergence(U);
    }
  }
}

void SelectionDAG::CreateTopologicalOrder(std::vector<SDNode *> &Order) {
  DenseMap<SDNode *, unsigned> Degree;
  Order.reserve(AllNodes.size());
  for (auto &N : allnodes()) {
    unsigned NOps = N.getNumOperands();
    Degree[&N] = NOps;
    if (0 == NOps)
      Order.push_back(&N);
  }
  for (size_t I = 0; I != Order.size(); ++I) {
    SDNode *N = Order[I];
    for (auto U : N->uses()) {
      unsigned &UnsortedOps = Degree[U];
      if (0 == --UnsortedOps)
        Order.push_back(U);
    }
  }
}
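
// Note: CreateTopologicalOrder is effectively Kahn's algorithm. It seeds the
// order with operand-less nodes and appends a user once its last remaining
// operand has been emitted, so every node appears after all of its operands.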
8572
8573 #ifndef NDEBUG
VerifyDAGDiverence()8574 void SelectionDAG::VerifyDAGDiverence() {
8575 std::vector<SDNode *> TopoOrder;
8576 CreateTopologicalOrder(TopoOrder);
8577 const TargetLowering &TLI = getTargetLoweringInfo();
8578 DenseMap<const SDNode *, bool> DivergenceMap;
8579 for (auto &N : allnodes()) {
8580 DivergenceMap[&N] = false;
8581 }
8582 for (auto N : TopoOrder) {
8583 bool IsDivergent = DivergenceMap[N];
8584 bool IsSDNodeDivergent = TLI.isSDNodeSourceOfDivergence(N, FLI, DA);
8585 for (auto &Op : N->ops()) {
8586 if (Op.Val.getValueType() != MVT::Other)
8587 IsSDNodeDivergent |= DivergenceMap[Op.getNode()];
8588 }
8589 if (!IsDivergent && IsSDNodeDivergent && !TLI.isSDNodeAlwaysUniform(N)) {
8590 DivergenceMap[N] = true;
8591 }
8592 }
8593 for (auto &N : allnodes()) {
8594 (void)N;
8595 assert(DivergenceMap[&N] == N.isDivergent() &&
8596 "Divergence bit inconsistency detected\n");
8597 }
8598 }
8599 #endif
8600
/// ReplaceAllUsesOfValuesWith - Replace any uses of From with To, leaving
/// uses of other values produced by From.getNode() alone. The same value
/// may appear in both the From and To list.
void SelectionDAG::ReplaceAllUsesOfValuesWith(const SDValue *From,
                                              const SDValue *To,
                                              unsigned Num) {
  // Handle the simple, trivial case efficiently.
  if (Num == 1)
    return ReplaceAllUsesOfValueWith(*From, *To);

  transferDbgValues(*From, *To);

  // Read up all the uses and make records of them. This helps with
  // processing new uses that are introduced during the
  // replacement process.
  SmallVector<UseMemo, 4> Uses;
  for (unsigned i = 0; i != Num; ++i) {
    unsigned FromResNo = From[i].getResNo();
    SDNode *FromNode = From[i].getNode();
    for (SDNode::use_iterator UI = FromNode->use_begin(),
                              E = FromNode->use_end();
         UI != E; ++UI) {
      SDUse &Use = UI.getUse();
      if (Use.getResNo() == FromResNo) {
        UseMemo Memo = {*UI, i, &Use};
        Uses.push_back(Memo);
      }
    }
  }

  // Sort the uses, so that all the uses from a given User are together.
  llvm::sort(Uses);

  for (unsigned UseIndex = 0, UseIndexEnd = Uses.size();
       UseIndex != UseIndexEnd;) {
    // We know that this user uses some value of From. If it is the right
    // value, update it.
    SDNode *User = Uses[UseIndex].User;

    // This node is about to morph, remove its old self from the CSE maps.
    RemoveNodeFromCSEMaps(User);

    // The Uses array is sorted, so all the uses for a given User
    // are next to each other in the list.
    // To help reduce the number of CSE recomputations, process all
    // the uses of this user that we can find this way.
    do {
      unsigned i = Uses[UseIndex].Index;
      SDUse &Use = *Uses[UseIndex].Use;
      ++UseIndex;

      Use.set(To[i]);
    } while (UseIndex != UseIndexEnd && Uses[UseIndex].User == User);

    // Now that we have modified User, add it back to the CSE maps. If it
    // already exists there, recursively merge the results together.
    AddModifiedNodeToCSEMaps(User);
  }
}

/// AssignTopologicalOrder - Assign a unique node id for each node in the DAG
/// based on their topological order. Returns the number of nodes assigned;
/// the ids range from 0 to that number minus one.
unsigned SelectionDAG::AssignTopologicalOrder() {
  unsigned DAGSize = 0;

  // SortedPos tracks the progress of the algorithm. Nodes before it are
  // sorted, nodes after it are unsorted. When the algorithm completes
  // it is at the end of the list.
  allnodes_iterator SortedPos = allnodes_begin();

  // Visit all the nodes. Move nodes with no operands to the front of
  // the list immediately. Annotate nodes that do have operands with their
  // operand count. Before we do this, the Node Id fields of the nodes
  // may contain arbitrary values. After, the Node Id fields for nodes
  // before SortedPos will contain the topological sort index, and the
  // Node Id fields for nodes at SortedPos and after will contain the
  // count of outstanding operands.
  for (allnodes_iterator I = allnodes_begin(), E = allnodes_end(); I != E;) {
    SDNode *N = &*I++;
    checkForCycles(N, this);
    unsigned Degree = N->getNumOperands();
    if (Degree == 0) {
      // A node with no operands, add it to the result array immediately.
      N->setNodeId(DAGSize++);
      allnodes_iterator Q(N);
      if (Q != SortedPos)
        SortedPos = AllNodes.insert(SortedPos, AllNodes.remove(Q));
      assert(SortedPos != AllNodes.end() && "Overran node list");
      ++SortedPos;
    } else {
      // Temporarily use the Node Id as scratch space for the degree count.
      N->setNodeId(Degree);
    }
  }

  // Visit all the nodes. As we iterate, move nodes into sorted order,
  // such that by the time the end is reached all nodes will be sorted.
  for (SDNode &Node : allnodes()) {
    SDNode *N = &Node;
    checkForCycles(N, this);
    // N is in sorted position, so all its uses have one less operand
    // that needs to be sorted.
    for (SDNode::use_iterator UI = N->use_begin(), UE = N->use_end(); UI != UE;
         ++UI) {
      SDNode *P = *UI;
      unsigned Degree = P->getNodeId();
      assert(Degree != 0 && "Invalid node degree");
      --Degree;
      if (Degree == 0) {
        // All of P's operands are sorted, so P may be sorted now.
        P->setNodeId(DAGSize++);
        if (P->getIterator() != SortedPos)
          SortedPos = AllNodes.insert(SortedPos, AllNodes.remove(P));
        assert(SortedPos != AllNodes.end() && "Overran node list");
        ++SortedPos;
      } else {
        // Update P's outstanding operand count.
        P->setNodeId(Degree);
      }
    }
    if (Node.getIterator() == SortedPos) {
#ifndef NDEBUG
      allnodes_iterator I(N);
      SDNode *S = &*++I;
      dbgs() << "Overran sorted position:\n";
      S->dumprFull(this);
      dbgs() << "\n";
      dbgs() << "Checking if this is due to cycles\n";
      checkForCycles(this, true);
#endif
      llvm_unreachable(nullptr);
    }
  }

  assert(SortedPos == AllNodes.end() &&
         "Topological sort incomplete!");
  assert(AllNodes.front().getOpcode() == ISD::EntryToken &&
         "First node in topological sort is not the entry token!");
  assert(AllNodes.front().getNodeId() == 0 &&
         "First node in topological sort has non-zero id!");
  assert(AllNodes.front().getNumOperands() == 0 &&
         "First node in topological sort has operands!");
  assert(AllNodes.back().getNodeId() == (int)DAGSize - 1 &&
         "Last node in topological sort has unexpected id!");
  assert(AllNodes.back().use_empty() &&
         "Last node in topological sort has users!");
  assert(DAGSize == allnodes_size() && "Node count mismatch!");
  return DAGSize;
}

/// AddDbgValue - Add a dbg_value SDNode. If SD is non-null that means the
/// value is produced by SD.
void SelectionDAG::AddDbgValue(SDDbgValue *DB, SDNode *SD, bool isParameter) {
  if (SD) {
    assert(DbgInfo->getSDDbgValues(SD).empty() || SD->getHasDebugValue());
    SD->setHasDebugValue(true);
  }
  DbgInfo->add(DB, SD, isParameter);
}

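/// AddDbgLabel - Add a dbg_label SDNode.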
void SelectionDAG::AddDbgLabel(SDDbgLabel *DB) { DbgInfo->add(DB); }

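/// Give NewMemOp the same position as OldLoad in terms of memory dependency:
/// redirect all users of OldLoad's output chain to a TokenFactor of the old
/// and new chains, and return the chain the caller should use from now on.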
SDValue SelectionDAG::makeEquivalentMemoryOrdering(LoadSDNode *OldLoad,
                                                   SDValue NewMemOp) {
  assert(isa<MemSDNode>(NewMemOp.getNode()) && "Expected a memop node");
  // The new memory operation must have the same position as the old load in
  // terms of memory dependency. Create a TokenFactor for the old load and new
  // memory operation and update uses of the old load's output chain to use
  // that TokenFactor.
  SDValue OldChain = SDValue(OldLoad, 1);
  SDValue NewChain = SDValue(NewMemOp.getNode(), 1);
  if (OldChain == NewChain || !OldLoad->hasAnyUseOfValue(1))
    return NewChain;

  SDValue TokenFactor =
      getNode(ISD::TokenFactor, SDLoc(OldLoad), MVT::Other, OldChain, NewChain);
  ReplaceAllUsesOfValueWith(OldChain, TokenFactor);
  UpdateNodeOperands(TokenFactor.getNode(), OldChain, NewChain);
  return TokenFactor;
}

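/// Resolve an ExternalSymbol operand to the Function of the same name in the
/// current module, returning a GlobalAddress for it. If OutFunction is
/// non-null, it receives the resolved Function. A fatal error is reported if
/// the symbol does not resolve to a function.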
SDValue SelectionDAG::getSymbolFunctionGlobalAddress(SDValue Op,
                                                     Function **OutFunction) {
  assert(isa<ExternalSymbolSDNode>(Op) && "Node should be an ExternalSymbol");

  auto *Symbol = cast<ExternalSymbolSDNode>(Op)->getSymbol();
  auto *Module = MF->getFunction().getParent();
  auto *Function = Module->getFunction(Symbol);

  if (OutFunction != nullptr)
    *OutFunction = Function;

  if (Function != nullptr) {
    auto PtrTy = TLI->getPointerTy(getDataLayout(), Function->getAddressSpace());
    return getGlobalAddress(Function, SDLoc(Op), PtrTy);
  }

  std::string ErrorStr;
  raw_string_ostream ErrorFormatter(ErrorStr);

  ErrorFormatter << "Undefined external symbol ";
  ErrorFormatter << '"' << Symbol << '"';
  ErrorFormatter.flush();

  report_fatal_error(ErrorStr);
}

//===----------------------------------------------------------------------===//
//                              SDNode Class
//===----------------------------------------------------------------------===//

bool llvm::isNullConstant(SDValue V) {
  ConstantSDNode *Const = dyn_cast<ConstantSDNode>(V);
  return Const != nullptr && Const->isNullValue();
}

bool llvm::isNullFPConstant(SDValue V) {
  ConstantFPSDNode *Const = dyn_cast<ConstantFPSDNode>(V);
  return Const != nullptr && Const->isZero() && !Const->isNegative();
}

bool llvm::isAllOnesConstant(SDValue V) {
  ConstantSDNode *Const = dyn_cast<ConstantSDNode>(V);
  return Const != nullptr && Const->isAllOnesValue();
}

bool llvm::isOneConstant(SDValue V) {
  ConstantSDNode *Const = dyn_cast<ConstantSDNode>(V);
  return Const != nullptr && Const->isOne();
}

SDValue llvm::peekThroughBitcasts(SDValue V) {
  while (V.getOpcode() == ISD::BITCAST)
    V = V.getOperand(0);
  return V;
}

SDValue llvm::peekThroughOneUseBitcasts(SDValue V) {
  while (V.getOpcode() == ISD::BITCAST && V.getOperand(0).hasOneUse())
    V = V.getOperand(0);
  return V;
}

SDValue llvm::peekThroughExtractSubvectors(SDValue V) {
  while (V.getOpcode() == ISD::EXTRACT_SUBVECTOR)
    V = V.getOperand(0);
  return V;
}

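/// Return true if V is a bitwise not of an operand, i.e. an XOR against an
/// all-ones constant or all-ones splat (undef splat elements allowed when
/// AllowUndefs is set).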
bool llvm::isBitwiseNot(SDValue V, bool AllowUndefs) {
  if (V.getOpcode() != ISD::XOR)
    return false;
  V = peekThroughBitcasts(V.getOperand(1));
  unsigned NumBits = V.getScalarValueSizeInBits();
  ConstantSDNode *C =
      isConstOrConstSplat(V, AllowUndefs, /*AllowTruncation*/ true);
  return C && (C->getAPIntValue().countTrailingOnes() >= NumBits);
}

ConstantSDNode *llvm::isConstOrConstSplat(SDValue N, bool AllowUndefs,
                                          bool AllowTruncation) {
  if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N))
    return CN;

  if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N)) {
    BitVector UndefElements;
    ConstantSDNode *CN = BV->getConstantSplatNode(&UndefElements);

    // BuildVectors can truncate their operands. Ignore that case here unless
    // AllowTruncation is set.
    if (CN && (UndefElements.none() || AllowUndefs)) {
      EVT CVT = CN->getValueType(0);
      EVT NSVT = N.getValueType().getScalarType();
      assert(CVT.bitsGE(NSVT) && "Illegal build vector element extension");
      if (AllowTruncation || (CVT == NSVT))
        return CN;
    }
  }

  return nullptr;
}

ConstantSDNode *llvm::isConstOrConstSplat(SDValue N, const APInt &DemandedElts,
                                          bool AllowUndefs,
                                          bool AllowTruncation) {
  if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N))
    return CN;

  if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N)) {
    BitVector UndefElements;
    ConstantSDNode *CN = BV->getConstantSplatNode(DemandedElts, &UndefElements);

    // BuildVectors can truncate their operands. Ignore that case here unless
    // AllowTruncation is set.
    if (CN && (UndefElements.none() || AllowUndefs)) {
      EVT CVT = CN->getValueType(0);
      EVT NSVT = N.getValueType().getScalarType();
      assert(CVT.bitsGE(NSVT) && "Illegal build vector element extension");
      if (AllowTruncation || (CVT == NSVT))
        return CN;
    }
  }

  return nullptr;
}

ConstantFPSDNode *llvm::isConstOrConstSplatFP(SDValue N, bool AllowUndefs) {
  if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(N))
    return CN;

  if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N)) {
    BitVector UndefElements;
    ConstantFPSDNode *CN = BV->getConstantFPSplatNode(&UndefElements);
    if (CN && (UndefElements.none() || AllowUndefs))
      return CN;
  }

  return nullptr;
}

ConstantFPSDNode *llvm::isConstOrConstSplatFP(SDValue N,
                                              const APInt &DemandedElts,
                                              bool AllowUndefs) {
  if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(N))
    return CN;

  if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N)) {
    BitVector UndefElements;
    ConstantFPSDNode *CN =
        BV->getConstantFPSplatNode(DemandedElts, &UndefElements);
    if (CN && (UndefElements.none() || AllowUndefs))
      return CN;
  }

  return nullptr;
}

bool llvm::isNullOrNullSplat(SDValue N, bool AllowUndefs) {
  // TODO: may want to use peekThroughBitcast() here.
  ConstantSDNode *C = isConstOrConstSplat(N, AllowUndefs);
  return C && C->isNullValue();
}

bool llvm::isOneOrOneSplat(SDValue N) {
  // TODO: may want to use peekThroughBitcast() here.
  unsigned BitWidth = N.getScalarValueSizeInBits();
  ConstantSDNode *C = isConstOrConstSplat(N);
  return C && C->isOne() && C->getValueSizeInBits(0) == BitWidth;
}

bool llvm::isAllOnesOrAllOnesSplat(SDValue N) {
  N = peekThroughBitcasts(N);
  unsigned BitWidth = N.getScalarValueSizeInBits();
  ConstantSDNode *C = isConstOrConstSplat(N);
  return C && C->isAllOnesValue() && C->getValueSizeInBits(0) == BitWidth;
}

HandleSDNode::~HandleSDNode() {
  DropOperands();
}

GlobalAddressSDNode::GlobalAddressSDNode(unsigned Opc, unsigned Order,
                                         const DebugLoc &DL,
                                         const GlobalValue *GA, EVT VT,
                                         int64_t o, unsigned TF)
    : SDNode(Opc, Order, DL, getSDVTList(VT)), Offset(o), TargetFlags(TF) {
  TheGlobal = GA;
}

AddrSpaceCastSDNode::AddrSpaceCastSDNode(unsigned Order, const DebugLoc &dl,
                                         EVT VT, unsigned SrcAS,
                                         unsigned DestAS)
    : SDNode(ISD::ADDRSPACECAST, Order, dl, getSDVTList(VT)),
      SrcAddrSpace(SrcAS), DestAddrSpace(DestAS) {}

MemSDNode::MemSDNode(unsigned Opc, unsigned Order, const DebugLoc &dl,
                     SDVTList VTs, EVT memvt, MachineMemOperand *mmo)
    : SDNode(Opc, Order, dl, VTs), MemoryVT(memvt), MMO(mmo) {
  MemSDNodeBits.IsVolatile = MMO->isVolatile();
  MemSDNodeBits.IsNonTemporal = MMO->isNonTemporal();
  MemSDNodeBits.IsDereferenceable = MMO->isDereferenceable();
  MemSDNodeBits.IsInvariant = MMO->isInvariant();

  // We check here that the size of the memory operand fits within the size of
  // the MMO. This is because the MMO might indicate only a possible address
  // range instead of specifying the affected memory addresses precisely.
  // TODO: Make MachineMemOperands aware of scalable vectors.
  assert(memvt.getStoreSize().getKnownMinSize() <= MMO->getSize() &&
         "Size mismatch!");
}

/// Profile - Gather unique data for the node.
///
void SDNode::Profile(FoldingSetNodeID &ID) const {
  AddNodeIDNode(ID, this);
}

namespace {

struct EVTArray {
  std::vector<EVT> VTs;

  EVTArray() {
    VTs.reserve(MVT::LAST_VALUETYPE);
    for (unsigned i = 0; i < MVT::LAST_VALUETYPE; ++i)
      VTs.push_back(MVT((MVT::SimpleValueType)i));
  }
};

} // end anonymous namespace

static ManagedStatic<std::set<EVT, EVT::compareRawBits>> EVTs;
static ManagedStatic<EVTArray> SimpleVTArray;
static ManagedStatic<sys::SmartMutex<true>> VTMutex;

/// getValueTypeList - Return a pointer to the specified value type.
///
const EVT *SDNode::getValueTypeList(EVT VT) {
  if (VT.isExtended()) {
    sys::SmartScopedLock<true> Lock(*VTMutex);
    return &(*EVTs->insert(VT).first);
  } else {
    assert(VT.getSimpleVT() < MVT::LAST_VALUETYPE &&
           "Value type out of range!");
    return &SimpleVTArray->VTs[VT.getSimpleVT().SimpleTy];
  }
}

/// hasNUsesOfValue - Return true if there are exactly NUses uses of the
/// indicated value. This method ignores uses of other values defined by this
/// operation.
bool SDNode::hasNUsesOfValue(unsigned NUses, unsigned Value) const {
  assert(Value < getNumValues() && "Bad value!");

  // TODO: Only iterate over uses of a given value of the node
  for (SDNode::use_iterator UI = use_begin(), E = use_end(); UI != E; ++UI) {
    if (UI.getUse().getResNo() == Value) {
      if (NUses == 0)
        return false;
      --NUses;
    }
  }

  // Found exactly the right number of uses?
  return NUses == 0;
}

/// hasAnyUseOfValue - Return true if there are any uses of the indicated
/// value. This method ignores uses of other values defined by this operation.
bool SDNode::hasAnyUseOfValue(unsigned Value) const {
  assert(Value < getNumValues() && "Bad value!");

  for (SDNode::use_iterator UI = use_begin(), E = use_end(); UI != E; ++UI)
    if (UI.getUse().getResNo() == Value)
      return true;

  return false;
}

/// isOnlyUserOf - Return true if this node is the only use of N.
bool SDNode::isOnlyUserOf(const SDNode *N) const {
  bool Seen = false;
  for (SDNode::use_iterator I = N->use_begin(), E = N->use_end(); I != E; ++I) {
    SDNode *User = *I;
    if (User == this)
      Seen = true;
    else
      return false;
  }

  return Seen;
}

/// Return true if the only users of N are contained in Nodes.
bool SDNode::areOnlyUsersOf(ArrayRef<const SDNode *> Nodes, const SDNode *N) {
  bool Seen = false;
  for (SDNode::use_iterator I = N->use_begin(), E = N->use_end(); I != E; ++I) {
    SDNode *User = *I;
    if (llvm::any_of(Nodes,
                     [&User](const SDNode *Node) { return User == Node; }))
      Seen = true;
    else
      return false;
  }

  return Seen;
}

/// isOperandOf - Return true if this value is an operand of N.
bool SDValue::isOperandOf(const SDNode *N) const {
  return any_of(N->op_values(), [this](SDValue Op) { return *this == Op; });
}

bool SDNode::isOperandOf(const SDNode *N) const {
  return any_of(N->op_values(),
                [this](SDValue Op) { return this == Op.getNode(); });
}

/// reachesChainWithoutSideEffects - Return true if this operand (which must
/// be a chain) reaches the specified operand without crossing any
/// side-effecting instructions on any chain path. In practice, this looks
/// through token factors and non-volatile loads. In order to remain efficient,
/// this only looks a couple of nodes in, it does not do an exhaustive search.
///
/// Note that we only need to examine chains when we're searching for
/// side-effects; SelectionDAG requires that all side-effects are represented
/// by chains, even if another operand would force a specific ordering. This
/// constraint is necessary to allow transformations like splitting loads.
bool SDValue::reachesChainWithoutSideEffects(SDValue Dest,
                                             unsigned Depth) const {
  if (*this == Dest) return true;

  // Don't search too deeply, we just want to be able to see through
  // TokenFactors etc.
  if (Depth == 0) return false;

  // If this is a token factor, all inputs to the TF happen in parallel.
  if (getOpcode() == ISD::TokenFactor) {
    // First, try a shallow search.
    if (is_contained((*this)->ops(), Dest)) {
      // We found the chain we want as an operand of this TokenFactor.
      // Essentially, we reach the chain without side-effects if we could
      // serialize the TokenFactor into a simple chain of operations with
      // Dest as the last operation. This is automatically true if the
      // chain has one use: there are no other ordering constraints.
      // If the chain has more than one use, we give up: some other
      // use of Dest might force a side-effect between Dest and the current
      // node.
      if (Dest.hasOneUse())
        return true;
    }
    // Next, try a deep search: check whether every operand of the TokenFactor
    // reaches Dest.
    return llvm::all_of((*this)->ops(), [=](SDValue Op) {
      return Op.reachesChainWithoutSideEffects(Dest, Depth - 1);
    });
  }

  // Loads don't have side effects, look through them.
  if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(*this)) {
    if (Ld->isUnordered())
      return Ld->getChain().reachesChainWithoutSideEffects(Dest, Depth - 1);
  }
  return false;
}

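/// Return true if N is a predecessor of this node, i.e. N can be reached from
/// this node by recursively traversing operands.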
bool SDNode::hasPredecessor(const SDNode *N) const {
  SmallPtrSet<const SDNode *, 32> Visited;
  SmallVector<const SDNode *, 16> Worklist;
  Worklist.push_back(this);
  return hasPredecessorHelper(N, Visited, Worklist);
}

void SDNode::intersectFlagsWith(const SDNodeFlags Flags) {
  this->Flags.intersectWith(Flags);
}

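/// Match a horizontal binop reduction pyramid that ends in an extract of
/// element 0: each stage shuffles the upper half of the vector down and
/// applies one of CandidateBinOps. On success, BinOp is set to the matched
/// opcode and the vector being reduced is returned; with AllowPartials set, a
/// partially reduced subvector may be returned instead.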
SDValue
SelectionDAG::matchBinOpReduction(SDNode *Extract, ISD::NodeType &BinOp,
                                  ArrayRef<ISD::NodeType> CandidateBinOps,
                                  bool AllowPartials) {
  // The pattern must end in an extract from index 0.
  if (Extract->getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
      !isNullConstant(Extract->getOperand(1)))
    return SDValue();

  // Match against one of the candidate binary ops.
  SDValue Op = Extract->getOperand(0);
  if (llvm::none_of(CandidateBinOps, [Op](ISD::NodeType BinOp) {
        return Op.getOpcode() == unsigned(BinOp);
      }))
    return SDValue();

  // Floating-point reductions may require relaxed constraints on the final
  // step of the reduction because they may reorder intermediate operations.
  unsigned CandidateBinOp = Op.getOpcode();
  if (Op.getValueType().isFloatingPoint()) {
    SDNodeFlags Flags = Op->getFlags();
    switch (CandidateBinOp) {
    case ISD::FADD:
      if (!Flags.hasNoSignedZeros() || !Flags.hasAllowReassociation())
        return SDValue();
      break;
    default:
      llvm_unreachable("Unhandled FP opcode for binop reduction");
    }
  }

  // Helper invoked when matching fails: check whether we already did enough
  // stages that a partial reduction from a subvector is possible.
  auto PartialReduction = [&](SDValue Op, unsigned NumSubElts) {
    if (!AllowPartials || !Op)
      return SDValue();
    EVT OpVT = Op.getValueType();
    EVT OpSVT = OpVT.getScalarType();
    EVT SubVT = EVT::getVectorVT(*getContext(), OpSVT, NumSubElts);
    if (!TLI->isExtractSubvectorCheap(SubVT, OpVT, 0))
      return SDValue();
    BinOp = (ISD::NodeType)CandidateBinOp;
    return getNode(
        ISD::EXTRACT_SUBVECTOR, SDLoc(Op), SubVT, Op,
        getConstant(0, SDLoc(Op), TLI->getVectorIdxTy(getDataLayout())));
  };

  // At each stage, we're looking for something that looks like:
  // %s = shufflevector <8 x i32> %op, <8 x i32> undef,
  //                    <8 x i32> <i32 2, i32 3, i32 undef, i32 undef,
  //                               i32 undef, i32 undef, i32 undef, i32 undef>
  // %a = binop <8 x i32> %op, %s
  // Where the mask changes according to the stage. E.g. for a 3-stage pyramid,
  // we expect something like:
  // <4,5,6,7,u,u,u,u>
  // <2,3,u,u,u,u,u,u>
  // <1,u,u,u,u,u,u,u>
  // While a partial reduction match would be:
  // <2,3,u,u,u,u,u,u>
  // <1,u,u,u,u,u,u,u>
  unsigned Stages = Log2_32(Op.getValueType().getVectorNumElements());
  SDValue PrevOp;
  for (unsigned i = 0; i < Stages; ++i) {
    unsigned MaskEnd = (1 << i);

    if (Op.getOpcode() != CandidateBinOp)
      return PartialReduction(PrevOp, MaskEnd);

    SDValue Op0 = Op.getOperand(0);
    SDValue Op1 = Op.getOperand(1);

    ShuffleVectorSDNode *Shuffle = dyn_cast<ShuffleVectorSDNode>(Op0);
    if (Shuffle) {
      Op = Op1;
    } else {
      Shuffle = dyn_cast<ShuffleVectorSDNode>(Op1);
      Op = Op0;
    }

    // The first operand of the shuffle should be the same as the other operand
    // of the binop.
    if (!Shuffle || Shuffle->getOperand(0) != Op)
      return PartialReduction(PrevOp, MaskEnd);

    // Verify the shuffle has the expected (at this stage of the pyramid) mask.
    for (int Index = 0; Index < (int)MaskEnd; ++Index)
      if (Shuffle->getMaskElt(Index) != (int)(MaskEnd + Index))
        return PartialReduction(PrevOp, MaskEnd);

    PrevOp = Op;
  }

  BinOp = (ISD::NodeType)CandidateBinOp;
  return Op;
}

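/// Unroll a vector operation into ResNE scalar operations on the element
/// type, padding the result with UNDEF up to ResNE elements. ResNE == 0 means
/// unroll to the vector's full element count.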
SDValue SelectionDAG::UnrollVectorOp(SDNode *N, unsigned ResNE) {
  assert(N->getNumValues() == 1 &&
         "Can't unroll a vector with multiple results!");

  EVT VT = N->getValueType(0);
  unsigned NE = VT.getVectorNumElements();
  EVT EltVT = VT.getVectorElementType();
  SDLoc dl(N);

  SmallVector<SDValue, 8> Scalars;
  SmallVector<SDValue, 4> Operands(N->getNumOperands());

  // If ResNE is 0, fully unroll the vector op.
  if (ResNE == 0)
    ResNE = NE;
  else if (NE > ResNE)
    NE = ResNE;

  unsigned i;
  for (i = 0; i != NE; ++i) {
    for (unsigned j = 0, e = N->getNumOperands(); j != e; ++j) {
      SDValue Operand = N->getOperand(j);
      EVT OperandVT = Operand.getValueType();
      if (OperandVT.isVector()) {
        // A vector operand; extract a single element.
        EVT OperandEltVT = OperandVT.getVectorElementType();
        Operands[j] =
            getNode(ISD::EXTRACT_VECTOR_ELT, dl, OperandEltVT, Operand,
                    getConstant(i, dl, TLI->getVectorIdxTy(getDataLayout())));
      } else {
        // A scalar operand; just use it as is.
        Operands[j] = Operand;
      }
    }

    switch (N->getOpcode()) {
    default: {
      Scalars.push_back(getNode(N->getOpcode(), dl, EltVT, Operands,
                                N->getFlags()));
      break;
    }
    case ISD::VSELECT:
      Scalars.push_back(getNode(ISD::SELECT, dl, EltVT, Operands));
      break;
    case ISD::SHL:
    case ISD::SRA:
    case ISD::SRL:
    case ISD::ROTL:
    case ISD::ROTR:
      Scalars.push_back(getNode(N->getOpcode(), dl, EltVT, Operands[0],
                                getShiftAmountOperand(Operands[0].getValueType(),
                                                      Operands[1])));
      break;
    case ISD::SIGN_EXTEND_INREG: {
      EVT ExtVT = cast<VTSDNode>(Operands[1])->getVT().getVectorElementType();
      Scalars.push_back(getNode(N->getOpcode(), dl, EltVT,
                                Operands[0],
                                getValueType(ExtVT)));
    }
    }
  }

  for (; i < ResNE; ++i)
    Scalars.push_back(getUNDEF(EltVT));

  EVT VecVT = EVT::getVectorVT(*getContext(), EltVT, ResNE);
  return getBuildVector(VecVT, dl, Scalars);
}

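/// Unroll an overflow-producing vector operation (UADDO, SMULO, etc.) into
/// scalars, returning the unrolled result and overflow vectors as a pair.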
std::pair<SDValue, SDValue> SelectionDAG::UnrollVectorOverflowOp(
    SDNode *N, unsigned ResNE) {
  unsigned Opcode = N->getOpcode();
  assert((Opcode == ISD::UADDO || Opcode == ISD::SADDO ||
          Opcode == ISD::USUBO || Opcode == ISD::SSUBO ||
          Opcode == ISD::UMULO || Opcode == ISD::SMULO) &&
         "Expected an overflow opcode");

  EVT ResVT = N->getValueType(0);
  EVT OvVT = N->getValueType(1);
  EVT ResEltVT = ResVT.getVectorElementType();
  EVT OvEltVT = OvVT.getVectorElementType();
  SDLoc dl(N);

  // If ResNE is 0, fully unroll the vector op.
  unsigned NE = ResVT.getVectorNumElements();
  if (ResNE == 0)
    ResNE = NE;
  else if (NE > ResNE)
    NE = ResNE;

  SmallVector<SDValue, 8> LHSScalars;
  SmallVector<SDValue, 8> RHSScalars;
  ExtractVectorElements(N->getOperand(0), LHSScalars, 0, NE);
  ExtractVectorElements(N->getOperand(1), RHSScalars, 0, NE);

  EVT SVT = TLI->getSetCCResultType(getDataLayout(), *getContext(), ResEltVT);
  SDVTList VTs = getVTList(ResEltVT, SVT);
  SmallVector<SDValue, 8> ResScalars;
  SmallVector<SDValue, 8> OvScalars;
  for (unsigned i = 0; i < NE; ++i) {
    SDValue Res = getNode(Opcode, dl, VTs, LHSScalars[i], RHSScalars[i]);
    SDValue Ov =
        getSelect(dl, OvEltVT, Res.getValue(1),
                  getBoolConstant(true, dl, OvEltVT, ResVT),
                  getConstant(0, dl, OvEltVT));

    ResScalars.push_back(Res);
    OvScalars.push_back(Ov);
  }

  ResScalars.append(ResNE - NE, getUNDEF(ResEltVT));
  OvScalars.append(ResNE - NE, getUNDEF(OvEltVT));

  EVT NewResVT = EVT::getVectorVT(*getContext(), ResEltVT, ResNE);
  EVT NewOvVT = EVT::getVectorVT(*getContext(), OvEltVT, ResNE);
  return std::make_pair(getBuildVector(NewResVT, dl, ResScalars),
                        getBuildVector(NewOvVT, dl, OvScalars));
}

bool SelectionDAG::areNonVolatileConsecutiveLoads(LoadSDNode *LD,
                                                  LoadSDNode *Base,
                                                  unsigned Bytes,
                                                  int Dist) const {
  if (LD->isVolatile() || Base->isVolatile())
    return false;
  // TODO: probably too restrictive for atomics, revisit
  if (!LD->isSimple())
    return false;
  if (LD->isIndexed() || Base->isIndexed())
    return false;
  if (LD->getChain() != Base->getChain())
    return false;
  EVT VT = LD->getValueType(0);
  if (VT.getSizeInBits() / 8 != Bytes)
    return false;

  auto BaseLocDecomp = BaseIndexOffset::match(Base, *this);
  auto LocDecomp = BaseIndexOffset::match(LD, *this);

  int64_t Offset = 0;
  if (BaseLocDecomp.equalBaseIndex(LocDecomp, *this, Offset))
    return (Dist * Bytes == Offset);
  return false;
}

/// InferPtrAlignment - Infer alignment of a load / store address. Return 0 if
/// it cannot be inferred.
unsigned SelectionDAG::InferPtrAlignment(SDValue Ptr) const {
  // If this is a GlobalAddress + cst, return the alignment.
  const GlobalValue *GV = nullptr;
  int64_t GVOffset = 0;
  if (TLI->isGAPlusOffset(Ptr.getNode(), GV, GVOffset)) {
    unsigned PtrWidth = getDataLayout().getPointerTypeSizeInBits(GV->getType());
    KnownBits Known(PtrWidth);
    llvm::computeKnownBits(GV, Known, getDataLayout());
    unsigned AlignBits = Known.countMinTrailingZeros();
    unsigned Align = AlignBits ? 1 << std::min(31U, AlignBits) : 0;
    if (Align)
      return MinAlign(Align, GVOffset);
  }

  // If this is a direct reference to a stack slot, use information about the
  // stack slot's alignment.
  int FrameIdx = INT_MIN;
  int64_t FrameOffset = 0;
  if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Ptr)) {
    FrameIdx = FI->getIndex();
  } else if (isBaseWithConstantOffset(Ptr) &&
             isa<FrameIndexSDNode>(Ptr.getOperand(0))) {
    // Handle FI+Cst
    FrameIdx = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex();
    FrameOffset = Ptr.getConstantOperandVal(1);
  }

  if (FrameIdx != INT_MIN) {
    const MachineFrameInfo &MFI = getMachineFunction().getFrameInfo();
    unsigned FIInfoAlign = MinAlign(MFI.getObjectAlignment(FrameIdx),
                                    FrameOffset);
    return FIInfoAlign;
  }

  return 0;
}

/// GetSplitDestVTs - Compute the VTs needed for the low/hi parts of a type
/// which is split (or expanded) into two not necessarily identical pieces.
std::pair<EVT, EVT> SelectionDAG::GetSplitDestVTs(const EVT &VT) const {
  // Currently all types are split in half.
  EVT LoVT, HiVT;
  if (!VT.isVector())
    LoVT = HiVT = TLI->getTypeToTransformTo(*getContext(), VT);
  else
    LoVT = HiVT = VT.getHalfNumVectorElementsVT(*getContext());

  return std::make_pair(LoVT, HiVT);
}

/// SplitVector - Split the vector with EXTRACT_SUBVECTOR and return the
/// low/high part.
std::pair<SDValue, SDValue>
SelectionDAG::SplitVector(const SDValue &N, const SDLoc &DL, const EVT &LoVT,
                          const EVT &HiVT) {
  assert(LoVT.getVectorNumElements() + HiVT.getVectorNumElements() <=
             N.getValueType().getVectorNumElements() &&
         "More vector elements requested than available!");
  SDValue Lo, Hi;
  Lo = getNode(ISD::EXTRACT_SUBVECTOR, DL, LoVT, N,
               getConstant(0, DL, TLI->getVectorIdxTy(getDataLayout())));
  Hi = getNode(ISD::EXTRACT_SUBVECTOR, DL, HiVT, N,
               getConstant(LoVT.getVectorNumElements(), DL,
                           TLI->getVectorIdxTy(getDataLayout())));
  return std::make_pair(Lo, Hi);
}

/// Widen the vector up to the next power of two using INSERT_SUBVECTOR.
SDValue SelectionDAG::WidenVector(const SDValue &N, const SDLoc &DL) {
  EVT VT = N.getValueType();
  EVT WideVT = EVT::getVectorVT(*getContext(), VT.getVectorElementType(),
                                NextPowerOf2(VT.getVectorNumElements()));
  return getNode(ISD::INSERT_SUBVECTOR, DL, WideVT, getUNDEF(WideVT), N,
                 getConstant(0, DL, TLI->getVectorIdxTy(getDataLayout())));
}

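/// Append to Args the EXTRACT_VECTOR_ELT of each of the Count elements of Op
/// starting at index Start; Count == 0 extracts all elements.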
void SelectionDAG::ExtractVectorElements(SDValue Op,
                                         SmallVectorImpl<SDValue> &Args,
                                         unsigned Start, unsigned Count) {
  EVT VT = Op.getValueType();
  if (Count == 0)
    Count = VT.getVectorNumElements();

  EVT EltVT = VT.getVectorElementType();
  EVT IdxTy = TLI->getVectorIdxTy(getDataLayout());
  SDLoc SL(Op);
  for (unsigned i = Start, e = Start + Count; i != e; ++i) {
    Args.push_back(getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT,
                           Op, getConstant(i, SL, IdxTy)));
  }
}

// getAddressSpace - Return the address space this GlobalAddress belongs to.
unsigned GlobalAddressSDNode::getAddressSpace() const {
  return getGlobal()->getType()->getAddressSpace();
}

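/// Return the IR type of the constant pool entry, for both machine constant
/// pool values and IR constants.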
Type *ConstantPoolSDNode::getType() const {
  if (isMachineConstantPoolEntry())
    return Val.MachineCPVal->getType();
  return Val.ConstVal->getType();
}

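/// isConstantSplat - Return true if this build_vector is a constant splat of
/// a bit pattern at least MinSplatBits wide. On success, SplatValue and
/// SplatUndef hold the defined and undef bits of the pattern, SplatBitSize
/// its width, and HasAnyUndefs reports whether any undef bits were seen.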
bool BuildVectorSDNode::isConstantSplat(APInt &SplatValue, APInt &SplatUndef,
                                        unsigned &SplatBitSize,
                                        bool &HasAnyUndefs,
                                        unsigned MinSplatBits,
                                        bool IsBigEndian) const {
  EVT VT = getValueType(0);
  assert(VT.isVector() && "Expected a vector type");
  unsigned VecWidth = VT.getSizeInBits();
  if (MinSplatBits > VecWidth)
    return false;

  // FIXME: The widths are based on this node's type, but build vectors can
  // truncate their operands.
  SplatValue = APInt(VecWidth, 0);
  SplatUndef = APInt(VecWidth, 0);

  // Get the bits. Bits with undefined values (when the corresponding element
  // of the vector is an ISD::UNDEF value) are set in SplatUndef and cleared
  // in SplatValue. If any of the values are not constant, give up and return
  // false.
  unsigned NumOps = getNumOperands();
  assert(NumOps > 0 && "isConstantSplat has 0-size build vector");
  unsigned EltWidth = VT.getScalarSizeInBits();

  for (unsigned j = 0; j < NumOps; ++j) {
    unsigned i = IsBigEndian ? NumOps - 1 - j : j;
    SDValue OpVal = getOperand(i);
    unsigned BitPos = j * EltWidth;

    if (OpVal.isUndef())
      SplatUndef.setBits(BitPos, BitPos + EltWidth);
    else if (auto *CN = dyn_cast<ConstantSDNode>(OpVal))
      SplatValue.insertBits(CN->getAPIntValue().zextOrTrunc(EltWidth), BitPos);
    else if (auto *CN = dyn_cast<ConstantFPSDNode>(OpVal))
      SplatValue.insertBits(CN->getValueAPF().bitcastToAPInt(), BitPos);
    else
      return false;
  }

  // The build_vector is all constants or undefs. Find the smallest element
  // size that splats the vector.
  HasAnyUndefs = (SplatUndef != 0);

  // FIXME: This does not work for vectors with elements less than 8 bits.
  while (VecWidth > 8) {
    unsigned HalfSize = VecWidth / 2;
    APInt HighValue = SplatValue.lshr(HalfSize).trunc(HalfSize);
    APInt LowValue = SplatValue.trunc(HalfSize);
    APInt HighUndef = SplatUndef.lshr(HalfSize).trunc(HalfSize);
    APInt LowUndef = SplatUndef.trunc(HalfSize);

    // If the two halves do not match (ignoring undef bits), stop here.
    if ((HighValue & ~LowUndef) != (LowValue & ~HighUndef) ||
        MinSplatBits > HalfSize)
      break;

    SplatValue = HighValue | LowValue;
    SplatUndef = HighUndef & LowUndef;

    VecWidth = HalfSize;
  }

  SplatBitSize = VecWidth;
  return true;
}

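/// getSplatValue - If every demanded element of this build_vector is the same
/// (or undef), return the splatted value; otherwise return an empty SDValue.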
SDValue BuildVectorSDNode::getSplatValue(const APInt &DemandedElts,
                                         BitVector *UndefElements) const {
  if (UndefElements) {
    UndefElements->clear();
    UndefElements->resize(getNumOperands());
  }
  assert(getNumOperands() == DemandedElts.getBitWidth() &&
         "Unexpected vector size");
  if (!DemandedElts)
    return SDValue();
  SDValue Splatted;
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
    if (!DemandedElts[i])
      continue;
    SDValue Op = getOperand(i);
    if (Op.isUndef()) {
      if (UndefElements)
        (*UndefElements)[i] = true;
    } else if (!Splatted) {
      Splatted = Op;
    } else if (Splatted != Op) {
      return SDValue();
    }
  }

  if (!Splatted) {
    unsigned FirstDemandedIdx = DemandedElts.countTrailingZeros();
    assert(getOperand(FirstDemandedIdx).isUndef() &&
           "Can only have a splat without a constant for all undefs.");
    return getOperand(FirstDemandedIdx);
  }

  return Splatted;
}

SDValue BuildVectorSDNode::getSplatValue(BitVector *UndefElements) const {
  APInt DemandedElts = APInt::getAllOnesValue(getNumOperands());
  return getSplatValue(DemandedElts, UndefElements);
}

ConstantSDNode *
BuildVectorSDNode::getConstantSplatNode(const APInt &DemandedElts,
                                        BitVector *UndefElements) const {
  return dyn_cast_or_null<ConstantSDNode>(
      getSplatValue(DemandedElts, UndefElements));
}

ConstantSDNode *
BuildVectorSDNode::getConstantSplatNode(BitVector *UndefElements) const {
  return dyn_cast_or_null<ConstantSDNode>(getSplatValue(UndefElements));
}

ConstantFPSDNode *
BuildVectorSDNode::getConstantFPSplatNode(const APInt &DemandedElts,
                                          BitVector *UndefElements) const {
  return dyn_cast_or_null<ConstantFPSDNode>(
      getSplatValue(DemandedElts, UndefElements));
}

ConstantFPSDNode *
BuildVectorSDNode::getConstantFPSplatNode(BitVector *UndefElements) const {
  return dyn_cast_or_null<ConstantFPSDNode>(getSplatValue(UndefElements));
}

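/// If this build_vector is a constant FP splat whose value converts exactly
/// to an integer power of two of the given bit width, return its log2;
/// otherwise return -1.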
int32_t
BuildVectorSDNode::getConstantFPSplatPow2ToLog2Int(BitVector *UndefElements,
                                                   uint32_t BitWidth) const {
  if (ConstantFPSDNode *CN =
          dyn_cast_or_null<ConstantFPSDNode>(getSplatValue(UndefElements))) {
    bool IsExact;
    APSInt IntVal(BitWidth);
    const APFloat &APF = CN->getValueAPF();
    if (APF.convertToInteger(IntVal, APFloat::rmTowardZero, &IsExact) !=
            APFloat::opOK ||
        !IsExact)
      return -1;

    return IntVal.exactLogBase2();
  }
  return -1;
}

bool BuildVectorSDNode::isConstant() const {
  for (const SDValue &Op : op_values()) {
    unsigned Opc = Op.getOpcode();
    if (Opc != ISD::UNDEF && Opc != ISD::Constant && Opc != ISD::ConstantFP)
      return false;
  }
  return true;
}

bool ShuffleVectorSDNode::isSplatMask(const int *Mask, EVT VT) {
  // Find the first non-undef value in the shuffle mask.
  unsigned i, e;
  for (i = 0, e = VT.getVectorNumElements(); i != e && Mask[i] < 0; ++i)
    /* search */;

  // If all elements are undefined, this shuffle can be considered a splat
  // (although it should eventually get simplified away completely).
  if (i == e)
    return true;

  // Make sure all remaining elements are either undef or the same as the first
  // non-undef value.
  for (int Idx = Mask[i]; i != e; ++i)
    if (Mask[i] >= 0 && Mask[i] != Idx)
      return false;
  return true;
}

// Returns the SDNode if it is a constant integer BuildVector
// or constant integer.
SDNode *SelectionDAG::isConstantIntBuildVectorOrConstantInt(SDValue N) {
  if (isa<ConstantSDNode>(N))
    return N.getNode();
  if (ISD::isBuildVectorOfConstantSDNodes(N.getNode()))
    return N.getNode();
  // Treat a GlobalAddress supporting constant offset folding as a
  // constant integer.
  if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(N))
    if (GA->getOpcode() == ISD::GlobalAddress &&
        TLI->isOffsetFoldingLegal(GA))
      return GA;
  return nullptr;
}

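// Returns the SDNode if it is a constant float BuildVector
// or constant float.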
SDNode *SelectionDAG::isConstantFPBuildVectorOrConstantFP(SDValue N) {
  if (isa<ConstantFPSDNode>(N))
    return N.getNode();

  if (ISD::isBuildVectorOfConstantFPSDNodes(N.getNode()))
    return N.getNode();

  return nullptr;
}

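/// Allocate and initialize Node's operand list from Vals, registering Node as
/// a user of each operand and computing Node's initial divergence bit.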
void SelectionDAG::createOperands(SDNode *Node, ArrayRef<SDValue> Vals) {
  assert(!Node->OperandList && "Node already has operands");
  assert(SDNode::getMaxNumOperands() >= Vals.size() &&
         "too many operands to fit into SDNode");
  SDUse *Ops = OperandRecycler.allocate(
      ArrayRecycler<SDUse>::Capacity::get(Vals.size()), OperandAllocator);

  bool IsDivergent = false;
  for (unsigned I = 0; I != Vals.size(); ++I) {
    Ops[I].setUser(Node);
    Ops[I].setInitial(Vals[I]);
    // Skip Chain operands. They do not carry divergence.
    if (Ops[I].Val.getValueType() != MVT::Other)
      IsDivergent = IsDivergent || Ops[I].getNode()->isDivergent();
  }
  Node->NumOperands = Vals.size();
  Node->OperandList = Ops;
  IsDivergent |= TLI->isSDNodeSourceOfDivergence(Node, FLI, DA);
  if (!TLI->isSDNodeAlwaysUniform(Node))
    Node->SDNodeBits.IsDivergent = IsDivergent;
  checkForCycles(Node);
}

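/// Build a TokenFactor over Vals, folding the trailing operands into nested
/// TokenFactors whenever Vals exceeds SDNode::getMaxNumOperands().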
SDValue SelectionDAG::getTokenFactor(const SDLoc &DL,
                                     SmallVectorImpl<SDValue> &Vals) {
  size_t Limit = SDNode::getMaxNumOperands();
  while (Vals.size() > Limit) {
    unsigned SliceIdx = Vals.size() - Limit;
    auto ExtractedTFs = ArrayRef<SDValue>(Vals).slice(SliceIdx, Limit);
    SDValue NewTF = getNode(ISD::TokenFactor, DL, MVT::Other, ExtractedTFs);
    Vals.erase(Vals.begin() + SliceIdx, Vals.end());
    Vals.emplace_back(NewTF);
  }
  return getNode(ISD::TokenFactor, DL, MVT::Other, Vals);
}

#ifndef NDEBUG
static void checkForCyclesHelper(const SDNode *N,
                                 SmallPtrSetImpl<const SDNode *> &Visited,
                                 SmallPtrSetImpl<const SDNode *> &Checked,
                                 const llvm::SelectionDAG *DAG) {
  // If this node has already been checked, don't check it again.
  if (Checked.count(N))
    return;

  // If a node has already been visited on this depth-first walk, reject it as
  // a cycle.
  if (!Visited.insert(N).second) {
    errs() << "Detected cycle in SelectionDAG\n";
    dbgs() << "Offending node:\n";
    N->dumprFull(DAG);
    dbgs() << "\n";
    abort();
  }

  for (const SDValue &Op : N->op_values())
    checkForCyclesHelper(Op.getNode(), Visited, Checked, DAG);

  Checked.insert(N);
  Visited.erase(N);
}
#endif

void llvm::checkForCycles(const llvm::SDNode *N,
                          const llvm::SelectionDAG *DAG,
                          bool force) {
#ifndef NDEBUG
  bool check = force;
#ifdef EXPENSIVE_CHECKS
  check = true;
#endif // EXPENSIVE_CHECKS
  if (check) {
    assert(N && "Checking nonexistent SDNode");
    SmallPtrSet<const SDNode *, 32> visited;
    SmallPtrSet<const SDNode *, 32> checked;
    checkForCyclesHelper(N, visited, checked, DAG);
  }
#endif // !NDEBUG
}

void llvm::checkForCycles(const llvm::SelectionDAG *DAG, bool force) {
  checkForCycles(DAG->getRoot().getNode(), DAG, force);
}