1 //===-- SelectionDAG.cpp - Implement the SelectionDAG data structures -----===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This implements the SelectionDAG class.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "llvm/CodeGen/SelectionDAG.h"
15 #include "SDNodeDbgValue.h"
16 #include "llvm/ADT/SetVector.h"
17 #include "llvm/ADT/SmallPtrSet.h"
18 #include "llvm/ADT/SmallSet.h"
19 #include "llvm/ADT/SmallVector.h"
20 #include "llvm/ADT/StringExtras.h"
21 #include "llvm/Analysis/ValueTracking.h"
22 #include "llvm/CodeGen/MachineBasicBlock.h"
23 #include "llvm/CodeGen/MachineConstantPool.h"
24 #include "llvm/CodeGen/MachineFrameInfo.h"
25 #include "llvm/CodeGen/MachineModuleInfo.h"
26 #include "llvm/IR/CallingConv.h"
27 #include "llvm/IR/Constants.h"
28 #include "llvm/IR/DataLayout.h"
29 #include "llvm/IR/DebugInfo.h"
30 #include "llvm/IR/DerivedTypes.h"
31 #include "llvm/IR/Function.h"
32 #include "llvm/IR/GlobalAlias.h"
33 #include "llvm/IR/GlobalVariable.h"
34 #include "llvm/IR/Intrinsics.h"
35 #include "llvm/Support/CommandLine.h"
36 #include "llvm/Support/Debug.h"
37 #include "llvm/Support/ErrorHandling.h"
38 #include "llvm/Support/ManagedStatic.h"
39 #include "llvm/Support/MathExtras.h"
40 #include "llvm/Support/Mutex.h"
41 #include "llvm/Support/raw_ostream.h"
42 #include "llvm/Target/TargetInstrInfo.h"
43 #include "llvm/Target/TargetIntrinsicInfo.h"
44 #include "llvm/Target/TargetLowering.h"
45 #include "llvm/Target/TargetMachine.h"
46 #include "llvm/Target/TargetOptions.h"
47 #include "llvm/Target/TargetRegisterInfo.h"
48 #include "llvm/Target/TargetSelectionDAGInfo.h"
49 #include <algorithm>
50 #include <cmath>
51 
52 using namespace llvm;
53 
54 /// makeVTList - Return an instance of the SDVTList struct initialized with the
55 /// specified members.
56 static SDVTList makeVTList(const EVT *VTs, unsigned NumVTs) {
57   SDVTList Res = {VTs, NumVTs};
58   return Res;
59 }
60 
61 // Default null implementations of the callbacks.
62 void SelectionDAG::DAGUpdateListener::NodeDeleted(SDNode*, SDNode*) {}
63 void SelectionDAG::DAGUpdateListener::NodeUpdated(SDNode*) {}
64 
65 //===----------------------------------------------------------------------===//
66 //                              ConstantFPSDNode Class
67 //===----------------------------------------------------------------------===//
68 
69 /// isExactlyValue - We don't rely on operator== working on double values, as
70 /// it returns true for things that are clearly not equal, like -0.0 and 0.0.
71 /// As such, this method can be used to do an exact bit-for-bit comparison of
72 /// two floating point values.
73 bool ConstantFPSDNode::isExactlyValue(const APFloat& V) const {
74   return getValueAPF().bitwiseIsEqual(V);
75 }
76 
77 bool ConstantFPSDNode::isValueValidForType(EVT VT,
78                                            const APFloat& Val) {
79   assert(VT.isFloatingPoint() && "Can only convert between FP types");
80 
81   // convert modifies in place, so make a copy.
82   APFloat Val2 = APFloat(Val);
83   bool losesInfo;
84   (void) Val2.convert(SelectionDAG::EVTToAPFloatSemantics(VT),
85                       APFloat::rmNearestTiesToEven,
86                       &losesInfo);
87   return !losesInfo;
88 }
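// Illustrative example: 0.5 converts exactly from double to float, so it is a
// valid value for MVT::f32, whereas 0.1 loses precision in the conversion and
// is rejected.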
89 
90 //===----------------------------------------------------------------------===//
91 //                              ISD Namespace
92 //===----------------------------------------------------------------------===//
93 
94 /// isBuildVectorAllOnes - Return true if the specified node is a
95 /// BUILD_VECTOR where all of the elements are ~0 or undef.
96 bool ISD::isBuildVectorAllOnes(const SDNode *N) {
97   // Look through a bit convert.
98   if (N->getOpcode() == ISD::BITCAST)
99     N = N->getOperand(0).getNode();
100 
101   if (N->getOpcode() != ISD::BUILD_VECTOR) return false;
102 
103   unsigned i = 0, e = N->getNumOperands();
104 
105   // Skip over all of the undef values.
106   while (i != e && N->getOperand(i).getOpcode() == ISD::UNDEF)
107     ++i;
108 
109   // Do not accept an all-undef vector.
110   if (i == e) return false;
111 
112   // Do not accept build_vectors that aren't all constants or which have non-~0
113   // elements. We have to be a bit careful here, as the type of the constant
114   // may not be the same as the type of the vector elements due to type
115   // legalization (the elements are promoted to a legal type for the target and
116   // a vector of a type may be legal when the base element type is not).
117   // We only want to check enough bits to cover the vector elements, because
118   // we care if the resultant vector is all ones, not whether the individual
119   // constants are.
120   SDValue NotZero = N->getOperand(i);
121   unsigned EltSize = N->getValueType(0).getVectorElementType().getSizeInBits();
122   if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(NotZero)) {
123     if (CN->getAPIntValue().countTrailingOnes() < EltSize)
124       return false;
125   } else if (ConstantFPSDNode *CFPN = dyn_cast<ConstantFPSDNode>(NotZero)) {
126     if (CFPN->getValueAPF().bitcastToAPInt().countTrailingOnes() < EltSize)
127       return false;
128   } else
129     return false;
130 
131   // Okay, we have at least one ~0 value, check to see if the rest match or are
132   // undefs. Even with the above element type twiddling, this should be OK, as
133   // the same type legalization should have applied to all the elements.
134   for (++i; i != e; ++i)
135     if (N->getOperand(i) != NotZero &&
136         N->getOperand(i).getOpcode() != ISD::UNDEF)
137       return false;
138   return true;
139 }
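// Illustrative example: a v4i32 BUILD_VECTOR of <-1, undef, -1, -1> is
// recognized as all-ones, while a vector containing any non-constant or
// non-~0 element is rejected.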
140 
141 
142 /// isBuildVectorAllZeros - Return true if the specified node is a
143 /// BUILD_VECTOR where all of the elements are 0 or undef.
144 bool ISD::isBuildVectorAllZeros(const SDNode *N) {
145   // Look through a bit convert.
146   if (N->getOpcode() == ISD::BITCAST)
147     N = N->getOperand(0).getNode();
148 
149   if (N->getOpcode() != ISD::BUILD_VECTOR) return false;
150 
151   bool IsAllUndef = true;
152   for (unsigned i = 0, e = N->getNumOperands(); i < e; ++i) {
153     if (N->getOperand(i).getOpcode() == ISD::UNDEF)
154       continue;
155     IsAllUndef = false;
156     // Do not accept build_vectors that aren't all constants or which have non-0
157     // elements. We have to be a bit careful here, as the type of the constant
158     // may not be the same as the type of the vector elements due to type
159     // legalization (the elements are promoted to a legal type for the target
160     // and a vector of a type may be legal when the base element type is not).
161     // We only want to check enough bits to cover the vector elements, because
162     // we care if the resultant vector is all zeros, not whether the individual
163     // constants are.
164     SDValue Zero = N->getOperand(i);
165     unsigned EltSize = N->getValueType(0).getVectorElementType().getSizeInBits();
166     if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Zero)) {
167       if (CN->getAPIntValue().countTrailingZeros() < EltSize)
168         return false;
169     } else if (ConstantFPSDNode *CFPN = dyn_cast<ConstantFPSDNode>(Zero)) {
170       if (CFPN->getValueAPF().bitcastToAPInt().countTrailingZeros() < EltSize)
171         return false;
172     } else
173       return false;
174   }
175 
176   // Do not accept an all-undef vector.
177   if (IsAllUndef)
178     return false;
179   return true;
180 }
181 
182 /// \brief Return true if the specified node is a BUILD_VECTOR node of
183 /// all ConstantSDNode or undef.
184 bool ISD::isBuildVectorOfConstantSDNodes(const SDNode *N) {
185   if (N->getOpcode() != ISD::BUILD_VECTOR)
186     return false;
187 
188   for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
189     SDValue Op = N->getOperand(i);
190     if (Op.getOpcode() == ISD::UNDEF)
191       continue;
192     if (!isa<ConstantSDNode>(Op))
193       return false;
194   }
195   return true;
196 }
197 
198 /// isScalarToVector - Return true if the specified node is a
199 /// ISD::SCALAR_TO_VECTOR node or a BUILD_VECTOR node where only the low
200 /// element is not an undef.
201 bool ISD::isScalarToVector(const SDNode *N) {
202   if (N->getOpcode() == ISD::SCALAR_TO_VECTOR)
203     return true;
204 
205   if (N->getOpcode() != ISD::BUILD_VECTOR)
206     return false;
207   if (N->getOperand(0).getOpcode() == ISD::UNDEF)
208     return false;
209   unsigned NumElems = N->getNumOperands();
210   if (NumElems == 1)
211     return false;
212   for (unsigned i = 1; i < NumElems; ++i) {
213     SDValue V = N->getOperand(i);
214     if (V.getOpcode() != ISD::UNDEF)
215       return false;
216   }
217   return true;
218 }
219 
220 /// allOperandsUndef - Return true if the node has at least one operand
221 /// and all operands of the specified node are ISD::UNDEF.
222 bool ISD::allOperandsUndef(const SDNode *N) {
223   // Return false if the node has no operands.
224   // This is "logically inconsistent" with the definition of "all" but
225   // is probably the desired behavior.
226   if (N->getNumOperands() == 0)
227     return false;
228 
229   for (unsigned i = 0, e = N->getNumOperands(); i != e ; ++i)
230     if (N->getOperand(i).getOpcode() != ISD::UNDEF)
231       return false;
232 
233   return true;
234 }
235 
236 ISD::NodeType ISD::getExtForLoadExtType(ISD::LoadExtType ExtType) {
237   switch (ExtType) {
238   case ISD::EXTLOAD:
239     return ISD::ANY_EXTEND;
240   case ISD::SEXTLOAD:
241     return ISD::SIGN_EXTEND;
242   case ISD::ZEXTLOAD:
243     return ISD::ZERO_EXTEND;
244   default:
245     break;
246   }
247 
248   llvm_unreachable("Invalid LoadExtType");
249 }
250 
251 /// getSetCCSwappedOperands - Return the operation corresponding to (Y op X)
252 /// when given the operation for (X op Y).
253 ISD::CondCode ISD::getSetCCSwappedOperands(ISD::CondCode Operation) {
254   // To perform this operation, we just need to swap the L and G bits of the
255   // operation.
256   unsigned OldL = (Operation >> 2) & 1;
257   unsigned OldG = (Operation >> 1) & 1;
258   return ISD::CondCode((Operation & ~6) |  // Keep the N, U, E bits
259                        (OldL << 1) |       // New G bit
260                        (OldG << 2));       // New L bit.
261 }
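// For example, getSetCCSwappedOperands(ISD::SETLT) returns ISD::SETGT, since
// (X < Y) holds exactly when (Y > X); the unordered and equality bits are
// preserved, so ISD::SETULE likewise maps to ISD::SETUGE.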
262 
263 /// getSetCCInverse - Return the operation corresponding to !(X op Y), where
264 /// 'op' is a valid SetCC operation.
265 ISD::CondCode ISD::getSetCCInverse(ISD::CondCode Op, bool isInteger) {
266   unsigned Operation = Op;
267   if (isInteger)
268     Operation ^= 7;   // Flip L, G, E bits, but not U.
269   else
270     Operation ^= 15;  // Flip all of the condition bits.
271 
272   if (Operation > ISD::SETTRUE2)
273     Operation &= ~8;  // Don't let N and U bits get set.
274 
275   return ISD::CondCode(Operation);
276 }
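// For example, the integer inverse of ISD::SETEQ is ISD::SETNE, while the
// floating-point inverse of ISD::SETOLT is ISD::SETUGE, because
// !(X < Y, ordered) is (X >= Y, or unordered).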
277 
278 
279 /// isSignedOp - For an integer comparison, return 1 if the comparison is a
280 /// signed operation and 2 if the result is an unsigned comparison.  Return zero
281 /// if the operation does not depend on the sign of the input (setne and seteq).
282 static int isSignedOp(ISD::CondCode Opcode) {
283   switch (Opcode) {
284   default: llvm_unreachable("Illegal integer setcc operation!");
285   case ISD::SETEQ:
286   case ISD::SETNE: return 0;
287   case ISD::SETLT:
288   case ISD::SETLE:
289   case ISD::SETGT:
290   case ISD::SETGE: return 1;
291   case ISD::SETULT:
292   case ISD::SETULE:
293   case ISD::SETUGT:
294   case ISD::SETUGE: return 2;
295   }
296 }
297 
298 /// getSetCCOrOperation - Return the result of a logical OR between different
299 /// comparisons of identical values: ((X op1 Y) | (X op2 Y)).  This function
300 /// returns SETCC_INVALID if it is not possible to represent the resultant
301 /// comparison.
302 ISD::CondCode ISD::getSetCCOrOperation(ISD::CondCode Op1, ISD::CondCode Op2,
303                                        bool isInteger) {
304   if (isInteger && (isSignedOp(Op1) | isSignedOp(Op2)) == 3)
305     // Cannot fold a signed integer setcc with an unsigned integer setcc.
306     return ISD::SETCC_INVALID;
307 
308   unsigned Op = Op1 | Op2;  // Combine all of the condition bits.
309 
310   // If the N and U bits get set then the resultant comparison DOES suddenly
311   // care about orderedness, and is true when ordered.
312   if (Op > ISD::SETTRUE2)
313     Op &= ~16;     // Clear the U bit if the N bit is set.
314 
315   // Canonicalize illegal integer setcc's.
316   if (isInteger && Op == ISD::SETUNE)  // e.g. SETUGT | SETULT
317     Op = ISD::SETNE;
318 
319   return ISD::CondCode(Op);
320 }
321 
322 /// getSetCCAndOperation - Return the result of a logical AND between different
323 /// comparisons of identical values: ((X op1 Y) & (X op2 Y)).  This
324 /// function returns zero if it is not possible to represent the resultant
325 /// comparison.
326 ISD::CondCode ISD::getSetCCAndOperation(ISD::CondCode Op1, ISD::CondCode Op2,
327                                         bool isInteger) {
328   if (isInteger && (isSignedOp(Op1) | isSignedOp(Op2)) == 3)
329     // Cannot fold a signed setcc with an unsigned setcc.
330     return ISD::SETCC_INVALID;
331 
332   // Combine all of the condition bits.
333   ISD::CondCode Result = ISD::CondCode(Op1 & Op2);
334 
335   // Canonicalize illegal integer setcc's.
336   if (isInteger) {
337     switch (Result) {
338     default: break;
339     case ISD::SETUO : Result = ISD::SETFALSE; break;  // SETUGT & SETULT
340     case ISD::SETOEQ:                                 // SETEQ  & SETU[LG]E
341     case ISD::SETUEQ: Result = ISD::SETEQ   ; break;  // SETUGE & SETULE
342     case ISD::SETOLT: Result = ISD::SETULT  ; break;  // SETULT & SETNE
343     case ISD::SETOGT: Result = ISD::SETUGT  ; break;  // SETUGT & SETNE
344     }
345   }
346 
347   return Result;
348 }
349 
350 //===----------------------------------------------------------------------===//
351 //                           SDNode Profile Support
352 //===----------------------------------------------------------------------===//
353 
354 /// AddNodeIDOpcode - Add the node opcode to the NodeID data.
355 ///
356 static void AddNodeIDOpcode(FoldingSetNodeID &ID, unsigned OpC)  {
357   ID.AddInteger(OpC);
358 }
359 
360 /// AddNodeIDValueTypes - Value type lists are intern'd so we can represent them
361 /// solely with their pointer.
362 static void AddNodeIDValueTypes(FoldingSetNodeID &ID, SDVTList VTList) {
363   ID.AddPointer(VTList.VTs);
364 }
365 
366 /// AddNodeIDOperands - Various routines for adding operands to the NodeID data.
367 ///
368 static void AddNodeIDOperands(FoldingSetNodeID &ID,
369                               ArrayRef<SDValue> Ops) {
370   for (auto& Op : Ops) {
371     ID.AddPointer(Op.getNode());
372     ID.AddInteger(Op.getResNo());
373   }
374 }
375 
376 /// AddNodeIDOperands - Various routines for adding operands to the NodeID data.
377 ///
378 static void AddNodeIDOperands(FoldingSetNodeID &ID,
379                               ArrayRef<SDUse> Ops) {
380   for (auto& Op : Ops) {
381     ID.AddPointer(Op.getNode());
382     ID.AddInteger(Op.getResNo());
383   }
384 }
385 
386 static void AddBinaryNodeIDCustom(FoldingSetNodeID &ID, bool nuw, bool nsw,
387                                   bool exact) {
388   ID.AddBoolean(nuw);
389   ID.AddBoolean(nsw);
390   ID.AddBoolean(exact);
391 }
392 
393 /// AddBinaryNodeIDCustom - Add BinarySDNodes special infos
394 static void AddBinaryNodeIDCustom(FoldingSetNodeID &ID, unsigned Opcode,
395                                   bool nuw, bool nsw, bool exact) {
396   if (isBinOpWithFlags(Opcode))
397     AddBinaryNodeIDCustom(ID, nuw, nsw, exact);
398 }
399 
400 static void AddNodeIDNode(FoldingSetNodeID &ID, unsigned short OpC,
401                           SDVTList VTList, ArrayRef<SDValue> OpList) {
402   AddNodeIDOpcode(ID, OpC);
403   AddNodeIDValueTypes(ID, VTList);
404   AddNodeIDOperands(ID, OpList);
405 }
406 
407 /// AddNodeIDCustom - If this is an SDNode with special info, add this info to
408 /// the NodeID data.
409 static void AddNodeIDCustom(FoldingSetNodeID &ID, const SDNode *N) {
410   switch (N->getOpcode()) {
411   case ISD::TargetExternalSymbol:
412   case ISD::ExternalSymbol:
413     llvm_unreachable("Should only be used on nodes with operands");
414   default: break;  // Normal nodes don't need extra info.
415   case ISD::TargetConstant:
416   case ISD::Constant: {
417     const ConstantSDNode *C = cast<ConstantSDNode>(N);
418     ID.AddPointer(C->getConstantIntValue());
419     ID.AddBoolean(C->isOpaque());
420     break;
421   }
422   case ISD::TargetConstantFP:
423   case ISD::ConstantFP: {
424     ID.AddPointer(cast<ConstantFPSDNode>(N)->getConstantFPValue());
425     break;
426   }
427   case ISD::TargetGlobalAddress:
428   case ISD::GlobalAddress:
429   case ISD::TargetGlobalTLSAddress:
430   case ISD::GlobalTLSAddress: {
431     const GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(N);
432     ID.AddPointer(GA->getGlobal());
433     ID.AddInteger(GA->getOffset());
434     ID.AddInteger(GA->getTargetFlags());
435     ID.AddInteger(GA->getAddressSpace());
436     break;
437   }
438   case ISD::BasicBlock:
439     ID.AddPointer(cast<BasicBlockSDNode>(N)->getBasicBlock());
440     break;
441   case ISD::Register:
442     ID.AddInteger(cast<RegisterSDNode>(N)->getReg());
443     break;
444   case ISD::RegisterMask:
445     ID.AddPointer(cast<RegisterMaskSDNode>(N)->getRegMask());
446     break;
447   case ISD::SRCVALUE:
448     ID.AddPointer(cast<SrcValueSDNode>(N)->getValue());
449     break;
450   case ISD::FrameIndex:
451   case ISD::TargetFrameIndex:
452     ID.AddInteger(cast<FrameIndexSDNode>(N)->getIndex());
453     break;
454   case ISD::JumpTable:
455   case ISD::TargetJumpTable:
456     ID.AddInteger(cast<JumpTableSDNode>(N)->getIndex());
457     ID.AddInteger(cast<JumpTableSDNode>(N)->getTargetFlags());
458     break;
459   case ISD::ConstantPool:
460   case ISD::TargetConstantPool: {
461     const ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(N);
462     ID.AddInteger(CP->getAlignment());
463     ID.AddInteger(CP->getOffset());
464     if (CP->isMachineConstantPoolEntry())
465       CP->getMachineCPVal()->addSelectionDAGCSEId(ID);
466     else
467       ID.AddPointer(CP->getConstVal());
468     ID.AddInteger(CP->getTargetFlags());
469     break;
470   }
471   case ISD::TargetIndex: {
472     const TargetIndexSDNode *TI = cast<TargetIndexSDNode>(N);
473     ID.AddInteger(TI->getIndex());
474     ID.AddInteger(TI->getOffset());
475     ID.AddInteger(TI->getTargetFlags());
476     break;
477   }
478   case ISD::LOAD: {
479     const LoadSDNode *LD = cast<LoadSDNode>(N);
480     ID.AddInteger(LD->getMemoryVT().getRawBits());
481     ID.AddInteger(LD->getRawSubclassData());
482     ID.AddInteger(LD->getPointerInfo().getAddrSpace());
483     break;
484   }
485   case ISD::STORE: {
486     const StoreSDNode *ST = cast<StoreSDNode>(N);
487     ID.AddInteger(ST->getMemoryVT().getRawBits());
488     ID.AddInteger(ST->getRawSubclassData());
489     ID.AddInteger(ST->getPointerInfo().getAddrSpace());
490     break;
491   }
492   case ISD::SDIV:
493   case ISD::UDIV:
494   case ISD::SRA:
495   case ISD::SRL:
496   case ISD::MUL:
497   case ISD::ADD:
498   case ISD::SUB:
499   case ISD::SHL: {
500     const BinaryWithFlagsSDNode *BinNode = cast<BinaryWithFlagsSDNode>(N);
501     AddBinaryNodeIDCustom(ID, N->getOpcode(), BinNode->hasNoUnsignedWrap(),
502                           BinNode->hasNoSignedWrap(), BinNode->isExact());
503     break;
504   }
505   case ISD::ATOMIC_CMP_SWAP:
506   case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
507   case ISD::ATOMIC_SWAP:
508   case ISD::ATOMIC_LOAD_ADD:
509   case ISD::ATOMIC_LOAD_SUB:
510   case ISD::ATOMIC_LOAD_AND:
511   case ISD::ATOMIC_LOAD_OR:
512   case ISD::ATOMIC_LOAD_XOR:
513   case ISD::ATOMIC_LOAD_NAND:
514   case ISD::ATOMIC_LOAD_MIN:
515   case ISD::ATOMIC_LOAD_MAX:
516   case ISD::ATOMIC_LOAD_UMIN:
517   case ISD::ATOMIC_LOAD_UMAX:
518   case ISD::ATOMIC_LOAD:
519   case ISD::ATOMIC_STORE: {
520     const AtomicSDNode *AT = cast<AtomicSDNode>(N);
521     ID.AddInteger(AT->getMemoryVT().getRawBits());
522     ID.AddInteger(AT->getRawSubclassData());
523     ID.AddInteger(AT->getPointerInfo().getAddrSpace());
524     break;
525   }
526   case ISD::PREFETCH: {
527     const MemSDNode *PF = cast<MemSDNode>(N);
528     ID.AddInteger(PF->getPointerInfo().getAddrSpace());
529     break;
530   }
531   case ISD::VECTOR_SHUFFLE: {
532     const ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N);
533     for (unsigned i = 0, e = N->getValueType(0).getVectorNumElements();
534          i != e; ++i)
535       ID.AddInteger(SVN->getMaskElt(i));
536     break;
537   }
538   case ISD::TargetBlockAddress:
539   case ISD::BlockAddress: {
540     const BlockAddressSDNode *BA = cast<BlockAddressSDNode>(N);
541     ID.AddPointer(BA->getBlockAddress());
542     ID.AddInteger(BA->getOffset());
543     ID.AddInteger(BA->getTargetFlags());
544     break;
545   }
546   } // end switch (N->getOpcode())
547 
548   // Target specific memory nodes could also have address spaces to check.
549   if (N->isTargetMemoryOpcode())
550     ID.AddInteger(cast<MemSDNode>(N)->getPointerInfo().getAddrSpace());
551 }
552 
553 /// AddNodeIDNode - Generic routine for adding a nodes info to the NodeID
554 /// data.
555 static void AddNodeIDNode(FoldingSetNodeID &ID, const SDNode *N) {
556   AddNodeIDOpcode(ID, N->getOpcode());
557   // Add the return value info.
558   AddNodeIDValueTypes(ID, N->getVTList());
559   // Add the operand info.
560   AddNodeIDOperands(ID, N->ops());
561 
562   // Handle SDNode leafs with special info.
563   AddNodeIDCustom(ID, N);
564 }
565 
566 /// encodeMemSDNodeFlags - Generic routine for computing a value for use in
567 /// the CSE map that carries volatility, temporalness, indexing mode, and
568 /// extension/truncation information.
569 ///
570 static inline unsigned
571 encodeMemSDNodeFlags(int ConvType, ISD::MemIndexedMode AM, bool isVolatile,
572                      bool isNonTemporal, bool isInvariant) {
573   assert((ConvType & 3) == ConvType &&
574          "ConvType may not require more than 2 bits!");
575   assert((AM & 7) == AM &&
576          "AM may not require more than 3 bits!");
577   return ConvType |
578          (AM << 2) |
579          (isVolatile << 5) |
580          (isNonTemporal << 6) |
581          (isInvariant << 7);
582 }
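// The packed layout produced above is: bits [1:0] = extension/conversion type,
// bits [4:2] = indexed addressing mode, bit 5 = volatile, bit 6 = non-temporal,
// bit 7 = invariant.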
583 
584 //===----------------------------------------------------------------------===//
585 //                              SelectionDAG Class
586 //===----------------------------------------------------------------------===//
587 
588 /// doNotCSE - Return true if CSE should not be performed for this node.
589 static bool doNotCSE(SDNode *N) {
590   if (N->getValueType(0) == MVT::Glue)
591     return true; // Never CSE anything that produces a flag.
592 
593   switch (N->getOpcode()) {
594   default: break;
595   case ISD::HANDLENODE:
596   case ISD::EH_LABEL:
597     return true;   // Never CSE these nodes.
598   }
599 
600   // Check that remaining values produced are not flags.
601   for (unsigned i = 1, e = N->getNumValues(); i != e; ++i)
602     if (N->getValueType(i) == MVT::Glue)
603       return true; // Never CSE anything that produces a flag.
604 
605   return false;
606 }
607 
608 /// RemoveDeadNodes - This method deletes all unreachable nodes in the
609 /// SelectionDAG.
610 void SelectionDAG::RemoveDeadNodes() {
611   // Create a dummy node (which is not added to allnodes), that adds a reference
612   // to the root node, preventing it from being deleted.
613   HandleSDNode Dummy(getRoot());
614 
615   SmallVector<SDNode*, 128> DeadNodes;
616 
617   // Add all obviously-dead nodes to the DeadNodes worklist.
618   for (allnodes_iterator I = allnodes_begin(), E = allnodes_end(); I != E; ++I)
619     if (I->use_empty())
620       DeadNodes.push_back(I);
621 
622   RemoveDeadNodes(DeadNodes);
623 
624   // If the root changed (e.g. it was a dead load), update the root.
625   setRoot(Dummy.getValue());
626 }
627 
628 /// RemoveDeadNodes - This method deletes the unreachable nodes in the
629 /// given list, and any nodes that become unreachable as a result.
630 void SelectionDAG::RemoveDeadNodes(SmallVectorImpl<SDNode *> &DeadNodes) {
631 
632   // Process the worklist, deleting the nodes and adding their uses to the
633   // worklist.
634   while (!DeadNodes.empty()) {
635     SDNode *N = DeadNodes.pop_back_val();
636 
637     for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
638       DUL->NodeDeleted(N, nullptr);
639 
640     // Take the node out of the appropriate CSE map.
641     RemoveNodeFromCSEMaps(N);
642 
643     // Next, brutally remove the operand list.  This is safe to do, as there are
644     // no cycles in the graph.
645     for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ) {
646       SDUse &Use = *I++;
647       SDNode *Operand = Use.getNode();
648       Use.set(SDValue());
649 
650       // Now that we removed this operand, see if there are no uses of it left.
651       if (Operand->use_empty())
652         DeadNodes.push_back(Operand);
653     }
654 
655     DeallocateNode(N);
656   }
657 }
658 
659 void SelectionDAG::RemoveDeadNode(SDNode *N){
660   SmallVector<SDNode*, 16> DeadNodes(1, N);
661 
662   // Create a dummy node that adds a reference to the root node, preventing
663   // it from being deleted.  (This matters if the root is an operand of the
664   // dead node.)
665   HandleSDNode Dummy(getRoot());
666 
667   RemoveDeadNodes(DeadNodes);
668 }
669 
670 void SelectionDAG::DeleteNode(SDNode *N) {
671   // First take this out of the appropriate CSE map.
672   RemoveNodeFromCSEMaps(N);
673 
674   // Finally, remove uses due to operands of this node, remove from the
675   // AllNodes list, and delete the node.
676   DeleteNodeNotInCSEMaps(N);
677 }
678 
679 void SelectionDAG::DeleteNodeNotInCSEMaps(SDNode *N) {
680   assert(N != AllNodes.begin() && "Cannot delete the entry node!");
681   assert(N->use_empty() && "Cannot delete a node that is not dead!");
682 
683   // Drop all of the operands and decrement used node's use counts.
684   N->DropOperands();
685 
686   DeallocateNode(N);
687 }
688 
689 void SelectionDAG::DeallocateNode(SDNode *N) {
690   if (N->OperandsNeedDelete)
691     delete[] N->OperandList;
692 
693   // Set the opcode to DELETED_NODE to help catch bugs when node
694   // memory is reallocated.
695   N->NodeType = ISD::DELETED_NODE;
696 
697   NodeAllocator.Deallocate(AllNodes.remove(N));
698 
699   // If any of the SDDbgValue nodes refer to this SDNode, invalidate them.
700   ArrayRef<SDDbgValue*> DbgVals = DbgInfo->getSDDbgValues(N);
701   for (unsigned i = 0, e = DbgVals.size(); i != e; ++i)
702     DbgVals[i]->setIsInvalidated();
703 }
704 
705 /// RemoveNodeFromCSEMaps - Take the specified node out of the CSE map that
706 /// correspond to it.  This is useful when we're about to delete or repurpose
707 /// the node.  We don't want future request for structurally identical nodes
708 /// to return N anymore.
709 bool SelectionDAG::RemoveNodeFromCSEMaps(SDNode *N) {
710   bool Erased = false;
711   switch (N->getOpcode()) {
712   case ISD::HANDLENODE: return false;  // noop.
713   case ISD::CONDCODE:
714     assert(CondCodeNodes[cast<CondCodeSDNode>(N)->get()] &&
715            "Cond code doesn't exist!");
716     Erased = CondCodeNodes[cast<CondCodeSDNode>(N)->get()] != nullptr;
717     CondCodeNodes[cast<CondCodeSDNode>(N)->get()] = nullptr;
718     break;
719   case ISD::ExternalSymbol:
720     Erased = ExternalSymbols.erase(cast<ExternalSymbolSDNode>(N)->getSymbol());
721     break;
722   case ISD::TargetExternalSymbol: {
723     ExternalSymbolSDNode *ESN = cast<ExternalSymbolSDNode>(N);
724     Erased = TargetExternalSymbols.erase(
725                std::pair<std::string,unsigned char>(ESN->getSymbol(),
726                                                     ESN->getTargetFlags()));
727     break;
728   }
729   case ISD::VALUETYPE: {
730     EVT VT = cast<VTSDNode>(N)->getVT();
731     if (VT.isExtended()) {
732       Erased = ExtendedValueTypeNodes.erase(VT);
733     } else {
734       Erased = ValueTypeNodes[VT.getSimpleVT().SimpleTy] != nullptr;
735       ValueTypeNodes[VT.getSimpleVT().SimpleTy] = nullptr;
736     }
737     break;
738   }
739   default:
740     // Remove it from the CSE Map.
741     assert(N->getOpcode() != ISD::DELETED_NODE && "DELETED_NODE in CSEMap!");
742     assert(N->getOpcode() != ISD::EntryToken && "EntryToken in CSEMap!");
743     Erased = CSEMap.RemoveNode(N);
744     break;
745   }
746 #ifndef NDEBUG
747   // Verify that the node was actually in one of the CSE maps, unless it has a
748   // flag result (which cannot be CSE'd) or is one of the special cases that are
749   // not subject to CSE.
750   if (!Erased && N->getValueType(N->getNumValues()-1) != MVT::Glue &&
751       !N->isMachineOpcode() && !doNotCSE(N)) {
752     N->dump(this);
753     dbgs() << "\n";
754     llvm_unreachable("Node is not in map!");
755   }
756 #endif
757   return Erased;
758 }
759 
760 /// AddModifiedNodeToCSEMaps - The specified node has been removed from the CSE
761 /// maps and modified in place. Add it back to the CSE maps, unless an identical
762 /// node already exists, in which case transfer all its users to the existing
763 /// node. This transfer can potentially trigger recursive merging.
764 ///
765 void
766 SelectionDAG::AddModifiedNodeToCSEMaps(SDNode *N) {
767   // For node types that aren't CSE'd, just act as if no identical node
768   // already exists.
769   if (!doNotCSE(N)) {
770     SDNode *Existing = CSEMap.GetOrInsertNode(N);
771     if (Existing != N) {
772       // If there was already an existing matching node, use ReplaceAllUsesWith
773       // to replace the dead one with the existing one.  This can cause
774       // recursive merging of other unrelated nodes down the line.
775       ReplaceAllUsesWith(N, Existing);
776 
777       // N is now dead. Inform the listeners and delete it.
778       for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
779         DUL->NodeDeleted(N, Existing);
780       DeleteNodeNotInCSEMaps(N);
781       return;
782     }
783   }
784 
785   // If the node doesn't already exist, we updated it.  Inform listeners.
786   for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
787     DUL->NodeUpdated(N);
788 }
789 
790 /// FindModifiedNodeSlot - Find a slot for the specified node if its operands
791 /// were replaced with those specified.  If this node is never memoized,
792 /// return null, otherwise return a pointer to the slot it would take.  If a
793 /// node already exists with these operands, the slot will be non-null.
794 SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N, SDValue Op,
795                                            void *&InsertPos) {
796   if (doNotCSE(N))
797     return nullptr;
798 
799   SDValue Ops[] = { Op };
800   FoldingSetNodeID ID;
801   AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops);
802   AddNodeIDCustom(ID, N);
803   SDNode *Node = CSEMap.FindNodeOrInsertPos(ID, InsertPos);
804   return Node;
805 }
806 
807 /// FindModifiedNodeSlot - Find a slot for the specified node if its operands
808 /// were replaced with those specified.  If this node is never memoized,
809 /// return null, otherwise return a pointer to the slot it would take.  If a
810 /// node already exists with these operands, the slot will be non-null.
811 SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N,
812                                            SDValue Op1, SDValue Op2,
813                                            void *&InsertPos) {
814   if (doNotCSE(N))
815     return nullptr;
816 
817   SDValue Ops[] = { Op1, Op2 };
818   FoldingSetNodeID ID;
819   AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops);
820   AddNodeIDCustom(ID, N);
821   SDNode *Node = CSEMap.FindNodeOrInsertPos(ID, InsertPos);
822   return Node;
823 }
824 
825 
826 /// FindModifiedNodeSlot - Find a slot for the specified node if its operands
827 /// were replaced with those specified.  If this node is never memoized,
828 /// return null, otherwise return a pointer to the slot it would take.  If a
829 /// node already exists with these operands, the slot will be non-null.
830 SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N, ArrayRef<SDValue> Ops,
831                                            void *&InsertPos) {
832   if (doNotCSE(N))
833     return nullptr;
834 
835   FoldingSetNodeID ID;
836   AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops);
837   AddNodeIDCustom(ID, N);
838   SDNode *Node = CSEMap.FindNodeOrInsertPos(ID, InsertPos);
839   return Node;
840 }
841 
842 #ifndef NDEBUG
843 /// VerifyNodeCommon - Sanity check the given node.  Aborts if it is invalid.
844 static void VerifyNodeCommon(SDNode *N) {
845   switch (N->getOpcode()) {
846   default:
847     break;
848   case ISD::BUILD_PAIR: {
849     EVT VT = N->getValueType(0);
850     assert(N->getNumValues() == 1 && "Too many results!");
851     assert(!VT.isVector() && (VT.isInteger() || VT.isFloatingPoint()) &&
852            "Wrong return type!");
853     assert(N->getNumOperands() == 2 && "Wrong number of operands!");
854     assert(N->getOperand(0).getValueType() == N->getOperand(1).getValueType() &&
855            "Mismatched operand types!");
856     assert(N->getOperand(0).getValueType().isInteger() == VT.isInteger() &&
857            "Wrong operand type!");
858     assert(VT.getSizeInBits() == 2 * N->getOperand(0).getValueSizeInBits() &&
859            "Wrong return type size");
860     break;
861   }
862   case ISD::BUILD_VECTOR: {
863     assert(N->getNumValues() == 1 && "Too many results!");
864     assert(N->getValueType(0).isVector() && "Wrong return type!");
865     assert(N->getNumOperands() == N->getValueType(0).getVectorNumElements() &&
866            "Wrong number of operands!");
867     EVT EltVT = N->getValueType(0).getVectorElementType();
868     for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ++I) {
869       assert((I->getValueType() == EltVT ||
870              (EltVT.isInteger() && I->getValueType().isInteger() &&
871               EltVT.bitsLE(I->getValueType()))) &&
872             "Wrong operand type!");
873       assert(I->getValueType() == N->getOperand(0).getValueType() &&
874              "Operands must all have the same type");
875     }
876     break;
877   }
878   }
879 }
880 
881 /// VerifySDNode - Sanity check the given SDNode.  Aborts if it is invalid.
882 static void VerifySDNode(SDNode *N) {
883   // The SDNode allocators cannot be used to allocate nodes with fields that are
884   // not present in an SDNode!
885   assert(!isa<MemSDNode>(N) && "Bad MemSDNode!");
886   assert(!isa<ShuffleVectorSDNode>(N) && "Bad ShuffleVectorSDNode!");
887   assert(!isa<ConstantSDNode>(N) && "Bad ConstantSDNode!");
888   assert(!isa<ConstantFPSDNode>(N) && "Bad ConstantFPSDNode!");
889   assert(!isa<GlobalAddressSDNode>(N) && "Bad GlobalAddressSDNode!");
890   assert(!isa<FrameIndexSDNode>(N) && "Bad FrameIndexSDNode!");
891   assert(!isa<JumpTableSDNode>(N) && "Bad JumpTableSDNode!");
892   assert(!isa<ConstantPoolSDNode>(N) && "Bad ConstantPoolSDNode!");
893   assert(!isa<BasicBlockSDNode>(N) && "Bad BasicBlockSDNode!");
894   assert(!isa<SrcValueSDNode>(N) && "Bad SrcValueSDNode!");
895   assert(!isa<MDNodeSDNode>(N) && "Bad MDNodeSDNode!");
896   assert(!isa<RegisterSDNode>(N) && "Bad RegisterSDNode!");
897   assert(!isa<BlockAddressSDNode>(N) && "Bad BlockAddressSDNode!");
898   assert(!isa<EHLabelSDNode>(N) && "Bad EHLabelSDNode!");
899   assert(!isa<ExternalSymbolSDNode>(N) && "Bad ExternalSymbolSDNode!");
900   assert(!isa<CondCodeSDNode>(N) && "Bad CondCodeSDNode!");
901   assert(!isa<CvtRndSatSDNode>(N) && "Bad CvtRndSatSDNode!");
902   assert(!isa<VTSDNode>(N) && "Bad VTSDNode!");
903   assert(!isa<MachineSDNode>(N) && "Bad MachineSDNode!");
904 
905   VerifyNodeCommon(N);
906 }
907 
908 /// VerifyMachineNode - Sanity check the given MachineNode.  Aborts if it is
909 /// invalid.
910 static void VerifyMachineNode(SDNode *N) {
911   // The MachineNode allocators cannot be used to allocate nodes with fields
912   // that are not present in a MachineNode!
913   // Currently there are no such nodes.
914 
915   VerifyNodeCommon(N);
916 }
917 #endif // NDEBUG
918 
919 /// getEVTAlignment - Compute the default alignment value for the
920 /// given type.
921 ///
922 unsigned SelectionDAG::getEVTAlignment(EVT VT) const {
923   Type *Ty = VT == MVT::iPTR ?
924                    PointerType::get(Type::getInt8Ty(*getContext()), 0) :
925                    VT.getTypeForEVT(*getContext());
926 
927   return TM.getTargetLowering()->getDataLayout()->getABITypeAlignment(Ty);
928 }
929 
930 // EntryNode could meaningfully have debug info if we can find it...
931 SelectionDAG::SelectionDAG(const TargetMachine &tm, CodeGenOpt::Level OL)
932   : TM(tm), TSI(*tm.getSelectionDAGInfo()), TLI(nullptr), OptLevel(OL),
933     EntryNode(ISD::EntryToken, 0, DebugLoc(), getVTList(MVT::Other)),
934     Root(getEntryNode()), NewNodesMustHaveLegalTypes(false),
935     UpdateListeners(nullptr) {
936   AllNodes.push_back(&EntryNode);
937   DbgInfo = new SDDbgInfo();
938 }
939 
940 void SelectionDAG::init(MachineFunction &mf, const TargetLowering *tli) {
941   MF = &mf;
942   TLI = tli;
943   Context = &mf.getFunction()->getContext();
944 }
945 
946 SelectionDAG::~SelectionDAG() {
947   assert(!UpdateListeners && "Dangling registered DAGUpdateListeners");
948   allnodes_clear();
949   delete DbgInfo;
950 }
951 
952 void SelectionDAG::allnodes_clear() {
953   assert(&*AllNodes.begin() == &EntryNode);
954   AllNodes.remove(AllNodes.begin());
955   while (!AllNodes.empty())
956     DeallocateNode(AllNodes.begin());
957 }
958 
959 BinarySDNode *SelectionDAG::GetBinarySDNode(unsigned Opcode, SDLoc DL,
960                                             SDVTList VTs, SDValue N1,
961                                             SDValue N2, bool nuw, bool nsw,
962                                             bool exact) {
963   if (isBinOpWithFlags(Opcode)) {
964     BinaryWithFlagsSDNode *FN = new (NodeAllocator) BinaryWithFlagsSDNode(
965         Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs, N1, N2);
966     FN->setHasNoUnsignedWrap(nuw);
967     FN->setHasNoSignedWrap(nsw);
968     FN->setIsExact(exact);
969 
970     return FN;
971   }
972 
973   BinarySDNode *N = new (NodeAllocator)
974       BinarySDNode(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs, N1, N2);
975   return N;
976 }
977 
978 void SelectionDAG::clear() {
979   allnodes_clear();
980   OperandAllocator.Reset();
981   CSEMap.clear();
982 
983   ExtendedValueTypeNodes.clear();
984   ExternalSymbols.clear();
985   TargetExternalSymbols.clear();
986   std::fill(CondCodeNodes.begin(), CondCodeNodes.end(),
987             static_cast<CondCodeSDNode*>(nullptr));
988   std::fill(ValueTypeNodes.begin(), ValueTypeNodes.end(),
989             static_cast<SDNode*>(nullptr));
990 
991   EntryNode.UseList = nullptr;
992   AllNodes.push_back(&EntryNode);
993   Root = getEntryNode();
994   DbgInfo->clear();
995 }
996 
997 SDValue SelectionDAG::getAnyExtOrTrunc(SDValue Op, SDLoc DL, EVT VT) {
998   return VT.bitsGT(Op.getValueType()) ?
999     getNode(ISD::ANY_EXTEND, DL, VT, Op) :
1000     getNode(ISD::TRUNCATE, DL, VT, Op);
1001 }
1002 
1003 SDValue SelectionDAG::getSExtOrTrunc(SDValue Op, SDLoc DL, EVT VT) {
1004   return VT.bitsGT(Op.getValueType()) ?
1005     getNode(ISD::SIGN_EXTEND, DL, VT, Op) :
1006     getNode(ISD::TRUNCATE, DL, VT, Op);
1007 }
1008 
1009 SDValue SelectionDAG::getZExtOrTrunc(SDValue Op, SDLoc DL, EVT VT) {
1010   return VT.bitsGT(Op.getValueType()) ?
1011     getNode(ISD::ZERO_EXTEND, DL, VT, Op) :
1012     getNode(ISD::TRUNCATE, DL, VT, Op);
1013 }
1014 
1015 SDValue SelectionDAG::getBoolExtOrTrunc(SDValue Op, SDLoc SL, EVT VT,
1016                                         EVT OpVT) {
1017   if (VT.bitsLE(Op.getValueType()))
1018     return getNode(ISD::TRUNCATE, SL, VT, Op);
1019 
1020   TargetLowering::BooleanContent BType = TLI->getBooleanContents(OpVT);
1021   return getNode(TLI->getExtendForContent(BType), SL, VT, Op);
1022 }
1023 
1024 SDValue SelectionDAG::getZeroExtendInReg(SDValue Op, SDLoc DL, EVT VT) {
1025   assert(!VT.isVector() &&
1026          "getZeroExtendInReg should use the vector element type instead of "
1027          "the vector type!");
1028   if (Op.getValueType() == VT) return Op;
1029   unsigned BitWidth = Op.getValueType().getScalarType().getSizeInBits();
1030   APInt Imm = APInt::getLowBitsSet(BitWidth,
1031                                    VT.getSizeInBits());
1032   return getNode(ISD::AND, DL, Op.getValueType(), Op,
1033                  getConstant(Imm, Op.getValueType()));
1034 }
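// For example, zero-extending in-register from i8 while keeping the value in
// an i32 register produces (and Op, 0xFF) of type i32.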
1035 
1036 SDValue SelectionDAG::getAnyExtendVectorInReg(SDValue Op, SDLoc DL, EVT VT) {
1037   assert(VT.isVector() && "This DAG node is restricted to vector types.");
1038   assert(VT.getSizeInBits() == Op.getValueType().getSizeInBits() &&
1039          "The sizes of the input and result must match in order to perform the "
1040          "extend in-register.");
1041   assert(VT.getVectorNumElements() < Op.getValueType().getVectorNumElements() &&
1042          "The destination vector type must have fewer lanes than the input.");
1043   return getNode(ISD::ANY_EXTEND_VECTOR_INREG, DL, VT, Op);
1044 }
1045 
1046 SDValue SelectionDAG::getSignExtendVectorInReg(SDValue Op, SDLoc DL, EVT VT) {
1047   assert(VT.isVector() && "This DAG node is restricted to vector types.");
1048   assert(VT.getSizeInBits() == Op.getValueType().getSizeInBits() &&
1049          "The sizes of the input and result must match in order to perform the "
1050          "extend in-register.");
1051   assert(VT.getVectorNumElements() < Op.getValueType().getVectorNumElements() &&
1052          "The destination vector type must have fewer lanes than the input.");
1053   return getNode(ISD::SIGN_EXTEND_VECTOR_INREG, DL, VT, Op);
1054 }
1055 
1056 SDValue SelectionDAG::getZeroExtendVectorInReg(SDValue Op, SDLoc DL, EVT VT) {
1057   assert(VT.isVector() && "This DAG node is restricted to vector types.");
1058   assert(VT.getSizeInBits() == Op.getValueType().getSizeInBits() &&
1059          "The sizes of the input and result must match in order to perform the "
1060          "extend in-register.");
1061   assert(VT.getVectorNumElements() < Op.getValueType().getVectorNumElements() &&
1062          "The destination vector type must have fewer lanes than the input.");
1063   return getNode(ISD::ZERO_EXTEND_VECTOR_INREG, DL, VT, Op);
1064 }
1065 
1066 /// getNOT - Create a bitwise NOT operation as (XOR Val, -1).
1067 ///
1068 SDValue SelectionDAG::getNOT(SDLoc DL, SDValue Val, EVT VT) {
1069   EVT EltVT = VT.getScalarType();
1070   SDValue NegOne =
1071     getConstant(APInt::getAllOnesValue(EltVT.getSizeInBits()), VT);
1072   return getNode(ISD::XOR, DL, VT, Val, NegOne);
1073 }
1074 
1075 SDValue SelectionDAG::getLogicalNOT(SDLoc DL, SDValue Val, EVT VT) {
1076   EVT EltVT = VT.getScalarType();
1077   SDValue TrueValue;
1078   switch (TLI->getBooleanContents(VT)) {
1079     case TargetLowering::ZeroOrOneBooleanContent:
1080     case TargetLowering::UndefinedBooleanContent:
1081       TrueValue = getConstant(1, VT);
1082       break;
1083     case TargetLowering::ZeroOrNegativeOneBooleanContent:
1084       TrueValue = getConstant(APInt::getAllOnesValue(EltVT.getSizeInBits()),
1085                               VT);
1086       break;
1087   }
1088   return getNode(ISD::XOR, DL, VT, Val, TrueValue);
1089 }
1090 
1091 SDValue SelectionDAG::getConstant(uint64_t Val, EVT VT, bool isT, bool isO) {
1092   EVT EltVT = VT.getScalarType();
1093   assert((EltVT.getSizeInBits() >= 64 ||
1094          (uint64_t)((int64_t)Val >> EltVT.getSizeInBits()) + 1 < 2) &&
1095          "getConstant with a uint64_t value that doesn't fit in the type!");
1096   return getConstant(APInt(EltVT.getSizeInBits(), Val), VT, isT, isO);
1097 }
1098 
1099 SDValue SelectionDAG::getConstant(const APInt &Val, EVT VT, bool isT, bool isO)
1100 {
1101   return getConstant(*ConstantInt::get(*Context, Val), VT, isT, isO);
1102 }
1103 
1104 SDValue SelectionDAG::getConstant(const ConstantInt &Val, EVT VT, bool isT,
1105                                   bool isO) {
1106   assert(VT.isInteger() && "Cannot create FP integer constant!");
1107 
1108   EVT EltVT = VT.getScalarType();
1109   const ConstantInt *Elt = &Val;
1110 
1111   const TargetLowering *TLI = TM.getTargetLowering();
1112 
1113   // In some cases the vector type is legal but the element type is illegal and
1114   // needs to be promoted, for example v8i8 on ARM.  In this case, promote the
1115   // inserted value (the type does not need to match the vector element type).
1116   // Any extra bits introduced will be truncated away.
1117   if (VT.isVector() && TLI->getTypeAction(*getContext(), EltVT) ==
1118       TargetLowering::TypePromoteInteger) {
1119    EltVT = TLI->getTypeToTransformTo(*getContext(), EltVT);
1120    APInt NewVal = Elt->getValue().zext(EltVT.getSizeInBits());
1121    Elt = ConstantInt::get(*getContext(), NewVal);
1122   }
1123   // In other cases the element type is illegal and needs to be expanded, for
1124   // example v2i64 on MIPS32. In this case, find the nearest legal type, split
1125   // the value into n parts and use a vector type with n-times the elements.
1126   // Then bitcast to the type requested.
1127   // Legalizing constants too early makes the DAGCombiner's job harder so we
1128   // only legalize if the DAG tells us we must produce legal types.
1129   else if (NewNodesMustHaveLegalTypes && VT.isVector() &&
1130            TLI->getTypeAction(*getContext(), EltVT) ==
1131            TargetLowering::TypeExpandInteger) {
1132     APInt NewVal = Elt->getValue();
1133     EVT ViaEltVT = TLI->getTypeToTransformTo(*getContext(), EltVT);
1134     unsigned ViaEltSizeInBits = ViaEltVT.getSizeInBits();
1135     unsigned ViaVecNumElts = VT.getSizeInBits() / ViaEltSizeInBits;
1136     EVT ViaVecVT = EVT::getVectorVT(*getContext(), ViaEltVT, ViaVecNumElts);
1137 
1138     // Check the temporary vector is the correct size. If this fails then
1139     // getTypeToTransformTo() probably returned a type whose size (in bits)
1140     // isn't a power-of-2 factor of the requested type size.
1141     assert(ViaVecVT.getSizeInBits() == VT.getSizeInBits());
1142 
1143     SmallVector<SDValue, 2> EltParts;
1144     for (unsigned i = 0; i < ViaVecNumElts / VT.getVectorNumElements(); ++i) {
1145       EltParts.push_back(getConstant(NewVal.lshr(i * ViaEltSizeInBits)
1146                                            .trunc(ViaEltSizeInBits),
1147                                      ViaEltVT, isT, isO));
1148     }
1149 
1150     // EltParts is currently in little endian order. If we actually want
1151     // big-endian order then reverse it now.
1152     if (TLI->isBigEndian())
1153       std::reverse(EltParts.begin(), EltParts.end());
1154 
1155     // The elements must be reversed when the element order is different
1156     // to the endianness of the elements (because the BITCAST is itself a
1157     // vector shuffle in this situation). However, we do not need any code to
1158     // perform this reversal because getConstant() is producing a vector
1159     // splat.
1160     // This situation occurs in MIPS MSA.
1161 
1162     SmallVector<SDValue, 8> Ops;
1163     for (unsigned i = 0; i < VT.getVectorNumElements(); ++i)
1164       Ops.insert(Ops.end(), EltParts.begin(), EltParts.end());
1165 
1166     SDValue Result = getNode(ISD::BITCAST, SDLoc(), VT,
1167                              getNode(ISD::BUILD_VECTOR, SDLoc(), ViaVecVT,
1168                                      Ops));
1169     return Result;
1170   }
1171 
1172   assert(Elt->getBitWidth() == EltVT.getSizeInBits() &&
1173          "APInt size does not match type size!");
1174   unsigned Opc = isT ? ISD::TargetConstant : ISD::Constant;
1175   FoldingSetNodeID ID;
1176   AddNodeIDNode(ID, Opc, getVTList(EltVT), None);
1177   ID.AddPointer(Elt);
1178   ID.AddBoolean(isO);
1179   void *IP = nullptr;
1180   SDNode *N = nullptr;
1181   if ((N = CSEMap.FindNodeOrInsertPos(ID, IP)))
1182     if (!VT.isVector())
1183       return SDValue(N, 0);
1184 
1185   if (!N) {
1186     N = new (NodeAllocator) ConstantSDNode(isT, isO, Elt, EltVT);
1187     CSEMap.InsertNode(N, IP);
1188     AllNodes.push_back(N);
1189   }
1190 
1191   SDValue Result(N, 0);
1192   if (VT.isVector()) {
1193     SmallVector<SDValue, 8> Ops;
1194     Ops.assign(VT.getVectorNumElements(), Result);
1195     Result = getNode(ISD::BUILD_VECTOR, SDLoc(), VT, Ops);
1196   }
1197   return Result;
1198 }
1199 
1200 SDValue SelectionDAG::getIntPtrConstant(uint64_t Val, bool isTarget) {
1201   return getConstant(Val, TM.getTargetLowering()->getPointerTy(), isTarget);
1202 }
1203 
1204 
1205 SDValue SelectionDAG::getConstantFP(const APFloat& V, EVT VT, bool isTarget) {
1206   return getConstantFP(*ConstantFP::get(*getContext(), V), VT, isTarget);
1207 }
1208 
1209 SDValue SelectionDAG::getConstantFP(const ConstantFP& V, EVT VT, bool isTarget){
1210   assert(VT.isFloatingPoint() && "Cannot create integer FP constant!");
1211 
1212   EVT EltVT = VT.getScalarType();
1213 
1214   // Do the map lookup using the actual bit pattern for the floating point
1215   // value, so that we don't have problems with 0.0 comparing equal to -0.0, and
1216   // we don't have issues with SNANs.
1217   unsigned Opc = isTarget ? ISD::TargetConstantFP : ISD::ConstantFP;
1218   FoldingSetNodeID ID;
1219   AddNodeIDNode(ID, Opc, getVTList(EltVT), None);
1220   ID.AddPointer(&V);
1221   void *IP = nullptr;
1222   SDNode *N = nullptr;
1223   if ((N = CSEMap.FindNodeOrInsertPos(ID, IP)))
1224     if (!VT.isVector())
1225       return SDValue(N, 0);
1226 
1227   if (!N) {
1228     N = new (NodeAllocator) ConstantFPSDNode(isTarget, &V, EltVT);
1229     CSEMap.InsertNode(N, IP);
1230     AllNodes.push_back(N);
1231   }
1232 
1233   SDValue Result(N, 0);
1234   if (VT.isVector()) {
1235     SmallVector<SDValue, 8> Ops;
1236     Ops.assign(VT.getVectorNumElements(), Result);
1237     // FIXME SDLoc info might be appropriate here
1238     Result = getNode(ISD::BUILD_VECTOR, SDLoc(), VT, Ops);
1239   }
1240   return Result;
1241 }
1242 
1243 SDValue SelectionDAG::getConstantFP(double Val, EVT VT, bool isTarget) {
1244   EVT EltVT = VT.getScalarType();
1245   if (EltVT==MVT::f32)
1246     return getConstantFP(APFloat((float)Val), VT, isTarget);
1247   else if (EltVT==MVT::f64)
1248     return getConstantFP(APFloat(Val), VT, isTarget);
1249   else if (EltVT==MVT::f80 || EltVT==MVT::f128 || EltVT==MVT::ppcf128 ||
1250            EltVT==MVT::f16) {
1251     bool ignored;
1252     APFloat apf = APFloat(Val);
1253     apf.convert(EVTToAPFloatSemantics(EltVT), APFloat::rmNearestTiesToEven,
1254                 &ignored);
1255     return getConstantFP(apf, VT, isTarget);
1256   } else
1257     llvm_unreachable("Unsupported type in getConstantFP");
1258 }
1259 
1260 SDValue SelectionDAG::getGlobalAddress(const GlobalValue *GV, SDLoc DL,
1261                                        EVT VT, int64_t Offset,
1262                                        bool isTargetGA,
1263                                        unsigned char TargetFlags) {
1264   assert((TargetFlags == 0 || isTargetGA) &&
1265          "Cannot set target flags on target-independent globals");
1266   const TargetLowering *TLI = TM.getTargetLowering();
1267 
1268   // Truncate (with sign-extension) the offset value to the pointer size.
1269   unsigned BitWidth = TLI->getPointerTypeSizeInBits(GV->getType());
1270   if (BitWidth < 64)
1271     Offset = SignExtend64(Offset, BitWidth);
1272 
1273   unsigned Opc;
1274   if (GV->isThreadLocal())
1275     Opc = isTargetGA ? ISD::TargetGlobalTLSAddress : ISD::GlobalTLSAddress;
1276   else
1277     Opc = isTargetGA ? ISD::TargetGlobalAddress : ISD::GlobalAddress;
1278 
1279   FoldingSetNodeID ID;
1280   AddNodeIDNode(ID, Opc, getVTList(VT), None);
1281   ID.AddPointer(GV);
1282   ID.AddInteger(Offset);
1283   ID.AddInteger(TargetFlags);
1284   ID.AddInteger(GV->getType()->getAddressSpace());
1285   void *IP = nullptr;
1286   if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
1287     return SDValue(E, 0);
1288 
1289   SDNode *N = new (NodeAllocator) GlobalAddressSDNode(Opc, DL.getIROrder(),
1290                                                       DL.getDebugLoc(), GV, VT,
1291                                                       Offset, TargetFlags);
1292   CSEMap.InsertNode(N, IP);
1293   AllNodes.push_back(N);
1294   return SDValue(N, 0);
1295 }
1296 
1297 SDValue SelectionDAG::getFrameIndex(int FI, EVT VT, bool isTarget) {
1298   unsigned Opc = isTarget ? ISD::TargetFrameIndex : ISD::FrameIndex;
1299   FoldingSetNodeID ID;
1300   AddNodeIDNode(ID, Opc, getVTList(VT), None);
1301   ID.AddInteger(FI);
1302   void *IP = nullptr;
1303   if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
1304     return SDValue(E, 0);
1305 
1306   SDNode *N = new (NodeAllocator) FrameIndexSDNode(FI, VT, isTarget);
1307   CSEMap.InsertNode(N, IP);
1308   AllNodes.push_back(N);
1309   return SDValue(N, 0);
1310 }
1311 
1312 SDValue SelectionDAG::getJumpTable(int JTI, EVT VT, bool isTarget,
1313                                    unsigned char TargetFlags) {
1314   assert((TargetFlags == 0 || isTarget) &&
1315          "Cannot set target flags on target-independent jump tables");
1316   unsigned Opc = isTarget ? ISD::TargetJumpTable : ISD::JumpTable;
1317   FoldingSetNodeID ID;
1318   AddNodeIDNode(ID, Opc, getVTList(VT), None);
1319   ID.AddInteger(JTI);
1320   ID.AddInteger(TargetFlags);
1321   void *IP = nullptr;
1322   if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
1323     return SDValue(E, 0);
1324 
1325   SDNode *N = new (NodeAllocator) JumpTableSDNode(JTI, VT, isTarget,
1326                                                   TargetFlags);
1327   CSEMap.InsertNode(N, IP);
1328   AllNodes.push_back(N);
1329   return SDValue(N, 0);
1330 }
1331 
1332 SDValue SelectionDAG::getConstantPool(const Constant *C, EVT VT,
1333                                       unsigned Alignment, int Offset,
1334                                       bool isTarget,
1335                                       unsigned char TargetFlags) {
1336   assert((TargetFlags == 0 || isTarget) &&
1337          "Cannot set target flags on target-independent globals");
1338   if (Alignment == 0)
1339     Alignment =
1340     TM.getTargetLowering()->getDataLayout()->getPrefTypeAlignment(C->getType());
1341   unsigned Opc = isTarget ? ISD::TargetConstantPool : ISD::ConstantPool;
1342   FoldingSetNodeID ID;
1343   AddNodeIDNode(ID, Opc, getVTList(VT), None);
1344   ID.AddInteger(Alignment);
1345   ID.AddInteger(Offset);
1346   ID.AddPointer(C);
1347   ID.AddInteger(TargetFlags);
1348   void *IP = nullptr;
1349   if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
1350     return SDValue(E, 0);
1351 
1352   SDNode *N = new (NodeAllocator) ConstantPoolSDNode(isTarget, C, VT, Offset,
1353                                                      Alignment, TargetFlags);
1354   CSEMap.InsertNode(N, IP);
1355   AllNodes.push_back(N);
1356   return SDValue(N, 0);
1357 }
1358 
1359 
1360 SDValue SelectionDAG::getConstantPool(MachineConstantPoolValue *C, EVT VT,
1361                                       unsigned Alignment, int Offset,
1362                                       bool isTarget,
1363                                       unsigned char TargetFlags) {
1364   assert((TargetFlags == 0 || isTarget) &&
1365          "Cannot set target flags on target-independent globals");
1366   if (Alignment == 0)
1367     Alignment =
1368     TM.getTargetLowering()->getDataLayout()->getPrefTypeAlignment(C->getType());
1369   unsigned Opc = isTarget ? ISD::TargetConstantPool : ISD::ConstantPool;
1370   FoldingSetNodeID ID;
1371   AddNodeIDNode(ID, Opc, getVTList(VT), None);
1372   ID.AddInteger(Alignment);
1373   ID.AddInteger(Offset);
1374   C->addSelectionDAGCSEId(ID);
1375   ID.AddInteger(TargetFlags);
1376   void *IP = nullptr;
1377   if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
1378     return SDValue(E, 0);
1379 
1380   SDNode *N = new (NodeAllocator) ConstantPoolSDNode(isTarget, C, VT, Offset,
1381                                                      Alignment, TargetFlags);
1382   CSEMap.InsertNode(N, IP);
1383   AllNodes.push_back(N);
1384   return SDValue(N, 0);
1385 }
1386 
1387 SDValue SelectionDAG::getTargetIndex(int Index, EVT VT, int64_t Offset,
1388                                      unsigned char TargetFlags) {
1389   FoldingSetNodeID ID;
1390   AddNodeIDNode(ID, ISD::TargetIndex, getVTList(VT), None);
1391   ID.AddInteger(Index);
1392   ID.AddInteger(Offset);
1393   ID.AddInteger(TargetFlags);
1394   void *IP = nullptr;
1395   if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
1396     return SDValue(E, 0);
1397 
1398   SDNode *N = new (NodeAllocator) TargetIndexSDNode(Index, VT, Offset,
1399                                                     TargetFlags);
1400   CSEMap.InsertNode(N, IP);
1401   AllNodes.push_back(N);
1402   return SDValue(N, 0);
1403 }
1404 
1405 SDValue SelectionDAG::getBasicBlock(MachineBasicBlock *MBB) {
1406   FoldingSetNodeID ID;
1407   AddNodeIDNode(ID, ISD::BasicBlock, getVTList(MVT::Other), None);
1408   ID.AddPointer(MBB);
1409   void *IP = nullptr;
1410   if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
1411     return SDValue(E, 0);
1412 
1413   SDNode *N = new (NodeAllocator) BasicBlockSDNode(MBB);
1414   CSEMap.InsertNode(N, IP);
1415   AllNodes.push_back(N);
1416   return SDValue(N, 0);
1417 }
1418 
1419 SDValue SelectionDAG::getValueType(EVT VT) {
1420   if (VT.isSimple() && (unsigned)VT.getSimpleVT().SimpleTy >=
1421       ValueTypeNodes.size())
1422     ValueTypeNodes.resize(VT.getSimpleVT().SimpleTy+1);
1423 
1424   SDNode *&N = VT.isExtended() ?
1425     ExtendedValueTypeNodes[VT] : ValueTypeNodes[VT.getSimpleVT().SimpleTy];
1426 
1427   if (N) return SDValue(N, 0);
1428   N = new (NodeAllocator) VTSDNode(VT);
1429   AllNodes.push_back(N);
1430   return SDValue(N, 0);
1431 }
1432 
1433 SDValue SelectionDAG::getExternalSymbol(const char *Sym, EVT VT) {
1434   SDNode *&N = ExternalSymbols[Sym];
1435   if (N) return SDValue(N, 0);
1436   N = new (NodeAllocator) ExternalSymbolSDNode(false, Sym, 0, VT);
1437   AllNodes.push_back(N);
1438   return SDValue(N, 0);
1439 }
1440 
1441 SDValue SelectionDAG::getTargetExternalSymbol(const char *Sym, EVT VT,
1442                                               unsigned char TargetFlags) {
1443   SDNode *&N =
1444     TargetExternalSymbols[std::pair<std::string,unsigned char>(Sym,
1445                                                                TargetFlags)];
1446   if (N) return SDValue(N, 0);
1447   N = new (NodeAllocator) ExternalSymbolSDNode(true, Sym, TargetFlags, VT);
1448   AllNodes.push_back(N);
1449   return SDValue(N, 0);
1450 }
1451 
1452 SDValue SelectionDAG::getCondCode(ISD::CondCode Cond) {
1453   if ((unsigned)Cond >= CondCodeNodes.size())
1454     CondCodeNodes.resize(Cond+1);
1455 
1456   if (!CondCodeNodes[Cond]) {
1457     CondCodeSDNode *N = new (NodeAllocator) CondCodeSDNode(Cond);
1458     CondCodeNodes[Cond] = N;
1459     AllNodes.push_back(N);
1460   }
1461 
1462   return SDValue(CondCodeNodes[Cond], 0);
1463 }
1464 
1465 // commuteShuffle - swaps the values of N1 and N2, and swaps all indices in
1466 // the shuffle mask M that point at N1 to point at N2, and indices that point
1467 // at N2 to point at N1.
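// For example, with 4-element operands a mask of <0, 5, -1, 2> becomes
// <4, 1, -1, 6>: lane references follow the swapped operands, and undef (-1)
// entries are left untouched.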
1468 static void commuteShuffle(SDValue &N1, SDValue &N2, SmallVectorImpl<int> &M) {
1469   std::swap(N1, N2);
1470   int NElts = M.size();
1471   for (int i = 0; i != NElts; ++i) {
1472     if (M[i] >= NElts)
1473       M[i] -= NElts;
1474     else if (M[i] >= 0)
1475       M[i] += NElts;
1476   }
1477 }
1478 
1479 SDValue SelectionDAG::getVectorShuffle(EVT VT, SDLoc dl, SDValue N1,
1480                                        SDValue N2, const int *Mask) {
1481   assert(VT == N1.getValueType() && VT == N2.getValueType() &&
1482          "Invalid VECTOR_SHUFFLE");
1483 
1484   // Canonicalize shuffle undef, undef -> undef
1485   if (N1.getOpcode() == ISD::UNDEF && N2.getOpcode() == ISD::UNDEF)
1486     return getUNDEF(VT);
1487 
1488   // Validate that all indices in Mask are within the range of the elements
1489   // input to the shuffle.
1490   unsigned NElts = VT.getVectorNumElements();
1491   SmallVector<int, 8> MaskVec;
1492   for (unsigned i = 0; i != NElts; ++i) {
1493     assert(Mask[i] < (int)(NElts * 2) && "Index out of range");
1494     MaskVec.push_back(Mask[i]);
1495   }
1496 
1497   // Canonicalize shuffle v, v -> v, undef
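  // (Illustration: with 4 elements, shuffle V, V, <0,4,1,5> is rewritten here
  // as shuffle V, undef, <0,0,1,1>.)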
1498   if (N1 == N2) {
1499     N2 = getUNDEF(VT);
1500     for (unsigned i = 0; i != NElts; ++i)
1501       if (MaskVec[i] >= (int)NElts) MaskVec[i] -= NElts;
1502   }
1503 
1504   // Canonicalize shuffle undef, v -> v, undef.  Commute the shuffle mask.
1505   if (N1.getOpcode() == ISD::UNDEF)
1506     commuteShuffle(N1, N2, MaskVec);
1507 
1508   // Canonicalize: if all indices select from the LHS, -> shuffle lhs, undef
1509   // Canonicalize: if all indices select from the RHS, -> shuffle rhs, undef
1510   bool AllLHS = true, AllRHS = true;
1511   bool N2Undef = N2.getOpcode() == ISD::UNDEF;
1512   for (unsigned i = 0; i != NElts; ++i) {
1513     if (MaskVec[i] >= (int)NElts) {
1514       if (N2Undef)
1515         MaskVec[i] = -1;
1516       else
1517         AllLHS = false;
1518     } else if (MaskVec[i] >= 0) {
1519       AllRHS = false;
1520     }
1521   }
1522   if (AllLHS && AllRHS)
1523     return getUNDEF(VT);
1524   if (AllLHS && !N2Undef)
1525     N2 = getUNDEF(VT);
1526   if (AllRHS) {
1527     N1 = getUNDEF(VT);
1528     commuteShuffle(N1, N2, MaskVec);
1529   }
1530   // Reset our undef status after accounting for the mask.
1531   N2Undef = N2.getOpcode() == ISD::UNDEF;
1532   // Re-check whether both sides ended up undef.
1533   if (N1.getOpcode() == ISD::UNDEF && N2Undef)
1534     return getUNDEF(VT);
1535 
1536   // If this is an identity shuffle, return the first operand.
1537   bool Identity = true;
1538   for (unsigned i = 0; i != NElts; ++i) {
1539     if (MaskVec[i] >= 0 && MaskVec[i] != (int)i) Identity = false;
1540   }
1541   if (Identity && NElts)
1542     return N1;
1543 
1544   // Shuffling a constant splat doesn't change the result.
1545   if (N2Undef) {
1546     SDValue V = N1;
1547 
1548     // Look through any bitcasts. We check that these don't change the number
1549     // (and size) of elements and just change their types.
1550     while (V.getOpcode() == ISD::BITCAST)
1551       V = V->getOperand(0);
1552 
1553     // A splat should always show up as a build vector node.
1554     if (auto *BV = dyn_cast<BuildVectorSDNode>(V)) {
1555       BitVector UndefElements;
1556       SDValue Splat = BV->getSplatValue(&UndefElements);
1557       // If this is a splat of an undef, shuffling it is also undef.
1558       if (Splat && Splat.getOpcode() == ISD::UNDEF)
1559         return getUNDEF(VT);
1560 
1561       // We only have a splat which can skip shuffles if there is a splatted
1562       // value and no undef lanes rearranged by the shuffle.
1563       if (Splat && UndefElements.none()) {
1564         // Splat of <x, x, ..., x>, return <x, x, ..., x>, provided that the
1565         // number of elements match or the value splatted is a zero constant.
1566         if (V.getValueType().getVectorNumElements() ==
1567             VT.getVectorNumElements())
1568           return N1;
1569         if (auto *C = dyn_cast<ConstantSDNode>(Splat))
1570           if (C->isNullValue())
1571             return N1;
1572       }
1573     }
1574   }
1575 
1576   FoldingSetNodeID ID;
1577   SDValue Ops[2] = { N1, N2 };
1578   AddNodeIDNode(ID, ISD::VECTOR_SHUFFLE, getVTList(VT), Ops);
1579   for (unsigned i = 0; i != NElts; ++i)
1580     ID.AddInteger(MaskVec[i]);
1581 
1582   void* IP = nullptr;
1583   if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
1584     return SDValue(E, 0);
1585 
1586   // Allocate the mask array for the node out of the BumpPtrAllocator, since
1587   // SDNode doesn't have access to it.  This memory will be "leaked" when
1588   // the node is deallocated, but recovered when the NodeAllocator is released.
1589   int *MaskAlloc = OperandAllocator.Allocate<int>(NElts);
1590   memcpy(MaskAlloc, &MaskVec[0], NElts * sizeof(int));
1591 
1592   ShuffleVectorSDNode *N =
1593     new (NodeAllocator) ShuffleVectorSDNode(VT, dl.getIROrder(),
1594                                             dl.getDebugLoc(), N1, N2,
1595                                             MaskAlloc);
1596   CSEMap.InsertNode(N, IP);
1597   AllNodes.push_back(N);
1598   return SDValue(N, 0);
1599 }
1600 
1601 SDValue SelectionDAG::getConvertRndSat(EVT VT, SDLoc dl,
1602                                        SDValue Val, SDValue DTy,
1603                                        SDValue STy, SDValue Rnd, SDValue Sat,
1604                                        ISD::CvtCode Code) {
1605   // If the src and dest types are the same and the conversion is between
1606   // integer types of the same sign or two floats, no conversion is necessary.
1607   if (DTy == STy &&
1608       (Code == ISD::CVT_UU || Code == ISD::CVT_SS || Code == ISD::CVT_FF))
1609     return Val;
1610 
1611   FoldingSetNodeID ID;
1612   SDValue Ops[] = { Val, DTy, STy, Rnd, Sat };
1613   AddNodeIDNode(ID, ISD::CONVERT_RNDSAT, getVTList(VT), Ops);
1614   void* IP = nullptr;
1615   if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
1616     return SDValue(E, 0);
1617 
1618   CvtRndSatSDNode *N = new (NodeAllocator) CvtRndSatSDNode(VT, dl.getIROrder(),
1619                                                            dl.getDebugLoc(),
1620                                                            Ops, Code);
1621   CSEMap.InsertNode(N, IP);
1622   AllNodes.push_back(N);
1623   return SDValue(N, 0);
1624 }
1625 
1626 SDValue SelectionDAG::getRegister(unsigned RegNo, EVT VT) {
1627   FoldingSetNodeID ID;
1628   AddNodeIDNode(ID, ISD::Register, getVTList(VT), None);
1629   ID.AddInteger(RegNo);
1630   void *IP = nullptr;
1631   if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
1632     return SDValue(E, 0);
1633 
1634   SDNode *N = new (NodeAllocator) RegisterSDNode(RegNo, VT);
1635   CSEMap.InsertNode(N, IP);
1636   AllNodes.push_back(N);
1637   return SDValue(N, 0);
1638 }
1639 
1640 SDValue SelectionDAG::getRegisterMask(const uint32_t *RegMask) {
1641   FoldingSetNodeID ID;
1642   AddNodeIDNode(ID, ISD::RegisterMask, getVTList(MVT::Untyped), None);
1643   ID.AddPointer(RegMask);
1644   void *IP = nullptr;
1645   if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
1646     return SDValue(E, 0);
1647 
1648   SDNode *N = new (NodeAllocator) RegisterMaskSDNode(RegMask);
1649   CSEMap.InsertNode(N, IP);
1650   AllNodes.push_back(N);
1651   return SDValue(N, 0);
1652 }
1653 
1654 SDValue SelectionDAG::getEHLabel(SDLoc dl, SDValue Root, MCSymbol *Label) {
1655   FoldingSetNodeID ID;
1656   SDValue Ops[] = { Root };
1657   AddNodeIDNode(ID, ISD::EH_LABEL, getVTList(MVT::Other), Ops);
1658   ID.AddPointer(Label);
1659   void *IP = nullptr;
1660   if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
1661     return SDValue(E, 0);
1662 
1663   SDNode *N = new (NodeAllocator) EHLabelSDNode(dl.getIROrder(),
1664                                                 dl.getDebugLoc(), Root, Label);
1665   CSEMap.InsertNode(N, IP);
1666   AllNodes.push_back(N);
1667   return SDValue(N, 0);
1668 }
1669 
1670 
1671 SDValue SelectionDAG::getBlockAddress(const BlockAddress *BA, EVT VT,
1672                                       int64_t Offset,
1673                                       bool isTarget,
1674                                       unsigned char TargetFlags) {
1675   unsigned Opc = isTarget ? ISD::TargetBlockAddress : ISD::BlockAddress;
1676 
1677   FoldingSetNodeID ID;
1678   AddNodeIDNode(ID, Opc, getVTList(VT), None);
1679   ID.AddPointer(BA);
1680   ID.AddInteger(Offset);
1681   ID.AddInteger(TargetFlags);
1682   void *IP = nullptr;
1683   if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
1684     return SDValue(E, 0);
1685 
1686   SDNode *N = new (NodeAllocator) BlockAddressSDNode(Opc, VT, BA, Offset,
1687                                                      TargetFlags);
1688   CSEMap.InsertNode(N, IP);
1689   AllNodes.push_back(N);
1690   return SDValue(N, 0);
1691 }
1692 
1693 SDValue SelectionDAG::getSrcValue(const Value *V) {
1694   assert((!V || V->getType()->isPointerTy()) &&
1695          "SrcValue is not a pointer?");
1696 
1697   FoldingSetNodeID ID;
1698   AddNodeIDNode(ID, ISD::SRCVALUE, getVTList(MVT::Other), None);
1699   ID.AddPointer(V);
1700 
1701   void *IP = nullptr;
1702   if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
1703     return SDValue(E, 0);
1704 
1705   SDNode *N = new (NodeAllocator) SrcValueSDNode(V);
1706   CSEMap.InsertNode(N, IP);
1707   AllNodes.push_back(N);
1708   return SDValue(N, 0);
1709 }
1710 
1711 /// getMDNode - Return an MDNodeSDNode which holds an MDNode.
1712 SDValue SelectionDAG::getMDNode(const MDNode *MD) {
1713   FoldingSetNodeID ID;
1714   AddNodeIDNode(ID, ISD::MDNODE_SDNODE, getVTList(MVT::Other), None);
1715   ID.AddPointer(MD);
1716 
1717   void *IP = nullptr;
1718   if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
1719     return SDValue(E, 0);
1720 
1721   SDNode *N = new (NodeAllocator) MDNodeSDNode(MD);
1722   CSEMap.InsertNode(N, IP);
1723   AllNodes.push_back(N);
1724   return SDValue(N, 0);
1725 }
1726 
1727 /// getAddrSpaceCast - Return an AddrSpaceCastSDNode.
1728 SDValue SelectionDAG::getAddrSpaceCast(SDLoc dl, EVT VT, SDValue Ptr,
1729                                        unsigned SrcAS, unsigned DestAS) {
1730   SDValue Ops[] = {Ptr};
1731   FoldingSetNodeID ID;
1732   AddNodeIDNode(ID, ISD::ADDRSPACECAST, getVTList(VT), Ops);
1733   ID.AddInteger(SrcAS);
1734   ID.AddInteger(DestAS);
1735 
1736   void *IP = nullptr;
1737   if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
1738     return SDValue(E, 0);
1739 
1740   SDNode *N = new (NodeAllocator) AddrSpaceCastSDNode(dl.getIROrder(),
1741                                                       dl.getDebugLoc(),
1742                                                       VT, Ptr, SrcAS, DestAS);
1743   CSEMap.InsertNode(N, IP);
1744   AllNodes.push_back(N);
1745   return SDValue(N, 0);
1746 }
1747 
1748 /// getShiftAmountOperand - Return the specified value cast to
1749 /// the target's desired shift amount type.
1750 SDValue SelectionDAG::getShiftAmountOperand(EVT LHSTy, SDValue Op) {
1751   EVT OpTy = Op.getValueType();
1752   EVT ShTy = TM.getTargetLowering()->getShiftAmountTy(LHSTy);
1753   if (OpTy == ShTy || OpTy.isVector()) return Op;
1754 
1755   ISD::NodeType Opcode = OpTy.bitsGT(ShTy) ?  ISD::TRUNCATE : ISD::ZERO_EXTEND;
1756   return getNode(Opcode, SDLoc(Op), ShTy, Op);
1757 }
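// Illustrative use (hypothetical caller, not part of this file): a target that
// receives a shift amount of the wrong width can normalize it before building
// the shift node:
//   SDValue Amt = DAG.getShiftAmountOperand(LHS.getValueType(), RawAmt);
//   SDValue Res = DAG.getNode(ISD::SHL, dl, VT, LHS, Amt);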
1758 
1759 /// CreateStackTemporary - Create a stack temporary, suitable for holding the
1760 /// specified value type.
1761 SDValue SelectionDAG::CreateStackTemporary(EVT VT, unsigned minAlign) {
1762   MachineFrameInfo *FrameInfo = getMachineFunction().getFrameInfo();
1763   unsigned ByteSize = VT.getStoreSize();
1764   Type *Ty = VT.getTypeForEVT(*getContext());
1765   const TargetLowering *TLI = TM.getTargetLowering();
1766   unsigned StackAlign =
1767   std::max((unsigned)TLI->getDataLayout()->getPrefTypeAlignment(Ty), minAlign);
1768 
1769   int FrameIdx = FrameInfo->CreateStackObject(ByteSize, StackAlign, false);
1770   return getFrameIndex(FrameIdx, TLI->getPointerTy());
1771 }
1772 
1773 /// CreateStackTemporary - Create a stack temporary suitable for holding
1774 /// either of the specified value types.
1775 SDValue SelectionDAG::CreateStackTemporary(EVT VT1, EVT VT2) {
1776   unsigned Bytes = std::max(VT1.getStoreSizeInBits(),
1777                             VT2.getStoreSizeInBits())/8;
1778   Type *Ty1 = VT1.getTypeForEVT(*getContext());
1779   Type *Ty2 = VT2.getTypeForEVT(*getContext());
1780   const TargetLowering *TLI = TM.getTargetLowering();
1781   const DataLayout *TD = TLI->getDataLayout();
1782   unsigned Align = std::max(TD->getPrefTypeAlignment(Ty1),
1783                             TD->getPrefTypeAlignment(Ty2));
1784 
1785   MachineFrameInfo *FrameInfo = getMachineFunction().getFrameInfo();
1786   int FrameIdx = FrameInfo->CreateStackObject(Bytes, Align, false);
1787   return getFrameIndex(FrameIdx, TLI->getPointerTy());
1788 }
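// Illustrative use (hypothetical, sketched): legalization code typically pairs a
// stack temporary with a store/load round trip through memory:
//   SDValue Slot = DAG.CreateStackTemporary(SrcVT, DestVT);
//   // ... getStore of the source value to Slot, then getLoad of DestVT from Slot.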
1789 
1790 SDValue SelectionDAG::FoldSetCC(EVT VT, SDValue N1,
1791                                 SDValue N2, ISD::CondCode Cond, SDLoc dl) {
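  // Illustration: with N1 and N2 the i32 constants 2 and 3, a SETULT condition is
  // folded below via C1.ult(C2) into getConstant(1, VT).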
1792   // These setcc operations always fold.
1793   switch (Cond) {
1794   default: break;
1795   case ISD::SETFALSE:
1796   case ISD::SETFALSE2: return getConstant(0, VT);
1797   case ISD::SETTRUE:
1798   case ISD::SETTRUE2: {
1799     const TargetLowering *TLI = TM.getTargetLowering();
1800     TargetLowering::BooleanContent Cnt =
1801         TLI->getBooleanContents(N1->getValueType(0));
1802     return getConstant(
1803         Cnt == TargetLowering::ZeroOrNegativeOneBooleanContent ? -1ULL : 1, VT);
1804   }
1805 
1806   case ISD::SETOEQ:
1807   case ISD::SETOGT:
1808   case ISD::SETOGE:
1809   case ISD::SETOLT:
1810   case ISD::SETOLE:
1811   case ISD::SETONE:
1812   case ISD::SETO:
1813   case ISD::SETUO:
1814   case ISD::SETUEQ:
1815   case ISD::SETUNE:
1816     assert(!N1.getValueType().isInteger() && "Illegal setcc for integer!");
1817     break;
1818   }
1819 
1820   if (ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2.getNode())) {
1821     const APInt &C2 = N2C->getAPIntValue();
1822     if (ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1.getNode())) {
1823       const APInt &C1 = N1C->getAPIntValue();
1824 
1825       switch (Cond) {
1826       default: llvm_unreachable("Unknown integer setcc!");
1827       case ISD::SETEQ:  return getConstant(C1 == C2, VT);
1828       case ISD::SETNE:  return getConstant(C1 != C2, VT);
1829       case ISD::SETULT: return getConstant(C1.ult(C2), VT);
1830       case ISD::SETUGT: return getConstant(C1.ugt(C2), VT);
1831       case ISD::SETULE: return getConstant(C1.ule(C2), VT);
1832       case ISD::SETUGE: return getConstant(C1.uge(C2), VT);
1833       case ISD::SETLT:  return getConstant(C1.slt(C2), VT);
1834       case ISD::SETGT:  return getConstant(C1.sgt(C2), VT);
1835       case ISD::SETLE:  return getConstant(C1.sle(C2), VT);
1836       case ISD::SETGE:  return getConstant(C1.sge(C2), VT);
1837       }
1838     }
1839   }
1840   if (ConstantFPSDNode *N1C = dyn_cast<ConstantFPSDNode>(N1.getNode())) {
1841     if (ConstantFPSDNode *N2C = dyn_cast<ConstantFPSDNode>(N2.getNode())) {
1842       APFloat::cmpResult R = N1C->getValueAPF().compare(N2C->getValueAPF());
1843       switch (Cond) {
1844       default: break;
1845       case ISD::SETEQ:  if (R==APFloat::cmpUnordered)
1846                           return getUNDEF(VT);
1847                         // fall through
1848       case ISD::SETOEQ: return getConstant(R==APFloat::cmpEqual, VT);
1849       case ISD::SETNE:  if (R==APFloat::cmpUnordered)
1850                           return getUNDEF(VT);
1851                         // fall through
1852       case ISD::SETONE: return getConstant(R==APFloat::cmpGreaterThan ||
1853                                            R==APFloat::cmpLessThan, VT);
1854       case ISD::SETLT:  if (R==APFloat::cmpUnordered)
1855                           return getUNDEF(VT);
1856                         // fall through
1857       case ISD::SETOLT: return getConstant(R==APFloat::cmpLessThan, VT);
1858       case ISD::SETGT:  if (R==APFloat::cmpUnordered)
1859                           return getUNDEF(VT);
1860                         // fall through
1861       case ISD::SETOGT: return getConstant(R==APFloat::cmpGreaterThan, VT);
1862       case ISD::SETLE:  if (R==APFloat::cmpUnordered)
1863                           return getUNDEF(VT);
1864                         // fall through
1865       case ISD::SETOLE: return getConstant(R==APFloat::cmpLessThan ||
1866                                            R==APFloat::cmpEqual, VT);
1867       case ISD::SETGE:  if (R==APFloat::cmpUnordered)
1868                           return getUNDEF(VT);
1869                         // fall through
1870       case ISD::SETOGE: return getConstant(R==APFloat::cmpGreaterThan ||
1871                                            R==APFloat::cmpEqual, VT);
1872       case ISD::SETO:   return getConstant(R!=APFloat::cmpUnordered, VT);
1873       case ISD::SETUO:  return getConstant(R==APFloat::cmpUnordered, VT);
1874       case ISD::SETUEQ: return getConstant(R==APFloat::cmpUnordered ||
1875                                            R==APFloat::cmpEqual, VT);
1876       case ISD::SETUNE: return getConstant(R!=APFloat::cmpEqual, VT);
1877       case ISD::SETULT: return getConstant(R==APFloat::cmpUnordered ||
1878                                            R==APFloat::cmpLessThan, VT);
1879       case ISD::SETUGT: return getConstant(R==APFloat::cmpGreaterThan ||
1880                                            R==APFloat::cmpUnordered, VT);
1881       case ISD::SETULE: return getConstant(R!=APFloat::cmpGreaterThan, VT);
1882       case ISD::SETUGE: return getConstant(R!=APFloat::cmpLessThan, VT);
1883       }
1884     } else {
1885       // Ensure that the constant occurs on the RHS.
1886       ISD::CondCode SwappedCond = ISD::getSetCCSwappedOperands(Cond);
1887       MVT CompVT = N1.getValueType().getSimpleVT();
1888       if (!TM.getTargetLowering()->isCondCodeLegal(SwappedCond, CompVT))
1889         return SDValue();
1890 
1891       return getSetCC(dl, VT, N2, N1, SwappedCond);
1892     }
1893   }
1894 
1895   // Could not fold it.
1896   return SDValue();
1897 }
1898 
1899 /// SignBitIsZero - Return true if the sign bit of Op is known to be zero.  We
1900 /// use this predicate to simplify operations downstream.
1901 bool SelectionDAG::SignBitIsZero(SDValue Op, unsigned Depth) const {
1902   // This predicate is not safe for vector operations.
1903   if (Op.getValueType().isVector())
1904     return false;
1905 
1906   unsigned BitWidth = Op.getValueType().getScalarType().getSizeInBits();
1907   return MaskedValueIsZero(Op, APInt::getSignBit(BitWidth), Depth);
1908 }
1909 
1910 /// MaskedValueIsZero - Return true if 'V & Mask' is known to be zero.  We use
1911 /// this predicate to simplify operations downstream.  Mask is known to be zero
1912 /// for bits that V cannot have.
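/// For example (illustrative), if Op is (X << 8) on i32 then
/// MaskedValueIsZero(Op, APInt(32, 0xFF)) returns true, because computeKnownBits
/// reports the low 8 bits of the shifted value as known zero.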
1913 bool SelectionDAG::MaskedValueIsZero(SDValue Op, const APInt &Mask,
1914                                      unsigned Depth) const {
1915   APInt KnownZero, KnownOne;
1916   computeKnownBits(Op, KnownZero, KnownOne, Depth);
1917   return (KnownZero & Mask) == Mask;
1918 }
1919 
1920 /// Determine which bits of Op are known to be either zero or one and return
1921 /// them in the KnownZero/KnownOne bitsets.
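/// For example (illustrative), for Op = (X & 0xF0) on i32 with X unknown, the AND
/// case below yields KnownZero = ~0xF0 and KnownOne = 0: every bit outside the
/// mask is known to be clear, and nothing is known to be set.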
1922 void SelectionDAG::computeKnownBits(SDValue Op, APInt &KnownZero,
1923                                     APInt &KnownOne, unsigned Depth) const {
1924   const TargetLowering *TLI = TM.getTargetLowering();
1925   unsigned BitWidth = Op.getValueType().getScalarType().getSizeInBits();
1926 
1927   KnownZero = KnownOne = APInt(BitWidth, 0);   // Don't know anything.
1928   if (Depth == 6)
1929     return;  // Limit search depth.
1930 
1931   APInt KnownZero2, KnownOne2;
1932 
1933   switch (Op.getOpcode()) {
1934   case ISD::Constant:
1935     // We know all of the bits for a constant!
1936     KnownOne = cast<ConstantSDNode>(Op)->getAPIntValue();
1937     KnownZero = ~KnownOne;
1938     break;
1939   case ISD::AND:
1940     // If either the LHS or the RHS are Zero, the result is zero.
1941     computeKnownBits(Op.getOperand(1), KnownZero, KnownOne, Depth+1);
1942     computeKnownBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1);
1943 
1944     // Output known-1 bits are only known if set in both the LHS & RHS.
1945     KnownOne &= KnownOne2;
1946     // Output known-0 bits are known to be clear if clear in either the LHS or RHS.
1947     KnownZero |= KnownZero2;
1948     break;
1949   case ISD::OR:
1950     computeKnownBits(Op.getOperand(1), KnownZero, KnownOne, Depth+1);
1951     computeKnownBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1);
1952 
1953     // Output known-0 bits are only known if clear in both the LHS & RHS.
1954     KnownZero &= KnownZero2;
1955     // Output known-1 bits are known to be set if set in either the LHS or RHS.
1956     KnownOne |= KnownOne2;
1957     break;
1958   case ISD::XOR: {
1959     computeKnownBits(Op.getOperand(1), KnownZero, KnownOne, Depth+1);
1960     computeKnownBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1);
1961 
1962     // Output known-0 bits are known if clear or set in both the LHS & RHS.
1963     APInt KnownZeroOut = (KnownZero & KnownZero2) | (KnownOne & KnownOne2);
1964     // Output known-1 bits are known to be set if set in only one of the LHS, RHS.
1965     KnownOne = (KnownZero & KnownOne2) | (KnownOne & KnownZero2);
1966     KnownZero = KnownZeroOut;
1967     break;
1968   }
1969   case ISD::MUL: {
1970     computeKnownBits(Op.getOperand(1), KnownZero, KnownOne, Depth+1);
1971     computeKnownBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1);
1972 
1973     // If low bits are zero in either operand, output low known-0 bits.
1974     // Also compute a conservative estimate for high known-0 bits.
1975     // More trickiness is possible, but this is sufficient for the
1976     // interesting case of alignment computation.
1977     KnownOne.clearAllBits();
1978     unsigned TrailZ = KnownZero.countTrailingOnes() +
1979                       KnownZero2.countTrailingOnes();
1980     unsigned LeadZ =  std::max(KnownZero.countLeadingOnes() +
1981                                KnownZero2.countLeadingOnes(),
1982                                BitWidth) - BitWidth;
1983 
1984     TrailZ = std::min(TrailZ, BitWidth);
1985     LeadZ = std::min(LeadZ, BitWidth);
1986     KnownZero = APInt::getLowBitsSet(BitWidth, TrailZ) |
1987                 APInt::getHighBitsSet(BitWidth, LeadZ);
1988     break;
1989   }
1990   case ISD::UDIV: {
1991     // For the purposes of computing leading zeros we can conservatively
1992     // treat a udiv as a logical right shift by the power of 2 known to
1993     // be less than the denominator.
1994     computeKnownBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1);
1995     unsigned LeadZ = KnownZero2.countLeadingOnes();
1996 
1997     KnownOne2.clearAllBits();
1998     KnownZero2.clearAllBits();
1999     computeKnownBits(Op.getOperand(1), KnownZero2, KnownOne2, Depth+1);
2000     unsigned RHSUnknownLeadingOnes = KnownOne2.countLeadingZeros();
2001     if (RHSUnknownLeadingOnes != BitWidth)
2002       LeadZ = std::min(BitWidth,
2003                        LeadZ + BitWidth - RHSUnknownLeadingOnes - 1);
2004 
2005     KnownZero = APInt::getHighBitsSet(BitWidth, LeadZ);
2006     break;
2007   }
2008   case ISD::SELECT:
2009     computeKnownBits(Op.getOperand(2), KnownZero, KnownOne, Depth+1);
2010     computeKnownBits(Op.getOperand(1), KnownZero2, KnownOne2, Depth+1);
2011 
2012     // Only known if known in both the LHS and RHS.
2013     KnownOne &= KnownOne2;
2014     KnownZero &= KnownZero2;
2015     break;
2016   case ISD::SELECT_CC:
2017     computeKnownBits(Op.getOperand(3), KnownZero, KnownOne, Depth+1);
2018     computeKnownBits(Op.getOperand(2), KnownZero2, KnownOne2, Depth+1);
2019 
2020     // Only known if known in both the LHS and RHS.
2021     KnownOne &= KnownOne2;
2022     KnownZero &= KnownZero2;
2023     break;
2024   case ISD::SADDO:
2025   case ISD::UADDO:
2026   case ISD::SSUBO:
2027   case ISD::USUBO:
2028   case ISD::SMULO:
2029   case ISD::UMULO:
2030     if (Op.getResNo() != 1)
2031       break;
2032     // The boolean result conforms to getBooleanContents.
2033     // If we know the result of a setcc has the top bits zero, use this info.
2034     // We know that we have an integer-based boolean since these operations
2035     // are only available for integer types.
2036     if (TLI->getBooleanContents(Op.getValueType().isVector(), false) ==
2037             TargetLowering::ZeroOrOneBooleanContent &&
2038         BitWidth > 1)
2039       KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - 1);
2040     break;
2041   case ISD::SETCC:
2042     // If we know the result of a setcc has the top bits zero, use this info.
2043     if (TLI->getBooleanContents(Op.getOperand(0).getValueType()) ==
2044             TargetLowering::ZeroOrOneBooleanContent &&
2045         BitWidth > 1)
2046       KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - 1);
2047     break;
2048   case ISD::SHL:
2049     // (shl X, C1) & C2 == 0   iff   (X & C2 >>u C1) == 0
2050     if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
2051       unsigned ShAmt = SA->getZExtValue();
2052 
2053       // If the shift count is an invalid immediate, don't do anything.
2054       if (ShAmt >= BitWidth)
2055         break;
2056 
2057       computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
2058       KnownZero <<= ShAmt;
2059       KnownOne  <<= ShAmt;
2060       // low bits known zero.
2061       KnownZero |= APInt::getLowBitsSet(BitWidth, ShAmt);
2062     }
2063     break;
2064   case ISD::SRL:
2065     // (ushr X, C1) & C2 == 0   iff  (-1 >> C1) & C2 == 0
2066     if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
2067       unsigned ShAmt = SA->getZExtValue();
2068 
2069       // If the shift count is an invalid immediate, don't do anything.
2070       if (ShAmt >= BitWidth)
2071         break;
2072 
2073       computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
2074       KnownZero = KnownZero.lshr(ShAmt);
2075       KnownOne  = KnownOne.lshr(ShAmt);
2076 
2077       APInt HighBits = APInt::getHighBitsSet(BitWidth, ShAmt);
2078       KnownZero |= HighBits;  // High bits known zero.
2079     }
2080     break;
2081   case ISD::SRA:
2082     if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
2083       unsigned ShAmt = SA->getZExtValue();
2084 
2085       // If the shift count is an invalid immediate, don't do anything.
2086       if (ShAmt >= BitWidth)
2087         break;
2088 
2089       // If any of the demanded bits are produced by the sign extension, we also
2090       // demand the input sign bit.
2091       APInt HighBits = APInt::getHighBitsSet(BitWidth, ShAmt);
2092 
2093       computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
2094       KnownZero = KnownZero.lshr(ShAmt);
2095       KnownOne  = KnownOne.lshr(ShAmt);
2096 
2097       // Handle the sign bits.
2098       APInt SignBit = APInt::getSignBit(BitWidth);
2099       SignBit = SignBit.lshr(ShAmt);  // Adjust to where it is now in the mask.
2100 
2101       if (KnownZero.intersects(SignBit)) {
2102         KnownZero |= HighBits;  // New bits are known zero.
2103       } else if (KnownOne.intersects(SignBit)) {
2104         KnownOne  |= HighBits;  // New bits are known one.
2105       }
2106     }
2107     break;
2108   case ISD::SIGN_EXTEND_INREG: {
2109     EVT EVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
2110     unsigned EBits = EVT.getScalarType().getSizeInBits();
2111 
2112     // Sign extension.  Compute the demanded bits in the result that are not
2113     // present in the input.
2114     APInt NewBits = APInt::getHighBitsSet(BitWidth, BitWidth - EBits);
2115 
2116     APInt InSignBit = APInt::getSignBit(EBits);
2117     APInt InputDemandedBits = APInt::getLowBitsSet(BitWidth, EBits);
2118 
2119     // If the sign extended bits are demanded, we know that the sign
2120     // bit is demanded.
2121     InSignBit = InSignBit.zext(BitWidth);
2122     if (NewBits.getBoolValue())
2123       InputDemandedBits |= InSignBit;
2124 
2125     computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
2126     KnownOne &= InputDemandedBits;
2127     KnownZero &= InputDemandedBits;
2128 
2129     // If the sign bit of the input is known set or clear, then we know the
2130     // top bits of the result.
2131     if (KnownZero.intersects(InSignBit)) {         // Input sign bit known clear
2132       KnownZero |= NewBits;
2133       KnownOne  &= ~NewBits;
2134     } else if (KnownOne.intersects(InSignBit)) {   // Input sign bit known set
2135       KnownOne  |= NewBits;
2136       KnownZero &= ~NewBits;
2137     } else {                              // Input sign bit unknown
2138       KnownZero &= ~NewBits;
2139       KnownOne  &= ~NewBits;
2140     }
2141     break;
2142   }
2143   case ISD::CTTZ:
2144   case ISD::CTTZ_ZERO_UNDEF:
2145   case ISD::CTLZ:
2146   case ISD::CTLZ_ZERO_UNDEF:
2147   case ISD::CTPOP: {
2148     unsigned LowBits = Log2_32(BitWidth)+1;
2149     KnownZero = APInt::getHighBitsSet(BitWidth, BitWidth - LowBits);
2150     KnownOne.clearAllBits();
2151     break;
2152   }
2153   case ISD::LOAD: {
2154     LoadSDNode *LD = cast<LoadSDNode>(Op);
2155     // If this is a ZEXTLoad and we are looking at the loaded value.
2156     if (ISD::isZEXTLoad(Op.getNode()) && Op.getResNo() == 0) {
2157       EVT VT = LD->getMemoryVT();
2158       unsigned MemBits = VT.getScalarType().getSizeInBits();
2159       KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - MemBits);
2160     } else if (const MDNode *Ranges = LD->getRanges()) {
2161       computeKnownBitsFromRangeMetadata(*Ranges, KnownZero);
2162     }
2163     break;
2164   }
2165   case ISD::ZERO_EXTEND: {
2166     EVT InVT = Op.getOperand(0).getValueType();
2167     unsigned InBits = InVT.getScalarType().getSizeInBits();
2168     APInt NewBits   = APInt::getHighBitsSet(BitWidth, BitWidth - InBits);
2169     KnownZero = KnownZero.trunc(InBits);
2170     KnownOne = KnownOne.trunc(InBits);
2171     computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
2172     KnownZero = KnownZero.zext(BitWidth);
2173     KnownOne = KnownOne.zext(BitWidth);
2174     KnownZero |= NewBits;
2175     break;
2176   }
2177   case ISD::SIGN_EXTEND: {
2178     EVT InVT = Op.getOperand(0).getValueType();
2179     unsigned InBits = InVT.getScalarType().getSizeInBits();
2180     APInt NewBits   = APInt::getHighBitsSet(BitWidth, BitWidth - InBits);
2181 
2182     KnownZero = KnownZero.trunc(InBits);
2183     KnownOne = KnownOne.trunc(InBits);
2184     computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
2185 
2186     // Note if the sign bit is known to be zero or one.
2187     bool SignBitKnownZero = KnownZero.isNegative();
2188     bool SignBitKnownOne  = KnownOne.isNegative();
2189 
2190     KnownZero = KnownZero.zext(BitWidth);
2191     KnownOne = KnownOne.zext(BitWidth);
2192 
2193     // If the sign bit is known zero or one, the top bits match.
2194     if (SignBitKnownZero)
2195       KnownZero |= NewBits;
2196     else if (SignBitKnownOne)
2197       KnownOne  |= NewBits;
2198     break;
2199   }
2200   case ISD::ANY_EXTEND: {
2201     EVT InVT = Op.getOperand(0).getValueType();
2202     unsigned InBits = InVT.getScalarType().getSizeInBits();
2203     KnownZero = KnownZero.trunc(InBits);
2204     KnownOne = KnownOne.trunc(InBits);
2205     computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
2206     KnownZero = KnownZero.zext(BitWidth);
2207     KnownOne = KnownOne.zext(BitWidth);
2208     break;
2209   }
2210   case ISD::TRUNCATE: {
2211     EVT InVT = Op.getOperand(0).getValueType();
2212     unsigned InBits = InVT.getScalarType().getSizeInBits();
2213     KnownZero = KnownZero.zext(InBits);
2214     KnownOne = KnownOne.zext(InBits);
2215     computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
2216     KnownZero = KnownZero.trunc(BitWidth);
2217     KnownOne = KnownOne.trunc(BitWidth);
2218     break;
2219   }
2220   case ISD::AssertZext: {
2221     EVT VT = cast<VTSDNode>(Op.getOperand(1))->getVT();
2222     APInt InMask = APInt::getLowBitsSet(BitWidth, VT.getSizeInBits());
2223     computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
2224     KnownZero |= (~InMask);
2225     KnownOne  &= (~KnownZero);
2226     break;
2227   }
2228   case ISD::FGETSIGN:
2229     // All bits are zero except the low bit.
2230     KnownZero = APInt::getHighBitsSet(BitWidth, BitWidth - 1);
2231     break;
2232 
2233   case ISD::SUB: {
2234     if (ConstantSDNode *CLHS = dyn_cast<ConstantSDNode>(Op.getOperand(0))) {
2235       // We know that the top bits of C-X are clear if X contains fewer bits
2236       // than C (i.e. no wrap-around can happen).  For example, 20-X is
2237       // positive if we can prove that X is >= 0 and < 16.
2238       if (CLHS->getAPIntValue().isNonNegative()) {
2239         unsigned NLZ = (CLHS->getAPIntValue()+1).countLeadingZeros();
2240         // NLZ can't be BitWidth with no sign bit
2241         APInt MaskV = APInt::getHighBitsSet(BitWidth, NLZ+1);
2242         computeKnownBits(Op.getOperand(1), KnownZero2, KnownOne2, Depth+1);
2243 
2244         // If all of the MaskV bits are known to be zero, then we know the
2245         // output top bits are zero, because we now know that the output is
2246         // from [0-C].
2247         if ((KnownZero2 & MaskV) == MaskV) {
2248           unsigned NLZ2 = CLHS->getAPIntValue().countLeadingZeros();
2249           // Top bits known zero.
2250           KnownZero = APInt::getHighBitsSet(BitWidth, NLZ2);
2251         }
2252       }
2253     }
2254   }
2255   // fall through
2256   case ISD::ADD:
2257   case ISD::ADDE: {
2258     // The low known-0 bits of the output follow from the trailing zero bits
2259     // known in both the LHS & RHS.  For example, 8+(X<<3) is known to have the
2260     // low 3 bits clear.
2261     computeKnownBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1);
2262     unsigned KnownZeroOut = KnownZero2.countTrailingOnes();
2263 
2264     computeKnownBits(Op.getOperand(1), KnownZero2, KnownOne2, Depth+1);
2265     KnownZeroOut = std::min(KnownZeroOut,
2266                             KnownZero2.countTrailingOnes());
2267 
2268     if (Op.getOpcode() == ISD::ADD) {
2269       KnownZero |= APInt::getLowBitsSet(BitWidth, KnownZeroOut);
2270       break;
2271     }
2272 
2273     // With ADDE, a carry bit may be added in, so we can only use this
2274     // information if we know (at least) that the low two bits are clear.  We
2275     // then return to the caller that the low bit is unknown but that other bits
2276     // are known zero.
2277     if (KnownZeroOut >= 2) // ADDE
2278       KnownZero |= APInt::getBitsSet(BitWidth, 1, KnownZeroOut);
2279     break;
2280   }
2281   case ISD::SREM:
2282     if (ConstantSDNode *Rem = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
2283       const APInt &RA = Rem->getAPIntValue().abs();
2284       if (RA.isPowerOf2()) {
2285         APInt LowBits = RA - 1;
2286         computeKnownBits(Op.getOperand(0), KnownZero2,KnownOne2,Depth+1);
2287 
2288         // The low bits of the first operand are unchanged by the srem.
2289         KnownZero = KnownZero2 & LowBits;
2290         KnownOne = KnownOne2 & LowBits;
2291 
2292         // If the first operand is non-negative or has all low bits zero, then
2293         // the upper bits are all zero.
2294         if (KnownZero2[BitWidth-1] || ((KnownZero2 & LowBits) == LowBits))
2295           KnownZero |= ~LowBits;
2296 
2297         // If the first operand is negative and not all low bits are zero, then
2298         // the upper bits are all one.
2299         if (KnownOne2[BitWidth-1] && ((KnownOne2 & LowBits) != 0))
2300           KnownOne |= ~LowBits;
2301         assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
2302       }
2303     }
2304     break;
2305   case ISD::UREM: {
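    // (Illustration: for X urem 8 the power-of-two path below reports every bit
    // above bit 2 as known zero, since the result always lies in [0, 7].)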
2306     if (ConstantSDNode *Rem = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
2307       const APInt &RA = Rem->getAPIntValue();
2308       if (RA.isPowerOf2()) {
2309         APInt LowBits = (RA - 1);
2310         computeKnownBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth + 1);
2311 
2312         // The upper bits are all zero, the lower ones are unchanged.
2313         KnownZero = KnownZero2 | ~LowBits;
2314         KnownOne = KnownOne2 & LowBits;
2315         break;
2316       }
2317     }
2318 
2319     // Since the result is less than or equal to either operand, any leading
2320     // zero bits in either operand must also exist in the result.
2321     computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
2322     computeKnownBits(Op.getOperand(1), KnownZero2, KnownOne2, Depth+1);
2323 
2324     uint32_t Leaders = std::max(KnownZero.countLeadingOnes(),
2325                                 KnownZero2.countLeadingOnes());
2326     KnownOne.clearAllBits();
2327     KnownZero = APInt::getHighBitsSet(BitWidth, Leaders);
2328     break;
2329   }
2330   case ISD::FrameIndex:
2331   case ISD::TargetFrameIndex:
2332     if (unsigned Align = InferPtrAlignment(Op)) {
2333       // The low bits are known zero if the pointer is aligned.
2334       KnownZero = APInt::getLowBitsSet(BitWidth, Log2_32(Align));
2335       break;
2336     }
2337     break;
2338 
2339   default:
2340     if (Op.getOpcode() < ISD::BUILTIN_OP_END)
2341       break;
2342     // Fallthrough
2343   case ISD::INTRINSIC_WO_CHAIN:
2344   case ISD::INTRINSIC_W_CHAIN:
2345   case ISD::INTRINSIC_VOID:
2346     // Allow the target to implement this method for its nodes.
2347     TLI->computeKnownBitsForTargetNode(Op, KnownZero, KnownOne, *this, Depth);
2348     break;
2349   }
2350 
2351   assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
2352 }
2353 
2354 /// ComputeNumSignBits - Return the number of times the sign bit of the
2355 /// register is replicated into the other bits.  We know that at least 1 bit
2356 /// is always equal to the sign bit (itself), but other cases can give us
2357 /// information.  For example, immediately after an "SRA X, 2", we know that
2358 /// the top 3 bits are all equal to each other, so we return 3.
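/// Similarly (illustrative), sign-extending an i8 value to i32 guarantees that the
/// top 25 bits all match the sign bit, so the SIGN_EXTEND case below returns at
/// least 25.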
2359 unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, unsigned Depth) const{
2360   const TargetLowering *TLI = TM.getTargetLowering();
2361   EVT VT = Op.getValueType();
2362   assert(VT.isInteger() && "Invalid VT!");
2363   unsigned VTBits = VT.getScalarType().getSizeInBits();
2364   unsigned Tmp, Tmp2;
2365   unsigned FirstAnswer = 1;
2366 
2367   if (Depth == 6)
2368     return 1;  // Limit search depth.
2369 
2370   switch (Op.getOpcode()) {
2371   default: break;
2372   case ISD::AssertSext:
2373     Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getSizeInBits();
2374     return VTBits-Tmp+1;
2375   case ISD::AssertZext:
2376     Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getSizeInBits();
2377     return VTBits-Tmp;
2378 
2379   case ISD::Constant: {
2380     const APInt &Val = cast<ConstantSDNode>(Op)->getAPIntValue();
2381     return Val.getNumSignBits();
2382   }
2383 
2384   case ISD::SIGN_EXTEND:
2385     Tmp =
2386         VTBits-Op.getOperand(0).getValueType().getScalarType().getSizeInBits();
2387     return ComputeNumSignBits(Op.getOperand(0), Depth+1) + Tmp;
2388 
2389   case ISD::SIGN_EXTEND_INREG:
2390     // Max of the input and what this extends.
2391     Tmp =
2392       cast<VTSDNode>(Op.getOperand(1))->getVT().getScalarType().getSizeInBits();
2393     Tmp = VTBits-Tmp+1;
2394 
2395     Tmp2 = ComputeNumSignBits(Op.getOperand(0), Depth+1);
2396     return std::max(Tmp, Tmp2);
2397 
2398   case ISD::SRA:
2399     Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
2400     // SRA X, C   -> adds C sign bits.
2401     if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
2402       Tmp += C->getZExtValue();
2403       if (Tmp > VTBits) Tmp = VTBits;
2404     }
2405     return Tmp;
2406   case ISD::SHL:
2407     if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
2408       // shl destroys sign bits.
2409       Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
2410       if (C->getZExtValue() >= VTBits ||      // Bad shift.
2411           C->getZExtValue() >= Tmp) break;    // Shifted all sign bits out.
2412       return Tmp - C->getZExtValue();
2413     }
2414     break;
2415   case ISD::AND:
2416   case ISD::OR:
2417   case ISD::XOR:    // NOT is handled here.
2418     // Logical binary ops preserve the number of sign bits at the worst.
2419     Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
2420     if (Tmp != 1) {
2421       Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth+1);
2422       FirstAnswer = std::min(Tmp, Tmp2);
2423       // We computed what we know about the sign bits as our first
2424       // answer. Now proceed to the generic code that uses
2425       // computeKnownBits, and pick whichever answer is better.
2426     }
2427     break;
2428 
2429   case ISD::SELECT:
2430     Tmp = ComputeNumSignBits(Op.getOperand(1), Depth+1);
2431     if (Tmp == 1) return 1;  // Early out.
2432     Tmp2 = ComputeNumSignBits(Op.getOperand(2), Depth+1);
2433     return std::min(Tmp, Tmp2);
2434 
2435   case ISD::SADDO:
2436   case ISD::UADDO:
2437   case ISD::SSUBO:
2438   case ISD::USUBO:
2439   case ISD::SMULO:
2440   case ISD::UMULO:
2441     if (Op.getResNo() != 1)
2442       break;
2443     // The boolean result conforms to getBooleanContents.  Fall through.
2444     // If setcc returns 0/-1, all bits are sign bits.
2445     // We know that we have an integer-based boolean since these operations
2446     // are only available for integer types.
2447     if (TLI->getBooleanContents(Op.getValueType().isVector(), false) ==
2448         TargetLowering::ZeroOrNegativeOneBooleanContent)
2449       return VTBits;
2450     break;
2451   case ISD::SETCC:
2452     // If setcc returns 0/-1, all bits are sign bits.
2453     if (TLI->getBooleanContents(Op.getOperand(0).getValueType()) ==
2454         TargetLowering::ZeroOrNegativeOneBooleanContent)
2455       return VTBits;
2456     break;
2457   case ISD::ROTL:
2458   case ISD::ROTR:
2459     if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
2460       unsigned RotAmt = C->getZExtValue() & (VTBits-1);
2461 
2462       // Handle rotate right by N like a rotate left by 32-N.
2463       if (Op.getOpcode() == ISD::ROTR)
2464         RotAmt = (VTBits-RotAmt) & (VTBits-1);
2465 
2466       // If we aren't rotating out all of the known-in sign bits, return the
2467       // number that are left.  This handles rotl(sext(x), 1) for example.
2468       Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
2469       if (Tmp > RotAmt+1) return Tmp-RotAmt;
2470     }
2471     break;
2472   case ISD::ADD:
2473     // Add can have at most one carry bit.  Thus we know that the output
2474     // is, at worst, one more bit than the inputs.
2475     Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
2476     if (Tmp == 1) return 1;  // Early out.
2477 
2478     // Special case decrementing a value (ADD X, -1):
2479     if (ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(Op.getOperand(1)))
2480       if (CRHS->isAllOnesValue()) {
2481         APInt KnownZero, KnownOne;
2482         computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
2483 
2484         // If the input is known to be 0 or 1, the output is 0/-1, which is all
2485         // sign bits set.
2486         if ((KnownZero | APInt(VTBits, 1)).isAllOnesValue())
2487           return VTBits;
2488 
2489         // If we are subtracting one from a positive number, there is no carry
2490         // out of the result.
2491         if (KnownZero.isNegative())
2492           return Tmp;
2493       }
2494 
2495     Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth+1);
2496     if (Tmp2 == 1) return 1;
2497     return std::min(Tmp, Tmp2)-1;
2498 
2499   case ISD::SUB:
2500     Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth+1);
2501     if (Tmp2 == 1) return 1;
2502 
2503     // Handle NEG.
2504     if (ConstantSDNode *CLHS = dyn_cast<ConstantSDNode>(Op.getOperand(0)))
2505       if (CLHS->isNullValue()) {
2506         APInt KnownZero, KnownOne;
2507         computeKnownBits(Op.getOperand(1), KnownZero, KnownOne, Depth+1);
2508         // If the input is known to be 0 or 1, the output is 0/-1, which is all
2509         // sign bits set.
2510         if ((KnownZero | APInt(VTBits, 1)).isAllOnesValue())
2511           return VTBits;
2512 
2513         // If the input is known to be positive (the sign bit is known clear),
2514         // the output of the NEG has the same number of sign bits as the input.
2515         if (KnownZero.isNegative())
2516           return Tmp2;
2517 
2518         // Otherwise, we treat this like a SUB.
2519       }
2520 
2521     // Sub can have at most one carry bit.  Thus we know that the output
2522     // is, at worst, one more bit than the inputs.
2523     Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
2524     if (Tmp == 1) return 1;  // Early out.
2525     return std::min(Tmp, Tmp2)-1;
2526   case ISD::TRUNCATE:
2527     // FIXME: it's tricky to do anything useful for this, but it is an important
2528     // case for targets like X86.
2529     break;
2530   }
2531 
2532   // If we are looking at the loaded value of the SDNode.
2533   if (Op.getResNo() == 0) {
2534     // Handle LOADX separately here. EXTLOAD case will fall through.
2535     if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Op)) {
2536       unsigned ExtType = LD->getExtensionType();
2537       switch (ExtType) {
2538         default: break;
2539         case ISD::SEXTLOAD:    // '17' bits known
2540           Tmp = LD->getMemoryVT().getScalarType().getSizeInBits();
2541           return VTBits-Tmp+1;
2542         case ISD::ZEXTLOAD:    // '16' bits known
2543           Tmp = LD->getMemoryVT().getScalarType().getSizeInBits();
2544           return VTBits-Tmp;
2545       }
2546     }
2547   }
2548 
2549   // Allow the target to implement this method for its nodes.
2550   if (Op.getOpcode() >= ISD::BUILTIN_OP_END ||
2551       Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
2552       Op.getOpcode() == ISD::INTRINSIC_W_CHAIN ||
2553       Op.getOpcode() == ISD::INTRINSIC_VOID) {
2554     unsigned NumBits = TLI->ComputeNumSignBitsForTargetNode(Op, *this, Depth);
2555     if (NumBits > 1) FirstAnswer = std::max(FirstAnswer, NumBits);
2556   }
2557 
2558   // Finally, if we can prove that the top bits of the result are 0's or 1's,
2559   // use this information.
2560   APInt KnownZero, KnownOne;
2561   computeKnownBits(Op, KnownZero, KnownOne, Depth);
2562 
2563   APInt Mask;
2564   if (KnownZero.isNegative()) {        // sign bit is 0
2565     Mask = KnownZero;
2566   } else if (KnownOne.isNegative()) {  // sign bit is 1;
2567     Mask = KnownOne;
2568   } else {
2569     // Nothing known.
2570     return FirstAnswer;
2571   }
2572 
2573   // Okay, we know that the sign bit in Mask is set.  Use CLZ to determine
2574   // the number of identical bits in the top of the input value.
2575   Mask = ~Mask;
2576   Mask <<= Mask.getBitWidth()-VTBits;
2577   // Return # leading zeros.  We use 'min' here in case Val was zero before
2578   // shifting.  We don't want to return '64' as for an i32 "0".
2579   return std::max(FirstAnswer, std::min(VTBits, Mask.countLeadingZeros()));
2580 }
2581 
2582 /// isBaseWithConstantOffset - Return true if the specified operand is an
2583 /// ISD::ADD with a ConstantSDNode on the right-hand side, or if it is an
2584 /// ISD::OR with a ConstantSDNode that is guaranteed to have the same
2585 /// semantics as an ADD.  This handles the equivalence:
2586 ///     X|Cst == X+Cst iff X&Cst == 0.
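/// For example (illustrative), if X is known to have its low four bits clear
/// (say X = Y << 4), then (X | 3) produces the same value as (X + 3), so the OR
/// can be treated as a base plus a constant offset.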
2587 bool SelectionDAG::isBaseWithConstantOffset(SDValue Op) const {
2588   if ((Op.getOpcode() != ISD::ADD && Op.getOpcode() != ISD::OR) ||
2589       !isa<ConstantSDNode>(Op.getOperand(1)))
2590     return false;
2591 
2592   if (Op.getOpcode() == ISD::OR &&
2593       !MaskedValueIsZero(Op.getOperand(0),
2594                      cast<ConstantSDNode>(Op.getOperand(1))->getAPIntValue()))
2595     return false;
2596 
2597   return true;
2598 }
2599 
2600 
2601 bool SelectionDAG::isKnownNeverNaN(SDValue Op) const {
2602   // If we're told that NaNs won't happen, assume they won't.
2603   if (getTarget().Options.NoNaNsFPMath)
2604     return true;
2605 
2606   // If the value is a constant, we can obviously see if it is a NaN or not.
2607   if (const ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op))
2608     return !C->getValueAPF().isNaN();
2609 
2610   // TODO: Recognize more cases here.
2611 
2612   return false;
2613 }
2614 
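/// isKnownNeverZero - Return true if the given value is known never to be
/// zero; conservatively returns false when this cannot be proven.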
2615 bool SelectionDAG::isKnownNeverZero(SDValue Op) const {
2616   // If the value is a constant, we can obviously see if it is a zero or not.
2617   if (const ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op))
2618     return !C->isZero();
2619 
2620   // TODO: Recognize more cases here.
2621   switch (Op.getOpcode()) {
2622   default: break;
2623   case ISD::OR:
2624     if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1)))
2625       return !C->isNullValue();
2626     break;
2627   }
2628 
2629   return false;
2630 }
2631 
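/// isEqualTo - Return true if the two values are known to be equal, treating
/// +0.0 and -0.0 as equal.  A false result does not prove the values differ.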
2632 bool SelectionDAG::isEqualTo(SDValue A, SDValue B) const {
2633   // Check the obvious case.
2634   if (A == B) return true;
2635 
2636   // Treat positive and negative zero as equal.
2637   if (const ConstantFPSDNode *CA = dyn_cast<ConstantFPSDNode>(A))
2638     if (const ConstantFPSDNode *CB = dyn_cast<ConstantFPSDNode>(B))
2639       if (CA->isZero() && CB->isZero()) return true;
2640 
2641   // Otherwise they may not be equal.
2642   return false;
2643 }
2644 
2645 /// getNode - Gets or creates the specified node.
2646 ///
2647 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, EVT VT) {
2648   FoldingSetNodeID ID;
2649   AddNodeIDNode(ID, Opcode, getVTList(VT), None);
2650   void *IP = nullptr;
2651   if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
2652     return SDValue(E, 0);
2653 
2654   SDNode *N = new (NodeAllocator) SDNode(Opcode, DL.getIROrder(),
2655                                          DL.getDebugLoc(), getVTList(VT));
2656   CSEMap.InsertNode(N, IP);
2657 
2658   AllNodes.push_back(N);
2659 #ifndef NDEBUG
2660   VerifySDNode(N);
2661 #endif
2662   return SDValue(N, 0);
2663 }
2664 
2665 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL,
2666                               EVT VT, SDValue Operand) {
2667   // Constant fold unary operations with an integer constant operand. Even
2668   // opaque constants will be folded, because the folding of unary operations
2669   // doesn't create new constants with different values. Nevertheless, the
2670   // opaque flag is preserved during folding to prevent future folding with
2671   // other constants.
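  // For example, (sext i8 -1) to i32 is folded here to the i32 constant -1.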
2672   if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Operand.getNode())) {
2673     const APInt &Val = C->getAPIntValue();
2674     switch (Opcode) {
2675     default: break;
2676     case ISD::SIGN_EXTEND:
2677       return getConstant(Val.sextOrTrunc(VT.getSizeInBits()), VT,
2678                          C->isTargetOpcode(), C->isOpaque());
2679     case ISD::ANY_EXTEND:
2680     case ISD::ZERO_EXTEND:
2681     case ISD::TRUNCATE:
2682       return getConstant(Val.zextOrTrunc(VT.getSizeInBits()), VT,
2683                          C->isTargetOpcode(), C->isOpaque());
2684     case ISD::UINT_TO_FP:
2685     case ISD::SINT_TO_FP: {
2686       APFloat apf(EVTToAPFloatSemantics(VT),
2687                   APInt::getNullValue(VT.getSizeInBits()));
2688       (void)apf.convertFromAPInt(Val,
2689                                  Opcode==ISD::SINT_TO_FP,
2690                                  APFloat::rmNearestTiesToEven);
2691       return getConstantFP(apf, VT);
2692     }
2693     case ISD::BITCAST:
2694       if (VT == MVT::f32 && C->getValueType(0) == MVT::i32)
2695         return getConstantFP(APFloat(APFloat::IEEEsingle, Val), VT);
2696       else if (VT == MVT::f64 && C->getValueType(0) == MVT::i64)
2697         return getConstantFP(APFloat(APFloat::IEEEdouble, Val), VT);
2698       break;
2699     case ISD::BSWAP:
2700       return getConstant(Val.byteSwap(), VT, C->isTargetOpcode(),
2701                          C->isOpaque());
2702     case ISD::CTPOP:
2703       return getConstant(Val.countPopulation(), VT, C->isTargetOpcode(),
2704                          C->isOpaque());
2705     case ISD::CTLZ:
2706     case ISD::CTLZ_ZERO_UNDEF:
2707       return getConstant(Val.countLeadingZeros(), VT, C->isTargetOpcode(),
2708                          C->isOpaque());
2709     case ISD::CTTZ:
2710     case ISD::CTTZ_ZERO_UNDEF:
2711       return getConstant(Val.countTrailingZeros(), VT, C->isTargetOpcode(),
2712                          C->isOpaque());
2713     }
2714   }
2715 
2716   // Constant fold unary operations with a floating point constant operand.
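  // For example, (fneg (f64 1.0)) is folded here to the f64 constant -1.0.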
2717   if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Operand.getNode())) {
2718     APFloat V = C->getValueAPF();    // make copy
2719     switch (Opcode) {
2720     case ISD::FNEG:
2721       V.changeSign();
2722       return getConstantFP(V, VT);
2723     case ISD::FABS:
2724       V.clearSign();
2725       return getConstantFP(V, VT);
2726     case ISD::FCEIL: {
2727       APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardPositive);
2728       if (fs == APFloat::opOK || fs == APFloat::opInexact)
2729         return getConstantFP(V, VT);
2730       break;
2731     }
2732     case ISD::FTRUNC: {
2733       APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardZero);
2734       if (fs == APFloat::opOK || fs == APFloat::opInexact)
2735         return getConstantFP(V, VT);
2736       break;
2737     }
2738     case ISD::FFLOOR: {
2739       APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardNegative);
2740       if (fs == APFloat::opOK || fs == APFloat::opInexact)
2741         return getConstantFP(V, VT);
2742       break;
2743     }
2744     case ISD::FP_EXTEND: {
2745       bool ignored;
2746       // This can return overflow, underflow, or inexact; we don't care.
2747       // FIXME need to be more flexible about rounding mode.
2748       (void)V.convert(EVTToAPFloatSemantics(VT),
2749                       APFloat::rmNearestTiesToEven, &ignored);
2750       return getConstantFP(V, VT);
2751     }
2752     case ISD::FP_TO_SINT:
2753     case ISD::FP_TO_UINT: {
2754       integerPart x[2];
2755       bool ignored;
2756       assert(integerPartWidth >= 64);
2757       // FIXME need to be more flexible about rounding mode.
2758       APFloat::opStatus s = V.convertToInteger(x, VT.getSizeInBits(),
2759                             Opcode==ISD::FP_TO_SINT,
2760                             APFloat::rmTowardZero, &ignored);
2761       if (s==APFloat::opInvalidOp)     // inexact is OK, in fact usual
2762         break;
2763       APInt api(VT.getSizeInBits(), x);
2764       return getConstant(api, VT);
2765     }
2766     case ISD::BITCAST:
2767       if (VT == MVT::i32 && C->getValueType(0) == MVT::f32)
2768         return getConstant((uint32_t)V.bitcastToAPInt().getZExtValue(), VT);
2769       else if (VT == MVT::i64 && C->getValueType(0) == MVT::f64)
2770         return getConstant(V.bitcastToAPInt().getZExtValue(), VT);
2771       break;
2772     }
2773   }
2774 
2775   unsigned OpOpcode = Operand.getNode()->getOpcode();
2776   switch (Opcode) {
2777   case ISD::TokenFactor:
2778   case ISD::MERGE_VALUES:
2779   case ISD::CONCAT_VECTORS:
2780     return Operand;         // Factor, merge or concat of one node?  No need.
2781   case ISD::FP_ROUND: llvm_unreachable("Invalid method to make FP_ROUND node");
2782   case ISD::FP_EXTEND:
2783     assert(VT.isFloatingPoint() &&
2784            Operand.getValueType().isFloatingPoint() && "Invalid FP cast!");
2785     if (Operand.getValueType() == VT) return Operand;  // noop conversion.
2786     assert((!VT.isVector() ||
2787             VT.getVectorNumElements() ==
2788             Operand.getValueType().getVectorNumElements()) &&
2789            "Vector element count mismatch!");
2790     if (Operand.getOpcode() == ISD::UNDEF)
2791       return getUNDEF(VT);
2792     break;
2793   case ISD::SIGN_EXTEND:
2794     assert(VT.isInteger() && Operand.getValueType().isInteger() &&
2795            "Invalid SIGN_EXTEND!");
2796     if (Operand.getValueType() == VT) return Operand;   // noop extension
2797     assert(Operand.getValueType().getScalarType().bitsLT(VT.getScalarType()) &&
2798            "Invalid sext node, dst < src!");
2799     assert((!VT.isVector() ||
2800             VT.getVectorNumElements() ==
2801             Operand.getValueType().getVectorNumElements()) &&
2802            "Vector element count mismatch!");
2803     if (OpOpcode == ISD::SIGN_EXTEND || OpOpcode == ISD::ZERO_EXTEND)
2804       return getNode(OpOpcode, DL, VT, Operand.getNode()->getOperand(0));
2805     else if (OpOpcode == ISD::UNDEF)
2806       // sext(undef) = 0, because the top bits will all be the same.
2807       return getConstant(0, VT);
2808     break;
2809   case ISD::ZERO_EXTEND:
2810     assert(VT.isInteger() && Operand.getValueType().isInteger() &&
2811            "Invalid ZERO_EXTEND!");
2812     if (Operand.getValueType() == VT) return Operand;   // noop extension
2813     assert(Operand.getValueType().getScalarType().bitsLT(VT.getScalarType()) &&
2814            "Invalid zext node, dst < src!");
2815     assert((!VT.isVector() ||
2816             VT.getVectorNumElements() ==
2817             Operand.getValueType().getVectorNumElements()) &&
2818            "Vector element count mismatch!");
2819     if (OpOpcode == ISD::ZERO_EXTEND)   // (zext (zext x)) -> (zext x)
2820       return getNode(ISD::ZERO_EXTEND, DL, VT,
2821                      Operand.getNode()->getOperand(0));
2822     else if (OpOpcode == ISD::UNDEF)
2823       // zext(undef) = 0, because the top bits will be zero.
2824       return getConstant(0, VT);
2825     break;
2826   case ISD::ANY_EXTEND:
2827     assert(VT.isInteger() && Operand.getValueType().isInteger() &&
2828            "Invalid ANY_EXTEND!");
2829     if (Operand.getValueType() == VT) return Operand;   // noop extension
2830     assert(Operand.getValueType().getScalarType().bitsLT(VT.getScalarType()) &&
2831            "Invalid anyext node, dst < src!");
2832     assert((!VT.isVector() ||
2833             VT.getVectorNumElements() ==
2834             Operand.getValueType().getVectorNumElements()) &&
2835            "Vector element count mismatch!");
2836 
2837     if (OpOpcode == ISD::ZERO_EXTEND || OpOpcode == ISD::SIGN_EXTEND ||
2838         OpOpcode == ISD::ANY_EXTEND)
2839       // (ext (zext x)) -> (zext x)  and  (ext (sext x)) -> (sext x)
2840       return getNode(OpOpcode, DL, VT, Operand.getNode()->getOperand(0));
2841     else if (OpOpcode == ISD::UNDEF)
2842       return getUNDEF(VT);
2843 
2844     // (ext (trunc x)) -> x
2845     if (OpOpcode == ISD::TRUNCATE) {
2846       SDValue OpOp = Operand.getNode()->getOperand(0);
2847       if (OpOp.getValueType() == VT)
2848         return OpOp;
2849     }
2850     break;
2851   case ISD::TRUNCATE:
2852     assert(VT.isInteger() && Operand.getValueType().isInteger() &&
2853            "Invalid TRUNCATE!");
2854     if (Operand.getValueType() == VT) return Operand;   // noop truncate
2855     assert(Operand.getValueType().getScalarType().bitsGT(VT.getScalarType()) &&
2856            "Invalid truncate node, src < dst!");
2857     assert((!VT.isVector() ||
2858             VT.getVectorNumElements() ==
2859             Operand.getValueType().getVectorNumElements()) &&
2860            "Vector element count mismatch!");
2861     if (OpOpcode == ISD::TRUNCATE)
2862       return getNode(ISD::TRUNCATE, DL, VT, Operand.getNode()->getOperand(0));
2863     if (OpOpcode == ISD::ZERO_EXTEND || OpOpcode == ISD::SIGN_EXTEND ||
2864         OpOpcode == ISD::ANY_EXTEND) {
2865       // If the source is smaller than the dest, we still need an extend.
2866       if (Operand.getNode()->getOperand(0).getValueType().getScalarType()
2867             .bitsLT(VT.getScalarType()))
2868         return getNode(OpOpcode, DL, VT, Operand.getNode()->getOperand(0));
2869       if (Operand.getNode()->getOperand(0).getValueType().bitsGT(VT))
2870         return getNode(ISD::TRUNCATE, DL, VT, Operand.getNode()->getOperand(0));
2871       return Operand.getNode()->getOperand(0);
2872     }
2873     if (OpOpcode == ISD::UNDEF)
2874       return getUNDEF(VT);
2875     break;
2876   case ISD::BITCAST:
2877     // Basic sanity checking.
2878     assert(VT.getSizeInBits() == Operand.getValueType().getSizeInBits()
2879            && "Cannot BITCAST between types of different sizes!");
2880     if (VT == Operand.getValueType()) return Operand;  // noop conversion.
2881     if (OpOpcode == ISD::BITCAST)  // bitconv(bitconv(x)) -> bitconv(x)
2882       return getNode(ISD::BITCAST, DL, VT, Operand.getOperand(0));
2883     if (OpOpcode == ISD::UNDEF)
2884       return getUNDEF(VT);
2885     break;
2886   case ISD::SCALAR_TO_VECTOR:
2887     assert(VT.isVector() && !Operand.getValueType().isVector() &&
2888            (VT.getVectorElementType() == Operand.getValueType() ||
2889             (VT.getVectorElementType().isInteger() &&
2890              Operand.getValueType().isInteger() &&
2891              VT.getVectorElementType().bitsLE(Operand.getValueType()))) &&
2892            "Illegal SCALAR_TO_VECTOR node!");
2893     if (OpOpcode == ISD::UNDEF)
2894       return getUNDEF(VT);
2895     // scalar_to_vector(extract_vector_elt V, 0) -> V, top bits are undefined.
2896     if (OpOpcode == ISD::EXTRACT_VECTOR_ELT &&
2897         isa<ConstantSDNode>(Operand.getOperand(1)) &&
2898         Operand.getConstantOperandVal(1) == 0 &&
2899         Operand.getOperand(0).getValueType() == VT)
2900       return Operand.getOperand(0);
2901     break;
2902   case ISD::FNEG:
2903     // -(X-Y) -> (Y-X) is unsafe because when X==Y, -0.0 != +0.0
2904     if (getTarget().Options.UnsafeFPMath && OpOpcode == ISD::FSUB)
2905       return getNode(ISD::FSUB, DL, VT, Operand.getNode()->getOperand(1),
2906                      Operand.getNode()->getOperand(0));
2907     if (OpOpcode == ISD::FNEG)  // --X -> X
2908       return Operand.getNode()->getOperand(0);
2909     break;
2910   case ISD::FABS:
2911     if (OpOpcode == ISD::FNEG)  // abs(-X) -> abs(X)
2912       return getNode(ISD::FABS, DL, VT, Operand.getNode()->getOperand(0));
2913     break;
2914   }
2915 
2916   SDNode *N;
2917   SDVTList VTs = getVTList(VT);
2918   if (VT != MVT::Glue) { // Don't CSE flag producing nodes
2919     FoldingSetNodeID ID;
2920     SDValue Ops[1] = { Operand };
2921     AddNodeIDNode(ID, Opcode, VTs, Ops);
2922     void *IP = nullptr;
2923     if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
2924       return SDValue(E, 0);
2925 
2926     N = new (NodeAllocator) UnarySDNode(Opcode, DL.getIROrder(),
2927                                         DL.getDebugLoc(), VTs, Operand);
2928     CSEMap.InsertNode(N, IP);
2929   } else {
2930     N = new (NodeAllocator) UnarySDNode(Opcode, DL.getIROrder(),
2931                                         DL.getDebugLoc(), VTs, Operand);
2932   }
2933 
2934   AllNodes.push_back(N);
2935 #ifndef NDEBUG
2936   VerifySDNode(N);
2937 #endif
2938   return SDValue(N, 0);
2939 }
2940 
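/// FoldConstantArithmetic - Attempt to constant fold a binary integer
/// operation whose operands are either ConstantSDNodes or BUILD_VECTORs of
/// ConstantSDNodes.  Returns a null SDValue if the operands cannot be folded.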
2941 SDValue SelectionDAG::FoldConstantArithmetic(unsigned Opcode, EVT VT,
2942                                              SDNode *Cst1, SDNode *Cst2) {
2943   // If the opcode is a target-specific ISD node, there's nothing we can
2944   // do here and the operand rules may not line up with the below, so
2945   // bail early.
2946   if (Opcode >= ISD::BUILTIN_OP_END)
2947     return SDValue();
2948 
2949   SmallVector<std::pair<ConstantSDNode *, ConstantSDNode *>, 4> Inputs;
2950   SmallVector<SDValue, 4> Outputs;
2951   EVT SVT = VT.getScalarType();
2952 
2953   ConstantSDNode *Scalar1 = dyn_cast<ConstantSDNode>(Cst1);
2954   ConstantSDNode *Scalar2 = dyn_cast<ConstantSDNode>(Cst2);
2955   if (Scalar1 && Scalar2 && (Scalar1->isOpaque() || Scalar2->isOpaque()))
2956     return SDValue();
2957 
2958   if (Scalar1 && Scalar2)
2959     // Scalar instruction.
2960     Inputs.push_back(std::make_pair(Scalar1, Scalar2));
2961   else {
2962     // For vectors extract each constant element into Inputs so we can constant
2963     // fold them individually.
2964     BuildVectorSDNode *BV1 = dyn_cast<BuildVectorSDNode>(Cst1);
2965     BuildVectorSDNode *BV2 = dyn_cast<BuildVectorSDNode>(Cst2);
2966     if (!BV1 || !BV2)
2967       return SDValue();
2968 
2969     assert(BV1->getNumOperands() == BV2->getNumOperands() && "Out of sync!");
2970 
2971     for (unsigned I = 0, E = BV1->getNumOperands(); I != E; ++I) {
2972       ConstantSDNode *V1 = dyn_cast<ConstantSDNode>(BV1->getOperand(I));
2973       ConstantSDNode *V2 = dyn_cast<ConstantSDNode>(BV2->getOperand(I));
2974       if (!V1 || !V2) // Not a constant, bail.
2975         return SDValue();
2976 
2977       if (V1->isOpaque() || V2->isOpaque())
2978         return SDValue();
2979 
2980       // Avoid BUILD_VECTOR nodes that perform implicit truncation.
2981       // FIXME: This is valid and could be handled by truncating the APInts.
2982       if (V1->getValueType(0) != SVT || V2->getValueType(0) != SVT)
2983         return SDValue();
2984 
2985       Inputs.push_back(std::make_pair(V1, V2));
2986     }
2987   }
2988 
2989   // We have a number of constant values; constant fold them element by element.
2990   for (unsigned I = 0, E = Inputs.size(); I != E; ++I) {
2991     const APInt &C1 = Inputs[I].first->getAPIntValue();
2992     const APInt &C2 = Inputs[I].second->getAPIntValue();
2993 
2994     switch (Opcode) {
2995     case ISD::ADD:
2996       Outputs.push_back(getConstant(C1 + C2, SVT));
2997       break;
2998     case ISD::SUB:
2999       Outputs.push_back(getConstant(C1 - C2, SVT));
3000       break;
3001     case ISD::MUL:
3002       Outputs.push_back(getConstant(C1 * C2, SVT));
3003       break;
3004     case ISD::UDIV:
3005       if (!C2.getBoolValue())
3006         return SDValue();
3007       Outputs.push_back(getConstant(C1.udiv(C2), SVT));
3008       break;
3009     case ISD::UREM:
3010       if (!C2.getBoolValue())
3011         return SDValue();
3012       Outputs.push_back(getConstant(C1.urem(C2), SVT));
3013       break;
3014     case ISD::SDIV:
3015       if (!C2.getBoolValue())
3016         return SDValue();
3017       Outputs.push_back(getConstant(C1.sdiv(C2), SVT));
3018       break;
3019     case ISD::SREM:
3020       if (!C2.getBoolValue())
3021         return SDValue();
3022       Outputs.push_back(getConstant(C1.srem(C2), SVT));
3023       break;
3024     case ISD::AND:
3025       Outputs.push_back(getConstant(C1 & C2, SVT));
3026       break;
3027     case ISD::OR:
3028       Outputs.push_back(getConstant(C1 | C2, SVT));
3029       break;
3030     case ISD::XOR:
3031       Outputs.push_back(getConstant(C1 ^ C2, SVT));
3032       break;
3033     case ISD::SHL:
3034       Outputs.push_back(getConstant(C1 << C2, SVT));
3035       break;
3036     case ISD::SRL:
3037       Outputs.push_back(getConstant(C1.lshr(C2), SVT));
3038       break;
3039     case ISD::SRA:
3040       Outputs.push_back(getConstant(C1.ashr(C2), SVT));
3041       break;
3042     case ISD::ROTL:
3043       Outputs.push_back(getConstant(C1.rotl(C2), SVT));
3044       break;
3045     case ISD::ROTR:
3046       Outputs.push_back(getConstant(C1.rotr(C2), SVT));
3047       break;
3048     default:
3049       return SDValue();
3050     }
3051   }
3052 
3053   assert(((Scalar1 && Scalar2) || VT.getVectorNumElements() == Outputs.size()) &&
3054          "Expected a scalar or vector!");
3055 
3056   // Handle the scalar case first.
3057   if (!VT.isVector())
3058     return Outputs.back();
3059 
3060   // We may have a vector type but a scalar result. Create a splat.
3061   Outputs.resize(VT.getVectorNumElements(), Outputs.back());
3062 
3063   // Build a big vector out of the scalar elements we generated.
3064   return getNode(ISD::BUILD_VECTOR, SDLoc(), VT, Outputs);
3065 }
3066 
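/// getNode - Get or create a node with two operands.  The nuw/nsw/exact
/// flags are only recorded for opcodes that support them (see
/// isBinOpWithFlags).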
3067 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, EVT VT, SDValue N1,
3068                               SDValue N2, bool nuw, bool nsw, bool exact) {
3069   ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1.getNode());
3070   ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2.getNode());
3071   switch (Opcode) {
3072   default: break;
3073   case ISD::TokenFactor:
3074     assert(VT == MVT::Other && N1.getValueType() == MVT::Other &&
3075            N2.getValueType() == MVT::Other && "Invalid token factor!");
3076     // Fold trivial token factors.
3077     if (N1.getOpcode() == ISD::EntryToken) return N2;
3078     if (N2.getOpcode() == ISD::EntryToken) return N1;
3079     if (N1 == N2) return N1;
3080     break;
3081   case ISD::CONCAT_VECTORS:
3082     // Concat of UNDEFs is UNDEF.
3083     if (N1.getOpcode() == ISD::UNDEF &&
3084         N2.getOpcode() == ISD::UNDEF)
3085       return getUNDEF(VT);
3086 
3087     // A CONCAT_VECTOR with all operands BUILD_VECTOR can be simplified to
3088     // one big BUILD_VECTOR.
3089     if (N1.getOpcode() == ISD::BUILD_VECTOR &&
3090         N2.getOpcode() == ISD::BUILD_VECTOR) {
3091       SmallVector<SDValue, 16> Elts(N1.getNode()->op_begin(),
3092                                     N1.getNode()->op_end());
3093       Elts.append(N2.getNode()->op_begin(), N2.getNode()->op_end());
3094       return getNode(ISD::BUILD_VECTOR, DL, VT, Elts);
3095     }
3096     break;
3097   case ISD::AND:
3098     assert(VT.isInteger() && "This operator does not apply to FP types!");
3099     assert(N1.getValueType() == N2.getValueType() &&
3100            N1.getValueType() == VT && "Binary operator types must match!");
3101     // (X & 0) -> 0.  This commonly occurs when legalizing i64 values, so it's
3102     // worth handling here.
3103     if (N2C && N2C->isNullValue())
3104       return N2;
3105     if (N2C && N2C->isAllOnesValue())  // X & -1 -> X
3106       return N1;
3107     break;
3108   case ISD::OR:
3109   case ISD::XOR:
3110   case ISD::ADD:
3111   case ISD::SUB:
3112     assert(VT.isInteger() && "This operator does not apply to FP types!");
3113     assert(N1.getValueType() == N2.getValueType() &&
3114            N1.getValueType() == VT && "Binary operator types must match!");
3115     // (X ^|+- 0) -> X.  This commonly occurs when legalizing i64 values, so
3116     // it's worth handling here.
3117     if (N2C && N2C->isNullValue())
3118       return N1;
3119     break;
3120   case ISD::UDIV:
3121   case ISD::UREM:
3122   case ISD::MULHU:
3123   case ISD::MULHS:
3124   case ISD::MUL:
3125   case ISD::SDIV:
3126   case ISD::SREM:
3127     assert(VT.isInteger() && "This operator does not apply to FP types!");
3128     assert(N1.getValueType() == N2.getValueType() &&
3129            N1.getValueType() == VT && "Binary operator types must match!");
3130     break;
3131   case ISD::FADD:
3132   case ISD::FSUB:
3133   case ISD::FMUL:
3134   case ISD::FDIV:
3135   case ISD::FREM:
3136     if (getTarget().Options.UnsafeFPMath) {
3137       if (Opcode == ISD::FADD) {
3138         // 0+x --> x
3139         if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(N1))
3140           if (CFP->getValueAPF().isZero())
3141             return N2;
3142         // x+0 --> x
3143         if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(N2))
3144           if (CFP->getValueAPF().isZero())
3145             return N1;
3146       } else if (Opcode == ISD::FSUB) {
3147         // x-0 --> x
3148         if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(N2))
3149           if (CFP->getValueAPF().isZero())
3150             return N1;
3151       } else if (Opcode == ISD::FMUL) {
3152         ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(N1);
3153         SDValue V = N2;
3154 
3155         // If the first operand isn't the constant, try the second
3156         if (!CFP) {
3157           CFP = dyn_cast<ConstantFPSDNode>(N2);
3158           V = N1;
3159         }
3160 
3161         if (CFP) {
3162           // 0*x --> 0
3163           if (CFP->isZero())
3164             return SDValue(CFP,0);
3165           // 1*x --> x
3166           if (CFP->isExactlyValue(1.0))
3167             return V;
3168         }
3169       }
3170     }
3171     assert(VT.isFloatingPoint() && "This operator only applies to FP types!");
3172     assert(N1.getValueType() == N2.getValueType() &&
3173            N1.getValueType() == VT && "Binary operator types must match!");
3174     break;
3175   case ISD::FCOPYSIGN:   // N1 and result must match.  N1/N2 need not match.
3176     assert(N1.getValueType() == VT &&
3177            N1.getValueType().isFloatingPoint() &&
3178            N2.getValueType().isFloatingPoint() &&
3179            "Invalid FCOPYSIGN!");
3180     break;
3181   case ISD::SHL:
3182   case ISD::SRA:
3183   case ISD::SRL:
3184   case ISD::ROTL:
3185   case ISD::ROTR:
3186     assert(VT == N1.getValueType() &&
3187            "Shift operators' return type must be the same as their first arg");
3188     assert(VT.isInteger() && N2.getValueType().isInteger() &&
3189            "Shifts only work on integers");
3190     assert((!VT.isVector() || VT == N2.getValueType()) &&
3191            "Vector shift amounts must have the same type as their first arg");
3192     // Verify that the shift amount VT is big enough to hold valid shift
3193     // amounts.  This catches things like trying to shift an i1024 value by an
3194     // i8, which is easy to fall into in generic code that uses
3195     // TLI.getShiftAmount().
3196     assert(N2.getValueType().getSizeInBits() >=
3197                    Log2_32_Ceil(N1.getValueType().getSizeInBits()) &&
3198            "Invalid use of small shift amount with oversized value!");
3199 
3200     // Always fold shifts of i1 values so the code generator doesn't need to
3201     // handle them.  Since we know the size of the shift has to be less than the
3202     // size of the value, the shift/rotate count is guaranteed to be zero.
3203     if (VT == MVT::i1)
3204       return N1;
3205     if (N2C && N2C->isNullValue())
3206       return N1;
3207     break;
3208   case ISD::FP_ROUND_INREG: {
3209     EVT EVT = cast<VTSDNode>(N2)->getVT();
3210     assert(VT == N1.getValueType() && "Not an inreg round!");
3211     assert(VT.isFloatingPoint() && EVT.isFloatingPoint() &&
3212            "Cannot FP_ROUND_INREG integer types");
3213     assert(EVT.isVector() == VT.isVector() &&
3214            "FP_ROUND_INREG type should be vector iff the operand "
3215            "type is vector!");
3216     assert((!EVT.isVector() ||
3217             EVT.getVectorNumElements() == VT.getVectorNumElements()) &&
3218            "Vector element counts must match in FP_ROUND_INREG");
3219     assert(EVT.bitsLE(VT) && "Not rounding down!");
3220     (void)EVT;
3221     if (cast<VTSDNode>(N2)->getVT() == VT) return N1;  // Not actually rounding.
3222     break;
3223   }
3224   case ISD::FP_ROUND:
3225     assert(VT.isFloatingPoint() &&
3226            N1.getValueType().isFloatingPoint() &&
3227            VT.bitsLE(N1.getValueType()) &&
3228            isa<ConstantSDNode>(N2) && "Invalid FP_ROUND!");
3229     if (N1.getValueType() == VT) return N1;  // noop conversion.
3230     break;
3231   case ISD::AssertSext:
3232   case ISD::AssertZext: {
3233     EVT EVT = cast<VTSDNode>(N2)->getVT();
3234     assert(VT == N1.getValueType() && "Not an inreg extend!");
3235     assert(VT.isInteger() && EVT.isInteger() &&
3236            "Cannot *_EXTEND_INREG FP types");
3237     assert(!EVT.isVector() &&
3238            "AssertSExt/AssertZExt type should be the vector element type "
3239            "rather than the vector type!");
3240     assert(EVT.bitsLE(VT) && "Not extending!");
3241     if (VT == EVT) return N1; // noop assertion.
3242     break;
3243   }
3244   case ISD::SIGN_EXTEND_INREG: {
3245     EVT EVT = cast<VTSDNode>(N2)->getVT();
3246     assert(VT == N1.getValueType() && "Not an inreg extend!");
3247     assert(VT.isInteger() && EVT.isInteger() &&
3248            "Cannot *_EXTEND_INREG FP types");
3249     assert(EVT.isVector() == VT.isVector() &&
3250            "SIGN_EXTEND_INREG type should be vector iff the operand "
3251            "type is vector!");
3252     assert((!EVT.isVector() ||
3253             EVT.getVectorNumElements() == VT.getVectorNumElements()) &&
3254            "Vector element counts must match in SIGN_EXTEND_INREG");
3255     assert(EVT.bitsLE(VT) && "Not extending!");
3256     if (EVT == VT) return N1;  // Not actually extending
3257 
3258     if (N1C) {
3259       APInt Val = N1C->getAPIntValue();
3260       unsigned FromBits = EVT.getScalarType().getSizeInBits();
3261       Val <<= Val.getBitWidth()-FromBits;
3262       Val = Val.ashr(Val.getBitWidth()-FromBits);
3263       return getConstant(Val, VT);
3264     }
3265     break;
3266   }
3267   case ISD::EXTRACT_VECTOR_ELT:
3268     // EXTRACT_VECTOR_ELT of an UNDEF is an UNDEF.
3269     if (N1.getOpcode() == ISD::UNDEF)
3270       return getUNDEF(VT);
3271 
3272     // EXTRACT_VECTOR_ELT of CONCAT_VECTORS is often formed while lowering is
3273     // expanding copies of large vectors from registers.
3274     if (N2C &&
3275         N1.getOpcode() == ISD::CONCAT_VECTORS &&
3276         N1.getNumOperands() > 0) {
3277       unsigned Factor =
3278         N1.getOperand(0).getValueType().getVectorNumElements();
3279       return getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT,
3280                      N1.getOperand(N2C->getZExtValue() / Factor),
3281                      getConstant(N2C->getZExtValue() % Factor,
3282                                  N2.getValueType()));
3283     }
3284 
3285     // EXTRACT_VECTOR_ELT of BUILD_VECTOR is often formed while lowering is
3286     // expanding large vector constants.
3287     if (N2C && N1.getOpcode() == ISD::BUILD_VECTOR) {
3288       SDValue Elt = N1.getOperand(N2C->getZExtValue());
3289 
3290       if (VT != Elt.getValueType())
3291         // If the vector element type is not legal, the BUILD_VECTOR operands
3292         // are promoted and implicitly truncated, and the result implicitly
3293         // extended. Make that explicit here.
3294         Elt = getAnyExtOrTrunc(Elt, DL, VT);
3295 
3296       return Elt;
3297     }
3298 
3299     // EXTRACT_VECTOR_ELT of INSERT_VECTOR_ELT is often formed when vector
3300     // operations are lowered to scalars.
3301     if (N1.getOpcode() == ISD::INSERT_VECTOR_ELT) {
3302       // If the indices are the same, return the inserted element.  If the
3303       // indices are known to be different, extract the element from the
3304       // original vector instead.
3305       SDValue N1Op2 = N1.getOperand(2);
3306       ConstantSDNode *N1Op2C = dyn_cast<ConstantSDNode>(N1Op2.getNode());
3307 
3308       if (N1Op2C && N2C) {
3309         if (N1Op2C->getZExtValue() == N2C->getZExtValue()) {
3310           if (VT == N1.getOperand(1).getValueType())
3311             return N1.getOperand(1);
3312           else
3313             return getSExtOrTrunc(N1.getOperand(1), DL, VT);
3314         }
3315 
3316         return getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, N1.getOperand(0), N2);
3317       }
3318     }
3319     break;
3320   case ISD::EXTRACT_ELEMENT:
3321     assert(N2C && (unsigned)N2C->getZExtValue() < 2 && "Bad EXTRACT_ELEMENT!");
3322     assert(!N1.getValueType().isVector() && !VT.isVector() &&
3323            (N1.getValueType().isInteger() == VT.isInteger()) &&
3324            N1.getValueType() != VT &&
3325            "Wrong types for EXTRACT_ELEMENT!");
3326 
3327     // EXTRACT_ELEMENT of BUILD_PAIR is often formed while legalize is expanding
3328     // 64-bit integers into 32-bit parts.  Instead of building the extract of
3329     // the BUILD_PAIR, only to have legalize rip it apart, just do it now.
3330     if (N1.getOpcode() == ISD::BUILD_PAIR)
3331       return N1.getOperand(N2C->getZExtValue());
3332 
3333     // EXTRACT_ELEMENT of a constant int is also very common.
3334     if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N1)) {
3335       unsigned ElementSize = VT.getSizeInBits();
3336       unsigned Shift = ElementSize * N2C->getZExtValue();
3337       APInt ShiftedVal = C->getAPIntValue().lshr(Shift);
3338       return getConstant(ShiftedVal.trunc(ElementSize), VT);
3339     }
3340     break;
3341   case ISD::EXTRACT_SUBVECTOR: {
3342     SDValue Index = N2;
3343     if (VT.isSimple() && N1.getValueType().isSimple()) {
3344       assert(VT.isVector() && N1.getValueType().isVector() &&
3345              "Extract subvector VTs must be vectors!");
3346       assert(VT.getVectorElementType() ==
3347              N1.getValueType().getVectorElementType() &&
3348              "Extract subvector VTs must have the same element type!");
3349       assert(VT.getSimpleVT() <= N1.getSimpleValueType() &&
3350              "Extract subvector must be from larger vector to smaller vector!");
3351 
3352       if (isa<ConstantSDNode>(Index.getNode())) {
3353         assert((VT.getVectorNumElements() +
3354                 cast<ConstantSDNode>(Index.getNode())->getZExtValue()
3355                 <= N1.getValueType().getVectorNumElements())
3356                && "Extract subvector overflow!");
3357       }
3358 
3359       // Trivial extraction.
3360       if (VT.getSimpleVT() == N1.getSimpleValueType())
3361         return N1;
3362     }
3363     break;
3364   }
3365   }
3366 
3367   // Perform trivial constant folding.
3368   SDValue SV = FoldConstantArithmetic(Opcode, VT, N1.getNode(), N2.getNode());
3369   if (SV.getNode()) return SV;
3370 
3371   // Canonicalize constant to RHS if commutative.
3372   if (N1C && !N2C && isCommutativeBinOp(Opcode)) {
3373     std::swap(N1C, N2C);
3374     std::swap(N1, N2);
3375   }
3376 
3377   // Constant fold FP operations.
3378   ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1.getNode());
3379   ConstantFPSDNode *N2CFP = dyn_cast<ConstantFPSDNode>(N2.getNode());
3380   if (N1CFP) {
3381     if (!N2CFP && isCommutativeBinOp(Opcode)) {
3382       // Canonicalize constant to RHS if commutative.
3383       std::swap(N1CFP, N2CFP);
3384       std::swap(N1, N2);
3385     } else if (N2CFP) {
3386       APFloat V1 = N1CFP->getValueAPF(), V2 = N2CFP->getValueAPF();
3387       APFloat::opStatus s;
3388       switch (Opcode) {
3389       case ISD::FADD:
3390         s = V1.add(V2, APFloat::rmNearestTiesToEven);
3391         if (s != APFloat::opInvalidOp)
3392           return getConstantFP(V1, VT);
3393         break;
3394       case ISD::FSUB:
3395         s = V1.subtract(V2, APFloat::rmNearestTiesToEven);
3396         if (s!=APFloat::opInvalidOp)
3397           return getConstantFP(V1, VT);
3398         break;
3399       case ISD::FMUL:
3400         s = V1.multiply(V2, APFloat::rmNearestTiesToEven);
3401         if (s!=APFloat::opInvalidOp)
3402           return getConstantFP(V1, VT);
3403         break;
3404       case ISD::FDIV:
3405         s = V1.divide(V2, APFloat::rmNearestTiesToEven);
3406         if (s!=APFloat::opInvalidOp && s!=APFloat::opDivByZero)
3407           return getConstantFP(V1, VT);
3408         break;
3409       case ISD::FREM :
3410         s = V1.mod(V2, APFloat::rmNearestTiesToEven);
3411         if (s!=APFloat::opInvalidOp && s!=APFloat::opDivByZero)
3412           return getConstantFP(V1, VT);
3413         break;
3414       case ISD::FCOPYSIGN:
3415         V1.copySign(V2);
3416         return getConstantFP(V1, VT);
3417       default: break;
3418       }
3419     }
3420 
3421     if (Opcode == ISD::FP_ROUND) {
3422       APFloat V = N1CFP->getValueAPF();    // make copy
3423       bool ignored;
3424       // This can return overflow, underflow, or inexact; we don't care.
3425       // FIXME need to be more flexible about rounding mode.
3426       (void)V.convert(EVTToAPFloatSemantics(VT),
3427                       APFloat::rmNearestTiesToEven, &ignored);
3428       return getConstantFP(V, VT);
3429     }
3430   }
3431 
3432   // Canonicalize an UNDEF to the RHS, even over a constant.
3433   if (N1.getOpcode() == ISD::UNDEF) {
3434     if (isCommutativeBinOp(Opcode)) {
3435       std::swap(N1, N2);
3436     } else {
3437       switch (Opcode) {
3438       case ISD::FP_ROUND_INREG:
3439       case ISD::SIGN_EXTEND_INREG:
3440       case ISD::SUB:
3441       case ISD::FSUB:
3442       case ISD::FDIV:
3443       case ISD::FREM:
3444       case ISD::SRA:
3445         return N1;     // fold op(undef, arg2) -> undef
3446       case ISD::UDIV:
3447       case ISD::SDIV:
3448       case ISD::UREM:
3449       case ISD::SREM:
3450       case ISD::SRL:
3451       case ISD::SHL:
3452         if (!VT.isVector())
3453           return getConstant(0, VT);    // fold op(undef, arg2) -> 0
3454         // For vectors, we can't easily build an all zero vector, just return
3455         // the other operand.
3456         return N2;
3457       }
3458     }
3459   }
3460 
3461   // Fold a bunch of operators when the RHS is undef.
3462   if (N2.getOpcode() == ISD::UNDEF) {
3463     switch (Opcode) {
3464     case ISD::XOR:
3465       if (N1.getOpcode() == ISD::UNDEF)
3466         // Handle undef ^ undef -> 0 special case. This is a common
3467         // idiom (misuse).
3468         return getConstant(0, VT);
3469       // fallthrough
3470     case ISD::ADD:
3471     case ISD::ADDC:
3472     case ISD::ADDE:
3473     case ISD::SUB:
3474     case ISD::UDIV:
3475     case ISD::SDIV:
3476     case ISD::UREM:
3477     case ISD::SREM:
3478       return N2;       // fold op(arg1, undef) -> undef
3479     case ISD::FADD:
3480     case ISD::FSUB:
3481     case ISD::FMUL:
3482     case ISD::FDIV:
3483     case ISD::FREM:
3484       if (getTarget().Options.UnsafeFPMath)
3485         return N2;
3486       break;
3487     case ISD::MUL:
3488     case ISD::AND:
3489     case ISD::SRL:
3490     case ISD::SHL:
3491       if (!VT.isVector())
3492         return getConstant(0, VT);  // fold op(arg1, undef) -> 0
3493       // For vectors, we can't easily build an all zero vector, just return
3494       // the LHS.
3495       return N1;
3496     case ISD::OR:
3497       if (!VT.isVector())
3498         return getConstant(APInt::getAllOnesValue(VT.getSizeInBits()), VT);
3499       // For vectors, we can't easily build an all one vector, just return
3500       // the LHS.
3501       return N1;
3502     case ISD::SRA:
3503       return N1;
3504     }
3505   }
3506 
3507   // Memoize this node if possible.
3508   BinarySDNode *N;
3509   SDVTList VTs = getVTList(VT);
3510   const bool BinOpHasFlags = isBinOpWithFlags(Opcode);
3511   if (VT != MVT::Glue) {
3512     SDValue Ops[] = {N1, N2};
3513     FoldingSetNodeID ID;
3514     AddNodeIDNode(ID, Opcode, VTs, Ops);
3515     if (BinOpHasFlags)
3516       AddBinaryNodeIDCustom(ID, Opcode, nuw, nsw, exact);
3517     void *IP = nullptr;
3518     if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
3519       return SDValue(E, 0);
3520 
3521     N = GetBinarySDNode(Opcode, DL, VTs, N1, N2, nuw, nsw, exact);
3522 
3523     CSEMap.InsertNode(N, IP);
3524   } else {
3525 
3526     N = GetBinarySDNode(Opcode, DL, VTs, N1, N2, nuw, nsw, exact);
3527   }
3528 
3529   AllNodes.push_back(N);
3530 #ifndef NDEBUG
3531   VerifySDNode(N);
3532 #endif
3533   return SDValue(N, 0);
3534 }
3535 
3536 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, EVT VT,
3537                               SDValue N1, SDValue N2, SDValue N3) {
3538   // Perform various simplifications.
3539   ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1.getNode());
3540   switch (Opcode) {
3541   case ISD::FMA: {
3542     ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
3543     ConstantFPSDNode *N2CFP = dyn_cast<ConstantFPSDNode>(N2);
3544     ConstantFPSDNode *N3CFP = dyn_cast<ConstantFPSDNode>(N3);
3545     if (N1CFP && N2CFP && N3CFP) {
3546       APFloat  V1 = N1CFP->getValueAPF();
3547       const APFloat &V2 = N2CFP->getValueAPF();
3548       const APFloat &V3 = N3CFP->getValueAPF();
3549       APFloat::opStatus s =
3550         V1.fusedMultiplyAdd(V2, V3, APFloat::rmNearestTiesToEven);
3551       if (s != APFloat::opInvalidOp)
3552         return getConstantFP(V1, VT);
3553     }
3554     break;
3555   }
3556   case ISD::CONCAT_VECTORS:
3557     // A CONCAT_VECTOR with all operands BUILD_VECTOR can be simplified to
3558     // one big BUILD_VECTOR.
3559     if (N1.getOpcode() == ISD::BUILD_VECTOR &&
3560         N2.getOpcode() == ISD::BUILD_VECTOR &&
3561         N3.getOpcode() == ISD::BUILD_VECTOR) {
3562       SmallVector<SDValue, 16> Elts(N1.getNode()->op_begin(),
3563                                     N1.getNode()->op_end());
3564       Elts.append(N2.getNode()->op_begin(), N2.getNode()->op_end());
3565       Elts.append(N3.getNode()->op_begin(), N3.getNode()->op_end());
3566       return getNode(ISD::BUILD_VECTOR, DL, VT, Elts);
3567     }
3568     break;
3569   case ISD::SETCC: {
3570     // Use FoldSetCC to simplify SETCC's.
3571     SDValue Simp = FoldSetCC(VT, N1, N2, cast<CondCodeSDNode>(N3)->get(), DL);
3572     if (Simp.getNode()) return Simp;
3573     break;
3574   }
3575   case ISD::SELECT:
3576     if (N1C) {
3577      if (N1C->getZExtValue())
3578        return N2;             // select true, X, Y -> X
3579      return N3;             // select false, X, Y -> Y
3580     }
3581 
3582     if (N2 == N3) return N2;   // select C, X, X -> X
3583     break;
3584   case ISD::VECTOR_SHUFFLE:
3585     llvm_unreachable("should use getVectorShuffle constructor!");
3586   case ISD::INSERT_SUBVECTOR: {
3587     SDValue Index = N3;
3588     if (VT.isSimple() && N1.getValueType().isSimple()
3589         && N2.getValueType().isSimple()) {
3590       assert(VT.isVector() && N1.getValueType().isVector() &&
3591              N2.getValueType().isVector() &&
3592              "Insert subvector VTs must be vectors");
3593       assert(VT == N1.getValueType() &&
3594              "Dest and insert subvector source types must match!");
3595       assert(N2.getSimpleValueType() <= N1.getSimpleValueType() &&
3596              "Insert subvector must be from smaller vector to larger vector!");
3597       if (isa<ConstantSDNode>(Index.getNode())) {
3598         assert((N2.getValueType().getVectorNumElements() +
3599                 cast<ConstantSDNode>(Index.getNode())->getZExtValue()
3600                 <= VT.getVectorNumElements())
3601                && "Insert subvector overflow!");
3602       }
3603 
3604       // Trivial insertion.
3605       if (VT.getSimpleVT() == N2.getSimpleValueType())
3606         return N2;
3607     }
3608     break;
3609   }
3610   case ISD::BITCAST:
3611     // Fold bit_convert nodes from a type to themselves.
3612     if (N1.getValueType() == VT)
3613       return N1;
3614     break;
3615   }
3616 
3617   // Memoize node if it doesn't produce a flag.
3618   SDNode *N;
3619   SDVTList VTs = getVTList(VT);
3620   if (VT != MVT::Glue) {
3621     SDValue Ops[] = { N1, N2, N3 };
3622     FoldingSetNodeID ID;
3623     AddNodeIDNode(ID, Opcode, VTs, Ops);
3624     void *IP = nullptr;
3625     if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
3626       return SDValue(E, 0);
3627 
3628     N = new (NodeAllocator) TernarySDNode(Opcode, DL.getIROrder(),
3629                                           DL.getDebugLoc(), VTs, N1, N2, N3);
3630     CSEMap.InsertNode(N, IP);
3631   } else {
3632     N = new (NodeAllocator) TernarySDNode(Opcode, DL.getIROrder(),
3633                                           DL.getDebugLoc(), VTs, N1, N2, N3);
3634   }
3635 
3636   AllNodes.push_back(N);
3637 #ifndef NDEBUG
3638   VerifySDNode(N);
3639 #endif
3640   return SDValue(N, 0);
3641 }
3642 
3643 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, EVT VT,
3644                               SDValue N1, SDValue N2, SDValue N3,
3645                               SDValue N4) {
3646   SDValue Ops[] = { N1, N2, N3, N4 };
3647   return getNode(Opcode, DL, VT, Ops);
3648 }
3649 
3650 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, EVT VT,
3651                               SDValue N1, SDValue N2, SDValue N3,
3652                               SDValue N4, SDValue N5) {
3653   SDValue Ops[] = { N1, N2, N3, N4, N5 };
3654   return getNode(Opcode, DL, VT, Ops);
3655 }
3656 
3657 /// getStackArgumentTokenFactor - Compute a TokenFactor to force all
3658 /// the incoming stack arguments to be loaded from the stack.
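/// This is typically used when lowering tail calls, so that stores of
/// outgoing arguments cannot clobber incoming stack arguments that have not
/// been loaded yet.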
3659 SDValue SelectionDAG::getStackArgumentTokenFactor(SDValue Chain) {
3660   SmallVector<SDValue, 8> ArgChains;
3661 
3662   // Include the original chain at the beginning of the list. When this is
3663   // used by target LowerCall hooks, this helps legalize find the
3664   // CALLSEQ_BEGIN node.
3665   ArgChains.push_back(Chain);
3666 
3667   // Add a chain value for each stack argument.
3668   for (SDNode::use_iterator U = getEntryNode().getNode()->use_begin(),
3669        UE = getEntryNode().getNode()->use_end(); U != UE; ++U)
3670     if (LoadSDNode *L = dyn_cast<LoadSDNode>(*U))
3671       if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(L->getBasePtr()))
3672         if (FI->getIndex() < 0)
3673           ArgChains.push_back(SDValue(L, 1));
3674 
3675   // Build a tokenfactor for all the chains.
3676   return getNode(ISD::TokenFactor, SDLoc(Chain), MVT::Other, ArgChains);
3677 }
3678 
3679 /// getMemsetValue - Vectorized representation of the memset value
3680 /// operand.
3681 static SDValue getMemsetValue(SDValue Value, EVT VT, SelectionDAG &DAG,
3682                               SDLoc dl) {
3683   assert(Value.getOpcode() != ISD::UNDEF);
3684 
3685   unsigned NumBits = VT.getScalarType().getSizeInBits();
3686   if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Value)) {
3687     assert(C->getAPIntValue().getBitWidth() == 8);
3688     APInt Val = APInt::getSplat(NumBits, C->getAPIntValue());
3689     if (VT.isInteger())
3690       return DAG.getConstant(Val, VT);
3691     return DAG.getConstantFP(APFloat(DAG.EVTToAPFloatSemantics(VT), Val), VT);
3692   }
3693 
3694   Value = DAG.getNode(ISD::ZERO_EXTEND, dl, VT, Value);
3695   if (NumBits > 8) {
3696     // Use a multiplication with 0x010101... to extend the input to the
3697     // required length.
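    // For example, extending the byte 0xAB to i32: 0xAB * 0x01010101 ==
    // 0xABABABAB.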
3698     APInt Magic = APInt::getSplat(NumBits, APInt(8, 0x01));
3699     Value = DAG.getNode(ISD::MUL, dl, VT, Value, DAG.getConstant(Magic, VT));
3700   }
3701 
3702   return Value;
3703 }
3704 
3705 /// getMemsetStringVal - Similar to getMemsetValue. Except this is only
3706 /// used when a memcpy is turned into a memset when the source is a constant
3707 /// string ptr.
3708 static SDValue getMemsetStringVal(EVT VT, SDLoc dl, SelectionDAG &DAG,
3709                                   const TargetLowering &TLI, StringRef Str) {
3710   // Handle the all-zero case (an empty string), for scalar and vector types.
3711   if (Str.empty()) {
3712     if (VT.isInteger())
3713       return DAG.getConstant(0, VT);
3714     else if (VT == MVT::f32 || VT == MVT::f64 || VT == MVT::f128)
3715       return DAG.getConstantFP(0.0, VT);
3716     else if (VT.isVector()) {
3717       unsigned NumElts = VT.getVectorNumElements();
3718       MVT EltVT = (VT.getVectorElementType() == MVT::f32) ? MVT::i32 : MVT::i64;
3719       return DAG.getNode(ISD::BITCAST, dl, VT,
3720                          DAG.getConstant(0, EVT::getVectorVT(*DAG.getContext(),
3721                                                              EltVT, NumElts)));
3722     } else
3723       llvm_unreachable("Expected type!");
3724   }
3725 
3726   assert(!VT.isVector() && "Can't handle vector type here!");
3727   unsigned NumVTBits = VT.getSizeInBits();
3728   unsigned NumVTBytes = NumVTBits / 8;
3729   unsigned NumBytes = std::min(NumVTBytes, unsigned(Str.size()));
3730 
3731   APInt Val(NumVTBits, 0);
3732   if (TLI.isLittleEndian()) {
3733     for (unsigned i = 0; i != NumBytes; ++i)
3734       Val |= (uint64_t)(unsigned char)Str[i] << i*8;
3735   } else {
3736     for (unsigned i = 0; i != NumBytes; ++i)
3737       Val |= (uint64_t)(unsigned char)Str[i] << (NumVTBytes-i-1)*8;
3738   }
3739 
3740   // If the "cost" of materializing the integer immediate is less than the cost
3741   // of a load, then it is cost effective to turn the load into the immediate.
3742   Type *Ty = VT.getTypeForEVT(*DAG.getContext());
3743   if (TLI.shouldConvertConstantLoadToIntImm(Val, Ty))
3744     return DAG.getConstant(Val, VT);
3745   return SDValue(nullptr, 0);
3746 }
3747 
3748 /// getMemBasePlusOffset - Returns an ADD node computing the given Base
3749 /// pointer plus the constant Offset.
3750 static SDValue getMemBasePlusOffset(SDValue Base, unsigned Offset, SDLoc dl,
3751                                       SelectionDAG &DAG) {
3752   EVT VT = Base.getValueType();
3753   return DAG.getNode(ISD::ADD, dl,
3754                      VT, Base, DAG.getConstant(Offset, VT));
3755 }
3756 
3757 /// isMemSrcFromString - Returns true if memcpy source is a string constant.
3758 ///
3759 static bool isMemSrcFromString(SDValue Src, StringRef &Str) {
3760   unsigned SrcDelta = 0;
3761   GlobalAddressSDNode *G = nullptr;
3762   if (Src.getOpcode() == ISD::GlobalAddress)
3763     G = cast<GlobalAddressSDNode>(Src);
3764   else if (Src.getOpcode() == ISD::ADD &&
3765            Src.getOperand(0).getOpcode() == ISD::GlobalAddress &&
3766            Src.getOperand(1).getOpcode() == ISD::Constant) {
3767     G = cast<GlobalAddressSDNode>(Src.getOperand(0));
3768     SrcDelta = cast<ConstantSDNode>(Src.getOperand(1))->getZExtValue();
3769   }
3770   if (!G)
3771     return false;
3772 
3773   return getConstantStringInfo(G->getGlobal(), Str, SrcDelta, false);
3774 }
3775 
3776 /// FindOptimalMemOpLowering - Determines the optimal series of memory ops
3777 /// to replace the memset / memcpy. Return true if the number of memory ops
3778 /// is below the threshold. It returns the types of the sequence of
3779 /// memory ops to perform memset / memcpy by reference.
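/// For example, a 15-byte memset might be lowered as one i64 store followed
/// by an i32, an i16 and an i8 store, on a target where those types are
/// legal and no overlapping stores are used.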
3780 static bool FindOptimalMemOpLowering(std::vector<EVT> &MemOps,
3781                                      unsigned Limit, uint64_t Size,
3782                                      unsigned DstAlign, unsigned SrcAlign,
3783                                      bool IsMemset,
3784                                      bool ZeroMemset,
3785                                      bool MemcpyStrSrc,
3786                                      bool AllowOverlap,
3787                                      SelectionDAG &DAG,
3788                                      const TargetLowering &TLI) {
3789   assert((SrcAlign == 0 || SrcAlign >= DstAlign) &&
3790          "Expecting memcpy / memset source to meet alignment requirement!");
3791   // If 'SrcAlign' is zero, that means the memory operation does not need to
3792   // load the value, i.e. memset or memcpy from constant string. Otherwise,
3793   // it's the inferred alignment of the source. 'DstAlign', on the other hand,
3794   // is the specified alignment of the memory operation. If it is zero, that
3795   // means it's possible to change the alignment of the destination.
3796   // 'MemcpyStrSrc' indicates whether the memcpy source is constant so it does
3797   // not need to be loaded.
3798   EVT VT = TLI.getOptimalMemOpType(Size, DstAlign, SrcAlign,
3799                                    IsMemset, ZeroMemset, MemcpyStrSrc,
3800                                    DAG.getMachineFunction());
3801 
3802   if (VT == MVT::Other) {
3803     unsigned AS = 0;
3804     if (DstAlign >= TLI.getDataLayout()->getPointerPrefAlignment(AS) ||
3805         TLI.allowsUnalignedMemoryAccesses(VT, AS)) {
3806       VT = TLI.getPointerTy();
3807     } else {
3808       switch (DstAlign & 7) {
3809       case 0:  VT = MVT::i64; break;
3810       case 4:  VT = MVT::i32; break;
3811       case 2:  VT = MVT::i16; break;
3812       default: VT = MVT::i8;  break;
3813       }
3814     }
3815 
3816     MVT LVT = MVT::i64;
3817     while (!TLI.isTypeLegal(LVT))
3818       LVT = (MVT::SimpleValueType)(LVT.SimpleTy - 1);
3819     assert(LVT.isInteger());
3820 
3821     if (VT.bitsGT(LVT))
3822       VT = LVT;
3823   }
3824 
3825   unsigned NumMemOps = 0;
3826   while (Size != 0) {
3827     unsigned VTSize = VT.getSizeInBits() / 8;
3828     while (VTSize > Size) {
3829       // For now, only use non-vector loads / stores for the left-over pieces.
3830       EVT NewVT = VT;
3831       unsigned NewVTSize;
3832 
3833       bool Found = false;
3834       if (VT.isVector() || VT.isFloatingPoint()) {
3835         NewVT = (VT.getSizeInBits() > 64) ? MVT::i64 : MVT::i32;
3836         if (TLI.isOperationLegalOrCustom(ISD::STORE, NewVT) &&
3837             TLI.isSafeMemOpType(NewVT.getSimpleVT()))
3838           Found = true;
3839         else if (NewVT == MVT::i64 &&
3840                  TLI.isOperationLegalOrCustom(ISD::STORE, MVT::f64) &&
3841                  TLI.isSafeMemOpType(MVT::f64)) {
3842           // i64 is usually not legal on 32-bit targets, but f64 may be.
3843           NewVT = MVT::f64;
3844           Found = true;
3845         }
3846       }
3847 
3848       if (!Found) {
3849         do {
3850           NewVT = (MVT::SimpleValueType)(NewVT.getSimpleVT().SimpleTy - 1);
3851           if (NewVT == MVT::i8)
3852             break;
3853         } while (!TLI.isSafeMemOpType(NewVT.getSimpleVT()));
3854       }
3855       NewVTSize = NewVT.getSizeInBits() / 8;
3856 
3857       // If the new VT cannot cover all of the remaining bits, then consider
3858       // issuing a (or a pair of) unaligned and overlapping load / store.
3859       // FIXME: Only does this for 64-bit or more since we don't have proper
3860       // cost model for unaligned load / store.
3861       bool Fast;
3862       unsigned AS = 0;
3863       if (NumMemOps && AllowOverlap &&
3864           VTSize >= 8 && NewVTSize < Size &&
3865           TLI.allowsUnalignedMemoryAccesses(VT, AS, &Fast) && Fast)
3866         VTSize = Size;
3867       else {
3868         VT = NewVT;
3869         VTSize = NewVTSize;
3870       }
3871     }
3872 
3873     if (++NumMemOps > Limit)
3874       return false;
3875 
3876     MemOps.push_back(VT);
3877     Size -= VTSize;
3878   }
3879 
3880   return true;
3881 }
3882 
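/// getMemcpyLoadsAndStores - Lower a memcpy of a known constant size into an
/// inline sequence of loads and stores, using the memory-op types chosen by
/// FindOptimalMemOpLowering.  Returns a null SDValue if the copy cannot be
/// expanded within the target's limit.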
3883 static SDValue getMemcpyLoadsAndStores(SelectionDAG &DAG, SDLoc dl,
3884                                        SDValue Chain, SDValue Dst,
3885                                        SDValue Src, uint64_t Size,
3886                                        unsigned Align, bool isVol,
3887                                        bool AlwaysInline,
3888                                        MachinePointerInfo DstPtrInfo,
3889                                        MachinePointerInfo SrcPtrInfo) {
3890   // Turn a memcpy of an undef source into a nop.
3891   if (Src.getOpcode() == ISD::UNDEF)
3892     return Chain;
3893 
3894   // Expand memcpy to a series of load and store ops if the size operand falls
3895   // below a certain threshold.
3896   // TODO: In the AlwaysInline case, if the size is big then generate a loop
3897   // rather than maybe a humongous number of loads and stores.
3898   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3899   std::vector<EVT> MemOps;
3900   bool DstAlignCanChange = false;
3901   MachineFunction &MF = DAG.getMachineFunction();
3902   MachineFrameInfo *MFI = MF.getFrameInfo();
3903   bool OptSize =
3904     MF.getFunction()->getAttributes().
3905       hasAttribute(AttributeSet::FunctionIndex, Attribute::OptimizeForSize);
3906   FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst);
3907   if (FI && !MFI->isFixedObjectIndex(FI->getIndex()))
3908     DstAlignCanChange = true;
3909   unsigned SrcAlign = DAG.InferPtrAlignment(Src);
3910   if (Align > SrcAlign)
3911     SrcAlign = Align;
3912   StringRef Str;
3913   bool CopyFromStr = isMemSrcFromString(Src, Str);
3914   bool isZeroStr = CopyFromStr && Str.empty();
3915   unsigned Limit = AlwaysInline ? ~0U : TLI.getMaxStoresPerMemcpy(OptSize);
3916 
3917   if (!FindOptimalMemOpLowering(MemOps, Limit, Size,
3918                                 (DstAlignCanChange ? 0 : Align),
3919                                 (isZeroStr ? 0 : SrcAlign),
3920                                 false, false, CopyFromStr, true, DAG, TLI))
3921     return SDValue();
3922 
3923   if (DstAlignCanChange) {
3924     Type *Ty = MemOps[0].getTypeForEVT(*DAG.getContext());
3925     unsigned NewAlign = (unsigned) TLI.getDataLayout()->getABITypeAlignment(Ty);
3926 
3927     // Don't promote to an alignment that would require dynamic stack
3928     // realignment.
3929     const TargetRegisterInfo *TRI = MF.getTarget().getRegisterInfo();
3930     if (!TRI->needsStackRealignment(MF))
3931        while (NewAlign > Align &&
3932              TLI.getDataLayout()->exceedsNaturalStackAlignment(NewAlign))
3933           NewAlign /= 2;
3934 
3935     if (NewAlign > Align) {
3936       // Give the stack frame object a larger alignment if needed.
3937       if (MFI->getObjectAlignment(FI->getIndex()) < NewAlign)
3938         MFI->setObjectAlignment(FI->getIndex(), NewAlign);
3939       Align = NewAlign;
3940     }
3941   }
3942 
3943   SmallVector<SDValue, 8> OutChains;
3944   unsigned NumMemOps = MemOps.size();
3945   uint64_t SrcOff = 0, DstOff = 0;
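  // Emit one load/store pair (or, when copying from a constant string, a
  // direct store of immediate data) per type chosen above, advancing the
  // source and destination offsets in lock step.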
3946   for (unsigned i = 0; i != NumMemOps; ++i) {
3947     EVT VT = MemOps[i];
3948     unsigned VTSize = VT.getSizeInBits() / 8;
3949     SDValue Value, Store;
3950 
3951     if (VTSize > Size) {
3952       // Issue an unaligned load / store pair that overlaps with the previous
3953       // pair. Adjust the offsets accordingly.
3954       assert(i == NumMemOps-1 && i != 0);
3955       SrcOff -= VTSize - Size;
3956       DstOff -= VTSize - Size;
3957     }
3958 
3959     if (CopyFromStr &&
3960         (isZeroStr || (VT.isInteger() && !VT.isVector()))) {
3961       // It's unlikely a store of a vector immediate can be done in a single
3962       // instruction. It would require a load from a constant pool first.
3963       // We only handle zero vectors here.
3964       // FIXME: Handle other cases where a store of a vector immediate can be
3965       // done in a single instruction.
3966       Value = getMemsetStringVal(VT, dl, DAG, TLI, Str.substr(SrcOff));
3967       if (Value.getNode())
3968         Store = DAG.getStore(Chain, dl, Value,
3969                              getMemBasePlusOffset(Dst, DstOff, dl, DAG),
3970                              DstPtrInfo.getWithOffset(DstOff), isVol,
3971                              false, Align);
3972     }
3973 
3974     if (!Store.getNode()) {
3975       // The type might not be legal for the target.  This should only happen
3976       // if the type is smaller than a legal type, as on PPC, so the right
3977       // thing to do is generate a LoadExt/StoreTrunc pair.  These simplify
3978       // to Load/Store if NVT==VT.
3979       // FIXME: does the case above also need this?
3980       EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), VT);
3981       assert(NVT.bitsGE(VT));
3982       Value = DAG.getExtLoad(ISD::EXTLOAD, dl, NVT, Chain,
3983                              getMemBasePlusOffset(Src, SrcOff, dl, DAG),
3984                              SrcPtrInfo.getWithOffset(SrcOff), VT, isVol, false,
3985                              MinAlign(SrcAlign, SrcOff));
3986       Store = DAG.getTruncStore(Chain, dl, Value,
3987                                 getMemBasePlusOffset(Dst, DstOff, dl, DAG),
3988                                 DstPtrInfo.getWithOffset(DstOff), VT, isVol,
3989                                 false, Align);
3990     }
3991     OutChains.push_back(Store);
3992     SrcOff += VTSize;
3993     DstOff += VTSize;
3994     Size -= VTSize;
3995   }
3996 
3997   return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
3998 }
3999 
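/// getMemmoveLoadsAndStores - Lower a memmove of a known constant size into
/// an inline sequence of loads and stores, or return a null SDValue if that
/// is not profitable for the target.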
4000 static SDValue getMemmoveLoadsAndStores(SelectionDAG &DAG, SDLoc dl,
4001                                         SDValue Chain, SDValue Dst,
4002                                         SDValue Src, uint64_t Size,
4003                                         unsigned Align,  bool isVol,
4004                                         bool AlwaysInline,
4005                                         MachinePointerInfo DstPtrInfo,
4006                                         MachinePointerInfo SrcPtrInfo) {
4007   // Turn a memmove of undef into a nop.
4008   if (Src.getOpcode() == ISD::UNDEF)
4009     return Chain;
4010 
4011   // Expand memmove to a series of load and store ops if the size operand falls
4012   // below a certain threshold.
4013   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4014   std::vector<EVT> MemOps;
4015   bool DstAlignCanChange = false;
4016   MachineFunction &MF = DAG.getMachineFunction();
4017   MachineFrameInfo *MFI = MF.getFrameInfo();
4018   bool OptSize = MF.getFunction()->getAttributes().
4019     hasAttribute(AttributeSet::FunctionIndex, Attribute::OptimizeForSize);
4020   FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst);
4021   if (FI && !MFI->isFixedObjectIndex(FI->getIndex()))
4022     DstAlignCanChange = true;
4023   unsigned SrcAlign = DAG.InferPtrAlignment(Src);
4024   if (Align > SrcAlign)
4025     SrcAlign = Align;
4026   unsigned Limit = AlwaysInline ? ~0U : TLI.getMaxStoresPerMemmove(OptSize);
4027 
4028   if (!FindOptimalMemOpLowering(MemOps, Limit, Size,
4029                                 (DstAlignCanChange ? 0 : Align), SrcAlign,
4030                                 false, false, false, false, DAG, TLI))
4031     return SDValue();
4032 
4033   if (DstAlignCanChange) {
4034     Type *Ty = MemOps[0].getTypeForEVT(*DAG.getContext());
4035     unsigned NewAlign = (unsigned) TLI.getDataLayout()->getABITypeAlignment(Ty);
4036     if (NewAlign > Align) {
4037       // Give the stack frame object a larger alignment if needed.
4038       if (MFI->getObjectAlignment(FI->getIndex()) < NewAlign)
4039         MFI->setObjectAlignment(FI->getIndex(), NewAlign);
4040       Align = NewAlign;
4041     }
4042   }
4043 
4044   uint64_t SrcOff = 0, DstOff = 0;
4045   SmallVector<SDValue, 8> LoadValues;
4046   SmallVector<SDValue, 8> LoadChains;
4047   SmallVector<SDValue, 8> OutChains;
4048   unsigned NumMemOps = MemOps.size();
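  // Unlike memcpy, the source and destination of a memmove may overlap, so
  // emit all of the loads before emitting any of the stores.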
4049   for (unsigned i = 0; i < NumMemOps; i++) {
4050     EVT VT = MemOps[i];
4051     unsigned VTSize = VT.getSizeInBits() / 8;
4052     SDValue Value;
4053 
4054     Value = DAG.getLoad(VT, dl, Chain,
4055                         getMemBasePlusOffset(Src, SrcOff, dl, DAG),
4056                         SrcPtrInfo.getWithOffset(SrcOff), isVol,
4057                         false, false, SrcAlign);
4058     LoadValues.push_back(Value);
4059     LoadChains.push_back(Value.getValue(1));
4060     SrcOff += VTSize;
4061   }
4062   Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, LoadChains);
4063   OutChains.clear();
4064   for (unsigned i = 0; i < NumMemOps; i++) {
4065     EVT VT = MemOps[i];
4066     unsigned VTSize = VT.getSizeInBits() / 8;
4067     SDValue Store;
4068 
4069     Store = DAG.getStore(Chain, dl, LoadValues[i],
4070                          getMemBasePlusOffset(Dst, DstOff, dl, DAG),
4071                          DstPtrInfo.getWithOffset(DstOff), isVol, false, Align);
4072     OutChains.push_back(Store);
4073     DstOff += VTSize;
4074   }
4075 
4076   return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
4077 }
4078 
4079 /// \brief Lower the call to 'memset' intrinsic function into a series of store
4080 /// operations.
4081 ///
4082 /// \param DAG Selection DAG where lowered code is placed.
4083 /// \param dl Link to corresponding IR location.
4084 /// \param Chain Control flow dependency.
4085 /// \param Dst Pointer to destination memory location.
4086 /// \param Src Value of the byte to write into memory.
4087 /// \param Size Number of bytes to write.
4088 /// \param Align Alignment of the destination in bytes.
4089 /// \param isVol True if destination is volatile.
4090 /// \param DstPtrInfo IR information on the memory pointer.
4091 /// \returns New head in the control flow, if lowering was successful, empty
4092 /// SDValue otherwise.
4093 ///
4094 /// The function tries to replace 'llvm.memset' intrinsic with several store
4095 /// operations and value calculation code. This is usually profitable for small
4096 /// memory size.
4097 static SDValue getMemsetStores(SelectionDAG &DAG, SDLoc dl,
4098                                SDValue Chain, SDValue Dst,
4099                                SDValue Src, uint64_t Size,
4100                                unsigned Align, bool isVol,
4101                                MachinePointerInfo DstPtrInfo) {
4102   // Turn a memset of undef into a nop.
4103   if (Src.getOpcode() == ISD::UNDEF)
4104     return Chain;
4105 
4106   // Expand memset to a series of load/store ops if the size operand
4107   // falls below a certain threshold.
4108   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4109   std::vector<EVT> MemOps;
4110   bool DstAlignCanChange = false;
4111   MachineFunction &MF = DAG.getMachineFunction();
4112   MachineFrameInfo *MFI = MF.getFrameInfo();
4113   bool OptSize = MF.getFunction()->getAttributes().
4114     hasAttribute(AttributeSet::FunctionIndex, Attribute::OptimizeForSize);
4115   FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst);
4116   if (FI && !MFI->isFixedObjectIndex(FI->getIndex()))
4117     DstAlignCanChange = true;
4118   bool IsZeroVal =
4119     isa<ConstantSDNode>(Src) && cast<ConstantSDNode>(Src)->isNullValue();
4120   if (!FindOptimalMemOpLowering(MemOps, TLI.getMaxStoresPerMemset(OptSize),
4121                                 Size, (DstAlignCanChange ? 0 : Align), 0,
4122                                 true, IsZeroVal, false, true, DAG, TLI))
4123     return SDValue();
4124 
4125   if (DstAlignCanChange) {
4126     Type *Ty = MemOps[0].getTypeForEVT(*DAG.getContext());
4127     unsigned NewAlign = (unsigned) TLI.getDataLayout()->getABITypeAlignment(Ty);
4128     if (NewAlign > Align) {
4129       // Give the stack frame object a larger alignment if needed.
4130       if (MFI->getObjectAlignment(FI->getIndex()) < NewAlign)
4131         MFI->setObjectAlignment(FI->getIndex(), NewAlign);
4132       Align = NewAlign;
4133     }
4134   }
4135 
4136   SmallVector<SDValue, 8> OutChains;
4137   uint64_t DstOff = 0;
4138   unsigned NumMemOps = MemOps.size();
4139 
4140   // Find the largest store and generate the bit pattern for it.
4141   EVT LargestVT = MemOps[0];
4142   for (unsigned i = 1; i < NumMemOps; i++)
4143     if (MemOps[i].bitsGT(LargestVT))
4144       LargestVT = MemOps[i];
4145   SDValue MemSetValue = getMemsetValue(Src, LargestVT, DAG, dl);
4146 
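  // Emit the stores, reusing the widest repeated-byte value and narrowing it
  // with a truncate when the target can do that for free.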
4147   for (unsigned i = 0; i < NumMemOps; i++) {
4148     EVT VT = MemOps[i];
4149     unsigned VTSize = VT.getSizeInBits() / 8;
4150     if (VTSize > Size) {
4151       // Issue an unaligned store that overlaps with the previous store.
4152       // Adjust the offset accordingly.
4153       assert(i == NumMemOps-1 && i != 0);
4154       DstOff -= VTSize - Size;
4155     }
4156 
4157     // If this store is smaller than the largest store, see whether we can get
4158     // the smaller value for free with a truncate.
4159     SDValue Value = MemSetValue;
4160     if (VT.bitsLT(LargestVT)) {
4161       if (!LargestVT.isVector() && !VT.isVector() &&
4162           TLI.isTruncateFree(LargestVT, VT))
4163         Value = DAG.getNode(ISD::TRUNCATE, dl, VT, MemSetValue);
4164       else
4165         Value = getMemsetValue(Src, VT, DAG, dl);
4166     }
4167     assert(Value.getValueType() == VT && "Value with wrong type.");
4168     SDValue Store = DAG.getStore(Chain, dl, Value,
4169                                  getMemBasePlusOffset(Dst, DstOff, dl, DAG),
4170                                  DstPtrInfo.getWithOffset(DstOff),
4171                                  isVol, false, Align);
4172     OutChains.push_back(Store);
4173     DstOff += VT.getSizeInBits() / 8;
4174     Size -= VTSize;
4175   }
4176 
4177   return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
4178 }
4179 
4180 SDValue SelectionDAG::getMemcpy(SDValue Chain, SDLoc dl, SDValue Dst,
4181                                 SDValue Src, SDValue Size,
4182                                 unsigned Align, bool isVol, bool AlwaysInline,
4183                                 MachinePointerInfo DstPtrInfo,
4184                                 MachinePointerInfo SrcPtrInfo) {
4185   assert(Align && "The SDAG layer expects explicit alignment and reserves 0");
4186 
4187   // Check to see if we should lower the memcpy to loads and stores first.
4188   // For cases within the target-specified limits, this is the best choice.
4189   ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);
4190   if (ConstantSize) {
4191     // Memcpy with size zero? Just return the original chain.
4192     if (ConstantSize->isNullValue())
4193       return Chain;
4194 
4195     SDValue Result = getMemcpyLoadsAndStores(*this, dl, Chain, Dst, Src,
4196                                              ConstantSize->getZExtValue(), Align,
4197                                              isVol, false, DstPtrInfo, SrcPtrInfo);
4198     if (Result.getNode())
4199       return Result;
4200   }
4201 
4202   // Then check to see if we should lower the memcpy with target-specific
4203   // code. If the target chooses to do this, this is the next best.
4204   SDValue Result =
4205     TSI.EmitTargetCodeForMemcpy(*this, dl, Chain, Dst, Src, Size, Align,
4206                                 isVol, AlwaysInline,
4207                                 DstPtrInfo, SrcPtrInfo);
4208   if (Result.getNode())
4209     return Result;
4210 
4211   // If we really need inline code and the target declined to provide it,
4212   // use a (potentially long) sequence of loads and stores.
4213   if (AlwaysInline) {
4214     assert(ConstantSize && "AlwaysInline requires a constant size!");
4215     return getMemcpyLoadsAndStores(*this, dl, Chain, Dst, Src,
4216                                    ConstantSize->getZExtValue(), Align, isVol,
4217                                    true, DstPtrInfo, SrcPtrInfo);
4218   }
4219 
4220   // FIXME: If the memcpy is volatile (isVol), lowering it to a plain libc
4221   // memcpy is not guaranteed to be safe. libc memcpys aren't required to
4222   // respect volatile, so they may do things like read or write memory
4223   // beyond the given memory regions. But fixing this isn't easy, and most
4224   // people don't care.
4225 
4226   const TargetLowering *TLI = TM.getTargetLowering();
4227 
4228   // Emit a library call.
4229   TargetLowering::ArgListTy Args;
4230   TargetLowering::ArgListEntry Entry;
4231   Entry.Ty = TLI->getDataLayout()->getIntPtrType(*getContext());
4232   Entry.Node = Dst; Args.push_back(Entry);
4233   Entry.Node = Src; Args.push_back(Entry);
4234   Entry.Node = Size; Args.push_back(Entry);
4235   // FIXME: pass in SDLoc
4236   TargetLowering::CallLoweringInfo CLI(*this);
4237   CLI.setDebugLoc(dl).setChain(Chain)
4238     .setCallee(TLI->getLibcallCallingConv(RTLIB::MEMCPY),
4239                Type::getVoidTy(*getContext()),
4240                getExternalSymbol(TLI->getLibcallName(RTLIB::MEMCPY),
4241                                  TLI->getPointerTy()), std::move(Args), 0)
4242     .setDiscardResult();
4243   std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI);
4244 
4245   return CallResult.second;
4246 }
4247 
4248 SDValue SelectionDAG::getMemmove(SDValue Chain, SDLoc dl, SDValue Dst,
4249                                  SDValue Src, SDValue Size,
4250                                  unsigned Align, bool isVol,
4251                                  MachinePointerInfo DstPtrInfo,
4252                                  MachinePointerInfo SrcPtrInfo) {
4253   assert(Align && "The SDAG layer expects explicit alignment and reserves 0");
4254 
4255   // Check to see if we should lower the memmove to loads and stores first.
4256   // For cases within the target-specified limits, this is the best choice.
4257   ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);
4258   if (ConstantSize) {
4259     // Memmove with size zero? Just return the original chain.
4260     if (ConstantSize->isNullValue())
4261       return Chain;
4262 
4263     SDValue Result =
4264       getMemmoveLoadsAndStores(*this, dl, Chain, Dst, Src,
4265                                ConstantSize->getZExtValue(), Align, isVol,
4266                                false, DstPtrInfo, SrcPtrInfo);
4267     if (Result.getNode())
4268       return Result;
4269   }
4270 
4271   // Then check to see if we should lower the memmove with target-specific
4272   // code. If the target chooses to do this, this is the next best.
4273   SDValue Result =
4274     TSI.EmitTargetCodeForMemmove(*this, dl, Chain, Dst, Src, Size, Align, isVol,
4275                                  DstPtrInfo, SrcPtrInfo);
4276   if (Result.getNode())
4277     return Result;
4278 
4279   // FIXME: If the memmove is volatile, lowering it to plain libc memmove may
4280   // not be safe.  See memcpy above for more details.
4281 
4282   const TargetLowering *TLI = TM.getTargetLowering();
4283 
4284   // Emit a library call.
4285   TargetLowering::ArgListTy Args;
4286   TargetLowering::ArgListEntry Entry;
4287   Entry.Ty = TLI->getDataLayout()->getIntPtrType(*getContext());
4288   Entry.Node = Dst; Args.push_back(Entry);
4289   Entry.Node = Src; Args.push_back(Entry);
4290   Entry.Node = Size; Args.push_back(Entry);
4291   // FIXME: pass in SDLoc
4292   TargetLowering::CallLoweringInfo CLI(*this);
4293   CLI.setDebugLoc(dl).setChain(Chain)
4294     .setCallee(TLI->getLibcallCallingConv(RTLIB::MEMMOVE),
4295                Type::getVoidTy(*getContext()),
4296                getExternalSymbol(TLI->getLibcallName(RTLIB::MEMMOVE),
4297                                  TLI->getPointerTy()), std::move(Args), 0)
4298     .setDiscardResult();
4299   std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI);
4300 
4301   return CallResult.second;
4302 }
4303 
4304 SDValue SelectionDAG::getMemset(SDValue Chain, SDLoc dl, SDValue Dst,
4305                                 SDValue Src, SDValue Size,
4306                                 unsigned Align, bool isVol,
4307                                 MachinePointerInfo DstPtrInfo) {
4308   assert(Align && "The SDAG layer expects explicit alignment and reserves 0");
4309 
4310   // Check to see if we should lower the memset to stores first.
4311   // For cases within the target-specified limits, this is the best choice.
4312   ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);
4313   if (ConstantSize) {
4314     // Memset with size zero? Just return the original chain.
4315     if (ConstantSize->isNullValue())
4316       return Chain;
4317 
4318     SDValue Result =
4319       getMemsetStores(*this, dl, Chain, Dst, Src, ConstantSize->getZExtValue(),
4320                       Align, isVol, DstPtrInfo);
4321 
4322     if (Result.getNode())
4323       return Result;
4324   }
4325 
4326   // Then check to see if we should lower the memset with target-specific
4327   // code. If the target chooses to do this, this is the next best.
4328   SDValue Result =
4329     TSI.EmitTargetCodeForMemset(*this, dl, Chain, Dst, Src, Size, Align, isVol,
4330                                 DstPtrInfo);
4331   if (Result.getNode())
4332     return Result;
4333 
4334   // Emit a library call.
4335   const TargetLowering *TLI = TM.getTargetLowering();
4336   Type *IntPtrTy = TLI->getDataLayout()->getIntPtrType(*getContext());
4337   TargetLowering::ArgListTy Args;
4338   TargetLowering::ArgListEntry Entry;
4339   Entry.Node = Dst; Entry.Ty = IntPtrTy;
4340   Args.push_back(Entry);
4341   // Extend or truncate the argument to be an i32 value for the call.
4342   if (Src.getValueType().bitsGT(MVT::i32))
4343     Src = getNode(ISD::TRUNCATE, dl, MVT::i32, Src);
4344   else
4345     Src = getNode(ISD::ZERO_EXTEND, dl, MVT::i32, Src);
4346   Entry.Node = Src;
4347   Entry.Ty = Type::getInt32Ty(*getContext());
4348   Entry.isSExt = true;
4349   Args.push_back(Entry);
4350   Entry.Node = Size;
4351   Entry.Ty = IntPtrTy;
4352   Entry.isSExt = false;
4353   Args.push_back(Entry);
4354 
4355   // FIXME: pass in SDLoc
4356   TargetLowering::CallLoweringInfo CLI(*this);
4357   CLI.setDebugLoc(dl).setChain(Chain)
4358     .setCallee(TLI->getLibcallCallingConv(RTLIB::MEMSET),
4359                Type::getVoidTy(*getContext()),
4360                getExternalSymbol(TLI->getLibcallName(RTLIB::MEMSET),
4361                                  TLI->getPointerTy()), std::move(Args), 0)
4362     .setDiscardResult();
4363 
4364   std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI);
4365   return CallResult.second;
4366 }
4367 
4368 SDValue SelectionDAG::getAtomic(unsigned Opcode, SDLoc dl, EVT MemVT,
4369                                 SDVTList VTList, ArrayRef<SDValue> Ops,
4370                                 MachineMemOperand *MMO,
4371                                 AtomicOrdering SuccessOrdering,
4372                                 AtomicOrdering FailureOrdering,
4373                                 SynchronizationScope SynchScope) {
4374   FoldingSetNodeID ID;
4375   ID.AddInteger(MemVT.getRawBits());
4376   AddNodeIDNode(ID, Opcode, VTList, Ops);
4377   ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
4378   void* IP = nullptr;
4379   if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) {
4380     cast<AtomicSDNode>(E)->refineAlignment(MMO);
4381     return SDValue(E, 0);
4382   }
4383 
4384   // Allocate the operands array for the node out of the BumpPtrAllocator, since
4385   // SDNode doesn't have access to it.  This memory will be "leaked" when
4386   // the node is deallocated, but recovered when the allocator is released.
4387   // If the number of operands is less than 5 we use AtomicSDNode's internal
4388   // storage.
4389   unsigned NumOps = Ops.size();
4390   SDUse *DynOps = NumOps > 4 ? OperandAllocator.Allocate<SDUse>(NumOps)
4391                              : nullptr;
4392 
4393   SDNode *N = new (NodeAllocator) AtomicSDNode(Opcode, dl.getIROrder(),
4394                                                dl.getDebugLoc(), VTList, MemVT,
4395                                                Ops.data(), DynOps, NumOps, MMO,
4396                                                SuccessOrdering, FailureOrdering,
4397                                                SynchScope);
4398   CSEMap.InsertNode(N, IP);
4399   AllNodes.push_back(N);
4400   return SDValue(N, 0);
4401 }
4402 
4403 SDValue SelectionDAG::getAtomic(unsigned Opcode, SDLoc dl, EVT MemVT,
4404                                 SDVTList VTList, ArrayRef<SDValue> Ops,
4405                                 MachineMemOperand *MMO,
4406                                 AtomicOrdering Ordering,
4407                                 SynchronizationScope SynchScope) {
4408   return getAtomic(Opcode, dl, MemVT, VTList, Ops, MMO, Ordering,
4409                    Ordering, SynchScope);
4410 }
4411 
4412 SDValue SelectionDAG::getAtomicCmpSwap(
4413     unsigned Opcode, SDLoc dl, EVT MemVT, SDVTList VTs, SDValue Chain,
4414     SDValue Ptr, SDValue Cmp, SDValue Swp, MachinePointerInfo PtrInfo,
4415     unsigned Alignment, AtomicOrdering SuccessOrdering,
4416     AtomicOrdering FailureOrdering, SynchronizationScope SynchScope) {
4417   assert(Opcode == ISD::ATOMIC_CMP_SWAP ||
4418          Opcode == ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS);
4419   assert(Cmp.getValueType() == Swp.getValueType() && "Invalid Atomic Op Types");
4420 
4421   if (Alignment == 0)  // Ensure that codegen never sees alignment 0
4422     Alignment = getEVTAlignment(MemVT);
4423 
4424   MachineFunction &MF = getMachineFunction();
4425 
4426   // FIXME: Volatile isn't really correct; we should keep track of atomic
4427   // orderings in the memoperand.
4428   unsigned Flags = MachineMemOperand::MOVolatile;
4429   Flags |= MachineMemOperand::MOLoad;
4430   Flags |= MachineMemOperand::MOStore;
4431 
4432   MachineMemOperand *MMO =
4433     MF.getMachineMemOperand(PtrInfo, Flags, MemVT.getStoreSize(), Alignment);
4434 
4435   return getAtomicCmpSwap(Opcode, dl, MemVT, VTs, Chain, Ptr, Cmp, Swp, MMO,
4436                           SuccessOrdering, FailureOrdering, SynchScope);
4437 }
4438 
4439 SDValue SelectionDAG::getAtomicCmpSwap(unsigned Opcode, SDLoc dl, EVT MemVT,
4440                                        SDVTList VTs, SDValue Chain, SDValue Ptr,
4441                                        SDValue Cmp, SDValue Swp,
4442                                        MachineMemOperand *MMO,
4443                                        AtomicOrdering SuccessOrdering,
4444                                        AtomicOrdering FailureOrdering,
4445                                        SynchronizationScope SynchScope) {
4446   assert(Opcode == ISD::ATOMIC_CMP_SWAP ||
4447          Opcode == ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS);
4448   assert(Cmp.getValueType() == Swp.getValueType() && "Invalid Atomic Op Types");
4449 
4450   SDValue Ops[] = {Chain, Ptr, Cmp, Swp};
4451   return getAtomic(Opcode, dl, MemVT, VTs, Ops, MMO,
4452                    SuccessOrdering, FailureOrdering, SynchScope);
4453 }
4454 
4455 SDValue SelectionDAG::getAtomic(unsigned Opcode, SDLoc dl, EVT MemVT,
4456                                 SDValue Chain,
4457                                 SDValue Ptr, SDValue Val,
4458                                 const Value* PtrVal,
4459                                 unsigned Alignment,
4460                                 AtomicOrdering Ordering,
4461                                 SynchronizationScope SynchScope) {
4462   if (Alignment == 0)  // Ensure that codegen never sees alignment 0
4463     Alignment = getEVTAlignment(MemVT);
4464 
4465   MachineFunction &MF = getMachineFunction();
4466   // An atomic store does not load. An atomic load does not store.
4467   // (An atomicrmw obviously both loads and stores.)
4468   // For now, atomics are always considered to be volatile, and they are
4469   // chained as such.
4470   // FIXME: Volatile isn't really correct; we should keep track of atomic
4471   // orderings in the memoperand.
4472   unsigned Flags = MachineMemOperand::MOVolatile;
4473   if (Opcode != ISD::ATOMIC_STORE)
4474     Flags |= MachineMemOperand::MOLoad;
4475   if (Opcode != ISD::ATOMIC_LOAD)
4476     Flags |= MachineMemOperand::MOStore;
4477 
4478   MachineMemOperand *MMO =
4479     MF.getMachineMemOperand(MachinePointerInfo(PtrVal), Flags,
4480                             MemVT.getStoreSize(), Alignment);
4481 
4482   return getAtomic(Opcode, dl, MemVT, Chain, Ptr, Val, MMO,
4483                    Ordering, SynchScope);
4484 }
4485 
4486 SDValue SelectionDAG::getAtomic(unsigned Opcode, SDLoc dl, EVT MemVT,
4487                                 SDValue Chain,
4488                                 SDValue Ptr, SDValue Val,
4489                                 MachineMemOperand *MMO,
4490                                 AtomicOrdering Ordering,
4491                                 SynchronizationScope SynchScope) {
4492   assert((Opcode == ISD::ATOMIC_LOAD_ADD ||
4493           Opcode == ISD::ATOMIC_LOAD_SUB ||
4494           Opcode == ISD::ATOMIC_LOAD_AND ||
4495           Opcode == ISD::ATOMIC_LOAD_OR ||
4496           Opcode == ISD::ATOMIC_LOAD_XOR ||
4497           Opcode == ISD::ATOMIC_LOAD_NAND ||
4498           Opcode == ISD::ATOMIC_LOAD_MIN ||
4499           Opcode == ISD::ATOMIC_LOAD_MAX ||
4500           Opcode == ISD::ATOMIC_LOAD_UMIN ||
4501           Opcode == ISD::ATOMIC_LOAD_UMAX ||
4502           Opcode == ISD::ATOMIC_SWAP ||
4503           Opcode == ISD::ATOMIC_STORE) &&
4504          "Invalid Atomic Op");
4505 
4506   EVT VT = Val.getValueType();
4507 
4508   SDVTList VTs = Opcode == ISD::ATOMIC_STORE ? getVTList(MVT::Other) :
4509                                                getVTList(VT, MVT::Other);
4510   SDValue Ops[] = {Chain, Ptr, Val};
4511   return getAtomic(Opcode, dl, MemVT, VTs, Ops, MMO, Ordering, SynchScope);
4512 }
4513 
4514 SDValue SelectionDAG::getAtomic(unsigned Opcode, SDLoc dl, EVT MemVT,
4515                                 EVT VT, SDValue Chain,
4516                                 SDValue Ptr,
4517                                 MachineMemOperand *MMO,
4518                                 AtomicOrdering Ordering,
4519                                 SynchronizationScope SynchScope) {
4520   assert(Opcode == ISD::ATOMIC_LOAD && "Invalid Atomic Op");
4521 
4522   SDVTList VTs = getVTList(VT, MVT::Other);
4523   SDValue Ops[] = {Chain, Ptr};
4524   return getAtomic(Opcode, dl, MemVT, VTs, Ops, MMO, Ordering, SynchScope);
4525 }
4526 
4527 /// getMergeValues - Create a MERGE_VALUES node from the given operands.
4528 SDValue SelectionDAG::getMergeValues(ArrayRef<SDValue> Ops, SDLoc dl) {
4529   if (Ops.size() == 1)
4530     return Ops[0];
4531 
4532   SmallVector<EVT, 4> VTs;
4533   VTs.reserve(Ops.size());
4534   for (unsigned i = 0; i < Ops.size(); ++i)
4535     VTs.push_back(Ops[i].getValueType());
4536   return getNode(ISD::MERGE_VALUES, dl, getVTList(VTs), Ops);
4537 }
4538 
4539 SDValue
4540 SelectionDAG::getMemIntrinsicNode(unsigned Opcode, SDLoc dl, SDVTList VTList,
4541                                   ArrayRef<SDValue> Ops,
4542                                   EVT MemVT, MachinePointerInfo PtrInfo,
4543                                   unsigned Align, bool Vol,
4544                                   bool ReadMem, bool WriteMem) {
4545   if (Align == 0)  // Ensure that codegen never sees alignment 0
4546     Align = getEVTAlignment(MemVT);
4547 
4548   MachineFunction &MF = getMachineFunction();
4549   unsigned Flags = 0;
4550   if (WriteMem)
4551     Flags |= MachineMemOperand::MOStore;
4552   if (ReadMem)
4553     Flags |= MachineMemOperand::MOLoad;
4554   if (Vol)
4555     Flags |= MachineMemOperand::MOVolatile;
4556   MachineMemOperand *MMO =
4557     MF.getMachineMemOperand(PtrInfo, Flags, MemVT.getStoreSize(), Align);
4558 
4559   return getMemIntrinsicNode(Opcode, dl, VTList, Ops, MemVT, MMO);
4560 }
4561 
4562 SDValue
4563 SelectionDAG::getMemIntrinsicNode(unsigned Opcode, SDLoc dl, SDVTList VTList,
4564                                   ArrayRef<SDValue> Ops, EVT MemVT,
4565                                   MachineMemOperand *MMO) {
4566   assert((Opcode == ISD::INTRINSIC_VOID ||
4567           Opcode == ISD::INTRINSIC_W_CHAIN ||
4568           Opcode == ISD::PREFETCH ||
4569           Opcode == ISD::LIFETIME_START ||
4570           Opcode == ISD::LIFETIME_END ||
4571           (Opcode <= INT_MAX &&
4572            (int)Opcode >= ISD::FIRST_TARGET_MEMORY_OPCODE)) &&
4573          "Opcode is not a memory-accessing opcode!");
4574 
4575   // Memoize the node unless it returns a flag.
4576   MemIntrinsicSDNode *N;
4577   if (VTList.VTs[VTList.NumVTs-1] != MVT::Glue) {
4578     FoldingSetNodeID ID;
4579     AddNodeIDNode(ID, Opcode, VTList, Ops);
4580     ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
4581     void *IP = nullptr;
4582     if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) {
4583       cast<MemIntrinsicSDNode>(E)->refineAlignment(MMO);
4584       return SDValue(E, 0);
4585     }
4586 
4587     N = new (NodeAllocator) MemIntrinsicSDNode(Opcode, dl.getIROrder(),
4588                                                dl.getDebugLoc(), VTList, Ops,
4589                                                MemVT, MMO);
4590     CSEMap.InsertNode(N, IP);
4591   } else {
4592     N = new (NodeAllocator) MemIntrinsicSDNode(Opcode, dl.getIROrder(),
4593                                                dl.getDebugLoc(), VTList, Ops,
4594                                                MemVT, MMO);
4595   }
4596   AllNodes.push_back(N);
4597   return SDValue(N, 0);
4598 }
4599 
4600 /// InferPointerInfo - If the specified ptr/offset is a frame index, infer a
4601 /// MachinePointerInfo record from it.  This is particularly useful because the
4602 /// code generator has many cases where it doesn't bother passing in a
4603 /// MachinePointerInfo to getLoad or getStore when it has "FI+Cst".
4604 static MachinePointerInfo InferPointerInfo(SDValue Ptr, int64_t Offset = 0) {
4605   // If this is FI+Offset, we can model it.
4606   if (const FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Ptr))
4607     return MachinePointerInfo::getFixedStack(FI->getIndex(), Offset);
4608 
4609   // If this is (FI+Offset1)+Offset2, we can model it.
4610   if (Ptr.getOpcode() != ISD::ADD ||
4611       !isa<ConstantSDNode>(Ptr.getOperand(1)) ||
4612       !isa<FrameIndexSDNode>(Ptr.getOperand(0)))
4613     return MachinePointerInfo();
4614 
4615   int FI = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex();
4616   return MachinePointerInfo::getFixedStack(FI, Offset+
4617                        cast<ConstantSDNode>(Ptr.getOperand(1))->getSExtValue());
4618 }
4619 
4620 /// InferPointerInfo - If the specified ptr/offset is a frame index, infer a
4621 /// MachinePointerInfo record from it.  This is particularly useful because the
4622 /// code generator has many cases where it doesn't bother passing in a
4623 /// MachinePointerInfo to getLoad or getStore when it has "FI+Cst".
4624 static MachinePointerInfo InferPointerInfo(SDValue Ptr, SDValue OffsetOp) {
4625   // If the 'Offset' value isn't a constant, we can't handle this.
4626   if (ConstantSDNode *OffsetNode = dyn_cast<ConstantSDNode>(OffsetOp))
4627     return InferPointerInfo(Ptr, OffsetNode->getSExtValue());
4628   if (OffsetOp.getOpcode() == ISD::UNDEF)
4629     return InferPointerInfo(Ptr);
4630   return MachinePointerInfo();
4631 }
4632 
4633 
4634 SDValue
4635 SelectionDAG::getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType,
4636                       EVT VT, SDLoc dl, SDValue Chain,
4637                       SDValue Ptr, SDValue Offset,
4638                       MachinePointerInfo PtrInfo, EVT MemVT,
4639                       bool isVolatile, bool isNonTemporal, bool isInvariant,
4640                       unsigned Alignment, const MDNode *TBAAInfo,
4641                       const MDNode *Ranges) {
4642   assert(Chain.getValueType() == MVT::Other &&
4643         "Invalid chain type");
4644   if (Alignment == 0)  // Ensure that codegen never sees alignment 0
4645     Alignment = getEVTAlignment(VT);
4646 
4647   unsigned Flags = MachineMemOperand::MOLoad;
4648   if (isVolatile)
4649     Flags |= MachineMemOperand::MOVolatile;
4650   if (isNonTemporal)
4651     Flags |= MachineMemOperand::MONonTemporal;
4652   if (isInvariant)
4653     Flags |= MachineMemOperand::MOInvariant;
4654 
4655   // If we don't have a PtrInfo, infer the trivial frame index case to simplify
4656   // clients.
4657   if (PtrInfo.V.isNull())
4658     PtrInfo = InferPointerInfo(Ptr, Offset);
4659 
4660   MachineFunction &MF = getMachineFunction();
4661   MachineMemOperand *MMO =
4662     MF.getMachineMemOperand(PtrInfo, Flags, MemVT.getStoreSize(), Alignment,
4663                             TBAAInfo, Ranges);
4664   return getLoad(AM, ExtType, VT, dl, Chain, Ptr, Offset, MemVT, MMO);
4665 }
4666 
4667 SDValue
4668 SelectionDAG::getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType,
4669                       EVT VT, SDLoc dl, SDValue Chain,
4670                       SDValue Ptr, SDValue Offset, EVT MemVT,
4671                       MachineMemOperand *MMO) {
4672   if (VT == MemVT) {
4673     ExtType = ISD::NON_EXTLOAD;
4674   } else if (ExtType == ISD::NON_EXTLOAD) {
4675     assert(VT == MemVT && "Non-extending load from different memory type!");
4676   } else {
4677     // Extending load.
4678     assert(MemVT.getScalarType().bitsLT(VT.getScalarType()) &&
4679            "Should only be an extending load, not truncating!");
4680     assert(VT.isInteger() == MemVT.isInteger() &&
4681            "Cannot convert from FP to Int or Int -> FP!");
4682     assert(VT.isVector() == MemVT.isVector() &&
4683            "Cannot use an extending load to convert to or from a vector!");
4684     assert((!VT.isVector() ||
4685             VT.getVectorNumElements() == MemVT.getVectorNumElements()) &&
4686            "Cannot use an extending load to change the number of vector elements!");
4687   }
4688 
4689   bool Indexed = AM != ISD::UNINDEXED;
4690   assert((Indexed || Offset.getOpcode() == ISD::UNDEF) &&
4691          "Unindexed load with an offset!");
4692 
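  // Indexed loads also produce the updated base pointer as a second result,
  // so they need an extra value type in the node's VT list.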
4693   SDVTList VTs = Indexed ?
4694     getVTList(VT, Ptr.getValueType(), MVT::Other) : getVTList(VT, MVT::Other);
4695   SDValue Ops[] = { Chain, Ptr, Offset };
4696   FoldingSetNodeID ID;
4697   AddNodeIDNode(ID, ISD::LOAD, VTs, Ops);
4698   ID.AddInteger(MemVT.getRawBits());
4699   ID.AddInteger(encodeMemSDNodeFlags(ExtType, AM, MMO->isVolatile(),
4700                                      MMO->isNonTemporal(),
4701                                      MMO->isInvariant()));
4702   ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
4703   void *IP = nullptr;
4704   if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) {
4705     cast<LoadSDNode>(E)->refineAlignment(MMO);
4706     return SDValue(E, 0);
4707   }
4708   SDNode *N = new (NodeAllocator) LoadSDNode(Ops, dl.getIROrder(),
4709                                              dl.getDebugLoc(), VTs, AM, ExtType,
4710                                              MemVT, MMO);
4711   CSEMap.InsertNode(N, IP);
4712   AllNodes.push_back(N);
4713   return SDValue(N, 0);
4714 }
4715 
4716 SDValue SelectionDAG::getLoad(EVT VT, SDLoc dl,
4717                               SDValue Chain, SDValue Ptr,
4718                               MachinePointerInfo PtrInfo,
4719                               bool isVolatile, bool isNonTemporal,
4720                               bool isInvariant, unsigned Alignment,
4721                               const MDNode *TBAAInfo,
4722                               const MDNode *Ranges) {
4723   SDValue Undef = getUNDEF(Ptr.getValueType());
4724   return getLoad(ISD::UNINDEXED, ISD::NON_EXTLOAD, VT, dl, Chain, Ptr, Undef,
4725                  PtrInfo, VT, isVolatile, isNonTemporal, isInvariant, Alignment,
4726                  TBAAInfo, Ranges);
4727 }
4728 
4729 SDValue SelectionDAG::getLoad(EVT VT, SDLoc dl,
4730                               SDValue Chain, SDValue Ptr,
4731                               MachineMemOperand *MMO) {
4732   SDValue Undef = getUNDEF(Ptr.getValueType());
4733   return getLoad(ISD::UNINDEXED, ISD::NON_EXTLOAD, VT, dl, Chain, Ptr, Undef,
4734                  VT, MMO);
4735 }
4736 
4737 SDValue SelectionDAG::getExtLoad(ISD::LoadExtType ExtType, SDLoc dl, EVT VT,
4738                                  SDValue Chain, SDValue Ptr,
4739                                  MachinePointerInfo PtrInfo, EVT MemVT,
4740                                  bool isVolatile, bool isNonTemporal,
4741                                  unsigned Alignment, const MDNode *TBAAInfo) {
4742   SDValue Undef = getUNDEF(Ptr.getValueType());
4743   return getLoad(ISD::UNINDEXED, ExtType, VT, dl, Chain, Ptr, Undef,
4744                  PtrInfo, MemVT, isVolatile, isNonTemporal, false, Alignment,
4745                  TBAAInfo);
4746 }
4747 
4748 
4749 SDValue SelectionDAG::getExtLoad(ISD::LoadExtType ExtType, SDLoc dl, EVT VT,
4750                                  SDValue Chain, SDValue Ptr, EVT MemVT,
4751                                  MachineMemOperand *MMO) {
4752   SDValue Undef = getUNDEF(Ptr.getValueType());
4753   return getLoad(ISD::UNINDEXED, ExtType, VT, dl, Chain, Ptr, Undef,
4754                  MemVT, MMO);
4755 }
4756 
4757 SDValue
4758 SelectionDAG::getIndexedLoad(SDValue OrigLoad, SDLoc dl, SDValue Base,
4759                              SDValue Offset, ISD::MemIndexedMode AM) {
4760   LoadSDNode *LD = cast<LoadSDNode>(OrigLoad);
4761   assert(LD->getOffset().getOpcode() == ISD::UNDEF &&
4762          "Load is already an indexed load!");
4763   return getLoad(AM, LD->getExtensionType(), OrigLoad.getValueType(), dl,
4764                  LD->getChain(), Base, Offset, LD->getPointerInfo(),
4765                  LD->getMemoryVT(), LD->isVolatile(), LD->isNonTemporal(),
4766                  false, LD->getAlignment());
4767 }
4768 
4769 SDValue SelectionDAG::getStore(SDValue Chain, SDLoc dl, SDValue Val,
4770                                SDValue Ptr, MachinePointerInfo PtrInfo,
4771                                bool isVolatile, bool isNonTemporal,
4772                                unsigned Alignment, const MDNode *TBAAInfo) {
4773   assert(Chain.getValueType() == MVT::Other &&
4774         "Invalid chain type");
4775   if (Alignment == 0)  // Ensure that codegen never sees alignment 0
4776     Alignment = getEVTAlignment(Val.getValueType());
4777 
4778   unsigned Flags = MachineMemOperand::MOStore;
4779   if (isVolatile)
4780     Flags |= MachineMemOperand::MOVolatile;
4781   if (isNonTemporal)
4782     Flags |= MachineMemOperand::MONonTemporal;
4783 
4784   if (PtrInfo.V.isNull())
4785     PtrInfo = InferPointerInfo(Ptr);
4786 
4787   MachineFunction &MF = getMachineFunction();
4788   MachineMemOperand *MMO =
4789     MF.getMachineMemOperand(PtrInfo, Flags,
4790                             Val.getValueType().getStoreSize(), Alignment,
4791                             TBAAInfo);
4792 
4793   return getStore(Chain, dl, Val, Ptr, MMO);
4794 }
4795 
4796 SDValue SelectionDAG::getStore(SDValue Chain, SDLoc dl, SDValue Val,
4797                                SDValue Ptr, MachineMemOperand *MMO) {
4798   assert(Chain.getValueType() == MVT::Other &&
4799         "Invalid chain type");
4800   EVT VT = Val.getValueType();
4801   SDVTList VTs = getVTList(MVT::Other);
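  // Unindexed stores still carry an undef offset operand, so indexed and
  // unindexed stores share the same operand layout.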
4802   SDValue Undef = getUNDEF(Ptr.getValueType());
4803   SDValue Ops[] = { Chain, Val, Ptr, Undef };
4804   FoldingSetNodeID ID;
4805   AddNodeIDNode(ID, ISD::STORE, VTs, Ops);
4806   ID.AddInteger(VT.getRawBits());
4807   ID.AddInteger(encodeMemSDNodeFlags(false, ISD::UNINDEXED, MMO->isVolatile(),
4808                                      MMO->isNonTemporal(), MMO->isInvariant()));
4809   ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
4810   void *IP = nullptr;
4811   if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) {
4812     cast<StoreSDNode>(E)->refineAlignment(MMO);
4813     return SDValue(E, 0);
4814   }
4815   SDNode *N = new (NodeAllocator) StoreSDNode(Ops, dl.getIROrder(),
4816                                               dl.getDebugLoc(), VTs,
4817                                               ISD::UNINDEXED, false, VT, MMO);
4818   CSEMap.InsertNode(N, IP);
4819   AllNodes.push_back(N);
4820   return SDValue(N, 0);
4821 }
4822 
4823 SDValue SelectionDAG::getTruncStore(SDValue Chain, SDLoc dl, SDValue Val,
4824                                     SDValue Ptr, MachinePointerInfo PtrInfo,
4825                                     EVT SVT, bool isVolatile, bool isNonTemporal,
4826                                     unsigned Alignment,
4827                                     const MDNode *TBAAInfo) {
4828   assert(Chain.getValueType() == MVT::Other &&
4829         "Invalid chain type");
4830   if (Alignment == 0)  // Ensure that codegen never sees alignment 0
4831     Alignment = getEVTAlignment(SVT);
4832 
4833   unsigned Flags = MachineMemOperand::MOStore;
4834   if (isVolatile)
4835     Flags |= MachineMemOperand::MOVolatile;
4836   if (isNonTemporal)
4837     Flags |= MachineMemOperand::MONonTemporal;
4838 
4839   if (PtrInfo.V.isNull())
4840     PtrInfo = InferPointerInfo(Ptr);
4841 
4842   MachineFunction &MF = getMachineFunction();
4843   MachineMemOperand *MMO =
4844     MF.getMachineMemOperand(PtrInfo, Flags, SVT.getStoreSize(), Alignment,
4845                             TBAAInfo);
4846 
4847   return getTruncStore(Chain, dl, Val, Ptr, SVT, MMO);
4848 }
4849 
4850 SDValue SelectionDAG::getTruncStore(SDValue Chain, SDLoc dl, SDValue Val,
4851                                     SDValue Ptr, EVT SVT,
4852                                     MachineMemOperand *MMO) {
4853   EVT VT = Val.getValueType();
4854 
4855   assert(Chain.getValueType() == MVT::Other &&
4856         "Invalid chain type");
4857   if (VT == SVT)
4858     return getStore(Chain, dl, Val, Ptr, MMO);
4859 
4860   assert(SVT.getScalarType().bitsLT(VT.getScalarType()) &&
4861          "Should only be a truncating store, not extending!");
4862   assert(VT.isInteger() == SVT.isInteger() &&
4863          "Can't do FP-INT conversion!");
4864   assert(VT.isVector() == SVT.isVector() &&
4865          "Cannot use trunc store to convert to or from a vector!");
4866   assert((!VT.isVector() ||
4867           VT.getVectorNumElements() == SVT.getVectorNumElements()) &&
4868          "Cannot use trunc store to change the number of vector elements!");
4869 
4870   SDVTList VTs = getVTList(MVT::Other);
4871   SDValue Undef = getUNDEF(Ptr.getValueType());
4872   SDValue Ops[] = { Chain, Val, Ptr, Undef };
4873   FoldingSetNodeID ID;
4874   AddNodeIDNode(ID, ISD::STORE, VTs, Ops);
4875   ID.AddInteger(SVT.getRawBits());
4876   ID.AddInteger(encodeMemSDNodeFlags(true, ISD::UNINDEXED, MMO->isVolatile(),
4877                                      MMO->isNonTemporal(), MMO->isInvariant()));
4878   ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
4879   void *IP = nullptr;
4880   if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) {
4881     cast<StoreSDNode>(E)->refineAlignment(MMO);
4882     return SDValue(E, 0);
4883   }
4884   SDNode *N = new (NodeAllocator) StoreSDNode(Ops, dl.getIROrder(),
4885                                               dl.getDebugLoc(), VTs,
4886                                               ISD::UNINDEXED, true, SVT, MMO);
4887   CSEMap.InsertNode(N, IP);
4888   AllNodes.push_back(N);
4889   return SDValue(N, 0);
4890 }
4891 
4892 SDValue
4893 SelectionDAG::getIndexedStore(SDValue OrigStore, SDLoc dl, SDValue Base,
4894                               SDValue Offset, ISD::MemIndexedMode AM) {
4895   StoreSDNode *ST = cast<StoreSDNode>(OrigStore);
4896   assert(ST->getOffset().getOpcode() == ISD::UNDEF &&
4897          "Store is already an indexed store!");
4898   SDVTList VTs = getVTList(Base.getValueType(), MVT::Other);
4899   SDValue Ops[] = { ST->getChain(), ST->getValue(), Base, Offset };
4900   FoldingSetNodeID ID;
4901   AddNodeIDNode(ID, ISD::STORE, VTs, Ops);
4902   ID.AddInteger(ST->getMemoryVT().getRawBits());
4903   ID.AddInteger(ST->getRawSubclassData());
4904   ID.AddInteger(ST->getPointerInfo().getAddrSpace());
4905   void *IP = nullptr;
4906   if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
4907     return SDValue(E, 0);
4908 
4909   SDNode *N = new (NodeAllocator) StoreSDNode(Ops, dl.getIROrder(),
4910                                               dl.getDebugLoc(), VTs, AM,
4911                                               ST->isTruncatingStore(),
4912                                               ST->getMemoryVT(),
4913                                               ST->getMemOperand());
4914   CSEMap.InsertNode(N, IP);
4915   AllNodes.push_back(N);
4916   return SDValue(N, 0);
4917 }
4918 
4919 SDValue SelectionDAG::getVAArg(EVT VT, SDLoc dl,
4920                                SDValue Chain, SDValue Ptr,
4921                                SDValue SV,
4922                                unsigned Align) {
4923   SDValue Ops[] = { Chain, Ptr, SV, getTargetConstant(Align, MVT::i32) };
4924   return getNode(ISD::VAARG, dl, getVTList(VT, MVT::Other), Ops);
4925 }
4926 
4927 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, EVT VT,
4928                               ArrayRef<SDUse> Ops) {
4929   switch (Ops.size()) {
4930   case 0: return getNode(Opcode, DL, VT);
4931   case 1: return getNode(Opcode, DL, VT, static_cast<const SDValue>(Ops[0]));
4932   case 2: return getNode(Opcode, DL, VT, Ops[0], Ops[1]);
4933   case 3: return getNode(Opcode, DL, VT, Ops[0], Ops[1], Ops[2]);
4934   default: break;
4935   }
4936 
4937   // Copy from an SDUse array into an SDValue array for use with
4938   // the regular getNode logic.
4939   SmallVector<SDValue, 8> NewOps(Ops.begin(), Ops.end());
4940   return getNode(Opcode, DL, VT, NewOps);
4941 }
4942 
4943 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, EVT VT,
4944                               ArrayRef<SDValue> Ops) {
4945   unsigned NumOps = Ops.size();
4946   switch (NumOps) {
4947   case 0: return getNode(Opcode, DL, VT);
4948   case 1: return getNode(Opcode, DL, VT, Ops[0]);
4949   case 2: return getNode(Opcode, DL, VT, Ops[0], Ops[1]);
4950   case 3: return getNode(Opcode, DL, VT, Ops[0], Ops[1], Ops[2]);
4951   default: break;
4952   }
4953 
4954   switch (Opcode) {
4955   default: break;
4956   case ISD::SELECT_CC: {
4957     assert(NumOps == 5 && "SELECT_CC takes 5 operands!");
4958     assert(Ops[0].getValueType() == Ops[1].getValueType() &&
4959            "LHS and RHS of condition must have same type!");
4960     assert(Ops[2].getValueType() == Ops[3].getValueType() &&
4961            "True and False arms of SelectCC must have same type!");
4962     assert(Ops[2].getValueType() == VT &&
4963            "select_cc node must be of same type as true and false value!");
4964     break;
4965   }
4966   case ISD::BR_CC: {
4967     assert(NumOps == 5 && "BR_CC takes 5 operands!");
4968     assert(Ops[2].getValueType() == Ops[3].getValueType() &&
4969            "LHS/RHS of comparison should match types!");
4970     break;
4971   }
4972   }
4973 
4974   // Memoize nodes.
4975   SDNode *N;
4976   SDVTList VTs = getVTList(VT);
4977 
4978   if (VT != MVT::Glue) {
4979     FoldingSetNodeID ID;
4980     AddNodeIDNode(ID, Opcode, VTs, Ops);
4981     void *IP = nullptr;
4982 
4983     if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
4984       return SDValue(E, 0);
4985 
4986     N = new (NodeAllocator) SDNode(Opcode, DL.getIROrder(), DL.getDebugLoc(),
4987                                    VTs, Ops);
4988     CSEMap.InsertNode(N, IP);
4989   } else {
4990     N = new (NodeAllocator) SDNode(Opcode, DL.getIROrder(), DL.getDebugLoc(),
4991                                    VTs, Ops);
4992   }
4993 
4994   AllNodes.push_back(N);
4995 #ifndef NDEBUG
4996   VerifySDNode(N);
4997 #endif
4998   return SDValue(N, 0);
4999 }
5000 
5001 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL,
5002                               ArrayRef<EVT> ResultTys, ArrayRef<SDValue> Ops) {
5003   return getNode(Opcode, DL, getVTList(ResultTys), Ops);
5004 }
5005 
5006 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, SDVTList VTList,
5007                               ArrayRef<SDValue> Ops) {
5008   if (VTList.NumVTs == 1)
5009     return getNode(Opcode, DL, VTList.VTs[0], Ops);
5010 
5011 #if 0
5012   switch (Opcode) {
5013   // FIXME: figure out how to safely handle things like
5014   // int foo(int x) { return 1 << (x & 255); }
5015   // int bar() { return foo(256); }
5016   case ISD::SRA_PARTS:
5017   case ISD::SRL_PARTS:
5018   case ISD::SHL_PARTS:
5019     if (N3.getOpcode() == ISD::SIGN_EXTEND_INREG &&
5020         cast<VTSDNode>(N3.getOperand(1))->getVT() != MVT::i1)
5021       return getNode(Opcode, DL, VT, N1, N2, N3.getOperand(0));
5022     else if (N3.getOpcode() == ISD::AND)
5023       if (ConstantSDNode *AndRHS = dyn_cast<ConstantSDNode>(N3.getOperand(1))) {
5024         // If the and is only masking out bits that cannot affect the shift,
5025         // eliminate the and.
5026         unsigned NumBits = VT.getScalarType().getSizeInBits()*2;
5027         if ((AndRHS->getValue() & (NumBits-1)) == NumBits-1)
5028           return getNode(Opcode, DL, VT, N1, N2, N3.getOperand(0));
5029       }
5030     break;
5031   }
5032 #endif
5033 
5034   // Memoize the node unless it returns a flag.
5035   SDNode *N;
5036   unsigned NumOps = Ops.size();
5037   if (VTList.VTs[VTList.NumVTs-1] != MVT::Glue) {
5038     FoldingSetNodeID ID;
5039     AddNodeIDNode(ID, Opcode, VTList, Ops);
5040     void *IP = nullptr;
5041     if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
5042       return SDValue(E, 0);
5043 
5044     if (NumOps == 1) {
5045       N = new (NodeAllocator) UnarySDNode(Opcode, DL.getIROrder(),
5046                                           DL.getDebugLoc(), VTList, Ops[0]);
5047     } else if (NumOps == 2) {
5048       N = new (NodeAllocator) BinarySDNode(Opcode, DL.getIROrder(),
5049                                            DL.getDebugLoc(), VTList, Ops[0],
5050                                            Ops[1]);
5051     } else if (NumOps == 3) {
5052       N = new (NodeAllocator) TernarySDNode(Opcode, DL.getIROrder(),
5053                                             DL.getDebugLoc(), VTList, Ops[0],
5054                                             Ops[1], Ops[2]);
5055     } else {
5056       N = new (NodeAllocator) SDNode(Opcode, DL.getIROrder(), DL.getDebugLoc(),
5057                                      VTList, Ops);
5058     }
5059     CSEMap.InsertNode(N, IP);
5060   } else {
5061     if (NumOps == 1) {
5062       N = new (NodeAllocator) UnarySDNode(Opcode, DL.getIROrder(),
5063                                           DL.getDebugLoc(), VTList, Ops[0]);
5064     } else if (NumOps == 2) {
5065       N = new (NodeAllocator) BinarySDNode(Opcode, DL.getIROrder(),
5066                                            DL.getDebugLoc(), VTList, Ops[0],
5067                                            Ops[1]);
5068     } else if (NumOps == 3) {
5069       N = new (NodeAllocator) TernarySDNode(Opcode, DL.getIROrder(),
5070                                             DL.getDebugLoc(), VTList, Ops[0],
5071                                             Ops[1], Ops[2]);
5072     } else {
5073       N = new (NodeAllocator) SDNode(Opcode, DL.getIROrder(), DL.getDebugLoc(),
5074                                      VTList, Ops);
5075     }
5076   }
5077   AllNodes.push_back(N);
5078 #ifndef NDEBUG
5079   VerifySDNode(N);
5080 #endif
5081   return SDValue(N, 0);
5082 }
5083 
5084 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, SDVTList VTList) {
5085   return getNode(Opcode, DL, VTList, ArrayRef<SDValue>());
5086 }
5087 
5088 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, SDVTList VTList,
5089                               SDValue N1) {
5090   SDValue Ops[] = { N1 };
5091   return getNode(Opcode, DL, VTList, Ops);
5092 }
5093 
5094 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, SDVTList VTList,
5095                               SDValue N1, SDValue N2) {
5096   SDValue Ops[] = { N1, N2 };
5097   return getNode(Opcode, DL, VTList, Ops);
5098 }
5099 
5100 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, SDVTList VTList,
5101                               SDValue N1, SDValue N2, SDValue N3) {
5102   SDValue Ops[] = { N1, N2, N3 };
5103   return getNode(Opcode, DL, VTList, Ops);
5104 }
5105 
5106 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, SDVTList VTList,
5107                               SDValue N1, SDValue N2, SDValue N3,
5108                               SDValue N4) {
5109   SDValue Ops[] = { N1, N2, N3, N4 };
5110   return getNode(Opcode, DL, VTList, Ops);
5111 }
5112 
5113 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, SDVTList VTList,
5114                               SDValue N1, SDValue N2, SDValue N3,
5115                               SDValue N4, SDValue N5) {
5116   SDValue Ops[] = { N1, N2, N3, N4, N5 };
5117   return getNode(Opcode, DL, VTList, Ops);
5118 }
5119 
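// Illustrative sketch (disabled): nodes with more than one result are built
// through the SDVTList-based overloads above.  The opcode and types below are
// placeholders chosen only for this example.
#if 0
static SDValue buildTwoResultNode(SelectionDAG &DAG, SDLoc DL,
                                  SDValue LHS, SDValue RHS) {
  // One data result plus a second result (e.g. an overflow flag).
  SDVTList VTs = DAG.getVTList(LHS.getValueType(), MVT::i1);
  SDValue Sum = DAG.getNode(ISD::UADDO, DL, VTs, LHS, RHS);
  return Sum.getValue(1); // Second result of the same node.
}
#endif
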
5120 SDVTList SelectionDAG::getVTList(EVT VT) {
5121   return makeVTList(SDNode::getValueTypeList(VT), 1);
5122 }
5123 
5124 SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2) {
5125   FoldingSetNodeID ID;
5126   ID.AddInteger(2U);
5127   ID.AddInteger(VT1.getRawBits());
5128   ID.AddInteger(VT2.getRawBits());
5129 
5130   void *IP = nullptr;
5131   SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP);
5132   if (!Result) {
5133     EVT *Array = Allocator.Allocate<EVT>(2);
5134     Array[0] = VT1;
5135     Array[1] = VT2;
5136     Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, 2);
5137     VTListMap.InsertNode(Result, IP);
5138   }
5139   return Result->getSDVTList();
5140 }
5141 
5142 SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2, EVT VT3) {
5143   FoldingSetNodeID ID;
5144   ID.AddInteger(3U);
5145   ID.AddInteger(VT1.getRawBits());
5146   ID.AddInteger(VT2.getRawBits());
5147   ID.AddInteger(VT3.getRawBits());
5148 
5149   void *IP = nullptr;
5150   SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP);
5151   if (!Result) {
5152     EVT *Array = Allocator.Allocate<EVT>(3);
5153     Array[0] = VT1;
5154     Array[1] = VT2;
5155     Array[2] = VT3;
5156     Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, 3);
5157     VTListMap.InsertNode(Result, IP);
5158   }
5159   return Result->getSDVTList();
5160 }
5161 
5162 SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2, EVT VT3, EVT VT4) {
5163   FoldingSetNodeID ID;
5164   ID.AddInteger(4U);
5165   ID.AddInteger(VT1.getRawBits());
5166   ID.AddInteger(VT2.getRawBits());
5167   ID.AddInteger(VT3.getRawBits());
5168   ID.AddInteger(VT4.getRawBits());
5169 
5170   void *IP = nullptr;
5171   SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP);
5172   if (!Result) {
5173     EVT *Array = Allocator.Allocate<EVT>(4);
5174     Array[0] = VT1;
5175     Array[1] = VT2;
5176     Array[2] = VT3;
5177     Array[3] = VT4;
5178     Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, 4);
5179     VTListMap.InsertNode(Result, IP);
5180   }
5181   return Result->getSDVTList();
5182 }
5183 
5184 SDVTList SelectionDAG::getVTList(ArrayRef<EVT> VTs) {
5185   unsigned NumVTs = VTs.size();
5186   FoldingSetNodeID ID;
5187   ID.AddInteger(NumVTs);
5188   for (unsigned index = 0; index < NumVTs; index++) {
5189     ID.AddInteger(VTs[index].getRawBits());
5190   }
5191 
5192   void *IP = nullptr;
5193   SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP);
5194   if (!Result) {
5195     EVT *Array = Allocator.Allocate<EVT>(NumVTs);
5196     std::copy(VTs.begin(), VTs.end(), Array);
5197     Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, NumVTs);
5198     VTListMap.InsertNode(Result, IP);
5199   }
5200   return Result->getSDVTList();
5201 }
5202 
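// Illustrative sketch (disabled): because VT lists are interned in VTListMap
// above, requesting the same combination twice yields pointer-identical
// storage, so SDVTLists can be compared cheaply.  The function name is a
// placeholder for the example.
#if 0
static void exampleVTListInterning(SelectionDAG &DAG) {
  SDVTList A = DAG.getVTList(MVT::i32, MVT::Other);
  SDVTList B = DAG.getVTList(MVT::i32, MVT::Other);
  assert(A.VTs == B.VTs && "Interned VT lists share storage");
}
#endif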
5203 
5204 /// UpdateNodeOperands - *Mutate* the specified node in-place to have the
5205 /// specified operands.  If the resultant node already exists in the DAG,
5206 /// this does not modify the specified node; instead, it returns the node that
5207 /// already exists.  If the resultant node does not exist in the DAG, the
5208 /// input node is returned.  As a degenerate case, if you specify the same
5209 /// input operands as the node already has, the input node is returned.
5210 SDNode *SelectionDAG::UpdateNodeOperands(SDNode *N, SDValue Op) {
5211   assert(N->getNumOperands() == 1 && "Update with wrong number of operands");
5212 
5213   // Check to see if there is no change.
5214   if (Op == N->getOperand(0)) return N;
5215 
5216   // See if the modified node already exists.
5217   void *InsertPos = nullptr;
5218   if (SDNode *Existing = FindModifiedNodeSlot(N, Op, InsertPos))
5219     return Existing;
5220 
5221   // Nope it doesn't.  Remove the node from its current place in the maps.
5222   if (InsertPos)
5223     if (!RemoveNodeFromCSEMaps(N))
5224       InsertPos = nullptr;
5225 
5226   // Now we update the operands.
5227   N->OperandList[0].set(Op);
5228 
5229   // If this gets put into a CSE map, add it.
5230   if (InsertPos) CSEMap.InsertNode(N, InsertPos);
5231   return N;
5232 }
5233 
5234 SDNode *SelectionDAG::UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2) {
5235   assert(N->getNumOperands() == 2 && "Update with wrong number of operands");
5236 
5237   // Check to see if there is no change.
5238   if (Op1 == N->getOperand(0) && Op2 == N->getOperand(1))
5239     return N;   // No operands changed, just return the input node.
5240 
5241   // See if the modified node already exists.
5242   void *InsertPos = nullptr;
5243   if (SDNode *Existing = FindModifiedNodeSlot(N, Op1, Op2, InsertPos))
5244     return Existing;
5245 
5246   // Nope it doesn't.  Remove the node from its current place in the maps.
5247   if (InsertPos)
5248     if (!RemoveNodeFromCSEMaps(N))
5249       InsertPos = nullptr;
5250 
5251   // Now we update the operands.
5252   if (N->OperandList[0] != Op1)
5253     N->OperandList[0].set(Op1);
5254   if (N->OperandList[1] != Op2)
5255     N->OperandList[1].set(Op2);
5256 
5257   // If this gets put into a CSE map, add it.
5258   if (InsertPos) CSEMap.InsertNode(N, InsertPos);
5259   return N;
5260 }
5261 
5262 SDNode *SelectionDAG::
5263 UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2, SDValue Op3) {
5264   SDValue Ops[] = { Op1, Op2, Op3 };
5265   return UpdateNodeOperands(N, Ops);
5266 }
5267 
5268 SDNode *SelectionDAG::
5269 UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2,
5270                    SDValue Op3, SDValue Op4) {
5271   SDValue Ops[] = { Op1, Op2, Op3, Op4 };
5272   return UpdateNodeOperands(N, Ops);
5273 }
5274 
5275 SDNode *SelectionDAG::
5276 UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2,
5277                    SDValue Op3, SDValue Op4, SDValue Op5) {
5278   SDValue Ops[] = { Op1, Op2, Op3, Op4, Op5 };
5279   return UpdateNodeOperands(N, Ops);
5280 }
5281 
5282 SDNode *SelectionDAG::
5283 UpdateNodeOperands(SDNode *N, ArrayRef<SDValue> Ops) {
5284   unsigned NumOps = Ops.size();
5285   assert(N->getNumOperands() == NumOps &&
5286          "Update with wrong number of operands");
5287 
5288   // Check to see if there is no change.
5289   bool AnyChange = false;
5290   for (unsigned i = 0; i != NumOps; ++i) {
5291     if (Ops[i] != N->getOperand(i)) {
5292       AnyChange = true;
5293       break;
5294     }
5295   }
5296 
5297   // No operands changed, just return the input node.
5298   if (!AnyChange) return N;
5299 
5300   // See if the modified node already exists.
5301   void *InsertPos = nullptr;
5302   if (SDNode *Existing = FindModifiedNodeSlot(N, Ops, InsertPos))
5303     return Existing;
5304 
5305   // Nope it doesn't.  Remove the node from its current place in the maps.
5306   if (InsertPos)
5307     if (!RemoveNodeFromCSEMaps(N))
5308       InsertPos = nullptr;
5309 
5310   // Now we update the operands.
5311   for (unsigned i = 0; i != NumOps; ++i)
5312     if (N->OperandList[i] != Ops[i])
5313       N->OperandList[i].set(Ops[i]);
5314 
5315   // If this gets put into a CSE map, add it.
5316   if (InsertPos) CSEMap.InsertNode(N, InsertPos);
5317   return N;
5318 }
5319 
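// Illustrative sketch (disabled): callers must use the node returned by
// UpdateNodeOperands, since CSE may hand back an existing equivalent node
// instead of mutating N.  The helper name is a placeholder for the example.
#if 0
static SDNode *replaceSecondOperand(SelectionDAG &DAG, SDNode *N,
                                    SDValue NewOp) {
  assert(N->getNumOperands() == 2 && "Example assumes a two-operand node");
  return DAG.UpdateNodeOperands(N, N->getOperand(0), NewOp);
}
#endif
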
5320 /// DropOperands - Release the operands and set this node to have
5321 /// zero operands.
5322 void SDNode::DropOperands() {
5323   // Unlike the code in MorphNodeTo that does this, we don't need to
5324   // watch for dead nodes here.
5325   for (op_iterator I = op_begin(), E = op_end(); I != E; ) {
5326     SDUse &Use = *I++;
5327     Use.set(SDValue());
5328   }
5329 }
5330 
5331 /// SelectNodeTo - These are wrappers around MorphNodeTo that accept a
5332 /// machine opcode.
5333 ///
5334 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
5335                                    EVT VT) {
5336   SDVTList VTs = getVTList(VT);
5337   return SelectNodeTo(N, MachineOpc, VTs, None);
5338 }
5339 
5340 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
5341                                    EVT VT, SDValue Op1) {
5342   SDVTList VTs = getVTList(VT);
5343   SDValue Ops[] = { Op1 };
5344   return SelectNodeTo(N, MachineOpc, VTs, Ops);
5345 }
5346 
5347 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
5348                                    EVT VT, SDValue Op1,
5349                                    SDValue Op2) {
5350   SDVTList VTs = getVTList(VT);
5351   SDValue Ops[] = { Op1, Op2 };
5352   return SelectNodeTo(N, MachineOpc, VTs, Ops);
5353 }
5354 
5355 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
5356                                    EVT VT, SDValue Op1,
5357                                    SDValue Op2, SDValue Op3) {
5358   SDVTList VTs = getVTList(VT);
5359   SDValue Ops[] = { Op1, Op2, Op3 };
5360   return SelectNodeTo(N, MachineOpc, VTs, Ops);
5361 }
5362 
5363 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
5364                                    EVT VT, ArrayRef<SDValue> Ops) {
5365   SDVTList VTs = getVTList(VT);
5366   return SelectNodeTo(N, MachineOpc, VTs, Ops);
5367 }
5368 
5369 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
5370                                    EVT VT1, EVT VT2, ArrayRef<SDValue> Ops) {
5371   SDVTList VTs = getVTList(VT1, VT2);
5372   return SelectNodeTo(N, MachineOpc, VTs, Ops);
5373 }
5374 
5375 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
5376                                    EVT VT1, EVT VT2) {
5377   SDVTList VTs = getVTList(VT1, VT2);
5378   return SelectNodeTo(N, MachineOpc, VTs, None);
5379 }
5380 
5381 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
5382                                    EVT VT1, EVT VT2, EVT VT3,
5383                                    ArrayRef<SDValue> Ops) {
5384   SDVTList VTs = getVTList(VT1, VT2, VT3);
5385   return SelectNodeTo(N, MachineOpc, VTs, Ops);
5386 }
5387 
5388 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
5389                                    EVT VT1, EVT VT2, EVT VT3, EVT VT4,
5390                                    ArrayRef<SDValue> Ops) {
5391   SDVTList VTs = getVTList(VT1, VT2, VT3, VT4);
5392   return SelectNodeTo(N, MachineOpc, VTs, Ops);
5393 }
5394 
5395 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
5396                                    EVT VT1, EVT VT2,
5397                                    SDValue Op1) {
5398   SDVTList VTs = getVTList(VT1, VT2);
5399   SDValue Ops[] = { Op1 };
5400   return SelectNodeTo(N, MachineOpc, VTs, Ops);
5401 }
5402 
5403 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
5404                                    EVT VT1, EVT VT2,
5405                                    SDValue Op1, SDValue Op2) {
5406   SDVTList VTs = getVTList(VT1, VT2);
5407   SDValue Ops[] = { Op1, Op2 };
5408   return SelectNodeTo(N, MachineOpc, VTs, Ops);
5409 }
5410 
5411 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
5412                                    EVT VT1, EVT VT2,
5413                                    SDValue Op1, SDValue Op2,
5414                                    SDValue Op3) {
5415   SDVTList VTs = getVTList(VT1, VT2);
5416   SDValue Ops[] = { Op1, Op2, Op3 };
5417   return SelectNodeTo(N, MachineOpc, VTs, Ops);
5418 }
5419 
5420 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
5421                                    EVT VT1, EVT VT2, EVT VT3,
5422                                    SDValue Op1, SDValue Op2,
5423                                    SDValue Op3) {
5424   SDVTList VTs = getVTList(VT1, VT2, VT3);
5425   SDValue Ops[] = { Op1, Op2, Op3 };
5426   return SelectNodeTo(N, MachineOpc, VTs, Ops);
5427 }
5428 
5429 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
5430                                    SDVTList VTs,ArrayRef<SDValue> Ops) {
5431   N = MorphNodeTo(N, ~MachineOpc, VTs, Ops);
5432   // Reset the NodeID to -1.
5433   N->setNodeId(-1);
5434   return N;
5435 }
5436 
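// Illustrative sketch (disabled): during instruction selection a target
// typically rewrites an ISD node in place into a machine opcode with
// SelectNodeTo.  The machine opcode is passed in here because a real value
// would be target-specific.
#if 0
static SDNode *selectBinaryOpExample(SelectionDAG &DAG, SDNode *N,
                                     unsigned HypotheticalMachineOpc) {
  return DAG.SelectNodeTo(N, HypotheticalMachineOpc, N->getValueType(0),
                          N->getOperand(0), N->getOperand(1));
}
#endif
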
5437 /// UpdadeSDLocOnMergedSDNode - If the opt level is -O0 then it throws away
5438 /// the line number information on the merged node, since it is not possible
5439 /// to preserve the information that the operation is associated with multiple
5440 /// lines. This makes the debugger work better at -O0, where there is a higher
5441 /// probability of having other instructions associated with that line.
5442 ///
5443 /// For IROrder, we keep the smaller of the two
5444 SDNode *SelectionDAG::UpdadeSDLocOnMergedSDNode(SDNode *N, SDLoc OLoc) {
5445   DebugLoc NLoc = N->getDebugLoc();
5446   if (!(NLoc.isUnknown()) && (OptLevel == CodeGenOpt::None) &&
5447     (OLoc.getDebugLoc() != NLoc)) {
5448     N->setDebugLoc(DebugLoc());
5449   }
5450   unsigned Order = std::min(N->getIROrder(), OLoc.getIROrder());
5451   N->setIROrder(Order);
5452   return N;
5453 }
5454 
5455 /// MorphNodeTo - This *mutates* the specified node to have the specified
5456 /// return type, opcode, and operands.
5457 ///
5458 /// Note that MorphNodeTo returns the resultant node.  If there is already a
5459 /// node of the specified opcode and operands, it returns that node instead of
5460 /// the current one.  Note that the SDLoc need not be the same.
5461 ///
5462 /// Using MorphNodeTo is faster than creating a new node and swapping it in
5463 /// with ReplaceAllUsesWith both because it often avoids allocating a new
5464 /// node, and because it doesn't require CSE recalculation for any of
5465 /// the node's users.
5466 ///
5467 SDNode *SelectionDAG::MorphNodeTo(SDNode *N, unsigned Opc,
5468                                   SDVTList VTs, ArrayRef<SDValue> Ops) {
5469   unsigned NumOps = Ops.size();
5470   // If an identical node already exists, use it.
5471   void *IP = nullptr;
5472   if (VTs.VTs[VTs.NumVTs-1] != MVT::Glue) {
5473     FoldingSetNodeID ID;
5474     AddNodeIDNode(ID, Opc, VTs, Ops);
5475     if (SDNode *ON = CSEMap.FindNodeOrInsertPos(ID, IP))
5476       return UpdadeSDLocOnMergedSDNode(ON, SDLoc(N));
5477   }
5478 
5479   if (!RemoveNodeFromCSEMaps(N))
5480     IP = nullptr;
5481 
5482   // Start the morphing.
5483   N->NodeType = Opc;
5484   N->ValueList = VTs.VTs;
5485   N->NumValues = VTs.NumVTs;
5486 
5487   // Clear the operands list, updating used nodes to remove this from their
5488   // use list.  Keep track of any operands that become dead as a result.
5489   SmallPtrSet<SDNode*, 16> DeadNodeSet;
5490   for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ) {
5491     SDUse &Use = *I++;
5492     SDNode *Used = Use.getNode();
5493     Use.set(SDValue());
5494     if (Used->use_empty())
5495       DeadNodeSet.insert(Used);
5496   }
5497 
5498   if (MachineSDNode *MN = dyn_cast<MachineSDNode>(N)) {
5499     // Initialize the memory references information.
5500     MN->setMemRefs(nullptr, nullptr);
5501     // If NumOps is larger than the # of operands we can have in a
5502     // MachineSDNode, reallocate the operand list.
5503     if (NumOps > MN->NumOperands || !MN->OperandsNeedDelete) {
5504       if (MN->OperandsNeedDelete)
5505         delete[] MN->OperandList;
5506       if (NumOps > array_lengthof(MN->LocalOperands))
5507         // We're creating a final node that will live unmorphed for the
5508         // remainder of the current SelectionDAG iteration, so we can allocate
5509         // the operands directly out of a pool with no recycling metadata.
5510         MN->InitOperands(OperandAllocator.Allocate<SDUse>(NumOps),
5511                          Ops.data(), NumOps);
5512       else
5513         MN->InitOperands(MN->LocalOperands, Ops.data(), NumOps);
5514       MN->OperandsNeedDelete = false;
5515     } else
5516       MN->InitOperands(MN->OperandList, Ops.data(), NumOps);
5517   } else {
5518     // If NumOps is larger than the # of operands we currently have, reallocate
5519     // the operand list.
5520     if (NumOps > N->NumOperands) {
5521       if (N->OperandsNeedDelete)
5522         delete[] N->OperandList;
5523       N->InitOperands(new SDUse[NumOps], Ops.data(), NumOps);
5524       N->OperandsNeedDelete = true;
5525     } else
5526       N->InitOperands(N->OperandList, Ops.data(), NumOps);
5527   }
5528 
5529   // Delete any nodes that are still dead after adding the uses for the
5530   // new operands.
5531   if (!DeadNodeSet.empty()) {
5532     SmallVector<SDNode *, 16> DeadNodes;
5533     for (SmallPtrSet<SDNode *, 16>::iterator I = DeadNodeSet.begin(),
5534          E = DeadNodeSet.end(); I != E; ++I)
5535       if ((*I)->use_empty())
5536         DeadNodes.push_back(*I);
5537     RemoveDeadNodes(DeadNodes);
5538   }
5539 
5540   if (IP)
5541     CSEMap.InsertNode(N, IP);   // Memoize the new node.
5542   return N;
5543 }
5544 
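// Illustrative sketch (disabled): the slower alternative that MorphNodeTo
// avoids would build a fresh node and then redirect every user, forcing the
// CSE maps to be recomputed for each user.  Names are placeholders.
#if 0
static SDNode *morphTheSlowWay(SelectionDAG &DAG, SDNode *N, unsigned NewOpc,
                               ArrayRef<SDValue> Ops) {
  SDValue New = DAG.getNode(NewOpc, SDLoc(N), N->getValueType(0), Ops);
  DAG.ReplaceAllUsesWith(SDValue(N, 0), New);
  return New.getNode();
}
#endif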
5545 
5546 /// getMachineNode - These are used by target selectors to create a new node
5547 /// with specified return type(s), MachineInstr opcode, and operands.
5548 ///
5549 /// Note that getMachineNode returns the resultant node.  If there is already a
5550 /// node of the specified opcode and operands, it returns that node instead of
5551 /// the current one.
5552 MachineSDNode *
5553 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl, EVT VT) {
5554   SDVTList VTs = getVTList(VT);
5555   return getMachineNode(Opcode, dl, VTs, None);
5556 }
5557 
5558 MachineSDNode *
5559 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl, EVT VT, SDValue Op1) {
5560   SDVTList VTs = getVTList(VT);
5561   SDValue Ops[] = { Op1 };
5562   return getMachineNode(Opcode, dl, VTs, Ops);
5563 }
5564 
5565 MachineSDNode *
5566 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl, EVT VT,
5567                              SDValue Op1, SDValue Op2) {
5568   SDVTList VTs = getVTList(VT);
5569   SDValue Ops[] = { Op1, Op2 };
5570   return getMachineNode(Opcode, dl, VTs, Ops);
5571 }
5572 
5573 MachineSDNode *
5574 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl, EVT VT,
5575                              SDValue Op1, SDValue Op2, SDValue Op3) {
5576   SDVTList VTs = getVTList(VT);
5577   SDValue Ops[] = { Op1, Op2, Op3 };
5578   return getMachineNode(Opcode, dl, VTs, Ops);
5579 }
5580 
5581 MachineSDNode *
5582 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl, EVT VT,
5583                              ArrayRef<SDValue> Ops) {
5584   SDVTList VTs = getVTList(VT);
5585   return getMachineNode(Opcode, dl, VTs, Ops);
5586 }
5587 
5588 MachineSDNode *
5589 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl, EVT VT1, EVT VT2) {
5590   SDVTList VTs = getVTList(VT1, VT2);
5591   return getMachineNode(Opcode, dl, VTs, None);
5592 }
5593 
5594 MachineSDNode *
5595 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl,
5596                              EVT VT1, EVT VT2, SDValue Op1) {
5597   SDVTList VTs = getVTList(VT1, VT2);
5598   SDValue Ops[] = { Op1 };
5599   return getMachineNode(Opcode, dl, VTs, Ops);
5600 }
5601 
5602 MachineSDNode *
5603 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl,
5604                              EVT VT1, EVT VT2, SDValue Op1, SDValue Op2) {
5605   SDVTList VTs = getVTList(VT1, VT2);
5606   SDValue Ops[] = { Op1, Op2 };
5607   return getMachineNode(Opcode, dl, VTs, Ops);
5608 }
5609 
5610 MachineSDNode *
5611 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl,
5612                              EVT VT1, EVT VT2, SDValue Op1,
5613                              SDValue Op2, SDValue Op3) {
5614   SDVTList VTs = getVTList(VT1, VT2);
5615   SDValue Ops[] = { Op1, Op2, Op3 };
5616   return getMachineNode(Opcode, dl, VTs, Ops);
5617 }
5618 
5619 MachineSDNode *
5620 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl,
5621                              EVT VT1, EVT VT2,
5622                              ArrayRef<SDValue> Ops) {
5623   SDVTList VTs = getVTList(VT1, VT2);
5624   return getMachineNode(Opcode, dl, VTs, Ops);
5625 }
5626 
5627 MachineSDNode *
5628 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl,
5629                              EVT VT1, EVT VT2, EVT VT3,
5630                              SDValue Op1, SDValue Op2) {
5631   SDVTList VTs = getVTList(VT1, VT2, VT3);
5632   SDValue Ops[] = { Op1, Op2 };
5633   return getMachineNode(Opcode, dl, VTs, Ops);
5634 }
5635 
5636 MachineSDNode *
5637 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl,
5638                              EVT VT1, EVT VT2, EVT VT3,
5639                              SDValue Op1, SDValue Op2, SDValue Op3) {
5640   SDVTList VTs = getVTList(VT1, VT2, VT3);
5641   SDValue Ops[] = { Op1, Op2, Op3 };
5642   return getMachineNode(Opcode, dl, VTs, Ops);
5643 }
5644 
5645 MachineSDNode *
5646 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl,
5647                              EVT VT1, EVT VT2, EVT VT3,
5648                              ArrayRef<SDValue> Ops) {
5649   SDVTList VTs = getVTList(VT1, VT2, VT3);
5650   return getMachineNode(Opcode, dl, VTs, Ops);
5651 }
5652 
5653 MachineSDNode *
5654 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl, EVT VT1,
5655                              EVT VT2, EVT VT3, EVT VT4,
5656                              ArrayRef<SDValue> Ops) {
5657   SDVTList VTs = getVTList(VT1, VT2, VT3, VT4);
5658   return getMachineNode(Opcode, dl, VTs, Ops);
5659 }
5660 
5661 MachineSDNode *
5662 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl,
5663                              ArrayRef<EVT> ResultTys,
5664                              ArrayRef<SDValue> Ops) {
5665   SDVTList VTs = getVTList(ResultTys);
5666   return getMachineNode(Opcode, dl, VTs, Ops);
5667 }
5668 
5669 MachineSDNode *
5670 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc DL, SDVTList VTs,
5671                              ArrayRef<SDValue> OpsArray) {
5672   bool DoCSE = VTs.VTs[VTs.NumVTs-1] != MVT::Glue;
5673   MachineSDNode *N;
5674   void *IP = nullptr;
5675   const SDValue *Ops = OpsArray.data();
5676   unsigned NumOps = OpsArray.size();
5677 
5678   if (DoCSE) {
5679     FoldingSetNodeID ID;
5680     AddNodeIDNode(ID, ~Opcode, VTs, OpsArray);
5681     IP = nullptr;
5682     if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) {
5683       return cast<MachineSDNode>(UpdadeSDLocOnMergedSDNode(E, DL));
5684     }
5685   }
5686 
5687   // Allocate a new MachineSDNode.
5688   N = new (NodeAllocator) MachineSDNode(~Opcode, DL.getIROrder(),
5689                                         DL.getDebugLoc(), VTs);
5690 
5691   // Initialize the operands list.
5692   if (NumOps > array_lengthof(N->LocalOperands))
5693     // We're creating a final node that will live unmorphed for the
5694     // remainder of the current SelectionDAG iteration, so we can allocate
5695     // the operands directly out of a pool with no recycling metadata.
5696     N->InitOperands(OperandAllocator.Allocate<SDUse>(NumOps),
5697                     Ops, NumOps);
5698   else
5699     N->InitOperands(N->LocalOperands, Ops, NumOps);
5700   N->OperandsNeedDelete = false;
5701 
5702   if (DoCSE)
5703     CSEMap.InsertNode(N, IP);
5704 
5705   AllNodes.push_back(N);
5706 #ifndef NDEBUG
5707   VerifyMachineNode(N);
5708 #endif
5709   return N;
5710 }
5711 
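// Illustrative sketch (disabled): a target selector producing a machine node
// that defines both a value and a chain would use one of the two-VT overloads
// above.  The opcode parameter stands in for a real target instruction.
#if 0
static MachineSDNode *buildLoadLikeNode(SelectionDAG &DAG, SDLoc DL, EVT VT,
                                        unsigned HypotheticalLoadOpc,
                                        SDValue Addr, SDValue Chain) {
  return DAG.getMachineNode(HypotheticalLoadOpc, DL, VT, MVT::Other,
                            Addr, Chain);
}
#endif
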
5712 /// getTargetExtractSubreg - A convenience function for creating
5713 /// TargetOpcode::EXTRACT_SUBREG nodes.
5714 SDValue
5715 SelectionDAG::getTargetExtractSubreg(int SRIdx, SDLoc DL, EVT VT,
5716                                      SDValue Operand) {
5717   SDValue SRIdxVal = getTargetConstant(SRIdx, MVT::i32);
5718   SDNode *Subreg = getMachineNode(TargetOpcode::EXTRACT_SUBREG, DL,
5719                                   VT, Operand, SRIdxVal);
5720   return SDValue(Subreg, 0);
5721 }
5722 
5723 /// getTargetInsertSubreg - A convenience function for creating
5724 /// TargetOpcode::INSERT_SUBREG nodes.
5725 SDValue
5726 SelectionDAG::getTargetInsertSubreg(int SRIdx, SDLoc DL, EVT VT,
5727                                     SDValue Operand, SDValue Subreg) {
5728   SDValue SRIdxVal = getTargetConstant(SRIdx, MVT::i32);
5729   SDNode *Result = getMachineNode(TargetOpcode::INSERT_SUBREG, DL,
5730                                   VT, Operand, Subreg, SRIdxVal);
5731   return SDValue(Result, 0);
5732 }
5733 
5734 /// getNodeIfExists - Get the specified node if it's already available, or
5735 /// else return nullptr.
5736 SDNode *SelectionDAG::getNodeIfExists(unsigned Opcode, SDVTList VTList,
5737                                       ArrayRef<SDValue> Ops, bool nuw, bool nsw,
5738                                       bool exact) {
5739   if (VTList.VTs[VTList.NumVTs - 1] != MVT::Glue) {
5740     FoldingSetNodeID ID;
5741     AddNodeIDNode(ID, Opcode, VTList, Ops);
5742     if (isBinOpWithFlags(Opcode))
5743       AddBinaryNodeIDCustom(ID, nuw, nsw, exact);
5744     void *IP = nullptr;
5745     if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
5746       return E;
5747   }
5748   return nullptr;
5749 }
5750 
5751 /// getDbgValue - Creates a SDDbgValue node.
5752 ///
5753 /// SDNode
5754 SDDbgValue *
5755 SelectionDAG::getDbgValue(MDNode *MDPtr, SDNode *N, unsigned R,
5756 			  bool IsIndirect, uint64_t Off,
5757                           DebugLoc DL, unsigned O) {
5758   return new (Allocator) SDDbgValue(MDPtr, N, R, IsIndirect, Off, DL, O);
5759 }
5760 
5761 /// Constant
5762 SDDbgValue *
5763 SelectionDAG::getConstantDbgValue(MDNode *MDPtr, const Value *C,
5764 				  uint64_t Off,
5765 				  DebugLoc DL, unsigned O) {
5766   return new (Allocator) SDDbgValue(MDPtr, C, Off, DL, O);
5767 }
5768 
5769 /// FrameIndex
5770 SDDbgValue *
5771 SelectionDAG::getFrameIndexDbgValue(MDNode *MDPtr, unsigned FI, uint64_t Off,
5772 				    DebugLoc DL, unsigned O) {
5773   return new (Allocator) SDDbgValue(MDPtr, FI, Off, DL, O);
5774 }
5775 
5776 namespace {
5777 
5778 /// RAUWUpdateListener - Helper for ReplaceAllUsesWith - When the node
5779 /// pointed to by a use iterator is deleted, increment the use iterator
5780 /// so that it doesn't dangle.
5781 ///
5782 class RAUWUpdateListener : public SelectionDAG::DAGUpdateListener {
5783   SDNode::use_iterator &UI;
5784   SDNode::use_iterator &UE;
5785 
5786   void NodeDeleted(SDNode *N, SDNode *E) override {
5787     // Increment the iterator as needed.
5788     while (UI != UE && N == *UI)
5789       ++UI;
5790   }
5791 
5792 public:
5793   RAUWUpdateListener(SelectionDAG &d,
5794                      SDNode::use_iterator &ui,
5795                      SDNode::use_iterator &ue)
5796     : SelectionDAG::DAGUpdateListener(d), UI(ui), UE(ue) {}
5797 };
5798 
5799 }
5800 
5801 /// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead.
5802 /// This can cause recursive merging of nodes in the DAG.
5803 ///
5804 /// This version assumes From has a single result value.
5805 ///
5806 void SelectionDAG::ReplaceAllUsesWith(SDValue FromN, SDValue To) {
5807   SDNode *From = FromN.getNode();
5808   assert(From->getNumValues() == 1 && FromN.getResNo() == 0 &&
5809          "Cannot replace with this method!");
5810   assert(From != To.getNode() && "Cannot replace uses with self");
5811 
5812   // Iterate over all the existing uses of From. New uses will be added
5813   // to the beginning of the use list, which we avoid visiting.
5814   // This specifically avoids visiting uses of From that arise while the
5815   // replacement is happening, because any such uses would be the result
5816   // of CSE: If an existing node looks like From after one of its operands
5817   // is replaced by To, we don't want to replace all of its uses with To
5818   // too. See PR3018 for more info.
5819   SDNode::use_iterator UI = From->use_begin(), UE = From->use_end();
5820   RAUWUpdateListener Listener(*this, UI, UE);
5821   while (UI != UE) {
5822     SDNode *User = *UI;
5823 
5824     // This node is about to morph, remove its old self from the CSE maps.
5825     RemoveNodeFromCSEMaps(User);
5826 
5827     // A user can appear in a use list multiple times, and when this
5828     // happens the uses are usually next to each other in the list.
5829     // To help reduce the number of CSE recomputations, process all
5830     // the uses of this user that we can find this way.
5831     do {
5832       SDUse &Use = UI.getUse();
5833       ++UI;
5834       Use.set(To);
5835     } while (UI != UE && *UI == User);
5836 
5837     // Now that we have modified User, add it back to the CSE maps.  If it
5838     // already exists there, recursively merge the results together.
5839     AddModifiedNodeToCSEMaps(User);
5840   }
5841 
5842   // If we just RAUW'd the root, take note.
5843   if (FromN == getRoot())
5844     setRoot(To);
5845 }
5846 
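// Illustrative sketch (disabled): a DAG combine that has computed a simplified
// value typically finishes by redirecting all users of the old single-result
// node, as the overload above does.  The helper name is a placeholder.
#if 0
static void commitSimplification(SelectionDAG &DAG, SDNode *Old,
                                 SDValue Simplified) {
  assert(Old->getNumValues() == 1 && "This overload expects one result value");
  DAG.ReplaceAllUsesWith(SDValue(Old, 0), Simplified);
}
#endif
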
5847 /// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead.
5848 /// This can cause recursive merging of nodes in the DAG.
5849 ///
5850 /// This version assumes that for each value of From, there is a
5851 /// corresponding value in To in the same position with the same type.
5852 ///
5853 void SelectionDAG::ReplaceAllUsesWith(SDNode *From, SDNode *To) {
5854 #ifndef NDEBUG
5855   for (unsigned i = 0, e = From->getNumValues(); i != e; ++i)
5856     assert((!From->hasAnyUseOfValue(i) ||
5857             From->getValueType(i) == To->getValueType(i)) &&
5858            "Cannot use this version of ReplaceAllUsesWith!");
5859 #endif
5860 
5861   // Handle the trivial case.
5862   if (From == To)
5863     return;
5864 
5865   // Iterate over just the existing users of From. See the comments in
5866   // the ReplaceAllUsesWith above.
5867   SDNode::use_iterator UI = From->use_begin(), UE = From->use_end();
5868   RAUWUpdateListener Listener(*this, UI, UE);
5869   while (UI != UE) {
5870     SDNode *User = *UI;
5871 
5872     // This node is about to morph, remove its old self from the CSE maps.
5873     RemoveNodeFromCSEMaps(User);
5874 
5875     // A user can appear in a use list multiple times, and when this
5876     // happens the uses are usually next to each other in the list.
5877     // To help reduce the number of CSE recomputations, process all
5878     // the uses of this user that we can find this way.
5879     do {
5880       SDUse &Use = UI.getUse();
5881       ++UI;
5882       Use.setNode(To);
5883     } while (UI != UE && *UI == User);
5884 
5885     // Now that we have modified User, add it back to the CSE maps.  If it
5886     // already exists there, recursively merge the results together.
5887     AddModifiedNodeToCSEMaps(User);
5888   }
5889 
5890   // If we just RAUW'd the root, take note.
5891   if (From == getRoot().getNode())
5892     setRoot(SDValue(To, getRoot().getResNo()));
5893 }
5894 
5895 /// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead.
5896 /// This can cause recursive merging of nodes in the DAG.
5897 ///
5898 /// This version can replace From with any result values.  To must match the
5899 /// number and types of values returned by From.
5900 void SelectionDAG::ReplaceAllUsesWith(SDNode *From, const SDValue *To) {
5901   if (From->getNumValues() == 1)  // Handle the simple case efficiently.
5902     return ReplaceAllUsesWith(SDValue(From, 0), To[0]);
5903 
5904   // Iterate over just the existing users of From. See the comments in
5905   // the ReplaceAllUsesWith above.
5906   SDNode::use_iterator UI = From->use_begin(), UE = From->use_end();
5907   RAUWUpdateListener Listener(*this, UI, UE);
5908   while (UI != UE) {
5909     SDNode *User = *UI;
5910 
5911     // This node is about to morph, remove its old self from the CSE maps.
5912     RemoveNodeFromCSEMaps(User);
5913 
5914     // A user can appear in a use list multiple times, and when this
5915     // happens the uses are usually next to each other in the list.
5916     // To help reduce the number of CSE recomputations, process all
5917     // the uses of this user that we can find this way.
5918     do {
5919       SDUse &Use = UI.getUse();
5920       const SDValue &ToOp = To[Use.getResNo()];
5921       ++UI;
5922       Use.set(ToOp);
5923     } while (UI != UE && *UI == User);
5924 
5925     // Now that we have modified User, add it back to the CSE maps.  If it
5926     // already exists there, recursively merge the results together.
5927     AddModifiedNodeToCSEMaps(User);
5928   }
5929 
5930   // If we just RAUW'd the root, take note.
5931   if (From == getRoot().getNode())
5932     setRoot(SDValue(To[getRoot().getResNo()]));
5933 }
5934 
5935 /// ReplaceAllUsesOfValueWith - Replace any uses of From with To, leaving
5936 /// uses of other values produced by From.getNode() alone.  The Deleted
5937 /// vector is handled the same way as for ReplaceAllUsesWith.
5938 void SelectionDAG::ReplaceAllUsesOfValueWith(SDValue From, SDValue To){
5939   // Handle the really simple, really trivial case efficiently.
5940   if (From == To) return;
5941 
5942   // Handle the simple, trivial case efficiently.
5943   if (From.getNode()->getNumValues() == 1) {
5944     ReplaceAllUsesWith(From, To);
5945     return;
5946   }
5947 
5948   // Iterate over just the existing users of From. See the comments in
5949   // the ReplaceAllUsesWith above.
5950   SDNode::use_iterator UI = From.getNode()->use_begin(),
5951                        UE = From.getNode()->use_end();
5952   RAUWUpdateListener Listener(*this, UI, UE);
5953   while (UI != UE) {
5954     SDNode *User = *UI;
5955     bool UserRemovedFromCSEMaps = false;
5956 
5957     // A user can appear in a use list multiple times, and when this
5958     // happens the uses are usually next to each other in the list.
5959     // To help reduce the number of CSE recomputations, process all
5960     // the uses of this user that we can find this way.
5961     do {
5962       SDUse &Use = UI.getUse();
5963 
5964       // Skip uses of different values from the same node.
5965       if (Use.getResNo() != From.getResNo()) {
5966         ++UI;
5967         continue;
5968       }
5969 
5970       // If this node hasn't been modified yet, it's still in the CSE maps,
5971       // so remove its old self from the CSE maps.
5972       if (!UserRemovedFromCSEMaps) {
5973         RemoveNodeFromCSEMaps(User);
5974         UserRemovedFromCSEMaps = true;
5975       }
5976 
5977       ++UI;
5978       Use.set(To);
5979     } while (UI != UE && *UI == User);
5980 
5981     // We are iterating over all uses of the From node, so if a use
5982     // doesn't use the specific value, no changes are made.
5983     if (!UserRemovedFromCSEMaps)
5984       continue;
5985 
5986     // Now that we have modified User, add it back to the CSE maps.  If it
5987     // already exists there, recursively merge the results together.
5988     AddModifiedNodeToCSEMaps(User);
5989   }
5990 
5991   // If we just RAUW'd the root, take note.
5992   if (From == getRoot())
5993     setRoot(To);
5994 }
5995 
5996 namespace {
5997   /// UseMemo - This class is used by SelectionDAG::ReplaceAllUsesOfValuesWith
5998   /// to record information about a use.
5999   struct UseMemo {
6000     SDNode *User;
6001     unsigned Index;
6002     SDUse *Use;
6003   };
6004 
6005   /// operator< - Sort Memos by User.
6006   bool operator<(const UseMemo &L, const UseMemo &R) {
6007     return (intptr_t)L.User < (intptr_t)R.User;
6008   }
6009 }
6010 
6011 /// ReplaceAllUsesOfValuesWith - Replace any uses of From with To, leaving
6012 /// uses of other values produced by From.getNode() alone.  The same value
6013 /// may appear in both the From and To list.  The Deleted vector is
6014 /// handled the same way as for ReplaceAllUsesWith.
6015 void SelectionDAG::ReplaceAllUsesOfValuesWith(const SDValue *From,
6016                                               const SDValue *To,
6017                                               unsigned Num){
6018   // Handle the simple, trivial case efficiently.
6019   if (Num == 1)
6020     return ReplaceAllUsesOfValueWith(*From, *To);
6021 
6022   // Read up all the uses and make records of them. This helps
6023   // processing new uses that are introduced during the
6024   // replacement process.
6025   SmallVector<UseMemo, 4> Uses;
6026   for (unsigned i = 0; i != Num; ++i) {
6027     unsigned FromResNo = From[i].getResNo();
6028     SDNode *FromNode = From[i].getNode();
6029     for (SDNode::use_iterator UI = FromNode->use_begin(),
6030          E = FromNode->use_end(); UI != E; ++UI) {
6031       SDUse &Use = UI.getUse();
6032       if (Use.getResNo() == FromResNo) {
6033         UseMemo Memo = { *UI, i, &Use };
6034         Uses.push_back(Memo);
6035       }
6036     }
6037   }
6038 
6039   // Sort the uses, so that all the uses from a given User are together.
6040   std::sort(Uses.begin(), Uses.end());
6041 
6042   for (unsigned UseIndex = 0, UseIndexEnd = Uses.size();
6043        UseIndex != UseIndexEnd; ) {
6044     // We know that this user uses some value of From.  If it is the right
6045     // value, update it.
6046     SDNode *User = Uses[UseIndex].User;
6047 
6048     // This node is about to morph, remove its old self from the CSE maps.
6049     RemoveNodeFromCSEMaps(User);
6050 
6051     // The Uses array is sorted, so all the uses for a given User
6052     // are next to each other in the list.
6053     // To help reduce the number of CSE recomputations, process all
6054     // the uses of this user that we can find this way.
6055     do {
6056       unsigned i = Uses[UseIndex].Index;
6057       SDUse &Use = *Uses[UseIndex].Use;
6058       ++UseIndex;
6059 
6060       Use.set(To[i]);
6061     } while (UseIndex != UseIndexEnd && Uses[UseIndex].User == User);
6062 
6063     // Now that we have modified User, add it back to the CSE maps.  If it
6064     // already exists there, recursively merge the results together.
6065     AddModifiedNodeToCSEMaps(User);
6066   }
6067 }
6068 
6069 /// AssignTopologicalOrder - Assign a unique node id for each node in the DAG
6070 /// based on their topological order. It returns the number of nodes assigned,
6071 /// leaving the AllNodes list itself in topological order.
6072 unsigned SelectionDAG::AssignTopologicalOrder() {
6073 
6074   unsigned DAGSize = 0;
6075 
6076   // SortedPos tracks the progress of the algorithm. Nodes before it are
6077   // sorted, nodes after it are unsorted. When the algorithm completes
6078   // it is at the end of the list.
6079   allnodes_iterator SortedPos = allnodes_begin();
6080 
6081   // Visit all the nodes. Move nodes with no operands to the front of
6082   // the list immediately. Annotate nodes that do have operands with their
6083   // operand count. Before we do this, the Node Id fields of the nodes
6084   // may contain arbitrary values. After, the Node Id fields for nodes
6085   // before SortedPos will contain the topological sort index, and the
6086   // Node Id fields for nodes at SortedPos and after will contain the
6087   // count of outstanding operands.
6088   for (allnodes_iterator I = allnodes_begin(),E = allnodes_end(); I != E; ) {
6089     SDNode *N = I++;
6090     checkForCycles(N, this);
6091     unsigned Degree = N->getNumOperands();
6092     if (Degree == 0) {
6093       // A node with no operands, add it to the result array immediately.
6094       N->setNodeId(DAGSize++);
6095       allnodes_iterator Q = N;
6096       if (Q != SortedPos)
6097         SortedPos = AllNodes.insert(SortedPos, AllNodes.remove(Q));
6098       assert(SortedPos != AllNodes.end() && "Overran node list");
6099       ++SortedPos;
6100     } else {
6101       // Temporarily use the Node Id as scratch space for the degree count.
6102       N->setNodeId(Degree);
6103     }
6104   }
6105 
6106   // Visit all the nodes. As we iterate, move nodes into sorted order,
6107   // such that by the time the end is reached all nodes will be sorted.
6108   for (allnodes_iterator I = allnodes_begin(),E = allnodes_end(); I != E; ++I) {
6109     SDNode *N = I;
6110     checkForCycles(N, this);
6111     // N is in sorted position, so all its uses have one less operand
6112     // that needs to be sorted.
6113     for (SDNode::use_iterator UI = N->use_begin(), UE = N->use_end();
6114          UI != UE; ++UI) {
6115       SDNode *P = *UI;
6116       unsigned Degree = P->getNodeId();
6117       assert(Degree != 0 && "Invalid node degree");
6118       --Degree;
6119       if (Degree == 0) {
6120         // All of P's operands are sorted, so P may be sorted now.
6121         P->setNodeId(DAGSize++);
6122         if (P != SortedPos)
6123           SortedPos = AllNodes.insert(SortedPos, AllNodes.remove(P));
6124         assert(SortedPos != AllNodes.end() && "Overran node list");
6125         ++SortedPos;
6126       } else {
6127         // Update P's outstanding operand count.
6128         P->setNodeId(Degree);
6129       }
6130     }
6131     if (I == SortedPos) {
6132 #ifndef NDEBUG
6133       SDNode *S = ++I;
6134       dbgs() << "Overran sorted position:\n";
6135       S->dumprFull(this); dbgs() << "\n";
6136       dbgs() << "Checking if this is due to cycles\n";
6137       checkForCycles(this, true);
6138 #endif
6139       llvm_unreachable(nullptr);
6140     }
6141   }
6142 
6143   assert(SortedPos == AllNodes.end() &&
6144          "Topological sort incomplete!");
6145   assert(AllNodes.front().getOpcode() == ISD::EntryToken &&
6146          "First node in topological sort is not the entry token!");
6147   assert(AllNodes.front().getNodeId() == 0 &&
6148          "First node in topological sort has non-zero id!");
6149   assert(AllNodes.front().getNumOperands() == 0 &&
6150          "First node in topological sort has operands!");
6151   assert(AllNodes.back().getNodeId() == (int)DAGSize-1 &&
6152          "Last node in topological sort has unexpected id!");
6153   assert(AllNodes.back().use_empty() &&
6154          "Last node in topological sort has users!");
6155   assert(DAGSize == allnodes_size() && "Node count mismatch!");
6156   return DAGSize;
6157 }
6158 
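// Illustrative sketch (disabled): after the sort, the node list can be walked
// so that every operand is visited before its users.  The visitor below is a
// placeholder for the example.
#if 0
static void visitInTopologicalOrder(SelectionDAG &DAG) {
  DAG.AssignTopologicalOrder();
  for (SelectionDAG::allnodes_iterator I = DAG.allnodes_begin(),
       E = DAG.allnodes_end(); I != E; ++I) {
    SDNode *N = I; // All operands of N have already been visited.
    (void)N;
  }
}
#endif
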
6159 /// AddDbgValue - Add a dbg_value SDNode. If SD is non-null that means the
6160 /// value is produced by SD.
6161 void SelectionDAG::AddDbgValue(SDDbgValue *DB, SDNode *SD, bool isParameter) {
6162   DbgInfo->add(DB, SD, isParameter);
6163   if (SD)
6164     SD->setHasDebugValue(true);
6165 }
6166 
6167 /// TransferDbgValues - Transfer SDDbgValues.
6168 void SelectionDAG::TransferDbgValues(SDValue From, SDValue To) {
6169   if (From == To || !From.getNode()->getHasDebugValue())
6170     return;
6171   SDNode *FromNode = From.getNode();
6172   SDNode *ToNode = To.getNode();
6173   ArrayRef<SDDbgValue *> DVs = GetDbgValues(FromNode);
6174   SmallVector<SDDbgValue *, 2> ClonedDVs;
6175   for (ArrayRef<SDDbgValue *>::iterator I = DVs.begin(), E = DVs.end();
6176        I != E; ++I) {
6177     SDDbgValue *Dbg = *I;
6178     if (Dbg->getKind() == SDDbgValue::SDNODE) {
6179       SDDbgValue *Clone = getDbgValue(Dbg->getMDPtr(), ToNode, To.getResNo(),
6180 				      Dbg->isIndirect(),
6181                                       Dbg->getOffset(), Dbg->getDebugLoc(),
6182                                       Dbg->getOrder());
6183       ClonedDVs.push_back(Clone);
6184     }
6185   }
6186   for (SmallVectorImpl<SDDbgValue *>::iterator I = ClonedDVs.begin(),
6187          E = ClonedDVs.end(); I != E; ++I)
6188     AddDbgValue(*I, ToNode, false);
6189 }
6190 
6191 //===----------------------------------------------------------------------===//
6192 //                              SDNode Class
6193 //===----------------------------------------------------------------------===//
6194 
6195 HandleSDNode::~HandleSDNode() {
6196   DropOperands();
6197 }
6198 
6199 GlobalAddressSDNode::GlobalAddressSDNode(unsigned Opc, unsigned Order,
6200                                          DebugLoc DL, const GlobalValue *GA,
6201                                          EVT VT, int64_t o, unsigned char TF)
6202   : SDNode(Opc, Order, DL, getSDVTList(VT)), Offset(o), TargetFlags(TF) {
6203   TheGlobal = GA;
6204 }
6205 
6206 AddrSpaceCastSDNode::AddrSpaceCastSDNode(unsigned Order, DebugLoc dl, EVT VT,
6207                                          SDValue X, unsigned SrcAS,
6208                                          unsigned DestAS)
6209  : UnarySDNode(ISD::ADDRSPACECAST, Order, dl, getSDVTList(VT), X),
6210    SrcAddrSpace(SrcAS), DestAddrSpace(DestAS) {}
6211 
6212 MemSDNode::MemSDNode(unsigned Opc, unsigned Order, DebugLoc dl, SDVTList VTs,
6213                      EVT memvt, MachineMemOperand *mmo)
6214  : SDNode(Opc, Order, dl, VTs), MemoryVT(memvt), MMO(mmo) {
6215   SubclassData = encodeMemSDNodeFlags(0, ISD::UNINDEXED, MMO->isVolatile(),
6216                                       MMO->isNonTemporal(), MMO->isInvariant());
6217   assert(isVolatile() == MMO->isVolatile() && "Volatile encoding error!");
6218   assert(isNonTemporal() == MMO->isNonTemporal() &&
6219          "Non-temporal encoding error!");
6220   assert(memvt.getStoreSize() == MMO->getSize() && "Size mismatch!");
6221 }
6222 
6223 MemSDNode::MemSDNode(unsigned Opc, unsigned Order, DebugLoc dl, SDVTList VTs,
6224                      ArrayRef<SDValue> Ops, EVT memvt, MachineMemOperand *mmo)
6225    : SDNode(Opc, Order, dl, VTs, Ops),
6226      MemoryVT(memvt), MMO(mmo) {
6227   SubclassData = encodeMemSDNodeFlags(0, ISD::UNINDEXED, MMO->isVolatile(),
6228                                       MMO->isNonTemporal(), MMO->isInvariant());
6229   assert(isVolatile() == MMO->isVolatile() && "Volatile encoding error!");
6230   assert(memvt.getStoreSize() == MMO->getSize() && "Size mismatch!");
6231 }
6232 
6233 /// Profile - Gather unique data for the node.
6234 ///
6235 void SDNode::Profile(FoldingSetNodeID &ID) const {
6236   AddNodeIDNode(ID, this);
6237 }
6238 
6239 namespace {
6240   struct EVTArray {
6241     std::vector<EVT> VTs;
6242 
6243     EVTArray() {
6244       VTs.reserve(MVT::LAST_VALUETYPE);
6245       for (unsigned i = 0; i < MVT::LAST_VALUETYPE; ++i)
6246         VTs.push_back(MVT((MVT::SimpleValueType)i));
6247     }
6248   };
6249 }
6250 
6251 static ManagedStatic<std::set<EVT, EVT::compareRawBits> > EVTs;
6252 static ManagedStatic<EVTArray> SimpleVTArray;
6253 static ManagedStatic<sys::SmartMutex<true> > VTMutex;
6254 
6255 /// getValueTypeList - Return a pointer to the specified value type.
6256 ///
6257 const EVT *SDNode::getValueTypeList(EVT VT) {
6258   if (VT.isExtended()) {
6259     sys::SmartScopedLock<true> Lock(*VTMutex);
6260     return &(*EVTs->insert(VT).first);
6261   } else {
6262     assert(VT.getSimpleVT() < MVT::LAST_VALUETYPE &&
6263            "Value type out of range!");
6264     return &SimpleVTArray->VTs[VT.getSimpleVT().SimpleTy];
6265   }
6266 }
6267 
6268 /// hasNUsesOfValue - Return true if there are exactly NUSES uses of the
6269 /// indicated value.  This method ignores uses of other values defined by this
6270 /// operation.
6271 bool SDNode::hasNUsesOfValue(unsigned NUses, unsigned Value) const {
6272   assert(Value < getNumValues() && "Bad value!");
6273 
6274   // TODO: Only iterate over uses of a given value of the node
6275   for (SDNode::use_iterator UI = use_begin(), E = use_end(); UI != E; ++UI) {
6276     if (UI.getUse().getResNo() == Value) {
6277       if (NUses == 0)
6278         return false;
6279       --NUses;
6280     }
6281   }
6282 
6283   // Found exactly the right number of uses?
6284   return NUses == 0;
6285 }
6286 
6287 
6288 /// hasAnyUseOfValue - Return true if there is any use of the indicated
6289 /// value. This method ignores uses of other values defined by this operation.
6290 bool SDNode::hasAnyUseOfValue(unsigned Value) const {
6291   assert(Value < getNumValues() && "Bad value!");
6292 
6293   for (SDNode::use_iterator UI = use_begin(), E = use_end(); UI != E; ++UI)
6294     if (UI.getUse().getResNo() == Value)
6295       return true;
6296 
6297   return false;
6298 }
6299 
6300 
6301 /// isOnlyUserOf - Return true if this node is the only use of N.
6302 ///
6303 bool SDNode::isOnlyUserOf(SDNode *N) const {
6304   bool Seen = false;
6305   for (SDNode::use_iterator I = N->use_begin(), E = N->use_end(); I != E; ++I) {
6306     SDNode *User = *I;
6307     if (User == this)
6308       Seen = true;
6309     else
6310       return false;
6311   }
6312 
6313   return Seen;
6314 }
6315 
6316 /// isOperandOf - Return true if this node is an operand of N.
6317 ///
6318 bool SDValue::isOperandOf(SDNode *N) const {
6319   for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
6320     if (*this == N->getOperand(i))
6321       return true;
6322   return false;
6323 }
6324 
6325 bool SDNode::isOperandOf(SDNode *N) const {
6326   for (unsigned i = 0, e = N->NumOperands; i != e; ++i)
6327     if (this == N->OperandList[i].getNode())
6328       return true;
6329   return false;
6330 }
6331 
6332 /// reachesChainWithoutSideEffects - Return true if this operand (which must
6333 /// be a chain) reaches the specified operand without crossing any
6334 /// side-effecting instructions on any chain path.  In practice, this looks
6335 /// through token factors and non-volatile loads.  In order to remain efficient,
6336 /// this only looks a couple of nodes in; it does not do an exhaustive search.
6337 bool SDValue::reachesChainWithoutSideEffects(SDValue Dest,
6338                                                unsigned Depth) const {
6339   if (*this == Dest) return true;
6340 
6341   // Don't search too deeply, we just want to be able to see through
6342   // TokenFactor's etc.
6343   if (Depth == 0) return false;
6344 
6345   // If this is a token factor, all inputs to the TF happen in parallel.  If any
6346   // of the operands of the TF does not reach dest, then we cannot do the xform.
6347   if (getOpcode() == ISD::TokenFactor) {
6348     for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
6349       if (!getOperand(i).reachesChainWithoutSideEffects(Dest, Depth-1))
6350         return false;
6351     return true;
6352   }
6353 
6354   // Loads don't have side effects, look through them.
6355   if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(*this)) {
6356     if (!Ld->isVolatile())
6357       return Ld->getChain().reachesChainWithoutSideEffects(Dest, Depth-1);
6358   }
6359   return false;
6360 }
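
// Illustrative sketch with hypothetical chain values 'After' and 'Before': a
// transform that wants to reorder memory operations can check that one chain
// reaches the other while crossing only TokenFactors and non-volatile loads.
// A small explicit Depth keeps the walk cheap.
//
//   if (After.reachesChainWithoutSideEffects(Before, /*Depth=*/2)) {
//     // No side-effecting node was found on the paths that were examined.
//   }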
6361 
6362 /// hasPredecessor - Return true if N is a predecessor of this node.
6363 /// N is either an operand of this node, or can be reached by recursively
6364 /// traversing up the operands.
6365 /// NOTE: This is an expensive method. Use it carefully.
6366 bool SDNode::hasPredecessor(const SDNode *N) const {
6367   SmallPtrSet<const SDNode *, 32> Visited;
6368   SmallVector<const SDNode *, 16> Worklist;
6369   return hasPredecessorHelper(N, Visited, Worklist);
6370 }
6371 
6372 bool
6373 SDNode::hasPredecessorHelper(const SDNode *N,
6374                              SmallPtrSet<const SDNode *, 32> &Visited,
6375                              SmallVectorImpl<const SDNode *> &Worklist) const {
6376   if (Visited.empty()) {
6377     Worklist.push_back(this);
6378   } else {
6379     // Take a look in the visited set. If we've already encountered this node
6380     // we needn't search further.
6381     if (Visited.count(N))
6382       return true;
6383   }
6384 
6385   // Haven't visited N yet. Continue the search.
6386   while (!Worklist.empty()) {
6387     const SDNode *M = Worklist.pop_back_val();
6388     for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i) {
6389       SDNode *Op = M->getOperand(i).getNode();
6390       if (Visited.insert(Op))
6391         Worklist.push_back(Op);
6392       if (Op == N)
6393         return true;
6394     }
6395   }
6396 
6397   return false;
6398 }
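
// Illustrative sketch: hasPredecessorHelper exists so a caller that probes
// many candidates against the same root (hypothetical 'Root' and 'Candidates')
// can reuse one Visited set and Worklist instead of restarting each search.
//
//   SmallPtrSet<const SDNode *, 32> Visited;
//   SmallVector<const SDNode *, 16> Worklist;
//   for (const SDNode *C : Candidates)
//     if (Root->hasPredecessorHelper(C, Visited, Worklist)) {
//       // C is reachable by walking up Root's operands.
//     }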
6399 
6400 uint64_t SDNode::getConstantOperandVal(unsigned Num) const {
6401   assert(Num < NumOperands && "Invalid child # of SDNode!");
6402   return cast<ConstantSDNode>(OperandList[Num])->getZExtValue();
6403 }
6404 
6405 SDValue SelectionDAG::UnrollVectorOp(SDNode *N, unsigned ResNE) {
6406   assert(N->getNumValues() == 1 &&
6407          "Can't unroll a vector with multiple results!");
6408 
6409   EVT VT = N->getValueType(0);
6410   unsigned NE = VT.getVectorNumElements();
6411   EVT EltVT = VT.getVectorElementType();
6412   SDLoc dl(N);
6413 
6414   SmallVector<SDValue, 8> Scalars;
6415   SmallVector<SDValue, 4> Operands(N->getNumOperands());
6416 
6417   // If ResNE is 0, fully unroll the vector op.
6418   if (ResNE == 0)
6419     ResNE = NE;
6420   else if (NE > ResNE)
6421     NE = ResNE;
6422 
6423   unsigned i;
6424   for (i= 0; i != NE; ++i) {
6425     for (unsigned j = 0, e = N->getNumOperands(); j != e; ++j) {
6426       SDValue Operand = N->getOperand(j);
6427       EVT OperandVT = Operand.getValueType();
6428       if (OperandVT.isVector()) {
6429         // A vector operand; extract a single element.
6430         const TargetLowering *TLI = TM.getTargetLowering();
6431         EVT OperandEltVT = OperandVT.getVectorElementType();
6432         Operands[j] = getNode(ISD::EXTRACT_VECTOR_ELT, dl,
6433                               OperandEltVT,
6434                               Operand,
6435                               getConstant(i, TLI->getVectorIdxTy()));
6436       } else {
6437         // A scalar operand; just use it as is.
6438         Operands[j] = Operand;
6439       }
6440     }
6441 
6442     switch (N->getOpcode()) {
6443     default:
6444       Scalars.push_back(getNode(N->getOpcode(), dl, EltVT, Operands));
6445       break;
6446     case ISD::VSELECT:
6447       Scalars.push_back(getNode(ISD::SELECT, dl, EltVT, Operands));
6448       break;
6449     case ISD::SHL:
6450     case ISD::SRA:
6451     case ISD::SRL:
6452     case ISD::ROTL:
6453     case ISD::ROTR:
6454       Scalars.push_back(getNode(N->getOpcode(), dl, EltVT, Operands[0],
6455                                getShiftAmountOperand(Operands[0].getValueType(),
6456                                                      Operands[1])));
6457       break;
6458     case ISD::SIGN_EXTEND_INREG:
6459     case ISD::FP_ROUND_INREG: {
6460       EVT ExtVT = cast<VTSDNode>(Operands[1])->getVT().getVectorElementType();
6461       Scalars.push_back(getNode(N->getOpcode(), dl, EltVT,
6462                                 Operands[0],
6463                                 getValueType(ExtVT)));
6464     }
6465     }
6466   }
6467 
6468   for (; i < ResNE; ++i)
6469     Scalars.push_back(getUNDEF(EltVT));
6470 
6471   return getNode(ISD::BUILD_VECTOR, dl,
6472                  EVT::getVectorVT(*getContext(), EltVT, ResNE), Scalars);
6473 }
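
// Illustrative sketch with a hypothetical vector node 'N' and DAG reference
// 'DAG': a target with no instruction for a <4 x i32> operation can scalarize
// it.  Passing ResNE == 0 keeps N's element count; a larger ResNE pads the
// resulting BUILD_VECTOR with undef lanes.
//
//   SDValue Unrolled = DAG.UnrollVectorOp(N, /*ResNE=*/0);  // one op per lane
//   SDValue Padded   = DAG.UnrollVectorOp(N, /*ResNE=*/8);  // 4 ops + 4 undefs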
6474 
6475 
6476 /// isConsecutiveLoad - Return true if LD is loading 'Bytes' bytes from a
6477 /// location that is 'Dist' units away from the location that the 'Base' load
6478 /// is loading from.
6479 bool SelectionDAG::isConsecutiveLoad(LoadSDNode *LD, LoadSDNode *Base,
6480                                      unsigned Bytes, int Dist) const {
6481   if (LD->getChain() != Base->getChain())
6482     return false;
6483   EVT VT = LD->getValueType(0);
6484   if (VT.getSizeInBits() / 8 != Bytes)
6485     return false;
6486 
6487   SDValue Loc = LD->getOperand(1);
6488   SDValue BaseLoc = Base->getOperand(1);
6489   if (Loc.getOpcode() == ISD::FrameIndex) {
6490     if (BaseLoc.getOpcode() != ISD::FrameIndex)
6491       return false;
6492     const MachineFrameInfo *MFI = getMachineFunction().getFrameInfo();
6493     int FI  = cast<FrameIndexSDNode>(Loc)->getIndex();
6494     int BFI = cast<FrameIndexSDNode>(BaseLoc)->getIndex();
6495     int FS  = MFI->getObjectSize(FI);
6496     int BFS = MFI->getObjectSize(BFI);
6497     if (FS != BFS || FS != (int)Bytes) return false;
6498     return MFI->getObjectOffset(FI) == (MFI->getObjectOffset(BFI) + Dist*Bytes);
6499   }
6500 
6501   // Handle X+C
6502   if (isBaseWithConstantOffset(Loc) && Loc.getOperand(0) == BaseLoc &&
6503       cast<ConstantSDNode>(Loc.getOperand(1))->getSExtValue() == Dist*Bytes)
6504     return true;
6505 
6506   const GlobalValue *GV1 = nullptr;
6507   const GlobalValue *GV2 = nullptr;
6508   int64_t Offset1 = 0;
6509   int64_t Offset2 = 0;
6510   const TargetLowering *TLI = TM.getTargetLowering();
6511   bool isGA1 = TLI->isGAPlusOffset(Loc.getNode(), GV1, Offset1);
6512   bool isGA2 = TLI->isGAPlusOffset(BaseLoc.getNode(), GV2, Offset2);
6513   if (isGA1 && isGA2 && GV1 == GV2)
6514     return Offset1 == (Offset2 + Dist*Bytes);
6515   return false;
6516 }
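
// Illustrative sketch with hypothetical LoadSDNodes 'LD0' and 'LD1': a combine
// that wants to merge two 4-byte loads into one wider load can first confirm
// that LD1 reads the 4 bytes immediately after LD0 on the same chain.
//
//   if (DAG.isConsecutiveLoad(LD1, LD0, /*Bytes=*/4, /*Dist=*/1)) {
//     // LD1's address is LD0's address plus 4 bytes.
//   }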
6517 
6518 
6519 /// InferPtrAlignment - Infer alignment of a load / store address. Return 0 if
6520 /// it cannot be inferred.
6521 unsigned SelectionDAG::InferPtrAlignment(SDValue Ptr) const {
6522   // If this is a GlobalAddress + cst, return the alignment.
6523   const GlobalValue *GV;
6524   int64_t GVOffset = 0;
6525   const TargetLowering *TLI = TM.getTargetLowering();
6526   if (TLI->isGAPlusOffset(Ptr.getNode(), GV, GVOffset)) {
6527     unsigned PtrWidth = TLI->getPointerTypeSizeInBits(GV->getType());
6528     APInt KnownZero(PtrWidth, 0), KnownOne(PtrWidth, 0);
6529     llvm::computeKnownBits(const_cast<GlobalValue*>(GV), KnownZero, KnownOne,
6530                            TLI->getDataLayout());
6531     unsigned AlignBits = KnownZero.countTrailingOnes();
6532     unsigned Align = AlignBits ? 1 << std::min(31U, AlignBits) : 0;
6533     if (Align)
6534       return MinAlign(Align, GVOffset);
6535   }
6536 
6537   // If this is a direct reference to a stack slot, use information about the
6538   // stack slot's alignment.
6539   int FrameIdx = 1 << 31;
6540   int64_t FrameOffset = 0;
6541   if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Ptr)) {
6542     FrameIdx = FI->getIndex();
6543   } else if (isBaseWithConstantOffset(Ptr) &&
6544              isa<FrameIndexSDNode>(Ptr.getOperand(0))) {
6545     // Handle FI+Cst
6546     FrameIdx = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex();
6547     FrameOffset = Ptr.getConstantOperandVal(1);
6548   }
6549 
6550   if (FrameIdx != (1 << 31)) {
6551     const MachineFrameInfo &MFI = *getMachineFunction().getFrameInfo();
6552     unsigned FIInfoAlign = MinAlign(MFI.getObjectAlignment(FrameIdx),
6553                                     FrameOffset);
6554     return FIInfoAlign;
6555   }
6556 
6557   return 0;
6558 }
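
// Illustrative sketch with a hypothetical address 'Ptr': a caller lowering a
// memory access can use the inferred alignment to pick a wider access; a
// result of 0 means nothing could be proven.
//
//   if (unsigned Align = DAG.InferPtrAlignment(Ptr)) {
//     // Align is a known power-of-two lower bound on Ptr's alignment.
//   }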
6559 
6560 /// GetSplitDestVTs - Compute the VTs needed for the low/hi parts of a type
6561 /// which is split (or expanded) into two not necessarily identical pieces.
6562 std::pair<EVT, EVT> SelectionDAG::GetSplitDestVTs(const EVT &VT) const {
6563   // Currently all types are split in half.
6564   EVT LoVT, HiVT;
6565   if (!VT.isVector()) {
6566     LoVT = HiVT = TLI->getTypeToTransformTo(*getContext(), VT);
6567   } else {
6568     unsigned NumElements = VT.getVectorNumElements();
6569     assert(!(NumElements & 1) && "Splitting vector, but not in half!");
6570     LoVT = HiVT = EVT::getVectorVT(*getContext(), VT.getVectorElementType(),
6571                                    NumElements/2);
6572   }
6573   return std::make_pair(LoVT, HiVT);
6574 }
6575 
6576 /// SplitVector - Split the vector with EXTRACT_SUBVECTOR and return the
6577 /// low/high part.
6578 std::pair<SDValue, SDValue>
6579 SelectionDAG::SplitVector(const SDValue &N, const SDLoc &DL, const EVT &LoVT,
6580                           const EVT &HiVT) {
6581   assert(LoVT.getVectorNumElements() + HiVT.getVectorNumElements() <=
6582          N.getValueType().getVectorNumElements() &&
6583          "More vector elements requested than available!");
6584   SDValue Lo, Hi;
6585   Lo = getNode(ISD::EXTRACT_SUBVECTOR, DL, LoVT, N,
6586                getConstant(0, TLI->getVectorIdxTy()));
6587   Hi = getNode(ISD::EXTRACT_SUBVECTOR, DL, HiVT, N,
6588                getConstant(LoVT.getVectorNumElements(), TLI->getVectorIdxTy()));
6589   return std::make_pair(Lo, Hi);
6590 }
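
// Illustrative sketch with a hypothetical wide vector value 'Vec': type
// legalization typically asks for the half types first and then extracts the
// two subvectors.
//
//   std::pair<EVT, EVT> VTs = DAG.GetSplitDestVTs(Vec.getValueType());
//   std::pair<SDValue, SDValue> Halves =
//       DAG.SplitVector(Vec, SDLoc(Vec), VTs.first, VTs.second);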
6591 
6592 void SelectionDAG::ExtractVectorElements(SDValue Op,
6593                                          SmallVectorImpl<SDValue> &Args,
6594                                          unsigned Start, unsigned Count) {
6595   EVT VT = Op.getValueType();
6596   if (Count == 0)
6597     Count = VT.getVectorNumElements();
6598 
6599   EVT EltVT = VT.getVectorElementType();
6600   EVT IdxTy = TLI->getVectorIdxTy();
6601   SDLoc SL(Op);
6602   for (unsigned i = Start, e = Start + Count; i != e; ++i) {
6603     Args.push_back(getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT,
6604                            Op, getConstant(i, IdxTy)));
6605   }
6606 }
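
// Illustrative sketch with a hypothetical <4 x float> value 'Vec': extract the
// two middle lanes as scalar SDValues; a Count of 0 would extract every lane.
//
//   SmallVector<SDValue, 4> Elts;
//   DAG.ExtractVectorElements(Vec, Elts, /*Start=*/1, /*Count=*/2);
//   // Elts now holds EXTRACT_VECTOR_ELT nodes for lanes 1 and 2.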
6607 
6608 // getAddressSpace - Return the address space this GlobalAddress belongs to.
6609 unsigned GlobalAddressSDNode::getAddressSpace() const {
6610   return getGlobal()->getType()->getAddressSpace();
6611 }
6612 
6613 
6614 Type *ConstantPoolSDNode::getType() const {
6615   if (isMachineConstantPoolEntry())
6616     return Val.MachineCPVal->getType();
6617   return Val.ConstVal->getType();
6618 }
6619 
6620 bool BuildVectorSDNode::isConstantSplat(APInt &SplatValue,
6621                                         APInt &SplatUndef,
6622                                         unsigned &SplatBitSize,
6623                                         bool &HasAnyUndefs,
6624                                         unsigned MinSplatBits,
6625                                         bool isBigEndian) const {
6626   EVT VT = getValueType(0);
6627   assert(VT.isVector() && "Expected a vector type");
6628   unsigned sz = VT.getSizeInBits();
6629   if (MinSplatBits > sz)
6630     return false;
6631 
6632   SplatValue = APInt(sz, 0);
6633   SplatUndef = APInt(sz, 0);
6634 
6635   // Get the bits.  Bits with undefined values (when the corresponding element
6636   // of the vector is an ISD::UNDEF value) are set in SplatUndef and cleared
6637   // in SplatValue.  If any of the values are not constant, give up and return
6638   // false.
6639   unsigned int nOps = getNumOperands();
6640   assert(nOps > 0 && "isConstantSplat has 0-size build vector");
6641   unsigned EltBitSize = VT.getVectorElementType().getSizeInBits();
6642 
6643   for (unsigned j = 0; j < nOps; ++j) {
6644     unsigned i = isBigEndian ? nOps-1-j : j;
6645     SDValue OpVal = getOperand(i);
6646     unsigned BitPos = j * EltBitSize;
6647 
6648     if (OpVal.getOpcode() == ISD::UNDEF)
6649       SplatUndef |= APInt::getBitsSet(sz, BitPos, BitPos + EltBitSize);
6650     else if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(OpVal))
6651       SplatValue |= CN->getAPIntValue().zextOrTrunc(EltBitSize).
6652                     zextOrTrunc(sz) << BitPos;
6653     else if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(OpVal))
6654       SplatValue |= CN->getValueAPF().bitcastToAPInt().zextOrTrunc(sz) <<BitPos;
6655      else
6656       return false;
6657   }
6658 
6659   // The build_vector is all constants or undefs.  Find the smallest element
6660   // size that splats the vector.
6661 
6662   HasAnyUndefs = (SplatUndef != 0);
6663   while (sz > 8) {
6664 
6665     unsigned HalfSize = sz / 2;
6666     APInt HighValue = SplatValue.lshr(HalfSize).trunc(HalfSize);
6667     APInt LowValue = SplatValue.trunc(HalfSize);
6668     APInt HighUndef = SplatUndef.lshr(HalfSize).trunc(HalfSize);
6669     APInt LowUndef = SplatUndef.trunc(HalfSize);
6670 
6671     // If the two halves do not match (ignoring undef bits), stop here.
6672     if ((HighValue & ~LowUndef) != (LowValue & ~HighUndef) ||
6673         MinSplatBits > HalfSize)
6674       break;
6675 
6676     SplatValue = HighValue | LowValue;
6677     SplatUndef = HighUndef & LowUndef;
6678 
6679     sz = HalfSize;
6680   }
6681 
6682   SplatBitSize = sz;
6683   return true;
6684 }
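
// Illustrative sketch with a hypothetical BuildVectorSDNode 'BV': ask whether
// the build_vector repeats one constant at some element width of at least
// 8 bits, e.g. before materializing it with a splat-immediate instruction.
//
//   APInt SplatValue, SplatUndef;
//   unsigned SplatBitSize;
//   bool HasAnyUndefs;
//   if (BV->isConstantSplat(SplatValue, SplatUndef, SplatBitSize, HasAnyUndefs,
//                           /*MinSplatBits=*/8, /*isBigEndian=*/false)) {
//     // Repeating SplatValue every SplatBitSize bits reproduces the vector.
//   }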
6685 
6686 SDValue BuildVectorSDNode::getSplatValue(BitVector *UndefElements) const {
6687   if (UndefElements) {
6688     UndefElements->clear();
6689     UndefElements->resize(getNumOperands());
6690   }
6691   SDValue Splatted;
6692   for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
6693     SDValue Op = getOperand(i);
6694     if (Op.getOpcode() == ISD::UNDEF) {
6695       if (UndefElements)
6696         (*UndefElements)[i] = true;
6697     } else if (!Splatted) {
6698       Splatted = Op;
6699     } else if (Splatted != Op) {
6700       return SDValue();
6701     }
6702   }
6703 
6704   if (!Splatted) {
6705     assert(getOperand(0).getOpcode() == ISD::UNDEF &&
6706            "Can only have a splat without a constant for all undefs.");
6707     return getOperand(0);
6708   }
6709 
6710   return Splatted;
6711 }
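
// Illustrative sketch with a hypothetical BuildVectorSDNode 'BV': the
// operand-level counterpart of isConstantSplat; it also reports which lanes
// were undef.
//
//   BitVector Undefs;
//   SDValue Splat = BV->getSplatValue(&Undefs);
//   if (Splat.getNode()) {
//     // Every defined lane of BV is this one value.
//   }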
6712 
6713 ConstantSDNode *
6714 BuildVectorSDNode::getConstantSplatNode(BitVector *UndefElements) const {
6715   return dyn_cast_or_null<ConstantSDNode>(
6716       getSplatValue(UndefElements).getNode());
6717 }
6718 
6719 ConstantFPSDNode *
6720 BuildVectorSDNode::getConstantFPSplatNode(BitVector *UndefElements) const {
6721   return dyn_cast_or_null<ConstantFPSDNode>(
6722       getSplatValue(UndefElements).getNode());
6723 }
6724 
6725 bool BuildVectorSDNode::isConstant() const {
6726   for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
6727     unsigned Opc = getOperand(i).getOpcode();
6728     if (Opc != ISD::UNDEF && Opc != ISD::Constant && Opc != ISD::ConstantFP)
6729       return false;
6730   }
6731   return true;
6732 }
6733 
6734 bool ShuffleVectorSDNode::isSplatMask(const int *Mask, EVT VT) {
6735   // Find the first non-undef value in the shuffle mask.
6736   unsigned i, e;
6737   for (i = 0, e = VT.getVectorNumElements(); i != e && Mask[i] < 0; ++i)
6738     /* search */;
6739 
6740   assert(i != e && "VECTOR_SHUFFLE node with all undef indices!");
6741 
6742   // Make sure all remaining elements are either undef or the same as the first
6743   // non-undef value.
6744   for (int Idx = Mask[i]; i != e; ++i)
6745     if (Mask[i] >= 0 && Mask[i] != Idx)
6746       return false;
6747   return true;
6748 }
6749 
6750 #ifndef NDEBUG
6751 static void checkForCyclesHelper(const SDNode *N,
6752                                  SmallPtrSet<const SDNode*, 32> &Visited,
6753                                  SmallPtrSet<const SDNode*, 32> &Checked,
6754                                  const llvm::SelectionDAG *DAG) {
6755   // If this node has already been checked, don't check it again.
6756   if (Checked.count(N))
6757     return;
6758 
6759   // If a node has already been visited on this depth-first walk, reject it as
6760   // a cycle.
6761   if (!Visited.insert(N)) {
6762     errs() << "Detected cycle in SelectionDAG\n";
6763     dbgs() << "Offending node:\n";
6764     N->dumprFull(DAG); dbgs() << "\n";
6765     abort();
6766   }
6767 
6768   for(unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
6769     checkForCyclesHelper(N->getOperand(i).getNode(), Visited, Checked, DAG);
6770 
6771   Checked.insert(N);
6772   Visited.erase(N);
6773 }
6774 #endif
6775 
6776 void llvm::checkForCycles(const llvm::SDNode *N,
6777                           const llvm::SelectionDAG *DAG,
6778                           bool force) {
6779 #ifndef NDEBUG
6780   bool check = force;
6781 #ifdef XDEBUG
6782   check = true;
6783 #endif  // XDEBUG
6784   if (check) {
6785     assert(N && "Checking nonexistent SDNode");
6786     SmallPtrSet<const SDNode*, 32> visited;
6787     SmallPtrSet<const SDNode*, 32> checked;
6788     checkForCyclesHelper(N, visited, checked, DAG);
6789   }
6790 #endif  // !NDEBUG
6791 }
6792 
6793 void llvm::checkForCycles(const llvm::SelectionDAG *DAG, bool force) {
6794   checkForCycles(DAG->getRoot().getNode(), DAG, force);
6795 }
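
// Illustrative sketch with hypothetical 'NewNode' and 'DAG': a pass that
// rewrites chains can sanity-check its work.  Without XDEBUG the check only
// runs when force is true, and it disappears entirely in NDEBUG builds.
//
//   checkForCycles(NewNode, &DAG, /*force=*/true);  // check from one node
//   checkForCycles(&DAG, /*force=*/true);           // check from the DAG root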
6796